ide-taskfile.c revision 130e886708d6e11f3d54e5d27c266578de56f343
/*
 * Copyright (C) 2000-2002 Michael Cornwell <cornwell@acm.org>
 * Copyright (C) 2000-2002 Andre Hedrick <andre@linux-ide.org>
 * Copyright (C) 2001-2002 Klaus Smolin
 * IBM Storage Technology Division
 * Copyright (C) 2003-2004, 2007 Bartlomiej Zolnierkiewicz
 *
 * The big the bad and the ugly.
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/hdreg.h>
#include <linux/ide.h>
#include <linux/scatterlist.h>

#include <asm/uaccess.h>
#include <asm/io.h>

/*
 * Dump the contents of a taskfile (command block registers plus the HOB
 * "high order byte" shadow registers) to the kernel log.  Compiles to a
 * no-op unless DEBUG is defined.
 */
void ide_tf_dump(const char *s, struct ide_taskfile *tf)
{
#ifdef DEBUG
	printk("%s: tf: feat 0x%02x nsect 0x%02x lbal 0x%02x "
		"lbam 0x%02x lbah 0x%02x dev 0x%02x cmd 0x%02x\n",
		s, tf->feature, tf->nsect, tf->lbal,
		tf->lbam, tf->lbah, tf->device, tf->command);
	printk("%s: hob: nsect 0x%02x lbal 0x%02x "
		"lbam 0x%02x lbah 0x%02x\n",
		s, tf->hob_nsect, tf->hob_lbal,
		tf->hob_lbam, tf->hob_lbah);
#endif
}

/*
 * Issue an IDENTIFY DEVICE (disks) or IDENTIFY PACKET DEVICE (everything
 * else) command via PIO and read one sector of identify data into @buf.
 *
 * Returns the ide_raw_taskfile() result (0 on success).
 */
int taskfile_lib_get_identify (ide_drive_t *drive, u8 *buf)
{
	struct ide_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.tf.nsect = 0x01;		/* identify data is exactly one sector */
	if (drive->media == ide_disk)
		cmd.tf.command = ATA_CMD_ID_ATA;
	else
		cmd.tf.command = ATA_CMD_ID_ATAPI;
	cmd.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
	cmd.protocol = ATA_PROT_PIO;

	return ide_raw_taskfile(drive, &cmd, buf, 1);
}

static ide_startstop_t task_no_data_intr(ide_drive_t *);
static ide_startstop_t pre_task_out_intr(ide_drive_t *, struct ide_cmd *);
static ide_startstop_t task_pio_intr(ide_drive_t *);

/*
 * Issue the taskfile command described by @orig_cmd to the device.
 *
 * The command is copied into the hwif's embedded ide_cmd (the IRQ handlers
 * read it back from there), the taskfile registers are loaded unless this
 * is a PIO fallback after a failed DMA setup (registers already loaded),
 * and then the command is started according to its protocol:
 *  - PIO write:  issue command, hand off to pre_task_out_intr(),
 *  - PIO read:   execute with task_pio_intr as the IRQ handler,
 *  - no-data:    execute with task_no_data_intr as the IRQ handler,
 *  - otherwise (DMA): build the sglist, program and start the DMA engine.
 *
 * Returns ide_started if an IRQ/DMA completion is now pending,
 * ide_stopped on early failure.
 */
ide_startstop_t do_rw_taskfile(ide_drive_t *drive, struct ide_cmd *orig_cmd)
{
	ide_hwif_t *hwif = drive->hwif;
	struct ide_cmd *cmd = &hwif->cmd;
	struct ide_taskfile *tf = &cmd->tf;
	ide_handler_t *handler = NULL;
	const struct ide_tp_ops *tp_ops = hwif->tp_ops;
	const struct ide_dma_ops *dma_ops = hwif->dma_ops;

	/* multi-sector PIO requires a previously negotiated mult_count */
	if (orig_cmd->protocol == ATA_PROT_PIO &&
	    (orig_cmd->tf_flags & IDE_TFLAG_MULTI_PIO) &&
	    drive->mult_count == 0) {
		printk(KERN_ERR "%s: multimode not set!\n", drive->name);
		return ide_stopped;
	}

	if (orig_cmd->ftf_flags & IDE_FTFLAG_FLAGGED)
		orig_cmd->ftf_flags |= IDE_FTFLAG_SET_IN_FLAGS;

	memcpy(cmd, orig_cmd, sizeof(*cmd));

	if ((cmd->tf_flags & IDE_TFLAG_DMA_PIO_FALLBACK) == 0) {
		ide_tf_dump(drive->name, tf);
		tp_ops->set_irq(hwif, 1);
		SELECT_MASK(drive, 0);
		tp_ops->tf_load(drive, cmd);
	}

	switch (cmd->protocol) {
	case ATA_PROT_PIO:
		if (cmd->tf_flags & IDE_TFLAG_WRITE) {
			tp_ops->exec_command(hwif, tf->command);
			ndelay(400);	/* FIXME */
			return pre_task_out_intr(drive, cmd);
		}
		handler = task_pio_intr;
		/* fall-through */
	case ATA_PROT_NODATA:
		if (handler == NULL)
			handler = task_no_data_intr;
		ide_execute_command(drive, tf->command, handler,
				    WAIT_WORSTCASE, NULL);
		return ide_started;
	default:
		/* DMA path: bail out if DMA is off or setup fails */
		if ((drive->dev_flags & IDE_DFLAG_USING_DMA) == 0 ||
		    ide_build_sglist(drive, hwif->rq) == 0 ||
		    dma_ops->dma_setup(drive))
			return ide_stopped;
		dma_ops->dma_exec_cmd(drive, tf->command);
		dma_ops->dma_start(drive);
		return ide_started;
	}
}
EXPORT_SYMBOL_GPL(do_rw_taskfile);

/*
 * IRQ handler for commands without a data phase.
 *
 * Polls BSY briefly (INIT_DEV_PARAMS gets a few extra polls -- presumably
 * some drives stay busy longer for it; TODO confirm), then checks status.
 * SET_MULTI and INIT_DEV_PARAMS issued with IDE_TFLAG_CUSTOM_HANDLER get
 * special error/retry treatment; otherwise an error goes to ide_error().
 * On success the request is completed (power-management requests via
 * ide_complete_pm_rq()).
 */
static ide_startstop_t task_no_data_intr(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	struct ide_cmd *cmd = &hwif->cmd;
	struct ide_taskfile *tf = &cmd->tf;
	int custom = (cmd->tf_flags & IDE_TFLAG_CUSTOM_HANDLER) ? 1 : 0;
	int retries = (custom && tf->command == ATA_CMD_INIT_DEV_PARAMS) ?
		5 : 1;
	u8 stat;

	local_irq_enable_in_hardirq();

	while (1) {
		stat = hwif->tp_ops->read_status(hwif);
		if ((stat & ATA_BUSY) == 0 || retries-- == 0)
			break;
		udelay(10);
	};	/* NOTE(review): stray ';' after the loop -- harmless */

	if (!OK_STAT(stat, ATA_DRDY, BAD_STAT)) {
		if (custom && tf->command == ATA_CMD_SET_MULTI) {
			/* device rejected SET MULTIPLE: fall back to
			 * single-sector mode and force a recalibrate */
			drive->mult_req = drive->mult_count = 0;
			drive->special.b.recalibrate = 1;
			(void)ide_dump_status(drive, __func__, stat);
			return ide_stopped;
		} else if (custom && tf->command == ATA_CMD_INIT_DEV_PARAMS) {
			/* neither ERR nor DRQ set: device not done yet,
			 * re-arm this handler and wait for another IRQ */
			if ((stat & (ATA_ERR | ATA_DRQ)) == 0) {
				ide_set_handler(drive, &task_no_data_intr,
						WAIT_WORSTCASE, NULL);
				return ide_started;
			}
		}
		return ide_error(drive, "task_no_data_intr", stat);
	}

	/* SET MULTIPLE accepted: commit the requested multi-sector count */
	if (custom && tf->command == ATA_CMD_SET_MULTI)
		drive->mult_count = drive->mult_req;

	if (custom == 0 || tf->command == ATA_CMD_IDLEIMMEDIATE ||
	    tf->command == ATA_CMD_CHK_POWER) {
		struct request *rq = hwif->rq;

		if (blk_pm_request(rq))
			ide_complete_pm_rq(drive, rq);
		else
			ide_finish_cmd(drive, cmd, stat);
	}

	return ide_stopped;
}

/*
 * Poll the status register until BSY clears, up to ~10 ms
 * (1000 polls * 10 us).  Returns the last status value read;
 * logs an error if the drive is still busy after the timeout.
 */
static u8 wait_drive_not_busy(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	int retries;
	u8 stat;

	/*
	 * Last sector was transfered, wait until device is ready. This can
	 * take up to 6 ms on some ATAPI devices, so we will wait max 10 ms.
	 */
	for (retries = 0; retries < 1000; retries++) {
		stat = hwif->tp_ops->read_status(hwif);

		if (stat & ATA_BUSY)
			udelay(10);
		else
			break;
	}

	if (stat & ATA_BUSY)
		printk(KERN_ERR "%s: drive still BUSY!\n", drive->name);

	return stat;
}

/*
 * Transfer exactly one sector between the device and the current
 * scatterlist segment of @cmd, advancing cmd->cursg/cursg_ofs/nleft.
 * @write is nonzero for host->device transfers.
 *
 * The data page is mapped with kmap_atomic() so highmem pages work;
 * under CONFIG_HIGHMEM interrupts are disabled around the mapping.
 */
static void ide_pio_sector(ide_drive_t *drive, struct ide_cmd *cmd,
			   unsigned int write)
{
	ide_hwif_t *hwif = drive->hwif;
	struct scatterlist *sg = hwif->sg_table;
	struct scatterlist *cursg = cmd->cursg;
	struct page *page;
#ifdef CONFIG_HIGHMEM
	unsigned long flags;
#endif
	unsigned int offset;
	u8 *buf;

	/* NOTE(review): redundant -- cursg was already initialized above */
	cursg = cmd->cursg;
	if (!cursg) {
		cursg = sg;
		cmd->cursg = sg;
	}

	page = sg_page(cursg);
	offset = cursg->offset + cmd->cursg_ofs * SECTOR_SIZE;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

#ifdef CONFIG_HIGHMEM
	local_irq_save(flags);
#endif
	buf = kmap_atomic(page, KM_BIO_SRC_IRQ) + offset;

	cmd->nleft--;
	cmd->cursg_ofs++;

	/* segment exhausted: move on to the next scatterlist entry */
	if ((cmd->cursg_ofs * SECTOR_SIZE) == cursg->length) {
		cmd->cursg = sg_next(cmd->cursg);
		cmd->cursg_ofs = 0;
	}

	/* do the actual data transfer */
	if (write)
		hwif->tp_ops->output_data(drive, cmd, buf, SECTOR_SIZE);
	else
		hwif->tp_ops->input_data(drive, cmd, buf, SECTOR_SIZE);

	kunmap_atomic(buf, KM_BIO_SRC_IRQ);
#ifdef CONFIG_HIGHMEM
	local_irq_restore(flags);
#endif
}

/*
 * Transfer one multi-sector block: min(nleft, mult_count) sectors,
 * one ide_pio_sector() call per sector.
 */
static void ide_pio_multi(ide_drive_t *drive, struct ide_cmd *cmd,
			  unsigned int write)
{
	unsigned int nsect;

	nsect = min_t(unsigned int, cmd->nleft, drive->mult_count);
	while (nsect--)
		ide_pio_sector(drive, cmd, write);
}

/*
 * Transfer one PIO data block (single sector, or a multi-sector block
 * when IDE_TFLAG_MULTI_PIO is set), temporarily forcing 16-bit I/O if
 * the command requests it.
 */
static void ide_pio_datablock(ide_drive_t *drive, struct ide_cmd *cmd,
			      unsigned int write)
{
	u8 saved_io_32bit = drive->io_32bit;

	if (cmd->tf_flags & IDE_TFLAG_FS)
		cmd->rq->errors = 0;

	if (cmd->tf_flags & IDE_TFLAG_IO_16BIT)
		drive->io_32bit = 0;

	/* PIO loops can run long enough to trip the soft-lockup detector */
	touch_softlockup_watchdog();

	if (cmd->tf_flags & IDE_TFLAG_MULTI_PIO)
		ide_pio_multi(drive, cmd, write);
	else
		ide_pio_sector(drive, cmd, write);

	drive->io_32bit = saved_io_32bit;
}

/*
 * On error, complete the part of a filesystem request that was already
 * transferred successfully.  For PIO writes (and fully-drained reads) the
 * last block is assumed in-flight/unacknowledged and is subtracted back.
 */
static void ide_error_cmd(ide_drive_t *drive, struct ide_cmd *cmd)
{
	if (cmd->tf_flags & IDE_TFLAG_FS) {
		int sectors = cmd->nsect - cmd->nleft;

		if (cmd->protocol == ATA_PROT_PIO &&
		    ((cmd->tf_flags & IDE_TFLAG_WRITE) || cmd->nleft == 0)) {
			if (cmd->tf_flags & IDE_TFLAG_MULTI_PIO)
				sectors -= drive->mult_count;
			else
				sectors--;
		}

		if (sectors > 0)
			ide_complete_rq(drive, 0, sectors << 9);
	}
}

/*
 * Read back the taskfile/error registers, record them in @cmd, store the
 * error byte in the request and complete the whole request (-EIO if the
 * device reported an error).
 */
void ide_finish_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat)
{
	struct request *rq = drive->hwif->rq;
	u8 err = ide_read_error(drive);

	ide_complete_cmd(drive, cmd, stat, err);
	rq->errors = err;
	ide_complete_rq(drive, err ? -EIO : 0, blk_rq_bytes(rq));
}

/*
 * Handler for command with PIO data phase.
 */
static ide_startstop_t task_pio_intr(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	struct ide_cmd *cmd = &drive->hwif->cmd;
	u8 stat = hwif->tp_ops->read_status(hwif);
	u8 write = !!(cmd->tf_flags & IDE_TFLAG_WRITE);

	if (write == 0) {
		/* Error? */
		if (stat & ATA_ERR)
			goto out_err;

		/* Didn't want any data? Odd. */
		if ((stat & ATA_DRQ) == 0) {
			/* Command all done? */
			if (OK_STAT(stat, ATA_DRDY, ATA_BUSY))
				goto out_end;

			/* Assume it was a spurious irq */
			goto out_wait;
		}
	} else {
		if (!OK_STAT(stat, DRIVE_READY, drive->bad_wstat))
			goto out_err;

		/* Deal with unexpected ATA data phase. */
		if (((stat & ATA_DRQ) == 0) ^ (cmd->nleft == 0))
			goto out_err;
	}

	if (write && cmd->nleft == 0)
		goto out_end;

	/* Still data left to transfer. */
	ide_pio_datablock(drive, cmd, write);

	/* Are we done? Check status and finish transfer. */
	if (write == 0 && cmd->nleft == 0) {
		stat = wait_drive_not_busy(drive);
		if (!OK_STAT(stat, 0, BAD_STAT))
			goto out_err;

		goto out_end;
	}
out_wait:
	/* Still data left to transfer. */
	ide_set_handler(drive, &task_pio_intr, WAIT_WORSTCASE, NULL);
	return ide_started;
out_end:
	if ((cmd->tf_flags & IDE_TFLAG_FS) == 0)
		ide_finish_cmd(drive, cmd, stat);
	else
		ide_complete_rq(drive, 0, cmd->rq->nr_sectors << 9);
	return ide_stopped;
out_err:
	ide_error_cmd(drive, cmd);
	return ide_error(drive, __func__, stat);
}

/*
 * Start a PIO write: wait for the device to assert DRQ, then push the
 * first data block and install task_pio_intr for the remaining IRQs.
 */
static ide_startstop_t pre_task_out_intr(ide_drive_t *drive,
					 struct ide_cmd *cmd)
{
	ide_startstop_t startstop;

	if (ide_wait_stat(&startstop, drive, ATA_DRQ,
			  drive->bad_wstat, WAIT_DRQ)) {
		printk(KERN_ERR "%s: no DRQ after issuing %sWRITE%s\n",
			drive->name,
			(cmd->tf_flags & IDE_TFLAG_MULTI_PIO) ? "MULT" : "",
			(drive->dev_flags & IDE_DFLAG_LBA48) ? "_EXT" : "");
		return startstop;
	}

	if ((drive->dev_flags & IDE_DFLAG_UNMASK) == 0)
		local_irq_disable();

	/* install the IRQ handler before sending the first block */
	ide_set_handler(drive, &task_pio_intr, WAIT_WORSTCASE, NULL);

	ide_pio_datablock(drive, cmd, 1);

	return ide_started;
}

/*
 * Build a REQ_TYPE_ATA_TASKFILE block request around @cmd, execute it
 * synchronously through the drive's queue and return the blk layer's
 * error code.  @buf/@nsect describe the (whole-sector) data payload,
 * if any.
 */
int ide_raw_taskfile(ide_drive_t *drive, struct ide_cmd *cmd, u8 *buf,
		     u16 nsect)
{
	struct request *rq;
	int error;

	rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
	rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
	rq->buffer = buf;

	/*
	 * (ks) We transfer currently only whole sectors.
	 * This is suffient for now.  But, it would be great,
	 * if we would find a solution to transfer any size.
	 * To support special commands like READ LONG.
	 */
	rq->hard_nr_sectors = rq->nr_sectors = nsect;
	rq->hard_cur_sectors = rq->current_nr_sectors = nsect;

	if (cmd->tf_flags & IDE_TFLAG_WRITE)
		rq->cmd_flags |= REQ_RW;

	rq->special = cmd;
	cmd->rq = rq;

	error = blk_execute_rq(drive->queue, NULL, rq, 0);
	blk_put_request(rq);

	return error;
}

EXPORT_SYMBOL(ide_raw_taskfile);

/*
 * Convenience wrapper: execute @cmd as a no-data-phase taskfile command.
 */
int ide_no_data_taskfile(ide_drive_t *drive, struct ide_cmd *cmd)
{
	cmd->protocol = ATA_PROT_NODATA;

	return ide_raw_taskfile(drive, cmd, NULL, 0);
}
EXPORT_SYMBOL_GPL(ide_no_data_taskfile);

#ifdef CONFIG_IDE_TASK_IOCTL
/*
 * HDIO_DRIVE_TASKFILE ioctl handler.
 *
 * The user buffer at @arg holds an ide_task_request_t header followed by
 * the optional "out" (host->device) payload and then the optional "in"
 * (device->host) payload.  The header's out_flags/in_flags select which
 * taskfile registers are written/read (the "flagged" taskfile mode), and
 * data_phase selects the transfer protocol.  After execution, the header
 * (with updated register values) and the payloads are copied back out.
 *
 * Returns 0 on success or a negative errno.
 */
int ide_taskfile_ioctl(ide_drive_t *drive, unsigned long arg)
{
	ide_task_request_t *req_task;
	struct ide_cmd cmd;
	u8 *outbuf = NULL;
	u8 *inbuf = NULL;
	u8 *data_buf = NULL;
	int err = 0;
	int tasksize = sizeof(struct ide_task_request_s);
	unsigned int taskin = 0;
	unsigned int taskout = 0;
	u16 nsect = 0;
	char __user *buf = (char __user *)arg;

//	printk("IDE Taskfile ...\n");

	req_task = kzalloc(tasksize, GFP_KERNEL);
	if (req_task == NULL) return -ENOMEM;
	if (copy_from_user(req_task, buf, tasksize)) {
		kfree(req_task);
		return -EFAULT;
	}

	taskout = req_task->out_size;
	taskin = req_task->in_size;

	/* cap user-supplied payload sizes before allocating */
	if (taskin > 65536 || taskout > 65536) {
		err = -EINVAL;
		goto abort;
	}

	if (taskout) {
		int outtotal = tasksize;
		outbuf = kzalloc(taskout, GFP_KERNEL);
		if (outbuf == NULL) {
			err = -ENOMEM;
			goto abort;
		}
		if (copy_from_user(outbuf, buf + outtotal, taskout)) {
			err = -EFAULT;
			goto abort;
		}
	}

	if (taskin) {
		int intotal = tasksize + taskout;
		inbuf = kzalloc(taskin, GFP_KERNEL);
		if (inbuf == NULL) {
			err = -ENOMEM;
			goto abort;
		}
		if (copy_from_user(inbuf, buf + intotal, taskin)) {
			err = -EFAULT;
			goto abort;
		}
	}

	memset(&cmd, 0, sizeof(cmd));

	/* hob_ports[] maps to tf_array[0..], io_ports[] to tf_array[6..] */
	memcpy(&cmd.tf_array[0], req_task->hob_ports,
	       HDIO_DRIVE_HOB_HDR_SIZE - 2);
	memcpy(&cmd.tf_array[6], req_task->io_ports,
	       HDIO_DRIVE_TASK_HDR_SIZE);

	cmd.tf_flags = IDE_TFLAG_IO_16BIT | IDE_TFLAG_DEVICE |
		       IDE_TFLAG_IN_TF;

	if (drive->dev_flags & IDE_DFLAG_LBA48)
		cmd.tf_flags |= (IDE_TFLAG_LBA48 | IDE_TFLAG_IN_HOB);

	if (req_task->out_flags.all) {
		/* flagged taskfile: only the explicitly requested
		 * registers are written out */
		cmd.ftf_flags |= IDE_FTFLAG_FLAGGED;

		if (req_task->out_flags.b.data)
			cmd.ftf_flags |= IDE_FTFLAG_OUT_DATA;

		if (req_task->out_flags.b.nsector_hob)
			cmd.tf_flags |= IDE_TFLAG_OUT_HOB_NSECT;
		if (req_task->out_flags.b.sector_hob)
			cmd.tf_flags |= IDE_TFLAG_OUT_HOB_LBAL;
		if (req_task->out_flags.b.lcyl_hob)
			cmd.tf_flags |= IDE_TFLAG_OUT_HOB_LBAM;
		if (req_task->out_flags.b.hcyl_hob)
			cmd.tf_flags |= IDE_TFLAG_OUT_HOB_LBAH;

		if (req_task->out_flags.b.error_feature)
			cmd.tf_flags |= IDE_TFLAG_OUT_FEATURE;
		if (req_task->out_flags.b.nsector)
			cmd.tf_flags |= IDE_TFLAG_OUT_NSECT;
		if (req_task->out_flags.b.sector)
			cmd.tf_flags |= IDE_TFLAG_OUT_LBAL;
		if (req_task->out_flags.b.lcyl)
			cmd.tf_flags |= IDE_TFLAG_OUT_LBAM;
		if (req_task->out_flags.b.hcyl)
			cmd.tf_flags |= IDE_TFLAG_OUT_LBAH;
	} else {
		cmd.tf_flags |= IDE_TFLAG_OUT_TF;
		if (cmd.tf_flags & IDE_TFLAG_LBA48)
			cmd.tf_flags |= IDE_TFLAG_OUT_HOB;
	}

	if (req_task->in_flags.b.data)
		cmd.ftf_flags |= IDE_FTFLAG_IN_DATA;

	if (req_task->req_cmd == IDE_DRIVE_TASK_RAW_WRITE) {
		/* fixup data phase if needed */
		if (req_task->data_phase == TASKFILE_IN_DMAQ ||
		    req_task->data_phase == TASKFILE_IN_DMA)
			cmd.tf_flags |= IDE_TFLAG_WRITE;
	}

	cmd.protocol = ATA_PROT_DMA;

	switch (req_task->data_phase) {
	case TASKFILE_MULTI_OUT:
		if (!drive->mult_count) {
			/* (hs): give up if multcount is not set */
			printk(KERN_ERR "%s: %s Multimode Write " \
				"multcount is not set\n",
				drive->name, __func__);
			err = -EPERM;
			goto abort;
		}
		cmd.tf_flags |= IDE_TFLAG_MULTI_PIO;
		/* fall through */
	case TASKFILE_OUT:
		cmd.protocol = ATA_PROT_PIO;
		/* fall through */
	case TASKFILE_OUT_DMAQ:
	case TASKFILE_OUT_DMA:
		cmd.tf_flags |= IDE_TFLAG_WRITE;
		nsect = taskout / SECTOR_SIZE;
		data_buf = outbuf;
		break;
	case TASKFILE_MULTI_IN:
		if (!drive->mult_count) {
			/* (hs): give up if multcount is not set */
			printk(KERN_ERR "%s: %s Multimode Read failure " \
				"multcount is not set\n",
				drive->name, __func__);
			err = -EPERM;
			goto abort;
		}
		cmd.tf_flags |= IDE_TFLAG_MULTI_PIO;
		/* fall through */
	case TASKFILE_IN:
		cmd.protocol = ATA_PROT_PIO;
		/* fall through */
	case TASKFILE_IN_DMAQ:
	case TASKFILE_IN_DMA:
		nsect = taskin / SECTOR_SIZE;
		data_buf = inbuf;
		break;
	case TASKFILE_NO_DATA:
		cmd.protocol = ATA_PROT_NODATA;
		break;
	default:
		err = -EFAULT;
		goto abort;
	}

	if (req_task->req_cmd == IDE_DRIVE_TASK_NO_DATA)
		nsect = 0;
	else if (!nsect) {
		/* payload smaller than a sector: fall back to the sector
		 * count encoded in the taskfile registers */
		nsect = (cmd.tf.hob_nsect << 8) | cmd.tf.nsect;

		if (!nsect) {
			printk(KERN_ERR "%s: in/out command without data\n",
					drive->name);
			err = -EFAULT;
			goto abort;
		}
	}

	err = ide_raw_taskfile(drive, &cmd, data_buf, nsect);

	/* copy the (possibly updated) register values back to the header */
	memcpy(req_task->hob_ports, &cmd.tf_array[0],
	       HDIO_DRIVE_HOB_HDR_SIZE - 2);
	memcpy(req_task->io_ports, &cmd.tf_array[6],
	       HDIO_DRIVE_TASK_HDR_SIZE);

	if ((cmd.ftf_flags & IDE_FTFLAG_SET_IN_FLAGS) &&
	    req_task->in_flags.all == 0) {
		req_task->in_flags.all = IDE_TASKFILE_STD_IN_FLAGS;
		if (drive->dev_flags & IDE_DFLAG_LBA48)
			req_task->in_flags.all |= (IDE_HOB_STD_IN_FLAGS << 8);
	}

	if (copy_to_user(buf, req_task, tasksize)) {
		err = -EFAULT;
		goto abort;
	}
	if (taskout) {
		int outtotal = tasksize;
		if (copy_to_user(buf + outtotal, outbuf, taskout)) {
			err = -EFAULT;
			goto abort;
		}
	}
	if (taskin) {
		int intotal = tasksize + taskout;
		if (copy_to_user(buf + intotal, inbuf, taskin)) {
			err = -EFAULT;
			goto abort;
		}
	}
abort:
	kfree(req_task);
	kfree(outbuf);
	kfree(inbuf);

//	printk("IDE Taskfile ioctl ended. rc = %i\n", err);

	return err;
}
#endif