ide-io.c revision 2624565caacedd740fce7803fe2c162842aa5df4
1/* 2 * IDE I/O functions 3 * 4 * Basic PIO and command management functionality. 5 * 6 * This code was split off from ide.c. See ide.c for history and original 7 * copyrights. 8 * 9 * This program is free software; you can redistribute it and/or modify it 10 * under the terms of the GNU General Public License as published by the 11 * Free Software Foundation; either version 2, or (at your option) any 12 * later version. 13 * 14 * This program is distributed in the hope that it will be useful, but 15 * WITHOUT ANY WARRANTY; without even the implied warranty of 16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 17 * General Public License for more details. 18 * 19 * For the avoidance of doubt the "preferred form" of this code is one which 20 * is in an open non patent encumbered format. Where cryptographic key signing 21 * forms part of the process of creating an executable the information 22 * including keys needed to generate an equivalently functional executable 23 * are deemed to be part of the source code. 24 */ 25 26 27#include <linux/module.h> 28#include <linux/types.h> 29#include <linux/string.h> 30#include <linux/kernel.h> 31#include <linux/timer.h> 32#include <linux/mm.h> 33#include <linux/interrupt.h> 34#include <linux/major.h> 35#include <linux/errno.h> 36#include <linux/genhd.h> 37#include <linux/blkpg.h> 38#include <linux/slab.h> 39#include <linux/init.h> 40#include <linux/pci.h> 41#include <linux/delay.h> 42#include <linux/ide.h> 43#include <linux/completion.h> 44#include <linux/reboot.h> 45#include <linux/cdrom.h> 46#include <linux/seq_file.h> 47#include <linux/device.h> 48#include <linux/kmod.h> 49#include <linux/scatterlist.h> 50#include <linux/bitops.h> 51 52#include <asm/byteorder.h> 53#include <asm/irq.h> 54#include <asm/uaccess.h> 55#include <asm/io.h> 56 57static int __ide_end_request(ide_drive_t *drive, struct request *rq, 58 int uptodate, unsigned int nr_bytes, int dequeue) 59{ 60 int ret = 1; 61 62 /* 63 * if failfast is set on a request, override number of sectors and 64 * complete the whole request right now 65 */ 66 if (blk_noretry_request(rq) && end_io_error(uptodate)) 67 nr_bytes = rq->hard_nr_sectors << 9; 68 69 if (!blk_fs_request(rq) && end_io_error(uptodate) && !rq->errors) 70 rq->errors = -EIO; 71 72 /* 73 * decide whether to reenable DMA -- 3 is a random magic for now, 74 * if we DMA timeout more than 3 times, just stay in PIO 75 */ 76 if (drive->state == DMA_PIO_RETRY && drive->retry_pio <= 3) { 77 drive->state = 0; 78 ide_dma_on(drive); 79 } 80 81 if (!end_that_request_chunk(rq, uptodate, nr_bytes)) { 82 add_disk_randomness(rq->rq_disk); 83 if (dequeue) { 84 if (!list_empty(&rq->queuelist)) 85 blkdev_dequeue_request(rq); 86 HWGROUP(drive)->rq = NULL; 87 } 88 end_that_request_last(rq, uptodate); 89 ret = 0; 90 } 91 92 return ret; 93} 94 95/** 96 * ide_end_request - complete an IDE I/O 97 * @drive: IDE device for the I/O 98 * @uptodate: 99 * @nr_sectors: number of sectors completed 100 * 101 * This is our end_request wrapper function. We complete the I/O 102 * update random number input and dequeue the request, which if 103 * it was tagged may be out of order. 104 */ 105 106int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors) 107{ 108 unsigned int nr_bytes = nr_sectors << 9; 109 struct request *rq; 110 unsigned long flags; 111 int ret = 1; 112 113 /* 114 * room for locking improvements here, the calls below don't 115 * need the queue lock held at all 116 */ 117 spin_lock_irqsave(&ide_lock, flags); 118 rq = HWGROUP(drive)->rq; 119 120 if (!nr_bytes) { 121 if (blk_pc_request(rq)) 122 nr_bytes = rq->data_len; 123 else 124 nr_bytes = rq->hard_cur_sectors << 9; 125 } 126 127 ret = __ide_end_request(drive, rq, uptodate, nr_bytes, 1); 128 129 spin_unlock_irqrestore(&ide_lock, flags); 130 return ret; 131} 132EXPORT_SYMBOL(ide_end_request); 133 134/* 135 * Power Management state machine. This one is rather trivial for now, 136 * we should probably add more, like switching back to PIO on suspend 137 * to help some BIOSes, re-do the door locking on resume, etc... 138 */ 139 140enum { 141 ide_pm_flush_cache = ide_pm_state_start_suspend, 142 idedisk_pm_standby, 143 144 idedisk_pm_restore_pio = ide_pm_state_start_resume, 145 idedisk_pm_idle, 146 ide_pm_restore_dma, 147}; 148 149static void ide_complete_power_step(ide_drive_t *drive, struct request *rq, u8 stat, u8 error) 150{ 151 struct request_pm_state *pm = rq->data; 152 153 if (drive->media != ide_disk) 154 return; 155 156 switch (pm->pm_step) { 157 case ide_pm_flush_cache: /* Suspend step 1 (flush cache) complete */ 158 if (pm->pm_state == PM_EVENT_FREEZE) 159 pm->pm_step = ide_pm_state_completed; 160 else 161 pm->pm_step = idedisk_pm_standby; 162 break; 163 case idedisk_pm_standby: /* Suspend step 2 (standby) complete */ 164 pm->pm_step = ide_pm_state_completed; 165 break; 166 case idedisk_pm_restore_pio: /* Resume step 1 complete */ 167 pm->pm_step = idedisk_pm_idle; 168 break; 169 case idedisk_pm_idle: /* Resume step 2 (idle) complete */ 170 pm->pm_step = ide_pm_restore_dma; 171 break; 172 } 173} 174 175static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq) 176{ 177 struct request_pm_state *pm = rq->data; 178 ide_task_t *args = rq->special; 179 180 memset(args, 0, sizeof(*args)); 181 182 switch (pm->pm_step) { 183 case ide_pm_flush_cache: /* Suspend step 1 (flush cache) */ 184 if (drive->media != ide_disk) 185 break; 186 /* Not supported? Switch to next step now. */ 187 if (!drive->wcache || !ide_id_has_flush_cache(drive->id)) { 188 ide_complete_power_step(drive, rq, 0, 0); 189 return ide_stopped; 190 } 191 if (ide_id_has_flush_cache_ext(drive->id)) 192 args->tf.command = WIN_FLUSH_CACHE_EXT; 193 else 194 args->tf.command = WIN_FLUSH_CACHE; 195 goto out_do_tf; 196 197 case idedisk_pm_standby: /* Suspend step 2 (standby) */ 198 args->tf.command = WIN_STANDBYNOW1; 199 goto out_do_tf; 200 201 case idedisk_pm_restore_pio: /* Resume step 1 (restore PIO) */ 202 ide_set_max_pio(drive); 203 /* 204 * skip idedisk_pm_idle for ATAPI devices 205 */ 206 if (drive->media != ide_disk) 207 pm->pm_step = ide_pm_restore_dma; 208 else 209 ide_complete_power_step(drive, rq, 0, 0); 210 return ide_stopped; 211 212 case idedisk_pm_idle: /* Resume step 2 (idle) */ 213 args->tf.command = WIN_IDLEIMMEDIATE; 214 goto out_do_tf; 215 216 case ide_pm_restore_dma: /* Resume step 3 (restore DMA) */ 217 /* 218 * Right now, all we do is call ide_set_dma(drive), 219 * we could be smarter and check for current xfer_speed 220 * in struct drive etc... 221 */ 222 if (drive->hwif->dma_host_set == NULL) 223 break; 224 /* 225 * TODO: respect ->using_dma setting 226 */ 227 ide_set_dma(drive); 228 break; 229 } 230 pm->pm_step = ide_pm_state_completed; 231 return ide_stopped; 232 233out_do_tf: 234 args->tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE; 235 args->data_phase = TASKFILE_NO_DATA; 236 return do_rw_taskfile(drive, args); 237} 238 239/** 240 * ide_end_dequeued_request - complete an IDE I/O 241 * @drive: IDE device for the I/O 242 * @uptodate: 243 * @nr_sectors: number of sectors completed 244 * 245 * Complete an I/O that is no longer on the request queue. This 246 * typically occurs when we pull the request and issue a REQUEST_SENSE. 247 * We must still finish the old request but we must not tamper with the 248 * queue in the meantime. 249 * 250 * NOTE: This path does not handle barrier, but barrier is not supported 251 * on ide-cd anyway. 252 */ 253 254int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq, 255 int uptodate, int nr_sectors) 256{ 257 unsigned long flags; 258 int ret; 259 260 spin_lock_irqsave(&ide_lock, flags); 261 BUG_ON(!blk_rq_started(rq)); 262 ret = __ide_end_request(drive, rq, uptodate, nr_sectors << 9, 0); 263 spin_unlock_irqrestore(&ide_lock, flags); 264 265 return ret; 266} 267EXPORT_SYMBOL_GPL(ide_end_dequeued_request); 268 269 270/** 271 * ide_complete_pm_request - end the current Power Management request 272 * @drive: target drive 273 * @rq: request 274 * 275 * This function cleans up the current PM request and stops the queue 276 * if necessary. 277 */ 278static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq) 279{ 280 unsigned long flags; 281 282#ifdef DEBUG_PM 283 printk("%s: completing PM request, %s\n", drive->name, 284 blk_pm_suspend_request(rq) ? "suspend" : "resume"); 285#endif 286 spin_lock_irqsave(&ide_lock, flags); 287 if (blk_pm_suspend_request(rq)) { 288 blk_stop_queue(drive->queue); 289 } else { 290 drive->blocked = 0; 291 blk_start_queue(drive->queue); 292 } 293 blkdev_dequeue_request(rq); 294 HWGROUP(drive)->rq = NULL; 295 end_that_request_last(rq, 1); 296 spin_unlock_irqrestore(&ide_lock, flags); 297} 298 299void ide_tf_read(ide_drive_t *drive, ide_task_t *task) 300{ 301 ide_hwif_t *hwif = drive->hwif; 302 struct ide_taskfile *tf = &task->tf; 303 304 if (task->tf_flags & IDE_TFLAG_IN_DATA) { 305 u16 data = hwif->INW(IDE_DATA_REG); 306 307 tf->data = data & 0xff; 308 tf->hob_data = (data >> 8) & 0xff; 309 } 310 311 /* be sure we're looking at the low order bits */ 312 hwif->OUTB(drive->ctl & ~0x80, IDE_CONTROL_REG); 313 314 if (task->tf_flags & IDE_TFLAG_IN_NSECT) 315 tf->nsect = hwif->INB(IDE_NSECTOR_REG); 316 if (task->tf_flags & IDE_TFLAG_IN_LBAL) 317 tf->lbal = hwif->INB(IDE_SECTOR_REG); 318 if (task->tf_flags & IDE_TFLAG_IN_LBAM) 319 tf->lbam = hwif->INB(IDE_LCYL_REG); 320 if (task->tf_flags & IDE_TFLAG_IN_LBAH) 321 tf->lbah = hwif->INB(IDE_HCYL_REG); 322 if (task->tf_flags & IDE_TFLAG_IN_DEVICE) 323 tf->device = hwif->INB(IDE_SELECT_REG); 324 325 if (task->tf_flags & IDE_TFLAG_LBA48) { 326 hwif->OUTB(drive->ctl | 0x80, IDE_CONTROL_REG); 327 328 if (task->tf_flags & IDE_TFLAG_IN_HOB_FEATURE) 329 tf->hob_feature = hwif->INB(IDE_FEATURE_REG); 330 if (task->tf_flags & IDE_TFLAG_IN_HOB_NSECT) 331 tf->hob_nsect = hwif->INB(IDE_NSECTOR_REG); 332 if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAL) 333 tf->hob_lbal = hwif->INB(IDE_SECTOR_REG); 334 if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAM) 335 tf->hob_lbam = hwif->INB(IDE_LCYL_REG); 336 if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAH) 337 tf->hob_lbah = hwif->INB(IDE_HCYL_REG); 338 } 339} 340 341/** 342 * ide_end_drive_cmd - end an explicit drive command 343 * @drive: command 344 * @stat: status bits 345 * @err: error bits 346 * 347 * Clean up after success/failure of an explicit drive command. 348 * These get thrown onto the queue so they are synchronized with 349 * real I/O operations on the drive. 350 * 351 * In LBA48 mode we have to read the register set twice to get 352 * all the extra information out. 353 */ 354 355void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err) 356{ 357 ide_hwif_t *hwif = HWIF(drive); 358 unsigned long flags; 359 struct request *rq; 360 361 spin_lock_irqsave(&ide_lock, flags); 362 rq = HWGROUP(drive)->rq; 363 spin_unlock_irqrestore(&ide_lock, flags); 364 365 if (rq->cmd_type == REQ_TYPE_ATA_CMD) { 366 u8 *args = (u8 *) rq->buffer; 367 if (rq->errors == 0) 368 rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT); 369 370 if (args) { 371 args[0] = stat; 372 args[1] = err; 373 /* be sure we're looking at the low order bits */ 374 hwif->OUTB(drive->ctl & ~0x80, IDE_CONTROL_REG); 375 args[2] = hwif->INB(IDE_NSECTOR_REG); 376 } 377 } else if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) { 378 ide_task_t *args = (ide_task_t *) rq->special; 379 if (rq->errors == 0) 380 rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT); 381 382 if (args) { 383 struct ide_taskfile *tf = &args->tf; 384 385 tf->error = err; 386 tf->status = stat; 387 388 ide_tf_read(drive, args); 389 } 390 } else if (blk_pm_request(rq)) { 391 struct request_pm_state *pm = rq->data; 392#ifdef DEBUG_PM 393 printk("%s: complete_power_step(step: %d, stat: %x, err: %x)\n", 394 drive->name, rq->pm->pm_step, stat, err); 395#endif 396 ide_complete_power_step(drive, rq, stat, err); 397 if (pm->pm_step == ide_pm_state_completed) 398 ide_complete_pm_request(drive, rq); 399 return; 400 } 401 402 spin_lock_irqsave(&ide_lock, flags); 403 blkdev_dequeue_request(rq); 404 HWGROUP(drive)->rq = NULL; 405 rq->errors = err; 406 end_that_request_last(rq, !rq->errors); 407 spin_unlock_irqrestore(&ide_lock, flags); 408} 409 410EXPORT_SYMBOL(ide_end_drive_cmd); 411 412/** 413 * try_to_flush_leftover_data - flush junk 414 * @drive: drive to flush 415 * 416 * try_to_flush_leftover_data() is invoked in response to a drive 417 * unexpectedly having its DRQ_STAT bit set. As an alternative to 418 * resetting the drive, this routine tries to clear the condition 419 * by read a sector's worth of data from the drive. Of course, 420 * this may not help if the drive is *waiting* for data from *us*. 421 */ 422static void try_to_flush_leftover_data (ide_drive_t *drive) 423{ 424 int i = (drive->mult_count ? drive->mult_count : 1) * SECTOR_WORDS; 425 426 if (drive->media != ide_disk) 427 return; 428 while (i > 0) { 429 u32 buffer[16]; 430 u32 wcount = (i > 16) ? 16 : i; 431 432 i -= wcount; 433 HWIF(drive)->ata_input_data(drive, buffer, wcount); 434 } 435} 436 437static void ide_kill_rq(ide_drive_t *drive, struct request *rq) 438{ 439 if (rq->rq_disk) { 440 ide_driver_t *drv; 441 442 drv = *(ide_driver_t **)rq->rq_disk->private_data; 443 drv->end_request(drive, 0, 0); 444 } else 445 ide_end_request(drive, 0, 0); 446} 447 448static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err) 449{ 450 ide_hwif_t *hwif = drive->hwif; 451 452 if (stat & BUSY_STAT || ((stat & WRERR_STAT) && !drive->nowerr)) { 453 /* other bits are useless when BUSY */ 454 rq->errors |= ERROR_RESET; 455 } else if (stat & ERR_STAT) { 456 /* err has different meaning on cdrom and tape */ 457 if (err == ABRT_ERR) { 458 if (drive->select.b.lba && 459 /* some newer drives don't support WIN_SPECIFY */ 460 hwif->INB(IDE_COMMAND_REG) == WIN_SPECIFY) 461 return ide_stopped; 462 } else if ((err & BAD_CRC) == BAD_CRC) { 463 /* UDMA crc error, just retry the operation */ 464 drive->crc_count++; 465 } else if (err & (BBD_ERR | ECC_ERR)) { 466 /* retries won't help these */ 467 rq->errors = ERROR_MAX; 468 } else if (err & TRK0_ERR) { 469 /* help it find track zero */ 470 rq->errors |= ERROR_RECAL; 471 } 472 } 473 474 if ((stat & DRQ_STAT) && rq_data_dir(rq) == READ && 475 (hwif->host_flags & IDE_HFLAG_ERROR_STOPS_FIFO) == 0) 476 try_to_flush_leftover_data(drive); 477 478 if (rq->errors >= ERROR_MAX || blk_noretry_request(rq)) { 479 ide_kill_rq(drive, rq); 480 return ide_stopped; 481 } 482 483 if (hwif->INB(IDE_STATUS_REG) & (BUSY_STAT|DRQ_STAT)) 484 rq->errors |= ERROR_RESET; 485 486 if ((rq->errors & ERROR_RESET) == ERROR_RESET) { 487 ++rq->errors; 488 return ide_do_reset(drive); 489 } 490 491 if ((rq->errors & ERROR_RECAL) == ERROR_RECAL) 492 drive->special.b.recalibrate = 1; 493 494 ++rq->errors; 495 496 return ide_stopped; 497} 498 499static ide_startstop_t ide_atapi_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err) 500{ 501 ide_hwif_t *hwif = drive->hwif; 502 503 if (stat & BUSY_STAT || ((stat & WRERR_STAT) && !drive->nowerr)) { 504 /* other bits are useless when BUSY */ 505 rq->errors |= ERROR_RESET; 506 } else { 507 /* add decoding error stuff */ 508 } 509 510 if (hwif->INB(IDE_STATUS_REG) & (BUSY_STAT|DRQ_STAT)) 511 /* force an abort */ 512 hwif->OUTB(WIN_IDLEIMMEDIATE, IDE_COMMAND_REG); 513 514 if (rq->errors >= ERROR_MAX) { 515 ide_kill_rq(drive, rq); 516 } else { 517 if ((rq->errors & ERROR_RESET) == ERROR_RESET) { 518 ++rq->errors; 519 return ide_do_reset(drive); 520 } 521 ++rq->errors; 522 } 523 524 return ide_stopped; 525} 526 527ide_startstop_t 528__ide_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err) 529{ 530 if (drive->media == ide_disk) 531 return ide_ata_error(drive, rq, stat, err); 532 return ide_atapi_error(drive, rq, stat, err); 533} 534 535EXPORT_SYMBOL_GPL(__ide_error); 536 537/** 538 * ide_error - handle an error on the IDE 539 * @drive: drive the error occurred on 540 * @msg: message to report 541 * @stat: status bits 542 * 543 * ide_error() takes action based on the error returned by the drive. 544 * For normal I/O that may well include retries. We deal with 545 * both new-style (taskfile) and old style command handling here. 546 * In the case of taskfile command handling there is work left to 547 * do 548 */ 549 550ide_startstop_t ide_error (ide_drive_t *drive, const char *msg, u8 stat) 551{ 552 struct request *rq; 553 u8 err; 554 555 err = ide_dump_status(drive, msg, stat); 556 557 if ((rq = HWGROUP(drive)->rq) == NULL) 558 return ide_stopped; 559 560 /* retry only "normal" I/O: */ 561 if (!blk_fs_request(rq)) { 562 rq->errors = 1; 563 ide_end_drive_cmd(drive, stat, err); 564 return ide_stopped; 565 } 566 567 if (rq->rq_disk) { 568 ide_driver_t *drv; 569 570 drv = *(ide_driver_t **)rq->rq_disk->private_data; 571 return drv->error(drive, rq, stat, err); 572 } else 573 return __ide_error(drive, rq, stat, err); 574} 575 576EXPORT_SYMBOL_GPL(ide_error); 577 578ide_startstop_t __ide_abort(ide_drive_t *drive, struct request *rq) 579{ 580 if (drive->media != ide_disk) 581 rq->errors |= ERROR_RESET; 582 583 ide_kill_rq(drive, rq); 584 585 return ide_stopped; 586} 587 588EXPORT_SYMBOL_GPL(__ide_abort); 589 590/** 591 * ide_abort - abort pending IDE operations 592 * @drive: drive the error occurred on 593 * @msg: message to report 594 * 595 * ide_abort kills and cleans up when we are about to do a 596 * host initiated reset on active commands. Longer term we 597 * want handlers to have sensible abort handling themselves 598 * 599 * This differs fundamentally from ide_error because in 600 * this case the command is doing just fine when we 601 * blow it away. 602 */ 603 604ide_startstop_t ide_abort(ide_drive_t *drive, const char *msg) 605{ 606 struct request *rq; 607 608 if (drive == NULL || (rq = HWGROUP(drive)->rq) == NULL) 609 return ide_stopped; 610 611 /* retry only "normal" I/O: */ 612 if (!blk_fs_request(rq)) { 613 rq->errors = 1; 614 ide_end_drive_cmd(drive, BUSY_STAT, 0); 615 return ide_stopped; 616 } 617 618 if (rq->rq_disk) { 619 ide_driver_t *drv; 620 621 drv = *(ide_driver_t **)rq->rq_disk->private_data; 622 return drv->abort(drive, rq); 623 } else 624 return __ide_abort(drive, rq); 625} 626 627/** 628 * drive_cmd_intr - drive command completion interrupt 629 * @drive: drive the completion interrupt occurred on 630 * 631 * drive_cmd_intr() is invoked on completion of a special DRIVE_CMD. 632 * We do any necessary data reading and then wait for the drive to 633 * go non busy. At that point we may read the error data and complete 634 * the request 635 */ 636 637static ide_startstop_t drive_cmd_intr (ide_drive_t *drive) 638{ 639 struct request *rq = HWGROUP(drive)->rq; 640 ide_hwif_t *hwif = HWIF(drive); 641 u8 *args = (u8 *) rq->buffer; 642 u8 stat = hwif->INB(IDE_STATUS_REG); 643 644 local_irq_enable_in_hardirq(); 645 if (rq->cmd_type == REQ_TYPE_ATA_CMD && 646 (stat & DRQ_STAT) && args && args[3]) { 647 u8 io_32bit = drive->io_32bit; 648 drive->io_32bit = 0; 649 hwif->ata_input_data(drive, &args[4], args[3] * SECTOR_WORDS); 650 drive->io_32bit = io_32bit; 651 stat = wait_drive_not_busy(drive); 652 } 653 654 if (!OK_STAT(stat, READY_STAT, BAD_STAT)) 655 return ide_error(drive, "drive_cmd", stat); 656 /* calls ide_end_drive_cmd */ 657 ide_end_drive_cmd(drive, stat, hwif->INB(IDE_ERROR_REG)); 658 return ide_stopped; 659} 660 661static void ide_tf_set_specify_cmd(ide_drive_t *drive, struct ide_taskfile *tf) 662{ 663 tf->nsect = drive->sect; 664 tf->lbal = drive->sect; 665 tf->lbam = drive->cyl; 666 tf->lbah = drive->cyl >> 8; 667 tf->device = ((drive->head - 1) | drive->select.all) & ~ATA_LBA; 668 tf->command = WIN_SPECIFY; 669} 670 671static void ide_tf_set_restore_cmd(ide_drive_t *drive, struct ide_taskfile *tf) 672{ 673 tf->nsect = drive->sect; 674 tf->command = WIN_RESTORE; 675} 676 677static void ide_tf_set_setmult_cmd(ide_drive_t *drive, struct ide_taskfile *tf) 678{ 679 tf->nsect = drive->mult_req; 680 tf->command = WIN_SETMULT; 681} 682 683static ide_startstop_t ide_disk_special(ide_drive_t *drive) 684{ 685 special_t *s = &drive->special; 686 ide_task_t args; 687 688 memset(&args, 0, sizeof(ide_task_t)); 689 args.data_phase = TASKFILE_NO_DATA; 690 691 if (s->b.set_geometry) { 692 s->b.set_geometry = 0; 693 ide_tf_set_specify_cmd(drive, &args.tf); 694 } else if (s->b.recalibrate) { 695 s->b.recalibrate = 0; 696 ide_tf_set_restore_cmd(drive, &args.tf); 697 } else if (s->b.set_multmode) { 698 s->b.set_multmode = 0; 699 if (drive->mult_req > drive->id->max_multsect) 700 drive->mult_req = drive->id->max_multsect; 701 ide_tf_set_setmult_cmd(drive, &args.tf); 702 } else if (s->all) { 703 int special = s->all; 704 s->all = 0; 705 printk(KERN_ERR "%s: bad special flag: 0x%02x\n", drive->name, special); 706 return ide_stopped; 707 } 708 709 args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE | 710 IDE_TFLAG_CUSTOM_HANDLER; 711 712 do_rw_taskfile(drive, &args); 713 714 return ide_started; 715} 716 717/* 718 * handle HDIO_SET_PIO_MODE ioctl abusers here, eventually it will go away 719 */ 720static int set_pio_mode_abuse(ide_hwif_t *hwif, u8 req_pio) 721{ 722 switch (req_pio) { 723 case 202: 724 case 201: 725 case 200: 726 case 102: 727 case 101: 728 case 100: 729 return (hwif->host_flags & IDE_HFLAG_ABUSE_DMA_MODES) ? 1 : 0; 730 case 9: 731 case 8: 732 return (hwif->host_flags & IDE_HFLAG_ABUSE_PREFETCH) ? 1 : 0; 733 case 7: 734 case 6: 735 return (hwif->host_flags & IDE_HFLAG_ABUSE_FAST_DEVSEL) ? 1 : 0; 736 default: 737 return 0; 738 } 739} 740 741/** 742 * do_special - issue some special commands 743 * @drive: drive the command is for 744 * 745 * do_special() is used to issue WIN_SPECIFY, WIN_RESTORE, and WIN_SETMULT 746 * commands to a drive. It used to do much more, but has been scaled 747 * back. 748 */ 749 750static ide_startstop_t do_special (ide_drive_t *drive) 751{ 752 special_t *s = &drive->special; 753 754#ifdef DEBUG 755 printk("%s: do_special: 0x%02x\n", drive->name, s->all); 756#endif 757 if (s->b.set_tune) { 758 ide_hwif_t *hwif = drive->hwif; 759 u8 req_pio = drive->tune_req; 760 761 s->b.set_tune = 0; 762 763 if (set_pio_mode_abuse(drive->hwif, req_pio)) { 764 765 if (hwif->set_pio_mode == NULL) 766 return ide_stopped; 767 768 /* 769 * take ide_lock for drive->[no_]unmask/[no_]io_32bit 770 */ 771 if (req_pio == 8 || req_pio == 9) { 772 unsigned long flags; 773 774 spin_lock_irqsave(&ide_lock, flags); 775 hwif->set_pio_mode(drive, req_pio); 776 spin_unlock_irqrestore(&ide_lock, flags); 777 } else 778 hwif->set_pio_mode(drive, req_pio); 779 } else { 780 int keep_dma = drive->using_dma; 781 782 ide_set_pio(drive, req_pio); 783 784 if (hwif->host_flags & IDE_HFLAG_SET_PIO_MODE_KEEP_DMA) { 785 if (keep_dma) 786 ide_dma_on(drive); 787 } 788 } 789 790 return ide_stopped; 791 } else { 792 if (drive->media == ide_disk) 793 return ide_disk_special(drive); 794 795 s->all = 0; 796 drive->mult_req = 0; 797 return ide_stopped; 798 } 799} 800 801void ide_map_sg(ide_drive_t *drive, struct request *rq) 802{ 803 ide_hwif_t *hwif = drive->hwif; 804 struct scatterlist *sg = hwif->sg_table; 805 806 if (hwif->sg_mapped) /* needed by ide-scsi */ 807 return; 808 809 if (rq->cmd_type != REQ_TYPE_ATA_TASKFILE) { 810 hwif->sg_nents = blk_rq_map_sg(drive->queue, rq, sg); 811 } else { 812 sg_init_one(sg, rq->buffer, rq->nr_sectors * SECTOR_SIZE); 813 hwif->sg_nents = 1; 814 } 815} 816 817EXPORT_SYMBOL_GPL(ide_map_sg); 818 819void ide_init_sg_cmd(ide_drive_t *drive, struct request *rq) 820{ 821 ide_hwif_t *hwif = drive->hwif; 822 823 hwif->nsect = hwif->nleft = rq->nr_sectors; 824 hwif->cursg_ofs = 0; 825 hwif->cursg = NULL; 826} 827 828EXPORT_SYMBOL_GPL(ide_init_sg_cmd); 829 830/** 831 * execute_drive_command - issue special drive command 832 * @drive: the drive to issue the command on 833 * @rq: the request structure holding the command 834 * 835 * execute_drive_cmd() issues a special drive command, usually 836 * initiated by ioctl() from the external hdparm program. The 837 * command can be a drive command, drive task or taskfile 838 * operation. Weirdly you can call it with NULL to wait for 839 * all commands to finish. Don't do this as that is due to change 840 */ 841 842static ide_startstop_t execute_drive_cmd (ide_drive_t *drive, 843 struct request *rq) 844{ 845 ide_hwif_t *hwif = HWIF(drive); 846 u8 *args = rq->buffer; 847 ide_task_t ltask; 848 struct ide_taskfile *tf = <ask.tf; 849 850 if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) { 851 ide_task_t *task = rq->special; 852 853 if (task == NULL) 854 goto done; 855 856 hwif->data_phase = task->data_phase; 857 858 switch (hwif->data_phase) { 859 case TASKFILE_MULTI_OUT: 860 case TASKFILE_OUT: 861 case TASKFILE_MULTI_IN: 862 case TASKFILE_IN: 863 ide_init_sg_cmd(drive, rq); 864 ide_map_sg(drive, rq); 865 default: 866 break; 867 } 868 869 return do_rw_taskfile(drive, task); 870 } 871 872 if (args == NULL) 873 goto done; 874 875 memset(<ask, 0, sizeof(ltask)); 876 if (rq->cmd_type == REQ_TYPE_ATA_CMD) { 877#ifdef DEBUG 878 printk("%s: DRIVE_CMD\n", drive->name); 879#endif 880 tf->feature = args[2]; 881 if (args[0] == WIN_SMART) { 882 tf->nsect = args[3]; 883 tf->lbal = args[1]; 884 tf->lbam = 0x4f; 885 tf->lbah = 0xc2; 886 ltask.tf_flags = IDE_TFLAG_OUT_TF; 887 } else { 888 tf->nsect = args[1]; 889 ltask.tf_flags = IDE_TFLAG_OUT_FEATURE | 890 IDE_TFLAG_OUT_NSECT; 891 } 892 } 893 tf->command = args[0]; 894 ide_tf_load(drive, <ask); 895 ide_execute_command(drive, args[0], &drive_cmd_intr, WAIT_WORSTCASE, NULL); 896 return ide_started; 897 898done: 899 /* 900 * NULL is actually a valid way of waiting for 901 * all current requests to be flushed from the queue. 902 */ 903#ifdef DEBUG 904 printk("%s: DRIVE_CMD (null)\n", drive->name); 905#endif 906 ide_end_drive_cmd(drive, 907 hwif->INB(IDE_STATUS_REG), 908 hwif->INB(IDE_ERROR_REG)); 909 return ide_stopped; 910} 911 912static void ide_check_pm_state(ide_drive_t *drive, struct request *rq) 913{ 914 struct request_pm_state *pm = rq->data; 915 916 if (blk_pm_suspend_request(rq) && 917 pm->pm_step == ide_pm_state_start_suspend) 918 /* Mark drive blocked when starting the suspend sequence. */ 919 drive->blocked = 1; 920 else if (blk_pm_resume_request(rq) && 921 pm->pm_step == ide_pm_state_start_resume) { 922 /* 923 * The first thing we do on wakeup is to wait for BSY bit to 924 * go away (with a looong timeout) as a drive on this hwif may 925 * just be POSTing itself. 926 * We do that before even selecting as the "other" device on 927 * the bus may be broken enough to walk on our toes at this 928 * point. 929 */ 930 int rc; 931#ifdef DEBUG_PM 932 printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name); 933#endif 934 rc = ide_wait_not_busy(HWIF(drive), 35000); 935 if (rc) 936 printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name); 937 SELECT_DRIVE(drive); 938 ide_set_irq(drive, 1); 939 rc = ide_wait_not_busy(HWIF(drive), 100000); 940 if (rc) 941 printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name); 942 } 943} 944 945/** 946 * start_request - start of I/O and command issuing for IDE 947 * 948 * start_request() initiates handling of a new I/O request. It 949 * accepts commands and I/O (read/write) requests. It also does 950 * the final remapping for weird stuff like EZDrive. Once 951 * device mapper can work sector level the EZDrive stuff can go away 952 * 953 * FIXME: this function needs a rename 954 */ 955 956static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq) 957{ 958 ide_startstop_t startstop; 959 sector_t block; 960 961 BUG_ON(!blk_rq_started(rq)); 962 963#ifdef DEBUG 964 printk("%s: start_request: current=0x%08lx\n", 965 HWIF(drive)->name, (unsigned long) rq); 966#endif 967 968 /* bail early if we've exceeded max_failures */ 969 if (drive->max_failures && (drive->failures > drive->max_failures)) { 970 rq->cmd_flags |= REQ_FAILED; 971 goto kill_rq; 972 } 973 974 block = rq->sector; 975 if (blk_fs_request(rq) && 976 (drive->media == ide_disk || drive->media == ide_floppy)) { 977 block += drive->sect0; 978 } 979 /* Yecch - this will shift the entire interval, 980 possibly killing some innocent following sector */ 981 if (block == 0 && drive->remap_0_to_1 == 1) 982 block = 1; /* redirect MBR access to EZ-Drive partn table */ 983 984 if (blk_pm_request(rq)) 985 ide_check_pm_state(drive, rq); 986 987 SELECT_DRIVE(drive); 988 if (ide_wait_stat(&startstop, drive, drive->ready_stat, BUSY_STAT|DRQ_STAT, WAIT_READY)) { 989 printk(KERN_ERR "%s: drive not ready for command\n", drive->name); 990 return startstop; 991 } 992 if (!drive->special.all) { 993 ide_driver_t *drv; 994 995 /* 996 * We reset the drive so we need to issue a SETFEATURES. 997 * Do it _after_ do_special() restored device parameters. 998 */ 999 if (drive->current_speed == 0xff) 1000 ide_config_drive_speed(drive, drive->desired_speed); 1001 1002 if (rq->cmd_type == REQ_TYPE_ATA_CMD || 1003 rq->cmd_type == REQ_TYPE_ATA_TASKFILE) 1004 return execute_drive_cmd(drive, rq); 1005 else if (blk_pm_request(rq)) { 1006 struct request_pm_state *pm = rq->data; 1007#ifdef DEBUG_PM 1008 printk("%s: start_power_step(step: %d)\n", 1009 drive->name, rq->pm->pm_step); 1010#endif 1011 startstop = ide_start_power_step(drive, rq); 1012 if (startstop == ide_stopped && 1013 pm->pm_step == ide_pm_state_completed) 1014 ide_complete_pm_request(drive, rq); 1015 return startstop; 1016 } 1017 1018 drv = *(ide_driver_t **)rq->rq_disk->private_data; 1019 return drv->do_request(drive, rq, block); 1020 } 1021 return do_special(drive); 1022kill_rq: 1023 ide_kill_rq(drive, rq); 1024 return ide_stopped; 1025} 1026 1027/** 1028 * ide_stall_queue - pause an IDE device 1029 * @drive: drive to stall 1030 * @timeout: time to stall for (jiffies) 1031 * 1032 * ide_stall_queue() can be used by a drive to give excess bandwidth back 1033 * to the hwgroup by sleeping for timeout jiffies. 1034 */ 1035 1036void ide_stall_queue (ide_drive_t *drive, unsigned long timeout) 1037{ 1038 if (timeout > WAIT_WORSTCASE) 1039 timeout = WAIT_WORSTCASE; 1040 drive->sleep = timeout + jiffies; 1041 drive->sleeping = 1; 1042} 1043 1044EXPORT_SYMBOL(ide_stall_queue); 1045 1046#define WAKEUP(drive) ((drive)->service_start + 2 * (drive)->service_time) 1047 1048/** 1049 * choose_drive - select a drive to service 1050 * @hwgroup: hardware group to select on 1051 * 1052 * choose_drive() selects the next drive which will be serviced. 1053 * This is necessary because the IDE layer can't issue commands 1054 * to both drives on the same cable, unlike SCSI. 1055 */ 1056 1057static inline ide_drive_t *choose_drive (ide_hwgroup_t *hwgroup) 1058{ 1059 ide_drive_t *drive, *best; 1060 1061repeat: 1062 best = NULL; 1063 drive = hwgroup->drive; 1064 1065 /* 1066 * drive is doing pre-flush, ordered write, post-flush sequence. even 1067 * though that is 3 requests, it must be seen as a single transaction. 1068 * we must not preempt this drive until that is complete 1069 */ 1070 if (blk_queue_flushing(drive->queue)) { 1071 /* 1072 * small race where queue could get replugged during 1073 * the 3-request flush cycle, just yank the plug since 1074 * we want it to finish asap 1075 */ 1076 blk_remove_plug(drive->queue); 1077 return drive; 1078 } 1079 1080 do { 1081 if ((!drive->sleeping || time_after_eq(jiffies, drive->sleep)) 1082 && !elv_queue_empty(drive->queue)) { 1083 if (!best 1084 || (drive->sleeping && (!best->sleeping || time_before(drive->sleep, best->sleep))) 1085 || (!best->sleeping && time_before(WAKEUP(drive), WAKEUP(best)))) 1086 { 1087 if (!blk_queue_plugged(drive->queue)) 1088 best = drive; 1089 } 1090 } 1091 } while ((drive = drive->next) != hwgroup->drive); 1092 if (best && best->nice1 && !best->sleeping && best != hwgroup->drive && best->service_time > WAIT_MIN_SLEEP) { 1093 long t = (signed long)(WAKEUP(best) - jiffies); 1094 if (t >= WAIT_MIN_SLEEP) { 1095 /* 1096 * We *may* have some time to spare, but first let's see if 1097 * someone can potentially benefit from our nice mood today.. 1098 */ 1099 drive = best->next; 1100 do { 1101 if (!drive->sleeping 1102 && time_before(jiffies - best->service_time, WAKEUP(drive)) 1103 && time_before(WAKEUP(drive), jiffies + t)) 1104 { 1105 ide_stall_queue(best, min_t(long, t, 10 * WAIT_MIN_SLEEP)); 1106 goto repeat; 1107 } 1108 } while ((drive = drive->next) != best); 1109 } 1110 } 1111 return best; 1112} 1113 1114/* 1115 * Issue a new request to a drive from hwgroup 1116 * Caller must have already done spin_lock_irqsave(&ide_lock, ..); 1117 * 1118 * A hwgroup is a serialized group of IDE interfaces. Usually there is 1119 * exactly one hwif (interface) per hwgroup, but buggy controllers (eg. CMD640) 1120 * may have both interfaces in a single hwgroup to "serialize" access. 1121 * Or possibly multiple ISA interfaces can share a common IRQ by being grouped 1122 * together into one hwgroup for serialized access. 1123 * 1124 * Note also that several hwgroups can end up sharing a single IRQ, 1125 * possibly along with many other devices. This is especially common in 1126 * PCI-based systems with off-board IDE controller cards. 1127 * 1128 * The IDE driver uses the single global ide_lock spinlock to protect 1129 * access to the request queues, and to protect the hwgroup->busy flag. 1130 * 1131 * The first thread into the driver for a particular hwgroup sets the 1132 * hwgroup->busy flag to indicate that this hwgroup is now active, 1133 * and then initiates processing of the top request from the request queue. 1134 * 1135 * Other threads attempting entry notice the busy setting, and will simply 1136 * queue their new requests and exit immediately. Note that hwgroup->busy 1137 * remains set even when the driver is merely awaiting the next interrupt. 1138 * Thus, the meaning is "this hwgroup is busy processing a request". 1139 * 1140 * When processing of a request completes, the completing thread or IRQ-handler 1141 * will start the next request from the queue. If no more work remains, 1142 * the driver will clear the hwgroup->busy flag and exit. 1143 * 1144 * The ide_lock (spinlock) is used to protect all access to the 1145 * hwgroup->busy flag, but is otherwise not needed for most processing in 1146 * the driver. This makes the driver much more friendlier to shared IRQs 1147 * than previous designs, while remaining 100% (?) SMP safe and capable. 1148 */ 1149static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq) 1150{ 1151 ide_drive_t *drive; 1152 ide_hwif_t *hwif; 1153 struct request *rq; 1154 ide_startstop_t startstop; 1155 int loops = 0; 1156 1157 /* for atari only: POSSIBLY BROKEN HERE(?) */ 1158 ide_get_lock(ide_intr, hwgroup); 1159 1160 /* caller must own ide_lock */ 1161 BUG_ON(!irqs_disabled()); 1162 1163 while (!hwgroup->busy) { 1164 hwgroup->busy = 1; 1165 drive = choose_drive(hwgroup); 1166 if (drive == NULL) { 1167 int sleeping = 0; 1168 unsigned long sleep = 0; /* shut up, gcc */ 1169 hwgroup->rq = NULL; 1170 drive = hwgroup->drive; 1171 do { 1172 if (drive->sleeping && (!sleeping || time_before(drive->sleep, sleep))) { 1173 sleeping = 1; 1174 sleep = drive->sleep; 1175 } 1176 } while ((drive = drive->next) != hwgroup->drive); 1177 if (sleeping) { 1178 /* 1179 * Take a short snooze, and then wake up this hwgroup again. 1180 * This gives other hwgroups on the same a chance to 1181 * play fairly with us, just in case there are big differences 1182 * in relative throughputs.. don't want to hog the cpu too much. 1183 */ 1184 if (time_before(sleep, jiffies + WAIT_MIN_SLEEP)) 1185 sleep = jiffies + WAIT_MIN_SLEEP; 1186#if 1 1187 if (timer_pending(&hwgroup->timer)) 1188 printk(KERN_CRIT "ide_set_handler: timer already active\n"); 1189#endif 1190 /* so that ide_timer_expiry knows what to do */ 1191 hwgroup->sleeping = 1; 1192 hwgroup->req_gen_timer = hwgroup->req_gen; 1193 mod_timer(&hwgroup->timer, sleep); 1194 /* we purposely leave hwgroup->busy==1 1195 * while sleeping */ 1196 } else { 1197 /* Ugly, but how can we sleep for the lock 1198 * otherwise? perhaps from tq_disk? 1199 */ 1200 1201 /* for atari only */ 1202 ide_release_lock(); 1203 hwgroup->busy = 0; 1204 } 1205 1206 /* no more work for this hwgroup (for now) */ 1207 return; 1208 } 1209 again: 1210 hwif = HWIF(drive); 1211 if (hwgroup->hwif->sharing_irq && hwif != hwgroup->hwif) { 1212 /* 1213 * set nIEN for previous hwif, drives in the 1214 * quirk_list may not like intr setups/cleanups 1215 */ 1216 if (drive->quirk_list != 1) 1217 ide_set_irq(drive, 0); 1218 } 1219 hwgroup->hwif = hwif; 1220 hwgroup->drive = drive; 1221 drive->sleeping = 0; 1222 drive->service_start = jiffies; 1223 1224 if (blk_queue_plugged(drive->queue)) { 1225 printk(KERN_ERR "ide: huh? queue was plugged!\n"); 1226 break; 1227 } 1228 1229 /* 1230 * we know that the queue isn't empty, but this can happen 1231 * if the q->prep_rq_fn() decides to kill a request 1232 */ 1233 rq = elv_next_request(drive->queue); 1234 if (!rq) { 1235 hwgroup->busy = 0; 1236 break; 1237 } 1238 1239 /* 1240 * Sanity: don't accept a request that isn't a PM request 1241 * if we are currently power managed. This is very important as 1242 * blk_stop_queue() doesn't prevent the elv_next_request() 1243 * above to return us whatever is in the queue. Since we call 1244 * ide_do_request() ourselves, we end up taking requests while 1245 * the queue is blocked... 1246 * 1247 * We let requests forced at head of queue with ide-preempt 1248 * though. I hope that doesn't happen too much, hopefully not 1249 * unless the subdriver triggers such a thing in its own PM 1250 * state machine. 1251 * 1252 * We count how many times we loop here to make sure we service 1253 * all drives in the hwgroup without looping for ever 1254 */ 1255 if (drive->blocked && !blk_pm_request(rq) && !(rq->cmd_flags & REQ_PREEMPT)) { 1256 drive = drive->next ? drive->next : hwgroup->drive; 1257 if (loops++ < 4 && !blk_queue_plugged(drive->queue)) 1258 goto again; 1259 /* We clear busy, there should be no pending ATA command at this point. */ 1260 hwgroup->busy = 0; 1261 break; 1262 } 1263 1264 hwgroup->rq = rq; 1265 1266 /* 1267 * Some systems have trouble with IDE IRQs arriving while 1268 * the driver is still setting things up. So, here we disable 1269 * the IRQ used by this interface while the request is being started. 1270 * This may look bad at first, but pretty much the same thing 1271 * happens anyway when any interrupt comes in, IDE or otherwise 1272 * -- the kernel masks the IRQ while it is being handled. 1273 */ 1274 if (masked_irq != IDE_NO_IRQ && hwif->irq != masked_irq) 1275 disable_irq_nosync(hwif->irq); 1276 spin_unlock(&ide_lock); 1277 local_irq_enable_in_hardirq(); 1278 /* allow other IRQs while we start this request */ 1279 startstop = start_request(drive, rq); 1280 spin_lock_irq(&ide_lock); 1281 if (masked_irq != IDE_NO_IRQ && hwif->irq != masked_irq) 1282 enable_irq(hwif->irq); 1283 if (startstop == ide_stopped) 1284 hwgroup->busy = 0; 1285 } 1286} 1287 1288/* 1289 * Passes the stuff to ide_do_request 1290 */ 1291void do_ide_request(struct request_queue *q) 1292{ 1293 ide_drive_t *drive = q->queuedata; 1294 1295 ide_do_request(HWGROUP(drive), IDE_NO_IRQ); 1296} 1297 1298/* 1299 * un-busy the hwgroup etc, and clear any pending DMA status. we want to 1300 * retry the current request in pio mode instead of risking tossing it 1301 * all away 1302 */ 1303static ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error) 1304{ 1305 ide_hwif_t *hwif = HWIF(drive); 1306 struct request *rq; 1307 ide_startstop_t ret = ide_stopped; 1308 1309 /* 1310 * end current dma transaction 1311 */ 1312 1313 if (error < 0) { 1314 printk(KERN_WARNING "%s: DMA timeout error\n", drive->name); 1315 (void)HWIF(drive)->ide_dma_end(drive); 1316 ret = ide_error(drive, "dma timeout error", 1317 hwif->INB(IDE_STATUS_REG)); 1318 } else { 1319 printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name); 1320 hwif->dma_timeout(drive); 1321 } 1322 1323 /* 1324 * disable dma for now, but remember that we did so because of 1325 * a timeout -- we'll reenable after we finish this next request 1326 * (or rather the first chunk of it) in pio. 1327 */ 1328 drive->retry_pio++; 1329 drive->state = DMA_PIO_RETRY; 1330 ide_dma_off_quietly(drive); 1331 1332 /* 1333 * un-busy drive etc (hwgroup->busy is cleared on return) and 1334 * make sure request is sane 1335 */ 1336 rq = HWGROUP(drive)->rq; 1337 1338 if (!rq) 1339 goto out; 1340 1341 HWGROUP(drive)->rq = NULL; 1342 1343 rq->errors = 0; 1344 1345 if (!rq->bio) 1346 goto out; 1347 1348 rq->sector = rq->bio->bi_sector; 1349 rq->current_nr_sectors = bio_iovec(rq->bio)->bv_len >> 9; 1350 rq->hard_cur_sectors = rq->current_nr_sectors; 1351 rq->buffer = bio_data(rq->bio); 1352out: 1353 return ret; 1354} 1355 1356/** 1357 * ide_timer_expiry - handle lack of an IDE interrupt 1358 * @data: timer callback magic (hwgroup) 1359 * 1360 * An IDE command has timed out before the expected drive return 1361 * occurred. At this point we attempt to clean up the current 1362 * mess. If the current handler includes an expiry handler then 1363 * we invoke the expiry handler, and providing it is happy the 1364 * work is done. If that fails we apply generic recovery rules 1365 * invoking the handler and checking the drive DMA status. We 1366 * have an excessively incestuous relationship with the DMA 1367 * logic that wants cleaning up. 1368 */ 1369 1370void ide_timer_expiry (unsigned long data) 1371{ 1372 ide_hwgroup_t *hwgroup = (ide_hwgroup_t *) data; 1373 ide_handler_t *handler; 1374 ide_expiry_t *expiry; 1375 unsigned long flags; 1376 unsigned long wait = -1; 1377 1378 spin_lock_irqsave(&ide_lock, flags); 1379 1380 if (((handler = hwgroup->handler) == NULL) || 1381 (hwgroup->req_gen != hwgroup->req_gen_timer)) { 1382 /* 1383 * Either a marginal timeout occurred 1384 * (got the interrupt just as timer expired), 1385 * or we were "sleeping" to give other devices a chance. 1386 * Either way, we don't really want to complain about anything. 1387 */ 1388 if (hwgroup->sleeping) { 1389 hwgroup->sleeping = 0; 1390 hwgroup->busy = 0; 1391 } 1392 } else { 1393 ide_drive_t *drive = hwgroup->drive; 1394 if (!drive) { 1395 printk(KERN_ERR "ide_timer_expiry: hwgroup->drive was NULL\n"); 1396 hwgroup->handler = NULL; 1397 } else { 1398 ide_hwif_t *hwif; 1399 ide_startstop_t startstop = ide_stopped; 1400 if (!hwgroup->busy) { 1401 hwgroup->busy = 1; /* paranoia */ 1402 printk(KERN_ERR "%s: ide_timer_expiry: hwgroup->busy was 0 ??\n", drive->name); 1403 } 1404 if ((expiry = hwgroup->expiry) != NULL) { 1405 /* continue */ 1406 if ((wait = expiry(drive)) > 0) { 1407 /* reset timer */ 1408 hwgroup->timer.expires = jiffies + wait; 1409 hwgroup->req_gen_timer = hwgroup->req_gen; 1410 add_timer(&hwgroup->timer); 1411 spin_unlock_irqrestore(&ide_lock, flags); 1412 return; 1413 } 1414 } 1415 hwgroup->handler = NULL; 1416 /* 1417 * We need to simulate a real interrupt when invoking 1418 * the handler() function, which means we need to 1419 * globally mask the specific IRQ: 1420 */ 1421 spin_unlock(&ide_lock); 1422 hwif = HWIF(drive); 1423 /* disable_irq_nosync ?? */ 1424 disable_irq(hwif->irq); 1425 /* local CPU only, 1426 * as if we were handling an interrupt */ 1427 local_irq_disable(); 1428 if (hwgroup->polling) { 1429 startstop = handler(drive); 1430 } else if (drive_is_ready(drive)) { 1431 if (drive->waiting_for_dma) 1432 hwgroup->hwif->dma_lost_irq(drive); 1433 (void)ide_ack_intr(hwif); 1434 printk(KERN_WARNING "%s: lost interrupt\n", drive->name); 1435 startstop = handler(drive); 1436 } else { 1437 if (drive->waiting_for_dma) { 1438 startstop = ide_dma_timeout_retry(drive, wait); 1439 } else 1440 startstop = 1441 ide_error(drive, "irq timeout", hwif->INB(IDE_STATUS_REG)); 1442 } 1443 drive->service_time = jiffies - drive->service_start; 1444 spin_lock_irq(&ide_lock); 1445 enable_irq(hwif->irq); 1446 if (startstop == ide_stopped) 1447 hwgroup->busy = 0; 1448 } 1449 } 1450 ide_do_request(hwgroup, IDE_NO_IRQ); 1451 spin_unlock_irqrestore(&ide_lock, flags); 1452} 1453 1454/** 1455 * unexpected_intr - handle an unexpected IDE interrupt 1456 * @irq: interrupt line 1457 * @hwgroup: hwgroup being processed 1458 * 1459 * There's nothing really useful we can do with an unexpected interrupt, 1460 * other than reading the status register (to clear it), and logging it. 1461 * There should be no way that an irq can happen before we're ready for it, 1462 * so we needn't worry much about losing an "important" interrupt here. 1463 * 1464 * On laptops (and "green" PCs), an unexpected interrupt occurs whenever 1465 * the drive enters "idle", "standby", or "sleep" mode, so if the status 1466 * looks "good", we just ignore the interrupt completely. 1467 * 1468 * This routine assumes __cli() is in effect when called. 1469 * 1470 * If an unexpected interrupt happens on irq15 while we are handling irq14 1471 * and if the two interfaces are "serialized" (CMD640), then it looks like 1472 * we could screw up by interfering with a new request being set up for 1473 * irq15. 1474 * 1475 * In reality, this is a non-issue. The new command is not sent unless 1476 * the drive is ready to accept one, in which case we know the drive is 1477 * not trying to interrupt us. And ide_set_handler() is always invoked 1478 * before completing the issuance of any new drive command, so we will not 1479 * be accidentally invoked as a result of any valid command completion 1480 * interrupt. 1481 * 1482 * Note that we must walk the entire hwgroup here. We know which hwif 1483 * is doing the current command, but we don't know which hwif burped 1484 * mysteriously. 1485 */ 1486 1487static void unexpected_intr (int irq, ide_hwgroup_t *hwgroup) 1488{ 1489 u8 stat; 1490 ide_hwif_t *hwif = hwgroup->hwif; 1491 1492 /* 1493 * handle the unexpected interrupt 1494 */ 1495 do { 1496 if (hwif->irq == irq) { 1497 stat = hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]); 1498 if (!OK_STAT(stat, READY_STAT, BAD_STAT)) { 1499 /* Try to not flood the console with msgs */ 1500 static unsigned long last_msgtime, count; 1501 ++count; 1502 if (time_after(jiffies, last_msgtime + HZ)) { 1503 last_msgtime = jiffies; 1504 printk(KERN_ERR "%s%s: unexpected interrupt, " 1505 "status=0x%02x, count=%ld\n", 1506 hwif->name, 1507 (hwif->next==hwgroup->hwif) ? "" : "(?)", stat, count); 1508 } 1509 } 1510 } 1511 } while ((hwif = hwif->next) != hwgroup->hwif); 1512} 1513 1514/** 1515 * ide_intr - default IDE interrupt handler 1516 * @irq: interrupt number 1517 * @dev_id: hwif group 1518 * @regs: unused weirdness from the kernel irq layer 1519 * 1520 * This is the default IRQ handler for the IDE layer. You should 1521 * not need to override it. If you do be aware it is subtle in 1522 * places 1523 * 1524 * hwgroup->hwif is the interface in the group currently performing 1525 * a command. hwgroup->drive is the drive and hwgroup->handler is 1526 * the IRQ handler to call. As we issue a command the handlers 1527 * step through multiple states, reassigning the handler to the 1528 * next step in the process. Unlike a smart SCSI controller IDE 1529 * expects the main processor to sequence the various transfer 1530 * stages. We also manage a poll timer to catch up with most 1531 * timeout situations. There are still a few where the handlers 1532 * don't ever decide to give up. 1533 * 1534 * The handler eventually returns ide_stopped to indicate the 1535 * request completed. At this point we issue the next request 1536 * on the hwgroup and the process begins again. 1537 */ 1538 1539irqreturn_t ide_intr (int irq, void *dev_id) 1540{ 1541 unsigned long flags; 1542 ide_hwgroup_t *hwgroup = (ide_hwgroup_t *)dev_id; 1543 ide_hwif_t *hwif; 1544 ide_drive_t *drive; 1545 ide_handler_t *handler; 1546 ide_startstop_t startstop; 1547 1548 spin_lock_irqsave(&ide_lock, flags); 1549 hwif = hwgroup->hwif; 1550 1551 if (!ide_ack_intr(hwif)) { 1552 spin_unlock_irqrestore(&ide_lock, flags); 1553 return IRQ_NONE; 1554 } 1555 1556 if ((handler = hwgroup->handler) == NULL || hwgroup->polling) { 1557 /* 1558 * Not expecting an interrupt from this drive. 1559 * That means this could be: 1560 * (1) an interrupt from another PCI device 1561 * sharing the same PCI INT# as us. 1562 * or (2) a drive just entered sleep or standby mode, 1563 * and is interrupting to let us know. 1564 * or (3) a spurious interrupt of unknown origin. 1565 * 1566 * For PCI, we cannot tell the difference, 1567 * so in that case we just ignore it and hope it goes away. 1568 * 1569 * FIXME: unexpected_intr should be hwif-> then we can 1570 * remove all the ifdef PCI crap 1571 */ 1572#ifdef CONFIG_BLK_DEV_IDEPCI 1573 if (hwif->pci_dev && !hwif->pci_dev->vendor) 1574#endif /* CONFIG_BLK_DEV_IDEPCI */ 1575 { 1576 /* 1577 * Probably not a shared PCI interrupt, 1578 * so we can safely try to do something about it: 1579 */ 1580 unexpected_intr(irq, hwgroup); 1581#ifdef CONFIG_BLK_DEV_IDEPCI 1582 } else { 1583 /* 1584 * Whack the status register, just in case 1585 * we have a leftover pending IRQ. 1586 */ 1587 (void) hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]); 1588#endif /* CONFIG_BLK_DEV_IDEPCI */ 1589 } 1590 spin_unlock_irqrestore(&ide_lock, flags); 1591 return IRQ_NONE; 1592 } 1593 drive = hwgroup->drive; 1594 if (!drive) { 1595 /* 1596 * This should NEVER happen, and there isn't much 1597 * we could do about it here. 1598 * 1599 * [Note - this can occur if the drive is hot unplugged] 1600 */ 1601 spin_unlock_irqrestore(&ide_lock, flags); 1602 return IRQ_HANDLED; 1603 } 1604 if (!drive_is_ready(drive)) { 1605 /* 1606 * This happens regularly when we share a PCI IRQ with 1607 * another device. Unfortunately, it can also happen 1608 * with some buggy drives that trigger the IRQ before 1609 * their status register is up to date. Hopefully we have 1610 * enough advance overhead that the latter isn't a problem. 1611 */ 1612 spin_unlock_irqrestore(&ide_lock, flags); 1613 return IRQ_NONE; 1614 } 1615 if (!hwgroup->busy) { 1616 hwgroup->busy = 1; /* paranoia */ 1617 printk(KERN_ERR "%s: ide_intr: hwgroup->busy was 0 ??\n", drive->name); 1618 } 1619 hwgroup->handler = NULL; 1620 hwgroup->req_gen++; 1621 del_timer(&hwgroup->timer); 1622 spin_unlock(&ide_lock); 1623 1624 /* Some controllers might set DMA INTR no matter DMA or PIO; 1625 * bmdma status might need to be cleared even for 1626 * PIO interrupts to prevent spurious/lost irq. 1627 */ 1628 if (hwif->ide_dma_clear_irq && !(drive->waiting_for_dma)) 1629 /* ide_dma_end() needs bmdma status for error checking. 1630 * So, skip clearing bmdma status here and leave it 1631 * to ide_dma_end() if this is dma interrupt. 1632 */ 1633 hwif->ide_dma_clear_irq(drive); 1634 1635 if (drive->unmask) 1636 local_irq_enable_in_hardirq(); 1637 /* service this interrupt, may set handler for next interrupt */ 1638 startstop = handler(drive); 1639 spin_lock_irq(&ide_lock); 1640 1641 /* 1642 * Note that handler() may have set things up for another 1643 * interrupt to occur soon, but it cannot happen until 1644 * we exit from this routine, because it will be the 1645 * same irq as is currently being serviced here, and Linux 1646 * won't allow another of the same (on any CPU) until we return. 1647 */ 1648 drive->service_time = jiffies - drive->service_start; 1649 if (startstop == ide_stopped) { 1650 if (hwgroup->handler == NULL) { /* paranoia */ 1651 hwgroup->busy = 0; 1652 ide_do_request(hwgroup, hwif->irq); 1653 } else { 1654 printk(KERN_ERR "%s: ide_intr: huh? expected NULL handler " 1655 "on exit\n", drive->name); 1656 } 1657 } 1658 spin_unlock_irqrestore(&ide_lock, flags); 1659 return IRQ_HANDLED; 1660} 1661 1662/** 1663 * ide_init_drive_cmd - initialize a drive command request 1664 * @rq: request object 1665 * 1666 * Initialize a request before we fill it in and send it down to 1667 * ide_do_drive_cmd. Commands must be set up by this function. Right 1668 * now it doesn't do a lot, but if that changes abusers will have a 1669 * nasty surprise. 1670 */ 1671 1672void ide_init_drive_cmd (struct request *rq) 1673{ 1674 memset(rq, 0, sizeof(*rq)); 1675 rq->cmd_type = REQ_TYPE_ATA_CMD; 1676 rq->ref_count = 1; 1677} 1678 1679EXPORT_SYMBOL(ide_init_drive_cmd); 1680 1681/** 1682 * ide_do_drive_cmd - issue IDE special command 1683 * @drive: device to issue command 1684 * @rq: request to issue 1685 * @action: action for processing 1686 * 1687 * This function issues a special IDE device request 1688 * onto the request queue. 1689 * 1690 * If action is ide_wait, then the rq is queued at the end of the 1691 * request queue, and the function sleeps until it has been processed. 1692 * This is for use when invoked from an ioctl handler. 1693 * 1694 * If action is ide_preempt, then the rq is queued at the head of 1695 * the request queue, displacing the currently-being-processed 1696 * request and this function returns immediately without waiting 1697 * for the new rq to be completed. This is VERY DANGEROUS, and is 1698 * intended for careful use by the ATAPI tape/cdrom driver code. 1699 * 1700 * If action is ide_end, then the rq is queued at the end of the 1701 * request queue, and the function returns immediately without waiting 1702 * for the new rq to be completed. This is again intended for careful 1703 * use by the ATAPI tape/cdrom driver code. 1704 */ 1705 1706int ide_do_drive_cmd (ide_drive_t *drive, struct request *rq, ide_action_t action) 1707{ 1708 unsigned long flags; 1709 ide_hwgroup_t *hwgroup = HWGROUP(drive); 1710 DECLARE_COMPLETION_ONSTACK(wait); 1711 int where = ELEVATOR_INSERT_BACK, err; 1712 int must_wait = (action == ide_wait || action == ide_head_wait); 1713 1714 rq->errors = 0; 1715 1716 /* 1717 * we need to hold an extra reference to request for safe inspection 1718 * after completion 1719 */ 1720 if (must_wait) { 1721 rq->ref_count++; 1722 rq->end_io_data = &wait; 1723 rq->end_io = blk_end_sync_rq; 1724 } 1725 1726 spin_lock_irqsave(&ide_lock, flags); 1727 if (action == ide_preempt) 1728 hwgroup->rq = NULL; 1729 if (action == ide_preempt || action == ide_head_wait) { 1730 where = ELEVATOR_INSERT_FRONT; 1731 rq->cmd_flags |= REQ_PREEMPT; 1732 } 1733 __elv_add_request(drive->queue, rq, where, 0); 1734 ide_do_request(hwgroup, IDE_NO_IRQ); 1735 spin_unlock_irqrestore(&ide_lock, flags); 1736 1737 err = 0; 1738 if (must_wait) { 1739 wait_for_completion(&wait); 1740 if (rq->errors) 1741 err = -EIO; 1742 1743 blk_put_request(rq); 1744 } 1745 1746 return err; 1747} 1748 1749EXPORT_SYMBOL(ide_do_drive_cmd); 1750 1751void ide_pktcmd_tf_load(ide_drive_t *drive, u32 tf_flags, u16 bcount, u8 dma) 1752{ 1753 ide_task_t task; 1754 1755 memset(&task, 0, sizeof(task)); 1756 task.tf_flags = IDE_TFLAG_OUT_LBAH | IDE_TFLAG_OUT_LBAM | 1757 IDE_TFLAG_OUT_FEATURE | tf_flags; 1758 task.tf.feature = dma; /* Use PIO/DMA */ 1759 task.tf.lbam = bcount & 0xff; 1760 task.tf.lbah = (bcount >> 8) & 0xff; 1761 1762 ide_tf_load(drive, &task); 1763} 1764 1765EXPORT_SYMBOL_GPL(ide_pktcmd_tf_load); 1766