ide-io.c revision c283f5dbe31920ca70b80a594a97bfaa2a28be13
/*
 * IDE I/O functions
 *
 * Basic PIO and command management functionality.
 *
 * This code was split off from ide.c. See ide.c for history and original
 * copyrights.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * For the avoidance of doubt the "preferred form" of this code is one which
 * is in an open non patent encumbered format. Where cryptographic key signing
 * forms part of the process of creating an executable the information
 * including keys needed to generate an equivalently functional executable
 * are deemed to be part of the source code.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/major.h>
#include <linux/errno.h>
#include <linux/genhd.h>
#include <linux/blkpg.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/ide.h>
#include <linux/completion.h>
#include <linux/reboot.h>
#include <linux/cdrom.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/kmod.h>
#include <linux/scatterlist.h>

#include <asm/byteorder.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/bitops.h>

static int __ide_end_request(ide_drive_t *drive, struct request *rq,
			     int uptodate, int nr_sectors)
{
	int ret = 1;

	/*
	 * if failfast is set on a request, override number of sectors and
	 * complete the whole request right now
	 */
	if (blk_noretry_request(rq) && end_io_error(uptodate))
		nr_sectors = rq->hard_nr_sectors;

	if (!blk_fs_request(rq) && end_io_error(uptodate) && !rq->errors)
		rq->errors = -EIO;

	/*
	 * decide whether to reenable DMA -- 3 is a random magic for now,
	 * if we DMA timeout more than 3 times, just stay in PIO
	 */
	if (drive->state == DMA_PIO_RETRY && drive->retry_pio <= 3) {
		drive->state = 0;
		HWGROUP(drive)->hwif->ide_dma_on(drive);
	}

	if (!end_that_request_first(rq, uptodate, nr_sectors)) {
		add_disk_randomness(rq->rq_disk);
		if (!list_empty(&rq->queuelist))
			blkdev_dequeue_request(rq);
		HWGROUP(drive)->rq = NULL;
		end_that_request_last(rq, uptodate);
		ret = 0;
	}

	return ret;
}
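
/*
 * Return convention of __ide_end_request(), inferred from its callers in
 * this file: end_that_request_first() returns non-zero while the request
 * still has sectors outstanding, so __ide_end_request() returns 1 when
 * more I/O remains and 0 once the request has been fully completed and
 * dequeued.
 */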

/**
 * ide_end_request - complete an IDE I/O
 * @drive: IDE device for the I/O
 * @uptodate: non-zero on success, zero to fail the request
 * @nr_sectors: number of sectors completed
 *
 * This is our end_request wrapper function. We complete the I/O,
 * update random number input and dequeue the request, which if
 * it was tagged may be out of order.
 */

int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
{
	struct request *rq;
	unsigned long flags;
	int ret = 1;

	/*
	 * room for locking improvements here, the calls below don't
	 * need the queue lock held at all
	 */
	spin_lock_irqsave(&ide_lock, flags);
	rq = HWGROUP(drive)->rq;

	if (!nr_sectors)
		nr_sectors = rq->hard_cur_sectors;

	ret = __ide_end_request(drive, rq, uptodate, nr_sectors);

	spin_unlock_irqrestore(&ide_lock, flags);
	return ret;
}
EXPORT_SYMBOL(ide_end_request);

/*
 * Power Management state machine. This one is rather trivial for now,
 * we should probably add more, like switching back to PIO on suspend
 * to help some BIOSes, re-do the door locking on resume, etc...
 */

enum {
	ide_pm_flush_cache	= ide_pm_state_start_suspend,
	idedisk_pm_standby,

	idedisk_pm_restore_pio	= ide_pm_state_start_resume,
	idedisk_pm_idle,
	ide_pm_restore_dma,
};

static void ide_complete_power_step(ide_drive_t *drive, struct request *rq, u8 stat, u8 error)
{
	struct request_pm_state *pm = rq->data;

	if (drive->media != ide_disk)
		return;

	switch (pm->pm_step) {
	case ide_pm_flush_cache:	/* Suspend step 1 (flush cache) complete */
		if (pm->pm_state == PM_EVENT_FREEZE)
			pm->pm_step = ide_pm_state_completed;
		else
			pm->pm_step = idedisk_pm_standby;
		break;
	case idedisk_pm_standby:	/* Suspend step 2 (standby) complete */
		pm->pm_step = ide_pm_state_completed;
		break;
	case idedisk_pm_restore_pio:	/* Resume step 1 complete */
		pm->pm_step = idedisk_pm_idle;
		break;
	case idedisk_pm_idle:		/* Resume step 2 (idle) complete */
		pm->pm_step = ide_pm_restore_dma;
		break;
	}
}
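
/*
 * Step ordering, summarized from the enum and the two PM functions: the
 * suspend path runs flush_cache -> standby (standby is skipped for
 * PM_EVENT_FREEZE), the resume path runs restore_pio -> idle ->
 * restore_dma. ide_start_power_step() below issues each step and
 * ide_complete_power_step() above advances to the next one.
 */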

static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
{
	struct request_pm_state *pm = rq->data;
	ide_task_t *args = rq->special;

	memset(args, 0, sizeof(*args));

	switch (pm->pm_step) {
	case ide_pm_flush_cache:	/* Suspend step 1 (flush cache) */
		if (drive->media != ide_disk)
			break;
		/* Not supported? Switch to next step now. */
		if (!drive->wcache || !ide_id_has_flush_cache(drive->id)) {
			ide_complete_power_step(drive, rq, 0, 0);
			return ide_stopped;
		}
		if (ide_id_has_flush_cache_ext(drive->id))
			args->tfRegister[IDE_COMMAND_OFFSET] = WIN_FLUSH_CACHE_EXT;
		else
			args->tfRegister[IDE_COMMAND_OFFSET] = WIN_FLUSH_CACHE;
		args->command_type = IDE_DRIVE_TASK_NO_DATA;
		args->handler	   = &task_no_data_intr;
		return do_rw_taskfile(drive, args);

	case idedisk_pm_standby:	/* Suspend step 2 (standby) */
		args->tfRegister[IDE_COMMAND_OFFSET] = WIN_STANDBYNOW1;
		args->command_type = IDE_DRIVE_TASK_NO_DATA;
		args->handler	   = &task_no_data_intr;
		return do_rw_taskfile(drive, args);

	case idedisk_pm_restore_pio:	/* Resume step 1 (restore PIO) */
		if (drive->hwif->tuneproc != NULL)
			drive->hwif->tuneproc(drive, 255);
		/*
		 * skip idedisk_pm_idle for ATAPI devices
		 */
		if (drive->media != ide_disk)
			pm->pm_step = ide_pm_restore_dma;
		else
			ide_complete_power_step(drive, rq, 0, 0);
		return ide_stopped;

	case idedisk_pm_idle:		/* Resume step 2 (idle) */
		args->tfRegister[IDE_COMMAND_OFFSET] = WIN_IDLEIMMEDIATE;
		args->command_type = IDE_DRIVE_TASK_NO_DATA;
		args->handler = task_no_data_intr;
		return do_rw_taskfile(drive, args);

	case ide_pm_restore_dma:	/* Resume step 3 (restore DMA) */
		/*
		 * Right now, all we do is call hwif->ide_dma_check(drive),
		 * we could be smarter and check for current xfer_speed
		 * in struct drive etc...
		 */
		if ((drive->id->capability & 1) == 0)
			break;
		if (drive->hwif->ide_dma_check == NULL)
			break;
		drive->hwif->dma_off_quietly(drive);
		ide_set_dma(drive);
		break;
	}
	pm->pm_step = ide_pm_state_completed;
	return ide_stopped;
}

/**
 * ide_end_dequeued_request - complete an IDE I/O
 * @drive: IDE device for the I/O
 * @rq: request to complete
 * @uptodate: non-zero on success, zero to fail the request
 * @nr_sectors: number of sectors completed
 *
 * Complete an I/O that is no longer on the request queue. This
 * typically occurs when we pull the request and issue a REQUEST_SENSE.
 * We must still finish the old request but we must not tamper with the
 * queue in the meantime.
 *
 * NOTE: This path does not handle barrier, but barrier is not supported
 * on ide-cd anyway.
 */

int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq,
			     int uptodate, int nr_sectors)
{
	unsigned long flags;
	int ret = 1;

	spin_lock_irqsave(&ide_lock, flags);

	BUG_ON(!blk_rq_started(rq));

	/*
	 * if failfast is set on a request, override number of sectors and
	 * complete the whole request right now
	 */
	if (blk_noretry_request(rq) && end_io_error(uptodate))
		nr_sectors = rq->hard_nr_sectors;

	if (!blk_fs_request(rq) && end_io_error(uptodate) && !rq->errors)
		rq->errors = -EIO;

	/*
	 * decide whether to reenable DMA -- 3 is a random magic for now,
	 * if we DMA timeout more than 3 times, just stay in PIO
	 */
	if (drive->state == DMA_PIO_RETRY && drive->retry_pio <= 3) {
		drive->state = 0;
		HWGROUP(drive)->hwif->ide_dma_on(drive);
	}

	if (!end_that_request_first(rq, uptodate, nr_sectors)) {
		add_disk_randomness(rq->rq_disk);
		if (blk_rq_tagged(rq))
			blk_queue_end_tag(drive->queue, rq);
		end_that_request_last(rq, uptodate);
		ret = 0;
	}
	spin_unlock_irqrestore(&ide_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(ide_end_dequeued_request);

/**
 * ide_complete_pm_request - end the current Power Management request
 * @drive: target drive
 * @rq: request
 *
 * This function cleans up the current PM request and stops the queue
 * if necessary.
 */
static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq)
{
	unsigned long flags;

#ifdef DEBUG_PM
	printk("%s: completing PM request, %s\n", drive->name,
	       blk_pm_suspend_request(rq) ? "suspend" : "resume");
#endif
	spin_lock_irqsave(&ide_lock, flags);
	if (blk_pm_suspend_request(rq)) {
		blk_stop_queue(drive->queue);
	} else {
		drive->blocked = 0;
		blk_start_queue(drive->queue);
	}
	blkdev_dequeue_request(rq);
	HWGROUP(drive)->rq = NULL;
	end_that_request_last(rq, 1);
	spin_unlock_irqrestore(&ide_lock, flags);
}

/*
 * FIXME: probably move this somewhere else, name is bad too :)
 */
u64 ide_get_error_location(ide_drive_t *drive, char *args)
{
	u32 high, low;
	u8 hcyl, lcyl, sect;
	u64 sector;

	high = 0;
	hcyl = args[5];
	lcyl = args[4];
	sect = args[3];

	if (ide_id_has_flush_cache_ext(drive->id)) {
		low = (hcyl << 16) | (lcyl << 8) | sect;
		HWIF(drive)->OUTB(drive->ctl|0x80, IDE_CONTROL_REG);
		high = ide_read_24(drive);
	} else {
		u8 cur = HWIF(drive)->INB(IDE_SELECT_REG);
		if (cur & 0x40) {
			high = cur & 0xf;
			low = (hcyl << 16) | (lcyl << 8) | sect;
		} else {
			low  = hcyl * drive->head * drive->sect;
			low += lcyl * drive->sect;
			low += sect - 1;
		}
	}

	sector = ((u64) high << 24) | low;
	return sector;
}
EXPORT_SYMBOL(ide_get_error_location);
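
/*
 * Summary of the decoding above (no new behaviour): on LBA48 drives the
 * failed sector is the three low taskfile bytes plus 24 more bits read
 * back with the HOB bit set in the control register; on LBA28 the low
 * nibble of the select register supplies bits 24-27; otherwise the CHS
 * register values are folded into a linear sector number.
 */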

/**
 * ide_end_drive_cmd - end an explicit drive command
 * @drive: drive the command completed on
 * @stat: status bits
 * @err: error bits
 *
 * Clean up after success/failure of an explicit drive command.
 * These get thrown onto the queue so they are synchronized with
 * real I/O operations on the drive.
 *
 * In LBA48 mode we have to read the register set twice to get
 * all the extra information out.
 */

void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
{
	ide_hwif_t *hwif = HWIF(drive);
	unsigned long flags;
	struct request *rq;

	spin_lock_irqsave(&ide_lock, flags);
	rq = HWGROUP(drive)->rq;
	spin_unlock_irqrestore(&ide_lock, flags);

	if (rq->cmd_type == REQ_TYPE_ATA_CMD) {
		u8 *args = (u8 *) rq->buffer;
		if (rq->errors == 0)
			rq->errors = !OK_STAT(stat, READY_STAT, BAD_STAT);

		if (args) {
			args[0] = stat;
			args[1] = err;
			args[2] = hwif->INB(IDE_NSECTOR_REG);
		}
	} else if (rq->cmd_type == REQ_TYPE_ATA_TASK) {
		u8 *args = (u8 *) rq->buffer;
		if (rq->errors == 0)
			rq->errors = !OK_STAT(stat, READY_STAT, BAD_STAT);

		if (args) {
			args[0] = stat;
			args[1] = err;
			args[2] = hwif->INB(IDE_NSECTOR_REG);
			args[3] = hwif->INB(IDE_SECTOR_REG);
			args[4] = hwif->INB(IDE_LCYL_REG);
			args[5] = hwif->INB(IDE_HCYL_REG);
			args[6] = hwif->INB(IDE_SELECT_REG);
		}
	} else if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
		ide_task_t *args = (ide_task_t *) rq->special;
		if (rq->errors == 0)
			rq->errors = !OK_STAT(stat, READY_STAT, BAD_STAT);

		if (args) {
			if (args->tf_in_flags.b.data) {
				u16 data = hwif->INW(IDE_DATA_REG);
				args->tfRegister[IDE_DATA_OFFSET] = (data) & 0xFF;
				args->hobRegister[IDE_DATA_OFFSET] = (data >> 8) & 0xFF;
			}
			args->tfRegister[IDE_ERROR_OFFSET] = err;
			/* be sure we're looking at the low order bits */
			hwif->OUTB(drive->ctl & ~0x80, IDE_CONTROL_REG);
			args->tfRegister[IDE_NSECTOR_OFFSET] = hwif->INB(IDE_NSECTOR_REG);
			args->tfRegister[IDE_SECTOR_OFFSET] = hwif->INB(IDE_SECTOR_REG);
			args->tfRegister[IDE_LCYL_OFFSET] = hwif->INB(IDE_LCYL_REG);
			args->tfRegister[IDE_HCYL_OFFSET] = hwif->INB(IDE_HCYL_REG);
			args->tfRegister[IDE_SELECT_OFFSET] = hwif->INB(IDE_SELECT_REG);
			args->tfRegister[IDE_STATUS_OFFSET] = stat;

			if (drive->addressing == 1) {
				hwif->OUTB(drive->ctl|0x80, IDE_CONTROL_REG);
				args->hobRegister[IDE_FEATURE_OFFSET] = hwif->INB(IDE_FEATURE_REG);
				args->hobRegister[IDE_NSECTOR_OFFSET] = hwif->INB(IDE_NSECTOR_REG);
				args->hobRegister[IDE_SECTOR_OFFSET] = hwif->INB(IDE_SECTOR_REG);
				args->hobRegister[IDE_LCYL_OFFSET] = hwif->INB(IDE_LCYL_REG);
				args->hobRegister[IDE_HCYL_OFFSET] = hwif->INB(IDE_HCYL_REG);
			}
		}
	} else if (blk_pm_request(rq)) {
		struct request_pm_state *pm = rq->data;
#ifdef DEBUG_PM
		printk("%s: complete_power_step(step: %d, stat: %x, err: %x)\n",
			drive->name, pm->pm_step, stat, err);
#endif
		ide_complete_power_step(drive, rq, stat, err);
		if (pm->pm_step == ide_pm_state_completed)
			ide_complete_pm_request(drive, rq);
		return;
	}

	spin_lock_irqsave(&ide_lock, flags);
	blkdev_dequeue_request(rq);
	HWGROUP(drive)->rq = NULL;
	rq->errors = err;
	end_that_request_last(rq, !rq->errors);
	spin_unlock_irqrestore(&ide_lock, flags);
}

EXPORT_SYMBOL(ide_end_drive_cmd);
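
/*
 * A minimal usage sketch: a command completion handler typically reads
 * the status register, lets ide_error() handle failures, and otherwise
 * ends the special request. This is exactly what drive_cmd_intr() below
 * does:
 *
 *	stat = hwif->INB(IDE_STATUS_REG);
 *	if (!OK_STAT(stat, READY_STAT, BAD_STAT))
 *		return ide_error(drive, "drive_cmd", stat);
 *	ide_end_drive_cmd(drive, stat, hwif->INB(IDE_ERROR_REG));
 */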

/**
 * try_to_flush_leftover_data - flush junk
 * @drive: drive to flush
 *
 * try_to_flush_leftover_data() is invoked in response to a drive
 * unexpectedly having its DRQ_STAT bit set. As an alternative to
 * resetting the drive, this routine tries to clear the condition
 * by reading a sector's worth of data from the drive. Of course,
 * this may not help if the drive is *waiting* for data from *us*.
 */
static void try_to_flush_leftover_data (ide_drive_t *drive)
{
	int i = (drive->mult_count ? drive->mult_count : 1) * SECTOR_WORDS;

	if (drive->media != ide_disk)
		return;
	while (i > 0) {
		u32 buffer[16];
		u32 wcount = (i > 16) ? 16 : i;

		i -= wcount;
		HWIF(drive)->ata_input_data(drive, buffer, wcount);
	}
}

static void ide_kill_rq(ide_drive_t *drive, struct request *rq)
{
	if (rq->rq_disk) {
		ide_driver_t *drv;

		drv = *(ide_driver_t **)rq->rq_disk->private_data;
		drv->end_request(drive, 0, 0);
	} else
		ide_end_request(drive, 0, 0);
}

static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
{
	ide_hwif_t *hwif = drive->hwif;

	if (stat & BUSY_STAT || ((stat & WRERR_STAT) && !drive->nowerr)) {
		/* other bits are useless when BUSY */
		rq->errors |= ERROR_RESET;
	} else if (stat & ERR_STAT) {
		/* err has different meaning on cdrom and tape */
		if (err == ABRT_ERR) {
			if (drive->select.b.lba &&
			    /* some newer drives don't support WIN_SPECIFY */
			    hwif->INB(IDE_COMMAND_REG) == WIN_SPECIFY)
				return ide_stopped;
		} else if ((err & BAD_CRC) == BAD_CRC) {
			/* UDMA crc error, just retry the operation */
			drive->crc_count++;
		} else if (err & (BBD_ERR | ECC_ERR)) {
			/* retries won't help these */
			rq->errors = ERROR_MAX;
		} else if (err & TRK0_ERR) {
			/* help it find track zero */
			rq->errors |= ERROR_RECAL;
		}
	}

	if ((stat & DRQ_STAT) && rq_data_dir(rq) == READ && hwif->err_stops_fifo == 0)
		try_to_flush_leftover_data(drive);

	if (rq->errors >= ERROR_MAX || blk_noretry_request(rq)) {
		ide_kill_rq(drive, rq);
		return ide_stopped;
	}

	if (hwif->INB(IDE_STATUS_REG) & (BUSY_STAT|DRQ_STAT))
		rq->errors |= ERROR_RESET;

	if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
		++rq->errors;
		return ide_do_reset(drive);
	}

	if ((rq->errors & ERROR_RECAL) == ERROR_RECAL)
		drive->special.b.recalibrate = 1;

	++rq->errors;

	return ide_stopped;
}

static ide_startstop_t ide_atapi_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
{
	ide_hwif_t *hwif = drive->hwif;

	if (stat & BUSY_STAT || ((stat & WRERR_STAT) && !drive->nowerr)) {
		/* other bits are useless when BUSY */
		rq->errors |= ERROR_RESET;
	} else {
		/* add decoding error stuff */
	}

	if (hwif->INB(IDE_STATUS_REG) & (BUSY_STAT|DRQ_STAT))
		/* force an abort */
		hwif->OUTB(WIN_IDLEIMMEDIATE, IDE_COMMAND_REG);

	if (rq->errors >= ERROR_MAX) {
		ide_kill_rq(drive, rq);
	} else {
		if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
			++rq->errors;
			return ide_do_reset(drive);
		}
		++rq->errors;
	}

	return ide_stopped;
}

ide_startstop_t
__ide_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
{
	if (drive->media == ide_disk)
		return ide_ata_error(drive, rq, stat, err);
	return ide_atapi_error(drive, rq, stat, err);
}

EXPORT_SYMBOL_GPL(__ide_error);
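
/*
 * Retry accounting in the two error handlers above, as the code reads:
 * each pass bumps rq->errors, ERROR_RESET/ERROR_RECAL are mask bits OR-ed
 * into rq->errors alongside the retry count, and once rq->errors reaches
 * ERROR_MAX (or the request is marked "noretry") the request is killed
 * via ide_kill_rq() instead of being retried.
 */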

/**
 * ide_error - handle an error on the IDE
 * @drive: drive the error occurred on
 * @msg: message to report
 * @stat: status bits
 *
 * ide_error() takes action based on the error returned by the drive.
 * For normal I/O that may well include retries. We deal with
 * both new-style (taskfile) and old style command handling here.
 * In the case of taskfile command handling there is work left to do.
 */

ide_startstop_t ide_error (ide_drive_t *drive, const char *msg, u8 stat)
{
	struct request *rq;
	u8 err;

	err = ide_dump_status(drive, msg, stat);

	if ((rq = HWGROUP(drive)->rq) == NULL)
		return ide_stopped;

	/* retry only "normal" I/O: */
	if (!blk_fs_request(rq)) {
		rq->errors = 1;
		ide_end_drive_cmd(drive, stat, err);
		return ide_stopped;
	}

	if (rq->rq_disk) {
		ide_driver_t *drv;

		drv = *(ide_driver_t **)rq->rq_disk->private_data;
		return drv->error(drive, rq, stat, err);
	} else
		return __ide_error(drive, rq, stat, err);
}

EXPORT_SYMBOL_GPL(ide_error);

ide_startstop_t __ide_abort(ide_drive_t *drive, struct request *rq)
{
	if (drive->media != ide_disk)
		rq->errors |= ERROR_RESET;

	ide_kill_rq(drive, rq);

	return ide_stopped;
}

EXPORT_SYMBOL_GPL(__ide_abort);

/**
 * ide_abort - abort pending IDE operations
 * @drive: drive the error occurred on
 * @msg: message to report
 *
 * ide_abort kills and cleans up when we are about to do a
 * host initiated reset on active commands. Longer term we
 * want handlers to have sensible abort handling themselves.
 *
 * This differs fundamentally from ide_error because in
 * this case the command is doing just fine when we
 * blow it away.
 */

ide_startstop_t ide_abort(ide_drive_t *drive, const char *msg)
{
	struct request *rq;

	if (drive == NULL || (rq = HWGROUP(drive)->rq) == NULL)
		return ide_stopped;

	/* retry only "normal" I/O: */
	if (!blk_fs_request(rq)) {
		rq->errors = 1;
		ide_end_drive_cmd(drive, BUSY_STAT, 0);
		return ide_stopped;
	}

	if (rq->rq_disk) {
		ide_driver_t *drv;

		drv = *(ide_driver_t **)rq->rq_disk->private_data;
		return drv->abort(drive, rq);
	} else
		return __ide_abort(drive, rq);
}

/**
 * ide_cmd - issue a simple drive command
 * @drive: drive the command is for
 * @cmd: command byte
 * @nsect: sector byte
 * @handler: handler for the command completion
 *
 * Issue a simple drive command with interrupts.
 * The drive must be selected beforehand.
 */

static void ide_cmd (ide_drive_t *drive, u8 cmd, u8 nsect,
		ide_handler_t *handler)
{
	ide_hwif_t *hwif = HWIF(drive);
	if (IDE_CONTROL_REG)
		hwif->OUTB(drive->ctl, IDE_CONTROL_REG);	/* clear nIEN */
	SELECT_MASK(drive, 0);
	hwif->OUTB(nsect, IDE_NSECTOR_REG);
	ide_execute_command(drive, cmd, handler, WAIT_CMD, NULL);
}

/**
 * drive_cmd_intr - drive command completion interrupt
 * @drive: drive the completion interrupt occurred on
 *
 * drive_cmd_intr() is invoked on completion of a special DRIVE_CMD.
 * We do any necessary data reading and then wait for the drive to
 * go non-busy. At that point we may read the error data and complete
 * the request.
 */

static ide_startstop_t drive_cmd_intr (ide_drive_t *drive)
{
	struct request *rq = HWGROUP(drive)->rq;
	ide_hwif_t *hwif = HWIF(drive);
	u8 *args = (u8 *) rq->buffer;
	u8 stat = hwif->INB(IDE_STATUS_REG);
	int retries = 10;

	local_irq_enable_in_hardirq();
	if ((stat & DRQ_STAT) && args && args[3]) {
		u8 io_32bit = drive->io_32bit;
		drive->io_32bit = 0;
		hwif->ata_input_data(drive, &args[4], args[3] * SECTOR_WORDS);
		drive->io_32bit = io_32bit;
		while (((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) && retries--)
			udelay(100);
	}

	if (!OK_STAT(stat, READY_STAT, BAD_STAT))
		return ide_error(drive, "drive_cmd", stat);
		/* calls ide_end_drive_cmd */
	ide_end_drive_cmd(drive, stat, hwif->INB(IDE_ERROR_REG));
	return ide_stopped;
}

static void ide_init_specify_cmd(ide_drive_t *drive, ide_task_t *task)
{
	task->tfRegister[IDE_NSECTOR_OFFSET] = drive->sect;
	task->tfRegister[IDE_SECTOR_OFFSET]  = drive->sect;
	task->tfRegister[IDE_LCYL_OFFSET]    = drive->cyl;
	task->tfRegister[IDE_HCYL_OFFSET]    = drive->cyl>>8;
	task->tfRegister[IDE_SELECT_OFFSET]  = ((drive->head-1)|drive->select.all)&0xBF;
	task->tfRegister[IDE_COMMAND_OFFSET] = WIN_SPECIFY;

	task->handler = &set_geometry_intr;
}

static void ide_init_restore_cmd(ide_drive_t *drive, ide_task_t *task)
{
	task->tfRegister[IDE_NSECTOR_OFFSET] = drive->sect;
	task->tfRegister[IDE_COMMAND_OFFSET] = WIN_RESTORE;

	task->handler = &recal_intr;
}

static void ide_init_setmult_cmd(ide_drive_t *drive, ide_task_t *task)
{
	task->tfRegister[IDE_NSECTOR_OFFSET] = drive->mult_req;
	task->tfRegister[IDE_COMMAND_OFFSET] = WIN_SETMULT;

	task->handler = &set_multmode_intr;
}

static ide_startstop_t ide_disk_special(ide_drive_t *drive)
{
	special_t *s = &drive->special;
	ide_task_t args;

	memset(&args, 0, sizeof(ide_task_t));
	args.command_type = IDE_DRIVE_TASK_NO_DATA;

	if (s->b.set_geometry) {
		s->b.set_geometry = 0;
		ide_init_specify_cmd(drive, &args);
	} else if (s->b.recalibrate) {
		s->b.recalibrate = 0;
		ide_init_restore_cmd(drive, &args);
	} else if (s->b.set_multmode) {
		s->b.set_multmode = 0;
		if (drive->mult_req > drive->id->max_multsect)
			drive->mult_req = drive->id->max_multsect;
		ide_init_setmult_cmd(drive, &args);
	} else if (s->all) {
		int special = s->all;
		s->all = 0;
		printk(KERN_ERR "%s: bad special flag: 0x%02x\n", drive->name, special);
		return ide_stopped;
	}

	do_rw_taskfile(drive, &args);

	return ide_started;
}
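
/*
 * Note on servicing order: ide_disk_special() issues at most one taskfile
 * per call, clearing the corresponding drive->special flag bit before the
 * command goes out; any remaining bits are picked up again on subsequent
 * do_special() passes until s->all is clear.
 */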

/**
 * do_special - issue some special commands
 * @drive: drive the command is for
 *
 * do_special() is used to issue WIN_SPECIFY, WIN_RESTORE, and WIN_SETMULT
 * commands to a drive. It used to do much more, but has been scaled
 * back.
 */

static ide_startstop_t do_special (ide_drive_t *drive)
{
	special_t *s = &drive->special;

#ifdef DEBUG
	printk("%s: do_special: 0x%02x\n", drive->name, s->all);
#endif
	if (s->b.set_tune) {
		s->b.set_tune = 0;
		if (HWIF(drive)->tuneproc != NULL)
			HWIF(drive)->tuneproc(drive, drive->tune_req);
		return ide_stopped;
	} else {
		if (drive->media == ide_disk)
			return ide_disk_special(drive);

		s->all = 0;
		drive->mult_req = 0;
		return ide_stopped;
	}
}

void ide_map_sg(ide_drive_t *drive, struct request *rq)
{
	ide_hwif_t *hwif = drive->hwif;
	struct scatterlist *sg = hwif->sg_table;

	if (hwif->sg_mapped)	/* needed by ide-scsi */
		return;

	if (rq->cmd_type != REQ_TYPE_ATA_TASKFILE) {
		hwif->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
	} else {
		sg_init_one(sg, rq->buffer, rq->nr_sectors * SECTOR_SIZE);
		hwif->sg_nents = 1;
	}
}

EXPORT_SYMBOL_GPL(ide_map_sg);

void ide_init_sg_cmd(ide_drive_t *drive, struct request *rq)
{
	ide_hwif_t *hwif = drive->hwif;

	hwif->nsect = hwif->nleft = rq->nr_sectors;
	hwif->cursg = hwif->cursg_ofs = 0;
}

EXPORT_SYMBOL_GPL(ide_init_sg_cmd);

/**
 * execute_drive_cmd - issue special drive command
 * @drive: the drive to issue the command on
 * @rq: the request structure holding the command
 *
 * execute_drive_cmd() issues a special drive command, usually
 * initiated by ioctl() from the external hdparm program. The
 * command can be a drive command, drive task or taskfile
 * operation. Weirdly you can call it with NULL to wait for
 * all commands to finish. Don't do this as that is due to change.
 */

static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
		struct request *rq)
{
	ide_hwif_t *hwif = HWIF(drive);
	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
		ide_task_t *args = rq->special;

		if (!args)
			goto done;

		hwif->data_phase = args->data_phase;

		switch (hwif->data_phase) {
		case TASKFILE_MULTI_OUT:
		case TASKFILE_OUT:
		case TASKFILE_MULTI_IN:
		case TASKFILE_IN:
			ide_init_sg_cmd(drive, rq);
			ide_map_sg(drive, rq);
		default:
			break;
		}

		if (args->tf_out_flags.all != 0)
			return flagged_taskfile(drive, args);
		return do_rw_taskfile(drive, args);
	} else if (rq->cmd_type == REQ_TYPE_ATA_TASK) {
		u8 *args = rq->buffer;
		u8 sel;

		if (!args)
			goto done;
#ifdef DEBUG
		printk("%s: DRIVE_TASK_CMD ", drive->name);
		printk("cmd=0x%02x ", args[0]);
		printk("fr=0x%02x ", args[1]);
		printk("ns=0x%02x ", args[2]);
		printk("sc=0x%02x ", args[3]);
		printk("lcyl=0x%02x ", args[4]);
		printk("hcyl=0x%02x ", args[5]);
		printk("sel=0x%02x\n", args[6]);
#endif
		hwif->OUTB(args[1], IDE_FEATURE_REG);
		hwif->OUTB(args[3], IDE_SECTOR_REG);
		hwif->OUTB(args[4], IDE_LCYL_REG);
		hwif->OUTB(args[5], IDE_HCYL_REG);
		sel = (args[6] & ~0x10);
		if (drive->select.b.unit)
			sel |= 0x10;
		hwif->OUTB(sel, IDE_SELECT_REG);
		ide_cmd(drive, args[0], args[2], &drive_cmd_intr);
		return ide_started;
	} else if (rq->cmd_type == REQ_TYPE_ATA_CMD) {
		u8 *args = rq->buffer;

		if (!args)
			goto done;
#ifdef DEBUG
		printk("%s: DRIVE_CMD ", drive->name);
		printk("cmd=0x%02x ", args[0]);
		printk("sc=0x%02x ", args[1]);
		printk("fr=0x%02x ", args[2]);
		printk("xx=0x%02x\n", args[3]);
#endif
		if (args[0] == WIN_SMART) {
			hwif->OUTB(0x4f, IDE_LCYL_REG);
			hwif->OUTB(0xc2, IDE_HCYL_REG);
			hwif->OUTB(args[2], IDE_FEATURE_REG);
			hwif->OUTB(args[1], IDE_SECTOR_REG);
			ide_cmd(drive, args[0], args[3], &drive_cmd_intr);
			return ide_started;
		}
		hwif->OUTB(args[2], IDE_FEATURE_REG);
		ide_cmd(drive, args[0], args[1], &drive_cmd_intr);
		return ide_started;
	}

done:
	/*
	 * NULL is actually a valid way of waiting for
	 * all current requests to be flushed from the queue.
	 */
#ifdef DEBUG
	printk("%s: DRIVE_CMD (null)\n", drive->name);
#endif
	ide_end_drive_cmd(drive,
			hwif->INB(IDE_STATUS_REG),
			hwif->INB(IDE_ERROR_REG));
	return ide_stopped;
}
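
/*
 * Buffer layout for the REQ_TYPE_ATA_CMD branches above, matching how
 * drive_cmd_intr() reads the reply: args[0] = command byte, args[1] =
 * sector count register value (sector number for WIN_SMART), args[2] =
 * feature register value, and args[3] = number of data sectors the
 * completion handler should read back into &args[4]. The 0x4f/0xc2
 * cylinder signature required by SMART commands is filled in by the
 * WIN_SMART branch itself.
 */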

static void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
{
	struct request_pm_state *pm = rq->data;

	if (blk_pm_suspend_request(rq) &&
	    pm->pm_step == ide_pm_state_start_suspend)
		/* Mark drive blocked when starting the suspend sequence. */
		drive->blocked = 1;
	else if (blk_pm_resume_request(rq) &&
		 pm->pm_step == ide_pm_state_start_resume) {
		/*
		 * The first thing we do on wakeup is to wait for BSY bit to
		 * go away (with a looong timeout) as a drive on this hwif may
		 * just be POSTing itself.
		 * We do that before even selecting as the "other" device on
		 * the bus may be broken enough to walk on our toes at this
		 * point.
		 */
		int rc;
#ifdef DEBUG_PM
		printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name);
#endif
		rc = ide_wait_not_busy(HWIF(drive), 35000);
		if (rc)
			printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name);
		SELECT_DRIVE(drive);
		HWIF(drive)->OUTB(8, HWIF(drive)->io_ports[IDE_CONTROL_OFFSET]);
		rc = ide_wait_not_busy(HWIF(drive), 100000);
		if (rc)
			printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);
	}
}

/**
 * start_request - start of I/O and command issuing for IDE
 * @drive: drive the request is for
 * @rq: request to start
 *
 * start_request() initiates handling of a new I/O request. It
 * accepts commands and I/O (read/write) requests. It also does
 * the final remapping for weird stuff like EZDrive. Once device
 * mapper can work at the sector level the EZDrive stuff can go away.
 *
 * FIXME: this function needs a rename
 */

static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
{
	ide_startstop_t startstop;
	sector_t block;

	BUG_ON(!blk_rq_started(rq));

#ifdef DEBUG
	printk("%s: start_request: current=0x%08lx\n",
		HWIF(drive)->name, (unsigned long) rq);
#endif

	/* bail early if we've exceeded max_failures */
	if (drive->max_failures && (drive->failures > drive->max_failures)) {
		goto kill_rq;
	}

	block = rq->sector;
	if (blk_fs_request(rq) &&
	    (drive->media == ide_disk || drive->media == ide_floppy)) {
		block += drive->sect0;
	}
	/* Yecch - this will shift the entire interval,
	   possibly killing some innocent following sector */
	if (block == 0 && drive->remap_0_to_1 == 1)
		block = 1;  /* redirect MBR access to EZ-Drive partn table */

	if (blk_pm_request(rq))
		ide_check_pm_state(drive, rq);

	SELECT_DRIVE(drive);
	if (ide_wait_stat(&startstop, drive, drive->ready_stat, BUSY_STAT|DRQ_STAT, WAIT_READY)) {
		printk(KERN_ERR "%s: drive not ready for command\n", drive->name);
		return startstop;
	}
	if (!drive->special.all) {
		ide_driver_t *drv;

		/*
		 * We reset the drive so we need to issue a SETFEATURES.
		 * Do it _after_ do_special() restored device parameters.
		 */
		if (drive->current_speed == 0xff)
			ide_config_drive_speed(drive, drive->desired_speed);

		if (rq->cmd_type == REQ_TYPE_ATA_CMD ||
		    rq->cmd_type == REQ_TYPE_ATA_TASK ||
		    rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
			return execute_drive_cmd(drive, rq);
		else if (blk_pm_request(rq)) {
			struct request_pm_state *pm = rq->data;
#ifdef DEBUG_PM
			printk("%s: start_power_step(step: %d)\n",
				drive->name, pm->pm_step);
#endif
			startstop = ide_start_power_step(drive, rq);
			if (startstop == ide_stopped &&
			    pm->pm_step == ide_pm_state_completed)
				ide_complete_pm_request(drive, rq);
			return startstop;
		}

		drv = *(ide_driver_t **)rq->rq_disk->private_data;
		return drv->do_request(drive, rq, block);
	}
	return do_special(drive);
kill_rq:
	ide_kill_rq(drive, rq);
	return ide_stopped;
}

/**
 * ide_stall_queue - pause an IDE device
 * @drive: drive to stall
 * @timeout: time to stall for (jiffies)
 *
 * ide_stall_queue() can be used by a drive to give excess bandwidth back
 * to the hwgroup by sleeping for timeout jiffies.
 */

void ide_stall_queue (ide_drive_t *drive, unsigned long timeout)
{
	if (timeout > WAIT_WORSTCASE)
		timeout = WAIT_WORSTCASE;
	drive->sleep = timeout + jiffies;
	drive->sleeping = 1;
}

EXPORT_SYMBOL(ide_stall_queue);

#define WAKEUP(drive)	((drive)->service_start + 2 * (drive)->service_time)

/**
 * choose_drive - select a drive to service
 * @hwgroup: hardware group to select on
 *
 * choose_drive() selects the next drive which will be serviced.
 * This is necessary because the IDE layer can't issue commands
 * to both drives on the same cable, unlike SCSI.
 */

static inline ide_drive_t *choose_drive (ide_hwgroup_t *hwgroup)
{
	ide_drive_t *drive, *best;

repeat:
	best = NULL;
	drive = hwgroup->drive;

	/*
	 * drive is doing pre-flush, ordered write, post-flush sequence. even
	 * though that is 3 requests, it must be seen as a single transaction.
	 * we must not preempt this drive until that is complete
	 */
	if (blk_queue_flushing(drive->queue)) {
		/*
		 * small race where queue could get replugged during
		 * the 3-request flush cycle, just yank the plug since
		 * we want it to finish asap
		 */
		blk_remove_plug(drive->queue);
		return drive;
	}

	do {
		if ((!drive->sleeping || time_after_eq(jiffies, drive->sleep))
		    && !elv_queue_empty(drive->queue)) {
			if (!best
			 || (drive->sleeping && (!best->sleeping || time_before(drive->sleep, best->sleep)))
			 || (!best->sleeping && time_before(WAKEUP(drive), WAKEUP(best))))
			{
				if (!blk_queue_plugged(drive->queue))
					best = drive;
			}
		}
	} while ((drive = drive->next) != hwgroup->drive);
	if (best && best->nice1 && !best->sleeping && best != hwgroup->drive && best->service_time > WAIT_MIN_SLEEP) {
		long t = (signed long)(WAKEUP(best) - jiffies);
		if (t >= WAIT_MIN_SLEEP) {
			/*
			 * We *may* have some time to spare, but first let's see if
			 * someone can potentially benefit from our nice mood today..
			 */
			drive = best->next;
			do {
				if (!drive->sleeping
				 && time_before(jiffies - best->service_time, WAKEUP(drive))
				 && time_before(WAKEUP(drive), jiffies + t))
				{
					ide_stall_queue(best, min_t(long, t, 10 * WAIT_MIN_SLEEP));
					goto repeat;
				}
			} while ((drive = drive->next) != best);
		}
	}
	return best;
}
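
/*
 * The WAKEUP() heuristic above estimates when a drive is due for service
 * again (start of its last service plus twice its last service time); a
 * "nice" drive that is well ahead of schedule may be stalled briefly via
 * ide_stall_queue() so that a drive closer to its wakeup time gets a turn
 * first. (A reading of the code above, not a documented guarantee.)
 */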

/*
 * Issue a new request to a drive from hwgroup
 * Caller must have already done spin_lock_irqsave(&ide_lock, ..);
 *
 * A hwgroup is a serialized group of IDE interfaces. Usually there is
 * exactly one hwif (interface) per hwgroup, but buggy controllers (eg. CMD640)
 * may have both interfaces in a single hwgroup to "serialize" access.
 * Or possibly multiple ISA interfaces can share a common IRQ by being grouped
 * together into one hwgroup for serialized access.
 *
 * Note also that several hwgroups can end up sharing a single IRQ,
 * possibly along with many other devices. This is especially common in
 * PCI-based systems with off-board IDE controller cards.
 *
 * The IDE driver uses the single global ide_lock spinlock to protect
 * access to the request queues, and to protect the hwgroup->busy flag.
 *
 * The first thread into the driver for a particular hwgroup sets the
 * hwgroup->busy flag to indicate that this hwgroup is now active,
 * and then initiates processing of the top request from the request queue.
 *
 * Other threads attempting entry notice the busy setting, and will simply
 * queue their new requests and exit immediately. Note that hwgroup->busy
 * remains set even when the driver is merely awaiting the next interrupt.
 * Thus, the meaning is "this hwgroup is busy processing a request".
 *
 * When processing of a request completes, the completing thread or IRQ-handler
 * will start the next request from the queue. If no more work remains,
 * the driver will clear the hwgroup->busy flag and exit.
 *
 * The ide_lock (spinlock) is used to protect all access to the
 * hwgroup->busy flag, but is otherwise not needed for most processing in
 * the driver. This makes the driver much friendlier to shared IRQs
 * than previous designs, while remaining 100% (?) SMP safe and capable.
 */
static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
{
	ide_drive_t *drive;
	ide_hwif_t *hwif;
	struct request *rq;
	ide_startstop_t startstop;
	int loops = 0;

	/* for atari only: POSSIBLY BROKEN HERE(?) */
	ide_get_lock(ide_intr, hwgroup);

	/* caller must own ide_lock */
	BUG_ON(!irqs_disabled());

	while (!hwgroup->busy) {
		hwgroup->busy = 1;
		drive = choose_drive(hwgroup);
		if (drive == NULL) {
			int sleeping = 0;
			unsigned long sleep = 0; /* shut up, gcc */
			hwgroup->rq = NULL;
			drive = hwgroup->drive;
			do {
				if (drive->sleeping && (!sleeping || time_before(drive->sleep, sleep))) {
					sleeping = 1;
					sleep = drive->sleep;
				}
			} while ((drive = drive->next) != hwgroup->drive);
			if (sleeping) {
				/*
				 * Take a short snooze, and then wake up this hwgroup again.
				 * This gives other hwgroups on the same IRQ a chance to
				 * play fairly with us, just in case there are big differences
				 * in relative throughputs.. don't want to hog the cpu too much.
				 */
				if (time_before(sleep, jiffies + WAIT_MIN_SLEEP))
					sleep = jiffies + WAIT_MIN_SLEEP;
#if 1
				if (timer_pending(&hwgroup->timer))
					printk(KERN_CRIT "ide_set_handler: timer already active\n");
#endif
				/* so that ide_timer_expiry knows what to do */
				hwgroup->sleeping = 1;
				hwgroup->req_gen_timer = hwgroup->req_gen;
				mod_timer(&hwgroup->timer, sleep);
				/* we purposely leave hwgroup->busy==1
				 * while sleeping */
			} else {
				/* Ugly, but how can we sleep for the lock
				 * otherwise? perhaps from tq_disk?
				 */

				/* for atari only */
				ide_release_lock();
				hwgroup->busy = 0;
			}

			/* no more work for this hwgroup (for now) */
			return;
		}
	again:
		hwif = HWIF(drive);
		if (hwgroup->hwif->sharing_irq &&
		    hwif != hwgroup->hwif &&
		    hwif->io_ports[IDE_CONTROL_OFFSET]) {
			/* set nIEN for previous hwif */
			SELECT_INTERRUPT(drive);
		}
		hwgroup->hwif = hwif;
		hwgroup->drive = drive;
		drive->sleeping = 0;
		drive->service_start = jiffies;

		if (blk_queue_plugged(drive->queue)) {
			printk(KERN_ERR "ide: huh? queue was plugged!\n");
			break;
		}

		/*
		 * we know that the queue isn't empty, but this can happen
		 * if the q->prep_rq_fn() decides to kill a request
		 */
		rq = elv_next_request(drive->queue);
		if (!rq) {
			hwgroup->busy = 0;
			break;
		}

		/*
		 * Sanity: don't accept a request that isn't a PM request
		 * if we are currently power managed. This is very important as
		 * blk_stop_queue() doesn't prevent the elv_next_request()
		 * above to return us whatever is in the queue. Since we call
		 * ide_do_request() ourselves, we end up taking requests while
		 * the queue is blocked...
		 *
		 * We let requests forced at head of queue with ide-preempt
		 * though. I hope that doesn't happen too much, hopefully not
		 * unless the subdriver triggers such a thing in its own PM
		 * state machine.
		 *
		 * We count how many times we loop here to make sure we service
		 * all drives in the hwgroup without looping forever
		 */
		if (drive->blocked && !blk_pm_request(rq) && !(rq->cmd_flags & REQ_PREEMPT)) {
			drive = drive->next ? drive->next : hwgroup->drive;
			if (loops++ < 4 && !blk_queue_plugged(drive->queue))
				goto again;
			/* We clear busy, there should be no pending ATA command at this point. */
			hwgroup->busy = 0;
			break;
		}

		hwgroup->rq = rq;

		/*
		 * Some systems have trouble with IDE IRQs arriving while
		 * the driver is still setting things up. So, here we disable
		 * the IRQ used by this interface while the request is being started.
		 * This may look bad at first, but pretty much the same thing
		 * happens anyway when any interrupt comes in, IDE or otherwise
		 *  -- the kernel masks the IRQ while it is being handled.
		 */
		if (masked_irq != IDE_NO_IRQ && hwif->irq != masked_irq)
			disable_irq_nosync(hwif->irq);
		spin_unlock(&ide_lock);
		local_irq_enable_in_hardirq();
			/* allow other IRQs while we start this request */
		startstop = start_request(drive, rq);
		spin_lock_irq(&ide_lock);
		if (masked_irq != IDE_NO_IRQ && hwif->irq != masked_irq)
			enable_irq(hwif->irq);
		if (startstop == ide_stopped)
			hwgroup->busy = 0;
	}
}

/*
 * Passes the stuff to ide_do_request
 */
void do_ide_request(request_queue_t *q)
{
	ide_drive_t *drive = q->queuedata;

	ide_do_request(HWGROUP(drive), IDE_NO_IRQ);
}

/*
 * un-busy the hwgroup etc, and clear any pending DMA status. we want to
 * retry the current request in pio mode instead of risking tossing it
 * all away
 */
static ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
{
	ide_hwif_t *hwif = HWIF(drive);
	struct request *rq;
	ide_startstop_t ret = ide_stopped;

	/*
	 * end current dma transaction
	 */

	if (error < 0) {
		printk(KERN_WARNING "%s: DMA timeout error\n", drive->name);
		(void)HWIF(drive)->ide_dma_end(drive);
		ret = ide_error(drive, "dma timeout error",
						hwif->INB(IDE_STATUS_REG));
	} else {
		printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name);
		hwif->dma_timeout(drive);
	}

	/*
	 * disable dma for now, but remember that we did so because of
	 * a timeout -- we'll reenable after we finish this next request
	 * (or rather the first chunk of it) in pio.
	 */
	drive->retry_pio++;
	drive->state = DMA_PIO_RETRY;
	hwif->dma_off_quietly(drive);

	/*
	 * un-busy drive etc (hwgroup->busy is cleared on return) and
	 * make sure request is sane
	 */
	rq = HWGROUP(drive)->rq;

	if (!rq)
		goto out;

	HWGROUP(drive)->rq = NULL;

	rq->errors = 0;

	if (!rq->bio)
		goto out;

	rq->sector = rq->bio->bi_sector;
	rq->current_nr_sectors = bio_iovec(rq->bio)->bv_len >> 9;
	rq->hard_cur_sectors = rq->current_nr_sectors;
	rq->buffer = bio_data(rq->bio);
out:
	return ret;
}
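
/*
 * Tie-in with the completion path: the DMA_PIO_RETRY state set above is
 * exactly what __ide_end_request() and ide_end_dequeued_request() check
 * so they can call ide_dma_on() again once the retried request (or rather
 * its first chunk) has completed in PIO mode.
 */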

/**
 * ide_timer_expiry - handle lack of an IDE interrupt
 * @data: timer callback magic (hwgroup)
 *
 * An IDE command has timed out before the expected drive return
 * occurred. At this point we attempt to clean up the current
 * mess. If the current handler includes an expiry handler then
 * we invoke the expiry handler, and providing it is happy the
 * work is done. If that fails we apply generic recovery rules
 * invoking the handler and checking the drive DMA status. We
 * have an excessively incestuous relationship with the DMA
 * logic that wants cleaning up.
 */

void ide_timer_expiry (unsigned long data)
{
	ide_hwgroup_t *hwgroup = (ide_hwgroup_t *) data;
	ide_handler_t *handler;
	ide_expiry_t *expiry;
	unsigned long flags;
	unsigned long wait = -1;

	spin_lock_irqsave(&ide_lock, flags);

	if (((handler = hwgroup->handler) == NULL) ||
	    (hwgroup->req_gen != hwgroup->req_gen_timer)) {
		/*
		 * Either a marginal timeout occurred
		 * (got the interrupt just as timer expired),
		 * or we were "sleeping" to give other devices a chance.
		 * Either way, we don't really want to complain about anything.
		 */
		if (hwgroup->sleeping) {
			hwgroup->sleeping = 0;
			hwgroup->busy = 0;
		}
	} else {
		ide_drive_t *drive = hwgroup->drive;
		if (!drive) {
			printk(KERN_ERR "ide_timer_expiry: hwgroup->drive was NULL\n");
			hwgroup->handler = NULL;
		} else {
			ide_hwif_t *hwif;
			ide_startstop_t startstop = ide_stopped;
			if (!hwgroup->busy) {
				hwgroup->busy = 1;	/* paranoia */
				printk(KERN_ERR "%s: ide_timer_expiry: hwgroup->busy was 0 ??\n", drive->name);
			}
			if ((expiry = hwgroup->expiry) != NULL) {
				/* continue */
				if ((wait = expiry(drive)) > 0) {
					/* reset timer */
					hwgroup->timer.expires = jiffies + wait;
					hwgroup->req_gen_timer = hwgroup->req_gen;
					add_timer(&hwgroup->timer);
					spin_unlock_irqrestore(&ide_lock, flags);
					return;
				}
			}
			hwgroup->handler = NULL;
			/*
			 * We need to simulate a real interrupt when invoking
			 * the handler() function, which means we need to
			 * globally mask the specific IRQ:
			 */
			spin_unlock(&ide_lock);
			hwif = HWIF(drive);
#if DISABLE_IRQ_NOSYNC
			disable_irq_nosync(hwif->irq);
#else
			/* disable_irq_nosync ?? */
			disable_irq(hwif->irq);
#endif /* DISABLE_IRQ_NOSYNC */
			/* local CPU only,
			 * as if we were handling an interrupt */
			local_irq_disable();
			if (hwgroup->polling) {
				startstop = handler(drive);
			} else if (drive_is_ready(drive)) {
				if (drive->waiting_for_dma)
					hwgroup->hwif->dma_lost_irq(drive);
				(void)ide_ack_intr(hwif);
				printk(KERN_WARNING "%s: lost interrupt\n", drive->name);
				startstop = handler(drive);
			} else {
				if (drive->waiting_for_dma) {
					startstop = ide_dma_timeout_retry(drive, wait);
				} else
					startstop =
					ide_error(drive, "irq timeout", hwif->INB(IDE_STATUS_REG));
			}
			drive->service_time = jiffies - drive->service_start;
			spin_lock_irq(&ide_lock);
			enable_irq(hwif->irq);
			if (startstop == ide_stopped)
				hwgroup->busy = 0;
		}
	}
	ide_do_request(hwgroup, IDE_NO_IRQ);
	spin_unlock_irqrestore(&ide_lock, flags);
}

/**
 * unexpected_intr - handle an unexpected IDE interrupt
 * @irq: interrupt line
 * @hwgroup: hwgroup being processed
 *
 * There's nothing really useful we can do with an unexpected interrupt,
 * other than reading the status register (to clear it), and logging it.
 * There should be no way that an irq can happen before we're ready for it,
 * so we needn't worry much about losing an "important" interrupt here.
 *
 * On laptops (and "green" PCs), an unexpected interrupt occurs whenever
 * the drive enters "idle", "standby", or "sleep" mode, so if the status
 * looks "good", we just ignore the interrupt completely.
 *
 * This routine assumes __cli() is in effect when called.
 *
 * If an unexpected interrupt happens on irq15 while we are handling irq14
 * and if the two interfaces are "serialized" (CMD640), then it looks like
 * we could screw up by interfering with a new request being set up for
 * irq15.
 *
 * In reality, this is a non-issue. The new command is not sent unless
 * the drive is ready to accept one, in which case we know the drive is
 * not trying to interrupt us. And ide_set_handler() is always invoked
 * before completing the issuance of any new drive command, so we will not
 * be accidentally invoked as a result of any valid command completion
 * interrupt.
 *
 * Note that we must walk the entire hwgroup here. We know which hwif
 * is doing the current command, but we don't know which hwif burped
 * mysteriously.
 */

static void unexpected_intr (int irq, ide_hwgroup_t *hwgroup)
{
	u8 stat;
	ide_hwif_t *hwif = hwgroup->hwif;

	/*
	 * handle the unexpected interrupt
	 */
	do {
		if (hwif->irq == irq) {
			stat = hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]);
			if (!OK_STAT(stat, READY_STAT, BAD_STAT)) {
				/* Try to not flood the console with msgs */
				static unsigned long last_msgtime, count;
				++count;
				if (time_after(jiffies, last_msgtime + HZ)) {
					last_msgtime = jiffies;
					printk(KERN_ERR "%s%s: unexpected interrupt, "
						"status=0x%02x, count=%ld\n",
						hwif->name,
						(hwif->next == hwgroup->hwif) ? "" : "(?)", stat, count);
				}
			}
		}
	} while ((hwif = hwif->next) != hwgroup->hwif);
}

/**
 * ide_intr - default IDE interrupt handler
 * @irq: interrupt number
 * @dev_id: hwif group
 *
 * This is the default IRQ handler for the IDE layer. You should
 * not need to override it. If you do be aware it is subtle in
 * places.
 *
 * hwgroup->hwif is the interface in the group currently performing
 * a command. hwgroup->drive is the drive and hwgroup->handler is
 * the IRQ handler to call. As we issue a command the handlers
 * step through multiple states, reassigning the handler to the
 * next step in the process. Unlike a smart SCSI controller IDE
 * expects the main processor to sequence the various transfer
 * stages. We also manage a poll timer to catch up with most
 * timeout situations. There are still a few where the handlers
 * don't ever decide to give up.
 *
 * The handler eventually returns ide_stopped to indicate the
 * request completed. At this point we issue the next request
 * on the hwgroup and the process begins again.
 */

irqreturn_t ide_intr (int irq, void *dev_id)
{
	unsigned long flags;
	ide_hwgroup_t *hwgroup = (ide_hwgroup_t *)dev_id;
	ide_hwif_t *hwif;
	ide_drive_t *drive;
	ide_handler_t *handler;
	ide_startstop_t startstop;

	spin_lock_irqsave(&ide_lock, flags);
	hwif = hwgroup->hwif;

	if (!ide_ack_intr(hwif)) {
		spin_unlock_irqrestore(&ide_lock, flags);
		return IRQ_NONE;
	}

	if ((handler = hwgroup->handler) == NULL || hwgroup->polling) {
		/*
		 * Not expecting an interrupt from this drive.
		 * That means this could be:
		 *	(1) an interrupt from another PCI device
		 *	sharing the same PCI INT# as us.
		 * or	(2) a drive just entered sleep or standby mode,
		 *	and is interrupting to let us know.
		 * or	(3) a spurious interrupt of unknown origin.
		 *
		 * For PCI, we cannot tell the difference,
		 * so in that case we just ignore it and hope it goes away.
		 *
		 * FIXME: unexpected_intr should be hwif-> then we can
		 * remove all the ifdef PCI crap
		 */
#ifdef CONFIG_BLK_DEV_IDEPCI
		if (hwif->pci_dev && !hwif->pci_dev->vendor)
#endif	/* CONFIG_BLK_DEV_IDEPCI */
		{
			/*
			 * Probably not a shared PCI interrupt,
			 * so we can safely try to do something about it:
			 */
			unexpected_intr(irq, hwgroup);
#ifdef CONFIG_BLK_DEV_IDEPCI
		} else {
			/*
			 * Whack the status register, just in case
			 * we have a leftover pending IRQ.
			 */
			(void) hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]);
#endif /* CONFIG_BLK_DEV_IDEPCI */
		}
		spin_unlock_irqrestore(&ide_lock, flags);
		return IRQ_NONE;
	}
	drive = hwgroup->drive;
	if (!drive) {
		/*
		 * This should NEVER happen, and there isn't much
		 * we could do about it here.
		 *
		 * [Note - this can occur if the drive is hot unplugged]
		 */
		spin_unlock_irqrestore(&ide_lock, flags);
		return IRQ_HANDLED;
	}
	if (!drive_is_ready(drive)) {
		/*
		 * This happens regularly when we share a PCI IRQ with
		 * another device. Unfortunately, it can also happen
		 * with some buggy drives that trigger the IRQ before
		 * their status register is up to date. Hopefully we have
		 * enough advance overhead that the latter isn't a problem.
		 */
		spin_unlock_irqrestore(&ide_lock, flags);
		return IRQ_NONE;
	}
	if (!hwgroup->busy) {
		hwgroup->busy = 1;	/* paranoia */
		printk(KERN_ERR "%s: ide_intr: hwgroup->busy was 0 ??\n", drive->name);
	}
	hwgroup->handler = NULL;
	hwgroup->req_gen++;
	del_timer(&hwgroup->timer);
	spin_unlock(&ide_lock);

	/* Some controllers might set DMA INTR no matter DMA or PIO;
	 * bmdma status might need to be cleared even for
	 * PIO interrupts to prevent spurious/lost irq.
	 */
	if (hwif->ide_dma_clear_irq && !(drive->waiting_for_dma))
		/* ide_dma_end() needs bmdma status for error checking.
		 * So, skip clearing bmdma status here and leave it
		 * to ide_dma_end() if this is a DMA interrupt.
		 */
		hwif->ide_dma_clear_irq(drive);

	if (drive->unmask)
		local_irq_enable_in_hardirq();
	/* service this interrupt, may set handler for next interrupt */
	startstop = handler(drive);
	spin_lock_irq(&ide_lock);

	/*
	 * Note that handler() may have set things up for another
	 * interrupt to occur soon, but it cannot happen until
	 * we exit from this routine, because it will be the
	 * same irq as is currently being serviced here, and Linux
	 * won't allow another of the same (on any CPU) until we return.
	 */
	drive->service_time = jiffies - drive->service_start;
	if (startstop == ide_stopped) {
		if (hwgroup->handler == NULL) {	/* paranoia */
			hwgroup->busy = 0;
			ide_do_request(hwgroup, hwif->irq);
		} else {
			printk(KERN_ERR "%s: ide_intr: huh? expected NULL handler "
				"on exit\n", drive->name);
		}
	}
	spin_unlock_irqrestore(&ide_lock, flags);
	return IRQ_HANDLED;
}

/**
 * ide_init_drive_cmd - initialize a drive command request
 * @rq: request object
 *
 * Initialize a request before we fill it in and send it down to
 * ide_do_drive_cmd. Commands must be set up by this function. Right
 * now it doesn't do a lot, but if that changes abusers will have a
 * nasty surprise.
 */

void ide_init_drive_cmd (struct request *rq)
{
	memset(rq, 0, sizeof(*rq));
	rq->cmd_type = REQ_TYPE_ATA_CMD;
	rq->ref_count = 1;
}

EXPORT_SYMBOL(ide_init_drive_cmd);

/**
 * ide_do_drive_cmd - issue IDE special command
 * @drive: device to issue command
 * @rq: request to issue
 * @action: action for processing
 *
 * This function issues a special IDE device request
 * onto the request queue.
 *
 * If action is ide_wait, then the rq is queued at the end of the
 * request queue, and the function sleeps until it has been processed.
 * This is for use when invoked from an ioctl handler.
 *
 * If action is ide_preempt, then the rq is queued at the head of
 * the request queue, displacing the currently-being-processed
 * request and this function returns immediately without waiting
 * for the new rq to be completed. This is VERY DANGEROUS, and is
 * intended for careful use by the ATAPI tape/cdrom driver code.
 *
 * If action is ide_end, then the rq is queued at the end of the
 * request queue, and the function returns immediately without waiting
 * for the new rq to be completed. This is again intended for careful
 * use by the ATAPI tape/cdrom driver code.
 */

int ide_do_drive_cmd (ide_drive_t *drive, struct request *rq, ide_action_t action)
{
	unsigned long flags;
	ide_hwgroup_t *hwgroup = HWGROUP(drive);
	DECLARE_COMPLETION_ONSTACK(wait);
	int where = ELEVATOR_INSERT_BACK, err;
	int must_wait = (action == ide_wait || action == ide_head_wait);

	rq->errors = 0;

	/*
	 * we need to hold an extra reference to request for safe inspection
	 * after completion
	 */
	if (must_wait) {
		rq->ref_count++;
		rq->end_io_data = &wait;
		rq->end_io = blk_end_sync_rq;
	}

	spin_lock_irqsave(&ide_lock, flags);
	if (action == ide_preempt)
		hwgroup->rq = NULL;
	if (action == ide_preempt || action == ide_head_wait) {
		where = ELEVATOR_INSERT_FRONT;
		rq->cmd_flags |= REQ_PREEMPT;
	}
	__elv_add_request(drive->queue, rq, where, 0);
	ide_do_request(hwgroup, IDE_NO_IRQ);
	spin_unlock_irqrestore(&ide_lock, flags);

	err = 0;
	if (must_wait) {
		wait_for_completion(&wait);
		if (rq->errors)
			err = -EIO;

		blk_put_request(rq);
	}

	return err;
}

EXPORT_SYMBOL(ide_do_drive_cmd);
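
/*
 * A minimal, hypothetical usage sketch for the two helpers above, in the
 * style of the HDIO_DRIVE_CMD ioctl path (error handling elided; the
 * args[] layout is the REQ_TYPE_ATA_CMD convention described earlier):
 *
 *	u8 args[4] = { WIN_IDLEIMMEDIATE, 0, 0, 0 };
 *	struct request rq;
 *	int err;
 *
 *	ide_init_drive_cmd(&rq);
 *	rq.buffer = args;
 *	err = ide_do_drive_cmd(drive, &rq, ide_wait);
 *
 * Note also that ide_head_wait, although not listed in the kernel-doc
 * above, is handled by the code: it queues at the head of the queue like
 * ide_preempt but waits for completion as ide_wait does.
 */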