ide-io.c revision 23450319e2890986c247ec0aa1442f060e657e6d
/*
 * IDE I/O functions
 *
 * Basic PIO and command management functionality.
 *
 * This code was split off from ide.c. See ide.c for history and original
 * copyrights.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * For the avoidance of doubt the "preferred form" of this code is one which
 * is in an open non patent encumbered format. Where cryptographic key signing
 * forms part of the process of creating an executable the information
 * including keys needed to generate an equivalently functional executable
 * are deemed to be part of the source code.
 */


#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/major.h>
#include <linux/errno.h>
#include <linux/genhd.h>
#include <linux/blkpg.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/ide.h>
#include <linux/completion.h>
#include <linux/reboot.h>
#include <linux/cdrom.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/kmod.h>
#include <linux/scatterlist.h>

#include <asm/byteorder.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/bitops.h>

static int __ide_end_request(ide_drive_t *drive, struct request *rq,
			     int uptodate, int nr_sectors)
{
	int ret = 1;

	/*
	 * if failfast is set on a request, override number of sectors and
	 * complete the whole request right now
	 */
	if (blk_noretry_request(rq) && end_io_error(uptodate))
		nr_sectors = rq->hard_nr_sectors;

	if (!blk_fs_request(rq) && end_io_error(uptodate) && !rq->errors)
		rq->errors = -EIO;

	/*
	 * decide whether to reenable DMA -- 3 is a random magic for now,
	 * if we DMA timeout more than 3 times, just stay in PIO
	 */
	if (drive->state == DMA_PIO_RETRY && drive->retry_pio <= 3) {
		drive->state = 0;
		HWGROUP(drive)->hwif->ide_dma_on(drive);
	}

	if (!end_that_request_first(rq, uptodate, nr_sectors)) {
		add_disk_randomness(rq->rq_disk);
		if (!list_empty(&rq->queuelist))
			blkdev_dequeue_request(rq);
		HWGROUP(drive)->rq = NULL;
		end_that_request_last(rq, uptodate);
		ret = 0;
	}

	return ret;
}

/**
 * ide_end_request - complete an IDE I/O
 * @drive: IDE device for the I/O
 * @uptodate: 1 for success, 0 or a negative errno on failure
 * @nr_sectors: number of sectors completed
 *
 * This is our end_request wrapper function. We complete the I/O,
 * update the random number input and dequeue the request, which, if
 * it was tagged, may be out of order.
 */

int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
{
	struct request *rq;
	unsigned long flags;
	int ret = 1;

	/*
	 * room for locking improvements here, the calls below don't
	 * need the queue lock held at all
	 */
	spin_lock_irqsave(&ide_lock, flags);
	rq = HWGROUP(drive)->rq;

	if (!nr_sectors)
		nr_sectors = rq->hard_cur_sectors;

	ret = __ide_end_request(drive, rq, uptodate, nr_sectors);

	spin_unlock_irqrestore(&ide_lock, flags);
	return ret;
}
EXPORT_SYMBOL(ide_end_request);
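
/*
 * Typical usage from a subdriver's completion path (illustrative sketch,
 * not a call site in this file):
 *
 *	if (OK_STAT(stat, READY_STAT, BAD_STAT))
 *		ide_end_request(drive, 1, rq->hard_cur_sectors);
 *	else
 *		ide_end_request(drive, 0, 0);
 *
 * Passing nr_sectors == 0 is fine: ide_end_request() falls back to
 * rq->hard_cur_sectors, as above.
 */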

/*
 * Power Management state machine. This one is rather trivial for now,
 * we should probably add more, like switching back to PIO on suspend
 * to help some BIOSes, re-do the door locking on resume, etc...
 */

enum {
	ide_pm_flush_cache	= ide_pm_state_start_suspend,
	idedisk_pm_standby,

	idedisk_pm_restore_pio	= ide_pm_state_start_resume,
	idedisk_pm_idle,
	ide_pm_restore_dma,
};

static void ide_complete_power_step(ide_drive_t *drive, struct request *rq, u8 stat, u8 error)
{
	struct request_pm_state *pm = rq->data;

	if (drive->media != ide_disk)
		return;

	switch (pm->pm_step) {
	case ide_pm_flush_cache:	/* Suspend step 1 (flush cache) complete */
		if (pm->pm_state == PM_EVENT_FREEZE)
			pm->pm_step = ide_pm_state_completed;
		else
			pm->pm_step = idedisk_pm_standby;
		break;
	case idedisk_pm_standby:	/* Suspend step 2 (standby) complete */
		pm->pm_step = ide_pm_state_completed;
		break;
	case idedisk_pm_restore_pio:	/* Resume step 1 complete */
		pm->pm_step = idedisk_pm_idle;
		break;
	case idedisk_pm_idle:		/* Resume step 2 (idle) complete */
		pm->pm_step = ide_pm_restore_dma;
		break;
	}
}
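
/*
 * Step progression, for reference:
 *
 *	suspend: ide_pm_flush_cache -> idedisk_pm_standby -> completed
 *	         (PM_EVENT_FREEZE skips the standby step)
 *	resume:  idedisk_pm_restore_pio -> idedisk_pm_idle ->
 *	         ide_pm_restore_dma -> completed
 *
 * ATAPI devices skip the disk-only resume steps, see
 * ide_start_power_step() below.
 */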

static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
{
	struct request_pm_state *pm = rq->data;
	ide_task_t *args = rq->special;

	memset(args, 0, sizeof(*args));

	if (drive->media != ide_disk) {
		/*
		 * skip idedisk_pm_restore_pio and idedisk_pm_idle for ATAPI
		 * devices
		 */
		if (pm->pm_step == idedisk_pm_restore_pio)
			pm->pm_step = ide_pm_restore_dma;
	}

	switch (pm->pm_step) {
	case ide_pm_flush_cache:	/* Suspend step 1 (flush cache) */
		if (drive->media != ide_disk)
			break;
		/* Not supported? Switch to next step now. */
		if (!drive->wcache || !ide_id_has_flush_cache(drive->id)) {
			ide_complete_power_step(drive, rq, 0, 0);
			return ide_stopped;
		}
		if (ide_id_has_flush_cache_ext(drive->id))
			args->tfRegister[IDE_COMMAND_OFFSET] = WIN_FLUSH_CACHE_EXT;
		else
			args->tfRegister[IDE_COMMAND_OFFSET] = WIN_FLUSH_CACHE;
		args->command_type = IDE_DRIVE_TASK_NO_DATA;
		args->handler	   = &task_no_data_intr;
		return do_rw_taskfile(drive, args);

	case idedisk_pm_standby:	/* Suspend step 2 (standby) */
		args->tfRegister[IDE_COMMAND_OFFSET] = WIN_STANDBYNOW1;
		args->command_type = IDE_DRIVE_TASK_NO_DATA;
		args->handler	   = &task_no_data_intr;
		return do_rw_taskfile(drive, args);

	case idedisk_pm_restore_pio:	/* Resume step 1 (restore PIO) */
		if (drive->hwif->tuneproc != NULL)
			drive->hwif->tuneproc(drive, 255);
		ide_complete_power_step(drive, rq, 0, 0);
		return ide_stopped;

	case idedisk_pm_idle:		/* Resume step 2 (idle) */
		args->tfRegister[IDE_COMMAND_OFFSET] = WIN_IDLEIMMEDIATE;
		args->command_type = IDE_DRIVE_TASK_NO_DATA;
		args->handler = task_no_data_intr;
		return do_rw_taskfile(drive, args);

	case ide_pm_restore_dma:	/* Resume step 3 (restore DMA) */
		/*
		 * Right now, all we do is call hwif->ide_dma_check(drive),
		 * we could be smarter and check for current xfer_speed
		 * in struct drive etc...
		 */
		if ((drive->id->capability & 1) == 0)
			break;
		if (drive->hwif->ide_dma_check == NULL)
			break;
		ide_set_dma(drive);
		break;
	}
	pm->pm_step = ide_pm_state_completed;
	return ide_stopped;
}

/**
 * ide_end_dequeued_request - complete an IDE I/O
 * @drive: IDE device for the I/O
 * @rq: request to complete
 * @uptodate: 1 for success, 0 or a negative errno on failure
 * @nr_sectors: number of sectors completed
 *
 * Complete an I/O that is no longer on the request queue. This
 * typically occurs when we pull the request and issue a REQUEST_SENSE.
 * We must still finish the old request but we must not tamper with the
 * queue in the meantime.
 *
 * NOTE: This path does not handle barrier, but barrier is not supported
 * on ide-cd anyway.
 */

int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq,
			     int uptodate, int nr_sectors)
{
	unsigned long flags;
	int ret = 1;

	spin_lock_irqsave(&ide_lock, flags);

	BUG_ON(!blk_rq_started(rq));

	/*
	 * if failfast is set on a request, override number of sectors and
	 * complete the whole request right now
	 */
	if (blk_noretry_request(rq) && end_io_error(uptodate))
		nr_sectors = rq->hard_nr_sectors;

	if (!blk_fs_request(rq) && end_io_error(uptodate) && !rq->errors)
		rq->errors = -EIO;

	/*
	 * decide whether to reenable DMA -- 3 is a random magic for now,
	 * if we DMA timeout more than 3 times, just stay in PIO
	 */
	if (drive->state == DMA_PIO_RETRY && drive->retry_pio <= 3) {
		drive->state = 0;
		HWGROUP(drive)->hwif->ide_dma_on(drive);
	}

	if (!end_that_request_first(rq, uptodate, nr_sectors)) {
		add_disk_randomness(rq->rq_disk);
		if (blk_rq_tagged(rq))
			blk_queue_end_tag(drive->queue, rq);
		end_that_request_last(rq, uptodate);
		ret = 0;
	}
	spin_unlock_irqrestore(&ide_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(ide_end_dequeued_request);


/**
 * ide_complete_pm_request - end the current Power Management request
 * @drive: target drive
 * @rq: request
 *
 * This function cleans up the current PM request and stops the queue
 * if necessary.
 */
static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq)
{
	unsigned long flags;

#ifdef DEBUG_PM
	printk("%s: completing PM request, %s\n", drive->name,
	       blk_pm_suspend_request(rq) ? "suspend" : "resume");
#endif
	spin_lock_irqsave(&ide_lock, flags);
	if (blk_pm_suspend_request(rq)) {
		blk_stop_queue(drive->queue);
	} else {
		drive->blocked = 0;
		blk_start_queue(drive->queue);
	}
	blkdev_dequeue_request(rq);
	HWGROUP(drive)->rq = NULL;
	end_that_request_last(rq, 1);
	spin_unlock_irqrestore(&ide_lock, flags);
}

/*
 * FIXME: probably move this somewhere else, name is bad too :)
 */
u64 ide_get_error_location(ide_drive_t *drive, char *args)
{
	u32 high, low;
	u8 hcyl, lcyl, sect;
	u64 sector;

	high = 0;
	hcyl = args[5];
	lcyl = args[4];
	sect = args[3];

	if (ide_id_has_flush_cache_ext(drive->id)) {
		low = (hcyl << 16) | (lcyl << 8) | sect;
		HWIF(drive)->OUTB(drive->ctl|0x80, IDE_CONTROL_REG);
		high = ide_read_24(drive);
	} else {
		u8 cur = HWIF(drive)->INB(IDE_SELECT_REG);
		if (cur & 0x40) {
			high = cur & 0xf;
			low = (hcyl << 16) | (lcyl << 8) | sect;
		} else {
			low = hcyl * drive->head * drive->sect;
			low += lcyl * drive->sect;
			low += sect - 1;
		}
	}

	sector = ((u64) high << 24) | low;
	return sector;
}
EXPORT_SYMBOL(ide_get_error_location);
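
/*
 * The decode above has three cases: drives that know FLUSH CACHE EXT
 * report a 48-bit LBA, so the high-order bytes are fetched by setting the
 * HOB bit (drive->ctl | 0x80) in the device control register and reading
 * the taskfile a second time via ide_read_24(). Otherwise bit 6 of the
 * select register distinguishes LBA28 (address bits 24-27 in the low
 * nibble of select) from CHS addressing. Either way the result is
 * assembled as ((u64)high << 24) | low.
 */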

/**
 * ide_end_drive_cmd - end an explicit drive command
 * @drive: drive the command was issued to
 * @stat: status bits
 * @err: error bits
 *
 * Clean up after success/failure of an explicit drive command.
 * These get thrown onto the queue so they are synchronized with
 * real I/O operations on the drive.
 *
 * In LBA48 mode we have to read the register set twice to get
 * all the extra information out.
 */

void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
{
	ide_hwif_t *hwif = HWIF(drive);
	unsigned long flags;
	struct request *rq;

	spin_lock_irqsave(&ide_lock, flags);
	rq = HWGROUP(drive)->rq;
	spin_unlock_irqrestore(&ide_lock, flags);

	if (rq->cmd_type == REQ_TYPE_ATA_CMD) {
		u8 *args = (u8 *) rq->buffer;
		if (rq->errors == 0)
			rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT);

		if (args) {
			args[0] = stat;
			args[1] = err;
			args[2] = hwif->INB(IDE_NSECTOR_REG);
		}
	} else if (rq->cmd_type == REQ_TYPE_ATA_TASK) {
		u8 *args = (u8 *) rq->buffer;
		if (rq->errors == 0)
			rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT);

		if (args) {
			args[0] = stat;
			args[1] = err;
			args[2] = hwif->INB(IDE_NSECTOR_REG);
			args[3] = hwif->INB(IDE_SECTOR_REG);
			args[4] = hwif->INB(IDE_LCYL_REG);
			args[5] = hwif->INB(IDE_HCYL_REG);
			args[6] = hwif->INB(IDE_SELECT_REG);
		}
	} else if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
		ide_task_t *args = (ide_task_t *) rq->special;
		if (rq->errors == 0)
			rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT);

		if (args) {
			if (args->tf_in_flags.b.data) {
				u16 data = hwif->INW(IDE_DATA_REG);
				args->tfRegister[IDE_DATA_OFFSET] = (data) & 0xFF;
				args->hobRegister[IDE_DATA_OFFSET] = (data >> 8) & 0xFF;
			}
			args->tfRegister[IDE_ERROR_OFFSET]   = err;
			/* be sure we're looking at the low order bits */
			hwif->OUTB(drive->ctl & ~0x80, IDE_CONTROL_REG);
			args->tfRegister[IDE_NSECTOR_OFFSET] = hwif->INB(IDE_NSECTOR_REG);
			args->tfRegister[IDE_SECTOR_OFFSET]  = hwif->INB(IDE_SECTOR_REG);
			args->tfRegister[IDE_LCYL_OFFSET]    = hwif->INB(IDE_LCYL_REG);
			args->tfRegister[IDE_HCYL_OFFSET]    = hwif->INB(IDE_HCYL_REG);
			args->tfRegister[IDE_SELECT_OFFSET]  = hwif->INB(IDE_SELECT_REG);
			args->tfRegister[IDE_STATUS_OFFSET]  = stat;

			if (drive->addressing == 1) {
				hwif->OUTB(drive->ctl|0x80, IDE_CONTROL_REG);
				args->hobRegister[IDE_FEATURE_OFFSET] = hwif->INB(IDE_FEATURE_REG);
				args->hobRegister[IDE_NSECTOR_OFFSET] = hwif->INB(IDE_NSECTOR_REG);
				args->hobRegister[IDE_SECTOR_OFFSET]  = hwif->INB(IDE_SECTOR_REG);
				args->hobRegister[IDE_LCYL_OFFSET]    = hwif->INB(IDE_LCYL_REG);
				args->hobRegister[IDE_HCYL_OFFSET]    = hwif->INB(IDE_HCYL_REG);
			}
		}
	} else if (blk_pm_request(rq)) {
		struct request_pm_state *pm = rq->data;
#ifdef DEBUG_PM
		printk("%s: complete_power_step(step: %d, stat: %x, err: %x)\n",
			drive->name, pm->pm_step, stat, err);
#endif
		ide_complete_power_step(drive, rq, stat, err);
		if (pm->pm_step == ide_pm_state_completed)
			ide_complete_pm_request(drive, rq);
		return;
	}

	spin_lock_irqsave(&ide_lock, flags);
	blkdev_dequeue_request(rq);
	HWGROUP(drive)->rq = NULL;
	rq->errors = err;
	end_that_request_last(rq, !rq->errors);
	spin_unlock_irqrestore(&ide_lock, flags);
}

EXPORT_SYMBOL(ide_end_drive_cmd);
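
/*
 * For the REQ_TYPE_ATA_CMD case above, the write-back layout follows the
 * traditional HDIO_DRIVE_CMD convention -- args[0] = status, args[1] =
 * error, args[2] = sector count -- which is what ioctl callers such as
 * hdparm read back from the buffer.
 */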

/**
 * try_to_flush_leftover_data - flush junk
 * @drive: drive to flush
 *
 * try_to_flush_leftover_data() is invoked in response to a drive
 * unexpectedly having its DRQ_STAT bit set. As an alternative to
 * resetting the drive, this routine tries to clear the condition
 * by reading a sector's worth of data from the drive. Of course,
 * this may not help if the drive is *waiting* for data from *us*.
 */
static void try_to_flush_leftover_data (ide_drive_t *drive)
{
	int i = (drive->mult_count ? drive->mult_count : 1) * SECTOR_WORDS;

	if (drive->media != ide_disk)
		return;
	while (i > 0) {
		u32 buffer[16];
		u32 wcount = (i > 16) ? 16 : i;

		i -= wcount;
		HWIF(drive)->ata_input_data(drive, buffer, wcount);
	}
}

static void ide_kill_rq(ide_drive_t *drive, struct request *rq)
{
	if (rq->rq_disk) {
		ide_driver_t *drv;

		drv = *(ide_driver_t **)rq->rq_disk->private_data;
		drv->end_request(drive, 0, 0);
	} else
		ide_end_request(drive, 0, 0);
}

static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
{
	ide_hwif_t *hwif = drive->hwif;

	if (stat & BUSY_STAT || ((stat & WRERR_STAT) && !drive->nowerr)) {
		/* other bits are useless when BUSY */
		rq->errors |= ERROR_RESET;
	} else if (stat & ERR_STAT) {
		/* err has different meaning on cdrom and tape */
		if (err == ABRT_ERR) {
			if (drive->select.b.lba &&
			    /* some newer drives don't support WIN_SPECIFY */
			    hwif->INB(IDE_COMMAND_REG) == WIN_SPECIFY)
				return ide_stopped;
		} else if ((err & BAD_CRC) == BAD_CRC) {
			/* UDMA crc error, just retry the operation */
			drive->crc_count++;
		} else if (err & (BBD_ERR | ECC_ERR)) {
			/* retries won't help these */
			rq->errors = ERROR_MAX;
		} else if (err & TRK0_ERR) {
			/* help it find track zero */
			rq->errors |= ERROR_RECAL;
		}
	}

	if ((stat & DRQ_STAT) && rq_data_dir(rq) == READ && hwif->err_stops_fifo == 0)
		try_to_flush_leftover_data(drive);

	if (rq->errors >= ERROR_MAX || blk_noretry_request(rq)) {
		ide_kill_rq(drive, rq);
		return ide_stopped;
	}

	if (hwif->INB(IDE_STATUS_REG) & (BUSY_STAT|DRQ_STAT))
		rq->errors |= ERROR_RESET;

	if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
		++rq->errors;
		return ide_do_reset(drive);
	}

	if ((rq->errors & ERROR_RECAL) == ERROR_RECAL)
		drive->special.b.recalibrate = 1;

	++rq->errors;

	return ide_stopped;
}

static ide_startstop_t ide_atapi_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
{
	ide_hwif_t *hwif = drive->hwif;

	if (stat & BUSY_STAT || ((stat & WRERR_STAT) && !drive->nowerr)) {
		/* other bits are useless when BUSY */
		rq->errors |= ERROR_RESET;
	} else {
		/* add decoding error stuff */
	}

	if (hwif->INB(IDE_STATUS_REG) & (BUSY_STAT|DRQ_STAT))
		/* force an abort */
		hwif->OUTB(WIN_IDLEIMMEDIATE, IDE_COMMAND_REG);

	if (rq->errors >= ERROR_MAX) {
		ide_kill_rq(drive, rq);
	} else {
		if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
			++rq->errors;
			return ide_do_reset(drive);
		}
		++rq->errors;
	}

	return ide_stopped;
}

ide_startstop_t
__ide_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
{
	if (drive->media == ide_disk)
		return ide_ata_error(drive, rq, stat, err);
	return ide_atapi_error(drive, rq, stat, err);
}

EXPORT_SYMBOL_GPL(__ide_error);
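
/*
 * For fs requests rq->errors acts as a retry counter in the functions
 * above. ERROR_RESET and ERROR_RECAL are small masks (historically 3 and
 * 1), so the "(rq->errors & ERROR_x) == ERROR_x" tests amount to "reset
 * the bus every 4th retry" and "recalibrate every 2nd retry", while ORing
 * a mask in forces that action on the next pass; once the count reaches
 * ERROR_MAX the request is killed instead of retried.
 */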

/**
 * ide_error - handle an error on the IDE
 * @drive: drive the error occurred on
 * @msg: message to report
 * @stat: status bits
 *
 * ide_error() takes action based on the error returned by the drive.
 * For normal I/O that may well include retries. We deal with
 * both new-style (taskfile) and old style command handling here.
 * In the case of taskfile command handling there is work left to
 * do.
 */

ide_startstop_t ide_error (ide_drive_t *drive, const char *msg, u8 stat)
{
	struct request *rq;
	u8 err;

	err = ide_dump_status(drive, msg, stat);

	if ((rq = HWGROUP(drive)->rq) == NULL)
		return ide_stopped;

	/* retry only "normal" I/O: */
	if (!blk_fs_request(rq)) {
		rq->errors = 1;
		ide_end_drive_cmd(drive, stat, err);
		return ide_stopped;
	}

	if (rq->rq_disk) {
		ide_driver_t *drv;

		drv = *(ide_driver_t **)rq->rq_disk->private_data;
		return drv->error(drive, rq, stat, err);
	} else
		return __ide_error(drive, rq, stat, err);
}

EXPORT_SYMBOL_GPL(ide_error);

ide_startstop_t __ide_abort(ide_drive_t *drive, struct request *rq)
{
	if (drive->media != ide_disk)
		rq->errors |= ERROR_RESET;

	ide_kill_rq(drive, rq);

	return ide_stopped;
}

EXPORT_SYMBOL_GPL(__ide_abort);

/**
 * ide_abort - abort pending IDE operations
 * @drive: drive the error occurred on
 * @msg: message to report
 *
 * ide_abort kills and cleans up when we are about to do a
 * host initiated reset on active commands. Longer term we
 * want handlers to have sensible abort handling themselves.
 *
 * This differs fundamentally from ide_error because in
 * this case the command is doing just fine when we
 * blow it away.
 */

ide_startstop_t ide_abort(ide_drive_t *drive, const char *msg)
{
	struct request *rq;

	if (drive == NULL || (rq = HWGROUP(drive)->rq) == NULL)
		return ide_stopped;

	/* retry only "normal" I/O: */
	if (!blk_fs_request(rq)) {
		rq->errors = 1;
		ide_end_drive_cmd(drive, BUSY_STAT, 0);
		return ide_stopped;
	}

	if (rq->rq_disk) {
		ide_driver_t *drv;

		drv = *(ide_driver_t **)rq->rq_disk->private_data;
		return drv->abort(drive, rq);
	} else
		return __ide_abort(drive, rq);
}

/**
 * ide_cmd - issue a simple drive command
 * @drive: drive the command is for
 * @cmd: command byte
 * @nsect: sector byte
 * @handler: handler for the command completion
 *
 * Issue a simple drive command with interrupts.
 * The drive must be selected beforehand.
 */

static void ide_cmd (ide_drive_t *drive, u8 cmd, u8 nsect,
		ide_handler_t *handler)
{
	ide_hwif_t *hwif = HWIF(drive);
	if (IDE_CONTROL_REG)
		hwif->OUTB(drive->ctl,IDE_CONTROL_REG);	/* clear nIEN */
	SELECT_MASK(drive,0);
	hwif->OUTB(nsect,IDE_NSECTOR_REG);
	ide_execute_command(drive, cmd, handler, WAIT_CMD, NULL);
}

/**
 * drive_cmd_intr - drive command completion interrupt
 * @drive: drive the completion interrupt occurred on
 *
 * drive_cmd_intr() is invoked on completion of a special DRIVE_CMD.
 * We do any necessary data reading and then wait for the drive to
 * go non-busy. At that point we may read the error data and complete
 * the request.
 */

static ide_startstop_t drive_cmd_intr (ide_drive_t *drive)
{
	struct request *rq = HWGROUP(drive)->rq;
	ide_hwif_t *hwif = HWIF(drive);
	u8 *args = (u8 *) rq->buffer;
	u8 stat = hwif->INB(IDE_STATUS_REG);
	int retries = 10;

	local_irq_enable_in_hardirq();
	if ((stat & DRQ_STAT) && args && args[3]) {
		u8 io_32bit = drive->io_32bit;
		drive->io_32bit = 0;
		hwif->ata_input_data(drive, &args[4], args[3] * SECTOR_WORDS);
		drive->io_32bit = io_32bit;
		while (((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) && retries--)
			udelay(100);
	}

	if (!OK_STAT(stat, READY_STAT, BAD_STAT))
		return ide_error(drive, "drive_cmd", stat);
		/* calls ide_end_drive_cmd */
	ide_end_drive_cmd(drive, stat, hwif->INB(IDE_ERROR_REG));
	return ide_stopped;
}

static void ide_init_specify_cmd(ide_drive_t *drive, ide_task_t *task)
{
	task->tfRegister[IDE_NSECTOR_OFFSET] = drive->sect;
	task->tfRegister[IDE_SECTOR_OFFSET]  = drive->sect;
	task->tfRegister[IDE_LCYL_OFFSET]    = drive->cyl;
	task->tfRegister[IDE_HCYL_OFFSET]    = drive->cyl>>8;
	task->tfRegister[IDE_SELECT_OFFSET]  = ((drive->head-1)|drive->select.all)&0xBF;
	task->tfRegister[IDE_COMMAND_OFFSET] = WIN_SPECIFY;

	task->handler = &set_geometry_intr;
}

static void ide_init_restore_cmd(ide_drive_t *drive, ide_task_t *task)
{
	task->tfRegister[IDE_NSECTOR_OFFSET] = drive->sect;
	task->tfRegister[IDE_COMMAND_OFFSET] = WIN_RESTORE;

	task->handler = &recal_intr;
}

static void ide_init_setmult_cmd(ide_drive_t *drive, ide_task_t *task)
{
	task->tfRegister[IDE_NSECTOR_OFFSET] = drive->mult_req;
	task->tfRegister[IDE_COMMAND_OFFSET] = WIN_SETMULT;

	task->handler = &set_multmode_intr;
}

static ide_startstop_t ide_disk_special(ide_drive_t *drive)
{
	special_t *s = &drive->special;
	ide_task_t args;

	memset(&args, 0, sizeof(ide_task_t));
	args.command_type = IDE_DRIVE_TASK_NO_DATA;

	if (s->b.set_geometry) {
		s->b.set_geometry = 0;
		ide_init_specify_cmd(drive, &args);
	} else if (s->b.recalibrate) {
		s->b.recalibrate = 0;
		ide_init_restore_cmd(drive, &args);
	} else if (s->b.set_multmode) {
		s->b.set_multmode = 0;
		if (drive->mult_req > drive->id->max_multsect)
			drive->mult_req = drive->id->max_multsect;
		ide_init_setmult_cmd(drive, &args);
	} else if (s->all) {
		int special = s->all;
		s->all = 0;
		printk(KERN_ERR "%s: bad special flag: 0x%02x\n", drive->name, special);
		return ide_stopped;
	}

	do_rw_taskfile(drive, &args);

	return ide_started;
}
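
/*
 * Note that ide_disk_special() services exactly one pending flag per call,
 * in the fixed order set_geometry, recalibrate, set_multmode; each pass
 * clears the flag it handled and queues the matching taskfile, so a drive
 * with several special bits set is brought up to date over several trips
 * through do_special().
 */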

/**
 * do_special - issue some special commands
 * @drive: drive the command is for
 *
 * do_special() is used to issue WIN_SPECIFY, WIN_RESTORE, and WIN_SETMULT
 * commands to a drive. It used to do much more, but has been scaled
 * back.
 */

static ide_startstop_t do_special (ide_drive_t *drive)
{
	special_t *s = &drive->special;

#ifdef DEBUG
	printk("%s: do_special: 0x%02x\n", drive->name, s->all);
#endif
	if (s->b.set_tune) {
		s->b.set_tune = 0;
		if (HWIF(drive)->tuneproc != NULL)
			HWIF(drive)->tuneproc(drive, drive->tune_req);
		return ide_stopped;
	} else {
		if (drive->media == ide_disk)
			return ide_disk_special(drive);

		s->all = 0;
		drive->mult_req = 0;
		return ide_stopped;
	}
}

void ide_map_sg(ide_drive_t *drive, struct request *rq)
{
	ide_hwif_t *hwif = drive->hwif;
	struct scatterlist *sg = hwif->sg_table;

	if (hwif->sg_mapped)	/* needed by ide-scsi */
		return;

	if (rq->cmd_type != REQ_TYPE_ATA_TASKFILE) {
		hwif->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
	} else {
		sg_init_one(sg, rq->buffer, rq->nr_sectors * SECTOR_SIZE);
		hwif->sg_nents = 1;
	}
}

EXPORT_SYMBOL_GPL(ide_map_sg);

void ide_init_sg_cmd(ide_drive_t *drive, struct request *rq)
{
	ide_hwif_t *hwif = drive->hwif;

	hwif->nsect = hwif->nleft = rq->nr_sectors;
	hwif->cursg = hwif->cursg_ofs = 0;
}

EXPORT_SYMBOL_GPL(ide_init_sg_cmd);

/**
 * execute_drive_cmd - issue a special drive command
 * @drive: the drive to issue the command on
 * @rq: the request structure holding the command
 *
 * execute_drive_cmd() issues a special drive command, usually
 * initiated by ioctl() from the external hdparm program. The
 * command can be a drive command, drive task or taskfile
 * operation. Weirdly you can call it with NULL to wait for
 * all commands to finish. Don't do this as that is due to change.
 */

static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
		struct request *rq)
{
	ide_hwif_t *hwif = HWIF(drive);
	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
		ide_task_t *args = rq->special;

		if (!args)
			goto done;

		hwif->data_phase = args->data_phase;

		switch (hwif->data_phase) {
		case TASKFILE_MULTI_OUT:
		case TASKFILE_OUT:
		case TASKFILE_MULTI_IN:
		case TASKFILE_IN:
			ide_init_sg_cmd(drive, rq);
			ide_map_sg(drive, rq);
		default:
			break;
		}

		if (args->tf_out_flags.all != 0)
			return flagged_taskfile(drive, args);
		return do_rw_taskfile(drive, args);
	} else if (rq->cmd_type == REQ_TYPE_ATA_TASK) {
		u8 *args = rq->buffer;
		u8 sel;

		if (!args)
			goto done;
#ifdef DEBUG
		printk("%s: DRIVE_TASK_CMD ", drive->name);
		printk("cmd=0x%02x ", args[0]);
		printk("fr=0x%02x ", args[1]);
		printk("ns=0x%02x ", args[2]);
		printk("sc=0x%02x ", args[3]);
		printk("lcyl=0x%02x ", args[4]);
		printk("hcyl=0x%02x ", args[5]);
		printk("sel=0x%02x\n", args[6]);
#endif
		hwif->OUTB(args[1], IDE_FEATURE_REG);
		hwif->OUTB(args[3], IDE_SECTOR_REG);
		hwif->OUTB(args[4], IDE_LCYL_REG);
		hwif->OUTB(args[5], IDE_HCYL_REG);
		sel = (args[6] & ~0x10);
		if (drive->select.b.unit)
			sel |= 0x10;
		hwif->OUTB(sel, IDE_SELECT_REG);
		ide_cmd(drive, args[0], args[2], &drive_cmd_intr);
		return ide_started;
	} else if (rq->cmd_type == REQ_TYPE_ATA_CMD) {
		u8 *args = rq->buffer;

		if (!args)
			goto done;
#ifdef DEBUG
		printk("%s: DRIVE_CMD ", drive->name);
		printk("cmd=0x%02x ", args[0]);
		printk("sc=0x%02x ", args[1]);
		printk("fr=0x%02x ", args[2]);
		printk("xx=0x%02x\n", args[3]);
#endif
		if (args[0] == WIN_SMART) {
			hwif->OUTB(0x4f, IDE_LCYL_REG);
			hwif->OUTB(0xc2, IDE_HCYL_REG);
			hwif->OUTB(args[2],IDE_FEATURE_REG);
			hwif->OUTB(args[1],IDE_SECTOR_REG);
			ide_cmd(drive, args[0], args[3], &drive_cmd_intr);
			return ide_started;
		}
		hwif->OUTB(args[2],IDE_FEATURE_REG);
		ide_cmd(drive, args[0], args[1], &drive_cmd_intr);
		return ide_started;
	}

done:
	/*
	 * NULL is actually a valid way of waiting for
	 * all current requests to be flushed from the queue.
	 */
#ifdef DEBUG
	printk("%s: DRIVE_CMD (null)\n", drive->name);
#endif
	ide_end_drive_cmd(drive,
			hwif->INB(IDE_STATUS_REG),
			hwif->INB(IDE_ERROR_REG));
	return ide_stopped;
}

static void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
{
	struct request_pm_state *pm = rq->data;

	if (blk_pm_suspend_request(rq) &&
	    pm->pm_step == ide_pm_state_start_suspend)
		/* Mark drive blocked when starting the suspend sequence. */
		drive->blocked = 1;
	else if (blk_pm_resume_request(rq) &&
		 pm->pm_step == ide_pm_state_start_resume) {
		/*
		 * The first thing we do on wakeup is to wait for BSY bit to
		 * go away (with a looong timeout) as a drive on this hwif may
		 * just be POSTing itself.
		 * We do that before even selecting as the "other" device on
		 * the bus may be broken enough to walk on our toes at this
		 * point.
		 */
		int rc;
#ifdef DEBUG_PM
		printk("%s: Wakeup request initiated, waiting for !BSY...\n", drive->name);
#endif
		rc = ide_wait_not_busy(HWIF(drive), 35000);
		if (rc)
			printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name);
		SELECT_DRIVE(drive);
		HWIF(drive)->OUTB(8, HWIF(drive)->io_ports[IDE_CONTROL_OFFSET]);
		rc = ide_wait_not_busy(HWIF(drive), 100000);
		if (rc)
			printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);
	}
}

/**
 * start_request - start of I/O and command issuing for IDE
 * @drive: drive the request is for
 * @rq: request to start
 *
 * start_request() initiates handling of a new I/O request. It
 * accepts commands and I/O (read/write) requests. It also does
 * the final remapping for weird stuff like EZDrive. Once device
 * mapper can work at the sector level the EZDrive stuff can go away.
 *
 * FIXME: this function needs a rename
 */

static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
{
	ide_startstop_t startstop;
	sector_t block;

	BUG_ON(!blk_rq_started(rq));

#ifdef DEBUG
	printk("%s: start_request: current=0x%08lx\n",
		HWIF(drive)->name, (unsigned long) rq);
#endif

	/* bail early if we've exceeded max_failures */
	if (drive->max_failures && (drive->failures > drive->max_failures)) {
		goto kill_rq;
	}

	block = rq->sector;
	if (blk_fs_request(rq) &&
	    (drive->media == ide_disk || drive->media == ide_floppy)) {
		block += drive->sect0;
	}
	/* Yecch - this will shift the entire interval,
	   possibly killing some innocent following sector */
	if (block == 0 && drive->remap_0_to_1 == 1)
		block = 1;  /* redirect MBR access to EZ-Drive partn table */

	if (blk_pm_request(rq))
		ide_check_pm_state(drive, rq);

	SELECT_DRIVE(drive);
	if (ide_wait_stat(&startstop, drive, drive->ready_stat, BUSY_STAT|DRQ_STAT, WAIT_READY)) {
		printk(KERN_ERR "%s: drive not ready for command\n", drive->name);
		return startstop;
	}
	if (!drive->special.all) {
		ide_driver_t *drv;

		/*
		 * We reset the drive so we need to issue a SETFEATURES.
		 * Do it _after_ do_special() restored device parameters.
		 */
		if (drive->current_speed == 0xff)
			ide_config_drive_speed(drive, drive->desired_speed);

		if (rq->cmd_type == REQ_TYPE_ATA_CMD ||
		    rq->cmd_type == REQ_TYPE_ATA_TASK ||
		    rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
			return execute_drive_cmd(drive, rq);
		else if (blk_pm_request(rq)) {
			struct request_pm_state *pm = rq->data;
#ifdef DEBUG_PM
			printk("%s: start_power_step(step: %d)\n",
				drive->name, pm->pm_step);
#endif
			startstop = ide_start_power_step(drive, rq);
			if (startstop == ide_stopped &&
			    pm->pm_step == ide_pm_state_completed)
				ide_complete_pm_request(drive, rq);
			return startstop;
		}

		drv = *(ide_driver_t **)rq->rq_disk->private_data;
		return drv->do_request(drive, rq, block);
	}
	return do_special(drive);
kill_rq:
	ide_kill_rq(drive, rq);
	return ide_stopped;
}

/**
 * ide_stall_queue - pause an IDE device
 * @drive: drive to stall
 * @timeout: time to stall for (jiffies)
 *
 * ide_stall_queue() can be used by a drive to give excess bandwidth back
 * to the hwgroup by sleeping for timeout jiffies.
 */

void ide_stall_queue (ide_drive_t *drive, unsigned long timeout)
{
	if (timeout > WAIT_WORSTCASE)
		timeout = WAIT_WORSTCASE;
	drive->sleep = timeout + jiffies;
	drive->sleeping = 1;
}

EXPORT_SYMBOL(ide_stall_queue);

#define WAKEUP(drive)	((drive)->service_start + 2 * (drive)->service_time)
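
/*
 * WAKEUP() estimates when a drive will next want service: the start of its
 * last request plus twice the time that request took. choose_drive() below
 * prefers the drive with the earliest such estimate; e.g. a drive whose
 * last request started at jiffies 1000 and took 50 jiffies is not expected
 * to need attention again before jiffies 1100.
 */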

/**
 * choose_drive - select a drive to service
 * @hwgroup: hardware group to select on
 *
 * choose_drive() selects the next drive which will be serviced.
 * This is necessary because the IDE layer can't issue commands
 * to both drives on the same cable, unlike SCSI.
 */

static inline ide_drive_t *choose_drive (ide_hwgroup_t *hwgroup)
{
	ide_drive_t *drive, *best;

repeat:
	best = NULL;
	drive = hwgroup->drive;

	/*
	 * drive is doing pre-flush, ordered write, post-flush sequence. even
	 * though that is 3 requests, it must be seen as a single transaction.
	 * we must not preempt this drive until that is complete
	 */
	if (blk_queue_flushing(drive->queue)) {
		/*
		 * small race where queue could get replugged during
		 * the 3-request flush cycle, just yank the plug since
		 * we want it to finish asap
		 */
		blk_remove_plug(drive->queue);
		return drive;
	}

	do {
		if ((!drive->sleeping || time_after_eq(jiffies, drive->sleep))
		    && !elv_queue_empty(drive->queue)) {
			if (!best
			 || (drive->sleeping && (!best->sleeping || time_before(drive->sleep, best->sleep)))
			 || (!best->sleeping && time_before(WAKEUP(drive), WAKEUP(best))))
			{
				if (!blk_queue_plugged(drive->queue))
					best = drive;
			}
		}
	} while ((drive = drive->next) != hwgroup->drive);
	if (best && best->nice1 && !best->sleeping && best != hwgroup->drive && best->service_time > WAIT_MIN_SLEEP) {
		long t = (signed long)(WAKEUP(best) - jiffies);
		if (t >= WAIT_MIN_SLEEP) {
			/*
			 * We *may* have some time to spare, but first let's see if
			 * someone can potentially benefit from our nice mood today..
			 */
			drive = best->next;
			do {
				if (!drive->sleeping
				 && time_before(jiffies - best->service_time, WAKEUP(drive))
				 && time_before(WAKEUP(drive), jiffies + t))
				{
					ide_stall_queue(best, min_t(long, t, 10 * WAIT_MIN_SLEEP));
					goto repeat;
				}
			} while ((drive = drive->next) != best);
		}
	}
	return best;
}
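
/*
 * The selection above is two-pass: the first loop picks the runnable drive
 * with the earliest WAKEUP() estimate, and the second loop may voluntarily
 * stall that choice (if it is marked nice1 and has time to spare) so that
 * a cable-mate expected to become ready soon is not starved.
 */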

/*
 * Issue a new request to a drive from hwgroup
 * Caller must have already done spin_lock_irqsave(&ide_lock, ..);
 *
 * A hwgroup is a serialized group of IDE interfaces. Usually there is
 * exactly one hwif (interface) per hwgroup, but buggy controllers (eg. CMD640)
 * may have both interfaces in a single hwgroup to "serialize" access.
 * Or possibly multiple ISA interfaces can share a common IRQ by being grouped
 * together into one hwgroup for serialized access.
 *
 * Note also that several hwgroups can end up sharing a single IRQ,
 * possibly along with many other devices. This is especially common in
 * PCI-based systems with off-board IDE controller cards.
 *
 * The IDE driver uses the single global ide_lock spinlock to protect
 * access to the request queues, and to protect the hwgroup->busy flag.
 *
 * The first thread into the driver for a particular hwgroup sets the
 * hwgroup->busy flag to indicate that this hwgroup is now active,
 * and then initiates processing of the top request from the request queue.
 *
 * Other threads attempting entry notice the busy setting, and will simply
 * queue their new requests and exit immediately. Note that hwgroup->busy
 * remains set even when the driver is merely awaiting the next interrupt.
 * Thus, the meaning is "this hwgroup is busy processing a request".
 *
 * When processing of a request completes, the completing thread or IRQ-handler
 * will start the next request from the queue. If no more work remains,
 * the driver will clear the hwgroup->busy flag and exit.
 *
 * The ide_lock (spinlock) is used to protect all access to the
 * hwgroup->busy flag, but is otherwise not needed for most processing in
 * the driver. This makes the driver much friendlier to shared IRQs
 * than previous designs, while remaining 100% (?) SMP safe and capable.
 */
static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
{
	ide_drive_t	*drive;
	ide_hwif_t	*hwif;
	struct request	*rq;
	ide_startstop_t	startstop;
	int		loops = 0;

	/* for atari only: POSSIBLY BROKEN HERE(?) */
	ide_get_lock(ide_intr, hwgroup);

	/* caller must own ide_lock */
	BUG_ON(!irqs_disabled());

	while (!hwgroup->busy) {
		hwgroup->busy = 1;
		drive = choose_drive(hwgroup);
		if (drive == NULL) {
			int sleeping = 0;
			unsigned long sleep = 0; /* shut up, gcc */
			hwgroup->rq = NULL;
			drive = hwgroup->drive;
			do {
				if (drive->sleeping && (!sleeping || time_before(drive->sleep, sleep))) {
					sleeping = 1;
					sleep = drive->sleep;
				}
			} while ((drive = drive->next) != hwgroup->drive);
			if (sleeping) {
				/*
				 * Take a short snooze, and then wake up this hwgroup again.
				 * This gives other hwgroups on the same IRQ a chance to
				 * play fairly with us, just in case there are big differences
				 * in relative throughputs.. don't want to hog the cpu too much.
				 */
				if (time_before(sleep, jiffies + WAIT_MIN_SLEEP))
					sleep = jiffies + WAIT_MIN_SLEEP;
#if 1
				if (timer_pending(&hwgroup->timer))
					printk(KERN_CRIT "ide_set_handler: timer already active\n");
#endif
				/* so that ide_timer_expiry knows what to do */
				hwgroup->sleeping = 1;
				hwgroup->req_gen_timer = hwgroup->req_gen;
				mod_timer(&hwgroup->timer, sleep);
				/* we purposely leave hwgroup->busy==1
				 * while sleeping */
			} else {
				/* Ugly, but how can we sleep for the lock
				 * otherwise? perhaps from tq_disk?
				 */

				/* for atari only */
				ide_release_lock();
				hwgroup->busy = 0;
			}

			/* no more work for this hwgroup (for now) */
			return;
		}
	again:
		hwif = HWIF(drive);
		if (hwgroup->hwif->sharing_irq &&
		    hwif != hwgroup->hwif &&
		    hwif->io_ports[IDE_CONTROL_OFFSET]) {
			/* set nIEN for previous hwif */
			SELECT_INTERRUPT(drive);
		}
		hwgroup->hwif = hwif;
		hwgroup->drive = drive;
		drive->sleeping = 0;
		drive->service_start = jiffies;

		if (blk_queue_plugged(drive->queue)) {
			printk(KERN_ERR "ide: huh? queue was plugged!\n");
			break;
		}

		/*
		 * we know that the queue isn't empty, but this can happen
		 * if the q->prep_rq_fn() decides to kill a request
		 */
		rq = elv_next_request(drive->queue);
		if (!rq) {
			hwgroup->busy = 0;
			break;
		}

		/*
		 * Sanity: don't accept a request that isn't a PM request
		 * if we are currently power managed. This is very important as
		 * blk_stop_queue() doesn't prevent the elv_next_request()
		 * above from returning us whatever is in the queue. Since we
		 * call ide_do_request() ourselves, we end up taking requests
		 * while the queue is blocked...
		 *
		 * We let requests forced at head of queue with ide-preempt
		 * through, though. I hope that doesn't happen too much,
		 * hopefully not unless the subdriver triggers such a thing
		 * in its own PM state machine.
		 *
		 * We count how many times we loop here to make sure we service
		 * all drives in the hwgroup without looping forever
		 */
		if (drive->blocked && !blk_pm_request(rq) && !(rq->cmd_flags & REQ_PREEMPT)) {
			drive = drive->next ? drive->next : hwgroup->drive;
			if (loops++ < 4 && !blk_queue_plugged(drive->queue))
				goto again;
			/* We clear busy, there should be no pending ATA command at this point. */
			hwgroup->busy = 0;
			break;
		}

		hwgroup->rq = rq;

		/*
		 * Some systems have trouble with IDE IRQs arriving while
		 * the driver is still setting things up. So, here we disable
		 * the IRQ used by this interface while the request is being started.
		 * This may look bad at first, but pretty much the same thing
		 * happens anyway when any interrupt comes in, IDE or otherwise
		 *  -- the kernel masks the IRQ while it is being handled.
		 */
		if (masked_irq != IDE_NO_IRQ && hwif->irq != masked_irq)
			disable_irq_nosync(hwif->irq);
		spin_unlock(&ide_lock);
		local_irq_enable_in_hardirq();
			/* allow other IRQs while we start this request */
		startstop = start_request(drive, rq);
		spin_lock_irq(&ide_lock);
		if (masked_irq != IDE_NO_IRQ && hwif->irq != masked_irq)
			enable_irq(hwif->irq);
		if (startstop == ide_stopped)
			hwgroup->busy = 0;
	}
}

/*
 * Passes the stuff to ide_do_request
 */
void do_ide_request(request_queue_t *q)
{
	ide_drive_t *drive = q->queuedata;

	ide_do_request(HWGROUP(drive), IDE_NO_IRQ);
}

/*
 * un-busy the hwgroup etc, and clear any pending DMA status. we want to
 * retry the current request in pio mode instead of risking tossing it
 * all away
 */
static ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
{
	ide_hwif_t *hwif = HWIF(drive);
	struct request *rq;
	ide_startstop_t ret = ide_stopped;

	/*
	 * end current dma transaction
	 */

	if (error < 0) {
		printk(KERN_WARNING "%s: DMA timeout error\n", drive->name);
		(void)HWIF(drive)->ide_dma_end(drive);
		ret = ide_error(drive, "dma timeout error",
						hwif->INB(IDE_STATUS_REG));
	} else {
		printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name);
		(void) hwif->ide_dma_timeout(drive);
	}

	/*
	 * disable dma for now, but remember that we did so because of
	 * a timeout -- we'll reenable after we finish this next request
	 * (or rather the first chunk of it) in pio.
	 */
	drive->retry_pio++;
	drive->state = DMA_PIO_RETRY;
	hwif->dma_off_quietly(drive);

	/*
	 * un-busy drive etc (hwgroup->busy is cleared on return) and
	 * make sure request is sane
	 */
	rq = HWGROUP(drive)->rq;

	if (!rq)
		goto out;

	HWGROUP(drive)->rq = NULL;

	rq->errors = 0;

	if (!rq->bio)
		goto out;

	rq->sector = rq->bio->bi_sector;
	rq->current_nr_sectors = bio_iovec(rq->bio)->bv_len >> 9;
	rq->hard_cur_sectors = rq->current_nr_sectors;
	rq->buffer = bio_data(rq->bio);
out:
	return ret;
}
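
/*
 * The tail of ide_dma_timeout_retry() rewinds the request by hand:
 * rq->sector, current_nr_sectors and buffer are reloaded from the first
 * unfinished bio, so when the request is re-issued in PIO mode it restarts
 * at the first sector the failed DMA transfer did not complete.
 */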

/**
 * ide_timer_expiry - handle lack of an IDE interrupt
 * @data: timer callback magic (hwgroup)
 *
 * An IDE command has timed out before the expected drive return
 * occurred. At this point we attempt to clean up the current
 * mess. If the current handler includes an expiry handler then
 * we invoke the expiry handler, and providing it is happy the
 * work is done. If that fails we apply generic recovery rules
 * invoking the handler and checking the drive DMA status. We
 * have an excessively incestuous relationship with the DMA
 * logic that wants cleaning up.
 */

void ide_timer_expiry (unsigned long data)
{
	ide_hwgroup_t	*hwgroup = (ide_hwgroup_t *) data;
	ide_handler_t	*handler;
	ide_expiry_t	*expiry;
	unsigned long	flags;
	unsigned long	wait = -1;

	spin_lock_irqsave(&ide_lock, flags);

	if (((handler = hwgroup->handler) == NULL) ||
	    (hwgroup->req_gen != hwgroup->req_gen_timer)) {
		/*
		 * Either a marginal timeout occurred
		 * (got the interrupt just as timer expired),
		 * or we were "sleeping" to give other devices a chance.
		 * Either way, we don't really want to complain about anything.
		 */
		if (hwgroup->sleeping) {
			hwgroup->sleeping = 0;
			hwgroup->busy = 0;
		}
	} else {
		ide_drive_t *drive = hwgroup->drive;
		if (!drive) {
			printk(KERN_ERR "ide_timer_expiry: hwgroup->drive was NULL\n");
			hwgroup->handler = NULL;
		} else {
			ide_hwif_t *hwif;
			ide_startstop_t startstop = ide_stopped;
			if (!hwgroup->busy) {
				hwgroup->busy = 1;	/* paranoia */
				printk(KERN_ERR "%s: ide_timer_expiry: hwgroup->busy was 0 ??\n", drive->name);
			}
			if ((expiry = hwgroup->expiry) != NULL) {
				/* continue */
				if ((wait = expiry(drive)) > 0) {
					/* reset timer */
					hwgroup->timer.expires = jiffies + wait;
					hwgroup->req_gen_timer = hwgroup->req_gen;
					add_timer(&hwgroup->timer);
					spin_unlock_irqrestore(&ide_lock, flags);
					return;
				}
			}
			hwgroup->handler = NULL;
			/*
			 * We need to simulate a real interrupt when invoking
			 * the handler() function, which means we need to
			 * globally mask the specific IRQ:
			 */
			spin_unlock(&ide_lock);
			hwif = HWIF(drive);
#if DISABLE_IRQ_NOSYNC
			disable_irq_nosync(hwif->irq);
#else
			/* disable_irq_nosync ?? */
			disable_irq(hwif->irq);
#endif /* DISABLE_IRQ_NOSYNC */
			/* local CPU only,
			 * as if we were handling an interrupt */
			local_irq_disable();
			if (hwgroup->polling) {
				startstop = handler(drive);
			} else if (drive_is_ready(drive)) {
				if (drive->waiting_for_dma)
					(void) hwgroup->hwif->ide_dma_lostirq(drive);
				(void)ide_ack_intr(hwif);
				printk(KERN_WARNING "%s: lost interrupt\n", drive->name);
				startstop = handler(drive);
			} else {
				if (drive->waiting_for_dma) {
					startstop = ide_dma_timeout_retry(drive, wait);
				} else
					startstop =
					ide_error(drive, "irq timeout", hwif->INB(IDE_STATUS_REG));
			}
			drive->service_time = jiffies - drive->service_start;
			spin_lock_irq(&ide_lock);
			enable_irq(hwif->irq);
			if (startstop == ide_stopped)
				hwgroup->busy = 0;
		}
	}
	ide_do_request(hwgroup, IDE_NO_IRQ);
	spin_unlock_irqrestore(&ide_lock, flags);
}
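
/*
 * The expiry hook above gives handlers a veto over timeout processing. A
 * hypothetical callback, to sketch the contract:
 *
 *	static int my_expiry(ide_drive_t *drive)	// illustrative only
 *	{
 *		if (HWIF(drive)->INB(IDE_STATUS_REG) & BUSY_STAT)
 *			return WAIT_CMD;	// still busy: rearm the timer
 *		return 0;			// give up: run generic recovery
 *	}
 *
 * A positive return value is used verbatim as the new timer delay in
 * jiffies; anything else lets the recovery code above take over.
 */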

/**
 * unexpected_intr - handle an unexpected IDE interrupt
 * @irq: interrupt line
 * @hwgroup: hwgroup being processed
 *
 * There's nothing really useful we can do with an unexpected interrupt,
 * other than reading the status register (to clear it), and logging it.
 * There should be no way that an irq can happen before we're ready for it,
 * so we needn't worry much about losing an "important" interrupt here.
 *
 * On laptops (and "green" PCs), an unexpected interrupt occurs whenever
 * the drive enters "idle", "standby", or "sleep" mode, so if the status
 * looks "good", we just ignore the interrupt completely.
 *
 * This routine assumes __cli() is in effect when called.
 *
 * If an unexpected interrupt happens on irq15 while we are handling irq14
 * and if the two interfaces are "serialized" (CMD640), then it looks like
 * we could screw up by interfering with a new request being set up for
 * irq15.
 *
 * In reality, this is a non-issue. The new command is not sent unless
 * the drive is ready to accept one, in which case we know the drive is
 * not trying to interrupt us. And ide_set_handler() is always invoked
 * before completing the issuance of any new drive command, so we will not
 * be accidentally invoked as a result of any valid command completion
 * interrupt.
 *
 * Note that we must walk the entire hwgroup here. We know which hwif
 * is doing the current command, but we don't know which hwif burped
 * mysteriously.
 */

static void unexpected_intr (int irq, ide_hwgroup_t *hwgroup)
{
	u8 stat;
	ide_hwif_t *hwif = hwgroup->hwif;

	/*
	 * handle the unexpected interrupt
	 */
	do {
		if (hwif->irq == irq) {
			stat = hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]);
			if (!OK_STAT(stat, READY_STAT, BAD_STAT)) {
				/* Try to not flood the console with msgs */
				static unsigned long last_msgtime, count;
				++count;
				if (time_after(jiffies, last_msgtime + HZ)) {
					last_msgtime = jiffies;
					printk(KERN_ERR "%s%s: unexpected interrupt, "
						"status=0x%02x, count=%ld\n",
						hwif->name,
						(hwif->next==hwgroup->hwif) ? "" : "(?)", stat, count);
				}
			}
		}
	} while ((hwif = hwif->next) != hwgroup->hwif);
}

/**
 * ide_intr - default IDE interrupt handler
 * @irq: interrupt number
 * @dev_id: hwif group
 *
 * This is the default IRQ handler for the IDE layer. You should
 * not need to override it. If you do be aware it is subtle in
 * places.
 *
 * hwgroup->hwif is the interface in the group currently performing
 * a command. hwgroup->drive is the drive and hwgroup->handler is
 * the IRQ handler to call. As we issue a command the handlers
 * step through multiple states, reassigning the handler to the
 * next step in the process. Unlike a smart SCSI controller IDE
 * expects the main processor to sequence the various transfer
 * stages. We also manage a poll timer to catch up with most
 * timeout situations. There are still a few where the handlers
 * don't ever decide to give up.
 *
 * The handler eventually returns ide_stopped to indicate the
 * request completed. At this point we issue the next request
 * on the hwgroup and the process begins again.
 */

irqreturn_t ide_intr (int irq, void *dev_id)
{
	unsigned long flags;
	ide_hwgroup_t *hwgroup = (ide_hwgroup_t *)dev_id;
	ide_hwif_t *hwif;
	ide_drive_t *drive;
	ide_handler_t *handler;
	ide_startstop_t startstop;

	spin_lock_irqsave(&ide_lock, flags);
	hwif = hwgroup->hwif;

	if (!ide_ack_intr(hwif)) {
		spin_unlock_irqrestore(&ide_lock, flags);
		return IRQ_NONE;
	}

	if ((handler = hwgroup->handler) == NULL || hwgroup->polling) {
		/*
		 * Not expecting an interrupt from this drive.
		 * That means this could be:
		 *	(1) an interrupt from another PCI device
		 *	sharing the same PCI INT# as us.
		 * or	(2) a drive just entered sleep or standby mode,
		 *	and is interrupting to let us know.
		 * or	(3) a spurious interrupt of unknown origin.
		 *
		 * For PCI, we cannot tell the difference,
		 * so in that case we just ignore it and hope it goes away.
		 *
		 * FIXME: unexpected_intr should be hwif-> then we can
		 * remove all the ifdef PCI crap
		 */
#ifdef CONFIG_BLK_DEV_IDEPCI
		if (hwif->pci_dev && !hwif->pci_dev->vendor)
#endif	/* CONFIG_BLK_DEV_IDEPCI */
		{
			/*
			 * Probably not a shared PCI interrupt,
			 * so we can safely try to do something about it:
			 */
			unexpected_intr(irq, hwgroup);
#ifdef CONFIG_BLK_DEV_IDEPCI
		} else {
			/*
			 * Whack the status register, just in case
			 * we have a leftover pending IRQ.
			 */
			(void) hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]);
#endif /* CONFIG_BLK_DEV_IDEPCI */
		}
		spin_unlock_irqrestore(&ide_lock, flags);
		return IRQ_NONE;
	}
	drive = hwgroup->drive;
	if (!drive) {
		/*
		 * This should NEVER happen, and there isn't much
		 * we could do about it here.
		 *
		 * [Note - this can occur if the drive is hot unplugged]
		 */
		spin_unlock_irqrestore(&ide_lock, flags);
		return IRQ_HANDLED;
	}
	if (!drive_is_ready(drive)) {
		/*
		 * This happens regularly when we share a PCI IRQ with
		 * another device. Unfortunately, it can also happen
		 * with some buggy drives that trigger the IRQ before
		 * their status register is up to date. Hopefully we have
		 * enough advance overhead that the latter isn't a problem.
		 */
		spin_unlock_irqrestore(&ide_lock, flags);
		return IRQ_NONE;
	}
	if (!hwgroup->busy) {
		hwgroup->busy = 1;	/* paranoia */
		printk(KERN_ERR "%s: ide_intr: hwgroup->busy was 0 ??\n", drive->name);
	}
	hwgroup->handler = NULL;
	hwgroup->req_gen++;
	del_timer(&hwgroup->timer);
	spin_unlock(&ide_lock);

	/* Some controllers might set DMA INTR no matter whether the transfer
	 * was DMA or PIO; bmdma status might need to be cleared even for
	 * PIO interrupts to prevent spurious/lost irq.
	 */
	if (hwif->ide_dma_clear_irq && !(drive->waiting_for_dma))
		/* ide_dma_end() needs bmdma status for error checking.
		 * So, skip clearing bmdma status here and leave it
		 * to ide_dma_end() if this is dma interrupt.
		 */
		hwif->ide_dma_clear_irq(drive);

	if (drive->unmask)
		local_irq_enable_in_hardirq();
	/* service this interrupt, may set handler for next interrupt */
	startstop = handler(drive);
	spin_lock_irq(&ide_lock);

	/*
	 * Note that handler() may have set things up for another
	 * interrupt to occur soon, but it cannot happen until
	 * we exit from this routine, because it will be the
	 * same irq as is currently being serviced here, and Linux
	 * won't allow another of the same (on any CPU) until we return.
	 */
	drive->service_time = jiffies - drive->service_start;
	if (startstop == ide_stopped) {
		if (hwgroup->handler == NULL) {	/* paranoia */
			hwgroup->busy = 0;
			ide_do_request(hwgroup, hwif->irq);
		} else {
			printk(KERN_ERR "%s: ide_intr: huh? expected NULL handler "
				"on exit\n", drive->name);
		}
	}
	spin_unlock_irqrestore(&ide_lock, flags);
	return IRQ_HANDLED;
}

/**
 * ide_init_drive_cmd - initialize a drive command request
 * @rq: request object
 *
 * Initialize a request before we fill it in and send it down to
 * ide_do_drive_cmd. Commands must be set up by this function. Right
 * now it doesn't do a lot, but if that changes abusers will have a
 * nasty surprise.
 */

void ide_init_drive_cmd (struct request *rq)
{
	memset(rq, 0, sizeof(*rq));
	rq->cmd_type = REQ_TYPE_ATA_CMD;
	rq->ref_count = 1;
}

EXPORT_SYMBOL(ide_init_drive_cmd);
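
/*
 * Illustrative ioctl-style usage (the argument values are an example, not
 * a call site in this file): build the request on the stack and wait:
 *
 *	struct request rq;
 *	u8 args[4] = { WIN_IDENTIFY, 0, 0, 1 };
 *
 *	ide_init_drive_cmd(&rq);
 *	rq.buffer = (char *)args;
 *	err = ide_do_drive_cmd(drive, &rq, ide_wait);
 *
 * See ide_do_drive_cmd() below for the available queueing actions.
 */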

/**
 * ide_do_drive_cmd - issue IDE special command
 * @drive: device to issue command
 * @rq: request to issue
 * @action: action for processing
 *
 * This function issues a special IDE device request
 * onto the request queue.
 *
 * If action is ide_wait, then the rq is queued at the end of the
 * request queue, and the function sleeps until it has been processed.
 * This is for use when invoked from an ioctl handler.
 *
 * If action is ide_preempt, then the rq is queued at the head of
 * the request queue, displacing the currently-being-processed
 * request and this function returns immediately without waiting
 * for the new rq to be completed. This is VERY DANGEROUS, and is
 * intended for careful use by the ATAPI tape/cdrom driver code.
 *
 * If action is ide_head_wait, then the rq is queued at the head of
 * the request queue and the function sleeps until it has been
 * processed, combining the two behaviours above.
 *
 * If action is ide_end, then the rq is queued at the end of the
 * request queue, and the function returns immediately without waiting
 * for the new rq to be completed. This is again intended for careful
 * use by the ATAPI tape/cdrom driver code.
 */

int ide_do_drive_cmd (ide_drive_t *drive, struct request *rq, ide_action_t action)
{
	unsigned long flags;
	ide_hwgroup_t *hwgroup = HWGROUP(drive);
	DECLARE_COMPLETION_ONSTACK(wait);
	int where = ELEVATOR_INSERT_BACK, err;
	int must_wait = (action == ide_wait || action == ide_head_wait);

	rq->errors = 0;

	/*
	 * we need to hold an extra reference to request for safe inspection
	 * after completion
	 */
	if (must_wait) {
		rq->ref_count++;
		rq->end_io_data = &wait;
		rq->end_io = blk_end_sync_rq;
	}

	spin_lock_irqsave(&ide_lock, flags);
	if (action == ide_preempt)
		hwgroup->rq = NULL;
	if (action == ide_preempt || action == ide_head_wait) {
		where = ELEVATOR_INSERT_FRONT;
		rq->cmd_flags |= REQ_PREEMPT;
	}
	__elv_add_request(drive->queue, rq, where, 0);
	ide_do_request(hwgroup, IDE_NO_IRQ);
	spin_unlock_irqrestore(&ide_lock, flags);

	err = 0;
	if (must_wait) {
		wait_for_completion(&wait);
		if (rq->errors)
			err = -EIO;

		blk_put_request(rq);
	}

	return err;
}

EXPORT_SYMBOL(ide_do_drive_cmd);