ide-io.c revision dbe217af3be08346f4b1abb885c2d9ec29c98fac
1/* 2 * IDE I/O functions 3 * 4 * Basic PIO and command management functionality. 5 * 6 * This code was split off from ide.c. See ide.c for history and original 7 * copyrights. 8 * 9 * This program is free software; you can redistribute it and/or modify it 10 * under the terms of the GNU General Public License as published by the 11 * Free Software Foundation; either version 2, or (at your option) any 12 * later version. 13 * 14 * This program is distributed in the hope that it will be useful, but 15 * WITHOUT ANY WARRANTY; without even the implied warranty of 16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 17 * General Public License for more details. 18 * 19 * For the avoidance of doubt the "preferred form" of this code is one which 20 * is in an open non patent encumbered format. Where cryptographic key signing 21 * forms part of the process of creating an executable the information 22 * including keys needed to generate an equivalently functional executable 23 * are deemed to be part of the source code. 24 */ 25 26 27#include <linux/config.h> 28#include <linux/module.h> 29#include <linux/types.h> 30#include <linux/string.h> 31#include <linux/kernel.h> 32#include <linux/timer.h> 33#include <linux/mm.h> 34#include <linux/interrupt.h> 35#include <linux/major.h> 36#include <linux/errno.h> 37#include <linux/genhd.h> 38#include <linux/blkpg.h> 39#include <linux/slab.h> 40#include <linux/init.h> 41#include <linux/pci.h> 42#include <linux/delay.h> 43#include <linux/ide.h> 44#include <linux/completion.h> 45#include <linux/reboot.h> 46#include <linux/cdrom.h> 47#include <linux/seq_file.h> 48#include <linux/device.h> 49#include <linux/kmod.h> 50#include <linux/scatterlist.h> 51 52#include <asm/byteorder.h> 53#include <asm/irq.h> 54#include <asm/uaccess.h> 55#include <asm/io.h> 56#include <asm/bitops.h> 57 58static int __ide_end_request(ide_drive_t *drive, struct request *rq, 59 int uptodate, int nr_sectors) 60{ 61 int ret = 1; 62 63 BUG_ON(!(rq->flags & REQ_STARTED)); 64 65 /* 66 * if failfast is set on a request, override number of sectors and 67 * complete the whole request right now 68 */ 69 if (blk_noretry_request(rq) && end_io_error(uptodate)) 70 nr_sectors = rq->hard_nr_sectors; 71 72 if (!blk_fs_request(rq) && end_io_error(uptodate) && !rq->errors) 73 rq->errors = -EIO; 74 75 /* 76 * decide whether to reenable DMA -- 3 is a random magic for now, 77 * if we DMA timeout more than 3 times, just stay in PIO 78 */ 79 if (drive->state == DMA_PIO_RETRY && drive->retry_pio <= 3) { 80 drive->state = 0; 81 HWGROUP(drive)->hwif->ide_dma_on(drive); 82 } 83 84 if (!end_that_request_first(rq, uptodate, nr_sectors)) { 85 add_disk_randomness(rq->rq_disk); 86 blkdev_dequeue_request(rq); 87 HWGROUP(drive)->rq = NULL; 88 end_that_request_last(rq, uptodate); 89 ret = 0; 90 } 91 92 return ret; 93} 94 95/** 96 * ide_end_request - complete an IDE I/O 97 * @drive: IDE device for the I/O 98 * @uptodate: 99 * @nr_sectors: number of sectors completed 100 * 101 * This is our end_request wrapper function. We complete the I/O 102 * update random number input and dequeue the request, which if 103 * it was tagged may be out of order. 104 */ 105 106int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors) 107{ 108 struct request *rq; 109 unsigned long flags; 110 int ret = 1; 111 112 /* 113 * room for locking improvements here, the calls below don't 114 * need the queue lock held at all 115 */ 116 spin_lock_irqsave(&ide_lock, flags); 117 rq = HWGROUP(drive)->rq; 118 119 if (!nr_sectors) 120 nr_sectors = rq->hard_cur_sectors; 121 122 ret = __ide_end_request(drive, rq, uptodate, nr_sectors); 123 124 spin_unlock_irqrestore(&ide_lock, flags); 125 return ret; 126} 127EXPORT_SYMBOL(ide_end_request); 128 129/* 130 * Power Management state machine. This one is rather trivial for now, 131 * we should probably add more, like switching back to PIO on suspend 132 * to help some BIOSes, re-do the door locking on resume, etc... 133 */ 134 135enum { 136 ide_pm_flush_cache = ide_pm_state_start_suspend, 137 idedisk_pm_standby, 138 139 idedisk_pm_idle = ide_pm_state_start_resume, 140 ide_pm_restore_dma, 141}; 142 143static void ide_complete_power_step(ide_drive_t *drive, struct request *rq, u8 stat, u8 error) 144{ 145 struct request_pm_state *pm = rq->end_io_data; 146 147 if (drive->media != ide_disk) 148 return; 149 150 switch (pm->pm_step) { 151 case ide_pm_flush_cache: /* Suspend step 1 (flush cache) complete */ 152 if (pm->pm_state == PM_EVENT_FREEZE) 153 pm->pm_step = ide_pm_state_completed; 154 else 155 pm->pm_step = idedisk_pm_standby; 156 break; 157 case idedisk_pm_standby: /* Suspend step 2 (standby) complete */ 158 pm->pm_step = ide_pm_state_completed; 159 break; 160 case idedisk_pm_idle: /* Resume step 1 (idle) complete */ 161 pm->pm_step = ide_pm_restore_dma; 162 break; 163 } 164} 165 166static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq) 167{ 168 struct request_pm_state *pm = rq->end_io_data; 169 ide_task_t *args = rq->special; 170 171 memset(args, 0, sizeof(*args)); 172 173 if (drive->media != ide_disk) { 174 /* skip idedisk_pm_idle for ATAPI devices */ 175 if (pm->pm_step == idedisk_pm_idle) 176 pm->pm_step = ide_pm_restore_dma; 177 } 178 179 switch (pm->pm_step) { 180 case ide_pm_flush_cache: /* Suspend step 1 (flush cache) */ 181 if (drive->media != ide_disk) 182 break; 183 /* Not supported? Switch to next step now. */ 184 if (!drive->wcache || !ide_id_has_flush_cache(drive->id)) { 185 ide_complete_power_step(drive, rq, 0, 0); 186 return ide_stopped; 187 } 188 if (ide_id_has_flush_cache_ext(drive->id)) 189 args->tfRegister[IDE_COMMAND_OFFSET] = WIN_FLUSH_CACHE_EXT; 190 else 191 args->tfRegister[IDE_COMMAND_OFFSET] = WIN_FLUSH_CACHE; 192 args->command_type = IDE_DRIVE_TASK_NO_DATA; 193 args->handler = &task_no_data_intr; 194 return do_rw_taskfile(drive, args); 195 196 case idedisk_pm_standby: /* Suspend step 2 (standby) */ 197 args->tfRegister[IDE_COMMAND_OFFSET] = WIN_STANDBYNOW1; 198 args->command_type = IDE_DRIVE_TASK_NO_DATA; 199 args->handler = &task_no_data_intr; 200 return do_rw_taskfile(drive, args); 201 202 case idedisk_pm_idle: /* Resume step 1 (idle) */ 203 args->tfRegister[IDE_COMMAND_OFFSET] = WIN_IDLEIMMEDIATE; 204 args->command_type = IDE_DRIVE_TASK_NO_DATA; 205 args->handler = task_no_data_intr; 206 return do_rw_taskfile(drive, args); 207 208 case ide_pm_restore_dma: /* Resume step 2 (restore DMA) */ 209 /* 210 * Right now, all we do is call hwif->ide_dma_check(drive), 211 * we could be smarter and check for current xfer_speed 212 * in struct drive etc... 213 */ 214 if ((drive->id->capability & 1) == 0) 215 break; 216 if (drive->hwif->ide_dma_check == NULL) 217 break; 218 drive->hwif->ide_dma_check(drive); 219 break; 220 } 221 pm->pm_step = ide_pm_state_completed; 222 return ide_stopped; 223} 224 225/** 226 * ide_end_dequeued_request - complete an IDE I/O 227 * @drive: IDE device for the I/O 228 * @uptodate: 229 * @nr_sectors: number of sectors completed 230 * 231 * Complete an I/O that is no longer on the request queue. This 232 * typically occurs when we pull the request and issue a REQUEST_SENSE. 233 * We must still finish the old request but we must not tamper with the 234 * queue in the meantime. 235 * 236 * NOTE: This path does not handle barrier, but barrier is not supported 237 * on ide-cd anyway. 238 */ 239 240int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq, 241 int uptodate, int nr_sectors) 242{ 243 unsigned long flags; 244 int ret = 1; 245 246 spin_lock_irqsave(&ide_lock, flags); 247 248 BUG_ON(!(rq->flags & REQ_STARTED)); 249 250 /* 251 * if failfast is set on a request, override number of sectors and 252 * complete the whole request right now 253 */ 254 if (blk_noretry_request(rq) && end_io_error(uptodate)) 255 nr_sectors = rq->hard_nr_sectors; 256 257 if (!blk_fs_request(rq) && end_io_error(uptodate) && !rq->errors) 258 rq->errors = -EIO; 259 260 /* 261 * decide whether to reenable DMA -- 3 is a random magic for now, 262 * if we DMA timeout more than 3 times, just stay in PIO 263 */ 264 if (drive->state == DMA_PIO_RETRY && drive->retry_pio <= 3) { 265 drive->state = 0; 266 HWGROUP(drive)->hwif->ide_dma_on(drive); 267 } 268 269 if (!end_that_request_first(rq, uptodate, nr_sectors)) { 270 add_disk_randomness(rq->rq_disk); 271 if (blk_rq_tagged(rq)) 272 blk_queue_end_tag(drive->queue, rq); 273 end_that_request_last(rq, uptodate); 274 ret = 0; 275 } 276 spin_unlock_irqrestore(&ide_lock, flags); 277 return ret; 278} 279EXPORT_SYMBOL_GPL(ide_end_dequeued_request); 280 281 282/** 283 * ide_complete_pm_request - end the current Power Management request 284 * @drive: target drive 285 * @rq: request 286 * 287 * This function cleans up the current PM request and stops the queue 288 * if necessary. 289 */ 290static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq) 291{ 292 unsigned long flags; 293 294#ifdef DEBUG_PM 295 printk("%s: completing PM request, %s\n", drive->name, 296 blk_pm_suspend_request(rq) ? "suspend" : "resume"); 297#endif 298 spin_lock_irqsave(&ide_lock, flags); 299 if (blk_pm_suspend_request(rq)) { 300 blk_stop_queue(drive->queue); 301 } else { 302 drive->blocked = 0; 303 blk_start_queue(drive->queue); 304 } 305 blkdev_dequeue_request(rq); 306 HWGROUP(drive)->rq = NULL; 307 end_that_request_last(rq, 1); 308 spin_unlock_irqrestore(&ide_lock, flags); 309} 310 311/* 312 * FIXME: probably move this somewhere else, name is bad too :) 313 */ 314u64 ide_get_error_location(ide_drive_t *drive, char *args) 315{ 316 u32 high, low; 317 u8 hcyl, lcyl, sect; 318 u64 sector; 319 320 high = 0; 321 hcyl = args[5]; 322 lcyl = args[4]; 323 sect = args[3]; 324 325 if (ide_id_has_flush_cache_ext(drive->id)) { 326 low = (hcyl << 16) | (lcyl << 8) | sect; 327 HWIF(drive)->OUTB(drive->ctl|0x80, IDE_CONTROL_REG); 328 high = ide_read_24(drive); 329 } else { 330 u8 cur = HWIF(drive)->INB(IDE_SELECT_REG); 331 if (cur & 0x40) { 332 high = cur & 0xf; 333 low = (hcyl << 16) | (lcyl << 8) | sect; 334 } else { 335 low = hcyl * drive->head * drive->sect; 336 low += lcyl * drive->sect; 337 low += sect - 1; 338 } 339 } 340 341 sector = ((u64) high << 24) | low; 342 return sector; 343} 344EXPORT_SYMBOL(ide_get_error_location); 345 346/** 347 * ide_end_drive_cmd - end an explicit drive command 348 * @drive: command 349 * @stat: status bits 350 * @err: error bits 351 * 352 * Clean up after success/failure of an explicit drive command. 353 * These get thrown onto the queue so they are synchronized with 354 * real I/O operations on the drive. 355 * 356 * In LBA48 mode we have to read the register set twice to get 357 * all the extra information out. 358 */ 359 360void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err) 361{ 362 ide_hwif_t *hwif = HWIF(drive); 363 unsigned long flags; 364 struct request *rq; 365 366 spin_lock_irqsave(&ide_lock, flags); 367 rq = HWGROUP(drive)->rq; 368 spin_unlock_irqrestore(&ide_lock, flags); 369 370 if (rq->flags & REQ_DRIVE_CMD) { 371 u8 *args = (u8 *) rq->buffer; 372 if (rq->errors == 0) 373 rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT); 374 375 if (args) { 376 args[0] = stat; 377 args[1] = err; 378 args[2] = hwif->INB(IDE_NSECTOR_REG); 379 } 380 } else if (rq->flags & REQ_DRIVE_TASK) { 381 u8 *args = (u8 *) rq->buffer; 382 if (rq->errors == 0) 383 rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT); 384 385 if (args) { 386 args[0] = stat; 387 args[1] = err; 388 args[2] = hwif->INB(IDE_NSECTOR_REG); 389 args[3] = hwif->INB(IDE_SECTOR_REG); 390 args[4] = hwif->INB(IDE_LCYL_REG); 391 args[5] = hwif->INB(IDE_HCYL_REG); 392 args[6] = hwif->INB(IDE_SELECT_REG); 393 } 394 } else if (rq->flags & REQ_DRIVE_TASKFILE) { 395 ide_task_t *args = (ide_task_t *) rq->special; 396 if (rq->errors == 0) 397 rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT); 398 399 if (args) { 400 if (args->tf_in_flags.b.data) { 401 u16 data = hwif->INW(IDE_DATA_REG); 402 args->tfRegister[IDE_DATA_OFFSET] = (data) & 0xFF; 403 args->hobRegister[IDE_DATA_OFFSET] = (data >> 8) & 0xFF; 404 } 405 args->tfRegister[IDE_ERROR_OFFSET] = err; 406 /* be sure we're looking at the low order bits */ 407 hwif->OUTB(drive->ctl & ~0x80, IDE_CONTROL_REG); 408 args->tfRegister[IDE_NSECTOR_OFFSET] = hwif->INB(IDE_NSECTOR_REG); 409 args->tfRegister[IDE_SECTOR_OFFSET] = hwif->INB(IDE_SECTOR_REG); 410 args->tfRegister[IDE_LCYL_OFFSET] = hwif->INB(IDE_LCYL_REG); 411 args->tfRegister[IDE_HCYL_OFFSET] = hwif->INB(IDE_HCYL_REG); 412 args->tfRegister[IDE_SELECT_OFFSET] = hwif->INB(IDE_SELECT_REG); 413 args->tfRegister[IDE_STATUS_OFFSET] = stat; 414 415 if (drive->addressing == 1) { 416 hwif->OUTB(drive->ctl|0x80, IDE_CONTROL_REG); 417 args->hobRegister[IDE_FEATURE_OFFSET] = hwif->INB(IDE_FEATURE_REG); 418 args->hobRegister[IDE_NSECTOR_OFFSET] = hwif->INB(IDE_NSECTOR_REG); 419 args->hobRegister[IDE_SECTOR_OFFSET] = hwif->INB(IDE_SECTOR_REG); 420 args->hobRegister[IDE_LCYL_OFFSET] = hwif->INB(IDE_LCYL_REG); 421 args->hobRegister[IDE_HCYL_OFFSET] = hwif->INB(IDE_HCYL_REG); 422 } 423 } 424 } else if (blk_pm_request(rq)) { 425 struct request_pm_state *pm = rq->end_io_data; 426#ifdef DEBUG_PM 427 printk("%s: complete_power_step(step: %d, stat: %x, err: %x)\n", 428 drive->name, rq->pm->pm_step, stat, err); 429#endif 430 ide_complete_power_step(drive, rq, stat, err); 431 if (pm->pm_step == ide_pm_state_completed) 432 ide_complete_pm_request(drive, rq); 433 return; 434 } 435 436 spin_lock_irqsave(&ide_lock, flags); 437 blkdev_dequeue_request(rq); 438 HWGROUP(drive)->rq = NULL; 439 rq->errors = err; 440 end_that_request_last(rq, !rq->errors); 441 spin_unlock_irqrestore(&ide_lock, flags); 442} 443 444EXPORT_SYMBOL(ide_end_drive_cmd); 445 446/** 447 * try_to_flush_leftover_data - flush junk 448 * @drive: drive to flush 449 * 450 * try_to_flush_leftover_data() is invoked in response to a drive 451 * unexpectedly having its DRQ_STAT bit set. As an alternative to 452 * resetting the drive, this routine tries to clear the condition 453 * by read a sector's worth of data from the drive. Of course, 454 * this may not help if the drive is *waiting* for data from *us*. 455 */ 456static void try_to_flush_leftover_data (ide_drive_t *drive) 457{ 458 int i = (drive->mult_count ? drive->mult_count : 1) * SECTOR_WORDS; 459 460 if (drive->media != ide_disk) 461 return; 462 while (i > 0) { 463 u32 buffer[16]; 464 u32 wcount = (i > 16) ? 16 : i; 465 466 i -= wcount; 467 HWIF(drive)->ata_input_data(drive, buffer, wcount); 468 } 469} 470 471static void ide_kill_rq(ide_drive_t *drive, struct request *rq) 472{ 473 if (rq->rq_disk) { 474 ide_driver_t *drv; 475 476 drv = *(ide_driver_t **)rq->rq_disk->private_data; 477 drv->end_request(drive, 0, 0); 478 } else 479 ide_end_request(drive, 0, 0); 480} 481 482static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err) 483{ 484 ide_hwif_t *hwif = drive->hwif; 485 486 if (stat & BUSY_STAT || ((stat & WRERR_STAT) && !drive->nowerr)) { 487 /* other bits are useless when BUSY */ 488 rq->errors |= ERROR_RESET; 489 } else if (stat & ERR_STAT) { 490 /* err has different meaning on cdrom and tape */ 491 if (err == ABRT_ERR) { 492 if (drive->select.b.lba && 493 /* some newer drives don't support WIN_SPECIFY */ 494 hwif->INB(IDE_COMMAND_REG) == WIN_SPECIFY) 495 return ide_stopped; 496 } else if ((err & BAD_CRC) == BAD_CRC) { 497 /* UDMA crc error, just retry the operation */ 498 drive->crc_count++; 499 } else if (err & (BBD_ERR | ECC_ERR)) { 500 /* retries won't help these */ 501 rq->errors = ERROR_MAX; 502 } else if (err & TRK0_ERR) { 503 /* help it find track zero */ 504 rq->errors |= ERROR_RECAL; 505 } 506 } 507 508 if ((stat & DRQ_STAT) && rq_data_dir(rq) == READ) 509 try_to_flush_leftover_data(drive); 510 511 if (hwif->INB(IDE_STATUS_REG) & (BUSY_STAT|DRQ_STAT)) 512 /* force an abort */ 513 hwif->OUTB(WIN_IDLEIMMEDIATE, IDE_COMMAND_REG); 514 515 if (rq->errors >= ERROR_MAX || blk_noretry_request(rq)) 516 ide_kill_rq(drive, rq); 517 else { 518 if ((rq->errors & ERROR_RESET) == ERROR_RESET) { 519 ++rq->errors; 520 return ide_do_reset(drive); 521 } 522 if ((rq->errors & ERROR_RECAL) == ERROR_RECAL) 523 drive->special.b.recalibrate = 1; 524 ++rq->errors; 525 } 526 return ide_stopped; 527} 528 529static ide_startstop_t ide_atapi_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err) 530{ 531 ide_hwif_t *hwif = drive->hwif; 532 533 if (stat & BUSY_STAT || ((stat & WRERR_STAT) && !drive->nowerr)) { 534 /* other bits are useless when BUSY */ 535 rq->errors |= ERROR_RESET; 536 } else { 537 /* add decoding error stuff */ 538 } 539 540 if (hwif->INB(IDE_STATUS_REG) & (BUSY_STAT|DRQ_STAT)) 541 /* force an abort */ 542 hwif->OUTB(WIN_IDLEIMMEDIATE, IDE_COMMAND_REG); 543 544 if (rq->errors >= ERROR_MAX) { 545 ide_kill_rq(drive, rq); 546 } else { 547 if ((rq->errors & ERROR_RESET) == ERROR_RESET) { 548 ++rq->errors; 549 return ide_do_reset(drive); 550 } 551 ++rq->errors; 552 } 553 554 return ide_stopped; 555} 556 557ide_startstop_t 558__ide_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err) 559{ 560 if (drive->media == ide_disk) 561 return ide_ata_error(drive, rq, stat, err); 562 return ide_atapi_error(drive, rq, stat, err); 563} 564 565EXPORT_SYMBOL_GPL(__ide_error); 566 567/** 568 * ide_error - handle an error on the IDE 569 * @drive: drive the error occurred on 570 * @msg: message to report 571 * @stat: status bits 572 * 573 * ide_error() takes action based on the error returned by the drive. 574 * For normal I/O that may well include retries. We deal with 575 * both new-style (taskfile) and old style command handling here. 576 * In the case of taskfile command handling there is work left to 577 * do 578 */ 579 580ide_startstop_t ide_error (ide_drive_t *drive, const char *msg, u8 stat) 581{ 582 struct request *rq; 583 u8 err; 584 585 err = ide_dump_status(drive, msg, stat); 586 587 if ((rq = HWGROUP(drive)->rq) == NULL) 588 return ide_stopped; 589 590 /* retry only "normal" I/O: */ 591 if (rq->flags & (REQ_DRIVE_CMD | REQ_DRIVE_TASK | REQ_DRIVE_TASKFILE)) { 592 rq->errors = 1; 593 ide_end_drive_cmd(drive, stat, err); 594 return ide_stopped; 595 } 596 597 if (rq->rq_disk) { 598 ide_driver_t *drv; 599 600 drv = *(ide_driver_t **)rq->rq_disk->private_data; 601 return drv->error(drive, rq, stat, err); 602 } else 603 return __ide_error(drive, rq, stat, err); 604} 605 606EXPORT_SYMBOL_GPL(ide_error); 607 608ide_startstop_t __ide_abort(ide_drive_t *drive, struct request *rq) 609{ 610 if (drive->media != ide_disk) 611 rq->errors |= ERROR_RESET; 612 613 ide_kill_rq(drive, rq); 614 615 return ide_stopped; 616} 617 618EXPORT_SYMBOL_GPL(__ide_abort); 619 620/** 621 * ide_abort - abort pending IDE operations 622 * @drive: drive the error occurred on 623 * @msg: message to report 624 * 625 * ide_abort kills and cleans up when we are about to do a 626 * host initiated reset on active commands. Longer term we 627 * want handlers to have sensible abort handling themselves 628 * 629 * This differs fundamentally from ide_error because in 630 * this case the command is doing just fine when we 631 * blow it away. 632 */ 633 634ide_startstop_t ide_abort(ide_drive_t *drive, const char *msg) 635{ 636 struct request *rq; 637 638 if (drive == NULL || (rq = HWGROUP(drive)->rq) == NULL) 639 return ide_stopped; 640 641 /* retry only "normal" I/O: */ 642 if (rq->flags & (REQ_DRIVE_CMD | REQ_DRIVE_TASK | REQ_DRIVE_TASKFILE)) { 643 rq->errors = 1; 644 ide_end_drive_cmd(drive, BUSY_STAT, 0); 645 return ide_stopped; 646 } 647 648 if (rq->rq_disk) { 649 ide_driver_t *drv; 650 651 drv = *(ide_driver_t **)rq->rq_disk->private_data; 652 return drv->abort(drive, rq); 653 } else 654 return __ide_abort(drive, rq); 655} 656 657/** 658 * ide_cmd - issue a simple drive command 659 * @drive: drive the command is for 660 * @cmd: command byte 661 * @nsect: sector byte 662 * @handler: handler for the command completion 663 * 664 * Issue a simple drive command with interrupts. 665 * The drive must be selected beforehand. 666 */ 667 668static void ide_cmd (ide_drive_t *drive, u8 cmd, u8 nsect, 669 ide_handler_t *handler) 670{ 671 ide_hwif_t *hwif = HWIF(drive); 672 if (IDE_CONTROL_REG) 673 hwif->OUTB(drive->ctl,IDE_CONTROL_REG); /* clear nIEN */ 674 SELECT_MASK(drive,0); 675 hwif->OUTB(nsect,IDE_NSECTOR_REG); 676 ide_execute_command(drive, cmd, handler, WAIT_CMD, NULL); 677} 678 679/** 680 * drive_cmd_intr - drive command completion interrupt 681 * @drive: drive the completion interrupt occurred on 682 * 683 * drive_cmd_intr() is invoked on completion of a special DRIVE_CMD. 684 * We do any necessary data reading and then wait for the drive to 685 * go non busy. At that point we may read the error data and complete 686 * the request 687 */ 688 689static ide_startstop_t drive_cmd_intr (ide_drive_t *drive) 690{ 691 struct request *rq = HWGROUP(drive)->rq; 692 ide_hwif_t *hwif = HWIF(drive); 693 u8 *args = (u8 *) rq->buffer; 694 u8 stat = hwif->INB(IDE_STATUS_REG); 695 int retries = 10; 696 697 local_irq_enable(); 698 if ((stat & DRQ_STAT) && args && args[3]) { 699 u8 io_32bit = drive->io_32bit; 700 drive->io_32bit = 0; 701 hwif->ata_input_data(drive, &args[4], args[3] * SECTOR_WORDS); 702 drive->io_32bit = io_32bit; 703 while (((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) && retries--) 704 udelay(100); 705 } 706 707 if (!OK_STAT(stat, READY_STAT, BAD_STAT)) 708 return ide_error(drive, "drive_cmd", stat); 709 /* calls ide_end_drive_cmd */ 710 ide_end_drive_cmd(drive, stat, hwif->INB(IDE_ERROR_REG)); 711 return ide_stopped; 712} 713 714static void ide_init_specify_cmd(ide_drive_t *drive, ide_task_t *task) 715{ 716 task->tfRegister[IDE_NSECTOR_OFFSET] = drive->sect; 717 task->tfRegister[IDE_SECTOR_OFFSET] = drive->sect; 718 task->tfRegister[IDE_LCYL_OFFSET] = drive->cyl; 719 task->tfRegister[IDE_HCYL_OFFSET] = drive->cyl>>8; 720 task->tfRegister[IDE_SELECT_OFFSET] = ((drive->head-1)|drive->select.all)&0xBF; 721 task->tfRegister[IDE_COMMAND_OFFSET] = WIN_SPECIFY; 722 723 task->handler = &set_geometry_intr; 724} 725 726static void ide_init_restore_cmd(ide_drive_t *drive, ide_task_t *task) 727{ 728 task->tfRegister[IDE_NSECTOR_OFFSET] = drive->sect; 729 task->tfRegister[IDE_COMMAND_OFFSET] = WIN_RESTORE; 730 731 task->handler = &recal_intr; 732} 733 734static void ide_init_setmult_cmd(ide_drive_t *drive, ide_task_t *task) 735{ 736 task->tfRegister[IDE_NSECTOR_OFFSET] = drive->mult_req; 737 task->tfRegister[IDE_COMMAND_OFFSET] = WIN_SETMULT; 738 739 task->handler = &set_multmode_intr; 740} 741 742static ide_startstop_t ide_disk_special(ide_drive_t *drive) 743{ 744 special_t *s = &drive->special; 745 ide_task_t args; 746 747 memset(&args, 0, sizeof(ide_task_t)); 748 args.command_type = IDE_DRIVE_TASK_NO_DATA; 749 750 if (s->b.set_geometry) { 751 s->b.set_geometry = 0; 752 ide_init_specify_cmd(drive, &args); 753 } else if (s->b.recalibrate) { 754 s->b.recalibrate = 0; 755 ide_init_restore_cmd(drive, &args); 756 } else if (s->b.set_multmode) { 757 s->b.set_multmode = 0; 758 if (drive->mult_req > drive->id->max_multsect) 759 drive->mult_req = drive->id->max_multsect; 760 ide_init_setmult_cmd(drive, &args); 761 } else if (s->all) { 762 int special = s->all; 763 s->all = 0; 764 printk(KERN_ERR "%s: bad special flag: 0x%02x\n", drive->name, special); 765 return ide_stopped; 766 } 767 768 do_rw_taskfile(drive, &args); 769 770 return ide_started; 771} 772 773/** 774 * do_special - issue some special commands 775 * @drive: drive the command is for 776 * 777 * do_special() is used to issue WIN_SPECIFY, WIN_RESTORE, and WIN_SETMULT 778 * commands to a drive. It used to do much more, but has been scaled 779 * back. 780 */ 781 782static ide_startstop_t do_special (ide_drive_t *drive) 783{ 784 special_t *s = &drive->special; 785 786#ifdef DEBUG 787 printk("%s: do_special: 0x%02x\n", drive->name, s->all); 788#endif 789 if (s->b.set_tune) { 790 s->b.set_tune = 0; 791 if (HWIF(drive)->tuneproc != NULL) 792 HWIF(drive)->tuneproc(drive, drive->tune_req); 793 return ide_stopped; 794 } else { 795 if (drive->media == ide_disk) 796 return ide_disk_special(drive); 797 798 s->all = 0; 799 drive->mult_req = 0; 800 return ide_stopped; 801 } 802} 803 804void ide_map_sg(ide_drive_t *drive, struct request *rq) 805{ 806 ide_hwif_t *hwif = drive->hwif; 807 struct scatterlist *sg = hwif->sg_table; 808 809 if (hwif->sg_mapped) /* needed by ide-scsi */ 810 return; 811 812 if ((rq->flags & REQ_DRIVE_TASKFILE) == 0) { 813 hwif->sg_nents = blk_rq_map_sg(drive->queue, rq, sg); 814 } else { 815 sg_init_one(sg, rq->buffer, rq->nr_sectors * SECTOR_SIZE); 816 hwif->sg_nents = 1; 817 } 818} 819 820EXPORT_SYMBOL_GPL(ide_map_sg); 821 822void ide_init_sg_cmd(ide_drive_t *drive, struct request *rq) 823{ 824 ide_hwif_t *hwif = drive->hwif; 825 826 hwif->nsect = hwif->nleft = rq->nr_sectors; 827 hwif->cursg = hwif->cursg_ofs = 0; 828} 829 830EXPORT_SYMBOL_GPL(ide_init_sg_cmd); 831 832/** 833 * execute_drive_command - issue special drive command 834 * @drive: the drive to issue the command on 835 * @rq: the request structure holding the command 836 * 837 * execute_drive_cmd() issues a special drive command, usually 838 * initiated by ioctl() from the external hdparm program. The 839 * command can be a drive command, drive task or taskfile 840 * operation. Weirdly you can call it with NULL to wait for 841 * all commands to finish. Don't do this as that is due to change 842 */ 843 844static ide_startstop_t execute_drive_cmd (ide_drive_t *drive, 845 struct request *rq) 846{ 847 ide_hwif_t *hwif = HWIF(drive); 848 if (rq->flags & REQ_DRIVE_TASKFILE) { 849 ide_task_t *args = rq->special; 850 851 if (!args) 852 goto done; 853 854 hwif->data_phase = args->data_phase; 855 856 switch (hwif->data_phase) { 857 case TASKFILE_MULTI_OUT: 858 case TASKFILE_OUT: 859 case TASKFILE_MULTI_IN: 860 case TASKFILE_IN: 861 ide_init_sg_cmd(drive, rq); 862 ide_map_sg(drive, rq); 863 default: 864 break; 865 } 866 867 if (args->tf_out_flags.all != 0) 868 return flagged_taskfile(drive, args); 869 return do_rw_taskfile(drive, args); 870 } else if (rq->flags & REQ_DRIVE_TASK) { 871 u8 *args = rq->buffer; 872 u8 sel; 873 874 if (!args) 875 goto done; 876#ifdef DEBUG 877 printk("%s: DRIVE_TASK_CMD ", drive->name); 878 printk("cmd=0x%02x ", args[0]); 879 printk("fr=0x%02x ", args[1]); 880 printk("ns=0x%02x ", args[2]); 881 printk("sc=0x%02x ", args[3]); 882 printk("lcyl=0x%02x ", args[4]); 883 printk("hcyl=0x%02x ", args[5]); 884 printk("sel=0x%02x\n", args[6]); 885#endif 886 hwif->OUTB(args[1], IDE_FEATURE_REG); 887 hwif->OUTB(args[3], IDE_SECTOR_REG); 888 hwif->OUTB(args[4], IDE_LCYL_REG); 889 hwif->OUTB(args[5], IDE_HCYL_REG); 890 sel = (args[6] & ~0x10); 891 if (drive->select.b.unit) 892 sel |= 0x10; 893 hwif->OUTB(sel, IDE_SELECT_REG); 894 ide_cmd(drive, args[0], args[2], &drive_cmd_intr); 895 return ide_started; 896 } else if (rq->flags & REQ_DRIVE_CMD) { 897 u8 *args = rq->buffer; 898 899 if (!args) 900 goto done; 901#ifdef DEBUG 902 printk("%s: DRIVE_CMD ", drive->name); 903 printk("cmd=0x%02x ", args[0]); 904 printk("sc=0x%02x ", args[1]); 905 printk("fr=0x%02x ", args[2]); 906 printk("xx=0x%02x\n", args[3]); 907#endif 908 if (args[0] == WIN_SMART) { 909 hwif->OUTB(0x4f, IDE_LCYL_REG); 910 hwif->OUTB(0xc2, IDE_HCYL_REG); 911 hwif->OUTB(args[2],IDE_FEATURE_REG); 912 hwif->OUTB(args[1],IDE_SECTOR_REG); 913 ide_cmd(drive, args[0], args[3], &drive_cmd_intr); 914 return ide_started; 915 } 916 hwif->OUTB(args[2],IDE_FEATURE_REG); 917 ide_cmd(drive, args[0], args[1], &drive_cmd_intr); 918 return ide_started; 919 } 920 921done: 922 /* 923 * NULL is actually a valid way of waiting for 924 * all current requests to be flushed from the queue. 925 */ 926#ifdef DEBUG 927 printk("%s: DRIVE_CMD (null)\n", drive->name); 928#endif 929 ide_end_drive_cmd(drive, 930 hwif->INB(IDE_STATUS_REG), 931 hwif->INB(IDE_ERROR_REG)); 932 return ide_stopped; 933} 934 935static void ide_check_pm_state(ide_drive_t *drive, struct request *rq) 936{ 937 struct request_pm_state *pm = rq->end_io_data; 938 939 if (blk_pm_suspend_request(rq) && 940 pm->pm_step == ide_pm_state_start_suspend) 941 /* Mark drive blocked when starting the suspend sequence. */ 942 drive->blocked = 1; 943 else if (blk_pm_resume_request(rq) && 944 pm->pm_step == ide_pm_state_start_resume) { 945 /* 946 * The first thing we do on wakeup is to wait for BSY bit to 947 * go away (with a looong timeout) as a drive on this hwif may 948 * just be POSTing itself. 949 * We do that before even selecting as the "other" device on 950 * the bus may be broken enough to walk on our toes at this 951 * point. 952 */ 953 int rc; 954#ifdef DEBUG_PM 955 printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name); 956#endif 957 rc = ide_wait_not_busy(HWIF(drive), 35000); 958 if (rc) 959 printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name); 960 SELECT_DRIVE(drive); 961 HWIF(drive)->OUTB(8, HWIF(drive)->io_ports[IDE_CONTROL_OFFSET]); 962 rc = ide_wait_not_busy(HWIF(drive), 10000); 963 if (rc) 964 printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name); 965 } 966} 967 968/** 969 * start_request - start of I/O and command issuing for IDE 970 * 971 * start_request() initiates handling of a new I/O request. It 972 * accepts commands and I/O (read/write) requests. It also does 973 * the final remapping for weird stuff like EZDrive. Once 974 * device mapper can work sector level the EZDrive stuff can go away 975 * 976 * FIXME: this function needs a rename 977 */ 978 979static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq) 980{ 981 ide_startstop_t startstop; 982 sector_t block; 983 984 BUG_ON(!(rq->flags & REQ_STARTED)); 985 986#ifdef DEBUG 987 printk("%s: start_request: current=0x%08lx\n", 988 HWIF(drive)->name, (unsigned long) rq); 989#endif 990 991 /* bail early if we've exceeded max_failures */ 992 if (drive->max_failures && (drive->failures > drive->max_failures)) { 993 goto kill_rq; 994 } 995 996 block = rq->sector; 997 if (blk_fs_request(rq) && 998 (drive->media == ide_disk || drive->media == ide_floppy)) { 999 block += drive->sect0; 1000 } 1001 /* Yecch - this will shift the entire interval, 1002 possibly killing some innocent following sector */ 1003 if (block == 0 && drive->remap_0_to_1 == 1) 1004 block = 1; /* redirect MBR access to EZ-Drive partn table */ 1005 1006 if (blk_pm_request(rq)) 1007 ide_check_pm_state(drive, rq); 1008 1009 SELECT_DRIVE(drive); 1010 if (ide_wait_stat(&startstop, drive, drive->ready_stat, BUSY_STAT|DRQ_STAT, WAIT_READY)) { 1011 printk(KERN_ERR "%s: drive not ready for command\n", drive->name); 1012 return startstop; 1013 } 1014 if (!drive->special.all) { 1015 ide_driver_t *drv; 1016 1017 if (rq->flags & (REQ_DRIVE_CMD | REQ_DRIVE_TASK)) 1018 return execute_drive_cmd(drive, rq); 1019 else if (rq->flags & REQ_DRIVE_TASKFILE) 1020 return execute_drive_cmd(drive, rq); 1021 else if (blk_pm_request(rq)) { 1022 struct request_pm_state *pm = rq->end_io_data; 1023#ifdef DEBUG_PM 1024 printk("%s: start_power_step(step: %d)\n", 1025 drive->name, rq->pm->pm_step); 1026#endif 1027 startstop = ide_start_power_step(drive, rq); 1028 if (startstop == ide_stopped && 1029 pm->pm_step == ide_pm_state_completed) 1030 ide_complete_pm_request(drive, rq); 1031 return startstop; 1032 } 1033 1034 drv = *(ide_driver_t **)rq->rq_disk->private_data; 1035 return drv->do_request(drive, rq, block); 1036 } 1037 return do_special(drive); 1038kill_rq: 1039 ide_kill_rq(drive, rq); 1040 return ide_stopped; 1041} 1042 1043/** 1044 * ide_stall_queue - pause an IDE device 1045 * @drive: drive to stall 1046 * @timeout: time to stall for (jiffies) 1047 * 1048 * ide_stall_queue() can be used by a drive to give excess bandwidth back 1049 * to the hwgroup by sleeping for timeout jiffies. 1050 */ 1051 1052void ide_stall_queue (ide_drive_t *drive, unsigned long timeout) 1053{ 1054 if (timeout > WAIT_WORSTCASE) 1055 timeout = WAIT_WORSTCASE; 1056 drive->sleep = timeout + jiffies; 1057 drive->sleeping = 1; 1058} 1059 1060EXPORT_SYMBOL(ide_stall_queue); 1061 1062#define WAKEUP(drive) ((drive)->service_start + 2 * (drive)->service_time) 1063 1064/** 1065 * choose_drive - select a drive to service 1066 * @hwgroup: hardware group to select on 1067 * 1068 * choose_drive() selects the next drive which will be serviced. 1069 * This is necessary because the IDE layer can't issue commands 1070 * to both drives on the same cable, unlike SCSI. 1071 */ 1072 1073static inline ide_drive_t *choose_drive (ide_hwgroup_t *hwgroup) 1074{ 1075 ide_drive_t *drive, *best; 1076 1077repeat: 1078 best = NULL; 1079 drive = hwgroup->drive; 1080 1081 /* 1082 * drive is doing pre-flush, ordered write, post-flush sequence. even 1083 * though that is 3 requests, it must be seen as a single transaction. 1084 * we must not preempt this drive until that is complete 1085 */ 1086 if (blk_queue_flushing(drive->queue)) { 1087 /* 1088 * small race where queue could get replugged during 1089 * the 3-request flush cycle, just yank the plug since 1090 * we want it to finish asap 1091 */ 1092 blk_remove_plug(drive->queue); 1093 return drive; 1094 } 1095 1096 do { 1097 if ((!drive->sleeping || time_after_eq(jiffies, drive->sleep)) 1098 && !elv_queue_empty(drive->queue)) { 1099 if (!best 1100 || (drive->sleeping && (!best->sleeping || time_before(drive->sleep, best->sleep))) 1101 || (!best->sleeping && time_before(WAKEUP(drive), WAKEUP(best)))) 1102 { 1103 if (!blk_queue_plugged(drive->queue)) 1104 best = drive; 1105 } 1106 } 1107 } while ((drive = drive->next) != hwgroup->drive); 1108 if (best && best->nice1 && !best->sleeping && best != hwgroup->drive && best->service_time > WAIT_MIN_SLEEP) { 1109 long t = (signed long)(WAKEUP(best) - jiffies); 1110 if (t >= WAIT_MIN_SLEEP) { 1111 /* 1112 * We *may* have some time to spare, but first let's see if 1113 * someone can potentially benefit from our nice mood today.. 1114 */ 1115 drive = best->next; 1116 do { 1117 if (!drive->sleeping 1118 && time_before(jiffies - best->service_time, WAKEUP(drive)) 1119 && time_before(WAKEUP(drive), jiffies + t)) 1120 { 1121 ide_stall_queue(best, min_t(long, t, 10 * WAIT_MIN_SLEEP)); 1122 goto repeat; 1123 } 1124 } while ((drive = drive->next) != best); 1125 } 1126 } 1127 return best; 1128} 1129 1130/* 1131 * Issue a new request to a drive from hwgroup 1132 * Caller must have already done spin_lock_irqsave(&ide_lock, ..); 1133 * 1134 * A hwgroup is a serialized group of IDE interfaces. Usually there is 1135 * exactly one hwif (interface) per hwgroup, but buggy controllers (eg. CMD640) 1136 * may have both interfaces in a single hwgroup to "serialize" access. 1137 * Or possibly multiple ISA interfaces can share a common IRQ by being grouped 1138 * together into one hwgroup for serialized access. 1139 * 1140 * Note also that several hwgroups can end up sharing a single IRQ, 1141 * possibly along with many other devices. This is especially common in 1142 * PCI-based systems with off-board IDE controller cards. 1143 * 1144 * The IDE driver uses the single global ide_lock spinlock to protect 1145 * access to the request queues, and to protect the hwgroup->busy flag. 1146 * 1147 * The first thread into the driver for a particular hwgroup sets the 1148 * hwgroup->busy flag to indicate that this hwgroup is now active, 1149 * and then initiates processing of the top request from the request queue. 1150 * 1151 * Other threads attempting entry notice the busy setting, and will simply 1152 * queue their new requests and exit immediately. Note that hwgroup->busy 1153 * remains set even when the driver is merely awaiting the next interrupt. 1154 * Thus, the meaning is "this hwgroup is busy processing a request". 1155 * 1156 * When processing of a request completes, the completing thread or IRQ-handler 1157 * will start the next request from the queue. If no more work remains, 1158 * the driver will clear the hwgroup->busy flag and exit. 1159 * 1160 * The ide_lock (spinlock) is used to protect all access to the 1161 * hwgroup->busy flag, but is otherwise not needed for most processing in 1162 * the driver. This makes the driver much more friendlier to shared IRQs 1163 * than previous designs, while remaining 100% (?) SMP safe and capable. 1164 */ 1165static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq) 1166{ 1167 ide_drive_t *drive; 1168 ide_hwif_t *hwif; 1169 struct request *rq; 1170 ide_startstop_t startstop; 1171 int loops = 0; 1172 1173 /* for atari only: POSSIBLY BROKEN HERE(?) */ 1174 ide_get_lock(ide_intr, hwgroup); 1175 1176 /* caller must own ide_lock */ 1177 BUG_ON(!irqs_disabled()); 1178 1179 while (!hwgroup->busy) { 1180 hwgroup->busy = 1; 1181 drive = choose_drive(hwgroup); 1182 if (drive == NULL) { 1183 int sleeping = 0; 1184 unsigned long sleep = 0; /* shut up, gcc */ 1185 hwgroup->rq = NULL; 1186 drive = hwgroup->drive; 1187 do { 1188 if (drive->sleeping && (!sleeping || time_before(drive->sleep, sleep))) { 1189 sleeping = 1; 1190 sleep = drive->sleep; 1191 } 1192 } while ((drive = drive->next) != hwgroup->drive); 1193 if (sleeping) { 1194 /* 1195 * Take a short snooze, and then wake up this hwgroup again. 1196 * This gives other hwgroups on the same a chance to 1197 * play fairly with us, just in case there are big differences 1198 * in relative throughputs.. don't want to hog the cpu too much. 1199 */ 1200 if (time_before(sleep, jiffies + WAIT_MIN_SLEEP)) 1201 sleep = jiffies + WAIT_MIN_SLEEP; 1202#if 1 1203 if (timer_pending(&hwgroup->timer)) 1204 printk(KERN_CRIT "ide_set_handler: timer already active\n"); 1205#endif 1206 /* so that ide_timer_expiry knows what to do */ 1207 hwgroup->sleeping = 1; 1208 mod_timer(&hwgroup->timer, sleep); 1209 /* we purposely leave hwgroup->busy==1 1210 * while sleeping */ 1211 } else { 1212 /* Ugly, but how can we sleep for the lock 1213 * otherwise? perhaps from tq_disk? 1214 */ 1215 1216 /* for atari only */ 1217 ide_release_lock(); 1218 hwgroup->busy = 0; 1219 } 1220 1221 /* no more work for this hwgroup (for now) */ 1222 return; 1223 } 1224 again: 1225 hwif = HWIF(drive); 1226 if (hwgroup->hwif->sharing_irq && 1227 hwif != hwgroup->hwif && 1228 hwif->io_ports[IDE_CONTROL_OFFSET]) { 1229 /* set nIEN for previous hwif */ 1230 SELECT_INTERRUPT(drive); 1231 } 1232 hwgroup->hwif = hwif; 1233 hwgroup->drive = drive; 1234 drive->sleeping = 0; 1235 drive->service_start = jiffies; 1236 1237 if (blk_queue_plugged(drive->queue)) { 1238 printk(KERN_ERR "ide: huh? queue was plugged!\n"); 1239 break; 1240 } 1241 1242 /* 1243 * we know that the queue isn't empty, but this can happen 1244 * if the q->prep_rq_fn() decides to kill a request 1245 */ 1246 rq = elv_next_request(drive->queue); 1247 if (!rq) { 1248 hwgroup->busy = 0; 1249 break; 1250 } 1251 1252 /* 1253 * Sanity: don't accept a request that isn't a PM request 1254 * if we are currently power managed. This is very important as 1255 * blk_stop_queue() doesn't prevent the elv_next_request() 1256 * above to return us whatever is in the queue. Since we call 1257 * ide_do_request() ourselves, we end up taking requests while 1258 * the queue is blocked... 1259 * 1260 * We let requests forced at head of queue with ide-preempt 1261 * though. I hope that doesn't happen too much, hopefully not 1262 * unless the subdriver triggers such a thing in its own PM 1263 * state machine. 1264 * 1265 * We count how many times we loop here to make sure we service 1266 * all drives in the hwgroup without looping for ever 1267 */ 1268 if (drive->blocked && !blk_pm_request(rq) && !(rq->flags & REQ_PREEMPT)) { 1269 drive = drive->next ? drive->next : hwgroup->drive; 1270 if (loops++ < 4 && !blk_queue_plugged(drive->queue)) 1271 goto again; 1272 /* We clear busy, there should be no pending ATA command at this point. */ 1273 hwgroup->busy = 0; 1274 break; 1275 } 1276 1277 hwgroup->rq = rq; 1278 1279 /* 1280 * Some systems have trouble with IDE IRQs arriving while 1281 * the driver is still setting things up. So, here we disable 1282 * the IRQ used by this interface while the request is being started. 1283 * This may look bad at first, but pretty much the same thing 1284 * happens anyway when any interrupt comes in, IDE or otherwise 1285 * -- the kernel masks the IRQ while it is being handled. 1286 */ 1287 if (masked_irq != IDE_NO_IRQ && hwif->irq != masked_irq) 1288 disable_irq_nosync(hwif->irq); 1289 spin_unlock(&ide_lock); 1290 local_irq_enable(); 1291 /* allow other IRQs while we start this request */ 1292 startstop = start_request(drive, rq); 1293 spin_lock_irq(&ide_lock); 1294 if (masked_irq != IDE_NO_IRQ && hwif->irq != masked_irq) 1295 enable_irq(hwif->irq); 1296 if (startstop == ide_stopped) 1297 hwgroup->busy = 0; 1298 } 1299} 1300 1301/* 1302 * Passes the stuff to ide_do_request 1303 */ 1304void do_ide_request(request_queue_t *q) 1305{ 1306 ide_drive_t *drive = q->queuedata; 1307 1308 ide_do_request(HWGROUP(drive), IDE_NO_IRQ); 1309} 1310 1311/* 1312 * un-busy the hwgroup etc, and clear any pending DMA status. we want to 1313 * retry the current request in pio mode instead of risking tossing it 1314 * all away 1315 */ 1316static ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error) 1317{ 1318 ide_hwif_t *hwif = HWIF(drive); 1319 struct request *rq; 1320 ide_startstop_t ret = ide_stopped; 1321 1322 /* 1323 * end current dma transaction 1324 */ 1325 1326 if (error < 0) { 1327 printk(KERN_WARNING "%s: DMA timeout error\n", drive->name); 1328 (void)HWIF(drive)->ide_dma_end(drive); 1329 ret = ide_error(drive, "dma timeout error", 1330 hwif->INB(IDE_STATUS_REG)); 1331 } else { 1332 printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name); 1333 (void) hwif->ide_dma_timeout(drive); 1334 } 1335 1336 /* 1337 * disable dma for now, but remember that we did so because of 1338 * a timeout -- we'll reenable after we finish this next request 1339 * (or rather the first chunk of it) in pio. 1340 */ 1341 drive->retry_pio++; 1342 drive->state = DMA_PIO_RETRY; 1343 (void) hwif->ide_dma_off_quietly(drive); 1344 1345 /* 1346 * un-busy drive etc (hwgroup->busy is cleared on return) and 1347 * make sure request is sane 1348 */ 1349 rq = HWGROUP(drive)->rq; 1350 HWGROUP(drive)->rq = NULL; 1351 1352 rq->errors = 0; 1353 1354 if (!rq->bio) 1355 goto out; 1356 1357 rq->sector = rq->bio->bi_sector; 1358 rq->current_nr_sectors = bio_iovec(rq->bio)->bv_len >> 9; 1359 rq->hard_cur_sectors = rq->current_nr_sectors; 1360 rq->buffer = bio_data(rq->bio); 1361out: 1362 return ret; 1363} 1364 1365/** 1366 * ide_timer_expiry - handle lack of an IDE interrupt 1367 * @data: timer callback magic (hwgroup) 1368 * 1369 * An IDE command has timed out before the expected drive return 1370 * occurred. At this point we attempt to clean up the current 1371 * mess. If the current handler includes an expiry handler then 1372 * we invoke the expiry handler, and providing it is happy the 1373 * work is done. If that fails we apply generic recovery rules 1374 * invoking the handler and checking the drive DMA status. We 1375 * have an excessively incestuous relationship with the DMA 1376 * logic that wants cleaning up. 1377 */ 1378 1379void ide_timer_expiry (unsigned long data) 1380{ 1381 ide_hwgroup_t *hwgroup = (ide_hwgroup_t *) data; 1382 ide_handler_t *handler; 1383 ide_expiry_t *expiry; 1384 unsigned long flags; 1385 unsigned long wait = -1; 1386 1387 spin_lock_irqsave(&ide_lock, flags); 1388 1389 if ((handler = hwgroup->handler) == NULL) { 1390 /* 1391 * Either a marginal timeout occurred 1392 * (got the interrupt just as timer expired), 1393 * or we were "sleeping" to give other devices a chance. 1394 * Either way, we don't really want to complain about anything. 1395 */ 1396 if (hwgroup->sleeping) { 1397 hwgroup->sleeping = 0; 1398 hwgroup->busy = 0; 1399 } 1400 } else { 1401 ide_drive_t *drive = hwgroup->drive; 1402 if (!drive) { 1403 printk(KERN_ERR "ide_timer_expiry: hwgroup->drive was NULL\n"); 1404 hwgroup->handler = NULL; 1405 } else { 1406 ide_hwif_t *hwif; 1407 ide_startstop_t startstop = ide_stopped; 1408 if (!hwgroup->busy) { 1409 hwgroup->busy = 1; /* paranoia */ 1410 printk(KERN_ERR "%s: ide_timer_expiry: hwgroup->busy was 0 ??\n", drive->name); 1411 } 1412 if ((expiry = hwgroup->expiry) != NULL) { 1413 /* continue */ 1414 if ((wait = expiry(drive)) > 0) { 1415 /* reset timer */ 1416 hwgroup->timer.expires = jiffies + wait; 1417 add_timer(&hwgroup->timer); 1418 spin_unlock_irqrestore(&ide_lock, flags); 1419 return; 1420 } 1421 } 1422 hwgroup->handler = NULL; 1423 /* 1424 * We need to simulate a real interrupt when invoking 1425 * the handler() function, which means we need to 1426 * globally mask the specific IRQ: 1427 */ 1428 spin_unlock(&ide_lock); 1429 hwif = HWIF(drive); 1430#if DISABLE_IRQ_NOSYNC 1431 disable_irq_nosync(hwif->irq); 1432#else 1433 /* disable_irq_nosync ?? */ 1434 disable_irq(hwif->irq); 1435#endif /* DISABLE_IRQ_NOSYNC */ 1436 /* local CPU only, 1437 * as if we were handling an interrupt */ 1438 local_irq_disable(); 1439 if (hwgroup->polling) { 1440 startstop = handler(drive); 1441 } else if (drive_is_ready(drive)) { 1442 if (drive->waiting_for_dma) 1443 (void) hwgroup->hwif->ide_dma_lostirq(drive); 1444 (void)ide_ack_intr(hwif); 1445 printk(KERN_WARNING "%s: lost interrupt\n", drive->name); 1446 startstop = handler(drive); 1447 } else { 1448 if (drive->waiting_for_dma) { 1449 startstop = ide_dma_timeout_retry(drive, wait); 1450 } else 1451 startstop = 1452 ide_error(drive, "irq timeout", hwif->INB(IDE_STATUS_REG)); 1453 } 1454 drive->service_time = jiffies - drive->service_start; 1455 spin_lock_irq(&ide_lock); 1456 enable_irq(hwif->irq); 1457 if (startstop == ide_stopped) 1458 hwgroup->busy = 0; 1459 } 1460 } 1461 ide_do_request(hwgroup, IDE_NO_IRQ); 1462 spin_unlock_irqrestore(&ide_lock, flags); 1463} 1464 1465/** 1466 * unexpected_intr - handle an unexpected IDE interrupt 1467 * @irq: interrupt line 1468 * @hwgroup: hwgroup being processed 1469 * 1470 * There's nothing really useful we can do with an unexpected interrupt, 1471 * other than reading the status register (to clear it), and logging it. 1472 * There should be no way that an irq can happen before we're ready for it, 1473 * so we needn't worry much about losing an "important" interrupt here. 1474 * 1475 * On laptops (and "green" PCs), an unexpected interrupt occurs whenever 1476 * the drive enters "idle", "standby", or "sleep" mode, so if the status 1477 * looks "good", we just ignore the interrupt completely. 1478 * 1479 * This routine assumes __cli() is in effect when called. 1480 * 1481 * If an unexpected interrupt happens on irq15 while we are handling irq14 1482 * and if the two interfaces are "serialized" (CMD640), then it looks like 1483 * we could screw up by interfering with a new request being set up for 1484 * irq15. 1485 * 1486 * In reality, this is a non-issue. The new command is not sent unless 1487 * the drive is ready to accept one, in which case we know the drive is 1488 * not trying to interrupt us. And ide_set_handler() is always invoked 1489 * before completing the issuance of any new drive command, so we will not 1490 * be accidentally invoked as a result of any valid command completion 1491 * interrupt. 1492 * 1493 * Note that we must walk the entire hwgroup here. We know which hwif 1494 * is doing the current command, but we don't know which hwif burped 1495 * mysteriously. 1496 */ 1497 1498static void unexpected_intr (int irq, ide_hwgroup_t *hwgroup) 1499{ 1500 u8 stat; 1501 ide_hwif_t *hwif = hwgroup->hwif; 1502 1503 /* 1504 * handle the unexpected interrupt 1505 */ 1506 do { 1507 if (hwif->irq == irq) { 1508 stat = hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]); 1509 if (!OK_STAT(stat, READY_STAT, BAD_STAT)) { 1510 /* Try to not flood the console with msgs */ 1511 static unsigned long last_msgtime, count; 1512 ++count; 1513 if (time_after(jiffies, last_msgtime + HZ)) { 1514 last_msgtime = jiffies; 1515 printk(KERN_ERR "%s%s: unexpected interrupt, " 1516 "status=0x%02x, count=%ld\n", 1517 hwif->name, 1518 (hwif->next==hwgroup->hwif) ? "" : "(?)", stat, count); 1519 } 1520 } 1521 } 1522 } while ((hwif = hwif->next) != hwgroup->hwif); 1523} 1524 1525/** 1526 * ide_intr - default IDE interrupt handler 1527 * @irq: interrupt number 1528 * @dev_id: hwif group 1529 * @regs: unused weirdness from the kernel irq layer 1530 * 1531 * This is the default IRQ handler for the IDE layer. You should 1532 * not need to override it. If you do be aware it is subtle in 1533 * places 1534 * 1535 * hwgroup->hwif is the interface in the group currently performing 1536 * a command. hwgroup->drive is the drive and hwgroup->handler is 1537 * the IRQ handler to call. As we issue a command the handlers 1538 * step through multiple states, reassigning the handler to the 1539 * next step in the process. Unlike a smart SCSI controller IDE 1540 * expects the main processor to sequence the various transfer 1541 * stages. We also manage a poll timer to catch up with most 1542 * timeout situations. There are still a few where the handlers 1543 * don't ever decide to give up. 1544 * 1545 * The handler eventually returns ide_stopped to indicate the 1546 * request completed. At this point we issue the next request 1547 * on the hwgroup and the process begins again. 1548 */ 1549 1550irqreturn_t ide_intr (int irq, void *dev_id, struct pt_regs *regs) 1551{ 1552 unsigned long flags; 1553 ide_hwgroup_t *hwgroup = (ide_hwgroup_t *)dev_id; 1554 ide_hwif_t *hwif; 1555 ide_drive_t *drive; 1556 ide_handler_t *handler; 1557 ide_startstop_t startstop; 1558 1559 spin_lock_irqsave(&ide_lock, flags); 1560 hwif = hwgroup->hwif; 1561 1562 if (!ide_ack_intr(hwif)) { 1563 spin_unlock_irqrestore(&ide_lock, flags); 1564 return IRQ_NONE; 1565 } 1566 1567 if ((handler = hwgroup->handler) == NULL || hwgroup->polling) { 1568 /* 1569 * Not expecting an interrupt from this drive. 1570 * That means this could be: 1571 * (1) an interrupt from another PCI device 1572 * sharing the same PCI INT# as us. 1573 * or (2) a drive just entered sleep or standby mode, 1574 * and is interrupting to let us know. 1575 * or (3) a spurious interrupt of unknown origin. 1576 * 1577 * For PCI, we cannot tell the difference, 1578 * so in that case we just ignore it and hope it goes away. 1579 * 1580 * FIXME: unexpected_intr should be hwif-> then we can 1581 * remove all the ifdef PCI crap 1582 */ 1583#ifdef CONFIG_BLK_DEV_IDEPCI 1584 if (hwif->pci_dev && !hwif->pci_dev->vendor) 1585#endif /* CONFIG_BLK_DEV_IDEPCI */ 1586 { 1587 /* 1588 * Probably not a shared PCI interrupt, 1589 * so we can safely try to do something about it: 1590 */ 1591 unexpected_intr(irq, hwgroup); 1592#ifdef CONFIG_BLK_DEV_IDEPCI 1593 } else { 1594 /* 1595 * Whack the status register, just in case 1596 * we have a leftover pending IRQ. 1597 */ 1598 (void) hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]); 1599#endif /* CONFIG_BLK_DEV_IDEPCI */ 1600 } 1601 spin_unlock_irqrestore(&ide_lock, flags); 1602 return IRQ_NONE; 1603 } 1604 drive = hwgroup->drive; 1605 if (!drive) { 1606 /* 1607 * This should NEVER happen, and there isn't much 1608 * we could do about it here. 1609 * 1610 * [Note - this can occur if the drive is hot unplugged] 1611 */ 1612 spin_unlock_irqrestore(&ide_lock, flags); 1613 return IRQ_HANDLED; 1614 } 1615 if (!drive_is_ready(drive)) { 1616 /* 1617 * This happens regularly when we share a PCI IRQ with 1618 * another device. Unfortunately, it can also happen 1619 * with some buggy drives that trigger the IRQ before 1620 * their status register is up to date. Hopefully we have 1621 * enough advance overhead that the latter isn't a problem. 1622 */ 1623 spin_unlock_irqrestore(&ide_lock, flags); 1624 return IRQ_NONE; 1625 } 1626 if (!hwgroup->busy) { 1627 hwgroup->busy = 1; /* paranoia */ 1628 printk(KERN_ERR "%s: ide_intr: hwgroup->busy was 0 ??\n", drive->name); 1629 } 1630 hwgroup->handler = NULL; 1631 del_timer(&hwgroup->timer); 1632 spin_unlock(&ide_lock); 1633 1634 if (drive->unmask) 1635 local_irq_enable(); 1636 /* service this interrupt, may set handler for next interrupt */ 1637 startstop = handler(drive); 1638 spin_lock_irq(&ide_lock); 1639 1640 /* 1641 * Note that handler() may have set things up for another 1642 * interrupt to occur soon, but it cannot happen until 1643 * we exit from this routine, because it will be the 1644 * same irq as is currently being serviced here, and Linux 1645 * won't allow another of the same (on any CPU) until we return. 1646 */ 1647 drive->service_time = jiffies - drive->service_start; 1648 if (startstop == ide_stopped) { 1649 if (hwgroup->handler == NULL) { /* paranoia */ 1650 hwgroup->busy = 0; 1651 ide_do_request(hwgroup, hwif->irq); 1652 } else { 1653 printk(KERN_ERR "%s: ide_intr: huh? expected NULL handler " 1654 "on exit\n", drive->name); 1655 } 1656 } 1657 spin_unlock_irqrestore(&ide_lock, flags); 1658 return IRQ_HANDLED; 1659} 1660 1661/** 1662 * ide_init_drive_cmd - initialize a drive command request 1663 * @rq: request object 1664 * 1665 * Initialize a request before we fill it in and send it down to 1666 * ide_do_drive_cmd. Commands must be set up by this function. Right 1667 * now it doesn't do a lot, but if that changes abusers will have a 1668 * nasty suprise. 1669 */ 1670 1671void ide_init_drive_cmd (struct request *rq) 1672{ 1673 memset(rq, 0, sizeof(*rq)); 1674 rq->flags = REQ_DRIVE_CMD; 1675 rq->ref_count = 1; 1676} 1677 1678EXPORT_SYMBOL(ide_init_drive_cmd); 1679 1680/** 1681 * ide_do_drive_cmd - issue IDE special command 1682 * @drive: device to issue command 1683 * @rq: request to issue 1684 * @action: action for processing 1685 * 1686 * This function issues a special IDE device request 1687 * onto the request queue. 1688 * 1689 * If action is ide_wait, then the rq is queued at the end of the 1690 * request queue, and the function sleeps until it has been processed. 1691 * This is for use when invoked from an ioctl handler. 1692 * 1693 * If action is ide_preempt, then the rq is queued at the head of 1694 * the request queue, displacing the currently-being-processed 1695 * request and this function returns immediately without waiting 1696 * for the new rq to be completed. This is VERY DANGEROUS, and is 1697 * intended for careful use by the ATAPI tape/cdrom driver code. 1698 * 1699 * If action is ide_end, then the rq is queued at the end of the 1700 * request queue, and the function returns immediately without waiting 1701 * for the new rq to be completed. This is again intended for careful 1702 * use by the ATAPI tape/cdrom driver code. 1703 */ 1704 1705int ide_do_drive_cmd (ide_drive_t *drive, struct request *rq, ide_action_t action) 1706{ 1707 unsigned long flags; 1708 ide_hwgroup_t *hwgroup = HWGROUP(drive); 1709 DECLARE_COMPLETION(wait); 1710 int where = ELEVATOR_INSERT_BACK, err; 1711 int must_wait = (action == ide_wait || action == ide_head_wait); 1712 1713 rq->errors = 0; 1714 rq->rq_status = RQ_ACTIVE; 1715 1716 /* 1717 * we need to hold an extra reference to request for safe inspection 1718 * after completion 1719 */ 1720 if (must_wait) { 1721 rq->ref_count++; 1722 rq->waiting = &wait; 1723 rq->end_io = blk_end_sync_rq; 1724 } 1725 1726 spin_lock_irqsave(&ide_lock, flags); 1727 if (action == ide_preempt) 1728 hwgroup->rq = NULL; 1729 if (action == ide_preempt || action == ide_head_wait) { 1730 where = ELEVATOR_INSERT_FRONT; 1731 rq->flags |= REQ_PREEMPT; 1732 } 1733 __elv_add_request(drive->queue, rq, where, 0); 1734 ide_do_request(hwgroup, IDE_NO_IRQ); 1735 spin_unlock_irqrestore(&ide_lock, flags); 1736 1737 err = 0; 1738 if (must_wait) { 1739 wait_for_completion(&wait); 1740 rq->waiting = NULL; 1741 if (rq->errors) 1742 err = -EIO; 1743 1744 blk_put_request(rq); 1745 } 1746 1747 return err; 1748} 1749 1750EXPORT_SYMBOL(ide_do_drive_cmd); 1751