/* ide-io.c, revision 395d8ef5bebe547a80737692f9789d2e36da16f2 */
/*
 * IDE I/O functions
 *
 * Basic PIO and command management functionality.
 *
 * This code was split off from ide.c. See ide.c for history and original
 * copyrights.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * For the avoidance of doubt the "preferred form" of this code is one which
 * is in an open non patent encumbered format. Where cryptographic key signing
 * forms part of the process of creating an executable the information
 * including keys needed to generate an equivalently functional executable
 * are deemed to be part of the source code.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/major.h>
#include <linux/errno.h>
#include <linux/genhd.h>
#include <linux/blkpg.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/ide.h>
#include <linux/completion.h>
#include <linux/reboot.h>
#include <linux/cdrom.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/kmod.h>
#include <linux/scatterlist.h>
#include <linux/bitops.h>

#include <asm/byteorder.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <asm/io.h>

static int __ide_end_request(ide_drive_t *drive, struct request *rq,
			     int uptodate, unsigned int nr_bytes, int dequeue)
{
	int ret = 1;
	int error = 0;

	if (uptodate <= 0)
		error = uptodate ? uptodate : -EIO;

	/*
	 * if failfast is set on a request, override number of sectors and
	 * complete the whole request right now
	 */
	if (blk_noretry_request(rq) && error)
		nr_bytes = rq->hard_nr_sectors << 9;

	if (!blk_fs_request(rq) && error && !rq->errors)
		rq->errors = -EIO;

	/*
	 * decide whether to reenable DMA -- 3 is a random magic for now,
	 * if we DMA timeout more than 3 times, just stay in PIO
	 */
	if (drive->state == DMA_PIO_RETRY && drive->retry_pio <= 3) {
		drive->state = 0;
		ide_dma_on(drive);
	}

	if (!__blk_end_request(rq, error, nr_bytes)) {
		if (dequeue)
			HWGROUP(drive)->rq = NULL;
		ret = 0;
	}

	return ret;
}

/**
 * ide_end_request - complete an IDE I/O
 * @drive: IDE device for the I/O
 * @uptodate: completion status; zero or a negative value indicates failure
 * @nr_sectors: number of sectors completed
 *
 * This is our end_request wrapper function. We complete the I/O,
 * update the random number input, and dequeue the request, which,
 * if it was tagged, may be out of order.
 */

int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
{
	unsigned int nr_bytes = nr_sectors << 9;
	struct request *rq;
	unsigned long flags;
	int ret = 1;

	/*
	 * room for locking improvements here, the calls below don't
	 * need the queue lock held at all
	 */
	spin_lock_irqsave(&ide_lock, flags);
	rq = HWGROUP(drive)->rq;

	if (!nr_bytes) {
		if (blk_pc_request(rq))
			nr_bytes = rq->data_len;
		else
			nr_bytes = rq->hard_cur_sectors << 9;
	}

	ret = __ide_end_request(drive, rq, uptodate, nr_bytes, 1);

	spin_unlock_irqrestore(&ide_lock, flags);
	return ret;
}
EXPORT_SYMBOL(ide_end_request);
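/*
 * Illustrative sketch (editor's addition, not part of the original file,
 * hence guarded by #if 0): how a subdriver's PIO completion handler would
 * typically use ide_end_request().  The handler name and the
 * one-sector-per-interrupt policy are assumptions for the example.
 */
#if 0
static ide_startstop_t example_pio_read_intr(ide_drive_t *drive)
{
	u8 stat = ide_read_status(drive);

	if (!OK_STAT(stat, READY_STAT, BAD_STAT))
		return ide_error(drive, "example_pio_read_intr", stat);

	/* one sector arrived intact; ide_end_request() returns 0 once
	 * the whole request has been completed and dequeued */
	if (ide_end_request(drive, 1, 1) == 0)
		return ide_stopped;

	/* more sectors pending; real code would re-arm a handler here */
	return ide_started;
}
#endif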
/*
 * Power Management state machine. This one is rather trivial for now,
 * we should probably add more, like switching back to PIO on suspend
 * to help some BIOSes, re-do the door locking on resume, etc...
 */

enum {
	ide_pm_flush_cache	= ide_pm_state_start_suspend,
	idedisk_pm_standby,

	idedisk_pm_restore_pio	= ide_pm_state_start_resume,
	idedisk_pm_idle,
	ide_pm_restore_dma,
};

static void ide_complete_power_step(ide_drive_t *drive, struct request *rq, u8 stat, u8 error)
{
	struct request_pm_state *pm = rq->data;

	if (drive->media != ide_disk)
		return;

	switch (pm->pm_step) {
	case ide_pm_flush_cache:	/* Suspend step 1 (flush cache) complete */
		if (pm->pm_state == PM_EVENT_FREEZE)
			pm->pm_step = ide_pm_state_completed;
		else
			pm->pm_step = idedisk_pm_standby;
		break;
	case idedisk_pm_standby:	/* Suspend step 2 (standby) complete */
		pm->pm_step = ide_pm_state_completed;
		break;
	case idedisk_pm_restore_pio:	/* Resume step 1 complete */
		pm->pm_step = idedisk_pm_idle;
		break;
	case idedisk_pm_idle:		/* Resume step 2 (idle) complete */
		pm->pm_step = ide_pm_restore_dma;
		break;
	}
}

static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
{
	struct request_pm_state *pm = rq->data;
	ide_task_t *args = rq->special;

	memset(args, 0, sizeof(*args));

	switch (pm->pm_step) {
	case ide_pm_flush_cache:	/* Suspend step 1 (flush cache) */
		if (drive->media != ide_disk)
			break;
		/* Not supported? Switch to next step now. */
		if (!drive->wcache || !ide_id_has_flush_cache(drive->id)) {
			ide_complete_power_step(drive, rq, 0, 0);
			return ide_stopped;
		}
		if (ide_id_has_flush_cache_ext(drive->id))
			args->tf.command = WIN_FLUSH_CACHE_EXT;
		else
			args->tf.command = WIN_FLUSH_CACHE;
		goto out_do_tf;

	case idedisk_pm_standby:	/* Suspend step 2 (standby) */
		args->tf.command = WIN_STANDBYNOW1;
		goto out_do_tf;

	case idedisk_pm_restore_pio:	/* Resume step 1 (restore PIO) */
		ide_set_max_pio(drive);
		/*
		 * skip idedisk_pm_idle for ATAPI devices
		 */
		if (drive->media != ide_disk)
			pm->pm_step = ide_pm_restore_dma;
		else
			ide_complete_power_step(drive, rq, 0, 0);
		return ide_stopped;

	case idedisk_pm_idle:		/* Resume step 2 (idle) */
		args->tf.command = WIN_IDLEIMMEDIATE;
		goto out_do_tf;

	case ide_pm_restore_dma:	/* Resume step 3 (restore DMA) */
		/*
		 * Right now, all we do is call ide_set_dma(drive),
		 * we could be smarter and check for current xfer_speed
		 * in struct drive etc...
		 */
		if (drive->hwif->dma_host_set == NULL)
			break;
		/*
		 * TODO: respect ->using_dma setting
		 */
		ide_set_dma(drive);
		break;
	}
	pm->pm_step = ide_pm_state_completed;
	return ide_stopped;

out_do_tf:
	args->tf_flags	 = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
	args->data_phase = TASKFILE_NO_DATA;
	return do_rw_taskfile(drive, args);
}
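/*
 * Editor's note, summarizing the state machine above: a suspend request
 * walks ide_pm_flush_cache -> idedisk_pm_standby (the flush is the final
 * step for PM_EVENT_FREEZE, and is only issued when the drive has a write
 * cache and supports the FLUSH CACHE command); a resume request walks
 * idedisk_pm_restore_pio -> idedisk_pm_idle -> ide_pm_restore_dma, with
 * the idle step skipped for ATAPI devices.  Both sequences finish at
 * ide_pm_state_completed.
 */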
/**
 * ide_end_dequeued_request - complete an IDE I/O
 * @drive: IDE device for the I/O
 * @uptodate: completion status; zero or a negative value indicates failure
 * @nr_sectors: number of sectors completed
 *
 * Complete an I/O that is no longer on the request queue. This
 * typically occurs when we pull the request and issue a REQUEST_SENSE.
 * We must still finish the old request but we must not tamper with the
 * queue in the meantime.
 *
 * NOTE: This path does not handle barrier, but barrier is not supported
 * on ide-cd anyway.
 */

int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq,
			     int uptodate, int nr_sectors)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ide_lock, flags);
	BUG_ON(!blk_rq_started(rq));
	ret = __ide_end_request(drive, rq, uptodate, nr_sectors << 9, 0);
	spin_unlock_irqrestore(&ide_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(ide_end_dequeued_request);


/**
 * ide_complete_pm_request - end the current Power Management request
 * @drive: target drive
 * @rq: request
 *
 * This function cleans up the current PM request and stops the queue
 * if necessary.
 */
static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq)
{
	unsigned long flags;

#ifdef DEBUG_PM
	printk("%s: completing PM request, %s\n", drive->name,
	       blk_pm_suspend_request(rq) ? "suspend" : "resume");
#endif
	spin_lock_irqsave(&ide_lock, flags);
	if (blk_pm_suspend_request(rq)) {
		blk_stop_queue(drive->queue);
	} else {
		drive->blocked = 0;
		blk_start_queue(drive->queue);
	}
	HWGROUP(drive)->rq = NULL;
	if (__blk_end_request(rq, 0, 0))
		BUG();
	spin_unlock_irqrestore(&ide_lock, flags);
}

void ide_tf_read(ide_drive_t *drive, ide_task_t *task)
{
	ide_hwif_t *hwif = drive->hwif;
	struct ide_taskfile *tf = &task->tf;

	if (task->tf_flags & IDE_TFLAG_IN_DATA) {
		u16 data = hwif->INW(IDE_DATA_REG);

		tf->data = data & 0xff;
		tf->hob_data = (data >> 8) & 0xff;
	}

	/* be sure we're looking at the low order bits */
	hwif->OUTB(drive->ctl & ~0x80, IDE_CONTROL_REG);

	if (task->tf_flags & IDE_TFLAG_IN_NSECT)
		tf->nsect  = hwif->INB(IDE_NSECTOR_REG);
	if (task->tf_flags & IDE_TFLAG_IN_LBAL)
		tf->lbal   = hwif->INB(IDE_SECTOR_REG);
	if (task->tf_flags & IDE_TFLAG_IN_LBAM)
		tf->lbam   = hwif->INB(IDE_LCYL_REG);
	if (task->tf_flags & IDE_TFLAG_IN_LBAH)
		tf->lbah   = hwif->INB(IDE_HCYL_REG);
	if (task->tf_flags & IDE_TFLAG_IN_DEVICE)
		tf->device = hwif->INB(IDE_SELECT_REG);

	if (task->tf_flags & IDE_TFLAG_LBA48) {
		hwif->OUTB(drive->ctl | 0x80, IDE_CONTROL_REG);

		if (task->tf_flags & IDE_TFLAG_IN_HOB_FEATURE)
			tf->hob_feature = hwif->INB(IDE_FEATURE_REG);
		if (task->tf_flags & IDE_TFLAG_IN_HOB_NSECT)
			tf->hob_nsect   = hwif->INB(IDE_NSECTOR_REG);
		if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAL)
			tf->hob_lbal    = hwif->INB(IDE_SECTOR_REG);
		if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAM)
			tf->hob_lbam    = hwif->INB(IDE_LCYL_REG);
		if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAH)
			tf->hob_lbah    = hwif->INB(IDE_HCYL_REG);
	}
}

/**
 * ide_end_drive_cmd - end an explicit drive command
 * @drive: drive the command was issued to
 * @stat: status bits
 * @err: error bits
 *
 * Clean up after success/failure of an explicit drive command.
 * These get thrown onto the queue so they are synchronized with
 * real I/O operations on the drive.
 *
 * In LBA48 mode we have to read the register set twice to get
 * all the extra information out.
 */

void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
{
	unsigned long flags;
	struct request *rq;

	spin_lock_irqsave(&ide_lock, flags);
	rq = HWGROUP(drive)->rq;
	spin_unlock_irqrestore(&ide_lock, flags);

	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
		ide_task_t *task = (ide_task_t *)rq->special;

		if (rq->errors == 0)
			rq->errors = !OK_STAT(stat, READY_STAT, BAD_STAT);

		if (task) {
			struct ide_taskfile *tf = &task->tf;

			tf->error = err;
			tf->status = stat;

			ide_tf_read(drive, task);

			if (task->tf_flags & IDE_TFLAG_DYN)
				kfree(task);
		}
	} else if (blk_pm_request(rq)) {
		struct request_pm_state *pm = rq->data;
#ifdef DEBUG_PM
		printk("%s: complete_power_step(step: %d, stat: %x, err: %x)\n",
			drive->name, rq->pm->pm_step, stat, err);
#endif
		ide_complete_power_step(drive, rq, stat, err);
		if (pm->pm_step == ide_pm_state_completed)
			ide_complete_pm_request(drive, rq);
		return;
	}

	spin_lock_irqsave(&ide_lock, flags);
	HWGROUP(drive)->rq = NULL;
	rq->errors = err;
	if (unlikely(__blk_end_request(rq, (rq->errors ? -EIO : 0),
				       blk_rq_bytes(rq))))
		BUG();
	spin_unlock_irqrestore(&ide_lock, flags);
}

EXPORT_SYMBOL(ide_end_drive_cmd);
/**
 * try_to_flush_leftover_data - flush junk
 * @drive: drive to flush
 *
 * try_to_flush_leftover_data() is invoked in response to a drive
 * unexpectedly having its DRQ_STAT bit set. As an alternative to
 * resetting the drive, this routine tries to clear the condition
 * by reading a sector's worth of data from the drive. Of course,
 * this may not help if the drive is *waiting* for data from *us*.
 */
static void try_to_flush_leftover_data (ide_drive_t *drive)
{
	int i = (drive->mult_count ? drive->mult_count : 1) * SECTOR_WORDS;

	if (drive->media != ide_disk)
		return;
	while (i > 0) {
		u32 buffer[16];
		u32 wcount = (i > 16) ? 16 : i;

		i -= wcount;
		HWIF(drive)->ata_input_data(drive, buffer, wcount);
	}
}

static void ide_kill_rq(ide_drive_t *drive, struct request *rq)
{
	if (rq->rq_disk) {
		ide_driver_t *drv;

		drv = *(ide_driver_t **)rq->rq_disk->private_data;
		drv->end_request(drive, 0, 0);
	} else
		ide_end_request(drive, 0, 0);
}

static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
{
	ide_hwif_t *hwif = drive->hwif;

	if (stat & BUSY_STAT || ((stat & WRERR_STAT) && !drive->nowerr)) {
		/* other bits are useless when BUSY */
		rq->errors |= ERROR_RESET;
	} else if (stat & ERR_STAT) {
		/* err has different meaning on cdrom and tape */
		if (err == ABRT_ERR) {
			if (drive->select.b.lba &&
			    /* some newer drives don't support WIN_SPECIFY */
			    hwif->INB(IDE_COMMAND_REG) == WIN_SPECIFY)
				return ide_stopped;
		} else if ((err & BAD_CRC) == BAD_CRC) {
			/* UDMA crc error, just retry the operation */
			drive->crc_count++;
		} else if (err & (BBD_ERR | ECC_ERR)) {
			/* retries won't help these */
			rq->errors = ERROR_MAX;
		} else if (err & TRK0_ERR) {
			/* help it find track zero */
			rq->errors |= ERROR_RECAL;
		}
	}

	if ((stat & DRQ_STAT) && rq_data_dir(rq) == READ &&
	    (hwif->host_flags & IDE_HFLAG_ERROR_STOPS_FIFO) == 0)
		try_to_flush_leftover_data(drive);

	if (rq->errors >= ERROR_MAX || blk_noretry_request(rq)) {
		ide_kill_rq(drive, rq);
		return ide_stopped;
	}

	if (ide_read_status(drive) & (BUSY_STAT | DRQ_STAT))
		rq->errors |= ERROR_RESET;

	if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
		++rq->errors;
		return ide_do_reset(drive);
	}

	if ((rq->errors & ERROR_RECAL) == ERROR_RECAL)
		drive->special.b.recalibrate = 1;

	++rq->errors;

	return ide_stopped;
}

static ide_startstop_t ide_atapi_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
{
	ide_hwif_t *hwif = drive->hwif;

	if (stat & BUSY_STAT || ((stat & WRERR_STAT) && !drive->nowerr)) {
		/* other bits are useless when BUSY */
		rq->errors |= ERROR_RESET;
	} else {
		/* add decoding error stuff */
	}

	if (ide_read_status(drive) & (BUSY_STAT | DRQ_STAT))
		/* force an abort */
		hwif->OUTB(WIN_IDLEIMMEDIATE, IDE_COMMAND_REG);

	if (rq->errors >= ERROR_MAX) {
		ide_kill_rq(drive, rq);
	} else {
		if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
			++rq->errors;
			return ide_do_reset(drive);
		}
		++rq->errors;
	}

	return ide_stopped;
}

ide_startstop_t
__ide_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
{
	if (drive->media == ide_disk)
		return ide_ata_error(drive, rq, stat, err);
	return ide_atapi_error(drive, rq, stat, err);
}

EXPORT_SYMBOL_GPL(__ide_error);
/**
 * ide_error - handle an error on the IDE
 * @drive: drive the error occurred on
 * @msg: message to report
 * @stat: status bits
 *
 * ide_error() takes action based on the error returned by the drive.
 * For normal I/O that may well include retries. We deal with
 * both new-style (taskfile) and old style command handling here.
 * In the case of taskfile command handling there is work left to do.
 */

ide_startstop_t ide_error (ide_drive_t *drive, const char *msg, u8 stat)
{
	struct request *rq;
	u8 err;

	err = ide_dump_status(drive, msg, stat);

	if ((rq = HWGROUP(drive)->rq) == NULL)
		return ide_stopped;

	/* retry only "normal" I/O: */
	if (!blk_fs_request(rq)) {
		rq->errors = 1;
		ide_end_drive_cmd(drive, stat, err);
		return ide_stopped;
	}

	if (rq->rq_disk) {
		ide_driver_t *drv;

		drv = *(ide_driver_t **)rq->rq_disk->private_data;
		return drv->error(drive, rq, stat, err);
	} else
		return __ide_error(drive, rq, stat, err);
}

EXPORT_SYMBOL_GPL(ide_error);

ide_startstop_t __ide_abort(ide_drive_t *drive, struct request *rq)
{
	if (drive->media != ide_disk)
		rq->errors |= ERROR_RESET;

	ide_kill_rq(drive, rq);

	return ide_stopped;
}

EXPORT_SYMBOL_GPL(__ide_abort);

/**
 * ide_abort - abort pending IDE operations
 * @drive: drive the error occurred on
 * @msg: message to report
 *
 * ide_abort kills and cleans up when we are about to do a
 * host initiated reset on active commands. Longer term we
 * want handlers to have sensible abort handling themselves.
 *
 * This differs fundamentally from ide_error because in
 * this case the command is doing just fine when we
 * blow it away.
 */
ide_startstop_t ide_abort(ide_drive_t *drive, const char *msg)
{
	struct request *rq;

	if (drive == NULL || (rq = HWGROUP(drive)->rq) == NULL)
		return ide_stopped;

	/* retry only "normal" I/O: */
	if (!blk_fs_request(rq)) {
		rq->errors = 1;
		ide_end_drive_cmd(drive, BUSY_STAT, 0);
		return ide_stopped;
	}

	if (rq->rq_disk) {
		ide_driver_t *drv;

		drv = *(ide_driver_t **)rq->rq_disk->private_data;
		return drv->abort(drive, rq);
	} else
		return __ide_abort(drive, rq);
}

static void ide_tf_set_specify_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
{
	tf->nsect   = drive->sect;
	tf->lbal    = drive->sect;
	tf->lbam    = drive->cyl;
	tf->lbah    = drive->cyl >> 8;
	tf->device  = ((drive->head - 1) | drive->select.all) & ~ATA_LBA;
	tf->command = WIN_SPECIFY;
}

static void ide_tf_set_restore_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
{
	tf->nsect   = drive->sect;
	tf->command = WIN_RESTORE;
}

static void ide_tf_set_setmult_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
{
	tf->nsect   = drive->mult_req;
	tf->command = WIN_SETMULT;
}

static ide_startstop_t ide_disk_special(ide_drive_t *drive)
{
	special_t *s = &drive->special;
	ide_task_t args;

	memset(&args, 0, sizeof(ide_task_t));
	args.data_phase = TASKFILE_NO_DATA;

	if (s->b.set_geometry) {
		s->b.set_geometry = 0;
		ide_tf_set_specify_cmd(drive, &args.tf);
	} else if (s->b.recalibrate) {
		s->b.recalibrate = 0;
		ide_tf_set_restore_cmd(drive, &args.tf);
	} else if (s->b.set_multmode) {
		s->b.set_multmode = 0;
		if (drive->mult_req > drive->id->max_multsect)
			drive->mult_req = drive->id->max_multsect;
		ide_tf_set_setmult_cmd(drive, &args.tf);
	} else if (s->all) {
		int special = s->all;
		s->all = 0;
		printk(KERN_ERR "%s: bad special flag: 0x%02x\n", drive->name, special);
		return ide_stopped;
	}

	args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE |
			IDE_TFLAG_CUSTOM_HANDLER;

	do_rw_taskfile(drive, &args);

	return ide_started;
}

/*
 * handle HDIO_SET_PIO_MODE ioctl abusers here, eventually it will go away
 */
static int set_pio_mode_abuse(ide_hwif_t *hwif, u8 req_pio)
{
	switch (req_pio) {
	case 202:
	case 201:
	case 200:
	case 102:
	case 101:
	case 100:
		return (hwif->host_flags & IDE_HFLAG_ABUSE_DMA_MODES) ? 1 : 0;
	case 9:
	case 8:
		return (hwif->host_flags & IDE_HFLAG_ABUSE_PREFETCH) ? 1 : 0;
	case 7:
	case 6:
		return (hwif->host_flags & IDE_HFLAG_ABUSE_FAST_DEVSEL) ? 1 : 0;
	default:
		return 0;
	}
}

/**
 * do_special - issue some special commands
 * @drive: drive the command is for
 *
 * do_special() is used to issue WIN_SPECIFY, WIN_RESTORE, and WIN_SETMULT
 * commands to a drive. It used to do much more, but has been scaled
 * back.
 */

static ide_startstop_t do_special (ide_drive_t *drive)
{
	special_t *s = &drive->special;

#ifdef DEBUG
	printk("%s: do_special: 0x%02x\n", drive->name, s->all);
#endif
	if (s->b.set_tune) {
		ide_hwif_t *hwif = drive->hwif;
		u8 req_pio = drive->tune_req;

		s->b.set_tune = 0;

		if (set_pio_mode_abuse(drive->hwif, req_pio)) {

			if (hwif->set_pio_mode == NULL)
				return ide_stopped;

			/*
			 * take ide_lock for drive->[no_]unmask/[no_]io_32bit
			 */
			if (req_pio == 8 || req_pio == 9) {
				unsigned long flags;

				spin_lock_irqsave(&ide_lock, flags);
				hwif->set_pio_mode(drive, req_pio);
				spin_unlock_irqrestore(&ide_lock, flags);
			} else
				hwif->set_pio_mode(drive, req_pio);
		} else {
			int keep_dma = drive->using_dma;

			ide_set_pio(drive, req_pio);

			if (hwif->host_flags & IDE_HFLAG_SET_PIO_MODE_KEEP_DMA) {
				if (keep_dma)
					ide_dma_on(drive);
			}
		}

		return ide_stopped;
	} else {
		if (drive->media == ide_disk)
			return ide_disk_special(drive);

		s->all = 0;
		drive->mult_req = 0;
		return ide_stopped;
	}
}
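/*
 * Illustrative sketch (editor's addition, #if 0'd out): reset/tuning code
 * elsewhere in the driver requests "special" work simply by setting these
 * bits; do_special()/ide_disk_special() above pick them up on the next
 * pass through the request loop.  The function name is hypothetical.
 */
#if 0
static void example_schedule_special(ide_drive_t *drive)
{
	drive->special.b.set_geometry = 1;	/* re-issue WIN_SPECIFY */
	drive->special.b.recalibrate  = 1;	/* re-issue WIN_RESTORE */
	drive->special.b.set_multmode = 1;	/* re-issue WIN_SETMULT */
}
#endif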
void ide_map_sg(ide_drive_t *drive, struct request *rq)
{
	ide_hwif_t *hwif = drive->hwif;
	struct scatterlist *sg = hwif->sg_table;

	if (hwif->sg_mapped)	/* needed by ide-scsi */
		return;

	if (rq->cmd_type != REQ_TYPE_ATA_TASKFILE) {
		hwif->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
	} else {
		sg_init_one(sg, rq->buffer, rq->nr_sectors * SECTOR_SIZE);
		hwif->sg_nents = 1;
	}
}

EXPORT_SYMBOL_GPL(ide_map_sg);

void ide_init_sg_cmd(ide_drive_t *drive, struct request *rq)
{
	ide_hwif_t *hwif = drive->hwif;

	hwif->nsect = hwif->nleft = rq->nr_sectors;
	hwif->cursg_ofs = 0;
	hwif->cursg = NULL;
}

EXPORT_SYMBOL_GPL(ide_init_sg_cmd);

/**
 * execute_drive_cmd - issue special drive command
 * @drive: the drive to issue the command on
 * @rq: the request structure holding the command
 *
 * execute_drive_cmd() issues a special drive command, usually
 * initiated by ioctl() from the external hdparm program. The
 * command can be a drive command, drive task or taskfile
 * operation. Weirdly you can call it with NULL to wait for
 * all commands to finish. Don't do this as that is due to change.
 */

static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
		struct request *rq)
{
	ide_hwif_t *hwif = HWIF(drive);
	ide_task_t *task = rq->special;

	if (task) {
		hwif->data_phase = task->data_phase;

		switch (hwif->data_phase) {
		case TASKFILE_MULTI_OUT:
		case TASKFILE_OUT:
		case TASKFILE_MULTI_IN:
		case TASKFILE_IN:
			ide_init_sg_cmd(drive, rq);
			ide_map_sg(drive, rq);
		default:
			break;
		}

		return do_rw_taskfile(drive, task);
	}

	/*
	 * NULL is actually a valid way of waiting for
	 * all current requests to be flushed from the queue.
	 */
#ifdef DEBUG
	printk("%s: DRIVE_CMD (null)\n", drive->name);
#endif
	ide_end_drive_cmd(drive, ide_read_status(drive), ide_read_error(drive));

	return ide_stopped;
}

static void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
{
	struct request_pm_state *pm = rq->data;

	if (blk_pm_suspend_request(rq) &&
	    pm->pm_step == ide_pm_state_start_suspend)
		/* Mark drive blocked when starting the suspend sequence. */
		drive->blocked = 1;
	else if (blk_pm_resume_request(rq) &&
		 pm->pm_step == ide_pm_state_start_resume) {
		/*
		 * The first thing we do on wakeup is to wait for BSY bit to
		 * go away (with a looong timeout) as a drive on this hwif may
		 * just be POSTing itself.
		 * We do that before even selecting as the "other" device on
		 * the bus may be broken enough to walk on our toes at this
		 * point.
		 */
		int rc;
#ifdef DEBUG_PM
		printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name);
#endif
		rc = ide_wait_not_busy(HWIF(drive), 35000);
		if (rc)
			printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name);
		SELECT_DRIVE(drive);
		ide_set_irq(drive, 1);
		rc = ide_wait_not_busy(HWIF(drive), 100000);
		if (rc)
			printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);
	}
}
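/*
 * Illustrative sketch (editor's addition, #if 0'd out): roughly how the
 * generic suspend path builds the PM request that ide_check_pm_state()
 * and the power-step machinery above consume.  The real code lives in
 * ide.c; the details here are simplified assumptions.
 */
#if 0
static int example_suspend_drive(ide_drive_t *drive)
{
	struct request rq;
	struct request_pm_state rqpm;
	ide_task_t args;

	memset(&rq, 0, sizeof(rq));
	memset(&rqpm, 0, sizeof(rqpm));
	memset(&args, 0, sizeof(args));
	rq.cmd_type = REQ_TYPE_PM_SUSPEND;
	rq.special = &args;
	rq.data = &rqpm;
	rqpm.pm_step = ide_pm_state_start_suspend;
	rqpm.pm_state = PM_EVENT_SUSPEND;

	return ide_do_drive_cmd(drive, &rq, ide_wait);
}
#endif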
/**
 * start_request - start of I/O and command issuing for IDE
 * @drive: drive the request is for
 * @rq: request to start
 *
 * start_request() initiates handling of a new I/O request. It
 * accepts commands and I/O (read/write) requests. It also does
 * the final remapping for weird stuff like EZDrive. Once
 * device mapper can work at the sector level the EZDrive stuff can go away.
 *
 * FIXME: this function needs a rename
 */

static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
{
	ide_startstop_t startstop;
	sector_t block;

	BUG_ON(!blk_rq_started(rq));

#ifdef DEBUG
	printk("%s: start_request: current=0x%08lx\n",
		HWIF(drive)->name, (unsigned long) rq);
#endif

	/* bail early if we've exceeded max_failures */
	if (drive->max_failures && (drive->failures > drive->max_failures)) {
		rq->cmd_flags |= REQ_FAILED;
		goto kill_rq;
	}

	block = rq->sector;
	if (blk_fs_request(rq) &&
	    (drive->media == ide_disk || drive->media == ide_floppy)) {
		block += drive->sect0;
	}
	/* Yecch - this will shift the entire interval,
	   possibly killing some innocent following sector */
	if (block == 0 && drive->remap_0_to_1 == 1)
		block = 1;  /* redirect MBR access to EZ-Drive partn table */

	if (blk_pm_request(rq))
		ide_check_pm_state(drive, rq);

	SELECT_DRIVE(drive);
	if (ide_wait_stat(&startstop, drive, drive->ready_stat, BUSY_STAT|DRQ_STAT, WAIT_READY)) {
		printk(KERN_ERR "%s: drive not ready for command\n", drive->name);
		return startstop;
	}
	if (!drive->special.all) {
		ide_driver_t *drv;

		/*
		 * We reset the drive so we need to issue a SETFEATURES.
		 * Do it _after_ do_special() restored device parameters.
		 */
		if (drive->current_speed == 0xff)
			ide_config_drive_speed(drive, drive->desired_speed);

		if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
			return execute_drive_cmd(drive, rq);
		else if (blk_pm_request(rq)) {
			struct request_pm_state *pm = rq->data;
#ifdef DEBUG_PM
			printk("%s: start_power_step(step: %d)\n",
				drive->name, rq->pm->pm_step);
#endif
			startstop = ide_start_power_step(drive, rq);
			if (startstop == ide_stopped &&
			    pm->pm_step == ide_pm_state_completed)
				ide_complete_pm_request(drive, rq);
			return startstop;
		}

		drv = *(ide_driver_t **)rq->rq_disk->private_data;
		return drv->do_request(drive, rq, block);
	}
	return do_special(drive);
kill_rq:
	ide_kill_rq(drive, rq);
	return ide_stopped;
}
/**
 * ide_stall_queue - pause an IDE device
 * @drive: drive to stall
 * @timeout: time to stall for (jiffies)
 *
 * ide_stall_queue() can be used by a drive to give excess bandwidth back
 * to the hwgroup by sleeping for timeout jiffies.
 */

void ide_stall_queue (ide_drive_t *drive, unsigned long timeout)
{
	if (timeout > WAIT_WORSTCASE)
		timeout = WAIT_WORSTCASE;
	drive->sleep = timeout + jiffies;
	drive->sleeping = 1;
}

EXPORT_SYMBOL(ide_stall_queue);

#define WAKEUP(drive)	((drive)->service_start + 2 * (drive)->service_time)

/**
 * choose_drive - select a drive to service
 * @hwgroup: hardware group to select on
 *
 * choose_drive() selects the next drive which will be serviced.
 * This is necessary because the IDE layer can't issue commands
 * to both drives on the same cable, unlike SCSI.
 */

static inline ide_drive_t *choose_drive (ide_hwgroup_t *hwgroup)
{
	ide_drive_t *drive, *best;

repeat:
	best = NULL;
	drive = hwgroup->drive;

	/*
	 * drive is doing pre-flush, ordered write, post-flush sequence. even
	 * though that is 3 requests, it must be seen as a single transaction.
	 * we must not preempt this drive until that is complete
	 */
	if (blk_queue_flushing(drive->queue)) {
		/*
		 * small race where queue could get replugged during
		 * the 3-request flush cycle, just yank the plug since
		 * we want it to finish asap
		 */
		blk_remove_plug(drive->queue);
		return drive;
	}

	do {
		if ((!drive->sleeping || time_after_eq(jiffies, drive->sleep))
		    && !elv_queue_empty(drive->queue)) {
			if (!best
			 || (drive->sleeping && (!best->sleeping || time_before(drive->sleep, best->sleep)))
			 || (!best->sleeping && time_before(WAKEUP(drive), WAKEUP(best))))
			{
				if (!blk_queue_plugged(drive->queue))
					best = drive;
			}
		}
	} while ((drive = drive->next) != hwgroup->drive);
	if (best && best->nice1 && !best->sleeping && best != hwgroup->drive && best->service_time > WAIT_MIN_SLEEP) {
		long t = (signed long)(WAKEUP(best) - jiffies);
		if (t >= WAIT_MIN_SLEEP) {
			/*
			 * We *may* have some time to spare, but first let's see if
			 * someone can potentially benefit from our nice mood today..
			 */
			drive = best->next;
			do {
				if (!drive->sleeping
				 && time_before(jiffies - best->service_time, WAKEUP(drive))
				 && time_before(WAKEUP(drive), jiffies + t))
				{
					ide_stall_queue(best, min_t(long, t, 10 * WAIT_MIN_SLEEP));
					goto repeat;
				}
			} while ((drive = drive->next) != best);
		}
	}
	return best;
}
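/*
 * Editor's note, a worked example of the heuristic above: a drive whose
 * last request started at service_start = 1000 jiffies and took
 * service_time = 40 jiffies has WAKEUP() = 1080, so the longer a drive
 * occupied the channel, the longer choose_drive() will prefer its
 * partner before coming back to it.
 */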
/*
 * Issue a new request to a drive from hwgroup
 * Caller must have already done spin_lock_irqsave(&ide_lock, ..);
 *
 * A hwgroup is a serialized group of IDE interfaces.  Usually there is
 * exactly one hwif (interface) per hwgroup, but buggy controllers (eg. CMD640)
 * may have both interfaces in a single hwgroup to "serialize" access.
 * Or possibly multiple ISA interfaces can share a common IRQ by being grouped
 * together into one hwgroup for serialized access.
 *
 * Note also that several hwgroups can end up sharing a single IRQ,
 * possibly along with many other devices.  This is especially common in
 * PCI-based systems with off-board IDE controller cards.
 *
 * The IDE driver uses the single global ide_lock spinlock to protect
 * access to the request queues, and to protect the hwgroup->busy flag.
 *
 * The first thread into the driver for a particular hwgroup sets the
 * hwgroup->busy flag to indicate that this hwgroup is now active,
 * and then initiates processing of the top request from the request queue.
 *
 * Other threads attempting entry notice the busy setting, and will simply
 * queue their new requests and exit immediately.  Note that hwgroup->busy
 * remains set even when the driver is merely awaiting the next interrupt.
 * Thus, the meaning is "this hwgroup is busy processing a request".
 *
 * When processing of a request completes, the completing thread or IRQ-handler
 * will start the next request from the queue.  If no more work remains,
 * the driver will clear the hwgroup->busy flag and exit.
 *
 * The ide_lock (spinlock) is used to protect all access to the
 * hwgroup->busy flag, but is otherwise not needed for most processing in
 * the driver.  This makes the driver much friendlier to shared IRQs
 * than previous designs, while remaining 100% (?) SMP safe and capable.
 */
static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
{
	ide_drive_t	*drive;
	ide_hwif_t	*hwif;
	struct request	*rq;
	ide_startstop_t	startstop;
	int		loops = 0;

	/* for atari only: POSSIBLY BROKEN HERE(?) */
	ide_get_lock(ide_intr, hwgroup);

	/* caller must own ide_lock */
	BUG_ON(!irqs_disabled());

	while (!hwgroup->busy) {
		hwgroup->busy = 1;
		drive = choose_drive(hwgroup);
		if (drive == NULL) {
			int sleeping = 0;
			unsigned long sleep = 0; /* shut up, gcc */
			hwgroup->rq = NULL;
			drive = hwgroup->drive;
			do {
				if (drive->sleeping && (!sleeping || time_before(drive->sleep, sleep))) {
					sleeping = 1;
					sleep = drive->sleep;
				}
			} while ((drive = drive->next) != hwgroup->drive);
			if (sleeping) {
				/*
				 * Take a short snooze, and then wake up this hwgroup again.
				 * This gives other hwgroups on the same IRQ a chance to
				 * play fairly with us, just in case there are big differences
				 * in relative throughputs.. don't want to hog the cpu too much.
				 */
				if (time_before(sleep, jiffies + WAIT_MIN_SLEEP))
					sleep = jiffies + WAIT_MIN_SLEEP;
#if 1
				if (timer_pending(&hwgroup->timer))
					printk(KERN_CRIT "ide_set_handler: timer already active\n");
#endif
				/* so that ide_timer_expiry knows what to do */
				hwgroup->sleeping = 1;
				hwgroup->req_gen_timer = hwgroup->req_gen;
				mod_timer(&hwgroup->timer, sleep);
				/* we purposely leave hwgroup->busy==1
				 * while sleeping */
			} else {
				/* Ugly, but how can we sleep for the lock
				 * otherwise? perhaps from tq_disk?
				 */

				/* for atari only */
				ide_release_lock();
				hwgroup->busy = 0;
			}

			/* no more work for this hwgroup (for now) */
			return;
		}
	again:
		hwif = HWIF(drive);
		if (hwgroup->hwif->sharing_irq && hwif != hwgroup->hwif) {
			/*
			 * set nIEN for previous hwif, drives in the
			 * quirk_list may not like intr setups/cleanups
			 */
			if (drive->quirk_list != 1)
				ide_set_irq(drive, 0);
		}
		hwgroup->hwif = hwif;
		hwgroup->drive = drive;
		drive->sleeping = 0;
		drive->service_start = jiffies;

		if (blk_queue_plugged(drive->queue)) {
			printk(KERN_ERR "ide: huh? queue was plugged!\n");
			break;
		}

		/*
		 * we know that the queue isn't empty, but this can happen
		 * if the q->prep_rq_fn() decides to kill a request
		 */
		rq = elv_next_request(drive->queue);
		if (!rq) {
			hwgroup->busy = 0;
			break;
		}

		/*
		 * Sanity: don't accept a request that isn't a PM request
		 * if we are currently power managed. This is very important as
		 * blk_stop_queue() doesn't prevent the elv_next_request()
		 * above from returning us whatever is in the queue. Since we
		 * call ide_do_request() ourselves, we end up taking requests
		 * while the queue is blocked...
		 *
		 * We let requests forced at head of queue with ide-preempt
		 * though. I hope that doesn't happen too much, hopefully not
		 * unless the subdriver triggers such a thing in its own PM
		 * state machine.
		 *
		 * We count how many times we loop here to make sure we service
		 * all drives in the hwgroup without looping forever
		 */
		if (drive->blocked && !blk_pm_request(rq) && !(rq->cmd_flags & REQ_PREEMPT)) {
			drive = drive->next ? drive->next : hwgroup->drive;
			if (loops++ < 4 && !blk_queue_plugged(drive->queue))
				goto again;
			/* We clear busy, there should be no pending ATA command at this point. */
			hwgroup->busy = 0;
			break;
		}

		hwgroup->rq = rq;

		/*
		 * Some systems have trouble with IDE IRQs arriving while
		 * the driver is still setting things up.  So, here we disable
		 * the IRQ used by this interface while the request is being started.
		 * This may look bad at first, but pretty much the same thing
		 * happens anyway when any interrupt comes in, IDE or otherwise
		 *  -- the kernel masks the IRQ while it is being handled.
		 */
		if (masked_irq != IDE_NO_IRQ && hwif->irq != masked_irq)
			disable_irq_nosync(hwif->irq);
		spin_unlock(&ide_lock);
		local_irq_enable_in_hardirq();
		/* allow other IRQs while we start this request */
		startstop = start_request(drive, rq);
		spin_lock_irq(&ide_lock);
		if (masked_irq != IDE_NO_IRQ && hwif->irq != masked_irq)
			enable_irq(hwif->irq);
		if (startstop == ide_stopped)
			hwgroup->busy = 0;
	}
}

/*
 * Passes the stuff to ide_do_request
 */
void do_ide_request(struct request_queue *q)
{
	ide_drive_t *drive = q->queuedata;

	ide_do_request(HWGROUP(drive), IDE_NO_IRQ);
}
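/*
 * Illustrative sketch (editor's addition, #if 0'd out): do_ide_request()
 * is the block layer's entry point; probe code wires it up roughly like
 * this (the real hookup lives in ide-probe.c, which also handles NUMA
 * node placement and queue limits).
 */
#if 0
static struct request_queue *example_init_queue(ide_drive_t *drive)
{
	struct request_queue *q;

	q = blk_init_queue(do_ide_request, &ide_lock);
	if (q)
		q->queuedata = drive;
	return q;
}
#endif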
/*
 * un-busy the hwgroup etc, and clear any pending DMA status. we want to
 * retry the current request in pio mode instead of risking tossing it
 * all away
 */
static ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
{
	ide_hwif_t *hwif = HWIF(drive);
	struct request *rq;
	ide_startstop_t ret = ide_stopped;

	/*
	 * end current dma transaction
	 */

	if (error < 0) {
		printk(KERN_WARNING "%s: DMA timeout error\n", drive->name);
		(void)HWIF(drive)->ide_dma_end(drive);
		ret = ide_error(drive, "dma timeout error",
				ide_read_status(drive));
	} else {
		printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name);
		hwif->dma_timeout(drive);
	}

	/*
	 * disable dma for now, but remember that we did so because of
	 * a timeout -- we'll reenable after we finish this next request
	 * (or rather the first chunk of it) in pio.
	 */
	drive->retry_pio++;
	drive->state = DMA_PIO_RETRY;
	ide_dma_off_quietly(drive);

	/*
	 * un-busy drive etc (hwgroup->busy is cleared on return) and
	 * make sure request is sane
	 */
	rq = HWGROUP(drive)->rq;

	if (!rq)
		goto out;

	HWGROUP(drive)->rq = NULL;

	rq->errors = 0;

	if (!rq->bio)
		goto out;

	rq->sector = rq->bio->bi_sector;
	rq->current_nr_sectors = bio_iovec(rq->bio)->bv_len >> 9;
	rq->hard_cur_sectors = rq->current_nr_sectors;
	rq->buffer = bio_data(rq->bio);
out:
	return ret;
}
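/*
 * Illustrative sketch (editor's addition, #if 0'd out): the shape of an
 * ide_expiry_t callback as consumed by ide_timer_expiry() below --
 * returning a positive jiffy count re-arms the timer, while returning
 * zero falls through to the generic timeout recovery.
 */
#if 0
static int example_expiry(ide_drive_t *drive)
{
	if (ide_read_status(drive) & BUSY_STAT)
		return WAIT_WORSTCASE;	/* still busy, keep waiting */

	return 0;			/* let generic recovery take over */
}
#endif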
/**
 * ide_timer_expiry - handle lack of an IDE interrupt
 * @data: timer callback magic (hwgroup)
 *
 * An IDE command has timed out before the expected drive return
 * occurred. At this point we attempt to clean up the current
 * mess. If the current handler includes an expiry handler then
 * we invoke the expiry handler, and providing it is happy the
 * work is done. If that fails we apply generic recovery rules
 * invoking the handler and checking the drive DMA status. We
 * have an excessively incestuous relationship with the DMA
 * logic that wants cleaning up.
 */

void ide_timer_expiry (unsigned long data)
{
	ide_hwgroup_t	*hwgroup = (ide_hwgroup_t *) data;
	ide_handler_t	*handler;
	ide_expiry_t	*expiry;
	unsigned long	flags;
	unsigned long	wait = -1;

	spin_lock_irqsave(&ide_lock, flags);

	if (((handler = hwgroup->handler) == NULL) ||
	    (hwgroup->req_gen != hwgroup->req_gen_timer)) {
		/*
		 * Either a marginal timeout occurred
		 * (got the interrupt just as timer expired),
		 * or we were "sleeping" to give other devices a chance.
		 * Either way, we don't really want to complain about anything.
		 */
		if (hwgroup->sleeping) {
			hwgroup->sleeping = 0;
			hwgroup->busy = 0;
		}
	} else {
		ide_drive_t *drive = hwgroup->drive;
		if (!drive) {
			printk(KERN_ERR "ide_timer_expiry: hwgroup->drive was NULL\n");
			hwgroup->handler = NULL;
		} else {
			ide_hwif_t *hwif;
			ide_startstop_t startstop = ide_stopped;
			if (!hwgroup->busy) {
				hwgroup->busy = 1;	/* paranoia */
				printk(KERN_ERR "%s: ide_timer_expiry: hwgroup->busy was 0 ??\n", drive->name);
			}
			if ((expiry = hwgroup->expiry) != NULL) {
				/* continue */
				if ((wait = expiry(drive)) > 0) {
					/* reset timer */
					hwgroup->timer.expires = jiffies + wait;
					hwgroup->req_gen_timer = hwgroup->req_gen;
					add_timer(&hwgroup->timer);
					spin_unlock_irqrestore(&ide_lock, flags);
					return;
				}
			}
			hwgroup->handler = NULL;
			/*
			 * We need to simulate a real interrupt when invoking
			 * the handler() function, which means we need to
			 * globally mask the specific IRQ:
			 */
			spin_unlock(&ide_lock);
			hwif = HWIF(drive);
			/* disable_irq_nosync ?? */
			disable_irq(hwif->irq);
			/* local CPU only,
			 * as if we were handling an interrupt */
			local_irq_disable();
			if (hwgroup->polling) {
				startstop = handler(drive);
			} else if (drive_is_ready(drive)) {
				if (drive->waiting_for_dma)
					hwgroup->hwif->dma_lost_irq(drive);
				(void)ide_ack_intr(hwif);
				printk(KERN_WARNING "%s: lost interrupt\n", drive->name);
				startstop = handler(drive);
			} else {
				if (drive->waiting_for_dma) {
					startstop = ide_dma_timeout_retry(drive, wait);
				} else
					startstop =
					ide_error(drive, "irq timeout",
						  ide_read_status(drive));
			}
			drive->service_time = jiffies - drive->service_start;
			spin_lock_irq(&ide_lock);
			enable_irq(hwif->irq);
			if (startstop == ide_stopped)
				hwgroup->busy = 0;
		}
	}
	ide_do_request(hwgroup, IDE_NO_IRQ);
	spin_unlock_irqrestore(&ide_lock, flags);
}
/**
 * unexpected_intr - handle an unexpected IDE interrupt
 * @irq: interrupt line
 * @hwgroup: hwgroup being processed
 *
 * There's nothing really useful we can do with an unexpected interrupt,
 * other than reading the status register (to clear it), and logging it.
 * There should be no way that an irq can happen before we're ready for it,
 * so we needn't worry much about losing an "important" interrupt here.
 *
 * On laptops (and "green" PCs), an unexpected interrupt occurs whenever
 * the drive enters "idle", "standby", or "sleep" mode, so if the status
 * looks "good", we just ignore the interrupt completely.
 *
 * This routine assumes __cli() is in effect when called.
 *
 * If an unexpected interrupt happens on irq15 while we are handling irq14
 * and if the two interfaces are "serialized" (CMD640), then it looks like
 * we could screw up by interfering with a new request being set up for
 * irq15.
 *
 * In reality, this is a non-issue. The new command is not sent unless
 * the drive is ready to accept one, in which case we know the drive is
 * not trying to interrupt us. And ide_set_handler() is always invoked
 * before completing the issuance of any new drive command, so we will not
 * be accidentally invoked as a result of any valid command completion
 * interrupt.
 *
 * Note that we must walk the entire hwgroup here. We know which hwif
 * is doing the current command, but we don't know which hwif burped
 * mysteriously.
 */

static void unexpected_intr (int irq, ide_hwgroup_t *hwgroup)
{
	u8 stat;
	ide_hwif_t *hwif = hwgroup->hwif;

	/*
	 * handle the unexpected interrupt
	 */
	do {
		if (hwif->irq == irq) {
			stat = hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]);
			if (!OK_STAT(stat, READY_STAT, BAD_STAT)) {
				/* Try to not flood the console with msgs */
				static unsigned long last_msgtime, count;
				++count;
				if (time_after(jiffies, last_msgtime + HZ)) {
					last_msgtime = jiffies;
					printk(KERN_ERR "%s%s: unexpected interrupt, "
						"status=0x%02x, count=%ld\n",
						hwif->name,
						(hwif->next == hwgroup->hwif) ? "" : "(?)", stat, count);
				}
			}
		}
	} while ((hwif = hwif->next) != hwgroup->hwif);
}

/**
 * ide_intr - default IDE interrupt handler
 * @irq: interrupt number
 * @dev_id: hwif group
 *
 * This is the default IRQ handler for the IDE layer. You should
 * not need to override it. If you do be aware it is subtle in
 * places.
 *
 * hwgroup->hwif is the interface in the group currently performing
 * a command. hwgroup->drive is the drive and hwgroup->handler is
 * the IRQ handler to call. As we issue a command the handlers
 * step through multiple states, reassigning the handler to the
 * next step in the process. Unlike a smart SCSI controller IDE
 * expects the main processor to sequence the various transfer
 * stages. We also manage a poll timer to catch up with most
 * timeout situations. There are still a few where the handlers
 * don't ever decide to give up.
 *
 * The handler eventually returns ide_stopped to indicate the
 * request completed. At this point we issue the next request
 * on the hwgroup and the process begins again.
 */

irqreturn_t ide_intr (int irq, void *dev_id)
{
	unsigned long flags;
	ide_hwgroup_t *hwgroup = (ide_hwgroup_t *)dev_id;
	ide_hwif_t *hwif;
	ide_drive_t *drive;
	ide_handler_t *handler;
	ide_startstop_t startstop;

	spin_lock_irqsave(&ide_lock, flags);
	hwif = hwgroup->hwif;

	if (!ide_ack_intr(hwif)) {
		spin_unlock_irqrestore(&ide_lock, flags);
		return IRQ_NONE;
	}

	if ((handler = hwgroup->handler) == NULL || hwgroup->polling) {
		/*
		 * Not expecting an interrupt from this drive.
		 * That means this could be:
		 *	(1) an interrupt from another PCI device
		 *	sharing the same PCI INT# as us.
		 * or	(2) a drive just entered sleep or standby mode,
		 *	and is interrupting to let us know.
		 * or	(3) a spurious interrupt of unknown origin.
		 *
		 * For PCI, we cannot tell the difference,
		 * so in that case we just ignore it and hope it goes away.
		 *
		 * FIXME: unexpected_intr should be hwif-> then we can
		 * remove all the ifdef PCI crap
		 */
#ifdef CONFIG_BLK_DEV_IDEPCI
		if (hwif->chipset != ide_pci)
#endif	/* CONFIG_BLK_DEV_IDEPCI */
		{
			/*
			 * Probably not a shared PCI interrupt,
			 * so we can safely try to do something about it:
			 */
			unexpected_intr(irq, hwgroup);
#ifdef CONFIG_BLK_DEV_IDEPCI
		} else {
			/*
			 * Whack the status register, just in case
			 * we have a leftover pending IRQ.
			 */
			(void) hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]);
#endif /* CONFIG_BLK_DEV_IDEPCI */
		}
		spin_unlock_irqrestore(&ide_lock, flags);
		return IRQ_NONE;
	}
	drive = hwgroup->drive;
	if (!drive) {
		/*
		 * This should NEVER happen, and there isn't much
		 * we could do about it here.
		 *
		 * [Note - this can occur if the drive is hot unplugged]
		 */
		spin_unlock_irqrestore(&ide_lock, flags);
		return IRQ_HANDLED;
	}
	if (!drive_is_ready(drive)) {
		/*
		 * This happens regularly when we share a PCI IRQ with
		 * another device.  Unfortunately, it can also happen
		 * with some buggy drives that trigger the IRQ before
		 * their status register is up to date.  Hopefully we have
		 * enough advance overhead that the latter isn't a problem.
		 */
		spin_unlock_irqrestore(&ide_lock, flags);
		return IRQ_NONE;
	}
	if (!hwgroup->busy) {
		hwgroup->busy = 1;	/* paranoia */
		printk(KERN_ERR "%s: ide_intr: hwgroup->busy was 0 ??\n", drive->name);
	}
	hwgroup->handler = NULL;
	hwgroup->req_gen++;
	del_timer(&hwgroup->timer);
	spin_unlock(&ide_lock);

	/* Some controllers might set DMA INTR regardless of whether the
	 * transfer was DMA or PIO; bmdma status might need to be cleared
	 * even for PIO interrupts to prevent a spurious/lost irq.
	 */
	if (hwif->ide_dma_clear_irq && !(drive->waiting_for_dma))
		/* ide_dma_end() needs bmdma status for error checking.
		 * So, skip clearing bmdma status here and leave it
		 * to ide_dma_end() if this is dma interrupt.
		 */
		hwif->ide_dma_clear_irq(drive);

	if (drive->unmask)
		local_irq_enable_in_hardirq();
	/* service this interrupt, may set handler for next interrupt */
	startstop = handler(drive);
	spin_lock_irq(&ide_lock);

	/*
	 * Note that handler() may have set things up for another
	 * interrupt to occur soon, but it cannot happen until
	 * we exit from this routine, because it will be the
	 * same irq as is currently being serviced here, and Linux
	 * won't allow another of the same (on any CPU) until we return.
	 */
	drive->service_time = jiffies - drive->service_start;
	if (startstop == ide_stopped) {
		if (hwgroup->handler == NULL) {	/* paranoia */
			hwgroup->busy = 0;
			ide_do_request(hwgroup, hwif->irq);
		} else {
			printk(KERN_ERR "%s: ide_intr: huh? expected NULL handler "
				"on exit\n", drive->name);
		}
	}
	spin_unlock_irqrestore(&ide_lock, flags);
	return IRQ_HANDLED;
}
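/*
 * Illustrative sketch (editor's addition, #if 0'd out): ide_intr() is
 * installed per-hwgroup at probe time roughly like this (the real code,
 * including the IRQ sharing and serialization decisions, is init_irq()
 * in ide-probe.c).
 */
#if 0
static int example_hook_irq(ide_hwif_t *hwif, ide_hwgroup_t *hwgroup)
{
	return request_irq(hwif->irq, &ide_intr, IRQF_SHARED,
			   hwif->name, hwgroup);
}
#endif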
/**
 * ide_init_drive_cmd - initialize a drive command request
 * @rq: request object
 *
 * Initialize a request before we fill it in and send it down to
 * ide_do_drive_cmd. Commands must be set up by this function. Right
 * now it doesn't do a lot, but if that changes abusers will have a
 * nasty surprise.
 */

void ide_init_drive_cmd (struct request *rq)
{
	memset(rq, 0, sizeof(*rq));
	rq->ref_count = 1;
}

EXPORT_SYMBOL(ide_init_drive_cmd);
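/*
 * Illustrative sketch (editor's addition, #if 0'd out): queueing a
 * taskfile request from process context and sleeping until it completes,
 * in the style of the ioctl paths that use ide_do_drive_cmd() below.
 */
#if 0
static int example_issue_taskfile(ide_drive_t *drive, ide_task_t *task)
{
	struct request rq;

	ide_init_drive_cmd(&rq);
	rq.cmd_type = REQ_TYPE_ATA_TASKFILE;
	rq.special = task;

	return ide_do_drive_cmd(drive, &rq, ide_wait);
}
#endif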
/**
 * ide_do_drive_cmd - issue IDE special command
 * @drive: device to issue command
 * @rq: request to issue
 * @action: action for processing
 *
 * This function issues a special IDE device request
 * onto the request queue.
 *
 * If action is ide_wait, then the rq is queued at the end of the
 * request queue, and the function sleeps until it has been processed.
 * This is for use when invoked from an ioctl handler.
 *
 * If action is ide_preempt, then the rq is queued at the head of
 * the request queue, displacing the currently-being-processed
 * request and this function returns immediately without waiting
 * for the new rq to be completed. This is VERY DANGEROUS, and is
 * intended for careful use by the ATAPI tape/cdrom driver code.
 *
 * If action is ide_end, then the rq is queued at the end of the
 * request queue, and the function returns immediately without waiting
 * for the new rq to be completed. This is again intended for careful
 * use by the ATAPI tape/cdrom driver code.
 */

int ide_do_drive_cmd (ide_drive_t *drive, struct request *rq, ide_action_t action)
{
	unsigned long flags;
	ide_hwgroup_t *hwgroup = HWGROUP(drive);
	DECLARE_COMPLETION_ONSTACK(wait);
	int where = ELEVATOR_INSERT_BACK, err;
	int must_wait = (action == ide_wait || action == ide_head_wait);

	rq->errors = 0;

	/*
	 * we need to hold an extra reference to request for safe inspection
	 * after completion
	 */
	if (must_wait) {
		rq->ref_count++;
		rq->end_io_data = &wait;
		rq->end_io = blk_end_sync_rq;
	}

	spin_lock_irqsave(&ide_lock, flags);
	if (action == ide_preempt)
		hwgroup->rq = NULL;
	if (action == ide_preempt || action == ide_head_wait) {
		where = ELEVATOR_INSERT_FRONT;
		rq->cmd_flags |= REQ_PREEMPT;
	}
	__elv_add_request(drive->queue, rq, where, 0);
	ide_do_request(hwgroup, IDE_NO_IRQ);
	spin_unlock_irqrestore(&ide_lock, flags);

	err = 0;
	if (must_wait) {
		wait_for_completion(&wait);
		if (rq->errors)
			err = -EIO;

		blk_put_request(rq);
	}

	return err;
}

EXPORT_SYMBOL(ide_do_drive_cmd);

void ide_pktcmd_tf_load(ide_drive_t *drive, u32 tf_flags, u16 bcount, u8 dma)
{
	ide_task_t task;

	memset(&task, 0, sizeof(task));
	task.tf_flags = IDE_TFLAG_OUT_LBAH | IDE_TFLAG_OUT_LBAM |
			IDE_TFLAG_OUT_FEATURE | tf_flags;
	task.tf.feature = dma;		/* Use PIO/DMA */
	task.tf.lbam    = bcount & 0xff;
	task.tf.lbah    = (bcount >> 8) & 0xff;

	ide_tf_load(drive, &task);
}

EXPORT_SYMBOL_GPL(ide_pktcmd_tf_load);
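/*
 * Illustrative sketch (editor's addition, #if 0'd out): an ATAPI driver
 * priming the taskfile before issuing a packet command, in the style of
 * ide-cd; bcount is the byte count per interrupt and dma selects the
 * Features-register DMA bit.
 */
#if 0
static void example_start_packet_cmd(ide_drive_t *drive, u16 bcount, u8 dma)
{
	ide_pktcmd_tf_load(drive, IDE_TFLAG_OUT_NSECT | IDE_TFLAG_OUT_LBAL,
			   bcount, dma);
}
#endif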