ide-io.c revision 23579a2a170265aacf78069f4817a41c1d6e9323
/*
 * IDE I/O functions
 *
 * Basic PIO and command management functionality.
 *
 * This code was split off from ide.c. See ide.c for history and original
 * copyrights.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * For the avoidance of doubt the "preferred form" of this code is one which
 * is in an open non patent encumbered format. Where cryptographic key signing
 * forms part of the process of creating an executable the information
 * including keys needed to generate an equivalently functional executable
 * are deemed to be part of the source code.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/major.h>
#include <linux/errno.h>
#include <linux/genhd.h>
#include <linux/blkpg.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/ide.h>
#include <linux/completion.h>
#include <linux/reboot.h>
#include <linux/cdrom.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/kmod.h>
#include <linux/scatterlist.h>
#include <linux/bitops.h>

#include <asm/byteorder.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <asm/io.h>

static int __ide_end_request(ide_drive_t *drive, struct request *rq,
			     int uptodate, unsigned int nr_bytes, int dequeue)
{
	int ret = 1;
	int error = 0;

	if (uptodate <= 0)
		error = uptodate ? uptodate : -EIO;

	/*
	 * if failfast is set on a request, override number of sectors and
	 * complete the whole request right now
	 */
	if (blk_noretry_request(rq) && error)
		nr_bytes = rq->hard_nr_sectors << 9;

	if (!blk_fs_request(rq) && error && !rq->errors)
		rq->errors = -EIO;

	/*
	 * decide whether to reenable DMA -- 3 is a random magic for now,
	 * if we DMA timeout more than 3 times, just stay in PIO
	 */
	if (drive->state == DMA_PIO_RETRY && drive->retry_pio <= 3) {
		drive->state = 0;
		ide_dma_on(drive);
	}

	if (!__blk_end_request(rq, error, nr_bytes)) {
		if (dequeue)
			HWGROUP(drive)->rq = NULL;
		ret = 0;
	}

	return ret;
}
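
/*
 * Callers pass "uptodate" using the historical convention: 1 for
 * success, 0 for a generic failure (turned into -EIO above), or a
 * negative errno to report a specific error.  An illustrative (not
 * in-tree) caller:
 *
 *	if (write_failed)
 *		ide_end_request(drive, -EIO, nr_sectors);
 *	else
 *		ide_end_request(drive, 1, nr_sectors);
 */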

/**
 *	ide_end_request		-	complete an IDE I/O
 *	@drive: IDE device for the I/O
 *	@uptodate: request status (1 on success, <= 0 on error)
 *	@nr_sectors: number of sectors completed
 *
 *	This is our end_request wrapper function. We complete the I/O,
 *	update the random number input and dequeue the request, which if
 *	it was tagged may be out of order.
 */

int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
{
	unsigned int nr_bytes = nr_sectors << 9;
	struct request *rq;
	unsigned long flags;
	int ret = 1;

	/*
	 * room for locking improvements here, the calls below don't
	 * need the queue lock held at all
	 */
	spin_lock_irqsave(&ide_lock, flags);
	rq = HWGROUP(drive)->rq;

	if (!nr_bytes) {
		if (blk_pc_request(rq))
			nr_bytes = rq->data_len;
		else
			nr_bytes = rq->hard_cur_sectors << 9;
	}

	ret = __ide_end_request(drive, rq, uptodate, nr_bytes, 1);

	spin_unlock_irqrestore(&ide_lock, flags);
	return ret;
}
EXPORT_SYMBOL(ide_end_request);

/*
 * Power Management state machine. This one is rather trivial for now,
 * we should probably add more, like switching back to PIO on suspend
 * to help some BIOSes, re-do the door locking on resume, etc...
 */

enum {
	ide_pm_flush_cache	= ide_pm_state_start_suspend,
	idedisk_pm_standby,

	idedisk_pm_restore_pio	= ide_pm_state_start_resume,
	idedisk_pm_idle,
	ide_pm_restore_dma,
};

static void ide_complete_power_step(ide_drive_t *drive, struct request *rq, u8 stat, u8 error)
{
	struct request_pm_state *pm = rq->data;

	if (drive->media != ide_disk)
		return;

	switch (pm->pm_step) {
	case ide_pm_flush_cache:	/* Suspend step 1 (flush cache) complete */
		if (pm->pm_state == PM_EVENT_FREEZE)
			pm->pm_step = ide_pm_state_completed;
		else
			pm->pm_step = idedisk_pm_standby;
		break;
	case idedisk_pm_standby:	/* Suspend step 2 (standby) complete */
		pm->pm_step = ide_pm_state_completed;
		break;
	case idedisk_pm_restore_pio:	/* Resume step 1 complete */
		pm->pm_step = idedisk_pm_idle;
		break;
	case idedisk_pm_idle:		/* Resume step 2 (idle) complete */
		pm->pm_step = ide_pm_restore_dma;
		break;
	}
}
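
/*
 * Step sequencing, as encoded above: suspend runs ide_pm_flush_cache
 * -> idedisk_pm_standby, resume runs idedisk_pm_restore_pio ->
 * idedisk_pm_idle -> ide_pm_restore_dma, and either path finishes at
 * ide_pm_state_completed.
 */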

static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
{
	struct request_pm_state *pm = rq->data;
	ide_task_t *args = rq->special;

	memset(args, 0, sizeof(*args));

	switch (pm->pm_step) {
	case ide_pm_flush_cache:	/* Suspend step 1 (flush cache) */
		if (drive->media != ide_disk)
			break;
		/* Not supported? Switch to next step now. */
		if (!drive->wcache || !ide_id_has_flush_cache(drive->id)) {
			ide_complete_power_step(drive, rq, 0, 0);
			return ide_stopped;
		}
		if (ide_id_has_flush_cache_ext(drive->id))
			args->tf.command = WIN_FLUSH_CACHE_EXT;
		else
			args->tf.command = WIN_FLUSH_CACHE;
		goto out_do_tf;

	case idedisk_pm_standby:	/* Suspend step 2 (standby) */
		args->tf.command = WIN_STANDBYNOW1;
		goto out_do_tf;

	case idedisk_pm_restore_pio:	/* Resume step 1 (restore PIO) */
		ide_set_max_pio(drive);
		/*
		 * skip idedisk_pm_idle for ATAPI devices
		 */
		if (drive->media != ide_disk)
			pm->pm_step = ide_pm_restore_dma;
		else
			ide_complete_power_step(drive, rq, 0, 0);
		return ide_stopped;

	case idedisk_pm_idle:		/* Resume step 2 (idle) */
		args->tf.command = WIN_IDLEIMMEDIATE;
		goto out_do_tf;

	case ide_pm_restore_dma:	/* Resume step 3 (restore DMA) */
		/*
		 * Right now, all we do is call ide_set_dma(drive),
		 * we could be smarter and check for current xfer_speed
		 * in struct drive etc...
		 */
		if (drive->hwif->dma_host_set == NULL)
			break;
		/*
		 * TODO: respect ->using_dma setting
		 */
		ide_set_dma(drive);
		break;
	}
	pm->pm_step = ide_pm_state_completed;
	return ide_stopped;

out_do_tf:
	args->tf_flags	 = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
	args->data_phase = TASKFILE_NO_DATA;
	return do_rw_taskfile(drive, args);
}

/**
 *	ide_end_dequeued_request	-	complete an IDE I/O
 *	@drive: IDE device for the I/O
 *	@rq: request being completed
 *	@uptodate: request status (1 on success, <= 0 on error)
 *	@nr_sectors: number of sectors completed
 *
 *	Complete an I/O that is no longer on the request queue. This
 *	typically occurs when we pull the request and issue a REQUEST_SENSE.
 *	We must still finish the old request but we must not tamper with the
 *	queue in the meantime.
 *
 *	NOTE: This path does not handle barrier, but barrier is not supported
 *	on ide-cd anyway.
 */

int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq,
			     int uptodate, int nr_sectors)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ide_lock, flags);
	BUG_ON(!blk_rq_started(rq));
	ret = __ide_end_request(drive, rq, uptodate, nr_sectors << 9, 0);
	spin_unlock_irqrestore(&ide_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(ide_end_dequeued_request);

/**
 *	ide_complete_pm_request - end the current Power Management request
 *	@drive: target drive
 *	@rq: request
 *
 *	This function cleans up the current PM request and stops the queue
 *	if necessary.
 */
static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq)
{
	unsigned long flags;

#ifdef DEBUG_PM
	printk("%s: completing PM request, %s\n", drive->name,
	       blk_pm_suspend_request(rq) ? "suspend" : "resume");
#endif
	spin_lock_irqsave(&ide_lock, flags);
	if (blk_pm_suspend_request(rq)) {
		blk_stop_queue(drive->queue);
	} else {
		drive->blocked = 0;
		blk_start_queue(drive->queue);
	}
	HWGROUP(drive)->rq = NULL;
	if (__blk_end_request(rq, 0, 0))
		BUG();
	spin_unlock_irqrestore(&ide_lock, flags);
}

void ide_tf_read(ide_drive_t *drive, ide_task_t *task)
{
	ide_hwif_t *hwif = drive->hwif;
	struct ide_taskfile *tf = &task->tf;

	if (task->tf_flags & IDE_TFLAG_IN_DATA) {
		u16 data = hwif->INW(hwif->io_ports[IDE_DATA_OFFSET]);

		tf->data = data & 0xff;
		tf->hob_data = (data >> 8) & 0xff;
	}

	/* be sure we're looking at the low order bits */
	hwif->OUTB(drive->ctl & ~0x80, hwif->io_ports[IDE_CONTROL_OFFSET]);

	if (task->tf_flags & IDE_TFLAG_IN_NSECT)
		tf->nsect  = hwif->INB(hwif->io_ports[IDE_NSECTOR_OFFSET]);
	if (task->tf_flags & IDE_TFLAG_IN_LBAL)
		tf->lbal   = hwif->INB(hwif->io_ports[IDE_SECTOR_OFFSET]);
	if (task->tf_flags & IDE_TFLAG_IN_LBAM)
		tf->lbam   = hwif->INB(hwif->io_ports[IDE_LCYL_OFFSET]);
	if (task->tf_flags & IDE_TFLAG_IN_LBAH)
		tf->lbah   = hwif->INB(hwif->io_ports[IDE_HCYL_OFFSET]);
	if (task->tf_flags & IDE_TFLAG_IN_DEVICE)
		tf->device = hwif->INB(hwif->io_ports[IDE_SELECT_OFFSET]);

	if (task->tf_flags & IDE_TFLAG_LBA48) {
		hwif->OUTB(drive->ctl | 0x80,
			   hwif->io_ports[IDE_CONTROL_OFFSET]);

		if (task->tf_flags & IDE_TFLAG_IN_HOB_FEATURE)
			tf->hob_feature =
				hwif->INB(hwif->io_ports[IDE_FEATURE_OFFSET]);
		if (task->tf_flags & IDE_TFLAG_IN_HOB_NSECT)
			tf->hob_nsect =
				hwif->INB(hwif->io_ports[IDE_NSECTOR_OFFSET]);
		if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAL)
			tf->hob_lbal =
				hwif->INB(hwif->io_ports[IDE_SECTOR_OFFSET]);
		if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAM)
			tf->hob_lbam =
				hwif->INB(hwif->io_ports[IDE_LCYL_OFFSET]);
		if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAH)
			tf->hob_lbah =
				hwif->INB(hwif->io_ports[IDE_HCYL_OFFSET]);
	}
}

/**
 *	ide_end_drive_cmd	-	end an explicit drive command
 *	@drive: drive the command was issued to
 *	@stat: status bits
 *	@err: error bits
 *
 *	Clean up after success/failure of an explicit drive command.
 *	These get thrown onto the queue so they are synchronized with
 *	real I/O operations on the drive.
 *
 *	In LBA48 mode we have to read the register set twice to get
 *	all the extra information out.
 */

void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
{
	unsigned long flags;
	struct request *rq;

	spin_lock_irqsave(&ide_lock, flags);
	rq = HWGROUP(drive)->rq;
	spin_unlock_irqrestore(&ide_lock, flags);

	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
		ide_task_t *task = (ide_task_t *)rq->special;

		if (rq->errors == 0)
			rq->errors = !OK_STAT(stat, READY_STAT, BAD_STAT);

		if (task) {
			struct ide_taskfile *tf = &task->tf;

			tf->error = err;
			tf->status = stat;

			ide_tf_read(drive, task);

			if (task->tf_flags & IDE_TFLAG_DYN)
				kfree(task);
		}
	} else if (blk_pm_request(rq)) {
		struct request_pm_state *pm = rq->data;
#ifdef DEBUG_PM
		printk("%s: complete_power_step(step: %d, stat: %x, err: %x)\n",
			drive->name, pm->pm_step, stat, err);
#endif
		ide_complete_power_step(drive, rq, stat, err);
		if (pm->pm_step == ide_pm_state_completed)
			ide_complete_pm_request(drive, rq);
		return;
	}

	spin_lock_irqsave(&ide_lock, flags);
	HWGROUP(drive)->rq = NULL;
	rq->errors = err;
	if (unlikely(__blk_end_request(rq, (rq->errors ? -EIO : 0),
				       blk_rq_bytes(rq))))
		BUG();
	spin_unlock_irqrestore(&ide_lock, flags);
}

EXPORT_SYMBOL(ide_end_drive_cmd);
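
/*
 * Note: tasks allocated on the fly (IDE_TFLAG_DYN) are freed above as
 * soon as the taskfile registers have been read back; caller-owned
 * tasks survive, so the caller can inspect tf->status and tf->error
 * after completion.
 */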

/**
 *	try_to_flush_leftover_data	-	flush junk
 *	@drive: drive to flush
 *
 *	try_to_flush_leftover_data() is invoked in response to a drive
 *	unexpectedly having its DRQ_STAT bit set. As an alternative to
 *	resetting the drive, this routine tries to clear the condition
 *	by reading a sector's worth of data from the drive. Of course,
 *	this may not help if the drive is *waiting* for data from *us*.
 */
static void try_to_flush_leftover_data (ide_drive_t *drive)
{
	int i = (drive->mult_count ? drive->mult_count : 1) * SECTOR_WORDS;

	if (drive->media != ide_disk)
		return;
	while (i > 0) {
		u32 buffer[16];
		u32 wcount = (i > 16) ? 16 : i;

		i -= wcount;
		HWIF(drive)->ata_input_data(drive, buffer, wcount);
	}
}

static void ide_kill_rq(ide_drive_t *drive, struct request *rq)
{
	if (rq->rq_disk) {
		ide_driver_t *drv;

		drv = *(ide_driver_t **)rq->rq_disk->private_data;
		drv->end_request(drive, 0, 0);
	} else
		ide_end_request(drive, 0, 0);
}

static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
{
	ide_hwif_t *hwif = drive->hwif;

	if (stat & BUSY_STAT || ((stat & WRERR_STAT) && !drive->nowerr)) {
		/* other bits are useless when BUSY */
		rq->errors |= ERROR_RESET;
	} else if (stat & ERR_STAT) {
		/* err has different meaning on cdrom and tape */
		if (err == ABRT_ERR) {
			if (drive->select.b.lba &&
			    /* some newer drives don't support WIN_SPECIFY */
			    hwif->INB(hwif->io_ports[IDE_COMMAND_OFFSET]) ==
				WIN_SPECIFY)
				return ide_stopped;
		} else if ((err & BAD_CRC) == BAD_CRC) {
			/* UDMA crc error, just retry the operation */
			drive->crc_count++;
		} else if (err & (BBD_ERR | ECC_ERR)) {
			/* retries won't help these */
			rq->errors = ERROR_MAX;
		} else if (err & TRK0_ERR) {
			/* help it find track zero */
			rq->errors |= ERROR_RECAL;
		}
	}

	if ((stat & DRQ_STAT) && rq_data_dir(rq) == READ &&
	    (hwif->host_flags & IDE_HFLAG_ERROR_STOPS_FIFO) == 0)
		try_to_flush_leftover_data(drive);

	if (rq->errors >= ERROR_MAX || blk_noretry_request(rq)) {
		ide_kill_rq(drive, rq);
		return ide_stopped;
	}

	if (ide_read_status(drive) & (BUSY_STAT | DRQ_STAT))
		rq->errors |= ERROR_RESET;

	if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
		++rq->errors;
		return ide_do_reset(drive);
	}

	if ((rq->errors & ERROR_RECAL) == ERROR_RECAL)
		drive->special.b.recalibrate = 1;

	++rq->errors;

	return ide_stopped;
}

static ide_startstop_t ide_atapi_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
{
	ide_hwif_t *hwif = drive->hwif;

	if (stat & BUSY_STAT || ((stat & WRERR_STAT) && !drive->nowerr)) {
		/* other bits are useless when BUSY */
		rq->errors |= ERROR_RESET;
	} else {
		/* add decoding error stuff */
	}

	if (ide_read_status(drive) & (BUSY_STAT | DRQ_STAT))
		/* force an abort */
		hwif->OUTB(WIN_IDLEIMMEDIATE,
			   hwif->io_ports[IDE_COMMAND_OFFSET]);

	if (rq->errors >= ERROR_MAX) {
		ide_kill_rq(drive, rq);
	} else {
		if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
			++rq->errors;
			return ide_do_reset(drive);
		}
		++rq->errors;
	}

	return ide_stopped;
}

ide_startstop_t
__ide_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
{
	if (drive->media == ide_disk)
		return ide_ata_error(drive, rq, stat, err);
	return ide_atapi_error(drive, rq, stat, err);
}

EXPORT_SYMBOL_GPL(__ide_error);
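
/*
 * Both error paths above share the same retry accounting: rq->errors
 * is bumped once per failed attempt, ERROR_RESET schedules a bus
 * reset, and once rq->errors reaches ERROR_MAX the request is killed
 * via ide_kill_rq() instead of being retried.
 */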

/**
 *	ide_error	-	handle an error on the IDE
 *	@drive: drive the error occurred on
 *	@msg: message to report
 *	@stat: status bits
 *
 *	ide_error() takes action based on the error returned by the drive.
 *	For normal I/O that may well include retries. We deal with
 *	both new-style (taskfile) and old style command handling here.
 *	In the case of taskfile command handling there is work left to
 *	do here.
 */

ide_startstop_t ide_error (ide_drive_t *drive, const char *msg, u8 stat)
{
	struct request *rq;
	u8 err;

	err = ide_dump_status(drive, msg, stat);

	if ((rq = HWGROUP(drive)->rq) == NULL)
		return ide_stopped;

	/* retry only "normal" I/O: */
	if (!blk_fs_request(rq)) {
		rq->errors = 1;
		ide_end_drive_cmd(drive, stat, err);
		return ide_stopped;
	}

	if (rq->rq_disk) {
		ide_driver_t *drv;

		drv = *(ide_driver_t **)rq->rq_disk->private_data;
		return drv->error(drive, rq, stat, err);
	} else
		return __ide_error(drive, rq, stat, err);
}

EXPORT_SYMBOL_GPL(ide_error);

ide_startstop_t __ide_abort(ide_drive_t *drive, struct request *rq)
{
	if (drive->media != ide_disk)
		rq->errors |= ERROR_RESET;

	ide_kill_rq(drive, rq);

	return ide_stopped;
}

EXPORT_SYMBOL_GPL(__ide_abort);

/**
 *	ide_abort	-	abort pending IDE operations
 *	@drive: drive the error occurred on
 *	@msg: message to report
 *
 *	ide_abort kills and cleans up when we are about to do a
 *	host initiated reset on active commands. Longer term we
 *	want handlers to have sensible abort handling themselves.
 *
 *	This differs fundamentally from ide_error because in
 *	this case the command is doing just fine when we
 *	blow it away.
 */

ide_startstop_t ide_abort(ide_drive_t *drive, const char *msg)
{
	struct request *rq;

	if (drive == NULL || (rq = HWGROUP(drive)->rq) == NULL)
		return ide_stopped;

	/* retry only "normal" I/O: */
	if (!blk_fs_request(rq)) {
		rq->errors = 1;
		ide_end_drive_cmd(drive, BUSY_STAT, 0);
		return ide_stopped;
	}

	if (rq->rq_disk) {
		ide_driver_t *drv;

		drv = *(ide_driver_t **)rq->rq_disk->private_data;
		return drv->abort(drive, rq);
	} else
		return __ide_abort(drive, rq);
}
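
/*
 * The three helpers below build the legacy commands replayed by
 * do_special() after a reset: WIN_SPECIFY (set geometry), WIN_RESTORE
 * (recalibrate) and WIN_SETMULT (multiple sector mode).
 */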

static void ide_tf_set_specify_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
{
	tf->nsect   = drive->sect;
	tf->lbal    = drive->sect;
	tf->lbam    = drive->cyl;
	tf->lbah    = drive->cyl >> 8;
	tf->device  = ((drive->head - 1) | drive->select.all) & ~ATA_LBA;
	tf->command = WIN_SPECIFY;
}

static void ide_tf_set_restore_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
{
	tf->nsect   = drive->sect;
	tf->command = WIN_RESTORE;
}

static void ide_tf_set_setmult_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
{
	tf->nsect   = drive->mult_req;
	tf->command = WIN_SETMULT;
}

static ide_startstop_t ide_disk_special(ide_drive_t *drive)
{
	special_t *s = &drive->special;
	ide_task_t args;

	memset(&args, 0, sizeof(ide_task_t));
	args.data_phase = TASKFILE_NO_DATA;

	if (s->b.set_geometry) {
		s->b.set_geometry = 0;
		ide_tf_set_specify_cmd(drive, &args.tf);
	} else if (s->b.recalibrate) {
		s->b.recalibrate = 0;
		ide_tf_set_restore_cmd(drive, &args.tf);
	} else if (s->b.set_multmode) {
		s->b.set_multmode = 0;
		if (drive->mult_req > drive->id->max_multsect)
			drive->mult_req = drive->id->max_multsect;
		ide_tf_set_setmult_cmd(drive, &args.tf);
	} else if (s->all) {
		int special = s->all;
		s->all = 0;
		printk(KERN_ERR "%s: bad special flag: 0x%02x\n", drive->name, special);
		return ide_stopped;
	}

	args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE |
			IDE_TFLAG_CUSTOM_HANDLER;

	do_rw_taskfile(drive, &args);

	return ide_started;
}

/*
 * handle HDIO_SET_PIO_MODE ioctl abusers here, eventually it will go away
 */
static int set_pio_mode_abuse(ide_hwif_t *hwif, u8 req_pio)
{
	switch (req_pio) {
	case 202:
	case 201:
	case 200:
	case 102:
	case 101:
	case 100:
		return (hwif->host_flags & IDE_HFLAG_ABUSE_DMA_MODES) ? 1 : 0;
	case 9:
	case 8:
		return (hwif->host_flags & IDE_HFLAG_ABUSE_PREFETCH) ? 1 : 0;
	case 7:
	case 6:
		return (hwif->host_flags & IDE_HFLAG_ABUSE_FAST_DEVSEL) ? 1 : 0;
	default:
		return 0;
	}
}

/**
 *	do_special	-	issue some special commands
 *	@drive: drive the command is for
 *
 *	do_special() is used to issue WIN_SPECIFY, WIN_RESTORE, and WIN_SETMULT
 *	commands to a drive. It used to do much more, but has been scaled
 *	back.
 */

static ide_startstop_t do_special (ide_drive_t *drive)
{
	special_t *s = &drive->special;

#ifdef DEBUG
	printk("%s: do_special: 0x%02x\n", drive->name, s->all);
#endif
	if (s->b.set_tune) {
		ide_hwif_t *hwif = drive->hwif;
		u8 req_pio = drive->tune_req;

		s->b.set_tune = 0;

		if (set_pio_mode_abuse(drive->hwif, req_pio)) {

			if (hwif->set_pio_mode == NULL)
				return ide_stopped;

			/*
			 * take ide_lock for drive->[no_]unmask/[no_]io_32bit
			 */
			if (req_pio == 8 || req_pio == 9) {
				unsigned long flags;

				spin_lock_irqsave(&ide_lock, flags);
				hwif->set_pio_mode(drive, req_pio);
				spin_unlock_irqrestore(&ide_lock, flags);
			} else
				hwif->set_pio_mode(drive, req_pio);
		} else {
			int keep_dma = drive->using_dma;

			ide_set_pio(drive, req_pio);

			if (hwif->host_flags & IDE_HFLAG_SET_PIO_MODE_KEEP_DMA) {
				if (keep_dma)
					ide_dma_on(drive);
			}
		}

		return ide_stopped;
	} else {
		if (drive->media == ide_disk)
			return ide_disk_special(drive);

		s->all = 0;
		drive->mult_req = 0;
		return ide_stopped;
	}
}

void ide_map_sg(ide_drive_t *drive, struct request *rq)
{
	ide_hwif_t *hwif = drive->hwif;
	struct scatterlist *sg = hwif->sg_table;

	if (hwif->sg_mapped)	/* needed by ide-scsi */
		return;

	if (rq->cmd_type != REQ_TYPE_ATA_TASKFILE) {
		hwif->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
	} else {
		sg_init_one(sg, rq->buffer, rq->nr_sectors * SECTOR_SIZE);
		hwif->sg_nents = 1;
	}
}

EXPORT_SYMBOL_GPL(ide_map_sg);

void ide_init_sg_cmd(ide_drive_t *drive, struct request *rq)
{
	ide_hwif_t *hwif = drive->hwif;

	hwif->nsect = hwif->nleft = rq->nr_sectors;
	hwif->cursg_ofs = 0;
	hwif->cursg = NULL;
}

EXPORT_SYMBOL_GPL(ide_init_sg_cmd);
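
/*
 * Illustrative call order for a data-bearing taskfile command, as
 * used by execute_drive_cmd() below: ide_init_sg_cmd() resets the
 * per-command sector counters, then ide_map_sg() builds the
 * scatterlist from the request:
 *
 *	ide_init_sg_cmd(drive, rq);
 *	ide_map_sg(drive, rq);
 */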

/**
 *	execute_drive_cmd	-	issue special drive command
 *	@drive: the drive to issue the command on
 *	@rq: the request structure holding the command
 *
 *	execute_drive_cmd() issues a special drive command, usually
 *	initiated by ioctl() from the external hdparm program. The
 *	command can be a drive command, drive task or taskfile
 *	operation. Weirdly you can call it with NULL to wait for
 *	all commands to finish. Don't do this as that is due to change.
 */

static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
		struct request *rq)
{
	ide_hwif_t *hwif = HWIF(drive);
	ide_task_t *task = rq->special;

	if (task) {
		hwif->data_phase = task->data_phase;

		switch (hwif->data_phase) {
		case TASKFILE_MULTI_OUT:
		case TASKFILE_OUT:
		case TASKFILE_MULTI_IN:
		case TASKFILE_IN:
			ide_init_sg_cmd(drive, rq);
			ide_map_sg(drive, rq);
		default:
			break;
		}

		return do_rw_taskfile(drive, task);
	}

	/*
	 * NULL is actually a valid way of waiting for
	 * all current requests to be flushed from the queue.
	 */
#ifdef DEBUG
	printk("%s: DRIVE_CMD (null)\n", drive->name);
#endif
	ide_end_drive_cmd(drive, ide_read_status(drive), ide_read_error(drive));

	return ide_stopped;
}

static void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
{
	struct request_pm_state *pm = rq->data;

	if (blk_pm_suspend_request(rq) &&
	    pm->pm_step == ide_pm_state_start_suspend)
		/* Mark drive blocked when starting the suspend sequence. */
		drive->blocked = 1;
	else if (blk_pm_resume_request(rq) &&
		 pm->pm_step == ide_pm_state_start_resume) {
		/*
		 * The first thing we do on wakeup is to wait for BSY bit to
		 * go away (with a looong timeout) as a drive on this hwif may
		 * just be POSTing itself.
		 * We do that before even selecting as the "other" device on
		 * the bus may be broken enough to walk on our toes at this
		 * point.
		 */
		int rc;
#ifdef DEBUG_PM
		printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name);
#endif
		rc = ide_wait_not_busy(HWIF(drive), 35000);
		if (rc)
			printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name);
		SELECT_DRIVE(drive);
		ide_set_irq(drive, 1);
		rc = ide_wait_not_busy(HWIF(drive), 100000);
		if (rc)
			printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);
	}
}
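
/*
 * The two ide_wait_not_busy() timeouts above (35 s before selecting
 * the drive, 100 s after) are deliberately generous: a drive coming
 * back from suspend may hold BSY for a long time while it POSTs.
 */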

/**
 *	start_request	-	start of I/O and command issuing for IDE
 *	@drive: drive the request is for
 *	@rq: request to start
 *
 *	start_request() initiates handling of a new I/O request. It
 *	accepts commands and I/O (read/write) requests. It also does
 *	the final remapping for weird stuff like EZDrive. Once device
 *	mapper can work at the sector level, the EZDrive stuff can go away.
 *
 *	FIXME: this function needs a rename
 */

static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
{
	ide_startstop_t startstop;
	sector_t block;

	BUG_ON(!blk_rq_started(rq));

#ifdef DEBUG
	printk("%s: start_request: current=0x%08lx\n",
		HWIF(drive)->name, (unsigned long) rq);
#endif

	/* bail early if we've exceeded max_failures */
	if (drive->max_failures && (drive->failures > drive->max_failures)) {
		rq->cmd_flags |= REQ_FAILED;
		goto kill_rq;
	}

	block = rq->sector;
	if (blk_fs_request(rq) &&
	    (drive->media == ide_disk || drive->media == ide_floppy)) {
		block += drive->sect0;
	}
	/* Yecch - this will shift the entire interval,
	   possibly killing some innocent following sector */
	if (block == 0 && drive->remap_0_to_1 == 1)
		block = 1;  /* redirect MBR access to EZ-Drive partn table */

	if (blk_pm_request(rq))
		ide_check_pm_state(drive, rq);

	SELECT_DRIVE(drive);
	if (ide_wait_stat(&startstop, drive, drive->ready_stat, BUSY_STAT|DRQ_STAT, WAIT_READY)) {
		printk(KERN_ERR "%s: drive not ready for command\n", drive->name);
		return startstop;
	}
	if (!drive->special.all) {
		ide_driver_t *drv;

		/*
		 * We reset the drive so we need to issue a SETFEATURES.
		 * Do it _after_ do_special() restored device parameters.
		 */
		if (drive->current_speed == 0xff)
			ide_config_drive_speed(drive, drive->desired_speed);

		if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
			return execute_drive_cmd(drive, rq);
		else if (blk_pm_request(rq)) {
			struct request_pm_state *pm = rq->data;
#ifdef DEBUG_PM
			printk("%s: start_power_step(step: %d)\n",
				drive->name, pm->pm_step);
#endif
			startstop = ide_start_power_step(drive, rq);
			if (startstop == ide_stopped &&
			    pm->pm_step == ide_pm_state_completed)
				ide_complete_pm_request(drive, rq);
			return startstop;
		}

		drv = *(ide_driver_t **)rq->rq_disk->private_data;
		return drv->do_request(drive, rq, block);
	}
	return do_special(drive);
kill_rq:
	ide_kill_rq(drive, rq);
	return ide_stopped;
}

/**
 *	ide_stall_queue		-	pause an IDE device
 *	@drive: drive to stall
 *	@timeout: time to stall for (jiffies)
 *
 *	ide_stall_queue() can be used by a drive to give excess bandwidth back
 *	to the hwgroup by sleeping for timeout jiffies.
 */

void ide_stall_queue (ide_drive_t *drive, unsigned long timeout)
{
	if (timeout > WAIT_WORSTCASE)
		timeout = WAIT_WORSTCASE;
	drive->sleep = timeout + jiffies;
	drive->sleeping = 1;
}

EXPORT_SYMBOL(ide_stall_queue);
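
/*
 * Illustrative (not in-tree) use: a driver that knows its device will
 * be busy for a while can yield the channel to the other drive on the
 * cable instead of spinning:
 *
 *	ide_stall_queue(drive, WAIT_WORSTCASE / 2);
 */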

#define WAKEUP(drive)	((drive)->service_start + 2 * (drive)->service_time)

/**
 *	choose_drive	-	select a drive to service
 *	@hwgroup: hardware group to select on
 *
 *	choose_drive() selects the next drive which will be serviced.
 *	This is necessary because the IDE layer can't issue commands
 *	to both drives on the same cable, unlike SCSI.
 */

static inline ide_drive_t *choose_drive (ide_hwgroup_t *hwgroup)
{
	ide_drive_t *drive, *best;

repeat:
	best = NULL;
	drive = hwgroup->drive;

	/*
	 * drive is doing pre-flush, ordered write, post-flush sequence. even
	 * though that is 3 requests, it must be seen as a single transaction.
	 * we must not preempt this drive until that is complete
	 */
	if (blk_queue_flushing(drive->queue)) {
		/*
		 * small race where queue could get replugged during
		 * the 3-request flush cycle, just yank the plug since
		 * we want it to finish asap
		 */
		blk_remove_plug(drive->queue);
		return drive;
	}

	do {
		if ((!drive->sleeping || time_after_eq(jiffies, drive->sleep))
		    && !elv_queue_empty(drive->queue)) {
			if (!best
			 || (drive->sleeping && (!best->sleeping || time_before(drive->sleep, best->sleep)))
			 || (!best->sleeping && time_before(WAKEUP(drive), WAKEUP(best))))
			{
				if (!blk_queue_plugged(drive->queue))
					best = drive;
			}
		}
	} while ((drive = drive->next) != hwgroup->drive);
	if (best && best->nice1 && !best->sleeping && best != hwgroup->drive && best->service_time > WAIT_MIN_SLEEP) {
		long t = (signed long)(WAKEUP(best) - jiffies);
		if (t >= WAIT_MIN_SLEEP) {
			/*
			 * We *may* have some time to spare, but first let's see if
			 * someone can potentially benefit from our nice mood today..
			 */
			drive = best->next;
			do {
				if (!drive->sleeping
				 && time_before(jiffies - best->service_time, WAKEUP(drive))
				 && time_before(WAKEUP(drive), jiffies + t))
				{
					ide_stall_queue(best, min_t(long, t, 10 * WAIT_MIN_SLEEP));
					goto repeat;
				}
			} while ((drive = drive->next) != best);
		}
	}
	return best;
}
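
/*
 * WAKEUP() above estimates when a drive "deserves" service again:
 * twice its last service time past the moment it was last started.
 * choose_drive() uses it both to pick the least recently served drive
 * and to decide whether a "nice" drive can afford to sleep a little
 * longer.
 */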

/*
 * Issue a new request to a drive from hwgroup
 * Caller must have already done spin_lock_irqsave(&ide_lock, ..);
 *
 * A hwgroup is a serialized group of IDE interfaces.  Usually there is
 * exactly one hwif (interface) per hwgroup, but buggy controllers (eg. CMD640)
 * may have both interfaces in a single hwgroup to "serialize" access.
 * Or possibly multiple ISA interfaces can share a common IRQ by being grouped
 * together into one hwgroup for serialized access.
 *
 * Note also that several hwgroups can end up sharing a single IRQ,
 * possibly along with many other devices.  This is especially common in
 * PCI-based systems with off-board IDE controller cards.
 *
 * The IDE driver uses the single global ide_lock spinlock to protect
 * access to the request queues, and to protect the hwgroup->busy flag.
 *
 * The first thread into the driver for a particular hwgroup sets the
 * hwgroup->busy flag to indicate that this hwgroup is now active,
 * and then initiates processing of the top request from the request queue.
 *
 * Other threads attempting entry notice the busy setting, and will simply
 * queue their new requests and exit immediately.  Note that hwgroup->busy
 * remains set even when the driver is merely awaiting the next interrupt.
 * Thus, the meaning is "this hwgroup is busy processing a request".
 *
 * When processing of a request completes, the completing thread or IRQ-handler
 * will start the next request from the queue.  If no more work remains,
 * the driver will clear the hwgroup->busy flag and exit.
 *
 * The ide_lock (spinlock) is used to protect all access to the
 * hwgroup->busy flag, but is otherwise not needed for most processing in
 * the driver.  This makes the driver much friendlier to shared IRQs
 * than previous designs, while remaining 100% (?) SMP safe and capable.
 */
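
/*
 * The masked_irq argument tells ide_do_request() which IRQ the caller
 * already has masked (ide_intr() passes its own hwif->irq), so that
 * IRQ is not redundantly disabled and re-enabled around
 * start_request(); IDE_NO_IRQ means "none".
 */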
static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
{
	ide_drive_t	*drive;
	ide_hwif_t	*hwif;
	struct request	*rq;
	ide_startstop_t	startstop;
	int		loops = 0;

	/* for atari only: POSSIBLY BROKEN HERE(?) */
	ide_get_lock(ide_intr, hwgroup);

	/* caller must own ide_lock */
	BUG_ON(!irqs_disabled());

	while (!hwgroup->busy) {
		hwgroup->busy = 1;
		drive = choose_drive(hwgroup);
		if (drive == NULL) {
			int sleeping = 0;
			unsigned long sleep = 0; /* shut up, gcc */
			hwgroup->rq = NULL;
			drive = hwgroup->drive;
			do {
				if (drive->sleeping && (!sleeping || time_before(drive->sleep, sleep))) {
					sleeping = 1;
					sleep = drive->sleep;
				}
			} while ((drive = drive->next) != hwgroup->drive);
			if (sleeping) {
				/*
				 * Take a short snooze, and then wake up this hwgroup again.
				 * This gives other hwgroups on the same IRQ a chance to
				 * play fairly with us, just in case there are big differences
				 * in relative throughputs.. don't want to hog the cpu too much.
				 */
				if (time_before(sleep, jiffies + WAIT_MIN_SLEEP))
					sleep = jiffies + WAIT_MIN_SLEEP;
#if 1
				if (timer_pending(&hwgroup->timer))
					printk(KERN_CRIT "ide_set_handler: timer already active\n");
#endif
				/* so that ide_timer_expiry knows what to do */
				hwgroup->sleeping = 1;
				hwgroup->req_gen_timer = hwgroup->req_gen;
				mod_timer(&hwgroup->timer, sleep);
				/* we purposely leave hwgroup->busy==1
				 * while sleeping */
			} else {
				/* Ugly, but how can we sleep for the lock
				 * otherwise? perhaps from tq_disk?
				 */

				/* for atari only */
				ide_release_lock();
				hwgroup->busy = 0;
			}

			/* no more work for this hwgroup (for now) */
			return;
		}
	again:
		hwif = HWIF(drive);
		if (hwgroup->hwif->sharing_irq && hwif != hwgroup->hwif) {
			/*
			 * set nIEN for previous hwif, drives in the
			 * quirk_list may not like intr setups/cleanups
			 */
			if (drive->quirk_list != 1)
				ide_set_irq(drive, 0);
		}
		hwgroup->hwif = hwif;
		hwgroup->drive = drive;
		drive->sleeping = 0;
		drive->service_start = jiffies;

		if (blk_queue_plugged(drive->queue)) {
			printk(KERN_ERR "ide: huh? queue was plugged!\n");
			break;
		}

		/*
		 * we know that the queue isn't empty, but this can happen
		 * if the q->prep_rq_fn() decides to kill a request
		 */
		rq = elv_next_request(drive->queue);
		if (!rq) {
			hwgroup->busy = 0;
			break;
		}

		/*
		 * Sanity: don't accept a request that isn't a PM request
		 * if we are currently power managed. This is very important as
		 * blk_stop_queue() doesn't prevent the elv_next_request()
		 * above from returning us whatever is in the queue. Since we
		 * call ide_do_request() ourselves, we end up taking requests
		 * while the queue is blocked...
		 *
		 * We let requests forced at head of queue with ide-preempt
		 * though. I hope that doesn't happen too much, hopefully not
		 * unless the subdriver triggers such a thing in its own PM
		 * state machine.
		 *
		 * We count how many times we loop here to make sure we service
		 * all drives in the hwgroup without looping forever
		 */
		if (drive->blocked && !blk_pm_request(rq) && !(rq->cmd_flags & REQ_PREEMPT)) {
			drive = drive->next ? drive->next : hwgroup->drive;
			if (loops++ < 4 && !blk_queue_plugged(drive->queue))
				goto again;
			/* We clear busy, there should be no pending ATA command at this point. */
			hwgroup->busy = 0;
			break;
		}

		hwgroup->rq = rq;

		/*
		 * Some systems have trouble with IDE IRQs arriving while
		 * the driver is still setting things up.  So, here we disable
		 * the IRQ used by this interface while the request is being started.
		 * This may look bad at first, but pretty much the same thing
		 * happens anyway when any interrupt comes in, IDE or otherwise
		 *  -- the kernel masks the IRQ while it is being handled.
		 */
		if (masked_irq != IDE_NO_IRQ && hwif->irq != masked_irq)
			disable_irq_nosync(hwif->irq);
		spin_unlock(&ide_lock);
		local_irq_enable_in_hardirq();
			/* allow other IRQs while we start this request */
		startstop = start_request(drive, rq);
		spin_lock_irq(&ide_lock);
		if (masked_irq != IDE_NO_IRQ && hwif->irq != masked_irq)
			enable_irq(hwif->irq);
		if (startstop == ide_stopped)
			hwgroup->busy = 0;
	}
}

/*
 * Passes the stuff to ide_do_request
 */
void do_ide_request(struct request_queue *q)
{
	ide_drive_t *drive = q->queuedata;

	ide_do_request(HWGROUP(drive), IDE_NO_IRQ);
}

/*
 * un-busy the hwgroup etc, and clear any pending DMA status. we want to
 * retry the current request in pio mode instead of risking tossing it
 * all away
 */
static ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
{
	ide_hwif_t *hwif = HWIF(drive);
	struct request *rq;
	ide_startstop_t ret = ide_stopped;

	/*
	 * end current dma transaction
	 */

	if (error < 0) {
		printk(KERN_WARNING "%s: DMA timeout error\n", drive->name);
		(void)HWIF(drive)->ide_dma_end(drive);
		ret = ide_error(drive, "dma timeout error",
				ide_read_status(drive));
	} else {
		printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name);
		hwif->dma_timeout(drive);
	}

	/*
	 * disable dma for now, but remember that we did so because of
	 * a timeout -- we'll reenable after we finish this next request
	 * (or rather the first chunk of it) in pio.
	 */
	drive->retry_pio++;
	drive->state = DMA_PIO_RETRY;
	ide_dma_off_quietly(drive);

	/*
	 * un-busy drive etc (hwgroup->busy is cleared on return) and
	 * make sure request is sane
	 */
	rq = HWGROUP(drive)->rq;

	if (!rq)
		goto out;

	HWGROUP(drive)->rq = NULL;

	rq->errors = 0;

	if (!rq->bio)
		goto out;

	rq->sector = rq->bio->bi_sector;
	rq->current_nr_sectors = bio_iovec(rq->bio)->bv_len >> 9;
	rq->hard_cur_sectors = rq->current_nr_sectors;
	rq->buffer = bio_data(rq->bio);
out:
	return ret;
}
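
/*
 * Note the round trip: ide_dma_timeout_retry() marks the drive
 * DMA_PIO_RETRY and bumps retry_pio, and __ide_end_request() at the
 * top of this file is what turns DMA back on once the retried request
 * completes in PIO mode (for up to three timeouts).
 */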

/**
 *	ide_timer_expiry	-	handle lack of an IDE interrupt
 *	@data: timer callback magic (hwgroup)
 *
 *	An IDE command has timed out before the expected drive return
 *	occurred. At this point we attempt to clean up the current
 *	mess. If the current handler includes an expiry handler then
 *	we invoke the expiry handler, and providing it is happy the
 *	work is done. If that fails we apply generic recovery rules
 *	invoking the handler and checking the drive DMA status. We
 *	have an excessively incestuous relationship with the DMA
 *	logic that wants cleaning up.
 */

void ide_timer_expiry (unsigned long data)
{
	ide_hwgroup_t	*hwgroup = (ide_hwgroup_t *) data;
	ide_handler_t	*handler;
	ide_expiry_t	*expiry;
	unsigned long	flags;
	unsigned long	wait = -1;

	spin_lock_irqsave(&ide_lock, flags);

	if (((handler = hwgroup->handler) == NULL) ||
	    (hwgroup->req_gen != hwgroup->req_gen_timer)) {
		/*
		 * Either a marginal timeout occurred
		 * (got the interrupt just as timer expired),
		 * or we were "sleeping" to give other devices a chance.
		 * Either way, we don't really want to complain about anything.
		 */
		if (hwgroup->sleeping) {
			hwgroup->sleeping = 0;
			hwgroup->busy = 0;
		}
	} else {
		ide_drive_t *drive = hwgroup->drive;
		if (!drive) {
			printk(KERN_ERR "ide_timer_expiry: hwgroup->drive was NULL\n");
			hwgroup->handler = NULL;
		} else {
			ide_hwif_t *hwif;
			ide_startstop_t startstop = ide_stopped;
			if (!hwgroup->busy) {
				hwgroup->busy = 1;	/* paranoia */
				printk(KERN_ERR "%s: ide_timer_expiry: hwgroup->busy was 0 ??\n", drive->name);
			}
			if ((expiry = hwgroup->expiry) != NULL) {
				/* continue */
				if ((wait = expiry(drive)) > 0) {
					/* reset timer */
					hwgroup->timer.expires = jiffies + wait;
					hwgroup->req_gen_timer = hwgroup->req_gen;
					add_timer(&hwgroup->timer);
					spin_unlock_irqrestore(&ide_lock, flags);
					return;
				}
			}
			hwgroup->handler = NULL;
			/*
			 * We need to simulate a real interrupt when invoking
			 * the handler() function, which means we need to
			 * globally mask the specific IRQ:
			 */
			spin_unlock(&ide_lock);
			hwif = HWIF(drive);
			/* disable_irq_nosync ?? */
			disable_irq(hwif->irq);
			/* local CPU only,
			 * as if we were handling an interrupt */
			local_irq_disable();
			if (hwgroup->polling) {
				startstop = handler(drive);
			} else if (drive_is_ready(drive)) {
				if (drive->waiting_for_dma)
					hwgroup->hwif->dma_lost_irq(drive);
				(void)ide_ack_intr(hwif);
				printk(KERN_WARNING "%s: lost interrupt\n", drive->name);
				startstop = handler(drive);
			} else {
				if (drive->waiting_for_dma) {
					startstop = ide_dma_timeout_retry(drive, wait);
				} else
					startstop =
					ide_error(drive, "irq timeout",
						  ide_read_status(drive));
			}
			drive->service_time = jiffies - drive->service_start;
			spin_lock_irq(&ide_lock);
			enable_irq(hwif->irq);
			if (startstop == ide_stopped)
				hwgroup->busy = 0;
		}
	}
	ide_do_request(hwgroup, IDE_NO_IRQ);
	spin_unlock_irqrestore(&ide_lock, flags);
}
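
/*
 * Contract for expiry handlers, as exercised above: returning a
 * positive value re-arms the timer for that many more jiffies;
 * returning zero or less (or having no expiry handler at all) drops
 * into the generic timeout recovery path.
 */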

/**
 *	unexpected_intr		-	handle an unexpected IDE interrupt
 *	@irq: interrupt line
 *	@hwgroup: hwgroup being processed
 *
 *	There's nothing really useful we can do with an unexpected interrupt,
 *	other than reading the status register (to clear it), and logging it.
 *	There should be no way that an irq can happen before we're ready for it,
 *	so we needn't worry much about losing an "important" interrupt here.
 *
 *	On laptops (and "green" PCs), an unexpected interrupt occurs whenever
 *	the drive enters "idle", "standby", or "sleep" mode, so if the status
 *	looks "good", we just ignore the interrupt completely.
 *
 *	This routine assumes __cli() is in effect when called.
 *
 *	If an unexpected interrupt happens on irq15 while we are handling irq14
 *	and if the two interfaces are "serialized" (CMD640), then it looks like
 *	we could screw up by interfering with a new request being set up for
 *	irq15.
 *
 *	In reality, this is a non-issue.  The new command is not sent unless
 *	the drive is ready to accept one, in which case we know the drive is
 *	not trying to interrupt us.  And ide_set_handler() is always invoked
 *	before completing the issuance of any new drive command, so we will not
 *	be accidentally invoked as a result of any valid command completion
 *	interrupt.
 *
 *	Note that we must walk the entire hwgroup here.  We know which hwif
 *	is doing the current command, but we don't know which hwif burped
 *	mysteriously.
 */

static void unexpected_intr (int irq, ide_hwgroup_t *hwgroup)
{
	u8 stat;
	ide_hwif_t *hwif = hwgroup->hwif;

	/*
	 * handle the unexpected interrupt
	 */
	do {
		if (hwif->irq == irq) {
			stat = hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]);
			if (!OK_STAT(stat, READY_STAT, BAD_STAT)) {
				/* Try to not flood the console with msgs */
				static unsigned long last_msgtime, count;
				++count;
				if (time_after(jiffies, last_msgtime + HZ)) {
					last_msgtime = jiffies;
					printk(KERN_ERR "%s%s: unexpected interrupt, "
						"status=0x%02x, count=%ld\n",
						hwif->name,
						(hwif->next==hwgroup->hwif) ? "" : "(?)", stat, count);
				}
			}
		}
	} while ((hwif = hwif->next) != hwgroup->hwif);
}
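
/*
 * The once-per-second throttling of the message above is intentional:
 * a "green" drive dropping in and out of standby could otherwise
 * flood the log with one line per spurious interrupt.
 */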

/**
 *	ide_intr	-	default IDE interrupt handler
 *	@irq: interrupt number
 *	@dev_id: hwif group
 *
 *	This is the default IRQ handler for the IDE layer. You should
 *	not need to override it. If you do, be aware it is subtle in
 *	places.
 *
 *	hwgroup->hwif is the interface in the group currently performing
 *	a command. hwgroup->drive is the drive and hwgroup->handler is
 *	the IRQ handler to call. As we issue a command the handlers
 *	step through multiple states, reassigning the handler to the
 *	next step in the process. Unlike a smart SCSI controller, IDE
 *	expects the main processor to sequence the various transfer
 *	stages. We also manage a poll timer to catch up with most
 *	timeout situations. There are still a few where the handlers
 *	don't ever decide to give up.
 *
 *	The handler eventually returns ide_stopped to indicate the
 *	request completed. At this point we issue the next request
 *	on the hwgroup and the process begins again.
 */

irqreturn_t ide_intr (int irq, void *dev_id)
{
	unsigned long flags;
	ide_hwgroup_t *hwgroup = (ide_hwgroup_t *)dev_id;
	ide_hwif_t *hwif;
	ide_drive_t *drive;
	ide_handler_t *handler;
	ide_startstop_t startstop;

	spin_lock_irqsave(&ide_lock, flags);
	hwif = hwgroup->hwif;

	if (!ide_ack_intr(hwif)) {
		spin_unlock_irqrestore(&ide_lock, flags);
		return IRQ_NONE;
	}

	if ((handler = hwgroup->handler) == NULL || hwgroup->polling) {
		/*
		 * Not expecting an interrupt from this drive.
		 * That means this could be:
		 *	(1) an interrupt from another PCI device
		 *	sharing the same PCI INT# as us.
		 * or	(2) a drive just entered sleep or standby mode,
		 *	and is interrupting to let us know.
		 * or	(3) a spurious interrupt of unknown origin.
		 *
		 * For PCI, we cannot tell the difference,
		 * so in that case we just ignore it and hope it goes away.
		 *
		 * FIXME: unexpected_intr should be hwif-> then we can
		 * remove all the ifdef PCI crap
		 */
#ifdef CONFIG_BLK_DEV_IDEPCI
		if (hwif->chipset != ide_pci)
#endif	/* CONFIG_BLK_DEV_IDEPCI */
		{
			/*
			 * Probably not a shared PCI interrupt,
			 * so we can safely try to do something about it:
			 */
			unexpected_intr(irq, hwgroup);
#ifdef CONFIG_BLK_DEV_IDEPCI
		} else {
			/*
			 * Whack the status register, just in case
			 * we have a leftover pending IRQ.
			 */
			(void) hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]);
#endif /* CONFIG_BLK_DEV_IDEPCI */
		}
		spin_unlock_irqrestore(&ide_lock, flags);
		return IRQ_NONE;
	}
	drive = hwgroup->drive;
	if (!drive) {
		/*
		 * This should NEVER happen, and there isn't much
		 * we could do about it here.
		 *
		 * [Note - this can occur if the drive is hot unplugged]
		 */
		spin_unlock_irqrestore(&ide_lock, flags);
		return IRQ_HANDLED;
	}
	if (!drive_is_ready(drive)) {
		/*
		 * This happens regularly when we share a PCI IRQ with
		 * another device.  Unfortunately, it can also happen
		 * with some buggy drives that trigger the IRQ before
		 * their status register is up to date.  Hopefully we have
		 * enough advance overhead that the latter isn't a problem.
		 */
		spin_unlock_irqrestore(&ide_lock, flags);
		return IRQ_NONE;
	}
	if (!hwgroup->busy) {
		hwgroup->busy = 1;	/* paranoia */
		printk(KERN_ERR "%s: ide_intr: hwgroup->busy was 0 ??\n", drive->name);
	}
	hwgroup->handler = NULL;
	hwgroup->req_gen++;
	del_timer(&hwgroup->timer);
	spin_unlock(&ide_lock);

	/* Some controllers might set DMA INTR no matter whether DMA or PIO
	 * is in use; bmdma status might need to be cleared even for
	 * PIO interrupts to prevent spurious/lost irq.
	 */
	if (hwif->ide_dma_clear_irq && !(drive->waiting_for_dma))
		/* ide_dma_end() needs bmdma status for error checking.
		 * So, skip clearing bmdma status here and leave it
		 * to ide_dma_end() if this is dma interrupt.
		 */
		hwif->ide_dma_clear_irq(drive);

	if (drive->unmask)
		local_irq_enable_in_hardirq();
	/* service this interrupt, may set handler for next interrupt */
	startstop = handler(drive);
	spin_lock_irq(&ide_lock);

	/*
	 * Note that handler() may have set things up for another
	 * interrupt to occur soon, but it cannot happen until
	 * we exit from this routine, because it will be the
	 * same irq as is currently being serviced here, and Linux
	 * won't allow another of the same (on any CPU) until we return.
	 */
	drive->service_time = jiffies - drive->service_start;
	if (startstop == ide_stopped) {
		if (hwgroup->handler == NULL) {	/* paranoia */
			hwgroup->busy = 0;
			ide_do_request(hwgroup, hwif->irq);
		} else {
			printk(KERN_ERR "%s: ide_intr: huh? expected NULL handler "
				"on exit\n", drive->name);
		}
	}
	spin_unlock_irqrestore(&ide_lock, flags);
	return IRQ_HANDLED;
}
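
/*
 * Illustrative flow of a single command through this layer (a
 * simplification, but the names are the real ones): do_ide_request()
 * -> ide_do_request() -> start_request() issues the command and a
 * step handler is registered via ide_set_handler(); each ide_intr()
 * then runs that handler, which either registers the next step or
 * returns ide_stopped, at which point ide_do_request() starts the
 * next rq.
 */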

/**
 *	ide_init_drive_cmd	-	initialize a drive command request
 *	@rq: request object
 *
 *	Initialize a request before we fill it in and send it down to
 *	ide_do_drive_cmd. Commands must be set up by this function. Right
 *	now it doesn't do a lot, but if that changes, abusers will have a
 *	nasty surprise.
 */

void ide_init_drive_cmd (struct request *rq)
{
	memset(rq, 0, sizeof(*rq));
	rq->ref_count = 1;
}

EXPORT_SYMBOL(ide_init_drive_cmd);

/**
 *	ide_do_drive_cmd	-	issue IDE special command
 *	@drive: device to issue command
 *	@rq: request to issue
 *	@action: action for processing
 *
 *	This function issues a special IDE device request
 *	onto the request queue.
 *
 *	If action is ide_wait, then the rq is queued at the end of the
 *	request queue, and the function sleeps until it has been processed.
 *	This is for use when invoked from an ioctl handler.
 *
 *	If action is ide_preempt, then the rq is queued at the head of
 *	the request queue, displacing the currently-being-processed
 *	request and this function returns immediately without waiting
 *	for the new rq to be completed. This is VERY DANGEROUS, and is
 *	intended for careful use by the ATAPI tape/cdrom driver code.
 *
 *	If action is ide_head_wait, then the rq is queued at the head of
 *	the request queue as with ide_preempt, but the function sleeps
 *	until the rq has been processed, as with ide_wait.
 *
 *	If action is ide_end, then the rq is queued at the end of the
 *	request queue, and the function returns immediately without waiting
 *	for the new rq to be completed. This is again intended for careful
 *	use by the ATAPI tape/cdrom driver code.
 */

int ide_do_drive_cmd (ide_drive_t *drive, struct request *rq, ide_action_t action)
{
	unsigned long flags;
	ide_hwgroup_t *hwgroup = HWGROUP(drive);
	DECLARE_COMPLETION_ONSTACK(wait);
	int where = ELEVATOR_INSERT_BACK, err;
	int must_wait = (action == ide_wait || action == ide_head_wait);

	rq->errors = 0;

	/*
	 * we need to hold an extra reference to request for safe inspection
	 * after completion
	 */
	if (must_wait) {
		rq->ref_count++;
		rq->end_io_data = &wait;
		rq->end_io = blk_end_sync_rq;
	}

	spin_lock_irqsave(&ide_lock, flags);
	if (action == ide_preempt)
		hwgroup->rq = NULL;
	if (action == ide_preempt || action == ide_head_wait) {
		where = ELEVATOR_INSERT_FRONT;
		rq->cmd_flags |= REQ_PREEMPT;
	}
	__elv_add_request(drive->queue, rq, where, 0);
	ide_do_request(hwgroup, IDE_NO_IRQ);
	spin_unlock_irqrestore(&ide_lock, flags);

	err = 0;
	if (must_wait) {
		wait_for_completion(&wait);
		if (rq->errors)
			err = -EIO;

		blk_put_request(rq);
	}

	return err;
}

EXPORT_SYMBOL(ide_do_drive_cmd);

void ide_pktcmd_tf_load(ide_drive_t *drive, u32 tf_flags, u16 bcount, u8 dma)
{
	ide_task_t task;

	memset(&task, 0, sizeof(task));
	task.tf_flags = IDE_TFLAG_OUT_LBAH | IDE_TFLAG_OUT_LBAM |
			IDE_TFLAG_OUT_FEATURE | tf_flags;
	task.tf.feature = dma;		/* Use PIO/DMA */
	task.tf.lbam    = bcount & 0xff;
	task.tf.lbah    = (bcount >> 8) & 0xff;

	ide_tf_load(drive, &task);
}

EXPORT_SYMBOL_GPL(ide_pktcmd_tf_load);
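
/*
 * Illustrative (not in-tree) use of the two entry points above, e.g.
 * from an ioctl path that wants to issue a taskfile command and wait
 * for it ("task" being a caller-built ide_task_t):
 *
 *	struct request rq;
 *
 *	ide_init_drive_cmd(&rq);
 *	rq.cmd_type = REQ_TYPE_ATA_TASKFILE;
 *	rq.special = task;
 *	err = ide_do_drive_cmd(drive, &rq, ide_wait);
 */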