ide-io.c revision 0d346ba0730d84f04022f9f984d3f606f69cef37
/*
 * IDE I/O functions
 *
 * Basic PIO and command management functionality.
 *
 * This code was split off from ide.c. See ide.c for history and original
 * copyrights.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * For the avoidance of doubt the "preferred form" of this code is one which
 * is in an open non patent encumbered format. Where cryptographic key signing
 * forms part of the process of creating an executable the information
 * including keys needed to generate an equivalently functional executable
 * are deemed to be part of the source code.
 */


#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/major.h>
#include <linux/errno.h>
#include <linux/genhd.h>
#include <linux/blkpg.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/ide.h>
#include <linux/hdreg.h>
#include <linux/completion.h>
#include <linux/reboot.h>
#include <linux/cdrom.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/kmod.h>
#include <linux/scatterlist.h>
#include <linux/bitops.h>

#include <asm/byteorder.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <asm/io.h>

static int __ide_end_request(ide_drive_t *drive, struct request *rq,
			     int uptodate, unsigned int nr_bytes, int dequeue)
{
	int ret = 1;
	int error = 0;

	if (uptodate <= 0)
		error = uptodate ? uptodate : -EIO;

	/*
	 * if failfast is set on a request, override number of sectors and
	 * complete the whole request right now
	 */
	if (blk_noretry_request(rq) && error)
		nr_bytes = rq->hard_nr_sectors << 9;

	if (!blk_fs_request(rq) && error && !rq->errors)
		rq->errors = -EIO;

	/*
	 * decide whether to reenable DMA -- 3 is a random magic for now,
	 * if we DMA timeout more than 3 times, just stay in PIO
	 */
	if ((drive->dev_flags & IDE_DFLAG_DMA_PIO_RETRY) &&
	    drive->retry_pio <= 3) {
		drive->dev_flags &= ~IDE_DFLAG_DMA_PIO_RETRY;
		ide_dma_on(drive);
	}

	if (!__blk_end_request(rq, error, nr_bytes)) {
		if (dequeue)
			HWGROUP(drive)->rq = NULL;
		ret = 0;
	}

	return ret;
}

/**
 * ide_end_request - complete an IDE I/O
 * @drive: IDE device for the I/O
 * @uptodate:
 * @nr_sectors: number of sectors completed
 *
 * This is our end_request wrapper function. We complete the I/O
 * update random number input and dequeue the request, which if
 * it was tagged may be out of order.
 */

int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
{
	unsigned int nr_bytes = nr_sectors << 9;
	struct request *rq;
	unsigned long flags;
	int ret = 1;

	/*
	 * room for locking improvements here, the calls below don't
	 * need the queue lock held at all
	 */
	spin_lock_irqsave(&ide_lock, flags);
	rq = HWGROUP(drive)->rq;

	if (!nr_bytes) {
		if (blk_pc_request(rq))
			nr_bytes = rq->data_len;
		else
			nr_bytes = rq->hard_cur_sectors << 9;
	}

	ret = __ide_end_request(drive, rq, uptodate, nr_bytes, 1);

	spin_unlock_irqrestore(&ide_lock, flags);
	return ret;
}
EXPORT_SYMBOL(ide_end_request);

static void ide_complete_power_step(ide_drive_t *drive, struct request *rq, u8 stat, u8 error)
{
	struct request_pm_state *pm = rq->data;

	if (drive->media != ide_disk)
		return;

	switch (pm->pm_step) {
	case IDE_PM_FLUSH_CACHE:	/* Suspend step 1 (flush cache) */
		if (pm->pm_state == PM_EVENT_FREEZE)
			pm->pm_step = IDE_PM_COMPLETED;
		else
			pm->pm_step = IDE_PM_STANDBY;
		break;
	case IDE_PM_STANDBY:		/* Suspend step 2 (standby) */
		pm->pm_step = IDE_PM_COMPLETED;
		break;
	case IDE_PM_RESTORE_PIO:	/* Resume step 1 (restore PIO) */
		pm->pm_step = IDE_PM_IDLE;
		break;
	case IDE_PM_IDLE:		/* Resume step 2 (idle)*/
		pm->pm_step = IDE_PM_RESTORE_DMA;
		break;
	}
}

static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
{
	struct request_pm_state *pm = rq->data;
	ide_task_t *args = rq->special;

	memset(args, 0, sizeof(*args));

	switch (pm->pm_step) {
	case IDE_PM_FLUSH_CACHE:	/* Suspend step 1 (flush cache) */
		if (drive->media != ide_disk)
			break;
		/* Not supported? Switch to next step now. */
		if (ata_id_flush_enabled(drive->id) == 0 ||
		    (drive->dev_flags & IDE_DFLAG_WCACHE) == 0) {
			ide_complete_power_step(drive, rq, 0, 0);
			return ide_stopped;
		}
		if (ata_id_flush_ext_enabled(drive->id))
			args->tf.command = ATA_CMD_FLUSH_EXT;
		else
			args->tf.command = ATA_CMD_FLUSH;
		goto out_do_tf;
	case IDE_PM_STANDBY:		/* Suspend step 2 (standby) */
		args->tf.command = ATA_CMD_STANDBYNOW1;
		goto out_do_tf;
	case IDE_PM_RESTORE_PIO:	/* Resume step 1 (restore PIO) */
		ide_set_max_pio(drive);
		/*
		 * skip IDE_PM_IDLE for ATAPI devices
		 */
		if (drive->media != ide_disk)
			pm->pm_step = IDE_PM_RESTORE_DMA;
		else
			ide_complete_power_step(drive, rq, 0, 0);
		return ide_stopped;
	case IDE_PM_IDLE:		/* Resume step 2 (idle) */
		args->tf.command = ATA_CMD_IDLEIMMEDIATE;
		goto out_do_tf;
	case IDE_PM_RESTORE_DMA:	/* Resume step 3 (restore DMA) */
		/*
		 * Right now, all we do is call ide_set_dma(drive),
		 * we could be smarter and check for current xfer_speed
		 * in struct drive etc...
		 */
		if (drive->hwif->dma_ops == NULL)
			break;
		/*
		 * TODO: respect IDE_DFLAG_USING_DMA
		 */
		ide_set_dma(drive);
		break;
	}

	pm->pm_step = IDE_PM_COMPLETED;
	return ide_stopped;

out_do_tf:
	args->tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
	args->data_phase = TASKFILE_NO_DATA;
	return do_rw_taskfile(drive, args);
}

/**
 * ide_end_dequeued_request - complete an IDE I/O
 * @drive: IDE device for the I/O
 * @uptodate:
 * @nr_sectors: number of sectors completed
 *
 * Complete an I/O that is no longer on the request queue.
This 230 * typically occurs when we pull the request and issue a REQUEST_SENSE. 231 * We must still finish the old request but we must not tamper with the 232 * queue in the meantime. 233 * 234 * NOTE: This path does not handle barrier, but barrier is not supported 235 * on ide-cd anyway. 236 */ 237 238int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq, 239 int uptodate, int nr_sectors) 240{ 241 unsigned long flags; 242 int ret; 243 244 spin_lock_irqsave(&ide_lock, flags); 245 BUG_ON(!blk_rq_started(rq)); 246 ret = __ide_end_request(drive, rq, uptodate, nr_sectors << 9, 0); 247 spin_unlock_irqrestore(&ide_lock, flags); 248 249 return ret; 250} 251EXPORT_SYMBOL_GPL(ide_end_dequeued_request); 252 253 254/** 255 * ide_complete_pm_request - end the current Power Management request 256 * @drive: target drive 257 * @rq: request 258 * 259 * This function cleans up the current PM request and stops the queue 260 * if necessary. 261 */ 262static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq) 263{ 264 unsigned long flags; 265 266#ifdef DEBUG_PM 267 printk("%s: completing PM request, %s\n", drive->name, 268 blk_pm_suspend_request(rq) ? "suspend" : "resume"); 269#endif 270 spin_lock_irqsave(&ide_lock, flags); 271 if (blk_pm_suspend_request(rq)) { 272 blk_stop_queue(drive->queue); 273 } else { 274 drive->dev_flags &= ~IDE_DFLAG_BLOCKED; 275 blk_start_queue(drive->queue); 276 } 277 HWGROUP(drive)->rq = NULL; 278 if (__blk_end_request(rq, 0, 0)) 279 BUG(); 280 spin_unlock_irqrestore(&ide_lock, flags); 281} 282 283/** 284 * ide_end_drive_cmd - end an explicit drive command 285 * @drive: command 286 * @stat: status bits 287 * @err: error bits 288 * 289 * Clean up after success/failure of an explicit drive command. 290 * These get thrown onto the queue so they are synchronized with 291 * real I/O operations on the drive. 292 * 293 * In LBA48 mode we have to read the register set twice to get 294 * all the extra information out. 295 */ 296 297void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err) 298{ 299 unsigned long flags; 300 struct request *rq; 301 302 spin_lock_irqsave(&ide_lock, flags); 303 rq = HWGROUP(drive)->rq; 304 spin_unlock_irqrestore(&ide_lock, flags); 305 306 if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) { 307 ide_task_t *task = (ide_task_t *)rq->special; 308 309 if (rq->errors == 0) 310 rq->errors = !OK_STAT(stat, ATA_DRDY, BAD_STAT); 311 312 if (task) { 313 struct ide_taskfile *tf = &task->tf; 314 315 tf->error = err; 316 tf->status = stat; 317 318 drive->hwif->tp_ops->tf_read(drive, task); 319 320 if (task->tf_flags & IDE_TFLAG_DYN) 321 kfree(task); 322 } 323 } else if (blk_pm_request(rq)) { 324 struct request_pm_state *pm = rq->data; 325#ifdef DEBUG_PM 326 printk("%s: complete_power_step(step: %d, stat: %x, err: %x)\n", 327 drive->name, rq->pm->pm_step, stat, err); 328#endif 329 ide_complete_power_step(drive, rq, stat, err); 330 if (pm->pm_step == IDE_PM_COMPLETED) 331 ide_complete_pm_request(drive, rq); 332 return; 333 } 334 335 spin_lock_irqsave(&ide_lock, flags); 336 HWGROUP(drive)->rq = NULL; 337 rq->errors = err; 338 if (unlikely(__blk_end_request(rq, (rq->errors ? 
				       blk_rq_bytes(rq))))
		BUG();
	spin_unlock_irqrestore(&ide_lock, flags);
}

EXPORT_SYMBOL(ide_end_drive_cmd);

static void ide_kill_rq(ide_drive_t *drive, struct request *rq)
{
	if (rq->rq_disk) {
		ide_driver_t *drv;

		drv = *(ide_driver_t **)rq->rq_disk->private_data;
		drv->end_request(drive, 0, 0);
	} else
		ide_end_request(drive, 0, 0);
}

static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
{
	ide_hwif_t *hwif = drive->hwif;

	if ((stat & ATA_BUSY) ||
	    ((stat & ATA_DF) && (drive->dev_flags & IDE_DFLAG_NOWERR) == 0)) {
		/* other bits are useless when BUSY */
		rq->errors |= ERROR_RESET;
	} else if (stat & ATA_ERR) {
		/* err has different meaning on cdrom and tape */
		if (err == ATA_ABORTED) {
			if ((drive->dev_flags & IDE_DFLAG_LBA) &&
			    /* some newer drives don't support ATA_CMD_INIT_DEV_PARAMS */
			    hwif->tp_ops->read_status(hwif) == ATA_CMD_INIT_DEV_PARAMS)
				return ide_stopped;
		} else if ((err & BAD_CRC) == BAD_CRC) {
			/* UDMA crc error, just retry the operation */
			drive->crc_count++;
		} else if (err & (ATA_BBK | ATA_UNC)) {
			/* retries won't help these */
			rq->errors = ERROR_MAX;
		} else if (err & ATA_TRK0NF) {
			/* help it find track zero */
			rq->errors |= ERROR_RECAL;
		}
	}

	if ((stat & ATA_DRQ) && rq_data_dir(rq) == READ &&
	    (hwif->host_flags & IDE_HFLAG_ERROR_STOPS_FIFO) == 0) {
		int nsect = drive->mult_count ? drive->mult_count : 1;

		ide_pad_transfer(drive, READ, nsect * SECTOR_SIZE);
	}

	if (rq->errors >= ERROR_MAX || blk_noretry_request(rq)) {
		ide_kill_rq(drive, rq);
		return ide_stopped;
	}

	if (hwif->tp_ops->read_status(hwif) & (ATA_BUSY | ATA_DRQ))
		rq->errors |= ERROR_RESET;

	if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
		++rq->errors;
		return ide_do_reset(drive);
	}

	if ((rq->errors & ERROR_RECAL) == ERROR_RECAL)
		drive->special.b.recalibrate = 1;

	++rq->errors;

	return ide_stopped;
}

static ide_startstop_t ide_atapi_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
{
	ide_hwif_t *hwif = drive->hwif;

	if ((stat & ATA_BUSY) ||
	    ((stat & ATA_DF) && (drive->dev_flags & IDE_DFLAG_NOWERR) == 0)) {
		/* other bits are useless when BUSY */
		rq->errors |= ERROR_RESET;
	} else {
		/* add decoding error stuff */
	}

	if (hwif->tp_ops->read_status(hwif) & (ATA_BUSY | ATA_DRQ))
		/* force an abort */
		hwif->tp_ops->exec_command(hwif, ATA_CMD_IDLEIMMEDIATE);

	if (rq->errors >= ERROR_MAX) {
		ide_kill_rq(drive, rq);
	} else {
		if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
			++rq->errors;
			return ide_do_reset(drive);
		}
		++rq->errors;
	}

	return ide_stopped;
}

ide_startstop_t
__ide_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
{
	if (drive->media == ide_disk)
		return ide_ata_error(drive, rq, stat, err);
	return ide_atapi_error(drive, rq, stat, err);
}

EXPORT_SYMBOL_GPL(__ide_error);

/**
 * ide_error - handle an error on the IDE
 * @drive: drive the error occurred on
 * @msg: message to report
 * @stat: status bits
 *
 * ide_error() takes action based on the error returned by the drive.
 * For normal I/O that may well include retries. We deal with
 * both new-style (taskfile) and old style command handling here.
 * In the case of taskfile command handling there is work left to
 * do
 */

ide_startstop_t ide_error (ide_drive_t *drive, const char *msg, u8 stat)
{
	struct request *rq;
	u8 err;

	err = ide_dump_status(drive, msg, stat);

	if ((rq = HWGROUP(drive)->rq) == NULL)
		return ide_stopped;

	/* retry only "normal" I/O: */
	if (!blk_fs_request(rq)) {
		rq->errors = 1;
		ide_end_drive_cmd(drive, stat, err);
		return ide_stopped;
	}

	if (rq->rq_disk) {
		ide_driver_t *drv;

		drv = *(ide_driver_t **)rq->rq_disk->private_data;
		return drv->error(drive, rq, stat, err);
	} else
		return __ide_error(drive, rq, stat, err);
}

EXPORT_SYMBOL_GPL(ide_error);

static void ide_tf_set_specify_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
{
	tf->nsect = drive->sect;
	tf->lbal = drive->sect;
	tf->lbam = drive->cyl;
	tf->lbah = drive->cyl >> 8;
	tf->device = (drive->head - 1) | drive->select.all;
	tf->command = ATA_CMD_INIT_DEV_PARAMS;
}

static void ide_tf_set_restore_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
{
	tf->nsect = drive->sect;
	tf->command = ATA_CMD_RESTORE;
}

static void ide_tf_set_setmult_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
{
	tf->nsect = drive->mult_req;
	tf->command = ATA_CMD_SET_MULTI;
}

static ide_startstop_t ide_disk_special(ide_drive_t *drive)
{
	special_t *s = &drive->special;
	ide_task_t args;

	memset(&args, 0, sizeof(ide_task_t));
	args.data_phase = TASKFILE_NO_DATA;

	if (s->b.set_geometry) {
		s->b.set_geometry = 0;
		ide_tf_set_specify_cmd(drive, &args.tf);
	} else if (s->b.recalibrate) {
		s->b.recalibrate = 0;
		ide_tf_set_restore_cmd(drive, &args.tf);
	} else if (s->b.set_multmode) {
		s->b.set_multmode = 0;
		ide_tf_set_setmult_cmd(drive, &args.tf);
	} else if (s->all) {
		int special = s->all;
		s->all = 0;
		printk(KERN_ERR "%s: bad special flag: 0x%02x\n", drive->name, special);
		return ide_stopped;
	}

	args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE |
			IDE_TFLAG_CUSTOM_HANDLER;

	do_rw_taskfile(drive, &args);

	return ide_started;
}

/*
 * handle HDIO_SET_PIO_MODE ioctl abusers here, eventually it will go away
 */
static int set_pio_mode_abuse(ide_hwif_t *hwif, u8 req_pio)
{
	switch (req_pio) {
	case 202:
	case 201:
	case 200:
	case 102:
	case 101:
	case 100:
		return (hwif->host_flags & IDE_HFLAG_ABUSE_DMA_MODES) ? 1 : 0;
	case 9:
	case 8:
		return (hwif->host_flags & IDE_HFLAG_ABUSE_PREFETCH) ? 1 : 0;
	case 7:
	case 6:
		return (hwif->host_flags & IDE_HFLAG_ABUSE_FAST_DEVSEL) ? 1 : 0;
	default:
		return 0;
	}
}

/**
 * do_special - issue some special commands
 * @drive: drive the command is for
 *
 * do_special() is used to issue ATA_CMD_INIT_DEV_PARAMS,
 * ATA_CMD_RESTORE and ATA_CMD_SET_MULTI commands to a drive.
 *
 * It used to do much more, but has been scaled back.
 */

static ide_startstop_t do_special (ide_drive_t *drive)
{
	special_t *s = &drive->special;

#ifdef DEBUG
	printk("%s: do_special: 0x%02x\n", drive->name, s->all);
#endif
	if (s->b.set_tune) {
		ide_hwif_t *hwif = drive->hwif;
		const struct ide_port_ops *port_ops = hwif->port_ops;
		u8 req_pio = drive->tune_req;

		s->b.set_tune = 0;

		if (set_pio_mode_abuse(drive->hwif, req_pio)) {
			/*
			 * take ide_lock for IDE_DFLAG_[NO_]UNMASK/[NO_]IO_32BIT
			 */
			if (req_pio == 8 || req_pio == 9) {
				unsigned long flags;

				spin_lock_irqsave(&ide_lock, flags);
				port_ops->set_pio_mode(drive, req_pio);
				spin_unlock_irqrestore(&ide_lock, flags);
			} else
				port_ops->set_pio_mode(drive, req_pio);
		} else {
			int keep_dma =
				!!(drive->dev_flags & IDE_DFLAG_USING_DMA);

			ide_set_pio(drive, req_pio);

			if (hwif->host_flags & IDE_HFLAG_SET_PIO_MODE_KEEP_DMA) {
				if (keep_dma)
					ide_dma_on(drive);
			}
		}

		return ide_stopped;
	} else {
		if (drive->media == ide_disk)
			return ide_disk_special(drive);

		s->all = 0;
		drive->mult_req = 0;
		return ide_stopped;
	}
}

void ide_map_sg(ide_drive_t *drive, struct request *rq)
{
	ide_hwif_t *hwif = drive->hwif;
	struct scatterlist *sg = hwif->sg_table;

	if (hwif->sg_mapped)	/* needed by ide-scsi */
		return;

	if (rq->cmd_type != REQ_TYPE_ATA_TASKFILE) {
		hwif->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
	} else {
		sg_init_one(sg, rq->buffer, rq->nr_sectors * SECTOR_SIZE);
		hwif->sg_nents = 1;
	}
}

EXPORT_SYMBOL_GPL(ide_map_sg);

void ide_init_sg_cmd(ide_drive_t *drive, struct request *rq)
{
	ide_hwif_t *hwif = drive->hwif;

	hwif->nsect = hwif->nleft = rq->nr_sectors;
	hwif->cursg_ofs = 0;
	hwif->cursg = NULL;
}

EXPORT_SYMBOL_GPL(ide_init_sg_cmd);

/**
 * execute_drive_command - issue special drive command
 * @drive: the drive to issue the command on
 * @rq: the request structure holding the command
 *
 * execute_drive_cmd() issues a special drive command, usually
 * initiated by ioctl() from the external hdparm program. The
 * command can be a drive command, drive task or taskfile
 * operation. Weirdly you can call it with NULL to wait for
 * all commands to finish. Don't do this as that is due to change
 */

static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
		struct request *rq)
{
	ide_hwif_t *hwif = HWIF(drive);
	ide_task_t *task = rq->special;

	if (task) {
		hwif->data_phase = task->data_phase;

		switch (hwif->data_phase) {
		case TASKFILE_MULTI_OUT:
		case TASKFILE_OUT:
		case TASKFILE_MULTI_IN:
		case TASKFILE_IN:
			ide_init_sg_cmd(drive, rq);
			ide_map_sg(drive, rq);
		default:
			break;
		}

		return do_rw_taskfile(drive, task);
	}

	/*
	 * NULL is actually a valid way of waiting for
	 * all current requests to be flushed from the queue.
	 */
#ifdef DEBUG
	printk("%s: DRIVE_CMD (null)\n", drive->name);
#endif
	ide_end_drive_cmd(drive, hwif->tp_ops->read_status(hwif),
			  ide_read_error(drive));

	return ide_stopped;
}

int ide_devset_execute(ide_drive_t *drive, const struct ide_devset *setting,
		       int arg)
{
	struct request_queue *q = drive->queue;
	struct request *rq;
	int ret = 0;

	if (!(setting->flags & DS_SYNC))
		return setting->set(drive, arg);

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;

	rq->cmd_type = REQ_TYPE_SPECIAL;
	rq->cmd_len = 5;
	rq->cmd[0] = REQ_DEVSET_EXEC;
	*(int *)&rq->cmd[1] = arg;
	rq->special = setting->set;

	if (blk_execute_rq(q, NULL, rq, 0))
		ret = rq->errors;
	blk_put_request(rq);

	return ret;
}
EXPORT_SYMBOL_GPL(ide_devset_execute);

static ide_startstop_t ide_special_rq(ide_drive_t *drive, struct request *rq)
{
	switch (rq->cmd[0]) {
	case REQ_DEVSET_EXEC:
	{
		int err, (*setfunc)(ide_drive_t *, int) = rq->special;

		err = setfunc(drive, *(int *)&rq->cmd[1]);
		if (err)
			rq->errors = err;
		else
			err = 1;
		ide_end_request(drive, err, 0);
		return ide_stopped;
	}
	case REQ_DRIVE_RESET:
		return ide_do_reset(drive);
	default:
		blk_dump_rq_flags(rq, "ide_special_rq - bad request");
		ide_end_request(drive, 0, 0);
		return ide_stopped;
	}
}

static void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
{
	struct request_pm_state *pm = rq->data;

	if (blk_pm_suspend_request(rq) &&
	    pm->pm_step == IDE_PM_START_SUSPEND)
		/* Mark drive blocked when starting the suspend sequence. */
		drive->dev_flags |= IDE_DFLAG_BLOCKED;
	else if (blk_pm_resume_request(rq) &&
		 pm->pm_step == IDE_PM_START_RESUME) {
		/*
		 * The first thing we do on wakeup is to wait for BSY bit to
		 * go away (with a looong timeout) as a drive on this hwif may
		 * just be POSTing itself.
		 * We do that before even selecting as the "other" device on
		 * the bus may be broken enough to walk on our toes at this
		 * point.
		 */
		ide_hwif_t *hwif = drive->hwif;
		int rc;
#ifdef DEBUG_PM
		printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name);
#endif
		rc = ide_wait_not_busy(hwif, 35000);
		if (rc)
			printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name);
		SELECT_DRIVE(drive);
		hwif->tp_ops->set_irq(hwif, 1);
		rc = ide_wait_not_busy(hwif, 100000);
		if (rc)
			printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);
	}
}

/**
 * start_request - start of I/O and command issuing for IDE
 *
 * start_request() initiates handling of a new I/O request. It
 * accepts commands and I/O (read/write) requests.
 *
 * FIXME: this function needs a rename
 */

static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
{
	ide_startstop_t startstop;

	BUG_ON(!blk_rq_started(rq));

#ifdef DEBUG
	printk("%s: start_request: current=0x%08lx\n",
		HWIF(drive)->name, (unsigned long) rq);
#endif

	/* bail early if we've exceeded max_failures */
	if (drive->max_failures && (drive->failures > drive->max_failures)) {
		rq->cmd_flags |= REQ_FAILED;
		goto kill_rq;
	}

	if (blk_pm_request(rq))
		ide_check_pm_state(drive, rq);

	SELECT_DRIVE(drive);
	if (ide_wait_stat(&startstop, drive, drive->ready_stat,
			  ATA_BUSY | ATA_DRQ, WAIT_READY)) {
		printk(KERN_ERR "%s: drive not ready for command\n", drive->name);
		return startstop;
	}
	if (!drive->special.all) {
		ide_driver_t *drv;

		/*
		 * We reset the drive so we need to issue a SETFEATURES.
		 * Do it _after_ do_special() restored device parameters.
		 */
		if (drive->current_speed == 0xff)
			ide_config_drive_speed(drive, drive->desired_speed);

		if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
			return execute_drive_cmd(drive, rq);
		else if (blk_pm_request(rq)) {
			struct request_pm_state *pm = rq->data;
#ifdef DEBUG_PM
			printk("%s: start_power_step(step: %d)\n",
				drive->name, rq->pm->pm_step);
#endif
			startstop = ide_start_power_step(drive, rq);
			if (startstop == ide_stopped &&
			    pm->pm_step == IDE_PM_COMPLETED)
				ide_complete_pm_request(drive, rq);
			return startstop;
		} else if (!rq->rq_disk && blk_special_request(rq))
			/*
			 * TODO: Once all ULDs have been modified to
			 * check for specific op codes rather than
			 * blindly accepting any special request, the
			 * check for ->rq_disk above may be replaced
			 * by a more suitable mechanism or even
			 * dropped entirely.
			 */
			return ide_special_rq(drive, rq);

		drv = *(ide_driver_t **)rq->rq_disk->private_data;

		return drv->do_request(drive, rq, rq->sector);
	}
	return do_special(drive);
kill_rq:
	ide_kill_rq(drive, rq);
	return ide_stopped;
}

/**
 * ide_stall_queue - pause an IDE device
 * @drive: drive to stall
 * @timeout: time to stall for (jiffies)
 *
 * ide_stall_queue() can be used by a drive to give excess bandwidth back
 * to the hwgroup by sleeping for timeout jiffies.
 */

void ide_stall_queue (ide_drive_t *drive, unsigned long timeout)
{
	if (timeout > WAIT_WORSTCASE)
		timeout = WAIT_WORSTCASE;
	drive->sleep = timeout + jiffies;
	drive->dev_flags |= IDE_DFLAG_SLEEPING;
}

EXPORT_SYMBOL(ide_stall_queue);

#define WAKEUP(drive)	((drive)->service_start + 2 * (drive)->service_time)

/**
 * choose_drive - select a drive to service
 * @hwgroup: hardware group to select on
 *
 * choose_drive() selects the next drive which will be serviced.
 * This is necessary because the IDE layer can't issue commands
 * to both drives on the same cable, unlike SCSI.
 */

static inline ide_drive_t *choose_drive (ide_hwgroup_t *hwgroup)
{
	ide_drive_t *drive, *best;

repeat:
	best = NULL;
	drive = hwgroup->drive;

	/*
	 * drive is doing pre-flush, ordered write, post-flush sequence. even
	 * though that is 3 requests, it must be seen as a single transaction.
	 * we must not preempt this drive until that is complete
	 */
	if (blk_queue_flushing(drive->queue)) {
		/*
		 * small race where queue could get replugged during
		 * the 3-request flush cycle, just yank the plug since
		 * we want it to finish asap
		 */
		blk_remove_plug(drive->queue);
		return drive;
	}

	do {
		u8 dev_s = !!(drive->dev_flags & IDE_DFLAG_SLEEPING);
		u8 best_s = (best && !!(best->dev_flags & IDE_DFLAG_SLEEPING));

		if ((dev_s == 0 || time_after_eq(jiffies, drive->sleep)) &&
		    !elv_queue_empty(drive->queue)) {
			if (best == NULL ||
			    (dev_s && (best_s == 0 || time_before(drive->sleep, best->sleep))) ||
			    (best_s == 0 && time_before(WAKEUP(drive), WAKEUP(best)))) {
				if (!blk_queue_plugged(drive->queue))
					best = drive;
			}
		}
	} while ((drive = drive->next) != hwgroup->drive);

	if (best && (best->dev_flags & IDE_DFLAG_NICE1) &&
	    (best->dev_flags & IDE_DFLAG_SLEEPING) == 0 &&
	    best != hwgroup->drive && best->service_time > WAIT_MIN_SLEEP) {
		long t = (signed long)(WAKEUP(best) - jiffies);
		if (t >= WAIT_MIN_SLEEP) {
			/*
			 * We *may* have some time to spare, but first let's see if
			 * someone can potentially benefit from our nice mood today..
			 */
			drive = best->next;
			do {
				if ((drive->dev_flags & IDE_DFLAG_SLEEPING) == 0
				 && time_before(jiffies - best->service_time, WAKEUP(drive))
				 && time_before(WAKEUP(drive), jiffies + t))
				{
					ide_stall_queue(best, min_t(long, t, 10 * WAIT_MIN_SLEEP));
					goto repeat;
				}
			} while ((drive = drive->next) != best);
		}
	}
	return best;
}

/*
 * Issue a new request to a drive from hwgroup
 * Caller must have already done spin_lock_irqsave(&ide_lock, ..);
 *
 * A hwgroup is a serialized group of IDE interfaces. Usually there is
 * exactly one hwif (interface) per hwgroup, but buggy controllers (eg. CMD640)
 * may have both interfaces in a single hwgroup to "serialize" access.
 * Or possibly multiple ISA interfaces can share a common IRQ by being grouped
 * together into one hwgroup for serialized access.
 *
 * Note also that several hwgroups can end up sharing a single IRQ,
 * possibly along with many other devices. This is especially common in
 * PCI-based systems with off-board IDE controller cards.
 *
 * The IDE driver uses the single global ide_lock spinlock to protect
 * access to the request queues, and to protect the hwgroup->busy flag.
 *
 * The first thread into the driver for a particular hwgroup sets the
 * hwgroup->busy flag to indicate that this hwgroup is now active,
 * and then initiates processing of the top request from the request queue.
 *
 * Other threads attempting entry notice the busy setting, and will simply
 * queue their new requests and exit immediately. Note that hwgroup->busy
 * remains set even when the driver is merely awaiting the next interrupt.
 * Thus, the meaning is "this hwgroup is busy processing a request".
 *
 * When processing of a request completes, the completing thread or IRQ-handler
 * will start the next request from the queue. If no more work remains,
 * the driver will clear the hwgroup->busy flag and exit.
 *
 * The ide_lock (spinlock) is used to protect all access to the
 * hwgroup->busy flag, but is otherwise not needed for most processing in
 * the driver. This makes the driver much friendlier to shared IRQs
 * than previous designs, while remaining 100% (?)
 * SMP safe and capable.
 */
static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
{
	ide_drive_t *drive;
	ide_hwif_t *hwif;
	struct request *rq;
	ide_startstop_t startstop;
	int loops = 0;

	/* for atari only: POSSIBLY BROKEN HERE(?) */
	ide_get_lock(ide_intr, hwgroup);

	/* caller must own ide_lock */
	BUG_ON(!irqs_disabled());

	while (!hwgroup->busy) {
		hwgroup->busy = 1;
		drive = choose_drive(hwgroup);
		if (drive == NULL) {
			int sleeping = 0;
			unsigned long sleep = 0; /* shut up, gcc */
			hwgroup->rq = NULL;
			drive = hwgroup->drive;
			do {
				if ((drive->dev_flags & IDE_DFLAG_SLEEPING) &&
				    (sleeping == 0 ||
				     time_before(drive->sleep, sleep))) {
					sleeping = 1;
					sleep = drive->sleep;
				}
			} while ((drive = drive->next) != hwgroup->drive);
			if (sleeping) {
				/*
				 * Take a short snooze, and then wake up this hwgroup again.
				 * This gives other hwgroups on the same a chance to
				 * play fairly with us, just in case there are big differences
				 * in relative throughputs.. don't want to hog the cpu too much.
				 */
				if (time_before(sleep, jiffies + WAIT_MIN_SLEEP))
					sleep = jiffies + WAIT_MIN_SLEEP;
#if 1
				if (timer_pending(&hwgroup->timer))
					printk(KERN_CRIT "ide_set_handler: timer already active\n");
#endif
				/* so that ide_timer_expiry knows what to do */
				hwgroup->sleeping = 1;
				hwgroup->req_gen_timer = hwgroup->req_gen;
				mod_timer(&hwgroup->timer, sleep);
				/* we purposely leave hwgroup->busy==1
				 * while sleeping */
			} else {
				/* Ugly, but how can we sleep for the lock
				 * otherwise? perhaps from tq_disk?
				 */

				/* for atari only */
				ide_release_lock();
				hwgroup->busy = 0;
			}

			/* no more work for this hwgroup (for now) */
			return;
		}
	again:
		hwif = HWIF(drive);
		if (hwgroup->hwif->sharing_irq && hwif != hwgroup->hwif) {
			/*
			 * set nIEN for previous hwif, drives in the
			 * quirk_list may not like intr setups/cleanups
			 */
			if (drive->quirk_list != 1)
				hwif->tp_ops->set_irq(hwif, 0);
		}
		hwgroup->hwif = hwif;
		hwgroup->drive = drive;
		drive->dev_flags &= ~IDE_DFLAG_SLEEPING;
		drive->service_start = jiffies;

		if (blk_queue_plugged(drive->queue)) {
			printk(KERN_ERR "ide: huh? queue was plugged!\n");
			break;
		}

		/*
		 * we know that the queue isn't empty, but this can happen
		 * if the q->prep_rq_fn() decides to kill a request
		 */
		rq = elv_next_request(drive->queue);
		if (!rq) {
			hwgroup->busy = 0;
			break;
		}

		/*
		 * Sanity: don't accept a request that isn't a PM request
		 * if we are currently power managed. This is very important as
		 * blk_stop_queue() doesn't prevent the elv_next_request()
		 * above to return us whatever is in the queue. Since we call
		 * ide_do_request() ourselves, we end up taking requests while
		 * the queue is blocked...
		 *
		 * We let requests forced at head of queue with ide-preempt
		 * though. I hope that doesn't happen too much, hopefully not
		 * unless the subdriver triggers such a thing in its own PM
		 * state machine.
		 *
		 * We count how many times we loop here to make sure we service
		 * all drives in the hwgroup without looping for ever
		 */
		if ((drive->dev_flags & IDE_DFLAG_BLOCKED) &&
		    blk_pm_request(rq) == 0 &&
		    (rq->cmd_flags & REQ_PREEMPT) == 0) {
			drive = drive->next ? drive->next : hwgroup->drive;
			if (loops++ < 4 && !blk_queue_plugged(drive->queue))
				goto again;
			/* We clear busy, there should be no pending ATA command at this point. */
			hwgroup->busy = 0;
			break;
		}

		hwgroup->rq = rq;

		/*
		 * Some systems have trouble with IDE IRQs arriving while
		 * the driver is still setting things up. So, here we disable
		 * the IRQ used by this interface while the request is being started.
		 * This may look bad at first, but pretty much the same thing
		 * happens anyway when any interrupt comes in, IDE or otherwise
		 * -- the kernel masks the IRQ while it is being handled.
		 */
		if (masked_irq != IDE_NO_IRQ && hwif->irq != masked_irq)
			disable_irq_nosync(hwif->irq);
		spin_unlock(&ide_lock);
		local_irq_enable_in_hardirq();
			/* allow other IRQs while we start this request */
		startstop = start_request(drive, rq);
		spin_lock_irq(&ide_lock);
		if (masked_irq != IDE_NO_IRQ && hwif->irq != masked_irq)
			enable_irq(hwif->irq);
		if (startstop == ide_stopped)
			hwgroup->busy = 0;
	}
}

/*
 * Passes the stuff to ide_do_request
 */
void do_ide_request(struct request_queue *q)
{
	ide_drive_t *drive = q->queuedata;

	ide_do_request(HWGROUP(drive), IDE_NO_IRQ);
}

/*
 * un-busy the hwgroup etc, and clear any pending DMA status. we want to
 * retry the current request in pio mode instead of risking tossing it
 * all away
 */
static ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
{
	ide_hwif_t *hwif = HWIF(drive);
	struct request *rq;
	ide_startstop_t ret = ide_stopped;

	/*
	 * end current dma transaction
	 */

	if (error < 0) {
		printk(KERN_WARNING "%s: DMA timeout error\n", drive->name);
		(void)hwif->dma_ops->dma_end(drive);
		ret = ide_error(drive, "dma timeout error",
				hwif->tp_ops->read_status(hwif));
	} else {
		printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name);
		hwif->dma_ops->dma_timeout(drive);
	}

	/*
	 * disable dma for now, but remember that we did so because of
	 * a timeout -- we'll reenable after we finish this next request
	 * (or rather the first chunk of it) in pio.
	 */
	drive->dev_flags |= IDE_DFLAG_DMA_PIO_RETRY;
	drive->retry_pio++;
	ide_dma_off_quietly(drive);

	/*
	 * un-busy drive etc (hwgroup->busy is cleared on return) and
	 * make sure request is sane
	 */
	rq = HWGROUP(drive)->rq;

	if (!rq)
		goto out;

	HWGROUP(drive)->rq = NULL;

	rq->errors = 0;

	if (!rq->bio)
		goto out;

	rq->sector = rq->bio->bi_sector;
	rq->current_nr_sectors = bio_iovec(rq->bio)->bv_len >> 9;
	rq->hard_cur_sectors = rq->current_nr_sectors;
	rq->buffer = bio_data(rq->bio);
out:
	return ret;
}

/**
 * ide_timer_expiry - handle lack of an IDE interrupt
 * @data: timer callback magic (hwgroup)
 *
 * An IDE command has timed out before the expected drive return
 * occurred. At this point we attempt to clean up the current mess.
 * If the current handler includes an expiry handler then we invoke
 * the expiry handler, and providing it is happy the work is done.
 * If that fails we apply generic recovery rules invoking the handler
 * and checking the drive DMA status. We have an excessively
 * incestuous relationship with the DMA logic that wants cleaning up.
 */

void ide_timer_expiry (unsigned long data)
{
	ide_hwgroup_t *hwgroup = (ide_hwgroup_t *) data;
	ide_handler_t *handler;
	ide_expiry_t *expiry;
	unsigned long flags;
	unsigned long wait = -1;

	spin_lock_irqsave(&ide_lock, flags);

	if (((handler = hwgroup->handler) == NULL) ||
	    (hwgroup->req_gen != hwgroup->req_gen_timer)) {
		/*
		 * Either a marginal timeout occurred
		 * (got the interrupt just as timer expired),
		 * or we were "sleeping" to give other devices a chance.
		 * Either way, we don't really want to complain about anything.
		 */
		if (hwgroup->sleeping) {
			hwgroup->sleeping = 0;
			hwgroup->busy = 0;
		}
	} else {
		ide_drive_t *drive = hwgroup->drive;
		if (!drive) {
			printk(KERN_ERR "ide_timer_expiry: hwgroup->drive was NULL\n");
			hwgroup->handler = NULL;
		} else {
			ide_hwif_t *hwif;
			ide_startstop_t startstop = ide_stopped;
			if (!hwgroup->busy) {
				hwgroup->busy = 1;	/* paranoia */
				printk(KERN_ERR "%s: ide_timer_expiry: hwgroup->busy was 0 ??\n", drive->name);
			}
			if ((expiry = hwgroup->expiry) != NULL) {
				/* continue */
				if ((wait = expiry(drive)) > 0) {
					/* reset timer */
					hwgroup->timer.expires = jiffies + wait;
					hwgroup->req_gen_timer = hwgroup->req_gen;
					add_timer(&hwgroup->timer);
					spin_unlock_irqrestore(&ide_lock, flags);
					return;
				}
			}
			hwgroup->handler = NULL;
			/*
			 * We need to simulate a real interrupt when invoking
			 * the handler() function, which means we need to
			 * globally mask the specific IRQ:
			 */
			spin_unlock(&ide_lock);
			hwif = HWIF(drive);
			/* disable_irq_nosync ?? */
			disable_irq(hwif->irq);
			/* local CPU only,
			 * as if we were handling an interrupt */
			local_irq_disable();
			if (hwgroup->polling) {
				startstop = handler(drive);
			} else if (drive_is_ready(drive)) {
				if (drive->waiting_for_dma)
					hwif->dma_ops->dma_lost_irq(drive);
				(void)ide_ack_intr(hwif);
				printk(KERN_WARNING "%s: lost interrupt\n", drive->name);
				startstop = handler(drive);
			} else {
				if (drive->waiting_for_dma) {
					startstop = ide_dma_timeout_retry(drive, wait);
				} else
					startstop =
					ide_error(drive, "irq timeout",
						  hwif->tp_ops->read_status(hwif));
			}
			drive->service_time = jiffies - drive->service_start;
			spin_lock_irq(&ide_lock);
			enable_irq(hwif->irq);
			if (startstop == ide_stopped)
				hwgroup->busy = 0;
		}
	}
	ide_do_request(hwgroup, IDE_NO_IRQ);
	spin_unlock_irqrestore(&ide_lock, flags);
}

/**
 * unexpected_intr - handle an unexpected IDE interrupt
 * @irq: interrupt line
 * @hwgroup: hwgroup being processed
 *
 * There's nothing really useful we can do with an unexpected interrupt,
 * other than reading the status register (to clear it), and logging it.
 * There should be no way that an irq can happen before we're ready for it,
 * so we needn't worry much about losing an "important" interrupt here.
 *
 * On laptops (and "green" PCs), an unexpected interrupt occurs whenever
 * the drive enters "idle", "standby", or "sleep" mode, so if the status
 * looks "good", we just ignore the interrupt completely.
 *
 * This routine assumes __cli() is in effect when called.
 *
 * If an unexpected interrupt happens on irq15 while we are handling irq14
 * and if the two interfaces are "serialized" (CMD640), then it looks like
 * we could screw up by interfering with a new request being set up for
 * irq15.
 *
 * In reality, this is a non-issue. The new command is not sent unless
 * the drive is ready to accept one, in which case we know the drive is
 * not trying to interrupt us. And ide_set_handler() is always invoked
 * before completing the issuance of any new drive command, so we will not
 * be accidentally invoked as a result of any valid command completion
 * interrupt.
 *
 * Note that we must walk the entire hwgroup here. We know which hwif
 * is doing the current command, but we don't know which hwif burped
 * mysteriously.
 */

static void unexpected_intr (int irq, ide_hwgroup_t *hwgroup)
{
	u8 stat;
	ide_hwif_t *hwif = hwgroup->hwif;

	/*
	 * handle the unexpected interrupt
	 */
	do {
		if (hwif->irq == irq) {
			stat = hwif->tp_ops->read_status(hwif);

			if (!OK_STAT(stat, ATA_DRDY, BAD_STAT)) {
				/* Try to not flood the console with msgs */
				static unsigned long last_msgtime, count;
				++count;
				if (time_after(jiffies, last_msgtime + HZ)) {
					last_msgtime = jiffies;
					printk(KERN_ERR "%s%s: unexpected interrupt, "
						"status=0x%02x, count=%ld\n",
						hwif->name,
						(hwif->next==hwgroup->hwif) ? "" : "(?)", stat, count);
				}
			}
		}
	} while ((hwif = hwif->next) != hwgroup->hwif);
}

/**
 * ide_intr - default IDE interrupt handler
 * @irq: interrupt number
 * @dev_id: hwif group
 * @regs: unused weirdness from the kernel irq layer
 *
 * This is the default IRQ handler for the IDE layer. You should
 * not need to override it. If you do be aware it is subtle in
 * places
 *
 * hwgroup->hwif is the interface in the group currently performing
 * a command. hwgroup->drive is the drive and hwgroup->handler is
 * the IRQ handler to call. As we issue a command the handlers
 * step through multiple states, reassigning the handler to the
 * next step in the process. Unlike a smart SCSI controller IDE
 * expects the main processor to sequence the various transfer
 * stages. We also manage a poll timer to catch up with most
 * timeout situations. There are still a few where the handlers
 * don't ever decide to give up.
 *
 * The handler eventually returns ide_stopped to indicate the
 * request completed. At this point we issue the next request
 * on the hwgroup and the process begins again.
 */

irqreturn_t ide_intr (int irq, void *dev_id)
{
	unsigned long flags;
	ide_hwgroup_t *hwgroup = (ide_hwgroup_t *)dev_id;
	ide_hwif_t *hwif;
	ide_drive_t *drive;
	ide_handler_t *handler;
	ide_startstop_t startstop;

	spin_lock_irqsave(&ide_lock, flags);
	hwif = hwgroup->hwif;

	if (!ide_ack_intr(hwif)) {
		spin_unlock_irqrestore(&ide_lock, flags);
		return IRQ_NONE;
	}

	if ((handler = hwgroup->handler) == NULL || hwgroup->polling) {
		/*
		 * Not expecting an interrupt from this drive.
		 * That means this could be:
		 *	(1) an interrupt from another PCI device
		 *	    sharing the same PCI INT# as us.
		 * or	(2) a drive just entered sleep or standby mode,
		 *	    and is interrupting to let us know.
		 * or	(3) a spurious interrupt of unknown origin.
		 *
		 * For PCI, we cannot tell the difference,
		 * so in that case we just ignore it and hope it goes away.
		 *
		 * FIXME: unexpected_intr should be hwif-> then we can
		 * remove all the ifdef PCI crap
		 */
#ifdef CONFIG_BLK_DEV_IDEPCI
		if (hwif->chipset != ide_pci)
#endif	/* CONFIG_BLK_DEV_IDEPCI */
		{
			/*
			 * Probably not a shared PCI interrupt,
			 * so we can safely try to do something about it:
			 */
			unexpected_intr(irq, hwgroup);
#ifdef CONFIG_BLK_DEV_IDEPCI
		} else {
			/*
			 * Whack the status register, just in case
			 * we have a leftover pending IRQ.
			 */
			(void)hwif->tp_ops->read_status(hwif);
#endif /* CONFIG_BLK_DEV_IDEPCI */
		}
		spin_unlock_irqrestore(&ide_lock, flags);
		return IRQ_NONE;
	}
	drive = hwgroup->drive;
	if (!drive) {
		/*
		 * This should NEVER happen, and there isn't much
		 * we could do about it here.
		 *
		 * [Note - this can occur if the drive is hot unplugged]
		 */
		spin_unlock_irqrestore(&ide_lock, flags);
		return IRQ_HANDLED;
	}
	if (!drive_is_ready(drive)) {
		/*
		 * This happens regularly when we share a PCI IRQ with
		 * another device. Unfortunately, it can also happen
		 * with some buggy drives that trigger the IRQ before
		 * their status register is up to date. Hopefully we have
		 * enough advance overhead that the latter isn't a problem.
		 */
		spin_unlock_irqrestore(&ide_lock, flags);
		return IRQ_NONE;
	}
	if (!hwgroup->busy) {
		hwgroup->busy = 1;	/* paranoia */
		printk(KERN_ERR "%s: ide_intr: hwgroup->busy was 0 ??\n", drive->name);
	}
	hwgroup->handler = NULL;
	hwgroup->req_gen++;
	del_timer(&hwgroup->timer);
	spin_unlock(&ide_lock);

	/* Some controllers might set DMA INTR no matter DMA or PIO;
	 * bmdma status might need to be cleared even for
	 * PIO interrupts to prevent spurious/lost irq.
	 */
	if (hwif->ide_dma_clear_irq && !(drive->waiting_for_dma))
		/* ide_dma_end() needs bmdma status for error checking.
		 * So, skip clearing bmdma status here and leave it
		 * to ide_dma_end() if this is dma interrupt.
		 */
		hwif->ide_dma_clear_irq(drive);

	if (drive->dev_flags & IDE_DFLAG_UNMASK)
		local_irq_enable_in_hardirq();
	/* service this interrupt, may set handler for next interrupt */
	startstop = handler(drive);
	spin_lock_irq(&ide_lock);

	/*
	 * Note that handler() may have set things up for another
	 * interrupt to occur soon, but it cannot happen until
	 * we exit from this routine, because it will be the
	 * same irq as is currently being serviced here, and Linux
	 * won't allow another of the same (on any CPU) until we return.
	 */
	drive->service_time = jiffies - drive->service_start;
	if (startstop == ide_stopped) {
		if (hwgroup->handler == NULL) {	/* paranoia */
			hwgroup->busy = 0;
			ide_do_request(hwgroup, hwif->irq);
		} else {
			printk(KERN_ERR "%s: ide_intr: huh? expected NULL handler "
				"on exit\n", drive->name);
		}
	}
	spin_unlock_irqrestore(&ide_lock, flags);
	return IRQ_HANDLED;
}

/**
 * ide_do_drive_cmd - issue IDE special command
 * @drive: device to issue command
 * @rq: request to issue
 *
 * This function issues a special IDE device request
 * onto the request queue.
 *
 * the rq is queued at the head of the request queue, displacing
 * the currently-being-processed request and this function
 * returns immediately without waiting for the new rq to be
 * completed. This is VERY DANGEROUS, and is intended for
 * careful use by the ATAPI tape/cdrom driver code.
 */

void ide_do_drive_cmd(ide_drive_t *drive, struct request *rq)
{
	unsigned long flags;
	ide_hwgroup_t *hwgroup = HWGROUP(drive);

	spin_lock_irqsave(&ide_lock, flags);
	hwgroup->rq = NULL;
	__elv_add_request(drive->queue, rq, ELEVATOR_INSERT_FRONT, 1);
	__generic_unplug_device(drive->queue);
	spin_unlock_irqrestore(&ide_lock, flags);
}

EXPORT_SYMBOL(ide_do_drive_cmd);

void ide_pktcmd_tf_load(ide_drive_t *drive, u32 tf_flags, u16 bcount, u8 dma)
{
	ide_hwif_t *hwif = drive->hwif;
	ide_task_t task;

	memset(&task, 0, sizeof(task));
	task.tf_flags = IDE_TFLAG_OUT_LBAH | IDE_TFLAG_OUT_LBAM |
			IDE_TFLAG_OUT_FEATURE | tf_flags;
	task.tf.feature = dma;		/* Use PIO/DMA */
	task.tf.lbam = bcount & 0xff;
	task.tf.lbah = (bcount >> 8) & 0xff;

	ide_tf_dump(drive->name, &task.tf);
	hwif->tp_ops->set_irq(hwif, 1);
	SELECT_MASK(drive, 0);
	hwif->tp_ops->tf_load(drive, &task);
}

EXPORT_SYMBOL_GPL(ide_pktcmd_tf_load);

void ide_pad_transfer(ide_drive_t *drive, int write, int len)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 buf[4] = { 0 };

	while (len > 0) {
		if (write)
			hwif->tp_ops->output_data(drive, NULL, buf, min(4, len));
		else
			hwif->tp_ops->input_data(drive, NULL, buf, min(4, len));
		len -= 4;
	}
}
EXPORT_SYMBOL_GPL(ide_pad_transfer);
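
/*
 * Illustrative sketch only -- NOT part of ide-io.c at this revision. It shows
 * how a sub-driver setting flagged DS_SYNC is routed through
 * ide_devset_execute() above, which wraps the ->set() call in a
 * REQ_DEVSET_EXEC special request so it runs serialized with normal I/O and
 * is eventually executed by ide_special_rq(). Everything prefixed "exdisk_"
 * is a made-up name for illustration; only the ide-io.c helpers, the
 * DS_SYNC flag and the .flags/.set members of struct ide_devset used here
 * are taken from the code above.
 */
#if 0	/* example code, not compiled */
#include <linux/ide.h>

/* Called from ide_special_rq() once the request reaches the queue head;
 * returns 0 on success or a negative errno on failure. */
static int exdisk_set_quirk(ide_drive_t *drive, int arg)
{
	return (arg == 0 || arg == 1) ? 0 : -EINVAL;
}

/* DS_SYNC makes ide_devset_execute() queue the change instead of applying
 * it immediately, so it cannot race with a command in flight. */
static const struct ide_devset exdisk_quirk_devset = {
	.flags	= DS_SYNC,
	.set	= exdisk_set_quirk,
};

/* Typical caller, e.g. from a hypothetical ioctl or sysfs handler: */
static int exdisk_enable_quirk(ide_drive_t *drive)
{
	return ide_devset_execute(drive, &exdisk_quirk_devset, 1);
}
#endif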