ide-io.c revision bfa7d8e55f0c5ae22ef57eb22942c74fdde7b9bd
/*
 * IDE I/O functions
 *
 * Basic PIO and command management functionality.
 *
 * This code was split off from ide.c. See ide.c for history and original
 * copyrights.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * For the avoidance of doubt the "preferred form" of this code is one which
 * is in an open non patent encumbered format. Where cryptographic key signing
 * forms part of the process of creating an executable the information
 * including keys needed to generate an equivalently functional executable
 * are deemed to be part of the source code.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/major.h>
#include <linux/errno.h>
#include <linux/genhd.h>
#include <linux/blkpg.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/ide.h>
#include <linux/hdreg.h>
#include <linux/completion.h>
#include <linux/reboot.h>
#include <linux/cdrom.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/kmod.h>
#include <linux/scatterlist.h>
#include <linux/bitops.h>

#include <asm/byteorder.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <asm/io.h>

static int __ide_end_request(ide_drive_t *drive, struct request *rq,
			     int uptodate, unsigned int nr_bytes, int dequeue)
{
	int ret = 1;
	int error = 0;

	if (uptodate <= 0)
		error = uptodate ? uptodate : -EIO;

	/*
	 * if failfast is set on a request, override number of sectors and
	 * complete the whole request right now
	 */
	if (blk_noretry_request(rq) && error)
		nr_bytes = rq->hard_nr_sectors << 9;

	if (!blk_fs_request(rq) && error && !rq->errors)
		rq->errors = -EIO;

	/*
	 * decide whether to reenable DMA -- 3 is a random magic for now,
	 * if we DMA timeout more than 3 times, just stay in PIO
	 */
	if ((drive->dev_flags & IDE_DFLAG_DMA_PIO_RETRY) &&
	    drive->retry_pio <= 3) {
		drive->dev_flags &= ~IDE_DFLAG_DMA_PIO_RETRY;
		ide_dma_on(drive);
	}

	if (!__blk_end_request(rq, error, nr_bytes)) {
		if (dequeue)
			HWGROUP(drive)->rq = NULL;
		ret = 0;
	}

	return ret;
}

/**
 * ide_end_request - complete an IDE I/O
 * @drive: IDE device for the I/O
 * @uptodate: > 0 on success, 0 or a negative errno on failure
 * @nr_sectors: number of sectors completed
 *
 * This is our end_request wrapper function. We complete the I/O,
 * update the random number input, and dequeue the request, which if
 * it was tagged may be out of order.
 */

int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
{
	unsigned int nr_bytes = nr_sectors << 9;
	struct request *rq;
	unsigned long flags;
	int ret = 1;

	/*
	 * room for locking improvements here, the calls below don't
	 * need the queue lock held at all
	 */
	spin_lock_irqsave(&ide_lock, flags);
	rq = HWGROUP(drive)->rq;

	if (!nr_bytes) {
		if (blk_pc_request(rq))
			nr_bytes = rq->data_len;
		else
			nr_bytes = rq->hard_cur_sectors << 9;
	}

	ret = __ide_end_request(drive, rq, uptodate, nr_bytes, 1);

	spin_unlock_irqrestore(&ide_lock, flags);
	return ret;
}
EXPORT_SYMBOL(ide_end_request);
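/*
 * Power-management helpers. ide_complete_power_step() advances pm->pm_step
 * through the suspend (flush cache -> standby) or resume (restore PIO ->
 * idle -> restore DMA) sequence; ide_start_power_step() builds and issues
 * the taskfile command for the current step.
 */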
static void ide_complete_power_step(ide_drive_t *drive, struct request *rq, u8 stat, u8 error)
{
	struct request_pm_state *pm = rq->data;

	if (drive->media != ide_disk)
		return;

	switch (pm->pm_step) {
	case IDE_PM_FLUSH_CACHE:	/* Suspend step 1 (flush cache) */
		if (pm->pm_state == PM_EVENT_FREEZE)
			pm->pm_step = IDE_PM_COMPLETED;
		else
			pm->pm_step = IDE_PM_STANDBY;
		break;
	case IDE_PM_STANDBY:		/* Suspend step 2 (standby) */
		pm->pm_step = IDE_PM_COMPLETED;
		break;
	case IDE_PM_RESTORE_PIO:	/* Resume step 1 (restore PIO) */
		pm->pm_step = IDE_PM_IDLE;
		break;
	case IDE_PM_IDLE:		/* Resume step 2 (idle) */
		pm->pm_step = IDE_PM_RESTORE_DMA;
		break;
	}
}

static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
{
	struct request_pm_state *pm = rq->data;
	ide_task_t *args = rq->special;

	memset(args, 0, sizeof(*args));

	switch (pm->pm_step) {
	case IDE_PM_FLUSH_CACHE:	/* Suspend step 1 (flush cache) */
		if (drive->media != ide_disk)
			break;
		/* Not supported? Switch to next step now. */
		if (ata_id_flush_enabled(drive->id) == 0 ||
		    (drive->dev_flags & IDE_DFLAG_WCACHE) == 0) {
			ide_complete_power_step(drive, rq, 0, 0);
			return ide_stopped;
		}
		if (ata_id_flush_ext_enabled(drive->id))
			args->tf.command = ATA_CMD_FLUSH_EXT;
		else
			args->tf.command = ATA_CMD_FLUSH;
		goto out_do_tf;
	case IDE_PM_STANDBY:		/* Suspend step 2 (standby) */
		args->tf.command = ATA_CMD_STANDBYNOW1;
		goto out_do_tf;
	case IDE_PM_RESTORE_PIO:	/* Resume step 1 (restore PIO) */
		ide_set_max_pio(drive);
		/*
		 * skip IDE_PM_IDLE for ATAPI devices
		 */
		if (drive->media != ide_disk)
			pm->pm_step = IDE_PM_RESTORE_DMA;
		else
			ide_complete_power_step(drive, rq, 0, 0);
		return ide_stopped;
	case IDE_PM_IDLE:		/* Resume step 2 (idle) */
		args->tf.command = ATA_CMD_IDLEIMMEDIATE;
		goto out_do_tf;
	case IDE_PM_RESTORE_DMA:	/* Resume step 3 (restore DMA) */
		/*
		 * Right now, all we do is call ide_set_dma(drive),
		 * we could be smarter and check for current xfer_speed
		 * in struct drive etc...
		 */
		if (drive->hwif->dma_ops == NULL)
			break;
		/*
		 * TODO: respect IDE_DFLAG_USING_DMA
		 */
		ide_set_dma(drive);
		break;
	}

	pm->pm_step = IDE_PM_COMPLETED;
	return ide_stopped;

out_do_tf:
	args->tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
	args->data_phase = TASKFILE_NO_DATA;
	return do_rw_taskfile(drive, args);
}

/**
 * ide_end_dequeued_request - complete an IDE I/O
 * @drive: IDE device for the I/O
 * @rq: request being completed
 * @uptodate: > 0 on success, 0 or a negative errno on failure
 * @nr_sectors: number of sectors completed
 *
 * Complete an I/O that is no longer on the request queue. This
 * typically occurs when we pull the request and issue a REQUEST_SENSE.
 * We must still finish the old request but we must not tamper with the
 * queue in the meantime.
 *
 * NOTE: This path does not handle barrier, but barrier is not supported
 * on ide-cd anyway.
 */

int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq,
			     int uptodate, int nr_sectors)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ide_lock, flags);
	BUG_ON(!blk_rq_started(rq));
	ret = __ide_end_request(drive, rq, uptodate, nr_sectors << 9, 0);
	spin_unlock_irqrestore(&ide_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(ide_end_dequeued_request);
/**
 * ide_complete_pm_request - end the current Power Management request
 * @drive: target drive
 * @rq: request
 *
 * This function cleans up the current PM request and stops the queue
 * if necessary.
 */
static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq)
{
	unsigned long flags;

#ifdef DEBUG_PM
	printk("%s: completing PM request, %s\n", drive->name,
	       blk_pm_suspend_request(rq) ? "suspend" : "resume");
#endif
	spin_lock_irqsave(&ide_lock, flags);
	if (blk_pm_suspend_request(rq)) {
		blk_stop_queue(drive->queue);
	} else {
		drive->dev_flags &= ~IDE_DFLAG_BLOCKED;
		blk_start_queue(drive->queue);
	}
	HWGROUP(drive)->rq = NULL;
	if (__blk_end_request(rq, 0, 0))
		BUG();
	spin_unlock_irqrestore(&ide_lock, flags);
}

/**
 * ide_end_drive_cmd - end an explicit drive command
 * @drive: drive the command was issued to
 * @stat: status bits
 * @err: error bits
 *
 * Clean up after success/failure of an explicit drive command.
 * These get thrown onto the queue so they are synchronized with
 * real I/O operations on the drive.
 *
 * In LBA48 mode we have to read the register set twice to get
 * all the extra information out.
 */

void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
{
	unsigned long flags;
	struct request *rq;

	spin_lock_irqsave(&ide_lock, flags);
	rq = HWGROUP(drive)->rq;
	spin_unlock_irqrestore(&ide_lock, flags);

	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
		ide_task_t *task = (ide_task_t *)rq->special;

		if (rq->errors == 0)
			rq->errors = !OK_STAT(stat, ATA_DRDY, BAD_STAT);

		if (task) {
			struct ide_taskfile *tf = &task->tf;

			tf->error = err;
			tf->status = stat;

			drive->hwif->tp_ops->tf_read(drive, task);

			if (task->tf_flags & IDE_TFLAG_DYN)
				kfree(task);
		}
	} else if (blk_pm_request(rq)) {
		struct request_pm_state *pm = rq->data;
#ifdef DEBUG_PM
		printk("%s: complete_power_step(step: %d, stat: %x, err: %x)\n",
		       drive->name, pm->pm_step, stat, err);
#endif
		ide_complete_power_step(drive, rq, stat, err);
		if (pm->pm_step == IDE_PM_COMPLETED)
			ide_complete_pm_request(drive, rq);
		return;
	}

	spin_lock_irqsave(&ide_lock, flags);
	HWGROUP(drive)->rq = NULL;
	rq->errors = err;
	if (unlikely(__blk_end_request(rq, (rq->errors ? -EIO : 0),
				       blk_rq_bytes(rq))))
		BUG();
	spin_unlock_irqrestore(&ide_lock, flags);
}

EXPORT_SYMBOL(ide_end_drive_cmd);
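/*
 * ide_kill_rq() fails the current request outright, going through the
 * owning driver's end_request method when the request belongs to a disk
 * so that per-device cleanup still runs.
 */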
static void ide_kill_rq(ide_drive_t *drive, struct request *rq)
{
	if (rq->rq_disk) {
		ide_driver_t *drv;

		drv = *(ide_driver_t **)rq->rq_disk->private_data;
		drv->end_request(drive, 0, 0);
	} else
		ide_end_request(drive, 0, 0);
}

static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
{
	ide_hwif_t *hwif = drive->hwif;

	if ((stat & ATA_BUSY) ||
	    ((stat & ATA_DF) && (drive->dev_flags & IDE_DFLAG_NOWERR) == 0)) {
		/* other bits are useless when BUSY */
		rq->errors |= ERROR_RESET;
	} else if (stat & ATA_ERR) {
		/* err has different meaning on cdrom and tape */
		if (err == ATA_ABORTED) {
			if ((drive->dev_flags & IDE_DFLAG_LBA) &&
			    /* some newer drives don't support ATA_CMD_INIT_DEV_PARAMS */
			    hwif->tp_ops->read_status(hwif) == ATA_CMD_INIT_DEV_PARAMS)
				return ide_stopped;
		} else if ((err & BAD_CRC) == BAD_CRC) {
			/* UDMA crc error, just retry the operation */
			drive->crc_count++;
		} else if (err & (ATA_BBK | ATA_UNC)) {
			/* retries won't help these */
			rq->errors = ERROR_MAX;
		} else if (err & ATA_TRK0NF) {
			/* help it find track zero */
			rq->errors |= ERROR_RECAL;
		}
	}

	if ((stat & ATA_DRQ) && rq_data_dir(rq) == READ &&
	    (hwif->host_flags & IDE_HFLAG_ERROR_STOPS_FIFO) == 0) {
		int nsect = drive->mult_count ? drive->mult_count : 1;

		ide_pad_transfer(drive, READ, nsect * SECTOR_SIZE);
	}

	if (rq->errors >= ERROR_MAX || blk_noretry_request(rq)) {
		ide_kill_rq(drive, rq);
		return ide_stopped;
	}

	if (hwif->tp_ops->read_status(hwif) & (ATA_BUSY | ATA_DRQ))
		rq->errors |= ERROR_RESET;

	if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
		++rq->errors;
		return ide_do_reset(drive);
	}

	if ((rq->errors & ERROR_RECAL) == ERROR_RECAL)
		drive->special.b.recalibrate = 1;

	++rq->errors;

	return ide_stopped;
}

static ide_startstop_t ide_atapi_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
{
	ide_hwif_t *hwif = drive->hwif;

	if ((stat & ATA_BUSY) ||
	    ((stat & ATA_DF) && (drive->dev_flags & IDE_DFLAG_NOWERR) == 0)) {
		/* other bits are useless when BUSY */
		rq->errors |= ERROR_RESET;
	} else {
		/* add decoding error stuff */
	}

	if (hwif->tp_ops->read_status(hwif) & (ATA_BUSY | ATA_DRQ))
		/* force an abort */
		hwif->tp_ops->exec_command(hwif, ATA_CMD_IDLEIMMEDIATE);

	if (rq->errors >= ERROR_MAX) {
		ide_kill_rq(drive, rq);
	} else {
		if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
			++rq->errors;
			return ide_do_reset(drive);
		}
		++rq->errors;
	}

	return ide_stopped;
}

ide_startstop_t
__ide_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
{
	if (drive->media == ide_disk)
		return ide_ata_error(drive, rq, stat, err);
	return ide_atapi_error(drive, rq, stat, err);
}

EXPORT_SYMBOL_GPL(__ide_error);

/**
 * ide_error - handle an error on the IDE
 * @drive: drive the error occurred on
 * @msg: message to report
 * @stat: status bits
 *
 * ide_error() takes action based on the error returned by the drive.
 * For normal I/O that may well include retries. We deal with
 * both new-style (taskfile) and old style command handling here.
 * In the case of taskfile command handling there is work left to do.
 */

ide_startstop_t ide_error (ide_drive_t *drive, const char *msg, u8 stat)
{
	struct request *rq;
	u8 err;

	err = ide_dump_status(drive, msg, stat);

	if ((rq = HWGROUP(drive)->rq) == NULL)
		return ide_stopped;

	/* retry only "normal" I/O: */
	if (!blk_fs_request(rq)) {
		rq->errors = 1;
		ide_end_drive_cmd(drive, stat, err);
		return ide_stopped;
	}

	if (rq->rq_disk) {
		ide_driver_t *drv;

		drv = *(ide_driver_t **)rq->rq_disk->private_data;
		return drv->error(drive, rq, stat, err);
	} else
		return __ide_error(drive, rq, stat, err);
}

EXPORT_SYMBOL_GPL(ide_error);
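/*
 * Taskfile builders for the "special" per-drive setup commands issued by
 * ide_disk_special() below: INIT_DEV_PARAMS (geometry), RESTORE
 * (recalibrate) and SET_MULTI (multi-sector mode).
 */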
static void ide_tf_set_specify_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
{
	tf->nsect = drive->sect;
	tf->lbal = drive->sect;
	tf->lbam = drive->cyl;
	tf->lbah = drive->cyl >> 8;
	tf->device = (drive->head - 1) | drive->select;
	tf->command = ATA_CMD_INIT_DEV_PARAMS;
}

static void ide_tf_set_restore_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
{
	tf->nsect = drive->sect;
	tf->command = ATA_CMD_RESTORE;
}

static void ide_tf_set_setmult_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
{
	tf->nsect = drive->mult_req;
	tf->command = ATA_CMD_SET_MULTI;
}

static ide_startstop_t ide_disk_special(ide_drive_t *drive)
{
	special_t *s = &drive->special;
	ide_task_t args;

	memset(&args, 0, sizeof(ide_task_t));
	args.data_phase = TASKFILE_NO_DATA;

	if (s->b.set_geometry) {
		s->b.set_geometry = 0;
		ide_tf_set_specify_cmd(drive, &args.tf);
	} else if (s->b.recalibrate) {
		s->b.recalibrate = 0;
		ide_tf_set_restore_cmd(drive, &args.tf);
	} else if (s->b.set_multmode) {
		s->b.set_multmode = 0;
		ide_tf_set_setmult_cmd(drive, &args.tf);
	} else if (s->all) {
		int special = s->all;
		s->all = 0;
		printk(KERN_ERR "%s: bad special flag: 0x%02x\n", drive->name, special);
		return ide_stopped;
	}

	args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE |
			IDE_TFLAG_CUSTOM_HANDLER;

	do_rw_taskfile(drive, &args);

	return ide_started;
}

/**
 * do_special - issue some special commands
 * @drive: drive the command is for
 *
 * do_special() is used to issue ATA_CMD_INIT_DEV_PARAMS,
 * ATA_CMD_RESTORE and ATA_CMD_SET_MULTI commands to a drive.
 *
 * It used to do much more, but has been scaled back.
 */

static ide_startstop_t do_special (ide_drive_t *drive)
{
	special_t *s = &drive->special;

#ifdef DEBUG
	printk("%s: do_special: 0x%02x\n", drive->name, s->all);
#endif
	if (drive->media == ide_disk)
		return ide_disk_special(drive);

	s->all = 0;
	drive->mult_req = 0;
	return ide_stopped;
}
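/*
 * ide_map_sg() fills the hwif's scatter/gather table for @rq: fs requests
 * are mapped via blk_rq_map_sg(), while taskfile requests use a single
 * segment covering rq->buffer.
 */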
void ide_map_sg(ide_drive_t *drive, struct request *rq)
{
	ide_hwif_t *hwif = drive->hwif;
	struct scatterlist *sg = hwif->sg_table;

	if (hwif->sg_mapped)	/* needed by ide-scsi */
		return;

	if (rq->cmd_type != REQ_TYPE_ATA_TASKFILE) {
		hwif->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
	} else {
		sg_init_one(sg, rq->buffer, rq->nr_sectors * SECTOR_SIZE);
		hwif->sg_nents = 1;
	}
}

EXPORT_SYMBOL_GPL(ide_map_sg);

void ide_init_sg_cmd(ide_drive_t *drive, struct request *rq)
{
	ide_hwif_t *hwif = drive->hwif;

	hwif->nsect = hwif->nleft = rq->nr_sectors;
	hwif->cursg_ofs = 0;
	hwif->cursg = NULL;
}

EXPORT_SYMBOL_GPL(ide_init_sg_cmd);

/**
 * execute_drive_cmd - issue special drive command
 * @drive: the drive to issue the command on
 * @rq: the request structure holding the command
 *
 * execute_drive_cmd() issues a special drive command, usually
 * initiated by ioctl() from the external hdparm program. The
 * command can be a drive command, drive task or taskfile
 * operation. Weirdly you can call it with NULL to wait for
 * all commands to finish. Don't do this as that is due to change.
 */

static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
		struct request *rq)
{
	ide_hwif_t *hwif = HWIF(drive);
	ide_task_t *task = rq->special;

	if (task) {
		hwif->data_phase = task->data_phase;

		switch (hwif->data_phase) {
		case TASKFILE_MULTI_OUT:
		case TASKFILE_OUT:
		case TASKFILE_MULTI_IN:
		case TASKFILE_IN:
			ide_init_sg_cmd(drive, rq);
			ide_map_sg(drive, rq);
		default:
			break;
		}

		return do_rw_taskfile(drive, task);
	}

	/*
	 * NULL is actually a valid way of waiting for
	 * all current requests to be flushed from the queue.
	 */
#ifdef DEBUG
	printk("%s: DRIVE_CMD (null)\n", drive->name);
#endif
	ide_end_drive_cmd(drive, hwif->tp_ops->read_status(hwif),
			  ide_read_error(drive));

	return ide_stopped;
}
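/*
 * ide_devset_execute() applies a device setting. Settings flagged DS_SYNC
 * must be changed in request-queue context, so the new value is packed
 * into a REQ_DEVSET_EXEC special request and executed synchronously;
 * everything else is applied directly via setting->set().
 */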
int ide_devset_execute(ide_drive_t *drive, const struct ide_devset *setting,
		       int arg)
{
	struct request_queue *q = drive->queue;
	struct request *rq;
	int ret = 0;

	if (!(setting->flags & DS_SYNC))
		return setting->set(drive, arg);

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;

	rq->cmd_type = REQ_TYPE_SPECIAL;
	rq->cmd_len = 5;
	rq->cmd[0] = REQ_DEVSET_EXEC;
	*(int *)&rq->cmd[1] = arg;
	rq->special = setting->set;

	if (blk_execute_rq(q, NULL, rq, 0))
		ret = rq->errors;
	blk_put_request(rq);

	return ret;
}
EXPORT_SYMBOL_GPL(ide_devset_execute);

static ide_startstop_t ide_special_rq(ide_drive_t *drive, struct request *rq)
{
	switch (rq->cmd[0]) {
	case REQ_DEVSET_EXEC:
	{
		int err, (*setfunc)(ide_drive_t *, int) = rq->special;

		err = setfunc(drive, *(int *)&rq->cmd[1]);
		if (err)
			rq->errors = err;
		else
			err = 1;
		ide_end_request(drive, err, 0);
		return ide_stopped;
	}
	case REQ_DRIVE_RESET:
		return ide_do_reset(drive);
	default:
		blk_dump_rq_flags(rq, "ide_special_rq - bad request");
		ide_end_request(drive, 0, 0);
		return ide_stopped;
	}
}

static void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
{
	struct request_pm_state *pm = rq->data;

	if (blk_pm_suspend_request(rq) &&
	    pm->pm_step == IDE_PM_START_SUSPEND)
		/* Mark drive blocked when starting the suspend sequence. */
		drive->dev_flags |= IDE_DFLAG_BLOCKED;
	else if (blk_pm_resume_request(rq) &&
		 pm->pm_step == IDE_PM_START_RESUME) {
		/*
		 * The first thing we do on wakeup is to wait for BSY bit to
		 * go away (with a looong timeout) as a drive on this hwif may
		 * just be POSTing itself.
		 * We do that before even selecting as the "other" device on
		 * the bus may be broken enough to walk on our toes at this
		 * point.
		 */
		ide_hwif_t *hwif = drive->hwif;
		int rc;
#ifdef DEBUG_PM
		printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name);
#endif
		rc = ide_wait_not_busy(hwif, 35000);
		if (rc)
			printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name);
		SELECT_DRIVE(drive);
		hwif->tp_ops->set_irq(hwif, 1);
		rc = ide_wait_not_busy(hwif, 100000);
		if (rc)
			printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);
	}
}

/**
 * start_request - start of I/O and command issuing for IDE
 * @drive: drive the request is for
 * @rq: request to start
 *
 * start_request() initiates handling of a new I/O request. It
 * accepts commands and I/O (read/write) requests.
 *
 * FIXME: this function needs a rename
 */

static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
{
	ide_startstop_t startstop;

	BUG_ON(!blk_rq_started(rq));

#ifdef DEBUG
	printk("%s: start_request: current=0x%08lx\n",
		HWIF(drive)->name, (unsigned long) rq);
#endif

	/* bail early if we've exceeded max_failures */
	if (drive->max_failures && (drive->failures > drive->max_failures)) {
		rq->cmd_flags |= REQ_FAILED;
		goto kill_rq;
	}

	if (blk_pm_request(rq))
		ide_check_pm_state(drive, rq);

	SELECT_DRIVE(drive);
	if (ide_wait_stat(&startstop, drive, drive->ready_stat,
			  ATA_BUSY | ATA_DRQ, WAIT_READY)) {
		printk(KERN_ERR "%s: drive not ready for command\n", drive->name);
		return startstop;
	}
	if (!drive->special.all) {
		ide_driver_t *drv;

		/*
		 * We reset the drive so we need to issue a SETFEATURES.
		 * Do it _after_ do_special() restored device parameters.
		 */
		if (drive->current_speed == 0xff)
			ide_config_drive_speed(drive, drive->desired_speed);

		if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
			return execute_drive_cmd(drive, rq);
		else if (blk_pm_request(rq)) {
			struct request_pm_state *pm = rq->data;
#ifdef DEBUG_PM
			printk("%s: start_power_step(step: %d)\n",
				drive->name, pm->pm_step);
#endif
			startstop = ide_start_power_step(drive, rq);
			if (startstop == ide_stopped &&
			    pm->pm_step == IDE_PM_COMPLETED)
				ide_complete_pm_request(drive, rq);
			return startstop;
		} else if (!rq->rq_disk && blk_special_request(rq))
			/*
			 * TODO: Once all ULDs have been modified to
			 * check for specific op codes rather than
			 * blindly accepting any special request, the
			 * check for ->rq_disk above may be replaced
			 * by a more suitable mechanism or even
			 * dropped entirely.
			 */
			return ide_special_rq(drive, rq);

		drv = *(ide_driver_t **)rq->rq_disk->private_data;

		return drv->do_request(drive, rq, rq->sector);
	}
	return do_special(drive);
kill_rq:
	ide_kill_rq(drive, rq);
	return ide_stopped;
}
/**
 * ide_stall_queue - pause an IDE device
 * @drive: drive to stall
 * @timeout: time to stall for (jiffies)
 *
 * ide_stall_queue() can be used by a drive to give excess bandwidth back
 * to the hwgroup by sleeping for timeout jiffies.
 */

void ide_stall_queue (ide_drive_t *drive, unsigned long timeout)
{
	if (timeout > WAIT_WORSTCASE)
		timeout = WAIT_WORSTCASE;
	drive->sleep = timeout + jiffies;
	drive->dev_flags |= IDE_DFLAG_SLEEPING;
}

EXPORT_SYMBOL(ide_stall_queue);

#define WAKEUP(drive)	((drive)->service_start + 2 * (drive)->service_time)

/**
 * choose_drive - select a drive to service
 * @hwgroup: hardware group to select on
 *
 * choose_drive() selects the next drive which will be serviced.
 * This is necessary because the IDE layer can't issue commands
 * to both drives on the same cable, unlike SCSI.
 */

static inline ide_drive_t *choose_drive (ide_hwgroup_t *hwgroup)
{
	ide_drive_t *drive, *best;

repeat:
	best = NULL;
	drive = hwgroup->drive;

	/*
	 * drive is doing pre-flush, ordered write, post-flush sequence. even
	 * though that is 3 requests, it must be seen as a single transaction.
	 * we must not preempt this drive until that is complete
	 */
	if (blk_queue_flushing(drive->queue)) {
		/*
		 * small race where queue could get replugged during
		 * the 3-request flush cycle, just yank the plug since
		 * we want it to finish asap
		 */
		blk_remove_plug(drive->queue);
		return drive;
	}

	do {
		u8 dev_s = !!(drive->dev_flags & IDE_DFLAG_SLEEPING);
		u8 best_s = (best && !!(best->dev_flags & IDE_DFLAG_SLEEPING));

		if ((dev_s == 0 || time_after_eq(jiffies, drive->sleep)) &&
		    !elv_queue_empty(drive->queue)) {
			if (best == NULL ||
			    (dev_s && (best_s == 0 || time_before(drive->sleep, best->sleep))) ||
			    (best_s == 0 && time_before(WAKEUP(drive), WAKEUP(best)))) {
				if (!blk_queue_plugged(drive->queue))
					best = drive;
			}
		}
	} while ((drive = drive->next) != hwgroup->drive);

	if (best && (best->dev_flags & IDE_DFLAG_NICE1) &&
	    (best->dev_flags & IDE_DFLAG_SLEEPING) == 0 &&
	    best != hwgroup->drive && best->service_time > WAIT_MIN_SLEEP) {
		long t = (signed long)(WAKEUP(best) - jiffies);
		if (t >= WAIT_MIN_SLEEP) {
			/*
			 * We *may* have some time to spare, but first let's see if
			 * someone can potentially benefit from our nice mood today..
			 */
			drive = best->next;
			do {
				if ((drive->dev_flags & IDE_DFLAG_SLEEPING) == 0
				 && time_before(jiffies - best->service_time, WAKEUP(drive))
				 && time_before(WAKEUP(drive), jiffies + t))
				{
					ide_stall_queue(best, min_t(long, t, 10 * WAIT_MIN_SLEEP));
					goto repeat;
				}
			} while ((drive = drive->next) != best);
		}
	}
	return best;
}
/*
 * Issue a new request to a drive from hwgroup
 * Caller must have already done spin_lock_irqsave(&ide_lock, ..);
 *
 * A hwgroup is a serialized group of IDE interfaces. Usually there is
 * exactly one hwif (interface) per hwgroup, but buggy controllers (eg. CMD640)
 * may have both interfaces in a single hwgroup to "serialize" access.
 * Or possibly multiple ISA interfaces can share a common IRQ by being grouped
 * together into one hwgroup for serialized access.
 *
 * Note also that several hwgroups can end up sharing a single IRQ,
 * possibly along with many other devices. This is especially common in
 * PCI-based systems with off-board IDE controller cards.
 *
 * The IDE driver uses the single global ide_lock spinlock to protect
 * access to the request queues, and to protect the hwgroup->busy flag.
 *
 * The first thread into the driver for a particular hwgroup sets the
 * hwgroup->busy flag to indicate that this hwgroup is now active,
 * and then initiates processing of the top request from the request queue.
 *
 * Other threads attempting entry notice the busy setting, and will simply
 * queue their new requests and exit immediately. Note that hwgroup->busy
 * remains set even when the driver is merely awaiting the next interrupt.
 * Thus, the meaning is "this hwgroup is busy processing a request".
 *
 * When processing of a request completes, the completing thread or IRQ-handler
 * will start the next request from the queue. If no more work remains,
 * the driver will clear the hwgroup->busy flag and exit.
 *
 * The ide_lock (spinlock) is used to protect all access to the
 * hwgroup->busy flag, but is otherwise not needed for most processing in
 * the driver. This makes the driver much friendlier to shared IRQs
 * than previous designs, while remaining 100% (?) SMP safe and capable.
 */
static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
{
	ide_drive_t *drive;
	ide_hwif_t *hwif;
	struct request *rq;
	ide_startstop_t startstop;
	int loops = 0;

	/* for atari only: POSSIBLY BROKEN HERE(?) */
	ide_get_lock(ide_intr, hwgroup);

	/* caller must own ide_lock */
	BUG_ON(!irqs_disabled());

	while (!hwgroup->busy) {
		hwgroup->busy = 1;
		drive = choose_drive(hwgroup);
		if (drive == NULL) {
			int sleeping = 0;
			unsigned long sleep = 0; /* shut up, gcc */
			hwgroup->rq = NULL;
			drive = hwgroup->drive;
			do {
				if ((drive->dev_flags & IDE_DFLAG_SLEEPING) &&
				    (sleeping == 0 ||
				     time_before(drive->sleep, sleep))) {
					sleeping = 1;
					sleep = drive->sleep;
				}
			} while ((drive = drive->next) != hwgroup->drive);
			if (sleeping) {
				/*
				 * Take a short snooze, and then wake up this hwgroup again.
				 * This gives other hwgroups on the same IRQ a chance to
				 * play fairly with us, just in case there are big differences
				 * in relative throughputs.. don't want to hog the cpu too much.
				 */
				if (time_before(sleep, jiffies + WAIT_MIN_SLEEP))
					sleep = jiffies + WAIT_MIN_SLEEP;
#if 1
				if (timer_pending(&hwgroup->timer))
					printk(KERN_CRIT "ide_set_handler: timer already active\n");
#endif
				/* so that ide_timer_expiry knows what to do */
				hwgroup->sleeping = 1;
				hwgroup->req_gen_timer = hwgroup->req_gen;
				mod_timer(&hwgroup->timer, sleep);
				/* we purposely leave hwgroup->busy==1
				 * while sleeping */
			} else {
				/* Ugly, but how can we sleep for the lock
				 * otherwise? perhaps from tq_disk?
				 */

				/* for atari only */
				ide_release_lock();
				hwgroup->busy = 0;
			}

			/* no more work for this hwgroup (for now) */
			return;
		}
	again:
		hwif = HWIF(drive);
		if (hwgroup->hwif->sharing_irq && hwif != hwgroup->hwif) {
			/*
			 * set nIEN for previous hwif, drives in the
			 * quirk_list may not like intr setups/cleanups
			 */
			if (drive->quirk_list != 1)
				hwif->tp_ops->set_irq(hwif, 0);
		}
		hwgroup->hwif = hwif;
		hwgroup->drive = drive;
		drive->dev_flags &= ~IDE_DFLAG_SLEEPING;
		drive->service_start = jiffies;

		if (blk_queue_plugged(drive->queue)) {
			printk(KERN_ERR "ide: huh? queue was plugged!\n");
			break;
		}

		/*
		 * we know that the queue isn't empty, but this can happen
		 * if the q->prep_rq_fn() decides to kill a request
		 */
		rq = elv_next_request(drive->queue);
		if (!rq) {
			hwgroup->busy = 0;
			break;
		}

		/*
		 * Sanity: don't accept a request that isn't a PM request
		 * if we are currently power managed. This is very important as
		 * blk_stop_queue() doesn't prevent the elv_next_request()
		 * above to return us whatever is in the queue. Since we call
		 * ide_do_request() ourselves, we end up taking requests while
		 * the queue is blocked...
		 *
		 * We let requests forced at head of queue with ide-preempt
		 * though. I hope that doesn't happen too much, hopefully not
		 * unless the subdriver triggers such a thing in its own PM
		 * state machine.
		 *
		 * We count how many times we loop here to make sure we service
		 * all drives in the hwgroup without looping forever
		 */
		if ((drive->dev_flags & IDE_DFLAG_BLOCKED) &&
		    blk_pm_request(rq) == 0 &&
		    (rq->cmd_flags & REQ_PREEMPT) == 0) {
			drive = drive->next ? drive->next : hwgroup->drive;
			if (loops++ < 4 && !blk_queue_plugged(drive->queue))
				goto again;
			/* We clear busy, there should be no pending ATA command at this point. */
			hwgroup->busy = 0;
			break;
		}

		hwgroup->rq = rq;

		/*
		 * Some systems have trouble with IDE IRQs arriving while
		 * the driver is still setting things up. So, here we disable
		 * the IRQ used by this interface while the request is being started.
		 * This may look bad at first, but pretty much the same thing
		 * happens anyway when any interrupt comes in, IDE or otherwise
		 *  -- the kernel masks the IRQ while it is being handled.
		 */
		if (masked_irq != IDE_NO_IRQ && hwif->irq != masked_irq)
			disable_irq_nosync(hwif->irq);
		spin_unlock(&ide_lock);
		local_irq_enable_in_hardirq();
			/* allow other IRQs while we start this request */
		startstop = start_request(drive, rq);
		spin_lock_irq(&ide_lock);
		if (masked_irq != IDE_NO_IRQ && hwif->irq != masked_irq)
			enable_irq(hwif->irq);
		if (startstop == ide_stopped)
			hwgroup->busy = 0;
	}
}
/*
 * Passes the request queue on to ide_do_request().
 */
void do_ide_request(struct request_queue *q)
{
	ide_drive_t *drive = q->queuedata;

	ide_do_request(HWGROUP(drive), IDE_NO_IRQ);
}

/*
 * un-busy the hwgroup etc, and clear any pending DMA status. we want to
 * retry the current request in pio mode instead of risking tossing it
 * all away
 */
static ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
{
	ide_hwif_t *hwif = HWIF(drive);
	struct request *rq;
	ide_startstop_t ret = ide_stopped;

	/*
	 * end current dma transaction
	 */

	if (error < 0) {
		printk(KERN_WARNING "%s: DMA timeout error\n", drive->name);
		(void)hwif->dma_ops->dma_end(drive);
		ret = ide_error(drive, "dma timeout error",
				hwif->tp_ops->read_status(hwif));
	} else {
		printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name);
		hwif->dma_ops->dma_timeout(drive);
	}

	/*
	 * disable dma for now, but remember that we did so because of
	 * a timeout -- we'll reenable after we finish this next request
	 * (or rather the first chunk of it) in pio.
	 */
	drive->dev_flags |= IDE_DFLAG_DMA_PIO_RETRY;
	drive->retry_pio++;
	ide_dma_off_quietly(drive);

	/*
	 * un-busy drive etc (hwgroup->busy is cleared on return) and
	 * make sure request is sane
	 */
	rq = HWGROUP(drive)->rq;

	if (!rq)
		goto out;

	HWGROUP(drive)->rq = NULL;

	rq->errors = 0;

	if (!rq->bio)
		goto out;

	rq->sector = rq->bio->bi_sector;
	rq->current_nr_sectors = bio_iovec(rq->bio)->bv_len >> 9;
	rq->hard_cur_sectors = rq->current_nr_sectors;
	rq->buffer = bio_data(rq->bio);
out:
	return ret;
}

/**
 * ide_timer_expiry - handle lack of an IDE interrupt
 * @data: timer callback magic (hwgroup)
 *
 * An IDE command has timed out before the expected drive return
 * occurred. At this point we attempt to clean up the current
 * mess. If the current handler includes an expiry handler then
 * we invoke the expiry handler, and providing it is happy the
 * work is done. If that fails we apply generic recovery rules
 * invoking the handler and checking the drive DMA status. We
 * have an excessively incestuous relationship with the DMA
 * logic that wants cleaning up.
 */

void ide_timer_expiry (unsigned long data)
{
	ide_hwgroup_t *hwgroup = (ide_hwgroup_t *) data;
	ide_handler_t *handler;
	ide_expiry_t *expiry;
	unsigned long flags;
	unsigned long wait = -1;

	spin_lock_irqsave(&ide_lock, flags);

	if (((handler = hwgroup->handler) == NULL) ||
	    (hwgroup->req_gen != hwgroup->req_gen_timer)) {
		/*
		 * Either a marginal timeout occurred
		 * (got the interrupt just as timer expired),
		 * or we were "sleeping" to give other devices a chance.
		 * Either way, we don't really want to complain about anything.
		 */
		if (hwgroup->sleeping) {
			hwgroup->sleeping = 0;
			hwgroup->busy = 0;
		}
	} else {
		ide_drive_t *drive = hwgroup->drive;
		if (!drive) {
			printk(KERN_ERR "ide_timer_expiry: hwgroup->drive was NULL\n");
			hwgroup->handler = NULL;
		} else {
			ide_hwif_t *hwif;
			ide_startstop_t startstop = ide_stopped;
			if (!hwgroup->busy) {
				hwgroup->busy = 1;	/* paranoia */
				printk(KERN_ERR "%s: ide_timer_expiry: hwgroup->busy was 0 ??\n", drive->name);
			}
			if ((expiry = hwgroup->expiry) != NULL) {
				/* continue */
				if ((wait = expiry(drive)) > 0) {
					/* reset timer */
					hwgroup->timer.expires = jiffies + wait;
					hwgroup->req_gen_timer = hwgroup->req_gen;
					add_timer(&hwgroup->timer);
					spin_unlock_irqrestore(&ide_lock, flags);
					return;
				}
			}
			hwgroup->handler = NULL;
			/*
			 * We need to simulate a real interrupt when invoking
			 * the handler() function, which means we need to
			 * globally mask the specific IRQ:
			 */
			spin_unlock(&ide_lock);
			hwif = HWIF(drive);
			/* disable_irq_nosync ?? */
			disable_irq(hwif->irq);
			/* local CPU only,
			 * as if we were handling an interrupt */
			local_irq_disable();
			if (hwgroup->polling) {
				startstop = handler(drive);
			} else if (drive_is_ready(drive)) {
				if (drive->waiting_for_dma)
					hwif->dma_ops->dma_lost_irq(drive);
				(void)ide_ack_intr(hwif);
				printk(KERN_WARNING "%s: lost interrupt\n", drive->name);
				startstop = handler(drive);
			} else {
				if (drive->waiting_for_dma) {
					startstop = ide_dma_timeout_retry(drive, wait);
				} else
					startstop =
					ide_error(drive, "irq timeout",
						  hwif->tp_ops->read_status(hwif));
			}
			drive->service_time = jiffies - drive->service_start;
			spin_lock_irq(&ide_lock);
			enable_irq(hwif->irq);
			if (startstop == ide_stopped)
				hwgroup->busy = 0;
		}
	}
	ide_do_request(hwgroup, IDE_NO_IRQ);
	spin_unlock_irqrestore(&ide_lock, flags);
}
/**
 * unexpected_intr - handle an unexpected IDE interrupt
 * @irq: interrupt line
 * @hwgroup: hwgroup being processed
 *
 * There's nothing really useful we can do with an unexpected interrupt,
 * other than reading the status register (to clear it), and logging it.
 * There should be no way that an irq can happen before we're ready for it,
 * so we needn't worry much about losing an "important" interrupt here.
 *
 * On laptops (and "green" PCs), an unexpected interrupt occurs whenever
 * the drive enters "idle", "standby", or "sleep" mode, so if the status
 * looks "good", we just ignore the interrupt completely.
 *
 * This routine assumes __cli() is in effect when called.
 *
 * If an unexpected interrupt happens on irq15 while we are handling irq14
 * and if the two interfaces are "serialized" (CMD640), then it looks like
 * we could screw up by interfering with a new request being set up for
 * irq15.
 *
 * In reality, this is a non-issue. The new command is not sent unless
 * the drive is ready to accept one, in which case we know the drive is
 * not trying to interrupt us. And ide_set_handler() is always invoked
 * before completing the issuance of any new drive command, so we will not
 * be accidentally invoked as a result of any valid command completion
 * interrupt.
 *
 * Note that we must walk the entire hwgroup here. We know which hwif
 * is doing the current command, but we don't know which hwif burped
 * mysteriously.
 */

static void unexpected_intr (int irq, ide_hwgroup_t *hwgroup)
{
	u8 stat;
	ide_hwif_t *hwif = hwgroup->hwif;

	/*
	 * handle the unexpected interrupt
	 */
	do {
		if (hwif->irq == irq) {
			stat = hwif->tp_ops->read_status(hwif);

			if (!OK_STAT(stat, ATA_DRDY, BAD_STAT)) {
				/* Try to not flood the console with msgs */
				static unsigned long last_msgtime, count;
				++count;
				if (time_after(jiffies, last_msgtime + HZ)) {
					last_msgtime = jiffies;
					printk(KERN_ERR "%s%s: unexpected interrupt, "
						"status=0x%02x, count=%ld\n",
						hwif->name,
						(hwif->next == hwgroup->hwif) ? "" : "(?)", stat, count);
				}
			}
		}
	} while ((hwif = hwif->next) != hwgroup->hwif);
}
/**
 * ide_intr - default IDE interrupt handler
 * @irq: interrupt number
 * @dev_id: hwif group
 *
 * This is the default IRQ handler for the IDE layer. You should
 * not need to override it. If you do, be aware it is subtle in
 * places.
 *
 * hwgroup->hwif is the interface in the group currently performing
 * a command. hwgroup->drive is the drive and hwgroup->handler is
 * the IRQ handler to call. As we issue a command the handlers
 * step through multiple states, reassigning the handler to the
 * next step in the process. Unlike a smart SCSI controller IDE
 * expects the main processor to sequence the various transfer
 * stages. We also manage a poll timer to catch up with most
 * timeout situations. There are still a few where the handlers
 * don't ever decide to give up.
 *
 * The handler eventually returns ide_stopped to indicate the
 * request completed. At this point we issue the next request
 * on the hwgroup and the process begins again.
 */

irqreturn_t ide_intr (int irq, void *dev_id)
{
	unsigned long flags;
	ide_hwgroup_t *hwgroup = (ide_hwgroup_t *)dev_id;
	ide_hwif_t *hwif;
	ide_drive_t *drive;
	ide_handler_t *handler;
	ide_startstop_t startstop;

	spin_lock_irqsave(&ide_lock, flags);
	hwif = hwgroup->hwif;

	if (!ide_ack_intr(hwif)) {
		spin_unlock_irqrestore(&ide_lock, flags);
		return IRQ_NONE;
	}

	if ((handler = hwgroup->handler) == NULL || hwgroup->polling) {
		/*
		 * Not expecting an interrupt from this drive.
		 * That means this could be:
		 *	(1) an interrupt from another PCI device
		 *	sharing the same PCI INT# as us.
		 * or	(2) a drive just entered sleep or standby mode,
		 *	and is interrupting to let us know.
		 * or	(3) a spurious interrupt of unknown origin.
		 *
		 * For PCI, we cannot tell the difference,
		 * so in that case we just ignore it and hope it goes away.
		 *
		 * FIXME: unexpected_intr should be hwif-> then we can
		 * remove all the ifdef PCI crap
		 */
#ifdef CONFIG_BLK_DEV_IDEPCI
		if (hwif->chipset != ide_pci)
#endif	/* CONFIG_BLK_DEV_IDEPCI */
		{
			/*
			 * Probably not a shared PCI interrupt,
			 * so we can safely try to do something about it:
			 */
			unexpected_intr(irq, hwgroup);
#ifdef CONFIG_BLK_DEV_IDEPCI
		} else {
			/*
			 * Whack the status register, just in case
			 * we have a leftover pending IRQ.
			 */
			(void)hwif->tp_ops->read_status(hwif);
#endif /* CONFIG_BLK_DEV_IDEPCI */
		}
		spin_unlock_irqrestore(&ide_lock, flags);
		return IRQ_NONE;
	}
	drive = hwgroup->drive;
	if (!drive) {
		/*
		 * This should NEVER happen, and there isn't much
		 * we could do about it here.
		 *
		 * [Note - this can occur if the drive is hot unplugged]
		 */
		spin_unlock_irqrestore(&ide_lock, flags);
		return IRQ_HANDLED;
	}
	if (!drive_is_ready(drive)) {
		/*
		 * This happens regularly when we share a PCI IRQ with
		 * another device. Unfortunately, it can also happen
		 * with some buggy drives that trigger the IRQ before
		 * their status register is up to date. Hopefully we have
		 * enough advance overhead that the latter isn't a problem.
		 */
		spin_unlock_irqrestore(&ide_lock, flags);
		return IRQ_NONE;
	}
	if (!hwgroup->busy) {
		hwgroup->busy = 1;	/* paranoia */
		printk(KERN_ERR "%s: ide_intr: hwgroup->busy was 0 ??\n", drive->name);
	}
	hwgroup->handler = NULL;
	hwgroup->req_gen++;
	del_timer(&hwgroup->timer);
	spin_unlock(&ide_lock);

	if (hwif->port_ops && hwif->port_ops->clear_irq)
		hwif->port_ops->clear_irq(drive);

	if (drive->dev_flags & IDE_DFLAG_UNMASK)
		local_irq_enable_in_hardirq();

	/* service this interrupt, may set handler for next interrupt */
	startstop = handler(drive);

	spin_lock_irq(&ide_lock);
	/*
	 * Note that handler() may have set things up for another
	 * interrupt to occur soon, but it cannot happen until
	 * we exit from this routine, because it will be the
	 * same irq as is currently being serviced here, and Linux
	 * won't allow another of the same (on any CPU) until we return.
	 */
	drive->service_time = jiffies - drive->service_start;
	if (startstop == ide_stopped) {
		if (hwgroup->handler == NULL) {	/* paranoia */
			hwgroup->busy = 0;
			ide_do_request(hwgroup, hwif->irq);
		} else {
			printk(KERN_ERR "%s: ide_intr: huh? expected NULL handler "
				"on exit\n", drive->name);
		}
	}
	spin_unlock_irqrestore(&ide_lock, flags);
	return IRQ_HANDLED;
}
/**
 * ide_do_drive_cmd - issue IDE special command
 * @drive: device to issue command
 * @rq: request to issue
 *
 * This function issues a special IDE device request
 * onto the request queue.
 *
 * The rq is queued at the head of the request queue, displacing
 * the currently-being-processed request and this function
 * returns immediately without waiting for the new rq to be
 * completed. This is VERY DANGEROUS, and is intended for
 * careful use by the ATAPI tape/cdrom driver code.
 */

void ide_do_drive_cmd(ide_drive_t *drive, struct request *rq)
{
	unsigned long flags;
	ide_hwgroup_t *hwgroup = HWGROUP(drive);

	spin_lock_irqsave(&ide_lock, flags);
	hwgroup->rq = NULL;
	__elv_add_request(drive->queue, rq, ELEVATOR_INSERT_FRONT, 1);
	__generic_unplug_device(drive->queue);
	spin_unlock_irqrestore(&ide_lock, flags);
}

EXPORT_SYMBOL(ide_do_drive_cmd);
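/*
 * ide_pktcmd_tf_load() loads the taskfile for an ATAPI packet command:
 * the byte count goes into the LBA mid/high registers and the feature
 * register selects PIO (0) or DMA (1) for the data phase.
 */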
void ide_pktcmd_tf_load(ide_drive_t *drive, u32 tf_flags, u16 bcount, u8 dma)
{
	ide_hwif_t *hwif = drive->hwif;
	ide_task_t task;

	memset(&task, 0, sizeof(task));
	task.tf_flags = IDE_TFLAG_OUT_LBAH | IDE_TFLAG_OUT_LBAM |
			IDE_TFLAG_OUT_FEATURE | tf_flags;
	task.tf.feature = dma;		/* Use PIO/DMA */
	task.tf.lbam    = bcount & 0xff;
	task.tf.lbah    = (bcount >> 8) & 0xff;

	ide_tf_dump(drive->name, &task.tf);
	hwif->tp_ops->set_irq(hwif, 1);
	SELECT_MASK(drive, 0);
	hwif->tp_ops->tf_load(drive, &task);
}

EXPORT_SYMBOL_GPL(ide_pktcmd_tf_load);

void ide_pad_transfer(ide_drive_t *drive, int write, int len)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 buf[4] = { 0 };

	while (len > 0) {
		if (write)
			hwif->tp_ops->output_data(drive, NULL, buf, min(4, len));
		else
			hwif->tp_ops->input_data(drive, NULL, buf, min(4, len));
		len -= 4;
	}
}
EXPORT_SYMBOL_GPL(ide_pad_transfer);