ide-io.c revision e3b29f05124b07303088795396ff858811d2acb8

/*
 * IDE I/O functions
 *
 * Basic PIO and command management functionality.
 *
 * This code was split off from ide.c. See ide.c for history and original
 * copyrights.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * For the avoidance of doubt the "preferred form" of this code is one which
 * is in an open non patent encumbered format. Where cryptographic key signing
 * forms part of the process of creating an executable the information
 * including keys needed to generate an equivalently functional executable
 * are deemed to be part of the source code.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/major.h>
#include <linux/errno.h>
#include <linux/genhd.h>
#include <linux/blkpg.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/ide.h>
#include <linux/completion.h>
#include <linux/reboot.h>
#include <linux/cdrom.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/kmod.h>
#include <linux/scatterlist.h>
#include <linux/bitops.h>

#include <asm/byteorder.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <asm/io.h>

int ide_end_rq(ide_drive_t *drive, struct request *rq, int error,
	       unsigned int nr_bytes)
{
	/*
	 * decide whether to reenable DMA -- 3 is a random magic for now,
	 * if we DMA timeout more than 3 times, just stay in PIO
	 */
	if ((drive->dev_flags & IDE_DFLAG_DMA_PIO_RETRY) &&
	    drive->retry_pio <= 3) {
		drive->dev_flags &= ~IDE_DFLAG_DMA_PIO_RETRY;
		ide_dma_on(drive);
	}

	return blk_end_request(rq, error, nr_bytes);
}
EXPORT_SYMBOL_GPL(ide_end_rq);

void ide_complete_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat, u8 err)
{
	const struct ide_tp_ops *tp_ops = drive->hwif->tp_ops;
	struct ide_taskfile *tf = &cmd->tf;
	struct request *rq = cmd->rq;
	u8 tf_cmd = tf->command;

	tf->error = err;
	tf->status = stat;

	if (cmd->ftf_flags & IDE_FTFLAG_IN_DATA) {
		u8 data[2];

		tp_ops->input_data(drive, cmd, data, 2);

		cmd->tf.data  = data[0];
		cmd->hob.data = data[1];
	}

	ide_tf_readback(drive, cmd);

	if ((cmd->tf_flags & IDE_TFLAG_CUSTOM_HANDLER) &&
	    tf_cmd == ATA_CMD_IDLEIMMEDIATE) {
		if (tf->lbal != 0xc4) {
			printk(KERN_ERR "%s: head unload failed!\n",
			       drive->name);
			ide_tf_dump(drive->name, cmd);
		} else
			drive->dev_flags |= IDE_DFLAG_PARKED;
	}

	if (rq && rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
		struct ide_cmd *orig_cmd = rq->special;

		if (cmd->tf_flags & IDE_TFLAG_DYN)
			kfree(orig_cmd);
		else
			memcpy(orig_cmd, cmd, sizeof(*cmd));
	}
}

/* obsolete, blk_rq_bytes() should be used instead */
unsigned int ide_rq_bytes(struct request *rq)
{
	if (blk_pc_request(rq))
		return rq->data_len;
	else
		return rq->hard_cur_sectors << 9;
}
EXPORT_SYMBOL_GPL(ide_rq_bytes);
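
/**
 * ide_complete_rq - finish the request currently active on the port
 * @drive: drive owning the request
 * @error: 0 on success, negative errno on failure
 * @nr_bytes: number of bytes to complete
 *
 * Completes @nr_bytes of the request attached to the port (the whole
 * request for failed no-retry requests) and, once the block layer
 * reports the request fully done, detaches it from the port so the
 * next request can be started.
 */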

int ide_complete_rq(ide_drive_t *drive, int error, unsigned int nr_bytes)
{
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq = hwif->rq;
	int rc;

	/*
	 * if failfast is set on a request, override number of sectors
	 * and complete the whole request right now
	 */
	if (blk_noretry_request(rq) && error <= 0)
		nr_bytes = rq->hard_nr_sectors << 9;

	rc = ide_end_rq(drive, rq, error, nr_bytes);
	if (rc == 0)
		hwif->rq = NULL;

	return rc;
}
EXPORT_SYMBOL(ide_complete_rq);

void ide_kill_rq(ide_drive_t *drive, struct request *rq)
{
	u8 drv_req = blk_special_request(rq) && rq->rq_disk;
	u8 media = drive->media;

	drive->failed_pc = NULL;

	if ((media == ide_floppy || media == ide_tape) && drv_req) {
		rq->errors = 0;
		ide_complete_rq(drive, 0, blk_rq_bytes(rq));
	} else {
		if (media == ide_tape)
			rq->errors = IDE_DRV_ERROR_GENERAL;
		else if (blk_fs_request(rq) == 0 && rq->errors == 0)
			rq->errors = -EIO;
		ide_complete_rq(drive, -EIO, ide_rq_bytes(rq));
	}
}
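
/*
 * The helpers below build the taskfiles for the "special" setup commands
 * (INITIALIZE DEVICE PARAMETERS, RECALIBRATE, SET MULTIPLE MODE) that get
 * replayed after a drive reset, using the geometry and multi-sector
 * settings cached in the ide_drive_t.
 */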

static void ide_tf_set_specify_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
{
	tf->nsect   = drive->sect;
	tf->lbal    = drive->sect;
	tf->lbam    = drive->cyl;
	tf->lbah    = drive->cyl >> 8;
	tf->device  = (drive->head - 1) | drive->select;
	tf->command = ATA_CMD_INIT_DEV_PARAMS;
}

static void ide_tf_set_restore_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
{
	tf->nsect   = drive->sect;
	tf->command = ATA_CMD_RESTORE;
}

static void ide_tf_set_setmult_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
{
	tf->nsect   = drive->mult_req;
	tf->command = ATA_CMD_SET_MULTI;
}

static ide_startstop_t ide_disk_special(ide_drive_t *drive)
{
	special_t *s = &drive->special;
	struct ide_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.protocol = ATA_PROT_NODATA;

	if (s->b.set_geometry) {
		s->b.set_geometry = 0;
		ide_tf_set_specify_cmd(drive, &cmd.tf);
	} else if (s->b.recalibrate) {
		s->b.recalibrate = 0;
		ide_tf_set_restore_cmd(drive, &cmd.tf);
	} else if (s->b.set_multmode) {
		s->b.set_multmode = 0;
		ide_tf_set_setmult_cmd(drive, &cmd.tf);
	} else if (s->all) {
		int special = s->all;
		s->all = 0;
		printk(KERN_ERR "%s: bad special flag: 0x%02x\n",
		       drive->name, special);
		return ide_stopped;
	}

	cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
	cmd.valid.in.tf  = IDE_VALID_IN_TF  | IDE_VALID_DEVICE;
	cmd.tf_flags = IDE_TFLAG_CUSTOM_HANDLER;

	do_rw_taskfile(drive, &cmd);

	return ide_started;
}

/**
 * do_special - issue some special commands
 * @drive: drive the command is for
 *
 * do_special() is used to issue ATA_CMD_INIT_DEV_PARAMS,
 * ATA_CMD_RESTORE and ATA_CMD_SET_MULTI commands to a drive.
 *
 * It used to do much more, but has been scaled back.
 */

static ide_startstop_t do_special(ide_drive_t *drive)
{
	special_t *s = &drive->special;

#ifdef DEBUG
	printk("%s: do_special: 0x%02x\n", drive->name, s->all);
#endif
	if (drive->media == ide_disk)
		return ide_disk_special(drive);

	s->all = 0;
	drive->mult_req = 0;
	return ide_stopped;
}

void ide_map_sg(ide_drive_t *drive, struct ide_cmd *cmd)
{
	ide_hwif_t *hwif = drive->hwif;
	struct scatterlist *sg = hwif->sg_table;
	struct request *rq = cmd->rq;

	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
		sg_init_one(sg, rq->buffer, rq->nr_sectors * SECTOR_SIZE);
		cmd->sg_nents = 1;
	} else if (!rq->bio) {
		sg_init_one(sg, rq->data, rq->data_len);
		cmd->sg_nents = 1;
	} else
		cmd->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
}
EXPORT_SYMBOL_GPL(ide_map_sg);

void ide_init_sg_cmd(struct ide_cmd *cmd, unsigned int nr_bytes)
{
	cmd->nbytes = cmd->nleft = nr_bytes;
	cmd->cursg_ofs = 0;
	cmd->cursg = NULL;
}
EXPORT_SYMBOL_GPL(ide_init_sg_cmd);

/**
 * execute_drive_cmd - issue a special drive command
 * @drive: the drive to issue the command on
 * @rq: the request structure holding the command
 *
 * execute_drive_cmd() issues a special drive command, usually
 * initiated by ioctl() from the external hdparm program. The
 * command can be a drive command, drive task or taskfile
 * operation. Strangely, it can also be called with a NULL command
 * to wait for all pending commands to finish. Don't rely on this,
 * as the behaviour is due to change.
 */

static ide_startstop_t execute_drive_cmd(ide_drive_t *drive,
					 struct request *rq)
{
	struct ide_cmd *cmd = rq->special;

	if (cmd) {
		if (cmd->protocol == ATA_PROT_PIO) {
			ide_init_sg_cmd(cmd, rq->nr_sectors << 9);
			ide_map_sg(drive, cmd);
		}

		return do_rw_taskfile(drive, cmd);
	}

	/*
	 * NULL is actually a valid way of waiting for
	 * all current requests to be flushed from the queue.
	 */
#ifdef DEBUG
	printk("%s: DRIVE_CMD (null)\n", drive->name);
#endif
	rq->errors = 0;
	ide_complete_rq(drive, 0, blk_rq_bytes(rq));

	return ide_stopped;
}
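
/*
 * ide_special_rq() dispatches a driver-private request according to the
 * opcode stored in rq->cmd[0]: head park/unpark, device setting execution,
 * or a drive reset. Any other opcode is a bug in the caller.
 */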

static ide_startstop_t ide_special_rq(ide_drive_t *drive, struct request *rq)
{
	u8 cmd = rq->cmd[0];

	switch (cmd) {
	case REQ_PARK_HEADS:
	case REQ_UNPARK_HEADS:
		return ide_do_park_unpark(drive, rq);
	case REQ_DEVSET_EXEC:
		return ide_do_devset(drive, rq);
	case REQ_DRIVE_RESET:
		return ide_do_reset(drive);
	default:
		BUG();
	}
}

/**
 * start_request - start of I/O and command issuing for IDE
 * @drive: drive the request is for
 * @rq: request to start
 *
 * start_request() initiates handling of a new I/O request. It
 * accepts commands and I/O (read/write) requests.
 *
 * FIXME: this function needs a rename
 */

static ide_startstop_t start_request(ide_drive_t *drive, struct request *rq)
{
	ide_startstop_t startstop;

	BUG_ON(!blk_rq_started(rq));

#ifdef DEBUG
	printk("%s: start_request: current=0x%08lx\n",
	       drive->hwif->name, (unsigned long)rq);
#endif

	/* bail early if we've exceeded max_failures */
	if (drive->max_failures && (drive->failures > drive->max_failures)) {
		rq->cmd_flags |= REQ_FAILED;
		goto kill_rq;
	}

	if (blk_pm_request(rq))
		ide_check_pm_state(drive, rq);

	drive->hwif->tp_ops->dev_select(drive);
	if (ide_wait_stat(&startstop, drive, drive->ready_stat,
			  ATA_BUSY | ATA_DRQ, WAIT_READY)) {
		printk(KERN_ERR "%s: drive not ready for command\n",
		       drive->name);
		return startstop;
	}
	if (!drive->special.all) {
		struct ide_driver *drv;

		/*
		 * We reset the drive so we need to issue a SETFEATURES.
		 * Do it _after_ do_special() restored device parameters.
		 */
		if (drive->current_speed == 0xff)
			ide_config_drive_speed(drive, drive->desired_speed);

		if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
			return execute_drive_cmd(drive, rq);
		else if (blk_pm_request(rq)) {
			struct request_pm_state *pm = rq->data;
#ifdef DEBUG_PM
			printk("%s: start_power_step(step: %d)\n",
			       drive->name, pm->pm_step);
#endif
			startstop = ide_start_power_step(drive, rq);
			if (startstop == ide_stopped &&
			    pm->pm_step == IDE_PM_COMPLETED)
				ide_complete_pm_rq(drive, rq);
			return startstop;
		} else if (!rq->rq_disk && blk_special_request(rq))
			/*
			 * TODO: Once all ULDs have been modified to
			 * check for specific op codes rather than
			 * blindly accepting any special request, the
			 * check for ->rq_disk above may be replaced
			 * by a more suitable mechanism or even
			 * dropped entirely.
			 */
			return ide_special_rq(drive, rq);

		drv = *(struct ide_driver **)rq->rq_disk->private_data;

		return drv->do_request(drive, rq, rq->sector);
	}
	return do_special(drive);
kill_rq:
	ide_kill_rq(drive, rq);
	return ide_stopped;
}

/**
 * ide_stall_queue - pause an IDE device
 * @drive: drive to stall
 * @timeout: time to stall for (jiffies)
 *
 * ide_stall_queue() can be used by a drive to give excess bandwidth back
 * to the port by sleeping for timeout jiffies.
 */

void ide_stall_queue(ide_drive_t *drive, unsigned long timeout)
{
	if (timeout > WAIT_WORSTCASE)
		timeout = WAIT_WORSTCASE;
	drive->sleep = timeout + jiffies;
	drive->dev_flags |= IDE_DFLAG_SLEEPING;
}
EXPORT_SYMBOL(ide_stall_queue);
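
/*
 * A minimal usage sketch for ide_stall_queue() (hypothetical caller; the
 * timeout value is purely illustrative):
 *
 *	ide_stall_queue(drive, HZ / 50);
 *
 * do_ide_request() below checks IDE_DFLAG_SLEEPING against drive->sleep
 * and will not issue another request to a sleeping drive until the
 * timeout has expired.
 */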

static inline int ide_lock_port(ide_hwif_t *hwif)
{
	if (hwif->busy)
		return 1;

	hwif->busy = 1;

	return 0;
}

static inline void ide_unlock_port(ide_hwif_t *hwif)
{
	hwif->busy = 0;
}

static inline int ide_lock_host(struct ide_host *host, ide_hwif_t *hwif)
{
	int rc = 0;

	if (host->host_flags & IDE_HFLAG_SERIALIZE) {
		rc = test_and_set_bit_lock(IDE_HOST_BUSY, &host->host_busy);
		if (rc == 0) {
			if (host->get_lock)
				host->get_lock(ide_intr, hwif);
		}
	}
	return rc;
}

static inline void ide_unlock_host(struct ide_host *host)
{
	if (host->host_flags & IDE_HFLAG_SERIALIZE) {
		if (host->release_lock)
			host->release_lock();
		clear_bit_unlock(IDE_HOST_BUSY, &host->host_busy);
	}
}
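
/*
 * Note on locking in do_ide_request() below: it is entered from the block
 * layer with q->queue_lock held, drops it to take hwif->lock (and, for
 * serialized hosts, the host-wide lock), and reacquires q->queue_lock
 * before returning, as its caller expects.
 */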

/*
 * Issue a new request to a device.
 */
void do_ide_request(struct request_queue *q)
{
	ide_drive_t *drive = q->queuedata;
	ide_hwif_t *hwif = drive->hwif;
	struct ide_host *host = hwif->host;
	struct request *rq = NULL;
	ide_startstop_t startstop;

	/*
	 * The drive is doing a pre-flush, ordered write, post-flush
	 * sequence. Even though that is three requests, it must be seen
	 * as a single transaction; we must not preempt this drive until
	 * that is complete.
	 */
	if (blk_queue_flushing(q))
		/*
		 * small race where queue could get replugged during
		 * the 3-request flush cycle, just yank the plug since
		 * we want it to finish asap
		 */
		blk_remove_plug(q);

	spin_unlock_irq(q->queue_lock);

	if (ide_lock_host(host, hwif))
		goto plug_device_2;

	spin_lock_irq(&hwif->lock);

	if (!ide_lock_port(hwif)) {
		ide_hwif_t *prev_port;
repeat:
		prev_port = hwif->host->cur_port;
		hwif->rq = NULL;

		if (drive->dev_flags & IDE_DFLAG_SLEEPING &&
		    time_after(drive->sleep, jiffies)) {
			ide_unlock_port(hwif);
			goto plug_device;
		}

		if ((hwif->host->host_flags & IDE_HFLAG_SERIALIZE) &&
		    hwif != prev_port) {
			/*
			 * set nIEN for previous port, drives in the
			 * quirk_list may not like intr setups/cleanups
			 */
			if (prev_port && prev_port->cur_dev->quirk_list == 0)
				prev_port->tp_ops->write_devctl(prev_port,
						ATA_NIEN | ATA_DEVCTL_OBS);

			hwif->host->cur_port = hwif;
		}
		hwif->cur_dev = drive;
		drive->dev_flags &= ~(IDE_DFLAG_SLEEPING | IDE_DFLAG_PARKED);

		spin_unlock_irq(&hwif->lock);
		spin_lock_irq(q->queue_lock);
		/*
		 * we know that the queue isn't empty, but this can happen
		 * if the q->prep_rq_fn() decides to kill a request
		 */
		rq = elv_next_request(drive->queue);
		spin_unlock_irq(q->queue_lock);
		spin_lock_irq(&hwif->lock);

		if (!rq) {
			ide_unlock_port(hwif);
			goto out;
		}

		/*
		 * Sanity: don't accept a request that isn't a PM request
		 * if we are currently power managed. This is very important
		 * as blk_stop_queue() doesn't prevent the elv_next_request()
		 * above from returning us whatever is in the queue. Since we
		 * call ide_do_request() ourselves, we end up taking requests
		 * while the queue is blocked...
		 *
		 * We do let requests forced at the head of the queue with
		 * ide-preempt through, though. Hopefully that doesn't happen
		 * too often, i.e. only when the subdriver triggers it in its
		 * own PM state machine.
		 */
		if ((drive->dev_flags & IDE_DFLAG_BLOCKED) &&
		    blk_pm_request(rq) == 0 &&
		    (rq->cmd_flags & REQ_PREEMPT) == 0) {
			/* there should be no pending command at this point */
			ide_unlock_port(hwif);
			goto plug_device;
		}

		hwif->rq = rq;

		spin_unlock_irq(&hwif->lock);
		startstop = start_request(drive, rq);
		spin_lock_irq(&hwif->lock);

		if (startstop == ide_stopped)
			goto repeat;
	} else
		goto plug_device;
out:
	spin_unlock_irq(&hwif->lock);
	if (rq == NULL)
		ide_unlock_host(host);
	spin_lock_irq(q->queue_lock);
	return;

plug_device:
	spin_unlock_irq(&hwif->lock);
	ide_unlock_host(host);
plug_device_2:
	spin_lock_irq(q->queue_lock);

	if (!elv_queue_empty(q))
		blk_plug_device(q);
}

static void ide_plug_device(ide_drive_t *drive)
{
	struct request_queue *q = drive->queue;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	if (!elv_queue_empty(q))
		blk_plug_device(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
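
/*
 * drive_is_ready() checks whether the drive is plausibly the source of a
 * pending interrupt. For an active DMA command the DMA engine's test_irq
 * method decides; otherwise the (alternate) status register is read, and
 * a set BSY bit means the drive is definitely not interrupting.
 */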

static int drive_is_ready(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 stat = 0;

	if (drive->waiting_for_dma)
		return hwif->dma_ops->dma_test_irq(drive);

	if (hwif->io_ports.ctl_addr &&
	    (hwif->host_flags & IDE_HFLAG_BROKEN_ALTSTATUS) == 0)
		stat = hwif->tp_ops->read_altstatus(hwif);
	else
		/* Note: this may clear a pending IRQ!! */
		stat = hwif->tp_ops->read_status(hwif);

	if (stat & ATA_BUSY)
		/* drive busy: definitely not interrupting */
		return 0;

	/* drive ready: *might* be interrupting */
	return 1;
}

/**
 * ide_timer_expiry - handle lack of an IDE interrupt
 * @data: timer callback magic (hwif)
 *
 * An IDE command has timed out before the expected drive return
 * occurred. At this point we attempt to clean up the current
 * mess. If the current handler includes an expiry handler then
 * we invoke the expiry handler, and provided it is happy the
 * work is done. If that fails we apply generic recovery rules
 * invoking the handler and checking the drive DMA status. We
 * have an excessively incestuous relationship with the DMA
 * logic that wants cleaning up.
 */

void ide_timer_expiry(unsigned long data)
{
	ide_hwif_t *hwif = (ide_hwif_t *)data;
	ide_drive_t *uninitialized_var(drive);
	ide_handler_t *handler;
	unsigned long flags;
	int wait = -1;
	int plug_device = 0;

	spin_lock_irqsave(&hwif->lock, flags);

	handler = hwif->handler;

	if (handler == NULL || hwif->req_gen != hwif->req_gen_timer) {
		/*
		 * Either a marginal timeout occurred
		 * (got the interrupt just as timer expired),
		 * or we were "sleeping" to give other devices a chance.
		 * Either way, we don't really want to complain about anything.
		 */
	} else {
		ide_expiry_t *expiry = hwif->expiry;
		ide_startstop_t startstop = ide_stopped;

		drive = hwif->cur_dev;

		if (expiry) {
			wait = expiry(drive);
			if (wait > 0) { /* continue */
				/* reset timer */
				hwif->timer.expires = jiffies + wait;
				hwif->req_gen_timer = hwif->req_gen;
				add_timer(&hwif->timer);
				spin_unlock_irqrestore(&hwif->lock, flags);
				return;
			}
		}
		hwif->handler = NULL;
		hwif->expiry = NULL;
		/*
		 * We need to simulate a real interrupt when invoking
		 * the handler() function, which means we need to
		 * globally mask the specific IRQ:
		 */
		spin_unlock(&hwif->lock);
		/* disable_irq_nosync ?? */
		disable_irq(hwif->irq);
		/* local CPU only, as if we were handling an interrupt */
		local_irq_disable();
		if (hwif->polling) {
			startstop = handler(drive);
		} else if (drive_is_ready(drive)) {
			if (drive->waiting_for_dma)
				hwif->dma_ops->dma_lost_irq(drive);
			if (hwif->ack_intr)
				hwif->ack_intr(hwif);
			printk(KERN_WARNING "%s: lost interrupt\n",
			       drive->name);
			startstop = handler(drive);
		} else {
			if (drive->waiting_for_dma)
				startstop = ide_dma_timeout_retry(drive, wait);
			else
				startstop = ide_error(drive, "irq timeout",
					hwif->tp_ops->read_status(hwif));
		}
		spin_lock_irq(&hwif->lock);
		enable_irq(hwif->irq);
		if (startstop == ide_stopped && hwif->polling == 0) {
			ide_unlock_port(hwif);
			plug_device = 1;
		}
	}
	spin_unlock_irqrestore(&hwif->lock, flags);

	if (plug_device) {
		ide_unlock_host(hwif->host);
		ide_plug_device(drive);
	}
}

/**
 * unexpected_intr - handle an unexpected IDE interrupt
 * @irq: interrupt line
 * @hwif: port being processed
 *
 * There's nothing really useful we can do with an unexpected interrupt,
 * other than reading the status register (to clear it), and logging it.
 * There should be no way that an irq can happen before we're ready for it,
 * so we needn't worry much about losing an "important" interrupt here.
 *
 * On laptops (and "green" PCs), an unexpected interrupt occurs whenever
 * the drive enters "idle", "standby", or "sleep" mode, so if the status
 * looks "good", we just ignore the interrupt completely.
 *
 * This routine assumes __cli() is in effect when called.
 *
 * If an unexpected interrupt happens on irq15 while we are handling irq14
 * and if the two interfaces are "serialized" (CMD640), then it looks like
 * we could screw up by interfering with a new request being set up for
 * irq15.
 *
 * In reality, this is a non-issue. The new command is not sent unless
 * the drive is ready to accept one, in which case we know the drive is
 * not trying to interrupt us. And ide_set_handler() is always invoked
 * before completing the issuance of any new drive command, so we will not
 * be accidentally invoked as a result of any valid command completion
 * interrupt.
 */

static void unexpected_intr(int irq, ide_hwif_t *hwif)
{
	u8 stat = hwif->tp_ops->read_status(hwif);

	if (!OK_STAT(stat, ATA_DRDY, BAD_STAT)) {
		/* Try to not flood the console with msgs */
		static unsigned long last_msgtime, count;
		++count;

		if (time_after(jiffies, last_msgtime + HZ)) {
			last_msgtime = jiffies;
			printk(KERN_ERR "%s: unexpected interrupt, "
				"status=0x%02x, count=%ld\n",
				hwif->name, stat, count);
		}
	}
}

/**
 * ide_intr - default IDE interrupt handler
 * @irq: interrupt number
 * @dev_id: hwif
 *
 * This is the default IRQ handler for the IDE layer. You should
 * not need to override it. If you do, be aware that it is subtle
 * in places.
 *
 * hwif is the interface in the group currently performing
 * a command. hwif->cur_dev is the drive and hwif->handler is
 * the IRQ handler to call. As we issue a command the handlers
 * step through multiple states, reassigning the handler to the
 * next step in the process. Unlike a smart SCSI controller, IDE
 * expects the main processor to sequence the various transfer
 * stages. We also manage a poll timer to catch up with most
 * timeout situations. There are still a few where the handlers
 * don't ever decide to give up.
 *
 * The handler eventually returns ide_stopped to indicate the
 * request completed. At this point we issue the next request
 * on the port and the process begins again.
 */

irqreturn_t ide_intr(int irq, void *dev_id)
{
	ide_hwif_t *hwif = (ide_hwif_t *)dev_id;
	struct ide_host *host = hwif->host;
	ide_drive_t *uninitialized_var(drive);
	ide_handler_t *handler;
	unsigned long flags;
	ide_startstop_t startstop;
	irqreturn_t irq_ret = IRQ_NONE;
	int plug_device = 0;

	if (host->host_flags & IDE_HFLAG_SERIALIZE) {
		if (hwif != host->cur_port)
			goto out_early;
	}

	spin_lock_irqsave(&hwif->lock, flags);

	if (hwif->ack_intr && hwif->ack_intr(hwif) == 0)
		goto out;

	handler = hwif->handler;

	if (handler == NULL || hwif->polling) {
		/*
		 * Not expecting an interrupt from this drive.
		 * That means this could be:
		 *	(1) an interrupt from another PCI device
		 *	sharing the same PCI INT# as us.
		 * or	(2) a drive just entered sleep or standby mode,
		 *	and is interrupting to let us know.
		 * or	(3) a spurious interrupt of unknown origin.
		 *
		 * For PCI, we cannot tell the difference,
		 * so in that case we just ignore it and hope it goes away.
		 */
		if ((host->irq_flags & IRQF_SHARED) == 0) {
			/*
			 * Probably not a shared PCI interrupt,
			 * so we can safely try to do something about it:
			 */
			unexpected_intr(irq, hwif);
		} else {
			/*
			 * Whack the status register, just in case
			 * we have a leftover pending IRQ.
			 */
			(void)hwif->tp_ops->read_status(hwif);
		}
		goto out;
	}

	drive = hwif->cur_dev;

	if (!drive_is_ready(drive))
		/*
		 * This happens regularly when we share a PCI IRQ with
		 * another device. Unfortunately, it can also happen
		 * with some buggy drives that trigger the IRQ before
		 * their status register is up to date. Hopefully we have
		 * enough advance overhead that the latter isn't a problem.
		 */
		goto out;

	hwif->handler = NULL;
	hwif->expiry = NULL;
	hwif->req_gen++;
	del_timer(&hwif->timer);
	spin_unlock(&hwif->lock);

	if (hwif->port_ops && hwif->port_ops->clear_irq)
		hwif->port_ops->clear_irq(drive);

	if (drive->dev_flags & IDE_DFLAG_UNMASK)
		local_irq_enable_in_hardirq();

	/* service this interrupt, may set handler for next interrupt */
	startstop = handler(drive);

	spin_lock_irq(&hwif->lock);
	/*
	 * Note that handler() may have set things up for another
	 * interrupt to occur soon, but it cannot happen until
	 * we exit from this routine, because it will be the
	 * same irq as is currently being serviced here, and Linux
	 * won't allow another of the same (on any CPU) until we return.
	 */
	if (startstop == ide_stopped && hwif->polling == 0) {
		BUG_ON(hwif->handler);
		ide_unlock_port(hwif);
		plug_device = 1;
	}
	irq_ret = IRQ_HANDLED;
out:
	spin_unlock_irqrestore(&hwif->lock, flags);
out_early:
	if (plug_device) {
		ide_unlock_host(hwif->host);
		ide_plug_device(drive);
	}

	return irq_ret;
}
EXPORT_SYMBOL_GPL(ide_intr);
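
/*
 * ide_pad_transfer() below pads out a PIO transfer by reading or writing
 * dummy data in 4-byte chunks until len bytes have been consumed, so that
 * the drive's data counter is satisfied even when the request supplied
 * fewer bytes than the device expects.
 */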

void ide_pad_transfer(ide_drive_t *drive, int write, int len)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 buf[4] = { 0 };

	while (len > 0) {
		if (write)
			hwif->tp_ops->output_data(drive, NULL, buf, min(4, len));
		else
			hwif->tp_ops->input_data(drive, NULL, buf, min(4, len));
		len -= 4;
	}
}
EXPORT_SYMBOL_GPL(ide_pad_transfer);