/* ide-io.c, revision 9600dcf1347d304cf4dff34ef50569d6584b6968 */
/*
 *	IDE I/O functions
 *
 *	Basic PIO and command management functionality.
 *
 * This code was split off from ide.c. See ide.c for history and original
 * copyrights.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * For the avoidance of doubt the "preferred form" of this code is one which
 * is in an open non patent encumbered format. Where cryptographic key signing
 * forms part of the process of creating an executable the information
 * including keys needed to generate an equivalently functional executable
 * are deemed to be part of the source code.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/major.h>
#include <linux/errno.h>
#include <linux/genhd.h>
#include <linux/blkpg.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/ide.h>
#include <linux/hdreg.h>
#include <linux/completion.h>
#include <linux/reboot.h>
#include <linux/cdrom.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/kmod.h>
#include <linux/scatterlist.h>
#include <linux/bitops.h>

#include <asm/byteorder.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <asm/io.h>

static int __ide_end_request(ide_drive_t *drive, struct request *rq,
			     int uptodate, unsigned int nr_bytes, int dequeue)
{
	int ret = 1;
	int error = 0;

	if (uptodate <= 0)
		error = uptodate ? uptodate : -EIO;

	/*
	 * if failfast is set on a request, override number of sectors
	 * and complete the whole request right now
	 */
	if (blk_noretry_request(rq) && error)
		nr_bytes = rq->hard_nr_sectors << 9;

	if (!blk_fs_request(rq) && error && !rq->errors)
		rq->errors = -EIO;

	/*
	 * decide whether to reenable DMA -- 3 is a random magic for now,
	 * if we DMA timeout more than 3 times, just stay in PIO
	 */
	if ((drive->dev_flags & IDE_DFLAG_DMA_PIO_RETRY) &&
	    drive->retry_pio <= 3) {
		drive->dev_flags &= ~IDE_DFLAG_DMA_PIO_RETRY;
		ide_dma_on(drive);
	}

	if (!blk_end_request(rq, error, nr_bytes))
		ret = 0;

	if (ret == 0 && dequeue)
		drive->hwif->rq = NULL;

	return ret;
}

/**
 *	ide_end_request		-	complete an IDE I/O
 *	@drive: IDE device for the I/O
 *	@uptodate: non-zero if the transfer succeeded
 *	@nr_sectors: number of sectors completed
 *
 *	This is our end_request wrapper function. We complete the I/O,
 *	update the random number input and dequeue the request, which if
 *	it was tagged may be out of order.
 */

int ide_end_request(ide_drive_t *drive, int uptodate, int nr_sectors)
{
	unsigned int nr_bytes = nr_sectors << 9;
	struct request *rq = drive->hwif->rq;

	if (!nr_bytes) {
		if (blk_pc_request(rq))
			nr_bytes = rq->data_len;
		else
			nr_bytes = rq->hard_cur_sectors << 9;
	}

	return __ide_end_request(drive, rq, uptodate, nr_bytes, 1);
}
EXPORT_SYMBOL(ide_end_request);

/**
 *	ide_end_dequeued_request	-	complete an IDE I/O
 *	@drive: IDE device for the I/O
 *	@rq: request being completed
 *	@uptodate: non-zero if the transfer succeeded
 *	@nr_sectors: number of sectors completed
 *
 *	Complete an I/O that is no longer on the request queue. This
 *	typically occurs when we pull the request and issue a REQUEST_SENSE.
 *	We must still finish the old request but we must not tamper with the
 *	queue in the meantime.
 *
 *	NOTE: This path does not handle barrier, but barrier is not supported
 *	on ide-cd anyway.
 */

int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq,
			     int uptodate, int nr_sectors)
{
	BUG_ON(!blk_rq_started(rq));

	return __ide_end_request(drive, rq, uptodate, nr_sectors << 9, 0);
}
EXPORT_SYMBOL_GPL(ide_end_dequeued_request);
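/*
 * Illustrative sketch (compiled out; not part of this file's logic): how a
 * media driver's per-sector PIO completion path might call ide_end_request().
 * The function name and the nsect parameter are hypothetical.
 */
#if 0
static void example_finish_sectors(ide_drive_t *drive, int nsect)
{
	/*
	 * Report nsect sectors of the current request as successfully
	 * transferred; a return value of 1 means the request still has
	 * bytes outstanding, 0 means it was fully completed and dequeued.
	 */
	if (ide_end_request(drive, 1, nsect) == 0)
		/* nothing left to do for this request */ ;
}
#endif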
/**
 *	ide_end_drive_cmd	-	end an explicit drive command
 *	@drive: drive the command was issued on
 *	@stat: status bits
 *	@err: error bits
 *
 *	Clean up after success/failure of an explicit drive command.
 *	These get thrown onto the queue so they are synchronized with
 *	real I/O operations on the drive.
 *
 *	In LBA48 mode we have to read the register set twice to get
 *	all the extra information out.
 */

void ide_end_drive_cmd(ide_drive_t *drive, u8 stat, u8 err)
{
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq = hwif->rq;

	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
		ide_task_t *task = (ide_task_t *)rq->special;

		if (task) {
			struct ide_taskfile *tf = &task->tf;

			tf->error = err;
			tf->status = stat;

			drive->hwif->tp_ops->tf_read(drive, task);

			if (task->tf_flags & IDE_TFLAG_DYN)
				kfree(task);
		}
	} else if (blk_pm_request(rq)) {
		struct request_pm_state *pm = rq->data;

		ide_complete_power_step(drive, rq);
		if (pm->pm_step == IDE_PM_COMPLETED)
			ide_complete_pm_request(drive, rq);
		return;
	}

	hwif->rq = NULL;

	rq->errors = err;

	if (unlikely(blk_end_request(rq, (rq->errors ? -EIO : 0),
				     blk_rq_bytes(rq))))
		BUG();
}
EXPORT_SYMBOL(ide_end_drive_cmd);

static void ide_kill_rq(ide_drive_t *drive, struct request *rq)
{
	if (rq->rq_disk) {
		struct ide_driver *drv;

		drv = *(struct ide_driver **)rq->rq_disk->private_data;
		drv->end_request(drive, 0, 0);
	} else
		ide_end_request(drive, 0, 0);
}

static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq,
				     u8 stat, u8 err)
{
	ide_hwif_t *hwif = drive->hwif;

	if ((stat & ATA_BUSY) ||
	    ((stat & ATA_DF) && (drive->dev_flags & IDE_DFLAG_NOWERR) == 0)) {
		/* other bits are useless when BUSY */
		rq->errors |= ERROR_RESET;
	} else if (stat & ATA_ERR) {
		/* err has different meaning on cdrom and tape */
		if (err == ATA_ABORTED) {
			if ((drive->dev_flags & IDE_DFLAG_LBA) &&
			    /* some newer drives don't support ATA_CMD_INIT_DEV_PARAMS */
			    hwif->tp_ops->read_status(hwif) == ATA_CMD_INIT_DEV_PARAMS)
				return ide_stopped;
		} else if ((err & BAD_CRC) == BAD_CRC) {
			/* UDMA crc error, just retry the operation */
			drive->crc_count++;
		} else if (err & (ATA_BBK | ATA_UNC)) {
			/* retries won't help these */
			rq->errors = ERROR_MAX;
		} else if (err & ATA_TRK0NF) {
			/* help it find track zero */
			rq->errors |= ERROR_RECAL;
		}
	}

	if ((stat & ATA_DRQ) && rq_data_dir(rq) == READ &&
	    (hwif->host_flags & IDE_HFLAG_ERROR_STOPS_FIFO) == 0) {
		int nsect = drive->mult_count ? drive->mult_count : 1;

		ide_pad_transfer(drive, READ, nsect * SECTOR_SIZE);
	}

	if (rq->errors >= ERROR_MAX || blk_noretry_request(rq)) {
		ide_kill_rq(drive, rq);
		return ide_stopped;
	}

	if (hwif->tp_ops->read_status(hwif) & (ATA_BUSY | ATA_DRQ))
		rq->errors |= ERROR_RESET;

	if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
		++rq->errors;
		return ide_do_reset(drive);
	}

	if ((rq->errors & ERROR_RECAL) == ERROR_RECAL)
		drive->special.b.recalibrate = 1;

	++rq->errors;

	return ide_stopped;
}

static ide_startstop_t ide_atapi_error(ide_drive_t *drive, struct request *rq,
				       u8 stat, u8 err)
{
	ide_hwif_t *hwif = drive->hwif;

	if ((stat & ATA_BUSY) ||
	    ((stat & ATA_DF) && (drive->dev_flags & IDE_DFLAG_NOWERR) == 0)) {
		/* other bits are useless when BUSY */
		rq->errors |= ERROR_RESET;
	} else {
		/* add decoding error stuff */
	}

	if (hwif->tp_ops->read_status(hwif) & (ATA_BUSY | ATA_DRQ))
		/* force an abort */
		hwif->tp_ops->exec_command(hwif, ATA_CMD_IDLEIMMEDIATE);

	if (rq->errors >= ERROR_MAX) {
		ide_kill_rq(drive, rq);
	} else {
		if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
			++rq->errors;
			return ide_do_reset(drive);
		}
		++rq->errors;
	}

	return ide_stopped;
}

static ide_startstop_t
__ide_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
{
	if (drive->media == ide_disk)
		return ide_ata_error(drive, rq, stat, err);
	return ide_atapi_error(drive, rq, stat, err);
}

/**
 *	ide_error	-	handle an error on the IDE
 *	@drive: drive the error occurred on
 *	@msg: message to report
 *	@stat: status bits
 *
 *	ide_error() takes action based on the error returned by the drive.
 *	For normal I/O that may well include retries. We deal with
 *	both new-style (taskfile) and old style command handling here.
 *	In the case of taskfile command handling there is work left to
 *	do.
 */

ide_startstop_t ide_error(ide_drive_t *drive, const char *msg, u8 stat)
{
	struct request *rq;
	u8 err;

	err = ide_dump_status(drive, msg, stat);

	rq = drive->hwif->rq;
	if (rq == NULL)
		return ide_stopped;

	/* retry only "normal" I/O: */
	if (!blk_fs_request(rq)) {
		rq->errors = 1;
		ide_end_drive_cmd(drive, stat, err);
		return ide_stopped;
	}

	return __ide_error(drive, rq, stat, err);
}
EXPORT_SYMBOL_GPL(ide_error);

static void ide_tf_set_specify_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
{
	tf->nsect   = drive->sect;
	tf->lbal    = drive->sect;
	tf->lbam    = drive->cyl;
	tf->lbah    = drive->cyl >> 8;
	tf->device  = (drive->head - 1) | drive->select;
	tf->command = ATA_CMD_INIT_DEV_PARAMS;
}

static void ide_tf_set_restore_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
{
	tf->nsect   = drive->sect;
	tf->command = ATA_CMD_RESTORE;
}

static void ide_tf_set_setmult_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
{
	tf->nsect   = drive->mult_req;
	tf->command = ATA_CMD_SET_MULTI;
}

static ide_startstop_t ide_disk_special(ide_drive_t *drive)
{
	special_t *s = &drive->special;
	ide_task_t args;

	memset(&args, 0, sizeof(ide_task_t));
	args.data_phase = TASKFILE_NO_DATA;

	if (s->b.set_geometry) {
		s->b.set_geometry = 0;
		ide_tf_set_specify_cmd(drive, &args.tf);
	} else if (s->b.recalibrate) {
		s->b.recalibrate = 0;
		ide_tf_set_restore_cmd(drive, &args.tf);
	} else if (s->b.set_multmode) {
		s->b.set_multmode = 0;
		ide_tf_set_setmult_cmd(drive, &args.tf);
	} else if (s->all) {
		int special = s->all;
		s->all = 0;
		printk(KERN_ERR "%s: bad special flag: 0x%02x\n",
		       drive->name, special);
		return ide_stopped;
	}

	args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE |
			IDE_TFLAG_CUSTOM_HANDLER;

	do_rw_taskfile(drive, &args);

	return ide_started;
}
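/*
 * Sketch (compiled out) of how the special flags consumed by
 * ide_disk_special() are typically raised elsewhere: a driver records the
 * desired state and sets a bit; do_special() later issues the taskfile.
 * The helper name is hypothetical.
 */
#if 0
static void example_request_set_multmode(ide_drive_t *drive, u8 mult)
{
	drive->mult_req = mult;
	drive->special.b.set_multmode = 1;	/* picked up via do_special() */
}
#endif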
/**
 *	do_special	-	issue some special commands
 *	@drive: drive the command is for
 *
 *	do_special() is used to issue ATA_CMD_INIT_DEV_PARAMS,
 *	ATA_CMD_RESTORE and ATA_CMD_SET_MULTI commands to a drive.
 *
 *	It used to do much more, but has been scaled back.
 */

static ide_startstop_t do_special(ide_drive_t *drive)
{
	special_t *s = &drive->special;

#ifdef DEBUG
	printk("%s: do_special: 0x%02x\n", drive->name, s->all);
#endif
	if (drive->media == ide_disk)
		return ide_disk_special(drive);

	s->all = 0;
	drive->mult_req = 0;
	return ide_stopped;
}

void ide_map_sg(ide_drive_t *drive, struct request *rq)
{
	ide_hwif_t *hwif = drive->hwif;
	struct scatterlist *sg = hwif->sg_table;

	if (rq->cmd_type != REQ_TYPE_ATA_TASKFILE) {
		hwif->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
	} else {
		sg_init_one(sg, rq->buffer, rq->nr_sectors * SECTOR_SIZE);
		hwif->sg_nents = 1;
	}
}
EXPORT_SYMBOL_GPL(ide_map_sg);

void ide_init_sg_cmd(ide_drive_t *drive, struct request *rq)
{
	ide_hwif_t *hwif = drive->hwif;

	hwif->nsect = hwif->nleft = rq->nr_sectors;
	hwif->cursg_ofs = 0;
	hwif->cursg = NULL;
}
EXPORT_SYMBOL_GPL(ide_init_sg_cmd);
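/*
 * Sketch (compiled out): walking the scatterlist that ide_map_sg() filled
 * in, as a PIO transfer loop might. Uses the generic for_each_sg() helper
 * from <linux/scatterlist.h>; the function itself is hypothetical.
 */
#if 0
static void example_walk_sg(ide_hwif_t *hwif)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(hwif->sg_table, sg, hwif->sg_nents, i)
		printk(KERN_DEBUG "segment %d: %u bytes\n", i, sg->length);
}
#endif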
/**
 *	execute_drive_cmd	-	issue special drive command
 *	@drive: the drive to issue the command on
 *	@rq: the request structure holding the command
 *
 *	execute_drive_cmd() issues a special drive command, usually
 *	initiated by ioctl() from the external hdparm program. The
 *	command can be a drive command, drive task or taskfile
 *	operation. Weirdly you can call it with NULL to wait for
 *	all commands to finish. Don't do this as it is due to change.
 */

static ide_startstop_t execute_drive_cmd(ide_drive_t *drive,
					 struct request *rq)
{
	ide_hwif_t *hwif = drive->hwif;
	ide_task_t *task = rq->special;

	if (task) {
		hwif->data_phase = task->data_phase;

		switch (hwif->data_phase) {
		case TASKFILE_MULTI_OUT:
		case TASKFILE_OUT:
		case TASKFILE_MULTI_IN:
		case TASKFILE_IN:
			ide_init_sg_cmd(drive, rq);
			ide_map_sg(drive, rq);
		default:
			break;
		}

		return do_rw_taskfile(drive, task);
	}

	/*
	 * NULL is actually a valid way of waiting for
	 * all current requests to be flushed from the queue.
	 */
#ifdef DEBUG
	printk("%s: DRIVE_CMD (null)\n", drive->name);
#endif
	ide_end_drive_cmd(drive, hwif->tp_ops->read_status(hwif),
			  ide_read_error(drive));

	return ide_stopped;
}

int ide_devset_execute(ide_drive_t *drive, const struct ide_devset *setting,
		       int arg)
{
	struct request_queue *q = drive->queue;
	struct request *rq;
	int ret = 0;

	if (!(setting->flags & DS_SYNC))
		return setting->set(drive, arg);

	rq = blk_get_request(q, READ, __GFP_WAIT);
	rq->cmd_type = REQ_TYPE_SPECIAL;
	rq->cmd_len = 5;
	rq->cmd[0] = REQ_DEVSET_EXEC;
	*(int *)&rq->cmd[1] = arg;
	rq->special = setting->set;

	if (blk_execute_rq(q, NULL, rq, 0))
		ret = rq->errors;
	blk_put_request(rq);

	return ret;
}
EXPORT_SYMBOL_GPL(ide_devset_execute);

static ide_startstop_t ide_special_rq(ide_drive_t *drive, struct request *rq)
{
	u8 cmd = rq->cmd[0];

	if (cmd == REQ_PARK_HEADS || cmd == REQ_UNPARK_HEADS) {
		ide_task_t task;
		struct ide_taskfile *tf = &task.tf;

		memset(&task, 0, sizeof(task));
		if (cmd == REQ_PARK_HEADS) {
			drive->sleep = *(unsigned long *)rq->special;
			drive->dev_flags |= IDE_DFLAG_SLEEPING;
			tf->command = ATA_CMD_IDLEIMMEDIATE;
			tf->feature = 0x44;
			tf->lbal = 0x4c;
			tf->lbam = 0x4e;
			tf->lbah = 0x55;
			task.tf_flags |= IDE_TFLAG_CUSTOM_HANDLER;
		} else		/* cmd == REQ_UNPARK_HEADS */
			tf->command = ATA_CMD_CHK_POWER;

		task.tf_flags |= IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
		task.rq = rq;
		drive->hwif->data_phase = task.data_phase = TASKFILE_NO_DATA;
		return do_rw_taskfile(drive, &task);
	}

	switch (cmd) {
	case REQ_DEVSET_EXEC:
	{
		int err, (*setfunc)(ide_drive_t *, int) = rq->special;

		err = setfunc(drive, *(int *)&rq->cmd[1]);
		if (err)
			rq->errors = err;
		else
			err = 1;
		ide_end_request(drive, err, 0);
		return ide_stopped;
	}
	case REQ_DRIVE_RESET:
		return ide_do_reset(drive);
	default:
		blk_dump_rq_flags(rq, "ide_special_rq - bad request");
		ide_end_request(drive, 0, 0);
		return ide_stopped;
	}
}
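/*
 * Sketch (compiled out) of how a REQ_DRIVE_RESET special request reaches
 * ide_special_rq() above: build a REQ_TYPE_SPECIAL request and push it
 * through the block layer, mirroring what ide_devset_execute() does for
 * REQ_DEVSET_EXEC. The function name is hypothetical.
 */
#if 0
static int example_queue_drive_reset(ide_drive_t *drive)
{
	struct request *rq;
	int ret;

	rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
	rq->cmd_type = REQ_TYPE_SPECIAL;
	rq->cmd_len = 1;
	rq->cmd[0] = REQ_DRIVE_RESET;

	/* wait for completion; rq->errors carries the result */
	ret = blk_execute_rq(drive->queue, NULL, rq, 1);
	blk_put_request(rq);

	return ret;
}
#endif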
/**
 *	start_request	-	start of I/O and command issuing for IDE
 *	@drive: drive the request is for
 *	@rq: request to start
 *
 *	start_request() initiates handling of a new I/O request. It
 *	accepts commands and I/O (read/write) requests.
 *
 *	FIXME: this function needs a rename
 */

static ide_startstop_t start_request(ide_drive_t *drive, struct request *rq)
{
	ide_startstop_t startstop;

	BUG_ON(!blk_rq_started(rq));

#ifdef DEBUG
	printk("%s: start_request: current=0x%08lx\n",
	       drive->hwif->name, (unsigned long)rq);
#endif

	/* bail early if we've exceeded max_failures */
	if (drive->max_failures && (drive->failures > drive->max_failures)) {
		rq->cmd_flags |= REQ_FAILED;
		goto kill_rq;
	}

	if (blk_pm_request(rq))
		ide_check_pm_state(drive, rq);

	SELECT_DRIVE(drive);
	if (ide_wait_stat(&startstop, drive, drive->ready_stat,
			  ATA_BUSY | ATA_DRQ, WAIT_READY)) {
		printk(KERN_ERR "%s: drive not ready for command\n",
		       drive->name);
		return startstop;
	}
	if (!drive->special.all) {
		struct ide_driver *drv;

		/*
		 * We reset the drive so we need to issue a SETFEATURES.
		 * Do it _after_ do_special() restored device parameters.
		 */
		if (drive->current_speed == 0xff)
			ide_config_drive_speed(drive, drive->desired_speed);

		if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
			return execute_drive_cmd(drive, rq);
		else if (blk_pm_request(rq)) {
			struct request_pm_state *pm = rq->data;
#ifdef DEBUG_PM
			printk("%s: start_power_step(step: %d)\n",
			       drive->name, pm->pm_step);
#endif
			startstop = ide_start_power_step(drive, rq);
			if (startstop == ide_stopped &&
			    pm->pm_step == IDE_PM_COMPLETED)
				ide_complete_pm_request(drive, rq);
			return startstop;
		} else if (!rq->rq_disk && blk_special_request(rq))
			/*
			 * TODO: Once all ULDs have been modified to
			 * check for specific op codes rather than
			 * blindly accepting any special request, the
			 * check for ->rq_disk above may be replaced
			 * by a more suitable mechanism or even
			 * dropped entirely.
			 */
			return ide_special_rq(drive, rq);

		drv = *(struct ide_driver **)rq->rq_disk->private_data;

		return drv->do_request(drive, rq, rq->sector);
	}
	return do_special(drive);
kill_rq:
	ide_kill_rq(drive, rq);
	return ide_stopped;
}

/**
 *	ide_stall_queue	-	pause an IDE device
 *	@drive: drive to stall
 *	@timeout: time to stall for (jiffies)
 *
 *	ide_stall_queue() can be used by a drive to give excess bandwidth back
 *	to the port by sleeping for timeout jiffies.
 */

void ide_stall_queue(ide_drive_t *drive, unsigned long timeout)
{
	if (timeout > WAIT_WORSTCASE)
		timeout = WAIT_WORSTCASE;
	drive->sleep = timeout + jiffies;
	drive->dev_flags |= IDE_DFLAG_SLEEPING;
}
EXPORT_SYMBOL(ide_stall_queue);

static inline int ide_lock_port(ide_hwif_t *hwif)
{
	if (hwif->busy)
		return 1;

	hwif->busy = 1;

	return 0;
}

static inline void ide_unlock_port(ide_hwif_t *hwif)
{
	hwif->busy = 0;
}

static inline int ide_lock_host(struct ide_host *host, ide_hwif_t *hwif)
{
	int rc = 0;

	if (host->host_flags & IDE_HFLAG_SERIALIZE) {
		rc = test_and_set_bit_lock(IDE_HOST_BUSY, &host->host_busy);
		if (rc == 0) {
			/* for atari only */
			ide_get_lock(ide_intr, hwif);
		}
	}
	return rc;
}

static inline void ide_unlock_host(struct ide_host *host)
{
	if (host->host_flags & IDE_HFLAG_SERIALIZE) {
		/* for atari only */
		ide_release_lock();
		clear_bit_unlock(IDE_HOST_BUSY, &host->host_busy);
	}
}
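/*
 * Sketch (compiled out) of the host/port lock nesting that do_ide_request()
 * below relies on: take the host lock first (serialized hosts only), then
 * the port busy flag under hwif->lock, and release in reverse order.
 */
#if 0
	if (ide_lock_host(host, hwif) == 0) {
		spin_lock_irq(&hwif->lock);
		if (ide_lock_port(hwif) == 0) {
			/* ... issue a request to the port ... */
			ide_unlock_port(hwif);
		}
		spin_unlock_irq(&hwif->lock);
		ide_unlock_host(host);
	}
#endif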
/*
 * Issue a new request to a device.
 */
void do_ide_request(struct request_queue *q)
{
	ide_drive_t *drive = q->queuedata;
	ide_hwif_t *hwif = drive->hwif;
	struct ide_host *host = hwif->host;
	struct request *rq = NULL;
	ide_startstop_t startstop;

	/*
	 * drive is doing pre-flush, ordered write, post-flush sequence. even
	 * though that is 3 requests it must be seen as a single transaction.
	 * we must not preempt this drive until that is complete
	 */
	if (blk_queue_flushing(q))
		/*
		 * small race where queue could get replugged during
		 * the 3-request flush cycle, just yank the plug since
		 * we want it to finish asap
		 */
		blk_remove_plug(q);

	spin_unlock_irq(q->queue_lock);

	if (ide_lock_host(host, hwif))
		goto plug_device_2;

	spin_lock_irq(&hwif->lock);

	if (!ide_lock_port(hwif)) {
		ide_hwif_t *prev_port;
repeat:
		prev_port = hwif->host->cur_port;
		hwif->rq = NULL;

		if (drive->dev_flags & IDE_DFLAG_SLEEPING) {
			if (time_before(drive->sleep, jiffies)) {
				ide_unlock_port(hwif);
				goto plug_device;
			}
		}

		if ((hwif->host->host_flags & IDE_HFLAG_SERIALIZE) &&
		    hwif != prev_port) {
			/*
			 * set nIEN for previous port, drives in the
			 * quirk_list may not like intr setups/cleanups
			 */
			if (prev_port && prev_port->cur_dev->quirk_list == 0)
				prev_port->tp_ops->set_irq(prev_port, 0);

			hwif->host->cur_port = hwif;
		}
		hwif->cur_dev = drive;
		drive->dev_flags &= ~(IDE_DFLAG_SLEEPING | IDE_DFLAG_PARKED);

		spin_unlock_irq(&hwif->lock);
		spin_lock_irq(q->queue_lock);
		/*
		 * we know that the queue isn't empty, but this can happen
		 * if the q->prep_rq_fn() decides to kill a request
		 */
		rq = elv_next_request(drive->queue);
		spin_unlock_irq(q->queue_lock);
		spin_lock_irq(&hwif->lock);

		if (!rq) {
			ide_unlock_port(hwif);
			goto out;
		}

		/*
		 * Sanity: don't accept a request that isn't a PM request
		 * if we are currently power managed. This is very important
		 * as blk_stop_queue() doesn't prevent the elv_next_request()
		 * above from returning us whatever is in the queue. Since we
		 * call ide_do_request() ourselves, we end up taking requests
		 * while the queue is blocked...
		 *
		 * We let requests forced at head of queue with ide-preempt
		 * though. I hope that doesn't happen too much, hopefully not
		 * unless the subdriver triggers such a thing in its own PM
		 * state machine.
		 */
		if ((drive->dev_flags & IDE_DFLAG_BLOCKED) &&
		    blk_pm_request(rq) == 0 &&
		    (rq->cmd_flags & REQ_PREEMPT) == 0) {
			/* there should be no pending command at this point */
			ide_unlock_port(hwif);
			goto plug_device;
		}

		hwif->rq = rq;

		spin_unlock_irq(&hwif->lock);
		startstop = start_request(drive, rq);
		spin_lock_irq(&hwif->lock);

		if (startstop == ide_stopped)
			goto repeat;
	} else
		goto plug_device;
out:
	spin_unlock_irq(&hwif->lock);
	if (rq == NULL)
		ide_unlock_host(host);
	spin_lock_irq(q->queue_lock);
	return;

plug_device:
	spin_unlock_irq(&hwif->lock);
	ide_unlock_host(host);
plug_device_2:
	spin_lock_irq(q->queue_lock);

	if (!elv_queue_empty(q))
		blk_plug_device(q);
}
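/*
 * For context: do_ide_request() is not called directly; it is registered as
 * the queue's request_fn when the device queue is created (this happens in
 * ide-probe.c). A minimal sketch, compiled out; the exact probe-time call
 * differs (it uses a node-aware variant).
 */
#if 0
	struct request_queue *q = blk_init_queue(do_ide_request, NULL);

	if (q) {
		q->queuedata = drive;	/* do_ide_request() expects this */
		drive->queue = q;
	}
#endif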
/*
 * un-busy the port etc, and clear any pending DMA status. we want to
 * retry the current request in PIO mode instead of risking tossing it
 * all away
 */
static ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
{
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq;
	ide_startstop_t ret = ide_stopped;

	/*
	 * end current dma transaction
	 */

	if (error < 0) {
		printk(KERN_WARNING "%s: DMA timeout error\n", drive->name);
		(void)hwif->dma_ops->dma_end(drive);
		ret = ide_error(drive, "dma timeout error",
				hwif->tp_ops->read_status(hwif));
	} else {
		printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name);
		hwif->dma_ops->dma_timeout(drive);
	}

	/*
	 * disable dma for now, but remember that we did so because of
	 * a timeout -- we'll reenable after we finish this next request
	 * (or rather the first chunk of it) in pio.
	 */
	drive->dev_flags |= IDE_DFLAG_DMA_PIO_RETRY;
	drive->retry_pio++;
	ide_dma_off_quietly(drive);

	/*
	 * un-busy drive etc and make sure request is sane
	 */

	rq = hwif->rq;
	if (!rq)
		goto out;

	hwif->rq = NULL;

	rq->errors = 0;

	if (!rq->bio)
		goto out;

	rq->sector = rq->bio->bi_sector;
	rq->current_nr_sectors = bio_iovec(rq->bio)->bv_len >> 9;
	rq->hard_cur_sectors = rq->current_nr_sectors;
	rq->buffer = bio_data(rq->bio);
out:
	return ret;
}

static void ide_plug_device(ide_drive_t *drive)
{
	struct request_queue *q = drive->queue;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	if (!elv_queue_empty(q))
		blk_plug_device(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
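/*
 * Context sketch (compiled out): ide_timer_expiry() below is armed by
 * ide_set_handler(), which command handlers call whenever they expect a
 * further interrupt. The handler and expiry function names here are
 * hypothetical.
 */
#if 0
	/* expect example_pio_intr() within WAIT_WORSTCASE jiffies, with
	   example_expiry() consulted first if the timer fires instead */
	ide_set_handler(drive, &example_pio_intr, WAIT_WORSTCASE,
			example_expiry);
#endif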
/**
 *	ide_timer_expiry	-	handle lack of an IDE interrupt
 *	@data: timer callback magic (hwif)
 *
 *	An IDE command has timed out before the expected drive return
 *	occurred. At this point we attempt to clean up the current
 *	mess. If the current handler includes an expiry handler then
 *	we invoke the expiry handler, and providing it is happy the
 *	work is done. If that fails we apply generic recovery rules
 *	invoking the handler and checking the drive DMA status. We
 *	have an excessively incestuous relationship with the DMA
 *	logic that wants cleaning up.
 */

void ide_timer_expiry(unsigned long data)
{
	ide_hwif_t *hwif = (ide_hwif_t *)data;
	ide_drive_t *uninitialized_var(drive);
	ide_handler_t *handler;
	unsigned long flags;
	unsigned long wait = -1;
	int plug_device = 0;

	spin_lock_irqsave(&hwif->lock, flags);

	handler = hwif->handler;

	if (handler == NULL || hwif->req_gen != hwif->req_gen_timer) {
		/*
		 * Either a marginal timeout occurred
		 * (got the interrupt just as timer expired),
		 * or we were "sleeping" to give other devices a chance.
		 * Either way, we don't really want to complain about anything.
		 */
	} else {
		drive = hwif->cur_dev;
		if (!drive) {
			printk(KERN_ERR "%s: ->cur_dev was NULL\n", __func__);
			hwif->handler = NULL;
		} else {
			ide_expiry_t *expiry = hwif->expiry;
			ide_startstop_t startstop = ide_stopped;

			if (expiry) {
				/* continue */
				if ((wait = expiry(drive)) > 0) {
					/* reset timer */
					hwif->timer.expires = jiffies + wait;
					hwif->req_gen_timer = hwif->req_gen;
					add_timer(&hwif->timer);
					spin_unlock_irqrestore(&hwif->lock,
							       flags);
					return;
				}
			}
			hwif->handler = NULL;
			/*
			 * We need to simulate a real interrupt when invoking
			 * the handler() function, which means we need to
			 * globally mask the specific IRQ:
			 */
			spin_unlock(&hwif->lock);
			hwif = drive->hwif;
			/* disable_irq_nosync ?? */
			disable_irq(hwif->irq);
			/* local CPU only, as if we were handling an
			   interrupt */
			local_irq_disable();
			if (hwif->polling) {
				startstop = handler(drive);
			} else if (drive_is_ready(drive)) {
				if (drive->waiting_for_dma)
					hwif->dma_ops->dma_lost_irq(drive);
				(void)ide_ack_intr(hwif);
				printk(KERN_WARNING "%s: lost interrupt\n",
				       drive->name);
				startstop = handler(drive);
			} else {
				if (drive->waiting_for_dma)
					startstop = ide_dma_timeout_retry(drive,
									  wait);
				else
					startstop = ide_error(drive,
						"irq timeout",
						hwif->tp_ops->read_status(hwif));
			}
			spin_lock_irq(&hwif->lock);
			enable_irq(hwif->irq);
			if (startstop == ide_stopped) {
				ide_unlock_port(hwif);
				plug_device = 1;
			}
		}
	}
	spin_unlock_irqrestore(&hwif->lock, flags);

	if (plug_device) {
		ide_unlock_host(hwif->host);
		ide_plug_device(drive);
	}
}

/**
 *	unexpected_intr		-	handle an unexpected IDE interrupt
 *	@irq: interrupt line
 *	@hwif: port being processed
 *
 *	There's nothing really useful we can do with an unexpected interrupt,
 *	other than reading the status register (to clear it), and logging it.
 *	There should be no way that an irq can happen before we're ready for it,
 *	so we needn't worry much about losing an "important" interrupt here.
 *
 *	On laptops (and "green" PCs), an unexpected interrupt occurs whenever
 *	the drive enters "idle", "standby", or "sleep" mode, so if the status
 *	looks "good", we just ignore the interrupt completely.
 *
 *	This routine assumes __cli() is in effect when called.
 *
 *	If an unexpected interrupt happens on irq15 while we are handling irq14
 *	and if the two interfaces are "serialized" (CMD640), then it looks like
 *	we could screw up by interfering with a new request being set up for
 *	irq15.
 *
 *	In reality, this is a non-issue. The new command is not sent unless
 *	the drive is ready to accept one, in which case we know the drive is
 *	not trying to interrupt us. And ide_set_handler() is always invoked
 *	before completing the issuance of any new drive command, so we will not
 *	be accidentally invoked as a result of any valid command completion
 *	interrupt.
 */

static void unexpected_intr(int irq, ide_hwif_t *hwif)
{
	u8 stat = hwif->tp_ops->read_status(hwif);

	if (!OK_STAT(stat, ATA_DRDY, BAD_STAT)) {
		/* Try to not flood the console with msgs */
		static unsigned long last_msgtime, count;
		++count;

		if (time_after(jiffies, last_msgtime + HZ)) {
			last_msgtime = jiffies;
			printk(KERN_ERR "%s: unexpected interrupt, "
				"status=0x%02x, count=%ld\n",
				hwif->name, stat, count);
		}
	}
}

/**
 *	ide_intr	-	default IDE interrupt handler
 *	@irq: interrupt number
 *	@dev_id: hwif
 *
 *	This is the default IRQ handler for the IDE layer. You should
 *	not need to override it; if you do, be aware that it is subtle
 *	in places.
 *
 *	hwif is the interface in the group currently performing
 *	a command. hwif->cur_dev is the drive and hwif->handler is
 *	the IRQ handler to call. As we issue a command the handlers
 *	step through multiple states, reassigning the handler to the
 *	next step in the process. Unlike a smart SCSI controller IDE
 *	expects the main processor to sequence the various transfer
 *	stages. We also manage a poll timer to catch up with most
 *	timeout situations. There are still a few where the handlers
 *	don't ever decide to give up.
 *
 *	The handler eventually returns ide_stopped to indicate the
 *	request completed. At this point we issue the next request
 *	on the port and the process begins again.
 */

irqreturn_t ide_intr(int irq, void *dev_id)
{
	ide_hwif_t *hwif = (ide_hwif_t *)dev_id;
	ide_drive_t *uninitialized_var(drive);
	ide_handler_t *handler;
	unsigned long flags;
	ide_startstop_t startstop;
	irqreturn_t irq_ret = IRQ_NONE;
	int plug_device = 0;

	if (hwif->host->host_flags & IDE_HFLAG_SERIALIZE) {
		if (hwif != hwif->host->cur_port)
			goto out_early;
	}

	spin_lock_irqsave(&hwif->lock, flags);

	if (!ide_ack_intr(hwif))
		goto out;

	handler = hwif->handler;

	if (handler == NULL || hwif->polling) {
		/*
		 * Not expecting an interrupt from this drive.
		 * That means this could be:
		 *	(1) an interrupt from another PCI device
		 *	sharing the same PCI INT# as us.
		 * or	(2) a drive just entered sleep or standby mode,
		 *	and is interrupting to let us know.
		 * or	(3) a spurious interrupt of unknown origin.
		 *
		 * For PCI, we cannot tell the difference,
		 * so in that case we just ignore it and hope it goes away.
		 *
		 * FIXME: unexpected_intr should be hwif-> then we can
		 * remove all the ifdef PCI crap
		 */
#ifdef CONFIG_BLK_DEV_IDEPCI
		if (hwif->chipset != ide_pci)
#endif	/* CONFIG_BLK_DEV_IDEPCI */
		{
			/*
			 * Probably not a shared PCI interrupt,
			 * so we can safely try to do something about it:
			 */
			unexpected_intr(irq, hwif);
#ifdef CONFIG_BLK_DEV_IDEPCI
		} else {
			/*
			 * Whack the status register, just in case
			 * we have a leftover pending IRQ.
			 */
			(void)hwif->tp_ops->read_status(hwif);
#endif /* CONFIG_BLK_DEV_IDEPCI */
		}
		goto out;
	}

	drive = hwif->cur_dev;
	if (!drive) {
		/*
		 * This should NEVER happen, and there isn't much
		 * we could do about it here.
		 *
		 * [Note - this can occur if the drive is hot unplugged]
		 */
		goto out_handled;
	}

	if (!drive_is_ready(drive))
		/*
		 * This happens regularly when we share a PCI IRQ with
		 * another device. Unfortunately, it can also happen
		 * with some buggy drives that trigger the IRQ before
		 * their status register is up to date. Hopefully we have
		 * enough advance overhead that the latter isn't a problem.
		 */
		goto out;

	hwif->handler = NULL;
	hwif->req_gen++;
	del_timer(&hwif->timer);
	spin_unlock(&hwif->lock);

	if (hwif->port_ops && hwif->port_ops->clear_irq)
		hwif->port_ops->clear_irq(drive);

	if (drive->dev_flags & IDE_DFLAG_UNMASK)
		local_irq_enable_in_hardirq();

	/* service this interrupt, may set handler for next interrupt */
	startstop = handler(drive);

	spin_lock_irq(&hwif->lock);
	/*
	 * Note that handler() may have set things up for another
	 * interrupt to occur soon, but it cannot happen until
	 * we exit from this routine, because it will be the
	 * same irq as is currently being serviced here, and Linux
	 * won't allow another of the same (on any CPU) until we return.
	 */
	if (startstop == ide_stopped) {
		BUG_ON(hwif->handler);
		ide_unlock_port(hwif);
		plug_device = 1;
	}
out_handled:
	irq_ret = IRQ_HANDLED;
out:
	spin_unlock_irqrestore(&hwif->lock, flags);
out_early:
	if (plug_device) {
		ide_unlock_host(hwif->host);
		ide_plug_device(drive);
	}

	return irq_ret;
}
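/*
 * Context sketch (compiled out): ide_intr() is installed as the port's IRQ
 * handler at probe time (see ide-probe.c), roughly like this. The exact
 * flags used at probe time vary; IRQF_SHARED here is illustrative.
 */
#if 0
	if (request_irq(hwif->irq, &ide_intr, IRQF_SHARED, hwif->name, hwif))
		printk(KERN_ERR "%s: failed to request IRQ %d\n",
		       hwif->name, hwif->irq);
#endif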
/**
 *	ide_do_drive_cmd	-	issue IDE special command
 *	@drive: device to issue command
 *	@rq: request to issue
 *
 *	This function issues a special IDE device request
 *	onto the request queue.
 *
 *	The rq is queued at the head of the request queue, displacing
 *	the currently-being-processed request and this function
 *	returns immediately without waiting for the new rq to be
 *	completed. This is VERY DANGEROUS, and is intended for
 *	careful use by the ATAPI tape/cdrom driver code.
 */

void ide_do_drive_cmd(ide_drive_t *drive, struct request *rq)
{
	struct request_queue *q = drive->queue;
	unsigned long flags;

	drive->hwif->rq = NULL;

	spin_lock_irqsave(q->queue_lock, flags);
	__elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(ide_do_drive_cmd);

void ide_pktcmd_tf_load(ide_drive_t *drive, u32 tf_flags, u16 bcount, u8 dma)
{
	ide_hwif_t *hwif = drive->hwif;
	ide_task_t task;

	memset(&task, 0, sizeof(task));
	task.tf_flags = IDE_TFLAG_OUT_LBAH | IDE_TFLAG_OUT_LBAM |
			IDE_TFLAG_OUT_FEATURE | tf_flags;
	task.tf.feature = dma;		/* Use PIO/DMA */
	task.tf.lbam    = bcount & 0xff;
	task.tf.lbah    = (bcount >> 8) & 0xff;

	ide_tf_dump(drive->name, &task.tf);
	hwif->tp_ops->set_irq(hwif, 1);
	SELECT_MASK(drive, 0);
	hwif->tp_ops->tf_load(drive, &task);
}
EXPORT_SYMBOL_GPL(ide_pktcmd_tf_load);

void ide_pad_transfer(ide_drive_t *drive, int write, int len)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 buf[4] = { 0 };

	while (len > 0) {
		if (write)
			hwif->tp_ops->output_data(drive, NULL, buf,
						  min(4, len));
		else
			hwif->tp_ops->input_data(drive, NULL, buf,
						 min(4, len));
		len -= 4;
	}
}
EXPORT_SYMBOL_GPL(ide_pad_transfer);
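/*
 * Usage sketch (compiled out): an ATAPI driver can use ide_pad_transfer()
 * to drain (on read) or zero-fill (on write) the residue when the device
 * asks for more data than the request holds. The variable names below are
 * hypothetical.
 */
#if 0
	if (bcount > cmd_len)
		/* throw away the extra bytes the device wants to transfer */
		ide_pad_transfer(drive, write, bcount - cmd_len);
#endif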