ide-dma.c revision 81e8d5a34f7d2a2acbe309cfa5810a9699a63239
/*
 * IDE DMA support (including IDE PCI BM-DMA).
 *
 * Copyright (C) 1995-1998	Mark Lord
 * Copyright (C) 1999-2000	Andre Hedrick <andre@linux-ide.org>
 * Copyright (C) 2004, 2007	Bartlomiej Zolnierkiewicz
 *
 * May be copied or modified under the terms of the GNU General Public License
 *
 * DMA is supported for all IDE devices (disk drives, cdroms, tapes, floppies).
 */

/*
 * Special Thanks to Mark for his Six years of work.
 */

/*
 * Thanks to "Christopher J. Reimer" <reimer@doe.carleton.ca> for
 * fixing the problem with the BIOS on some Acer motherboards.
 *
 * Thanks to "Benoit Poulot-Cazajous" <poulot@chorus.fr> for testing
 * "TX" chipset compatibility and for providing patches for the "TX" chipset.
 *
 * Thanks to Christian Brunner <chb@muc.de> for taking a good first crack
 * at generic DMA -- his patches were referred to when preparing this code.
 *
 * Most importantly, thanks to Robert Bringman <rob@mars.trion.com>
 * for supplying a Promise UDMA board & WD UDMA drive for this work!
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/ide.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

#include <asm/io.h>
#include <asm/irq.h>

static const struct drive_list_entry drive_whitelist[] = {
        { "Micropolis 2112A", NULL },
        { "CONNER CTMA 4000", NULL },
        { "CONNER CTT8000-A", NULL },
        { "ST34342A", NULL },
        { NULL, NULL }
};

static const struct drive_list_entry drive_blacklist[] = {
        { "WDC AC11000H", NULL },
        { "WDC AC22100H", NULL },
        { "WDC AC32500H", NULL },
        { "WDC AC33100H", NULL },
        { "WDC AC31600H", NULL },
        { "WDC AC32100H", "24.09P07" },
        { "WDC AC23200L", "21.10N21" },
        { "Compaq CRD-8241B", NULL },
        { "CRD-8400B", NULL },
        { "CRD-8480B", NULL },
        { "CRD-8482B", NULL },
        { "CRD-84", NULL },
        { "SanDisk SDP3B", NULL },
        { "SanDisk SDP3B-64", NULL },
        { "SANYO CD-ROM CRD", NULL },
        { "HITACHI CDR-8", NULL },
        { "HITACHI CDR-8335", NULL },
        { "HITACHI CDR-8435", NULL },
        { "Toshiba CD-ROM XM-6202B", NULL },
        { "TOSHIBA CD-ROM XM-1702BC", NULL },
        { "CD-532E-A", NULL },
        { "E-IDE CD-ROM CR-840", NULL },
        { "CD-ROM Drive/F5A", NULL },
        { "WPI CDD-820", NULL },
        { "SAMSUNG CD-ROM SC-148C", NULL },
        { "SAMSUNG CD-ROM SC", NULL },
        { "ATAPI CD-ROM DRIVE 40X MAXIMUM", NULL },
        { "_NEC DV5800A", NULL },
        { "SAMSUNG CD-ROM SN-124", "N001" },
        { "Seagate STT20000A", NULL },
        { "CD-ROM CDR_U200", "1.09" },
        { NULL, NULL }
};

/**
 * ide_dma_intr - IDE DMA interrupt handler
 * @drive: the drive the interrupt is for
 *
 * Handle an interrupt completing a read/write DMA transfer on an
 * IDE device.
 */

ide_startstop_t ide_dma_intr(ide_drive_t *drive)
{
        u8 stat = 0, dma_stat = 0;

        dma_stat = drive->hwif->dma_ops->dma_end(drive);
        stat = ide_read_status(drive);

        if (OK_STAT(stat, DRIVE_READY, drive->bad_wstat | DRQ_STAT)) {
                if (!dma_stat) {
                        struct request *rq = HWGROUP(drive)->rq;

                        task_end_request(drive, rq, stat);
                        return ide_stopped;
                }
                printk(KERN_ERR "%s: dma_intr: bad DMA status (dma_stat=%x)\n",
                        drive->name, dma_stat);
        }
        return ide_error(drive, "dma_intr", stat);
}

EXPORT_SYMBOL_GPL(ide_dma_intr);
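/*
 * Editor's illustrative sketch (not part of the original driver): the
 * bus-master status byte consulted throughout this file via
 * read_sff_dma_status() follows the SFF-8038i convention that
 * dma_timer_expiry() below documents as "1 dmaing, 2 error, 4 intr".
 * A hypothetical stand-alone decoder of such a byte, with an invented
 * name, would read:
 *
 *      static const char *decode_bm_status(u8 dma_stat)
 *      {
 *              if (dma_stat & 2)       // bit 1: error latched by hardware
 *                      return "error";
 *              if (dma_stat & 1)       // bit 0: transfer still in flight
 *                      return "active";
 *              if (dma_stat & 4)       // bit 2: interrupt asserted
 *                      return "interrupt";
 *              return "idle";
 *      }
 */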
static int ide_dma_good_drive(ide_drive_t *drive)
{
        return ide_in_drive_list(drive->id, drive_whitelist);
}

/**
 * ide_build_sglist - map IDE scatter gather for DMA I/O
 * @drive: the drive to build the DMA table for
 * @rq: the request holding the sg list
 *
 * Perform the DMA mapping magic necessary to access the source or
 * target buffers of a request via DMA. The lower layers of the
 * kernel provide the necessary cache management so that we can
 * operate in a portable fashion.
 */

int ide_build_sglist(ide_drive_t *drive, struct request *rq)
{
        ide_hwif_t *hwif = HWIF(drive);
        struct scatterlist *sg = hwif->sg_table;

        ide_map_sg(drive, rq);

        if (rq_data_dir(rq) == READ)
                hwif->sg_dma_direction = DMA_FROM_DEVICE;
        else
                hwif->sg_dma_direction = DMA_TO_DEVICE;

        return dma_map_sg(hwif->dev, sg, hwif->sg_nents,
                          hwif->sg_dma_direction);
}

EXPORT_SYMBOL_GPL(ide_build_sglist);
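/*
 * Editor's note: ide_build_sglist() is an instance of the generic
 * streaming DMA-mapping pattern from the kernel DMA API.  A minimal
 * free-standing sketch of that pattern ("dev", "sglist", "sg_count" and
 * program_prd() are placeholders, not names from this file):
 *
 *      struct scatterlist *sg;
 *      int i, nents = dma_map_sg(dev, sglist, sg_count, DMA_TO_DEVICE);
 *
 *      if (nents == 0)
 *              return 0;                       // mapping failed, fall back
 *      for_each_sg(sglist, sg, nents, i)       // walk the mapped entries
 *              program_prd(sg_dma_address(sg), sg_dma_len(sg));
 *      // ... run the transfer, then unmap with the matching direction:
 *      dma_unmap_sg(dev, sglist, sg_count, DMA_TO_DEVICE);
 */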
#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
/**
 * ide_build_dmatable - build IDE DMA table
 * @drive: the drive to build the DMA table for
 * @rq: the request holding the sg list
 *
 * ide_build_dmatable() prepares a dma request. We map the command
 * to get the pci bus addresses of the buffers and then build up
 * the PRD table that the IDE layer wants to be fed. The code
 * knows about the 64K wrap bug in the CS5530.
 *
 * Returns the number of built PRD entries if all went okay,
 * returns 0 otherwise.
 *
 * May also be invoked from trm290.c
 */

int ide_build_dmatable(ide_drive_t *drive, struct request *rq)
{
        ide_hwif_t *hwif = HWIF(drive);
        unsigned int *table = hwif->dmatable_cpu;
        unsigned int is_trm290 = (hwif->chipset == ide_trm290) ? 1 : 0;
        unsigned int count = 0;
        int i;
        struct scatterlist *sg;

        hwif->sg_nents = i = ide_build_sglist(drive, rq);

        if (!i)
                return 0;

        sg = hwif->sg_table;
        while (i) {
                u32 cur_addr;
                u32 cur_len;

                cur_addr = sg_dma_address(sg);
                cur_len = sg_dma_len(sg);

                /*
                 * Fill in the dma table, without crossing any 64kB boundaries.
                 * Most hardware requires 16-bit alignment of all blocks,
                 * but the trm290 requires 32-bit alignment.
                 */

                while (cur_len) {
                        if (count++ >= PRD_ENTRIES) {
                                printk(KERN_ERR "%s: DMA table too small\n",
                                        drive->name);
                                goto use_pio_instead;
                        } else {
                                u32 xcount, bcount =
                                        0x10000 - (cur_addr & 0xffff);

                                if (bcount > cur_len)
                                        bcount = cur_len;
                                *table++ = cpu_to_le32(cur_addr);
                                xcount = bcount & 0xffff;
                                if (is_trm290)
                                        xcount = ((xcount >> 2) - 1) << 16;
                                if (xcount == 0x0000) {
                                        /*
                                         * Most chipsets correctly interpret a
                                         * length of 0x0000 as 64KB, but at
                                         * least one (e.g. CS5530) misinterprets
                                         * it as zero (!).  So here we break the
                                         * 64KB entry into two 32KB entries
                                         * instead.
                                         */
                                        if (count++ >= PRD_ENTRIES) {
                                                printk(KERN_ERR "%s: DMA table too small\n",
                                                        drive->name);
                                                goto use_pio_instead;
                                        }
                                        *table++ = cpu_to_le32(0x8000);
                                        *table++ = cpu_to_le32(cur_addr + 0x8000);
                                        xcount = 0x8000;
                                }
                                *table++ = cpu_to_le32(xcount);
                                cur_addr += bcount;
                                cur_len -= bcount;
                        }
                }

                sg = sg_next(sg);
                i--;
        }

        if (count) {
                if (!is_trm290)
                        *--table |= cpu_to_le32(0x80000000);
                return count;
        }

        printk(KERN_ERR "%s: empty DMA table?\n", drive->name);

use_pio_instead:
        ide_destroy_dmatable(drive);

        return 0; /* revert to PIO for this request */
}

EXPORT_SYMBOL_GPL(ide_build_dmatable);
#endif

/**
 * ide_destroy_dmatable - clean up DMA mapping
 * @drive: The drive to unmap
 *
 * Teardown mappings after DMA has completed. This must be called
 * after the completion of each use of ide_build_dmatable and before
 * the next use of ide_build_dmatable. Failure to do so will cause
 * an oops as only one mapping can be live for each target at a given
 * time.
 */

void ide_destroy_dmatable(ide_drive_t *drive)
{
        ide_hwif_t *hwif = drive->hwif;

        dma_unmap_sg(hwif->dev, hwif->sg_table, hwif->sg_nents,
                     hwif->sg_dma_direction);
}

EXPORT_SYMBOL_GPL(ide_destroy_dmatable);
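/*
 * Editor's illustrative sketch (not part of the original driver): each
 * PRD entry emitted by ide_build_dmatable() above is a pair of
 * little-endian 32-bit words.  "struct prd_entry" is an invented name
 * used only to describe the layout:
 *
 *      struct prd_entry {
 *              __le32 addr;    // physical base address of the chunk
 *              __le32 flags;   // byte count in bits 0-15 (0 means 64KB);
 *                              // bit 31 marks end-of-table on the last entry
 *      };
 *
 * The splitting logic above exists because a chunk may never cross a
 * 64KB boundary, and because the CS5530 mishandles the 0x0000 == 64KB
 * encoding, so a full 64KB chunk is emitted as two 32KB entries.
 */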
#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
/**
 * config_drive_for_dma - attempt to activate IDE DMA
 * @drive: the drive to place in DMA mode
 *
 * If the drive supports at least mode 2 DMA or UDMA of any kind
 * then attempt to place it into DMA mode. Drives that are known to
 * support DMA but predate the DMA properties or that are known
 * to have DMA handling bugs are also set up appropriately based
 * on the good/bad drive lists.
 */

static int config_drive_for_dma(ide_drive_t *drive)
{
        ide_hwif_t *hwif = drive->hwif;
        struct hd_driveid *id = drive->id;

        if (drive->media != ide_disk) {
                if (hwif->host_flags & IDE_HFLAG_NO_ATAPI_DMA)
                        return 0;
        }

        /*
         * Enable DMA on any drive that has
         * UltraDMA (mode 0/1/2/3/4/5/6) enabled
         */
        if ((id->field_valid & 4) && ((id->dma_ultra >> 8) & 0x7f))
                return 1;

        /*
         * Enable DMA on any drive that has mode 2 DMA
         * (multi or single) enabled
         */
        if (id->field_valid & 2)        /* regular DMA */
                if ((id->dma_mword & 0x404) == 0x404 ||
                    (id->dma_1word & 0x404) == 0x404)
                        return 1;

        /* Consult the list of known "good" drives */
        if (ide_dma_good_drive(drive))
                return 1;

        return 0;
}

/**
 * dma_timer_expiry - handle a DMA timeout
 * @drive: Drive that timed out
 *
 * An IDE DMA transfer timed out. In the event of an error we ask
 * the driver to resolve the problem; if a DMA transfer is still
 * in progress we continue to wait (arguably we need to add a
 * secondary "I don't care what the drive thinks" timeout here).
 * Finally, if we have an interrupt we let it complete the I/O,
 * but only one time: we clear the expiry handler, and if the
 * transfer still has not completed after WAIT_CMD we error out
 * and retry in PIO. This can occur if an interrupt is lost due
 * to a hang or a bug.
 */

static int dma_timer_expiry(ide_drive_t *drive)
{
        ide_hwif_t *hwif = HWIF(drive);
        u8 dma_stat = hwif->read_sff_dma_status(hwif);

        printk(KERN_WARNING "%s: dma_timer_expiry: dma status == 0x%02x\n",
                drive->name, dma_stat);

        if ((dma_stat & 0x18) == 0x18)  /* BUSY Stupid Early Timer !! */
                return WAIT_CMD;

        HWGROUP(drive)->expiry = NULL;  /* one free ride for now */

        /* 1 dmaing, 2 error, 4 intr */
        if (dma_stat & 2)       /* ERROR */
                return -1;

        if (dma_stat & 1)       /* DMAing */
                return WAIT_CMD;

        if (dma_stat & 4)       /* Got an Interrupt */
                return WAIT_CMD;

        return 0;       /* Status is unknown -- reset the bus */
}

/**
 * ide_dma_host_set - Enable/disable DMA on a host
 * @drive: drive to control
 * @on: 1 to enable DMA for this drive, 0 to disable it
 *
 * Enable/disable DMA on an IDE controller following generic
 * bus-mastering IDE controller behaviour.
 */

void ide_dma_host_set(ide_drive_t *drive, int on)
{
        ide_hwif_t *hwif = HWIF(drive);
        u8 unit = (drive->select.b.unit & 0x01);
        u8 dma_stat = hwif->read_sff_dma_status(hwif);

        if (on)
                dma_stat |= (1 << (5 + unit));
        else
                dma_stat &= ~(1 << (5 + unit));

        if (hwif->host_flags & IDE_HFLAG_MMIO)
                writeb(dma_stat,
                       (void __iomem *)(hwif->dma_base + ATA_DMA_STATUS));
        else
                outb(dma_stat, hwif->dma_base + ATA_DMA_STATUS);
}

EXPORT_SYMBOL_GPL(ide_dma_host_set);
#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */

/**
 * ide_dma_off_quietly - Generic DMA kill
 * @drive: drive to control
 *
 * Turn off the current DMA on this IDE controller.
 */

void ide_dma_off_quietly(ide_drive_t *drive)
{
        drive->using_dma = 0;
        ide_toggle_bounce(drive, 0);

        drive->hwif->dma_ops->dma_host_set(drive, 0);
}

EXPORT_SYMBOL(ide_dma_off_quietly);

/**
 * ide_dma_off - disable DMA on a device
 * @drive: drive to disable DMA on
 *
 * Disable IDE DMA for a device on this IDE controller.
 * Inform the user that DMA has been disabled.
 */

void ide_dma_off(ide_drive_t *drive)
{
        printk(KERN_INFO "%s: DMA disabled\n", drive->name);
        ide_dma_off_quietly(drive);
}

EXPORT_SYMBOL(ide_dma_off);

/**
 * ide_dma_on - Enable DMA on a device
 * @drive: drive to enable DMA on
 *
 * Enable IDE DMA for a device on this IDE controller.
 */

void ide_dma_on(ide_drive_t *drive)
{
        drive->using_dma = 1;
        ide_toggle_bounce(drive, 1);

        drive->hwif->dma_ops->dma_host_set(drive, 1);
}
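/*
 * Editor's illustrative sketch (not part of the original driver): bits 5
 * and 6 of the bus-master status register are the per-drive "DMA capable"
 * flags that ide_dma_host_set() above flips, which is why the bit position
 * is computed as (5 + unit).  Stand-alone, with made-up values:
 *
 *      u8 stat = 0x04;                 // INTR bit happens to be set
 *      stat |= 1 << (5 + 0);           // enable DMA for drive 0 -> 0x24
 *      stat |= 1 << (5 + 1);           // enable DMA for drive 1 -> 0x64
 *      stat &= ~(1 << (5 + 1));        // disable drive 1 again  -> 0x24
 */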
#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
/**
 * ide_dma_setup - begin a DMA phase
 * @drive: target device
 *
 * Build an IDE DMA PRD (IDE speak for scatter gather table)
 * and then set up the DMA transfer registers for a device
 * that follows generic IDE PCI DMA behaviour. Controllers can
 * override this function if they need to.
 *
 * Returns 0 on success. If a PIO fallback is required then 1
 * is returned.
 */

int ide_dma_setup(ide_drive_t *drive)
{
        ide_hwif_t *hwif = drive->hwif;
        struct request *rq = HWGROUP(drive)->rq;
        unsigned int reading;
        u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
        u8 dma_stat;

        if (rq_data_dir(rq))
                reading = 0;
        else
                reading = 1 << 3;

        /* fall back to pio! */
        if (!ide_build_dmatable(drive, rq)) {
                ide_map_sg(drive, rq);
                return 1;
        }

        /* PRD table */
        if (mmio)
                writel(hwif->dmatable_dma,
                       (void __iomem *)(hwif->dma_base + ATA_DMA_TABLE_OFS));
        else
                outl(hwif->dmatable_dma, hwif->dma_base + ATA_DMA_TABLE_OFS);

        /* specify r/w */
        if (mmio)
                writeb(reading, (void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
        else
                outb(reading, hwif->dma_base + ATA_DMA_CMD);

        /* read DMA status for INTR & ERROR flags */
        dma_stat = hwif->read_sff_dma_status(hwif);

        /* clear INTR & ERROR flags */
        if (mmio)
                writeb(dma_stat | 6,
                       (void __iomem *)(hwif->dma_base + ATA_DMA_STATUS));
        else
                outb(dma_stat | 6, hwif->dma_base + ATA_DMA_STATUS);

        drive->waiting_for_dma = 1;
        return 0;
}

EXPORT_SYMBOL_GPL(ide_dma_setup);

void ide_dma_exec_cmd(ide_drive_t *drive, u8 command)
{
        /* issue cmd to drive */
        ide_execute_command(drive, command, &ide_dma_intr, 2 * WAIT_CMD,
                            dma_timer_expiry);
}

EXPORT_SYMBOL_GPL(ide_dma_exec_cmd);

void ide_dma_start(ide_drive_t *drive)
{
        ide_hwif_t *hwif = drive->hwif;
        u8 dma_cmd;

        /* Note that this is done *after* the cmd has
         * been issued to the drive, as per the BM-IDE spec.
         * The Promise Ultra33 doesn't work correctly when
         * we do this part before issuing the drive cmd.
         */
        if (hwif->host_flags & IDE_HFLAG_MMIO) {
                dma_cmd = readb((void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
                /* start DMA */
                writeb(dma_cmd | 1,
                       (void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
        } else {
                dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
                outb(dma_cmd | 1, hwif->dma_base + ATA_DMA_CMD);
        }

        hwif->dma = 1;
        wmb();
}

EXPORT_SYMBOL_GPL(ide_dma_start);
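/*
 * Editor's illustrative sketch (not part of the original driver): taken
 * together, the helpers above implement the canonical SFF-8038i
 * bus-master programming sequence.  In outline, from a hypothetical
 * caller ("cmd" and fall_back_to_pio() are placeholders; the dma_ops
 * method names are this file's):
 *
 *      if (dma_ops->dma_setup(drive))          // 1. PRD pointer, direction,
 *              return fall_back_to_pio();      //    stale INTR/ERROR cleared
 *      dma_ops->dma_exec_cmd(drive, cmd);      // 2. ATA command to the drive
 *      dma_ops->dma_start(drive);              // 3. only now set the start bit
 *      // ... interrupt arrives, ide_dma_intr() calls dma_end() ...
 *
 * Step 3 deliberately follows step 2, per the BM-IDE spec and the
 * Promise Ultra33 note in ide_dma_start().
 */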
/* returns 1 on error, 0 otherwise */
int __ide_dma_end(ide_drive_t *drive)
{
        ide_hwif_t *hwif = drive->hwif;
        u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
        u8 dma_stat = 0, dma_cmd = 0;

        drive->waiting_for_dma = 0;

        if (mmio) {
                /* get DMA command mode */
                dma_cmd = readb((void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
                /* stop DMA */
                writeb(dma_cmd & ~1,
                       (void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
        } else {
                dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
                outb(dma_cmd & ~1, hwif->dma_base + ATA_DMA_CMD);
        }

        /* get DMA status */
        dma_stat = hwif->read_sff_dma_status(hwif);

        if (mmio)
                /* clear the INTR & ERROR bits */
                writeb(dma_stat | 6,
                       (void __iomem *)(hwif->dma_base + ATA_DMA_STATUS));
        else
                outb(dma_stat | 6, hwif->dma_base + ATA_DMA_STATUS);

        /* purge DMA mappings */
        ide_destroy_dmatable(drive);
        /* verify good DMA status */
        hwif->dma = 0;
        wmb();
        return (dma_stat & 7) != 4 ? (0x10 | dma_stat) : 0;
}

EXPORT_SYMBOL(__ide_dma_end);

/* returns 1 if dma irq issued, 0 otherwise */
int ide_dma_test_irq(ide_drive_t *drive)
{
        ide_hwif_t *hwif = HWIF(drive);
        u8 dma_stat = hwif->read_sff_dma_status(hwif);

        /* return 1 if INTR asserted */
        if ((dma_stat & 4) == 4)
                return 1;

        if (!drive->waiting_for_dma)
                printk(KERN_WARNING "%s: (%s) called while not waiting\n",
                        drive->name, __func__);
        return 0;
}

EXPORT_SYMBOL_GPL(ide_dma_test_irq);
#else
static inline int config_drive_for_dma(ide_drive_t *drive) { return 0; }
#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */

int __ide_dma_bad_drive(ide_drive_t *drive)
{
        struct hd_driveid *id = drive->id;

        int blacklist = ide_in_drive_list(id, drive_blacklist);
        if (blacklist) {
                printk(KERN_WARNING "%s: Disabling (U)DMA for %s (blacklisted)\n",
                        drive->name, id->model);
                return blacklist;
        }
        return 0;
}

EXPORT_SYMBOL(__ide_dma_bad_drive);

static const u8 xfer_mode_bases[] = {
        XFER_UDMA_0,
        XFER_MW_DMA_0,
        XFER_SW_DMA_0,
};

static unsigned int ide_get_mode_mask(ide_drive_t *drive, u8 base, u8 req_mode)
{
        struct hd_driveid *id = drive->id;
        ide_hwif_t *hwif = drive->hwif;
        const struct ide_port_ops *port_ops = hwif->port_ops;
        unsigned int mask = 0;

        switch (base) {
        case XFER_UDMA_0:
                if ((id->field_valid & 4) == 0)
                        break;

                if (port_ops && port_ops->udma_filter)
                        mask = port_ops->udma_filter(drive);
                else
                        mask = hwif->ultra_mask;
                mask &= id->dma_ultra;

                /*
                 * avoid false cable warning from eighty_ninty_three()
                 */
                if (req_mode > XFER_UDMA_2) {
                        if ((mask & 0x78) && (eighty_ninty_three(drive) == 0))
                                mask &= 0x07;
                }
                break;
        case XFER_MW_DMA_0:
                if ((id->field_valid & 2) == 0)
                        break;
                if (port_ops && port_ops->mdma_filter)
                        mask = port_ops->mdma_filter(drive);
                else
                        mask = hwif->mwdma_mask;
                mask &= id->dma_mword;
                break;
        case XFER_SW_DMA_0:
                if (id->field_valid & 2) {
                        mask = id->dma_1word & hwif->swdma_mask;
                } else if (id->tDMA) {
                        /*
                         * ide_fix_driveid() doesn't convert ->tDMA to the
                         * CPU endianness so we need to do it here
                         */
                        u8 mode = le16_to_cpu(id->tDMA);

                        /*
                         * if the mode is valid convert it to the mask
                         * (the maximum allowed mode is XFER_SW_DMA_2)
                         */
                        if (mode <= 2)
                                mask = ((2 << mode) - 1) & hwif->swdma_mask;
                }
                break;
        default:
                BUG();
                break;
        }

        return mask;
}
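/*
 * Editor's illustrative sketch (not part of the original driver): the
 * masks returned by ide_get_mode_mask() are plain bitmaps with bit n
 * standing for mode n, so the best mode is simply the highest set bit.
 * With invented values:
 *
 *      unsigned int mask = 0x1f & 0x3f;        // host allows UDMA 0-4,
 *                                              // drive reports UDMA 0-5
 *      int x = fls(mask) - 1;                  // fls(0x1f) = 5, so x = 4
 *      u8 mode = XFER_UDMA_0 + x;              // -> XFER_UDMA_4
 *
 * which is exactly the selection step in ide_find_dma_mode() below.
 */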
/**
 * ide_find_dma_mode - compute DMA speed
 * @drive: IDE device
 * @req_mode: requested mode
 *
 * Checks the drive/host capabilities and finds the speed to use for
 * the DMA transfer. The speed is then limited by the requested mode.
 *
 * Returns 0 if the drive/host combination is incapable of DMA transfers
 * or if the requested mode is not a DMA mode.
 */

u8 ide_find_dma_mode(ide_drive_t *drive, u8 req_mode)
{
        ide_hwif_t *hwif = drive->hwif;
        unsigned int mask;
        int x, i;
        u8 mode = 0;

        if (drive->media != ide_disk) {
                if (hwif->host_flags & IDE_HFLAG_NO_ATAPI_DMA)
                        return 0;
        }

        for (i = 0; i < ARRAY_SIZE(xfer_mode_bases); i++) {
                if (req_mode < xfer_mode_bases[i])
                        continue;
                mask = ide_get_mode_mask(drive, xfer_mode_bases[i], req_mode);
                x = fls(mask) - 1;
                if (x >= 0) {
                        mode = xfer_mode_bases[i] + x;
                        break;
                }
        }

        if (hwif->chipset == ide_acorn && mode == 0) {
                /*
                 * is this correct?
                 */
                if (ide_dma_good_drive(drive) &&
                    drive->id->eide_dma_time < 150)
                        mode = XFER_MW_DMA_1;
        }

        mode = min(mode, req_mode);

        printk(KERN_INFO "%s: %s mode selected\n", drive->name,
                mode ? ide_xfer_verbose(mode) : "no DMA");

        return mode;
}

EXPORT_SYMBOL_GPL(ide_find_dma_mode);

static int ide_tune_dma(ide_drive_t *drive)
{
        ide_hwif_t *hwif = drive->hwif;
        u8 speed;

        if (drive->nodma || (drive->id->capability & 1) == 0)
                return 0;

        /* consult the list of known "bad" drives */
        if (__ide_dma_bad_drive(drive))
                return 0;

        if (ide_id_dma_bug(drive))
                return 0;

        if (hwif->host_flags & IDE_HFLAG_TRUST_BIOS_FOR_DMA)
                return config_drive_for_dma(drive);

        speed = ide_max_dma_mode(drive);

        if (!speed)
                return 0;

        if (ide_set_dma_mode(drive, speed))
                return 0;

        return 1;
}

static int ide_dma_check(ide_drive_t *drive)
{
        ide_hwif_t *hwif = drive->hwif;
        int vdma = (hwif->host_flags & IDE_HFLAG_VDMA) ? 1 : 0;

        if (!vdma && ide_tune_dma(drive))
                return 0;

        /* TODO: always do PIO fallback */
        if (hwif->host_flags & IDE_HFLAG_TRUST_BIOS_FOR_DMA)
                return -1;

        ide_set_max_pio(drive);

        return vdma ? 0 : -1;
}

int ide_id_dma_bug(ide_drive_t *drive)
{
        struct hd_driveid *id = drive->id;

        if (id->field_valid & 4) {
                if ((id->dma_ultra >> 8) && (id->dma_mword >> 8))
                        goto err_out;
        } else if (id->field_valid & 2) {
                if ((id->dma_mword >> 8) && (id->dma_1word >> 8))
                        goto err_out;
        }
        return 0;
err_out:
        printk(KERN_ERR "%s: bad DMA info in identify block\n", drive->name);
        return 1;
}

int ide_set_dma(ide_drive_t *drive)
{
        int rc;

        /*
         * Force DMAing for the beginning of the check.
         * Some chipsets appear to do interesting
         * things, if not checked and cleared.
         *   PARANOIA!!!
         */
        ide_dma_off_quietly(drive);

        rc = ide_dma_check(drive);
        if (rc)
                return rc;

        ide_dma_on(drive);

        return 0;
}
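/*
 * Editor's illustrative sketch (not part of the original driver): in the
 * ATA identify data the low byte of each DMA word lists the supported
 * modes and the high byte the currently *enabled* mode, of which there
 * should be at most one across all DMA classes -- that is the invariant
 * ide_id_dma_bug() above enforces.  With invented values:
 *
 *      id->dma_ultra = 0x043f; // UDMA 0-5 supported, UDMA 2 enabled
 *      id->dma_mword = 0x0007; // MWDMA 0-2 supported, none enabled
 *      // OK: only one enabled-mode bit set in the high bytes overall
 *
 *      id->dma_mword = 0x0407; // MWDMA 2 *also* marked enabled
 *      // ide_id_dma_bug() now returns 1: the identify block is bogus
 */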
void ide_check_dma_crc(ide_drive_t *drive)
{
        u8 mode;

        ide_dma_off_quietly(drive);
        drive->crc_count = 0;
        mode = drive->current_speed;
        /*
         * Don't try non Ultra-DMA modes without iCRC's. Force the
         * device to PIO and make the user enable SWDMA/MWDMA modes.
         */
        if (mode > XFER_UDMA_0 && mode <= XFER_UDMA_7)
                mode--;
        else
                mode = XFER_PIO_4;
        ide_set_xfer_rate(drive, mode);
        if (drive->current_speed >= XFER_SW_DMA_0)
                ide_dma_on(drive);
}

#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
void ide_dma_lost_irq(ide_drive_t *drive)
{
        printk(KERN_ERR "%s: DMA interrupt recovery\n", drive->name);
}

EXPORT_SYMBOL(ide_dma_lost_irq);

void ide_dma_timeout(ide_drive_t *drive)
{
        ide_hwif_t *hwif = HWIF(drive);

        printk(KERN_ERR "%s: timeout waiting for DMA\n", drive->name);

        if (hwif->dma_ops->dma_test_irq(drive))
                return;

        hwif->dma_ops->dma_end(drive);
}

EXPORT_SYMBOL(ide_dma_timeout);

void ide_release_dma_engine(ide_hwif_t *hwif)
{
        if (hwif->dmatable_cpu) {
                struct pci_dev *pdev = to_pci_dev(hwif->dev);

                pci_free_consistent(pdev, PRD_ENTRIES * PRD_BYTES,
                                    hwif->dmatable_cpu, hwif->dmatable_dma);
                hwif->dmatable_cpu = NULL;
        }
}

int ide_allocate_dma_engine(ide_hwif_t *hwif)
{
        struct pci_dev *pdev = to_pci_dev(hwif->dev);

        hwif->dmatable_cpu = pci_alloc_consistent(pdev,
                                                  PRD_ENTRIES * PRD_BYTES,
                                                  &hwif->dmatable_dma);

        if (hwif->dmatable_cpu)
                return 0;

        printk(KERN_ERR "%s: unable to allocate DMA table\n", hwif->name);

        return 1;
}

EXPORT_SYMBOL_GPL(ide_allocate_dma_engine);

const struct ide_dma_ops sff_dma_ops = {
        .dma_host_set           = ide_dma_host_set,
        .dma_setup              = ide_dma_setup,
        .dma_exec_cmd           = ide_dma_exec_cmd,
        .dma_start              = ide_dma_start,
        .dma_end                = __ide_dma_end,
        .dma_test_irq           = ide_dma_test_irq,
        .dma_timeout            = ide_dma_timeout,
        .dma_lost_irq           = ide_dma_lost_irq,
};

EXPORT_SYMBOL_GPL(sff_dma_ops);
#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */
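/*
 * Editor's illustrative sketch (not part of the original driver): a
 * typical SFF-8038i host driver would wire the generic implementation
 * up roughly like this during port setup (assumed usage; "hwif" already
 * probed, error handling elided):
 *
 *      if (ide_allocate_dma_engine(hwif))      // PRD table DMA memory
 *              return -1;
 *      hwif->dma_ops = &sff_dma_ops;           // use the generic BM-DMA ops
 *
 * Chipsets with quirks (the TRM290 special-cased in ide_build_dmatable()
 * above, for instance) supply their own ide_dma_ops instead, overriding
 * individual methods as needed.
 */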