ide-dma.c revision 97100fc816badbbc162644cfde7ad39ae9211fb4
/*
 * IDE DMA support (including IDE PCI BM-DMA).
 *
 * Copyright (C) 1995-1998	Mark Lord
 * Copyright (C) 1999-2000	Andre Hedrick <andre@linux-ide.org>
 * Copyright (C) 2004, 2007	Bartlomiej Zolnierkiewicz
 *
 * May be copied or modified under the terms of the GNU General Public License
 *
 * DMA is supported for all IDE devices (disk drives, cdroms, tapes, floppies).
 */

/*
 * Special Thanks to Mark for his six years of work.
 */

/*
 * Thanks to "Christopher J. Reimer" <reimer@doe.carleton.ca> for
 * fixing the problem with the BIOS on some Acer motherboards.
 *
 * Thanks to "Benoit Poulot-Cazajous" <poulot@chorus.fr> for testing
 * "TX" chipset compatibility and for providing patches for the "TX" chipset.
 *
 * Thanks to Christian Brunner <chb@muc.de> for taking a good first crack
 * at generic DMA -- his patches were referred to when preparing this code.
 *
 * Most importantly, thanks to Robert Bringman <rob@mars.trion.com>
 * for supplying a Promise UDMA board & WD UDMA drive for this work!
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/ide.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

#include <asm/io.h>
#include <asm/irq.h>

static const struct drive_list_entry drive_whitelist[] = {
	{ "Micropolis 2112A",	NULL },
	{ "CONNER CTMA 4000",	NULL },
	{ "CONNER CTT8000-A",	NULL },
	{ "ST34342A",		NULL },
	{ NULL,			NULL }
};

static const struct drive_list_entry drive_blacklist[] = {
	{ "WDC AC11000H",			NULL },
	{ "WDC AC22100H",			NULL },
	{ "WDC AC32500H",			NULL },
	{ "WDC AC33100H",			NULL },
	{ "WDC AC31600H",			NULL },
	{ "WDC AC32100H",			"24.09P07" },
	{ "WDC AC23200L",			"21.10N21" },
	{ "Compaq CRD-8241B",			NULL },
	{ "CRD-8400B",				NULL },
	{ "CRD-8480B",				NULL },
	{ "CRD-8482B",				NULL },
	{ "CRD-84",				NULL },
	{ "SanDisk SDP3B",			NULL },
	{ "SanDisk SDP3B-64",			NULL },
	{ "SANYO CD-ROM CRD",			NULL },
	{ "HITACHI CDR-8",			NULL },
	{ "HITACHI CDR-8335",			NULL },
	{ "HITACHI CDR-8435",			NULL },
	{ "Toshiba CD-ROM XM-6202B",		NULL },
	{ "TOSHIBA CD-ROM XM-1702BC",		NULL },
	{ "CD-532E-A",				NULL },
	{ "E-IDE CD-ROM CR-840",		NULL },
	{ "CD-ROM Drive/F5A",			NULL },
	{ "WPI CDD-820",			NULL },
	{ "SAMSUNG CD-ROM SC-148C",		NULL },
	{ "SAMSUNG CD-ROM SC",			NULL },
	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",	NULL },
	{ "_NEC DV5800A",			NULL },
	{ "SAMSUNG CD-ROM SN-124",		"N001" },
	{ "Seagate STT20000A",			NULL },
	{ "CD-ROM CDR_U200",			"1.09" },
	{ NULL,					NULL }
};

/**
 * ide_dma_intr - IDE DMA interrupt handler
 * @drive: the drive the interrupt is for
 *
 * Handle an interrupt completing a read/write DMA transfer on an
 * IDE device.
 */

ide_startstop_t ide_dma_intr(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 stat = 0, dma_stat = 0;

	dma_stat = hwif->dma_ops->dma_end(drive);
	stat = hwif->tp_ops->read_status(hwif);

	if (OK_STAT(stat, DRIVE_READY, drive->bad_wstat | ATA_DRQ)) {
		if (!dma_stat) {
			struct request *rq = HWGROUP(drive)->rq;

			task_end_request(drive, rq, stat);
			return ide_stopped;
		}
		printk(KERN_ERR "%s: dma_intr: bad DMA status (dma_stat=%x)\n",
		       drive->name, dma_stat);
	}
	return ide_error(drive, "dma_intr", stat);
}

EXPORT_SYMBOL_GPL(ide_dma_intr);
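/*
 * ide_dma_good_drive() checks the whitelist above: devices known to
 * handle DMA correctly even though their identify data predates proper
 * DMA mode reporting.  The blacklist, consulted by __ide_dma_bad_drive()
 * below, lists devices with known DMA handling bugs (optionally
 * qualified by a firmware revision) that must be kept to PIO.
 */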
static int ide_dma_good_drive(ide_drive_t *drive)
{
	return ide_in_drive_list(drive->id, drive_whitelist);
}

/**
 * ide_build_sglist - map IDE scatter gather for DMA I/O
 * @drive: the drive to build the DMA table for
 * @rq: the request holding the sg list
 *
 * Perform the DMA mapping magic necessary to access the source or
 * target buffers of a request via DMA. The lower layers of the
 * kernel provide the necessary cache management so that we can
 * operate in a portable fashion.
 */

int ide_build_sglist(ide_drive_t *drive, struct request *rq)
{
	ide_hwif_t *hwif = HWIF(drive);
	struct scatterlist *sg = hwif->sg_table;

	ide_map_sg(drive, rq);

	if (rq_data_dir(rq) == READ)
		hwif->sg_dma_direction = DMA_FROM_DEVICE;
	else
		hwif->sg_dma_direction = DMA_TO_DEVICE;

	return dma_map_sg(hwif->dev, sg, hwif->sg_nents,
			  hwif->sg_dma_direction);
}

EXPORT_SYMBOL_GPL(ide_build_sglist);

#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
/**
 * ide_build_dmatable - build IDE DMA table
 * @drive: the drive to build the DMA table for
 * @rq: the request holding the sg list
 *
 * ide_build_dmatable() prepares a DMA request. We map the command
 * to get the PCI bus addresses of the buffers and then build up
 * the PRD table that the IDE layer wants to be fed. The code
 * knows about the 64K wrap bug in the CS5530.
 *
 * Returns the number of built PRD entries if all went okay,
 * returns 0 otherwise.
 *
 * May also be invoked from trm290.c
 */
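/*
 * Each PRD table entry is a pair of little-endian 32-bit words: the
 * physical base address of the block, then the transfer size with the
 * byte count in the low 16 bits (0x0000 meaning 64KB) and, in the last
 * entry, the end-of-table flag in bit 31 (layout per the SFF-8038i
 * bus-master IDE spec).  The trm290 encodes the count differently in
 * the upper 16 bits and takes no end-of-table flag, which is
 * special-cased below.
 */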
int ide_build_dmatable(ide_drive_t *drive, struct request *rq)
{
	ide_hwif_t *hwif = HWIF(drive);
	__le32 *table = (__le32 *)hwif->dmatable_cpu;
	unsigned int is_trm290 = (hwif->chipset == ide_trm290) ? 1 : 0;
	unsigned int count = 0;
	int i;
	struct scatterlist *sg;

	hwif->sg_nents = i = ide_build_sglist(drive, rq);

	if (!i)
		return 0;

	sg = hwif->sg_table;
	while (i) {
		u32 cur_addr;
		u32 cur_len;

		cur_addr = sg_dma_address(sg);
		cur_len = sg_dma_len(sg);

		/*
		 * Fill in the DMA table, without crossing any 64KB boundaries.
		 * Most hardware requires 16-bit alignment of all blocks,
		 * but the trm290 requires 32-bit alignment.
		 */

		while (cur_len) {
			if (count++ >= PRD_ENTRIES) {
				printk(KERN_ERR "%s: DMA table too small\n",
				       drive->name);
				goto use_pio_instead;
			} else {
				u32 xcount, bcount =
					0x10000 - (cur_addr & 0xffff);

				if (bcount > cur_len)
					bcount = cur_len;
				*table++ = cpu_to_le32(cur_addr);
				xcount = bcount & 0xffff;
				if (is_trm290)
					xcount = ((xcount >> 2) - 1) << 16;
				else if (xcount == 0x0000) {
					/*
					 * Most chipsets correctly interpret a
					 * length of 0x0000 as 64KB, but at
					 * least one (e.g. CS5530) misinterprets
					 * it as zero (!). So here we break the
					 * 64KB entry into two 32KB entries
					 * instead.
					 */
					if (count++ >= PRD_ENTRIES) {
						printk(KERN_ERR "%s: DMA table too small\n",
						       drive->name);
						goto use_pio_instead;
					}
					*table++ = cpu_to_le32(0x8000);
					*table++ = cpu_to_le32(cur_addr + 0x8000);
					xcount = 0x8000;
				}
				*table++ = cpu_to_le32(xcount);
				cur_addr += bcount;
				cur_len -= bcount;
			}
		}

		sg = sg_next(sg);
		i--;
	}

	if (count) {
		if (!is_trm290)
			*--table |= cpu_to_le32(0x80000000);
		return count;
	}

	printk(KERN_ERR "%s: empty DMA table?\n", drive->name);

use_pio_instead:
	ide_destroy_dmatable(drive);

	return 0; /* revert to PIO for this request */
}

EXPORT_SYMBOL_GPL(ide_build_dmatable);
#endif

/**
 * ide_destroy_dmatable - clean up DMA mapping
 * @drive: The drive to unmap
 *
 * Teardown mappings after DMA has completed. This must be called
 * after the completion of each use of ide_build_dmatable and before
 * the next use of ide_build_dmatable. Failure to do so will cause
 * an oops as only one mapping can be live for each target at a given
 * time.
 */

void ide_destroy_dmatable(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;

	dma_unmap_sg(hwif->dev, hwif->sg_table, hwif->sg_nents,
		     hwif->sg_dma_direction);
}

EXPORT_SYMBOL_GPL(ide_destroy_dmatable);

#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
/**
 * config_drive_for_dma - attempt to activate IDE DMA
 * @drive: the drive to place in DMA mode
 *
 * If the drive supports at least mode 2 DMA or UDMA of any kind
 * then attempt to place it into DMA mode. Drives that are known to
 * support DMA but predate the DMA mode reporting fields, or that are
 * known to have DMA handling bugs, are also set up appropriately
 * based on the good/bad drive lists.
 */

static int config_drive_for_dma(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u16 *id = drive->id;

	if (drive->media != ide_disk) {
		if (hwif->host_flags & IDE_HFLAG_NO_ATAPI_DMA)
			return 0;
	}

	/*
	 * Enable DMA on any drive that has
	 * UltraDMA (mode 0/1/2/3/4/5/6) enabled
	 */
	if ((id[ATA_ID_FIELD_VALID] & 4) &&
	    ((id[ATA_ID_UDMA_MODES] >> 8) & 0x7f))
		return 1;

	/*
	 * Enable DMA on any drive that has mode2 DMA
	 * (multi or single) enabled
	 */
	if (id[ATA_ID_FIELD_VALID] & 2)	/* regular DMA */
		if ((id[ATA_ID_MWDMA_MODES] & 0x404) == 0x404 ||
		    (id[ATA_ID_SWDMA_MODES] & 0x404) == 0x404)
			return 1;

	/* Consult the list of known "good" drives */
	if (ide_dma_good_drive(drive))
		return 1;

	return 0;
}
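/*
 * SFF-8038i bus-master status register layout (read via
 * read_sff_dma_status() from dma_base + ATA_DMA_STATUS): bit 0 = DMA
 * active, bit 1 = DMA error, bit 2 = interrupt pending, bits 5 and 6 =
 * drive 0/1 configured for DMA.  The tests against 1, 2 and 4 below
 * follow this layout.
 */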
/**
 * dma_timer_expiry - handle a DMA timeout
 * @drive: Drive that timed out
 *
 * An IDE DMA transfer timed out. In the event of an error we ask
 * the driver to resolve the problem; if a DMA transfer is still
 * in progress we continue to wait (arguably we need to add a
 * secondary 'I don't care what the drive thinks' timeout here).
 * Finally, if we have an interrupt we let it complete the I/O.
 * But only one time: we clear expiry, and if the transfer still
 * has not completed after WAIT_CMD we error and retry in PIO.
 * This can occur if an interrupt is lost, or due to hangs or bugs.
 */

static int dma_timer_expiry(ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	u8 dma_stat = hwif->tp_ops->read_sff_dma_status(hwif);

	printk(KERN_WARNING "%s: dma_timer_expiry: dma status == 0x%02x\n",
	       drive->name, dma_stat);

	if ((dma_stat & 0x18) == 0x18)	/* BUSY Stupid Early Timer !! */
		return WAIT_CMD;

	HWGROUP(drive)->expiry = NULL;	/* one free ride for now */

	/* 1 dmaing, 2 error, 4 intr */
	if (dma_stat & 2)	/* ERROR */
		return -1;

	if (dma_stat & 1)	/* DMAing */
		return WAIT_CMD;

	if (dma_stat & 4)	/* Got an Interrupt */
		return WAIT_CMD;

	return 0;	/* Status is unknown -- reset the bus */
}

/**
 * ide_dma_host_set - Enable/disable DMA on a host
 * @drive: drive to control
 * @on: 1 to enable DMA for the drive, 0 to disable it
 *
 * Enable/disable DMA on an IDE controller following generic
 * bus-mastering IDE controller behaviour.
 */

void ide_dma_host_set(ide_drive_t *drive, int on)
{
	ide_hwif_t *hwif = HWIF(drive);
	u8 unit = (drive->select.b.unit & 0x01);
	u8 dma_stat = hwif->tp_ops->read_sff_dma_status(hwif);

	if (on)
		dma_stat |= (1 << (5 + unit));
	else
		dma_stat &= ~(1 << (5 + unit));

	if (hwif->host_flags & IDE_HFLAG_MMIO)
		writeb(dma_stat,
		       (void __iomem *)(hwif->dma_base + ATA_DMA_STATUS));
	else
		outb(dma_stat, hwif->dma_base + ATA_DMA_STATUS);
}

EXPORT_SYMBOL_GPL(ide_dma_host_set);
#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */

/**
 * ide_dma_off_quietly - Generic DMA kill
 * @drive: drive to control
 *
 * Turn off the current DMA on this IDE controller.
 */

void ide_dma_off_quietly(ide_drive_t *drive)
{
	drive->dev_flags &= ~IDE_DFLAG_USING_DMA;
	ide_toggle_bounce(drive, 0);

	drive->hwif->dma_ops->dma_host_set(drive, 0);
}

EXPORT_SYMBOL(ide_dma_off_quietly);

/**
 * ide_dma_off - disable DMA on a device
 * @drive: drive to disable DMA on
 *
 * Disable IDE DMA for a device on this IDE controller.
 * Inform the user that DMA has been disabled.
 */

void ide_dma_off(ide_drive_t *drive)
{
	printk(KERN_INFO "%s: DMA disabled\n", drive->name);
	ide_dma_off_quietly(drive);
}

EXPORT_SYMBOL(ide_dma_off);

/**
 * ide_dma_on - Enable DMA on a device
 * @drive: drive to enable DMA on
 *
 * Enable IDE DMA for a device on this IDE controller.
 */

void ide_dma_on(ide_drive_t *drive)
{
	drive->dev_flags |= IDE_DFLAG_USING_DMA;
	ide_toggle_bounce(drive, 1);

	drive->hwif->dma_ops->dma_host_set(drive, 1);
}

#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
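/*
 * Generic BM-DMA register block, relative to hwif->dma_base: the
 * command register at ATA_DMA_CMD (offset 0, bit 0 = start/stop,
 * bit 3 = read direction), the status register at ATA_DMA_STATUS
 * (offset 2), and the PRD table pointer at ATA_DMA_TABLE_OFS
 * (offset 4).  The helpers below program it through either
 * outb()/outl() or the MMIO equivalents, depending on IDE_HFLAG_MMIO.
 */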
/**
 * ide_dma_setup - begin a DMA phase
 * @drive: target device
 *
 * Build an IDE DMA PRD (IDE speak for scatter gather table)
 * and then set up the DMA transfer registers for a device
 * that follows generic IDE PCI DMA behaviour. Controllers can
 * override this function if they need to.
 *
 * Returns 0 on success. If a PIO fallback is required then 1
 * is returned.
 */

int ide_dma_setup(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq = HWGROUP(drive)->rq;
	unsigned int reading;
	u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
	u8 dma_stat;

	if (rq_data_dir(rq))
		reading = 0;
	else
		reading = 1 << 3;

	/* fall back to PIO! */
	if (!ide_build_dmatable(drive, rq)) {
		ide_map_sg(drive, rq);
		return 1;
	}

	/* PRD table */
	if (mmio)
		writel(hwif->dmatable_dma,
		       (void __iomem *)(hwif->dma_base + ATA_DMA_TABLE_OFS));
	else
		outl(hwif->dmatable_dma, hwif->dma_base + ATA_DMA_TABLE_OFS);

	/* specify r/w */
	if (mmio)
		writeb(reading, (void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
	else
		outb(reading, hwif->dma_base + ATA_DMA_CMD);

	/* read DMA status for INTR & ERROR flags */
	dma_stat = hwif->tp_ops->read_sff_dma_status(hwif);

	/* clear INTR & ERROR flags */
	if (mmio)
		writeb(dma_stat | 6,
		       (void __iomem *)(hwif->dma_base + ATA_DMA_STATUS));
	else
		outb(dma_stat | 6, hwif->dma_base + ATA_DMA_STATUS);

	drive->waiting_for_dma = 1;
	return 0;
}

EXPORT_SYMBOL_GPL(ide_dma_setup);

void ide_dma_exec_cmd(ide_drive_t *drive, u8 command)
{
	/* issue cmd to drive */
	ide_execute_command(drive, command, &ide_dma_intr, 2 * WAIT_CMD,
			    dma_timer_expiry);
}
EXPORT_SYMBOL_GPL(ide_dma_exec_cmd);

void ide_dma_start(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 dma_cmd;

	/* Note that this is done *after* the cmd has
	 * been issued to the drive, as per the BM-IDE spec.
	 * The Promise Ultra33 doesn't work correctly when
	 * we do this part before issuing the drive cmd.
	 */
	if (hwif->host_flags & IDE_HFLAG_MMIO) {
		dma_cmd = readb((void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
		/* start DMA */
		writeb(dma_cmd | 1,
		       (void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
	} else {
		dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
		outb(dma_cmd | 1, hwif->dma_base + ATA_DMA_CMD);
	}

	hwif->dma = 1;
	wmb();
}

EXPORT_SYMBOL_GPL(ide_dma_start);

/* returns 0 on success, (0x10 | dma_stat) on error */
int __ide_dma_end(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
	u8 dma_stat = 0, dma_cmd = 0;

	drive->waiting_for_dma = 0;

	if (mmio) {
		/* get DMA command mode */
		dma_cmd = readb((void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
		/* stop DMA */
		writeb(dma_cmd & ~1,
		       (void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
	} else {
		dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
		outb(dma_cmd & ~1, hwif->dma_base + ATA_DMA_CMD);
	}

	/* get DMA status */
	dma_stat = hwif->tp_ops->read_sff_dma_status(hwif);

	if (mmio)
		/* clear the INTR & ERROR bits */
		writeb(dma_stat | 6,
		       (void __iomem *)(hwif->dma_base + ATA_DMA_STATUS));
	else
		outb(dma_stat | 6, hwif->dma_base + ATA_DMA_STATUS);

	/* purge DMA mappings */
	ide_destroy_dmatable(drive);
	/* verify good DMA status */
	hwif->dma = 0;
	wmb();
	return (dma_stat & 7) != 4 ? (0x10 | dma_stat) : 0;
}

EXPORT_SYMBOL(__ide_dma_end);
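/*
 * Note: ide_dma_intr() treats any non-zero return from ->dma_end() as
 * a failed transfer, so the 0x10 marker bit above keeps the result
 * non-zero even when the interesting status bits happen to read back
 * as clear.
 */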
/* returns 1 if dma irq issued, 0 otherwise */
int ide_dma_test_irq(ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	u8 dma_stat = hwif->tp_ops->read_sff_dma_status(hwif);

	/* return 1 if INTR asserted */
	if ((dma_stat & 4) == 4)
		return 1;

	if (!drive->waiting_for_dma)
		printk(KERN_WARNING "%s: (%s) called while not waiting\n",
		       drive->name, __func__);
	return 0;
}
EXPORT_SYMBOL_GPL(ide_dma_test_irq);
#else
static inline int config_drive_for_dma(ide_drive_t *drive) { return 0; }
#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */

int __ide_dma_bad_drive(ide_drive_t *drive)
{
	u16 *id = drive->id;

	int blacklist = ide_in_drive_list(id, drive_blacklist);
	if (blacklist) {
		printk(KERN_WARNING "%s: Disabling (U)DMA for %s (blacklisted)\n",
		       drive->name, (char *)&id[ATA_ID_PROD]);
		return blacklist;
	}
	return 0;
}

EXPORT_SYMBOL(__ide_dma_bad_drive);

static const u8 xfer_mode_bases[] = {
	XFER_UDMA_0,
	XFER_MW_DMA_0,
	XFER_SW_DMA_0,
};

static unsigned int ide_get_mode_mask(ide_drive_t *drive, u8 base, u8 req_mode)
{
	u16 *id = drive->id;
	ide_hwif_t *hwif = drive->hwif;
	const struct ide_port_ops *port_ops = hwif->port_ops;
	unsigned int mask = 0;

	switch (base) {
	case XFER_UDMA_0:
		if ((id[ATA_ID_FIELD_VALID] & 4) == 0)
			break;

		if (port_ops && port_ops->udma_filter)
			mask = port_ops->udma_filter(drive);
		else
			mask = hwif->ultra_mask;
		mask &= id[ATA_ID_UDMA_MODES];

		/*
		 * avoid false cable warning from eighty_ninty_three()
		 */
		if (req_mode > XFER_UDMA_2) {
			if ((mask & 0x78) && (eighty_ninty_three(drive) == 0))
				mask &= 0x07;
		}
		break;
	case XFER_MW_DMA_0:
		if ((id[ATA_ID_FIELD_VALID] & 2) == 0)
			break;
		if (port_ops && port_ops->mdma_filter)
			mask = port_ops->mdma_filter(drive);
		else
			mask = hwif->mwdma_mask;
		mask &= id[ATA_ID_MWDMA_MODES];
		break;
	case XFER_SW_DMA_0:
		if (id[ATA_ID_FIELD_VALID] & 2) {
			mask = id[ATA_ID_SWDMA_MODES] & hwif->swdma_mask;
		} else if (id[ATA_ID_OLD_DMA_MODES] >> 8) {
			u8 mode = id[ATA_ID_OLD_DMA_MODES] >> 8;

			/*
			 * if the mode is valid convert it to the mask
			 * (the maximum allowed mode is XFER_SW_DMA_2)
			 */
			if (mode <= 2)
				mask = ((2 << mode) - 1) & hwif->swdma_mask;
		}
		break;
	default:
		BUG();
		break;
	}

	return mask;
}

/**
 * ide_find_dma_mode - compute DMA speed
 * @drive: IDE device
 * @req_mode: requested mode
 *
 * Checks the drive/host capabilities and finds the speed to use for
 * the DMA transfer. The speed is then limited by the requested mode.
 *
 * Returns 0 if the drive/host combination is incapable of DMA transfers
 * or if the requested mode is not a DMA mode.
 */
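/*
 * Worked example: a drive reporting UDMA modes 0-5 has
 * id[ATA_ID_UDMA_MODES] = 0x3f in its low byte; on a host with
 * ultra_mask 0x3f and an 80-wire cable, ide_get_mode_mask() returns
 * 0x3f, fls(0x3f) - 1 = 5, and the selected mode is XFER_UDMA_0 + 5,
 * i.e. XFER_UDMA_5.  With a 40-wire cable (and a request above UDMA2)
 * the mask is clipped to 0x07 and the result is XFER_UDMA_2.
 */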
u8 ide_find_dma_mode(ide_drive_t *drive, u8 req_mode)
{
	ide_hwif_t *hwif = drive->hwif;
	unsigned int mask;
	int x, i;
	u8 mode = 0;

	if (drive->media != ide_disk) {
		if (hwif->host_flags & IDE_HFLAG_NO_ATAPI_DMA)
			return 0;
	}

	for (i = 0; i < ARRAY_SIZE(xfer_mode_bases); i++) {
		if (req_mode < xfer_mode_bases[i])
			continue;
		mask = ide_get_mode_mask(drive, xfer_mode_bases[i], req_mode);
		x = fls(mask) - 1;
		if (x >= 0) {
			mode = xfer_mode_bases[i] + x;
			break;
		}
	}

	if (hwif->chipset == ide_acorn && mode == 0) {
		/*
		 * is this correct?
		 */
		if (ide_dma_good_drive(drive) &&
		    drive->id[ATA_ID_EIDE_DMA_TIME] < 150)
			mode = XFER_MW_DMA_1;
	}

	mode = min(mode, req_mode);

	printk(KERN_INFO "%s: %s mode selected\n", drive->name,
	       mode ? ide_xfer_verbose(mode) : "no DMA");

	return mode;
}

EXPORT_SYMBOL_GPL(ide_find_dma_mode);

static int ide_tune_dma(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 speed;

	if (ata_id_has_dma(drive->id) == 0 ||
	    (drive->dev_flags & IDE_DFLAG_NODMA))
		return 0;

	/* consult the list of known "bad" drives */
	if (__ide_dma_bad_drive(drive))
		return 0;

	if (ide_id_dma_bug(drive))
		return 0;

	if (hwif->host_flags & IDE_HFLAG_TRUST_BIOS_FOR_DMA)
		return config_drive_for_dma(drive);

	speed = ide_max_dma_mode(drive);

	if (!speed)
		return 0;

	if (ide_set_dma_mode(drive, speed))
		return 0;

	return 1;
}

static int ide_dma_check(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;

	if (ide_tune_dma(drive))
		return 0;

	/* TODO: always do PIO fallback */
	if (hwif->host_flags & IDE_HFLAG_TRUST_BIOS_FOR_DMA)
		return -1;

	ide_set_max_pio(drive);

	return -1;
}

int ide_id_dma_bug(ide_drive_t *drive)
{
	u16 *id = drive->id;

	if (id[ATA_ID_FIELD_VALID] & 4) {
		if ((id[ATA_ID_UDMA_MODES] >> 8) &&
		    (id[ATA_ID_MWDMA_MODES] >> 8))
			goto err_out;
	} else if (id[ATA_ID_FIELD_VALID] & 2) {
		if ((id[ATA_ID_MWDMA_MODES] >> 8) &&
		    (id[ATA_ID_SWDMA_MODES] >> 8))
			goto err_out;
	}
	return 0;
err_out:
	printk(KERN_ERR "%s: bad DMA info in identify block\n", drive->name);
	return 1;
}

int ide_set_dma(ide_drive_t *drive)
{
	int rc;

	/*
	 * Force DMAing for the beginning of the check.
	 * Some chipsets appear to do interesting
	 * things, if not checked and cleared.
	 *   PARANOIA!!!
	 */
	ide_dma_off_quietly(drive);

	rc = ide_dma_check(drive);
	if (rc)
		return rc;

	ide_dma_on(drive);

	return 0;
}
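/*
 * ide_check_dma_crc() is called once a drive has accumulated UDMA CRC
 * errors: it steps the drive down one UDMA mode (or forces PIO 4 when
 * no slower UDMA mode exists), then re-enables DMA only if the new
 * transfer rate is still a DMA mode.
 */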
void ide_check_dma_crc(ide_drive_t *drive)
{
	u8 mode;

	ide_dma_off_quietly(drive);
	drive->crc_count = 0;
	mode = drive->current_speed;
	/*
	 * Don't try non Ultra-DMA modes without iCRC's. Force the
	 * device to PIO and make the user enable SWDMA/MWDMA modes.
	 */
	if (mode > XFER_UDMA_0 && mode <= XFER_UDMA_7)
		mode--;
	else
		mode = XFER_PIO_4;
	ide_set_xfer_rate(drive, mode);
	if (drive->current_speed >= XFER_SW_DMA_0)
		ide_dma_on(drive);
}

#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
void ide_dma_lost_irq(ide_drive_t *drive)
{
	printk(KERN_ERR "%s: DMA interrupt recovery\n", drive->name);
}

EXPORT_SYMBOL(ide_dma_lost_irq);

void ide_dma_timeout(ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);

	printk(KERN_ERR "%s: timeout waiting for DMA\n", drive->name);

	if (hwif->dma_ops->dma_test_irq(drive))
		return;

	hwif->dma_ops->dma_end(drive);
}

EXPORT_SYMBOL(ide_dma_timeout);

void ide_release_dma_engine(ide_hwif_t *hwif)
{
	if (hwif->dmatable_cpu) {
		struct pci_dev *pdev = to_pci_dev(hwif->dev);

		pci_free_consistent(pdev, PRD_ENTRIES * PRD_BYTES,
				    hwif->dmatable_cpu, hwif->dmatable_dma);
		hwif->dmatable_cpu = NULL;
	}
}

int ide_allocate_dma_engine(ide_hwif_t *hwif)
{
	struct pci_dev *pdev = to_pci_dev(hwif->dev);

	hwif->dmatable_cpu = pci_alloc_consistent(pdev,
						  PRD_ENTRIES * PRD_BYTES,
						  &hwif->dmatable_dma);

	if (hwif->dmatable_cpu)
		return 0;

	printk(KERN_ERR "%s: unable to allocate DMA table\n", hwif->name);

	return 1;
}
EXPORT_SYMBOL_GPL(ide_allocate_dma_engine);

const struct ide_dma_ops sff_dma_ops = {
	.dma_host_set		= ide_dma_host_set,
	.dma_setup		= ide_dma_setup,
	.dma_exec_cmd		= ide_dma_exec_cmd,
	.dma_start		= ide_dma_start,
	.dma_end		= __ide_dma_end,
	.dma_test_irq		= ide_dma_test_irq,
	.dma_timeout		= ide_dma_timeout,
	.dma_lost_irq		= ide_dma_lost_irq,
};
EXPORT_SYMBOL_GPL(sff_dma_ops);
#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */