ide-dma.c revision f37afdaca711838b50ecd89b9c15fc745270d77c
/*
 *  IDE DMA support (including IDE PCI BM-DMA).
 *
 *  Copyright (C) 1995-1998   Mark Lord
 *  Copyright (C) 1999-2000   Andre Hedrick <andre@linux-ide.org>
 *  Copyright (C) 2004, 2007  Bartlomiej Zolnierkiewicz
 *
 *  May be copied or modified under the terms of the GNU General Public License
 *
 *  DMA is supported for all IDE devices (disk drives, cdroms, tapes, floppies).
 */

/*
 *  Special thanks to Mark for his six years of work.
 */

/*
 * Thanks to "Christopher J. Reimer" <reimer@doe.carleton.ca> for
 * fixing the problem with the BIOS on some Acer motherboards.
 *
 * Thanks to "Benoit Poulot-Cazajous" <poulot@chorus.fr> for testing
 * "TX" chipset compatibility and for providing patches for the "TX" chipset.
 *
 * Thanks to Christian Brunner <chb@muc.de> for taking a good first crack
 * at generic DMA -- his patches were referred to when preparing this code.
 *
 * Most importantly, thanks to Robert Bringman <rob@mars.trion.com>
 * for supplying a Promise UDMA board & WD UDMA drive for this work!
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/ide.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

#include <asm/io.h>
#include <asm/irq.h>

static const struct drive_list_entry drive_whitelist[] = {
	{ "Micropolis 2112A",			NULL		},
	{ "CONNER CTMA 4000",			NULL		},
	{ "CONNER CTT8000-A",			NULL		},
	{ "ST34342A",				NULL		},
	{ NULL,					NULL		}
};

static const struct drive_list_entry drive_blacklist[] = {
	{ "WDC AC11000H",			NULL		},
	{ "WDC AC22100H",			NULL		},
	{ "WDC AC32500H",			NULL		},
	{ "WDC AC33100H",			NULL		},
	{ "WDC AC31600H",			NULL		},
	{ "WDC AC32100H",			"24.09P07"	},
	{ "WDC AC23200L",			"21.10N21"	},
	{ "Compaq CRD-8241B",			NULL		},
	{ "CRD-8400B",				NULL		},
	{ "CRD-8480B",				NULL		},
	{ "CRD-8482B",				NULL		},
	{ "CRD-84",				NULL		},
	{ "SanDisk SDP3B",			NULL		},
	{ "SanDisk SDP3B-64",			NULL		},
	{ "SANYO CD-ROM CRD",			NULL		},
	{ "HITACHI CDR-8",			NULL		},
	{ "HITACHI CDR-8335",			NULL		},
	{ "HITACHI CDR-8435",			NULL		},
	{ "Toshiba CD-ROM XM-6202B",		NULL		},
	{ "TOSHIBA CD-ROM XM-1702BC",		NULL		},
	{ "CD-532E-A",				NULL		},
	{ "E-IDE CD-ROM CR-840",		NULL		},
	{ "CD-ROM Drive/F5A",			NULL		},
	{ "WPI CDD-820",			NULL		},
	{ "SAMSUNG CD-ROM SC-148C",		NULL		},
	{ "SAMSUNG CD-ROM SC",			NULL		},
	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",	NULL		},
	{ "_NEC DV5800A",			NULL		},
	{ "SAMSUNG CD-ROM SN-124",		"N001"		},
	{ "Seagate STT20000A",			NULL		},
	{ "CD-ROM CDR_U200",			"1.09"		},
	{ NULL,					NULL		}
};
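
/*
 * For reference: ide_in_drive_list() (defined elsewhere, in ide-iops.c)
 * walks these tables roughly as in the sketch below -- a NULL
 * id_firmware entry matches any firmware revision (illustrative
 * sketch, not the authoritative implementation):
 *
 *	for ( ; table->id_model; table++)
 *		if (strcmp(table->id_model, id->model) == 0 &&
 *		    (table->id_firmware == NULL ||
 *		     strstr(id->fw_rev, table->id_firmware)))
 *			return 1;
 *	return 0;
 */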

/**
 *	ide_dma_intr	-	IDE DMA interrupt handler
 *	@drive: the drive the interrupt is for
 *
 *	Handle an interrupt completing a read/write DMA transfer on an
 *	IDE device.
 */

ide_startstop_t ide_dma_intr (ide_drive_t *drive)
{
	u8 stat = 0, dma_stat = 0;

	dma_stat = drive->hwif->dma_ops->dma_end(drive);
	stat = ide_read_status(drive);

	if (OK_STAT(stat, DRIVE_READY, drive->bad_wstat | DRQ_STAT)) {
		if (!dma_stat) {
			struct request *rq = HWGROUP(drive)->rq;

			task_end_request(drive, rq, stat);
			return ide_stopped;
		}
		printk(KERN_ERR "%s: dma_intr: bad DMA status (dma_stat=%x)\n",
		       drive->name, dma_stat);
	}
	return ide_error(drive, "dma_intr", stat);
}

EXPORT_SYMBOL_GPL(ide_dma_intr);

static int ide_dma_good_drive(ide_drive_t *drive)
{
	return ide_in_drive_list(drive->id, drive_whitelist);
}

/**
 *	ide_build_sglist	-	map IDE scatter gather for DMA I/O
 *	@drive: the drive to build the DMA table for
 *	@rq: the request holding the sg list
 *
 *	Perform the DMA mapping magic necessary to access the source or
 *	target buffers of a request via DMA.  The lower layers of the
 *	kernel provide the necessary cache management so that we can
 *	operate in a portable fashion.
 */

int ide_build_sglist(ide_drive_t *drive, struct request *rq)
{
	ide_hwif_t *hwif = HWIF(drive);
	struct scatterlist *sg = hwif->sg_table;

	ide_map_sg(drive, rq);

	if (rq_data_dir(rq) == READ)
		hwif->sg_dma_direction = DMA_FROM_DEVICE;
	else
		hwif->sg_dma_direction = DMA_TO_DEVICE;

	return dma_map_sg(hwif->dev, sg, hwif->sg_nents,
			  hwif->sg_dma_direction);
}

EXPORT_SYMBOL_GPL(ide_build_sglist);

#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
/**
 *	ide_build_dmatable	-	build IDE DMA table
 *	@drive: the drive to build the DMA table for
 *	@rq: the request holding the sg list
 *
 *	ide_build_dmatable() prepares a dma request. We map the command
 *	to get the pci bus addresses of the buffers and then build up
 *	the PRD table that the IDE layer wants to be fed. The code
 *	knows about the 64K wrap bug in the CS5530.
 *
 *	Returns the number of built PRD entries if all went okay,
 *	returns 0 otherwise.
 *
 *	May also be invoked from trm290.c
 */

int ide_build_dmatable (ide_drive_t *drive, struct request *rq)
{
	ide_hwif_t *hwif	= HWIF(drive);
	unsigned int *table	= hwif->dmatable_cpu;
	unsigned int is_trm290	= (hwif->chipset == ide_trm290) ? 1 : 0;
	unsigned int count = 0;
	int i;
	struct scatterlist *sg;

	hwif->sg_nents = i = ide_build_sglist(drive, rq);

	if (!i)
		return 0;

	sg = hwif->sg_table;
	while (i) {
		u32 cur_addr;
		u32 cur_len;

		cur_addr = sg_dma_address(sg);
		cur_len = sg_dma_len(sg);

		/*
		 * Fill in the DMA table, without crossing any 64kB boundaries.
		 * Most hardware requires 16-bit alignment of all blocks,
		 * but the trm290 requires 32-bit alignment.
		 */

		while (cur_len) {
			if (count++ >= PRD_ENTRIES) {
				printk(KERN_ERR "%s: DMA table too small\n",
				       drive->name);
				goto use_pio_instead;
			} else {
				u32 xcount, bcount = 0x10000 - (cur_addr & 0xffff);

				if (bcount > cur_len)
					bcount = cur_len;
				*table++ = cpu_to_le32(cur_addr);
				xcount = bcount & 0xffff;
				if (is_trm290)
					xcount = ((xcount >> 2) - 1) << 16;
				if (xcount == 0x0000) {
					/*
					 * Most chipsets correctly interpret a
					 * length of 0x0000 as 64KB, but at least
					 * one (the CS5530) misinterprets it as
					 * zero (!).  So here we break the 64KB
					 * entry into two 32KB entries instead.
					 */
					if (count++ >= PRD_ENTRIES) {
						printk(KERN_ERR "%s: DMA table too small\n",
						       drive->name);
						goto use_pio_instead;
					}
					*table++ = cpu_to_le32(0x8000);
					*table++ = cpu_to_le32(cur_addr + 0x8000);
					xcount = 0x8000;
				}
				*table++ = cpu_to_le32(xcount);
				cur_addr += bcount;
				cur_len -= bcount;
			}
		}

		sg = sg_next(sg);
		i--;
	}

	if (count) {
		if (!is_trm290)
			*--table |= cpu_to_le32(0x80000000);
		return count;
	}

	printk(KERN_ERR "%s: empty DMA table?\n", drive->name);

use_pio_instead:
	ide_destroy_dmatable(drive);

	return 0; /* revert to PIO for this request */
}

EXPORT_SYMBOL_GPL(ide_build_dmatable);
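
/*
 * For reference, each PRD entry built above is a pair of little-endian
 * 32-bit words, per the SFF-8038i bus-master programming interface
 * (illustrative sketch):
 *
 *	word 0: physical base address of the chunk
 *	word 1: bits 15:0 = byte count (0x0000 means 64KB),
 *		bit 31   = end-of-table marker
 *
 * which is why a chunk may never cross a 64KB boundary, and why the
 * code above ORs 0x80000000 into the final entry (the trm290 encodes
 * its count field differently and does not use the EOT bit).
 */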
#endif

/**
 *	ide_destroy_dmatable	-	clean up DMA mapping
 *	@drive: The drive to unmap
 *
 *	Teardown mappings after DMA has completed. This must be called
 *	after the completion of each use of ide_build_dmatable and before
 *	the next use of ide_build_dmatable. Failure to do so will cause
 *	an oops as only one mapping can be live for each target at a given
 *	time.
 */

void ide_destroy_dmatable (ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;

	dma_unmap_sg(hwif->dev, hwif->sg_table, hwif->sg_nents,
		     hwif->sg_dma_direction);
}

EXPORT_SYMBOL_GPL(ide_destroy_dmatable);

#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
/**
 *	config_drive_for_dma	-	attempt to activate IDE DMA
 *	@drive: the drive to place in DMA mode
 *
 *	If the drive supports at least mode 2 DMA or UDMA of any kind
 *	then attempt to place it into DMA mode. Drives that are known to
 *	support DMA but predate the DMA properties or that are known
 *	to have DMA handling bugs are also set up appropriately based
 *	on the good/bad drive lists.
 */

static int config_drive_for_dma (ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	struct hd_driveid *id = drive->id;

	if (drive->media != ide_disk) {
		if (hwif->host_flags & IDE_HFLAG_NO_ATAPI_DMA)
			return 0;
	}

	/*
	 * Enable DMA on any drive that has
	 * UltraDMA (mode 0/1/2/3/4/5/6) enabled
	 */
	if ((id->field_valid & 4) && ((id->dma_ultra >> 8) & 0x7f))
		return 1;

	/*
	 * Enable DMA on any drive that has mode 2 DMA
	 * (multi-word or single-word) enabled
	 */
	if (id->field_valid & 2)	/* regular DMA */
		if ((id->dma_mword & 0x404) == 0x404 ||
		    (id->dma_1word & 0x404) == 0x404)
			return 1;

	/* Consult the list of known "good" drives */
	if (ide_dma_good_drive(drive))
		return 1;

	return 0;
}
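
/*
 * The identify-data tests above decode as follows (a sketch of ATA
 * words 62/63/88, where the low byte lists the modes the device
 * supports and the high byte flags the mode currently selected):
 *
 *	(id->dma_ultra >> 8) & 0x7f	   some UDMA mode 0-6 is selected
 *	(id->dma_mword & 0x404) == 0x404   MWDMA2 supported (bit 2)
 *					   and selected (bit 10)
 *	(id->dma_1word & 0x404) == 0x404   ditto for single-word DMA
 */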

/**
 *	dma_timer_expiry	-	handle a DMA timeout
 *	@drive: Drive that timed out
 *
 *	An IDE DMA transfer timed out. In the event of an error we ask
 *	the driver to resolve the problem; if a DMA transfer is still
 *	in progress we continue to wait (arguably we need to add a
 *	secondary 'I don't care what the drive thinks' timeout here).
 *	Finally, if we have an interrupt we let it complete the I/O,
 *	but only once: we clear the expiry handler, and if the transfer
 *	still hasn't completed after WAIT_CMD we error out and retry in
 *	PIO mode. This can occur if an interrupt is lost, or due to a
 *	hang or bugs.
 */

static int dma_timer_expiry (ide_drive_t *drive)
{
	ide_hwif_t *hwif	= HWIF(drive);
	u8 dma_stat		= hwif->INB(hwif->dma_status);

	printk(KERN_WARNING "%s: dma_timer_expiry: dma status == 0x%02x\n",
		drive->name, dma_stat);

	if ((dma_stat & 0x18) == 0x18)	/* BUSY Stupid Early Timer !! */
		return WAIT_CMD;

	HWGROUP(drive)->expiry = NULL;	/* one free ride for now */

	/* 1 dmaing, 2 error, 4 intr */
	if (dma_stat & 2)	/* ERROR */
		return -1;

	if (dma_stat & 1)	/* DMAing */
		return WAIT_CMD;

	if (dma_stat & 4)	/* Got an Interrupt */
		return WAIT_CMD;

	return 0;	/* Status is unknown -- reset the bus */
}

/**
 *	ide_dma_host_set	-	Enable/disable DMA on a host
 *	@drive: drive to control
 *	@on: 1 to enable DMA for the drive, 0 to disable it
 *
 *	Enable/disable DMA on an IDE controller following generic
 *	bus-mastering IDE controller behaviour.
 */

void ide_dma_host_set(ide_drive_t *drive, int on)
{
	ide_hwif_t *hwif	= HWIF(drive);
	u8 unit			= (drive->select.b.unit & 0x01);
	u8 dma_stat		= hwif->INB(hwif->dma_status);

	if (on)
		dma_stat |= (1 << (5 + unit));
	else
		dma_stat &= ~(1 << (5 + unit));

	hwif->OUTB(dma_stat, hwif->dma_status);
}

EXPORT_SYMBOL_GPL(ide_dma_host_set);
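
/*
 * Bus-master status register layout assumed above (SFF-8038i sketch):
 *
 *	bit 0	bus master active
 *	bit 1	DMA error
 *	bit 2	interrupt pending
 *	bit 5	drive 0 is DMA capable	<- the (1 << (5 + unit)) bit
 *	bit 6	drive 1 is DMA capable	<- for unit 0 / unit 1
 *	bit 7	simplex only
 */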
#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */

/**
 *	ide_dma_off_quietly	-	Generic DMA kill
 *	@drive: drive to control
 *
 *	Turn off the current DMA on this IDE controller.
 */

void ide_dma_off_quietly(ide_drive_t *drive)
{
	drive->using_dma = 0;
	ide_toggle_bounce(drive, 0);

	drive->hwif->dma_ops->dma_host_set(drive, 0);
}

EXPORT_SYMBOL(ide_dma_off_quietly);

/**
 *	ide_dma_off	-	disable DMA on a device
 *	@drive: drive to disable DMA on
 *
 *	Disable IDE DMA for a device on this IDE controller.
 *	Inform the user that DMA has been disabled.
 */

void ide_dma_off(ide_drive_t *drive)
{
	printk(KERN_INFO "%s: DMA disabled\n", drive->name);
	ide_dma_off_quietly(drive);
}

EXPORT_SYMBOL(ide_dma_off);

/**
 *	ide_dma_on		-	Enable DMA on a device
 *	@drive: drive to enable DMA on
 *
 *	Enable IDE DMA for a device on this IDE controller.
 */

void ide_dma_on(ide_drive_t *drive)
{
	drive->using_dma = 1;
	ide_toggle_bounce(drive, 1);

	drive->hwif->dma_ops->dma_host_set(drive, 1);
}

#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
/**
 *	ide_dma_setup	-	begin a DMA phase
 *	@drive: target device
 *
 *	Build an IDE DMA PRD (IDE speak for scatter gather table)
 *	and then set up the DMA transfer registers for a device
 *	that follows generic IDE PCI DMA behaviour. Controllers can
 *	override this function if they need to.
 *
 *	Returns 0 on success. If a PIO fallback is required then 1
 *	is returned.
 */

int ide_dma_setup(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq = HWGROUP(drive)->rq;
	unsigned int reading;
	u8 dma_stat;

	if (rq_data_dir(rq))
		reading = 0;
	else
		reading = 1 << 3;

	/* fall back to PIO! */
	if (!ide_build_dmatable(drive, rq)) {
		ide_map_sg(drive, rq);
		return 1;
	}

	/* PRD table */
	if (hwif->mmio)
		writel(hwif->dmatable_dma, (void __iomem *)hwif->dma_prdtable);
	else
		outl(hwif->dmatable_dma, hwif->dma_prdtable);

	/* specify r/w */
	hwif->OUTB(reading, hwif->dma_command);

	/* read dma_status for INTR & ERROR flags */
	dma_stat = hwif->INB(hwif->dma_status);

	/* clear INTR & ERROR flags */
	hwif->OUTB(dma_stat | 6, hwif->dma_status);
	drive->waiting_for_dma = 1;
	return 0;
}

EXPORT_SYMBOL_GPL(ide_dma_setup);
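
/*
 * Callers are expected to use these hooks roughly as in the sketch
 * below (illustrative only; error handling and the exact call sites
 * vary across the drivers):
 *
 *	if (hwif->dma_ops->dma_setup(drive))
 *		return ide_stopped;	-- fall back to PIO
 *	hwif->dma_ops->dma_exec_cmd(drive, command);	-- e.g. WIN_READDMA
 *	hwif->dma_ops->dma_start(drive);
 */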

void ide_dma_exec_cmd(ide_drive_t *drive, u8 command)
{
	/* issue cmd to drive */
	ide_execute_command(drive, command, &ide_dma_intr, 2 * WAIT_CMD,
			    dma_timer_expiry);
}
EXPORT_SYMBOL_GPL(ide_dma_exec_cmd);

void ide_dma_start(ide_drive_t *drive)
{
	ide_hwif_t *hwif	= HWIF(drive);
	u8 dma_cmd		= hwif->INB(hwif->dma_command);

	/* Note that this is done *after* the cmd has
	 * been issued to the drive, as per the BM-IDE spec.
	 * The Promise Ultra33 doesn't work correctly when
	 * we do this part before issuing the drive cmd.
	 */
	/* start DMA */
	hwif->OUTB(dma_cmd | 1, hwif->dma_command);
	hwif->dma = 1;
	wmb();
}

EXPORT_SYMBOL_GPL(ide_dma_start);

/* returns nonzero on error, 0 otherwise */
int __ide_dma_end (ide_drive_t *drive)
{
	ide_hwif_t *hwif	= HWIF(drive);
	u8 dma_stat = 0, dma_cmd = 0;

	drive->waiting_for_dma = 0;
	/* get dma_command mode */
	dma_cmd = hwif->INB(hwif->dma_command);
	/* stop DMA */
	hwif->OUTB(dma_cmd & ~1, hwif->dma_command);
	/* get DMA status */
	dma_stat = hwif->INB(hwif->dma_status);
	/* clear the INTR & ERROR bits */
	hwif->OUTB(dma_stat | 6, hwif->dma_status);
	/* purge DMA mappings */
	ide_destroy_dmatable(drive);
	/* verify good DMA status */
	hwif->dma = 0;
	wmb();
	return (dma_stat & 7) != 4 ? (0x10 | dma_stat) : 0;
}

EXPORT_SYMBOL(__ide_dma_end);
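
/*
 * The success test above, (dma_stat & 7) != 4, reads: of the low three
 * status bits (1 = bus master active, 2 = error, 4 = interrupt), only
 * the interrupt bit may be set after a clean transfer; any other
 * combination is handed back to the caller as 0x10 | dma_stat.
 */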

/* returns 1 if dma irq issued, 0 otherwise */
int ide_dma_test_irq(ide_drive_t *drive)
{
	ide_hwif_t *hwif	= HWIF(drive);
	u8 dma_stat		= hwif->INB(hwif->dma_status);

	/* return 1 if INTR asserted */
	if ((dma_stat & 4) == 4)
		return 1;
	if (!drive->waiting_for_dma)
		printk(KERN_WARNING "%s: (%s) called while not waiting\n",
			drive->name, __func__);
	return 0;
}
EXPORT_SYMBOL_GPL(ide_dma_test_irq);
#else
static inline int config_drive_for_dma(ide_drive_t *drive) { return 0; }
#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */

int __ide_dma_bad_drive (ide_drive_t *drive)
{
	struct hd_driveid *id = drive->id;

	int blacklist = ide_in_drive_list(id, drive_blacklist);
	if (blacklist) {
		printk(KERN_WARNING "%s: Disabling (U)DMA for %s (blacklisted)\n",
				    drive->name, id->model);
		return blacklist;
	}
	return 0;
}

EXPORT_SYMBOL(__ide_dma_bad_drive);

static const u8 xfer_mode_bases[] = {
	XFER_UDMA_0,
	XFER_MW_DMA_0,
	XFER_SW_DMA_0,
};

static unsigned int ide_get_mode_mask(ide_drive_t *drive, u8 base, u8 req_mode)
{
	struct hd_driveid *id = drive->id;
	ide_hwif_t *hwif = drive->hwif;
	const struct ide_port_ops *port_ops = hwif->port_ops;
	unsigned int mask = 0;

	switch (base) {
	case XFER_UDMA_0:
		if ((id->field_valid & 4) == 0)
			break;

		if (port_ops && port_ops->udma_filter)
			mask = port_ops->udma_filter(drive);
		else
			mask = hwif->ultra_mask;
		mask &= id->dma_ultra;

		/*
		 * avoid false cable warning from eighty_ninty_three()
		 */
		if (req_mode > XFER_UDMA_2) {
			if ((mask & 0x78) && (eighty_ninty_three(drive) == 0))
				mask &= 0x07;
		}
		break;
	case XFER_MW_DMA_0:
		if ((id->field_valid & 2) == 0)
			break;
		if (port_ops && port_ops->mdma_filter)
			mask = port_ops->mdma_filter(drive);
		else
			mask = hwif->mwdma_mask;
		mask &= id->dma_mword;
		break;
	case XFER_SW_DMA_0:
		if (id->field_valid & 2) {
			mask = id->dma_1word & hwif->swdma_mask;
		} else if (id->tDMA) {
			/*
			 * ide_fix_driveid() doesn't convert ->tDMA to the
			 * CPU endianness so we need to do it here
			 */
			u8 mode = le16_to_cpu(id->tDMA);

			/*
			 * if the mode is valid convert it to the mask
			 * (the maximum allowed mode is XFER_SW_DMA_2)
			 */
			if (mode <= 2)
				mask = ((2 << mode) - 1) & hwif->swdma_mask;
		}
		break;
	default:
		BUG();
		break;
	}

	return mask;
}
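
/*
 * Worked example with hypothetical values: a host with ultra_mask =
 * 0x1f (UDMA0-4) and a drive reporting id->dma_ultra = 0x203f
 * (UDMA0-5 supported, UDMA5 currently selected) gives mask = 0x1f,
 * so ide_find_dma_mode() below computes fls(0x1f) - 1 = 4 and
 * settles on XFER_UDMA_4.
 */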

/**
 *	ide_find_dma_mode	-	compute DMA speed
 *	@drive: IDE device
 *	@req_mode: requested mode
 *
 *	Checks the drive/host capabilities and finds the speed to use for
 *	the DMA transfer.  The speed is then limited by the requested mode.
 *
 *	Returns 0 if the drive/host combination is incapable of DMA transfers
 *	or if the requested mode is not a DMA mode.
 */

u8 ide_find_dma_mode(ide_drive_t *drive, u8 req_mode)
{
	ide_hwif_t *hwif = drive->hwif;
	unsigned int mask;
	int x, i;
	u8 mode = 0;

	if (drive->media != ide_disk) {
		if (hwif->host_flags & IDE_HFLAG_NO_ATAPI_DMA)
			return 0;
	}

	for (i = 0; i < ARRAY_SIZE(xfer_mode_bases); i++) {
		if (req_mode < xfer_mode_bases[i])
			continue;
		mask = ide_get_mode_mask(drive, xfer_mode_bases[i], req_mode);
		x = fls(mask) - 1;
		if (x >= 0) {
			mode = xfer_mode_bases[i] + x;
			break;
		}
	}

	if (hwif->chipset == ide_acorn && mode == 0) {
		/*
		 * is this correct?
		 */
		if (ide_dma_good_drive(drive) && drive->id->eide_dma_time < 150)
			mode = XFER_MW_DMA_1;
	}

	mode = min(mode, req_mode);

	printk(KERN_INFO "%s: %s mode selected\n", drive->name,
			  mode ? ide_xfer_verbose(mode) : "no DMA");

	return mode;
}

EXPORT_SYMBOL_GPL(ide_find_dma_mode);

static int ide_tune_dma(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 speed;

	if (noautodma || drive->nodma || (drive->id->capability & 1) == 0)
		return 0;

	/* consult the list of known "bad" drives */
	if (__ide_dma_bad_drive(drive))
		return 0;

	if (ide_id_dma_bug(drive))
		return 0;

	if (hwif->host_flags & IDE_HFLAG_TRUST_BIOS_FOR_DMA)
		return config_drive_for_dma(drive);

	speed = ide_max_dma_mode(drive);

	if (!speed)
		return 0;

	if (ide_set_dma_mode(drive, speed))
		return 0;

	return 1;
}

static int ide_dma_check(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	int vdma = (hwif->host_flags & IDE_HFLAG_VDMA) ? 1 : 0;

	if (!vdma && ide_tune_dma(drive))
		return 0;

	/* TODO: always do PIO fallback */
	if (hwif->host_flags & IDE_HFLAG_TRUST_BIOS_FOR_DMA)
		return -1;

	ide_set_max_pio(drive);

	return vdma ? 0 : -1;
}

int ide_id_dma_bug(ide_drive_t *drive)
{
	struct hd_driveid *id = drive->id;

	if (id->field_valid & 4) {
		if ((id->dma_ultra >> 8) && (id->dma_mword >> 8))
			goto err_out;
	} else if (id->field_valid & 2) {
		if ((id->dma_mword >> 8) && (id->dma_1word >> 8))
			goto err_out;
	}
	return 0;
err_out:
	printk(KERN_ERR "%s: bad DMA info in identify block\n", drive->name);
	return 1;
}
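
/*
 * Rationale for ide_id_dma_bug(): ATA allows at most one transfer mode
 * to be selected at a time, so identify data flagging a selected UDMA
 * mode together with a selected MWDMA mode (or MWDMA together with
 * SWDMA) is self-contradictory and cannot be trusted.
 */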

int ide_set_dma(ide_drive_t *drive)
{
	int rc;

	/*
	 * Force DMA off at the start of the check.
	 * Some chipsets appear to do interesting
	 * things if this is not checked and cleared.
	 *   PARANOIA!!!
	 */
	ide_dma_off_quietly(drive);

	rc = ide_dma_check(drive);
	if (rc)
		return rc;

	ide_dma_on(drive);

	return 0;
}

void ide_check_dma_crc(ide_drive_t *drive)
{
	u8 mode;

	ide_dma_off_quietly(drive);
	drive->crc_count = 0;
	mode = drive->current_speed;
	/*
	 * Don't try non-Ultra-DMA modes without iCRC support.  Force the
	 * device to PIO and make the user enable SWDMA/MWDMA modes.
	 */
	if (mode > XFER_UDMA_0 && mode <= XFER_UDMA_7)
		mode--;
	else
		mode = XFER_PIO_4;
	ide_set_xfer_rate(drive, mode);
	if (drive->current_speed >= XFER_SW_DMA_0)
		ide_dma_on(drive);
}
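
/*
 * For example, a drive running at XFER_UDMA_5 that accumulates CRC
 * errors is stepped down and retried at XFER_UDMA_4, while a drive
 * already at XFER_UDMA_0 (or in a non-UDMA mode) is dropped straight
 * to XFER_PIO_4.
 */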

#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
void ide_dma_lost_irq (ide_drive_t *drive)
{
	printk(KERN_ERR "%s: DMA interrupt recovery\n", drive->name);
}

EXPORT_SYMBOL(ide_dma_lost_irq);

void ide_dma_timeout (ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);

	printk(KERN_ERR "%s: timeout waiting for DMA\n", drive->name);

	if (hwif->dma_ops->dma_test_irq(drive))
		return;

	hwif->dma_ops->dma_end(drive);
}

EXPORT_SYMBOL(ide_dma_timeout);

void ide_release_dma_engine(ide_hwif_t *hwif)
{
	if (hwif->dmatable_cpu) {
		struct pci_dev *pdev = to_pci_dev(hwif->dev);

		pci_free_consistent(pdev, PRD_ENTRIES * PRD_BYTES,
				    hwif->dmatable_cpu, hwif->dmatable_dma);
		hwif->dmatable_cpu = NULL;
	}
}

int ide_allocate_dma_engine(ide_hwif_t *hwif)
{
	struct pci_dev *pdev = to_pci_dev(hwif->dev);

	hwif->dmatable_cpu = pci_alloc_consistent(pdev,
						  PRD_ENTRIES * PRD_BYTES,
						  &hwif->dmatable_dma);

	if (hwif->dmatable_cpu)
		return 0;

	printk(KERN_ERR "%s: -- Error, unable to allocate DMA table.\n",
			hwif->name);

	return 1;
}
EXPORT_SYMBOL_GPL(ide_allocate_dma_engine);

static const struct ide_dma_ops sff_dma_ops = {
	.dma_host_set		= ide_dma_host_set,
	.dma_setup		= ide_dma_setup,
	.dma_exec_cmd		= ide_dma_exec_cmd,
	.dma_start		= ide_dma_start,
	.dma_end		= __ide_dma_end,
	.dma_test_irq		= ide_dma_test_irq,
	.dma_timeout		= ide_dma_timeout,
	.dma_lost_irq		= ide_dma_lost_irq,
};

void ide_setup_dma(ide_hwif_t *hwif, unsigned long base)
{
	hwif->dma_base = base;

	if (!hwif->dma_command)
		hwif->dma_command	= hwif->dma_base + 0;
	if (!hwif->dma_vendor1)
		hwif->dma_vendor1	= hwif->dma_base + 1;
	if (!hwif->dma_status)
		hwif->dma_status	= hwif->dma_base + 2;
	if (!hwif->dma_vendor3)
		hwif->dma_vendor3	= hwif->dma_base + 3;
	if (!hwif->dma_prdtable)
		hwif->dma_prdtable	= hwif->dma_base + 4;

	hwif->dma_ops = &sff_dma_ops;
}

EXPORT_SYMBOL_GPL(ide_setup_dma);
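
/*
 * The offsets assigned above follow the standard SFF-8038i bus-master
 * register block (per channel):
 *
 *	base + 0	command register (bit 0 start/stop,
 *			bit 3 transfer direction)
 *	base + 1	vendor specific
 *	base + 2	status register
 *	base + 3	vendor specific
 *	base + 4..7	PRD table pointer (32-bit physical address)
 */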
#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */
875