pdc_adma.c revision 9af5c9c97dc9d599281778864c72b385f0c63341
/*
 *  pdc_adma.c - Pacific Digital Corporation ADMA
 *
 *  Maintained by:  Mark Lord <mlord@pobox.com>
 *
 *  Copyright 2005 Mark Lord
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *
 *  Supports ATA disks in single-packet ADMA mode.
 *  Uses PIO for everything else.
 *
 *  TODO:  Use ADMA transfers for ATAPI devices, when possible.
 *  This requires careful attention to a number of quirks of the chip.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>

#define DRV_NAME	"pdc_adma"
#define DRV_VERSION	"1.0"

/* macro to calculate base address for ATA regs */
#define ADMA_ATA_REGS(base,port_no)	((base) + ((port_no) * 0x40))

/* macro to calculate base address for ADMA regs */
#define ADMA_REGS(base,port_no)		((base) + 0x80 + ((port_no) * 0x20))

/* macro to obtain addresses from ata_port */
#define ADMA_PORT_REGS(ap) \
	ADMA_REGS((ap)->host->iomap[ADMA_MMIO_BAR], ap->port_no)
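
/*
 * MMIO layout (BAR 4): each port has a 0x40-byte block of ATA taskfile
 * registers at (base + port_no * 0x40) and a 0x20-byte block of ADMA
 * channel registers at (base + 0x80 + port_no * 0x20), as encoded by
 * the macros above.
 */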

enum {
	ADMA_MMIO_BAR		= 4,

	ADMA_PORTS		= 2,
	ADMA_CPB_BYTES		= 40,
	ADMA_PRD_BYTES		= LIBATA_MAX_PRD * 16,
	ADMA_PKT_BYTES		= ADMA_CPB_BYTES + ADMA_PRD_BYTES,

	ADMA_DMA_BOUNDARY	= 0xffffffff,

	/* global register offsets */
	ADMA_MODE_LOCK		= 0x00c7,

	/* per-channel register offsets */
	ADMA_CONTROL		= 0x0000, /* ADMA control */
	ADMA_STATUS		= 0x0002, /* ADMA status */
	ADMA_CPB_COUNT		= 0x0004, /* CPB count */
	ADMA_CPB_CURRENT	= 0x000c, /* current CPB address */
	ADMA_CPB_NEXT		= 0x000c, /* next CPB address */
	ADMA_CPB_LOOKUP		= 0x0010, /* CPB lookup table */
	ADMA_FIFO_IN		= 0x0014, /* input FIFO threshold */
	ADMA_FIFO_OUT		= 0x0016, /* output FIFO threshold */

	/* ADMA_CONTROL register bits */
	aNIEN			= (1 << 8), /* irq mask: 1==masked */
	aGO			= (1 << 7), /* packet trigger ("Go!") */
	aRSTADM			= (1 << 5), /* ADMA logic reset */
	aPIOMD4			= 0x0003,   /* PIO mode 4 */

	/* ADMA_STATUS register bits */
	aPSD			= (1 << 6),
	aUIRQ			= (1 << 4),
	aPERR			= (1 << 0),

	/* CPB bits */
	cDONE			= (1 << 0),
	cATERR			= (1 << 3),

	cVLD			= (1 << 0),
	cDAT			= (1 << 2),
	cIEN			= (1 << 3),

	/* PRD bits */
	pORD			= (1 << 4),
	pDIRO			= (1 << 5),
	pEND			= (1 << 7),

	/* ATA register flags */
	rIGN			= (1 << 5),
	rEND			= (1 << 7),

	/* ATA register addresses */
	ADMA_REGS_CONTROL	= 0x0e,
	ADMA_REGS_SECTOR_COUNT	= 0x12,
	ADMA_REGS_LBA_LOW	= 0x13,
	ADMA_REGS_LBA_MID	= 0x14,
	ADMA_REGS_LBA_HIGH	= 0x15,
	ADMA_REGS_DEVICE	= 0x16,
	ADMA_REGS_COMMAND	= 0x17,

	/* PCI device IDs */
	board_1841_idx		= 0,	/* ADMA 2-port controller */
};

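/*
 * Per-port software state: a command is either in flight as an ADMA
 * packet (adma_state_pkt), as a conventional taskfile/PIO command
 * (adma_state_mmio), or the port is idle.  pkt/pkt_dma refer to the
 * per-port command packet buffer (one CPB followed by its PRD table,
 * ADMA_PKT_BYTES in size) allocated in adma_port_start().
 */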
typedef enum { adma_state_idle, adma_state_pkt, adma_state_mmio } adma_state_t;

struct adma_port_priv {
	u8			*pkt;
	dma_addr_t		pkt_dma;
	adma_state_t		state;
};

static int adma_ata_init_one(struct pci_dev *pdev,
				const struct pci_device_id *ent);
static int adma_port_start(struct ata_port *ap);
static void adma_host_stop(struct ata_host *host);
static void adma_port_stop(struct ata_port *ap);
static void adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int adma_qc_issue(struct ata_queued_cmd *qc);
static int adma_check_atapi_dma(struct ata_queued_cmd *qc);
static void adma_bmdma_stop(struct ata_queued_cmd *qc);
static u8 adma_bmdma_status(struct ata_port *ap);
static void adma_irq_clear(struct ata_port *ap);
static void adma_freeze(struct ata_port *ap);
static void adma_thaw(struct ata_port *ap);
static void adma_error_handler(struct ata_port *ap);

static struct scsi_host_template adma_ata_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
	.proc_name		= DRV_NAME,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.dma_boundary		= ADMA_DMA_BOUNDARY,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.emulated		= ATA_SHT_EMULATED,
};

static const struct ata_port_operations adma_ata_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.check_atapi_dma	= adma_check_atapi_dma,
	.data_xfer		= ata_data_xfer,
	.qc_prep		= adma_qc_prep,
	.qc_issue		= adma_qc_issue,
	.freeze			= adma_freeze,
	.thaw			= adma_thaw,
	.error_handler		= adma_error_handler,
	.irq_clear		= adma_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,
	.port_start		= adma_port_start,
	.port_stop		= adma_port_stop,
	.host_stop		= adma_host_stop,
	.bmdma_stop		= adma_bmdma_stop,
	.bmdma_status		= adma_bmdma_status,
};

static struct ata_port_info adma_port_info[] = {
	/* board_1841_idx */
	{
		.flags		= ATA_FLAG_SLAVE_POSS |
				  ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO |
				  ATA_FLAG_PIO_POLLING,
		.pio_mask	= 0x10, /* pio4 */
		.udma_mask	= ATA_UDMA4,
		.port_ops	= &adma_ata_ops,
	},
};

static const struct pci_device_id adma_ata_pci_tbl[] = {
	{ PCI_VDEVICE(PDC, 0x1841), board_1841_idx },

	{ }	/* terminate list */
};

static struct pci_driver adma_ata_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= adma_ata_pci_tbl,
	.probe			= adma_ata_init_one,
	.remove			= ata_pci_remove_one,
};

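/*
 * The hooks below intentionally do nothing: ATAPI DMA is declined
 * (a nonzero return from ->check_atapi_dma makes libata fall back to
 * PIO for that command), and this driver never programs the SFF BMDMA
 * engine, so there is nothing to stop or report.
 */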
static int adma_check_atapi_dma(struct ata_queued_cmd *qc)
{
	return 1;	/* ATAPI DMA not yet supported */
}

static void adma_bmdma_stop(struct ata_queued_cmd *qc)
{
	/* nothing */
}

static u8 adma_bmdma_status(struct ata_port *ap)
{
	return 0;
}

static void adma_irq_clear(struct ata_port *ap)
{
	/* nothing */
}

static void adma_reset_engine(struct ata_port *ap)
{
	void __iomem *chan = ADMA_PORT_REGS(ap);

	/* reset ADMA to idle state */
	writew(aPIOMD4 | aNIEN | aRSTADM, chan + ADMA_CONTROL);
	udelay(2);
	writew(aPIOMD4, chan + ADMA_CONTROL);
	udelay(2);
}

static void adma_reinit_engine(struct ata_port *ap)
{
	struct adma_port_priv *pp = ap->private_data;
	void __iomem *chan = ADMA_PORT_REGS(ap);

	/* mask/clear ATA interrupts */
	writeb(ATA_NIEN, ap->ioaddr.ctl_addr);
	ata_check_status(ap);

	/* reset the ADMA engine */
	adma_reset_engine(ap);

	/* set in-FIFO threshold to 0x100 */
	writew(0x100, chan + ADMA_FIFO_IN);

	/* set CPB pointer */
	writel((u32)pp->pkt_dma, chan + ADMA_CPB_NEXT);

	/* set out-FIFO threshold to 0x100 */
	writew(0x100, chan + ADMA_FIFO_OUT);

	/* set CPB count */
	writew(1, chan + ADMA_CPB_COUNT);

	/* read/discard ADMA status */
	readb(chan + ADMA_STATUS);
}

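/*
 * The driver keeps a channel in "register" mode (aGO clear) whenever
 * the ordinary shadow taskfile registers are in use; adma_packet_start()
 * later sets aGO to kick off the CPB that adma_reinit_engine() pointed
 * the channel at via ADMA_CPB_NEXT.
 */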
static inline void adma_enter_reg_mode(struct ata_port *ap)
{
	void __iomem *chan = ADMA_PORT_REGS(ap);

	writew(aPIOMD4, chan + ADMA_CONTROL);
	readb(chan + ADMA_STATUS);	/* flush */
}

static void adma_freeze(struct ata_port *ap)
{
	void __iomem *chan = ADMA_PORT_REGS(ap);

	/* mask/clear ATA interrupts */
	writeb(ATA_NIEN, ap->ioaddr.ctl_addr);
	ata_check_status(ap);

	/* reset ADMA to idle state */
	writew(aPIOMD4 | aNIEN | aRSTADM, chan + ADMA_CONTROL);
	udelay(2);
	writew(aPIOMD4 | aNIEN, chan + ADMA_CONTROL);
	udelay(2);
}

static void adma_thaw(struct ata_port *ap)
{
	adma_reinit_engine(ap);
}

static int adma_prereset(struct ata_port *ap, unsigned long deadline)
{
	struct adma_port_priv *pp = ap->private_data;

	if (pp->state != adma_state_idle) /* healthy paranoia */
		pp->state = adma_state_mmio;
	adma_reinit_engine(ap);

	return ata_std_prereset(ap, deadline);
}

static void adma_error_handler(struct ata_port *ap)
{
	ata_do_eh(ap, adma_prereset, ata_std_softreset, NULL,
		  ata_std_postreset);
}

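/*
 * Build the PRD (scatter/gather) table that follows the CPB in pp->pkt.
 * Each entry is 16 bytes: a 32-bit DMA address, a 32-bit length in units
 * of 8 bytes, a flags byte (pORD always, pDIRO for writes, pEND on the
 * final entry), the DMA mode, a pPKLW byte and a reserved byte (both
 * zero), and a 32-bit pointer to the next entry (zero terminates the
 * chain).  Returns the total packet length in bytes.
 */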
static int adma_fill_sg(struct ata_queued_cmd *qc)
{
	struct scatterlist *sg;
	struct ata_port *ap = qc->ap;
	struct adma_port_priv *pp = ap->private_data;
	u8  *buf = pp->pkt;
	int i = (2 + buf[3]) * 8;
	u8 pFLAGS = pORD | ((qc->tf.flags & ATA_TFLAG_WRITE) ? pDIRO : 0);

	ata_for_each_sg(sg, qc) {
		u32 addr;
		u32 len;

		addr = (u32)sg_dma_address(sg);
		*(__le32 *)(buf + i) = cpu_to_le32(addr);
		i += 4;

		len = sg_dma_len(sg) >> 3;
		*(__le32 *)(buf + i) = cpu_to_le32(len);
		i += 4;

		if (ata_sg_is_last(sg, qc))
			pFLAGS |= pEND;
		buf[i++] = pFLAGS;
		buf[i++] = qc->dev->dma_mode & 0xf;
		buf[i++] = 0;	/* pPKLW */
		buf[i++] = 0;	/* reserved */

		*(__le32 *)(buf + i)
			= (pFLAGS & pEND) ? 0 : cpu_to_le32(pp->pkt_dma + i + 4);
		i += 4;

		VPRINTK("PRD[%u] = (0x%lX, 0x%X)\n", i/4,
					(unsigned long)addr, len);
	}
	return i;
}

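/*
 * For ATA_PROT_DMA commands, build the CPB in pp->pkt: control/flag
 * bytes (cVLD | cDAT | cIEN), the packet length cLEN, a pointer to the
 * next CPB (cNCPB, pointing back at this packet), a pointer to the PRD
 * table (cPRD), and then the taskfile encoded as (value, register)
 * byte pairs, ending with the command register tagged rEND.  All other
 * protocols are handed to the standard libata prep path.
 */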
static void adma_qc_prep(struct ata_queued_cmd *qc)
{
	struct adma_port_priv *pp = qc->ap->private_data;
	u8  *buf = pp->pkt;
	u32 pkt_dma = (u32)pp->pkt_dma;
	int i = 0;

	VPRINTK("ENTER\n");

	adma_enter_reg_mode(qc->ap);
	if (qc->tf.protocol != ATA_PROT_DMA) {
		ata_qc_prep(qc);
		return;
	}

	buf[i++] = 0;	/* Response flags */
	buf[i++] = 0;	/* reserved */
	buf[i++] = cVLD | cDAT | cIEN;
	i++;		/* cLEN, gets filled in below */

	*(__le32 *)(buf+i) = cpu_to_le32(pkt_dma);	/* cNCPB */
	i += 4;		/* cNCPB */
	i += 4;		/* cPRD, gets filled in below */

	buf[i++] = 0;	/* reserved */
	buf[i++] = 0;	/* reserved */
	buf[i++] = 0;	/* reserved */
	buf[i++] = 0;	/* reserved */

	/* ATA registers; must be a multiple of 4 */
	buf[i++] = qc->tf.device;
	buf[i++] = ADMA_REGS_DEVICE;
	if ((qc->tf.flags & ATA_TFLAG_LBA48)) {
		buf[i++] = qc->tf.hob_nsect;
		buf[i++] = ADMA_REGS_SECTOR_COUNT;
		buf[i++] = qc->tf.hob_lbal;
		buf[i++] = ADMA_REGS_LBA_LOW;
		buf[i++] = qc->tf.hob_lbam;
		buf[i++] = ADMA_REGS_LBA_MID;
		buf[i++] = qc->tf.hob_lbah;
		buf[i++] = ADMA_REGS_LBA_HIGH;
	}
	buf[i++] = qc->tf.nsect;
	buf[i++] = ADMA_REGS_SECTOR_COUNT;
	buf[i++] = qc->tf.lbal;
	buf[i++] = ADMA_REGS_LBA_LOW;
	buf[i++] = qc->tf.lbam;
	buf[i++] = ADMA_REGS_LBA_MID;
	buf[i++] = qc->tf.lbah;
	buf[i++] = ADMA_REGS_LBA_HIGH;
	buf[i++] = 0;
	buf[i++] = ADMA_REGS_CONTROL;
	buf[i++] = rIGN;
	buf[i++] = 0;
	buf[i++] = qc->tf.command;
	buf[i++] = ADMA_REGS_COMMAND | rEND;

	buf[3] = (i >> 3) - 2;				/* cLEN */
	*(__le32 *)(buf+8) = cpu_to_le32(pkt_dma + i);	/* cPRD */

	i = adma_fill_sg(qc);
	wmb();	/* flush PRDs and pkt to memory */
#if 0
	/* dump out CPB + PRDs for debug */
	{
		int j, len = 0;
		static char obuf[2048];
		for (j = 0; j < i; ++j) {
			len += sprintf(obuf+len, "%02x ", buf[j]);
			if ((j & 7) == 7) {
				printk("%s\n", obuf);
				len = 0;
			}
		}
		if (len)
			printk("%s\n", obuf);
	}
#endif
}

static inline void adma_packet_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *chan = ADMA_PORT_REGS(ap);

	VPRINTK("ENTER, ap %p\n", ap);

	/* fire up the ADMA engine */
	writew(aPIOMD4 | aGO, chan + ADMA_CONTROL);
}

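/*
 * Route each command to the right engine: DMA-protocol commands run as
 * ADMA packets, everything else (including all ATAPI traffic, which is
 * never allowed to use DMA here) goes through the standard taskfile
 * path in register mode.
 */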
static unsigned int adma_qc_issue(struct ata_queued_cmd *qc)
{
	struct adma_port_priv *pp = qc->ap->private_data;

	switch (qc->tf.protocol) {
	case ATA_PROT_DMA:
		pp->state = adma_state_pkt;
		adma_packet_start(qc);
		return 0;

	case ATA_PROT_ATAPI_DMA:
		BUG();
		break;

	default:
		break;
	}

	pp->state = adma_state_mmio;
	return ata_qc_issue_prot(qc);
}

static inline unsigned int adma_intr_pkt(struct ata_host *host)
{
	unsigned int handled = 0, port_no;

	for (port_no = 0; port_no < host->n_ports; ++port_no) {
		struct ata_port *ap = host->ports[port_no];
		struct adma_port_priv *pp;
		struct ata_queued_cmd *qc;
		void __iomem *chan = ADMA_PORT_REGS(ap);
		u8 status = readb(chan + ADMA_STATUS);

		if (status == 0)
			continue;
		handled = 1;
		adma_enter_reg_mode(ap);
		if (ap->flags & ATA_FLAG_DISABLED)
			continue;
		pp = ap->private_data;
		if (!pp || pp->state != adma_state_pkt)
			continue;
		qc = ata_qc_from_tag(ap, ap->link.active_tag);
		if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
			if (status & aPERR)
				qc->err_mask |= AC_ERR_HOST_BUS;
			else if ((status & (aPSD | aUIRQ)))
				qc->err_mask |= AC_ERR_OTHER;

			if (pp->pkt[0] & cATERR)
				qc->err_mask |= AC_ERR_DEV;
			else if (pp->pkt[0] != cDONE)
				qc->err_mask |= AC_ERR_OTHER;

			if (!qc->err_mask)
				ata_qc_complete(qc);
			else {
				struct ata_eh_info *ehi = &ap->link.eh_info;
				ata_ehi_clear_desc(ehi);
				ata_ehi_push_desc(ehi,
					"ADMA-status 0x%02X", status);
				ata_ehi_push_desc(ehi,
					"pkt[0] 0x%02X", pp->pkt[0]);

				if (qc->err_mask == AC_ERR_DEV)
					ata_port_abort(ap);
				else
					ata_port_freeze(ap);
			}
		}
	}
	return handled;
}

static inline unsigned int adma_intr_mmio(struct ata_host *host)
{
	unsigned int handled = 0, port_no;

	for (port_no = 0; port_no < host->n_ports; ++port_no) {
		struct ata_port *ap;
		ap = host->ports[port_no];
		if (ap && (!(ap->flags & ATA_FLAG_DISABLED))) {
			struct ata_queued_cmd *qc;
			struct adma_port_priv *pp = ap->private_data;
			if (!pp || pp->state != adma_state_mmio)
				continue;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {

				/* check main status, clearing INTRQ */
				u8 status = ata_check_status(ap);
				if ((status & ATA_BUSY))
					continue;
				DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
					ap->print_id, qc->tf.protocol, status);

				/* complete taskfile transaction */
				pp->state = adma_state_idle;
				qc->err_mask |= ac_err_mask(status);
				if (!qc->err_mask)
					ata_qc_complete(qc);
				else {
					struct ata_eh_info *ehi =
						&ap->link.eh_info;
					ata_ehi_clear_desc(ehi);
					ata_ehi_push_desc(ehi,
						"status 0x%02X", status);

					if (qc->err_mask == AC_ERR_DEV)
						ata_port_abort(ap);
					else
						ata_port_freeze(ap);
				}
				handled = 1;
			}
		}
	}
	return handled;
}

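/*
 * The interrupt line is shared (IRQF_SHARED) and serves both ports, so
 * every interrupt polls both the packet-mode and register-mode paths on
 * each port and reports whether anything was actually ours.
 */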
static irqreturn_t adma_intr(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int handled = 0;

	VPRINTK("ENTER\n");

	spin_lock(&host->lock);
	handled = adma_intr_pkt(host) | adma_intr_mmio(host);
	spin_unlock(&host->lock);

	VPRINTK("EXIT\n");

	return IRQ_RETVAL(handled);
}

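/*
 * Wire up the shadow taskfile registers.  Within each port's 0x40-byte
 * ATA register window the registers sit on 4-byte strides, with the
 * alternate-status/device-control register at offset 0x38.
 */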
static void adma_ata_setup_port(struct ata_ioports *port, void __iomem *base)
{
	port->cmd_addr		=
	port->data_addr		= base + 0x000;
	port->error_addr	=
	port->feature_addr	= base + 0x004;
	port->nsect_addr	= base + 0x008;
	port->lbal_addr		= base + 0x00c;
	port->lbam_addr		= base + 0x010;
	port->lbah_addr		= base + 0x014;
	port->device_addr	= base + 0x018;
	port->status_addr	=
	port->command_addr	= base + 0x01c;
	port->altstatus_addr	=
	port->ctl_addr		= base + 0x038;
}

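/*
 * Per-port setup: the private data and the ADMA packet buffer are
 * devres-managed (devm_kzalloc/dmam_alloc_coherent), so they are freed
 * automatically and adma_port_stop() only needs to quiesce the engine.
 */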
static int adma_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct adma_port_priv *pp;
	int rc;

	rc = ata_port_start(ap);
	if (rc)
		return rc;
	adma_enter_reg_mode(ap);
	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	pp->pkt = dmam_alloc_coherent(dev, ADMA_PKT_BYTES, &pp->pkt_dma,
				      GFP_KERNEL);
	if (!pp->pkt)
		return -ENOMEM;
	/* paranoia? */
	if ((pp->pkt_dma & 7) != 0) {
		printk("bad alignment for pp->pkt_dma: %08x\n",
						(u32)pp->pkt_dma);
		return -ENOMEM;
	}
	memset(pp->pkt, 0, ADMA_PKT_BYTES);
	ap->private_data = pp;
	adma_reinit_engine(ap);
	return 0;
}

static void adma_port_stop(struct ata_port *ap)
{
	adma_reset_engine(ap);
}

static void adma_host_stop(struct ata_host *host)
{
	unsigned int port_no;

	for (port_no = 0; port_no < ADMA_PORTS; ++port_no)
		adma_reset_engine(host->ports[port_no]);
}

static void adma_host_init(struct ata_host *host, unsigned int chip_id)
{
	unsigned int port_no;

	/* enable/lock aGO operation */
	writeb(7, host->iomap[ADMA_MMIO_BAR] + ADMA_MODE_LOCK);

	/* reset the ADMA logic */
	for (port_no = 0; port_no < ADMA_PORTS; ++port_no)
		adma_reset_engine(host->ports[port_no]);
}

static int adma_set_dma_masks(struct pci_dev *pdev, void __iomem *mmio_base)
{
	int rc;

	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev,
			"32-bit DMA enable failed\n");
		return rc;
	}
	rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev,
			"32-bit consistent DMA enable failed\n");
		return rc;
	}
	return 0;
}

static int adma_ata_init_one(struct pci_dev *pdev,
			     const struct pci_device_id *ent)
{
	static int printed_version;
	unsigned int board_idx = (unsigned int) ent->driver_data;
	const struct ata_port_info *ppi[] = { &adma_port_info[board_idx], NULL };
	struct ata_host *host;
	void __iomem *mmio_base;
	int rc, port_no;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	/* alloc host */
	host = ata_host_alloc_pinfo(&pdev->dev, ppi, ADMA_PORTS);
	if (!host)
		return -ENOMEM;

	/* acquire resources and fill host */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	if ((pci_resource_flags(pdev, 4) & IORESOURCE_MEM) == 0)
		return -ENODEV;

	rc = pcim_iomap_regions(pdev, 1 << ADMA_MMIO_BAR, DRV_NAME);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);
	mmio_base = host->iomap[ADMA_MMIO_BAR];

	rc = adma_set_dma_masks(pdev, mmio_base);
	if (rc)
		return rc;

	for (port_no = 0; port_no < ADMA_PORTS; ++port_no)
		adma_ata_setup_port(&host->ports[port_no]->ioaddr,
				    ADMA_ATA_REGS(mmio_base, port_no));

	/* initialize adapter */
	adma_host_init(host, board_idx);

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, adma_intr, IRQF_SHARED,
				 &adma_ata_sht);
}

static int __init adma_ata_init(void)
{
	return pci_register_driver(&adma_ata_pci_driver);
}

static void __exit adma_ata_exit(void)
{
	pci_unregister_driver(&adma_ata_pci_driver);
}

MODULE_AUTHOR("Mark Lord");
MODULE_DESCRIPTION("Pacific Digital Corporation ADMA low-level driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, adma_ata_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_init(adma_ata_init);
module_exit(adma_ata_exit);