sata_sil.c revision 6d32d30f55020d766388df7515f771f68c973033
1/*
2 *  sata_sil.c - Silicon Image SATA
3 *
4 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
5 *  		    Please ALWAYS copy linux-ide@vger.kernel.org
6 *		    on emails.
7 *
8 *  Copyright 2003-2005 Red Hat, Inc.
9 *  Copyright 2003 Benjamin Herrenschmidt
10 *
11 *
12 *  This program is free software; you can redistribute it and/or modify
13 *  it under the terms of the GNU General Public License as published by
14 *  the Free Software Foundation; either version 2, or (at your option)
15 *  any later version.
16 *
17 *  This program is distributed in the hope that it will be useful,
18 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
19 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20 *  GNU General Public License for more details.
21 *
22 *  You should have received a copy of the GNU General Public License
23 *  along with this program; see the file COPYING.  If not, write to
24 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 *  libata documentation is available via 'make {ps|pdf}docs',
28 *  as Documentation/DocBook/libata.*
29 *
30 *  Documentation for SiI 3112:
31 *  http://gkernel.sourceforge.net/specs/sii/3112A_SiI-DS-0095-B2.pdf.bz2
32 *
33 *  Other errata and documentation available under NDA.
34 *
35 */
36
37#include <linux/kernel.h>
38#include <linux/module.h>
39#include <linux/pci.h>
40#include <linux/init.h>
41#include <linux/blkdev.h>
42#include <linux/delay.h>
43#include <linux/interrupt.h>
44#include <linux/device.h>
45#include <scsi/scsi_host.h>
46#include <linux/libata.h>
47
48#define DRV_NAME	"sata_sil"
49#define DRV_VERSION	"2.3"
50
enum {
	SIL_MMIO_BAR		= 5,	/* PCI BAR holding the MMIO register space */

	/*
	 * host flags (stored in the high bits of ata_port->flags,
	 * above the generic ATA_FLAG_* range)
	 */
	SIL_FLAG_NO_SATA_IRQ	= (1 << 28),
	SIL_FLAG_RERR_ON_DMA_ACT = (1 << 29),
	SIL_FLAG_MOD15WRITE	= (1 << 30),

	SIL_DFL_PORT_FLAGS	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO,
	SIL_DFL_LINK_FLAGS	= ATA_LFLAG_HRST_TO_RESUME,

	/*
	 * Controller IDs (index into sil_port_info[])
	 */
	sil_3112		= 0,
	sil_3112_no_sata_irq	= 1,
	sil_3512		= 2,
	sil_3114		= 3,

	/*
	 * Register offsets
	 */
	SIL_SYSCFG		= 0x48,

	/*
	 * Register bits
	 */
	/* SYSCFG: per-port IRQ mask bits, one bit per IDE channel */
	SIL_MASK_IDE0_INT	= (1 << 22),
	SIL_MASK_IDE1_INT	= (1 << 23),
	SIL_MASK_IDE2_INT	= (1 << 24),
	SIL_MASK_IDE3_INT	= (1 << 25),
	SIL_MASK_2PORT		= SIL_MASK_IDE0_INT | SIL_MASK_IDE1_INT,
	SIL_MASK_4PORT		= SIL_MASK_2PORT |
				  SIL_MASK_IDE2_INT | SIL_MASK_IDE3_INT,

	/* BMDMA/BMDMA2 */
	SIL_INTR_STEERING	= (1 << 1),

	SIL_DMA_ENABLE		= (1 << 0),  /* DMA run switch */
	SIL_DMA_RDWR		= (1 << 3),  /* DMA Rd-Wr */
	SIL_DMA_SATA_IRQ	= (1 << 4),  /* OR of all SATA IRQs */
	SIL_DMA_ACTIVE		= (1 << 16), /* DMA running */
	SIL_DMA_ERROR		= (1 << 17), /* PCI bus error */
	SIL_DMA_COMPLETE	= (1 << 18), /* cmd complete / IRQ pending */
	SIL_DMA_N_SATA_IRQ	= (1 << 6),  /* SATA_IRQ for the next channel */
	SIL_DMA_N_ACTIVE	= (1 << 24), /* ACTIVE for the next channel */
	SIL_DMA_N_ERROR		= (1 << 25), /* ERROR for the next channel */
	SIL_DMA_N_COMPLETE	= (1 << 26), /* COMPLETE for the next channel */

	/* SIEN */
	SIL_SIEN_N		= (1 << 16), /* triggered by SError.N */

	/*
	 * Others: per-device quirk flags from sil_blacklist[]
	 */
	SIL_QUIRK_MOD15WRITE	= (1 << 0),
	SIL_QUIRK_UDMA5MAX	= (1 << 1),
};
113
/* forward declarations for the ops tables below */
static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM
static int sil_pci_device_resume(struct pci_dev *pdev);
#endif
static void sil_dev_config(struct ata_device *dev);
static int sil_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
static int sil_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);
static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed);
static void sil_freeze(struct ata_port *ap);
static void sil_thaw(struct ata_port *ap);
124
125
/* PCI IDs handled by this driver; driver_data indexes sil_port_info[] */
static const struct pci_device_id sil_pci_tbl[] = {
	{ PCI_VDEVICE(CMD, 0x3112), sil_3112 },
	{ PCI_VDEVICE(CMD, 0x0240), sil_3112 },
	{ PCI_VDEVICE(CMD, 0x3512), sil_3512 },
	{ PCI_VDEVICE(CMD, 0x3114), sil_3114 },
	{ PCI_VDEVICE(ATI, 0x436e), sil_3112 },
	{ PCI_VDEVICE(ATI, 0x4379), sil_3112_no_sata_irq },
	{ PCI_VDEVICE(ATI, 0x437a), sil_3112_no_sata_irq },

	{ }	/* terminate list */
};
137
138
139/* TODO firmware versions should be added - eric */
/* TODO firmware versions should be added - eric */
/* drives with known errata; matched by exact model string in sil_dev_config() */
static const struct sil_drivelist {
	const char * product;
	unsigned int quirk;
} sil_blacklist [] = {
	{ "ST320012AS",		SIL_QUIRK_MOD15WRITE },
	{ "ST330013AS",		SIL_QUIRK_MOD15WRITE },
	{ "ST340017AS",		SIL_QUIRK_MOD15WRITE },
	{ "ST360015AS",		SIL_QUIRK_MOD15WRITE },
	{ "ST380023AS",		SIL_QUIRK_MOD15WRITE },
	{ "ST3120023AS",	SIL_QUIRK_MOD15WRITE },
	{ "ST340014ASL",	SIL_QUIRK_MOD15WRITE },
	{ "ST360014ASL",	SIL_QUIRK_MOD15WRITE },
	{ "ST380011ASL",	SIL_QUIRK_MOD15WRITE },
	{ "ST3120022ASL",	SIL_QUIRK_MOD15WRITE },
	{ "ST3160021ASL",	SIL_QUIRK_MOD15WRITE },
	{ "Maxtor 4D060H3",	SIL_QUIRK_UDMA5MAX },
	{ }	/* terminate list */
};
158
/* PCI driver glue; removal and suspend use the generic libata helpers */
static struct pci_driver sil_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= sil_pci_tbl,
	.probe			= sil_init_one,
	.remove			= ata_pci_remove_one,
#ifdef CONFIG_PM
	.suspend		= ata_pci_device_suspend,
	.resume			= sil_pci_device_resume,
#endif
};
169
/* SCSI host template: entirely generic libata defaults */
static struct scsi_host_template sil_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};
187
/* port operations: mostly generic BMDMA, with SiI-specific mode setup,
 * SCR access, device quirk handling and freeze/thaw IRQ masking */
static const struct ata_port_operations sil_ops = {
	.port_disable		= ata_port_disable,
	.dev_config		= sil_dev_config,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,
	.set_mode		= sil_set_mode,
	.bmdma_setup            = ata_bmdma_setup,
	.bmdma_start            = ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.data_xfer		= ata_data_xfer,
	.freeze			= sil_freeze,
	.thaw			= sil_thaw,
	.error_handler		= ata_bmdma_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,
	.scr_read		= sil_scr_read,
	.scr_write		= sil_scr_write,
	.port_start		= ata_port_start,
};
214
/* per-controller capabilities, indexed by the board ID enum above */
static const struct ata_port_info sil_port_info[] = {
	/* sil_3112 */
	{
		.flags		= SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE,
		.link_flags	= SIL_DFL_LINK_FLAGS,
		.pio_mask	= 0x1f,			/* pio0-4 */
		.mwdma_mask	= 0x07,			/* mwdma0-2 */
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &sil_ops,
	},
	/* sil_3112_no_sata_irq */
	{
		.flags		= SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE |
				  SIL_FLAG_NO_SATA_IRQ,
		.link_flags	= SIL_DFL_LINK_FLAGS,
		.pio_mask	= 0x1f,			/* pio0-4 */
		.mwdma_mask	= 0x07,			/* mwdma0-2 */
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &sil_ops,
	},
	/* sil_3512 */
	{
		.flags		= SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
		.link_flags	= SIL_DFL_LINK_FLAGS,
		.pio_mask	= 0x1f,			/* pio0-4 */
		.mwdma_mask	= 0x07,			/* mwdma0-2 */
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &sil_ops,
	},
	/* sil_3114 */
	{
		.flags		= SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
		.link_flags	= SIL_DFL_LINK_FLAGS,
		.pio_mask	= 0x1f,			/* pio0-4 */
		.mwdma_mask	= 0x07,			/* mwdma0-2 */
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &sil_ops,
	},
};
254
/* per-port register offsets (relative to the BAR5 MMIO base) */
/* TODO: we can probably calculate rather than use a table */
static const struct {
	unsigned long tf;	/* ATA taskfile register block */
	unsigned long ctl;	/* ATA control/altstatus register block */
	unsigned long bmdma;	/* DMA register block */
	unsigned long bmdma2;	/* DMA register block #2 */
	unsigned long fifo_cfg;	/* FIFO Valid Byte Count and Control */
	unsigned long scr;	/* SATA control register block */
	unsigned long sien;	/* SATA Interrupt Enable register */
	unsigned long xfer_mode;/* data transfer mode register */
	unsigned long sfis_cfg;	/* SATA FIS reception config register */
} sil_port[] = {
	/* port 0 ... */
	/*   tf    ctl  bmdma  bmdma2  fifo    scr   sien   mode   sfis */
	{  0x80,  0x8A,   0x0,  0x10,  0x40, 0x100, 0x148,  0xb4, 0x14c },
	{  0xC0,  0xCA,   0x8,  0x18,  0x44, 0x180, 0x1c8,  0xf4, 0x1cc },
	{ 0x280, 0x28A, 0x200, 0x210, 0x240, 0x300, 0x348, 0x2b4, 0x34c },
	{ 0x2C0, 0x2CA, 0x208, 0x218, 0x244, 0x380, 0x3c8, 0x2f4, 0x3cc },
	/* ... port 3 */
};
276
277MODULE_AUTHOR("Jeff Garzik");
278MODULE_DESCRIPTION("low-level driver for Silicon Image SATA controller");
279MODULE_LICENSE("GPL");
280MODULE_DEVICE_TABLE(pci, sil_pci_tbl);
281MODULE_VERSION(DRV_VERSION);
282
283static int slow_down = 0;
284module_param(slow_down, int, 0444);
285MODULE_PARM_DESC(slow_down, "Sledgehammer used to work around random problems, by limiting commands to 15 sectors (0=off, 1=on)");
286
287
288static unsigned char sil_get_device_cache_line(struct pci_dev *pdev)
289{
290	u8 cache_line = 0;
291	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_line);
292	return cache_line;
293}
294
295/**
296 *	sil_set_mode		-	wrap set_mode functions
297 *	@link: link to set up
298 *	@r_failed: returned device when we fail
299 *
300 *	Wrap the libata method for device setup as after the setup we need
301 *	to inspect the results and do some configuration work
302 */
303
304static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed)
305{
306	struct ata_port *ap = link->ap;
307	void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
308	void __iomem *addr = mmio_base + sil_port[ap->port_no].xfer_mode;
309	struct ata_device *dev;
310	u32 tmp, dev_mode[2] = { };
311	int rc;
312
313	rc = ata_do_set_mode(link, r_failed);
314	if (rc)
315		return rc;
316
317	ata_link_for_each_dev(dev, link) {
318		if (!ata_dev_enabled(dev))
319			dev_mode[dev->devno] = 0;	/* PIO0/1/2 */
320		else if (dev->flags & ATA_DFLAG_PIO)
321			dev_mode[dev->devno] = 1;	/* PIO3/4 */
322		else
323			dev_mode[dev->devno] = 3;	/* UDMA */
324		/* value 2 indicates MDMA */
325	}
326
327	tmp = readl(addr);
328	tmp &= ~((1<<5) | (1<<4) | (1<<1) | (1<<0));
329	tmp |= dev_mode[0];
330	tmp |= (dev_mode[1] << 4);
331	writel(tmp, addr);
332	readl(addr);	/* flush */
333	return 0;
334}
335
336static inline void __iomem *sil_scr_addr(struct ata_port *ap, unsigned int sc_reg)
337{
338	void __iomem *offset = ap->ioaddr.scr_addr;
339
340	switch (sc_reg) {
341	case SCR_STATUS:
342		return offset + 4;
343	case SCR_ERROR:
344		return offset + 8;
345	case SCR_CONTROL:
346		return offset;
347	default:
348		/* do nothing */
349		break;
350	}
351
352	return NULL;
353}
354
355static int sil_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
356{
357	void __iomem *mmio = sil_scr_addr(ap, sc_reg);
358
359	if (mmio) {
360		*val = readl(mmio);
361		return 0;
362	}
363	return -EINVAL;
364}
365
366static int sil_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
367{
368	void __iomem *mmio = sil_scr_addr(ap, sc_reg);
369
370	if (mmio) {
371		writel(val, mmio);
372		return 0;
373	}
374	return -EINVAL;
375}
376
/**
 *	sil_host_intr - handle an interrupt for one port
 *	@ap: port that raised the interrupt
 *	@bmdma2: BMDMA2 status word already read by the caller
 *
 *	Processes SATA PHY/SError events and normal command completion.
 *	Called from sil_interrupt() with the host lock held.
 */
static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
	u8 status;

	if (unlikely(bmdma2 & SIL_DMA_SATA_IRQ)) {
		u32 serror;

		/* SIEN doesn't mask SATA IRQs on some 3112s.  Those
		 * controllers continue to assert IRQ as long as
		 * SError bits are pending.  Clear SError immediately.
		 */
		sil_scr_read(ap, SCR_ERROR, &serror);
		sil_scr_write(ap, SCR_ERROR, serror);

		/* Trigger hotplug and accumulate SError only if the
		 * port isn't already frozen.  Otherwise, PHY events
		 * during hardreset makes controllers with broken SIEN
		 * repeat probing needlessly.
		 */
		if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
			ata_ehi_hotplugged(&ap->link.eh_info);
			ap->link.eh_info.serror |= serror;
		}

		goto freeze;
	}

	/* spurious completion IRQ with no command in flight */
	if (unlikely(!qc))
		goto freeze;

	if (unlikely(qc->tf.flags & ATA_TFLAG_POLLING)) {
		/* this sometimes happens, just clear IRQ */
		ata_chk_status(ap);
		return;
	}

	/* Check whether we are expecting interrupt in this state */
	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Some pre-ATAPI-4 devices assert INTRQ
		 * at this state when ready to receive CDB.
		 */

		/* Check the ATA_DFLAG_CDB_INTR flag is enough here.
		 * The flag was turned on only for atapi devices.
		 * No need to check is_atapi_taskfile(&qc->tf) again.
		 */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			goto err_hsm;
		break;
	case HSM_ST_LAST:
		if (qc->tf.protocol == ATA_PROT_DMA ||
		    qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
			/* clear DMA-Start bit */
			ap->ops->bmdma_stop(qc);

			if (bmdma2 & SIL_DMA_ERROR) {
				qc->err_mask |= AC_ERR_HOST_BUS;
				ap->hsm_task_state = HSM_ST_ERR;
			}
		}
		break;
	case HSM_ST:
		/* data transfer state — interrupt is expected here */
		break;
	default:
		goto err_hsm;
	}

	/* check main status, clearing INTRQ */
	status = ata_chk_status(ap);
	if (unlikely(status & ATA_BUSY))
		goto err_hsm;

	/* ack bmdma irq events */
	ata_bmdma_irq_clear(ap);

	/* kick HSM in the ass */
	ata_hsm_move(ap, qc, status, 0);

	/* record BMDMA2 status for EH diagnostics on DMA command failure */
	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
				       qc->tf.protocol == ATA_PROT_ATAPI_DMA))
		ata_ehi_push_desc(ehi, "BMDMA2 stat 0x%x", bmdma2);

	return;

 err_hsm:
	qc->err_mask |= AC_ERR_HSM;
 freeze:
	ata_port_freeze(ap);
}
469
470static irqreturn_t sil_interrupt(int irq, void *dev_instance)
471{
472	struct ata_host *host = dev_instance;
473	void __iomem *mmio_base = host->iomap[SIL_MMIO_BAR];
474	int handled = 0;
475	int i;
476
477	spin_lock(&host->lock);
478
479	for (i = 0; i < host->n_ports; i++) {
480		struct ata_port *ap = host->ports[i];
481		u32 bmdma2 = readl(mmio_base + sil_port[ap->port_no].bmdma2);
482
483		if (unlikely(!ap || ap->flags & ATA_FLAG_DISABLED))
484			continue;
485
486		/* turn off SATA_IRQ if not supported */
487		if (ap->flags & SIL_FLAG_NO_SATA_IRQ)
488			bmdma2 &= ~SIL_DMA_SATA_IRQ;
489
490		if (bmdma2 == 0xffffffff ||
491		    !(bmdma2 & (SIL_DMA_COMPLETE | SIL_DMA_SATA_IRQ)))
492			continue;
493
494		sil_host_intr(ap, bmdma2);
495		handled = 1;
496	}
497
498	spin_unlock(&host->lock);
499
500	return IRQ_RETVAL(handled);
501}
502
/**
 *	sil_freeze - mask interrupts for a frozen port
 *	@ap: port to freeze
 *
 *	Disables the port's SATA interrupt enable register and masks its
 *	IDE interrupt in SYSCFG so a wedged port cannot storm IRQs.
 */
static void sil_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
	u32 tmp;

	/* global IRQ mask doesn't block SATA IRQ, turn off explicitly */
	writel(0, mmio_base + sil_port[ap->port_no].sien);

	/* plug IRQ */
	tmp = readl(mmio_base + SIL_SYSCFG);
	tmp |= SIL_MASK_IDE0_INT << ap->port_no;
	writel(tmp, mmio_base + SIL_SYSCFG);
	readl(mmio_base + SIL_SYSCFG);	/* flush */
}
517
/**
 *	sil_thaw - unmask interrupts for a thawed port
 *	@ap: port to thaw
 *
 *	Clears any pending IRQ state, then re-enables the port's SATA
 *	interrupt (unless the chip lacks it) and its IDE interrupt in
 *	SYSCFG.  Inverse of sil_freeze().
 */
static void sil_thaw(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
	u32 tmp;

	/* clear IRQ */
	ata_chk_status(ap);
	ata_bmdma_irq_clear(ap);

	/* turn on SATA IRQ if supported */
	if (!(ap->flags & SIL_FLAG_NO_SATA_IRQ))
		writel(SIL_SIEN_N, mmio_base + sil_port[ap->port_no].sien);

	/* turn on IRQ */
	tmp = readl(mmio_base + SIL_SYSCFG);
	tmp &= ~(SIL_MASK_IDE0_INT << ap->port_no);
	writel(tmp, mmio_base + SIL_SYSCFG);
}
536
537/**
538 *	sil_dev_config - Apply device/host-specific errata fixups
539 *	@dev: Device to be examined
540 *
541 *	After the IDENTIFY [PACKET] DEVICE step is complete, and a
542 *	device is known to be present, this function is called.
543 *	We apply two errata fixups which are specific to Silicon Image,
544 *	a Seagate and a Maxtor fixup.
545 *
546 *	For certain Seagate devices, we must limit the maximum sectors
547 *	to under 8K.
548 *
549 *	For certain Maxtor devices, we must not program the drive
550 *	beyond udma5.
551 *
552 *	Both fixups are unfairly pessimistic.  As soon as I get more
553 *	information on these errata, I will create a more exhaustive
554 *	list, and apply the fixups to only the specific
555 *	devices/hosts/firmwares that need it.
556 *
557 *	20040111 - Seagate drives affected by the Mod15Write bug are blacklisted
558 *	The Maxtor quirk is in the blacklist, but I'm keeping the original
559 *	pessimistic fix for the following reasons...
560 *	- There seems to be less info on it, only one device gleaned off the
561 *	Windows	driver, maybe only one is affected.  More info would be greatly
562 *	appreciated.
563 *	- But then again UDMA5 is hardly anything to complain about
564 */
565static void sil_dev_config(struct ata_device *dev)
566{
567	struct ata_port *ap = dev->link->ap;
568	int print_info = ap->link.eh_context.i.flags & ATA_EHI_PRINTINFO;
569	unsigned int n, quirks = 0;
570	unsigned char model_num[ATA_ID_PROD_LEN + 1];
571
572	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
573
574	for (n = 0; sil_blacklist[n].product; n++)
575		if (!strcmp(sil_blacklist[n].product, model_num)) {
576			quirks = sil_blacklist[n].quirk;
577			break;
578		}
579
580	/* limit requests to 15 sectors */
581	if (slow_down ||
582	    ((ap->flags & SIL_FLAG_MOD15WRITE) &&
583	     (quirks & SIL_QUIRK_MOD15WRITE))) {
584		if (print_info)
585			ata_dev_printk(dev, KERN_INFO, "applying Seagate "
586				       "errata fix (mod15write workaround)\n");
587		dev->max_sectors = 15;
588		return;
589	}
590
591	/* limit to udma5 */
592	if (quirks & SIL_QUIRK_UDMA5MAX) {
593		if (print_info)
594			ata_dev_printk(dev, KERN_INFO, "applying Maxtor "
595				       "errata fix %s\n", model_num);
596		dev->udma_mask &= ATA_UDMA5;
597		return;
598	}
599}
600
/**
 *	sil_init_controller - program chip-wide state
 *	@host: host to initialize
 *
 *	Sets up FIFO PCI bus arbitration from the PCI cache line size,
 *	applies the R_ERR-on-DMA-activate-FIS errata workaround where
 *	flagged, and enables interrupt steering on 4-port (3114) parts.
 *	Also called from the resume path to reprogram lost state.
 */
static void sil_init_controller(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	void __iomem *mmio_base = host->iomap[SIL_MMIO_BAR];
	u8 cls;
	u32 tmp;
	int i;

	/* Initialize FIFO PCI bus arbitration */
	cls = sil_get_device_cache_line(pdev);
	if (cls) {
		cls >>= 3;
		cls++;  /* cls = (line_size/8)+1 */
		for (i = 0; i < host->n_ports; i++)
			writew(cls << 8 | cls,
			       mmio_base + sil_port[i].fifo_cfg);
	} else
		dev_printk(KERN_WARNING, &pdev->dev,
			   "cache line size not set.  Driver may not function\n");

	/* Apply R_ERR on DMA activate FIS errata workaround */
	if (host->ports[0]->flags & SIL_FLAG_RERR_ON_DMA_ACT) {
		int cnt;

		for (i = 0, cnt = 0; i < host->n_ports; i++) {
			tmp = readl(mmio_base + sil_port[i].sfis_cfg);
			if ((tmp & 0x3) != 0x01)
				continue;
			if (!cnt)	/* print the notice only once */
				dev_printk(KERN_INFO, &pdev->dev,
					   "Applying R_ERR on DMA activate "
					   "FIS errata fix\n");
			writel(tmp & ~0x3, mmio_base + sil_port[i].sfis_cfg);
			cnt++;
		}
	}

	if (host->n_ports == 4) {
		/* flip the magic "make 4 ports work" bit */
		tmp = readl(mmio_base + sil_port[2].bmdma);
		if ((tmp & SIL_INTR_STEERING) == 0)
			writel(tmp | SIL_INTR_STEERING,
			       mmio_base + sil_port[2].bmdma);
	}
}
646
/**
 *	sil_init_one - PCI probe entry point
 *	@pdev: PCI device being probed
 *	@ent: matching entry from sil_pci_tbl (driver_data = board ID)
 *
 *	Allocates the ata_host, maps BAR5, sets DMA masks, wires up
 *	per-port MMIO addresses, programs the controller and activates
 *	the host.  All resources are device-managed (pcim_*), so error
 *	returns need no explicit cleanup.
 */
static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	int board_id = ent->driver_data;
	const struct ata_port_info *ppi[] = { &sil_port_info[board_id], NULL };
	struct ata_host *host;
	void __iomem *mmio_base;
	int n_ports, rc;
	unsigned int i;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host: only the 3114 has four ports */
	n_ports = 2;
	if (board_id == sil_3114)
		n_ports = 4;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	if (!host)
		return -ENOMEM;

	/* acquire resources and fill host */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << SIL_MMIO_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;
	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;

	mmio_base = host->iomap[SIL_MMIO_BAR];

	/* point each port's taskfile/ctl/bmdma/scr at its MMIO window */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_ioports *ioaddr = &host->ports[i]->ioaddr;

		ioaddr->cmd_addr = mmio_base + sil_port[i].tf;
		ioaddr->altstatus_addr =
		ioaddr->ctl_addr = mmio_base + sil_port[i].ctl;
		ioaddr->bmdma_addr = mmio_base + sil_port[i].bmdma;
		ioaddr->scr_addr = mmio_base + sil_port[i].scr;
		ata_std_ports(ioaddr);
	}

	/* initialize and activate */
	sil_init_controller(host);

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, sil_interrupt, IRQF_SHARED,
				 &sil_sht);
}
708
709#ifdef CONFIG_PM
710static int sil_pci_device_resume(struct pci_dev *pdev)
711{
712	struct ata_host *host = dev_get_drvdata(&pdev->dev);
713	int rc;
714
715	rc = ata_pci_device_do_resume(pdev);
716	if (rc)
717		return rc;
718
719	sil_init_controller(host);
720	ata_host_resume(host);
721
722	return 0;
723}
724#endif
725
726static int __init sil_init(void)
727{
728	return pci_register_driver(&sil_pci_driver);
729}
730
731static void __exit sil_exit(void)
732{
733	pci_unregister_driver(&sil_pci_driver);
734}
735
736
737module_init(sil_init);
738module_exit(sil_exit);
739