serverworks.c revision 8776168ca2151850164af1de5565d01f7b8b2c53
/*
 * Copyright (C) 1998-2000 Michel Aubry
 * Copyright (C) 1998-2000 Andrzej Krzysztofowicz
 * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
 * Copyright (C) 2007-2010 Bartlomiej Zolnierkiewicz
 * Portions copyright (c) 2001 Sun Microsystems
 *
 *
 * RCC/ServerWorks IDE driver for Linux
 *
 *   OSB4: `Open South Bridge' IDE Interface (fn 1)
 *         supports UDMA mode 2 (33 MB/s)
 *
 *   CSB5: `Champion South Bridge' IDE Interface (fn 1)
 *         all revisions support UDMA mode 4 (66 MB/s)
 *         revision A2.0 and up support UDMA mode 5 (100 MB/s)
 *
 *         *** The CSB5 does not provide ANY register ***
 *         *** to detect 80-conductor cable presence. ***
 *
 *   CSB6: `Champion South Bridge' IDE Interface (optional: third channel)
 *
 *   HT1000: AKA BCM5785 - Hypertransport Southbridge for Opteron systems. IDE
 *   controller same as the CSB6. Single channel ATA100 only.
 *
 * Documentation:
 *	Available under NDA only. Errata info very hard to get.
 *
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/ide.h>
#include <linux/init.h>

#include <asm/io.h>

#define DRV_NAME "serverworks"

#define SVWKS_CSB5_REVISION_NEW	0x92 /* min PCI_REVISION_ID for UDMA5 (A2.0) */
#define SVWKS_CSB6_REVISION	0xa0 /* min PCI_REVISION_ID for UDMA4 (A1.0) */

/* Seagate Barracuda ATA IV Family drives in UDMA mode 5
 * can overrun their FIFOs when used with the CSB5 */
static const char *svwks_bad_ata100[] = {
	"ST320011A",
	"ST340016A",
	"ST360021A",
	"ST380021A",
	NULL
};

static int check_in_drive_lists (ide_drive_t *drive, const char **list)
{
	char *m = (char *)&drive->id[ATA_ID_PROD];

	while (*list)
		if (!strcmp(*list++, m))
			return 1;
	return 0;
}

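/*
 * Build the mask of UDMA modes the drive may use.  The HT1000 and
 * pre-A2.0 CSB5 parts get fixed masks; otherwise the ceiling comes
 * from bits 0-1 of the UDMA control register at PCI config offset
 * 0x5A (programmed in init_chipset_svwks() below), and drives on the
 * svwks_bad_ata100 list are additionally capped at UDMA2.
 */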
static u8 svwks_udma_filter(ide_drive_t *drive)
{
	struct pci_dev *dev = to_pci_dev(drive->hwif->dev);

	if (dev->device == PCI_DEVICE_ID_SERVERWORKS_HT1000IDE) {
		return 0x1f;
	} else if (dev->revision < SVWKS_CSB5_REVISION_NEW) {
		return 0x07;
	} else {
		u8 btr = 0, mode, mask;

		pci_read_config_byte(dev, 0x5A, &btr);
		mode = btr & 0x3;

		/* If someone decides to do UDMA133 on CSB5 the same
		   issue will bite so be inclusive */
		if (mode > 2 && check_in_drive_lists(drive, svwks_bad_ata100))
			mode = 2;

		switch(mode) {
		case 3:	 mask = 0x3f; break;
		case 2:	 mask = 0x1f; break;
		case 1:	 mask = 0x07; break;
		default: mask = 0x00; break;
		}

		return mask;
	}
}

static u8 svwks_csb_check (struct pci_dev *dev)
{
	switch (dev->device) {
		case PCI_DEVICE_ID_SERVERWORKS_CSB5IDE:
		case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE:
		case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2:
		case PCI_DEVICE_ID_SERVERWORKS_HT1000IDE:
			return 1;
		default:
			break;
	}
	return 0;
}

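/*
 * Set the PIO timing for one drive: each drive has its own timing byte
 * (registers 0x40-0x43), and on CSB5/CSB6/HT1000 the selected PIO mode
 * is also recorded, one nibble per drive, in the word at 0x4A.
 */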
static void svwks_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
	static const u8 pio_modes[] = { 0x5d, 0x47, 0x34, 0x22, 0x20 };
	static const u8 drive_pci[] = { 0x41, 0x40, 0x43, 0x42 };

	struct pci_dev *dev = to_pci_dev(hwif->dev);
	const u8 pio = drive->pio_mode - XFER_PIO_0;

	pci_write_config_byte(dev, drive_pci[drive->dn], pio_modes[pio]);

	if (svwks_csb_check(dev)) {
		u16 csb_pio = 0;

		pci_read_config_word(dev, 0x4a, &csb_pio);

		csb_pio &= ~(0x0f << (4 * drive->dn));
		csb_pio |= (pio << (4 * drive->dn));

		pci_write_config_word(dev, 0x4a, csb_pio);
	}
}

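/*
 * Set the DMA timing for one drive: the per-drive MWDMA timing bytes
 * live in registers 0x44-0x47, the UDMA timing nibbles in 0x56
 * (primary channel) / 0x57 (secondary), and the per-drive UDMA enable
 * bits in register 0x54.
 */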
static void svwks_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
	static const u8 udma_modes[]		= { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05 };
	static const u8 dma_modes[]		= { 0x77, 0x21, 0x20 };
	static const u8 drive_pci2[]		= { 0x45, 0x44, 0x47, 0x46 };

	struct pci_dev *dev	= to_pci_dev(hwif->dev);
	const u8 speed		= drive->dma_mode;
	u8 unit			= drive->dn & 1;

	u8 ultra_enable	 = 0, ultra_timing = 0, dma_timing = 0;

	pci_read_config_byte(dev, (0x56|hwif->channel), &ultra_timing);
	pci_read_config_byte(dev, 0x54, &ultra_enable);

	ultra_timing	&= ~(0x0F << (4*unit));
	ultra_enable	&= ~(0x01 << drive->dn);

	if (speed >= XFER_UDMA_0) {
		dma_timing   |= dma_modes[2];
		ultra_timing |= (udma_modes[speed - XFER_UDMA_0] << (4 * unit));
		ultra_enable |= (0x01 << drive->dn);
	} else if (speed >= XFER_MW_DMA_0)
		dma_timing   |= dma_modes[speed - XFER_MW_DMA_0];

	pci_write_config_byte(dev, drive_pci2[drive->dn], dma_timing);
	pci_write_config_byte(dev, (0x56|hwif->channel), ultra_timing);
	pci_write_config_byte(dev, 0x54, ultra_enable);
}

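/*
 * One-time chipset setup: force the latency timer, enable UDMA/33 on
 * the OSB4 through its ISA-bridge function, and program the UDMA
 * control register (0x5A) on CSB5/CSB6/HT1000 according to function
 * number and chip revision.
 */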
static int init_chipset_svwks(struct pci_dev *dev)
{
	unsigned int reg;
	u8 btr;

	/* force Master Latency Timer value to 64 PCICLKs */
	pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x40);

	/* OSB4 : South Bridge and IDE */
	if (dev->device == PCI_DEVICE_ID_SERVERWORKS_OSB4IDE) {
		struct pci_dev *isa_dev =
			pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
					PCI_DEVICE_ID_SERVERWORKS_OSB4, NULL);
		if (isa_dev) {
			pci_read_config_dword(isa_dev, 0x64, &reg);
			reg &= ~0x00002000; /* disable 600ns interrupt mask */
			if(!(reg & 0x00004000))
				printk(KERN_DEBUG DRV_NAME " %s: UDMA not BIOS "
					"enabled.\n", pci_name(dev));
			reg |=  0x00004000; /* enable UDMA/33 support */
			pci_write_config_dword(isa_dev, 0x64, reg);
			pci_dev_put(isa_dev);
		}
	}

	/* setup CSB5/CSB6 : South Bridge and IDE option RAID */
	else if ((dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE) ||
		 (dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE) ||
		 (dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2)) {

		/* Third Channel Test */
		if (!(PCI_FUNC(dev->devfn) & 1)) {
			struct pci_dev * findev = NULL;
			u32 reg4c = 0;
			findev = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
				PCI_DEVICE_ID_SERVERWORKS_CSB5, NULL);
			if (findev) {
				pci_read_config_dword(findev, 0x4C, &reg4c);
				reg4c &= ~0x000007FF;
				reg4c |=  0x00000040;
				reg4c |=  0x00000020;
				pci_write_config_dword(findev, 0x4C, reg4c);
				pci_dev_put(findev);
			}
			outb_p(0x06, 0x0c00);
			dev->irq = inb_p(0x0c01);
		} else {
			struct pci_dev * findev = NULL;
			u8 reg41 = 0;

			findev = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
					PCI_DEVICE_ID_SERVERWORKS_CSB6, NULL);
			if (findev) {
				pci_read_config_byte(findev, 0x41, &reg41);
				reg41 &= ~0x40;
				pci_write_config_byte(findev, 0x41, reg41);
				pci_dev_put(findev);
			}
			/*
			 * This is a device pin issue on the CSB6: because a
			 * RAID mode was planned, early versions of the
			 * chipset require the interrupt pin to be set, and
			 * it is a compatibility mode issue.
			 */
			if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE)
				dev->irq = 0;
		}
//		pci_read_config_dword(dev, 0x40, &pioreg)
//		pci_write_config_dword(dev, 0x40, 0x99999999);
//		pci_read_config_dword(dev, 0x44, &dmareg);
//		pci_write_config_dword(dev, 0x44, 0xFFFFFFFF);
		/* setup the UDMA Control register
		 *
		 * 1. clear bit 6 to enable DMA
		 * 2. enable DMA modes with bits 0-1
		 * 	00 : legacy
		 * 	01 : udma2
		 * 	10 : udma2/udma4
		 * 	11 : udma2/udma4/udma5
		 */
		pci_read_config_byte(dev, 0x5A, &btr);
		btr &= ~0x40;
		if (!(PCI_FUNC(dev->devfn) & 1))
			btr |= 0x2;
		else
			btr |= (dev->revision >= SVWKS_CSB5_REVISION_NEW) ? 0x3 : 0x2;
		pci_write_config_byte(dev, 0x5A, btr);
	}
	/* Setup HT1000 SouthBridge Controller - Single Channel Only */
	else if (dev->device == PCI_DEVICE_ID_SERVERWORKS_HT1000IDE) {
		pci_read_config_byte(dev, 0x5A, &btr);
		btr &= ~0x40;
		btr |= 0x3;
		pci_write_config_byte(dev, 0x5A, btr);
	}

	return 0;
}

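/* Boards whose subsystem vendor is ServerWorks itself are assumed to
 * be wired with 80-conductor cables. */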
static u8 ata66_svwks_svwks(ide_hwif_t *hwif)
{
	return ATA_CBL_PATA80;
}

/* On Dell PowerEdge servers with a CSB5/CSB6, the top two bits
 * of the subsystem device ID indicate presence of an 80-pin cable.
 * Bit 15 clear = secondary IDE channel does not have 80-pin cable.
 * Bit 15 set   = secondary IDE channel has 80-pin cable.
 * Bit 14 clear = primary IDE channel does not have 80-pin cable.
 * Bit 14 set   = primary IDE channel has 80-pin cable.
 */
static u8 ata66_svwks_dell(ide_hwif_t *hwif)
{
	struct pci_dev *dev = to_pci_dev(hwif->dev);

	if (dev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
	    dev->vendor	== PCI_VENDOR_ID_SERVERWORKS &&
	    (dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE ||
	     dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE))
		return ((1 << (hwif->channel + 14)) &
			dev->subsystem_device) ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
	return ATA_CBL_PATA40;
}

/* Sun Cobalt Alpine hardware avoids the 80-pin cable
 * detect issue by attaching the drives directly to the board.
 * This check follows the Dell precedent (how scary is that?!)
 *
 * WARNING: this only works on Alpine hardware!
 */
static u8 ata66_svwks_cobalt(ide_hwif_t *hwif)
{
	struct pci_dev *dev = to_pci_dev(hwif->dev);

	if (dev->subsystem_vendor == PCI_VENDOR_ID_SUN &&
	    dev->vendor	== PCI_VENDOR_ID_SERVERWORKS &&
	    dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE)
		return ((1 << (hwif->channel + 14)) &
			dev->subsystem_device) ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
	return ATA_CBL_PATA40;
}

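/*
 * The chips themselves cannot report cable type (see the CSB5 note in
 * the header), so dispatch on the subsystem vendor: ServerWorks, Dell
 * and Sun boards each get their own heuristic, and remaining CSB6
 * parts default to an 80-conductor cable.
 */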
static u8 svwks_cable_detect(ide_hwif_t *hwif)
{
	struct pci_dev *dev = to_pci_dev(hwif->dev);

	/* Server Works */
	if (dev->subsystem_vendor == PCI_VENDOR_ID_SERVERWORKS)
		return ata66_svwks_svwks (hwif);

	/* Dell PowerEdge */
	if (dev->subsystem_vendor == PCI_VENDOR_ID_DELL)
		return ata66_svwks_dell (hwif);

	/* Cobalt Alpine */
	if (dev->subsystem_vendor == PCI_VENDOR_ID_SUN)
		return ata66_svwks_cobalt (hwif);

	/* Per Specified Design by OEM, and ASIC Architect */
	if ((dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE) ||
	    (dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2))
		return ATA_CBL_PATA80;

	return ATA_CBL_PATA40;
}

static const struct ide_port_ops osb4_port_ops = {
	.set_pio_mode		= svwks_set_pio_mode,
	.set_dma_mode		= svwks_set_dma_mode,
};

static const struct ide_port_ops svwks_port_ops = {
	.set_pio_mode		= svwks_set_pio_mode,
	.set_dma_mode		= svwks_set_dma_mode,
	.udma_filter		= svwks_udma_filter,
	.cable_detect		= svwks_cable_detect,
};

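/*
 * One entry per supported chip; the array index doubles as the
 * driver_data value in svwks_pci_tbl[] below and is used by
 * svwks_init_one() to apply per-chip quirks.
 */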
static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
	{	/* 0: OSB4 */
		.name		= DRV_NAME,
		.init_chipset	= init_chipset_svwks,
		.port_ops	= &osb4_port_ops,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= 0x00, /* UDMA is problematic on OSB4 */
	},
	{	/* 1: CSB5 */
		.name		= DRV_NAME,
		.init_chipset	= init_chipset_svwks,
		.port_ops	= &svwks_port_ops,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA5,
	},
	{	/* 2: CSB6 */
		.name		= DRV_NAME,
		.init_chipset	= init_chipset_svwks,
		.port_ops	= &svwks_port_ops,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA5,
	},
	{	/* 3: CSB6-2 */
		.name		= DRV_NAME,
		.init_chipset	= init_chipset_svwks,
		.port_ops	= &svwks_port_ops,
		.host_flags	= IDE_HFLAG_SINGLE,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA5,
	},
	{	/* 4: HT1000 */
		.name		= DRV_NAME,
		.init_chipset	= init_chipset_svwks,
		.port_ops	= &svwks_port_ops,
		.host_flags	= IDE_HFLAG_SINGLE,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA5,
	}
};

/**
 *	svwks_init_one	-	called when an OSB/CSB is found
 *	@dev: the svwks device
 *	@id: the matching pci id
 *
 *	Called when the PCI registration layer (or the IDE initialization)
 *	finds a device matching our IDE device tables.
 */

static int __devinit svwks_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct ide_port_info d;
	u8 idx = id->driver_data;

	d = serverworks_chipsets[idx];

	if (idx == 1)
		d.host_flags |= IDE_HFLAG_CLEAR_SIMPLEX;
	else if (idx == 2 || idx == 3) {
		if ((PCI_FUNC(dev->devfn) & 1) == 0) {
			if (pci_resource_start(dev, 0) != 0x01f1)
				d.host_flags |= IDE_HFLAG_NON_BOOTABLE;
			d.host_flags |= IDE_HFLAG_SINGLE;
		} else
			d.host_flags &= ~IDE_HFLAG_SINGLE;
	}

	return ide_pci_init_one(dev, &d, NULL);
}

static const struct pci_device_id svwks_pci_tbl[] = {
	{ PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_OSB4IDE),   0 },
	{ PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5IDE),   1 },
	{ PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6IDE),   2 },
	{ PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2),  3 },
	{ PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000IDE), 4 },
	{ 0, },
};
MODULE_DEVICE_TABLE(pci, svwks_pci_tbl);

static struct pci_driver svwks_pci_driver = {
	.name		= "Serverworks_IDE",
	.id_table	= svwks_pci_tbl,
	.probe		= svwks_init_one,
	.remove		= ide_pci_remove,
	.suspend	= ide_pci_suspend,
	.resume		= ide_pci_resume,
};

static int __init svwks_ide_init(void)
{
	return ide_pci_register_driver(&svwks_pci_driver);
}

static void __exit svwks_ide_exit(void)
{
	pci_unregister_driver(&svwks_pci_driver);
}

module_init(svwks_ide_init);
module_exit(svwks_ide_exit);

MODULE_AUTHOR("Michel Aubry, Andrzej Krzysztofowicz, Andre Hedrick, Bartlomiej Zolnierkiewicz");
MODULE_DESCRIPTION("PCI driver module for Serverworks OSB4/CSB5/CSB6 IDE");
MODULE_LICENSE("GPL");
