i5100_edac.c revision 178d5a742291976d13bff55fa2b130879d4510de
/*
 * Intel 5100 Memory Controllers kernel module
 *
 * This file may be distributed under the terms of the
 * GNU General Public License.
 *
 * This module is based on the following document:
 *
 * Intel 5100X Chipset Memory Controller Hub (MCH) - Datasheet
 *      http://download.intel.com/design/chipsets/datashts/318378.pdf
 *
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/edac.h>
#include <linux/delay.h>
#include <linux/mmzone.h>

#include "edac_core.h"

/* register addresses and bit field accessors... */

/* device 16, func 1 */
#define I5100_MC		0x40	/* Memory Control Register */
#define		I5100_MC_ERRDETEN(a)	((a) >> 5 & 1)
#define I5100_MS		0x44	/* Memory Status Register */
#define I5100_SPDDATA		0x48	/* Serial Presence Detect Status Reg */
#define		I5100_SPDDATA_RDO(a)	((a) >> 15 & 1)
#define		I5100_SPDDATA_SBE(a)	((a) >> 13 & 1)
#define		I5100_SPDDATA_BUSY(a)	((a) >> 12 & 1)
#define		I5100_SPDDATA_DATA(a)	((a)       & ((1 << 8) - 1))
#define I5100_SPDCMD		0x4c	/* Serial Presence Detect Command Reg */
#define		I5100_SPDCMD_DTI(a)	(((a) & ((1 << 4) - 1)) << 28)
#define		I5100_SPDCMD_CKOVRD(a)	(((a) & 1)              << 27)
#define		I5100_SPDCMD_SA(a)	(((a) & ((1 << 3) - 1)) << 24)
#define		I5100_SPDCMD_BA(a)	(((a) & ((1 << 8) - 1)) << 16)
#define		I5100_SPDCMD_DATA(a)	(((a) & ((1 << 8) - 1)) <<  8)
#define		I5100_SPDCMD_CMD(a)	((a) & 1)
#define I5100_TOLM		0x6c	/* Top of Low Memory */
#define		I5100_TOLM_TOLM(a)	((a) >> 12 & ((1 << 4) - 1))
#define I5100_MIR0		0x80	/* Memory Interleave Range 0 */
#define I5100_MIR1		0x84	/* Memory Interleave Range 1 */
#define I5100_AMIR_0		0x8c	/* Adjusted Memory Interleave Range 0 */
#define I5100_AMIR_1		0x90	/* Adjusted Memory Interleave Range 1 */
#define		I5100_MIR_LIMIT(a)	((a) >> 4 & ((1 << 12) - 1))
#define		I5100_MIR_WAY1(a)	((a) >> 1 & 1)
#define		I5100_MIR_WAY0(a)	((a)      & 1)
#define I5100_FERR_NF_MEM	0xa0	/* MC First Non Fatal Errors */
#define		I5100_FERR_NF_MEM_CHAN_INDX(a)	((a) >> 28 & 1)
#define		I5100_FERR_NF_MEM_SPD_MASK	(1 << 18)
#define		I5100_FERR_NF_MEM_M16ERR_MASK	(1 << 16)
#define		I5100_FERR_NF_MEM_M15ERR_MASK	(1 << 15)
#define		I5100_FERR_NF_MEM_M14ERR_MASK	(1 << 14)
#define		I5100_FERR_NF_MEM_M12ERR_MASK	(1 << 12)
#define		I5100_FERR_NF_MEM_M11ERR_MASK	(1 << 11)
#define		I5100_FERR_NF_MEM_M10ERR_MASK	(1 << 10)
#define		I5100_FERR_NF_MEM_M6ERR_MASK	(1 << 6)
#define		I5100_FERR_NF_MEM_M5ERR_MASK	(1 << 5)
#define		I5100_FERR_NF_MEM_M4ERR_MASK	(1 << 4)
#define		I5100_FERR_NF_MEM_M1ERR_MASK	1
#define		I5100_FERR_NF_MEM_ANY_MASK	\
			(I5100_FERR_NF_MEM_M16ERR_MASK | \
			I5100_FERR_NF_MEM_M15ERR_MASK | \
			I5100_FERR_NF_MEM_M14ERR_MASK | \
			I5100_FERR_NF_MEM_M12ERR_MASK | \
			I5100_FERR_NF_MEM_M11ERR_MASK | \
			I5100_FERR_NF_MEM_M10ERR_MASK | \
			I5100_FERR_NF_MEM_M6ERR_MASK | \
			I5100_FERR_NF_MEM_M5ERR_MASK | \
			I5100_FERR_NF_MEM_M4ERR_MASK | \
			I5100_FERR_NF_MEM_M1ERR_MASK)
#define		I5100_FERR_NF_MEM_ANY(a)  ((a) & I5100_FERR_NF_MEM_ANY_MASK)
#define	I5100_NERR_NF_MEM	0xa4	/* MC Next Non-Fatal Errors */
#define		I5100_NERR_NF_MEM_ANY(a)  I5100_FERR_NF_MEM_ANY(a)
#define I5100_EMASK_MEM		0xa8	/* MC Error Mask Register */

/* device 21 and 22, func 0 */
#define I5100_MTR_0	0x154	/* Memory Technology Registers 0-3 */
#define I5100_DMIR	0x15c	/* DIMM Interleave Range */
#define		I5100_DMIR_LIMIT(a)	((a) >> 16 & ((1 << 11) - 1))
#define		I5100_DMIR_RANK(a, i)	((a) >> (4 * i) & ((1 <<  2) - 1))
#define I5100_MTR_4	0x1b0	/* Memory Technology Registers 4,5 */
#define		I5100_MTR_PRESENT(a)	((a) >> 10 & 1)
#define		I5100_MTR_ETHROTTLE(a)	((a) >>  9 & 1)
#define		I5100_MTR_WIDTH(a)	((a) >>  8 & 1)
#define		I5100_MTR_NUMBANK(a)	((a) >>  6 & 1)
#define		I5100_MTR_NUMROW(a)	((a) >>  2 & ((1 << 2) - 1))
#define		I5100_MTR_NUMCOL(a)	((a)       & ((1 << 2) - 1))
#define	I5100_VALIDLOG	0x18c	/* Valid Log Markers */
#define		I5100_VALIDLOG_REDMEMVALID(a)	((a) >> 2 & 1)
#define		I5100_VALIDLOG_RECMEMVALID(a)	((a) >> 1 & 1)
#define		I5100_VALIDLOG_NRECMEMVALID(a)	((a)      & 1)
#define	I5100_NRECMEMA	0x190	/* Non-Recoverable Memory Error Log Reg A */
#define		I5100_NRECMEMA_MERR(a)		((a) >> 15 & ((1 << 5) - 1))
#define		I5100_NRECMEMA_BANK(a)		((a) >> 12 & ((1 << 3) - 1))
#define		I5100_NRECMEMA_RANK(a)		((a) >>  8 & ((1 << 3) - 1))
#define		I5100_NRECMEMA_DM_BUF_ID(a)	((a)       & ((1 << 8) - 1))
#define	I5100_NRECMEMB	0x194	/* Non-Recoverable Memory Error Log Reg B */
#define		I5100_NRECMEMB_CAS(a)		((a) >> 16 & ((1 << 13) - 1))
#define		I5100_NRECMEMB_RAS(a)		((a)       & ((1 << 16) - 1))
#define	I5100_REDMEMA	0x198	/* Recoverable Memory Data Error Log Reg A */
#define		I5100_REDMEMA_SYNDROME(a)	(a)
#define	I5100_REDMEMB	0x19c	/* Recoverable Memory Data Error Log Reg B */
#define		I5100_REDMEMB_ECC_LOCATOR(a)	((a) & ((1 << 18) - 1))
#define	I5100_RECMEMA	0x1a0	/* Recoverable Memory Error Log Reg A */
#define		I5100_RECMEMA_MERR(a)		I5100_NRECMEMA_MERR(a)
#define		I5100_RECMEMA_BANK(a)		I5100_NRECMEMA_BANK(a)
#define		I5100_RECMEMA_RANK(a)		I5100_NRECMEMA_RANK(a)
#define		I5100_RECMEMA_DM_BUF_ID(a)	I5100_NRECMEMA_DM_BUF_ID(a)
#define	I5100_RECMEMB	0x1a4	/* Recoverable Memory Error Log Reg B */
#define		I5100_RECMEMB_CAS(a)		I5100_NRECMEMB_CAS(a)
#define		I5100_RECMEMB_RAS(a)		I5100_NRECMEMB_RAS(a)

/* some generic limits */
#define I5100_MAX_RANKS_PER_CTLR	6
#define I5100_MAX_CTLRS			2
#define I5100_MAX_RANKS_PER_DIMM	4
#define I5100_DIMM_ADDR_LINES		(6 - 3)	/* 64 bits / 8 bits per byte */
#define I5100_MAX_DIMM_SLOTS_PER_CTLR	4
#define I5100_MAX_RANK_INTERLEAVE	4
#define I5100_MAX_DMIRS			5

struct i5100_priv {
	/* ranks on each dimm -- 0 maps to not present -- obtained via SPD */
	int dimm_numrank[I5100_MAX_CTLRS][I5100_MAX_DIMM_SLOTS_PER_CTLR];

	/*
	 * mainboard chip select map -- maps i5100 chip selects to
	 * DIMM slot chip selects.  In the case of only 4 ranks per
	 * controller, the mapping is fairly obvious but not unique.
	 * we map -1 -> NC and assume both controllers use the same
	 * map...
	 *
	 */
	int dimm_csmap[I5100_MAX_DIMM_SLOTS_PER_CTLR][I5100_MAX_RANKS_PER_DIMM];

	/* memory interleave range */
	struct {
		u64	 limit;
		unsigned way[2];
	} mir[I5100_MAX_CTLRS];

	/* adjusted memory interleave range register */
	unsigned amir[I5100_MAX_CTLRS];

	/* dimm interleave range */
	struct {
		unsigned rank[I5100_MAX_RANK_INTERLEAVE];
		u64	 limit;
	} dmir[I5100_MAX_CTLRS][I5100_MAX_DMIRS];

	/* memory technology registers... */
	struct {
		unsigned present;	/* 0 or 1 */
		unsigned ethrottle;	/* 0 or 1 */
		unsigned width;		/* 4 or 8 bits  */
		unsigned numbank;	/* 2 or 3 lines */
		unsigned numrow;	/* 13 .. 16 lines */
		unsigned numcol;	/* 11 .. 12 lines */
	} mtr[I5100_MAX_CTLRS][I5100_MAX_RANKS_PER_CTLR];

	u64 tolm;		/* top of low memory in bytes */
	unsigned ranksperctlr;	/* number of ranks per controller */

	struct pci_dev *mc;	/* device 16 func 1 */
	struct pci_dev *ch0mm;	/* device 21 func 0 */
	struct pci_dev *ch1mm;	/* device 22 func 0 */
};

/* map a rank/ctlr to a slot number on the mainboard */
static int i5100_rank_to_slot(const struct mem_ctl_info *mci,
			      int ctlr, int rank)
{
	const struct i5100_priv *priv = mci->pvt_info;
	int i;

	for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CTLR; i++) {
		int j;
		const int numrank = priv->dimm_numrank[ctlr][i];

		for (j = 0; j < numrank; j++)
			if (priv->dimm_csmap[i][j] == rank)
				return i * 2 + ctlr;
	}

	return -1;
}

/*
 * The processor bus memory addresses are broken into three
 * pieces, whereas the controller addresses are contiguous.
 *
 * here we map from the controller address space to the
 * processor address space:
 *
 *    Processor Address Space
 * +-----------------------------+
 * |                             |
 * |  "high" memory addresses    |
 * |                             |
 * +-----------------------------+ <- 4GB on the i5100
 * |                             |
 * |  other non-memory addresses |
 * |                             |
 * +-----------------------------+ <- top of low memory
 * |                             |
 * | "low" memory addresses      |
 * |                             |
 * +-----------------------------+
 */
static unsigned long i5100_ctl_page_to_phys(struct mem_ctl_info *mci,
					    unsigned long cntlr_addr)
{
	const struct i5100_priv *priv = mci->pvt_info;

	if (cntlr_addr < priv->tolm)
		return cntlr_addr;

	return (1ULL << 32) + (cntlr_addr - priv->tolm);
}

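/*
 * Map the lowest set bit of a FERR_NF_MEM/NERR_NF_MEM style error word to a
 * human readable string; returns "none" if no recognized bit is set.
 */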
static const char *i5100_err_msg(unsigned err)
{
	const char *merrs[] = {
		"unknown", /* 0 */
		"uncorrectable data ECC on replay", /* 1 */
		"unknown", /* 2 */
		"unknown", /* 3 */
		"aliased uncorrectable demand data ECC", /* 4 */
		"aliased uncorrectable spare-copy data ECC", /* 5 */
		"aliased uncorrectable patrol data ECC", /* 6 */
		"unknown", /* 7 */
		"unknown", /* 8 */
		"unknown", /* 9 */
		"non-aliased uncorrectable demand data ECC", /* 10 */
		"non-aliased uncorrectable spare-copy data ECC", /* 11 */
		"non-aliased uncorrectable patrol data ECC", /* 12 */
		"unknown", /* 13 */
		"correctable demand data ECC", /* 14 */
		"correctable spare-copy data ECC", /* 15 */
		"correctable patrol data ECC", /* 16 */
		"unknown", /* 17 */
		"SPD protocol error", /* 18 */
		"unknown", /* 19 */
		"spare copy initiated", /* 20 */
		"spare copy completed", /* 21 */
	};
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(merrs); i++)
		if (1 << i & err)
			return merrs[i];

	return "none";
}

/* convert csrow index into a rank (per controller -- 0..5) */
static int i5100_csrow_to_rank(const struct mem_ctl_info *mci, int csrow)
{
	const struct i5100_priv *priv = mci->pvt_info;

	return csrow % priv->ranksperctlr;
}

/* convert csrow index into a controller (0..1) */
static int i5100_csrow_to_cntlr(const struct mem_ctl_info *mci, int csrow)
{
	const struct i5100_priv *priv = mci->pvt_info;

	return csrow / priv->ranksperctlr;
}

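/* convert a controller/rank pair back into its csrow index */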
static unsigned i5100_rank_to_csrow(const struct mem_ctl_info *mci,
				    int ctlr, int rank)
{
	const struct i5100_priv *priv = mci->pvt_info;

	return ctlr * priv->ranksperctlr + rank;
}

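/* log a correctable error and bump the EDAC core counters for its csrow */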
static void i5100_handle_ce(struct mem_ctl_info *mci,
			    int ctlr,
			    unsigned bank,
			    unsigned rank,
			    unsigned long syndrome,
			    unsigned cas,
			    unsigned ras,
			    const char *msg)
{
	const int csrow = i5100_rank_to_csrow(mci, ctlr, rank);

	printk(KERN_ERR
		"CE ctlr %d, bank %u, rank %u, syndrome 0x%lx, "
		"cas %u, ras %u, csrow %u, label \"%s\": %s\n",
		ctlr, bank, rank, syndrome, cas, ras,
		csrow, mci->csrows[csrow].channels[0].label, msg);

	mci->ce_count++;
	mci->csrows[csrow].ce_count++;
	mci->csrows[csrow].channels[0].ce_count++;
}

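/* log an uncorrectable error; only the mci and csrow UE counters are bumped */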
static void i5100_handle_ue(struct mem_ctl_info *mci,
			    int ctlr,
			    unsigned bank,
			    unsigned rank,
			    unsigned long syndrome,
			    unsigned cas,
			    unsigned ras,
			    const char *msg)
{
	const int csrow = i5100_rank_to_csrow(mci, ctlr, rank);

	printk(KERN_ERR
		"UE ctlr %d, bank %u, rank %u, syndrome 0x%lx, "
		"cas %u, ras %u, csrow %u, label \"%s\": %s\n",
		ctlr, bank, rank, syndrome, cas, ras,
		csrow, mci->csrows[csrow].channels[0].label, msg);

	mci->ue_count++;
	mci->csrows[csrow].ue_count++;
}

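/*
 * Decode the valid error logs (REDMEM/RECMEM/NRECMEM) for one channel,
 * report them as CE/UE events, then write the flags back to VALIDLOG to
 * acknowledge them so new errors can be latched.
 */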
static void i5100_read_log(struct mem_ctl_info *mci, int ctlr,
			   u32 ferr, u32 nerr)
{
	struct i5100_priv *priv = mci->pvt_info;
	struct pci_dev *pdev = (ctlr) ? priv->ch1mm : priv->ch0mm;
	u32 dw;
	u32 dw2;
	unsigned syndrome = 0;
	unsigned ecc_loc = 0;
	unsigned merr;
	unsigned bank;
	unsigned rank;
	unsigned cas;
	unsigned ras;

	pci_read_config_dword(pdev, I5100_VALIDLOG, &dw);

	if (I5100_VALIDLOG_REDMEMVALID(dw)) {
		pci_read_config_dword(pdev, I5100_REDMEMA, &dw2);
		syndrome = I5100_REDMEMA_SYNDROME(dw2);
		pci_read_config_dword(pdev, I5100_REDMEMB, &dw2);
		ecc_loc = I5100_REDMEMB_ECC_LOCATOR(dw2);
	}

	if (I5100_VALIDLOG_RECMEMVALID(dw)) {
		const char *msg;

		pci_read_config_dword(pdev, I5100_RECMEMA, &dw2);
		merr = I5100_RECMEMA_MERR(dw2);
		bank = I5100_RECMEMA_BANK(dw2);
		rank = I5100_RECMEMA_RANK(dw2);

		pci_read_config_dword(pdev, I5100_RECMEMB, &dw2);
		cas = I5100_RECMEMB_CAS(dw2);
		ras = I5100_RECMEMB_RAS(dw2);

		/* FIXME:  not really sure if this is what merr is...
		 */
		if (!merr)
			msg = i5100_err_msg(ferr);
		else
			msg = i5100_err_msg(nerr);

		i5100_handle_ce(mci, ctlr, bank, rank, syndrome, cas, ras, msg);
	}

	if (I5100_VALIDLOG_NRECMEMVALID(dw)) {
		const char *msg;

		pci_read_config_dword(pdev, I5100_NRECMEMA, &dw2);
		merr = I5100_NRECMEMA_MERR(dw2);
		bank = I5100_NRECMEMA_BANK(dw2);
		rank = I5100_NRECMEMA_RANK(dw2);

		pci_read_config_dword(pdev, I5100_NRECMEMB, &dw2);
		cas = I5100_NRECMEMB_CAS(dw2);
		ras = I5100_NRECMEMB_RAS(dw2);

		/* FIXME:  not really sure if this is what merr is...
		 */
		if (!merr)
			msg = i5100_err_msg(ferr);
		else
			msg = i5100_err_msg(nerr);

		i5100_handle_ue(mci, ctlr, bank, rank, syndrome, cas, ras, msg);
	}

	pci_write_config_dword(pdev, I5100_VALIDLOG, dw);
}

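/*
 * Polling entry point: if any non-fatal memory error is flagged in
 * FERR_NF_MEM, grab NERR_NF_MEM as well, clear both registers and decode
 * the per-channel error logs.
 */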
static void i5100_check_error(struct mem_ctl_info *mci)
{
	struct i5100_priv *priv = mci->pvt_info;
	u32 dw;

	pci_read_config_dword(priv->mc, I5100_FERR_NF_MEM, &dw);
	if (I5100_FERR_NF_MEM_ANY(dw)) {
		u32 dw2;

		pci_read_config_dword(priv->mc, I5100_NERR_NF_MEM, &dw2);
		if (dw2)
			pci_write_config_dword(priv->mc, I5100_NERR_NF_MEM,
					       dw2);
		pci_write_config_dword(priv->mc, I5100_FERR_NF_MEM, dw);

		i5100_read_log(mci, I5100_FERR_NF_MEM_CHAN_INDX(dw),
			       I5100_FERR_NF_MEM_ANY(dw),
			       I5100_NERR_NF_MEM_ANY(dw2));
	}
}

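/* like pci_get_device(), but also match on a specific PCI function number */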
static struct pci_dev *pci_get_device_func(unsigned vendor,
					   unsigned device,
					   unsigned func)
{
	struct pci_dev *ret = NULL;

	while (1) {
		ret = pci_get_device(vendor, device, ret);

		if (!ret)
			break;

		if (PCI_FUNC(ret->devfn) == func)
			break;
	}

	return ret;
}

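/*
 * Pages of memory behind a csrow: 2^(rank address lines) bytes, where the
 * rank supplies row + column + bank lines plus 3 more for the 8-byte data
 * width; returns 0 if no DIMM is present.
 */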
static unsigned long __devinit i5100_npages(struct mem_ctl_info *mci,
					    int csrow)
{
	struct i5100_priv *priv = mci->pvt_info;
	const unsigned ctlr_rank = i5100_csrow_to_rank(mci, csrow);
	const unsigned ctlr = i5100_csrow_to_cntlr(mci, csrow);
	unsigned addr_lines;

	/* dimm present? */
	if (!priv->mtr[ctlr][ctlr_rank].present)
		return 0ULL;

	addr_lines =
		I5100_DIMM_ADDR_LINES +
		priv->mtr[ctlr][ctlr_rank].numcol +
		priv->mtr[ctlr][ctlr_rank].numrow +
		priv->mtr[ctlr][ctlr_rank].numbank;

	return (unsigned long)
		((unsigned long long) (1ULL << addr_lines) / PAGE_SIZE);
}

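/*
 * Cache the Memory Technology Registers for all six possible ranks on each
 * channel, decoding presence, throttling, device width and the row, column
 * and bank address line counts.
 */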
static void __devinit i5100_init_mtr(struct mem_ctl_info *mci)
{
	struct i5100_priv *priv = mci->pvt_info;
	struct pci_dev *mms[2] = { priv->ch0mm, priv->ch1mm };
	int i;

	for (i = 0; i < I5100_MAX_CTLRS; i++) {
		int j;
		struct pci_dev *pdev = mms[i];

		for (j = 0; j < I5100_MAX_RANKS_PER_CTLR; j++) {
			const unsigned addr =
				(j < 4) ? I5100_MTR_0 + j * 2 :
					  I5100_MTR_4 + (j - 4) * 2;
			u16 w;

			pci_read_config_word(pdev, addr, &w);

			priv->mtr[i][j].present = I5100_MTR_PRESENT(w);
			priv->mtr[i][j].ethrottle = I5100_MTR_ETHROTTLE(w);
			priv->mtr[i][j].width = 4 + 4 * I5100_MTR_WIDTH(w);
			priv->mtr[i][j].numbank = 2 + I5100_MTR_NUMBANK(w);
			priv->mtr[i][j].numrow = 13 + I5100_MTR_NUMROW(w);
			priv->mtr[i][j].numcol = 10 + I5100_MTR_NUMCOL(w);
		}
	}
}

/*
 * FIXME: make this into a real i2c adapter (so that dimm-decode
 * will work)?
 */
static int i5100_read_spd_byte(const struct mem_ctl_info *mci,
			       u8 ch, u8 slot, u8 addr, u8 *byte)
{
	struct i5100_priv *priv = mci->pvt_info;
	u16 w;
	u32 dw;
	unsigned long et;

	pci_read_config_word(priv->mc, I5100_SPDDATA, &w);
	if (I5100_SPDDATA_BUSY(w))
		return -1;

	dw =	I5100_SPDCMD_DTI(0xa) |
		I5100_SPDCMD_CKOVRD(1) |
		I5100_SPDCMD_SA(ch * 4 + slot) |
		I5100_SPDCMD_BA(addr) |
		I5100_SPDCMD_DATA(0) |
		I5100_SPDCMD_CMD(0);
	pci_write_config_dword(priv->mc, I5100_SPDCMD, dw);

	/* wait up to 100ms */
	et = jiffies + HZ / 10;
	udelay(100);
	while (1) {
		pci_read_config_word(priv->mc, I5100_SPDDATA, &w);
		if (!I5100_SPDDATA_BUSY(w))
			break;
		/* enforce the 100ms bound instead of spinning forever */
		if (time_after(jiffies, et))
			return -1;
		udelay(100);
	}

	if (!I5100_SPDDATA_RDO(w) || I5100_SPDDATA_SBE(w))
		return -1;

	*byte = I5100_SPDDATA_DATA(w);

	return 0;
}

/*
 * fill dimm chip select map
 *
 * FIXME:
 *   o only valid for 4 ranks per controller
 *   o not the only way to map chip selects to dimm slots
 *   o investigate if there is some way to obtain this map from the bios
 */
static void __devinit i5100_init_dimm_csmap(struct mem_ctl_info *mci)
{
	struct i5100_priv *priv = mci->pvt_info;
	int i;

	WARN_ON(priv->ranksperctlr != 4);

	for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CTLR; i++) {
		int j;

		for (j = 0; j < I5100_MAX_RANKS_PER_DIMM; j++)
			priv->dimm_csmap[i][j] = -1; /* default NC */
	}

	/* only 2 chip selects per slot... */
	priv->dimm_csmap[0][0] = 0;
	priv->dimm_csmap[0][1] = 3;
	priv->dimm_csmap[1][0] = 1;
	priv->dimm_csmap[1][1] = 2;
	priv->dimm_csmap[2][0] = 2;
	priv->dimm_csmap[3][0] = 3;
}

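/*
 * Read byte 5 of each DIMM's SPD (DDR2 rank count, encoded minus one) to
 * record how many ranks sit in every slot, then build the chip select map.
 */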
static void __devinit i5100_init_dimm_layout(struct pci_dev *pdev,
					     struct mem_ctl_info *mci)
{
	struct i5100_priv *priv = mci->pvt_info;
	int i;

	for (i = 0; i < I5100_MAX_CTLRS; i++) {
		int j;

		for (j = 0; j < I5100_MAX_DIMM_SLOTS_PER_CTLR; j++) {
			u8 rank;

			if (i5100_read_spd_byte(mci, i, j, 5, &rank) < 0)
				priv->dimm_numrank[i][j] = 0;
			else
				priv->dimm_numrank[i][j] = (rank & 3) + 1;
		}
	}

	i5100_init_dimm_csmap(mci);
}

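/*
 * Cache the address decode configuration: top of low memory, the memory
 * interleave ranges (MIR/AMIR) from the MC device and the DIMM interleave
 * ranges (DMIR) from each channel's memory map device, then pull in the
 * memory technology registers.
 */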
static void __devinit i5100_init_interleaving(struct pci_dev *pdev,
					      struct mem_ctl_info *mci)
{
	u16 w;
	u32 dw;
	struct i5100_priv *priv = mci->pvt_info;
	struct pci_dev *mms[2] = { priv->ch0mm, priv->ch1mm };
	int i;

	pci_read_config_word(pdev, I5100_TOLM, &w);
	priv->tolm = (u64) I5100_TOLM_TOLM(w) * 256 * 1024 * 1024;

	pci_read_config_word(pdev, I5100_MIR0, &w);
	priv->mir[0].limit = (u64) I5100_MIR_LIMIT(w) << 28;
	priv->mir[0].way[1] = I5100_MIR_WAY1(w);
	priv->mir[0].way[0] = I5100_MIR_WAY0(w);

	pci_read_config_word(pdev, I5100_MIR1, &w);
	priv->mir[1].limit = (u64) I5100_MIR_LIMIT(w) << 28;
	priv->mir[1].way[1] = I5100_MIR_WAY1(w);
	priv->mir[1].way[0] = I5100_MIR_WAY0(w);

	pci_read_config_word(pdev, I5100_AMIR_0, &w);
	priv->amir[0] = w;
	pci_read_config_word(pdev, I5100_AMIR_1, &w);
	priv->amir[1] = w;

	for (i = 0; i < I5100_MAX_CTLRS; i++) {
		int j;

		for (j = 0; j < I5100_MAX_DMIRS; j++) {
			int k;

			pci_read_config_dword(mms[i], I5100_DMIR + j * 4, &dw);

			priv->dmir[i][j].limit =
				(u64) I5100_DMIR_LIMIT(dw) << 28;
			for (k = 0; k < I5100_MAX_RANKS_PER_DIMM; k++)
				priv->dmir[i][j].rank[k] =
					I5100_DMIR_RANK(dw, k);
		}
	}

	i5100_init_mtr(mci);
}

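/*
 * Populate the EDAC csrow table from the decoded rank geometry; ranks with
 * no DIMM behind them are skipped and contribute no pages.
 */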
static void __devinit i5100_init_csrows(struct mem_ctl_info *mci)
{
	int i;
	unsigned long total_pages = 0UL;
	struct i5100_priv *priv = mci->pvt_info;

	for (i = 0; i < mci->nr_csrows; i++) {
		const unsigned long npages = i5100_npages(mci, i);
		const unsigned cntlr = i5100_csrow_to_cntlr(mci, i);
		const unsigned rank = i5100_csrow_to_rank(mci, i);

		if (!npages)
			continue;

		/*
		 * FIXME: these two are totally bogus -- I don't see how to
		 * map them correctly to this structure...
		 */
		mci->csrows[i].first_page = total_pages;
		mci->csrows[i].last_page = total_pages + npages - 1;
		mci->csrows[i].page_mask = 0UL;

		mci->csrows[i].nr_pages = npages;
		mci->csrows[i].grain = 32;
		mci->csrows[i].csrow_idx = i;
		mci->csrows[i].dtype =
			(priv->mtr[cntlr][rank].width == 4) ? DEV_X4 : DEV_X8;
		mci->csrows[i].ue_count = 0;
		mci->csrows[i].ce_count = 0;
		mci->csrows[i].mtype = MEM_RDDR2;
		mci->csrows[i].edac_mode = EDAC_SECDED;
		mci->csrows[i].mci = mci;
		mci->csrows[i].nr_channels = 1;
		mci->csrows[i].channels[0].chan_idx = 0;
		mci->csrows[i].channels[0].ce_count = 0;
		mci->csrows[i].channels[0].csrow = mci->csrows + i;
		snprintf(mci->csrows[i].channels[0].label,
			 sizeof(mci->csrows[i].channels[0].label),
			 "DIMM%u", i5100_rank_to_slot(mci, cntlr, rank));

		total_pages += npages;
	}
}

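/*
 * Probe routine for device 16 function 1: verify that ECC error detection
 * is enabled, locate the two channel memory map devices (21.0 and 22.0),
 * decode the DIMM and interleave configuration, and register the memory
 * controller with the EDAC core.
 */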
static int __devinit i5100_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *id)
{
	int rc;
	struct mem_ctl_info *mci;
	struct i5100_priv *priv;
	struct pci_dev *ch0mm, *ch1mm;
	int ret = 0;
	u32 dw;
	int ranksperch;

	if (PCI_FUNC(pdev->devfn) != 1)
		return -ENODEV;

	rc = pci_enable_device(pdev);
	if (rc < 0) {
		ret = rc;
		goto bail;
	}

	/* ECC enabled? */
	pci_read_config_dword(pdev, I5100_MC, &dw);
	if (!I5100_MC_ERRDETEN(dw)) {
		printk(KERN_INFO "i5100_edac: ECC not enabled.\n");
		ret = -ENODEV;
		goto bail;
	}

	/* figure out how many ranks, from strapped state of 48GB_Mode input */
	pci_read_config_dword(pdev, I5100_MS, &dw);
	ranksperch = !!(dw & (1 << 8)) * 2 + 4;

	if (ranksperch != 4) {
		/* FIXME: get 6 ranks / controller to work - need hw... */
		printk(KERN_INFO "i5100_edac: unsupported configuration.\n");
		ret = -ENODEV;
		goto bail;
	}

	/* enable error reporting... */
	pci_read_config_dword(pdev, I5100_EMASK_MEM, &dw);
	dw &= ~I5100_FERR_NF_MEM_ANY_MASK;
	pci_write_config_dword(pdev, I5100_EMASK_MEM, dw);

	/* device 21, func 0, Channel 0 Memory Map, Error Flag/Mask, etc... */
	ch0mm = pci_get_device_func(PCI_VENDOR_ID_INTEL,
				    PCI_DEVICE_ID_INTEL_5100_21, 0);
	if (!ch0mm)
		return -ENODEV;

	rc = pci_enable_device(ch0mm);
	if (rc < 0) {
		ret = rc;
		goto bail_ch0;
	}

	/* device 22, func 0, Channel 1 Memory Map, Error Flag/Mask, etc... */
	ch1mm = pci_get_device_func(PCI_VENDOR_ID_INTEL,
				    PCI_DEVICE_ID_INTEL_5100_22, 0);
	if (!ch1mm) {
		ret = -ENODEV;
		goto bail_ch0;
	}

	rc = pci_enable_device(ch1mm);
	if (rc < 0) {
		ret = rc;
		goto bail_ch1;
	}

	mci = edac_mc_alloc(sizeof(*priv), ranksperch * 2, 1, 0);
	if (!mci) {
		ret = -ENOMEM;
		goto bail_ch1;
	}

	mci->dev = &pdev->dev;

	priv = mci->pvt_info;
	priv->ranksperctlr = ranksperch;
	priv->mc = pdev;
	priv->ch0mm = ch0mm;
	priv->ch1mm = ch1mm;

	i5100_init_dimm_layout(pdev, mci);
	i5100_init_interleaving(pdev, mci);

	mci->mtype_cap = MEM_FLAG_FB_DDR2;
	mci->edac_ctl_cap = EDAC_FLAG_SECDED;
	mci->edac_cap = EDAC_FLAG_SECDED;
	mci->mod_name = "i5100_edac.c";
	mci->mod_ver = "not versioned";
	mci->ctl_name = "i5100";
	mci->dev_name = pci_name(pdev);
	mci->ctl_page_to_phys = i5100_ctl_page_to_phys;

	mci->edac_check = i5100_check_error;

	i5100_init_csrows(mci);

	/* this strange construction seems to be in every driver, dunno why */
	switch (edac_op_state) {
	case EDAC_OPSTATE_POLL:
	case EDAC_OPSTATE_NMI:
		break;
	default:
		edac_op_state = EDAC_OPSTATE_POLL;
		break;
	}

	if (edac_mc_add_mc(mci)) {
		ret = -ENODEV;
		goto bail_mc;
	}

	goto bail;

bail_mc:
	edac_mc_free(mci);

bail_ch1:
	pci_dev_put(ch1mm);

bail_ch0:
	pci_dev_put(ch0mm);

bail:
	return ret;
}

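/* undo i5100_init_one: unregister the MC and drop the channel device refs */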
static void __devexit i5100_remove_one(struct pci_dev *pdev)
{
	struct mem_ctl_info *mci;
	struct i5100_priv *priv;

	mci = edac_mc_del_mc(&pdev->dev);

	if (!mci)
		return;

	priv = mci->pvt_info;
	pci_dev_put(priv->ch0mm);
	pci_dev_put(priv->ch1mm);

	edac_mc_free(mci);
}

static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
	/* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, i5100_pci_tbl);

static struct pci_driver i5100_driver = {
	.name = KBUILD_BASENAME,
	.probe = i5100_init_one,
	.remove = __devexit_p(i5100_remove_one),
	.id_table = i5100_pci_tbl,
};

static int __init i5100_init(void)
{
	int pci_rc;

	pci_rc = pci_register_driver(&i5100_driver);

	return (pci_rc < 0) ? pci_rc : 0;
}

static void __exit i5100_exit(void)
{
	pci_unregister_driver(&i5100_driver);
}

module_init(i5100_init);
module_exit(i5100_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR
    ("Arthur Jones <ajones@riverbed.com>");
MODULE_DESCRIPTION("MC Driver for Intel I5100 memory controllers");