/* mce_amd.c, revision 8259a7e5724c42c89d927b92cda3e0ab15b9ade9 */
#include <linux/module.h>
#include <linux/slab.h>

#include "mce_amd.h"

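/*
 * Module-wide state: fam_ops holds the per-CPU-family decoder callbacks set
 * up in mce_amd_init(). xec_mask masks the extended error code at
 * MCi_STATUS[19:16] (widened to bits [20:16], mask 0x1f, on F15h), and
 * nb_err_cpumask masks the ErrCpu/core bits in the high 32 bits of the
 * bank 4 status (narrowed to 0x3 on F14h). report_gart_errors and
 * nb_bus_decoder are controlled through the exported helpers below.
 */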
static struct amd_decoder_ops *fam_ops;

static u8 xec_mask	 = 0xf;
static u8 nb_err_cpumask = 0xf;

static bool report_gart_errors;
static void (*nb_bus_decoder)(int node_id, struct mce *m, u32 nbcfg);

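/*
 * GART TLB walk errors (bank 4, extended error code 0x5) are usually not
 * interesting and are filtered out by default in amd_filter_mce() below.
 * An EDAC driver (e.g. amd64_edac) can call this to turn reporting back on.
 */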
void amd_report_gart_errors(bool v)
{
	report_gart_errors = v;
}
EXPORT_SYMBOL_GPL(amd_report_gart_errors);

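/*
 * Hook for a northbridge (DRAM ECC) decoder. amd_decode_nb_mce() invokes it
 * for DRAM ECC errors (extended error codes 0x0/0x8) on K8 and F10h so that
 * a driver such as amd64_edac can map the error back to a node/DIMM. Only a
 * single decoder can be registered at a time.
 */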
void amd_register_ecc_decoder(void (*f)(int, struct mce *, u32))
{
	nb_bus_decoder = f;
}
EXPORT_SYMBOL_GPL(amd_register_ecc_decoder);

void amd_unregister_ecc_decoder(void (*f)(int, struct mce *, u32))
{
	if (nb_bus_decoder) {
		WARN_ON(nb_bus_decoder != f);

		nb_bus_decoder = NULL;
	}
}
EXPORT_SYMBOL_GPL(amd_unregister_ecc_decoder);

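/*
 * Illustrative sketch only (not part of this file): a memory-controller
 * driver would typically hook in like this, where my_nb_decoder() is a
 * hypothetical callback that translates m->addr into a DIMM and reports it:
 *
 *	static void my_nb_decoder(int node_id, struct mce *m, u32 nbcfg)
 *	{
 *		... resolve m->addr to a csrow/DIMM and report it ...
 *	}
 *
 *	amd_report_gart_errors(true);
 *	amd_register_ecc_decoder(my_nb_decoder);
 *	...
 *	amd_unregister_ecc_decoder(my_nb_decoder);
 */
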
/*
 * string representation for the different MCA reported error types, see F3x48
 * or MSR0000_0411.
 */

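/*
 * The decoders below pull these sub-fields out of the low 16 bits of
 * MCi_STATUS ('ec'): LL = ec[1:0] (cache level), TT/II = ec[3:2]
 * (transaction type resp. memory-vs-IO), R4 = ec[7:4] (memory transaction
 * type). Whether a signature is a TLB, memory or bus error is decided via
 * the TLB_ERROR()/MEM_ERROR()/BUS_ERROR() macros from mce_amd.h.
 */
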
/* transaction type */
const char *tt_msgs[] = { "INSN", "DATA", "GEN", "RESV" };
EXPORT_SYMBOL_GPL(tt_msgs);

/* cache level */
const char *ll_msgs[] = { "RESV", "L1", "L2", "L3/GEN" };
EXPORT_SYMBOL_GPL(ll_msgs);

/* memory transaction type */
const char *rrrr_msgs[] = {
       "GEN", "RD", "WR", "DRD", "DWR", "IRD", "PRF", "EV", "SNP"
};
EXPORT_SYMBOL_GPL(rrrr_msgs);

/* participating processor */
const char *pp_msgs[] = { "SRC", "RES", "OBS", "GEN" };
EXPORT_SYMBOL_GPL(pp_msgs);

/* request timeout */
const char *to_msgs[] = { "no timeout", "timed out" };
EXPORT_SYMBOL_GPL(to_msgs);

/* memory or i/o */
const char *ii_msgs[] = { "MEM", "RESV", "IO", "GEN" };
EXPORT_SYMBOL_GPL(ii_msgs);

static const char * const f10h_nb_mce_desc[] = {
	"HT link data error",
	"Protocol error (link, L3, probe filter, etc.)",
	"Parity error in NB-internal arrays",
	"Link Retry due to IO link transmission error",
	"L3 ECC data cache error",
	"ECC error in L3 cache tag",
	"L3 LRU parity bits error",
	"ECC Error in the Probe Filter directory"
};

static const char * const f15h_ic_mce_desc[] = {
	"UC during a demand linefill from L2",
	"Parity error during data load from IC",
	"Parity error for IC valid bit",
	"Main tag parity error",
	"Parity error in prediction queue",
	"PFB data/address parity error",
	"Parity error in the branch status reg",
	"PFB promotion address error",
	"Tag error during probe/victimization",
	"Parity error for IC probe tag valid bit",
	"PFB non-cacheable bit parity error",
	"PFB valid bit parity error",			/* xec = 0xd */
	"patch RAM",					/* xec = 0x10 */
	"uop queue",
	"insn buffer",
	"predecode buffer",
	"fetch address FIFO"
};

static const char * const f15h_cu_mce_desc[] = {
	"Fill ECC error on data fills",			/* xec = 0x4 */
	"Fill parity error on insn fills",
	"Prefetcher request FIFO parity error",
	"PRQ address parity error",
	"PRQ data parity error",
	"WCC Tag ECC error",
	"WCC Data ECC error",
	"WCB Data parity error",
	"VB Data/ECC error",
	"L2 Tag ECC error",				/* xec = 0x10 */
	"Hard L2 Tag ECC error",
	"Multiple hits on L2 tag",
	"XAB parity error",
	"PRB address parity error"
};

static const char * const fr_ex_mce_desc[] = {
	"CPU Watchdog timer expire",
	"Wakeup array dest tag",
	"AG payload array",
	"EX payload array",
	"IDRF array",
	"Retire dispatch queue",
	"Mapper checkpoint array",
	"Physical register file EX0 port",
	"Physical register file EX1 port",
	"Physical register file AG0 port",
	"Physical register file AG1 port",
	"Flag register file",
	"DE correctable error could not be corrected"
};

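/*
 * Per-family Data Cache (bank 0) handlers. Each returns true if it
 * recognized and printed the error signature; amd_decode_dc_mce() falls back
 * to a "corrupted info" message otherwise. The more specific handlers chain
 * to the generic ones: k8_dc_mce() -> f10h_dc_mce() -> f12h_dc_mce().
 */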
static bool f12h_dc_mce(u16 ec, u8 xec)
{
	bool ret = false;

	if (MEM_ERROR(ec)) {
		u8 ll = ec & 0x3;
		ret = true;

		if (ll == LL_L2)
			pr_cont("during L1 linefill from L2.\n");
		else if (ll == LL_L1)
			pr_cont("Data/Tag %s error.\n", RRRR_MSG(ec));
		else
			ret = false;
	}
	return ret;
}

static bool f10h_dc_mce(u16 ec, u8 xec)
{
	u8 r4  = (ec >> 4) & 0xf;
	u8 ll  = ec & 0x3;

	if (r4 == R4_GEN && ll == LL_L1) {
		pr_cont("during data scrub.\n");
		return true;
	}
	return f12h_dc_mce(ec, xec);
}

static bool k8_dc_mce(u16 ec, u8 xec)
{
	if (BUS_ERROR(ec)) {
		pr_cont("during system linefill.\n");
		return true;
	}

	return f10h_dc_mce(ec, xec);
}

static bool f14h_dc_mce(u16 ec, u8 xec)
{
	u8 r4	 = (ec >> 4) & 0xf;
	u8 ll	 = ec & 0x3;
	u8 tt	 = (ec >> 2) & 0x3;
	u8 ii	 = tt;
	bool ret = true;

	if (MEM_ERROR(ec)) {

		if (tt != TT_DATA || ll != LL_L1)
			return false;

		switch (r4) {
		case R4_DRD:
		case R4_DWR:
			pr_cont("Data/Tag parity error due to %s.\n",
				(r4 == R4_DRD ? "load/hw prf" : "store"));
			break;
		case R4_EVICT:
			pr_cont("Copyback parity error on a tag miss.\n");
			break;
		case R4_SNOOP:
			pr_cont("Tag parity error during snoop.\n");
			break;
		default:
			ret = false;
		}
	} else if (BUS_ERROR(ec)) {

		if ((ii != II_MEM && ii != II_IO) || ll != LL_LG)
			return false;

		pr_cont("System read data error on a ");

		switch (r4) {
		case R4_RD:
			pr_cont("TLB reload.\n");
			break;
		case R4_DWR:
			pr_cont("store.\n");
			break;
		case R4_DRD:
			pr_cont("load.\n");
			break;
		default:
			ret = false;
		}
	} else {
		ret = false;
	}

	return ret;
}

static bool f15h_dc_mce(u16 ec, u8 xec)
{
	bool ret = true;

	if (MEM_ERROR(ec)) {

		switch (xec) {
		case 0x0:
			pr_cont("Data Array access error.\n");
			break;

		case 0x1:
			pr_cont("UC error during a linefill from L2/NB.\n");
			break;

		case 0x2:
		case 0x11:
			pr_cont("STQ access error.\n");
			break;

		case 0x3:
			pr_cont("SCB access error.\n");
			break;

		case 0x10:
			pr_cont("Tag error.\n");
			break;

		case 0x12:
			pr_cont("LDQ access error.\n");
			break;

		default:
			ret = false;
		}
	} else if (BUS_ERROR(ec)) {

		if (!xec)
			pr_cont("during system linefill.\n");
		else
			pr_cont(" Internal %s condition.\n",
				((xec == 1) ? "livelock" : "deadlock"));
	} else
		ret = false;

	return ret;
}

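/*
 * Bank 0: Data Cache. TLB errors carry the same signature on all families
 * and are decoded right here; everything else is family-specific and goes
 * through fam_ops->dc_mce().
 */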
static void amd_decode_dc_mce(struct mce *m)
{
	u16 ec = m->status & 0xffff;
	u8 xec = (m->status >> 16) & xec_mask;

	pr_emerg(HW_ERR "Data Cache Error: ");

	/* TLB error signatures are the same across families */
	if (TLB_ERROR(ec)) {
		u8 tt = (ec >> 2) & 0x3;

		if (tt == TT_DATA) {
			pr_cont("%s TLB %s.\n", LL_MSG(ec),
				((xec == 2) ? "locked miss"
					    : (xec ? "multimatch" : "parity")));
			return;
		}
	} else if (!fam_ops->dc_mce(ec, xec))
		pr_emerg(HW_ERR "Corrupted DC MCE info?\n");
}

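/*
 * Per-family Instruction Cache (bank 1) handlers. k8_ic_mce() also covers
 * families 0x10-0x12; F14h and F15h have their own signatures (F15h indexes
 * f15h_ic_mce_desc[] above).
 */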
static bool k8_ic_mce(u16 ec, u8 xec)
{
	u8 ll	 = ec & 0x3;
	u8 r4	 = (ec >> 4) & 0xf;
	bool ret = true;

	if (!MEM_ERROR(ec))
		return false;

	if (ll == 0x2)
		pr_cont("during a linefill from L2.\n");
	else if (ll == 0x1) {
		switch (r4) {
		case R4_IRD:
			pr_cont("Parity error during data load.\n");
			break;

		case R4_EVICT:
			pr_cont("Copyback Parity/Victim error.\n");
			break;

		case R4_SNOOP:
			pr_cont("Tag Snoop error.\n");
			break;

		default:
			ret = false;
			break;
		}
	} else
		ret = false;

	return ret;
}

static bool f14h_ic_mce(u16 ec, u8 xec)
{
	u8 ll    = ec & 0x3;
	u8 tt    = (ec >> 2) & 0x3;
	u8 r4  = (ec >> 4) & 0xf;
	bool ret = true;

	if (MEM_ERROR(ec)) {
		if (tt != 0 || ll != 1)
			ret = false;

		if (r4 == R4_IRD)
			pr_cont("Data/tag array parity error for a tag hit.\n");
		else if (r4 == R4_SNOOP)
			pr_cont("Tag error during snoop/victimization.\n");
		else
			ret = false;
	}
	return ret;
}

static bool f15h_ic_mce(u16 ec, u8 xec)
{
	bool ret = true;

	if (!MEM_ERROR(ec))
		return false;

	switch (xec) {
	case 0x0 ... 0xa:
		pr_cont("%s.\n", f15h_ic_mce_desc[xec]);
		break;

	case 0xd:
		pr_cont("%s.\n", f15h_ic_mce_desc[xec-2]);
		break;

	case 0x10 ... 0x14:
		pr_cont("Decoder %s parity error.\n", f15h_ic_mce_desc[xec-4]);
		break;

	default:
		ret = false;
	}
	return ret;
}

static void amd_decode_ic_mce(struct mce *m)
{
	u16 ec = m->status & 0xffff;
	u8 xec = (m->status >> 16) & xec_mask;

	pr_emerg(HW_ERR "Instruction Cache Error: ");

	if (TLB_ERROR(ec))
		pr_cont("%s TLB %s.\n", LL_MSG(ec),
			(xec ? "multimatch" : "parity error"));
	else if (BUS_ERROR(ec)) {
		bool k8 = (boot_cpu_data.x86 == 0xf && (m->status & BIT_64(58)));

		pr_cont("during %s.\n", (k8 ? "system linefill" : "NB data read"));
	} else if (!fam_ops->ic_mce(ec, xec))
		pr_emerg(HW_ERR "Corrupted IC MCE info?\n");
}

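/*
 * Bank 2 on pre-F15h families: Bus Unit errors. On F15h, bank 2 is the
 * Combined Unit instead and amd_decode_mce() routes it to
 * amd_decode_cu_mce() below.
 */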
static void amd_decode_bu_mce(struct mce *m)
{
	u32 ec = m->status & 0xffff;
	u32 xec = (m->status >> 16) & xec_mask;

	pr_emerg(HW_ERR "Bus Unit Error");

	if (xec == 0x1)
		pr_cont(" in the write data buffers.\n");
	else if (xec == 0x3)
		pr_cont(" in the victim data buffers.\n");
	else if (xec == 0x2 && MEM_ERROR(ec))
		pr_cont(": %s error in the L2 cache tags.\n", RRRR_MSG(ec));
	else if (xec == 0x0) {
		if (TLB_ERROR(ec))
			pr_cont(": %s error in a Page Descriptor Cache or "
				"Guest TLB.\n", TT_MSG(ec));
		else if (BUS_ERROR(ec))
			pr_cont(": %s/ECC error in data read from NB: %s.\n",
				RRRR_MSG(ec), PP_MSG(ec));
		else if (MEM_ERROR(ec)) {
			u8 rrrr = (ec >> 4) & 0xf;

			if (rrrr >= 0x7)
				pr_cont(": %s error during data copyback.\n",
					RRRR_MSG(ec));
			else if (rrrr <= 0x1)
				pr_cont(": %s parity/ECC error during data "
					"access from L2.\n", RRRR_MSG(ec));
			else
				goto wrong_bu_mce;
		} else
			goto wrong_bu_mce;
	} else
		goto wrong_bu_mce;

	return;

wrong_bu_mce:
	pr_emerg(HW_ERR "Corrupted BU MCE info?\n");
}

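/*
 * Bank 2 on F15h: Combined Unit (L2, write-coalescing cache, prefetch)
 * errors. Memory-error extended codes 0x4-0xc and 0x10-0x14 index
 * f15h_cu_mce_desc[] above.
 */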
static void amd_decode_cu_mce(struct mce *m)
{
	u16 ec = m->status & 0xffff;
	u8 xec = (m->status >> 16) & xec_mask;

	pr_emerg(HW_ERR "Combined Unit Error: ");

	if (TLB_ERROR(ec)) {
		if (xec == 0x0)
			pr_cont("Data parity TLB read error.\n");
		else if (xec == 0x1)
			pr_cont("Poison data provided for TLB fill.\n");
		else
			goto wrong_cu_mce;
	} else if (BUS_ERROR(ec)) {
		if (xec > 2)
			goto wrong_cu_mce;

		pr_cont("Error during attempted NB data read.\n");
	} else if (MEM_ERROR(ec)) {
		switch (xec) {
		case 0x4 ... 0xc:
			pr_cont("%s.\n", f15h_cu_mce_desc[xec - 0x4]);
			break;

		case 0x10 ... 0x14:
			pr_cont("%s.\n", f15h_cu_mce_desc[xec - 0x7]);
			break;

		default:
			goto wrong_cu_mce;
		}
	}

	return;

wrong_cu_mce:
	pr_emerg(HW_ERR "Corrupted CU MCE info?\n");
}

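/*
 * Bank 3: Load/Store unit. Families 0x14 and later do not signal LS MCEs
 * anymore, hence the warning below.
 */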
static void amd_decode_ls_mce(struct mce *m)
{
	u16 ec = m->status & 0xffff;
	u8 xec = (m->status >> 16) & xec_mask;

	if (boot_cpu_data.x86 >= 0x14) {
		pr_emerg("You shouldn't be seeing an LS MCE on this cpu family,"
			 " please report on LKML.\n");
		return;
	}

	pr_emerg(HW_ERR "Load Store Error");

	if (xec == 0x0) {
		u8 r4 = (ec >> 4) & 0xf;

		if (!BUS_ERROR(ec) || (r4 != R4_DRD && r4 != R4_DWR))
			goto wrong_ls_mce;

		pr_cont(" during %s.\n", RRRR_MSG(ec));
	} else
		goto wrong_ls_mce;

	return;

wrong_ls_mce:
	pr_emerg(HW_ERR "Corrupted LS MCE info?\n");
}

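/*
 * Northbridge (bank 4) handlers. f10h_nb_mce() first tries the K8
 * signatures and then the newer ones described in f10h_nb_mce_desc[];
 * nb_noop_mce() is installed on families (0x12, 0x14) whose NB errors are
 * not decoded here.
 */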
static bool k8_nb_mce(u16 ec, u8 xec)
{
	bool ret = true;

	switch (xec) {
	case 0x1:
		pr_cont("CRC error detected on HT link.\n");
		break;

	case 0x5:
		pr_cont("Invalid GART PTE entry during GART table walk.\n");
		break;

	case 0x6:
		pr_cont("Unsupported atomic RMW received from an IO link.\n");
		break;

	case 0x0:
	case 0x8:
		if (boot_cpu_data.x86 == 0x11)
			return false;

		pr_cont("DRAM ECC error detected on the NB.\n");
		break;

	case 0xd:
		pr_cont("Parity error on the DRAM addr/ctl signals.\n");
		break;

	default:
		ret = false;
		break;
	}

	return ret;
}

static bool f10h_nb_mce(u16 ec, u8 xec)
{
	bool ret = true;
	u8 offset = 0;

	if (k8_nb_mce(ec, xec))
		return true;

	switch (xec) {
	case 0xa ... 0xc:
		offset = 10;
		break;

	case 0xe:
		offset = 11;
		break;

	case 0xf:
		if (TLB_ERROR(ec))
			pr_cont("GART Table Walk data error.\n");
		else if (BUS_ERROR(ec))
			pr_cont("DMA Exclusion Vector Table Walk error.\n");
		else
			ret = false;

		goto out;

	case 0x19:
		if (boot_cpu_data.x86 == 0x15)
			pr_cont("Compute Unit Data Error.\n");
		else
			ret = false;

		goto out;

	case 0x1c ... 0x1f:
		offset = 24;
		break;

	default:
		ret = false;

		goto out;
	}

	pr_cont("%s.\n", f10h_nb_mce_desc[xec - offset]);

out:
	return ret;
}

static bool nb_noop_mce(u16 ec, u8 xec)
{
	return false;
}

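/*
 * Decode a northbridge (bank 4) MCE. Exported so that EDAC drivers can call
 * it directly; for DRAM ECC errors (extended error codes 0x0/0x8) on K8 and
 * F10h it also forwards the event to the registered nb_bus_decoder, which
 * lets e.g. amd64_edac translate the reported address into a DIMM.
 */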
void amd_decode_nb_mce(int node_id, struct mce *m, u32 nbcfg)
{
	u8 xec   = (m->status >> 16) & 0x1f;
	u16 ec   = m->status & 0xffff;
	u32 nbsh = (u32)(m->status >> 32);

	pr_emerg(HW_ERR "Northbridge Error, node %d: ", node_id);

	/*
	 * F10h revD and later can disable ErrCpu[3:0], and the field encodes
	 * the core number directly rather than a bitmask, so check the valid
	 * bit first and interpret the value differently.
	 */
	if ((boot_cpu_data.x86 == 0x10) &&
	    (boot_cpu_data.x86_model > 7)) {
		if (nbsh & K8_NBSH_ERR_CPU_VAL)
			pr_cont(", core: %u", (u8)(nbsh & nb_err_cpumask));
	} else {
		u8 assoc_cpus = nbsh & nb_err_cpumask;

		if (assoc_cpus > 0)
			pr_cont(", core: %d", fls(assoc_cpus) - 1);
	}

	switch (xec) {
	case 0x2:
		pr_cont("Sync error (sync packets on HT link detected).\n");
		return;

	case 0x3:
		pr_cont("HT Master abort.\n");
		return;

	case 0x4:
		pr_cont("HT Target abort.\n");
		return;

	case 0x7:
		pr_cont("NB Watchdog timeout.\n");
		return;

	case 0x9:
		pr_cont("SVM DMA Exclusion Vector error.\n");
		return;

	default:
		break;
	}

	if (!fam_ops->nb_mce(ec, xec))
		goto wrong_nb_mce;

	if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10)
		if ((xec == 0x8 || xec == 0x0) && nb_bus_decoder)
			nb_bus_decoder(node_id, m, nbcfg);

	return;

wrong_nb_mce:
	pr_emerg(HW_ERR "Corrupted NB MCE info?\n");
}
EXPORT_SYMBOL_GPL(amd_decode_nb_mce);

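/*
 * Bank 5: FIROB errors on families 0x10/0x12/0x14 (only extended error code
 * 0x0 is handled there), Execution Unit errors on F15h; the message strings
 * come from fr_ex_mce_desc[] above.
 */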
static void amd_decode_fr_mce(struct mce *m)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;
	u8 xec = (m->status >> 16) & xec_mask;

	if (c->x86 == 0xf || c->x86 == 0x11)
		goto wrong_fr_mce;

	if (c->x86 != 0x15 && xec != 0x0)
		goto wrong_fr_mce;

	pr_emerg(HW_ERR "%s Error: ",
		 (c->x86 == 0x15 ? "Execution Unit" : "FIROB"));

	if (xec == 0x0 || xec == 0xc)
		pr_cont("%s.\n", fr_ex_mce_desc[xec]);
	else if (xec < 0xd)
		pr_cont("%s parity error.\n", fr_ex_mce_desc[xec]);
	else
		goto wrong_fr_mce;

	return;

wrong_fr_mce:
	pr_emerg(HW_ERR "Corrupted FR MCE info?\n");
}

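/*
 * Family-independent dump of the low 16 bits of MCi_STATUS using the string
 * tables above; printed in addition to the bank-specific decoding.
 */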
static inline void amd_decode_err_code(u16 ec)
{
	if (TLB_ERROR(ec)) {
		pr_emerg(HW_ERR "Transaction: %s, Cache Level: %s\n",
			 TT_MSG(ec), LL_MSG(ec));
	} else if (MEM_ERROR(ec)) {
		pr_emerg(HW_ERR "Transaction: %s, Type: %s, Cache Level: %s\n",
			 RRRR_MSG(ec), TT_MSG(ec), LL_MSG(ec));
	} else if (BUS_ERROR(ec)) {
		pr_emerg(HW_ERR "Transaction: %s (%s), %s, Cache Level: %s, "
			 "Participating Processor: %s\n",
			  RRRR_MSG(ec), II_MSG(ec), TO_MSG(ec), LL_MSG(ec),
			  PP_MSG(ec));
	} else
		pr_emerg(HW_ERR "Huh? Unknown MCE error 0x%x\n", ec);
}

/*
 * Filter out unwanted MCE signatures here.
 */
static bool amd_filter_mce(struct mce *m)
{
	u8 xec = (m->status >> 16) & 0x1f;

	/*
	 * NB GART TLB error reporting is disabled by default.
	 */
	if (m->bank == 4 && xec == 0x5 && !report_gart_errors)
		return true;

	return false;
}

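/*
 * Notifier callback hooked into x86_mce_decoder_chain (see mce_amd_init()
 * below). It prints the generic status bits, dispatches to the bank-specific
 * decoder and returns NOTIFY_STOP so no further decoding is attempted.
 */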
int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
{
	struct mce *m = (struct mce *)data;
	int node, ecc;

	if (amd_filter_mce(m))
		return NOTIFY_STOP;

	pr_emerg(HW_ERR "MC%d_STATUS: ", m->bank);

	pr_cont("%sorrected error, other errors lost: %s, "
		 "CPU context corrupt: %s",
		 ((m->status & MCI_STATUS_UC) ? "Unc"  : "C"),
		 ((m->status & MCI_STATUS_OVER) ? "yes"  : "no"),
		 ((m->status & MCI_STATUS_PCC) ? "yes" : "no"));

	/* do the UECC (bit 45) and CECC (bit 46) bits together */
	ecc = (m->status >> 45) & 0x3;
	if (ecc)
		pr_cont(", %sECC Error", ((ecc == 2) ? "C" : "U"));

	pr_cont("\n");

	switch (m->bank) {
	case 0:
		amd_decode_dc_mce(m);
		break;

	case 1:
		amd_decode_ic_mce(m);
		break;

	case 2:
		if (boot_cpu_data.x86 == 0x15)
			amd_decode_cu_mce(m);
		else
			amd_decode_bu_mce(m);
		break;

	case 3:
		amd_decode_ls_mce(m);
		break;

	case 4:
		node = amd_get_nb_id(m->extcpu);
		amd_decode_nb_mce(node, m, 0);
		break;

	case 5:
		amd_decode_fr_mce(m);
		break;

	default:
		break;
	}

	amd_decode_err_code(m->status & 0xffff);

	return NOTIFY_STOP;
}
EXPORT_SYMBOL_GPL(amd_decode_mce);

static struct notifier_block amd_mce_dec_nb = {
	.notifier_call	= amd_decode_mce,
};

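/*
 * Install the per-family decoder ops and register with the MCE decoder
 * notifier chain. Only families 0xf-0x12 and 0x14 (models up to 0xf) make
 * it past the check below; the switch already knows how to set up family
 * 0x15 as well.
 */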
static int __init mce_amd_init(void)
{
	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return 0;

	if ((boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x12) &&
	    (boot_cpu_data.x86 != 0x14 || boot_cpu_data.x86_model > 0xf))
		return 0;

	fam_ops = kzalloc(sizeof(struct amd_decoder_ops), GFP_KERNEL);
	if (!fam_ops)
		return -ENOMEM;

	switch (boot_cpu_data.x86) {
	case 0xf:
		fam_ops->dc_mce = k8_dc_mce;
		fam_ops->ic_mce = k8_ic_mce;
		fam_ops->nb_mce = k8_nb_mce;
		break;

	case 0x10:
		fam_ops->dc_mce = f10h_dc_mce;
		fam_ops->ic_mce = k8_ic_mce;
		fam_ops->nb_mce = f10h_nb_mce;
		break;

	case 0x11:
		fam_ops->dc_mce = k8_dc_mce;
		fam_ops->ic_mce = k8_ic_mce;
		fam_ops->nb_mce = f10h_nb_mce;
		break;

	case 0x12:
		fam_ops->dc_mce = f12h_dc_mce;
		fam_ops->ic_mce = k8_ic_mce;
		fam_ops->nb_mce = nb_noop_mce;
		break;

	case 0x14:
		nb_err_cpumask  = 0x3;
		fam_ops->dc_mce = f14h_dc_mce;
		fam_ops->ic_mce = f14h_ic_mce;
		fam_ops->nb_mce = nb_noop_mce;
		break;

	case 0x15:
		xec_mask = 0x1f;
		fam_ops->dc_mce = f15h_dc_mce;
		fam_ops->ic_mce = f15h_ic_mce;
		fam_ops->nb_mce = f10h_nb_mce;
		break;

	default:
		printk(KERN_WARNING "Huh? What family is that: %d?!\n",
				    boot_cpu_data.x86);
		kfree(fam_ops);
		return -EINVAL;
	}

	pr_info("MCE: In-kernel MCE decoding enabled.\n");

	atomic_notifier_chain_register(&x86_mce_decoder_chain, &amd_mce_dec_nb);

	return 0;
}
early_initcall(mce_amd_init);

#ifdef MODULE
static void __exit mce_amd_exit(void)
{
	atomic_notifier_chain_unregister(&x86_mce_decoder_chain, &amd_mce_dec_nb);
	kfree(fam_ops);
}

MODULE_DESCRIPTION("AMD MCE decoder");
MODULE_ALIAS("edac-mce-amd");
MODULE_LICENSE("GPL");
module_exit(mce_amd_exit);
#endif