mce_amd.c revision 6c1173a61e63c32bd40cb1e6dd16343240a328eb
#include <linux/module.h>
#include <linux/slab.h>

#include "mce_amd.h"

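/* Per-family decoder callbacks; allocated and filled in by mce_amd_init(). */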
static struct amd_decoder_ops *fam_ops;

static u8 xec_mask	 = 0xf;
static u8 nb_err_cpumask = 0xf;

static bool report_gart_errors;
static void (*nb_bus_decoder)(int node_id, struct mce *m);

void amd_report_gart_errors(bool v)
{
	report_gart_errors = v;
}
EXPORT_SYMBOL_GPL(amd_report_gart_errors);

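/*
 * An EDAC driver registers its DRAM ECC decoder here; amd_decode_nb_mce()
 * calls it back for NB DRAM ECC errors.
 */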
void amd_register_ecc_decoder(void (*f)(int, struct mce *))
{
	nb_bus_decoder = f;
}
EXPORT_SYMBOL_GPL(amd_register_ecc_decoder);

void amd_unregister_ecc_decoder(void (*f)(int, struct mce *))
{
	if (nb_bus_decoder) {
		WARN_ON(nb_bus_decoder != f);

		nb_bus_decoder = NULL;
	}
}
EXPORT_SYMBOL_GPL(amd_unregister_ecc_decoder);

/*
 * string representation for the different MCA reported error types, see F3x48
 * or MSR0000_0411.
 */

/* transaction type */
const char *tt_msgs[] = { "INSN", "DATA", "GEN", "RESV" };
EXPORT_SYMBOL_GPL(tt_msgs);

/* cache level */
const char *ll_msgs[] = { "RESV", "L1", "L2", "L3/GEN" };
EXPORT_SYMBOL_GPL(ll_msgs);

/* memory transaction type */
const char *rrrr_msgs[] = {
	"GEN", "RD", "WR", "DRD", "DWR", "IRD", "PRF", "EV", "SNP"
};
EXPORT_SYMBOL_GPL(rrrr_msgs);

/* participating processor */
const char *pp_msgs[] = { "SRC", "RES", "OBS", "GEN" };
EXPORT_SYMBOL_GPL(pp_msgs);

/* request timeout */
const char *to_msgs[] = { "no timeout", "timed out" };
EXPORT_SYMBOL_GPL(to_msgs);

/* memory or i/o */
const char *ii_msgs[] = { "MEM", "RESV", "IO", "GEN" };
EXPORT_SYMBOL_GPL(ii_msgs);

static const char *f10h_nb_mce_desc[] = {
	"HT link data error",
	"Protocol error (link, L3, probe filter, etc.)",
	"Parity error in NB-internal arrays",
	"Link Retry due to IO link transmission error",
	"L3 ECC data cache error",
	"ECC error in L3 cache tag",
	"L3 LRU parity bits error",
	"ECC Error in the Probe Filter directory"
};

static const char * const f15h_ic_mce_desc[] = {
	"UC during a demand linefill from L2",
	"Parity error during data load from IC",
	"Parity error for IC valid bit",
	"Main tag parity error",
	"Parity error in prediction queue",
	"PFB data/address parity error",
	"Parity error in the branch status reg",
	"PFB promotion address error",
	"Tag error during probe/victimization",
	"Parity error for IC probe tag valid bit",
	"PFB non-cacheable bit parity error",
	"PFB valid bit parity error",			/* xec = 0xd */
	"Microcode Patch Buffer",			/* xec = 0x10 */
	"uop queue",
	"insn buffer",
	"predecode buffer",
	"fetch address FIFO"
};

static const char * const f15h_cu_mce_desc[] = {
	"Fill ECC error on data fills",			/* xec = 0x4 */
	"Fill parity error on insn fills",
	"Prefetcher request FIFO parity error",
	"PRQ address parity error",
	"PRQ data parity error",
	"WCC Tag ECC error",
	"WCC Data ECC error",
	"WCB Data parity error",
	"VB Data/ECC error",
	"L2 Tag ECC error",				/* xec = 0x10 */
	"Hard L2 Tag ECC error",
	"Multiple hits on L2 tag",
	"XAB parity error",
	"PRB address parity error"
};

static const char * const fr_ex_mce_desc[] = {
	"CPU Watchdog timer expire",
	"Wakeup array dest tag",
	"AG payload array",
	"EX payload array",
	"IDRF array",
	"Retire dispatch queue",
	"Mapper checkpoint array",
	"Physical register file EX0 port",
	"Physical register file EX1 port",
	"Physical register file AG0 port",
	"Physical register file AG1 port",
	"Flag register file",
	"DE correctable error could not be corrected"
};

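/* Per-family MC0 (Data Cache) error decoders. */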
static bool f12h_dc_mce(u16 ec, u8 xec)
{
	bool ret = false;

	if (MEM_ERROR(ec)) {
		u8 ll = LL(ec);
		ret = true;

		if (ll == LL_L2)
			pr_cont("during L1 linefill from L2.\n");
		else if (ll == LL_L1)
			pr_cont("Data/Tag %s error.\n", R4_MSG(ec));
		else
			ret = false;
	}
	return ret;
}

static bool f10h_dc_mce(u16 ec, u8 xec)
{
	if (R4(ec) == R4_GEN && LL(ec) == LL_L1) {
		pr_cont("during data scrub.\n");
		return true;
	}
	return f12h_dc_mce(ec, xec);
}

static bool k8_dc_mce(u16 ec, u8 xec)
{
	if (BUS_ERROR(ec)) {
		pr_cont("during system linefill.\n");
		return true;
	}

	return f10h_dc_mce(ec, xec);
}

static bool f14h_dc_mce(u16 ec, u8 xec)
{
	u8 r4	 = R4(ec);
	bool ret = true;

	if (MEM_ERROR(ec)) {

		if (TT(ec) != TT_DATA || LL(ec) != LL_L1)
			return false;

		switch (r4) {
		case R4_DRD:
		case R4_DWR:
			pr_cont("Data/Tag parity error due to %s.\n",
				(r4 == R4_DRD ? "load/hw prf" : "store"));
			break;
		case R4_EVICT:
			pr_cont("Copyback parity error on a tag miss.\n");
			break;
		case R4_SNOOP:
			pr_cont("Tag parity error during snoop.\n");
			break;
		default:
			ret = false;
		}
	} else if (BUS_ERROR(ec)) {

		if ((II(ec) != II_MEM && II(ec) != II_IO) || LL(ec) != LL_LG)
			return false;

		pr_cont("System read data error on a ");

		switch (r4) {
		case R4_RD:
			pr_cont("TLB reload.\n");
			break;
		case R4_DWR:
			pr_cont("store.\n");
			break;
		case R4_DRD:
			pr_cont("load.\n");
			break;
		default:
			ret = false;
		}
	} else {
		ret = false;
	}

	return ret;
}

static bool f15h_dc_mce(u16 ec, u8 xec)
{
	bool ret = true;

	if (MEM_ERROR(ec)) {

		switch (xec) {
		case 0x0:
			pr_cont("Data Array access error.\n");
			break;

		case 0x1:
			pr_cont("UC error during a linefill from L2/NB.\n");
			break;

		case 0x2:
		case 0x11:
			pr_cont("STQ access error.\n");
			break;

		case 0x3:
			pr_cont("SCB access error.\n");
			break;

		case 0x10:
			pr_cont("Tag error.\n");
			break;

		case 0x12:
			pr_cont("LDQ access error.\n");
			break;

		default:
			ret = false;
		}
	} else if (BUS_ERROR(ec)) {

		if (!xec)
			pr_cont("System Read Data Error.\n");
		else
			pr_cont(" Internal error condition type %d.\n", xec);
	} else
		ret = false;

	return ret;
}

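/*
 * Bank 0: Data Cache errors. TLB signatures are common across families;
 * everything else goes through fam_ops->dc_mce().
 */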
static void amd_decode_dc_mce(struct mce *m)
{
	u16 ec = EC(m->status);
	u8 xec = XEC(m->status, xec_mask);

	pr_emerg(HW_ERR "Data Cache Error: ");

	/* TLB error signatures are the same across families */
	if (TLB_ERROR(ec)) {
		if (TT(ec) == TT_DATA) {
			pr_cont("%s TLB %s.\n", LL_MSG(ec),
				((xec == 2) ? "locked miss"
					    : (xec ? "multimatch" : "parity")));
			return;
		}
	} else if (fam_ops->dc_mce(ec, xec))
		;
	else
		pr_emerg(HW_ERR "Corrupted DC MCE info?\n");
}

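/* Per-family MC1 (Instruction Cache) error decoders. */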
static bool k8_ic_mce(u16 ec, u8 xec)
{
	u8 ll	 = LL(ec);
	bool ret = true;

	if (!MEM_ERROR(ec))
		return false;

	if (ll == 0x2)
		pr_cont("during a linefill from L2.\n");
	else if (ll == 0x1) {
		switch (R4(ec)) {
		case R4_IRD:
			pr_cont("Parity error during data load.\n");
			break;

		case R4_EVICT:
			pr_cont("Copyback Parity/Victim error.\n");
			break;

		case R4_SNOOP:
			pr_cont("Tag Snoop error.\n");
			break;

		default:
			ret = false;
			break;
		}
	} else
		ret = false;

	return ret;
}

static bool f14h_ic_mce(u16 ec, u8 xec)
{
	u8 r4    = R4(ec);
	bool ret = true;

	if (MEM_ERROR(ec)) {
		if (TT(ec) != 0 || LL(ec) != 1)
			ret = false;

		if (r4 == R4_IRD)
			pr_cont("Data/tag array parity error for a tag hit.\n");
		else if (r4 == R4_SNOOP)
			pr_cont("Tag error during snoop/victimization.\n");
		else
			ret = false;
	}
	return ret;
}

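/*
 * Map the sparse xec values onto f15h_ic_mce_desc[]: 0x0-0xa index the table
 * directly; 0xd and 0x10-0x14 are offset to skip the codes not decoded here.
 */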
static bool f15h_ic_mce(u16 ec, u8 xec)
{
	bool ret = true;

	if (!MEM_ERROR(ec))
		return false;

	switch (xec) {
	case 0x0 ... 0xa:
		pr_cont("%s.\n", f15h_ic_mce_desc[xec]);
		break;

	case 0xd:
		pr_cont("%s.\n", f15h_ic_mce_desc[xec-2]);
		break;

	case 0x10:
		pr_cont("%s.\n", f15h_ic_mce_desc[xec-4]);
		break;

	case 0x11 ... 0x14:
		pr_cont("Decoder %s parity error.\n", f15h_ic_mce_desc[xec-4]);
		break;

	default:
		ret = false;
	}
	return ret;
}

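/*
 * Bank 1: Instruction Cache errors. TLB and bus-error signatures are decoded
 * here; the rest goes through fam_ops->ic_mce().
 */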
static void amd_decode_ic_mce(struct mce *m)
{
	u16 ec = EC(m->status);
	u8 xec = XEC(m->status, xec_mask);

	pr_emerg(HW_ERR "Instruction Cache Error: ");

	if (TLB_ERROR(ec))
		pr_cont("%s TLB %s.\n", LL_MSG(ec),
			(xec ? "multimatch" : "parity error"));
	else if (BUS_ERROR(ec)) {
		bool k8 = (boot_cpu_data.x86 == 0xf && (m->status & BIT_64(58)));

		pr_cont("during %s.\n", (k8 ? "system linefill" : "NB data read"));
	} else if (fam_ops->ic_mce(ec, xec))
		;
	else
		pr_emerg(HW_ERR "Corrupted IC MCE info?\n");
}

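/* Bank 2 on pre-F15h parts: Bus Unit errors. */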
static void amd_decode_bu_mce(struct mce *m)
{
	u16 ec = EC(m->status);
	u8 xec = XEC(m->status, xec_mask);

	pr_emerg(HW_ERR "Bus Unit Error");

	if (xec == 0x1)
		pr_cont(" in the write data buffers.\n");
	else if (xec == 0x3)
		pr_cont(" in the victim data buffers.\n");
	else if (xec == 0x2 && MEM_ERROR(ec))
		pr_cont(": %s error in the L2 cache tags.\n", R4_MSG(ec));
	else if (xec == 0x0) {
		if (TLB_ERROR(ec))
			pr_cont(": %s error in a Page Descriptor Cache or "
				"Guest TLB.\n", TT_MSG(ec));
		else if (BUS_ERROR(ec))
			pr_cont(": %s/ECC error in data read from NB: %s.\n",
				R4_MSG(ec), PP_MSG(ec));
		else if (MEM_ERROR(ec)) {
			u8 r4 = R4(ec);

			if (r4 >= 0x7)
				pr_cont(": %s error during data copyback.\n",
					R4_MSG(ec));
			else if (r4 <= 0x1)
				pr_cont(": %s parity/ECC error during data "
					"access from L2.\n", R4_MSG(ec));
			else
				goto wrong_bu_mce;
		} else
			goto wrong_bu_mce;
	} else
		goto wrong_bu_mce;

	return;

wrong_bu_mce:
	pr_emerg(HW_ERR "Corrupted BU MCE info?\n");
}

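/* Bank 2 on family 0x15: Combined Unit errors. */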
static void amd_decode_cu_mce(struct mce *m)
{
	u16 ec = EC(m->status);
	u8 xec = XEC(m->status, xec_mask);

	pr_emerg(HW_ERR "Combined Unit Error: ");

	if (TLB_ERROR(ec)) {
		if (xec == 0x0)
			pr_cont("Data parity TLB read error.\n");
		else if (xec == 0x1)
			pr_cont("Poison data provided for TLB fill.\n");
		else
			goto wrong_cu_mce;
	} else if (BUS_ERROR(ec)) {
		if (xec > 2)
			goto wrong_cu_mce;

		pr_cont("Error during attempted NB data read.\n");
	} else if (MEM_ERROR(ec)) {
		switch (xec) {
		case 0x4 ... 0xc:
			pr_cont("%s.\n", f15h_cu_mce_desc[xec - 0x4]);
			break;

		case 0x10 ... 0x14:
			pr_cont("%s.\n", f15h_cu_mce_desc[xec - 0x7]);
			break;

		default:
			goto wrong_cu_mce;
		}
	}

	return;

wrong_cu_mce:
	pr_emerg(HW_ERR "Corrupted CU MCE info?\n");
}

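/* Bank 3: Load/Store unit; not expected on family 0x14 and newer. */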
static void amd_decode_ls_mce(struct mce *m)
{
	u16 ec = EC(m->status);
	u8 xec = XEC(m->status, xec_mask);

	if (boot_cpu_data.x86 >= 0x14) {
		pr_emerg("You shouldn't be seeing an LS MCE on this cpu family,"
			 " please report on LKML.\n");
		return;
	}

	pr_emerg(HW_ERR "Load Store Error");

	if (xec == 0x0) {
		u8 r4 = R4(ec);

		if (!BUS_ERROR(ec) || (r4 != R4_DRD && r4 != R4_DWR))
			goto wrong_ls_mce;

		pr_cont(" during %s.\n", R4_MSG(ec));
	} else
		goto wrong_ls_mce;

	return;

wrong_ls_mce:
	pr_emerg(HW_ERR "Corrupted LS MCE info?\n");
}

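/* Per-family MC4 (Northbridge) extended error code decoders. */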
static bool k8_nb_mce(u16 ec, u8 xec)
{
	bool ret = true;

	switch (xec) {
	case 0x1:
		pr_cont("CRC error detected on HT link.\n");
		break;

	case 0x5:
		pr_cont("Invalid GART PTE entry during GART table walk.\n");
		break;

	case 0x6:
		pr_cont("Unsupported atomic RMW received from an IO link.\n");
		break;

	case 0x0:
	case 0x8:
		if (boot_cpu_data.x86 == 0x11)
			return false;

		pr_cont("DRAM ECC error detected on the NB.\n");
		break;

	case 0xd:
		pr_cont("Parity error on the DRAM addr/ctl signals.\n");
		break;

	default:
		ret = false;
		break;
	}

	return ret;
}

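/*
 * xec values not handled by k8_nb_mce() are mapped onto f10h_nb_mce_desc[]
 * with a per-range offset.
 */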
static bool f10h_nb_mce(u16 ec, u8 xec)
{
	bool ret = true;
	u8 offset = 0;

	if (k8_nb_mce(ec, xec))
		return true;

	switch (xec) {
	case 0xa ... 0xc:
		offset = 10;
		break;

	case 0xe:
		offset = 11;
		break;

	case 0xf:
		if (TLB_ERROR(ec))
			pr_cont("GART Table Walk data error.\n");
		else if (BUS_ERROR(ec))
			pr_cont("DMA Exclusion Vector Table Walk error.\n");
		else
			ret = false;

		goto out;
		break;

	case 0x19:
		if (boot_cpu_data.x86 == 0x15)
			pr_cont("Compute Unit Data Error.\n");
		else
			ret = false;

		goto out;
		break;

	case 0x1c ... 0x1f:
		offset = 24;
		break;

	default:
		ret = false;

		goto out;
		break;
	}

	pr_cont("%s.\n", f10h_nb_mce_desc[xec - offset]);

out:
	return ret;
}

static bool nb_noop_mce(u16 ec, u8 xec)
{
	return false;
}

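/*
 * Bank 4: Northbridge errors. Common xec values are decoded here; family
 * specifics go through fam_ops->nb_mce(), and DRAM ECC errors are handed to
 * the decoder registered via amd_register_ecc_decoder().
 */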
void amd_decode_nb_mce(struct mce *m)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;
	int node_id = amd_get_nb_id(m->extcpu);
	u16 ec = EC(m->status);
	u8 xec = XEC(m->status, 0x1f);

	pr_emerg(HW_ERR "Northbridge Error (node %d): ", node_id);

	switch (xec) {
	case 0x2:
		pr_cont("Sync error (sync packets on HT link detected).\n");
		return;

	case 0x3:
		pr_cont("HT Master abort.\n");
		return;

	case 0x4:
		pr_cont("HT Target abort.\n");
		return;

	case 0x7:
		pr_cont("NB Watchdog timeout.\n");
		return;

	case 0x9:
		pr_cont("SVM DMA Exclusion Vector error.\n");
		return;

	default:
		break;
	}

	if (!fam_ops->nb_mce(ec, xec))
		goto wrong_nb_mce;

	if (c->x86 == 0xf || c->x86 == 0x10 || c->x86 == 0x15)
		if ((xec == 0x8 || xec == 0x0) && nb_bus_decoder)
			nb_bus_decoder(node_id, m);

	return;

wrong_nb_mce:
	pr_emerg(HW_ERR "Corrupted NB MCE info?\n");
}
EXPORT_SYMBOL_GPL(amd_decode_nb_mce);

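/* Bank 5: FIROB on pre-F15h parts, Execution Unit on family 0x15. */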
static void amd_decode_fr_mce(struct mce *m)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;
	u8 xec = XEC(m->status, xec_mask);

	if (c->x86 == 0xf || c->x86 == 0x11)
		goto wrong_fr_mce;

	if (c->x86 != 0x15 && xec != 0x0)
		goto wrong_fr_mce;

	pr_emerg(HW_ERR "%s Error: ",
		 (c->x86 == 0x15 ? "Execution Unit" : "FIROB"));

	if (xec == 0x0 || xec == 0xc)
		pr_cont("%s.\n", fr_ex_mce_desc[xec]);
	else if (xec < 0xd)
		pr_cont("%s parity error.\n", fr_ex_mce_desc[xec]);
	else
		goto wrong_fr_mce;

	return;

wrong_fr_mce:
	pr_emerg(HW_ERR "Corrupted FR MCE info?\n");
}

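/* Bank 6: Floating Point Unit errors. */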
static void amd_decode_fp_mce(struct mce *m)
{
	u8 xec = XEC(m->status, xec_mask);

	pr_emerg(HW_ERR "Floating Point Unit Error: ");

	switch (xec) {
	case 0x1:
		pr_cont("Free List");
		break;

	case 0x2:
		pr_cont("Physical Register File");
		break;

	case 0x3:
		pr_cont("Retire Queue");
		break;

	case 0x4:
		pr_cont("Scheduler table");
		break;

	case 0x5:
		pr_cont("Status Register File");
		break;

	default:
		goto wrong_fp_mce;
		break;
	}

	pr_cont(" parity error.\n");

	return;

wrong_fp_mce:
	pr_emerg(HW_ERR "Corrupted FP MCE info?\n");
}

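/* Decode the MCA error code in the low 16 bits of MCi_STATUS. */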
static inline void amd_decode_err_code(u16 ec)
{
	pr_emerg(HW_ERR "cache level: %s", LL_MSG(ec));

	if (BUS_ERROR(ec))
		pr_cont(", mem/io: %s", II_MSG(ec));
	else
		pr_cont(", tx: %s", TT_MSG(ec));

	if (MEM_ERROR(ec) || BUS_ERROR(ec)) {
		pr_cont(", mem-tx: %s", R4_MSG(ec));

		if (BUS_ERROR(ec))
			pr_cont(", part-proc: %s (%s)", PP_MSG(ec), TO_MSG(ec));
	}

	pr_cont("\n");
}

/*
 * Filter out unwanted MCE signatures here.
 */
static bool amd_filter_mce(struct mce *m)
{
	u8 xec = (m->status >> 16) & 0x1f;

	/*
	 * NB GART TLB error reporting is disabled by default.
	 */
	if (m->bank == 4 && xec == 0x5 && !report_gart_errors)
		return true;

	return false;
}

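/*
 * Notifier callback: pretty-print the MCi_STATUS bits and dispatch to the
 * per-bank decoder.
 */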
int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
{
	struct mce *m = (struct mce *)data;
	struct cpuinfo_x86 *c = &boot_cpu_data;
	int ecc;

	if (amd_filter_mce(m))
		return NOTIFY_STOP;

	pr_emerg(HW_ERR "CPU:%d\tMC%d_STATUS[%s|%s|%s|%s|%s",
		m->extcpu, m->bank,
		((m->status & MCI_STATUS_OVER)	? "Over"  : "-"),
		((m->status & MCI_STATUS_UC)	? "UE"	  : "CE"),
		((m->status & MCI_STATUS_MISCV)	? "MiscV" : "-"),
		((m->status & MCI_STATUS_PCC)	? "PCC"	  : "-"),
		((m->status & MCI_STATUS_ADDRV)	? "AddrV" : "-"));

	if (c->x86 == 0x15)
		pr_cont("|%s|%s",
			((m->status & BIT_64(44)) ? "Deferred" : "-"),
			((m->status & BIT_64(43)) ? "Poison"   : "-"));

	/* do the CECC/UECC bits [46:45] together */
	ecc = (m->status >> 45) & 0x3;
	if (ecc)
		pr_cont("|%sECC", ((ecc == 2) ? "C" : "U"));

	pr_cont("]: 0x%016llx\n", m->status);

	if (m->status & MCI_STATUS_ADDRV)
		pr_emerg(HW_ERR "\tMC%d_ADDR: 0x%016llx\n", m->bank, m->addr);

	switch (m->bank) {
	case 0:
		amd_decode_dc_mce(m);
		break;

	case 1:
		amd_decode_ic_mce(m);
		break;

	case 2:
		if (c->x86 == 0x15)
			amd_decode_cu_mce(m);
		else
			amd_decode_bu_mce(m);
		break;

	case 3:
		amd_decode_ls_mce(m);
		break;

	case 4:
		amd_decode_nb_mce(m);
		break;

	case 5:
		amd_decode_fr_mce(m);
		break;

	case 6:
		amd_decode_fp_mce(m);
		break;

	default:
		break;
	}

	amd_decode_err_code(m->status & 0xffff);

	return NOTIFY_STOP;
}
EXPORT_SYMBOL_GPL(amd_decode_mce);

static struct notifier_block amd_mce_dec_nb = {
	.notifier_call	= amd_decode_mce,
};

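/*
 * Select the per-family decoder callbacks and register on the MCE decode
 * chain.
 */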
static int __init mce_amd_init(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (c->x86_vendor != X86_VENDOR_AMD)
		return 0;

	if ((c->x86 < 0xf || c->x86 > 0x12) &&
	    (c->x86 != 0x14 || c->x86_model > 0xf) &&
	    (c->x86 != 0x15 || c->x86_model > 0xf))
		return 0;

	fam_ops = kzalloc(sizeof(struct amd_decoder_ops), GFP_KERNEL);
	if (!fam_ops)
		return -ENOMEM;

	switch (c->x86) {
	case 0xf:
		fam_ops->dc_mce = k8_dc_mce;
		fam_ops->ic_mce = k8_ic_mce;
		fam_ops->nb_mce = k8_nb_mce;
		break;

	case 0x10:
		fam_ops->dc_mce = f10h_dc_mce;
		fam_ops->ic_mce = k8_ic_mce;
		fam_ops->nb_mce = f10h_nb_mce;
		break;

	case 0x11:
		fam_ops->dc_mce = k8_dc_mce;
		fam_ops->ic_mce = k8_ic_mce;
		fam_ops->nb_mce = f10h_nb_mce;
		break;

	case 0x12:
		fam_ops->dc_mce = f12h_dc_mce;
		fam_ops->ic_mce = k8_ic_mce;
		fam_ops->nb_mce = nb_noop_mce;
		break;

	case 0x14:
		nb_err_cpumask  = 0x3;
		fam_ops->dc_mce = f14h_dc_mce;
		fam_ops->ic_mce = f14h_ic_mce;
		fam_ops->nb_mce = nb_noop_mce;
		break;

	case 0x15:
		xec_mask = 0x1f;
		fam_ops->dc_mce = f15h_dc_mce;
		fam_ops->ic_mce = f15h_ic_mce;
		fam_ops->nb_mce = f10h_nb_mce;
		break;

	default:
		printk(KERN_WARNING "Huh? What family is that: %d?!\n", c->x86);
		kfree(fam_ops);
		return -EINVAL;
	}

	pr_info("MCE: In-kernel MCE decoding enabled.\n");

	mce_register_decode_chain(&amd_mce_dec_nb);

	return 0;
}
early_initcall(mce_amd_init);

#ifdef MODULE
static void __exit mce_amd_exit(void)
{
	mce_unregister_decode_chain(&amd_mce_dec_nb);
	kfree(fam_ops);
}

MODULE_DESCRIPTION("AMD MCE decoder");
MODULE_ALIAS("edac-mce-amd");
MODULE_LICENSE("GPL");
module_exit(mce_amd_exit);
#endif