/* mce_amd.c — revision b18434cad1740466f7a1c304ea4af0f4d3c874f1 */
1#include <linux/module.h>
2#include <linux/slab.h>
3
4#include "mce_amd.h"
5
/* Per-family decoder callbacks, selected once in mce_amd_init(). */
static struct amd_decoder_ops *fam_ops;

/* Mask for the extended error code in MCi_STATUS; widened to 0x1f on F15h. */
static u8 xec_mask	 = 0xf;
/* Mask for the ErrCpu bits in the NB status high half; 0x3 on F14h. */
static u8 nb_err_cpumask = 0xf;

/* GART TLB walk errors are filtered out unless this is set, see amd_filter_mce(). */
static bool report_gart_errors;
/* Optional DRAM ECC decoder registered by an external (EDAC) module. */
static void (*nb_bus_decoder)(int node_id, struct mce *m, u32 nbcfg);
13
/*
 * Enable/disable reporting of GART TLB walk errors, which are dropped
 * by default in amd_filter_mce().
 */
void amd_report_gart_errors(bool v)
{
	report_gart_errors = v;
}
EXPORT_SYMBOL_GPL(amd_report_gart_errors);
19
/*
 * Register an external DRAM ECC decoder which amd_decode_nb_mce() will
 * invoke for NB DRAM ECC errors (xec 0x0/0x8 on K8/F10h).
 */
void amd_register_ecc_decoder(void (*f)(int, struct mce *, u32))
{
	nb_bus_decoder = f;
}
EXPORT_SYMBOL_GPL(amd_register_ecc_decoder);
25
26void amd_unregister_ecc_decoder(void (*f)(int, struct mce *, u32))
27{
28	if (nb_bus_decoder) {
29		WARN_ON(nb_bus_decoder != f);
30
31		nb_bus_decoder = NULL;
32	}
33}
34EXPORT_SYMBOL_GPL(amd_unregister_ecc_decoder);
35
/*
 * string representation for the different MCA reported error types, see F3x48
 * or MSR0000_0411.
 *
 * Each table is indexed with the corresponding bitfield extracted from the
 * low 16 bits of MCi_STATUS (via the *_MSG() helpers).
 */

/* transaction type */
const char *tt_msgs[] = { "INSN", "DATA", "GEN", "RESV" };
EXPORT_SYMBOL_GPL(tt_msgs);

/* cache level */
const char *ll_msgs[] = { "RESV", "L1", "L2", "L3/GEN" };
EXPORT_SYMBOL_GPL(ll_msgs);

/* memory transaction type */
const char *rrrr_msgs[] = {
       "GEN", "RD", "WR", "DRD", "DWR", "IRD", "PRF", "EV", "SNP"
};
EXPORT_SYMBOL_GPL(rrrr_msgs);

/* participating processor */
const char *pp_msgs[] = { "SRC", "RES", "OBS", "GEN" };
EXPORT_SYMBOL_GPL(pp_msgs);

/* request timeout */
const char *to_msgs[] = { "no timeout",	"timed out" };
EXPORT_SYMBOL_GPL(to_msgs);

/* memory or i/o */
const char *ii_msgs[] = { "MEM", "RESV", "IO", "GEN" };
EXPORT_SYMBOL_GPL(ii_msgs);
66
/*
 * Descriptions for F10h NB extended error codes not covered by the K8
 * decoder; indexed as (xec - offset) in f10h_nb_mce(). Declared
 * "const char * const" (pointers const too) for consistency with the
 * F15h tables below.
 */
static const char * const f10h_nb_mce_desc[] = {
	"HT link data error",
	"Protocol error (link, L3, probe filter, etc.)",
	"Parity error in NB-internal arrays",
	"Link Retry due to IO link transmission error",
	"L3 ECC data cache error",
	"ECC error in L3 cache tag",
	"L3 LRU parity bits error",
	"ECC Error in the Probe Filter directory"
};
77
/*
 * Descriptions for F15h IC extended error codes. Indexed from
 * f15h_ic_mce(): entries 0-10 map xec 0x0-0xa directly; codes 0xb, 0xc,
 * 0xe and 0xf are not defined, hence the offsets used there.
 */
static const char * const f15h_ic_mce_desc[] = {
	"UC during a demand linefill from L2",
	"Parity error during data load from IC",
	"Parity error for IC valid bit",
	"Main tag parity error",
	"Parity error in prediction queue",
	"PFB data/address parity error",
	"Parity error in the branch status reg",
	"PFB promotion address error",
	"Tag error during probe/victimization",
	"Parity error for IC probe tag valid bit",
	"PFB non-cacheable bit parity error",
	"PFB valid bit parity error",			/* xec = 0xd */
	"patch RAM",					/* xec = 0x10 */
	"uop queue",
	"insn buffer",
	"predecode buffer",
	"fetch address FIFO"
};
97
/*
 * Descriptions for F15h CU (combined unit) memory-error extended codes.
 * Indexed from amd_decode_cu_mce(): entries 0-8 cover xec 0x4-0xc,
 * entries 9-13 cover xec 0x10-0x14.
 */
static const char * const f15h_cu_mce_desc[] = {
	"Fill ECC error on data fills",			/* xec = 0x4 */
	"Fill parity error on insn fills",
	"Prefetcher request FIFO parity error",
	"PRQ address parity error",
	"PRQ data parity error",
	"WCC Tag ECC error",
	"WCC Data ECC error",
	"WCB Data parity error",
	"VB Data/ECC error",
	"L2 Tag ECC error",				/* xec = 0x10 */
	"Hard L2 Tag ECC error",
	"Multiple hits on L2 tag",
	"XAB parity error",
	"PRB address parity error"
};
114
115static bool f12h_dc_mce(u16 ec, u8 xec)
116{
117	bool ret = false;
118
119	if (MEM_ERROR(ec)) {
120		u8 ll = ec & 0x3;
121		ret = true;
122
123		if (ll == LL_L2)
124			pr_cont("during L1 linefill from L2.\n");
125		else if (ll == LL_L1)
126			pr_cont("Data/Tag %s error.\n", RRRR_MSG(ec));
127		else
128			ret = false;
129	}
130	return ret;
131}
132
133static bool f10h_dc_mce(u16 ec, u8 xec)
134{
135	u8 r4  = (ec >> 4) & 0xf;
136	u8 ll  = ec & 0x3;
137
138	if (r4 == R4_GEN && ll == LL_L1) {
139		pr_cont("during data scrub.\n");
140		return true;
141	}
142	return f12h_dc_mce(ec, xec);
143}
144
145static bool k8_dc_mce(u16 ec, u8 xec)
146{
147	if (BUS_ERROR(ec)) {
148		pr_cont("during system linefill.\n");
149		return true;
150	}
151
152	return f10h_dc_mce(ec, xec);
153}
154
/*
 * F14h DC MCE decoder.
 *
 * Valid signatures are either memory errors (data transaction at L1) or
 * bus errors (mem/io at LL=LG); anything else returns false so the
 * caller can flag the record as corrupted.
 */
static bool f14h_dc_mce(u16 ec, u8 xec)
{
	u8 r4	 = (ec >> 4) & 0xf;
	u8 ll	 = ec & 0x3;
	u8 tt	 = (ec >> 2) & 0x3;
	u8 ii	 = tt;	/* II and TT occupy the same bits [3:2] of ec */
	bool ret = true;

	if (MEM_ERROR(ec)) {

		if (tt != TT_DATA || ll != LL_L1)
			return false;

		switch (r4) {
		case R4_DRD:
		case R4_DWR:
			pr_cont("Data/Tag parity error due to %s.\n",
				(r4 == R4_DRD ? "load/hw prf" : "store"));
			break;
		case R4_EVICT:
			pr_cont("Copyback parity error on a tag miss.\n");
			break;
		case R4_SNOOP:
			pr_cont("Tag parity error during snoop.\n");
			break;
		default:
			ret = false;
		}
	} else if (BUS_ERROR(ec)) {

		if ((ii != II_MEM && ii != II_IO) || ll != LL_LG)
			return false;

		pr_cont("System read data error on a ");

		switch (r4) {
		case R4_RD:
			pr_cont("TLB reload.\n");
			break;
		case R4_DWR:
			pr_cont("store.\n");
			break;
		case R4_DRD:
			pr_cont("load.\n");
			break;
		default:
			ret = false;
		}
	} else {
		ret = false;
	}

	return ret;
}
209
210static bool f15h_dc_mce(u16 ec, u8 xec)
211{
212	bool ret = true;
213
214	if (MEM_ERROR(ec)) {
215
216		switch (xec) {
217		case 0x0:
218			pr_cont("Data Array access error.\n");
219			break;
220
221		case 0x1:
222			pr_cont("UC error during a linefill from L2/NB.\n");
223			break;
224
225		case 0x2:
226		case 0x11:
227			pr_cont("STQ access error.\n");
228			break;
229
230		case 0x3:
231			pr_cont("SCB access error.\n");
232			break;
233
234		case 0x10:
235			pr_cont("Tag error.\n");
236			break;
237
238		case 0x12:
239			pr_cont("LDQ access error.\n");
240			break;
241
242		default:
243			ret = false;
244		}
245	} else if (BUS_ERROR(ec)) {
246
247		if (!xec)
248			pr_cont("during system linefill.\n");
249		else
250			pr_cont(" Internal %s condition.\n",
251				((xec == 1) ? "livelock" : "deadlock"));
252	} else
253		ret = false;
254
255	return ret;
256}
257
258static void amd_decode_dc_mce(struct mce *m)
259{
260	u16 ec = m->status & 0xffff;
261	u8 xec = (m->status >> 16) & xec_mask;
262
263	pr_emerg(HW_ERR "Data Cache Error: ");
264
265	/* TLB error signatures are the same across families */
266	if (TLB_ERROR(ec)) {
267		u8 tt = (ec >> 2) & 0x3;
268
269		if (tt == TT_DATA) {
270			pr_cont("%s TLB %s.\n", LL_MSG(ec),
271				((xec == 2) ? "locked miss"
272					    : (xec ? "multimatch" : "parity")));
273			return;
274		}
275	} else if (fam_ops->dc_mce(ec, xec))
276		;
277	else
278		pr_emerg(HW_ERR "Corrupted DC MCE info?\n");
279}
280
281static bool k8_ic_mce(u16 ec, u8 xec)
282{
283	u8 ll	 = ec & 0x3;
284	u8 r4	 = (ec >> 4) & 0xf;
285	bool ret = true;
286
287	if (!MEM_ERROR(ec))
288		return false;
289
290	if (ll == 0x2)
291		pr_cont("during a linefill from L2.\n");
292	else if (ll == 0x1) {
293		switch (r4) {
294		case R4_IRD:
295			pr_cont("Parity error during data load.\n");
296			break;
297
298		case R4_EVICT:
299			pr_cont("Copyback Parity/Victim error.\n");
300			break;
301
302		case R4_SNOOP:
303			pr_cont("Tag Snoop error.\n");
304			break;
305
306		default:
307			ret = false;
308			break;
309		}
310	} else
311		ret = false;
312
313	return ret;
314}
315
316static bool f14h_ic_mce(u16 ec, u8 xec)
317{
318	u8 ll    = ec & 0x3;
319	u8 tt    = (ec >> 2) & 0x3;
320	u8 r4  = (ec >> 4) & 0xf;
321	bool ret = true;
322
323	if (MEM_ERROR(ec)) {
324		if (tt != 0 || ll != 1)
325			ret = false;
326
327		if (r4 == R4_IRD)
328			pr_cont("Data/tag array parity error for a tag hit.\n");
329		else if (r4 == R4_SNOOP)
330			pr_cont("Tag error during snoop/victimization.\n");
331		else
332			ret = false;
333	}
334	return ret;
335}
336
/*
 * F15h IC MCE decoder: only memory errors are valid. The extended
 * error code indexes f15h_ic_mce_desc[] with gaps: codes 0xb, 0xc, 0xe
 * and 0xf are undefined, hence the offsets below.
 */
static bool f15h_ic_mce(u16 ec, u8 xec)
{
	bool ret = true;

	if (!MEM_ERROR(ec))
		return false;

	switch (xec) {
	case 0x0 ... 0xa:
		/* desc[0..10] */
		pr_cont("%s.\n", f15h_ic_mce_desc[xec]);
		break;

	case 0xd:
		/* desc[11]: 0xb and 0xc are undefined, hence -2 */
		pr_cont("%s.\n", f15h_ic_mce_desc[xec-2]);
		break;

	case 0x10 ... 0x14:
		/* desc[12..16]: 0xb, 0xc, 0xe, 0xf undefined, hence -4 */
		pr_cont("Decoder %s parity error.\n", f15h_ic_mce_desc[xec-4]);
		break;

	default:
		ret = false;
	}
	return ret;
}
362
363static void amd_decode_ic_mce(struct mce *m)
364{
365	u16 ec = m->status & 0xffff;
366	u8 xec = (m->status >> 16) & xec_mask;
367
368	pr_emerg(HW_ERR "Instruction Cache Error: ");
369
370	if (TLB_ERROR(ec))
371		pr_cont("%s TLB %s.\n", LL_MSG(ec),
372			(xec ? "multimatch" : "parity error"));
373	else if (BUS_ERROR(ec)) {
374		bool k8 = (boot_cpu_data.x86 == 0xf && (m->status & BIT_64(58)));
375
376		pr_cont("during %s.\n", (k8 ? "system linefill" : "NB data read"));
377	} else if (fam_ops->ic_mce(ec, xec))
378		;
379	else
380		pr_emerg(HW_ERR "Corrupted IC MCE info?\n");
381}
382
/*
 * Decode a BU (bank 2) MCE; used on pre-F15h families (F15h uses the
 * CU decoder instead, see amd_decode_mce()).
 */
static void amd_decode_bu_mce(struct mce *m)
{
	u32 ec = m->status & 0xffff;
	u32 xec = (m->status >> 16) & xec_mask;

	pr_emerg(HW_ERR "Bus Unit Error");

	if (xec == 0x1)
		pr_cont(" in the write data buffers.\n");
	else if (xec == 0x3)
		pr_cont(" in the victim data buffers.\n");
	else if (xec == 0x2 && MEM_ERROR(ec))
		pr_cont(": %s error in the L2 cache tags.\n", RRRR_MSG(ec));
	else if (xec == 0x0) {
		if (TLB_ERROR(ec))
			pr_cont(": %s error in a Page Descriptor Cache or "
				"Guest TLB.\n", TT_MSG(ec));
		else if (BUS_ERROR(ec))
			pr_cont(": %s/ECC error in data read from NB: %s.\n",
				RRRR_MSG(ec), PP_MSG(ec));
		else if (MEM_ERROR(ec)) {
			u8 rrrr = (ec >> 4) & 0xf;

			/* r4 >= 0x7 are the EV/SNP-style transactions */
			if (rrrr >= 0x7)
				pr_cont(": %s error during data copyback.\n",
					RRRR_MSG(ec));
			else if (rrrr <= 0x1)
				pr_cont(": %s parity/ECC error during data "
					"access from L2.\n", RRRR_MSG(ec));
			else
				goto wrong_bu_mce;
		} else
			goto wrong_bu_mce;
	} else
		goto wrong_bu_mce;

	return;

wrong_bu_mce:
	pr_emerg(HW_ERR "Corrupted BU MCE info?\n");
}
424
/*
 * Decode a CU (combined unit, bank 2) MCE; F15h only (see the bank 2
 * dispatch in amd_decode_mce()).
 */
static void amd_decode_cu_mce(struct mce *m)
{
	u16 ec = m->status & 0xffff;
	u8 xec = (m->status >> 16) & xec_mask;

	pr_emerg(HW_ERR "Combined Unit Error: ");

	if (TLB_ERROR(ec)) {
		if (xec == 0x0)
			pr_cont("Data parity TLB read error.\n");
		else if (xec == 0x1)
			pr_cont("Poison data provided for TLB fill.\n");
		else
			goto wrong_cu_mce;
	} else if (BUS_ERROR(ec)) {
		if (xec > 2)
			goto wrong_cu_mce;

		pr_cont("Error during attempted NB data read.\n");
	} else if (MEM_ERROR(ec)) {
		switch (xec) {
		case 0x4 ... 0xc:
			/* desc[0..8] */
			pr_cont("%s.\n", f15h_cu_mce_desc[xec - 0x4]);
			break;

		case 0x10 ... 0x14:
			/* desc[9..13]: 0xd-0xf are undefined, hence -0x7 */
			pr_cont("%s.\n", f15h_cu_mce_desc[xec - 0x7]);
			break;

		default:
			goto wrong_cu_mce;
		}
	}

	return;

wrong_cu_mce:
	pr_emerg(HW_ERR "Corrupted CU MCE info?\n");
}
464
465static void amd_decode_ls_mce(struct mce *m)
466{
467	u16 ec = m->status & 0xffff;
468	u8 xec = (m->status >> 16) & xec_mask;
469
470	if (boot_cpu_data.x86 >= 0x14) {
471		pr_emerg("You shouldn't be seeing an LS MCE on this cpu family,"
472			 " please report on LKML.\n");
473		return;
474	}
475
476	pr_emerg(HW_ERR "Load Store Error");
477
478	if (xec == 0x0) {
479		u8 r4 = (ec >> 4) & 0xf;
480
481		if (!BUS_ERROR(ec) || (r4 != R4_DRD && r4 != R4_DWR))
482			goto wrong_ls_mce;
483
484		pr_cont(" during %s.\n", RRRR_MSG(ec));
485	} else
486		goto wrong_ls_mce;
487
488	return;
489
490wrong_ls_mce:
491	pr_emerg(HW_ERR "Corrupted LS MCE info?\n");
492}
493
/*
 * Decode NB (bank 4) extended error codes shared by K8 and later
 * families; also used as the first stage of f10h_nb_mce(). Returns
 * true when the code was recognized and a message printed.
 */
static bool k8_nb_mce(u16 ec, u8 xec)
{
	bool ret = true;

	switch (xec) {
	case 0x1:
		pr_cont("CRC error detected on HT link.\n");
		break;

	case 0x5:
		pr_cont("Invalid GART PTE entry during GART table walk.\n");
		break;

	case 0x6:
		pr_cont("Unsupported atomic RMW received from an IO link.\n");
		break;

	case 0x0:
	case 0x8:
		/* Fam 0x11 does not report DRAM ECC through these codes */
		if (boot_cpu_data.x86 == 0x11)
			return false;

		pr_cont("DRAM ECC error detected on the NB.\n");
		break;

	case 0xd:
		pr_cont("Parity error on the DRAM addr/ctl signals.\n");
		break;

	default:
		ret = false;
		break;
	}

	return ret;
}
530
531static bool f10h_nb_mce(u16 ec, u8 xec)
532{
533	bool ret = true;
534	u8 offset = 0;
535
536	if (k8_nb_mce(ec, xec))
537		return true;
538
539	switch(xec) {
540	case 0xa ... 0xc:
541		offset = 10;
542		break;
543
544	case 0xe:
545		offset = 11;
546		break;
547
548	case 0xf:
549		if (TLB_ERROR(ec))
550			pr_cont("GART Table Walk data error.\n");
551		else if (BUS_ERROR(ec))
552			pr_cont("DMA Exclusion Vector Table Walk error.\n");
553		else
554			ret = false;
555
556		goto out;
557		break;
558
559	case 0x1c ... 0x1f:
560		offset = 24;
561		break;
562
563	default:
564		ret = false;
565
566		goto out;
567		break;
568	}
569
570	pr_cont("%s.\n", f10h_nb_mce_desc[xec - offset]);
571
572out:
573	return ret;
574}
575
/* Placeholder for families whose NB errors are not decoded here. */
static bool nb_noop_mce(u16 ec, u8 xec)
{
	return false;
}
580
/*
 * amd_decode_nb_mce - decode a northbridge (bank 4) MCE
 * @node_id:	node the error is reported for (printed in the message)
 * @m:		the MCE record
 * @nbcfg:	NB configuration register value, passed through to the
 *		registered DRAM ECC decoder
 *
 * Exported so the EDAC driver can decode NB errors it picks up itself.
 */
void amd_decode_nb_mce(int node_id, struct mce *m, u32 nbcfg)
{
	u8 xec   = (m->status >> 16) & 0x1f;	/* NB xec is 5 bits wide */
	u16 ec   = m->status & 0xffff;
	u32 nbsh = (u32)(m->status >> 32);	/* high half of MCi_STATUS */

	pr_emerg(HW_ERR "Northbridge Error, node %d: ", node_id);

	/*
	 * F10h, revD can disable ErrCpu[3:0] so check that first and also the
	 * value encoding has changed so interpret those differently
	 */
	if ((boot_cpu_data.x86 == 0x10) &&
	    (boot_cpu_data.x86_model > 7)) {
		if (nbsh & K8_NBSH_ERR_CPU_VAL)
			pr_cont(", core: %u", (u8)(nbsh & nb_err_cpumask));
	} else {
		/* older encoding: one bit per associated core */
		u8 assoc_cpus = nbsh & nb_err_cpumask;

		if (assoc_cpus > 0)
			pr_cont(", core: %d", fls(assoc_cpus) - 1);
	}

	/* codes decodable without family-specific knowledge */
	switch (xec) {
	case 0x2:
		pr_cont("Sync error (sync packets on HT link detected).\n");
		return;

	case 0x3:
		pr_cont("HT Master abort.\n");
		return;

	case 0x4:
		pr_cont("HT Target abort.\n");
		return;

	case 0x7:
		pr_cont("NB Watchdog timeout.\n");
		return;

	case 0x9:
		pr_cont("SVM DMA Exclusion Vector error.\n");
		return;

	default:
		break;
	}

	if (!fam_ops->nb_mce(ec, xec))
		goto wrong_nb_mce;

	/* hand DRAM ECC errors to the registered (EDAC) decoder, if any */
	if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10)
		if ((xec == 0x8 || xec == 0x0) && nb_bus_decoder)
			nb_bus_decoder(node_id, m, nbcfg);

	return;

wrong_nb_mce:
	pr_emerg(HW_ERR "Corrupted NB MCE info?\n");
}
EXPORT_SYMBOL_GPL(amd_decode_nb_mce);
642
643static void amd_decode_fr_mce(struct mce *m)
644{
645	if (boot_cpu_data.x86 == 0xf ||
646	    boot_cpu_data.x86 == 0x11)
647		goto wrong_fr_mce;
648
649	/* we have only one error signature so match all fields at once. */
650	if ((m->status & 0xffff) == 0x0f0f) {
651		pr_emerg(HW_ERR "FR Error: CPU Watchdog timer expire.\n");
652		return;
653	}
654
655wrong_fr_mce:
656	pr_emerg(HW_ERR "Corrupted FR MCE info?\n");
657}
658
/*
 * Print the generic, family-independent breakdown of the low 16 bits
 * of MCi_STATUS (transaction type, cache level, etc.).
 */
static inline void amd_decode_err_code(u16 ec)
{
	if (TLB_ERROR(ec)) {
		pr_emerg(HW_ERR "Transaction: %s, Cache Level: %s\n",
			 TT_MSG(ec), LL_MSG(ec));
	} else if (MEM_ERROR(ec)) {
		pr_emerg(HW_ERR "Transaction: %s, Type: %s, Cache Level: %s\n",
			 RRRR_MSG(ec), TT_MSG(ec), LL_MSG(ec));
	} else if (BUS_ERROR(ec)) {
		pr_emerg(HW_ERR "Transaction: %s (%s), %s, Cache Level: %s, "
			 "Participating Processor: %s\n",
			  RRRR_MSG(ec), II_MSG(ec), TO_MSG(ec), LL_MSG(ec),
			  PP_MSG(ec));
	} else
		pr_emerg(HW_ERR "Huh? Unknown MCE error 0x%x\n", ec);
}
675
676/*
677 * Filter out unwanted MCE signatures here.
678 */
679static bool amd_filter_mce(struct mce *m)
680{
681	u8 xec = (m->status >> 16) & 0x1f;
682
683	/*
684	 * NB GART TLB error reporting is disabled by default.
685	 */
686	if (m->bank == 4 && xec == 0x5 && !report_gart_errors)
687		return true;
688
689	return false;
690}
691
/*
 * Notifier callback: decode one MCE record into human-readable form.
 *
 * Prints the generic status bits, then dispatches on the bank number to
 * the per-bank decoder, and finally prints the generic error-code
 * breakdown. Always returns NOTIFY_STOP — either the record was
 * filtered out or it has been fully decoded here.
 */
int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
{
	struct mce *m = (struct mce *)data;
	int node, ecc;

	if (amd_filter_mce(m))
		return NOTIFY_STOP;

	pr_emerg(HW_ERR "MC%d_STATUS: ", m->bank);

	pr_cont("%sorrected error, other errors lost: %s, "
		 "CPU context corrupt: %s",
		 ((m->status & MCI_STATUS_UC) ? "Unc"  : "C"),
		 ((m->status & MCI_STATUS_OVER) ? "yes"  : "no"),
		 ((m->status & MCI_STATUS_PCC) ? "yes" : "no"));

	/* do the two bits[14:13] together */
	ecc = (m->status >> 45) & 0x3;
	if (ecc)
		pr_cont(", %sECC Error", ((ecc == 2) ? "C" : "U"));

	pr_cont("\n");

	switch (m->bank) {
	case 0:
		amd_decode_dc_mce(m);
		break;

	case 1:
		amd_decode_ic_mce(m);
		break;

	case 2:
		/* bank 2 is the combined unit on F15h, bus unit before */
		if (boot_cpu_data.x86 == 0x15)
			amd_decode_cu_mce(m);
		else
			amd_decode_bu_mce(m);
		break;

	case 3:
		amd_decode_ls_mce(m);
		break;

	case 4:
		node = amd_get_nb_id(m->extcpu);
		amd_decode_nb_mce(node, m, 0);
		break;

	case 5:
		amd_decode_fr_mce(m);
		break;

	default:
		break;
	}

	amd_decode_err_code(m->status & 0xffff);

	return NOTIFY_STOP;
}
EXPORT_SYMBOL_GPL(amd_decode_mce);
753
/* Notifier registered on the x86 MCE decoder chain in mce_amd_init(). */
static struct notifier_block amd_mce_dec_nb = {
	.notifier_call	= amd_decode_mce,
};
757
758static int __init mce_amd_init(void)
759{
760	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
761		return 0;
762
763	if ((boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x12) &&
764	    (boot_cpu_data.x86 != 0x14 || boot_cpu_data.x86_model > 0xf))
765		return 0;
766
767	fam_ops = kzalloc(sizeof(struct amd_decoder_ops), GFP_KERNEL);
768	if (!fam_ops)
769		return -ENOMEM;
770
771	switch (boot_cpu_data.x86) {
772	case 0xf:
773		fam_ops->dc_mce = k8_dc_mce;
774		fam_ops->ic_mce = k8_ic_mce;
775		fam_ops->nb_mce = k8_nb_mce;
776		break;
777
778	case 0x10:
779		fam_ops->dc_mce = f10h_dc_mce;
780		fam_ops->ic_mce = k8_ic_mce;
781		fam_ops->nb_mce = f10h_nb_mce;
782		break;
783
784	case 0x11:
785		fam_ops->dc_mce = k8_dc_mce;
786		fam_ops->ic_mce = k8_ic_mce;
787		fam_ops->nb_mce = f10h_nb_mce;
788		break;
789
790	case 0x12:
791		fam_ops->dc_mce = f12h_dc_mce;
792		fam_ops->ic_mce = k8_ic_mce;
793		fam_ops->nb_mce = nb_noop_mce;
794		break;
795
796	case 0x14:
797		nb_err_cpumask  = 0x3;
798		fam_ops->dc_mce = f14h_dc_mce;
799		fam_ops->ic_mce = f14h_ic_mce;
800		fam_ops->nb_mce = nb_noop_mce;
801		break;
802
803	case 0x15:
804		xec_mask = 0x1f;
805		fam_ops->dc_mce = f15h_dc_mce;
806		fam_ops->ic_mce = f15h_ic_mce;
807		break;
808
809	default:
810		printk(KERN_WARNING "Huh? What family is that: %d?!\n",
811				    boot_cpu_data.x86);
812		kfree(fam_ops);
813		return -EINVAL;
814	}
815
816	pr_info("MCE: In-kernel MCE decoding enabled.\n");
817
818	atomic_notifier_chain_register(&x86_mce_decoder_chain, &amd_mce_dec_nb);
819
820	return 0;
821}
822early_initcall(mce_amd_init);
823
#ifdef MODULE
/* Undo mce_amd_init(): drop the notifier and free the ops table. */
static void __exit mce_amd_exit(void)
{
	atomic_notifier_chain_unregister(&x86_mce_decoder_chain, &amd_mce_dec_nb);
	kfree(fam_ops);
}

MODULE_DESCRIPTION("AMD MCE decoder");
MODULE_ALIAS("edac-mce-amd");
MODULE_LICENSE("GPL");
module_exit(mce_amd_exit);
#endif
836