mce_amd.c revision 344f0a0631e1b2784859fbe2351d99dce2652b77
1#include <linux/module.h>
2#include <linux/slab.h>
3
4#include "mce_amd.h"
5
/* Per-family decoder callbacks; allocated and filled in mce_amd_init(). */
static struct amd_decoder_ops *fam_ops;

/* Extended-error-code extraction mask; widened to 0x1f for Fam15h in init. */
static u8 xec_mask	 = 0xf;
/*
 * NOTE(review): written (0x3 for Fam14h) in mce_amd_init() but never read
 * anywhere in this file — presumably vestigial; confirm before removing.
 */
static u8 nb_err_cpumask = 0xf;

/* GART TLB walk errors are filtered out unless this is set (see amd_filter_mce()). */
static bool report_gart_errors;
/* Optional DRAM ECC decoder hook, set via amd_register_ecc_decoder(). */
static void (*nb_bus_decoder)(int node_id, struct mce *m);
13
/*
 * Enable/disable reporting of GART TLB walk errors (bank 4, xec 0x5),
 * which amd_filter_mce() suppresses by default.
 */
void amd_report_gart_errors(bool v)
{
	report_gart_errors = v;
}
EXPORT_SYMBOL_GPL(amd_report_gart_errors);
19
/*
 * Register a callback which amd_decode_nb_mce() invokes for DRAM ECC
 * errors so a caller (e.g. an EDAC driver) can decode them further.
 * Only a single decoder is supported; a second registration overwrites
 * the first.
 */
void amd_register_ecc_decoder(void (*f)(int, struct mce *))
{
	nb_bus_decoder = f;
}
EXPORT_SYMBOL_GPL(amd_register_ecc_decoder);
25
26void amd_unregister_ecc_decoder(void (*f)(int, struct mce *))
27{
28	if (nb_bus_decoder) {
29		WARN_ON(nb_bus_decoder != f);
30
31		nb_bus_decoder = NULL;
32	}
33}
34EXPORT_SYMBOL_GPL(amd_unregister_ecc_decoder);
35
36/*
37 * string representation for the different MCA reported error types, see F3x48
38 * or MSR0000_0411.
39 */
40
/* transaction type — array order must match the hardware field encoding */
const char *tt_msgs[] = { "INSN", "DATA", "GEN", "RESV" };
EXPORT_SYMBOL_GPL(tt_msgs);

/* cache level */
const char *ll_msgs[] = { "RESV", "L1", "L2", "L3/GEN" };
EXPORT_SYMBOL_GPL(ll_msgs);

/* memory transaction type */
const char *rrrr_msgs[] = {
       "GEN", "RD", "WR", "DRD", "DWR", "IRD", "PRF", "EV", "SNP"
};
EXPORT_SYMBOL_GPL(rrrr_msgs);

/* participating processor */
const char *pp_msgs[] = { "SRC", "RES", "OBS", "GEN" };
EXPORT_SYMBOL_GPL(pp_msgs);

/* request timeout */
const char *to_msgs[] = { "no timeout",	"timed out" };
EXPORT_SYMBOL_GPL(to_msgs);

/* memory or i/o */
const char *ii_msgs[] = { "MEM", "RESV", "IO", "GEN" };
EXPORT_SYMBOL_GPL(ii_msgs);
66
/*
 * Fam10h+ NB extended error descriptions. Indexed by f10h_nb_mce() via
 * (xec - offset): xec 0xa-0xc -> [0..2], 0xe -> [3], 0x1c-0x1f -> [4..7].
 */
static const char *f10h_nb_mce_desc[] = {
	"HT link data error",
	"Protocol error (link, L3, probe filter, etc.)",
	"Parity error in NB-internal arrays",
	"Link Retry due to IO link transmission error",
	"L3 ECC data cache error",
	"ECC error in L3 cache tag",
	"L3 LRU parity bits error",
	"ECC Error in the Probe Filter directory"
};
77
/*
 * Fam15h IC extended error descriptions, consumed by f15h_ic_mce().
 * Entries [0..10] correspond to xec 0x0-0xa directly; [11] to xec 0xd;
 * [12..16] to xec 0x10-0x14 (the decoder-array parity errors).
 */
static const char * const f15h_ic_mce_desc[] = {
	"UC during a demand linefill from L2",
	"Parity error during data load from IC",
	"Parity error for IC valid bit",
	"Main tag parity error",
	"Parity error in prediction queue",
	"PFB data/address parity error",
	"Parity error in the branch status reg",
	"PFB promotion address error",
	"Tag error during probe/victimization",
	"Parity error for IC probe tag valid bit",
	"PFB non-cacheable bit parity error",
	"PFB valid bit parity error",			/* xec = 0xd */
	"patch RAM",					/* xec = 0x10 */
	"uop queue",
	"insn buffer",
	"predecode buffer",
	"fetch address FIFO"
};
97
/*
 * Fam15h Combined Unit extended error descriptions, consumed by
 * amd_decode_cu_mce(): entries [0..8] for xec 0x4-0xc, [9..13] for
 * xec 0x10-0x14.
 */
static const char * const f15h_cu_mce_desc[] = {
	"Fill ECC error on data fills",			/* xec = 0x4 */
	"Fill parity error on insn fills",
	"Prefetcher request FIFO parity error",
	"PRQ address parity error",
	"PRQ data parity error",
	"WCC Tag ECC error",
	"WCC Data ECC error",
	"WCB Data parity error",
	"VB Data/ECC error",
	"L2 Tag ECC error",				/* xec = 0x10 */
	"Hard L2 Tag ECC error",
	"Multiple hits on L2 tag",
	"XAB parity error",
	"PRB address parity error"
};
114
/*
 * FIROB/Execution-Unit (bank 5) error descriptions, indexed directly by
 * xec (0x0-0xc) in amd_decode_fr_mce(). Entries 0x0 and 0xc are printed
 * verbatim; the rest get a " parity error" suffix appended.
 */
static const char * const fr_ex_mce_desc[] = {
	"CPU Watchdog timer expire",
	"Wakeup array dest tag",
	"AG payload array",
	"EX payload array",
	"IDRF array",
	"Retire dispatch queue",
	"Mapper checkpoint array",
	"Physical register file EX0 port",
	"Physical register file EX1 port",
	"Physical register file AG0 port",
	"Physical register file AG1 port",
	"Flag register file",
	"DE correctable error could not be corrected"
};
130
131static bool f12h_dc_mce(u16 ec, u8 xec)
132{
133	bool ret = false;
134
135	if (MEM_ERROR(ec)) {
136		u8 ll = LL(ec);
137		ret = true;
138
139		if (ll == LL_L2)
140			pr_cont("during L1 linefill from L2.\n");
141		else if (ll == LL_L1)
142			pr_cont("Data/Tag %s error.\n", R4_MSG(ec));
143		else
144			ret = false;
145	}
146	return ret;
147}
148
149static bool f10h_dc_mce(u16 ec, u8 xec)
150{
151	if (R4(ec) == R4_GEN && LL(ec) == LL_L1) {
152		pr_cont("during data scrub.\n");
153		return true;
154	}
155	return f12h_dc_mce(ec, xec);
156}
157
158static bool k8_dc_mce(u16 ec, u8 xec)
159{
160	if (BUS_ERROR(ec)) {
161		pr_cont("during system linefill.\n");
162		return true;
163	}
164
165	return f10h_dc_mce(ec, xec);
166}
167
/*
 * Fam14h DC MCE decoder.
 *
 * Memory errors must be data-type, L1-level transactions; bus errors must
 * be mem/io at the "LG" level. Within each class, the memory-transaction
 * field (R4) selects the message. Returns false for unrecognized
 * signatures. Note the bus-error path prints a partial line before the R4
 * switch, so a false return there still leaves that prefix emitted.
 */
static bool f14h_dc_mce(u16 ec, u8 xec)
{
	u8 r4	 = R4(ec);
	bool ret = true;

	if (MEM_ERROR(ec)) {

		if (TT(ec) != TT_DATA || LL(ec) != LL_L1)
			return false;

		switch (r4) {
		case R4_DRD:
		case R4_DWR:
			pr_cont("Data/Tag parity error due to %s.\n",
				(r4 == R4_DRD ? "load/hw prf" : "store"));
			break;
		case R4_EVICT:
			pr_cont("Copyback parity error on a tag miss.\n");
			break;
		case R4_SNOOP:
			pr_cont("Tag parity error during snoop.\n");
			break;
		default:
			ret = false;
		}
	} else if (BUS_ERROR(ec)) {

		if ((II(ec) != II_MEM && II(ec) != II_IO) || LL(ec) != LL_LG)
			return false;

		pr_cont("System read data error on a ");

		switch (r4) {
		case R4_RD:
			pr_cont("TLB reload.\n");
			break;
		case R4_DWR:
			pr_cont("store.\n");
			break;
		case R4_DRD:
			pr_cont("load.\n");
			break;
		default:
			ret = false;
		}
	} else {
		/* neither a memory nor a bus error: not a DC signature */
		ret = false;
	}

	return ret;
}
219
220static bool f15h_dc_mce(u16 ec, u8 xec)
221{
222	bool ret = true;
223
224	if (MEM_ERROR(ec)) {
225
226		switch (xec) {
227		case 0x0:
228			pr_cont("Data Array access error.\n");
229			break;
230
231		case 0x1:
232			pr_cont("UC error during a linefill from L2/NB.\n");
233			break;
234
235		case 0x2:
236		case 0x11:
237			pr_cont("STQ access error.\n");
238			break;
239
240		case 0x3:
241			pr_cont("SCB access error.\n");
242			break;
243
244		case 0x10:
245			pr_cont("Tag error.\n");
246			break;
247
248		case 0x12:
249			pr_cont("LDQ access error.\n");
250			break;
251
252		default:
253			ret = false;
254		}
255	} else if (BUS_ERROR(ec)) {
256
257		if (!xec)
258			pr_cont("System Read Data Error.\n");
259		else
260			pr_cont(" Internal error condition type %d.\n", xec);
261	} else
262		ret = false;
263
264	return ret;
265}
266
267static void amd_decode_dc_mce(struct mce *m)
268{
269	u16 ec = EC(m->status);
270	u8 xec = XEC(m->status, xec_mask);
271
272	pr_emerg(HW_ERR "Data Cache Error: ");
273
274	/* TLB error signatures are the same across families */
275	if (TLB_ERROR(ec)) {
276		if (TT(ec) == TT_DATA) {
277			pr_cont("%s TLB %s.\n", LL_MSG(ec),
278				((xec == 2) ? "locked miss"
279					    : (xec ? "multimatch" : "parity")));
280			return;
281		}
282	} else if (fam_ops->dc_mce(ec, xec))
283		;
284	else
285		pr_emerg(HW_ERR "Corrupted DC MCE info?\n");
286}
287
288static bool k8_ic_mce(u16 ec, u8 xec)
289{
290	u8 ll	 = LL(ec);
291	bool ret = true;
292
293	if (!MEM_ERROR(ec))
294		return false;
295
296	if (ll == 0x2)
297		pr_cont("during a linefill from L2.\n");
298	else if (ll == 0x1) {
299		switch (R4(ec)) {
300		case R4_IRD:
301			pr_cont("Parity error during data load.\n");
302			break;
303
304		case R4_EVICT:
305			pr_cont("Copyback Parity/Victim error.\n");
306			break;
307
308		case R4_SNOOP:
309			pr_cont("Tag Snoop error.\n");
310			break;
311
312		default:
313			ret = false;
314			break;
315		}
316	} else
317		ret = false;
318
319	return ret;
320}
321
322static bool f14h_ic_mce(u16 ec, u8 xec)
323{
324	u8 r4    = R4(ec);
325	bool ret = true;
326
327	if (MEM_ERROR(ec)) {
328		if (TT(ec) != 0 || LL(ec) != 1)
329			ret = false;
330
331		if (r4 == R4_IRD)
332			pr_cont("Data/tag array parity error for a tag hit.\n");
333		else if (r4 == R4_SNOOP)
334			pr_cont("Tag error during snoop/victimization.\n");
335		else
336			ret = false;
337	}
338	return ret;
339}
340
/*
 * Fam15h IC MCE decoder (memory errors only).
 *
 * The extended error code indexes f15h_ic_mce_desc[]:
 *   xec 0x0-0xa   -> desc[0..10]
 *   xec 0xd       -> desc[11] (0xb/0xc are unused codes, hence the -2)
 *   xec 0x10-0x14 -> desc[12..16] (hence the -4)
 */
static bool f15h_ic_mce(u16 ec, u8 xec)
{
	bool ret = true;

	if (!MEM_ERROR(ec))
		return false;

	switch (xec) {
	case 0x0 ... 0xa:
		pr_cont("%s.\n", f15h_ic_mce_desc[xec]);
		break;

	case 0xd:
		pr_cont("%s.\n", f15h_ic_mce_desc[xec-2]);
		break;

	case 0x10 ... 0x14:
		pr_cont("Decoder %s parity error.\n", f15h_ic_mce_desc[xec-4]);
		break;

	default:
		ret = false;
	}
	return ret;
}
366
367static void amd_decode_ic_mce(struct mce *m)
368{
369	u16 ec = EC(m->status);
370	u8 xec = XEC(m->status, xec_mask);
371
372	pr_emerg(HW_ERR "Instruction Cache Error: ");
373
374	if (TLB_ERROR(ec))
375		pr_cont("%s TLB %s.\n", LL_MSG(ec),
376			(xec ? "multimatch" : "parity error"));
377	else if (BUS_ERROR(ec)) {
378		bool k8 = (boot_cpu_data.x86 == 0xf && (m->status & BIT_64(58)));
379
380		pr_cont("during %s.\n", (k8 ? "system linefill" : "NB data read"));
381	} else if (fam_ops->ic_mce(ec, xec))
382		;
383	else
384		pr_emerg(HW_ERR "Corrupted IC MCE info?\n");
385}
386
/*
 * Decode a Bus Unit (bank 2) MCE on pre-Fam15h parts (Fam15h bank 2 is
 * the Combined Unit, see amd_decode_cu_mce()).
 *
 * xec 0x1/0x3 name the write/victim data buffers directly; xec 0x2 plus a
 * memory error is an L2 tag error; xec 0x0 is further qualified by the
 * error-code type (TLB / bus / memory). Everything else is flagged as
 * corrupted info.
 */
static void amd_decode_bu_mce(struct mce *m)
{
	u16 ec = EC(m->status);
	u8 xec = XEC(m->status, xec_mask);

	pr_emerg(HW_ERR "Bus Unit Error");

	if (xec == 0x1)
		pr_cont(" in the write data buffers.\n");
	else if (xec == 0x3)
		pr_cont(" in the victim data buffers.\n");
	else if (xec == 0x2 && MEM_ERROR(ec))
		pr_cont(": %s error in the L2 cache tags.\n", R4_MSG(ec));
	else if (xec == 0x0) {
		if (TLB_ERROR(ec))
			pr_cont(": %s error in a Page Descriptor Cache or "
				"Guest TLB.\n", TT_MSG(ec));
		else if (BUS_ERROR(ec))
			pr_cont(": %s/ECC error in data read from NB: %s.\n",
				R4_MSG(ec), PP_MSG(ec));
		else if (MEM_ERROR(ec)) {
			u8 r4 = R4(ec);

			/* r4 >= 0x7: copyback; r4 <= 0x1: L2 data access */
			if (r4 >= 0x7)
				pr_cont(": %s error during data copyback.\n",
					R4_MSG(ec));
			else if (r4 <= 0x1)
				pr_cont(": %s parity/ECC error during data "
					"access from L2.\n", R4_MSG(ec));
			else
				goto wrong_bu_mce;
		} else
			goto wrong_bu_mce;
	} else
		goto wrong_bu_mce;

	return;

wrong_bu_mce:
	pr_emerg(HW_ERR "Corrupted BU MCE info?\n");
}
428
/*
 * Decode a Fam15h Combined Unit (bank 2) MCE.
 *
 * TLB errors distinguish parity read errors (xec 0) from poisoned fills
 * (xec 1); bus errors accept xec 0-2; memory-error extended codes index
 * f15h_cu_mce_desc[]: xec 0x4-0xc -> entries 0-8 (hence -0x4), and
 * xec 0x10-0x14 -> entries 9-13 (hence -0x7).
 */
static void amd_decode_cu_mce(struct mce *m)
{
	u16 ec = EC(m->status);
	u8 xec = XEC(m->status, xec_mask);

	pr_emerg(HW_ERR "Combined Unit Error: ");

	if (TLB_ERROR(ec)) {
		if (xec == 0x0)
			pr_cont("Data parity TLB read error.\n");
		else if (xec == 0x1)
			pr_cont("Poison data provided for TLB fill.\n");
		else
			goto wrong_cu_mce;
	} else if (BUS_ERROR(ec)) {
		if (xec > 2)
			goto wrong_cu_mce;

		pr_cont("Error during attempted NB data read.\n");
	} else if (MEM_ERROR(ec)) {
		switch (xec) {
		case 0x4 ... 0xc:
			pr_cont("%s.\n", f15h_cu_mce_desc[xec - 0x4]);
			break;

		case 0x10 ... 0x14:
			pr_cont("%s.\n", f15h_cu_mce_desc[xec - 0x7]);
			break;

		default:
			goto wrong_cu_mce;
		}
	}

	return;

wrong_cu_mce:
	pr_emerg(HW_ERR "Corrupted CU MCE info?\n");
}
468
469static void amd_decode_ls_mce(struct mce *m)
470{
471	u16 ec = EC(m->status);
472	u8 xec = XEC(m->status, xec_mask);
473
474	if (boot_cpu_data.x86 >= 0x14) {
475		pr_emerg("You shouldn't be seeing an LS MCE on this cpu family,"
476			 " please report on LKML.\n");
477		return;
478	}
479
480	pr_emerg(HW_ERR "Load Store Error");
481
482	if (xec == 0x0) {
483		u8 r4 = R4(ec);
484
485		if (!BUS_ERROR(ec) || (r4 != R4_DRD && r4 != R4_DWR))
486			goto wrong_ls_mce;
487
488		pr_cont(" during %s.\n", R4_MSG(ec));
489	} else
490		goto wrong_ls_mce;
491
492	return;
493
494wrong_ls_mce:
495	pr_emerg(HW_ERR "Corrupted LS MCE info?\n");
496}
497
498static bool k8_nb_mce(u16 ec, u8 xec)
499{
500	bool ret = true;
501
502	switch (xec) {
503	case 0x1:
504		pr_cont("CRC error detected on HT link.\n");
505		break;
506
507	case 0x5:
508		pr_cont("Invalid GART PTE entry during GART table walk.\n");
509		break;
510
511	case 0x6:
512		pr_cont("Unsupported atomic RMW received from an IO link.\n");
513		break;
514
515	case 0x0:
516	case 0x8:
517		if (boot_cpu_data.x86 == 0x11)
518			return false;
519
520		pr_cont("DRAM ECC error detected on the NB.\n");
521		break;
522
523	case 0xd:
524		pr_cont("Parity error on the DRAM addr/ctl signals.\n");
525		break;
526
527	default:
528		ret = false;
529		break;
530	}
531
532	return ret;
533}
534
535static bool f10h_nb_mce(u16 ec, u8 xec)
536{
537	bool ret = true;
538	u8 offset = 0;
539
540	if (k8_nb_mce(ec, xec))
541		return true;
542
543	switch(xec) {
544	case 0xa ... 0xc:
545		offset = 10;
546		break;
547
548	case 0xe:
549		offset = 11;
550		break;
551
552	case 0xf:
553		if (TLB_ERROR(ec))
554			pr_cont("GART Table Walk data error.\n");
555		else if (BUS_ERROR(ec))
556			pr_cont("DMA Exclusion Vector Table Walk error.\n");
557		else
558			ret = false;
559
560		goto out;
561		break;
562
563	case 0x19:
564		if (boot_cpu_data.x86 == 0x15)
565			pr_cont("Compute Unit Data Error.\n");
566		else
567			ret = false;
568
569		goto out;
570		break;
571
572	case 0x1c ... 0x1f:
573		offset = 24;
574		break;
575
576	default:
577		ret = false;
578
579		goto out;
580		break;
581	}
582
583	pr_cont("%s.\n", f10h_nb_mce_desc[xec - offset]);
584
585out:
586	return ret;
587}
588
/* For families without a decodable northbridge (Fam12h/Fam14h): no-op. */
static bool nb_noop_mce(u16 ec, u8 xec)
{
	return false;
}
593
/*
 * Decode a Northbridge (bank 4) MCE.
 *
 * A handful of extended error codes are family-independent and handled
 * directly; the rest go through the per-family ->nb_mce() handler. For
 * DRAM ECC errors (xec 0x0/0x8) on families 0xf/0x10/0x15, a decoder
 * registered via amd_register_ecc_decoder() gets a chance to decode
 * further, keyed by the node id of the reporting CPU.
 */
void amd_decode_nb_mce(struct mce *m)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;
	int node_id = amd_get_nb_id(m->extcpu);
	u16 ec = EC(m->status);
	/* NB extended error codes are always 5 bits wide */
	u8 xec = XEC(m->status, 0x1f);

	pr_emerg(HW_ERR "Northbridge Error (node %d): ", node_id);

	/* family-independent signatures first */
	switch (xec) {
	case 0x2:
		pr_cont("Sync error (sync packets on HT link detected).\n");
		return;

	case 0x3:
		pr_cont("HT Master abort.\n");
		return;

	case 0x4:
		pr_cont("HT Target abort.\n");
		return;

	case 0x7:
		pr_cont("NB Watchdog timeout.\n");
		return;

	case 0x9:
		pr_cont("SVM DMA Exclusion Vector error.\n");
		return;

	default:
		break;
	}

	if (!fam_ops->nb_mce(ec, xec))
		goto wrong_nb_mce;

	if (c->x86 == 0xf || c->x86 == 0x10 || c->x86 == 0x15)
		if ((xec == 0x8 || xec == 0x0) && nb_bus_decoder)
			nb_bus_decoder(node_id, m);

	return;

wrong_nb_mce:
	pr_emerg(HW_ERR "Corrupted NB MCE info?\n");
}
EXPORT_SYMBOL_GPL(amd_decode_nb_mce);
641
/*
 * Decode a bank 5 MCE: the "Execution Unit" on Fam15h, "FIROB" on the
 * other supported families. K8 (0xf) and Fam11h errors here are treated
 * as corrupted, as are non-zero extended codes on anything but Fam15h.
 *
 * fr_ex_mce_desc[] is indexed directly by xec: entries 0x0 and 0xc are
 * full sentences; 0x1-0xb get a " parity error" suffix.
 */
static void amd_decode_fr_mce(struct mce *m)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;
	u8 xec = XEC(m->status, xec_mask);

	if (c->x86 == 0xf || c->x86 == 0x11)
		goto wrong_fr_mce;

	/* only Fam15h defines extended codes beyond 0x0 */
	if (c->x86 != 0x15 && xec != 0x0)
		goto wrong_fr_mce;

	pr_emerg(HW_ERR "%s Error: ",
		 (c->x86 == 0x15 ? "Execution Unit" : "FIROB"));

	if (xec == 0x0 || xec == 0xc)
		pr_cont("%s.\n", fr_ex_mce_desc[xec]);
	else if (xec < 0xd)
		pr_cont("%s parity error.\n", fr_ex_mce_desc[xec]);
	else
		goto wrong_fr_mce;

	return;

wrong_fr_mce:
	pr_emerg(HW_ERR "Corrupted FR MCE info?\n");
}
668
669static void amd_decode_fp_mce(struct mce *m)
670{
671	u8 xec = XEC(m->status, xec_mask);
672
673	pr_emerg(HW_ERR "Floating Point Unit Error: ");
674
675	switch (xec) {
676	case 0x1:
677		pr_cont("Free List");
678		break;
679
680	case 0x2:
681		pr_cont("Physical Register File");
682		break;
683
684	case 0x3:
685		pr_cont("Retire Queue");
686		break;
687
688	case 0x4:
689		pr_cont("Scheduler table");
690		break;
691
692	case 0x5:
693		pr_cont("Status Register File");
694		break;
695
696	default:
697		goto wrong_fp_mce;
698		break;
699	}
700
701	pr_cont(" parity error.\n");
702
703	return;
704
705wrong_fp_mce:
706	pr_emerg(HW_ERR "Corrupted FP MCE info?\n");
707}
708
709static inline void amd_decode_err_code(u16 ec)
710{
711
712	pr_emerg(HW_ERR "cache level: %s", LL_MSG(ec));
713
714	if (BUS_ERROR(ec))
715		pr_cont(", mem/io: %s", II_MSG(ec));
716	else
717		pr_cont(", tx: %s", TT_MSG(ec));
718
719	if (MEM_ERROR(ec) || BUS_ERROR(ec)) {
720		pr_cont(", mem-tx: %s", R4_MSG(ec));
721
722		if (BUS_ERROR(ec))
723			pr_cont(", part-proc: %s (%s)", PP_MSG(ec), TO_MSG(ec));
724	}
725
726	pr_cont("\n");
727}
728
729/*
730 * Filter out unwanted MCE signatures here.
731 */
732static bool amd_filter_mce(struct mce *m)
733{
734	u8 xec = (m->status >> 16) & 0x1f;
735
736	/*
737	 * NB GART TLB error reporting is disabled by default.
738	 */
739	if (m->bank == 4 && xec == 0x5 && !report_gart_errors)
740		return true;
741
742	return false;
743}
744
/*
 * MCE decode-chain notifier callback: print a human-readable summary of
 * one machine-check record, then dispatch to the per-bank decoder.
 * Always returns NOTIFY_STOP (this decoder is terminal in the chain).
 */
int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
{
	struct mce *m = (struct mce *)data;
	struct cpuinfo_x86 *c = &boot_cpu_data;
	int ecc;

	/* drop filtered signatures (e.g. GART TLB walk errors) early */
	if (amd_filter_mce(m))
		return NOTIFY_STOP;

	pr_emerg(HW_ERR "CPU:%d\tMC%d_STATUS[%s|%s|%s|%s|%s",
		m->extcpu, m->bank,
		((m->status & MCI_STATUS_OVER)	? "Over"  : "-"),
		((m->status & MCI_STATUS_UC)	? "UE"	  : "CE"),
		((m->status & MCI_STATUS_MISCV)	? "MiscV" : "-"),
		((m->status & MCI_STATUS_PCC)	? "PCC"	  : "-"),
		((m->status & MCI_STATUS_ADDRV)	? "AddrV" : "-"));

	/* Fam15h adds Deferred (bit 44) and Poison (bit 43) status bits */
	if (c->x86 == 0x15)
		pr_cont("|%s|%s",
			((m->status & BIT_64(44)) ? "Deferred" : "-"),
			((m->status & BIT_64(43)) ? "Poison"   : "-"));

	/*
	 * The two adjacent ECC indication bits (status[46:45]) are
	 * evaluated together: 2 -> corrected ("C"), else uncorrected ("U").
	 * NOTE(review): the old comment said bits [14:13], which does not
	 * match the >> 45 extraction below.
	 */
	ecc = (m->status >> 45) & 0x3;
	if (ecc)
		pr_cont("|%sECC", ((ecc == 2) ? "C" : "U"));

	pr_cont("]: 0x%016llx\n", m->status);

	if (m->status & MCI_STATUS_ADDRV)
		pr_emerg(HW_ERR "\tMC%d_ADDR: 0x%016llx\n", m->bank, m->addr);

	/* per-bank detailed decoding */
	switch (m->bank) {
	case 0:
		amd_decode_dc_mce(m);
		break;

	case 1:
		amd_decode_ic_mce(m);
		break;

	case 2:
		/* bank 2 is the Combined Unit on Fam15h, Bus Unit before */
		if (c->x86 == 0x15)
			amd_decode_cu_mce(m);
		else
			amd_decode_bu_mce(m);
		break;

	case 3:
		amd_decode_ls_mce(m);
		break;

	case 4:
		amd_decode_nb_mce(m);
		break;

	case 5:
		amd_decode_fr_mce(m);
		break;

	case 6:
		amd_decode_fp_mce(m);
		break;

	default:
		break;
	}

	/* finally, the generic error-code fields in status[15:0] */
	amd_decode_err_code(m->status & 0xffff);

	return NOTIFY_STOP;
}
EXPORT_SYMBOL_GPL(amd_decode_mce);
818
/* Registered on the MCE decode chain in mce_amd_init(). */
static struct notifier_block amd_mce_dec_nb = {
	.notifier_call	= amd_decode_mce,
};
822
/*
 * Set up in-kernel AMD MCE decoding.
 *
 * Bails out silently (return 0) on non-AMD CPUs and on unsupported
 * families: supported are K8 (0xf) through Fam12h, plus the early models
 * (model <= 0xf) of Fam14h and Fam15h. On success, allocates fam_ops,
 * installs the per-family DC/IC/NB handlers and hooks into the MCE
 * decode chain.
 */
static int __init mce_amd_init(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (c->x86_vendor != X86_VENDOR_AMD)
		return 0;

	if ((c->x86 < 0xf || c->x86 > 0x12) &&
	    (c->x86 != 0x14 || c->x86_model > 0xf) &&
	    (c->x86 != 0x15 || c->x86_model > 0xf))
		return 0;

	fam_ops = kzalloc(sizeof(struct amd_decoder_ops), GFP_KERNEL);
	if (!fam_ops)
		return -ENOMEM;

	switch (c->x86) {
	case 0xf:
		fam_ops->dc_mce = k8_dc_mce;
		fam_ops->ic_mce = k8_ic_mce;
		fam_ops->nb_mce = k8_nb_mce;
		break;

	case 0x10:
		fam_ops->dc_mce = f10h_dc_mce;
		fam_ops->ic_mce = k8_ic_mce;
		fam_ops->nb_mce = f10h_nb_mce;
		break;

	case 0x11:
		fam_ops->dc_mce = k8_dc_mce;
		fam_ops->ic_mce = k8_ic_mce;
		fam_ops->nb_mce = f10h_nb_mce;
		break;

	case 0x12:
		fam_ops->dc_mce = f12h_dc_mce;
		fam_ops->ic_mce = k8_ic_mce;
		fam_ops->nb_mce = nb_noop_mce;
		break;

	case 0x14:
		nb_err_cpumask  = 0x3;
		fam_ops->dc_mce = f14h_dc_mce;
		fam_ops->ic_mce = f14h_ic_mce;
		fam_ops->nb_mce = nb_noop_mce;
		break;

	case 0x15:
		/* Fam15h uses 5-bit extended error codes */
		xec_mask = 0x1f;
		fam_ops->dc_mce = f15h_dc_mce;
		fam_ops->ic_mce = f15h_ic_mce;
		fam_ops->nb_mce = f10h_nb_mce;
		break;

	default:
		printk(KERN_WARNING "Huh? What family is that: %d?!\n", c->x86);
		kfree(fam_ops);
		return -EINVAL;
	}

	pr_info("MCE: In-kernel MCE decoding enabled.\n");

	mce_register_decode_chain(&amd_mce_dec_nb);

	return 0;
}
early_initcall(mce_amd_init);
891
#ifdef MODULE
/* Module teardown: unhook from the decode chain and release fam_ops. */
static void __exit mce_amd_exit(void)
{
	mce_unregister_decode_chain(&amd_mce_dec_nb);
	kfree(fam_ops);
}

MODULE_DESCRIPTION("AMD MCE decoder");
MODULE_ALIAS("edac-mce-amd");
MODULE_LICENSE("GPL");
module_exit(mce_amd_exit);
#endif
904