mce.c revision 1462594bf2866c1dc80066ed6f49f4331c551901
1/*
2 * Machine check handler.
3 *
4 * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
5 * Rest from unknown author(s).
6 * 2004 Andi Kleen. Rewrote most of it.
7 * Copyright 2008 Intel Corporation
8 * Author: Andi Kleen
9 */
10
11#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12
13#include <linux/thread_info.h>
14#include <linux/capability.h>
15#include <linux/miscdevice.h>
16#include <linux/ratelimit.h>
17#include <linux/kallsyms.h>
18#include <linux/rcupdate.h>
19#include <linux/kobject.h>
20#include <linux/uaccess.h>
21#include <linux/kdebug.h>
22#include <linux/kernel.h>
23#include <linux/percpu.h>
24#include <linux/string.h>
25#include <linux/device.h>
26#include <linux/syscore_ops.h>
27#include <linux/delay.h>
28#include <linux/ctype.h>
29#include <linux/sched.h>
30#include <linux/sysfs.h>
31#include <linux/types.h>
32#include <linux/slab.h>
33#include <linux/init.h>
34#include <linux/kmod.h>
35#include <linux/poll.h>
36#include <linux/nmi.h>
37#include <linux/cpu.h>
38#include <linux/smp.h>
39#include <linux/fs.h>
40#include <linux/mm.h>
41#include <linux/debugfs.h>
42#include <linux/irq_work.h>
43#include <linux/export.h>
44
45#include <asm/processor.h>
46#include <asm/mce.h>
47#include <asm/msr.h>
48
49#include "mce-internal.h"
50
51static DEFINE_MUTEX(mce_chrdev_read_mutex);
52
53#define rcu_dereference_check_mce(p) \
54	rcu_dereference_index_check((p), \
55			      rcu_read_lock_sched_held() || \
56			      lockdep_is_held(&mce_chrdev_read_mutex))
57
58#define CREATE_TRACE_POINTS
59#include <trace/events/mce.h>
60
61#define SPINUNIT 100	/* 100ns */
62
63atomic_t mce_entry;
64
65DEFINE_PER_CPU(unsigned, mce_exception_count);
66
67struct mce_bank *mce_banks __read_mostly;
68
69struct mca_config mca_cfg __read_mostly = {
70	.bootlog  = -1,
71	/*
72	 * Tolerant levels:
73	 * 0: always panic on uncorrected errors, log corrected errors
74	 * 1: panic or SIGBUS on uncorrected errors, log corrected errors
75	 * 2: SIGBUS or log uncorrected errors (if possible), log corr. errors
76	 * 3: never panic or SIGBUS, log all errors (for testing only)
77	 */
78	.tolerant = 1,
79	.monarch_timeout = -1
80};
81
82/* User mode helper program triggered by machine check event */
83static unsigned long		mce_need_notify;
84static char			mce_helper[128];
85static char			*mce_helper_argv[2] = { mce_helper, NULL };
86
87static DECLARE_WAIT_QUEUE_HEAD(mce_chrdev_wait);
88
89static DEFINE_PER_CPU(struct mce, mces_seen);
90static int			cpu_missing;
91
92/* MCA banks polled by the periodic polling timer for corrected events */
93DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
94	[0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL
95};
96
97static DEFINE_PER_CPU(struct work_struct, mce_work);
98
99static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs);
100
101/*
102 * CPU/chipset specific EDAC code can register a notifier call here to print
103 * MCE errors in a human-readable form.
104 */
105ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain);
106
107/* Do initial initialization of a struct mce */
108void mce_setup(struct mce *m)
109{
110	memset(m, 0, sizeof(struct mce));
111	m->cpu = m->extcpu = smp_processor_id();
112	rdtscll(m->tsc);
113	/* We hope get_seconds stays lockless */
114	m->time = get_seconds();
115	m->cpuvendor = boot_cpu_data.x86_vendor;
116	m->cpuid = cpuid_eax(1);
117	m->socketid = cpu_data(m->extcpu).phys_proc_id;
118	m->apicid = cpu_data(m->extcpu).initial_apicid;
119	rdmsrl(MSR_IA32_MCG_CAP, m->mcgcap);
120}
121
122DEFINE_PER_CPU(struct mce, injectm);
123EXPORT_PER_CPU_SYMBOL_GPL(injectm);
124
125/*
126 * Lockless MCE logging infrastructure.
127 * This avoids deadlocks on printk locks without having to break locks. It
128 * also separates MCEs from kernel messages to avoid bogus bug reports.
129 */
130
131static struct mce_log mcelog = {
132	.signature	= MCE_LOG_SIGNATURE,
133	.len		= MCE_LOG_LEN,
134	.recordlen	= sizeof(struct mce),
135};
136
137void mce_log(struct mce *mce)
138{
139	unsigned next, entry;
140	int ret = 0;
141
142	/* Emit the trace record: */
143	trace_mce_record(mce);
144
145	ret = atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, mce);
146	if (ret == NOTIFY_STOP)
147		return;
148
149	mce->finished = 0;
150	wmb();
151	for (;;) {
152		entry = rcu_dereference_check_mce(mcelog.next);
153		for (;;) {
154
155			/*
156			 * When the buffer fills up discard new entries.
157			 * Assume that the earlier errors are the more
158			 * interesting ones:
159			 */
160			if (entry >= MCE_LOG_LEN) {
161				set_bit(MCE_OVERFLOW,
162					(unsigned long *)&mcelog.flags);
163				return;
164			}
165			/* Old left over entry. Skip: */
166			if (mcelog.entry[entry].finished) {
167				entry++;
168				continue;
169			}
170			break;
171		}
172		smp_rmb();
173		next = entry + 1;
174		if (cmpxchg(&mcelog.next, entry, next) == entry)
175			break;
176	}
177	memcpy(mcelog.entry + entry, mce, sizeof(struct mce));
178	wmb();
179	mcelog.entry[entry].finished = 1;
180	wmb();
181
182	mce->finished = 1;
183	set_bit(0, &mce_need_notify);
184}
185
186static void drain_mcelog_buffer(void)
187{
188	unsigned int next, i, prev = 0;
189
190	next = ACCESS_ONCE(mcelog.next);
191
192	do {
193		struct mce *m;
194
195		/* drain what was logged during boot */
196		for (i = prev; i < next; i++) {
197			unsigned long start = jiffies;
198			unsigned retries = 1;
199
200			m = &mcelog.entry[i];
201
202			while (!m->finished) {
203				if (time_after_eq(jiffies, start + 2*retries))
204					retries++;
205
206				cpu_relax();
207
208				if (!m->finished && retries >= 4) {
209					pr_err("skipping error being logged currently!\n");
210					break;
211				}
212			}
213			smp_rmb();
214			atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m);
215		}
216
217		memset(mcelog.entry + prev, 0, (next - prev) * sizeof(*m));
218		prev = next;
219		next = cmpxchg(&mcelog.next, prev, 0);
220	} while (next != prev);
221}
222
223
224void mce_register_decode_chain(struct notifier_block *nb)
225{
226	atomic_notifier_chain_register(&x86_mce_decoder_chain, nb);
227	drain_mcelog_buffer();
228}
229EXPORT_SYMBOL_GPL(mce_register_decode_chain);
230
231void mce_unregister_decode_chain(struct notifier_block *nb)
232{
233	atomic_notifier_chain_unregister(&x86_mce_decoder_chain, nb);
234}
235EXPORT_SYMBOL_GPL(mce_unregister_decode_chain);
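
/*
 * Illustrative sketch (hypothetical names): a decoder such as an EDAC driver
 * would plug into the chain above with a standard notifier_block whose
 * callback receives the struct mce as the data pointer. Returning NOTIFY_STOP
 * tells mce_log()/print_mce() that the event was fully decoded.
 */
#if 0	/* example only, not compiled */
static int example_mce_decode(struct notifier_block *nb, unsigned long val,
			      void *data)
{
	struct mce *m = data;

	if (!m)
		return NOTIFY_DONE;

	/* Decode and print m->status, m->addr, m->misc, ... here. */
	pr_info("Decoded MCE: CPU %d bank %d status %llx\n",
		m->extcpu, m->bank, m->status);

	return NOTIFY_DONE;
}

static struct notifier_block example_mce_dec_nb = {
	.notifier_call	= example_mce_decode,
};

static int __init example_decoder_init(void)
{
	mce_register_decode_chain(&example_mce_dec_nb);
	return 0;
}
#endif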
236
237static void print_mce(struct mce *m)
238{
239	int ret = 0;
240
241	pr_emerg(HW_ERR "CPU %d: Machine Check Exception: %Lx Bank %d: %016Lx\n",
242	       m->extcpu, m->mcgstatus, m->bank, m->status);
243
244	if (m->ip) {
245		pr_emerg(HW_ERR "RIP%s %02x:<%016Lx> ",
246			!(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
247				m->cs, m->ip);
248
249		if (m->cs == __KERNEL_CS)
250			print_symbol("{%s}", m->ip);
251		pr_cont("\n");
252	}
253
254	pr_emerg(HW_ERR "TSC %llx ", m->tsc);
255	if (m->addr)
256		pr_cont("ADDR %llx ", m->addr);
257	if (m->misc)
258		pr_cont("MISC %llx ", m->misc);
259
260	pr_cont("\n");
261	/*
262	 * Note this output is parsed by external tools and old fields
263	 * should not be changed.
264	 */
265	pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x microcode %x\n",
266		m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid,
267		cpu_data(m->extcpu).microcode);
268
269	/*
270	 * Print out human-readable details about the MCE error,
271	 * (if the CPU has an implementation for that)
272	 */
273	ret = atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m);
274	if (ret == NOTIFY_STOP)
275		return;
276
277	pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n");
278}
279
280#define PANIC_TIMEOUT 5 /* 5 seconds */
281
282static atomic_t mce_paniced;
283
284static int fake_panic;
285static atomic_t mce_fake_paniced;
286
287/* Panic in progress. Enable interrupts and wait for final IPI */
288static void wait_for_panic(void)
289{
290	long timeout = PANIC_TIMEOUT*USEC_PER_SEC;
291
292	preempt_disable();
293	local_irq_enable();
294	while (timeout-- > 0)
295		udelay(1);
296	if (panic_timeout == 0)
297		panic_timeout = mca_cfg.panic_timeout;
298	panic("Panicking machine check CPU died");
299}
300
301static void mce_panic(char *msg, struct mce *final, char *exp)
302{
303	int i, apei_err = 0;
304
305	if (!fake_panic) {
306		/*
307		 * Make sure only one CPU runs in machine check panic
308		 */
309		if (atomic_inc_return(&mce_paniced) > 1)
310			wait_for_panic();
311		barrier();
312
313		bust_spinlocks(1);
314		console_verbose();
315	} else {
316		/* Don't log too much for fake panic */
317		if (atomic_inc_return(&mce_fake_paniced) > 1)
318			return;
319	}
320	/* First print corrected ones that are still unlogged */
321	for (i = 0; i < MCE_LOG_LEN; i++) {
322		struct mce *m = &mcelog.entry[i];
323		if (!(m->status & MCI_STATUS_VAL))
324			continue;
325		if (!(m->status & MCI_STATUS_UC)) {
326			print_mce(m);
327			if (!apei_err)
328				apei_err = apei_write_mce(m);
329		}
330	}
331	/* Now print uncorrected but with the final one last */
332	for (i = 0; i < MCE_LOG_LEN; i++) {
333		struct mce *m = &mcelog.entry[i];
334		if (!(m->status & MCI_STATUS_VAL))
335			continue;
336		if (!(m->status & MCI_STATUS_UC))
337			continue;
338		if (!final || memcmp(m, final, sizeof(struct mce))) {
339			print_mce(m);
340			if (!apei_err)
341				apei_err = apei_write_mce(m);
342		}
343	}
344	if (final) {
345		print_mce(final);
346		if (!apei_err)
347			apei_err = apei_write_mce(final);
348	}
349	if (cpu_missing)
350		pr_emerg(HW_ERR "Some CPUs didn't answer in synchronization\n");
351	if (exp)
352		pr_emerg(HW_ERR "Machine check: %s\n", exp);
353	if (!fake_panic) {
354		if (panic_timeout == 0)
355			panic_timeout = mca_cfg.panic_timeout;
356		panic(msg);
357	} else
358		pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
359}
360
361/* Support code for software error injection */
362
363static int msr_to_offset(u32 msr)
364{
365	unsigned bank = __this_cpu_read(injectm.bank);
366
367	if (msr == mca_cfg.rip_msr)
368		return offsetof(struct mce, ip);
369	if (msr == MSR_IA32_MCx_STATUS(bank))
370		return offsetof(struct mce, status);
371	if (msr == MSR_IA32_MCx_ADDR(bank))
372		return offsetof(struct mce, addr);
373	if (msr == MSR_IA32_MCx_MISC(bank))
374		return offsetof(struct mce, misc);
375	if (msr == MSR_IA32_MCG_STATUS)
376		return offsetof(struct mce, mcgstatus);
377	return -1;
378}
379
380/* MSR access wrappers used for error injection */
381static u64 mce_rdmsrl(u32 msr)
382{
383	u64 v;
384
385	if (__this_cpu_read(injectm.finished)) {
386		int offset = msr_to_offset(msr);
387
388		if (offset < 0)
389			return 0;
390		return *(u64 *)((char *)&__get_cpu_var(injectm) + offset);
391	}
392
393	if (rdmsrl_safe(msr, &v)) {
394		WARN_ONCE(1, "mce: Unable to read msr %d!\n", msr);
395		/*
396		 * Return zero in case the access faulted. This should
397		 * not happen normally but can happen if the CPU does
398		 * something weird, or if the code is buggy.
399		 */
400		v = 0;
401	}
402
403	return v;
404}
405
406static void mce_wrmsrl(u32 msr, u64 v)
407{
408	if (__this_cpu_read(injectm.finished)) {
409		int offset = msr_to_offset(msr);
410
411		if (offset >= 0)
412			*(u64 *)((char *)&__get_cpu_var(injectm) + offset) = v;
413		return;
414	}
415	wrmsrl(msr, v);
416}
417
418/*
419 * Collect all global (w.r.t. this processor) status about this machine
420 * check into our "mce" struct so that we can use it later to assess
421 * the severity of the problem as we read per-bank specific details.
422 */
423static inline void mce_gather_info(struct mce *m, struct pt_regs *regs)
424{
425	mce_setup(m);
426
427	m->mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
428	if (regs) {
429		/*
430		 * Get the address of the instruction at the time of
431		 * the machine check error.
432		 */
433		if (m->mcgstatus & (MCG_STATUS_RIPV|MCG_STATUS_EIPV)) {
434			m->ip = regs->ip;
435			m->cs = regs->cs;
436
437			/*
438			 * When in VM86 mode, always make the cs look like
439			 * ring 3. This is a lie, but it's better than passing
440			 * the additional vm86 bit around everywhere.
441			 */
442			if (v8086_mode(regs))
443				m->cs |= 3;
444		}
445		/* Use accurate RIP reporting if available. */
446		if (mca_cfg.rip_msr)
447			m->ip = mce_rdmsrl(mca_cfg.rip_msr);
448	}
449}
450
451/*
452 * Simple lockless ring to communicate PFNs from the exception handler to the
453 * process context work function. This is vastly simplified because there's
454 * only a single reader and a single writer.
455 */
456#define MCE_RING_SIZE 16	/* we use one entry less */
457
458struct mce_ring {
459	unsigned short start;
460	unsigned short end;
461	unsigned long ring[MCE_RING_SIZE];
462};
463static DEFINE_PER_CPU(struct mce_ring, mce_ring);
464
465/* Runs with CPU affinity in workqueue */
466static int mce_ring_empty(void)
467{
468	struct mce_ring *r = &__get_cpu_var(mce_ring);
469
470	return r->start == r->end;
471}
472
473static int mce_ring_get(unsigned long *pfn)
474{
475	struct mce_ring *r;
476	int ret = 0;
477
478	*pfn = 0;
479	get_cpu();
480	r = &__get_cpu_var(mce_ring);
481	if (r->start == r->end)
482		goto out;
483	*pfn = r->ring[r->start];
484	r->start = (r->start + 1) % MCE_RING_SIZE;
485	ret = 1;
486out:
487	put_cpu();
488	return ret;
489}
490
491/* Always runs in MCE context with preempt off */
492static int mce_ring_add(unsigned long pfn)
493{
494	struct mce_ring *r = &__get_cpu_var(mce_ring);
495	unsigned next;
496
497	next = (r->end + 1) % MCE_RING_SIZE;
498	if (next == r->start)
499		return -1;
500	r->ring[r->end] = pfn;
501	wmb();
502	r->end = next;
503	return 0;
504}
505
506int mce_available(struct cpuinfo_x86 *c)
507{
508	if (mca_cfg.disabled)
509		return 0;
510	return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
511}
512
513static void mce_schedule_work(void)
514{
515	if (!mce_ring_empty()) {
516		struct work_struct *work = &__get_cpu_var(mce_work);
517		if (!work_pending(work))
518			schedule_work(work);
519	}
520}
521
522DEFINE_PER_CPU(struct irq_work, mce_irq_work);
523
524static void mce_irq_work_cb(struct irq_work *entry)
525{
526	mce_notify_irq();
527	mce_schedule_work();
528}
529
530static void mce_report_event(struct pt_regs *regs)
531{
532	if (regs->flags & (X86_VM_MASK|X86_EFLAGS_IF)) {
533		mce_notify_irq();
534		/*
535		 * Triggering the work queue here is just an insurance
536		 * policy in case the syscall exit notify handler
537		 * doesn't run soon enough or ends up running on the
538		 * wrong CPU (can happen when audit sleeps)
539		 */
540		mce_schedule_work();
541		return;
542	}
543
544	irq_work_queue(&__get_cpu_var(mce_irq_work));
545}
546
547/*
548 * Read ADDR and MISC registers.
549 */
550static void mce_read_aux(struct mce *m, int i)
551{
552	if (m->status & MCI_STATUS_MISCV)
553		m->misc = mce_rdmsrl(MSR_IA32_MCx_MISC(i));
554	if (m->status & MCI_STATUS_ADDRV) {
555		m->addr = mce_rdmsrl(MSR_IA32_MCx_ADDR(i));
556
557		/*
558		 * Mask the reported address by the reported granularity.
559		 */
560		if (mca_cfg.ser && (m->status & MCI_STATUS_MISCV)) {
561			u8 shift = MCI_MISC_ADDR_LSB(m->misc);
562			m->addr >>= shift;
563			m->addr <<= shift;
564		}
565	}
566}
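
/*
 * Worked example of the masking above: if MCI_MISC reports an address LSB of
 * 12, the low 12 bits of the reported address are not valid, so an address of
 * 0x12345abc is rounded down to 0x12345000, i.e. page granularity.
 */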
567
568DEFINE_PER_CPU(unsigned, mce_poll_count);
569
570/*
571 * Poll for corrected events or events that happened before reset.
572 * Those are just logged through /dev/mcelog.
573 *
574 * This is executed in standard interrupt context.
575 *
576 * Note: the spec recommends panicking for fatal unsignalled
577 * errors here. However this would be quite problematic --
578 * we would need to reimplement the Monarch handling and
579 * it would mess up the exclusion between the exception handler
580 * and the poll handler -- so we skip this for now.
581 * These cases should not happen anyway, or only when the CPU
582 * is already totally confused. In this case it's likely it will
583 * not fully execute the machine check handler either.
584 */
585void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
586{
587	struct mce m;
588	int i;
589
590	this_cpu_inc(mce_poll_count);
591
592	mce_gather_info(&m, NULL);
593
594	for (i = 0; i < mca_cfg.banks; i++) {
595		if (!mce_banks[i].ctl || !test_bit(i, *b))
596			continue;
597
598		m.misc = 0;
599		m.addr = 0;
600		m.bank = i;
601		m.tsc = 0;
602
603		barrier();
604		m.status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
605		if (!(m.status & MCI_STATUS_VAL))
606			continue;
607
608		/*
609		 * Uncorrected or signalled events are handled by the exception
610		 * handler when it is enabled, so don't process those here.
611		 *
612		 * TBD do the same check for MCI_STATUS_EN here?
613		 */
614		if (!(flags & MCP_UC) &&
615		    (m.status & (mca_cfg.ser ? MCI_STATUS_S : MCI_STATUS_UC)))
616			continue;
617
618		mce_read_aux(&m, i);
619
620		if (!(flags & MCP_TIMESTAMP))
621			m.tsc = 0;
622		/*
623		 * Don't get the IP here because it's unlikely to
624		 * have anything to do with the actual error location.
625		 */
626		if (!(flags & MCP_DONTLOG) && !mca_cfg.dont_log_ce)
627			mce_log(&m);
628
629		/*
630		 * Clear state for this bank.
631		 */
632		mce_wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
633	}
634
635	/*
636	 * Don't clear MCG_STATUS here because it's only defined for
637	 * exceptions.
638	 */
639
640	sync_core();
641}
642EXPORT_SYMBOL_GPL(machine_check_poll);
643
644/*
645 * Do a quick check if any of the events requires a panic.
646 * This decides if we keep the events around or clear them.
647 */
648static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
649			  struct pt_regs *regs)
650{
651	int i, ret = 0;
652
653	for (i = 0; i < mca_cfg.banks; i++) {
654		m->status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
655		if (m->status & MCI_STATUS_VAL) {
656			__set_bit(i, validp);
657			if (quirk_no_way_out)
658				quirk_no_way_out(i, m, regs);
659		}
660		if (mce_severity(m, mca_cfg.tolerant, msg) >= MCE_PANIC_SEVERITY)
661			ret = 1;
662	}
663	return ret;
664}
665
666/*
667 * Variable to establish order between CPUs while scanning.
668 * Each CPU spins initially until mce_executing equals its number.
669 */
670static atomic_t mce_executing;
671
672/*
673 * Defines order of CPUs on entry. First CPU becomes Monarch.
674 */
675static atomic_t mce_callin;
676
677/*
678 * Check if a timeout waiting for other CPUs happened.
679 */
680static int mce_timed_out(u64 *t)
681{
682	/*
683	 * The others already did panic for some reason.
684	 * Bail out like in a timeout.
685	 * rmb() to tell the compiler that system_state
686	 * might have been modified by someone else.
687	 */
688	rmb();
689	if (atomic_read(&mce_paniced))
690		wait_for_panic();
691	if (!mca_cfg.monarch_timeout)
692		goto out;
693	if ((s64)*t < SPINUNIT) {
694		/* CHECKME: Make panic default for 1 too? */
695		if (mca_cfg.tolerant < 1)
696			mce_panic("Timeout synchronizing machine check over CPUs",
697				  NULL, NULL);
698		cpu_missing = 1;
699		return 1;
700	}
701	*t -= SPINUNIT;
702out:
703	touch_nmi_watchdog();
704	return 0;
705}
706
707/*
708 * The Monarch's reign.  The Monarch is the CPU who entered
709 * the machine check handler first. It waits for the others to
710 * raise the exception too and then grades them. If any
711 * error is fatal it panics. Only then does it let the others continue.
712 *
713 * The other CPUs entering the MCE handler will be controlled by the
714 * Monarch. They are called Subjects.
715 *
716 * This way we prevent any potential data corruption in an unrecoverable case
717 * and also make sure that all CPUs' errors are always examined.
718 *
719 * Also this detects the case of a machine check event coming from outer
720 * space (not detected by any CPU). In this case some external agent wants
721 * us to shut down, so panic too.
722 *
723 * The other CPUs might still decide to panic if the handler happens
724 * in an unrecoverable place, but in this case the system is in a semi-stable
725 * state and won't corrupt anything by itself. It's ok to let the others
726 * continue for a bit first.
727 *
728 * All the spin loops have timeouts; when a timeout happens a CPU
729 * typically elects itself to be Monarch.
730 */
731static void mce_reign(void)
732{
733	int cpu;
734	struct mce *m = NULL;
735	int global_worst = 0;
736	char *msg = NULL;
737	char *nmsg = NULL;
738
739	/*
740	 * This CPU is the Monarch and the other CPUs have run
741	 * through their handlers.
742	 * Grade the severity of the errors of all the CPUs.
743	 */
744	for_each_possible_cpu(cpu) {
745		int severity = mce_severity(&per_cpu(mces_seen, cpu),
746					    mca_cfg.tolerant,
747					    &nmsg);
748		if (severity > global_worst) {
749			msg = nmsg;
750			global_worst = severity;
751			m = &per_cpu(mces_seen, cpu);
752		}
753	}
754
755	/*
756	 * Cannot recover? Panic here then.
757	 * This dumps all the mces in the log buffer and stops the
758	 * other CPUs.
759	 */
760	if (m && global_worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3)
761		mce_panic("Fatal Machine check", m, msg);
762
763	/*
764	 * For a UC error somewhere we let the CPU that detects it handle it.
765	 * We also must let the others continue, otherwise the handling
766	 * CPU could deadlock on a lock.
767	 */
768
769	/*
770	 * No machine check event found. Must be some external
771	 * source or one CPU is hung. Panic.
772	 */
773	if (global_worst <= MCE_KEEP_SEVERITY && mca_cfg.tolerant < 3)
774		mce_panic("Machine check from unknown source", NULL, NULL);
775
776	/*
777	 * Now clear all the mces_seen so that they don't reappear on
778	 * the next mce.
779	 */
780	for_each_possible_cpu(cpu)
781		memset(&per_cpu(mces_seen, cpu), 0, sizeof(struct mce));
782}
783
784static atomic_t global_nwo;
785
786/*
787 * Start of Monarch synchronization. This waits until all CPUs have
788 * entered the exception handler and then determines if any of them
789 * saw a fatal event that requires panic. Then it executes them
790 * in the entry order.
791 * TBD double check parallel CPU hotunplug
792 */
793static int mce_start(int *no_way_out)
794{
795	int order;
796	int cpus = num_online_cpus();
797	u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC;
798
799	if (!timeout)
800		return -1;
801
802	atomic_add(*no_way_out, &global_nwo);
803	/*
804	 * global_nwo should be updated before mce_callin
805	 */
806	smp_wmb();
807	order = atomic_inc_return(&mce_callin);
808
809	/*
810	 * Wait for everyone.
811	 */
812	while (atomic_read(&mce_callin) != cpus) {
813		if (mce_timed_out(&timeout)) {
814			atomic_set(&global_nwo, 0);
815			return -1;
816		}
817		ndelay(SPINUNIT);
818	}
819
820	/*
821	 * mce_callin should be read before global_nwo
822	 */
823	smp_rmb();
824
825	if (order == 1) {
826		/*
827		 * Monarch: Starts executing now, the others wait.
828		 */
829		atomic_set(&mce_executing, 1);
830	} else {
831		/*
832		 * Subject: Now start the scanning loop one by one in
833		 * the original callin order.
834		 * This way, when there are any shared banks, an event is
835		 * seen by only one CPU before it is cleared, avoiding duplicates.
836		 */
837		while (atomic_read(&mce_executing) < order) {
838			if (mce_timed_out(&timeout)) {
839				atomic_set(&global_nwo, 0);
840				return -1;
841			}
842			ndelay(SPINUNIT);
843		}
844	}
845
846	/*
847	 * Cache the global no_way_out state.
848	 */
849	*no_way_out = atomic_read(&global_nwo);
850
851	return order;
852}
853
854/*
855 * Synchronize between CPUs after main scanning loop.
856 * This invokes the bulk of the Monarch processing.
857 */
858static int mce_end(int order)
859{
860	int ret = -1;
861	u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC;
862
863	if (!timeout)
864		goto reset;
865	if (order < 0)
866		goto reset;
867
868	/*
869	 * Allow others to run.
870	 */
871	atomic_inc(&mce_executing);
872
873	if (order == 1) {
874		/* CHECKME: Can this race with a parallel hotplug? */
875		int cpus = num_online_cpus();
876
877		/*
878		 * Monarch: Wait for everyone to go through their scanning
879		 * loops.
880		 */
881		while (atomic_read(&mce_executing) <= cpus) {
882			if (mce_timed_out(&timeout))
883				goto reset;
884			ndelay(SPINUNIT);
885		}
886
887		mce_reign();
888		barrier();
889		ret = 0;
890	} else {
891		/*
892		 * Subject: Wait for Monarch to finish.
893		 */
894		while (atomic_read(&mce_executing) != 0) {
895			if (mce_timed_out(&timeout))
896				goto reset;
897			ndelay(SPINUNIT);
898		}
899
900		/*
901		 * Don't reset anything. That's done by the Monarch.
902		 */
903		return 0;
904	}
905
906	/*
907	 * Reset all global state.
908	 */
909reset:
910	atomic_set(&global_nwo, 0);
911	atomic_set(&mce_callin, 0);
912	barrier();
913
914	/*
915	 * Let others run again.
916	 */
917	atomic_set(&mce_executing, 0);
918	return ret;
919}
920
921/*
922 * Check if the address reported by the CPU is in a format we can parse.
923 * It would be possible to add code for most other cases, but all would
924 * be somewhat complicated (e.g. segment offset would require an instruction
925 * parser). So only support physical addresses up to page granularity for now.
926 */
927static int mce_usable_address(struct mce *m)
928{
929	if (!(m->status & MCI_STATUS_MISCV) || !(m->status & MCI_STATUS_ADDRV))
930		return 0;
931	if (MCI_MISC_ADDR_LSB(m->misc) > PAGE_SHIFT)
932		return 0;
933	if (MCI_MISC_ADDR_MODE(m->misc) != MCI_MISC_ADDR_PHYS)
934		return 0;
935	return 1;
936}
937
938static void mce_clear_state(unsigned long *toclear)
939{
940	int i;
941
942	for (i = 0; i < mca_cfg.banks; i++) {
943		if (test_bit(i, toclear))
944			mce_wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
945	}
946}
947
948/*
949 * Need to save the faulting physical address associated with a process
950 * in the machine check handler somewhere we can grab it back
951 * later in mce_notify_process()
952 */
953#define	MCE_INFO_MAX	16
954
955struct mce_info {
956	atomic_t		inuse;
957	struct task_struct	*t;
958	__u64			paddr;
959	int			restartable;
960} mce_info[MCE_INFO_MAX];
961
962static void mce_save_info(__u64 addr, int c)
963{
964	struct mce_info *mi;
965
966	for (mi = mce_info; mi < &mce_info[MCE_INFO_MAX]; mi++) {
967		if (atomic_cmpxchg(&mi->inuse, 0, 1) == 0) {
968			mi->t = current;
969			mi->paddr = addr;
970			mi->restartable = c;
971			return;
972		}
973	}
974
975	mce_panic("Too many concurrent recoverable errors", NULL, NULL);
976}
977
978static struct mce_info *mce_find_info(void)
979{
980	struct mce_info *mi;
981
982	for (mi = mce_info; mi < &mce_info[MCE_INFO_MAX]; mi++)
983		if (atomic_read(&mi->inuse) && mi->t == current)
984			return mi;
985	return NULL;
986}
987
988static void mce_clear_info(struct mce_info *mi)
989{
990	atomic_set(&mi->inuse, 0);
991}
992
993/*
994 * The actual machine check handler. This only handles real
995 * exceptions when something got corrupted coming in through int 18.
996 *
997 * This is executed in NMI context not subject to normal locking rules. This
998 * implies that most kernel services cannot be safely used. Don't even
999 * think about putting a printk in there!
1000 *
1001 * On Intel systems this is entered on all CPUs in parallel through
1002 * MCE broadcast. However some CPUs might be broken beyond repair,
1003 * so always be careful when synchronizing with others.
1004 */
1005void do_machine_check(struct pt_regs *regs, long error_code)
1006{
1007	struct mca_config *cfg = &mca_cfg;
1008	struct mce m, *final;
1009	int i;
1010	int worst = 0;
1011	int severity;
1012	/*
1013	 * Establish sequential order between the CPUs entering the machine
1014	 * check handler.
1015	 */
1016	int order;
1017	/*
1018	 * If no_way_out gets set, there is no safe way to recover from this
1019	 * MCE.  If mca_cfg.tolerant is cranked up, we'll try anyway.
1020	 */
1021	int no_way_out = 0;
1022	/*
1023	 * If kill_it gets set, there might be a way to recover from this
1024	 * error.
1025	 */
1026	int kill_it = 0;
1027	DECLARE_BITMAP(toclear, MAX_NR_BANKS);
1028	DECLARE_BITMAP(valid_banks, MAX_NR_BANKS);
1029	char *msg = "Unknown";
1030
1031	atomic_inc(&mce_entry);
1032
1033	this_cpu_inc(mce_exception_count);
1034
1035	if (!cfg->banks)
1036		goto out;
1037
1038	mce_gather_info(&m, regs);
1039
1040	final = &__get_cpu_var(mces_seen);
1041	*final = m;
1042
1043	memset(valid_banks, 0, sizeof(valid_banks));
1044	no_way_out = mce_no_way_out(&m, &msg, valid_banks, regs);
1045
1046	barrier();
1047
1048	/*
1049	 * If there is no restart IP we might need to kill or panic.
1050	 * Assume the worst for now, but if we find the
1051	 * severity is MCE_AR_SEVERITY we have other options.
1052	 */
1053	if (!(m.mcgstatus & MCG_STATUS_RIPV))
1054		kill_it = 1;
1055
1056	/*
1057	 * Go through all the banks in exclusion of the other CPUs.
1058	 * This way we don't report duplicated events on shared banks
1059	 * because the first one to see it will clear it.
1060	 */
1061	order = mce_start(&no_way_out);
1062	for (i = 0; i < cfg->banks; i++) {
1063		__clear_bit(i, toclear);
1064		if (!test_bit(i, valid_banks))
1065			continue;
1066		if (!mce_banks[i].ctl)
1067			continue;
1068
1069		m.misc = 0;
1070		m.addr = 0;
1071		m.bank = i;
1072
1073		m.status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
1074		if ((m.status & MCI_STATUS_VAL) == 0)
1075			continue;
1076
1077		/*
1078		 * Non-uncorrected or non-signaled errors are handled by
1079		 * machine_check_poll. Leave them alone, unless this panics.
1080		 */
1081		if (!(m.status & (cfg->ser ? MCI_STATUS_S : MCI_STATUS_UC)) &&
1082			!no_way_out)
1083			continue;
1084
1085		/*
1086		 * Set taint even when machine check was not enabled.
1087		 */
1088		add_taint(TAINT_MACHINE_CHECK);
1089
1090		severity = mce_severity(&m, cfg->tolerant, NULL);
1091
1092		/*
1093		 * When the machine check is left for the corrected-error
1094		 * handler, don't touch it unless we're panicking.
1095		 */
1096		if (severity == MCE_KEEP_SEVERITY && !no_way_out)
1097			continue;
1098		__set_bit(i, toclear);
1099		if (severity == MCE_NO_SEVERITY) {
1100			/*
1101			 * Machine check event was not enabled. Clear, but
1102			 * ignore.
1103			 */
1104			continue;
1105		}
1106
1107		mce_read_aux(&m, i);
1108
1109		/*
1110		 * Action optional error. Queue address for later processing.
1111		 * When the ring overflows we just ignore the AO error.
1112		 * RED-PEN add some logging mechanism when
1113		 * mce_usable_address() or mce_ring_add() fails.
1114		 * RED-PEN don't ignore overflow for mca_cfg.tolerant == 0
1115		 */
1116		if (severity == MCE_AO_SEVERITY && mce_usable_address(&m))
1117			mce_ring_add(m.addr >> PAGE_SHIFT);
1118
1119		mce_log(&m);
1120
1121		if (severity > worst) {
1122			*final = m;
1123			worst = severity;
1124		}
1125	}
1126
1127	/* mce_clear_state will clear *final, save locally for use later */
1128	m = *final;
1129
1130	if (!no_way_out)
1131		mce_clear_state(toclear);
1132
1133	/*
1134	 * Do most of the synchronization with other CPUs.
1135	 * When there's any problem use only local no_way_out state.
1136	 */
1137	if (mce_end(order) < 0)
1138		no_way_out = worst >= MCE_PANIC_SEVERITY;
1139
1140	/*
1141	 * At insane "tolerant" levels we take no action. Otherwise
1142	 * we only die if we have no other choice. For less serious
1143	 * issues we try to recover, or limit damage to the current
1144	 * process.
1145	 */
1146	if (cfg->tolerant < 3) {
1147		if (no_way_out)
1148			mce_panic("Fatal machine check on current CPU", &m, msg);
1149		if (worst == MCE_AR_SEVERITY) {
1150			/* schedule action before return to userland */
1151			mce_save_info(m.addr, m.mcgstatus & MCG_STATUS_RIPV);
1152			set_thread_flag(TIF_MCE_NOTIFY);
1153		} else if (kill_it) {
1154			force_sig(SIGBUS, current);
1155		}
1156	}
1157
1158	if (worst > 0)
1159		mce_report_event(regs);
1160	mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
1161out:
1162	atomic_dec(&mce_entry);
1163	sync_core();
1164}
1165EXPORT_SYMBOL_GPL(do_machine_check);
1166
1167#ifndef CONFIG_MEMORY_FAILURE
1168int memory_failure(unsigned long pfn, int vector, int flags)
1169{
1170	/* mce_severity() should not hand us an ACTION_REQUIRED error */
1171	BUG_ON(flags & MF_ACTION_REQUIRED);
1172	pr_err("Uncorrected memory error in page 0x%lx ignored\n"
1173	       "Rebuild kernel with CONFIG_MEMORY_FAILURE=y for smarter handling\n",
1174	       pfn);
1175
1176	return 0;
1177}
1178#endif
1179
1180/*
1181 * Called in the process context that was interrupted by the MCE and marked
1182 * with TIF_MCE_NOTIFY, just before returning to the erroneous userland code.
1183 * This code is allowed to sleep.
1184 * Attempt possible recovery such as calling the high level VM handler to
1185 * process any corrupted pages, and kill/signal current process if required.
1186 * Action required errors are handled here.
1187 */
1188void mce_notify_process(void)
1189{
1190	unsigned long pfn;
1191	struct mce_info *mi = mce_find_info();
1192	int flags = MF_ACTION_REQUIRED;
1193
1194	if (!mi)
1195		mce_panic("Lost physical address for unconsumed uncorrectable error", NULL, NULL);
1196	pfn = mi->paddr >> PAGE_SHIFT;
1197
1198	clear_thread_flag(TIF_MCE_NOTIFY);
1199
1200	pr_err("Uncorrected hardware memory error in user-access at %llx\n",
1201		 mi->paddr);
1202	/*
1203	 * We must call memory_failure() here even if the current process is
1204	 * doomed. We still need to mark the page as poisoned and alert any
1205	 * other users of the page.
1206	 */
1207	if (!mi->restartable)
1208		flags |= MF_MUST_KILL;
1209	if (memory_failure(pfn, MCE_VECTOR, flags) < 0) {
1210		pr_err("Memory error not recovered\n");
1211		force_sig(SIGBUS, current);
1212	}
1213	mce_clear_info(mi);
1214}
1215
1216/*
1217 * Action optional processing happens here (picking up
1218 * from the list of faulting pages that do_machine_check()
1219 * placed into the "ring").
1220 */
1221static void mce_process_work(struct work_struct *dummy)
1222{
1223	unsigned long pfn;
1224
1225	while (mce_ring_get(&pfn))
1226		memory_failure(pfn, MCE_VECTOR, 0);
1227}
1228
1229#ifdef CONFIG_X86_MCE_INTEL
1230/**
1231 * mce_log_therm_throt_event - Logs the thermal throttling event to mcelog
1232 * @status: Event status information
1234 *
1235 * This function should be called by the thermal interrupt after the
1236 * event has been processed and the decision was made to log the event
1237 * further.
1238 *
1239 * The status parameter will be saved to the 'status' field of 'struct mce'
1240 * and historically has been the register value of the
1241 * MSR_IA32_THERMAL_STATUS (Intel) msr.
1242 */
1243void mce_log_therm_throt_event(__u64 status)
1244{
1245	struct mce m;
1246
1247	mce_setup(&m);
1248	m.bank = MCE_THERMAL_BANK;
1249	m.status = status;
1250	mce_log(&m);
1251}
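
/*
 * Sketch of the expected caller (the real call site lives in the thermal
 * interrupt code; the MSR name and variable below are illustrative):
 */
#if 0	/* example only, not compiled */
	u64 therm_status;

	rdmsrl(MSR_IA32_THERM_STATUS, therm_status);
	/* ... decide that the event should be logged ... */
	mce_log_therm_throt_event(therm_status);
#endif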
1252#endif /* CONFIG_X86_MCE_INTEL */
1253
1254/*
1255 * Periodic polling timer for "silent" machine check errors.  If the
1256 * poller finds an MCE, poll 2x faster.  When the poller finds no more
1257 * errors, poll 2x slower (up to check_interval seconds).
1258 */
1259static unsigned long check_interval = 5 * 60; /* 5 minutes */
1260
1261static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
1262static DEFINE_PER_CPU(struct timer_list, mce_timer);
1263
1264static unsigned long mce_adjust_timer_default(unsigned long interval)
1265{
1266	return interval;
1267}
1268
1269static unsigned long (*mce_adjust_timer)(unsigned long interval) =
1270	mce_adjust_timer_default;
1271
1272static void mce_timer_fn(unsigned long data)
1273{
1274	struct timer_list *t = &__get_cpu_var(mce_timer);
1275	unsigned long iv;
1276
1277	WARN_ON(smp_processor_id() != data);
1278
1279	if (mce_available(__this_cpu_ptr(&cpu_info))) {
1280		machine_check_poll(MCP_TIMESTAMP,
1281				&__get_cpu_var(mce_poll_banks));
1282		mce_intel_cmci_poll();
1283	}
1284
1285	/*
1286	 * Alert userspace if needed.  If we logged an MCE, reduce the
1287	 * polling interval, otherwise increase the polling interval.
1288	 */
1289	iv = __this_cpu_read(mce_next_interval);
1290	if (mce_notify_irq()) {
1291		iv = max(iv / 2, (unsigned long) HZ/100);
1292	} else {
1293		iv = min(iv * 2, round_jiffies_relative(check_interval * HZ));
1294		iv = mce_adjust_timer(iv);
1295	}
1296	__this_cpu_write(mce_next_interval, iv);
1297	/* Might have become 0 after CMCI storm subsided */
1298	if (iv) {
1299		t->expires = jiffies + iv;
1300		add_timer_on(t, smp_processor_id());
1301	}
1302}
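
/*
 * Worked example of the adjustment above, assuming HZ == 1000 and the default
 * check_interval of 300 seconds: polling starts at 300 * HZ jiffies, is halved
 * each time mce_notify_irq() reports a freshly logged event (down to the
 * HZ/100 floor, i.e. 10 ms), and doubles again on quiet runs until it is back
 * at the check_interval ceiling.
 */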
1303
1304/*
1305 * Ensure that the timer is firing in @interval from now.
1306 */
1307void mce_timer_kick(unsigned long interval)
1308{
1309	struct timer_list *t = &__get_cpu_var(mce_timer);
1310	unsigned long when = jiffies + interval;
1311	unsigned long iv = __this_cpu_read(mce_next_interval);
1312
1313	if (timer_pending(t)) {
1314		if (time_before(when, t->expires))
1315			mod_timer_pinned(t, when);
1316	} else {
1317		t->expires = round_jiffies(when);
1318		add_timer_on(t, smp_processor_id());
1319	}
1320	if (interval < iv)
1321		__this_cpu_write(mce_next_interval, interval);
1322}
1323
1324/* Must not be called in IRQ context where del_timer_sync() can deadlock */
1325static void mce_timer_delete_all(void)
1326{
1327	int cpu;
1328
1329	for_each_online_cpu(cpu)
1330		del_timer_sync(&per_cpu(mce_timer, cpu));
1331}
1332
1333static void mce_do_trigger(struct work_struct *work)
1334{
1335	call_usermodehelper(mce_helper, mce_helper_argv, NULL, UMH_NO_WAIT);
1336}
1337
1338static DECLARE_WORK(mce_trigger_work, mce_do_trigger);
1339
1340/*
1341 * Notify the user(s) about new machine check events.
1342 * Can be called from interrupt context, but not from machine check/NMI
1343 * context.
1344 */
1345int mce_notify_irq(void)
1346{
1347	/* Not more than two messages every minute */
1348	static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);
1349
1350	if (test_and_clear_bit(0, &mce_need_notify)) {
1351		/* wake processes polling /dev/mcelog */
1352		wake_up_interruptible(&mce_chrdev_wait);
1353
1354		/*
1355		 * There is no risk of missing notifications because
1356		 * work_pending is always cleared before the function is
1357		 * executed.
1358		 */
1359		if (mce_helper[0] && !work_pending(&mce_trigger_work))
1360			schedule_work(&mce_trigger_work);
1361
1362		if (__ratelimit(&ratelimit))
1363			pr_info(HW_ERR "Machine check events logged\n");
1364
1365		return 1;
1366	}
1367	return 0;
1368}
1369EXPORT_SYMBOL_GPL(mce_notify_irq);
1370
1371static int __cpuinit __mcheck_cpu_mce_banks_init(void)
1372{
1373	int i;
1374	u8 num_banks = mca_cfg.banks;
1375
1376	mce_banks = kzalloc(num_banks * sizeof(struct mce_bank), GFP_KERNEL);
1377	if (!mce_banks)
1378		return -ENOMEM;
1379
1380	for (i = 0; i < num_banks; i++) {
1381		struct mce_bank *b = &mce_banks[i];
1382
1383		b->ctl = -1ULL;
1384		b->init = 1;
1385	}
1386	return 0;
1387}
1388
1389/*
1390 * Initialize Machine Checks for a CPU.
1391 */
1392static int __cpuinit __mcheck_cpu_cap_init(void)
1393{
1394	unsigned b;
1395	u64 cap;
1396
1397	rdmsrl(MSR_IA32_MCG_CAP, cap);
1398
1399	b = cap & MCG_BANKCNT_MASK;
1400	if (!mca_cfg.banks)
1401		pr_info("CPU supports %d MCE banks\n", b);
1402
1403	if (b > MAX_NR_BANKS) {
1404		pr_warn("Using only %u machine check banks out of %u\n",
1405			MAX_NR_BANKS, b);
1406		b = MAX_NR_BANKS;
1407	}
1408
1409	/* Don't support asymmetric configurations today */
1410	WARN_ON(mca_cfg.banks != 0 && b != mca_cfg.banks);
1411	mca_cfg.banks = b;
1412
1413	if (!mce_banks) {
1414		int err = __mcheck_cpu_mce_banks_init();
1415
1416		if (err)
1417			return err;
1418	}
1419
1420	/* Use accurate RIP reporting if available. */
1421	if ((cap & MCG_EXT_P) && MCG_EXT_CNT(cap) >= 9)
1422		mca_cfg.rip_msr = MSR_IA32_MCG_EIP;
1423
1424	if (cap & MCG_SER_P)
1425		mca_cfg.ser = true;
1426
1427	return 0;
1428}
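
/*
 * Worked example of the capability parsing above (hypothetical value): if
 * MCG_CAP reads back with 0x06 in its low byte (MCG_BANKCNT_MASK), the CPU
 * reports 6 MCA banks, each with its own CTL/STATUS/ADDR/MISC register set.
 */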
1429
1430static void __mcheck_cpu_init_generic(void)
1431{
1432	enum mcp_flags m_fl = 0;
1433	mce_banks_t all_banks;
1434	u64 cap;
1435	int i;
1436
1437	if (!mca_cfg.bootlog)
1438		m_fl = MCP_DONTLOG;
1439
1440	/*
1441	 * Log the machine checks left over from the previous reset.
1442	 */
1443	bitmap_fill(all_banks, MAX_NR_BANKS);
1444	machine_check_poll(MCP_UC | m_fl, &all_banks);
1445
1446	set_in_cr4(X86_CR4_MCE);
1447
1448	rdmsrl(MSR_IA32_MCG_CAP, cap);
1449	if (cap & MCG_CTL_P)
1450		wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
1451
1452	for (i = 0; i < mca_cfg.banks; i++) {
1453		struct mce_bank *b = &mce_banks[i];
1454
1455		if (!b->init)
1456			continue;
1457		wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
1458		wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
1459	}
1460}
1461
1462/*
1463 * During IFU recovery Sandy Bridge -EP4S processors set the RIPV and
1464 * EIPV bits in MCG_STATUS to zero on the affected logical processor (SDM
1465 * Vol 3B Table 15-20). But this confuses both the code that determines
1466 * whether the machine check occurred in kernel or user mode, and also
1467 * the severity assessment code. Pretend that EIPV was set, and take the
1468 * ip/cs values from the pt_regs that mce_gather_info() ignored earlier.
1469 */
1470static void quirk_sandybridge_ifu(int bank, struct mce *m, struct pt_regs *regs)
1471{
1472	if (bank != 0)
1473		return;
1474	if ((m->mcgstatus & (MCG_STATUS_EIPV|MCG_STATUS_RIPV)) != 0)
1475		return;
1476	if ((m->status & (MCI_STATUS_OVER|MCI_STATUS_UC|
1477		          MCI_STATUS_EN|MCI_STATUS_MISCV|MCI_STATUS_ADDRV|
1478			  MCI_STATUS_PCC|MCI_STATUS_S|MCI_STATUS_AR|
1479			  MCACOD)) !=
1480			 (MCI_STATUS_UC|MCI_STATUS_EN|
1481			  MCI_STATUS_MISCV|MCI_STATUS_ADDRV|MCI_STATUS_S|
1482			  MCI_STATUS_AR|MCACOD_INSTR))
1483		return;
1484
1485	m->mcgstatus |= MCG_STATUS_EIPV;
1486	m->ip = regs->ip;
1487	m->cs = regs->cs;
1488}
1489
1490/* Add per CPU specific workarounds here */
1491static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
1492{
1493	struct mca_config *cfg = &mca_cfg;
1494
1495	if (c->x86_vendor == X86_VENDOR_UNKNOWN) {
1496		pr_info("unknown CPU type - not enabling MCE support\n");
1497		return -EOPNOTSUPP;
1498	}
1499
1500	/* This should be disabled by the BIOS, but isn't always */
1501	if (c->x86_vendor == X86_VENDOR_AMD) {
1502		if (c->x86 == 15 && cfg->banks > 4) {
1503			/*
1504			 * disable GART TBL walk error reporting, which
1505			 * trips off incorrectly with the IOMMU & 3ware
1506			 * & Cerberus:
1507			 */
1508			clear_bit(10, (unsigned long *)&mce_banks[4].ctl);
1509		}
1510		if (c->x86 <= 17 && cfg->bootlog < 0) {
1511			/*
1512			 * Lots of broken BIOSes around that don't clear them
1513			 * by default and leave crap in there. Don't log:
1514			 */
1515			cfg->bootlog = 0;
1516		}
1517		/*
1518		 * Various K7s with broken bank 0 around. Always disable
1519		 * by default.
1520		 */
1521		 if (c->x86 == 6 && cfg->banks > 0)
1522			mce_banks[0].ctl = 0;
1523
1524		 /*
1525		  * Turn off MC4_MISC thresholding banks on those models since
1526		  * they're not supported there.
1527		  */
1528		 if (c->x86 == 0x15 &&
1529		     (c->x86_model >= 0x10 && c->x86_model <= 0x1f)) {
1530			 int i;
1531			 u64 val, hwcr;
1532			 bool need_toggle;
1533			 u32 msrs[] = {
1534				0x00000413, /* MC4_MISC0 */
1535				0xc0000408, /* MC4_MISC1 */
1536			 };
1537
1538			 rdmsrl(MSR_K7_HWCR, hwcr);
1539
1540			 /* McStatusWrEn has to be set */
1541			 need_toggle = !(hwcr & BIT(18));
1542
1543			 if (need_toggle)
1544				 wrmsrl(MSR_K7_HWCR, hwcr | BIT(18));
1545
1546			 for (i = 0; i < ARRAY_SIZE(msrs); i++) {
1547				 rdmsrl(msrs[i], val);
1548
1549				 /* CntP bit set? */
1550				 if (val & BIT_64(62)) {
1551					val &= ~BIT_64(62);
1552					wrmsrl(msrs[i], val);
1553				 }
1554			 }
1555
1556			 /* restore old settings */
1557			 if (need_toggle)
1558				 wrmsrl(MSR_K7_HWCR, hwcr);
1559		 }
1560	}
1561
1562	if (c->x86_vendor == X86_VENDOR_INTEL) {
1563		/*
1564		 * SDM documents that on family 6 bank 0 should not be written
1565		 * because it aliases to another special BIOS-controlled
1566		 * register.
1567		 * But it's not aliased anymore on model 0x1a+.
1568		 * Don't ignore bank 0 completely because there could be a
1569		 * valid event later, merely don't write CTL0.
1570		 */
1571
1572		if (c->x86 == 6 && c->x86_model < 0x1A && cfg->banks > 0)
1573			mce_banks[0].init = 0;
1574
1575		/*
1576		 * All newer Intel systems support MCE broadcasting. Enable
1577		 * synchronization with a one second timeout.
1578		 */
1579		if ((c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xe)) &&
1580			cfg->monarch_timeout < 0)
1581			cfg->monarch_timeout = USEC_PER_SEC;
1582
1583		/*
1584		 * There are also broken BIOSes on some Pentium M and
1585		 * earlier systems:
1586		 */
1587		if (c->x86 == 6 && c->x86_model <= 13 && cfg->bootlog < 0)
1588			cfg->bootlog = 0;
1589
1590		if (c->x86 == 6 && c->x86_model == 45)
1591			quirk_no_way_out = quirk_sandybridge_ifu;
1592	}
1593	if (cfg->monarch_timeout < 0)
1594		cfg->monarch_timeout = 0;
1595	if (cfg->bootlog != 0)
1596		cfg->panic_timeout = 30;
1597
1598	return 0;
1599}
1600
1601static int __cpuinit __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
1602{
1603	if (c->x86 != 5)
1604		return 0;
1605
1606	switch (c->x86_vendor) {
1607	case X86_VENDOR_INTEL:
1608		intel_p5_mcheck_init(c);
1609		return 1;
1610		break;
1611	case X86_VENDOR_CENTAUR:
1612		winchip_mcheck_init(c);
1613		return 1;
1614		break;
1615	}
1616
1617	return 0;
1618}
1619
1620static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
1621{
1622	switch (c->x86_vendor) {
1623	case X86_VENDOR_INTEL:
1624		mce_intel_feature_init(c);
1625		mce_adjust_timer = mce_intel_adjust_timer;
1626		break;
1627	case X86_VENDOR_AMD:
1628		mce_amd_feature_init(c);
1629		break;
1630	default:
1631		break;
1632	}
1633}
1634
1635static void mce_start_timer(unsigned int cpu, struct timer_list *t)
1636{
1637	unsigned long iv = mce_adjust_timer(check_interval * HZ);
1638
1639	__this_cpu_write(mce_next_interval, iv);
1640
1641	if (mca_cfg.ignore_ce || !iv)
1642		return;
1643
1644	t->expires = round_jiffies(jiffies + iv);
1645	add_timer_on(t, smp_processor_id());
1646}
1647
1648static void __mcheck_cpu_init_timer(void)
1649{
1650	struct timer_list *t = &__get_cpu_var(mce_timer);
1651	unsigned int cpu = smp_processor_id();
1652
1653	setup_timer(t, mce_timer_fn, cpu);
1654	mce_start_timer(cpu, t);
1655}
1656
1657/* Handle unconfigured int18 (should never happen) */
1658static void unexpected_machine_check(struct pt_regs *regs, long error_code)
1659{
1660	pr_err("CPU#%d: Unexpected int18 (Machine Check)\n",
1661	       smp_processor_id());
1662}
1663
1664/* Call the installed machine check handler for this CPU setup. */
1665void (*machine_check_vector)(struct pt_regs *, long error_code) =
1666						unexpected_machine_check;
1667
1668/*
1669 * Called for each booted CPU to set up machine checks.
1670 * Must be called with preempt off:
1671 */
1672void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
1673{
1674	if (mca_cfg.disabled)
1675		return;
1676
1677	if (__mcheck_cpu_ancient_init(c))
1678		return;
1679
1680	if (!mce_available(c))
1681		return;
1682
1683	if (__mcheck_cpu_cap_init() < 0 || __mcheck_cpu_apply_quirks(c) < 0) {
1684		mca_cfg.disabled = true;
1685		return;
1686	}
1687
1688	machine_check_vector = do_machine_check;
1689
1690	__mcheck_cpu_init_generic();
1691	__mcheck_cpu_init_vendor(c);
1692	__mcheck_cpu_init_timer();
1693	INIT_WORK(&__get_cpu_var(mce_work), mce_process_work);
1694	init_irq_work(&__get_cpu_var(mce_irq_work), &mce_irq_work_cb);
1695}
1696
1697/*
1698 * mce_chrdev: Character device /dev/mcelog to read and clear the MCE log.
1699 */
1700
1701static DEFINE_SPINLOCK(mce_chrdev_state_lock);
1702static int mce_chrdev_open_count;	/* #times opened */
1703static int mce_chrdev_open_exclu;	/* already open exclusive? */
1704
1705static int mce_chrdev_open(struct inode *inode, struct file *file)
1706{
1707	spin_lock(&mce_chrdev_state_lock);
1708
1709	if (mce_chrdev_open_exclu ||
1710	    (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
1711		spin_unlock(&mce_chrdev_state_lock);
1712
1713		return -EBUSY;
1714	}
1715
1716	if (file->f_flags & O_EXCL)
1717		mce_chrdev_open_exclu = 1;
1718	mce_chrdev_open_count++;
1719
1720	spin_unlock(&mce_chrdev_state_lock);
1721
1722	return nonseekable_open(inode, file);
1723}
1724
1725static int mce_chrdev_release(struct inode *inode, struct file *file)
1726{
1727	spin_lock(&mce_chrdev_state_lock);
1728
1729	mce_chrdev_open_count--;
1730	mce_chrdev_open_exclu = 0;
1731
1732	spin_unlock(&mce_chrdev_state_lock);
1733
1734	return 0;
1735}
1736
1737static void collect_tscs(void *data)
1738{
1739	unsigned long *cpu_tsc = (unsigned long *)data;
1740
1741	rdtscll(cpu_tsc[smp_processor_id()]);
1742}
1743
1744static int mce_apei_read_done;
1745
1746/* Collect MCE record of previous boot in persistent storage via APEI ERST. */
1747static int __mce_read_apei(char __user **ubuf, size_t usize)
1748{
1749	int rc;
1750	u64 record_id;
1751	struct mce m;
1752
1753	if (usize < sizeof(struct mce))
1754		return -EINVAL;
1755
1756	rc = apei_read_mce(&m, &record_id);
1757	/* Error or no more MCE record */
1758	if (rc <= 0) {
1759		mce_apei_read_done = 1;
1760		/*
1761		 * When ERST is disabled, mce_chrdev_read() should return
1762		 * "no record" instead of "no device."
1763		 */
1764		if (rc == -ENODEV)
1765			return 0;
1766		return rc;
1767	}
1768	rc = -EFAULT;
1769	if (copy_to_user(*ubuf, &m, sizeof(struct mce)))
1770		return rc;
1771	/*
1772	 * In fact, we should have cleared the record after it has
1773	 * been flushed to disk or sent over the network by
1774	 * /sbin/mcelog, but we have no interface to support that now,
1775	 * so just clear it to avoid duplication.
1776	 */
1777	rc = apei_clear_mce(record_id);
1778	if (rc) {
1779		mce_apei_read_done = 1;
1780		return rc;
1781	}
1782	*ubuf += sizeof(struct mce);
1783
1784	return 0;
1785}
1786
1787static ssize_t mce_chrdev_read(struct file *filp, char __user *ubuf,
1788				size_t usize, loff_t *off)
1789{
1790	char __user *buf = ubuf;
1791	unsigned long *cpu_tsc;
1792	unsigned prev, next;
1793	int i, err;
1794
1795	cpu_tsc = kmalloc(nr_cpu_ids * sizeof(long), GFP_KERNEL);
1796	if (!cpu_tsc)
1797		return -ENOMEM;
1798
1799	mutex_lock(&mce_chrdev_read_mutex);
1800
1801	if (!mce_apei_read_done) {
1802		err = __mce_read_apei(&buf, usize);
1803		if (err || buf != ubuf)
1804			goto out;
1805	}
1806
1807	next = rcu_dereference_check_mce(mcelog.next);
1808
1809	/* Only supports full reads right now */
1810	err = -EINVAL;
1811	if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce))
1812		goto out;
1813
1814	err = 0;
1815	prev = 0;
1816	do {
1817		for (i = prev; i < next; i++) {
1818			unsigned long start = jiffies;
1819			struct mce *m = &mcelog.entry[i];
1820
1821			while (!m->finished) {
1822				if (time_after_eq(jiffies, start + 2)) {
1823					memset(m, 0, sizeof(*m));
1824					goto timeout;
1825				}
1826				cpu_relax();
1827			}
1828			smp_rmb();
1829			err |= copy_to_user(buf, m, sizeof(*m));
1830			buf += sizeof(*m);
1831timeout:
1832			;
1833		}
1834
1835		memset(mcelog.entry + prev, 0,
1836		       (next - prev) * sizeof(struct mce));
1837		prev = next;
1838		next = cmpxchg(&mcelog.next, prev, 0);
1839	} while (next != prev);
1840
1841	synchronize_sched();
1842
1843	/*
1844	 * Collect entries that were still getting written before the
1845	 * synchronize.
1846	 */
1847	on_each_cpu(collect_tscs, cpu_tsc, 1);
1848
1849	for (i = next; i < MCE_LOG_LEN; i++) {
1850		struct mce *m = &mcelog.entry[i];
1851
1852		if (m->finished && m->tsc < cpu_tsc[m->cpu]) {
1853			err |= copy_to_user(buf, m, sizeof(*m));
1854			smp_rmb();
1855			buf += sizeof(*m);
1856			memset(m, 0, sizeof(*m));
1857		}
1858	}
1859
1860	if (err)
1861		err = -EFAULT;
1862
1863out:
1864	mutex_unlock(&mce_chrdev_read_mutex);
1865	kfree(cpu_tsc);
1866
1867	return err ? err : buf - ubuf;
1868}
1869
1870static unsigned int mce_chrdev_poll(struct file *file, poll_table *wait)
1871{
1872	poll_wait(file, &mce_chrdev_wait, wait);
1873	if (rcu_access_index(mcelog.next))
1874		return POLLIN | POLLRDNORM;
1875	if (!mce_apei_read_done && apei_check_mce())
1876		return POLLIN | POLLRDNORM;
1877	return 0;
1878}
1879
1880static long mce_chrdev_ioctl(struct file *f, unsigned int cmd,
1881				unsigned long arg)
1882{
1883	int __user *p = (int __user *)arg;
1884
1885	if (!capable(CAP_SYS_ADMIN))
1886		return -EPERM;
1887
1888	switch (cmd) {
1889	case MCE_GET_RECORD_LEN:
1890		return put_user(sizeof(struct mce), p);
1891	case MCE_GET_LOG_LEN:
1892		return put_user(MCE_LOG_LEN, p);
1893	case MCE_GETCLEAR_FLAGS: {
1894		unsigned flags;
1895
1896		do {
1897			flags = mcelog.flags;
1898		} while (cmpxchg(&mcelog.flags, flags, 0) != flags);
1899
1900		return put_user(flags, p);
1901	}
1902	default:
1903		return -ENOTTY;
1904	}
1905}
1906
1907static ssize_t (*mce_write)(struct file *filp, const char __user *ubuf,
1908			    size_t usize, loff_t *off);
1909
1910void register_mce_write_callback(ssize_t (*fn)(struct file *filp,
1911			     const char __user *ubuf,
1912			     size_t usize, loff_t *off))
1913{
1914	mce_write = fn;
1915}
1916EXPORT_SYMBOL_GPL(register_mce_write_callback);
1917
1918ssize_t mce_chrdev_write(struct file *filp, const char __user *ubuf,
1919			 size_t usize, loff_t *off)
1920{
1921	if (mce_write)
1922		return mce_write(filp, ubuf, usize, off);
1923	else
1924		return -EINVAL;
1925}
1926
1927static const struct file_operations mce_chrdev_ops = {
1928	.open			= mce_chrdev_open,
1929	.release		= mce_chrdev_release,
1930	.read			= mce_chrdev_read,
1931	.write			= mce_chrdev_write,
1932	.poll			= mce_chrdev_poll,
1933	.unlocked_ioctl		= mce_chrdev_ioctl,
1934	.llseek			= no_llseek,
1935};
1936
1937static struct miscdevice mce_chrdev_device = {
1938	MISC_MCELOG_MINOR,
1939	"mcelog",
1940	&mce_chrdev_ops,
1941};
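
/*
 * Userspace usage sketch (illustrative; this is roughly what mcelog does).
 * The read path above only supports full reads, so the buffer must hold the
 * whole log.
 */
#if 0	/* example only (userspace code), not compiled */
	struct mce records[MCE_LOG_LEN];
	int fd = open("/dev/mcelog", O_RDONLY);
	ssize_t n = read(fd, records, sizeof(records));

	/* n is a multiple of sizeof(struct mce), so n / sizeof(struct mce)
	   records were returned. */
#endif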
1942
1943/*
1944 * mce=off Disables machine check
1945 * mce=no_cmci Disables CMCI
1946 * mce=dont_log_ce Clears corrected events silently, no log created for CEs.
1947 * mce=ignore_ce Disables polling and CMCI, corrected events are not cleared.
1948 * mce=TOLERANCELEVEL[,monarchtimeout] (number, see above)
1949 *	monarchtimeout is how long to wait for other CPUs on machine
1950 *	check, or 0 to not wait
1951 * mce=bootlog Log MCEs from before booting. Disabled by default on AMD.
1952 * mce=nobootlog Don't log MCEs from before booting.
1953 * mce=bios_cmci_threshold Don't program the CMCI threshold
1954 */
1955static int __init mcheck_enable(char *str)
1956{
1957	struct mca_config *cfg = &mca_cfg;
1958
1959	if (*str == 0) {
1960		enable_p5_mce();
1961		return 1;
1962	}
1963	if (*str == '=')
1964		str++;
1965	if (!strcmp(str, "off"))
1966		cfg->disabled = true;
1967	else if (!strcmp(str, "no_cmci"))
1968		cfg->cmci_disabled = true;
1969	else if (!strcmp(str, "dont_log_ce"))
1970		cfg->dont_log_ce = true;
1971	else if (!strcmp(str, "ignore_ce"))
1972		cfg->ignore_ce = true;
1973	else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog"))
1974		cfg->bootlog = (str[0] == 'b');
1975	else if (!strcmp(str, "bios_cmci_threshold"))
1976		cfg->bios_cmci_threshold = true;
1977	else if (isdigit(str[0])) {
1978		get_option(&str, &(cfg->tolerant));
1979		if (*str == ',') {
1980			++str;
1981			get_option(&str, &(cfg->monarch_timeout));
1982		}
1983	} else {
1984		pr_info("mce argument %s ignored. Please use /sys\n", str);
1985		return 0;
1986	}
1987	return 1;
1988}
1989__setup("mce", mcheck_enable);
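
/*
 * Examples (illustrative): "mce=off" disables machine check handling
 * entirely, "mce=bootlog" logs MCEs left over from before boot even where
 * that is off by default (AMD), and "mce=2,500000" sets tolerant=2 with a
 * 500000 us (0.5 s) Monarch timeout, using the
 * TOLERANCELEVEL[,monarchtimeout] form documented above.
 */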
1990
1991int __init mcheck_init(void)
1992{
1993	mcheck_intel_therm_init();
1994
1995	return 0;
1996}
1997
1998/*
1999 * mce_syscore: PM support
2000 */
2001
2002/*
2003 * Disable machine checks on suspend and shutdown. We can't really handle
2004 * them later.
2005 */
2006static int mce_disable_error_reporting(void)
2007{
2008	int i;
2009
2010	for (i = 0; i < mca_cfg.banks; i++) {
2011		struct mce_bank *b = &mce_banks[i];
2012
2013		if (b->init)
2014			wrmsrl(MSR_IA32_MCx_CTL(i), 0);
2015	}
2016	return 0;
2017}
2018
2019static int mce_syscore_suspend(void)
2020{
2021	return mce_disable_error_reporting();
2022}
2023
2024static void mce_syscore_shutdown(void)
2025{
2026	mce_disable_error_reporting();
2027}
2028
2029/*
2030 * On resume clear all MCE state. Don't want to see leftovers from the BIOS.
2031 * Only one CPU is active at this time, the others get re-added later using
2032 * CPU hotplug:
2033 */
2034static void mce_syscore_resume(void)
2035{
2036	__mcheck_cpu_init_generic();
2037	__mcheck_cpu_init_vendor(__this_cpu_ptr(&cpu_info));
2038}
2039
2040static struct syscore_ops mce_syscore_ops = {
2041	.suspend	= mce_syscore_suspend,
2042	.shutdown	= mce_syscore_shutdown,
2043	.resume		= mce_syscore_resume,
2044};
2045
2046/*
2047 * mce_device: Sysfs support
2048 */
2049
2050static void mce_cpu_restart(void *data)
2051{
2052	if (!mce_available(__this_cpu_ptr(&cpu_info)))
2053		return;
2054	__mcheck_cpu_init_generic();
2055	__mcheck_cpu_init_timer();
2056}
2057
2058/* Reinit MCEs after user configuration changes */
2059static void mce_restart(void)
2060{
2061	mce_timer_delete_all();
2062	on_each_cpu(mce_cpu_restart, NULL, 1);
2063}
2064
2065/* Toggle features for corrected errors */
2066static void mce_disable_cmci(void *data)
2067{
2068	if (!mce_available(__this_cpu_ptr(&cpu_info)))
2069		return;
2070	cmci_clear();
2071}
2072
2073static void mce_enable_ce(void *all)
2074{
2075	if (!mce_available(__this_cpu_ptr(&cpu_info)))
2076		return;
2077	cmci_reenable();
2078	cmci_recheck();
2079	if (all)
2080		__mcheck_cpu_init_timer();
2081}
2082
2083static struct bus_type mce_subsys = {
2084	.name		= "machinecheck",
2085	.dev_name	= "machinecheck",
2086};
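/*
 * subsys_system_register() on this bus (done in mcheck_init_device())
 * creates /sys/devices/system/machinecheck/, and mce_device_create()
 * populates it with one machinecheck<cpu> device per online CPU.
 */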
2087
2088DEFINE_PER_CPU(struct device *, mce_device);
2089
2090__cpuinitdata
2091void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);
2092
2093static inline struct mce_bank *attr_to_bank(struct device_attribute *attr)
2094{
2095	return container_of(attr, struct mce_bank, attr);
2096}
2097
2098static ssize_t show_bank(struct device *s, struct device_attribute *attr,
2099			 char *buf)
2100{
2101	return sprintf(buf, "%llx\n", attr_to_bank(attr)->ctl);
2102}
2103
2104static ssize_t set_bank(struct device *s, struct device_attribute *attr,
2105			const char *buf, size_t size)
2106{
2107	u64 new;
2108
2109	if (strict_strtoull(buf, 0, &new) < 0)
2110		return -EINVAL;
2111
2112	attr_to_bank(attr)->ctl = new;
2113	mce_restart();
2114
2115	return size;
2116}
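/*
 * The two helpers above back the per-bank bank<N> attributes set up in
 * mce_init_banks(): a read returns the current control mask for that bank,
 * and a write stores the new mask and calls mce_restart() so it gets
 * reprogrammed on each CPU.  The mask itself is shared by all CPUs (see
 * the comment above mce_device_create()).
 */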
2117
2118static ssize_t
2119show_trigger(struct device *s, struct device_attribute *attr, char *buf)
2120{
2121	strcpy(buf, mce_helper);
2122	strcat(buf, "\n");
2123	return strlen(mce_helper) + 1;
2124}
2125
2126static ssize_t set_trigger(struct device *s, struct device_attribute *attr,
2127				const char *buf, size_t siz)
2128{
2129	char *p;
2130
2131	strncpy(mce_helper, buf, sizeof(mce_helper));
2132	mce_helper[sizeof(mce_helper)-1] = 0;
2133	p = strchr(mce_helper, '\n');
2134
2135	if (p)
2136		*p = 0;
2137
2138	return strlen(mce_helper) + !!p;
2139}
2140
2141static ssize_t set_ignore_ce(struct device *s,
2142			     struct device_attribute *attr,
2143			     const char *buf, size_t size)
2144{
2145	u64 new;
2146
2147	if (strict_strtoull(buf, 0, &new) < 0)
2148		return -EINVAL;
2149
2150	if (mca_cfg.ignore_ce ^ !!new) {
2151		if (new) {
2152			/* disable ce features */
2153			mce_timer_delete_all();
2154			on_each_cpu(mce_disable_cmci, NULL, 1);
2155			mca_cfg.ignore_ce = true;
2156		} else {
2157			/* enable ce features */
2158			mca_cfg.ignore_ce = false;
2159			on_each_cpu(mce_enable_ce, (void *)1, 1);
2160		}
2161	}
2162	return size;
2163}
2164
2165static ssize_t set_cmci_disabled(struct device *s,
2166				 struct device_attribute *attr,
2167				 const char *buf, size_t size)
2168{
2169	u64 new;
2170
2171	if (strict_strtoull(buf, 0, &new) < 0)
2172		return -EINVAL;
2173
2174	if (mca_cfg.cmci_disabled ^ !!new) {
2175		if (new) {
2176			/* disable cmci */
2177			on_each_cpu(mce_disable_cmci, NULL, 1);
2178			mca_cfg.cmci_disabled = true;
2179		} else {
2180			/* enable cmci */
2181			mca_cfg.cmci_disabled = false;
2182			on_each_cpu(mce_enable_ce, NULL, 1);
2183		}
2184	}
2185	return size;
2186}
2187
2188static ssize_t store_int_with_restart(struct device *s,
2189				      struct device_attribute *attr,
2190				      const char *buf, size_t size)
2191{
2192	ssize_t ret = device_store_int(s, attr, buf, size);
2193	mce_restart();
2194	return ret;
2195}
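/*
 * store_int_with_restart() backs the check_interval attribute defined
 * below: after the integer is stored, mce_restart() deletes and re-arms
 * the per-CPU polling timers so the new interval takes effect without a
 * reboot.
 */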
2196
2197static DEVICE_ATTR(trigger, 0644, show_trigger, set_trigger);
2198static DEVICE_INT_ATTR(tolerant, 0644, mca_cfg.tolerant);
2199static DEVICE_INT_ATTR(monarch_timeout, 0644, mca_cfg.monarch_timeout);
2200static DEVICE_BOOL_ATTR(dont_log_ce, 0644, mca_cfg.dont_log_ce);
2201
2202static struct dev_ext_attribute dev_attr_check_interval = {
2203	__ATTR(check_interval, 0644, device_show_int, store_int_with_restart),
2204	&check_interval
2205};
2206
2207static struct dev_ext_attribute dev_attr_ignore_ce = {
2208	__ATTR(ignore_ce, 0644, device_show_bool, set_ignore_ce),
2209	&mca_cfg.ignore_ce
2210};
2211
2212static struct dev_ext_attribute dev_attr_cmci_disabled = {
2213	__ATTR(cmci_disabled, 0644, device_show_bool, set_cmci_disabled),
2214	&mca_cfg.cmci_disabled
2215};
2216
2217static struct device_attribute *mce_device_attrs[] = {
2218	&dev_attr_tolerant.attr,
2219	&dev_attr_check_interval.attr,
2220	&dev_attr_trigger,
2221	&dev_attr_monarch_timeout.attr,
2222	&dev_attr_dont_log_ce.attr,
2223	&dev_attr_ignore_ce.attr,
2224	&dev_attr_cmci_disabled.attr,
2225	NULL
2226};
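/*
 * Illustrative shell usage of the attributes above, assuming sysfs is
 * mounted at /sys.  The backing variables are global, so a write through
 * any machinecheck<cpu> node affects all CPUs:
 *
 *	echo 2 > /sys/devices/system/machinecheck/machinecheck0/tolerant
 *	echo 1 > /sys/devices/system/machinecheck/machinecheck0/ignore_ce
 *	cat /sys/devices/system/machinecheck/machinecheck0/check_interval
 */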
2227
2228static cpumask_var_t mce_device_initialized;
2229
2230static void mce_device_release(struct device *dev)
2231{
2232	kfree(dev);
2233}
2234
2235/* Per cpu device init. All of the cpus still share the same ctrl bank: */
2236static __cpuinit int mce_device_create(unsigned int cpu)
2237{
2238	struct device *dev;
2239	int err;
2240	int i, j;
2241
2242	if (!mce_available(&boot_cpu_data))
2243		return -EIO;
2244
2245	dev = kzalloc(sizeof *dev, GFP_KERNEL);
2246	if (!dev)
2247		return -ENOMEM;
2248	dev->id  = cpu;
2249	dev->bus = &mce_subsys;
2250	dev->release = &mce_device_release;
2251
	err = device_register(dev);
	if (err) {
		/* device_register() holds a reference even on failure */
		put_device(dev);
		return err;
	}
2255
2256	for (i = 0; mce_device_attrs[i]; i++) {
2257		err = device_create_file(dev, mce_device_attrs[i]);
2258		if (err)
2259			goto error;
2260	}
2261	for (j = 0; j < mca_cfg.banks; j++) {
2262		err = device_create_file(dev, &mce_banks[j].attr);
2263		if (err)
2264			goto error2;
2265	}
2266	cpumask_set_cpu(cpu, mce_device_initialized);
2267	per_cpu(mce_device, cpu) = dev;
2268
2269	return 0;
2270error2:
2271	while (--j >= 0)
2272		device_remove_file(dev, &mce_banks[j].attr);
2273error:
2274	while (--i >= 0)
2275		device_remove_file(dev, mce_device_attrs[i]);
2276
2277	device_unregister(dev);
2278
2279	return err;
2280}
2281
2282static __cpuinit void mce_device_remove(unsigned int cpu)
2283{
2284	struct device *dev = per_cpu(mce_device, cpu);
2285	int i;
2286
2287	if (!cpumask_test_cpu(cpu, mce_device_initialized))
2288		return;
2289
2290	for (i = 0; mce_device_attrs[i]; i++)
2291		device_remove_file(dev, mce_device_attrs[i]);
2292
2293	for (i = 0; i < mca_cfg.banks; i++)
2294		device_remove_file(dev, &mce_banks[i].attr);
2295
2296	device_unregister(dev);
2297	cpumask_clear_cpu(cpu, mce_device_initialized);
2298	per_cpu(mce_device, cpu) = NULL;
2299}
2300
2301/* Make sure there are no machine checks on offlined CPUs. */
2302static void __cpuinit mce_disable_cpu(void *h)
2303{
2304	unsigned long action = *(unsigned long *)h;
2305	int i;
2306
2307	if (!mce_available(__this_cpu_ptr(&cpu_info)))
2308		return;
2309
2310	if (!(action & CPU_TASKS_FROZEN))
2311		cmci_clear();
2312	for (i = 0; i < mca_cfg.banks; i++) {
2313		struct mce_bank *b = &mce_banks[i];
2314
2315		if (b->init)
2316			wrmsrl(MSR_IA32_MCx_CTL(i), 0);
2317	}
2318}
2319
2320static void __cpuinit mce_reenable_cpu(void *h)
2321{
2322	unsigned long action = *(unsigned long *)h;
2323	int i;
2324
2325	if (!mce_available(__this_cpu_ptr(&cpu_info)))
2326		return;
2327
2328	if (!(action & CPU_TASKS_FROZEN))
2329		cmci_reenable();
2330	for (i = 0; i < mca_cfg.banks; i++) {
2331		struct mce_bank *b = &mce_banks[i];
2332
2333		if (b->init)
2334			wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
2335	}
2336}
2337
2338/* Get notified when a cpu comes on/off. Be hotplug friendly. */
2339static int __cpuinit
2340mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
2341{
2342	unsigned int cpu = (unsigned long)hcpu;
2343	struct timer_list *t = &per_cpu(mce_timer, cpu);
2344
2345	switch (action & ~CPU_TASKS_FROZEN) {
2346	case CPU_ONLINE:
2347		mce_device_create(cpu);
2348		if (threshold_cpu_callback)
2349			threshold_cpu_callback(action, cpu);
2350		break;
2351	case CPU_DEAD:
2352		if (threshold_cpu_callback)
2353			threshold_cpu_callback(action, cpu);
2354		mce_device_remove(cpu);
2355		mce_intel_hcpu_update(cpu);
2356		break;
2357	case CPU_DOWN_PREPARE:
2358		smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
2359		del_timer_sync(t);
2360		break;
2361	case CPU_DOWN_FAILED:
2362		smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
2363		mce_start_timer(cpu, t);
2364		break;
2365	}
2366
2367	if (action == CPU_POST_DEAD) {
2368		/* intentionally ignoring frozen here */
2369		cmci_rediscover(cpu);
2370	}
2371
2372	return NOTIFY_OK;
2373}
2374
2375static struct notifier_block mce_cpu_notifier __cpuinitdata = {
2376	.notifier_call = mce_cpu_callback,
2377};
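/*
 * Rough summary of the hotplug notifier above:
 *	CPU_ONLINE	 - create the sysfs device, call the vendor
 *			   threshold callback
 *	CPU_DOWN_PREPARE - give up CMCI banks, clear the bank controls and
 *			   stop the polling timer
 *	CPU_DOWN_FAILED	 - undo CPU_DOWN_PREPARE
 *	CPU_DEAD	 - remove the sysfs device, update Intel per-CPU state
 *	CPU_POST_DEAD	 - let surviving CPUs rediscover the dead CPU's
 *			   CMCI banks
 */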
2378
2379static __init void mce_init_banks(void)
2380{
2381	int i;
2382
2383	for (i = 0; i < mca_cfg.banks; i++) {
2384		struct mce_bank *b = &mce_banks[i];
2385		struct device_attribute *a = &b->attr;
2386
2387		sysfs_attr_init(&a->attr);
2388		a->attr.name	= b->attrname;
2389		snprintf(b->attrname, ATTR_LEN, "bank%d", i);
2390
2391		a->attr.mode	= 0644;
2392		a->show		= show_bank;
2393		a->store	= set_bank;
2394	}
2395}
2396
2397static __init int mcheck_init_device(void)
2398{
2399	int err;
2400	int i = 0;
2401
2402	if (!mce_available(&boot_cpu_data))
2403		return -EIO;
2404
	if (!zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL))
		return -ENOMEM;
2406
2407	mce_init_banks();
2408
2409	err = subsys_system_register(&mce_subsys, NULL);
2410	if (err)
2411		return err;
2412
2413	for_each_online_cpu(i) {
2414		err = mce_device_create(i);
2415		if (err)
2416			return err;
2417	}
2418
2419	register_syscore_ops(&mce_syscore_ops);
2420	register_hotcpu_notifier(&mce_cpu_notifier);
2421
2422	/* register character device /dev/mcelog */
2423	misc_register(&mce_chrdev_device);
	err = misc_register(&mce_chrdev_device);
2425	return err;
2426}
2427device_initcall_sync(mcheck_init_device);
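/*
 * Note that machine check reporting itself is enabled much earlier, from
 * mcheck_cpu_init() during CPU identification; this initcall only adds the
 * sysfs, power-management and /dev/mcelog plumbing on top of that.
 */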
2428
2429/*
2430 * Old style boot options parsing. Only for compatibility.
2431 */
2432static int __init mcheck_disable(char *str)
2433{
2434	mca_cfg.disabled = true;
2435	return 1;
2436}
2437__setup("nomce", mcheck_disable);
2438
2439#ifdef CONFIG_DEBUG_FS
2440struct dentry *mce_get_debugfs_dir(void)
2441{
2442	static struct dentry *dmce;
2443
2444	if (!dmce)
2445		dmce = debugfs_create_dir("mce", NULL);
2446
2447	return dmce;
2448}
2449
2450static void mce_reset(void)
2451{
2452	cpu_missing = 0;
2453	atomic_set(&mce_fake_paniced, 0);
2454	atomic_set(&mce_executing, 0);
2455	atomic_set(&mce_callin, 0);
2456	atomic_set(&global_nwo, 0);
2457}
2458
2459static int fake_panic_get(void *data, u64 *val)
2460{
2461	*val = fake_panic;
2462	return 0;
2463}
2464
2465static int fake_panic_set(void *data, u64 val)
2466{
2467	mce_reset();
2468	fake_panic = val;
2469	return 0;
2470}
2471
2472DEFINE_SIMPLE_ATTRIBUTE(fake_panic_fops, fake_panic_get,
2473			fake_panic_set, "%llu\n");
2474
2475static int __init mcheck_debugfs_init(void)
2476{
2477	struct dentry *dmce, *ffake_panic;
2478
2479	dmce = mce_get_debugfs_dir();
2480	if (!dmce)
2481		return -ENOMEM;
2482	ffake_panic = debugfs_create_file("fake_panic", 0444, dmce, NULL,
2483					  &fake_panic_fops);
2484	if (!ffake_panic)
2485		return -ENOMEM;
2486
2487	return 0;
2488}
2489late_initcall(mcheck_debugfs_init);
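/*
 * This creates /sys/kernel/debug/mce/fake_panic (assuming debugfs is
 * mounted at /sys/kernel/debug).  mce_get_debugfs_dir() is deliberately
 * non-static so other MCE code can place its debug files in the same
 * directory.  Every write to fake_panic goes through fake_panic_set(),
 * which resets the rendezvous bookkeeping via mce_reset() before storing
 * the new value.
 */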
2490#endif
2491