memory-failure.c revision 95d01fc664b9476e0d18e3d745bb209a42a33588
1/*
2 * Copyright (C) 2008, 2009 Intel Corporation
3 * Authors: Andi Kleen, Fengguang Wu
4 *
5 * This software may be redistributed and/or modified under the terms of
6 * the GNU General Public License ("GPL") version 2 only as published by the
7 * Free Software Foundation.
8 *
9 * High level machine check handler. Handles pages reported by the
10 * hardware as being corrupted, usually due to a 2-bit ECC memory or cache
11 * failure.
12 *
13 * Handles page cache pages in various states.	The tricky part
14 * here is that we can access any page asynchronously to other VM
15 * users, because memory failures could happen anytime and anywhere,
16 * possibly violating some of their assumptions. This is why this code
17 * has to be extremely careful. Generally it tries to use normal locking
18 * rules, i.e. it takes the standard locks, even if that means the
19 * error handling can potentially take a long time.
20 *
21 * The operation to map back from RMAP chains to processes has to walk
22 * the complete process list and has non-linear complexity in the number
23 * of mappings. In short it can be quite slow. But since memory corruptions
24 * are rare we hope to get away with this.
25 */
26
27/*
28 * Notebook:
29 * - hugetlb needs more code
30 * - kcore/oldmem/vmcore/mem/kmem check for hwpoison pages
31 * - pass bad pages to kdump next kernel
32 */
33#define DEBUG 1		/* remove me in 2.6.34 */
34#include <linux/kernel.h>
35#include <linux/mm.h>
36#include <linux/page-flags.h>
37#include <linux/sched.h>
38#include <linux/ksm.h>
39#include <linux/rmap.h>
40#include <linux/pagemap.h>
41#include <linux/swap.h>
42#include <linux/backing-dev.h>
43#include "internal.h"
44
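/*
 * Policy for "action optional" errors reported by a background scrubber:
 * 0 means a process is only killed when it actually touches the corrupted
 * page (late kill), 1 means processes that merely have the page mapped are
 * sent SIGBUS as soon as the corruption is detected (early kill).
 * Exposed as /proc/sys/vm/memory_failure_early_kill and can be overridden
 * per process, see task_early_kill() below.
 */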
45int sysctl_memory_failure_early_kill __read_mostly = 0;
46
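/*
 * When 0, do not attempt any recovery: a memory failure reported through
 * memory_failure() panics the machine instead of running the handlers in
 * this file. Exposed as /proc/sys/vm/memory_failure_recovery.
 */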
47int sysctl_memory_failure_recovery __read_mostly = 1;
48
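/*
 * Running count of pages poisoned and taken out of service; reported as
 * "HardwareCorrupted" in /proc/meminfo.
 */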
49atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
50
51/*
52 * Send all the processes that have the page mapped an ``action optional''
53 * signal.
54 */
55static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
56			unsigned long pfn)
57{
58	struct siginfo si;
59	int ret;
60
61	printk(KERN_ERR
62		"MCE %#lx: Killing %s:%d early due to hardware memory corruption\n",
63		pfn, t->comm, t->pid);
64	si.si_signo = SIGBUS;
65	si.si_errno = 0;
66	si.si_code = BUS_MCEERR_AO;
67	si.si_addr = (void *)addr;
68#ifdef __ARCH_SI_TRAPNO
69	si.si_trapno = trapno;
70#endif
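	/*
	 * si_addr_lsb reports the least significant valid bit of the faulting
	 * address, i.e. the granularity of the corruption; here always a
	 * whole page.
	 */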
71	si.si_addr_lsb = PAGE_SHIFT;
72	/*
73	 * Don't use force here; it's convenient if the signal
74	 * can be temporarily blocked.
75	 * This could cause a loop when the user sets SIGBUS
76	 * to SIG_IGN, but hopefully no one will do that?
77	 */
78	ret = send_sig_info(SIGBUS, &si, t);  /* synchronous? */
79	if (ret < 0)
80		printk(KERN_INFO "MCE: Error sending signal to %s:%d: %d\n",
81		       t->comm, t->pid, ret);
82	return ret;
83}
84
85/*
86 * When an unknown page type is encountered, drain as many buffers as possible
87 * in the hope of turning the page into an LRU or free page, which we can handle.
88 */
89void shake_page(struct page *p)
90{
91	if (!PageSlab(p)) {
92		lru_add_drain_all();
93		if (PageLRU(p))
94			return;
95		drain_all_pages();
96		if (PageLRU(p) || is_free_buddy_page(p))
97			return;
98	}
99	/*
100	 * Could call shrink_slab here (which would also
101	 * shrink other caches). Unfortunately that might
102	 * also access the corrupted page, which could be fatal.
103	 */
104}
105EXPORT_SYMBOL_GPL(shake_page);
106
107/*
108 * Kill all processes that have a poisoned page mapped and then isolate
109 * the page.
110 *
111 * General strategy:
112 * Find all processes having the page mapped and kill them.
113 * But we keep a page reference around so that the page is not
114 * actually freed yet.
115 * Then stash the page away.
116 *
117 * There's no convenient way to get back to mapped processes
118 * from the VMAs. So do a brute-force search over all
119 * running processes.
120 *
121 * Remember that machine checks are not common (or rather
122 * if they are common you have other problems), so this shouldn't
123 * be a performance issue.
124 *
125 * Also there are some races possible while we get from the
126 * error detection to actually handle it.
127 */
128
129struct to_kill {
130	struct list_head nd;
131	struct task_struct *tsk;
132	unsigned long addr;
133	unsigned addr_valid:1;
134};
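/*
 * collect_procs() preallocates one to_kill entry with GFP_NOIO before any
 * locks are taken, so at least one victim can be queued even if the
 * GFP_ATOMIC allocations in add_to_kill() fail.
 */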
135
136/*
137 * Failure handling: if we can't find or can't kill a process there's
138 * not much we can do. We just print a message and otherwise ignore the error.
139 */
140
141/*
142 * Schedule a process for later kill.
143 * Uses GFP_ATOMIC allocations to avoid potential recursions in the VM.
144 * TBD would GFP_NOIO be enough?
145 */
146static void add_to_kill(struct task_struct *tsk, struct page *p,
147		       struct vm_area_struct *vma,
148		       struct list_head *to_kill,
149		       struct to_kill **tkc)
150{
151	struct to_kill *tk;
152
153	if (*tkc) {
154		tk = *tkc;
155		*tkc = NULL;
156	} else {
157		tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC);
158		if (!tk) {
159			printk(KERN_ERR
160		"MCE: Out of memory during machine check handling\n");
161			return;
162		}
163	}
164	tk->addr = page_address_in_vma(p, vma);
165	tk->addr_valid = 1;
166
167	/*
168	 * In theory we don't have to kill when the page was
169	 * munmapped. But it could also have been mremapped. Since that's
170	 * likely very rare, kill anyway just out of paranoia, but use
171	 * SIGKILL because the error is no longer contained.
172	 */
173	if (tk->addr == -EFAULT) {
174		pr_debug("MCE: Unable to find user space address %lx in %s\n",
175			page_to_pfn(p), tsk->comm);
176		tk->addr_valid = 0;
177	}
178	get_task_struct(tsk);
179	tk->tsk = tsk;
180	list_add_tail(&tk->nd, to_kill);
181}
182
183/*
184 * Kill the processes that have been collected earlier.
185 *
186 * Only do anything when DOIT is set, otherwise just free the list
187 * (this is used for clean pages which do not need killing).
188 * Also when FAIL is set, force kill because something went
189 * wrong earlier.
190 */
191static void kill_procs_ao(struct list_head *to_kill, int doit, int trapno,
192			  int fail, unsigned long pfn)
193{
194	struct to_kill *tk, *next;
195
196	list_for_each_entry_safe (tk, next, to_kill, nd) {
197		if (doit) {
198			/*
199			 * In case something went wrong with munmapping
200			 * make sure the process doesn't catch the
201			 * signal and then access the memory. Just kill it
202			 * outright; SIGKILL cannot be caught or blocked.
203			 */
204			if (fail || tk->addr_valid == 0) {
205				printk(KERN_ERR
206		"MCE %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
207					pfn, tk->tsk->comm, tk->tsk->pid);
208				force_sig(SIGKILL, tk->tsk);
209			}
210
211			/*
212			 * In theory the process could have mapped
213			 * something else at the address in-between. We could
214			 * check for that, but we need to tell the
215			 * process anyway.
216			 */
217			else if (kill_proc_ao(tk->tsk, tk->addr, trapno,
218					      pfn) < 0)
219				printk(KERN_ERR
220		"MCE %#lx: Cannot send advisory machine check signal to %s:%d\n",
221					pfn, tk->tsk->comm, tk->tsk->pid);
222		}
223		put_task_struct(tk->tsk);
224		kfree(tk);
225	}
226}
227
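/*
 * Decide whether this task should be killed early (while it merely has the
 * page mapped) or late (when it actually touches the poisoned data).
 * The per-process PF_MCE_PROCESS/PF_MCE_EARLY flags, set through the
 * PR_MCE_KILL prctl, take precedence over the global sysctl.
 */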
228static int task_early_kill(struct task_struct *tsk)
229{
230	if (!tsk->mm)
231		return 0;
232	if (tsk->flags & PF_MCE_PROCESS)
233		return !!(tsk->flags & PF_MCE_EARLY);
234	return sysctl_memory_failure_early_kill;
235}
236
237/*
238 * Collect processes when the error hit an anonymous page.
239 */
240static void collect_procs_anon(struct page *page, struct list_head *to_kill,
241			      struct to_kill **tkc)
242{
243	struct vm_area_struct *vma;
244	struct task_struct *tsk;
245	struct anon_vma *av;
246
247	read_lock(&tasklist_lock);
248	av = page_lock_anon_vma(page);
249	if (av == NULL)	/* Not actually mapped anymore */
250		goto out;
251	for_each_process (tsk) {
252		if (!task_early_kill(tsk))
253			continue;
254		list_for_each_entry (vma, &av->head, anon_vma_node) {
255			if (!page_mapped_in_vma(page, vma))
256				continue;
257			if (vma->vm_mm == tsk->mm)
258				add_to_kill(tsk, page, vma, to_kill, tkc);
259		}
260	}
261	page_unlock_anon_vma(av);
262out:
263	read_unlock(&tasklist_lock);
264}
265
266/*
267 * Collect processes when the error hit a file mapped page.
268 */
269static void collect_procs_file(struct page *page, struct list_head *to_kill,
270			      struct to_kill **tkc)
271{
272	struct vm_area_struct *vma;
273	struct task_struct *tsk;
274	struct prio_tree_iter iter;
275	struct address_space *mapping = page->mapping;
276
277	/*
278	 * A note on the locking order between the two locks.
279	 * We don't rely on this particular order.
280	 * If you have some other code that needs a different order
281	 * feel free to switch them around. Or add a reverse link
282	 * from mm_struct to task_struct, then this could be all
283	 * done without taking tasklist_lock and looping over all tasks.
284	 */
285
286	read_lock(&tasklist_lock);
287	spin_lock(&mapping->i_mmap_lock);
288	for_each_process(tsk) {
289		pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
290
291		if (!task_early_kill(tsk))
292			continue;
293
294		vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff,
295				      pgoff) {
296			/*
297			 * Send early kill signal to tasks where a vma covers
298			 * the page but the corrupted page is not necessarily
299			 * mapped in its pte.
300			 * Assume applications who requested early kill want
301			 * to be informed of all such data corruptions.
302			 */
303			if (vma->vm_mm == tsk->mm)
304				add_to_kill(tsk, page, vma, to_kill, tkc);
305		}
306	}
307	spin_unlock(&mapping->i_mmap_lock);
308	read_unlock(&tasklist_lock);
309}
310
311/*
312 * Collect the processes who have the corrupted page mapped to kill.
313 * This is done in two steps for locking reasons.
314 * First preallocate one tokill structure outside the spin locks,
315 * so that we can kill at least one process reasonably reliably.
316 */
317static void collect_procs(struct page *page, struct list_head *tokill)
318{
319	struct to_kill *tk;
320
321	if (!page->mapping)
322		return;
323
324	tk = kmalloc(sizeof(struct to_kill), GFP_NOIO);
325	if (!tk)
326		return;
327	if (PageAnon(page))
328		collect_procs_anon(page, tokill, &tk);
329	else
330		collect_procs_file(page, tokill, &tk);
331	kfree(tk);
332}
333
334/*
335 * Error handlers for various types of pages.
336 */
337
338enum outcome {
339	FAILED,		/* Error handling failed */
340	DELAYED,	/* Will be handled later */
341	IGNORED,	/* Error safely ignored */
342	RECOVERED,	/* Successfully recovered */
343};
344
345static const char *action_name[] = {
346	[FAILED] = "Failed",
347	[DELAYED] = "Delayed",
348	[IGNORED] = "Ignored",
349	[RECOVERED] = "Recovered",
350};
351
352/*
353 * XXX: It is possible that a page is isolated from LRU cache,
354 * and then kept in swap cache, or fails to be removed from page cache.
355 * The page count will stop it from being freed by unpoison.
356 * Stress tests should be aware of this memory leak problem.
357 */
358static int delete_from_lru_cache(struct page *p)
359{
360	if (!isolate_lru_page(p)) {
361		/*
362		 * Clear the page flags the buddy allocator checks, so that it
363		 * won't complain when the page is later unpoisoned and freed.
364		 */
365		ClearPageActive(p);
366		ClearPageUnevictable(p);
367		/*
368		 * drop the page count elevated by isolate_lru_page()
369		 */
370		page_cache_release(p);
371		return 0;
372	}
373	return -EIO;
374}
375
376/*
377 * Error hit kernel page.
378 * Do nothing; try to be lucky and simply not touch the page. For a few cases we
379 * could be more sophisticated.
380 */
381static int me_kernel(struct page *p, unsigned long pfn)
382{
383	return DELAYED;
384}
385
386/*
387 * Already poisoned page.
388 */
389static int me_ignore(struct page *p, unsigned long pfn)
390{
391	return IGNORED;
392}
393
394/*
395 * Page in unknown state. Do nothing.
396 */
397static int me_unknown(struct page *p, unsigned long pfn)
398{
399	printk(KERN_ERR "MCE %#lx: Unknown page state\n", pfn);
400	return FAILED;
401}
402
403/*
404 * Clean (or cleaned) page cache page.
405 */
406static int me_pagecache_clean(struct page *p, unsigned long pfn)
407{
408	int err;
409	int ret = FAILED;
410	struct address_space *mapping;
411
412	delete_from_lru_cache(p);
413
414	/*
415	 * For anonymous pages we're done; the only reference left
416	 * should be the one memory_failure() holds.
417	 */
418	if (PageAnon(p))
419		return RECOVERED;
420
421	/*
422	 * Now truncate the page in the page cache. This is really
423	 * more like a "temporary hole punch".
424	 * Don't do this for block devices when someone else
425	 * has a reference, because it could be file system metadata
426	 * and that's not safe to truncate.
427	 */
428	mapping = page_mapping(p);
429	if (!mapping) {
430		/*
431		 * Page has been torn down in the meantime.
432		 */
433		return FAILED;
434	}
435
436	/*
437	 * Truncation is a bit tricky. Enable it per file system for now.
438	 *
439	 * Open: to take i_mutex or not for this? Right now we don't.
440	 */
441	if (mapping->a_ops->error_remove_page) {
442		err = mapping->a_ops->error_remove_page(mapping, p);
443		if (err != 0) {
444			printk(KERN_INFO "MCE %#lx: Failed to punch page: %d\n",
445					pfn, err);
446		} else if (page_has_private(p) &&
447				!try_to_release_page(p, GFP_NOIO)) {
448			pr_debug("MCE %#lx: failed to release buffers\n", pfn);
449		} else {
450			ret = RECOVERED;
451		}
452	} else {
453		/*
454		 * If the file system doesn't support it just invalidate.
455		 * This fails on dirty pages or anything that has private data.
456		 */
457		if (invalidate_inode_page(p))
458			ret = RECOVERED;
459		else
460			printk(KERN_INFO "MCE %#lx: Failed to invalidate\n",
461				pfn);
462	}
463	return ret;
464}
465
466/*
467 * Dirty pagecache page.
468 * Issues: when the error hits a hole page the error is not properly
469 * propagated.
470 */
471static int me_pagecache_dirty(struct page *p, unsigned long pfn)
472{
473	struct address_space *mapping = page_mapping(p);
474
475	SetPageError(p);
476	/* TBD: print more information about the file. */
477	if (mapping) {
478		/*
479		 * IO error will be reported by write(), fsync(), etc.
480		 * which check the mapping.
481		 * This way the application knows that something went
482		 * wrong with its dirty file data.
483		 *
484		 * There's one open issue:
485		 *
486		 * The EIO will only be reported on the next IO
487		 * operation and then cleared through the IO map.
488		 * Normally Linux has two mechanisms to pass an IO error:
489		 * first through the AS_EIO flag in the address space
490		 * and then through the PageError flag in the page.
491		 * Since we drop pages on memory failure handling the
492		 * only mechanism open to use is through AS_EIO.
493		 *
494		 * This has the disadvantage that it gets cleared on
495		 * the first operation that returns an error, while
496		 * the PageError bit is more sticky and only cleared
497		 * when the page is reread or dropped.  If an
498		 * application assumes it will always get error on
499		 * fsync, but does other operations on the fd before
500		 * and the page is dropped in between, then the error
501		 * will not be properly reported.
502		 *
503		 * This can already happen even without hwpoisoned
504		 * pages: first on metadata IO errors (which only
505		 * report through AS_EIO) or when the page is dropped
506		 * at the wrong time.
507		 *
508		 * So right now we assume that the application DTRT on
509		 * the first EIO, but we're not worse than other parts
510		 * of the kernel.
511		 */
512		mapping_set_error(mapping, -EIO);
513	}
514
515	return me_pagecache_clean(p, pfn);
516}
517
518/*
519 * Clean and dirty swap cache.
520 *
521 * Dirty swap cache page is tricky to handle. The page could live both in page
522 * cache and swap cache (i.e. the page was freshly swapped in). So it could be
523 * referenced concurrently by 2 types of PTEs:
524 * normal PTEs and swap PTEs. We try to handle them consistently by calling
525 * try_to_unmap(TTU_IGNORE_HWPOISON) to convert the normal PTEs to swap PTEs,
526 * and then
527 *      - clear dirty bit to prevent IO
528 *      - remove from LRU
529 *      - but keep in the swap cache, so that when we return to it on
530 *        a later page fault, we know the application is accessing
531 *        corrupted data and shall be killed (we installed simple
532 *        interception code in do_swap_page to catch it).
533 *
534 * Clean swap cache pages can be directly isolated. A later page fault will
535 * bring in the known good data from disk.
536 */
537static int me_swapcache_dirty(struct page *p, unsigned long pfn)
538{
539	ClearPageDirty(p);
540	/* Trigger EIO in shmem: */
541	ClearPageUptodate(p);
542
543	if (!delete_from_lru_cache(p))
544		return DELAYED;
545	else
546		return FAILED;
547}
548
549static int me_swapcache_clean(struct page *p, unsigned long pfn)
550{
551	delete_from_swap_cache(p);
552
553	if (!delete_from_lru_cache(p))
554		return RECOVERED;
555	else
556		return FAILED;
557}
558
559/*
560 * Huge pages. Needs work.
561 * Issues:
562 * No rmap support so we cannot find the original mapper. In theory we could walk
563 * all MMs and look for the mappings, but that would be non-atomic and racy.
564 * Need rmap for hugepages for this. Alternatively we could employ a heuristic,
565 * like just walking the current process and hoping it has it mapped (that
566 * should usually be true for the common "shared database cache" case).
567 * Should handle free huge pages and dequeue them too, but this needs to
568 * handle huge page accounting correctly.
569 */
570static int me_huge_page(struct page *p, unsigned long pfn)
571{
572	return FAILED;
573}
574
575/*
576 * Various page states we can handle.
577 *
578 * A page state is defined by its current page->flags bits.
579 * The table matches them in order and calls the right handler.
580 *
581 * This is quite tricky because we can access the page at any time
582 * in its life cycle, so all accesses have to be extremely careful.
583 *
584 * This is not complete. More states could be added.
585 * For any missing state don't attempt recovery.
586 */
587
588#define dirty		(1UL << PG_dirty)
589#define sc		(1UL << PG_swapcache)
590#define unevict		(1UL << PG_unevictable)
591#define mlock		(1UL << PG_mlocked)
592#define writeback	(1UL << PG_writeback)
593#define lru		(1UL << PG_lru)
594#define swapbacked	(1UL << PG_swapbacked)
595#define head		(1UL << PG_head)
596#define tail		(1UL << PG_tail)
597#define compound	(1UL << PG_compound)
598#define slab		(1UL << PG_slab)
599#define reserved	(1UL << PG_reserved)
600
601static struct page_state {
602	unsigned long mask;
603	unsigned long res;
604	char *msg;
605	int (*action)(struct page *p, unsigned long pfn);
606} error_states[] = {
607	{ reserved,	reserved,	"reserved kernel",	me_ignore },
608	/*
609	 * free pages are specially detected outside this table:
610	 * PG_buddy pages make up only a small fraction of all free pages.
611	 */
612
613	/*
614	 * Could in theory check if slab page is free or if we can drop
615	 * currently unused objects without touching them. But just
616	 * treat it as standard kernel for now.
617	 */
618	{ slab,		slab,		"kernel slab",	me_kernel },
619
620#ifdef CONFIG_PAGEFLAGS_EXTENDED
621	{ head,		head,		"huge",		me_huge_page },
622	{ tail,		tail,		"huge",		me_huge_page },
623#else
624	{ compound,	compound,	"huge",		me_huge_page },
625#endif
626
627	{ sc|dirty,	sc|dirty,	"swapcache",	me_swapcache_dirty },
628	{ sc|dirty,	sc,		"swapcache",	me_swapcache_clean },
629
630	{ unevict|dirty, unevict|dirty,	"unevictable LRU", me_pagecache_dirty},
631	{ unevict,	unevict,	"unevictable LRU", me_pagecache_clean},
632
633	{ mlock|dirty,	mlock|dirty,	"mlocked LRU",	me_pagecache_dirty },
634	{ mlock,	mlock,		"mlocked LRU",	me_pagecache_clean },
635
636	{ lru|dirty,	lru|dirty,	"LRU",		me_pagecache_dirty },
637	{ lru|dirty,	lru,		"clean LRU",	me_pagecache_clean },
638
639	/*
640	 * Catchall entry: must be at end.
641	 */
642	{ 0,		0,		"unknown page state",	me_unknown },
643};
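/*
 * __memory_failure() scans this table in order and the first entry whose
 * mask/res pair matches the page's flags wins, so the more specific states
 * (e.g. swapcache) must be listed before the generic LRU entries.
 */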
644
645static void action_result(unsigned long pfn, char *msg, int result)
646{
647	struct page *page = pfn_to_page(pfn);
648
649	printk(KERN_ERR "MCE %#lx: %s%s page recovery: %s\n",
650		pfn,
651		PageDirty(page) ? "dirty " : "",
652		msg, action_name[result]);
653}
654
655static int page_action(struct page_state *ps, struct page *p,
656			unsigned long pfn)
657{
658	int result;
659	int count;
660
661	result = ps->action(p, pfn);
662	action_result(pfn, ps->msg, result);
663
664	count = page_count(p) - 1;
665	if (count != 0)
666		printk(KERN_ERR
667		       "MCE %#lx: %s page still referenced by %d users\n",
668		       pfn, ps->msg, count);
669
670	/* Could do more checks here if page looks ok */
671	/*
672	 * Could adjust zone counters here to correct for the missing page.
673	 */
674
675	return result == RECOVERED ? 0 : -EBUSY;
676}
677
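/*
 * try_to_unmap() can fail transiently when racing with other users of the
 * page, so hwpoison_user_mappings() retries it this many times before
 * declaring the unmap a failure.
 */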
678#define N_UNMAP_TRIES 5
679
680/*
681 * Do all that is necessary to remove user space mappings. Unmap
682 * the pages and send SIGBUS to the processes if the data was dirty.
683 */
684static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
685				  int trapno)
686{
687	enum ttu_flags ttu = TTU_UNMAP | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
688	struct address_space *mapping;
689	LIST_HEAD(tokill);
690	int ret;
691	int i;
692	int kill = 1;
693
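	/* Kernel-only pages (reserved, slab) have no user space mappings to remove. */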
694	if (PageReserved(p) || PageSlab(p))
695		return SWAP_SUCCESS;
696
697	/*
698	 * This check implies we don't kill processes early if their pages
699	 * are in the swap cache. Those are always late kills.
700	 */
701	if (!page_mapped(p))
702		return SWAP_SUCCESS;
703
704	if (PageCompound(p) || PageKsm(p))
705		return SWAP_FAIL;
706
707	if (PageSwapCache(p)) {
708		printk(KERN_ERR
709		       "MCE %#lx: keeping poisoned page in swap cache\n", pfn);
710		ttu |= TTU_IGNORE_HWPOISON;
711	}
712
713	/*
714	 * Propagate the dirty bit from PTEs to struct page first, because we
715	 * need this to decide if we should kill or just drop the page.
716	 * XXX: the dirty test could be racy: set_page_dirty() may not always
717	 * be called inside page lock (it's recommended but not enforced).
718	 */
719	mapping = page_mapping(p);
720	if (!PageDirty(p) && mapping && mapping_cap_writeback_dirty(mapping)) {
721		if (page_mkclean(p)) {
722			SetPageDirty(p);
723		} else {
724			kill = 0;
725			ttu |= TTU_IGNORE_HWPOISON;
726			printk(KERN_INFO
727	"MCE %#lx: corrupted page was clean: dropped without side effects\n",
728				pfn);
729		}
730	}
731
732	/*
733	 * First collect all the processes that have the page
734	 * mapped in dirty form.  This has to be done before try_to_unmap,
735	 * because ttu takes the rmap data structures down.
736	 *
737	 * Error handling: We ignore errors here because
738	 * there's nothing that can be done.
739	 */
740	if (kill)
741		collect_procs(p, &tokill);
742
743	/*
744	 * try_to_unmap can fail temporarily due to races.
745	 * Try a few times (RED-PEN better strategy?)
746	 */
747	for (i = 0; i < N_UNMAP_TRIES; i++) {
748		ret = try_to_unmap(p, ttu);
749		if (ret == SWAP_SUCCESS)
750			break;
751		pr_debug("MCE %#lx: try_to_unmap retry needed %d\n", pfn,  ret);
752	}
753
754	if (ret != SWAP_SUCCESS)
755		printk(KERN_ERR "MCE %#lx: failed to unmap page (mapcount=%d)\n",
756				pfn, page_mapcount(p));
757
758	/*
759	 * Now that the dirty bit has been propagated to the
760	 * struct page and all unmaps done we can decide if
761	 * killing is needed or not.  Only kill when the page
762	 * was dirty, otherwise the tokill list is merely
763	 * freed.  When there was a problem unmapping earlier
764	 * use a more forceful, uncatchable kill to prevent
765	 * any accesses to the poisoned memory.
766	 */
767	kill_procs_ao(&tokill, !!PageDirty(p), trapno,
768		      ret != SWAP_SUCCESS, pfn);
769
770	return ret;
771}
772
773int __memory_failure(unsigned long pfn, int trapno, int flags)
774{
775	struct page_state *ps;
776	struct page *p;
777	int res;
778
779	if (!sysctl_memory_failure_recovery)
780		panic("Memory failure from trap %d on page %lx", trapno, pfn);
781
782	if (!pfn_valid(pfn)) {
783		printk(KERN_ERR
784		       "MCE %#lx: memory outside kernel control\n",
785		       pfn);
786		return -ENXIO;
787	}
788
789	p = pfn_to_page(pfn);
790	if (TestSetPageHWPoison(p)) {
791		action_result(pfn, "already hardware poisoned", IGNORED);
792		return 0;
793	}
794
795	atomic_long_add(1, &mce_bad_pages);
796
797	/*
798	 * We need/can do nothing about count=0 pages.
799	 * 1) it's a free page, and therefore in safe hands:
800	 *    prep_new_page() will be the gatekeeper.
801	 * 2) it's part of a non-compound high order page.
802	 *    Implies some kernel user: cannot stop them from
803	 *    R/W the page; let's pray that the page has been
804	 *    used and will be freed some time later.
805	 * In fact it's dangerous to directly bump up page count from 0,
806	 * that may make page_freeze_refs()/page_unfreeze_refs() mismatch.
807	 */
808	if (!(flags & MF_COUNT_INCREASED) &&
809		!get_page_unless_zero(compound_head(p))) {
810		action_result(pfn, "free or high order kernel", IGNORED);
811		return PageBuddy(compound_head(p)) ? 0 : -EBUSY;
812	}
813
814	/*
815	 * We ignore non-LRU pages for good reasons.
816	 * - PG_locked is only well defined for LRU pages and a few others
817	 * - to avoid races with __set_page_locked()
818	 * - to avoid races with __SetPageSlab*() (and more non-atomic ops)
819	 * The check (unnecessarily) ignores LRU pages being isolated and
820	 * walked by the page reclaim code, however that's not a big loss.
821	 */
822	if (!PageLRU(p))
823		lru_add_drain_all();
824	if (!PageLRU(p)) {
825		action_result(pfn, "non LRU", IGNORED);
826		put_page(p);
827		return -EBUSY;
828	}
829
830	/*
831	 * Lock the page and wait for writeback to finish.
832	 * It's very difficult to mess with pages currently under IO
833	 * and in many cases impossible, so we just avoid it here.
834	 */
835	lock_page_nosync(p);
836	wait_on_page_writeback(p);
837
838	/*
839	 * Now take care of user space mappings.
840	 * Abort on fail: __remove_from_page_cache() assumes unmapped page.
841	 */
842	if (hwpoison_user_mappings(p, pfn, trapno) != SWAP_SUCCESS) {
843		printk(KERN_ERR "MCE %#lx: cannot unmap page, give up\n", pfn);
844		res = -EBUSY;
845		goto out;
846	}
847
848	/*
849	 * Torn down by someone else?
850	 */
851	if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) {
852		action_result(pfn, "already truncated LRU", IGNORED);
853		res = 0;
854		goto out;
855	}
856
857	res = -EBUSY;
858	for (ps = error_states;; ps++) {
859		if ((p->flags & ps->mask) == ps->res) {
860			res = page_action(ps, p, pfn);
861			break;
862		}
863	}
864out:
865	unlock_page(p);
866	return res;
867}
868EXPORT_SYMBOL_GPL(__memory_failure);
869
870/**
871 * memory_failure - Handle memory failure of a page.
872 * @pfn: Page Number of the corrupted page
873 * @trapno: Trap number reported in the signal to user space.
874 *
875 * This function is called by the low level machine check code
876 * of an architecture when it detects hardware memory corruption
877 * of a page. It tries its best to recover, which includes
878 * dropping pages, killing processes etc.
879 *
880 * The function is primarily of use for corruptions that
881 * happen outside the current execution context (e.g. when
882 * detected by a background scrubber)
883 *
884 * Must run in process context (e.g. a work queue) with interrupts
885 * enabled and no spinlocks held.
886 */
887void memory_failure(unsigned long pfn, int trapno)
888{
889	__memory_failure(pfn, trapno, 0);
890}
891
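/*
 * Purely illustrative sketch (not part of this file): memory_failure() must
 * run in process context, so an architecture's machine check code would
 * typically only record the pfn and trap number in its exception handler
 * and defer the actual recovery, e.g. to a work queue. All names below are
 * hypothetical:
 *
 *	static unsigned long uc_pfn;
 *	static int uc_trapno;
 *
 *	static void uc_work_fn(struct work_struct *work)
 *	{
 *		memory_failure(uc_pfn, uc_trapno);
 *	}
 *	static DECLARE_WORK(uc_work, uc_work_fn);
 *
 *	// In the machine check exception handler:
 *	//	uc_pfn = pfn; uc_trapno = trapno;
 *	//	schedule_work(&uc_work);
 */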