memory-failure.c revision a6bc32b899223a877f595ef9ddc1e89ead5072b8
1/* 2 * Copyright (C) 2008, 2009 Intel Corporation 3 * Authors: Andi Kleen, Fengguang Wu 4 * 5 * This software may be redistributed and/or modified under the terms of 6 * the GNU General Public License ("GPL") version 2 only as published by the 7 * Free Software Foundation. 8 * 9 * High level machine check handler. Handles pages reported by the 10 * hardware as being corrupted usually due to a multi-bit ECC memory or cache 11 * failure. 12 * 13 * In addition there is a "soft offline" entry point that allows stop using 14 * not-yet-corrupted-by-suspicious pages without killing anything. 15 * 16 * Handles page cache pages in various states. The tricky part 17 * here is that we can access any page asynchronously in respect to 18 * other VM users, because memory failures could happen anytime and 19 * anywhere. This could violate some of their assumptions. This is why 20 * this code has to be extremely careful. Generally it tries to use 21 * normal locking rules, as in get the standard locks, even if that means 22 * the error handling takes potentially a long time. 23 * 24 * There are several operations here with exponential complexity because 25 * of unsuitable VM data structures. For example the operation to map back 26 * from RMAP chains to processes has to walk the complete process list and 27 * has non linear complexity with the number. But since memory corruptions 28 * are rare we hope to get away with this. This avoids impacting the core 29 * VM. 30 */ 31 32/* 33 * Notebook: 34 * - hugetlb needs more code 35 * - kcore/oldmem/vmcore/mem/kmem check for hwpoison pages 36 * - pass bad pages to kdump next kernel 37 */ 38#include <linux/kernel.h> 39#include <linux/mm.h> 40#include <linux/page-flags.h> 41#include <linux/kernel-page-flags.h> 42#include <linux/sched.h> 43#include <linux/ksm.h> 44#include <linux/rmap.h> 45#include <linux/export.h> 46#include <linux/pagemap.h> 47#include <linux/swap.h> 48#include <linux/backing-dev.h> 49#include <linux/migrate.h> 50#include <linux/page-isolation.h> 51#include <linux/suspend.h> 52#include <linux/slab.h> 53#include <linux/swapops.h> 54#include <linux/hugetlb.h> 55#include <linux/memory_hotplug.h> 56#include <linux/mm_inline.h> 57#include <linux/kfifo.h> 58#include "internal.h" 59 60int sysctl_memory_failure_early_kill __read_mostly = 0; 61 62int sysctl_memory_failure_recovery __read_mostly = 1; 63 64atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0); 65 66#if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE) 67 68u32 hwpoison_filter_enable = 0; 69u32 hwpoison_filter_dev_major = ~0U; 70u32 hwpoison_filter_dev_minor = ~0U; 71u64 hwpoison_filter_flags_mask; 72u64 hwpoison_filter_flags_value; 73EXPORT_SYMBOL_GPL(hwpoison_filter_enable); 74EXPORT_SYMBOL_GPL(hwpoison_filter_dev_major); 75EXPORT_SYMBOL_GPL(hwpoison_filter_dev_minor); 76EXPORT_SYMBOL_GPL(hwpoison_filter_flags_mask); 77EXPORT_SYMBOL_GPL(hwpoison_filter_flags_value); 78 79static int hwpoison_filter_dev(struct page *p) 80{ 81 struct address_space *mapping; 82 dev_t dev; 83 84 if (hwpoison_filter_dev_major == ~0U && 85 hwpoison_filter_dev_minor == ~0U) 86 return 0; 87 88 /* 89 * page_mapping() does not accept slab pages. 90 */ 91 if (PageSlab(p)) 92 return -EINVAL; 93 94 mapping = page_mapping(p); 95 if (mapping == NULL || mapping->host == NULL) 96 return -EINVAL; 97 98 dev = mapping->host->i_sb->s_dev; 99 if (hwpoison_filter_dev_major != ~0U && 100 hwpoison_filter_dev_major != MAJOR(dev)) 101 return -EINVAL; 102 if (hwpoison_filter_dev_minor != ~0U && 103 hwpoison_filter_dev_minor != MINOR(dev)) 104 return -EINVAL; 105 106 return 0; 107} 108 109static int hwpoison_filter_flags(struct page *p) 110{ 111 if (!hwpoison_filter_flags_mask) 112 return 0; 113 114 if ((stable_page_flags(p) & hwpoison_filter_flags_mask) == 115 hwpoison_filter_flags_value) 116 return 0; 117 else 118 return -EINVAL; 119} 120 121/* 122 * This allows stress tests to limit test scope to a collection of tasks 123 * by putting them under some memcg. This prevents killing unrelated/important 124 * processes such as /sbin/init. Note that the target task may share clean 125 * pages with init (eg. libc text), which is harmless. If the target task 126 * share _dirty_ pages with another task B, the test scheme must make sure B 127 * is also included in the memcg. At last, due to race conditions this filter 128 * can only guarantee that the page either belongs to the memcg tasks, or is 129 * a freed page. 130 */ 131#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP 132u64 hwpoison_filter_memcg; 133EXPORT_SYMBOL_GPL(hwpoison_filter_memcg); 134static int hwpoison_filter_task(struct page *p) 135{ 136 struct mem_cgroup *mem; 137 struct cgroup_subsys_state *css; 138 unsigned long ino; 139 140 if (!hwpoison_filter_memcg) 141 return 0; 142 143 mem = try_get_mem_cgroup_from_page(p); 144 if (!mem) 145 return -EINVAL; 146 147 css = mem_cgroup_css(mem); 148 /* root_mem_cgroup has NULL dentries */ 149 if (!css->cgroup->dentry) 150 return -EINVAL; 151 152 ino = css->cgroup->dentry->d_inode->i_ino; 153 css_put(css); 154 155 if (ino != hwpoison_filter_memcg) 156 return -EINVAL; 157 158 return 0; 159} 160#else 161static int hwpoison_filter_task(struct page *p) { return 0; } 162#endif 163 164int hwpoison_filter(struct page *p) 165{ 166 if (!hwpoison_filter_enable) 167 return 0; 168 169 if (hwpoison_filter_dev(p)) 170 return -EINVAL; 171 172 if (hwpoison_filter_flags(p)) 173 return -EINVAL; 174 175 if (hwpoison_filter_task(p)) 176 return -EINVAL; 177 178 return 0; 179} 180#else 181int hwpoison_filter(struct page *p) 182{ 183 return 0; 184} 185#endif 186 187EXPORT_SYMBOL_GPL(hwpoison_filter); 188 189/* 190 * Send all the processes who have the page mapped an ``action optional'' 191 * signal. 192 */ 193static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno, 194 unsigned long pfn, struct page *page) 195{ 196 struct siginfo si; 197 int ret; 198 199 printk(KERN_ERR 200 "MCE %#lx: Killing %s:%d early due to hardware memory corruption\n", 201 pfn, t->comm, t->pid); 202 si.si_signo = SIGBUS; 203 si.si_errno = 0; 204 si.si_code = BUS_MCEERR_AO; 205 si.si_addr = (void *)addr; 206#ifdef __ARCH_SI_TRAPNO 207 si.si_trapno = trapno; 208#endif 209 si.si_addr_lsb = compound_trans_order(compound_head(page)) + PAGE_SHIFT; 210 /* 211 * Don't use force here, it's convenient if the signal 212 * can be temporarily blocked. 213 * This could cause a loop when the user sets SIGBUS 214 * to SIG_IGN, but hopefully no one will do that? 215 */ 216 ret = send_sig_info(SIGBUS, &si, t); /* synchronous? */ 217 if (ret < 0) 218 printk(KERN_INFO "MCE: Error sending signal to %s:%d: %d\n", 219 t->comm, t->pid, ret); 220 return ret; 221} 222 223/* 224 * When a unknown page type is encountered drain as many buffers as possible 225 * in the hope to turn the page into a LRU or free page, which we can handle. 226 */ 227void shake_page(struct page *p, int access) 228{ 229 if (!PageSlab(p)) { 230 lru_add_drain_all(); 231 if (PageLRU(p)) 232 return; 233 drain_all_pages(); 234 if (PageLRU(p) || is_free_buddy_page(p)) 235 return; 236 } 237 238 /* 239 * Only call shrink_slab here (which would also shrink other caches) if 240 * access is not potentially fatal. 241 */ 242 if (access) { 243 int nr; 244 do { 245 struct shrink_control shrink = { 246 .gfp_mask = GFP_KERNEL, 247 }; 248 249 nr = shrink_slab(&shrink, 1000, 1000); 250 if (page_count(p) == 1) 251 break; 252 } while (nr > 10); 253 } 254} 255EXPORT_SYMBOL_GPL(shake_page); 256 257/* 258 * Kill all processes that have a poisoned page mapped and then isolate 259 * the page. 260 * 261 * General strategy: 262 * Find all processes having the page mapped and kill them. 263 * But we keep a page reference around so that the page is not 264 * actually freed yet. 265 * Then stash the page away 266 * 267 * There's no convenient way to get back to mapped processes 268 * from the VMAs. So do a brute-force search over all 269 * running processes. 270 * 271 * Remember that machine checks are not common (or rather 272 * if they are common you have other problems), so this shouldn't 273 * be a performance issue. 274 * 275 * Also there are some races possible while we get from the 276 * error detection to actually handle it. 277 */ 278 279struct to_kill { 280 struct list_head nd; 281 struct task_struct *tsk; 282 unsigned long addr; 283 char addr_valid; 284}; 285 286/* 287 * Failure handling: if we can't find or can't kill a process there's 288 * not much we can do. We just print a message and ignore otherwise. 289 */ 290 291/* 292 * Schedule a process for later kill. 293 * Uses GFP_ATOMIC allocations to avoid potential recursions in the VM. 294 * TBD would GFP_NOIO be enough? 295 */ 296static void add_to_kill(struct task_struct *tsk, struct page *p, 297 struct vm_area_struct *vma, 298 struct list_head *to_kill, 299 struct to_kill **tkc) 300{ 301 struct to_kill *tk; 302 303 if (*tkc) { 304 tk = *tkc; 305 *tkc = NULL; 306 } else { 307 tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC); 308 if (!tk) { 309 printk(KERN_ERR 310 "MCE: Out of memory while machine check handling\n"); 311 return; 312 } 313 } 314 tk->addr = page_address_in_vma(p, vma); 315 tk->addr_valid = 1; 316 317 /* 318 * In theory we don't have to kill when the page was 319 * munmaped. But it could be also a mremap. Since that's 320 * likely very rare kill anyways just out of paranoia, but use 321 * a SIGKILL because the error is not contained anymore. 322 */ 323 if (tk->addr == -EFAULT) { 324 pr_info("MCE: Unable to find user space address %lx in %s\n", 325 page_to_pfn(p), tsk->comm); 326 tk->addr_valid = 0; 327 } 328 get_task_struct(tsk); 329 tk->tsk = tsk; 330 list_add_tail(&tk->nd, to_kill); 331} 332 333/* 334 * Kill the processes that have been collected earlier. 335 * 336 * Only do anything when DOIT is set, otherwise just free the list 337 * (this is used for clean pages which do not need killing) 338 * Also when FAIL is set do a force kill because something went 339 * wrong earlier. 340 */ 341static void kill_procs_ao(struct list_head *to_kill, int doit, int trapno, 342 int fail, struct page *page, unsigned long pfn) 343{ 344 struct to_kill *tk, *next; 345 346 list_for_each_entry_safe (tk, next, to_kill, nd) { 347 if (doit) { 348 /* 349 * In case something went wrong with munmapping 350 * make sure the process doesn't catch the 351 * signal and then access the memory. Just kill it. 352 */ 353 if (fail || tk->addr_valid == 0) { 354 printk(KERN_ERR 355 "MCE %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n", 356 pfn, tk->tsk->comm, tk->tsk->pid); 357 force_sig(SIGKILL, tk->tsk); 358 } 359 360 /* 361 * In theory the process could have mapped 362 * something else on the address in-between. We could 363 * check for that, but we need to tell the 364 * process anyways. 365 */ 366 else if (kill_proc_ao(tk->tsk, tk->addr, trapno, 367 pfn, page) < 0) 368 printk(KERN_ERR 369 "MCE %#lx: Cannot send advisory machine check signal to %s:%d\n", 370 pfn, tk->tsk->comm, tk->tsk->pid); 371 } 372 put_task_struct(tk->tsk); 373 kfree(tk); 374 } 375} 376 377static int task_early_kill(struct task_struct *tsk) 378{ 379 if (!tsk->mm) 380 return 0; 381 if (tsk->flags & PF_MCE_PROCESS) 382 return !!(tsk->flags & PF_MCE_EARLY); 383 return sysctl_memory_failure_early_kill; 384} 385 386/* 387 * Collect processes when the error hit an anonymous page. 388 */ 389static void collect_procs_anon(struct page *page, struct list_head *to_kill, 390 struct to_kill **tkc) 391{ 392 struct vm_area_struct *vma; 393 struct task_struct *tsk; 394 struct anon_vma *av; 395 396 av = page_lock_anon_vma(page); 397 if (av == NULL) /* Not actually mapped anymore */ 398 return; 399 400 read_lock(&tasklist_lock); 401 for_each_process (tsk) { 402 struct anon_vma_chain *vmac; 403 404 if (!task_early_kill(tsk)) 405 continue; 406 list_for_each_entry(vmac, &av->head, same_anon_vma) { 407 vma = vmac->vma; 408 if (!page_mapped_in_vma(page, vma)) 409 continue; 410 if (vma->vm_mm == tsk->mm) 411 add_to_kill(tsk, page, vma, to_kill, tkc); 412 } 413 } 414 read_unlock(&tasklist_lock); 415 page_unlock_anon_vma(av); 416} 417 418/* 419 * Collect processes when the error hit a file mapped page. 420 */ 421static void collect_procs_file(struct page *page, struct list_head *to_kill, 422 struct to_kill **tkc) 423{ 424 struct vm_area_struct *vma; 425 struct task_struct *tsk; 426 struct prio_tree_iter iter; 427 struct address_space *mapping = page->mapping; 428 429 mutex_lock(&mapping->i_mmap_mutex); 430 read_lock(&tasklist_lock); 431 for_each_process(tsk) { 432 pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 433 434 if (!task_early_kill(tsk)) 435 continue; 436 437 vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, 438 pgoff) { 439 /* 440 * Send early kill signal to tasks where a vma covers 441 * the page but the corrupted page is not necessarily 442 * mapped it in its pte. 443 * Assume applications who requested early kill want 444 * to be informed of all such data corruptions. 445 */ 446 if (vma->vm_mm == tsk->mm) 447 add_to_kill(tsk, page, vma, to_kill, tkc); 448 } 449 } 450 read_unlock(&tasklist_lock); 451 mutex_unlock(&mapping->i_mmap_mutex); 452} 453 454/* 455 * Collect the processes who have the corrupted page mapped to kill. 456 * This is done in two steps for locking reasons. 457 * First preallocate one tokill structure outside the spin locks, 458 * so that we can kill at least one process reasonably reliable. 459 */ 460static void collect_procs(struct page *page, struct list_head *tokill) 461{ 462 struct to_kill *tk; 463 464 if (!page->mapping) 465 return; 466 467 tk = kmalloc(sizeof(struct to_kill), GFP_NOIO); 468 if (!tk) 469 return; 470 if (PageAnon(page)) 471 collect_procs_anon(page, tokill, &tk); 472 else 473 collect_procs_file(page, tokill, &tk); 474 kfree(tk); 475} 476 477/* 478 * Error handlers for various types of pages. 479 */ 480 481enum outcome { 482 IGNORED, /* Error: cannot be handled */ 483 FAILED, /* Error: handling failed */ 484 DELAYED, /* Will be handled later */ 485 RECOVERED, /* Successfully recovered */ 486}; 487 488static const char *action_name[] = { 489 [IGNORED] = "Ignored", 490 [FAILED] = "Failed", 491 [DELAYED] = "Delayed", 492 [RECOVERED] = "Recovered", 493}; 494 495/* 496 * XXX: It is possible that a page is isolated from LRU cache, 497 * and then kept in swap cache or failed to remove from page cache. 498 * The page count will stop it from being freed by unpoison. 499 * Stress tests should be aware of this memory leak problem. 500 */ 501static int delete_from_lru_cache(struct page *p) 502{ 503 if (!isolate_lru_page(p)) { 504 /* 505 * Clear sensible page flags, so that the buddy system won't 506 * complain when the page is unpoison-and-freed. 507 */ 508 ClearPageActive(p); 509 ClearPageUnevictable(p); 510 /* 511 * drop the page count elevated by isolate_lru_page() 512 */ 513 page_cache_release(p); 514 return 0; 515 } 516 return -EIO; 517} 518 519/* 520 * Error hit kernel page. 521 * Do nothing, try to be lucky and not touch this instead. For a few cases we 522 * could be more sophisticated. 523 */ 524static int me_kernel(struct page *p, unsigned long pfn) 525{ 526 return IGNORED; 527} 528 529/* 530 * Page in unknown state. Do nothing. 531 */ 532static int me_unknown(struct page *p, unsigned long pfn) 533{ 534 printk(KERN_ERR "MCE %#lx: Unknown page state\n", pfn); 535 return FAILED; 536} 537 538/* 539 * Clean (or cleaned) page cache page. 540 */ 541static int me_pagecache_clean(struct page *p, unsigned long pfn) 542{ 543 int err; 544 int ret = FAILED; 545 struct address_space *mapping; 546 547 delete_from_lru_cache(p); 548 549 /* 550 * For anonymous pages we're done the only reference left 551 * should be the one m_f() holds. 552 */ 553 if (PageAnon(p)) 554 return RECOVERED; 555 556 /* 557 * Now truncate the page in the page cache. This is really 558 * more like a "temporary hole punch" 559 * Don't do this for block devices when someone else 560 * has a reference, because it could be file system metadata 561 * and that's not safe to truncate. 562 */ 563 mapping = page_mapping(p); 564 if (!mapping) { 565 /* 566 * Page has been teared down in the meanwhile 567 */ 568 return FAILED; 569 } 570 571 /* 572 * Truncation is a bit tricky. Enable it per file system for now. 573 * 574 * Open: to take i_mutex or not for this? Right now we don't. 575 */ 576 if (mapping->a_ops->error_remove_page) { 577 err = mapping->a_ops->error_remove_page(mapping, p); 578 if (err != 0) { 579 printk(KERN_INFO "MCE %#lx: Failed to punch page: %d\n", 580 pfn, err); 581 } else if (page_has_private(p) && 582 !try_to_release_page(p, GFP_NOIO)) { 583 pr_info("MCE %#lx: failed to release buffers\n", pfn); 584 } else { 585 ret = RECOVERED; 586 } 587 } else { 588 /* 589 * If the file system doesn't support it just invalidate 590 * This fails on dirty or anything with private pages 591 */ 592 if (invalidate_inode_page(p)) 593 ret = RECOVERED; 594 else 595 printk(KERN_INFO "MCE %#lx: Failed to invalidate\n", 596 pfn); 597 } 598 return ret; 599} 600 601/* 602 * Dirty cache page page 603 * Issues: when the error hit a hole page the error is not properly 604 * propagated. 605 */ 606static int me_pagecache_dirty(struct page *p, unsigned long pfn) 607{ 608 struct address_space *mapping = page_mapping(p); 609 610 SetPageError(p); 611 /* TBD: print more information about the file. */ 612 if (mapping) { 613 /* 614 * IO error will be reported by write(), fsync(), etc. 615 * who check the mapping. 616 * This way the application knows that something went 617 * wrong with its dirty file data. 618 * 619 * There's one open issue: 620 * 621 * The EIO will be only reported on the next IO 622 * operation and then cleared through the IO map. 623 * Normally Linux has two mechanisms to pass IO error 624 * first through the AS_EIO flag in the address space 625 * and then through the PageError flag in the page. 626 * Since we drop pages on memory failure handling the 627 * only mechanism open to use is through AS_AIO. 628 * 629 * This has the disadvantage that it gets cleared on 630 * the first operation that returns an error, while 631 * the PageError bit is more sticky and only cleared 632 * when the page is reread or dropped. If an 633 * application assumes it will always get error on 634 * fsync, but does other operations on the fd before 635 * and the page is dropped between then the error 636 * will not be properly reported. 637 * 638 * This can already happen even without hwpoisoned 639 * pages: first on metadata IO errors (which only 640 * report through AS_EIO) or when the page is dropped 641 * at the wrong time. 642 * 643 * So right now we assume that the application DTRT on 644 * the first EIO, but we're not worse than other parts 645 * of the kernel. 646 */ 647 mapping_set_error(mapping, EIO); 648 } 649 650 return me_pagecache_clean(p, pfn); 651} 652 653/* 654 * Clean and dirty swap cache. 655 * 656 * Dirty swap cache page is tricky to handle. The page could live both in page 657 * cache and swap cache(ie. page is freshly swapped in). So it could be 658 * referenced concurrently by 2 types of PTEs: 659 * normal PTEs and swap PTEs. We try to handle them consistently by calling 660 * try_to_unmap(TTU_IGNORE_HWPOISON) to convert the normal PTEs to swap PTEs, 661 * and then 662 * - clear dirty bit to prevent IO 663 * - remove from LRU 664 * - but keep in the swap cache, so that when we return to it on 665 * a later page fault, we know the application is accessing 666 * corrupted data and shall be killed (we installed simple 667 * interception code in do_swap_page to catch it). 668 * 669 * Clean swap cache pages can be directly isolated. A later page fault will 670 * bring in the known good data from disk. 671 */ 672static int me_swapcache_dirty(struct page *p, unsigned long pfn) 673{ 674 ClearPageDirty(p); 675 /* Trigger EIO in shmem: */ 676 ClearPageUptodate(p); 677 678 if (!delete_from_lru_cache(p)) 679 return DELAYED; 680 else 681 return FAILED; 682} 683 684static int me_swapcache_clean(struct page *p, unsigned long pfn) 685{ 686 delete_from_swap_cache(p); 687 688 if (!delete_from_lru_cache(p)) 689 return RECOVERED; 690 else 691 return FAILED; 692} 693 694/* 695 * Huge pages. Needs work. 696 * Issues: 697 * - Error on hugepage is contained in hugepage unit (not in raw page unit.) 698 * To narrow down kill region to one page, we need to break up pmd. 699 */ 700static int me_huge_page(struct page *p, unsigned long pfn) 701{ 702 int res = 0; 703 struct page *hpage = compound_head(p); 704 /* 705 * We can safely recover from error on free or reserved (i.e. 706 * not in-use) hugepage by dequeuing it from freelist. 707 * To check whether a hugepage is in-use or not, we can't use 708 * page->lru because it can be used in other hugepage operations, 709 * such as __unmap_hugepage_range() and gather_surplus_pages(). 710 * So instead we use page_mapping() and PageAnon(). 711 * We assume that this function is called with page lock held, 712 * so there is no race between isolation and mapping/unmapping. 713 */ 714 if (!(page_mapping(hpage) || PageAnon(hpage))) { 715 res = dequeue_hwpoisoned_huge_page(hpage); 716 if (!res) 717 return RECOVERED; 718 } 719 return DELAYED; 720} 721 722/* 723 * Various page states we can handle. 724 * 725 * A page state is defined by its current page->flags bits. 726 * The table matches them in order and calls the right handler. 727 * 728 * This is quite tricky because we can access page at any time 729 * in its live cycle, so all accesses have to be extremely careful. 730 * 731 * This is not complete. More states could be added. 732 * For any missing state don't attempt recovery. 733 */ 734 735#define dirty (1UL << PG_dirty) 736#define sc (1UL << PG_swapcache) 737#define unevict (1UL << PG_unevictable) 738#define mlock (1UL << PG_mlocked) 739#define writeback (1UL << PG_writeback) 740#define lru (1UL << PG_lru) 741#define swapbacked (1UL << PG_swapbacked) 742#define head (1UL << PG_head) 743#define tail (1UL << PG_tail) 744#define compound (1UL << PG_compound) 745#define slab (1UL << PG_slab) 746#define reserved (1UL << PG_reserved) 747 748static struct page_state { 749 unsigned long mask; 750 unsigned long res; 751 char *msg; 752 int (*action)(struct page *p, unsigned long pfn); 753} error_states[] = { 754 { reserved, reserved, "reserved kernel", me_kernel }, 755 /* 756 * free pages are specially detected outside this table: 757 * PG_buddy pages only make a small fraction of all free pages. 758 */ 759 760 /* 761 * Could in theory check if slab page is free or if we can drop 762 * currently unused objects without touching them. But just 763 * treat it as standard kernel for now. 764 */ 765 { slab, slab, "kernel slab", me_kernel }, 766 767#ifdef CONFIG_PAGEFLAGS_EXTENDED 768 { head, head, "huge", me_huge_page }, 769 { tail, tail, "huge", me_huge_page }, 770#else 771 { compound, compound, "huge", me_huge_page }, 772#endif 773 774 { sc|dirty, sc|dirty, "swapcache", me_swapcache_dirty }, 775 { sc|dirty, sc, "swapcache", me_swapcache_clean }, 776 777 { unevict|dirty, unevict|dirty, "unevictable LRU", me_pagecache_dirty}, 778 { unevict, unevict, "unevictable LRU", me_pagecache_clean}, 779 780 { mlock|dirty, mlock|dirty, "mlocked LRU", me_pagecache_dirty }, 781 { mlock, mlock, "mlocked LRU", me_pagecache_clean }, 782 783 { lru|dirty, lru|dirty, "LRU", me_pagecache_dirty }, 784 { lru|dirty, lru, "clean LRU", me_pagecache_clean }, 785 786 /* 787 * Catchall entry: must be at end. 788 */ 789 { 0, 0, "unknown page state", me_unknown }, 790}; 791 792#undef dirty 793#undef sc 794#undef unevict 795#undef mlock 796#undef writeback 797#undef lru 798#undef swapbacked 799#undef head 800#undef tail 801#undef compound 802#undef slab 803#undef reserved 804 805static void action_result(unsigned long pfn, char *msg, int result) 806{ 807 struct page *page = pfn_to_page(pfn); 808 809 printk(KERN_ERR "MCE %#lx: %s%s page recovery: %s\n", 810 pfn, 811 PageDirty(page) ? "dirty " : "", 812 msg, action_name[result]); 813} 814 815static int page_action(struct page_state *ps, struct page *p, 816 unsigned long pfn) 817{ 818 int result; 819 int count; 820 821 result = ps->action(p, pfn); 822 action_result(pfn, ps->msg, result); 823 824 count = page_count(p) - 1; 825 if (ps->action == me_swapcache_dirty && result == DELAYED) 826 count--; 827 if (count != 0) { 828 printk(KERN_ERR 829 "MCE %#lx: %s page still referenced by %d users\n", 830 pfn, ps->msg, count); 831 result = FAILED; 832 } 833 834 /* Could do more checks here if page looks ok */ 835 /* 836 * Could adjust zone counters here to correct for the missing page. 837 */ 838 839 return (result == RECOVERED || result == DELAYED) ? 0 : -EBUSY; 840} 841 842/* 843 * Do all that is necessary to remove user space mappings. Unmap 844 * the pages and send SIGBUS to the processes if the data was dirty. 845 */ 846static int hwpoison_user_mappings(struct page *p, unsigned long pfn, 847 int trapno) 848{ 849 enum ttu_flags ttu = TTU_UNMAP | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS; 850 struct address_space *mapping; 851 LIST_HEAD(tokill); 852 int ret; 853 int kill = 1; 854 struct page *hpage = compound_head(p); 855 struct page *ppage; 856 857 if (PageReserved(p) || PageSlab(p)) 858 return SWAP_SUCCESS; 859 860 /* 861 * This check implies we don't kill processes if their pages 862 * are in the swap cache early. Those are always late kills. 863 */ 864 if (!page_mapped(hpage)) 865 return SWAP_SUCCESS; 866 867 if (PageKsm(p)) 868 return SWAP_FAIL; 869 870 if (PageSwapCache(p)) { 871 printk(KERN_ERR 872 "MCE %#lx: keeping poisoned page in swap cache\n", pfn); 873 ttu |= TTU_IGNORE_HWPOISON; 874 } 875 876 /* 877 * Propagate the dirty bit from PTEs to struct page first, because we 878 * need this to decide if we should kill or just drop the page. 879 * XXX: the dirty test could be racy: set_page_dirty() may not always 880 * be called inside page lock (it's recommended but not enforced). 881 */ 882 mapping = page_mapping(hpage); 883 if (!PageDirty(hpage) && mapping && 884 mapping_cap_writeback_dirty(mapping)) { 885 if (page_mkclean(hpage)) { 886 SetPageDirty(hpage); 887 } else { 888 kill = 0; 889 ttu |= TTU_IGNORE_HWPOISON; 890 printk(KERN_INFO 891 "MCE %#lx: corrupted page was clean: dropped without side effects\n", 892 pfn); 893 } 894 } 895 896 /* 897 * ppage: poisoned page 898 * if p is regular page(4k page) 899 * ppage == real poisoned page; 900 * else p is hugetlb or THP, ppage == head page. 901 */ 902 ppage = hpage; 903 904 if (PageTransHuge(hpage)) { 905 /* 906 * Verify that this isn't a hugetlbfs head page, the check for 907 * PageAnon is just for avoid tripping a split_huge_page 908 * internal debug check, as split_huge_page refuses to deal with 909 * anything that isn't an anon page. PageAnon can't go away fro 910 * under us because we hold a refcount on the hpage, without a 911 * refcount on the hpage. split_huge_page can't be safely called 912 * in the first place, having a refcount on the tail isn't 913 * enough * to be safe. 914 */ 915 if (!PageHuge(hpage) && PageAnon(hpage)) { 916 if (unlikely(split_huge_page(hpage))) { 917 /* 918 * FIXME: if splitting THP is failed, it is 919 * better to stop the following operation rather 920 * than causing panic by unmapping. System might 921 * survive if the page is freed later. 922 */ 923 printk(KERN_INFO 924 "MCE %#lx: failed to split THP\n", pfn); 925 926 BUG_ON(!PageHWPoison(p)); 927 return SWAP_FAIL; 928 } 929 /* THP is split, so ppage should be the real poisoned page. */ 930 ppage = p; 931 } 932 } 933 934 /* 935 * First collect all the processes that have the page 936 * mapped in dirty form. This has to be done before try_to_unmap, 937 * because ttu takes the rmap data structures down. 938 * 939 * Error handling: We ignore errors here because 940 * there's nothing that can be done. 941 */ 942 if (kill) 943 collect_procs(ppage, &tokill); 944 945 if (hpage != ppage) 946 lock_page(ppage); 947 948 ret = try_to_unmap(ppage, ttu); 949 if (ret != SWAP_SUCCESS) 950 printk(KERN_ERR "MCE %#lx: failed to unmap page (mapcount=%d)\n", 951 pfn, page_mapcount(ppage)); 952 953 if (hpage != ppage) 954 unlock_page(ppage); 955 956 /* 957 * Now that the dirty bit has been propagated to the 958 * struct page and all unmaps done we can decide if 959 * killing is needed or not. Only kill when the page 960 * was dirty, otherwise the tokill list is merely 961 * freed. When there was a problem unmapping earlier 962 * use a more force-full uncatchable kill to prevent 963 * any accesses to the poisoned memory. 964 */ 965 kill_procs_ao(&tokill, !!PageDirty(ppage), trapno, 966 ret != SWAP_SUCCESS, p, pfn); 967 968 return ret; 969} 970 971static void set_page_hwpoison_huge_page(struct page *hpage) 972{ 973 int i; 974 int nr_pages = 1 << compound_trans_order(hpage); 975 for (i = 0; i < nr_pages; i++) 976 SetPageHWPoison(hpage + i); 977} 978 979static void clear_page_hwpoison_huge_page(struct page *hpage) 980{ 981 int i; 982 int nr_pages = 1 << compound_trans_order(hpage); 983 for (i = 0; i < nr_pages; i++) 984 ClearPageHWPoison(hpage + i); 985} 986 987int __memory_failure(unsigned long pfn, int trapno, int flags) 988{ 989 struct page_state *ps; 990 struct page *p; 991 struct page *hpage; 992 int res; 993 unsigned int nr_pages; 994 995 if (!sysctl_memory_failure_recovery) 996 panic("Memory failure from trap %d on page %lx", trapno, pfn); 997 998 if (!pfn_valid(pfn)) { 999 printk(KERN_ERR 1000 "MCE %#lx: memory outside kernel control\n", 1001 pfn); 1002 return -ENXIO; 1003 } 1004 1005 p = pfn_to_page(pfn); 1006 hpage = compound_head(p); 1007 if (TestSetPageHWPoison(p)) { 1008 printk(KERN_ERR "MCE %#lx: already hardware poisoned\n", pfn); 1009 return 0; 1010 } 1011 1012 nr_pages = 1 << compound_trans_order(hpage); 1013 atomic_long_add(nr_pages, &mce_bad_pages); 1014 1015 /* 1016 * We need/can do nothing about count=0 pages. 1017 * 1) it's a free page, and therefore in safe hand: 1018 * prep_new_page() will be the gate keeper. 1019 * 2) it's a free hugepage, which is also safe: 1020 * an affected hugepage will be dequeued from hugepage freelist, 1021 * so there's no concern about reusing it ever after. 1022 * 3) it's part of a non-compound high order page. 1023 * Implies some kernel user: cannot stop them from 1024 * R/W the page; let's pray that the page has been 1025 * used and will be freed some time later. 1026 * In fact it's dangerous to directly bump up page count from 0, 1027 * that may make page_freeze_refs()/page_unfreeze_refs() mismatch. 1028 */ 1029 if (!(flags & MF_COUNT_INCREASED) && 1030 !get_page_unless_zero(hpage)) { 1031 if (is_free_buddy_page(p)) { 1032 action_result(pfn, "free buddy", DELAYED); 1033 return 0; 1034 } else if (PageHuge(hpage)) { 1035 /* 1036 * Check "just unpoisoned", "filter hit", and 1037 * "race with other subpage." 1038 */ 1039 lock_page(hpage); 1040 if (!PageHWPoison(hpage) 1041 || (hwpoison_filter(p) && TestClearPageHWPoison(p)) 1042 || (p != hpage && TestSetPageHWPoison(hpage))) { 1043 atomic_long_sub(nr_pages, &mce_bad_pages); 1044 return 0; 1045 } 1046 set_page_hwpoison_huge_page(hpage); 1047 res = dequeue_hwpoisoned_huge_page(hpage); 1048 action_result(pfn, "free huge", 1049 res ? IGNORED : DELAYED); 1050 unlock_page(hpage); 1051 return res; 1052 } else { 1053 action_result(pfn, "high order kernel", IGNORED); 1054 return -EBUSY; 1055 } 1056 } 1057 1058 /* 1059 * We ignore non-LRU pages for good reasons. 1060 * - PG_locked is only well defined for LRU pages and a few others 1061 * - to avoid races with __set_page_locked() 1062 * - to avoid races with __SetPageSlab*() (and more non-atomic ops) 1063 * The check (unnecessarily) ignores LRU pages being isolated and 1064 * walked by the page reclaim code, however that's not a big loss. 1065 */ 1066 if (!PageHuge(p) && !PageTransCompound(p)) { 1067 if (!PageLRU(p)) 1068 shake_page(p, 0); 1069 if (!PageLRU(p)) { 1070 /* 1071 * shake_page could have turned it free. 1072 */ 1073 if (is_free_buddy_page(p)) { 1074 action_result(pfn, "free buddy, 2nd try", 1075 DELAYED); 1076 return 0; 1077 } 1078 action_result(pfn, "non LRU", IGNORED); 1079 put_page(p); 1080 return -EBUSY; 1081 } 1082 } 1083 1084 /* 1085 * Lock the page and wait for writeback to finish. 1086 * It's very difficult to mess with pages currently under IO 1087 * and in many cases impossible, so we just avoid it here. 1088 */ 1089 lock_page(hpage); 1090 1091 /* 1092 * unpoison always clear PG_hwpoison inside page lock 1093 */ 1094 if (!PageHWPoison(p)) { 1095 printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn); 1096 res = 0; 1097 goto out; 1098 } 1099 if (hwpoison_filter(p)) { 1100 if (TestClearPageHWPoison(p)) 1101 atomic_long_sub(nr_pages, &mce_bad_pages); 1102 unlock_page(hpage); 1103 put_page(hpage); 1104 return 0; 1105 } 1106 1107 /* 1108 * For error on the tail page, we should set PG_hwpoison 1109 * on the head page to show that the hugepage is hwpoisoned 1110 */ 1111 if (PageHuge(p) && PageTail(p) && TestSetPageHWPoison(hpage)) { 1112 action_result(pfn, "hugepage already hardware poisoned", 1113 IGNORED); 1114 unlock_page(hpage); 1115 put_page(hpage); 1116 return 0; 1117 } 1118 /* 1119 * Set PG_hwpoison on all pages in an error hugepage, 1120 * because containment is done in hugepage unit for now. 1121 * Since we have done TestSetPageHWPoison() for the head page with 1122 * page lock held, we can safely set PG_hwpoison bits on tail pages. 1123 */ 1124 if (PageHuge(p)) 1125 set_page_hwpoison_huge_page(hpage); 1126 1127 wait_on_page_writeback(p); 1128 1129 /* 1130 * Now take care of user space mappings. 1131 * Abort on fail: __delete_from_page_cache() assumes unmapped page. 1132 */ 1133 if (hwpoison_user_mappings(p, pfn, trapno) != SWAP_SUCCESS) { 1134 printk(KERN_ERR "MCE %#lx: cannot unmap page, give up\n", pfn); 1135 res = -EBUSY; 1136 goto out; 1137 } 1138 1139 /* 1140 * Torn down by someone else? 1141 */ 1142 if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) { 1143 action_result(pfn, "already truncated LRU", IGNORED); 1144 res = -EBUSY; 1145 goto out; 1146 } 1147 1148 res = -EBUSY; 1149 for (ps = error_states;; ps++) { 1150 if ((p->flags & ps->mask) == ps->res) { 1151 res = page_action(ps, p, pfn); 1152 break; 1153 } 1154 } 1155out: 1156 unlock_page(hpage); 1157 return res; 1158} 1159EXPORT_SYMBOL_GPL(__memory_failure); 1160 1161/** 1162 * memory_failure - Handle memory failure of a page. 1163 * @pfn: Page Number of the corrupted page 1164 * @trapno: Trap number reported in the signal to user space. 1165 * 1166 * This function is called by the low level machine check code 1167 * of an architecture when it detects hardware memory corruption 1168 * of a page. It tries its best to recover, which includes 1169 * dropping pages, killing processes etc. 1170 * 1171 * The function is primarily of use for corruptions that 1172 * happen outside the current execution context (e.g. when 1173 * detected by a background scrubber) 1174 * 1175 * Must run in process context (e.g. a work queue) with interrupts 1176 * enabled and no spinlocks hold. 1177 */ 1178void memory_failure(unsigned long pfn, int trapno) 1179{ 1180 __memory_failure(pfn, trapno, 0); 1181} 1182 1183#define MEMORY_FAILURE_FIFO_ORDER 4 1184#define MEMORY_FAILURE_FIFO_SIZE (1 << MEMORY_FAILURE_FIFO_ORDER) 1185 1186struct memory_failure_entry { 1187 unsigned long pfn; 1188 int trapno; 1189 int flags; 1190}; 1191 1192struct memory_failure_cpu { 1193 DECLARE_KFIFO(fifo, struct memory_failure_entry, 1194 MEMORY_FAILURE_FIFO_SIZE); 1195 spinlock_t lock; 1196 struct work_struct work; 1197}; 1198 1199static DEFINE_PER_CPU(struct memory_failure_cpu, memory_failure_cpu); 1200 1201/** 1202 * memory_failure_queue - Schedule handling memory failure of a page. 1203 * @pfn: Page Number of the corrupted page 1204 * @trapno: Trap number reported in the signal to user space. 1205 * @flags: Flags for memory failure handling 1206 * 1207 * This function is called by the low level hardware error handler 1208 * when it detects hardware memory corruption of a page. It schedules 1209 * the recovering of error page, including dropping pages, killing 1210 * processes etc. 1211 * 1212 * The function is primarily of use for corruptions that 1213 * happen outside the current execution context (e.g. when 1214 * detected by a background scrubber) 1215 * 1216 * Can run in IRQ context. 1217 */ 1218void memory_failure_queue(unsigned long pfn, int trapno, int flags) 1219{ 1220 struct memory_failure_cpu *mf_cpu; 1221 unsigned long proc_flags; 1222 struct memory_failure_entry entry = { 1223 .pfn = pfn, 1224 .trapno = trapno, 1225 .flags = flags, 1226 }; 1227 1228 mf_cpu = &get_cpu_var(memory_failure_cpu); 1229 spin_lock_irqsave(&mf_cpu->lock, proc_flags); 1230 if (kfifo_put(&mf_cpu->fifo, &entry)) 1231 schedule_work_on(smp_processor_id(), &mf_cpu->work); 1232 else 1233 pr_err("Memory failure: buffer overflow when queuing memory failure at 0x%#lx\n", 1234 pfn); 1235 spin_unlock_irqrestore(&mf_cpu->lock, proc_flags); 1236 put_cpu_var(memory_failure_cpu); 1237} 1238EXPORT_SYMBOL_GPL(memory_failure_queue); 1239 1240static void memory_failure_work_func(struct work_struct *work) 1241{ 1242 struct memory_failure_cpu *mf_cpu; 1243 struct memory_failure_entry entry = { 0, }; 1244 unsigned long proc_flags; 1245 int gotten; 1246 1247 mf_cpu = &__get_cpu_var(memory_failure_cpu); 1248 for (;;) { 1249 spin_lock_irqsave(&mf_cpu->lock, proc_flags); 1250 gotten = kfifo_get(&mf_cpu->fifo, &entry); 1251 spin_unlock_irqrestore(&mf_cpu->lock, proc_flags); 1252 if (!gotten) 1253 break; 1254 __memory_failure(entry.pfn, entry.trapno, entry.flags); 1255 } 1256} 1257 1258static int __init memory_failure_init(void) 1259{ 1260 struct memory_failure_cpu *mf_cpu; 1261 int cpu; 1262 1263 for_each_possible_cpu(cpu) { 1264 mf_cpu = &per_cpu(memory_failure_cpu, cpu); 1265 spin_lock_init(&mf_cpu->lock); 1266 INIT_KFIFO(mf_cpu->fifo); 1267 INIT_WORK(&mf_cpu->work, memory_failure_work_func); 1268 } 1269 1270 return 0; 1271} 1272core_initcall(memory_failure_init); 1273 1274/** 1275 * unpoison_memory - Unpoison a previously poisoned page 1276 * @pfn: Page number of the to be unpoisoned page 1277 * 1278 * Software-unpoison a page that has been poisoned by 1279 * memory_failure() earlier. 1280 * 1281 * This is only done on the software-level, so it only works 1282 * for linux injected failures, not real hardware failures 1283 * 1284 * Returns 0 for success, otherwise -errno. 1285 */ 1286int unpoison_memory(unsigned long pfn) 1287{ 1288 struct page *page; 1289 struct page *p; 1290 int freeit = 0; 1291 unsigned int nr_pages; 1292 1293 if (!pfn_valid(pfn)) 1294 return -ENXIO; 1295 1296 p = pfn_to_page(pfn); 1297 page = compound_head(p); 1298 1299 if (!PageHWPoison(p)) { 1300 pr_info("MCE: Page was already unpoisoned %#lx\n", pfn); 1301 return 0; 1302 } 1303 1304 nr_pages = 1 << compound_trans_order(page); 1305 1306 if (!get_page_unless_zero(page)) { 1307 /* 1308 * Since HWPoisoned hugepage should have non-zero refcount, 1309 * race between memory failure and unpoison seems to happen. 1310 * In such case unpoison fails and memory failure runs 1311 * to the end. 1312 */ 1313 if (PageHuge(page)) { 1314 pr_info("MCE: Memory failure is now running on free hugepage %#lx\n", pfn); 1315 return 0; 1316 } 1317 if (TestClearPageHWPoison(p)) 1318 atomic_long_sub(nr_pages, &mce_bad_pages); 1319 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn); 1320 return 0; 1321 } 1322 1323 lock_page(page); 1324 /* 1325 * This test is racy because PG_hwpoison is set outside of page lock. 1326 * That's acceptable because that won't trigger kernel panic. Instead, 1327 * the PG_hwpoison page will be caught and isolated on the entrance to 1328 * the free buddy page pool. 1329 */ 1330 if (TestClearPageHWPoison(page)) { 1331 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn); 1332 atomic_long_sub(nr_pages, &mce_bad_pages); 1333 freeit = 1; 1334 if (PageHuge(page)) 1335 clear_page_hwpoison_huge_page(page); 1336 } 1337 unlock_page(page); 1338 1339 put_page(page); 1340 if (freeit) 1341 put_page(page); 1342 1343 return 0; 1344} 1345EXPORT_SYMBOL(unpoison_memory); 1346 1347static struct page *new_page(struct page *p, unsigned long private, int **x) 1348{ 1349 int nid = page_to_nid(p); 1350 if (PageHuge(p)) 1351 return alloc_huge_page_node(page_hstate(compound_head(p)), 1352 nid); 1353 else 1354 return alloc_pages_exact_node(nid, GFP_HIGHUSER_MOVABLE, 0); 1355} 1356 1357/* 1358 * Safely get reference count of an arbitrary page. 1359 * Returns 0 for a free page, -EIO for a zero refcount page 1360 * that is not free, and 1 for any other page type. 1361 * For 1 the page is returned with increased page count, otherwise not. 1362 */ 1363static int get_any_page(struct page *p, unsigned long pfn, int flags) 1364{ 1365 int ret; 1366 1367 if (flags & MF_COUNT_INCREASED) 1368 return 1; 1369 1370 /* 1371 * The lock_memory_hotplug prevents a race with memory hotplug. 1372 * This is a big hammer, a better would be nicer. 1373 */ 1374 lock_memory_hotplug(); 1375 1376 /* 1377 * Isolate the page, so that it doesn't get reallocated if it 1378 * was free. 1379 */ 1380 set_migratetype_isolate(p); 1381 /* 1382 * When the target page is a free hugepage, just remove it 1383 * from free hugepage list. 1384 */ 1385 if (!get_page_unless_zero(compound_head(p))) { 1386 if (PageHuge(p)) { 1387 pr_info("get_any_page: %#lx free huge page\n", pfn); 1388 ret = dequeue_hwpoisoned_huge_page(compound_head(p)); 1389 } else if (is_free_buddy_page(p)) { 1390 pr_info("get_any_page: %#lx free buddy page\n", pfn); 1391 /* Set hwpoison bit while page is still isolated */ 1392 SetPageHWPoison(p); 1393 ret = 0; 1394 } else { 1395 pr_info("get_any_page: %#lx: unknown zero refcount page type %lx\n", 1396 pfn, p->flags); 1397 ret = -EIO; 1398 } 1399 } else { 1400 /* Not a free page */ 1401 ret = 1; 1402 } 1403 unset_migratetype_isolate(p); 1404 unlock_memory_hotplug(); 1405 return ret; 1406} 1407 1408static int soft_offline_huge_page(struct page *page, int flags) 1409{ 1410 int ret; 1411 unsigned long pfn = page_to_pfn(page); 1412 struct page *hpage = compound_head(page); 1413 LIST_HEAD(pagelist); 1414 1415 ret = get_any_page(page, pfn, flags); 1416 if (ret < 0) 1417 return ret; 1418 if (ret == 0) 1419 goto done; 1420 1421 if (PageHWPoison(hpage)) { 1422 put_page(hpage); 1423 pr_info("soft offline: %#lx hugepage already poisoned\n", pfn); 1424 return -EBUSY; 1425 } 1426 1427 /* Keep page count to indicate a given hugepage is isolated. */ 1428 1429 list_add(&hpage->lru, &pagelist); 1430 ret = migrate_huge_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, 0, 1431 true); 1432 if (ret) { 1433 struct page *page1, *page2; 1434 list_for_each_entry_safe(page1, page2, &pagelist, lru) 1435 put_page(page1); 1436 1437 pr_info("soft offline: %#lx: migration failed %d, type %lx\n", 1438 pfn, ret, page->flags); 1439 if (ret > 0) 1440 ret = -EIO; 1441 return ret; 1442 } 1443done: 1444 if (!PageHWPoison(hpage)) 1445 atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages); 1446 set_page_hwpoison_huge_page(hpage); 1447 dequeue_hwpoisoned_huge_page(hpage); 1448 /* keep elevated page count for bad page */ 1449 return ret; 1450} 1451 1452/** 1453 * soft_offline_page - Soft offline a page. 1454 * @page: page to offline 1455 * @flags: flags. Same as memory_failure(). 1456 * 1457 * Returns 0 on success, otherwise negated errno. 1458 * 1459 * Soft offline a page, by migration or invalidation, 1460 * without killing anything. This is for the case when 1461 * a page is not corrupted yet (so it's still valid to access), 1462 * but has had a number of corrected errors and is better taken 1463 * out. 1464 * 1465 * The actual policy on when to do that is maintained by 1466 * user space. 1467 * 1468 * This should never impact any application or cause data loss, 1469 * however it might take some time. 1470 * 1471 * This is not a 100% solution for all memory, but tries to be 1472 * ``good enough'' for the majority of memory. 1473 */ 1474int soft_offline_page(struct page *page, int flags) 1475{ 1476 int ret; 1477 unsigned long pfn = page_to_pfn(page); 1478 1479 if (PageHuge(page)) 1480 return soft_offline_huge_page(page, flags); 1481 1482 ret = get_any_page(page, pfn, flags); 1483 if (ret < 0) 1484 return ret; 1485 if (ret == 0) 1486 goto done; 1487 1488 /* 1489 * Page cache page we can handle? 1490 */ 1491 if (!PageLRU(page)) { 1492 /* 1493 * Try to free it. 1494 */ 1495 put_page(page); 1496 shake_page(page, 1); 1497 1498 /* 1499 * Did it turn free? 1500 */ 1501 ret = get_any_page(page, pfn, 0); 1502 if (ret < 0) 1503 return ret; 1504 if (ret == 0) 1505 goto done; 1506 } 1507 if (!PageLRU(page)) { 1508 pr_info("soft_offline: %#lx: unknown non LRU page type %lx\n", 1509 pfn, page->flags); 1510 return -EIO; 1511 } 1512 1513 lock_page(page); 1514 wait_on_page_writeback(page); 1515 1516 /* 1517 * Synchronized using the page lock with memory_failure() 1518 */ 1519 if (PageHWPoison(page)) { 1520 unlock_page(page); 1521 put_page(page); 1522 pr_info("soft offline: %#lx page already poisoned\n", pfn); 1523 return -EBUSY; 1524 } 1525 1526 /* 1527 * Try to invalidate first. This should work for 1528 * non dirty unmapped page cache pages. 1529 */ 1530 ret = invalidate_inode_page(page); 1531 unlock_page(page); 1532 /* 1533 * RED-PEN would be better to keep it isolated here, but we 1534 * would need to fix isolation locking first. 1535 */ 1536 if (ret == 1) { 1537 put_page(page); 1538 ret = 0; 1539 pr_info("soft_offline: %#lx: invalidated\n", pfn); 1540 goto done; 1541 } 1542 1543 /* 1544 * Simple invalidation didn't work. 1545 * Try to migrate to a new page instead. migrate.c 1546 * handles a large number of cases for us. 1547 */ 1548 ret = isolate_lru_page(page); 1549 /* 1550 * Drop page reference which is came from get_any_page() 1551 * successful isolate_lru_page() already took another one. 1552 */ 1553 put_page(page); 1554 if (!ret) { 1555 LIST_HEAD(pagelist); 1556 inc_zone_page_state(page, NR_ISOLATED_ANON + 1557 page_is_file_cache(page)); 1558 list_add(&page->lru, &pagelist); 1559 ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, 1560 0, MIGRATE_SYNC); 1561 if (ret) { 1562 putback_lru_pages(&pagelist); 1563 pr_info("soft offline: %#lx: migration failed %d, type %lx\n", 1564 pfn, ret, page->flags); 1565 if (ret > 0) 1566 ret = -EIO; 1567 } 1568 } else { 1569 pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n", 1570 pfn, ret, page_count(page), page->flags); 1571 } 1572 if (ret) 1573 return ret; 1574 1575done: 1576 atomic_long_add(1, &mce_bad_pages); 1577 SetPageHWPoison(page); 1578 /* keep elevated page count for bad page */ 1579 return ret; 1580} 1581