memory-failure.c revision a09ed5e00084448453c8bada4dcd31e5fbfc2f21
/*
 * Copyright (C) 2008, 2009 Intel Corporation
 * Authors: Andi Kleen, Fengguang Wu
 *
 * This software may be redistributed and/or modified under the terms of
 * the GNU General Public License ("GPL") version 2 only as published by the
 * Free Software Foundation.
 *
 * High level machine check handler. Handles pages reported by the
 * hardware as being corrupted, usually due to a multi-bit ECC memory or
 * cache failure.
 *
 * In addition there is a "soft offline" entry point that allows stopping
 * the use of suspicious, not-yet-corrupted pages without killing anything.
 *
 * Handles page cache pages in various states. The tricky part
 * here is that we can access any page asynchronously with respect to
 * other VM users, because memory failures could happen anytime and
 * anywhere. This could violate some of their assumptions. This is why
 * this code has to be extremely careful. Generally it tries to use
 * normal locking rules, as in get the standard locks, even if that means
 * the error handling takes potentially a long time.
 *
 * There are several operations here with non-linear complexity because
 * of unsuitable VM data structures. For example the operation to map back
 * from RMAP chains to processes has to walk the complete process list, so
 * its cost grows with the number of processes. But since memory corruptions
 * are rare we hope to get away with this. This avoids impacting the core
 * VM.
 */

/*
 * Notebook:
 * - hugetlb needs more code
 * - kcore/oldmem/vmcore/mem/kmem check for hwpoison pages
 * - pass bad pages to kdump next kernel
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/kernel-page-flags.h>
#include <linux/sched.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/backing-dev.h>
#include <linux/migrate.h>
#include <linux/page-isolation.h>
#include <linux/suspend.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memory_hotplug.h>
#include "internal.h"

int sysctl_memory_failure_early_kill __read_mostly = 0;

int sysctl_memory_failure_recovery __read_mostly = 1;

atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);

#if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)

u32 hwpoison_filter_enable = 0;
u32 hwpoison_filter_dev_major = ~0U;
u32 hwpoison_filter_dev_minor = ~0U;
u64 hwpoison_filter_flags_mask;
u64 hwpoison_filter_flags_value;
EXPORT_SYMBOL_GPL(hwpoison_filter_enable);
EXPORT_SYMBOL_GPL(hwpoison_filter_dev_major);
EXPORT_SYMBOL_GPL(hwpoison_filter_dev_minor);
EXPORT_SYMBOL_GPL(hwpoison_filter_flags_mask);
EXPORT_SYMBOL_GPL(hwpoison_filter_flags_value);
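/*
 * How these filter knobs are typically driven (a sketch, based on the
 * companion injector in mm/hwpoison-inject.c, assumed to be built via
 * CONFIG_HWPOISON_INJECT): the variables above are expected to show up
 * under /sys/kernel/debug/hwpoison/ as corrupt-filter-enable,
 * corrupt-filter-dev-major/minor and corrupt-filter-flags-mask/value,
 * next to corrupt-pfn for injecting the poison itself. A stress test
 * could e.g. restrict injection to pages backed by one block device:
 *
 *	echo 8 > /sys/kernel/debug/hwpoison/corrupt-filter-dev-major
 *	echo 0 > /sys/kernel/debug/hwpoison/corrupt-filter-dev-minor
 *	echo 1 > /sys/kernel/debug/hwpoison/corrupt-filter-enable
 */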
static int hwpoison_filter_dev(struct page *p)
{
        struct address_space *mapping;
        dev_t dev;

        if (hwpoison_filter_dev_major == ~0U &&
            hwpoison_filter_dev_minor == ~0U)
                return 0;

        /*
         * page_mapping() does not accept slab pages.
         */
        if (PageSlab(p))
                return -EINVAL;

        mapping = page_mapping(p);
        if (mapping == NULL || mapping->host == NULL)
                return -EINVAL;

        dev = mapping->host->i_sb->s_dev;
        if (hwpoison_filter_dev_major != ~0U &&
            hwpoison_filter_dev_major != MAJOR(dev))
                return -EINVAL;
        if (hwpoison_filter_dev_minor != ~0U &&
            hwpoison_filter_dev_minor != MINOR(dev))
                return -EINVAL;

        return 0;
}

static int hwpoison_filter_flags(struct page *p)
{
        if (!hwpoison_filter_flags_mask)
                return 0;

        if ((stable_page_flags(p) & hwpoison_filter_flags_mask) ==
                                hwpoison_filter_flags_value)
                return 0;
        else
                return -EINVAL;
}

/*
 * This allows stress tests to limit the test scope to a collection of tasks
 * by putting them under some memcg. This prevents killing unrelated/important
 * processes such as /sbin/init. Note that the target task may share clean
 * pages with init (eg. libc text), which is harmless. If the target task
 * shares _dirty_ pages with another task B, the test scheme must make sure B
 * is also included in the memcg. Lastly, due to race conditions this filter
 * can only guarantee that the page either belongs to the memcg tasks, or is
 * a freed page.
 */
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
u64 hwpoison_filter_memcg;
EXPORT_SYMBOL_GPL(hwpoison_filter_memcg);
static int hwpoison_filter_task(struct page *p)
{
        struct mem_cgroup *mem;
        struct cgroup_subsys_state *css;
        unsigned long ino;

        if (!hwpoison_filter_memcg)
                return 0;

        mem = try_get_mem_cgroup_from_page(p);
        if (!mem)
                return -EINVAL;

        css = mem_cgroup_css(mem);
        /* root_mem_cgroup has NULL dentries */
        if (!css->cgroup->dentry) {
                /* drop the reference taken by try_get_mem_cgroup_from_page() */
                css_put(css);
                return -EINVAL;
        }

        ino = css->cgroup->dentry->d_inode->i_ino;
        css_put(css);

        if (ino != hwpoison_filter_memcg)
                return -EINVAL;

        return 0;
}
#else
static int hwpoison_filter_task(struct page *p) { return 0; }
#endif

int hwpoison_filter(struct page *p)
{
        if (!hwpoison_filter_enable)
                return 0;

        if (hwpoison_filter_dev(p))
                return -EINVAL;

        if (hwpoison_filter_flags(p))
                return -EINVAL;

        if (hwpoison_filter_task(p))
                return -EINVAL;

        return 0;
}
#else
int hwpoison_filter(struct page *p)
{
        return 0;
}
#endif

EXPORT_SYMBOL_GPL(hwpoison_filter);
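/*
 * What a recipient of the BUS_MCEERR_AO signal sent below can do with
 * it (user space sketch, not something this file enforces): si_addr
 * points into the failing mapping and si_addr_lsb encodes the size of
 * the poisoned range as its least significant bit, so a handler can
 * recover the affected extent roughly as
 *
 *	size_t len = 1UL << si->si_addr_lsb;
 *	void *start = (void *)((unsigned long)si->si_addr & ~(len - 1));
 *
 * and then discard or rebuild that region. BUS_MCEERR_AO ("action
 * optional") means the data is not needed right now, as opposed to
 * BUS_MCEERR_AR, which is raised on a synchronous access to poison.
 */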
/*
 * Send all the processes who have the page mapped an ``action optional''
 * signal.
 */
static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
                        unsigned long pfn, struct page *page)
{
        struct siginfo si;
        int ret;

        printk(KERN_ERR
               "MCE %#lx: Killing %s:%d early due to hardware memory corruption\n",
               pfn, t->comm, t->pid);
        si.si_signo = SIGBUS;
        si.si_errno = 0;
        si.si_code = BUS_MCEERR_AO;
        si.si_addr = (void *)addr;
#ifdef __ARCH_SI_TRAPNO
        si.si_trapno = trapno;
#endif
        si.si_addr_lsb = compound_trans_order(compound_head(page)) + PAGE_SHIFT;
        /*
         * Don't use force here, it's convenient if the signal
         * can be temporarily blocked.
         * This could cause a loop when the user sets SIGBUS
         * to SIG_IGN, but hopefully no one will do that?
         */
        ret = send_sig_info(SIGBUS, &si, t);    /* synchronous? */
        if (ret < 0)
                printk(KERN_INFO "MCE: Error sending signal to %s:%d: %d\n",
                       t->comm, t->pid, ret);
        return ret;
}

/*
 * When an unknown page type is encountered drain as many buffers as possible
 * in the hope of turning the page into an LRU or free page, which we can
 * handle.
 */
void shake_page(struct page *p, int access)
{
        if (!PageSlab(p)) {
                lru_add_drain_all();
                if (PageLRU(p))
                        return;
                drain_all_pages();
                if (PageLRU(p) || is_free_buddy_page(p))
                        return;
        }

        /*
         * Only call shrink_slab here (which would also shrink other caches)
         * if access is not potentially fatal.
         */
        if (access) {
                int nr;
                do {
                        struct shrink_control shrink = {
                                .gfp_mask = GFP_KERNEL,
                                .nr_scanned = 1000,
                        };

                        nr = shrink_slab(&shrink, 1000);
                        if (page_count(p) == 1)
                                break;
                } while (nr > 10);
        }
}
EXPORT_SYMBOL_GPL(shake_page);

/*
 * Kill all processes that have a poisoned page mapped and then isolate
 * the page.
 *
 * General strategy:
 * Find all processes having the page mapped and kill them.
 * But we keep a page reference around so that the page is not
 * actually freed yet.
 * Then stash the page away.
 *
 * There's no convenient way to get back to mapped processes
 * from the VMAs. So do a brute-force search over all
 * running processes.
 *
 * Remember that machine checks are not common (or rather
 * if they are common you have other problems), so this shouldn't
 * be a performance issue.
 *
 * Also there are some races possible while we get from the
 * error detection to actually handling it.
 */

struct to_kill {
        struct list_head nd;
        struct task_struct *tsk;
        unsigned long addr;
        char addr_valid;
};

/*
 * Failure handling: if we can't find or can't kill a process there's
 * not much we can do. We just print a message and otherwise ignore it.
 */

/*
 * Schedule a process for later kill.
 * Uses GFP_ATOMIC allocations to avoid potential recursions in the VM.
 * TBD would GFP_NOIO be enough?
 */
static void add_to_kill(struct task_struct *tsk, struct page *p,
                        struct vm_area_struct *vma,
                        struct list_head *to_kill,
                        struct to_kill **tkc)
{
        struct to_kill *tk;

        if (*tkc) {
                tk = *tkc;
                *tkc = NULL;
        } else {
                tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC);
                if (!tk) {
                        printk(KERN_ERR
                               "MCE: Out of memory during machine check handling\n");
                        return;
                }
        }
        tk->addr = page_address_in_vma(p, vma);
        tk->addr_valid = 1;

        /*
         * In theory we don't have to kill when the page was
         * munmapped. But it could also be an mremap. Since that's
         * likely very rare kill anyways just out of paranoia, but use
         * a SIGKILL because the error is not contained anymore.
         */
        if (tk->addr == -EFAULT) {
                pr_info("MCE: Unable to find user space address %lx in %s\n",
                        page_to_pfn(p), tsk->comm);
                tk->addr_valid = 0;
        }
        get_task_struct(tsk);
        tk->tsk = tsk;
        list_add_tail(&tk->nd, to_kill);
}

/*
 * Kill the processes that have been collected earlier.
 *
 * Only do anything when DOIT is set, otherwise just free the list
 * (this is used for clean pages which do not need killing).
 * Also when FAIL is set do a force kill because something went
 * wrong earlier.
 */
static void kill_procs_ao(struct list_head *to_kill, int doit, int trapno,
                          int fail, struct page *page, unsigned long pfn)
{
        struct to_kill *tk, *next;

        list_for_each_entry_safe (tk, next, to_kill, nd) {
                if (doit) {
                        /*
                         * In case something went wrong with munmapping
                         * make sure the process doesn't catch the
                         * signal and then access the memory. Just kill it.
                         */
                        if (fail || tk->addr_valid == 0) {
                                printk(KERN_ERR
               "MCE %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
                                       pfn, tk->tsk->comm, tk->tsk->pid);
                                force_sig(SIGKILL, tk->tsk);
                        }

                        /*
                         * In theory the process could have mapped
                         * something else on the address in-between. We could
                         * check for that, but we need to tell the
                         * process anyways.
                         */
                        else if (kill_proc_ao(tk->tsk, tk->addr, trapno,
                                              pfn, page) < 0)
                                printk(KERN_ERR
               "MCE %#lx: Cannot send advisory machine check signal to %s:%d\n",
                                       pfn, tk->tsk->comm, tk->tsk->pid);
                }
                put_task_struct(tk->tsk);
                kfree(tk);
        }
}

static int task_early_kill(struct task_struct *tsk)
{
        if (!tsk->mm)
                return 0;
        if (tsk->flags & PF_MCE_PROCESS)
                return !!(tsk->flags & PF_MCE_EARLY);
        return sysctl_memory_failure_early_kill;
}
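/*
 * PF_MCE_PROCESS/PF_MCE_EARLY checked above are per-task policy bits.
 * A process that prefers early kills over late ones is expected to set
 * them through prctl(), roughly (user space sketch):
 *
 *	prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
 *
 * When no per-task policy is set, the system-wide
 * vm.memory_failure_early_kill sysctl (declared at the top of this
 * file) decides.
 */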
/*
 * Collect processes when the error hit an anonymous page.
 */
static void collect_procs_anon(struct page *page, struct list_head *to_kill,
                               struct to_kill **tkc)
{
        struct vm_area_struct *vma;
        struct task_struct *tsk;
        struct anon_vma *av;

        read_lock(&tasklist_lock);
        av = page_lock_anon_vma(page);
        if (av == NULL) /* Not actually mapped anymore */
                goto out;
        for_each_process (tsk) {
                struct anon_vma_chain *vmac;

                if (!task_early_kill(tsk))
                        continue;
                list_for_each_entry(vmac, &av->head, same_anon_vma) {
                        vma = vmac->vma;
                        if (!page_mapped_in_vma(page, vma))
                                continue;
                        if (vma->vm_mm == tsk->mm)
                                add_to_kill(tsk, page, vma, to_kill, tkc);
                }
        }
        page_unlock_anon_vma(av);
out:
        read_unlock(&tasklist_lock);
}

/*
 * Collect processes when the error hit a file mapped page.
 */
static void collect_procs_file(struct page *page, struct list_head *to_kill,
                               struct to_kill **tkc)
{
        struct vm_area_struct *vma;
        struct task_struct *tsk;
        struct prio_tree_iter iter;
        struct address_space *mapping = page->mapping;

        /*
         * A note on the locking order between the two locks.
         * We don't rely on this particular order.
         * If you have some other code that needs a different order
         * feel free to switch them around. Or add a reverse link
         * from mm_struct to task_struct, then this could all be
         * done without taking tasklist_lock and looping over all tasks.
         */

        read_lock(&tasklist_lock);
        mutex_lock(&mapping->i_mmap_mutex);
        for_each_process(tsk) {
                pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

                if (!task_early_kill(tsk))
                        continue;

                vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff,
                                      pgoff) {
                        /*
                         * Send an early kill signal to tasks where a vma
                         * covers the page, even if the corrupted page is
                         * not necessarily mapped in its pte.
                         * Assume applications who requested early kill want
                         * to be informed of all such data corruptions.
                         */
                        if (vma->vm_mm == tsk->mm)
                                add_to_kill(tsk, page, vma, to_kill, tkc);
                }
        }
        mutex_unlock(&mapping->i_mmap_mutex);
        read_unlock(&tasklist_lock);
}

/*
 * Collect the processes who have the corrupted page mapped to kill.
 * This is done in two steps for locking reasons.
 * First preallocate one tokill structure outside the spin locks,
 * so that we can kill at least one process reasonably reliably.
 */
static void collect_procs(struct page *page, struct list_head *tokill)
{
        struct to_kill *tk;

        if (!page->mapping)
                return;

        tk = kmalloc(sizeof(struct to_kill), GFP_NOIO);
        if (!tk)
                return;
        if (PageAnon(page))
                collect_procs_anon(page, tokill, &tk);
        else
                collect_procs_file(page, tokill, &tk);
        kfree(tk);
}

/*
 * Error handlers for various types of pages.
 */

enum outcome {
        IGNORED,        /* Error: cannot be handled */
        FAILED,         /* Error: handling failed */
        DELAYED,        /* Will be handled later */
        RECOVERED,      /* Successfully recovered */
};

static const char *action_name[] = {
        [IGNORED] = "Ignored",
        [FAILED] = "Failed",
        [DELAYED] = "Delayed",
        [RECOVERED] = "Recovered",
};

/*
 * XXX: It is possible that a page is isolated from the LRU cache,
 * and then kept in swap cache or fails to be removed from page cache.
 * The page count will stop it from being freed by unpoison.
 * Stress tests should be aware of this memory leak problem.
 */
static int delete_from_lru_cache(struct page *p)
{
        if (!isolate_lru_page(p)) {
                /*
                 * Clear sensible page flags, so that the buddy system won't
                 * complain when the page is unpoisoned and freed.
                 */
                ClearPageActive(p);
                ClearPageUnevictable(p);
                /*
                 * Drop the page count elevated by isolate_lru_page().
                 */
                page_cache_release(p);
                return 0;
        }
        return -EIO;
}

/*
 * Error hit a kernel page.
 * Do nothing, try to be lucky and not touch this instead. For a few cases we
 * could be more sophisticated.
 */
static int me_kernel(struct page *p, unsigned long pfn)
{
        return IGNORED;
}

/*
 * Page in unknown state. Do nothing.
 */
static int me_unknown(struct page *p, unsigned long pfn)
{
        printk(KERN_ERR "MCE %#lx: Unknown page state\n", pfn);
        return FAILED;
}
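/*
 * For the pagecache handlers below: a file system opts in to the
 * truncation path by setting ->error_remove_page in its
 * address_space_operations, usually to the generic helper in
 * mm/truncate.c. Illustrative sketch (the aops name is just an
 * example, not taken from this file):
 *
 *	static const struct address_space_operations example_aops = {
 *		...
 *		.error_remove_page	= generic_error_remove_page,
 *	};
 *
 * File systems that don't set it fall back to invalidate_inode_page(),
 * which refuses dirty pages and pages with private data.
 */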
/*
 * Clean (or cleaned) page cache page.
 */
static int me_pagecache_clean(struct page *p, unsigned long pfn)
{
        int err;
        int ret = FAILED;
        struct address_space *mapping;

        delete_from_lru_cache(p);

        /*
         * For anonymous pages we're done; the only reference left
         * should be the one memory_failure() holds.
         */
        if (PageAnon(p))
                return RECOVERED;

        /*
         * Now truncate the page in the page cache. This is really
         * more like a "temporary hole punch".
         * Don't do this for block devices when someone else
         * has a reference, because it could be file system metadata
         * and that's not safe to truncate.
         */
        mapping = page_mapping(p);
        if (!mapping) {
                /*
                 * Page has been torn down in the meanwhile.
                 */
                return FAILED;
        }

        /*
         * Truncation is a bit tricky. Enable it per file system for now.
         *
         * Open: to take i_mutex or not for this? Right now we don't.
         */
        if (mapping->a_ops->error_remove_page) {
                err = mapping->a_ops->error_remove_page(mapping, p);
                if (err != 0) {
                        printk(KERN_INFO "MCE %#lx: Failed to punch page: %d\n",
                               pfn, err);
                } else if (page_has_private(p) &&
                           !try_to_release_page(p, GFP_NOIO)) {
                        pr_info("MCE %#lx: failed to release buffers\n", pfn);
                } else {
                        ret = RECOVERED;
                }
        } else {
                /*
                 * If the file system doesn't support it, just invalidate.
                 * This fails on dirty pages or anything with private data.
                 */
                if (invalidate_inode_page(p))
                        ret = RECOVERED;
                else
                        printk(KERN_INFO "MCE %#lx: Failed to invalidate\n",
                               pfn);
        }
        return ret;
}

/*
 * Dirty pagecache page.
 * Issues: when the error hit a hole page the error is not properly
 * propagated.
 */
static int me_pagecache_dirty(struct page *p, unsigned long pfn)
{
        struct address_space *mapping = page_mapping(p);

        SetPageError(p);
        /* TBD: print more information about the file. */
        if (mapping) {
                /*
                 * IO errors will be reported by write(), fsync(), etc.,
                 * which check the mapping.
                 * This way the application knows that something went
                 * wrong with its dirty file data.
                 *
                 * There's one open issue:
                 *
                 * The EIO will be only reported on the next IO
                 * operation and then cleared through the IO map.
                 * Normally Linux has two mechanisms to pass IO errors:
                 * first through the AS_EIO flag in the address space
                 * and then through the PageError flag in the page.
                 * Since we drop pages on memory failure handling, the
                 * only mechanism open to use is AS_EIO.
                 *
                 * This has the disadvantage that it gets cleared on
                 * the first operation that returns an error, while
                 * the PageError bit is more sticky and only cleared
                 * when the page is reread or dropped. If an
                 * application assumes it will always get an error on
                 * fsync, but does other operations on the fd before,
                 * and the page is dropped in between, then the error
                 * will not be properly reported.
                 *
                 * This can already happen even without hwpoisoned
                 * pages: first on metadata IO errors (which only
                 * report through AS_EIO) or when the page is dropped
                 * at the wrong time.
                 *
                 * So right now we assume that the application DTRT on
                 * the first EIO, but we're not worse than other parts
                 * of the kernel.
                 */
                mapping_set_error(mapping, EIO);
        }

        return me_pagecache_clean(p, pfn);
}
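/*
 * The user space view of the mapping_set_error() call above (a sketch,
 * relying only on the standard error reporting semantics): a writer
 * doing
 *
 *	write(fd, buf, len);
 *	if (fsync(fd) < 0)
 *		handle_data_loss();	/* sees EIO once after the poison */
 *
 * gets exactly one EIO out of the AS_EIO bit, as the comment above
 * explains; a second fsync() on the same file succeeds again.
 */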
/*
 * Clean and dirty swap cache.
 *
 * Dirty swap cache pages are tricky to handle. The page could live both in
 * page cache and swap cache (ie. the page was freshly swapped in). So it
 * could be referenced concurrently by 2 types of PTEs:
 * normal PTEs and swap PTEs. We try to handle them consistently by calling
 * try_to_unmap(TTU_IGNORE_HWPOISON) to convert the normal PTEs to swap PTEs,
 * and then
 * - clear the dirty bit to prevent IO
 * - remove it from the LRU
 * - but keep it in the swap cache, so that when we return to it on
 *   a later page fault, we know the application is accessing
 *   corrupted data and shall be killed (we installed simple
 *   interception code in do_swap_page to catch it).
 *
 * Clean swap cache pages can be directly isolated. A later page fault will
 * bring in the known good data from disk.
 */
static int me_swapcache_dirty(struct page *p, unsigned long pfn)
{
        ClearPageDirty(p);
        /* Trigger EIO in shmem: */
        ClearPageUptodate(p);

        if (!delete_from_lru_cache(p))
                return DELAYED;
        else
                return FAILED;
}

static int me_swapcache_clean(struct page *p, unsigned long pfn)
{
        delete_from_swap_cache(p);

        if (!delete_from_lru_cache(p))
                return RECOVERED;
        else
                return FAILED;
}

/*
 * Huge pages. Needs work.
 * Issues:
 * - Error on hugepage is contained in hugepage unit (not in raw page unit).
 *   To narrow down the kill region to one page, we need to break up the pmd.
 */
static int me_huge_page(struct page *p, unsigned long pfn)
{
        int res = 0;
        struct page *hpage = compound_head(p);
        /*
         * We can safely recover from an error on a free or reserved (i.e.
         * not in-use) hugepage by dequeuing it from the freelist.
         * To check whether a hugepage is in-use or not, we can't use
         * page->lru because it can be used in other hugepage operations,
         * such as __unmap_hugepage_range() and gather_surplus_pages().
         * So instead we use page_mapping() and PageAnon().
         * We assume that this function is called with the page lock held,
         * so there is no race between isolation and mapping/unmapping.
         */
        if (!(page_mapping(hpage) || PageAnon(hpage))) {
                res = dequeue_hwpoisoned_huge_page(hpage);
                if (!res)
                        return RECOVERED;
        }
        return DELAYED;
}

/*
 * Various page states we can handle.
 *
 * A page state is defined by its current page->flags bits.
 * The table matches them in order and calls the right handler.
 *
 * This is quite tricky because we can access the page at any time
 * in its life cycle, so all accesses have to be extremely careful.
 *
 * This is not complete. More states could be added.
 * For any missing state don't attempt recovery.
 */

#define dirty           (1UL << PG_dirty)
#define sc              (1UL << PG_swapcache)
#define unevict         (1UL << PG_unevictable)
#define mlock           (1UL << PG_mlocked)
#define writeback       (1UL << PG_writeback)
#define lru             (1UL << PG_lru)
#define swapbacked      (1UL << PG_swapbacked)
#define head            (1UL << PG_head)
#define tail            (1UL << PG_tail)
#define compound        (1UL << PG_compound)
#define slab            (1UL << PG_slab)
#define reserved        (1UL << PG_reserved)
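/*
 * The table below is matched in __memory_failure() as
 *
 *	(p->flags & ps->mask) == ps->res
 *
 * with the first hit winning, so order matters. Worked example: a
 * dirty LRU page satisfies both the "lru|dirty" and the plain "lru"
 * patterns, and only because the dirty entry comes first does it get
 * me_pagecache_dirty() instead of me_pagecache_clean(). The all-zero
 * catchall at the end matches every page.
 */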
static struct page_state {
        unsigned long mask;
        unsigned long res;
        char *msg;
        int (*action)(struct page *p, unsigned long pfn);
} error_states[] = {
        { reserved,     reserved,       "reserved kernel",      me_kernel },
        /*
         * free pages are specially detected outside this table:
         * PG_buddy pages only make a small fraction of all free pages.
         */

        /*
         * Could in theory check if the slab page is free or if we can drop
         * currently unused objects without touching them. But just
         * treat it as standard kernel for now.
         */
        { slab,         slab,           "kernel slab",          me_kernel },

#ifdef CONFIG_PAGEFLAGS_EXTENDED
        { head,         head,           "huge",                 me_huge_page },
        { tail,         tail,           "huge",                 me_huge_page },
#else
        { compound,     compound,       "huge",                 me_huge_page },
#endif

        { sc|dirty,     sc|dirty,       "swapcache",            me_swapcache_dirty },
        { sc|dirty,     sc,             "swapcache",            me_swapcache_clean },

        { unevict|dirty, unevict|dirty, "unevictable LRU",      me_pagecache_dirty },
        { unevict,      unevict,        "unevictable LRU",      me_pagecache_clean },

        { mlock|dirty,  mlock|dirty,    "mlocked LRU",          me_pagecache_dirty },
        { mlock,        mlock,          "mlocked LRU",          me_pagecache_clean },

        { lru|dirty,    lru|dirty,      "LRU",                  me_pagecache_dirty },
        { lru|dirty,    lru,            "clean LRU",            me_pagecache_clean },

        /*
         * Catchall entry: must be at end.
         */
        { 0,            0,              "unknown page state",   me_unknown },
};

#undef dirty
#undef sc
#undef unevict
#undef mlock
#undef writeback
#undef lru
#undef swapbacked
#undef head
#undef tail
#undef compound
#undef slab
#undef reserved

static void action_result(unsigned long pfn, char *msg, int result)
{
        struct page *page = pfn_to_page(pfn);

        printk(KERN_ERR "MCE %#lx: %s%s page recovery: %s\n",
               pfn,
               PageDirty(page) ? "dirty " : "",
               msg, action_name[result]);
}

static int page_action(struct page_state *ps, struct page *p,
                       unsigned long pfn)
{
        int result;
        int count;

        result = ps->action(p, pfn);
        action_result(pfn, ps->msg, result);

        count = page_count(p) - 1;
        if (ps->action == me_swapcache_dirty && result == DELAYED)
                count--;
        if (count != 0) {
                printk(KERN_ERR
                       "MCE %#lx: %s page still referenced by %d users\n",
                       pfn, ps->msg, count);
                result = FAILED;
        }

        /* Could do more checks here if page looks ok */
        /*
         * Could adjust zone counters here to correct for the missing page.
         */

        return (result == RECOVERED || result == DELAYED) ? 0 : -EBUSY;
}
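/*
 * Kill policy implemented below, in short: the page is always unmapped,
 * but SIGBUS is only sent when the data was dirty, i.e. when the loss
 * cannot be recovered from disk. A clean pagecache page is dropped
 * silently (kill = 0). A failed or ambiguous unmap escalates to an
 * uncatchable SIGKILL instead, because the poison then remains
 * reachable from user space.
 */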
/*
 * Do all that is necessary to remove user space mappings. Unmap
 * the pages and send SIGBUS to the processes if the data was dirty.
 */
static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
                                  int trapno)
{
        enum ttu_flags ttu = TTU_UNMAP | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
        struct address_space *mapping;
        LIST_HEAD(tokill);
        int ret;
        int kill = 1;
        struct page *hpage = compound_head(p);
        struct page *ppage;

        if (PageReserved(p) || PageSlab(p))
                return SWAP_SUCCESS;

        /*
         * This check implies we don't kill processes if their pages
         * are in the swap cache early. Those are always late kills.
         */
        if (!page_mapped(hpage))
                return SWAP_SUCCESS;

        if (PageKsm(p))
                return SWAP_FAIL;

        if (PageSwapCache(p)) {
                printk(KERN_ERR
                       "MCE %#lx: keeping poisoned page in swap cache\n", pfn);
                ttu |= TTU_IGNORE_HWPOISON;
        }

        /*
         * Propagate the dirty bit from PTEs to struct page first, because we
         * need this to decide if we should kill or just drop the page.
         * XXX: the dirty test could be racy: set_page_dirty() may not always
         * be called inside the page lock (it's recommended but not enforced).
         */
        mapping = page_mapping(hpage);
        if (!PageDirty(hpage) && mapping &&
            mapping_cap_writeback_dirty(mapping)) {
                if (page_mkclean(hpage)) {
                        SetPageDirty(hpage);
                } else {
                        kill = 0;
                        ttu |= TTU_IGNORE_HWPOISON;
                        printk(KERN_INFO
                               "MCE %#lx: corrupted page was clean: dropped without side effects\n",
                               pfn);
                }
        }

        /*
         * ppage: poisoned page
         *   if p is a regular page (4k page),
         *     ppage == the real poisoned page;
         *   else p is hugetlb or THP, ppage == the head page.
         */
        ppage = hpage;

        if (PageTransHuge(hpage)) {
                /*
                 * Verify that this isn't a hugetlbfs head page. The check
                 * for PageAnon is just to avoid tripping a split_huge_page
                 * internal debug check, as split_huge_page refuses to deal
                 * with anything that isn't an anon page. PageAnon can't go
                 * away from under us because we hold a refcount on the
                 * hpage; without a refcount on the hpage, split_huge_page
                 * can't be safely called in the first place, and having a
                 * refcount on the tail isn't enough to be safe.
                 */
                if (!PageHuge(hpage) && PageAnon(hpage)) {
                        if (unlikely(split_huge_page(hpage))) {
                                /*
                                 * FIXME: if splitting the THP failed, it
                                 * would be better to stop the following
                                 * operation rather than cause a panic by
                                 * unmapping. The system might survive if
                                 * the page is freed later.
                                 */
                                printk(KERN_INFO
                                       "MCE %#lx: failed to split THP\n", pfn);

                                BUG_ON(!PageHWPoison(p));
                                return SWAP_FAIL;
                        }
                        /* THP is split, so ppage should be the real poisoned page. */
                        ppage = p;
                }
        }

        /*
         * First collect all the processes that have the page
         * mapped in dirty form. This has to be done before try_to_unmap,
         * because ttu takes the rmap data structures down.
         *
         * Error handling: We ignore errors here because
         * there's nothing that can be done.
         */
        if (kill)
                collect_procs(ppage, &tokill);

        if (hpage != ppage)
                lock_page(ppage);

        ret = try_to_unmap(ppage, ttu);
        if (ret != SWAP_SUCCESS)
                printk(KERN_ERR "MCE %#lx: failed to unmap page (mapcount=%d)\n",
                       pfn, page_mapcount(ppage));

        if (hpage != ppage)
                unlock_page(ppage);

        /*
         * Now that the dirty bit has been propagated to the
         * struct page and all unmaps are done, we can decide if
         * killing is needed or not. Only kill when the page
         * was dirty, otherwise the tokill list is merely
         * freed. When there was a problem unmapping earlier,
         * use a more forceful uncatchable kill to prevent
         * any accesses to the poisoned memory.
         */
        kill_procs_ao(&tokill, !!PageDirty(ppage), trapno,
                      ret != SWAP_SUCCESS, p, pfn);

        return ret;
}
static void set_page_hwpoison_huge_page(struct page *hpage)
{
        int i;
        int nr_pages = 1 << compound_trans_order(hpage);
        for (i = 0; i < nr_pages; i++)
                SetPageHWPoison(hpage + i);
}

static void clear_page_hwpoison_huge_page(struct page *hpage)
{
        int i;
        int nr_pages = 1 << compound_trans_order(hpage);
        for (i = 0; i < nr_pages; i++)
                ClearPageHWPoison(hpage + i);
}
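/*
 * __memory_failure() below is the common entry point for both real
 * machine checks (via memory_failure()) and injected ones.
 * MF_COUNT_INCREASED in @flags tells it that the caller already holds
 * a reference on the page; this is e.g. how madvise(MADV_HWPOISON) is
 * expected to reach us, roughly (sketch; see madvise_hwpoison() in
 * mm/madvise.c for the real thing):
 *
 *	get_user_pages_fast(start, 1, 0, &p);
 *	__memory_failure(page_to_pfn(p), 0, MF_COUNT_INCREASED);
 */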
int __memory_failure(unsigned long pfn, int trapno, int flags)
{
        struct page_state *ps;
        struct page *p;
        struct page *hpage;
        int res;
        unsigned int nr_pages;

        if (!sysctl_memory_failure_recovery)
                panic("Memory failure from trap %d on page %lx", trapno, pfn);

        if (!pfn_valid(pfn)) {
                printk(KERN_ERR
                       "MCE %#lx: memory outside kernel control\n",
                       pfn);
                return -ENXIO;
        }

        p = pfn_to_page(pfn);
        hpage = compound_head(p);
        if (TestSetPageHWPoison(p)) {
                printk(KERN_ERR "MCE %#lx: already hardware poisoned\n", pfn);
                return 0;
        }

        nr_pages = 1 << compound_trans_order(hpage);
        atomic_long_add(nr_pages, &mce_bad_pages);

        /*
         * We need/can do nothing about count=0 pages.
         * 1) it's a free page, and therefore in safe hands:
         *    prep_new_page() will be the gate keeper.
         * 2) it's a free hugepage, which is also safe:
         *    an affected hugepage will be dequeued from the hugepage
         *    freelist, so there's no concern about reusing it ever after.
         * 3) it's part of a non-compound high order page.
         *    Implies some kernel user: cannot stop them from
         *    R/W the page; let's pray that the page has been
         *    used and will be freed some time later.
         * In fact it's dangerous to directly bump up the page count from 0,
         * that may make page_freeze_refs()/page_unfreeze_refs() mismatch.
         */
        if (!(flags & MF_COUNT_INCREASED) &&
            !get_page_unless_zero(hpage)) {
                if (is_free_buddy_page(p)) {
                        action_result(pfn, "free buddy", DELAYED);
                        return 0;
                } else if (PageHuge(hpage)) {
                        /*
                         * Check "just unpoisoned", "filter hit", and
                         * "race with other subpage".
                         */
                        lock_page(hpage);
                        if (!PageHWPoison(hpage)
                            || (hwpoison_filter(p) && TestClearPageHWPoison(p))
                            || (p != hpage && TestSetPageHWPoison(hpage))) {
                                atomic_long_sub(nr_pages, &mce_bad_pages);
                                /* don't leak the page lock on this early exit */
                                unlock_page(hpage);
                                return 0;
                        }
                        set_page_hwpoison_huge_page(hpage);
                        res = dequeue_hwpoisoned_huge_page(hpage);
                        action_result(pfn, "free huge",
                                      res ? IGNORED : DELAYED);
                        unlock_page(hpage);
                        return res;
                } else {
                        action_result(pfn, "high order kernel", IGNORED);
                        return -EBUSY;
                }
        }

        /*
         * We ignore non-LRU pages for good reasons.
         * - PG_locked is only well defined for LRU pages and a few others
         * - to avoid races with __set_page_locked()
         * - to avoid races with __SetPageSlab*() (and more non-atomic ops)
         * The check (unnecessarily) ignores LRU pages being isolated and
         * walked by the page reclaim code, however that's not a big loss.
         */
        if (!PageHuge(p) && !PageTransCompound(p)) {
                if (!PageLRU(p))
                        shake_page(p, 0);
                if (!PageLRU(p)) {
                        /*
                         * shake_page could have turned it free.
                         */
                        if (is_free_buddy_page(p)) {
                                action_result(pfn, "free buddy, 2nd try",
                                              DELAYED);
                                return 0;
                        }
                        action_result(pfn, "non LRU", IGNORED);
                        put_page(p);
                        return -EBUSY;
                }
        }

        /*
         * Lock the page and wait for writeback to finish.
         * It's very difficult to mess with pages currently under IO
         * and in many cases impossible, so we just avoid it here.
         */
        lock_page(hpage);

        /*
         * unpoison always clears PG_hwpoison inside the page lock
         */
        if (!PageHWPoison(p)) {
                printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
                res = 0;
                goto out;
        }
        if (hwpoison_filter(p)) {
                if (TestClearPageHWPoison(p))
                        atomic_long_sub(nr_pages, &mce_bad_pages);
                unlock_page(hpage);
                put_page(hpage);
                return 0;
        }

        /*
         * For an error on a tail page, we should set PG_hwpoison
         * on the head page to show that the hugepage is hwpoisoned.
         */
        if (PageHuge(p) && PageTail(p) && TestSetPageHWPoison(hpage)) {
                action_result(pfn, "hugepage already hardware poisoned",
                              IGNORED);
                unlock_page(hpage);
                put_page(hpage);
                return 0;
        }
        /*
         * Set PG_hwpoison on all pages in an error hugepage,
         * because containment is done in hugepage units for now.
         * Since we have done TestSetPageHWPoison() for the head page with
         * the page lock held, we can safely set PG_hwpoison bits on tail
         * pages.
         */
        if (PageHuge(p))
                set_page_hwpoison_huge_page(hpage);

        wait_on_page_writeback(p);

        /*
         * Now take care of user space mappings.
         * Abort on fail: __delete_from_page_cache() assumes an unmapped page.
         */
        if (hwpoison_user_mappings(p, pfn, trapno) != SWAP_SUCCESS) {
                printk(KERN_ERR "MCE %#lx: cannot unmap page, give up\n", pfn);
                res = -EBUSY;
                goto out;
        }

        /*
         * Torn down by someone else?
         */
        if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) {
                action_result(pfn, "already truncated LRU", IGNORED);
                res = -EBUSY;
                goto out;
        }

        res = -EBUSY;
        for (ps = error_states;; ps++) {
                if ((p->flags & ps->mask) == ps->res) {
                        res = page_action(ps, p, pfn);
                        break;
                }
        }
out:
        unlock_page(hpage);
        return res;
}
EXPORT_SYMBOL_GPL(__memory_failure);

/**
 * memory_failure - Handle memory failure of a page.
 * @pfn: Page Number of the corrupted page
 * @trapno: Trap number reported in the signal to user space.
 *
 * This function is called by the low level machine check code
 * of an architecture when it detects hardware memory corruption
 * of a page. It tries its best to recover, which includes
 * dropping pages, killing processes etc.
 *
 * The function is primarily of use for corruptions that
 * happen outside the current execution context (e.g. when
 * detected by a background scrubber).
 *
 * Must run in process context (e.g. a work queue) with interrupts
 * enabled and no spinlocks held.
 */
void memory_failure(unsigned long pfn, int trapno)
{
        __memory_failure(pfn, trapno, 0);
}
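/*
 * unpoison_memory() below is the undo operation for injected poison,
 * typically reached through the injector's debugfs file, e.g.
 * (sketch, assuming CONFIG_HWPOISON_INJECT):
 *
 *	echo $pfn > /sys/kernel/debug/hwpoison/unpoison-pfn
 *
 * Real hardware poison cannot be undone this way: the data is lost and
 * the page may still be physically bad.
 */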
1196 * 1197 * This is only done on the software-level, so it only works 1198 * for linux injected failures, not real hardware failures 1199 * 1200 * Returns 0 for success, otherwise -errno. 1201 */ 1202int unpoison_memory(unsigned long pfn) 1203{ 1204 struct page *page; 1205 struct page *p; 1206 int freeit = 0; 1207 unsigned int nr_pages; 1208 1209 if (!pfn_valid(pfn)) 1210 return -ENXIO; 1211 1212 p = pfn_to_page(pfn); 1213 page = compound_head(p); 1214 1215 if (!PageHWPoison(p)) { 1216 pr_info("MCE: Page was already unpoisoned %#lx\n", pfn); 1217 return 0; 1218 } 1219 1220 nr_pages = 1 << compound_trans_order(page); 1221 1222 if (!get_page_unless_zero(page)) { 1223 /* 1224 * Since HWPoisoned hugepage should have non-zero refcount, 1225 * race between memory failure and unpoison seems to happen. 1226 * In such case unpoison fails and memory failure runs 1227 * to the end. 1228 */ 1229 if (PageHuge(page)) { 1230 pr_debug("MCE: Memory failure is now running on free hugepage %#lx\n", pfn); 1231 return 0; 1232 } 1233 if (TestClearPageHWPoison(p)) 1234 atomic_long_sub(nr_pages, &mce_bad_pages); 1235 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn); 1236 return 0; 1237 } 1238 1239 lock_page(page); 1240 /* 1241 * This test is racy because PG_hwpoison is set outside of page lock. 1242 * That's acceptable because that won't trigger kernel panic. Instead, 1243 * the PG_hwpoison page will be caught and isolated on the entrance to 1244 * the free buddy page pool. 1245 */ 1246 if (TestClearPageHWPoison(page)) { 1247 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn); 1248 atomic_long_sub(nr_pages, &mce_bad_pages); 1249 freeit = 1; 1250 if (PageHuge(page)) 1251 clear_page_hwpoison_huge_page(page); 1252 } 1253 unlock_page(page); 1254 1255 put_page(page); 1256 if (freeit) 1257 put_page(page); 1258 1259 return 0; 1260} 1261EXPORT_SYMBOL(unpoison_memory); 1262 1263static struct page *new_page(struct page *p, unsigned long private, int **x) 1264{ 1265 int nid = page_to_nid(p); 1266 if (PageHuge(p)) 1267 return alloc_huge_page_node(page_hstate(compound_head(p)), 1268 nid); 1269 else 1270 return alloc_pages_exact_node(nid, GFP_HIGHUSER_MOVABLE, 0); 1271} 1272 1273/* 1274 * Safely get reference count of an arbitrary page. 1275 * Returns 0 for a free page, -EIO for a zero refcount page 1276 * that is not free, and 1 for any other page type. 1277 * For 1 the page is returned with increased page count, otherwise not. 1278 */ 1279static int get_any_page(struct page *p, unsigned long pfn, int flags) 1280{ 1281 int ret; 1282 1283 if (flags & MF_COUNT_INCREASED) 1284 return 1; 1285 1286 /* 1287 * The lock_memory_hotplug prevents a race with memory hotplug. 1288 * This is a big hammer, a better would be nicer. 1289 */ 1290 lock_memory_hotplug(); 1291 1292 /* 1293 * Isolate the page, so that it doesn't get reallocated if it 1294 * was free. 1295 */ 1296 set_migratetype_isolate(p); 1297 /* 1298 * When the target page is a free hugepage, just remove it 1299 * from free hugepage list. 
/*
 * Safely get the reference count of an arbitrary page.
 * Returns 0 for a free page, -EIO for a zero refcount page
 * that is not free, and 1 for any other page type.
 * For 1 the page is returned with an increased page count, otherwise not.
 */
static int get_any_page(struct page *p, unsigned long pfn, int flags)
{
        int ret;

        if (flags & MF_COUNT_INCREASED)
                return 1;

        /*
         * lock_memory_hotplug() prevents a race with memory hotplug.
         * This is a big hammer; something finer grained would be nicer.
         */
        lock_memory_hotplug();

        /*
         * Isolate the page, so that it doesn't get reallocated if it
         * was free.
         */
        set_migratetype_isolate(p);
        /*
         * When the target page is a free hugepage, just remove it
         * from the free hugepage list.
         */
        if (!get_page_unless_zero(compound_head(p))) {
                if (PageHuge(p)) {
                        pr_info("get_any_page: %#lx free huge page\n", pfn);
                        ret = dequeue_hwpoisoned_huge_page(compound_head(p));
                } else if (is_free_buddy_page(p)) {
                        pr_info("get_any_page: %#lx free buddy page\n", pfn);
                        /* Set the hwpoison bit while the page is still isolated */
                        SetPageHWPoison(p);
                        ret = 0;
                } else {
                        pr_info("get_any_page: %#lx: unknown zero refcount page type %lx\n",
                                pfn, p->flags);
                        ret = -EIO;
                }
        } else {
                /* Not a free page */
                ret = 1;
        }
        unset_migratetype_isolate(p);
        unlock_memory_hotplug();
        return ret;
}

static int soft_offline_huge_page(struct page *page, int flags)
{
        int ret;
        unsigned long pfn = page_to_pfn(page);
        struct page *hpage = compound_head(page);
        LIST_HEAD(pagelist);

        ret = get_any_page(page, pfn, flags);
        if (ret < 0)
                return ret;
        if (ret == 0)
                goto done;

        if (PageHWPoison(hpage)) {
                put_page(hpage);
                pr_debug("soft offline: %#lx hugepage already poisoned\n", pfn);
                return -EBUSY;
        }

        /* Keep the page count to indicate that the given hugepage is isolated. */

        list_add(&hpage->lru, &pagelist);
        ret = migrate_huge_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, 0,
                                 true);
        if (ret) {
                struct page *page1, *page2;
                list_for_each_entry_safe(page1, page2, &pagelist, lru)
                        put_page(page1);

                pr_debug("soft offline: %#lx: migration failed %d, type %lx\n",
                         pfn, ret, page->flags);
                if (ret > 0)
                        ret = -EIO;
                return ret;
        }
done:
        if (!PageHWPoison(hpage))
                atomic_long_add(1 << compound_trans_order(hpage),
                                &mce_bad_pages);
        set_page_hwpoison_huge_page(hpage);
        dequeue_hwpoisoned_huge_page(hpage);
        /* keep the elevated page count for the bad page */
        return ret;
}
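/*
 * soft_offline_page() below is typically driven by a user space policy
 * agent (e.g. mcelog) that counts corrected errors per page. Expected
 * entry points (sketches; see drivers/base/memory.c and mm/madvise.c)
 * are the memory sysfs file, which takes a physical address,
 *
 *	echo 0x<paddr> > /sys/devices/system/memory/soft_offline_page
 *
 * and madvise(MADV_SOFT_OFFLINE) for testing.
 */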
/**
 * soft_offline_page - Soft offline a page.
 * @page: page to offline
 * @flags: flags. Same as memory_failure().
 *
 * Returns 0 on success, otherwise negated errno.
 *
 * Soft offline a page, by migration or invalidation,
 * without killing anything. This is for the case when
 * a page is not corrupted yet (so it's still valid to access),
 * but has had a number of corrected errors and is better taken
 * out.
 *
 * The actual policy on when to do that is maintained by
 * user space.
 *
 * This should never impact any application or cause data loss,
 * however it might take some time.
 *
 * This is not a 100% solution for all memory, but tries to be
 * ``good enough'' for the majority of memory.
 */
int soft_offline_page(struct page *page, int flags)
{
        int ret;
        unsigned long pfn = page_to_pfn(page);

        if (PageHuge(page))
                return soft_offline_huge_page(page, flags);

        ret = get_any_page(page, pfn, flags);
        if (ret < 0)
                return ret;
        if (ret == 0)
                goto done;

        /*
         * Page cache page we can handle?
         */
        if (!PageLRU(page)) {
                /*
                 * Try to free it.
                 */
                put_page(page);
                shake_page(page, 1);

                /*
                 * Did it turn free?
                 */
                ret = get_any_page(page, pfn, 0);
                if (ret < 0)
                        return ret;
                if (ret == 0)
                        goto done;
        }
        if (!PageLRU(page)) {
                pr_info("soft_offline: %#lx: unknown non LRU page type %lx\n",
                        pfn, page->flags);
                return -EIO;
        }

        lock_page(page);
        wait_on_page_writeback(page);

        /*
         * Synchronized using the page lock with memory_failure()
         */
        if (PageHWPoison(page)) {
                unlock_page(page);
                put_page(page);
                pr_info("soft offline: %#lx page already poisoned\n", pfn);
                return -EBUSY;
        }

        /*
         * Try to invalidate first. This should work for
         * non-dirty unmapped page cache pages.
         */
        ret = invalidate_inode_page(page);
        unlock_page(page);
        /*
         * RED-PEN: it would be better to keep it isolated here, but we
         * would need to fix the isolation locking first.
         */
        if (ret == 1) {
                put_page(page);
                ret = 0;
                pr_info("soft_offline: %#lx: invalidated\n", pfn);
                goto done;
        }

        /*
         * Simple invalidation didn't work.
         * Try to migrate to a new page instead. migrate.c
         * handles a large number of cases for us.
         */
        ret = isolate_lru_page(page);
        /*
         * Drop the page reference which came from get_any_page();
         * a successful isolate_lru_page() already took another one.
         */
        put_page(page);
        if (!ret) {
                LIST_HEAD(pagelist);

                list_add(&page->lru, &pagelist);
                ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL,
                                    0, true);
                if (ret) {
                        putback_lru_pages(&pagelist);
                        pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
                                pfn, ret, page->flags);
                        if (ret > 0)
                                ret = -EIO;
                }
        } else {
                pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
                        pfn, ret, page_count(page), page->flags);
        }
        if (ret)
                return ret;

done:
        atomic_long_add(1, &mce_bad_pages);
        SetPageHWPoison(page);
        /* keep the elevated page count for the bad page */
        return ret;
}