migrate.c revision 28bd65781c848d95ba6a7f58b5c4b8265a804ec6
/*
 * Memory Migration functionality - linux/mm/migration.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter
 */

#include <linux/migrate.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/pagevec.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/memcontrol.h>
#include <linux/syscalls.h>
#include <linux/hugetlb.h>
#include <linux/gfp.h>

#include <asm/tlbflush.h>

#include "internal.h"

#define lru_to_page(_head)	(list_entry((_head)->prev, struct page, lru))

/*
 * migrate_prep() needs to be called before we start compiling a list of pages
 * to be migrated using isolate_lru_page(). If scheduling work on other CPUs is
 * undesirable, use migrate_prep_local().
 */
int migrate_prep(void)
{
	/*
	 * Clear the LRU lists so pages can be isolated.
	 * Note that pages may be moved off the LRU after we have
	 * drained them. Those pages will fail to migrate like other
	 * pages that may be busy.
	 */
	lru_add_drain_all();

	return 0;
}

/* Do the necessary work of migrate_prep but not if it involves other CPUs */
int migrate_prep_local(void)
{
	lru_add_drain();

	return 0;
}
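
/*
 * Illustrative sketch (not part of this file): a typical migration
 * caller drains the pagevecs and then isolates each candidate page
 * before handing the list to migrate_pages().  "target_page" and
 * "pagelist" are hypothetical names; error handling is elided.
 *
 *	LIST_HEAD(pagelist);
 *
 *	migrate_prep();
 *	if (!isolate_lru_page(target_page)) {
 *		list_add_tail(&target_page->lru, &pagelist);
 *		inc_zone_page_state(target_page, NR_ISOLATED_ANON +
 *				    page_is_file_cache(target_page));
 *	}
 */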

/*
 * Add isolated pages on the list back to the LRU under page lock
 * to avoid leaking evictable pages back onto the unevictable list.
 */
void putback_lru_pages(struct list_head *l)
{
	struct page *page;
	struct page *page2;

	list_for_each_entry_safe(page, page2, l, lru) {
		list_del(&page->lru);
		dec_zone_page_state(page, NR_ISOLATED_ANON +
				page_is_file_cache(page));
		putback_lru_page(page);
	}
}

/*
 * Restore a potential migration pte to a working pte entry
 */
static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
				 unsigned long addr, void *old)
{
	struct mm_struct *mm = vma->vm_mm;
	swp_entry_t entry;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;
	spinlock_t *ptl;

	if (unlikely(PageHuge(new))) {
		ptep = huge_pte_offset(mm, addr);
		if (!ptep)
			goto out;
		ptl = &mm->page_table_lock;
	} else {
		pgd = pgd_offset(mm, addr);
		if (!pgd_present(*pgd))
			goto out;

		pud = pud_offset(pgd, addr);
		if (!pud_present(*pud))
			goto out;

		pmd = pmd_offset(pud, addr);
		if (pmd_trans_huge(*pmd))
			goto out;
		if (!pmd_present(*pmd))
			goto out;

		ptep = pte_offset_map(pmd, addr);

		if (!is_swap_pte(*ptep)) {
			pte_unmap(ptep);
			goto out;
		}

		ptl = pte_lockptr(mm, pmd);
	}

	spin_lock(ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto unlock;

	entry = pte_to_swp_entry(pte);

	if (!is_migration_entry(entry) ||
	    migration_entry_to_page(entry) != old)
		goto unlock;

	get_page(new);
	pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
	if (is_write_migration_entry(entry))
		pte = pte_mkwrite(pte);
#ifdef CONFIG_HUGETLB_PAGE
	if (PageHuge(new))
		pte = pte_mkhuge(pte);
#endif
	flush_cache_page(vma, addr, pte_pfn(pte));
	set_pte_at(mm, addr, ptep, pte);

	if (PageHuge(new)) {
		if (PageAnon(new))
			hugepage_add_anon_rmap(new, vma, addr);
		else
			page_dup_rmap(new);
	} else if (PageAnon(new))
		page_add_anon_rmap(new, vma, addr);
	else
		page_add_file_rmap(new);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(vma, addr, ptep);
unlock:
	pte_unmap_unlock(ptep, ptl);
out:
	return SWAP_AGAIN;
}

/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 */
static void remove_migration_ptes(struct page *old, struct page *new)
{
	rmap_walk(new, remove_migration_pte, old);
}

/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 *
 * This function is called from do_swap_page().
 */
void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
				unsigned long address)
{
	pte_t *ptep, pte;
	spinlock_t *ptl;
	swp_entry_t entry;
	struct page *page;

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto out;

	entry = pte_to_swp_entry(pte);
	if (!is_migration_entry(entry))
		goto out;

	page = migration_entry_to_page(entry);

	/*
	 * Once the radix-tree replacement of page migration has started,
	 * page_count *must* be zero. And we don't want to call
	 * wait_on_page_locked() against a page without get_page().
	 * So we use get_page_unless_zero() here. Even if it fails, the
	 * page fault will simply occur again.
	 */
	if (!get_page_unless_zero(page))
		goto out;
	pte_unmap_unlock(ptep, ptl);
	wait_on_page_locked(page);
	put_page(page);
	return;
out:
	pte_unmap_unlock(ptep, ptl);
}
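
/*
 * Illustrative sketch (not part of this file): the call site in
 * do_swap_page() looks roughly like this -- the faulting pte is
 * decoded, and a migration entry puts the task to sleep on the page
 * lock until migration completes:
 *
 *	entry = pte_to_swp_entry(orig_pte);
 *	if (unlikely(non_swap_entry(entry))) {
 *		if (is_migration_entry(entry)) {
 *			migration_entry_wait(mm, pmd, address);
 *			goto out;
 *		}
 *		...
 *	}
 */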

/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 *   1 for anonymous pages without a mapping
 *   2 for pages with a mapping
 *   3 for pages with a mapping and PagePrivate/PagePrivate2 set.
 */
static int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page)
{
	int expected_count;
	void **pslot;

	if (!mapping) {
		/* Anonymous page without mapping */
		if (page_count(page) != 1)
			return -EAGAIN;
		return 0;
	}

	spin_lock_irq(&mapping->tree_lock);

	pslot = radix_tree_lookup_slot(&mapping->page_tree,
					page_index(page));

	expected_count = 2 + page_has_private(page);
	if (page_count(page) != expected_count ||
	    radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	if (!page_freeze_refs(page, expected_count)) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	/*
	 * Now we know that no one else is looking at the page.
	 */
	get_page(newpage);	/* add cache reference */
	if (PageSwapCache(page)) {
		SetPageSwapCache(newpage);
		set_page_private(newpage, page_private(page));
	}

	radix_tree_replace_slot(pslot, newpage);

	page_unfreeze_refs(page, expected_count);
	/*
	 * Drop the cache reference from the old page.
	 * We know this isn't the last reference.
	 */
	__put_page(page);

	/*
	 * If moved to a different zone then also account
	 * the page for that zone. Other VM counters will be
	 * taken care of when we establish references to the
	 * new page and drop references to the old page.
	 *
	 * Note that anonymous pages are accounted for
	 * via NR_FILE_PAGES and NR_ANON_PAGES if they
	 * are mapped to swap space.
	 */
	__dec_zone_page_state(page, NR_FILE_PAGES);
	__inc_zone_page_state(newpage, NR_FILE_PAGES);
	if (PageSwapBacked(page)) {
		__dec_zone_page_state(page, NR_SHMEM);
		__inc_zone_page_state(newpage, NR_SHMEM);
	}
	spin_unlock_irq(&mapping->tree_lock);

	return 0;
}
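
/*
 * Worked example (an interpretive illustration, not normative): for a
 * pagecache page that has been isolated from the LRU and fully
 * unmapped by try_to_unmap(), the radix tree holds one reference and
 * the isolating caller holds another, giving expected_count == 2;
 * buffer heads attached via page_private add a third.  Any extra
 * reference means a concurrent user, so the function above bails out
 * with -EAGAIN and the migration attempt is retried later.
 */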

/*
 * The expected number of remaining references is the same as that
 * of migrate_page_move_mapping().
 */
int migrate_huge_page_move_mapping(struct address_space *mapping,
				   struct page *newpage, struct page *page)
{
	int expected_count;
	void **pslot;

	if (!mapping) {
		if (page_count(page) != 1)
			return -EAGAIN;
		return 0;
	}

	spin_lock_irq(&mapping->tree_lock);

	pslot = radix_tree_lookup_slot(&mapping->page_tree,
					page_index(page));

	expected_count = 2 + page_has_private(page);
	if (page_count(page) != expected_count ||
	    radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	if (!page_freeze_refs(page, expected_count)) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	get_page(newpage);

	radix_tree_replace_slot(pslot, newpage);

	page_unfreeze_refs(page, expected_count);

	__put_page(page);

	spin_unlock_irq(&mapping->tree_lock);
	return 0;
}

/*
 * Copy the page to its new location
 */
void migrate_page_copy(struct page *newpage, struct page *page)
{
	if (PageHuge(page))
		copy_huge_page(newpage, page);
	else
		copy_highpage(newpage, page);

	if (PageError(page))
		SetPageError(newpage);
	if (PageReferenced(page))
		SetPageReferenced(newpage);
	if (PageUptodate(page))
		SetPageUptodate(newpage);
	if (TestClearPageActive(page)) {
		VM_BUG_ON(PageUnevictable(page));
		SetPageActive(newpage);
	} else if (TestClearPageUnevictable(page))
		SetPageUnevictable(newpage);
	if (PageChecked(page))
		SetPageChecked(newpage);
	if (PageMappedToDisk(page))
		SetPageMappedToDisk(newpage);

	if (PageDirty(page)) {
		clear_page_dirty_for_io(page);
		/*
		 * Want to mark the page and the radix tree as dirty, and
		 * redo the accounting that clear_page_dirty_for_io undid,
		 * but we can't use set_page_dirty because that function
		 * is actually a signal that all of the page has become dirty.
		 * Whereas only part of our page may be dirty.
		 */
		__set_page_dirty_nobuffers(newpage);
	}

	mlock_migrate_page(newpage, page);
	ksm_migrate_page(newpage, page);

	ClearPageSwapCache(page);
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page->mapping = NULL;

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (PageWriteback(newpage))
		end_page_writeback(newpage);
}

/************************************************************
 * Migration functions
 ***********************************************************/

/* Always fail migration. Used for mappings that are not movable */
int fail_migrate_page(struct address_space *mapping,
			struct page *newpage, struct page *page)
{
	return -EIO;
}
EXPORT_SYMBOL(fail_migrate_page);

/*
 * Common logic to directly migrate a single page suitable for
 * pages that do not use PagePrivate/PagePrivate2.
 *
 * Pages are locked upon entry and exit.
 */
int migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page)
{
	int rc;

	BUG_ON(PageWriteback(page));	/* Writeback must be complete */

	rc = migrate_page_move_mapping(mapping, newpage, page);

	if (rc)
		return rc;

	migrate_page_copy(newpage, page);
	return 0;
}
EXPORT_SYMBOL(migrate_page);

#ifdef CONFIG_BLOCK
/*
 * Migration function for pages with buffers. This function can only be used
 * if the underlying filesystem guarantees that no other references to "page"
 * exist.
 */
int buffer_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page)
{
	struct buffer_head *bh, *head;
	int rc;

	if (!page_has_buffers(page))
		return migrate_page(mapping, newpage, page);

	head = page_buffers(page);

	rc = migrate_page_move_mapping(mapping, newpage, page);

	if (rc)
		return rc;

	bh = head;
	do {
		get_bh(bh);
		lock_buffer(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	ClearPagePrivate(page);
	set_page_private(newpage, page_private(page));
	set_page_private(page, 0);
	put_page(page);
	get_page(newpage);

	bh = head;
	do {
		set_bh_page(bh, newpage, bh_offset(bh));
		bh = bh->b_this_page;

	} while (bh != head);

	SetPagePrivate(newpage);

	migrate_page_copy(newpage, page);

	bh = head;
	do {
		unlock_buffer(bh);
		put_bh(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	return 0;
}
EXPORT_SYMBOL(buffer_migrate_page);
#endif

/*
 * Writeback a page to clean the dirty state
 */
static int writeout(struct address_space *mapping, struct page *page)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = 1,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.for_reclaim = 1
	};
	int rc;

	if (!mapping->a_ops->writepage)
		/* No write method for the address space */
		return -EINVAL;

	if (!clear_page_dirty_for_io(page))
		/* Someone else already triggered a write */
		return -EAGAIN;

	/*
	 * A dirty page may imply that the underlying filesystem has
	 * the page on some queue. So the page must be clean for
	 * migration. Writeout may mean we lose the lock and the
	 * page state is no longer what we checked for earlier.
	 * At this point we know that the migration attempt cannot
	 * be successful.
	 */
	remove_migration_ptes(page, page);

	rc = mapping->a_ops->writepage(page, &wbc);

	if (rc != AOP_WRITEPAGE_ACTIVATE)
		/* unlocked. Relock */
		lock_page(page);

	return (rc < 0) ? -EIO : -EAGAIN;
}

/*
 * Default handling if a filesystem does not provide a migration function.
 */
static int fallback_migrate_page(struct address_space *mapping,
	struct page *newpage, struct page *page)
{
	if (PageDirty(page))
		return writeout(mapping, page);

	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (page_has_private(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return -EAGAIN;

	return migrate_page(mapping, newpage, page);
}
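
/*
 * Illustrative sketch (not part of this file): a block-backed
 * filesystem typically opts in to page migration by pointing its
 * address_space_operations at buffer_migrate_page(); the "example_*"
 * names below are hypothetical.
 *
 *	static const struct address_space_operations example_aops = {
 *		.readpage	= example_readpage,
 *		.writepage	= example_writepage,
 *		.migratepage	= buffer_migrate_page,
 *	};
 *
 * Filesystems whose pages carry no private data can use migrate_page()
 * directly; mappings with no handler at all go through
 * fallback_migrate_page() above.
 */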

/*
 * Move a page to a newly allocated page
 * The page is locked and all ptes have been successfully removed.
 *
 * The new page will have replaced the old page if this function
 * is successful.
 *
 * Return value:
 *   < 0 - error code
 *  == 0 - success
 */
static int move_to_new_page(struct page *newpage, struct page *page,
					int remap_swapcache)
{
	struct address_space *mapping;
	int rc;

	/*
	 * Block others from accessing the page when we get around to
	 * establishing additional references. We are the only one
	 * holding a reference to the new page at this point.
	 */
	if (!trylock_page(newpage))
		BUG();

	/* Prepare mapping for the new page. */
	newpage->index = page->index;
	newpage->mapping = page->mapping;
	if (PageSwapBacked(page))
		SetPageSwapBacked(newpage);

	mapping = page_mapping(page);
	if (!mapping)
		rc = migrate_page(mapping, newpage, page);
	else if (mapping->a_ops->migratepage)
		/*
		 * Most pages have a mapping and most filesystems
		 * should provide a migration function. Anonymous
		 * pages are part of swap space which also has its
		 * own migration function. This is the most common
		 * path for page migration.
		 */
		rc = mapping->a_ops->migratepage(mapping,
						newpage, page);
	else
		rc = fallback_migrate_page(mapping, newpage, page);

	if (rc) {
		newpage->mapping = NULL;
	} else {
		if (remap_swapcache)
			remove_migration_ptes(page, newpage);
	}

	unlock_page(newpage);

	return rc;
}

/*
 * Obtain the lock on page, remove all ptes and migrate the page
 * to the newly allocated page in newpage.
 */
static int unmap_and_move(new_page_t get_new_page, unsigned long private,
			struct page *page, int force, bool offlining, bool sync)
{
	int rc = 0;
	int *result = NULL;
	struct page *newpage = get_new_page(page, private, &result);
	int remap_swapcache = 1;
	int charge = 0;
	struct mem_cgroup *mem = NULL;
	struct anon_vma *anon_vma = NULL;

	if (!newpage)
		return -ENOMEM;

	if (page_count(page) == 1) {
		/* page was freed from under us. So we are done. */
		goto move_newpage;
	}
	if (unlikely(PageTransHuge(page)))
		if (unlikely(split_huge_page(page)))
			goto move_newpage;

	/* prepare cgroup just returns 0 or -ENOMEM */
	rc = -EAGAIN;

	if (!trylock_page(page)) {
		if (!force)
			goto move_newpage;

		/*
		 * It's not safe for direct compaction to call lock_page.
		 * For example, during page readahead pages are added locked
		 * to the LRU. Later, when the IO completes the pages are
		 * marked uptodate and unlocked. However, the queueing
		 * could be merging multiple pages for one bio (e.g.
		 * mpage_readpages). If an allocation happens for the
		 * second or third page, the process can end up locking
		 * the same page twice and deadlocking. Rather than
		 * trying to be clever about what pages can be locked,
		 * avoid the use of lock_page for direct compaction
		 * altogether.
		 */
		if (current->flags & PF_MEMALLOC)
			goto move_newpage;

		lock_page(page);
	}

	/*
	 * Only memory hotplug's offline_pages() caller has locked out KSM,
	 * and can safely migrate a KSM page.  The other cases have skipped
	 * PageKsm along with PageReserved - but it is only now when we have
	 * the page lock that we can be certain it will not go KSM beneath us
	 * (KSM will not upgrade a page from PageAnon to PageKsm when it sees
	 * its pagecount raised, but only here do we take the page lock which
	 * serializes that).
	 */
	if (PageKsm(page) && !offlining) {
		rc = -EBUSY;
		goto unlock;
	}

	/* charge against new page */
	charge = mem_cgroup_prepare_migration(page, newpage, &mem);
	if (charge == -ENOMEM) {
		rc = -ENOMEM;
		goto unlock;
	}
	BUG_ON(charge);

	if (PageWriteback(page)) {
		if (!force || !sync)
			goto uncharge;
		wait_on_page_writeback(page);
	}
	/*
	 * After try_to_unmap(), page->mapcount drops to 0 here.  In that
	 * case we could not notice the anon_vma being freed while we are
	 * migrating a page, so this get_anon_vma() delays freeing the
	 * anon_vma until the end of migration.  File cache pages are no
	 * problem because of page_lock(): file caches may use writepage()
	 * or lock_page() in migration, so only anon pages need care here.
	 */
	if (PageAnon(page)) {
		/*
		 * Only page_lock_anon_vma() understands the subtleties of
		 * getting a hold on an anon_vma from outside one of its mms.
		 */
		anon_vma = page_lock_anon_vma(page);
		if (anon_vma) {
			/*
			 * Take a reference count on the anon_vma if the
			 * page is mapped so that it is guaranteed to
			 * exist when the page is remapped later
			 */
			get_anon_vma(anon_vma);
			page_unlock_anon_vma(anon_vma);
		} else if (PageSwapCache(page)) {
			/*
			 * We cannot be sure that the anon_vma of an unmapped
			 * swapcache page is safe to use because we don't
			 * know in advance if the VMA that this page belonged
			 * to still exists. If the VMA and others sharing the
			 * data have been freed, then the anon_vma could
			 * already be invalid.
			 *
			 * To avoid this possibility, swapcache pages get
			 * migrated but are not remapped when migration
			 * completes
			 */
			remap_swapcache = 0;
		} else {
			goto uncharge;
		}
	}

	/*
	 * Corner case handling:
	 * 1. When a new swap-cache page is read into, it is added to the LRU
	 * and treated as swapcache but it has no rmap yet.
	 * Calling try_to_unmap() against a page->mapping==NULL page will
	 * trigger a BUG.  So handle it here.
	 * 2. An orphaned page (see truncate_complete_page) might have
	 * fs-private metadata. The page can be picked up due to memory
	 * offlining.  Everywhere else except page reclaim, the page is
	 * invisible to the vm, so the page can not be migrated.  So try to
	 * free the metadata, so the page can be freed.
	 */
	if (!page->mapping) {
		VM_BUG_ON(PageAnon(page));
		if (page_has_private(page)) {
			try_to_free_buffers(page);
			goto uncharge;
		}
		goto skip_unmap;
	}

	/* Establish migration ptes or remove ptes */
	try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);

skip_unmap:
	if (!page_mapped(page))
		rc = move_to_new_page(newpage, page, remap_swapcache);

	if (rc && remap_swapcache)
		remove_migration_ptes(page, page);

	/* Drop an anon_vma reference if we took one */
	if (anon_vma)
		drop_anon_vma(anon_vma);

uncharge:
	if (!charge)
		mem_cgroup_end_migration(mem, page, newpage, rc == 0);
unlock:
	unlock_page(page);

	if (rc != -EAGAIN) {
		/*
		 * A page that has been migrated has all references
		 * removed and will be freed. A page that has not been
		 * migrated will have kept its references and be
		 * restored.
		 */
		list_del(&page->lru);
		dec_zone_page_state(page, NR_ISOLATED_ANON +
				page_is_file_cache(page));
		putback_lru_page(page);
	}

move_newpage:

	/*
	 * Move the new page to the LRU. If migration was not successful
	 * then this will free the page.
	 */
	putback_lru_page(newpage);

	if (result) {
		if (rc)
			*result = rc;
		else
			*result = page_to_nid(newpage);
	}
	return rc;
}

/*
 * Counterpart of unmap_and_move() for hugepage migration.
 *
 * This function doesn't wait for the completion of hugepage I/O
 * because there is no race between I/O and migration for hugepage.
 * Note that currently hugepage I/O occurs only in direct I/O
 * where no lock is held and PG_writeback is irrelevant,
 * and the writeback status of all subpages is counted in the reference
 * count of the head page (i.e. if all subpages of a 2MB hugepage are
 * under direct I/O, the reference of the head page is 512 and a bit more.)
 * This means that when we try to migrate a hugepage whose subpages are
 * doing direct I/O, some references remain after try_to_unmap() and
 * hugepage migration fails without data corruption.
 *
 * There is also no race when direct I/O is issued on the page under migration,
 * because then the pte is replaced with a migration swap entry and direct I/O
 * code will wait in the page fault for migration to complete.
 */
static int unmap_and_move_huge_page(new_page_t get_new_page,
				unsigned long private, struct page *hpage,
				int force, bool offlining, bool sync)
{
	int rc = 0;
	int *result = NULL;
	struct page *new_hpage = get_new_page(hpage, private, &result);
	struct anon_vma *anon_vma = NULL;

	if (!new_hpage)
		return -ENOMEM;

	rc = -EAGAIN;

	if (!trylock_page(hpage)) {
		if (!force || !sync)
			goto out;
		lock_page(hpage);
	}

	if (PageAnon(hpage)) {
		anon_vma = page_lock_anon_vma(hpage);
		if (anon_vma) {
			get_anon_vma(anon_vma);
			page_unlock_anon_vma(anon_vma);
		}
	}

	try_to_unmap(hpage, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);

	if (!page_mapped(hpage))
		rc = move_to_new_page(new_hpage, hpage, 1);

	if (rc)
		remove_migration_ptes(hpage, hpage);

	if (anon_vma)
		drop_anon_vma(anon_vma);
out:
	unlock_page(hpage);

	if (rc != -EAGAIN) {
		list_del(&hpage->lru);
		put_page(hpage);
	}

	put_page(new_hpage);

	if (result) {
		if (rc)
			*result = rc;
		else
			*result = page_to_nid(new_hpage);
	}
	return rc;
}

/*
 * migrate_pages
 *
 * The function takes one list of pages to migrate and a function
 * that determines from the page to be migrated and the private data
 * the target of the move and allocates the page.
 *
 * The function returns after 10 attempts or if no pages
 * are movable anymore because the list has become empty
 * or no retryable pages exist anymore.
 * The caller should call putback_lru_pages() to return the pages to the
 * LRU or free list only if ret != 0.
 *
 * Return: Number of pages not migrated or error code.
 */
int migrate_pages(struct list_head *from,
		new_page_t get_new_page, unsigned long private, bool offlining,
		bool sync)
{
	int retry = 1;
	int nr_failed = 0;
	int pass = 0;
	struct page *page;
	struct page *page2;
	int swapwrite = current->flags & PF_SWAPWRITE;
	int rc;

	if (!swapwrite)
		current->flags |= PF_SWAPWRITE;

	for(pass = 0; pass < 10 && retry; pass++) {
		retry = 0;

		list_for_each_entry_safe(page, page2, from, lru) {
			cond_resched();

			rc = unmap_and_move(get_new_page, private,
						page, pass > 2, offlining,
						sync);

			switch(rc) {
			case -ENOMEM:
				goto out;
			case -EAGAIN:
				retry++;
				break;
			case 0:
				break;
			default:
				/* Permanent failure */
				nr_failed++;
				break;
			}
		}
	}
	rc = 0;
out:
	if (!swapwrite)
		current->flags &= ~PF_SWAPWRITE;

	if (rc)
		return rc;

	return nr_failed + retry;
}
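
/*
 * Illustrative sketch (not part of this file): a hypothetical caller
 * migrating an isolated list of pages to a given node.  "pagelist" is
 * assumed to have been filled via isolate_lru_page(), and
 * "new_node_page" is a hypothetical new_page_t allocator that ignores
 * per-page status reporting:
 *
 *	static struct page *new_node_page(struct page *p,
 *				unsigned long private, int **result)
 *	{
 *		return alloc_pages_exact_node((int)private,
 *					GFP_HIGHUSER_MOVABLE, 0);
 *	}
 *
 *	...
 *	err = migrate_pages(&pagelist, new_node_page, nid, false, true);
 *	if (err)
 *		putback_lru_pages(&pagelist);
 */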

int migrate_huge_pages(struct list_head *from,
		new_page_t get_new_page, unsigned long private, bool offlining,
		bool sync)
{
	int retry = 1;
	int nr_failed = 0;
	int pass = 0;
	struct page *page;
	struct page *page2;
	int rc;

	for (pass = 0; pass < 10 && retry; pass++) {
		retry = 0;

		list_for_each_entry_safe(page, page2, from, lru) {
			cond_resched();

			rc = unmap_and_move_huge_page(get_new_page,
					private, page, pass > 2, offlining,
					sync);

			switch(rc) {
			case -ENOMEM:
				goto out;
			case -EAGAIN:
				retry++;
				break;
			case 0:
				break;
			default:
				/* Permanent failure */
				nr_failed++;
				break;
			}
		}
	}
	rc = 0;
out:

	list_for_each_entry_safe(page, page2, from, lru)
		put_page(page);

	if (rc)
		return rc;

	return nr_failed + retry;
}

#ifdef CONFIG_NUMA
/*
 * Move a list of individual pages
 */
struct page_to_node {
	unsigned long addr;
	struct page *page;
	int node;
	int status;
};

static struct page *new_page_node(struct page *p, unsigned long private,
		int **result)
{
	struct page_to_node *pm = (struct page_to_node *)private;

	while (pm->node != MAX_NUMNODES && pm->page != p)
		pm++;

	if (pm->node == MAX_NUMNODES)
		return NULL;

	*result = &pm->status;

	return alloc_pages_exact_node(pm->node,
				GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0);
}

/*
 * Move a set of pages as indicated in the pm array. The addr
 * field must be set to the virtual address of the page to be moved
 * and the node number must contain a valid target node.
 * The pm array ends with node = MAX_NUMNODES.
 */
static int do_move_page_to_node_array(struct mm_struct *mm,
				      struct page_to_node *pm,
				      int migrate_all)
{
	int err;
	struct page_to_node *pp;
	LIST_HEAD(pagelist);

	down_read(&mm->mmap_sem);

	/*
	 * Build a list of pages to migrate
	 */
	for (pp = pm; pp->node != MAX_NUMNODES; pp++) {
		struct vm_area_struct *vma;
		struct page *page;

		err = -EFAULT;
		vma = find_vma(mm, pp->addr);
		if (!vma || pp->addr < vma->vm_start || !vma_migratable(vma))
			goto set_status;

		page = follow_page(vma, pp->addr, FOLL_GET|FOLL_SPLIT);

		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto set_status;

		err = -ENOENT;
		if (!page)
			goto set_status;

		/* Use PageReserved to check for zero page */
		if (PageReserved(page) || PageKsm(page))
			goto put_and_set;

		pp->page = page;
		err = page_to_nid(page);

		if (err == pp->node)
			/*
			 * Node already in the right place
			 */
			goto put_and_set;

		err = -EACCES;
		if (page_mapcount(page) > 1 &&
				!migrate_all)
			goto put_and_set;

		err = isolate_lru_page(page);
		if (!err) {
			list_add_tail(&page->lru, &pagelist);
			inc_zone_page_state(page, NR_ISOLATED_ANON +
					    page_is_file_cache(page));
		}
put_and_set:
		/*
		 * Either remove the duplicate refcount from
		 * isolate_lru_page() or drop the page ref if it was
		 * not isolated.
		 */
		put_page(page);
set_status:
		pp->status = err;
	}

	err = 0;
	if (!list_empty(&pagelist)) {
		err = migrate_pages(&pagelist, new_page_node,
				(unsigned long)pm, 0, true);
		if (err)
			putback_lru_pages(&pagelist);
	}

	up_read(&mm->mmap_sem);
	return err;
}
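
/*
 * Worked example for the chunking in do_pages_move() below (an
 * assumption-level illustration for a 64-bit kernel with 4KB pages):
 * struct page_to_node is 24 bytes (8 + 8 + 4 + 4), so one page holds
 * 4096 / 24 = 170 entries, and chunk_nr_pages = 170 - 1 = 169 once
 * the end-of-chunk marker slot is reserved.
 */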

/*
 * Migrate an array of page addresses onto an array of nodes and fill
 * in the corresponding array of status.
 */
static int do_pages_move(struct mm_struct *mm, struct task_struct *task,
			 unsigned long nr_pages,
			 const void __user * __user *pages,
			 const int __user *nodes,
			 int __user *status, int flags)
{
	struct page_to_node *pm;
	nodemask_t task_nodes;
	unsigned long chunk_nr_pages;
	unsigned long chunk_start;
	int err;

	task_nodes = cpuset_mems_allowed(task);

	err = -ENOMEM;
	pm = (struct page_to_node *)__get_free_page(GFP_KERNEL);
	if (!pm)
		goto out;

	migrate_prep();

	/*
	 * Store a chunk of page_to_node array in a page,
	 * but keep the last one as a marker
	 */
	chunk_nr_pages = (PAGE_SIZE / sizeof(struct page_to_node)) - 1;

	for (chunk_start = 0;
	     chunk_start < nr_pages;
	     chunk_start += chunk_nr_pages) {
		int j;

		if (chunk_start + chunk_nr_pages > nr_pages)
			chunk_nr_pages = nr_pages - chunk_start;

		/* fill the chunk pm with addrs and nodes from user-space */
		for (j = 0; j < chunk_nr_pages; j++) {
			const void __user *p;
			int node;

			err = -EFAULT;
			if (get_user(p, pages + j + chunk_start))
				goto out_pm;
			pm[j].addr = (unsigned long) p;

			if (get_user(node, nodes + j + chunk_start))
				goto out_pm;

			err = -ENODEV;
			if (node < 0 || node >= MAX_NUMNODES)
				goto out_pm;

			if (!node_state(node, N_HIGH_MEMORY))
				goto out_pm;

			err = -EACCES;
			if (!node_isset(node, task_nodes))
				goto out_pm;

			pm[j].node = node;
		}

		/* End marker for this chunk */
		pm[chunk_nr_pages].node = MAX_NUMNODES;

		/* Migrate this chunk */
		err = do_move_page_to_node_array(mm, pm,
						 flags & MPOL_MF_MOVE_ALL);
		if (err < 0)
			goto out_pm;

		/* Return status information */
		for (j = 0; j < chunk_nr_pages; j++)
			if (put_user(pm[j].status, status + j + chunk_start)) {
				err = -EFAULT;
				goto out_pm;
			}
	}
	err = 0;

out_pm:
	free_page((unsigned long)pm);
out:
	return err;
}

/*
 * Determine the nodes of an array of pages and store them in an array
 * of status.
 */
static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
				const void __user **pages, int *status)
{
	unsigned long i;

	down_read(&mm->mmap_sem);

	for (i = 0; i < nr_pages; i++) {
		unsigned long addr = (unsigned long)(*pages);
		struct vm_area_struct *vma;
		struct page *page;
		int err = -EFAULT;

		vma = find_vma(mm, addr);
		if (!vma || addr < vma->vm_start)
			goto set_status;

		page = follow_page(vma, addr, 0);

		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto set_status;

		err = -ENOENT;
		/* Use PageReserved to check for zero page */
		if (!page || PageReserved(page) || PageKsm(page))
			goto set_status;

		err = page_to_nid(page);
set_status:
		*status = err;

		pages++;
		status++;
	}

	up_read(&mm->mmap_sem);
}

/*
 * Determine the nodes of a user array of pages and store it in
 * a user array of status.
 */
static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
			 const void __user * __user *pages,
			 int __user *status)
{
#define DO_PAGES_STAT_CHUNK_NR 16
	const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
	int chunk_status[DO_PAGES_STAT_CHUNK_NR];

	while (nr_pages) {
		unsigned long chunk_nr;

		chunk_nr = nr_pages;
		if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
			chunk_nr = DO_PAGES_STAT_CHUNK_NR;

		if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages)))
			break;

		do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);

		if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
			break;

		pages += chunk_nr;
		status += chunk_nr;
		nr_pages -= chunk_nr;
	}
	return nr_pages ? -EFAULT : 0;
}

/*
 * Move a list of pages in the address space of the currently executing
 * process.
 */
SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
		const void __user * __user *, pages,
		const int __user *, nodes,
		int __user *, status, int, flags)
{
	const struct cred *cred = current_cred(), *tcred;
	struct task_struct *task;
	struct mm_struct *mm;
	int err;

	/* Check flags */
	if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
		return -EINVAL;

	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	/* Find the mm_struct */
	read_lock(&tasklist_lock);
	task = pid ? find_task_by_vpid(pid) : current;
	if (!task) {
		read_unlock(&tasklist_lock);
		return -ESRCH;
	}
	mm = get_task_mm(task);
	read_unlock(&tasklist_lock);

	if (!mm)
		return -EINVAL;

	/*
	 * Check if this process has the right to modify the specified
	 * process. The right exists if the process has administrative
	 * capabilities, superuser privileges or the same
	 * userid as the target process.
	 */
	rcu_read_lock();
	tcred = __task_cred(task);
	if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
	    cred->uid != tcred->suid && cred->uid != tcred->uid &&
	    !capable(CAP_SYS_NICE)) {
		rcu_read_unlock();
		err = -EPERM;
		goto out;
	}
	rcu_read_unlock();

	err = security_task_movememory(task);
	if (err)
		goto out;

	if (nodes) {
		err = do_pages_move(mm, task, nr_pages, pages, nodes, status,
				    flags);
	} else {
		err = do_pages_stat(mm, nr_pages, pages, status);
	}

out:
	mmput(mm);
	return err;
}

/*
 * Call the migration functions in the vma_ops that may prepare
 * memory in a vm for migration. The migration functions may perform
 * the migration for vmas that do not have an underlying page struct.
 */
int migrate_vmas(struct mm_struct *mm, const nodemask_t *to,
	const nodemask_t *from, unsigned long flags)
{
	struct vm_area_struct *vma;
	int err = 0;

	for (vma = mm->mmap; vma && !err; vma = vma->vm_next) {
		if (vma->vm_ops && vma->vm_ops->migrate) {
			err = vma->vm_ops->migrate(vma, to, from, flags);
			if (err)
				break;
		}
	}
	return err;
}
#endif
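
/*
 * Illustrative sketch (not part of this file): how userspace drives
 * the move_pages syscall through the libnuma wrapper.  Assumes a
 * page-aligned address "addr" and a system that has a node 1; error
 * handling is elided.
 *
 *	#include <numaif.h>
 *
 *	void *pages[1]  = { addr };
 *	int   nodes[1]  = { 1 };	// migrate to node 1
 *	int   status[1];
 *
 *	// pid 0 means the calling process
 *	if (move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE) == 0)
 *		// status[0] now holds the node the page is on, or -errno
 *		...;
 */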