migrate.c revision 3ef8fd7f720fc4f462fcdcae2fcde6f1c0536bfe
/*
 * Memory Migration functionality - linux/mm/migration.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter
 */

#include <linux/migrate.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/pagevec.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/memcontrol.h>
#include <linux/syscalls.h>
#include <linux/hugetlb.h>
#include <linux/gfp.h>

#include "internal.h"

#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))

/*
 * migrate_prep() needs to be called before we start compiling a list of pages
 * to be migrated using isolate_lru_page(). If scheduling work on other CPUs is
 * undesirable, use migrate_prep_local()
 */
int migrate_prep(void)
{
	/*
	 * Clear the LRU lists so pages can be isolated.
	 * Note that pages may be moved off the LRU after we have
	 * drained them. Those pages will fail to migrate like other
	 * pages that may be busy.
	 */
	lru_add_drain_all();

	return 0;
}

/* Do the necessary work of migrate_prep but not if it involves other CPUs */
int migrate_prep_local(void)
{
	lru_add_drain();

	return 0;
}
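
/*
 * Illustrative sketch of the typical in-kernel call pattern around the
 * helpers above: drain the LRU pagevecs, isolate candidate pages onto a
 * private list, then hand the list to migrate_pages() with an allocation
 * callback. It mirrors do_move_page_to_node_array() below; the callback
 * name and the nid passed as private data are made up for the example.
 *
 *	LIST_HEAD(pagelist);
 *
 *	migrate_prep();
 *	if (!isolate_lru_page(page)) {
 *		inc_zone_page_state(page, NR_ISOLATED_ANON +
 *					page_is_file_cache(page));
 *		list_add_tail(&page->lru, &pagelist);
 *	}
 *	if (!list_empty(&pagelist))
 *		migrate_pages(&pagelist, new_node_page, target_nid, 0);
 *
 * migrate_pages() puts back anything it could not move, so the caller does
 * not have to drain the list itself.
 */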

/*
 * Add isolated pages on the list back to the LRU under page lock
 * to avoid leaking evictable pages back onto unevictable list.
 */
void putback_lru_pages(struct list_head *l)
{
	struct page *page;
	struct page *page2;

	list_for_each_entry_safe(page, page2, l, lru) {
		list_del(&page->lru);
		dec_zone_page_state(page, NR_ISOLATED_ANON +
				page_is_file_cache(page));
		putback_lru_page(page);
	}
}

/*
 * Restore a potential migration pte to a working pte entry
 */
static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
				 unsigned long addr, void *old)
{
	struct mm_struct *mm = vma->vm_mm;
	swp_entry_t entry;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;
	spinlock_t *ptl;

	if (unlikely(PageHuge(new))) {
		ptep = huge_pte_offset(mm, addr);
		if (!ptep)
			goto out;
		ptl = &mm->page_table_lock;
	} else {
		pgd = pgd_offset(mm, addr);
		if (!pgd_present(*pgd))
			goto out;

		pud = pud_offset(pgd, addr);
		if (!pud_present(*pud))
			goto out;

		pmd = pmd_offset(pud, addr);
		if (!pmd_present(*pmd))
			goto out;

		ptep = pte_offset_map(pmd, addr);

		if (!is_swap_pte(*ptep)) {
			pte_unmap(ptep);
			goto out;
		}

		ptl = pte_lockptr(mm, pmd);
	}

	spin_lock(ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto unlock;

	entry = pte_to_swp_entry(pte);

	if (!is_migration_entry(entry) ||
	    migration_entry_to_page(entry) != old)
		goto unlock;

	get_page(new);
	pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
	if (is_write_migration_entry(entry))
		pte = pte_mkwrite(pte);
#ifdef CONFIG_HUGETLB_PAGE
	if (PageHuge(new))
		pte = pte_mkhuge(pte);
#endif
	flush_cache_page(vma, addr, pte_pfn(pte));
	set_pte_at(mm, addr, ptep, pte);

	if (PageHuge(new)) {
		if (PageAnon(new))
			hugepage_add_anon_rmap(new, vma, addr);
		else
			page_dup_rmap(new);
	} else if (PageAnon(new))
		page_add_anon_rmap(new, vma, addr);
	else
		page_add_file_rmap(new);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(vma, addr, ptep);
unlock:
	pte_unmap_unlock(ptep, ptl);
out:
	return SWAP_AGAIN;
}

/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 */
static void remove_migration_ptes(struct page *old, struct page *new)
{
	rmap_walk(new, remove_migration_pte, old);
}

/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 *
 * This function is called from do_swap_page().
 */
void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
				unsigned long address)
{
	pte_t *ptep, pte;
	spinlock_t *ptl;
	swp_entry_t entry;
	struct page *page;

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto out;

	entry = pte_to_swp_entry(pte);
	if (!is_migration_entry(entry))
		goto out;

	page = migration_entry_to_page(entry);

	/*
	 * Once the radix-tree replacement step of page migration has
	 * started, page_count *must* be zero. And we don't want to call
	 * wait_on_page_locked() against a page we don't hold a reference
	 * on, so we use get_page_unless_zero() here. Even if that fails,
	 * the page fault will simply occur again.
	 */
	if (!get_page_unless_zero(page))
		goto out;
	pte_unmap_unlock(ptep, ptl);
	wait_on_page_locked(page);
	put_page(page);
	return;
out:
	pte_unmap_unlock(ptep, ptl);
}

/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 * 1 for anonymous pages without a mapping
 * 2 for pages with a mapping
 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
 */
static int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page)
{
	int expected_count;
	void **pslot;

	if (!mapping) {
		/* Anonymous page without mapping */
		if (page_count(page) != 1)
			return -EAGAIN;
		return 0;
	}

	spin_lock_irq(&mapping->tree_lock);

	pslot = radix_tree_lookup_slot(&mapping->page_tree,
					page_index(page));

	expected_count = 2 + page_has_private(page);
	if (page_count(page) != expected_count ||
		(struct page *)radix_tree_deref_slot(pslot) != page) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	if (!page_freeze_refs(page, expected_count)) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	/*
	 * Now we know that no one else is looking at the page.
	 */
	get_page(newpage);	/* add cache reference */
	if (PageSwapCache(page)) {
		SetPageSwapCache(newpage);
		set_page_private(newpage, page_private(page));
	}

	radix_tree_replace_slot(pslot, newpage);

	page_unfreeze_refs(page, expected_count);
	/*
	 * Drop cache reference from old page.
	 * We know this isn't the last reference.
	 */
	__put_page(page);

	/*
	 * If moved to a different zone then also account
	 * the page for that zone. Other VM counters will be
	 * taken care of when we establish references to the
	 * new page and drop references to the old page.
	 *
	 * Note that anonymous pages are accounted for
	 * via NR_FILE_PAGES and NR_ANON_PAGES if they
	 * are mapped to swap space.
	 */
	__dec_zone_page_state(page, NR_FILE_PAGES);
	__inc_zone_page_state(newpage, NR_FILE_PAGES);
	if (PageSwapBacked(page)) {
		__dec_zone_page_state(page, NR_SHMEM);
		__inc_zone_page_state(newpage, NR_SHMEM);
	}
	spin_unlock_irq(&mapping->tree_lock);

	return 0;
}

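/*
 * Worked example of the reference arithmetic above (illustrative, following
 * the comment over migrate_page_move_mapping()). By the time we get here the
 * page has been unmapped, so ptes no longer contribute references:
 *
 *   anonymous page, not in swap cache:	mapping == NULL,
 *	1 reference  = isolation (isolate_lru_page)
 *   page-cache or swap-cache page:		expected_count = 2,
 *	2 references = isolation + the mapping's radix tree slot
 *   page-cache page with buffers/private data:	expected_count = 2 + 1,
 *	3 references = isolation + radix tree slot + PagePrivate
 *
 * Any extra reference (e.g. a concurrent get_page()) makes the counts
 * disagree and migration backs off with -EAGAIN.
 */
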
/*
 * The expected number of remaining references is the same as that
 * of migrate_page_move_mapping().
 */
int migrate_huge_page_move_mapping(struct address_space *mapping,
				   struct page *newpage, struct page *page)
{
	int expected_count;
	void **pslot;

	if (!mapping) {
		if (page_count(page) != 1)
			return -EAGAIN;
		return 0;
	}

	spin_lock_irq(&mapping->tree_lock);

	pslot = radix_tree_lookup_slot(&mapping->page_tree,
					page_index(page));

	expected_count = 2 + page_has_private(page);
	if (page_count(page) != expected_count ||
		(struct page *)radix_tree_deref_slot(pslot) != page) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	if (!page_freeze_refs(page, expected_count)) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	get_page(newpage);

	radix_tree_replace_slot(pslot, newpage);

	page_unfreeze_refs(page, expected_count);

	__put_page(page);

	spin_unlock_irq(&mapping->tree_lock);
	return 0;
}

/*
 * Copy the page to its new location
 */
void migrate_page_copy(struct page *newpage, struct page *page)
{
	if (PageHuge(page))
		copy_huge_page(newpage, page);
	else
		copy_highpage(newpage, page);

	if (PageError(page))
		SetPageError(newpage);
	if (PageReferenced(page))
		SetPageReferenced(newpage);
	if (PageUptodate(page))
		SetPageUptodate(newpage);
	if (TestClearPageActive(page)) {
		VM_BUG_ON(PageUnevictable(page));
		SetPageActive(newpage);
	} else if (TestClearPageUnevictable(page))
		SetPageUnevictable(newpage);
	if (PageChecked(page))
		SetPageChecked(newpage);
	if (PageMappedToDisk(page))
		SetPageMappedToDisk(newpage);

	if (PageDirty(page)) {
		clear_page_dirty_for_io(page);
		/*
		 * Want to mark the page and the radix tree as dirty, and
		 * redo the accounting that clear_page_dirty_for_io undid,
		 * but we can't use set_page_dirty because that function
		 * is actually a signal that all of the page has become dirty.
		 * Whereas only part of our page may be dirty.
		 */
		__set_page_dirty_nobuffers(newpage);
	}

	mlock_migrate_page(newpage, page);
	ksm_migrate_page(newpage, page);

	ClearPageSwapCache(page);
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page->mapping = NULL;

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (PageWriteback(newpage))
		end_page_writeback(newpage);
}

/************************************************************
 * Migration functions
 ***********************************************************/

/* Always fail migration. Used for mappings that are not movable */
int fail_migrate_page(struct address_space *mapping,
			struct page *newpage, struct page *page)
{
	return -EIO;
}
EXPORT_SYMBOL(fail_migrate_page);

/*
 * Common logic to directly migrate a single page suitable for
 * pages that do not use PagePrivate/PagePrivate2.
 *
 * Pages are locked upon entry and exit.
 */
int migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page)
{
	int rc;

	BUG_ON(PageWriteback(page));	/* Writeback must be complete */

	rc = migrate_page_move_mapping(mapping, newpage, page);

	if (rc)
		return rc;

	migrate_page_copy(newpage, page);
	return 0;
}
EXPORT_SYMBOL(migrate_page);

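/*
 * Illustrative only: a filesystem opts in to migration by pointing the
 * migratepage method of its address_space_operations at one of the helpers
 * in this file. The aops name below is made up for the example:
 *
 *	static const struct address_space_operations example_aops = {
 *		...
 *		.migratepage	= migrate_page,
 *	};
 *
 * Mappings backed by buffer_heads would point at buffer_migrate_page()
 * (defined below under CONFIG_BLOCK), mappings whose pages must never move
 * can use fail_migrate_page(), and a mapping that sets no method at all is
 * handled by fallback_migrate_page().
 */
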
#ifdef CONFIG_BLOCK
/*
 * Migration function for pages with buffers. This function can only be used
 * if the underlying filesystem guarantees that no other references to "page"
 * exist.
 */
int buffer_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page)
{
	struct buffer_head *bh, *head;
	int rc;

	if (!page_has_buffers(page))
		return migrate_page(mapping, newpage, page);

	head = page_buffers(page);

	rc = migrate_page_move_mapping(mapping, newpage, page);

	if (rc)
		return rc;

	bh = head;
	do {
		get_bh(bh);
		lock_buffer(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	ClearPagePrivate(page);
	set_page_private(newpage, page_private(page));
	set_page_private(page, 0);
	put_page(page);
	get_page(newpage);

	bh = head;
	do {
		set_bh_page(bh, newpage, bh_offset(bh));
		bh = bh->b_this_page;

	} while (bh != head);

	SetPagePrivate(newpage);

	migrate_page_copy(newpage, page);

	bh = head;
	do {
		unlock_buffer(bh);
		put_bh(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	return 0;
}
EXPORT_SYMBOL(buffer_migrate_page);
#endif

/*
 * Writeback a page to clean the dirty state
 */
static int writeout(struct address_space *mapping, struct page *page)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = 1,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.nonblocking = 1,
		.for_reclaim = 1
	};
	int rc;

	if (!mapping->a_ops->writepage)
		/* No write method for the address space */
		return -EINVAL;

	if (!clear_page_dirty_for_io(page))
		/* Someone else already triggered a write */
		return -EAGAIN;

	/*
	 * A dirty page may imply that the underlying filesystem has
	 * the page on some queue. So the page must be clean for
	 * migration. Writeout may mean we lose the lock and the
	 * page state is no longer what we checked for earlier.
	 * At this point we know that the migration attempt cannot
	 * be successful.
	 */
	remove_migration_ptes(page, page);

	rc = mapping->a_ops->writepage(page, &wbc);

	if (rc != AOP_WRITEPAGE_ACTIVATE)
		/* unlocked. Relock */
		lock_page(page);

	return (rc < 0) ? -EIO : -EAGAIN;
}

/*
 * Default handling if a filesystem does not provide a migration function.
 */
static int fallback_migrate_page(struct address_space *mapping,
	struct page *newpage, struct page *page)
{
	if (PageDirty(page))
		return writeout(mapping, page);

	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (page_has_private(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return -EAGAIN;

	return migrate_page(mapping, newpage, page);
}

/*
 * Move a page to a newly allocated page
 * The page is locked and all ptes have been successfully removed.
 *
 * The new page will have replaced the old page if this function
 * is successful.
 *
 * Return value:
 *   < 0 - error code
 *  == 0 - success
 */
static int move_to_new_page(struct page *newpage, struct page *page,
						int remap_swapcache)
{
	struct address_space *mapping;
	int rc;

	/*
	 * Block others from accessing the page when we get around to
	 * establishing additional references. We are the only one
	 * holding a reference to the new page at this point.
	 */
	if (!trylock_page(newpage))
		BUG();

	/* Prepare mapping for the new page. */
	newpage->index = page->index;
	newpage->mapping = page->mapping;
	if (PageSwapBacked(page))
		SetPageSwapBacked(newpage);

	mapping = page_mapping(page);
	if (!mapping)
		rc = migrate_page(mapping, newpage, page);
	else if (mapping->a_ops->migratepage)
		/*
		 * Most pages have a mapping and most filesystems
		 * should provide a migration function. Anonymous
		 * pages are part of swap space which also has its
		 * own migration function. This is the most common
		 * path for page migration.
		 */
		rc = mapping->a_ops->migratepage(mapping,
						newpage, page);
	else
		rc = fallback_migrate_page(mapping, newpage, page);

	if (rc) {
		newpage->mapping = NULL;
	} else {
		if (remap_swapcache)
			remove_migration_ptes(page, newpage);
	}

	unlock_page(newpage);

	return rc;
}

/*
 * Obtain the lock on page, remove all ptes and migrate the page
 * to the newly allocated page in newpage.
 */
static int unmap_and_move(new_page_t get_new_page, unsigned long private,
			struct page *page, int force, int offlining)
{
	int rc = 0;
	int *result = NULL;
	struct page *newpage = get_new_page(page, private, &result);
	int remap_swapcache = 1;
	int rcu_locked = 0;
	int charge = 0;
	struct mem_cgroup *mem = NULL;
	struct anon_vma *anon_vma = NULL;

	if (!newpage)
		return -ENOMEM;

	if (page_count(page) == 1) {
		/* page was freed from under us. So we are done. */
		goto move_newpage;
	}

	/* prepare cgroup just returns 0 or -ENOMEM */
	rc = -EAGAIN;

	if (!trylock_page(page)) {
		if (!force)
			goto move_newpage;
		lock_page(page);
	}

	/*
	 * Only memory hotplug's offline_pages() caller has locked out KSM,
	 * and can safely migrate a KSM page. The other cases have skipped
	 * PageKsm along with PageReserved - but it is only now when we have
	 * the page lock that we can be certain it will not go KSM beneath us
	 * (KSM will not upgrade a page from PageAnon to PageKsm when it sees
	 * its pagecount raised, but only here do we take the page lock which
	 * serializes that).
	 */
	if (PageKsm(page) && !offlining) {
		rc = -EBUSY;
		goto unlock;
	}

	/* charge against new page */
	charge = mem_cgroup_prepare_migration(page, newpage, &mem);
	if (charge == -ENOMEM) {
		rc = -ENOMEM;
		goto unlock;
	}
	BUG_ON(charge);

	if (PageWriteback(page)) {
		if (!force)
			goto uncharge;
		wait_on_page_writeback(page);
	}
	/*
	 * By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
	 * we cannot notice that anon_vma is freed while we migrate a page.
	 * This rcu_read_lock() delays freeing the anon_vma pointer until the
	 * end of migration. File cache pages are no problem because of
	 * page_lock(): file caches may use write_page() or lock_page() in
	 * migration, so only anon pages need care here.
	 */
	if (PageAnon(page)) {
		rcu_read_lock();
		rcu_locked = 1;

		/* Determine how to safely use anon_vma */
		if (!page_mapped(page)) {
			if (!PageSwapCache(page))
				goto rcu_unlock;

			/*
			 * We cannot be sure that the anon_vma of an unmapped
			 * swapcache page is safe to use because we don't
			 * know in advance if the VMA that this page belonged
			 * to still exists. If the VMA and others sharing the
			 * data have been freed, then the anon_vma could
			 * already be invalid.
			 *
			 * To avoid this possibility, swapcache pages get
			 * migrated but are not remapped when migration
			 * completes.
			 */
			remap_swapcache = 0;
		} else {
			/*
			 * Take a reference count on the anon_vma if the
			 * page is mapped so that it is guaranteed to
			 * exist when the page is remapped later
			 */
			anon_vma = page_anon_vma(page);
			get_anon_vma(anon_vma);
		}
	}

	/*
	 * Corner case handling:
	 * 1. When a new swap-cache page is read in, it is added to the LRU
	 * and treated as swapcache but it has no rmap yet.
	 * Calling try_to_unmap() against a page->mapping==NULL page will
	 * trigger a BUG. So handle it here.
	 * 2. An orphaned page (see truncate_complete_page) might have
	 * fs-private metadata. The page can be picked up due to memory
	 * offlining. Everywhere else except page reclaim, the page is
	 * invisible to the vm, so the page can not be migrated. So try to
	 * free the metadata so that the page can be freed.
	 */
	if (!page->mapping) {
		if (!PageAnon(page) && page_has_private(page)) {
			/*
			 * Go direct to try_to_free_buffers() here because
			 * a) that's what try_to_release_page() would do anyway
			 * b) we may be under rcu_read_lock() here, so we can't
			 *    use GFP_KERNEL which is what try_to_release_page()
			 *    needs to be effective.
			 */
			try_to_free_buffers(page);
			goto rcu_unlock;
		}
		goto skip_unmap;
	}

	/* Establish migration ptes or remove ptes */
	try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);

skip_unmap:
	if (!page_mapped(page))
		rc = move_to_new_page(newpage, page, remap_swapcache);

	if (rc && remap_swapcache)
		remove_migration_ptes(page, page);
rcu_unlock:

	/* Drop an anon_vma reference if we took one */
	if (anon_vma)
		drop_anon_vma(anon_vma);

	if (rcu_locked)
		rcu_read_unlock();
uncharge:
	if (!charge)
		mem_cgroup_end_migration(mem, page, newpage);
unlock:
	unlock_page(page);

	if (rc != -EAGAIN) {
		/*
		 * A page that has been migrated has all references
		 * removed and will be freed. A page that has not been
		 * migrated will have kept its references and be
		 * restored.
		 */
		list_del(&page->lru);
		dec_zone_page_state(page, NR_ISOLATED_ANON +
				page_is_file_cache(page));
		putback_lru_page(page);
	}

move_newpage:

	/*
	 * Move the new page to the LRU. If migration was not successful
	 * then this will free the page.
	 */
	putback_lru_page(newpage);

	if (result) {
		if (rc)
			*result = rc;
		else
			*result = page_to_nid(newpage);
	}
	return rc;
}

/*
 * Counterpart of unmap_and_move() for hugepage migration.
 *
 * This function doesn't wait for the completion of hugepage I/O
 * because there is no race between I/O and migration for hugepage.
 * Note that currently hugepage I/O occurs only in direct I/O
 * where no lock is held and PG_writeback is irrelevant,
 * and the writeback status of all subpages is counted in the reference
 * count of the head page (i.e. if all subpages of a 2MB hugepage are
 * under direct I/O, the reference of the head page is 512 and a bit more.)
 * This means that when we try to migrate a hugepage whose subpages are
 * doing direct I/O, some references remain after try_to_unmap() and
 * hugepage migration fails without data corruption.
 *
 * There is also no race when direct I/O is issued on the page under migration,
 * because then the pte is replaced with a migration swap entry and the direct
 * I/O code will wait in the page fault for migration to complete.
 */
static int unmap_and_move_huge_page(new_page_t get_new_page,
				unsigned long private, struct page *hpage,
				int force, int offlining)
{
	int rc = 0;
	int *result = NULL;
	struct page *new_hpage = get_new_page(hpage, private, &result);
	int rcu_locked = 0;
	struct anon_vma *anon_vma = NULL;

	if (!new_hpage)
		return -ENOMEM;

	rc = -EAGAIN;

	if (!trylock_page(hpage)) {
		if (!force)
			goto out;
		lock_page(hpage);
	}

	if (PageAnon(hpage)) {
		rcu_read_lock();
		rcu_locked = 1;

		if (page_mapped(hpage)) {
			anon_vma = page_anon_vma(hpage);
			atomic_inc(&anon_vma->external_refcount);
		}
	}

	try_to_unmap(hpage, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);

	if (!page_mapped(hpage))
		rc = move_to_new_page(new_hpage, hpage, 1);

	if (rc)
		remove_migration_ptes(hpage, hpage);

	if (anon_vma && atomic_dec_and_lock(&anon_vma->external_refcount,
					    &anon_vma->lock)) {
		int empty = list_empty(&anon_vma->head);
		spin_unlock(&anon_vma->lock);
		if (empty)
			anon_vma_free(anon_vma);
	}

	if (rcu_locked)
		rcu_read_unlock();
out:
	unlock_page(hpage);

	if (rc != -EAGAIN) {
		list_del(&hpage->lru);
		put_page(hpage);
	}

	put_page(new_hpage);

	if (result) {
		if (rc)
			*result = rc;
		else
			*result = page_to_nid(new_hpage);
	}
	return rc;
}

/*
 * migrate_pages
 *
 * The function takes one list of pages to migrate and a function
 * that, given a page to be migrated and the private data, determines
 * the target of the move and allocates the new page.
 *
 * The function returns after 10 attempts or if no pages
 * are movable anymore because the list has become empty
 * or no retryable pages exist anymore. All pages will be
 * returned to the LRU or freed.
 *
 * Return: Number of pages not migrated or error code.
 */
int migrate_pages(struct list_head *from,
		new_page_t get_new_page, unsigned long private, int offlining)
{
	int retry = 1;
	int nr_failed = 0;
	int pass = 0;
	struct page *page;
	struct page *page2;
	int swapwrite = current->flags & PF_SWAPWRITE;
	int rc;

	if (!swapwrite)
		current->flags |= PF_SWAPWRITE;

	for (pass = 0; pass < 10 && retry; pass++) {
		retry = 0;

		list_for_each_entry_safe(page, page2, from, lru) {
			cond_resched();

			rc = unmap_and_move(get_new_page, private,
						page, pass > 2, offlining);

			switch (rc) {
			case -ENOMEM:
				goto out;
			case -EAGAIN:
				retry++;
				break;
			case 0:
				break;
			default:
				/* Permanent failure */
				nr_failed++;
				break;
			}
		}
	}
	rc = 0;
out:
	if (!swapwrite)
		current->flags &= ~PF_SWAPWRITE;

	putback_lru_pages(from);

	if (rc)
		return rc;

	return nr_failed + retry;
}

int migrate_huge_pages(struct list_head *from,
		new_page_t get_new_page, unsigned long private, int offlining)
{
	int retry = 1;
	int nr_failed = 0;
	int pass = 0;
	struct page *page;
	struct page *page2;
	int rc;

	for (pass = 0; pass < 10 && retry; pass++) {
		retry = 0;

		list_for_each_entry_safe(page, page2, from, lru) {
			cond_resched();

			rc = unmap_and_move_huge_page(get_new_page,
					private, page, pass > 2, offlining);

			switch (rc) {
			case -ENOMEM:
				goto out;
			case -EAGAIN:
				retry++;
				break;
			case 0:
				break;
			default:
				/* Permanent failure */
				nr_failed++;
				break;
			}
		}
	}
	rc = 0;
out:

	list_for_each_entry_safe(page, page2, from, lru)
		put_page(page);

	if (rc)
		return rc;

	return nr_failed + retry;
}

#ifdef CONFIG_NUMA
/*
 * Move a list of individual pages
 */
struct page_to_node {
	unsigned long addr;
	struct page *page;
	int node;
	int status;
};

static struct page *new_page_node(struct page *p, unsigned long private,
		int **result)
{
	struct page_to_node *pm = (struct page_to_node *)private;

	while (pm->node != MAX_NUMNODES && pm->page != p)
		pm++;

	if (pm->node == MAX_NUMNODES)
		return NULL;

	*result = &pm->status;

	return alloc_pages_exact_node(pm->node,
				GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0);
}
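
/*
 * Illustrative only: the private argument that do_pages_move() passes down
 * is a page-sized array of page_to_node entries terminated by a
 * node == MAX_NUMNODES marker, e.g. for two pages destined for nodes 1 and 0
 * (addr0/addr1 are placeholder user addresses):
 *
 *	pm[0] = { .addr = addr0, .node = 1 };	(filled from user space)
 *	pm[1] = { .addr = addr1, .node = 0 };
 *	pm[2] = { .node = MAX_NUMNODES };	(end marker)
 *
 * do_move_page_to_node_array() later fills in .page and .status, and
 * new_page_node() above scans the array to find the entry for the page
 * being migrated so it can allocate the new page on the requested node.
 */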

/*
 * Move a set of pages as indicated in the pm array. The addr
 * field must be set to the virtual address of the page to be moved
 * and the node number must contain a valid target node.
 * The pm array ends with node = MAX_NUMNODES.
 */
static int do_move_page_to_node_array(struct mm_struct *mm,
				      struct page_to_node *pm,
				      int migrate_all)
{
	int err;
	struct page_to_node *pp;
	LIST_HEAD(pagelist);

	down_read(&mm->mmap_sem);

	/*
	 * Build a list of pages to migrate
	 */
	for (pp = pm; pp->node != MAX_NUMNODES; pp++) {
		struct vm_area_struct *vma;
		struct page *page;

		err = -EFAULT;
		vma = find_vma(mm, pp->addr);
		if (!vma || !vma_migratable(vma))
			goto set_status;

		page = follow_page(vma, pp->addr, FOLL_GET);

		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto set_status;

		err = -ENOENT;
		if (!page)
			goto set_status;

		/* Use PageReserved to check for zero page */
		if (PageReserved(page) || PageKsm(page))
			goto put_and_set;

		pp->page = page;
		err = page_to_nid(page);

		if (err == pp->node)
			/*
			 * Node already in the right place
			 */
			goto put_and_set;

		err = -EACCES;
		if (page_mapcount(page) > 1 &&
				!migrate_all)
			goto put_and_set;

		err = isolate_lru_page(page);
		if (!err) {
			list_add_tail(&page->lru, &pagelist);
			inc_zone_page_state(page, NR_ISOLATED_ANON +
						page_is_file_cache(page));
		}
put_and_set:
		/*
		 * Either remove the duplicate refcount from
		 * isolate_lru_page() or drop the page ref if it was
		 * not isolated.
		 */
		put_page(page);
set_status:
		pp->status = err;
	}

	err = 0;
	if (!list_empty(&pagelist))
		err = migrate_pages(&pagelist, new_page_node,
				(unsigned long)pm, 0);

	up_read(&mm->mmap_sem);
	return err;
}

/*
 * Migrate an array of page addresses onto an array of nodes and fill
 * the corresponding array of status.
 */
static int do_pages_move(struct mm_struct *mm, struct task_struct *task,
			 unsigned long nr_pages,
			 const void __user * __user *pages,
			 const int __user *nodes,
			 int __user *status, int flags)
{
	struct page_to_node *pm;
	nodemask_t task_nodes;
	unsigned long chunk_nr_pages;
	unsigned long chunk_start;
	int err;

	task_nodes = cpuset_mems_allowed(task);

	err = -ENOMEM;
	pm = (struct page_to_node *)__get_free_page(GFP_KERNEL);
	if (!pm)
		goto out;

	migrate_prep();

	/*
	 * Store a chunk of page_to_node array in a page,
	 * but keep the last one as a marker
	 */
	chunk_nr_pages = (PAGE_SIZE / sizeof(struct page_to_node)) - 1;

	for (chunk_start = 0;
	     chunk_start < nr_pages;
	     chunk_start += chunk_nr_pages) {
		int j;

		if (chunk_start + chunk_nr_pages > nr_pages)
			chunk_nr_pages = nr_pages - chunk_start;

		/* fill the chunk pm with addrs and nodes from user-space */
		for (j = 0; j < chunk_nr_pages; j++) {
			const void __user *p;
			int node;

			err = -EFAULT;
			if (get_user(p, pages + j + chunk_start))
				goto out_pm;
			pm[j].addr = (unsigned long) p;

			if (get_user(node, nodes + j + chunk_start))
				goto out_pm;

			err = -ENODEV;
			if (node < 0 || node >= MAX_NUMNODES)
				goto out_pm;

			if (!node_state(node, N_HIGH_MEMORY))
				goto out_pm;

			err = -EACCES;
			if (!node_isset(node, task_nodes))
				goto out_pm;

			pm[j].node = node;
		}

		/* End marker for this chunk */
		pm[chunk_nr_pages].node = MAX_NUMNODES;

		/* Migrate this chunk */
		err = do_move_page_to_node_array(mm, pm,
						 flags & MPOL_MF_MOVE_ALL);
		if (err < 0)
			goto out_pm;

		/* Return status information */
		for (j = 0; j < chunk_nr_pages; j++)
			if (put_user(pm[j].status, status + j + chunk_start)) {
				err = -EFAULT;
				goto out_pm;
			}
	}
	err = 0;

out_pm:
	free_page((unsigned long)pm);
out:
	return err;
}

/*
 * Determine the nodes of an array of pages and store it in an array of status.
 */
static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
				const void __user **pages, int *status)
{
	unsigned long i;

	down_read(&mm->mmap_sem);

	for (i = 0; i < nr_pages; i++) {
		unsigned long addr = (unsigned long)(*pages);
		struct vm_area_struct *vma;
		struct page *page;
		int err = -EFAULT;

		vma = find_vma(mm, addr);
		if (!vma)
			goto set_status;

		page = follow_page(vma, addr, 0);

		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto set_status;

		err = -ENOENT;
		/* Use PageReserved to check for zero page */
		if (!page || PageReserved(page) || PageKsm(page))
			goto set_status;

		err = page_to_nid(page);
set_status:
		*status = err;

		pages++;
		status++;
	}

	up_read(&mm->mmap_sem);
}

/*
 * Determine the nodes of a user array of pages and store it in
 * a user array of status.
 */
static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
			 const void __user * __user *pages,
			 int __user *status)
{
#define DO_PAGES_STAT_CHUNK_NR 16
	const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
	int chunk_status[DO_PAGES_STAT_CHUNK_NR];

	while (nr_pages) {
		unsigned long chunk_nr;

		chunk_nr = nr_pages;
		if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
			chunk_nr = DO_PAGES_STAT_CHUNK_NR;

		if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages)))
			break;

		do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);

		if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
			break;

		pages += chunk_nr;
		status += chunk_nr;
		nr_pages -= chunk_nr;
	}
	return nr_pages ? -EFAULT : 0;
}

/*
 * Move a list of pages in the address space of the currently executing
 * process.
 */
SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
		const void __user * __user *, pages,
		const int __user *, nodes,
		int __user *, status, int, flags)
{
	const struct cred *cred = current_cred(), *tcred;
	struct task_struct *task;
	struct mm_struct *mm;
	int err;

	/* Check flags */
	if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
		return -EINVAL;

	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	/* Find the mm_struct */
	read_lock(&tasklist_lock);
	task = pid ? find_task_by_vpid(pid) : current;
	if (!task) {
		read_unlock(&tasklist_lock);
		return -ESRCH;
	}
	mm = get_task_mm(task);
	read_unlock(&tasklist_lock);

	if (!mm)
		return -EINVAL;

	/*
	 * Check if this process has the right to modify the specified
	 * process. The right exists if the process has administrative
	 * capabilities, superuser privileges or the same
	 * userid as the target process.
	 */
	rcu_read_lock();
	tcred = __task_cred(task);
	if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
	    cred->uid  != tcred->suid && cred->uid  != tcred->uid &&
	    !capable(CAP_SYS_NICE)) {
		rcu_read_unlock();
		err = -EPERM;
		goto out;
	}
	rcu_read_unlock();

	err = security_task_movememory(task);
	if (err)
		goto out;

	if (nodes) {
		err = do_pages_move(mm, task, nr_pages, pages, nodes, status,
				    flags);
	} else {
		err = do_pages_stat(mm, nr_pages, pages, status);
	}

out:
	mmput(mm);
	return err;
}

/*
 * Call migration functions in the vma_ops that may prepare
 * memory in a vm for migration. migration functions may perform
 * the migration for vmas that do not have an underlying page struct.
 */
int migrate_vmas(struct mm_struct *mm, const nodemask_t *to,
	const nodemask_t *from, unsigned long flags)
{
	struct vm_area_struct *vma;
	int err = 0;

	for (vma = mm->mmap; vma && !err; vma = vma->vm_next) {
		if (vma->vm_ops && vma->vm_ops->migrate) {
			err = vma->vm_ops->migrate(vma, to, from, flags);
			if (err)
				break;
		}
	}
	return err;
}
#endif
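
/*
 * Illustrative userspace sketch: the syscall defined above is normally
 * reached through libnuma's move_pages() wrapper from <numaif.h>. The page
 * count, address and target node below are made up for the example; passing
 * nodes == NULL turns the call into a pure status query (see do_pages_stat).
 *
 *	#include <numaif.h>
 *
 *	void *pages[1] = { some_addr };
 *	int nodes[1]   = { 1 };
 *	int status[1];
 *
 *	long rc = move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE);
 *	// pid 0 means the calling process; on success status[0] holds the
 *	// node the page ended up on, or a negative errno for that page.
 */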