rmap.c revision cbf84b7add8103b92aaa84928e335df726bfc8da
/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins <hugh@veritas.com> 2003, 2004
 */

/*
 * Lock ordering in mm:
 *
 * inode->i_mutex	(while writing or truncating, not reading or faulting)
 *   inode->i_alloc_sem (vmtruncate_range)
 *   mm->mmap_sem
 *     page->flags PG_locked (lock_page)
 *       mapping->i_mmap_lock
 *         anon_vma->lock
 *           mm->page_table_lock or pte_lock
 *             zone->lru_lock (in mark_page_accessed, isolate_lru_page)
 *             swap_lock (in swap_duplicate, swap_info_get)
 *               mmlist_lock (in mmput, drain_mmlist and others)
 *               mapping->private_lock (in __set_page_dirty_buffers)
 *               inode_lock (in set_page_dirty's __mark_inode_dirty)
 *                 sb_lock (within inode_lock in fs/fs-writeback.c)
 *                 mapping->tree_lock (widely used, in set_page_dirty,
 *                           in arch-dependent flush_dcache_mmap_lock,
 *                           within inode_lock in __sync_single_inode)
 */

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>

#include <asm/tlbflush.h>

#include "internal.h"

static struct kmem_cache *anon_vma_cachep;

static inline struct anon_vma *anon_vma_alloc(void)
{
	return kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
}

static inline void anon_vma_free(struct anon_vma *anon_vma)
{
	kmem_cache_free(anon_vma_cachep, anon_vma);
}

/**
 * anon_vma_prepare - attach an anon_vma to a memory region
 * @vma: the memory region in question
 *
 * This makes sure the memory mapping described by 'vma' has
 * an 'anon_vma' attached to it, so that we can associate the
 * anonymous pages mapped into it with that anon_vma.
 *
 * The common case will be that we already have one, but if
 * if not we either need to find an adjacent mapping that we
 * can re-use the anon_vma from (very common when the only
 * reason for splitting a vma has been mprotect()), or we
 * allocate a new one.
 *
 * Anon-vma allocations are very subtle, because we may have
 * optimistically looked up an anon_vma in page_lock_anon_vma()
 * and that may actually touch the spinlock even in the newly
 * allocated vma (it depends on RCU to make sure that the
 * anon_vma isn't actually destroyed).
 *
 * As a result, we need to do proper anon_vma locking even
 * for the new allocation. At the same time, we do not want
 * to do any locking for the common case of already having
 * an anon_vma.
 *
 * This must be called with the mmap_sem held for reading.
 */
int anon_vma_prepare(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	might_sleep();
	if (unlikely(!anon_vma)) {
		struct mm_struct *mm = vma->vm_mm;
		struct anon_vma *allocated;

		anon_vma = find_mergeable_anon_vma(vma);
		allocated = NULL;
		if (!anon_vma) {
			anon_vma = anon_vma_alloc();
			if (unlikely(!anon_vma))
				return -ENOMEM;
			allocated = anon_vma;
		}
		spin_lock(&anon_vma->lock);

		/* page_table_lock to protect against threads */
		spin_lock(&mm->page_table_lock);
		if (likely(!vma->anon_vma)) {
			vma->anon_vma = anon_vma;
			list_add_tail(&vma->anon_vma_node, &anon_vma->head);
			allocated = NULL;
		}
		spin_unlock(&mm->page_table_lock);

		spin_unlock(&anon_vma->lock);
		if (unlikely(allocated))
			anon_vma_free(allocated);
	}
	return 0;
}
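
/*
 * Usage sketch (illustrative only, not part of this file): the anonymous
 * fault path in mm/memory.c pairs anon_vma_prepare() with
 * page_add_new_anon_rmap() under the pte lock, roughly:
 *
 *	if (unlikely(anon_vma_prepare(vma)))
 *		return VM_FAULT_OOM;
 *	page = alloc_zeroed_user_highpage_movable(vma, address);
 *	...
 *	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
 *	page_add_new_anon_rmap(page, vma, address);
 *	set_pte_at(mm, address, page_table, entry);
 *
 * so the new page is hooked into the vma's anon_vma before the pte that
 * maps it becomes visible.
 */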

void __anon_vma_merge(struct vm_area_struct *vma, struct vm_area_struct *next)
{
	BUG_ON(vma->anon_vma != next->anon_vma);
	list_del(&next->anon_vma_node);
}

void __anon_vma_link(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	if (anon_vma)
		list_add_tail(&vma->anon_vma_node, &anon_vma->head);
}

void anon_vma_link(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	if (anon_vma) {
		spin_lock(&anon_vma->lock);
		list_add_tail(&vma->anon_vma_node, &anon_vma->head);
		spin_unlock(&anon_vma->lock);
	}
}

void anon_vma_unlink(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	int empty;

	if (!anon_vma)
		return;

	spin_lock(&anon_vma->lock);
	list_del(&vma->anon_vma_node);

	/* We must garbage collect the anon_vma if it's empty */
	empty = list_empty(&anon_vma->head);
	spin_unlock(&anon_vma->lock);

	if (empty)
		anon_vma_free(anon_vma);
}

static void anon_vma_ctor(void *data)
{
	struct anon_vma *anon_vma = data;

	spin_lock_init(&anon_vma->lock);
	INIT_LIST_HEAD(&anon_vma->head);
}

void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
}

/*
 * Getting a lock on a stable anon_vma from a page off the LRU is
 * tricky: page_lock_anon_vma rely on RCU to guard against the races.
 */
static struct anon_vma *page_lock_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long) page->mapping;
	if (!(anon_mapping & PAGE_MAPPING_ANON))
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	spin_lock(&anon_vma->lock);
	return anon_vma;
out:
	rcu_read_unlock();
	return NULL;
}

static void page_unlock_anon_vma(struct anon_vma *anon_vma)
{
	spin_unlock(&anon_vma->lock);
	rcu_read_unlock();
}

/*
 * At what user virtual address is page expected in @vma?
 * Returns virtual address or -EFAULT if page's index/offset is not
 * within the range mapped the @vma.
 */
static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
		/* page should be within @vma mapping range */
		return -EFAULT;
	}
	return address;
}

/*
 * At what user virtual address is page expected in vma? checking that the
 * page matches the vma: currently only used on anon pages, by unuse_vma;
 */
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
	if (PageAnon(page)) {
		if ((void *)vma->anon_vma !=
		    (void *)page->mapping - PAGE_MAPPING_ANON)
			return -EFAULT;
	} else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
		if (!vma->vm_file ||
		    vma->vm_file->f_mapping != page->mapping)
			return -EFAULT;
	} else
		return -EFAULT;
	return vma_address(page, vma);
}

/*
 * Check that @page is mapped at @address into @mm.
 *
 * If @sync is false, page_check_address may perform a racy check to avoid
 * the page table lock when the pte is not present (helpful when reclaiming
 * highly shared pages).
 *
 * On success returns with pte mapped and locked.
 */
pte_t *page_check_address(struct page *page, struct mm_struct *mm,
			  unsigned long address, spinlock_t **ptlp, int sync)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return NULL;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return NULL;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return NULL;

	pte = pte_offset_map(pmd, address);
	/* Make a quick check before getting the lock */
	if (!sync && !pte_present(*pte)) {
		pte_unmap(pte);
		return NULL;
	}

	ptl = pte_lockptr(mm, pmd);
	spin_lock(ptl);
	if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
		*ptlp = ptl;
		return pte;
	}
	pte_unmap_unlock(pte, ptl);
	return NULL;
}

/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA.  Only
 * valid for normal file or anonymous VMAs.
 */
static int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
	unsigned long address;
	pte_t *pte;
	spinlock_t *ptl;

	address = vma_address(page, vma);
	if (address == -EFAULT)		/* out of vma range */
		return 0;
	pte = page_check_address(page, vma->vm_mm, address, &ptl, 1);
	if (!pte)			/* the page is not in this mm */
		return 0;
	pte_unmap_unlock(pte, ptl);

	return 1;
}

/*
 * Subfunctions of page_referenced: page_referenced_one called
 * repeatedly from either page_referenced_anon or page_referenced_file.
 */
static int page_referenced_one(struct page *page,
	struct vm_area_struct *vma, unsigned int *mapcount)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	spinlock_t *ptl;
	int referenced = 0;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address, &ptl, 0);
	if (!pte)
		goto out;

	/*
	 * Don't want to elevate referenced for mlocked page that gets this far,
	 * in order that it progresses to try_to_unmap and is moved to the
	 * unevictable list.
	 */
	if (vma->vm_flags & VM_LOCKED) {
		*mapcount = 1;	/* break early from loop */
		goto out_unmap;
	}

	if (ptep_clear_flush_young_notify(vma, address, pte)) {
		/*
		 * Don't treat a reference through a sequentially read
		 * mapping as such.  If the page has been used in
		 * another mapping, we will catch it; if this other
		 * mapping is already gone, the unmap path will have
		 * set PG_referenced or activated the page.
		 */
		if (likely(!VM_SequentialReadHint(vma)))
			referenced++;
	}

	/* Pretend the page is referenced if the task has the
	   swap token and is in the middle of a page fault. */
	if (mm != current->mm && has_swap_token(mm) &&
			rwsem_is_locked(&mm->mmap_sem))
		referenced++;

out_unmap:
	(*mapcount)--;
	pte_unmap_unlock(pte, ptl);
out:
	return referenced;
}

static int page_referenced_anon(struct page *page,
				struct mem_cgroup *mem_cont)
{
	unsigned int mapcount;
	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;
	int referenced = 0;

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return referenced;

	mapcount = page_mapcount(page);
	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		/*
		 * If we are reclaiming on behalf of a cgroup, skip
		 * counting on behalf of references from different
		 * cgroups
		 */
		if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
			continue;
		referenced += page_referenced_one(page, vma, &mapcount);
		if (!mapcount)
			break;
	}

	page_unlock_anon_vma(anon_vma);
	return referenced;
}

/**
 * page_referenced_file - referenced check for object-based rmap
 * @page: the page we're checking references on.
 * @mem_cont: target memory controller
 *
 * For an object-based mapped page, find all the places it is mapped and
 * check/clear the referenced flag.  This is done by following the page->mapping
 * pointer, then walking the chain of vmas it holds.  It returns the number
 * of references it found.
 *
 * This function is only called from page_referenced for object-based pages.
 */
static int page_referenced_file(struct page *page,
				struct mem_cgroup *mem_cont)
{
	unsigned int mapcount;
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int referenced = 0;

	/*
	 * The caller's checks on page->mapping and !PageAnon have made
	 * sure that this is a file page: the check for page->mapping
	 * excludes the case just before it gets set on an anon page.
	 */
	BUG_ON(PageAnon(page));

	/*
	 * The page lock not only makes sure that page->mapping cannot
	 * suddenly be NULLified by truncation, it makes sure that the
	 * structure at mapping cannot be freed and reused yet,
	 * so we can safely take mapping->i_mmap_lock.
	 */
	BUG_ON(!PageLocked(page));

	spin_lock(&mapping->i_mmap_lock);

	/*
	 * i_mmap_lock does not stabilize mapcount at all, but mapcount
	 * is more likely to be accurate if we note it after spinning.
	 */
	mapcount = page_mapcount(page);

	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		/*
		 * If we are reclaiming on behalf of a cgroup, skip
		 * counting on behalf of references from different
		 * cgroups
		 */
		if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
			continue;
		referenced += page_referenced_one(page, vma, &mapcount);
		if (!mapcount)
			break;
	}

	spin_unlock(&mapping->i_mmap_lock);
	return referenced;
}

/**
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds lock on the page
 * @mem_cont: target memory controller
 *
 * Quick test_and_clear_referenced for all mappings to a page,
 * returns the number of ptes which referenced the page.
 */
int page_referenced(struct page *page, int is_locked,
			struct mem_cgroup *mem_cont)
{
	int referenced = 0;

	if (TestClearPageReferenced(page))
		referenced++;

	if (page_mapped(page) && page->mapping) {
		if (PageAnon(page))
			referenced += page_referenced_anon(page, mem_cont);
		else if (is_locked)
			referenced += page_referenced_file(page, mem_cont);
		else if (!trylock_page(page))
			referenced++;
		else {
			if (page->mapping)
				referenced +=
					page_referenced_file(page, mem_cont);
			unlock_page(page);
		}
	}

	if (page_test_and_clear_young(page))
		referenced++;

	return referenced;
}

static int page_mkclean_one(struct page *page, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	spinlock_t *ptl;
	int ret = 0;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address, &ptl, 1);
	if (!pte)
		goto out;

	if (pte_dirty(*pte) || pte_write(*pte)) {
		pte_t entry;

		flush_cache_page(vma, address, pte_pfn(*pte));
		entry = ptep_clear_flush_notify(vma, address, pte);
		entry = pte_wrprotect(entry);
		entry = pte_mkclean(entry);
		set_pte_at(mm, address, pte, entry);
		ret = 1;
	}

	pte_unmap_unlock(pte, ptl);
out:
	return ret;
}

static int page_mkclean_file(struct address_space *mapping, struct page *page)
{
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int ret = 0;

	BUG_ON(PageAnon(page));

	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		if (vma->vm_flags & VM_SHARED)
			ret += page_mkclean_one(page, vma);
	}
	spin_unlock(&mapping->i_mmap_lock);
	return ret;
}

int page_mkclean(struct page *page)
{
	int ret = 0;

	BUG_ON(!PageLocked(page));

	if (page_mapped(page)) {
		struct address_space *mapping = page_mapping(page);
		if (mapping) {
			ret = page_mkclean_file(mapping, page);
			if (page_test_dirty(page)) {
				page_clear_dirty(page);
				ret = 1;
			}
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(page_mkclean);
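
/*
 * Usage sketch (illustrative only, not part of this file): the writeback
 * path uses page_mkclean() to write-protect all ptes of a shared-mapped
 * page, and transfers any pte dirty bit back to the struct page so the
 * data still gets written out, roughly:
 *
 *	if (page_mkclean(page))
 *		set_page_dirty(page);
 *
 * See clear_page_dirty_for_io() in mm/page-writeback.c for the real caller.
 */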

/**
 * __page_set_anon_rmap - setup new anonymous rmap
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 */
static void __page_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);
	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;

	page->index = linear_page_index(vma, address);

	/*
	 * nr_mapped state can be updated without turning off
	 * interrupts because it is not modified via interrupt.
	 */
	__inc_zone_page_state(page, NR_ANON_PAGES);
}

/**
 * __page_check_anon_rmap - sanity check anonymous rmap addition
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 */
static void __page_check_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
#ifdef CONFIG_DEBUG_VM
	/*
	 * The page's anon-rmap details (mapping and index) are guaranteed to
	 * be set up correctly at this point.
	 *
	 * We have exclusion against page_add_anon_rmap because the caller
	 * always holds the page locked, except if called from page_dup_rmap,
	 * in which case the page is already known to be setup.
	 *
	 * We have exclusion against page_add_new_anon_rmap because those pages
	 * are initially only visible via the pagetables, and the pte is locked
	 * over the call to page_add_new_anon_rmap.
	 */
	struct anon_vma *anon_vma = vma->anon_vma;
	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	BUG_ON(page->mapping != (struct address_space *)anon_vma);
	BUG_ON(page->index != linear_page_index(vma, address));
#endif
}

/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 *
 * The caller needs to hold the pte lock and the page must be locked.
 */
void page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	if (atomic_inc_and_test(&page->_mapcount))
		__page_set_anon_rmap(page, vma, address);
	else
		__page_check_anon_rmap(page, vma, address);
}
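
/*
 * Usage sketch (illustrative only, not part of this file): a fault path
 * that maps an anonymous page which already exists, such as the swap-in
 * path in mm/memory.c, does roughly:
 *
 *	inc_mm_counter(mm, anon_rss);
 *	set_pte_at(mm, address, page_table, pte);
 *	page_add_anon_rmap(page, vma, address);
 *
 * with the page locked and the pte lock held, as required above.
 */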

/**
 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 *
 * Same as page_add_anon_rmap but must only be called on *new* pages.
 * This means the inc-and-test can be bypassed.
 * Page does not have to be locked.
 */
void page_add_new_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	SetPageSwapBacked(page);
	atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */
	__page_set_anon_rmap(page, vma, address);
	if (page_evictable(page, vma))
		lru_cache_add_lru(page, LRU_ACTIVE_ANON);
	else
		add_page_to_unevictable_list(page);
}

/**
 * page_add_file_rmap - add pte mapping to a file page
 * @page: the page to add the mapping to
 *
 * The caller needs to hold the pte lock.
 */
void page_add_file_rmap(struct page *page)
{
	if (atomic_inc_and_test(&page->_mapcount))
		__inc_zone_page_state(page, NR_FILE_MAPPED);
}

#ifdef CONFIG_DEBUG_VM
/**
 * page_dup_rmap - duplicate pte mapping to a page
 * @page: the page to add the mapping to
 * @vma: the vm area being duplicated
 * @address: the user virtual address mapped
 *
 * For copy_page_range only: minimal extract from page_add_file_rmap /
 * page_add_anon_rmap, avoiding unnecessary tests (already checked) so it's
 * quicker.
 *
 * The caller needs to hold the pte lock.
 */
void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)
{
	BUG_ON(page_mapcount(page) == 0);
	if (PageAnon(page))
		__page_check_anon_rmap(page, vma, address);
	atomic_inc(&page->_mapcount);
}
#endif

/**
 * page_remove_rmap - take down pte mapping from a page
 * @page: page to remove mapping from
 * @vma: the vm area in which the mapping is removed
 *
 * The caller needs to hold the pte lock.
 */
void page_remove_rmap(struct page *page, struct vm_area_struct *vma)
{
	if (atomic_add_negative(-1, &page->_mapcount)) {
		if (unlikely(page_mapcount(page) < 0)) {
			printk (KERN_EMERG "Eeek! page_mapcount(page) went negative! (%d)\n", page_mapcount(page));
			printk (KERN_EMERG "  page pfn = %lx\n", page_to_pfn(page));
			printk (KERN_EMERG "  page->flags = %lx\n", page->flags);
			printk (KERN_EMERG "  page->count = %x\n", page_count(page));
			printk (KERN_EMERG "  page->mapping = %p\n", page->mapping);
			print_symbol (KERN_EMERG "  vma->vm_ops = %s\n", (unsigned long)vma->vm_ops);
			if (vma->vm_ops) {
				print_symbol (KERN_EMERG "  vma->vm_ops->fault = %s\n", (unsigned long)vma->vm_ops->fault);
			}
			if (vma->vm_file && vma->vm_file->f_op)
				print_symbol (KERN_EMERG "  vma->vm_file->f_op->mmap = %s\n", (unsigned long)vma->vm_file->f_op->mmap);
			BUG();
		}

		/*
		 * Now that the last pte has gone, s390 must transfer dirty
		 * flag from storage key to struct page.  We can usually skip
		 * this if the page is anon, so about to be freed; but perhaps
		 * not if it's in swapcache - there might be another pte slot
		 * containing the swap entry, but page not yet written to swap.
		 */
		if ((!PageAnon(page) || PageSwapCache(page)) &&
		    page_test_dirty(page)) {
			page_clear_dirty(page);
			set_page_dirty(page);
		}
		if (PageAnon(page))
			mem_cgroup_uncharge_page(page);
		__dec_zone_page_state(page,
			PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED);
		/*
		 * It would be tidy to reset the PageAnon mapping here,
		 * but that might overwrite a racing page_add_anon_rmap
		 * which increments mapcount after us but sets mapping
		 * before us: so leave the reset to free_hot_cold_page,
		 * and remember that it's only reliable while mapped.
		 * Leaving it set also helps swapoff to reinstate ptes
		 * faster for those pages still in swapcache.
		 */
	}
}
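
/*
 * Usage sketch (illustrative only, not part of this file): the unmap path
 * in mm/memory.c clears a pte and then drops the reverse mapping and the
 * page reference, roughly:
 *
 *	ptent = ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm);
 *	...
 *	page_remove_rmap(page, vma);
 *	tlb_remove_page(tlb, page);
 *
 * with the pte lock held, as required above.
 */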

/*
 * Subfunctions of try_to_unmap: try_to_unmap_one called
 * repeatedly from either try_to_unmap_anon or try_to_unmap_file.
 */
static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
				int migration)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	int ret = SWAP_AGAIN;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address, &ptl, 0);
	if (!pte)
		goto out;

	/*
	 * If the page is mlock()d, we cannot swap it out.
	 * If it's recently referenced (perhaps page_referenced
	 * skipped over this mm) then we should reactivate it.
	 */
	if (!migration) {
		if (vma->vm_flags & VM_LOCKED) {
			ret = SWAP_MLOCK;
			goto out_unmap;
		}
		if (ptep_clear_flush_young_notify(vma, address, pte)) {
			ret = SWAP_FAIL;
			goto out_unmap;
		}
	}

	/* Nuke the page table entry. */
	flush_cache_page(vma, address, page_to_pfn(page));
	pteval = ptep_clear_flush_notify(vma, address, pte);

	/* Move the dirty bit to the physical page now the pte is gone. */
	if (pte_dirty(pteval))
		set_page_dirty(page);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	if (PageAnon(page)) {
		swp_entry_t entry = { .val = page_private(page) };

		if (PageSwapCache(page)) {
			/*
			 * Store the swap location in the pte.
			 * See handle_pte_fault() ...
			 */
			swap_duplicate(entry);
			if (list_empty(&mm->mmlist)) {
				spin_lock(&mmlist_lock);
				if (list_empty(&mm->mmlist))
					list_add(&mm->mmlist, &init_mm.mmlist);
				spin_unlock(&mmlist_lock);
			}
			dec_mm_counter(mm, anon_rss);
		} else if (PAGE_MIGRATION) {
			/*
			 * Store the pfn of the page in a special migration
			 * pte. do_swap_page() will wait until the migration
			 * pte is removed and then restart fault handling.
			 */
			BUG_ON(!migration);
			entry = make_migration_entry(page, pte_write(pteval));
		}
		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
		BUG_ON(pte_file(*pte));
	} else if (PAGE_MIGRATION && migration) {
		/* Establish migration entry for a file page */
		swp_entry_t entry;
		entry = make_migration_entry(page, pte_write(pteval));
		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
	} else
		dec_mm_counter(mm, file_rss);


	page_remove_rmap(page, vma);
	page_cache_release(page);

out_unmap:
	pte_unmap_unlock(pte, ptl);
out:
	return ret;
}

/*
 * objrmap doesn't work for nonlinear VMAs because the assumption that
 * offset-into-file correlates with offset-into-virtual-addresses does not hold.
 * Consequently, given a particular page and its ->index, we cannot locate the
 * ptes which are mapping that page without an exhaustive linear search.
 *
 * So what this code does is a mini "virtual scan" of each nonlinear VMA which
 * maps the file to which the target page belongs.  The ->vm_private_data field
 * holds the current cursor into that scan.  Successive searches will circulate
 * around the vma's virtual address space.
 *
 * So as more replacement pressure is applied to the pages in a nonlinear VMA,
 * more scanning pressure is placed against them as well.  Eventually pages
 * will become fully unmapped and are eligible for eviction.
 *
 * For very sparsely populated VMAs this is a little inefficient - chances are
 * there there won't be many ptes located within the scan cluster.  In this case
 * maybe we could scan further - to the end of the pte page, perhaps.
 *
 * Mlocked pages:  check VM_LOCKED under mmap_sem held for read, if we can
 * acquire it without blocking.  If vma locked, mlock the pages in the cluster,
 * rather than unmapping them.  If we encounter the "check_page" that vmscan is
 * trying to unmap, return SWAP_MLOCK, else default SWAP_AGAIN.
 */
#define CLUSTER_SIZE	min(32*PAGE_SIZE, PMD_SIZE)
#define CLUSTER_MASK	(~(CLUSTER_SIZE - 1))

static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
		struct vm_area_struct *vma, struct page *check_page)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	struct page *page;
	unsigned long address;
	unsigned long end;
	int ret = SWAP_AGAIN;
	int locked_vma = 0;

	address = (vma->vm_start + cursor) & CLUSTER_MASK;
	end = address + CLUSTER_SIZE;
	if (address < vma->vm_start)
		address = vma->vm_start;
	if (end > vma->vm_end)
		end = vma->vm_end;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return ret;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return ret;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return ret;

	/*
	 * MLOCK_PAGES => feature is configured.
	 * if we can acquire the mmap_sem for read, and vma is VM_LOCKED,
	 * keep the sem while scanning the cluster for mlocking pages.
	 */
	if (MLOCK_PAGES && down_read_trylock(&vma->vm_mm->mmap_sem)) {
		locked_vma = (vma->vm_flags & VM_LOCKED);
		if (!locked_vma)
			up_read(&vma->vm_mm->mmap_sem); /* don't need it */
	}

	pte = pte_offset_map_lock(mm, pmd, address, &ptl);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	for (; address < end; pte++, address += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, address, *pte);
		BUG_ON(!page || PageAnon(page));

		if (locked_vma) {
			mlock_vma_page(page);	/* no-op if already mlocked */
			if (page == check_page)
				ret = SWAP_MLOCK;
			continue;	/* don't unmap */
		}

		if (ptep_clear_flush_young_notify(vma, address, pte))
			continue;

		/* Nuke the page table entry. */
		flush_cache_page(vma, address, pte_pfn(*pte));
		pteval = ptep_clear_flush_notify(vma, address, pte);

		/* If nonlinear, store the file page offset in the pte. */
		if (page->index != linear_page_index(vma, address))
			set_pte_at(mm, address, pte, pgoff_to_pte(page->index));

		/* Move the dirty bit to the physical page now the pte is gone. */
		if (pte_dirty(pteval))
			set_page_dirty(page);

		page_remove_rmap(page, vma);
		page_cache_release(page);
		dec_mm_counter(mm, file_rss);
		(*mapcount)--;
	}
	pte_unmap_unlock(pte - 1, ptl);
	if (locked_vma)
		up_read(&vma->vm_mm->mmap_sem);
	return ret;
}

/*
 * common handling for pages mapped in VM_LOCKED vmas
 */
static int try_to_mlock_page(struct page *page, struct vm_area_struct *vma)
{
	int mlocked = 0;

	if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
		if (vma->vm_flags & VM_LOCKED) {
			mlock_vma_page(page);
			mlocked++;	/* really mlocked the page */
		}
		up_read(&vma->vm_mm->mmap_sem);
	}
	return mlocked;
}

/**
 * try_to_unmap_anon - unmap or unlock anonymous page using the object-based
 * rmap method
 * @page: the page to unmap/unlock
 * @unlock: request for unlock rather than unmap [unlikely]
 * @migration: unmapping for migration - ignored if @unlock
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the anon_vma struct it points to.
 *
 * This function is only called from try_to_unmap/try_to_munlock for
 * anonymous pages.
 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
 * where the page was found will be held for write.  So, we won't recheck
 * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
 * 'LOCKED.
 */
static int try_to_unmap_anon(struct page *page, int unlock, int migration)
{
	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;
	unsigned int mlocked = 0;
	int ret = SWAP_AGAIN;

	if (MLOCK_PAGES && unlikely(unlock))
		ret = SWAP_SUCCESS;	/* default for try_to_munlock() */

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return ret;

	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		if (MLOCK_PAGES && unlikely(unlock)) {
			if (!((vma->vm_flags & VM_LOCKED) &&
			      page_mapped_in_vma(page, vma)))
				continue;  /* must visit all unlocked vmas */
			ret = SWAP_MLOCK;  /* saw at least one mlocked vma */
		} else {
			ret = try_to_unmap_one(page, vma, migration);
			if (ret == SWAP_FAIL || !page_mapped(page))
				break;
		}
		if (ret == SWAP_MLOCK) {
			mlocked = try_to_mlock_page(page, vma);
			if (mlocked)
				break;	/* stop if actually mlocked page */
		}
	}

	page_unlock_anon_vma(anon_vma);

	if (mlocked)
		ret = SWAP_MLOCK;	/* actually mlocked the page */
	else if (ret == SWAP_MLOCK)
		ret = SWAP_AGAIN;	/* saw VM_LOCKED vma */

	return ret;
}

/**
 * try_to_unmap_file - unmap/unlock file page using the object-based rmap method
 * @page: the page to unmap/unlock
 * @unlock: request for unlock rather than unmap [unlikely]
 * @migration: unmapping for migration - ignored if @unlock
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the address_space struct it points to.
 *
 * This function is only called from try_to_unmap/try_to_munlock for
 * object-based pages.
 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
 * where the page was found will be held for write.  So, we won't recheck
 * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
 * 'LOCKED.
 */
static int try_to_unmap_file(struct page *page, int unlock, int migration)
{
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int ret = SWAP_AGAIN;
	unsigned long cursor;
	unsigned long max_nl_cursor = 0;
	unsigned long max_nl_size = 0;
	unsigned int mapcount;
	unsigned int mlocked = 0;

	if (MLOCK_PAGES && unlikely(unlock))
		ret = SWAP_SUCCESS;	/* default for try_to_munlock() */

	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		if (MLOCK_PAGES && unlikely(unlock)) {
			if (!(vma->vm_flags & VM_LOCKED))
				continue;	/* must visit all vmas */
			ret = SWAP_MLOCK;
		} else {
			ret = try_to_unmap_one(page, vma, migration);
			if (ret == SWAP_FAIL || !page_mapped(page))
				goto out;
		}
		if (ret == SWAP_MLOCK) {
			mlocked = try_to_mlock_page(page, vma);
			if (mlocked)
				break;	/* stop if actually mlocked page */
		}
	}

	if (mlocked)
		goto out;

	if (list_empty(&mapping->i_mmap_nonlinear))
		goto out;

	list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
		if (MLOCK_PAGES && unlikely(unlock)) {
			if (!(vma->vm_flags & VM_LOCKED))
				continue;	/* must visit all vmas */
			ret = SWAP_MLOCK;	/* leave mlocked == 0 */
			goto out;		/* no need to look further */
		}
		if (!MLOCK_PAGES && !migration && (vma->vm_flags & VM_LOCKED))
			continue;
		cursor = (unsigned long) vma->vm_private_data;
		if (cursor > max_nl_cursor)
			max_nl_cursor = cursor;
		cursor = vma->vm_end - vma->vm_start;
		if (cursor > max_nl_size)
			max_nl_size = cursor;
	}

	if (max_nl_size == 0) {	/* all nonlinears locked or reserved ? */
		ret = SWAP_FAIL;
		goto out;
	}

	/*
	 * We don't try to search for this page in the nonlinear vmas,
	 * and page_referenced wouldn't have found it anyway.  Instead
	 * just walk the nonlinear vmas trying to age and unmap some.
	 * The mapcount of the page we came in with is irrelevant,
	 * but even so use it as a guide to how hard we should try?
	 */
	mapcount = page_mapcount(page);
	if (!mapcount)
		goto out;
	cond_resched_lock(&mapping->i_mmap_lock);

	max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
	if (max_nl_cursor == 0)
		max_nl_cursor = CLUSTER_SIZE;

	do {
		list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
			if (!MLOCK_PAGES && !migration &&
			    (vma->vm_flags & VM_LOCKED))
				continue;
			cursor = (unsigned long) vma->vm_private_data;
			while ( cursor < max_nl_cursor &&
				cursor < vma->vm_end - vma->vm_start) {
				ret = try_to_unmap_cluster(cursor, &mapcount,
								vma, page);
				if (ret == SWAP_MLOCK)
					mlocked = 2;	/* to return below */
				cursor += CLUSTER_SIZE;
				vma->vm_private_data = (void *) cursor;
				if ((int)mapcount <= 0)
					goto out;
			}
			vma->vm_private_data = (void *) max_nl_cursor;
		}
		cond_resched_lock(&mapping->i_mmap_lock);
		max_nl_cursor += CLUSTER_SIZE;
	} while (max_nl_cursor <= max_nl_size);

	/*
	 * Don't loop forever (perhaps all the remaining pages are
	 * in locked vmas).  Reset cursor on all unreserved nonlinear
	 * vmas, now forgetting on which ones it had fallen behind.
	 */
	list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
		vma->vm_private_data = NULL;
out:
	spin_unlock(&mapping->i_mmap_lock);
	if (mlocked)
		ret = SWAP_MLOCK;	/* actually mlocked the page */
	else if (ret == SWAP_MLOCK)
		ret = SWAP_AGAIN;	/* saw VM_LOCKED vma */
	return ret;
}

/**
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
 * @migration: migration flag
 *
 * Tries to remove all the page table entries which are mapping this
 * page, used in the pageout path.  Caller must hold the page lock.
 * Return values are:
 *
 * SWAP_SUCCESS	- we succeeded in removing all mappings
 * SWAP_AGAIN	- we missed a mapping, try again later
 * SWAP_FAIL	- the page is unswappable
 * SWAP_MLOCK	- page is mlocked.
 */
int try_to_unmap(struct page *page, int migration)
{
	int ret;

	BUG_ON(!PageLocked(page));

	if (PageAnon(page))
		ret = try_to_unmap_anon(page, 0, migration);
	else
		ret = try_to_unmap_file(page, 0, migration);
	if (ret != SWAP_MLOCK && !page_mapped(page))
		ret = SWAP_SUCCESS;
	return ret;
}

#ifdef CONFIG_UNEVICTABLE_LRU
/**
 * try_to_munlock - try to munlock a page
 * @page: the page to be munlocked
 *
 * Called from munlock code.  Checks all of the VMAs mapping the page
 * to make sure nobody else has this page mlocked.  The page will be
 * returned with PG_mlocked cleared if no other vmas have it mlocked.
 *
 * Return values are:
 *
 * SWAP_SUCCESS	- no vma's holding page mlocked.
 * SWAP_AGAIN	- page mapped in mlocked vma -- couldn't acquire mmap sem
 * SWAP_MLOCK	- page is now mlocked.
 */
int try_to_munlock(struct page *page)
{
	VM_BUG_ON(!PageLocked(page) || PageLRU(page));

	if (PageAnon(page))
		return try_to_unmap_anon(page, 1, 0);
	else
		return try_to_unmap_file(page, 1, 0);
}
#endif
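
/*
 * Usage sketch (illustrative only, not part of this file): the pageout path
 * in mm/vmscan.c dispatches on the try_to_unmap() return values documented
 * above, roughly:
 *
 *	switch (try_to_unmap(page, 0)) {
 *	case SWAP_FAIL:
 *		goto activate_locked;
 *	case SWAP_AGAIN:
 *		goto keep_locked;
 *	case SWAP_MLOCK:
 *		goto cull_mlocked;
 *	case SWAP_SUCCESS:
 *		;	(fall through and try to free the page below)
 *	}
 */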