vmscan.c revision 338fde90930eaa02f6f394daa23d35a410af5852
1/* 2 * linux/mm/vmscan.c 3 * 4 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds 5 * 6 * Swap reorganised 29.12.95, Stephen Tweedie. 7 * kswapd added: 7.1.96 sct 8 * Removed kswapd_ctl limits, and swap out as many pages as needed 9 * to bring the system back to freepages.high: 2.4.97, Rik van Riel. 10 * Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com). 11 * Multiqueue VM started 5.8.00, Rik van Riel. 12 */ 13 14#include <linux/mm.h> 15#include <linux/module.h> 16#include <linux/slab.h> 17#include <linux/kernel_stat.h> 18#include <linux/swap.h> 19#include <linux/pagemap.h> 20#include <linux/init.h> 21#include <linux/highmem.h> 22#include <linux/vmstat.h> 23#include <linux/file.h> 24#include <linux/writeback.h> 25#include <linux/blkdev.h> 26#include <linux/buffer_head.h> /* for try_to_release_page(), 27 buffer_heads_over_limit */ 28#include <linux/mm_inline.h> 29#include <linux/pagevec.h> 30#include <linux/backing-dev.h> 31#include <linux/rmap.h> 32#include <linux/topology.h> 33#include <linux/cpu.h> 34#include <linux/cpuset.h> 35#include <linux/notifier.h> 36#include <linux/rwsem.h> 37#include <linux/delay.h> 38#include <linux/kthread.h> 39#include <linux/freezer.h> 40#include <linux/memcontrol.h> 41#include <linux/delayacct.h> 42#include <linux/sysctl.h> 43 44#include <asm/tlbflush.h> 45#include <asm/div64.h> 46 47#include <linux/swapops.h> 48 49#include "internal.h" 50 51struct scan_control { 52 /* Incremented by the number of inactive pages that were scanned */ 53 unsigned long nr_scanned; 54 55 /* Number of pages freed so far during a call to shrink_zones() */ 56 unsigned long nr_reclaimed; 57 58 /* How many pages shrink_list() should reclaim */ 59 unsigned long nr_to_reclaim; 60 61 unsigned long hibernation_mode; 62 63 /* This context's GFP mask */ 64 gfp_t gfp_mask; 65 66 int may_writepage; 67 68 /* Can mapped pages be reclaimed? */ 69 int may_unmap; 70 71 /* Can pages be swapped as part of reclaim? */ 72 int may_swap; 73 74 int swappiness; 75 76 int all_unreclaimable; 77 78 int order; 79 80 /* Which cgroup do we reclaim from */ 81 struct mem_cgroup *mem_cgroup; 82 83 /* 84 * Nodemask of nodes allowed by the caller. If NULL, all nodes 85 * are scanned. 86 */ 87 nodemask_t *nodemask; 88 89 /* Pluggable isolate pages callback */ 90 unsigned long (*isolate_pages)(unsigned long nr, struct list_head *dst, 91 unsigned long *scanned, int order, int mode, 92 struct zone *z, struct mem_cgroup *mem_cont, 93 int active, int file); 94}; 95 96#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru)) 97 98#ifdef ARCH_HAS_PREFETCH 99#define prefetch_prev_lru_page(_page, _base, _field) \ 100 do { \ 101 if ((_page)->lru.prev != _base) { \ 102 struct page *prev; \ 103 \ 104 prev = lru_to_page(&(_page->lru)); \ 105 prefetch(&prev->_field); \ 106 } \ 107 } while (0) 108#else 109#define prefetch_prev_lru_page(_page, _base, _field) do { } while (0) 110#endif 111 112#ifdef ARCH_HAS_PREFETCHW 113#define prefetchw_prev_lru_page(_page, _base, _field) \ 114 do { \ 115 if ((_page)->lru.prev != _base) { \ 116 struct page *prev; \ 117 \ 118 prev = lru_to_page(&(_page->lru)); \ 119 prefetchw(&prev->_field); \ 120 } \ 121 } while (0) 122#else 123#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0) 124#endif 125 126/* 127 * From 0 .. 100. Higher means more swappy. 
128 */ 129int vm_swappiness = 60; 130long vm_total_pages; /* The total number of pages which the VM controls */ 131 132static LIST_HEAD(shrinker_list); 133static DECLARE_RWSEM(shrinker_rwsem); 134 135#ifdef CONFIG_CGROUP_MEM_RES_CTLR 136#define scanning_global_lru(sc) (!(sc)->mem_cgroup) 137#else 138#define scanning_global_lru(sc) (1) 139#endif 140 141static struct zone_reclaim_stat *get_reclaim_stat(struct zone *zone, 142 struct scan_control *sc) 143{ 144 if (!scanning_global_lru(sc)) 145 return mem_cgroup_get_reclaim_stat(sc->mem_cgroup, zone); 146 147 return &zone->reclaim_stat; 148} 149 150static unsigned long zone_nr_lru_pages(struct zone *zone, 151 struct scan_control *sc, enum lru_list lru) 152{ 153 if (!scanning_global_lru(sc)) 154 return mem_cgroup_zone_nr_pages(sc->mem_cgroup, zone, lru); 155 156 return zone_page_state(zone, NR_LRU_BASE + lru); 157} 158 159 160/* 161 * Add a shrinker callback to be called from the vm 162 */ 163void register_shrinker(struct shrinker *shrinker) 164{ 165 shrinker->nr = 0; 166 down_write(&shrinker_rwsem); 167 list_add_tail(&shrinker->list, &shrinker_list); 168 up_write(&shrinker_rwsem); 169} 170EXPORT_SYMBOL(register_shrinker); 171 172/* 173 * Remove one 174 */ 175void unregister_shrinker(struct shrinker *shrinker) 176{ 177 down_write(&shrinker_rwsem); 178 list_del(&shrinker->list); 179 up_write(&shrinker_rwsem); 180} 181EXPORT_SYMBOL(unregister_shrinker); 182 183#define SHRINK_BATCH 128 184/* 185 * Call the shrink functions to age shrinkable caches 186 * 187 * Here we assume it costs one seek to replace a lru page and that it also 188 * takes a seek to recreate a cache object. With this in mind we age equal 189 * percentages of the lru and ageable caches. This should balance the seeks 190 * generated by these structures. 191 * 192 * If the vm encountered mapped pages on the LRU it increase the pressure on 193 * slab to avoid swapping. 194 * 195 * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits. 196 * 197 * `lru_pages' represents the number of on-LRU pages in all the zones which 198 * are eligible for the caller's allocation attempt. It is used for balancing 199 * slab reclaim versus page reclaim. 200 * 201 * Returns the number of slab objects which we shrunk. 202 */ 203unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask, 204 unsigned long lru_pages) 205{ 206 struct shrinker *shrinker; 207 unsigned long ret = 0; 208 209 if (scanned == 0) 210 scanned = SWAP_CLUSTER_MAX; 211 212 if (!down_read_trylock(&shrinker_rwsem)) 213 return 1; /* Assume we'll be able to shrink next time */ 214 215 list_for_each_entry(shrinker, &shrinker_list, list) { 216 unsigned long long delta; 217 unsigned long total_scan; 218 unsigned long max_pass = (*shrinker->shrink)(0, gfp_mask); 219 220 delta = (4 * scanned) / shrinker->seeks; 221 delta *= max_pass; 222 do_div(delta, lru_pages + 1); 223 shrinker->nr += delta; 224 if (shrinker->nr < 0) { 225 printk(KERN_ERR "shrink_slab: %pF negative objects to " 226 "delete nr=%ld\n", 227 shrinker->shrink, shrinker->nr); 228 shrinker->nr = max_pass; 229 } 230 231 /* 232 * Avoid risking looping forever due to too large nr value: 233 * never try to free more than twice the estimate number of 234 * freeable entries. 
235 */ 236 if (shrinker->nr > max_pass * 2) 237 shrinker->nr = max_pass * 2; 238 239 total_scan = shrinker->nr; 240 shrinker->nr = 0; 241 242 while (total_scan >= SHRINK_BATCH) { 243 long this_scan = SHRINK_BATCH; 244 int shrink_ret; 245 int nr_before; 246 247 nr_before = (*shrinker->shrink)(0, gfp_mask); 248 shrink_ret = (*shrinker->shrink)(this_scan, gfp_mask); 249 if (shrink_ret == -1) 250 break; 251 if (shrink_ret < nr_before) 252 ret += nr_before - shrink_ret; 253 count_vm_events(SLABS_SCANNED, this_scan); 254 total_scan -= this_scan; 255 256 cond_resched(); 257 } 258 259 shrinker->nr += total_scan; 260 } 261 up_read(&shrinker_rwsem); 262 return ret; 263} 264 265/* Called without lock on whether page is mapped, so answer is unstable */ 266static inline int page_mapping_inuse(struct page *page) 267{ 268 struct address_space *mapping; 269 270 /* Page is in somebody's page tables. */ 271 if (page_mapped(page)) 272 return 1; 273 274 /* Be more reluctant to reclaim swapcache than pagecache */ 275 if (PageSwapCache(page)) 276 return 1; 277 278 mapping = page_mapping(page); 279 if (!mapping) 280 return 0; 281 282 /* File is mmap'd by somebody? */ 283 return mapping_mapped(mapping); 284} 285 286static inline int is_page_cache_freeable(struct page *page) 287{ 288 /* 289 * A freeable page cache page is referenced only by the caller 290 * that isolated the page, the page cache radix tree and 291 * optional buffer heads at page->private. 292 */ 293 return page_count(page) - page_has_private(page) == 2; 294} 295 296static int may_write_to_queue(struct backing_dev_info *bdi) 297{ 298 if (current->flags & PF_SWAPWRITE) 299 return 1; 300 if (!bdi_write_congested(bdi)) 301 return 1; 302 if (bdi == current->backing_dev_info) 303 return 1; 304 return 0; 305} 306 307/* 308 * We detected a synchronous write error writing a page out. Probably 309 * -ENOSPC. We need to propagate that into the address_space for a subsequent 310 * fsync(), msync() or close(). 311 * 312 * The tricky part is that after writepage we cannot touch the mapping: nothing 313 * prevents it from being freed up. But we have a ref on the page and once 314 * that page is locked, the mapping is pinned. 315 * 316 * We're allowed to run sleeping lock_page() here because we know the caller has 317 * __GFP_FS. 318 */ 319static void handle_write_error(struct address_space *mapping, 320 struct page *page, int error) 321{ 322 lock_page(page); 323 if (page_mapping(page) == mapping) 324 mapping_set_error(mapping, error); 325 unlock_page(page); 326} 327 328/* Request for sync pageout. */ 329enum pageout_io { 330 PAGEOUT_IO_ASYNC, 331 PAGEOUT_IO_SYNC, 332}; 333 334/* possible outcome of pageout() */ 335typedef enum { 336 /* failed to write page out, page is locked */ 337 PAGE_KEEP, 338 /* move page to the active list, page is locked */ 339 PAGE_ACTIVATE, 340 /* page has been sent to the disk successfully, page is unlocked */ 341 PAGE_SUCCESS, 342 /* page is clean and locked */ 343 PAGE_CLEAN, 344} pageout_t; 345 346/* 347 * pageout is called by shrink_page_list() for each dirty page. 348 * Calls ->writepage(). 349 */ 350static pageout_t pageout(struct page *page, struct address_space *mapping, 351 enum pageout_io sync_writeback) 352{ 353 /* 354 * If the page is dirty, only perform writeback if that write 355 * will be non-blocking. To prevent this allocation from being 356 * stalled by pagecache activity. But note that there may be 357 * stalls if we need to run get_block(). We could test 358 * PagePrivate for that. 
359 * 360 * If this process is currently in __generic_file_aio_write() against 361 * this page's queue, we can perform writeback even if that 362 * will block. 363 * 364 * If the page is swapcache, write it back even if that would 365 * block, for some throttling. This happens by accident, because 366 * swap_backing_dev_info is bust: it doesn't reflect the 367 * congestion state of the swapdevs. Easy to fix, if needed. 368 */ 369 if (!is_page_cache_freeable(page)) 370 return PAGE_KEEP; 371 if (!mapping) { 372 /* 373 * Some data journaling orphaned pages can have 374 * page->mapping == NULL while being dirty with clean buffers. 375 */ 376 if (page_has_private(page)) { 377 if (try_to_free_buffers(page)) { 378 ClearPageDirty(page); 379 printk("%s: orphaned page\n", __func__); 380 return PAGE_CLEAN; 381 } 382 } 383 return PAGE_KEEP; 384 } 385 if (mapping->a_ops->writepage == NULL) 386 return PAGE_ACTIVATE; 387 if (!may_write_to_queue(mapping->backing_dev_info)) 388 return PAGE_KEEP; 389 390 if (clear_page_dirty_for_io(page)) { 391 int res; 392 struct writeback_control wbc = { 393 .sync_mode = WB_SYNC_NONE, 394 .nr_to_write = SWAP_CLUSTER_MAX, 395 .range_start = 0, 396 .range_end = LLONG_MAX, 397 .nonblocking = 1, 398 .for_reclaim = 1, 399 }; 400 401 SetPageReclaim(page); 402 res = mapping->a_ops->writepage(page, &wbc); 403 if (res < 0) 404 handle_write_error(mapping, page, res); 405 if (res == AOP_WRITEPAGE_ACTIVATE) { 406 ClearPageReclaim(page); 407 return PAGE_ACTIVATE; 408 } 409 410 /* 411 * Wait on writeback if requested to. This happens when 412 * direct reclaiming a large contiguous area and the 413 * first attempt to free a range of pages fails. 414 */ 415 if (PageWriteback(page) && sync_writeback == PAGEOUT_IO_SYNC) 416 wait_on_page_writeback(page); 417 418 if (!PageWriteback(page)) { 419 /* synchronous write or broken a_ops? */ 420 ClearPageReclaim(page); 421 } 422 inc_zone_page_state(page, NR_VMSCAN_WRITE); 423 return PAGE_SUCCESS; 424 } 425 426 return PAGE_CLEAN; 427} 428 429/* 430 * Same as remove_mapping, but if the page is removed from the mapping, it 431 * gets returned with a refcount of 0. 432 */ 433static int __remove_mapping(struct address_space *mapping, struct page *page) 434{ 435 BUG_ON(!PageLocked(page)); 436 BUG_ON(mapping != page_mapping(page)); 437 438 spin_lock_irq(&mapping->tree_lock); 439 /* 440 * The non racy check for a busy page. 441 * 442 * Must be careful with the order of the tests. When someone has 443 * a ref to the page, it may be possible that they dirty it then 444 * drop the reference. So if PageDirty is tested before page_count 445 * here, then the following race may occur: 446 * 447 * get_user_pages(&page); 448 * [user mapping goes away] 449 * write_to(page); 450 * !PageDirty(page) [good] 451 * SetPageDirty(page); 452 * put_page(page); 453 * !page_count(page) [good, discard it] 454 * 455 * [oops, our write_to data is lost] 456 * 457 * Reversing the order of the tests ensures such a situation cannot 458 * escape unnoticed. The smp_rmb is needed to ensure the page->flags 459 * load is not satisfied before that of page->_count. 460 * 461 * Note that if SetPageDirty is always performed via set_page_dirty, 462 * and thus under tree_lock, then this ordering is not required. 
463 */ 464 if (!page_freeze_refs(page, 2)) 465 goto cannot_free; 466 /* note: atomic_cmpxchg in page_freeze_refs provides the smp_rmb */ 467 if (unlikely(PageDirty(page))) { 468 page_unfreeze_refs(page, 2); 469 goto cannot_free; 470 } 471 472 if (PageSwapCache(page)) { 473 swp_entry_t swap = { .val = page_private(page) }; 474 __delete_from_swap_cache(page); 475 spin_unlock_irq(&mapping->tree_lock); 476 swapcache_free(swap, page); 477 } else { 478 __remove_from_page_cache(page); 479 spin_unlock_irq(&mapping->tree_lock); 480 mem_cgroup_uncharge_cache_page(page); 481 } 482 483 return 1; 484 485cannot_free: 486 spin_unlock_irq(&mapping->tree_lock); 487 return 0; 488} 489 490/* 491 * Attempt to detach a locked page from its ->mapping. If it is dirty or if 492 * someone else has a ref on the page, abort and return 0. If it was 493 * successfully detached, return 1. Assumes the caller has a single ref on 494 * this page. 495 */ 496int remove_mapping(struct address_space *mapping, struct page *page) 497{ 498 if (__remove_mapping(mapping, page)) { 499 /* 500 * Unfreezing the refcount with 1 rather than 2 effectively 501 * drops the pagecache ref for us without requiring another 502 * atomic operation. 503 */ 504 page_unfreeze_refs(page, 1); 505 return 1; 506 } 507 return 0; 508} 509 510/** 511 * putback_lru_page - put previously isolated page onto appropriate LRU list 512 * @page: page to be put back to appropriate lru list 513 * 514 * Add previously isolated @page to appropriate LRU list. 515 * Page may still be unevictable for other reasons. 516 * 517 * lru_lock must not be held, interrupts must be enabled. 518 */ 519void putback_lru_page(struct page *page) 520{ 521 int lru; 522 int active = !!TestClearPageActive(page); 523 int was_unevictable = PageUnevictable(page); 524 525 VM_BUG_ON(PageLRU(page)); 526 527redo: 528 ClearPageUnevictable(page); 529 530 if (page_evictable(page, NULL)) { 531 /* 532 * For evictable pages, we can use the cache. 533 * In event of a race, worst case is we end up with an 534 * unevictable page on [in]active list. 535 * We know how to handle that. 536 */ 537 lru = active + page_lru_base_type(page); 538 lru_cache_add_lru(page, lru); 539 } else { 540 /* 541 * Put unevictable pages directly on zone's unevictable 542 * list. 543 */ 544 lru = LRU_UNEVICTABLE; 545 add_page_to_unevictable_list(page); 546 /* 547 * When racing with an mlock clearing (page is 548 * unlocked), make sure that if the other thread does 549 * not observe our setting of PG_lru and fails 550 * isolation, we see PG_mlocked cleared below and move 551 * the page back to the evictable list. 552 * 553 * The other side is TestClearPageMlocked(). 554 */ 555 smp_mb(); 556 } 557 558 /* 559 * page's status can change while we move it among lru. If an evictable 560 * page is on unevictable list, it never be freed. To avoid that, 561 * check after we added it to the list, again. 562 */ 563 if (lru == LRU_UNEVICTABLE && page_evictable(page, NULL)) { 564 if (!isolate_lru_page(page)) { 565 put_page(page); 566 goto redo; 567 } 568 /* This means someone else dropped this page from LRU 569 * So, it will be freed or putback to LRU again. There is 570 * nothing to do here. 
571 */ 572 } 573 574 if (was_unevictable && lru != LRU_UNEVICTABLE) 575 count_vm_event(UNEVICTABLE_PGRESCUED); 576 else if (!was_unevictable && lru == LRU_UNEVICTABLE) 577 count_vm_event(UNEVICTABLE_PGCULLED); 578 579 put_page(page); /* drop ref from isolate */ 580} 581 582/* 583 * shrink_page_list() returns the number of reclaimed pages 584 */ 585static unsigned long shrink_page_list(struct list_head *page_list, 586 struct scan_control *sc, 587 enum pageout_io sync_writeback) 588{ 589 LIST_HEAD(ret_pages); 590 struct pagevec freed_pvec; 591 int pgactivate = 0; 592 unsigned long nr_reclaimed = 0; 593 unsigned long vm_flags; 594 595 cond_resched(); 596 597 pagevec_init(&freed_pvec, 1); 598 while (!list_empty(page_list)) { 599 struct address_space *mapping; 600 struct page *page; 601 int may_enter_fs; 602 int referenced; 603 604 cond_resched(); 605 606 page = lru_to_page(page_list); 607 list_del(&page->lru); 608 609 if (!trylock_page(page)) 610 goto keep; 611 612 VM_BUG_ON(PageActive(page)); 613 614 sc->nr_scanned++; 615 616 if (unlikely(!page_evictable(page, NULL))) 617 goto cull_mlocked; 618 619 if (!sc->may_unmap && page_mapped(page)) 620 goto keep_locked; 621 622 /* Double the slab pressure for mapped and swapcache pages */ 623 if (page_mapped(page) || PageSwapCache(page)) 624 sc->nr_scanned++; 625 626 may_enter_fs = (sc->gfp_mask & __GFP_FS) || 627 (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO)); 628 629 if (PageWriteback(page)) { 630 /* 631 * Synchronous reclaim is performed in two passes, 632 * first an asynchronous pass over the list to 633 * start parallel writeback, and a second synchronous 634 * pass to wait for the IO to complete. Wait here 635 * for any page for which writeback has already 636 * started. 637 */ 638 if (sync_writeback == PAGEOUT_IO_SYNC && may_enter_fs) 639 wait_on_page_writeback(page); 640 else 641 goto keep_locked; 642 } 643 644 referenced = page_referenced(page, 1, 645 sc->mem_cgroup, &vm_flags); 646 /* 647 * In active use or really unfreeable? Activate it. 648 * If page which have PG_mlocked lost isoltation race, 649 * try_to_unmap moves it to unevictable list 650 */ 651 if (sc->order <= PAGE_ALLOC_COSTLY_ORDER && 652 referenced && page_mapping_inuse(page) 653 && !(vm_flags & VM_LOCKED)) 654 goto activate_locked; 655 656 /* 657 * Anonymous process memory has backing store? 658 * Try to allocate it some swap space here. 659 */ 660 if (PageAnon(page) && !PageSwapCache(page)) { 661 if (!(sc->gfp_mask & __GFP_IO)) 662 goto keep_locked; 663 if (!add_to_swap(page)) 664 goto activate_locked; 665 may_enter_fs = 1; 666 } 667 668 mapping = page_mapping(page); 669 670 /* 671 * The page is mapped into the page tables of one or more 672 * processes. Try to unmap it here. 
673 */ 674 if (page_mapped(page) && mapping) { 675 switch (try_to_unmap(page, TTU_UNMAP)) { 676 case SWAP_FAIL: 677 goto activate_locked; 678 case SWAP_AGAIN: 679 goto keep_locked; 680 case SWAP_MLOCK: 681 goto cull_mlocked; 682 case SWAP_SUCCESS: 683 ; /* try to free the page below */ 684 } 685 } 686 687 if (PageDirty(page)) { 688 if (sc->order <= PAGE_ALLOC_COSTLY_ORDER && referenced) 689 goto keep_locked; 690 if (!may_enter_fs) 691 goto keep_locked; 692 if (!sc->may_writepage) 693 goto keep_locked; 694 695 /* Page is dirty, try to write it out here */ 696 switch (pageout(page, mapping, sync_writeback)) { 697 case PAGE_KEEP: 698 goto keep_locked; 699 case PAGE_ACTIVATE: 700 goto activate_locked; 701 case PAGE_SUCCESS: 702 if (PageWriteback(page) || PageDirty(page)) 703 goto keep; 704 /* 705 * A synchronous write - probably a ramdisk. Go 706 * ahead and try to reclaim the page. 707 */ 708 if (!trylock_page(page)) 709 goto keep; 710 if (PageDirty(page) || PageWriteback(page)) 711 goto keep_locked; 712 mapping = page_mapping(page); 713 case PAGE_CLEAN: 714 ; /* try to free the page below */ 715 } 716 } 717 718 /* 719 * If the page has buffers, try to free the buffer mappings 720 * associated with this page. If we succeed we try to free 721 * the page as well. 722 * 723 * We do this even if the page is PageDirty(). 724 * try_to_release_page() does not perform I/O, but it is 725 * possible for a page to have PageDirty set, but it is actually 726 * clean (all its buffers are clean). This happens if the 727 * buffers were written out directly, with submit_bh(). ext3 728 * will do this, as well as the blockdev mapping. 729 * try_to_release_page() will discover that cleanness and will 730 * drop the buffers and mark the page clean - it can be freed. 731 * 732 * Rarely, pages can have buffers and no ->mapping. These are 733 * the pages which were not successfully invalidated in 734 * truncate_complete_page(). We try to drop those buffers here 735 * and if that worked, and the page is no longer mapped into 736 * process address space (page_count == 1) it can be freed. 737 * Otherwise, leave the page on the LRU so it is swappable. 738 */ 739 if (page_has_private(page)) { 740 if (!try_to_release_page(page, sc->gfp_mask)) 741 goto activate_locked; 742 if (!mapping && page_count(page) == 1) { 743 unlock_page(page); 744 if (put_page_testzero(page)) 745 goto free_it; 746 else { 747 /* 748 * rare race with speculative reference. 749 * the speculative reference will free 750 * this page shortly, so we may 751 * increment nr_reclaimed here (and 752 * leave it off the LRU). 753 */ 754 nr_reclaimed++; 755 continue; 756 } 757 } 758 } 759 760 if (!mapping || !__remove_mapping(mapping, page)) 761 goto keep_locked; 762 763 /* 764 * At this point, we have no other references and there is 765 * no way to pick any more up (removed from LRU, removed 766 * from pagecache). Can use non-atomic bitops now (and 767 * we obviously don't have to worry about waking up a process 768 * waiting on the page lock, because there are no references. 769 */ 770 __clear_page_locked(page); 771free_it: 772 nr_reclaimed++; 773 if (!pagevec_add(&freed_pvec, page)) { 774 __pagevec_free(&freed_pvec); 775 pagevec_reinit(&freed_pvec); 776 } 777 continue; 778 779cull_mlocked: 780 if (PageSwapCache(page)) 781 try_to_free_swap(page); 782 unlock_page(page); 783 putback_lru_page(page); 784 continue; 785 786activate_locked: 787 /* Not a candidate for swapping, so reclaim swap space. 
*/ 788 if (PageSwapCache(page) && vm_swap_full()) 789 try_to_free_swap(page); 790 VM_BUG_ON(PageActive(page)); 791 SetPageActive(page); 792 pgactivate++; 793keep_locked: 794 unlock_page(page); 795keep: 796 list_add(&page->lru, &ret_pages); 797 VM_BUG_ON(PageLRU(page) || PageUnevictable(page)); 798 } 799 list_splice(&ret_pages, page_list); 800 if (pagevec_count(&freed_pvec)) 801 __pagevec_free(&freed_pvec); 802 count_vm_events(PGACTIVATE, pgactivate); 803 return nr_reclaimed; 804} 805 806/* LRU Isolation modes. */ 807#define ISOLATE_INACTIVE 0 /* Isolate inactive pages. */ 808#define ISOLATE_ACTIVE 1 /* Isolate active pages. */ 809#define ISOLATE_BOTH 2 /* Isolate both active and inactive pages. */ 810 811/* 812 * Attempt to remove the specified page from its LRU. Only take this page 813 * if it is of the appropriate PageActive status. Pages which are being 814 * freed elsewhere are also ignored. 815 * 816 * page: page to consider 817 * mode: one of the LRU isolation modes defined above 818 * 819 * returns 0 on success, -ve errno on failure. 820 */ 821int __isolate_lru_page(struct page *page, int mode, int file) 822{ 823 int ret = -EINVAL; 824 825 /* Only take pages on the LRU. */ 826 if (!PageLRU(page)) 827 return ret; 828 829 /* 830 * When checking the active state, we need to be sure we are 831 * dealing with comparible boolean values. Take the logical not 832 * of each. 833 */ 834 if (mode != ISOLATE_BOTH && (!PageActive(page) != !mode)) 835 return ret; 836 837 if (mode != ISOLATE_BOTH && page_is_file_cache(page) != file) 838 return ret; 839 840 /* 841 * When this function is being called for lumpy reclaim, we 842 * initially look into all LRU pages, active, inactive and 843 * unevictable; only give shrink_page_list evictable pages. 844 */ 845 if (PageUnevictable(page)) 846 return ret; 847 848 ret = -EBUSY; 849 850 if (likely(get_page_unless_zero(page))) { 851 /* 852 * Be careful not to clear PageLRU until after we're 853 * sure the page is not being freed elsewhere -- the 854 * page release code relies on it. 855 */ 856 ClearPageLRU(page); 857 ret = 0; 858 } 859 860 return ret; 861} 862 863/* 864 * zone->lru_lock is heavily contended. Some of the functions that 865 * shrink the lists perform better by taking out a batch of pages 866 * and working on them outside the LRU lock. 867 * 868 * For pagecache intensive workloads, this function is the hottest 869 * spot in the kernel (apart from copy_*_user functions). 870 * 871 * Appropriate locks must be held before calling this function. 872 * 873 * @nr_to_scan: The number of pages to look through on the list. 874 * @src: The LRU list to pull pages off. 875 * @dst: The temp list to put pages on to. 876 * @scanned: The number of pages that were scanned. 877 * @order: The caller's attempted allocation order 878 * @mode: One of the LRU isolation modes 879 * @file: True [1] if isolating file [!anon] pages 880 * 881 * returns how many pages were moved onto *@dst. 
882 */ 883static unsigned long isolate_lru_pages(unsigned long nr_to_scan, 884 struct list_head *src, struct list_head *dst, 885 unsigned long *scanned, int order, int mode, int file) 886{ 887 unsigned long nr_taken = 0; 888 unsigned long scan; 889 890 for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) { 891 struct page *page; 892 unsigned long pfn; 893 unsigned long end_pfn; 894 unsigned long page_pfn; 895 int zone_id; 896 897 page = lru_to_page(src); 898 prefetchw_prev_lru_page(page, src, flags); 899 900 VM_BUG_ON(!PageLRU(page)); 901 902 switch (__isolate_lru_page(page, mode, file)) { 903 case 0: 904 list_move(&page->lru, dst); 905 mem_cgroup_del_lru(page); 906 nr_taken++; 907 break; 908 909 case -EBUSY: 910 /* else it is being freed elsewhere */ 911 list_move(&page->lru, src); 912 mem_cgroup_rotate_lru_list(page, page_lru(page)); 913 continue; 914 915 default: 916 BUG(); 917 } 918 919 if (!order) 920 continue; 921 922 /* 923 * Attempt to take all pages in the order aligned region 924 * surrounding the tag page. Only take those pages of 925 * the same active state as that tag page. We may safely 926 * round the target page pfn down to the requested order 927 * as the mem_map is guarenteed valid out to MAX_ORDER, 928 * where that page is in a different zone we will detect 929 * it from its zone id and abort this block scan. 930 */ 931 zone_id = page_zone_id(page); 932 page_pfn = page_to_pfn(page); 933 pfn = page_pfn & ~((1 << order) - 1); 934 end_pfn = pfn + (1 << order); 935 for (; pfn < end_pfn; pfn++) { 936 struct page *cursor_page; 937 938 /* The target page is in the block, ignore it. */ 939 if (unlikely(pfn == page_pfn)) 940 continue; 941 942 /* Avoid holes within the zone. */ 943 if (unlikely(!pfn_valid_within(pfn))) 944 break; 945 946 cursor_page = pfn_to_page(pfn); 947 948 /* Check that we have not crossed a zone boundary. */ 949 if (unlikely(page_zone_id(cursor_page) != zone_id)) 950 continue; 951 952 /* 953 * If we don't have enough swap space, reclaiming of 954 * anon page which don't already have a swap slot is 955 * pointless. 956 */ 957 if (nr_swap_pages <= 0 && PageAnon(cursor_page) && 958 !PageSwapCache(cursor_page)) 959 continue; 960 961 if (__isolate_lru_page(cursor_page, mode, file) == 0) { 962 list_move(&cursor_page->lru, dst); 963 mem_cgroup_del_lru(cursor_page); 964 nr_taken++; 965 scan++; 966 } 967 } 968 } 969 970 *scanned = scan; 971 return nr_taken; 972} 973 974static unsigned long isolate_pages_global(unsigned long nr, 975 struct list_head *dst, 976 unsigned long *scanned, int order, 977 int mode, struct zone *z, 978 struct mem_cgroup *mem_cont, 979 int active, int file) 980{ 981 int lru = LRU_BASE; 982 if (active) 983 lru += LRU_ACTIVE; 984 if (file) 985 lru += LRU_FILE; 986 return isolate_lru_pages(nr, &z->lru[lru].list, dst, scanned, order, 987 mode, file); 988} 989 990/* 991 * clear_active_flags() is a helper for shrink_active_list(), clearing 992 * any active bits from the pages in the list. 
993 */ 994static unsigned long clear_active_flags(struct list_head *page_list, 995 unsigned int *count) 996{ 997 int nr_active = 0; 998 int lru; 999 struct page *page; 1000 1001 list_for_each_entry(page, page_list, lru) { 1002 lru = page_lru_base_type(page); 1003 if (PageActive(page)) { 1004 lru += LRU_ACTIVE; 1005 ClearPageActive(page); 1006 nr_active++; 1007 } 1008 count[lru]++; 1009 } 1010 1011 return nr_active; 1012} 1013 1014/** 1015 * isolate_lru_page - tries to isolate a page from its LRU list 1016 * @page: page to isolate from its LRU list 1017 * 1018 * Isolates a @page from an LRU list, clears PageLRU and adjusts the 1019 * vmstat statistic corresponding to whatever LRU list the page was on. 1020 * 1021 * Returns 0 if the page was removed from an LRU list. 1022 * Returns -EBUSY if the page was not on an LRU list. 1023 * 1024 * The returned page will have PageLRU() cleared. If it was found on 1025 * the active list, it will have PageActive set. If it was found on 1026 * the unevictable list, it will have the PageUnevictable bit set. That flag 1027 * may need to be cleared by the caller before letting the page go. 1028 * 1029 * The vmstat statistic corresponding to the list on which the page was 1030 * found will be decremented. 1031 * 1032 * Restrictions: 1033 * (1) Must be called with an elevated refcount on the page. This is a 1034 * fundamentnal difference from isolate_lru_pages (which is called 1035 * without a stable reference). 1036 * (2) the lru_lock must not be held. 1037 * (3) interrupts must be enabled. 1038 */ 1039int isolate_lru_page(struct page *page) 1040{ 1041 int ret = -EBUSY; 1042 1043 if (PageLRU(page)) { 1044 struct zone *zone = page_zone(page); 1045 1046 spin_lock_irq(&zone->lru_lock); 1047 if (PageLRU(page) && get_page_unless_zero(page)) { 1048 int lru = page_lru(page); 1049 ret = 0; 1050 ClearPageLRU(page); 1051 1052 del_page_from_lru_list(zone, page, lru); 1053 } 1054 spin_unlock_irq(&zone->lru_lock); 1055 } 1056 return ret; 1057} 1058 1059/* 1060 * Are there way too many processes in the direct reclaim path already? 1061 */ 1062static int too_many_isolated(struct zone *zone, int file, 1063 struct scan_control *sc) 1064{ 1065 unsigned long inactive, isolated; 1066 1067 if (current_is_kswapd()) 1068 return 0; 1069 1070 if (!scanning_global_lru(sc)) 1071 return 0; 1072 1073 if (file) { 1074 inactive = zone_page_state(zone, NR_INACTIVE_FILE); 1075 isolated = zone_page_state(zone, NR_ISOLATED_FILE); 1076 } else { 1077 inactive = zone_page_state(zone, NR_INACTIVE_ANON); 1078 isolated = zone_page_state(zone, NR_ISOLATED_ANON); 1079 } 1080 1081 return isolated > inactive; 1082} 1083 1084/* 1085 * shrink_inactive_list() is a helper for shrink_zone(). It returns the number 1086 * of reclaimed pages 1087 */ 1088static unsigned long shrink_inactive_list(unsigned long max_scan, 1089 struct zone *zone, struct scan_control *sc, 1090 int priority, int file) 1091{ 1092 LIST_HEAD(page_list); 1093 struct pagevec pvec; 1094 unsigned long nr_scanned = 0; 1095 unsigned long nr_reclaimed = 0; 1096 struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc); 1097 int lumpy_reclaim = 0; 1098 1099 while (unlikely(too_many_isolated(zone, file, sc))) { 1100 congestion_wait(BLK_RW_ASYNC, HZ/10); 1101 1102 /* We are about to die and free our memory. Return now. 
*/ 1103 if (fatal_signal_pending(current)) 1104 return SWAP_CLUSTER_MAX; 1105 } 1106 1107 /* 1108 * If we need a large contiguous chunk of memory, or have 1109 * trouble getting a small set of contiguous pages, we 1110 * will reclaim both active and inactive pages. 1111 * 1112 * We use the same threshold as pageout congestion_wait below. 1113 */ 1114 if (sc->order > PAGE_ALLOC_COSTLY_ORDER) 1115 lumpy_reclaim = 1; 1116 else if (sc->order && priority < DEF_PRIORITY - 2) 1117 lumpy_reclaim = 1; 1118 1119 pagevec_init(&pvec, 1); 1120 1121 lru_add_drain(); 1122 spin_lock_irq(&zone->lru_lock); 1123 do { 1124 struct page *page; 1125 unsigned long nr_taken; 1126 unsigned long nr_scan; 1127 unsigned long nr_freed; 1128 unsigned long nr_active; 1129 unsigned int count[NR_LRU_LISTS] = { 0, }; 1130 int mode = lumpy_reclaim ? ISOLATE_BOTH : ISOLATE_INACTIVE; 1131 unsigned long nr_anon; 1132 unsigned long nr_file; 1133 1134 nr_taken = sc->isolate_pages(SWAP_CLUSTER_MAX, 1135 &page_list, &nr_scan, sc->order, mode, 1136 zone, sc->mem_cgroup, 0, file); 1137 1138 if (scanning_global_lru(sc)) { 1139 zone->pages_scanned += nr_scan; 1140 if (current_is_kswapd()) 1141 __count_zone_vm_events(PGSCAN_KSWAPD, zone, 1142 nr_scan); 1143 else 1144 __count_zone_vm_events(PGSCAN_DIRECT, zone, 1145 nr_scan); 1146 } 1147 1148 if (nr_taken == 0) 1149 goto done; 1150 1151 nr_active = clear_active_flags(&page_list, count); 1152 __count_vm_events(PGDEACTIVATE, nr_active); 1153 1154 __mod_zone_page_state(zone, NR_ACTIVE_FILE, 1155 -count[LRU_ACTIVE_FILE]); 1156 __mod_zone_page_state(zone, NR_INACTIVE_FILE, 1157 -count[LRU_INACTIVE_FILE]); 1158 __mod_zone_page_state(zone, NR_ACTIVE_ANON, 1159 -count[LRU_ACTIVE_ANON]); 1160 __mod_zone_page_state(zone, NR_INACTIVE_ANON, 1161 -count[LRU_INACTIVE_ANON]); 1162 1163 nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON]; 1164 nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE]; 1165 __mod_zone_page_state(zone, NR_ISOLATED_ANON, nr_anon); 1166 __mod_zone_page_state(zone, NR_ISOLATED_FILE, nr_file); 1167 1168 reclaim_stat->recent_scanned[0] += count[LRU_INACTIVE_ANON]; 1169 reclaim_stat->recent_scanned[0] += count[LRU_ACTIVE_ANON]; 1170 reclaim_stat->recent_scanned[1] += count[LRU_INACTIVE_FILE]; 1171 reclaim_stat->recent_scanned[1] += count[LRU_ACTIVE_FILE]; 1172 1173 spin_unlock_irq(&zone->lru_lock); 1174 1175 nr_scanned += nr_scan; 1176 nr_freed = shrink_page_list(&page_list, sc, PAGEOUT_IO_ASYNC); 1177 1178 /* 1179 * If we are direct reclaiming for contiguous pages and we do 1180 * not reclaim everything in the list, try again and wait 1181 * for IO to complete. This will stall high-order allocations 1182 * but that should be acceptable to the caller 1183 */ 1184 if (nr_freed < nr_taken && !current_is_kswapd() && 1185 lumpy_reclaim) { 1186 congestion_wait(BLK_RW_ASYNC, HZ/10); 1187 1188 /* 1189 * The attempt at page out may have made some 1190 * of the pages active, mark them inactive again. 1191 */ 1192 nr_active = clear_active_flags(&page_list, count); 1193 count_vm_events(PGDEACTIVATE, nr_active); 1194 1195 nr_freed += shrink_page_list(&page_list, sc, 1196 PAGEOUT_IO_SYNC); 1197 } 1198 1199 nr_reclaimed += nr_freed; 1200 1201 local_irq_disable(); 1202 if (current_is_kswapd()) 1203 __count_vm_events(KSWAPD_STEAL, nr_freed); 1204 __count_zone_vm_events(PGSTEAL, zone, nr_freed); 1205 1206 spin_lock(&zone->lru_lock); 1207 /* 1208 * Put back any unfreeable pages. 
1209 */ 1210 while (!list_empty(&page_list)) { 1211 int lru; 1212 page = lru_to_page(&page_list); 1213 VM_BUG_ON(PageLRU(page)); 1214 list_del(&page->lru); 1215 if (unlikely(!page_evictable(page, NULL))) { 1216 spin_unlock_irq(&zone->lru_lock); 1217 putback_lru_page(page); 1218 spin_lock_irq(&zone->lru_lock); 1219 continue; 1220 } 1221 SetPageLRU(page); 1222 lru = page_lru(page); 1223 add_page_to_lru_list(zone, page, lru); 1224 if (is_active_lru(lru)) { 1225 int file = is_file_lru(lru); 1226 reclaim_stat->recent_rotated[file]++; 1227 } 1228 if (!pagevec_add(&pvec, page)) { 1229 spin_unlock_irq(&zone->lru_lock); 1230 __pagevec_release(&pvec); 1231 spin_lock_irq(&zone->lru_lock); 1232 } 1233 } 1234 __mod_zone_page_state(zone, NR_ISOLATED_ANON, -nr_anon); 1235 __mod_zone_page_state(zone, NR_ISOLATED_FILE, -nr_file); 1236 1237 } while (nr_scanned < max_scan); 1238 1239done: 1240 spin_unlock_irq(&zone->lru_lock); 1241 pagevec_release(&pvec); 1242 return nr_reclaimed; 1243} 1244 1245/* 1246 * We are about to scan this zone at a certain priority level. If that priority 1247 * level is smaller (ie: more urgent) than the previous priority, then note 1248 * that priority level within the zone. This is done so that when the next 1249 * process comes in to scan this zone, it will immediately start out at this 1250 * priority level rather than having to build up its own scanning priority. 1251 * Here, this priority affects only the reclaim-mapped threshold. 1252 */ 1253static inline void note_zone_scanning_priority(struct zone *zone, int priority) 1254{ 1255 if (priority < zone->prev_priority) 1256 zone->prev_priority = priority; 1257} 1258 1259/* 1260 * This moves pages from the active list to the inactive list. 1261 * 1262 * We move them the other way if the page is referenced by one or more 1263 * processes, from rmap. 1264 * 1265 * If the pages are mostly unmapped, the processing is fast and it is 1266 * appropriate to hold zone->lru_lock across the whole operation. But if 1267 * the pages are mapped, the processing is slow (page_referenced()) so we 1268 * should drop zone->lru_lock around each page. It's impossible to balance 1269 * this, so instead we remove the pages from the LRU while processing them. 1270 * It is safe to rely on PG_active against the non-LRU pages in here because 1271 * nobody will play with that bit on a non-LRU page. 1272 * 1273 * The downside is that we have to touch page->_count against each page. 1274 * But we had to alter page->flags anyway. 
1275 */ 1276 1277static void move_active_pages_to_lru(struct zone *zone, 1278 struct list_head *list, 1279 enum lru_list lru) 1280{ 1281 unsigned long pgmoved = 0; 1282 struct pagevec pvec; 1283 struct page *page; 1284 1285 pagevec_init(&pvec, 1); 1286 1287 while (!list_empty(list)) { 1288 page = lru_to_page(list); 1289 1290 VM_BUG_ON(PageLRU(page)); 1291 SetPageLRU(page); 1292 1293 list_move(&page->lru, &zone->lru[lru].list); 1294 mem_cgroup_add_lru_list(page, lru); 1295 pgmoved++; 1296 1297 if (!pagevec_add(&pvec, page) || list_empty(list)) { 1298 spin_unlock_irq(&zone->lru_lock); 1299 if (buffer_heads_over_limit) 1300 pagevec_strip(&pvec); 1301 __pagevec_release(&pvec); 1302 spin_lock_irq(&zone->lru_lock); 1303 } 1304 } 1305 __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved); 1306 if (!is_active_lru(lru)) 1307 __count_vm_events(PGDEACTIVATE, pgmoved); 1308} 1309 1310static void shrink_active_list(unsigned long nr_pages, struct zone *zone, 1311 struct scan_control *sc, int priority, int file) 1312{ 1313 unsigned long nr_taken; 1314 unsigned long pgscanned; 1315 unsigned long vm_flags; 1316 LIST_HEAD(l_hold); /* The pages which were snipped off */ 1317 LIST_HEAD(l_active); 1318 LIST_HEAD(l_inactive); 1319 struct page *page; 1320 struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc); 1321 unsigned long nr_rotated = 0; 1322 1323 lru_add_drain(); 1324 spin_lock_irq(&zone->lru_lock); 1325 nr_taken = sc->isolate_pages(nr_pages, &l_hold, &pgscanned, sc->order, 1326 ISOLATE_ACTIVE, zone, 1327 sc->mem_cgroup, 1, file); 1328 /* 1329 * zone->pages_scanned is used for detect zone's oom 1330 * mem_cgroup remembers nr_scan by itself. 1331 */ 1332 if (scanning_global_lru(sc)) { 1333 zone->pages_scanned += pgscanned; 1334 } 1335 reclaim_stat->recent_scanned[file] += nr_taken; 1336 1337 __count_zone_vm_events(PGREFILL, zone, pgscanned); 1338 if (file) 1339 __mod_zone_page_state(zone, NR_ACTIVE_FILE, -nr_taken); 1340 else 1341 __mod_zone_page_state(zone, NR_ACTIVE_ANON, -nr_taken); 1342 __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken); 1343 spin_unlock_irq(&zone->lru_lock); 1344 1345 while (!list_empty(&l_hold)) { 1346 cond_resched(); 1347 page = lru_to_page(&l_hold); 1348 list_del(&page->lru); 1349 1350 if (unlikely(!page_evictable(page, NULL))) { 1351 putback_lru_page(page); 1352 continue; 1353 } 1354 1355 /* page_referenced clears PageReferenced */ 1356 if (page_mapping_inuse(page) && 1357 page_referenced(page, 0, sc->mem_cgroup, &vm_flags)) { 1358 nr_rotated++; 1359 /* 1360 * Identify referenced, file-backed active pages and 1361 * give them one more trip around the active list. So 1362 * that executable code get better chances to stay in 1363 * memory under moderate memory pressure. Anon pages 1364 * are not likely to be evicted by use-once streaming 1365 * IO, plus JVM can create lots of anon VM_EXEC pages, 1366 * so we ignore them here. 1367 */ 1368 if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) { 1369 list_add(&page->lru, &l_active); 1370 continue; 1371 } 1372 } 1373 1374 ClearPageActive(page); /* we are de-activating */ 1375 list_add(&page->lru, &l_inactive); 1376 } 1377 1378 /* 1379 * Move pages back to the lru list. 1380 */ 1381 spin_lock_irq(&zone->lru_lock); 1382 /* 1383 * Count referenced pages from currently used mappings as rotated, 1384 * even though only some of them are actually re-activated. This 1385 * helps balance scan pressure between file and anonymous pages in 1386 * get_scan_ratio. 
1387 */ 1388 reclaim_stat->recent_rotated[file] += nr_rotated; 1389 1390 move_active_pages_to_lru(zone, &l_active, 1391 LRU_ACTIVE + file * LRU_FILE); 1392 move_active_pages_to_lru(zone, &l_inactive, 1393 LRU_BASE + file * LRU_FILE); 1394 __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken); 1395 spin_unlock_irq(&zone->lru_lock); 1396} 1397 1398static int inactive_anon_is_low_global(struct zone *zone) 1399{ 1400 unsigned long active, inactive; 1401 1402 active = zone_page_state(zone, NR_ACTIVE_ANON); 1403 inactive = zone_page_state(zone, NR_INACTIVE_ANON); 1404 1405 if (inactive * zone->inactive_ratio < active) 1406 return 1; 1407 1408 return 0; 1409} 1410 1411/** 1412 * inactive_anon_is_low - check if anonymous pages need to be deactivated 1413 * @zone: zone to check 1414 * @sc: scan control of this context 1415 * 1416 * Returns true if the zone does not have enough inactive anon pages, 1417 * meaning some active anon pages need to be deactivated. 1418 */ 1419static int inactive_anon_is_low(struct zone *zone, struct scan_control *sc) 1420{ 1421 int low; 1422 1423 if (scanning_global_lru(sc)) 1424 low = inactive_anon_is_low_global(zone); 1425 else 1426 low = mem_cgroup_inactive_anon_is_low(sc->mem_cgroup); 1427 return low; 1428} 1429 1430static int inactive_file_is_low_global(struct zone *zone) 1431{ 1432 unsigned long active, inactive; 1433 1434 active = zone_page_state(zone, NR_ACTIVE_FILE); 1435 inactive = zone_page_state(zone, NR_INACTIVE_FILE); 1436 1437 return (active > inactive); 1438} 1439 1440/** 1441 * inactive_file_is_low - check if file pages need to be deactivated 1442 * @zone: zone to check 1443 * @sc: scan control of this context 1444 * 1445 * When the system is doing streaming IO, memory pressure here 1446 * ensures that active file pages get deactivated, until more 1447 * than half of the file pages are on the inactive list. 1448 * 1449 * Once we get to that situation, protect the system's working 1450 * set from being evicted by disabling active file page aging. 1451 * 1452 * This uses a different ratio than the anonymous pages, because 1453 * the page cache uses a use-once replacement algorithm. 1454 */ 1455static int inactive_file_is_low(struct zone *zone, struct scan_control *sc) 1456{ 1457 int low; 1458 1459 if (scanning_global_lru(sc)) 1460 low = inactive_file_is_low_global(zone); 1461 else 1462 low = mem_cgroup_inactive_file_is_low(sc->mem_cgroup); 1463 return low; 1464} 1465 1466static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan, 1467 struct zone *zone, struct scan_control *sc, int priority) 1468{ 1469 int file = is_file_lru(lru); 1470 1471 if (lru == LRU_ACTIVE_FILE && inactive_file_is_low(zone, sc)) { 1472 shrink_active_list(nr_to_scan, zone, sc, priority, file); 1473 return 0; 1474 } 1475 1476 if (lru == LRU_ACTIVE_ANON && inactive_anon_is_low(zone, sc)) { 1477 shrink_active_list(nr_to_scan, zone, sc, priority, file); 1478 return 0; 1479 } 1480 return shrink_inactive_list(nr_to_scan, zone, sc, priority, file); 1481} 1482 1483/* 1484 * Determine how aggressively the anon and file LRU lists should be 1485 * scanned. The relative value of each set of LRU lists is determined 1486 * by looking at the fraction of the pages scanned we did rotate back 1487 * onto the active list instead of evict. 1488 * 1489 * percent[0] specifies how much pressure to put on ram/swap backed 1490 * memory, while percent[1] determines pressure on the file LRUs. 
1491 */ 1492static void get_scan_ratio(struct zone *zone, struct scan_control *sc, 1493 unsigned long *percent) 1494{ 1495 unsigned long anon, file, free; 1496 unsigned long anon_prio, file_prio; 1497 unsigned long ap, fp; 1498 struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc); 1499 1500 anon = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) + 1501 zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON); 1502 file = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) + 1503 zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE); 1504 1505 if (scanning_global_lru(sc)) { 1506 free = zone_page_state(zone, NR_FREE_PAGES); 1507 /* If we have very few page cache pages, 1508 force-scan anon pages. */ 1509 if (unlikely(file + free <= high_wmark_pages(zone))) { 1510 percent[0] = 100; 1511 percent[1] = 0; 1512 return; 1513 } 1514 } 1515 1516 /* 1517 * OK, so we have swap space and a fair amount of page cache 1518 * pages. We use the recently rotated / recently scanned 1519 * ratios to determine how valuable each cache is. 1520 * 1521 * Because workloads change over time (and to avoid overflow) 1522 * we keep these statistics as a floating average, which ends 1523 * up weighing recent references more than old ones. 1524 * 1525 * anon in [0], file in [1] 1526 */ 1527 if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) { 1528 spin_lock_irq(&zone->lru_lock); 1529 reclaim_stat->recent_scanned[0] /= 2; 1530 reclaim_stat->recent_rotated[0] /= 2; 1531 spin_unlock_irq(&zone->lru_lock); 1532 } 1533 1534 if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) { 1535 spin_lock_irq(&zone->lru_lock); 1536 reclaim_stat->recent_scanned[1] /= 2; 1537 reclaim_stat->recent_rotated[1] /= 2; 1538 spin_unlock_irq(&zone->lru_lock); 1539 } 1540 1541 /* 1542 * With swappiness at 100, anonymous and file have the same priority. 1543 * This scanning priority is essentially the inverse of IO cost. 1544 */ 1545 anon_prio = sc->swappiness; 1546 file_prio = 200 - sc->swappiness; 1547 1548 /* 1549 * The amount of pressure on anon vs file pages is inversely 1550 * proportional to the fraction of recently scanned pages on 1551 * each list that were recently referenced and in active use. 1552 */ 1553 ap = (anon_prio + 1) * (reclaim_stat->recent_scanned[0] + 1); 1554 ap /= reclaim_stat->recent_rotated[0] + 1; 1555 1556 fp = (file_prio + 1) * (reclaim_stat->recent_scanned[1] + 1); 1557 fp /= reclaim_stat->recent_rotated[1] + 1; 1558 1559 /* Normalize to percentages */ 1560 percent[0] = 100 * ap / (ap + fp + 1); 1561 percent[1] = 100 - percent[0]; 1562} 1563 1564/* 1565 * Smallish @nr_to_scan's are deposited in @nr_saved_scan, 1566 * until we collected @swap_cluster_max pages to scan. 1567 */ 1568static unsigned long nr_scan_try_batch(unsigned long nr_to_scan, 1569 unsigned long *nr_saved_scan) 1570{ 1571 unsigned long nr; 1572 1573 *nr_saved_scan += nr_to_scan; 1574 nr = *nr_saved_scan; 1575 1576 if (nr >= SWAP_CLUSTER_MAX) 1577 *nr_saved_scan = 0; 1578 else 1579 nr = 0; 1580 1581 return nr; 1582} 1583 1584/* 1585 * This is a basic per-zone page freer. Used by both kswapd and direct reclaim. 
1586 */ 1587static void shrink_zone(int priority, struct zone *zone, 1588 struct scan_control *sc) 1589{ 1590 unsigned long nr[NR_LRU_LISTS]; 1591 unsigned long nr_to_scan; 1592 unsigned long percent[2]; /* anon @ 0; file @ 1 */ 1593 enum lru_list l; 1594 unsigned long nr_reclaimed = sc->nr_reclaimed; 1595 unsigned long nr_to_reclaim = sc->nr_to_reclaim; 1596 struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc); 1597 int noswap = 0; 1598 1599 /* If we have no swap space, do not bother scanning anon pages. */ 1600 if (!sc->may_swap || (nr_swap_pages <= 0)) { 1601 noswap = 1; 1602 percent[0] = 0; 1603 percent[1] = 100; 1604 } else 1605 get_scan_ratio(zone, sc, percent); 1606 1607 for_each_evictable_lru(l) { 1608 int file = is_file_lru(l); 1609 unsigned long scan; 1610 1611 scan = zone_nr_lru_pages(zone, sc, l); 1612 if (priority || noswap) { 1613 scan >>= priority; 1614 scan = (scan * percent[file]) / 100; 1615 } 1616 nr[l] = nr_scan_try_batch(scan, 1617 &reclaim_stat->nr_saved_scan[l]); 1618 } 1619 1620 while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] || 1621 nr[LRU_INACTIVE_FILE]) { 1622 for_each_evictable_lru(l) { 1623 if (nr[l]) { 1624 nr_to_scan = min_t(unsigned long, 1625 nr[l], SWAP_CLUSTER_MAX); 1626 nr[l] -= nr_to_scan; 1627 1628 nr_reclaimed += shrink_list(l, nr_to_scan, 1629 zone, sc, priority); 1630 } 1631 } 1632 /* 1633 * On large memory systems, scan >> priority can become 1634 * really large. This is fine for the starting priority; 1635 * we want to put equal scanning pressure on each zone. 1636 * However, if the VM has a harder time of freeing pages, 1637 * with multiple processes reclaiming pages, the total 1638 * freeing target can get unreasonably large. 1639 */ 1640 if (nr_reclaimed >= nr_to_reclaim && priority < DEF_PRIORITY) 1641 break; 1642 } 1643 1644 sc->nr_reclaimed = nr_reclaimed; 1645 1646 /* 1647 * Even if we did not try to evict anon pages at all, we want to 1648 * rebalance the anon lru active/inactive ratio. 1649 */ 1650 if (inactive_anon_is_low(zone, sc) && nr_swap_pages > 0) 1651 shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0); 1652 1653 throttle_vm_writeout(sc->gfp_mask); 1654} 1655 1656/* 1657 * This is the direct reclaim path, for page-allocating processes. We only 1658 * try to reclaim pages from zones which will satisfy the caller's allocation 1659 * request. 1660 * 1661 * We reclaim from a zone even if that zone is over high_wmark_pages(zone). 1662 * Because: 1663 * a) The caller may be trying to free *extra* pages to satisfy a higher-order 1664 * allocation or 1665 * b) The target zone may be at high_wmark_pages(zone) but the lower zones 1666 * must go *over* high_wmark_pages(zone) to satisfy the `incremental min' 1667 * zone defense algorithm. 1668 * 1669 * If a zone is deemed to be full of pinned pages then just give it a light 1670 * scan then give up on it. 1671 */ 1672static void shrink_zones(int priority, struct zonelist *zonelist, 1673 struct scan_control *sc) 1674{ 1675 enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask); 1676 struct zoneref *z; 1677 struct zone *zone; 1678 1679 sc->all_unreclaimable = 1; 1680 for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx, 1681 sc->nodemask) { 1682 if (!populated_zone(zone)) 1683 continue; 1684 /* 1685 * Take care memory controller reclaiming has small influence 1686 * to global LRU. 
1687 */ 1688 if (scanning_global_lru(sc)) { 1689 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) 1690 continue; 1691 note_zone_scanning_priority(zone, priority); 1692 1693 if (zone_is_all_unreclaimable(zone) && 1694 priority != DEF_PRIORITY) 1695 continue; /* Let kswapd poll it */ 1696 sc->all_unreclaimable = 0; 1697 } else { 1698 /* 1699 * Ignore cpuset limitation here. We just want to reduce 1700 * # of used pages by us regardless of memory shortage. 1701 */ 1702 sc->all_unreclaimable = 0; 1703 mem_cgroup_note_reclaim_priority(sc->mem_cgroup, 1704 priority); 1705 } 1706 1707 shrink_zone(priority, zone, sc); 1708 } 1709} 1710 1711/* 1712 * This is the main entry point to direct page reclaim. 1713 * 1714 * If a full scan of the inactive list fails to free enough memory then we 1715 * are "out of memory" and something needs to be killed. 1716 * 1717 * If the caller is !__GFP_FS then the probability of a failure is reasonably 1718 * high - the zone may be full of dirty or under-writeback pages, which this 1719 * caller can't do much about. We kick the writeback threads and take explicit 1720 * naps in the hope that some of these pages can be written. But if the 1721 * allocating task holds filesystem locks which prevent writeout this might not 1722 * work, and the allocation attempt will fail. 1723 * 1724 * returns: 0, if no pages reclaimed 1725 * else, the number of pages reclaimed 1726 */ 1727static unsigned long do_try_to_free_pages(struct zonelist *zonelist, 1728 struct scan_control *sc) 1729{ 1730 int priority; 1731 unsigned long ret = 0; 1732 unsigned long total_scanned = 0; 1733 struct reclaim_state *reclaim_state = current->reclaim_state; 1734 unsigned long lru_pages = 0; 1735 struct zoneref *z; 1736 struct zone *zone; 1737 enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask); 1738 unsigned long writeback_threshold; 1739 1740 delayacct_freepages_start(); 1741 1742 if (scanning_global_lru(sc)) 1743 count_vm_event(ALLOCSTALL); 1744 /* 1745 * mem_cgroup will not do shrink_slab. 1746 */ 1747 if (scanning_global_lru(sc)) { 1748 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) { 1749 1750 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) 1751 continue; 1752 1753 lru_pages += zone_reclaimable_pages(zone); 1754 } 1755 } 1756 1757 for (priority = DEF_PRIORITY; priority >= 0; priority--) { 1758 sc->nr_scanned = 0; 1759 if (!priority) 1760 disable_swap_token(); 1761 shrink_zones(priority, zonelist, sc); 1762 /* 1763 * Don't shrink slabs when reclaiming memory from 1764 * over limit cgroups 1765 */ 1766 if (scanning_global_lru(sc)) { 1767 shrink_slab(sc->nr_scanned, sc->gfp_mask, lru_pages); 1768 if (reclaim_state) { 1769 sc->nr_reclaimed += reclaim_state->reclaimed_slab; 1770 reclaim_state->reclaimed_slab = 0; 1771 } 1772 } 1773 total_scanned += sc->nr_scanned; 1774 if (sc->nr_reclaimed >= sc->nr_to_reclaim) { 1775 ret = sc->nr_reclaimed; 1776 goto out; 1777 } 1778 1779 /* 1780 * Try to write back as many pages as we just scanned. This 1781 * tends to cause slow streaming writers to write data to the 1782 * disk smoothly, at the dirtying rate, which is nice. But 1783 * that's undesirable in laptop mode, where we *want* lumpy 1784 * writeout. So in laptop mode, write out the whole world. 1785 */ 1786 writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2; 1787 if (total_scanned > writeback_threshold) { 1788 wakeup_flusher_threads(laptop_mode ? 
0 : total_scanned); 1789 sc->may_writepage = 1; 1790 } 1791 1792 /* Take a nap, wait for some writeback to complete */ 1793 if (!sc->hibernation_mode && sc->nr_scanned && 1794 priority < DEF_PRIORITY - 2) 1795 congestion_wait(BLK_RW_ASYNC, HZ/10); 1796 } 1797 /* top priority shrink_zones still had more to do? don't OOM, then */ 1798 if (!sc->all_unreclaimable && scanning_global_lru(sc)) 1799 ret = sc->nr_reclaimed; 1800out: 1801 /* 1802 * Now that we've scanned all the zones at this priority level, note 1803 * that level within the zone so that the next thread which performs 1804 * scanning of this zone will immediately start out at this priority 1805 * level. This affects only the decision whether or not to bring 1806 * mapped pages onto the inactive list. 1807 */ 1808 if (priority < 0) 1809 priority = 0; 1810 1811 if (scanning_global_lru(sc)) { 1812 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) { 1813 1814 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) 1815 continue; 1816 1817 zone->prev_priority = priority; 1818 } 1819 } else 1820 mem_cgroup_record_reclaim_priority(sc->mem_cgroup, priority); 1821 1822 delayacct_freepages_end(); 1823 1824 return ret; 1825} 1826 1827unsigned long try_to_free_pages(struct zonelist *zonelist, int order, 1828 gfp_t gfp_mask, nodemask_t *nodemask) 1829{ 1830 struct scan_control sc = { 1831 .gfp_mask = gfp_mask, 1832 .may_writepage = !laptop_mode, 1833 .nr_to_reclaim = SWAP_CLUSTER_MAX, 1834 .may_unmap = 1, 1835 .may_swap = 1, 1836 .swappiness = vm_swappiness, 1837 .order = order, 1838 .mem_cgroup = NULL, 1839 .isolate_pages = isolate_pages_global, 1840 .nodemask = nodemask, 1841 }; 1842 1843 return do_try_to_free_pages(zonelist, &sc); 1844} 1845 1846#ifdef CONFIG_CGROUP_MEM_RES_CTLR 1847 1848unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem, 1849 gfp_t gfp_mask, bool noswap, 1850 unsigned int swappiness, 1851 struct zone *zone, int nid) 1852{ 1853 struct scan_control sc = { 1854 .may_writepage = !laptop_mode, 1855 .may_unmap = 1, 1856 .may_swap = !noswap, 1857 .swappiness = swappiness, 1858 .order = 0, 1859 .mem_cgroup = mem, 1860 .isolate_pages = mem_cgroup_isolate_pages, 1861 }; 1862 nodemask_t nm = nodemask_of_node(nid); 1863 1864 sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | 1865 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK); 1866 sc.nodemask = &nm; 1867 sc.nr_reclaimed = 0; 1868 sc.nr_scanned = 0; 1869 /* 1870 * NOTE: Although we can get the priority field, using it 1871 * here is not a good idea, since it limits the pages we can scan. 1872 * if we don't reclaim here, the shrink_zone from balance_pgdat 1873 * will pick up pages from other mem cgroup's as well. We hack 1874 * the priority and make it zero. 
1875 */ 1876 shrink_zone(0, zone, &sc); 1877 return sc.nr_reclaimed; 1878} 1879 1880unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont, 1881 gfp_t gfp_mask, 1882 bool noswap, 1883 unsigned int swappiness) 1884{ 1885 struct zonelist *zonelist; 1886 struct scan_control sc = { 1887 .may_writepage = !laptop_mode, 1888 .may_unmap = 1, 1889 .may_swap = !noswap, 1890 .nr_to_reclaim = SWAP_CLUSTER_MAX, 1891 .swappiness = swappiness, 1892 .order = 0, 1893 .mem_cgroup = mem_cont, 1894 .isolate_pages = mem_cgroup_isolate_pages, 1895 .nodemask = NULL, /* we don't care the placement */ 1896 }; 1897 1898 sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | 1899 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK); 1900 zonelist = NODE_DATA(numa_node_id())->node_zonelists; 1901 return do_try_to_free_pages(zonelist, &sc); 1902} 1903#endif 1904 1905/* is kswapd sleeping prematurely? */ 1906static int sleeping_prematurely(pg_data_t *pgdat, int order, long remaining) 1907{ 1908 int i; 1909 1910 /* If a direct reclaimer woke kswapd within HZ/10, it's premature */ 1911 if (remaining) 1912 return 1; 1913 1914 /* If after HZ/10, a zone is below the high mark, it's premature */ 1915 for (i = 0; i < pgdat->nr_zones; i++) { 1916 struct zone *zone = pgdat->node_zones + i; 1917 1918 if (!populated_zone(zone)) 1919 continue; 1920 1921 if (!zone_watermark_ok(zone, order, high_wmark_pages(zone), 1922 0, 0)) 1923 return 1; 1924 } 1925 1926 return 0; 1927} 1928 1929/* 1930 * For kswapd, balance_pgdat() will work across all this node's zones until 1931 * they are all at high_wmark_pages(zone). 1932 * 1933 * Returns the number of pages which were actually freed. 1934 * 1935 * There is special handling here for zones which are full of pinned pages. 1936 * This can happen if the pages are all mlocked, or if they are all used by 1937 * device drivers (say, ZONE_DMA). Or if they are all in use by hugetlb. 1938 * What we do is to detect the case where all pages in the zone have been 1939 * scanned twice and there has been zero successful reclaim. Mark the zone as 1940 * dead and from now on, only perform a short scan. Basically we're polling 1941 * the zone for when the problem goes away. 1942 * 1943 * kswapd scans the zones in the highmem->normal->dma direction. It skips 1944 * zones which have free_pages > high_wmark_pages(zone), but once a zone is 1945 * found to have free_pages <= high_wmark_pages(zone), we scan that zone and the 1946 * lower zones regardless of the number of free pages in the lower zones. This 1947 * interoperates with the page allocator fallback scheme to ensure that aging 1948 * of pages is balanced across the zones. 1949 */ 1950static unsigned long balance_pgdat(pg_data_t *pgdat, int order) 1951{ 1952 int all_zones_ok; 1953 int priority; 1954 int i; 1955 unsigned long total_scanned; 1956 struct reclaim_state *reclaim_state = current->reclaim_state; 1957 struct scan_control sc = { 1958 .gfp_mask = GFP_KERNEL, 1959 .may_unmap = 1, 1960 .may_swap = 1, 1961 /* 1962 * kswapd doesn't want to be bailed out while reclaim. because 1963 * we want to put equal scanning pressure on each zone. 1964 */ 1965 .nr_to_reclaim = ULONG_MAX, 1966 .swappiness = vm_swappiness, 1967 .order = order, 1968 .mem_cgroup = NULL, 1969 .isolate_pages = isolate_pages_global, 1970 }; 1971 /* 1972 * temp_priority is used to remember the scanning priority at which 1973 * this zone was successfully refilled to 1974 * free_pages == high_wmark_pages(zone). 
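 * The values recorded here are copied into zone->prev_priority at the out: label below, mirroring what do_try_to_free_pages() does for direct reclaim.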
1975 */ 1976 int temp_priority[MAX_NR_ZONES]; 1977 1978loop_again: 1979 total_scanned = 0; 1980 sc.nr_reclaimed = 0; 1981 sc.may_writepage = !laptop_mode; 1982 count_vm_event(PAGEOUTRUN); 1983 1984 for (i = 0; i < pgdat->nr_zones; i++) 1985 temp_priority[i] = DEF_PRIORITY; 1986 1987 for (priority = DEF_PRIORITY; priority >= 0; priority--) { 1988 int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */ 1989 unsigned long lru_pages = 0; 1990 int has_under_min_watermark_zone = 0; 1991 1992 /* The swap token gets in the way of swapout... */ 1993 if (!priority) 1994 disable_swap_token(); 1995 1996 all_zones_ok = 1; 1997 1998 /* 1999 * Scan in the highmem->dma direction for the highest 2000 * zone which needs scanning 2001 */ 2002 for (i = pgdat->nr_zones - 1; i >= 0; i--) { 2003 struct zone *zone = pgdat->node_zones + i; 2004 2005 if (!populated_zone(zone)) 2006 continue; 2007 2008 if (zone_is_all_unreclaimable(zone) && 2009 priority != DEF_PRIORITY) 2010 continue; 2011 2012 /* 2013 * Do some background aging of the anon list, to give 2014 * pages a chance to be referenced before reclaiming. 2015 */ 2016 if (inactive_anon_is_low(zone, &sc)) 2017 shrink_active_list(SWAP_CLUSTER_MAX, zone, 2018 &sc, priority, 0); 2019 2020 if (!zone_watermark_ok(zone, order, 2021 high_wmark_pages(zone), 0, 0)) { 2022 end_zone = i; 2023 break; 2024 } 2025 } 2026 if (i < 0) 2027 goto out; 2028 2029 for (i = 0; i <= end_zone; i++) { 2030 struct zone *zone = pgdat->node_zones + i; 2031 2032 lru_pages += zone_reclaimable_pages(zone); 2033 } 2034 2035 /* 2036 * Now scan the zone in the dma->highmem direction, stopping 2037 * at the last zone which needs scanning. 2038 * 2039 * We do this because the page allocator works in the opposite 2040 * direction. This prevents the page allocator from allocating 2041 * pages behind kswapd's direction of progress, which would 2042 * cause too much scanning of the lower zones. 2043 */ 2044 for (i = 0; i <= end_zone; i++) { 2045 struct zone *zone = pgdat->node_zones + i; 2046 int nr_slab; 2047 int nid, zid; 2048 2049 if (!populated_zone(zone)) 2050 continue; 2051 2052 if (zone_is_all_unreclaimable(zone) && 2053 priority != DEF_PRIORITY) 2054 continue; 2055 2056 if (!zone_watermark_ok(zone, order, 2057 high_wmark_pages(zone), end_zone, 0)) 2058 all_zones_ok = 0; 2059 temp_priority[i] = priority; 2060 sc.nr_scanned = 0; 2061 note_zone_scanning_priority(zone, priority); 2062 2063 nid = pgdat->node_id; 2064 zid = zone_idx(zone); 2065 /* 2066 * Call soft limit reclaim before calling shrink_zone. 2067 * For now we ignore the return value 2068 */ 2069 mem_cgroup_soft_limit_reclaim(zone, order, sc.gfp_mask, 2070 nid, zid); 2071 /* 2072 * We put equal pressure on every zone, unless one 2073 * zone has way too many pages free already. 
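 * ("Way too many" means the zone already satisfies a watermark check at 8 * high_wmark_pages(zone); only zones below that generous threshold are handed to shrink_zone() below.)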
2074 */ 2075 if (!zone_watermark_ok(zone, order, 2076 8*high_wmark_pages(zone), end_zone, 0)) 2077 shrink_zone(priority, zone, &sc); 2078 reclaim_state->reclaimed_slab = 0; 2079 nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL, 2080 lru_pages); 2081 sc.nr_reclaimed += reclaim_state->reclaimed_slab; 2082 total_scanned += sc.nr_scanned; 2083 if (zone_is_all_unreclaimable(zone)) 2084 continue; 2085 if (nr_slab == 0 && zone->pages_scanned >= 2086 (zone_reclaimable_pages(zone) * 6)) 2087 zone_set_flag(zone, 2088 ZONE_ALL_UNRECLAIMABLE); 2089 /* 2090 * If we've done a decent amount of scanning and 2091 * the reclaim ratio is low, start doing writepage 2092 * even in laptop mode 2093 */ 2094 if (total_scanned > SWAP_CLUSTER_MAX * 2 && 2095 total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2) 2096 sc.may_writepage = 1; 2097 2098 /* 2099 * We are still under the min watermark. That means we have a 2100 * GFP_ATOMIC allocation failure risk. Hurry up! 2101 */ 2102 if (!zone_watermark_ok(zone, order, min_wmark_pages(zone), 2103 end_zone, 0)) 2104 has_under_min_watermark_zone = 1; 2105 2106 } 2107 if (all_zones_ok) 2108 break; /* kswapd: all done */ 2109 /* 2110 * OK, kswapd is getting into trouble. Take a nap, then take 2111 * another pass across the zones. 2112 */ 2113 if (total_scanned && (priority < DEF_PRIORITY - 2)) { 2114 if (has_under_min_watermark_zone) 2115 count_vm_event(KSWAPD_SKIP_CONGESTION_WAIT); 2116 else 2117 congestion_wait(BLK_RW_ASYNC, HZ/10); 2118 } 2119 2120 /* 2121 * We do this so kswapd doesn't build up large priorities for 2122 * example when it is freeing in parallel with allocators. It 2123 * matches the direct reclaim path behaviour in terms of impact 2124 * on zone->*_priority. 2125 */ 2126 if (sc.nr_reclaimed >= SWAP_CLUSTER_MAX) 2127 break; 2128 } 2129out: 2130 /* 2131 * Note within each zone the priority level at which this zone was 2132 * brought into a happy state, so that the next thread which scans this 2133 * zone will start out at that priority level. 2134 */ 2135 for (i = 0; i < pgdat->nr_zones; i++) { 2136 struct zone *zone = pgdat->node_zones + i; 2137 2138 zone->prev_priority = temp_priority[i]; 2139 } 2140 if (!all_zones_ok) { 2141 cond_resched(); 2142 2143 try_to_freeze(); 2144 2145 /* 2146 * Fragmentation may mean that the system cannot be 2147 * rebalanced for high-order allocations in all zones. 2148 * At this point, if nr_reclaimed < SWAP_CLUSTER_MAX, 2149 * it means the zones have been fully scanned and are still 2150 * not balanced. For high-order allocations, there is 2151 * little point trying all over again as kswapd may 2152 * loop forever. 2153 * 2154 * Instead, recheck all watermarks at order-0 as they 2155 * are the most important. If watermarks are ok, kswapd will go 2156 * back to sleep. High-order users can still perform direct 2157 * reclaim if they wish. 2158 */ 2159 if (sc.nr_reclaimed < SWAP_CLUSTER_MAX) 2160 order = sc.order = 0; 2161 2162 goto loop_again; 2163 } 2164 2165 return sc.nr_reclaimed; 2166} 2167 2168/* 2169 * The background pageout daemon, started as a kernel thread 2170 * from the init process. 2171 * 2172 * This basically trickles out pages so that we have _some_ 2173 * free memory available even if there is no other activity 2174 * that frees anything up. This is needed for things like routing 2175 * etc, where we otherwise might have all activity going on in 2176 * asynchronous contexts that cannot page things out. 
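 * (One kswapd instance is started per NUMA node by kswapd_run(); each instance balances only the zones of its own pgdat via balance_pgdat().)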
2177 * 2178 * If there are applications that are active memory-allocators 2179 * (most normal use), this basically shouldn't matter. 2180 */ 2181static int kswapd(void *p) 2182{ 2183 unsigned long order; 2184 pg_data_t *pgdat = (pg_data_t*)p; 2185 struct task_struct *tsk = current; 2186 DEFINE_WAIT(wait); 2187 struct reclaim_state reclaim_state = { 2188 .reclaimed_slab = 0, 2189 }; 2190 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); 2191 2192 lockdep_set_current_reclaim_state(GFP_KERNEL); 2193 2194 if (!cpumask_empty(cpumask)) 2195 set_cpus_allowed_ptr(tsk, cpumask); 2196 current->reclaim_state = &reclaim_state; 2197 2198 /* 2199 * Tell the memory management that we're a "memory allocator", 2200 * and that if we need more memory we should get access to it 2201 * regardless (see "__alloc_pages()"). "kswapd" should 2202 * never get caught in the normal page freeing logic. 2203 * 2204 * (Kswapd normally doesn't need memory anyway, but sometimes 2205 * you need a small amount of memory in order to be able to 2206 * page out something else, and this flag essentially protects 2207 * us from recursively trying to free more memory as we're 2208 * trying to free the first piece of memory in the first place). 2209 */ 2210 tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD; 2211 set_freezable(); 2212 2213 order = 0; 2214 for ( ; ; ) { 2215 unsigned long new_order; 2216 int ret; 2217 2218 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); 2219 new_order = pgdat->kswapd_max_order; 2220 pgdat->kswapd_max_order = 0; 2221 if (order < new_order) { 2222 /* 2223 * Don't sleep if someone wants a larger 'order' 2224 * allocation 2225 */ 2226 order = new_order; 2227 } else { 2228 if (!freezing(current) && !kthread_should_stop()) { 2229 long remaining = 0; 2230 2231 /* Try to sleep for a short interval */ 2232 if (!sleeping_prematurely(pgdat, order, remaining)) { 2233 remaining = schedule_timeout(HZ/10); 2234 finish_wait(&pgdat->kswapd_wait, &wait); 2235 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); 2236 } 2237 2238 /* 2239 * After a short sleep, check if it was a 2240 * premature sleep. If not, then go fully 2241 * to sleep until explicitly woken up 2242 */ 2243 if (!sleeping_prematurely(pgdat, order, remaining)) 2244 schedule(); 2245 else { 2246 if (remaining) 2247 count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY); 2248 else 2249 count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY); 2250 } 2251 } 2252 2253 order = pgdat->kswapd_max_order; 2254 } 2255 finish_wait(&pgdat->kswapd_wait, &wait); 2256 2257 ret = try_to_freeze(); 2258 if (kthread_should_stop()) 2259 break; 2260 2261 /* 2262 * We can speed up thawing tasks if we don't call balance_pgdat 2263 * after returning from the refrigerator 2264 */ 2265 if (!ret) 2266 balance_pgdat(pgdat, order); 2267 } 2268 return 0; 2269} 2270 2271/* 2272 * A zone is low on free memory, so wake its kswapd task to service it. 2273 */ 2274void wakeup_kswapd(struct zone *zone, int order) 2275{ 2276 pg_data_t *pgdat; 2277 2278 if (!populated_zone(zone)) 2279 return; 2280 2281 pgdat = zone->zone_pgdat; 2282 if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0)) 2283 return; 2284 if (pgdat->kswapd_max_order < order) 2285 pgdat->kswapd_max_order = order; 2286 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) 2287 return; 2288 if (!waitqueue_active(&pgdat->kswapd_wait)) 2289 return; 2290 wake_up_interruptible(&pgdat->kswapd_wait); 2291} 2292 2293/* 2294 * The reclaimable count would be mostly accurate. 
2295 * The less reclaimable pages may be 2296 * - mlocked pages, which will be moved to unevictable list when encountered 2297 * - mapped pages, which may require several travels to be reclaimed 2298 * - dirty pages, which is not "instantly" reclaimable 2299 */ 2300unsigned long global_reclaimable_pages(void) 2301{ 2302 int nr; 2303 2304 nr = global_page_state(NR_ACTIVE_FILE) + 2305 global_page_state(NR_INACTIVE_FILE); 2306 2307 if (nr_swap_pages > 0) 2308 nr += global_page_state(NR_ACTIVE_ANON) + 2309 global_page_state(NR_INACTIVE_ANON); 2310 2311 return nr; 2312} 2313 2314unsigned long zone_reclaimable_pages(struct zone *zone) 2315{ 2316 int nr; 2317 2318 nr = zone_page_state(zone, NR_ACTIVE_FILE) + 2319 zone_page_state(zone, NR_INACTIVE_FILE); 2320 2321 if (nr_swap_pages > 0) 2322 nr += zone_page_state(zone, NR_ACTIVE_ANON) + 2323 zone_page_state(zone, NR_INACTIVE_ANON); 2324 2325 return nr; 2326} 2327 2328#ifdef CONFIG_HIBERNATION 2329/* 2330 * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of 2331 * freed pages. 2332 * 2333 * Rather than trying to age LRUs the aim is to preserve the overall 2334 * LRU order by reclaiming preferentially 2335 * inactive > active > active referenced > active mapped 2336 */ 2337unsigned long shrink_all_memory(unsigned long nr_to_reclaim) 2338{ 2339 struct reclaim_state reclaim_state; 2340 struct scan_control sc = { 2341 .gfp_mask = GFP_HIGHUSER_MOVABLE, 2342 .may_swap = 1, 2343 .may_unmap = 1, 2344 .may_writepage = 1, 2345 .nr_to_reclaim = nr_to_reclaim, 2346 .hibernation_mode = 1, 2347 .swappiness = vm_swappiness, 2348 .order = 0, 2349 .isolate_pages = isolate_pages_global, 2350 }; 2351 struct zonelist * zonelist = node_zonelist(numa_node_id(), sc.gfp_mask); 2352 struct task_struct *p = current; 2353 unsigned long nr_reclaimed; 2354 2355 p->flags |= PF_MEMALLOC; 2356 lockdep_set_current_reclaim_state(sc.gfp_mask); 2357 reclaim_state.reclaimed_slab = 0; 2358 p->reclaim_state = &reclaim_state; 2359 2360 nr_reclaimed = do_try_to_free_pages(zonelist, &sc); 2361 2362 p->reclaim_state = NULL; 2363 lockdep_clear_current_reclaim_state(); 2364 p->flags &= ~PF_MEMALLOC; 2365 2366 return nr_reclaimed; 2367} 2368#endif /* CONFIG_HIBERNATION */ 2369 2370/* It's optimal to keep kswapds on the same CPUs as their memory, but 2371 not required for correctness. So if the last cpu in a node goes 2372 away, we get changed to run anywhere: as the first one comes back, 2373 restore their cpu bindings. */ 2374static int __devinit cpu_callback(struct notifier_block *nfb, 2375 unsigned long action, void *hcpu) 2376{ 2377 int nid; 2378 2379 if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) { 2380 for_each_node_state(nid, N_HIGH_MEMORY) { 2381 pg_data_t *pgdat = NODE_DATA(nid); 2382 const struct cpumask *mask; 2383 2384 mask = cpumask_of_node(pgdat->node_id); 2385 2386 if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids) 2387 /* One of our CPUs online: restore mask */ 2388 set_cpus_allowed_ptr(pgdat->kswapd, mask); 2389 } 2390 } 2391 return NOTIFY_OK; 2392} 2393 2394/* 2395 * This kswapd start function will be called by init and node-hot-add. 2396 * On node-hot-add, kswapd will moved to proper cpus if cpus are hot-added. 
2397 */ 2398int kswapd_run(int nid) 2399{ 2400 pg_data_t *pgdat = NODE_DATA(nid); 2401 int ret = 0; 2402 2403 if (pgdat->kswapd) 2404 return 0; 2405 2406 pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid); 2407 if (IS_ERR(pgdat->kswapd)) { 2408 /* failure at boot is fatal */ 2409 BUG_ON(system_state == SYSTEM_BOOTING); 2410 printk("Failed to start kswapd on node %d\n",nid); 2411 ret = -1; 2412 } 2413 return ret; 2414} 2415 2416/* 2417 * Called by memory hotplug when all memory in a node is offlined. 2418 */ 2419void kswapd_stop(int nid) 2420{ 2421 struct task_struct *kswapd = NODE_DATA(nid)->kswapd; 2422 2423 if (kswapd) 2424 kthread_stop(kswapd); 2425} 2426 2427static int __init kswapd_init(void) 2428{ 2429 int nid; 2430 2431 swap_setup(); 2432 for_each_node_state(nid, N_HIGH_MEMORY) 2433 kswapd_run(nid); 2434 hotcpu_notifier(cpu_callback, 0); 2435 return 0; 2436} 2437 2438module_init(kswapd_init) 2439 2440#ifdef CONFIG_NUMA 2441/* 2442 * Zone reclaim mode 2443 * 2444 * If non-zero call zone_reclaim when the number of free pages falls below 2445 * the watermarks. 2446 */ 2447int zone_reclaim_mode __read_mostly; 2448 2449#define RECLAIM_OFF 0 2450#define RECLAIM_ZONE (1<<0) /* Run shrink_inactive_list on the zone */ 2451#define RECLAIM_WRITE (1<<1) /* Writeout pages during reclaim */ 2452#define RECLAIM_SWAP (1<<2) /* Swap pages out during reclaim */ 2453 2454/* 2455 * Priority for ZONE_RECLAIM. This determines the fraction of pages 2456 * of a node considered for each zone_reclaim. 4 scans 1/16th of 2457 * a zone. 2458 */ 2459#define ZONE_RECLAIM_PRIORITY 4 2460 2461/* 2462 * Percentage of pages in a zone that must be unmapped for zone_reclaim to 2463 * occur. 2464 */ 2465int sysctl_min_unmapped_ratio = 1; 2466 2467/* 2468 * If the number of slab pages in a zone grows beyond this percentage then 2469 * slab reclaim needs to occur. 2470 */ 2471int sysctl_min_slab_ratio = 5; 2472 2473static inline unsigned long zone_unmapped_file_pages(struct zone *zone) 2474{ 2475 unsigned long file_mapped = zone_page_state(zone, NR_FILE_MAPPED); 2476 unsigned long file_lru = zone_page_state(zone, NR_INACTIVE_FILE) + 2477 zone_page_state(zone, NR_ACTIVE_FILE); 2478 2479 /* 2480 * It's possible for there to be more file mapped pages than 2481 * accounted for by the pages on the file LRU lists because 2482 * tmpfs pages accounted for as ANON can also be FILE_MAPPED 2483 */ 2484 return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0; 2485} 2486 2487/* Work out how many page cache pages we can reclaim in this reclaim_mode */ 2488static long zone_pagecache_reclaimable(struct zone *zone) 2489{ 2490 long nr_pagecache_reclaimable; 2491 long delta = 0; 2492 2493 /* 2494 * If RECLAIM_SWAP is set, then all file pages are considered 2495 * potentially reclaimable. 
Otherwise, we have to worry about 2496 * pages like swapcache and zone_unmapped_file_pages() provides 2497 * a better estimate 2498 */ 2499 if (zone_reclaim_mode & RECLAIM_SWAP) 2500 nr_pagecache_reclaimable = zone_page_state(zone, NR_FILE_PAGES); 2501 else 2502 nr_pagecache_reclaimable = zone_unmapped_file_pages(zone); 2503 2504 /* If we can't clean pages, remove dirty pages from consideration */ 2505 if (!(zone_reclaim_mode & RECLAIM_WRITE)) 2506 delta += zone_page_state(zone, NR_FILE_DIRTY); 2507 2508 /* Watch for any possible underflows due to delta */ 2509 if (unlikely(delta > nr_pagecache_reclaimable)) 2510 delta = nr_pagecache_reclaimable; 2511 2512 return nr_pagecache_reclaimable - delta; 2513} 2514 2515/* 2516 * Try to free up some pages from this zone through reclaim. 2517 */ 2518static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) 2519{ 2520 /* Minimum pages needed in order to stay on node */ 2521 const unsigned long nr_pages = 1 << order; 2522 struct task_struct *p = current; 2523 struct reclaim_state reclaim_state; 2524 int priority; 2525 struct scan_control sc = { 2526 .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE), 2527 .may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP), 2528 .may_swap = 1, 2529 .nr_to_reclaim = max_t(unsigned long, nr_pages, 2530 SWAP_CLUSTER_MAX), 2531 .gfp_mask = gfp_mask, 2532 .swappiness = vm_swappiness, 2533 .order = order, 2534 .isolate_pages = isolate_pages_global, 2535 }; 2536 unsigned long slab_reclaimable; 2537 2538 disable_swap_token(); 2539 cond_resched(); 2540 /* 2541 * We need to be able to allocate from the reserves for RECLAIM_SWAP 2542 * and we also need to be able to write out pages for RECLAIM_WRITE 2543 * and RECLAIM_SWAP. 2544 */ 2545 p->flags |= PF_MEMALLOC | PF_SWAPWRITE; 2546 reclaim_state.reclaimed_slab = 0; 2547 p->reclaim_state = &reclaim_state; 2548 2549 if (zone_pagecache_reclaimable(zone) > zone->min_unmapped_pages) { 2550 /* 2551 * Free memory by calling shrink zone with increasing 2552 * priorities until we have enough memory freed. 2553 */ 2554 priority = ZONE_RECLAIM_PRIORITY; 2555 do { 2556 note_zone_scanning_priority(zone, priority); 2557 shrink_zone(priority, zone, &sc); 2558 priority--; 2559 } while (priority >= 0 && sc.nr_reclaimed < nr_pages); 2560 } 2561 2562 slab_reclaimable = zone_page_state(zone, NR_SLAB_RECLAIMABLE); 2563 if (slab_reclaimable > zone->min_slab_pages) { 2564 /* 2565 * shrink_slab() does not currently allow us to determine how 2566 * many pages were freed in this zone. So we take the current 2567 * number of slab pages and shake the slab until it is reduced 2568 * by the same nr_pages that we used for reclaiming unmapped 2569 * pages. 2570 * 2571 * Note that shrink_slab will free memory on all zones and may 2572 * take a long time. 2573 */ 2574 while (shrink_slab(sc.nr_scanned, gfp_mask, order) && 2575 zone_page_state(zone, NR_SLAB_RECLAIMABLE) > 2576 slab_reclaimable - nr_pages) 2577 ; 2578 2579 /* 2580 * Update nr_reclaimed by the number of slab pages we 2581 * reclaimed from this zone. 
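 * (slab_reclaimable was sampled from this zone's NR_SLAB_RECLAIMABLE counter before the shrink_slab() loop, so the difference taken below approximates the slab pages this zone lost.)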
2582 */ 2583 sc.nr_reclaimed += slab_reclaimable - 2584 zone_page_state(zone, NR_SLAB_RECLAIMABLE); 2585 } 2586 2587 p->reclaim_state = NULL; 2588 current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE); 2589 return sc.nr_reclaimed >= nr_pages; 2590} 2591 2592int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) 2593{ 2594 int node_id; 2595 int ret; 2596 2597 /* 2598 * Zone reclaim reclaims unmapped file backed pages and 2599 * slab pages if we are over the defined limits. 2600 * 2601 * A small portion of unmapped file backed pages is needed for 2602 * file I/O otherwise pages read by file I/O will be immediately 2603 * thrown out if the zone is overallocated. So we do not reclaim 2604 * if less than a specified percentage of the zone is used by 2605 * unmapped file backed pages. 2606 */ 2607 if (zone_pagecache_reclaimable(zone) <= zone->min_unmapped_pages && 2608 zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages) 2609 return ZONE_RECLAIM_FULL; 2610 2611 if (zone_is_all_unreclaimable(zone)) 2612 return ZONE_RECLAIM_FULL; 2613 2614 /* 2615 * Do not scan if the allocation should not be delayed. 2616 */ 2617 if (!(gfp_mask & __GFP_WAIT) || (current->flags & PF_MEMALLOC)) 2618 return ZONE_RECLAIM_NOSCAN; 2619 2620 /* 2621 * Only run zone reclaim on the local zone or on zones that do not 2622 * have associated processors. This will favor the local processor 2623 * over remote processors and spread off node memory allocations 2624 * as wide as possible. 2625 */ 2626 node_id = zone_to_nid(zone); 2627 if (node_state(node_id, N_CPU) && node_id != numa_node_id()) 2628 return ZONE_RECLAIM_NOSCAN; 2629 2630 if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED)) 2631 return ZONE_RECLAIM_NOSCAN; 2632 2633 ret = __zone_reclaim(zone, gfp_mask, order); 2634 zone_clear_flag(zone, ZONE_RECLAIM_LOCKED); 2635 2636 if (!ret) 2637 count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED); 2638 2639 return ret; 2640} 2641#endif 2642 2643/* 2644 * page_evictable - test whether a page is evictable 2645 * @page: the page to test 2646 * @vma: the VMA in which the page is or will be mapped, may be NULL 2647 * 2648 * Test whether page is evictable--i.e., should be placed on active/inactive 2649 * lists vs unevictable list. The vma argument is !NULL when called from the 2650 * fault path to determine how to instantate a new page. 2651 * 2652 * Reasons page might not be evictable: 2653 * (1) page's mapping marked unevictable 2654 * (2) page is part of an mlocked VMA 2655 * 2656 */ 2657int page_evictable(struct page *page, struct vm_area_struct *vma) 2658{ 2659 2660 if (mapping_unevictable(page_mapping(page))) 2661 return 0; 2662 2663 if (PageMlocked(page) || (vma && is_mlocked_vma(vma, page))) 2664 return 0; 2665 2666 return 1; 2667} 2668 2669/** 2670 * check_move_unevictable_page - check page for evictability and move to appropriate zone lru list 2671 * @page: page to check evictability and move to appropriate lru list 2672 * @zone: zone page is in 2673 * 2674 * Checks a page for evictability and moves the page to the appropriate 2675 * zone lru list. 2676 * 2677 * Restrictions: zone->lru_lock must be held, page must be on LRU and must 2678 * have PageUnevictable set. 
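 * If the page becomes evictable again only after it has been put back on the unevictable list, the function notices and retries so the page is not left stranded there.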
2679 */ 2680static void check_move_unevictable_page(struct page *page, struct zone *zone) 2681{ 2682 VM_BUG_ON(PageActive(page)); 2683 2684retry: 2685 ClearPageUnevictable(page); 2686 if (page_evictable(page, NULL)) { 2687 enum lru_list l = page_lru_base_type(page); 2688 2689 __dec_zone_state(zone, NR_UNEVICTABLE); 2690 list_move(&page->lru, &zone->lru[l].list); 2691 mem_cgroup_move_lists(page, LRU_UNEVICTABLE, l); 2692 __inc_zone_state(zone, NR_INACTIVE_ANON + l); 2693 __count_vm_event(UNEVICTABLE_PGRESCUED); 2694 } else { 2695 /* 2696 * rotate unevictable list 2697 */ 2698 SetPageUnevictable(page); 2699 list_move(&page->lru, &zone->lru[LRU_UNEVICTABLE].list); 2700 mem_cgroup_rotate_lru_list(page, LRU_UNEVICTABLE); 2701 if (page_evictable(page, NULL)) 2702 goto retry; 2703 } 2704} 2705 2706/** 2707 * scan_mapping_unevictable_pages - scan an address space for evictable pages 2708 * @mapping: struct address_space to scan for evictable pages 2709 * 2710 * Scan all pages in mapping. Check unevictable pages for 2711 * evictability and move them to the appropriate zone lru list. 2712 */ 2713void scan_mapping_unevictable_pages(struct address_space *mapping) 2714{ 2715 pgoff_t next = 0; 2716 pgoff_t end = (i_size_read(mapping->host) + PAGE_CACHE_SIZE - 1) >> 2717 PAGE_CACHE_SHIFT; 2718 struct zone *zone; 2719 struct pagevec pvec; 2720 2721 if (mapping->nrpages == 0) 2722 return; 2723 2724 pagevec_init(&pvec, 0); 2725 while (next < end && 2726 pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) { 2727 int i; 2728 int pg_scanned = 0; 2729 2730 zone = NULL; 2731 2732 for (i = 0; i < pagevec_count(&pvec); i++) { 2733 struct page *page = pvec.pages[i]; 2734 pgoff_t page_index = page->index; 2735 struct zone *pagezone = page_zone(page); 2736 2737 pg_scanned++; 2738 if (page_index > next) 2739 next = page_index; 2740 next++; 2741 2742 if (pagezone != zone) { 2743 if (zone) 2744 spin_unlock_irq(&zone->lru_lock); 2745 zone = pagezone; 2746 spin_lock_irq(&zone->lru_lock); 2747 } 2748 2749 if (PageLRU(page) && PageUnevictable(page)) 2750 check_move_unevictable_page(page, zone); 2751 } 2752 if (zone) 2753 spin_unlock_irq(&zone->lru_lock); 2754 pagevec_release(&pvec); 2755 2756 count_vm_events(UNEVICTABLE_PGSCANNED, pg_scanned); 2757 } 2758 2759} 2760 2761/** 2762 * scan_zone_unevictable_pages - check unevictable list for evictable pages 2763 * @zone - zone of which to scan the unevictable list 2764 * 2765 * Scan @zone's unevictable LRU lists to check for pages that have become 2766 * evictable. Move those that have to @zone's inactive list where they 2767 * become candidates for reclaim, unless shrink_inactive_zone() decides 2768 * to reactivate them. Pages that are still unevictable are rotated 2769 * back onto @zone's unevictable list. 
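 * The list is walked in batches of SCAN_UNEVICTABLE_BATCH_SIZE pages so that zone->lru_lock is dropped and re-taken between batches rather than held across the whole scan.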
2770 */ 2771#define SCAN_UNEVICTABLE_BATCH_SIZE 16UL /* arbitrary lock hold batch size */ 2772static void scan_zone_unevictable_pages(struct zone *zone) 2773{ 2774 struct list_head *l_unevictable = &zone->lru[LRU_UNEVICTABLE].list; 2775 unsigned long scan; 2776 unsigned long nr_to_scan = zone_page_state(zone, NR_UNEVICTABLE); 2777 2778 while (nr_to_scan > 0) { 2779 unsigned long batch_size = min(nr_to_scan, 2780 SCAN_UNEVICTABLE_BATCH_SIZE); 2781 2782 spin_lock_irq(&zone->lru_lock); 2783 for (scan = 0; scan < batch_size; scan++) { 2784 struct page *page = lru_to_page(l_unevictable); 2785 2786 if (!trylock_page(page)) 2787 continue; 2788 2789 prefetchw_prev_lru_page(page, l_unevictable, flags); 2790 2791 if (likely(PageLRU(page) && PageUnevictable(page))) 2792 check_move_unevictable_page(page, zone); 2793 2794 unlock_page(page); 2795 } 2796 spin_unlock_irq(&zone->lru_lock); 2797 2798 nr_to_scan -= batch_size; 2799 } 2800} 2801 2802 2803/** 2804 * scan_all_zones_unevictable_pages - scan all unevictable lists for evictable pages 2805 * 2806 * A really big hammer: scan all zones' unevictable LRU lists to check for 2807 * pages that have become evictable. Move those back to the zones' 2808 * inactive list where they become candidates for reclaim. 2809 * This occurs when, e.g., we have unswappable pages on the unevictable lists, 2810 * and we add swap to the system. As such, it runs in the context of a task 2811 * that has possibly/probably made some previously unevictable pages 2812 * evictable. 2813 */ 2814static void scan_all_zones_unevictable_pages(void) 2815{ 2816 struct zone *zone; 2817 2818 for_each_zone(zone) { 2819 scan_zone_unevictable_pages(zone); 2820 } 2821} 2822 2823/* 2824 * scan_unevictable_pages [vm] sysctl handler. On demand re-scan of 2825 * all nodes' unevictable lists for evictable pages 2826 */ 2827unsigned long scan_unevictable_pages; 2828 2829int scan_unevictable_handler(struct ctl_table *table, int write, 2830 void __user *buffer, 2831 size_t *length, loff_t *ppos) 2832{ 2833 proc_doulongvec_minmax(table, write, buffer, length, ppos); 2834 2835 if (write && *(unsigned long *)table->data) 2836 scan_all_zones_unevictable_pages(); 2837 2838 scan_unevictable_pages = 0; 2839 return 0; 2840} 2841 2842/* 2843 * per node 'scan_unevictable_pages' attribute. On demand re-scan of 2844 * a specified node's per zone unevictable lists for evictable pages. 2845 */ 2846 2847static ssize_t read_scan_unevictable_node(struct sys_device *dev, 2848 struct sysdev_attribute *attr, 2849 char *buf) 2850{ 2851 return sprintf(buf, "0\n"); /* always zero; should fit... 
*/ 2852} 2853 2854static ssize_t write_scan_unevictable_node(struct sys_device *dev, 2855 struct sysdev_attribute *attr, 2856 const char *buf, size_t count) 2857{ 2858 struct zone *node_zones = NODE_DATA(dev->id)->node_zones; 2859 struct zone *zone; 2860 unsigned long res; 2861 unsigned long req = strict_strtoul(buf, 10, &res); 2862 2863 if (!req) 2864 return 1; /* zero is no-op */ 2865 2866 for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) { 2867 if (!populated_zone(zone)) 2868 continue; 2869 scan_zone_unevictable_pages(zone); 2870 } 2871 return 1; 2872} 2873 2874 2875static SYSDEV_ATTR(scan_unevictable_pages, S_IRUGO | S_IWUSR, 2876 read_scan_unevictable_node, 2877 write_scan_unevictable_node); 2878 2879int scan_unevictable_register_node(struct node *node) 2880{ 2881 return sysdev_create_file(&node->sysdev, &attr_scan_unevictable_pages); 2882} 2883 2884void scan_unevictable_unregister_node(struct node *node) 2885{ 2886 sysdev_remove_file(&node->sysdev, &attr_scan_unevictable_pages); 2887} 2888 2889