vmscan.c revision 7335084d446b83cbcb15da80497d03f0c1dc9e21
/*
 * linux/mm/vmscan.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 * Swap reorganised 29.12.95, Stephen Tweedie.
 * kswapd added: 7.1.96  sct
 * Removed kswapd_ctl limits, and swap out as many pages as needed
 * to bring the system back to freepages.high: 2.4.97, Rik van Riel.
 * Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
 * Multiqueue VM started 5.8.00, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/vmstat.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>	/* for try_to_release_page(),
					buffer_heads_over_limit */
#include <linux/mm_inline.h>
#include <linux/pagevec.h>
#include <linux/backing-dev.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/compaction.h>
#include <linux/notifier.h>
#include <linux/rwsem.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/memcontrol.h>
#include <linux/delayacct.h>
#include <linux/sysctl.h>
#include <linux/oom.h>
#include <linux/prefetch.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>

#include <linux/swapops.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/vmscan.h>

/*
 * reclaim_mode determines how the inactive list is shrunk
 * RECLAIM_MODE_SINGLE: Reclaim only order-0 pages
 * RECLAIM_MODE_ASYNC:  Do not block
 * RECLAIM_MODE_SYNC:   Allow blocking e.g. call wait_on_page_writeback
 * RECLAIM_MODE_LUMPYRECLAIM: For high-order allocations, take a reference
 *			page from the LRU and reclaim all pages within a
 *			naturally aligned range
 * RECLAIM_MODE_COMPACTION: For high-order allocations, reclaim a number of
 *			order-0 pages and then compact the zone
 */
typedef unsigned __bitwise__ reclaim_mode_t;
#define RECLAIM_MODE_SINGLE		((__force reclaim_mode_t)0x01u)
#define RECLAIM_MODE_ASYNC		((__force reclaim_mode_t)0x02u)
#define RECLAIM_MODE_SYNC		((__force reclaim_mode_t)0x04u)
#define RECLAIM_MODE_LUMPYRECLAIM	((__force reclaim_mode_t)0x08u)
#define RECLAIM_MODE_COMPACTION		((__force reclaim_mode_t)0x10u)
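
/*
 * Illustrative note on how these flags combine (derivable from
 * set_reclaim_mode() below, not part of the original comments): on a
 * kernel built without compaction, a costly high-order allocation that
 * may block ends up with
 *
 *	sc->reclaim_mode = RECLAIM_MODE_LUMPYRECLAIM | RECLAIM_MODE_SYNC;
 *
 * while reset_reclaim_mode() always restores the default order-0,
 * non-blocking combination
 *
 *	sc->reclaim_mode = RECLAIM_MODE_SINGLE | RECLAIM_MODE_ASYNC;
 */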
struct scan_control {
	/* Incremented by the number of inactive pages that were scanned */
	unsigned long nr_scanned;

	/* Number of pages freed so far during a call to shrink_zones() */
	unsigned long nr_reclaimed;

	/* How many pages shrink_list() should reclaim */
	unsigned long nr_to_reclaim;

	unsigned long hibernation_mode;

	/* This context's GFP mask */
	gfp_t gfp_mask;

	int may_writepage;

	/* Can mapped pages be reclaimed? */
	int may_unmap;

	/* Can pages be swapped as part of reclaim? */
	int may_swap;

	int order;

	/*
	 * Intends to reclaim enough contiguous memory rather than just
	 * enough memory, i.e. the mode for high-order allocations.
	 */
	reclaim_mode_t reclaim_mode;

	/*
	 * The memory cgroup that hit its limit and as a result is the
	 * primary target of this reclaim invocation.
	 */
	struct mem_cgroup *target_mem_cgroup;

	/*
	 * Nodemask of nodes allowed by the caller. If NULL, all nodes
	 * are scanned.
	 */
	nodemask_t *nodemask;
};

struct mem_cgroup_zone {
	struct mem_cgroup *mem_cgroup;
	struct zone *zone;
};

#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))

#ifdef ARCH_HAS_PREFETCH
#define prefetch_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
									\
			prev = lru_to_page(&(_page->lru));		\
			prefetch(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
#endif

#ifdef ARCH_HAS_PREFETCHW
#define prefetchw_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
									\
			prev = lru_to_page(&(_page->lru));		\
			prefetchw(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
#endif

/*
 * From 0 .. 100.  Higher means more swappy.
 */
int vm_swappiness = 60;
long vm_total_pages;	/* The total number of pages which the VM controls */

static LIST_HEAD(shrinker_list);
static DECLARE_RWSEM(shrinker_rwsem);

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
static bool global_reclaim(struct scan_control *sc)
{
	return !sc->target_mem_cgroup;
}

static bool scanning_global_lru(struct mem_cgroup_zone *mz)
{
	return !mz->mem_cgroup;
}
#else
static bool global_reclaim(struct scan_control *sc)
{
	return true;
}

static bool scanning_global_lru(struct mem_cgroup_zone *mz)
{
	return true;
}
#endif

static struct zone_reclaim_stat *get_reclaim_stat(struct mem_cgroup_zone *mz)
{
	if (!scanning_global_lru(mz))
		return mem_cgroup_get_reclaim_stat(mz->mem_cgroup, mz->zone);

	return &mz->zone->reclaim_stat;
}

static unsigned long zone_nr_lru_pages(struct mem_cgroup_zone *mz,
				       enum lru_list lru)
{
	if (!scanning_global_lru(mz))
		return mem_cgroup_zone_nr_lru_pages(mz->mem_cgroup,
						    zone_to_nid(mz->zone),
						    zone_idx(mz->zone),
						    BIT(lru));

	return zone_page_state(mz->zone, NR_LRU_BASE + lru);
}


/*
 * Add a shrinker callback to be called from the vm.
 */
void register_shrinker(struct shrinker *shrinker)
{
	atomic_long_set(&shrinker->nr_in_batch, 0);
	down_write(&shrinker_rwsem);
	list_add_tail(&shrinker->list, &shrinker_list);
	up_write(&shrinker_rwsem);
}
EXPORT_SYMBOL(register_shrinker);

/*
 * Remove one.
 */
void unregister_shrinker(struct shrinker *shrinker)
{
	down_write(&shrinker_rwsem);
	list_del(&shrinker->list);
	up_write(&shrinker_rwsem);
}
EXPORT_SYMBOL(unregister_shrinker);

static inline int do_shrinker_shrink(struct shrinker *shrinker,
				     struct shrink_control *sc,
				     unsigned long nr_to_scan)
{
	sc->nr_to_scan = nr_to_scan;
	return (*shrinker->shrink)(shrinker, sc);
}

#define SHRINK_BATCH 128
/*
 * Call the shrink functions to age shrinkable caches.
 *
 * Here we assume it costs one seek to replace a lru page and that it also
 * takes a seek to recreate a cache object.  With this in mind we age equal
 * percentages of the lru and ageable caches.  This should balance the seeks
 * generated by these structures.
 *
 * If the vm encountered mapped pages on the LRU it increases the pressure on
 * slab to avoid swapping.
 *
 * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
 *
 * `lru_pages' represents the number of on-LRU pages in all the zones which
 * are eligible for the caller's allocation attempt.  It is used for balancing
 * slab reclaim versus page reclaim.
 *
 * Returns the number of slab objects which we shrunk.
 */
unsigned long shrink_slab(struct shrink_control *shrink,
			  unsigned long nr_pages_scanned,
			  unsigned long lru_pages)
{
	struct shrinker *shrinker;
	unsigned long ret = 0;

	if (nr_pages_scanned == 0)
		nr_pages_scanned = SWAP_CLUSTER_MAX;

	if (!down_read_trylock(&shrinker_rwsem)) {
		/* Assume we'll be able to shrink next time */
		ret = 1;
		goto out;
	}

	list_for_each_entry(shrinker, &shrinker_list, list) {
		unsigned long long delta;
		long total_scan;
		long max_pass;
		int shrink_ret = 0;
		long nr;
		long new_nr;
		long batch_size = shrinker->batch ? shrinker->batch
						  : SHRINK_BATCH;

		max_pass = do_shrinker_shrink(shrinker, shrink, 0);
		if (max_pass <= 0)
			continue;

		/*
		 * Copy the current shrinker scan count into a local variable
		 * and zero it so that other concurrent shrinker invocations
		 * don't also do this scanning work.
		 */
		nr = atomic_long_xchg(&shrinker->nr_in_batch, 0);

		total_scan = nr;
		delta = (4 * nr_pages_scanned) / shrinker->seeks;
		delta *= max_pass;
		do_div(delta, lru_pages + 1);
		total_scan += delta;
		if (total_scan < 0) {
			printk(KERN_ERR "shrink_slab: %pF negative objects to "
			       "delete nr=%ld\n",
			       shrinker->shrink, total_scan);
			total_scan = max_pass;
		}

		/*
		 * We need to avoid excessive windup on filesystem shrinkers
		 * due to large numbers of GFP_NOFS allocations causing the
		 * shrinkers to return -1 all the time.  This results in a
		 * large nr being built up, so when a shrink that can do some
		 * work comes along it empties the entire cache due to
		 * nr >>> max_pass.  This is bad for sustaining a working set
		 * in memory.
		 *
		 * Hence only allow the shrinker to scan the entire cache when
		 * a large delta change is calculated directly.
		 */
		if (delta < max_pass / 4)
			total_scan = min(total_scan, max_pass / 2);

		/*
		 * Avoid the risk of looping forever due to a too-large nr
		 * value: never try to free more than twice the estimated
		 * number of freeable entries.
		 */
		if (total_scan > max_pass * 2)
			total_scan = max_pass * 2;

		trace_mm_shrink_slab_start(shrinker, shrink, nr,
					nr_pages_scanned, lru_pages,
					max_pass, delta, total_scan);

		while (total_scan >= batch_size) {
			int nr_before;

			nr_before = do_shrinker_shrink(shrinker, shrink, 0);
			shrink_ret = do_shrinker_shrink(shrinker, shrink,
							batch_size);
			if (shrink_ret == -1)
				break;
			if (shrink_ret < nr_before)
				ret += nr_before - shrink_ret;
			count_vm_events(SLABS_SCANNED, batch_size);
			total_scan -= batch_size;

			cond_resched();
		}

		/*
		 * Move the unused scan count back into the shrinker in a
		 * manner that handles concurrent updates.  If we exhausted
		 * the scan, there is no need to do an update.
		 */
		if (total_scan > 0)
			new_nr = atomic_long_add_return(total_scan,
					&shrinker->nr_in_batch);
		else
			new_nr = atomic_long_read(&shrinker->nr_in_batch);

		trace_mm_shrink_slab_end(shrinker, shrink_ret, nr, new_nr);
	}
	up_read(&shrinker_rwsem);
out:
	cond_resched();
	return ret;
}
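
/*
 * Worked example of the scan-count arithmetic above, with assumed,
 * purely illustrative values: nr_pages_scanned = 1000 LRU pages,
 * lru_pages = 100000, and a shrinker with seeks = 2 (DEFAULT_SEEKS)
 * reporting max_pass = 10000 freeable objects:
 *
 *	delta = (4 * 1000) / 2            = 2000
 *	delta = 2000 * 10000 / (100000+1) ~= 200 objects
 *
 * i.e. scanning 1% of the LRU asks this shrinker to scan about 2% of
 * its cache; the factor of 4 divided by seeks = 2 is what ties slab
 * aging to LRU aging as described in the comment above.
 */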
static void set_reclaim_mode(int priority, struct scan_control *sc,
			     bool sync)
{
	reclaim_mode_t syncmode = sync ? RECLAIM_MODE_SYNC : RECLAIM_MODE_ASYNC;

	/*
	 * Initially assume we are entering either lumpy reclaim or
	 * reclaim/compaction.  Depending on the order, we will either set
	 * the sync mode or just reclaim order-0 pages later.
	 */
	if (COMPACTION_BUILD)
		sc->reclaim_mode = RECLAIM_MODE_COMPACTION;
	else
		sc->reclaim_mode = RECLAIM_MODE_LUMPYRECLAIM;

	/*
	 * Avoid using lumpy reclaim or reclaim/compaction if possible by
	 * restricting when it's set to either costly allocations or when
	 * under memory pressure.
	 */
	if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
		sc->reclaim_mode |= syncmode;
	else if (sc->order && priority < DEF_PRIORITY - 2)
		sc->reclaim_mode |= syncmode;
	else
		sc->reclaim_mode = RECLAIM_MODE_SINGLE | RECLAIM_MODE_ASYNC;
}

static void reset_reclaim_mode(struct scan_control *sc)
{
	sc->reclaim_mode = RECLAIM_MODE_SINGLE | RECLAIM_MODE_ASYNC;
}

static inline int is_page_cache_freeable(struct page *page)
{
	/*
	 * A freeable page cache page is referenced only by the caller
	 * that isolated the page, the page cache radix tree and
	 * optional buffer heads at page->private.
	 */
	return page_count(page) - page_has_private(page) == 2;
}

static int may_write_to_queue(struct backing_dev_info *bdi,
			      struct scan_control *sc)
{
	if (current->flags & PF_SWAPWRITE)
		return 1;
	if (!bdi_write_congested(bdi))
		return 1;
	if (bdi == current->backing_dev_info)
		return 1;

	/* lumpy reclaim for hugepage often needs a lot of writes */
	if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
		return 1;
	return 0;
}

/*
 * We detected a synchronous write error writing a page out.  Probably
 * -ENOSPC.  We need to propagate that into the address_space for a subsequent
 * fsync(), msync() or close().
 *
 * The tricky part is that after writepage we cannot touch the mapping: nothing
 * prevents it from being freed up.  But we have a ref on the page and once
 * that page is locked, the mapping is pinned.
 *
 * We're allowed to run sleeping lock_page() here because we know the caller
 * has __GFP_FS.
 */
static void handle_write_error(struct address_space *mapping,
			       struct page *page, int error)
{
	lock_page(page);
	if (page_mapping(page) == mapping)
		mapping_set_error(mapping, error);
	unlock_page(page);
}

/* possible outcome of pageout() */
typedef enum {
	/* failed to write page out, page is locked */
	PAGE_KEEP,
	/* move page to the active list, page is locked */
	PAGE_ACTIVATE,
	/* page has been sent to the disk successfully, page is unlocked */
	PAGE_SUCCESS,
	/* page is clean and locked */
	PAGE_CLEAN,
} pageout_t;

/*
 * pageout is called by shrink_page_list() for each dirty page.
 * Calls ->writepage().
 */
static pageout_t pageout(struct page *page, struct address_space *mapping,
			 struct scan_control *sc)
{
	/*
	 * If the page is dirty, only perform writeback if that write
	 * will be non-blocking, to prevent this allocation from being
	 * stalled by pagecache activity.  But note that there may be
	 * stalls if we need to run get_block().  We could test
	 * PagePrivate for that.
	 *
	 * If this process is currently in __generic_file_aio_write() against
	 * this page's queue, we can perform writeback even if that
	 * will block.
	 *
	 * If the page is swapcache, write it back even if that would
	 * block, for some throttling.  This happens by accident, because
	 * swap_backing_dev_info is bust: it doesn't reflect the
	 * congestion state of the swapdevs.  Easy to fix, if needed.
	 */
	if (!is_page_cache_freeable(page))
		return PAGE_KEEP;
	if (!mapping) {
		/*
		 * Some data journaling orphaned pages can have
		 * page->mapping == NULL while being dirty with clean buffers.
		 */
		if (page_has_private(page)) {
			if (try_to_free_buffers(page)) {
				ClearPageDirty(page);
				printk("%s: orphaned page\n", __func__);
				return PAGE_CLEAN;
			}
		}
		return PAGE_KEEP;
	}
	if (mapping->a_ops->writepage == NULL)
		return PAGE_ACTIVATE;
	if (!may_write_to_queue(mapping->backing_dev_info, sc))
		return PAGE_KEEP;

	if (clear_page_dirty_for_io(page)) {
		int res;
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_NONE,
			.nr_to_write = SWAP_CLUSTER_MAX,
			.range_start = 0,
			.range_end = LLONG_MAX,
			.for_reclaim = 1,
		};

		SetPageReclaim(page);
		res = mapping->a_ops->writepage(page, &wbc);
		if (res < 0)
			handle_write_error(mapping, page, res);
		if (res == AOP_WRITEPAGE_ACTIVATE) {
			ClearPageReclaim(page);
			return PAGE_ACTIVATE;
		}

		if (!PageWriteback(page)) {
			/* synchronous write or broken a_ops? */
			ClearPageReclaim(page);
		}
		trace_mm_vmscan_writepage(page,
			trace_reclaim_flags(page, sc->reclaim_mode));
		inc_zone_page_state(page, NR_VMSCAN_WRITE);
		return PAGE_SUCCESS;
	}

	return PAGE_CLEAN;
}

/*
 * Same as remove_mapping, but if the page is removed from the mapping, it
 * gets returned with a refcount of 0.
 */
static int __remove_mapping(struct address_space *mapping, struct page *page)
{
	BUG_ON(!PageLocked(page));
	BUG_ON(mapping != page_mapping(page));

	spin_lock_irq(&mapping->tree_lock);
	/*
	 * The non racy check for a busy page.
	 *
	 * Must be careful with the order of the tests.  When someone has
	 * a ref to the page, it may be possible that they dirty it then
	 * drop the reference.  So if PageDirty is tested before page_count
	 * here, then the following race may occur:
	 *
	 * get_user_pages(&page);
	 * [user mapping goes away]
	 * write_to(page);
	 *				!PageDirty(page)    [good]
	 * SetPageDirty(page);
	 * put_page(page);
	 *				!page_count(page)   [good, discard it]
	 *
	 * [oops, our write_to data is lost]
	 *
	 * Reversing the order of the tests ensures such a situation cannot
	 * escape unnoticed.  The smp_rmb is needed to ensure the page->flags
	 * load is not satisfied before that of page->_count.
	 *
	 * Note that if SetPageDirty is always performed via set_page_dirty,
	 * and thus under tree_lock, then this ordering is not required.
	 */
	if (!page_freeze_refs(page, 2))
		goto cannot_free;
	/* note: atomic_cmpxchg in page_freeze_refs provides the smp_rmb */
	if (unlikely(PageDirty(page))) {
		page_unfreeze_refs(page, 2);
		goto cannot_free;
	}

	if (PageSwapCache(page)) {
		swp_entry_t swap = { .val = page_private(page) };
		__delete_from_swap_cache(page);
		spin_unlock_irq(&mapping->tree_lock);
		swapcache_free(swap, page);
	} else {
		void (*freepage)(struct page *);

		freepage = mapping->a_ops->freepage;

		__delete_from_page_cache(page);
		spin_unlock_irq(&mapping->tree_lock);
		mem_cgroup_uncharge_cache_page(page);

		if (freepage != NULL)
			freepage(page);
	}

	return 1;

cannot_free:
	spin_unlock_irq(&mapping->tree_lock);
	return 0;
}

/*
 * Attempt to detach a locked page from its ->mapping.  If it is dirty or if
 * someone else has a ref on the page, abort and return 0.  If it was
 * successfully detached, return 1.  Assumes the caller has a single ref on
 * this page.
 */
int remove_mapping(struct address_space *mapping, struct page *page)
{
	if (__remove_mapping(mapping, page)) {
		/*
		 * Unfreezing the refcount with 1 rather than 2 effectively
		 * drops the pagecache ref for us without requiring another
		 * atomic operation.
		 */
		page_unfreeze_refs(page, 1);
		return 1;
	}
	return 0;
}
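
/*
 * Worked note on the refcount protocol above (illustrative): a page held
 * only by the isolating caller and the page cache has page_count() == 2,
 * so page_freeze_refs(page, 2) atomically drops the count to 0 exactly
 * when nobody else holds a reference.  Callers of __remove_mapping()
 * then either re-add their single reference (page_unfreeze_refs(page, 1)
 * in remove_mapping() above, which implicitly drops the pagecache ref)
 * or go on to free the page directly.
 */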
/**
 * putback_lru_page - put previously isolated page onto appropriate LRU list
 * @page: page to be put back to appropriate lru list
 *
 * Add previously isolated @page to appropriate LRU list.
 * Page may still be unevictable for other reasons.
 *
 * lru_lock must not be held, interrupts must be enabled.
 */
void putback_lru_page(struct page *page)
{
	int lru;
	int active = !!TestClearPageActive(page);
	int was_unevictable = PageUnevictable(page);

	VM_BUG_ON(PageLRU(page));

redo:
	ClearPageUnevictable(page);

	if (page_evictable(page, NULL)) {
		/*
		 * For evictable pages, we can use the cache.
		 * In event of a race, worst case is we end up with an
		 * unevictable page on [in]active list.
		 * We know how to handle that.
		 */
		lru = active + page_lru_base_type(page);
		lru_cache_add_lru(page, lru);
	} else {
		/*
		 * Put unevictable pages directly on zone's unevictable
		 * list.
		 */
		lru = LRU_UNEVICTABLE;
		add_page_to_unevictable_list(page);
		/*
		 * When racing with an mlock or AS_UNEVICTABLE clearing
		 * (page is unlocked) make sure that if the other thread
		 * does not observe our setting of PG_lru and fails
		 * isolation/check_move_unevictable_page,
		 * we see PG_mlocked/AS_UNEVICTABLE cleared below and move
		 * the page back to the evictable list.
		 *
		 * The other side is TestClearPageMlocked() or shmem_lock().
		 */
		smp_mb();
	}

	/*
	 * page's status can change while we move it among lru.  If an
	 * evictable page is on the unevictable list, it will never be
	 * freed.  To avoid that, check again after we have added it to
	 * the list.
	 */
	if (lru == LRU_UNEVICTABLE && page_evictable(page, NULL)) {
		if (!isolate_lru_page(page)) {
			put_page(page);
			goto redo;
		}
		/*
		 * This means someone else dropped this page from LRU.
		 * So, it will be freed or putback to LRU again.  There is
		 * nothing to do here.
		 */
	}

	if (was_unevictable && lru != LRU_UNEVICTABLE)
		count_vm_event(UNEVICTABLE_PGRESCUED);
	else if (!was_unevictable && lru == LRU_UNEVICTABLE)
		count_vm_event(UNEVICTABLE_PGCULLED);

	put_page(page);		/* drop ref from isolate */
}

enum page_references {
	PAGEREF_RECLAIM,
	PAGEREF_RECLAIM_CLEAN,
	PAGEREF_KEEP,
	PAGEREF_ACTIVATE,
};

static enum page_references page_check_references(struct page *page,
						  struct mem_cgroup_zone *mz,
						  struct scan_control *sc)
{
	int referenced_ptes, referenced_page;
	unsigned long vm_flags;

	referenced_ptes = page_referenced(page, 1, mz->mem_cgroup, &vm_flags);
	referenced_page = TestClearPageReferenced(page);

	/* Lumpy reclaim - ignore references */
	if (sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM)
		return PAGEREF_RECLAIM;

	/*
	 * Mlock lost the isolation race with us.  Let try_to_unmap()
	 * move the page to the unevictable list.
	 */
	if (vm_flags & VM_LOCKED)
		return PAGEREF_RECLAIM;

	if (referenced_ptes) {
		if (PageAnon(page))
			return PAGEREF_ACTIVATE;
		/*
		 * All mapped pages start out with page table
		 * references from the instantiating fault, so we need
		 * to look twice if a mapped file page is used more
		 * than once.
		 *
		 * Mark it and spare it for another trip around the
		 * inactive list.  Another page table reference will
		 * lead to its activation.
		 *
		 * Note: the mark is set for activated pages as well
		 * so that recently deactivated but used pages are
		 * quickly recovered.
		 */
		SetPageReferenced(page);

		if (referenced_page || referenced_ptes > 1)
			return PAGEREF_ACTIVATE;

		/*
		 * Activate file-backed executable pages after first usage.
		 */
		if (vm_flags & VM_EXEC)
			return PAGEREF_ACTIVATE;

		return PAGEREF_KEEP;
	}

	/* Reclaim if clean, defer dirty pages to writeback */
	if (referenced_page && !PageSwapBacked(page))
		return PAGEREF_RECLAIM_CLEAN;

	return PAGEREF_RECLAIM;
}
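
/*
 * Illustrative summary of the decision tree above, derived directly
 * from the code:
 *
 *	lumpy reclaim                        -> PAGEREF_RECLAIM
 *	VM_LOCKED mapping                    -> PAGEREF_RECLAIM (culled later)
 *	referenced pte(s), anon page         -> PAGEREF_ACTIVATE
 *	referenced pte(s), file page:
 *	    PG_referenced was set or >1 ptes -> PAGEREF_ACTIVATE
 *	    VM_EXEC mapping                  -> PAGEREF_ACTIVATE
 *	    otherwise                        -> PAGEREF_KEEP (second chance)
 *	no referenced ptes:
 *	    PG_referenced, not swap-backed   -> PAGEREF_RECLAIM_CLEAN
 *	    otherwise                        -> PAGEREF_RECLAIM
 */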
/*
 * shrink_page_list() returns the number of reclaimed pages.
 */
static unsigned long shrink_page_list(struct list_head *page_list,
				      struct mem_cgroup_zone *mz,
				      struct scan_control *sc,
				      int priority,
				      unsigned long *ret_nr_dirty,
				      unsigned long *ret_nr_writeback)
{
	LIST_HEAD(ret_pages);
	LIST_HEAD(free_pages);
	int pgactivate = 0;
	unsigned long nr_dirty = 0;
	unsigned long nr_congested = 0;
	unsigned long nr_reclaimed = 0;
	unsigned long nr_writeback = 0;

	cond_resched();

	while (!list_empty(page_list)) {
		enum page_references references;
		struct address_space *mapping;
		struct page *page;
		int may_enter_fs;

		cond_resched();

		page = lru_to_page(page_list);
		list_del(&page->lru);

		if (!trylock_page(page))
			goto keep;

		VM_BUG_ON(PageActive(page));
		VM_BUG_ON(page_zone(page) != mz->zone);

		sc->nr_scanned++;

		if (unlikely(!page_evictable(page, NULL)))
			goto cull_mlocked;

		if (!sc->may_unmap && page_mapped(page))
			goto keep_locked;

		/* Double the slab pressure for mapped and swapcache pages */
		if (page_mapped(page) || PageSwapCache(page))
			sc->nr_scanned++;

		may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
			(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));

		if (PageWriteback(page)) {
			nr_writeback++;
			/*
			 * Synchronous reclaim cannot queue pages for
			 * writeback due to the possibility of stack overflow
			 * but if it encounters a page under writeback, wait
			 * for the IO to complete.
			 */
			if ((sc->reclaim_mode & RECLAIM_MODE_SYNC) &&
			    may_enter_fs)
				wait_on_page_writeback(page);
			else {
				unlock_page(page);
				goto keep_lumpy;
			}
		}

		references = page_check_references(page, mz, sc);
		switch (references) {
		case PAGEREF_ACTIVATE:
			goto activate_locked;
		case PAGEREF_KEEP:
			goto keep_locked;
		case PAGEREF_RECLAIM:
		case PAGEREF_RECLAIM_CLEAN:
			; /* try to reclaim the page below */
		}

		/*
		 * Anonymous process memory has backing store?
		 * Try to allocate it some swap space here.
		 */
		if (PageAnon(page) && !PageSwapCache(page)) {
			if (!(sc->gfp_mask & __GFP_IO))
				goto keep_locked;
			if (!add_to_swap(page))
				goto activate_locked;
			may_enter_fs = 1;
		}

		mapping = page_mapping(page);

		/*
		 * The page is mapped into the page tables of one or more
		 * processes.  Try to unmap it here.
		 */
		if (page_mapped(page) && mapping) {
			switch (try_to_unmap(page, TTU_UNMAP)) {
			case SWAP_FAIL:
				goto activate_locked;
			case SWAP_AGAIN:
				goto keep_locked;
			case SWAP_MLOCK:
				goto cull_mlocked;
			case SWAP_SUCCESS:
				; /* try to free the page below */
			}
		}

		if (PageDirty(page)) {
			nr_dirty++;

			/*
			 * Only kswapd can writeback filesystem pages to
			 * avoid risk of stack overflow but do not writeback
			 * unless under significant pressure.
			 */
			if (page_is_file_cache(page) &&
			    (!current_is_kswapd() || priority >= DEF_PRIORITY - 2)) {
				/*
				 * Immediately reclaim when written back.
				 * Similar in principle to deactivate_page()
				 * except we already have the page isolated
				 * and know it's dirty.
				 */
				inc_zone_page_state(page, NR_VMSCAN_IMMEDIATE);
				SetPageReclaim(page);

				goto keep_locked;
			}

			if (references == PAGEREF_RECLAIM_CLEAN)
				goto keep_locked;
			if (!may_enter_fs)
				goto keep_locked;
			if (!sc->may_writepage)
				goto keep_locked;

			/* Page is dirty, try to write it out here */
			switch (pageout(page, mapping, sc)) {
			case PAGE_KEEP:
				nr_congested++;
				goto keep_locked;
			case PAGE_ACTIVATE:
				goto activate_locked;
			case PAGE_SUCCESS:
				if (PageWriteback(page))
					goto keep_lumpy;
				if (PageDirty(page))
					goto keep;

				/*
				 * A synchronous write - probably a ramdisk.
				 * Go ahead and try to reclaim the page.
				 */
				if (!trylock_page(page))
					goto keep;
				if (PageDirty(page) || PageWriteback(page))
					goto keep_locked;
				mapping = page_mapping(page);
			case PAGE_CLEAN:
				; /* try to free the page below */
			}
		}

		/*
		 * If the page has buffers, try to free the buffer mappings
		 * associated with this page.  If we succeed we try to free
		 * the page as well.
		 *
		 * We do this even if the page is PageDirty().
		 * try_to_release_page() does not perform I/O, but it is
		 * possible for a page to have PageDirty set, but it is
		 * actually clean (all its buffers are clean).  This happens
		 * if the buffers were written out directly, with submit_bh().
		 * ext3 will do this, as well as the blockdev mapping.
		 * try_to_release_page() will discover that cleanness and will
		 * drop the buffers and mark the page clean - it can be freed.
		 *
		 * Rarely, pages can have buffers and no ->mapping.  These are
		 * the pages which were not successfully invalidated in
		 * truncate_complete_page().  We try to drop those buffers here
		 * and if that worked, and the page is no longer mapped into
		 * process address space (page_count == 1) it can be freed.
		 * Otherwise, leave the page on the LRU so it is swappable.
		 */
		if (page_has_private(page)) {
			if (!try_to_release_page(page, sc->gfp_mask))
				goto activate_locked;
			if (!mapping && page_count(page) == 1) {
				unlock_page(page);
				if (put_page_testzero(page))
					goto free_it;
				else {
					/*
					 * Rare race with a speculative
					 * reference.  The speculative
					 * reference will free this page
					 * shortly, so we may increment
					 * nr_reclaimed here (and leave it
					 * off the LRU).
					 */
					nr_reclaimed++;
					continue;
				}
			}
		}

		if (!mapping || !__remove_mapping(mapping, page))
			goto keep_locked;

		/*
		 * At this point, we have no other references and there is
		 * no way to pick any more up (removed from LRU, removed
		 * from pagecache).  Can use non-atomic bitops now (and
		 * we obviously don't have to worry about waking up a process
		 * waiting on the page lock, because there are no references).
		 */
		__clear_page_locked(page);
free_it:
		nr_reclaimed++;

		/*
		 * Is there a need to periodically free_page_list?  It would
		 * appear not, as the counts should be low.
		 */
		list_add(&page->lru, &free_pages);
		continue;

cull_mlocked:
		if (PageSwapCache(page))
			try_to_free_swap(page);
		unlock_page(page);
		putback_lru_page(page);
		reset_reclaim_mode(sc);
		continue;

activate_locked:
		/* Not a candidate for swapping, so reclaim swap space. */
		if (PageSwapCache(page) && vm_swap_full())
			try_to_free_swap(page);
		VM_BUG_ON(PageActive(page));
		SetPageActive(page);
		pgactivate++;
keep_locked:
		unlock_page(page);
keep:
		reset_reclaim_mode(sc);
keep_lumpy:
		list_add(&page->lru, &ret_pages);
		VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
	}

	/*
	 * Tag a zone as congested if all the dirty pages encountered were
	 * backed by a congested BDI.  In this case, reclaimers should just
	 * back off and wait for congestion to clear because further reclaim
	 * will encounter the same problem.
	 */
	if (nr_dirty && nr_dirty == nr_congested && global_reclaim(sc))
		zone_set_flag(mz->zone, ZONE_CONGESTED);

	free_hot_cold_page_list(&free_pages, 1);

	list_splice(&ret_pages, page_list);
	count_vm_events(PGACTIVATE, pgactivate);
	*ret_nr_dirty += nr_dirty;
	*ret_nr_writeback += nr_writeback;
	return nr_reclaimed;
}
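
/*
 * Illustrative summary of the label fall-through chain at the end of the
 * shrink_page_list() loop above:
 *
 *	free_it:	 page was reclaimed; batch it on free_pages
 *	cull_mlocked:	 unevictable page goes back via putback_lru_page()
 *	activate_locked: mark PageActive, then fall through to...
 *	keep_locked:	 unlock the page, then fall through to...
 *	keep:		 reset lumpy/sync reclaim mode, then fall through to...
 *	keep_lumpy:	 put the page on ret_pages; jumping here directly
 *			 keeps the reclaim mode intact so a later sync lumpy
 *			 pass can retry the page
 */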
/*
 * Attempt to remove the specified page from its LRU.  Only take this page
 * if it is of the appropriate PageActive status.  Pages which are being
 * freed elsewhere are also ignored.
 *
 * page:	page to consider
 * mode:	one of the LRU isolation modes defined above
 *
 * returns 0 on success, -ve errno on failure.
 */
int __isolate_lru_page(struct page *page, isolate_mode_t mode, int file)
{
	bool all_lru_mode;
	int ret = -EINVAL;

	/* Only take pages on the LRU. */
	if (!PageLRU(page))
		return ret;

	all_lru_mode = (mode & (ISOLATE_ACTIVE|ISOLATE_INACTIVE)) ==
		(ISOLATE_ACTIVE|ISOLATE_INACTIVE);

	/*
	 * When checking the active state, we need to be sure we are
	 * dealing with comparable boolean values.  Take the logical not
	 * of each.
	 */
	if (!all_lru_mode && !PageActive(page) != !(mode & ISOLATE_ACTIVE))
		return ret;

	if (!all_lru_mode && !!page_is_file_cache(page) != file)
		return ret;

	/*
	 * When this function is being called for lumpy reclaim, we
	 * initially look into all LRU pages, active, inactive and
	 * unevictable; only give shrink_page_list evictable pages.
	 */
	if (PageUnevictable(page))
		return ret;

	ret = -EBUSY;

	if ((mode & ISOLATE_CLEAN) && (PageDirty(page) || PageWriteback(page)))
		return ret;

	if ((mode & ISOLATE_UNMAPPED) && page_mapped(page))
		return ret;

	if (likely(get_page_unless_zero(page))) {
		/*
		 * Be careful not to clear PageLRU until after we're
		 * sure the page is not being freed elsewhere -- the
		 * page release code relies on it.
		 */
		ClearPageLRU(page);
		ret = 0;
	}

	return ret;
}

/*
 * zone->lru_lock is heavily contended.  Some of the functions that
 * shrink the lists perform better by taking out a batch of pages
 * and working on them outside the LRU lock.
 *
 * For pagecache intensive workloads, this function is the hottest
 * spot in the kernel (apart from copy_*_user functions).
 *
 * Appropriate locks must be held before calling this function.
 *
 * @nr_to_scan:	The number of pages to look through on the list.
 * @src:	The LRU list to pull pages off.
 * @dst:	The temp list to put pages on to.
 * @scanned:	The number of pages that were scanned.
 * @order:	The caller's attempted allocation order
 * @mode:	One of the LRU isolation modes
 * @file:	True [1] if isolating file [!anon] pages
 *
 * returns how many pages were moved onto *@dst.
 */
static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
		struct list_head *src, struct list_head *dst,
		unsigned long *scanned, int order, isolate_mode_t mode,
		int file)
{
	unsigned long nr_taken = 0;
	unsigned long nr_lumpy_taken = 0;
	unsigned long nr_lumpy_dirty = 0;
	unsigned long nr_lumpy_failed = 0;
	unsigned long scan;

	for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
		struct page *page;
		unsigned long pfn;
		unsigned long end_pfn;
		unsigned long page_pfn;
		int zone_id;

		page = lru_to_page(src);
		prefetchw_prev_lru_page(page, src, flags);

		VM_BUG_ON(!PageLRU(page));

		switch (__isolate_lru_page(page, mode, file)) {
		case 0:
			mem_cgroup_lru_del(page);
			list_move(&page->lru, dst);
			nr_taken += hpage_nr_pages(page);
			break;

		case -EBUSY:
			/* else it is being freed elsewhere */
			list_move(&page->lru, src);
			continue;

		default:
			BUG();
		}

		if (!order)
			continue;

		/*
		 * Attempt to take all pages in the order aligned region
		 * surrounding the tag page.  Only take those pages of
		 * the same active state as that tag page.  We may safely
		 * round the target page pfn down to the requested order
		 * as the mem_map is guaranteed valid out to MAX_ORDER;
		 * if a page is in a different zone we will detect it
		 * from its zone id and abort this block scan.
		 */
		zone_id = page_zone_id(page);
		page_pfn = page_to_pfn(page);
		pfn = page_pfn & ~((1 << order) - 1);
		end_pfn = pfn + (1 << order);
		for (; pfn < end_pfn; pfn++) {
			struct page *cursor_page;

			/* The target page is in the block, ignore it. */
			if (unlikely(pfn == page_pfn))
				continue;

			/* Avoid holes within the zone. */
			if (unlikely(!pfn_valid_within(pfn)))
				break;

			cursor_page = pfn_to_page(pfn);

			/* Check that we have not crossed a zone boundary. */
			if (unlikely(page_zone_id(cursor_page) != zone_id))
				break;

			/*
			 * If we don't have enough swap space, reclaiming
			 * anon pages which don't already have a swap slot is
			 * pointless.
			 */
			if (nr_swap_pages <= 0 && PageSwapBacked(cursor_page) &&
			    !PageSwapCache(cursor_page))
				break;

			if (__isolate_lru_page(cursor_page, mode, file) == 0) {
				unsigned int isolated_pages;

				mem_cgroup_lru_del(cursor_page);
				list_move(&cursor_page->lru, dst);
				isolated_pages = hpage_nr_pages(cursor_page);
				nr_taken += isolated_pages;
				nr_lumpy_taken += isolated_pages;
				if (PageDirty(cursor_page))
					nr_lumpy_dirty += isolated_pages;
				scan++;
				pfn += isolated_pages - 1;
			} else {
				/*
				 * Check if the page is freed already.
				 *
				 * We can't use page_count() as that
				 * requires compound_head and we don't
				 * have a pin on the page here.  If a
				 * page is tail, we may or may not
				 * have isolated the head, so assume
				 * it's not free; it'd be tricky to
				 * track the head status without a
				 * page pin.
				 */
				if (!PageTail(cursor_page) &&
				    !atomic_read(&cursor_page->_count))
					continue;
				break;
			}
		}

		/* If we break out of the loop above, lumpy reclaim failed */
		if (pfn < end_pfn)
			nr_lumpy_failed++;
	}

	*scanned = scan;

	trace_mm_vmscan_lru_isolate(order,
			nr_to_scan, scan,
			nr_taken,
			nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed,
			mode, file);
	return nr_taken;
}
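
/*
 * Worked example of the lumpy pfn arithmetic above, for an assumed
 * order = 4 and a tag page at page_pfn = 0x12345:
 *
 *	pfn     = 0x12345 & ~((1 << 4) - 1) = 0x12340
 *	end_pfn = 0x12340 + (1 << 4)        = 0x12350
 *
 * so the inner loop walks the 16 pages of the naturally aligned block
 * containing the tag page, skipping the tag page itself, and aborts the
 * block scan on a zone boundary, a hole, or a failed isolation.
 */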
static unsigned long isolate_pages(unsigned long nr, struct mem_cgroup_zone *mz,
				   struct list_head *dst,
				   unsigned long *scanned, int order,
				   isolate_mode_t mode, int active, int file)
{
	struct lruvec *lruvec;
	int lru = LRU_BASE;

	lruvec = mem_cgroup_zone_lruvec(mz->zone, mz->mem_cgroup);
	if (active)
		lru += LRU_ACTIVE;
	if (file)
		lru += LRU_FILE;
	return isolate_lru_pages(nr, &lruvec->lists[lru], dst,
				 scanned, order, mode, file);
}

/*
 * clear_active_flags() is a helper for shrink_active_list(), clearing
 * any active bits from the pages in the list.
 */
static unsigned long clear_active_flags(struct list_head *page_list,
					unsigned int *count)
{
	int nr_active = 0;
	int lru;
	struct page *page;

	list_for_each_entry(page, page_list, lru) {
		int numpages = hpage_nr_pages(page);
		lru = page_lru_base_type(page);
		if (PageActive(page)) {
			lru += LRU_ACTIVE;
			ClearPageActive(page);
			nr_active += numpages;
		}
		if (count)
			count[lru] += numpages;
	}

	return nr_active;
}

/**
 * isolate_lru_page - tries to isolate a page from its LRU list
 * @page: page to isolate from its LRU list
 *
 * Isolates a @page from an LRU list, clears PageLRU and adjusts the
 * vmstat statistic corresponding to whatever LRU list the page was on.
 *
 * Returns 0 if the page was removed from an LRU list.
 * Returns -EBUSY if the page was not on an LRU list.
 *
 * The returned page will have PageLRU() cleared.  If it was found on
 * the active list, it will have PageActive set.  If it was found on
 * the unevictable list, it will have the PageUnevictable bit set.  That flag
 * may need to be cleared by the caller before letting the page go.
 *
 * The vmstat statistic corresponding to the list on which the page was
 * found will be decremented.
 *
 * Restrictions:
 * (1) Must be called with an elevated refcount on the page.  This is a
 *     fundamental difference from isolate_lru_pages (which is called
 *     without a stable reference).
 * (2) the lru_lock must not be held.
 * (3) interrupts must be enabled.
 */
int isolate_lru_page(struct page *page)
{
	int ret = -EBUSY;

	VM_BUG_ON(!page_count(page));

	if (PageLRU(page)) {
		struct zone *zone = page_zone(page);

		spin_lock_irq(&zone->lru_lock);
		if (PageLRU(page)) {
			int lru = page_lru(page);
			ret = 0;
			get_page(page);
			ClearPageLRU(page);

			del_page_from_lru_list(zone, page, lru);
		}
		spin_unlock_irq(&zone->lru_lock);
	}
	return ret;
}

/*
 * Are there way too many processes in the direct reclaim path already?
 */
static int too_many_isolated(struct zone *zone, int file,
			     struct scan_control *sc)
{
	unsigned long inactive, isolated;

	if (current_is_kswapd())
		return 0;

	if (!global_reclaim(sc))
		return 0;

	if (file) {
		inactive = zone_page_state(zone, NR_INACTIVE_FILE);
		isolated = zone_page_state(zone, NR_ISOLATED_FILE);
	} else {
		inactive = zone_page_state(zone, NR_INACTIVE_ANON);
		isolated = zone_page_state(zone, NR_ISOLATED_ANON);
	}

	return isolated > inactive;
}

/*
 * TODO: Try merging with the migration version of putback_lru_pages
 */
static noinline_for_stack void
putback_lru_pages(struct mem_cgroup_zone *mz, struct scan_control *sc,
		  unsigned long nr_anon, unsigned long nr_file,
		  struct list_head *page_list)
{
	struct page *page;
	struct pagevec pvec;
	struct zone *zone = mz->zone;
	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);

	pagevec_init(&pvec, 1);

	/*
	 * Put back any unfreeable pages.
	 */
	spin_lock(&zone->lru_lock);
	while (!list_empty(page_list)) {
		int lru;
		page = lru_to_page(page_list);
		VM_BUG_ON(PageLRU(page));
		list_del(&page->lru);
		if (unlikely(!page_evictable(page, NULL))) {
			spin_unlock_irq(&zone->lru_lock);
			putback_lru_page(page);
			spin_lock_irq(&zone->lru_lock);
			continue;
		}
		SetPageLRU(page);
		lru = page_lru(page);
		add_page_to_lru_list(zone, page, lru);
		if (is_active_lru(lru)) {
			int file = is_file_lru(lru);
			int numpages = hpage_nr_pages(page);
			reclaim_stat->recent_rotated[file] += numpages;
		}
		if (!pagevec_add(&pvec, page)) {
			spin_unlock_irq(&zone->lru_lock);
			__pagevec_release(&pvec);
			spin_lock_irq(&zone->lru_lock);
		}
	}
	__mod_zone_page_state(zone, NR_ISOLATED_ANON, -nr_anon);
	__mod_zone_page_state(zone, NR_ISOLATED_FILE, -nr_file);

	spin_unlock_irq(&zone->lru_lock);
	pagevec_release(&pvec);
}

static noinline_for_stack void
update_isolated_counts(struct mem_cgroup_zone *mz,
		       struct scan_control *sc,
		       unsigned long *nr_anon,
		       unsigned long *nr_file,
		       struct list_head *isolated_list)
{
	unsigned long nr_active;
	struct zone *zone = mz->zone;
	unsigned int count[NR_LRU_LISTS] = { 0, };
	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);

	nr_active = clear_active_flags(isolated_list, count);
	__count_vm_events(PGDEACTIVATE, nr_active);

	__mod_zone_page_state(zone, NR_ACTIVE_FILE,
			      -count[LRU_ACTIVE_FILE]);
	__mod_zone_page_state(zone, NR_INACTIVE_FILE,
			      -count[LRU_INACTIVE_FILE]);
	__mod_zone_page_state(zone, NR_ACTIVE_ANON,
			      -count[LRU_ACTIVE_ANON]);
	__mod_zone_page_state(zone, NR_INACTIVE_ANON,
			      -count[LRU_INACTIVE_ANON]);

	*nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON];
	*nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE];
	__mod_zone_page_state(zone, NR_ISOLATED_ANON, *nr_anon);
	__mod_zone_page_state(zone, NR_ISOLATED_FILE, *nr_file);

	reclaim_stat->recent_scanned[0] += *nr_anon;
	reclaim_stat->recent_scanned[1] += *nr_file;
}

/*
 * Returns true if a direct reclaim should wait on pages under writeback.
 *
 * If we are direct reclaiming for contiguous pages and we do not reclaim
 * everything in the list, try again and wait for writeback IO to complete.
 * This will stall high-order allocations noticeably.  Only do that when we
 * really need to free the pages under high memory pressure.
 */
static inline bool should_reclaim_stall(unsigned long nr_taken,
					unsigned long nr_freed,
					int priority,
					struct scan_control *sc)
{
	int lumpy_stall_priority;

	/* kswapd should not stall on sync IO */
	if (current_is_kswapd())
		return false;

	/* Only stall on lumpy reclaim */
	if (sc->reclaim_mode & RECLAIM_MODE_SINGLE)
		return false;

	/* If we have reclaimed everything on the isolated list, no stall */
	if (nr_freed == nr_taken)
		return false;

	/*
	 * For high-order allocations, there are two stall thresholds.
	 * High-cost allocations stall immediately, whereas lower
	 * order allocations such as stacks require the scanning
	 * priority to be much higher before stalling.
	 */
	if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
		lumpy_stall_priority = DEF_PRIORITY;
	else
		lumpy_stall_priority = DEF_PRIORITY / 3;

	return priority <= lumpy_stall_priority;
}
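
/*
 * Worked example for the thresholds above, assuming DEF_PRIORITY == 12
 * and PAGE_ALLOC_COSTLY_ORDER == 3 (their usual values in this era): an
 * order-4 allocation gets lumpy_stall_priority == 12, so it stalls at
 * any scanning priority, while an order-2 allocation gets 12 / 3 == 4
 * and only stalls once the priority has dropped to 4 or below.
 */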
/*
 * shrink_inactive_list() is a helper for shrink_zone().  It returns the
 * number of reclaimed pages.
 */
static noinline_for_stack unsigned long
shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
		     struct scan_control *sc, int priority, int file)
{
	LIST_HEAD(page_list);
	unsigned long nr_scanned;
	unsigned long nr_reclaimed = 0;
	unsigned long nr_taken;
	unsigned long nr_anon;
	unsigned long nr_file;
	unsigned long nr_dirty = 0;
	unsigned long nr_writeback = 0;
	isolate_mode_t reclaim_mode = ISOLATE_INACTIVE;
	struct zone *zone = mz->zone;

	while (unlikely(too_many_isolated(zone, file, sc))) {
		congestion_wait(BLK_RW_ASYNC, HZ/10);

		/* We are about to die and free our memory. Return now. */
		if (fatal_signal_pending(current))
			return SWAP_CLUSTER_MAX;
	}

	set_reclaim_mode(priority, sc, false);
	if (sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM)
		reclaim_mode |= ISOLATE_ACTIVE;

	lru_add_drain();

	if (!sc->may_unmap)
		reclaim_mode |= ISOLATE_UNMAPPED;
	if (!sc->may_writepage)
		reclaim_mode |= ISOLATE_CLEAN;

	spin_lock_irq(&zone->lru_lock);

	nr_taken = isolate_pages(nr_to_scan, mz, &page_list,
				 &nr_scanned, sc->order,
				 reclaim_mode, 0, file);
	if (global_reclaim(sc)) {
		zone->pages_scanned += nr_scanned;
		if (current_is_kswapd())
			__count_zone_vm_events(PGSCAN_KSWAPD, zone,
					       nr_scanned);
		else
			__count_zone_vm_events(PGSCAN_DIRECT, zone,
					       nr_scanned);
	}

	if (nr_taken == 0) {
		spin_unlock_irq(&zone->lru_lock);
		return 0;
	}

	update_isolated_counts(mz, sc, &nr_anon, &nr_file, &page_list);

	spin_unlock_irq(&zone->lru_lock);

	nr_reclaimed = shrink_page_list(&page_list, mz, sc, priority,
					&nr_dirty, &nr_writeback);

	/* Check if we should synchronously wait for writeback */
	if (should_reclaim_stall(nr_taken, nr_reclaimed, priority, sc)) {
		set_reclaim_mode(priority, sc, true);
		nr_reclaimed += shrink_page_list(&page_list, mz, sc,
					priority, &nr_dirty, &nr_writeback);
	}

	local_irq_disable();
	if (current_is_kswapd())
		__count_vm_events(KSWAPD_STEAL, nr_reclaimed);
	__count_zone_vm_events(PGSTEAL, zone, nr_reclaimed);

	putback_lru_pages(mz, sc, nr_anon, nr_file, &page_list);

	/*
	 * If reclaim is isolating dirty pages under writeback, it implies
	 * that the long-lived page allocation rate is exceeding the page
	 * laundering rate.  Either the global limits are not being effective
	 * at throttling processes due to the page distribution throughout
	 * zones or there is heavy usage of a slow backing device.  The
	 * only option is to throttle from reclaim context, which is not ideal
	 * as there is no guarantee the dirtying process is throttled in the
	 * same way balance_dirty_pages() manages.
	 *
	 * This scales the number of dirty pages that must be under writeback
	 * before throttling depending on priority.  It is a simple backoff
	 * function that has the most effect in the range DEF_PRIORITY to
	 * DEF_PRIORITY-2, the range in which reclaim is considered to be in
	 * trouble.
	 *
	 * DEF_PRIORITY   100% isolated pages must be PageWriteback to throttle
	 * DEF_PRIORITY-1  50% must be PageWriteback
	 * DEF_PRIORITY-2  25% must be PageWriteback, kswapd in trouble
	 * ...
	 * DEF_PRIORITY-6 For SWAP_CLUSTER_MAX isolated pages, throttle if any
	 *                isolated page is PageWriteback
	 */
	if (nr_writeback && nr_writeback >= (nr_taken >> (DEF_PRIORITY - priority)))
		wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10);

	trace_mm_vmscan_lru_shrink_inactive(zone->zone_pgdat->node_id,
		zone_idx(zone),
		nr_scanned, nr_reclaimed,
		priority,
		trace_shrink_flags(file, sc->reclaim_mode));
	return nr_reclaimed;
}
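
/*
 * Worked example for the nr_writeback check at the end of
 * shrink_inactive_list(), assuming DEF_PRIORITY == 12 and
 * nr_taken == 32 (SWAP_CLUSTER_MAX):
 *
 *	priority == DEF_PRIORITY     -> threshold 32 >> 0 == 32 (100%)
 *	priority == DEF_PRIORITY - 1 -> threshold 32 >> 1 == 16  (50%)
 *	priority == DEF_PRIORITY - 2 -> threshold 32 >> 2 ==  8  (25%)
 *
 * matching the percentages listed in the comment above.
 */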
/*
 * This moves pages from the active list to the inactive list.
 *
 * We move them the other way if the page is referenced by one or more
 * processes, from rmap.
 *
 * If the pages are mostly unmapped, the processing is fast and it is
 * appropriate to hold zone->lru_lock across the whole operation.  But if
 * the pages are mapped, the processing is slow (page_referenced()) so we
 * should drop zone->lru_lock around each page.  It's impossible to balance
 * this, so instead we remove the pages from the LRU while processing them.
 * It is safe to rely on PG_active against the non-LRU pages in here because
 * nobody will play with that bit on a non-LRU page.
 *
 * The downside is that we have to touch page->_count against each page.
 * But we had to alter page->flags anyway.
 */

static void move_active_pages_to_lru(struct zone *zone,
				     struct list_head *list,
				     enum lru_list lru)
{
	unsigned long pgmoved = 0;
	struct pagevec pvec;
	struct page *page;

	pagevec_init(&pvec, 1);

	while (!list_empty(list)) {
		struct lruvec *lruvec;

		page = lru_to_page(list);

		VM_BUG_ON(PageLRU(page));
		SetPageLRU(page);

		lruvec = mem_cgroup_lru_add_list(zone, page, lru);
		list_move(&page->lru, &lruvec->lists[lru]);
		pgmoved += hpage_nr_pages(page);

		if (!pagevec_add(&pvec, page) || list_empty(list)) {
			spin_unlock_irq(&zone->lru_lock);
			if (buffer_heads_over_limit)
				pagevec_strip(&pvec);
			__pagevec_release(&pvec);
			spin_lock_irq(&zone->lru_lock);
		}
	}
	__mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
	if (!is_active_lru(lru))
		__count_vm_events(PGDEACTIVATE, pgmoved);
}

static void shrink_active_list(unsigned long nr_pages,
			       struct mem_cgroup_zone *mz,
			       struct scan_control *sc,
			       int priority, int file)
{
	unsigned long nr_taken;
	unsigned long pgscanned;
	unsigned long vm_flags;
	LIST_HEAD(l_hold);	/* The pages which were snipped off */
	LIST_HEAD(l_active);
	LIST_HEAD(l_inactive);
	struct page *page;
	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);
	unsigned long nr_rotated = 0;
	isolate_mode_t reclaim_mode = ISOLATE_ACTIVE;
	struct zone *zone = mz->zone;

	lru_add_drain();

	if (!sc->may_unmap)
		reclaim_mode |= ISOLATE_UNMAPPED;
	if (!sc->may_writepage)
		reclaim_mode |= ISOLATE_CLEAN;

	spin_lock_irq(&zone->lru_lock);

	nr_taken = isolate_pages(nr_pages, mz, &l_hold,
				 &pgscanned, sc->order,
				 reclaim_mode, 1, file);

	if (global_reclaim(sc))
		zone->pages_scanned += pgscanned;

	reclaim_stat->recent_scanned[file] += nr_taken;

	__count_zone_vm_events(PGREFILL, zone, pgscanned);
	if (file)
		__mod_zone_page_state(zone, NR_ACTIVE_FILE, -nr_taken);
	else
		__mod_zone_page_state(zone, NR_ACTIVE_ANON, -nr_taken);
	__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
	spin_unlock_irq(&zone->lru_lock);

	while (!list_empty(&l_hold)) {
		cond_resched();
		page = lru_to_page(&l_hold);
		list_del(&page->lru);

		if (unlikely(!page_evictable(page, NULL))) {
			putback_lru_page(page);
			continue;
		}

		if (page_referenced(page, 0, mz->mem_cgroup, &vm_flags)) {
			nr_rotated += hpage_nr_pages(page);
			/*
			 * Identify referenced, file-backed active pages and
			 * give them one more trip around the active list,
			 * so that executable code gets a better chance to
			 * stay in memory under moderate memory pressure.
			 * Anon pages are not likely to be evicted by
			 * use-once streaming IO, plus JVMs can create lots
			 * of anon VM_EXEC pages, so we ignore them here.
			 */
			if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) {
				list_add(&page->lru, &l_active);
				continue;
			}
		}

		ClearPageActive(page);	/* we are de-activating */
		list_add(&page->lru, &l_inactive);
	}

	/*
	 * Move pages back to the lru list.
	 */
	spin_lock_irq(&zone->lru_lock);
	/*
	 * Count referenced pages from currently used mappings as rotated,
	 * even though only some of them are actually re-activated.  This
	 * helps balance scan pressure between file and anonymous pages in
	 * get_scan_count().
	 */
	reclaim_stat->recent_rotated[file] += nr_rotated;

	move_active_pages_to_lru(zone, &l_active,
						LRU_ACTIVE + file * LRU_FILE);
	move_active_pages_to_lru(zone, &l_inactive,
						LRU_BASE   + file * LRU_FILE);
	__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
	spin_unlock_irq(&zone->lru_lock);
}

#ifdef CONFIG_SWAP
static int inactive_anon_is_low_global(struct zone *zone)
{
	unsigned long active, inactive;

	active = zone_page_state(zone, NR_ACTIVE_ANON);
	inactive = zone_page_state(zone, NR_INACTIVE_ANON);

	if (inactive * zone->inactive_ratio < active)
		return 1;

	return 0;
}

/**
 * inactive_anon_is_low - check if anonymous pages need to be deactivated
 * @mz: memory cgroup and zone to check
 *
 * Returns true if the zone does not have enough inactive anon pages,
 * meaning some active anon pages need to be deactivated.
 */
static int inactive_anon_is_low(struct mem_cgroup_zone *mz)
{
	/*
	 * If we don't have swap space, anonymous page deactivation
	 * is pointless.
	 */
	if (!total_swap_pages)
		return 0;

	if (!scanning_global_lru(mz))
		return mem_cgroup_inactive_anon_is_low(mz->mem_cgroup,
						       mz->zone);

	return inactive_anon_is_low_global(mz->zone);
}
#else
static inline int inactive_anon_is_low(struct mem_cgroup_zone *mz)
{
	return 0;
}
#endif
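
/*
 * Worked example for inactive_anon_is_low_global() above, with an
 * assumed zone->inactive_ratio of 3 (roughly what a 1GB zone gets from
 * the int_sqrt-based calculation in mm/page_alloc.c): with
 * active == 300000 anon pages and inactive == 90000, we have
 * 90000 * 3 == 270000 < 300000, so the inactive list is considered too
 * small and some active anon pages will be deactivated.
 */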
static int inactive_file_is_low_global(struct zone *zone)
{
	unsigned long active, inactive;

	active = zone_page_state(zone, NR_ACTIVE_FILE);
	inactive = zone_page_state(zone, NR_INACTIVE_FILE);

	return (active > inactive);
}

/**
 * inactive_file_is_low - check if file pages need to be deactivated
 * @mz: memory cgroup and zone to check
 *
 * When the system is doing streaming IO, memory pressure here
 * ensures that active file pages get deactivated, until more
 * than half of the file pages are on the inactive list.
 *
 * Once we get to that situation, protect the system's working
 * set from being evicted by disabling active file page aging.
 *
 * This uses a different ratio than the anonymous pages, because
 * the page cache uses a use-once replacement algorithm.
 */
static int inactive_file_is_low(struct mem_cgroup_zone *mz)
{
	if (!scanning_global_lru(mz))
		return mem_cgroup_inactive_file_is_low(mz->mem_cgroup,
						       mz->zone);

	return inactive_file_is_low_global(mz->zone);
}

static int inactive_list_is_low(struct mem_cgroup_zone *mz, int file)
{
	if (file)
		return inactive_file_is_low(mz);
	else
		return inactive_anon_is_low(mz);
}

static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
				 struct mem_cgroup_zone *mz,
				 struct scan_control *sc, int priority)
{
	int file = is_file_lru(lru);

	if (is_active_lru(lru)) {
		if (inactive_list_is_low(mz, file))
			shrink_active_list(nr_to_scan, mz, sc, priority, file);
		return 0;
	}

	return shrink_inactive_list(nr_to_scan, mz, sc, priority, file);
}

static int vmscan_swappiness(struct mem_cgroup_zone *mz,
			     struct scan_control *sc)
{
	if (global_reclaim(sc))
		return vm_swappiness;
	return mem_cgroup_swappiness(mz->mem_cgroup);
}

/*
 * Determine how aggressively the anon and file LRU lists should be
 * scanned.  The relative value of each set of LRU lists is determined
 * by looking at the fraction of the scanned pages that were rotated
 * back onto the active list instead of being evicted.
 *
 * nr[0] = anon pages to scan; nr[1] = file pages to scan
 */
static void get_scan_count(struct mem_cgroup_zone *mz, struct scan_control *sc,
			   unsigned long *nr, int priority)
{
	unsigned long anon, file, free;
	unsigned long anon_prio, file_prio;
	unsigned long ap, fp;
	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);
	u64 fraction[2], denominator;
	enum lru_list l;
	int noswap = 0;
	bool force_scan = false;

	/*
	 * If the zone or memcg is small, nr[l] can be 0.  This
	 * results in no scanning on this priority and a potential
	 * priority drop.  Global direct reclaim can go to the next
	 * zone and tends to have no problems.  Global kswapd is for
	 * zone balancing and it needs to scan a minimum amount.  When
	 * reclaiming for a memcg, a priority drop can cause high
	 * latencies, so it's better to scan a minimum amount there as
	 * well.
	 */
	if (current_is_kswapd() && mz->zone->all_unreclaimable)
		force_scan = true;
	if (!global_reclaim(sc))
		force_scan = true;

	/* If we have no swap space, do not bother scanning anon pages. */
	if (!sc->may_swap || (nr_swap_pages <= 0)) {
		noswap = 1;
		fraction[0] = 0;
		fraction[1] = 1;
		denominator = 1;
		goto out;
	}

	anon  = zone_nr_lru_pages(mz, LRU_ACTIVE_ANON) +
		zone_nr_lru_pages(mz, LRU_INACTIVE_ANON);
	file  = zone_nr_lru_pages(mz, LRU_ACTIVE_FILE) +
		zone_nr_lru_pages(mz, LRU_INACTIVE_FILE);

	if (global_reclaim(sc)) {
		free  = zone_page_state(mz->zone, NR_FREE_PAGES);
		/* If we have very few page cache pages,
		   force-scan anon pages. */
		if (unlikely(file + free <= high_wmark_pages(mz->zone))) {
			fraction[0] = 1;
			fraction[1] = 0;
			denominator = 1;
			goto out;
		}
	}

	/*
	 * With swappiness at 100, anonymous and file have the same priority.
	 * This scanning priority is essentially the inverse of IO cost.
	 */
	 */
	anon_prio = vmscan_swappiness(mz, sc);
	file_prio = 200 - vmscan_swappiness(mz, sc);

	/*
	 * OK, so we have swap space and a fair amount of page cache
	 * pages. We use the recently rotated / recently scanned
	 * ratios to determine how valuable each cache is.
	 *
	 * Because workloads change over time (and to avoid overflow)
	 * we keep these statistics as a floating average, which ends
	 * up weighing recent references more than old ones.
	 *
	 * anon in [0], file in [1]
	 */
	spin_lock_irq(&mz->zone->lru_lock);
	if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
		reclaim_stat->recent_scanned[0] /= 2;
		reclaim_stat->recent_rotated[0] /= 2;
	}

	if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) {
		reclaim_stat->recent_scanned[1] /= 2;
		reclaim_stat->recent_rotated[1] /= 2;
	}

	/*
	 * The amount of pressure on anon vs file pages is inversely
	 * proportional to the fraction of recently scanned pages on
	 * each list that were recently referenced and in active use.
	 */
	ap = (anon_prio + 1) * (reclaim_stat->recent_scanned[0] + 1);
	ap /= reclaim_stat->recent_rotated[0] + 1;

	fp = (file_prio + 1) * (reclaim_stat->recent_scanned[1] + 1);
	fp /= reclaim_stat->recent_rotated[1] + 1;
	spin_unlock_irq(&mz->zone->lru_lock);

	fraction[0] = ap;
	fraction[1] = fp;
	denominator = ap + fp + 1;
out:
	for_each_evictable_lru(l) {
		int file = is_file_lru(l);
		unsigned long scan;

		scan = zone_nr_lru_pages(mz, l);
		if (priority || noswap) {
			scan >>= priority;
			if (!scan && force_scan)
				scan = SWAP_CLUSTER_MAX;
			scan = div64_u64(scan * fraction[file], denominator);
		}
		nr[l] = scan;
	}
}

/*
 * Reclaim/compaction depends on a number of pages being freed. To avoid
 * disruption to the system, a small number of order-0 pages continue to be
 * rotated and reclaimed in the normal fashion. However, by the time we get
 * back to the allocator and call try_to_compact_pages(), we ensure that
 * there are enough free pages for it to be likely successful.
 */
static inline bool should_continue_reclaim(struct mem_cgroup_zone *mz,
					   unsigned long nr_reclaimed,
					   unsigned long nr_scanned,
					   struct scan_control *sc)
{
	unsigned long pages_for_compaction;
	unsigned long inactive_lru_pages;

	/* If not in reclaim/compaction mode, stop */
	if (!(sc->reclaim_mode & RECLAIM_MODE_COMPACTION))
		return false;

	/* Consider stopping depending on scan and reclaim activity */
	if (sc->gfp_mask & __GFP_REPEAT) {
		/*
		 * For __GFP_REPEAT allocations, stop reclaiming if the
		 * full LRU list has been scanned and we are still failing
		 * to reclaim pages. This full LRU scan is potentially
		 * expensive but a __GFP_REPEAT caller really wants to succeed.
		 */
		if (!nr_reclaimed && !nr_scanned)
			return false;
	} else {
		/*
		 * For non-__GFP_REPEAT allocations which can presumably
		 * fail without consequence, stop if we failed to reclaim
		 * any pages from the last SWAP_CLUSTER_MAX number of
		 * pages that were scanned. This will return to the
		 * caller faster at the risk that reclaim/compaction and
		 * the resulting allocation attempt both fail.
		 */
		if (!nr_reclaimed)
			return false;
	}

	/*
	 * If we have not reclaimed enough pages for compaction and the
	 * inactive lists are large enough, continue reclaiming
	 */
	pages_for_compaction = (2UL << sc->order);
	inactive_lru_pages = zone_nr_lru_pages(mz, LRU_INACTIVE_FILE);
	if (nr_swap_pages > 0)
		inactive_lru_pages += zone_nr_lru_pages(mz, LRU_INACTIVE_ANON);
	if (sc->nr_reclaimed < pages_for_compaction &&
			inactive_lru_pages > pages_for_compaction)
		return true;

	/* If compaction would go ahead or the allocation would succeed, stop */
	switch (compaction_suitable(mz->zone, sc->order)) {
	case COMPACT_PARTIAL:
	case COMPACT_CONTINUE:
		return false;
	default:
		return true;
	}
}

/*
 * This is a basic per-zone page freer. Used by both kswapd and direct reclaim.
 */
static void shrink_mem_cgroup_zone(int priority, struct mem_cgroup_zone *mz,
				   struct scan_control *sc)
{
	unsigned long nr[NR_LRU_LISTS];
	unsigned long nr_to_scan;
	enum lru_list l;
	unsigned long nr_reclaimed, nr_scanned;
	unsigned long nr_to_reclaim = sc->nr_to_reclaim;
	struct blk_plug plug;

restart:
	nr_reclaimed = 0;
	nr_scanned = sc->nr_scanned;
	get_scan_count(mz, sc, nr, priority);

	blk_start_plug(&plug);
	while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
					nr[LRU_INACTIVE_FILE]) {
		for_each_evictable_lru(l) {
			if (nr[l]) {
				nr_to_scan = min_t(unsigned long,
						   nr[l], SWAP_CLUSTER_MAX);
				nr[l] -= nr_to_scan;

				nr_reclaimed += shrink_list(l, nr_to_scan,
							    mz, sc, priority);
			}
		}
		/*
		 * On large memory systems, scan >> priority can become
		 * really large. This is fine for the starting priority;
		 * we want to put equal scanning pressure on each zone.
		 * However, if the VM has a harder time of freeing pages,
		 * with multiple processes reclaiming pages, the total
		 * freeing target can get unreasonably large.
		 */
		if (nr_reclaimed >= nr_to_reclaim && priority < DEF_PRIORITY)
			break;
	}
	blk_finish_plug(&plug);
	sc->nr_reclaimed += nr_reclaimed;

	/*
	 * Even if we did not try to evict anon pages at all, we want to
	 * rebalance the anon lru active/inactive ratio.
	 */
	if (inactive_anon_is_low(mz))
		shrink_active_list(SWAP_CLUSTER_MAX, mz, sc, priority, 0);

	/* reclaim/compaction might need reclaim to continue */
	if (should_continue_reclaim(mz, nr_reclaimed,
				    sc->nr_scanned - nr_scanned, sc))
		goto restart;

	throttle_vm_writeout(sc->gfp_mask);
}

static void shrink_zone(int priority, struct zone *zone,
			struct scan_control *sc)
{
	struct mem_cgroup *root = sc->target_mem_cgroup;
	struct mem_cgroup_reclaim_cookie reclaim = {
		.zone = zone,
		.priority = priority,
	};
	struct mem_cgroup *memcg;

	memcg = mem_cgroup_iter(root, NULL, &reclaim);
	do {
		struct mem_cgroup_zone mz = {
			.mem_cgroup = memcg,
			.zone = zone,
		};

		shrink_mem_cgroup_zone(priority, &mz, sc);
		/*
		 * Limit reclaim has historically picked one memcg and
		 * scanned it with decreasing priority levels until
		 * nr_to_reclaim had been reclaimed. This priority
		 * cycle is thus over after a single memcg.
2122 * 2123 * Direct reclaim and kswapd, on the other hand, have 2124 * to scan all memory cgroups to fulfill the overall 2125 * scan target for the zone. 2126 */ 2127 if (!global_reclaim(sc)) { 2128 mem_cgroup_iter_break(root, memcg); 2129 break; 2130 } 2131 memcg = mem_cgroup_iter(root, memcg, &reclaim); 2132 } while (memcg); 2133} 2134 2135/* 2136 * This is the direct reclaim path, for page-allocating processes. We only 2137 * try to reclaim pages from zones which will satisfy the caller's allocation 2138 * request. 2139 * 2140 * We reclaim from a zone even if that zone is over high_wmark_pages(zone). 2141 * Because: 2142 * a) The caller may be trying to free *extra* pages to satisfy a higher-order 2143 * allocation or 2144 * b) The target zone may be at high_wmark_pages(zone) but the lower zones 2145 * must go *over* high_wmark_pages(zone) to satisfy the `incremental min' 2146 * zone defense algorithm. 2147 * 2148 * If a zone is deemed to be full of pinned pages then just give it a light 2149 * scan then give up on it. 2150 * 2151 * This function returns true if a zone is being reclaimed for a costly 2152 * high-order allocation and compaction is either ready to begin or deferred. 2153 * This indicates to the caller that it should retry the allocation or fail. 2154 */ 2155static bool shrink_zones(int priority, struct zonelist *zonelist, 2156 struct scan_control *sc) 2157{ 2158 struct zoneref *z; 2159 struct zone *zone; 2160 unsigned long nr_soft_reclaimed; 2161 unsigned long nr_soft_scanned; 2162 bool should_abort_reclaim = false; 2163 2164 for_each_zone_zonelist_nodemask(zone, z, zonelist, 2165 gfp_zone(sc->gfp_mask), sc->nodemask) { 2166 if (!populated_zone(zone)) 2167 continue; 2168 /* 2169 * Take care memory controller reclaiming has small influence 2170 * to global LRU. 2171 */ 2172 if (global_reclaim(sc)) { 2173 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) 2174 continue; 2175 if (zone->all_unreclaimable && priority != DEF_PRIORITY) 2176 continue; /* Let kswapd poll it */ 2177 if (COMPACTION_BUILD) { 2178 /* 2179 * If we already have plenty of memory free for 2180 * compaction in this zone, don't free any more. 2181 * Even though compaction is invoked for any 2182 * non-zero order, only frequent costly order 2183 * reclamation is disruptive enough to become a 2184 * noticable problem, like transparent huge page 2185 * allocations. 2186 */ 2187 if (sc->order > PAGE_ALLOC_COSTLY_ORDER && 2188 (compaction_suitable(zone, sc->order) || 2189 compaction_deferred(zone))) { 2190 should_abort_reclaim = true; 2191 continue; 2192 } 2193 } 2194 /* 2195 * This steals pages from memory cgroups over softlimit 2196 * and returns the number of reclaimed pages and 2197 * scanned pages. This works for global memory pressure 2198 * and balancing, not for a memcg's limit. 2199 */ 2200 nr_soft_scanned = 0; 2201 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone, 2202 sc->order, sc->gfp_mask, 2203 &nr_soft_scanned); 2204 sc->nr_reclaimed += nr_soft_reclaimed; 2205 sc->nr_scanned += nr_soft_scanned; 2206 /* need some check for avoid more shrink_zone() */ 2207 } 2208 2209 shrink_zone(priority, zone, sc); 2210 } 2211 2212 return should_abort_reclaim; 2213} 2214 2215static bool zone_reclaimable(struct zone *zone) 2216{ 2217 return zone->pages_scanned < zone_reclaimable_pages(zone) * 6; 2218} 2219 2220/* All zones in zonelist are unreclaimable? 
 */
static bool all_unreclaimable(struct zonelist *zonelist,
			      struct scan_control *sc)
{
	struct zoneref *z;
	struct zone *zone;

	for_each_zone_zonelist_nodemask(zone, z, zonelist,
					gfp_zone(sc->gfp_mask), sc->nodemask) {
		if (!populated_zone(zone))
			continue;
		if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
			continue;
		if (!zone->all_unreclaimable)
			return false;
	}

	return true;
}

/*
 * This is the main entry point to direct page reclaim.
 *
 * If a full scan of the inactive list fails to free enough memory then we
 * are "out of memory" and something needs to be killed.
 *
 * If the caller is !__GFP_FS then the probability of a failure is reasonably
 * high - the zone may be full of dirty or under-writeback pages, which this
 * caller can't do much about. We kick the writeback threads and take explicit
 * naps in the hope that some of these pages can be written. But if the
 * allocating task holds filesystem locks which prevent writeout this might not
 * work, and the allocation attempt will fail.
 *
 * returns:	0, if no pages reclaimed
 *		else, the number of pages reclaimed
 */
static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
					  struct scan_control *sc,
					  struct shrink_control *shrink)
{
	int priority;
	unsigned long total_scanned = 0;
	struct reclaim_state *reclaim_state = current->reclaim_state;
	struct zoneref *z;
	struct zone *zone;
	unsigned long writeback_threshold;
	bool should_abort_reclaim;

	get_mems_allowed();
	delayacct_freepages_start();

	if (global_reclaim(sc))
		count_vm_event(ALLOCSTALL);

	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
		sc->nr_scanned = 0;
		if (!priority)
			disable_swap_token(sc->target_mem_cgroup);
		should_abort_reclaim = shrink_zones(priority, zonelist, sc);
		if (should_abort_reclaim)
			break;

		/*
		 * Don't shrink slabs when reclaiming memory from
		 * over limit cgroups
		 */
		if (global_reclaim(sc)) {
			unsigned long lru_pages = 0;
			for_each_zone_zonelist(zone, z, zonelist,
					gfp_zone(sc->gfp_mask)) {
				if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
					continue;

				lru_pages += zone_reclaimable_pages(zone);
			}

			shrink_slab(shrink, sc->nr_scanned, lru_pages);
			if (reclaim_state) {
				sc->nr_reclaimed += reclaim_state->reclaimed_slab;
				reclaim_state->reclaimed_slab = 0;
			}
		}
		total_scanned += sc->nr_scanned;
		if (sc->nr_reclaimed >= sc->nr_to_reclaim)
			goto out;

		/*
		 * Try to write back as many pages as we just scanned. This
		 * tends to cause slow streaming writers to write data to the
		 * disk smoothly, at the dirtying rate, which is nice. But
		 * that's undesirable in laptop mode, where we *want* lumpy
		 * writeout. So in laptop mode, write out the whole world.
		 */
		writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2;
		if (total_scanned > writeback_threshold) {
			wakeup_flusher_threads(laptop_mode ? 0 : total_scanned,
						WB_REASON_TRY_TO_FREE_PAGES);
			sc->may_writepage = 1;
		}

		/* Take a nap, wait for some writeback to complete */
		if (!sc->hibernation_mode && sc->nr_scanned &&
		    priority < DEF_PRIORITY - 2) {
			struct zone *preferred_zone;

			first_zones_zonelist(zonelist, gfp_zone(sc->gfp_mask),
						&cpuset_current_mems_allowed,
						&preferred_zone);
			wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/10);
		}
	}

out:
	delayacct_freepages_end();
	put_mems_allowed();

	if (sc->nr_reclaimed)
		return sc->nr_reclaimed;

	/*
	 * While hibernation is in progress, kswapd is frozen so that it
	 * cannot mark a zone all_unreclaimable. Thus we bypass the
	 * all_unreclaimable check.
	 */
	if (oom_killer_disabled)
		return 0;

	/* Aborting reclaim to try compaction? don't OOM, then */
	if (should_abort_reclaim)
		return 1;

	/* top priority shrink_zones still had more to do? don't OOM, then */
	if (global_reclaim(sc) && !all_unreclaimable(zonelist, sc))
		return 1;

	return 0;
}

unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
				gfp_t gfp_mask, nodemask_t *nodemask)
{
	unsigned long nr_reclaimed;
	struct scan_control sc = {
		.gfp_mask = gfp_mask,
		.may_writepage = !laptop_mode,
		.nr_to_reclaim = SWAP_CLUSTER_MAX,
		.may_unmap = 1,
		.may_swap = 1,
		.order = order,
		.target_mem_cgroup = NULL,
		.nodemask = nodemask,
	};
	struct shrink_control shrink = {
		.gfp_mask = sc.gfp_mask,
	};

	trace_mm_vmscan_direct_reclaim_begin(order,
				sc.may_writepage,
				gfp_mask);

	nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);

	trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);

	return nr_reclaimed;
}

#ifdef CONFIG_CGROUP_MEM_RES_CTLR

unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *memcg,
						gfp_t gfp_mask, bool noswap,
						struct zone *zone,
						unsigned long *nr_scanned)
{
	struct scan_control sc = {
		.nr_scanned = 0,
		.nr_to_reclaim = SWAP_CLUSTER_MAX,
		.may_writepage = !laptop_mode,
		.may_unmap = 1,
		.may_swap = !noswap,
		.order = 0,
		.target_mem_cgroup = memcg,
	};
	struct mem_cgroup_zone mz = {
		.mem_cgroup = memcg,
		.zone = zone,
	};

	sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
			(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);

	trace_mm_vmscan_memcg_softlimit_reclaim_begin(0,
						      sc.may_writepage,
						      sc.gfp_mask);

	/*
	 * NOTE: Although we can get the priority field, using it
	 * here is not a good idea, since it limits the pages we can scan.
	 * If we don't reclaim here, the shrink_zone from balance_pgdat
	 * will pick up pages from other mem cgroups as well. We hack
	 * the priority and make it zero.
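	 * (A priority of 0 means the scan target is the full LRU size
	 * rather than a 1/2^priority fraction of it; see the
	 * "scan >>= priority" step in get_scan_count().)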
2421 */ 2422 shrink_mem_cgroup_zone(0, &mz, &sc); 2423 2424 trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed); 2425 2426 *nr_scanned = sc.nr_scanned; 2427 return sc.nr_reclaimed; 2428} 2429 2430unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg, 2431 gfp_t gfp_mask, 2432 bool noswap) 2433{ 2434 struct zonelist *zonelist; 2435 unsigned long nr_reclaimed; 2436 int nid; 2437 struct scan_control sc = { 2438 .may_writepage = !laptop_mode, 2439 .may_unmap = 1, 2440 .may_swap = !noswap, 2441 .nr_to_reclaim = SWAP_CLUSTER_MAX, 2442 .order = 0, 2443 .target_mem_cgroup = memcg, 2444 .nodemask = NULL, /* we don't care the placement */ 2445 .gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | 2446 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK), 2447 }; 2448 struct shrink_control shrink = { 2449 .gfp_mask = sc.gfp_mask, 2450 }; 2451 2452 /* 2453 * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't 2454 * take care of from where we get pages. So the node where we start the 2455 * scan does not need to be the current node. 2456 */ 2457 nid = mem_cgroup_select_victim_node(memcg); 2458 2459 zonelist = NODE_DATA(nid)->node_zonelists; 2460 2461 trace_mm_vmscan_memcg_reclaim_begin(0, 2462 sc.may_writepage, 2463 sc.gfp_mask); 2464 2465 nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink); 2466 2467 trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed); 2468 2469 return nr_reclaimed; 2470} 2471#endif 2472 2473static void age_active_anon(struct zone *zone, struct scan_control *sc, 2474 int priority) 2475{ 2476 struct mem_cgroup *memcg; 2477 2478 if (!total_swap_pages) 2479 return; 2480 2481 memcg = mem_cgroup_iter(NULL, NULL, NULL); 2482 do { 2483 struct mem_cgroup_zone mz = { 2484 .mem_cgroup = memcg, 2485 .zone = zone, 2486 }; 2487 2488 if (inactive_anon_is_low(&mz)) 2489 shrink_active_list(SWAP_CLUSTER_MAX, &mz, 2490 sc, priority, 0); 2491 2492 memcg = mem_cgroup_iter(NULL, memcg, NULL); 2493 } while (memcg); 2494} 2495 2496/* 2497 * pgdat_balanced is used when checking if a node is balanced for high-order 2498 * allocations. Only zones that meet watermarks and are in a zone allowed 2499 * by the callers classzone_idx are added to balanced_pages. The total of 2500 * balanced pages must be at least 25% of the zones allowed by classzone_idx 2501 * for the node to be considered balanced. Forcing all zones to be balanced 2502 * for high orders can cause excessive reclaim when there are imbalanced zones. 2503 * The choice of 25% is due to 2504 * o a 16M DMA zone that is balanced will not balance a zone on any 2505 * reasonable sized machine 2506 * o On all other machines, the top zone must be at least a reasonable 2507 * percentage of the middle zones. For example, on 32-bit x86, highmem 2508 * would need to be at least 256M for it to be balance a whole node. 2509 * Similarly, on x86-64 the Normal zone would need to be at least 1G 2510 * to balance a node on its own. These seemed like reasonable ratios. 2511 */ 2512static bool pgdat_balanced(pg_data_t *pgdat, unsigned long balanced_pages, 2513 int classzone_idx) 2514{ 2515 unsigned long present_pages = 0; 2516 int i; 2517 2518 for (i = 0; i <= classzone_idx; i++) 2519 present_pages += pgdat->node_zones[i].present_pages; 2520 2521 /* A special case here: if zone has no page, we think it's balanced */ 2522 return balanced_pages >= (present_pages >> 2); 2523} 2524 2525/* is kswapd sleeping prematurely? 
 */
static bool sleeping_prematurely(pg_data_t *pgdat, int order, long remaining,
					int classzone_idx)
{
	int i;
	unsigned long balanced = 0;
	bool all_zones_ok = true;

	/* If a direct reclaimer woke kswapd within HZ/10, it's premature */
	if (remaining)
		return true;

	/* Check the watermark levels */
	for (i = 0; i <= classzone_idx; i++) {
		struct zone *zone = pgdat->node_zones + i;

		if (!populated_zone(zone))
			continue;

		/*
		 * balance_pgdat() skips over all_unreclaimable after
		 * DEF_PRIORITY. Effectively, it considers them balanced so
		 * they must be considered balanced here as well if kswapd
		 * is to sleep.
		 */
		if (zone->all_unreclaimable) {
			balanced += zone->present_pages;
			continue;
		}

		if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone),
							i, 0))
			all_zones_ok = false;
		else
			balanced += zone->present_pages;
	}

	/*
	 * For high-order requests, the balanced zones must contain at least
	 * 25% of the node's pages for kswapd to sleep. For order-0, all
	 * zones must be balanced.
	 */
	if (order)
		return !pgdat_balanced(pgdat, balanced, classzone_idx);
	else
		return !all_zones_ok;
}

/*
 * For kswapd, balance_pgdat() will work across all this node's zones until
 * they are all at high_wmark_pages(zone).
 *
 * Returns the final order kswapd was reclaiming at
 *
 * There is special handling here for zones which are full of pinned pages.
 * This can happen if the pages are all mlocked, or if they are all used by
 * device drivers (say, ZONE_DMA). Or if they are all in use by hugetlb.
 * What we do is to detect the case where all pages in the zone have been
 * scanned twice and there has been zero successful reclaim. Mark the zone as
 * dead and from now on, only perform a short scan. Basically we're polling
 * the zone for when the problem goes away.
 *
 * kswapd scans the zones in the highmem->normal->dma direction. It skips
 * zones which have free_pages > high_wmark_pages(zone), but once a zone is
 * found to have free_pages <= high_wmark_pages(zone), we scan that zone and the
 * lower zones regardless of the number of free pages in the lower zones. This
 * interoperates with the page allocator fallback scheme to ensure that aging
 * of pages is balanced across the zones.
 */
static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
							int *classzone_idx)
{
	int all_zones_ok;
	unsigned long balanced;
	int priority;
	int i;
	int end_zone = 0;	/* Inclusive. 0 = ZONE_DMA */
	unsigned long total_scanned;
	struct reclaim_state *reclaim_state = current->reclaim_state;
	unsigned long nr_soft_reclaimed;
	unsigned long nr_soft_scanned;
	struct scan_control sc = {
		.gfp_mask = GFP_KERNEL,
		.may_unmap = 1,
		.may_swap = 1,
		/*
		 * kswapd doesn't want to be bailed out while reclaiming,
		 * because we want to put equal scanning pressure on each zone.
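		 * (Direct reclaim, by contrast, bails out once it has
		 * reclaimed SWAP_CLUSTER_MAX pages; see the nr_to_reclaim
		 * target set in try_to_free_pages() above.)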
2613 */ 2614 .nr_to_reclaim = ULONG_MAX, 2615 .order = order, 2616 .target_mem_cgroup = NULL, 2617 }; 2618 struct shrink_control shrink = { 2619 .gfp_mask = sc.gfp_mask, 2620 }; 2621loop_again: 2622 total_scanned = 0; 2623 sc.nr_reclaimed = 0; 2624 sc.may_writepage = !laptop_mode; 2625 count_vm_event(PAGEOUTRUN); 2626 2627 for (priority = DEF_PRIORITY; priority >= 0; priority--) { 2628 unsigned long lru_pages = 0; 2629 int has_under_min_watermark_zone = 0; 2630 2631 /* The swap token gets in the way of swapout... */ 2632 if (!priority) 2633 disable_swap_token(NULL); 2634 2635 all_zones_ok = 1; 2636 balanced = 0; 2637 2638 /* 2639 * Scan in the highmem->dma direction for the highest 2640 * zone which needs scanning 2641 */ 2642 for (i = pgdat->nr_zones - 1; i >= 0; i--) { 2643 struct zone *zone = pgdat->node_zones + i; 2644 2645 if (!populated_zone(zone)) 2646 continue; 2647 2648 if (zone->all_unreclaimable && priority != DEF_PRIORITY) 2649 continue; 2650 2651 /* 2652 * Do some background aging of the anon list, to give 2653 * pages a chance to be referenced before reclaiming. 2654 */ 2655 age_active_anon(zone, &sc, priority); 2656 2657 if (!zone_watermark_ok_safe(zone, order, 2658 high_wmark_pages(zone), 0, 0)) { 2659 end_zone = i; 2660 break; 2661 } else { 2662 /* If balanced, clear the congested flag */ 2663 zone_clear_flag(zone, ZONE_CONGESTED); 2664 } 2665 } 2666 if (i < 0) 2667 goto out; 2668 2669 for (i = 0; i <= end_zone; i++) { 2670 struct zone *zone = pgdat->node_zones + i; 2671 2672 lru_pages += zone_reclaimable_pages(zone); 2673 } 2674 2675 /* 2676 * Now scan the zone in the dma->highmem direction, stopping 2677 * at the last zone which needs scanning. 2678 * 2679 * We do this because the page allocator works in the opposite 2680 * direction. This prevents the page allocator from allocating 2681 * pages behind kswapd's direction of progress, which would 2682 * cause too much scanning of the lower zones. 2683 */ 2684 for (i = 0; i <= end_zone; i++) { 2685 struct zone *zone = pgdat->node_zones + i; 2686 int nr_slab; 2687 unsigned long balance_gap; 2688 2689 if (!populated_zone(zone)) 2690 continue; 2691 2692 if (zone->all_unreclaimable && priority != DEF_PRIORITY) 2693 continue; 2694 2695 sc.nr_scanned = 0; 2696 2697 nr_soft_scanned = 0; 2698 /* 2699 * Call soft limit reclaim before calling shrink_zone. 2700 */ 2701 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone, 2702 order, sc.gfp_mask, 2703 &nr_soft_scanned); 2704 sc.nr_reclaimed += nr_soft_reclaimed; 2705 total_scanned += nr_soft_scanned; 2706 2707 /* 2708 * We put equal pressure on every zone, unless 2709 * one zone has way too many pages free 2710 * already. The "too many pages" is defined 2711 * as the high wmark plus a "gap" where the 2712 * gap is either the low watermark or 1% 2713 * of the zone, whichever is smaller. 
2714 */ 2715 balance_gap = min(low_wmark_pages(zone), 2716 (zone->present_pages + 2717 KSWAPD_ZONE_BALANCE_GAP_RATIO-1) / 2718 KSWAPD_ZONE_BALANCE_GAP_RATIO); 2719 if (!zone_watermark_ok_safe(zone, order, 2720 high_wmark_pages(zone) + balance_gap, 2721 end_zone, 0)) { 2722 shrink_zone(priority, zone, &sc); 2723 2724 reclaim_state->reclaimed_slab = 0; 2725 nr_slab = shrink_slab(&shrink, sc.nr_scanned, lru_pages); 2726 sc.nr_reclaimed += reclaim_state->reclaimed_slab; 2727 total_scanned += sc.nr_scanned; 2728 2729 if (nr_slab == 0 && !zone_reclaimable(zone)) 2730 zone->all_unreclaimable = 1; 2731 } 2732 2733 /* 2734 * If we've done a decent amount of scanning and 2735 * the reclaim ratio is low, start doing writepage 2736 * even in laptop mode 2737 */ 2738 if (total_scanned > SWAP_CLUSTER_MAX * 2 && 2739 total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2) 2740 sc.may_writepage = 1; 2741 2742 if (zone->all_unreclaimable) { 2743 if (end_zone && end_zone == i) 2744 end_zone--; 2745 continue; 2746 } 2747 2748 if (!zone_watermark_ok_safe(zone, order, 2749 high_wmark_pages(zone), end_zone, 0)) { 2750 all_zones_ok = 0; 2751 /* 2752 * We are still under min water mark. This 2753 * means that we have a GFP_ATOMIC allocation 2754 * failure risk. Hurry up! 2755 */ 2756 if (!zone_watermark_ok_safe(zone, order, 2757 min_wmark_pages(zone), end_zone, 0)) 2758 has_under_min_watermark_zone = 1; 2759 } else { 2760 /* 2761 * If a zone reaches its high watermark, 2762 * consider it to be no longer congested. It's 2763 * possible there are dirty pages backed by 2764 * congested BDIs but as pressure is relieved, 2765 * spectulatively avoid congestion waits 2766 */ 2767 zone_clear_flag(zone, ZONE_CONGESTED); 2768 if (i <= *classzone_idx) 2769 balanced += zone->present_pages; 2770 } 2771 2772 } 2773 if (all_zones_ok || (order && pgdat_balanced(pgdat, balanced, *classzone_idx))) 2774 break; /* kswapd: all done */ 2775 /* 2776 * OK, kswapd is getting into trouble. Take a nap, then take 2777 * another pass across the zones. 2778 */ 2779 if (total_scanned && (priority < DEF_PRIORITY - 2)) { 2780 if (has_under_min_watermark_zone) 2781 count_vm_event(KSWAPD_SKIP_CONGESTION_WAIT); 2782 else 2783 congestion_wait(BLK_RW_ASYNC, HZ/10); 2784 } 2785 2786 /* 2787 * We do this so kswapd doesn't build up large priorities for 2788 * example when it is freeing in parallel with allocators. It 2789 * matches the direct reclaim path behaviour in terms of impact 2790 * on zone->*_priority. 2791 */ 2792 if (sc.nr_reclaimed >= SWAP_CLUSTER_MAX) 2793 break; 2794 } 2795out: 2796 2797 /* 2798 * order-0: All zones must meet high watermark for a balanced node 2799 * high-order: Balanced zones must make up at least 25% of the node 2800 * for the node to be balanced 2801 */ 2802 if (!(all_zones_ok || (order && pgdat_balanced(pgdat, balanced, *classzone_idx)))) { 2803 cond_resched(); 2804 2805 try_to_freeze(); 2806 2807 /* 2808 * Fragmentation may mean that the system cannot be 2809 * rebalanced for high-order allocations in all zones. 2810 * At this point, if nr_reclaimed < SWAP_CLUSTER_MAX, 2811 * it means the zones have been fully scanned and are still 2812 * not balanced. For high-order allocations, there is 2813 * little point trying all over again as kswapd may 2814 * infinite loop. 2815 * 2816 * Instead, recheck all watermarks at order-0 as they 2817 * are the most important. If watermarks are ok, kswapd will go 2818 * back to sleep. High-order users can still perform direct 2819 * reclaim if they wish. 
2820 */ 2821 if (sc.nr_reclaimed < SWAP_CLUSTER_MAX) 2822 order = sc.order = 0; 2823 2824 goto loop_again; 2825 } 2826 2827 /* 2828 * If kswapd was reclaiming at a higher order, it has the option of 2829 * sleeping without all zones being balanced. Before it does, it must 2830 * ensure that the watermarks for order-0 on *all* zones are met and 2831 * that the congestion flags are cleared. The congestion flag must 2832 * be cleared as kswapd is the only mechanism that clears the flag 2833 * and it is potentially going to sleep here. 2834 */ 2835 if (order) { 2836 for (i = 0; i <= end_zone; i++) { 2837 struct zone *zone = pgdat->node_zones + i; 2838 2839 if (!populated_zone(zone)) 2840 continue; 2841 2842 if (zone->all_unreclaimable && priority != DEF_PRIORITY) 2843 continue; 2844 2845 /* Confirm the zone is balanced for order-0 */ 2846 if (!zone_watermark_ok(zone, 0, 2847 high_wmark_pages(zone), 0, 0)) { 2848 order = sc.order = 0; 2849 goto loop_again; 2850 } 2851 2852 /* If balanced, clear the congested flag */ 2853 zone_clear_flag(zone, ZONE_CONGESTED); 2854 if (i <= *classzone_idx) 2855 balanced += zone->present_pages; 2856 } 2857 } 2858 2859 /* 2860 * Return the order we were reclaiming at so sleeping_prematurely() 2861 * makes a decision on the order we were last reclaiming at. However, 2862 * if another caller entered the allocator slow path while kswapd 2863 * was awake, order will remain at the higher level 2864 */ 2865 *classzone_idx = end_zone; 2866 return order; 2867} 2868 2869static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx) 2870{ 2871 long remaining = 0; 2872 DEFINE_WAIT(wait); 2873 2874 if (freezing(current) || kthread_should_stop()) 2875 return; 2876 2877 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); 2878 2879 /* Try to sleep for a short interval */ 2880 if (!sleeping_prematurely(pgdat, order, remaining, classzone_idx)) { 2881 remaining = schedule_timeout(HZ/10); 2882 finish_wait(&pgdat->kswapd_wait, &wait); 2883 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); 2884 } 2885 2886 /* 2887 * After a short sleep, check if it was a premature sleep. If not, then 2888 * go fully to sleep until explicitly woken up. 2889 */ 2890 if (!sleeping_prematurely(pgdat, order, remaining, classzone_idx)) { 2891 trace_mm_vmscan_kswapd_sleep(pgdat->node_id); 2892 2893 /* 2894 * vmstat counters are not perfectly accurate and the estimated 2895 * value for counters such as NR_FREE_PAGES can deviate from the 2896 * true value by nr_online_cpus * threshold. To avoid the zone 2897 * watermarks being breached while under pressure, we reduce the 2898 * per-cpu vmstat threshold while kswapd is awake and restore 2899 * them before going back to sleep. 2900 */ 2901 set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold); 2902 schedule(); 2903 set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold); 2904 } else { 2905 if (remaining) 2906 count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY); 2907 else 2908 count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY); 2909 } 2910 finish_wait(&pgdat->kswapd_wait, &wait); 2911} 2912 2913/* 2914 * The background pageout daemon, started as a kernel thread 2915 * from the init process. 2916 * 2917 * This basically trickles out pages so that we have _some_ 2918 * free memory available even if there is no other activity 2919 * that frees anything up. This is needed for things like routing 2920 * etc, where we otherwise might have all activity going on in 2921 * asynchronous contexts that cannot page things out. 
2922 * 2923 * If there are applications that are active memory-allocators 2924 * (most normal use), this basically shouldn't matter. 2925 */ 2926static int kswapd(void *p) 2927{ 2928 unsigned long order, new_order; 2929 unsigned balanced_order; 2930 int classzone_idx, new_classzone_idx; 2931 int balanced_classzone_idx; 2932 pg_data_t *pgdat = (pg_data_t*)p; 2933 struct task_struct *tsk = current; 2934 2935 struct reclaim_state reclaim_state = { 2936 .reclaimed_slab = 0, 2937 }; 2938 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); 2939 2940 lockdep_set_current_reclaim_state(GFP_KERNEL); 2941 2942 if (!cpumask_empty(cpumask)) 2943 set_cpus_allowed_ptr(tsk, cpumask); 2944 current->reclaim_state = &reclaim_state; 2945 2946 /* 2947 * Tell the memory management that we're a "memory allocator", 2948 * and that if we need more memory we should get access to it 2949 * regardless (see "__alloc_pages()"). "kswapd" should 2950 * never get caught in the normal page freeing logic. 2951 * 2952 * (Kswapd normally doesn't need memory anyway, but sometimes 2953 * you need a small amount of memory in order to be able to 2954 * page out something else, and this flag essentially protects 2955 * us from recursively trying to free more memory as we're 2956 * trying to free the first piece of memory in the first place). 2957 */ 2958 tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD; 2959 set_freezable(); 2960 2961 order = new_order = 0; 2962 balanced_order = 0; 2963 classzone_idx = new_classzone_idx = pgdat->nr_zones - 1; 2964 balanced_classzone_idx = classzone_idx; 2965 for ( ; ; ) { 2966 int ret; 2967 2968 /* 2969 * If the last balance_pgdat was unsuccessful it's unlikely a 2970 * new request of a similar or harder type will succeed soon 2971 * so consider going to sleep on the basis we reclaimed at 2972 */ 2973 if (balanced_classzone_idx >= new_classzone_idx && 2974 balanced_order == new_order) { 2975 new_order = pgdat->kswapd_max_order; 2976 new_classzone_idx = pgdat->classzone_idx; 2977 pgdat->kswapd_max_order = 0; 2978 pgdat->classzone_idx = pgdat->nr_zones - 1; 2979 } 2980 2981 if (order < new_order || classzone_idx > new_classzone_idx) { 2982 /* 2983 * Don't sleep if someone wants a larger 'order' 2984 * allocation or has tigher zone constraints 2985 */ 2986 order = new_order; 2987 classzone_idx = new_classzone_idx; 2988 } else { 2989 kswapd_try_to_sleep(pgdat, balanced_order, 2990 balanced_classzone_idx); 2991 order = pgdat->kswapd_max_order; 2992 classzone_idx = pgdat->classzone_idx; 2993 new_order = order; 2994 new_classzone_idx = classzone_idx; 2995 pgdat->kswapd_max_order = 0; 2996 pgdat->classzone_idx = pgdat->nr_zones - 1; 2997 } 2998 2999 ret = try_to_freeze(); 3000 if (kthread_should_stop()) 3001 break; 3002 3003 /* 3004 * We can speed up thawing tasks if we don't call balance_pgdat 3005 * after returning from the refrigerator 3006 */ 3007 if (!ret) { 3008 trace_mm_vmscan_kswapd_wake(pgdat->node_id, order); 3009 balanced_classzone_idx = classzone_idx; 3010 balanced_order = balance_pgdat(pgdat, order, 3011 &balanced_classzone_idx); 3012 } 3013 } 3014 return 0; 3015} 3016 3017/* 3018 * A zone is low on free memory, so wake its kswapd task to service it. 
3019 */ 3020void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx) 3021{ 3022 pg_data_t *pgdat; 3023 3024 if (!populated_zone(zone)) 3025 return; 3026 3027 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) 3028 return; 3029 pgdat = zone->zone_pgdat; 3030 if (pgdat->kswapd_max_order < order) { 3031 pgdat->kswapd_max_order = order; 3032 pgdat->classzone_idx = min(pgdat->classzone_idx, classzone_idx); 3033 } 3034 if (!waitqueue_active(&pgdat->kswapd_wait)) 3035 return; 3036 if (zone_watermark_ok_safe(zone, order, low_wmark_pages(zone), 0, 0)) 3037 return; 3038 3039 trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order); 3040 wake_up_interruptible(&pgdat->kswapd_wait); 3041} 3042 3043/* 3044 * The reclaimable count would be mostly accurate. 3045 * The less reclaimable pages may be 3046 * - mlocked pages, which will be moved to unevictable list when encountered 3047 * - mapped pages, which may require several travels to be reclaimed 3048 * - dirty pages, which is not "instantly" reclaimable 3049 */ 3050unsigned long global_reclaimable_pages(void) 3051{ 3052 int nr; 3053 3054 nr = global_page_state(NR_ACTIVE_FILE) + 3055 global_page_state(NR_INACTIVE_FILE); 3056 3057 if (nr_swap_pages > 0) 3058 nr += global_page_state(NR_ACTIVE_ANON) + 3059 global_page_state(NR_INACTIVE_ANON); 3060 3061 return nr; 3062} 3063 3064unsigned long zone_reclaimable_pages(struct zone *zone) 3065{ 3066 int nr; 3067 3068 nr = zone_page_state(zone, NR_ACTIVE_FILE) + 3069 zone_page_state(zone, NR_INACTIVE_FILE); 3070 3071 if (nr_swap_pages > 0) 3072 nr += zone_page_state(zone, NR_ACTIVE_ANON) + 3073 zone_page_state(zone, NR_INACTIVE_ANON); 3074 3075 return nr; 3076} 3077 3078#ifdef CONFIG_HIBERNATION 3079/* 3080 * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of 3081 * freed pages. 3082 * 3083 * Rather than trying to age LRUs the aim is to preserve the overall 3084 * LRU order by reclaiming preferentially 3085 * inactive > active > active referenced > active mapped 3086 */ 3087unsigned long shrink_all_memory(unsigned long nr_to_reclaim) 3088{ 3089 struct reclaim_state reclaim_state; 3090 struct scan_control sc = { 3091 .gfp_mask = GFP_HIGHUSER_MOVABLE, 3092 .may_swap = 1, 3093 .may_unmap = 1, 3094 .may_writepage = 1, 3095 .nr_to_reclaim = nr_to_reclaim, 3096 .hibernation_mode = 1, 3097 .order = 0, 3098 }; 3099 struct shrink_control shrink = { 3100 .gfp_mask = sc.gfp_mask, 3101 }; 3102 struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask); 3103 struct task_struct *p = current; 3104 unsigned long nr_reclaimed; 3105 3106 p->flags |= PF_MEMALLOC; 3107 lockdep_set_current_reclaim_state(sc.gfp_mask); 3108 reclaim_state.reclaimed_slab = 0; 3109 p->reclaim_state = &reclaim_state; 3110 3111 nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink); 3112 3113 p->reclaim_state = NULL; 3114 lockdep_clear_current_reclaim_state(); 3115 p->flags &= ~PF_MEMALLOC; 3116 3117 return nr_reclaimed; 3118} 3119#endif /* CONFIG_HIBERNATION */ 3120 3121/* It's optimal to keep kswapds on the same CPUs as their memory, but 3122 not required for correctness. So if the last cpu in a node goes 3123 away, we get changed to run anywhere: as the first one comes back, 3124 restore their cpu bindings. 
 */
static int __devinit cpu_callback(struct notifier_block *nfb,
				  unsigned long action, void *hcpu)
{
	int nid;

	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
		for_each_node_state(nid, N_HIGH_MEMORY) {
			pg_data_t *pgdat = NODE_DATA(nid);
			const struct cpumask *mask;

			mask = cpumask_of_node(pgdat->node_id);

			if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
				/* One of our CPUs online: restore mask */
				set_cpus_allowed_ptr(pgdat->kswapd, mask);
		}
	}
	return NOTIFY_OK;
}

/*
 * This kswapd start function will be called by init and node-hot-add.
 * On node-hot-add, kswapd will be moved to the proper cpus if cpus are
 * hot-added.
 */
int kswapd_run(int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);
	int ret = 0;

	if (pgdat->kswapd)
		return 0;

	pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
	if (IS_ERR(pgdat->kswapd)) {
		/* failure at boot is fatal */
		BUG_ON(system_state == SYSTEM_BOOTING);
		printk("Failed to start kswapd on node %d\n", nid);
		ret = -1;
	}
	return ret;
}

/*
 * Called by memory hotplug when all memory in a node is offlined.
 */
void kswapd_stop(int nid)
{
	struct task_struct *kswapd = NODE_DATA(nid)->kswapd;

	if (kswapd)
		kthread_stop(kswapd);
}

static int __init kswapd_init(void)
{
	int nid;

	swap_setup();
	for_each_node_state(nid, N_HIGH_MEMORY)
		kswapd_run(nid);
	hotcpu_notifier(cpu_callback, 0);
	return 0;
}

module_init(kswapd_init)

#ifdef CONFIG_NUMA
/*
 * Zone reclaim mode
 *
 * If non-zero, call zone_reclaim() when the number of free pages falls below
 * the watermarks.
 */
int zone_reclaim_mode __read_mostly;

#define RECLAIM_OFF 0
#define RECLAIM_ZONE (1<<0)	/* Run shrink_inactive_list on the zone */
#define RECLAIM_WRITE (1<<1)	/* Writeout pages during reclaim */
#define RECLAIM_SWAP (1<<2)	/* Swap pages out during reclaim */

/*
 * Priority for ZONE_RECLAIM. This determines the fraction of pages
 * of a node considered for each zone_reclaim. 4 scans 1/16th of
 * a zone.
 */
#define ZONE_RECLAIM_PRIORITY 4

/*
 * Percentage of pages in a zone that must be unmapped for zone_reclaim to
 * occur.
 */
int sysctl_min_unmapped_ratio = 1;

/*
 * If the number of slab pages in a zone grows beyond this percentage then
 * slab reclaim needs to occur.
 */
int sysctl_min_slab_ratio = 5;

static inline unsigned long zone_unmapped_file_pages(struct zone *zone)
{
	unsigned long file_mapped = zone_page_state(zone, NR_FILE_MAPPED);
	unsigned long file_lru = zone_page_state(zone, NR_INACTIVE_FILE) +
		zone_page_state(zone, NR_ACTIVE_FILE);

	/*
	 * It's possible for there to be more file mapped pages than
	 * accounted for by the pages on the file LRU lists because
	 * tmpfs pages accounted for as ANON can also be FILE_MAPPED
	 */
	return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0;
}

/* Work out how many page cache pages we can reclaim in this reclaim_mode */
static long zone_pagecache_reclaimable(struct zone *zone)
{
	long nr_pagecache_reclaimable;
	long delta = 0;

	/*
	 * If RECLAIM_SWAP is set, then all file pages are considered
	 * potentially reclaimable. Otherwise, we have to worry about
	 * pages like swapcache, and zone_unmapped_file_pages() provides
	 * a better estimate.
	 */
	if (zone_reclaim_mode & RECLAIM_SWAP)
		nr_pagecache_reclaimable = zone_page_state(zone, NR_FILE_PAGES);
	else
		nr_pagecache_reclaimable = zone_unmapped_file_pages(zone);

	/* If we can't clean pages, remove dirty pages from consideration */
	if (!(zone_reclaim_mode & RECLAIM_WRITE))
		delta += zone_page_state(zone, NR_FILE_DIRTY);

	/* Watch for any possible underflows due to delta */
	if (unlikely(delta > nr_pagecache_reclaimable))
		delta = nr_pagecache_reclaimable;

	return nr_pagecache_reclaimable - delta;
}

/*
 * Try to free up some pages from this zone through reclaim.
 */
static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
{
	/* Minimum pages needed in order to stay on node */
	const unsigned long nr_pages = 1 << order;
	struct task_struct *p = current;
	struct reclaim_state reclaim_state;
	int priority;
	struct scan_control sc = {
		.may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
		.may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
		.may_swap = 1,
		.nr_to_reclaim = max_t(unsigned long, nr_pages,
				       SWAP_CLUSTER_MAX),
		.gfp_mask = gfp_mask,
		.order = order,
	};
	struct shrink_control shrink = {
		.gfp_mask = sc.gfp_mask,
	};
	unsigned long nr_slab_pages0, nr_slab_pages1;

	cond_resched();
	/*
	 * We need to be able to allocate from the reserves for RECLAIM_SWAP
	 * and we also need to be able to write out pages for RECLAIM_WRITE
	 * and RECLAIM_SWAP.
	 */
	p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
	lockdep_set_current_reclaim_state(gfp_mask);
	reclaim_state.reclaimed_slab = 0;
	p->reclaim_state = &reclaim_state;

	if (zone_pagecache_reclaimable(zone) > zone->min_unmapped_pages) {
		/*
		 * Free memory by calling shrink zone with increasing
		 * priorities until we have enough memory freed.
		 */
		priority = ZONE_RECLAIM_PRIORITY;
		do {
			shrink_zone(priority, zone, &sc);
			priority--;
		} while (priority >= 0 && sc.nr_reclaimed < nr_pages);
	}

	nr_slab_pages0 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
	if (nr_slab_pages0 > zone->min_slab_pages) {
		/*
		 * shrink_slab() does not currently allow us to determine how
		 * many pages were freed in this zone. So we take the current
		 * number of slab pages and shake the slab until it is reduced
		 * by the same nr_pages that we used for reclaiming unmapped
		 * pages.
		 *
		 * Note that shrink_slab will free memory on all zones and may
		 * take a long time.
		 */
		for (;;) {
			unsigned long lru_pages = zone_reclaimable_pages(zone);

			/* No reclaimable slab or very low memory pressure */
			if (!shrink_slab(&shrink, sc.nr_scanned, lru_pages))
				break;

			/* Freed enough memory */
			nr_slab_pages1 = zone_page_state(zone,
							NR_SLAB_RECLAIMABLE);
			if (nr_slab_pages1 + nr_pages <= nr_slab_pages0)
				break;
		}

		/*
		 * Update nr_reclaimed by the number of slab pages we
		 * reclaimed from this zone.
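		 * (The drop in NR_SLAB_RECLAIMABLE is used because, as noted
		 * above, shrink_slab() does not report per-zone numbers.)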
3342 */ 3343 nr_slab_pages1 = zone_page_state(zone, NR_SLAB_RECLAIMABLE); 3344 if (nr_slab_pages1 < nr_slab_pages0) 3345 sc.nr_reclaimed += nr_slab_pages0 - nr_slab_pages1; 3346 } 3347 3348 p->reclaim_state = NULL; 3349 current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE); 3350 lockdep_clear_current_reclaim_state(); 3351 return sc.nr_reclaimed >= nr_pages; 3352} 3353 3354int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) 3355{ 3356 int node_id; 3357 int ret; 3358 3359 /* 3360 * Zone reclaim reclaims unmapped file backed pages and 3361 * slab pages if we are over the defined limits. 3362 * 3363 * A small portion of unmapped file backed pages is needed for 3364 * file I/O otherwise pages read by file I/O will be immediately 3365 * thrown out if the zone is overallocated. So we do not reclaim 3366 * if less than a specified percentage of the zone is used by 3367 * unmapped file backed pages. 3368 */ 3369 if (zone_pagecache_reclaimable(zone) <= zone->min_unmapped_pages && 3370 zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages) 3371 return ZONE_RECLAIM_FULL; 3372 3373 if (zone->all_unreclaimable) 3374 return ZONE_RECLAIM_FULL; 3375 3376 /* 3377 * Do not scan if the allocation should not be delayed. 3378 */ 3379 if (!(gfp_mask & __GFP_WAIT) || (current->flags & PF_MEMALLOC)) 3380 return ZONE_RECLAIM_NOSCAN; 3381 3382 /* 3383 * Only run zone reclaim on the local zone or on zones that do not 3384 * have associated processors. This will favor the local processor 3385 * over remote processors and spread off node memory allocations 3386 * as wide as possible. 3387 */ 3388 node_id = zone_to_nid(zone); 3389 if (node_state(node_id, N_CPU) && node_id != numa_node_id()) 3390 return ZONE_RECLAIM_NOSCAN; 3391 3392 if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED)) 3393 return ZONE_RECLAIM_NOSCAN; 3394 3395 ret = __zone_reclaim(zone, gfp_mask, order); 3396 zone_clear_flag(zone, ZONE_RECLAIM_LOCKED); 3397 3398 if (!ret) 3399 count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED); 3400 3401 return ret; 3402} 3403#endif 3404 3405/* 3406 * page_evictable - test whether a page is evictable 3407 * @page: the page to test 3408 * @vma: the VMA in which the page is or will be mapped, may be NULL 3409 * 3410 * Test whether page is evictable--i.e., should be placed on active/inactive 3411 * lists vs unevictable list. The vma argument is !NULL when called from the 3412 * fault path to determine how to instantate a new page. 3413 * 3414 * Reasons page might not be evictable: 3415 * (1) page's mapping marked unevictable 3416 * (2) page is part of an mlocked VMA 3417 * 3418 */ 3419int page_evictable(struct page *page, struct vm_area_struct *vma) 3420{ 3421 3422 if (mapping_unevictable(page_mapping(page))) 3423 return 0; 3424 3425 if (PageMlocked(page) || (vma && is_mlocked_vma(vma, page))) 3426 return 0; 3427 3428 return 1; 3429} 3430 3431/** 3432 * check_move_unevictable_page - check page for evictability and move to appropriate zone lru list 3433 * @page: page to check evictability and move to appropriate lru list 3434 * @zone: zone page is in 3435 * 3436 * Checks a page for evictability and moves the page to the appropriate 3437 * zone lru list. 3438 * 3439 * Restrictions: zone->lru_lock must be held, page must be on LRU and must 3440 * have PageUnevictable set. 
3441 */ 3442static void check_move_unevictable_page(struct page *page, struct zone *zone) 3443{ 3444 struct lruvec *lruvec; 3445 3446 VM_BUG_ON(PageActive(page)); 3447retry: 3448 ClearPageUnevictable(page); 3449 if (page_evictable(page, NULL)) { 3450 enum lru_list l = page_lru_base_type(page); 3451 3452 __dec_zone_state(zone, NR_UNEVICTABLE); 3453 lruvec = mem_cgroup_lru_move_lists(zone, page, 3454 LRU_UNEVICTABLE, l); 3455 list_move(&page->lru, &lruvec->lists[l]); 3456 __inc_zone_state(zone, NR_INACTIVE_ANON + l); 3457 __count_vm_event(UNEVICTABLE_PGRESCUED); 3458 } else { 3459 /* 3460 * rotate unevictable list 3461 */ 3462 SetPageUnevictable(page); 3463 lruvec = mem_cgroup_lru_move_lists(zone, page, LRU_UNEVICTABLE, 3464 LRU_UNEVICTABLE); 3465 list_move(&page->lru, &lruvec->lists[LRU_UNEVICTABLE]); 3466 if (page_evictable(page, NULL)) 3467 goto retry; 3468 } 3469} 3470 3471/** 3472 * scan_mapping_unevictable_pages - scan an address space for evictable pages 3473 * @mapping: struct address_space to scan for evictable pages 3474 * 3475 * Scan all pages in mapping. Check unevictable pages for 3476 * evictability and move them to the appropriate zone lru list. 3477 */ 3478void scan_mapping_unevictable_pages(struct address_space *mapping) 3479{ 3480 pgoff_t next = 0; 3481 pgoff_t end = (i_size_read(mapping->host) + PAGE_CACHE_SIZE - 1) >> 3482 PAGE_CACHE_SHIFT; 3483 struct zone *zone; 3484 struct pagevec pvec; 3485 3486 if (mapping->nrpages == 0) 3487 return; 3488 3489 pagevec_init(&pvec, 0); 3490 while (next < end && 3491 pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) { 3492 int i; 3493 int pg_scanned = 0; 3494 3495 zone = NULL; 3496 3497 for (i = 0; i < pagevec_count(&pvec); i++) { 3498 struct page *page = pvec.pages[i]; 3499 pgoff_t page_index = page->index; 3500 struct zone *pagezone = page_zone(page); 3501 3502 pg_scanned++; 3503 if (page_index > next) 3504 next = page_index; 3505 next++; 3506 3507 if (pagezone != zone) { 3508 if (zone) 3509 spin_unlock_irq(&zone->lru_lock); 3510 zone = pagezone; 3511 spin_lock_irq(&zone->lru_lock); 3512 } 3513 3514 if (PageLRU(page) && PageUnevictable(page)) 3515 check_move_unevictable_page(page, zone); 3516 } 3517 if (zone) 3518 spin_unlock_irq(&zone->lru_lock); 3519 pagevec_release(&pvec); 3520 3521 count_vm_events(UNEVICTABLE_PGSCANNED, pg_scanned); 3522 } 3523 3524} 3525 3526static void warn_scan_unevictable_pages(void) 3527{ 3528 printk_once(KERN_WARNING 3529 "%s: The scan_unevictable_pages sysctl/node-interface has been " 3530 "disabled for lack of a legitimate use case. If you have " 3531 "one, please send an email to linux-mm@kvack.org.\n", 3532 current->comm); 3533} 3534 3535/* 3536 * scan_unevictable_pages [vm] sysctl handler. On demand re-scan of 3537 * all nodes' unevictable lists for evictable pages 3538 */ 3539unsigned long scan_unevictable_pages; 3540 3541int scan_unevictable_handler(struct ctl_table *table, int write, 3542 void __user *buffer, 3543 size_t *length, loff_t *ppos) 3544{ 3545 warn_scan_unevictable_pages(); 3546 proc_doulongvec_minmax(table, write, buffer, length, ppos); 3547 scan_unevictable_pages = 0; 3548 return 0; 3549} 3550 3551#ifdef CONFIG_NUMA 3552/* 3553 * per node 'scan_unevictable_pages' attribute. On demand re-scan of 3554 * a specified node's per zone unevictable lists for evictable pages. 
3555 */ 3556 3557static ssize_t read_scan_unevictable_node(struct device *dev, 3558 struct device_attribute *attr, 3559 char *buf) 3560{ 3561 warn_scan_unevictable_pages(); 3562 return sprintf(buf, "0\n"); /* always zero; should fit... */ 3563} 3564 3565static ssize_t write_scan_unevictable_node(struct device *dev, 3566 struct device_attribute *attr, 3567 const char *buf, size_t count) 3568{ 3569 warn_scan_unevictable_pages(); 3570 return 1; 3571} 3572 3573 3574static DEVICE_ATTR(scan_unevictable_pages, S_IRUGO | S_IWUSR, 3575 read_scan_unevictable_node, 3576 write_scan_unevictable_node); 3577 3578int scan_unevictable_register_node(struct node *node) 3579{ 3580 return device_create_file(&node->dev, &dev_attr_scan_unevictable_pages); 3581} 3582 3583void scan_unevictable_unregister_node(struct node *node) 3584{ 3585 device_remove_file(&node->dev, &dev_attr_scan_unevictable_pages); 3586} 3587#endif 3588