vmscan.c revision d149e3b25d7c5f33de9aa866303926fa53535aa7
1/* 2 * linux/mm/vmscan.c 3 * 4 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds 5 * 6 * Swap reorganised 29.12.95, Stephen Tweedie. 7 * kswapd added: 7.1.96 sct 8 * Removed kswapd_ctl limits, and swap out as many pages as needed 9 * to bring the system back to freepages.high: 2.4.97, Rik van Riel. 10 * Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com). 11 * Multiqueue VM started 5.8.00, Rik van Riel. 12 */ 13 14#include <linux/mm.h> 15#include <linux/module.h> 16#include <linux/gfp.h> 17#include <linux/kernel_stat.h> 18#include <linux/swap.h> 19#include <linux/pagemap.h> 20#include <linux/init.h> 21#include <linux/highmem.h> 22#include <linux/vmstat.h> 23#include <linux/file.h> 24#include <linux/writeback.h> 25#include <linux/blkdev.h> 26#include <linux/buffer_head.h> /* for try_to_release_page(), 27 buffer_heads_over_limit */ 28#include <linux/mm_inline.h> 29#include <linux/pagevec.h> 30#include <linux/backing-dev.h> 31#include <linux/rmap.h> 32#include <linux/topology.h> 33#include <linux/cpu.h> 34#include <linux/cpuset.h> 35#include <linux/compaction.h> 36#include <linux/notifier.h> 37#include <linux/rwsem.h> 38#include <linux/delay.h> 39#include <linux/kthread.h> 40#include <linux/freezer.h> 41#include <linux/memcontrol.h> 42#include <linux/delayacct.h> 43#include <linux/sysctl.h> 44#include <linux/oom.h> 45#include <linux/prefetch.h> 46 47#include <asm/tlbflush.h> 48#include <asm/div64.h> 49 50#include <linux/swapops.h> 51 52#include "internal.h" 53 54#define CREATE_TRACE_POINTS 55#include <trace/events/vmscan.h> 56 57/* 58 * reclaim_mode determines how the inactive list is shrunk 59 * RECLAIM_MODE_SINGLE: Reclaim only order-0 pages 60 * RECLAIM_MODE_ASYNC: Do not block 61 * RECLAIM_MODE_SYNC: Allow blocking e.g. call wait_on_page_writeback 62 * RECLAIM_MODE_LUMPYRECLAIM: For high-order allocations, take a reference 63 * page from the LRU and reclaim all pages within a 64 * naturally aligned range 65 * RECLAIM_MODE_COMPACTION: For high-order allocations, reclaim a number of 66 * order-0 pages and then compact the zone 67 */ 68typedef unsigned __bitwise__ reclaim_mode_t; 69#define RECLAIM_MODE_SINGLE ((__force reclaim_mode_t)0x01u) 70#define RECLAIM_MODE_ASYNC ((__force reclaim_mode_t)0x02u) 71#define RECLAIM_MODE_SYNC ((__force reclaim_mode_t)0x04u) 72#define RECLAIM_MODE_LUMPYRECLAIM ((__force reclaim_mode_t)0x08u) 73#define RECLAIM_MODE_COMPACTION ((__force reclaim_mode_t)0x10u) 74 75struct scan_control { 76 /* Incremented by the number of inactive pages that were scanned */ 77 unsigned long nr_scanned; 78 79 /* Number of pages freed so far during a call to shrink_zones() */ 80 unsigned long nr_reclaimed; 81 82 /* How many pages shrink_list() should reclaim */ 83 unsigned long nr_to_reclaim; 84 85 unsigned long hibernation_mode; 86 87 /* This context's GFP mask */ 88 gfp_t gfp_mask; 89 90 int may_writepage; 91 92 /* Can mapped pages be reclaimed? */ 93 int may_unmap; 94 95 /* Can pages be swapped as part of reclaim? */ 96 int may_swap; 97 98 int swappiness; 99 100 int order; 101 102 /* 103 * Intend to reclaim enough continuous memory rather than reclaim 104 * enough amount of memory. i.e, mode for high order allocation. 105 */ 106 reclaim_mode_t reclaim_mode; 107 108 /* Which cgroup do we reclaim from */ 109 struct mem_cgroup *mem_cgroup; 110 111 /* 112 * Nodemask of nodes allowed by the caller. If NULL, all nodes 113 * are scanned. 114 */ 115 nodemask_t *nodemask; 116}; 117 118#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru)) 119 120#ifdef ARCH_HAS_PREFETCH 121#define prefetch_prev_lru_page(_page, _base, _field) \ 122 do { \ 123 if ((_page)->lru.prev != _base) { \ 124 struct page *prev; \ 125 \ 126 prev = lru_to_page(&(_page->lru)); \ 127 prefetch(&prev->_field); \ 128 } \ 129 } while (0) 130#else 131#define prefetch_prev_lru_page(_page, _base, _field) do { } while (0) 132#endif 133 134#ifdef ARCH_HAS_PREFETCHW 135#define prefetchw_prev_lru_page(_page, _base, _field) \ 136 do { \ 137 if ((_page)->lru.prev != _base) { \ 138 struct page *prev; \ 139 \ 140 prev = lru_to_page(&(_page->lru)); \ 141 prefetchw(&prev->_field); \ 142 } \ 143 } while (0) 144#else 145#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0) 146#endif 147 148/* 149 * From 0 .. 100. Higher means more swappy. 150 */ 151int vm_swappiness = 60; 152long vm_total_pages; /* The total number of pages which the VM controls */ 153 154static LIST_HEAD(shrinker_list); 155static DECLARE_RWSEM(shrinker_rwsem); 156 157#ifdef CONFIG_CGROUP_MEM_RES_CTLR 158#define scanning_global_lru(sc) (!(sc)->mem_cgroup) 159#else 160#define scanning_global_lru(sc) (1) 161#endif 162 163static struct zone_reclaim_stat *get_reclaim_stat(struct zone *zone, 164 struct scan_control *sc) 165{ 166 if (!scanning_global_lru(sc)) 167 return mem_cgroup_get_reclaim_stat(sc->mem_cgroup, zone); 168 169 return &zone->reclaim_stat; 170} 171 172static unsigned long zone_nr_lru_pages(struct zone *zone, 173 struct scan_control *sc, enum lru_list lru) 174{ 175 if (!scanning_global_lru(sc)) 176 return mem_cgroup_zone_nr_pages(sc->mem_cgroup, zone, lru); 177 178 return zone_page_state(zone, NR_LRU_BASE + lru); 179} 180 181 182/* 183 * Add a shrinker callback to be called from the vm 184 */ 185void register_shrinker(struct shrinker *shrinker) 186{ 187 shrinker->nr = 0; 188 down_write(&shrinker_rwsem); 189 list_add_tail(&shrinker->list, &shrinker_list); 190 up_write(&shrinker_rwsem); 191} 192EXPORT_SYMBOL(register_shrinker); 193 194/* 195 * Remove one 196 */ 197void unregister_shrinker(struct shrinker *shrinker) 198{ 199 down_write(&shrinker_rwsem); 200 list_del(&shrinker->list); 201 up_write(&shrinker_rwsem); 202} 203EXPORT_SYMBOL(unregister_shrinker); 204 205static inline int do_shrinker_shrink(struct shrinker *shrinker, 206 struct shrink_control *sc, 207 unsigned long nr_to_scan) 208{ 209 sc->nr_to_scan = nr_to_scan; 210 return (*shrinker->shrink)(shrinker, sc); 211} 212 213#define SHRINK_BATCH 128 214/* 215 * Call the shrink functions to age shrinkable caches 216 * 217 * Here we assume it costs one seek to replace a lru page and that it also 218 * takes a seek to recreate a cache object. With this in mind we age equal 219 * percentages of the lru and ageable caches. This should balance the seeks 220 * generated by these structures. 221 * 222 * If the vm encountered mapped pages on the LRU it increase the pressure on 223 * slab to avoid swapping. 224 * 225 * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits. 226 * 227 * `lru_pages' represents the number of on-LRU pages in all the zones which 228 * are eligible for the caller's allocation attempt. It is used for balancing 229 * slab reclaim versus page reclaim. 230 * 231 * Returns the number of slab objects which we shrunk. 232 */ 233unsigned long shrink_slab(struct shrink_control *shrink, 234 unsigned long nr_pages_scanned, 235 unsigned long lru_pages) 236{ 237 struct shrinker *shrinker; 238 unsigned long ret = 0; 239 240 if (nr_pages_scanned == 0) 241 nr_pages_scanned = SWAP_CLUSTER_MAX; 242 243 if (!down_read_trylock(&shrinker_rwsem)) { 244 /* Assume we'll be able to shrink next time */ 245 ret = 1; 246 goto out; 247 } 248 249 list_for_each_entry(shrinker, &shrinker_list, list) { 250 unsigned long long delta; 251 unsigned long total_scan; 252 unsigned long max_pass; 253 254 max_pass = do_shrinker_shrink(shrinker, shrink, 0); 255 delta = (4 * nr_pages_scanned) / shrinker->seeks; 256 delta *= max_pass; 257 do_div(delta, lru_pages + 1); 258 shrinker->nr += delta; 259 if (shrinker->nr < 0) { 260 printk(KERN_ERR "shrink_slab: %pF negative objects to " 261 "delete nr=%ld\n", 262 shrinker->shrink, shrinker->nr); 263 shrinker->nr = max_pass; 264 } 265 266 /* 267 * Avoid risking looping forever due to too large nr value: 268 * never try to free more than twice the estimate number of 269 * freeable entries. 270 */ 271 if (shrinker->nr > max_pass * 2) 272 shrinker->nr = max_pass * 2; 273 274 total_scan = shrinker->nr; 275 shrinker->nr = 0; 276 277 while (total_scan >= SHRINK_BATCH) { 278 long this_scan = SHRINK_BATCH; 279 int shrink_ret; 280 int nr_before; 281 282 nr_before = do_shrinker_shrink(shrinker, shrink, 0); 283 shrink_ret = do_shrinker_shrink(shrinker, shrink, 284 this_scan); 285 if (shrink_ret == -1) 286 break; 287 if (shrink_ret < nr_before) 288 ret += nr_before - shrink_ret; 289 count_vm_events(SLABS_SCANNED, this_scan); 290 total_scan -= this_scan; 291 292 cond_resched(); 293 } 294 295 shrinker->nr += total_scan; 296 } 297 up_read(&shrinker_rwsem); 298out: 299 cond_resched(); 300 return ret; 301} 302 303static void set_reclaim_mode(int priority, struct scan_control *sc, 304 bool sync) 305{ 306 reclaim_mode_t syncmode = sync ? RECLAIM_MODE_SYNC : RECLAIM_MODE_ASYNC; 307 308 /* 309 * Initially assume we are entering either lumpy reclaim or 310 * reclaim/compaction.Depending on the order, we will either set the 311 * sync mode or just reclaim order-0 pages later. 312 */ 313 if (COMPACTION_BUILD) 314 sc->reclaim_mode = RECLAIM_MODE_COMPACTION; 315 else 316 sc->reclaim_mode = RECLAIM_MODE_LUMPYRECLAIM; 317 318 /* 319 * Avoid using lumpy reclaim or reclaim/compaction if possible by 320 * restricting when its set to either costly allocations or when 321 * under memory pressure 322 */ 323 if (sc->order > PAGE_ALLOC_COSTLY_ORDER) 324 sc->reclaim_mode |= syncmode; 325 else if (sc->order && priority < DEF_PRIORITY - 2) 326 sc->reclaim_mode |= syncmode; 327 else 328 sc->reclaim_mode = RECLAIM_MODE_SINGLE | RECLAIM_MODE_ASYNC; 329} 330 331static void reset_reclaim_mode(struct scan_control *sc) 332{ 333 sc->reclaim_mode = RECLAIM_MODE_SINGLE | RECLAIM_MODE_ASYNC; 334} 335 336static inline int is_page_cache_freeable(struct page *page) 337{ 338 /* 339 * A freeable page cache page is referenced only by the caller 340 * that isolated the page, the page cache radix tree and 341 * optional buffer heads at page->private. 342 */ 343 return page_count(page) - page_has_private(page) == 2; 344} 345 346static int may_write_to_queue(struct backing_dev_info *bdi, 347 struct scan_control *sc) 348{ 349 if (current->flags & PF_SWAPWRITE) 350 return 1; 351 if (!bdi_write_congested(bdi)) 352 return 1; 353 if (bdi == current->backing_dev_info) 354 return 1; 355 356 /* lumpy reclaim for hugepage often need a lot of write */ 357 if (sc->order > PAGE_ALLOC_COSTLY_ORDER) 358 return 1; 359 return 0; 360} 361 362/* 363 * We detected a synchronous write error writing a page out. Probably 364 * -ENOSPC. We need to propagate that into the address_space for a subsequent 365 * fsync(), msync() or close(). 366 * 367 * The tricky part is that after writepage we cannot touch the mapping: nothing 368 * prevents it from being freed up. But we have a ref on the page and once 369 * that page is locked, the mapping is pinned. 370 * 371 * We're allowed to run sleeping lock_page() here because we know the caller has 372 * __GFP_FS. 373 */ 374static void handle_write_error(struct address_space *mapping, 375 struct page *page, int error) 376{ 377 lock_page(page); 378 if (page_mapping(page) == mapping) 379 mapping_set_error(mapping, error); 380 unlock_page(page); 381} 382 383/* possible outcome of pageout() */ 384typedef enum { 385 /* failed to write page out, page is locked */ 386 PAGE_KEEP, 387 /* move page to the active list, page is locked */ 388 PAGE_ACTIVATE, 389 /* page has been sent to the disk successfully, page is unlocked */ 390 PAGE_SUCCESS, 391 /* page is clean and locked */ 392 PAGE_CLEAN, 393} pageout_t; 394 395/* 396 * pageout is called by shrink_page_list() for each dirty page. 397 * Calls ->writepage(). 398 */ 399static pageout_t pageout(struct page *page, struct address_space *mapping, 400 struct scan_control *sc) 401{ 402 /* 403 * If the page is dirty, only perform writeback if that write 404 * will be non-blocking. To prevent this allocation from being 405 * stalled by pagecache activity. But note that there may be 406 * stalls if we need to run get_block(). We could test 407 * PagePrivate for that. 408 * 409 * If this process is currently in __generic_file_aio_write() against 410 * this page's queue, we can perform writeback even if that 411 * will block. 412 * 413 * If the page is swapcache, write it back even if that would 414 * block, for some throttling. This happens by accident, because 415 * swap_backing_dev_info is bust: it doesn't reflect the 416 * congestion state of the swapdevs. Easy to fix, if needed. 417 */ 418 if (!is_page_cache_freeable(page)) 419 return PAGE_KEEP; 420 if (!mapping) { 421 /* 422 * Some data journaling orphaned pages can have 423 * page->mapping == NULL while being dirty with clean buffers. 424 */ 425 if (page_has_private(page)) { 426 if (try_to_free_buffers(page)) { 427 ClearPageDirty(page); 428 printk("%s: orphaned page\n", __func__); 429 return PAGE_CLEAN; 430 } 431 } 432 return PAGE_KEEP; 433 } 434 if (mapping->a_ops->writepage == NULL) 435 return PAGE_ACTIVATE; 436 if (!may_write_to_queue(mapping->backing_dev_info, sc)) 437 return PAGE_KEEP; 438 439 if (clear_page_dirty_for_io(page)) { 440 int res; 441 struct writeback_control wbc = { 442 .sync_mode = WB_SYNC_NONE, 443 .nr_to_write = SWAP_CLUSTER_MAX, 444 .range_start = 0, 445 .range_end = LLONG_MAX, 446 .for_reclaim = 1, 447 }; 448 449 SetPageReclaim(page); 450 res = mapping->a_ops->writepage(page, &wbc); 451 if (res < 0) 452 handle_write_error(mapping, page, res); 453 if (res == AOP_WRITEPAGE_ACTIVATE) { 454 ClearPageReclaim(page); 455 return PAGE_ACTIVATE; 456 } 457 458 /* 459 * Wait on writeback if requested to. This happens when 460 * direct reclaiming a large contiguous area and the 461 * first attempt to free a range of pages fails. 462 */ 463 if (PageWriteback(page) && 464 (sc->reclaim_mode & RECLAIM_MODE_SYNC)) 465 wait_on_page_writeback(page); 466 467 if (!PageWriteback(page)) { 468 /* synchronous write or broken a_ops? */ 469 ClearPageReclaim(page); 470 } 471 trace_mm_vmscan_writepage(page, 472 trace_reclaim_flags(page, sc->reclaim_mode)); 473 inc_zone_page_state(page, NR_VMSCAN_WRITE); 474 return PAGE_SUCCESS; 475 } 476 477 return PAGE_CLEAN; 478} 479 480/* 481 * Same as remove_mapping, but if the page is removed from the mapping, it 482 * gets returned with a refcount of 0. 483 */ 484static int __remove_mapping(struct address_space *mapping, struct page *page) 485{ 486 BUG_ON(!PageLocked(page)); 487 BUG_ON(mapping != page_mapping(page)); 488 489 spin_lock_irq(&mapping->tree_lock); 490 /* 491 * The non racy check for a busy page. 492 * 493 * Must be careful with the order of the tests. When someone has 494 * a ref to the page, it may be possible that they dirty it then 495 * drop the reference. So if PageDirty is tested before page_count 496 * here, then the following race may occur: 497 * 498 * get_user_pages(&page); 499 * [user mapping goes away] 500 * write_to(page); 501 * !PageDirty(page) [good] 502 * SetPageDirty(page); 503 * put_page(page); 504 * !page_count(page) [good, discard it] 505 * 506 * [oops, our write_to data is lost] 507 * 508 * Reversing the order of the tests ensures such a situation cannot 509 * escape unnoticed. The smp_rmb is needed to ensure the page->flags 510 * load is not satisfied before that of page->_count. 511 * 512 * Note that if SetPageDirty is always performed via set_page_dirty, 513 * and thus under tree_lock, then this ordering is not required. 514 */ 515 if (!page_freeze_refs(page, 2)) 516 goto cannot_free; 517 /* note: atomic_cmpxchg in page_freeze_refs provides the smp_rmb */ 518 if (unlikely(PageDirty(page))) { 519 page_unfreeze_refs(page, 2); 520 goto cannot_free; 521 } 522 523 if (PageSwapCache(page)) { 524 swp_entry_t swap = { .val = page_private(page) }; 525 __delete_from_swap_cache(page); 526 spin_unlock_irq(&mapping->tree_lock); 527 swapcache_free(swap, page); 528 } else { 529 void (*freepage)(struct page *); 530 531 freepage = mapping->a_ops->freepage; 532 533 __delete_from_page_cache(page); 534 spin_unlock_irq(&mapping->tree_lock); 535 mem_cgroup_uncharge_cache_page(page); 536 537 if (freepage != NULL) 538 freepage(page); 539 } 540 541 return 1; 542 543cannot_free: 544 spin_unlock_irq(&mapping->tree_lock); 545 return 0; 546} 547 548/* 549 * Attempt to detach a locked page from its ->mapping. If it is dirty or if 550 * someone else has a ref on the page, abort and return 0. If it was 551 * successfully detached, return 1. Assumes the caller has a single ref on 552 * this page. 553 */ 554int remove_mapping(struct address_space *mapping, struct page *page) 555{ 556 if (__remove_mapping(mapping, page)) { 557 /* 558 * Unfreezing the refcount with 1 rather than 2 effectively 559 * drops the pagecache ref for us without requiring another 560 * atomic operation. 561 */ 562 page_unfreeze_refs(page, 1); 563 return 1; 564 } 565 return 0; 566} 567 568/** 569 * putback_lru_page - put previously isolated page onto appropriate LRU list 570 * @page: page to be put back to appropriate lru list 571 * 572 * Add previously isolated @page to appropriate LRU list. 573 * Page may still be unevictable for other reasons. 574 * 575 * lru_lock must not be held, interrupts must be enabled. 576 */ 577void putback_lru_page(struct page *page) 578{ 579 int lru; 580 int active = !!TestClearPageActive(page); 581 int was_unevictable = PageUnevictable(page); 582 583 VM_BUG_ON(PageLRU(page)); 584 585redo: 586 ClearPageUnevictable(page); 587 588 if (page_evictable(page, NULL)) { 589 /* 590 * For evictable pages, we can use the cache. 591 * In event of a race, worst case is we end up with an 592 * unevictable page on [in]active list. 593 * We know how to handle that. 594 */ 595 lru = active + page_lru_base_type(page); 596 lru_cache_add_lru(page, lru); 597 } else { 598 /* 599 * Put unevictable pages directly on zone's unevictable 600 * list. 601 */ 602 lru = LRU_UNEVICTABLE; 603 add_page_to_unevictable_list(page); 604 /* 605 * When racing with an mlock clearing (page is 606 * unlocked), make sure that if the other thread does 607 * not observe our setting of PG_lru and fails 608 * isolation, we see PG_mlocked cleared below and move 609 * the page back to the evictable list. 610 * 611 * The other side is TestClearPageMlocked(). 612 */ 613 smp_mb(); 614 } 615 616 /* 617 * page's status can change while we move it among lru. If an evictable 618 * page is on unevictable list, it never be freed. To avoid that, 619 * check after we added it to the list, again. 620 */ 621 if (lru == LRU_UNEVICTABLE && page_evictable(page, NULL)) { 622 if (!isolate_lru_page(page)) { 623 put_page(page); 624 goto redo; 625 } 626 /* This means someone else dropped this page from LRU 627 * So, it will be freed or putback to LRU again. There is 628 * nothing to do here. 629 */ 630 } 631 632 if (was_unevictable && lru != LRU_UNEVICTABLE) 633 count_vm_event(UNEVICTABLE_PGRESCUED); 634 else if (!was_unevictable && lru == LRU_UNEVICTABLE) 635 count_vm_event(UNEVICTABLE_PGCULLED); 636 637 put_page(page); /* drop ref from isolate */ 638} 639 640enum page_references { 641 PAGEREF_RECLAIM, 642 PAGEREF_RECLAIM_CLEAN, 643 PAGEREF_KEEP, 644 PAGEREF_ACTIVATE, 645}; 646 647static enum page_references page_check_references(struct page *page, 648 struct scan_control *sc) 649{ 650 int referenced_ptes, referenced_page; 651 unsigned long vm_flags; 652 653 referenced_ptes = page_referenced(page, 1, sc->mem_cgroup, &vm_flags); 654 referenced_page = TestClearPageReferenced(page); 655 656 /* Lumpy reclaim - ignore references */ 657 if (sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM) 658 return PAGEREF_RECLAIM; 659 660 /* 661 * Mlock lost the isolation race with us. Let try_to_unmap() 662 * move the page to the unevictable list. 663 */ 664 if (vm_flags & VM_LOCKED) 665 return PAGEREF_RECLAIM; 666 667 if (referenced_ptes) { 668 if (PageAnon(page)) 669 return PAGEREF_ACTIVATE; 670 /* 671 * All mapped pages start out with page table 672 * references from the instantiating fault, so we need 673 * to look twice if a mapped file page is used more 674 * than once. 675 * 676 * Mark it and spare it for another trip around the 677 * inactive list. Another page table reference will 678 * lead to its activation. 679 * 680 * Note: the mark is set for activated pages as well 681 * so that recently deactivated but used pages are 682 * quickly recovered. 683 */ 684 SetPageReferenced(page); 685 686 if (referenced_page) 687 return PAGEREF_ACTIVATE; 688 689 return PAGEREF_KEEP; 690 } 691 692 /* Reclaim if clean, defer dirty pages to writeback */ 693 if (referenced_page && !PageSwapBacked(page)) 694 return PAGEREF_RECLAIM_CLEAN; 695 696 return PAGEREF_RECLAIM; 697} 698 699static noinline_for_stack void free_page_list(struct list_head *free_pages) 700{ 701 struct pagevec freed_pvec; 702 struct page *page, *tmp; 703 704 pagevec_init(&freed_pvec, 1); 705 706 list_for_each_entry_safe(page, tmp, free_pages, lru) { 707 list_del(&page->lru); 708 if (!pagevec_add(&freed_pvec, page)) { 709 __pagevec_free(&freed_pvec); 710 pagevec_reinit(&freed_pvec); 711 } 712 } 713 714 pagevec_free(&freed_pvec); 715} 716 717/* 718 * shrink_page_list() returns the number of reclaimed pages 719 */ 720static unsigned long shrink_page_list(struct list_head *page_list, 721 struct zone *zone, 722 struct scan_control *sc) 723{ 724 LIST_HEAD(ret_pages); 725 LIST_HEAD(free_pages); 726 int pgactivate = 0; 727 unsigned long nr_dirty = 0; 728 unsigned long nr_congested = 0; 729 unsigned long nr_reclaimed = 0; 730 731 cond_resched(); 732 733 while (!list_empty(page_list)) { 734 enum page_references references; 735 struct address_space *mapping; 736 struct page *page; 737 int may_enter_fs; 738 739 cond_resched(); 740 741 page = lru_to_page(page_list); 742 list_del(&page->lru); 743 744 if (!trylock_page(page)) 745 goto keep; 746 747 VM_BUG_ON(PageActive(page)); 748 VM_BUG_ON(page_zone(page) != zone); 749 750 sc->nr_scanned++; 751 752 if (unlikely(!page_evictable(page, NULL))) 753 goto cull_mlocked; 754 755 if (!sc->may_unmap && page_mapped(page)) 756 goto keep_locked; 757 758 /* Double the slab pressure for mapped and swapcache pages */ 759 if (page_mapped(page) || PageSwapCache(page)) 760 sc->nr_scanned++; 761 762 may_enter_fs = (sc->gfp_mask & __GFP_FS) || 763 (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO)); 764 765 if (PageWriteback(page)) { 766 /* 767 * Synchronous reclaim is performed in two passes, 768 * first an asynchronous pass over the list to 769 * start parallel writeback, and a second synchronous 770 * pass to wait for the IO to complete. Wait here 771 * for any page for which writeback has already 772 * started. 773 */ 774 if ((sc->reclaim_mode & RECLAIM_MODE_SYNC) && 775 may_enter_fs) 776 wait_on_page_writeback(page); 777 else { 778 unlock_page(page); 779 goto keep_lumpy; 780 } 781 } 782 783 references = page_check_references(page, sc); 784 switch (references) { 785 case PAGEREF_ACTIVATE: 786 goto activate_locked; 787 case PAGEREF_KEEP: 788 goto keep_locked; 789 case PAGEREF_RECLAIM: 790 case PAGEREF_RECLAIM_CLEAN: 791 ; /* try to reclaim the page below */ 792 } 793 794 /* 795 * Anonymous process memory has backing store? 796 * Try to allocate it some swap space here. 797 */ 798 if (PageAnon(page) && !PageSwapCache(page)) { 799 if (!(sc->gfp_mask & __GFP_IO)) 800 goto keep_locked; 801 if (!add_to_swap(page)) 802 goto activate_locked; 803 may_enter_fs = 1; 804 } 805 806 mapping = page_mapping(page); 807 808 /* 809 * The page is mapped into the page tables of one or more 810 * processes. Try to unmap it here. 811 */ 812 if (page_mapped(page) && mapping) { 813 switch (try_to_unmap(page, TTU_UNMAP)) { 814 case SWAP_FAIL: 815 goto activate_locked; 816 case SWAP_AGAIN: 817 goto keep_locked; 818 case SWAP_MLOCK: 819 goto cull_mlocked; 820 case SWAP_SUCCESS: 821 ; /* try to free the page below */ 822 } 823 } 824 825 if (PageDirty(page)) { 826 nr_dirty++; 827 828 if (references == PAGEREF_RECLAIM_CLEAN) 829 goto keep_locked; 830 if (!may_enter_fs) 831 goto keep_locked; 832 if (!sc->may_writepage) 833 goto keep_locked; 834 835 /* Page is dirty, try to write it out here */ 836 switch (pageout(page, mapping, sc)) { 837 case PAGE_KEEP: 838 nr_congested++; 839 goto keep_locked; 840 case PAGE_ACTIVATE: 841 goto activate_locked; 842 case PAGE_SUCCESS: 843 if (PageWriteback(page)) 844 goto keep_lumpy; 845 if (PageDirty(page)) 846 goto keep; 847 848 /* 849 * A synchronous write - probably a ramdisk. Go 850 * ahead and try to reclaim the page. 851 */ 852 if (!trylock_page(page)) 853 goto keep; 854 if (PageDirty(page) || PageWriteback(page)) 855 goto keep_locked; 856 mapping = page_mapping(page); 857 case PAGE_CLEAN: 858 ; /* try to free the page below */ 859 } 860 } 861 862 /* 863 * If the page has buffers, try to free the buffer mappings 864 * associated with this page. If we succeed we try to free 865 * the page as well. 866 * 867 * We do this even if the page is PageDirty(). 868 * try_to_release_page() does not perform I/O, but it is 869 * possible for a page to have PageDirty set, but it is actually 870 * clean (all its buffers are clean). This happens if the 871 * buffers were written out directly, with submit_bh(). ext3 872 * will do this, as well as the blockdev mapping. 873 * try_to_release_page() will discover that cleanness and will 874 * drop the buffers and mark the page clean - it can be freed. 875 * 876 * Rarely, pages can have buffers and no ->mapping. These are 877 * the pages which were not successfully invalidated in 878 * truncate_complete_page(). We try to drop those buffers here 879 * and if that worked, and the page is no longer mapped into 880 * process address space (page_count == 1) it can be freed. 881 * Otherwise, leave the page on the LRU so it is swappable. 882 */ 883 if (page_has_private(page)) { 884 if (!try_to_release_page(page, sc->gfp_mask)) 885 goto activate_locked; 886 if (!mapping && page_count(page) == 1) { 887 unlock_page(page); 888 if (put_page_testzero(page)) 889 goto free_it; 890 else { 891 /* 892 * rare race with speculative reference. 893 * the speculative reference will free 894 * this page shortly, so we may 895 * increment nr_reclaimed here (and 896 * leave it off the LRU). 897 */ 898 nr_reclaimed++; 899 continue; 900 } 901 } 902 } 903 904 if (!mapping || !__remove_mapping(mapping, page)) 905 goto keep_locked; 906 907 /* 908 * At this point, we have no other references and there is 909 * no way to pick any more up (removed from LRU, removed 910 * from pagecache). Can use non-atomic bitops now (and 911 * we obviously don't have to worry about waking up a process 912 * waiting on the page lock, because there are no references. 913 */ 914 __clear_page_locked(page); 915free_it: 916 nr_reclaimed++; 917 918 /* 919 * Is there need to periodically free_page_list? It would 920 * appear not as the counts should be low 921 */ 922 list_add(&page->lru, &free_pages); 923 continue; 924 925cull_mlocked: 926 if (PageSwapCache(page)) 927 try_to_free_swap(page); 928 unlock_page(page); 929 putback_lru_page(page); 930 reset_reclaim_mode(sc); 931 continue; 932 933activate_locked: 934 /* Not a candidate for swapping, so reclaim swap space. */ 935 if (PageSwapCache(page) && vm_swap_full()) 936 try_to_free_swap(page); 937 VM_BUG_ON(PageActive(page)); 938 SetPageActive(page); 939 pgactivate++; 940keep_locked: 941 unlock_page(page); 942keep: 943 reset_reclaim_mode(sc); 944keep_lumpy: 945 list_add(&page->lru, &ret_pages); 946 VM_BUG_ON(PageLRU(page) || PageUnevictable(page)); 947 } 948 949 /* 950 * Tag a zone as congested if all the dirty pages encountered were 951 * backed by a congested BDI. In this case, reclaimers should just 952 * back off and wait for congestion to clear because further reclaim 953 * will encounter the same problem 954 */ 955 if (nr_dirty && nr_dirty == nr_congested && scanning_global_lru(sc)) 956 zone_set_flag(zone, ZONE_CONGESTED); 957 958 free_page_list(&free_pages); 959 960 list_splice(&ret_pages, page_list); 961 count_vm_events(PGACTIVATE, pgactivate); 962 return nr_reclaimed; 963} 964 965/* 966 * Attempt to remove the specified page from its LRU. Only take this page 967 * if it is of the appropriate PageActive status. Pages which are being 968 * freed elsewhere are also ignored. 969 * 970 * page: page to consider 971 * mode: one of the LRU isolation modes defined above 972 * 973 * returns 0 on success, -ve errno on failure. 974 */ 975int __isolate_lru_page(struct page *page, int mode, int file) 976{ 977 int ret = -EINVAL; 978 979 /* Only take pages on the LRU. */ 980 if (!PageLRU(page)) 981 return ret; 982 983 /* 984 * When checking the active state, we need to be sure we are 985 * dealing with comparible boolean values. Take the logical not 986 * of each. 987 */ 988 if (mode != ISOLATE_BOTH && (!PageActive(page) != !mode)) 989 return ret; 990 991 if (mode != ISOLATE_BOTH && page_is_file_cache(page) != file) 992 return ret; 993 994 /* 995 * When this function is being called for lumpy reclaim, we 996 * initially look into all LRU pages, active, inactive and 997 * unevictable; only give shrink_page_list evictable pages. 998 */ 999 if (PageUnevictable(page)) 1000 return ret; 1001 1002 ret = -EBUSY; 1003 1004 if (likely(get_page_unless_zero(page))) { 1005 /* 1006 * Be careful not to clear PageLRU until after we're 1007 * sure the page is not being freed elsewhere -- the 1008 * page release code relies on it. 1009 */ 1010 ClearPageLRU(page); 1011 ret = 0; 1012 } 1013 1014 return ret; 1015} 1016 1017/* 1018 * zone->lru_lock is heavily contended. Some of the functions that 1019 * shrink the lists perform better by taking out a batch of pages 1020 * and working on them outside the LRU lock. 1021 * 1022 * For pagecache intensive workloads, this function is the hottest 1023 * spot in the kernel (apart from copy_*_user functions). 1024 * 1025 * Appropriate locks must be held before calling this function. 1026 * 1027 * @nr_to_scan: The number of pages to look through on the list. 1028 * @src: The LRU list to pull pages off. 1029 * @dst: The temp list to put pages on to. 1030 * @scanned: The number of pages that were scanned. 1031 * @order: The caller's attempted allocation order 1032 * @mode: One of the LRU isolation modes 1033 * @file: True [1] if isolating file [!anon] pages 1034 * 1035 * returns how many pages were moved onto *@dst. 1036 */ 1037static unsigned long isolate_lru_pages(unsigned long nr_to_scan, 1038 struct list_head *src, struct list_head *dst, 1039 unsigned long *scanned, int order, int mode, int file) 1040{ 1041 unsigned long nr_taken = 0; 1042 unsigned long nr_lumpy_taken = 0; 1043 unsigned long nr_lumpy_dirty = 0; 1044 unsigned long nr_lumpy_failed = 0; 1045 unsigned long scan; 1046 1047 for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) { 1048 struct page *page; 1049 unsigned long pfn; 1050 unsigned long end_pfn; 1051 unsigned long page_pfn; 1052 int zone_id; 1053 1054 page = lru_to_page(src); 1055 prefetchw_prev_lru_page(page, src, flags); 1056 1057 VM_BUG_ON(!PageLRU(page)); 1058 1059 switch (__isolate_lru_page(page, mode, file)) { 1060 case 0: 1061 list_move(&page->lru, dst); 1062 mem_cgroup_del_lru(page); 1063 nr_taken += hpage_nr_pages(page); 1064 break; 1065 1066 case -EBUSY: 1067 /* else it is being freed elsewhere */ 1068 list_move(&page->lru, src); 1069 mem_cgroup_rotate_lru_list(page, page_lru(page)); 1070 continue; 1071 1072 default: 1073 BUG(); 1074 } 1075 1076 if (!order) 1077 continue; 1078 1079 /* 1080 * Attempt to take all pages in the order aligned region 1081 * surrounding the tag page. Only take those pages of 1082 * the same active state as that tag page. We may safely 1083 * round the target page pfn down to the requested order 1084 * as the mem_map is guaranteed valid out to MAX_ORDER, 1085 * where that page is in a different zone we will detect 1086 * it from its zone id and abort this block scan. 1087 */ 1088 zone_id = page_zone_id(page); 1089 page_pfn = page_to_pfn(page); 1090 pfn = page_pfn & ~((1 << order) - 1); 1091 end_pfn = pfn + (1 << order); 1092 for (; pfn < end_pfn; pfn++) { 1093 struct page *cursor_page; 1094 1095 /* The target page is in the block, ignore it. */ 1096 if (unlikely(pfn == page_pfn)) 1097 continue; 1098 1099 /* Avoid holes within the zone. */ 1100 if (unlikely(!pfn_valid_within(pfn))) 1101 break; 1102 1103 cursor_page = pfn_to_page(pfn); 1104 1105 /* Check that we have not crossed a zone boundary. */ 1106 if (unlikely(page_zone_id(cursor_page) != zone_id)) 1107 break; 1108 1109 /* 1110 * If we don't have enough swap space, reclaiming of 1111 * anon page which don't already have a swap slot is 1112 * pointless. 1113 */ 1114 if (nr_swap_pages <= 0 && PageAnon(cursor_page) && 1115 !PageSwapCache(cursor_page)) 1116 break; 1117 1118 if (__isolate_lru_page(cursor_page, mode, file) == 0) { 1119 list_move(&cursor_page->lru, dst); 1120 mem_cgroup_del_lru(cursor_page); 1121 nr_taken += hpage_nr_pages(page); 1122 nr_lumpy_taken++; 1123 if (PageDirty(cursor_page)) 1124 nr_lumpy_dirty++; 1125 scan++; 1126 } else { 1127 /* the page is freed already. */ 1128 if (!page_count(cursor_page)) 1129 continue; 1130 break; 1131 } 1132 } 1133 1134 /* If we break out of the loop above, lumpy reclaim failed */ 1135 if (pfn < end_pfn) 1136 nr_lumpy_failed++; 1137 } 1138 1139 *scanned = scan; 1140 1141 trace_mm_vmscan_lru_isolate(order, 1142 nr_to_scan, scan, 1143 nr_taken, 1144 nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed, 1145 mode); 1146 return nr_taken; 1147} 1148 1149static unsigned long isolate_pages_global(unsigned long nr, 1150 struct list_head *dst, 1151 unsigned long *scanned, int order, 1152 int mode, struct zone *z, 1153 int active, int file) 1154{ 1155 int lru = LRU_BASE; 1156 if (active) 1157 lru += LRU_ACTIVE; 1158 if (file) 1159 lru += LRU_FILE; 1160 return isolate_lru_pages(nr, &z->lru[lru].list, dst, scanned, order, 1161 mode, file); 1162} 1163 1164/* 1165 * clear_active_flags() is a helper for shrink_active_list(), clearing 1166 * any active bits from the pages in the list. 1167 */ 1168static unsigned long clear_active_flags(struct list_head *page_list, 1169 unsigned int *count) 1170{ 1171 int nr_active = 0; 1172 int lru; 1173 struct page *page; 1174 1175 list_for_each_entry(page, page_list, lru) { 1176 int numpages = hpage_nr_pages(page); 1177 lru = page_lru_base_type(page); 1178 if (PageActive(page)) { 1179 lru += LRU_ACTIVE; 1180 ClearPageActive(page); 1181 nr_active += numpages; 1182 } 1183 if (count) 1184 count[lru] += numpages; 1185 } 1186 1187 return nr_active; 1188} 1189 1190/** 1191 * isolate_lru_page - tries to isolate a page from its LRU list 1192 * @page: page to isolate from its LRU list 1193 * 1194 * Isolates a @page from an LRU list, clears PageLRU and adjusts the 1195 * vmstat statistic corresponding to whatever LRU list the page was on. 1196 * 1197 * Returns 0 if the page was removed from an LRU list. 1198 * Returns -EBUSY if the page was not on an LRU list. 1199 * 1200 * The returned page will have PageLRU() cleared. If it was found on 1201 * the active list, it will have PageActive set. If it was found on 1202 * the unevictable list, it will have the PageUnevictable bit set. That flag 1203 * may need to be cleared by the caller before letting the page go. 1204 * 1205 * The vmstat statistic corresponding to the list on which the page was 1206 * found will be decremented. 1207 * 1208 * Restrictions: 1209 * (1) Must be called with an elevated refcount on the page. This is a 1210 * fundamentnal difference from isolate_lru_pages (which is called 1211 * without a stable reference). 1212 * (2) the lru_lock must not be held. 1213 * (3) interrupts must be enabled. 1214 */ 1215int isolate_lru_page(struct page *page) 1216{ 1217 int ret = -EBUSY; 1218 1219 VM_BUG_ON(!page_count(page)); 1220 1221 if (PageLRU(page)) { 1222 struct zone *zone = page_zone(page); 1223 1224 spin_lock_irq(&zone->lru_lock); 1225 if (PageLRU(page)) { 1226 int lru = page_lru(page); 1227 ret = 0; 1228 get_page(page); 1229 ClearPageLRU(page); 1230 1231 del_page_from_lru_list(zone, page, lru); 1232 } 1233 spin_unlock_irq(&zone->lru_lock); 1234 } 1235 return ret; 1236} 1237 1238/* 1239 * Are there way too many processes in the direct reclaim path already? 1240 */ 1241static int too_many_isolated(struct zone *zone, int file, 1242 struct scan_control *sc) 1243{ 1244 unsigned long inactive, isolated; 1245 1246 if (current_is_kswapd()) 1247 return 0; 1248 1249 if (!scanning_global_lru(sc)) 1250 return 0; 1251 1252 if (file) { 1253 inactive = zone_page_state(zone, NR_INACTIVE_FILE); 1254 isolated = zone_page_state(zone, NR_ISOLATED_FILE); 1255 } else { 1256 inactive = zone_page_state(zone, NR_INACTIVE_ANON); 1257 isolated = zone_page_state(zone, NR_ISOLATED_ANON); 1258 } 1259 1260 return isolated > inactive; 1261} 1262 1263/* 1264 * TODO: Try merging with migrations version of putback_lru_pages 1265 */ 1266static noinline_for_stack void 1267putback_lru_pages(struct zone *zone, struct scan_control *sc, 1268 unsigned long nr_anon, unsigned long nr_file, 1269 struct list_head *page_list) 1270{ 1271 struct page *page; 1272 struct pagevec pvec; 1273 struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc); 1274 1275 pagevec_init(&pvec, 1); 1276 1277 /* 1278 * Put back any unfreeable pages. 1279 */ 1280 spin_lock(&zone->lru_lock); 1281 while (!list_empty(page_list)) { 1282 int lru; 1283 page = lru_to_page(page_list); 1284 VM_BUG_ON(PageLRU(page)); 1285 list_del(&page->lru); 1286 if (unlikely(!page_evictable(page, NULL))) { 1287 spin_unlock_irq(&zone->lru_lock); 1288 putback_lru_page(page); 1289 spin_lock_irq(&zone->lru_lock); 1290 continue; 1291 } 1292 SetPageLRU(page); 1293 lru = page_lru(page); 1294 add_page_to_lru_list(zone, page, lru); 1295 if (is_active_lru(lru)) { 1296 int file = is_file_lru(lru); 1297 int numpages = hpage_nr_pages(page); 1298 reclaim_stat->recent_rotated[file] += numpages; 1299 } 1300 if (!pagevec_add(&pvec, page)) { 1301 spin_unlock_irq(&zone->lru_lock); 1302 __pagevec_release(&pvec); 1303 spin_lock_irq(&zone->lru_lock); 1304 } 1305 } 1306 __mod_zone_page_state(zone, NR_ISOLATED_ANON, -nr_anon); 1307 __mod_zone_page_state(zone, NR_ISOLATED_FILE, -nr_file); 1308 1309 spin_unlock_irq(&zone->lru_lock); 1310 pagevec_release(&pvec); 1311} 1312 1313static noinline_for_stack void update_isolated_counts(struct zone *zone, 1314 struct scan_control *sc, 1315 unsigned long *nr_anon, 1316 unsigned long *nr_file, 1317 struct list_head *isolated_list) 1318{ 1319 unsigned long nr_active; 1320 unsigned int count[NR_LRU_LISTS] = { 0, }; 1321 struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc); 1322 1323 nr_active = clear_active_flags(isolated_list, count); 1324 __count_vm_events(PGDEACTIVATE, nr_active); 1325 1326 __mod_zone_page_state(zone, NR_ACTIVE_FILE, 1327 -count[LRU_ACTIVE_FILE]); 1328 __mod_zone_page_state(zone, NR_INACTIVE_FILE, 1329 -count[LRU_INACTIVE_FILE]); 1330 __mod_zone_page_state(zone, NR_ACTIVE_ANON, 1331 -count[LRU_ACTIVE_ANON]); 1332 __mod_zone_page_state(zone, NR_INACTIVE_ANON, 1333 -count[LRU_INACTIVE_ANON]); 1334 1335 *nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON]; 1336 *nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE]; 1337 __mod_zone_page_state(zone, NR_ISOLATED_ANON, *nr_anon); 1338 __mod_zone_page_state(zone, NR_ISOLATED_FILE, *nr_file); 1339 1340 reclaim_stat->recent_scanned[0] += *nr_anon; 1341 reclaim_stat->recent_scanned[1] += *nr_file; 1342} 1343 1344/* 1345 * Returns true if the caller should wait to clean dirty/writeback pages. 1346 * 1347 * If we are direct reclaiming for contiguous pages and we do not reclaim 1348 * everything in the list, try again and wait for writeback IO to complete. 1349 * This will stall high-order allocations noticeably. Only do that when really 1350 * need to free the pages under high memory pressure. 1351 */ 1352static inline bool should_reclaim_stall(unsigned long nr_taken, 1353 unsigned long nr_freed, 1354 int priority, 1355 struct scan_control *sc) 1356{ 1357 int lumpy_stall_priority; 1358 1359 /* kswapd should not stall on sync IO */ 1360 if (current_is_kswapd()) 1361 return false; 1362 1363 /* Only stall on lumpy reclaim */ 1364 if (sc->reclaim_mode & RECLAIM_MODE_SINGLE) 1365 return false; 1366 1367 /* If we have relaimed everything on the isolated list, no stall */ 1368 if (nr_freed == nr_taken) 1369 return false; 1370 1371 /* 1372 * For high-order allocations, there are two stall thresholds. 1373 * High-cost allocations stall immediately where as lower 1374 * order allocations such as stacks require the scanning 1375 * priority to be much higher before stalling. 1376 */ 1377 if (sc->order > PAGE_ALLOC_COSTLY_ORDER) 1378 lumpy_stall_priority = DEF_PRIORITY; 1379 else 1380 lumpy_stall_priority = DEF_PRIORITY / 3; 1381 1382 return priority <= lumpy_stall_priority; 1383} 1384 1385/* 1386 * shrink_inactive_list() is a helper for shrink_zone(). It returns the number 1387 * of reclaimed pages 1388 */ 1389static noinline_for_stack unsigned long 1390shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone, 1391 struct scan_control *sc, int priority, int file) 1392{ 1393 LIST_HEAD(page_list); 1394 unsigned long nr_scanned; 1395 unsigned long nr_reclaimed = 0; 1396 unsigned long nr_taken; 1397 unsigned long nr_anon; 1398 unsigned long nr_file; 1399 1400 while (unlikely(too_many_isolated(zone, file, sc))) { 1401 congestion_wait(BLK_RW_ASYNC, HZ/10); 1402 1403 /* We are about to die and free our memory. Return now. */ 1404 if (fatal_signal_pending(current)) 1405 return SWAP_CLUSTER_MAX; 1406 } 1407 1408 set_reclaim_mode(priority, sc, false); 1409 lru_add_drain(); 1410 spin_lock_irq(&zone->lru_lock); 1411 1412 if (scanning_global_lru(sc)) { 1413 nr_taken = isolate_pages_global(nr_to_scan, 1414 &page_list, &nr_scanned, sc->order, 1415 sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM ? 1416 ISOLATE_BOTH : ISOLATE_INACTIVE, 1417 zone, 0, file); 1418 zone->pages_scanned += nr_scanned; 1419 if (current_is_kswapd()) 1420 __count_zone_vm_events(PGSCAN_KSWAPD, zone, 1421 nr_scanned); 1422 else 1423 __count_zone_vm_events(PGSCAN_DIRECT, zone, 1424 nr_scanned); 1425 } else { 1426 nr_taken = mem_cgroup_isolate_pages(nr_to_scan, 1427 &page_list, &nr_scanned, sc->order, 1428 sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM ? 1429 ISOLATE_BOTH : ISOLATE_INACTIVE, 1430 zone, sc->mem_cgroup, 1431 0, file); 1432 /* 1433 * mem_cgroup_isolate_pages() keeps track of 1434 * scanned pages on its own. 1435 */ 1436 } 1437 1438 if (nr_taken == 0) { 1439 spin_unlock_irq(&zone->lru_lock); 1440 return 0; 1441 } 1442 1443 update_isolated_counts(zone, sc, &nr_anon, &nr_file, &page_list); 1444 1445 spin_unlock_irq(&zone->lru_lock); 1446 1447 nr_reclaimed = shrink_page_list(&page_list, zone, sc); 1448 1449 /* Check if we should syncronously wait for writeback */ 1450 if (should_reclaim_stall(nr_taken, nr_reclaimed, priority, sc)) { 1451 set_reclaim_mode(priority, sc, true); 1452 nr_reclaimed += shrink_page_list(&page_list, zone, sc); 1453 } 1454 1455 local_irq_disable(); 1456 if (current_is_kswapd()) 1457 __count_vm_events(KSWAPD_STEAL, nr_reclaimed); 1458 __count_zone_vm_events(PGSTEAL, zone, nr_reclaimed); 1459 1460 putback_lru_pages(zone, sc, nr_anon, nr_file, &page_list); 1461 1462 trace_mm_vmscan_lru_shrink_inactive(zone->zone_pgdat->node_id, 1463 zone_idx(zone), 1464 nr_scanned, nr_reclaimed, 1465 priority, 1466 trace_shrink_flags(file, sc->reclaim_mode)); 1467 return nr_reclaimed; 1468} 1469 1470/* 1471 * This moves pages from the active list to the inactive list. 1472 * 1473 * We move them the other way if the page is referenced by one or more 1474 * processes, from rmap. 1475 * 1476 * If the pages are mostly unmapped, the processing is fast and it is 1477 * appropriate to hold zone->lru_lock across the whole operation. But if 1478 * the pages are mapped, the processing is slow (page_referenced()) so we 1479 * should drop zone->lru_lock around each page. It's impossible to balance 1480 * this, so instead we remove the pages from the LRU while processing them. 1481 * It is safe to rely on PG_active against the non-LRU pages in here because 1482 * nobody will play with that bit on a non-LRU page. 1483 * 1484 * The downside is that we have to touch page->_count against each page. 1485 * But we had to alter page->flags anyway. 1486 */ 1487 1488static void move_active_pages_to_lru(struct zone *zone, 1489 struct list_head *list, 1490 enum lru_list lru) 1491{ 1492 unsigned long pgmoved = 0; 1493 struct pagevec pvec; 1494 struct page *page; 1495 1496 pagevec_init(&pvec, 1); 1497 1498 while (!list_empty(list)) { 1499 page = lru_to_page(list); 1500 1501 VM_BUG_ON(PageLRU(page)); 1502 SetPageLRU(page); 1503 1504 list_move(&page->lru, &zone->lru[lru].list); 1505 mem_cgroup_add_lru_list(page, lru); 1506 pgmoved += hpage_nr_pages(page); 1507 1508 if (!pagevec_add(&pvec, page) || list_empty(list)) { 1509 spin_unlock_irq(&zone->lru_lock); 1510 if (buffer_heads_over_limit) 1511 pagevec_strip(&pvec); 1512 __pagevec_release(&pvec); 1513 spin_lock_irq(&zone->lru_lock); 1514 } 1515 } 1516 __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved); 1517 if (!is_active_lru(lru)) 1518 __count_vm_events(PGDEACTIVATE, pgmoved); 1519} 1520 1521static void shrink_active_list(unsigned long nr_pages, struct zone *zone, 1522 struct scan_control *sc, int priority, int file) 1523{ 1524 unsigned long nr_taken; 1525 unsigned long pgscanned; 1526 unsigned long vm_flags; 1527 LIST_HEAD(l_hold); /* The pages which were snipped off */ 1528 LIST_HEAD(l_active); 1529 LIST_HEAD(l_inactive); 1530 struct page *page; 1531 struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc); 1532 unsigned long nr_rotated = 0; 1533 1534 lru_add_drain(); 1535 spin_lock_irq(&zone->lru_lock); 1536 if (scanning_global_lru(sc)) { 1537 nr_taken = isolate_pages_global(nr_pages, &l_hold, 1538 &pgscanned, sc->order, 1539 ISOLATE_ACTIVE, zone, 1540 1, file); 1541 zone->pages_scanned += pgscanned; 1542 } else { 1543 nr_taken = mem_cgroup_isolate_pages(nr_pages, &l_hold, 1544 &pgscanned, sc->order, 1545 ISOLATE_ACTIVE, zone, 1546 sc->mem_cgroup, 1, file); 1547 /* 1548 * mem_cgroup_isolate_pages() keeps track of 1549 * scanned pages on its own. 1550 */ 1551 } 1552 1553 reclaim_stat->recent_scanned[file] += nr_taken; 1554 1555 __count_zone_vm_events(PGREFILL, zone, pgscanned); 1556 if (file) 1557 __mod_zone_page_state(zone, NR_ACTIVE_FILE, -nr_taken); 1558 else 1559 __mod_zone_page_state(zone, NR_ACTIVE_ANON, -nr_taken); 1560 __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken); 1561 spin_unlock_irq(&zone->lru_lock); 1562 1563 while (!list_empty(&l_hold)) { 1564 cond_resched(); 1565 page = lru_to_page(&l_hold); 1566 list_del(&page->lru); 1567 1568 if (unlikely(!page_evictable(page, NULL))) { 1569 putback_lru_page(page); 1570 continue; 1571 } 1572 1573 if (page_referenced(page, 0, sc->mem_cgroup, &vm_flags)) { 1574 nr_rotated += hpage_nr_pages(page); 1575 /* 1576 * Identify referenced, file-backed active pages and 1577 * give them one more trip around the active list. So 1578 * that executable code get better chances to stay in 1579 * memory under moderate memory pressure. Anon pages 1580 * are not likely to be evicted by use-once streaming 1581 * IO, plus JVM can create lots of anon VM_EXEC pages, 1582 * so we ignore them here. 1583 */ 1584 if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) { 1585 list_add(&page->lru, &l_active); 1586 continue; 1587 } 1588 } 1589 1590 ClearPageActive(page); /* we are de-activating */ 1591 list_add(&page->lru, &l_inactive); 1592 } 1593 1594 /* 1595 * Move pages back to the lru list. 1596 */ 1597 spin_lock_irq(&zone->lru_lock); 1598 /* 1599 * Count referenced pages from currently used mappings as rotated, 1600 * even though only some of them are actually re-activated. This 1601 * helps balance scan pressure between file and anonymous pages in 1602 * get_scan_ratio. 1603 */ 1604 reclaim_stat->recent_rotated[file] += nr_rotated; 1605 1606 move_active_pages_to_lru(zone, &l_active, 1607 LRU_ACTIVE + file * LRU_FILE); 1608 move_active_pages_to_lru(zone, &l_inactive, 1609 LRU_BASE + file * LRU_FILE); 1610 __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken); 1611 spin_unlock_irq(&zone->lru_lock); 1612} 1613 1614#ifdef CONFIG_SWAP 1615static int inactive_anon_is_low_global(struct zone *zone) 1616{ 1617 unsigned long active, inactive; 1618 1619 active = zone_page_state(zone, NR_ACTIVE_ANON); 1620 inactive = zone_page_state(zone, NR_INACTIVE_ANON); 1621 1622 if (inactive * zone->inactive_ratio < active) 1623 return 1; 1624 1625 return 0; 1626} 1627 1628/** 1629 * inactive_anon_is_low - check if anonymous pages need to be deactivated 1630 * @zone: zone to check 1631 * @sc: scan control of this context 1632 * 1633 * Returns true if the zone does not have enough inactive anon pages, 1634 * meaning some active anon pages need to be deactivated. 1635 */ 1636static int inactive_anon_is_low(struct zone *zone, struct scan_control *sc) 1637{ 1638 int low; 1639 1640 /* 1641 * If we don't have swap space, anonymous page deactivation 1642 * is pointless. 1643 */ 1644 if (!total_swap_pages) 1645 return 0; 1646 1647 if (scanning_global_lru(sc)) 1648 low = inactive_anon_is_low_global(zone); 1649 else 1650 low = mem_cgroup_inactive_anon_is_low(sc->mem_cgroup); 1651 return low; 1652} 1653#else 1654static inline int inactive_anon_is_low(struct zone *zone, 1655 struct scan_control *sc) 1656{ 1657 return 0; 1658} 1659#endif 1660 1661static int inactive_file_is_low_global(struct zone *zone) 1662{ 1663 unsigned long active, inactive; 1664 1665 active = zone_page_state(zone, NR_ACTIVE_FILE); 1666 inactive = zone_page_state(zone, NR_INACTIVE_FILE); 1667 1668 return (active > inactive); 1669} 1670 1671/** 1672 * inactive_file_is_low - check if file pages need to be deactivated 1673 * @zone: zone to check 1674 * @sc: scan control of this context 1675 * 1676 * When the system is doing streaming IO, memory pressure here 1677 * ensures that active file pages get deactivated, until more 1678 * than half of the file pages are on the inactive list. 1679 * 1680 * Once we get to that situation, protect the system's working 1681 * set from being evicted by disabling active file page aging. 1682 * 1683 * This uses a different ratio than the anonymous pages, because 1684 * the page cache uses a use-once replacement algorithm. 1685 */ 1686static int inactive_file_is_low(struct zone *zone, struct scan_control *sc) 1687{ 1688 int low; 1689 1690 if (scanning_global_lru(sc)) 1691 low = inactive_file_is_low_global(zone); 1692 else 1693 low = mem_cgroup_inactive_file_is_low(sc->mem_cgroup); 1694 return low; 1695} 1696 1697static int inactive_list_is_low(struct zone *zone, struct scan_control *sc, 1698 int file) 1699{ 1700 if (file) 1701 return inactive_file_is_low(zone, sc); 1702 else 1703 return inactive_anon_is_low(zone, sc); 1704} 1705 1706static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan, 1707 struct zone *zone, struct scan_control *sc, int priority) 1708{ 1709 int file = is_file_lru(lru); 1710 1711 if (is_active_lru(lru)) { 1712 if (inactive_list_is_low(zone, sc, file)) 1713 shrink_active_list(nr_to_scan, zone, sc, priority, file); 1714 return 0; 1715 } 1716 1717 return shrink_inactive_list(nr_to_scan, zone, sc, priority, file); 1718} 1719 1720/* 1721 * Smallish @nr_to_scan's are deposited in @nr_saved_scan, 1722 * until we collected @swap_cluster_max pages to scan. 1723 */ 1724static unsigned long nr_scan_try_batch(unsigned long nr_to_scan, 1725 unsigned long *nr_saved_scan) 1726{ 1727 unsigned long nr; 1728 1729 *nr_saved_scan += nr_to_scan; 1730 nr = *nr_saved_scan; 1731 1732 if (nr >= SWAP_CLUSTER_MAX) 1733 *nr_saved_scan = 0; 1734 else 1735 nr = 0; 1736 1737 return nr; 1738} 1739 1740/* 1741 * Determine how aggressively the anon and file LRU lists should be 1742 * scanned. The relative value of each set of LRU lists is determined 1743 * by looking at the fraction of the pages scanned we did rotate back 1744 * onto the active list instead of evict. 1745 * 1746 * nr[0] = anon pages to scan; nr[1] = file pages to scan 1747 */ 1748static void get_scan_count(struct zone *zone, struct scan_control *sc, 1749 unsigned long *nr, int priority) 1750{ 1751 unsigned long anon, file, free; 1752 unsigned long anon_prio, file_prio; 1753 unsigned long ap, fp; 1754 struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc); 1755 u64 fraction[2], denominator; 1756 enum lru_list l; 1757 int noswap = 0; 1758 1759 /* If we have no swap space, do not bother scanning anon pages. */ 1760 if (!sc->may_swap || (nr_swap_pages <= 0)) { 1761 noswap = 1; 1762 fraction[0] = 0; 1763 fraction[1] = 1; 1764 denominator = 1; 1765 goto out; 1766 } 1767 1768 anon = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) + 1769 zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON); 1770 file = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) + 1771 zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE); 1772 1773 if (scanning_global_lru(sc)) { 1774 free = zone_page_state(zone, NR_FREE_PAGES); 1775 /* If we have very few page cache pages, 1776 force-scan anon pages. */ 1777 if (unlikely(file + free <= high_wmark_pages(zone))) { 1778 fraction[0] = 1; 1779 fraction[1] = 0; 1780 denominator = 1; 1781 goto out; 1782 } 1783 } 1784 1785 /* 1786 * With swappiness at 100, anonymous and file have the same priority. 1787 * This scanning priority is essentially the inverse of IO cost. 1788 */ 1789 anon_prio = sc->swappiness; 1790 file_prio = 200 - sc->swappiness; 1791 1792 /* 1793 * OK, so we have swap space and a fair amount of page cache 1794 * pages. We use the recently rotated / recently scanned 1795 * ratios to determine how valuable each cache is. 1796 * 1797 * Because workloads change over time (and to avoid overflow) 1798 * we keep these statistics as a floating average, which ends 1799 * up weighing recent references more than old ones. 1800 * 1801 * anon in [0], file in [1] 1802 */ 1803 spin_lock_irq(&zone->lru_lock); 1804 if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) { 1805 reclaim_stat->recent_scanned[0] /= 2; 1806 reclaim_stat->recent_rotated[0] /= 2; 1807 } 1808 1809 if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) { 1810 reclaim_stat->recent_scanned[1] /= 2; 1811 reclaim_stat->recent_rotated[1] /= 2; 1812 } 1813 1814 /* 1815 * The amount of pressure on anon vs file pages is inversely 1816 * proportional to the fraction of recently scanned pages on 1817 * each list that were recently referenced and in active use. 1818 */ 1819 ap = (anon_prio + 1) * (reclaim_stat->recent_scanned[0] + 1); 1820 ap /= reclaim_stat->recent_rotated[0] + 1; 1821 1822 fp = (file_prio + 1) * (reclaim_stat->recent_scanned[1] + 1); 1823 fp /= reclaim_stat->recent_rotated[1] + 1; 1824 spin_unlock_irq(&zone->lru_lock); 1825 1826 fraction[0] = ap; 1827 fraction[1] = fp; 1828 denominator = ap + fp + 1; 1829out: 1830 for_each_evictable_lru(l) { 1831 int file = is_file_lru(l); 1832 unsigned long scan; 1833 1834 scan = zone_nr_lru_pages(zone, sc, l); 1835 if (priority || noswap) { 1836 scan >>= priority; 1837 scan = div64_u64(scan * fraction[file], denominator); 1838 } 1839 nr[l] = nr_scan_try_batch(scan, 1840 &reclaim_stat->nr_saved_scan[l]); 1841 } 1842} 1843 1844/* 1845 * Reclaim/compaction depends on a number of pages being freed. To avoid 1846 * disruption to the system, a small number of order-0 pages continue to be 1847 * rotated and reclaimed in the normal fashion. However, by the time we get 1848 * back to the allocator and call try_to_compact_zone(), we ensure that 1849 * there are enough free pages for it to be likely successful 1850 */ 1851static inline bool should_continue_reclaim(struct zone *zone, 1852 unsigned long nr_reclaimed, 1853 unsigned long nr_scanned, 1854 struct scan_control *sc) 1855{ 1856 unsigned long pages_for_compaction; 1857 unsigned long inactive_lru_pages; 1858 1859 /* If not in reclaim/compaction mode, stop */ 1860 if (!(sc->reclaim_mode & RECLAIM_MODE_COMPACTION)) 1861 return false; 1862 1863 /* Consider stopping depending on scan and reclaim activity */ 1864 if (sc->gfp_mask & __GFP_REPEAT) { 1865 /* 1866 * For __GFP_REPEAT allocations, stop reclaiming if the 1867 * full LRU list has been scanned and we are still failing 1868 * to reclaim pages. This full LRU scan is potentially 1869 * expensive but a __GFP_REPEAT caller really wants to succeed 1870 */ 1871 if (!nr_reclaimed && !nr_scanned) 1872 return false; 1873 } else { 1874 /* 1875 * For non-__GFP_REPEAT allocations which can presumably 1876 * fail without consequence, stop if we failed to reclaim 1877 * any pages from the last SWAP_CLUSTER_MAX number of 1878 * pages that were scanned. This will return to the 1879 * caller faster at the risk reclaim/compaction and 1880 * the resulting allocation attempt fails 1881 */ 1882 if (!nr_reclaimed) 1883 return false; 1884 } 1885 1886 /* 1887 * If we have not reclaimed enough pages for compaction and the 1888 * inactive lists are large enough, continue reclaiming 1889 */ 1890 pages_for_compaction = (2UL << sc->order); 1891 inactive_lru_pages = zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON) + 1892 zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE); 1893 if (sc->nr_reclaimed < pages_for_compaction && 1894 inactive_lru_pages > pages_for_compaction) 1895 return true; 1896 1897 /* If compaction would go ahead or the allocation would succeed, stop */ 1898 switch (compaction_suitable(zone, sc->order)) { 1899 case COMPACT_PARTIAL: 1900 case COMPACT_CONTINUE: 1901 return false; 1902 default: 1903 return true; 1904 } 1905} 1906 1907/* 1908 * This is a basic per-zone page freer. Used by both kswapd and direct reclaim. 1909 */ 1910static void shrink_zone(int priority, struct zone *zone, 1911 struct scan_control *sc) 1912{ 1913 unsigned long nr[NR_LRU_LISTS]; 1914 unsigned long nr_to_scan; 1915 enum lru_list l; 1916 unsigned long nr_reclaimed, nr_scanned; 1917 unsigned long nr_to_reclaim = sc->nr_to_reclaim; 1918 1919restart: 1920 nr_reclaimed = 0; 1921 nr_scanned = sc->nr_scanned; 1922 get_scan_count(zone, sc, nr, priority); 1923 1924 while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] || 1925 nr[LRU_INACTIVE_FILE]) { 1926 for_each_evictable_lru(l) { 1927 if (nr[l]) { 1928 nr_to_scan = min_t(unsigned long, 1929 nr[l], SWAP_CLUSTER_MAX); 1930 nr[l] -= nr_to_scan; 1931 1932 nr_reclaimed += shrink_list(l, nr_to_scan, 1933 zone, sc, priority); 1934 } 1935 } 1936 /* 1937 * On large memory systems, scan >> priority can become 1938 * really large. This is fine for the starting priority; 1939 * we want to put equal scanning pressure on each zone. 1940 * However, if the VM has a harder time of freeing pages, 1941 * with multiple processes reclaiming pages, the total 1942 * freeing target can get unreasonably large. 1943 */ 1944 if (nr_reclaimed >= nr_to_reclaim && priority < DEF_PRIORITY) 1945 break; 1946 } 1947 sc->nr_reclaimed += nr_reclaimed; 1948 1949 /* 1950 * Even if we did not try to evict anon pages at all, we want to 1951 * rebalance the anon lru active/inactive ratio. 1952 */ 1953 if (inactive_anon_is_low(zone, sc)) 1954 shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0); 1955 1956 /* reclaim/compaction might need reclaim to continue */ 1957 if (should_continue_reclaim(zone, nr_reclaimed, 1958 sc->nr_scanned - nr_scanned, sc)) 1959 goto restart; 1960 1961 throttle_vm_writeout(sc->gfp_mask); 1962} 1963 1964/* 1965 * This is the direct reclaim path, for page-allocating processes. We only 1966 * try to reclaim pages from zones which will satisfy the caller's allocation 1967 * request. 1968 * 1969 * We reclaim from a zone even if that zone is over high_wmark_pages(zone). 1970 * Because: 1971 * a) The caller may be trying to free *extra* pages to satisfy a higher-order 1972 * allocation or 1973 * b) The target zone may be at high_wmark_pages(zone) but the lower zones 1974 * must go *over* high_wmark_pages(zone) to satisfy the `incremental min' 1975 * zone defense algorithm. 1976 * 1977 * If a zone is deemed to be full of pinned pages then just give it a light 1978 * scan then give up on it. 1979 */ 1980static unsigned long shrink_zones(int priority, struct zonelist *zonelist, 1981 struct scan_control *sc) 1982{ 1983 struct zoneref *z; 1984 struct zone *zone; 1985 unsigned long nr_soft_reclaimed; 1986 unsigned long nr_soft_scanned; 1987 unsigned long total_scanned = 0; 1988 1989 for_each_zone_zonelist_nodemask(zone, z, zonelist, 1990 gfp_zone(sc->gfp_mask), sc->nodemask) { 1991 if (!populated_zone(zone)) 1992 continue; 1993 /* 1994 * Take care memory controller reclaiming has small influence 1995 * to global LRU. 1996 */ 1997 if (scanning_global_lru(sc)) { 1998 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) 1999 continue; 2000 if (zone->all_unreclaimable && priority != DEF_PRIORITY) 2001 continue; /* Let kswapd poll it */ 2002 } 2003 2004 nr_soft_scanned = 0; 2005 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone, 2006 sc->order, sc->gfp_mask, 2007 &nr_soft_scanned); 2008 sc->nr_reclaimed += nr_soft_reclaimed; 2009 total_scanned += nr_soft_scanned; 2010 2011 shrink_zone(priority, zone, sc); 2012 } 2013 2014 return total_scanned; 2015} 2016 2017static bool zone_reclaimable(struct zone *zone) 2018{ 2019 return zone->pages_scanned < zone_reclaimable_pages(zone) * 6; 2020} 2021 2022/* All zones in zonelist are unreclaimable? */ 2023static bool all_unreclaimable(struct zonelist *zonelist, 2024 struct scan_control *sc) 2025{ 2026 struct zoneref *z; 2027 struct zone *zone; 2028 2029 for_each_zone_zonelist_nodemask(zone, z, zonelist, 2030 gfp_zone(sc->gfp_mask), sc->nodemask) { 2031 if (!populated_zone(zone)) 2032 continue; 2033 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) 2034 continue; 2035 if (!zone->all_unreclaimable) 2036 return false; 2037 } 2038 2039 return true; 2040} 2041 2042/* 2043 * This is the main entry point to direct page reclaim. 2044 * 2045 * If a full scan of the inactive list fails to free enough memory then we 2046 * are "out of memory" and something needs to be killed. 2047 * 2048 * If the caller is !__GFP_FS then the probability of a failure is reasonably 2049 * high - the zone may be full of dirty or under-writeback pages, which this 2050 * caller can't do much about. We kick the writeback threads and take explicit 2051 * naps in the hope that some of these pages can be written. But if the 2052 * allocating task holds filesystem locks which prevent writeout this might not 2053 * work, and the allocation attempt will fail. 2054 * 2055 * returns: 0, if no pages reclaimed 2056 * else, the number of pages reclaimed 2057 */ 2058static unsigned long do_try_to_free_pages(struct zonelist *zonelist, 2059 struct scan_control *sc, 2060 struct shrink_control *shrink) 2061{ 2062 int priority; 2063 unsigned long total_scanned = 0; 2064 struct reclaim_state *reclaim_state = current->reclaim_state; 2065 struct zoneref *z; 2066 struct zone *zone; 2067 unsigned long writeback_threshold; 2068 2069 get_mems_allowed(); 2070 delayacct_freepages_start(); 2071 2072 if (scanning_global_lru(sc)) 2073 count_vm_event(ALLOCSTALL); 2074 2075 for (priority = DEF_PRIORITY; priority >= 0; priority--) { 2076 sc->nr_scanned = 0; 2077 if (!priority) 2078 disable_swap_token(); 2079 total_scanned += shrink_zones(priority, zonelist, sc); 2080 /* 2081 * Don't shrink slabs when reclaiming memory from 2082 * over limit cgroups 2083 */ 2084 if (scanning_global_lru(sc)) { 2085 unsigned long lru_pages = 0; 2086 for_each_zone_zonelist(zone, z, zonelist, 2087 gfp_zone(sc->gfp_mask)) { 2088 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) 2089 continue; 2090 2091 lru_pages += zone_reclaimable_pages(zone); 2092 } 2093 2094 shrink_slab(shrink, sc->nr_scanned, lru_pages); 2095 if (reclaim_state) { 2096 sc->nr_reclaimed += reclaim_state->reclaimed_slab; 2097 reclaim_state->reclaimed_slab = 0; 2098 } 2099 } 2100 total_scanned += sc->nr_scanned; 2101 if (sc->nr_reclaimed >= sc->nr_to_reclaim) 2102 goto out; 2103 2104 /* 2105 * Try to write back as many pages as we just scanned. This 2106 * tends to cause slow streaming writers to write data to the 2107 * disk smoothly, at the dirtying rate, which is nice. But 2108 * that's undesirable in laptop mode, where we *want* lumpy 2109 * writeout. So in laptop mode, write out the whole world. 2110 */ 2111 writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2; 2112 if (total_scanned > writeback_threshold) { 2113 wakeup_flusher_threads(laptop_mode ? 0 : total_scanned); 2114 sc->may_writepage = 1; 2115 } 2116 2117 /* Take a nap, wait for some writeback to complete */ 2118 if (!sc->hibernation_mode && sc->nr_scanned && 2119 priority < DEF_PRIORITY - 2) { 2120 struct zone *preferred_zone; 2121 2122 first_zones_zonelist(zonelist, gfp_zone(sc->gfp_mask), 2123 &cpuset_current_mems_allowed, 2124 &preferred_zone); 2125 wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/10); 2126 } 2127 } 2128 2129out: 2130 delayacct_freepages_end(); 2131 put_mems_allowed(); 2132 2133 if (sc->nr_reclaimed) 2134 return sc->nr_reclaimed; 2135 2136 /* 2137 * As hibernation is going on, kswapd is freezed so that it can't mark 2138 * the zone into all_unreclaimable. Thus bypassing all_unreclaimable 2139 * check. 2140 */ 2141 if (oom_killer_disabled) 2142 return 0; 2143 2144 /* top priority shrink_zones still had more to do? don't OOM, then */ 2145 if (scanning_global_lru(sc) && !all_unreclaimable(zonelist, sc)) 2146 return 1; 2147 2148 return 0; 2149} 2150 2151unsigned long try_to_free_pages(struct zonelist *zonelist, int order, 2152 gfp_t gfp_mask, nodemask_t *nodemask) 2153{ 2154 unsigned long nr_reclaimed; 2155 struct scan_control sc = { 2156 .gfp_mask = gfp_mask, 2157 .may_writepage = !laptop_mode, 2158 .nr_to_reclaim = SWAP_CLUSTER_MAX, 2159 .may_unmap = 1, 2160 .may_swap = 1, 2161 .swappiness = vm_swappiness, 2162 .order = order, 2163 .mem_cgroup = NULL, 2164 .nodemask = nodemask, 2165 }; 2166 struct shrink_control shrink = { 2167 .gfp_mask = sc.gfp_mask, 2168 }; 2169 2170 trace_mm_vmscan_direct_reclaim_begin(order, 2171 sc.may_writepage, 2172 gfp_mask); 2173 2174 nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink); 2175 2176 trace_mm_vmscan_direct_reclaim_end(nr_reclaimed); 2177 2178 return nr_reclaimed; 2179} 2180 2181#ifdef CONFIG_CGROUP_MEM_RES_CTLR 2182 2183unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem, 2184 gfp_t gfp_mask, bool noswap, 2185 unsigned int swappiness, 2186 struct zone *zone, 2187 unsigned long *nr_scanned) 2188{ 2189 struct scan_control sc = { 2190 .nr_scanned = 0, 2191 .nr_to_reclaim = SWAP_CLUSTER_MAX, 2192 .may_writepage = !laptop_mode, 2193 .may_unmap = 1, 2194 .may_swap = !noswap, 2195 .swappiness = swappiness, 2196 .order = 0, 2197 .mem_cgroup = mem, 2198 }; 2199 2200 sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | 2201 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK); 2202 2203 trace_mm_vmscan_memcg_softlimit_reclaim_begin(0, 2204 sc.may_writepage, 2205 sc.gfp_mask); 2206 2207 /* 2208 * NOTE: Although we can get the priority field, using it 2209 * here is not a good idea, since it limits the pages we can scan. 2210 * if we don't reclaim here, the shrink_zone from balance_pgdat 2211 * will pick up pages from other mem cgroup's as well. We hack 2212 * the priority and make it zero. 2213 */ 2214 shrink_zone(0, zone, &sc); 2215 2216 trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed); 2217 2218 *nr_scanned = sc.nr_scanned; 2219 return sc.nr_reclaimed; 2220} 2221 2222unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont, 2223 gfp_t gfp_mask, 2224 bool noswap, 2225 unsigned int swappiness) 2226{ 2227 struct zonelist *zonelist; 2228 unsigned long nr_reclaimed; 2229 struct scan_control sc = { 2230 .may_writepage = !laptop_mode, 2231 .may_unmap = 1, 2232 .may_swap = !noswap, 2233 .nr_to_reclaim = SWAP_CLUSTER_MAX, 2234 .swappiness = swappiness, 2235 .order = 0, 2236 .mem_cgroup = mem_cont, 2237 .nodemask = NULL, /* we don't care the placement */ 2238 .gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | 2239 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK), 2240 }; 2241 struct shrink_control shrink = { 2242 .gfp_mask = sc.gfp_mask, 2243 }; 2244 2245 zonelist = NODE_DATA(numa_node_id())->node_zonelists; 2246 2247 trace_mm_vmscan_memcg_reclaim_begin(0, 2248 sc.may_writepage, 2249 sc.gfp_mask); 2250 2251 nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink); 2252 2253 trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed); 2254 2255 return nr_reclaimed; 2256} 2257#endif 2258 2259/* 2260 * pgdat_balanced is used when checking if a node is balanced for high-order 2261 * allocations. Only zones that meet watermarks and are in a zone allowed 2262 * by the callers classzone_idx are added to balanced_pages. The total of 2263 * balanced pages must be at least 25% of the zones allowed by classzone_idx 2264 * for the node to be considered balanced. Forcing all zones to be balanced 2265 * for high orders can cause excessive reclaim when there are imbalanced zones. 2266 * The choice of 25% is due to 2267 * o a 16M DMA zone that is balanced will not balance a zone on any 2268 * reasonable sized machine 2269 * o On all other machines, the top zone must be at least a reasonable 2270 * percentage of the middle zones. For example, on 32-bit x86, highmem 2271 * would need to be at least 256M for it to be balance a whole node. 2272 * Similarly, on x86-64 the Normal zone would need to be at least 1G 2273 * to balance a node on its own. These seemed like reasonable ratios. 2274 */ 2275static bool pgdat_balanced(pg_data_t *pgdat, unsigned long balanced_pages, 2276 int classzone_idx) 2277{ 2278 unsigned long present_pages = 0; 2279 int i; 2280 2281 for (i = 0; i <= classzone_idx; i++) 2282 present_pages += pgdat->node_zones[i].present_pages; 2283 2284 return balanced_pages > (present_pages >> 2); 2285} 2286 2287/* is kswapd sleeping prematurely? */ 2288static bool sleeping_prematurely(pg_data_t *pgdat, int order, long remaining, 2289 int classzone_idx) 2290{ 2291 int i; 2292 unsigned long balanced = 0; 2293 bool all_zones_ok = true; 2294 2295 /* If a direct reclaimer woke kswapd within HZ/10, it's premature */ 2296 if (remaining) 2297 return true; 2298 2299 /* Check the watermark levels */ 2300 for (i = 0; i < pgdat->nr_zones; i++) { 2301 struct zone *zone = pgdat->node_zones + i; 2302 2303 if (!populated_zone(zone)) 2304 continue; 2305 2306 /* 2307 * balance_pgdat() skips over all_unreclaimable after 2308 * DEF_PRIORITY. Effectively, it considers them balanced so 2309 * they must be considered balanced here as well if kswapd 2310 * is to sleep 2311 */ 2312 if (zone->all_unreclaimable) { 2313 balanced += zone->present_pages; 2314 continue; 2315 } 2316 2317 if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone), 2318 classzone_idx, 0)) 2319 all_zones_ok = false; 2320 else 2321 balanced += zone->present_pages; 2322 } 2323 2324 /* 2325 * For high-order requests, the balanced zones must contain at least 2326 * 25% of the nodes pages for kswapd to sleep. For order-0, all zones 2327 * must be balanced 2328 */ 2329 if (order) 2330 return !pgdat_balanced(pgdat, balanced, classzone_idx); 2331 else 2332 return !all_zones_ok; 2333} 2334 2335/* 2336 * For kswapd, balance_pgdat() will work across all this node's zones until 2337 * they are all at high_wmark_pages(zone). 2338 * 2339 * Returns the final order kswapd was reclaiming at 2340 * 2341 * There is special handling here for zones which are full of pinned pages. 2342 * This can happen if the pages are all mlocked, or if they are all used by 2343 * device drivers (say, ZONE_DMA). Or if they are all in use by hugetlb. 2344 * What we do is to detect the case where all pages in the zone have been 2345 * scanned twice and there has been zero successful reclaim. Mark the zone as 2346 * dead and from now on, only perform a short scan. Basically we're polling 2347 * the zone for when the problem goes away. 2348 * 2349 * kswapd scans the zones in the highmem->normal->dma direction. It skips 2350 * zones which have free_pages > high_wmark_pages(zone), but once a zone is 2351 * found to have free_pages <= high_wmark_pages(zone), we scan that zone and the 2352 * lower zones regardless of the number of free pages in the lower zones. This 2353 * interoperates with the page allocator fallback scheme to ensure that aging 2354 * of pages is balanced across the zones. 2355 */ 2356static unsigned long balance_pgdat(pg_data_t *pgdat, int order, 2357 int *classzone_idx) 2358{ 2359 int all_zones_ok; 2360 unsigned long balanced; 2361 int priority; 2362 int i; 2363 int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */ 2364 unsigned long total_scanned; 2365 struct reclaim_state *reclaim_state = current->reclaim_state; 2366 unsigned long nr_soft_reclaimed; 2367 unsigned long nr_soft_scanned; 2368 struct scan_control sc = { 2369 .gfp_mask = GFP_KERNEL, 2370 .may_unmap = 1, 2371 .may_swap = 1, 2372 /* 2373 * kswapd doesn't want to be bailed out while reclaim. because 2374 * we want to put equal scanning pressure on each zone. 2375 */ 2376 .nr_to_reclaim = ULONG_MAX, 2377 .swappiness = vm_swappiness, 2378 .order = order, 2379 .mem_cgroup = NULL, 2380 }; 2381 struct shrink_control shrink = { 2382 .gfp_mask = sc.gfp_mask, 2383 }; 2384loop_again: 2385 total_scanned = 0; 2386 sc.nr_reclaimed = 0; 2387 sc.may_writepage = !laptop_mode; 2388 count_vm_event(PAGEOUTRUN); 2389 2390 for (priority = DEF_PRIORITY; priority >= 0; priority--) { 2391 unsigned long lru_pages = 0; 2392 int has_under_min_watermark_zone = 0; 2393 2394 /* The swap token gets in the way of swapout... */ 2395 if (!priority) 2396 disable_swap_token(); 2397 2398 all_zones_ok = 1; 2399 balanced = 0; 2400 2401 /* 2402 * Scan in the highmem->dma direction for the highest 2403 * zone which needs scanning 2404 */ 2405 for (i = pgdat->nr_zones - 1; i >= 0; i--) { 2406 struct zone *zone = pgdat->node_zones + i; 2407 2408 if (!populated_zone(zone)) 2409 continue; 2410 2411 if (zone->all_unreclaimable && priority != DEF_PRIORITY) 2412 continue; 2413 2414 /* 2415 * Do some background aging of the anon list, to give 2416 * pages a chance to be referenced before reclaiming. 2417 */ 2418 if (inactive_anon_is_low(zone, &sc)) 2419 shrink_active_list(SWAP_CLUSTER_MAX, zone, 2420 &sc, priority, 0); 2421 2422 if (!zone_watermark_ok_safe(zone, order, 2423 high_wmark_pages(zone), 0, 0)) { 2424 end_zone = i; 2425 *classzone_idx = i; 2426 break; 2427 } 2428 } 2429 if (i < 0) 2430 goto out; 2431 2432 for (i = 0; i <= end_zone; i++) { 2433 struct zone *zone = pgdat->node_zones + i; 2434 2435 lru_pages += zone_reclaimable_pages(zone); 2436 } 2437 2438 /* 2439 * Now scan the zone in the dma->highmem direction, stopping 2440 * at the last zone which needs scanning. 2441 * 2442 * We do this because the page allocator works in the opposite 2443 * direction. This prevents the page allocator from allocating 2444 * pages behind kswapd's direction of progress, which would 2445 * cause too much scanning of the lower zones. 2446 */ 2447 for (i = 0; i <= end_zone; i++) { 2448 struct zone *zone = pgdat->node_zones + i; 2449 int nr_slab; 2450 unsigned long balance_gap; 2451 2452 if (!populated_zone(zone)) 2453 continue; 2454 2455 if (zone->all_unreclaimable && priority != DEF_PRIORITY) 2456 continue; 2457 2458 sc.nr_scanned = 0; 2459 2460 nr_soft_scanned = 0; 2461 /* 2462 * Call soft limit reclaim before calling shrink_zone. 2463 */ 2464 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone, 2465 order, sc.gfp_mask, 2466 &nr_soft_scanned); 2467 sc.nr_reclaimed += nr_soft_reclaimed; 2468 total_scanned += nr_soft_scanned; 2469 2470 /* 2471 * We put equal pressure on every zone, unless 2472 * one zone has way too many pages free 2473 * already. The "too many pages" is defined 2474 * as the high wmark plus a "gap" where the 2475 * gap is either the low watermark or 1% 2476 * of the zone, whichever is smaller. 2477 */ 2478 balance_gap = min(low_wmark_pages(zone), 2479 (zone->present_pages + 2480 KSWAPD_ZONE_BALANCE_GAP_RATIO-1) / 2481 KSWAPD_ZONE_BALANCE_GAP_RATIO); 2482 if (!zone_watermark_ok_safe(zone, order, 2483 high_wmark_pages(zone) + balance_gap, 2484 end_zone, 0)) 2485 shrink_zone(priority, zone, &sc); 2486 reclaim_state->reclaimed_slab = 0; 2487 nr_slab = shrink_slab(&shrink, sc.nr_scanned, lru_pages); 2488 sc.nr_reclaimed += reclaim_state->reclaimed_slab; 2489 total_scanned += sc.nr_scanned; 2490 2491 if (zone->all_unreclaimable) 2492 continue; 2493 if (nr_slab == 0 && 2494 !zone_reclaimable(zone)) 2495 zone->all_unreclaimable = 1; 2496 /* 2497 * If we've done a decent amount of scanning and 2498 * the reclaim ratio is low, start doing writepage 2499 * even in laptop mode 2500 */ 2501 if (total_scanned > SWAP_CLUSTER_MAX * 2 && 2502 total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2) 2503 sc.may_writepage = 1; 2504 2505 if (!zone_watermark_ok_safe(zone, order, 2506 high_wmark_pages(zone), end_zone, 0)) { 2507 all_zones_ok = 0; 2508 /* 2509 * We are still under min water mark. This 2510 * means that we have a GFP_ATOMIC allocation 2511 * failure risk. Hurry up! 2512 */ 2513 if (!zone_watermark_ok_safe(zone, order, 2514 min_wmark_pages(zone), end_zone, 0)) 2515 has_under_min_watermark_zone = 1; 2516 } else { 2517 /* 2518 * If a zone reaches its high watermark, 2519 * consider it to be no longer congested. It's 2520 * possible there are dirty pages backed by 2521 * congested BDIs but as pressure is relieved, 2522 * spectulatively avoid congestion waits 2523 */ 2524 zone_clear_flag(zone, ZONE_CONGESTED); 2525 if (i <= *classzone_idx) 2526 balanced += zone->present_pages; 2527 } 2528 2529 } 2530 if (all_zones_ok || (order && pgdat_balanced(pgdat, balanced, *classzone_idx))) 2531 break; /* kswapd: all done */ 2532 /* 2533 * OK, kswapd is getting into trouble. Take a nap, then take 2534 * another pass across the zones. 2535 */ 2536 if (total_scanned && (priority < DEF_PRIORITY - 2)) { 2537 if (has_under_min_watermark_zone) 2538 count_vm_event(KSWAPD_SKIP_CONGESTION_WAIT); 2539 else 2540 congestion_wait(BLK_RW_ASYNC, HZ/10); 2541 } 2542 2543 /* 2544 * We do this so kswapd doesn't build up large priorities for 2545 * example when it is freeing in parallel with allocators. It 2546 * matches the direct reclaim path behaviour in terms of impact 2547 * on zone->*_priority. 2548 */ 2549 if (sc.nr_reclaimed >= SWAP_CLUSTER_MAX) 2550 break; 2551 } 2552out: 2553 2554 /* 2555 * order-0: All zones must meet high watermark for a balanced node 2556 * high-order: Balanced zones must make up at least 25% of the node 2557 * for the node to be balanced 2558 */ 2559 if (!(all_zones_ok || (order && pgdat_balanced(pgdat, balanced, *classzone_idx)))) { 2560 cond_resched(); 2561 2562 try_to_freeze(); 2563 2564 /* 2565 * Fragmentation may mean that the system cannot be 2566 * rebalanced for high-order allocations in all zones. 2567 * At this point, if nr_reclaimed < SWAP_CLUSTER_MAX, 2568 * it means the zones have been fully scanned and are still 2569 * not balanced. For high-order allocations, there is 2570 * little point trying all over again as kswapd may 2571 * infinite loop. 2572 * 2573 * Instead, recheck all watermarks at order-0 as they 2574 * are the most important. If watermarks are ok, kswapd will go 2575 * back to sleep. High-order users can still perform direct 2576 * reclaim if they wish. 2577 */ 2578 if (sc.nr_reclaimed < SWAP_CLUSTER_MAX) 2579 order = sc.order = 0; 2580 2581 goto loop_again; 2582 } 2583 2584 /* 2585 * If kswapd was reclaiming at a higher order, it has the option of 2586 * sleeping without all zones being balanced. Before it does, it must 2587 * ensure that the watermarks for order-0 on *all* zones are met and 2588 * that the congestion flags are cleared. The congestion flag must 2589 * be cleared as kswapd is the only mechanism that clears the flag 2590 * and it is potentially going to sleep here. 2591 */ 2592 if (order) { 2593 for (i = 0; i <= end_zone; i++) { 2594 struct zone *zone = pgdat->node_zones + i; 2595 2596 if (!populated_zone(zone)) 2597 continue; 2598 2599 if (zone->all_unreclaimable && priority != DEF_PRIORITY) 2600 continue; 2601 2602 /* Confirm the zone is balanced for order-0 */ 2603 if (!zone_watermark_ok(zone, 0, 2604 high_wmark_pages(zone), 0, 0)) { 2605 order = sc.order = 0; 2606 goto loop_again; 2607 } 2608 2609 /* If balanced, clear the congested flag */ 2610 zone_clear_flag(zone, ZONE_CONGESTED); 2611 } 2612 } 2613 2614 /* 2615 * Return the order we were reclaiming at so sleeping_prematurely() 2616 * makes a decision on the order we were last reclaiming at. However, 2617 * if another caller entered the allocator slow path while kswapd 2618 * was awake, order will remain at the higher level 2619 */ 2620 *classzone_idx = end_zone; 2621 return order; 2622} 2623 2624static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx) 2625{ 2626 long remaining = 0; 2627 DEFINE_WAIT(wait); 2628 2629 if (freezing(current) || kthread_should_stop()) 2630 return; 2631 2632 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); 2633 2634 /* Try to sleep for a short interval */ 2635 if (!sleeping_prematurely(pgdat, order, remaining, classzone_idx)) { 2636 remaining = schedule_timeout(HZ/10); 2637 finish_wait(&pgdat->kswapd_wait, &wait); 2638 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); 2639 } 2640 2641 /* 2642 * After a short sleep, check if it was a premature sleep. If not, then 2643 * go fully to sleep until explicitly woken up. 2644 */ 2645 if (!sleeping_prematurely(pgdat, order, remaining, classzone_idx)) { 2646 trace_mm_vmscan_kswapd_sleep(pgdat->node_id); 2647 2648 /* 2649 * vmstat counters are not perfectly accurate and the estimated 2650 * value for counters such as NR_FREE_PAGES can deviate from the 2651 * true value by nr_online_cpus * threshold. To avoid the zone 2652 * watermarks being breached while under pressure, we reduce the 2653 * per-cpu vmstat threshold while kswapd is awake and restore 2654 * them before going back to sleep. 2655 */ 2656 set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold); 2657 schedule(); 2658 set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold); 2659 } else { 2660 if (remaining) 2661 count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY); 2662 else 2663 count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY); 2664 } 2665 finish_wait(&pgdat->kswapd_wait, &wait); 2666} 2667 2668/* 2669 * The background pageout daemon, started as a kernel thread 2670 * from the init process. 2671 * 2672 * This basically trickles out pages so that we have _some_ 2673 * free memory available even if there is no other activity 2674 * that frees anything up. This is needed for things like routing 2675 * etc, where we otherwise might have all activity going on in 2676 * asynchronous contexts that cannot page things out. 2677 * 2678 * If there are applications that are active memory-allocators 2679 * (most normal use), this basically shouldn't matter. 2680 */ 2681static int kswapd(void *p) 2682{ 2683 unsigned long order; 2684 int classzone_idx; 2685 pg_data_t *pgdat = (pg_data_t*)p; 2686 struct task_struct *tsk = current; 2687 2688 struct reclaim_state reclaim_state = { 2689 .reclaimed_slab = 0, 2690 }; 2691 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); 2692 2693 lockdep_set_current_reclaim_state(GFP_KERNEL); 2694 2695 if (!cpumask_empty(cpumask)) 2696 set_cpus_allowed_ptr(tsk, cpumask); 2697 current->reclaim_state = &reclaim_state; 2698 2699 /* 2700 * Tell the memory management that we're a "memory allocator", 2701 * and that if we need more memory we should get access to it 2702 * regardless (see "__alloc_pages()"). "kswapd" should 2703 * never get caught in the normal page freeing logic. 2704 * 2705 * (Kswapd normally doesn't need memory anyway, but sometimes 2706 * you need a small amount of memory in order to be able to 2707 * page out something else, and this flag essentially protects 2708 * us from recursively trying to free more memory as we're 2709 * trying to free the first piece of memory in the first place). 2710 */ 2711 tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD; 2712 set_freezable(); 2713 2714 order = 0; 2715 classzone_idx = MAX_NR_ZONES - 1; 2716 for ( ; ; ) { 2717 unsigned long new_order; 2718 int new_classzone_idx; 2719 int ret; 2720 2721 new_order = pgdat->kswapd_max_order; 2722 new_classzone_idx = pgdat->classzone_idx; 2723 pgdat->kswapd_max_order = 0; 2724 pgdat->classzone_idx = MAX_NR_ZONES - 1; 2725 if (order < new_order || classzone_idx > new_classzone_idx) { 2726 /* 2727 * Don't sleep if someone wants a larger 'order' 2728 * allocation or has tigher zone constraints 2729 */ 2730 order = new_order; 2731 classzone_idx = new_classzone_idx; 2732 } else { 2733 kswapd_try_to_sleep(pgdat, order, classzone_idx); 2734 order = pgdat->kswapd_max_order; 2735 classzone_idx = pgdat->classzone_idx; 2736 pgdat->kswapd_max_order = 0; 2737 pgdat->classzone_idx = MAX_NR_ZONES - 1; 2738 } 2739 2740 ret = try_to_freeze(); 2741 if (kthread_should_stop()) 2742 break; 2743 2744 /* 2745 * We can speed up thawing tasks if we don't call balance_pgdat 2746 * after returning from the refrigerator 2747 */ 2748 if (!ret) { 2749 trace_mm_vmscan_kswapd_wake(pgdat->node_id, order); 2750 order = balance_pgdat(pgdat, order, &classzone_idx); 2751 } 2752 } 2753 return 0; 2754} 2755 2756/* 2757 * A zone is low on free memory, so wake its kswapd task to service it. 2758 */ 2759void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx) 2760{ 2761 pg_data_t *pgdat; 2762 2763 if (!populated_zone(zone)) 2764 return; 2765 2766 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) 2767 return; 2768 pgdat = zone->zone_pgdat; 2769 if (pgdat->kswapd_max_order < order) { 2770 pgdat->kswapd_max_order = order; 2771 pgdat->classzone_idx = min(pgdat->classzone_idx, classzone_idx); 2772 } 2773 if (!waitqueue_active(&pgdat->kswapd_wait)) 2774 return; 2775 if (zone_watermark_ok_safe(zone, order, low_wmark_pages(zone), 0, 0)) 2776 return; 2777 2778 trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order); 2779 wake_up_interruptible(&pgdat->kswapd_wait); 2780} 2781 2782/* 2783 * The reclaimable count would be mostly accurate. 2784 * The less reclaimable pages may be 2785 * - mlocked pages, which will be moved to unevictable list when encountered 2786 * - mapped pages, which may require several travels to be reclaimed 2787 * - dirty pages, which is not "instantly" reclaimable 2788 */ 2789unsigned long global_reclaimable_pages(void) 2790{ 2791 int nr; 2792 2793 nr = global_page_state(NR_ACTIVE_FILE) + 2794 global_page_state(NR_INACTIVE_FILE); 2795 2796 if (nr_swap_pages > 0) 2797 nr += global_page_state(NR_ACTIVE_ANON) + 2798 global_page_state(NR_INACTIVE_ANON); 2799 2800 return nr; 2801} 2802 2803unsigned long zone_reclaimable_pages(struct zone *zone) 2804{ 2805 int nr; 2806 2807 nr = zone_page_state(zone, NR_ACTIVE_FILE) + 2808 zone_page_state(zone, NR_INACTIVE_FILE); 2809 2810 if (nr_swap_pages > 0) 2811 nr += zone_page_state(zone, NR_ACTIVE_ANON) + 2812 zone_page_state(zone, NR_INACTIVE_ANON); 2813 2814 return nr; 2815} 2816 2817#ifdef CONFIG_HIBERNATION 2818/* 2819 * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of 2820 * freed pages. 2821 * 2822 * Rather than trying to age LRUs the aim is to preserve the overall 2823 * LRU order by reclaiming preferentially 2824 * inactive > active > active referenced > active mapped 2825 */ 2826unsigned long shrink_all_memory(unsigned long nr_to_reclaim) 2827{ 2828 struct reclaim_state reclaim_state; 2829 struct scan_control sc = { 2830 .gfp_mask = GFP_HIGHUSER_MOVABLE, 2831 .may_swap = 1, 2832 .may_unmap = 1, 2833 .may_writepage = 1, 2834 .nr_to_reclaim = nr_to_reclaim, 2835 .hibernation_mode = 1, 2836 .swappiness = vm_swappiness, 2837 .order = 0, 2838 }; 2839 struct shrink_control shrink = { 2840 .gfp_mask = sc.gfp_mask, 2841 }; 2842 struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask); 2843 struct task_struct *p = current; 2844 unsigned long nr_reclaimed; 2845 2846 p->flags |= PF_MEMALLOC; 2847 lockdep_set_current_reclaim_state(sc.gfp_mask); 2848 reclaim_state.reclaimed_slab = 0; 2849 p->reclaim_state = &reclaim_state; 2850 2851 nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink); 2852 2853 p->reclaim_state = NULL; 2854 lockdep_clear_current_reclaim_state(); 2855 p->flags &= ~PF_MEMALLOC; 2856 2857 return nr_reclaimed; 2858} 2859#endif /* CONFIG_HIBERNATION */ 2860 2861/* It's optimal to keep kswapds on the same CPUs as their memory, but 2862 not required for correctness. So if the last cpu in a node goes 2863 away, we get changed to run anywhere: as the first one comes back, 2864 restore their cpu bindings. */ 2865static int __devinit cpu_callback(struct notifier_block *nfb, 2866 unsigned long action, void *hcpu) 2867{ 2868 int nid; 2869 2870 if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) { 2871 for_each_node_state(nid, N_HIGH_MEMORY) { 2872 pg_data_t *pgdat = NODE_DATA(nid); 2873 const struct cpumask *mask; 2874 2875 mask = cpumask_of_node(pgdat->node_id); 2876 2877 if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids) 2878 /* One of our CPUs online: restore mask */ 2879 set_cpus_allowed_ptr(pgdat->kswapd, mask); 2880 } 2881 } 2882 return NOTIFY_OK; 2883} 2884 2885/* 2886 * This kswapd start function will be called by init and node-hot-add. 2887 * On node-hot-add, kswapd will moved to proper cpus if cpus are hot-added. 2888 */ 2889int kswapd_run(int nid) 2890{ 2891 pg_data_t *pgdat = NODE_DATA(nid); 2892 int ret = 0; 2893 2894 if (pgdat->kswapd) 2895 return 0; 2896 2897 pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid); 2898 if (IS_ERR(pgdat->kswapd)) { 2899 /* failure at boot is fatal */ 2900 BUG_ON(system_state == SYSTEM_BOOTING); 2901 printk("Failed to start kswapd on node %d\n",nid); 2902 ret = -1; 2903 } 2904 return ret; 2905} 2906 2907/* 2908 * Called by memory hotplug when all memory in a node is offlined. 2909 */ 2910void kswapd_stop(int nid) 2911{ 2912 struct task_struct *kswapd = NODE_DATA(nid)->kswapd; 2913 2914 if (kswapd) 2915 kthread_stop(kswapd); 2916} 2917 2918static int __init kswapd_init(void) 2919{ 2920 int nid; 2921 2922 swap_setup(); 2923 for_each_node_state(nid, N_HIGH_MEMORY) 2924 kswapd_run(nid); 2925 hotcpu_notifier(cpu_callback, 0); 2926 return 0; 2927} 2928 2929module_init(kswapd_init) 2930 2931#ifdef CONFIG_NUMA 2932/* 2933 * Zone reclaim mode 2934 * 2935 * If non-zero call zone_reclaim when the number of free pages falls below 2936 * the watermarks. 2937 */ 2938int zone_reclaim_mode __read_mostly; 2939 2940#define RECLAIM_OFF 0 2941#define RECLAIM_ZONE (1<<0) /* Run shrink_inactive_list on the zone */ 2942#define RECLAIM_WRITE (1<<1) /* Writeout pages during reclaim */ 2943#define RECLAIM_SWAP (1<<2) /* Swap pages out during reclaim */ 2944 2945/* 2946 * Priority for ZONE_RECLAIM. This determines the fraction of pages 2947 * of a node considered for each zone_reclaim. 4 scans 1/16th of 2948 * a zone. 2949 */ 2950#define ZONE_RECLAIM_PRIORITY 4 2951 2952/* 2953 * Percentage of pages in a zone that must be unmapped for zone_reclaim to 2954 * occur. 2955 */ 2956int sysctl_min_unmapped_ratio = 1; 2957 2958/* 2959 * If the number of slab pages in a zone grows beyond this percentage then 2960 * slab reclaim needs to occur. 2961 */ 2962int sysctl_min_slab_ratio = 5; 2963 2964static inline unsigned long zone_unmapped_file_pages(struct zone *zone) 2965{ 2966 unsigned long file_mapped = zone_page_state(zone, NR_FILE_MAPPED); 2967 unsigned long file_lru = zone_page_state(zone, NR_INACTIVE_FILE) + 2968 zone_page_state(zone, NR_ACTIVE_FILE); 2969 2970 /* 2971 * It's possible for there to be more file mapped pages than 2972 * accounted for by the pages on the file LRU lists because 2973 * tmpfs pages accounted for as ANON can also be FILE_MAPPED 2974 */ 2975 return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0; 2976} 2977 2978/* Work out how many page cache pages we can reclaim in this reclaim_mode */ 2979static long zone_pagecache_reclaimable(struct zone *zone) 2980{ 2981 long nr_pagecache_reclaimable; 2982 long delta = 0; 2983 2984 /* 2985 * If RECLAIM_SWAP is set, then all file pages are considered 2986 * potentially reclaimable. Otherwise, we have to worry about 2987 * pages like swapcache and zone_unmapped_file_pages() provides 2988 * a better estimate 2989 */ 2990 if (zone_reclaim_mode & RECLAIM_SWAP) 2991 nr_pagecache_reclaimable = zone_page_state(zone, NR_FILE_PAGES); 2992 else 2993 nr_pagecache_reclaimable = zone_unmapped_file_pages(zone); 2994 2995 /* If we can't clean pages, remove dirty pages from consideration */ 2996 if (!(zone_reclaim_mode & RECLAIM_WRITE)) 2997 delta += zone_page_state(zone, NR_FILE_DIRTY); 2998 2999 /* Watch for any possible underflows due to delta */ 3000 if (unlikely(delta > nr_pagecache_reclaimable)) 3001 delta = nr_pagecache_reclaimable; 3002 3003 return nr_pagecache_reclaimable - delta; 3004} 3005 3006/* 3007 * Try to free up some pages from this zone through reclaim. 3008 */ 3009static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) 3010{ 3011 /* Minimum pages needed in order to stay on node */ 3012 const unsigned long nr_pages = 1 << order; 3013 struct task_struct *p = current; 3014 struct reclaim_state reclaim_state; 3015 int priority; 3016 struct scan_control sc = { 3017 .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE), 3018 .may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP), 3019 .may_swap = 1, 3020 .nr_to_reclaim = max_t(unsigned long, nr_pages, 3021 SWAP_CLUSTER_MAX), 3022 .gfp_mask = gfp_mask, 3023 .swappiness = vm_swappiness, 3024 .order = order, 3025 }; 3026 struct shrink_control shrink = { 3027 .gfp_mask = sc.gfp_mask, 3028 }; 3029 unsigned long nr_slab_pages0, nr_slab_pages1; 3030 3031 cond_resched(); 3032 /* 3033 * We need to be able to allocate from the reserves for RECLAIM_SWAP 3034 * and we also need to be able to write out pages for RECLAIM_WRITE 3035 * and RECLAIM_SWAP. 3036 */ 3037 p->flags |= PF_MEMALLOC | PF_SWAPWRITE; 3038 lockdep_set_current_reclaim_state(gfp_mask); 3039 reclaim_state.reclaimed_slab = 0; 3040 p->reclaim_state = &reclaim_state; 3041 3042 if (zone_pagecache_reclaimable(zone) > zone->min_unmapped_pages) { 3043 /* 3044 * Free memory by calling shrink zone with increasing 3045 * priorities until we have enough memory freed. 3046 */ 3047 priority = ZONE_RECLAIM_PRIORITY; 3048 do { 3049 shrink_zone(priority, zone, &sc); 3050 priority--; 3051 } while (priority >= 0 && sc.nr_reclaimed < nr_pages); 3052 } 3053 3054 nr_slab_pages0 = zone_page_state(zone, NR_SLAB_RECLAIMABLE); 3055 if (nr_slab_pages0 > zone->min_slab_pages) { 3056 /* 3057 * shrink_slab() does not currently allow us to determine how 3058 * many pages were freed in this zone. So we take the current 3059 * number of slab pages and shake the slab until it is reduced 3060 * by the same nr_pages that we used for reclaiming unmapped 3061 * pages. 3062 * 3063 * Note that shrink_slab will free memory on all zones and may 3064 * take a long time. 3065 */ 3066 for (;;) { 3067 unsigned long lru_pages = zone_reclaimable_pages(zone); 3068 3069 /* No reclaimable slab or very low memory pressure */ 3070 if (!shrink_slab(&shrink, sc.nr_scanned, lru_pages)) 3071 break; 3072 3073 /* Freed enough memory */ 3074 nr_slab_pages1 = zone_page_state(zone, 3075 NR_SLAB_RECLAIMABLE); 3076 if (nr_slab_pages1 + nr_pages <= nr_slab_pages0) 3077 break; 3078 } 3079 3080 /* 3081 * Update nr_reclaimed by the number of slab pages we 3082 * reclaimed from this zone. 3083 */ 3084 nr_slab_pages1 = zone_page_state(zone, NR_SLAB_RECLAIMABLE); 3085 if (nr_slab_pages1 < nr_slab_pages0) 3086 sc.nr_reclaimed += nr_slab_pages0 - nr_slab_pages1; 3087 } 3088 3089 p->reclaim_state = NULL; 3090 current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE); 3091 lockdep_clear_current_reclaim_state(); 3092 return sc.nr_reclaimed >= nr_pages; 3093} 3094 3095int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) 3096{ 3097 int node_id; 3098 int ret; 3099 3100 /* 3101 * Zone reclaim reclaims unmapped file backed pages and 3102 * slab pages if we are over the defined limits. 3103 * 3104 * A small portion of unmapped file backed pages is needed for 3105 * file I/O otherwise pages read by file I/O will be immediately 3106 * thrown out if the zone is overallocated. So we do not reclaim 3107 * if less than a specified percentage of the zone is used by 3108 * unmapped file backed pages. 3109 */ 3110 if (zone_pagecache_reclaimable(zone) <= zone->min_unmapped_pages && 3111 zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages) 3112 return ZONE_RECLAIM_FULL; 3113 3114 if (zone->all_unreclaimable) 3115 return ZONE_RECLAIM_FULL; 3116 3117 /* 3118 * Do not scan if the allocation should not be delayed. 3119 */ 3120 if (!(gfp_mask & __GFP_WAIT) || (current->flags & PF_MEMALLOC)) 3121 return ZONE_RECLAIM_NOSCAN; 3122 3123 /* 3124 * Only run zone reclaim on the local zone or on zones that do not 3125 * have associated processors. This will favor the local processor 3126 * over remote processors and spread off node memory allocations 3127 * as wide as possible. 3128 */ 3129 node_id = zone_to_nid(zone); 3130 if (node_state(node_id, N_CPU) && node_id != numa_node_id()) 3131 return ZONE_RECLAIM_NOSCAN; 3132 3133 if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED)) 3134 return ZONE_RECLAIM_NOSCAN; 3135 3136 ret = __zone_reclaim(zone, gfp_mask, order); 3137 zone_clear_flag(zone, ZONE_RECLAIM_LOCKED); 3138 3139 if (!ret) 3140 count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED); 3141 3142 return ret; 3143} 3144#endif 3145 3146/* 3147 * page_evictable - test whether a page is evictable 3148 * @page: the page to test 3149 * @vma: the VMA in which the page is or will be mapped, may be NULL 3150 * 3151 * Test whether page is evictable--i.e., should be placed on active/inactive 3152 * lists vs unevictable list. The vma argument is !NULL when called from the 3153 * fault path to determine how to instantate a new page. 3154 * 3155 * Reasons page might not be evictable: 3156 * (1) page's mapping marked unevictable 3157 * (2) page is part of an mlocked VMA 3158 * 3159 */ 3160int page_evictable(struct page *page, struct vm_area_struct *vma) 3161{ 3162 3163 if (mapping_unevictable(page_mapping(page))) 3164 return 0; 3165 3166 if (PageMlocked(page) || (vma && is_mlocked_vma(vma, page))) 3167 return 0; 3168 3169 return 1; 3170} 3171 3172/** 3173 * check_move_unevictable_page - check page for evictability and move to appropriate zone lru list 3174 * @page: page to check evictability and move to appropriate lru list 3175 * @zone: zone page is in 3176 * 3177 * Checks a page for evictability and moves the page to the appropriate 3178 * zone lru list. 3179 * 3180 * Restrictions: zone->lru_lock must be held, page must be on LRU and must 3181 * have PageUnevictable set. 3182 */ 3183static void check_move_unevictable_page(struct page *page, struct zone *zone) 3184{ 3185 VM_BUG_ON(PageActive(page)); 3186 3187retry: 3188 ClearPageUnevictable(page); 3189 if (page_evictable(page, NULL)) { 3190 enum lru_list l = page_lru_base_type(page); 3191 3192 __dec_zone_state(zone, NR_UNEVICTABLE); 3193 list_move(&page->lru, &zone->lru[l].list); 3194 mem_cgroup_move_lists(page, LRU_UNEVICTABLE, l); 3195 __inc_zone_state(zone, NR_INACTIVE_ANON + l); 3196 __count_vm_event(UNEVICTABLE_PGRESCUED); 3197 } else { 3198 /* 3199 * rotate unevictable list 3200 */ 3201 SetPageUnevictable(page); 3202 list_move(&page->lru, &zone->lru[LRU_UNEVICTABLE].list); 3203 mem_cgroup_rotate_lru_list(page, LRU_UNEVICTABLE); 3204 if (page_evictable(page, NULL)) 3205 goto retry; 3206 } 3207} 3208 3209/** 3210 * scan_mapping_unevictable_pages - scan an address space for evictable pages 3211 * @mapping: struct address_space to scan for evictable pages 3212 * 3213 * Scan all pages in mapping. Check unevictable pages for 3214 * evictability and move them to the appropriate zone lru list. 3215 */ 3216void scan_mapping_unevictable_pages(struct address_space *mapping) 3217{ 3218 pgoff_t next = 0; 3219 pgoff_t end = (i_size_read(mapping->host) + PAGE_CACHE_SIZE - 1) >> 3220 PAGE_CACHE_SHIFT; 3221 struct zone *zone; 3222 struct pagevec pvec; 3223 3224 if (mapping->nrpages == 0) 3225 return; 3226 3227 pagevec_init(&pvec, 0); 3228 while (next < end && 3229 pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) { 3230 int i; 3231 int pg_scanned = 0; 3232 3233 zone = NULL; 3234 3235 for (i = 0; i < pagevec_count(&pvec); i++) { 3236 struct page *page = pvec.pages[i]; 3237 pgoff_t page_index = page->index; 3238 struct zone *pagezone = page_zone(page); 3239 3240 pg_scanned++; 3241 if (page_index > next) 3242 next = page_index; 3243 next++; 3244 3245 if (pagezone != zone) { 3246 if (zone) 3247 spin_unlock_irq(&zone->lru_lock); 3248 zone = pagezone; 3249 spin_lock_irq(&zone->lru_lock); 3250 } 3251 3252 if (PageLRU(page) && PageUnevictable(page)) 3253 check_move_unevictable_page(page, zone); 3254 } 3255 if (zone) 3256 spin_unlock_irq(&zone->lru_lock); 3257 pagevec_release(&pvec); 3258 3259 count_vm_events(UNEVICTABLE_PGSCANNED, pg_scanned); 3260 } 3261 3262} 3263 3264/** 3265 * scan_zone_unevictable_pages - check unevictable list for evictable pages 3266 * @zone - zone of which to scan the unevictable list 3267 * 3268 * Scan @zone's unevictable LRU lists to check for pages that have become 3269 * evictable. Move those that have to @zone's inactive list where they 3270 * become candidates for reclaim, unless shrink_inactive_zone() decides 3271 * to reactivate them. Pages that are still unevictable are rotated 3272 * back onto @zone's unevictable list. 3273 */ 3274#define SCAN_UNEVICTABLE_BATCH_SIZE 16UL /* arbitrary lock hold batch size */ 3275static void scan_zone_unevictable_pages(struct zone *zone) 3276{ 3277 struct list_head *l_unevictable = &zone->lru[LRU_UNEVICTABLE].list; 3278 unsigned long scan; 3279 unsigned long nr_to_scan = zone_page_state(zone, NR_UNEVICTABLE); 3280 3281 while (nr_to_scan > 0) { 3282 unsigned long batch_size = min(nr_to_scan, 3283 SCAN_UNEVICTABLE_BATCH_SIZE); 3284 3285 spin_lock_irq(&zone->lru_lock); 3286 for (scan = 0; scan < batch_size; scan++) { 3287 struct page *page = lru_to_page(l_unevictable); 3288 3289 if (!trylock_page(page)) 3290 continue; 3291 3292 prefetchw_prev_lru_page(page, l_unevictable, flags); 3293 3294 if (likely(PageLRU(page) && PageUnevictable(page))) 3295 check_move_unevictable_page(page, zone); 3296 3297 unlock_page(page); 3298 } 3299 spin_unlock_irq(&zone->lru_lock); 3300 3301 nr_to_scan -= batch_size; 3302 } 3303} 3304 3305 3306/** 3307 * scan_all_zones_unevictable_pages - scan all unevictable lists for evictable pages 3308 * 3309 * A really big hammer: scan all zones' unevictable LRU lists to check for 3310 * pages that have become evictable. Move those back to the zones' 3311 * inactive list where they become candidates for reclaim. 3312 * This occurs when, e.g., we have unswappable pages on the unevictable lists, 3313 * and we add swap to the system. As such, it runs in the context of a task 3314 * that has possibly/probably made some previously unevictable pages 3315 * evictable. 3316 */ 3317static void scan_all_zones_unevictable_pages(void) 3318{ 3319 struct zone *zone; 3320 3321 for_each_zone(zone) { 3322 scan_zone_unevictable_pages(zone); 3323 } 3324} 3325 3326/* 3327 * scan_unevictable_pages [vm] sysctl handler. On demand re-scan of 3328 * all nodes' unevictable lists for evictable pages 3329 */ 3330unsigned long scan_unevictable_pages; 3331 3332int scan_unevictable_handler(struct ctl_table *table, int write, 3333 void __user *buffer, 3334 size_t *length, loff_t *ppos) 3335{ 3336 proc_doulongvec_minmax(table, write, buffer, length, ppos); 3337 3338 if (write && *(unsigned long *)table->data) 3339 scan_all_zones_unevictable_pages(); 3340 3341 scan_unevictable_pages = 0; 3342 return 0; 3343} 3344 3345#ifdef CONFIG_NUMA 3346/* 3347 * per node 'scan_unevictable_pages' attribute. On demand re-scan of 3348 * a specified node's per zone unevictable lists for evictable pages. 3349 */ 3350 3351static ssize_t read_scan_unevictable_node(struct sys_device *dev, 3352 struct sysdev_attribute *attr, 3353 char *buf) 3354{ 3355 return sprintf(buf, "0\n"); /* always zero; should fit... */ 3356} 3357 3358static ssize_t write_scan_unevictable_node(struct sys_device *dev, 3359 struct sysdev_attribute *attr, 3360 const char *buf, size_t count) 3361{ 3362 struct zone *node_zones = NODE_DATA(dev->id)->node_zones; 3363 struct zone *zone; 3364 unsigned long res; 3365 unsigned long req = strict_strtoul(buf, 10, &res); 3366 3367 if (!req) 3368 return 1; /* zero is no-op */ 3369 3370 for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) { 3371 if (!populated_zone(zone)) 3372 continue; 3373 scan_zone_unevictable_pages(zone); 3374 } 3375 return 1; 3376} 3377 3378 3379static SYSDEV_ATTR(scan_unevictable_pages, S_IRUGO | S_IWUSR, 3380 read_scan_unevictable_node, 3381 write_scan_unevictable_node); 3382 3383int scan_unevictable_register_node(struct node *node) 3384{ 3385 return sysdev_create_file(&node->sysdev, &attr_scan_unevictable_pages); 3386} 3387 3388void scan_unevictable_unregister_node(struct node *node) 3389{ 3390 sysdev_remove_file(&node->sysdev, &attr_scan_unevictable_pages); 3391} 3392#endif 3393