vmscan.c revision 14797e2363c2b2f1ce139fd1c5a215e4e05aa1d9
1/* 2 * linux/mm/vmscan.c 3 * 4 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds 5 * 6 * Swap reorganised 29.12.95, Stephen Tweedie. 7 * kswapd added: 7.1.96 sct 8 * Removed kswapd_ctl limits, and swap out as many pages as needed 9 * to bring the system back to freepages.high: 2.4.97, Rik van Riel. 10 * Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com). 11 * Multiqueue VM started 5.8.00, Rik van Riel. 12 */ 13 14#include <linux/mm.h> 15#include <linux/module.h> 16#include <linux/slab.h> 17#include <linux/kernel_stat.h> 18#include <linux/swap.h> 19#include <linux/pagemap.h> 20#include <linux/init.h> 21#include <linux/highmem.h> 22#include <linux/vmstat.h> 23#include <linux/file.h> 24#include <linux/writeback.h> 25#include <linux/blkdev.h> 26#include <linux/buffer_head.h> /* for try_to_release_page(), 27 buffer_heads_over_limit */ 28#include <linux/mm_inline.h> 29#include <linux/pagevec.h> 30#include <linux/backing-dev.h> 31#include <linux/rmap.h> 32#include <linux/topology.h> 33#include <linux/cpu.h> 34#include <linux/cpuset.h> 35#include <linux/notifier.h> 36#include <linux/rwsem.h> 37#include <linux/delay.h> 38#include <linux/kthread.h> 39#include <linux/freezer.h> 40#include <linux/memcontrol.h> 41#include <linux/delayacct.h> 42#include <linux/sysctl.h> 43 44#include <asm/tlbflush.h> 45#include <asm/div64.h> 46 47#include <linux/swapops.h> 48 49#include "internal.h" 50 51struct scan_control { 52 /* Incremented by the number of inactive pages that were scanned */ 53 unsigned long nr_scanned; 54 55 /* Number of pages freed so far during a call to shrink_zones() */ 56 unsigned long nr_reclaimed; 57 58 /* This context's GFP mask */ 59 gfp_t gfp_mask; 60 61 int may_writepage; 62 63 /* Can pages be swapped as part of reclaim? */ 64 int may_swap; 65 66 /* This context's SWAP_CLUSTER_MAX. If freeing memory for 67 * suspend, we effectively ignore SWAP_CLUSTER_MAX. 68 * In this context, it doesn't matter that we scan the 69 * whole list at once. */ 70 int swap_cluster_max; 71 72 int swappiness; 73 74 int all_unreclaimable; 75 76 int order; 77 78 /* Which cgroup do we reclaim from */ 79 struct mem_cgroup *mem_cgroup; 80 81 /* Pluggable isolate pages callback */ 82 unsigned long (*isolate_pages)(unsigned long nr, struct list_head *dst, 83 unsigned long *scanned, int order, int mode, 84 struct zone *z, struct mem_cgroup *mem_cont, 85 int active, int file); 86}; 87 88#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru)) 89 90#ifdef ARCH_HAS_PREFETCH 91#define prefetch_prev_lru_page(_page, _base, _field) \ 92 do { \ 93 if ((_page)->lru.prev != _base) { \ 94 struct page *prev; \ 95 \ 96 prev = lru_to_page(&(_page->lru)); \ 97 prefetch(&prev->_field); \ 98 } \ 99 } while (0) 100#else 101#define prefetch_prev_lru_page(_page, _base, _field) do { } while (0) 102#endif 103 104#ifdef ARCH_HAS_PREFETCHW 105#define prefetchw_prev_lru_page(_page, _base, _field) \ 106 do { \ 107 if ((_page)->lru.prev != _base) { \ 108 struct page *prev; \ 109 \ 110 prev = lru_to_page(&(_page->lru)); \ 111 prefetchw(&prev->_field); \ 112 } \ 113 } while (0) 114#else 115#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0) 116#endif 117 118/* 119 * From 0 .. 100. Higher means more swappy. 
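 * Tunable at run time through the vm.swappiness sysctl
 * (/proc/sys/vm/swappiness); the compiled-in default is 60, set just below.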
 */
int vm_swappiness = 60;
long vm_total_pages;	/* The total number of pages which the VM controls */

static LIST_HEAD(shrinker_list);
static DECLARE_RWSEM(shrinker_rwsem);

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
#define scan_global_lru(sc)	(!(sc)->mem_cgroup)
#else
#define scan_global_lru(sc)	(1)
#endif

static struct zone_reclaim_stat *get_reclaim_stat(struct zone *zone,
						  struct scan_control *sc)
{
	return &zone->reclaim_stat;
}

static unsigned long zone_nr_pages(struct zone *zone, struct scan_control *sc,
				   enum lru_list lru)
{
	return zone_page_state(zone, NR_LRU_BASE + lru);
}


/*
 * Add a shrinker callback to be called from the vm
 */
void register_shrinker(struct shrinker *shrinker)
{
	shrinker->nr = 0;
	down_write(&shrinker_rwsem);
	list_add_tail(&shrinker->list, &shrinker_list);
	up_write(&shrinker_rwsem);
}
EXPORT_SYMBOL(register_shrinker);

/*
 * Remove one
 */
void unregister_shrinker(struct shrinker *shrinker)
{
	down_write(&shrinker_rwsem);
	list_del(&shrinker->list);
	up_write(&shrinker_rwsem);
}
EXPORT_SYMBOL(unregister_shrinker);

#define SHRINK_BATCH 128
/*
 * Call the shrink functions to age shrinkable caches
 *
 * Here we assume it costs one seek to replace a lru page and that it also
 * takes a seek to recreate a cache object.  With this in mind we age equal
 * percentages of the lru and ageable caches.  This should balance the seeks
 * generated by these structures.
 *
 * If the vm encountered mapped pages on the LRU it increases the pressure on
 * slab to avoid swapping.
 *
 * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
 *
 * `lru_pages' represents the number of on-LRU pages in all the zones which
 * are eligible for the caller's allocation attempt.  It is used for balancing
 * slab reclaim versus page reclaim.
 *
 * Returns the number of slab objects which we shrunk.
 */
unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
			unsigned long lru_pages)
{
	struct shrinker *shrinker;
	unsigned long ret = 0;

	if (scanned == 0)
		scanned = SWAP_CLUSTER_MAX;

	if (!down_read_trylock(&shrinker_rwsem))
		return 1;	/* Assume we'll be able to shrink next time */

	list_for_each_entry(shrinker, &shrinker_list, list) {
		unsigned long long delta;
		unsigned long total_scan;
		unsigned long max_pass = (*shrinker->shrink)(0, gfp_mask);

		delta = (4 * scanned) / shrinker->seeks;
		delta *= max_pass;
		do_div(delta, lru_pages + 1);
		shrinker->nr += delta;
		if (shrinker->nr < 0) {
			printk(KERN_ERR "%s: nr=%ld\n",
					__func__, shrinker->nr);
			shrinker->nr = max_pass;
		}

		/*
		 * Avoid risking looping forever due to too large nr value:
		 * never try to free more than twice the estimated number of
		 * freeable entries.
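		 *
		 * (For scale: with scanned == 1024, a shrinker whose ->seeks
		 * is 2, max_pass == 10000 and lru_pages == 100000, the delta
		 * computed above is (4 * 1024 / 2) * 10000 / 100001, i.e.
		 * roughly 204 objects added to nr per pass.  Illustrative
		 * figures only, not taken from a real workload.)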
220 */ 221 if (shrinker->nr > max_pass * 2) 222 shrinker->nr = max_pass * 2; 223 224 total_scan = shrinker->nr; 225 shrinker->nr = 0; 226 227 while (total_scan >= SHRINK_BATCH) { 228 long this_scan = SHRINK_BATCH; 229 int shrink_ret; 230 int nr_before; 231 232 nr_before = (*shrinker->shrink)(0, gfp_mask); 233 shrink_ret = (*shrinker->shrink)(this_scan, gfp_mask); 234 if (shrink_ret == -1) 235 break; 236 if (shrink_ret < nr_before) 237 ret += nr_before - shrink_ret; 238 count_vm_events(SLABS_SCANNED, this_scan); 239 total_scan -= this_scan; 240 241 cond_resched(); 242 } 243 244 shrinker->nr += total_scan; 245 } 246 up_read(&shrinker_rwsem); 247 return ret; 248} 249 250/* Called without lock on whether page is mapped, so answer is unstable */ 251static inline int page_mapping_inuse(struct page *page) 252{ 253 struct address_space *mapping; 254 255 /* Page is in somebody's page tables. */ 256 if (page_mapped(page)) 257 return 1; 258 259 /* Be more reluctant to reclaim swapcache than pagecache */ 260 if (PageSwapCache(page)) 261 return 1; 262 263 mapping = page_mapping(page); 264 if (!mapping) 265 return 0; 266 267 /* File is mmap'd by somebody? */ 268 return mapping_mapped(mapping); 269} 270 271static inline int is_page_cache_freeable(struct page *page) 272{ 273 return page_count(page) - !!PagePrivate(page) == 2; 274} 275 276static int may_write_to_queue(struct backing_dev_info *bdi) 277{ 278 if (current->flags & PF_SWAPWRITE) 279 return 1; 280 if (!bdi_write_congested(bdi)) 281 return 1; 282 if (bdi == current->backing_dev_info) 283 return 1; 284 return 0; 285} 286 287/* 288 * We detected a synchronous write error writing a page out. Probably 289 * -ENOSPC. We need to propagate that into the address_space for a subsequent 290 * fsync(), msync() or close(). 291 * 292 * The tricky part is that after writepage we cannot touch the mapping: nothing 293 * prevents it from being freed up. But we have a ref on the page and once 294 * that page is locked, the mapping is pinned. 295 * 296 * We're allowed to run sleeping lock_page() here because we know the caller has 297 * __GFP_FS. 298 */ 299static void handle_write_error(struct address_space *mapping, 300 struct page *page, int error) 301{ 302 lock_page(page); 303 if (page_mapping(page) == mapping) 304 mapping_set_error(mapping, error); 305 unlock_page(page); 306} 307 308/* Request for sync pageout. */ 309enum pageout_io { 310 PAGEOUT_IO_ASYNC, 311 PAGEOUT_IO_SYNC, 312}; 313 314/* possible outcome of pageout() */ 315typedef enum { 316 /* failed to write page out, page is locked */ 317 PAGE_KEEP, 318 /* move page to the active list, page is locked */ 319 PAGE_ACTIVATE, 320 /* page has been sent to the disk successfully, page is unlocked */ 321 PAGE_SUCCESS, 322 /* page is clean and locked */ 323 PAGE_CLEAN, 324} pageout_t; 325 326/* 327 * pageout is called by shrink_page_list() for each dirty page. 328 * Calls ->writepage(). 329 */ 330static pageout_t pageout(struct page *page, struct address_space *mapping, 331 enum pageout_io sync_writeback) 332{ 333 /* 334 * If the page is dirty, only perform writeback if that write 335 * will be non-blocking. To prevent this allocation from being 336 * stalled by pagecache activity. But note that there may be 337 * stalls if we need to run get_block(). We could test 338 * PagePrivate for that. 339 * 340 * If this process is currently in generic_file_write() against 341 * this page's queue, we can perform writeback even if that 342 * will block. 
343 * 344 * If the page is swapcache, write it back even if that would 345 * block, for some throttling. This happens by accident, because 346 * swap_backing_dev_info is bust: it doesn't reflect the 347 * congestion state of the swapdevs. Easy to fix, if needed. 348 * See swapfile.c:page_queue_congested(). 349 */ 350 if (!is_page_cache_freeable(page)) 351 return PAGE_KEEP; 352 if (!mapping) { 353 /* 354 * Some data journaling orphaned pages can have 355 * page->mapping == NULL while being dirty with clean buffers. 356 */ 357 if (PagePrivate(page)) { 358 if (try_to_free_buffers(page)) { 359 ClearPageDirty(page); 360 printk("%s: orphaned page\n", __func__); 361 return PAGE_CLEAN; 362 } 363 } 364 return PAGE_KEEP; 365 } 366 if (mapping->a_ops->writepage == NULL) 367 return PAGE_ACTIVATE; 368 if (!may_write_to_queue(mapping->backing_dev_info)) 369 return PAGE_KEEP; 370 371 if (clear_page_dirty_for_io(page)) { 372 int res; 373 struct writeback_control wbc = { 374 .sync_mode = WB_SYNC_NONE, 375 .nr_to_write = SWAP_CLUSTER_MAX, 376 .range_start = 0, 377 .range_end = LLONG_MAX, 378 .nonblocking = 1, 379 .for_reclaim = 1, 380 }; 381 382 SetPageReclaim(page); 383 res = mapping->a_ops->writepage(page, &wbc); 384 if (res < 0) 385 handle_write_error(mapping, page, res); 386 if (res == AOP_WRITEPAGE_ACTIVATE) { 387 ClearPageReclaim(page); 388 return PAGE_ACTIVATE; 389 } 390 391 /* 392 * Wait on writeback if requested to. This happens when 393 * direct reclaiming a large contiguous area and the 394 * first attempt to free a range of pages fails. 395 */ 396 if (PageWriteback(page) && sync_writeback == PAGEOUT_IO_SYNC) 397 wait_on_page_writeback(page); 398 399 if (!PageWriteback(page)) { 400 /* synchronous write or broken a_ops? */ 401 ClearPageReclaim(page); 402 } 403 inc_zone_page_state(page, NR_VMSCAN_WRITE); 404 return PAGE_SUCCESS; 405 } 406 407 return PAGE_CLEAN; 408} 409 410/* 411 * Same as remove_mapping, but if the page is removed from the mapping, it 412 * gets returned with a refcount of 0. 413 */ 414static int __remove_mapping(struct address_space *mapping, struct page *page) 415{ 416 BUG_ON(!PageLocked(page)); 417 BUG_ON(mapping != page_mapping(page)); 418 419 spin_lock_irq(&mapping->tree_lock); 420 /* 421 * The non racy check for a busy page. 422 * 423 * Must be careful with the order of the tests. When someone has 424 * a ref to the page, it may be possible that they dirty it then 425 * drop the reference. So if PageDirty is tested before page_count 426 * here, then the following race may occur: 427 * 428 * get_user_pages(&page); 429 * [user mapping goes away] 430 * write_to(page); 431 * !PageDirty(page) [good] 432 * SetPageDirty(page); 433 * put_page(page); 434 * !page_count(page) [good, discard it] 435 * 436 * [oops, our write_to data is lost] 437 * 438 * Reversing the order of the tests ensures such a situation cannot 439 * escape unnoticed. The smp_rmb is needed to ensure the page->flags 440 * load is not satisfied before that of page->_count. 441 * 442 * Note that if SetPageDirty is always performed via set_page_dirty, 443 * and thus under tree_lock, then this ordering is not required. 
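	 *
	 * The count of 2 expected by page_freeze_refs() below is the
	 * reference held by the page cache (or swap cache) plus the single
	 * reference held by our caller (see the comment above
	 * remove_mapping()); any extra reference means the page is busy and
	 * cannot be freed here.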
444 */ 445 if (!page_freeze_refs(page, 2)) 446 goto cannot_free; 447 /* note: atomic_cmpxchg in page_freeze_refs provides the smp_rmb */ 448 if (unlikely(PageDirty(page))) { 449 page_unfreeze_refs(page, 2); 450 goto cannot_free; 451 } 452 453 if (PageSwapCache(page)) { 454 swp_entry_t swap = { .val = page_private(page) }; 455 __delete_from_swap_cache(page); 456 spin_unlock_irq(&mapping->tree_lock); 457 swap_free(swap); 458 } else { 459 __remove_from_page_cache(page); 460 spin_unlock_irq(&mapping->tree_lock); 461 } 462 463 return 1; 464 465cannot_free: 466 spin_unlock_irq(&mapping->tree_lock); 467 return 0; 468} 469 470/* 471 * Attempt to detach a locked page from its ->mapping. If it is dirty or if 472 * someone else has a ref on the page, abort and return 0. If it was 473 * successfully detached, return 1. Assumes the caller has a single ref on 474 * this page. 475 */ 476int remove_mapping(struct address_space *mapping, struct page *page) 477{ 478 if (__remove_mapping(mapping, page)) { 479 /* 480 * Unfreezing the refcount with 1 rather than 2 effectively 481 * drops the pagecache ref for us without requiring another 482 * atomic operation. 483 */ 484 page_unfreeze_refs(page, 1); 485 return 1; 486 } 487 return 0; 488} 489 490/** 491 * putback_lru_page - put previously isolated page onto appropriate LRU list 492 * @page: page to be put back to appropriate lru list 493 * 494 * Add previously isolated @page to appropriate LRU list. 495 * Page may still be unevictable for other reasons. 496 * 497 * lru_lock must not be held, interrupts must be enabled. 498 */ 499#ifdef CONFIG_UNEVICTABLE_LRU 500void putback_lru_page(struct page *page) 501{ 502 int lru; 503 int active = !!TestClearPageActive(page); 504 int was_unevictable = PageUnevictable(page); 505 506 VM_BUG_ON(PageLRU(page)); 507 508redo: 509 ClearPageUnevictable(page); 510 511 if (page_evictable(page, NULL)) { 512 /* 513 * For evictable pages, we can use the cache. 514 * In event of a race, worst case is we end up with an 515 * unevictable page on [in]active list. 516 * We know how to handle that. 517 */ 518 lru = active + page_is_file_cache(page); 519 lru_cache_add_lru(page, lru); 520 } else { 521 /* 522 * Put unevictable pages directly on zone's unevictable 523 * list. 524 */ 525 lru = LRU_UNEVICTABLE; 526 add_page_to_unevictable_list(page); 527 } 528 529 /* 530 * page's status can change while we move it among lru. If an evictable 531 * page is on unevictable list, it never be freed. To avoid that, 532 * check after we added it to the list, again. 533 */ 534 if (lru == LRU_UNEVICTABLE && page_evictable(page, NULL)) { 535 if (!isolate_lru_page(page)) { 536 put_page(page); 537 goto redo; 538 } 539 /* This means someone else dropped this page from LRU 540 * So, it will be freed or putback to LRU again. There is 541 * nothing to do here. 
542 */ 543 } 544 545 if (was_unevictable && lru != LRU_UNEVICTABLE) 546 count_vm_event(UNEVICTABLE_PGRESCUED); 547 else if (!was_unevictable && lru == LRU_UNEVICTABLE) 548 count_vm_event(UNEVICTABLE_PGCULLED); 549 550 put_page(page); /* drop ref from isolate */ 551} 552 553#else /* CONFIG_UNEVICTABLE_LRU */ 554 555void putback_lru_page(struct page *page) 556{ 557 int lru; 558 VM_BUG_ON(PageLRU(page)); 559 560 lru = !!TestClearPageActive(page) + page_is_file_cache(page); 561 lru_cache_add_lru(page, lru); 562 put_page(page); 563} 564#endif /* CONFIG_UNEVICTABLE_LRU */ 565 566 567/* 568 * shrink_page_list() returns the number of reclaimed pages 569 */ 570static unsigned long shrink_page_list(struct list_head *page_list, 571 struct scan_control *sc, 572 enum pageout_io sync_writeback) 573{ 574 LIST_HEAD(ret_pages); 575 struct pagevec freed_pvec; 576 int pgactivate = 0; 577 unsigned long nr_reclaimed = 0; 578 579 cond_resched(); 580 581 pagevec_init(&freed_pvec, 1); 582 while (!list_empty(page_list)) { 583 struct address_space *mapping; 584 struct page *page; 585 int may_enter_fs; 586 int referenced; 587 588 cond_resched(); 589 590 page = lru_to_page(page_list); 591 list_del(&page->lru); 592 593 if (!trylock_page(page)) 594 goto keep; 595 596 VM_BUG_ON(PageActive(page)); 597 598 sc->nr_scanned++; 599 600 if (unlikely(!page_evictable(page, NULL))) 601 goto cull_mlocked; 602 603 if (!sc->may_swap && page_mapped(page)) 604 goto keep_locked; 605 606 /* Double the slab pressure for mapped and swapcache pages */ 607 if (page_mapped(page) || PageSwapCache(page)) 608 sc->nr_scanned++; 609 610 may_enter_fs = (sc->gfp_mask & __GFP_FS) || 611 (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO)); 612 613 if (PageWriteback(page)) { 614 /* 615 * Synchronous reclaim is performed in two passes, 616 * first an asynchronous pass over the list to 617 * start parallel writeback, and a second synchronous 618 * pass to wait for the IO to complete. Wait here 619 * for any page for which writeback has already 620 * started. 621 */ 622 if (sync_writeback == PAGEOUT_IO_SYNC && may_enter_fs) 623 wait_on_page_writeback(page); 624 else 625 goto keep_locked; 626 } 627 628 referenced = page_referenced(page, 1, sc->mem_cgroup); 629 /* In active use or really unfreeable? Activate it. */ 630 if (sc->order <= PAGE_ALLOC_COSTLY_ORDER && 631 referenced && page_mapping_inuse(page)) 632 goto activate_locked; 633 634 /* 635 * Anonymous process memory has backing store? 636 * Try to allocate it some swap space here. 637 */ 638 if (PageAnon(page) && !PageSwapCache(page)) { 639 if (!(sc->gfp_mask & __GFP_IO)) 640 goto keep_locked; 641 if (!add_to_swap(page)) 642 goto activate_locked; 643 may_enter_fs = 1; 644 } 645 646 mapping = page_mapping(page); 647 648 /* 649 * The page is mapped into the page tables of one or more 650 * processes. Try to unmap it here. 
651 */ 652 if (page_mapped(page) && mapping) { 653 switch (try_to_unmap(page, 0)) { 654 case SWAP_FAIL: 655 goto activate_locked; 656 case SWAP_AGAIN: 657 goto keep_locked; 658 case SWAP_MLOCK: 659 goto cull_mlocked; 660 case SWAP_SUCCESS: 661 ; /* try to free the page below */ 662 } 663 } 664 665 if (PageDirty(page)) { 666 if (sc->order <= PAGE_ALLOC_COSTLY_ORDER && referenced) 667 goto keep_locked; 668 if (!may_enter_fs) 669 goto keep_locked; 670 if (!sc->may_writepage) 671 goto keep_locked; 672 673 /* Page is dirty, try to write it out here */ 674 switch (pageout(page, mapping, sync_writeback)) { 675 case PAGE_KEEP: 676 goto keep_locked; 677 case PAGE_ACTIVATE: 678 goto activate_locked; 679 case PAGE_SUCCESS: 680 if (PageWriteback(page) || PageDirty(page)) 681 goto keep; 682 /* 683 * A synchronous write - probably a ramdisk. Go 684 * ahead and try to reclaim the page. 685 */ 686 if (!trylock_page(page)) 687 goto keep; 688 if (PageDirty(page) || PageWriteback(page)) 689 goto keep_locked; 690 mapping = page_mapping(page); 691 case PAGE_CLEAN: 692 ; /* try to free the page below */ 693 } 694 } 695 696 /* 697 * If the page has buffers, try to free the buffer mappings 698 * associated with this page. If we succeed we try to free 699 * the page as well. 700 * 701 * We do this even if the page is PageDirty(). 702 * try_to_release_page() does not perform I/O, but it is 703 * possible for a page to have PageDirty set, but it is actually 704 * clean (all its buffers are clean). This happens if the 705 * buffers were written out directly, with submit_bh(). ext3 706 * will do this, as well as the blockdev mapping. 707 * try_to_release_page() will discover that cleanness and will 708 * drop the buffers and mark the page clean - it can be freed. 709 * 710 * Rarely, pages can have buffers and no ->mapping. These are 711 * the pages which were not successfully invalidated in 712 * truncate_complete_page(). We try to drop those buffers here 713 * and if that worked, and the page is no longer mapped into 714 * process address space (page_count == 1) it can be freed. 715 * Otherwise, leave the page on the LRU so it is swappable. 716 */ 717 if (PagePrivate(page)) { 718 if (!try_to_release_page(page, sc->gfp_mask)) 719 goto activate_locked; 720 if (!mapping && page_count(page) == 1) { 721 unlock_page(page); 722 if (put_page_testzero(page)) 723 goto free_it; 724 else { 725 /* 726 * rare race with speculative reference. 727 * the speculative reference will free 728 * this page shortly, so we may 729 * increment nr_reclaimed here (and 730 * leave it off the LRU). 731 */ 732 nr_reclaimed++; 733 continue; 734 } 735 } 736 } 737 738 if (!mapping || !__remove_mapping(mapping, page)) 739 goto keep_locked; 740 741 /* 742 * At this point, we have no other references and there is 743 * no way to pick any more up (removed from LRU, removed 744 * from pagecache). Can use non-atomic bitops now (and 745 * we obviously don't have to worry about waking up a process 746 * waiting on the page lock, because there are no references. 747 */ 748 __clear_page_locked(page); 749free_it: 750 nr_reclaimed++; 751 if (!pagevec_add(&freed_pvec, page)) { 752 __pagevec_free(&freed_pvec); 753 pagevec_reinit(&freed_pvec); 754 } 755 continue; 756 757cull_mlocked: 758 if (PageSwapCache(page)) 759 try_to_free_swap(page); 760 unlock_page(page); 761 putback_lru_page(page); 762 continue; 763 764activate_locked: 765 /* Not a candidate for swapping, so reclaim swap space. 
		 */
		if (PageSwapCache(page) && vm_swap_full())
			try_to_free_swap(page);
		VM_BUG_ON(PageActive(page));
		SetPageActive(page);
		pgactivate++;
keep_locked:
		unlock_page(page);
keep:
		list_add(&page->lru, &ret_pages);
		VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
	}
	list_splice(&ret_pages, page_list);
	if (pagevec_count(&freed_pvec))
		__pagevec_free(&freed_pvec);
	count_vm_events(PGACTIVATE, pgactivate);
	return nr_reclaimed;
}

/* LRU Isolation modes. */
#define ISOLATE_INACTIVE 0	/* Isolate inactive pages. */
#define ISOLATE_ACTIVE 1	/* Isolate active pages. */
#define ISOLATE_BOTH 2		/* Isolate both active and inactive pages. */

/*
 * Attempt to remove the specified page from its LRU.  Only take this page
 * if it is of the appropriate PageActive status.  Pages which are being
 * freed elsewhere are also ignored.
 *
 * page:	page to consider
 * mode:	one of the LRU isolation modes defined above
 *
 * returns 0 on success, -ve errno on failure.
 */
int __isolate_lru_page(struct page *page, int mode, int file)
{
	int ret = -EINVAL;

	/* Only take pages on the LRU. */
	if (!PageLRU(page))
		return ret;

	/*
	 * When checking the active state, we need to be sure we are
	 * dealing with comparable boolean values.  Take the logical not
	 * of each.
	 */
	if (mode != ISOLATE_BOTH && (!PageActive(page) != !mode))
		return ret;

	if (mode != ISOLATE_BOTH && (!page_is_file_cache(page) != !file))
		return ret;

	/*
	 * When this function is being called for lumpy reclaim, we
	 * initially look into all LRU pages, active, inactive and
	 * unevictable; only give shrink_page_list evictable pages.
	 */
	if (PageUnevictable(page))
		return ret;

	ret = -EBUSY;

	if (likely(get_page_unless_zero(page))) {
		/*
		 * Be careful not to clear PageLRU until after we're
		 * sure the page is not being freed elsewhere -- the
		 * page release code relies on it.
		 */
		ClearPageLRU(page);
		ret = 0;
		mem_cgroup_del_lru(page);
	}

	return ret;
}

/*
 * zone->lru_lock is heavily contended.  Some of the functions that
 * shrink the lists perform better by taking out a batch of pages
 * and working on them outside the LRU lock.
 *
 * For pagecache intensive workloads, this function is the hottest
 * spot in the kernel (apart from copy_*_user functions).
 *
 * Appropriate locks must be held before calling this function.
 *
 * @nr_to_scan:	The number of pages to look through on the list.
 * @src:	The LRU list to pull pages off.
 * @dst:	The temp list to put pages on to.
 * @scanned:	The number of pages that were scanned.
 * @order:	The caller's attempted allocation order
 * @mode:	One of the LRU isolation modes
 * @file:	True [1] if isolating file [!anon] pages
 *
 * returns how many pages were moved onto *@dst.
861 */ 862static unsigned long isolate_lru_pages(unsigned long nr_to_scan, 863 struct list_head *src, struct list_head *dst, 864 unsigned long *scanned, int order, int mode, int file) 865{ 866 unsigned long nr_taken = 0; 867 unsigned long scan; 868 869 for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) { 870 struct page *page; 871 unsigned long pfn; 872 unsigned long end_pfn; 873 unsigned long page_pfn; 874 int zone_id; 875 876 page = lru_to_page(src); 877 prefetchw_prev_lru_page(page, src, flags); 878 879 VM_BUG_ON(!PageLRU(page)); 880 881 switch (__isolate_lru_page(page, mode, file)) { 882 case 0: 883 list_move(&page->lru, dst); 884 nr_taken++; 885 break; 886 887 case -EBUSY: 888 /* else it is being freed elsewhere */ 889 list_move(&page->lru, src); 890 continue; 891 892 default: 893 BUG(); 894 } 895 896 if (!order) 897 continue; 898 899 /* 900 * Attempt to take all pages in the order aligned region 901 * surrounding the tag page. Only take those pages of 902 * the same active state as that tag page. We may safely 903 * round the target page pfn down to the requested order 904 * as the mem_map is guarenteed valid out to MAX_ORDER, 905 * where that page is in a different zone we will detect 906 * it from its zone id and abort this block scan. 907 */ 908 zone_id = page_zone_id(page); 909 page_pfn = page_to_pfn(page); 910 pfn = page_pfn & ~((1 << order) - 1); 911 end_pfn = pfn + (1 << order); 912 for (; pfn < end_pfn; pfn++) { 913 struct page *cursor_page; 914 915 /* The target page is in the block, ignore it. */ 916 if (unlikely(pfn == page_pfn)) 917 continue; 918 919 /* Avoid holes within the zone. */ 920 if (unlikely(!pfn_valid_within(pfn))) 921 break; 922 923 cursor_page = pfn_to_page(pfn); 924 925 /* Check that we have not crossed a zone boundary. */ 926 if (unlikely(page_zone_id(cursor_page) != zone_id)) 927 continue; 928 switch (__isolate_lru_page(cursor_page, mode, file)) { 929 case 0: 930 list_move(&cursor_page->lru, dst); 931 nr_taken++; 932 scan++; 933 break; 934 935 case -EBUSY: 936 /* else it is being freed elsewhere */ 937 list_move(&cursor_page->lru, src); 938 default: 939 break; /* ! on LRU or wrong list */ 940 } 941 } 942 } 943 944 *scanned = scan; 945 return nr_taken; 946} 947 948static unsigned long isolate_pages_global(unsigned long nr, 949 struct list_head *dst, 950 unsigned long *scanned, int order, 951 int mode, struct zone *z, 952 struct mem_cgroup *mem_cont, 953 int active, int file) 954{ 955 int lru = LRU_BASE; 956 if (active) 957 lru += LRU_ACTIVE; 958 if (file) 959 lru += LRU_FILE; 960 return isolate_lru_pages(nr, &z->lru[lru].list, dst, scanned, order, 961 mode, !!file); 962} 963 964/* 965 * clear_active_flags() is a helper for shrink_active_list(), clearing 966 * any active bits from the pages in the list. 967 */ 968static unsigned long clear_active_flags(struct list_head *page_list, 969 unsigned int *count) 970{ 971 int nr_active = 0; 972 int lru; 973 struct page *page; 974 975 list_for_each_entry(page, page_list, lru) { 976 lru = page_is_file_cache(page); 977 if (PageActive(page)) { 978 lru += LRU_ACTIVE; 979 ClearPageActive(page); 980 nr_active++; 981 } 982 count[lru]++; 983 } 984 985 return nr_active; 986} 987 988/** 989 * isolate_lru_page - tries to isolate a page from its LRU list 990 * @page: page to isolate from its LRU list 991 * 992 * Isolates a @page from an LRU list, clears PageLRU and adjusts the 993 * vmstat statistic corresponding to whatever LRU list the page was on. 994 * 995 * Returns 0 if the page was removed from an LRU list. 
 * Returns -EBUSY if the page was not on an LRU list.
 *
 * The returned page will have PageLRU() cleared.  If it was found on
 * the active list, it will have PageActive set.  If it was found on
 * the unevictable list, it will have the PageUnevictable bit set.  That flag
 * may need to be cleared by the caller before letting the page go.
 *
 * The vmstat statistic corresponding to the list on which the page was
 * found will be decremented.
 *
 * Restrictions:
 * (1) Must be called with an elevated refcount on the page.  This is a
 *     fundamental difference from isolate_lru_pages (which is called
 *     without a stable reference).
 * (2) the lru_lock must not be held.
 * (3) interrupts must be enabled.
 */
int isolate_lru_page(struct page *page)
{
	int ret = -EBUSY;

	if (PageLRU(page)) {
		struct zone *zone = page_zone(page);

		spin_lock_irq(&zone->lru_lock);
		if (PageLRU(page) && get_page_unless_zero(page)) {
			int lru = page_lru(page);
			ret = 0;
			ClearPageLRU(page);

			del_page_from_lru_list(zone, page, lru);
		}
		spin_unlock_irq(&zone->lru_lock);
	}
	return ret;
}

/*
 * shrink_inactive_list() is a helper for shrink_zone().  It returns the number
 * of reclaimed pages
 */
static unsigned long shrink_inactive_list(unsigned long max_scan,
			struct zone *zone, struct scan_control *sc,
			int priority, int file)
{
	LIST_HEAD(page_list);
	struct pagevec pvec;
	unsigned long nr_scanned = 0;
	unsigned long nr_reclaimed = 0;
	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);

	pagevec_init(&pvec, 1);

	lru_add_drain();
	spin_lock_irq(&zone->lru_lock);
	do {
		struct page *page;
		unsigned long nr_taken;
		unsigned long nr_scan;
		unsigned long nr_freed;
		unsigned long nr_active;
		unsigned int count[NR_LRU_LISTS] = { 0, };
		int mode = ISOLATE_INACTIVE;

		/*
		 * If we need a large contiguous chunk of memory, or have
		 * trouble getting a small set of contiguous pages, we
		 * will reclaim both active and inactive pages.
		 *
		 * We use the same threshold as pageout congestion_wait below.
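		 *
		 * That is: allocations above PAGE_ALLOC_COSTLY_ORDER always
		 * isolate both lists, and smaller high-order allocations fall
		 * back to ISOLATE_BOTH once the scanning priority has dropped
		 * below DEF_PRIORITY - 2.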
1066 */ 1067 if (sc->order > PAGE_ALLOC_COSTLY_ORDER) 1068 mode = ISOLATE_BOTH; 1069 else if (sc->order && priority < DEF_PRIORITY - 2) 1070 mode = ISOLATE_BOTH; 1071 1072 nr_taken = sc->isolate_pages(sc->swap_cluster_max, 1073 &page_list, &nr_scan, sc->order, mode, 1074 zone, sc->mem_cgroup, 0, file); 1075 nr_active = clear_active_flags(&page_list, count); 1076 __count_vm_events(PGDEACTIVATE, nr_active); 1077 1078 __mod_zone_page_state(zone, NR_ACTIVE_FILE, 1079 -count[LRU_ACTIVE_FILE]); 1080 __mod_zone_page_state(zone, NR_INACTIVE_FILE, 1081 -count[LRU_INACTIVE_FILE]); 1082 __mod_zone_page_state(zone, NR_ACTIVE_ANON, 1083 -count[LRU_ACTIVE_ANON]); 1084 __mod_zone_page_state(zone, NR_INACTIVE_ANON, 1085 -count[LRU_INACTIVE_ANON]); 1086 1087 if (scan_global_lru(sc)) { 1088 zone->pages_scanned += nr_scan; 1089 reclaim_stat->recent_scanned[0] += 1090 count[LRU_INACTIVE_ANON]; 1091 reclaim_stat->recent_scanned[0] += 1092 count[LRU_ACTIVE_ANON]; 1093 reclaim_stat->recent_scanned[1] += 1094 count[LRU_INACTIVE_FILE]; 1095 reclaim_stat->recent_scanned[1] += 1096 count[LRU_ACTIVE_FILE]; 1097 } 1098 spin_unlock_irq(&zone->lru_lock); 1099 1100 nr_scanned += nr_scan; 1101 nr_freed = shrink_page_list(&page_list, sc, PAGEOUT_IO_ASYNC); 1102 1103 /* 1104 * If we are direct reclaiming for contiguous pages and we do 1105 * not reclaim everything in the list, try again and wait 1106 * for IO to complete. This will stall high-order allocations 1107 * but that should be acceptable to the caller 1108 */ 1109 if (nr_freed < nr_taken && !current_is_kswapd() && 1110 sc->order > PAGE_ALLOC_COSTLY_ORDER) { 1111 congestion_wait(WRITE, HZ/10); 1112 1113 /* 1114 * The attempt at page out may have made some 1115 * of the pages active, mark them inactive again. 1116 */ 1117 nr_active = clear_active_flags(&page_list, count); 1118 count_vm_events(PGDEACTIVATE, nr_active); 1119 1120 nr_freed += shrink_page_list(&page_list, sc, 1121 PAGEOUT_IO_SYNC); 1122 } 1123 1124 nr_reclaimed += nr_freed; 1125 local_irq_disable(); 1126 if (current_is_kswapd()) { 1127 __count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scan); 1128 __count_vm_events(KSWAPD_STEAL, nr_freed); 1129 } else if (scan_global_lru(sc)) 1130 __count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scan); 1131 1132 __count_zone_vm_events(PGSTEAL, zone, nr_freed); 1133 1134 if (nr_taken == 0) 1135 goto done; 1136 1137 spin_lock(&zone->lru_lock); 1138 /* 1139 * Put back any unfreeable pages. 1140 */ 1141 while (!list_empty(&page_list)) { 1142 int lru; 1143 page = lru_to_page(&page_list); 1144 VM_BUG_ON(PageLRU(page)); 1145 list_del(&page->lru); 1146 if (unlikely(!page_evictable(page, NULL))) { 1147 spin_unlock_irq(&zone->lru_lock); 1148 putback_lru_page(page); 1149 spin_lock_irq(&zone->lru_lock); 1150 continue; 1151 } 1152 SetPageLRU(page); 1153 lru = page_lru(page); 1154 add_page_to_lru_list(zone, page, lru); 1155 if (PageActive(page) && scan_global_lru(sc)) { 1156 int file = !!page_is_file_cache(page); 1157 reclaim_stat->recent_rotated[file]++; 1158 } 1159 if (!pagevec_add(&pvec, page)) { 1160 spin_unlock_irq(&zone->lru_lock); 1161 __pagevec_release(&pvec); 1162 spin_lock_irq(&zone->lru_lock); 1163 } 1164 } 1165 } while (nr_scanned < max_scan); 1166 spin_unlock(&zone->lru_lock); 1167done: 1168 local_irq_enable(); 1169 pagevec_release(&pvec); 1170 return nr_reclaimed; 1171} 1172 1173/* 1174 * We are about to scan this zone at a certain priority level. 
If that priority 1175 * level is smaller (ie: more urgent) than the previous priority, then note 1176 * that priority level within the zone. This is done so that when the next 1177 * process comes in to scan this zone, it will immediately start out at this 1178 * priority level rather than having to build up its own scanning priority. 1179 * Here, this priority affects only the reclaim-mapped threshold. 1180 */ 1181static inline void note_zone_scanning_priority(struct zone *zone, int priority) 1182{ 1183 if (priority < zone->prev_priority) 1184 zone->prev_priority = priority; 1185} 1186 1187/* 1188 * This moves pages from the active list to the inactive list. 1189 * 1190 * We move them the other way if the page is referenced by one or more 1191 * processes, from rmap. 1192 * 1193 * If the pages are mostly unmapped, the processing is fast and it is 1194 * appropriate to hold zone->lru_lock across the whole operation. But if 1195 * the pages are mapped, the processing is slow (page_referenced()) so we 1196 * should drop zone->lru_lock around each page. It's impossible to balance 1197 * this, so instead we remove the pages from the LRU while processing them. 1198 * It is safe to rely on PG_active against the non-LRU pages in here because 1199 * nobody will play with that bit on a non-LRU page. 1200 * 1201 * The downside is that we have to touch page->_count against each page. 1202 * But we had to alter page->flags anyway. 1203 */ 1204 1205 1206static void shrink_active_list(unsigned long nr_pages, struct zone *zone, 1207 struct scan_control *sc, int priority, int file) 1208{ 1209 unsigned long pgmoved; 1210 int pgdeactivate = 0; 1211 unsigned long pgscanned; 1212 LIST_HEAD(l_hold); /* The pages which were snipped off */ 1213 LIST_HEAD(l_inactive); 1214 struct page *page; 1215 struct pagevec pvec; 1216 enum lru_list lru; 1217 struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc); 1218 1219 lru_add_drain(); 1220 spin_lock_irq(&zone->lru_lock); 1221 pgmoved = sc->isolate_pages(nr_pages, &l_hold, &pgscanned, sc->order, 1222 ISOLATE_ACTIVE, zone, 1223 sc->mem_cgroup, 1, file); 1224 /* 1225 * zone->pages_scanned is used for detect zone's oom 1226 * mem_cgroup remembers nr_scan by itself. 1227 */ 1228 if (scan_global_lru(sc)) { 1229 zone->pages_scanned += pgscanned; 1230 reclaim_stat->recent_scanned[!!file] += pgmoved; 1231 } 1232 1233 if (file) 1234 __mod_zone_page_state(zone, NR_ACTIVE_FILE, -pgmoved); 1235 else 1236 __mod_zone_page_state(zone, NR_ACTIVE_ANON, -pgmoved); 1237 spin_unlock_irq(&zone->lru_lock); 1238 1239 pgmoved = 0; 1240 while (!list_empty(&l_hold)) { 1241 cond_resched(); 1242 page = lru_to_page(&l_hold); 1243 list_del(&page->lru); 1244 1245 if (unlikely(!page_evictable(page, NULL))) { 1246 putback_lru_page(page); 1247 continue; 1248 } 1249 1250 /* page_referenced clears PageReferenced */ 1251 if (page_mapping_inuse(page) && 1252 page_referenced(page, 0, sc->mem_cgroup)) 1253 pgmoved++; 1254 1255 list_add(&page->lru, &l_inactive); 1256 } 1257 1258 /* 1259 * Move the pages to the [file or anon] inactive list. 1260 */ 1261 pagevec_init(&pvec, 1); 1262 pgmoved = 0; 1263 lru = LRU_BASE + file * LRU_FILE; 1264 1265 spin_lock_irq(&zone->lru_lock); 1266 /* 1267 * Count referenced pages from currently used mappings as 1268 * rotated, even though they are moved to the inactive list. 1269 * This helps balance scan pressure between file and anonymous 1270 * pages in get_scan_ratio. 
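	 *
	 * (recent_rotated is what get_scan_ratio() divides by when computing
	 * ap and fp: the more rotations a list has seen relative to pages
	 * scanned, the less pressure it receives on subsequent passes.)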
1271 */ 1272 if (scan_global_lru(sc)) 1273 reclaim_stat->recent_rotated[!!file] += pgmoved; 1274 1275 while (!list_empty(&l_inactive)) { 1276 page = lru_to_page(&l_inactive); 1277 prefetchw_prev_lru_page(page, &l_inactive, flags); 1278 VM_BUG_ON(PageLRU(page)); 1279 SetPageLRU(page); 1280 VM_BUG_ON(!PageActive(page)); 1281 ClearPageActive(page); 1282 1283 list_move(&page->lru, &zone->lru[lru].list); 1284 mem_cgroup_add_lru_list(page, lru); 1285 pgmoved++; 1286 if (!pagevec_add(&pvec, page)) { 1287 __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved); 1288 spin_unlock_irq(&zone->lru_lock); 1289 pgdeactivate += pgmoved; 1290 pgmoved = 0; 1291 if (buffer_heads_over_limit) 1292 pagevec_strip(&pvec); 1293 __pagevec_release(&pvec); 1294 spin_lock_irq(&zone->lru_lock); 1295 } 1296 } 1297 __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved); 1298 pgdeactivate += pgmoved; 1299 if (buffer_heads_over_limit) { 1300 spin_unlock_irq(&zone->lru_lock); 1301 pagevec_strip(&pvec); 1302 spin_lock_irq(&zone->lru_lock); 1303 } 1304 __count_zone_vm_events(PGREFILL, zone, pgscanned); 1305 __count_vm_events(PGDEACTIVATE, pgdeactivate); 1306 spin_unlock_irq(&zone->lru_lock); 1307 if (vm_swap_full()) 1308 pagevec_swap_free(&pvec); 1309 1310 pagevec_release(&pvec); 1311} 1312 1313static int inactive_anon_is_low_global(struct zone *zone) 1314{ 1315 unsigned long active, inactive; 1316 1317 active = zone_page_state(zone, NR_ACTIVE_ANON); 1318 inactive = zone_page_state(zone, NR_INACTIVE_ANON); 1319 1320 if (inactive * zone->inactive_ratio < active) 1321 return 1; 1322 1323 return 0; 1324} 1325 1326/** 1327 * inactive_anon_is_low - check if anonymous pages need to be deactivated 1328 * @zone: zone to check 1329 * @sc: scan control of this context 1330 * 1331 * Returns true if the zone does not have enough inactive anon pages, 1332 * meaning some active anon pages need to be deactivated. 1333 */ 1334static int inactive_anon_is_low(struct zone *zone, struct scan_control *sc) 1335{ 1336 int low; 1337 1338 if (scan_global_lru(sc)) 1339 low = inactive_anon_is_low_global(zone); 1340 else 1341 low = mem_cgroup_inactive_anon_is_low(sc->mem_cgroup, zone); 1342 return low; 1343} 1344 1345static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan, 1346 struct zone *zone, struct scan_control *sc, int priority) 1347{ 1348 int file = is_file_lru(lru); 1349 1350 if (lru == LRU_ACTIVE_FILE) { 1351 shrink_active_list(nr_to_scan, zone, sc, priority, file); 1352 return 0; 1353 } 1354 1355 if (lru == LRU_ACTIVE_ANON && inactive_anon_is_low(zone, sc)) { 1356 shrink_active_list(nr_to_scan, zone, sc, priority, file); 1357 return 0; 1358 } 1359 return shrink_inactive_list(nr_to_scan, zone, sc, priority, file); 1360} 1361 1362/* 1363 * Determine how aggressively the anon and file LRU lists should be 1364 * scanned. The relative value of each set of LRU lists is determined 1365 * by looking at the fraction of the pages scanned we did rotate back 1366 * onto the active list instead of evict. 1367 * 1368 * percent[0] specifies how much pressure to put on ram/swap backed 1369 * memory, while percent[1] determines pressure on the file LRUs. 1370 */ 1371static void get_scan_ratio(struct zone *zone, struct scan_control *sc, 1372 unsigned long *percent) 1373{ 1374 unsigned long anon, file, free; 1375 unsigned long anon_prio, file_prio; 1376 unsigned long ap, fp; 1377 struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc); 1378 1379 /* If we have no swap space, do not bother scanning anon pages. 
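	 *
	 * (For a feel of the arithmetic done further down: with the default
	 * swappiness of 60, anon_prio is 60 and file_prio is 140; if both
	 * lists show similar recent_scanned/recent_rotated ratios, ap:fp is
	 * roughly 61:141, i.e. about 30% of the pressure goes to the anon
	 * lists and 70% to the file lists.  Illustrative figures only.)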
*/ 1380 if (nr_swap_pages <= 0) { 1381 percent[0] = 0; 1382 percent[1] = 100; 1383 return; 1384 } 1385 1386 anon = zone_nr_pages(zone, sc, LRU_ACTIVE_ANON) + 1387 zone_nr_pages(zone, sc, LRU_INACTIVE_ANON); 1388 file = zone_nr_pages(zone, sc, LRU_ACTIVE_FILE) + 1389 zone_nr_pages(zone, sc, LRU_INACTIVE_FILE); 1390 1391 if (scan_global_lru(sc)) { 1392 free = zone_page_state(zone, NR_FREE_PAGES); 1393 /* If we have very few page cache pages, 1394 force-scan anon pages. */ 1395 if (unlikely(file + free <= zone->pages_high)) { 1396 percent[0] = 100; 1397 percent[1] = 0; 1398 return; 1399 } 1400 } 1401 1402 /* 1403 * OK, so we have swap space and a fair amount of page cache 1404 * pages. We use the recently rotated / recently scanned 1405 * ratios to determine how valuable each cache is. 1406 * 1407 * Because workloads change over time (and to avoid overflow) 1408 * we keep these statistics as a floating average, which ends 1409 * up weighing recent references more than old ones. 1410 * 1411 * anon in [0], file in [1] 1412 */ 1413 if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) { 1414 spin_lock_irq(&zone->lru_lock); 1415 reclaim_stat->recent_scanned[0] /= 2; 1416 reclaim_stat->recent_rotated[0] /= 2; 1417 spin_unlock_irq(&zone->lru_lock); 1418 } 1419 1420 if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) { 1421 spin_lock_irq(&zone->lru_lock); 1422 reclaim_stat->recent_scanned[1] /= 2; 1423 reclaim_stat->recent_rotated[1] /= 2; 1424 spin_unlock_irq(&zone->lru_lock); 1425 } 1426 1427 /* 1428 * With swappiness at 100, anonymous and file have the same priority. 1429 * This scanning priority is essentially the inverse of IO cost. 1430 */ 1431 anon_prio = sc->swappiness; 1432 file_prio = 200 - sc->swappiness; 1433 1434 /* 1435 * The amount of pressure on anon vs file pages is inversely 1436 * proportional to the fraction of recently scanned pages on 1437 * each list that were recently referenced and in active use. 1438 */ 1439 ap = (anon_prio + 1) * (reclaim_stat->recent_scanned[0] + 1); 1440 ap /= reclaim_stat->recent_rotated[0] + 1; 1441 1442 fp = (file_prio + 1) * (reclaim_stat->recent_scanned[1] + 1); 1443 fp /= reclaim_stat->recent_rotated[1] + 1; 1444 1445 /* Normalize to percentages */ 1446 percent[0] = 100 * ap / (ap + fp + 1); 1447 percent[1] = 100 - percent[0]; 1448} 1449 1450 1451/* 1452 * This is a basic per-zone page freer. Used by both kswapd and direct reclaim. 1453 */ 1454static void shrink_zone(int priority, struct zone *zone, 1455 struct scan_control *sc) 1456{ 1457 unsigned long nr[NR_LRU_LISTS]; 1458 unsigned long nr_to_scan; 1459 unsigned long percent[2]; /* anon @ 0; file @ 1 */ 1460 enum lru_list l; 1461 unsigned long nr_reclaimed = sc->nr_reclaimed; 1462 unsigned long swap_cluster_max = sc->swap_cluster_max; 1463 1464 get_scan_ratio(zone, sc, percent); 1465 1466 for_each_evictable_lru(l) { 1467 if (scan_global_lru(sc)) { 1468 int file = is_file_lru(l); 1469 int scan; 1470 1471 scan = zone_page_state(zone, NR_LRU_BASE + l); 1472 if (priority) { 1473 scan >>= priority; 1474 scan = (scan * percent[file]) / 100; 1475 } 1476 zone->lru[l].nr_scan += scan; 1477 nr[l] = zone->lru[l].nr_scan; 1478 if (nr[l] >= swap_cluster_max) 1479 zone->lru[l].nr_scan = 0; 1480 else 1481 nr[l] = 0; 1482 } else { 1483 /* 1484 * This reclaim occurs not because zone memory shortage 1485 * but because memory controller hits its limit. 1486 * Don't modify zone reclaim related data. 
1487 */ 1488 nr[l] = mem_cgroup_calc_reclaim(sc->mem_cgroup, zone, 1489 priority, l); 1490 } 1491 } 1492 1493 while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] || 1494 nr[LRU_INACTIVE_FILE]) { 1495 for_each_evictable_lru(l) { 1496 if (nr[l]) { 1497 nr_to_scan = min(nr[l], swap_cluster_max); 1498 nr[l] -= nr_to_scan; 1499 1500 nr_reclaimed += shrink_list(l, nr_to_scan, 1501 zone, sc, priority); 1502 } 1503 } 1504 /* 1505 * On large memory systems, scan >> priority can become 1506 * really large. This is fine for the starting priority; 1507 * we want to put equal scanning pressure on each zone. 1508 * However, if the VM has a harder time of freeing pages, 1509 * with multiple processes reclaiming pages, the total 1510 * freeing target can get unreasonably large. 1511 */ 1512 if (nr_reclaimed > swap_cluster_max && 1513 priority < DEF_PRIORITY && !current_is_kswapd()) 1514 break; 1515 } 1516 1517 sc->nr_reclaimed = nr_reclaimed; 1518 1519 /* 1520 * Even if we did not try to evict anon pages at all, we want to 1521 * rebalance the anon lru active/inactive ratio. 1522 */ 1523 if (inactive_anon_is_low(zone, sc)) 1524 shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0); 1525 1526 throttle_vm_writeout(sc->gfp_mask); 1527} 1528 1529/* 1530 * This is the direct reclaim path, for page-allocating processes. We only 1531 * try to reclaim pages from zones which will satisfy the caller's allocation 1532 * request. 1533 * 1534 * We reclaim from a zone even if that zone is over pages_high. Because: 1535 * a) The caller may be trying to free *extra* pages to satisfy a higher-order 1536 * allocation or 1537 * b) The zones may be over pages_high but they must go *over* pages_high to 1538 * satisfy the `incremental min' zone defense algorithm. 1539 * 1540 * If a zone is deemed to be full of pinned pages then just give it a light 1541 * scan then give up on it. 1542 */ 1543static void shrink_zones(int priority, struct zonelist *zonelist, 1544 struct scan_control *sc) 1545{ 1546 enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask); 1547 struct zoneref *z; 1548 struct zone *zone; 1549 1550 sc->all_unreclaimable = 1; 1551 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) { 1552 if (!populated_zone(zone)) 1553 continue; 1554 /* 1555 * Take care memory controller reclaiming has small influence 1556 * to global LRU. 1557 */ 1558 if (scan_global_lru(sc)) { 1559 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) 1560 continue; 1561 note_zone_scanning_priority(zone, priority); 1562 1563 if (zone_is_all_unreclaimable(zone) && 1564 priority != DEF_PRIORITY) 1565 continue; /* Let kswapd poll it */ 1566 sc->all_unreclaimable = 0; 1567 } else { 1568 /* 1569 * Ignore cpuset limitation here. We just want to reduce 1570 * # of used pages by us regardless of memory shortage. 1571 */ 1572 sc->all_unreclaimable = 0; 1573 mem_cgroup_note_reclaim_priority(sc->mem_cgroup, 1574 priority); 1575 } 1576 1577 shrink_zone(priority, zone, sc); 1578 } 1579} 1580 1581/* 1582 * This is the main entry point to direct page reclaim. 1583 * 1584 * If a full scan of the inactive list fails to free enough memory then we 1585 * are "out of memory" and something needs to be killed. 1586 * 1587 * If the caller is !__GFP_FS then the probability of a failure is reasonably 1588 * high - the zone may be full of dirty or under-writeback pages, which this 1589 * caller can't do much about. We kick pdflush and take explicit naps in the 1590 * hope that some of these pages can be written. 
But if the allocating task 1591 * holds filesystem locks which prevent writeout this might not work, and the 1592 * allocation attempt will fail. 1593 * 1594 * returns: 0, if no pages reclaimed 1595 * else, the number of pages reclaimed 1596 */ 1597static unsigned long do_try_to_free_pages(struct zonelist *zonelist, 1598 struct scan_control *sc) 1599{ 1600 int priority; 1601 unsigned long ret = 0; 1602 unsigned long total_scanned = 0; 1603 struct reclaim_state *reclaim_state = current->reclaim_state; 1604 unsigned long lru_pages = 0; 1605 struct zoneref *z; 1606 struct zone *zone; 1607 enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask); 1608 1609 delayacct_freepages_start(); 1610 1611 if (scan_global_lru(sc)) 1612 count_vm_event(ALLOCSTALL); 1613 /* 1614 * mem_cgroup will not do shrink_slab. 1615 */ 1616 if (scan_global_lru(sc)) { 1617 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) { 1618 1619 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) 1620 continue; 1621 1622 lru_pages += zone_lru_pages(zone); 1623 } 1624 } 1625 1626 for (priority = DEF_PRIORITY; priority >= 0; priority--) { 1627 sc->nr_scanned = 0; 1628 if (!priority) 1629 disable_swap_token(); 1630 shrink_zones(priority, zonelist, sc); 1631 /* 1632 * Don't shrink slabs when reclaiming memory from 1633 * over limit cgroups 1634 */ 1635 if (scan_global_lru(sc)) { 1636 shrink_slab(sc->nr_scanned, sc->gfp_mask, lru_pages); 1637 if (reclaim_state) { 1638 sc->nr_reclaimed += reclaim_state->reclaimed_slab; 1639 reclaim_state->reclaimed_slab = 0; 1640 } 1641 } 1642 total_scanned += sc->nr_scanned; 1643 if (sc->nr_reclaimed >= sc->swap_cluster_max) { 1644 ret = sc->nr_reclaimed; 1645 goto out; 1646 } 1647 1648 /* 1649 * Try to write back as many pages as we just scanned. This 1650 * tends to cause slow streaming writers to write data to the 1651 * disk smoothly, at the dirtying rate, which is nice. But 1652 * that's undesirable in laptop mode, where we *want* lumpy 1653 * writeout. So in laptop mode, write out the whole world. 1654 */ 1655 if (total_scanned > sc->swap_cluster_max + 1656 sc->swap_cluster_max / 2) { 1657 wakeup_pdflush(laptop_mode ? 0 : total_scanned); 1658 sc->may_writepage = 1; 1659 } 1660 1661 /* Take a nap, wait for some writeback to complete */ 1662 if (sc->nr_scanned && priority < DEF_PRIORITY - 2) 1663 congestion_wait(WRITE, HZ/10); 1664 } 1665 /* top priority shrink_zones still had more to do? don't OOM, then */ 1666 if (!sc->all_unreclaimable && scan_global_lru(sc)) 1667 ret = sc->nr_reclaimed; 1668out: 1669 /* 1670 * Now that we've scanned all the zones at this priority level, note 1671 * that level within the zone so that the next thread which performs 1672 * scanning of this zone will immediately start out at this priority 1673 * level. This affects only the decision whether or not to bring 1674 * mapped pages onto the inactive list. 
1675 */ 1676 if (priority < 0) 1677 priority = 0; 1678 1679 if (scan_global_lru(sc)) { 1680 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) { 1681 1682 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) 1683 continue; 1684 1685 zone->prev_priority = priority; 1686 } 1687 } else 1688 mem_cgroup_record_reclaim_priority(sc->mem_cgroup, priority); 1689 1690 delayacct_freepages_end(); 1691 1692 return ret; 1693} 1694 1695unsigned long try_to_free_pages(struct zonelist *zonelist, int order, 1696 gfp_t gfp_mask) 1697{ 1698 struct scan_control sc = { 1699 .gfp_mask = gfp_mask, 1700 .may_writepage = !laptop_mode, 1701 .swap_cluster_max = SWAP_CLUSTER_MAX, 1702 .may_swap = 1, 1703 .swappiness = vm_swappiness, 1704 .order = order, 1705 .mem_cgroup = NULL, 1706 .isolate_pages = isolate_pages_global, 1707 }; 1708 1709 return do_try_to_free_pages(zonelist, &sc); 1710} 1711 1712#ifdef CONFIG_CGROUP_MEM_RES_CTLR 1713 1714unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont, 1715 gfp_t gfp_mask, 1716 bool noswap) 1717{ 1718 struct scan_control sc = { 1719 .may_writepage = !laptop_mode, 1720 .may_swap = 1, 1721 .swap_cluster_max = SWAP_CLUSTER_MAX, 1722 .swappiness = vm_swappiness, 1723 .order = 0, 1724 .mem_cgroup = mem_cont, 1725 .isolate_pages = mem_cgroup_isolate_pages, 1726 }; 1727 struct zonelist *zonelist; 1728 1729 if (noswap) 1730 sc.may_swap = 0; 1731 1732 sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | 1733 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK); 1734 zonelist = NODE_DATA(numa_node_id())->node_zonelists; 1735 return do_try_to_free_pages(zonelist, &sc); 1736} 1737#endif 1738 1739/* 1740 * For kswapd, balance_pgdat() will work across all this node's zones until 1741 * they are all at pages_high. 1742 * 1743 * Returns the number of pages which were actually freed. 1744 * 1745 * There is special handling here for zones which are full of pinned pages. 1746 * This can happen if the pages are all mlocked, or if they are all used by 1747 * device drivers (say, ZONE_DMA). Or if they are all in use by hugetlb. 1748 * What we do is to detect the case where all pages in the zone have been 1749 * scanned twice and there has been zero successful reclaim. Mark the zone as 1750 * dead and from now on, only perform a short scan. Basically we're polling 1751 * the zone for when the problem goes away. 1752 * 1753 * kswapd scans the zones in the highmem->normal->dma direction. It skips 1754 * zones which have free_pages > pages_high, but once a zone is found to have 1755 * free_pages <= pages_high, we scan that zone and the lower zones regardless 1756 * of the number of free pages in the lower zones. This interoperates with 1757 * the page allocator fallback scheme to ensure that aging of pages is balanced 1758 * across the zones. 1759 */ 1760static unsigned long balance_pgdat(pg_data_t *pgdat, int order) 1761{ 1762 int all_zones_ok; 1763 int priority; 1764 int i; 1765 unsigned long total_scanned; 1766 struct reclaim_state *reclaim_state = current->reclaim_state; 1767 struct scan_control sc = { 1768 .gfp_mask = GFP_KERNEL, 1769 .may_swap = 1, 1770 .swap_cluster_max = SWAP_CLUSTER_MAX, 1771 .swappiness = vm_swappiness, 1772 .order = order, 1773 .mem_cgroup = NULL, 1774 .isolate_pages = isolate_pages_global, 1775 }; 1776 /* 1777 * temp_priority is used to remember the scanning priority at which 1778 * this zone was successfully refilled to free_pages == pages_high. 
1779 */ 1780 int temp_priority[MAX_NR_ZONES]; 1781 1782loop_again: 1783 total_scanned = 0; 1784 sc.nr_reclaimed = 0; 1785 sc.may_writepage = !laptop_mode; 1786 count_vm_event(PAGEOUTRUN); 1787 1788 for (i = 0; i < pgdat->nr_zones; i++) 1789 temp_priority[i] = DEF_PRIORITY; 1790 1791 for (priority = DEF_PRIORITY; priority >= 0; priority--) { 1792 int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */ 1793 unsigned long lru_pages = 0; 1794 1795 /* The swap token gets in the way of swapout... */ 1796 if (!priority) 1797 disable_swap_token(); 1798 1799 all_zones_ok = 1; 1800 1801 /* 1802 * Scan in the highmem->dma direction for the highest 1803 * zone which needs scanning 1804 */ 1805 for (i = pgdat->nr_zones - 1; i >= 0; i--) { 1806 struct zone *zone = pgdat->node_zones + i; 1807 1808 if (!populated_zone(zone)) 1809 continue; 1810 1811 if (zone_is_all_unreclaimable(zone) && 1812 priority != DEF_PRIORITY) 1813 continue; 1814 1815 /* 1816 * Do some background aging of the anon list, to give 1817 * pages a chance to be referenced before reclaiming. 1818 */ 1819 if (inactive_anon_is_low(zone, &sc)) 1820 shrink_active_list(SWAP_CLUSTER_MAX, zone, 1821 &sc, priority, 0); 1822 1823 if (!zone_watermark_ok(zone, order, zone->pages_high, 1824 0, 0)) { 1825 end_zone = i; 1826 break; 1827 } 1828 } 1829 if (i < 0) 1830 goto out; 1831 1832 for (i = 0; i <= end_zone; i++) { 1833 struct zone *zone = pgdat->node_zones + i; 1834 1835 lru_pages += zone_lru_pages(zone); 1836 } 1837 1838 /* 1839 * Now scan the zone in the dma->highmem direction, stopping 1840 * at the last zone which needs scanning. 1841 * 1842 * We do this because the page allocator works in the opposite 1843 * direction. This prevents the page allocator from allocating 1844 * pages behind kswapd's direction of progress, which would 1845 * cause too much scanning of the lower zones. 1846 */ 1847 for (i = 0; i <= end_zone; i++) { 1848 struct zone *zone = pgdat->node_zones + i; 1849 int nr_slab; 1850 1851 if (!populated_zone(zone)) 1852 continue; 1853 1854 if (zone_is_all_unreclaimable(zone) && 1855 priority != DEF_PRIORITY) 1856 continue; 1857 1858 if (!zone_watermark_ok(zone, order, zone->pages_high, 1859 end_zone, 0)) 1860 all_zones_ok = 0; 1861 temp_priority[i] = priority; 1862 sc.nr_scanned = 0; 1863 note_zone_scanning_priority(zone, priority); 1864 /* 1865 * We put equal pressure on every zone, unless one 1866 * zone has way too many pages free already. 1867 */ 1868 if (!zone_watermark_ok(zone, order, 8*zone->pages_high, 1869 end_zone, 0)) 1870 shrink_zone(priority, zone, &sc); 1871 reclaim_state->reclaimed_slab = 0; 1872 nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL, 1873 lru_pages); 1874 sc.nr_reclaimed += reclaim_state->reclaimed_slab; 1875 total_scanned += sc.nr_scanned; 1876 if (zone_is_all_unreclaimable(zone)) 1877 continue; 1878 if (nr_slab == 0 && zone->pages_scanned >= 1879 (zone_lru_pages(zone) * 6)) 1880 zone_set_flag(zone, 1881 ZONE_ALL_UNRECLAIMABLE); 1882 /* 1883 * If we've done a decent amount of scanning and 1884 * the reclaim ratio is low, start doing writepage 1885 * even in laptop mode 1886 */ 1887 if (total_scanned > SWAP_CLUSTER_MAX * 2 && 1888 total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2) 1889 sc.may_writepage = 1; 1890 } 1891 if (all_zones_ok) 1892 break; /* kswapd: all done */ 1893 /* 1894 * OK, kswapd is getting into trouble. Take a nap, then take 1895 * another pass across the zones. 
		 */
		if (total_scanned && priority < DEF_PRIORITY - 2)
			congestion_wait(WRITE, HZ/10);

		/*
		 * We do this so kswapd doesn't build up large priorities for
		 * example when it is freeing in parallel with allocators. It
		 * matches the direct reclaim path behaviour in terms of impact
		 * on zone->*_priority.
		 */
		if (sc.nr_reclaimed >= SWAP_CLUSTER_MAX)
			break;
	}
out:
	/*
	 * Note within each zone the priority level at which this zone was
	 * brought into a happy state.  So that the next thread which scans this
	 * zone will start out at that priority level.
	 */
	for (i = 0; i < pgdat->nr_zones; i++) {
		struct zone *zone = pgdat->node_zones + i;

		zone->prev_priority = temp_priority[i];
	}
	if (!all_zones_ok) {
		cond_resched();

		try_to_freeze();

		/*
		 * Fragmentation may mean that the system cannot be
		 * rebalanced for high-order allocations in all zones.
		 * At this point, if nr_reclaimed < SWAP_CLUSTER_MAX,
		 * it means the zones have been fully scanned and are still
		 * not balanced. For high-order allocations, there is
		 * little point trying all over again as kswapd may
		 * loop forever.
		 *
		 * Instead, recheck all watermarks at order-0 as they
		 * are the most important. If watermarks are ok, kswapd will go
		 * back to sleep. High-order users can still perform direct
		 * reclaim if they wish.
		 */
		if (sc.nr_reclaimed < SWAP_CLUSTER_MAX)
			order = sc.order = 0;

		goto loop_again;
	}

	return sc.nr_reclaimed;
}
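
/*
 * Condensed sketch (hypothetical helper, not used above) of the per-zone
 * test balance_pgdat() keeps applying: the node counts as balanced once
 * every populated zone sits above its pages_high watermark for the
 * requested order.
 */
#if 0
static int example_node_is_balanced(pg_data_t *pgdat, int order)
{
	int i;

	for (i = 0; i < pgdat->nr_zones; i++) {
		struct zone *zone = pgdat->node_zones + i;

		if (!populated_zone(zone))
			continue;
		if (!zone_watermark_ok(zone, order, zone->pages_high, 0, 0))
			return 0;	/* this zone still needs reclaim */
	}
	return 1;
}
#endif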

/*
 * The background pageout daemon, started as a kernel thread
 * from the init process.
 *
 * This basically trickles out pages so that we have _some_
 * free memory available even if there is no other activity
 * that frees anything up. This is needed for things like routing
 * etc, where we otherwise might have all activity going on in
 * asynchronous contexts that cannot page things out.
 *
 * If there are applications that are active memory-allocators
 * (most normal use), this basically shouldn't matter.
 */
static int kswapd(void *p)
{
	unsigned long order;
	pg_data_t *pgdat = (pg_data_t*)p;
	struct task_struct *tsk = current;
	DEFINE_WAIT(wait);
	struct reclaim_state reclaim_state = {
		.reclaimed_slab = 0,
	};
	node_to_cpumask_ptr(cpumask, pgdat->node_id);

	if (!cpumask_empty(cpumask))
		set_cpus_allowed_ptr(tsk, cpumask);
	current->reclaim_state = &reclaim_state;

	/*
	 * Tell the memory management that we're a "memory allocator",
	 * and that if we need more memory we should get access to it
	 * regardless (see "__alloc_pages()"). "kswapd" should
	 * never get caught in the normal page freeing logic.
	 *
	 * (Kswapd normally doesn't need memory anyway, but sometimes
	 * you need a small amount of memory in order to be able to
	 * page out something else, and this flag essentially protects
	 * us from recursively trying to free more memory as we're
	 * trying to free the first piece of memory in the first place).
	 */
	tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
	set_freezable();

	order = 0;
	for ( ; ; ) {
		unsigned long new_order;

		prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
		new_order = pgdat->kswapd_max_order;
		pgdat->kswapd_max_order = 0;
		if (order < new_order) {
			/*
			 * Don't sleep if someone wants a larger 'order'
			 * allocation
			 */
			order = new_order;
		} else {
			if (!freezing(current))
				schedule();

			order = pgdat->kswapd_max_order;
		}
		finish_wait(&pgdat->kswapd_wait, &wait);

		if (!try_to_freeze()) {
			/* We can speed up thawing tasks if we don't call
			 * balance_pgdat after returning from the refrigerator
			 */
			balance_pgdat(pgdat, order);
		}
	}
	return 0;
}

/*
 * A zone is low on free memory, so wake its kswapd task to service it.
 */
void wakeup_kswapd(struct zone *zone, int order)
{
	pg_data_t *pgdat;

	if (!populated_zone(zone))
		return;

	pgdat = zone->zone_pgdat;
	if (zone_watermark_ok(zone, order, zone->pages_low, 0, 0))
		return;
	if (pgdat->kswapd_max_order < order)
		pgdat->kswapd_max_order = order;
	if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
		return;
	if (!waitqueue_active(&pgdat->kswapd_wait))
		return;
	wake_up_interruptible(&pgdat->kswapd_wait);
}

unsigned long global_lru_pages(void)
{
	return global_page_state(NR_ACTIVE_ANON)
		+ global_page_state(NR_ACTIVE_FILE)
		+ global_page_state(NR_INACTIVE_ANON)
		+ global_page_state(NR_INACTIVE_FILE);
}

#ifdef CONFIG_PM
/*
 * Helper function for shrink_all_memory().  Tries to reclaim 'nr_pages' pages
 * from LRU lists system-wide, for given pass and priority, and returns the
 * number of reclaimed pages.
 *
 * For pass > 3 we also try to shrink the LRU lists that contain only a few
 * pages.
 */
static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
				      int pass, struct scan_control *sc)
{
	struct zone *zone;
	unsigned long nr_to_scan, ret = 0;
	enum lru_list l;

	for_each_zone(zone) {

		if (!populated_zone(zone))
			continue;

		if (zone_is_all_unreclaimable(zone) && prio != DEF_PRIORITY)
			continue;

		for_each_evictable_lru(l) {
			/* For pass = 0, we don't shrink the active list */
			if (pass == 0 &&
			    (l == LRU_ACTIVE || l == LRU_ACTIVE_FILE))
				continue;

			zone->lru[l].nr_scan +=
				(zone_page_state(zone, NR_LRU_BASE + l)
								>> prio) + 1;
			if (zone->lru[l].nr_scan >= nr_pages || pass > 3) {
				zone->lru[l].nr_scan = 0;
				nr_to_scan = min(nr_pages,
					zone_page_state(zone,
							NR_LRU_BASE + l));
				ret += shrink_list(l, nr_to_scan, zone,
								sc, prio);
				if (ret >= nr_pages)
					return ret;
			}
		}
	}

	return ret;
}
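
/*
 * Worked example for the nr_scan arithmetic in shrink_all_zones() above
 * (hypothetical helper, illustration only): each call adds roughly
 * lru_size >> prio pages of scan credit, so a 1,000,000 page list
 * contributes 245 pages at prio 12 (DEF_PRIORITY) and the whole list
 * plus one page at prio 0.
 */
#if 0
static unsigned long example_scan_credit(unsigned long lru_size, int prio)
{
	return (lru_size >> prio) + 1;
}
#endif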

/*
 * Try to free `nr_pages' of memory, system-wide, and return the number of
 * freed pages.
 *
 * Rather than trying to age LRUs the aim is to preserve the overall
 * LRU order by reclaiming preferentially
 * inactive > active > active referenced > active mapped
 */
unsigned long shrink_all_memory(unsigned long nr_pages)
{
	unsigned long lru_pages, nr_slab;
	unsigned long ret = 0;
	int pass;
	struct reclaim_state reclaim_state;
	struct scan_control sc = {
		.gfp_mask = GFP_KERNEL,
		.may_swap = 0,
		.swap_cluster_max = nr_pages,
		.may_writepage = 1,
		.swappiness = vm_swappiness,
		.isolate_pages = isolate_pages_global,
	};

	current->reclaim_state = &reclaim_state;

	lru_pages = global_lru_pages();
	nr_slab = global_page_state(NR_SLAB_RECLAIMABLE);
	/* If slab caches are huge, it's better to hit them first */
	while (nr_slab >= lru_pages) {
		reclaim_state.reclaimed_slab = 0;
		shrink_slab(nr_pages, sc.gfp_mask, lru_pages);
		if (!reclaim_state.reclaimed_slab)
			break;

		ret += reclaim_state.reclaimed_slab;
		if (ret >= nr_pages)
			goto out;

		nr_slab -= reclaim_state.reclaimed_slab;
	}

	/*
	 * We try to shrink LRUs in 5 passes:
	 * 0 = Reclaim from inactive_list only
	 * 1 = Reclaim from active list but don't reclaim mapped
	 * 2 = 2nd pass of type 1
	 * 3 = Reclaim mapped (normal reclaim)
	 * 4 = 2nd pass of type 3
	 */
	for (pass = 0; pass < 5; pass++) {
		int prio;

		/* Force reclaiming mapped pages in the passes #3 and #4 */
		if (pass > 2) {
			sc.may_swap = 1;
			sc.swappiness = 100;
		}

		for (prio = DEF_PRIORITY; prio >= 0; prio--) {
			unsigned long nr_to_scan = nr_pages - ret;

			sc.nr_scanned = 0;
			ret += shrink_all_zones(nr_to_scan, prio, pass, &sc);
			if (ret >= nr_pages)
				goto out;

			reclaim_state.reclaimed_slab = 0;
			shrink_slab(sc.nr_scanned, sc.gfp_mask,
					global_lru_pages());
			ret += reclaim_state.reclaimed_slab;
			if (ret >= nr_pages)
				goto out;

			if (sc.nr_scanned && prio < DEF_PRIORITY - 2)
				congestion_wait(WRITE, HZ / 10);
		}
	}

	/*
	 * If ret = 0, we could not shrink LRUs, but there may be something
	 * in slab caches
	 */
	if (!ret) {
		do {
			reclaim_state.reclaimed_slab = 0;
			shrink_slab(nr_pages, sc.gfp_mask, global_lru_pages());
			ret += reclaim_state.reclaimed_slab;
		} while (ret < nr_pages && reclaim_state.reclaimed_slab > 0);
	}

out:
	current->reclaim_state = NULL;

	return ret;
}
#endif
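
/*
 * Hypothetical caller sketch (illustration only): shrink_all_memory() is
 * intended for contexts such as hibernation that must free a known number
 * of pages up front; such a caller would typically loop until the request
 * is met or no further progress is made.
 */
#if 0
static void example_free_pages_for_snapshot(unsigned long pages_needed)
{
	unsigned long freed;

	do {
		freed = shrink_all_memory(pages_needed);
		pages_needed -= min(freed, pages_needed);
	} while (pages_needed && freed);
}
#endif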

/* It's optimal to keep kswapds on the same CPUs as their memory, but
   not required for correctness.  So if the last cpu in a node goes
   away, we get changed to run anywhere: as the first one comes back,
   restore their cpu bindings. */
static int __devinit cpu_callback(struct notifier_block *nfb,
				  unsigned long action, void *hcpu)
{
	int nid;

	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
		for_each_node_state(nid, N_HIGH_MEMORY) {
			pg_data_t *pgdat = NODE_DATA(nid);
			node_to_cpumask_ptr(mask, pgdat->node_id);

			if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
				/* One of our CPUs online: restore mask */
				set_cpus_allowed_ptr(pgdat->kswapd, mask);
		}
	}
	return NOTIFY_OK;
}

/*
 * This kswapd start function will be called by init and node-hot-add.
 * On node-hot-add, kswapd will be moved to the proper cpus if cpus are
 * hot-added.
 */
int kswapd_run(int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);
	int ret = 0;

	if (pgdat->kswapd)
		return 0;

	pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
	if (IS_ERR(pgdat->kswapd)) {
		/* failure at boot is fatal */
		BUG_ON(system_state == SYSTEM_BOOTING);
		printk("Failed to start kswapd on node %d\n", nid);
		ret = -1;
	}
	return ret;
}

static int __init kswapd_init(void)
{
	int nid;

	swap_setup();
	for_each_node_state(nid, N_HIGH_MEMORY)
		kswapd_run(nid);
	hotcpu_notifier(cpu_callback, 0);
	return 0;
}

module_init(kswapd_init)

#ifdef CONFIG_NUMA
/*
 * Zone reclaim mode
 *
 * If non-zero call zone_reclaim when the number of free pages falls below
 * the watermarks.
 */
int zone_reclaim_mode __read_mostly;

#define RECLAIM_OFF 0
#define RECLAIM_ZONE (1<<0)	/* Run shrink_inactive_list on the zone */
#define RECLAIM_WRITE (1<<1)	/* Writeout pages during reclaim */
#define RECLAIM_SWAP (1<<2)	/* Swap pages out during reclaim */

/*
 * Priority for ZONE_RECLAIM. This determines the fraction of pages
 * of a node considered for each zone_reclaim. A priority of 4 scans
 * 1/16th of a zone.
 */
#define ZONE_RECLAIM_PRIORITY 4

/*
 * Percentage of pages in a zone that must be unmapped for zone_reclaim to
 * occur.
 */
int sysctl_min_unmapped_ratio = 1;

/*
 * If the number of slab pages in a zone grows beyond this percentage then
 * slab reclaim needs to occur.
 */
int sysctl_min_slab_ratio = 5;
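
/*
 * Illustrative note (hypothetical helper, not used below): with
 * ZONE_RECLAIM_PRIORITY == 4, the first shrink_zone() pass in
 * __zone_reclaim() considers roughly lru_size >> 4, i.e. 1/16th of the
 * zone's LRU pages; each further pass lowers the priority and doubles
 * that fraction until enough pages have been reclaimed.
 */
#if 0
static unsigned long example_first_pass_scan(unsigned long zone_lru_size)
{
	return zone_lru_size >> ZONE_RECLAIM_PRIORITY;	/* ~1/16th */
}
#endif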

/*
 * Try to free up some pages from this zone through reclaim.
 */
static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
{
	/* Minimum pages needed in order to stay on node */
	const unsigned long nr_pages = 1 << order;
	struct task_struct *p = current;
	struct reclaim_state reclaim_state;
	int priority;
	struct scan_control sc = {
		.may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
		.may_swap = !!(zone_reclaim_mode & RECLAIM_SWAP),
		.swap_cluster_max = max_t(unsigned long, nr_pages,
					SWAP_CLUSTER_MAX),
		.gfp_mask = gfp_mask,
		.swappiness = vm_swappiness,
		.isolate_pages = isolate_pages_global,
	};
	unsigned long slab_reclaimable;

	disable_swap_token();
	cond_resched();
	/*
	 * We need to be able to allocate from the reserves for RECLAIM_SWAP
	 * and we also need to be able to write out pages for RECLAIM_WRITE
	 * and RECLAIM_SWAP.
	 */
	p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
	reclaim_state.reclaimed_slab = 0;
	p->reclaim_state = &reclaim_state;

	if (zone_page_state(zone, NR_FILE_PAGES) -
	    zone_page_state(zone, NR_FILE_MAPPED) >
	    zone->min_unmapped_pages) {
		/*
		 * Free memory by calling shrink zone with increasing
		 * priorities until we have enough memory freed.
		 */
		priority = ZONE_RECLAIM_PRIORITY;
		do {
			note_zone_scanning_priority(zone, priority);
			shrink_zone(priority, zone, &sc);
			priority--;
		} while (priority >= 0 && sc.nr_reclaimed < nr_pages);
	}

	slab_reclaimable = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
	if (slab_reclaimable > zone->min_slab_pages) {
		/*
		 * shrink_slab() does not currently allow us to determine how
		 * many pages were freed in this zone. So we take the current
		 * number of slab pages and shake the slab until it is reduced
		 * by the same nr_pages that we used for reclaiming unmapped
		 * pages.
		 *
		 * Note that shrink_slab will free memory on all zones and may
		 * take a long time.
		 */
		while (shrink_slab(sc.nr_scanned, gfp_mask, order) &&
			zone_page_state(zone, NR_SLAB_RECLAIMABLE) >
				slab_reclaimable - nr_pages)
			;

		/*
		 * Update nr_reclaimed by the number of slab pages we
		 * reclaimed from this zone.
		 */
		sc.nr_reclaimed += slab_reclaimable -
			zone_page_state(zone, NR_SLAB_RECLAIMABLE);
	}

	p->reclaim_state = NULL;
	current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
	return sc.nr_reclaimed >= nr_pages;
}

int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
{
	int node_id;
	int ret;

	/*
	 * Zone reclaim reclaims unmapped file backed pages and
	 * slab pages if we are over the defined limits.
	 *
	 * A small portion of unmapped file backed pages is needed for
	 * file I/O otherwise pages read by file I/O will be immediately
	 * thrown out if the zone is overallocated. So we do not reclaim
	 * if less than a specified percentage of the zone is used by
	 * unmapped file backed pages.
	 */
	if (zone_page_state(zone, NR_FILE_PAGES) -
	    zone_page_state(zone, NR_FILE_MAPPED) <= zone->min_unmapped_pages
	    && zone_page_state(zone, NR_SLAB_RECLAIMABLE)
			<= zone->min_slab_pages)
		return 0;

	if (zone_is_all_unreclaimable(zone))
		return 0;

	/*
	 * Do not scan if the allocation should not be delayed.
	 */
	if (!(gfp_mask & __GFP_WAIT) || (current->flags & PF_MEMALLOC))
		return 0;

	/*
	 * Only run zone reclaim on the local zone or on zones that do not
	 * have associated processors. This will favor the local processor
	 * over remote processors and spread off-node memory allocations
	 * as widely as possible.
	 */
	node_id = zone_to_nid(zone);
	if (node_state(node_id, N_CPU) && node_id != numa_node_id())
		return 0;

	if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED))
		return 0;
	ret = __zone_reclaim(zone, gfp_mask, order);
	zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);

	return ret;
}
#endif
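
/*
 * Condensed sketch (hypothetical helper, illustration only) of the
 * "is reclaim worthwhile?" gate at the top of zone_reclaim() above: only
 * proceed when the zone holds more unmapped file cache than
 * min_unmapped_pages (derived from vm.min_unmapped_ratio) or more
 * reclaimable slab than min_slab_pages (vm.min_slab_ratio).
 */
#if 0
static int example_zone_reclaim_worthwhile(struct zone *zone)
{
	unsigned long unmapped = zone_page_state(zone, NR_FILE_PAGES) -
				 zone_page_state(zone, NR_FILE_MAPPED);

	return unmapped > zone->min_unmapped_pages ||
	       zone_page_state(zone, NR_SLAB_RECLAIMABLE) >
						zone->min_slab_pages;
}
#endif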

#ifdef CONFIG_UNEVICTABLE_LRU
/*
 * page_evictable - test whether a page is evictable
 * @page: the page to test
 * @vma: the VMA in which the page is or will be mapped, may be NULL
 *
 * Test whether page is evictable--i.e., should be placed on active/inactive
 * lists vs unevictable list.  The vma argument is !NULL when called from the
 * fault path to determine how to instantiate a new page.
 *
 * Reasons page might not be evictable:
 * (1) page's mapping marked unevictable
 * (2) page is part of an mlocked VMA
 *
 */
int page_evictable(struct page *page, struct vm_area_struct *vma)
{

	if (mapping_unevictable(page_mapping(page)))
		return 0;

	if (PageMlocked(page) || (vma && is_mlocked_vma(vma, page)))
		return 0;

	return 1;
}

/**
 * check_move_unevictable_page - check page for evictability and move to appropriate zone lru list
 * @page: page to check evictability and move to appropriate lru list
 * @zone: zone page is in
 *
 * Checks a page for evictability and moves the page to the appropriate
 * zone lru list.
 *
 * Restrictions: zone->lru_lock must be held, page must be on LRU and must
 * have PageUnevictable set.
 */
static void check_move_unevictable_page(struct page *page, struct zone *zone)
{
	VM_BUG_ON(PageActive(page));

retry:
	ClearPageUnevictable(page);
	if (page_evictable(page, NULL)) {
		enum lru_list l = LRU_INACTIVE_ANON + page_is_file_cache(page);

		__dec_zone_state(zone, NR_UNEVICTABLE);
		list_move(&page->lru, &zone->lru[l].list);
		mem_cgroup_move_lists(page, LRU_UNEVICTABLE, l);
		__inc_zone_state(zone, NR_INACTIVE_ANON + l);
		__count_vm_event(UNEVICTABLE_PGRESCUED);
	} else {
		/*
		 * rotate unevictable list
		 */
		SetPageUnevictable(page);
		list_move(&page->lru, &zone->lru[LRU_UNEVICTABLE].list);
		mem_cgroup_rotate_lru_list(page, LRU_UNEVICTABLE);
		if (page_evictable(page, NULL))
			goto retry;
	}
}

/**
 * scan_mapping_unevictable_pages - scan an address space for evictable pages
 * @mapping: struct address_space to scan for evictable pages
 *
 * Scan all pages in mapping.  Check unevictable pages for
 * evictability and move them to the appropriate zone lru list.
 */
void scan_mapping_unevictable_pages(struct address_space *mapping)
{
	pgoff_t next = 0;
	pgoff_t end = (i_size_read(mapping->host) + PAGE_CACHE_SIZE - 1) >>
			PAGE_CACHE_SHIFT;
	struct zone *zone;
	struct pagevec pvec;

	if (mapping->nrpages == 0)
		return;

	pagevec_init(&pvec, 0);
	while (next < end &&
		pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		int i;
		int pg_scanned = 0;

		zone = NULL;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index = page->index;
			struct zone *pagezone = page_zone(page);

			pg_scanned++;
			if (page_index > next)
				next = page_index;
			next++;

			if (pagezone != zone) {
				if (zone)
					spin_unlock_irq(&zone->lru_lock);
				zone = pagezone;
				spin_lock_irq(&zone->lru_lock);
			}

			if (PageLRU(page) && PageUnevictable(page))
				check_move_unevictable_page(page, zone);
		}
		if (zone)
			spin_unlock_irq(&zone->lru_lock);
		pagevec_release(&pvec);

		count_vm_events(UNEVICTABLE_PGSCANNED, pg_scanned);
	}

}
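
/*
 * Illustrative sketch (hypothetical helper, not used above) of the
 * LRU-selection rule that check_move_unevictable_page() applies when a
 * page turns out to be evictable: unevictable pages stay on
 * LRU_UNEVICTABLE, evictable ones go to the inactive anon or inactive
 * file list depending on page_is_file_cache().
 */
#if 0
static enum lru_list example_target_lru(struct page *page)
{
	if (!page_evictable(page, NULL))
		return LRU_UNEVICTABLE;
	return LRU_INACTIVE_ANON + page_is_file_cache(page);
}
#endif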

/**
 * scan_zone_unevictable_pages - check unevictable list for evictable pages
 * @zone: zone of which to scan the unevictable list
 *
 * Scan @zone's unevictable LRU lists to check for pages that have become
 * evictable.  Move any that have become evictable to @zone's inactive list,
 * where they become candidates for reclaim unless shrink_inactive_list()
 * decides to reactivate them.  Pages that are still unevictable are rotated
 * back onto @zone's unevictable list.
 */
#define SCAN_UNEVICTABLE_BATCH_SIZE 16UL /* arbitrary lock hold batch size */
static void scan_zone_unevictable_pages(struct zone *zone)
{
	struct list_head *l_unevictable = &zone->lru[LRU_UNEVICTABLE].list;
	unsigned long scan;
	unsigned long nr_to_scan = zone_page_state(zone, NR_UNEVICTABLE);

	while (nr_to_scan > 0) {
		unsigned long batch_size = min(nr_to_scan,
						SCAN_UNEVICTABLE_BATCH_SIZE);

		spin_lock_irq(&zone->lru_lock);
		for (scan = 0; scan < batch_size; scan++) {
			struct page *page = lru_to_page(l_unevictable);

			if (!trylock_page(page))
				continue;

			prefetchw_prev_lru_page(page, l_unevictable, flags);

			if (likely(PageLRU(page) && PageUnevictable(page)))
				check_move_unevictable_page(page, zone);

			unlock_page(page);
		}
		spin_unlock_irq(&zone->lru_lock);

		nr_to_scan -= batch_size;
	}
}


/**
 * scan_all_zones_unevictable_pages - scan all unevictable lists for evictable pages
 *
 * A really big hammer:  scan all zones' unevictable LRU lists to check for
 * pages that have become evictable.  Move those back to the zones'
 * inactive list where they become candidates for reclaim.
 * This occurs when, e.g., we have unswappable pages on the unevictable lists,
 * and we add swap to the system.  As such, it runs in the context of a task
 * that has possibly/probably made some previously unevictable pages
 * evictable.
 */
static void scan_all_zones_unevictable_pages(void)
{
	struct zone *zone;

	for_each_zone(zone) {
		scan_zone_unevictable_pages(zone);
	}
}

/*
 * scan_unevictable_pages [vm] sysctl handler.  On demand re-scan of
 * all nodes' unevictable lists for evictable pages
 */
unsigned long scan_unevictable_pages;

int scan_unevictable_handler(struct ctl_table *table, int write,
			   struct file *file, void __user *buffer,
			   size_t *length, loff_t *ppos)
{
	proc_doulongvec_minmax(table, write, file, buffer, length, ppos);

	if (write && *(unsigned long *)table->data)
		scan_all_zones_unevictable_pages();

	scan_unevictable_pages = 0;
	return 0;
}

/*
 * per node 'scan_unevictable_pages' attribute.  On demand re-scan of
 * a specified node's per zone unevictable lists for evictable pages.
 */

static ssize_t read_scan_unevictable_node(struct sys_device *dev,
					  struct sysdev_attribute *attr,
					  char *buf)
{
	return sprintf(buf, "0\n");	/* always zero; should fit... */
}

static ssize_t write_scan_unevictable_node(struct sys_device *dev,
					   struct sysdev_attribute *attr,
					const char *buf, size_t count)
{
	struct zone *node_zones = NODE_DATA(dev->id)->node_zones;
	struct zone *zone;
	unsigned long res;
	unsigned long req = strict_strtoul(buf, 10, &res);

	if (!req)
		return 1;	/* zero is no-op */

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (!populated_zone(zone))
			continue;
		scan_zone_unevictable_pages(zone);
	}
	return 1;
}


static SYSDEV_ATTR(scan_unevictable_pages, S_IRUGO | S_IWUSR,
			read_scan_unevictable_node,
			write_scan_unevictable_node);

int scan_unevictable_register_node(struct node *node)
{
	return sysdev_create_file(&node->sysdev, &attr_scan_unevictable_pages);
}

void scan_unevictable_unregister_node(struct node *node)
{
	sysdev_remove_file(&node->sysdev, &attr_scan_unevictable_pages);
}

#endif
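
/*
 * Hypothetical usage sketch (illustration only, names are assumptions):
 * node bring-up and tear-down code is expected to pair the two helpers
 * above so each memory node exposes a scan_unevictable_pages attribute
 * under /sys/devices/system/node/nodeN/.
 */
#if 0
static int example_node_add(struct node *node)
{
	return scan_unevictable_register_node(node);
}

static void example_node_remove(struct node *node)
{
	scan_unevictable_unregister_node(node);
}
#endif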