vmscan.c revision 3d80636a0d5f056ffc26472d05b6027a7a9f6e1c
1/* 2 * linux/mm/vmscan.c 3 * 4 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds 5 * 6 * Swap reorganised 29.12.95, Stephen Tweedie. 7 * kswapd added: 7.1.96 sct 8 * Removed kswapd_ctl limits, and swap out as many pages as needed 9 * to bring the system back to freepages.high: 2.4.97, Rik van Riel. 10 * Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com). 11 * Multiqueue VM started 5.8.00, Rik van Riel. 12 */ 13 14#include <linux/mm.h> 15#include <linux/module.h> 16#include <linux/slab.h> 17#include <linux/kernel_stat.h> 18#include <linux/swap.h> 19#include <linux/pagemap.h> 20#include <linux/init.h> 21#include <linux/highmem.h> 22#include <linux/file.h> 23#include <linux/writeback.h> 24#include <linux/blkdev.h> 25#include <linux/buffer_head.h> /* for try_to_release_page(), 26 buffer_heads_over_limit */ 27#include <linux/mm_inline.h> 28#include <linux/pagevec.h> 29#include <linux/backing-dev.h> 30#include <linux/rmap.h> 31#include <linux/topology.h> 32#include <linux/cpu.h> 33#include <linux/cpuset.h> 34#include <linux/notifier.h> 35#include <linux/rwsem.h> 36 37#include <asm/tlbflush.h> 38#include <asm/div64.h> 39 40#include <linux/swapops.h> 41 42/* possible outcome of pageout() */ 43typedef enum { 44 /* failed to write page out, page is locked */ 45 PAGE_KEEP, 46 /* move page to the active list, page is locked */ 47 PAGE_ACTIVATE, 48 /* page has been sent to the disk successfully, page is unlocked */ 49 PAGE_SUCCESS, 50 /* page is clean and locked */ 51 PAGE_CLEAN, 52} pageout_t; 53 54struct scan_control { 55 /* Ask refill_inactive_zone, or shrink_cache to scan this many pages */ 56 unsigned long nr_to_scan; 57 58 /* Incremented by the number of inactive pages that were scanned */ 59 unsigned long nr_scanned; 60 61 /* Incremented by the number of pages reclaimed */ 62 unsigned long nr_reclaimed; 63 64 unsigned long nr_mapped; /* From page_state */ 65 66 /* How many pages shrink_cache() should reclaim */ 67 int nr_to_reclaim; 68 69 /* Ask shrink_caches, or shrink_zone to scan at this priority */ 70 unsigned int priority; 71 72 /* This context's GFP mask */ 73 unsigned int gfp_mask; 74 75 int may_writepage; 76 77 /* Can pages be swapped as part of reclaim? */ 78 int may_swap; 79 80 /* This context's SWAP_CLUSTER_MAX. If freeing memory for 81 * suspend, we effectively ignore SWAP_CLUSTER_MAX. 82 * In this context, it doesn't matter that we scan the 83 * whole list at once. */ 84 int swap_cluster_max; 85}; 86 87/* 88 * The list of shrinker callbacks used by to apply pressure to 89 * ageable caches. 90 */ 91struct shrinker { 92 shrinker_t shrinker; 93 struct list_head list; 94 int seeks; /* seeks to recreate an obj */ 95 long nr; /* objs pending delete */ 96}; 97 98#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru)) 99 100#ifdef ARCH_HAS_PREFETCH 101#define prefetch_prev_lru_page(_page, _base, _field) \ 102 do { \ 103 if ((_page)->lru.prev != _base) { \ 104 struct page *prev; \ 105 \ 106 prev = lru_to_page(&(_page->lru)); \ 107 prefetch(&prev->_field); \ 108 } \ 109 } while (0) 110#else 111#define prefetch_prev_lru_page(_page, _base, _field) do { } while (0) 112#endif 113 114#ifdef ARCH_HAS_PREFETCHW 115#define prefetchw_prev_lru_page(_page, _base, _field) \ 116 do { \ 117 if ((_page)->lru.prev != _base) { \ 118 struct page *prev; \ 119 \ 120 prev = lru_to_page(&(_page->lru)); \ 121 prefetchw(&prev->_field); \ 122 } \ 123 } while (0) 124#else 125#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0) 126#endif 127 128/* 129 * From 0 .. 100. Higher means more swappy. 130 */ 131int vm_swappiness = 60; 132static long total_memory; 133 134static LIST_HEAD(shrinker_list); 135static DECLARE_RWSEM(shrinker_rwsem); 136 137/* 138 * Add a shrinker callback to be called from the vm 139 */ 140struct shrinker *set_shrinker(int seeks, shrinker_t theshrinker) 141{ 142 struct shrinker *shrinker; 143 144 shrinker = kmalloc(sizeof(*shrinker), GFP_KERNEL); 145 if (shrinker) { 146 shrinker->shrinker = theshrinker; 147 shrinker->seeks = seeks; 148 shrinker->nr = 0; 149 down_write(&shrinker_rwsem); 150 list_add_tail(&shrinker->list, &shrinker_list); 151 up_write(&shrinker_rwsem); 152 } 153 return shrinker; 154} 155EXPORT_SYMBOL(set_shrinker); 156 157/* 158 * Remove one 159 */ 160void remove_shrinker(struct shrinker *shrinker) 161{ 162 down_write(&shrinker_rwsem); 163 list_del(&shrinker->list); 164 up_write(&shrinker_rwsem); 165 kfree(shrinker); 166} 167EXPORT_SYMBOL(remove_shrinker); 168 169#define SHRINK_BATCH 128 170/* 171 * Call the shrink functions to age shrinkable caches 172 * 173 * Here we assume it costs one seek to replace a lru page and that it also 174 * takes a seek to recreate a cache object. With this in mind we age equal 175 * percentages of the lru and ageable caches. This should balance the seeks 176 * generated by these structures. 177 * 178 * If the vm encounted mapped pages on the LRU it increase the pressure on 179 * slab to avoid swapping. 180 * 181 * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits. 182 * 183 * `lru_pages' represents the number of on-LRU pages in all the zones which 184 * are eligible for the caller's allocation attempt. It is used for balancing 185 * slab reclaim versus page reclaim. 186 * 187 * Returns the number of slab objects which we shrunk. 188 */ 189static int shrink_slab(unsigned long scanned, unsigned int gfp_mask, 190 unsigned long lru_pages) 191{ 192 struct shrinker *shrinker; 193 int ret = 0; 194 195 if (scanned == 0) 196 scanned = SWAP_CLUSTER_MAX; 197 198 if (!down_read_trylock(&shrinker_rwsem)) 199 return 1; /* Assume we'll be able to shrink next time */ 200 201 list_for_each_entry(shrinker, &shrinker_list, list) { 202 unsigned long long delta; 203 unsigned long total_scan; 204 205 delta = (4 * scanned) / shrinker->seeks; 206 delta *= (*shrinker->shrinker)(0, gfp_mask); 207 do_div(delta, lru_pages + 1); 208 shrinker->nr += delta; 209 if (shrinker->nr < 0) 210 shrinker->nr = LONG_MAX; /* It wrapped! */ 211 212 total_scan = shrinker->nr; 213 shrinker->nr = 0; 214 215 while (total_scan >= SHRINK_BATCH) { 216 long this_scan = SHRINK_BATCH; 217 int shrink_ret; 218 int nr_before; 219 220 nr_before = (*shrinker->shrinker)(0, gfp_mask); 221 shrink_ret = (*shrinker->shrinker)(this_scan, gfp_mask); 222 if (shrink_ret == -1) 223 break; 224 if (shrink_ret < nr_before) 225 ret += nr_before - shrink_ret; 226 mod_page_state(slabs_scanned, this_scan); 227 total_scan -= this_scan; 228 229 cond_resched(); 230 } 231 232 shrinker->nr += total_scan; 233 } 234 up_read(&shrinker_rwsem); 235 return ret; 236} 237 238/* Called without lock on whether page is mapped, so answer is unstable */ 239static inline int page_mapping_inuse(struct page *page) 240{ 241 struct address_space *mapping; 242 243 /* Page is in somebody's page tables. */ 244 if (page_mapped(page)) 245 return 1; 246 247 /* Be more reluctant to reclaim swapcache than pagecache */ 248 if (PageSwapCache(page)) 249 return 1; 250 251 mapping = page_mapping(page); 252 if (!mapping) 253 return 0; 254 255 /* File is mmap'd by somebody? */ 256 return mapping_mapped(mapping); 257} 258 259static inline int is_page_cache_freeable(struct page *page) 260{ 261 return page_count(page) - !!PagePrivate(page) == 2; 262} 263 264static int may_write_to_queue(struct backing_dev_info *bdi) 265{ 266 if (current_is_kswapd()) 267 return 1; 268 if (current_is_pdflush()) /* This is unlikely, but why not... */ 269 return 1; 270 if (!bdi_write_congested(bdi)) 271 return 1; 272 if (bdi == current->backing_dev_info) 273 return 1; 274 return 0; 275} 276 277/* 278 * We detected a synchronous write error writing a page out. Probably 279 * -ENOSPC. We need to propagate that into the address_space for a subsequent 280 * fsync(), msync() or close(). 281 * 282 * The tricky part is that after writepage we cannot touch the mapping: nothing 283 * prevents it from being freed up. But we have a ref on the page and once 284 * that page is locked, the mapping is pinned. 285 * 286 * We're allowed to run sleeping lock_page() here because we know the caller has 287 * __GFP_FS. 288 */ 289static void handle_write_error(struct address_space *mapping, 290 struct page *page, int error) 291{ 292 lock_page(page); 293 if (page_mapping(page) == mapping) { 294 if (error == -ENOSPC) 295 set_bit(AS_ENOSPC, &mapping->flags); 296 else 297 set_bit(AS_EIO, &mapping->flags); 298 } 299 unlock_page(page); 300} 301 302/* 303 * pageout is called by shrink_list() for each dirty page. Calls ->writepage(). 304 */ 305static pageout_t pageout(struct page *page, struct address_space *mapping) 306{ 307 /* 308 * If the page is dirty, only perform writeback if that write 309 * will be non-blocking. To prevent this allocation from being 310 * stalled by pagecache activity. But note that there may be 311 * stalls if we need to run get_block(). We could test 312 * PagePrivate for that. 313 * 314 * If this process is currently in generic_file_write() against 315 * this page's queue, we can perform writeback even if that 316 * will block. 317 * 318 * If the page is swapcache, write it back even if that would 319 * block, for some throttling. This happens by accident, because 320 * swap_backing_dev_info is bust: it doesn't reflect the 321 * congestion state of the swapdevs. Easy to fix, if needed. 322 * See swapfile.c:page_queue_congested(). 323 */ 324 if (!is_page_cache_freeable(page)) 325 return PAGE_KEEP; 326 if (!mapping) { 327 /* 328 * Some data journaling orphaned pages can have 329 * page->mapping == NULL while being dirty with clean buffers. 330 */ 331 if (PagePrivate(page)) { 332 if (try_to_free_buffers(page)) { 333 ClearPageDirty(page); 334 printk("%s: orphaned page\n", __FUNCTION__); 335 return PAGE_CLEAN; 336 } 337 } 338 return PAGE_KEEP; 339 } 340 if (mapping->a_ops->writepage == NULL) 341 return PAGE_ACTIVATE; 342 if (!may_write_to_queue(mapping->backing_dev_info)) 343 return PAGE_KEEP; 344 345 if (clear_page_dirty_for_io(page)) { 346 int res; 347 struct writeback_control wbc = { 348 .sync_mode = WB_SYNC_NONE, 349 .nr_to_write = SWAP_CLUSTER_MAX, 350 .nonblocking = 1, 351 .for_reclaim = 1, 352 }; 353 354 SetPageReclaim(page); 355 res = mapping->a_ops->writepage(page, &wbc); 356 if (res < 0) 357 handle_write_error(mapping, page, res); 358 if (res == WRITEPAGE_ACTIVATE) { 359 ClearPageReclaim(page); 360 return PAGE_ACTIVATE; 361 } 362 if (!PageWriteback(page)) { 363 /* synchronous write or broken a_ops? */ 364 ClearPageReclaim(page); 365 } 366 367 return PAGE_SUCCESS; 368 } 369 370 return PAGE_CLEAN; 371} 372 373/* 374 * shrink_list adds the number of reclaimed pages to sc->nr_reclaimed 375 */ 376static int shrink_list(struct list_head *page_list, struct scan_control *sc) 377{ 378 LIST_HEAD(ret_pages); 379 struct pagevec freed_pvec; 380 int pgactivate = 0; 381 int reclaimed = 0; 382 383 cond_resched(); 384 385 pagevec_init(&freed_pvec, 1); 386 while (!list_empty(page_list)) { 387 struct address_space *mapping; 388 struct page *page; 389 int may_enter_fs; 390 int referenced; 391 392 cond_resched(); 393 394 page = lru_to_page(page_list); 395 list_del(&page->lru); 396 397 if (TestSetPageLocked(page)) 398 goto keep; 399 400 BUG_ON(PageActive(page)); 401 402 sc->nr_scanned++; 403 /* Double the slab pressure for mapped and swapcache pages */ 404 if (page_mapped(page) || PageSwapCache(page)) 405 sc->nr_scanned++; 406 407 if (PageWriteback(page)) 408 goto keep_locked; 409 410 referenced = page_referenced(page, 1, sc->priority <= 0); 411 /* In active use or really unfreeable? Activate it. */ 412 if (referenced && page_mapping_inuse(page)) 413 goto activate_locked; 414 415#ifdef CONFIG_SWAP 416 /* 417 * Anonymous process memory has backing store? 418 * Try to allocate it some swap space here. 419 */ 420 if (PageAnon(page) && !PageSwapCache(page) && sc->may_swap) { 421 if (!add_to_swap(page)) 422 goto activate_locked; 423 } 424#endif /* CONFIG_SWAP */ 425 426 mapping = page_mapping(page); 427 may_enter_fs = (sc->gfp_mask & __GFP_FS) || 428 (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO)); 429 430 /* 431 * The page is mapped into the page tables of one or more 432 * processes. Try to unmap it here. 433 */ 434 if (page_mapped(page) && mapping) { 435 switch (try_to_unmap(page)) { 436 case SWAP_FAIL: 437 goto activate_locked; 438 case SWAP_AGAIN: 439 goto keep_locked; 440 case SWAP_SUCCESS: 441 ; /* try to free the page below */ 442 } 443 } 444 445 if (PageDirty(page)) { 446 if (referenced) 447 goto keep_locked; 448 if (!may_enter_fs) 449 goto keep_locked; 450 if (laptop_mode && !sc->may_writepage) 451 goto keep_locked; 452 453 /* Page is dirty, try to write it out here */ 454 switch(pageout(page, mapping)) { 455 case PAGE_KEEP: 456 goto keep_locked; 457 case PAGE_ACTIVATE: 458 goto activate_locked; 459 case PAGE_SUCCESS: 460 if (PageWriteback(page) || PageDirty(page)) 461 goto keep; 462 /* 463 * A synchronous write - probably a ramdisk. Go 464 * ahead and try to reclaim the page. 465 */ 466 if (TestSetPageLocked(page)) 467 goto keep; 468 if (PageDirty(page) || PageWriteback(page)) 469 goto keep_locked; 470 mapping = page_mapping(page); 471 case PAGE_CLEAN: 472 ; /* try to free the page below */ 473 } 474 } 475 476 /* 477 * If the page has buffers, try to free the buffer mappings 478 * associated with this page. If we succeed we try to free 479 * the page as well. 480 * 481 * We do this even if the page is PageDirty(). 482 * try_to_release_page() does not perform I/O, but it is 483 * possible for a page to have PageDirty set, but it is actually 484 * clean (all its buffers are clean). This happens if the 485 * buffers were written out directly, with submit_bh(). ext3 486 * will do this, as well as the blockdev mapping. 487 * try_to_release_page() will discover that cleanness and will 488 * drop the buffers and mark the page clean - it can be freed. 489 * 490 * Rarely, pages can have buffers and no ->mapping. These are 491 * the pages which were not successfully invalidated in 492 * truncate_complete_page(). We try to drop those buffers here 493 * and if that worked, and the page is no longer mapped into 494 * process address space (page_count == 1) it can be freed. 495 * Otherwise, leave the page on the LRU so it is swappable. 496 */ 497 if (PagePrivate(page)) { 498 if (!try_to_release_page(page, sc->gfp_mask)) 499 goto activate_locked; 500 if (!mapping && page_count(page) == 1) 501 goto free_it; 502 } 503 504 if (!mapping) 505 goto keep_locked; /* truncate got there first */ 506 507 write_lock_irq(&mapping->tree_lock); 508 509 /* 510 * The non-racy check for busy page. It is critical to check 511 * PageDirty _after_ making sure that the page is freeable and 512 * not in use by anybody. (pagecache + us == 2) 513 */ 514 if (unlikely(page_count(page) != 2)) 515 goto cannot_free; 516 smp_rmb(); 517 if (unlikely(PageDirty(page))) 518 goto cannot_free; 519 520#ifdef CONFIG_SWAP 521 if (PageSwapCache(page)) { 522 swp_entry_t swap = { .val = page->private }; 523 __delete_from_swap_cache(page); 524 write_unlock_irq(&mapping->tree_lock); 525 swap_free(swap); 526 __put_page(page); /* The pagecache ref */ 527 goto free_it; 528 } 529#endif /* CONFIG_SWAP */ 530 531 __remove_from_page_cache(page); 532 write_unlock_irq(&mapping->tree_lock); 533 __put_page(page); 534 535free_it: 536 unlock_page(page); 537 reclaimed++; 538 if (!pagevec_add(&freed_pvec, page)) 539 __pagevec_release_nonlru(&freed_pvec); 540 continue; 541 542cannot_free: 543 write_unlock_irq(&mapping->tree_lock); 544 goto keep_locked; 545 546activate_locked: 547 SetPageActive(page); 548 pgactivate++; 549keep_locked: 550 unlock_page(page); 551keep: 552 list_add(&page->lru, &ret_pages); 553 BUG_ON(PageLRU(page)); 554 } 555 list_splice(&ret_pages, page_list); 556 if (pagevec_count(&freed_pvec)) 557 __pagevec_release_nonlru(&freed_pvec); 558 mod_page_state(pgactivate, pgactivate); 559 sc->nr_reclaimed += reclaimed; 560 return reclaimed; 561} 562 563/* 564 * zone->lru_lock is heavily contended. Some of the functions that 565 * shrink the lists perform better by taking out a batch of pages 566 * and working on them outside the LRU lock. 567 * 568 * For pagecache intensive workloads, this function is the hottest 569 * spot in the kernel (apart from copy_*_user functions). 570 * 571 * Appropriate locks must be held before calling this function. 572 * 573 * @nr_to_scan: The number of pages to look through on the list. 574 * @src: The LRU list to pull pages off. 575 * @dst: The temp list to put pages on to. 576 * @scanned: The number of pages that were scanned. 577 * 578 * returns how many pages were moved onto *@dst. 579 */ 580static int isolate_lru_pages(int nr_to_scan, struct list_head *src, 581 struct list_head *dst, int *scanned) 582{ 583 int nr_taken = 0; 584 struct page *page; 585 int scan = 0; 586 587 while (scan++ < nr_to_scan && !list_empty(src)) { 588 page = lru_to_page(src); 589 prefetchw_prev_lru_page(page, src, flags); 590 591 if (!TestClearPageLRU(page)) 592 BUG(); 593 list_del(&page->lru); 594 if (get_page_testone(page)) { 595 /* 596 * It is being freed elsewhere 597 */ 598 __put_page(page); 599 SetPageLRU(page); 600 list_add(&page->lru, src); 601 continue; 602 } else { 603 list_add(&page->lru, dst); 604 nr_taken++; 605 } 606 } 607 608 *scanned = scan; 609 return nr_taken; 610} 611 612/* 613 * shrink_cache() adds the number of pages reclaimed to sc->nr_reclaimed 614 */ 615static void shrink_cache(struct zone *zone, struct scan_control *sc) 616{ 617 LIST_HEAD(page_list); 618 struct pagevec pvec; 619 int max_scan = sc->nr_to_scan; 620 621 pagevec_init(&pvec, 1); 622 623 lru_add_drain(); 624 spin_lock_irq(&zone->lru_lock); 625 while (max_scan > 0) { 626 struct page *page; 627 int nr_taken; 628 int nr_scan; 629 int nr_freed; 630 631 nr_taken = isolate_lru_pages(sc->swap_cluster_max, 632 &zone->inactive_list, 633 &page_list, &nr_scan); 634 zone->nr_inactive -= nr_taken; 635 zone->pages_scanned += nr_scan; 636 spin_unlock_irq(&zone->lru_lock); 637 638 if (nr_taken == 0) 639 goto done; 640 641 max_scan -= nr_scan; 642 if (current_is_kswapd()) 643 mod_page_state_zone(zone, pgscan_kswapd, nr_scan); 644 else 645 mod_page_state_zone(zone, pgscan_direct, nr_scan); 646 nr_freed = shrink_list(&page_list, sc); 647 if (current_is_kswapd()) 648 mod_page_state(kswapd_steal, nr_freed); 649 mod_page_state_zone(zone, pgsteal, nr_freed); 650 sc->nr_to_reclaim -= nr_freed; 651 652 spin_lock_irq(&zone->lru_lock); 653 /* 654 * Put back any unfreeable pages. 655 */ 656 while (!list_empty(&page_list)) { 657 page = lru_to_page(&page_list); 658 if (TestSetPageLRU(page)) 659 BUG(); 660 list_del(&page->lru); 661 if (PageActive(page)) 662 add_page_to_active_list(zone, page); 663 else 664 add_page_to_inactive_list(zone, page); 665 if (!pagevec_add(&pvec, page)) { 666 spin_unlock_irq(&zone->lru_lock); 667 __pagevec_release(&pvec); 668 spin_lock_irq(&zone->lru_lock); 669 } 670 } 671 } 672 spin_unlock_irq(&zone->lru_lock); 673done: 674 pagevec_release(&pvec); 675} 676 677/* 678 * This moves pages from the active list to the inactive list. 679 * 680 * We move them the other way if the page is referenced by one or more 681 * processes, from rmap. 682 * 683 * If the pages are mostly unmapped, the processing is fast and it is 684 * appropriate to hold zone->lru_lock across the whole operation. But if 685 * the pages are mapped, the processing is slow (page_referenced()) so we 686 * should drop zone->lru_lock around each page. It's impossible to balance 687 * this, so instead we remove the pages from the LRU while processing them. 688 * It is safe to rely on PG_active against the non-LRU pages in here because 689 * nobody will play with that bit on a non-LRU page. 690 * 691 * The downside is that we have to touch page->_count against each page. 692 * But we had to alter page->flags anyway. 693 */ 694static void 695refill_inactive_zone(struct zone *zone, struct scan_control *sc) 696{ 697 int pgmoved; 698 int pgdeactivate = 0; 699 int pgscanned; 700 int nr_pages = sc->nr_to_scan; 701 LIST_HEAD(l_hold); /* The pages which were snipped off */ 702 LIST_HEAD(l_inactive); /* Pages to go onto the inactive_list */ 703 LIST_HEAD(l_active); /* Pages to go onto the active_list */ 704 struct page *page; 705 struct pagevec pvec; 706 int reclaim_mapped = 0; 707 long mapped_ratio; 708 long distress; 709 long swap_tendency; 710 711 lru_add_drain(); 712 spin_lock_irq(&zone->lru_lock); 713 pgmoved = isolate_lru_pages(nr_pages, &zone->active_list, 714 &l_hold, &pgscanned); 715 zone->pages_scanned += pgscanned; 716 zone->nr_active -= pgmoved; 717 spin_unlock_irq(&zone->lru_lock); 718 719 /* 720 * `distress' is a measure of how much trouble we're having reclaiming 721 * pages. 0 -> no problems. 100 -> great trouble. 722 */ 723 distress = 100 >> zone->prev_priority; 724 725 /* 726 * The point of this algorithm is to decide when to start reclaiming 727 * mapped memory instead of just pagecache. Work out how much memory 728 * is mapped. 729 */ 730 mapped_ratio = (sc->nr_mapped * 100) / total_memory; 731 732 /* 733 * Now decide how much we really want to unmap some pages. The mapped 734 * ratio is downgraded - just because there's a lot of mapped memory 735 * doesn't necessarily mean that page reclaim isn't succeeding. 736 * 737 * The distress ratio is important - we don't want to start going oom. 738 * 739 * A 100% value of vm_swappiness overrides this algorithm altogether. 740 */ 741 swap_tendency = mapped_ratio / 2 + distress + vm_swappiness; 742 743 /* 744 * Now use this metric to decide whether to start moving mapped memory 745 * onto the inactive list. 746 */ 747 if (swap_tendency >= 100) 748 reclaim_mapped = 1; 749 750 while (!list_empty(&l_hold)) { 751 cond_resched(); 752 page = lru_to_page(&l_hold); 753 list_del(&page->lru); 754 if (page_mapped(page)) { 755 if (!reclaim_mapped || 756 (total_swap_pages == 0 && PageAnon(page)) || 757 page_referenced(page, 0, sc->priority <= 0)) { 758 list_add(&page->lru, &l_active); 759 continue; 760 } 761 } 762 list_add(&page->lru, &l_inactive); 763 } 764 765 pagevec_init(&pvec, 1); 766 pgmoved = 0; 767 spin_lock_irq(&zone->lru_lock); 768 while (!list_empty(&l_inactive)) { 769 page = lru_to_page(&l_inactive); 770 prefetchw_prev_lru_page(page, &l_inactive, flags); 771 if (TestSetPageLRU(page)) 772 BUG(); 773 if (!TestClearPageActive(page)) 774 BUG(); 775 list_move(&page->lru, &zone->inactive_list); 776 pgmoved++; 777 if (!pagevec_add(&pvec, page)) { 778 zone->nr_inactive += pgmoved; 779 spin_unlock_irq(&zone->lru_lock); 780 pgdeactivate += pgmoved; 781 pgmoved = 0; 782 if (buffer_heads_over_limit) 783 pagevec_strip(&pvec); 784 __pagevec_release(&pvec); 785 spin_lock_irq(&zone->lru_lock); 786 } 787 } 788 zone->nr_inactive += pgmoved; 789 pgdeactivate += pgmoved; 790 if (buffer_heads_over_limit) { 791 spin_unlock_irq(&zone->lru_lock); 792 pagevec_strip(&pvec); 793 spin_lock_irq(&zone->lru_lock); 794 } 795 796 pgmoved = 0; 797 while (!list_empty(&l_active)) { 798 page = lru_to_page(&l_active); 799 prefetchw_prev_lru_page(page, &l_active, flags); 800 if (TestSetPageLRU(page)) 801 BUG(); 802 BUG_ON(!PageActive(page)); 803 list_move(&page->lru, &zone->active_list); 804 pgmoved++; 805 if (!pagevec_add(&pvec, page)) { 806 zone->nr_active += pgmoved; 807 pgmoved = 0; 808 spin_unlock_irq(&zone->lru_lock); 809 __pagevec_release(&pvec); 810 spin_lock_irq(&zone->lru_lock); 811 } 812 } 813 zone->nr_active += pgmoved; 814 spin_unlock_irq(&zone->lru_lock); 815 pagevec_release(&pvec); 816 817 mod_page_state_zone(zone, pgrefill, pgscanned); 818 mod_page_state(pgdeactivate, pgdeactivate); 819} 820 821/* 822 * This is a basic per-zone page freer. Used by both kswapd and direct reclaim. 823 */ 824static void 825shrink_zone(struct zone *zone, struct scan_control *sc) 826{ 827 unsigned long nr_active; 828 unsigned long nr_inactive; 829 830 atomic_inc(&zone->reclaim_in_progress); 831 832 /* 833 * Add one to `nr_to_scan' just to make sure that the kernel will 834 * slowly sift through the active list. 835 */ 836 zone->nr_scan_active += (zone->nr_active >> sc->priority) + 1; 837 nr_active = zone->nr_scan_active; 838 if (nr_active >= sc->swap_cluster_max) 839 zone->nr_scan_active = 0; 840 else 841 nr_active = 0; 842 843 zone->nr_scan_inactive += (zone->nr_inactive >> sc->priority) + 1; 844 nr_inactive = zone->nr_scan_inactive; 845 if (nr_inactive >= sc->swap_cluster_max) 846 zone->nr_scan_inactive = 0; 847 else 848 nr_inactive = 0; 849 850 sc->nr_to_reclaim = sc->swap_cluster_max; 851 852 while (nr_active || nr_inactive) { 853 if (nr_active) { 854 sc->nr_to_scan = min(nr_active, 855 (unsigned long)sc->swap_cluster_max); 856 nr_active -= sc->nr_to_scan; 857 refill_inactive_zone(zone, sc); 858 } 859 860 if (nr_inactive) { 861 sc->nr_to_scan = min(nr_inactive, 862 (unsigned long)sc->swap_cluster_max); 863 nr_inactive -= sc->nr_to_scan; 864 shrink_cache(zone, sc); 865 if (sc->nr_to_reclaim <= 0) 866 break; 867 } 868 } 869 870 throttle_vm_writeout(); 871 872 atomic_dec(&zone->reclaim_in_progress); 873} 874 875/* 876 * This is the direct reclaim path, for page-allocating processes. We only 877 * try to reclaim pages from zones which will satisfy the caller's allocation 878 * request. 879 * 880 * We reclaim from a zone even if that zone is over pages_high. Because: 881 * a) The caller may be trying to free *extra* pages to satisfy a higher-order 882 * allocation or 883 * b) The zones may be over pages_high but they must go *over* pages_high to 884 * satisfy the `incremental min' zone defense algorithm. 885 * 886 * Returns the number of reclaimed pages. 887 * 888 * If a zone is deemed to be full of pinned pages then just give it a light 889 * scan then give up on it. 890 */ 891static void 892shrink_caches(struct zone **zones, struct scan_control *sc) 893{ 894 int i; 895 896 for (i = 0; zones[i] != NULL; i++) { 897 struct zone *zone = zones[i]; 898 899 if (zone->present_pages == 0) 900 continue; 901 902 if (!cpuset_zone_allowed(zone, __GFP_HARDWALL)) 903 continue; 904 905 zone->temp_priority = sc->priority; 906 if (zone->prev_priority > sc->priority) 907 zone->prev_priority = sc->priority; 908 909 if (zone->all_unreclaimable && sc->priority != DEF_PRIORITY) 910 continue; /* Let kswapd poll it */ 911 912 shrink_zone(zone, sc); 913 } 914} 915 916/* 917 * This is the main entry point to direct page reclaim. 918 * 919 * If a full scan of the inactive list fails to free enough memory then we 920 * are "out of memory" and something needs to be killed. 921 * 922 * If the caller is !__GFP_FS then the probability of a failure is reasonably 923 * high - the zone may be full of dirty or under-writeback pages, which this 924 * caller can't do much about. We kick pdflush and take explicit naps in the 925 * hope that some of these pages can be written. But if the allocating task 926 * holds filesystem locks which prevent writeout this might not work, and the 927 * allocation attempt will fail. 928 */ 929int try_to_free_pages(struct zone **zones, unsigned int gfp_mask) 930{ 931 int priority; 932 int ret = 0; 933 int total_scanned = 0, total_reclaimed = 0; 934 struct reclaim_state *reclaim_state = current->reclaim_state; 935 struct scan_control sc; 936 unsigned long lru_pages = 0; 937 int i; 938 939 sc.gfp_mask = gfp_mask; 940 sc.may_writepage = 0; 941 sc.may_swap = 1; 942 943 inc_page_state(allocstall); 944 945 for (i = 0; zones[i] != NULL; i++) { 946 struct zone *zone = zones[i]; 947 948 if (!cpuset_zone_allowed(zone, __GFP_HARDWALL)) 949 continue; 950 951 zone->temp_priority = DEF_PRIORITY; 952 lru_pages += zone->nr_active + zone->nr_inactive; 953 } 954 955 for (priority = DEF_PRIORITY; priority >= 0; priority--) { 956 sc.nr_mapped = read_page_state(nr_mapped); 957 sc.nr_scanned = 0; 958 sc.nr_reclaimed = 0; 959 sc.priority = priority; 960 sc.swap_cluster_max = SWAP_CLUSTER_MAX; 961 shrink_caches(zones, &sc); 962 shrink_slab(sc.nr_scanned, gfp_mask, lru_pages); 963 if (reclaim_state) { 964 sc.nr_reclaimed += reclaim_state->reclaimed_slab; 965 reclaim_state->reclaimed_slab = 0; 966 } 967 total_scanned += sc.nr_scanned; 968 total_reclaimed += sc.nr_reclaimed; 969 if (total_reclaimed >= sc.swap_cluster_max) { 970 ret = 1; 971 goto out; 972 } 973 974 /* 975 * Try to write back as many pages as we just scanned. This 976 * tends to cause slow streaming writers to write data to the 977 * disk smoothly, at the dirtying rate, which is nice. But 978 * that's undesirable in laptop mode, where we *want* lumpy 979 * writeout. So in laptop mode, write out the whole world. 980 */ 981 if (total_scanned > sc.swap_cluster_max + sc.swap_cluster_max/2) { 982 wakeup_pdflush(laptop_mode ? 0 : total_scanned); 983 sc.may_writepage = 1; 984 } 985 986 /* Take a nap, wait for some writeback to complete */ 987 if (sc.nr_scanned && priority < DEF_PRIORITY - 2) 988 blk_congestion_wait(WRITE, HZ/10); 989 } 990out: 991 for (i = 0; zones[i] != 0; i++) { 992 struct zone *zone = zones[i]; 993 994 if (!cpuset_zone_allowed(zone, __GFP_HARDWALL)) 995 continue; 996 997 zone->prev_priority = zone->temp_priority; 998 } 999 return ret; 1000} 1001 1002/* 1003 * For kswapd, balance_pgdat() will work across all this node's zones until 1004 * they are all at pages_high. 1005 * 1006 * If `nr_pages' is non-zero then it is the number of pages which are to be 1007 * reclaimed, regardless of the zone occupancies. This is a software suspend 1008 * special. 1009 * 1010 * Returns the number of pages which were actually freed. 1011 * 1012 * There is special handling here for zones which are full of pinned pages. 1013 * This can happen if the pages are all mlocked, or if they are all used by 1014 * device drivers (say, ZONE_DMA). Or if they are all in use by hugetlb. 1015 * What we do is to detect the case where all pages in the zone have been 1016 * scanned twice and there has been zero successful reclaim. Mark the zone as 1017 * dead and from now on, only perform a short scan. Basically we're polling 1018 * the zone for when the problem goes away. 1019 * 1020 * kswapd scans the zones in the highmem->normal->dma direction. It skips 1021 * zones which have free_pages > pages_high, but once a zone is found to have 1022 * free_pages <= pages_high, we scan that zone and the lower zones regardless 1023 * of the number of free pages in the lower zones. This interoperates with 1024 * the page allocator fallback scheme to ensure that aging of pages is balanced 1025 * across the zones. 1026 */ 1027static int balance_pgdat(pg_data_t *pgdat, int nr_pages, int order) 1028{ 1029 int to_free = nr_pages; 1030 int all_zones_ok; 1031 int priority; 1032 int i; 1033 int total_scanned, total_reclaimed; 1034 struct reclaim_state *reclaim_state = current->reclaim_state; 1035 struct scan_control sc; 1036 1037loop_again: 1038 total_scanned = 0; 1039 total_reclaimed = 0; 1040 sc.gfp_mask = GFP_KERNEL; 1041 sc.may_writepage = 0; 1042 sc.may_swap = 1; 1043 sc.nr_mapped = read_page_state(nr_mapped); 1044 1045 inc_page_state(pageoutrun); 1046 1047 for (i = 0; i < pgdat->nr_zones; i++) { 1048 struct zone *zone = pgdat->node_zones + i; 1049 1050 zone->temp_priority = DEF_PRIORITY; 1051 } 1052 1053 for (priority = DEF_PRIORITY; priority >= 0; priority--) { 1054 int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */ 1055 unsigned long lru_pages = 0; 1056 1057 all_zones_ok = 1; 1058 1059 if (nr_pages == 0) { 1060 /* 1061 * Scan in the highmem->dma direction for the highest 1062 * zone which needs scanning 1063 */ 1064 for (i = pgdat->nr_zones - 1; i >= 0; i--) { 1065 struct zone *zone = pgdat->node_zones + i; 1066 1067 if (zone->present_pages == 0) 1068 continue; 1069 1070 if (zone->all_unreclaimable && 1071 priority != DEF_PRIORITY) 1072 continue; 1073 1074 if (!zone_watermark_ok(zone, order, 1075 zone->pages_high, 0, 0, 0)) { 1076 end_zone = i; 1077 goto scan; 1078 } 1079 } 1080 goto out; 1081 } else { 1082 end_zone = pgdat->nr_zones - 1; 1083 } 1084scan: 1085 for (i = 0; i <= end_zone; i++) { 1086 struct zone *zone = pgdat->node_zones + i; 1087 1088 lru_pages += zone->nr_active + zone->nr_inactive; 1089 } 1090 1091 /* 1092 * Now scan the zone in the dma->highmem direction, stopping 1093 * at the last zone which needs scanning. 1094 * 1095 * We do this because the page allocator works in the opposite 1096 * direction. This prevents the page allocator from allocating 1097 * pages behind kswapd's direction of progress, which would 1098 * cause too much scanning of the lower zones. 1099 */ 1100 for (i = 0; i <= end_zone; i++) { 1101 struct zone *zone = pgdat->node_zones + i; 1102 int nr_slab; 1103 1104 if (zone->present_pages == 0) 1105 continue; 1106 1107 if (zone->all_unreclaimable && priority != DEF_PRIORITY) 1108 continue; 1109 1110 if (nr_pages == 0) { /* Not software suspend */ 1111 if (!zone_watermark_ok(zone, order, 1112 zone->pages_high, end_zone, 0, 0)) 1113 all_zones_ok = 0; 1114 } 1115 zone->temp_priority = priority; 1116 if (zone->prev_priority > priority) 1117 zone->prev_priority = priority; 1118 sc.nr_scanned = 0; 1119 sc.nr_reclaimed = 0; 1120 sc.priority = priority; 1121 sc.swap_cluster_max = nr_pages? nr_pages : SWAP_CLUSTER_MAX; 1122 atomic_inc(&zone->reclaim_in_progress); 1123 shrink_zone(zone, &sc); 1124 atomic_dec(&zone->reclaim_in_progress); 1125 reclaim_state->reclaimed_slab = 0; 1126 nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL, 1127 lru_pages); 1128 sc.nr_reclaimed += reclaim_state->reclaimed_slab; 1129 total_reclaimed += sc.nr_reclaimed; 1130 total_scanned += sc.nr_scanned; 1131 if (zone->all_unreclaimable) 1132 continue; 1133 if (nr_slab == 0 && zone->pages_scanned >= 1134 (zone->nr_active + zone->nr_inactive) * 4) 1135 zone->all_unreclaimable = 1; 1136 /* 1137 * If we've done a decent amount of scanning and 1138 * the reclaim ratio is low, start doing writepage 1139 * even in laptop mode 1140 */ 1141 if (total_scanned > SWAP_CLUSTER_MAX * 2 && 1142 total_scanned > total_reclaimed+total_reclaimed/2) 1143 sc.may_writepage = 1; 1144 } 1145 if (nr_pages && to_free > total_reclaimed) 1146 continue; /* swsusp: need to do more work */ 1147 if (all_zones_ok) 1148 break; /* kswapd: all done */ 1149 /* 1150 * OK, kswapd is getting into trouble. Take a nap, then take 1151 * another pass across the zones. 1152 */ 1153 if (total_scanned && priority < DEF_PRIORITY - 2) 1154 blk_congestion_wait(WRITE, HZ/10); 1155 1156 /* 1157 * We do this so kswapd doesn't build up large priorities for 1158 * example when it is freeing in parallel with allocators. It 1159 * matches the direct reclaim path behaviour in terms of impact 1160 * on zone->*_priority. 1161 */ 1162 if ((total_reclaimed >= SWAP_CLUSTER_MAX) && (!nr_pages)) 1163 break; 1164 } 1165out: 1166 for (i = 0; i < pgdat->nr_zones; i++) { 1167 struct zone *zone = pgdat->node_zones + i; 1168 1169 zone->prev_priority = zone->temp_priority; 1170 } 1171 if (!all_zones_ok) { 1172 cond_resched(); 1173 goto loop_again; 1174 } 1175 1176 return total_reclaimed; 1177} 1178 1179/* 1180 * The background pageout daemon, started as a kernel thread 1181 * from the init process. 1182 * 1183 * This basically trickles out pages so that we have _some_ 1184 * free memory available even if there is no other activity 1185 * that frees anything up. This is needed for things like routing 1186 * etc, where we otherwise might have all activity going on in 1187 * asynchronous contexts that cannot page things out. 1188 * 1189 * If there are applications that are active memory-allocators 1190 * (most normal use), this basically shouldn't matter. 1191 */ 1192static int kswapd(void *p) 1193{ 1194 unsigned long order; 1195 pg_data_t *pgdat = (pg_data_t*)p; 1196 struct task_struct *tsk = current; 1197 DEFINE_WAIT(wait); 1198 struct reclaim_state reclaim_state = { 1199 .reclaimed_slab = 0, 1200 }; 1201 cpumask_t cpumask; 1202 1203 daemonize("kswapd%d", pgdat->node_id); 1204 cpumask = node_to_cpumask(pgdat->node_id); 1205 if (!cpus_empty(cpumask)) 1206 set_cpus_allowed(tsk, cpumask); 1207 current->reclaim_state = &reclaim_state; 1208 1209 /* 1210 * Tell the memory management that we're a "memory allocator", 1211 * and that if we need more memory we should get access to it 1212 * regardless (see "__alloc_pages()"). "kswapd" should 1213 * never get caught in the normal page freeing logic. 1214 * 1215 * (Kswapd normally doesn't need memory anyway, but sometimes 1216 * you need a small amount of memory in order to be able to 1217 * page out something else, and this flag essentially protects 1218 * us from recursively trying to free more memory as we're 1219 * trying to free the first piece of memory in the first place). 1220 */ 1221 tsk->flags |= PF_MEMALLOC|PF_KSWAPD; 1222 1223 order = 0; 1224 for ( ; ; ) { 1225 unsigned long new_order; 1226 1227 try_to_freeze(); 1228 1229 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); 1230 new_order = pgdat->kswapd_max_order; 1231 pgdat->kswapd_max_order = 0; 1232 if (order < new_order) { 1233 /* 1234 * Don't sleep if someone wants a larger 'order' 1235 * allocation 1236 */ 1237 order = new_order; 1238 } else { 1239 schedule(); 1240 order = pgdat->kswapd_max_order; 1241 } 1242 finish_wait(&pgdat->kswapd_wait, &wait); 1243 1244 balance_pgdat(pgdat, 0, order); 1245 } 1246 return 0; 1247} 1248 1249/* 1250 * A zone is low on free memory, so wake its kswapd task to service it. 1251 */ 1252void wakeup_kswapd(struct zone *zone, int order) 1253{ 1254 pg_data_t *pgdat; 1255 1256 if (zone->present_pages == 0) 1257 return; 1258 1259 pgdat = zone->zone_pgdat; 1260 if (zone_watermark_ok(zone, order, zone->pages_low, 0, 0, 0)) 1261 return; 1262 if (pgdat->kswapd_max_order < order) 1263 pgdat->kswapd_max_order = order; 1264 if (!cpuset_zone_allowed(zone, __GFP_HARDWALL)) 1265 return; 1266 if (!waitqueue_active(&pgdat->kswapd_wait)) 1267 return; 1268 wake_up_interruptible(&pgdat->kswapd_wait); 1269} 1270 1271#ifdef CONFIG_PM 1272/* 1273 * Try to free `nr_pages' of memory, system-wide. Returns the number of freed 1274 * pages. 1275 */ 1276int shrink_all_memory(int nr_pages) 1277{ 1278 pg_data_t *pgdat; 1279 int nr_to_free = nr_pages; 1280 int ret = 0; 1281 struct reclaim_state reclaim_state = { 1282 .reclaimed_slab = 0, 1283 }; 1284 1285 current->reclaim_state = &reclaim_state; 1286 for_each_pgdat(pgdat) { 1287 int freed; 1288 freed = balance_pgdat(pgdat, nr_to_free, 0); 1289 ret += freed; 1290 nr_to_free -= freed; 1291 if (nr_to_free <= 0) 1292 break; 1293 } 1294 current->reclaim_state = NULL; 1295 return ret; 1296} 1297#endif 1298 1299#ifdef CONFIG_HOTPLUG_CPU 1300/* It's optimal to keep kswapds on the same CPUs as their memory, but 1301 not required for correctness. So if the last cpu in a node goes 1302 away, we get changed to run anywhere: as the first one comes back, 1303 restore their cpu bindings. */ 1304static int __devinit cpu_callback(struct notifier_block *nfb, 1305 unsigned long action, 1306 void *hcpu) 1307{ 1308 pg_data_t *pgdat; 1309 cpumask_t mask; 1310 1311 if (action == CPU_ONLINE) { 1312 for_each_pgdat(pgdat) { 1313 mask = node_to_cpumask(pgdat->node_id); 1314 if (any_online_cpu(mask) != NR_CPUS) 1315 /* One of our CPUs online: restore mask */ 1316 set_cpus_allowed(pgdat->kswapd, mask); 1317 } 1318 } 1319 return NOTIFY_OK; 1320} 1321#endif /* CONFIG_HOTPLUG_CPU */ 1322 1323static int __init kswapd_init(void) 1324{ 1325 pg_data_t *pgdat; 1326 swap_setup(); 1327 for_each_pgdat(pgdat) 1328 pgdat->kswapd 1329 = find_task_by_pid(kernel_thread(kswapd, pgdat, CLONE_KERNEL)); 1330 total_memory = nr_free_pagecache_pages(); 1331 hotcpu_notifier(cpu_callback, 0); 1332 return 0; 1333} 1334 1335module_init(kswapd_init) 1336 1337 1338/* 1339 * Try to free up some pages from this zone through reclaim. 1340 */ 1341int zone_reclaim(struct zone *zone, unsigned int gfp_mask, unsigned int order) 1342{ 1343 struct scan_control sc; 1344 int nr_pages = 1 << order; 1345 int total_reclaimed = 0; 1346 1347 /* The reclaim may sleep, so don't do it if sleep isn't allowed */ 1348 if (!(gfp_mask & __GFP_WAIT)) 1349 return 0; 1350 if (zone->all_unreclaimable) 1351 return 0; 1352 1353 sc.gfp_mask = gfp_mask; 1354 sc.may_writepage = 0; 1355 sc.may_swap = 0; 1356 sc.nr_mapped = read_page_state(nr_mapped); 1357 sc.nr_scanned = 0; 1358 sc.nr_reclaimed = 0; 1359 /* scan at the highest priority */ 1360 sc.priority = 0; 1361 1362 if (nr_pages > SWAP_CLUSTER_MAX) 1363 sc.swap_cluster_max = nr_pages; 1364 else 1365 sc.swap_cluster_max = SWAP_CLUSTER_MAX; 1366 1367 /* Don't reclaim the zone if there are other reclaimers active */ 1368 if (atomic_read(&zone->reclaim_in_progress) > 0) 1369 goto out; 1370 1371 shrink_zone(zone, &sc); 1372 total_reclaimed = sc.nr_reclaimed; 1373 1374 out: 1375 return total_reclaimed; 1376} 1377 1378asmlinkage long sys_set_zone_reclaim(unsigned int node, unsigned int zone, 1379 unsigned int state) 1380{ 1381 struct zone *z; 1382 int i; 1383 1384 if (!capable(CAP_SYS_ADMIN)) 1385 return -EACCES; 1386 1387 if (node >= MAX_NUMNODES || !node_online(node)) 1388 return -EINVAL; 1389 1390 /* This will break if we ever add more zones */ 1391 if (!(zone & (1<<ZONE_DMA|1<<ZONE_NORMAL|1<<ZONE_HIGHMEM))) 1392 return -EINVAL; 1393 1394 for (i = 0; i < MAX_NR_ZONES; i++) { 1395 if (!(zone & 1<<i)) 1396 continue; 1397 1398 z = &NODE_DATA(node)->node_zones[i]; 1399 1400 if (state) 1401 z->reclaim_pages = 1; 1402 else 1403 z->reclaim_pages = 0; 1404 } 1405 1406 return 0; 1407} 1408