vmscan.c revision 2903fb1694dcb08a3c1d9d823cfae7ba30e66cd3
/*
 *  linux/mm/vmscan.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95, Stephen Tweedie.
 *  kswapd added: 7.1.96  sct
 *  Removed kswapd_ctl limits, and swap out as many pages as needed
 *  to bring the system back to freepages.high: 2.4.97, Rik van Riel.
 *  Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
 *  Multiqueue VM started 5.8.00, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>	/* for try_to_release_page(),
					buffer_heads_over_limit */
#include <linux/mm_inline.h>
#include <linux/pagevec.h>
#include <linux/backing-dev.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/notifier.h>
#include <linux/rwsem.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>

#include <linux/swapops.h>

/* possible outcome of pageout() */
typedef enum {
	/* failed to write page out, page is locked */
	PAGE_KEEP,
	/* move page to the active list, page is locked */
	PAGE_ACTIVATE,
	/* page has been sent to the disk successfully, page is unlocked */
	PAGE_SUCCESS,
	/* page is clean and locked */
	PAGE_CLEAN,
} pageout_t;

struct scan_control {
	/* Ask refill_inactive_zone or shrink_cache to scan this many pages */
	unsigned long nr_to_scan;

	/* Incremented by the number of inactive pages that were scanned */
	unsigned long nr_scanned;

	/* Incremented by the number of pages reclaimed */
	unsigned long nr_reclaimed;

	unsigned long nr_mapped;	/* From page_state */

	/* Ask shrink_caches or shrink_zone to scan at this priority */
	unsigned int priority;

	/* This context's GFP mask */
	gfp_t gfp_mask;

	int may_writepage;

	/* Can pages be swapped as part of reclaim? */
	int may_swap;

	/* This context's SWAP_CLUSTER_MAX. If freeing memory for
	 * suspend, we effectively ignore SWAP_CLUSTER_MAX.
	 * In this context, it doesn't matter that we scan the
	 * whole list at once. */
	int swap_cluster_max;
};

/*
 * The list of shrinker callbacks used to apply pressure to
 * ageable caches.
 */
struct shrinker {
	shrinker_t		shrinker;
	struct list_head	list;
	int			seeks;	/* seeks to recreate an obj */
	long			nr;	/* objs pending delete */
};

#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))

#ifdef ARCH_HAS_PREFETCH
#define prefetch_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
									\
			prev = lru_to_page(&(_page->lru));		\
			prefetch(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
#endif

#ifdef ARCH_HAS_PREFETCHW
#define prefetchw_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
									\
			prev = lru_to_page(&(_page->lru));		\
			prefetchw(&prev->_field);		\
		}							\
	} while (0)
#else
#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
#endif

/*
 * From 0 .. 100.  Higher means more swappy.
 */
int vm_swappiness = 60;
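/*
 * Quick usage sketch (assuming the usual sysctl hookup of this knob as
 * /proc/sys/vm/swappiness in kernels of this era):
 *
 *	# cat /proc/sys/vm/swappiness
 *	60
 *	# echo 80 > /proc/sys/vm/swappiness	(more eager to swap)
 *	# echo 0  > /proc/sys/vm/swappiness	(avoid swap if possible)
 *
 * The value feeds into the swap_tendency calculation in
 * refill_inactive_zone() below.
 */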
static long total_memory;

static LIST_HEAD(shrinker_list);
static DECLARE_RWSEM(shrinker_rwsem);

/*
 * Add a shrinker callback to be called from the vm
 */
struct shrinker *set_shrinker(int seeks, shrinker_t theshrinker)
{
	struct shrinker *shrinker;

	shrinker = kmalloc(sizeof(*shrinker), GFP_KERNEL);
	if (shrinker) {
		shrinker->shrinker = theshrinker;
		shrinker->seeks = seeks;
		shrinker->nr = 0;
		down_write(&shrinker_rwsem);
		list_add_tail(&shrinker->list, &shrinker_list);
		up_write(&shrinker_rwsem);
	}
	return shrinker;
}
EXPORT_SYMBOL(set_shrinker);

/*
 * Remove one
 */
void remove_shrinker(struct shrinker *shrinker)
{
	down_write(&shrinker_rwsem);
	list_del(&shrinker->list);
	up_write(&shrinker_rwsem);
	kfree(shrinker);
}
EXPORT_SYMBOL(remove_shrinker);

#define SHRINK_BATCH 128
/*
 * Call the shrink functions to age shrinkable caches
 *
 * Here we assume it costs one seek to replace a lru page and that it also
 * takes a seek to recreate a cache object.  With this in mind we age equal
 * percentages of the lru and ageable caches.  This should balance the seeks
 * generated by these structures.
 *
 * If the vm encountered mapped pages on the LRU it increases the pressure on
 * slab to avoid swapping.
 *
 * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
 *
 * `lru_pages' represents the number of on-LRU pages in all the zones which
 * are eligible for the caller's allocation attempt.  It is used for balancing
 * slab reclaim versus page reclaim.
 *
 * Returns the number of slab objects which we shrunk.
 */
int shrink_slab(unsigned long scanned, gfp_t gfp_mask, unsigned long lru_pages)
{
	struct shrinker *shrinker;
	int ret = 0;

	if (scanned == 0)
		scanned = SWAP_CLUSTER_MAX;

	if (!down_read_trylock(&shrinker_rwsem))
		return 1;	/* Assume we'll be able to shrink next time */

	list_for_each_entry(shrinker, &shrinker_list, list) {
		unsigned long long delta;
		unsigned long total_scan;
		unsigned long max_pass = (*shrinker->shrinker)(0, gfp_mask);

		delta = (4 * scanned) / shrinker->seeks;
		delta *= max_pass;
		do_div(delta, lru_pages + 1);
		shrinker->nr += delta;
		if (shrinker->nr < 0) {
			printk(KERN_ERR "%s: nr=%ld\n",
					__FUNCTION__, shrinker->nr);
			shrinker->nr = max_pass;
		}
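		/*
		 * Worked example with illustrative numbers (not taken from
		 * any real workload): scanned = 128, seeks = 2,
		 * max_pass = 10000 objects, lru_pages = 100000.  Then
		 * delta = (4*128/2) * 10000 / 100001 ~= 25, so about 25
		 * objects are queued for scanning -- slab caches are aged
		 * in proportion to the fraction of the LRU just scanned.
		 */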
		/*
		 * Avoid risking looping forever due to too large nr value:
		 * never try to free more than twice the estimated number of
		 * freeable entries.
		 */
		if (shrinker->nr > max_pass * 2)
			shrinker->nr = max_pass * 2;

		total_scan = shrinker->nr;
		shrinker->nr = 0;

		while (total_scan >= SHRINK_BATCH) {
			long this_scan = SHRINK_BATCH;
			int shrink_ret;
			int nr_before;

			nr_before = (*shrinker->shrinker)(0, gfp_mask);
			shrink_ret = (*shrinker->shrinker)(this_scan, gfp_mask);
			if (shrink_ret == -1)
				break;
			if (shrink_ret < nr_before)
				ret += nr_before - shrink_ret;
			mod_page_state(slabs_scanned, this_scan);
			total_scan -= this_scan;

			cond_resched();
		}

		shrinker->nr += total_scan;
	}
	up_read(&shrinker_rwsem);
	return ret;
}

/* Called without lock on whether page is mapped, so answer is unstable */
static inline int page_mapping_inuse(struct page *page)
{
	struct address_space *mapping;

	/* Page is in somebody's page tables. */
	if (page_mapped(page))
		return 1;

	/* Be more reluctant to reclaim swapcache than pagecache */
	if (PageSwapCache(page))
		return 1;

	mapping = page_mapping(page);
	if (!mapping)
		return 0;

	/* File is mmap'd by somebody? */
	return mapping_mapped(mapping);
}

static inline int is_page_cache_freeable(struct page *page)
{
	return page_count(page) - !!PagePrivate(page) == 2;
}

static int may_write_to_queue(struct backing_dev_info *bdi)
{
	if (current->flags & PF_SWAPWRITE)
		return 1;
	if (!bdi_write_congested(bdi))
		return 1;
	if (bdi == current->backing_dev_info)
		return 1;
	return 0;
}

/*
 * We detected a synchronous write error writing a page out.  Probably
 * -ENOSPC.  We need to propagate that into the address_space for a subsequent
 * fsync(), msync() or close().
 *
 * The tricky part is that after writepage we cannot touch the mapping: nothing
 * prevents it from being freed up.  But we have a ref on the page and once
 * that page is locked, the mapping is pinned.
 *
 * We're allowed to run sleeping lock_page() here because we know the caller has
 * __GFP_FS.
 */
static void handle_write_error(struct address_space *mapping,
				struct page *page, int error)
{
	lock_page(page);
	if (page_mapping(page) == mapping) {
		if (error == -ENOSPC)
			set_bit(AS_ENOSPC, &mapping->flags);
		else
			set_bit(AS_EIO, &mapping->flags);
	}
	unlock_page(page);
}
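/*
 * Sketch of how the error above surfaces to userspace (assuming the
 * usual filemap behaviour of this era): a later fsync() or msync()
 * finds AS_EIO or AS_ENOSPC set on the mapping, clears the bit and
 * returns -EIO or -ENOSPC to the caller, even though the failing write
 * was issued here, inside reclaim, long after the application's own
 * write() returned.
 */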
/*
 * pageout is called by shrink_list() for each dirty page. Calls ->writepage().
 */
static pageout_t pageout(struct page *page, struct address_space *mapping)
{
	/*
	 * If the page is dirty, only perform writeback if that write
	 * will be non-blocking, to prevent this allocation from being
	 * stalled by pagecache activity.  But note that there may be
	 * stalls if we need to run get_block().  We could test
	 * PagePrivate for that.
	 *
	 * If this process is currently in generic_file_write() against
	 * this page's queue, we can perform writeback even if that
	 * will block.
	 *
	 * If the page is swapcache, write it back even if that would
	 * block, for some throttling.  This happens by accident, because
	 * swap_backing_dev_info is bust: it doesn't reflect the
	 * congestion state of the swapdevs.  Easy to fix, if needed.
	 * See swapfile.c:page_queue_congested().
	 */
	if (!is_page_cache_freeable(page))
		return PAGE_KEEP;
	if (!mapping) {
		/*
		 * Some data journaling orphaned pages can have
		 * page->mapping == NULL while being dirty with clean buffers.
		 */
		if (PagePrivate(page)) {
			if (try_to_free_buffers(page)) {
				ClearPageDirty(page);
				printk("%s: orphaned page\n", __FUNCTION__);
				return PAGE_CLEAN;
			}
		}
		return PAGE_KEEP;
	}
	if (mapping->a_ops->writepage == NULL)
		return PAGE_ACTIVATE;
	if (!may_write_to_queue(mapping->backing_dev_info))
		return PAGE_KEEP;

	if (clear_page_dirty_for_io(page)) {
		int res;
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_NONE,
			.nr_to_write = SWAP_CLUSTER_MAX,
			.nonblocking = 1,
			.for_reclaim = 1,
		};

		SetPageReclaim(page);
		res = mapping->a_ops->writepage(page, &wbc);
		if (res < 0)
			handle_write_error(mapping, page, res);
		if (res == AOP_WRITEPAGE_ACTIVATE) {
			ClearPageReclaim(page);
			return PAGE_ACTIVATE;
		}
		if (!PageWriteback(page)) {
			/* synchronous write or broken a_ops? */
			ClearPageReclaim(page);
		}

		return PAGE_SUCCESS;
	}

	return PAGE_CLEAN;
}

static int remove_mapping(struct address_space *mapping, struct page *page)
{
	if (!mapping)
		return 0;		/* truncate got there first */

	write_lock_irq(&mapping->tree_lock);

	/*
	 * The non-racy check for busy page.  It is critical to check
	 * PageDirty _after_ making sure that the page is freeable and
	 * not in use by anybody.  (pagecache + us == 2)
	 */
	if (unlikely(page_count(page) != 2))
		goto cannot_free;
	smp_rmb();
	if (unlikely(PageDirty(page)))
		goto cannot_free;

	if (PageSwapCache(page)) {
		swp_entry_t swap = { .val = page_private(page) };
		__delete_from_swap_cache(page);
		write_unlock_irq(&mapping->tree_lock);
		swap_free(swap);
		__put_page(page);	/* The pagecache ref */
		return 1;
	}

	__remove_from_page_cache(page);
	write_unlock_irq(&mapping->tree_lock);
	__put_page(page);
	return 1;

cannot_free:
	write_unlock_irq(&mapping->tree_lock);
	return 0;
}
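/*
 * Reference-count bookkeeping behind the "== 2" checks in
 * is_page_cache_freeable() and remove_mapping() above: the radix tree
 * holds one reference on behalf of the page cache, and the reclaim
 * path, having isolated the page from the LRU, holds another -- hence
 * "pagecache + us == 2".  Any additional reference means a concurrent
 * user (a racing lookup, in-flight I/O, ...) and the page must be left
 * alone.  is_page_cache_freeable() additionally discounts the
 * reference that attached buffer_heads pin via PagePrivate.
 */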
/*
 * shrink_list adds the number of reclaimed pages to sc->nr_reclaimed
 */
static int shrink_list(struct list_head *page_list, struct scan_control *sc)
{
	LIST_HEAD(ret_pages);
	struct pagevec freed_pvec;
	int pgactivate = 0;
	int reclaimed = 0;

	cond_resched();

	pagevec_init(&freed_pvec, 1);
	while (!list_empty(page_list)) {
		struct address_space *mapping;
		struct page *page;
		int may_enter_fs;
		int referenced;

		cond_resched();

		page = lru_to_page(page_list);
		list_del(&page->lru);

		if (TestSetPageLocked(page))
			goto keep;

		BUG_ON(PageActive(page));

		sc->nr_scanned++;

		if (!sc->may_swap && page_mapped(page))
			goto keep_locked;

		/* Double the slab pressure for mapped and swapcache pages */
		if (page_mapped(page) || PageSwapCache(page))
			sc->nr_scanned++;

		if (PageWriteback(page))
			goto keep_locked;

		referenced = page_referenced(page, 1);
		/* In active use or really unfreeable?  Activate it. */
		if (referenced && page_mapping_inuse(page))
			goto activate_locked;

#ifdef CONFIG_SWAP
		/*
		 * Anonymous process memory has backing store?
		 * Try to allocate it some swap space here.
		 */
		if (PageAnon(page) && !PageSwapCache(page)) {
			if (!sc->may_swap)
				goto keep_locked;
			if (!add_to_swap(page, GFP_ATOMIC))
				goto activate_locked;
		}
#endif /* CONFIG_SWAP */

		mapping = page_mapping(page);
		may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
			(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));

		/*
		 * The page is mapped into the page tables of one or more
		 * processes. Try to unmap it here.
		 */
		if (page_mapped(page) && mapping) {
			/*
			 * No unmapping if we do not swap
			 */
			if (!sc->may_swap)
				goto keep_locked;

			switch (try_to_unmap(page, 0)) {
			case SWAP_FAIL:
				goto activate_locked;
			case SWAP_AGAIN:
				goto keep_locked;
			case SWAP_SUCCESS:
				; /* try to free the page below */
			}
		}

		if (PageDirty(page)) {
			if (referenced)
				goto keep_locked;
			if (!may_enter_fs)
				goto keep_locked;
			if (!sc->may_writepage)
				goto keep_locked;

			/* Page is dirty, try to write it out here */
			switch(pageout(page, mapping)) {
			case PAGE_KEEP:
				goto keep_locked;
			case PAGE_ACTIVATE:
				goto activate_locked;
			case PAGE_SUCCESS:
				if (PageWriteback(page) || PageDirty(page))
					goto keep;
				/*
				 * A synchronous write - probably a ramdisk.  Go
				 * ahead and try to reclaim the page.
				 */
				if (TestSetPageLocked(page))
					goto keep;
				if (PageDirty(page) || PageWriteback(page))
					goto keep_locked;
				mapping = page_mapping(page);
			case PAGE_CLEAN:
				; /* try to free the page below */
			}
		}
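		/*
		 * Aside on the may_enter_fs test above: a GFP_NOFS
		 * allocation (__GFP_FS clear) may be running with
		 * filesystem locks held, so calling back into ->writepage
		 * could deadlock; such callers skip pageout() for dirty
		 * file pages.  Swapcache pages only need __GFP_IO, since
		 * writing them goes to the block layer, not a filesystem.
		 */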
		/*
		 * If the page has buffers, try to free the buffer mappings
		 * associated with this page. If we succeed we try to free
		 * the page as well.
		 *
		 * We do this even if the page is PageDirty().
		 * try_to_release_page() does not perform I/O, but it is
		 * possible for a page to have PageDirty set, but it is actually
		 * clean (all its buffers are clean).  This happens if the
		 * buffers were written out directly, with submit_bh(). ext3
		 * will do this, as well as the blockdev mapping.
		 * try_to_release_page() will discover that cleanness and will
		 * drop the buffers and mark the page clean - it can be freed.
		 *
		 * Rarely, pages can have buffers and no ->mapping.  These are
		 * the pages which were not successfully invalidated in
		 * truncate_complete_page().  We try to drop those buffers here
		 * and if that worked, and the page is no longer mapped into
		 * process address space (page_count == 1) it can be freed.
		 * Otherwise, leave the page on the LRU so it is swappable.
		 */
		if (PagePrivate(page)) {
			if (!try_to_release_page(page, sc->gfp_mask))
				goto activate_locked;
			if (!mapping && page_count(page) == 1)
				goto free_it;
		}

		if (!remove_mapping(mapping, page))
			goto keep_locked;

free_it:
		unlock_page(page);
		reclaimed++;
		if (!pagevec_add(&freed_pvec, page))
			__pagevec_release_nonlru(&freed_pvec);
		continue;

activate_locked:
		SetPageActive(page);
		pgactivate++;
keep_locked:
		unlock_page(page);
keep:
		list_add(&page->lru, &ret_pages);
		BUG_ON(PageLRU(page));
	}
	list_splice(&ret_pages, page_list);
	if (pagevec_count(&freed_pvec))
		__pagevec_release_nonlru(&freed_pvec);
	mod_page_state(pgactivate, pgactivate);
	sc->nr_reclaimed += reclaimed;
	return reclaimed;
}

#ifdef CONFIG_MIGRATION
static inline void move_to_lru(struct page *page)
{
	list_del(&page->lru);
	if (PageActive(page)) {
		/*
		 * lru_cache_add_active checks that
		 * the PG_active bit is off.
		 */
		ClearPageActive(page);
		lru_cache_add_active(page);
	} else {
		lru_cache_add(page);
	}
	put_page(page);
}

/*
 * Add isolated pages on the list back to the LRU.
 *
 * returns the number of pages put back.
 */
int putback_lru_pages(struct list_head *l)
{
	struct page *page;
	struct page *page2;
	int count = 0;

	list_for_each_entry_safe(page, page2, l, lru) {
		move_to_lru(page);
		count++;
	}
	return count;
}

/*
 * Non migratable page
 */
int fail_migrate_page(struct page *newpage, struct page *page)
{
	return -EIO;
}
EXPORT_SYMBOL(fail_migrate_page);

/*
 * swapout a single page
 * page is locked upon entry, unlocked on exit
 */
static int swap_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (page_mapped(page) && mapping)
		if (try_to_unmap(page, 1) != SWAP_SUCCESS)
			goto unlock_retry;

	if (PageDirty(page)) {
		/* Page is dirty, try to write it out here */
		switch(pageout(page, mapping)) {
		case PAGE_KEEP:
		case PAGE_ACTIVATE:
			goto unlock_retry;

		case PAGE_SUCCESS:
			goto retry;

		case PAGE_CLEAN:
			; /* try to free the page below */
		}
	}

	if (PagePrivate(page)) {
		if (!try_to_release_page(page, GFP_KERNEL) ||
		    (!mapping && page_count(page) == 1))
			goto unlock_retry;
	}

	if (remove_mapping(mapping, page)) {
		/* Success */
		unlock_page(page);
		return 0;
	}

unlock_retry:
	unlock_page(page);

retry:
	return -EAGAIN;
}
EXPORT_SYMBOL(swap_page);
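/*
 * A rough sketch of how a migration caller strings these pieces
 * together (illustrative only; real callers such as the memory hotplug
 * and NUMA policy code add their own setup and error handling):
 *
 *	LIST_HEAD(from); LIST_HEAD(to);
 *	LIST_HEAD(moved); LIST_HEAD(failed);
 *
 *	for each candidate page:
 *		if (isolate_lru_page(page))
 *			list_add(&page->lru, &from);
 *	... allocate target pages onto "to" ...
 *	migrate_pages(&from, &to, &moved, &failed);
 *	putback_lru_pages(&moved);	// migrated: back onto the LRU
 *	putback_lru_pages(&failed);	// permanent failures
 *	putback_lru_pages(&from);	// pages still awaiting retry
 */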
/*
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter <clameter@sgi.com>
 */

/*
 * Remove references for a page and establish the new page with the correct
 * basic settings to be able to stop accesses to the page.
 */
int migrate_page_remove_references(struct page *newpage,
				struct page *page, int nr_refs)
{
	struct address_space *mapping = page_mapping(page);
	struct page **radix_pointer;

	/*
	 * Avoid doing any of the following work if the page count
	 * indicates that the page is in use or truncate has removed
	 * the page.
	 */
	if (!mapping || page_mapcount(page) + nr_refs != page_count(page))
		return 1;

	/*
	 * Establish swap ptes for anonymous pages or destroy pte
	 * maps for files.
	 *
	 * In order to reestablish file backed mappings the fault handlers
	 * will take the radix tree_lock which may then be used to stop
	 * processes from accessing this page until the new page is ready.
	 *
	 * A process accessing via a swap pte (an anonymous page) will take a
	 * page_lock on the old page which will block the process until the
	 * migration attempt is complete. At that time the PageSwapCache bit
	 * will be examined. If the page was migrated then the PageSwapCache
	 * bit will be clear and the operation to retrieve the page will be
	 * retried which will find the new page in the radix tree. Then a new
	 * direct mapping may be generated based on the radix tree contents.
	 *
	 * If the page was not migrated then the PageSwapCache bit
	 * is still set and the operation may continue.
	 */
	try_to_unmap(page, 1);

	/*
	 * Give up if we were unable to remove all mappings.
	 */
	if (page_mapcount(page))
		return 1;

	write_lock_irq(&mapping->tree_lock);

	radix_pointer = (struct page **)radix_tree_lookup_slot(
						&mapping->page_tree,
						page_index(page));

	if (!page_mapping(page) || page_count(page) != nr_refs ||
			*radix_pointer != page) {
		write_unlock_irq(&mapping->tree_lock);
		return 1;
	}

	/*
	 * Now we know that no one else is looking at the page.
	 *
	 * Certain minimal information about a page must be available
	 * in order for other subsystems to properly handle the page if they
	 * find it through the radix tree update before we are finished
	 * copying the page.
	 */
	get_page(newpage);
	newpage->index = page->index;
	newpage->mapping = page->mapping;
	if (PageSwapCache(page)) {
		SetPageSwapCache(newpage);
		set_page_private(newpage, page_private(page));
	}

	*radix_pointer = newpage;
	__put_page(page);
	write_unlock_irq(&mapping->tree_lock);

	return 0;
}
EXPORT_SYMBOL(migrate_page_remove_references);

/*
 * Copy the page to its new location
 */
void migrate_page_copy(struct page *newpage, struct page *page)
{
	copy_highpage(newpage, page);

	if (PageError(page))
		SetPageError(newpage);
	if (PageReferenced(page))
		SetPageReferenced(newpage);
	if (PageUptodate(page))
		SetPageUptodate(newpage);
	if (PageActive(page))
		SetPageActive(newpage);
	if (PageChecked(page))
		SetPageChecked(newpage);
	if (PageMappedToDisk(page))
		SetPageMappedToDisk(newpage);

	if (PageDirty(page)) {
		clear_page_dirty_for_io(page);
		set_page_dirty(newpage);
	}

	ClearPageSwapCache(page);
	ClearPageActive(page);
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page->mapping = NULL;

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (PageWriteback(newpage))
		end_page_writeback(newpage);
}
EXPORT_SYMBOL(migrate_page_copy);
/*
 * Common logic to directly migrate a single page suitable for
 * pages that do not use PagePrivate.
 *
 * Pages are locked upon entry and exit.
 */
int migrate_page(struct page *newpage, struct page *page)
{
	BUG_ON(PageWriteback(page));	/* Writeback must be complete */

	if (migrate_page_remove_references(newpage, page, 2))
		return -EAGAIN;

	migrate_page_copy(newpage, page);

	/*
	 * Remove auxiliary swap entries and replace
	 * them with real ptes.
	 *
	 * Note that a real pte entry will allow processes that are not
	 * waiting on the page lock to use the new page via the page tables
	 * before the new page is unlocked.
	 */
	remove_from_swap(newpage);
	return 0;
}
EXPORT_SYMBOL(migrate_page);

/*
 * migrate_pages
 *
 * Two lists are passed to this function. The first list
 * contains the pages isolated from the LRU to be migrated.
 * The second list contains new pages that the pages isolated
 * can be moved to. If the second list is NULL then all
 * pages are swapped out.
 *
 * The function returns after 10 attempts or if no pages
 * are movable anymore because "to" has become empty
 * or no retryable pages exist anymore.
 *
 * Return: Number of pages not migrated when "to" ran empty.
 */
int migrate_pages(struct list_head *from, struct list_head *to,
		  struct list_head *moved, struct list_head *failed)
{
	int retry;
	int nr_failed = 0;
	int pass = 0;
	struct page *page;
	struct page *page2;
	int swapwrite = current->flags & PF_SWAPWRITE;
	int rc;

	if (!swapwrite)
		current->flags |= PF_SWAPWRITE;

redo:
	retry = 0;

	list_for_each_entry_safe(page, page2, from, lru) {
		struct page *newpage = NULL;
		struct address_space *mapping;

		cond_resched();

		rc = 0;
		if (page_count(page) == 1)
			/* page was freed from under us. So we are done. */
			goto next;

		if (to && list_empty(to))
			break;

		/*
		 * Skip locked pages during the first two passes to give the
		 * functions holding the lock time to release the page. Later we
		 * use lock_page() to have a higher chance of acquiring the
		 * lock.
		 */
		rc = -EAGAIN;
		if (pass > 2)
			lock_page(page);
		else
			if (TestSetPageLocked(page))
				goto next;

		/*
		 * Only wait on writeback if we have already done a pass where
		 * we may have triggered writeouts for lots of pages.
		 */
		if (pass > 0) {
			wait_on_page_writeback(page);
		} else {
			if (PageWriteback(page))
				goto unlock_page;
		}

		/*
		 * Anonymous pages must have swap cache references otherwise
		 * the information contained in the page maps cannot be
		 * preserved.
		 */
		if (PageAnon(page) && !PageSwapCache(page)) {
			if (!add_to_swap(page, GFP_KERNEL)) {
				rc = -ENOMEM;
				goto unlock_page;
			}
		}

		if (!to) {
			rc = swap_page(page);
			goto next;
		}

		newpage = lru_to_page(to);
		lock_page(newpage);
		/*
		 * Pages are properly locked and writeback is complete.
		 * Try to migrate the page.
		 */
		mapping = page_mapping(page);
		if (!mapping)
			goto unlock_both;

		if (mapping->a_ops->migratepage) {
			/*
			 * Most pages have a mapping and most filesystems
			 * should provide a migration function. Anonymous
			 * pages are part of swap space which also has its
			 * own migration function. This is the most common
			 * path for page migration.
			 */
			rc = mapping->a_ops->migratepage(newpage, page);
			goto unlock_both;
		}

		/*
		 * Default handling if a filesystem does not provide
		 * a migration function. We can only migrate clean
		 * pages so try to write out any dirty pages first.
		 */
		if (PageDirty(page)) {
			switch (pageout(page, mapping)) {
			case PAGE_KEEP:
			case PAGE_ACTIVATE:
				goto unlock_both;

			case PAGE_SUCCESS:
				unlock_page(newpage);
				goto next;

			case PAGE_CLEAN:
				; /* try to migrate the page below */
			}
		}

		/*
		 * Buffers are managed in a filesystem specific way.
		 * We must have no buffers or drop them.
		 */
		if (!page_has_buffers(page) ||
		    try_to_release_page(page, GFP_KERNEL)) {
			rc = migrate_page(newpage, page);
			goto unlock_both;
		}

		/*
		 * On early passes with mapped pages simply
		 * retry. There may be a lock held for some
		 * buffers that may go away. Later
		 * swap them out.
		 */
		if (pass > 4) {
			/*
			 * Persistently unable to drop buffers..... As a
			 * measure of last resort we fall back to
			 * swap_page().
			 */
			unlock_page(newpage);
			newpage = NULL;
			rc = swap_page(page);
			goto next;
		}

unlock_both:
		unlock_page(newpage);

unlock_page:
		unlock_page(page);

next:
		if (rc == -EAGAIN) {
			retry++;
		} else if (rc) {
			/* Permanent failure */
			list_move(&page->lru, failed);
			nr_failed++;
		} else {
			if (newpage) {
				/* Successful migration. Return page to LRU */
				move_to_lru(newpage);
			}
			list_move(&page->lru, moved);
		}
	}
	if (retry && pass++ < 10)
		goto redo;

	if (!swapwrite)
		current->flags &= ~PF_SWAPWRITE;

	return nr_failed + retry;
}

/*
 * Isolate one page from the LRU lists.  The page is removed with an
 * elevated refcount; the caller is expected to place it on a private
 * list.
 *
 * Result:
 *  0 = page not on LRU list
 *  1 = page removed from LRU list.
 */
int isolate_lru_page(struct page *page)
{
	int ret = 0;

	if (PageLRU(page)) {
		struct zone *zone = page_zone(page);
		spin_lock_irq(&zone->lru_lock);
		if (TestClearPageLRU(page)) {
			ret = 1;
			get_page(page);
			if (PageActive(page))
				del_page_from_active_list(zone, page);
			else
				del_page_from_inactive_list(zone, page);
		}
		spin_unlock_irq(&zone->lru_lock);
	}

	return ret;
}
#endif
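/*
 * Orientation note for the scanners below: new pages are added at the
 * head of an LRU list, and lru_to_page() follows ->prev, so reclaim
 * always works from the tail -- the least recently added end.  This is
 * what makes these lists behave as an LRU approximation.
 */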
/*
 * zone->lru_lock is heavily contended.  Some of the functions that
 * shrink the lists perform better by taking out a batch of pages
 * and working on them outside the LRU lock.
 *
 * For pagecache intensive workloads, this function is the hottest
 * spot in the kernel (apart from copy_*_user functions).
 *
 * Appropriate locks must be held before calling this function.
 *
 * @nr_to_scan:	The number of pages to look through on the list.
 * @src:	The LRU list to pull pages off.
 * @dst:	The temp list to put pages on to.
 * @scanned:	The number of pages that were scanned.
 *
 * returns how many pages were moved onto *@dst.
 */
static int isolate_lru_pages(int nr_to_scan, struct list_head *src,
			     struct list_head *dst, int *scanned)
{
	int nr_taken = 0;
	struct page *page;
	int scan = 0;

	while (scan++ < nr_to_scan && !list_empty(src)) {
		page = lru_to_page(src);
		prefetchw_prev_lru_page(page, src, flags);

		if (!TestClearPageLRU(page))
			BUG();
		list_del(&page->lru);
		if (get_page_testone(page)) {
			/*
			 * It is being freed elsewhere
			 */
			__put_page(page);
			SetPageLRU(page);
			list_add(&page->lru, src);
			continue;
		} else {
			list_add(&page->lru, dst);
			nr_taken++;
		}
	}

	*scanned = scan;
	return nr_taken;
}

/*
 * shrink_cache() adds the number of pages reclaimed to sc->nr_reclaimed
 */
static void shrink_cache(struct zone *zone, struct scan_control *sc)
{
	LIST_HEAD(page_list);
	struct pagevec pvec;
	int max_scan = sc->nr_to_scan;

	pagevec_init(&pvec, 1);

	lru_add_drain();
	spin_lock_irq(&zone->lru_lock);
	while (max_scan > 0) {
		struct page *page;
		int nr_taken;
		int nr_scan;
		int nr_freed;

		nr_taken = isolate_lru_pages(sc->swap_cluster_max,
					     &zone->inactive_list,
					     &page_list, &nr_scan);
		zone->nr_inactive -= nr_taken;
		zone->pages_scanned += nr_scan;
		spin_unlock_irq(&zone->lru_lock);

		if (nr_taken == 0)
			goto done;

		max_scan -= nr_scan;
		nr_freed = shrink_list(&page_list, sc);

		local_irq_disable();
		if (current_is_kswapd()) {
			__mod_page_state_zone(zone, pgscan_kswapd, nr_scan);
			__mod_page_state(kswapd_steal, nr_freed);
		} else
			__mod_page_state_zone(zone, pgscan_direct, nr_scan);
		__mod_page_state_zone(zone, pgsteal, nr_freed);

		spin_lock(&zone->lru_lock);
		/*
		 * Put back any unfreeable pages.
		 */
		while (!list_empty(&page_list)) {
			page = lru_to_page(&page_list);
			if (TestSetPageLRU(page))
				BUG();
			list_del(&page->lru);
			if (PageActive(page))
				add_page_to_active_list(zone, page);
			else
				add_page_to_inactive_list(zone, page);
			if (!pagevec_add(&pvec, page)) {
				spin_unlock_irq(&zone->lru_lock);
				__pagevec_release(&pvec);
				spin_lock_irq(&zone->lru_lock);
			}
		}
	}
	spin_unlock_irq(&zone->lru_lock);
done:
	pagevec_release(&pvec);
}
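/*
 * Worked example for the swap_tendency heuristic computed in
 * refill_inactive_zone() below (illustrative numbers): suppose 40% of
 * memory is mapped (mapped_ratio = 40) and the zone was last scanned
 * at prev_priority 6, so distress = 100 >> 6 = 1.  With the default
 * vm_swappiness of 60, swap_tendency = 40/2 + 1 + 60 = 81 < 100 and
 * mapped pages are left alone.  Drop prev_priority to 0 (reclaim is
 * struggling) and distress becomes 100, pushing the sum well past 100,
 * so mapped pages become eligible for deactivation.
 */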
/*
 * This moves pages from the active list to the inactive list.
 *
 * We move them the other way if the page is referenced by one or more
 * processes, from rmap.
 *
 * If the pages are mostly unmapped, the processing is fast and it is
 * appropriate to hold zone->lru_lock across the whole operation.  But if
 * the pages are mapped, the processing is slow (page_referenced()) so we
 * should drop zone->lru_lock around each page.  It's impossible to balance
 * this, so instead we remove the pages from the LRU while processing them.
 * It is safe to rely on PG_active against the non-LRU pages in here because
 * nobody will play with that bit on a non-LRU page.
 *
 * The downside is that we have to touch page->_count against each page.
 * But we had to alter page->flags anyway.
 */
static void
refill_inactive_zone(struct zone *zone, struct scan_control *sc)
{
	int pgmoved;
	int pgdeactivate = 0;
	int pgscanned;
	int nr_pages = sc->nr_to_scan;
	LIST_HEAD(l_hold);	/* The pages which were snipped off */
	LIST_HEAD(l_inactive);	/* Pages to go onto the inactive_list */
	LIST_HEAD(l_active);	/* Pages to go onto the active_list */
	struct page *page;
	struct pagevec pvec;
	int reclaim_mapped = 0;

	if (unlikely(sc->may_swap)) {
		long mapped_ratio;
		long distress;
		long swap_tendency;

		/*
		 * `distress' is a measure of how much trouble we're having
		 * reclaiming pages.  0 -> no problems.  100 -> great trouble.
		 */
		distress = 100 >> zone->prev_priority;

		/*
		 * The point of this algorithm is to decide when to start
		 * reclaiming mapped memory instead of just pagecache.  Work
		 * out how much memory is mapped.
		 */
		mapped_ratio = (sc->nr_mapped * 100) / total_memory;

		/*
		 * Now decide how much we really want to unmap some pages.  The
		 * mapped ratio is downgraded - just because there's a lot of
		 * mapped memory doesn't necessarily mean that page reclaim
		 * isn't succeeding.
		 *
		 * The distress ratio is important - we don't want to start
		 * going oom.
		 *
		 * A 100% value of vm_swappiness overrides this algorithm
		 * altogether.
		 */
		swap_tendency = mapped_ratio / 2 + distress + vm_swappiness;

		/*
		 * Now use this metric to decide whether to start moving mapped
		 * memory onto the inactive list.
		 */
		if (swap_tendency >= 100)
			reclaim_mapped = 1;
	}

	lru_add_drain();
	spin_lock_irq(&zone->lru_lock);
	pgmoved = isolate_lru_pages(nr_pages, &zone->active_list,
				    &l_hold, &pgscanned);
	zone->pages_scanned += pgscanned;
	zone->nr_active -= pgmoved;
	spin_unlock_irq(&zone->lru_lock);

	while (!list_empty(&l_hold)) {
		cond_resched();
		page = lru_to_page(&l_hold);
		list_del(&page->lru);
		if (page_mapped(page)) {
			if (!reclaim_mapped ||
			    (total_swap_pages == 0 && PageAnon(page)) ||
			    page_referenced(page, 0)) {
				list_add(&page->lru, &l_active);
				continue;
			}
		}
		list_add(&page->lru, &l_inactive);
	}

	pagevec_init(&pvec, 1);
	pgmoved = 0;
	spin_lock_irq(&zone->lru_lock);
	while (!list_empty(&l_inactive)) {
		page = lru_to_page(&l_inactive);
		prefetchw_prev_lru_page(page, &l_inactive, flags);
		if (TestSetPageLRU(page))
			BUG();
		if (!TestClearPageActive(page))
			BUG();
		list_move(&page->lru, &zone->inactive_list);
		pgmoved++;
		if (!pagevec_add(&pvec, page)) {
			zone->nr_inactive += pgmoved;
			spin_unlock_irq(&zone->lru_lock);
			pgdeactivate += pgmoved;
			pgmoved = 0;
			if (buffer_heads_over_limit)
				pagevec_strip(&pvec);
			__pagevec_release(&pvec);
			spin_lock_irq(&zone->lru_lock);
		}
	}
	zone->nr_inactive += pgmoved;
	pgdeactivate += pgmoved;
	if (buffer_heads_over_limit) {
		spin_unlock_irq(&zone->lru_lock);
		pagevec_strip(&pvec);
		spin_lock_irq(&zone->lru_lock);
	}

	pgmoved = 0;
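	/*
	 * As above, drain pages back onto the zone lists through a
	 * pagevec: zone->lru_lock is dropped and retaken only once per
	 * batch rather than once per page, which keeps hold times short
	 * on this heavily contended lock.
	 */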
	while (!list_empty(&l_active)) {
		page = lru_to_page(&l_active);
		prefetchw_prev_lru_page(page, &l_active, flags);
		if (TestSetPageLRU(page))
			BUG();
		BUG_ON(!PageActive(page));
		list_move(&page->lru, &zone->active_list);
		pgmoved++;
		if (!pagevec_add(&pvec, page)) {
			zone->nr_active += pgmoved;
			pgmoved = 0;
			spin_unlock_irq(&zone->lru_lock);
			__pagevec_release(&pvec);
			spin_lock_irq(&zone->lru_lock);
		}
	}
	zone->nr_active += pgmoved;
	spin_unlock(&zone->lru_lock);

	__mod_page_state_zone(zone, pgrefill, pgscanned);
	__mod_page_state(pgdeactivate, pgdeactivate);
	local_irq_enable();

	pagevec_release(&pvec);
}

/*
 * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
 */
static void
shrink_zone(struct zone *zone, struct scan_control *sc)
{
	unsigned long nr_active;
	unsigned long nr_inactive;

	atomic_inc(&zone->reclaim_in_progress);

	/*
	 * Add one to `nr_to_scan' just to make sure that the kernel will
	 * slowly sift through the active list.
	 */
	zone->nr_scan_active += (zone->nr_active >> sc->priority) + 1;
	nr_active = zone->nr_scan_active;
	if (nr_active >= sc->swap_cluster_max)
		zone->nr_scan_active = 0;
	else
		nr_active = 0;

	zone->nr_scan_inactive += (zone->nr_inactive >> sc->priority) + 1;
	nr_inactive = zone->nr_scan_inactive;
	if (nr_inactive >= sc->swap_cluster_max)
		zone->nr_scan_inactive = 0;
	else
		nr_inactive = 0;

	while (nr_active || nr_inactive) {
		if (nr_active) {
			sc->nr_to_scan = min(nr_active,
					(unsigned long)sc->swap_cluster_max);
			nr_active -= sc->nr_to_scan;
			refill_inactive_zone(zone, sc);
		}

		if (nr_inactive) {
			sc->nr_to_scan = min(nr_inactive,
					(unsigned long)sc->swap_cluster_max);
			nr_inactive -= sc->nr_to_scan;
			shrink_cache(zone, sc);
		}
	}

	throttle_vm_writeout();

	atomic_dec(&zone->reclaim_in_progress);
}

/*
 * This is the direct reclaim path, for page-allocating processes.  We only
 * try to reclaim pages from zones which will satisfy the caller's allocation
 * request.
 *
 * We reclaim from a zone even if that zone is over pages_high.  Because:
 * a) The caller may be trying to free *extra* pages to satisfy a higher-order
 *    allocation or
 * b) The zones may be over pages_high but they must go *over* pages_high to
 *    satisfy the `incremental min' zone defense algorithm.
 *
 * The number of reclaimed pages is accumulated in sc->nr_reclaimed.
 *
 * If a zone is deemed to be full of pinned pages then just give it a light
 * scan then give up on it.
 */
static void
shrink_caches(struct zone **zones, struct scan_control *sc)
{
	int i;

	for (i = 0; zones[i] != NULL; i++) {
		struct zone *zone = zones[i];

		if (!populated_zone(zone))
			continue;

		if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
			continue;

		zone->temp_priority = sc->priority;
		if (zone->prev_priority > sc->priority)
			zone->prev_priority = sc->priority;

		if (zone->all_unreclaimable && sc->priority != DEF_PRIORITY)
			continue;	/* Let kswapd poll it */

		shrink_zone(zone, sc);
	}
}
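/*
 * A note on the priority ladder driving shrink_zone() above: each pass
 * scans roughly list_size >> priority pages per LRU list.  Assuming the
 * usual DEF_PRIORITY of 12, the first pass touches about 1/4096th of a
 * zone, and each subsequent pass doubles the scan window until, at
 * priority 0, whole lists are eligible.  Light memory pressure is thus
 * cheap, and effort escalates only while reclaim keeps failing.
 */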
/*
 * This is the main entry point to direct page reclaim.
 *
 * If a full scan of the inactive list fails to free enough memory then we
 * are "out of memory" and something needs to be killed.
 *
 * If the caller is !__GFP_FS then the probability of a failure is reasonably
 * high - the zone may be full of dirty or under-writeback pages, which this
 * caller can't do much about.  We kick pdflush and take explicit naps in the
 * hope that some of these pages can be written.  But if the allocating task
 * holds filesystem locks which prevent writeout this might not work, and the
 * allocation attempt will fail.
 */
int try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
{
	int priority;
	int ret = 0;
	int total_scanned = 0, total_reclaimed = 0;
	struct reclaim_state *reclaim_state = current->reclaim_state;
	struct scan_control sc;
	unsigned long lru_pages = 0;
	int i;

	sc.gfp_mask = gfp_mask;
	sc.may_writepage = !laptop_mode;
	sc.may_swap = 1;

	inc_page_state(allocstall);

	for (i = 0; zones[i] != NULL; i++) {
		struct zone *zone = zones[i];

		if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
			continue;

		zone->temp_priority = DEF_PRIORITY;
		lru_pages += zone->nr_active + zone->nr_inactive;
	}

	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
		sc.nr_mapped = read_page_state(nr_mapped);
		sc.nr_scanned = 0;
		sc.nr_reclaimed = 0;
		sc.priority = priority;
		sc.swap_cluster_max = SWAP_CLUSTER_MAX;
		if (!priority)
			disable_swap_token();
		shrink_caches(zones, &sc);
		shrink_slab(sc.nr_scanned, gfp_mask, lru_pages);
		if (reclaim_state) {
			sc.nr_reclaimed += reclaim_state->reclaimed_slab;
			reclaim_state->reclaimed_slab = 0;
		}
		total_scanned += sc.nr_scanned;
		total_reclaimed += sc.nr_reclaimed;
		if (total_reclaimed >= sc.swap_cluster_max) {
			ret = 1;
			goto out;
		}

		/*
		 * Try to write back as many pages as we just scanned.  This
		 * tends to cause slow streaming writers to write data to the
		 * disk smoothly, at the dirtying rate, which is nice.  But
		 * that's undesirable in laptop mode, where we *want* lumpy
		 * writeout.  So in laptop mode, write out the whole world.
		 */
		if (total_scanned > sc.swap_cluster_max + sc.swap_cluster_max/2) {
			wakeup_pdflush(laptop_mode ? 0 : total_scanned);
			sc.may_writepage = 1;
		}

		/* Take a nap, wait for some writeback to complete */
		if (sc.nr_scanned && priority < DEF_PRIORITY - 2)
			blk_congestion_wait(WRITE, HZ/10);
	}
out:
	for (i = 0; zones[i] != 0; i++) {
		struct zone *zone = zones[i];

		if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
			continue;

		zone->prev_priority = zone->temp_priority;
	}
	return ret;
}
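/*
 * Usage note (the call site lives in the page allocator, not shown
 * here): the allocator's slow path invokes try_to_free_pages() once
 * free counts drop below the zone watermarks.  A return of 1 means at
 * least SWAP_CLUSTER_MAX pages were reclaimed and the allocation is
 * worth retrying; 0 means a full priority sweep freed almost nothing,
 * which is when the OOM killer becomes a possibility.
 */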
/*
 * For kswapd, balance_pgdat() will work across all this node's zones until
 * they are all at pages_high.
 *
 * If `nr_pages' is non-zero then it is the number of pages which are to be
 * reclaimed, regardless of the zone occupancies.  This is a software suspend
 * special.
 *
 * Returns the number of pages which were actually freed.
 *
 * There is special handling here for zones which are full of pinned pages.
 * This can happen if the pages are all mlocked, or if they are all used by
 * device drivers (say, ZONE_DMA).  Or if they are all in use by hugetlb.
 * What we do is to detect the case where all pages in the zone have been
 * scanned twice and there has been zero successful reclaim.  Mark the zone as
 * dead and from now on, only perform a short scan.  Basically we're polling
 * the zone for when the problem goes away.
 *
 * kswapd scans the zones in the highmem->normal->dma direction.  It skips
 * zones which have free_pages > pages_high, but once a zone is found to have
 * free_pages <= pages_high, we scan that zone and the lower zones regardless
 * of the number of free pages in the lower zones.  This interoperates with
 * the page allocator fallback scheme to ensure that aging of pages is balanced
 * across the zones.
 */
static int balance_pgdat(pg_data_t *pgdat, int nr_pages, int order)
{
	int to_free = nr_pages;
	int all_zones_ok;
	int priority;
	int i;
	int total_scanned, total_reclaimed;
	struct reclaim_state *reclaim_state = current->reclaim_state;
	struct scan_control sc;

loop_again:
	total_scanned = 0;
	total_reclaimed = 0;
	sc.gfp_mask = GFP_KERNEL;
	sc.may_writepage = !laptop_mode;
	sc.may_swap = 1;
	sc.nr_mapped = read_page_state(nr_mapped);

	inc_page_state(pageoutrun);

	for (i = 0; i < pgdat->nr_zones; i++) {
		struct zone *zone = pgdat->node_zones + i;

		zone->temp_priority = DEF_PRIORITY;
	}

	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
		int end_zone = 0;	/* Inclusive.  0 = ZONE_DMA */
		unsigned long lru_pages = 0;

		/* The swap token gets in the way of swapout... */
		if (!priority)
			disable_swap_token();

		all_zones_ok = 1;

		if (nr_pages == 0) {
			/*
			 * Scan in the highmem->dma direction for the highest
			 * zone which needs scanning
			 */
			for (i = pgdat->nr_zones - 1; i >= 0; i--) {
				struct zone *zone = pgdat->node_zones + i;

				if (!populated_zone(zone))
					continue;

				if (zone->all_unreclaimable &&
						priority != DEF_PRIORITY)
					continue;

				if (!zone_watermark_ok(zone, order,
						zone->pages_high, 0, 0)) {
					end_zone = i;
					goto scan;
				}
			}
			goto out;
		} else {
			end_zone = pgdat->nr_zones - 1;
		}
scan:
		for (i = 0; i <= end_zone; i++) {
			struct zone *zone = pgdat->node_zones + i;

			lru_pages += zone->nr_active + zone->nr_inactive;
		}
		/*
		 * Now scan the zone in the dma->highmem direction, stopping
		 * at the last zone which needs scanning.
		 *
		 * We do this because the page allocator works in the opposite
		 * direction.  This prevents the page allocator from allocating
		 * pages behind kswapd's direction of progress, which would
		 * cause too much scanning of the lower zones.
		 */
		for (i = 0; i <= end_zone; i++) {
			struct zone *zone = pgdat->node_zones + i;
			int nr_slab;

			if (!populated_zone(zone))
				continue;

			if (zone->all_unreclaimable && priority != DEF_PRIORITY)
				continue;

			if (nr_pages == 0) {	/* Not software suspend */
				if (!zone_watermark_ok(zone, order,
						zone->pages_high, end_zone, 0))
					all_zones_ok = 0;
			}
			zone->temp_priority = priority;
			if (zone->prev_priority > priority)
				zone->prev_priority = priority;
			sc.nr_scanned = 0;
			sc.nr_reclaimed = 0;
			sc.priority = priority;
			sc.swap_cluster_max = nr_pages ? nr_pages : SWAP_CLUSTER_MAX;
			shrink_zone(zone, &sc);
			reclaim_state->reclaimed_slab = 0;
			nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
						lru_pages);
			sc.nr_reclaimed += reclaim_state->reclaimed_slab;
			total_reclaimed += sc.nr_reclaimed;
			total_scanned += sc.nr_scanned;
			if (zone->all_unreclaimable)
				continue;
			if (nr_slab == 0 && zone->pages_scanned >=
				    (zone->nr_active + zone->nr_inactive) * 4)
				zone->all_unreclaimable = 1;
			/*
			 * If we've done a decent amount of scanning and
			 * the reclaim ratio is low, start doing writepage
			 * even in laptop mode
			 */
			if (total_scanned > SWAP_CLUSTER_MAX * 2 &&
			    total_scanned > total_reclaimed + total_reclaimed / 2)
				sc.may_writepage = 1;
		}
		if (nr_pages && to_free > total_reclaimed)
			continue;	/* swsusp: need to do more work */
		if (all_zones_ok)
			break;		/* kswapd: all done */
		/*
		 * OK, kswapd is getting into trouble.  Take a nap, then take
		 * another pass across the zones.
		 */
		if (total_scanned && priority < DEF_PRIORITY - 2)
			blk_congestion_wait(WRITE, HZ/10);

		/*
		 * We do this so kswapd doesn't build up large priorities for
		 * example when it is freeing in parallel with allocators. It
		 * matches the direct reclaim path behaviour in terms of impact
		 * on zone->*_priority.
		 */
		if ((total_reclaimed >= SWAP_CLUSTER_MAX) && (!nr_pages))
			break;
	}
out:
	for (i = 0; i < pgdat->nr_zones; i++) {
		struct zone *zone = pgdat->node_zones + i;

		zone->prev_priority = zone->temp_priority;
	}
	if (!all_zones_ok) {
		cond_resched();
		goto loop_again;
	}

	return total_reclaimed;
}
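/*
 * How kswapd below is driven (summarizing the pieces in this file): a
 * zone dipping under pages_low causes the page allocator (not shown
 * here) to call wakeup_kswapd(), which records the largest allocation
 * order seen in pgdat->kswapd_max_order and wakes pgdat->kswapd_wait.
 * kswapd() then runs balance_pgdat() until every zone is back over
 * pages_high and goes back to sleep on the same waitqueue.
 */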
/*
 * The background pageout daemon, started as a kernel thread
 * from the init process.
 *
 * This basically trickles out pages so that we have _some_
 * free memory available even if there is no other activity
 * that frees anything up. This is needed for things like routing
 * etc, where we otherwise might have all activity going on in
 * asynchronous contexts that cannot page things out.
 *
 * If there are applications that are active memory-allocators
 * (most normal use), this basically shouldn't matter.
 */
static int kswapd(void *p)
{
	unsigned long order;
	pg_data_t *pgdat = (pg_data_t*)p;
	struct task_struct *tsk = current;
	DEFINE_WAIT(wait);
	struct reclaim_state reclaim_state = {
		.reclaimed_slab = 0,
	};
	cpumask_t cpumask;

	daemonize("kswapd%d", pgdat->node_id);
	cpumask = node_to_cpumask(pgdat->node_id);
	if (!cpus_empty(cpumask))
		set_cpus_allowed(tsk, cpumask);
	current->reclaim_state = &reclaim_state;

	/*
	 * Tell the memory management that we're a "memory allocator",
	 * and that if we need more memory we should get access to it
	 * regardless (see "__alloc_pages()"). "kswapd" should
	 * never get caught in the normal page freeing logic.
	 *
	 * (Kswapd normally doesn't need memory anyway, but sometimes
	 * you need a small amount of memory in order to be able to
	 * page out something else, and this flag essentially protects
	 * us from recursively trying to free more memory as we're
	 * trying to free the first piece of memory in the first place).
	 */
	tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;

	order = 0;
	for ( ; ; ) {
		unsigned long new_order;

		try_to_freeze();

		prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
		new_order = pgdat->kswapd_max_order;
		pgdat->kswapd_max_order = 0;
		if (order < new_order) {
			/*
			 * Don't sleep if someone wants a larger 'order'
			 * allocation
			 */
			order = new_order;
		} else {
			schedule();
			order = pgdat->kswapd_max_order;
		}
		finish_wait(&pgdat->kswapd_wait, &wait);

		balance_pgdat(pgdat, 0, order);
	}
	return 0;
}

/*
 * A zone is low on free memory, so wake its kswapd task to service it.
 */
void wakeup_kswapd(struct zone *zone, int order)
{
	pg_data_t *pgdat;

	if (!populated_zone(zone))
		return;

	pgdat = zone->zone_pgdat;
	if (zone_watermark_ok(zone, order, zone->pages_low, 0, 0))
		return;
	if (pgdat->kswapd_max_order < order)
		pgdat->kswapd_max_order = order;
	if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
		return;
	if (!waitqueue_active(&pgdat->kswapd_wait))
		return;
	wake_up_interruptible(&pgdat->kswapd_wait);
}

#ifdef CONFIG_PM
/*
 * Try to free `nr_pages' of memory, system-wide.  Returns the number of freed
 * pages.
 */
int shrink_all_memory(int nr_pages)
{
	pg_data_t *pgdat;
	int nr_to_free = nr_pages;
	int ret = 0;
	struct reclaim_state reclaim_state = {
		.reclaimed_slab = 0,
	};

	current->reclaim_state = &reclaim_state;
	for_each_pgdat(pgdat) {
		int freed;
		freed = balance_pgdat(pgdat, nr_to_free, 0);
		ret += freed;
		nr_to_free -= freed;
		if (nr_to_free <= 0)
			break;
	}
	current->reclaim_state = NULL;
	return ret;
}
#endif

#ifdef CONFIG_HOTPLUG_CPU
/* It's optimal to keep kswapds on the same CPUs as their memory, but
   not required for correctness.  So if the last cpu in a node goes
   away, we get changed to run anywhere: as the first one comes back,
   restore their cpu bindings. */
static int __devinit cpu_callback(struct notifier_block *nfb,
				  unsigned long action,
				  void *hcpu)
{
	pg_data_t *pgdat;
	cpumask_t mask;

	if (action == CPU_ONLINE) {
		for_each_pgdat(pgdat) {
			mask = node_to_cpumask(pgdat->node_id);
			if (any_online_cpu(mask) != NR_CPUS)
				/* One of our CPUs online: restore mask */
				set_cpus_allowed(pgdat->kswapd, mask);
		}
	}
	return NOTIFY_OK;
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __init kswapd_init(void)
{
	pg_data_t *pgdat;
	swap_setup();
	for_each_pgdat(pgdat)
		pgdat->kswapd
		= find_task_by_pid(kernel_thread(kswapd, pgdat, CLONE_KERNEL));
	total_memory = nr_free_pagecache_pages();
	hotcpu_notifier(cpu_callback, 0);
	return 0;
}

module_init(kswapd_init)
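/*
 * Administration sketch for the NUMA knobs below (assuming the usual
 * sysctl wiring of zone_reclaim_mode under /proc/sys/vm in this era):
 *
 *	# echo 1 > /proc/sys/vm/zone_reclaim_mode	(RECLAIM_ZONE)
 *	# echo 3 > /proc/sys/vm/zone_reclaim_mode	(also write out dirty
 *							 pages during reclaim)
 *	# echo 0 > /proc/sys/vm/zone_reclaim_mode	(prefer falling back
 *							 to off-node memory)
 *
 * The bit values correspond to the RECLAIM_* flags defined below.
 */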
#ifdef CONFIG_NUMA
/*
 * Zone reclaim mode
 *
 * If non-zero call zone_reclaim when the number of free pages falls below
 * the watermarks.
 *
 * In the future we may add flags to the mode. However, the page allocator
 * should only have to check that zone_reclaim_mode != 0 before calling
 * zone_reclaim().
 */
int zone_reclaim_mode __read_mostly;

#define RECLAIM_OFF 0
#define RECLAIM_ZONE (1<<0)	/* Run shrink_cache on the zone */
#define RECLAIM_WRITE (1<<1)	/* Writeout pages during reclaim */
#define RECLAIM_SWAP (1<<2)	/* Swap pages out during reclaim */
#define RECLAIM_SLAB (1<<3)	/* Do a global slab shrink if the zone is out of memory */

/*
 * Minimum time between zone reclaim scans
 */
int zone_reclaim_interval __read_mostly = 30*HZ;

/*
 * Priority for ZONE_RECLAIM. This determines the fraction of pages
 * of a node considered for each zone_reclaim. 4 scans 1/16th of
 * a zone.
 */
#define ZONE_RECLAIM_PRIORITY 4

/*
 * Try to free up some pages from this zone through reclaim.
 */
int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
{
	int nr_pages;
	struct task_struct *p = current;
	struct reclaim_state reclaim_state;
	struct scan_control sc;
	cpumask_t mask;
	int node_id;

	if (time_before(jiffies,
		zone->last_unsuccessful_zone_reclaim + zone_reclaim_interval))
			return 0;

	if (!(gfp_mask & __GFP_WAIT) ||
		zone->all_unreclaimable ||
		atomic_read(&zone->reclaim_in_progress) > 0)
			return 0;

	node_id = zone->zone_pgdat->node_id;
	mask = node_to_cpumask(node_id);
	if (!cpus_empty(mask) && node_id != numa_node_id())
		return 0;

	sc.may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE);
	sc.may_swap = !!(zone_reclaim_mode & RECLAIM_SWAP);
	sc.nr_scanned = 0;
	sc.nr_reclaimed = 0;
	sc.priority = ZONE_RECLAIM_PRIORITY + 1;
	sc.nr_mapped = read_page_state(nr_mapped);
	sc.gfp_mask = gfp_mask;

	disable_swap_token();

	nr_pages = 1 << order;
	if (nr_pages > SWAP_CLUSTER_MAX)
		sc.swap_cluster_max = nr_pages;
	else
		sc.swap_cluster_max = SWAP_CLUSTER_MAX;

	cond_resched();
	p->flags |= PF_MEMALLOC;
	reclaim_state.reclaimed_slab = 0;
	p->reclaim_state = &reclaim_state;

	/*
	 * Free memory by calling shrink zone with increasing priorities
	 * until we have enough memory freed.
	 */
	do {
		sc.priority--;
		shrink_zone(zone, &sc);

	} while (sc.nr_reclaimed < nr_pages && sc.priority > 0);

	if (sc.nr_reclaimed < nr_pages && (zone_reclaim_mode & RECLAIM_SLAB)) {
		/*
		 * shrink_slab does not currently allow us to determine
		 * how many pages were freed in the zone. So we just
		 * shake the slab and then go offnode for a single allocation.
		 *
		 * shrink_slab will free memory on all zones and may take
		 * a long time.
		 */
		shrink_slab(sc.nr_scanned, gfp_mask, order);
		sc.nr_reclaimed = 1;	/* Avoid getting the off node timeout */
	}

	p->reclaim_state = NULL;
	current->flags &= ~PF_MEMALLOC;

	if (sc.nr_reclaimed == 0)
		zone->last_unsuccessful_zone_reclaim = jiffies;

	return sc.nr_reclaimed >= nr_pages;
}
#endif