vmscan.c revision ff30153bf9647c8646538810d4c01015a5e44787
1/*
2 *  linux/mm/vmscan.c
3 *
4 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
5 *
6 *  Swap reorganised 29.12.95, Stephen Tweedie.
7 *  kswapd added: 7.1.96  sct
8 *  Removed kswapd_ctl limits, and swap out as many pages as needed
9 *  to bring the system back to freepages.high: 2.4.97, Rik van Riel.
10 *  Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
11 *  Multiqueue VM started 5.8.00, Rik van Riel.
12 */
13
14#include <linux/mm.h>
15#include <linux/module.h>
16#include <linux/slab.h>
17#include <linux/kernel_stat.h>
18#include <linux/swap.h>
19#include <linux/pagemap.h>
20#include <linux/init.h>
21#include <linux/highmem.h>
22#include <linux/vmstat.h>
23#include <linux/file.h>
24#include <linux/writeback.h>
25#include <linux/blkdev.h>
26#include <linux/buffer_head.h>	/* for try_to_release_page(),
27					buffer_heads_over_limit */
28#include <linux/mm_inline.h>
29#include <linux/pagevec.h>
30#include <linux/backing-dev.h>
31#include <linux/rmap.h>
32#include <linux/topology.h>
33#include <linux/cpu.h>
34#include <linux/cpuset.h>
35#include <linux/notifier.h>
36#include <linux/rwsem.h>
37#include <linux/delay.h>
38#include <linux/kthread.h>
39#include <linux/freezer.h>
40#include <linux/memcontrol.h>
41#include <linux/delayacct.h>
42#include <linux/sysctl.h>
43
44#include <asm/tlbflush.h>
45#include <asm/div64.h>
46
47#include <linux/swapops.h>
48
49#include "internal.h"
50
51struct scan_control {
52	/* Incremented by the number of inactive pages that were scanned */
53	unsigned long nr_scanned;
54
55	/* This context's GFP mask */
56	gfp_t gfp_mask;
57
58	int may_writepage;
59
60	/* Can pages be swapped as part of reclaim? */
61	int may_swap;
62
63	/* This context's SWAP_CLUSTER_MAX. If freeing memory for
64	 * suspend, we effectively ignore SWAP_CLUSTER_MAX.
65	 * In this context, it doesn't matter that we scan the
66	 * whole list at once. */
67	int swap_cluster_max;
68
69	int swappiness;
70
71	int all_unreclaimable;
72
73	int order;
74
75	/* Which cgroup do we reclaim from */
76	struct mem_cgroup *mem_cgroup;
77
78	/* Pluggable isolate pages callback */
79	unsigned long (*isolate_pages)(unsigned long nr, struct list_head *dst,
80			unsigned long *scanned, int order, int mode,
81			struct zone *z, struct mem_cgroup *mem_cont,
82			int active, int file);
83};
84
85#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
86
87#ifdef ARCH_HAS_PREFETCH
88#define prefetch_prev_lru_page(_page, _base, _field)			\
89	do {								\
90		if ((_page)->lru.prev != _base) {			\
91			struct page *prev;				\
92									\
93			prev = lru_to_page(&(_page->lru));		\
94			prefetch(&prev->_field);			\
95		}							\
96	} while (0)
97#else
98#define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
99#endif
100
101#ifdef ARCH_HAS_PREFETCHW
102#define prefetchw_prev_lru_page(_page, _base, _field)			\
103	do {								\
104		if ((_page)->lru.prev != _base) {			\
105			struct page *prev;				\
106									\
107			prev = lru_to_page(&(_page->lru));		\
108			prefetchw(&prev->_field);			\
109		}							\
110	} while (0)
111#else
112#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
113#endif
114
115/*
116 * From 0 .. 100.  Higher means more swappy.
117 */
118int vm_swappiness = 60;
119long vm_total_pages;	/* The total number of pages which the VM controls */
120
121static LIST_HEAD(shrinker_list);
122static DECLARE_RWSEM(shrinker_rwsem);
123
124#ifdef CONFIG_CGROUP_MEM_RES_CTLR
125#define scan_global_lru(sc)	(!(sc)->mem_cgroup)
126#else
127#define scan_global_lru(sc)	(1)
128#endif
129
130/*
131 * Add a shrinker callback to be called from the vm
132 */
133void register_shrinker(struct shrinker *shrinker)
134{
135	shrinker->nr = 0;
136	down_write(&shrinker_rwsem);
137	list_add_tail(&shrinker->list, &shrinker_list);
138	up_write(&shrinker_rwsem);
139}
140EXPORT_SYMBOL(register_shrinker);
141
142/*
143 * Remove one
144 */
145void unregister_shrinker(struct shrinker *shrinker)
146{
147	down_write(&shrinker_rwsem);
148	list_del(&shrinker->list);
149	up_write(&shrinker_rwsem);
150}
151EXPORT_SYMBOL(unregister_shrinker);
152
153#define SHRINK_BATCH 128
154/*
155 * Call the shrink functions to age shrinkable caches
156 *
157 * Here we assume it costs one seek to replace a lru page and that it also
158 * takes a seek to recreate a cache object.  With this in mind we age equal
159 * percentages of the lru and ageable caches.  This should balance the seeks
160 * generated by these structures.
161 *
162 * If the vm encountered mapped pages on the LRU, it increases the pressure on
163 * slab to avoid swapping.
164 *
165 * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
166 *
167 * `lru_pages' represents the number of on-LRU pages in all the zones which
168 * are eligible for the caller's allocation attempt.  It is used for balancing
169 * slab reclaim versus page reclaim.
170 *
171 * Returns the number of slab objects which we shrunk.
172 */
173unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
174			unsigned long lru_pages)
175{
176	struct shrinker *shrinker;
177	unsigned long ret = 0;
178
179	if (scanned == 0)
180		scanned = SWAP_CLUSTER_MAX;
181
182	if (!down_read_trylock(&shrinker_rwsem))
183		return 1;	/* Assume we'll be able to shrink next time */
184
185	list_for_each_entry(shrinker, &shrinker_list, list) {
186		unsigned long long delta;
187		unsigned long total_scan;
188		unsigned long max_pass = (*shrinker->shrink)(0, gfp_mask);
189
190		delta = (4 * scanned) / shrinker->seeks;
191		delta *= max_pass;
192		do_div(delta, lru_pages + 1);
193		shrinker->nr += delta;
194		if (shrinker->nr < 0) {
195			printk(KERN_ERR "%s: nr=%ld\n",
196					__func__, shrinker->nr);
197			shrinker->nr = max_pass;
198		}
199
200		/*
201		 * Avoid risking looping forever due to a too-large nr value:
202		 * never try to free more than twice the estimated number of
203		 * freeable entries.
204		 */
205		if (shrinker->nr > max_pass * 2)
206			shrinker->nr = max_pass * 2;
207
208		total_scan = shrinker->nr;
209		shrinker->nr = 0;
210
211		while (total_scan >= SHRINK_BATCH) {
212			long this_scan = SHRINK_BATCH;
213			int shrink_ret;
214			int nr_before;
215
216			nr_before = (*shrinker->shrink)(0, gfp_mask);
217			shrink_ret = (*shrinker->shrink)(this_scan, gfp_mask);
218			if (shrink_ret == -1)
219				break;
220			if (shrink_ret < nr_before)
221				ret += nr_before - shrink_ret;
222			count_vm_events(SLABS_SCANNED, this_scan);
223			total_scan -= this_scan;
224
225			cond_resched();
226		}
227
228		shrinker->nr += total_scan;
229	}
230	up_read(&shrinker_rwsem);
231	return ret;
232}
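
/*
 * Worked example of the balancing math above, with purely illustrative
 * numbers: for scanned = 1024 LRU pages, shrinker->seeks = 2,
 * max_pass = 10000 freeable objects and lru_pages = 100000,
 *
 *	delta = (4 * 1024 / 2) * 10000 / (100000 + 1) ~= 204
 *
 * so about 204 objects are added to shrinker->nr.  One batch of
 * SHRINK_BATCH (128) is then passed to ->shrink() and the remaining 76
 * are carried over in shrinker->nr for the next call.
 */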
233
234/* Called without a lock on whether the page is mapped, so the answer is unstable */
235static inline int page_mapping_inuse(struct page *page)
236{
237	struct address_space *mapping;
238
239	/* Page is in somebody's page tables. */
240	if (page_mapped(page))
241		return 1;
242
243	/* Be more reluctant to reclaim swapcache than pagecache */
244	if (PageSwapCache(page))
245		return 1;
246
247	mapping = page_mapping(page);
248	if (!mapping)
249		return 0;
250
251	/* File is mmap'd by somebody? */
252	return mapping_mapped(mapping);
253}
254
255static inline int is_page_cache_freeable(struct page *page)
256{
257	return page_count(page) - !!PagePrivate(page) == 2;
258}
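
/*
 * Refcount arithmetic behind the check above (for illustration): a page
 * that only the reclaim path can free is referenced by the caller that
 * isolated it and by the page cache, i.e. page_count() == 2, plus one
 * more reference if buffer heads are attached at page->private
 * (PagePrivate).  Any higher count means somebody else still holds the
 * page and it is not freeable here.
 */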
259
260static int may_write_to_queue(struct backing_dev_info *bdi)
261{
262	if (current->flags & PF_SWAPWRITE)
263		return 1;
264	if (!bdi_write_congested(bdi))
265		return 1;
266	if (bdi == current->backing_dev_info)
267		return 1;
268	return 0;
269}
270
271/*
272 * We detected a synchronous write error writing a page out.  Probably
273 * -ENOSPC.  We need to propagate that into the address_space for a subsequent
274 * fsync(), msync() or close().
275 *
276 * The tricky part is that after writepage we cannot touch the mapping: nothing
277 * prevents it from being freed up.  But we have a ref on the page and once
278 * that page is locked, the mapping is pinned.
279 *
280 * We're allowed to run sleeping lock_page() here because we know the caller has
281 * __GFP_FS.
282 */
283static void handle_write_error(struct address_space *mapping,
284				struct page *page, int error)
285{
286	lock_page(page);
287	if (page_mapping(page) == mapping)
288		mapping_set_error(mapping, error);
289	unlock_page(page);
290}
291
292/* Request for sync pageout. */
293enum pageout_io {
294	PAGEOUT_IO_ASYNC,
295	PAGEOUT_IO_SYNC,
296};
297
298/* possible outcome of pageout() */
299typedef enum {
300	/* failed to write page out, page is locked */
301	PAGE_KEEP,
302	/* move page to the active list, page is locked */
303	PAGE_ACTIVATE,
304	/* page has been sent to the disk successfully, page is unlocked */
305	PAGE_SUCCESS,
306	/* page is clean and locked */
307	PAGE_CLEAN,
308} pageout_t;
309
310/*
311 * pageout is called by shrink_page_list() for each dirty page.
312 * Calls ->writepage().
313 */
314static pageout_t pageout(struct page *page, struct address_space *mapping,
315						enum pageout_io sync_writeback)
316{
317	/*
318	 * If the page is dirty, only perform writeback if that write
319	 * will be non-blocking, to prevent this allocation from being
320	 * stalled by pagecache activity.  But note that there may be
321	 * stalls if we need to run get_block().  We could test
322	 * PagePrivate for that.
323	 *
324	 * If this process is currently in generic_file_write() against
325	 * this page's queue, we can perform writeback even if that
326	 * will block.
327	 *
328	 * If the page is swapcache, write it back even if that would
329	 * block, for some throttling. This happens by accident, because
330	 * swap_backing_dev_info is bust: it doesn't reflect the
331	 * congestion state of the swapdevs.  Easy to fix, if needed.
332	 * See swapfile.c:page_queue_congested().
333	 */
334	if (!is_page_cache_freeable(page))
335		return PAGE_KEEP;
336	if (!mapping) {
337		/*
338		 * Some pages orphaned by data journaling can have
339		 * page->mapping == NULL while still being dirty with clean buffers.
340		 */
341		if (PagePrivate(page)) {
342			if (try_to_free_buffers(page)) {
343				ClearPageDirty(page);
344				printk("%s: orphaned page\n", __func__);
345				return PAGE_CLEAN;
346			}
347		}
348		return PAGE_KEEP;
349	}
350	if (mapping->a_ops->writepage == NULL)
351		return PAGE_ACTIVATE;
352	if (!may_write_to_queue(mapping->backing_dev_info))
353		return PAGE_KEEP;
354
355	if (clear_page_dirty_for_io(page)) {
356		int res;
357		struct writeback_control wbc = {
358			.sync_mode = WB_SYNC_NONE,
359			.nr_to_write = SWAP_CLUSTER_MAX,
360			.range_start = 0,
361			.range_end = LLONG_MAX,
362			.nonblocking = 1,
363			.for_reclaim = 1,
364		};
365
366		SetPageReclaim(page);
367		res = mapping->a_ops->writepage(page, &wbc);
368		if (res < 0)
369			handle_write_error(mapping, page, res);
370		if (res == AOP_WRITEPAGE_ACTIVATE) {
371			ClearPageReclaim(page);
372			return PAGE_ACTIVATE;
373		}
374
375		/*
376		 * Wait on writeback if requested to. This happens when
377		 * direct reclaiming a large contiguous area and the
378		 * first attempt to free a range of pages fails.
379		 */
380		if (PageWriteback(page) && sync_writeback == PAGEOUT_IO_SYNC)
381			wait_on_page_writeback(page);
382
383		if (!PageWriteback(page)) {
384			/* synchronous write or broken a_ops? */
385			ClearPageReclaim(page);
386		}
387		inc_zone_page_state(page, NR_VMSCAN_WRITE);
388		return PAGE_SUCCESS;
389	}
390
391	return PAGE_CLEAN;
392}
393
394/*
395 * Same as remove_mapping, but if the page is removed from the mapping, it
396 * gets returned with a refcount of 0.
397 */
398static int __remove_mapping(struct address_space *mapping, struct page *page)
399{
400	BUG_ON(!PageLocked(page));
401	BUG_ON(mapping != page_mapping(page));
402
403	spin_lock_irq(&mapping->tree_lock);
404	/*
405	 * The non-racy check for a busy page.
406	 *
407	 * Must be careful with the order of the tests. When someone has
408	 * a ref to the page, it may be possible that they dirty it then
409	 * drop the reference. So if PageDirty is tested before page_count
410	 * here, then the following race may occur:
411	 *
412	 * get_user_pages(&page);
413	 * [user mapping goes away]
414	 * write_to(page);
415	 *				!PageDirty(page)    [good]
416	 * SetPageDirty(page);
417	 * put_page(page);
418	 *				!page_count(page)   [good, discard it]
419	 *
420	 * [oops, our write_to data is lost]
421	 *
422	 * Reversing the order of the tests ensures such a situation cannot
423	 * escape unnoticed. The smp_rmb is needed to ensure the page->flags
424	 * load is not satisfied before that of page->_count.
425	 *
426	 * Note that if SetPageDirty is always performed via set_page_dirty,
427	 * and thus under tree_lock, then this ordering is not required.
428	 */
429	if (!page_freeze_refs(page, 2))
430		goto cannot_free;
431	/* note: atomic_cmpxchg in page_freeze_refs provides the smp_rmb */
432	if (unlikely(PageDirty(page))) {
433		page_unfreeze_refs(page, 2);
434		goto cannot_free;
435	}
436
437	if (PageSwapCache(page)) {
438		swp_entry_t swap = { .val = page_private(page) };
439		__delete_from_swap_cache(page);
440		spin_unlock_irq(&mapping->tree_lock);
441		swap_free(swap);
442	} else {
443		__remove_from_page_cache(page);
444		spin_unlock_irq(&mapping->tree_lock);
445	}
446
447	return 1;
448
449cannot_free:
450	spin_unlock_irq(&mapping->tree_lock);
451	return 0;
452}
453
454/*
455 * Attempt to detach a locked page from its ->mapping.  If it is dirty or if
456 * someone else has a ref on the page, abort and return 0.  If it was
457 * successfully detached, return 1.  Assumes the caller has a single ref on
458 * this page.
459 */
460int remove_mapping(struct address_space *mapping, struct page *page)
461{
462	if (__remove_mapping(mapping, page)) {
463		/*
464		 * Unfreezing the refcount with 1 rather than 2 effectively
465		 * drops the pagecache ref for us without requiring another
466		 * atomic operation.
467		 */
468		page_unfreeze_refs(page, 1);
469		return 1;
470	}
471	return 0;
472}
473
474/**
475 * putback_lru_page - put previously isolated page onto appropriate LRU list
476 * @page: page to be put back to appropriate lru list
477 *
478 * Add previously isolated @page to appropriate LRU list.
479 * Page may still be unevictable for other reasons.
480 *
481 * lru_lock must not be held, interrupts must be enabled.
482 */
483#ifdef CONFIG_UNEVICTABLE_LRU
484void putback_lru_page(struct page *page)
485{
486	int lru;
487	int active = !!TestClearPageActive(page);
488	int was_unevictable = PageUnevictable(page);
489
490	VM_BUG_ON(PageLRU(page));
491
492redo:
493	ClearPageUnevictable(page);
494
495	if (page_evictable(page, NULL)) {
496		/*
497		 * For evictable pages, we can use the cache.
498		 * In the event of a race, the worst case is we end up with an
499		 * unevictable page on [in]active list.
500		 * We know how to handle that.
501		 */
502		lru = active + page_is_file_cache(page);
503		lru_cache_add_lru(page, lru);
504	} else {
505		/*
506		 * Put unevictable pages directly on zone's unevictable
507		 * list.
508		 */
509		lru = LRU_UNEVICTABLE;
510		add_page_to_unevictable_list(page);
511	}
512	mem_cgroup_move_lists(page, lru);
513
514	/*
515	 * The page's status can change while we move it among the lru lists. If
516	 * an evictable page ends up on the unevictable list, it will never be
517	 * freed. To avoid that, check again after we have added it to the list.
518	 */
519	if (lru == LRU_UNEVICTABLE && page_evictable(page, NULL)) {
520		if (!isolate_lru_page(page)) {
521			put_page(page);
522			goto redo;
523		}
524		/* This means someone else dropped this page from the LRU.
525		 * So, it will be freed or put back on the LRU again. There is
526		 * nothing to do here.
527		 */
528	}
529
530	if (was_unevictable && lru != LRU_UNEVICTABLE)
531		count_vm_event(UNEVICTABLE_PGRESCUED);
532	else if (!was_unevictable && lru == LRU_UNEVICTABLE)
533		count_vm_event(UNEVICTABLE_PGCULLED);
534
535	put_page(page);		/* drop ref from isolate */
536}
537
538#else /* CONFIG_UNEVICTABLE_LRU */
539
540void putback_lru_page(struct page *page)
541{
542	int lru;
543	VM_BUG_ON(PageLRU(page));
544
545	lru = !!TestClearPageActive(page) + page_is_file_cache(page);
546	lru_cache_add_lru(page, lru);
547	mem_cgroup_move_lists(page, lru);
548	put_page(page);
549}
550#endif /* CONFIG_UNEVICTABLE_LRU */
551
552
553/*
554 * shrink_page_list() returns the number of reclaimed pages
555 */
556static unsigned long shrink_page_list(struct list_head *page_list,
557					struct scan_control *sc,
558					enum pageout_io sync_writeback)
559{
560	LIST_HEAD(ret_pages);
561	struct pagevec freed_pvec;
562	int pgactivate = 0;
563	unsigned long nr_reclaimed = 0;
564
565	cond_resched();
566
567	pagevec_init(&freed_pvec, 1);
568	while (!list_empty(page_list)) {
569		struct address_space *mapping;
570		struct page *page;
571		int may_enter_fs;
572		int referenced;
573
574		cond_resched();
575
576		page = lru_to_page(page_list);
577		list_del(&page->lru);
578
579		if (!trylock_page(page))
580			goto keep;
581
582		VM_BUG_ON(PageActive(page));
583
584		sc->nr_scanned++;
585
586		if (unlikely(!page_evictable(page, NULL)))
587			goto cull_mlocked;
588
589		if (!sc->may_swap && page_mapped(page))
590			goto keep_locked;
591
592		/* Double the slab pressure for mapped and swapcache pages */
593		if (page_mapped(page) || PageSwapCache(page))
594			sc->nr_scanned++;
595
596		may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
597			(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
598
599		if (PageWriteback(page)) {
600			/*
601			 * Synchronous reclaim is performed in two passes,
602			 * first an asynchronous pass over the list to
603			 * start parallel writeback, and a second synchronous
604			 * pass to wait for the IO to complete.  Wait here
605			 * for any page for which writeback has already
606			 * started.
607			 */
608			if (sync_writeback == PAGEOUT_IO_SYNC && may_enter_fs)
609				wait_on_page_writeback(page);
610			else
611				goto keep_locked;
612		}
613
614		referenced = page_referenced(page, 1, sc->mem_cgroup);
615		/* In active use or really unfreeable?  Activate it. */
616		if (sc->order <= PAGE_ALLOC_COSTLY_ORDER &&
617					referenced && page_mapping_inuse(page))
618			goto activate_locked;
619
620		/*
621		 * Anonymous process memory has backing store?
622		 * Try to allocate it some swap space here.
623		 */
624		if (PageAnon(page) && !PageSwapCache(page)) {
625			if (!(sc->gfp_mask & __GFP_IO))
626				goto keep_locked;
627			if (!add_to_swap(page))
628				goto activate_locked;
629			may_enter_fs = 1;
630		}
631
632		mapping = page_mapping(page);
633
634		/*
635		 * The page is mapped into the page tables of one or more
636		 * processes. Try to unmap it here.
637		 */
638		if (page_mapped(page) && mapping) {
639			switch (try_to_unmap(page, 0)) {
640			case SWAP_FAIL:
641				goto activate_locked;
642			case SWAP_AGAIN:
643				goto keep_locked;
644			case SWAP_MLOCK:
645				goto cull_mlocked;
646			case SWAP_SUCCESS:
647				; /* try to free the page below */
648			}
649		}
650
651		if (PageDirty(page)) {
652			if (sc->order <= PAGE_ALLOC_COSTLY_ORDER && referenced)
653				goto keep_locked;
654			if (!may_enter_fs)
655				goto keep_locked;
656			if (!sc->may_writepage)
657				goto keep_locked;
658
659			/* Page is dirty, try to write it out here */
660			switch (pageout(page, mapping, sync_writeback)) {
661			case PAGE_KEEP:
662				goto keep_locked;
663			case PAGE_ACTIVATE:
664				goto activate_locked;
665			case PAGE_SUCCESS:
666				if (PageWriteback(page) || PageDirty(page))
667					goto keep;
668				/*
669				 * A synchronous write - probably a ramdisk.  Go
670				 * ahead and try to reclaim the page.
671				 */
672				if (!trylock_page(page))
673					goto keep;
674				if (PageDirty(page) || PageWriteback(page))
675					goto keep_locked;
676				mapping = page_mapping(page);
677			case PAGE_CLEAN:
678				; /* try to free the page below */
679			}
680		}
681
682		/*
683		 * If the page has buffers, try to free the buffer mappings
684		 * associated with this page. If we succeed we try to free
685		 * the page as well.
686		 *
687		 * We do this even if the page is PageDirty().
688		 * try_to_release_page() does not perform I/O, but it is
689		 * possible for a page to have PageDirty set while it is actually
690		 * clean (all its buffers are clean).  This happens if the
691		 * buffers were written out directly, with submit_bh(). ext3
692		 * will do this, as well as the blockdev mapping.
693		 * try_to_release_page() will discover that cleanness and will
694		 * drop the buffers and mark the page clean - it can be freed.
695		 *
696		 * Rarely, pages can have buffers and no ->mapping.  These are
697		 * the pages which were not successfully invalidated in
698		 * truncate_complete_page().  We try to drop those buffers here
699		 * and if that worked, and the page is no longer mapped into
700		 * process address space (page_count == 1) it can be freed.
701		 * Otherwise, leave the page on the LRU so it is swappable.
702		 */
703		if (PagePrivate(page)) {
704			if (!try_to_release_page(page, sc->gfp_mask))
705				goto activate_locked;
706			if (!mapping && page_count(page) == 1) {
707				unlock_page(page);
708				if (put_page_testzero(page))
709					goto free_it;
710				else {
711					/*
712					 * rare race with speculative reference.
713					 * the speculative reference will free
714					 * this page shortly, so we may
715					 * increment nr_reclaimed here (and
716					 * leave it off the LRU).
717					 */
718					nr_reclaimed++;
719					continue;
720				}
721			}
722		}
723
724		if (!mapping || !__remove_mapping(mapping, page))
725			goto keep_locked;
726
727		/*
728		 * At this point, we have no other references and there is
729		 * no way to pick any more up (removed from LRU, removed
730		 * from pagecache). Can use non-atomic bitops now (and
731		 * we obviously don't have to worry about waking up a process
732		 * waiting on the page lock, because there are no references).
733		 */
734		__clear_page_locked(page);
735free_it:
736		nr_reclaimed++;
737		if (!pagevec_add(&freed_pvec, page)) {
738			__pagevec_free(&freed_pvec);
739			pagevec_reinit(&freed_pvec);
740		}
741		continue;
742
743cull_mlocked:
744		if (PageSwapCache(page))
745			try_to_free_swap(page);
746		unlock_page(page);
747		putback_lru_page(page);
748		continue;
749
750activate_locked:
751		/* Not a candidate for swapping, so reclaim swap space. */
752		if (PageSwapCache(page) && vm_swap_full())
753			try_to_free_swap(page);
754		VM_BUG_ON(PageActive(page));
755		SetPageActive(page);
756		pgactivate++;
757keep_locked:
758		unlock_page(page);
759keep:
760		list_add(&page->lru, &ret_pages);
761		VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
762	}
763	list_splice(&ret_pages, page_list);
764	if (pagevec_count(&freed_pvec))
765		__pagevec_free(&freed_pvec);
766	count_vm_events(PGACTIVATE, pgactivate);
767	return nr_reclaimed;
768}
769
770/* LRU Isolation modes. */
771#define ISOLATE_INACTIVE 0	/* Isolate inactive pages. */
772#define ISOLATE_ACTIVE 1	/* Isolate active pages. */
773#define ISOLATE_BOTH 2		/* Isolate both active and inactive pages. */
774
775/*
776 * Attempt to remove the specified page from its LRU.  Only take this page
777 * if it is of the appropriate PageActive status.  Pages which are being
778 * freed elsewhere are also ignored.
779 *
780 * page:	page to consider
781 * mode:	one of the LRU isolation modes defined above
782 *
783 * returns 0 on success, -ve errno on failure.
784 */
785int __isolate_lru_page(struct page *page, int mode, int file)
786{
787	int ret = -EINVAL;
788
789	/* Only take pages on the LRU. */
790	if (!PageLRU(page))
791		return ret;
792
793	/*
794	 * When checking the active state, we need to be sure we are
795	 * dealing with comparable boolean values.  Take the logical not
796	 * of each.
797	 */
798	if (mode != ISOLATE_BOTH && (!PageActive(page) != !mode))
799		return ret;
800
801	if (mode != ISOLATE_BOTH && (!page_is_file_cache(page) != !file))
802		return ret;
803
804	/*
805	 * When this function is being called for lumpy reclaim, we
806	 * initially look into all LRU pages, active, inactive and
807	 * unevictable; only give shrink_page_list evictable pages.
808	 */
809	if (PageUnevictable(page))
810		return ret;
811
812	ret = -EBUSY;
813	if (likely(get_page_unless_zero(page))) {
814		/*
815		 * Be careful not to clear PageLRU until after we're
816		 * sure the page is not being freed elsewhere -- the
817		 * page release code relies on it.
818		 */
819		ClearPageLRU(page);
820		ret = 0;
821	}
822
823	return ret;
824}
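
/*
 * Example of the active-state filter above: with mode == ISOLATE_ACTIVE
 * (1), an active page gives !PageActive(page) == 0 and !mode == 0, so
 * the test fails and the page may be taken; an inactive page gives
 * 1 != 0 and is rejected with -EINVAL.  ISOLATE_BOTH skips the test
 * entirely.
 */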
825
826/*
827 * zone->lru_lock is heavily contended.  Some of the functions that
828 * shrink the lists perform better by taking out a batch of pages
829 * and working on them outside the LRU lock.
830 *
831 * For pagecache intensive workloads, this function is the hottest
832 * spot in the kernel (apart from copy_*_user functions).
833 *
834 * Appropriate locks must be held before calling this function.
835 *
836 * @nr_to_scan:	The number of pages to look through on the list.
837 * @src:	The LRU list to pull pages off.
838 * @dst:	The temp list to put pages on to.
839 * @scanned:	The number of pages that were scanned.
840 * @order:	The caller's attempted allocation order
841 * @mode:	One of the LRU isolation modes
842 * @file:	True [1] if isolating file [!anon] pages
843 *
844 * returns how many pages were moved onto *@dst.
845 */
846static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
847		struct list_head *src, struct list_head *dst,
848		unsigned long *scanned, int order, int mode, int file)
849{
850	unsigned long nr_taken = 0;
851	unsigned long scan;
852
853	for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
854		struct page *page;
855		unsigned long pfn;
856		unsigned long end_pfn;
857		unsigned long page_pfn;
858		int zone_id;
859
860		page = lru_to_page(src);
861		prefetchw_prev_lru_page(page, src, flags);
862
863		VM_BUG_ON(!PageLRU(page));
864
865		switch (__isolate_lru_page(page, mode, file)) {
866		case 0:
867			list_move(&page->lru, dst);
868			nr_taken++;
869			break;
870
871		case -EBUSY:
872			/* else it is being freed elsewhere */
873			list_move(&page->lru, src);
874			continue;
875
876		default:
877			BUG();
878		}
879
880		if (!order)
881			continue;
882
883		/*
884		 * Attempt to take all pages in the order-aligned region
885		 * surrounding the tag page.  Only take those pages of
886		 * the same active state as that tag page.  We may safely
887		 * round the target page pfn down to the requested order
888		 * as the mem_map is guaranteed valid out to MAX_ORDER.
889		 * Where a page in the block is in a different zone, we detect
890		 * it from its zone id and skip it.
891		 */
892		zone_id = page_zone_id(page);
893		page_pfn = page_to_pfn(page);
894		pfn = page_pfn & ~((1 << order) - 1);
895		end_pfn = pfn + (1 << order);
896		for (; pfn < end_pfn; pfn++) {
897			struct page *cursor_page;
898
899			/* The target page is in the block, ignore it. */
900			if (unlikely(pfn == page_pfn))
901				continue;
902
903			/* Avoid holes within the zone. */
904			if (unlikely(!pfn_valid_within(pfn)))
905				break;
906
907			cursor_page = pfn_to_page(pfn);
908
909			/* Check that we have not crossed a zone boundary. */
910			if (unlikely(page_zone_id(cursor_page) != zone_id))
911				continue;
912			switch (__isolate_lru_page(cursor_page, mode, file)) {
913			case 0:
914				list_move(&cursor_page->lru, dst);
915				nr_taken++;
916				scan++;
917				break;
918
919			case -EBUSY:
920				/* else it is being freed elsewhere */
921				list_move(&cursor_page->lru, src);
922			default:
923				break;	/* ! on LRU or wrong list */
924			}
925		}
926	}
927
928	*scanned = scan;
929	return nr_taken;
930}
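
/*
 * Worked example of the lumpy-reclaim block scan above (hypothetical
 * pfn values): for order = 3 and a tag page at pfn 0x12345,
 *
 *	pfn     = 0x12345 & ~((1 << 3) - 1) = 0x12340
 *	end_pfn = 0x12340 + (1 << 3)        = 0x12348
 *
 * so the eight pages 0x12340..0x12347 are considered: the tag page
 * itself is skipped, the scan stops at a pfn hole, and pages whose zone
 * id differs from the tag page's are skipped.
 */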
931
932static unsigned long isolate_pages_global(unsigned long nr,
933					struct list_head *dst,
934					unsigned long *scanned, int order,
935					int mode, struct zone *z,
936					struct mem_cgroup *mem_cont,
937					int active, int file)
938{
939	int lru = LRU_BASE;
940	if (active)
941		lru += LRU_ACTIVE;
942	if (file)
943		lru += LRU_FILE;
944	return isolate_lru_pages(nr, &z->lru[lru].list, dst, scanned, order,
945								mode, !!file);
946}
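
/*
 * Example of the lru index computed above, assuming the usual
 * LRU_BASE/LRU_ACTIVE/LRU_FILE layout from mmzone.h: an active
 * page-cache request (active = 1, file = 1) gives
 * LRU_BASE + LRU_ACTIVE + LRU_FILE == LRU_ACTIVE_FILE, so pages are
 * pulled from zone->lru[LRU_ACTIVE_FILE].list.
 */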
947
948/*
949 * clear_active_flags() is a helper for shrink_inactive_list(), clearing
950 * any active bits from the pages in the list.
951 */
952static unsigned long clear_active_flags(struct list_head *page_list,
953					unsigned int *count)
954{
955	int nr_active = 0;
956	int lru;
957	struct page *page;
958
959	list_for_each_entry(page, page_list, lru) {
960		lru = page_is_file_cache(page);
961		if (PageActive(page)) {
962			lru += LRU_ACTIVE;
963			ClearPageActive(page);
964			nr_active++;
965		}
966		count[lru]++;
967	}
968
969	return nr_active;
970}
971
972/**
973 * isolate_lru_page - tries to isolate a page from its LRU list
974 * @page: page to isolate from its LRU list
975 *
976 * Isolates a @page from an LRU list, clears PageLRU and adjusts the
977 * vmstat statistic corresponding to whatever LRU list the page was on.
978 *
979 * Returns 0 if the page was removed from an LRU list.
980 * Returns -EBUSY if the page was not on an LRU list.
981 *
982 * The returned page will have PageLRU() cleared.  If it was found on
983 * the active list, it will have PageActive set.  If it was found on
984 * the unevictable list, it will have the PageUnevictable bit set. That flag
985 * may need to be cleared by the caller before letting the page go.
986 *
987 * The vmstat statistic corresponding to the list on which the page was
988 * found will be decremented.
989 *
990 * Restrictions:
991 * (1) Must be called with an elevated refcount on the page. This is a
992 *     fundamental difference from isolate_lru_pages (which is called
993 *     without a stable reference).
994 * (2) the lru_lock must not be held.
995 * (3) interrupts must be enabled.
996 */
997int isolate_lru_page(struct page *page)
998{
999	int ret = -EBUSY;
1000
1001	if (PageLRU(page)) {
1002		struct zone *zone = page_zone(page);
1003
1004		spin_lock_irq(&zone->lru_lock);
1005		if (PageLRU(page) && get_page_unless_zero(page)) {
1006			int lru = page_lru(page);
1007			ret = 0;
1008			ClearPageLRU(page);
1009
1010			del_page_from_lru_list(zone, page, lru);
1011		}
1012		spin_unlock_irq(&zone->lru_lock);
1013	}
1014	return ret;
1015}
1016
1017/*
1018 * shrink_inactive_list() is a helper for shrink_zone().  It returns the number
1019 * of reclaimed pages
1020 */
1021static unsigned long shrink_inactive_list(unsigned long max_scan,
1022			struct zone *zone, struct scan_control *sc,
1023			int priority, int file)
1024{
1025	LIST_HEAD(page_list);
1026	struct pagevec pvec;
1027	unsigned long nr_scanned = 0;
1028	unsigned long nr_reclaimed = 0;
1029
1030	pagevec_init(&pvec, 1);
1031
1032	lru_add_drain();
1033	spin_lock_irq(&zone->lru_lock);
1034	do {
1035		struct page *page;
1036		unsigned long nr_taken;
1037		unsigned long nr_scan;
1038		unsigned long nr_freed;
1039		unsigned long nr_active;
1040		unsigned int count[NR_LRU_LISTS] = { 0, };
1041		int mode = ISOLATE_INACTIVE;
1042
1043		/*
1044		 * If we need a large contiguous chunk of memory, or have
1045		 * trouble getting a small set of contiguous pages, we
1046		 * will reclaim both active and inactive pages.
1047		 *
1048		 * We use the same threshold as pageout congestion_wait below.
1049		 */
1050		if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
1051			mode = ISOLATE_BOTH;
1052		else if (sc->order && priority < DEF_PRIORITY - 2)
1053			mode = ISOLATE_BOTH;
1054
1055		nr_taken = sc->isolate_pages(sc->swap_cluster_max,
1056			     &page_list, &nr_scan, sc->order, mode,
1057				zone, sc->mem_cgroup, 0, file);
1058		nr_active = clear_active_flags(&page_list, count);
1059		__count_vm_events(PGDEACTIVATE, nr_active);
1060
1061		__mod_zone_page_state(zone, NR_ACTIVE_FILE,
1062						-count[LRU_ACTIVE_FILE]);
1063		__mod_zone_page_state(zone, NR_INACTIVE_FILE,
1064						-count[LRU_INACTIVE_FILE]);
1065		__mod_zone_page_state(zone, NR_ACTIVE_ANON,
1066						-count[LRU_ACTIVE_ANON]);
1067		__mod_zone_page_state(zone, NR_INACTIVE_ANON,
1068						-count[LRU_INACTIVE_ANON]);
1069
1070		if (scan_global_lru(sc)) {
1071			zone->pages_scanned += nr_scan;
1072			zone->recent_scanned[0] += count[LRU_INACTIVE_ANON];
1073			zone->recent_scanned[0] += count[LRU_ACTIVE_ANON];
1074			zone->recent_scanned[1] += count[LRU_INACTIVE_FILE];
1075			zone->recent_scanned[1] += count[LRU_ACTIVE_FILE];
1076		}
1077		spin_unlock_irq(&zone->lru_lock);
1078
1079		nr_scanned += nr_scan;
1080		nr_freed = shrink_page_list(&page_list, sc, PAGEOUT_IO_ASYNC);
1081
1082		/*
1083		 * If we are direct reclaiming for contiguous pages and we do
1084		 * not reclaim everything in the list, try again and wait
1085		 * for IO to complete. This will stall high-order allocations
1086		 * but that should be acceptable to the caller
1087		 */
1088		if (nr_freed < nr_taken && !current_is_kswapd() &&
1089					sc->order > PAGE_ALLOC_COSTLY_ORDER) {
1090			congestion_wait(WRITE, HZ/10);
1091
1092			/*
1093			 * The attempt at page out may have made some
1094			 * of the pages active, mark them inactive again.
1095			 */
1096			nr_active = clear_active_flags(&page_list, count);
1097			count_vm_events(PGDEACTIVATE, nr_active);
1098
1099			nr_freed += shrink_page_list(&page_list, sc,
1100							PAGEOUT_IO_SYNC);
1101		}
1102
1103		nr_reclaimed += nr_freed;
1104		local_irq_disable();
1105		if (current_is_kswapd()) {
1106			__count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scan);
1107			__count_vm_events(KSWAPD_STEAL, nr_freed);
1108		} else if (scan_global_lru(sc))
1109			__count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scan);
1110
1111		__count_zone_vm_events(PGSTEAL, zone, nr_freed);
1112
1113		if (nr_taken == 0)
1114			goto done;
1115
1116		spin_lock(&zone->lru_lock);
1117		/*
1118		 * Put back any unfreeable pages.
1119		 */
1120		while (!list_empty(&page_list)) {
1121			int lru;
1122			page = lru_to_page(&page_list);
1123			VM_BUG_ON(PageLRU(page));
1124			list_del(&page->lru);
1125			if (unlikely(!page_evictable(page, NULL))) {
1126				spin_unlock_irq(&zone->lru_lock);
1127				putback_lru_page(page);
1128				spin_lock_irq(&zone->lru_lock);
1129				continue;
1130			}
1131			SetPageLRU(page);
1132			lru = page_lru(page);
1133			add_page_to_lru_list(zone, page, lru);
1134			mem_cgroup_move_lists(page, lru);
1135			if (PageActive(page) && scan_global_lru(sc)) {
1136				int file = !!page_is_file_cache(page);
1137				zone->recent_rotated[file]++;
1138			}
1139			if (!pagevec_add(&pvec, page)) {
1140				spin_unlock_irq(&zone->lru_lock);
1141				__pagevec_release(&pvec);
1142				spin_lock_irq(&zone->lru_lock);
1143			}
1144		}
1145  	} while (nr_scanned < max_scan);
1146	spin_unlock(&zone->lru_lock);
1147done:
1148	local_irq_enable();
1149	pagevec_release(&pvec);
1150	return nr_reclaimed;
1151}
1152
1153/*
1154 * We are about to scan this zone at a certain priority level.  If that priority
1155 * level is smaller (ie: more urgent) than the previous priority, then note
1156 * that priority level within the zone.  This is done so that when the next
1157 * process comes in to scan this zone, it will immediately start out at this
1158 * priority level rather than having to build up its own scanning priority.
1159 * Here, this priority affects only the reclaim-mapped threshold.
1160 */
1161static inline void note_zone_scanning_priority(struct zone *zone, int priority)
1162{
1163	if (priority < zone->prev_priority)
1164		zone->prev_priority = priority;
1165}
1166
1167static inline int zone_is_near_oom(struct zone *zone)
1168{
1169	return zone->pages_scanned >= (zone_lru_pages(zone) * 3);
1170}
1171
1172/*
1173 * This moves pages from the active list to the inactive list.
1174 *
1175 * We move them the other way if the page is referenced by one or more
1176 * processes, from rmap.
1177 *
1178 * If the pages are mostly unmapped, the processing is fast and it is
1179 * appropriate to hold zone->lru_lock across the whole operation.  But if
1180 * the pages are mapped, the processing is slow (page_referenced()) so we
1181 * should drop zone->lru_lock around each page.  It's impossible to balance
1182 * this, so instead we remove the pages from the LRU while processing them.
1183 * It is safe to rely on PG_active against the non-LRU pages in here because
1184 * nobody will play with that bit on a non-LRU page.
1185 *
1186 * The downside is that we have to touch page->_count against each page.
1187 * But we had to alter page->flags anyway.
1188 */
1189
1190
1191static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
1192			struct scan_control *sc, int priority, int file)
1193{
1194	unsigned long pgmoved;
1195	int pgdeactivate = 0;
1196	unsigned long pgscanned;
1197	LIST_HEAD(l_hold);	/* The pages which were snipped off */
1198	LIST_HEAD(l_inactive);
1199	struct page *page;
1200	struct pagevec pvec;
1201	enum lru_list lru;
1202
1203	lru_add_drain();
1204	spin_lock_irq(&zone->lru_lock);
1205	pgmoved = sc->isolate_pages(nr_pages, &l_hold, &pgscanned, sc->order,
1206					ISOLATE_ACTIVE, zone,
1207					sc->mem_cgroup, 1, file);
1208	/*
1209	 * zone->pages_scanned is used to detect a zone's oom situation;
1210	 * mem_cgroup remembers nr_scan by itself.
1211	 */
1212	if (scan_global_lru(sc)) {
1213		zone->pages_scanned += pgscanned;
1214		zone->recent_scanned[!!file] += pgmoved;
1215	}
1216
1217	if (file)
1218		__mod_zone_page_state(zone, NR_ACTIVE_FILE, -pgmoved);
1219	else
1220		__mod_zone_page_state(zone, NR_ACTIVE_ANON, -pgmoved);
1221	spin_unlock_irq(&zone->lru_lock);
1222
1223	pgmoved = 0;
1224	while (!list_empty(&l_hold)) {
1225		cond_resched();
1226		page = lru_to_page(&l_hold);
1227		list_del(&page->lru);
1228
1229		if (unlikely(!page_evictable(page, NULL))) {
1230			putback_lru_page(page);
1231			continue;
1232		}
1233
1234		/* page_referenced clears PageReferenced */
1235		if (page_mapping_inuse(page) &&
1236		    page_referenced(page, 0, sc->mem_cgroup))
1237			pgmoved++;
1238
1239		list_add(&page->lru, &l_inactive);
1240	}
1241
1242	spin_lock_irq(&zone->lru_lock);
1243	/*
1244	 * Count referenced pages from currently used mappings as
1245	 * rotated, even though they are moved to the inactive list.
1246	 * This helps balance scan pressure between file and anonymous
1247	 * pages in get_scan_ratio.
1248	 */
1249	if (scan_global_lru(sc))
1250		zone->recent_rotated[!!file] += pgmoved;
1251
1252	/*
1253	 * Move the pages to the [file or anon] inactive list.
1254	 */
1255	pagevec_init(&pvec, 1);
1256
1257	pgmoved = 0;
1258	lru = LRU_BASE + file * LRU_FILE;
1259	while (!list_empty(&l_inactive)) {
1260		page = lru_to_page(&l_inactive);
1261		prefetchw_prev_lru_page(page, &l_inactive, flags);
1262		VM_BUG_ON(PageLRU(page));
1263		SetPageLRU(page);
1264		VM_BUG_ON(!PageActive(page));
1265		ClearPageActive(page);
1266
1267		list_move(&page->lru, &zone->lru[lru].list);
1268		mem_cgroup_move_lists(page, lru);
1269		pgmoved++;
1270		if (!pagevec_add(&pvec, page)) {
1271			__mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
1272			spin_unlock_irq(&zone->lru_lock);
1273			pgdeactivate += pgmoved;
1274			pgmoved = 0;
1275			if (buffer_heads_over_limit)
1276				pagevec_strip(&pvec);
1277			__pagevec_release(&pvec);
1278			spin_lock_irq(&zone->lru_lock);
1279		}
1280	}
1281	__mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
1282	pgdeactivate += pgmoved;
1283	if (buffer_heads_over_limit) {
1284		spin_unlock_irq(&zone->lru_lock);
1285		pagevec_strip(&pvec);
1286		spin_lock_irq(&zone->lru_lock);
1287	}
1288	__count_zone_vm_events(PGREFILL, zone, pgscanned);
1289	__count_vm_events(PGDEACTIVATE, pgdeactivate);
1290	spin_unlock_irq(&zone->lru_lock);
1291	if (vm_swap_full())
1292		pagevec_swap_free(&pvec);
1293
1294	pagevec_release(&pvec);
1295}
1296
1297static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
1298	struct zone *zone, struct scan_control *sc, int priority)
1299{
1300	int file = is_file_lru(lru);
1301
1302	if (lru == LRU_ACTIVE_FILE) {
1303		shrink_active_list(nr_to_scan, zone, sc, priority, file);
1304		return 0;
1305	}
1306
1307	if (lru == LRU_ACTIVE_ANON &&
1308	    (!scan_global_lru(sc) || inactive_anon_is_low(zone))) {
1309		shrink_active_list(nr_to_scan, zone, sc, priority, file);
1310		return 0;
1311	}
1312	return shrink_inactive_list(nr_to_scan, zone, sc, priority, file);
1313}
1314
1315/*
1316 * Determine how aggressively the anon and file LRU lists should be
1317 * scanned.  The relative value of each set of LRU lists is determined
1318 * by looking at the fraction of the scanned pages that we rotated back
1319 * onto the active list instead of evicting.
1320 *
1321 * percent[0] specifies how much pressure to put on ram/swap backed
1322 * memory, while percent[1] determines pressure on the file LRUs.
1323 */
1324static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
1325					unsigned long *percent)
1326{
1327	unsigned long anon, file, free;
1328	unsigned long anon_prio, file_prio;
1329	unsigned long ap, fp;
1330
1331	/* If we have no swap space, do not bother scanning anon pages. */
1332	if (nr_swap_pages <= 0) {
1333		percent[0] = 0;
1334		percent[1] = 100;
1335		return;
1336	}
1337
1338	anon  = zone_page_state(zone, NR_ACTIVE_ANON) +
1339		zone_page_state(zone, NR_INACTIVE_ANON);
1340	file  = zone_page_state(zone, NR_ACTIVE_FILE) +
1341		zone_page_state(zone, NR_INACTIVE_FILE);
1342	free  = zone_page_state(zone, NR_FREE_PAGES);
1343
1344	/* If we have very few page cache pages, force-scan anon pages. */
1345	if (unlikely(file + free <= zone->pages_high)) {
1346		percent[0] = 100;
1347		percent[1] = 0;
1348		return;
1349	}
1350
1351	/*
1352	 * OK, so we have swap space and a fair amount of page cache
1353	 * pages.  We use the recently rotated / recently scanned
1354	 * ratios to determine how valuable each cache is.
1355	 *
1356	 * Because workloads change over time (and to avoid overflow)
1357	 * we keep these statistics as a floating average, which ends
1358	 * up weighing recent references more than old ones.
1359	 *
1360	 * anon in [0], file in [1]
1361	 */
1362	if (unlikely(zone->recent_scanned[0] > anon / 4)) {
1363		spin_lock_irq(&zone->lru_lock);
1364		zone->recent_scanned[0] /= 2;
1365		zone->recent_rotated[0] /= 2;
1366		spin_unlock_irq(&zone->lru_lock);
1367	}
1368
1369	if (unlikely(zone->recent_scanned[1] > file / 4)) {
1370		spin_lock_irq(&zone->lru_lock);
1371		zone->recent_scanned[1] /= 2;
1372		zone->recent_rotated[1] /= 2;
1373		spin_unlock_irq(&zone->lru_lock);
1374	}
1375
1376	/*
1377	 * With swappiness at 100, anonymous and file have the same priority.
1378	 * This scanning priority is essentially the inverse of IO cost.
1379	 */
1380	anon_prio = sc->swappiness;
1381	file_prio = 200 - sc->swappiness;
1382
1383	/*
1384	 * The amount of pressure on anon vs file pages is inversely
1385	 * proportional to the fraction of recently scanned pages on
1386	 * each list that were recently referenced and in active use.
1387	 */
1388	ap = (anon_prio + 1) * (zone->recent_scanned[0] + 1);
1389	ap /= zone->recent_rotated[0] + 1;
1390
1391	fp = (file_prio + 1) * (zone->recent_scanned[1] + 1);
1392	fp /= zone->recent_rotated[1] + 1;
1393
1394	/* Normalize to percentages */
1395	percent[0] = 100 * ap / (ap + fp + 1);
1396	percent[1] = 100 - percent[0];
1397}
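
/*
 * Worked example of the ratio above, with hypothetical zone counters:
 * for swappiness = 60, recent_scanned = {anon: 1000, file: 4000} and
 * recent_rotated = {anon: 800, file: 400},
 *
 *	ap = (60 + 1)  * (1000 + 1) / (800 + 1) ~= 76
 *	fp = (140 + 1) * (4000 + 1) / (400 + 1) ~= 1406
 *
 *	percent[0] = 100 * 76 / (76 + 1406 + 1) ~= 5
 *	percent[1] = 95
 *
 * i.e. anon pages, most of which were rotated back because they are in
 * use, get far less scan pressure than file pages that are mostly being
 * evicted.
 */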
1398
1399
1400/*
1401 * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
1402 */
1403static unsigned long shrink_zone(int priority, struct zone *zone,
1404				struct scan_control *sc)
1405{
1406	unsigned long nr[NR_LRU_LISTS];
1407	unsigned long nr_to_scan;
1408	unsigned long nr_reclaimed = 0;
1409	unsigned long percent[2];	/* anon @ 0; file @ 1 */
1410	enum lru_list l;
1411
1412	get_scan_ratio(zone, sc, percent);
1413
1414	for_each_evictable_lru(l) {
1415		if (scan_global_lru(sc)) {
1416			int file = is_file_lru(l);
1417			int scan;
1418
1419			scan = zone_page_state(zone, NR_LRU_BASE + l);
1420			if (priority) {
1421				scan >>= priority;
1422				scan = (scan * percent[file]) / 100;
1423			}
1424			zone->lru[l].nr_scan += scan;
1425			nr[l] = zone->lru[l].nr_scan;
1426			if (nr[l] >= sc->swap_cluster_max)
1427				zone->lru[l].nr_scan = 0;
1428			else
1429				nr[l] = 0;
1430		} else {
1431			/*
1432			 * This reclaim occurs not because of a zone memory shortage
1433			 * but because the memory controller hits its limit.
1434			 * Don't modify zone reclaim related data.
1435			 */
1436			nr[l] = mem_cgroup_calc_reclaim(sc->mem_cgroup, zone,
1437								priority, l);
1438		}
1439	}
1440
1441	while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
1442					nr[LRU_INACTIVE_FILE]) {
1443		for_each_evictable_lru(l) {
1444			if (nr[l]) {
1445				nr_to_scan = min(nr[l],
1446					(unsigned long)sc->swap_cluster_max);
1447				nr[l] -= nr_to_scan;
1448
1449				nr_reclaimed += shrink_list(l, nr_to_scan,
1450							zone, sc, priority);
1451			}
1452		}
1453	}
1454
1455	/*
1456	 * Even if we did not try to evict anon pages at all, we want to
1457	 * rebalance the anon lru active/inactive ratio.
1458	 */
1459	if (!scan_global_lru(sc) || inactive_anon_is_low(zone))
1460		shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0);
1461	else if (!scan_global_lru(sc))
1462		shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0);
1463
1464	throttle_vm_writeout(sc->gfp_mask);
1465	return nr_reclaimed;
1466}
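
/*
 * Example of the per-list scan target computed above, with hypothetical
 * numbers: for 40000 inactive file pages, priority = 10 and
 * percent[file] = 95,
 *
 *	scan = 40000 >> 10   = 39
 *	scan = 39 * 95 / 100 = 37
 *
 * Since 37 >= sc->swap_cluster_max (normally SWAP_CLUSTER_MAX == 32),
 * the whole accumulated target is scanned this pass; smaller amounts
 * are carried forward in zone->lru[l].nr_scan until they reach that
 * threshold.  Lower (more urgent) priorities shift less and produce a
 * much larger target.
 */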
1467
1468/*
1469 * This is the direct reclaim path, for page-allocating processes.  We only
1470 * try to reclaim pages from zones which will satisfy the caller's allocation
1471 * request.
1472 *
1473 * We reclaim from a zone even if that zone is over pages_high.  Because:
1474 * a) The caller may be trying to free *extra* pages to satisfy a higher-order
1475 *    allocation or
1476 * b) The zones may be over pages_high but they must go *over* pages_high to
1477 *    satisfy the `incremental min' zone defense algorithm.
1478 *
1479 * Returns the number of reclaimed pages.
1480 *
1481 * If a zone is deemed to be full of pinned pages then just give it a light
1482 * scan then give up on it.
1483 */
1484static unsigned long shrink_zones(int priority, struct zonelist *zonelist,
1485					struct scan_control *sc)
1486{
1487	enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
1488	unsigned long nr_reclaimed = 0;
1489	struct zoneref *z;
1490	struct zone *zone;
1491
1492	sc->all_unreclaimable = 1;
1493	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
1494		if (!populated_zone(zone))
1495			continue;
1496		/*
1497		 * Take care that memory controller reclaiming has only a small
1498		 * influence on the global LRU.
1499		 */
1500		if (scan_global_lru(sc)) {
1501			if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
1502				continue;
1503			note_zone_scanning_priority(zone, priority);
1504
1505			if (zone_is_all_unreclaimable(zone) &&
1506						priority != DEF_PRIORITY)
1507				continue;	/* Let kswapd poll it */
1508			sc->all_unreclaimable = 0;
1509		} else {
1510			/*
1511			 * Ignore cpuset limitations here. We just want to reduce
1512			 * the number of pages used by us, regardless of memory shortage.
1513			 */
1514			sc->all_unreclaimable = 0;
1515			mem_cgroup_note_reclaim_priority(sc->mem_cgroup,
1516							priority);
1517		}
1518
1519		nr_reclaimed += shrink_zone(priority, zone, sc);
1520	}
1521
1522	return nr_reclaimed;
1523}
1524
1525/*
1526 * This is the main entry point to direct page reclaim.
1527 *
1528 * If a full scan of the inactive list fails to free enough memory then we
1529 * are "out of memory" and something needs to be killed.
1530 *
1531 * If the caller is !__GFP_FS then the probability of a failure is reasonably
1532 * high - the zone may be full of dirty or under-writeback pages, which this
1533 * caller can't do much about.  We kick pdflush and take explicit naps in the
1534 * hope that some of these pages can be written.  But if the allocating task
1535 * holds filesystem locks which prevent writeout this might not work, and the
1536 * allocation attempt will fail.
1537 *
1538 * returns:	0, if no pages reclaimed
1539 * 		else, the number of pages reclaimed
1540 */
1541static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
1542					struct scan_control *sc)
1543{
1544	int priority;
1545	unsigned long ret = 0;
1546	unsigned long total_scanned = 0;
1547	unsigned long nr_reclaimed = 0;
1548	struct reclaim_state *reclaim_state = current->reclaim_state;
1549	unsigned long lru_pages = 0;
1550	struct zoneref *z;
1551	struct zone *zone;
1552	enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
1553
1554	delayacct_freepages_start();
1555
1556	if (scan_global_lru(sc))
1557		count_vm_event(ALLOCSTALL);
1558	/*
1559	 * mem_cgroup will not do shrink_slab.
1560	 */
1561	if (scan_global_lru(sc)) {
1562		for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
1563
1564			if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
1565				continue;
1566
1567			lru_pages += zone_lru_pages(zone);
1568		}
1569	}
1570
1571	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
1572		sc->nr_scanned = 0;
1573		if (!priority)
1574			disable_swap_token();
1575		nr_reclaimed += shrink_zones(priority, zonelist, sc);
1576		/*
1577		 * Don't shrink slabs when reclaiming memory from
1578		 * over limit cgroups
1579		 */
1580		if (scan_global_lru(sc)) {
1581			shrink_slab(sc->nr_scanned, sc->gfp_mask, lru_pages);
1582			if (reclaim_state) {
1583				nr_reclaimed += reclaim_state->reclaimed_slab;
1584				reclaim_state->reclaimed_slab = 0;
1585			}
1586		}
1587		total_scanned += sc->nr_scanned;
1588		if (nr_reclaimed >= sc->swap_cluster_max) {
1589			ret = nr_reclaimed;
1590			goto out;
1591		}
1592
1593		/*
1594		 * Try to write back as many pages as we just scanned.  This
1595		 * tends to cause slow streaming writers to write data to the
1596		 * disk smoothly, at the dirtying rate, which is nice.   But
1597		 * that's undesirable in laptop mode, where we *want* lumpy
1598		 * writeout.  So in laptop mode, write out the whole world.
1599		 */
1600		if (total_scanned > sc->swap_cluster_max +
1601					sc->swap_cluster_max / 2) {
1602			wakeup_pdflush(laptop_mode ? 0 : total_scanned);
1603			sc->may_writepage = 1;
1604		}
1605
1606		/* Take a nap, wait for some writeback to complete */
1607		if (sc->nr_scanned && priority < DEF_PRIORITY - 2)
1608			congestion_wait(WRITE, HZ/10);
1609	}
1610	/* top priority shrink_zones still had more to do? don't OOM, then */
1611	if (!sc->all_unreclaimable && scan_global_lru(sc))
1612		ret = nr_reclaimed;
1613out:
1614	/*
1615	 * Now that we've scanned all the zones at this priority level, note
1616	 * that level within the zone so that the next thread which performs
1617	 * scanning of this zone will immediately start out at this priority
1618	 * level.  This affects only the decision whether or not to bring
1619	 * mapped pages onto the inactive list.
1620	 */
1621	if (priority < 0)
1622		priority = 0;
1623
1624	if (scan_global_lru(sc)) {
1625		for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
1626
1627			if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
1628				continue;
1629
1630			zone->prev_priority = priority;
1631		}
1632	} else
1633		mem_cgroup_record_reclaim_priority(sc->mem_cgroup, priority);
1634
1635	delayacct_freepages_end();
1636
1637	return ret;
1638}
1639
1640unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
1641								gfp_t gfp_mask)
1642{
1643	struct scan_control sc = {
1644		.gfp_mask = gfp_mask,
1645		.may_writepage = !laptop_mode,
1646		.swap_cluster_max = SWAP_CLUSTER_MAX,
1647		.may_swap = 1,
1648		.swappiness = vm_swappiness,
1649		.order = order,
1650		.mem_cgroup = NULL,
1651		.isolate_pages = isolate_pages_global,
1652	};
1653
1654	return do_try_to_free_pages(zonelist, &sc);
1655}
1656
1657#ifdef CONFIG_CGROUP_MEM_RES_CTLR
1658
1659unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
1660						gfp_t gfp_mask)
1661{
1662	struct scan_control sc = {
1663		.may_writepage = !laptop_mode,
1664		.may_swap = 1,
1665		.swap_cluster_max = SWAP_CLUSTER_MAX,
1666		.swappiness = vm_swappiness,
1667		.order = 0,
1668		.mem_cgroup = mem_cont,
1669		.isolate_pages = mem_cgroup_isolate_pages,
1670	};
1671	struct zonelist *zonelist;
1672
1673	sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
1674			(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
1675	zonelist = NODE_DATA(numa_node_id())->node_zonelists;
1676	return do_try_to_free_pages(zonelist, &sc);
1677}
1678#endif
1679
1680/*
1681 * For kswapd, balance_pgdat() will work across all this node's zones until
1682 * they are all at pages_high.
1683 *
1684 * Returns the number of pages which were actually freed.
1685 *
1686 * There is special handling here for zones which are full of pinned pages.
1687 * This can happen if the pages are all mlocked, or if they are all used by
1688 * device drivers (say, ZONE_DMA).  Or if they are all in use by hugetlb.
1689 * What we do is to detect the case where all pages in the zone have been
1690 * scanned twice and there has been zero successful reclaim.  Mark the zone as
1691 * dead and from now on, only perform a short scan.  Basically we're polling
1692 * the zone for when the problem goes away.
1693 *
1694 * kswapd scans the zones in the highmem->normal->dma direction.  It skips
1695 * zones which have free_pages > pages_high, but once a zone is found to have
1696 * free_pages <= pages_high, we scan that zone and the lower zones regardless
1697 * of the number of free pages in the lower zones.  This interoperates with
1698 * the page allocator fallback scheme to ensure that aging of pages is balanced
1699 * across the zones.
1700 */
1701static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
1702{
1703	int all_zones_ok;
1704	int priority;
1705	int i;
1706	unsigned long total_scanned;
1707	unsigned long nr_reclaimed;
1708	struct reclaim_state *reclaim_state = current->reclaim_state;
1709	struct scan_control sc = {
1710		.gfp_mask = GFP_KERNEL,
1711		.may_swap = 1,
1712		.swap_cluster_max = SWAP_CLUSTER_MAX,
1713		.swappiness = vm_swappiness,
1714		.order = order,
1715		.mem_cgroup = NULL,
1716		.isolate_pages = isolate_pages_global,
1717	};
1718	/*
1719	 * temp_priority is used to remember the scanning priority at which
1720	 * this zone was successfully refilled to free_pages == pages_high.
1721	 */
1722	int temp_priority[MAX_NR_ZONES];
1723
1724loop_again:
1725	total_scanned = 0;
1726	nr_reclaimed = 0;
1727	sc.may_writepage = !laptop_mode;
1728	count_vm_event(PAGEOUTRUN);
1729
1730	for (i = 0; i < pgdat->nr_zones; i++)
1731		temp_priority[i] = DEF_PRIORITY;
1732
1733	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
1734		int end_zone = 0;	/* Inclusive.  0 = ZONE_DMA */
1735		unsigned long lru_pages = 0;
1736
1737		/* The swap token gets in the way of swapout... */
1738		if (!priority)
1739			disable_swap_token();
1740
1741		all_zones_ok = 1;
1742
1743		/*
1744		 * Scan in the highmem->dma direction for the highest
1745		 * zone which needs scanning
1746		 */
1747		for (i = pgdat->nr_zones - 1; i >= 0; i--) {
1748			struct zone *zone = pgdat->node_zones + i;
1749
1750			if (!populated_zone(zone))
1751				continue;
1752
1753			if (zone_is_all_unreclaimable(zone) &&
1754			    priority != DEF_PRIORITY)
1755				continue;
1756
1757			/*
1758			 * Do some background aging of the anon list, to give
1759			 * pages a chance to be referenced before reclaiming.
1760			 */
1761			if (inactive_anon_is_low(zone))
1762				shrink_active_list(SWAP_CLUSTER_MAX, zone,
1763							&sc, priority, 0);
1764
1765			if (!zone_watermark_ok(zone, order, zone->pages_high,
1766					       0, 0)) {
1767				end_zone = i;
1768				break;
1769			}
1770		}
1771		if (i < 0)
1772			goto out;
1773
1774		for (i = 0; i <= end_zone; i++) {
1775			struct zone *zone = pgdat->node_zones + i;
1776
1777			lru_pages += zone_lru_pages(zone);
1778		}
1779
1780		/*
1781		 * Now scan the zone in the dma->highmem direction, stopping
1782		 * at the last zone which needs scanning.
1783		 *
1784		 * We do this because the page allocator works in the opposite
1785		 * direction.  This prevents the page allocator from allocating
1786		 * pages behind kswapd's direction of progress, which would
1787		 * cause too much scanning of the lower zones.
1788		 */
1789		for (i = 0; i <= end_zone; i++) {
1790			struct zone *zone = pgdat->node_zones + i;
1791			int nr_slab;
1792
1793			if (!populated_zone(zone))
1794				continue;
1795
1796			if (zone_is_all_unreclaimable(zone) &&
1797					priority != DEF_PRIORITY)
1798				continue;
1799
1800			if (!zone_watermark_ok(zone, order, zone->pages_high,
1801					       end_zone, 0))
1802				all_zones_ok = 0;
1803			temp_priority[i] = priority;
1804			sc.nr_scanned = 0;
1805			note_zone_scanning_priority(zone, priority);
1806			/*
1807			 * We put equal pressure on every zone, unless one
1808			 * zone has way too many pages free already.
1809			 */
1810			if (!zone_watermark_ok(zone, order, 8*zone->pages_high,
1811						end_zone, 0))
1812				nr_reclaimed += shrink_zone(priority, zone, &sc);
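			/*
			 * Slab pages freed by shrink_slab() are accounted in
			 * reclaim_state->reclaimed_slab, so clear it before
			 * the call and fold the result into nr_reclaimed.
			 */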
1813			reclaim_state->reclaimed_slab = 0;
1814			nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
1815						lru_pages);
1816			nr_reclaimed += reclaim_state->reclaimed_slab;
1817			total_scanned += sc.nr_scanned;
1818			if (zone_is_all_unreclaimable(zone))
1819				continue;
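			/*
			 * If slab reclaim made no progress and the zone's LRU
			 * has been scanned six times over without success,
			 * mark the zone all-unreclaimable so that lower
			 * priorities skip it.
			 */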
1820			if (nr_slab == 0 && zone->pages_scanned >=
1821						(zone_lru_pages(zone) * 6))
1822					zone_set_flag(zone,
1823						      ZONE_ALL_UNRECLAIMABLE);
1824			/*
1825			 * If we've done a decent amount of scanning and
1826			 * the reclaim ratio is low, start doing writepage
1827			 * even in laptop mode
1828			 */
1829			if (total_scanned > SWAP_CLUSTER_MAX * 2 &&
1830			    total_scanned > nr_reclaimed + nr_reclaimed / 2)
1831				sc.may_writepage = 1;
1832		}
1833		if (all_zones_ok)
1834			break;		/* kswapd: all done */
1835		/*
1836		 * OK, kswapd is getting into trouble.  Take a nap, then take
1837		 * another pass across the zones.
1838		 */
1839		if (total_scanned && priority < DEF_PRIORITY - 2)
1840			congestion_wait(WRITE, HZ/10);
1841
1842		/*
1843		 * We do this so kswapd doesn't build up large priorities when,
1844		 * for example, it is freeing in parallel with allocators. It
1845		 * matches the direct reclaim path behaviour in terms of impact
1846		 * on zone->*_priority.
1847		 */
1848		if (nr_reclaimed >= SWAP_CLUSTER_MAX)
1849			break;
1850	}
1851out:
1852	/*
1853	 * Note within each zone the priority level at which this zone was
1854	 * brought into a happy state, so that the next thread which scans this
1855	 * zone will start out at that priority level.
1856	 */
1857	for (i = 0; i < pgdat->nr_zones; i++) {
1858		struct zone *zone = pgdat->node_zones + i;
1859
1860		zone->prev_priority = temp_priority[i];
1861	}
1862	if (!all_zones_ok) {
1863		cond_resched();
1864
1865		try_to_freeze();
1866
1867		goto loop_again;
1868	}
1869
1870	return nr_reclaimed;
1871}
1872
1873/*
1874 * The background pageout daemon, started as a kernel thread
1875 * from the init process.
1876 *
1877 * This basically trickles out pages so that we have _some_
1878 * free memory available even if there is no other activity
1879 * that frees anything up. This is needed for things like routing
1880 * etc, where we otherwise might have all activity going on in
1881 * asynchronous contexts that cannot page things out.
1882 *
1883 * If there are applications that are active memory-allocators
1884 * (most normal use), this basically shouldn't matter.
1885 */
1886static int kswapd(void *p)
1887{
1888	unsigned long order;
1889	pg_data_t *pgdat = (pg_data_t*)p;
1890	struct task_struct *tsk = current;
1891	DEFINE_WAIT(wait);
1892	struct reclaim_state reclaim_state = {
1893		.reclaimed_slab = 0,
1894	};
1895	node_to_cpumask_ptr(cpumask, pgdat->node_id);
1896
1897	if (!cpumask_empty(cpumask))
1898		set_cpus_allowed_ptr(tsk, cpumask);
1899	current->reclaim_state = &reclaim_state;
1900
1901	/*
1902	 * Tell the memory management that we're a "memory allocator",
1903	 * and that if we need more memory we should get access to it
1904	 * regardless (see "__alloc_pages()"). "kswapd" should
1905	 * never get caught in the normal page freeing logic.
1906	 *
1907	 * (Kswapd normally doesn't need memory anyway, but sometimes
1908	 * you need a small amount of memory in order to be able to
1909	 * page out something else, and this flag essentially protects
1910	 * us from recursively trying to free more memory as we're
1911	 * trying to free the first piece of memory in the first place).
1912	 */
1913	tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
1914	set_freezable();
1915
1916	order = 0;
1917	for ( ; ; ) {
1918		unsigned long new_order;
1919
1920		prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
1921		new_order = pgdat->kswapd_max_order;
1922		pgdat->kswapd_max_order = 0;
1923		if (order < new_order) {
1924			/*
1925			 * Don't sleep if someone wants a larger 'order'
1926			 * allocation
1927			 */
1928			order = new_order;
1929		} else {
1930			if (!freezing(current))
1931				schedule();
1932
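			/*
			 * Pick up any order that was requested while we
			 * slept.
			 */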
1933			order = pgdat->kswapd_max_order;
1934		}
1935		finish_wait(&pgdat->kswapd_wait, &wait);
1936
1937		if (!try_to_freeze()) {
1938			/* We can speed up thawing tasks if we don't call
1939			 * balance_pgdat after returning from the refrigerator
1940			 */
1941			balance_pgdat(pgdat, order);
1942		}
1943	}
1944	return 0;
1945}
1946
1947/*
1948 * A zone is low on free memory, so wake its kswapd task to service it.
1949 */
1950void wakeup_kswapd(struct zone *zone, int order)
1951{
1952	pg_data_t *pgdat;
1953
1954	if (!populated_zone(zone))
1955		return;
1956
1957	pgdat = zone->zone_pgdat;
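	/* Already at or above the low watermark: no need to wake kswapd. */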
1958	if (zone_watermark_ok(zone, order, zone->pages_low, 0, 0))
1959		return;
1960	if (pgdat->kswapd_max_order < order)
1961		pgdat->kswapd_max_order = order;
1962	if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
1963		return;
1964	if (!waitqueue_active(&pgdat->kswapd_wait))
1965		return;
1966	wake_up_interruptible(&pgdat->kswapd_wait);
1967}
1968
1969unsigned long global_lru_pages(void)
1970{
1971	return global_page_state(NR_ACTIVE_ANON)
1972		+ global_page_state(NR_ACTIVE_FILE)
1973		+ global_page_state(NR_INACTIVE_ANON)
1974		+ global_page_state(NR_INACTIVE_FILE);
1975}
1976
1977#ifdef CONFIG_PM
1978/*
1979 * Helper function for shrink_all_memory().  Tries to reclaim 'nr_pages' pages
1980 * from LRU lists system-wide, for the given pass and priority, and returns
1981 * the number of reclaimed pages.
1982 *
1983 * For pass > 3 we also try to shrink the LRU lists that contain only a few pages.
1984 */
1985static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
1986				      int pass, struct scan_control *sc)
1987{
1988	struct zone *zone;
1989	unsigned long nr_to_scan, ret = 0;
1990	enum lru_list l;
1991
1992	for_each_zone(zone) {
1993
1994		if (!populated_zone(zone))
1995			continue;
1996
1997		if (zone_is_all_unreclaimable(zone) && prio != DEF_PRIORITY)
1998			continue;
1999
2000		for_each_evictable_lru(l) {
2001			/* For pass = 0, we don't shrink the active lists */
2002			if (pass == 0 &&
2003				(l == LRU_ACTIVE_ANON || l == LRU_ACTIVE_FILE))
2004				continue;
2005
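			/*
			 * Accumulate a scan quota proportional to the list
			 * size (halved per priority step); shrink only once
			 * the quota reaches nr_pages, except on the final
			 * pass where even small lists are shrunk.
			 */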
2006			zone->lru[l].nr_scan +=
2007				(zone_page_state(zone, NR_LRU_BASE + l)
2008								>> prio) + 1;
2009			if (zone->lru[l].nr_scan >= nr_pages || pass > 3) {
2010				zone->lru[l].nr_scan = 0;
2011				nr_to_scan = min(nr_pages,
2012					zone_page_state(zone,
2013							NR_LRU_BASE + l));
2014				ret += shrink_list(l, nr_to_scan, zone,
2015								sc, prio);
2016				if (ret >= nr_pages)
2017					return ret;
2018			}
2019		}
2020	}
2021
2022	return ret;
2023}
2024
2025/*
2026 * Try to free `nr_pages' of memory, system-wide, and return the number of
2027 * freed pages.
2028 *
2029 * Rather than trying to age LRUs, the aim is to preserve the overall
2030 * LRU order by reclaiming preferentially:
2031 * inactive > active > active referenced > active mapped
2032 */
2033unsigned long shrink_all_memory(unsigned long nr_pages)
2034{
2035	unsigned long lru_pages, nr_slab;
2036	unsigned long ret = 0;
2037	int pass;
2038	struct reclaim_state reclaim_state;
2039	struct scan_control sc = {
2040		.gfp_mask = GFP_KERNEL,
2041		.may_swap = 0,
2042		.swap_cluster_max = nr_pages,
2043		.may_writepage = 1,
2044		.swappiness = vm_swappiness,
2045		.isolate_pages = isolate_pages_global,
2046	};
2047
2048	current->reclaim_state = &reclaim_state;
2049
2050	lru_pages = global_lru_pages();
2051	nr_slab = global_page_state(NR_SLAB_RECLAIMABLE);
2052	/* If slab caches are huge, it's better to hit them first */
2053	while (nr_slab >= lru_pages) {
2054		reclaim_state.reclaimed_slab = 0;
2055		shrink_slab(nr_pages, sc.gfp_mask, lru_pages);
2056		if (!reclaim_state.reclaimed_slab)
2057			break;
2058
2059		ret += reclaim_state.reclaimed_slab;
2060		if (ret >= nr_pages)
2061			goto out;
2062
2063		nr_slab -= reclaim_state.reclaimed_slab;
2064	}
2065
2066	/*
2067	 * We try to shrink LRUs in 5 passes:
2068	 * 0 = Reclaim from inactive_list only
2069	 * 1 = Reclaim from active list but don't reclaim mapped
2070	 * 2 = 2nd pass of type 1
2071	 * 3 = Reclaim mapped (normal reclaim)
2072	 * 4 = 2nd pass of type 3
2073	 */
2074	for (pass = 0; pass < 5; pass++) {
2075		int prio;
2076
2077		/* Force reclaiming mapped pages in the passes #3 and #4 */
2078		if (pass > 2) {
2079			sc.may_swap = 1;
2080			sc.swappiness = 100;
2081		}
2082
2083		for (prio = DEF_PRIORITY; prio >= 0; prio--) {
2084			unsigned long nr_to_scan = nr_pages - ret;
2085
2086			sc.nr_scanned = 0;
2087			ret += shrink_all_zones(nr_to_scan, prio, pass, &sc);
2088			if (ret >= nr_pages)
2089				goto out;
2090
2091			reclaim_state.reclaimed_slab = 0;
2092			shrink_slab(sc.nr_scanned, sc.gfp_mask,
2093					global_lru_pages());
2094			ret += reclaim_state.reclaimed_slab;
2095			if (ret >= nr_pages)
2096				goto out;
2097
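			/*
			 * As in balance_pgdat(), take a short nap at the
			 * harder priorities so that writeback can catch up.
			 */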
2098			if (sc.nr_scanned && prio < DEF_PRIORITY - 2)
2099				congestion_wait(WRITE, HZ / 10);
2100		}
2101	}
2102
2103	/*
2104	 * If ret = 0, we could not shrink LRUs, but there may be something
2105	 * in slab caches
2106	 */
2107	if (!ret) {
2108		do {
2109			reclaim_state.reclaimed_slab = 0;
2110			shrink_slab(nr_pages, sc.gfp_mask, global_lru_pages());
2111			ret += reclaim_state.reclaimed_slab;
2112		} while (ret < nr_pages && reclaim_state.reclaimed_slab > 0);
2113	}
2114
2115out:
2116	current->reclaim_state = NULL;
2117
2118	return ret;
2119}
2120#endif
2121
2122/* It's optimal to keep kswapds on the same CPUs as their memory, but
2123   not required for correctness.  So if the last cpu in a node goes
2124   away, we get changed to run anywhere: as the first one comes back,
2125   restore their cpu bindings. */
2126static int __devinit cpu_callback(struct notifier_block *nfb,
2127				  unsigned long action, void *hcpu)
2128{
2129	int nid;
2130
2131	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
2132		for_each_node_state(nid, N_HIGH_MEMORY) {
2133			pg_data_t *pgdat = NODE_DATA(nid);
2134			node_to_cpumask_ptr(mask, pgdat->node_id);
2135
2136			if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
2137				/* One of our CPUs online: restore mask */
2138				set_cpus_allowed_ptr(pgdat->kswapd, mask);
2139		}
2140	}
2141	return NOTIFY_OK;
2142}
2143
2144/*
2145 * This kswapd start function will be called by init and node-hot-add.
2146 * On node-hot-add, kswapd is moved to the proper cpus if cpus are hot-added.
2147 */
2148int kswapd_run(int nid)
2149{
2150	pg_data_t *pgdat = NODE_DATA(nid);
2151	int ret = 0;
2152
2153	if (pgdat->kswapd)
2154		return 0;
2155
2156	pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
2157	if (IS_ERR(pgdat->kswapd)) {
2158		/* failure at boot is fatal */
2159		BUG_ON(system_state == SYSTEM_BOOTING);
2160		printk(KERN_ERR "Failed to start kswapd on node %d\n", nid);
2161		ret = -1;
2162	}
2163	return ret;
2164}
2165
2166static int __init kswapd_init(void)
2167{
2168	int nid;
2169
2170	swap_setup();
2171	for_each_node_state(nid, N_HIGH_MEMORY)
2172		kswapd_run(nid);
2173	hotcpu_notifier(cpu_callback, 0);
2174	return 0;
2175}
2176
2177module_init(kswapd_init)
2178
2179#ifdef CONFIG_NUMA
2180/*
2181 * Zone reclaim mode
2182 *
2183 * If non-zero, call zone_reclaim() when the number of free pages falls below
2184 * the watermarks.
2185 */
2186int zone_reclaim_mode __read_mostly;
2187
2188#define RECLAIM_OFF 0
2189#define RECLAIM_ZONE (1<<0)	/* Run shrink_inactive_list on the zone */
2190#define RECLAIM_WRITE (1<<1)	/* Writeout pages during reclaim */
2191#define RECLAIM_SWAP (1<<2)	/* Swap pages out during reclaim */
2192
2193/*
2194 * Priority for ZONE_RECLAIM. This determines the fraction of pages
2195 * of a node considered for each zone_reclaim run; each priority step
2196 * halves that fraction, so a priority of 4 scans 1/16th of a zone.
2197 */
2198#define ZONE_RECLAIM_PRIORITY 4
2199
2200/*
2201 * Percentage of pages in a zone that must be unmapped for zone_reclaim to
2202 * occur.
2203 */
2204int sysctl_min_unmapped_ratio = 1;
2205
2206/*
2207 * If the number of slab pages in a zone grows beyond this percentage then
2208 * slab reclaim needs to occur.
2209 */
2210int sysctl_min_slab_ratio = 5;
2211
2212/*
2213 * Try to free up some pages from this zone through reclaim.
2214 */
2215static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
2216{
2217	/* Minimum pages needed in order to stay on node */
2218	const unsigned long nr_pages = 1 << order;
2219	struct task_struct *p = current;
2220	struct reclaim_state reclaim_state;
2221	int priority;
2222	unsigned long nr_reclaimed = 0;
2223	struct scan_control sc = {
2224		.may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
2225		.may_swap = !!(zone_reclaim_mode & RECLAIM_SWAP),
2226		.swap_cluster_max = max_t(unsigned long, nr_pages,
2227					SWAP_CLUSTER_MAX),
2228		.gfp_mask = gfp_mask,
2229		.swappiness = vm_swappiness,
2230		.isolate_pages = isolate_pages_global,
2231	};
2232	unsigned long slab_reclaimable;
2233
2234	disable_swap_token();
2235	cond_resched();
2236	/*
2237	 * We need to be able to allocate from the reserves for RECLAIM_SWAP
2238	 * and we also need to be able to write out pages for RECLAIM_WRITE
2239	 * and RECLAIM_SWAP.
2240	 */
2241	p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
2242	reclaim_state.reclaimed_slab = 0;
2243	p->reclaim_state = &reclaim_state;
2244
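	/*
	 * Only scan the LRU lists if the zone still has more unmapped
	 * file backed pages than the min_unmapped_pages cutoff; otherwise
	 * fall through to slab reclaim below.
	 */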
2245	if (zone_page_state(zone, NR_FILE_PAGES) -
2246		zone_page_state(zone, NR_FILE_MAPPED) >
2247		zone->min_unmapped_pages) {
2248		/*
2249		 * Free memory by calling shrink_zone() with increasing pressure
2250		 * (decreasing priority values) until we have freed enough memory.
2251		 */
2252		priority = ZONE_RECLAIM_PRIORITY;
2253		do {
2254			note_zone_scanning_priority(zone, priority);
2255			nr_reclaimed += shrink_zone(priority, zone, &sc);
2256			priority--;
2257		} while (priority >= 0 && nr_reclaimed < nr_pages);
2258	}
2259
2260	slab_reclaimable = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
2261	if (slab_reclaimable > zone->min_slab_pages) {
2262		/*
2263		 * shrink_slab() does not currently allow us to determine how
2264		 * many pages were freed in this zone. So we take the current
2265		 * number of slab pages and shake the slab until it is reduced
2266		 * by the same nr_pages that we used for reclaiming unmapped
2267		 * pages.
2268		 *
2269		 * Note that shrink_slab will free memory on all zones and may
2270		 * take a long time.
2271		 */
2272		while (shrink_slab(sc.nr_scanned, gfp_mask, order) &&
2273			zone_page_state(zone, NR_SLAB_RECLAIMABLE) >
2274				slab_reclaimable - nr_pages)
2275			;
2276
2277		/*
2278		 * Update nr_reclaimed by the number of slab pages we
2279		 * reclaimed from this zone.
2280		 */
2281		nr_reclaimed += slab_reclaimable -
2282			zone_page_state(zone, NR_SLAB_RECLAIMABLE);
2283	}
2284
2285	p->reclaim_state = NULL;
2286	current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
2287	return nr_reclaimed >= nr_pages;
2288}
2289
2290int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
2291{
2292	int node_id;
2293	int ret;
2294
2295	/*
2296	 * Zone reclaim reclaims unmapped file backed pages and
2297	 * slab pages if we are over the defined limits.
2298	 *
2299	 * A small portion of unmapped file backed pages is needed for
2300	 * file I/O otherwise pages read by file I/O will be immediately
2301	 * thrown out if the zone is overallocated. So we do not reclaim
2302	 * if less than a specified percentage of the zone is used by
2303	 * unmapped file backed pages.
2304	 */
2305	if (zone_page_state(zone, NR_FILE_PAGES) -
2306	    zone_page_state(zone, NR_FILE_MAPPED) <= zone->min_unmapped_pages
2307	    && zone_page_state(zone, NR_SLAB_RECLAIMABLE)
2308			<= zone->min_slab_pages)
2309		return 0;
2310
2311	if (zone_is_all_unreclaimable(zone))
2312		return 0;
2313
2314	/*
2315	 * Do not scan if the allocation should not be delayed.
2316	 */
2317	if (!(gfp_mask & __GFP_WAIT) || (current->flags & PF_MEMALLOC))
2318		return 0;
2319
2320	/*
2321	 * Only run zone reclaim on the local zone or on zones that do not
2322	 * have associated processors. This will favor the local processor
2323	 * over remote processors and spread off-node memory allocations
2324	 * as widely as possible.
2325	 */
2326	node_id = zone_to_nid(zone);
2327	if (node_state(node_id, N_CPU) && node_id != numa_node_id())
2328		return 0;
2329
2330	if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED))
2331		return 0;
2332	ret = __zone_reclaim(zone, gfp_mask, order);
2333	zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);
2334
2335	return ret;
2336}
2337#endif
2338
2339#ifdef CONFIG_UNEVICTABLE_LRU
2340/*
2341 * page_evictable - test whether a page is evictable
2342 * @page: the page to test
2343 * @vma: the VMA in which the page is or will be mapped, may be NULL
2344 *
2345 * Test whether page is evictable--i.e., should be placed on active/inactive
2346 * lists vs unevictable list.  The vma argument is !NULL when called from the
2347 * fault path to determine how to instantiate a new page.
2348 *
2349 * Reasons page might not be evictable:
2350 * (1) page's mapping marked unevictable
2351 * (2) page is part of an mlocked VMA
2352 *
2353 */
2354int page_evictable(struct page *page, struct vm_area_struct *vma)
2355{
2356
2357	if (mapping_unevictable(page_mapping(page)))
2358		return 0;
2359
2360	if (PageMlocked(page) || (vma && is_mlocked_vma(vma, page)))
2361		return 0;
2362
2363	return 1;
2364}
2365
2366/**
2367 * check_move_unevictable_page - check page for evictability and move to appropriate zone lru list
2368 * @page: page to check evictability and move to appropriate lru list
2369 * @zone: zone page is in
2370 *
2371 * Checks a page for evictability and moves the page to the appropriate
2372 * zone lru list.
2373 *
2374 * Restrictions: zone->lru_lock must be held, page must be on LRU and must
2375 * have PageUnevictable set.
2376 */
2377static void check_move_unevictable_page(struct page *page, struct zone *zone)
2378{
2379	VM_BUG_ON(PageActive(page));
2380
2381retry:
2382	ClearPageUnevictable(page);
2383	if (page_evictable(page, NULL)) {
2384		enum lru_list l = LRU_INACTIVE_ANON + page_is_file_cache(page);
2385
2386		__dec_zone_state(zone, NR_UNEVICTABLE);
2387		list_move(&page->lru, &zone->lru[l].list);
2388		__inc_zone_state(zone, NR_INACTIVE_ANON + l);
2389		__count_vm_event(UNEVICTABLE_PGRESCUED);
2390	} else {
2391		/*
2392		 * rotate unevictable list
2393		 */
2394		SetPageUnevictable(page);
2395		list_move(&page->lru, &zone->lru[LRU_UNEVICTABLE].list);
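		/*
		 * The page may have become evictable again between the test
		 * above and the list move; re-check so we do not strand an
		 * evictable page on the unevictable list.
		 */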
2396		if (page_evictable(page, NULL))
2397			goto retry;
2398	}
2399}
2400
2401/**
2402 * scan_mapping_unevictable_pages - scan an address space for evictable pages
2403 * @mapping: struct address_space to scan for evictable pages
2404 *
2405 * Scan all pages in mapping.  Check unevictable pages for
2406 * evictability and move them to the appropriate zone lru list.
2407 */
2408void scan_mapping_unevictable_pages(struct address_space *mapping)
2409{
2410	pgoff_t next = 0;
2411	pgoff_t end   = (i_size_read(mapping->host) + PAGE_CACHE_SIZE - 1) >>
2412			 PAGE_CACHE_SHIFT;
2413	struct zone *zone;
2414	struct pagevec pvec;
2415
2416	if (mapping->nrpages == 0)
2417		return;
2418
2419	pagevec_init(&pvec, 0);
2420	while (next < end &&
2421		pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
2422		int i;
2423		int pg_scanned = 0;
2424
2425		zone = NULL;
2426
2427		for (i = 0; i < pagevec_count(&pvec); i++) {
2428			struct page *page = pvec.pages[i];
2429			pgoff_t page_index = page->index;
2430			struct zone *pagezone = page_zone(page);
2431
2432			pg_scanned++;
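			/* Advance the lookup index, skipping holes. */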
2433			if (page_index > next)
2434				next = page_index;
2435			next++;
2436
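			/*
			 * Pages in this pagevec may come from different
			 * zones; only drop and retake zone->lru_lock when
			 * the zone actually changes.
			 */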
2437			if (pagezone != zone) {
2438				if (zone)
2439					spin_unlock_irq(&zone->lru_lock);
2440				zone = pagezone;
2441				spin_lock_irq(&zone->lru_lock);
2442			}
2443
2444			if (PageLRU(page) && PageUnevictable(page))
2445				check_move_unevictable_page(page, zone);
2446		}
2447		if (zone)
2448			spin_unlock_irq(&zone->lru_lock);
2449		pagevec_release(&pvec);
2450
2451		count_vm_events(UNEVICTABLE_PGSCANNED, pg_scanned);
2452	}
2453
2454}
2455
2456/**
2457 * scan_zone_unevictable_pages - check unevictable list for evictable pages
2458 * @zone: zone whose unevictable list is to be scanned
2459 *
2460 * Scan @zone's unevictable LRU lists to check for pages that have become
2461 * evictable.  Move those that have to @zone's inactive list where they
2462 * become candidates for reclaim, unless shrink_inactive_zone() decides
2463 * become candidates for reclaim, unless shrink_inactive_list() decides
2464 * back onto @zone's unevictable list.
2465 */
2466#define SCAN_UNEVICTABLE_BATCH_SIZE 16UL /* arbitrary lock hold batch size */
2467void scan_zone_unevictable_pages(struct zone *zone)
2468{
2469	struct list_head *l_unevictable = &zone->lru[LRU_UNEVICTABLE].list;
2470	unsigned long scan;
2471	unsigned long nr_to_scan = zone_page_state(zone, NR_UNEVICTABLE);
2472
2473	while (nr_to_scan > 0) {
2474		unsigned long batch_size = min(nr_to_scan,
2475						SCAN_UNEVICTABLE_BATCH_SIZE);
2476
2477		spin_lock_irq(&zone->lru_lock);
2478		for (scan = 0;  scan < batch_size; scan++) {
2479			struct page *page = lru_to_page(l_unevictable);
2480
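			/*
			 * The page is locked elsewhere and we cannot sleep
			 * under zone->lru_lock, so skip it for this batch.
			 */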
2481			if (!trylock_page(page))
2482				continue;
2483
2484			prefetchw_prev_lru_page(page, l_unevictable, flags);
2485
2486			if (likely(PageLRU(page) && PageUnevictable(page)))
2487				check_move_unevictable_page(page, zone);
2488
2489			unlock_page(page);
2490		}
2491		spin_unlock_irq(&zone->lru_lock);
2492
2493		nr_to_scan -= batch_size;
2494	}
2495}
2496
2497
2498/**
2499 * scan_all_zones_unevictable_pages - scan all unevictable lists for evictable pages
2500 *
2501 * A really big hammer:  scan all zones' unevictable LRU lists to check for
2502 * pages that have become evictable.  Move those back to the zones'
2503 * inactive list where they become candidates for reclaim.
2504 * This occurs when, e.g., we have unswappable pages on the unevictable lists,
2505 * and we add swap to the system.  As such, it runs in the context of a task
2506 * that has possibly/probably made some previously unevictable pages
2507 * evictable.
2508 */
2509static void scan_all_zones_unevictable_pages(void)
2510{
2511	struct zone *zone;
2512
2513	for_each_zone(zone) {
2514		scan_zone_unevictable_pages(zone);
2515	}
2516}
2517
2518/*
2519 * scan_unevictable_pages [vm] sysctl handler.  On demand re-scan of
2520 * all nodes' unevictable lists for evictable pages
2521 */
2522unsigned long scan_unevictable_pages;
2523
2524int scan_unevictable_handler(struct ctl_table *table, int write,
2525			   struct file *file, void __user *buffer,
2526			   size_t *length, loff_t *ppos)
2527{
2528	proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
2529
2530	if (write && *(unsigned long *)table->data)
2531		scan_all_zones_unevictable_pages();
2532
2533	scan_unevictable_pages = 0;
2534	return 0;
2535}
2536
2537/*
2538 * per node 'scan_unevictable_pages' attribute.  On demand re-scan of
2539 * a specified node's per zone unevictable lists for evictable pages.
2540 */
2541
2542static ssize_t read_scan_unevictable_node(struct sys_device *dev,
2543					  struct sysdev_attribute *attr,
2544					  char *buf)
2545{
2546	return sprintf(buf, "0\n");	/* always zero; should fit... */
2547}
2548
2549static ssize_t write_scan_unevictable_node(struct sys_device *dev,
2550					   struct sysdev_attribute *attr,
2551					const char *buf, size_t count)
2552{
2553	struct zone *node_zones = NODE_DATA(dev->id)->node_zones;
2554	struct zone *zone;
2555	unsigned long res;
2556	int err = strict_strtoul(buf, 10, &res);
2557
2558	if (err || !res)
2559		return 1;	/* error or zero is a no-op */
2560
2561	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
2562		if (!populated_zone(zone))
2563			continue;
2564		scan_zone_unevictable_pages(zone);
2565	}
2566	return 1;
2567}
2568
2569
2570static SYSDEV_ATTR(scan_unevictable_pages, S_IRUGO | S_IWUSR,
2571			read_scan_unevictable_node,
2572			write_scan_unevictable_node);
2573
2574int scan_unevictable_register_node(struct node *node)
2575{
2576	return sysdev_create_file(&node->sysdev, &attr_scan_unevictable_pages);
2577}
2578
2579void scan_unevictable_unregister_node(struct node *node)
2580{
2581	sysdev_remove_file(&node->sysdev, &attr_scan_unevictable_pages);
2582}
2583
2584#endif
2585