vmscan.c revision 45973d74fd3b1e3e16c025b688a725c7653b1443
1/*
2 *  linux/mm/vmscan.c
3 *
4 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
5 *
6 *  Swap reorganised 29.12.95, Stephen Tweedie.
7 *  kswapd added: 7.1.96  sct
8 *  Removed kswapd_ctl limits, and swap out as many pages as needed
9 *  to bring the system back to freepages.high: 2.4.97, Rik van Riel.
10 *  Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
11 *  Multiqueue VM started 5.8.00, Rik van Riel.
12 */
13
14#include <linux/mm.h>
15#include <linux/module.h>
16#include <linux/slab.h>
17#include <linux/kernel_stat.h>
18#include <linux/swap.h>
19#include <linux/pagemap.h>
20#include <linux/init.h>
21#include <linux/highmem.h>
22#include <linux/vmstat.h>
23#include <linux/file.h>
24#include <linux/writeback.h>
25#include <linux/blkdev.h>
26#include <linux/buffer_head.h>	/* for try_to_release_page(),
27					buffer_heads_over_limit */
28#include <linux/mm_inline.h>
29#include <linux/pagevec.h>
30#include <linux/backing-dev.h>
31#include <linux/rmap.h>
32#include <linux/topology.h>
33#include <linux/cpu.h>
34#include <linux/cpuset.h>
35#include <linux/notifier.h>
36#include <linux/rwsem.h>
37#include <linux/delay.h>
38#include <linux/kthread.h>
39#include <linux/freezer.h>
40#include <linux/memcontrol.h>
41#include <linux/delayacct.h>
42#include <linux/sysctl.h>
43
44#include <asm/tlbflush.h>
45#include <asm/div64.h>
46
47#include <linux/swapops.h>
48
49#include "internal.h"
50
51struct scan_control {
52	/* Incremented by the number of inactive pages that were scanned */
53	unsigned long nr_scanned;
54
55	/* Number of pages freed so far during a call to shrink_zones() */
56	unsigned long nr_reclaimed;
57
58	/* How many pages shrink_list() should reclaim */
59	unsigned long nr_to_reclaim;
60
61	unsigned long hibernation_mode;
62
63	/* This context's GFP mask */
64	gfp_t gfp_mask;
65
66	int may_writepage;
67
68	/* Can mapped pages be reclaimed? */
69	int may_unmap;
70
71	/* Can pages be swapped as part of reclaim? */
72	int may_swap;
73
74	int swappiness;
75
76	int all_unreclaimable;
77
78	int order;
79
80	/* Which cgroup do we reclaim from */
81	struct mem_cgroup *mem_cgroup;
82
83	/*
84	 * Nodemask of nodes allowed by the caller. If NULL, all nodes
85	 * are scanned.
86	 */
87	nodemask_t	*nodemask;
88
89	/* Pluggable isolate pages callback */
90	unsigned long (*isolate_pages)(unsigned long nr, struct list_head *dst,
91			unsigned long *scanned, int order, int mode,
92			struct zone *z, struct mem_cgroup *mem_cont,
93			int active, int file);
94};
95
96#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
97
98#ifdef ARCH_HAS_PREFETCH
99#define prefetch_prev_lru_page(_page, _base, _field)			\
100	do {								\
101		if ((_page)->lru.prev != _base) {			\
102			struct page *prev;				\
103									\
104			prev = lru_to_page(&(_page->lru));		\
105			prefetch(&prev->_field);			\
106		}							\
107	} while (0)
108#else
109#define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
110#endif
111
112#ifdef ARCH_HAS_PREFETCHW
113#define prefetchw_prev_lru_page(_page, _base, _field)			\
114	do {								\
115		if ((_page)->lru.prev != _base) {			\
116			struct page *prev;				\
117									\
118			prev = lru_to_page(&(_page->lru));		\
119			prefetchw(&prev->_field);			\
120		}							\
121	} while (0)
122#else
123#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
124#endif
125
126/*
127 * From 0 .. 100.  Higher means more swappy.
128 */
129int vm_swappiness = 60;
130long vm_total_pages;	/* The total number of pages which the VM controls */
131
132static LIST_HEAD(shrinker_list);
133static DECLARE_RWSEM(shrinker_rwsem);
134
135#ifdef CONFIG_CGROUP_MEM_RES_CTLR
136#define scanning_global_lru(sc)	(!(sc)->mem_cgroup)
137#else
138#define scanning_global_lru(sc)	(1)
139#endif
140
141static struct zone_reclaim_stat *get_reclaim_stat(struct zone *zone,
142						  struct scan_control *sc)
143{
144	if (!scanning_global_lru(sc))
145		return mem_cgroup_get_reclaim_stat(sc->mem_cgroup, zone);
146
147	return &zone->reclaim_stat;
148}
149
150static unsigned long zone_nr_lru_pages(struct zone *zone,
151				struct scan_control *sc, enum lru_list lru)
152{
153	if (!scanning_global_lru(sc))
154		return mem_cgroup_zone_nr_pages(sc->mem_cgroup, zone, lru);
155
156	return zone_page_state(zone, NR_LRU_BASE + lru);
157}
158
159
160/*
161 * Add a shrinker callback to be called from the vm
162 */
163void register_shrinker(struct shrinker *shrinker)
164{
165	shrinker->nr = 0;
166	down_write(&shrinker_rwsem);
167	list_add_tail(&shrinker->list, &shrinker_list);
168	up_write(&shrinker_rwsem);
169}
170EXPORT_SYMBOL(register_shrinker);
171
172/*
173 * Remove one
174 */
175void unregister_shrinker(struct shrinker *shrinker)
176{
177	down_write(&shrinker_rwsem);
178	list_del(&shrinker->list);
179	up_write(&shrinker_rwsem);
180}
181EXPORT_SYMBOL(unregister_shrinker);
182
183#define SHRINK_BATCH 128
184/*
185 * Call the shrink functions to age shrinkable caches
186 *
187 * Here we assume it costs one seek to replace a lru page and that it also
188 * takes a seek to recreate a cache object.  With this in mind we age equal
189 * percentages of the lru and ageable caches.  This should balance the seeks
190 * generated by these structures.
191 *
192 * If the vm encountered mapped pages on the LRU it increase the pressure on
193 * slab to avoid swapping.
194 *
195 * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
196 *
197 * `lru_pages' represents the number of on-LRU pages in all the zones which
198 * are eligible for the caller's allocation attempt.  It is used for balancing
199 * slab reclaim versus page reclaim.
200 *
201 * Returns the number of slab objects which we shrunk.
202 */
203unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
204			unsigned long lru_pages)
205{
206	struct shrinker *shrinker;
207	unsigned long ret = 0;
208
209	if (scanned == 0)
210		scanned = SWAP_CLUSTER_MAX;
211
212	if (!down_read_trylock(&shrinker_rwsem))
213		return 1;	/* Assume we'll be able to shrink next time */
214
215	list_for_each_entry(shrinker, &shrinker_list, list) {
216		unsigned long long delta;
217		unsigned long total_scan;
218		unsigned long max_pass = (*shrinker->shrink)(0, gfp_mask);
219
220		delta = (4 * scanned) / shrinker->seeks;
221		delta *= max_pass;
222		do_div(delta, lru_pages + 1);
223		shrinker->nr += delta;
224		if (shrinker->nr < 0) {
225			printk(KERN_ERR "shrink_slab: %pF negative objects to "
226			       "delete nr=%ld\n",
227			       shrinker->shrink, shrinker->nr);
228			shrinker->nr = max_pass;
229		}
230
231		/*
232		 * Avoid risking looping forever due to too large nr value:
233		 * never try to free more than twice the estimate number of
234		 * freeable entries.
235		 */
236		if (shrinker->nr > max_pass * 2)
237			shrinker->nr = max_pass * 2;
238
239		total_scan = shrinker->nr;
240		shrinker->nr = 0;
241
242		while (total_scan >= SHRINK_BATCH) {
243			long this_scan = SHRINK_BATCH;
244			int shrink_ret;
245			int nr_before;
246
247			nr_before = (*shrinker->shrink)(0, gfp_mask);
248			shrink_ret = (*shrinker->shrink)(this_scan, gfp_mask);
249			if (shrink_ret == -1)
250				break;
251			if (shrink_ret < nr_before)
252				ret += nr_before - shrink_ret;
253			count_vm_events(SLABS_SCANNED, this_scan);
254			total_scan -= this_scan;
255
256			cond_resched();
257		}
258
259		shrinker->nr += total_scan;
260	}
261	up_read(&shrinker_rwsem);
262	return ret;
263}
264
265/* Called without lock on whether page is mapped, so answer is unstable */
266static inline int page_mapping_inuse(struct page *page)
267{
268	struct address_space *mapping;
269
270	/* Page is in somebody's page tables. */
271	if (page_mapped(page))
272		return 1;
273
274	/* Be more reluctant to reclaim swapcache than pagecache */
275	if (PageSwapCache(page))
276		return 1;
277
278	mapping = page_mapping(page);
279	if (!mapping)
280		return 0;
281
282	/* File is mmap'd by somebody? */
283	return mapping_mapped(mapping);
284}
285
286static inline int is_page_cache_freeable(struct page *page)
287{
288	/*
289	 * A freeable page cache page is referenced only by the caller
290	 * that isolated the page, the page cache radix tree and
291	 * optional buffer heads at page->private.
292	 */
293	return page_count(page) - page_has_private(page) == 2;
294}
295
296static int may_write_to_queue(struct backing_dev_info *bdi)
297{
298	if (current->flags & PF_SWAPWRITE)
299		return 1;
300	if (!bdi_write_congested(bdi))
301		return 1;
302	if (bdi == current->backing_dev_info)
303		return 1;
304	return 0;
305}
306
307/*
308 * We detected a synchronous write error writing a page out.  Probably
309 * -ENOSPC.  We need to propagate that into the address_space for a subsequent
310 * fsync(), msync() or close().
311 *
312 * The tricky part is that after writepage we cannot touch the mapping: nothing
313 * prevents it from being freed up.  But we have a ref on the page and once
314 * that page is locked, the mapping is pinned.
315 *
316 * We're allowed to run sleeping lock_page() here because we know the caller has
317 * __GFP_FS.
318 */
319static void handle_write_error(struct address_space *mapping,
320				struct page *page, int error)
321{
322	lock_page(page);
323	if (page_mapping(page) == mapping)
324		mapping_set_error(mapping, error);
325	unlock_page(page);
326}
327
328/* Request for sync pageout. */
329enum pageout_io {
330	PAGEOUT_IO_ASYNC,
331	PAGEOUT_IO_SYNC,
332};
333
334/* possible outcome of pageout() */
335typedef enum {
336	/* failed to write page out, page is locked */
337	PAGE_KEEP,
338	/* move page to the active list, page is locked */
339	PAGE_ACTIVATE,
340	/* page has been sent to the disk successfully, page is unlocked */
341	PAGE_SUCCESS,
342	/* page is clean and locked */
343	PAGE_CLEAN,
344} pageout_t;
345
346/*
347 * pageout is called by shrink_page_list() for each dirty page.
348 * Calls ->writepage().
349 */
350static pageout_t pageout(struct page *page, struct address_space *mapping,
351						enum pageout_io sync_writeback)
352{
353	/*
354	 * If the page is dirty, only perform writeback if that write
355	 * will be non-blocking.  To prevent this allocation from being
356	 * stalled by pagecache activity.  But note that there may be
357	 * stalls if we need to run get_block().  We could test
358	 * PagePrivate for that.
359	 *
360	 * If this process is currently in __generic_file_aio_write() against
361	 * this page's queue, we can perform writeback even if that
362	 * will block.
363	 *
364	 * If the page is swapcache, write it back even if that would
365	 * block, for some throttling. This happens by accident, because
366	 * swap_backing_dev_info is bust: it doesn't reflect the
367	 * congestion state of the swapdevs.  Easy to fix, if needed.
368	 */
369	if (!is_page_cache_freeable(page))
370		return PAGE_KEEP;
371	if (!mapping) {
372		/*
373		 * Some data journaling orphaned pages can have
374		 * page->mapping == NULL while being dirty with clean buffers.
375		 */
376		if (page_has_private(page)) {
377			if (try_to_free_buffers(page)) {
378				ClearPageDirty(page);
379				printk("%s: orphaned page\n", __func__);
380				return PAGE_CLEAN;
381			}
382		}
383		return PAGE_KEEP;
384	}
385	if (mapping->a_ops->writepage == NULL)
386		return PAGE_ACTIVATE;
387	if (!may_write_to_queue(mapping->backing_dev_info))
388		return PAGE_KEEP;
389
390	if (clear_page_dirty_for_io(page)) {
391		int res;
392		struct writeback_control wbc = {
393			.sync_mode = WB_SYNC_NONE,
394			.nr_to_write = SWAP_CLUSTER_MAX,
395			.range_start = 0,
396			.range_end = LLONG_MAX,
397			.nonblocking = 1,
398			.for_reclaim = 1,
399		};
400
401		SetPageReclaim(page);
402		res = mapping->a_ops->writepage(page, &wbc);
403		if (res < 0)
404			handle_write_error(mapping, page, res);
405		if (res == AOP_WRITEPAGE_ACTIVATE) {
406			ClearPageReclaim(page);
407			return PAGE_ACTIVATE;
408		}
409
410		/*
411		 * Wait on writeback if requested to. This happens when
412		 * direct reclaiming a large contiguous area and the
413		 * first attempt to free a range of pages fails.
414		 */
415		if (PageWriteback(page) && sync_writeback == PAGEOUT_IO_SYNC)
416			wait_on_page_writeback(page);
417
418		if (!PageWriteback(page)) {
419			/* synchronous write or broken a_ops? */
420			ClearPageReclaim(page);
421		}
422		inc_zone_page_state(page, NR_VMSCAN_WRITE);
423		return PAGE_SUCCESS;
424	}
425
426	return PAGE_CLEAN;
427}
428
429/*
430 * Same as remove_mapping, but if the page is removed from the mapping, it
431 * gets returned with a refcount of 0.
432 */
433static int __remove_mapping(struct address_space *mapping, struct page *page)
434{
435	BUG_ON(!PageLocked(page));
436	BUG_ON(mapping != page_mapping(page));
437
438	spin_lock_irq(&mapping->tree_lock);
439	/*
440	 * The non racy check for a busy page.
441	 *
442	 * Must be careful with the order of the tests. When someone has
443	 * a ref to the page, it may be possible that they dirty it then
444	 * drop the reference. So if PageDirty is tested before page_count
445	 * here, then the following race may occur:
446	 *
447	 * get_user_pages(&page);
448	 * [user mapping goes away]
449	 * write_to(page);
450	 *				!PageDirty(page)    [good]
451	 * SetPageDirty(page);
452	 * put_page(page);
453	 *				!page_count(page)   [good, discard it]
454	 *
455	 * [oops, our write_to data is lost]
456	 *
457	 * Reversing the order of the tests ensures such a situation cannot
458	 * escape unnoticed. The smp_rmb is needed to ensure the page->flags
459	 * load is not satisfied before that of page->_count.
460	 *
461	 * Note that if SetPageDirty is always performed via set_page_dirty,
462	 * and thus under tree_lock, then this ordering is not required.
463	 */
464	if (!page_freeze_refs(page, 2))
465		goto cannot_free;
466	/* note: atomic_cmpxchg in page_freeze_refs provides the smp_rmb */
467	if (unlikely(PageDirty(page))) {
468		page_unfreeze_refs(page, 2);
469		goto cannot_free;
470	}
471
472	if (PageSwapCache(page)) {
473		swp_entry_t swap = { .val = page_private(page) };
474		__delete_from_swap_cache(page);
475		spin_unlock_irq(&mapping->tree_lock);
476		swapcache_free(swap, page);
477	} else {
478		__remove_from_page_cache(page);
479		spin_unlock_irq(&mapping->tree_lock);
480		mem_cgroup_uncharge_cache_page(page);
481	}
482
483	return 1;
484
485cannot_free:
486	spin_unlock_irq(&mapping->tree_lock);
487	return 0;
488}
489
490/*
491 * Attempt to detach a locked page from its ->mapping.  If it is dirty or if
492 * someone else has a ref on the page, abort and return 0.  If it was
493 * successfully detached, return 1.  Assumes the caller has a single ref on
494 * this page.
495 */
496int remove_mapping(struct address_space *mapping, struct page *page)
497{
498	if (__remove_mapping(mapping, page)) {
499		/*
500		 * Unfreezing the refcount with 1 rather than 2 effectively
501		 * drops the pagecache ref for us without requiring another
502		 * atomic operation.
503		 */
504		page_unfreeze_refs(page, 1);
505		return 1;
506	}
507	return 0;
508}
509
510/**
511 * putback_lru_page - put previously isolated page onto appropriate LRU list
512 * @page: page to be put back to appropriate lru list
513 *
514 * Add previously isolated @page to appropriate LRU list.
515 * Page may still be unevictable for other reasons.
516 *
517 * lru_lock must not be held, interrupts must be enabled.
518 */
519void putback_lru_page(struct page *page)
520{
521	int lru;
522	int active = !!TestClearPageActive(page);
523	int was_unevictable = PageUnevictable(page);
524
525	VM_BUG_ON(PageLRU(page));
526
527redo:
528	ClearPageUnevictable(page);
529
530	if (page_evictable(page, NULL)) {
531		/*
532		 * For evictable pages, we can use the cache.
533		 * In event of a race, worst case is we end up with an
534		 * unevictable page on [in]active list.
535		 * We know how to handle that.
536		 */
537		lru = active + page_lru_base_type(page);
538		lru_cache_add_lru(page, lru);
539	} else {
540		/*
541		 * Put unevictable pages directly on zone's unevictable
542		 * list.
543		 */
544		lru = LRU_UNEVICTABLE;
545		add_page_to_unevictable_list(page);
546		/*
547		 * When racing with an mlock clearing (page is
548		 * unlocked), make sure that if the other thread does
549		 * not observe our setting of PG_lru and fails
550		 * isolation, we see PG_mlocked cleared below and move
551		 * the page back to the evictable list.
552		 *
553		 * The other side is TestClearPageMlocked().
554		 */
555		smp_mb();
556	}
557
558	/*
559	 * page's status can change while we move it among lru. If an evictable
560	 * page is on unevictable list, it never be freed. To avoid that,
561	 * check after we added it to the list, again.
562	 */
563	if (lru == LRU_UNEVICTABLE && page_evictable(page, NULL)) {
564		if (!isolate_lru_page(page)) {
565			put_page(page);
566			goto redo;
567		}
568		/* This means someone else dropped this page from LRU
569		 * So, it will be freed or putback to LRU again. There is
570		 * nothing to do here.
571		 */
572	}
573
574	if (was_unevictable && lru != LRU_UNEVICTABLE)
575		count_vm_event(UNEVICTABLE_PGRESCUED);
576	else if (!was_unevictable && lru == LRU_UNEVICTABLE)
577		count_vm_event(UNEVICTABLE_PGCULLED);
578
579	put_page(page);		/* drop ref from isolate */
580}
581
582/*
583 * shrink_page_list() returns the number of reclaimed pages
584 */
585static unsigned long shrink_page_list(struct list_head *page_list,
586					struct scan_control *sc,
587					enum pageout_io sync_writeback)
588{
589	LIST_HEAD(ret_pages);
590	struct pagevec freed_pvec;
591	int pgactivate = 0;
592	unsigned long nr_reclaimed = 0;
593	unsigned long vm_flags;
594
595	cond_resched();
596
597	pagevec_init(&freed_pvec, 1);
598	while (!list_empty(page_list)) {
599		struct address_space *mapping;
600		struct page *page;
601		int may_enter_fs;
602		int referenced;
603
604		cond_resched();
605
606		page = lru_to_page(page_list);
607		list_del(&page->lru);
608
609		if (!trylock_page(page))
610			goto keep;
611
612		VM_BUG_ON(PageActive(page));
613
614		sc->nr_scanned++;
615
616		if (unlikely(!page_evictable(page, NULL)))
617			goto cull_mlocked;
618
619		if (!sc->may_unmap && page_mapped(page))
620			goto keep_locked;
621
622		/* Double the slab pressure for mapped and swapcache pages */
623		if (page_mapped(page) || PageSwapCache(page))
624			sc->nr_scanned++;
625
626		may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
627			(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
628
629		if (PageWriteback(page)) {
630			/*
631			 * Synchronous reclaim is performed in two passes,
632			 * first an asynchronous pass over the list to
633			 * start parallel writeback, and a second synchronous
634			 * pass to wait for the IO to complete.  Wait here
635			 * for any page for which writeback has already
636			 * started.
637			 */
638			if (sync_writeback == PAGEOUT_IO_SYNC && may_enter_fs)
639				wait_on_page_writeback(page);
640			else
641				goto keep_locked;
642		}
643
644		referenced = page_referenced(page, 1,
645						sc->mem_cgroup, &vm_flags);
646		/*
647		 * In active use or really unfreeable?  Activate it.
648		 * If page which have PG_mlocked lost isoltation race,
649		 * try_to_unmap moves it to unevictable list
650		 */
651		if (sc->order <= PAGE_ALLOC_COSTLY_ORDER &&
652					referenced && page_mapping_inuse(page)
653					&& !(vm_flags & VM_LOCKED))
654			goto activate_locked;
655
656		/*
657		 * Anonymous process memory has backing store?
658		 * Try to allocate it some swap space here.
659		 */
660		if (PageAnon(page) && !PageSwapCache(page)) {
661			if (!(sc->gfp_mask & __GFP_IO))
662				goto keep_locked;
663			if (!add_to_swap(page))
664				goto activate_locked;
665			may_enter_fs = 1;
666		}
667
668		mapping = page_mapping(page);
669
670		/*
671		 * The page is mapped into the page tables of one or more
672		 * processes. Try to unmap it here.
673		 */
674		if (page_mapped(page) && mapping) {
675			switch (try_to_unmap(page, TTU_UNMAP)) {
676			case SWAP_FAIL:
677				goto activate_locked;
678			case SWAP_AGAIN:
679				goto keep_locked;
680			case SWAP_MLOCK:
681				goto cull_mlocked;
682			case SWAP_SUCCESS:
683				; /* try to free the page below */
684			}
685		}
686
687		if (PageDirty(page)) {
688			if (sc->order <= PAGE_ALLOC_COSTLY_ORDER && referenced)
689				goto keep_locked;
690			if (!may_enter_fs)
691				goto keep_locked;
692			if (!sc->may_writepage)
693				goto keep_locked;
694
695			/* Page is dirty, try to write it out here */
696			switch (pageout(page, mapping, sync_writeback)) {
697			case PAGE_KEEP:
698				goto keep_locked;
699			case PAGE_ACTIVATE:
700				goto activate_locked;
701			case PAGE_SUCCESS:
702				if (PageWriteback(page) || PageDirty(page))
703					goto keep;
704				/*
705				 * A synchronous write - probably a ramdisk.  Go
706				 * ahead and try to reclaim the page.
707				 */
708				if (!trylock_page(page))
709					goto keep;
710				if (PageDirty(page) || PageWriteback(page))
711					goto keep_locked;
712				mapping = page_mapping(page);
713			case PAGE_CLEAN:
714				; /* try to free the page below */
715			}
716		}
717
718		/*
719		 * If the page has buffers, try to free the buffer mappings
720		 * associated with this page. If we succeed we try to free
721		 * the page as well.
722		 *
723		 * We do this even if the page is PageDirty().
724		 * try_to_release_page() does not perform I/O, but it is
725		 * possible for a page to have PageDirty set, but it is actually
726		 * clean (all its buffers are clean).  This happens if the
727		 * buffers were written out directly, with submit_bh(). ext3
728		 * will do this, as well as the blockdev mapping.
729		 * try_to_release_page() will discover that cleanness and will
730		 * drop the buffers and mark the page clean - it can be freed.
731		 *
732		 * Rarely, pages can have buffers and no ->mapping.  These are
733		 * the pages which were not successfully invalidated in
734		 * truncate_complete_page().  We try to drop those buffers here
735		 * and if that worked, and the page is no longer mapped into
736		 * process address space (page_count == 1) it can be freed.
737		 * Otherwise, leave the page on the LRU so it is swappable.
738		 */
739		if (page_has_private(page)) {
740			if (!try_to_release_page(page, sc->gfp_mask))
741				goto activate_locked;
742			if (!mapping && page_count(page) == 1) {
743				unlock_page(page);
744				if (put_page_testzero(page))
745					goto free_it;
746				else {
747					/*
748					 * rare race with speculative reference.
749					 * the speculative reference will free
750					 * this page shortly, so we may
751					 * increment nr_reclaimed here (and
752					 * leave it off the LRU).
753					 */
754					nr_reclaimed++;
755					continue;
756				}
757			}
758		}
759
760		if (!mapping || !__remove_mapping(mapping, page))
761			goto keep_locked;
762
763		/*
764		 * At this point, we have no other references and there is
765		 * no way to pick any more up (removed from LRU, removed
766		 * from pagecache). Can use non-atomic bitops now (and
767		 * we obviously don't have to worry about waking up a process
768		 * waiting on the page lock, because there are no references.
769		 */
770		__clear_page_locked(page);
771free_it:
772		nr_reclaimed++;
773		if (!pagevec_add(&freed_pvec, page)) {
774			__pagevec_free(&freed_pvec);
775			pagevec_reinit(&freed_pvec);
776		}
777		continue;
778
779cull_mlocked:
780		if (PageSwapCache(page))
781			try_to_free_swap(page);
782		unlock_page(page);
783		putback_lru_page(page);
784		continue;
785
786activate_locked:
787		/* Not a candidate for swapping, so reclaim swap space. */
788		if (PageSwapCache(page) && vm_swap_full())
789			try_to_free_swap(page);
790		VM_BUG_ON(PageActive(page));
791		SetPageActive(page);
792		pgactivate++;
793keep_locked:
794		unlock_page(page);
795keep:
796		list_add(&page->lru, &ret_pages);
797		VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
798	}
799	list_splice(&ret_pages, page_list);
800	if (pagevec_count(&freed_pvec))
801		__pagevec_free(&freed_pvec);
802	count_vm_events(PGACTIVATE, pgactivate);
803	return nr_reclaimed;
804}
805
806/* LRU Isolation modes. */
807#define ISOLATE_INACTIVE 0	/* Isolate inactive pages. */
808#define ISOLATE_ACTIVE 1	/* Isolate active pages. */
809#define ISOLATE_BOTH 2		/* Isolate both active and inactive pages. */
810
811/*
812 * Attempt to remove the specified page from its LRU.  Only take this page
813 * if it is of the appropriate PageActive status.  Pages which are being
814 * freed elsewhere are also ignored.
815 *
816 * page:	page to consider
817 * mode:	one of the LRU isolation modes defined above
818 *
819 * returns 0 on success, -ve errno on failure.
820 */
821int __isolate_lru_page(struct page *page, int mode, int file)
822{
823	int ret = -EINVAL;
824
825	/* Only take pages on the LRU. */
826	if (!PageLRU(page))
827		return ret;
828
829	/*
830	 * When checking the active state, we need to be sure we are
831	 * dealing with comparible boolean values.  Take the logical not
832	 * of each.
833	 */
834	if (mode != ISOLATE_BOTH && (!PageActive(page) != !mode))
835		return ret;
836
837	if (mode != ISOLATE_BOTH && page_is_file_cache(page) != file)
838		return ret;
839
840	/*
841	 * When this function is being called for lumpy reclaim, we
842	 * initially look into all LRU pages, active, inactive and
843	 * unevictable; only give shrink_page_list evictable pages.
844	 */
845	if (PageUnevictable(page))
846		return ret;
847
848	ret = -EBUSY;
849
850	if (likely(get_page_unless_zero(page))) {
851		/*
852		 * Be careful not to clear PageLRU until after we're
853		 * sure the page is not being freed elsewhere -- the
854		 * page release code relies on it.
855		 */
856		ClearPageLRU(page);
857		ret = 0;
858	}
859
860	return ret;
861}
862
863/*
864 * zone->lru_lock is heavily contended.  Some of the functions that
865 * shrink the lists perform better by taking out a batch of pages
866 * and working on them outside the LRU lock.
867 *
868 * For pagecache intensive workloads, this function is the hottest
869 * spot in the kernel (apart from copy_*_user functions).
870 *
871 * Appropriate locks must be held before calling this function.
872 *
873 * @nr_to_scan:	The number of pages to look through on the list.
874 * @src:	The LRU list to pull pages off.
875 * @dst:	The temp list to put pages on to.
876 * @scanned:	The number of pages that were scanned.
877 * @order:	The caller's attempted allocation order
878 * @mode:	One of the LRU isolation modes
879 * @file:	True [1] if isolating file [!anon] pages
880 *
881 * returns how many pages were moved onto *@dst.
882 */
883static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
884		struct list_head *src, struct list_head *dst,
885		unsigned long *scanned, int order, int mode, int file)
886{
887	unsigned long nr_taken = 0;
888	unsigned long scan;
889
890	for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
891		struct page *page;
892		unsigned long pfn;
893		unsigned long end_pfn;
894		unsigned long page_pfn;
895		int zone_id;
896
897		page = lru_to_page(src);
898		prefetchw_prev_lru_page(page, src, flags);
899
900		VM_BUG_ON(!PageLRU(page));
901
902		switch (__isolate_lru_page(page, mode, file)) {
903		case 0:
904			list_move(&page->lru, dst);
905			mem_cgroup_del_lru(page);
906			nr_taken++;
907			break;
908
909		case -EBUSY:
910			/* else it is being freed elsewhere */
911			list_move(&page->lru, src);
912			mem_cgroup_rotate_lru_list(page, page_lru(page));
913			continue;
914
915		default:
916			BUG();
917		}
918
919		if (!order)
920			continue;
921
922		/*
923		 * Attempt to take all pages in the order aligned region
924		 * surrounding the tag page.  Only take those pages of
925		 * the same active state as that tag page.  We may safely
926		 * round the target page pfn down to the requested order
927		 * as the mem_map is guarenteed valid out to MAX_ORDER,
928		 * where that page is in a different zone we will detect
929		 * it from its zone id and abort this block scan.
930		 */
931		zone_id = page_zone_id(page);
932		page_pfn = page_to_pfn(page);
933		pfn = page_pfn & ~((1 << order) - 1);
934		end_pfn = pfn + (1 << order);
935		for (; pfn < end_pfn; pfn++) {
936			struct page *cursor_page;
937
938			/* The target page is in the block, ignore it. */
939			if (unlikely(pfn == page_pfn))
940				continue;
941
942			/* Avoid holes within the zone. */
943			if (unlikely(!pfn_valid_within(pfn)))
944				break;
945
946			cursor_page = pfn_to_page(pfn);
947
948			/* Check that we have not crossed a zone boundary. */
949			if (unlikely(page_zone_id(cursor_page) != zone_id))
950				continue;
951
952			/*
953			 * If we don't have enough swap space, reclaiming of
954			 * anon page which don't already have a swap slot is
955			 * pointless.
956			 */
957			if (nr_swap_pages <= 0 && PageAnon(cursor_page) &&
958					!PageSwapCache(cursor_page))
959				continue;
960
961			if (__isolate_lru_page(cursor_page, mode, file) == 0) {
962				list_move(&cursor_page->lru, dst);
963				mem_cgroup_del_lru(cursor_page);
964				nr_taken++;
965				scan++;
966			}
967		}
968	}
969
970	*scanned = scan;
971	return nr_taken;
972}
973
974static unsigned long isolate_pages_global(unsigned long nr,
975					struct list_head *dst,
976					unsigned long *scanned, int order,
977					int mode, struct zone *z,
978					struct mem_cgroup *mem_cont,
979					int active, int file)
980{
981	int lru = LRU_BASE;
982	if (active)
983		lru += LRU_ACTIVE;
984	if (file)
985		lru += LRU_FILE;
986	return isolate_lru_pages(nr, &z->lru[lru].list, dst, scanned, order,
987								mode, file);
988}
989
990/*
991 * clear_active_flags() is a helper for shrink_active_list(), clearing
992 * any active bits from the pages in the list.
993 */
994static unsigned long clear_active_flags(struct list_head *page_list,
995					unsigned int *count)
996{
997	int nr_active = 0;
998	int lru;
999	struct page *page;
1000
1001	list_for_each_entry(page, page_list, lru) {
1002		lru = page_lru_base_type(page);
1003		if (PageActive(page)) {
1004			lru += LRU_ACTIVE;
1005			ClearPageActive(page);
1006			nr_active++;
1007		}
1008		count[lru]++;
1009	}
1010
1011	return nr_active;
1012}
1013
1014/**
1015 * isolate_lru_page - tries to isolate a page from its LRU list
1016 * @page: page to isolate from its LRU list
1017 *
1018 * Isolates a @page from an LRU list, clears PageLRU and adjusts the
1019 * vmstat statistic corresponding to whatever LRU list the page was on.
1020 *
1021 * Returns 0 if the page was removed from an LRU list.
1022 * Returns -EBUSY if the page was not on an LRU list.
1023 *
1024 * The returned page will have PageLRU() cleared.  If it was found on
1025 * the active list, it will have PageActive set.  If it was found on
1026 * the unevictable list, it will have the PageUnevictable bit set. That flag
1027 * may need to be cleared by the caller before letting the page go.
1028 *
1029 * The vmstat statistic corresponding to the list on which the page was
1030 * found will be decremented.
1031 *
1032 * Restrictions:
1033 * (1) Must be called with an elevated refcount on the page. This is a
1034 *     fundamentnal difference from isolate_lru_pages (which is called
1035 *     without a stable reference).
1036 * (2) the lru_lock must not be held.
1037 * (3) interrupts must be enabled.
1038 */
1039int isolate_lru_page(struct page *page)
1040{
1041	int ret = -EBUSY;
1042
1043	if (PageLRU(page)) {
1044		struct zone *zone = page_zone(page);
1045
1046		spin_lock_irq(&zone->lru_lock);
1047		if (PageLRU(page) && get_page_unless_zero(page)) {
1048			int lru = page_lru(page);
1049			ret = 0;
1050			ClearPageLRU(page);
1051
1052			del_page_from_lru_list(zone, page, lru);
1053		}
1054		spin_unlock_irq(&zone->lru_lock);
1055	}
1056	return ret;
1057}
1058
1059/*
1060 * Are there way too many processes in the direct reclaim path already?
1061 */
1062static int too_many_isolated(struct zone *zone, int file,
1063		struct scan_control *sc)
1064{
1065	unsigned long inactive, isolated;
1066
1067	if (current_is_kswapd())
1068		return 0;
1069
1070	if (!scanning_global_lru(sc))
1071		return 0;
1072
1073	if (file) {
1074		inactive = zone_page_state(zone, NR_INACTIVE_FILE);
1075		isolated = zone_page_state(zone, NR_ISOLATED_FILE);
1076	} else {
1077		inactive = zone_page_state(zone, NR_INACTIVE_ANON);
1078		isolated = zone_page_state(zone, NR_ISOLATED_ANON);
1079	}
1080
1081	return isolated > inactive;
1082}
1083
1084/*
1085 * shrink_inactive_list() is a helper for shrink_zone().  It returns the number
1086 * of reclaimed pages
1087 */
1088static unsigned long shrink_inactive_list(unsigned long max_scan,
1089			struct zone *zone, struct scan_control *sc,
1090			int priority, int file)
1091{
1092	LIST_HEAD(page_list);
1093	struct pagevec pvec;
1094	unsigned long nr_scanned = 0;
1095	unsigned long nr_reclaimed = 0;
1096	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
1097	int lumpy_reclaim = 0;
1098
1099	while (unlikely(too_many_isolated(zone, file, sc))) {
1100		congestion_wait(BLK_RW_ASYNC, HZ/10);
1101
1102		/* We are about to die and free our memory. Return now. */
1103		if (fatal_signal_pending(current))
1104			return SWAP_CLUSTER_MAX;
1105	}
1106
1107	/*
1108	 * If we need a large contiguous chunk of memory, or have
1109	 * trouble getting a small set of contiguous pages, we
1110	 * will reclaim both active and inactive pages.
1111	 *
1112	 * We use the same threshold as pageout congestion_wait below.
1113	 */
1114	if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
1115		lumpy_reclaim = 1;
1116	else if (sc->order && priority < DEF_PRIORITY - 2)
1117		lumpy_reclaim = 1;
1118
1119	pagevec_init(&pvec, 1);
1120
1121	lru_add_drain();
1122	spin_lock_irq(&zone->lru_lock);
1123	do {
1124		struct page *page;
1125		unsigned long nr_taken;
1126		unsigned long nr_scan;
1127		unsigned long nr_freed;
1128		unsigned long nr_active;
1129		unsigned int count[NR_LRU_LISTS] = { 0, };
1130		int mode = lumpy_reclaim ? ISOLATE_BOTH : ISOLATE_INACTIVE;
1131		unsigned long nr_anon;
1132		unsigned long nr_file;
1133
1134		nr_taken = sc->isolate_pages(SWAP_CLUSTER_MAX,
1135			     &page_list, &nr_scan, sc->order, mode,
1136				zone, sc->mem_cgroup, 0, file);
1137
1138		if (scanning_global_lru(sc)) {
1139			zone->pages_scanned += nr_scan;
1140			if (current_is_kswapd())
1141				__count_zone_vm_events(PGSCAN_KSWAPD, zone,
1142						       nr_scan);
1143			else
1144				__count_zone_vm_events(PGSCAN_DIRECT, zone,
1145						       nr_scan);
1146		}
1147
1148		if (nr_taken == 0)
1149			goto done;
1150
1151		nr_active = clear_active_flags(&page_list, count);
1152		__count_vm_events(PGDEACTIVATE, nr_active);
1153
1154		__mod_zone_page_state(zone, NR_ACTIVE_FILE,
1155						-count[LRU_ACTIVE_FILE]);
1156		__mod_zone_page_state(zone, NR_INACTIVE_FILE,
1157						-count[LRU_INACTIVE_FILE]);
1158		__mod_zone_page_state(zone, NR_ACTIVE_ANON,
1159						-count[LRU_ACTIVE_ANON]);
1160		__mod_zone_page_state(zone, NR_INACTIVE_ANON,
1161						-count[LRU_INACTIVE_ANON]);
1162
1163		nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON];
1164		nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE];
1165		__mod_zone_page_state(zone, NR_ISOLATED_ANON, nr_anon);
1166		__mod_zone_page_state(zone, NR_ISOLATED_FILE, nr_file);
1167
1168		reclaim_stat->recent_scanned[0] += nr_anon;
1169		reclaim_stat->recent_scanned[1] += nr_file;
1170
1171		spin_unlock_irq(&zone->lru_lock);
1172
1173		nr_scanned += nr_scan;
1174		nr_freed = shrink_page_list(&page_list, sc, PAGEOUT_IO_ASYNC);
1175
1176		/*
1177		 * If we are direct reclaiming for contiguous pages and we do
1178		 * not reclaim everything in the list, try again and wait
1179		 * for IO to complete. This will stall high-order allocations
1180		 * but that should be acceptable to the caller
1181		 */
1182		if (nr_freed < nr_taken && !current_is_kswapd() &&
1183		    lumpy_reclaim) {
1184			congestion_wait(BLK_RW_ASYNC, HZ/10);
1185
1186			/*
1187			 * The attempt at page out may have made some
1188			 * of the pages active, mark them inactive again.
1189			 */
1190			nr_active = clear_active_flags(&page_list, count);
1191			count_vm_events(PGDEACTIVATE, nr_active);
1192
1193			nr_freed += shrink_page_list(&page_list, sc,
1194							PAGEOUT_IO_SYNC);
1195		}
1196
1197		nr_reclaimed += nr_freed;
1198
1199		local_irq_disable();
1200		if (current_is_kswapd())
1201			__count_vm_events(KSWAPD_STEAL, nr_freed);
1202		__count_zone_vm_events(PGSTEAL, zone, nr_freed);
1203
1204		spin_lock(&zone->lru_lock);
1205		/*
1206		 * Put back any unfreeable pages.
1207		 */
1208		while (!list_empty(&page_list)) {
1209			int lru;
1210			page = lru_to_page(&page_list);
1211			VM_BUG_ON(PageLRU(page));
1212			list_del(&page->lru);
1213			if (unlikely(!page_evictable(page, NULL))) {
1214				spin_unlock_irq(&zone->lru_lock);
1215				putback_lru_page(page);
1216				spin_lock_irq(&zone->lru_lock);
1217				continue;
1218			}
1219			SetPageLRU(page);
1220			lru = page_lru(page);
1221			add_page_to_lru_list(zone, page, lru);
1222			if (is_active_lru(lru)) {
1223				int file = is_file_lru(lru);
1224				reclaim_stat->recent_rotated[file]++;
1225			}
1226			if (!pagevec_add(&pvec, page)) {
1227				spin_unlock_irq(&zone->lru_lock);
1228				__pagevec_release(&pvec);
1229				spin_lock_irq(&zone->lru_lock);
1230			}
1231		}
1232		__mod_zone_page_state(zone, NR_ISOLATED_ANON, -nr_anon);
1233		__mod_zone_page_state(zone, NR_ISOLATED_FILE, -nr_file);
1234
1235  	} while (nr_scanned < max_scan);
1236
1237done:
1238	spin_unlock_irq(&zone->lru_lock);
1239	pagevec_release(&pvec);
1240	return nr_reclaimed;
1241}
1242
1243/*
1244 * We are about to scan this zone at a certain priority level.  If that priority
1245 * level is smaller (ie: more urgent) than the previous priority, then note
1246 * that priority level within the zone.  This is done so that when the next
1247 * process comes in to scan this zone, it will immediately start out at this
1248 * priority level rather than having to build up its own scanning priority.
1249 * Here, this priority affects only the reclaim-mapped threshold.
1250 */
1251static inline void note_zone_scanning_priority(struct zone *zone, int priority)
1252{
1253	if (priority < zone->prev_priority)
1254		zone->prev_priority = priority;
1255}
1256
1257/*
1258 * This moves pages from the active list to the inactive list.
1259 *
1260 * We move them the other way if the page is referenced by one or more
1261 * processes, from rmap.
1262 *
1263 * If the pages are mostly unmapped, the processing is fast and it is
1264 * appropriate to hold zone->lru_lock across the whole operation.  But if
1265 * the pages are mapped, the processing is slow (page_referenced()) so we
1266 * should drop zone->lru_lock around each page.  It's impossible to balance
1267 * this, so instead we remove the pages from the LRU while processing them.
1268 * It is safe to rely on PG_active against the non-LRU pages in here because
1269 * nobody will play with that bit on a non-LRU page.
1270 *
1271 * The downside is that we have to touch page->_count against each page.
1272 * But we had to alter page->flags anyway.
1273 */
1274
1275static void move_active_pages_to_lru(struct zone *zone,
1276				     struct list_head *list,
1277				     enum lru_list lru)
1278{
1279	unsigned long pgmoved = 0;
1280	struct pagevec pvec;
1281	struct page *page;
1282
1283	pagevec_init(&pvec, 1);
1284
1285	while (!list_empty(list)) {
1286		page = lru_to_page(list);
1287
1288		VM_BUG_ON(PageLRU(page));
1289		SetPageLRU(page);
1290
1291		list_move(&page->lru, &zone->lru[lru].list);
1292		mem_cgroup_add_lru_list(page, lru);
1293		pgmoved++;
1294
1295		if (!pagevec_add(&pvec, page) || list_empty(list)) {
1296			spin_unlock_irq(&zone->lru_lock);
1297			if (buffer_heads_over_limit)
1298				pagevec_strip(&pvec);
1299			__pagevec_release(&pvec);
1300			spin_lock_irq(&zone->lru_lock);
1301		}
1302	}
1303	__mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
1304	if (!is_active_lru(lru))
1305		__count_vm_events(PGDEACTIVATE, pgmoved);
1306}
1307
1308static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
1309			struct scan_control *sc, int priority, int file)
1310{
1311	unsigned long nr_taken;
1312	unsigned long pgscanned;
1313	unsigned long vm_flags;
1314	LIST_HEAD(l_hold);	/* The pages which were snipped off */
1315	LIST_HEAD(l_active);
1316	LIST_HEAD(l_inactive);
1317	struct page *page;
1318	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
1319	unsigned long nr_rotated = 0;
1320
1321	lru_add_drain();
1322	spin_lock_irq(&zone->lru_lock);
1323	nr_taken = sc->isolate_pages(nr_pages, &l_hold, &pgscanned, sc->order,
1324					ISOLATE_ACTIVE, zone,
1325					sc->mem_cgroup, 1, file);
1326	/*
1327	 * zone->pages_scanned is used for detect zone's oom
1328	 * mem_cgroup remembers nr_scan by itself.
1329	 */
1330	if (scanning_global_lru(sc)) {
1331		zone->pages_scanned += pgscanned;
1332	}
1333	reclaim_stat->recent_scanned[file] += nr_taken;
1334
1335	__count_zone_vm_events(PGREFILL, zone, pgscanned);
1336	if (file)
1337		__mod_zone_page_state(zone, NR_ACTIVE_FILE, -nr_taken);
1338	else
1339		__mod_zone_page_state(zone, NR_ACTIVE_ANON, -nr_taken);
1340	__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
1341	spin_unlock_irq(&zone->lru_lock);
1342
1343	while (!list_empty(&l_hold)) {
1344		cond_resched();
1345		page = lru_to_page(&l_hold);
1346		list_del(&page->lru);
1347
1348		if (unlikely(!page_evictable(page, NULL))) {
1349			putback_lru_page(page);
1350			continue;
1351		}
1352
1353		/* page_referenced clears PageReferenced */
1354		if (page_mapping_inuse(page) &&
1355		    page_referenced(page, 0, sc->mem_cgroup, &vm_flags)) {
1356			nr_rotated++;
1357			/*
1358			 * Identify referenced, file-backed active pages and
1359			 * give them one more trip around the active list. So
1360			 * that executable code get better chances to stay in
1361			 * memory under moderate memory pressure.  Anon pages
1362			 * are not likely to be evicted by use-once streaming
1363			 * IO, plus JVM can create lots of anon VM_EXEC pages,
1364			 * so we ignore them here.
1365			 */
1366			if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) {
1367				list_add(&page->lru, &l_active);
1368				continue;
1369			}
1370		}
1371
1372		ClearPageActive(page);	/* we are de-activating */
1373		list_add(&page->lru, &l_inactive);
1374	}
1375
1376	/*
1377	 * Move pages back to the lru list.
1378	 */
1379	spin_lock_irq(&zone->lru_lock);
1380	/*
1381	 * Count referenced pages from currently used mappings as rotated,
1382	 * even though only some of them are actually re-activated.  This
1383	 * helps balance scan pressure between file and anonymous pages in
1384	 * get_scan_ratio.
1385	 */
1386	reclaim_stat->recent_rotated[file] += nr_rotated;
1387
1388	move_active_pages_to_lru(zone, &l_active,
1389						LRU_ACTIVE + file * LRU_FILE);
1390	move_active_pages_to_lru(zone, &l_inactive,
1391						LRU_BASE   + file * LRU_FILE);
1392	__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
1393	spin_unlock_irq(&zone->lru_lock);
1394}
1395
1396static int inactive_anon_is_low_global(struct zone *zone)
1397{
1398	unsigned long active, inactive;
1399
1400	active = zone_page_state(zone, NR_ACTIVE_ANON);
1401	inactive = zone_page_state(zone, NR_INACTIVE_ANON);
1402
1403	if (inactive * zone->inactive_ratio < active)
1404		return 1;
1405
1406	return 0;
1407}
1408
1409/**
1410 * inactive_anon_is_low - check if anonymous pages need to be deactivated
1411 * @zone: zone to check
1412 * @sc:   scan control of this context
1413 *
1414 * Returns true if the zone does not have enough inactive anon pages,
1415 * meaning some active anon pages need to be deactivated.
1416 */
1417static int inactive_anon_is_low(struct zone *zone, struct scan_control *sc)
1418{
1419	int low;
1420
1421	if (scanning_global_lru(sc))
1422		low = inactive_anon_is_low_global(zone);
1423	else
1424		low = mem_cgroup_inactive_anon_is_low(sc->mem_cgroup);
1425	return low;
1426}
1427
1428static int inactive_file_is_low_global(struct zone *zone)
1429{
1430	unsigned long active, inactive;
1431
1432	active = zone_page_state(zone, NR_ACTIVE_FILE);
1433	inactive = zone_page_state(zone, NR_INACTIVE_FILE);
1434
1435	return (active > inactive);
1436}
1437
1438/**
1439 * inactive_file_is_low - check if file pages need to be deactivated
1440 * @zone: zone to check
1441 * @sc:   scan control of this context
1442 *
1443 * When the system is doing streaming IO, memory pressure here
1444 * ensures that active file pages get deactivated, until more
1445 * than half of the file pages are on the inactive list.
1446 *
1447 * Once we get to that situation, protect the system's working
1448 * set from being evicted by disabling active file page aging.
1449 *
1450 * This uses a different ratio than the anonymous pages, because
1451 * the page cache uses a use-once replacement algorithm.
1452 */
1453static int inactive_file_is_low(struct zone *zone, struct scan_control *sc)
1454{
1455	int low;
1456
1457	if (scanning_global_lru(sc))
1458		low = inactive_file_is_low_global(zone);
1459	else
1460		low = mem_cgroup_inactive_file_is_low(sc->mem_cgroup);
1461	return low;
1462}
1463
1464static int inactive_list_is_low(struct zone *zone, struct scan_control *sc,
1465				int file)
1466{
1467	if (file)
1468		return inactive_file_is_low(zone, sc);
1469	else
1470		return inactive_anon_is_low(zone, sc);
1471}
1472
1473static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
1474	struct zone *zone, struct scan_control *sc, int priority)
1475{
1476	int file = is_file_lru(lru);
1477
1478	if (is_active_lru(lru)) {
1479		if (inactive_list_is_low(zone, sc, file))
1480		    shrink_active_list(nr_to_scan, zone, sc, priority, file);
1481		return 0;
1482	}
1483
1484	return shrink_inactive_list(nr_to_scan, zone, sc, priority, file);
1485}
1486
1487/*
1488 * Determine how aggressively the anon and file LRU lists should be
1489 * scanned.  The relative value of each set of LRU lists is determined
1490 * by looking at the fraction of the pages scanned we did rotate back
1491 * onto the active list instead of evict.
1492 *
1493 * percent[0] specifies how much pressure to put on ram/swap backed
1494 * memory, while percent[1] determines pressure on the file LRUs.
1495 */
1496static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
1497					unsigned long *percent)
1498{
1499	unsigned long anon, file, free;
1500	unsigned long anon_prio, file_prio;
1501	unsigned long ap, fp;
1502	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
1503
1504	anon  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) +
1505		zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON);
1506	file  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) +
1507		zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
1508
1509	if (scanning_global_lru(sc)) {
1510		free  = zone_page_state(zone, NR_FREE_PAGES);
1511		/* If we have very few page cache pages,
1512		   force-scan anon pages. */
1513		if (unlikely(file + free <= high_wmark_pages(zone))) {
1514			percent[0] = 100;
1515			percent[1] = 0;
1516			return;
1517		}
1518	}
1519
1520	/*
1521	 * OK, so we have swap space and a fair amount of page cache
1522	 * pages.  We use the recently rotated / recently scanned
1523	 * ratios to determine how valuable each cache is.
1524	 *
1525	 * Because workloads change over time (and to avoid overflow)
1526	 * we keep these statistics as a floating average, which ends
1527	 * up weighing recent references more than old ones.
1528	 *
1529	 * anon in [0], file in [1]
1530	 */
1531	if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
1532		spin_lock_irq(&zone->lru_lock);
1533		reclaim_stat->recent_scanned[0] /= 2;
1534		reclaim_stat->recent_rotated[0] /= 2;
1535		spin_unlock_irq(&zone->lru_lock);
1536	}
1537
1538	if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) {
1539		spin_lock_irq(&zone->lru_lock);
1540		reclaim_stat->recent_scanned[1] /= 2;
1541		reclaim_stat->recent_rotated[1] /= 2;
1542		spin_unlock_irq(&zone->lru_lock);
1543	}
1544
1545	/*
1546	 * With swappiness at 100, anonymous and file have the same priority.
1547	 * This scanning priority is essentially the inverse of IO cost.
1548	 */
1549	anon_prio = sc->swappiness;
1550	file_prio = 200 - sc->swappiness;
1551
1552	/*
1553	 * The amount of pressure on anon vs file pages is inversely
1554	 * proportional to the fraction of recently scanned pages on
1555	 * each list that were recently referenced and in active use.
1556	 */
1557	ap = (anon_prio + 1) * (reclaim_stat->recent_scanned[0] + 1);
1558	ap /= reclaim_stat->recent_rotated[0] + 1;
1559
1560	fp = (file_prio + 1) * (reclaim_stat->recent_scanned[1] + 1);
1561	fp /= reclaim_stat->recent_rotated[1] + 1;
1562
1563	/* Normalize to percentages */
1564	percent[0] = 100 * ap / (ap + fp + 1);
1565	percent[1] = 100 - percent[0];
1566}
1567
1568/*
1569 * Smallish @nr_to_scan's are deposited in @nr_saved_scan,
1570 * until we collected @swap_cluster_max pages to scan.
1571 */
1572static unsigned long nr_scan_try_batch(unsigned long nr_to_scan,
1573				       unsigned long *nr_saved_scan)
1574{
1575	unsigned long nr;
1576
1577	*nr_saved_scan += nr_to_scan;
1578	nr = *nr_saved_scan;
1579
1580	if (nr >= SWAP_CLUSTER_MAX)
1581		*nr_saved_scan = 0;
1582	else
1583		nr = 0;
1584
1585	return nr;
1586}
1587
1588/*
1589 * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
1590 */
1591static void shrink_zone(int priority, struct zone *zone,
1592				struct scan_control *sc)
1593{
1594	unsigned long nr[NR_LRU_LISTS];
1595	unsigned long nr_to_scan;
1596	unsigned long percent[2];	/* anon @ 0; file @ 1 */
1597	enum lru_list l;
1598	unsigned long nr_reclaimed = sc->nr_reclaimed;
1599	unsigned long nr_to_reclaim = sc->nr_to_reclaim;
1600	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
1601	int noswap = 0;
1602
1603	/* If we have no swap space, do not bother scanning anon pages. */
1604	if (!sc->may_swap || (nr_swap_pages <= 0)) {
1605		noswap = 1;
1606		percent[0] = 0;
1607		percent[1] = 100;
1608	} else
1609		get_scan_ratio(zone, sc, percent);
1610
1611	for_each_evictable_lru(l) {
1612		int file = is_file_lru(l);
1613		unsigned long scan;
1614
1615		scan = zone_nr_lru_pages(zone, sc, l);
1616		if (priority || noswap) {
1617			scan >>= priority;
1618			scan = (scan * percent[file]) / 100;
1619		}
1620		nr[l] = nr_scan_try_batch(scan,
1621					  &reclaim_stat->nr_saved_scan[l]);
1622	}
1623
1624	while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
1625					nr[LRU_INACTIVE_FILE]) {
1626		for_each_evictable_lru(l) {
1627			if (nr[l]) {
1628				nr_to_scan = min_t(unsigned long,
1629						   nr[l], SWAP_CLUSTER_MAX);
1630				nr[l] -= nr_to_scan;
1631
1632				nr_reclaimed += shrink_list(l, nr_to_scan,
1633							    zone, sc, priority);
1634			}
1635		}
1636		/*
1637		 * On large memory systems, scan >> priority can become
1638		 * really large. This is fine for the starting priority;
1639		 * we want to put equal scanning pressure on each zone.
1640		 * However, if the VM has a harder time of freeing pages,
1641		 * with multiple processes reclaiming pages, the total
1642		 * freeing target can get unreasonably large.
1643		 */
1644		if (nr_reclaimed >= nr_to_reclaim && priority < DEF_PRIORITY)
1645			break;
1646	}
1647
1648	sc->nr_reclaimed = nr_reclaimed;
1649
1650	/*
1651	 * Even if we did not try to evict anon pages at all, we want to
1652	 * rebalance the anon lru active/inactive ratio.
1653	 */
1654	if (inactive_anon_is_low(zone, sc) && nr_swap_pages > 0)
1655		shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0);
1656
1657	throttle_vm_writeout(sc->gfp_mask);
1658}
1659
1660/*
1661 * This is the direct reclaim path, for page-allocating processes.  We only
1662 * try to reclaim pages from zones which will satisfy the caller's allocation
1663 * request.
1664 *
1665 * We reclaim from a zone even if that zone is over high_wmark_pages(zone).
1666 * Because:
1667 * a) The caller may be trying to free *extra* pages to satisfy a higher-order
1668 *    allocation or
1669 * b) The target zone may be at high_wmark_pages(zone) but the lower zones
1670 *    must go *over* high_wmark_pages(zone) to satisfy the `incremental min'
1671 *    zone defense algorithm.
1672 *
1673 * If a zone is deemed to be full of pinned pages then just give it a light
1674 * scan then give up on it.
1675 */
1676static void shrink_zones(int priority, struct zonelist *zonelist,
1677					struct scan_control *sc)
1678{
1679	enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
1680	struct zoneref *z;
1681	struct zone *zone;
1682
1683	sc->all_unreclaimable = 1;
1684	for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
1685					sc->nodemask) {
1686		if (!populated_zone(zone))
1687			continue;
1688		/*
1689		 * Take care memory controller reclaiming has small influence
1690		 * to global LRU.
1691		 */
1692		if (scanning_global_lru(sc)) {
1693			if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
1694				continue;
1695			note_zone_scanning_priority(zone, priority);
1696
1697			if (zone_is_all_unreclaimable(zone) &&
1698						priority != DEF_PRIORITY)
1699				continue;	/* Let kswapd poll it */
1700			sc->all_unreclaimable = 0;
1701		} else {
1702			/*
1703			 * Ignore cpuset limitation here. We just want to reduce
1704			 * # of used pages by us regardless of memory shortage.
1705			 */
1706			sc->all_unreclaimable = 0;
1707			mem_cgroup_note_reclaim_priority(sc->mem_cgroup,
1708							priority);
1709		}
1710
1711		shrink_zone(priority, zone, sc);
1712	}
1713}
1714
1715/*
1716 * This is the main entry point to direct page reclaim.
1717 *
1718 * If a full scan of the inactive list fails to free enough memory then we
1719 * are "out of memory" and something needs to be killed.
1720 *
1721 * If the caller is !__GFP_FS then the probability of a failure is reasonably
1722 * high - the zone may be full of dirty or under-writeback pages, which this
1723 * caller can't do much about.  We kick the writeback threads and take explicit
1724 * naps in the hope that some of these pages can be written.  But if the
1725 * allocating task holds filesystem locks which prevent writeout this might not
1726 * work, and the allocation attempt will fail.
1727 *
1728 * returns:	0, if no pages reclaimed
1729 * 		else, the number of pages reclaimed
1730 */
1731static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
1732					struct scan_control *sc)
1733{
1734	int priority;
1735	unsigned long ret = 0;
1736	unsigned long total_scanned = 0;
1737	struct reclaim_state *reclaim_state = current->reclaim_state;
1738	unsigned long lru_pages = 0;
1739	struct zoneref *z;
1740	struct zone *zone;
1741	enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
1742	unsigned long writeback_threshold;
1743
1744	delayacct_freepages_start();
1745
1746	if (scanning_global_lru(sc))
1747		count_vm_event(ALLOCSTALL);
1748	/*
1749	 * mem_cgroup will not do shrink_slab.
1750	 */
1751	if (scanning_global_lru(sc)) {
1752		for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
1753
1754			if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
1755				continue;
1756
1757			lru_pages += zone_reclaimable_pages(zone);
1758		}
1759	}
1760
1761	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
1762		sc->nr_scanned = 0;
1763		if (!priority)
1764			disable_swap_token();
1765		shrink_zones(priority, zonelist, sc);
1766		/*
1767		 * Don't shrink slabs when reclaiming memory from
1768		 * over limit cgroups
1769		 */
1770		if (scanning_global_lru(sc)) {
1771			shrink_slab(sc->nr_scanned, sc->gfp_mask, lru_pages);
1772			if (reclaim_state) {
1773				sc->nr_reclaimed += reclaim_state->reclaimed_slab;
1774				reclaim_state->reclaimed_slab = 0;
1775			}
1776		}
1777		total_scanned += sc->nr_scanned;
1778		if (sc->nr_reclaimed >= sc->nr_to_reclaim) {
1779			ret = sc->nr_reclaimed;
1780			goto out;
1781		}
1782
1783		/*
1784		 * Try to write back as many pages as we just scanned.  This
1785		 * tends to cause slow streaming writers to write data to the
1786		 * disk smoothly, at the dirtying rate, which is nice.   But
1787		 * that's undesirable in laptop mode, where we *want* lumpy
1788		 * writeout.  So in laptop mode, write out the whole world.
1789		 */
1790		writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2;
1791		if (total_scanned > writeback_threshold) {
1792			wakeup_flusher_threads(laptop_mode ? 0 : total_scanned);
1793			sc->may_writepage = 1;
1794		}
1795
1796		/* Take a nap, wait for some writeback to complete */
1797		if (!sc->hibernation_mode && sc->nr_scanned &&
1798		    priority < DEF_PRIORITY - 2)
1799			congestion_wait(BLK_RW_ASYNC, HZ/10);
1800	}
1801	/* top priority shrink_zones still had more to do? don't OOM, then */
1802	if (!sc->all_unreclaimable && scanning_global_lru(sc))
1803		ret = sc->nr_reclaimed;
1804out:
1805	/*
1806	 * Now that we've scanned all the zones at this priority level, note
1807	 * that level within the zone so that the next thread which performs
1808	 * scanning of this zone will immediately start out at this priority
1809	 * level.  This affects only the decision whether or not to bring
1810	 * mapped pages onto the inactive list.
1811	 */
1812	if (priority < 0)
1813		priority = 0;
1814
1815	if (scanning_global_lru(sc)) {
1816		for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
1817
1818			if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
1819				continue;
1820
1821			zone->prev_priority = priority;
1822		}
1823	} else
1824		mem_cgroup_record_reclaim_priority(sc->mem_cgroup, priority);
1825
1826	delayacct_freepages_end();
1827
1828	return ret;
1829}
1830
1831unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
1832				gfp_t gfp_mask, nodemask_t *nodemask)
1833{
1834	struct scan_control sc = {
1835		.gfp_mask = gfp_mask,
1836		.may_writepage = !laptop_mode,
1837		.nr_to_reclaim = SWAP_CLUSTER_MAX,
1838		.may_unmap = 1,
1839		.may_swap = 1,
1840		.swappiness = vm_swappiness,
1841		.order = order,
1842		.mem_cgroup = NULL,
1843		.isolate_pages = isolate_pages_global,
1844		.nodemask = nodemask,
1845	};
1846
1847	return do_try_to_free_pages(zonelist, &sc);
1848}
1849
1850#ifdef CONFIG_CGROUP_MEM_RES_CTLR
1851
1852unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
1853						gfp_t gfp_mask, bool noswap,
1854						unsigned int swappiness,
1855						struct zone *zone, int nid)
1856{
1857	struct scan_control sc = {
1858		.may_writepage = !laptop_mode,
1859		.may_unmap = 1,
1860		.may_swap = !noswap,
1861		.swappiness = swappiness,
1862		.order = 0,
1863		.mem_cgroup = mem,
1864		.isolate_pages = mem_cgroup_isolate_pages,
1865	};
1866	nodemask_t nm  = nodemask_of_node(nid);
1867
1868	sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
1869			(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
1870	sc.nodemask = &nm;
1871	sc.nr_reclaimed = 0;
1872	sc.nr_scanned = 0;
1873	/*
1874	 * NOTE: Although we can get the priority field, using it
1875	 * here is not a good idea, since it limits the pages we can scan.
1876	 * if we don't reclaim here, the shrink_zone from balance_pgdat
1877	 * will pick up pages from other mem cgroup's as well. We hack
1878	 * the priority and make it zero.
1879	 */
1880	shrink_zone(0, zone, &sc);
1881	return sc.nr_reclaimed;
1882}
1883
1884unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
1885					   gfp_t gfp_mask,
1886					   bool noswap,
1887					   unsigned int swappiness)
1888{
1889	struct zonelist *zonelist;
1890	struct scan_control sc = {
1891		.may_writepage = !laptop_mode,
1892		.may_unmap = 1,
1893		.may_swap = !noswap,
1894		.nr_to_reclaim = SWAP_CLUSTER_MAX,
1895		.swappiness = swappiness,
1896		.order = 0,
1897		.mem_cgroup = mem_cont,
1898		.isolate_pages = mem_cgroup_isolate_pages,
1899		.nodemask = NULL, /* we don't care the placement */
1900	};
1901
1902	sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
1903			(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
1904	zonelist = NODE_DATA(numa_node_id())->node_zonelists;
1905	return do_try_to_free_pages(zonelist, &sc);
1906}
1907#endif
1908
1909/* is kswapd sleeping prematurely? */
1910static int sleeping_prematurely(pg_data_t *pgdat, int order, long remaining)
1911{
1912	int i;
1913
1914	/* If a direct reclaimer woke kswapd within HZ/10, it's premature */
1915	if (remaining)
1916		return 1;
1917
1918	/* If after HZ/10, a zone is below the high mark, it's premature */
1919	for (i = 0; i < pgdat->nr_zones; i++) {
1920		struct zone *zone = pgdat->node_zones + i;
1921
1922		if (!populated_zone(zone))
1923			continue;
1924
1925		if (zone_is_all_unreclaimable(zone))
1926			continue;
1927
1928		if (!zone_watermark_ok(zone, order, high_wmark_pages(zone),
1929								0, 0))
1930			return 1;
1931	}
1932
1933	return 0;
1934}
1935
1936/*
1937 * For kswapd, balance_pgdat() will work across all this node's zones until
1938 * they are all at high_wmark_pages(zone).
1939 *
1940 * Returns the number of pages which were actually freed.
1941 *
1942 * There is special handling here for zones which are full of pinned pages.
1943 * This can happen if the pages are all mlocked, or if they are all used by
1944 * device drivers (say, ZONE_DMA).  Or if they are all in use by hugetlb.
1945 * What we do is to detect the case where all pages in the zone have been
1946 * scanned twice and there has been zero successful reclaim.  Mark the zone as
1947 * dead and from now on, only perform a short scan.  Basically we're polling
1948 * the zone for when the problem goes away.
1949 *
1950 * kswapd scans the zones in the highmem->normal->dma direction.  It skips
1951 * zones which have free_pages > high_wmark_pages(zone), but once a zone is
1952 * found to have free_pages <= high_wmark_pages(zone), we scan that zone and the
1953 * lower zones regardless of the number of free pages in the lower zones. This
1954 * interoperates with the page allocator fallback scheme to ensure that aging
1955 * of pages is balanced across the zones.
1956 */
1957static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
1958{
1959	int all_zones_ok;
1960	int priority;
1961	int i;
1962	unsigned long total_scanned;
1963	struct reclaim_state *reclaim_state = current->reclaim_state;
1964	struct scan_control sc = {
1965		.gfp_mask = GFP_KERNEL,
1966		.may_unmap = 1,
1967		.may_swap = 1,
1968		/*
1969		 * kswapd doesn't want to be bailed out while reclaim. because
1970		 * we want to put equal scanning pressure on each zone.
1971		 */
1972		.nr_to_reclaim = ULONG_MAX,
1973		.swappiness = vm_swappiness,
1974		.order = order,
1975		.mem_cgroup = NULL,
1976		.isolate_pages = isolate_pages_global,
1977	};
1978	/*
1979	 * temp_priority is used to remember the scanning priority at which
1980	 * this zone was successfully refilled to
1981	 * free_pages == high_wmark_pages(zone).
1982	 */
1983	int temp_priority[MAX_NR_ZONES];
1984
1985loop_again:
1986	total_scanned = 0;
1987	sc.nr_reclaimed = 0;
1988	sc.may_writepage = !laptop_mode;
1989	count_vm_event(PAGEOUTRUN);
1990
1991	for (i = 0; i < pgdat->nr_zones; i++)
1992		temp_priority[i] = DEF_PRIORITY;
1993
1994	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
1995		int end_zone = 0;	/* Inclusive.  0 = ZONE_DMA */
1996		unsigned long lru_pages = 0;
1997		int has_under_min_watermark_zone = 0;
1998
1999		/* The swap token gets in the way of swapout... */
2000		if (!priority)
2001			disable_swap_token();
2002
2003		all_zones_ok = 1;
2004
2005		/*
2006		 * Scan in the highmem->dma direction for the highest
2007		 * zone which needs scanning
2008		 */
2009		for (i = pgdat->nr_zones - 1; i >= 0; i--) {
2010			struct zone *zone = pgdat->node_zones + i;
2011
2012			if (!populated_zone(zone))
2013				continue;
2014
2015			if (zone_is_all_unreclaimable(zone) &&
2016			    priority != DEF_PRIORITY)
2017				continue;
2018
2019			/*
2020			 * Do some background aging of the anon list, to give
2021			 * pages a chance to be referenced before reclaiming.
2022			 */
2023			if (inactive_anon_is_low(zone, &sc))
2024				shrink_active_list(SWAP_CLUSTER_MAX, zone,
2025							&sc, priority, 0);
2026
2027			if (!zone_watermark_ok(zone, order,
2028					high_wmark_pages(zone), 0, 0)) {
2029				end_zone = i;
2030				break;
2031			}
2032		}
2033		if (i < 0)
2034			goto out;
2035
2036		for (i = 0; i <= end_zone; i++) {
2037			struct zone *zone = pgdat->node_zones + i;
2038
2039			lru_pages += zone_reclaimable_pages(zone);
2040		}
2041
2042		/*
2043		 * Now scan the zone in the dma->highmem direction, stopping
2044		 * at the last zone which needs scanning.
2045		 *
2046		 * We do this because the page allocator works in the opposite
2047		 * direction.  This prevents the page allocator from allocating
2048		 * pages behind kswapd's direction of progress, which would
2049		 * cause too much scanning of the lower zones.
2050		 */
2051		for (i = 0; i <= end_zone; i++) {
2052			struct zone *zone = pgdat->node_zones + i;
2053			int nr_slab;
2054			int nid, zid;
2055
2056			if (!populated_zone(zone))
2057				continue;
2058
2059			if (zone_is_all_unreclaimable(zone) &&
2060					priority != DEF_PRIORITY)
2061				continue;
2062
2063			temp_priority[i] = priority;
2064			sc.nr_scanned = 0;
2065			note_zone_scanning_priority(zone, priority);
2066
2067			nid = pgdat->node_id;
2068			zid = zone_idx(zone);
2069			/*
2070			 * Call soft limit reclaim before calling shrink_zone.
2071			 * For now we ignore the return value
2072			 */
2073			mem_cgroup_soft_limit_reclaim(zone, order, sc.gfp_mask,
2074							nid, zid);
2075			/*
2076			 * We put equal pressure on every zone, unless one
2077			 * zone has way too many pages free already.
2078			 */
2079			if (!zone_watermark_ok(zone, order,
2080					8*high_wmark_pages(zone), end_zone, 0))
2081				shrink_zone(priority, zone, &sc);
2082			reclaim_state->reclaimed_slab = 0;
2083			nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
2084						lru_pages);
2085			sc.nr_reclaimed += reclaim_state->reclaimed_slab;
2086			total_scanned += sc.nr_scanned;
2087			if (zone_is_all_unreclaimable(zone))
2088				continue;
2089			if (nr_slab == 0 && zone->pages_scanned >=
2090					(zone_reclaimable_pages(zone) * 6))
2091					zone_set_flag(zone,
2092						      ZONE_ALL_UNRECLAIMABLE);
2093			/*
2094			 * If we've done a decent amount of scanning and
2095			 * the reclaim ratio is low, start doing writepage
2096			 * even in laptop mode
2097			 */
2098			if (total_scanned > SWAP_CLUSTER_MAX * 2 &&
2099			    total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2)
2100				sc.may_writepage = 1;
2101
2102			if (!zone_watermark_ok(zone, order,
2103					high_wmark_pages(zone), end_zone, 0)) {
2104				all_zones_ok = 0;
2105				/*
2106				 * We are still under min water mark.  This
2107				 * means that we have a GFP_ATOMIC allocation
2108				 * failure risk. Hurry up!
2109				 */
2110				if (!zone_watermark_ok(zone, order,
2111					    min_wmark_pages(zone), end_zone, 0))
2112					has_under_min_watermark_zone = 1;
2113			}
2114
2115		}
2116		if (all_zones_ok)
2117			break;		/* kswapd: all done */
2118		/*
2119		 * OK, kswapd is getting into trouble.  Take a nap, then take
2120		 * another pass across the zones.
2121		 */
2122		if (total_scanned && (priority < DEF_PRIORITY - 2)) {
2123			if (has_under_min_watermark_zone)
2124				count_vm_event(KSWAPD_SKIP_CONGESTION_WAIT);
2125			else
2126				congestion_wait(BLK_RW_ASYNC, HZ/10);
2127		}
2128
2129		/*
2130		 * We do this so kswapd doesn't build up large priorities for
2131		 * example when it is freeing in parallel with allocators. It
2132		 * matches the direct reclaim path behaviour in terms of impact
2133		 * on zone->*_priority.
2134		 */
2135		if (sc.nr_reclaimed >= SWAP_CLUSTER_MAX)
2136			break;
2137	}
2138out:
2139	/*
2140	 * Note within each zone the priority level at which this zone was
2141	 * brought into a happy state.  So that the next thread which scans this
2142	 * zone will start out at that priority level.
2143	 */
2144	for (i = 0; i < pgdat->nr_zones; i++) {
2145		struct zone *zone = pgdat->node_zones + i;
2146
2147		zone->prev_priority = temp_priority[i];
2148	}
2149	if (!all_zones_ok) {
2150		cond_resched();
2151
2152		try_to_freeze();
2153
2154		/*
2155		 * Fragmentation may mean that the system cannot be
2156		 * rebalanced for high-order allocations in all zones.
2157		 * At this point, if nr_reclaimed < SWAP_CLUSTER_MAX,
2158		 * it means the zones have been fully scanned and are still
2159		 * not balanced. For high-order allocations, there is
2160		 * little point trying all over again as kswapd may
2161		 * infinite loop.
2162		 *
2163		 * Instead, recheck all watermarks at order-0 as they
2164		 * are the most important. If watermarks are ok, kswapd will go
2165		 * back to sleep. High-order users can still perform direct
2166		 * reclaim if they wish.
2167		 */
2168		if (sc.nr_reclaimed < SWAP_CLUSTER_MAX)
2169			order = sc.order = 0;
2170
2171		goto loop_again;
2172	}
2173
2174	return sc.nr_reclaimed;
2175}
2176
2177/*
2178 * The background pageout daemon, started as a kernel thread
2179 * from the init process.
2180 *
2181 * This basically trickles out pages so that we have _some_
2182 * free memory available even if there is no other activity
2183 * that frees anything up. This is needed for things like routing
2184 * etc, where we otherwise might have all activity going on in
2185 * asynchronous contexts that cannot page things out.
2186 *
2187 * If there are applications that are active memory-allocators
2188 * (most normal use), this basically shouldn't matter.
2189 */
2190static int kswapd(void *p)
2191{
2192	unsigned long order;
2193	pg_data_t *pgdat = (pg_data_t*)p;
2194	struct task_struct *tsk = current;
2195	DEFINE_WAIT(wait);
2196	struct reclaim_state reclaim_state = {
2197		.reclaimed_slab = 0,
2198	};
2199	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
2200
2201	lockdep_set_current_reclaim_state(GFP_KERNEL);
2202
2203	if (!cpumask_empty(cpumask))
2204		set_cpus_allowed_ptr(tsk, cpumask);
2205	current->reclaim_state = &reclaim_state;
2206
2207	/*
2208	 * Tell the memory management that we're a "memory allocator",
2209	 * and that if we need more memory we should get access to it
2210	 * regardless (see "__alloc_pages()"). "kswapd" should
2211	 * never get caught in the normal page freeing logic.
2212	 *
2213	 * (Kswapd normally doesn't need memory anyway, but sometimes
2214	 * you need a small amount of memory in order to be able to
2215	 * page out something else, and this flag essentially protects
2216	 * us from recursively trying to free more memory as we're
2217	 * trying to free the first piece of memory in the first place).
2218	 */
2219	tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
2220	set_freezable();
2221
2222	order = 0;
2223	for ( ; ; ) {
2224		unsigned long new_order;
2225		int ret;
2226
2227		prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
2228		new_order = pgdat->kswapd_max_order;
2229		pgdat->kswapd_max_order = 0;
2230		if (order < new_order) {
2231			/*
2232			 * Don't sleep if someone wants a larger 'order'
2233			 * allocation
2234			 */
2235			order = new_order;
2236		} else {
2237			if (!freezing(current) && !kthread_should_stop()) {
2238				long remaining = 0;
2239
2240				/* Try to sleep for a short interval */
2241				if (!sleeping_prematurely(pgdat, order, remaining)) {
2242					remaining = schedule_timeout(HZ/10);
2243					finish_wait(&pgdat->kswapd_wait, &wait);
2244					prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
2245				}
2246
2247				/*
2248				 * After a short sleep, check if it was a
2249				 * premature sleep. If not, then go fully
2250				 * to sleep until explicitly woken up
2251				 */
2252				if (!sleeping_prematurely(pgdat, order, remaining))
2253					schedule();
2254				else {
2255					if (remaining)
2256						count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
2257					else
2258						count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
2259				}
2260			}
2261
2262			order = pgdat->kswapd_max_order;
2263		}
2264		finish_wait(&pgdat->kswapd_wait, &wait);
2265
2266		ret = try_to_freeze();
2267		if (kthread_should_stop())
2268			break;
2269
2270		/*
2271		 * We can speed up thawing tasks if we don't call balance_pgdat
2272		 * after returning from the refrigerator
2273		 */
2274		if (!ret)
2275			balance_pgdat(pgdat, order);
2276	}
2277	return 0;
2278}
2279
2280/*
2281 * A zone is low on free memory, so wake its kswapd task to service it.
2282 */
2283void wakeup_kswapd(struct zone *zone, int order)
2284{
2285	pg_data_t *pgdat;
2286
2287	if (!populated_zone(zone))
2288		return;
2289
2290	pgdat = zone->zone_pgdat;
2291	if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0))
2292		return;
2293	if (pgdat->kswapd_max_order < order)
2294		pgdat->kswapd_max_order = order;
2295	if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
2296		return;
2297	if (!waitqueue_active(&pgdat->kswapd_wait))
2298		return;
2299	wake_up_interruptible(&pgdat->kswapd_wait);
2300}
2301
2302/*
2303 * The reclaimable count would be mostly accurate.
2304 * The less reclaimable pages may be
2305 * - mlocked pages, which will be moved to unevictable list when encountered
2306 * - mapped pages, which may require several travels to be reclaimed
2307 * - dirty pages, which is not "instantly" reclaimable
2308 */
2309unsigned long global_reclaimable_pages(void)
2310{
2311	int nr;
2312
2313	nr = global_page_state(NR_ACTIVE_FILE) +
2314	     global_page_state(NR_INACTIVE_FILE);
2315
2316	if (nr_swap_pages > 0)
2317		nr += global_page_state(NR_ACTIVE_ANON) +
2318		      global_page_state(NR_INACTIVE_ANON);
2319
2320	return nr;
2321}
2322
2323unsigned long zone_reclaimable_pages(struct zone *zone)
2324{
2325	int nr;
2326
2327	nr = zone_page_state(zone, NR_ACTIVE_FILE) +
2328	     zone_page_state(zone, NR_INACTIVE_FILE);
2329
2330	if (nr_swap_pages > 0)
2331		nr += zone_page_state(zone, NR_ACTIVE_ANON) +
2332		      zone_page_state(zone, NR_INACTIVE_ANON);
2333
2334	return nr;
2335}
2336
2337#ifdef CONFIG_HIBERNATION
2338/*
2339 * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of
2340 * freed pages.
2341 *
2342 * Rather than trying to age LRUs the aim is to preserve the overall
2343 * LRU order by reclaiming preferentially
2344 * inactive > active > active referenced > active mapped
2345 */
2346unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
2347{
2348	struct reclaim_state reclaim_state;
2349	struct scan_control sc = {
2350		.gfp_mask = GFP_HIGHUSER_MOVABLE,
2351		.may_swap = 1,
2352		.may_unmap = 1,
2353		.may_writepage = 1,
2354		.nr_to_reclaim = nr_to_reclaim,
2355		.hibernation_mode = 1,
2356		.swappiness = vm_swappiness,
2357		.order = 0,
2358		.isolate_pages = isolate_pages_global,
2359	};
2360	struct zonelist * zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
2361	struct task_struct *p = current;
2362	unsigned long nr_reclaimed;
2363
2364	p->flags |= PF_MEMALLOC;
2365	lockdep_set_current_reclaim_state(sc.gfp_mask);
2366	reclaim_state.reclaimed_slab = 0;
2367	p->reclaim_state = &reclaim_state;
2368
2369	nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
2370
2371	p->reclaim_state = NULL;
2372	lockdep_clear_current_reclaim_state();
2373	p->flags &= ~PF_MEMALLOC;
2374
2375	return nr_reclaimed;
2376}
2377#endif /* CONFIG_HIBERNATION */
2378
2379/* It's optimal to keep kswapds on the same CPUs as their memory, but
2380   not required for correctness.  So if the last cpu in a node goes
2381   away, we get changed to run anywhere: as the first one comes back,
2382   restore their cpu bindings. */
2383static int __devinit cpu_callback(struct notifier_block *nfb,
2384				  unsigned long action, void *hcpu)
2385{
2386	int nid;
2387
2388	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
2389		for_each_node_state(nid, N_HIGH_MEMORY) {
2390			pg_data_t *pgdat = NODE_DATA(nid);
2391			const struct cpumask *mask;
2392
2393			mask = cpumask_of_node(pgdat->node_id);
2394
2395			if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
2396				/* One of our CPUs online: restore mask */
2397				set_cpus_allowed_ptr(pgdat->kswapd, mask);
2398		}
2399	}
2400	return NOTIFY_OK;
2401}
2402
2403/*
2404 * This kswapd start function will be called by init and node-hot-add.
2405 * On node-hot-add, kswapd will moved to proper cpus if cpus are hot-added.
2406 */
2407int kswapd_run(int nid)
2408{
2409	pg_data_t *pgdat = NODE_DATA(nid);
2410	int ret = 0;
2411
2412	if (pgdat->kswapd)
2413		return 0;
2414
2415	pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
2416	if (IS_ERR(pgdat->kswapd)) {
2417		/* failure at boot is fatal */
2418		BUG_ON(system_state == SYSTEM_BOOTING);
2419		printk("Failed to start kswapd on node %d\n",nid);
2420		ret = -1;
2421	}
2422	return ret;
2423}
2424
2425/*
2426 * Called by memory hotplug when all memory in a node is offlined.
2427 */
2428void kswapd_stop(int nid)
2429{
2430	struct task_struct *kswapd = NODE_DATA(nid)->kswapd;
2431
2432	if (kswapd)
2433		kthread_stop(kswapd);
2434}
2435
2436static int __init kswapd_init(void)
2437{
2438	int nid;
2439
2440	swap_setup();
2441	for_each_node_state(nid, N_HIGH_MEMORY)
2442 		kswapd_run(nid);
2443	hotcpu_notifier(cpu_callback, 0);
2444	return 0;
2445}
2446
2447module_init(kswapd_init)
2448
2449#ifdef CONFIG_NUMA
2450/*
2451 * Zone reclaim mode
2452 *
2453 * If non-zero call zone_reclaim when the number of free pages falls below
2454 * the watermarks.
2455 */
2456int zone_reclaim_mode __read_mostly;
2457
2458#define RECLAIM_OFF 0
2459#define RECLAIM_ZONE (1<<0)	/* Run shrink_inactive_list on the zone */
2460#define RECLAIM_WRITE (1<<1)	/* Writeout pages during reclaim */
2461#define RECLAIM_SWAP (1<<2)	/* Swap pages out during reclaim */
2462
2463/*
2464 * Priority for ZONE_RECLAIM. This determines the fraction of pages
2465 * of a node considered for each zone_reclaim. 4 scans 1/16th of
2466 * a zone.
2467 */
2468#define ZONE_RECLAIM_PRIORITY 4
2469
2470/*
2471 * Percentage of pages in a zone that must be unmapped for zone_reclaim to
2472 * occur.
2473 */
2474int sysctl_min_unmapped_ratio = 1;
2475
2476/*
2477 * If the number of slab pages in a zone grows beyond this percentage then
2478 * slab reclaim needs to occur.
2479 */
2480int sysctl_min_slab_ratio = 5;
2481
2482static inline unsigned long zone_unmapped_file_pages(struct zone *zone)
2483{
2484	unsigned long file_mapped = zone_page_state(zone, NR_FILE_MAPPED);
2485	unsigned long file_lru = zone_page_state(zone, NR_INACTIVE_FILE) +
2486		zone_page_state(zone, NR_ACTIVE_FILE);
2487
2488	/*
2489	 * It's possible for there to be more file mapped pages than
2490	 * accounted for by the pages on the file LRU lists because
2491	 * tmpfs pages accounted for as ANON can also be FILE_MAPPED
2492	 */
2493	return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0;
2494}
2495
2496/* Work out how many page cache pages we can reclaim in this reclaim_mode */
2497static long zone_pagecache_reclaimable(struct zone *zone)
2498{
2499	long nr_pagecache_reclaimable;
2500	long delta = 0;
2501
2502	/*
2503	 * If RECLAIM_SWAP is set, then all file pages are considered
2504	 * potentially reclaimable. Otherwise, we have to worry about
2505	 * pages like swapcache and zone_unmapped_file_pages() provides
2506	 * a better estimate
2507	 */
2508	if (zone_reclaim_mode & RECLAIM_SWAP)
2509		nr_pagecache_reclaimable = zone_page_state(zone, NR_FILE_PAGES);
2510	else
2511		nr_pagecache_reclaimable = zone_unmapped_file_pages(zone);
2512
2513	/* If we can't clean pages, remove dirty pages from consideration */
2514	if (!(zone_reclaim_mode & RECLAIM_WRITE))
2515		delta += zone_page_state(zone, NR_FILE_DIRTY);
2516
2517	/* Watch for any possible underflows due to delta */
2518	if (unlikely(delta > nr_pagecache_reclaimable))
2519		delta = nr_pagecache_reclaimable;
2520
2521	return nr_pagecache_reclaimable - delta;
2522}
2523
2524/*
2525 * Try to free up some pages from this zone through reclaim.
2526 */
2527static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
2528{
2529	/* Minimum pages needed in order to stay on node */
2530	const unsigned long nr_pages = 1 << order;
2531	struct task_struct *p = current;
2532	struct reclaim_state reclaim_state;
2533	int priority;
2534	struct scan_control sc = {
2535		.may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
2536		.may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
2537		.may_swap = 1,
2538		.nr_to_reclaim = max_t(unsigned long, nr_pages,
2539				       SWAP_CLUSTER_MAX),
2540		.gfp_mask = gfp_mask,
2541		.swappiness = vm_swappiness,
2542		.order = order,
2543		.isolate_pages = isolate_pages_global,
2544	};
2545	unsigned long slab_reclaimable;
2546
2547	disable_swap_token();
2548	cond_resched();
2549	/*
2550	 * We need to be able to allocate from the reserves for RECLAIM_SWAP
2551	 * and we also need to be able to write out pages for RECLAIM_WRITE
2552	 * and RECLAIM_SWAP.
2553	 */
2554	p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
2555	reclaim_state.reclaimed_slab = 0;
2556	p->reclaim_state = &reclaim_state;
2557
2558	if (zone_pagecache_reclaimable(zone) > zone->min_unmapped_pages) {
2559		/*
2560		 * Free memory by calling shrink zone with increasing
2561		 * priorities until we have enough memory freed.
2562		 */
2563		priority = ZONE_RECLAIM_PRIORITY;
2564		do {
2565			note_zone_scanning_priority(zone, priority);
2566			shrink_zone(priority, zone, &sc);
2567			priority--;
2568		} while (priority >= 0 && sc.nr_reclaimed < nr_pages);
2569	}
2570
2571	slab_reclaimable = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
2572	if (slab_reclaimable > zone->min_slab_pages) {
2573		/*
2574		 * shrink_slab() does not currently allow us to determine how
2575		 * many pages were freed in this zone. So we take the current
2576		 * number of slab pages and shake the slab until it is reduced
2577		 * by the same nr_pages that we used for reclaiming unmapped
2578		 * pages.
2579		 *
2580		 * Note that shrink_slab will free memory on all zones and may
2581		 * take a long time.
2582		 */
2583		while (shrink_slab(sc.nr_scanned, gfp_mask, order) &&
2584			zone_page_state(zone, NR_SLAB_RECLAIMABLE) >
2585				slab_reclaimable - nr_pages)
2586			;
2587
2588		/*
2589		 * Update nr_reclaimed by the number of slab pages we
2590		 * reclaimed from this zone.
2591		 */
2592		sc.nr_reclaimed += slab_reclaimable -
2593			zone_page_state(zone, NR_SLAB_RECLAIMABLE);
2594	}
2595
2596	p->reclaim_state = NULL;
2597	current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
2598	return sc.nr_reclaimed >= nr_pages;
2599}
2600
2601int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
2602{
2603	int node_id;
2604	int ret;
2605
2606	/*
2607	 * Zone reclaim reclaims unmapped file backed pages and
2608	 * slab pages if we are over the defined limits.
2609	 *
2610	 * A small portion of unmapped file backed pages is needed for
2611	 * file I/O otherwise pages read by file I/O will be immediately
2612	 * thrown out if the zone is overallocated. So we do not reclaim
2613	 * if less than a specified percentage of the zone is used by
2614	 * unmapped file backed pages.
2615	 */
2616	if (zone_pagecache_reclaimable(zone) <= zone->min_unmapped_pages &&
2617	    zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages)
2618		return ZONE_RECLAIM_FULL;
2619
2620	if (zone_is_all_unreclaimable(zone))
2621		return ZONE_RECLAIM_FULL;
2622
2623	/*
2624	 * Do not scan if the allocation should not be delayed.
2625	 */
2626	if (!(gfp_mask & __GFP_WAIT) || (current->flags & PF_MEMALLOC))
2627		return ZONE_RECLAIM_NOSCAN;
2628
2629	/*
2630	 * Only run zone reclaim on the local zone or on zones that do not
2631	 * have associated processors. This will favor the local processor
2632	 * over remote processors and spread off node memory allocations
2633	 * as wide as possible.
2634	 */
2635	node_id = zone_to_nid(zone);
2636	if (node_state(node_id, N_CPU) && node_id != numa_node_id())
2637		return ZONE_RECLAIM_NOSCAN;
2638
2639	if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED))
2640		return ZONE_RECLAIM_NOSCAN;
2641
2642	ret = __zone_reclaim(zone, gfp_mask, order);
2643	zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);
2644
2645	if (!ret)
2646		count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);
2647
2648	return ret;
2649}
2650#endif
2651
2652/*
2653 * page_evictable - test whether a page is evictable
2654 * @page: the page to test
2655 * @vma: the VMA in which the page is or will be mapped, may be NULL
2656 *
2657 * Test whether page is evictable--i.e., should be placed on active/inactive
2658 * lists vs unevictable list.  The vma argument is !NULL when called from the
2659 * fault path to determine how to instantate a new page.
2660 *
2661 * Reasons page might not be evictable:
2662 * (1) page's mapping marked unevictable
2663 * (2) page is part of an mlocked VMA
2664 *
2665 */
2666int page_evictable(struct page *page, struct vm_area_struct *vma)
2667{
2668
2669	if (mapping_unevictable(page_mapping(page)))
2670		return 0;
2671
2672	if (PageMlocked(page) || (vma && is_mlocked_vma(vma, page)))
2673		return 0;
2674
2675	return 1;
2676}
2677
2678/**
2679 * check_move_unevictable_page - check page for evictability and move to appropriate zone lru list
2680 * @page: page to check evictability and move to appropriate lru list
2681 * @zone: zone page is in
2682 *
2683 * Checks a page for evictability and moves the page to the appropriate
2684 * zone lru list.
2685 *
2686 * Restrictions: zone->lru_lock must be held, page must be on LRU and must
2687 * have PageUnevictable set.
2688 */
2689static void check_move_unevictable_page(struct page *page, struct zone *zone)
2690{
2691	VM_BUG_ON(PageActive(page));
2692
2693retry:
2694	ClearPageUnevictable(page);
2695	if (page_evictable(page, NULL)) {
2696		enum lru_list l = page_lru_base_type(page);
2697
2698		__dec_zone_state(zone, NR_UNEVICTABLE);
2699		list_move(&page->lru, &zone->lru[l].list);
2700		mem_cgroup_move_lists(page, LRU_UNEVICTABLE, l);
2701		__inc_zone_state(zone, NR_INACTIVE_ANON + l);
2702		__count_vm_event(UNEVICTABLE_PGRESCUED);
2703	} else {
2704		/*
2705		 * rotate unevictable list
2706		 */
2707		SetPageUnevictable(page);
2708		list_move(&page->lru, &zone->lru[LRU_UNEVICTABLE].list);
2709		mem_cgroup_rotate_lru_list(page, LRU_UNEVICTABLE);
2710		if (page_evictable(page, NULL))
2711			goto retry;
2712	}
2713}
2714
2715/**
2716 * scan_mapping_unevictable_pages - scan an address space for evictable pages
2717 * @mapping: struct address_space to scan for evictable pages
2718 *
2719 * Scan all pages in mapping.  Check unevictable pages for
2720 * evictability and move them to the appropriate zone lru list.
2721 */
2722void scan_mapping_unevictable_pages(struct address_space *mapping)
2723{
2724	pgoff_t next = 0;
2725	pgoff_t end   = (i_size_read(mapping->host) + PAGE_CACHE_SIZE - 1) >>
2726			 PAGE_CACHE_SHIFT;
2727	struct zone *zone;
2728	struct pagevec pvec;
2729
2730	if (mapping->nrpages == 0)
2731		return;
2732
2733	pagevec_init(&pvec, 0);
2734	while (next < end &&
2735		pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
2736		int i;
2737		int pg_scanned = 0;
2738
2739		zone = NULL;
2740
2741		for (i = 0; i < pagevec_count(&pvec); i++) {
2742			struct page *page = pvec.pages[i];
2743			pgoff_t page_index = page->index;
2744			struct zone *pagezone = page_zone(page);
2745
2746			pg_scanned++;
2747			if (page_index > next)
2748				next = page_index;
2749			next++;
2750
2751			if (pagezone != zone) {
2752				if (zone)
2753					spin_unlock_irq(&zone->lru_lock);
2754				zone = pagezone;
2755				spin_lock_irq(&zone->lru_lock);
2756			}
2757
2758			if (PageLRU(page) && PageUnevictable(page))
2759				check_move_unevictable_page(page, zone);
2760		}
2761		if (zone)
2762			spin_unlock_irq(&zone->lru_lock);
2763		pagevec_release(&pvec);
2764
2765		count_vm_events(UNEVICTABLE_PGSCANNED, pg_scanned);
2766	}
2767
2768}
2769
2770/**
2771 * scan_zone_unevictable_pages - check unevictable list for evictable pages
2772 * @zone - zone of which to scan the unevictable list
2773 *
2774 * Scan @zone's unevictable LRU lists to check for pages that have become
2775 * evictable.  Move those that have to @zone's inactive list where they
2776 * become candidates for reclaim, unless shrink_inactive_zone() decides
2777 * to reactivate them.  Pages that are still unevictable are rotated
2778 * back onto @zone's unevictable list.
2779 */
2780#define SCAN_UNEVICTABLE_BATCH_SIZE 16UL /* arbitrary lock hold batch size */
2781static void scan_zone_unevictable_pages(struct zone *zone)
2782{
2783	struct list_head *l_unevictable = &zone->lru[LRU_UNEVICTABLE].list;
2784	unsigned long scan;
2785	unsigned long nr_to_scan = zone_page_state(zone, NR_UNEVICTABLE);
2786
2787	while (nr_to_scan > 0) {
2788		unsigned long batch_size = min(nr_to_scan,
2789						SCAN_UNEVICTABLE_BATCH_SIZE);
2790
2791		spin_lock_irq(&zone->lru_lock);
2792		for (scan = 0;  scan < batch_size; scan++) {
2793			struct page *page = lru_to_page(l_unevictable);
2794
2795			if (!trylock_page(page))
2796				continue;
2797
2798			prefetchw_prev_lru_page(page, l_unevictable, flags);
2799
2800			if (likely(PageLRU(page) && PageUnevictable(page)))
2801				check_move_unevictable_page(page, zone);
2802
2803			unlock_page(page);
2804		}
2805		spin_unlock_irq(&zone->lru_lock);
2806
2807		nr_to_scan -= batch_size;
2808	}
2809}
2810
2811
2812/**
2813 * scan_all_zones_unevictable_pages - scan all unevictable lists for evictable pages
2814 *
2815 * A really big hammer:  scan all zones' unevictable LRU lists to check for
2816 * pages that have become evictable.  Move those back to the zones'
2817 * inactive list where they become candidates for reclaim.
2818 * This occurs when, e.g., we have unswappable pages on the unevictable lists,
2819 * and we add swap to the system.  As such, it runs in the context of a task
2820 * that has possibly/probably made some previously unevictable pages
2821 * evictable.
2822 */
2823static void scan_all_zones_unevictable_pages(void)
2824{
2825	struct zone *zone;
2826
2827	for_each_zone(zone) {
2828		scan_zone_unevictable_pages(zone);
2829	}
2830}
2831
2832/*
2833 * scan_unevictable_pages [vm] sysctl handler.  On demand re-scan of
2834 * all nodes' unevictable lists for evictable pages
2835 */
2836unsigned long scan_unevictable_pages;
2837
2838int scan_unevictable_handler(struct ctl_table *table, int write,
2839			   void __user *buffer,
2840			   size_t *length, loff_t *ppos)
2841{
2842	proc_doulongvec_minmax(table, write, buffer, length, ppos);
2843
2844	if (write && *(unsigned long *)table->data)
2845		scan_all_zones_unevictable_pages();
2846
2847	scan_unevictable_pages = 0;
2848	return 0;
2849}
2850
2851/*
2852 * per node 'scan_unevictable_pages' attribute.  On demand re-scan of
2853 * a specified node's per zone unevictable lists for evictable pages.
2854 */
2855
2856static ssize_t read_scan_unevictable_node(struct sys_device *dev,
2857					  struct sysdev_attribute *attr,
2858					  char *buf)
2859{
2860	return sprintf(buf, "0\n");	/* always zero; should fit... */
2861}
2862
2863static ssize_t write_scan_unevictable_node(struct sys_device *dev,
2864					   struct sysdev_attribute *attr,
2865					const char *buf, size_t count)
2866{
2867	struct zone *node_zones = NODE_DATA(dev->id)->node_zones;
2868	struct zone *zone;
2869	unsigned long res;
2870	unsigned long req = strict_strtoul(buf, 10, &res);
2871
2872	if (!req)
2873		return 1;	/* zero is no-op */
2874
2875	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
2876		if (!populated_zone(zone))
2877			continue;
2878		scan_zone_unevictable_pages(zone);
2879	}
2880	return 1;
2881}
2882
2883
2884static SYSDEV_ATTR(scan_unevictable_pages, S_IRUGO | S_IWUSR,
2885			read_scan_unevictable_node,
2886			write_scan_unevictable_node);
2887
2888int scan_unevictable_register_node(struct node *node)
2889{
2890	return sysdev_create_file(&node->sysdev, &attr_scan_unevictable_pages);
2891}
2892
2893void scan_unevictable_unregister_node(struct node *node)
2894{
2895	sysdev_remove_file(&node->sysdev, &attr_scan_unevictable_pages);
2896}
2897
2898