vmscan.c revision 994fc28c7b1e697ac56befe4aecabf23f0689f46
/*
 *  linux/mm/vmscan.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95, Stephen Tweedie.
 *  kswapd added: 7.1.96  sct
 *  Removed kswapd_ctl limits, and swap out as many pages as needed
 *  to bring the system back to freepages.high: 2.4.97, Rik van Riel.
 *  Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
 *  Multiqueue VM started 5.8.00, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>	/* for try_to_release_page(),
					buffer_heads_over_limit */
#include <linux/mm_inline.h>
#include <linux/pagevec.h>
#include <linux/backing-dev.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/notifier.h>
#include <linux/rwsem.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>

#include <linux/swapops.h>

/* possible outcome of pageout() */
typedef enum {
	/* failed to write page out, page is locked */
	PAGE_KEEP,
	/* move page to the active list, page is locked */
	PAGE_ACTIVATE,
	/* page has been sent to the disk successfully, page is unlocked */
	PAGE_SUCCESS,
	/* page is clean and locked */
	PAGE_CLEAN,
} pageout_t;

struct scan_control {
	/* Ask refill_inactive_zone or shrink_cache to scan this many pages */
	unsigned long nr_to_scan;

	/* Incremented by the number of inactive pages that were scanned */
	unsigned long nr_scanned;

	/* Incremented by the number of pages reclaimed */
	unsigned long nr_reclaimed;

	unsigned long nr_mapped;	/* From page_state */

	/* How many pages shrink_cache() should reclaim */
	int nr_to_reclaim;

	/* Ask shrink_caches or shrink_zone to scan at this priority */
	unsigned int priority;

	/* This context's GFP mask */
	gfp_t gfp_mask;

	int may_writepage;

	/* Can pages be swapped as part of reclaim? */
	int may_swap;

	/*
	 * This context's SWAP_CLUSTER_MAX.  If freeing memory for
	 * suspend, we effectively ignore SWAP_CLUSTER_MAX; in that
	 * case it doesn't matter that we scan the whole list at once.
	 */
	int swap_cluster_max;
};

/*
 * The list of shrinker callbacks used to apply pressure to
 * ageable caches.
 */
struct shrinker {
	shrinker_t		shrinker;
	struct list_head	list;
	int			seeks;	/* seeks to recreate an obj */
	long			nr;	/* objs pending delete */
};

#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))

#ifdef ARCH_HAS_PREFETCH
#define prefetch_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
									\
			prev = lru_to_page(&(_page->lru));		\
			prefetch(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
#endif

#ifdef ARCH_HAS_PREFETCHW
#define prefetchw_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
									\
			prev = lru_to_page(&(_page->lru));		\
			prefetchw(&prev->_field);		\
		}							\
	} while (0)
#else
#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
#endif

/*
 * From 0 .. 100.  Higher means more swappy.
 */
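/* Tunable at runtime via /proc/sys/vm/swappiness. */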
int vm_swappiness = 60;
static long total_memory;

static LIST_HEAD(shrinker_list);
static DECLARE_RWSEM(shrinker_rwsem);

/*
 * Add a shrinker callback to be called from the vm
 */
struct shrinker *set_shrinker(int seeks, shrinker_t theshrinker)
{
	struct shrinker *shrinker;

	shrinker = kmalloc(sizeof(*shrinker), GFP_KERNEL);
	if (shrinker) {
		shrinker->shrinker = theshrinker;
		shrinker->seeks = seeks;
		shrinker->nr = 0;
		down_write(&shrinker_rwsem);
		list_add_tail(&shrinker->list, &shrinker_list);
		up_write(&shrinker_rwsem);
	}
	return shrinker;
}
EXPORT_SYMBOL(set_shrinker);

/*
 * Remove a shrinker callback.
 */
void remove_shrinker(struct shrinker *shrinker)
{
	down_write(&shrinker_rwsem);
	list_del(&shrinker->list);
	up_write(&shrinker_rwsem);
	kfree(shrinker);
}
EXPORT_SYMBOL(remove_shrinker);
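
/*
 * Usage sketch (hypothetical "my_cache" names, for illustration only).
 * As shrink_slab() below shows, the callback is invoked with
 * nr_to_scan == 0 to query the object count, with a batch size to prune
 * entries, and returns -1 if it cannot make progress under gfp_mask:
 *
 *	static int my_cache_shrink(int nr_to_scan, gfp_t gfp_mask)
 *	{
 *		if (nr_to_scan)
 *			prune_my_cache(nr_to_scan);
 *		return my_cache_count;
 *	}
 *
 *	my_shrinker = set_shrinker(DEFAULT_SEEKS, my_cache_shrink);
 *	...
 *	remove_shrinker(my_shrinker);
 */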

#define SHRINK_BATCH 128
/*
 * Call the shrink functions to age shrinkable caches
 *
 * Here we assume it costs one seek to replace a lru page and that it also
 * takes a seek to recreate a cache object.  With this in mind we age equal
 * percentages of the lru and ageable caches.  This should balance the seeks
 * generated by these structures.
 *
 * If the VM encountered mapped pages on the LRU, it increases the pressure
 * on slab to avoid swapping.
 *
 * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
 *
 * `lru_pages' represents the number of on-LRU pages in all the zones which
 * are eligible for the caller's allocation attempt.  It is used for balancing
 * slab reclaim versus page reclaim.
 *
 * Returns the number of slab objects which we shrunk.
 */
static int shrink_slab(unsigned long scanned, gfp_t gfp_mask,
			unsigned long lru_pages)
{
	struct shrinker *shrinker;
	int ret = 0;

	if (scanned == 0)
		scanned = SWAP_CLUSTER_MAX;

	if (!down_read_trylock(&shrinker_rwsem))
		return 1;	/* Assume we'll be able to shrink next time */

	list_for_each_entry(shrinker, &shrinker_list, list) {
		unsigned long long delta;
		unsigned long total_scan;
		unsigned long max_pass = (*shrinker->shrinker)(0, gfp_mask);

		delta = (4 * scanned) / shrinker->seeks;
		delta *= max_pass;
		do_div(delta, lru_pages + 1);
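		/*
		 * Worked example (illustrative numbers): with scanned = 1000,
		 * seeks = 2, max_pass = 5000 objects and lru_pages = 100000,
		 * delta = (4000 / 2) * 5000 / 100001 ~= 100, i.e. the cache
		 * is asked to scan 100/5000 = 2% of its objects - (4 / seeks)
		 * times the 1% fraction of the LRU that was just scanned.
		 */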
		shrinker->nr += delta;
		if (shrinker->nr < 0) {
			printk(KERN_ERR "%s: nr=%ld\n",
					__FUNCTION__, shrinker->nr);
			shrinker->nr = max_pass;
		}

		/*
		 * Avoid risking looping forever due to an excessively large
		 * nr value: never try to free more than twice the estimated
		 * number of freeable entries.
		 */
		if (shrinker->nr > max_pass * 2)
			shrinker->nr = max_pass * 2;

		total_scan = shrinker->nr;
		shrinker->nr = 0;

		while (total_scan >= SHRINK_BATCH) {
			long this_scan = SHRINK_BATCH;
			int shrink_ret;
			int nr_before;

			nr_before = (*shrinker->shrinker)(0, gfp_mask);
			shrink_ret = (*shrinker->shrinker)(this_scan, gfp_mask);
			if (shrink_ret == -1)
				break;
			if (shrink_ret < nr_before)
				ret += nr_before - shrink_ret;
			mod_page_state(slabs_scanned, this_scan);
			total_scan -= this_scan;

			cond_resched();
		}

		shrinker->nr += total_scan;
	}
	up_read(&shrinker_rwsem);
	return ret;
}

/* Called without a lock on the page's mapped state, so the answer is unstable */
static inline int page_mapping_inuse(struct page *page)
{
	struct address_space *mapping;

	/* Page is in somebody's page tables. */
	if (page_mapped(page))
		return 1;

	/* Be more reluctant to reclaim swapcache than pagecache */
	if (PageSwapCache(page))
		return 1;

	mapping = page_mapping(page);
	if (!mapping)
		return 0;

	/* File is mmap'd by somebody? */
	return mapping_mapped(mapping);
}

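/*
 * A freeable pagecache page holds exactly two references: one for the
 * pagecache itself and one taken by our caller when isolating the page
 * (a buffer-head reference, PagePrivate, is discounted) - the same
 * "pagecache + us == 2" accounting used in shrink_list().
 */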
static inline int is_page_cache_freeable(struct page *page)
{
	return page_count(page) - !!PagePrivate(page) == 2;
}

static int may_write_to_queue(struct backing_dev_info *bdi)
{
	if (current_is_kswapd())
		return 1;
	if (current_is_pdflush())	/* This is unlikely, but why not... */
		return 1;
	if (!bdi_write_congested(bdi))
		return 1;
	if (bdi == current->backing_dev_info)
		return 1;
	return 0;
}

/*
 * We detected a synchronous write error writing a page out.  Probably
 * -ENOSPC.  We need to propagate that into the address_space for a subsequent
 * fsync(), msync() or close().
 *
 * The tricky part is that after writepage we cannot touch the mapping: nothing
 * prevents it from being freed up.  But we have a ref on the page and once
 * that page is locked, the mapping is pinned.
 *
 * We're allowed to run sleeping lock_page() here because we know the caller has
 * __GFP_FS.
 */
static void handle_write_error(struct address_space *mapping,
				struct page *page, int error)
{
	lock_page(page);
	if (page_mapping(page) == mapping) {
		if (error == -ENOSPC)
			set_bit(AS_ENOSPC, &mapping->flags);
		else
			set_bit(AS_EIO, &mapping->flags);
	}
	unlock_page(page);
}

/*
 * pageout is called by shrink_list() for each dirty page. Calls ->writepage().
 */
static pageout_t pageout(struct page *page, struct address_space *mapping)
{
	/*
	 * If the page is dirty, only perform writeback if that write
	 * will be non-blocking, to prevent this allocation from being
	 * stalled by pagecache activity.  But note that there may be
	 * stalls if we need to run get_block().  We could test
	 * PagePrivate for that.
	 *
	 * If this process is currently in generic_file_write() against
	 * this page's queue, we can perform writeback even if that
	 * will block.
	 *
	 * If the page is swapcache, write it back even if that would
	 * block, for some throttling. This happens by accident, because
	 * swap_backing_dev_info is bust: it doesn't reflect the
	 * congestion state of the swapdevs.  Easy to fix, if needed.
	 * See swapfile.c:page_queue_congested().
	 */
	if (!is_page_cache_freeable(page))
		return PAGE_KEEP;
	if (!mapping) {
		/*
		 * Some data journaling orphaned pages can have
		 * page->mapping == NULL while being dirty with clean buffers.
		 */
		if (PagePrivate(page)) {
			if (try_to_free_buffers(page)) {
				ClearPageDirty(page);
				printk("%s: orphaned page\n", __FUNCTION__);
				return PAGE_CLEAN;
			}
		}
		return PAGE_KEEP;
	}
	if (mapping->a_ops->writepage == NULL)
		return PAGE_ACTIVATE;
	if (!may_write_to_queue(mapping->backing_dev_info))
		return PAGE_KEEP;

	if (clear_page_dirty_for_io(page)) {
		int res;
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_NONE,
			.nr_to_write = SWAP_CLUSTER_MAX,
			.nonblocking = 1,
			.for_reclaim = 1,
		};

		SetPageReclaim(page);
		res = mapping->a_ops->writepage(page, &wbc);
		if (res < 0)
			handle_write_error(mapping, page, res);
		if (res == AOP_WRITEPAGE_ACTIVATE) {
			ClearPageReclaim(page);
			return PAGE_ACTIVATE;
		}
		if (!PageWriteback(page)) {
			/* synchronous write or broken a_ops? */
			ClearPageReclaim(page);
		}

		return PAGE_SUCCESS;
	}

	return PAGE_CLEAN;
}

/*
 * shrink_list adds the number of reclaimed pages to sc->nr_reclaimed
 */
static int shrink_list(struct list_head *page_list, struct scan_control *sc)
{
	LIST_HEAD(ret_pages);
	struct pagevec freed_pvec;
	int pgactivate = 0;
	int reclaimed = 0;

	cond_resched();

	pagevec_init(&freed_pvec, 1);
	while (!list_empty(page_list)) {
		struct address_space *mapping;
		struct page *page;
		int may_enter_fs;
		int referenced;

		cond_resched();

		page = lru_to_page(page_list);
		list_del(&page->lru);

		if (TestSetPageLocked(page))
			goto keep;

		BUG_ON(PageActive(page));

		sc->nr_scanned++;
		/* Double the slab pressure for mapped and swapcache pages */
		if (page_mapped(page) || PageSwapCache(page))
			sc->nr_scanned++;

		if (PageWriteback(page))
			goto keep_locked;

		referenced = page_referenced(page, 1);
		/* In active use or really unfreeable?  Activate it. */
		if (referenced && page_mapping_inuse(page))
			goto activate_locked;

#ifdef CONFIG_SWAP
		/*
		 * Anonymous process memory has backing store?
		 * Try to allocate it some swap space here.
		 */
		if (PageAnon(page) && !PageSwapCache(page)) {
			if (!sc->may_swap)
				goto keep_locked;
			if (!add_to_swap(page))
				goto activate_locked;
		}
#endif /* CONFIG_SWAP */

		mapping = page_mapping(page);
		may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
			(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));

		/*
		 * The page is mapped into the page tables of one or more
		 * processes. Try to unmap it here.
		 */
		if (page_mapped(page) && mapping) {
			switch (try_to_unmap(page)) {
			case SWAP_FAIL:
				goto activate_locked;
			case SWAP_AGAIN:
				goto keep_locked;
			case SWAP_SUCCESS:
				; /* try to free the page below */
			}
		}

		if (PageDirty(page)) {
			if (referenced)
				goto keep_locked;
			if (!may_enter_fs)
				goto keep_locked;
			if (laptop_mode && !sc->may_writepage)
				goto keep_locked;

			/* Page is dirty, try to write it out here */
			switch (pageout(page, mapping)) {
			case PAGE_KEEP:
				goto keep_locked;
			case PAGE_ACTIVATE:
				goto activate_locked;
			case PAGE_SUCCESS:
				if (PageWriteback(page) || PageDirty(page))
					goto keep;
				/*
				 * A synchronous write - probably a ramdisk.  Go
				 * ahead and try to reclaim the page.
				 */
				if (TestSetPageLocked(page))
					goto keep;
				if (PageDirty(page) || PageWriteback(page))
					goto keep_locked;
				mapping = page_mapping(page);
			case PAGE_CLEAN:
				; /* try to free the page below */
			}
		}

		/*
		 * If the page has buffers, try to free the buffer mappings
		 * associated with this page. If we succeed we try to free
		 * the page as well.
		 *
		 * We do this even if the page is PageDirty().
		 * try_to_release_page() does not perform I/O, but it is
		 * possible for a page to have PageDirty set while actually
		 * being clean (all its buffers are clean).  This happens if
		 * the buffers were written out directly, with submit_bh().
		 * ext3 will do this, as well as the blockdev mapping.
		 * try_to_release_page() will discover that cleanness and will
		 * drop the buffers and mark the page clean - it can be freed.
		 *
		 * Rarely, pages can have buffers and no ->mapping.  These are
		 * the pages which were not successfully invalidated in
		 * truncate_complete_page().  We try to drop those buffers here
		 * and if that worked, and the page is no longer mapped into
		 * process address space (page_count == 1) it can be freed.
		 * Otherwise, leave the page on the LRU so it is swappable.
		 */
		if (PagePrivate(page)) {
			if (!try_to_release_page(page, sc->gfp_mask))
				goto activate_locked;
			if (!mapping && page_count(page) == 1)
				goto free_it;
		}

		if (!mapping)
			goto keep_locked;	/* truncate got there first */

		write_lock_irq(&mapping->tree_lock);

		/*
		 * The non-racy check for busy page.  It is critical to check
		 * PageDirty _after_ making sure that the page is freeable and
		 * not in use by anybody (pagecache + us == 2).
		 */
		if (unlikely(page_count(page) != 2))
			goto cannot_free;
		smp_rmb();
		if (unlikely(PageDirty(page)))
			goto cannot_free;

#ifdef CONFIG_SWAP
		if (PageSwapCache(page)) {
			swp_entry_t swap = { .val = page_private(page) };
			__delete_from_swap_cache(page);
			write_unlock_irq(&mapping->tree_lock);
			swap_free(swap);
			__put_page(page);	/* The pagecache ref */
			goto free_it;
		}
#endif /* CONFIG_SWAP */

		__remove_from_page_cache(page);
		write_unlock_irq(&mapping->tree_lock);
		__put_page(page);

free_it:
		unlock_page(page);
		reclaimed++;
		if (!pagevec_add(&freed_pvec, page))
			__pagevec_release_nonlru(&freed_pvec);
		continue;

cannot_free:
		write_unlock_irq(&mapping->tree_lock);
		goto keep_locked;

activate_locked:
		SetPageActive(page);
		pgactivate++;
keep_locked:
		unlock_page(page);
keep:
		list_add(&page->lru, &ret_pages);
		BUG_ON(PageLRU(page));
	}
	list_splice(&ret_pages, page_list);
	if (pagevec_count(&freed_pvec))
		__pagevec_release_nonlru(&freed_pvec);
	mod_page_state(pgactivate, pgactivate);
	sc->nr_reclaimed += reclaimed;
	return reclaimed;
}

/*
 * zone->lru_lock is heavily contended.  Some of the functions that
 * shrink the lists perform better by taking out a batch of pages
 * and working on them outside the LRU lock.
 *
 * For pagecache intensive workloads, this function is the hottest
 * spot in the kernel (apart from copy_*_user functions).
 *
 * Appropriate locks must be held before calling this function.
 *
 * @nr_to_scan:	The number of pages to look through on the list.
 * @src:	The LRU list to pull pages off.
 * @dst:	The temp list to put pages on to.
 * @scanned:	The number of pages that were scanned.
 *
 * returns how many pages were moved onto *@dst.
 */
static int isolate_lru_pages(int nr_to_scan, struct list_head *src,
			     struct list_head *dst, int *scanned)
{
	int nr_taken = 0;
	struct page *page;
	int scan = 0;

	while (scan++ < nr_to_scan && !list_empty(src)) {
		page = lru_to_page(src);
		prefetchw_prev_lru_page(page, src, flags);

		if (!TestClearPageLRU(page))
			BUG();
		list_del(&page->lru);
		if (get_page_testone(page)) {
			/*
			 * It is being freed elsewhere
			 */
			__put_page(page);
			SetPageLRU(page);
			list_add(&page->lru, src);
			continue;
		} else {
			list_add(&page->lru, dst);
			nr_taken++;
		}
	}

	*scanned = scan;
	return nr_taken;
}

/*
 * shrink_cache() adds the number of pages reclaimed to sc->nr_reclaimed
 */
static void shrink_cache(struct zone *zone, struct scan_control *sc)
{
	LIST_HEAD(page_list);
	struct pagevec pvec;
	int max_scan = sc->nr_to_scan;

	pagevec_init(&pvec, 1);

	lru_add_drain();
	spin_lock_irq(&zone->lru_lock);
	while (max_scan > 0) {
		struct page *page;
		int nr_taken;
		int nr_scan;
		int nr_freed;

		nr_taken = isolate_lru_pages(sc->swap_cluster_max,
					     &zone->inactive_list,
					     &page_list, &nr_scan);
		zone->nr_inactive -= nr_taken;
		zone->pages_scanned += nr_scan;
		spin_unlock_irq(&zone->lru_lock);

		if (nr_taken == 0)
			goto done;

		max_scan -= nr_scan;
		if (current_is_kswapd())
			mod_page_state_zone(zone, pgscan_kswapd, nr_scan);
		else
			mod_page_state_zone(zone, pgscan_direct, nr_scan);
		nr_freed = shrink_list(&page_list, sc);
		if (current_is_kswapd())
			mod_page_state(kswapd_steal, nr_freed);
		mod_page_state_zone(zone, pgsteal, nr_freed);
		sc->nr_to_reclaim -= nr_freed;

		spin_lock_irq(&zone->lru_lock);
		/*
		 * Put back any unfreeable pages.
		 */
		while (!list_empty(&page_list)) {
			page = lru_to_page(&page_list);
			if (TestSetPageLRU(page))
				BUG();
			list_del(&page->lru);
			if (PageActive(page))
				add_page_to_active_list(zone, page);
			else
				add_page_to_inactive_list(zone, page);
			if (!pagevec_add(&pvec, page)) {
				spin_unlock_irq(&zone->lru_lock);
				__pagevec_release(&pvec);
				spin_lock_irq(&zone->lru_lock);
			}
		}
	}
	spin_unlock_irq(&zone->lru_lock);
done:
	pagevec_release(&pvec);
}

/*
 * This moves pages from the active list to the inactive list.
 *
 * We move them the other way if the page is referenced by one or more
 * processes, from rmap.
 *
 * If the pages are mostly unmapped, the processing is fast and it is
 * appropriate to hold zone->lru_lock across the whole operation.  But if
 * the pages are mapped, the processing is slow (page_referenced()) so we
 * should drop zone->lru_lock around each page.  It's impossible to balance
 * this, so instead we remove the pages from the LRU while processing them.
 * It is safe to rely on PG_active against the non-LRU pages in here because
 * nobody will play with that bit on a non-LRU page.
 *
 * The downside is that we have to touch page->_count against each page.
 * But we had to alter page->flags anyway.
 */
static void
refill_inactive_zone(struct zone *zone, struct scan_control *sc)
{
	int pgmoved;
	int pgdeactivate = 0;
	int pgscanned;
	int nr_pages = sc->nr_to_scan;
	LIST_HEAD(l_hold);	/* The pages which were snipped off */
	LIST_HEAD(l_inactive);	/* Pages to go onto the inactive_list */
	LIST_HEAD(l_active);	/* Pages to go onto the active_list */
	struct page *page;
	struct pagevec pvec;
	int reclaim_mapped = 0;
	long mapped_ratio;
	long distress;
	long swap_tendency;

	lru_add_drain();
	spin_lock_irq(&zone->lru_lock);
	pgmoved = isolate_lru_pages(nr_pages, &zone->active_list,
				    &l_hold, &pgscanned);
	zone->pages_scanned += pgscanned;
	zone->nr_active -= pgmoved;
	spin_unlock_irq(&zone->lru_lock);

	/*
	 * `distress' is a measure of how much trouble we're having reclaiming
	 * pages.  0 -> no problems.  100 -> great trouble.
	 */
	distress = 100 >> zone->prev_priority;

	/*
	 * The point of this algorithm is to decide when to start reclaiming
	 * mapped memory instead of just pagecache.  Work out how much memory
	 * is mapped.
	 */
	mapped_ratio = (sc->nr_mapped * 100) / total_memory;

	/*
	 * Now decide how strongly we want to unmap pages.  The mapped ratio
	 * is downgraded - just because there's a lot of mapped memory
	 * doesn't necessarily mean that page reclaim isn't succeeding.
	 *
	 * The distress ratio is important - we don't want to start going oom.
	 *
	 * A 100% value of vm_swappiness overrides this algorithm altogether.
	 */
	swap_tendency = mapped_ratio / 2 + distress + vm_swappiness;

	/*
	 * Now use this metric to decide whether to start moving mapped memory
	 * onto the inactive list.
	 */
	if (swap_tendency >= 100)
		reclaim_mapped = 1;
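
	/*
	 * Worked example (illustrative numbers): with 30% of memory mapped
	 * (mapped_ratio = 30), prev_priority = 6 (distress = 100 >> 6 = 1)
	 * and the default vm_swappiness of 60, swap_tendency = 15 + 1 + 60
	 * = 76 < 100, so mapped pages stay on the active list.  Under real
	 * trouble (prev_priority = 0, distress = 100) the sum reaches 100
	 * regardless, and mapped pages become eligible for deactivation.
	 */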

	while (!list_empty(&l_hold)) {
		cond_resched();
		page = lru_to_page(&l_hold);
		list_del(&page->lru);
		if (page_mapped(page)) {
			if (!reclaim_mapped ||
			    (total_swap_pages == 0 && PageAnon(page)) ||
			    page_referenced(page, 0)) {
				list_add(&page->lru, &l_active);
				continue;
			}
		}
		list_add(&page->lru, &l_inactive);
	}

	pagevec_init(&pvec, 1);
	pgmoved = 0;
	spin_lock_irq(&zone->lru_lock);
	while (!list_empty(&l_inactive)) {
		page = lru_to_page(&l_inactive);
		prefetchw_prev_lru_page(page, &l_inactive, flags);
		if (TestSetPageLRU(page))
			BUG();
		if (!TestClearPageActive(page))
			BUG();
		list_move(&page->lru, &zone->inactive_list);
		pgmoved++;
		if (!pagevec_add(&pvec, page)) {
			zone->nr_inactive += pgmoved;
			spin_unlock_irq(&zone->lru_lock);
			pgdeactivate += pgmoved;
			pgmoved = 0;
			if (buffer_heads_over_limit)
				pagevec_strip(&pvec);
			__pagevec_release(&pvec);
			spin_lock_irq(&zone->lru_lock);
		}
	}
	zone->nr_inactive += pgmoved;
	pgdeactivate += pgmoved;
	if (buffer_heads_over_limit) {
		spin_unlock_irq(&zone->lru_lock);
		pagevec_strip(&pvec);
		spin_lock_irq(&zone->lru_lock);
	}

	pgmoved = 0;
	while (!list_empty(&l_active)) {
		page = lru_to_page(&l_active);
		prefetchw_prev_lru_page(page, &l_active, flags);
		if (TestSetPageLRU(page))
			BUG();
		BUG_ON(!PageActive(page));
		list_move(&page->lru, &zone->active_list);
		pgmoved++;
		if (!pagevec_add(&pvec, page)) {
			zone->nr_active += pgmoved;
			pgmoved = 0;
			spin_unlock_irq(&zone->lru_lock);
			__pagevec_release(&pvec);
			spin_lock_irq(&zone->lru_lock);
		}
	}
	zone->nr_active += pgmoved;
	spin_unlock_irq(&zone->lru_lock);
	pagevec_release(&pvec);

	mod_page_state_zone(zone, pgrefill, pgscanned);
	mod_page_state(pgdeactivate, pgdeactivate);
}

/*
 * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
 */
static void
shrink_zone(struct zone *zone, struct scan_control *sc)
{
	unsigned long nr_active;
	unsigned long nr_inactive;

	atomic_inc(&zone->reclaim_in_progress);

	/*
	 * Add one to `nr_to_scan' just to make sure that the kernel will
	 * slowly sift through the active list.
	 */
	zone->nr_scan_active += (zone->nr_active >> sc->priority) + 1;
	nr_active = zone->nr_scan_active;
	if (nr_active >= sc->swap_cluster_max)
		zone->nr_scan_active = 0;
	else
		nr_active = 0;

	zone->nr_scan_inactive += (zone->nr_inactive >> sc->priority) + 1;
	nr_inactive = zone->nr_scan_inactive;
	if (nr_inactive >= sc->swap_cluster_max)
		zone->nr_scan_inactive = 0;
	else
		nr_inactive = 0;

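	/*
	 * Example for the accumulation above (assuming DEF_PRIORITY == 12):
	 * with nr_active == 1000000, each call at the default priority adds
	 * (1000000 >> 12) + 1 = 245 pages to nr_scan_active.  Batches smaller
	 * than swap_cluster_max are not scanned immediately but carried over
	 * to later calls.
	 */
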
	sc->nr_to_reclaim = sc->swap_cluster_max;

	while (nr_active || nr_inactive) {
		if (nr_active) {
			sc->nr_to_scan = min(nr_active,
					(unsigned long)sc->swap_cluster_max);
			nr_active -= sc->nr_to_scan;
			refill_inactive_zone(zone, sc);
		}

		if (nr_inactive) {
			sc->nr_to_scan = min(nr_inactive,
					(unsigned long)sc->swap_cluster_max);
			nr_inactive -= sc->nr_to_scan;
			shrink_cache(zone, sc);
			if (sc->nr_to_reclaim <= 0)
				break;
		}
	}

	throttle_vm_writeout();

	atomic_dec(&zone->reclaim_in_progress);
}

/*
 * This is the direct reclaim path, for page-allocating processes.  We only
 * try to reclaim pages from zones which will satisfy the caller's allocation
 * request.
 *
 * We reclaim from a zone even if that zone is over pages_high.  Because:
 * a) The caller may be trying to free *extra* pages to satisfy a higher-order
 *    allocation or
 * b) The zones may be over pages_high but they must go *over* pages_high to
 *    satisfy the `incremental min' zone defense algorithm.
 *
 * The number of reclaimed pages is accumulated in sc->nr_reclaimed.
 *
 * If a zone is deemed to be full of pinned pages then just give it a light
 * scan then give up on it.
 */
static void
shrink_caches(struct zone **zones, struct scan_control *sc)
{
	int i;

	for (i = 0; zones[i] != NULL; i++) {
		struct zone *zone = zones[i];

		if (zone->present_pages == 0)
			continue;

		if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
			continue;

		zone->temp_priority = sc->priority;
		if (zone->prev_priority > sc->priority)
			zone->prev_priority = sc->priority;

		if (zone->all_unreclaimable && sc->priority != DEF_PRIORITY)
			continue;	/* Let kswapd poll it */

		shrink_zone(zone, sc);
	}
}

/*
 * This is the main entry point to direct page reclaim.
 *
 * If a full scan of the inactive list fails to free enough memory then we
 * are "out of memory" and something needs to be killed.
 *
 * If the caller is !__GFP_FS then the probability of a failure is reasonably
 * high - the zone may be full of dirty or under-writeback pages, which this
 * caller can't do much about.  We kick pdflush and take explicit naps in the
 * hope that some of these pages can be written.  But if the allocating task
 * holds filesystem locks which prevent writeout this might not work, and the
 * allocation attempt will fail.
 */
int try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
{
	int priority;
	int ret = 0;
	int total_scanned = 0, total_reclaimed = 0;
	struct reclaim_state *reclaim_state = current->reclaim_state;
	struct scan_control sc;
	unsigned long lru_pages = 0;
	int i;

	sc.gfp_mask = gfp_mask;
	sc.may_writepage = 0;
	sc.may_swap = 1;

	inc_page_state(allocstall);

	for (i = 0; zones[i] != NULL; i++) {
		struct zone *zone = zones[i];

		if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
			continue;

		zone->temp_priority = DEF_PRIORITY;
		lru_pages += zone->nr_active + zone->nr_inactive;
	}

	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
		sc.nr_mapped = read_page_state(nr_mapped);
		sc.nr_scanned = 0;
		sc.nr_reclaimed = 0;
		sc.priority = priority;
		sc.swap_cluster_max = SWAP_CLUSTER_MAX;
		if (!priority)
			disable_swap_token();
		shrink_caches(zones, &sc);
		shrink_slab(sc.nr_scanned, gfp_mask, lru_pages);
		if (reclaim_state) {
			sc.nr_reclaimed += reclaim_state->reclaimed_slab;
			reclaim_state->reclaimed_slab = 0;
		}
		total_scanned += sc.nr_scanned;
		total_reclaimed += sc.nr_reclaimed;
		if (total_reclaimed >= sc.swap_cluster_max) {
			ret = 1;
			goto out;
		}

		/*
		 * Try to write back as many pages as we just scanned.  This
		 * tends to cause slow streaming writers to write data to the
		 * disk smoothly, at the dirtying rate, which is nice.   But
		 * that's undesirable in laptop mode, where we *want* lumpy
		 * writeout.  So in laptop mode, write out the whole world.
		 */
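		/*
		 * (Threshold: 1.5 * swap_cluster_max scanned pages, i.e. 48
		 * with the usual SWAP_CLUSTER_MAX of 32.)
		 */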
		if (total_scanned > sc.swap_cluster_max + sc.swap_cluster_max/2) {
			wakeup_pdflush(laptop_mode ? 0 : total_scanned);
			sc.may_writepage = 1;
		}

		/* Take a nap, wait for some writeback to complete */
		if (sc.nr_scanned && priority < DEF_PRIORITY - 2)
			blk_congestion_wait(WRITE, HZ/10);
	}
out:
	for (i = 0; zones[i] != NULL; i++) {
		struct zone *zone = zones[i];

		if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
			continue;

		zone->prev_priority = zone->temp_priority;
	}
	return ret;
}

/*
 * For kswapd, balance_pgdat() will work across all this node's zones until
 * they are all at pages_high.
 *
 * If `nr_pages' is non-zero then it is the number of pages which are to be
 * reclaimed, regardless of the zone occupancies.  This is a software suspend
 * special.
 *
 * Returns the number of pages which were actually freed.
 *
 * There is special handling here for zones which are full of pinned pages.
 * This can happen if the pages are all mlocked, or if they are all used by
 * device drivers (say, ZONE_DMA).  Or if they are all in use by hugetlb.
 * What we do is to detect the case where the zone's pages have been scanned
 * four times over with zero successful reclaim.  Mark the zone as dead and
 * from now on, only perform a short scan.  Basically we're polling the zone
 * for when the problem goes away.
 *
 * kswapd scans the zones in the highmem->normal->dma direction.  It skips
 * zones which have free_pages > pages_high, but once a zone is found to have
 * free_pages <= pages_high, we scan that zone and the lower zones regardless
 * of the number of free pages in the lower zones.  This interoperates with
 * the page allocator fallback scheme to ensure that aging of pages is balanced
 * across the zones.
 */
static int balance_pgdat(pg_data_t *pgdat, int nr_pages, int order)
{
	int to_free = nr_pages;
	int all_zones_ok;
	int priority;
	int i;
	int total_scanned, total_reclaimed;
	struct reclaim_state *reclaim_state = current->reclaim_state;
	struct scan_control sc;

loop_again:
	total_scanned = 0;
	total_reclaimed = 0;
	sc.gfp_mask = GFP_KERNEL;
	sc.may_writepage = 0;
	sc.may_swap = 1;
	sc.nr_mapped = read_page_state(nr_mapped);

	inc_page_state(pageoutrun);

	for (i = 0; i < pgdat->nr_zones; i++) {
		struct zone *zone = pgdat->node_zones + i;

		zone->temp_priority = DEF_PRIORITY;
	}

	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
		int end_zone = 0;	/* Inclusive.  0 = ZONE_DMA */
		unsigned long lru_pages = 0;

		/* The swap token gets in the way of swapout... */
		if (!priority)
			disable_swap_token();

		all_zones_ok = 1;

		if (nr_pages == 0) {
			/*
			 * Scan in the highmem->dma direction for the highest
			 * zone which needs scanning
			 */
			for (i = pgdat->nr_zones - 1; i >= 0; i--) {
				struct zone *zone = pgdat->node_zones + i;

				if (zone->present_pages == 0)
					continue;

				if (zone->all_unreclaimable &&
						priority != DEF_PRIORITY)
					continue;

				if (!zone_watermark_ok(zone, order,
						zone->pages_high, 0, 0)) {
					end_zone = i;
					goto scan;
				}
			}
			goto out;
		} else {
			end_zone = pgdat->nr_zones - 1;
		}
scan:
		for (i = 0; i <= end_zone; i++) {
			struct zone *zone = pgdat->node_zones + i;

			lru_pages += zone->nr_active + zone->nr_inactive;
		}

		/*
		 * Now scan the zone in the dma->highmem direction, stopping
		 * at the last zone which needs scanning.
		 *
		 * We do this because the page allocator works in the opposite
		 * direction.  This prevents the page allocator from allocating
		 * pages behind kswapd's direction of progress, which would
		 * cause too much scanning of the lower zones.
		 */
		for (i = 0; i <= end_zone; i++) {
			struct zone *zone = pgdat->node_zones + i;
			int nr_slab;

			if (zone->present_pages == 0)
				continue;

			if (zone->all_unreclaimable && priority != DEF_PRIORITY)
				continue;

			if (nr_pages == 0) {	/* Not software suspend */
				if (!zone_watermark_ok(zone, order,
						zone->pages_high, end_zone, 0))
					all_zones_ok = 0;
			}
			zone->temp_priority = priority;
			if (zone->prev_priority > priority)
				zone->prev_priority = priority;
			sc.nr_scanned = 0;
			sc.nr_reclaimed = 0;
			sc.priority = priority;
			sc.swap_cluster_max = nr_pages ? nr_pages : SWAP_CLUSTER_MAX;
			atomic_inc(&zone->reclaim_in_progress);
			shrink_zone(zone, &sc);
			atomic_dec(&zone->reclaim_in_progress);
			reclaim_state->reclaimed_slab = 0;
			nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
						lru_pages);
			sc.nr_reclaimed += reclaim_state->reclaimed_slab;
			total_reclaimed += sc.nr_reclaimed;
			total_scanned += sc.nr_scanned;
			if (zone->all_unreclaimable)
				continue;
			if (nr_slab == 0 && zone->pages_scanned >=
				    (zone->nr_active + zone->nr_inactive) * 4)
				zone->all_unreclaimable = 1;
			/*
			 * If we've done a decent amount of scanning and
			 * the reclaim ratio is low, start doing writepage
			 * even in laptop mode
			 */
			if (total_scanned > SWAP_CLUSTER_MAX * 2 &&
			    total_scanned > total_reclaimed + total_reclaimed / 2)
				sc.may_writepage = 1;
		}
		if (nr_pages && to_free > total_reclaimed)
			continue;	/* swsusp: need to do more work */
		if (all_zones_ok)
			break;		/* kswapd: all done */
		/*
		 * OK, kswapd is getting into trouble.  Take a nap, then take
		 * another pass across the zones.
		 */
		if (total_scanned && priority < DEF_PRIORITY - 2)
			blk_congestion_wait(WRITE, HZ/10);

		/*
		 * We do this so kswapd doesn't build up large priorities for
		 * example when it is freeing in parallel with allocators. It
		 * matches the direct reclaim path behaviour in terms of impact
		 * on zone->*_priority.
		 */
		if ((total_reclaimed >= SWAP_CLUSTER_MAX) && (!nr_pages))
			break;
	}
out:
	for (i = 0; i < pgdat->nr_zones; i++) {
		struct zone *zone = pgdat->node_zones + i;

		zone->prev_priority = zone->temp_priority;
	}
	if (!all_zones_ok) {
		cond_resched();
		goto loop_again;
	}

	return total_reclaimed;
}

/*
 * The background pageout daemon, started as a kernel thread
 * from the init process.
 *
 * This basically trickles out pages so that we have _some_
 * free memory available even if there is no other activity
 * that frees anything up. This is needed for things like routing
 * etc, where we otherwise might have all activity going on in
 * asynchronous contexts that cannot page things out.
 *
 * If there are applications that are active memory-allocators
 * (most normal use), this basically shouldn't matter.
 */
static int kswapd(void *p)
{
	unsigned long order;
	pg_data_t *pgdat = (pg_data_t *)p;
	struct task_struct *tsk = current;
	DEFINE_WAIT(wait);
	struct reclaim_state reclaim_state = {
		.reclaimed_slab = 0,
	};
	cpumask_t cpumask;

	daemonize("kswapd%d", pgdat->node_id);
	cpumask = node_to_cpumask(pgdat->node_id);
	if (!cpus_empty(cpumask))
		set_cpus_allowed(tsk, cpumask);
	current->reclaim_state = &reclaim_state;

	/*
	 * Tell the memory management that we're a "memory allocator",
	 * and that if we need more memory we should get access to it
	 * regardless (see "__alloc_pages()"). "kswapd" should
	 * never get caught in the normal page freeing logic.
	 *
	 * (Kswapd normally doesn't need memory anyway, but sometimes
	 * you need a small amount of memory in order to be able to
	 * page out something else, and this flag essentially protects
	 * us from recursively trying to free more memory as we're
	 * trying to free the first piece of memory in the first place).
	 */
	tsk->flags |= PF_MEMALLOC|PF_KSWAPD;

	order = 0;
	for ( ; ; ) {
		unsigned long new_order;

		try_to_freeze();

		prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
		new_order = pgdat->kswapd_max_order;
		pgdat->kswapd_max_order = 0;
		if (order < new_order) {
			/*
			 * Don't sleep if someone wants a larger 'order'
			 * allocation
			 */
			order = new_order;
		} else {
			schedule();
			order = pgdat->kswapd_max_order;
		}
		finish_wait(&pgdat->kswapd_wait, &wait);

		balance_pgdat(pgdat, 0, order);
	}
	return 0;
}

/*
 * A zone is low on free memory, so wake its kswapd task to service it.
 */
void wakeup_kswapd(struct zone *zone, int order)
{
	pg_data_t *pgdat;

	if (zone->present_pages == 0)
		return;

	pgdat = zone->zone_pgdat;
	if (zone_watermark_ok(zone, order, zone->pages_low, 0, 0))
		return;
	if (pgdat->kswapd_max_order < order)
		pgdat->kswapd_max_order = order;
	if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
		return;
	if (!waitqueue_active(&pgdat->kswapd_wait))
		return;
	wake_up_interruptible(&pgdat->kswapd_wait);
}

#ifdef CONFIG_PM
/*
 * Try to free `nr_pages' of memory, system-wide.  Returns the number of freed
 * pages.
 */
int shrink_all_memory(int nr_pages)
{
	pg_data_t *pgdat;
	int nr_to_free = nr_pages;
	int ret = 0;
	struct reclaim_state reclaim_state = {
		.reclaimed_slab = 0,
	};

	current->reclaim_state = &reclaim_state;
	for_each_pgdat(pgdat) {
		int freed;
		freed = balance_pgdat(pgdat, nr_to_free, 0);
		ret += freed;
		nr_to_free -= freed;
		if (nr_to_free <= 0)
			break;
	}
	current->reclaim_state = NULL;
	return ret;
}
#endif

#ifdef CONFIG_HOTPLUG_CPU
/* It's optimal to keep kswapds on the same CPUs as their memory, but
   not required for correctness.  So if the last cpu in a node goes
   away, we get changed to run anywhere: as the first one comes back,
   restore their cpu bindings. */
static int __devinit cpu_callback(struct notifier_block *nfb,
				  unsigned long action,
				  void *hcpu)
{
	pg_data_t *pgdat;
	cpumask_t mask;

	if (action == CPU_ONLINE) {
		for_each_pgdat(pgdat) {
			mask = node_to_cpumask(pgdat->node_id);
			if (any_online_cpu(mask) != NR_CPUS)
				/* One of our CPUs online: restore mask */
				set_cpus_allowed(pgdat->kswapd, mask);
		}
	}
	return NOTIFY_OK;
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __init kswapd_init(void)
{
	pg_data_t *pgdat;

	swap_setup();
	for_each_pgdat(pgdat)
		pgdat->kswapd
		= find_task_by_pid(kernel_thread(kswapd, pgdat, CLONE_KERNEL));
	total_memory = nr_free_pagecache_pages();
	hotcpu_notifier(cpu_callback, 0);
	return 0;
}

module_init(kswapd_init)


/*
 * Try to free up some pages from this zone through reclaim.
 */
int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
{
	struct scan_control sc;
	int nr_pages = 1 << order;
	int total_reclaimed = 0;

	/* The reclaim may sleep, so don't do it if sleep isn't allowed */
	if (!(gfp_mask & __GFP_WAIT))
		return 0;
	if (zone->all_unreclaimable)
		return 0;

	sc.gfp_mask = gfp_mask;
	sc.may_writepage = 0;
	sc.may_swap = 0;
	sc.nr_mapped = read_page_state(nr_mapped);
	sc.nr_scanned = 0;
	sc.nr_reclaimed = 0;
	/* scan at the highest priority */
	sc.priority = 0;
	disable_swap_token();

	if (nr_pages > SWAP_CLUSTER_MAX)
		sc.swap_cluster_max = nr_pages;
	else
		sc.swap_cluster_max = SWAP_CLUSTER_MAX;

	/* Don't reclaim the zone if there are other reclaimers active */
	if (atomic_read(&zone->reclaim_in_progress) > 0)
		goto out;

	shrink_zone(zone, &sc);
	total_reclaimed = sc.nr_reclaimed;

out:
	return total_reclaimed;
}

asmlinkage long sys_set_zone_reclaim(unsigned int node, unsigned int zone,
				     unsigned int state)
{
	struct zone *z;
	int i;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (node >= MAX_NUMNODES || !node_online(node))
		return -EINVAL;

	/* This will break if we ever add more zones */
	if (!(zone & (1<<ZONE_DMA|1<<ZONE_NORMAL|1<<ZONE_HIGHMEM)))
		return -EINVAL;

	for (i = 0; i < MAX_NR_ZONES; i++) {
		if (!(zone & 1<<i))
			continue;

		z = &NODE_DATA(node)->node_zones[i];

		if (state)
			z->reclaim_pages = 1;
		else
			z->reclaim_pages = 0;
	}

	return 0;
}