swap.c revision 529ae9aaa08378cfe2a4350bded76f32cc8ff0ce
/*
 *  linux/mm/swap.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * This file contains the default values for the operation of the
 * Linux VM subsystem. Fine-tuning documentation can be found in
 * Documentation/sysctl/vm.txt.
 * Started 18.12.91
 * Swap aging added 23.2.95, Stephen Tweedie.
 * Buffermem limits added 12.3.98, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm_inline.h>
#include <linux/buffer_head.h>	/* for try_to_release_page() */
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/backing-dev.h>
#include <linux/memcontrol.h>

/* How many pages do we try to swap or page in/out together? */
int page_cluster;

static DEFINE_PER_CPU(struct pagevec, lru_add_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_add_active_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);

/*
 * This path almost never happens for VM activity - pages are normally
 * freed via pagevecs.  But it gets used by networking.
 */
static void __page_cache_release(struct page *page)
{
	if (PageLRU(page)) {
		unsigned long flags;
		struct zone *zone = page_zone(page);

		spin_lock_irqsave(&zone->lru_lock, flags);
		VM_BUG_ON(!PageLRU(page));
		__ClearPageLRU(page);
		del_page_from_lru(zone, page);
		spin_unlock_irqrestore(&zone->lru_lock, flags);
	}
	free_hot_page(page);
}

static void put_compound_page(struct page *page)
{
	page = compound_head(page);
	if (put_page_testzero(page)) {
		compound_page_dtor *dtor;

		dtor = get_compound_page_dtor(page);
		(*dtor)(page);
	}
}

void put_page(struct page *page)
{
	if (unlikely(PageCompound(page)))
		put_compound_page(page);
	else if (put_page_testzero(page))
		__page_cache_release(page);
}
EXPORT_SYMBOL(put_page);
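
/*
 * Illustrative sketch (not part of the original mm/swap.c): the usual
 * pairing around a page-cache lookup.  find_get_page() hands back a page
 * with its reference count elevated, and put_page() is how that reference
 * is eventually dropped.  The function name below is made up purely for
 * the example.
 */
static int example_peek_at_page(struct address_space *mapping, pgoff_t index)
{
	struct page *page;

	page = find_get_page(mapping, index);	/* takes a reference */
	if (!page)
		return -ENOENT;

	/* ... inspect the page here ... */

	put_page(page);				/* drop that reference */
	return 0;
}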
79
80/**
81 * put_pages_list() - release a list of pages
82 * @pages: list of pages threaded on page->lru
83 *
84 * Release a list of pages which are strung together on page.lru.  Currently
85 * used by read_cache_pages() and related error recovery code.
86 */
87void put_pages_list(struct list_head *pages)
88{
89	while (!list_empty(pages)) {
90		struct page *victim;
91
92		victim = list_entry(pages->prev, struct page, lru);
93		list_del(&victim->lru);
94		page_cache_release(victim);
95	}
96}
97EXPORT_SYMBOL(put_pages_list);
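
/*
 * Illustrative sketch (not part of the original mm/swap.c): pages strung
 * together on page->lru, the way readahead builds them, can be handed back
 * in a single call.  The helper name and the GFP_KERNEL allocation are
 * assumptions made only for this example.
 */
static void example_drop_page_list(void)
{
	LIST_HEAD(pages);
	int i;

	for (i = 0; i < 4; i++) {
		struct page *page = alloc_page(GFP_KERNEL);

		if (!page)
			break;
		list_add(&page->lru, &pages);	/* thread the page onto the list */
	}

	put_pages_list(&pages);		/* drops one reference on each page */
}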

/*
 * pagevec_move_tail() must be called with IRQ disabled.
 * Otherwise this may cause nasty races.
 */
static void pagevec_move_tail(struct pagevec *pvec)
{
	int i;
	int pgmoved = 0;
	struct zone *zone = NULL;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct zone *pagezone = page_zone(page);

		if (pagezone != zone) {
			if (zone)
				spin_unlock(&zone->lru_lock);
			zone = pagezone;
			spin_lock(&zone->lru_lock);
		}
		if (PageLRU(page) && !PageActive(page)) {
			list_move_tail(&page->lru, &zone->inactive_list);
			pgmoved++;
		}
	}
	if (zone)
		spin_unlock(&zone->lru_lock);
	__count_vm_events(PGROTATED, pgmoved);
	release_pages(pvec->pages, pvec->nr, pvec->cold);
	pagevec_reinit(pvec);
}

/*
 * Writeback is about to end against a page which has been marked for immediate
 * reclaim.  If it still appears to be reclaimable, move it to the tail of the
 * inactive list.
 */
void rotate_reclaimable_page(struct page *page)
{
	if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
	    PageLRU(page)) {
		struct pagevec *pvec;
		unsigned long flags;

		page_cache_get(page);
		local_irq_save(flags);
		pvec = &__get_cpu_var(lru_rotate_pvecs);
		if (!pagevec_add(pvec, page))
			pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}
}
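
/*
 * Illustrative sketch (not part of the original mm/swap.c): the caller of
 * record is writeback completion - end_page_writeback() does roughly the
 * following, so only pages explicitly marked PG_reclaim get rotated.  The
 * function name is made up for the example.
 */
static void example_writeback_finished(struct page *page)
{
	if (TestClearPageReclaim(page))
		rotate_reclaimable_page(page);	/* queue for tail of inactive list */
}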

/*
 * FIXME: speed this up?
 */
void activate_page(struct page *page)
{
	struct zone *zone = page_zone(page);

	spin_lock_irq(&zone->lru_lock);
	if (PageLRU(page) && !PageActive(page)) {
		del_page_from_inactive_list(zone, page);
		SetPageActive(page);
		add_page_to_active_list(zone, page);
		__count_vm_event(PGACTIVATE);
		mem_cgroup_move_lists(page, true);
	}
	spin_unlock_irq(&zone->lru_lock);
}

/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced	->	inactive,referenced
 * inactive,referenced		->	active,unreferenced
 * active,unreferenced		->	active,referenced
 */
void mark_page_accessed(struct page *page)
{
	if (!PageActive(page) && PageReferenced(page) && PageLRU(page)) {
		activate_page(page);
		ClearPageReferenced(page);
	} else if (!PageReferenced(page)) {
		SetPageReferenced(page);
	}
}

EXPORT_SYMBOL(mark_page_accessed);
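
/*
 * Illustrative sketch (not part of the original mm/swap.c): two calls
 * against a page sitting unreferenced on the inactive list walk it through
 * the transitions documented above - the first only sets PG_referenced,
 * the second activates the page and clears PG_referenced again.
 */
static void example_touch_twice(struct page *page)
{
	mark_page_accessed(page);	/* inactive,unreferenced -> inactive,referenced */
	mark_page_accessed(page);	/* inactive,referenced -> active,unreferenced */
}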

/**
 * lru_cache_add - add a page to the inactive LRU list
 * @page: the page to add
 */
void lru_cache_add(struct page *page)
{
	struct pagevec *pvec = &get_cpu_var(lru_add_pvecs);

	page_cache_get(page);
	if (!pagevec_add(pvec, page))
		__pagevec_lru_add(pvec);
	put_cpu_var(lru_add_pvecs);
}

void lru_cache_add_active(struct page *page)
{
	struct pagevec *pvec = &get_cpu_var(lru_add_active_pvecs);

	page_cache_get(page);
	if (!pagevec_add(pvec, page))
		__pagevec_lru_add_active(pvec);
	put_cpu_var(lru_add_active_pvecs);
}
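
/*
 * Illustrative sketch (not part of the original mm/swap.c): a freshly
 * allocated page can be queued for the active list straight away, the way
 * newly faulted-in anonymous pages are.  The pagevec takes its own
 * reference, so the reference from alloc_page() still belongs to the
 * caller.  The helper name and GFP mask are assumptions for the example.
 */
static struct page *example_new_active_page(void)
{
	struct page *page = alloc_page(GFP_HIGHUSER);

	if (page)
		lru_cache_add_active(page);	/* queued via the per-cpu pagevec */
	return page;
}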

/*
 * Drain pages out of the cpu's pagevecs.
 * Either "cpu" is the current CPU, and preemption has already been
 * disabled; or "cpu" is being hot-unplugged, and is already dead.
 */
static void drain_cpu_pagevecs(int cpu)
{
	struct pagevec *pvec;

	pvec = &per_cpu(lru_add_pvecs, cpu);
	if (pagevec_count(pvec))
		__pagevec_lru_add(pvec);

	pvec = &per_cpu(lru_add_active_pvecs, cpu);
	if (pagevec_count(pvec))
		__pagevec_lru_add_active(pvec);

	pvec = &per_cpu(lru_rotate_pvecs, cpu);
	if (pagevec_count(pvec)) {
		unsigned long flags;

		/* No harm done if a racing interrupt already did this */
		local_irq_save(flags);
		pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}
}

void lru_add_drain(void)
{
	drain_cpu_pagevecs(get_cpu());
	put_cpu();
}

#ifdef CONFIG_NUMA
static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
	lru_add_drain();
}

/*
 * Returns 0 for success
 */
int lru_add_drain_all(void)
{
	return schedule_on_each_cpu(lru_add_drain_per_cpu);
}

#else

/*
 * Returns 0 for success
 */
int lru_add_drain_all(void)
{
	lru_add_drain();
	return 0;
}
#endif
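
/*
 * Illustrative sketch (not part of the original mm/swap.c): code that is
 * about to scan the LRU lists, or that needs recently added pages to
 * actually be on them, typically flushes the pagevecs first - every CPU
 * with the _all variant, or just the local one with lru_add_drain().
 */
static int example_flush_before_scan(void)
{
	int err;

	err = lru_add_drain_all();	/* each CPU empties its pagevecs */
	if (err)
		return err;

	/* ... walk zone->active_list / zone->inactive_list here ... */
	return 0;
}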

/*
 * Batched page_cache_release().  Decrement the reference count on all the
 * passed pages.  If it fell to zero then remove the page from the LRU and
 * free it.
 *
 * Avoid taking zone->lru_lock if possible, but if it is taken, retain it
 * for the remainder of the operation.
 *
 * The locking in this function is against shrink_inactive_list(): we recheck
 * the page count inside the lock to see whether shrink_inactive_list()
 * grabbed the page via the LRU.  If it did, give up: shrink_inactive_list()
 * will free it.
 */
void release_pages(struct page **pages, int nr, int cold)
{
	int i;
	struct pagevec pages_to_free;
	struct zone *zone = NULL;
	unsigned long uninitialized_var(flags);

	pagevec_init(&pages_to_free, cold);
	for (i = 0; i < nr; i++) {
		struct page *page = pages[i];

		if (unlikely(PageCompound(page))) {
			if (zone) {
				spin_unlock_irqrestore(&zone->lru_lock, flags);
				zone = NULL;
			}
			put_compound_page(page);
			continue;
		}

		if (!put_page_testzero(page))
			continue;

		if (PageLRU(page)) {
			struct zone *pagezone = page_zone(page);
			if (pagezone != zone) {
				if (zone)
					spin_unlock_irqrestore(&zone->lru_lock,
									flags);
				zone = pagezone;
				spin_lock_irqsave(&zone->lru_lock, flags);
			}
			VM_BUG_ON(!PageLRU(page));
			__ClearPageLRU(page);
			del_page_from_lru(zone, page);
		}

		if (!pagevec_add(&pages_to_free, page)) {
			if (zone) {
				spin_unlock_irqrestore(&zone->lru_lock, flags);
				zone = NULL;
			}
			__pagevec_free(&pages_to_free);
			pagevec_reinit(&pages_to_free);
		}
	}
	if (zone)
		spin_unlock_irqrestore(&zone->lru_lock, flags);

	pagevec_free(&pages_to_free);
}
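
/*
 * Illustrative sketch (not part of the original mm/swap.c): a batch of
 * referenced pages, here gathered with find_get_pages(), can be dropped
 * with a single call rather than sixteen put_page()s.  The helper name
 * and the batch size are assumptions for the example.
 */
static void example_release_batch(struct address_space *mapping)
{
	struct page *pages[16];
	unsigned nr;

	nr = find_get_pages(mapping, 0, 16, pages);	/* takes a reference each */
	release_pages(pages, nr, 0);			/* cold == 0: treat pages as cache-hot */
}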

/*
 * The pages which we're about to release may be in the deferred lru-addition
 * queues.  That would prevent them from really being freed right now.  That's
 * OK from a correctness point of view but is inefficient - those pages may be
 * cache-warm and we want to give them back to the page allocator ASAP.
 *
 * So __pagevec_release() will drain those queues here.  __pagevec_lru_add()
 * and __pagevec_lru_add_active() call release_pages() directly to avoid
 * mutual recursion.
 */
void __pagevec_release(struct pagevec *pvec)
{
	lru_add_drain();
	release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
	pagevec_reinit(pvec);
}

EXPORT_SYMBOL(__pagevec_release);

/*
 * pagevec_release() for pages which are known not to be on the LRU
 *
 * This function reinitialises the caller's pagevec.
 */
void __pagevec_release_nonlru(struct pagevec *pvec)
{
	int i;
	struct pagevec pages_to_free;

	pagevec_init(&pages_to_free, pvec->cold);
	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];

		VM_BUG_ON(PageLRU(page));
		if (put_page_testzero(page))
			pagevec_add(&pages_to_free, page);
	}
	pagevec_free(&pages_to_free);
	pagevec_reinit(pvec);
}

/*
 * Add the passed pages to the LRU, then drop the caller's refcount
 * on them.  Reinitialises the caller's pagevec.
 */
void __pagevec_lru_add(struct pagevec *pvec)
{
	int i;
	struct zone *zone = NULL;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct zone *pagezone = page_zone(page);

		if (pagezone != zone) {
			if (zone)
				spin_unlock_irq(&zone->lru_lock);
			zone = pagezone;
			spin_lock_irq(&zone->lru_lock);
		}
		VM_BUG_ON(PageLRU(page));
		SetPageLRU(page);
		add_page_to_inactive_list(zone, page);
	}
	if (zone)
		spin_unlock_irq(&zone->lru_lock);
	release_pages(pvec->pages, pvec->nr, pvec->cold);
	pagevec_reinit(pvec);
}

EXPORT_SYMBOL(__pagevec_lru_add);

void __pagevec_lru_add_active(struct pagevec *pvec)
{
	int i;
	struct zone *zone = NULL;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct zone *pagezone = page_zone(page);

		if (pagezone != zone) {
			if (zone)
				spin_unlock_irq(&zone->lru_lock);
			zone = pagezone;
			spin_lock_irq(&zone->lru_lock);
		}
		VM_BUG_ON(PageLRU(page));
		SetPageLRU(page);
		VM_BUG_ON(PageActive(page));
		SetPageActive(page);
		add_page_to_active_list(zone, page);
	}
	if (zone)
		spin_unlock_irq(&zone->lru_lock);
	release_pages(pvec->pages, pvec->nr, pvec->cold);
	pagevec_reinit(pvec);
}

/*
 * Try to drop buffers from the pages in a pagevec
 */
void pagevec_strip(struct pagevec *pvec)
{
	int i;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];

		if (PagePrivate(page) && trylock_page(page)) {
			if (PagePrivate(page))
				try_to_release_page(page, 0);
			unlock_page(page);
		}
	}
}

/**
 * pagevec_lookup - gang pagecache lookup
 * @pvec:	Where the resulting pages are placed
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @nr_pages:	The maximum number of pages
 *
 * pagevec_lookup() will search for and return a group of up to @nr_pages pages
 * in the mapping.  The pages are placed in @pvec.  pagevec_lookup() takes a
 * reference against the pages in @pvec.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 *
 * pagevec_lookup() returns the number of pages which were found.
 */
unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
		pgoff_t start, unsigned nr_pages)
{
	pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);
	return pagevec_count(pvec);
}

EXPORT_SYMBOL(pagevec_lookup);
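
/*
 * Illustrative sketch (not part of the original mm/swap.c): the common
 * pattern walks a mapping in PAGEVEC_SIZE batches, advancing the start
 * index past the last page seen, and lets pagevec_release() drop the
 * references this lookup took.  The function name is made up for the
 * example.
 */
static void example_walk_mapping(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t next = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			next = page->index + 1;
			/* ... examine the page here ... */
		}
		pagevec_release(&pvec);	/* drops the lookup references */
	}
}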

unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
		pgoff_t *index, int tag, unsigned nr_pages)
{
	pvec->nr = find_get_pages_tag(mapping, index, tag,
					nr_pages, pvec->pages);
	return pagevec_count(pvec);
}

EXPORT_SYMBOL(pagevec_lookup_tag);
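
/*
 * Illustrative sketch (not part of the original mm/swap.c): the tagged
 * variant is what writeback-style loops build on.  Unlike pagevec_lookup(),
 * it advances @index itself, so the loop needs no bookkeeping of its own.
 * The function name is made up for the example.
 */
static void example_walk_dirty_pages(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t index = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (pagevec_lookup_tag(&pvec, mapping, &index,
				  PAGECACHE_TAG_DIRTY, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			/* ... write out pvec.pages[i] here ... */
		}
		pagevec_release(&pvec);
	}
}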

#ifdef CONFIG_SMP
/*
 * We tolerate a little inaccuracy to avoid ping-ponging the counter between
 * CPUs
 */
#define ACCT_THRESHOLD	max(16, NR_CPUS * 2)

static DEFINE_PER_CPU(long, committed_space);

void vm_acct_memory(long pages)
{
	long *local;

	preempt_disable();
	local = &__get_cpu_var(committed_space);
	*local += pages;
	if (*local > ACCT_THRESHOLD || *local < -ACCT_THRESHOLD) {
		atomic_long_add(*local, &vm_committed_space);
		*local = 0;
	}
	preempt_enable();
}
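
/*
 * Illustrative sketch (not part of the original mm/swap.c): the accounting
 * is symmetric - charge the estimated commitment up front and hand it back
 * if the operation fails or the mapping is torn down.  vm_unacct_memory()
 * is the mman.h wrapper that charges a negative amount.  The helper name
 * and the "failed" flag are assumptions for the example.
 */
static int example_charge_commitment(long pages, int failed)
{
	vm_acct_memory(pages);			/* charge the per-cpu counter */
	if (failed) {
		vm_unacct_memory(pages);	/* undo the charge */
		return -ENOMEM;
	}
	return 0;
}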

#ifdef CONFIG_HOTPLUG_CPU

/* Drop the CPU's cached committed space back into the central pool. */
static int cpu_swap_callback(struct notifier_block *nfb,
			     unsigned long action,
			     void *hcpu)
{
	long *committed;

	committed = &per_cpu(committed_space, (long)hcpu);
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
		atomic_long_add(*committed, &vm_committed_space);
		*committed = 0;
		drain_cpu_pagevecs((long)hcpu);
	}
	return NOTIFY_OK;
}
#endif /* CONFIG_HOTPLUG_CPU */
#endif /* CONFIG_SMP */

/*
 * Perform any setup for the swap system
 */
void __init swap_setup(void)
{
	unsigned long megs = num_physpages >> (20 - PAGE_SHIFT);

#ifdef CONFIG_SWAP
	bdi_init(swapper_space.backing_dev_info);
#endif

	/* Use a smaller cluster for small-memory machines */
	if (megs < 16)
		page_cluster = 2;
	else
		page_cluster = 3;
	/*
	 * Right now other parts of the system mean that we
	 * _really_ don't want to cluster much more.
	 */
#ifdef CONFIG_HOTPLUG_CPU
	hotcpu_notifier(cpu_swap_callback, 0);
#endif
}
557