rmap.c revision f7b7fd8f3ebbb2810d6893295aa984acd0fd30db
/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins <hugh@veritas.com> 2003, 2004
 */

/*
 * Lock ordering in mm:
 *
 * inode->i_sem	(while writing or truncating, not reading or faulting)
 *   inode->i_alloc_sem
 *
 * When a page fault occurs in writing from user to file, down_read
 * of mmap_sem nests within i_sem; in sys_msync, i_sem nests within
 * down_read of mmap_sem; i_sem and down_write of mmap_sem are never
 * taken together; in truncation, i_sem is taken outermost.
 *
 * mm->mmap_sem
 *   page->flags PG_locked (lock_page)
 *     mapping->i_mmap_lock
 *       anon_vma->lock
 *         mm->page_table_lock or pte_lock
 *           zone->lru_lock (in mark_page_accessed)
 *           swap_lock (in swap_duplicate, swap_info_get)
 *             mmlist_lock (in mmput, drain_mmlist and others)
 *             mapping->private_lock (in __set_page_dirty_buffers)
 *             inode_lock (in set_page_dirty's __mark_inode_dirty)
 *               sb_lock (within inode_lock in fs/fs-writeback.c)
 *               mapping->tree_lock (widely used, in set_page_dirty,
 *                         in arch-dependent flush_dcache_mmap_lock,
 *                         within inode_lock in __sync_single_inode)
 */
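
/*
 * As an illustration (not an exhaustive list of paths), the pageout
 * code calling try_to_unmap() on a file page below nests roughly as:
 *
 *	lock_page(page)
 *	  spin_lock(&mapping->i_mmap_lock)
 *	    pte lock (taken in page_check_address)
 *	      set_page_dirty(page)	(may take mapping->tree_lock)
 *
 * which follows the ordering laid out above.
 */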

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>

#include <asm/tlbflush.h>

//#define RMAP_DEBUG /* can be enabled only for debugging */

kmem_cache_t *anon_vma_cachep;

static inline void validate_anon_vma(struct vm_area_struct *find_vma)
{
#ifdef RMAP_DEBUG
	struct anon_vma *anon_vma = find_vma->anon_vma;
	struct vm_area_struct *vma;
	unsigned int mapcount = 0;
	int found = 0;

	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		mapcount++;
		BUG_ON(mapcount > 100000);
		if (vma == find_vma)
			found = 1;
	}
	BUG_ON(!found);
#endif
}

/* This must be called under the mmap_sem. */
int anon_vma_prepare(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	might_sleep();
	if (unlikely(!anon_vma)) {
		struct mm_struct *mm = vma->vm_mm;
		struct anon_vma *allocated, *locked;

		anon_vma = find_mergeable_anon_vma(vma);
		if (anon_vma) {
			allocated = NULL;
			locked = anon_vma;
			spin_lock(&locked->lock);
		} else {
			anon_vma = anon_vma_alloc();
			if (unlikely(!anon_vma))
				return -ENOMEM;
			allocated = anon_vma;
			locked = NULL;
		}

		/* page_table_lock to protect against threads */
		spin_lock(&mm->page_table_lock);
		if (likely(!vma->anon_vma)) {
			vma->anon_vma = anon_vma;
			list_add(&vma->anon_vma_node, &anon_vma->head);
			allocated = NULL;
		}
		spin_unlock(&mm->page_table_lock);

		if (locked)
			spin_unlock(&locked->lock);
		if (unlikely(allocated))
			anon_vma_free(allocated);
	}
	return 0;
}
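
/*
 * anon_vma_prepare() is called with mmap_sem held for reading, before
 * the first anonymous page is installed in a vma (e.g. from the
 * anonymous and copy-on-write fault paths in mm/memory.c), so that
 * page_add_anon_rmap() below always finds vma->anon_vma already set.
 */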

void __anon_vma_merge(struct vm_area_struct *vma, struct vm_area_struct *next)
{
	BUG_ON(vma->anon_vma != next->anon_vma);
	list_del(&next->anon_vma_node);
}

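/*
 * __anon_vma_link() and anon_vma_link() below perform the same list
 * insertion; the __ variant is for callers (such as vma_adjust) that
 * already hold the anon_vma lock, while anon_vma_link() takes and
 * releases the lock itself.
 */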
void __anon_vma_link(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	if (anon_vma) {
		list_add(&vma->anon_vma_node, &anon_vma->head);
		validate_anon_vma(vma);
	}
}

void anon_vma_link(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	if (anon_vma) {
		spin_lock(&anon_vma->lock);
		list_add(&vma->anon_vma_node, &anon_vma->head);
		validate_anon_vma(vma);
		spin_unlock(&anon_vma->lock);
	}
}

void anon_vma_unlink(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	int empty;

	if (!anon_vma)
		return;

	spin_lock(&anon_vma->lock);
	validate_anon_vma(vma);
	list_del(&vma->anon_vma_node);

	/* We must garbage collect the anon_vma if it's empty */
	empty = list_empty(&anon_vma->head);
	spin_unlock(&anon_vma->lock);

	if (empty)
		anon_vma_free(anon_vma);
}

static void anon_vma_ctor(void *data, kmem_cache_t *cachep, unsigned long flags)
{
	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
						SLAB_CTOR_CONSTRUCTOR) {
		struct anon_vma *anon_vma = data;

		spin_lock_init(&anon_vma->lock);
		INIT_LIST_HEAD(&anon_vma->head);
	}
}

void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor, NULL);
}

/*
 * Getting a lock on a stable anon_vma from a page off the LRU is
 * tricky: page_lock_anon_vma relies on RCU to guard against the races.
 */
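/*
 * This works because the anon_vma cache is created with
 * SLAB_DESTROY_BY_RCU (see anon_vma_init above): under rcu_read_lock
 * the memory that page->mapping points to cannot be handed back to the
 * page allocator, so even if the anon_vma has been freed (and perhaps
 * reused for another anon_vma) its spinlock remains safe to take.
 * Callers therefore cope with the anon_vma turning out to be empty or
 * stale for this page.
 */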
static struct anon_vma *page_lock_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma = NULL;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long) page->mapping;
	if (!(anon_mapping & PAGE_MAPPING_ANON))
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	spin_lock(&anon_vma->lock);
out:
	rcu_read_unlock();
	return anon_vma;
}

/*
 * At what user virtual address is page expected in vma?
 */
static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
		/* page should be within any vma from prio_tree_next */
		BUG_ON(!PageAnon(page));
		return -EFAULT;
	}
	return address;
}
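
/*
 * For example, with 4K pages, a vma with vm_start 0x08048000 and
 * vm_pgoff 0x10, mapping a page whose ->index is 0x18, gives
 * 0x08048000 + ((0x18 - 0x10) << 12) = 0x08050000.
 */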

/*
 * At what user virtual address is page expected in vma? checking that the
 * page matches the vma: currently only used on anon pages, by unuse_vma;
 */
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
	if (PageAnon(page)) {
		if ((void *)vma->anon_vma !=
		    (void *)page->mapping - PAGE_MAPPING_ANON)
			return -EFAULT;
	} else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
		if (!vma->vm_file ||
		    vma->vm_file->f_mapping != page->mapping)
			return -EFAULT;
	} else
		return -EFAULT;
	return vma_address(page, vma);
}

/*
 * Check that @page is mapped at @address into @mm.
 *
 * On success returns with pte mapped and locked.
 */
pte_t *page_check_address(struct page *page, struct mm_struct *mm,
			  unsigned long address, spinlock_t **ptlp)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return NULL;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return NULL;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return NULL;

	pte = pte_offset_map(pmd, address);
	/* Make a quick check before getting the lock */
	if (!pte_present(*pte)) {
		pte_unmap(pte);
		return NULL;
	}

	ptl = pte_lockptr(mm, pmd);
	spin_lock(ptl);
	if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
		*ptlp = ptl;
		return pte;
	}
	pte_unmap_unlock(pte, ptl);
	return NULL;
}
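
/*
 * The usual pattern is: compute the address with vma_address(), call
 * page_check_address(), operate on the pte, then drop the lock with
 * pte_unmap_unlock(pte, ptl) - as page_referenced_one() and
 * try_to_unmap_one() below do.
 */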

/*
 * Subfunctions of page_referenced: page_referenced_one called
 * repeatedly from either page_referenced_anon or page_referenced_file.
 */
static int page_referenced_one(struct page *page,
	struct vm_area_struct *vma, unsigned int *mapcount)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	spinlock_t *ptl;
	int referenced = 0;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address, &ptl);
	if (!pte)
		goto out;

	if (ptep_clear_flush_young(vma, address, pte))
		referenced++;

	/* Pretend the page is referenced if the task has the
	   swap token and is in the middle of a page fault. */
	if (mm != current->mm && has_swap_token(mm) &&
			rwsem_is_locked(&mm->mmap_sem))
		referenced++;

	(*mapcount)--;
	pte_unmap_unlock(pte, ptl);
out:
	return referenced;
}

static int page_referenced_anon(struct page *page)
{
	unsigned int mapcount;
	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;
	int referenced = 0;

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return referenced;

	mapcount = page_mapcount(page);
	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		referenced += page_referenced_one(page, vma, &mapcount);
		if (!mapcount)
			break;
	}
	spin_unlock(&anon_vma->lock);
	return referenced;
}

/**
 * page_referenced_file - referenced check for object-based rmap
 * @page: the page we're checking references on.
 *
 * For an object-based mapped page, find all the places it is mapped and
 * check/clear the referenced flag.  This is done by following the page->mapping
 * pointer, then walking the chain of vmas it holds.  It returns the number
 * of references it found.
 *
 * This function is only called from page_referenced for object-based pages.
 */
static int page_referenced_file(struct page *page)
{
	unsigned int mapcount;
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int referenced = 0;

	/*
	 * The caller's checks on page->mapping and !PageAnon have made
	 * sure that this is a file page: the check for page->mapping
	 * excludes the case just before it gets set on an anon page.
	 */
	BUG_ON(PageAnon(page));

	/*
	 * The page lock not only makes sure that page->mapping cannot
	 * suddenly be NULLified by truncation, it makes sure that the
	 * structure at mapping cannot be freed and reused yet,
	 * so we can safely take mapping->i_mmap_lock.
	 */
	BUG_ON(!PageLocked(page));

	spin_lock(&mapping->i_mmap_lock);

	/*
	 * i_mmap_lock does not stabilize mapcount at all, but mapcount
	 * is more likely to be accurate if we note it after spinning.
	 */
	mapcount = page_mapcount(page);

	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		if ((vma->vm_flags & (VM_LOCKED|VM_MAYSHARE))
				  == (VM_LOCKED|VM_MAYSHARE)) {
			referenced++;
			break;
		}
		referenced += page_referenced_one(page, vma, &mapcount);
		if (!mapcount)
			break;
	}

	spin_unlock(&mapping->i_mmap_lock);
	return referenced;
}

/**
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds lock on the page
 *
 * Quick test_and_clear_referenced for all mappings to a page,
 * returns the number of ptes which referenced the page.
 */
int page_referenced(struct page *page, int is_locked)
{
	int referenced = 0;

	if (page_test_and_clear_young(page))
		referenced++;

	if (TestClearPageReferenced(page))
		referenced++;

	if (page_mapped(page) && page->mapping) {
		if (PageAnon(page))
			referenced += page_referenced_anon(page);
		else if (is_locked)
			referenced += page_referenced_file(page);
		else if (TestSetPageLocked(page))
			referenced++;
		else {
			if (page->mapping)
				referenced += page_referenced_file(page);
			unlock_page(page);
		}
	}
	return referenced;
}

/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 *
 * The caller needs to hold the pte lock.
 */
void page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
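	/*
	 * _mapcount starts at -1, so atomic_inc_and_test is true only
	 * for the very first mapping of the page: that is the moment
	 * to set up the anon rmap fields below.
	 */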
	if (atomic_inc_and_test(&page->_mapcount)) {
		struct anon_vma *anon_vma = vma->anon_vma;

		BUG_ON(!anon_vma);
		anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
		page->mapping = (struct address_space *) anon_vma;

		page->index = linear_page_index(vma, address);

		inc_page_state(nr_mapped);
	}
	/* else checking page index and mapping is racy */
}

/**
 * page_add_file_rmap - add pte mapping to a file page
 * @page: the page to add the mapping to
 *
 * The caller needs to hold the pte lock.
 */
void page_add_file_rmap(struct page *page)
{
	BUG_ON(PageAnon(page));
	BUG_ON(!pfn_valid(page_to_pfn(page)));

	if (atomic_inc_and_test(&page->_mapcount))
		inc_page_state(nr_mapped);
}

/**
 * page_remove_rmap - take down pte mapping from a page
 * @page: page to remove mapping from
 *
 * The caller needs to hold the pte lock.
 */
void page_remove_rmap(struct page *page)
{
	if (atomic_add_negative(-1, &page->_mapcount)) {
		BUG_ON(page_mapcount(page) < 0);
		/*
		 * It would be tidy to reset the PageAnon mapping here,
		 * but that might overwrite a racing page_add_anon_rmap
		 * which increments mapcount after us but sets mapping
		 * before us: so leave the reset to free_hot_cold_page,
		 * and remember that it's only reliable while mapped.
		 * Leaving it set also helps swapoff to reinstate ptes
		 * faster for those pages still in swapcache.
		 */
		if (page_test_and_clear_dirty(page))
			set_page_dirty(page);
		dec_page_state(nr_mapped);
	}
}

/*
 * Subfunctions of try_to_unmap: try_to_unmap_one called
 * repeatedly from either try_to_unmap_anon or try_to_unmap_file.
 */
static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	int ret = SWAP_AGAIN;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address, &ptl);
	if (!pte)
		goto out;

	/*
	 * If the page is mlock()d, we cannot swap it out.
	 * If it's recently referenced (perhaps page_referenced
	 * skipped over this mm) then we should reactivate it.
	 */
	if ((vma->vm_flags & VM_LOCKED) ||
			ptep_clear_flush_young(vma, address, pte)) {
		ret = SWAP_FAIL;
		goto out_unmap;
	}

	/* Nuke the page table entry. */
	flush_cache_page(vma, address, page_to_pfn(page));
	pteval = ptep_clear_flush(vma, address, pte);

	/* Move the dirty bit to the physical page now the pte is gone. */
	if (pte_dirty(pteval))
		set_page_dirty(page);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	if (PageAnon(page)) {
		swp_entry_t entry = { .val = page_private(page) };
		/*
		 * Store the swap location in the pte.
		 * See handle_pte_fault() ...
		 */
		BUG_ON(!PageSwapCache(page));
		swap_duplicate(entry);
		if (list_empty(&mm->mmlist)) {
			spin_lock(&mmlist_lock);
			if (list_empty(&mm->mmlist))
				list_add(&mm->mmlist, &init_mm.mmlist);
			spin_unlock(&mmlist_lock);
		}
		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
		BUG_ON(pte_file(*pte));
		dec_mm_counter(mm, anon_rss);
	} else
		dec_mm_counter(mm, file_rss);

	page_remove_rmap(page);
	page_cache_release(page);

out_unmap:
	pte_unmap_unlock(pte, ptl);
out:
	return ret;
}

/*
 * objrmap doesn't work for nonlinear VMAs because the assumption that
 * offset-into-file correlates with offset-into-virtual-addresses does not hold.
 * Consequently, given a particular page and its ->index, we cannot locate the
 * ptes which are mapping that page without an exhaustive linear search.
 *
 * So what this code does is a mini "virtual scan" of each nonlinear VMA which
 * maps the file to which the target page belongs.  The ->vm_private_data field
 * holds the current cursor into that scan.  Successive searches will circulate
 * around the vma's virtual address space.
 *
 * So as more replacement pressure is applied to the pages in a nonlinear VMA,
 * more scanning pressure is placed against them as well.   Eventually pages
 * will become fully unmapped and are eligible for eviction.
 *
 * For very sparsely populated VMAs this is a little inefficient - chances are
 * there won't be many ptes located within the scan cluster.  In this case
 * maybe we could scan further - to the end of the pte page, perhaps.
 */
#define CLUSTER_SIZE	min(32*PAGE_SIZE, PMD_SIZE)
#define CLUSTER_MASK	(~(CLUSTER_SIZE - 1))
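
/*
 * With 4K pages (and PMD_SIZE of at least 128K) CLUSTER_SIZE works out
 * to 32 * 4K = 128K, and CLUSTER_MASK rounds the scan start down to a
 * 128K boundary: each call to try_to_unmap_cluster() then examines at
 * most 32 ptes of one nonlinear vma.
 */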

static void try_to_unmap_cluster(unsigned long cursor,
	unsigned int *mapcount, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	struct page *page;
	unsigned long address;
	unsigned long end;

	address = (vma->vm_start + cursor) & CLUSTER_MASK;
	end = address + CLUSTER_SIZE;
	if (address < vma->vm_start)
		address = vma->vm_start;
	if (end > vma->vm_end)
		end = vma->vm_end;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return;

	pte = pte_offset_map_lock(mm, pmd, address, &ptl);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	for (; address < end; pte++, address += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, address, *pte);
		BUG_ON(!page || PageAnon(page));

		if (ptep_clear_flush_young(vma, address, pte))
			continue;

		/* Nuke the page table entry. */
		flush_cache_page(vma, address, pte_pfn(*pte));
		pteval = ptep_clear_flush(vma, address, pte);

		/* If nonlinear, store the file page offset in the pte. */
		if (page->index != linear_page_index(vma, address))
			set_pte_at(mm, address, pte, pgoff_to_pte(page->index));

		/* Move the dirty bit to the physical page now the pte is gone. */
		if (pte_dirty(pteval))
			set_page_dirty(page);

		page_remove_rmap(page);
		page_cache_release(page);
		dec_mm_counter(mm, file_rss);
		(*mapcount)--;
	}
	pte_unmap_unlock(pte - 1, ptl);
}

static int try_to_unmap_anon(struct page *page)
{
	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;
	int ret = SWAP_AGAIN;

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return ret;

	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		ret = try_to_unmap_one(page, vma);
		if (ret == SWAP_FAIL || !page_mapped(page))
			break;
	}
	spin_unlock(&anon_vma->lock);
	return ret;
}

/**
 * try_to_unmap_file - unmap file page using the object-based rmap method
 * @page: the page to unmap
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the address_space struct it points to.
 *
 * This function is only called from try_to_unmap for object-based pages.
 */
static int try_to_unmap_file(struct page *page)
{
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int ret = SWAP_AGAIN;
	unsigned long cursor;
	unsigned long max_nl_cursor = 0;
	unsigned long max_nl_size = 0;
	unsigned int mapcount;

	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		ret = try_to_unmap_one(page, vma);
		if (ret == SWAP_FAIL || !page_mapped(page))
			goto out;
	}

	if (list_empty(&mapping->i_mmap_nonlinear))
		goto out;

	list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
		if (vma->vm_flags & VM_LOCKED)
			continue;
		cursor = (unsigned long) vma->vm_private_data;
		if (cursor > max_nl_cursor)
			max_nl_cursor = cursor;
		cursor = vma->vm_end - vma->vm_start;
		if (cursor > max_nl_size)
			max_nl_size = cursor;
	}

	if (max_nl_size == 0) {	/* any nonlinears locked or reserved */
		ret = SWAP_FAIL;
		goto out;
	}

	/*
	 * We don't try to search for this page in the nonlinear vmas,
	 * and page_referenced wouldn't have found it anyway.  Instead
	 * just walk the nonlinear vmas trying to age and unmap some.
	 * The mapcount of the page we came in with is irrelevant,
	 * but even so use it as a guide to how hard we should try?
	 */
	mapcount = page_mapcount(page);
	if (!mapcount)
		goto out;
	cond_resched_lock(&mapping->i_mmap_lock);

	max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
	if (max_nl_cursor == 0)
		max_nl_cursor = CLUSTER_SIZE;

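	/*
	 * Scan the nonlinear vmas round-robin: advance each vma's cursor
	 * by CLUSTER_SIZE per pass up to the current ceiling, then raise
	 * the ceiling (max_nl_cursor) by CLUSTER_SIZE, until the whole
	 * max_nl_size range has been covered or enough ptes are unmapped.
	 */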
	do {
		list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
			if (vma->vm_flags & VM_LOCKED)
				continue;
			cursor = (unsigned long) vma->vm_private_data;
			while ( cursor < max_nl_cursor &&
				cursor < vma->vm_end - vma->vm_start) {
				try_to_unmap_cluster(cursor, &mapcount, vma);
				cursor += CLUSTER_SIZE;
				vma->vm_private_data = (void *) cursor;
				if ((int)mapcount <= 0)
					goto out;
			}
			vma->vm_private_data = (void *) max_nl_cursor;
		}
		cond_resched_lock(&mapping->i_mmap_lock);
		max_nl_cursor += CLUSTER_SIZE;
	} while (max_nl_cursor <= max_nl_size);

	/*
	 * Don't loop forever (perhaps all the remaining pages are
	 * in locked vmas).  Reset cursor on all unreserved nonlinear
	 * vmas, now forgetting on which ones it had fallen behind.
	 */
	list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
		vma->vm_private_data = NULL;
out:
	spin_unlock(&mapping->i_mmap_lock);
	return ret;
}

/**
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
 *
 * Tries to remove all the page table entries which are mapping this
 * page, used in the pageout path.  Caller must hold the page lock.
 * Return values are:
 *
 * SWAP_SUCCESS	- we succeeded in removing all mappings
 * SWAP_AGAIN	- we missed a mapping, try again later
 * SWAP_FAIL	- the page is unswappable
 */
int try_to_unmap(struct page *page)
{
	int ret;

	BUG_ON(!PageLocked(page));

	if (PageAnon(page))
		ret = try_to_unmap_anon(page);
	else
		ret = try_to_unmap_file(page);

	if (!page_mapped(page))
		ret = SWAP_SUCCESS;
	return ret;
}
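
/*
 * Illustrative sketch (not the actual reclaim code) of how a pageout
 * path might act on the return values of try_to_unmap() for a page it
 * has locked:
 *
 *	switch (try_to_unmap(page)) {
 *	case SWAP_FAIL:		keep the page active, it cannot go;
 *	case SWAP_AGAIN:	keep the page, retry on a later pass;
 *	case SWAP_SUCCESS:	proceed to write out and/or free it;
 *	}
 */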