rmap.c revision 4c21e2f2441dc5fbb957b030333f5a3f2d02dea7
/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins <hugh@veritas.com> 2003, 2004
 */

/*
 * Lock ordering in mm:
 *
 * inode->i_sem	(while writing or truncating, not reading or faulting)
 *   inode->i_alloc_sem
 *
 * When a page fault occurs in writing from user to file, down_read
 * of mmap_sem nests within i_sem; in sys_msync, i_sem nests within
 * down_read of mmap_sem; i_sem and down_write of mmap_sem are never
 * taken together; in truncation, i_sem is taken outermost.
 *
 * mm->mmap_sem
 *   page->flags PG_locked (lock_page)
 *     mapping->i_mmap_lock
 *       anon_vma->lock
 *         mm->page_table_lock
 *           zone->lru_lock (in mark_page_accessed)
 *           swap_lock (in swap_duplicate, swap_info_get)
 *             mmlist_lock (in mmput, drain_mmlist and others)
 *             mapping->private_lock (in __set_page_dirty_buffers)
 *             inode_lock (in set_page_dirty's __mark_inode_dirty)
 *               sb_lock (within inode_lock in fs/fs-writeback.c)
 *               mapping->tree_lock (widely used, in set_page_dirty,
 *                         in arch-dependent flush_dcache_mmap_lock,
 *                         within inode_lock in __sync_single_inode)
 */

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>

#include <asm/tlbflush.h>

//#define RMAP_DEBUG /* can be enabled only for debugging */

kmem_cache_t *anon_vma_cachep;

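/*
 * Debug-only sanity check (needs RMAP_DEBUG): verify that find_vma is
 * really linked on its anon_vma's list.  Compiles away to nothing in
 * normal builds.
 */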
static inline void validate_anon_vma(struct vm_area_struct *find_vma)
{
#ifdef RMAP_DEBUG
	struct anon_vma *anon_vma = find_vma->anon_vma;
	struct vm_area_struct *vma;
	unsigned int mapcount = 0;
	int found = 0;

	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		mapcount++;
		BUG_ON(mapcount > 100000);
		if (vma == find_vma)
			found = 1;
	}
	BUG_ON(!found);
#endif
}

/* This must be called under the mmap_sem. */
int anon_vma_prepare(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	might_sleep();
	if (unlikely(!anon_vma)) {
		struct mm_struct *mm = vma->vm_mm;
		struct anon_vma *allocated, *locked;

		anon_vma = find_mergeable_anon_vma(vma);
		if (anon_vma) {
			allocated = NULL;
			locked = anon_vma;
			spin_lock(&locked->lock);
		} else {
			anon_vma = anon_vma_alloc();
			if (unlikely(!anon_vma))
				return -ENOMEM;
			allocated = anon_vma;
			locked = NULL;
		}

		/* page_table_lock to protect against threads */
		spin_lock(&mm->page_table_lock);
		if (likely(!vma->anon_vma)) {
			vma->anon_vma = anon_vma;
			list_add(&vma->anon_vma_node, &anon_vma->head);
			allocated = NULL;
		}
		spin_unlock(&mm->page_table_lock);

		if (locked)
			spin_unlock(&locked->lock);
		if (unlikely(allocated))
			anon_vma_free(allocated);
	}
	return 0;
}

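/*
 * Fold next into vma when the two vmas are merged: both must already
 * share the same anon_vma, so only next's list entry needs to be dropped.
 */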
void __anon_vma_merge(struct vm_area_struct *vma, struct vm_area_struct *next)
{
	BUG_ON(vma->anon_vma != next->anon_vma);
	list_del(&next->anon_vma_node);
}

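/*
 * __anon_vma_link adds the vma to its anon_vma's list without taking
 * the anon_vma lock; the plain anon_vma_link below does the same thing
 * under the lock.
 */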
void __anon_vma_link(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	if (anon_vma) {
		list_add(&vma->anon_vma_node, &anon_vma->head);
		validate_anon_vma(vma);
	}
}

void anon_vma_link(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	if (anon_vma) {
		spin_lock(&anon_vma->lock);
		list_add(&vma->anon_vma_node, &anon_vma->head);
		validate_anon_vma(vma);
		spin_unlock(&anon_vma->lock);
	}
}

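/*
 * Remove the vma from its anon_vma's list; free the anon_vma once the
 * last vma is gone (SLAB_DESTROY_BY_RCU keeps the freed structure safe
 * for concurrent page_lock_anon_vma callers).
 */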
void anon_vma_unlink(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	int empty;

	if (!anon_vma)
		return;

	spin_lock(&anon_vma->lock);
	validate_anon_vma(vma);
	list_del(&vma->anon_vma_node);

	/* We must garbage collect the anon_vma if it's empty */
	empty = list_empty(&anon_vma->head);
	spin_unlock(&anon_vma->lock);

	if (empty)
		anon_vma_free(anon_vma);
}

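/*
 * Slab constructor: set up the spinlock and the vma list head for each
 * newly constructed anon_vma object.
 */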
static void anon_vma_ctor(void *data, kmem_cache_t *cachep, unsigned long flags)
{
	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
						SLAB_CTOR_CONSTRUCTOR) {
		struct anon_vma *anon_vma = data;

		spin_lock_init(&anon_vma->lock);
		INIT_LIST_HEAD(&anon_vma->head);
	}
}

void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor, NULL);
}

/*
 * Getting a lock on a stable anon_vma from a page off the LRU is
 * tricky: page_lock_anon_vma relies on RCU to guard against the races.
 */
static struct anon_vma *page_lock_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma = NULL;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long) page->mapping;
	if (!(anon_mapping & PAGE_MAPPING_ANON))
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	spin_lock(&anon_vma->lock);
out:
	rcu_read_unlock();
	return anon_vma;
}

/*
 * At what user virtual address is page expected in vma?
 */
static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
		/* page should be within any vma from prio_tree_next */
		BUG_ON(!PageAnon(page));
		return -EFAULT;
	}
	return address;
}

/*
 * At what user virtual address is page expected in vma? Checking that the
 * page matches the vma: currently only used by unuse_process, on anon pages.
 */
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
	if (PageAnon(page)) {
		if ((void *)vma->anon_vma !=
		    (void *)page->mapping - PAGE_MAPPING_ANON)
			return -EFAULT;
	} else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
		if (vma->vm_file->f_mapping != page->mapping)
			return -EFAULT;
	} else
		return -EFAULT;
	return vma_address(page, vma);
}

/*
 * Check that @page is mapped at @address into @mm.
 *
 * On success returns with pte mapped and locked.
 */
pte_t *page_check_address(struct page *page, struct mm_struct *mm,
			  unsigned long address, spinlock_t **ptlp)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return NULL;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return NULL;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return NULL;

	pte = pte_offset_map(pmd, address);
	/* Make a quick check before getting the lock */
	if (!pte_present(*pte)) {
		pte_unmap(pte);
		return NULL;
	}

	ptl = pte_lockptr(mm, pmd);
	spin_lock(ptl);
	if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
		*ptlp = ptl;
		return pte;
	}
	pte_unmap_unlock(pte, ptl);
	return NULL;
}

/*
 * Subfunctions of page_referenced: page_referenced_one called
 * repeatedly from either page_referenced_anon or page_referenced_file.
 */
static int page_referenced_one(struct page *page,
	struct vm_area_struct *vma, unsigned int *mapcount, int ignore_token)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	spinlock_t *ptl;
	int referenced = 0;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address, &ptl);
	if (!pte)
		goto out;

	if (ptep_clear_flush_young(vma, address, pte))
		referenced++;

	/* Pretend the page is referenced if the task has the
	   swap token and is in the middle of a page fault. */
	if (mm != current->mm && !ignore_token && has_swap_token(mm) &&
			rwsem_is_locked(&mm->mmap_sem))
		referenced++;

	(*mapcount)--;
	pte_unmap_unlock(pte, ptl);
out:
	return referenced;
}

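/*
 * Anonymous half of page_referenced: take the anon_vma lock and ask
 * page_referenced_one about every vma on the anon_vma's list, stopping
 * early once all mappings have been accounted for.
 */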
static int page_referenced_anon(struct page *page, int ignore_token)
{
	unsigned int mapcount;
	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;
	int referenced = 0;

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return referenced;

	mapcount = page_mapcount(page);
	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		referenced += page_referenced_one(page, vma, &mapcount,
							ignore_token);
		if (!mapcount)
			break;
	}
	spin_unlock(&anon_vma->lock);
	return referenced;
}

/**
 * page_referenced_file - referenced check for object-based rmap
 * @page: the page we're checking references on.
 *
 * For an object-based mapped page, find all the places it is mapped and
 * check/clear the referenced flag.  This is done by following the page->mapping
 * pointer, then walking the chain of vmas it holds.  It returns the number
 * of references it found.
 *
 * This function is only called from page_referenced for object-based pages.
 */
static int page_referenced_file(struct page *page, int ignore_token)
{
	unsigned int mapcount;
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int referenced = 0;

	/*
	 * The caller's checks on page->mapping and !PageAnon have made
	 * sure that this is a file page: the check for page->mapping
	 * excludes the case just before it gets set on an anon page.
	 */
	BUG_ON(PageAnon(page));

	/*
	 * The page lock not only makes sure that page->mapping cannot
	 * suddenly be NULLified by truncation, it makes sure that the
	 * structure at mapping cannot be freed and reused yet,
	 * so we can safely take mapping->i_mmap_lock.
	 */
	BUG_ON(!PageLocked(page));

	spin_lock(&mapping->i_mmap_lock);

	/*
	 * i_mmap_lock does not stabilize mapcount at all, but mapcount
	 * is more likely to be accurate if we note it after spinning.
	 */
	mapcount = page_mapcount(page);

	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		if ((vma->vm_flags & (VM_LOCKED|VM_MAYSHARE))
				  == (VM_LOCKED|VM_MAYSHARE)) {
			referenced++;
			break;
		}
		referenced += page_referenced_one(page, vma, &mapcount,
							ignore_token);
		if (!mapcount)
			break;
	}

	spin_unlock(&mapping->i_mmap_lock);
	return referenced;
}

/**
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds lock on the page
 * @ignore_token: don't give the swap token holder extra referenced credit
 *
 * Quick test_and_clear_referenced for all mappings to a page,
 * returns the number of ptes which referenced the page.
 */
int page_referenced(struct page *page, int is_locked, int ignore_token)
{
	int referenced = 0;

	if (!swap_token_default_timeout)
		ignore_token = 1;

	if (page_test_and_clear_young(page))
		referenced++;

	if (TestClearPageReferenced(page))
		referenced++;

	if (page_mapped(page) && page->mapping) {
		if (PageAnon(page))
			referenced += page_referenced_anon(page, ignore_token);
		else if (is_locked)
			referenced += page_referenced_file(page, ignore_token);
		else if (TestSetPageLocked(page))
			referenced++;
		else {
			if (page->mapping)
				referenced += page_referenced_file(page,
								ignore_token);
			unlock_page(page);
		}
	}
	return referenced;
}

/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 *
 * The caller needs to hold the pte lock.
 */
void page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	if (atomic_inc_and_test(&page->_mapcount)) {
		struct anon_vma *anon_vma = vma->anon_vma;

		BUG_ON(!anon_vma);
		anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
		page->mapping = (struct address_space *) anon_vma;

		page->index = linear_page_index(vma, address);

		inc_page_state(nr_mapped);
	}
	/* else checking page index and mapping is racy */
}

/**
 * page_add_file_rmap - add pte mapping to a file page
 * @page: the page to add the mapping to
 *
 * The caller needs to hold the pte lock.
 */
void page_add_file_rmap(struct page *page)
{
	BUG_ON(PageAnon(page));
	BUG_ON(!pfn_valid(page_to_pfn(page)));

	if (atomic_inc_and_test(&page->_mapcount))
		inc_page_state(nr_mapped);
}

/**
 * page_remove_rmap - take down pte mapping from a page
 * @page: page to remove mapping from
 *
 * The caller needs to hold the pte lock.
 */
void page_remove_rmap(struct page *page)
{
	if (atomic_add_negative(-1, &page->_mapcount)) {
		BUG_ON(page_mapcount(page) < 0);
		/*
		 * It would be tidy to reset the PageAnon mapping here,
		 * but that might overwrite a racing page_add_anon_rmap
		 * which increments mapcount after us but sets mapping
		 * before us: so leave the reset to free_hot_cold_page,
		 * and remember that it's only reliable while mapped.
		 * Leaving it set also helps swapoff to reinstate ptes
		 * faster for those pages still in swapcache.
		 */
		if (page_test_and_clear_dirty(page))
			set_page_dirty(page);
		dec_page_state(nr_mapped);
	}
}

/*
 * Subfunctions of try_to_unmap: try_to_unmap_one called
 * repeatedly from either try_to_unmap_anon or try_to_unmap_file.
 */
static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	int ret = SWAP_AGAIN;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address, &ptl);
	if (!pte)
		goto out;

	/*
	 * If the page is mlock()d, we cannot swap it out.
	 * If it's recently referenced (perhaps page_referenced
	 * skipped over this mm) then we should reactivate it.
	 *
	 * Pages belonging to VM_RESERVED regions should not happen here.
	 */
	if ((vma->vm_flags & (VM_LOCKED|VM_RESERVED)) ||
			ptep_clear_flush_young(vma, address, pte)) {
		ret = SWAP_FAIL;
		goto out_unmap;
	}

	/* Nuke the page table entry. */
	flush_cache_page(vma, address, page_to_pfn(page));
	pteval = ptep_clear_flush(vma, address, pte);

	/* Move the dirty bit to the physical page now the pte is gone. */
	if (pte_dirty(pteval))
		set_page_dirty(page);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	if (PageAnon(page)) {
		swp_entry_t entry = { .val = page_private(page) };
		/*
		 * Store the swap location in the pte.
		 * See handle_pte_fault() ...
		 */
		BUG_ON(!PageSwapCache(page));
		swap_duplicate(entry);
		if (list_empty(&mm->mmlist)) {
			spin_lock(&mmlist_lock);
			list_add(&mm->mmlist, &init_mm.mmlist);
			spin_unlock(&mmlist_lock);
		}
		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
		BUG_ON(pte_file(*pte));
		dec_mm_counter(mm, anon_rss);
	} else
		dec_mm_counter(mm, file_rss);

	page_remove_rmap(page);
	page_cache_release(page);

out_unmap:
	pte_unmap_unlock(pte, ptl);
out:
	return ret;
}

/*
 * objrmap doesn't work for nonlinear VMAs because the assumption that
 * offset-into-file correlates with offset-into-virtual-addresses does not hold.
 * Consequently, given a particular page and its ->index, we cannot locate the
 * ptes which are mapping that page without an exhaustive linear search.
 *
 * So what this code does is a mini "virtual scan" of each nonlinear VMA which
 * maps the file to which the target page belongs.  The ->vm_private_data field
 * holds the current cursor into that scan.  Successive searches will circulate
 * around the vma's virtual address space.
 *
 * So as more replacement pressure is applied to the pages in a nonlinear VMA,
 * more scanning pressure is placed against them as well.  Eventually pages
 * will become fully unmapped and eligible for eviction.
 *
 * For very sparsely populated VMAs this is a little inefficient - chances are
 * there won't be many ptes located within the scan cluster.  In this case
 * maybe we could scan further - to the end of the pte page, perhaps.
 */
#define CLUSTER_SIZE	min(32*PAGE_SIZE, PMD_SIZE)
#define CLUSTER_MASK	(~(CLUSTER_SIZE - 1))

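/*
 * Unmap one cluster of ptes in a nonlinear vma, starting at the vma's
 * scan cursor: recently referenced ptes are skipped, and the file
 * offset is written back into any pte that was mapped nonlinearly.
 */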
static void try_to_unmap_cluster(unsigned long cursor,
	unsigned int *mapcount, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	struct page *page;
	unsigned long address;
	unsigned long end;
	unsigned long pfn;

	address = (vma->vm_start + cursor) & CLUSTER_MASK;
	end = address + CLUSTER_SIZE;
	if (address < vma->vm_start)
		address = vma->vm_start;
	if (end > vma->vm_end)
		end = vma->vm_end;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return;

	pte = pte_offset_map_lock(mm, pmd, address, &ptl);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	for (; address < end; pte++, address += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;

		pfn = pte_pfn(*pte);
		if (unlikely(!pfn_valid(pfn))) {
			print_bad_pte(vma, *pte, address);
			continue;
		}

		page = pfn_to_page(pfn);
		BUG_ON(PageAnon(page));

		if (ptep_clear_flush_young(vma, address, pte))
			continue;

		/* Nuke the page table entry. */
		flush_cache_page(vma, address, pfn);
		pteval = ptep_clear_flush(vma, address, pte);

		/* If nonlinear, store the file page offset in the pte. */
		if (page->index != linear_page_index(vma, address))
			set_pte_at(mm, address, pte, pgoff_to_pte(page->index));

		/* Move the dirty bit to the physical page now the pte is gone. */
		if (pte_dirty(pteval))
			set_page_dirty(page);

		page_remove_rmap(page);
		page_cache_release(page);
		dec_mm_counter(mm, file_rss);
		(*mapcount)--;
	}
	pte_unmap_unlock(pte - 1, ptl);
}

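/*
 * Anonymous half of try_to_unmap: walk the vmas on the page's anon_vma
 * list, calling try_to_unmap_one on each, until the page is no longer
 * mapped or a vma refuses (SWAP_FAIL).
 */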
static int try_to_unmap_anon(struct page *page)
{
	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;
	int ret = SWAP_AGAIN;

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return ret;

	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		ret = try_to_unmap_one(page, vma);
		if (ret == SWAP_FAIL || !page_mapped(page))
			break;
	}
	spin_unlock(&anon_vma->lock);
	return ret;
}

/**
 * try_to_unmap_file - unmap file page using the object-based rmap method
 * @page: the page to unmap
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the address_space struct it points to.
 *
 * This function is only called from try_to_unmap for object-based pages.
 */
static int try_to_unmap_file(struct page *page)
{
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int ret = SWAP_AGAIN;
	unsigned long cursor;
	unsigned long max_nl_cursor = 0;
	unsigned long max_nl_size = 0;
	unsigned int mapcount;

	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		ret = try_to_unmap_one(page, vma);
		if (ret == SWAP_FAIL || !page_mapped(page))
			goto out;
	}

	if (list_empty(&mapping->i_mmap_nonlinear))
		goto out;

	list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
		if (vma->vm_flags & (VM_LOCKED|VM_RESERVED))
			continue;
		cursor = (unsigned long) vma->vm_private_data;
		if (cursor > max_nl_cursor)
			max_nl_cursor = cursor;
		cursor = vma->vm_end - vma->vm_start;
		if (cursor > max_nl_size)
			max_nl_size = cursor;
	}

	if (max_nl_size == 0) {	/* all nonlinears locked or reserved */
		ret = SWAP_FAIL;
		goto out;
	}

	/*
	 * We don't try to search for this page in the nonlinear vmas,
	 * and page_referenced wouldn't have found it anyway.  Instead
	 * just walk the nonlinear vmas trying to age and unmap some.
	 * The mapcount of the page we came in with is irrelevant,
	 * but even so use it as a guide to how hard we should try?
	 */
	mapcount = page_mapcount(page);
	if (!mapcount)
		goto out;
	cond_resched_lock(&mapping->i_mmap_lock);

	max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
	if (max_nl_cursor == 0)
		max_nl_cursor = CLUSTER_SIZE;

	do {
		list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
			if (vma->vm_flags & (VM_LOCKED|VM_RESERVED))
				continue;
			cursor = (unsigned long) vma->vm_private_data;
			while (cursor < max_nl_cursor &&
				cursor < vma->vm_end - vma->vm_start) {
				try_to_unmap_cluster(cursor, &mapcount, vma);
				cursor += CLUSTER_SIZE;
				vma->vm_private_data = (void *) cursor;
				if ((int)mapcount <= 0)
					goto out;
			}
			vma->vm_private_data = (void *) max_nl_cursor;
		}
		cond_resched_lock(&mapping->i_mmap_lock);
		max_nl_cursor += CLUSTER_SIZE;
	} while (max_nl_cursor <= max_nl_size);

	/*
	 * Don't loop forever (perhaps all the remaining pages are
	 * in locked vmas).  Reset cursor on all unreserved nonlinear
	 * vmas, now forgetting on which ones it had fallen behind.
	 */
	list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
		if (!(vma->vm_flags & VM_RESERVED))
			vma->vm_private_data = NULL;
	}
out:
	spin_unlock(&mapping->i_mmap_lock);
	return ret;
}

/**
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
 *
 * Tries to remove all the page table entries which are mapping this
 * page, used in the pageout path.  Caller must hold the page lock.
 * Return values are:
 *
 * SWAP_SUCCESS	- we succeeded in removing all mappings
 * SWAP_AGAIN	- we missed a mapping, try again later
 * SWAP_FAIL	- the page is unswappable
 */
int try_to_unmap(struct page *page)
{
	int ret;

	BUG_ON(!PageLocked(page));

	if (PageAnon(page))
		ret = try_to_unmap_anon(page);
	else
		ret = try_to_unmap_file(page);

	if (!page_mapped(page))
		ret = SWAP_SUCCESS;
	return ret;
}