memory.c revision 46017e954826ac59e91df76341a3f76b45467847
1/*
2 *  linux/mm/memory.c
3 *
4 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
5 */
6
7/*
8 * demand-loading started 01.12.91 - seems it is high on the list of
9 * things wanted, and it should be easy to implement. - Linus
10 */
11
12/*
13 * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
14 * pages started 02.12.91, seems to work. - Linus.
15 *
16 * Tested sharing by executing about 30 /bin/sh: under the old kernel it
17 * would have taken more than the 6M I have free, but it worked well as
18 * far as I could see.
19 *
20 * Also corrected some "invalidate()"s - I wasn't doing enough of them.
21 */
22
23/*
24 * Real VM (paging to/from disk) started 18.12.91. Much more work and
25 * thought has to go into this. Oh, well..
26 * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
27 *		Found it. Everything seems to work now.
28 * 20.12.91  -  Ok, making the swap-device changeable like the root.
29 */
30
31/*
32 * 05.04.94  -  Multi-page memory management added for v1.1.
33 * 		Idea by Alex Bligh (alex@cconcepts.co.uk)
34 *
35 * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
36 *		(Gerhard.Wichert@pdb.siemens.de)
37 *
38 * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
39 */
40
41#include <linux/kernel_stat.h>
42#include <linux/mm.h>
43#include <linux/hugetlb.h>
44#include <linux/mman.h>
45#include <linux/swap.h>
46#include <linux/highmem.h>
47#include <linux/pagemap.h>
48#include <linux/rmap.h>
49#include <linux/module.h>
50#include <linux/delayacct.h>
51#include <linux/init.h>
52#include <linux/writeback.h>
53
54#include <asm/pgalloc.h>
55#include <asm/uaccess.h>
56#include <asm/tlb.h>
57#include <asm/tlbflush.h>
58#include <asm/pgtable.h>
59
60#include <linux/swapops.h>
61#include <linux/elf.h>
62
63#ifndef CONFIG_NEED_MULTIPLE_NODES
64/* use the per-pgdat data instead for discontigmem - mbligh */
65unsigned long max_mapnr;
66struct page *mem_map;
67
68EXPORT_SYMBOL(max_mapnr);
69EXPORT_SYMBOL(mem_map);
70#endif
71
72unsigned long num_physpages;
73/*
74 * A number of key systems in x86 including ioremap() rely on the assumption
75 * that high_memory defines the upper bound on direct map memory, the end
76 * of ZONE_NORMAL.  Under CONFIG_DISCONTIG this means that max_low_pfn and
77 * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
78 * and ZONE_HIGHMEM.
79 */
80void * high_memory;
81
82EXPORT_SYMBOL(num_physpages);
83EXPORT_SYMBOL(high_memory);
84
85int randomize_va_space __read_mostly = 1;
86
87static int __init disable_randmaps(char *s)
88{
89	randomize_va_space = 0;
90	return 1;
91}
92__setup("norandmaps", disable_randmaps);
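
/*
 * Booting with "norandmaps" clears randomize_va_space before userspace
 * starts; the same setting is also reachable at run time through the
 * kernel.randomize_va_space sysctl (/proc/sys/kernel/randomize_va_space).
 */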
93
94
95/*
96 * If a p?d_bad entry is found while walking page tables, report
97 * the error, before resetting entry to p?d_none.  Usually (but
98 * very seldom) called out from the p?d_none_or_clear_bad macros.
99 */
100
101void pgd_clear_bad(pgd_t *pgd)
102{
103	pgd_ERROR(*pgd);
104	pgd_clear(pgd);
105}
106
107void pud_clear_bad(pud_t *pud)
108{
109	pud_ERROR(*pud);
110	pud_clear(pud);
111}
112
113void pmd_clear_bad(pmd_t *pmd)
114{
115	pmd_ERROR(*pmd);
116	pmd_clear(pmd);
117}
118
119/*
120 * Note: this doesn't free the actual pages themselves. That
121 * has been handled earlier when unmapping all the memory regions.
122 */
123static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd)
124{
125	struct page *page = pmd_page(*pmd);
126	pmd_clear(pmd);
127	pte_lock_deinit(page);
128	pte_free_tlb(tlb, page);
129	dec_zone_page_state(page, NR_PAGETABLE);
130	tlb->mm->nr_ptes--;
131}
132
133static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
134				unsigned long addr, unsigned long end,
135				unsigned long floor, unsigned long ceiling)
136{
137	pmd_t *pmd;
138	unsigned long next;
139	unsigned long start;
140
141	start = addr;
142	pmd = pmd_offset(pud, addr);
143	do {
144		next = pmd_addr_end(addr, end);
145		if (pmd_none_or_clear_bad(pmd))
146			continue;
147		free_pte_range(tlb, pmd);
148	} while (pmd++, addr = next, addr != end);
149
150	start &= PUD_MASK;
151	if (start < floor)
152		return;
153	if (ceiling) {
154		ceiling &= PUD_MASK;
155		if (!ceiling)
156			return;
157	}
158	if (end - 1 > ceiling - 1)
159		return;
160
161	pmd = pmd_offset(pud, start);
162	pud_clear(pud);
163	pmd_free_tlb(tlb, pmd);
164}
165
166static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
167				unsigned long addr, unsigned long end,
168				unsigned long floor, unsigned long ceiling)
169{
170	pud_t *pud;
171	unsigned long next;
172	unsigned long start;
173
174	start = addr;
175	pud = pud_offset(pgd, addr);
176	do {
177		next = pud_addr_end(addr, end);
178		if (pud_none_or_clear_bad(pud))
179			continue;
180		free_pmd_range(tlb, pud, addr, next, floor, ceiling);
181	} while (pud++, addr = next, addr != end);
182
183	start &= PGDIR_MASK;
184	if (start < floor)
185		return;
186	if (ceiling) {
187		ceiling &= PGDIR_MASK;
188		if (!ceiling)
189			return;
190	}
191	if (end - 1 > ceiling - 1)
192		return;
193
194	pud = pud_offset(pgd, start);
195	pgd_clear(pgd);
196	pud_free_tlb(tlb, pud);
197}
198
199/*
200 * This function frees user-level page tables of a process.
201 *
202 * Must be called with pagetable lock held.
203 */
204void free_pgd_range(struct mmu_gather **tlb,
205			unsigned long addr, unsigned long end,
206			unsigned long floor, unsigned long ceiling)
207{
208	pgd_t *pgd;
209	unsigned long next;
210	unsigned long start;
211
212	/*
213	 * The next few lines have given us lots of grief...
214	 *
215	 * Why are we testing PMD* at this top level?  Because often
216	 * there will be no work to do at all, and we'd prefer not to
217	 * go all the way down to the bottom just to discover that.
218	 *
219	 * Why all these "- 1"s?  Because 0 represents both the bottom
220	 * of the address space and the top of it (using -1 for the
221	 * top wouldn't help much: the masks would do the wrong thing).
222	 * The rule is that addr 0 and floor 0 refer to the bottom of
223	 * the address space, but end 0 and ceiling 0 refer to the top.
224	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
225	 * that end 0 case should be mythical).
226	 *
227	 * Wherever addr is brought up or ceiling brought down, we must
228	 * be careful to reject "the opposite 0" before it confuses the
229	 * subsequent tests.  But what about where end is brought down
230	 * by PMD_SIZE below? No, end can't go down to 0 there.
231	 *
232	 * Whereas we round start (addr) and ceiling down, by different
233	 * masks at different levels, in order to test whether a table
234	 * now has no other vmas using it, so can be freed, we don't
235	 * bother to round floor or end up - the tests don't need that.
236	 */
237
238	addr &= PMD_MASK;
239	if (addr < floor) {
240		addr += PMD_SIZE;
241		if (!addr)
242			return;
243	}
244	if (ceiling) {
245		ceiling &= PMD_MASK;
246		if (!ceiling)
247			return;
248	}
249	if (end - 1 > ceiling - 1)
250		end -= PMD_SIZE;
251	if (addr > end - 1)
252		return;
253
254	start = addr;
255	pgd = pgd_offset((*tlb)->mm, addr);
256	do {
257		next = pgd_addr_end(addr, end);
258		if (pgd_none_or_clear_bad(pgd))
259			continue;
260		free_pud_range(*tlb, pgd, addr, next, floor, ceiling);
261	} while (pgd++, addr = next, addr != end);
262}
263
264void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma,
265		unsigned long floor, unsigned long ceiling)
266{
267	while (vma) {
268		struct vm_area_struct *next = vma->vm_next;
269		unsigned long addr = vma->vm_start;
270
271		/*
272		 * Hide vma from rmap and vmtruncate before freeing pgtables
273		 */
274		anon_vma_unlink(vma);
275		unlink_file_vma(vma);
276
277		if (is_vm_hugetlb_page(vma)) {
278			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
279				floor, next? next->vm_start: ceiling);
280		} else {
281			/*
282			 * Optimization: gather nearby vmas into one call down
283			 */
284			while (next && next->vm_start <= vma->vm_end + PMD_SIZE
285			       && !is_vm_hugetlb_page(next)) {
286				vma = next;
287				next = vma->vm_next;
288				anon_vma_unlink(vma);
289				unlink_file_vma(vma);
290			}
291			free_pgd_range(tlb, addr, vma->vm_end,
292				floor, next? next->vm_start: ceiling);
293		}
294		vma = next;
295	}
296}
297
298int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
299{
300	struct page *new = pte_alloc_one(mm, address);
301	if (!new)
302		return -ENOMEM;
303
304	pte_lock_init(new);
305	spin_lock(&mm->page_table_lock);
306	if (pmd_present(*pmd)) {	/* Another has populated it */
307		pte_lock_deinit(new);
308		pte_free(new);
309	} else {
310		mm->nr_ptes++;
311		inc_zone_page_state(new, NR_PAGETABLE);
312		pmd_populate(mm, pmd, new);
313	}
314	spin_unlock(&mm->page_table_lock);
315	return 0;
316}
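
/*
 * Illustrative sketch (hypothetical helper, not taken from this file):
 * callers normally reach __pte_alloc() through pte_alloc_map_lock(),
 * which allocates the pte page only if the pmd is still empty and hands
 * the pte back mapped and locked:
 */
static int set_one_pte_example(struct mm_struct *mm, pmd_t *pmd,
			       unsigned long addr, pte_t val)
{
	spinlock_t *ptl;
	pte_t *pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);

	if (!pte)
		return -ENOMEM;
	if (pte_none(*pte))		/* don't overwrite a live pte */
		set_pte_at(mm, addr, pte, val);
	pte_unmap_unlock(pte, ptl);
	return 0;
}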
317
318int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
319{
320	pte_t *new = pte_alloc_one_kernel(&init_mm, address);
321	if (!new)
322		return -ENOMEM;
323
324	spin_lock(&init_mm.page_table_lock);
325	if (pmd_present(*pmd))		/* Another has populated it */
326		pte_free_kernel(new);
327	else
328		pmd_populate_kernel(&init_mm, pmd, new);
329	spin_unlock(&init_mm.page_table_lock);
330	return 0;
331}
332
333static inline void add_mm_rss(struct mm_struct *mm, int file_rss, int anon_rss)
334{
335	if (file_rss)
336		add_mm_counter(mm, file_rss, file_rss);
337	if (anon_rss)
338		add_mm_counter(mm, anon_rss, anon_rss);
339}
340
341/*
342 * This function is called to print an error when a bad pte
343 * is found. For example, we might have a PFN-mapped pte in
344 * a region that doesn't allow it.
345 *
346 * The calling function must still handle the error.
347 */
348void print_bad_pte(struct vm_area_struct *vma, pte_t pte, unsigned long vaddr)
349{
350	printk(KERN_ERR "Bad pte = %08llx, process = %s, "
351			"vm_flags = %lx, vaddr = %lx\n",
352		(long long)pte_val(pte),
353		(vma->vm_mm == current->mm ? current->comm : "???"),
354		vma->vm_flags, vaddr);
355	dump_stack();
356}
357
358static inline int is_cow_mapping(unsigned int flags)
359{
360	return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
361}
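
/*
 * In other words: only a private mapping that may become writable
 * (VM_SHARED clear, VM_MAYWRITE set) is treated as copy-on-write;
 * MAP_SHARED regions are never COWed.
 */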
362
363/*
364 * This function gets the "struct page" associated with a pte.
365 *
366 * NOTE! Some mappings do not have "struct pages". A raw PFN mapping
367 * will have each page table entry just pointing to a raw page frame
368 * number, and as far as the VM layer is concerned, those do not have
369 * pages associated with them - even if the PFN might point to memory
370 * that otherwise is perfectly fine and has a "struct page".
371 *
372 * The way we recognize those mappings is through the rules set up
373 * by "remap_pfn_range()": the vma will have the VM_PFNMAP bit set,
374 * and the vm_pgoff will point to the first PFN mapped: thus every
375 * page that is a raw mapping will always honor the rule
376 *
377 *	pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
378 *
379 * and if that isn't true, the page has been COW'ed (in which case it
380 * _does_ have a "struct page" associated with it even if it is in a
381 * VM_PFNMAP range).
382 */
383struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
384{
385	unsigned long pfn = pte_pfn(pte);
386
387	if (unlikely(vma->vm_flags & VM_PFNMAP)) {
388		unsigned long off = (addr - vma->vm_start) >> PAGE_SHIFT;
389		if (pfn == vma->vm_pgoff + off)
390			return NULL;
391		if (!is_cow_mapping(vma->vm_flags))
392			return NULL;
393	}
394
395#ifdef CONFIG_DEBUG_VM
396	/*
397	 * Add some anal sanity checks for now. Eventually,
398	 * we should just do "return pfn_to_page(pfn)", but
399	 * in the meantime we check that we get a valid pfn,
400	 * and that the resulting page looks ok.
401	 */
402	if (unlikely(!pfn_valid(pfn))) {
403		print_bad_pte(vma, pte, addr);
404		return NULL;
405	}
406#endif
407
408	/*
409	 * NOTE! We still have PageReserved() pages in the page
410	 * tables.
411	 *
412	 * The ZERO_PAGE() pages and various VDSO mappings can
413	 * cause them to exist.
414	 */
415	return pfn_to_page(pfn);
416}
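
/*
 * Illustrative sketch (hypothetical helper, not taken from this file):
 * the linearity rule from the comment above, spelled out.  For an address
 * inside a VM_PFNMAP vma this is the pfn a never-COWed pte is expected to
 * carry; vm_normal_page() returns NULL for such ptes.
 */
static inline unsigned long pfnmap_expected_pfn(struct vm_area_struct *vma,
						unsigned long addr)
{
	return vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);
}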
417
418/*
419 * Copy one vm_area from one task to the other. Assumes that the page
420 * tables already present in the new task have been cleared in the whole
421 * range covered by this vma.
422 */
423
424static inline void
425copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
426		pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
427		unsigned long addr, int *rss)
428{
429	unsigned long vm_flags = vma->vm_flags;
430	pte_t pte = *src_pte;
431	struct page *page;
432
433	/* pte contains position in swap or file, so copy. */
434	if (unlikely(!pte_present(pte))) {
435		if (!pte_file(pte)) {
436			swp_entry_t entry = pte_to_swp_entry(pte);
437
438			swap_duplicate(entry);
439			/* make sure dst_mm is on swapoff's mmlist. */
440			if (unlikely(list_empty(&dst_mm->mmlist))) {
441				spin_lock(&mmlist_lock);
442				if (list_empty(&dst_mm->mmlist))
443					list_add(&dst_mm->mmlist,
444						 &src_mm->mmlist);
445				spin_unlock(&mmlist_lock);
446			}
447			if (is_write_migration_entry(entry) &&
448					is_cow_mapping(vm_flags)) {
449				/*
450				 * COW mappings require pages in both parent
451				 * and child to be marked read-only.
452				 */
453				make_migration_entry_read(&entry);
454				pte = swp_entry_to_pte(entry);
455				set_pte_at(src_mm, addr, src_pte, pte);
456			}
457		}
458		goto out_set_pte;
459	}
460
461	/*
462	 * If it's a COW mapping, write protect it both
463	 * in the parent and the child
464	 */
465	if (is_cow_mapping(vm_flags)) {
466		ptep_set_wrprotect(src_mm, addr, src_pte);
467		pte = pte_wrprotect(pte);
468	}
469
470	/*
471	 * If it's a shared mapping, mark it clean in
472	 * the child
473	 */
474	if (vm_flags & VM_SHARED)
475		pte = pte_mkclean(pte);
476	pte = pte_mkold(pte);
477
478	page = vm_normal_page(vma, addr, pte);
479	if (page) {
480		get_page(page);
481		page_dup_rmap(page, vma, addr);
482		rss[!!PageAnon(page)]++;
483	}
484
485out_set_pte:
486	set_pte_at(dst_mm, addr, dst_pte, pte);
487}
488
489static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
490		pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
491		unsigned long addr, unsigned long end)
492{
493	pte_t *src_pte, *dst_pte;
494	spinlock_t *src_ptl, *dst_ptl;
495	int progress = 0;
496	int rss[2];
497
498again:
499	rss[1] = rss[0] = 0;
500	dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
501	if (!dst_pte)
502		return -ENOMEM;
503	src_pte = pte_offset_map_nested(src_pmd, addr);
504	src_ptl = pte_lockptr(src_mm, src_pmd);
505	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
506	arch_enter_lazy_mmu_mode();
507
508	do {
509		/*
510		 * We are holding two locks at this point - either of them
511		 * could generate latencies in another task on another CPU.
512		 */
513		if (progress >= 32) {
514			progress = 0;
515			if (need_resched() ||
516			    spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
517				break;
518		}
519		if (pte_none(*src_pte)) {
520			progress++;
521			continue;
522		}
523		copy_one_pte(dst_mm, src_mm, dst_pte, src_pte, vma, addr, rss);
524		progress += 8;
525	} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
526
527	arch_leave_lazy_mmu_mode();
528	spin_unlock(src_ptl);
529	pte_unmap_nested(src_pte - 1);
530	add_mm_rss(dst_mm, rss[0], rss[1]);
531	pte_unmap_unlock(dst_pte - 1, dst_ptl);
532	cond_resched();
533	if (addr != end)
534		goto again;
535	return 0;
536}
537
538static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
539		pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma,
540		unsigned long addr, unsigned long end)
541{
542	pmd_t *src_pmd, *dst_pmd;
543	unsigned long next;
544
545	dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
546	if (!dst_pmd)
547		return -ENOMEM;
548	src_pmd = pmd_offset(src_pud, addr);
549	do {
550		next = pmd_addr_end(addr, end);
551		if (pmd_none_or_clear_bad(src_pmd))
552			continue;
553		if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
554						vma, addr, next))
555			return -ENOMEM;
556	} while (dst_pmd++, src_pmd++, addr = next, addr != end);
557	return 0;
558}
559
560static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
561		pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
562		unsigned long addr, unsigned long end)
563{
564	pud_t *src_pud, *dst_pud;
565	unsigned long next;
566
567	dst_pud = pud_alloc(dst_mm, dst_pgd, addr);
568	if (!dst_pud)
569		return -ENOMEM;
570	src_pud = pud_offset(src_pgd, addr);
571	do {
572		next = pud_addr_end(addr, end);
573		if (pud_none_or_clear_bad(src_pud))
574			continue;
575		if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud,
576						vma, addr, next))
577			return -ENOMEM;
578	} while (dst_pud++, src_pud++, addr = next, addr != end);
579	return 0;
580}
581
582int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
583		struct vm_area_struct *vma)
584{
585	pgd_t *src_pgd, *dst_pgd;
586	unsigned long next;
587	unsigned long addr = vma->vm_start;
588	unsigned long end = vma->vm_end;
589
590	/*
591	 * Don't copy ptes where a page fault will fill them correctly.
592	 * Fork becomes much lighter when there are big shared or private
593	 * readonly mappings. The tradeoff is that copy_page_range is more
594	 * efficient than faulting.
595	 */
596	if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP|VM_INSERTPAGE))) {
597		if (!vma->anon_vma)
598			return 0;
599	}
600
601	if (is_vm_hugetlb_page(vma))
602		return copy_hugetlb_page_range(dst_mm, src_mm, vma);
603
604	dst_pgd = pgd_offset(dst_mm, addr);
605	src_pgd = pgd_offset(src_mm, addr);
606	do {
607		next = pgd_addr_end(addr, end);
608		if (pgd_none_or_clear_bad(src_pgd))
609			continue;
610		if (copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd,
611						vma, addr, next))
612			return -ENOMEM;
613	} while (dst_pgd++, src_pgd++, addr = next, addr != end);
614	return 0;
615}
616
617static unsigned long zap_pte_range(struct mmu_gather *tlb,
618				struct vm_area_struct *vma, pmd_t *pmd,
619				unsigned long addr, unsigned long end,
620				long *zap_work, struct zap_details *details)
621{
622	struct mm_struct *mm = tlb->mm;
623	pte_t *pte;
624	spinlock_t *ptl;
625	int file_rss = 0;
626	int anon_rss = 0;
627
628	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
629	arch_enter_lazy_mmu_mode();
630	do {
631		pte_t ptent = *pte;
632		if (pte_none(ptent)) {
633			(*zap_work)--;
634			continue;
635		}
636
637		(*zap_work) -= PAGE_SIZE;
638
639		if (pte_present(ptent)) {
640			struct page *page;
641
642			page = vm_normal_page(vma, addr, ptent);
643			if (unlikely(details) && page) {
644				/*
645				 * unmap_shared_mapping_pages() wants to
646				 * invalidate cache without truncating:
647				 * unmap shared but keep private pages.
648				 */
649				if (details->check_mapping &&
650				    details->check_mapping != page->mapping)
651					continue;
652				/*
653				 * Each page->index must be checked when
654				 * invalidating or truncating nonlinear.
655				 */
656				if (details->nonlinear_vma &&
657				    (page->index < details->first_index ||
658				     page->index > details->last_index))
659					continue;
660			}
661			ptent = ptep_get_and_clear_full(mm, addr, pte,
662							tlb->fullmm);
663			tlb_remove_tlb_entry(tlb, pte, addr);
664			if (unlikely(!page))
665				continue;
666			if (unlikely(details) && details->nonlinear_vma
667			    && linear_page_index(details->nonlinear_vma,
668						addr) != page->index)
669				set_pte_at(mm, addr, pte,
670					   pgoff_to_pte(page->index));
671			if (PageAnon(page))
672				anon_rss--;
673			else {
674				if (pte_dirty(ptent))
675					set_page_dirty(page);
676				if (pte_young(ptent))
677					SetPageReferenced(page);
678				file_rss--;
679			}
680			page_remove_rmap(page, vma);
681			tlb_remove_page(tlb, page);
682			continue;
683		}
684		/*
685		 * If details->check_mapping, we leave swap entries;
686		 * if details->nonlinear_vma, we leave file entries.
687		 */
688		if (unlikely(details))
689			continue;
690		if (!pte_file(ptent))
691			free_swap_and_cache(pte_to_swp_entry(ptent));
692		pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
693	} while (pte++, addr += PAGE_SIZE, (addr != end && *zap_work > 0));
694
695	add_mm_rss(mm, file_rss, anon_rss);
696	arch_leave_lazy_mmu_mode();
697	pte_unmap_unlock(pte - 1, ptl);
698
699	return addr;
700}
701
702static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
703				struct vm_area_struct *vma, pud_t *pud,
704				unsigned long addr, unsigned long end,
705				long *zap_work, struct zap_details *details)
706{
707	pmd_t *pmd;
708	unsigned long next;
709
710	pmd = pmd_offset(pud, addr);
711	do {
712		next = pmd_addr_end(addr, end);
713		if (pmd_none_or_clear_bad(pmd)) {
714			(*zap_work)--;
715			continue;
716		}
717		next = zap_pte_range(tlb, vma, pmd, addr, next,
718						zap_work, details);
719	} while (pmd++, addr = next, (addr != end && *zap_work > 0));
720
721	return addr;
722}
723
724static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
725				struct vm_area_struct *vma, pgd_t *pgd,
726				unsigned long addr, unsigned long end,
727				long *zap_work, struct zap_details *details)
728{
729	pud_t *pud;
730	unsigned long next;
731
732	pud = pud_offset(pgd, addr);
733	do {
734		next = pud_addr_end(addr, end);
735		if (pud_none_or_clear_bad(pud)) {
736			(*zap_work)--;
737			continue;
738		}
739		next = zap_pmd_range(tlb, vma, pud, addr, next,
740						zap_work, details);
741	} while (pud++, addr = next, (addr != end && *zap_work > 0));
742
743	return addr;
744}
745
746static unsigned long unmap_page_range(struct mmu_gather *tlb,
747				struct vm_area_struct *vma,
748				unsigned long addr, unsigned long end,
749				long *zap_work, struct zap_details *details)
750{
751	pgd_t *pgd;
752	unsigned long next;
753
754	if (details && !details->check_mapping && !details->nonlinear_vma)
755		details = NULL;
756
757	BUG_ON(addr >= end);
758	tlb_start_vma(tlb, vma);
759	pgd = pgd_offset(vma->vm_mm, addr);
760	do {
761		next = pgd_addr_end(addr, end);
762		if (pgd_none_or_clear_bad(pgd)) {
763			(*zap_work)--;
764			continue;
765		}
766		next = zap_pud_range(tlb, vma, pgd, addr, next,
767						zap_work, details);
768	} while (pgd++, addr = next, (addr != end && *zap_work > 0));
769	tlb_end_vma(tlb, vma);
770
771	return addr;
772}
773
774#ifdef CONFIG_PREEMPT
775# define ZAP_BLOCK_SIZE	(8 * PAGE_SIZE)
776#else
777/* No preempt: go for improved straight-line efficiency */
778# define ZAP_BLOCK_SIZE	(1024 * PAGE_SIZE)
779#endif
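
/*
 * With the common 4K PAGE_SIZE that is a 32KB zap batch on preemptible
 * kernels and a 4MB batch otherwise; zap_work below is accounted in
 * bytes, roughly one PAGE_SIZE per pte that needs work.
 */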
780
781/**
782 * unmap_vmas - unmap a range of memory covered by a list of vma's
783 * @tlbp: address of the caller's struct mmu_gather
784 * @vma: the starting vma
785 * @start_addr: virtual address at which to start unmapping
786 * @end_addr: virtual address at which to end unmapping
787 * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here
788 * @details: details of nonlinear truncation or shared cache invalidation
789 *
790 * Returns the end address of the unmapping (restart addr if interrupted).
791 *
792 * Unmap all pages in the vma list.
793 *
794 * We aim to not hold locks for too long (for scheduling latency reasons).
795 * So zap pages in ZAP_BLOCK_SIZE bytecounts.  This means we need to
796 * return the ending mmu_gather to the caller.
797 *
798 * Only addresses between `start' and `end' will be unmapped.
799 *
800 * The VMA list must be sorted in ascending virtual address order.
801 *
802 * unmap_vmas() assumes that the caller will flush the whole unmapped address
803 * range after unmap_vmas() returns.  So the only responsibility here is to
804 * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
805 * drops the lock and schedules.
806 */
807unsigned long unmap_vmas(struct mmu_gather **tlbp,
808		struct vm_area_struct *vma, unsigned long start_addr,
809		unsigned long end_addr, unsigned long *nr_accounted,
810		struct zap_details *details)
811{
812	long zap_work = ZAP_BLOCK_SIZE;
813	unsigned long tlb_start = 0;	/* For tlb_finish_mmu */
814	int tlb_start_valid = 0;
815	unsigned long start = start_addr;
816	spinlock_t *i_mmap_lock = details? details->i_mmap_lock: NULL;
817	int fullmm = (*tlbp)->fullmm;
818
819	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) {
820		unsigned long end;
821
822		start = max(vma->vm_start, start_addr);
823		if (start >= vma->vm_end)
824			continue;
825		end = min(vma->vm_end, end_addr);
826		if (end <= vma->vm_start)
827			continue;
828
829		if (vma->vm_flags & VM_ACCOUNT)
830			*nr_accounted += (end - start) >> PAGE_SHIFT;
831
832		while (start != end) {
833			if (!tlb_start_valid) {
834				tlb_start = start;
835				tlb_start_valid = 1;
836			}
837
838			if (unlikely(is_vm_hugetlb_page(vma))) {
839				unmap_hugepage_range(vma, start, end);
840				zap_work -= (end - start) /
841						(HPAGE_SIZE / PAGE_SIZE);
842				start = end;
843			} else
844				start = unmap_page_range(*tlbp, vma,
845						start, end, &zap_work, details);
846
847			if (zap_work > 0) {
848				BUG_ON(start != end);
849				break;
850			}
851
852			tlb_finish_mmu(*tlbp, tlb_start, start);
853
854			if (need_resched() ||
855				(i_mmap_lock && spin_needbreak(i_mmap_lock))) {
856				if (i_mmap_lock) {
857					*tlbp = NULL;
858					goto out;
859				}
860				cond_resched();
861			}
862
863			*tlbp = tlb_gather_mmu(vma->vm_mm, fullmm);
864			tlb_start_valid = 0;
865			zap_work = ZAP_BLOCK_SIZE;
866		}
867	}
868out:
869	return start;	/* which is now the end (or restart) address */
870}
871
872/**
873 * zap_page_range - remove user pages in a given range
874 * @vma: vm_area_struct holding the applicable pages
875 * @address: starting address of pages to zap
876 * @size: number of bytes to zap
877 * @details: details of nonlinear truncation or shared cache invalidation
878 */
879unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
880		unsigned long size, struct zap_details *details)
881{
882	struct mm_struct *mm = vma->vm_mm;
883	struct mmu_gather *tlb;
884	unsigned long end = address + size;
885	unsigned long nr_accounted = 0;
886
887	lru_add_drain();
888	tlb = tlb_gather_mmu(mm, 0);
889	update_hiwater_rss(mm);
890	end = unmap_vmas(&tlb, vma, address, end, &nr_accounted, details);
891	if (tlb)
892		tlb_finish_mmu(tlb, address, end);
893	return end;
894}
895
896/*
897 * Do a quick page-table lookup for a single page.
898 */
899struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
900			unsigned int flags)
901{
902	pgd_t *pgd;
903	pud_t *pud;
904	pmd_t *pmd;
905	pte_t *ptep, pte;
906	spinlock_t *ptl;
907	struct page *page;
908	struct mm_struct *mm = vma->vm_mm;
909
910	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
911	if (!IS_ERR(page)) {
912		BUG_ON(flags & FOLL_GET);
913		goto out;
914	}
915
916	page = NULL;
917	pgd = pgd_offset(mm, address);
918	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
919		goto no_page_table;
920
921	pud = pud_offset(pgd, address);
922	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
923		goto no_page_table;
924
925	pmd = pmd_offset(pud, address);
926	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
927		goto no_page_table;
928
929	if (pmd_huge(*pmd)) {
930		BUG_ON(flags & FOLL_GET);
931		page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE);
932		goto out;
933	}
934
935	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
936	if (!ptep)
937		goto out;
938
939	pte = *ptep;
940	if (!pte_present(pte))
941		goto unlock;
942	if ((flags & FOLL_WRITE) && !pte_write(pte))
943		goto unlock;
944	page = vm_normal_page(vma, address, pte);
945	if (unlikely(!page))
946		goto unlock;
947
948	if (flags & FOLL_GET)
949		get_page(page);
950	if (flags & FOLL_TOUCH) {
951		if ((flags & FOLL_WRITE) &&
952		    !pte_dirty(pte) && !PageDirty(page))
953			set_page_dirty(page);
954		mark_page_accessed(page);
955	}
956unlock:
957	pte_unmap_unlock(ptep, ptl);
958out:
959	return page;
960
961no_page_table:
962	/*
963	 * When core dumping an enormous anonymous area that nobody
964	 * has touched so far, we don't want to allocate page tables.
965	 */
966	if (flags & FOLL_ANON) {
967		page = ZERO_PAGE(0);
968		if (flags & FOLL_GET)
969			get_page(page);
970		BUG_ON(flags & FOLL_WRITE);
971	}
972	return page;
973}
974
975int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
976		unsigned long start, int len, int write, int force,
977		struct page **pages, struct vm_area_struct **vmas)
978{
979	int i;
980	unsigned int vm_flags;
981
982	/*
983	 * Require read or write permissions.
984	 * If 'force' is set, we only require the "MAY" flags.
985	 */
986	vm_flags  = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
987	vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
988	i = 0;
989
990	do {
991		struct vm_area_struct *vma;
992		unsigned int foll_flags;
993
994		vma = find_extend_vma(mm, start);
995		if (!vma && in_gate_area(tsk, start)) {
996			unsigned long pg = start & PAGE_MASK;
997			struct vm_area_struct *gate_vma = get_gate_vma(tsk);
998			pgd_t *pgd;
999			pud_t *pud;
1000			pmd_t *pmd;
1001			pte_t *pte;
1002			if (write) /* user gate pages are read-only */
1003				return i ? : -EFAULT;
1004			if (pg > TASK_SIZE)
1005				pgd = pgd_offset_k(pg);
1006			else
1007				pgd = pgd_offset_gate(mm, pg);
1008			BUG_ON(pgd_none(*pgd));
1009			pud = pud_offset(pgd, pg);
1010			BUG_ON(pud_none(*pud));
1011			pmd = pmd_offset(pud, pg);
1012			if (pmd_none(*pmd))
1013				return i ? : -EFAULT;
1014			pte = pte_offset_map(pmd, pg);
1015			if (pte_none(*pte)) {
1016				pte_unmap(pte);
1017				return i ? : -EFAULT;
1018			}
1019			if (pages) {
1020				struct page *page = vm_normal_page(gate_vma, start, *pte);
1021				pages[i] = page;
1022				if (page)
1023					get_page(page);
1024			}
1025			pte_unmap(pte);
1026			if (vmas)
1027				vmas[i] = gate_vma;
1028			i++;
1029			start += PAGE_SIZE;
1030			len--;
1031			continue;
1032		}
1033
1034		if (!vma || (vma->vm_flags & (VM_IO | VM_PFNMAP))
1035				|| !(vm_flags & vma->vm_flags))
1036			return i ? : -EFAULT;
1037
1038		if (is_vm_hugetlb_page(vma)) {
1039			i = follow_hugetlb_page(mm, vma, pages, vmas,
1040						&start, &len, i, write);
1041			continue;
1042		}
1043
1044		foll_flags = FOLL_TOUCH;
1045		if (pages)
1046			foll_flags |= FOLL_GET;
1047		if (!write && !(vma->vm_flags & VM_LOCKED) &&
1048		    (!vma->vm_ops || (!vma->vm_ops->nopage &&
1049					!vma->vm_ops->fault)))
1050			foll_flags |= FOLL_ANON;
1051
1052		do {
1053			struct page *page;
1054
1055			/*
1056			 * If tsk is ooming, cut off its access to large memory
1057			 * allocations. It has a pending SIGKILL, but it can't
1058			 * be processed until returning to user space.
1059			 */
1060			if (unlikely(test_tsk_thread_flag(tsk, TIF_MEMDIE)))
1061				return -ENOMEM;
1062
1063			if (write)
1064				foll_flags |= FOLL_WRITE;
1065
1066			cond_resched();
1067			while (!(page = follow_page(vma, start, foll_flags))) {
1068				int ret;
1069				ret = handle_mm_fault(mm, vma, start,
1070						foll_flags & FOLL_WRITE);
1071				if (ret & VM_FAULT_ERROR) {
1072					if (ret & VM_FAULT_OOM)
1073						return i ? i : -ENOMEM;
1074					else if (ret & VM_FAULT_SIGBUS)
1075						return i ? i : -EFAULT;
1076					BUG();
1077				}
1078				if (ret & VM_FAULT_MAJOR)
1079					tsk->maj_flt++;
1080				else
1081					tsk->min_flt++;
1082
1083				/*
1084				 * The VM_FAULT_WRITE bit tells us that
1085				 * do_wp_page has broken COW when necessary,
1086				 * even if maybe_mkwrite decided not to set
1087				 * pte_write. We can thus safely do subsequent
1088				 * page lookups as if they were reads.
1089				 */
1090				if (ret & VM_FAULT_WRITE)
1091					foll_flags &= ~FOLL_WRITE;
1092
1093				cond_resched();
1094			}
1095			if (pages) {
1096				pages[i] = page;
1097
1098				flush_anon_page(vma, page, start);
1099				flush_dcache_page(page);
1100			}
1101			if (vmas)
1102				vmas[i] = vma;
1103			i++;
1104			start += PAGE_SIZE;
1105			len--;
1106		} while (len && start < vma->vm_end);
1107	} while (len);
1108	return i;
1109}
1110EXPORT_SYMBOL(get_user_pages);
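
/*
 * Illustrative sketch (hypothetical caller, not taken from this file):
 * the usual pattern for pinning a user buffer is to call get_user_pages()
 * with mmap_sem held for read, then drop the page references when done.
 */
static int pin_user_buffer_example(struct task_struct *tsk,
				   struct mm_struct *mm, unsigned long start,
				   int nr_pages, struct page **pages)
{
	int got, i;

	down_read(&mm->mmap_sem);
	got = get_user_pages(tsk, mm, start & PAGE_MASK, nr_pages,
			     1 /* write */, 0 /* force */, pages, NULL);
	up_read(&mm->mmap_sem);
	if (got <= 0)
		return got ? got : -EFAULT;

	/* ... access the pinned pages here ... */

	for (i = 0; i < got; i++)
		page_cache_release(pages[i]);
	return got;
}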
1111
1112pte_t * fastcall get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl)
1113{
1114	pgd_t * pgd = pgd_offset(mm, addr);
1115	pud_t * pud = pud_alloc(mm, pgd, addr);
1116	if (pud) {
1117		pmd_t * pmd = pmd_alloc(mm, pud, addr);
1118		if (pmd)
1119			return pte_alloc_map_lock(mm, pmd, addr, ptl);
1120	}
1121	return NULL;
1122}
1123
1124/*
1125 * This is the old fallback for page remapping.
1126 *
1127 * For historical reasons, it only allows reserved pages. Only
1128 * old drivers should use this, and they needed to mark their
1129 * pages reserved for the old functions anyway.
1130 */
1131static int insert_page(struct mm_struct *mm, unsigned long addr, struct page *page, pgprot_t prot)
1132{
1133	int retval;
1134	pte_t *pte;
1135	spinlock_t *ptl;
1136
1137	retval = -EINVAL;
1138	if (PageAnon(page))
1139		goto out;
1140	retval = -ENOMEM;
1141	flush_dcache_page(page);
1142	pte = get_locked_pte(mm, addr, &ptl);
1143	if (!pte)
1144		goto out;
1145	retval = -EBUSY;
1146	if (!pte_none(*pte))
1147		goto out_unlock;
1148
1149	/* Ok, finally just insert the thing.. */
1150	get_page(page);
1151	inc_mm_counter(mm, file_rss);
1152	page_add_file_rmap(page);
1153	set_pte_at(mm, addr, pte, mk_pte(page, prot));
1154
1155	retval = 0;
1156out_unlock:
1157	pte_unmap_unlock(pte, ptl);
1158out:
1159	return retval;
1160}
1161
1162/**
1163 * vm_insert_page - insert single page into user vma
1164 * @vma: user vma to map to
1165 * @addr: target user address of this page
1166 * @page: source kernel page
1167 *
1168 * This allows drivers to insert individual pages they've allocated
1169 * into a user vma.
1170 *
1171 * The page has to be a nice clean _individual_ kernel allocation.
1172 * If you allocate a compound page, you need to have marked it as
1173 * such (__GFP_COMP), or manually just split the page up yourself
1174 * (see split_page()).
1175 *
1176 * NOTE! Traditionally this was done with "remap_pfn_range()" which
1177 * took an arbitrary page protection parameter. This doesn't allow
1178 * that. Your vma protection will have to be set up correctly, which
1179 * means that if you want a shared writable mapping, you'd better
1180 * ask for a shared writable mapping!
1181 *
1182 * The page does not need to be reserved.
1183 */
1184int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *page)
1185{
1186	if (addr < vma->vm_start || addr >= vma->vm_end)
1187		return -EFAULT;
1188	if (!page_count(page))
1189		return -EINVAL;
1190	vma->vm_flags |= VM_INSERTPAGE;
1191	return insert_page(vma->vm_mm, addr, page, vma->vm_page_prot);
1192}
1193EXPORT_SYMBOL(vm_insert_page);
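
/*
 * Illustrative sketch (hypothetical driver code, not taken from this
 * file): a character driver's mmap handler mapping a single page it
 * allocated earlier with alloc_page() and still holds a reference on.
 */
static int mydev_mmap_example(struct vm_area_struct *vma,
			      struct page *mydev_page)
{
	if (vma->vm_end - vma->vm_start < PAGE_SIZE)
		return -EINVAL;
	/* vma->vm_page_prot must already express the protection we want */
	return vm_insert_page(vma, vma->vm_start, mydev_page);
}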
1194
1195/**
1196 * vm_insert_pfn - insert single pfn into user vma
1197 * @vma: user vma to map to
1198 * @addr: target user address of this page
1199 * @pfn: source kernel pfn
1200 *
1201 * Similar to vm_insert_page, this allows drivers to insert individual pages
1202 * they've allocated into a user vma. Same comments apply.
1203 *
1204 * This function should only be called from a vm_ops->fault handler, and
1205 * in that case the handler should return NULL.
1206 */
1207int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
1208		unsigned long pfn)
1209{
1210	struct mm_struct *mm = vma->vm_mm;
1211	int retval;
1212	pte_t *pte, entry;
1213	spinlock_t *ptl;
1214
1215	BUG_ON(!(vma->vm_flags & VM_PFNMAP));
1216	BUG_ON(is_cow_mapping(vma->vm_flags));
1217
1218	retval = -ENOMEM;
1219	pte = get_locked_pte(mm, addr, &ptl);
1220	if (!pte)
1221		goto out;
1222	retval = -EBUSY;
1223	if (!pte_none(*pte))
1224		goto out_unlock;
1225
1226	/* Ok, finally just insert the thing.. */
1227	entry = pfn_pte(pfn, vma->vm_page_prot);
1228	set_pte_at(mm, addr, pte, entry);
1229	update_mmu_cache(vma, addr, entry);
1230
1231	retval = 0;
1232out_unlock:
1233	pte_unmap_unlock(pte, ptl);
1234
1235out:
1236	return retval;
1237}
1238EXPORT_SYMBOL(vm_insert_pfn);
1239
1240/*
1241 * Maps a range of physical memory into the requested pages. The old
1242 * mappings are removed. Any references to nonexistent pages result
1243 * in null mappings (currently treated as "copy-on-access").
1244 */
1245static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
1246			unsigned long addr, unsigned long end,
1247			unsigned long pfn, pgprot_t prot)
1248{
1249	pte_t *pte;
1250	spinlock_t *ptl;
1251
1252	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
1253	if (!pte)
1254		return -ENOMEM;
1255	arch_enter_lazy_mmu_mode();
1256	do {
1257		BUG_ON(!pte_none(*pte));
1258		set_pte_at(mm, addr, pte, pfn_pte(pfn, prot));
1259		pfn++;
1260	} while (pte++, addr += PAGE_SIZE, addr != end);
1261	arch_leave_lazy_mmu_mode();
1262	pte_unmap_unlock(pte - 1, ptl);
1263	return 0;
1264}
1265
1266static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
1267			unsigned long addr, unsigned long end,
1268			unsigned long pfn, pgprot_t prot)
1269{
1270	pmd_t *pmd;
1271	unsigned long next;
1272
1273	pfn -= addr >> PAGE_SHIFT;
1274	pmd = pmd_alloc(mm, pud, addr);
1275	if (!pmd)
1276		return -ENOMEM;
1277	do {
1278		next = pmd_addr_end(addr, end);
1279		if (remap_pte_range(mm, pmd, addr, next,
1280				pfn + (addr >> PAGE_SHIFT), prot))
1281			return -ENOMEM;
1282	} while (pmd++, addr = next, addr != end);
1283	return 0;
1284}
1285
1286static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
1287			unsigned long addr, unsigned long end,
1288			unsigned long pfn, pgprot_t prot)
1289{
1290	pud_t *pud;
1291	unsigned long next;
1292
1293	pfn -= addr >> PAGE_SHIFT;
1294	pud = pud_alloc(mm, pgd, addr);
1295	if (!pud)
1296		return -ENOMEM;
1297	do {
1298		next = pud_addr_end(addr, end);
1299		if (remap_pmd_range(mm, pud, addr, next,
1300				pfn + (addr >> PAGE_SHIFT), prot))
1301			return -ENOMEM;
1302	} while (pud++, addr = next, addr != end);
1303	return 0;
1304}
1305
1306/**
1307 * remap_pfn_range - remap kernel memory to userspace
1308 * @vma: user vma to map to
1309 * @addr: target user address to start at
1310 * @pfn: physical address of kernel memory
1311 * @size: size of map area
1312 * @prot: page protection flags for this mapping
1313 *
1314 *  Note: this is only safe if the mm semaphore is held when called.
1315 */
1316int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
1317		    unsigned long pfn, unsigned long size, pgprot_t prot)
1318{
1319	pgd_t *pgd;
1320	unsigned long next;
1321	unsigned long end = addr + PAGE_ALIGN(size);
1322	struct mm_struct *mm = vma->vm_mm;
1323	int err;
1324
1325	/*
1326	 * Physically remapped pages are special. Tell the
1327	 * rest of the world about it:
1328	 *   VM_IO tells people not to look at these pages
1329	 *	(accesses can have side effects).
1330	 *   VM_RESERVED is specified all over the place, because
1331	 *	in 2.4 it kept swapout's vma scan off this vma; but
1332	 *	in 2.6 the LRU scan won't even find its pages, so this
1333	 *	flag means no more than count its pages in reserved_vm,
1334	 * 	and omit it from core dump, even when VM_IO turned off.
1335	 *   VM_PFNMAP tells the core MM that the base pages are just
1336	 *	raw PFN mappings, and do not have a "struct page" associated
1337	 *	with them.
1338	 *
1339	 * There's a horrible special case to handle copy-on-write
1340	 * behaviour that some programs depend on. We mark the "original"
1341	 * un-COW'ed pages by matching them up with "vma->vm_pgoff".
1342	 */
1343	if (is_cow_mapping(vma->vm_flags)) {
1344		if (addr != vma->vm_start || end != vma->vm_end)
1345			return -EINVAL;
1346		vma->vm_pgoff = pfn;
1347	}
1348
1349	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
1350
1351	BUG_ON(addr >= end);
1352	pfn -= addr >> PAGE_SHIFT;
1353	pgd = pgd_offset(mm, addr);
1354	flush_cache_range(vma, addr, end);
1355	do {
1356		next = pgd_addr_end(addr, end);
1357		err = remap_pud_range(mm, pgd, addr, next,
1358				pfn + (addr >> PAGE_SHIFT), prot);
1359		if (err)
1360			break;
1361	} while (pgd++, addr = next, addr != end);
1362	return err;
1363}
1364EXPORT_SYMBOL(remap_pfn_range);
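
/*
 * Illustrative sketch (hypothetical driver code, not taken from this
 * file): the classic user is a driver mmap handler mapping a physically
 * contiguous region, described here by the hypothetical phys_base, into
 * the caller's vma.
 */
static int mydev_remap_example(struct vm_area_struct *vma,
			       unsigned long phys_base)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	return remap_pfn_range(vma, vma->vm_start,
			       (phys_base >> PAGE_SHIFT) + vma->vm_pgoff,
			       size, vma->vm_page_prot);
}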
1365
1366static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
1367				     unsigned long addr, unsigned long end,
1368				     pte_fn_t fn, void *data)
1369{
1370	pte_t *pte;
1371	int err;
1372	struct page *pmd_page;
1373	spinlock_t *uninitialized_var(ptl);
1374
1375	pte = (mm == &init_mm) ?
1376		pte_alloc_kernel(pmd, addr) :
1377		pte_alloc_map_lock(mm, pmd, addr, &ptl);
1378	if (!pte)
1379		return -ENOMEM;
1380
1381	BUG_ON(pmd_huge(*pmd));
1382
1383	pmd_page = pmd_page(*pmd);
1384
1385	do {
1386		err = fn(pte, pmd_page, addr, data);
1387		if (err)
1388			break;
1389	} while (pte++, addr += PAGE_SIZE, addr != end);
1390
1391	if (mm != &init_mm)
1392		pte_unmap_unlock(pte-1, ptl);
1393	return err;
1394}
1395
1396static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
1397				     unsigned long addr, unsigned long end,
1398				     pte_fn_t fn, void *data)
1399{
1400	pmd_t *pmd;
1401	unsigned long next;
1402	int err;
1403
1404	pmd = pmd_alloc(mm, pud, addr);
1405	if (!pmd)
1406		return -ENOMEM;
1407	do {
1408		next = pmd_addr_end(addr, end);
1409		err = apply_to_pte_range(mm, pmd, addr, next, fn, data);
1410		if (err)
1411			break;
1412	} while (pmd++, addr = next, addr != end);
1413	return err;
1414}
1415
1416static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
1417				     unsigned long addr, unsigned long end,
1418				     pte_fn_t fn, void *data)
1419{
1420	pud_t *pud;
1421	unsigned long next;
1422	int err;
1423
1424	pud = pud_alloc(mm, pgd, addr);
1425	if (!pud)
1426		return -ENOMEM;
1427	do {
1428		next = pud_addr_end(addr, end);
1429		err = apply_to_pmd_range(mm, pud, addr, next, fn, data);
1430		if (err)
1431			break;
1432	} while (pud++, addr = next, addr != end);
1433	return err;
1434}
1435
1436/*
1437 * Scan a region of virtual memory, filling in page tables as necessary
1438 * and calling a provided function on each leaf page table.
1439 */
1440int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
1441			unsigned long size, pte_fn_t fn, void *data)
1442{
1443	pgd_t *pgd;
1444	unsigned long next;
1445	unsigned long end = addr + size;
1446	int err;
1447
1448	BUG_ON(addr >= end);
1449	pgd = pgd_offset(mm, addr);
1450	do {
1451		next = pgd_addr_end(addr, end);
1452		err = apply_to_pud_range(mm, pgd, addr, next, fn, data);
1453		if (err)
1454			break;
1455	} while (pgd++, addr = next, addr != end);
1456	return err;
1457}
1458EXPORT_SYMBOL_GPL(apply_to_page_range);
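
/*
 * Illustrative sketch (hypothetical callback, not taken from this file):
 * a pte_fn_t that counts present ptes, driven by apply_to_page_range().
 * Note that apply_to_page_range() allocates intermediate page tables for
 * the range as a side effect.
 */
static int count_present_pte(pte_t *pte, struct page *pmd_page,
			     unsigned long addr, void *data)
{
	unsigned long *count = data;

	if (pte_present(*pte))
		(*count)++;
	return 0;
}

static unsigned long count_present_example(struct mm_struct *mm,
					    unsigned long addr,
					    unsigned long size)
{
	unsigned long count = 0;

	apply_to_page_range(mm, addr, size, count_present_pte, &count);
	return count;
}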
1459
1460/*
1461 * handle_pte_fault chooses page fault handler according to an entry
1462 * which was read non-atomically.  Before making any commitment, on
1463 * those architectures or configurations (e.g. i386 with PAE) which
1464 * might give a mix of unmatched parts, do_swap_page and do_file_page
1465 * must check under lock before unmapping the pte and proceeding
1466 * (but do_wp_page is only called after already making such a check;
1467 * and do_anonymous_page and do_no_page can safely check later on).
1468 */
1469static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
1470				pte_t *page_table, pte_t orig_pte)
1471{
1472	int same = 1;
1473#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
1474	if (sizeof(pte_t) > sizeof(unsigned long)) {
1475		spinlock_t *ptl = pte_lockptr(mm, pmd);
1476		spin_lock(ptl);
1477		same = pte_same(*page_table, orig_pte);
1478		spin_unlock(ptl);
1479	}
1480#endif
1481	pte_unmap(page_table);
1482	return same;
1483}
1484
1485/*
1486 * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
1487 * servicing faults for write access.  In the normal case we always want
1488 * pte_mkwrite.  But get_user_pages can cause write faults for mappings
1489 * that do not have writing enabled, when used by access_process_vm.
1490 */
1491static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
1492{
1493	if (likely(vma->vm_flags & VM_WRITE))
1494		pte = pte_mkwrite(pte);
1495	return pte;
1496}
1497
1498static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
1499{
1500	/*
1501	 * If the source page was a PFN mapping, we don't have
1502	 * a "struct page" for it. We do a best-effort copy by
1503	 * just copying from the original user address. If that
1504	 * fails, we just zero-fill it. Live with it.
1505	 */
1506	if (unlikely(!src)) {
1507		void *kaddr = kmap_atomic(dst, KM_USER0);
1508		void __user *uaddr = (void __user *)(va & PAGE_MASK);
1509
1510		/*
1511		 * This really shouldn't fail, because the page is there
1512		 * in the page tables. But it might just be unreadable,
1513		 * in which case we just give up and fill the result with
1514		 * zeroes.
1515		 */
1516		if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
1517			memset(kaddr, 0, PAGE_SIZE);
1518		kunmap_atomic(kaddr, KM_USER0);
1519		flush_dcache_page(dst);
1520		return;
1521
1522	}
1523	copy_user_highpage(dst, src, va, vma);
1524}
1525
1526/*
1527 * This routine handles present pages, when users try to write
1528 * to a shared page. It is done by copying the page to a new address
1529 * and decrementing the shared-page counter for the old page.
1530 *
1531 * Note that this routine assumes that the protection checks have been
1532 * done by the caller (the low-level page fault routine in most cases).
1533 * Thus we can safely just mark it writable once we've done any necessary
1534 * COW.
1535 *
1536 * We also mark the page dirty at this point even though the page will
1537 * change only once the write actually happens. This avoids a few races,
1538 * and potentially makes it more efficient.
1539 *
1540 * We enter with non-exclusive mmap_sem (to exclude vma changes,
1541 * but allow concurrent faults), with pte both mapped and locked.
1542 * We return with mmap_sem still held, but pte unmapped and unlocked.
1543 */
1544static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
1545		unsigned long address, pte_t *page_table, pmd_t *pmd,
1546		spinlock_t *ptl, pte_t orig_pte)
1547{
1548	struct page *old_page, *new_page;
1549	pte_t entry;
1550	int reuse = 0, ret = 0;
1551	int page_mkwrite = 0;
1552	struct page *dirty_page = NULL;
1553
1554	old_page = vm_normal_page(vma, address, orig_pte);
1555	if (!old_page)
1556		goto gotten;
1557
1558	/*
1559	 * Take out anonymous pages first, anonymous shared vmas are
1560	 * not dirty accountable.
1561	 */
1562	if (PageAnon(old_page)) {
1563		if (!TestSetPageLocked(old_page)) {
1564			reuse = can_share_swap_page(old_page);
1565			unlock_page(old_page);
1566		}
1567	} else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
1568					(VM_WRITE|VM_SHARED))) {
1569		/*
1570		 * Only catch write-faults on shared writable pages,
1571		 * read-only shared pages can get COWed by
1572		 * get_user_pages(.write=1, .force=1).
1573		 */
1574		if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
1575			/*
1576			 * Notify the address space that the page is about to
1577			 * become writable so that it can prohibit this or wait
1578			 * for the page to get into an appropriate state.
1579			 *
1580			 * We do this without the lock held, so that it can
1581			 * sleep if it needs to.
1582			 */
1583			page_cache_get(old_page);
1584			pte_unmap_unlock(page_table, ptl);
1585
1586			if (vma->vm_ops->page_mkwrite(vma, old_page) < 0)
1587				goto unwritable_page;
1588
1589			/*
1590			 * Since we dropped the lock we need to revalidate
1591			 * the PTE as someone else may have changed it.  If
1592			 * they did, we just return, as we can count on the
1593			 * MMU to tell us if they didn't also make it writable.
1594			 */
1595			page_table = pte_offset_map_lock(mm, pmd, address,
1596							 &ptl);
1597			page_cache_release(old_page);
1598			if (!pte_same(*page_table, orig_pte))
1599				goto unlock;
1600
1601			page_mkwrite = 1;
1602		}
1603		dirty_page = old_page;
1604		get_page(dirty_page);
1605		reuse = 1;
1606	}
1607
1608	if (reuse) {
1609		flush_cache_page(vma, address, pte_pfn(orig_pte));
1610		entry = pte_mkyoung(orig_pte);
1611		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
1612		if (ptep_set_access_flags(vma, address, page_table, entry,1))
1613			update_mmu_cache(vma, address, entry);
1614		ret |= VM_FAULT_WRITE;
1615		goto unlock;
1616	}
1617
1618	/*
1619	 * Ok, we need to copy. Oh, well..
1620	 */
1621	page_cache_get(old_page);
1622gotten:
1623	pte_unmap_unlock(page_table, ptl);
1624
1625	if (unlikely(anon_vma_prepare(vma)))
1626		goto oom;
1627	VM_BUG_ON(old_page == ZERO_PAGE(0));
1628	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
1629	if (!new_page)
1630		goto oom;
1631	cow_user_page(new_page, old_page, address, vma);
1632
1633	/*
1634	 * Re-check the pte - we dropped the lock
1635	 */
1636	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
1637	if (likely(pte_same(*page_table, orig_pte))) {
1638		if (old_page) {
1639			page_remove_rmap(old_page, vma);
1640			if (!PageAnon(old_page)) {
1641				dec_mm_counter(mm, file_rss);
1642				inc_mm_counter(mm, anon_rss);
1643			}
1644		} else
1645			inc_mm_counter(mm, anon_rss);
1646		flush_cache_page(vma, address, pte_pfn(orig_pte));
1647		entry = mk_pte(new_page, vma->vm_page_prot);
1648		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
1649		/*
1650		 * Clear the pte entry and flush it first, before updating the
1651		 * pte with the new entry. This will avoid a race condition
1652		 * seen in the presence of one thread doing SMC and another
1653		 * thread doing COW.
1654		 */
1655		ptep_clear_flush(vma, address, page_table);
1656		set_pte_at(mm, address, page_table, entry);
1657		update_mmu_cache(vma, address, entry);
1658		lru_cache_add_active(new_page);
1659		page_add_new_anon_rmap(new_page, vma, address);
1660
1661		/* Free the old page.. */
1662		new_page = old_page;
1663		ret |= VM_FAULT_WRITE;
1664	}
1665	if (new_page)
1666		page_cache_release(new_page);
1667	if (old_page)
1668		page_cache_release(old_page);
1669unlock:
1670	pte_unmap_unlock(page_table, ptl);
1671	if (dirty_page) {
1672		if (vma->vm_file)
1673			file_update_time(vma->vm_file);
1674
1675		/*
1676		 * Yes, Virginia, this is actually required to prevent a race
1677		 * with clear_page_dirty_for_io() from clearing the page dirty
1678		 * bit after it clears all dirty ptes, but before a racing
1679		 * do_wp_page installs a dirty pte.
1680		 *
1681		 * do_no_page is protected similarly.
1682		 */
1683		wait_on_page_locked(dirty_page);
1684		set_page_dirty_balance(dirty_page, page_mkwrite);
1685		put_page(dirty_page);
1686	}
1687	return ret;
1688oom:
1689	if (old_page)
1690		page_cache_release(old_page);
1691	return VM_FAULT_OOM;
1692
1693unwritable_page:
1694	page_cache_release(old_page);
1695	return VM_FAULT_SIGBUS;
1696}
1697
1698/*
1699 * Helper functions for unmap_mapping_range().
1700 *
1701 * __ Notes on dropping i_mmap_lock to reduce latency while unmapping __
1702 *
1703 * We have to restart searching the prio_tree whenever we drop the lock,
1704 * since the iterator is only valid while the lock is held, and anyway
1705 * a later vma might be split and reinserted earlier while lock dropped.
1706 *
1707 * The list of nonlinear vmas could be handled more efficiently, using
1708 * a placeholder, but handle it in the same way until a need is shown.
1709 * It is important to search the prio_tree before nonlinear list: a vma
1710 * may become nonlinear and be shifted from prio_tree to nonlinear list
1711 * while the lock is dropped; but never shifted from list to prio_tree.
1712 *
1713 * In order to make forward progress despite restarting the search,
1714 * vm_truncate_count is used to mark a vma as now dealt with, so we can
1715 * quickly skip it next time around.  Since the prio_tree search only
1716 * shows us those vmas affected by unmapping the range in question, we
1717 * can't efficiently keep all vmas in step with mapping->truncate_count:
1718 * so instead reset them all whenever it wraps back to 0 (then go to 1).
1719 * mapping->truncate_count and vma->vm_truncate_count are protected by
1720 * i_mmap_lock.
1721 *
1722 * In order to make forward progress despite repeatedly restarting some
1723 * large vma, note the restart_addr from unmap_vmas when it breaks out:
1724 * and restart from that address when we reach that vma again.  It might
1725 * have been split or merged, shrunk or extended, but never shifted: so
1726 * restart_addr remains valid so long as it remains in the vma's range.
1727 * unmap_mapping_range forces truncate_count to leap over page-aligned
1728 * values so we can save vma's restart_addr in its truncate_count field.
1729 */
1730#define is_restart_addr(truncate_count) (!((truncate_count) & ~PAGE_MASK))
1731
1732static void reset_vma_truncate_counts(struct address_space *mapping)
1733{
1734	struct vm_area_struct *vma;
1735	struct prio_tree_iter iter;
1736
1737	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, 0, ULONG_MAX)
1738		vma->vm_truncate_count = 0;
1739	list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
1740		vma->vm_truncate_count = 0;
1741}
1742
1743static int unmap_mapping_range_vma(struct vm_area_struct *vma,
1744		unsigned long start_addr, unsigned long end_addr,
1745		struct zap_details *details)
1746{
1747	unsigned long restart_addr;
1748	int need_break;
1749
1750	/*
1751	 * files that support invalidating or truncating portions of the
1752	 * file from under mmaped areas must have their ->fault function
1753	 * return a locked page (and set VM_FAULT_LOCKED in the return).
1754	 * This provides synchronisation against concurrent unmapping here.
1755	 */
1756
1757again:
1758	restart_addr = vma->vm_truncate_count;
1759	if (is_restart_addr(restart_addr) && start_addr < restart_addr) {
1760		start_addr = restart_addr;
1761		if (start_addr >= end_addr) {
1762			/* Top of vma has been split off since last time */
1763			vma->vm_truncate_count = details->truncate_count;
1764			return 0;
1765		}
1766	}
1767
1768	restart_addr = zap_page_range(vma, start_addr,
1769					end_addr - start_addr, details);
1770	need_break = need_resched() || spin_needbreak(details->i_mmap_lock);
1771
1772	if (restart_addr >= end_addr) {
1773		/* We have now completed this vma: mark it so */
1774		vma->vm_truncate_count = details->truncate_count;
1775		if (!need_break)
1776			return 0;
1777	} else {
1778		/* Note restart_addr in vma's truncate_count field */
1779		vma->vm_truncate_count = restart_addr;
1780		if (!need_break)
1781			goto again;
1782	}
1783
1784	spin_unlock(details->i_mmap_lock);
1785	cond_resched();
1786	spin_lock(details->i_mmap_lock);
1787	return -EINTR;
1788}
1789
1790static inline void unmap_mapping_range_tree(struct prio_tree_root *root,
1791					    struct zap_details *details)
1792{
1793	struct vm_area_struct *vma;
1794	struct prio_tree_iter iter;
1795	pgoff_t vba, vea, zba, zea;
1796
1797restart:
1798	vma_prio_tree_foreach(vma, &iter, root,
1799			details->first_index, details->last_index) {
1800		/* Skip quickly over those we have already dealt with */
1801		if (vma->vm_truncate_count == details->truncate_count)
1802			continue;
1803
1804		vba = vma->vm_pgoff;
1805		vea = vba + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) - 1;
1806		/* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */
1807		zba = details->first_index;
1808		if (zba < vba)
1809			zba = vba;
1810		zea = details->last_index;
1811		if (zea > vea)
1812			zea = vea;
1813
1814		if (unmap_mapping_range_vma(vma,
1815			((zba - vba) << PAGE_SHIFT) + vma->vm_start,
1816			((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
1817				details) < 0)
1818			goto restart;
1819	}
1820}
1821
1822static inline void unmap_mapping_range_list(struct list_head *head,
1823					    struct zap_details *details)
1824{
1825	struct vm_area_struct *vma;
1826
1827	/*
1828	 * In nonlinear VMAs there is no correspondence between virtual address
1829	 * offset and file offset.  So we must perform an exhaustive search
1830	 * across *all* the pages in each nonlinear VMA, not just the pages
1831	 * whose virtual address lies outside the file truncation point.
1832	 */
1833restart:
1834	list_for_each_entry(vma, head, shared.vm_set.list) {
1835		/* Skip quickly over those we have already dealt with */
1836		if (vma->vm_truncate_count == details->truncate_count)
1837			continue;
1838		details->nonlinear_vma = vma;
1839		if (unmap_mapping_range_vma(vma, vma->vm_start,
1840					vma->vm_end, details) < 0)
1841			goto restart;
1842	}
1843}
1844
1845/**
1846 * unmap_mapping_range - unmap the portion of all mmaps in the specified address_space corresponding to the specified page range in the underlying file.
1847 * @mapping: the address space containing mmaps to be unmapped.
1848 * @holebegin: byte in first page to unmap, relative to the start of
1849 * the underlying file.  This will be rounded down to a PAGE_SIZE
1850 * boundary.  Note that this is different from vmtruncate(), which
1851 * must keep the partial page.  In contrast, we must get rid of
1852 * partial pages.
1853 * @holelen: size of prospective hole in bytes.  This will be rounded
1854 * up to a PAGE_SIZE boundary.  A holelen of zero truncates to the
1855 * end of the file.
1856 * @even_cows: 1 when truncating a file, unmap even private COWed pages;
1857 * but 0 when invalidating pagecache, don't throw away private data.
1858 */
1859void unmap_mapping_range(struct address_space *mapping,
1860		loff_t const holebegin, loff_t const holelen, int even_cows)
1861{
1862	struct zap_details details;
1863	pgoff_t hba = holebegin >> PAGE_SHIFT;
1864	pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
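	/*
	 * Example (assuming 4K pages): holebegin = 12288, holelen = 8192
	 * gives hba = 3 and hlen = 2, i.e. file pages 3 and 4.  A holelen
	 * of 0 gives hlen = 0, which makes last_index wrap below
	 * first_index and so unmaps to the end of the file (see below).
	 */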
1865
1866	/* Check for overflow. */
1867	if (sizeof(holelen) > sizeof(hlen)) {
1868		long long holeend =
1869			(holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
1870		if (holeend & ~(long long)ULONG_MAX)
1871			hlen = ULONG_MAX - hba + 1;
1872	}
1873
1874	details.check_mapping = even_cows? NULL: mapping;
1875	details.nonlinear_vma = NULL;
1876	details.first_index = hba;
1877	details.last_index = hba + hlen - 1;
1878	if (details.last_index < details.first_index)
1879		details.last_index = ULONG_MAX;
1880	details.i_mmap_lock = &mapping->i_mmap_lock;
1881
1882	spin_lock(&mapping->i_mmap_lock);
1883
1884	/* Protect against endless unmapping loops */
1885	mapping->truncate_count++;
1886	if (unlikely(is_restart_addr(mapping->truncate_count))) {
1887		if (mapping->truncate_count == 0)
1888			reset_vma_truncate_counts(mapping);
1889		mapping->truncate_count++;
1890	}
1891	details.truncate_count = mapping->truncate_count;
1892
1893	if (unlikely(!prio_tree_empty(&mapping->i_mmap)))
1894		unmap_mapping_range_tree(&mapping->i_mmap, &details);
1895	if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
1896		unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
1897	spin_unlock(&mapping->i_mmap_lock);
1898}
1899EXPORT_SYMBOL(unmap_mapping_range);
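/*
 * Illustrative use: a caller wanting to unmap all mappings backing bytes
 * [16384, 32768) of a file, while keeping private COWed pages, could do
 * roughly
 *
 *	unmap_mapping_range(inode->i_mapping, 16384, 16384, 0);
 *
 * i.e. holebegin and holelen in bytes, with even_cows = 0.
 */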
1900
1901/**
1902 * vmtruncate - unmap mappings "freed" by truncate() syscall
1903 * @inode: inode of the file used
1904 * @offset: file offset to start truncating
1905 *
1906 * NOTE! We have to be ready to update the memory sharing
1907 * between the file and the memory map for a potential last
1908 * incomplete page.  Ugly, but necessary.
1909 */
1910int vmtruncate(struct inode * inode, loff_t offset)
1911{
1912	struct address_space *mapping = inode->i_mapping;
1913	unsigned long limit;
1914
1915	if (inode->i_size < offset)
1916		goto do_expand;
1917	/*
1918	 * truncation of in-use swapfiles is disallowed - it would cause
1919	 * subsequent swapout to scribble on the now-freed blocks.
1920	 */
1921	if (IS_SWAPFILE(inode))
1922		goto out_busy;
1923	i_size_write(inode, offset);
1924
1925	/*
1926	 * unmap_mapping_range is called twice, first simply for efficiency
1927	 * so that truncate_inode_pages does fewer single-page unmaps. However
1928	 * after this first call, and before truncate_inode_pages finishes,
1929	 * it is possible for private pages to be COWed, which remain after
1930	 * truncate_inode_pages finishes, hence the second unmap_mapping_range
1931	 * call must be made for correctness.
1932	 */
1933	unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
1934	truncate_inode_pages(mapping, offset);
1935	unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
1936	goto out_truncate;
1937
1938do_expand:
1939	limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
1940	if (limit != RLIM_INFINITY && offset > limit)
1941		goto out_sig;
1942	if (offset > inode->i_sb->s_maxbytes)
1943		goto out_big;
1944	i_size_write(inode, offset);
1945
1946out_truncate:
1947	if (inode->i_op && inode->i_op->truncate)
1948		inode->i_op->truncate(inode);
1949	return 0;
1950out_sig:
1951	send_sig(SIGXFSZ, current, 0);
1952out_big:
1953	return -EFBIG;
1954out_busy:
1955	return -ETXTBSY;
1956}
1957EXPORT_SYMBOL(vmtruncate);
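/*
 * vmtruncate() is typically reached from a filesystem's ->setattr path,
 * e.g. via inode_setattr() when ATTR_SIZE is set.
 */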
1958
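/*
 * Punch a hole: unmap and truncate the pagecache over the byte range
 * [offset, end] and, if the filesystem supports it, free the underlying
 * blocks via ->truncate_range.  Used by hole-punching callers such as
 * madvise(MADV_REMOVE); returns -ENOSYS when the filesystem provides no
 * ->truncate_range method.
 */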
1959int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
1960{
1961	struct address_space *mapping = inode->i_mapping;
1962
1963	/*
1964	 * If the underlying filesystem is not going to provide
1965	 * a way to truncate a range of blocks (punch a hole) -
1966	 * we should return failure right now.
1967	 */
1968	if (!inode->i_op || !inode->i_op->truncate_range)
1969		return -ENOSYS;
1970
1971	mutex_lock(&inode->i_mutex);
1972	down_write(&inode->i_alloc_sem);
1973	unmap_mapping_range(mapping, offset, (end - offset), 1);
1974	truncate_inode_pages_range(mapping, offset, end);
1975	unmap_mapping_range(mapping, offset, (end - offset), 1);
1976	inode->i_op->truncate_range(inode, offset, end);
1977	up_write(&inode->i_alloc_sem);
1978	mutex_unlock(&inode->i_mutex);
1979
1980	return 0;
1981}
1982
1983/*
1984 * We enter with non-exclusive mmap_sem (to exclude vma changes,
1985 * but allow concurrent faults), and pte mapped but not yet locked.
1986 * We return with mmap_sem still held, but pte unmapped and unlocked.
1987 */
1988static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
1989		unsigned long address, pte_t *page_table, pmd_t *pmd,
1990		int write_access, pte_t orig_pte)
1991{
1992	spinlock_t *ptl;
1993	struct page *page;
1994	swp_entry_t entry;
1995	pte_t pte;
1996	int ret = 0;
1997
1998	if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
1999		goto out;
2000
2001	entry = pte_to_swp_entry(orig_pte);
2002	if (is_migration_entry(entry)) {
2003		migration_entry_wait(mm, pmd, address);
2004		goto out;
2005	}
2006	delayacct_set_flag(DELAYACCT_PF_SWAPIN);
2007	page = lookup_swap_cache(entry);
2008	if (!page) {
2009		grab_swap_token(); /* Contend for token _before_ read-in */
2010		page = swapin_readahead(entry, vma, address);
2011		if (!page) {
2012			/*
2013			 * Back out if somebody else faulted in this pte
2014			 * while we released the pte lock.
2015			 */
2016			page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
2017			if (likely(pte_same(*page_table, orig_pte)))
2018				ret = VM_FAULT_OOM;
2019			delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
2020			goto unlock;
2021		}
2022
2023		/* Had to read the page from swap area: Major fault */
2024		ret = VM_FAULT_MAJOR;
2025		count_vm_event(PGMAJFAULT);
2026	}
2027
2028	mark_page_accessed(page);
2029	lock_page(page);
2030	delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
2031
2032	/*
2033	 * Back out if somebody else already faulted in this pte.
2034	 */
2035	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
2036	if (unlikely(!pte_same(*page_table, orig_pte)))
2037		goto out_nomap;
2038
2039	if (unlikely(!PageUptodate(page))) {
2040		ret = VM_FAULT_SIGBUS;
2041		goto out_nomap;
2042	}
2043
2044	/* The page isn't present yet, go ahead with the fault. */
2045
2046	inc_mm_counter(mm, anon_rss);
2047	pte = mk_pte(page, vma->vm_page_prot);
2048	if (write_access && can_share_swap_page(page)) {
2049		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
2050		write_access = 0;
2051	}
2052
2053	flush_icache_page(vma, page);
2054	set_pte_at(mm, address, page_table, pte);
2055	page_add_anon_rmap(page, vma, address);
2056
2057	swap_free(entry);
2058	if (vm_swap_full())
2059		remove_exclusive_swap_page(page);
2060	unlock_page(page);
2061
2062	if (write_access) {
2063		/* XXX: We could OR the do_wp_page code with this one? */
2064		if (do_wp_page(mm, vma, address,
2065				page_table, pmd, ptl, pte) & VM_FAULT_OOM)
2066			ret = VM_FAULT_OOM;
2067		goto out;
2068	}
2069
2070	/* No need to invalidate - it was non-present before */
2071	update_mmu_cache(vma, address, pte);
2072unlock:
2073	pte_unmap_unlock(page_table, ptl);
2074out:
2075	return ret;
2076out_nomap:
2077	pte_unmap_unlock(page_table, ptl);
2078	unlock_page(page);
2079	page_cache_release(page);
2080	return ret;
2081}
2082
2083/*
2084 * We enter with non-exclusive mmap_sem (to exclude vma changes,
2085 * but allow concurrent faults), and pte mapped but not yet locked.
2086 * We return with mmap_sem still held, but pte unmapped and unlocked.
2087 */
2088static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
2089		unsigned long address, pte_t *page_table, pmd_t *pmd,
2090		int write_access)
2091{
2092	struct page *page;
2093	spinlock_t *ptl;
2094	pte_t entry;
2095
2096	/* Allocate our own private page. */
2097	pte_unmap(page_table);
2098
2099	if (unlikely(anon_vma_prepare(vma)))
2100		goto oom;
2101	page = alloc_zeroed_user_highpage_movable(vma, address);
2102	if (!page)
2103		goto oom;
2104
2105	entry = mk_pte(page, vma->vm_page_prot);
2106	entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2107
2108	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
2109	if (!pte_none(*page_table))
2110		goto release;
2111	inc_mm_counter(mm, anon_rss);
2112	lru_cache_add_active(page);
2113	page_add_new_anon_rmap(page, vma, address);
2114	set_pte_at(mm, address, page_table, entry);
2115
2116	/* No need to invalidate - it was non-present before */
2117	update_mmu_cache(vma, address, entry);
2118unlock:
2119	pte_unmap_unlock(page_table, ptl);
2120	return 0;
2121release:
2122	page_cache_release(page);
2123	goto unlock;
2124oom:
2125	return VM_FAULT_OOM;
2126}
2127
2128/*
2129 * __do_fault() tries to create a new page mapping. It aggressively
2130 * tries to share with existing pages, but makes a separate copy if
2131 * the FAULT_FLAG_WRITE is set in the flags parameter in order to avoid
2132 * the next page fault.
2133 *
2134 * As this is called only for pages that do not currently exist, we
2135 * do not need to flush old virtual caches or the TLB.
2136 *
2137 * We enter with non-exclusive mmap_sem (to exclude vma changes,
2138 * but allow concurrent faults), and pte neither mapped nor locked.
2139 * We return with mmap_sem still held, but pte unmapped and unlocked.
2140 */
2141static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2142		unsigned long address, pmd_t *pmd,
2143		pgoff_t pgoff, unsigned int flags, pte_t orig_pte)
2144{
2145	pte_t *page_table;
2146	spinlock_t *ptl;
2147	struct page *page;
2148	pte_t entry;
2149	int anon = 0;
2150	struct page *dirty_page = NULL;
2151	struct vm_fault vmf;
2152	int ret;
2153	int page_mkwrite = 0;
2154
2155	vmf.virtual_address = (void __user *)(address & PAGE_MASK);
2156	vmf.pgoff = pgoff;
2157	vmf.flags = flags;
2158	vmf.page = NULL;
2159
2160	BUG_ON(vma->vm_flags & VM_PFNMAP);
2161
2162	if (likely(vma->vm_ops->fault)) {
2163		ret = vma->vm_ops->fault(vma, &vmf);
2164		if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
2165			return ret;
2166	} else {
2167		/* Legacy ->nopage path */
2168		ret = 0;
2169		vmf.page = vma->vm_ops->nopage(vma, address & PAGE_MASK, &ret);
2170		/* no page was available -- either SIGBUS or OOM */
2171		if (unlikely(vmf.page == NOPAGE_SIGBUS))
2172			return VM_FAULT_SIGBUS;
2173		else if (unlikely(vmf.page == NOPAGE_OOM))
2174			return VM_FAULT_OOM;
2175	}
2176
2177	/*
2178	 * For consistency in subsequent calls, make the faulted page always
2179	 * locked.
2180	 */
2181	if (unlikely(!(ret & VM_FAULT_LOCKED)))
2182		lock_page(vmf.page);
2183	else
2184		VM_BUG_ON(!PageLocked(vmf.page));
2185
2186	/*
2187	 * Should we do an early C-O-W break?
2188	 */
2189	page = vmf.page;
2190	if (flags & FAULT_FLAG_WRITE) {
2191		if (!(vma->vm_flags & VM_SHARED)) {
2192			anon = 1;
2193			if (unlikely(anon_vma_prepare(vma))) {
2194				ret = VM_FAULT_OOM;
2195				goto out;
2196			}
2197			page = alloc_page_vma(GFP_HIGHUSER_MOVABLE,
2198						vma, address);
2199			if (!page) {
2200				ret = VM_FAULT_OOM;
2201				goto out;
2202			}
2203			copy_user_highpage(page, vmf.page, address, vma);
2204		} else {
2205			/*
2206			 * If the page will be shareable, see if the backing
2207			 * address space wants to know that the page is about
2208			 * to become writable
2209			 */
2210			if (vma->vm_ops->page_mkwrite) {
2211				unlock_page(page);
2212				if (vma->vm_ops->page_mkwrite(vma, page) < 0) {
2213					ret = VM_FAULT_SIGBUS;
2214					anon = 1; /* no anon but release vmf.page */
2215					goto out_unlocked;
2216				}
2217				lock_page(page);
2218				/*
2219				 * XXX: this is not quite right (racy vs
2220				 * invalidate) to unlock and relock the page
2221				 * like this, however a better fix requires
2222				 * reworking page_mkwrite locking API, which
2223				 * is better done later.
2224				 */
2225				if (!page->mapping) {
2226					ret = 0;
2227					anon = 1; /* no anon but release vmf.page */
2228					goto out;
2229				}
2230				page_mkwrite = 1;
2231			}
2232		}
2233
2234	}
2235
2236	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
2237
2238	/*
2239	 * This silly early PAGE_DIRTY setting removes a race
2240	 * due to the bad i386 page protection. But it's valid
2241	 * for other architectures too.
2242	 *
2243	 * Note that if write_access is true, we either now have
2244	 * an exclusive copy of the page, or this is a shared mapping,
2245	 * so we can make it writable and dirty to avoid having to
2246	 * handle that later.
2247	 */
2248	/* Only go through if we didn't race with anybody else... */
2249	if (likely(pte_same(*page_table, orig_pte))) {
2250		flush_icache_page(vma, page);
2251		entry = mk_pte(page, vma->vm_page_prot);
2252		if (flags & FAULT_FLAG_WRITE)
2253			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2254		set_pte_at(mm, address, page_table, entry);
2255		if (anon) {
2256			inc_mm_counter(mm, anon_rss);
2257			lru_cache_add_active(page);
2258			page_add_new_anon_rmap(page, vma, address);
2259		} else {
2260			inc_mm_counter(mm, file_rss);
2261			page_add_file_rmap(page);
2262			if (flags & FAULT_FLAG_WRITE) {
2263				dirty_page = page;
2264				get_page(dirty_page);
2265			}
2266		}
2267
2268		/* no need to invalidate: a not-present page won't be cached */
2269		update_mmu_cache(vma, address, entry);
2270	} else {
2271		if (anon)
2272			page_cache_release(page);
2273		else
2274			anon = 1; /* no anon but release faulted_page */
2275	}
2276
2277	pte_unmap_unlock(page_table, ptl);
2278
2279out:
2280	unlock_page(vmf.page);
2281out_unlocked:
2282	if (anon)
2283		page_cache_release(vmf.page);
2284	else if (dirty_page) {
2285		if (vma->vm_file)
2286			file_update_time(vma->vm_file);
2287
2288		set_page_dirty_balance(dirty_page, page_mkwrite);
2289		put_page(dirty_page);
2290	}
2291
2292	return ret;
2293}
2294
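/*
 * Normal, linear file-backed fault: compute which file page backs the
 * faulting address from the vma's vm_pgoff and hand the real work to
 * __do_fault().
 */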
2295static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2296		unsigned long address, pte_t *page_table, pmd_t *pmd,
2297		int write_access, pte_t orig_pte)
2298{
2299	pgoff_t pgoff = (((address & PAGE_MASK)
2300			- vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
2301	unsigned int flags = (write_access ? FAULT_FLAG_WRITE : 0);
2302
2303	pte_unmap(page_table);
2304	return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
2305}
2306
2307
2308/*
2309 * do_no_pfn() tries to create a new page mapping for a page without
2310 * a struct page backing it.
2311 *
2312 * As this is called only for pages that do not currently exist, we
2313 * do not need to flush old virtual caches or the TLB.
2314 *
2315 * We enter with non-exclusive mmap_sem (to exclude vma changes,
2316 * but allow concurrent faults), and pte mapped but not yet locked.
2317 * We return with mmap_sem still held, but pte unmapped and unlocked.
2318 *
2319 * It is expected that the ->nopfn handler always returns the same pfn
2320 * for a given virtual mapping.
2321 *
2322 * Mark this `noinline' to prevent it from bloating the main pagefault code.
2323 */
2324static noinline int do_no_pfn(struct mm_struct *mm, struct vm_area_struct *vma,
2325		     unsigned long address, pte_t *page_table, pmd_t *pmd,
2326		     int write_access)
2327{
2328	spinlock_t *ptl;
2329	pte_t entry;
2330	unsigned long pfn;
2331
2332	pte_unmap(page_table);
2333	BUG_ON(!(vma->vm_flags & VM_PFNMAP));
2334	BUG_ON(is_cow_mapping(vma->vm_flags));
2335
2336	pfn = vma->vm_ops->nopfn(vma, address & PAGE_MASK);
2337	if (unlikely(pfn == NOPFN_OOM))
2338		return VM_FAULT_OOM;
2339	else if (unlikely(pfn == NOPFN_SIGBUS))
2340		return VM_FAULT_SIGBUS;
2341	else if (unlikely(pfn == NOPFN_REFAULT))
2342		return 0;
2343
2344	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
2345
2346	/* Only go through if we didn't race with anybody else... */
2347	if (pte_none(*page_table)) {
2348		entry = pfn_pte(pfn, vma->vm_page_prot);
2349		if (write_access)
2350			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2351		set_pte_at(mm, address, page_table, entry);
2352	}
2353	pte_unmap_unlock(page_table, ptl);
2354	return 0;
2355}
2356
2357/*
2358 * Fault on a previously existing named mapping. Repopulate the pte
2359 * from the encoded file_pte if possible. This enables swappable
2360 * nonlinear vmas.
2361 *
2362 * We enter with non-exclusive mmap_sem (to exclude vma changes,
2363 * but allow concurrent faults), and pte mapped but not yet locked.
2364 * We return with mmap_sem still held, but pte unmapped and unlocked.
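 *
 * Nonlinear vmas are set up by remap_file_pages(2): when one of their
 * ptes is unmapped, the file offset it mapped is encoded in the
 * now-not-present pte, and pte_to_pgoff() recovers it here.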
2365 */
2366static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2367		unsigned long address, pte_t *page_table, pmd_t *pmd,
2368		int write_access, pte_t orig_pte)
2369{
2370	unsigned int flags = FAULT_FLAG_NONLINEAR |
2371				(write_access ? FAULT_FLAG_WRITE : 0);
2372	pgoff_t pgoff;
2373
2374	if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
2375		return 0;
2376
2377	if (unlikely(!(vma->vm_flags & VM_NONLINEAR) ||
2378			!(vma->vm_flags & VM_CAN_NONLINEAR))) {
2379		/*
2380		 * Page table corrupted: show pte and kill process.
2381		 */
2382		print_bad_pte(vma, orig_pte, address);
2383		return VM_FAULT_OOM;
2384	}
2385
2386	pgoff = pte_to_pgoff(orig_pte);
2387	return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
2388}
2389
2390/*
2391 * These routines also need to handle stuff like marking pages dirty
2392 * and/or accessed for architectures that don't do it in hardware (most
2393 * RISC architectures).  The early dirtying is also good on the i386.
2394 *
2395 * There is also a hook called "update_mmu_cache()" that architectures
2396 * with external mmu caches can use to update those (ie the Sparc or
2397 * PowerPC hashed page tables that act as extended TLBs).
2398 *
2399 * We enter with non-exclusive mmap_sem (to exclude vma changes,
2400 * but allow concurrent faults), and pte mapped but not yet locked.
2401 * We return with mmap_sem still held, but pte unmapped and unlocked.
2402 */
2403static inline int handle_pte_fault(struct mm_struct *mm,
2404		struct vm_area_struct *vma, unsigned long address,
2405		pte_t *pte, pmd_t *pmd, int write_access)
2406{
2407	pte_t entry;
2408	spinlock_t *ptl;
2409
2410	entry = *pte;
2411	if (!pte_present(entry)) {
2412		if (pte_none(entry)) {
2413			if (vma->vm_ops) {
2414				if (vma->vm_ops->fault || vma->vm_ops->nopage)
2415					return do_linear_fault(mm, vma, address,
2416						pte, pmd, write_access, entry);
2417				if (unlikely(vma->vm_ops->nopfn))
2418					return do_no_pfn(mm, vma, address, pte,
2419							 pmd, write_access);
2420			}
2421			return do_anonymous_page(mm, vma, address,
2422						 pte, pmd, write_access);
2423		}
2424		if (pte_file(entry))
2425			return do_nonlinear_fault(mm, vma, address,
2426					pte, pmd, write_access, entry);
2427		return do_swap_page(mm, vma, address,
2428					pte, pmd, write_access, entry);
2429	}
2430
2431	ptl = pte_lockptr(mm, pmd);
2432	spin_lock(ptl);
2433	if (unlikely(!pte_same(*pte, entry)))
2434		goto unlock;
2435	if (write_access) {
2436		if (!pte_write(entry))
2437			return do_wp_page(mm, vma, address,
2438					pte, pmd, ptl, entry);
2439		entry = pte_mkdirty(entry);
2440	}
2441	entry = pte_mkyoung(entry);
2442	if (ptep_set_access_flags(vma, address, pte, entry, write_access)) {
2443		update_mmu_cache(vma, address, entry);
2444	} else {
2445		/*
2446		 * This is needed only for protection faults but the arch code
2447		 * is not yet telling us if this is a protection fault or not.
2448		 * This still avoids useless tlb flushes for .text page faults
2449		 * with threads.
2450		 */
2451		if (write_access)
2452			flush_tlb_page(vma, address);
2453	}
2454unlock:
2455	pte_unmap_unlock(pte, ptl);
2456	return 0;
2457}
2458
2459/*
2460 * By the time we get here, the caller already holds this mm's mmap_sem (at least for read)
2461 */
2462int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2463		unsigned long address, int write_access)
2464{
2465	pgd_t *pgd;
2466	pud_t *pud;
2467	pmd_t *pmd;
2468	pte_t *pte;
2469
2470	__set_current_state(TASK_RUNNING);
2471
2472	count_vm_event(PGFAULT);
2473
2474	if (unlikely(is_vm_hugetlb_page(vma)))
2475		return hugetlb_fault(mm, vma, address, write_access);
2476
2477	pgd = pgd_offset(mm, address);
2478	pud = pud_alloc(mm, pgd, address);
2479	if (!pud)
2480		return VM_FAULT_OOM;
2481	pmd = pmd_alloc(mm, pud, address);
2482	if (!pmd)
2483		return VM_FAULT_OOM;
2484	pte = pte_alloc_map(mm, pmd, address);
2485	if (!pte)
2486		return VM_FAULT_OOM;
2487
2488	return handle_pte_fault(mm, vma, address, pte, pmd, write_access);
2489}
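/*
 * Sketch of a typical caller (illustrative only, details vary by
 * architecture): an arch page fault handler does roughly
 *
 *	down_read(&mm->mmap_sem);
 *	vma = find_vma(mm, address);
 *	if (vma && vma->vm_start <= address)
 *		fault = handle_mm_fault(mm, vma, address, write);
 *	up_read(&mm->mmap_sem);
 *
 * and then turns VM_FAULT_OOM / VM_FAULT_SIGBUS into the appropriate
 * OOM handling or signal.
 */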
2490
2491#ifndef __PAGETABLE_PUD_FOLDED
2492/*
2493 * Allocate page upper directory.
2494 * We've already handled the fast-path in-line.
2495 */
2496int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
2497{
2498	pud_t *new = pud_alloc_one(mm, address);
2499	if (!new)
2500		return -ENOMEM;
2501
2502	spin_lock(&mm->page_table_lock);
2503	if (pgd_present(*pgd))		/* Another has populated it */
2504		pud_free(new);
2505	else
2506		pgd_populate(mm, pgd, new);
2507	spin_unlock(&mm->page_table_lock);
2508	return 0;
2509}
2510#endif /* __PAGETABLE_PUD_FOLDED */
2511
2512#ifndef __PAGETABLE_PMD_FOLDED
2513/*
2514 * Allocate page middle directory.
2515 * We've already handled the fast-path in-line.
2516 */
2517int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
2518{
2519	pmd_t *new = pmd_alloc_one(mm, address);
2520	if (!new)
2521		return -ENOMEM;
2522
2523	spin_lock(&mm->page_table_lock);
2524#ifndef __ARCH_HAS_4LEVEL_HACK
2525	if (pud_present(*pud))		/* Another has populated it */
2526		pmd_free(new);
2527	else
2528		pud_populate(mm, pud, new);
2529#else
2530	if (pgd_present(*pud))		/* Another has populated it */
2531		pmd_free(new);
2532	else
2533		pgd_populate(mm, pud, new);
2534#endif /* __ARCH_HAS_4LEVEL_HACK */
2535	spin_unlock(&mm->page_table_lock);
2536	return 0;
2537}
2538#endif /* __PAGETABLE_PMD_FOLDED */
2539
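/*
 * Fault in every page in [addr, end), which must lie within a single
 * vma, using get_user_pages(); write-fault them if the vma is writable.
 * Used by mlock-style paths.  Returns 0 if all pages were made present,
 * a negative error from get_user_pages() or -1 otherwise.
 */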
2540int make_pages_present(unsigned long addr, unsigned long end)
2541{
2542	int ret, len, write;
2543	struct vm_area_struct * vma;
2544
2545	vma = find_vma(current->mm, addr);
2546	if (!vma)
2547		return -1;
2548	write = (vma->vm_flags & VM_WRITE) != 0;
2549	BUG_ON(addr >= end);
2550	BUG_ON(end > vma->vm_end);
2551	len = DIV_ROUND_UP(end, PAGE_SIZE) - addr/PAGE_SIZE;
2552	ret = get_user_pages(current, current->mm, addr,
2553			len, write, 0, NULL, NULL);
2554	if (ret < 0)
2555		return ret;
2556	return ret == len ? 0 : -1;
2557}
2558
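/*
 * Default "gate area" helpers for architectures that do not define
 * __HAVE_ARCH_GATE_AREA.  The gate vma describes the fixed,
 * kernel-provided user mapping (such as the x86-64 vsyscall page
 * between FIXADDR_USER_START and FIXADDR_USER_END) that exists in
 * every process but is not on any mm's vma list.
 */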
2559#if !defined(__HAVE_ARCH_GATE_AREA)
2560
2561#if defined(AT_SYSINFO_EHDR)
2562static struct vm_area_struct gate_vma;
2563
2564static int __init gate_vma_init(void)
2565{
2566	gate_vma.vm_mm = NULL;
2567	gate_vma.vm_start = FIXADDR_USER_START;
2568	gate_vma.vm_end = FIXADDR_USER_END;
2569	gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
2570	gate_vma.vm_page_prot = __P101;
2571	/*
2572	 * Make sure the vDSO gets into every core dump.
2573	 * Dumping its contents makes post-mortem analysis fully interpretable
2574	 * later, without having to match up the same kernel and hardware config
2575	 * to see what PC values meant.
2576	 */
2577	gate_vma.vm_flags |= VM_ALWAYSDUMP;
2578	return 0;
2579}
2580__initcall(gate_vma_init);
2581#endif
2582
2583struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
2584{
2585#ifdef AT_SYSINFO_EHDR
2586	return &gate_vma;
2587#else
2588	return NULL;
2589#endif
2590}
2591
2592int in_gate_area_no_task(unsigned long addr)
2593{
2594#ifdef AT_SYSINFO_EHDR
2595	if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END))
2596		return 1;
2597#endif
2598	return 0;
2599}
2600
2601#endif	/* __HAVE_ARCH_GATE_AREA */
2602
2603/*
2604 * Access another process's address space.
2605 * The source/target buffer must be in kernel space.
2606 * Do not walk the page tables directly; use get_user_pages().
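 * Typical users are the ptrace() peek/poke paths and /proc/<pid>/mem.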
2607 */
2608int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
2609{
2610	struct mm_struct *mm;
2611	struct vm_area_struct *vma;
2612	struct page *page;
2613	void *old_buf = buf;
2614
2615	mm = get_task_mm(tsk);
2616	if (!mm)
2617		return 0;
2618
2619	down_read(&mm->mmap_sem);
2620	/* ignore errors, just check how much was successfully transferred */
2621	while (len) {
2622		int bytes, ret, offset;
2623		void *maddr;
2624
2625		ret = get_user_pages(tsk, mm, addr, 1,
2626				write, 1, &page, &vma);
2627		if (ret <= 0)
2628			break;
2629
2630		bytes = len;
2631		offset = addr & (PAGE_SIZE-1);
2632		if (bytes > PAGE_SIZE-offset)
2633			bytes = PAGE_SIZE-offset;
2634
2635		maddr = kmap(page);
2636		if (write) {
2637			copy_to_user_page(vma, page, addr,
2638					  maddr + offset, buf, bytes);
2639			set_page_dirty_lock(page);
2640		} else {
2641			copy_from_user_page(vma, page, addr,
2642					    buf, maddr + offset, bytes);
2643		}
2644		kunmap(page);
2645		page_cache_release(page);
2646		len -= bytes;
2647		buf += bytes;
2648		addr += bytes;
2649	}
2650	up_read(&mm->mmap_sem);
2651	mmput(mm);
2652
2653	return buf - old_buf;
2654}
2655
2656/*
2657 * Print the file name and extent of the vma containing the given address.
2658 */
2659void print_vma_addr(char *prefix, unsigned long ip)
2660{
2661	struct mm_struct *mm = current->mm;
2662	struct vm_area_struct *vma;
2663
2664	down_read(&mm->mmap_sem);
2665	vma = find_vma(mm, ip);
2666	if (vma && vma->vm_file) {
2667		struct file *f = vma->vm_file;
2668		char *buf = (char *)__get_free_page(GFP_KERNEL);
2669		if (buf) {
2670			char *p, *s;
2671
2672			p = d_path(f->f_dentry, f->f_vfsmnt, buf, PAGE_SIZE);
2673			if (IS_ERR(p))
2674				p = "?";
2675			s = strrchr(p, '/');
2676			if (s)
2677				p = s+1;
2678			printk("%s%s[%lx+%lx]", prefix, p,
2679					vma->vm_start,
2680					vma->vm_end - vma->vm_start);
2681			free_page((unsigned long)buf);
2682		}
2683	}
2684	up_read(&current->mm->mmap_sem);
2685}
2686