memory.c revision a68d2ebc1581a3aec57bd032651e013fa609f530
1/*
2 *  linux/mm/memory.c
3 *
4 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
5 */
6
7/*
8 * demand-loading started 01.12.91 - seems it is high on the list of
9 * things wanted, and it should be easy to implement. - Linus
10 */
11
12/*
13 * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
14 * pages started 02.12.91, seems to work. - Linus.
15 *
16 * Tested sharing by executing about 30 /bin/sh: under the old kernel it
17 * would have taken more than the 6M I have free, but it worked well as
18 * far as I could see.
19 *
20 * Also corrected some "invalidate()"s - I wasn't doing enough of them.
21 */
22
23/*
24 * Real VM (paging to/from disk) started 18.12.91. Much more work and
25 * thought has to go into this. Oh, well..
26 * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
27 *		Found it. Everything seems to work now.
28 * 20.12.91  -  Ok, making the swap-device changeable like the root.
29 */
30
31/*
32 * 05.04.94  -  Multi-page memory management added for v1.1.
33 * 		Idea by Alex Bligh (alex@cconcepts.co.uk)
34 *
35 * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
36 *		(Gerhard.Wichert@pdb.siemens.de)
37 *
38 * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
39 */
40
41#include <linux/kernel_stat.h>
42#include <linux/mm.h>
43#include <linux/hugetlb.h>
44#include <linux/mman.h>
45#include <linux/swap.h>
46#include <linux/highmem.h>
47#include <linux/pagemap.h>
48#include <linux/rmap.h>
49#include <linux/module.h>
50#include <linux/init.h>
51
52#include <asm/pgalloc.h>
53#include <asm/uaccess.h>
54#include <asm/tlb.h>
55#include <asm/tlbflush.h>
56#include <asm/pgtable.h>
57
58#include <linux/swapops.h>
59#include <linux/elf.h>
60
61#ifndef CONFIG_NEED_MULTIPLE_NODES
62/* use the per-pgdat data instead for discontigmem - mbligh */
63unsigned long max_mapnr;
64struct page *mem_map;
65
66EXPORT_SYMBOL(max_mapnr);
67EXPORT_SYMBOL(mem_map);
68#endif
69
70unsigned long num_physpages;
71/*
72 * A number of key systems in x86 including ioremap() rely on the assumption
73 * that high_memory defines the upper bound on direct map memory, the end
74 * of ZONE_NORMAL.  Under CONFIG_DISCONTIG this means that max_low_pfn and
75 * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
76 * and ZONE_HIGHMEM.
77 */
78void * high_memory;
79unsigned long vmalloc_earlyreserve;
80
81EXPORT_SYMBOL(num_physpages);
82EXPORT_SYMBOL(high_memory);
83EXPORT_SYMBOL(vmalloc_earlyreserve);
84
85/*
86 * If a p?d_bad entry is found while walking page tables, report
87 * the error, before resetting entry to p?d_none.  Usually (but
88 * very seldom) called out from the p?d_none_or_clear_bad macros.
89 */
90
91void pgd_clear_bad(pgd_t *pgd)
92{
93	pgd_ERROR(*pgd);
94	pgd_clear(pgd);
95}
96
97void pud_clear_bad(pud_t *pud)
98{
99	pud_ERROR(*pud);
100	pud_clear(pud);
101}
102
103void pmd_clear_bad(pmd_t *pmd)
104{
105	pmd_ERROR(*pmd);
106	pmd_clear(pmd);
107}
108
109/*
110 * Note: this doesn't free the actual pages themselves. That
111 * has been handled earlier when unmapping all the memory regions.
112 */
113static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd)
114{
115	struct page *page = pmd_page(*pmd);
116	pmd_clear(pmd);
117	pte_free_tlb(tlb, page);
118	dec_page_state(nr_page_table_pages);
119	tlb->mm->nr_ptes--;
120}
121
122static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
123				unsigned long addr, unsigned long end,
124				unsigned long floor, unsigned long ceiling)
125{
126	pmd_t *pmd;
127	unsigned long next;
128	unsigned long start;
129
130	start = addr;
131	pmd = pmd_offset(pud, addr);
132	do {
133		next = pmd_addr_end(addr, end);
134		if (pmd_none_or_clear_bad(pmd))
135			continue;
136		free_pte_range(tlb, pmd);
137	} while (pmd++, addr = next, addr != end);
138
139	start &= PUD_MASK;
140	if (start < floor)
141		return;
142	if (ceiling) {
143		ceiling &= PUD_MASK;
144		if (!ceiling)
145			return;
146	}
147	if (end - 1 > ceiling - 1)
148		return;
149
150	pmd = pmd_offset(pud, start);
151	pud_clear(pud);
152	pmd_free_tlb(tlb, pmd);
153}
154
155static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
156				unsigned long addr, unsigned long end,
157				unsigned long floor, unsigned long ceiling)
158{
159	pud_t *pud;
160	unsigned long next;
161	unsigned long start;
162
163	start = addr;
164	pud = pud_offset(pgd, addr);
165	do {
166		next = pud_addr_end(addr, end);
167		if (pud_none_or_clear_bad(pud))
168			continue;
169		free_pmd_range(tlb, pud, addr, next, floor, ceiling);
170	} while (pud++, addr = next, addr != end);
171
172	start &= PGDIR_MASK;
173	if (start < floor)
174		return;
175	if (ceiling) {
176		ceiling &= PGDIR_MASK;
177		if (!ceiling)
178			return;
179	}
180	if (end - 1 > ceiling - 1)
181		return;
182
183	pud = pud_offset(pgd, start);
184	pgd_clear(pgd);
185	pud_free_tlb(tlb, pud);
186}
187
188/*
189 * This function frees user-level page tables of a process.
190 *
191 * Must be called with pagetable lock held.
192 */
193void free_pgd_range(struct mmu_gather **tlb,
194			unsigned long addr, unsigned long end,
195			unsigned long floor, unsigned long ceiling)
196{
197	pgd_t *pgd;
198	unsigned long next;
199	unsigned long start;
200
201	/*
202	 * The next few lines have given us lots of grief...
203	 *
204	 * Why are we testing PMD* at this top level?  Because often
205	 * there will be no work to do at all, and we'd prefer not to
206	 * go all the way down to the bottom just to discover that.
207	 *
208	 * Why all these "- 1"s?  Because 0 represents both the bottom
209	 * of the address space and the top of it (using -1 for the
210	 * top wouldn't help much: the masks would do the wrong thing).
211	 * The rule is that addr 0 and floor 0 refer to the bottom of
212	 * the address space, but end 0 and ceiling 0 refer to the top.
213	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
214	 * that end 0 case should be mythical).
215	 *
216	 * Wherever addr is brought up or ceiling brought down, we must
217	 * be careful to reject "the opposite 0" before it confuses the
218	 * subsequent tests.  But what about where end is brought down
219	 * by PMD_SIZE below? No, end can't go down to 0 there.
220	 *
221	 * Whereas we round start (addr) and ceiling down, by different
222	 * masks at different levels, in order to test whether a table
223	 * now has no other vmas using it, so can be freed, we don't
224	 * bother to round floor or end up - the tests don't need that.
225	 */
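	/*
	 * A small worked example of the "- 1" convention above (a sketch,
	 * assuming 32-bit unsigned longs just to show the wrap-around):
	 * with ceiling == 0, i.e. "top of address space", ceiling - 1 is
	 * 0xffffffff, so a test like "end - 1 > ceiling - 1" can never be
	 * true and the ceiling imposes no limit; with ceiling == 0x40000000
	 * and end == 0x40000000, end - 1 == ceiling - 1 == 0x3fffffff and
	 * the test again does not fire, since end does not reach past the
	 * ceiling.
	 */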
226
227	addr &= PMD_MASK;
228	if (addr < floor) {
229		addr += PMD_SIZE;
230		if (!addr)
231			return;
232	}
233	if (ceiling) {
234		ceiling &= PMD_MASK;
235		if (!ceiling)
236			return;
237	}
238	if (end - 1 > ceiling - 1)
239		end -= PMD_SIZE;
240	if (addr > end - 1)
241		return;
242
243	start = addr;
244	pgd = pgd_offset((*tlb)->mm, addr);
245	do {
246		next = pgd_addr_end(addr, end);
247		if (pgd_none_or_clear_bad(pgd))
248			continue;
249		free_pud_range(*tlb, pgd, addr, next, floor, ceiling);
250	} while (pgd++, addr = next, addr != end);
251
252	if (!tlb_is_full_mm(*tlb))
253		flush_tlb_pgtables((*tlb)->mm, start, end);
254}
255
256void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma,
257		unsigned long floor, unsigned long ceiling)
258{
259	while (vma) {
260		struct vm_area_struct *next = vma->vm_next;
261		unsigned long addr = vma->vm_start;
262
263		if (is_hugepage_only_range(vma->vm_mm, addr, HPAGE_SIZE)) {
264			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
265				floor, next? next->vm_start: ceiling);
266		} else {
267			/*
268			 * Optimization: gather nearby vmas into one call down
269			 */
270			while (next && next->vm_start <= vma->vm_end + PMD_SIZE
271			  && !is_hugepage_only_range(vma->vm_mm, next->vm_start,
272							HPAGE_SIZE)) {
273				vma = next;
274				next = vma->vm_next;
275			}
276			free_pgd_range(tlb, addr, vma->vm_end,
277				floor, next? next->vm_start: ceiling);
278		}
279		vma = next;
280	}
281}
282
283pte_t fastcall *pte_alloc_map(struct mm_struct *mm, pmd_t *pmd,
284				unsigned long address)
285{
286	if (!pmd_present(*pmd)) {
287		struct page *new;
288
289		spin_unlock(&mm->page_table_lock);
290		new = pte_alloc_one(mm, address);
291		spin_lock(&mm->page_table_lock);
292		if (!new)
293			return NULL;
294		/*
295		 * Because we dropped the lock, we should re-check the
296		 * entry, as somebody else could have populated it..
297		 */
298		if (pmd_present(*pmd)) {
299			pte_free(new);
300			goto out;
301		}
302		mm->nr_ptes++;
303		inc_page_state(nr_page_table_pages);
304		pmd_populate(mm, pmd, new);
305	}
306out:
307	return pte_offset_map(pmd, address);
308}
309
310pte_t fastcall * pte_alloc_kernel(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
311{
312	if (!pmd_present(*pmd)) {
313		pte_t *new;
314
315		spin_unlock(&mm->page_table_lock);
316		new = pte_alloc_one_kernel(mm, address);
317		spin_lock(&mm->page_table_lock);
318		if (!new)
319			return NULL;
320
321		/*
322		 * Because we dropped the lock, we should re-check the
323		 * entry, as somebody else could have populated it..
324		 */
325		if (pmd_present(*pmd)) {
326			pte_free_kernel(new);
327			goto out;
328		}
329		pmd_populate_kernel(mm, pmd, new);
330	}
331out:
332	return pte_offset_kernel(pmd, address);
333}
334
335/*
336 * Copy one vm_area from one task to the other. Assumes that the page
337 * tables already present in the new task have been cleared in the whole
338 * range covered by this vma.
339 *
340 * dst->page_table_lock is held on entry and exit,
341 * but may be dropped within p[mg]d_alloc() and pte_alloc_map().
342 */
343
344static inline void
345copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
346		pte_t *dst_pte, pte_t *src_pte, unsigned long vm_flags,
347		unsigned long addr)
348{
349	pte_t pte = *src_pte;
350	struct page *page;
351	unsigned long pfn;
352
353	/* pte contains position in swap or file, so copy. */
354	if (unlikely(!pte_present(pte))) {
355		if (!pte_file(pte)) {
356			swap_duplicate(pte_to_swp_entry(pte));
357			/* make sure dst_mm is on swapoff's mmlist. */
358			if (unlikely(list_empty(&dst_mm->mmlist))) {
359				spin_lock(&mmlist_lock);
360				list_add(&dst_mm->mmlist, &src_mm->mmlist);
361				spin_unlock(&mmlist_lock);
362			}
363		}
364		set_pte_at(dst_mm, addr, dst_pte, pte);
365		return;
366	}
367
368	pfn = pte_pfn(pte);
369	/* If the pte points outside of valid memory, the
370	 * mapping is assumed to be good, meaningful
371	 * and not mapped via rmap - duplicate the
372	 * mapping as is.
373	 */
374	page = NULL;
375	if (pfn_valid(pfn))
376		page = pfn_to_page(pfn);
377
378	if (!page || PageReserved(page)) {
379		set_pte_at(dst_mm, addr, dst_pte, pte);
380		return;
381	}
382
383	/*
384	 * If it's a COW mapping, write protect it both
385	 * in the parent and the child
386	 */
387	if ((vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE) {
388		ptep_set_wrprotect(src_mm, addr, src_pte);
389		pte = *src_pte;
390	}
391
392	/*
393	 * If it's a shared mapping, mark it clean in
394	 * the child
395	 */
396	if (vm_flags & VM_SHARED)
397		pte = pte_mkclean(pte);
398	pte = pte_mkold(pte);
399	get_page(page);
400	inc_mm_counter(dst_mm, rss);
401	if (PageAnon(page))
402		inc_mm_counter(dst_mm, anon_rss);
403	set_pte_at(dst_mm, addr, dst_pte, pte);
404	page_dup_rmap(page);
405}
406
407static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
408		pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
409		unsigned long addr, unsigned long end)
410{
411	pte_t *src_pte, *dst_pte;
412	unsigned long vm_flags = vma->vm_flags;
413	int progress;
414
415again:
416	dst_pte = pte_alloc_map(dst_mm, dst_pmd, addr);
417	if (!dst_pte)
418		return -ENOMEM;
419	src_pte = pte_offset_map_nested(src_pmd, addr);
420
421	progress = 0;
422	spin_lock(&src_mm->page_table_lock);
423	do {
424		/*
425		 * We are holding two locks at this point - either of them
426		 * could generate latencies in another task on another CPU.
427		 */
428		if (progress >= 32 && (need_resched() ||
429		    need_lockbreak(&src_mm->page_table_lock) ||
430		    need_lockbreak(&dst_mm->page_table_lock)))
431			break;
432		if (pte_none(*src_pte)) {
433			progress++;
434			continue;
435		}
436		copy_one_pte(dst_mm, src_mm, dst_pte, src_pte, vm_flags, addr);
437		progress += 8;
438	} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
439	spin_unlock(&src_mm->page_table_lock);
440
441	pte_unmap_nested(src_pte - 1);
442	pte_unmap(dst_pte - 1);
443	cond_resched_lock(&dst_mm->page_table_lock);
444	if (addr != end)
445		goto again;
446	return 0;
447}
448
449static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
450		pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma,
451		unsigned long addr, unsigned long end)
452{
453	pmd_t *src_pmd, *dst_pmd;
454	unsigned long next;
455
456	dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
457	if (!dst_pmd)
458		return -ENOMEM;
459	src_pmd = pmd_offset(src_pud, addr);
460	do {
461		next = pmd_addr_end(addr, end);
462		if (pmd_none_or_clear_bad(src_pmd))
463			continue;
464		if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
465						vma, addr, next))
466			return -ENOMEM;
467	} while (dst_pmd++, src_pmd++, addr = next, addr != end);
468	return 0;
469}
470
471static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
472		pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
473		unsigned long addr, unsigned long end)
474{
475	pud_t *src_pud, *dst_pud;
476	unsigned long next;
477
478	dst_pud = pud_alloc(dst_mm, dst_pgd, addr);
479	if (!dst_pud)
480		return -ENOMEM;
481	src_pud = pud_offset(src_pgd, addr);
482	do {
483		next = pud_addr_end(addr, end);
484		if (pud_none_or_clear_bad(src_pud))
485			continue;
486		if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud,
487						vma, addr, next))
488			return -ENOMEM;
489	} while (dst_pud++, src_pud++, addr = next, addr != end);
490	return 0;
491}
492
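/*
 * copy_page_range() is the fork-time entry point.  A sketch of the
 * caller (assuming the dup_mmap() loop in kernel/fork.c) looks roughly
 * like:
 *
 *	for (vma = oldmm->mmap; vma; vma = vma->vm_next) {
 *		...
 *		if (copy_page_range(mm, oldmm, vma))
 *			goto fail;
 *	}
 */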
493int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
494		struct vm_area_struct *vma)
495{
496	pgd_t *src_pgd, *dst_pgd;
497	unsigned long next;
498	unsigned long addr = vma->vm_start;
499	unsigned long end = vma->vm_end;
500
501	if (is_vm_hugetlb_page(vma))
502		return copy_hugetlb_page_range(dst_mm, src_mm, vma);
503
504	dst_pgd = pgd_offset(dst_mm, addr);
505	src_pgd = pgd_offset(src_mm, addr);
506	do {
507		next = pgd_addr_end(addr, end);
508		if (pgd_none_or_clear_bad(src_pgd))
509			continue;
510		if (copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd,
511						vma, addr, next))
512			return -ENOMEM;
513	} while (dst_pgd++, src_pgd++, addr = next, addr != end);
514	return 0;
515}
516
517static void zap_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
518				unsigned long addr, unsigned long end,
519				struct zap_details *details)
520{
521	pte_t *pte;
522
523	pte = pte_offset_map(pmd, addr);
524	do {
525		pte_t ptent = *pte;
526		if (pte_none(ptent))
527			continue;
528		if (pte_present(ptent)) {
529			struct page *page = NULL;
530			unsigned long pfn = pte_pfn(ptent);
531			if (pfn_valid(pfn)) {
532				page = pfn_to_page(pfn);
533				if (PageReserved(page))
534					page = NULL;
535			}
536			if (unlikely(details) && page) {
537				/*
538				 * unmap_shared_mapping_pages() wants to
539				 * invalidate cache without truncating:
540				 * unmap shared but keep private pages.
541				 */
542				if (details->check_mapping &&
543				    details->check_mapping != page->mapping)
544					continue;
545				/*
546				 * Each page->index must be checked when
547				 * invalidating or truncating nonlinear.
548				 */
549				if (details->nonlinear_vma &&
550				    (page->index < details->first_index ||
551				     page->index > details->last_index))
552					continue;
553			}
554			ptent = ptep_get_and_clear(tlb->mm, addr, pte);
555			tlb_remove_tlb_entry(tlb, pte, addr);
556			if (unlikely(!page))
557				continue;
558			if (unlikely(details) && details->nonlinear_vma
559			    && linear_page_index(details->nonlinear_vma,
560						addr) != page->index)
561				set_pte_at(tlb->mm, addr, pte,
562					   pgoff_to_pte(page->index));
563			if (pte_dirty(ptent))
564				set_page_dirty(page);
565			if (PageAnon(page))
566				dec_mm_counter(tlb->mm, anon_rss);
567			else if (pte_young(ptent))
568				mark_page_accessed(page);
569			tlb->freed++;
570			page_remove_rmap(page);
571			tlb_remove_page(tlb, page);
572			continue;
573		}
574		/*
575		 * If details->check_mapping, we leave swap entries;
576		 * if details->nonlinear_vma, we leave file entries.
577		 */
578		if (unlikely(details))
579			continue;
580		if (!pte_file(ptent))
581			free_swap_and_cache(pte_to_swp_entry(ptent));
582		pte_clear(tlb->mm, addr, pte);
583	} while (pte++, addr += PAGE_SIZE, addr != end);
584	pte_unmap(pte - 1);
585}
586
587static inline void zap_pmd_range(struct mmu_gather *tlb, pud_t *pud,
588				unsigned long addr, unsigned long end,
589				struct zap_details *details)
590{
591	pmd_t *pmd;
592	unsigned long next;
593
594	pmd = pmd_offset(pud, addr);
595	do {
596		next = pmd_addr_end(addr, end);
597		if (pmd_none_or_clear_bad(pmd))
598			continue;
599		zap_pte_range(tlb, pmd, addr, next, details);
600	} while (pmd++, addr = next, addr != end);
601}
602
603static inline void zap_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
604				unsigned long addr, unsigned long end,
605				struct zap_details *details)
606{
607	pud_t *pud;
608	unsigned long next;
609
610	pud = pud_offset(pgd, addr);
611	do {
612		next = pud_addr_end(addr, end);
613		if (pud_none_or_clear_bad(pud))
614			continue;
615		zap_pmd_range(tlb, pud, addr, next, details);
616	} while (pud++, addr = next, addr != end);
617}
618
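/*
 * unmap_page_range() drives a single pgd -> pud -> pmd -> pte walk over
 * the requested range: each level clips its sub-range with p?d_addr_end()
 * and skips empty or bad entries, so only the leaf zap_pte_range() ever
 * touches individual ptes.
 */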
619static void unmap_page_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
620				unsigned long addr, unsigned long end,
621				struct zap_details *details)
622{
623	pgd_t *pgd;
624	unsigned long next;
625
626	if (details && !details->check_mapping && !details->nonlinear_vma)
627		details = NULL;
628
629	BUG_ON(addr >= end);
630	tlb_start_vma(tlb, vma);
631	pgd = pgd_offset(vma->vm_mm, addr);
632	do {
633		next = pgd_addr_end(addr, end);
634		if (pgd_none_or_clear_bad(pgd))
635			continue;
636		zap_pud_range(tlb, pgd, addr, next, details);
637	} while (pgd++, addr = next, addr != end);
638	tlb_end_vma(tlb, vma);
639}
640
641#ifdef CONFIG_PREEMPT
642# define ZAP_BLOCK_SIZE	(8 * PAGE_SIZE)
643#else
644/* No preempt: go for improved straight-line efficiency */
645# define ZAP_BLOCK_SIZE	(1024 * PAGE_SIZE)
646#endif
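/*
 * For a sense of scale (a sketch, assuming the common 4KB PAGE_SIZE):
 * with CONFIG_PREEMPT each batch covers 8 * 4KB = 32KB before the locks
 * may be dropped, while the non-preempt case zaps 1024 * 4KB = 4MB at a
 * time in favour of straight-line efficiency.
 */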
647
648/**
649 * unmap_vmas - unmap a range of memory covered by a list of vma's
650 * @tlbp: address of the caller's struct mmu_gather
651 * @mm: the controlling mm_struct
652 * @vma: the starting vma
653 * @start_addr: virtual address at which to start unmapping
654 * @end_addr: virtual address at which to end unmapping
655 * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here
656 * @details: details of nonlinear truncation or shared cache invalidation
657 *
658 * Returns the end address of the unmapping (restart addr if interrupted).
659 *
660 * Unmap all pages in the vma list.  Called under page_table_lock.
661 *
662 * We aim to not hold page_table_lock for too long (for scheduling latency
663 * reasons).  So zap pages in ZAP_BLOCK_SIZE bytecounts.  This means we need to
664 * return the ending mmu_gather to the caller.
665 *
666 * Only addresses between `start' and `end' will be unmapped.
667 *
668 * The VMA list must be sorted in ascending virtual address order.
669 *
670 * unmap_vmas() assumes that the caller will flush the whole unmapped address
671 * range after unmap_vmas() returns.  So the only responsibility here is to
672 * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
673 * drops the lock and schedules.
674 */
675unsigned long unmap_vmas(struct mmu_gather **tlbp, struct mm_struct *mm,
676		struct vm_area_struct *vma, unsigned long start_addr,
677		unsigned long end_addr, unsigned long *nr_accounted,
678		struct zap_details *details)
679{
680	unsigned long zap_bytes = ZAP_BLOCK_SIZE;
681	unsigned long tlb_start = 0;	/* For tlb_finish_mmu */
682	int tlb_start_valid = 0;
683	unsigned long start = start_addr;
684	spinlock_t *i_mmap_lock = details? details->i_mmap_lock: NULL;
685	int fullmm = tlb_is_full_mm(*tlbp);
686
687	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) {
688		unsigned long end;
689
690		start = max(vma->vm_start, start_addr);
691		if (start >= vma->vm_end)
692			continue;
693		end = min(vma->vm_end, end_addr);
694		if (end <= vma->vm_start)
695			continue;
696
697		if (vma->vm_flags & VM_ACCOUNT)
698			*nr_accounted += (end - start) >> PAGE_SHIFT;
699
700		while (start != end) {
701			unsigned long block;
702
703			if (!tlb_start_valid) {
704				tlb_start = start;
705				tlb_start_valid = 1;
706			}
707
708			if (is_vm_hugetlb_page(vma)) {
709				block = end - start;
710				unmap_hugepage_range(vma, start, end);
711			} else {
712				block = min(zap_bytes, end - start);
713				unmap_page_range(*tlbp, vma, start,
714						start + block, details);
715			}
716
717			start += block;
718			zap_bytes -= block;
719			if ((long)zap_bytes > 0)
720				continue;
721
722			tlb_finish_mmu(*tlbp, tlb_start, start);
723
724			if (need_resched() ||
725				need_lockbreak(&mm->page_table_lock) ||
726				(i_mmap_lock && need_lockbreak(i_mmap_lock))) {
727				if (i_mmap_lock) {
728					/* must reset count of rss freed */
729					*tlbp = tlb_gather_mmu(mm, fullmm);
730					goto out;
731				}
732				spin_unlock(&mm->page_table_lock);
733				cond_resched();
734				spin_lock(&mm->page_table_lock);
735			}
736
737			*tlbp = tlb_gather_mmu(mm, fullmm);
738			tlb_start_valid = 0;
739			zap_bytes = ZAP_BLOCK_SIZE;
740		}
741	}
742out:
743	return start;	/* which is now the end (or restart) address */
744}
745
746/**
747 * zap_page_range - remove user pages in a given range
748 * @vma: vm_area_struct holding the applicable pages
749 * @address: starting address of pages to zap
750 * @size: number of bytes to zap
751 * @details: details of nonlinear truncation or shared cache invalidation
752 */
753unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
754		unsigned long size, struct zap_details *details)
755{
756	struct mm_struct *mm = vma->vm_mm;
757	struct mmu_gather *tlb;
758	unsigned long end = address + size;
759	unsigned long nr_accounted = 0;
760
761	if (is_vm_hugetlb_page(vma)) {
762		zap_hugepage_range(vma, address, size);
763		return end;
764	}
765
766	lru_add_drain();
767	spin_lock(&mm->page_table_lock);
768	tlb = tlb_gather_mmu(mm, 0);
769	end = unmap_vmas(&tlb, mm, vma, address, end, &nr_accounted, details);
770	tlb_finish_mmu(tlb, address, end);
771	spin_unlock(&mm->page_table_lock);
772	return end;
773}
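/*
 * Example caller (a sketch, not taken from this file): to drop the user
 * mapping of a single page of a vma, with the mm semaphore already held:
 *
 *	zap_page_range(vma, address & PAGE_MASK, PAGE_SIZE, NULL);
 *
 * A NULL details argument means no nonlinear or shared-cache special
 * casing, which is the common case.
 */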
774
775/*
776 * Do a quick page-table lookup for a single page.
777 * mm->page_table_lock must be held.
778 */
779static struct page *__follow_page(struct mm_struct *mm, unsigned long address,
780			int read, int write, int accessed)
781{
782	pgd_t *pgd;
783	pud_t *pud;
784	pmd_t *pmd;
785	pte_t *ptep, pte;
786	unsigned long pfn;
787	struct page *page;
788
789	page = follow_huge_addr(mm, address, write);
790	if (! IS_ERR(page))
791		return page;
792
793	pgd = pgd_offset(mm, address);
794	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
795		goto out;
796
797	pud = pud_offset(pgd, address);
798	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
799		goto out;
800
801	pmd = pmd_offset(pud, address);
802	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
803		goto out;
804	if (pmd_huge(*pmd))
805		return follow_huge_pmd(mm, address, pmd, write);
806
807	ptep = pte_offset_map(pmd, address);
808	if (!ptep)
809		goto out;
810
811	pte = *ptep;
812	pte_unmap(ptep);
813	if (pte_present(pte)) {
814		if (write && !pte_write(pte))
815			goto out;
816		if (read && !pte_read(pte))
817			goto out;
818		pfn = pte_pfn(pte);
819		if (pfn_valid(pfn)) {
820			page = pfn_to_page(pfn);
821			if (accessed) {
822				if (write && !pte_dirty(pte) && !PageDirty(page))
823					set_page_dirty(page);
824				mark_page_accessed(page);
825			}
826			return page;
827		}
828	}
829
830out:
831	return NULL;
832}
833
834inline struct page *
835follow_page(struct mm_struct *mm, unsigned long address, int write)
836{
837	return __follow_page(mm, address, 0, write, 1);
838}
839
840/*
841 * check_user_page_readable() can be called from interrupt context by oprofile,
842 * so we need to avoid taking any non-irq-safe locks.
843 */
844int check_user_page_readable(struct mm_struct *mm, unsigned long address)
845{
846	return __follow_page(mm, address, 1, 0, 0) != NULL;
847}
848EXPORT_SYMBOL(check_user_page_readable);
849
850static inline int
851untouched_anonymous_page(struct mm_struct* mm, struct vm_area_struct *vma,
852			 unsigned long address)
853{
854	pgd_t *pgd;
855	pud_t *pud;
856	pmd_t *pmd;
857
858	/* Check if the vma is for an anonymous mapping. */
859	if (vma->vm_ops && vma->vm_ops->nopage)
860		return 0;
861
862	/* Check if page directory entry exists. */
863	pgd = pgd_offset(mm, address);
864	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
865		return 1;
866
867	pud = pud_offset(pgd, address);
868	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
869		return 1;
870
871	/* Check if page middle directory entry exists. */
872	pmd = pmd_offset(pud, address);
873	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
874		return 1;
875
876	/* There is a pte slot for 'address' in 'mm'. */
877	return 0;
878}
879
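/*
 * Typical use of get_user_pages() (a sketch, not a caller in this file):
 * pin one page of the current task's address space for writing, then
 * release it when done.  "addr" is assumed to be a page-aligned user
 * address:
 *
 *	struct page *page;
 *	int ret;
 *
 *	down_read(&current->mm->mmap_sem);
 *	ret = get_user_pages(current, current->mm, addr, 1, 1, 0,
 *				&page, NULL);
 *	up_read(&current->mm->mmap_sem);
 *	if (ret == 1) {
 *		... use the page ...
 *		page_cache_release(page);
 *	}
 */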
880int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
881		unsigned long start, int len, int write, int force,
882		struct page **pages, struct vm_area_struct **vmas)
883{
884	int i;
885	unsigned int flags;
886
887	/*
888	 * Require read or write permissions.
889	 * If 'force' is set, we only require the "MAY" flags.
890	 */
891	flags = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
892	flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
893	i = 0;
894
895	do {
896		struct vm_area_struct *	vma;
897
898		vma = find_extend_vma(mm, start);
899		if (!vma && in_gate_area(tsk, start)) {
900			unsigned long pg = start & PAGE_MASK;
901			struct vm_area_struct *gate_vma = get_gate_vma(tsk);
902			pgd_t *pgd;
903			pud_t *pud;
904			pmd_t *pmd;
905			pte_t *pte;
906			if (write) /* user gate pages are read-only */
907				return i ? : -EFAULT;
908			if (pg > TASK_SIZE)
909				pgd = pgd_offset_k(pg);
910			else
911				pgd = pgd_offset_gate(mm, pg);
912			BUG_ON(pgd_none(*pgd));
913			pud = pud_offset(pgd, pg);
914			BUG_ON(pud_none(*pud));
915			pmd = pmd_offset(pud, pg);
916			if (pmd_none(*pmd))
917				return i ? : -EFAULT;
918			pte = pte_offset_map(pmd, pg);
919			if (pte_none(*pte)) {
920				pte_unmap(pte);
921				return i ? : -EFAULT;
922			}
923			if (pages) {
924				pages[i] = pte_page(*pte);
925				get_page(pages[i]);
926			}
927			pte_unmap(pte);
928			if (vmas)
929				vmas[i] = gate_vma;
930			i++;
931			start += PAGE_SIZE;
932			len--;
933			continue;
934		}
935
936		if (!vma || (vma->vm_flags & VM_IO)
937				|| !(flags & vma->vm_flags))
938			return i ? : -EFAULT;
939
940		if (is_vm_hugetlb_page(vma)) {
941			i = follow_hugetlb_page(mm, vma, pages, vmas,
942						&start, &len, i);
943			continue;
944		}
945		spin_lock(&mm->page_table_lock);
946		do {
947			int write_access = write;
948			struct page *page;
949
950			cond_resched_lock(&mm->page_table_lock);
951			while (!(page = follow_page(mm, start, write_access))) {
952				int ret;
953
954				/*
955				 * Shortcut for anonymous pages. We don't want
956				 * to force the creation of page tables for
957				 * insanely big anonymously mapped areas that
958				 * nobody touched so far. This is important
959				 * for doing a core dump for these mappings.
960				 */
961				if (!write && untouched_anonymous_page(mm,vma,start)) {
962					page = ZERO_PAGE(start);
963					break;
964				}
965				spin_unlock(&mm->page_table_lock);
966				ret = __handle_mm_fault(mm, vma, start, write_access);
967
968				/*
969				 * The VM_FAULT_WRITE bit tells us that do_wp_page has
970				 * broken COW when necessary, even if maybe_mkwrite
971				 * decided not to set pte_write. We can thus safely do
972				 * subsequent page lookups as if they were reads.
973				 */
974				if (ret & VM_FAULT_WRITE)
975					write_access = 0;
976
977				switch (ret & ~VM_FAULT_WRITE) {
978				case VM_FAULT_MINOR:
979					tsk->min_flt++;
980					break;
981				case VM_FAULT_MAJOR:
982					tsk->maj_flt++;
983					break;
984				case VM_FAULT_SIGBUS:
985					return i ? i : -EFAULT;
986				case VM_FAULT_OOM:
987					return i ? i : -ENOMEM;
988				default:
989					BUG();
990				}
991				spin_lock(&mm->page_table_lock);
992			}
993			if (pages) {
994				pages[i] = page;
995				flush_dcache_page(page);
996				if (!PageReserved(page))
997					page_cache_get(page);
998			}
999			if (vmas)
1000				vmas[i] = vma;
1001			i++;
1002			start += PAGE_SIZE;
1003			len--;
1004		} while (len && start < vma->vm_end);
1005		spin_unlock(&mm->page_table_lock);
1006	} while (len);
1007	return i;
1008}
1009EXPORT_SYMBOL(get_user_pages);
1010
1011static int zeromap_pte_range(struct mm_struct *mm, pmd_t *pmd,
1012			unsigned long addr, unsigned long end, pgprot_t prot)
1013{
1014	pte_t *pte;
1015
1016	pte = pte_alloc_map(mm, pmd, addr);
1017	if (!pte)
1018		return -ENOMEM;
1019	do {
1020		pte_t zero_pte = pte_wrprotect(mk_pte(ZERO_PAGE(addr), prot));
1021		BUG_ON(!pte_none(*pte));
1022		set_pte_at(mm, addr, pte, zero_pte);
1023	} while (pte++, addr += PAGE_SIZE, addr != end);
1024	pte_unmap(pte - 1);
1025	return 0;
1026}
1027
1028static inline int zeromap_pmd_range(struct mm_struct *mm, pud_t *pud,
1029			unsigned long addr, unsigned long end, pgprot_t prot)
1030{
1031	pmd_t *pmd;
1032	unsigned long next;
1033
1034	pmd = pmd_alloc(mm, pud, addr);
1035	if (!pmd)
1036		return -ENOMEM;
1037	do {
1038		next = pmd_addr_end(addr, end);
1039		if (zeromap_pte_range(mm, pmd, addr, next, prot))
1040			return -ENOMEM;
1041	} while (pmd++, addr = next, addr != end);
1042	return 0;
1043}
1044
1045static inline int zeromap_pud_range(struct mm_struct *mm, pgd_t *pgd,
1046			unsigned long addr, unsigned long end, pgprot_t prot)
1047{
1048	pud_t *pud;
1049	unsigned long next;
1050
1051	pud = pud_alloc(mm, pgd, addr);
1052	if (!pud)
1053		return -ENOMEM;
1054	do {
1055		next = pud_addr_end(addr, end);
1056		if (zeromap_pmd_range(mm, pud, addr, next, prot))
1057			return -ENOMEM;
1058	} while (pud++, addr = next, addr != end);
1059	return 0;
1060}
1061
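/*
 * A possible caller (a sketch, assuming a character-device style mmap
 * handler that wants every page of the mapping to read as zero):
 *
 *	static int zero_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return zeromap_page_range(vma, vma->vm_start,
 *				vma->vm_end - vma->vm_start,
 *				vma->vm_page_prot);
 *	}
 */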
1062int zeromap_page_range(struct vm_area_struct *vma,
1063			unsigned long addr, unsigned long size, pgprot_t prot)
1064{
1065	pgd_t *pgd;
1066	unsigned long next;
1067	unsigned long end = addr + size;
1068	struct mm_struct *mm = vma->vm_mm;
1069	int err;
1070
1071	BUG_ON(addr >= end);
1072	pgd = pgd_offset(mm, addr);
1073	flush_cache_range(vma, addr, end);
1074	spin_lock(&mm->page_table_lock);
1075	do {
1076		next = pgd_addr_end(addr, end);
1077		err = zeromap_pud_range(mm, pgd, addr, next, prot);
1078		if (err)
1079			break;
1080	} while (pgd++, addr = next, addr != end);
1081	spin_unlock(&mm->page_table_lock);
1082	return err;
1083}
1084
1085/*
1086 * Maps a range of physical memory into the requested pages. The old
1087 * mappings are removed. Any references to nonexistent pages result
1088 * in null mappings (currently treated as "copy-on-access").
1089 */
1090static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
1091			unsigned long addr, unsigned long end,
1092			unsigned long pfn, pgprot_t prot)
1093{
1094	pte_t *pte;
1095
1096	pte = pte_alloc_map(mm, pmd, addr);
1097	if (!pte)
1098		return -ENOMEM;
1099	do {
1100		BUG_ON(!pte_none(*pte));
1101		if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
1102			set_pte_at(mm, addr, pte, pfn_pte(pfn, prot));
1103		pfn++;
1104	} while (pte++, addr += PAGE_SIZE, addr != end);
1105	pte_unmap(pte - 1);
1106	return 0;
1107}
1108
1109static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
1110			unsigned long addr, unsigned long end,
1111			unsigned long pfn, pgprot_t prot)
1112{
1113	pmd_t *pmd;
1114	unsigned long next;
1115
1116	pfn -= addr >> PAGE_SHIFT;
1117	pmd = pmd_alloc(mm, pud, addr);
1118	if (!pmd)
1119		return -ENOMEM;
1120	do {
1121		next = pmd_addr_end(addr, end);
1122		if (remap_pte_range(mm, pmd, addr, next,
1123				pfn + (addr >> PAGE_SHIFT), prot))
1124			return -ENOMEM;
1125	} while (pmd++, addr = next, addr != end);
1126	return 0;
1127}
1128
1129static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
1130			unsigned long addr, unsigned long end,
1131			unsigned long pfn, pgprot_t prot)
1132{
1133	pud_t *pud;
1134	unsigned long next;
1135
1136	pfn -= addr >> PAGE_SHIFT;
1137	pud = pud_alloc(mm, pgd, addr);
1138	if (!pud)
1139		return -ENOMEM;
1140	do {
1141		next = pud_addr_end(addr, end);
1142		if (remap_pmd_range(mm, pud, addr, next,
1143				pfn + (addr >> PAGE_SHIFT), prot))
1144			return -ENOMEM;
1145	} while (pud++, addr = next, addr != end);
1146	return 0;
1147}
1148
1149/*  Note: this is only safe if the mm semaphore is held when called. */
1150int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
1151		    unsigned long pfn, unsigned long size, pgprot_t prot)
1152{
1153	pgd_t *pgd;
1154	unsigned long next;
1155	unsigned long end = addr + PAGE_ALIGN(size);
1156	struct mm_struct *mm = vma->vm_mm;
1157	int err;
1158
1159	/*
1160	 * Physically remapped pages are special. Tell the
1161	 * rest of the world about it:
1162	 *   VM_IO tells people not to look at these pages
1163	 *	(accesses can have side effects).
1164	 *   VM_RESERVED tells swapout not to try to touch
1165	 *	this region.
1166	 */
1167	vma->vm_flags |= VM_IO | VM_RESERVED;
1168
1169	BUG_ON(addr >= end);
1170	pfn -= addr >> PAGE_SHIFT;
1171	pgd = pgd_offset(mm, addr);
1172	flush_cache_range(vma, addr, end);
1173	spin_lock(&mm->page_table_lock);
1174	do {
1175		next = pgd_addr_end(addr, end);
1176		err = remap_pud_range(mm, pgd, addr, next,
1177				pfn + (addr >> PAGE_SHIFT), prot);
1178		if (err)
1179			break;
1180	} while (pgd++, addr = next, addr != end);
1181	spin_unlock(&mm->page_table_lock);
1182	return err;
1183}
1184EXPORT_SYMBOL(remap_pfn_range);
1185
1186/*
1187 * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
1188 * servicing faults for write access.  In the normal case, we always want
1189 * pte_mkwrite.  But get_user_pages can cause write faults for mappings
1190 * that do not have writing enabled, when used by access_process_vm.
1191 */
1192static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
1193{
1194	if (likely(vma->vm_flags & VM_WRITE))
1195		pte = pte_mkwrite(pte);
1196	return pte;
1197}
1198
1199/*
1200 * We hold the mm semaphore for reading and vma->vm_mm->page_table_lock
1201 */
1202static inline void break_cow(struct vm_area_struct * vma, struct page * new_page, unsigned long address,
1203		pte_t *page_table)
1204{
1205	pte_t entry;
1206
1207	entry = maybe_mkwrite(pte_mkdirty(mk_pte(new_page, vma->vm_page_prot)),
1208			      vma);
1209	ptep_establish(vma, address, page_table, entry);
1210	update_mmu_cache(vma, address, entry);
1211	lazy_mmu_prot_update(entry);
1212}
1213
1214/*
1215 * This routine handles present pages, when users try to write
1216 * to a shared page. It is done by copying the page to a new address
1217 * and decrementing the shared-page counter for the old page.
1218 *
1219 * Goto-purists beware: the only reason for goto's here is that it results
1220 * in better assembly code.  The "default" path will see no jumps at all.
1221 *
1222 * Note that this routine assumes that the protection checks have been
1223 * done by the caller (the low-level page fault routine in most cases).
1224 * Thus we can safely just mark it writable once we've done any necessary
1225 * COW.
1226 *
1227 * We also mark the page dirty at this point even though the page will
1228 * change only once the write actually happens. This avoids a few races,
1229 * and potentially makes it more efficient.
1230 *
1231 * We hold the mm semaphore and the page_table_lock on entry and exit
1232 * with the page_table_lock released.
1233 */
1234static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
1235	unsigned long address, pte_t *page_table, pmd_t *pmd, pte_t pte)
1236{
1237	struct page *old_page, *new_page;
1238	unsigned long pfn = pte_pfn(pte);
1239	pte_t entry;
1240	int ret;
1241
1242	if (unlikely(!pfn_valid(pfn))) {
1243		/*
1244		 * This should really halt the system so it can be debugged or
1245		 * at least the kernel stops what it's doing before it corrupts
1246		 * data, but for the moment just pretend this is OOM.
1247		 */
1248		pte_unmap(page_table);
1249		printk(KERN_ERR "do_wp_page: bogus page at address %08lx\n",
1250				address);
1251		spin_unlock(&mm->page_table_lock);
1252		return VM_FAULT_OOM;
1253	}
1254	old_page = pfn_to_page(pfn);
1255
1256	if (PageAnon(old_page) && !TestSetPageLocked(old_page)) {
1257		int reuse = can_share_swap_page(old_page);
1258		unlock_page(old_page);
1259		if (reuse) {
1260			flush_cache_page(vma, address, pfn);
1261			entry = maybe_mkwrite(pte_mkyoung(pte_mkdirty(pte)),
1262					      vma);
1263			ptep_set_access_flags(vma, address, page_table, entry, 1);
1264			update_mmu_cache(vma, address, entry);
1265			lazy_mmu_prot_update(entry);
1266			pte_unmap(page_table);
1267			spin_unlock(&mm->page_table_lock);
1268			return VM_FAULT_MINOR|VM_FAULT_WRITE;
1269		}
1270	}
1271	pte_unmap(page_table);
1272
1273	/*
1274	 * Ok, we need to copy. Oh, well..
1275	 */
1276	if (!PageReserved(old_page))
1277		page_cache_get(old_page);
1278	spin_unlock(&mm->page_table_lock);
1279
1280	if (unlikely(anon_vma_prepare(vma)))
1281		goto no_new_page;
1282	if (old_page == ZERO_PAGE(address)) {
1283		new_page = alloc_zeroed_user_highpage(vma, address);
1284		if (!new_page)
1285			goto no_new_page;
1286	} else {
1287		new_page = alloc_page_vma(GFP_HIGHUSER, vma, address);
1288		if (!new_page)
1289			goto no_new_page;
1290		copy_user_highpage(new_page, old_page, address);
1291	}
1292	/*
1293	 * Re-check the pte - we dropped the lock
1294	 */
1295	ret = VM_FAULT_MINOR;
1296	spin_lock(&mm->page_table_lock);
1297	page_table = pte_offset_map(pmd, address);
1298	if (likely(pte_same(*page_table, pte))) {
1299		if (PageAnon(old_page))
1300			dec_mm_counter(mm, anon_rss);
1301		if (PageReserved(old_page))
1302			inc_mm_counter(mm, rss);
1303		else
1304			page_remove_rmap(old_page);
1305		flush_cache_page(vma, address, pfn);
1306		break_cow(vma, new_page, address, page_table);
1307		lru_cache_add_active(new_page);
1308		page_add_anon_rmap(new_page, vma, address);
1309
1310		/* Free the old page.. */
1311		new_page = old_page;
1312		ret |= VM_FAULT_WRITE;
1313	}
1314	pte_unmap(page_table);
1315	page_cache_release(new_page);
1316	page_cache_release(old_page);
1317	spin_unlock(&mm->page_table_lock);
1318	return ret;
1319
1320no_new_page:
1321	page_cache_release(old_page);
1322	return VM_FAULT_OOM;
1323}
1324
1325/*
1326 * Helper functions for unmap_mapping_range().
1327 *
1328 * __ Notes on dropping i_mmap_lock to reduce latency while unmapping __
1329 *
1330 * We have to restart searching the prio_tree whenever we drop the lock,
1331 * since the iterator is only valid while the lock is held, and anyway
1332 * a later vma might be split and reinserted earlier while lock dropped.
1333 *
1334 * The list of nonlinear vmas could be handled more efficiently, using
1335 * a placeholder, but handle it in the same way until a need is shown.
1336 * It is important to search the prio_tree before nonlinear list: a vma
1337 * may become nonlinear and be shifted from prio_tree to nonlinear list
1338 * while the lock is dropped; but never shifted from list to prio_tree.
1339 *
1340 * In order to make forward progress despite restarting the search,
1341 * vm_truncate_count is used to mark a vma as now dealt with, so we can
1342 * quickly skip it next time around.  Since the prio_tree search only
1343 * shows us those vmas affected by unmapping the range in question, we
1344 * can't efficiently keep all vmas in step with mapping->truncate_count:
1345 * so instead reset them all whenever it wraps back to 0 (then go to 1).
1346 * mapping->truncate_count and vma->vm_truncate_count are protected by
1347 * i_mmap_lock.
1348 *
1349 * In order to make forward progress despite repeatedly restarting some
1350 * large vma, note the restart_addr from unmap_vmas when it breaks out:
1351 * and restart from that address when we reach that vma again.  It might
1352 * have been split or merged, shrunk or extended, but never shifted: so
1353 * restart_addr remains valid so long as it remains in the vma's range.
1354 * unmap_mapping_range forces truncate_count to leap over page-aligned
1355 * values so we can save vma's restart_addr in its truncate_count field.
1356 */
1357#define is_restart_addr(truncate_count) (!((truncate_count) & ~PAGE_MASK))
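/*
 * Example (a sketch, assuming 4KB pages): an ordinary truncate_count such
 * as 0x1001 has low bits set and is not a restart address, while a
 * page-aligned value such as 0x1000 stored in vm_truncate_count is read
 * back as the address at which to resume unmapping that vma.
 */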
1358
1359static void reset_vma_truncate_counts(struct address_space *mapping)
1360{
1361	struct vm_area_struct *vma;
1362	struct prio_tree_iter iter;
1363
1364	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, 0, ULONG_MAX)
1365		vma->vm_truncate_count = 0;
1366	list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
1367		vma->vm_truncate_count = 0;
1368}
1369
1370static int unmap_mapping_range_vma(struct vm_area_struct *vma,
1371		unsigned long start_addr, unsigned long end_addr,
1372		struct zap_details *details)
1373{
1374	unsigned long restart_addr;
1375	int need_break;
1376
1377again:
1378	restart_addr = vma->vm_truncate_count;
1379	if (is_restart_addr(restart_addr) && start_addr < restart_addr) {
1380		start_addr = restart_addr;
1381		if (start_addr >= end_addr) {
1382			/* Top of vma has been split off since last time */
1383			vma->vm_truncate_count = details->truncate_count;
1384			return 0;
1385		}
1386	}
1387
1388	restart_addr = zap_page_range(vma, start_addr,
1389					end_addr - start_addr, details);
1390
1391	/*
1392	 * We cannot rely on the break test in unmap_vmas:
1393	 * on the one hand, we don't want to restart our loop
1394	 * just because that broke out for the page_table_lock;
1395	 * on the other hand, it does no test when vma is small.
1396	 */
1397	need_break = need_resched() ||
1398			need_lockbreak(details->i_mmap_lock);
1399
1400	if (restart_addr >= end_addr) {
1401		/* We have now completed this vma: mark it so */
1402		vma->vm_truncate_count = details->truncate_count;
1403		if (!need_break)
1404			return 0;
1405	} else {
1406		/* Note restart_addr in vma's truncate_count field */
1407		vma->vm_truncate_count = restart_addr;
1408		if (!need_break)
1409			goto again;
1410	}
1411
1412	spin_unlock(details->i_mmap_lock);
1413	cond_resched();
1414	spin_lock(details->i_mmap_lock);
1415	return -EINTR;
1416}
1417
1418static inline void unmap_mapping_range_tree(struct prio_tree_root *root,
1419					    struct zap_details *details)
1420{
1421	struct vm_area_struct *vma;
1422	struct prio_tree_iter iter;
1423	pgoff_t vba, vea, zba, zea;
1424
1425restart:
1426	vma_prio_tree_foreach(vma, &iter, root,
1427			details->first_index, details->last_index) {
1428		/* Skip quickly over those we have already dealt with */
1429		if (vma->vm_truncate_count == details->truncate_count)
1430			continue;
1431
1432		vba = vma->vm_pgoff;
1433		vea = vba + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) - 1;
1434		/* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */
1435		zba = details->first_index;
1436		if (zba < vba)
1437			zba = vba;
1438		zea = details->last_index;
1439		if (zea > vea)
1440			zea = vea;
1441
1442		if (unmap_mapping_range_vma(vma,
1443			((zba - vba) << PAGE_SHIFT) + vma->vm_start,
1444			((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
1445				details) < 0)
1446			goto restart;
1447	}
1448}
1449
1450static inline void unmap_mapping_range_list(struct list_head *head,
1451					    struct zap_details *details)
1452{
1453	struct vm_area_struct *vma;
1454
1455	/*
1456	 * In nonlinear VMAs there is no correspondence between virtual address
1457	 * offset and file offset.  So we must perform an exhaustive search
1458	 * across *all* the pages in each nonlinear VMA, not just the pages
1459	 * whose virtual address lies outside the file truncation point.
1460	 */
1461restart:
1462	list_for_each_entry(vma, head, shared.vm_set.list) {
1463		/* Skip quickly over those we have already dealt with */
1464		if (vma->vm_truncate_count == details->truncate_count)
1465			continue;
1466		details->nonlinear_vma = vma;
1467		if (unmap_mapping_range_vma(vma, vma->vm_start,
1468					vma->vm_end, details) < 0)
1469			goto restart;
1470	}
1471}
1472
1473/**
1474 * unmap_mapping_range - unmap the portion of all mmaps
1475 * in the specified address_space corresponding to the specified
1476 * page range in the underlying file.
1477 * @mapping: the address space containing mmaps to be unmapped.
1478 * @holebegin: byte in first page to unmap, relative to the start of
1479 * the underlying file.  This will be rounded down to a PAGE_SIZE
1480 * boundary.  Note that this is different from vmtruncate(), which
1481 * must keep the partial page.  In contrast, we must get rid of
1482 * partial pages.
1483 * @holelen: size of prospective hole in bytes.  This will be rounded
1484 * up to a PAGE_SIZE boundary.  A holelen of zero truncates to the
1485 * end of the file.
1486 * @even_cows: 1 when truncating a file, unmap even private COWed pages;
1487 * but 0 when invalidating pagecache, don't throw away private data.
1488 */
1489void unmap_mapping_range(struct address_space *mapping,
1490		loff_t const holebegin, loff_t const holelen, int even_cows)
1491{
1492	struct zap_details details;
1493	pgoff_t hba = holebegin >> PAGE_SHIFT;
1494	pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
1495
1496	/* Check for overflow. */
1497	if (sizeof(holelen) > sizeof(hlen)) {
1498		long long holeend =
1499			(holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
1500		if (holeend & ~(long long)ULONG_MAX)
1501			hlen = ULONG_MAX - hba + 1;
1502	}
1503
1504	details.check_mapping = even_cows? NULL: mapping;
1505	details.nonlinear_vma = NULL;
1506	details.first_index = hba;
1507	details.last_index = hba + hlen - 1;
1508	if (details.last_index < details.first_index)
1509		details.last_index = ULONG_MAX;
1510	details.i_mmap_lock = &mapping->i_mmap_lock;
1511
1512	spin_lock(&mapping->i_mmap_lock);
1513
1514	/* serialize i_size write against truncate_count write */
1515	smp_wmb();
1516	/* Protect against page faults, and endless unmapping loops */
1517	mapping->truncate_count++;
1518	/*
1519	 * For archs where spin_lock has inclusive semantics like ia64
1520	 * this smp_mb() will prevent pagetable contents from being read
1521	 * before the truncate_count increment is visible to
1522	 * other cpus.
1523	 */
1524	smp_mb();
1525	if (unlikely(is_restart_addr(mapping->truncate_count))) {
1526		if (mapping->truncate_count == 0)
1527			reset_vma_truncate_counts(mapping);
1528		mapping->truncate_count++;
1529	}
1530	details.truncate_count = mapping->truncate_count;
1531
1532	if (unlikely(!prio_tree_empty(&mapping->i_mmap)))
1533		unmap_mapping_range_tree(&mapping->i_mmap, &details);
1534	if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
1535		unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
1536	spin_unlock(&mapping->i_mmap_lock);
1537}
1538EXPORT_SYMBOL(unmap_mapping_range);
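/*
 * Example (see vmtruncate() below): to tear down every user mapping of a
 * file beyond the new size while truncating, including private COWed
 * pages, the truncate path does
 *
 *	unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
 *
 * where holelen == 0 means "to the end of the file" and even_cows == 1
 * throws away private copies as well.
 */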
1539
1540/*
1541 * Handle all mappings that got truncated by a "truncate()"
1542 * system call.
1543 *
1544 * NOTE! We have to be ready to update the memory sharing
1545 * between the file and the memory map for a potential last
1546 * incomplete page.  Ugly, but necessary.
1547 */
1548int vmtruncate(struct inode * inode, loff_t offset)
1549{
1550	struct address_space *mapping = inode->i_mapping;
1551	unsigned long limit;
1552
1553	if (inode->i_size < offset)
1554		goto do_expand;
1555	/*
1556	 * truncation of in-use swapfiles is disallowed - it would cause
1557	 * subsequent swapout to scribble on the now-freed blocks.
1558	 */
1559	if (IS_SWAPFILE(inode))
1560		goto out_busy;
1561	i_size_write(inode, offset);
1562	unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
1563	truncate_inode_pages(mapping, offset);
1564	goto out_truncate;
1565
1566do_expand:
1567	limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
1568	if (limit != RLIM_INFINITY && offset > limit)
1569		goto out_sig;
1570	if (offset > inode->i_sb->s_maxbytes)
1571		goto out_big;
1572	i_size_write(inode, offset);
1573
1574out_truncate:
1575	if (inode->i_op && inode->i_op->truncate)
1576		inode->i_op->truncate(inode);
1577	return 0;
1578out_sig:
1579	send_sig(SIGXFSZ, current, 0);
1580out_big:
1581	return -EFBIG;
1582out_busy:
1583	return -ETXTBSY;
1584}
1585
1586EXPORT_SYMBOL(vmtruncate);
1587
1588/*
1589 * Primitive swap readahead code. We simply read an aligned block of
1590 * (1 << page_cluster) entries in the swap area. This method is chosen
1591 * because it doesn't cost us any seek time.  We also make sure to queue
1592 * the 'original' request together with the readahead ones...
1593 *
1594 * This has been extended to use the NUMA policies from the mm triggering
1595 * the readahead.
1596 *
1597 * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
1598 */
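/*
 * Sizing example (a sketch, assuming the usual default of page_cluster
 * == 3): each call reads ahead an aligned block of up to 1 << 3 == 8
 * swap entries, the faulting entry included.
 */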
1599void swapin_readahead(swp_entry_t entry, unsigned long addr,struct vm_area_struct *vma)
1600{
1601#ifdef CONFIG_NUMA
1602	struct vm_area_struct *next_vma = vma ? vma->vm_next : NULL;
1603#endif
1604	int i, num;
1605	struct page *new_page;
1606	unsigned long offset;
1607
1608	/*
1609	 * Get the number of swap entries we should read ahead.
1610	 */
1611	num = valid_swaphandles(entry, &offset);
1612	for (i = 0; i < num; offset++, i++) {
1613		/* Ok, do the async read-ahead now */
1614		new_page = read_swap_cache_async(swp_entry(swp_type(entry),
1615							   offset), vma, addr);
1616		if (!new_page)
1617			break;
1618		page_cache_release(new_page);
1619#ifdef CONFIG_NUMA
1620		/*
1621		 * Find the next applicable VMA for the NUMA policy.
1622		 */
1623		addr += PAGE_SIZE;
1624		if (addr == 0)
1625			vma = NULL;
1626		if (vma) {
1627			if (addr >= vma->vm_end) {
1628				vma = next_vma;
1629				next_vma = vma ? vma->vm_next : NULL;
1630			}
1631			if (vma && addr < vma->vm_start)
1632				vma = NULL;
1633		} else {
1634			if (next_vma && addr >= next_vma->vm_start) {
1635				vma = next_vma;
1636				next_vma = vma->vm_next;
1637			}
1638		}
1639#endif
1640	}
1641	lru_add_drain();	/* Push any new pages onto the LRU now */
1642}
1643
1644/*
1645 * We hold the mm semaphore and the page_table_lock on entry and
1646 * should release the pagetable lock on exit..
1647 */
1648static int do_swap_page(struct mm_struct * mm,
1649	struct vm_area_struct * vma, unsigned long address,
1650	pte_t *page_table, pmd_t *pmd, pte_t orig_pte, int write_access)
1651{
1652	struct page *page;
1653	swp_entry_t entry = pte_to_swp_entry(orig_pte);
1654	pte_t pte;
1655	int ret = VM_FAULT_MINOR;
1656
1657	pte_unmap(page_table);
1658	spin_unlock(&mm->page_table_lock);
1659	page = lookup_swap_cache(entry);
1660	if (!page) {
1661 		swapin_readahead(entry, address, vma);
1662 		page = read_swap_cache_async(entry, vma, address);
1663		if (!page) {
1664			/*
1665			 * Back out if somebody else faulted in this pte while
1666			 * we released the page table lock.
1667			 */
1668			spin_lock(&mm->page_table_lock);
1669			page_table = pte_offset_map(pmd, address);
1670			if (likely(pte_same(*page_table, orig_pte)))
1671				ret = VM_FAULT_OOM;
1672			else
1673				ret = VM_FAULT_MINOR;
1674			pte_unmap(page_table);
1675			spin_unlock(&mm->page_table_lock);
1676			goto out;
1677		}
1678
1679		/* Had to read the page from swap area: Major fault */
1680		ret = VM_FAULT_MAJOR;
1681		inc_page_state(pgmajfault);
1682		grab_swap_token();
1683	}
1684
1685	mark_page_accessed(page);
1686	lock_page(page);
1687
1688	/*
1689	 * Back out if somebody else faulted in this pte while we
1690	 * released the page table lock.
1691	 */
1692	spin_lock(&mm->page_table_lock);
1693	page_table = pte_offset_map(pmd, address);
1694	if (unlikely(!pte_same(*page_table, orig_pte))) {
1695		ret = VM_FAULT_MINOR;
1696		goto out_nomap;
1697	}
1698
1699	if (unlikely(!PageUptodate(page))) {
1700		ret = VM_FAULT_SIGBUS;
1701		goto out_nomap;
1702	}
1703
1704	/* The page isn't present yet, go ahead with the fault. */
1705
1706	inc_mm_counter(mm, rss);
1707	pte = mk_pte(page, vma->vm_page_prot);
1708	if (write_access && can_share_swap_page(page)) {
1709		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
1710		write_access = 0;
1711	}
1712
1713	flush_icache_page(vma, page);
1714	set_pte_at(mm, address, page_table, pte);
1715	page_add_anon_rmap(page, vma, address);
1716
1717	swap_free(entry);
1718	if (vm_swap_full())
1719		remove_exclusive_swap_page(page);
1720	unlock_page(page);
1721
1722	if (write_access) {
1723		if (do_wp_page(mm, vma, address,
1724				page_table, pmd, pte) == VM_FAULT_OOM)
1725			ret = VM_FAULT_OOM;
1726		goto out;
1727	}
1728
1729	/* No need to invalidate - it was non-present before */
1730	update_mmu_cache(vma, address, pte);
1731	lazy_mmu_prot_update(pte);
1732	pte_unmap(page_table);
1733	spin_unlock(&mm->page_table_lock);
1734out:
1735	return ret;
1736out_nomap:
1737	pte_unmap(page_table);
1738	spin_unlock(&mm->page_table_lock);
1739	unlock_page(page);
1740	page_cache_release(page);
1741	goto out;
1742}
1743
1744/*
1745 * We are called with the MM semaphore and page_table_lock
1746 * spinlock held to protect against concurrent faults in
1747 * multithreaded programs.
1748 */
1749static int
1750do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
1751		pte_t *page_table, pmd_t *pmd, int write_access,
1752		unsigned long addr)
1753{
1754	pte_t entry;
1755	struct page * page = ZERO_PAGE(addr);
1756
1757	/* Read-only mapping of ZERO_PAGE. */
1758	entry = pte_wrprotect(mk_pte(ZERO_PAGE(addr), vma->vm_page_prot));
1759
1760	/* ..except if it's a write access */
1761	if (write_access) {
1762		/* Allocate our own private page. */
1763		pte_unmap(page_table);
1764		spin_unlock(&mm->page_table_lock);
1765
1766		if (unlikely(anon_vma_prepare(vma)))
1767			goto no_mem;
1768		page = alloc_zeroed_user_highpage(vma, addr);
1769		if (!page)
1770			goto no_mem;
1771
1772		spin_lock(&mm->page_table_lock);
1773		page_table = pte_offset_map(pmd, addr);
1774
1775		if (!pte_none(*page_table)) {
1776			pte_unmap(page_table);
1777			page_cache_release(page);
1778			spin_unlock(&mm->page_table_lock);
1779			goto out;
1780		}
1781		inc_mm_counter(mm, rss);
1782		entry = maybe_mkwrite(pte_mkdirty(mk_pte(page,
1783							 vma->vm_page_prot)),
1784				      vma);
1785		lru_cache_add_active(page);
1786		SetPageReferenced(page);
1787		page_add_anon_rmap(page, vma, addr);
1788	}
1789
1790	set_pte_at(mm, addr, page_table, entry);
1791	pte_unmap(page_table);
1792
1793	/* No need to invalidate - it was non-present before */
1794	update_mmu_cache(vma, addr, entry);
1795	lazy_mmu_prot_update(entry);
1796	spin_unlock(&mm->page_table_lock);
1797out:
1798	return VM_FAULT_MINOR;
1799no_mem:
1800	return VM_FAULT_OOM;
1801}
1802
1803/*
1804 * do_no_page() tries to create a new page mapping. It aggressively
1805 * tries to share with existing pages, but makes a separate copy if
1806 * the "write_access" parameter is true in order to avoid the next
1807 * page fault.
1808 *
1809 * As this is called only for pages that do not currently exist, we
1810 * do not need to flush old virtual caches or the TLB.
1811 *
1812 * This is called with the MM semaphore held and the page table
1813 * spinlock held. Exit with the spinlock released.
1814 */
1815static int
1816do_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
1817	unsigned long address, int write_access, pte_t *page_table, pmd_t *pmd)
1818{
1819	struct page * new_page;
1820	struct address_space *mapping = NULL;
1821	pte_t entry;
1822	unsigned int sequence = 0;
1823	int ret = VM_FAULT_MINOR;
1824	int anon = 0;
1825
1826	if (!vma->vm_ops || !vma->vm_ops->nopage)
1827		return do_anonymous_page(mm, vma, page_table,
1828					pmd, write_access, address);
1829	pte_unmap(page_table);
1830	spin_unlock(&mm->page_table_lock);
1831
1832	if (vma->vm_file) {
1833		mapping = vma->vm_file->f_mapping;
1834		sequence = mapping->truncate_count;
1835		smp_rmb(); /* serializes i_size against truncate_count */
1836	}
1837retry:
1838	cond_resched();
1839	new_page = vma->vm_ops->nopage(vma, address & PAGE_MASK, &ret);
1840	/*
1841	 * No smp_rmb is needed here as long as there's a full
1842	 * spin_lock/unlock sequence inside the ->nopage callback
1843	 * (for the pagecache lookup) that acts as an implicit
1844	 * smp_mb() and prevents the i_size read from happening
1845	 * after the next truncate_count read.
1846	 */
1847
1848	/* no page was available -- either SIGBUS or OOM */
1849	if (new_page == NOPAGE_SIGBUS)
1850		return VM_FAULT_SIGBUS;
1851	if (new_page == NOPAGE_OOM)
1852		return VM_FAULT_OOM;
1853
1854	/*
1855	 * Should we do an early C-O-W break?
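	 * For a write fault on a private (non-VM_SHARED) mapping we give
	 * the task its own anonymous copy of the page right away, so no
	 * further write-protect fault has to be taken on it later.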
1856	 */
1857	if (write_access && !(vma->vm_flags & VM_SHARED)) {
1858		struct page *page;
1859
1860		if (unlikely(anon_vma_prepare(vma)))
1861			goto oom;
1862		page = alloc_page_vma(GFP_HIGHUSER, vma, address);
1863		if (!page)
1864			goto oom;
1865		copy_user_highpage(page, new_page, address);
1866		page_cache_release(new_page);
1867		new_page = page;
1868		anon = 1;
1869	}
1870
1871	spin_lock(&mm->page_table_lock);
1872	/*
1873	 * For a file-backed vma, someone could have truncated or otherwise
1874	 * invalidated this page.  If unmap_mapping_range got called,
1875	 * retry getting the page.
1876	 */
1877	if (mapping && unlikely(sequence != mapping->truncate_count)) {
1878		sequence = mapping->truncate_count;
1879		spin_unlock(&mm->page_table_lock);
1880		page_cache_release(new_page);
1881		goto retry;
1882	}
1883	page_table = pte_offset_map(pmd, address);
1884
1885	/*
1886	 * This silly early PAGE_DIRTY setting removes a race
1887	 * due to the bad i386 page protection. But it's valid
1888	 * for other architectures too.
1889	 *
1890	 * Note that if write_access is true, we either now have
1891	 * an exclusive copy of the page, or this is a shared mapping,
1892	 * so we can make it writable and dirty to avoid having to
1893	 * handle that later.
1894	 */
1895	/* Only go through if we didn't race with anybody else... */
1896	if (pte_none(*page_table)) {
1897		if (!PageReserved(new_page))
1898			inc_mm_counter(mm, rss);
1899
1900		flush_icache_page(vma, new_page);
1901		entry = mk_pte(new_page, vma->vm_page_prot);
1902		if (write_access)
1903			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
1904		set_pte_at(mm, address, page_table, entry);
1905		if (anon) {
1906			lru_cache_add_active(new_page);
1907			page_add_anon_rmap(new_page, vma, address);
1908		} else
1909			page_add_file_rmap(new_page);
1910		pte_unmap(page_table);
1911	} else {
1912		/* One of our sibling threads was faster, back out. */
1913		pte_unmap(page_table);
1914		page_cache_release(new_page);
1915		spin_unlock(&mm->page_table_lock);
1916		goto out;
1917	}
1918
1919	/* no need to invalidate: a not-present page shouldn't be cached */
1920	update_mmu_cache(vma, address, entry);
1921	lazy_mmu_prot_update(entry);
1922	spin_unlock(&mm->page_table_lock);
1923out:
1924	return ret;
1925oom:
1926	page_cache_release(new_page);
1927	ret = VM_FAULT_OOM;
1928	goto out;
1929}
1930
1931/*
1932	 * Fault on a previously existing named mapping. Repopulate the pte
1933 * from the encoded file_pte if possible. This enables swappable
1934 * nonlinear vmas.
1935 */
1936static int do_file_page(struct mm_struct * mm, struct vm_area_struct * vma,
1937	unsigned long address, int write_access, pte_t *pte, pmd_t *pmd)
1938{
1939	unsigned long pgoff;
1940	int err;
1941
1942	BUG_ON(!vma->vm_ops || !vma->vm_ops->nopage);
1943	/*
1944	 * Fall back to the linear mapping if the fs does not support
1945	 * ->populate:
1946	 */
1947	if (!vma->vm_ops || !vma->vm_ops->populate ||
1948			(write_access && !(vma->vm_flags & VM_SHARED))) {
1949		pte_clear(mm, address, pte);
1950		return do_no_page(mm, vma, address, write_access, pte, pmd);
1951	}
1952
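	/*
	 * The non-present pte encodes the file offset of this nonlinear
	 * page; decode it and ask the filesystem's ->populate method to
	 * re-establish the mapping.
	 */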
1953	pgoff = pte_to_pgoff(*pte);
1954
1955	pte_unmap(pte);
1956	spin_unlock(&mm->page_table_lock);
1957
1958	err = vma->vm_ops->populate(vma, address & PAGE_MASK, PAGE_SIZE, vma->vm_page_prot, pgoff, 0);
1959	if (err == -ENOMEM)
1960		return VM_FAULT_OOM;
1961	if (err)
1962		return VM_FAULT_SIGBUS;
1963	return VM_FAULT_MAJOR;
1964}
1965
1966/*
1967 * These routines also need to handle stuff like marking pages dirty
1968 * and/or accessed for architectures that don't do it in hardware (most
1969 * RISC architectures).  The early dirtying is also good on the i386.
1970 *
1971 * There is also a hook called "update_mmu_cache()" that architectures
1972 * with external mmu caches can use to update those (ie the Sparc or
1973 * PowerPC hashed page tables that act as extended TLBs).
1974 *
1975 * Note the "page_table_lock". It is to protect against kswapd removing
1976 * pages from under us. Note that kswapd only ever _removes_ pages, never
1977 * adds them. As such, once we have noticed that the page is not present,
1978 * we can drop the lock early.
1979 *
1980 * The adding of pages is protected by the MM semaphore (which we hold),
1981	 * so we don't need to worry about a page suddenly being added to
1982	 * our VM.
1983 *
1984 * We enter with the pagetable spinlock held, we are supposed to
1985 * release it when done.
1986 */
1987static inline int handle_pte_fault(struct mm_struct *mm,
1988	struct vm_area_struct * vma, unsigned long address,
1989	int write_access, pte_t *pte, pmd_t *pmd)
1990{
1991	pte_t entry;
1992
1993	entry = *pte;
1994	if (!pte_present(entry)) {
1995		/*
1996		 * If it truly wasn't present, we know that kswapd
1997		 * and the PTE updates will not touch it later. So
1998		 * drop the lock.
1999		 */
2000		if (pte_none(entry))
2001			return do_no_page(mm, vma, address, write_access, pte, pmd);
2002		if (pte_file(entry))
2003			return do_file_page(mm, vma, address, write_access, pte, pmd);
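		/* Neither empty nor a file pte: it must be a swap entry. */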
2004		return do_swap_page(mm, vma, address, pte, pmd, entry, write_access);
2005	}
2006
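	/*
	 * The page is present.  A write through a non-writable pte is a
	 * copy-on-write (or shared-page) protection fault handled by
	 * do_wp_page(); otherwise just update the dirty/accessed bits.
	 */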
2007	if (write_access) {
2008		if (!pte_write(entry))
2009			return do_wp_page(mm, vma, address, pte, pmd, entry);
2010		entry = pte_mkdirty(entry);
2011	}
2012	entry = pte_mkyoung(entry);
2013	ptep_set_access_flags(vma, address, pte, entry, write_access);
2014	update_mmu_cache(vma, address, entry);
2015	lazy_mmu_prot_update(entry);
2016	pte_unmap(pte);
2017	spin_unlock(&mm->page_table_lock);
2018	return VM_FAULT_MINOR;
2019}
2020
2021/*
2022 * By the time we get here, we already hold the mm semaphore
2023 */
2024int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct * vma,
2025		unsigned long address, int write_access)
2026{
2027	pgd_t *pgd;
2028	pud_t *pud;
2029	pmd_t *pmd;
2030	pte_t *pte;
2031
2032	__set_current_state(TASK_RUNNING);
2033
2034	inc_page_state(pgfault);
2035
2036	if (is_vm_hugetlb_page(vma))
2037		return VM_FAULT_SIGBUS;	/* mapping truncation does this. */
2038
2039	/*
2040	 * We need the page table lock to synchronize with kswapd
2041	 * and the SMP-safe atomic PTE updates.
2042	 */
2043	pgd = pgd_offset(mm, address);
2044	spin_lock(&mm->page_table_lock);
2045
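	/*
	 * Walk, and where necessary allocate, each page table level
	 * down to the pte for this address.
	 */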
2046	pud = pud_alloc(mm, pgd, address);
2047	if (!pud)
2048		goto oom;
2049
2050	pmd = pmd_alloc(mm, pud, address);
2051	if (!pmd)
2052		goto oom;
2053
2054	pte = pte_alloc_map(mm, pmd, address);
2055	if (!pte)
2056		goto oom;
2057
2058	return handle_pte_fault(mm, vma, address, write_access, pte, pmd);
2059
2060 oom:
2061	spin_unlock(&mm->page_table_lock);
2062	return VM_FAULT_OOM;
2063}
2064
2065#ifndef __PAGETABLE_PUD_FOLDED
2066/*
2067 * Allocate page upper directory.
2068 *
2069 * We've already handled the fast-path in-line, and we own the
2070 * page table lock.
2071 */
2072pud_t fastcall *__pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
2073{
2074	pud_t *new;
2075
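	/*
	 * Drop the lock around the allocation, which may sleep, and
	 * re-take it before touching the page tables again.
	 */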
2076	spin_unlock(&mm->page_table_lock);
2077	new = pud_alloc_one(mm, address);
2078	spin_lock(&mm->page_table_lock);
2079	if (!new)
2080		return NULL;
2081
2082	/*
2083	 * Because we dropped the lock, we should re-check the
2084	 * entry, as somebody else could have populated it..
2085	 */
2086	if (pgd_present(*pgd)) {
2087		pud_free(new);
2088		goto out;
2089	}
2090	pgd_populate(mm, pgd, new);
2091 out:
2092	return pud_offset(pgd, address);
2093}
2094#endif /* __PAGETABLE_PUD_FOLDED */
2095
2096#ifndef __PAGETABLE_PMD_FOLDED
2097/*
2098 * Allocate page middle directory.
2099 *
2100 * We've already handled the fast-path in-line, and we own the
2101 * page table lock.
2102 */
2103pmd_t fastcall *__pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
2104{
2105	pmd_t *new;
2106
2107	spin_unlock(&mm->page_table_lock);
2108	new = pmd_alloc_one(mm, address);
2109	spin_lock(&mm->page_table_lock);
2110	if (!new)
2111		return NULL;
2112
2113	/*
2114	 * Because we dropped the lock, we should re-check the
2115	 * entry, as somebody else could have populated it..
2116	 */
2117#ifndef __ARCH_HAS_4LEVEL_HACK
2118	if (pud_present(*pud)) {
2119		pmd_free(new);
2120		goto out;
2121	}
2122	pud_populate(mm, pud, new);
2123#else
2124	if (pgd_present(*pud)) {
2125		pmd_free(new);
2126		goto out;
2127	}
2128	pgd_populate(mm, pud, new);
2129#endif /* __ARCH_HAS_4LEVEL_HACK */
2130
2131 out:
2132	return pmd_offset(pud, address);
2133}
2134#endif /* __PAGETABLE_PMD_FOLDED */
2135
2136int make_pages_present(unsigned long addr, unsigned long end)
2137{
2138	int ret, len, write;
2139	struct vm_area_struct * vma;
2140
2141	vma = find_vma(current->mm, addr);
2142	if (!vma)
2143		return -1;
2144	write = (vma->vm_flags & VM_WRITE) != 0;
2145	if (addr >= end)
2146		BUG();
2147	if (end > vma->vm_end)
2148		BUG();
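	/*
	 * len is the number of pages spanned by [addr, end);
	 * get_user_pages() faults each of them in, taking write
	 * faults when the vma is writable.
	 */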
2149	len = (end+PAGE_SIZE-1)/PAGE_SIZE-addr/PAGE_SIZE;
2150	ret = get_user_pages(current, current->mm, addr,
2151			len, write, 0, NULL, NULL);
2152	if (ret < 0)
2153		return ret;
2154	return ret == len ? 0 : -1;
2155}
2156
2157/*
2158 * Map a vmalloc()-space virtual address to the physical page.
2159 */
2160struct page * vmalloc_to_page(void * vmalloc_addr)
2161{
2162	unsigned long addr = (unsigned long) vmalloc_addr;
2163	struct page *page = NULL;
2164	pgd_t *pgd = pgd_offset_k(addr);
2165	pud_t *pud;
2166	pmd_t *pmd;
2167	pte_t *ptep, pte;
2168
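	/*
	 * Walk the kernel page tables down to the pte for this address
	 * and pick out the page it maps, if one is present.
	 */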
2169	if (!pgd_none(*pgd)) {
2170		pud = pud_offset(pgd, addr);
2171		if (!pud_none(*pud)) {
2172			pmd = pmd_offset(pud, addr);
2173			if (!pmd_none(*pmd)) {
2174				ptep = pte_offset_map(pmd, addr);
2175				pte = *ptep;
2176				if (pte_present(pte))
2177					page = pte_page(pte);
2178				pte_unmap(ptep);
2179			}
2180		}
2181	}
2182	return page;
2183}
2184
2185EXPORT_SYMBOL(vmalloc_to_page);
2186
2187/*
2188 * Map a vmalloc()-space virtual address to the physical page frame number.
2189 */
2190unsigned long vmalloc_to_pfn(void * vmalloc_addr)
2191{
2192	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
2193}
2194
2195EXPORT_SYMBOL(vmalloc_to_pfn);
2196
2197/*
2198 * update_mem_hiwater
2199 *	- update per process rss and vm high water data
2200	 *	- update the per-process rss and vm high-water data
2201void update_mem_hiwater(struct task_struct *tsk)
2202{
2203	if (tsk->mm) {
2204		unsigned long rss = get_mm_counter(tsk->mm, rss);
2205
2206		if (tsk->mm->hiwater_rss < rss)
2207			tsk->mm->hiwater_rss = rss;
2208		if (tsk->mm->hiwater_vm < tsk->mm->total_vm)
2209			tsk->mm->hiwater_vm = tsk->mm->total_vm;
2210	}
2211}
2212
2213#if !defined(__HAVE_ARCH_GATE_AREA)
2214
2215#if defined(AT_SYSINFO_EHDR)
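/*
 * The gate VMA describes the fixmap-based page exposed to user space
 * (e.g. the vsyscall/vDSO page) on architectures that define
 * AT_SYSINFO_EHDR; it is read-only and belongs to no mm.
 */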
2216struct vm_area_struct gate_vma;
2217
2218static int __init gate_vma_init(void)
2219{
2220	gate_vma.vm_mm = NULL;
2221	gate_vma.vm_start = FIXADDR_USER_START;
2222	gate_vma.vm_end = FIXADDR_USER_END;
2223	gate_vma.vm_page_prot = PAGE_READONLY;
2224	gate_vma.vm_flags = 0;
2225	return 0;
2226}
2227__initcall(gate_vma_init);
2228#endif
2229
2230struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
2231{
2232#ifdef AT_SYSINFO_EHDR
2233	return &gate_vma;
2234#else
2235	return NULL;
2236#endif
2237}
2238
2239int in_gate_area_no_task(unsigned long addr)
2240{
2241#ifdef AT_SYSINFO_EHDR
2242	if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END))
2243		return 1;
2244#endif
2245	return 0;
2246}
2247
2248#endif	/* __HAVE_ARCH_GATE_AREA */
2249