fremap.c revision 9888a1cae3f859db38b9604e3df1c02177161bb0
/*
 *   linux/mm/fremap.c
 *
 * Explicit pagetable population and nonlinear (random) mappings support.
 *
 * started by Ingo Molnar, Copyright (C) 2002, 2003
 */

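/*
 * Userspace reaches this code through the remap_file_pages(2) syscall:
 *
 *	int remap_file_pages(void *addr, size_t size, int prot,
 *			     size_t pgoff, int flags);
 *
 * A minimal sketch of a nonlinear mapping - illustrative only, error
 * handling omitted, file name made up - assuming "data" is at least
 * three pages long:
 *
 *	int fd = open("data", O_RDWR);
 *	long pg = sysconf(_SC_PAGESIZE);
 *	char *win = mmap(NULL, 3 * pg, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, 0);
 *	remap_file_pages(win, pg, 0, 2, 0);
 *
 * The last call rebinds the first window page to file page 2 without
 * creating a new vma.  prot must be 0 (see sys_remap_file_pages()
 * below) and pgoff is in units of the system page size.
 */
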
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/rmap.h>
#include <linux/module.h>
#include <linux/syscalls.h>

#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

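/*
 * Clear one pte.  Returns 1 if a real, rmapped page was unmapped (so the
 * caller can adjust the file_rss accounting) and 0 otherwise - i.e. for a
 * swap entry, a file pte, or a present pte with no normal backing page.
 * The caller must hold the pte lock.
 */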
static int zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;
	struct page *page = NULL;

	if (pte_present(pte)) {
		flush_cache_page(vma, addr, pte_pfn(pte));
		pte = ptep_clear_flush(vma, addr, ptep);
		page = vm_normal_page(vma, addr, pte);
		if (page) {
			if (pte_dirty(pte))
				set_page_dirty(page);
			page_remove_rmap(page);
			page_cache_release(page);
		}
	} else {
		if (!pte_file(pte))
			free_swap_and_cache(pte_to_swp_entry(pte));
		pte_clear_not_present_full(mm, addr, ptep, 0);
	}
	return !!page;
}

/*
 * Install a file page at the given virtual memory address, releasing
 * any previously existing mapping.
 */
int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long addr, struct page *page, pgprot_t prot)
{
	struct inode *inode;
	pgoff_t size;
	int err = -ENOMEM;
	pte_t *pte;
	pte_t pte_val;
	spinlock_t *ptl;

	pte = get_locked_pte(mm, addr, &ptl);
	if (!pte)
		goto out;

	/*
	 * This page may have been truncated. Tell the
	 * caller about it.
	 */
	err = -EINVAL;
	inode = vma->vm_file->f_mapping->host;
	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (!page->mapping || page->index >= size)
		goto unlock;
	err = -ENOMEM;
	if (page_mapcount(page) > INT_MAX/2)
		goto unlock;

	if (pte_none(*pte) || !zap_pte(mm, vma, addr, pte))
		inc_mm_counter(mm, file_rss);

	flush_icache_page(vma, page);
	pte_val = mk_pte(page, prot);
	set_pte_at(mm, addr, pte, pte_val);
	page_add_file_rmap(page);
	update_mmu_cache(vma, addr, pte_val);
	lazy_mmu_prot_update(pte_val);
	err = 0;
unlock:
	pte_unmap_unlock(pte, ptl);
out:
	return err;
}
EXPORT_SYMBOL(install_page);

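/*
 * install_page() is meant to be called from a vma's ->populate()
 * method; in kernels of this vintage the generic pagecache
 * implementation, filemap_populate(), finds or reads each pagecache
 * page and then installs it here, one address at a time.
 */
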
/*
 * Install a file pte at the given virtual memory address, releasing
 * any previously existing mapping.
 */
int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long addr, unsigned long pgoff, pgprot_t prot)
{
	int err = -ENOMEM;
	pte_t *pte;
	spinlock_t *ptl;

	pte = get_locked_pte(mm, addr, &ptl);
	if (!pte)
		goto out;

	if (!pte_none(*pte) && zap_pte(mm, vma, addr, pte)) {
		update_hiwater_rss(mm);
		dec_mm_counter(mm, file_rss);
	}

	set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff));
	/*
	 * We don't need to run update_mmu_cache() here because the "file pte"
	 * being installed by install_file_pte() is not a real pte - it's a
	 * non-present entry (like a swap entry), noting what file offset should
	 * be mapped there when there's a fault (in a nonlinear vma where
	 * that's not obvious).
	 */
	pte_unmap_unlock(pte, ptl);
	err = 0;
out:
	return err;
}

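/*
 * The non-present entry written above is consumed at fault time: in
 * kernels of this era, do_file_page() in mm/memory.c recovers the file
 * offset with pte_to_pgoff() and passes it back to
 * vma->vm_ops->populate() to bring the real page in.
 */
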
/**
 * sys_remap_file_pages - remap arbitrary pages of a shared backing store
 *                        file within an existing vma
 * @start: start of the remapped virtual memory range
 * @size: size of the remapped virtual memory range
 * @prot: new protection bits of the range
 * @pgoff: page of the backing store file to be mapped
 * @flags: 0 or MAP_NONBLOCK - the latter causes no IO
 *
 * This syscall works purely via pagetables, so it's the most efficient
 * way to map the same (large) file into a given virtual window. Unlike
 * mmap()/mremap() it does not create any new vmas. The new mappings are
 * also safe across swapout.
 *
 * NOTE: the 'prot' parameter right now must be zero, and the vma's
 * default protection is used. Arbitrary protections might be
 * implemented in the future.
 */
asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size,
	unsigned long __prot, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct address_space *mapping;
	unsigned long end = start + size;
	struct vm_area_struct *vma;
	int err = -EINVAL;
	int has_write_lock = 0;

	if (__prot)
		return err;
	/*
	 * Sanitize the syscall parameters:
	 */
	start = start & PAGE_MASK;
	size = size & PAGE_MASK;

	/* Does the address range wrap, or is the span zero-sized? */
	if (start + size <= start)
		return err;

	/* Can we represent this offset inside this architecture's ptes? */
#if PTE_FILE_MAX_BITS < BITS_PER_LONG
	if (pgoff + (size >> PAGE_SHIFT) >= (1UL << PTE_FILE_MAX_BITS))
		return err;
#endif
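	/*
	 * Worked example, assuming a 32-bit arch with 4K pages and
	 * PTE_FILE_MAX_BITS == 29 (the real value is per-architecture):
	 * a file pte could then encode page offsets below 2^29, i.e.
	 * file offsets below 2^29 * 4K = 2TB, and the check above
	 * rejects any request whose end offset,
	 * pgoff + (size >> PAGE_SHIFT), needs more than those 29 bits.
	 */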

	/*
	 * Changing vma->vm_flags requires down_write(); start with
	 * down_read() and upgrade below only if the vma must be marked
	 * VM_NONLINEAR.
	 */
	down_read(&mm->mmap_sem);
 retry:
	vma = find_vma(mm, start);

	/*
	 * Make sure the vma is shared, that it supports prefaulting,
	 * and that the remapped range is valid and fully within
	 * the single existing vma.  vm_private_data is used as a
	 * swapout cursor in a VM_NONLINEAR vma.
	 */
	if (vma && (vma->vm_flags & VM_SHARED) &&
		(!vma->vm_private_data || (vma->vm_flags & VM_NONLINEAR)) &&
		vma->vm_ops && vma->vm_ops->populate &&
			end > start && start >= vma->vm_start &&
				end <= vma->vm_end) {

		/* Must set VM_NONLINEAR before any pages are populated. */
		if (pgoff != linear_page_index(vma, start) &&
		    !(vma->vm_flags & VM_NONLINEAR)) {
			if (!has_write_lock) {
				up_read(&mm->mmap_sem);
				down_write(&mm->mmap_sem);
				has_write_lock = 1;
				goto retry;
			}
			mapping = vma->vm_file->f_mapping;
			spin_lock(&mapping->i_mmap_lock);
			flush_dcache_mmap_lock(mapping);
			vma->vm_flags |= VM_NONLINEAR;
			vma_prio_tree_remove(vma, &mapping->i_mmap);
			vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
			flush_dcache_mmap_unlock(mapping);
			spin_unlock(&mapping->i_mmap_lock);
		}
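		/*
		 * Once the vma sits on i_mmap_nonlinear, page->index no
		 * longer predicts the virtual address of a mapping, so
		 * truncation and rmap walks have to scan this vma's page
		 * tables linearly instead of consulting the prio tree.
		 */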

		err = vma->vm_ops->populate(vma, start, size,
					    vma->vm_page_prot,
					    pgoff, flags & MAP_NONBLOCK);

		/*
		 * We can't clear VM_NONLINEAR because we'd have to do
		 * it after ->populate completes, and that would prevent
		 * downgrading the lock.  (Locks can't be upgraded).
		 */
	}
	if (likely(!has_write_lock))
		up_read(&mm->mmap_sem);
	else
		up_write(&mm->mmap_sem);

	return err;
}
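
/*
 * Note on MAP_NONBLOCK, as handled by the generic pagecache ->populate()
 * in kernels of this era: pages already present in the pagecache are
 * installed directly, while anything that would need IO just gets a file
 * pte and is brought in on first touch.
 */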