fremap.c revision ba470de43188cdbff795b5da43a1474523c6c2fb
/*
 *   linux/mm/fremap.c
 *
 * Explicit pagetable population and nonlinear (random) mappings support.
 *
 * started by Ingo Molnar, Copyright (C) 2002, 2003
 */
#include <linux/backing-dev.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/rmap.h>
#include <linux/module.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>

#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "internal.h"

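/*
 * Clear a single pte at @addr and release whatever it referenced:
 * a present page is flushed, unmapped and its rmap and RSS accounting
 * undone; a swap entry drops its swap reference; a file pte (which is
 * never "present") is simply cleared.
 */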
static void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	if (pte_present(pte)) {
		struct page *page;

		flush_cache_page(vma, addr, pte_pfn(pte));
		pte = ptep_clear_flush(vma, addr, ptep);
		page = vm_normal_page(vma, addr, pte);
		if (page) {
			if (pte_dirty(pte))
				set_page_dirty(page);
			page_remove_rmap(page, vma);
			page_cache_release(page);
			update_hiwater_rss(mm);
			dec_mm_counter(mm, file_rss);
		}
	} else {
		if (!pte_file(pte))
			free_swap_and_cache(pte_to_swp_entry(pte));
		pte_clear_not_present_full(mm, addr, ptep, 0);
	}
}

/*
 * Install a file pte at the given virtual memory address, releasing
 * any previously existing mapping.
 */
static int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long addr, unsigned long pgoff, pgprot_t prot)
{
	int err = -ENOMEM;
	pte_t *pte;
	spinlock_t *ptl;

	pte = get_locked_pte(mm, addr, &ptl);
	if (!pte)
		goto out;

	if (!pte_none(*pte))
		zap_pte(mm, vma, addr, pte);

	set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff));
	/*
	 * We don't need to run update_mmu_cache() here because the "file pte"
	 * being installed by install_file_pte() is not a real pte - it's a
	 * non-present entry (like a swap entry), noting what file offset should
	 * be mapped there when there's a fault (in a non-linear vma where
	 * that's not obvious).
	 */
	pte_unmap_unlock(pte, ptl);
	err = 0;
out:
	return err;
}
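
/*
 * For reference, a minimal sketch of the invariant install_file_pte()
 * relies on (pgoff_to_pte(), pte_to_pgoff() and pte_file() are
 * per-architecture):
 *
 *	pte_t pte = pgoff_to_pte(pgoff);
 *	BUG_ON(!pte_file(pte));
 *	BUG_ON(pte_to_pgoff(pte) != pgoff);
 *
 * i.e. a file pte tests true for pte_file(), is never "present", and
 * round-trips the file offset through the pte bits.
 */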
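/*
 * Install file ptes covering [addr, addr + size), one page at a time,
 * advancing the file offset with each page.
 */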
static int populate_range(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, unsigned long size, pgoff_t pgoff)
{
	int err;

	do {
		err = install_file_pte(mm, vma, addr, pgoff, vma->vm_page_prot);
		if (err)
			return err;

		size -= PAGE_SIZE;
		addr += PAGE_SIZE;
		pgoff++;
	} while (size);

	return 0;
}

/**
 * sys_remap_file_pages - remap arbitrary pages of an existing VM_SHARED vma
 * @start: start of the remapped virtual memory range
 * @size: size of the remapped virtual memory range
 * @prot: new protection bits of the range (see NOTE)
 * @pgoff: to-be-mapped page of the backing store file
 * @flags: 0 or MAP_NONBLOCK - the latter causes no IO.
 *
 * sys_remap_file_pages remaps arbitrary pages of an existing VM_SHARED vma
 * (shared backing store file).
 *
 * This syscall works purely via pagetables, so it's the most efficient
 * way to map the same (large) file into a given virtual window. Unlike
 * mmap()/mremap() it does not create any new vmas. The new mappings are
 * also safe across swapout.
 *
 * NOTE: the @prot parameter right now is ignored (but must be zero),
 * and the vma's default protection is used. Arbitrary protections
 * might be implemented in the future.
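 *
 * Example (a hedged userspace sketch, not kernel code; assumes "fd" is
 * an open file descriptor and "psz" the system page size): map the
 * 11th file page over the first page of an existing 4-page shared
 * linear mapping:
 *
 *	char *addr = mmap(NULL, 4 * psz, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 *	remap_file_pages(addr, psz, 0, 10, 0);
 *
 * Afterwards addr[0..psz) shows file page 10 while the rest of the
 * window stays linear, all within the single original vma.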
 */
asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size,
	unsigned long prot, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct address_space *mapping;
	unsigned long end = start + size;
	struct vm_area_struct *vma;
	int err = -EINVAL;
	int has_write_lock = 0;

	if (prot)
		return err;
	/*
	 * Sanitize the syscall parameters:
	 */
	start = start & PAGE_MASK;
	size = size & PAGE_MASK;

	/* Does the address range wrap, or is the span zero-sized? */
	if (start + size <= start)
		return err;

	/* Can we represent this offset inside this architecture's ptes? */
#if PTE_FILE_MAX_BITS < BITS_PER_LONG
	if (pgoff + (size >> PAGE_SHIFT) >= (1UL << PTE_FILE_MAX_BITS))
		return err;
#endif
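	/*
	 * (For example, with 4K pages and PTE_FILE_MAX_BITS == 29, as on
	 * 2-level x86 pagetables, the largest encodable offset is
	 * (1UL << 29) - 1 pages, i.e. just under 2TB into the file.)
	 */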

	/*
	 * Take mmap_sem for reading first; we only upgrade to down_write()
	 * below if vma->vm_flags needs to change.
	 */
	down_read(&mm->mmap_sem);
 retry:
	vma = find_vma(mm, start);

	/*
	 * Make sure the vma is shared, that it supports prefaulting,
	 * and that the remapped range is valid and fully within
	 * the single existing vma.  vm_private_data is used as a
	 * swapout cursor in a VM_NONLINEAR vma.
	 */
	if (!vma || !(vma->vm_flags & VM_SHARED))
		goto out;

	if (vma->vm_private_data && !(vma->vm_flags & VM_NONLINEAR))
		goto out;

	if (!(vma->vm_flags & VM_CAN_NONLINEAR))
		goto out;

	if (end <= start || start < vma->vm_start || end > vma->vm_end)
		goto out;

	/* Must set VM_NONLINEAR before any pages are populated. */
	if (!(vma->vm_flags & VM_NONLINEAR)) {
		/* Don't need a nonlinear mapping, exit success */
		if (pgoff == linear_page_index(vma, start)) {
			err = 0;
			goto out;
		}

		if (!has_write_lock) {
			up_read(&mm->mmap_sem);
			down_write(&mm->mmap_sem);
			has_write_lock = 1;
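			/*
			 * The map may have changed while mmap_sem was
			 * dropped for the upgrade, so revalidate the
			 * vma from scratch.
			 */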
			goto retry;
		}
		mapping = vma->vm_file->f_mapping;
		/*
		 * page_mkclean doesn't work on nonlinear vmas, so if
		 * dirty pages need to be accounted, emulate with linear
		 * vmas.
		 */
		if (mapping_cap_account_dirty(mapping)) {
			unsigned long addr;
			struct file *file = vma->vm_file;

			flags &= MAP_NONBLOCK;
			get_file(file);
			addr = mmap_region(file, start, size,
					flags, vma->vm_flags, pgoff, 1);
			fput(file);
			if (IS_ERR_VALUE(addr)) {
				err = addr;
			} else {
				BUG_ON(addr != start);
				err = 0;
			}
			goto out;
		}
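		/*
		 * Move the vma from the address_space's prio tree (which
		 * assumes linear file-offset ordering) onto its list of
		 * nonlinear vmas, under the i_mmap lock.
		 */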
		spin_lock(&mapping->i_mmap_lock);
		flush_dcache_mmap_lock(mapping);
		vma->vm_flags |= VM_NONLINEAR;
		vma_prio_tree_remove(vma, &mapping->i_mmap);
		vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
		flush_dcache_mmap_unlock(mapping);
		spin_unlock(&mapping->i_mmap_lock);
	}

	if (vma->vm_flags & VM_LOCKED) {
		/*
		 * Drop the PG_mlocked flag for the over-mapped range:
		 * munlock_vma_pages_range() clears VM_LOCKED internally,
		 * so save and restore vm_flags around the call.
		 */
		unsigned long saved_flags = vma->vm_flags;
		munlock_vma_pages_range(vma, start, start + size);
		vma->vm_flags = saved_flags;
	}

	mmu_notifier_invalidate_range_start(mm, start, start + size);
	err = populate_range(mm, vma, start, size, pgoff);
	mmu_notifier_invalidate_range_end(mm, start, start + size);
	if (!err && !(flags & MAP_NONBLOCK)) {
		if (vma->vm_flags & VM_LOCKED) {
			/*
			 * We might be mapping a previously unmapped
			 * range of the file.
			 */
			mlock_vma_pages_range(vma, start, start + size);
		} else {
			if (unlikely(has_write_lock)) {
				downgrade_write(&mm->mmap_sem);
				has_write_lock = 0;
			}
			make_pages_present(start, start + size);
		}
	}

	/*
	 * We can't clear VM_NONLINEAR because we'd have to do
	 * it after populate_range() completes, and that would
	 * prevent downgrading the lock.  (Locks can't be upgraded).
	 */

out:
	if (likely(!has_write_lock))
		up_read(&mm->mmap_sem);
	else
		up_write(&mm->mmap_sem);

	return err;
}