mprotect.c revision ab50b8ed818016cfecd747d6d4bb9139986bc029
/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code	<alan@redhat.com>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

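/*
 * Walk the ptes mapped by one pmd entry and rewrite every present pte
 * with the new protection bits.  Non-present ptes are left untouched.
 * Called with mm->page_table_lock held.
 */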
static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
		unsigned long addr, unsigned long end, pgprot_t newprot)
{
	pte_t *pte;

	pte = pte_offset_map(pmd, addr);
	do {
		if (pte_present(*pte)) {
			pte_t ptent;

			/* Avoid an SMP race with hardware updated dirty/clean
			 * bits by wiping the pte and then setting the new pte
			 * into place.
			 */
			ptent = pte_modify(ptep_get_and_clear(mm, addr, pte), newprot);
			set_pte_at(mm, addr, pte, ptent);
			lazy_mmu_prot_update(ptent);
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap(pte - 1);
}

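/*
 * Walk the pmd entries covering [addr, end), descending into each
 * populated pmd to update its ptes.
 */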
static inline void change_pmd_range(struct mm_struct *mm, pud_t *pud,
		unsigned long addr, unsigned long end, pgprot_t newprot)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		change_pte_range(mm, pmd, addr, next, newprot);
	} while (pmd++, addr = next, addr != end);
}

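/*
 * Walk the pud entries covering [addr, end), descending into each
 * populated pud to update the pmds below it.
 */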
static inline void change_pud_range(struct mm_struct *mm, pgd_t *pgd,
		unsigned long addr, unsigned long end, pgprot_t newprot)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		change_pmd_range(mm, pud, addr, next, newprot);
	} while (pud++, addr = next, addr != end);
}

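/*
 * Rewrite the page table protections for [addr, end) within vma:
 * flush the caches, walk the pgd/pud/pmd/pte hierarchy under
 * mm->page_table_lock setting newprot on every present pte, then
 * flush the TLB for the range.
 */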
static void change_protection(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end, pgprot_t newprot)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	spin_lock(&mm->page_table_lock);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		change_pud_range(mm, pgd, addr, next, newprot);
	} while (pgd++, addr = next, addr != end);
	flush_tlb_range(vma, start, end);
	spin_unlock(&mm->page_table_lock);
}

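/*
 * Apply newflags to the [start, end) slice of vma: account the extra
 * commit if a private mapping becomes writable, merge with adjacent
 * vmas or split this one so the slice has its own vma, then update
 * vm_flags, vm_page_prot and the page tables.  The caller must hold
 * mmap_sem for writing.
 */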
static int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
	unsigned long start, unsigned long end, unsigned long newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long oldflags = vma->vm_flags;
	long nrpages = (end - start) >> PAGE_SHIFT;
	unsigned long charged = 0;
	pgprot_t newprot;
	pgoff_t pgoff;
	int error;

	if (newflags == oldflags) {
		*pprev = vma;
		return 0;
	}

	/*
	 * If we make a private mapping writable we increase our commit;
	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again.
	 *
	 * FIXME? We haven't defined a VM_NORESERVE flag, so mprotecting
	 * a MAP_NORESERVE private mapping to writable will now reserve.
	 */
	if (newflags & VM_WRITE) {
		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_SHARED|VM_HUGETLB))) {
			charged = nrpages;
			if (security_vm_enough_memory(charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}
	}

	newprot = protection_map[newflags & 0xf];

	/*
	 * First try to merge with previous and/or next vma.
	 */
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*pprev = vma_merge(mm, *pprev, start, end, newflags,
			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
	if (*pprev) {
		vma = *pprev;
		goto success;
	}

	*pprev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto fail;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto fail;
	}

success:
	/*
	 * vm_flags and vm_page_prot are protected by the mmap_sem
	 * held in write mode.
	 */
	vma->vm_flags = newflags;
	vma->vm_page_prot = newprot;
	change_protection(vma, start, end, newprot);
	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
	return 0;

fail:
	vm_unacct_memory(charged);
	return error;
}

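/*
 * mprotect(2): change the protection on the pages in [start, start+len),
 * fixing up every vma that overlaps the range.
 */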
asmlinkage long
sys_mprotect(unsigned long start, size_t len, unsigned long prot)
{
	unsigned long vm_flags, nstart, end, tmp, reqprot;
	struct vm_area_struct *vma, *prev;
	int error = -EINVAL;
	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
		return -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return 0;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end <= start)
		return -ENOMEM;
	if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM))
		return -EINVAL;

	reqprot = prot;
	/*
	 * Does the application expect PROT_READ to imply PROT_EXEC:
	 */
	if (unlikely((prot & PROT_READ) &&
			(current->personality & READ_IMPLIES_EXEC)))
		prot |= PROT_EXEC;

	vm_flags = calc_vm_prot_bits(prot);

	down_write(&current->mm->mmap_sem);

	vma = find_vma_prev(current->mm, start, &prev);
	error = -ENOMEM;
	if (!vma)
		goto out;
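	/*
	 * PROT_GROWSDOWN extends the range down to the start of a
	 * VM_GROWSDOWN (stack) vma; PROT_GROWSUP extends it up to the
	 * end of a VM_GROWSUP vma.  Requesting growth on a vma that
	 * cannot grow that way fails with -EINVAL.
	 */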
	if (unlikely(grows & PROT_GROWSDOWN)) {
		if (vma->vm_start >= end)
			goto out;
		start = vma->vm_start;
		error = -EINVAL;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
	} else {
		if (vma->vm_start > start)
			goto out;
		if (unlikely(grows & PROT_GROWSUP)) {
			end = vma->vm_end;
			error = -EINVAL;
			if (!(vma->vm_flags & VM_GROWSUP))
				goto out;
		}
	}
	if (start > vma->vm_start)
		prev = vma;

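	/*
	 * Walk the vmas covering [start, end), applying the new
	 * protection to each in turn.  A hole in the mapping, or a
	 * hugetlb vma, aborts the walk with an error.
	 */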
	for (nstart = start ; ; ) {
		unsigned long newflags;

		/* Here we know that  vma->vm_start <= nstart < vma->vm_end. */

		if (is_vm_hugetlb_page(vma)) {
			error = -EACCES;
			goto out;
		}

		newflags = vm_flags | (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));

		/* newflags >> 4 shifts the VM_MAY* bits into the VM_* positions */
		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
			error = -EACCES;
			goto out;
		}

		error = security_file_mprotect(vma, reqprot, prot);
		if (error)
			goto out;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			goto out;
		nstart = tmp;

		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			goto out;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			goto out;
		}
	}
out:
	up_write(&current->mm->mmap_sem);
	return error;
}