nommu.c revision 7561e8ca0dfaf6fca3feef982830de3b65300e5b
/*
 *  linux/mm/nommu.c
 *
 *  Replacement code for mm functions to support CPUs that don't
 *  have any form of memory management unit (thus no virtual memory).
 *
 *  See Documentation/nommu-mmap.txt
 *
 *  Copyright (c) 2004-2008 David Howells <dhowells@redhat.com>
 *  Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
 *  Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
 *  Copyright (c) 2002      Greg Ungerer <gerg@snapgear.com>
 *  Copyright (c) 2007-2009 Paul Mundt <lethal@linux-sh.org>
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/tracehook.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/mount.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/syscalls.h>

#include <asm/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include "internal.h"

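/*
 * Swallow the debug output of the kenter/kleave/kdebug macros below when
 * they are compiled out, while still letting gcc check the format strings.
 */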
static inline __attribute__((format(printf, 1, 2)))
void no_printk(const char *fmt, ...)
{
}

#if 0
#define kenter(FMT, ...) \
	printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
#define kleave(FMT, ...) \
	printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)
#define kdebug(FMT, ...) \
	printk(KERN_DEBUG "xxx" FMT"yyy\n", ##__VA_ARGS__)
#else
#define kenter(FMT, ...) \
	no_printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
#define kleave(FMT, ...) \
	no_printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)
#define kdebug(FMT, ...) \
	no_printk(KERN_DEBUG FMT"\n", ##__VA_ARGS__)
#endif

void *high_memory;
struct page *mem_map;
unsigned long max_mapnr;
unsigned long num_physpages;
unsigned long highest_memmap_pfn;
struct percpu_counter vm_committed_as;
int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
int sysctl_overcommit_ratio = 50; /* default is 50% */
int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
int heap_stack_gap = 0;

atomic_long_t mmap_pages_allocated;

EXPORT_SYMBOL(mem_map);
EXPORT_SYMBOL(num_physpages);

/* list of mapped, potentially shareable regions */
static struct kmem_cache *vm_region_jar;
struct rb_root nommu_region_tree = RB_ROOT;
DECLARE_RWSEM(nommu_region_sem);

const struct vm_operations_struct generic_file_vm_ops = {
};

/*
 * Return the total memory allocated for this pointer, not
 * just what the caller asked for.
 *
 * Doesn't have to be accurate, i.e. may have races.
 */
unsigned int kobjsize(const void *objp)
{
	struct page *page;

	/*
	 * If the object we have should not have ksize performed on it,
	 * return size of 0
	 */
	if (!objp || !virt_addr_valid(objp))
		return 0;

	page = virt_to_head_page(objp);

	/*
	 * If the allocator sets PageSlab, we know the pointer came from
	 * kmalloc().
	 */
	if (PageSlab(page))
		return ksize(objp);

	/*
	 * If it's not a compound page, see if we have a matching VMA
	 * region. This test is intentionally done in reverse order,
	 * so if there's no VMA, we still fall through and hand back
	 * PAGE_SIZE for 0-order pages.
	 */
	if (!PageCompound(page)) {
		struct vm_area_struct *vma;

		vma = find_vma(current->mm, (unsigned long)objp);
		if (vma)
			return vma->vm_end - vma->vm_start;
	}

	/*
	 * The ksize() function is only guaranteed to work for pointers
	 * returned by kmalloc(). So handle arbitrary pointers here.
	 */
	return PAGE_SIZE << compound_order(page);
}

int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		     unsigned long start, int nr_pages, unsigned int foll_flags,
		     struct page **pages, struct vm_area_struct **vmas)
{
	struct vm_area_struct *vma;
	unsigned long vm_flags;
	int i;

	/* calculate required read or write permissions.
	 * If FOLL_FORCE is set, we only require the "MAY" flags.
	 */
	vm_flags  = (foll_flags & FOLL_WRITE) ?
			(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
	vm_flags &= (foll_flags & FOLL_FORCE) ?
			(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);

	for (i = 0; i < nr_pages; i++) {
		vma = find_vma(mm, start);
		if (!vma)
			goto finish_or_fault;

		/* protect what we can, including chardevs */
		if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
		    !(vm_flags & vma->vm_flags))
			goto finish_or_fault;

		if (pages) {
			pages[i] = virt_to_page(start);
			if (pages[i])
				page_cache_get(pages[i]);
		}
		if (vmas)
			vmas[i] = vma;
		start += PAGE_SIZE;
	}

	return i;

finish_or_fault:
	return i ? : -EFAULT;
}

/*
 * get a list of pages in an address range belonging to the specified process
 * and indicate the VMA that covers each page
 * - this is potentially dodgy as we may end up incrementing the page count of
 *   a slab page or a secondary page from a compound page
 * - don't permit access to VMAs that don't support it, such as I/O mappings
 */
int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
	unsigned long start, int nr_pages, int write, int force,
	struct page **pages, struct vm_area_struct **vmas)
{
	int flags = 0;

	if (write)
		flags |= FOLL_WRITE;
	if (force)
		flags |= FOLL_FORCE;

	return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
}
EXPORT_SYMBOL(get_user_pages);

/**
 * follow_pfn - look up PFN at a user virtual address
 * @vma: memory mapping
 * @address: user virtual address
 * @pfn: location to store found PFN
 *
 * Only IO mappings and raw PFN mappings are allowed.
 *
 * Returns zero and the pfn at @pfn on success, -ve otherwise.
 */
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
	unsigned long *pfn)
{
	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		return -EINVAL;

	*pfn = address >> PAGE_SHIFT;
	return 0;
}
EXPORT_SYMBOL(follow_pfn);

DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;

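/*
 * On NOMMU, __vmalloc() below simply wraps kmalloc(), so freeing a
 * vmalloc'd block is just a matter of handing it back to the slab.
 */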
void vfree(const void *addr)
{
	kfree(addr);
}
EXPORT_SYMBOL(vfree);

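/*
 * __GFP_COMP is added so that a higher-order allocation is treated as a
 * single compound page, which kobjsize() above relies upon.
 */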
void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	/*
	 *  You can't specify __GFP_HIGHMEM with kmalloc() since kmalloc()
	 * returns only a logical address.
	 */
	return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
}
EXPORT_SYMBOL(__vmalloc);

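/**
 * vmalloc_user - allocate zeroed memory for userspace
 * @size: allocation size
 *
 * Allocate zeroed memory; if a VMA covers the returned block, mark it with
 * VM_USERMAP so that remap_vmalloc_range() will later accept it.
 */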
void *vmalloc_user(unsigned long size)
{
	void *ret;

	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			PAGE_KERNEL);
	if (ret) {
		struct vm_area_struct *vma;

		down_write(&current->mm->mmap_sem);
		vma = find_vma(current->mm, (unsigned long)ret);
		if (vma)
			vma->vm_flags |= VM_USERMAP;
		up_write(&current->mm->mmap_sem);
	}

	return ret;
}
EXPORT_SYMBOL(vmalloc_user);

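/*
 * Without an MMU, vmalloc addresses are ordinary logical addresses, so the
 * page and PFN conversions are direct.
 */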
struct page *vmalloc_to_page(const void *addr)
{
	return virt_to_page(addr);
}
EXPORT_SYMBOL(vmalloc_to_page);

unsigned long vmalloc_to_pfn(const void *addr)
{
	return page_to_pfn(virt_to_page(addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);

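/*
 * vmalloc space is directly addressable here, so reading from and writing
 * to it reduce to plain memcpy().
 */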
long vread(char *buf, char *addr, unsigned long count)
{
	memcpy(buf, addr, count);
	return count;
}

long vwrite(char *buf, char *addr, unsigned long count)
{
	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	memcpy(addr, buf, count);
	return count;
}

/**
 *	vmalloc  -  allocate virtually contiguous memory
 *	@size:		allocation size
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc);

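/**
 * vmalloc_node - allocate memory on a specific node
 * @size: allocation size
 * @node: preferred node
 *
 * The node hint cannot be honoured on NOMMU, so this just wraps vmalloc().
 */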
void *vmalloc_node(unsigned long size, int node)
{
	return vmalloc(size);
}
EXPORT_SYMBOL(vmalloc_node);

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 *	vmalloc_exec  -  allocate virtually contiguous, executable memory
 *	@size:		allocation size
 *
 *	Kernel-internal function to allocate enough pages to cover @size
 *	from the page level allocator and map them into contiguous and
 *	executable kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc_exec(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}

/**
 * vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 *	@size:		allocation size
 *
 *	Allocate enough 32bit PA addressable pages to cover @size from the
 *	page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32);

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 *	@size:		allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 *
 * VM_USERMAP is set on the corresponding VMA so that subsequent calls to
 * remap_vmalloc_range() are permissible.
 */
void *vmalloc_32_user(unsigned long size)
{
	/*
	 * We'll have to sort out the ZONE_DMA bits for 64-bit,
	 * but for now this can simply use vmalloc_user() directly.
	 */
	return vmalloc_user(size);
}
EXPORT_SYMBOL(vmalloc_32_user);

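/*
 * Stitching arbitrary pages into a contiguous virtual range needs page
 * tables, so the mapping operations below are unconditional BUG() traps
 * on NOMMU and the alias-flushing ones are no-ops.
 */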
void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)
{
	BUG();
	return NULL;
}
EXPORT_SYMBOL(vmap);

void vunmap(const void *addr)
{
	BUG();
}
EXPORT_SYMBOL(vunmap);

void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
{
	BUG();
	return NULL;
}
EXPORT_SYMBOL(vm_map_ram);

void vm_unmap_ram(const void *mem, unsigned int count)
{
	BUG();
}
EXPORT_SYMBOL(vm_unmap_ram);

void vm_unmap_aliases(void)
{
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);

/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
void __attribute__((weak)) vmalloc_sync_all(void)
{
}

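/*
 * Individual pages cannot be inserted into a VMA without an MMU, so just
 * refuse the request.
 */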
int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
		   struct page *page)
{
	return -EINVAL;
}
EXPORT_SYMBOL(vm_insert_page);

/*
 *  sys_brk() for the most part doesn't need the global kernel
 *  lock, except when an application is doing something nasty
 *  like trying to un-brk an area that has already been mapped
 *  to a regular file.  In this case, the unmapping will need
 *  to invoke file system routines that need the global lock.
 */
SYSCALL_DEFINE1(brk, unsigned long, brk)
{
	struct mm_struct *mm = current->mm;

	if (brk < mm->start_brk || brk > mm->context.end_brk)
		return mm->brk;

	if (mm->brk == brk)
		return mm->brk;

	/*
	 * Always allow shrinking brk
	 */
	if (brk <= mm->brk) {
		mm->brk = brk;
		return brk;
	}

	/*
	 * Ok, looks good - let it rip.
	 */
	flush_icache_range(mm->brk, brk);
	return mm->brk = brk;
}

/*
 * initialise the VMA and region record slabs
 */
void __init mmap_init(void)
{
	int ret;

	ret = percpu_counter_init(&vm_committed_as, 0);
	VM_BUG_ON(ret);
	vm_region_jar = KMEM_CACHE(vm_region, SLAB_PANIC);
}

/*
 * validate the region tree
 * - the caller must hold the region lock
 */
#ifdef CONFIG_DEBUG_NOMMU_REGIONS
static noinline void validate_nommu_regions(void)
{
	struct vm_region *region, *last;
	struct rb_node *p, *lastp;

	lastp = rb_first(&nommu_region_tree);
	if (!lastp)
		return;

	last = rb_entry(lastp, struct vm_region, vm_rb);
	BUG_ON(unlikely(last->vm_end <= last->vm_start));
	BUG_ON(unlikely(last->vm_top < last->vm_end));

	while ((p = rb_next(lastp))) {
		region = rb_entry(p, struct vm_region, vm_rb);
		last = rb_entry(lastp, struct vm_region, vm_rb);

		BUG_ON(unlikely(region->vm_end <= region->vm_start));
		BUG_ON(unlikely(region->vm_top < region->vm_end));
		BUG_ON(unlikely(region->vm_start < last->vm_top));

		lastp = p;
	}
}
#else
static void validate_nommu_regions(void)
{
}
#endif

/*
 * add a region into the global tree
 */
static void add_nommu_region(struct vm_region *region)
{
	struct vm_region *pregion;
	struct rb_node **p, *parent;

	validate_nommu_regions();

	parent = NULL;
	p = &nommu_region_tree.rb_node;
	while (*p) {
		parent = *p;
		pregion = rb_entry(parent, struct vm_region, vm_rb);
		if (region->vm_start < pregion->vm_start)
			p = &(*p)->rb_left;
		else if (region->vm_start > pregion->vm_start)
			p = &(*p)->rb_right;
		else if (pregion == region)
			return;
		else
			BUG();
	}

	rb_link_node(&region->vm_rb, parent, p);
	rb_insert_color(&region->vm_rb, &nommu_region_tree);

	validate_nommu_regions();
}

/*
 * delete a region from the global tree
 */
static void delete_nommu_region(struct vm_region *region)
{
	BUG_ON(!nommu_region_tree.rb_node);

	validate_nommu_regions();
	rb_erase(&region->vm_rb, &nommu_region_tree);
	validate_nommu_regions();
}

/*
 * free a contiguous series of pages
 */
static void free_page_series(unsigned long from, unsigned long to)
{
	for (; from < to; from += PAGE_SIZE) {
		struct page *page = virt_to_page(from);

		kdebug("- free %lx", from);
		atomic_long_dec(&mmap_pages_allocated);
		if (page_count(page) != 1)
			kdebug("free page %p: refcount not one: %d",
			       page, page_count(page));
		put_page(page);
	}
}

/*
 * release a reference to a region
 * - the caller must hold the region semaphore for writing, which this releases
 * - the region may not have been added to the tree yet, in which case vm_top
 *   will equal vm_start
 */
static void __put_nommu_region(struct vm_region *region)
	__releases(nommu_region_sem)
{
	kenter("%p{%d}", region, region->vm_usage);

	BUG_ON(!nommu_region_tree.rb_node);

	if (--region->vm_usage == 0) {
		if (region->vm_top > region->vm_start)
			delete_nommu_region(region);
		up_write(&nommu_region_sem);

		if (region->vm_file)
			fput(region->vm_file);

		/* IO memory and memory shared directly out of the pagecache
		 * from ramfs/tmpfs mustn't be released here */
		if (region->vm_flags & VM_MAPPED_COPY) {
			kdebug("free series");
			free_page_series(region->vm_start, region->vm_top);
		}
		kmem_cache_free(vm_region_jar, region);
	} else {
		up_write(&nommu_region_sem);
	}
}

/*
 * release a reference to a region
 */
static void put_nommu_region(struct vm_region *region)
{
	down_write(&nommu_region_sem);
	__put_nommu_region(region);
}

/*
 * update protection on a vma
 */
static void protect_vma(struct vm_area_struct *vma, unsigned long flags)
{
#ifdef CONFIG_MPU
	struct mm_struct *mm = vma->vm_mm;
	long start = vma->vm_start & PAGE_MASK;
	while (start < vma->vm_end) {
		protect_page(mm, start, flags);
		start += PAGE_SIZE;
	}
	update_protections(mm);
#endif
}

/*
 * add a VMA into a process's mm_struct in the appropriate place in the list
 * and tree and add to the address space's page tree also if not an anonymous
 * page
 * - should be called with mm->mmap_sem held writelocked
 */
static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
{
	struct vm_area_struct *pvma, **pp;
	struct address_space *mapping;
	struct rb_node **p, *parent;

	kenter(",%p", vma);

	BUG_ON(!vma->vm_region);

	mm->map_count++;
	vma->vm_mm = mm;

	protect_vma(vma, vma->vm_flags);

	/* add the VMA to the mapping */
	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;

		flush_dcache_mmap_lock(mapping);
		vma_prio_tree_insert(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
	}

	/* add the VMA to the tree */
	parent = NULL;
	p = &mm->mm_rb.rb_node;
	while (*p) {
		parent = *p;
		pvma = rb_entry(parent, struct vm_area_struct, vm_rb);

		/* sort by: start addr, end addr, VMA struct addr in that order
		 * (the latter is necessary as we may get identical VMAs) */
		if (vma->vm_start < pvma->vm_start)
			p = &(*p)->rb_left;
		else if (vma->vm_start > pvma->vm_start)
			p = &(*p)->rb_right;
		else if (vma->vm_end < pvma->vm_end)
			p = &(*p)->rb_left;
		else if (vma->vm_end > pvma->vm_end)
			p = &(*p)->rb_right;
		else if (vma < pvma)
			p = &(*p)->rb_left;
		else if (vma > pvma)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&vma->vm_rb, parent, p);
	rb_insert_color(&vma->vm_rb, &mm->mm_rb);

	/* add VMA to the VMA list also */
	for (pp = &mm->mmap; (pvma = *pp); pp = &(*pp)->vm_next) {
		if (pvma->vm_start > vma->vm_start)
			break;
		if (pvma->vm_start < vma->vm_start)
			continue;
		if (pvma->vm_end < vma->vm_end)
			break;
	}

	vma->vm_next = *pp;
	*pp = vma;
}

/*
 * delete a VMA from its owning mm_struct and address space
 */
static void delete_vma_from_mm(struct vm_area_struct *vma)
{
	struct vm_area_struct **pp;
	struct address_space *mapping;
	struct mm_struct *mm = vma->vm_mm;

	kenter("%p", vma);

	protect_vma(vma, 0);

	mm->map_count--;
	if (mm->mmap_cache == vma)
		mm->mmap_cache = NULL;

	/* remove the VMA from the mapping */
	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;

		flush_dcache_mmap_lock(mapping);
		vma_prio_tree_remove(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
	}

	/* remove from the MM's tree and list */
	rb_erase(&vma->vm_rb, &mm->mm_rb);
	for (pp = &mm->mmap; *pp; pp = &(*pp)->vm_next) {
		if (*pp == vma) {
			*pp = vma->vm_next;
			break;
		}
	}

	vma->vm_mm = NULL;
}

/*
 * destroy a VMA record
 */
static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
{
	kenter("%p", vma);
	if (vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);
	if (vma->vm_file) {
		fput(vma->vm_file);
		if (vma->vm_flags & VM_EXECUTABLE)
			removed_exe_file_vma(mm);
	}
	put_nommu_region(vma->vm_region);
	kmem_cache_free(vm_area_cachep, vma);
}

/*
 * look up the first VMA in which addr resides, NULL if none
 * - should be called with mm->mmap_sem at least held readlocked
 */
struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;
	struct rb_node *n = mm->mm_rb.rb_node;

	/* check the cache first */
	vma = mm->mmap_cache;
	if (vma && vma->vm_start <= addr && vma->vm_end > addr)
		return vma;

	/* trawl the tree (there may be multiple mappings in which addr
	 * resides) */
	for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) {
		vma = rb_entry(n, struct vm_area_struct, vm_rb);
		if (vma->vm_start > addr)
			return NULL;
		if (vma->vm_end > addr) {
			mm->mmap_cache = vma;
			return vma;
		}
	}

	return NULL;
}
EXPORT_SYMBOL(find_vma);

/*
 * find a VMA
 * - we don't extend stack VMAs under NOMMU conditions
 */
struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
{
	return find_vma(mm, addr);
}

/*
 * expand a stack to a given address
 * - not supported under NOMMU conditions
 */
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
	return -ENOMEM;
}

/*
 * look up the first VMA that exactly matches addr
 * - should be called with mm->mmap_sem at least held readlocked
 */
static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
					     unsigned long addr,
					     unsigned long len)
{
	struct vm_area_struct *vma;
	struct rb_node *n = mm->mm_rb.rb_node;
	unsigned long end = addr + len;

	/* check the cache first */
	vma = mm->mmap_cache;
	if (vma && vma->vm_start == addr && vma->vm_end == end)
		return vma;

	/* trawl the tree (there may be multiple mappings in which addr
	 * resides) */
	for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) {
		vma = rb_entry(n, struct vm_area_struct, vm_rb);
		if (vma->vm_start < addr)
			continue;
		if (vma->vm_start > addr)
			return NULL;
		if (vma->vm_end == end) {
			mm->mmap_cache = vma;
			return vma;
		}
	}

	return NULL;
}

/*
 * determine whether a mapping should be permitted and, if so, what sort of
 * mapping we're capable of supporting
 */
static int validate_mmap_request(struct file *file,
				 unsigned long addr,
				 unsigned long len,
				 unsigned long prot,
				 unsigned long flags,
				 unsigned long pgoff,
				 unsigned long *_capabilities)
{
	unsigned long capabilities, rlen;
	unsigned long reqprot = prot;
	int ret;

	/* do the simple checks first */
	if (flags & MAP_FIXED) {
		printk(KERN_DEBUG
		       "%d: Can't do fixed-address/overlay mmap of RAM\n",
		       current->pid);
		return -EINVAL;
	}

	if ((flags & MAP_TYPE) != MAP_PRIVATE &&
	    (flags & MAP_TYPE) != MAP_SHARED)
		return -EINVAL;

	if (!len)
		return -EINVAL;

	/* Careful about overflows.. */
	rlen = PAGE_ALIGN(len);
	if (!rlen || rlen > TASK_SIZE)
		return -ENOMEM;

	/* offset overflow? */
	if ((pgoff + (rlen >> PAGE_SHIFT)) < pgoff)
		return -EOVERFLOW;

	if (file) {
		/* validate file mapping requests */
		struct address_space *mapping;

		/* files must support mmap */
		if (!file->f_op || !file->f_op->mmap)
			return -ENODEV;

		/* work out if what we've got could possibly be shared
		 * - we support chardevs that provide their own "memory"
		 * - we support files/blockdevs that are memory backed
		 */
		mapping = file->f_mapping;
		if (!mapping)
			mapping = file->f_path.dentry->d_inode->i_mapping;

		capabilities = 0;
		if (mapping && mapping->backing_dev_info)
			capabilities = mapping->backing_dev_info->capabilities;

		if (!capabilities) {
			/* no explicit capabilities set, so assume some
			 * defaults */
			switch (file->f_path.dentry->d_inode->i_mode & S_IFMT) {
			case S_IFREG:
			case S_IFBLK:
				capabilities = BDI_CAP_MAP_COPY;
				break;

			case S_IFCHR:
				capabilities =
					BDI_CAP_MAP_DIRECT |
					BDI_CAP_READ_MAP |
					BDI_CAP_WRITE_MAP;
				break;

			default:
				return -EINVAL;
			}
		}

		/* eliminate any capabilities that we can't support on this
		 * device */
		if (!file->f_op->get_unmapped_area)
			capabilities &= ~BDI_CAP_MAP_DIRECT;
		if (!file->f_op->read)
			capabilities &= ~BDI_CAP_MAP_COPY;

		/* The file shall have been opened with read permission. */
		if (!(file->f_mode & FMODE_READ))
			return -EACCES;

		if (flags & MAP_SHARED) {
			/* do checks for writing, appending and locking */
			if ((prot & PROT_WRITE) &&
			    !(file->f_mode & FMODE_WRITE))
				return -EACCES;

			if (IS_APPEND(file->f_path.dentry->d_inode) &&
			    (file->f_mode & FMODE_WRITE))
				return -EACCES;

			if (locks_verify_locked(file->f_path.dentry->d_inode))
				return -EAGAIN;

			if (!(capabilities & BDI_CAP_MAP_DIRECT))
				return -ENODEV;

			if (((prot & PROT_READ)  && !(capabilities & BDI_CAP_READ_MAP))  ||
			    ((prot & PROT_WRITE) && !(capabilities & BDI_CAP_WRITE_MAP)) ||
			    ((prot & PROT_EXEC)  && !(capabilities & BDI_CAP_EXEC_MAP))
			    ) {
				printk(KERN_WARNING
				       "MAP_SHARED not completely supported on !MMU\n");
				return -EINVAL;
			}

			/* we mustn't privatise shared mappings */
			capabilities &= ~BDI_CAP_MAP_COPY;
		} else {
			/* we're going to read the file into private memory we
			 * allocate */
			if (!(capabilities & BDI_CAP_MAP_COPY))
				return -ENODEV;

			/* we don't permit a private writable mapping to be
			 * shared with the backing device */
			if (prot & PROT_WRITE)
				capabilities &= ~BDI_CAP_MAP_DIRECT;
		}

		/* handle executable mappings and implied executable
		 * mappings */
		if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) {
			if (prot & PROT_EXEC)
				return -EPERM;
		} else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) {
			/* handle implication of PROT_EXEC by PROT_READ */
			if (current->personality & READ_IMPLIES_EXEC) {
				if (capabilities & BDI_CAP_EXEC_MAP)
					prot |= PROT_EXEC;
			}
		} else if ((prot & PROT_READ) &&
			   (prot & PROT_EXEC) &&
			   !(capabilities & BDI_CAP_EXEC_MAP)
			   ) {
			/* backing file is not executable, try to copy */
			capabilities &= ~BDI_CAP_MAP_DIRECT;
		}
	} else {
		/* anonymous mappings are always memory backed and can be
		 * privately mapped
		 */
		capabilities = BDI_CAP_MAP_COPY;

		/* handle PROT_EXEC implication by PROT_READ */
		if ((prot & PROT_READ) &&
		    (current->personality & READ_IMPLIES_EXEC))
			prot |= PROT_EXEC;
	}

	/* allow the security API to have its say */
	ret = security_file_mmap(file, reqprot, prot, flags, addr, 0);
	if (ret < 0)
		return ret;

	/* looks okay */
	*_capabilities = capabilities;
	return 0;
}

/*
 * we've determined that we can make the mapping, now translate what we
 * now know into VMA flags
 */
static unsigned long determine_vm_flags(struct file *file,
					unsigned long prot,
					unsigned long flags,
					unsigned long capabilities)
{
	unsigned long vm_flags;

	vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags);
	vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
	/* vm_flags |= mm->def_flags; */

	if (!(capabilities & BDI_CAP_MAP_DIRECT)) {
		/* attempt to share read-only copies of mapped file chunks */
		if (file && !(prot & PROT_WRITE))
			vm_flags |= VM_MAYSHARE;
	} else {
		/* overlay a shareable mapping on the backing device or inode
		 * if possible - used for chardevs, ramfs/tmpfs/shmfs and
		 * romfs/cramfs */
		if (flags & MAP_SHARED)
			vm_flags |= VM_MAYSHARE | VM_SHARED;
		else if ((((vm_flags & capabilities) ^ vm_flags) & BDI_CAP_VMFLAGS) == 0)
			vm_flags |= VM_MAYSHARE;
	}

	/* refuse to let anyone share private mappings with this process if
	 * it's being traced - otherwise breakpoints set in it may interfere
	 * with another untraced process
	 */
	if ((flags & MAP_PRIVATE) && tracehook_expect_breakpoints(current))
		vm_flags &= ~VM_MAYSHARE;

	return vm_flags;
}

/*
 * set up a shared mapping on a file (the driver or filesystem provides and
 * pins the storage)
 */
static int do_mmap_shared_file(struct vm_area_struct *vma)
{
	int ret;

	ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
	if (ret == 0) {
		vma->vm_region->vm_top = vma->vm_region->vm_end;
		return 0;
	}
	if (ret != -ENOSYS)
		return ret;

	/* getting -ENOSYS indicates that direct mmap isn't possible (as
	 * opposed to tried but failed) so we can only give a suitable error as
	 * it's not possible to make a private copy if MAP_SHARED was given */
	return -ENODEV;
}

/*
 * set up a private mapping or an anonymous shared mapping
 */
static int do_mmap_private(struct vm_area_struct *vma,
			   struct vm_region *region,
			   unsigned long len,
			   unsigned long capabilities)
{
	struct page *pages;
	unsigned long total, point, n, rlen;
	void *base;
	int ret, order;

	/* invoke the file's mapping function so that it can keep track of
	 * shared mappings on devices or memory
	 * - VM_MAYSHARE will be set if it may attempt to share
	 */
	if (capabilities & BDI_CAP_MAP_DIRECT) {
		ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
		if (ret == 0) {
			/* shouldn't return success if we're not sharing */
			BUG_ON(!(vma->vm_flags & VM_MAYSHARE));
			vma->vm_region->vm_top = vma->vm_region->vm_end;
			return 0;
		}
		if (ret != -ENOSYS)
			return ret;

		/* getting an ENOSYS error indicates that direct mmap isn't
		 * possible (as opposed to tried but failed) so we'll try to
		 * make a private copy of the data and map that instead */
	}

	rlen = PAGE_ALIGN(len);

	/* allocate some memory to hold the mapping
	 * - note that this may not return a page-aligned address if the object
	 *   we're allocating is smaller than a page
	 */
	order = get_order(rlen);
	kdebug("alloc order %d for %lx", order, len);

	pages = alloc_pages(GFP_KERNEL, order);
	if (!pages)
		goto enomem;

	total = 1 << order;
	atomic_long_add(total, &mmap_pages_allocated);

	point = rlen >> PAGE_SHIFT;

	/* we allocated a power-of-2 sized page set, so we may want to trim off
	 * the excess */
	if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages) {
		while (total > point) {
			order = ilog2(total - point);
			n = 1 << order;
			kdebug("shave %lu/%lu @%lu", n, total - point, total);
			atomic_long_sub(n, &mmap_pages_allocated);
			total -= n;
			set_page_refcounted(pages + total);
			__free_pages(pages + total, order);
		}
	}

	for (point = 1; point < total; point++)
		set_page_refcounted(&pages[point]);

	base = page_address(pages);
	region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY;
	region->vm_start = (unsigned long) base;
	region->vm_end   = region->vm_start + rlen;
	region->vm_top   = region->vm_start + (total << PAGE_SHIFT);

	vma->vm_start = region->vm_start;
	vma->vm_end   = region->vm_start + len;

	if (vma->vm_file) {
		/* read the contents of a file into the copy */
		mm_segment_t old_fs;
		loff_t fpos;

		fpos = vma->vm_pgoff;
		fpos <<= PAGE_SHIFT;

		old_fs = get_fs();
		set_fs(KERNEL_DS);
		ret = vma->vm_file->f_op->read(vma->vm_file, base, rlen, &fpos);
		set_fs(old_fs);

		if (ret < 0)
			goto error_free;

		/* clear the last little bit */
		if (ret < rlen)
			memset(base + ret, 0, rlen - ret);

	}

	return 0;

error_free:
	free_page_series(region->vm_start, region->vm_end);
	region->vm_start = vma->vm_start = 0;
	region->vm_end   = vma->vm_end = 0;
	region->vm_top   = 0;
	return ret;

enomem:
	printk(KERN_WARNING
	       "Allocation of length %lu from process %d (%s) failed\n",
	       len, current->pid, current->comm);
	show_free_areas();
	return -ENOMEM;
}

/*
 * handle mapping creation for uClinux
 */
unsigned long do_mmap_pgoff(struct file *file,
			    unsigned long addr,
			    unsigned long len,
			    unsigned long prot,
			    unsigned long flags,
			    unsigned long pgoff)
{
	struct vm_area_struct *vma;
	struct vm_region *region;
	struct rb_node *rb;
	unsigned long capabilities, vm_flags, result;
	int ret;

	kenter(",%lx,%lx,%lx,%lx,%lx", addr, len, prot, flags, pgoff);

	/* decide whether we should attempt the mapping, and if so what sort of
	 * mapping */
	ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,
				    &capabilities);
	if (ret < 0) {
		kleave(" = %d [val]", ret);
		return ret;
	}

	/* we ignore the address hint */
	addr = 0;

	/* we've determined that we can make the mapping, now translate what we
	 * now know into VMA flags */
	vm_flags = determine_vm_flags(file, prot, flags, capabilities);

	/* we're going to need to record the mapping */
	region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL);
	if (!region)
		goto error_getting_region;

	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (!vma)
		goto error_getting_vma;

	region->vm_usage = 1;
	region->vm_flags = vm_flags;
	region->vm_pgoff = pgoff;

	INIT_LIST_HEAD(&vma->anon_vma_chain);
	vma->vm_flags = vm_flags;
	vma->vm_pgoff = pgoff;

	if (file) {
		region->vm_file = file;
		get_file(file);
		vma->vm_file = file;
		get_file(file);
		if (vm_flags & VM_EXECUTABLE) {
			added_exe_file_vma(current->mm);
			vma->vm_mm = current->mm;
		}
	}

	down_write(&nommu_region_sem);

	/* if we want to share, we need to check for regions created by other
	 * mmap() calls that overlap with our proposed mapping
	 * - we can only share with a superset match on most regular files
	 * - shared mappings on character devices and memory backed files are
	 *   permitted to overlap inexactly as far as we are concerned for, in
	 *   these cases, sharing is handled in the driver or filesystem rather
	 *   than here
	 */
	if (vm_flags & VM_MAYSHARE) {
		struct vm_region *pregion;
		unsigned long pglen, rpglen, pgend, rpgend, start;

		pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		pgend = pgoff + pglen;

		for (rb = rb_first(&nommu_region_tree); rb; rb = rb_next(rb)) {
			pregion = rb_entry(rb, struct vm_region, vm_rb);

			if (!(pregion->vm_flags & VM_MAYSHARE))
				continue;

			/* search for overlapping mappings on the same file */
			if (pregion->vm_file->f_path.dentry->d_inode !=
			    file->f_path.dentry->d_inode)
				continue;

			if (pregion->vm_pgoff >= pgend)
				continue;

			rpglen = pregion->vm_end - pregion->vm_start;
			rpglen = (rpglen + PAGE_SIZE - 1) >> PAGE_SHIFT;
			rpgend = pregion->vm_pgoff + rpglen;
			if (pgoff >= rpgend)
				continue;

			/* handle inexactly overlapping matches between
			 * mappings */
			if ((pregion->vm_pgoff != pgoff || rpglen != pglen) &&
			    !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) {
				/* new mapping is not a subset of the region */
				if (!(capabilities & BDI_CAP_MAP_DIRECT))
					goto sharing_violation;
				continue;
			}

			/* we've found a region we can share */
			pregion->vm_usage++;
			vma->vm_region = pregion;
			start = pregion->vm_start;
			start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT;
			vma->vm_start = start;
			vma->vm_end = start + len;

			if (pregion->vm_flags & VM_MAPPED_COPY) {
				kdebug("share copy");
				vma->vm_flags |= VM_MAPPED_COPY;
			} else {
				kdebug("share mmap");
				ret = do_mmap_shared_file(vma);
				if (ret < 0) {
					vma->vm_region = NULL;
					vma->vm_start = 0;
					vma->vm_end = 0;
					pregion->vm_usage--;
					pregion = NULL;
					goto error_just_free;
				}
			}
			fput(region->vm_file);
			kmem_cache_free(vm_region_jar, region);
			region = pregion;
			result = start;
			goto share;
		}

		/* obtain the address at which to make a shared mapping
		 * - this is the hook for quasi-memory character devices to
		 *   tell us the location of a shared mapping
		 */
		if (capabilities & BDI_CAP_MAP_DIRECT) {
			addr = file->f_op->get_unmapped_area(file, addr, len,
							     pgoff, flags);
			if (IS_ERR((void *) addr)) {
				ret = addr;
				if (ret != (unsigned long) -ENOSYS)
					goto error_just_free;

				/* the driver refused to tell us where to site
				 * the mapping so we'll have to attempt to copy
				 * it */
				ret = (unsigned long) -ENODEV;
				if (!(capabilities & BDI_CAP_MAP_COPY))
					goto error_just_free;

				capabilities &= ~BDI_CAP_MAP_DIRECT;
			} else {
				vma->vm_start = region->vm_start = addr;
				vma->vm_end = region->vm_end = addr + len;
			}
		}
	}

	vma->vm_region = region;

	/* set up the mapping
	 * - the region is filled in if BDI_CAP_MAP_DIRECT is still set
	 */
	if (file && vma->vm_flags & VM_SHARED)
		ret = do_mmap_shared_file(vma);
	else
		ret = do_mmap_private(vma, region, len, capabilities);
	if (ret < 0)
		goto error_just_free;
	add_nommu_region(region);

	/* clear anonymous mappings that don't ask for uninitialized data */
	if (!vma->vm_file && !(flags & MAP_UNINITIALIZED))
		memset((void *)region->vm_start, 0,
		       region->vm_end - region->vm_start);

	/* okay... we have a mapping; now we have to register it */
	result = vma->vm_start;

	current->mm->total_vm += len >> PAGE_SHIFT;

share:
	add_vma_to_mm(current->mm, vma);

	/* we flush the region from the icache only when the first executable
	 * mapping of it is made  */
	if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) {
		flush_icache_range(region->vm_start, region->vm_end);
		region->vm_icache_flushed = true;
	}

	up_write(&nommu_region_sem);

	kleave(" = %lx", result);
	return result;

error_just_free:
	up_write(&nommu_region_sem);
error:
	if (region->vm_file)
		fput(region->vm_file);
	kmem_cache_free(vm_region_jar, region);
	if (vma->vm_file)
		fput(vma->vm_file);
	if (vma->vm_flags & VM_EXECUTABLE)
		removed_exe_file_vma(vma->vm_mm);
	kmem_cache_free(vm_area_cachep, vma);
	kleave(" = %d", ret);
	return ret;

sharing_violation:
	up_write(&nommu_region_sem);
	printk(KERN_WARNING "Attempt to share mismatched mappings\n");
	ret = -EINVAL;
	goto error;

error_getting_vma:
	kmem_cache_free(vm_region_jar, region);
	printk(KERN_WARNING "Allocation of vma for %lu byte allocation"
	       " from process %d failed\n",
	       len, current->pid);
	show_free_areas();
	return -ENOMEM;

error_getting_region:
	printk(KERN_WARNING "Allocation of vm region for %lu byte allocation"
	       " from process %d failed\n",
	       len, current->pid);
	show_free_areas();
	return -ENOMEM;
}
EXPORT_SYMBOL(do_mmap_pgoff);

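/*
 * The mmap_pgoff() system call itself: resolve the file descriptor, then
 * let do_mmap_pgoff() do the real work under the mmap semaphore.
 */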
SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, unsigned long, pgoff)
{
	struct file *file = NULL;
	unsigned long retval = -EBADF;

	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			goto out;
	}

	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

	down_write(&current->mm->mmap_sem);
	retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
	up_write(&current->mm->mmap_sem);

	if (file)
		fput(file);
out:
	return retval;
}

#ifdef __ARCH_WANT_SYS_OLD_MMAP
struct mmap_arg_struct {
	unsigned long addr;
	unsigned long len;
	unsigned long prot;
	unsigned long flags;
	unsigned long fd;
	unsigned long offset;
};

SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
{
	struct mmap_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	if (a.offset & ~PAGE_MASK)
		return -EINVAL;

	return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
			      a.offset >> PAGE_SHIFT);
}
#endif /* __ARCH_WANT_SYS_OLD_MMAP */

/*
 * split a vma into two pieces at address 'addr'; a new vma is allocated
 * either for the first part or the tail.
 */
int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
	      unsigned long addr, int new_below)
{
	struct vm_area_struct *new;
	struct vm_region *region;
	unsigned long npages;

	kenter("");

	/* we're only permitted to split anonymous regions (these should have
	 * only a single usage on the region) */
	if (vma->vm_file)
		return -ENOMEM;

	if (mm->map_count >= sysctl_max_map_count)
		return -ENOMEM;

	region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
	if (!new) {
		kmem_cache_free(vm_region_jar, region);
		return -ENOMEM;
	}

	/* most fields are the same, copy all, and then fixup */
	*new = *vma;
	*region = *vma->vm_region;
	new->vm_region = region;

	npages = (addr - vma->vm_start) >> PAGE_SHIFT;

	if (new_below) {
		region->vm_top = region->vm_end = new->vm_end = addr;
	} else {
		region->vm_start = new->vm_start = addr;
		region->vm_pgoff = new->vm_pgoff += npages;
	}

	if (new->vm_ops && new->vm_ops->open)
		new->vm_ops->open(new);

	delete_vma_from_mm(vma);
	down_write(&nommu_region_sem);
	delete_nommu_region(vma->vm_region);
	if (new_below) {
		vma->vm_region->vm_start = vma->vm_start = addr;
		vma->vm_region->vm_pgoff = vma->vm_pgoff += npages;
	} else {
		vma->vm_region->vm_end = vma->vm_end = addr;
		vma->vm_region->vm_top = addr;
	}
	add_nommu_region(vma->vm_region);
	add_nommu_region(new->vm_region);
	up_write(&nommu_region_sem);
	add_vma_to_mm(mm, vma);
	add_vma_to_mm(mm, new);
	return 0;
}

/*
 * shrink a VMA by removing the specified chunk from either the beginning or
 * the end
 */
static int shrink_vma(struct mm_struct *mm,
		      struct vm_area_struct *vma,
		      unsigned long from, unsigned long to)
{
	struct vm_region *region;

	kenter("");

	/* adjust the VMA's pointers, which may reposition it in the MM's tree
	 * and list */
	delete_vma_from_mm(vma);
	if (from > vma->vm_start)
		vma->vm_end = from;
	else
		vma->vm_start = to;
	add_vma_to_mm(mm, vma);

	/* cut the backing region down to size */
	region = vma->vm_region;
	BUG_ON(region->vm_usage != 1);

	down_write(&nommu_region_sem);
	delete_nommu_region(region);
	if (from > region->vm_start) {
		to = region->vm_top;
		region->vm_top = region->vm_end = from;
	} else {
		region->vm_start = to;
	}
	add_nommu_region(region);
	up_write(&nommu_region_sem);

	free_page_series(from, to);
	return 0;
}

/*
 * release a mapping
 * - under NOMMU conditions the chunk to be unmapped must be backed by a single
 *   VMA, though it need not cover the whole VMA
 */
int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
{
	struct vm_area_struct *vma;
	struct rb_node *rb;
	unsigned long end = start + len;
	int ret;

	kenter(",%lx,%zx", start, len);

	if (len == 0)
		return -EINVAL;

	/* find the first potentially overlapping VMA */
	vma = find_vma(mm, start);
	if (!vma) {
		static int limit;
		if (limit < 5) {
			printk(KERN_WARNING
			       "munmap of memory not mmapped by process %d"
			       " (%s): 0x%lx-0x%lx\n",
			       current->pid, current->comm,
			       start, start + len - 1);
			limit++;
		}
		return -EINVAL;
	}

	/* we're allowed to split an anonymous VMA but not a file-backed one */
	if (vma->vm_file) {
		do {
			if (start > vma->vm_start) {
				kleave(" = -EINVAL [miss]");
				return -EINVAL;
			}
			if (end == vma->vm_end)
				goto erase_whole_vma;
			rb = rb_next(&vma->vm_rb);
			vma = rb_entry(rb, struct vm_area_struct, vm_rb);
		} while (rb);
		kleave(" = -EINVAL [split file]");
		return -EINVAL;
	} else {
		/* the chunk must be a subset of the VMA found */
		if (start == vma->vm_start && end == vma->vm_end)
			goto erase_whole_vma;
		if (start < vma->vm_start || end > vma->vm_end) {
			kleave(" = -EINVAL [superset]");
			return -EINVAL;
		}
		if (start & ~PAGE_MASK) {
			kleave(" = -EINVAL [unaligned start]");
			return -EINVAL;
		}
		if (end != vma->vm_end && end & ~PAGE_MASK) {
			kleave(" = -EINVAL [unaligned split]");
			return -EINVAL;
		}
		if (start != vma->vm_start && end != vma->vm_end) {
			ret = split_vma(mm, vma, start, 1);
			if (ret < 0) {
				kleave(" = %d [split]", ret);
				return ret;
			}
		}
		return shrink_vma(mm, vma, start, end);
	}

erase_whole_vma:
	delete_vma_from_mm(vma);
	delete_vma(mm, vma);
	kleave(" = 0");
	return 0;
}
EXPORT_SYMBOL(do_munmap);

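/*
 * The munmap() system call: simply do_munmap() under the mmap semaphore.
 */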
SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
{
	int ret;
	struct mm_struct *mm = current->mm;

	down_write(&mm->mmap_sem);
	ret = do_munmap(mm, addr, len);
	up_write(&mm->mmap_sem);
	return ret;
}

/*
 * release all the mappings made in a process's VM space
 */
void exit_mmap(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	if (!mm)
		return;

	kenter("");

	mm->total_vm = 0;

	while ((vma = mm->mmap)) {
		mm->mmap = vma->vm_next;
		delete_vma_from_mm(vma);
		delete_vma(mm, vma);
	}

	kleave("");
}

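/*
 * do_brk() cannot be supported here; sys_brk() above only moves the brk
 * within the space that was set aside for it when the binary was loaded.
 */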
unsigned long do_brk(unsigned long addr, unsigned long len)
{
	return -ENOMEM;
}

/*
 * expand (or shrink) an existing mapping, potentially moving it at the same
 * time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * under NOMMU conditions, we only permit changing a mapping's size, and only
 * as long as it stays within the region allocated by do_mmap_private() and the
 * block is not shareable
 *
 * MREMAP_FIXED is not supported under NOMMU conditions
 */
unsigned long do_mremap(unsigned long addr,
			unsigned long old_len, unsigned long new_len,
			unsigned long flags, unsigned long new_addr)
{
	struct vm_area_struct *vma;

	/* insanity checks first */
	if (old_len == 0 || new_len == 0)
		return (unsigned long) -EINVAL;

	if (addr & ~PAGE_MASK)
		return -EINVAL;

	if (flags & MREMAP_FIXED && new_addr != addr)
		return (unsigned long) -EINVAL;

	vma = find_vma_exact(current->mm, addr, old_len);
	if (!vma)
		return (unsigned long) -EINVAL;

	if (vma->vm_end != vma->vm_start + old_len)
		return (unsigned long) -EFAULT;

	if (vma->vm_flags & VM_MAYSHARE)
		return (unsigned long) -EPERM;

	if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start)
		return (unsigned long) -ENOMEM;

	/* all checks complete - do it */
	vma->vm_end = vma->vm_start + new_len;
	return vma->vm_start;
}
EXPORT_SYMBOL(do_mremap);

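/*
 * The mremap() system call: simply do_mremap() under the mmap semaphore.
 */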
SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	unsigned long ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
	up_write(&current->mm->mmap_sem);
	return ret;
}

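/*
 * There are no page tables to walk, so there is never a struct page to
 * hand back here.
 */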
struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
			unsigned int foll_flags)
{
	return NULL;
}

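/*
 * Remapping is trivial without an MMU: point the VMA straight at the
 * physical memory designated by the page offset.
 */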
int remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
		unsigned long to, unsigned long size, pgprot_t prot)
{
	vma->vm_start = vma->vm_pgoff << PAGE_SHIFT;
	return 0;
}
EXPORT_SYMBOL(remap_pfn_range);

int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
			unsigned long pgoff)
{
	unsigned int size = vma->vm_end - vma->vm_start;

	if (!(vma->vm_flags & VM_USERMAP))
		return -EINVAL;

	vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT));
	vma->vm_end = vma->vm_start + size;

	return 0;
}
EXPORT_SYMBOL(remap_vmalloc_range);

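/*
 * What remains are stubs for MMU-only functionality that generic code may
 * still call on NOMMU: they either do nothing or fail gracefully.
 */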
void swap_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
}

unsigned long arch_get_unmapped_area(struct file *file, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	return -ENOMEM;
}

void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
{
}

void unmap_mapping_range(struct address_space *mapping,
			 loff_t const holebegin, loff_t const holelen,
			 int even_cows)
{
}
EXPORT_SYMBOL(unmap_mapping_range);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	unsigned long free, allowed;

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		unsigned long n;

		free = global_page_state(NR_FILE_PAGES);
		free += nr_swap_pages;

		/*
		 * Any slabs which are created with the
		 * SLAB_RECLAIM_ACCOUNT flag claim to have contents
		 * which are reclaimable, under pressure.  The dentry
		 * cache and most inode caches should fall into this
		 */
		free += global_page_state(NR_SLAB_RECLAIMABLE);

		/*
		 * Leave the last 3% for root
		 */
		if (!cap_sys_admin)
			free -= free / 32;

		if (free > pages)
			return 0;

		/*
		 * nr_free_pages() is very expensive on large systems,
		 * only call if we're about to fail.
		 */
		n = nr_free_pages();

		/*
		 * Leave reserved pages. The pages are not for anonymous pages.
		 */
		if (n <= totalreserve_pages)
			goto error;
		else
			n -= totalreserve_pages;

		/*
		 * Leave the last 3% for root
		 */
		if (!cap_sys_admin)
			n -= n / 32;
		free += n;

		if (free > pages)
			return 0;

		goto error;
	}

	allowed = totalram_pages * sysctl_overcommit_ratio / 100;
	/*
	 * Leave the last 3% for root
	 */
	if (!cap_sys_admin)
		allowed -= allowed / 32;
	allowed += total_swap_pages;

	/* Don't let a single process grow too big:
	   leave 3% of the size of this process for other processes */
	if (mm)
		allowed -= mm->total_vm / 32;

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;

error:
	vm_unacct_memory(pages);

	return -ENOMEM;
}

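/*
 * There is no gate area on NOMMU, and filemap_fault() should never be
 * reached since faults do not occur without an MMU.
 */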
int in_gate_area_no_task(unsigned long addr)
{
	return 0;
}

int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	BUG();
	return 0;
}
EXPORT_SYMBOL(filemap_fault);

/*
 * Access another process' address space.
 * - source/target buffer must be kernel space
 */
int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;

	if (addr + len < addr)
		return 0;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	down_read(&mm->mmap_sem);

	/* the access must start within one of the target process's mappings */
	vma = find_vma(mm, addr);
	if (vma) {
		/* don't overrun this mapping */
		if (addr + len >= vma->vm_end)
			len = vma->vm_end - addr;

		/* only read or write mappings where it is permitted */
		if (write && vma->vm_flags & VM_MAYWRITE)
			copy_to_user_page(vma, NULL, addr,
					 (void *) addr, buf, len);
		else if (!write && vma->vm_flags & VM_MAYREAD)
			copy_from_user_page(vma, NULL, addr,
					    buf, (void *) addr, len);
		else
			len = 0;
	} else {
		len = 0;
	}

	up_read(&mm->mmap_sem);
	mmput(mm);
	return len;
}

/**
 * nommu_shrink_inode_mappings - Shrink the shared mappings on an inode
 * @inode: The inode to check
 * @size: The current filesize of the inode
 * @newsize: The proposed filesize of the inode
 *
 * Check the shared mappings on an inode on behalf of a shrinking truncate to
 * make sure that any outstanding VMAs aren't broken and then shrink the
 * vm_regions that extend beyond it so that do_mmap_pgoff() doesn't
 * automatically grant mappings that are too large.
 */
int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
				size_t newsize)
{
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	struct vm_region *region;
	pgoff_t low, high;
	size_t r_size, r_top;

	low = newsize >> PAGE_SHIFT;
	high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	down_write(&nommu_region_sem);

	/* search for VMAs that fall within the dead zone */
	vma_prio_tree_foreach(vma, &iter, &inode->i_mapping->i_mmap,
			      low, high) {
		/* found one - only interested if it's shared out of the page
		 * cache */
		if (vma->vm_flags & VM_SHARED) {
			up_write(&nommu_region_sem);
			return -ETXTBSY; /* not quite true, but near enough */
		}
	}

	/* reduce any regions that overlap the dead zone - if in existence,
	 * these will be pointed to by VMAs that don't overlap the dead zone
	 *
	 * we don't check for any regions that start beyond the EOF as there
	 * shouldn't be any
	 */
	vma_prio_tree_foreach(vma, &iter, &inode->i_mapping->i_mmap,
			      0, ULONG_MAX) {
		if (!(vma->vm_flags & VM_SHARED))
			continue;

		region = vma->vm_region;
		r_size = region->vm_top - region->vm_start;
		r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size;

		if (r_top > newsize) {
			region->vm_top -= r_top - newsize;
			if (region->vm_end > region->vm_top)
				region->vm_end = region->vm_top;
		}
	}

	up_write(&nommu_region_sem);
	return 0;
}