vmalloc.c revision a10aa579878fc6f9cd17455067380bbdf1d53c91
/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>

#include <asm/uaccess.h>
#include <asm/tlbflush.h>


DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;

static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
			    int node);
static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static inline void vunmap_pmd_range(pud_t *pud, unsigned long addr,
						unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static inline void vunmap_pud_range(pgd_t *pgd, unsigned long addr,
						unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

void unmap_kernel_range(unsigned long addr, unsigned long size)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;
	unsigned long end = addr + size;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	flush_cache_vunmap(addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_pud_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
	flush_tlb_kernel_range(start, end);
}

static void unmap_vm_area(struct vm_struct *area)
{
	unmap_kernel_range((unsigned long)area->addr, area->size);
}

static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
			unsigned long end, pgprot_t prot, struct page ***pages)
{
	pte_t *pte;

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = **pages;
		WARN_ON(!pte_none(*pte));
		if (!page)
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*pages)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static inline int vmap_pmd_range(pud_t *pud, unsigned long addr,
			unsigned long end, pgprot_t prot, struct page ***pages)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int vmap_pud_range(pgd_t *pgd, unsigned long addr,
			unsigned long end, pgprot_t prot, struct page ***pages)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = (unsigned long) area->addr;
	unsigned long end = addr + area->size - PAGE_SIZE;
	int err;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_pud_range(pgd, addr, next, prot, pages);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
	flush_cache_vmap((unsigned long) area->addr, end);
	return err;
}
EXPORT_SYMBOL_GPL(map_vm_area);
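
/*
 * Usage sketch (hypothetical caller, not part of this file): map_vm_area()
 * is the low-level half of vmap() below.  Note that it advances the @pages
 * cursor as it fills PTEs, so a caller passes the address of a local copy:
 *
 *	void *example_map(struct page **pages, unsigned int count)
 *	{
 *		struct vm_struct *area;
 *		struct page **cursor = pages;
 *
 *		area = get_vm_area(count << PAGE_SHIFT, VM_MAP);
 *		if (!area)
 *			return NULL;
 *		if (map_vm_area(area, PAGE_KERNEL, &cursor)) {
 *			vunmap(area->addr);
 *			return NULL;
 *		}
 *		return area->addr;
 *	}
 *
 * This mirrors what vmap() does, minus its overflow check.
 */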

/*
 * Map a vmalloc()-space virtual address to the physical page.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

	if (!pgd_none(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd)) {
				ptep = pte_offset_map(pmd, addr);
				pte = *ptep;
				if (pte_present(pte))
					page = pte_page(pte);
				pte_unmap(ptep);
			}
		}
	}
	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);
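
/*
 * Illustrative sketch (hypothetical, not part of this file): vmalloc space
 * is only virtually contiguous, so a caller that needs the backing frames
 * must translate one page at a time:
 *
 *	void example_show_pfns(void *buf, unsigned long npages)
 *	{
 *		unsigned long i;
 *
 *		for (i = 0; i < npages; i++)
 *			printk(KERN_DEBUG "va %p -> pfn %lu\n",
 *			       buf + (i << PAGE_SHIFT),
 *			       vmalloc_to_pfn(buf + (i << PAGE_SHIFT)));
 *	}
 */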

static struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long flags,
					    unsigned long start, unsigned long end,
					    int node, gfp_t gfp_mask)
{
	struct vm_struct **p, *tmp, *area;
	unsigned long align = 1;
	unsigned long addr;

	BUG_ON(in_interrupt());
	if (flags & VM_IOREMAP) {
		int bit = fls(size);

		if (bit > IOREMAP_MAX_ORDER)
			bit = IOREMAP_MAX_ORDER;
		else if (bit < PAGE_SHIFT)
			bit = PAGE_SHIFT;

		align = 1ul << bit;
	}
	addr = ALIGN(start, align);
	size = PAGE_ALIGN(size);
	if (unlikely(!size))
		return NULL;

	area = kmalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);

	if (unlikely(!area))
		return NULL;

	/*
	 * We always allocate a guard page.
	 */
	size += PAGE_SIZE;

	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if ((unsigned long)tmp->addr < addr) {
			if ((unsigned long)tmp->addr + tmp->size >= addr)
				addr = ALIGN(tmp->size +
					     (unsigned long)tmp->addr, align);
			continue;
		}
		if ((size + addr) < addr)
			goto out;
		if (size + addr <= (unsigned long)tmp->addr)
			goto found;
		addr = ALIGN(tmp->size + (unsigned long)tmp->addr, align);
		if (addr > end - size)
			goto out;
	}
	if ((size + addr) < addr)
		goto out;
	if (addr > end - size)
		goto out;

found:
	area->next = *p;
	*p = area;

	area->flags = flags;
	area->addr = (void *)addr;
	area->size = size;
	area->pages = NULL;
	area->nr_pages = 0;
	area->phys_addr = 0;
	write_unlock(&vmlist_lock);

	return area;

out:
	write_unlock(&vmlist_lock);
	kfree(area);
	if (printk_ratelimit())
		printk(KERN_WARNING "allocation failed: out of vmalloc space - use vmalloc=<size> to increase size.\n");
	return NULL;
}

struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end)
{
	return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(__get_vm_area);

/**
 *	get_vm_area  -  reserve a contiguous kernel virtual area
 *	@size:		size of the area
 *	@flags:		%VM_IOREMAP for I/O mappings or %VM_ALLOC
 *
 *	Search for an area of @size in the kernel virtual mapping area,
 *	and reserve it for our purposes.  Returns the area descriptor
 *	on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END);
}

struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
				   int node, gfp_t gfp_mask)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node,
				  gfp_mask);
}
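
/*
 * A hedged sketch of the typical VM_IOREMAP pattern (hypothetical; the real
 * ioremap() lives in per-architecture code): reserve an aligned hole, then
 * have arch code install the device mapping into it:
 *
 *	void __iomem *example_ioremap(unsigned long phys_addr, unsigned long size)
 *	{
 *		struct vm_struct *area;
 *
 *		area = get_vm_area(size, VM_IOREMAP);
 *		if (!area)
 *			return NULL;
 *		area->phys_addr = phys_addr;
 *		(arch code would now write the PTEs for phys_addr)
 *		return (void __iomem *)area->addr;
 *	}
 */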

/* Caller must hold vmlist_lock */
static struct vm_struct *__find_vm_area(const void *addr)
{
	struct vm_struct *tmp;

	for (tmp = vmlist; tmp != NULL; tmp = tmp->next) {
		if (tmp->addr == addr)
			break;
	}

	return tmp;
}

/* Caller must hold vmlist_lock */
static struct vm_struct *__remove_vm_area(const void *addr)
{
	struct vm_struct **p, *tmp;

	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr == addr)
			goto found;
	}
	return NULL;

found:
	unmap_vm_area(tmp);
	*p = tmp->next;

	/*
	 * Remove the guard page.
	 */
	tmp->size -= PAGE_SIZE;
	return tmp;
}

/**
 *	remove_vm_area  -  find and remove a contiguous kernel virtual area
 *	@addr:		base address
 *
 *	Search for the kernel VM area starting at @addr, and remove it.
 *	This function returns the found VM area, but using it is NOT safe
 *	on SMP machines, except for its size or flags.
 */
struct vm_struct *remove_vm_area(const void *addr)
{
	struct vm_struct *v;
	write_lock(&vmlist_lock);
	v = __remove_vm_area(addr);
	write_unlock(&vmlist_lock);
	return v;
}

static void __vunmap(const void *addr, int deallocate_pages)
{
	struct vm_struct *area;

	if (!addr)
		return;

	if ((PAGE_SIZE-1) & (unsigned long)addr) {
		printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
		WARN_ON(1);
		return;
	}

	area = remove_vm_area(addr);
	if (unlikely(!area)) {
		printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
				addr);
		WARN_ON(1);
		return;
	}

	debug_check_no_locks_freed(addr, area->size);

	if (deallocate_pages) {
		int i;

		for (i = 0; i < area->nr_pages; i++) {
			struct page *page = area->pages[i];

			BUG_ON(!page);
			__free_page(page);
		}

		if (area->flags & VM_VPAGES)
			vfree(area->pages);
		else
			kfree(area->pages);
	}

	kfree(area);
}

/**
 *	vfree  -  release memory allocated by vmalloc()
 *	@addr:		memory base address
 *
 *	Free the virtually contiguous memory area starting at @addr, as
 *	obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
 *	NULL, no operation is performed.
 *
 *	Must not be called in interrupt context.
 */
void vfree(const void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 1);
}
EXPORT_SYMBOL(vfree);

/**
 *	vunmap  -  release virtual mapping obtained by vmap()
 *	@addr:		memory base address
 *
 *	Free the virtually contiguous memory area starting at @addr,
 *	which was created from the page array passed to vmap().
 *
 *	Must not be called in interrupt context.
 */
void vunmap(const void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 0);
}
EXPORT_SYMBOL(vunmap);

/**
 *	vmap  -  map an array of pages into virtually contiguous space
 *	@pages:		array of page pointers
 *	@count:		number of pages to map
 *	@flags:		vm_area->flags
 *	@prot:		page protection for the mapping
 *
 *	Maps @count pages from @pages into contiguous kernel virtual
 *	space.
 */
void *vmap(struct page **pages, unsigned int count,
		unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;

	if (count > num_physpages)
		return NULL;

	area = get_vm_area((count << PAGE_SHIFT), flags);
	if (!area)
		return NULL;
	if (map_vm_area(area, prot, &pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}
EXPORT_SYMBOL(vmap);
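
/*
 * Usage sketch (hypothetical caller): gather scattered pages into one
 * kernel-virtual window, use it, then drop the mapping with vunmap().
 * The pages themselves are not freed:
 *
 *	struct page *pages[4];	(filled in by the caller)
 *	void *va;
 *
 *	va = vmap(pages, 4, VM_MAP, PAGE_KERNEL);
 *	if (va) {
 *		memset(va, 0, 4 * PAGE_SIZE);
 *		vunmap(va);
 *	}
 */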

static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
				 pgprot_t prot, int node)
{
	struct page **pages;
	unsigned int nr_pages, array_size, i;

	nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
	array_size = (nr_pages * sizeof(struct page *));

	area->nr_pages = nr_pages;
	/* Please note that the recursion is strictly bounded. */
	if (array_size > PAGE_SIZE) {
		pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
					PAGE_KERNEL, node);
		area->flags |= VM_VPAGES;
	} else {
		pages = kmalloc_node(array_size,
				(gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO,
				node);
	}
	area->pages = pages;
	if (!area->pages) {
		remove_vm_area(area->addr);
		kfree(area);
		return NULL;
	}

	for (i = 0; i < area->nr_pages; i++) {
		struct page *page;

		if (node < 0)
			page = alloc_page(gfp_mask);
		else
			page = alloc_pages_node(node, gfp_mask, 0);

		if (unlikely(!page)) {
			/* Successfully allocated i pages, free them in __vunmap() */
			area->nr_pages = i;
			goto fail;
		}
		area->pages[i] = page;
	}

	if (map_vm_area(area, prot, &pages))
		goto fail;
	return area->addr;

fail:
	vfree(area->addr);
	return NULL;
}

void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_area_node(area, gfp_mask, prot, -1);
}

/**
 *	__vmalloc_node  -  allocate virtually contiguous memory
 *	@size:		allocation size
 *	@gfp_mask:	flags for the page level allocator
 *	@prot:		protection mask for the allocated pages
 *	@node:		node to use for allocation or -1
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator with @gfp_mask flags.  Map them into contiguous
 *	kernel virtual space, using a pagetable protection of @prot.
 */
static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
			    int node)
{
	struct vm_struct *area;

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > num_physpages)
		return NULL;

	area = get_vm_area_node(size, VM_ALLOC, node, gfp_mask);
	if (!area)
		return NULL;

	return __vmalloc_area_node(area, gfp_mask, prot, node);
}

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_node(size, gfp_mask, prot, -1);
}
EXPORT_SYMBOL(__vmalloc);

/**
 *	vmalloc  -  allocate virtually contiguous memory
 *	@size:		allocation size
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc);
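
/*
 * A minimal sketch of the common pattern (hypothetical caller): large,
 * long-lived buffers that need no physical contiguity come from vmalloc()
 * and go back via vfree():
 *
 *	unsigned long *table;
 *
 *	table = vmalloc(nents * sizeof(*table));
 *	if (!table)
 *		return -ENOMEM;
 *	...
 *	vfree(table);
 *
 * (nents stands in for whatever sizing the caller uses.)
 */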

/**
 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
 * @size: allocation size
 *
 * The resulting memory area is zeroed so it can be mapped to userspace
 * without leaking data.
 */
void *vmalloc_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
	if (ret) {
		write_lock(&vmlist_lock);
		area = __find_vm_area(ret);
		area->flags |= VM_USERMAP;
		write_unlock(&vmlist_lock);
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_user);

/**
 *	vmalloc_node  -  allocate memory on a specific node
 *	@size:		allocation size
 *	@node:		numa node
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, node);
}
EXPORT_SYMBOL(vmalloc_node);

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 *	vmalloc_exec  -  allocate virtually contiguous, executable memory
 *	@size:		allocation size
 *
 *	Kernel-internal function to allocate enough pages to cover @size
 *	from the page level allocator and map them into contiguous and
 *	executable kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc_exec(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}

#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
#define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
#define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL)
#else
#define GFP_VMALLOC32 GFP_KERNEL
#endif

/**
 *	vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 *	@size:		allocation size
 *
 *	Allocate enough 32bit physically addressable pages to cover @size
 *	from the page level allocator and map them into contiguous kernel
 *	virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_VMALLOC32, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32);
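
/*
 * Hedged note (hypothetical example): vmalloc_32() suits hardware that can
 * only address 32-bit physical memory but can scatter-gather, since the
 * pages are not physically contiguous; each frame is translated with
 * vmalloc_to_page()/vmalloc_to_pfn() when the descriptor list is built:
 *
 *	void *buf = vmalloc_32(nframes * PAGE_SIZE);
 */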

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 * @size:	allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 */
void *vmalloc_32_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL);
	if (ret) {
		write_lock(&vmlist_lock);
		area = __find_vm_area(ret);
		area->flags |= VM_USERMAP;
		write_unlock(&vmlist_lock);
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_32_user);

long vread(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*buf = *addr;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}

long vwrite(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*addr = *buf;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}
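
/*
 * Illustrative note (a hedged sketch, not part of this file): vread() and
 * vwrite() walk vmlist under the lock, so they never touch guard pages or
 * unmapped holes; vread() fills the gap before each area with '\0'.  A
 * /dev/kmem-style reader could simply do:
 *
 *	char kbuf[64];
 *	long n = vread(kbuf, (char *)some_vmalloc_addr, sizeof(kbuf));
 *
 * where n reports how far the buffer cursor advanced, not how many live
 * bytes were copied.
 */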

/**
 *	remap_vmalloc_range  -  map vmalloc pages to userspace
 *	@vma:		vma to cover (map full range of vma)
 *	@addr:		vmalloc memory
 *	@pgoff:		number of pages into addr before first page to map
 *
 *	Returns:	0 for success, -Exxx on failure
 *
 *	This function checks that addr is a valid vmalloc'ed area, and
 *	that it is big enough to cover the vma. Will return failure if
 *	that criterion isn't met.
 *
 *	Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
						unsigned long pgoff)
{
	struct vm_struct *area;
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;
	int ret;

	if ((PAGE_SIZE-1) & (unsigned long)addr)
		return -EINVAL;

	read_lock(&vmlist_lock);
	area = __find_vm_area(addr);
	if (!area)
		goto out_einval_locked;

	if (!(area->flags & VM_USERMAP))
		goto out_einval_locked;

	if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
		goto out_einval_locked;
	read_unlock(&vmlist_lock);

	addr += pgoff << PAGE_SHIFT;
	do {
		struct page *page = vmalloc_to_page(addr);
		ret = vm_insert_page(vma, uaddr, page);
		if (ret)
			return ret;

		uaddr += PAGE_SIZE;
		addr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	/* Prevent "things" like memory migration? VM_flags need a cleanup... */
	vma->vm_flags |= VM_RESERVED;

	return ret;

out_einval_locked:
	read_unlock(&vmlist_lock);
	return -EINVAL;
}
EXPORT_SYMBOL(remap_vmalloc_range);
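
/*
 * A hedged sketch of the intended pairing (hypothetical driver code): a
 * buffer from vmalloc_user() carries VM_USERMAP, so a driver can hand it
 * to userspace from its ->mmap() handler:
 *
 *	static void *example_buf;	(allocated elsewhere with vmalloc_user())
 *
 *	static int example_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_vmalloc_range(vma, example_buf, vma->vm_pgoff);
 *	}
 */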

/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
void __attribute__((weak)) vmalloc_sync_all(void)
{
}


static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
{
	/* apply_to_page_range() does all the hard work. */
	return 0;
}

/**
 *	alloc_vm_area - allocate a range of kernel address space
 *	@size:		size of the area
 *
 *	Returns:	NULL on failure, vm_struct on success
 *
 *	This function reserves a range of kernel address space, and
 *	allocates pagetables to map that range.  No actual mappings
 *	are created.  If the kernel address space is not shared
 *	between processes, it syncs the pagetable across all
 *	processes.
 */
struct vm_struct *alloc_vm_area(size_t size)
{
	struct vm_struct *area;

	area = get_vm_area(size, VM_IOREMAP);
	if (area == NULL)
		return NULL;

	/*
	 * This ensures that page tables are constructed for this region
	 * of kernel virtual address space and mapped into init_mm.
	 */
	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
				area->size, f, NULL)) {
		free_vm_area(area);
		return NULL;
	}

	/*
	 * Make sure the pagetables are constructed in process kernel
	 * mappings.
	 */
	vmalloc_sync_all();

	return area;
}
EXPORT_SYMBOL_GPL(alloc_vm_area);
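
/*
 * Usage sketch (hypothetical; paravirtualized guests are the sort of
 * caller this serves): reserve a pagetable-backed hole whose mappings are
 * installed by other means, such as a hypercall, and release it with
 * free_vm_area():
 *
 *	struct vm_struct *area = alloc_vm_area(PAGE_SIZE);
 *
 *	if (!area)
 *		return -ENOMEM;
 *	(point the hypervisor at area->addr so it installs the PTE)
 *	...
 *	free_vm_area(area);
 */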

void free_vm_area(struct vm_struct *area)
{
	struct vm_struct *ret;
	ret = remove_vm_area(area->addr);
	BUG_ON(ret != area);
	kfree(area);
}
EXPORT_SYMBOL_GPL(free_vm_area);


#ifdef CONFIG_PROC_FS
static void *s_start(struct seq_file *m, loff_t *pos)
{
	loff_t n = *pos;
	struct vm_struct *v;

	read_lock(&vmlist_lock);
	v = vmlist;
	while (n > 0 && v) {
		n--;
		v = v->next;
	}
	if (!n)
		return v;

	return NULL;
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct vm_struct *v = p;

	++*pos;
	return v->next;
}

static void s_stop(struct seq_file *m, void *p)
{
	read_unlock(&vmlist_lock);
}

static int s_show(struct seq_file *m, void *p)
{
	struct vm_struct *v = p;

	seq_printf(m, "0x%p-0x%p %7ld",
		v->addr, v->addr + v->size, v->size);

	if (v->nr_pages)
		seq_printf(m, " pages=%d", v->nr_pages);

	if (v->phys_addr)
		seq_printf(m, " phys=%lx", v->phys_addr);

	if (v->flags & VM_IOREMAP)
		seq_printf(m, " ioremap");

	if (v->flags & VM_ALLOC)
		seq_printf(m, " vmalloc");

	if (v->flags & VM_MAP)
		seq_printf(m, " vmap");

	if (v->flags & VM_USERMAP)
		seq_printf(m, " user");

	if (v->flags & VM_VPAGES)
		seq_printf(m, " vpages");

	seq_putc(m, '\n');
	return 0;
}

const struct seq_operations vmalloc_op = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};
#endif