vmalloc.c revision 6cb062296f73e74768cca2f3eaf90deac54de02d
/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

#include <linux/vmalloc.h>

#include <asm/uaccess.h>
#include <asm/tlbflush.h>


DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;

static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
			    int node);

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static inline void vunmap_pmd_range(pud_t *pud, unsigned long addr,
						unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static inline void vunmap_pud_range(pgd_t *pgd, unsigned long addr,
						unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

void unmap_kernel_range(unsigned long addr, unsigned long size)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;
	unsigned long end = addr + size;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	flush_cache_vunmap(addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_pud_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
	flush_tlb_kernel_range(start, end);
}

static void unmap_vm_area(struct vm_struct *area)
{
	unmap_kernel_range((unsigned long)area->addr, area->size);
}

static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
			unsigned long end, pgprot_t prot, struct page ***pages)
{
	pte_t *pte;

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = **pages;
		WARN_ON(!pte_none(*pte));
		if (!page)
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*pages)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static inline int vmap_pmd_range(pud_t *pud, unsigned long addr,
			unsigned long end, pgprot_t prot, struct page ***pages)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int vmap_pud_range(pgd_t *pgd, unsigned long addr,
			unsigned long end, pgprot_t prot, struct page ***pages)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = (unsigned long) area->addr;
	unsigned long end = addr + area->size - PAGE_SIZE;
	int err;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_pud_range(pgd, addr, next, prot, pages);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
	flush_cache_vmap((unsigned long) area->addr, end);
	return err;
}
EXPORT_SYMBOL_GPL(map_vm_area);
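
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * how a caller might pair get_vm_area() with map_vm_area() by hand.  This
 * is essentially what vmap() below does; the helper name and the choice of
 * the VM_MAP flag are assumptions for the example.
 */
static void *example_map_two_pages(struct page *pg0, struct page *pg1,
				   pgprot_t prot)
{
	struct page *pages[2];
	struct page **cursor = pages;	/* map_vm_area() advances this */
	struct vm_struct *area;

	pages[0] = pg0;
	pages[1] = pg1;

	area = get_vm_area(2 * PAGE_SIZE, VM_MAP);
	if (!area)
		return NULL;

	if (map_vm_area(area, prot, &cursor)) {
		/* vunmap() removes the area again and clears its page tables */
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;	/* released later with vunmap() */
}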

static struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long flags,
					    unsigned long start, unsigned long end,
					    int node, gfp_t gfp_mask)
{
	struct vm_struct **p, *tmp, *area;
	unsigned long align = 1;
	unsigned long addr;

	BUG_ON(in_interrupt());
	if (flags & VM_IOREMAP) {
		int bit = fls(size);

		if (bit > IOREMAP_MAX_ORDER)
			bit = IOREMAP_MAX_ORDER;
		else if (bit < PAGE_SHIFT)
			bit = PAGE_SHIFT;

		align = 1ul << bit;
	}
	addr = ALIGN(start, align);
	size = PAGE_ALIGN(size);
	if (unlikely(!size))
		return NULL;

	area = kmalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);

	if (unlikely(!area))
		return NULL;

	/*
	 * We always allocate a guard page.
	 */
	size += PAGE_SIZE;

	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if ((unsigned long)tmp->addr < addr) {
			if ((unsigned long)tmp->addr + tmp->size >= addr)
				addr = ALIGN(tmp->size +
					     (unsigned long)tmp->addr, align);
			continue;
		}
		if ((size + addr) < addr)
			goto out;
		if (size + addr <= (unsigned long)tmp->addr)
			goto found;
		addr = ALIGN(tmp->size + (unsigned long)tmp->addr, align);
		if (addr > end - size)
			goto out;
	}

found:
	area->next = *p;
	*p = area;

	area->flags = flags;
	area->addr = (void *)addr;
	area->size = size;
	area->pages = NULL;
	area->nr_pages = 0;
	area->phys_addr = 0;
	write_unlock(&vmlist_lock);

	return area;

out:
	write_unlock(&vmlist_lock);
	kfree(area);
	if (printk_ratelimit())
		printk(KERN_WARNING "allocation failed: out of vmalloc space - use vmalloc=<size> to increase size.\n");
	return NULL;
}

struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end)
{
	return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(__get_vm_area);

/**
 *	get_vm_area  -  reserve a contiguous kernel virtual area
 *	@size:		size of the area
 *	@flags:		%VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 *	Search an area of @size in the kernel virtual mapping area,
 *	and reserve it for our purposes.  Returns the area descriptor
 *	on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END);
}

struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
				   int node, gfp_t gfp_mask)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node,
				  gfp_mask);
}

/* Caller must hold vmlist_lock */
static struct vm_struct *__find_vm_area(void *addr)
{
	struct vm_struct *tmp;

	for (tmp = vmlist; tmp != NULL; tmp = tmp->next) {
		if (tmp->addr == addr)
			break;
	}

	return tmp;
}

/* Caller must hold vmlist_lock */
static struct vm_struct *__remove_vm_area(void *addr)
{
	struct vm_struct **p, *tmp;

	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr == addr)
			goto found;
	}
	return NULL;

found:
	unmap_vm_area(tmp);
	*p = tmp->next;

	/*
	 * Remove the guard page.
	 */
	tmp->size -= PAGE_SIZE;
	return tmp;
}

/**
 *	remove_vm_area  -  find and remove a contiguous kernel virtual area
 *	@addr:		base address
 *
 *	Search for the kernel VM area starting at @addr, and remove it.
 *	This function returns the found VM area, but using it is NOT safe
 *	on SMP machines, except for reading its size or flags.
 */
struct vm_struct *remove_vm_area(void *addr)
{
	struct vm_struct *v;
	write_lock(&vmlist_lock);
	v = __remove_vm_area(addr);
	write_unlock(&vmlist_lock);
	return v;
}

static void __vunmap(void *addr, int deallocate_pages)
{
	struct vm_struct *area;

	if (!addr)
		return;

	if ((PAGE_SIZE-1) & (unsigned long)addr) {
		printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
		WARN_ON(1);
		return;
	}

	area = remove_vm_area(addr);
	if (unlikely(!area)) {
		printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
				addr);
		WARN_ON(1);
		return;
	}

	debug_check_no_locks_freed(addr, area->size);

	if (deallocate_pages) {
		int i;

		for (i = 0; i < area->nr_pages; i++) {
			BUG_ON(!area->pages[i]);
			__free_page(area->pages[i]);
		}

		if (area->flags & VM_VPAGES)
			vfree(area->pages);
		else
			kfree(area->pages);
	}

	kfree(area);
	return;
}

/**
 *	vfree  -  release memory allocated by vmalloc()
 *	@addr:		memory base address
 *
 *	Free the virtually contiguous memory area starting at @addr, as
 *	obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
 *	NULL, no operation is performed.
 *
 *	Must not be called in interrupt context.
 */
void vfree(void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 1);
}
EXPORT_SYMBOL(vfree);

/**
 *	vunmap  -  release virtual mapping obtained by vmap()
 *	@addr:		memory base address
 *
 *	Free the virtually contiguous memory area starting at @addr,
 *	which was created from the page array passed to vmap().
 *
 *	Must not be called in interrupt context.
 */
void vunmap(void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 0);
}
EXPORT_SYMBOL(vunmap);

/**
 *	vmap  -  map an array of pages into virtually contiguous space
 *	@pages:		array of page pointers
 *	@count:		number of pages to map
 *	@flags:		vm_area->flags
 *	@prot:		page protection for the mapping
 *
 *	Maps @count pages from @pages into contiguous kernel virtual
 *	space.
 */
void *vmap(struct page **pages, unsigned int count,
		unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;

	if (count > num_physpages)
		return NULL;

	area = get_vm_area((count << PAGE_SHIFT), flags);
	if (!area)
		return NULL;
	if (map_vm_area(area, prot, &pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}
EXPORT_SYMBOL(vmap);
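
/*
 * Illustrative sketch (editorial addition, not in the original file): glue
 * two physically unrelated pages into one virtually contiguous buffer,
 * write through the new mapping, then drop the mapping again.  Only the
 * mapping goes away in vunmap(); the pages themselves stay allocated and
 * remain the caller's responsibility.  The helper name is hypothetical.
 */
static int example_vmap_usage(struct page *pages[2])
{
	void *vaddr;

	vaddr = vmap(pages, 2, VM_MAP, PAGE_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	/* touch the first byte of each backing page through one pointer */
	((char *)vaddr)[0] = 0;
	((char *)vaddr)[PAGE_SIZE] = 0;

	vunmap(vaddr);
	return 0;
}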

void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
				pgprot_t prot, int node)
{
	struct page **pages;
	unsigned int nr_pages, array_size, i;

	nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
	array_size = (nr_pages * sizeof(struct page *));

	area->nr_pages = nr_pages;
	/* Please note that the recursion is strictly bounded. */
	if (array_size > PAGE_SIZE) {
		pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
					PAGE_KERNEL, node);
		area->flags |= VM_VPAGES;
	} else {
		pages = kmalloc_node(array_size,
				(gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO,
				node);
	}
	area->pages = pages;
	if (!area->pages) {
		remove_vm_area(area->addr);
		kfree(area);
		return NULL;
	}

	for (i = 0; i < area->nr_pages; i++) {
		if (node < 0)
			area->pages[i] = alloc_page(gfp_mask);
		else
			area->pages[i] = alloc_pages_node(node, gfp_mask, 0);
		if (unlikely(!area->pages[i])) {
			/* Successfully allocated i pages, free them in __vunmap() */
			area->nr_pages = i;
			goto fail;
		}
	}

	if (map_vm_area(area, prot, &pages))
		goto fail;
	return area->addr;

fail:
	vfree(area->addr);
	return NULL;
}

void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_area_node(area, gfp_mask, prot, -1);
}

/**
 *	__vmalloc_node  -  allocate virtually contiguous memory
 *	@size:		allocation size
 *	@gfp_mask:	flags for the page level allocator
 *	@prot:		protection mask for the allocated pages
 *	@node:		node to use for allocation or -1
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator with @gfp_mask flags.  Map them into contiguous
 *	kernel virtual space, using a pagetable protection of @prot.
 */
static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
			    int node)
{
	struct vm_struct *area;

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > num_physpages)
		return NULL;

	area = get_vm_area_node(size, VM_ALLOC, node, gfp_mask);
	if (!area)
		return NULL;

	return __vmalloc_area_node(area, gfp_mask, prot, node);
}

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_node(size, gfp_mask, prot, -1);
}
EXPORT_SYMBOL(__vmalloc);

/**
 *	vmalloc  -  allocate virtually contiguous memory
 *	@size:		allocation size
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc);
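
/*
 * Illustrative sketch (editorial addition, not in the original file): a
 * large table that only needs to be virtually contiguous is the typical
 * vmalloc() user.  The matching vfree() must run from process context.
 * All names here are hypothetical.
 */
static unsigned long *example_big_table;

static int example_table_init(unsigned long nr_entries)
{
	example_big_table = vmalloc(nr_entries * sizeof(*example_big_table));
	if (!example_big_table)
		return -ENOMEM;
	return 0;
}

static void example_table_exit(void)
{
	vfree(example_big_table);	/* vfree(NULL) is a no-op */
	example_big_table = NULL;
}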

/**
 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
 * @size: allocation size
 *
 * The resulting memory area is zeroed so it can be mapped to userspace
 * without leaking data.
 */
void *vmalloc_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
	if (ret) {
		write_lock(&vmlist_lock);
		area = __find_vm_area(ret);
		area->flags |= VM_USERMAP;
		write_unlock(&vmlist_lock);
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_user);

/**
 *	vmalloc_node  -  allocate memory on a specific node
 *	@size:		allocation size
 *	@node:		numa node
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, node);
}
EXPORT_SYMBOL(vmalloc_node);
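
/*
 * Illustrative sketch (editorial addition, not in the original file):
 * per-node buffers are the typical vmalloc_node() user; the backing pages
 * come from @node's zones when possible.  The array, the helper name and
 * the reliance on the nodemask iterators from <linux/nodemask.h> are
 * assumptions for the example.
 */
static void *example_node_buf[MAX_NUMNODES];

static int example_alloc_per_node(unsigned long size)
{
	int node;

	for_each_online_node(node) {
		example_node_buf[node] = vmalloc_node(size, node);
		if (!example_node_buf[node])
			return -ENOMEM;	/* caller unwinds with vfree() */
	}
	return 0;
}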

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 *	vmalloc_exec  -  allocate virtually contiguous, executable memory
 *	@size:		allocation size
 *
 *	Kernel-internal function to allocate enough pages to cover @size
 *	from the page level allocator and map them into contiguous and
 *	executable kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc_exec(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}

#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
#define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
#define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL)
#else
#define GFP_VMALLOC32 GFP_KERNEL
#endif

/**
 *	vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 *	@size:		allocation size
 *
 *	Allocate enough 32bit PA addressable pages to cover @size from the
 *	page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_VMALLOC32, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32);

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 * @size: allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 */
void *vmalloc_32_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL);
	if (ret) {
		write_lock(&vmlist_lock);
		area = __find_vm_area(ret);
		area->flags |= VM_USERMAP;
		write_unlock(&vmlist_lock);
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_32_user);

long vread(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*buf = *addr;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}

long vwrite(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*addr = *buf;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}

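/*
 * Illustrative sketch (editorial addition, not in the original file):
 * vread() is how /proc/kcore-style readers copy out of the vmalloc region;
 * holes and guard pages between areas come back as zero bytes instead of
 * faulting.  The helper name is hypothetical.
 */
static long example_peek_vmalloc(char *kbuf, char *vmalloc_addr,
				 unsigned long len)
{
	return vread(kbuf, vmalloc_addr, len);
}
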
/**
 *	remap_vmalloc_range  -  map vmalloc pages to userspace
 *	@vma:		vma to cover (map full range of vma)
 *	@addr:		vmalloc memory
 *	@pgoff:		number of pages into addr before first page to map
 *	@returns:	0 for success, -Exxx on failure
 *
 *	This function checks that addr is a valid vmalloc'ed area, and
 *	that it is big enough to cover the vma. Will return failure if
 *	that criterion isn't met.
 *
 *	Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
						unsigned long pgoff)
{
	struct vm_struct *area;
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;
	int ret;

	if ((PAGE_SIZE-1) & (unsigned long)addr)
		return -EINVAL;

	read_lock(&vmlist_lock);
	area = __find_vm_area(addr);
	if (!area)
		goto out_einval_locked;

	if (!(area->flags & VM_USERMAP))
		goto out_einval_locked;

	if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
		goto out_einval_locked;
	read_unlock(&vmlist_lock);

	addr += pgoff << PAGE_SHIFT;
	do {
		struct page *page = vmalloc_to_page(addr);
		ret = vm_insert_page(vma, uaddr, page);
		if (ret)
			return ret;

		uaddr += PAGE_SIZE;
		addr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	/* Prevent "things" like memory migration? VM_flags need a cleanup... */
	vma->vm_flags |= VM_RESERVED;

	return ret;

out_einval_locked:
	read_unlock(&vmlist_lock);
	return -EINVAL;
}
EXPORT_SYMBOL(remap_vmalloc_range);
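
/*
 * Illustrative sketch (editorial addition, not in the original file): the
 * usual pattern is a driver ->mmap() handler exposing a vmalloc_user()
 * buffer.  vmalloc_user() sets VM_USERMAP, which is what the check above
 * requires.  The helper name and the buffer are hypothetical; @buf is
 * assumed to come from vmalloc_user() and to be large enough for the vma.
 */
static int example_mmap_vmalloc_buf(struct vm_area_struct *vma, void *buf)
{
	return remap_vmalloc_range(vma, buf, vma->vm_pgoff);
}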

/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
void __attribute__((weak)) vmalloc_sync_all(void)
{
}

static int f(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
{
	/* apply_to_page_range() does all the hard work. */
	return 0;
}

/**
 *	alloc_vm_area - allocate a range of kernel address space
 *	@size:		size of the area
 *	@returns:	NULL on failure, vm_struct on success
 *
 *	This function reserves a range of kernel address space, and
 *	allocates pagetables to map that range.  No actual mappings
 *	are created.  If the kernel address space is not shared
 *	between processes, it syncs the pagetable across all
 *	processes.
 */
struct vm_struct *alloc_vm_area(size_t size)
{
	struct vm_struct *area;

	area = get_vm_area(size, VM_IOREMAP);
	if (area == NULL)
		return NULL;

	/*
	 * This ensures that page tables are constructed for this region
	 * of kernel virtual address space and mapped into init_mm.
	 */
	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
				area->size, f, NULL)) {
		free_vm_area(area);
		return NULL;
	}

	/* Make sure the pagetables are constructed in process kernel
	   mappings */
	vmalloc_sync_all();

	return area;
}
EXPORT_SYMBOL_GPL(alloc_vm_area);
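
/*
 * Illustrative sketch (editorial addition, not in the original file):
 * reserve a page-table-backed hole in the kernel address space whose PTEs
 * will be filled in by someone else (a hypervisor, for instance), and give
 * it back with free_vm_area() when done.  All names are hypothetical.
 */
static struct vm_struct *example_hole;

static void *example_reserve_hole(size_t size)
{
	example_hole = alloc_vm_area(size);
	if (!example_hole)
		return NULL;
	/* no pages are mapped at example_hole->addr yet; only page tables exist */
	return example_hole->addr;
}

static void example_release_hole(void)
{
	if (example_hole) {
		free_vm_area(example_hole);
		example_hole = NULL;
	}
}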

void free_vm_area(struct vm_struct *area)
{
	struct vm_struct *ret;

	ret = remove_vm_area(area->addr);
	BUG_ON(ret != area);
	kfree(area);
}
EXPORT_SYMBOL_GPL(free_vm_area);