vmalloc.c revision 0d08e0d3a97cce22ebf80b54785e00d9b94e1add
/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

#include <linux/vmalloc.h>

#include <asm/uaccess.h>
#include <asm/tlbflush.h>


DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;

static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
			    int node);

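/*
 * The helpers below walk the kernel page tables in the generic
 * pgd -> pud -> pmd -> pte order.  Each level iterates with
 * p?d_addr_end(), which clamps the step to the end of that entry's
 * reach, so the walk never strays outside [addr, end).  The vunmap
 * side clears ptes; the vmap side (further down) allocates missing
 * intermediate tables and installs ptes for the caller's pages.
 */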
static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static inline void vunmap_pmd_range(pud_t *pud, unsigned long addr,
						unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static inline void vunmap_pud_range(pgd_t *pgd, unsigned long addr,
						unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

void unmap_vm_area(struct vm_struct *area)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = (unsigned long) area->addr;
	unsigned long end = addr + area->size;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	flush_cache_vunmap(addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_pud_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
	flush_tlb_kernel_range((unsigned long) area->addr, end);
}

static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
			unsigned long end, pgprot_t prot, struct page ***pages)
{
	pte_t *pte;

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = **pages;
		WARN_ON(!pte_none(*pte));
		if (!page)
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*pages)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static inline int vmap_pmd_range(pud_t *pud, unsigned long addr,
			unsigned long end, pgprot_t prot, struct page ***pages)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int vmap_pud_range(pgd_t *pgd, unsigned long addr,
			unsigned long end, pgprot_t prot, struct page ***pages)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = (unsigned long) area->addr;
	unsigned long end = addr + area->size - PAGE_SIZE;
	int err;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_pud_range(pgd, addr, next, prot, pages);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
	flush_cache_vmap((unsigned long) area->addr, end);
	return err;
}

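/*
 * Note on the contract between the two halves above: map_vm_area()
 * covers area->size - PAGE_SIZE bytes, deliberately leaving the
 * trailing guard page unmapped, and advances the caller's *pages
 * cursor one entry per pte it installs.  unmap_vm_area() tears down
 * the whole range, guard page included, and flushes the TLB for it.
 */
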
static struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long flags,
					    unsigned long start, unsigned long end,
					    int node, gfp_t gfp_mask)
{
	struct vm_struct **p, *tmp, *area;
	unsigned long align = 1;
	unsigned long addr;

	BUG_ON(in_interrupt());
	if (flags & VM_IOREMAP) {
		int bit = fls(size);

		if (bit > IOREMAP_MAX_ORDER)
			bit = IOREMAP_MAX_ORDER;
		else if (bit < PAGE_SHIFT)
			bit = PAGE_SHIFT;

		align = 1ul << bit;
	}
	addr = ALIGN(start, align);
	size = PAGE_ALIGN(size);
	if (unlikely(!size))
		return NULL;

	area = kmalloc_node(sizeof(*area), gfp_mask & GFP_LEVEL_MASK, node);
	if (unlikely(!area))
		return NULL;

	/*
	 * We always allocate a guard page.
	 */
	size += PAGE_SIZE;

	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if ((unsigned long)tmp->addr < addr) {
			if ((unsigned long)tmp->addr + tmp->size >= addr)
				addr = ALIGN(tmp->size +
					     (unsigned long)tmp->addr, align);
			continue;
		}
		if ((size + addr) < addr)
			goto out;
		if (size + addr <= (unsigned long)tmp->addr)
			goto found;
		addr = ALIGN(tmp->size + (unsigned long)tmp->addr, align);
		if (addr > end - size)
			goto out;
	}

found:
	area->next = *p;
	*p = area;

	area->flags = flags;
	area->addr = (void *)addr;
	area->size = size;
	area->pages = NULL;
	area->nr_pages = 0;
	area->phys_addr = 0;
	write_unlock(&vmlist_lock);

	return area;

out:
	write_unlock(&vmlist_lock);
	kfree(area);
	if (printk_ratelimit())
		printk(KERN_WARNING "allocation failed: out of vmalloc space - use vmalloc=<size> to increase size.\n");
	return NULL;
}

struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end)
{
	return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL);
}

/**
 *	get_vm_area  -  reserve a contiguous kernel virtual area
 *	@size:		size of the area
 *	@flags:		%VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 *	Search for an area of @size in the kernel virtual mapping area,
 *	and reserve it for our purposes.  Returns the area descriptor
 *	on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END);
}

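/*
 * Illustrative sketch only: ioremap() implementations are the main
 * users of the VM_IOREMAP flag, pairing get_vm_area() with a direct
 * page-table fill of the physical range.  Hypothetical outline,
 * assuming the generic ioremap_page_range() helper is available:
 *
 *	area = get_vm_area(size, VM_IOREMAP);
 *	if (!area)
 *		return NULL;
 *	addr = (unsigned long)area->addr;
 *	if (ioremap_page_range(addr, addr + size, phys_addr, prot)) {
 *		vunmap(area->addr);
 *		return NULL;
 *	}
 *	return (void __iomem *)area->addr;
 */
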
254
255struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
256				   int node, gfp_t gfp_mask)
257{
258	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node,
259				  gfp_mask);
260}
261
262/* Caller must hold vmlist_lock */
263static struct vm_struct *__find_vm_area(void *addr)
264{
265	struct vm_struct *tmp;
266
267	for (tmp = vmlist; tmp != NULL; tmp = tmp->next) {
268		 if (tmp->addr == addr)
269			break;
270	}
271
272	return tmp;
273}
274
275/* Caller must hold vmlist_lock */
276static struct vm_struct *__remove_vm_area(void *addr)
277{
278	struct vm_struct **p, *tmp;
279
280	for (p = &vmlist ; (tmp = *p) != NULL ;p = &tmp->next) {
281		 if (tmp->addr == addr)
282			 goto found;
283	}
284	return NULL;
285
286found:
287	unmap_vm_area(tmp);
288	*p = tmp->next;
289
290	/*
291	 * Remove the guard page.
292	 */
293	tmp->size -= PAGE_SIZE;
294	return tmp;
295}
296
/**
 *	remove_vm_area  -  find and remove a contiguous kernel virtual area
 *	@addr:		base address
 *
 *	Search for the kernel VM area starting at @addr, and remove it.
 *	This function returns the found VM area, but using it is NOT safe
 *	on SMP machines, except for its size or flags.
 */
struct vm_struct *remove_vm_area(void *addr)
{
	struct vm_struct *v;
	write_lock(&vmlist_lock);
	v = __remove_vm_area(addr);
	write_unlock(&vmlist_lock);
	return v;
}

void __vunmap(void *addr, int deallocate_pages)
{
	struct vm_struct *area;

	if (!addr)
		return;

	if ((PAGE_SIZE-1) & (unsigned long)addr) {
		printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
		WARN_ON(1);
		return;
	}

	area = remove_vm_area(addr);
	if (unlikely(!area)) {
		printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
				addr);
		WARN_ON(1);
		return;
	}

	debug_check_no_locks_freed(addr, area->size);

	if (deallocate_pages) {
		int i;

		for (i = 0; i < area->nr_pages; i++) {
			BUG_ON(!area->pages[i]);
			__free_page(area->pages[i]);
		}

		if (area->flags & VM_VPAGES)
			vfree(area->pages);
		else
			kfree(area->pages);
	}

	kfree(area);
}

/**
 *	vfree  -  release memory allocated by vmalloc()
 *	@addr:		memory base address
 *
 *	Free the virtually contiguous memory area starting at @addr, as
 *	obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
 *	NULL, no operation is performed.
 *
 *	Must not be called in interrupt context.
 */
void vfree(void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 1);
}
EXPORT_SYMBOL(vfree);

/**
 *	vunmap  -  release virtual mapping obtained by vmap()
 *	@addr:		memory base address
 *
 *	Free the virtually contiguous memory area starting at @addr,
 *	which was created from the page array passed to vmap().
 *
 *	Must not be called in interrupt context.
 */
void vunmap(void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 0);
}
EXPORT_SYMBOL(vunmap);

/**
 *	vmap  -  map an array of pages into virtually contiguous space
 *	@pages:		array of page pointers
 *	@count:		number of pages to map
 *	@flags:		vm_area->flags
 *	@prot:		page protection for the mapping
 *
 *	Maps @count pages from @pages into contiguous kernel virtual
 *	space.
 */
void *vmap(struct page **pages, unsigned int count,
		unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;

	if (count > num_physpages)
		return NULL;

	area = get_vm_area((count << PAGE_SHIFT), flags);
	if (!area)
		return NULL;
	if (map_vm_area(area, prot, &pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}
EXPORT_SYMBOL(vmap);
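
/*
 * Illustrative sketch only (error handling omitted): a typical vmap()
 * consumer stitches independently allocated pages into one virtually
 * contiguous view and drops it with vunmap().  Note that vunmap()
 * releases the mapping but not the pages themselves:
 *
 *	struct page *pg[4];
 *	void *view;
 *	int i;
 *
 *	for (i = 0; i < 4; i++)
 *		pg[i] = alloc_page(GFP_KERNEL);
 *	view = vmap(pg, 4, VM_MAP, PAGE_KERNEL);
 *	...
 *	vunmap(view);
 *	for (i = 0; i < 4; i++)
 *		__free_page(pg[i]);
 */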

void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
				pgprot_t prot, int node)
{
	struct page **pages;
	unsigned int nr_pages, array_size, i;

	nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
	array_size = (nr_pages * sizeof(struct page *));

	area->nr_pages = nr_pages;
	/*
	 * Note that the recursion is strictly bounded: each nested page
	 * array is smaller than the allocation it describes by a factor
	 * of PAGE_SIZE / sizeof(struct page *), so the nesting bottoms
	 * out after a level or two.
	 */
	if (array_size > PAGE_SIZE) {
		pages = __vmalloc_node(array_size, gfp_mask, PAGE_KERNEL, node);
		area->flags |= VM_VPAGES;
	} else {
		pages = kmalloc_node(array_size,
				(gfp_mask & GFP_LEVEL_MASK),
				node);
	}
	area->pages = pages;
	if (!area->pages) {
		remove_vm_area(area->addr);
		kfree(area);
		return NULL;
	}
	memset(area->pages, 0, array_size);

	for (i = 0; i < area->nr_pages; i++) {
		if (node < 0)
			area->pages[i] = alloc_page(gfp_mask);
		else
			area->pages[i] = alloc_pages_node(node, gfp_mask, 0);
		if (unlikely(!area->pages[i])) {
			/* Successfully allocated i pages, free them in __vunmap() */
			area->nr_pages = i;
			goto fail;
		}
	}

	if (map_vm_area(area, prot, &pages))
		goto fail;
	return area->addr;

fail:
	vfree(area->addr);
	return NULL;
}

void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_area_node(area, gfp_mask, prot, -1);
}

/**
 *	__vmalloc_node  -  allocate virtually contiguous memory
 *	@size:		allocation size
 *	@gfp_mask:	flags for the page level allocator
 *	@prot:		protection mask for the allocated pages
 *	@node:		node to use for allocation or -1
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator with @gfp_mask flags.  Map them into contiguous
 *	kernel virtual space, using a pagetable protection of @prot.
 */
static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
			    int node)
{
	struct vm_struct *area;

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > num_physpages)
		return NULL;

	area = get_vm_area_node(size, VM_ALLOC, node, gfp_mask);
	if (!area)
		return NULL;

	return __vmalloc_area_node(area, gfp_mask, prot, node);
}

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_node(size, gfp_mask, prot, -1);
}
EXPORT_SYMBOL(__vmalloc);

/**
 *	vmalloc  -  allocate virtually contiguous memory
 *	@size:		allocation size
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc);
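
/*
 * Illustrative sketch only: vmalloc() suits large, long-lived buffers
 * that need no physical contiguity.  The memory is not zeroed (no
 * __GFP_ZERO here), and every success must be paired with vfree():
 *
 *	void *buf = vmalloc(64 * 1024);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	vfree(buf);
 */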

/**
 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
 * @size: allocation size
 *
 * The resulting memory area is zeroed so it can be mapped to userspace
 * without leaking data.
 */
void *vmalloc_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
	if (ret) {
		write_lock(&vmlist_lock);
		area = __find_vm_area(ret);
		area->flags |= VM_USERMAP;
		write_unlock(&vmlist_lock);
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_user);

/**
 *	vmalloc_node  -  allocate memory on a specific node
 *	@size:		allocation size
 *	@node:		numa node
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, node);
}
EXPORT_SYMBOL(vmalloc_node);

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 *	vmalloc_exec  -  allocate virtually contiguous, executable memory
 *	@size:		allocation size
 *
 *	Kernel-internal function to allocate enough pages to cover @size
 *	from the page level allocator and map them into contiguous and
 *	executable kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc_exec(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}

#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
#define GFP_VMALLOC32 GFP_DMA32
#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
#define GFP_VMALLOC32 GFP_DMA
#else
#define GFP_VMALLOC32 GFP_KERNEL
#endif

/**
 *	vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 *	@size:		allocation size
 *
 *	Allocate enough 32bit PA addressable pages to cover @size from the
 *	page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_VMALLOC32, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32);

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 *	@size:		allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 */
void *vmalloc_32_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL);
	if (ret) {
		write_lock(&vmlist_lock);
		area = __find_vm_area(ret);
		area->flags |= VM_USERMAP;
		write_unlock(&vmlist_lock);
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_32_user);

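/*
 * vread()/vwrite() copy between a caller-supplied buffer and vmalloc
 * space, walking vmlist under the read lock.  Gaps before an area are
 * zero-filled on read and skipped on write, guard pages are never
 * touched, and the return value is the number of bytes stepped over
 * in the buffer.  /proc/kcore is the classic consumer of vread().
 */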
long vread(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*buf = *addr;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}

long vwrite(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*addr = *buf;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}

/**
 *	remap_vmalloc_range  -  map vmalloc pages to userspace
 *	@vma:		vma to cover (map full range of vma)
 *	@addr:		vmalloc memory
 *	@pgoff:		number of pages into addr before first page to map
 *	@returns:	0 for success, -Exxx on failure
 *
 *	This function checks that @addr is a valid vmalloc'ed area, and
 *	that it is big enough to cover the vma. Will return failure if
 *	that criterion isn't met.
 *
 *	Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
						unsigned long pgoff)
{
	struct vm_struct *area;
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;
	int ret;

	if ((PAGE_SIZE-1) & (unsigned long)addr)
		return -EINVAL;

	read_lock(&vmlist_lock);
	area = __find_vm_area(addr);
	if (!area)
		goto out_einval_locked;

	if (!(area->flags & VM_USERMAP))
		goto out_einval_locked;

	if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
		goto out_einval_locked;
	read_unlock(&vmlist_lock);

	addr += pgoff << PAGE_SHIFT;
	do {
		struct page *page = vmalloc_to_page(addr);
		ret = vm_insert_page(vma, uaddr, page);
		if (ret)
			return ret;

		uaddr += PAGE_SIZE;
		addr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	/* Prevent "things" like memory migration? VM_flags need a cleanup... */
	vma->vm_flags |= VM_RESERVED;

	return ret;

out_einval_locked:
	read_unlock(&vmlist_lock);
	return -EINVAL;
}
EXPORT_SYMBOL(remap_vmalloc_range);
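
/*
 * Illustrative sketch only: a driver that wants to share a vmalloc'ed
 * buffer with userspace allocates it with vmalloc_user() (which sets
 * VM_USERMAP) and then services its mmap() file operation like this;
 * 'foo_buf' is a hypothetical module-level buffer:
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_vmalloc_range(vma, foo_buf, vma->vm_pgoff);
 *	}
 */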