vmalloc.c revision d44e0780bcc47c9b8851099c0dfc1dda3c9db5a9
/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

#include <linux/vmalloc.h>

#include <asm/uaccess.h>
#include <asm/tlbflush.h>


DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static inline void vunmap_pmd_range(pud_t *pud, unsigned long addr,
						unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static inline void vunmap_pud_range(pgd_t *pgd, unsigned long addr,
						unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

void unmap_vm_area(struct vm_struct *area)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = (unsigned long) area->addr;
	unsigned long end = addr + area->size;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	flush_cache_vunmap(addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_pud_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
	flush_tlb_kernel_range((unsigned long) area->addr, end);
}

static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
			unsigned long end, pgprot_t prot, struct page ***pages)
{
	pte_t *pte;

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = **pages;
		WARN_ON(!pte_none(*pte));
		if (!page)
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*pages)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static inline int vmap_pmd_range(pud_t *pud, unsigned long addr,
			unsigned long end, pgprot_t prot, struct page ***pages)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int vmap_pud_range(pgd_t *pgd, unsigned long addr,
			unsigned long end, pgprot_t prot, struct page ***pages)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = (unsigned long) area->addr;
	unsigned long end = addr + area->size - PAGE_SIZE;
	int err;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_pud_range(pgd, addr, next, prot, pages);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
	flush_cache_vmap((unsigned long) area->addr, end);
	return err;
}

struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end, int node)
{
	struct vm_struct **p, *tmp, *area;
	unsigned long align = 1;
	unsigned long addr;

	if (flags & VM_IOREMAP) {
		int bit = fls(size);

		if (bit > IOREMAP_MAX_ORDER)
			bit = IOREMAP_MAX_ORDER;
		else if (bit < PAGE_SHIFT)
			bit = PAGE_SHIFT;

		align = 1ul << bit;
	}
	addr = ALIGN(start, align);
	size = PAGE_ALIGN(size);

	area = kmalloc_node(sizeof(*area), GFP_KERNEL, node);
	if (unlikely(!area))
		return NULL;

	if (unlikely(!size)) {
		kfree(area);
		return NULL;
	}

	/*
	 * We always allocate a guard page.
	 */
	size += PAGE_SIZE;

	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if ((unsigned long)tmp->addr < addr) {
			if ((unsigned long)tmp->addr + tmp->size >= addr)
				addr = ALIGN(tmp->size +
					     (unsigned long)tmp->addr, align);
			continue;
		}
		if ((size + addr) < addr)
			goto out;
		if (size + addr <= (unsigned long)tmp->addr)
			goto found;
		addr = ALIGN(tmp->size + (unsigned long)tmp->addr, align);
		if (addr > end - size)
			goto out;
	}

found:
	area->next = *p;
	*p = area;

	area->flags = flags;
	area->addr = (void *)addr;
	area->size = size;
	area->pages = NULL;
	area->nr_pages = 0;
	area->phys_addr = 0;
	write_unlock(&vmlist_lock);

	return area;

out:
	write_unlock(&vmlist_lock);
	kfree(area);
	if (printk_ratelimit())
		printk(KERN_WARNING "allocation failed: out of vmalloc space - use vmalloc=<size> to increase size.\n");
	return NULL;
}

struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end)
{
	return __get_vm_area_node(size, flags, start, end, -1);
}

/**
 *	get_vm_area  -  reserve a contiguous kernel virtual area
 *
 *	@size:		size of the area
 *	@flags:		%VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 *	Search an area of @size in the kernel virtual mapping area,
 *	and reserve it for our purposes.  Returns the area descriptor
 *	on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END);
}
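
/*
 * Editor's note: illustrative sketch, not part of the original file.  A
 * typical user of get_vm_area() is an ioremap()-style helper: it reserves a
 * chunk of kernel virtual address space and then establishes its own page
 * table entries inside that range.  The helper below is hypothetical and
 * only shows the reserve/check pattern; a real ioremap() would also map the
 * physical range and deal with caching attributes.
 */
static void __iomem *example_reserve_io_window(unsigned long size)
{
	struct vm_struct *area;

	/* Reserve virtual space (plus the implicit guard page). */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;

	/* The caller would now map physical pages at area->addr. */
	return (void __iomem *) area->addr;
}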

struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags, int node)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node);
}

/* Caller must hold vmlist_lock */
struct vm_struct *__remove_vm_area(void *addr)
{
	struct vm_struct **p, *tmp;

	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr == addr)
			goto found;
	}
	return NULL;

found:
	unmap_vm_area(tmp);
	*p = tmp->next;

	/*
	 * Remove the guard page.
	 */
	tmp->size -= PAGE_SIZE;
	return tmp;
}

/**
 *	remove_vm_area  -  find and remove a contiguous kernel virtual area
 *
 *	@addr:		base address
 *
 *	Search for the kernel VM area starting at @addr, and remove it.
 *	This function returns the found VM area, but using it is NOT safe
 *	on SMP machines, except for its size or flags.
 */
struct vm_struct *remove_vm_area(void *addr)
{
	struct vm_struct *v;
	write_lock(&vmlist_lock);
	v = __remove_vm_area(addr);
	write_unlock(&vmlist_lock);
	return v;
}
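
/*
 * Editor's note: illustrative sketch, not part of the original file.  The
 * counterpart of the reservation sketch above is an iounmap()-style
 * teardown: remove_vm_area() unmaps the range, unlinks the vm_struct from
 * vmlist and hands it back so the caller can free it.  The function name is
 * hypothetical.
 */
static void example_release_io_window(void __iomem *vaddr)
{
	struct vm_struct *area;

	area = remove_vm_area((void __force *) vaddr);
	if (!area) {
		WARN_ON(1);
		return;
	}
	kfree(area);
}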

void __vunmap(void *addr, int deallocate_pages)
{
	struct vm_struct *area;

	if (!addr)
		return;

	if ((PAGE_SIZE-1) & (unsigned long)addr) {
		printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
		WARN_ON(1);
		return;
	}

	area = remove_vm_area(addr);
	if (unlikely(!area)) {
		printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
				addr);
		WARN_ON(1);
		return;
	}

	if (deallocate_pages) {
		int i;

		for (i = 0; i < area->nr_pages; i++) {
			if (unlikely(!area->pages[i]))
				BUG();
			__free_page(area->pages[i]);
		}

		if (area->nr_pages > PAGE_SIZE/sizeof(struct page *))
			vfree(area->pages);
		else
			kfree(area->pages);
	}

	kfree(area);
	return;
}

/**
 *	vfree  -  release memory allocated by vmalloc()
 *
 *	@addr:		memory base address
 *
 *	Free the virtually contiguous memory area starting at @addr, as
 *	obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
 *	NULL, no operation is performed.
 *
 *	Must not be called in interrupt context.
 */
void vfree(void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 1);
}
EXPORT_SYMBOL(vfree);
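
/*
 * Editor's note: illustrative sketch, not part of the original file.  Since
 * vfree(NULL) is a no-op, an error or teardown path can free a
 * possibly-unallocated buffer unconditionally.  The structure and function
 * names are hypothetical.
 */
struct example_ctx {
	void *big_table;	/* vmalloc()ed, may still be NULL */
};

static void example_ctx_destroy(struct example_ctx *ctx)
{
	vfree(ctx->big_table);	/* safe even if the allocation never happened */
	kfree(ctx);
}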

/**
 *	vunmap  -  release virtual mapping obtained by vmap()
 *
 *	@addr:		memory base address
 *
 *	Free the virtually contiguous memory area starting at @addr,
 *	which was created from the page array passed to vmap().
 *
 *	Must not be called in interrupt context.
 */
void vunmap(void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 0);
}
EXPORT_SYMBOL(vunmap);

/**
 *	vmap  -  map an array of pages into virtually contiguous space
 *
 *	@pages:		array of page pointers
 *	@count:		number of pages to map
 *	@flags:		vm_area->flags
 *	@prot:		page protection for the mapping
 *
 *	Maps @count pages from @pages into contiguous kernel virtual
 *	space.
 */
void *vmap(struct page **pages, unsigned int count,
		unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;

	if (count > num_physpages)
		return NULL;

	area = get_vm_area((count << PAGE_SHIFT), flags);
	if (!area)
		return NULL;
	if (map_vm_area(area, prot, &pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}
EXPORT_SYMBOL(vmap);
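
/*
 * Editor's note: illustrative sketch, not part of the original file.  vmap()
 * is useful when a caller already owns scattered pages and needs one linear
 * kernel address for them.  This helper (hypothetical name) allocates the
 * pages itself only to keep the example self-contained; VM_MAP is assumed to
 * be the conventional vm_struct flag for vmap()ed areas.  Note that vunmap()
 * drops the mapping but does not free the pages.
 */
static void example_vmap_usage(void)
{
	struct page *pages[4];
	void *vaddr;
	int i;

	for (i = 0; i < 4; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			goto free_pages;
	}

	vaddr = vmap(pages, 4, VM_MAP, PAGE_KERNEL);
	if (vaddr) {
		memset(vaddr, 0, 4 * PAGE_SIZE);	/* use the linear view */
		vunmap(vaddr);
	}

free_pages:
	while (i--)
		__free_page(pages[i]);
}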

void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
				pgprot_t prot, int node)
{
	struct page **pages;
	unsigned int nr_pages, array_size, i;

	nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
	array_size = (nr_pages * sizeof(struct page *));

	area->nr_pages = nr_pages;
	/* Please note that the recursion is strictly bounded. */
	if (array_size > PAGE_SIZE)
		pages = __vmalloc_node(array_size, gfp_mask, PAGE_KERNEL, node);
	else
		pages = kmalloc_node(array_size, (gfp_mask & ~__GFP_HIGHMEM), node);
	area->pages = pages;
	if (!area->pages) {
		remove_vm_area(area->addr);
		kfree(area);
		return NULL;
	}
	memset(area->pages, 0, array_size);

	for (i = 0; i < area->nr_pages; i++) {
		if (node < 0)
			area->pages[i] = alloc_page(gfp_mask);
		else
			area->pages[i] = alloc_pages_node(node, gfp_mask, 0);
		if (unlikely(!area->pages[i])) {
			/* Successfully allocated i pages, free them in __vunmap() */
			area->nr_pages = i;
			goto fail;
		}
	}

	if (map_vm_area(area, prot, &pages))
		goto fail;
	return area->addr;

fail:
	vfree(area->addr);
	return NULL;
}

void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_area_node(area, gfp_mask, prot, -1);
}

/**
 *	__vmalloc_node  -  allocate virtually contiguous memory
 *
 *	@size:		allocation size
 *	@gfp_mask:	flags for the page level allocator
 *	@prot:		protection mask for the allocated pages
 *	@node:		node to use for allocation or -1
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator with @gfp_mask flags.  Map them into contiguous
 *	kernel virtual space, using a pagetable protection of @prot.
 */
void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
			int node)
{
	struct vm_struct *area;

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > num_physpages)
		return NULL;

	area = get_vm_area_node(size, VM_ALLOC, node);
	if (!area)
		return NULL;

	return __vmalloc_area_node(area, gfp_mask, prot, node);
}
EXPORT_SYMBOL(__vmalloc_node);
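
/*
 * Editor's note: illustrative sketch, not part of the original file.  The
 * extra parameters of __vmalloc_node() let a caller choose both the page
 * allocator flags and the target NUMA node, while keeping PAGE_KERNEL for
 * ordinary data.  The helper name below is hypothetical.
 */
static void *example_alloc_near_node(unsigned long len, int nid)
{
	/* Zeroed, virtually contiguous memory backed by pages from @nid
	 * (or any node when nid == -1); __GFP_ZERO is honoured by the
	 * per-page allocations done in __vmalloc_area_node(). */
	return __vmalloc_node(len, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			      PAGE_KERNEL, nid);
}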

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_node(size, gfp_mask, prot, -1);
}
EXPORT_SYMBOL(__vmalloc);

/**
 *	vmalloc  -  allocate virtually contiguous memory
 *
 *	@size:		allocation size
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc);
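
/*
 * Editor's note: illustrative sketch, not part of the original file.  The
 * classic vmalloc() pattern: a large allocation that only needs to be
 * virtually contiguous, paired with vfree().  Names are hypothetical.
 */
static int example_build_table(unsigned long nr_entries)
{
	unsigned long *table;

	table = vmalloc(nr_entries * sizeof(unsigned long));
	if (!table)
		return -ENOMEM;

	memset(table, 0, nr_entries * sizeof(unsigned long));
	/* ... fill and use the table ... */
	vfree(table);
	return 0;
}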

/**
 *	vmalloc_node  -  allocate memory on a specific node
 *
 *	@size:		allocation size
 *	@node:		numa node
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, node);
}
EXPORT_SYMBOL(vmalloc_node);
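
/*
 * Editor's note: illustrative sketch, not part of the original file.  A
 * subsystem that keeps a large per-node structure can ask vmalloc_node()
 * for memory whose backing pages live on that node, so later accesses from
 * local CPUs stay node-local.  The struct and function names are
 * hypothetical.
 */
struct example_node_cache {
	unsigned long nr_slots;
	void *slots[0];		/* old-style flexible array */
};

static struct example_node_cache *example_cache_alloc(unsigned long nr, int nid)
{
	struct example_node_cache *c;

	c = vmalloc_node(sizeof(*c) + nr * sizeof(void *), nid);
	if (c) {
		memset(c, 0, sizeof(*c) + nr * sizeof(void *));
		c->nr_slots = nr;
	}
	return c;
}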

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 *	vmalloc_exec  -  allocate virtually contiguous, executable memory
 *
 *	@size:		allocation size
 *
 *	Kernel-internal function to allocate enough pages to cover @size
 *	from the page level allocator and map them into contiguous and
 *	executable kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc_exec(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}

/**
 *	vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 *
 *	@size:		allocation size
 *
 *	Allocate enough 32bit physically addressable pages to cover @size
 *	from the page level allocator and map them into contiguous kernel
 *	virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32);
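
/*
 * Editor's note: illustrative sketch, not part of the original file.
 * vmalloc_32() suits large buffers that a 32-bit-capable device will later
 * reach page by page (for example via a scatter-gather list built from the
 * backing pages); the memory is still only virtually contiguous.  The
 * helper name and parameters are hypothetical.
 */
static void *example_alloc_capture_buffer(unsigned long frames,
					  unsigned long frame_size)
{
	return vmalloc_32(frames * frame_size);
}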

long vread(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*buf = *addr;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}

long vwrite(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*addr = *buf;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}