vmalloc.c revision 80e93effce55044c5a7fa96e8b313640a80bd4e9
/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

#include <linux/vmalloc.h>

#include <asm/uaccess.h>
#include <asm/tlbflush.h>


DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static inline void vunmap_pmd_range(pud_t *pud, unsigned long addr,
						unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static inline void vunmap_pud_range(pgd_t *pgd, unsigned long addr,
						unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

void unmap_vm_area(struct vm_struct *area)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = (unsigned long) area->addr;
	unsigned long end = addr + area->size;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	flush_cache_vunmap(addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_pud_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
	flush_tlb_kernel_range((unsigned long) area->addr, end);
}

static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
			unsigned long end, pgprot_t prot, struct page ***pages)
{
	pte_t *pte;

	pte = pte_alloc_kernel(&init_mm, pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = **pages;
		WARN_ON(!pte_none(*pte));
		if (!page)
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*pages)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static inline int vmap_pmd_range(pud_t *pud, unsigned long addr,
			unsigned long end, pgprot_t prot, struct page ***pages)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int vmap_pud_range(pgd_t *pgd, unsigned long addr,
			unsigned long end, pgprot_t prot, struct page ***pages)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}
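/*
 * Illustrative sketch (not part of the original file): the six helpers
 * above all follow the kernel's standard page-table walk skeleton, one
 * function per level.  At each level the loop has this shape:
 *
 *	pgd = pgd_offset_k(addr);
 *	do {
 *		next = pgd_addr_end(addr, end);
 *		... recurse into the pud level for [addr, next) ...
 *	} while (pgd++, addr = next, addr != end);
 *
 * pgd_addr_end() clamps each step so that a single iteration never
 * crosses a pgd entry boundary; pud_addr_end() and pmd_addr_end() do
 * the same at the lower levels, and the pte loop simply steps by
 * PAGE_SIZE.
 */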
int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = (unsigned long) area->addr;
	unsigned long end = addr + area->size - PAGE_SIZE;
	int err;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	spin_lock(&init_mm.page_table_lock);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_pud_range(pgd, addr, next, prot, pages);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
	spin_unlock(&init_mm.page_table_lock);
	flush_cache_vmap((unsigned long) area->addr, end);
	return err;
}

struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end)
{
	struct vm_struct **p, *tmp, *area;
	unsigned long align = 1;
	unsigned long addr;

	if (flags & VM_IOREMAP) {
		int bit = fls(size);

		if (bit > IOREMAP_MAX_ORDER)
			bit = IOREMAP_MAX_ORDER;
		else if (bit < PAGE_SHIFT)
			bit = PAGE_SHIFT;

		align = 1ul << bit;
	}
	addr = ALIGN(start, align);
	size = PAGE_ALIGN(size);

	area = kmalloc(sizeof(*area), GFP_KERNEL);
	if (unlikely(!area))
		return NULL;

	if (unlikely(!size)) {
		kfree(area);
		return NULL;
	}

	/*
	 * We always allocate a guard page.
	 */
	size += PAGE_SIZE;

	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if ((unsigned long)tmp->addr < addr) {
			if ((unsigned long)tmp->addr + tmp->size >= addr)
				addr = ALIGN(tmp->size +
					     (unsigned long)tmp->addr, align);
			continue;
		}
		if ((size + addr) < addr)
			goto out;
		if (size + addr <= (unsigned long)tmp->addr)
			goto found;
		addr = ALIGN(tmp->size + (unsigned long)tmp->addr, align);
		if (addr > end - size)
			goto out;
	}

found:
	area->next = *p;
	*p = area;

	area->flags = flags;
	area->addr = (void *)addr;
	area->size = size;
	area->pages = NULL;
	area->nr_pages = 0;
	area->phys_addr = 0;
	write_unlock(&vmlist_lock);

	return area;

out:
	write_unlock(&vmlist_lock);
	kfree(area);
	if (printk_ratelimit())
		printk(KERN_WARNING "allocation failed: out of vmalloc space - use vmalloc=<size> to increase size.\n");
	return NULL;
}

/**
 * get_vm_area - reserve a contiguous kernel virtual area
 *
 * @size: size of the area
 * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 * Search an area of @size in the kernel virtual mapping area,
 * and reserve it for our purposes.  Returns the area descriptor
 * on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END);
}
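/*
 * Illustrative note (not part of the original file): the ->size of the
 * descriptor returned above includes the guard page appended by
 * __get_vm_area(), so the mappable part of the reservation is one page
 * smaller than ->size:
 *
 *	struct vm_struct *area;
 *	unsigned long usable;
 *
 *	area = get_vm_area(3 * PAGE_SIZE, VM_ALLOC);
 *	if (!area)
 *		return -ENOMEM;
 *	usable = area->size - PAGE_SIZE;	(3 pages; ->size covers 4)
 *
 * This is also why map_vm_area() stops at area->size - PAGE_SIZE and
 * why __remove_vm_area() below subtracts PAGE_SIZE before returning
 * the area.
 */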
/* Caller must hold vmlist_lock */
struct vm_struct *__remove_vm_area(void *addr)
{
	struct vm_struct **p, *tmp;

	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr == addr)
			goto found;
	}
	return NULL;

found:
	unmap_vm_area(tmp);
	*p = tmp->next;

	/*
	 * Remove the guard page.
	 */
	tmp->size -= PAGE_SIZE;
	return tmp;
}

/**
 * remove_vm_area - find and remove a contiguous kernel virtual area
 *
 * @addr: base address
 *
 * Search for the kernel VM area starting at @addr, and remove it.
 * This function returns the found VM area, but using it is NOT safe
 * on SMP machines, except for its size or flags.
 */
struct vm_struct *remove_vm_area(void *addr)
{
	struct vm_struct *v;
	write_lock(&vmlist_lock);
	v = __remove_vm_area(addr);
	write_unlock(&vmlist_lock);
	return v;
}

void __vunmap(void *addr, int deallocate_pages)
{
	struct vm_struct *area;

	if (!addr)
		return;

	if ((PAGE_SIZE-1) & (unsigned long)addr) {
		printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
		WARN_ON(1);
		return;
	}

	area = remove_vm_area(addr);
	if (unlikely(!area)) {
		printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
				addr);
		WARN_ON(1);
		return;
	}

	if (deallocate_pages) {
		int i;

		for (i = 0; i < area->nr_pages; i++) {
			if (unlikely(!area->pages[i]))
				BUG();
			__free_page(area->pages[i]);
		}

		if (area->nr_pages > PAGE_SIZE/sizeof(struct page *))
			vfree(area->pages);
		else
			kfree(area->pages);
	}

	kfree(area);
	return;
}

/**
 * vfree - release memory allocated by vmalloc()
 *
 * @addr: memory base address
 *
 * Free the virtually contiguous memory area starting at @addr, as
 * obtained from vmalloc(), vmalloc_32() or __vmalloc().  If @addr is
 * NULL, no operation is performed.
 *
 * Must not be called in interrupt context.
 */
void vfree(void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 1);
}

EXPORT_SYMBOL(vfree);

/**
 * vunmap - release virtual mapping obtained by vmap()
 *
 * @addr: memory base address
 *
 * Free the virtually contiguous memory area starting at @addr,
 * which was created from the page array passed to vmap().
 *
 * Must not be called in interrupt context.
 */
void vunmap(void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 0);
}

EXPORT_SYMBOL(vunmap);

/**
 * vmap - map an array of pages into virtually contiguous space
 *
 * @pages: array of page pointers
 * @count: number of pages to map
 * @flags: vm_area->flags
 * @prot: page protection for the mapping
 *
 * Maps @count pages from @pages into contiguous kernel virtual
 * space.
 */
void *vmap(struct page **pages, unsigned int count,
		unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;

	if (count > num_physpages)
		return NULL;

	area = get_vm_area((count << PAGE_SHIFT), flags);
	if (!area)
		return NULL;
	if (map_vm_area(area, prot, &pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}

EXPORT_SYMBOL(vmap);
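/*
 * Illustrative usage sketch (not part of the original file): vmap()
 * makes an array of independently allocated pages appear virtually
 * contiguous, and vunmap() tears the mapping down without freeing the
 * pages.  Error handling of the allocation loop is omitted here, and
 * VM_ALLOC is an arbitrary choice: any flag other than VM_IOREMAP is
 * simply recorded in the area descriptor.
 *
 *	struct page *pg[4];
 *	void *va;
 *	int i;
 *
 *	for (i = 0; i < 4; i++)
 *		pg[i] = alloc_page(GFP_KERNEL);
 *	va = vmap(pg, 4, VM_ALLOC, PAGE_KERNEL);
 *	if (va) {
 *		memset(va, 0, 4 * PAGE_SIZE);
 *		vunmap(va);
 *	}
 *	for (i = 0; i < 4; i++)
 *		__free_page(pg[i]);
 */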
void *__vmalloc_area(struct vm_struct *area, unsigned int __nocast gfp_mask,
			pgprot_t prot)
{
	struct page **pages;
	unsigned int nr_pages, array_size, i;

	nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
	array_size = (nr_pages * sizeof(struct page *));

	area->nr_pages = nr_pages;
	/* Please note that the recursion is strictly bounded. */
	if (array_size > PAGE_SIZE)
		pages = __vmalloc(array_size, gfp_mask, PAGE_KERNEL);
	else
		pages = kmalloc(array_size, (gfp_mask & ~__GFP_HIGHMEM));
	area->pages = pages;
	if (!area->pages) {
		remove_vm_area(area->addr);
		kfree(area);
		return NULL;
	}
	memset(area->pages, 0, array_size);

	for (i = 0; i < area->nr_pages; i++) {
		area->pages[i] = alloc_page(gfp_mask);
		if (unlikely(!area->pages[i])) {
			/* Successfully allocated i pages, free them in __vunmap() */
			area->nr_pages = i;
			goto fail;
		}
	}

	if (map_vm_area(area, prot, &pages))
		goto fail;
	return area->addr;

fail:
	vfree(area->addr);
	return NULL;
}

/**
 * __vmalloc - allocate virtually contiguous memory
 *
 * @size: allocation size
 * @gfp_mask: flags for the page level allocator
 * @prot: protection mask for the allocated pages
 *
 * Allocate enough pages to cover @size from the page level
 * allocator with @gfp_mask flags.  Map them into contiguous
 * kernel virtual space, using a pagetable protection of @prot.
 */
void *__vmalloc(unsigned long size, unsigned int __nocast gfp_mask, pgprot_t prot)
{
	struct vm_struct *area;

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > num_physpages)
		return NULL;

	area = get_vm_area(size, VM_ALLOC);
	if (!area)
		return NULL;

	return __vmalloc_area(area, gfp_mask, prot);
}

EXPORT_SYMBOL(__vmalloc);

/**
 * vmalloc - allocate virtually contiguous memory
 *
 * @size: allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over the page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}

EXPORT_SYMBOL(vmalloc);

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 * vmalloc_exec - allocate virtually contiguous, executable memory
 *
 * @size: allocation size
 *
 * Kernel-internal function to allocate enough pages to cover @size
 * from the page level allocator and map them into contiguous and
 * executable kernel virtual space.
 *
 * For tight control over the page level allocator and protection flags
 * use __vmalloc() instead.
 */

void *vmalloc_exec(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}
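/*
 * Illustrative usage sketch (not part of the original file): vmalloc()
 * is the usual way to obtain a large buffer that is virtually but not
 * necessarily physically contiguous, and vfree() releases both the
 * mapping and the backing pages.
 *
 *	unsigned char *buf;
 *
 *	buf = vmalloc(64 * 1024);
 *	if (!buf)
 *		return -ENOMEM;
 *	... use buf[0 .. 64*1024 - 1] ...
 *	vfree(buf);
 */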
/**
 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
 *
 * @size: allocation size
 *
 * Allocate enough 32bit PA addressable pages to cover @size from the
 * page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}

EXPORT_SYMBOL(vmalloc_32);

long vread(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*buf = *addr;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}

long vwrite(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*addr = *buf;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}
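/*
 * Illustrative usage sketch (not part of the original file): vread()
 * copies bytes out of the vmalloc address range into a kernel buffer,
 * zero-filling any bytes that fall between mapped areas; the
 * /dev/kmem read path is a caller of this interface.  "vaddr" below
 * stands for a hypothetical address inside the vmalloc range.
 *
 *	char kbuf[128];
 *	long n;
 *
 *	n = vread(kbuf, vaddr, sizeof(kbuf));
 *	(n bytes of kbuf were filled in, from mapped data or zeroes)
 */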