vmalloc.c revision 822c18f2e38cbc775792ab65ace4f9198678dec9
/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 */

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/bootmem.h>

#include <asm/atomic.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>


/*** Page table manipulation functions ***/

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

static void vunmap_page_range(unsigned long addr, unsigned long end)
{
	pgd_t *pgd;
	unsigned long next;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_pud_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pte_t *pte;

	/*
	 * nr is a running index into the array which helps higher level
	 * callers keep track of where we're up to.
	 */

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = pages[*nr];

		if (WARN_ON(!pte_none(*pte)))
			return -EBUSY;
		if (WARN_ON(!page))
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*nr)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static int vmap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

/*
 * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
 * will have pfns corresponding to the "pages" array.
 *
 * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
 */
static int vmap_page_range(unsigned long start, unsigned long end,
				pgprot_t prot, struct page **pages)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = start;
	int err = 0;
	int nr = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_pud_range(pgd, addr, next, prot, pages, &nr);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
	flush_cache_vmap(start, end);

	if (unlikely(err))
		return err;
	return nr;
}

static inline int is_vmalloc_or_module_addr(const void *x)
{
	/*
	 * ARM, x86-64 and sparc64 put modules in a special place,
	 * and fall back on vmalloc() if that fails. Others
	 * just put it in the vmalloc space.
	 */
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
	unsigned long addr = (unsigned long)x;
	if (addr >= MODULES_VADDR && addr < MODULES_END)
		return 1;
#endif
	return is_vmalloc_addr(x);
}

/*
 * Walk a vmap address to the struct page it maps.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);

	/*
	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
	 * architectures that do not vmalloc module space
	 */
	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

	if (!pgd_none(*pgd)) {
		pud_t *pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd_t *pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd)) {
				pte_t *ptep, pte;

				ptep = pte_offset_map(pmd, addr);
				pte = *ptep;
				if (pte_present(pte))
					page = pte_page(pte);
				pte_unmap(ptep);
			}
		}
	}
	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);

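/*
 * Illustrative sketch (not part of this file): a caller that needs the
 * backing pages of a vmalloc() buffer, e.g. to hand them to an interface
 * that works on struct page, can walk the area page by page. The buffer
 * name and size below are made up for the example.
 *
 *	void *buf = vmalloc(8 * PAGE_SIZE);
 *	unsigned long off;
 *
 *	for (off = 0; buf && off < 8 * PAGE_SIZE; off += PAGE_SIZE) {
 *		struct page *page = vmalloc_to_page(buf + off);
 *		...
 *	}
 */
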
/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);


/*** Global kva allocator ***/

#define VM_LAZY_FREE	0x01
#define VM_LAZY_FREEING	0x02
#define VM_VM_AREA	0x04

struct vmap_area {
	unsigned long va_start;
	unsigned long va_end;
	unsigned long flags;
	struct rb_node rb_node;		/* address sorted rbtree */
	struct list_head list;		/* address sorted list */
	struct list_head purge_list;	/* "lazy purge" list */
	void *private;
	struct rcu_head rcu_head;
};

static DEFINE_SPINLOCK(vmap_area_lock);
static struct rb_root vmap_area_root = RB_ROOT;
static LIST_HEAD(vmap_area_list);

static struct vmap_area *__find_vmap_area(unsigned long addr)
{
	struct rb_node *n = vmap_area_root.rb_node;

	while (n) {
		struct vmap_area *va;

		va = rb_entry(n, struct vmap_area, rb_node);
		if (addr < va->va_start)
			n = n->rb_left;
		else if (addr > va->va_start)
			n = n->rb_right;
		else
			return va;
	}

	return NULL;
}

static void __insert_vmap_area(struct vmap_area *va)
{
	struct rb_node **p = &vmap_area_root.rb_node;
	struct rb_node *parent = NULL;
	struct rb_node *tmp;

	while (*p) {
		struct vmap_area *tmp;

		parent = *p;
		tmp = rb_entry(parent, struct vmap_area, rb_node);
		if (va->va_start < tmp->va_end)
			p = &(*p)->rb_left;
		else if (va->va_end > tmp->va_start)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&va->rb_node, parent, p);
	rb_insert_color(&va->rb_node, &vmap_area_root);

	/* address-sort this list so it is usable like the vmlist */
	tmp = rb_prev(&va->rb_node);
	if (tmp) {
		struct vmap_area *prev;
		prev = rb_entry(tmp, struct vmap_area, rb_node);
		list_add_rcu(&va->list, &prev->list);
	} else
		list_add_rcu(&va->list, &vmap_area_list);
}

static void purge_vmap_area_lazy(void);

/*
 * Allocate a region of KVA of the specified size and alignment, within the
 * vstart and vend.
 */
static struct vmap_area *alloc_vmap_area(unsigned long size,
				unsigned long align,
				unsigned long vstart, unsigned long vend,
				int node, gfp_t gfp_mask)
{
	struct vmap_area *va;
	struct rb_node *n;
	unsigned long addr;
	int purged = 0;

	BUG_ON(size & ~PAGE_MASK);

	va = kmalloc_node(sizeof(struct vmap_area),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!va))
		return ERR_PTR(-ENOMEM);

retry:
	addr = ALIGN(vstart, align);

	spin_lock(&vmap_area_lock);
	/* XXX: could have a last_hole cache */
	n = vmap_area_root.rb_node;
	if (n) {
		struct vmap_area *first = NULL;

		do {
			struct vmap_area *tmp;
			tmp = rb_entry(n, struct vmap_area, rb_node);
			if (tmp->va_end >= addr) {
				if (!first && tmp->va_start < addr + size)
					first = tmp;
				n = n->rb_left;
			} else {
				first = tmp;
				n = n->rb_right;
			}
		} while (n);

		if (!first)
			goto found;

		if (first->va_end < addr) {
			n = rb_next(&first->rb_node);
			if (n)
				first = rb_entry(n, struct vmap_area, rb_node);
			else
				goto found;
		}

		while (addr + size > first->va_start && addr + size <= vend) {
			addr = ALIGN(first->va_end + PAGE_SIZE, align);

			n = rb_next(&first->rb_node);
			if (n)
				first = rb_entry(n, struct vmap_area, rb_node);
			else
				goto found;
		}
	}
found:
	if (addr + size > vend) {
		spin_unlock(&vmap_area_lock);
		if (!purged) {
			purge_vmap_area_lazy();
			purged = 1;
			goto retry;
		}
		if (printk_ratelimit())
			printk(KERN_WARNING
				"vmap allocation for size %lu failed: "
				"use vmalloc=<size> to increase size.\n", size);
		return ERR_PTR(-EBUSY);
	}

	BUG_ON(addr & (align-1));

	va->va_start = addr;
	va->va_end = addr + size;
	va->flags = 0;
	__insert_vmap_area(va);
	spin_unlock(&vmap_area_lock);

	return va;
}

static void rcu_free_va(struct rcu_head *head)
{
	struct vmap_area *va = container_of(head, struct vmap_area, rcu_head);

	kfree(va);
}

static void __free_vmap_area(struct vmap_area *va)
{
	BUG_ON(RB_EMPTY_NODE(&va->rb_node));
	rb_erase(&va->rb_node, &vmap_area_root);
	RB_CLEAR_NODE(&va->rb_node);
	list_del_rcu(&va->list);

	call_rcu(&va->rcu_head, rcu_free_va);
}

/*
 * Free a region of KVA allocated by alloc_vmap_area
 */
static void free_vmap_area(struct vmap_area *va)
{
	spin_lock(&vmap_area_lock);
	__free_vmap_area(va);
	spin_unlock(&vmap_area_lock);
}

/*
 * Clear the pagetable entries of a given vmap_area
 */
static void unmap_vmap_area(struct vmap_area *va)
{
	vunmap_page_range(va->va_start, va->va_end);
}

static void vmap_debug_free_range(unsigned long start, unsigned long end)
{
	/*
	 * Unmap page tables and force a TLB flush immediately if
	 * CONFIG_DEBUG_PAGEALLOC is set. This catches use after free
	 * bugs similarly to those in linear kernel virtual address
	 * space after a page has been freed.
	 *
	 * All the lazy freeing logic is still retained, in order to
	 * minimise intrusiveness of this debugging feature.
	 *
	 * This is going to be *slow* (linear kernel virtual address
	 * debugging doesn't do a broadcast TLB flush so it is a lot
	 * faster).
	 */
#ifdef CONFIG_DEBUG_PAGEALLOC
	vunmap_page_range(start, end);
	flush_tlb_kernel_range(start, end);
#endif
}

/*
 * lazy_max_pages is the maximum amount of virtual address space we gather up
 * before attempting to purge with a TLB flush.
 *
 * There is a tradeoff here: a larger number will cover more kernel page tables
 * and take slightly longer to purge, but it will linearly reduce the number of
 * global TLB flushes that must be performed. It would seem natural to scale
 * this number up linearly with the number of CPUs (because vmapping activity
 * could also scale linearly with the number of CPUs), however it is likely
 * that in practice, workloads might be constrained in other ways that mean
 * vmap activity will not scale linearly with CPUs. Also, I want to be
 * conservative and not introduce a big latency on huge systems, so go with
 * a less aggressive log scale. It will still be an improvement over the old
 * code, and it will be simple to change the scale factor if we find that it
 * becomes a problem on bigger systems.
 */
static unsigned long lazy_max_pages(void)
{
	unsigned int log;

	log = fls(num_online_cpus());

	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
}

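/*
 * Worked example (illustrative, not part of the original file): with 4K
 * pages, 32MB corresponds to 8192 pages. On a 4-CPU machine fls(4) == 3,
 * so lazy_max_pages() returns 3 * 8192 = 24576 pages, i.e. up to 96MB of
 * lazily-freed vmap space may accumulate before a purge and global TLB
 * flush is triggered; a 16-CPU machine gets 5 * 32MB = 160MB.
 */
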
static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);

/*
 * Purges all lazily-freed vmap areas.
 *
 * If sync is 0 then don't purge if there is already a purge in progress.
 * If force_flush is 1, then flush kernel TLBs between *start and *end even
 * if we found no lazy vmap areas to unmap (callers can use this to optimise
 * their own TLB flushing).
 * Returns with *start = min(*start, lowest purged address)
 *              *end = max(*end, highest purged address)
 */
static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
					int sync, int force_flush)
{
	static DEFINE_MUTEX(purge_lock);
	LIST_HEAD(valist);
	struct vmap_area *va;
	int nr = 0;

	/*
	 * If sync is 0 but force_flush is 1, we'll go sync anyway but callers
	 * should not expect such behaviour. This just simplifies locking for
	 * the case that isn't actually used at the moment anyway.
	 */
	if (!sync && !force_flush) {
		if (!mutex_trylock(&purge_lock))
			return;
	} else
		mutex_lock(&purge_lock);

	rcu_read_lock();
	list_for_each_entry_rcu(va, &vmap_area_list, list) {
		if (va->flags & VM_LAZY_FREE) {
			if (va->va_start < *start)
				*start = va->va_start;
			if (va->va_end > *end)
				*end = va->va_end;
			nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
			unmap_vmap_area(va);
			list_add_tail(&va->purge_list, &valist);
			va->flags |= VM_LAZY_FREEING;
			va->flags &= ~VM_LAZY_FREE;
		}
	}
	rcu_read_unlock();

	if (nr) {
		BUG_ON(nr > atomic_read(&vmap_lazy_nr));
		atomic_sub(nr, &vmap_lazy_nr);
	}

	if (nr || force_flush)
		flush_tlb_kernel_range(*start, *end);

	if (nr) {
		spin_lock(&vmap_area_lock);
		list_for_each_entry(va, &valist, purge_list)
			__free_vmap_area(va);
		spin_unlock(&vmap_area_lock);
	}
	mutex_unlock(&purge_lock);
}

/*
 * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
 * is already purging.
 */
static void try_purge_vmap_area_lazy(void)
{
	unsigned long start = ULONG_MAX, end = 0;

	__purge_vmap_area_lazy(&start, &end, 0, 0);
}

/*
 * Kick off a purge of the outstanding lazy areas.
 */
static void purge_vmap_area_lazy(void)
{
	unsigned long start = ULONG_MAX, end = 0;

	__purge_vmap_area_lazy(&start, &end, 1, 0);
}

/*
 * Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been
 * called for the correct range previously.
 */
static void free_unmap_vmap_area_noflush(struct vmap_area *va)
{
	va->flags |= VM_LAZY_FREE;
	atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
	if (unlikely(atomic_read(&vmap_lazy_nr) > lazy_max_pages()))
		try_purge_vmap_area_lazy();
}

/*
 * Free and unmap a vmap area
 */
static void free_unmap_vmap_area(struct vmap_area *va)
{
	flush_cache_vunmap(va->va_start, va->va_end);
	free_unmap_vmap_area_noflush(va);
}

static struct vmap_area *find_vmap_area(unsigned long addr)
{
	struct vmap_area *va;

	spin_lock(&vmap_area_lock);
	va = __find_vmap_area(addr);
	spin_unlock(&vmap_area_lock);

	return va;
}

static void free_unmap_vmap_area_addr(unsigned long addr)
{
	struct vmap_area *va;

	va = find_vmap_area(addr);
	BUG_ON(!va);
	free_unmap_vmap_area(va);
}


/*** Per cpu kva allocator ***/

/*
 * vmap space is limited especially on 32 bit architectures. Ensure there is
 * room for at least 16 percpu vmap blocks per CPU.
 */
/*
 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
 * to #define VMALLOC_SPACE	(VMALLOC_END-VMALLOC_START). Guess
 * instead (we just need a rough idea)
 */
#if BITS_PER_LONG == 32
#define VMALLOC_SPACE		(128UL*1024*1024)
#else
#define VMALLOC_SPACE		(128UL*1024*1024*1024)
#endif

#define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
#define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
#define VMAP_BBMAP_BITS_MAX	1024	/* 4MB with 4K pages */
#define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
#define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
#define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
#define VMAP_BBMAP_BITS		VMAP_MIN(VMAP_BBMAP_BITS_MAX,	\
					VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
						VMALLOC_PAGES / NR_CPUS / 16))

#define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)

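/*
 * Worked example (illustrative, not part of the original file): on a 32-bit
 * build with 4K pages the guessed VMALLOC_SPACE of 128MB gives
 * VMALLOC_PAGES = 32768. With NR_CPUS = 4 that is 32768 / 4 / 16 = 512 bits
 * per block, which lies between the clamps VMAP_BBMAP_BITS_MIN (64 on
 * 32-bit) and VMAP_BBMAP_BITS_MAX (1024), so VMAP_BBMAP_BITS = 512 and each
 * vmap block covers 512 * 4K = 2MB of kernel virtual address space.
 */
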
static bool vmap_initialized __read_mostly = false;

struct vmap_block_queue {
	spinlock_t lock;
	struct list_head free;
	struct list_head dirty;
	unsigned int nr_dirty;
};

struct vmap_block {
	spinlock_t lock;
	struct vmap_area *va;
	struct vmap_block_queue *vbq;
	unsigned long free, dirty;
	DECLARE_BITMAP(alloc_map, VMAP_BBMAP_BITS);
	DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS);
	union {
		struct {
			struct list_head free_list;
			struct list_head dirty_list;
		};
		struct rcu_head rcu_head;
	};
};

/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);

/*
 * Radix tree of vmap blocks, indexed by address, to quickly find a vmap block
 * in the free path. Could get rid of this if we change the API to return a
 * "cookie" from alloc, to be passed to free. But no big deal yet.
 */
static DEFINE_SPINLOCK(vmap_block_tree_lock);
static RADIX_TREE(vmap_block_tree, GFP_ATOMIC);

/*
 * We should probably have a fallback mechanism to allocate virtual memory
 * out of partially filled vmap blocks. However vmap block sizing should be
 * fairly reasonable according to the vmalloc size, so it shouldn't be a
 * big problem.
 */

static unsigned long addr_to_vb_idx(unsigned long addr)
{
	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
	addr /= VMAP_BLOCK_SIZE;
	return addr;
}

static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	struct vmap_area *va;
	unsigned long vb_idx;
	int node, err;

	node = numa_node_id();

	vb = kmalloc_node(sizeof(struct vmap_block),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!vb))
		return ERR_PTR(-ENOMEM);

	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
					VMALLOC_START, VMALLOC_END,
					node, gfp_mask);
	if (unlikely(IS_ERR(va))) {
		kfree(vb);
		return ERR_PTR(PTR_ERR(va));
	}

	err = radix_tree_preload(gfp_mask);
	if (unlikely(err)) {
		kfree(vb);
		free_vmap_area(va);
		return ERR_PTR(err);
	}

	spin_lock_init(&vb->lock);
	vb->va = va;
	vb->free = VMAP_BBMAP_BITS;
	vb->dirty = 0;
	bitmap_zero(vb->alloc_map, VMAP_BBMAP_BITS);
	bitmap_zero(vb->dirty_map, VMAP_BBMAP_BITS);
	INIT_LIST_HEAD(&vb->free_list);
	INIT_LIST_HEAD(&vb->dirty_list);

	vb_idx = addr_to_vb_idx(va->va_start);
	spin_lock(&vmap_block_tree_lock);
	err = radix_tree_insert(&vmap_block_tree, vb_idx, vb);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(err);
	radix_tree_preload_end();

	vbq = &get_cpu_var(vmap_block_queue);
	vb->vbq = vbq;
	spin_lock(&vbq->lock);
	list_add(&vb->free_list, &vbq->free);
	spin_unlock(&vbq->lock);
	put_cpu_var(vmap_block_queue);

	return vb;
}

static void rcu_free_vb(struct rcu_head *head)
{
	struct vmap_block *vb = container_of(head, struct vmap_block, rcu_head);

	kfree(vb);
}

static void free_vmap_block(struct vmap_block *vb)
{
	struct vmap_block *tmp;
	unsigned long vb_idx;

	spin_lock(&vb->vbq->lock);
	if (!list_empty(&vb->free_list))
		list_del(&vb->free_list);
	if (!list_empty(&vb->dirty_list))
		list_del(&vb->dirty_list);
	spin_unlock(&vb->vbq->lock);

	vb_idx = addr_to_vb_idx(vb->va->va_start);
	spin_lock(&vmap_block_tree_lock);
	tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(tmp != vb);

	free_unmap_vmap_area_noflush(vb->va);
	call_rcu(&vb->rcu_head, rcu_free_vb);
}

static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	unsigned long addr = 0;
	unsigned int order;

	BUG_ON(size & ~PAGE_MASK);
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
	order = get_order(size);

again:
	rcu_read_lock();
	vbq = &get_cpu_var(vmap_block_queue);
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
		int i;

		spin_lock(&vb->lock);
		i = bitmap_find_free_region(vb->alloc_map,
						VMAP_BBMAP_BITS, order);

		if (i >= 0) {
			addr = vb->va->va_start + (i << PAGE_SHIFT);
			BUG_ON(addr_to_vb_idx(addr) !=
					addr_to_vb_idx(vb->va->va_start));
			vb->free -= 1UL << order;
			if (vb->free == 0) {
				spin_lock(&vbq->lock);
				list_del_init(&vb->free_list);
				spin_unlock(&vbq->lock);
			}
			spin_unlock(&vb->lock);
			break;
		}
		spin_unlock(&vb->lock);
	}
	put_cpu_var(vmap_block_queue);
	rcu_read_unlock();

	if (!addr) {
		vb = new_vmap_block(gfp_mask);
		if (IS_ERR(vb))
			return vb;
		goto again;
	}

	return (void *)addr;
}

static void vb_free(const void *addr, unsigned long size)
{
	unsigned long offset;
	unsigned long vb_idx;
	unsigned int order;
	struct vmap_block *vb;

	BUG_ON(size & ~PAGE_MASK);
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);

	flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size);

	order = get_order(size);

	offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);

	vb_idx = addr_to_vb_idx((unsigned long)addr);
	rcu_read_lock();
	vb = radix_tree_lookup(&vmap_block_tree, vb_idx);
	rcu_read_unlock();
	BUG_ON(!vb);

	spin_lock(&vb->lock);
	bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order);
	if (!vb->dirty) {
		spin_lock(&vb->vbq->lock);
		list_add(&vb->dirty_list, &vb->vbq->dirty);
		spin_unlock(&vb->vbq->lock);
	}
	vb->dirty += 1UL << order;
	if (vb->dirty == VMAP_BBMAP_BITS) {
		BUG_ON(vb->free || !list_empty(&vb->free_list));
		spin_unlock(&vb->lock);
		free_vmap_block(vb);
	} else
		spin_unlock(&vb->lock);
}

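/*
 * Worked example (illustrative, not part of the original file): a
 * vm_map_ram() request for 4 pages (16K with 4K pages) stays below
 * VMAP_MAX_ALLOC and is routed to vb_alloc(). get_order(16K) == 2, so
 * bitmap_find_free_region() claims 4 contiguous bits in the block's
 * alloc_map, and the returned address is va_start plus that bit index
 * shifted by PAGE_SHIFT. vb_free() later marks the same 4 bits dirty;
 * only once a block is fully dirty is it unmapped and lazily freed.
 */
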
/**
 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
 *
 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
 * to amortize TLB flushing overheads. What this means is that any page you
 * have now, may, in a former life, have been mapped into a kernel virtual
 * address by the vmap layer and so there might be some CPUs with TLB entries
 * still referencing that page (additional to the regular 1:1 kernel mapping).
 *
 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
 * be sure that none of the pages we have control over will have any aliases
 * from the vmap layer.
 */
void vm_unmap_aliases(void)
{
	unsigned long start = ULONG_MAX, end = 0;
	int cpu;
	int flush = 0;

	if (unlikely(!vmap_initialized))
		return;

	for_each_possible_cpu(cpu) {
		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
		struct vmap_block *vb;

		rcu_read_lock();
		list_for_each_entry_rcu(vb, &vbq->free, free_list) {
			int i;

			spin_lock(&vb->lock);
			i = find_first_bit(vb->dirty_map, VMAP_BBMAP_BITS);
			while (i < VMAP_BBMAP_BITS) {
				unsigned long s, e;
				int j;
				j = find_next_zero_bit(vb->dirty_map,
					VMAP_BBMAP_BITS, i);

				s = vb->va->va_start + (i << PAGE_SHIFT);
				e = vb->va->va_start + (j << PAGE_SHIFT);
				vunmap_page_range(s, e);
				flush = 1;

				if (s < start)
					start = s;
				if (e > end)
					end = e;

				i = j;
				i = find_next_bit(vb->dirty_map,
						VMAP_BBMAP_BITS, i);
			}
			spin_unlock(&vb->lock);
		}
		rcu_read_unlock();
	}

	__purge_vmap_area_lazy(&start, &end, 1, flush);
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);

/**
 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
 * @mem: the pointer returned by vm_map_ram
 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
 */
void vm_unmap_ram(const void *mem, unsigned int count)
{
	unsigned long size = count << PAGE_SHIFT;
	unsigned long addr = (unsigned long)mem;

	BUG_ON(!addr);
	BUG_ON(addr < VMALLOC_START);
	BUG_ON(addr > VMALLOC_END);
	BUG_ON(addr & (PAGE_SIZE-1));

	debug_check_no_locks_freed(mem, size);
	vmap_debug_free_range(addr, addr+size);

	if (likely(count <= VMAP_MAX_ALLOC))
		vb_free(mem, size);
	else
		free_unmap_vmap_area_addr(addr);
}
EXPORT_SYMBOL(vm_unmap_ram);

/**
 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
 * @pages: an array of pointers to the pages to be mapped
 * @count: number of pages
 * @node: prefer to allocate data structures on this node
 * @prot: memory protection to use. PAGE_KERNEL for regular RAM
 *
 * Returns: a pointer to the address that has been mapped, or %NULL on failure
 */
void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
{
	unsigned long size = count << PAGE_SHIFT;
	unsigned long addr;
	void *mem;

	if (likely(count <= VMAP_MAX_ALLOC)) {
		mem = vb_alloc(size, GFP_KERNEL);
		if (IS_ERR(mem))
			return NULL;
		addr = (unsigned long)mem;
	} else {
		struct vmap_area *va;
		va = alloc_vmap_area(size, PAGE_SIZE,
				VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
		if (IS_ERR(va))
			return NULL;

		addr = va->va_start;
		mem = (void *)addr;
	}
	if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
		vm_unmap_ram(mem, count);
		return NULL;
	}
	return mem;
}
EXPORT_SYMBOL(vm_map_ram);

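/*
 * Illustrative sketch (not part of this file): a typical transient user of
 * this API maps a short-lived array of pages, touches the mapping, and drops
 * it again. The names nr and pages[] are made up for the example.
 *
 *	void *addr = vm_map_ram(pages, nr, -1, PAGE_KERNEL);
 *	if (!addr)
 *		return -ENOMEM;
 *	memset(addr, 0, nr << PAGE_SHIFT);
 *	vm_unmap_ram(addr, nr);
 *
 * The count passed to vm_unmap_ram() must match the one passed to
 * vm_map_ram(), since small requests are served by the per-cpu block
 * allocator and large ones by alloc_vmap_area().
 */
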
	 */
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		va = alloc_bootmem(sizeof(struct vmap_area));
		va->flags = tmp->flags | VM_VM_AREA;
		va->va_start = (unsigned long)tmp->addr;
		va->va_end = va->va_start + tmp->size;
		__insert_vmap_area(va);
	}
	vmap_initialized = true;
}

void unmap_kernel_range(unsigned long addr, unsigned long size)
{
	unsigned long end = addr + size;
	vunmap_page_range(addr, end);
	flush_tlb_kernel_range(addr, end);
}

int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
	unsigned long addr = (unsigned long)area->addr;
	unsigned long end = addr + area->size - PAGE_SIZE;
	int err;

	err = vmap_page_range(addr, end, prot, *pages);
	if (err > 0) {
		*pages += err;
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL_GPL(map_vm_area);

/*** Old vmalloc interfaces ***/
DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;

static struct vm_struct *__get_vm_area_node(unsigned long size,
		unsigned long flags, unsigned long start, unsigned long end,
		int node, gfp_t gfp_mask, void *caller)
{
	struct vmap_area *va;
	struct vm_struct *area;
	struct vm_struct *tmp, **p;
	unsigned long align = 1;

	BUG_ON(in_interrupt());
	if (flags & VM_IOREMAP) {
		int bit = fls(size);

		if (bit > IOREMAP_MAX_ORDER)
			bit = IOREMAP_MAX_ORDER;
		else if (bit < PAGE_SHIFT)
			bit = PAGE_SHIFT;

		align = 1ul << bit;
	}

	size = PAGE_ALIGN(size);
	if (unlikely(!size))
		return NULL;

	area = kmalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!area))
		return NULL;

	/*
	 * We always allocate a guard page.
	 */
	size += PAGE_SIZE;

	va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
	if (IS_ERR(va)) {
		kfree(area);
		return NULL;
	}

	area->flags = flags;
	area->addr = (void *)va->va_start;
	area->size = size;
	area->pages = NULL;
	area->nr_pages = 0;
	area->phys_addr = 0;
	area->caller = caller;
	va->private = area;
	va->flags |= VM_VM_AREA;

	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr >= area->addr)
			break;
	}
	area->next = *p;
	*p = area;
	write_unlock(&vmlist_lock);

	return area;
}

struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end)
{
	return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
						__builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(__get_vm_area);

/**
 * get_vm_area - reserve a contiguous kernel virtual area
 * @size: size of the area
 * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 * Search an area of @size in the kernel virtual mapping area,
 * and reserve it for our purposes. Returns the area descriptor
 * on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
				-1, GFP_KERNEL, __builtin_return_address(0));
}

struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
				void *caller)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
						-1, GFP_KERNEL, caller);
}

struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
				   int node, gfp_t gfp_mask)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node,
				  gfp_mask, __builtin_return_address(0));
}

static struct vm_struct *find_vm_area(const void *addr)
{
	struct vmap_area *va;

	va = find_vmap_area((unsigned long)addr);
	if (va && va->flags & VM_VM_AREA)
		return va->private;

	return NULL;
}

/**
 * remove_vm_area - find and remove a continuous kernel virtual area
 * @addr: base address
 *
 * Search for the kernel VM area starting at @addr, and remove it.
 * This function returns the found VM area, but using it is NOT safe
 * on SMP machines, except for its size or flags.
 */
struct vm_struct *remove_vm_area(const void *addr)
{
	struct vmap_area *va;

	va = find_vmap_area((unsigned long)addr);
	if (va && va->flags & VM_VM_AREA) {
		struct vm_struct *vm = va->private;
		struct vm_struct *tmp, **p;

		vmap_debug_free_range(va->va_start, va->va_end);
		free_unmap_vmap_area(va);
		vm->size -= PAGE_SIZE;

		write_lock(&vmlist_lock);
		for (p = &vmlist; (tmp = *p) != vm; p = &tmp->next)
			;
		*p = tmp->next;
		write_unlock(&vmlist_lock);

		return vm;
	}
	return NULL;
}

static void __vunmap(const void *addr, int deallocate_pages)
{
	struct vm_struct *area;

	if (!addr)
		return;

	if ((PAGE_SIZE-1) & (unsigned long)addr) {
		WARN(1, KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
		return;
	}

	area = remove_vm_area(addr);
	if (unlikely(!area)) {
		WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
				addr);
		return;
	}

	debug_check_no_locks_freed(addr, area->size);
	debug_check_no_obj_freed(addr, area->size);

	if (deallocate_pages) {
		int i;

		for (i = 0; i < area->nr_pages; i++) {
			struct page *page = area->pages[i];

			BUG_ON(!page);
			__free_page(page);
		}

		if (area->flags & VM_VPAGES)
			vfree(area->pages);
		else
			kfree(area->pages);
	}

	kfree(area);
	return;
}

/**
 * vfree - release memory allocated by vmalloc()
 * @addr: memory base address
 *
 * Free the virtually continuous memory area starting at @addr, as
 * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
 * NULL, no operation is performed.
 *
 * Must not be called in interrupt context.
 */
void vfree(const void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 1);
}
EXPORT_SYMBOL(vfree);

/**
 * vunmap - release virtual mapping obtained by vmap()
 * @addr: memory base address
 *
 * Free the virtually contiguous memory area starting at @addr,
 * which was created from the page array passed to vmap().
 *
 * Must not be called in interrupt context.
 */
void vunmap(const void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 0);
}
EXPORT_SYMBOL(vunmap);

/**
 * vmap - map an array of pages into virtually contiguous space
 * @pages: array of page pointers
 * @count: number of pages to map
 * @flags: vm_area->flags
 * @prot: page protection for the mapping
 *
 * Maps @count pages from @pages into contiguous kernel virtual
 * space.
 */
void *vmap(struct page **pages, unsigned int count,
		unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;

	if (count > num_physpages)
		return NULL;

	area = get_vm_area_caller((count << PAGE_SHIFT), flags,
					__builtin_return_address(0));
	if (!area)
		return NULL;

	if (map_vm_area(area, prot, &pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}
EXPORT_SYMBOL(vmap);

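/*
 * Illustrative sketch (not part of this file): vmap() is the non-lazy
 * counterpart of vm_map_ram() and keeps the mapping around until vunmap()
 * is called. The pages[] array and nr below are made up for the example.
 *
 *	void *addr = vmap(pages, nr, VM_MAP, PAGE_KERNEL);
 *	if (!addr)
 *		return -ENOMEM;
 *	...
 *	vunmap(addr);
 */
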
static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
			    int node, void *caller);
static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
				 pgprot_t prot, int node, void *caller)
{
	struct page **pages;
	unsigned int nr_pages, array_size, i;

	nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
	array_size = (nr_pages * sizeof(struct page *));

	area->nr_pages = nr_pages;
	/* Please note that the recursion is strictly bounded. */
	if (array_size > PAGE_SIZE) {
		pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
				PAGE_KERNEL, node, caller);
		area->flags |= VM_VPAGES;
	} else {
		pages = kmalloc_node(array_size,
				(gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO,
				node);
	}
	area->pages = pages;
	area->caller = caller;
	if (!area->pages) {
		remove_vm_area(area->addr);
		kfree(area);
		return NULL;
	}

	for (i = 0; i < area->nr_pages; i++) {
		struct page *page;

		if (node < 0)
			page = alloc_page(gfp_mask);
		else
			page = alloc_pages_node(node, gfp_mask, 0);

		if (unlikely(!page)) {
			/* Successfully allocated i pages, free them in __vunmap() */
			area->nr_pages = i;
			goto fail;
		}
		area->pages[i] = page;
	}

	if (map_vm_area(area, prot, &pages))
		goto fail;
	return area->addr;

fail:
	vfree(area->addr);
	return NULL;
}

void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_area_node(area, gfp_mask, prot, -1,
					__builtin_return_address(0));
}

/**
 * __vmalloc_node - allocate virtually contiguous memory
 * @size: allocation size
 * @gfp_mask: flags for the page level allocator
 * @prot: protection mask for the allocated pages
 * @node: node to use for allocation or -1
 * @caller: caller's return address
 *
 * Allocate enough pages to cover @size from the page level
 * allocator with @gfp_mask flags. Map them into contiguous
 * kernel virtual space, using a pagetable protection of @prot.
 */
static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
			    int node, void *caller)
{
	struct vm_struct *area;

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > num_physpages)
		return NULL;

	area = __get_vm_area_node(size, VM_ALLOC, VMALLOC_START, VMALLOC_END,
						node, gfp_mask, caller);

	if (!area)
		return NULL;

	return __vmalloc_area_node(area, gfp_mask, prot, node, caller);
}

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_node(size, gfp_mask, prot, -1,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(__vmalloc);

/**
 * vmalloc - allocate virtually contiguous memory
 * @size: allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
					-1, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc);

/**
 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
 * @size: allocation size
 *
 * The resulting memory area is zeroed so it can be mapped to userspace
 * without leaking data.
 */
void *vmalloc_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			     PAGE_KERNEL, -1, __builtin_return_address(0));
	if (ret) {
		area = find_vm_area(ret);
		area->flags |= VM_USERMAP;
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_user);

/**
 * vmalloc_node - allocate memory on a specific node
 * @size: allocation size
 * @node: numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
					node, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_node);

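/*
 * Illustrative sketch (not part of this file): the common pattern for large,
 * physically non-contiguous allocations. The struct name and nr_entries are
 * made up for the example; vmalloc_node() takes the same shape with an
 * explicit NUMA node.
 *
 *	struct foo *table = vmalloc(nr_entries * sizeof(*table));
 *	if (!table)
 *		return -ENOMEM;
 *	...
 *	vfree(table);
 *
 * Note that vfree() must not be called from interrupt context.
 */
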
#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 * vmalloc_exec - allocate virtually contiguous, executable memory
 * @size: allocation size
 *
 * Kernel-internal function to allocate enough pages to cover @size
 * from the page level allocator and map them into contiguous and
 * executable kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */

void *vmalloc_exec(unsigned long size)
{
	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
			      -1, __builtin_return_address(0));
}

#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
#define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL
#else
#define GFP_VMALLOC32 GFP_KERNEL
#endif

/**
 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
 * @size: allocation size
 *
 * Allocate enough 32bit PA addressable pages to cover @size from the
 * page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc_node(size, GFP_VMALLOC32, PAGE_KERNEL,
			      -1, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_32);

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 * @size: allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 */
void *vmalloc_32_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc_node(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
			     -1, __builtin_return_address(0));
	if (ret) {
		area = find_vm_area(ret);
		area->flags |= VM_USERMAP;
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_32_user);

long vread(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*buf = *addr;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}

long vwrite(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*addr = *buf;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}

/**
 * remap_vmalloc_range - map vmalloc pages to userspace
 * @vma: vma to cover (map full range of vma)
 * @addr: vmalloc memory
 * @pgoff: number of pages into addr before first page to map
 *
 * Returns: 0 for success, -Exxx on failure
 *
 * This function checks that addr is a valid vmalloc'ed area, and
 * that it is big enough to cover the vma. Will return failure if
 * those criteria aren't met.
 *
 * Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
						unsigned long pgoff)
{
	struct vm_struct *area;
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;

	if ((PAGE_SIZE-1) & (unsigned long)addr)
		return -EINVAL;

	area = find_vm_area(addr);
	if (!area)
		return -EINVAL;

	if (!(area->flags & VM_USERMAP))
		return -EINVAL;

	if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
		return -EINVAL;

	addr += pgoff << PAGE_SHIFT;
	do {
		struct page *page = vmalloc_to_page(addr);
		int ret;

		ret = vm_insert_page(vma, uaddr, page);
		if (ret)
			return ret;

		uaddr += PAGE_SIZE;
		addr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	/* Prevent "things" like memory migration? VM_flags need a cleanup... */
	vma->vm_flags |= VM_RESERVED;

	return 0;
}
EXPORT_SYMBOL(remap_vmalloc_range);

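/*
 * Illustrative sketch (not part of this file): remap_vmalloc_range() is
 * typically called from a driver's ->mmap handler to expose a buffer that
 * was allocated with vmalloc_user() (so that VM_USERMAP is set). The
 * my_dev_mmap name and buf field are made up for the example.
 *
 *	static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_dev *dev = file->private_data;
 *
 *		return remap_vmalloc_range(vma, dev->buf, vma->vm_pgoff);
 *	}
 */
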
1678 */ 1679 if (apply_to_page_range(&init_mm, (unsigned long)area->addr, 1680 area->size, f, NULL)) { 1681 free_vm_area(area); 1682 return NULL; 1683 } 1684 1685 /* Make sure the pagetables are constructed in process kernel 1686 mappings */ 1687 vmalloc_sync_all(); 1688 1689 return area; 1690} 1691EXPORT_SYMBOL_GPL(alloc_vm_area); 1692 1693void free_vm_area(struct vm_struct *area) 1694{ 1695 struct vm_struct *ret; 1696 ret = remove_vm_area(area->addr); 1697 BUG_ON(ret != area); 1698 kfree(area); 1699} 1700EXPORT_SYMBOL_GPL(free_vm_area); 1701 1702 1703#ifdef CONFIG_PROC_FS 1704static void *s_start(struct seq_file *m, loff_t *pos) 1705{ 1706 loff_t n = *pos; 1707 struct vm_struct *v; 1708 1709 read_lock(&vmlist_lock); 1710 v = vmlist; 1711 while (n > 0 && v) { 1712 n--; 1713 v = v->next; 1714 } 1715 if (!n) 1716 return v; 1717 1718 return NULL; 1719 1720} 1721 1722static void *s_next(struct seq_file *m, void *p, loff_t *pos) 1723{ 1724 struct vm_struct *v = p; 1725 1726 ++*pos; 1727 return v->next; 1728} 1729 1730static void s_stop(struct seq_file *m, void *p) 1731{ 1732 read_unlock(&vmlist_lock); 1733} 1734 1735static void show_numa_info(struct seq_file *m, struct vm_struct *v) 1736{ 1737 if (NUMA_BUILD) { 1738 unsigned int nr, *counters = m->private; 1739 1740 if (!counters) 1741 return; 1742 1743 memset(counters, 0, nr_node_ids * sizeof(unsigned int)); 1744 1745 for (nr = 0; nr < v->nr_pages; nr++) 1746 counters[page_to_nid(v->pages[nr])]++; 1747 1748 for_each_node_state(nr, N_HIGH_MEMORY) 1749 if (counters[nr]) 1750 seq_printf(m, " N%u=%u", nr, counters[nr]); 1751 } 1752} 1753 1754static int s_show(struct seq_file *m, void *p) 1755{ 1756 struct vm_struct *v = p; 1757 1758 seq_printf(m, "0x%p-0x%p %7ld", 1759 v->addr, v->addr + v->size, v->size); 1760 1761 if (v->caller) { 1762 char buff[KSYM_SYMBOL_LEN]; 1763 1764 seq_putc(m, ' '); 1765 sprint_symbol(buff, (unsigned long)v->caller); 1766 seq_puts(m, buff); 1767 } 1768 1769 if (v->nr_pages) 1770 seq_printf(m, " pages=%d", v->nr_pages); 1771 1772 if (v->phys_addr) 1773 seq_printf(m, " phys=%lx", v->phys_addr); 1774 1775 if (v->flags & VM_IOREMAP) 1776 seq_printf(m, " ioremap"); 1777 1778 if (v->flags & VM_ALLOC) 1779 seq_printf(m, " vmalloc"); 1780 1781 if (v->flags & VM_MAP) 1782 seq_printf(m, " vmap"); 1783 1784 if (v->flags & VM_USERMAP) 1785 seq_printf(m, " user"); 1786 1787 if (v->flags & VM_VPAGES) 1788 seq_printf(m, " vpages"); 1789 1790 show_numa_info(m, v); 1791 seq_putc(m, '\n'); 1792 return 0; 1793} 1794 1795static const struct seq_operations vmalloc_op = { 1796 .start = s_start, 1797 .next = s_next, 1798 .stop = s_stop, 1799 .show = s_show, 1800}; 1801 1802static int vmalloc_open(struct inode *inode, struct file *file) 1803{ 1804 unsigned int *ptr = NULL; 1805 int ret; 1806 1807 if (NUMA_BUILD) 1808 ptr = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL); 1809 ret = seq_open(file, &vmalloc_op); 1810 if (!ret) { 1811 struct seq_file *m = file->private_data; 1812 m->private = ptr; 1813 } else 1814 kfree(ptr); 1815 return ret; 1816} 1817 1818static const struct file_operations proc_vmalloc_operations = { 1819 .open = vmalloc_open, 1820 .read = seq_read, 1821 .llseek = seq_lseek, 1822 .release = seq_release_private, 1823}; 1824 1825static int __init proc_vmalloc_init(void) 1826{ 1827 proc_create("vmallocinfo", S_IRUSR, NULL, &proc_vmalloc_operations); 1828 return 0; 1829} 1830module_init(proc_vmalloc_init); 1831#endif 1832 1833