vmalloc.c revision 3ee48b6af49cf534ca2f481ecc484b156a41451d
/*
 * linux/mm/vmalloc.c
 *
 * Copyright (C) 1993 Linus Torvalds
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 * SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 * Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 * Numa awareness, Christoph Lameter, SGI, June 2005
 */

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/pfn.h>
#include <linux/kmemleak.h>
#include <asm/atomic.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>

bool vmap_lazy_unmap __read_mostly = true;

/*** Page table manipulation functions ***/

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

static void vunmap_page_range(unsigned long addr, unsigned long end)
{
	pgd_t *pgd;
	unsigned long next;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_pud_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pte_t *pte;

	/*
	 * nr is a running index into the array which helps higher level
	 * callers keep track of where we're up to.
	 */

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = pages[*nr];

		if (WARN_ON(!pte_none(*pte)))
			return -EBUSY;
		if (WARN_ON(!page))
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*nr)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static int vmap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

/*
 * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
 * will have pfns corresponding to the "pages" array.
 *
 * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
 */
static int vmap_page_range_noflush(unsigned long start, unsigned long end,
				   pgprot_t prot, struct page **pages)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = start;
	int err = 0;
	int nr = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_pud_range(pgd, addr, next, prot, pages, &nr);
		if (err)
			return err;
	} while (pgd++, addr = next, addr != end);

	return nr;
}

static int vmap_page_range(unsigned long start, unsigned long end,
			   pgprot_t prot, struct page **pages)
{
	int ret;

	ret = vmap_page_range_noflush(start, end, prot, pages);
	flush_cache_vmap(start, end);
	return ret;
}

int is_vmalloc_or_module_addr(const void *x)
{
	/*
	 * ARM, x86-64 and sparc64 put modules in a special place,
	 * and fall back on vmalloc() if that fails. Others
	 * just put it in the vmalloc space.
	 */
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
	unsigned long addr = (unsigned long)x;
	if (addr >= MODULES_VADDR && addr < MODULES_END)
		return 1;
#endif
	return is_vmalloc_addr(x);
}

/*
 * Walk a vmap address to the struct page it maps.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);

	/*
	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
	 * architectures that do not vmalloc module space
	 */
	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

	if (!pgd_none(*pgd)) {
		pud_t *pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd_t *pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd)) {
				pte_t *ptep, pte;

				ptep = pte_offset_map(pmd, addr);
				pte = *ptep;
				if (pte_present(pte))
					page = pte_page(pte);
				pte_unmap(ptep);
			}
		}
	}
	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);


/*** Global kva allocator ***/

#define VM_LAZY_FREE	0x01
#define VM_LAZY_FREEING	0x02
#define VM_VM_AREA	0x04

struct vmap_area {
	unsigned long va_start;
	unsigned long va_end;
	unsigned long flags;
	struct rb_node rb_node;		/* address sorted rbtree */
	struct list_head list;		/* address sorted list */
	struct list_head purge_list;	/* "lazy purge" list */
	void *private;
	struct rcu_head rcu_head;
};

static DEFINE_SPINLOCK(vmap_area_lock);
static struct rb_root vmap_area_root = RB_ROOT;
static LIST_HEAD(vmap_area_list);
static unsigned long vmap_area_pcpu_hole;

static struct vmap_area *__find_vmap_area(unsigned long addr)
{
	struct rb_node *n = vmap_area_root.rb_node;

	while (n) {
		struct vmap_area *va;

		va = rb_entry(n, struct vmap_area, rb_node);
		if (addr < va->va_start)
			n = n->rb_left;
		else if (addr > va->va_start)
			n = n->rb_right;
		else
			return va;
	}

	return NULL;
}

static void __insert_vmap_area(struct vmap_area *va)
{
	struct rb_node **p = &vmap_area_root.rb_node;
	struct rb_node *parent = NULL;
	struct rb_node *tmp;

	while (*p) {
		struct vmap_area *tmp;

		parent = *p;
		tmp = rb_entry(parent, struct vmap_area, rb_node);
		if (va->va_start < tmp->va_end)
			p = &(*p)->rb_left;
		else if (va->va_end > tmp->va_start)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&va->rb_node, parent, p);
	rb_insert_color(&va->rb_node, &vmap_area_root);

	/* address-sort this list so it is usable like the vmlist */
	tmp = rb_prev(&va->rb_node);
	if (tmp) {
		struct vmap_area *prev;
		prev = rb_entry(tmp, struct vmap_area, rb_node);
		list_add_rcu(&va->list, &prev->list);
	} else
		list_add_rcu(&va->list, &vmap_area_list);
}

static void purge_vmap_area_lazy(void);

/*
 * Allocate a region of KVA of the specified size and alignment, within the
 * vstart and vend.
 */
static struct vmap_area *alloc_vmap_area(unsigned long size,
				unsigned long align,
				unsigned long vstart, unsigned long vend,
				int node, gfp_t gfp_mask)
{
	struct vmap_area *va;
	struct rb_node *n;
	unsigned long addr;
	int purged = 0;

	BUG_ON(!size);
	BUG_ON(size & ~PAGE_MASK);

	va = kmalloc_node(sizeof(struct vmap_area),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!va))
		return ERR_PTR(-ENOMEM);

retry:
	addr = ALIGN(vstart, align);

	spin_lock(&vmap_area_lock);
	if (addr + size - 1 < addr)
		goto overflow;

	/* XXX: could have a last_hole cache */
	n = vmap_area_root.rb_node;
	if (n) {
		struct vmap_area *first = NULL;

		do {
			struct vmap_area *tmp;
			tmp = rb_entry(n, struct vmap_area, rb_node);
			if (tmp->va_end >= addr) {
				if (!first && tmp->va_start < addr + size)
					first = tmp;
				n = n->rb_left;
			} else {
				first = tmp;
				n = n->rb_right;
			}
		} while (n);

		if (!first)
			goto found;

		if (first->va_end < addr) {
			n = rb_next(&first->rb_node);
			if (n)
				first = rb_entry(n, struct vmap_area, rb_node);
			else
				goto found;
		}

		while (addr + size > first->va_start && addr + size <= vend) {
			addr = ALIGN(first->va_end + PAGE_SIZE, align);
			if (addr + size - 1 < addr)
				goto overflow;

			n = rb_next(&first->rb_node);
			if (n)
				first = rb_entry(n, struct vmap_area, rb_node);
			else
				goto found;
		}
	}
found:
	if (addr + size > vend) {
overflow:
		spin_unlock(&vmap_area_lock);
		if (!purged) {
			purge_vmap_area_lazy();
			purged = 1;
			goto retry;
		}
		if (printk_ratelimit())
			printk(KERN_WARNING
				"vmap allocation for size %lu failed: "
				"use vmalloc=<size> to increase size.\n", size);
		kfree(va);
		return ERR_PTR(-EBUSY);
	}

	BUG_ON(addr & (align-1));

	va->va_start = addr;
	va->va_end = addr + size;
	va->flags = 0;
	__insert_vmap_area(va);
	spin_unlock(&vmap_area_lock);

	return va;
}

static void rcu_free_va(struct rcu_head *head)
{
	struct vmap_area *va = container_of(head, struct vmap_area, rcu_head);

	kfree(va);
}

static void __free_vmap_area(struct vmap_area *va)
{
	BUG_ON(RB_EMPTY_NODE(&va->rb_node));
	rb_erase(&va->rb_node, &vmap_area_root);
	RB_CLEAR_NODE(&va->rb_node);
	list_del_rcu(&va->list);

	/*
	 * Track the highest possible candidate for pcpu area
	 * allocation. Areas outside of vmalloc area can be returned
	 * here too, consider only end addresses which fall inside
	 * vmalloc area proper.
	 */
	if (va->va_end > VMALLOC_START && va->va_end <= VMALLOC_END)
		vmap_area_pcpu_hole = max(vmap_area_pcpu_hole, va->va_end);

	call_rcu(&va->rcu_head, rcu_free_va);
}

/*
 * Free a region of KVA allocated by alloc_vmap_area
 */
static void free_vmap_area(struct vmap_area *va)
{
	spin_lock(&vmap_area_lock);
	__free_vmap_area(va);
	spin_unlock(&vmap_area_lock);
}

/*
 * Clear the pagetable entries of a given vmap_area
 */
static void unmap_vmap_area(struct vmap_area *va)
{
	vunmap_page_range(va->va_start, va->va_end);
}

static void vmap_debug_free_range(unsigned long start, unsigned long end)
{
	/*
	 * Unmap page tables and force a TLB flush immediately if
	 * CONFIG_DEBUG_PAGEALLOC is set. This catches use after free
	 * bugs similarly to those in linear kernel virtual address
	 * space after a page has been freed.
	 *
	 * All the lazy freeing logic is still retained, in order to
	 * minimise intrusiveness of this debugging feature.
	 *
	 * This is going to be *slow* (linear kernel virtual address
	 * debugging doesn't do a broadcast TLB flush so it is a lot
	 * faster).
	 */
#ifdef CONFIG_DEBUG_PAGEALLOC
	vunmap_page_range(start, end);
	flush_tlb_kernel_range(start, end);
#endif
}

/*
 * lazy_max_pages is the maximum amount of virtual address space we gather up
 * before attempting to purge with a TLB flush.
 *
 * There is a tradeoff here: a larger number will cover more kernel page tables
 * and take slightly longer to purge, but it will linearly reduce the number of
 * global TLB flushes that must be performed. It would seem natural to scale
 * this number up linearly with the number of CPUs (because vmapping activity
 * could also scale linearly with the number of CPUs), however it is likely
 * that in practice, workloads might be constrained in other ways that mean
 * vmap activity will not scale linearly with CPUs. Also, I want to be
 * conservative and not introduce a big latency on huge systems, so go with
 * a less aggressive log scale. It will still be an improvement over the old
 * code, and it will be simple to change the scale factor if we find that it
 * becomes a problem on bigger systems.
 */
static unsigned long lazy_max_pages(void)
{
	unsigned int log;

	if (!vmap_lazy_unmap)
		return 0;

	log = fls(num_online_cpus());

	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
}
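
/*
 * Worked example (editorial note, not part of the original file; the
 * configuration is hypothetical): with 4K pages and 8 online CPUs,
 * fls(8) == 4, so lazy_max_pages() returns 4 * (32MB / 4K) = 32768
 * pages, i.e. up to 128MB of kernel virtual space may sit lazily
 * unmapped before a purge and global TLB flush is forced.
 */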

static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);

/* for per-CPU blocks */
static void purge_fragmented_blocks_allcpus(void);

/*
 * called before a call to iounmap() if the caller wants vm_area_struct's
 * immediately freed.
 */
void set_iounmap_nonlazy(void)
{
	atomic_set(&vmap_lazy_nr, lazy_max_pages()+1);
}

/*
 * Purges all lazily-freed vmap areas.
 *
 * If sync is 0 then don't purge if there is already a purge in progress.
 * If force_flush is 1, then flush kernel TLBs between *start and *end even
 * if we found no lazy vmap areas to unmap (callers can use this to optimise
 * their own TLB flushing).
 * Returns with *start = min(*start, lowest purged address)
 *              *end = max(*end, highest purged address)
 */
static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
					int sync, int force_flush)
{
	static DEFINE_SPINLOCK(purge_lock);
	LIST_HEAD(valist);
	struct vmap_area *va;
	struct vmap_area *n_va;
	int nr = 0;

	/*
	 * If sync is 0 but force_flush is 1, we'll go sync anyway but callers
	 * should not expect such behaviour. This just simplifies locking for
	 * the case that isn't actually used at the moment anyway.
	 */
	if (!sync && !force_flush) {
		if (!spin_trylock(&purge_lock))
			return;
	} else
		spin_lock(&purge_lock);

	if (sync)
		purge_fragmented_blocks_allcpus();

	rcu_read_lock();
	list_for_each_entry_rcu(va, &vmap_area_list, list) {
		if (va->flags & VM_LAZY_FREE) {
			if (va->va_start < *start)
				*start = va->va_start;
			if (va->va_end > *end)
				*end = va->va_end;
			nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
			unmap_vmap_area(va);
			list_add_tail(&va->purge_list, &valist);
			va->flags |= VM_LAZY_FREEING;
			va->flags &= ~VM_LAZY_FREE;
		}
	}
	rcu_read_unlock();

	if (nr)
		atomic_sub(nr, &vmap_lazy_nr);

	if (nr || force_flush)
		flush_tlb_kernel_range(*start, *end);

	if (nr) {
		spin_lock(&vmap_area_lock);
		list_for_each_entry_safe(va, n_va, &valist, purge_list)
			__free_vmap_area(va);
		spin_unlock(&vmap_area_lock);
	}
	spin_unlock(&purge_lock);
}

/*
 * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
 * is already purging.
 */
static void try_purge_vmap_area_lazy(void)
{
	unsigned long start = ULONG_MAX, end = 0;

	__purge_vmap_area_lazy(&start, &end, 0, 0);
}

/*
 * Kick off a purge of the outstanding lazy areas.
 */
static void purge_vmap_area_lazy(void)
{
	unsigned long start = ULONG_MAX, end = 0;

	__purge_vmap_area_lazy(&start, &end, 1, 0);
}

/*
 * Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been
 * called for the correct range previously.
 */
static void free_unmap_vmap_area_noflush(struct vmap_area *va)
{
	va->flags |= VM_LAZY_FREE;
	atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
	if (unlikely(atomic_read(&vmap_lazy_nr) > lazy_max_pages()))
		try_purge_vmap_area_lazy();
}

/*
 * Free and unmap a vmap area
 */
static void free_unmap_vmap_area(struct vmap_area *va)
{
	flush_cache_vunmap(va->va_start, va->va_end);
	free_unmap_vmap_area_noflush(va);
}

static struct vmap_area *find_vmap_area(unsigned long addr)
{
	struct vmap_area *va;

	spin_lock(&vmap_area_lock);
	va = __find_vmap_area(addr);
	spin_unlock(&vmap_area_lock);

	return va;
}

static void free_unmap_vmap_area_addr(unsigned long addr)
{
	struct vmap_area *va;

	va = find_vmap_area(addr);
	BUG_ON(!va);
	free_unmap_vmap_area(va);
}


/*** Per cpu kva allocator ***/

/*
 * vmap space is limited especially on 32 bit architectures. Ensure there is
 * room for at least 16 percpu vmap blocks per CPU.
 */
/*
 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
 * to #define VMALLOC_SPACE		(VMALLOC_END-VMALLOC_START). Guess
 * instead (we just need a rough idea)
 */
#if BITS_PER_LONG == 32
#define VMALLOC_SPACE		(128UL*1024*1024)
#else
#define VMALLOC_SPACE		(128UL*1024*1024*1024)
#endif

#define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
#define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
#define VMAP_BBMAP_BITS_MAX	1024	/* 4MB with 4K pages */
#define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
#define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
#define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
#define VMAP_BBMAP_BITS		VMAP_MIN(VMAP_BBMAP_BITS_MAX,	\
					VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
					VMALLOC_PAGES / NR_CPUS / 16))

#define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)
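
/*
 * Worked example (editorial note, not part of the original file; the
 * NR_CPUS == 4 configuration is hypothetical): on 32-bit with 4K pages,
 * VMALLOC_PAGES is 128MB / 4K = 32768, so VMALLOC_PAGES / 4 / 16 = 512
 * bits. That lies between VMAP_BBMAP_BITS_MIN (2 * 32 = 64) and
 * VMAP_BBMAP_BITS_MAX (1024), so VMAP_BBMAP_BITS == 512 and
 * VMAP_BLOCK_SIZE == 2MB per per-CPU block.
 */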

static bool vmap_initialized __read_mostly = false;

struct vmap_block_queue {
	spinlock_t lock;
	struct list_head free;
};

struct vmap_block {
	spinlock_t lock;
	struct vmap_area *va;
	struct vmap_block_queue *vbq;
	unsigned long free, dirty;
	DECLARE_BITMAP(alloc_map, VMAP_BBMAP_BITS);
	DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS);
	struct list_head free_list;
	struct rcu_head rcu_head;
	struct list_head purge;
};

/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);

/*
 * Radix tree of vmap blocks, indexed by address, to quickly find a vmap block
 * in the free path. Could get rid of this if we change the API to return a
 * "cookie" from alloc, to be passed to free. But no big deal yet.
 */
static DEFINE_SPINLOCK(vmap_block_tree_lock);
static RADIX_TREE(vmap_block_tree, GFP_ATOMIC);

/*
 * We should probably have a fallback mechanism to allocate virtual memory
 * out of partially filled vmap blocks. However vmap block sizing should be
 * fairly reasonable according to the vmalloc size, so it shouldn't be a
 * big problem.
 */

static unsigned long addr_to_vb_idx(unsigned long addr)
{
	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
	addr /= VMAP_BLOCK_SIZE;
	return addr;
}

static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	struct vmap_area *va;
	unsigned long vb_idx;
	int node, err;

	node = numa_node_id();

	vb = kmalloc_node(sizeof(struct vmap_block),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!vb))
		return ERR_PTR(-ENOMEM);

	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
					VMALLOC_START, VMALLOC_END,
					node, gfp_mask);
	if (unlikely(IS_ERR(va))) {
		kfree(vb);
		return ERR_CAST(va);
	}

	err = radix_tree_preload(gfp_mask);
	if (unlikely(err)) {
		kfree(vb);
		free_vmap_area(va);
		return ERR_PTR(err);
	}

	spin_lock_init(&vb->lock);
	vb->va = va;
	vb->free = VMAP_BBMAP_BITS;
	vb->dirty = 0;
	bitmap_zero(vb->alloc_map, VMAP_BBMAP_BITS);
	bitmap_zero(vb->dirty_map, VMAP_BBMAP_BITS);
	INIT_LIST_HEAD(&vb->free_list);

	vb_idx = addr_to_vb_idx(va->va_start);
	spin_lock(&vmap_block_tree_lock);
	err = radix_tree_insert(&vmap_block_tree, vb_idx, vb);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(err);
	radix_tree_preload_end();

	vbq = &get_cpu_var(vmap_block_queue);
	vb->vbq = vbq;
	spin_lock(&vbq->lock);
	list_add_rcu(&vb->free_list, &vbq->free);
	spin_unlock(&vbq->lock);
	put_cpu_var(vmap_block_queue);

	return vb;
}

static void rcu_free_vb(struct rcu_head *head)
{
	struct vmap_block *vb = container_of(head, struct vmap_block, rcu_head);

	kfree(vb);
}

static void free_vmap_block(struct vmap_block *vb)
{
	struct vmap_block *tmp;
	unsigned long vb_idx;

	vb_idx = addr_to_vb_idx(vb->va->va_start);
	spin_lock(&vmap_block_tree_lock);
	tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(tmp != vb);

	free_unmap_vmap_area_noflush(vb->va);
	call_rcu(&vb->rcu_head, rcu_free_vb);
}

static void purge_fragmented_blocks(int cpu)
{
	LIST_HEAD(purge);
	struct vmap_block *vb;
	struct vmap_block *n_vb;
	struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);

	rcu_read_lock();
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {

		if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
			continue;

		spin_lock(&vb->lock);
		if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
			vb->free = 0; /* prevent further allocs after releasing lock */
			vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
			bitmap_fill(vb->alloc_map, VMAP_BBMAP_BITS);
			bitmap_fill(vb->dirty_map, VMAP_BBMAP_BITS);
			spin_lock(&vbq->lock);
			list_del_rcu(&vb->free_list);
			spin_unlock(&vbq->lock);
			spin_unlock(&vb->lock);
			list_add_tail(&vb->purge, &purge);
		} else
			spin_unlock(&vb->lock);
	}
	rcu_read_unlock();

	list_for_each_entry_safe(vb, n_vb, &purge, purge) {
		list_del(&vb->purge);
		free_vmap_block(vb);
	}
}

static void purge_fragmented_blocks_thiscpu(void)
{
	purge_fragmented_blocks(smp_processor_id());
}

static void purge_fragmented_blocks_allcpus(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		purge_fragmented_blocks(cpu);
}

static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	unsigned long addr = 0;
	unsigned int order;
	int purge = 0;

	BUG_ON(size & ~PAGE_MASK);
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
	order = get_order(size);

again:
	rcu_read_lock();
	vbq = &get_cpu_var(vmap_block_queue);
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
		int i;

		spin_lock(&vb->lock);
		if (vb->free < 1UL << order)
			goto next;

		i = bitmap_find_free_region(vb->alloc_map,
						VMAP_BBMAP_BITS, order);

		if (i < 0) {
			if (vb->free + vb->dirty == VMAP_BBMAP_BITS) {
				/* fragmented and no outstanding allocations */
				BUG_ON(vb->dirty != VMAP_BBMAP_BITS);
				purge = 1;
			}
			goto next;
		}
		addr = vb->va->va_start + (i << PAGE_SHIFT);
		BUG_ON(addr_to_vb_idx(addr) !=
				addr_to_vb_idx(vb->va->va_start));
		vb->free -= 1UL << order;
		if (vb->free == 0) {
			spin_lock(&vbq->lock);
			list_del_rcu(&vb->free_list);
			spin_unlock(&vbq->lock);
		}
		spin_unlock(&vb->lock);
		break;
next:
		spin_unlock(&vb->lock);
	}

	if (purge)
		purge_fragmented_blocks_thiscpu();

	put_cpu_var(vmap_block_queue);
	rcu_read_unlock();

	if (!addr) {
		vb = new_vmap_block(gfp_mask);
		if (IS_ERR(vb))
			return vb;
		goto again;
	}

	return (void *)addr;
}
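
/*
 * Sizing note (editorial, not part of the original file): vb_alloc()
 * carves power-of-two chunks out of a block, so an 8K request with 4K
 * pages has get_order() == 1 and consumes 2 bits of alloc_map. The
 * largest request it will ever see is VMAP_MAX_ALLOC pages, because
 * vm_map_ram() below falls back to alloc_vmap_area() above that.
 */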

static void vb_free(const void *addr, unsigned long size)
{
	unsigned long offset;
	unsigned long vb_idx;
	unsigned int order;
	struct vmap_block *vb;

	BUG_ON(size & ~PAGE_MASK);
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);

	flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size);

	order = get_order(size);

	offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);

	vb_idx = addr_to_vb_idx((unsigned long)addr);
	rcu_read_lock();
	vb = radix_tree_lookup(&vmap_block_tree, vb_idx);
	rcu_read_unlock();
	BUG_ON(!vb);

	spin_lock(&vb->lock);
	BUG_ON(bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order));

	vb->dirty += 1UL << order;
	if (vb->dirty == VMAP_BBMAP_BITS) {
		BUG_ON(vb->free);
		spin_unlock(&vb->lock);
		free_vmap_block(vb);
	} else
		spin_unlock(&vb->lock);
}

/**
 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
 *
 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
 * to amortize TLB flushing overheads. What this means is that any page you
 * have now, may, in a former life, have been mapped into kernel virtual
 * address by the vmap layer and so there might be some CPUs with TLB entries
 * still referencing that page (additional to the regular 1:1 kernel mapping).
 *
 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
 * be sure that none of the pages we have control over will have any aliases
 * from the vmap layer.
 */
void vm_unmap_aliases(void)
{
	unsigned long start = ULONG_MAX, end = 0;
	int cpu;
	int flush = 0;

	if (unlikely(!vmap_initialized))
		return;

	for_each_possible_cpu(cpu) {
		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
		struct vmap_block *vb;

		rcu_read_lock();
		list_for_each_entry_rcu(vb, &vbq->free, free_list) {
			int i;

			spin_lock(&vb->lock);
			i = find_first_bit(vb->dirty_map, VMAP_BBMAP_BITS);
			while (i < VMAP_BBMAP_BITS) {
				unsigned long s, e;
				int j;
				j = find_next_zero_bit(vb->dirty_map,
					VMAP_BBMAP_BITS, i);

				s = vb->va->va_start + (i << PAGE_SHIFT);
				e = vb->va->va_start + (j << PAGE_SHIFT);
				vunmap_page_range(s, e);
				flush = 1;

				if (s < start)
					start = s;
				if (e > end)
					end = e;

				i = j;
				i = find_next_bit(vb->dirty_map,
							VMAP_BBMAP_BITS, i);
			}
			spin_unlock(&vb->lock);
		}
		rcu_read_unlock();
	}

	__purge_vmap_area_lazy(&start, &end, 1, flush);
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);

/**
 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
 * @mem: the pointer returned by vm_map_ram
 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
 */
void vm_unmap_ram(const void *mem, unsigned int count)
{
	unsigned long size = count << PAGE_SHIFT;
	unsigned long addr = (unsigned long)mem;

	BUG_ON(!addr);
	BUG_ON(addr < VMALLOC_START);
	BUG_ON(addr > VMALLOC_END);
	BUG_ON(addr & (PAGE_SIZE-1));

	debug_check_no_locks_freed(mem, size);
	vmap_debug_free_range(addr, addr+size);

	if (likely(count <= VMAP_MAX_ALLOC))
		vb_free(mem, size);
	else
		free_unmap_vmap_area_addr(addr);
}
EXPORT_SYMBOL(vm_unmap_ram);

/**
 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
 * @pages: an array of pointers to the pages to be mapped
 * @count: number of pages
 * @node: prefer to allocate data structures on this node
 * @prot: memory protection to use. PAGE_KERNEL for regular RAM
 *
 * Returns: a pointer to the address that has been mapped, or %NULL on failure
 */
void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
{
	unsigned long size = count << PAGE_SHIFT;
	unsigned long addr;
	void *mem;

	if (likely(count <= VMAP_MAX_ALLOC)) {
		mem = vb_alloc(size, GFP_KERNEL);
		if (IS_ERR(mem))
			return NULL;
		addr = (unsigned long)mem;
	} else {
		struct vmap_area *va;
		va = alloc_vmap_area(size, PAGE_SIZE,
				VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
		if (IS_ERR(va))
			return NULL;

		addr = va->va_start;
		mem = (void *)addr;
	}
	if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
		vm_unmap_ram(mem, count);
		return NULL;
	}
	return mem;
}
EXPORT_SYMBOL(vm_map_ram);
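
/*
 * Usage sketch (editorial, not part of the original file; "pages", "nr"
 * and "buf" are hypothetical caller state): a transient linear mapping
 * of a page array pairs vm_map_ram() with vm_unmap_ram() for the same
 * count:
 *
 *	void *p = vm_map_ram(pages, nr, -1, PAGE_KERNEL);
 *	if (p) {
 *		memcpy(buf, p, nr << PAGE_SHIFT);	// hypothetical access
 *		vm_unmap_ram(p, nr);
 *	}
 */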

/**
 * vm_area_register_early - register vmap area early during boot
 * @vm: vm_struct to register
 * @align: requested alignment
 *
 * This function is used to register kernel vm area before
 * vmalloc_init() is called. @vm->size and @vm->flags should contain
 * proper values on entry and other fields should be zero. On return,
 * vm->addr contains the allocated address.
 *
 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
 */
void __init vm_area_register_early(struct vm_struct *vm, size_t align)
{
	static size_t vm_init_off __initdata;
	unsigned long addr;

	addr = ALIGN(VMALLOC_START + vm_init_off, align);
	vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;

	vm->addr = (void *)addr;

	vm->next = vmlist;
	vmlist = vm;
}

void __init vmalloc_init(void)
{
	struct vmap_area *va;
	struct vm_struct *tmp;
	int i;

	for_each_possible_cpu(i) {
		struct vmap_block_queue *vbq;

		vbq = &per_cpu(vmap_block_queue, i);
		spin_lock_init(&vbq->lock);
		INIT_LIST_HEAD(&vbq->free);
	}

	/* Import existing vmlist entries. */
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		va = kzalloc(sizeof(struct vmap_area), GFP_NOWAIT);
		va->flags = tmp->flags | VM_VM_AREA;
		va->va_start = (unsigned long)tmp->addr;
		va->va_end = va->va_start + tmp->size;
		__insert_vmap_area(va);
	}

	vmap_area_pcpu_hole = VMALLOC_END;

	vmap_initialized = true;
}

/**
 * map_kernel_range_noflush - map kernel VM area with the specified pages
 * @addr: start of the VM area to map
 * @size: size of the VM area to map
 * @prot: page protection flags to use
 * @pages: pages to map
 *
 * Map PFN_UP(@size) pages at @addr. The VM area @addr and @size
 * specify should have been allocated using get_vm_area() and its
 * friends.
 *
 * NOTE:
 * This function does NOT do any cache flushing. The caller is
 * responsible for calling flush_cache_vmap() on to-be-mapped areas
 * before calling this function.
 *
 * RETURNS:
 * The number of pages mapped on success, -errno on failure.
 */
int map_kernel_range_noflush(unsigned long addr, unsigned long size,
			     pgprot_t prot, struct page **pages)
{
	return vmap_page_range_noflush(addr, addr + size, prot, pages);
}

/**
 * unmap_kernel_range_noflush - unmap kernel VM area
 * @addr: start of the VM area to unmap
 * @size: size of the VM area to unmap
 *
 * Unmap PFN_UP(@size) pages at @addr. The VM area @addr and @size
 * specify should have been allocated using get_vm_area() and its
 * friends.
 *
 * NOTE:
 * This function does NOT do any cache flushing. The caller is
 * responsible for calling flush_cache_vunmap() on to-be-unmapped areas
 * before calling this function and flush_tlb_kernel_range() after.
 */
void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
{
	vunmap_page_range(addr, addr + size);
}

/**
 * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
 * @addr: start of the VM area to unmap
 * @size: size of the VM area to unmap
 *
 * Similar to unmap_kernel_range_noflush() but flushes vcache before
 * the unmapping and tlb after.
 */
void unmap_kernel_range(unsigned long addr, unsigned long size)
{
	unsigned long end = addr + size;

	flush_cache_vunmap(addr, end);
	vunmap_page_range(addr, end);
	flush_tlb_kernel_range(addr, end);
}

int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
	unsigned long addr = (unsigned long)area->addr;
	unsigned long end = addr + area->size - PAGE_SIZE;
	int err;

	err = vmap_page_range(addr, end, prot, *pages);
	if (err > 0) {
		*pages += err;
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL_GPL(map_vm_area);

/*** Old vmalloc interfaces ***/
DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;

static void insert_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
			      unsigned long flags, void *caller)
{
	struct vm_struct *tmp, **p;

	vm->flags = flags;
	vm->addr = (void *)va->va_start;
	vm->size = va->va_end - va->va_start;
	vm->caller = caller;
	va->private = vm;
	va->flags |= VM_VM_AREA;

	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr >= vm->addr)
			break;
	}
	vm->next = *p;
	*p = vm;
	write_unlock(&vmlist_lock);
}

static struct vm_struct *__get_vm_area_node(unsigned long size,
		unsigned long align, unsigned long flags, unsigned long start,
		unsigned long end, int node, gfp_t gfp_mask, void *caller)
{
	struct vmap_area *va;
	struct vm_struct *area;

	BUG_ON(in_interrupt());
	if (flags & VM_IOREMAP) {
		int bit = fls(size);

		if (bit > IOREMAP_MAX_ORDER)
			bit = IOREMAP_MAX_ORDER;
		else if (bit < PAGE_SHIFT)
			bit = PAGE_SHIFT;

		align = 1ul << bit;
	}

	size = PAGE_ALIGN(size);
	if (unlikely(!size))
		return NULL;

	area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!area))
		return NULL;

	/*
	 * We always allocate a guard page.
	 */
	size += PAGE_SIZE;

	va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
	if (IS_ERR(va)) {
		kfree(area);
		return NULL;
	}

	insert_vmalloc_vm(area, va, flags, caller);
	return area;
}

struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end)
{
	return __get_vm_area_node(size, 1, flags, start, end, -1, GFP_KERNEL,
				  __builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(__get_vm_area);

struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
				       unsigned long start, unsigned long end,
				       void *caller)
{
	return __get_vm_area_node(size, 1, flags, start, end, -1, GFP_KERNEL,
				  caller);
}

/**
 * get_vm_area - reserve a contiguous kernel virtual area
 * @size: size of the area
 * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 * Search an area of @size in the kernel virtual mapping area,
 * and reserve it for our purposes. Returns the area descriptor
 * on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
				  -1, GFP_KERNEL, __builtin_return_address(0));
}

struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
				     void *caller)
{
	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
				  -1, GFP_KERNEL, caller);
}

struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
				   int node, gfp_t gfp_mask)
{
	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
				  node, gfp_mask, __builtin_return_address(0));
}

static struct vm_struct *find_vm_area(const void *addr)
{
	struct vmap_area *va;

	va = find_vmap_area((unsigned long)addr);
	if (va && va->flags & VM_VM_AREA)
		return va->private;

	return NULL;
}

/**
 * remove_vm_area - find and remove a continuous kernel virtual area
 * @addr: base address
 *
 * Search for the kernel VM area starting at @addr, and remove it.
 * This function returns the found VM area, but using it is NOT safe
 * on SMP machines, except for its size or flags.
 */
struct vm_struct *remove_vm_area(const void *addr)
{
	struct vmap_area *va;

	va = find_vmap_area((unsigned long)addr);
	if (va && va->flags & VM_VM_AREA) {
		struct vm_struct *vm = va->private;
		struct vm_struct *tmp, **p;
		/*
		 * remove from list and disallow access to this vm_struct
		 * before unmap. (address range conflicts are handled by
		 * the vmap allocator.)
		 */
		write_lock(&vmlist_lock);
		for (p = &vmlist; (tmp = *p) != vm; p = &tmp->next)
			;
		*p = tmp->next;
		write_unlock(&vmlist_lock);

		vmap_debug_free_range(va->va_start, va->va_end);
		free_unmap_vmap_area(va);
		vm->size -= PAGE_SIZE;

		return vm;
	}
	return NULL;
}

static void __vunmap(const void *addr, int deallocate_pages)
{
	struct vm_struct *area;

	if (!addr)
		return;

	if ((PAGE_SIZE-1) & (unsigned long)addr) {
		WARN(1, KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
		return;
	}

	area = remove_vm_area(addr);
	if (unlikely(!area)) {
		WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
				addr);
		return;
	}

	debug_check_no_locks_freed(addr, area->size);
	debug_check_no_obj_freed(addr, area->size);

	if (deallocate_pages) {
		int i;

		for (i = 0; i < area->nr_pages; i++) {
			struct page *page = area->pages[i];

			BUG_ON(!page);
			__free_page(page);
		}

		if (area->flags & VM_VPAGES)
			vfree(area->pages);
		else
			kfree(area->pages);
	}

	kfree(area);
	return;
}

/**
 * vfree - release memory allocated by vmalloc()
 * @addr: memory base address
 *
 * Free the virtually continuous memory area starting at @addr, as
 * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
 * NULL, no operation is performed.
 *
 * Must not be called in interrupt context.
 */
void vfree(const void *addr)
{
	BUG_ON(in_interrupt());

	kmemleak_free(addr);

	__vunmap(addr, 1);
}
EXPORT_SYMBOL(vfree);

/**
 * vunmap - release virtual mapping obtained by vmap()
 * @addr: memory base address
 *
 * Free the virtually contiguous memory area starting at @addr,
 * which was created from the page array passed to vmap().
 *
 * Must not be called in interrupt context.
 */
void vunmap(const void *addr)
{
	BUG_ON(in_interrupt());
	might_sleep();
	__vunmap(addr, 0);
}
EXPORT_SYMBOL(vunmap);

/**
 * vmap - map an array of pages into virtually contiguous space
 * @pages: array of page pointers
 * @count: number of pages to map
 * @flags: vm_area->flags
 * @prot: page protection for the mapping
 *
 * Maps @count pages from @pages into contiguous kernel virtual
 * space.
 */
void *vmap(struct page **pages, unsigned int count,
	   unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;

	might_sleep();

	if (count > totalram_pages)
		return NULL;

	area = get_vm_area_caller((count << PAGE_SHIFT), flags,
				  __builtin_return_address(0));
	if (!area)
		return NULL;

	if (map_vm_area(area, prot, &pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}
EXPORT_SYMBOL(vmap);
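
/*
 * Usage sketch (editorial, not part of the original file; "pages" and
 * "nr_pages" are hypothetical): a long-lived mapping of an existing
 * page array uses vmap()/vunmap() rather than the vm_map_ram() fast
 * path:
 *
 *	void *addr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
 *	...
 *	vunmap(addr);
 */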

static void *__vmalloc_node(unsigned long size, unsigned long align,
			    gfp_t gfp_mask, pgprot_t prot,
			    int node, void *caller);
static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
				 pgprot_t prot, int node, void *caller)
{
	struct page **pages;
	unsigned int nr_pages, array_size, i;
	gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;

	nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
	array_size = (nr_pages * sizeof(struct page *));

	area->nr_pages = nr_pages;
	/* Please note that the recursion is strictly bounded. */
	if (array_size > PAGE_SIZE) {
		pages = __vmalloc_node(array_size, 1, nested_gfp|__GFP_HIGHMEM,
				PAGE_KERNEL, node, caller);
		area->flags |= VM_VPAGES;
	} else {
		pages = kmalloc_node(array_size, nested_gfp, node);
	}
	area->pages = pages;
	area->caller = caller;
	if (!area->pages) {
		remove_vm_area(area->addr);
		kfree(area);
		return NULL;
	}

	for (i = 0; i < area->nr_pages; i++) {
		struct page *page;

		if (node < 0)
			page = alloc_page(gfp_mask);
		else
			page = alloc_pages_node(node, gfp_mask, 0);

		if (unlikely(!page)) {
			/* Successfully allocated i pages, free them in __vunmap() */
			area->nr_pages = i;
			goto fail;
		}
		area->pages[i] = page;
	}

	if (map_vm_area(area, prot, &pages))
		goto fail;
	return area->addr;

fail:
	vfree(area->addr);
	return NULL;
}

void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
{
	void *addr = __vmalloc_area_node(area, gfp_mask, prot, -1,
					 __builtin_return_address(0));

	/*
	 * A ref_count = 3 is needed because the vm_struct and vmap_area
	 * structures allocated in the __get_vm_area_node() function contain
	 * references to the virtual address of the vmalloc'ed block.
	 */
	kmemleak_alloc(addr, area->size - PAGE_SIZE, 3, gfp_mask);

	return addr;
}

/**
 * __vmalloc_node - allocate virtually contiguous memory
 * @size: allocation size
 * @align: desired alignment
 * @gfp_mask: flags for the page level allocator
 * @prot: protection mask for the allocated pages
 * @node: node to use for allocation or -1
 * @caller: caller's return address
 *
 * Allocate enough pages to cover @size from the page level
 * allocator with @gfp_mask flags. Map them into contiguous
 * kernel virtual space, using a pagetable protection of @prot.
 */
static void *__vmalloc_node(unsigned long size, unsigned long align,
			    gfp_t gfp_mask, pgprot_t prot,
			    int node, void *caller)
{
	struct vm_struct *area;
	void *addr;
	unsigned long real_size = size;

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > totalram_pages)
		return NULL;

	area = __get_vm_area_node(size, align, VM_ALLOC, VMALLOC_START,
				  VMALLOC_END, node, gfp_mask, caller);

	if (!area)
		return NULL;

	addr = __vmalloc_area_node(area, gfp_mask, prot, node, caller);

	/*
	 * A ref_count = 3 is needed because the vm_struct and vmap_area
	 * structures allocated in the __get_vm_area_node() function contain
	 * references to the virtual address of the vmalloc'ed block.
	 */
	kmemleak_alloc(addr, real_size, 3, gfp_mask);

	return addr;
}

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_node(size, 1, gfp_mask, prot, -1,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(__vmalloc);

/**
 * vmalloc - allocate virtually contiguous memory
 * @size: allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
				-1, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc);
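
/*
 * Usage sketch (editorial, not part of the original file; "struct foo"
 * and "nr_entries" are hypothetical): the common pairing for a large,
 * virtually contiguous buffer:
 *
 *	struct foo *table = vmalloc(nr_entries * sizeof(*table));
 *	if (!table)
 *		return -ENOMEM;
 *	...
 *	vfree(table);
 */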

/**
 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
 * @size: allocation size
 *
 * The resulting memory area is zeroed so it can be mapped to userspace
 * without leaking data.
 */
void *vmalloc_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc_node(size, SHMLBA,
			     GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			     PAGE_KERNEL, -1, __builtin_return_address(0));
	if (ret) {
		area = find_vm_area(ret);
		area->flags |= VM_USERMAP;
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_user);

/**
 * vmalloc_node - allocate memory on a specific node
 * @size: allocation size
 * @node: numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
				node, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_node);

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 * vmalloc_exec - allocate virtually contiguous, executable memory
 * @size: allocation size
 *
 * Kernel-internal function to allocate enough pages to cover @size
 * from the page level allocator and map them into contiguous and
 * executable kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_exec(unsigned long size)
{
	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
			      -1, __builtin_return_address(0));
}

#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
#define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL
#else
#define GFP_VMALLOC32 GFP_KERNEL
#endif

/**
 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
 * @size: allocation size
 *
 * Allocate enough 32bit PA addressable pages to cover @size from the
 * page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
			      -1, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_32);

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 * @size: allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 */
void *vmalloc_32_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc_node(size, 1, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
			     -1, __builtin_return_address(0));
	if (ret) {
		area = find_vm_area(ret);
		area->flags |= VM_USERMAP;
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_32_user);

/*
 * Small helper routine: copy contents from addr to buf.
 * If the page is not present, fill with zeroes.
 */
static int aligned_vread(char *buf, char *addr, unsigned long count)
{
	struct page *p;
	int copied = 0;

	while (count) {
		unsigned long offset, length;

		offset = (unsigned long)addr & ~PAGE_MASK;
		length = PAGE_SIZE - offset;
		if (length > count)
			length = count;
		p = vmalloc_to_page(addr);
		/*
		 * To do safe access to this _mapped_ area, we need a lock.
		 * But adding a lock here means adding the overhead of
		 * vmalloc()/vfree() calls for this _debug_ interface,
		 * which is rarely used. Instead of that, we'll use
		 * kmap() and accept a small overhead in this access function.
		 */
		if (p) {
			/*
			 * we can expect USER0 is not used (see vread/vwrite's
			 * function description)
			 */
			void *map = kmap_atomic(p, KM_USER0);
			memcpy(buf, map + offset, length);
			kunmap_atomic(map, KM_USER0);
		} else
			memset(buf, 0, length);

		addr += length;
		buf += length;
		copied += length;
		count -= length;
	}
	return copied;
}

static int aligned_vwrite(char *buf, char *addr, unsigned long count)
{
	struct page *p;
	int copied = 0;

	while (count) {
		unsigned long offset, length;

		offset = (unsigned long)addr & ~PAGE_MASK;
		length = PAGE_SIZE - offset;
		if (length > count)
			length = count;
		p = vmalloc_to_page(addr);
		/*
		 * To do safe access to this _mapped_ area, we need a lock.
		 * But adding a lock here means adding the overhead of
		 * vmalloc()/vfree() calls for this _debug_ interface,
		 * which is rarely used. Instead of that, we'll use
		 * kmap() and accept a small overhead in this access function.
		 */
		if (p) {
			/*
			 * we can expect USER0 is not used (see vread/vwrite's
			 * function description)
			 */
			void *map = kmap_atomic(p, KM_USER0);
			memcpy(map + offset, buf, length);
			kunmap_atomic(map, KM_USER0);
		}
		addr += length;
		buf += length;
		copied += length;
		count -= length;
	}
	return copied;
}

/**
 * vread() - read vmalloc area in a safe way.
 * @buf: buffer for reading data
 * @addr: vm address.
 * @count: number of bytes to be read.
 *
 * Returns the number of bytes by which @addr and @buf should be
 * increased (same as @count). Returns 0 if [addr...addr+count) does
 * not intersect any live vmalloc area.
 *
 * This function checks that @addr is a valid vmalloc'ed area, and
 * copies data from that area to the given buffer. If the given memory
 * range of [addr...addr+count) includes some valid address, data is
 * copied to the proper area of @buf. If there are memory holes, they
 * are zero-filled. An IOREMAP area is treated as a memory hole and no
 * copy is done.
 *
 * If [addr...addr+count) doesn't intersect any live vm_struct area,
 * this returns 0. @buf should be a kernel buffer. Because this
 * function uses KM_USER0, the caller should guarantee KM_USER0 is
 * not used.
 *
 * Note: In usual ops, vread() is never necessary because the caller
 * should know the vmalloc() area is valid and can use memcpy().
 * This is for routines which have to access the vmalloc area without
 * any information, such as /dev/kmem.
 */
long vread(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long buflen = count;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; count && tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		if (n > count)
			n = count;
		if (!(tmp->flags & VM_IOREMAP))
			aligned_vread(buf, addr, n);
		else /* IOREMAP area is treated as memory hole */
			memset(buf, 0, n);
		buf += n;
		addr += n;
		count -= n;
	}
finished:
	read_unlock(&vmlist_lock);

	if (buf == buf_start)
		return 0;
	/* zero-fill memory holes */
	if (buf != buf_start + buflen)
		memset(buf, 0, buflen - (buf - buf_start));

	return buflen;
}

/**
 * vwrite() - write vmalloc area in a safe way.
 * @buf: buffer for source data
 * @addr: vm address.
 * @count: number of bytes to be written.
 *
 * Returns the number of bytes by which @addr and @buf should be
 * increased (same as @count). If [addr...addr+count) doesn't
 * intersect any live vmalloc area, this returns 0.
 *
 * This function checks that @addr is a valid vmalloc'ed area, and
 * copies data from the buffer to the given address. If the specified
 * range of [addr...addr+count) includes some valid address, data is
 * copied from the proper area of @buf. If there are memory holes,
 * nothing is copied into them. An IOREMAP area is treated as a memory
 * hole and no copy is done.
 *
 * @buf should be a kernel buffer. Because this function uses KM_USER0,
 * the caller should guarantee KM_USER0 is not used.
 *
 * Note: In usual ops, vwrite() is never necessary because the caller
 * should know the vmalloc() area is valid and can use memcpy().
 * This is for routines which have to access the vmalloc area without
 * any information, such as /dev/kmem.
 *
 * The caller should guarantee KM_USER1 is not used.
 */
long vwrite(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr;
	unsigned long n, buflen;
	int copied = 0;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;
	buflen = count;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; count && tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		if (n > count)
			n = count;
		if (!(tmp->flags & VM_IOREMAP)) {
			aligned_vwrite(buf, addr, n);
			copied++;
		}
		buf += n;
		addr += n;
		count -= n;
	}
finished:
	read_unlock(&vmlist_lock);
	if (!copied)
		return 0;
	return buflen;
}

/**
 * remap_vmalloc_range - map vmalloc pages to userspace
 * @vma: vma to cover (map full range of vma)
 * @addr: vmalloc memory
 * @pgoff: number of pages into addr before first page to map
 *
 * Returns: 0 for success, -Exxx on failure
 *
 * This function checks that addr is a valid vmalloc'ed area, and
 * that it is big enough to cover the vma. Will return failure if
 * that criteria isn't met.
 *
 * Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
						unsigned long pgoff)
{
	struct vm_struct *area;
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;

	if ((PAGE_SIZE-1) & (unsigned long)addr)
		return -EINVAL;

	area = find_vm_area(addr);
	if (!area)
		return -EINVAL;

	if (!(area->flags & VM_USERMAP))
		return -EINVAL;

	if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
		return -EINVAL;

	addr += pgoff << PAGE_SHIFT;
	do {
		struct page *page = vmalloc_to_page(addr);
		int ret;

		ret = vm_insert_page(vma, uaddr, page);
		if (ret)
			return ret;

		uaddr += PAGE_SIZE;
		addr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	/* Prevent "things" like memory migration? VM_flags need a cleanup... */
	vma->vm_flags |= VM_RESERVED;

	return 0;
}
EXPORT_SYMBOL(remap_vmalloc_range);
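
/*
 * Usage sketch (editorial, not part of the original file; "foo_mmap"
 * and "priv->vbuf" are hypothetical names): a driver mmap handler
 * typically allocates the buffer with vmalloc_user() (which sets
 * VM_USERMAP, checked above) and then maps the whole vma:
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_vmalloc_range(vma, priv->vbuf, 0);
 *	}
 */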
/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
void __attribute__((weak)) vmalloc_sync_all(void)
{
}

static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
{
	/* apply_to_page_range() does all the hard work. */
	return 0;
}

/**
 * alloc_vm_area - allocate a range of kernel address space
 * @size: size of the area
 *
 * Returns: NULL on failure, vm_struct on success
 *
 * This function reserves a range of kernel address space and allocates
 * pagetables to map that range.  No actual mappings are created.
 *
 * If the kernel address space is not shared between processes (i.e.
 * each process carries its own copy of the kernel page tables), the
 * new page tables are synced into every process.
 */
struct vm_struct *alloc_vm_area(size_t size)
{
	struct vm_struct *area;

	area = get_vm_area_caller(size, VM_IOREMAP,
				__builtin_return_address(0));
	if (area == NULL)
		return NULL;

	/*
	 * This ensures that page tables are constructed for this region
	 * of kernel virtual address space and mapped into init_mm.
	 */
	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
				area->size, f, NULL)) {
		free_vm_area(area);
		return NULL;
	}

	/* Make sure the pagetables are constructed in process kernel
	   mappings */
	vmalloc_sync_all();

	return area;
}
EXPORT_SYMBOL_GPL(alloc_vm_area);

void free_vm_area(struct vm_struct *area)
{
	struct vm_struct *ret;

	ret = remove_vm_area(area->addr);
	BUG_ON(ret != area);
	kfree(area);
}
EXPORT_SYMBOL_GPL(free_vm_area);
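/*
 * Illustrative sketch (editor's addition): reserving and releasing a
 * page of kernel address space, in the style of alloc_vm_area()'s users
 * (e.g. Xen's grant table code).  The function name is hypothetical.
 */
static void __maybe_unused example_reserve_kva(void)
{
	struct vm_struct *area;

	area = alloc_vm_area(PAGE_SIZE);
	if (!area)
		return;

	/*
	 * Page tables now back area->addr, so an external agent (such
	 * as a hypervisor) can install a mapping at that address
	 * without further allocation.
	 */

	free_vm_area(area);
}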
static struct vmap_area *node_to_va(struct rb_node *n)
{
	return n ? rb_entry(n, struct vmap_area, rb_node) : NULL;
}

/**
 * pvm_find_next_prev - find the next and prev vmap_area surrounding @end
 * @end: target address
 * @pnext: out arg for the next vmap_area
 * @pprev: out arg for the previous vmap_area
 *
 * Returns: %true if either or both of next and prev are found,
 *	    %false if no vmap_area exists
 *
 * Find the vmap_areas whose end addresses enclose @end, i.e. if not
 * NULL, (*pnext)->va_end > @end and (*pprev)->va_end <= @end.
 */
static bool pvm_find_next_prev(unsigned long end,
			       struct vmap_area **pnext,
			       struct vmap_area **pprev)
{
	struct rb_node *n = vmap_area_root.rb_node;
	struct vmap_area *va = NULL;

	while (n) {
		va = rb_entry(n, struct vmap_area, rb_node);
		if (end < va->va_end)
			n = n->rb_left;
		else if (end > va->va_end)
			n = n->rb_right;
		else
			break;
	}

	if (!va)
		return false;

	if (va->va_end > end) {
		*pnext = va;
		*pprev = node_to_va(rb_prev(&(*pnext)->rb_node));
	} else {
		*pprev = va;
		*pnext = node_to_va(rb_next(&(*pprev)->rb_node));
	}
	return true;
}

/**
 * pvm_determine_end - find the highest aligned address between two vmap_areas
 * @pnext: in/out arg for the next vmap_area
 * @pprev: in/out arg for the previous vmap_area
 * @align: alignment
 *
 * Returns: determined end address
 *
 * Find the highest aligned address between *@pnext and *@pprev below
 * VMALLOC_END.  *@pnext and *@pprev are adjusted so that the aligned-down
 * address is between the end addresses of the two vmap_areas.
 *
 * Please note that the address returned by this function may fall
 * inside the *@pnext vmap_area.  The caller is responsible for checking
 * that.
 */
static unsigned long pvm_determine_end(struct vmap_area **pnext,
				       struct vmap_area **pprev,
				       unsigned long align)
{
	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
	unsigned long addr;

	if (*pnext)
		addr = min((*pnext)->va_start & ~(align - 1), vmalloc_end);
	else
		addr = vmalloc_end;

	while (*pprev && (*pprev)->va_end > addr) {
		*pnext = *pprev;
		*pprev = node_to_va(rb_prev(&(*pnext)->rb_node));
	}

	return addr;
}
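/*
 * Illustrative sketch (editor's addition): the post-conditions the
 * percpu scanner below relies on, spelled out as assertions.  This
 * hypothetical helper assumes vmap_area_lock is held by the caller.
 */
static void __maybe_unused pvm_check_surroundings(unsigned long end)
{
	struct vmap_area *next, *prev;

	if (!pvm_find_next_prev(end, &next, &prev))
		return;		/* no vmap_areas registered at all */

	/* next, when present, ends strictly above @end ... */
	BUG_ON(next && next->va_end <= end);
	/* ... while prev, when present, ends at or below @end */
	BUG_ON(prev && prev->va_end > end);
}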
/**
 * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
 * @offsets: array containing offset of each area
 * @sizes: array containing size of each area
 * @nr_vms: the number of areas to allocate
 * @align: alignment, all entries in @offsets and @sizes must be aligned to this
 * @gfp_mask: allocation mask
 *
 * Returns: kmalloc'd vm_struct pointer array pointing to allocated
 *	    vm_structs on success, %NULL on failure
 *
 * The percpu allocator wants to use congruent vm areas so that it can
 * maintain the offsets among percpu areas.  This function allocates
 * congruent vmalloc areas for it.  These areas tend to be scattered
 * pretty far apart; the distance between two areas can easily reach
 * gigabytes.  To avoid interacting with regular vmallocs, these areas
 * are allocated from the top of the vmalloc address space.
 *
 * Despite its complicated look, this allocator is rather simple.  It
 * does everything top-down and scans areas from the end looking for a
 * matching slot.  While scanning, if any of the areas overlaps with an
 * existing vmap_area, the base address is pulled down to fit the
 * area.  Scanning is repeated until all the areas fit, and then all
 * necessary data structures are inserted and the result is returned.
 */
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				     const size_t *sizes, int nr_vms,
				     size_t align, gfp_t gfp_mask)
{
	const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
	struct vmap_area **vas, *prev, *next;
	struct vm_struct **vms;
	int area, area2, last_area, term_area;
	unsigned long base, start, end, last_end;
	bool purged = false;

	gfp_mask &= GFP_RECLAIM_MASK;

	/* verify parameters and allocate data structures */
	BUG_ON(align & ~PAGE_MASK || !is_power_of_2(align));
	for (last_area = 0, area = 0; area < nr_vms; area++) {
		start = offsets[area];
		end = start + sizes[area];

		/* is everything aligned properly? */
		BUG_ON(!IS_ALIGNED(offsets[area], align));
		BUG_ON(!IS_ALIGNED(sizes[area], align));

		/* detect the area with the highest address */
		if (start > offsets[last_area])
			last_area = area;

		/* the areas must not overlap each other */
		for (area2 = 0; area2 < nr_vms; area2++) {
			unsigned long start2 = offsets[area2];
			unsigned long end2 = start2 + sizes[area2];

			if (area2 == area)
				continue;

			BUG_ON(start2 >= start && start2 < end);
			BUG_ON(end2 <= end && end2 > start);
		}
	}
	last_end = offsets[last_area] + sizes[last_area];

	if (vmalloc_end - vmalloc_start < last_end) {
		WARN_ON(true);
		return NULL;
	}

	vms = kzalloc(sizeof(vms[0]) * nr_vms, gfp_mask);
	vas = kzalloc(sizeof(vas[0]) * nr_vms, gfp_mask);
	if (!vas || !vms)
		goto err_free;

	for (area = 0; area < nr_vms; area++) {
		vas[area] = kzalloc(sizeof(struct vmap_area), gfp_mask);
		vms[area] = kzalloc(sizeof(struct vm_struct), gfp_mask);
		if (!vas[area] || !vms[area])
			goto err_free;
	}
retry:
	spin_lock(&vmap_area_lock);

	/* start scanning - we scan from the top, begin with the last area */
	area = term_area = last_area;
	start = offsets[area];
	end = start + sizes[area];

	if (!pvm_find_next_prev(vmap_area_pcpu_hole, &next, &prev)) {
		base = vmalloc_end - last_end;
		goto found;
	}
	base = pvm_determine_end(&next, &prev, align) - end;

	while (true) {
		BUG_ON(next && next->va_end <= base + end);
		BUG_ON(prev && prev->va_end > base + end);

		/*
		 * base might have underflowed, add last_end before
		 * comparing.
		 */
		if (base + last_end < vmalloc_start + last_end) {
			spin_unlock(&vmap_area_lock);
			if (!purged) {
				purge_vmap_area_lazy();
				purged = true;
				goto retry;
			}
			goto err_free;
		}

		/*
		 * If next overlaps, move base downwards so that it's
		 * right below next and then recheck.
		 */
		if (next && next->va_start < base + end) {
			base = pvm_determine_end(&next, &prev, align) - end;
			term_area = area;
			continue;
		}

		/*
		 * If prev overlaps, shift down next and prev and move
		 * base so that it's right below new next and then
		 * recheck.
		 */
		if (prev && prev->va_end > base + start) {
			next = prev;
			prev = node_to_va(rb_prev(&next->rb_node));
			base = pvm_determine_end(&next, &prev, align) - end;
			term_area = area;
			continue;
		}

		/*
		 * This area fits, move on to the previous one.  If
		 * the previous one is the terminal one, we're done.
		 */
		area = (area + nr_vms - 1) % nr_vms;
		if (area == term_area)
			break;
		start = offsets[area];
		end = start + sizes[area];
		pvm_find_next_prev(base + end, &next, &prev);
	}
found:
	/* we've found a fitting base, insert all va's */
	for (area = 0; area < nr_vms; area++) {
		struct vmap_area *va = vas[area];

		va->va_start = base + offsets[area];
		va->va_end = va->va_start + sizes[area];
		__insert_vmap_area(va);
	}

	vmap_area_pcpu_hole = base + offsets[last_area];

	spin_unlock(&vmap_area_lock);

	/* insert all vm's */
	for (area = 0; area < nr_vms; area++)
		insert_vmalloc_vm(vms[area], vas[area], VM_ALLOC,
				  pcpu_get_vm_areas);

	kfree(vas);
	return vms;

err_free:
	for (area = 0; area < nr_vms; area++) {
		if (vas)
			kfree(vas[area]);
		if (vms)
			kfree(vms[area]);
	}
	kfree(vas);
	kfree(vms);
	return NULL;
}
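/*
 * Illustrative sketch (editor's addition): requesting two congruent
 * areas the way the percpu first-chunk code might.  The offsets and
 * sizes here are invented; see mm/percpu.c for the real caller.
 */
static void __maybe_unused example_pcpu_vm_areas(void)
{
	/* two areas 16 pages apart, 4 pages each, 4-page aligned */
	const unsigned long offsets[] = { 0, 16 * PAGE_SIZE };
	const size_t sizes[] = { 4 * PAGE_SIZE, 4 * PAGE_SIZE };
	struct vm_struct **vms;

	vms = pcpu_get_vm_areas(offsets, sizes, 2, 4 * PAGE_SIZE, GFP_KERNEL);
	if (!vms)
		return;

	/* the two areas keep their 16 * PAGE_SIZE offset from each other */
	pcpu_free_vm_areas(vms, 2);
}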
/**
 * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
 * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
 * @nr_vms: the number of allocated areas
 *
 * Free vm_structs and the array allocated by pcpu_get_vm_areas().
 */
void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
	int i;

	for (i = 0; i < nr_vms; i++)
		free_vm_area(vms[i]);
	kfree(vms);
}

#ifdef CONFIG_PROC_FS
static void *s_start(struct seq_file *m, loff_t *pos)
{
	loff_t n = *pos;
	struct vm_struct *v;

	read_lock(&vmlist_lock);
	v = vmlist;
	while (n > 0 && v) {
		n--;
		v = v->next;
	}
	if (!n)
		return v;

	return NULL;
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct vm_struct *v = p;

	++*pos;
	return v->next;
}

static void s_stop(struct seq_file *m, void *p)
{
	read_unlock(&vmlist_lock);
}

static void show_numa_info(struct seq_file *m, struct vm_struct *v)
{
	if (NUMA_BUILD) {
		unsigned int nr, *counters = m->private;

		if (!counters)
			return;

		memset(counters, 0, nr_node_ids * sizeof(unsigned int));

		for (nr = 0; nr < v->nr_pages; nr++)
			counters[page_to_nid(v->pages[nr])]++;

		for_each_node_state(nr, N_HIGH_MEMORY)
			if (counters[nr])
				seq_printf(m, " N%u=%u", nr, counters[nr]);
	}
}

static int s_show(struct seq_file *m, void *p)
{
	struct vm_struct *v = p;

	seq_printf(m, "0x%p-0x%p %7ld",
		v->addr, v->addr + v->size, v->size);

	if (v->caller) {
		char buff[KSYM_SYMBOL_LEN];

		seq_putc(m, ' ');
		sprint_symbol(buff, (unsigned long)v->caller);
		seq_puts(m, buff);
	}

	if (v->nr_pages)
		seq_printf(m, " pages=%d", v->nr_pages);

	if (v->phys_addr)
		seq_printf(m, " phys=%llx", (unsigned long long)v->phys_addr);

	if (v->flags & VM_IOREMAP)
		seq_printf(m, " ioremap");

	if (v->flags & VM_ALLOC)
		seq_printf(m, " vmalloc");

	if (v->flags & VM_MAP)
		seq_printf(m, " vmap");

	if (v->flags & VM_USERMAP)
		seq_printf(m, " user");

	if (v->flags & VM_VPAGES)
		seq_printf(m, " vpages");

	show_numa_info(m, v);
	seq_putc(m, '\n');
	return 0;
}

static const struct seq_operations vmalloc_op = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};
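/*
 * Editor's note: each /proc/vmallocinfo line emitted by s_show() above
 * therefore looks like (values illustrative):
 *
 *   0xf8a00000-0xf8a02000    8192 module_alloc+0x2a/0x30 pages=1 vmalloc
 *
 * i.e. the area's start-end, its size in bytes (guard page included),
 * the recorded caller when available, then the optional pages=, phys=
 * and flag tags, plus per-node page counts (N<node>=<count>) on NUMA
 * builds.
 */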
static int vmalloc_open(struct inode *inode, struct file *file)
{
	unsigned int *ptr = NULL;
	int ret;

	if (NUMA_BUILD) {
		ptr = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL);
		if (ptr == NULL)
			return -ENOMEM;
	}
	ret = seq_open(file, &vmalloc_op);
	if (!ret) {
		struct seq_file *m = file->private_data;
		m->private = ptr;
	} else
		kfree(ptr);
	return ret;
}

static const struct file_operations proc_vmalloc_operations = {
	.open		= vmalloc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

static int __init proc_vmalloc_init(void)
{
	proc_create("vmallocinfo", S_IRUSR, NULL, &proc_vmalloc_operations);
	return 0;
}
module_init(proc_vmalloc_init);
#endif