percpu.c revision 88999a898b565960690f18e4a13a1e8a9fa4dfef
/*
 * mm/percpu.c - percpu memory allocator
 *
 * Copyright (C) 2009		SUSE Linux Products GmbH
 * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * This is percpu allocator which can handle both static and dynamic
 * areas.  Percpu areas are allocated in chunks.  Each chunk consists
 * of a boot-time determined number of units and the first chunk is
 * used for static percpu variables in the kernel image (special boot
 * time alloc/init handling necessary as these areas need to be
 * brought up before allocation services are running).  Units grow as
 * necessary and all units grow or shrink in unison.  When a chunk is
 * filled up, another chunk is allocated.
 *
 *  c0                           c1                         c2
 *  -------------------          -------------------        ------------
 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
 *  -------------------  ......  -------------------  ....  ------------
 *
 * Allocation is done in offset-size areas of single unit space.  Ie,
 * an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0,
 * c1:u1, c1:u2 and c1:u3.  On UMA, units correspond directly to
 * cpus.  On NUMA, the mapping can be non-linear and even sparse.
 * Percpu access can be done by configuring percpu base registers
 * according to cpu to unit mapping and pcpu_unit_size.
 *
 * There are usually many small percpu allocations, many of them being
 * as small as 4 bytes.  The allocator organizes chunks into lists
 * according to free size and tries to allocate from the fullest one.
 * Each chunk keeps the maximum contiguous area size hint which is
 * guaranteed to be equal to or larger than the maximum contiguous
 * area in the chunk.  This helps the allocator not to iterate the
 * chunk maps unnecessarily.
 *
 * Allocation state in each chunk is kept using an array of integers
 * on chunk->map.  A positive value in the map represents a free
 * region and a negative value an allocated one.  Allocation inside a
 * chunk is done by scanning this map sequentially and serving the
 * first matching entry.  This is mostly copied from the
 * percpu_modalloc() allocator.  Chunks can be determined from the
 * address using the index field in the page struct.  The index field
 * contains a pointer to the chunk.
 *
 * To use this allocator, arch code should do the following:
47 * 48 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate 49 * regular address to percpu pointer and back if they need to be 50 * different from the default 51 * 52 * - use pcpu_setup_first_chunk() during percpu area initialization to 53 * setup the first chunk containing the kernel static percpu area 54 */ 55 56#include <linux/bitmap.h> 57#include <linux/bootmem.h> 58#include <linux/err.h> 59#include <linux/list.h> 60#include <linux/log2.h> 61#include <linux/mm.h> 62#include <linux/module.h> 63#include <linux/mutex.h> 64#include <linux/percpu.h> 65#include <linux/pfn.h> 66#include <linux/slab.h> 67#include <linux/spinlock.h> 68#include <linux/vmalloc.h> 69#include <linux/workqueue.h> 70 71#include <asm/cacheflush.h> 72#include <asm/sections.h> 73#include <asm/tlbflush.h> 74#include <asm/io.h> 75 76#define PCPU_SLOT_BASE_SHIFT 5 /* 1-31 shares the same slot */ 77#define PCPU_DFL_MAP_ALLOC 16 /* start a map with 16 ents */ 78 79/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */ 80#ifndef __addr_to_pcpu_ptr 81#define __addr_to_pcpu_ptr(addr) \ 82 (void __percpu *)((unsigned long)(addr) - \ 83 (unsigned long)pcpu_base_addr + \ 84 (unsigned long)__per_cpu_start) 85#endif 86#ifndef __pcpu_ptr_to_addr 87#define __pcpu_ptr_to_addr(ptr) \ 88 (void __force *)((unsigned long)(ptr) + \ 89 (unsigned long)pcpu_base_addr - \ 90 (unsigned long)__per_cpu_start) 91#endif 92 93struct pcpu_chunk { 94 struct list_head list; /* linked to pcpu_slot lists */ 95 int free_size; /* free bytes in the chunk */ 96 int contig_hint; /* max contiguous size hint */ 97 void *base_addr; /* base address of this chunk */ 98 int map_used; /* # of map entries used */ 99 int map_alloc; /* # of map entries allocated */ 100 int *map; /* allocation map */ 101 void *data; /* chunk data */ 102 bool immutable; /* no [de]population allowed */ 103 unsigned long populated[]; /* populated bitmap */ 104}; 105 106static int pcpu_unit_pages __read_mostly; 107static int pcpu_unit_size __read_mostly; 108static int pcpu_nr_units __read_mostly; 109static int pcpu_atom_size __read_mostly; 110static int pcpu_nr_slots __read_mostly; 111static size_t pcpu_chunk_struct_size __read_mostly; 112 113/* cpus with the lowest and highest unit numbers */ 114static unsigned int pcpu_first_unit_cpu __read_mostly; 115static unsigned int pcpu_last_unit_cpu __read_mostly; 116 117/* the address of the first chunk which starts with the kernel static area */ 118void *pcpu_base_addr __read_mostly; 119EXPORT_SYMBOL_GPL(pcpu_base_addr); 120 121static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */ 122const unsigned long *pcpu_unit_offsets __read_mostly; /* cpu -> unit offset */ 123 124/* group information, used for vm allocation */ 125static int pcpu_nr_groups __read_mostly; 126static const unsigned long *pcpu_group_offsets __read_mostly; 127static const size_t *pcpu_group_sizes __read_mostly; 128 129/* 130 * The first chunk which always exists. Note that unlike other 131 * chunks, this one can be allocated and mapped in several different 132 * ways and thus often doesn't live in the vmalloc area. 133 */ 134static struct pcpu_chunk *pcpu_first_chunk; 135 136/* 137 * Optional reserved chunk. This chunk reserves part of the first 138 * chunk and serves it for reserved allocations. The amount of 139 * reserved offset is in pcpu_reserved_chunk_limit. When reserved 140 * area doesn't exist, the following variables contain NULL and 0 141 * respectively. 
142 */ 143static struct pcpu_chunk *pcpu_reserved_chunk; 144static int pcpu_reserved_chunk_limit; 145 146/* 147 * Synchronization rules. 148 * 149 * There are two locks - pcpu_alloc_mutex and pcpu_lock. The former 150 * protects allocation/reclaim paths, chunks, populated bitmap and 151 * vmalloc mapping. The latter is a spinlock and protects the index 152 * data structures - chunk slots, chunks and area maps in chunks. 153 * 154 * During allocation, pcpu_alloc_mutex is kept locked all the time and 155 * pcpu_lock is grabbed and released as necessary. All actual memory 156 * allocations are done using GFP_KERNEL with pcpu_lock released. In 157 * general, percpu memory can't be allocated with irq off but 158 * irqsave/restore are still used in alloc path so that it can be used 159 * from early init path - sched_init() specifically. 160 * 161 * Free path accesses and alters only the index data structures, so it 162 * can be safely called from atomic context. When memory needs to be 163 * returned to the system, free path schedules reclaim_work which 164 * grabs both pcpu_alloc_mutex and pcpu_lock, unlinks chunks to be 165 * reclaimed, release both locks and frees the chunks. Note that it's 166 * necessary to grab both locks to remove a chunk from circulation as 167 * allocation path might be referencing the chunk with only 168 * pcpu_alloc_mutex locked. 169 */ 170static DEFINE_MUTEX(pcpu_alloc_mutex); /* protects whole alloc and reclaim */ 171static DEFINE_SPINLOCK(pcpu_lock); /* protects index data structures */ 172 173static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */ 174 175/* reclaim work to release fully free chunks, scheduled from free path */ 176static void pcpu_reclaim(struct work_struct *work); 177static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim); 178 179static bool pcpu_addr_in_first_chunk(void *addr) 180{ 181 void *first_start = pcpu_first_chunk->base_addr; 182 183 return addr >= first_start && addr < first_start + pcpu_unit_size; 184} 185 186static bool pcpu_addr_in_reserved_chunk(void *addr) 187{ 188 void *first_start = pcpu_first_chunk->base_addr; 189 190 return addr >= first_start && 191 addr < first_start + pcpu_reserved_chunk_limit; 192} 193 194static int __pcpu_size_to_slot(int size) 195{ 196 int highbit = fls(size); /* size is in bytes */ 197 return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1); 198} 199 200static int pcpu_size_to_slot(int size) 201{ 202 if (size == pcpu_unit_size) 203 return pcpu_nr_slots - 1; 204 return __pcpu_size_to_slot(size); 205} 206 207static int pcpu_chunk_slot(const struct pcpu_chunk *chunk) 208{ 209 if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int)) 210 return 0; 211 212 return pcpu_size_to_slot(chunk->free_size); 213} 214 215/* set the pointer to a chunk in a page struct */ 216static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu) 217{ 218 page->index = (unsigned long)pcpu; 219} 220 221/* obtain pointer to a chunk from a page struct */ 222static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page) 223{ 224 return (struct pcpu_chunk *)page->index; 225} 226 227static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx) 228{ 229 return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx; 230} 231 232static unsigned long __maybe_unused pcpu_chunk_addr(struct pcpu_chunk *chunk, 233 unsigned int cpu, int page_idx) 234{ 235 return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] + 236 (page_idx << PAGE_SHIFT); 237} 238 239static struct page *pcpu_chunk_page(struct 
pcpu_chunk *chunk,
					    unsigned int cpu, int page_idx)
{
	/* must not be used on pre-mapped chunk */
	WARN_ON(chunk->immutable);

	return vmalloc_to_page((void *)pcpu_chunk_addr(chunk, cpu, page_idx));
}

static void __maybe_unused pcpu_next_unpop(struct pcpu_chunk *chunk,
					   int *rs, int *re, int end)
{
	*rs = find_next_zero_bit(chunk->populated, end, *rs);
	*re = find_next_bit(chunk->populated, end, *rs + 1);
}

static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
					 int *rs, int *re, int end)
{
	*rs = find_next_bit(chunk->populated, end, *rs);
	*re = find_next_zero_bit(chunk->populated, end, *rs + 1);
}

/*
 * (Un)populated page region iterators.  Iterate over (un)populated
 * page regions between @start and @end in @chunk.  @rs and @re should
 * be integer variables and will be set to start and end page index of
 * the current region.
 */
#define pcpu_for_each_unpop_region(chunk, rs, re, start, end)		     \
	for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \
	     (rs) < (re);						     \
	     (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end)))

#define pcpu_for_each_pop_region(chunk, rs, re, start, end)		     \
	for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end));   \
	     (rs) < (re);						     \
	     (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))

/**
 * pcpu_mem_alloc - allocate memory
 * @size: bytes to allocate
 *
 * Allocate @size bytes.  If @size is smaller than PAGE_SIZE,
 * kzalloc() is used; otherwise, vmalloc() is used.  The returned
 * memory is always zeroed.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_mem_alloc(size_t size)
{
	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_KERNEL);
	else {
		void *ptr = vmalloc(size);
		if (ptr)
			memset(ptr, 0, size);
		return ptr;
	}
}

/**
 * pcpu_mem_free - free memory
 * @ptr: memory to free
 * @size: size of the area
 *
 * Free @ptr.  @ptr should have been allocated using pcpu_mem_alloc().
 */
static void pcpu_mem_free(void *ptr, size_t size)
{
	if (size <= PAGE_SIZE)
		kfree(ptr);
	else
		vfree(ptr);
}

/**
 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 * @chunk: chunk of interest
 * @oslot: the previous slot it was on
 *
 * This function is called after an allocation or free changed @chunk.
 * New slot according to the changed state is determined and @chunk is
 * moved to the slot.  Note that the reserved chunk is never put on
 * chunk slots.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
{
	int nslot = pcpu_chunk_slot(chunk);

	if (chunk != pcpu_reserved_chunk && oslot != nslot) {
		if (oslot < nslot)
			list_move(&chunk->list, &pcpu_slot[nslot]);
		else
			list_move_tail(&chunk->list, &pcpu_slot[nslot]);
	}
}

/**
 * pcpu_need_to_extend - determine whether chunk area map needs to be extended
 * @chunk: chunk of interest
 *
 * Determine whether area map of @chunk needs to be extended to
 * accommodate a new allocation.
 *
 * CONTEXT:
 * pcpu_lock.
 *
 * RETURNS:
 * New target map allocation length if extension is necessary, 0
 * otherwise.
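 *
 * As a worked example with made-up numbers: a chunk with map_used == 15
 * and map_alloc == 16 can't be guaranteed the two extra entries a
 * worst-case split needs, so the target is doubled starting from
 * PCPU_DFL_MAP_ALLOC until it fits: 16 -> 32.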
 */
static int pcpu_need_to_extend(struct pcpu_chunk *chunk)
{
	int new_alloc;

	if (chunk->map_alloc >= chunk->map_used + 2)
		return 0;

	new_alloc = PCPU_DFL_MAP_ALLOC;
	while (new_alloc < chunk->map_used + 2)
		new_alloc *= 2;

	return new_alloc;
}

/**
 * pcpu_extend_area_map - extend area map of a chunk
 * @chunk: chunk of interest
 * @new_alloc: new target allocation length of the area map
 *
 * Extend area map of @chunk to have @new_alloc entries.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.  Grabs and releases pcpu_lock.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
{
	int *old = NULL, *new = NULL;
	size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
	unsigned long flags;

	new = pcpu_mem_alloc(new_size);
	if (!new)
		return -ENOMEM;

	/* acquire pcpu_lock and switch to new area map */
	spin_lock_irqsave(&pcpu_lock, flags);

	if (new_alloc <= chunk->map_alloc)
		goto out_unlock;

	old_size = chunk->map_alloc * sizeof(chunk->map[0]);
	memcpy(new, chunk->map, old_size);

	/*
	 * map_alloc < PCPU_DFL_MAP_ALLOC indicates that the chunk is
	 * one of the first chunks and still using static map.
	 */
	if (chunk->map_alloc >= PCPU_DFL_MAP_ALLOC)
		old = chunk->map;

	chunk->map_alloc = new_alloc;
	chunk->map = new;
	new = NULL;

out_unlock:
	spin_unlock_irqrestore(&pcpu_lock, flags);

	/*
	 * pcpu_mem_free() might end up calling vfree() which uses
	 * IRQ-unsafe lock and thus can't be called under pcpu_lock.
	 */
	pcpu_mem_free(old, old_size);
	pcpu_mem_free(new, new_size);

	return 0;
}

/**
 * pcpu_split_block - split a map block
 * @chunk: chunk of interest
 * @i: index of map block to split
 * @head: head size in bytes (can be 0)
 * @tail: tail size in bytes (can be 0)
 *
 * Split the @i'th map block into two or three blocks.  If @head is
 * non-zero, @head bytes block is inserted before block @i moving it
 * to @i+1 and reducing its size by @head bytes.
 *
 * If @tail is non-zero, the target block, which can be @i or @i+1
 * depending on @head, is reduced by @tail bytes and @tail byte block
 * is inserted after the target block.
 *
 * @chunk->map must have enough free slots to accommodate the split.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_split_block(struct pcpu_chunk *chunk, int i,
			     int head, int tail)
{
	int nr_extra = !!head + !!tail;

	BUG_ON(chunk->map_alloc < chunk->map_used + nr_extra);

	/* insert new subblocks */
	memmove(&chunk->map[i + nr_extra], &chunk->map[i],
		sizeof(chunk->map[0]) * (chunk->map_used - i));
	chunk->map_used += nr_extra;

	if (head) {
		chunk->map[i + 1] = chunk->map[i] - head;
		chunk->map[i++] = head;
	}
	if (tail) {
		chunk->map[i++] -= tail;
		chunk->map[i] = tail;
	}
}

/**
 * pcpu_alloc_area - allocate area from a pcpu_chunk
 * @chunk: chunk of interest
 * @size: wanted size in bytes
 * @align: wanted align
 *
 * Try to allocate @size bytes area aligned at @align from @chunk.
 * Note that this function only allocates the offset.  It doesn't
 * populate or map the area.
 *
 * @chunk->map must have at least two free slots.
 *
 * CONTEXT:
 * pcpu_lock.
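 *
 * As a worked example with made-up numbers: with map == { -64, 160, -512 }
 * (64 bytes allocated, 160 bytes free at offset 64, 512 bytes
 * allocated), a request for 96 bytes aligned to 64 fits into the free
 * block with no head and a 64 byte tail, so the map becomes
 * { -64, -96, 64, -512 } and offset 64 is returned.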
484 * 485 * RETURNS: 486 * Allocated offset in @chunk on success, -1 if no matching area is 487 * found. 488 */ 489static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align) 490{ 491 int oslot = pcpu_chunk_slot(chunk); 492 int max_contig = 0; 493 int i, off; 494 495 for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) { 496 bool is_last = i + 1 == chunk->map_used; 497 int head, tail; 498 499 /* extra for alignment requirement */ 500 head = ALIGN(off, align) - off; 501 BUG_ON(i == 0 && head != 0); 502 503 if (chunk->map[i] < 0) 504 continue; 505 if (chunk->map[i] < head + size) { 506 max_contig = max(chunk->map[i], max_contig); 507 continue; 508 } 509 510 /* 511 * If head is small or the previous block is free, 512 * merge'em. Note that 'small' is defined as smaller 513 * than sizeof(int), which is very small but isn't too 514 * uncommon for percpu allocations. 515 */ 516 if (head && (head < sizeof(int) || chunk->map[i - 1] > 0)) { 517 if (chunk->map[i - 1] > 0) 518 chunk->map[i - 1] += head; 519 else { 520 chunk->map[i - 1] -= head; 521 chunk->free_size -= head; 522 } 523 chunk->map[i] -= head; 524 off += head; 525 head = 0; 526 } 527 528 /* if tail is small, just keep it around */ 529 tail = chunk->map[i] - head - size; 530 if (tail < sizeof(int)) 531 tail = 0; 532 533 /* split if warranted */ 534 if (head || tail) { 535 pcpu_split_block(chunk, i, head, tail); 536 if (head) { 537 i++; 538 off += head; 539 max_contig = max(chunk->map[i - 1], max_contig); 540 } 541 if (tail) 542 max_contig = max(chunk->map[i + 1], max_contig); 543 } 544 545 /* update hint and mark allocated */ 546 if (is_last) 547 chunk->contig_hint = max_contig; /* fully scanned */ 548 else 549 chunk->contig_hint = max(chunk->contig_hint, 550 max_contig); 551 552 chunk->free_size -= chunk->map[i]; 553 chunk->map[i] = -chunk->map[i]; 554 555 pcpu_chunk_relocate(chunk, oslot); 556 return off; 557 } 558 559 chunk->contig_hint = max_contig; /* fully scanned */ 560 pcpu_chunk_relocate(chunk, oslot); 561 562 /* tell the upper layer that this chunk has no matching area */ 563 return -1; 564} 565 566/** 567 * pcpu_free_area - free area to a pcpu_chunk 568 * @chunk: chunk of interest 569 * @freeme: offset of area to free 570 * 571 * Free area starting from @freeme to @chunk. Note that this function 572 * only modifies the allocation map. It doesn't depopulate or unmap 573 * the area. 574 * 575 * CONTEXT: 576 * pcpu_lock. 577 */ 578static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme) 579{ 580 int oslot = pcpu_chunk_slot(chunk); 581 int i, off; 582 583 for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) 584 if (off == freeme) 585 break; 586 BUG_ON(off != freeme); 587 BUG_ON(chunk->map[i] > 0); 588 589 chunk->map[i] = -chunk->map[i]; 590 chunk->free_size += chunk->map[i]; 591 592 /* merge with previous? */ 593 if (i > 0 && chunk->map[i - 1] >= 0) { 594 chunk->map[i - 1] += chunk->map[i]; 595 chunk->map_used--; 596 memmove(&chunk->map[i], &chunk->map[i + 1], 597 (chunk->map_used - i) * sizeof(chunk->map[0])); 598 i--; 599 } 600 /* merge with next? 
*/ 601 if (i + 1 < chunk->map_used && chunk->map[i + 1] >= 0) { 602 chunk->map[i] += chunk->map[i + 1]; 603 chunk->map_used--; 604 memmove(&chunk->map[i + 1], &chunk->map[i + 2], 605 (chunk->map_used - (i + 1)) * sizeof(chunk->map[0])); 606 } 607 608 chunk->contig_hint = max(chunk->map[i], chunk->contig_hint); 609 pcpu_chunk_relocate(chunk, oslot); 610} 611 612static struct pcpu_chunk *pcpu_alloc_chunk(void) 613{ 614 struct pcpu_chunk *chunk; 615 616 chunk = kzalloc(pcpu_chunk_struct_size, GFP_KERNEL); 617 if (!chunk) 618 return NULL; 619 620 chunk->map = pcpu_mem_alloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0])); 621 if (!chunk->map) { 622 kfree(chunk); 623 return NULL; 624 } 625 626 chunk->map_alloc = PCPU_DFL_MAP_ALLOC; 627 chunk->map[chunk->map_used++] = pcpu_unit_size; 628 629 INIT_LIST_HEAD(&chunk->list); 630 chunk->free_size = pcpu_unit_size; 631 chunk->contig_hint = pcpu_unit_size; 632 633 return chunk; 634} 635 636static void pcpu_free_chunk(struct pcpu_chunk *chunk) 637{ 638 if (!chunk) 639 return; 640 pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0])); 641 kfree(chunk); 642} 643 644/** 645 * pcpu_get_pages_and_bitmap - get temp pages array and bitmap 646 * @chunk: chunk of interest 647 * @bitmapp: output parameter for bitmap 648 * @may_alloc: may allocate the array 649 * 650 * Returns pointer to array of pointers to struct page and bitmap, 651 * both of which can be indexed with pcpu_page_idx(). The returned 652 * array is cleared to zero and *@bitmapp is copied from 653 * @chunk->populated. Note that there is only one array and bitmap 654 * and access exclusion is the caller's responsibility. 655 * 656 * CONTEXT: 657 * pcpu_alloc_mutex and does GFP_KERNEL allocation if @may_alloc. 658 * Otherwise, don't care. 659 * 660 * RETURNS: 661 * Pointer to temp pages array on success, NULL on failure. 662 */ 663static struct page **pcpu_get_pages_and_bitmap(struct pcpu_chunk *chunk, 664 unsigned long **bitmapp, 665 bool may_alloc) 666{ 667 static struct page **pages; 668 static unsigned long *bitmap; 669 size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]); 670 size_t bitmap_size = BITS_TO_LONGS(pcpu_unit_pages) * 671 sizeof(unsigned long); 672 673 if (!pages || !bitmap) { 674 if (may_alloc && !pages) 675 pages = pcpu_mem_alloc(pages_size); 676 if (may_alloc && !bitmap) 677 bitmap = pcpu_mem_alloc(bitmap_size); 678 if (!pages || !bitmap) 679 return NULL; 680 } 681 682 memset(pages, 0, pages_size); 683 bitmap_copy(bitmap, chunk->populated, pcpu_unit_pages); 684 685 *bitmapp = bitmap; 686 return pages; 687} 688 689/** 690 * pcpu_free_pages - free pages which were allocated for @chunk 691 * @chunk: chunk pages were allocated for 692 * @pages: array of pages to be freed, indexed by pcpu_page_idx() 693 * @populated: populated bitmap 694 * @page_start: page index of the first page to be freed 695 * @page_end: page index of the last page to be freed + 1 696 * 697 * Free pages [@page_start and @page_end) in @pages for all units. 698 * The pages were allocated for @chunk. 
699 */ 700static void pcpu_free_pages(struct pcpu_chunk *chunk, 701 struct page **pages, unsigned long *populated, 702 int page_start, int page_end) 703{ 704 unsigned int cpu; 705 int i; 706 707 for_each_possible_cpu(cpu) { 708 for (i = page_start; i < page_end; i++) { 709 struct page *page = pages[pcpu_page_idx(cpu, i)]; 710 711 if (page) 712 __free_page(page); 713 } 714 } 715} 716 717/** 718 * pcpu_alloc_pages - allocates pages for @chunk 719 * @chunk: target chunk 720 * @pages: array to put the allocated pages into, indexed by pcpu_page_idx() 721 * @populated: populated bitmap 722 * @page_start: page index of the first page to be allocated 723 * @page_end: page index of the last page to be allocated + 1 724 * 725 * Allocate pages [@page_start,@page_end) into @pages for all units. 726 * The allocation is for @chunk. Percpu core doesn't care about the 727 * content of @pages and will pass it verbatim to pcpu_map_pages(). 728 */ 729static int pcpu_alloc_pages(struct pcpu_chunk *chunk, 730 struct page **pages, unsigned long *populated, 731 int page_start, int page_end) 732{ 733 const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD; 734 unsigned int cpu; 735 int i; 736 737 for_each_possible_cpu(cpu) { 738 for (i = page_start; i < page_end; i++) { 739 struct page **pagep = &pages[pcpu_page_idx(cpu, i)]; 740 741 *pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0); 742 if (!*pagep) { 743 pcpu_free_pages(chunk, pages, populated, 744 page_start, page_end); 745 return -ENOMEM; 746 } 747 } 748 } 749 return 0; 750} 751 752/** 753 * pcpu_pre_unmap_flush - flush cache prior to unmapping 754 * @chunk: chunk the regions to be flushed belongs to 755 * @page_start: page index of the first page to be flushed 756 * @page_end: page index of the last page to be flushed + 1 757 * 758 * Pages in [@page_start,@page_end) of @chunk are about to be 759 * unmapped. Flush cache. As each flushing trial can be very 760 * expensive, issue flush on the whole region at once rather than 761 * doing it for each cpu. This could be an overkill but is more 762 * scalable. 763 */ 764static void pcpu_pre_unmap_flush(struct pcpu_chunk *chunk, 765 int page_start, int page_end) 766{ 767 flush_cache_vunmap( 768 pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start), 769 pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end)); 770} 771 772static void __pcpu_unmap_pages(unsigned long addr, int nr_pages) 773{ 774 unmap_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT); 775} 776 777/** 778 * pcpu_unmap_pages - unmap pages out of a pcpu_chunk 779 * @chunk: chunk of interest 780 * @pages: pages array which can be used to pass information to free 781 * @populated: populated bitmap 782 * @page_start: page index of the first page to unmap 783 * @page_end: page index of the last page to unmap + 1 784 * 785 * For each cpu, unmap pages [@page_start,@page_end) out of @chunk. 786 * Corresponding elements in @pages were cleared by the caller and can 787 * be used to carry information to pcpu_free_pages() which will be 788 * called after all unmaps are finished. The caller should call 789 * proper pre/post flush functions. 
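 *
 * For reference, the depopulation path in pcpu_depopulate_chunk()
 * uses this roughly as:
 *
 *	pcpu_pre_unmap_flush(chunk, page_start, page_end);
 *	pcpu_unmap_pages(chunk, pages, populated, rs, re);
 *	pcpu_free_pages(chunk, pages, populated, rs, re);
 *
 * with the TLB flush skipped because vmalloc flushes lazily.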
790 */ 791static void pcpu_unmap_pages(struct pcpu_chunk *chunk, 792 struct page **pages, unsigned long *populated, 793 int page_start, int page_end) 794{ 795 unsigned int cpu; 796 int i; 797 798 for_each_possible_cpu(cpu) { 799 for (i = page_start; i < page_end; i++) { 800 struct page *page; 801 802 page = pcpu_chunk_page(chunk, cpu, i); 803 WARN_ON(!page); 804 pages[pcpu_page_idx(cpu, i)] = page; 805 } 806 __pcpu_unmap_pages(pcpu_chunk_addr(chunk, cpu, page_start), 807 page_end - page_start); 808 } 809 810 for (i = page_start; i < page_end; i++) 811 __clear_bit(i, populated); 812} 813 814/** 815 * pcpu_post_unmap_tlb_flush - flush TLB after unmapping 816 * @chunk: pcpu_chunk the regions to be flushed belong to 817 * @page_start: page index of the first page to be flushed 818 * @page_end: page index of the last page to be flushed + 1 819 * 820 * Pages [@page_start,@page_end) of @chunk have been unmapped. Flush 821 * TLB for the regions. This can be skipped if the area is to be 822 * returned to vmalloc as vmalloc will handle TLB flushing lazily. 823 * 824 * As with pcpu_pre_unmap_flush(), TLB flushing also is done at once 825 * for the whole region. 826 */ 827static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk, 828 int page_start, int page_end) 829{ 830 flush_tlb_kernel_range( 831 pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start), 832 pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end)); 833} 834 835static int __pcpu_map_pages(unsigned long addr, struct page **pages, 836 int nr_pages) 837{ 838 return map_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT, 839 PAGE_KERNEL, pages); 840} 841 842/** 843 * pcpu_map_pages - map pages into a pcpu_chunk 844 * @chunk: chunk of interest 845 * @pages: pages array containing pages to be mapped 846 * @populated: populated bitmap 847 * @page_start: page index of the first page to map 848 * @page_end: page index of the last page to map + 1 849 * 850 * For each cpu, map pages [@page_start,@page_end) into @chunk. The 851 * caller is responsible for calling pcpu_post_map_flush() after all 852 * mappings are complete. 853 * 854 * This function is responsible for setting corresponding bits in 855 * @chunk->populated bitmap and whatever is necessary for reverse 856 * lookup (addr -> chunk). 857 */ 858static int pcpu_map_pages(struct pcpu_chunk *chunk, 859 struct page **pages, unsigned long *populated, 860 int page_start, int page_end) 861{ 862 unsigned int cpu, tcpu; 863 int i, err; 864 865 for_each_possible_cpu(cpu) { 866 err = __pcpu_map_pages(pcpu_chunk_addr(chunk, cpu, page_start), 867 &pages[pcpu_page_idx(cpu, page_start)], 868 page_end - page_start); 869 if (err < 0) 870 goto err; 871 } 872 873 /* mapping successful, link chunk and mark populated */ 874 for (i = page_start; i < page_end; i++) { 875 for_each_possible_cpu(cpu) 876 pcpu_set_page_chunk(pages[pcpu_page_idx(cpu, i)], 877 chunk); 878 __set_bit(i, populated); 879 } 880 881 return 0; 882 883err: 884 for_each_possible_cpu(tcpu) { 885 if (tcpu == cpu) 886 break; 887 __pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start), 888 page_end - page_start); 889 } 890 return err; 891} 892 893/** 894 * pcpu_post_map_flush - flush cache after mapping 895 * @chunk: pcpu_chunk the regions to be flushed belong to 896 * @page_start: page index of the first page to be flushed 897 * @page_end: page index of the last page to be flushed + 1 898 * 899 * Pages [@page_start,@page_end) of @chunk have been mapped. Flush 900 * cache. 
901 * 902 * As with pcpu_pre_unmap_flush(), TLB flushing also is done at once 903 * for the whole region. 904 */ 905static void pcpu_post_map_flush(struct pcpu_chunk *chunk, 906 int page_start, int page_end) 907{ 908 flush_cache_vmap( 909 pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start), 910 pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end)); 911} 912 913/** 914 * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk 915 * @chunk: chunk to depopulate 916 * @off: offset to the area to depopulate 917 * @size: size of the area to depopulate in bytes 918 * @flush: whether to flush cache and tlb or not 919 * 920 * For each cpu, depopulate and unmap pages [@page_start,@page_end) 921 * from @chunk. If @flush is true, vcache is flushed before unmapping 922 * and tlb after. 923 * 924 * CONTEXT: 925 * pcpu_alloc_mutex. 926 */ 927static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size) 928{ 929 int page_start = PFN_DOWN(off); 930 int page_end = PFN_UP(off + size); 931 struct page **pages; 932 unsigned long *populated; 933 int rs, re; 934 935 /* quick path, check whether it's empty already */ 936 rs = page_start; 937 pcpu_next_unpop(chunk, &rs, &re, page_end); 938 if (rs == page_start && re == page_end) 939 return; 940 941 /* immutable chunks can't be depopulated */ 942 WARN_ON(chunk->immutable); 943 944 /* 945 * If control reaches here, there must have been at least one 946 * successful population attempt so the temp pages array must 947 * be available now. 948 */ 949 pages = pcpu_get_pages_and_bitmap(chunk, &populated, false); 950 BUG_ON(!pages); 951 952 /* unmap and free */ 953 pcpu_pre_unmap_flush(chunk, page_start, page_end); 954 955 pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end) 956 pcpu_unmap_pages(chunk, pages, populated, rs, re); 957 958 /* no need to flush tlb, vmalloc will handle it lazily */ 959 960 pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end) 961 pcpu_free_pages(chunk, pages, populated, rs, re); 962 963 /* commit new bitmap */ 964 bitmap_copy(chunk->populated, populated, pcpu_unit_pages); 965} 966 967/** 968 * pcpu_populate_chunk - populate and map an area of a pcpu_chunk 969 * @chunk: chunk of interest 970 * @off: offset to the area to populate 971 * @size: size of the area to populate in bytes 972 * 973 * For each cpu, populate and map pages [@page_start,@page_end) into 974 * @chunk. The area is cleared on return. 975 * 976 * CONTEXT: 977 * pcpu_alloc_mutex, does GFP_KERNEL allocation. 
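 *
 * In the allocation path, pcpu_alloc() first reserves an offset with
 * pcpu_alloc_area() under pcpu_lock and then calls this function with
 * only pcpu_alloc_mutex held to back the area with actual pages.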
978 */ 979static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size) 980{ 981 int page_start = PFN_DOWN(off); 982 int page_end = PFN_UP(off + size); 983 int free_end = page_start, unmap_end = page_start; 984 struct page **pages; 985 unsigned long *populated; 986 unsigned int cpu; 987 int rs, re, rc; 988 989 /* quick path, check whether all pages are already there */ 990 rs = page_start; 991 pcpu_next_pop(chunk, &rs, &re, page_end); 992 if (rs == page_start && re == page_end) 993 goto clear; 994 995 /* need to allocate and map pages, this chunk can't be immutable */ 996 WARN_ON(chunk->immutable); 997 998 pages = pcpu_get_pages_and_bitmap(chunk, &populated, true); 999 if (!pages) 1000 return -ENOMEM; 1001 1002 /* alloc and map */ 1003 pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) { 1004 rc = pcpu_alloc_pages(chunk, pages, populated, rs, re); 1005 if (rc) 1006 goto err_free; 1007 free_end = re; 1008 } 1009 1010 pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) { 1011 rc = pcpu_map_pages(chunk, pages, populated, rs, re); 1012 if (rc) 1013 goto err_unmap; 1014 unmap_end = re; 1015 } 1016 pcpu_post_map_flush(chunk, page_start, page_end); 1017 1018 /* commit new bitmap */ 1019 bitmap_copy(chunk->populated, populated, pcpu_unit_pages); 1020clear: 1021 for_each_possible_cpu(cpu) 1022 memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size); 1023 return 0; 1024 1025err_unmap: 1026 pcpu_pre_unmap_flush(chunk, page_start, unmap_end); 1027 pcpu_for_each_unpop_region(chunk, rs, re, page_start, unmap_end) 1028 pcpu_unmap_pages(chunk, pages, populated, rs, re); 1029 pcpu_post_unmap_tlb_flush(chunk, page_start, unmap_end); 1030err_free: 1031 pcpu_for_each_unpop_region(chunk, rs, re, page_start, free_end) 1032 pcpu_free_pages(chunk, pages, populated, rs, re); 1033 return rc; 1034} 1035 1036static void pcpu_destroy_chunk(struct pcpu_chunk *chunk) 1037{ 1038 if (chunk && chunk->data) 1039 pcpu_free_vm_areas(chunk->data, pcpu_nr_groups); 1040 pcpu_free_chunk(chunk); 1041} 1042 1043static struct pcpu_chunk *pcpu_create_chunk(void) 1044{ 1045 struct pcpu_chunk *chunk; 1046 struct vm_struct **vms; 1047 1048 chunk = pcpu_alloc_chunk(); 1049 if (!chunk) 1050 return NULL; 1051 1052 vms = pcpu_get_vm_areas(pcpu_group_offsets, pcpu_group_sizes, 1053 pcpu_nr_groups, pcpu_atom_size, GFP_KERNEL); 1054 if (!vms) { 1055 pcpu_free_chunk(chunk); 1056 return NULL; 1057 } 1058 1059 chunk->data = vms; 1060 chunk->base_addr = vms[0]->addr - pcpu_group_offsets[0]; 1061 return chunk; 1062} 1063 1064/** 1065 * pcpu_chunk_addr_search - determine chunk containing specified address 1066 * @addr: address for which the chunk needs to be determined. 1067 * 1068 * RETURNS: 1069 * The address of the found chunk. 1070 */ 1071static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr) 1072{ 1073 /* is it in the first chunk? */ 1074 if (pcpu_addr_in_first_chunk(addr)) { 1075 /* is it in the reserved area? */ 1076 if (pcpu_addr_in_reserved_chunk(addr)) 1077 return pcpu_reserved_chunk; 1078 return pcpu_first_chunk; 1079 } 1080 1081 /* 1082 * The address is relative to unit0 which might be unused and 1083 * thus unmapped. Offset the address to the unit space of the 1084 * current processor before looking it up in the vmalloc 1085 * space. Note that any possible cpu id can be used here, so 1086 * there's no need to worry about preemption or cpu hotplug. 
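	 *
	 * The lookup itself relies on pcpu_map_pages() having stashed
	 * the owning chunk in page->index via pcpu_set_page_chunk(),
	 * so vmalloc_to_page() followed by pcpu_get_page_chunk()
	 * recovers the chunk.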
1087 */ 1088 addr += pcpu_unit_offsets[raw_smp_processor_id()]; 1089 return pcpu_get_page_chunk(vmalloc_to_page(addr)); 1090} 1091 1092/** 1093 * pcpu_alloc - the percpu allocator 1094 * @size: size of area to allocate in bytes 1095 * @align: alignment of area (max PAGE_SIZE) 1096 * @reserved: allocate from the reserved chunk if available 1097 * 1098 * Allocate percpu area of @size bytes aligned at @align. 1099 * 1100 * CONTEXT: 1101 * Does GFP_KERNEL allocation. 1102 * 1103 * RETURNS: 1104 * Percpu pointer to the allocated area on success, NULL on failure. 1105 */ 1106static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved) 1107{ 1108 static int warn_limit = 10; 1109 struct pcpu_chunk *chunk; 1110 const char *err; 1111 int slot, off, new_alloc; 1112 unsigned long flags; 1113 1114 if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) { 1115 WARN(true, "illegal size (%zu) or align (%zu) for " 1116 "percpu allocation\n", size, align); 1117 return NULL; 1118 } 1119 1120 mutex_lock(&pcpu_alloc_mutex); 1121 spin_lock_irqsave(&pcpu_lock, flags); 1122 1123 /* serve reserved allocations from the reserved chunk if available */ 1124 if (reserved && pcpu_reserved_chunk) { 1125 chunk = pcpu_reserved_chunk; 1126 1127 if (size > chunk->contig_hint) { 1128 err = "alloc from reserved chunk failed"; 1129 goto fail_unlock; 1130 } 1131 1132 while ((new_alloc = pcpu_need_to_extend(chunk))) { 1133 spin_unlock_irqrestore(&pcpu_lock, flags); 1134 if (pcpu_extend_area_map(chunk, new_alloc) < 0) { 1135 err = "failed to extend area map of reserved chunk"; 1136 goto fail_unlock_mutex; 1137 } 1138 spin_lock_irqsave(&pcpu_lock, flags); 1139 } 1140 1141 off = pcpu_alloc_area(chunk, size, align); 1142 if (off >= 0) 1143 goto area_found; 1144 1145 err = "alloc from reserved chunk failed"; 1146 goto fail_unlock; 1147 } 1148 1149restart: 1150 /* search through normal chunks */ 1151 for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) { 1152 list_for_each_entry(chunk, &pcpu_slot[slot], list) { 1153 if (size > chunk->contig_hint) 1154 continue; 1155 1156 new_alloc = pcpu_need_to_extend(chunk); 1157 if (new_alloc) { 1158 spin_unlock_irqrestore(&pcpu_lock, flags); 1159 if (pcpu_extend_area_map(chunk, 1160 new_alloc) < 0) { 1161 err = "failed to extend area map"; 1162 goto fail_unlock_mutex; 1163 } 1164 spin_lock_irqsave(&pcpu_lock, flags); 1165 /* 1166 * pcpu_lock has been dropped, need to 1167 * restart cpu_slot list walking. 1168 */ 1169 goto restart; 1170 } 1171 1172 off = pcpu_alloc_area(chunk, size, align); 1173 if (off >= 0) 1174 goto area_found; 1175 } 1176 } 1177 1178 /* hmmm... 
no space left, create a new chunk */ 1179 spin_unlock_irqrestore(&pcpu_lock, flags); 1180 1181 chunk = pcpu_create_chunk(); 1182 if (!chunk) { 1183 err = "failed to allocate new chunk"; 1184 goto fail_unlock_mutex; 1185 } 1186 1187 spin_lock_irqsave(&pcpu_lock, flags); 1188 pcpu_chunk_relocate(chunk, -1); 1189 goto restart; 1190 1191area_found: 1192 spin_unlock_irqrestore(&pcpu_lock, flags); 1193 1194 /* populate, map and clear the area */ 1195 if (pcpu_populate_chunk(chunk, off, size)) { 1196 spin_lock_irqsave(&pcpu_lock, flags); 1197 pcpu_free_area(chunk, off); 1198 err = "failed to populate"; 1199 goto fail_unlock; 1200 } 1201 1202 mutex_unlock(&pcpu_alloc_mutex); 1203 1204 /* return address relative to base address */ 1205 return __addr_to_pcpu_ptr(chunk->base_addr + off); 1206 1207fail_unlock: 1208 spin_unlock_irqrestore(&pcpu_lock, flags); 1209fail_unlock_mutex: 1210 mutex_unlock(&pcpu_alloc_mutex); 1211 if (warn_limit) { 1212 pr_warning("PERCPU: allocation failed, size=%zu align=%zu, " 1213 "%s\n", size, align, err); 1214 dump_stack(); 1215 if (!--warn_limit) 1216 pr_info("PERCPU: limit reached, disable warning\n"); 1217 } 1218 return NULL; 1219} 1220 1221/** 1222 * __alloc_percpu - allocate dynamic percpu area 1223 * @size: size of area to allocate in bytes 1224 * @align: alignment of area (max PAGE_SIZE) 1225 * 1226 * Allocate percpu area of @size bytes aligned at @align. Might 1227 * sleep. Might trigger writeouts. 1228 * 1229 * CONTEXT: 1230 * Does GFP_KERNEL allocation. 1231 * 1232 * RETURNS: 1233 * Percpu pointer to the allocated area on success, NULL on failure. 1234 */ 1235void __percpu *__alloc_percpu(size_t size, size_t align) 1236{ 1237 return pcpu_alloc(size, align, false); 1238} 1239EXPORT_SYMBOL_GPL(__alloc_percpu); 1240 1241/** 1242 * __alloc_reserved_percpu - allocate reserved percpu area 1243 * @size: size of area to allocate in bytes 1244 * @align: alignment of area (max PAGE_SIZE) 1245 * 1246 * Allocate percpu area of @size bytes aligned at @align from reserved 1247 * percpu area if arch has set it up; otherwise, allocation is served 1248 * from the same dynamic area. Might sleep. Might trigger writeouts. 1249 * 1250 * CONTEXT: 1251 * Does GFP_KERNEL allocation. 1252 * 1253 * RETURNS: 1254 * Percpu pointer to the allocated area on success, NULL on failure. 1255 */ 1256void __percpu *__alloc_reserved_percpu(size_t size, size_t align) 1257{ 1258 return pcpu_alloc(size, align, true); 1259} 1260 1261/** 1262 * pcpu_reclaim - reclaim fully free chunks, workqueue function 1263 * @work: unused 1264 * 1265 * Reclaim all fully free chunks except for the first one. 1266 * 1267 * CONTEXT: 1268 * workqueue context. 
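 *
 * Scheduled from free_percpu() via pcpu_reclaim_work whenever more
 * than one fully free chunk exists.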
1269 */ 1270static void pcpu_reclaim(struct work_struct *work) 1271{ 1272 LIST_HEAD(todo); 1273 struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1]; 1274 struct pcpu_chunk *chunk, *next; 1275 1276 mutex_lock(&pcpu_alloc_mutex); 1277 spin_lock_irq(&pcpu_lock); 1278 1279 list_for_each_entry_safe(chunk, next, head, list) { 1280 WARN_ON(chunk->immutable); 1281 1282 /* spare the first one */ 1283 if (chunk == list_first_entry(head, struct pcpu_chunk, list)) 1284 continue; 1285 1286 list_move(&chunk->list, &todo); 1287 } 1288 1289 spin_unlock_irq(&pcpu_lock); 1290 1291 list_for_each_entry_safe(chunk, next, &todo, list) { 1292 pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size); 1293 pcpu_destroy_chunk(chunk); 1294 } 1295 1296 mutex_unlock(&pcpu_alloc_mutex); 1297} 1298 1299/** 1300 * free_percpu - free percpu area 1301 * @ptr: pointer to area to free 1302 * 1303 * Free percpu area @ptr. 1304 * 1305 * CONTEXT: 1306 * Can be called from atomic context. 1307 */ 1308void free_percpu(void __percpu *ptr) 1309{ 1310 void *addr; 1311 struct pcpu_chunk *chunk; 1312 unsigned long flags; 1313 int off; 1314 1315 if (!ptr) 1316 return; 1317 1318 addr = __pcpu_ptr_to_addr(ptr); 1319 1320 spin_lock_irqsave(&pcpu_lock, flags); 1321 1322 chunk = pcpu_chunk_addr_search(addr); 1323 off = addr - chunk->base_addr; 1324 1325 pcpu_free_area(chunk, off); 1326 1327 /* if there are more than one fully free chunks, wake up grim reaper */ 1328 if (chunk->free_size == pcpu_unit_size) { 1329 struct pcpu_chunk *pos; 1330 1331 list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list) 1332 if (pos != chunk) { 1333 schedule_work(&pcpu_reclaim_work); 1334 break; 1335 } 1336 } 1337 1338 spin_unlock_irqrestore(&pcpu_lock, flags); 1339} 1340EXPORT_SYMBOL_GPL(free_percpu); 1341 1342/** 1343 * is_kernel_percpu_address - test whether address is from static percpu area 1344 * @addr: address to test 1345 * 1346 * Test whether @addr belongs to in-kernel static percpu area. Module 1347 * static percpu areas are not considered. For those, use 1348 * is_module_percpu_address(). 1349 * 1350 * RETURNS: 1351 * %true if @addr is from in-kernel static percpu area, %false otherwise. 1352 */ 1353bool is_kernel_percpu_address(unsigned long addr) 1354{ 1355 const size_t static_size = __per_cpu_end - __per_cpu_start; 1356 void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr); 1357 unsigned int cpu; 1358 1359 for_each_possible_cpu(cpu) { 1360 void *start = per_cpu_ptr(base, cpu); 1361 1362 if ((void *)addr >= start && (void *)addr < start + static_size) 1363 return true; 1364 } 1365 return false; 1366} 1367 1368/** 1369 * per_cpu_ptr_to_phys - convert translated percpu address to physical address 1370 * @addr: the address to be converted to physical address 1371 * 1372 * Given @addr which is dereferenceable address obtained via one of 1373 * percpu access macros, this function translates it into its physical 1374 * address. The caller is responsible for ensuring @addr stays valid 1375 * until this function finishes. 1376 * 1377 * RETURNS: 1378 * The physical address for @addr. 
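 *
 * A minimal usage sketch (illustrative):
 *
 *	int __percpu *p = alloc_percpu(int);
 *	phys_addr_t pa = per_cpu_ptr_to_phys(per_cpu_ptr(p, cpu));
 *
 * which yields the physical address backing cpu's copy of *p.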
 */
phys_addr_t per_cpu_ptr_to_phys(void *addr)
{
	if (pcpu_addr_in_first_chunk(addr)) {
		if ((unsigned long)addr < VMALLOC_START ||
		    (unsigned long)addr >= VMALLOC_END)
			return __pa(addr);
		else
			return page_to_phys(vmalloc_to_page(addr));
	} else
		return page_to_phys(vmalloc_to_page(addr));
}

static inline size_t pcpu_calc_fc_sizes(size_t static_size,
					size_t reserved_size,
					ssize_t *dyn_sizep)
{
	size_t size_sum;

	size_sum = PFN_ALIGN(static_size + reserved_size +
			     (*dyn_sizep >= 0 ? *dyn_sizep : 0));
	if (*dyn_sizep != 0)
		*dyn_sizep = size_sum - static_size - reserved_size;

	return size_sum;
}

/**
 * pcpu_alloc_alloc_info - allocate percpu allocation info
 * @nr_groups: the number of groups
 * @nr_units: the number of units
 *
 * Allocate ai which is large enough for @nr_groups groups containing
 * @nr_units units.  The returned ai's groups[0].cpu_map points to the
 * cpu_map array which is long enough for @nr_units and filled with
 * NR_CPUS.  It's the caller's responsibility to initialize cpu_map
 * pointer of other groups.
 *
 * RETURNS:
 * Pointer to the allocated pcpu_alloc_info on success, NULL on
 * failure.
 */
struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
						      int nr_units)
{
	struct pcpu_alloc_info *ai;
	size_t base_size, ai_size;
	void *ptr;
	int unit;

	base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]),
			  __alignof__(ai->groups[0].cpu_map[0]));
	ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);

	ptr = alloc_bootmem_nopanic(PFN_ALIGN(ai_size));
	if (!ptr)
		return NULL;
	ai = ptr;
	ptr += base_size;

	ai->groups[0].cpu_map = ptr;

	for (unit = 0; unit < nr_units; unit++)
		ai->groups[0].cpu_map[unit] = NR_CPUS;

	ai->nr_groups = nr_groups;
	ai->__ai_size = PFN_ALIGN(ai_size);

	return ai;
}

/**
 * pcpu_free_alloc_info - free percpu allocation info
 * @ai: pcpu_alloc_info to free
 *
 * Free @ai which was allocated by pcpu_alloc_alloc_info().
 */
void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
{
	free_bootmem(__pa(ai), ai->__ai_size);
}

/**
 * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
 * @reserved_size: the size of reserved percpu area in bytes
 * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
 * @atom_size: allocation atom size
 * @cpu_distance_fn: callback to determine distance between cpus, optional
 *
 * This function determines grouping of units, their mappings to cpus
 * and other parameters considering needed percpu size, allocation
 * atom size and distances between CPUs.
 *
 * Groups are always multiples of atom size and CPUs which are of
 * LOCAL_DISTANCE both ways are grouped together and share space for
 * units in the same group.  The returned configuration is guaranteed
 * to have CPUs on different nodes on different groups and >=75% usage
 * of allocated virtual address space.
 *
 * RETURNS:
 * On success, pointer to the new allocation_info is returned.  On
 * failure, ERR_PTR value is returned.
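 *
 * As a worked example with made-up numbers: for 4 possible CPUs all
 * within LOCAL_DISTANCE of each other, an @atom_size of 2MB and a
 * per-unit size_sum of 320KB, alloc_size is rounded up to 2MB and
 * units_per_alloc works out to 4, i.e. one group with a 512KB unit
 * per cpu packed into a single 2MB allocation.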
 */
struct pcpu_alloc_info * __init pcpu_build_alloc_info(
				size_t reserved_size, ssize_t dyn_size,
				size_t atom_size,
				pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
{
	static int group_map[NR_CPUS] __initdata;
	static int group_cnt[NR_CPUS] __initdata;
	const size_t static_size = __per_cpu_end - __per_cpu_start;
	int group_cnt_max = 0, nr_groups = 1, nr_units = 0;
	size_t size_sum, min_unit_size, alloc_size;
	int upa, max_upa, uninitialized_var(best_upa);	/* units_per_alloc */
	int last_allocs, group, unit;
	unsigned int cpu, tcpu;
	struct pcpu_alloc_info *ai;
	unsigned int *cpu_map;

	/* this function may be called multiple times */
	memset(group_map, 0, sizeof(group_map));
	memset(group_cnt, 0, sizeof(group_cnt));

	/*
	 * Determine min_unit_size, alloc_size and max_upa such that
	 * alloc_size is multiple of atom_size and is the smallest
	 * which can accommodate 4k aligned segments which are equal to
	 * or larger than min_unit_size.
	 */
	size_sum = pcpu_calc_fc_sizes(static_size, reserved_size, &dyn_size);
	min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);

	alloc_size = roundup(min_unit_size, atom_size);
	upa = alloc_size / min_unit_size;
	while (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
		upa--;
	max_upa = upa;

	/* group cpus according to their proximity */
	for_each_possible_cpu(cpu) {
		group = 0;
	next_group:
		for_each_possible_cpu(tcpu) {
			if (cpu == tcpu)
				break;
			if (group_map[tcpu] == group && cpu_distance_fn &&
			    (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
			     cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
				group++;
				nr_groups = max(nr_groups, group + 1);
				goto next_group;
			}
		}
		group_map[cpu] = group;
		group_cnt[group]++;
		group_cnt_max = max(group_cnt_max, group_cnt[group]);
	}

	/*
	 * Expand unit size until address space usage goes over 75%
	 * and then as much as possible without using more address
	 * space.
	 */
	last_allocs = INT_MAX;
	for (upa = max_upa; upa; upa--) {
		int allocs = 0, wasted = 0;

		if (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
			continue;

		for (group = 0; group < nr_groups; group++) {
			int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
			allocs += this_allocs;
			wasted += this_allocs * upa - group_cnt[group];
		}

		/*
		 * Don't accept if wastage is over 25%.  The
		 * greater-than comparison ensures upa==1 always
		 * passes the following check.
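		 *
		 * For example (made-up numbers): 5 CPUs in one group
		 * with upa == 4 need two allocations and waste 3
		 * units; 3 > 5 / 3, so upa == 4 is rejected.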
1559 */ 1560 if (wasted > num_possible_cpus() / 3) 1561 continue; 1562 1563 /* and then don't consume more memory */ 1564 if (allocs > last_allocs) 1565 break; 1566 last_allocs = allocs; 1567 best_upa = upa; 1568 } 1569 upa = best_upa; 1570 1571 /* allocate and fill alloc_info */ 1572 for (group = 0; group < nr_groups; group++) 1573 nr_units += roundup(group_cnt[group], upa); 1574 1575 ai = pcpu_alloc_alloc_info(nr_groups, nr_units); 1576 if (!ai) 1577 return ERR_PTR(-ENOMEM); 1578 cpu_map = ai->groups[0].cpu_map; 1579 1580 for (group = 0; group < nr_groups; group++) { 1581 ai->groups[group].cpu_map = cpu_map; 1582 cpu_map += roundup(group_cnt[group], upa); 1583 } 1584 1585 ai->static_size = static_size; 1586 ai->reserved_size = reserved_size; 1587 ai->dyn_size = dyn_size; 1588 ai->unit_size = alloc_size / upa; 1589 ai->atom_size = atom_size; 1590 ai->alloc_size = alloc_size; 1591 1592 for (group = 0, unit = 0; group_cnt[group]; group++) { 1593 struct pcpu_group_info *gi = &ai->groups[group]; 1594 1595 /* 1596 * Initialize base_offset as if all groups are located 1597 * back-to-back. The caller should update this to 1598 * reflect actual allocation. 1599 */ 1600 gi->base_offset = unit * ai->unit_size; 1601 1602 for_each_possible_cpu(cpu) 1603 if (group_map[cpu] == group) 1604 gi->cpu_map[gi->nr_units++] = cpu; 1605 gi->nr_units = roundup(gi->nr_units, upa); 1606 unit += gi->nr_units; 1607 } 1608 BUG_ON(unit != nr_units); 1609 1610 return ai; 1611} 1612 1613/** 1614 * pcpu_dump_alloc_info - print out information about pcpu_alloc_info 1615 * @lvl: loglevel 1616 * @ai: allocation info to dump 1617 * 1618 * Print out information about @ai using loglevel @lvl. 1619 */ 1620static void pcpu_dump_alloc_info(const char *lvl, 1621 const struct pcpu_alloc_info *ai) 1622{ 1623 int group_width = 1, cpu_width = 1, width; 1624 char empty_str[] = "--------"; 1625 int alloc = 0, alloc_end = 0; 1626 int group, v; 1627 int upa, apl; /* units per alloc, allocs per line */ 1628 1629 v = ai->nr_groups; 1630 while (v /= 10) 1631 group_width++; 1632 1633 v = num_possible_cpus(); 1634 while (v /= 10) 1635 cpu_width++; 1636 empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0'; 1637 1638 upa = ai->alloc_size / ai->unit_size; 1639 width = upa * (cpu_width + 1) + group_width + 3; 1640 apl = rounddown_pow_of_two(max(60 / width, 1)); 1641 1642 printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu", 1643 lvl, ai->static_size, ai->reserved_size, ai->dyn_size, 1644 ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size); 1645 1646 for (group = 0; group < ai->nr_groups; group++) { 1647 const struct pcpu_group_info *gi = &ai->groups[group]; 1648 int unit = 0, unit_end = 0; 1649 1650 BUG_ON(gi->nr_units % upa); 1651 for (alloc_end += gi->nr_units / upa; 1652 alloc < alloc_end; alloc++) { 1653 if (!(alloc % apl)) { 1654 printk("\n"); 1655 printk("%spcpu-alloc: ", lvl); 1656 } 1657 printk("[%0*d] ", group_width, group); 1658 1659 for (unit_end += upa; unit < unit_end; unit++) 1660 if (gi->cpu_map[unit] != NR_CPUS) 1661 printk("%0*d ", cpu_width, 1662 gi->cpu_map[unit]); 1663 else 1664 printk("%s ", empty_str); 1665 } 1666 } 1667 printk("\n"); 1668} 1669 1670/** 1671 * pcpu_setup_first_chunk - initialize the first percpu chunk 1672 * @ai: pcpu_alloc_info describing how to percpu area is shaped 1673 * @base_addr: mapped address 1674 * 1675 * Initialize the first percpu chunk which contains the kernel static 1676 * perpcu area. This function is to be called from arch percpu area 1677 * setup path. 
1678 * 1679 * @ai contains all information necessary to initialize the first 1680 * chunk and prime the dynamic percpu allocator. 1681 * 1682 * @ai->static_size is the size of static percpu area. 1683 * 1684 * @ai->reserved_size, if non-zero, specifies the amount of bytes to 1685 * reserve after the static area in the first chunk. This reserves 1686 * the first chunk such that it's available only through reserved 1687 * percpu allocation. This is primarily used to serve module percpu 1688 * static areas on architectures where the addressing model has 1689 * limited offset range for symbol relocations to guarantee module 1690 * percpu symbols fall inside the relocatable range. 1691 * 1692 * @ai->dyn_size determines the number of bytes available for dynamic 1693 * allocation in the first chunk. The area between @ai->static_size + 1694 * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused. 1695 * 1696 * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE 1697 * and equal to or larger than @ai->static_size + @ai->reserved_size + 1698 * @ai->dyn_size. 1699 * 1700 * @ai->atom_size is the allocation atom size and used as alignment 1701 * for vm areas. 1702 * 1703 * @ai->alloc_size is the allocation size and always multiple of 1704 * @ai->atom_size. This is larger than @ai->atom_size if 1705 * @ai->unit_size is larger than @ai->atom_size. 1706 * 1707 * @ai->nr_groups and @ai->groups describe virtual memory layout of 1708 * percpu areas. Units which should be colocated are put into the 1709 * same group. Dynamic VM areas will be allocated according to these 1710 * groupings. If @ai->nr_groups is zero, a single group containing 1711 * all units is assumed. 1712 * 1713 * The caller should have mapped the first chunk at @base_addr and 1714 * copied static data to each unit. 1715 * 1716 * If the first chunk ends up with both reserved and dynamic areas, it 1717 * is served by two chunks - one to serve the core static and reserved 1718 * areas and the other for the dynamic area. They share the same vm 1719 * and page map but uses different area allocation map to stay away 1720 * from each other. The latter chunk is circulated in the chunk slots 1721 * and available for dynamic allocation like any other chunks. 1722 * 1723 * RETURNS: 1724 * 0 on success, -errno on failure. 
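 *
 * As a worked example with made-up sizes: for a 128KB static area, 8KB
 * reserved area and 20KB dynamic area, the static/reserved chunk map
 * starts out as { -131072, 8192 } and the dynamic chunk map as
 * { -139264, 20480 }, matching the layout described above.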
1725 */ 1726int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, 1727 void *base_addr) 1728{ 1729 static char cpus_buf[4096] __initdata; 1730 static int smap[2], dmap[2]; 1731 size_t dyn_size = ai->dyn_size; 1732 size_t size_sum = ai->static_size + ai->reserved_size + dyn_size; 1733 struct pcpu_chunk *schunk, *dchunk = NULL; 1734 unsigned long *group_offsets; 1735 size_t *group_sizes; 1736 unsigned long *unit_off; 1737 unsigned int cpu; 1738 int *unit_map; 1739 int group, unit, i; 1740 1741 cpumask_scnprintf(cpus_buf, sizeof(cpus_buf), cpu_possible_mask); 1742 1743#define PCPU_SETUP_BUG_ON(cond) do { \ 1744 if (unlikely(cond)) { \ 1745 pr_emerg("PERCPU: failed to initialize, %s", #cond); \ 1746 pr_emerg("PERCPU: cpu_possible_mask=%s\n", cpus_buf); \ 1747 pcpu_dump_alloc_info(KERN_EMERG, ai); \ 1748 BUG(); \ 1749 } \ 1750} while (0) 1751 1752 /* sanity checks */ 1753 BUILD_BUG_ON(ARRAY_SIZE(smap) >= PCPU_DFL_MAP_ALLOC || 1754 ARRAY_SIZE(dmap) >= PCPU_DFL_MAP_ALLOC); 1755 PCPU_SETUP_BUG_ON(ai->nr_groups <= 0); 1756 PCPU_SETUP_BUG_ON(!ai->static_size); 1757 PCPU_SETUP_BUG_ON(!base_addr); 1758 PCPU_SETUP_BUG_ON(ai->unit_size < size_sum); 1759 PCPU_SETUP_BUG_ON(ai->unit_size & ~PAGE_MASK); 1760 PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE); 1761 1762 /* process group information and build config tables accordingly */ 1763 group_offsets = alloc_bootmem(ai->nr_groups * sizeof(group_offsets[0])); 1764 group_sizes = alloc_bootmem(ai->nr_groups * sizeof(group_sizes[0])); 1765 unit_map = alloc_bootmem(nr_cpu_ids * sizeof(unit_map[0])); 1766 unit_off = alloc_bootmem(nr_cpu_ids * sizeof(unit_off[0])); 1767 1768 for (cpu = 0; cpu < nr_cpu_ids; cpu++) 1769 unit_map[cpu] = UINT_MAX; 1770 pcpu_first_unit_cpu = NR_CPUS; 1771 1772 for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) { 1773 const struct pcpu_group_info *gi = &ai->groups[group]; 1774 1775 group_offsets[group] = gi->base_offset; 1776 group_sizes[group] = gi->nr_units * ai->unit_size; 1777 1778 for (i = 0; i < gi->nr_units; i++) { 1779 cpu = gi->cpu_map[i]; 1780 if (cpu == NR_CPUS) 1781 continue; 1782 1783 PCPU_SETUP_BUG_ON(cpu > nr_cpu_ids); 1784 PCPU_SETUP_BUG_ON(!cpu_possible(cpu)); 1785 PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX); 1786 1787 unit_map[cpu] = unit + i; 1788 unit_off[cpu] = gi->base_offset + i * ai->unit_size; 1789 1790 if (pcpu_first_unit_cpu == NR_CPUS) 1791 pcpu_first_unit_cpu = cpu; 1792 } 1793 } 1794 pcpu_last_unit_cpu = cpu; 1795 pcpu_nr_units = unit; 1796 1797 for_each_possible_cpu(cpu) 1798 PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX); 1799 1800 /* we're done parsing the input, undefine BUG macro and dump config */ 1801#undef PCPU_SETUP_BUG_ON 1802 pcpu_dump_alloc_info(KERN_INFO, ai); 1803 1804 pcpu_nr_groups = ai->nr_groups; 1805 pcpu_group_offsets = group_offsets; 1806 pcpu_group_sizes = group_sizes; 1807 pcpu_unit_map = unit_map; 1808 pcpu_unit_offsets = unit_off; 1809 1810 /* determine basic parameters */ 1811 pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT; 1812 pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT; 1813 pcpu_atom_size = ai->atom_size; 1814 pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) + 1815 BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long); 1816 1817 /* 1818 * Allocate chunk slots. The additional last slot is for 1819 * empty chunks. 
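	 *
	 * For example (made-up numbers): a chunk with 12k of free
	 * space maps to slot max(fls(12288) - PCPU_SLOT_BASE_SHIFT + 2, 1)
	 * == 11, while a fully free chunk always sits in the extra
	 * last slot.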
	 */
	pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
	pcpu_slot = alloc_bootmem(pcpu_nr_slots * sizeof(pcpu_slot[0]));
	for (i = 0; i < pcpu_nr_slots; i++)
		INIT_LIST_HEAD(&pcpu_slot[i]);

	/*
	 * Initialize static chunk.  If reserved_size is zero, the
	 * static chunk covers static area + dynamic allocation area
	 * in the first chunk.  If reserved_size is not zero, it
	 * covers static area + reserved area (mostly used for module
	 * static percpu allocation).
	 */
	schunk = alloc_bootmem(pcpu_chunk_struct_size);
	INIT_LIST_HEAD(&schunk->list);
	schunk->base_addr = base_addr;
	schunk->map = smap;
	schunk->map_alloc = ARRAY_SIZE(smap);
	schunk->immutable = true;
	bitmap_fill(schunk->populated, pcpu_unit_pages);

	if (ai->reserved_size) {
		schunk->free_size = ai->reserved_size;
		pcpu_reserved_chunk = schunk;
		pcpu_reserved_chunk_limit = ai->static_size + ai->reserved_size;
	} else {
		schunk->free_size = dyn_size;
		dyn_size = 0;			/* dynamic area covered */
	}
	schunk->contig_hint = schunk->free_size;

	schunk->map[schunk->map_used++] = -ai->static_size;
	if (schunk->free_size)
		schunk->map[schunk->map_used++] = schunk->free_size;

	/* init dynamic chunk if necessary */
	if (dyn_size) {
		dchunk = alloc_bootmem(pcpu_chunk_struct_size);
		INIT_LIST_HEAD(&dchunk->list);
		dchunk->base_addr = base_addr;
		dchunk->map = dmap;
		dchunk->map_alloc = ARRAY_SIZE(dmap);
		dchunk->immutable = true;
		bitmap_fill(dchunk->populated, pcpu_unit_pages);

		dchunk->contig_hint = dchunk->free_size = dyn_size;
		dchunk->map[dchunk->map_used++] = -pcpu_reserved_chunk_limit;
		dchunk->map[dchunk->map_used++] = dchunk->free_size;
	}

	/* link the first chunk in */
	pcpu_first_chunk = dchunk ?: schunk;
	pcpu_chunk_relocate(pcpu_first_chunk, -1);

	/* we're done */
	pcpu_base_addr = base_addr;
	return 0;
}

const char *pcpu_fc_names[PCPU_FC_NR] __initdata = {
	[PCPU_FC_AUTO]	= "auto",
	[PCPU_FC_EMBED]	= "embed",
	[PCPU_FC_PAGE]	= "page",
};

enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;

static int __init percpu_alloc_setup(char *str)
{
	if (0)
		/* nada */;
#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
	else if (!strcmp(str, "embed"))
		pcpu_chosen_fc = PCPU_FC_EMBED;
#endif
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
	else if (!strcmp(str, "page"))
		pcpu_chosen_fc = PCPU_FC_PAGE;
#endif
	else
		pr_warning("PERCPU: unknown allocator %s specified\n", str);

	return 0;
}
early_param("percpu_alloc", percpu_alloc_setup);

#if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
	!defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
/**
 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
 * @reserved_size: the size of reserved percpu area in bytes
 * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
 * @atom_size: allocation atom size
 * @cpu_distance_fn: callback to determine distance between cpus, optional
 * @alloc_fn: function to allocate percpu page
 * @free_fn: function to free percpu page
 *
 * This is a helper to ease setting up embedded first percpu chunk and
 * can be called where pcpu_setup_first_chunk() is expected.
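 *
 * @cpu_distance_fn, when given, is used to decide which cpus may
 * share a group.  A NUMA arch might, for instance, report
 * LOCAL_DISTANCE for cpus on the same node and REMOTE_DISTANCE
 * otherwise.  Purely as an illustrative sketch (the early node
 * lookup is arch specific, cpu_to_node() is used here only for
 * brevity and the function name is made up):
 *
 *	static size_t __init my_cpu_distance(unsigned int from,
 *					     unsigned int to)
 *	{
 *		if (cpu_to_node(from) == cpu_to_node(to))
 *			return LOCAL_DISTANCE;
 *		return REMOTE_DISTANCE;
 *	}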
 *
 * If this function is used to setup the first chunk, it is allocated
 * by calling @alloc_fn and used as-is without being mapped into
 * vmalloc area.  Allocations are always whole multiples of @atom_size
 * aligned to @atom_size.
 *
 * This enables the first chunk to piggy back on the linear physical
 * mapping which often uses larger page size.  Please note that this
 * can result in very sparse cpu->unit mapping on NUMA machines thus
 * requiring large vmalloc address space.  Don't use this allocator if
 * vmalloc space is not orders of magnitude larger than distances
 * between node memory addresses (ie. 32bit NUMA machines).
 *
 * When @dyn_size is positive, dynamic area might be larger than
 * specified to fill page alignment.  When @dyn_size is auto,
 * @dyn_size is just big enough to fill page alignment after static
 * and reserved areas.
 *
 * If the needed size is smaller than the minimum or specified unit
 * size, the leftover is returned using @free_fn.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init pcpu_embed_first_chunk(size_t reserved_size, ssize_t dyn_size,
				  size_t atom_size,
				  pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
				  pcpu_fc_alloc_fn_t alloc_fn,
				  pcpu_fc_free_fn_t free_fn)
{
	void *base = (void *)ULONG_MAX;
	void **areas = NULL;
	struct pcpu_alloc_info *ai;
	size_t size_sum, areas_size, max_distance;
	int group, i, rc;

	ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
				   cpu_distance_fn);
	if (IS_ERR(ai))
		return PTR_ERR(ai);

	size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
	areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));

	areas = alloc_bootmem_nopanic(areas_size);
	if (!areas) {
		rc = -ENOMEM;
		goto out_free;
	}

	/* allocate, copy and determine base address */
	for (group = 0; group < ai->nr_groups; group++) {
		struct pcpu_group_info *gi = &ai->groups[group];
		unsigned int cpu = NR_CPUS;
		void *ptr;

		for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
			cpu = gi->cpu_map[i];
		BUG_ON(cpu == NR_CPUS);

		/* allocate space for the whole group */
		ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size);
		if (!ptr) {
			rc = -ENOMEM;
			goto out_free_areas;
		}
		areas[group] = ptr;

		base = min(ptr, base);

		for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
			if (gi->cpu_map[i] == NR_CPUS) {
				/* unused unit, free whole */
				free_fn(ptr, ai->unit_size);
				continue;
			}
			/* copy and return the unused part */
			memcpy(ptr, __per_cpu_load, ai->static_size);
			free_fn(ptr + size_sum, ai->unit_size - size_sum);
		}
	}

	/* base address is now known, determine group base offsets */
	max_distance = 0;
	for (group = 0; group < ai->nr_groups; group++) {
		ai->groups[group].base_offset = areas[group] - base;
		max_distance = max_t(size_t, max_distance,
				     ai->groups[group].base_offset);
	}
	max_distance += ai->unit_size;

	/* warn if maximum distance is further than 75% of vmalloc space */
	if (max_distance > (VMALLOC_END - VMALLOC_START) * 3 / 4) {
		pr_warning("PERCPU: max_distance=0x%zx too large for vmalloc "
			   "space 0x%lx\n",
			   max_distance, VMALLOC_END - VMALLOC_START);
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
		/* and fail if we have fallback */
		rc = -EINVAL;
		goto out_free;
#endif
	}

	pr_info("PERCPU: Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
		PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
		ai->dyn_size, ai->unit_size);

	rc = pcpu_setup_first_chunk(ai, base);
	goto out_free;

out_free_areas:
	for (group = 0; group < ai->nr_groups; group++)
		free_fn(areas[group],
			ai->groups[group].nr_units * ai->unit_size);
out_free:
	pcpu_free_alloc_info(ai);
	if (areas)
		free_bootmem(__pa(areas), areas_size);
	return rc;
}
#endif /* CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK ||
	  !CONFIG_HAVE_SETUP_PER_CPU_AREA */

#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
/**
 * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
 * @reserved_size: the size of reserved percpu area in bytes
 * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
 * @free_fn: function to free percpu page, always called with PAGE_SIZE
 * @populate_pte_fn: function to populate pte
 *
 * This is a helper to ease setting up page-remapped first percpu
 * chunk and can be called where pcpu_setup_first_chunk() is expected.
 *
 * This is the basic allocator.  Static percpu area is allocated
 * page-by-page into vmalloc area.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init pcpu_page_first_chunk(size_t reserved_size,
				 pcpu_fc_alloc_fn_t alloc_fn,
				 pcpu_fc_free_fn_t free_fn,
				 pcpu_fc_populate_pte_fn_t populate_pte_fn)
{
	static struct vm_struct vm;
	struct pcpu_alloc_info *ai;
	char psize_str[16];
	int unit_pages;
	size_t pages_size;
	struct page **pages;
	int unit, i, j, rc;

	snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);

	ai = pcpu_build_alloc_info(reserved_size, -1, PAGE_SIZE, NULL);
	if (IS_ERR(ai))
		return PTR_ERR(ai);
	BUG_ON(ai->nr_groups != 1);
	BUG_ON(ai->groups[0].nr_units != num_possible_cpus());

	unit_pages = ai->unit_size >> PAGE_SHIFT;

	/* unaligned allocations can't be freed, round up to page size */
	pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
			       sizeof(pages[0]));
	pages = alloc_bootmem(pages_size);

	/* allocate pages */
	j = 0;
	for (unit = 0; unit < num_possible_cpus(); unit++)
		for (i = 0; i < unit_pages; i++) {
			unsigned int cpu = ai->groups[0].cpu_map[unit];
			void *ptr;

			ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
			if (!ptr) {
				pr_warning("PERCPU: failed to allocate %s page "
					   "for cpu%u\n", psize_str, cpu);
				goto enomem;
			}
			pages[j++] = virt_to_page(ptr);
		}

	/* allocate vm area, map the pages and copy static data */
	vm.flags = VM_ALLOC;
	vm.size = num_possible_cpus() * ai->unit_size;
	vm_area_register_early(&vm, PAGE_SIZE);

	for (unit = 0; unit < num_possible_cpus(); unit++) {
		unsigned long unit_addr =
			(unsigned long)vm.addr + unit * ai->unit_size;

		for (i = 0; i < unit_pages; i++)
			populate_pte_fn(unit_addr + (i << PAGE_SHIFT));

		/* pte already populated, the following shouldn't fail */
		rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
				      unit_pages);
		if (rc < 0)
			panic("failed to map percpu area, err=%d\n", rc);

		/*
		 * FIXME: Archs with virtual cache should flush local
		 * cache for the linear mapping here - something
		 * equivalent to flush_cache_vmap() on the local cpu.
		 * flush_cache_vmap() can't be used as most supporting
		 * data structures are not set up yet.
		 */

		/* copy static data */
		memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
	}

	/* we're ready, commit */
	pr_info("PERCPU: %d %s pages/cpu @%p s%zu r%zu d%zu\n",
		unit_pages, psize_str, vm.addr, ai->static_size,
		ai->reserved_size, ai->dyn_size);

	rc = pcpu_setup_first_chunk(ai, vm.addr);
	goto out_free_ar;

enomem:
	while (--j >= 0)
		free_fn(page_address(pages[j]), PAGE_SIZE);
	rc = -ENOMEM;
out_free_ar:
	free_bootmem(__pa(pages), pages_size);
	pcpu_free_alloc_info(ai);
	return rc;
}
#endif /* CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK */

/*
 * Generic percpu area setup.
 *
 * The embedding helper is used because its behavior closely resembles
 * the original non-dynamic generic percpu area setup.  This is
 * important because many archs have addressing restrictions and might
 * fail if the percpu area is located far away from the previous
 * location.  As an added bonus, in non-NUMA cases, embedding is
 * generally a good idea TLB-wise because the percpu area can piggy
 * back on the physical linear memory mapping which uses large page
 * mappings on applicable archs.
 */
#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
				       size_t align)
{
	return __alloc_bootmem_nopanic(size, align, __pa(MAX_DMA_ADDRESS));
}

static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
{
	free_bootmem(__pa(ptr), size);
}

void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc;

	/*
	 * Always reserve area for module percpu variables.  That's
	 * what the legacy allocator did.
	 */
	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
				    pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
	if (rc < 0)
		panic("Failed to initialize percpu areas.");

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
}
#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
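
/*
 * Illustrative sketch, never compiled: how ordinary kernel code ends
 * up using the areas set up above.  Where the corresponding helpers
 * are built, the first chunk allocator can be overridden on the
 * kernel command line with "percpu_alloc=embed" or "percpu_alloc=page"
 * (see percpu_alloc_setup() above); once setup_per_cpu_areas() has
 * run, dynamic percpu memory is available through alloc_percpu().
 * All identifiers below are made up for the example.
 */
#if 0
struct foo_stats {
	u64	packets;
	u64	bytes;
};

static struct foo_stats __percpu *foo_pcpu_stats;

static int __init foo_init(void)
{
	unsigned int cpu;

	/* one zeroed struct foo_stats instance per possible cpu */
	foo_pcpu_stats = alloc_percpu(struct foo_stats);
	if (!foo_pcpu_stats)
		return -ENOMEM;

	/*
	 * alloc_percpu() already hands back cleared memory; the loop
	 * below only demonstrates addressing one cpu's copy via
	 * per_cpu_ptr(), which relies on the per-cpu offsets
	 * established by setup_per_cpu_areas().
	 */
	for_each_possible_cpu(cpu)
		per_cpu_ptr(foo_pcpu_stats, cpu)->packets = 0;

	return 0;
}
#endif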