percpu.c revision f58dc01ba2ca9fe3ab2ba4ca43d9c8a735cf62d8
/*
 * linux/mm/percpu.c - percpu memory allocator
 *
 * Copyright (C) 2009		SUSE Linux Products GmbH
 * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * This is the percpu allocator which can handle both static and
 * dynamic areas.  Percpu areas are allocated in chunks in the vmalloc
 * area.  Each chunk consists of a boot-time determined number of units
 * and the first chunk is used for static percpu variables in the
 * kernel image (special boot time alloc/init handling is necessary as
 * these areas need to be brought up before allocation services are
 * running).  Units grow as necessary and all units grow or shrink in
 * unison.  When a chunk is filled up, another chunk is allocated, i.e.
 * in the vmalloc area:
 *
 *  c0                           c1                         c2
 *  -------------------          -------------------        ------------
 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
 *  -------------------  ......  -------------------  ....  ------------
 *
 * Allocation is done in offset-size areas of single unit space.  I.e.,
 * an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0,
 * c1:u1, c1:u2 and c1:u3.  On UMA, units correspond directly to
 * cpus.  On NUMA, the mapping can be non-linear and even sparse.
 * Percpu access can be done by configuring percpu base registers
 * according to the cpu to unit mapping and pcpu_unit_size.
 *
 * There are usually many small percpu allocations, many of them as
 * small as 4 bytes.  The allocator organizes chunks into lists
 * according to free size and tries to allocate from the fullest one.
 * Each chunk keeps a maximum contiguous area size hint which is
 * guaranteed to be equal to or larger than the maximum contiguous
 * area in the chunk.  This helps the allocator avoid iterating the
 * chunk maps unnecessarily.
 *
 * Allocation state in each chunk is kept using an array of integers
 * on chunk->map.  A positive value in the map represents a free
 * region and a negative value an allocated one.  Allocation inside a
 * chunk is done by scanning this map sequentially and serving the
 * first matching entry.  This is mostly copied from the
 * percpu_modalloc() allocator.  Chunks can be determined from the
 * address using the index field in the page struct.  The index field
 * contains a pointer to the chunk.
 *
 * To use this allocator, arch code should do the following:
48 * 49 * - drop CONFIG_HAVE_LEGACY_PER_CPU_AREA 50 * 51 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate 52 * regular address to percpu pointer and back if they need to be 53 * different from the default 54 * 55 * - use pcpu_setup_first_chunk() during percpu area initialization to 56 * setup the first chunk containing the kernel static percpu area 57 */ 58 59#include <linux/bitmap.h> 60#include <linux/bootmem.h> 61#include <linux/list.h> 62#include <linux/log2.h> 63#include <linux/mm.h> 64#include <linux/module.h> 65#include <linux/mutex.h> 66#include <linux/percpu.h> 67#include <linux/pfn.h> 68#include <linux/slab.h> 69#include <linux/spinlock.h> 70#include <linux/vmalloc.h> 71#include <linux/workqueue.h> 72 73#include <asm/cacheflush.h> 74#include <asm/sections.h> 75#include <asm/tlbflush.h> 76 77#define PCPU_SLOT_BASE_SHIFT 5 /* 1-31 shares the same slot */ 78#define PCPU_DFL_MAP_ALLOC 16 /* start a map with 16 ents */ 79 80/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */ 81#ifndef __addr_to_pcpu_ptr 82#define __addr_to_pcpu_ptr(addr) \ 83 (void *)((unsigned long)(addr) - (unsigned long)pcpu_base_addr \ 84 + (unsigned long)__per_cpu_start) 85#endif 86#ifndef __pcpu_ptr_to_addr 87#define __pcpu_ptr_to_addr(ptr) \ 88 (void *)((unsigned long)(ptr) + (unsigned long)pcpu_base_addr \ 89 - (unsigned long)__per_cpu_start) 90#endif 91 92struct pcpu_chunk { 93 struct list_head list; /* linked to pcpu_slot lists */ 94 int free_size; /* free bytes in the chunk */ 95 int contig_hint; /* max contiguous size hint */ 96 struct vm_struct *vm; /* mapped vmalloc region */ 97 int map_used; /* # of map entries used */ 98 int map_alloc; /* # of map entries allocated */ 99 int *map; /* allocation map */ 100 bool immutable; /* no [de]population allowed */ 101 unsigned long populated[]; /* populated bitmap */ 102}; 103 104static int pcpu_unit_pages __read_mostly; 105static int pcpu_unit_size __read_mostly; 106static int pcpu_nr_units __read_mostly; 107static int pcpu_chunk_size __read_mostly; 108static int pcpu_nr_slots __read_mostly; 109static size_t pcpu_chunk_struct_size __read_mostly; 110 111/* cpus with the lowest and highest unit numbers */ 112static unsigned int pcpu_first_unit_cpu __read_mostly; 113static unsigned int pcpu_last_unit_cpu __read_mostly; 114 115/* the address of the first chunk which starts with the kernel static area */ 116void *pcpu_base_addr __read_mostly; 117EXPORT_SYMBOL_GPL(pcpu_base_addr); 118 119/* cpu -> unit map */ 120const int *pcpu_unit_map __read_mostly; 121 122/* 123 * The first chunk which always exists. Note that unlike other 124 * chunks, this one can be allocated and mapped in several different 125 * ways and thus often doesn't live in the vmalloc area. 126 */ 127static struct pcpu_chunk *pcpu_first_chunk; 128 129/* 130 * Optional reserved chunk. This chunk reserves part of the first 131 * chunk and serves it for reserved allocations. The amount of 132 * reserved offset is in pcpu_reserved_chunk_limit. When reserved 133 * area doesn't exist, the following variables contain NULL and 0 134 * respectively. 135 */ 136static struct pcpu_chunk *pcpu_reserved_chunk; 137static int pcpu_reserved_chunk_limit; 138 139/* 140 * Synchronization rules. 141 * 142 * There are two locks - pcpu_alloc_mutex and pcpu_lock. The former 143 * protects allocation/reclaim paths, chunks, populated bitmap and 144 * vmalloc mapping. The latter is a spinlock and protects the index 145 * data structures - chunk slots, chunks and area maps in chunks. 
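/*
 * Sketch (userspace model, not kernel code): the default
 * __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() macros above only rebase
 * an address between the first chunk's mapping (pcpu_base_addr) and the
 * static percpu section (__per_cpu_start).  The model below uses made-up
 * base addresses to show that the two conversions are exact inverses.
 */
#include <assert.h>
#include <stdio.h>

static unsigned long base_addr_model = 0xf7a00000UL;	/* hypothetical pcpu_base_addr */
static unsigned long per_cpu_start_model = 0xc1a00000UL; /* hypothetical __per_cpu_start */

static unsigned long addr_to_pcpu_ptr(unsigned long addr)
{
	return addr - base_addr_model + per_cpu_start_model;
}

static unsigned long pcpu_ptr_to_addr(unsigned long ptr)
{
	return ptr + base_addr_model - per_cpu_start_model;
}

int main(void)
{
	unsigned long addr = base_addr_model + 0x1234;	/* unit0-relative address */
	unsigned long ptr = addr_to_pcpu_ptr(addr);

	assert(pcpu_ptr_to_addr(ptr) == addr);
	printf("addr %#lx <-> pcpu ptr %#lx\n", addr, ptr);
	return 0;
}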
146 * 147 * During allocation, pcpu_alloc_mutex is kept locked all the time and 148 * pcpu_lock is grabbed and released as necessary. All actual memory 149 * allocations are done using GFP_KERNEL with pcpu_lock released. 150 * 151 * Free path accesses and alters only the index data structures, so it 152 * can be safely called from atomic context. When memory needs to be 153 * returned to the system, free path schedules reclaim_work which 154 * grabs both pcpu_alloc_mutex and pcpu_lock, unlinks chunks to be 155 * reclaimed, release both locks and frees the chunks. Note that it's 156 * necessary to grab both locks to remove a chunk from circulation as 157 * allocation path might be referencing the chunk with only 158 * pcpu_alloc_mutex locked. 159 */ 160static DEFINE_MUTEX(pcpu_alloc_mutex); /* protects whole alloc and reclaim */ 161static DEFINE_SPINLOCK(pcpu_lock); /* protects index data structures */ 162 163static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */ 164 165/* reclaim work to release fully free chunks, scheduled from free path */ 166static void pcpu_reclaim(struct work_struct *work); 167static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim); 168 169static int __pcpu_size_to_slot(int size) 170{ 171 int highbit = fls(size); /* size is in bytes */ 172 return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1); 173} 174 175static int pcpu_size_to_slot(int size) 176{ 177 if (size == pcpu_unit_size) 178 return pcpu_nr_slots - 1; 179 return __pcpu_size_to_slot(size); 180} 181 182static int pcpu_chunk_slot(const struct pcpu_chunk *chunk) 183{ 184 if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int)) 185 return 0; 186 187 return pcpu_size_to_slot(chunk->free_size); 188} 189 190static int pcpu_page_idx(unsigned int cpu, int page_idx) 191{ 192 return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx; 193} 194 195static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk, 196 unsigned int cpu, int page_idx) 197{ 198 return (unsigned long)chunk->vm->addr + 199 (pcpu_page_idx(cpu, page_idx) << PAGE_SHIFT); 200} 201 202static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk, 203 unsigned int cpu, int page_idx) 204{ 205 /* must not be used on pre-mapped chunk */ 206 WARN_ON(chunk->immutable); 207 208 return vmalloc_to_page((void *)pcpu_chunk_addr(chunk, cpu, page_idx)); 209} 210 211/* set the pointer to a chunk in a page struct */ 212static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu) 213{ 214 page->index = (unsigned long)pcpu; 215} 216 217/* obtain pointer to a chunk from a page struct */ 218static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page) 219{ 220 return (struct pcpu_chunk *)page->index; 221} 222 223static void pcpu_next_unpop(struct pcpu_chunk *chunk, int *rs, int *re, int end) 224{ 225 *rs = find_next_zero_bit(chunk->populated, end, *rs); 226 *re = find_next_bit(chunk->populated, end, *rs + 1); 227} 228 229static void pcpu_next_pop(struct pcpu_chunk *chunk, int *rs, int *re, int end) 230{ 231 *rs = find_next_bit(chunk->populated, end, *rs); 232 *re = find_next_zero_bit(chunk->populated, end, *rs + 1); 233} 234 235/* 236 * (Un)populated page region iterators. Iterate over (un)populated 237 * page regions betwen @start and @end in @chunk. @rs and @re should 238 * be integer variables and will be set to start and end page index of 239 * the current region. 
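/*
 * Sketch (userspace model, not kernel code): __pcpu_size_to_slot() above
 * buckets a byte count by its highest set bit (fls()), so all sizes that
 * share a highest bit land on the same chunk list and small sizes are
 * clamped to slot 1.  The model below only mirrors that fls()-based
 * bucketing; the real pcpu_size_to_slot() additionally parks chunks whose
 * free size equals pcpu_unit_size in the last slot.
 */
#include <stdio.h>

#define SLOT_BASE_SHIFT 5		/* mirrors PCPU_SLOT_BASE_SHIFT */

static int fls_model(int x)		/* 1-based index of highest set bit, 0 for 0 */
{
	int bit = 0;

	while (x) {
		bit++;
		x >>= 1;
	}
	return bit;
}

static int size_to_slot(int size)
{
	int slot = fls_model(size) - SLOT_BASE_SHIFT + 2;

	return slot > 1 ? slot : 1;
}

int main(void)
{
	int sizes[] = { 4, 16, 31, 32, 64, 4096 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("size %4d -> slot %d\n", sizes[i], size_to_slot(sizes[i]));
	return 0;
}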
240 */ 241#define pcpu_for_each_unpop_region(chunk, rs, re, start, end) \ 242 for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \ 243 (rs) < (re); \ 244 (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end))) 245 246#define pcpu_for_each_pop_region(chunk, rs, re, start, end) \ 247 for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end)); \ 248 (rs) < (re); \ 249 (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end))) 250 251/** 252 * pcpu_mem_alloc - allocate memory 253 * @size: bytes to allocate 254 * 255 * Allocate @size bytes. If @size is smaller than PAGE_SIZE, 256 * kzalloc() is used; otherwise, vmalloc() is used. The returned 257 * memory is always zeroed. 258 * 259 * CONTEXT: 260 * Does GFP_KERNEL allocation. 261 * 262 * RETURNS: 263 * Pointer to the allocated area on success, NULL on failure. 264 */ 265static void *pcpu_mem_alloc(size_t size) 266{ 267 if (size <= PAGE_SIZE) 268 return kzalloc(size, GFP_KERNEL); 269 else { 270 void *ptr = vmalloc(size); 271 if (ptr) 272 memset(ptr, 0, size); 273 return ptr; 274 } 275} 276 277/** 278 * pcpu_mem_free - free memory 279 * @ptr: memory to free 280 * @size: size of the area 281 * 282 * Free @ptr. @ptr should have been allocated using pcpu_mem_alloc(). 283 */ 284static void pcpu_mem_free(void *ptr, size_t size) 285{ 286 if (size <= PAGE_SIZE) 287 kfree(ptr); 288 else 289 vfree(ptr); 290} 291 292/** 293 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot 294 * @chunk: chunk of interest 295 * @oslot: the previous slot it was on 296 * 297 * This function is called after an allocation or free changed @chunk. 298 * New slot according to the changed state is determined and @chunk is 299 * moved to the slot. Note that the reserved chunk is never put on 300 * chunk slots. 301 * 302 * CONTEXT: 303 * pcpu_lock. 304 */ 305static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot) 306{ 307 int nslot = pcpu_chunk_slot(chunk); 308 309 if (chunk != pcpu_reserved_chunk && oslot != nslot) { 310 if (oslot < nslot) 311 list_move(&chunk->list, &pcpu_slot[nslot]); 312 else 313 list_move_tail(&chunk->list, &pcpu_slot[nslot]); 314 } 315} 316 317/** 318 * pcpu_chunk_addr_search - determine chunk containing specified address 319 * @addr: address for which the chunk needs to be determined. 320 * 321 * RETURNS: 322 * The address of the found chunk. 323 */ 324static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr) 325{ 326 void *first_start = pcpu_first_chunk->vm->addr; 327 328 /* is it in the first chunk? */ 329 if (addr >= first_start && addr < first_start + pcpu_unit_size) { 330 /* is it in the reserved area? */ 331 if (addr < first_start + pcpu_reserved_chunk_limit) 332 return pcpu_reserved_chunk; 333 return pcpu_first_chunk; 334 } 335 336 /* 337 * The address is relative to unit0 which might be unused and 338 * thus unmapped. Offset the address to the unit space of the 339 * current processor before looking it up in the vmalloc 340 * space. Note that any possible cpu id can be used here, so 341 * there's no need to worry about preemption or cpu hotplug. 342 */ 343 addr += pcpu_unit_map[smp_processor_id()] * pcpu_unit_size; 344 return pcpu_get_page_chunk(vmalloc_to_page(addr)); 345} 346 347/** 348 * pcpu_extend_area_map - extend area map for allocation 349 * @chunk: target chunk 350 * 351 * Extend area map of @chunk so that it can accomodate an allocation. 352 * A single allocation can split an area into three areas, so this 353 * function makes sure that @chunk->map has at least two extra slots. 
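/*
 * Sketch (userspace model, not kernel code): pcpu_next_unpop(),
 * pcpu_next_pop() and the pcpu_for_each_*_region() macros above walk
 * maximal runs of clear or set bits in chunk->populated.  The model below
 * iterates the populated (set-bit) runs of a small bitmap; the naive
 * find_next_set()/find_next_clear() helpers are made up for illustration.
 */
#include <stdio.h>

static int find_next_set(unsigned long map, int size, int start)
{
	int i;

	for (i = start; i < size; i++)
		if (map & (1UL << i))
			return i;
	return size;
}

static int find_next_clear(unsigned long map, int size, int start)
{
	int i;

	for (i = start; i < size; i++)
		if (!(map & (1UL << i)))
			return i;
	return size;
}

int main(void)
{
	unsigned long populated = 0xf3UL;	/* pages 0-1 and 4-7 populated */
	int size = 10, rs, re;

	for (rs = find_next_set(populated, size, 0); rs < size;
	     rs = find_next_set(populated, size, re + 1)) {
		re = find_next_clear(populated, size, rs + 1);
		printf("populated region [%d, %d)\n", rs, re);
	}
	return 0;
}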
354 * 355 * CONTEXT: 356 * pcpu_alloc_mutex, pcpu_lock. pcpu_lock is released and reacquired 357 * if area map is extended. 358 * 359 * RETURNS: 360 * 0 if noop, 1 if successfully extended, -errno on failure. 361 */ 362static int pcpu_extend_area_map(struct pcpu_chunk *chunk) 363{ 364 int new_alloc; 365 int *new; 366 size_t size; 367 368 /* has enough? */ 369 if (chunk->map_alloc >= chunk->map_used + 2) 370 return 0; 371 372 spin_unlock_irq(&pcpu_lock); 373 374 new_alloc = PCPU_DFL_MAP_ALLOC; 375 while (new_alloc < chunk->map_used + 2) 376 new_alloc *= 2; 377 378 new = pcpu_mem_alloc(new_alloc * sizeof(new[0])); 379 if (!new) { 380 spin_lock_irq(&pcpu_lock); 381 return -ENOMEM; 382 } 383 384 /* 385 * Acquire pcpu_lock and switch to new area map. Only free 386 * could have happened inbetween, so map_used couldn't have 387 * grown. 388 */ 389 spin_lock_irq(&pcpu_lock); 390 BUG_ON(new_alloc < chunk->map_used + 2); 391 392 size = chunk->map_alloc * sizeof(chunk->map[0]); 393 memcpy(new, chunk->map, size); 394 395 /* 396 * map_alloc < PCPU_DFL_MAP_ALLOC indicates that the chunk is 397 * one of the first chunks and still using static map. 398 */ 399 if (chunk->map_alloc >= PCPU_DFL_MAP_ALLOC) 400 pcpu_mem_free(chunk->map, size); 401 402 chunk->map_alloc = new_alloc; 403 chunk->map = new; 404 return 0; 405} 406 407/** 408 * pcpu_split_block - split a map block 409 * @chunk: chunk of interest 410 * @i: index of map block to split 411 * @head: head size in bytes (can be 0) 412 * @tail: tail size in bytes (can be 0) 413 * 414 * Split the @i'th map block into two or three blocks. If @head is 415 * non-zero, @head bytes block is inserted before block @i moving it 416 * to @i+1 and reducing its size by @head bytes. 417 * 418 * If @tail is non-zero, the target block, which can be @i or @i+1 419 * depending on @head, is reduced by @tail bytes and @tail byte block 420 * is inserted after the target block. 421 * 422 * @chunk->map must have enough free slots to accomodate the split. 423 * 424 * CONTEXT: 425 * pcpu_lock. 426 */ 427static void pcpu_split_block(struct pcpu_chunk *chunk, int i, 428 int head, int tail) 429{ 430 int nr_extra = !!head + !!tail; 431 432 BUG_ON(chunk->map_alloc < chunk->map_used + nr_extra); 433 434 /* insert new subblocks */ 435 memmove(&chunk->map[i + nr_extra], &chunk->map[i], 436 sizeof(chunk->map[0]) * (chunk->map_used - i)); 437 chunk->map_used += nr_extra; 438 439 if (head) { 440 chunk->map[i + 1] = chunk->map[i] - head; 441 chunk->map[i++] = head; 442 } 443 if (tail) { 444 chunk->map[i++] -= tail; 445 chunk->map[i] = tail; 446 } 447} 448 449/** 450 * pcpu_alloc_area - allocate area from a pcpu_chunk 451 * @chunk: chunk of interest 452 * @size: wanted size in bytes 453 * @align: wanted align 454 * 455 * Try to allocate @size bytes area aligned at @align from @chunk. 456 * Note that this function only allocates the offset. It doesn't 457 * populate or map the area. 458 * 459 * @chunk->map must have at least two free slots. 460 * 461 * CONTEXT: 462 * pcpu_lock. 463 * 464 * RETURNS: 465 * Allocated offset in @chunk on success, -1 if no matching area is 466 * found. 
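/*
 * Sketch (userspace model, not kernel code): pcpu_alloc_area() below scans
 * chunk->map, in which a positive entry is a free area and a negative one
 * an allocated area, and carves the request out of the first free entry
 * that is large enough, splitting off the remainder.  The model keeps the
 * sign convention, the running offset and the split, but ignores
 * alignment, the head/tail merging heuristics and the contig_hint
 * bookkeeping.  All sizes are made up.
 */
#include <stdio.h>
#include <string.h>

#define MAP_MAX 16

static int map[MAP_MAX] = { -64, 192, -32, 256 };	/* alternating allocated/free areas */
static int map_used = 4;

static int alloc_area(int size)
{
	int i, off = 0;

	for (i = 0; i < map_used; off += map[i] < 0 ? -map[i] : map[i], i++) {
		if (map[i] < size)		/* allocated (negative) or too small */
			continue;
		if (map[i] > size && map_used < MAP_MAX) {
			/* split: shrink the free area, insert the remainder after it */
			memmove(&map[i + 1], &map[i],
				(map_used - i) * sizeof(map[0]));
			map_used++;
			map[i + 1] = map[i] - size;
			map[i] = size;
		}
		map[i] = -map[i];		/* mark allocated */
		return off;
	}
	return -1;				/* no matching area in this chunk */
}

int main(void)
{
	printf("first 128-byte area at offset %d\n", alloc_area(128));
	printf("next 128-byte area at offset %d\n", alloc_area(128));
	return 0;
}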
467 */ 468static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align) 469{ 470 int oslot = pcpu_chunk_slot(chunk); 471 int max_contig = 0; 472 int i, off; 473 474 for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) { 475 bool is_last = i + 1 == chunk->map_used; 476 int head, tail; 477 478 /* extra for alignment requirement */ 479 head = ALIGN(off, align) - off; 480 BUG_ON(i == 0 && head != 0); 481 482 if (chunk->map[i] < 0) 483 continue; 484 if (chunk->map[i] < head + size) { 485 max_contig = max(chunk->map[i], max_contig); 486 continue; 487 } 488 489 /* 490 * If head is small or the previous block is free, 491 * merge'em. Note that 'small' is defined as smaller 492 * than sizeof(int), which is very small but isn't too 493 * uncommon for percpu allocations. 494 */ 495 if (head && (head < sizeof(int) || chunk->map[i - 1] > 0)) { 496 if (chunk->map[i - 1] > 0) 497 chunk->map[i - 1] += head; 498 else { 499 chunk->map[i - 1] -= head; 500 chunk->free_size -= head; 501 } 502 chunk->map[i] -= head; 503 off += head; 504 head = 0; 505 } 506 507 /* if tail is small, just keep it around */ 508 tail = chunk->map[i] - head - size; 509 if (tail < sizeof(int)) 510 tail = 0; 511 512 /* split if warranted */ 513 if (head || tail) { 514 pcpu_split_block(chunk, i, head, tail); 515 if (head) { 516 i++; 517 off += head; 518 max_contig = max(chunk->map[i - 1], max_contig); 519 } 520 if (tail) 521 max_contig = max(chunk->map[i + 1], max_contig); 522 } 523 524 /* update hint and mark allocated */ 525 if (is_last) 526 chunk->contig_hint = max_contig; /* fully scanned */ 527 else 528 chunk->contig_hint = max(chunk->contig_hint, 529 max_contig); 530 531 chunk->free_size -= chunk->map[i]; 532 chunk->map[i] = -chunk->map[i]; 533 534 pcpu_chunk_relocate(chunk, oslot); 535 return off; 536 } 537 538 chunk->contig_hint = max_contig; /* fully scanned */ 539 pcpu_chunk_relocate(chunk, oslot); 540 541 /* tell the upper layer that this chunk has no matching area */ 542 return -1; 543} 544 545/** 546 * pcpu_free_area - free area to a pcpu_chunk 547 * @chunk: chunk of interest 548 * @freeme: offset of area to free 549 * 550 * Free area starting from @freeme to @chunk. Note that this function 551 * only modifies the allocation map. It doesn't depopulate or unmap 552 * the area. 553 * 554 * CONTEXT: 555 * pcpu_lock. 556 */ 557static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme) 558{ 559 int oslot = pcpu_chunk_slot(chunk); 560 int i, off; 561 562 for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) 563 if (off == freeme) 564 break; 565 BUG_ON(off != freeme); 566 BUG_ON(chunk->map[i] > 0); 567 568 chunk->map[i] = -chunk->map[i]; 569 chunk->free_size += chunk->map[i]; 570 571 /* merge with previous? */ 572 if (i > 0 && chunk->map[i - 1] >= 0) { 573 chunk->map[i - 1] += chunk->map[i]; 574 chunk->map_used--; 575 memmove(&chunk->map[i], &chunk->map[i + 1], 576 (chunk->map_used - i) * sizeof(chunk->map[0])); 577 i--; 578 } 579 /* merge with next? 
*/ 580 if (i + 1 < chunk->map_used && chunk->map[i + 1] >= 0) { 581 chunk->map[i] += chunk->map[i + 1]; 582 chunk->map_used--; 583 memmove(&chunk->map[i + 1], &chunk->map[i + 2], 584 (chunk->map_used - (i + 1)) * sizeof(chunk->map[0])); 585 } 586 587 chunk->contig_hint = max(chunk->map[i], chunk->contig_hint); 588 pcpu_chunk_relocate(chunk, oslot); 589} 590 591/** 592 * pcpu_get_pages_and_bitmap - get temp pages array and bitmap 593 * @chunk: chunk of interest 594 * @bitmapp: output parameter for bitmap 595 * @may_alloc: may allocate the array 596 * 597 * Returns pointer to array of pointers to struct page and bitmap, 598 * both of which can be indexed with pcpu_page_idx(). The returned 599 * array is cleared to zero and *@bitmapp is copied from 600 * @chunk->populated. Note that there is only one array and bitmap 601 * and access exclusion is the caller's responsibility. 602 * 603 * CONTEXT: 604 * pcpu_alloc_mutex and does GFP_KERNEL allocation if @may_alloc. 605 * Otherwise, don't care. 606 * 607 * RETURNS: 608 * Pointer to temp pages array on success, NULL on failure. 609 */ 610static struct page **pcpu_get_pages_and_bitmap(struct pcpu_chunk *chunk, 611 unsigned long **bitmapp, 612 bool may_alloc) 613{ 614 static struct page **pages; 615 static unsigned long *bitmap; 616 size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]); 617 size_t bitmap_size = BITS_TO_LONGS(pcpu_unit_pages) * 618 sizeof(unsigned long); 619 620 if (!pages || !bitmap) { 621 if (may_alloc && !pages) 622 pages = pcpu_mem_alloc(pages_size); 623 if (may_alloc && !bitmap) 624 bitmap = pcpu_mem_alloc(bitmap_size); 625 if (!pages || !bitmap) 626 return NULL; 627 } 628 629 memset(pages, 0, pages_size); 630 bitmap_copy(bitmap, chunk->populated, pcpu_unit_pages); 631 632 *bitmapp = bitmap; 633 return pages; 634} 635 636/** 637 * pcpu_free_pages - free pages which were allocated for @chunk 638 * @chunk: chunk pages were allocated for 639 * @pages: array of pages to be freed, indexed by pcpu_page_idx() 640 * @populated: populated bitmap 641 * @page_start: page index of the first page to be freed 642 * @page_end: page index of the last page to be freed + 1 643 * 644 * Free pages [@page_start and @page_end) in @pages for all units. 645 * The pages were allocated for @chunk. 646 */ 647static void pcpu_free_pages(struct pcpu_chunk *chunk, 648 struct page **pages, unsigned long *populated, 649 int page_start, int page_end) 650{ 651 unsigned int cpu; 652 int i; 653 654 for_each_possible_cpu(cpu) { 655 for (i = page_start; i < page_end; i++) { 656 struct page *page = pages[pcpu_page_idx(cpu, i)]; 657 658 if (page) 659 __free_page(page); 660 } 661 } 662} 663 664/** 665 * pcpu_alloc_pages - allocates pages for @chunk 666 * @chunk: target chunk 667 * @pages: array to put the allocated pages into, indexed by pcpu_page_idx() 668 * @populated: populated bitmap 669 * @page_start: page index of the first page to be allocated 670 * @page_end: page index of the last page to be allocated + 1 671 * 672 * Allocate pages [@page_start,@page_end) into @pages for all units. 673 * The allocation is for @chunk. Percpu core doesn't care about the 674 * content of @pages and will pass it verbatim to pcpu_map_pages(). 
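/*
 * Sketch (userspace model, not kernel code): pcpu_free_area() above walks
 * the map until the running offset matches @freeme, flips the entry back
 * to positive and then merges it with a free previous and/or next
 * neighbour so adjacent free areas never accumulate.  The model below
 * reproduces the offset walk and the merge; sizes are made up.
 */
#include <stdio.h>
#include <string.h>

static int map[8] = { -64, 128, -128, 256 };
static int map_used = 4;

static void free_area(int freeme)
{
	int i, off = 0;

	/* find the entry whose start offset is @freeme */
	for (i = 0; i < map_used && off != freeme; i++)
		off += map[i] < 0 ? -map[i] : map[i];

	map[i] = -map[i];				/* mark free */

	if (i + 1 < map_used && map[i + 1] > 0) {	/* merge with next */
		map[i] += map[i + 1];
		memmove(&map[i + 1], &map[i + 2],
			(map_used - i - 2) * sizeof(map[0]));
		map_used--;
	}
	if (i > 0 && map[i - 1] > 0) {			/* merge with previous */
		map[i - 1] += map[i];
		memmove(&map[i], &map[i + 1],
			(map_used - i - 1) * sizeof(map[0]));
		map_used--;
	}
}

int main(void)
{
	int i;

	free_area(192);		/* free the -128 entry at offset 64 + 128 */
	for (i = 0; i < map_used; i++)
		printf("map[%d] = %d\n", i, map[i]);
	return 0;
}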
675 */ 676static int pcpu_alloc_pages(struct pcpu_chunk *chunk, 677 struct page **pages, unsigned long *populated, 678 int page_start, int page_end) 679{ 680 const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD; 681 unsigned int cpu; 682 int i; 683 684 for_each_possible_cpu(cpu) { 685 for (i = page_start; i < page_end; i++) { 686 struct page **pagep = &pages[pcpu_page_idx(cpu, i)]; 687 688 *pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0); 689 if (!*pagep) { 690 pcpu_free_pages(chunk, pages, populated, 691 page_start, page_end); 692 return -ENOMEM; 693 } 694 } 695 } 696 return 0; 697} 698 699/** 700 * pcpu_pre_unmap_flush - flush cache prior to unmapping 701 * @chunk: chunk the regions to be flushed belongs to 702 * @page_start: page index of the first page to be flushed 703 * @page_end: page index of the last page to be flushed + 1 704 * 705 * Pages in [@page_start,@page_end) of @chunk are about to be 706 * unmapped. Flush cache. As each flushing trial can be very 707 * expensive, issue flush on the whole region at once rather than 708 * doing it for each cpu. This could be an overkill but is more 709 * scalable. 710 */ 711static void pcpu_pre_unmap_flush(struct pcpu_chunk *chunk, 712 int page_start, int page_end) 713{ 714 flush_cache_vunmap( 715 pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start), 716 pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end)); 717} 718 719static void __pcpu_unmap_pages(unsigned long addr, int nr_pages) 720{ 721 unmap_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT); 722} 723 724/** 725 * pcpu_unmap_pages - unmap pages out of a pcpu_chunk 726 * @chunk: chunk of interest 727 * @pages: pages array which can be used to pass information to free 728 * @populated: populated bitmap 729 * @page_start: page index of the first page to unmap 730 * @page_end: page index of the last page to unmap + 1 731 * 732 * For each cpu, unmap pages [@page_start,@page_end) out of @chunk. 733 * Corresponding elements in @pages were cleared by the caller and can 734 * be used to carry information to pcpu_free_pages() which will be 735 * called after all unmaps are finished. The caller should call 736 * proper pre/post flush functions. 737 */ 738static void pcpu_unmap_pages(struct pcpu_chunk *chunk, 739 struct page **pages, unsigned long *populated, 740 int page_start, int page_end) 741{ 742 unsigned int cpu; 743 int i; 744 745 for_each_possible_cpu(cpu) { 746 for (i = page_start; i < page_end; i++) { 747 struct page *page; 748 749 page = pcpu_chunk_page(chunk, cpu, i); 750 WARN_ON(!page); 751 pages[pcpu_page_idx(cpu, i)] = page; 752 } 753 __pcpu_unmap_pages(pcpu_chunk_addr(chunk, cpu, page_start), 754 page_end - page_start); 755 } 756 757 for (i = page_start; i < page_end; i++) 758 __clear_bit(i, populated); 759} 760 761/** 762 * pcpu_post_unmap_tlb_flush - flush TLB after unmapping 763 * @chunk: pcpu_chunk the regions to be flushed belong to 764 * @page_start: page index of the first page to be flushed 765 * @page_end: page index of the last page to be flushed + 1 766 * 767 * Pages [@page_start,@page_end) of @chunk have been unmapped. Flush 768 * TLB for the regions. This can be skipped if the area is to be 769 * returned to vmalloc as vmalloc will handle TLB flushing lazily. 770 * 771 * As with pcpu_pre_unmap_flush(), TLB flushing also is done at once 772 * for the whole region. 
773 */ 774static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk, 775 int page_start, int page_end) 776{ 777 flush_tlb_kernel_range( 778 pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start), 779 pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end)); 780} 781 782static int __pcpu_map_pages(unsigned long addr, struct page **pages, 783 int nr_pages) 784{ 785 return map_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT, 786 PAGE_KERNEL, pages); 787} 788 789/** 790 * pcpu_map_pages - map pages into a pcpu_chunk 791 * @chunk: chunk of interest 792 * @pages: pages array containing pages to be mapped 793 * @populated: populated bitmap 794 * @page_start: page index of the first page to map 795 * @page_end: page index of the last page to map + 1 796 * 797 * For each cpu, map pages [@page_start,@page_end) into @chunk. The 798 * caller is responsible for calling pcpu_post_map_flush() after all 799 * mappings are complete. 800 * 801 * This function is responsible for setting corresponding bits in 802 * @chunk->populated bitmap and whatever is necessary for reverse 803 * lookup (addr -> chunk). 804 */ 805static int pcpu_map_pages(struct pcpu_chunk *chunk, 806 struct page **pages, unsigned long *populated, 807 int page_start, int page_end) 808{ 809 unsigned int cpu, tcpu; 810 int i, err; 811 812 for_each_possible_cpu(cpu) { 813 err = __pcpu_map_pages(pcpu_chunk_addr(chunk, cpu, page_start), 814 &pages[pcpu_page_idx(cpu, page_start)], 815 page_end - page_start); 816 if (err < 0) 817 goto err; 818 } 819 820 /* mapping successful, link chunk and mark populated */ 821 for (i = page_start; i < page_end; i++) { 822 for_each_possible_cpu(cpu) 823 pcpu_set_page_chunk(pages[pcpu_page_idx(cpu, i)], 824 chunk); 825 __set_bit(i, populated); 826 } 827 828 return 0; 829 830err: 831 for_each_possible_cpu(tcpu) { 832 if (tcpu == cpu) 833 break; 834 __pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start), 835 page_end - page_start); 836 } 837 return err; 838} 839 840/** 841 * pcpu_post_map_flush - flush cache after mapping 842 * @chunk: pcpu_chunk the regions to be flushed belong to 843 * @page_start: page index of the first page to be flushed 844 * @page_end: page index of the last page to be flushed + 1 845 * 846 * Pages [@page_start,@page_end) of @chunk have been mapped. Flush 847 * cache. 848 * 849 * As with pcpu_pre_unmap_flush(), TLB flushing also is done at once 850 * for the whole region. 851 */ 852static void pcpu_post_map_flush(struct pcpu_chunk *chunk, 853 int page_start, int page_end) 854{ 855 flush_cache_vmap( 856 pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start), 857 pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end)); 858} 859 860/** 861 * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk 862 * @chunk: chunk to depopulate 863 * @off: offset to the area to depopulate 864 * @size: size of the area to depopulate in bytes 865 * @flush: whether to flush cache and tlb or not 866 * 867 * For each cpu, depopulate and unmap pages [@page_start,@page_end) 868 * from @chunk. If @flush is true, vcache is flushed before unmapping 869 * and tlb after. 870 * 871 * CONTEXT: 872 * pcpu_alloc_mutex. 
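/*
 * Sketch (userspace model, not kernel code): pcpu_populate_chunk() below
 * works in two phases - allocate backing pages for every unpopulated
 * region, then map them - and on a mapping failure it unmaps what was
 * already mapped and frees everything it allocated (the err_unmap/
 * err_free unwinding).  The model below mirrors that structure with
 * malloc() standing in for page allocation and a printout standing in
 * for the mapping step; fail_at is a made-up fault-injection knob.
 */
#include <stdio.h>
#include <stdlib.h>

#define NPAGES 4

static int populate(int fail_at)
{
	void *pages[NPAGES] = { NULL };
	int i, rc = 0;

	for (i = 0; i < NPAGES; i++) {		/* phase 1: allocate */
		pages[i] = malloc(4096);
		if (!pages[i]) {
			rc = -1;
			goto err_free;
		}
	}

	for (i = 0; i < NPAGES; i++) {		/* phase 2: "map" */
		if (i == fail_at) {		/* simulated mapping failure */
			rc = -1;
			goto err_unmap;
		}
		printf("mapped page %d\n", i);
	}

	for (i = 0; i < NPAGES; i++)		/* model only: release resources */
		free(pages[i]);
	return 0;

err_unmap:
	while (--i >= 0)			/* undo phase 2 for already-mapped pages */
		printf("unmapped page %d\n", i);
err_free:
	for (i = 0; i < NPAGES; i++)		/* undo phase 1 */
		free(pages[i]);
	return rc;
}

int main(void)
{
	printf("populate() = %d\n", populate(-1));	/* no failure injected */
	printf("populate() = %d\n", populate(2));	/* fail while mapping page 2 */
	return 0;
}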
873 */ 874static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size) 875{ 876 int page_start = PFN_DOWN(off); 877 int page_end = PFN_UP(off + size); 878 struct page **pages; 879 unsigned long *populated; 880 int rs, re; 881 882 /* quick path, check whether it's empty already */ 883 pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) { 884 if (rs == page_start && re == page_end) 885 return; 886 break; 887 } 888 889 /* immutable chunks can't be depopulated */ 890 WARN_ON(chunk->immutable); 891 892 /* 893 * If control reaches here, there must have been at least one 894 * successful population attempt so the temp pages array must 895 * be available now. 896 */ 897 pages = pcpu_get_pages_and_bitmap(chunk, &populated, false); 898 BUG_ON(!pages); 899 900 /* unmap and free */ 901 pcpu_pre_unmap_flush(chunk, page_start, page_end); 902 903 pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end) 904 pcpu_unmap_pages(chunk, pages, populated, rs, re); 905 906 /* no need to flush tlb, vmalloc will handle it lazily */ 907 908 pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end) 909 pcpu_free_pages(chunk, pages, populated, rs, re); 910 911 /* commit new bitmap */ 912 bitmap_copy(chunk->populated, populated, pcpu_unit_pages); 913} 914 915/** 916 * pcpu_populate_chunk - populate and map an area of a pcpu_chunk 917 * @chunk: chunk of interest 918 * @off: offset to the area to populate 919 * @size: size of the area to populate in bytes 920 * 921 * For each cpu, populate and map pages [@page_start,@page_end) into 922 * @chunk. The area is cleared on return. 923 * 924 * CONTEXT: 925 * pcpu_alloc_mutex, does GFP_KERNEL allocation. 926 */ 927static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size) 928{ 929 int page_start = PFN_DOWN(off); 930 int page_end = PFN_UP(off + size); 931 int free_end = page_start, unmap_end = page_start; 932 struct page **pages; 933 unsigned long *populated; 934 unsigned int cpu; 935 int rs, re, rc; 936 937 /* quick path, check whether all pages are already there */ 938 pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end) { 939 if (rs == page_start && re == page_end) 940 goto clear; 941 break; 942 } 943 944 /* need to allocate and map pages, this chunk can't be immutable */ 945 WARN_ON(chunk->immutable); 946 947 pages = pcpu_get_pages_and_bitmap(chunk, &populated, true); 948 if (!pages) 949 return -ENOMEM; 950 951 /* alloc and map */ 952 pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) { 953 rc = pcpu_alloc_pages(chunk, pages, populated, rs, re); 954 if (rc) 955 goto err_free; 956 free_end = re; 957 } 958 959 pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) { 960 rc = pcpu_map_pages(chunk, pages, populated, rs, re); 961 if (rc) 962 goto err_unmap; 963 unmap_end = re; 964 } 965 pcpu_post_map_flush(chunk, page_start, page_end); 966 967 /* commit new bitmap */ 968 bitmap_copy(chunk->populated, populated, pcpu_unit_pages); 969clear: 970 for_each_possible_cpu(cpu) 971 memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size); 972 return 0; 973 974err_unmap: 975 pcpu_pre_unmap_flush(chunk, page_start, unmap_end); 976 pcpu_for_each_unpop_region(chunk, rs, re, page_start, unmap_end) 977 pcpu_unmap_pages(chunk, pages, populated, rs, re); 978 pcpu_post_unmap_tlb_flush(chunk, page_start, unmap_end); 979err_free: 980 pcpu_for_each_unpop_region(chunk, rs, re, page_start, free_end) 981 pcpu_free_pages(chunk, pages, populated, rs, re); 982 return rc; 983} 984 985static void 
free_pcpu_chunk(struct pcpu_chunk *chunk) 986{ 987 if (!chunk) 988 return; 989 if (chunk->vm) 990 free_vm_area(chunk->vm); 991 pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0])); 992 kfree(chunk); 993} 994 995static struct pcpu_chunk *alloc_pcpu_chunk(void) 996{ 997 struct pcpu_chunk *chunk; 998 999 chunk = kzalloc(pcpu_chunk_struct_size, GFP_KERNEL); 1000 if (!chunk) 1001 return NULL; 1002 1003 chunk->map = pcpu_mem_alloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0])); 1004 chunk->map_alloc = PCPU_DFL_MAP_ALLOC; 1005 chunk->map[chunk->map_used++] = pcpu_unit_size; 1006 1007 chunk->vm = get_vm_area(pcpu_chunk_size, VM_ALLOC); 1008 if (!chunk->vm) { 1009 free_pcpu_chunk(chunk); 1010 return NULL; 1011 } 1012 1013 INIT_LIST_HEAD(&chunk->list); 1014 chunk->free_size = pcpu_unit_size; 1015 chunk->contig_hint = pcpu_unit_size; 1016 1017 return chunk; 1018} 1019 1020/** 1021 * pcpu_alloc - the percpu allocator 1022 * @size: size of area to allocate in bytes 1023 * @align: alignment of area (max PAGE_SIZE) 1024 * @reserved: allocate from the reserved chunk if available 1025 * 1026 * Allocate percpu area of @size bytes aligned at @align. 1027 * 1028 * CONTEXT: 1029 * Does GFP_KERNEL allocation. 1030 * 1031 * RETURNS: 1032 * Percpu pointer to the allocated area on success, NULL on failure. 1033 */ 1034static void *pcpu_alloc(size_t size, size_t align, bool reserved) 1035{ 1036 struct pcpu_chunk *chunk; 1037 int slot, off; 1038 1039 if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) { 1040 WARN(true, "illegal size (%zu) or align (%zu) for " 1041 "percpu allocation\n", size, align); 1042 return NULL; 1043 } 1044 1045 mutex_lock(&pcpu_alloc_mutex); 1046 spin_lock_irq(&pcpu_lock); 1047 1048 /* serve reserved allocations from the reserved chunk if available */ 1049 if (reserved && pcpu_reserved_chunk) { 1050 chunk = pcpu_reserved_chunk; 1051 if (size > chunk->contig_hint || 1052 pcpu_extend_area_map(chunk) < 0) 1053 goto fail_unlock; 1054 off = pcpu_alloc_area(chunk, size, align); 1055 if (off >= 0) 1056 goto area_found; 1057 goto fail_unlock; 1058 } 1059 1060restart: 1061 /* search through normal chunks */ 1062 for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) { 1063 list_for_each_entry(chunk, &pcpu_slot[slot], list) { 1064 if (size > chunk->contig_hint) 1065 continue; 1066 1067 switch (pcpu_extend_area_map(chunk)) { 1068 case 0: 1069 break; 1070 case 1: 1071 goto restart; /* pcpu_lock dropped, restart */ 1072 default: 1073 goto fail_unlock; 1074 } 1075 1076 off = pcpu_alloc_area(chunk, size, align); 1077 if (off >= 0) 1078 goto area_found; 1079 } 1080 } 1081 1082 /* hmmm... 
no space left, create a new chunk */ 1083 spin_unlock_irq(&pcpu_lock); 1084 1085 chunk = alloc_pcpu_chunk(); 1086 if (!chunk) 1087 goto fail_unlock_mutex; 1088 1089 spin_lock_irq(&pcpu_lock); 1090 pcpu_chunk_relocate(chunk, -1); 1091 goto restart; 1092 1093area_found: 1094 spin_unlock_irq(&pcpu_lock); 1095 1096 /* populate, map and clear the area */ 1097 if (pcpu_populate_chunk(chunk, off, size)) { 1098 spin_lock_irq(&pcpu_lock); 1099 pcpu_free_area(chunk, off); 1100 goto fail_unlock; 1101 } 1102 1103 mutex_unlock(&pcpu_alloc_mutex); 1104 1105 /* return address relative to unit0 */ 1106 return __addr_to_pcpu_ptr(chunk->vm->addr + off); 1107 1108fail_unlock: 1109 spin_unlock_irq(&pcpu_lock); 1110fail_unlock_mutex: 1111 mutex_unlock(&pcpu_alloc_mutex); 1112 return NULL; 1113} 1114 1115/** 1116 * __alloc_percpu - allocate dynamic percpu area 1117 * @size: size of area to allocate in bytes 1118 * @align: alignment of area (max PAGE_SIZE) 1119 * 1120 * Allocate percpu area of @size bytes aligned at @align. Might 1121 * sleep. Might trigger writeouts. 1122 * 1123 * CONTEXT: 1124 * Does GFP_KERNEL allocation. 1125 * 1126 * RETURNS: 1127 * Percpu pointer to the allocated area on success, NULL on failure. 1128 */ 1129void *__alloc_percpu(size_t size, size_t align) 1130{ 1131 return pcpu_alloc(size, align, false); 1132} 1133EXPORT_SYMBOL_GPL(__alloc_percpu); 1134 1135/** 1136 * __alloc_reserved_percpu - allocate reserved percpu area 1137 * @size: size of area to allocate in bytes 1138 * @align: alignment of area (max PAGE_SIZE) 1139 * 1140 * Allocate percpu area of @size bytes aligned at @align from reserved 1141 * percpu area if arch has set it up; otherwise, allocation is served 1142 * from the same dynamic area. Might sleep. Might trigger writeouts. 1143 * 1144 * CONTEXT: 1145 * Does GFP_KERNEL allocation. 1146 * 1147 * RETURNS: 1148 * Percpu pointer to the allocated area on success, NULL on failure. 1149 */ 1150void *__alloc_reserved_percpu(size_t size, size_t align) 1151{ 1152 return pcpu_alloc(size, align, true); 1153} 1154 1155/** 1156 * pcpu_reclaim - reclaim fully free chunks, workqueue function 1157 * @work: unused 1158 * 1159 * Reclaim all fully free chunks except for the first one. 1160 * 1161 * CONTEXT: 1162 * workqueue context. 1163 */ 1164static void pcpu_reclaim(struct work_struct *work) 1165{ 1166 LIST_HEAD(todo); 1167 struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1]; 1168 struct pcpu_chunk *chunk, *next; 1169 1170 mutex_lock(&pcpu_alloc_mutex); 1171 spin_lock_irq(&pcpu_lock); 1172 1173 list_for_each_entry_safe(chunk, next, head, list) { 1174 WARN_ON(chunk->immutable); 1175 1176 /* spare the first one */ 1177 if (chunk == list_first_entry(head, struct pcpu_chunk, list)) 1178 continue; 1179 1180 list_move(&chunk->list, &todo); 1181 } 1182 1183 spin_unlock_irq(&pcpu_lock); 1184 1185 list_for_each_entry_safe(chunk, next, &todo, list) { 1186 pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size); 1187 free_pcpu_chunk(chunk); 1188 } 1189 1190 mutex_unlock(&pcpu_alloc_mutex); 1191} 1192 1193/** 1194 * free_percpu - free percpu area 1195 * @ptr: pointer to area to free 1196 * 1197 * Free percpu area @ptr. 1198 * 1199 * CONTEXT: 1200 * Can be called from atomic context. 
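/*
 * Sketch (userspace model, not kernel code): the search loop in
 * pcpu_alloc() above starts at the slot matching the request size and only
 * walks towards slots holding chunks with more free space, skipping any
 * chunk whose contig_hint already rules it out.  The model below replaces
 * the per-slot lists with small arrays and uses a crude stand-in for the
 * fls()-based slot mapping; all sizes are made up.
 */
#include <stdio.h>

#define NR_SLOTS 6
#define PER_SLOT 4

struct chunk { int contig_hint; };	/* just the free-area hint */

/* chunks[slot][n]: chunks whose free size maps to @slot; hint 0 = empty entry */
static struct chunk chunks[NR_SLOTS][PER_SLOT] = {
	[2] = { { 40 }, { 60 } },	/* free sizes in [32, 64) */
	[3] = { { 80 }, { 120 } },	/* free sizes in [64, 128) */
	[5] = { { 700 } },
};

static int size_to_slot(int size)	/* crude stand-in for the fls() bucketing */
{
	int slot = 1;

	while ((16 << slot) <= size && slot < NR_SLOTS - 1)
		slot++;
	return slot;
}

static struct chunk *find_chunk(int size)
{
	int slot, i;

	for (slot = size_to_slot(size); slot < NR_SLOTS; slot++)
		for (i = 0; i < PER_SLOT; i++)
			if (chunks[slot][i].contig_hint >= size)
				return &chunks[slot][i];
	return NULL;			/* caller would allocate a new chunk */
}

int main(void)
{
	struct chunk *c = find_chunk(100);

	printf("100-byte request served by chunk with hint %d\n",
	       c ? c->contig_hint : -1);
	return 0;
}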
1201 */ 1202void free_percpu(void *ptr) 1203{ 1204 void *addr = __pcpu_ptr_to_addr(ptr); 1205 struct pcpu_chunk *chunk; 1206 unsigned long flags; 1207 int off; 1208 1209 if (!ptr) 1210 return; 1211 1212 spin_lock_irqsave(&pcpu_lock, flags); 1213 1214 chunk = pcpu_chunk_addr_search(addr); 1215 off = addr - chunk->vm->addr; 1216 1217 pcpu_free_area(chunk, off); 1218 1219 /* if there are more than one fully free chunks, wake up grim reaper */ 1220 if (chunk->free_size == pcpu_unit_size) { 1221 struct pcpu_chunk *pos; 1222 1223 list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list) 1224 if (pos != chunk) { 1225 schedule_work(&pcpu_reclaim_work); 1226 break; 1227 } 1228 } 1229 1230 spin_unlock_irqrestore(&pcpu_lock, flags); 1231} 1232EXPORT_SYMBOL_GPL(free_percpu); 1233 1234/** 1235 * pcpu_setup_first_chunk - initialize the first percpu chunk 1236 * @static_size: the size of static percpu area in bytes 1237 * @reserved_size: the size of reserved percpu area in bytes, 0 for none 1238 * @dyn_size: free size for dynamic allocation in bytes, -1 for auto 1239 * @unit_size: unit size in bytes, must be multiple of PAGE_SIZE 1240 * @base_addr: mapped address 1241 * @unit_map: cpu -> unit map, NULL for sequential mapping 1242 * 1243 * Initialize the first percpu chunk which contains the kernel static 1244 * perpcu area. This function is to be called from arch percpu area 1245 * setup path. 1246 * 1247 * @reserved_size, if non-zero, specifies the amount of bytes to 1248 * reserve after the static area in the first chunk. This reserves 1249 * the first chunk such that it's available only through reserved 1250 * percpu allocation. This is primarily used to serve module percpu 1251 * static areas on architectures where the addressing model has 1252 * limited offset range for symbol relocations to guarantee module 1253 * percpu symbols fall inside the relocatable range. 1254 * 1255 * @dyn_size, if non-negative, determines the number of bytes 1256 * available for dynamic allocation in the first chunk. Specifying 1257 * non-negative value makes percpu leave alone the area beyond 1258 * @static_size + @reserved_size + @dyn_size. 1259 * 1260 * @unit_size specifies unit size and must be aligned to PAGE_SIZE and 1261 * equal to or larger than @static_size + @reserved_size + if 1262 * non-negative, @dyn_size. 1263 * 1264 * The caller should have mapped the first chunk at @base_addr and 1265 * copied static data to each unit. 1266 * 1267 * If the first chunk ends up with both reserved and dynamic areas, it 1268 * is served by two chunks - one to serve the core static and reserved 1269 * areas and the other for the dynamic area. They share the same vm 1270 * and page map but uses different area allocation map to stay away 1271 * from each other. The latter chunk is circulated in the chunk slots 1272 * and available for dynamic allocation like any other chunks. 1273 * 1274 * RETURNS: 1275 * The determined pcpu_unit_size which can be used to initialize 1276 * percpu access. 1277 */ 1278size_t __init pcpu_setup_first_chunk(size_t static_size, size_t reserved_size, 1279 ssize_t dyn_size, size_t unit_size, 1280 void *base_addr, const int *unit_map) 1281{ 1282 static struct vm_struct first_vm; 1283 static int smap[2], dmap[2]; 1284 size_t size_sum = static_size + reserved_size + 1285 (dyn_size >= 0 ? 
dyn_size : 0); 1286 struct pcpu_chunk *schunk, *dchunk = NULL; 1287 unsigned int cpu, tcpu; 1288 int i; 1289 1290 /* sanity checks */ 1291 BUILD_BUG_ON(ARRAY_SIZE(smap) >= PCPU_DFL_MAP_ALLOC || 1292 ARRAY_SIZE(dmap) >= PCPU_DFL_MAP_ALLOC); 1293 BUG_ON(!static_size); 1294 BUG_ON(!base_addr); 1295 BUG_ON(unit_size < size_sum); 1296 BUG_ON(unit_size & ~PAGE_MASK); 1297 BUG_ON(unit_size < PCPU_MIN_UNIT_SIZE); 1298 1299 /* determine number of units and verify and initialize pcpu_unit_map */ 1300 if (unit_map) { 1301 int first_unit = INT_MAX, last_unit = INT_MIN; 1302 1303 for_each_possible_cpu(cpu) { 1304 int unit = unit_map[cpu]; 1305 1306 BUG_ON(unit < 0); 1307 for_each_possible_cpu(tcpu) { 1308 if (tcpu == cpu) 1309 break; 1310 /* the mapping should be one-to-one */ 1311 BUG_ON(unit_map[tcpu] == unit); 1312 } 1313 1314 if (unit < first_unit) { 1315 pcpu_first_unit_cpu = cpu; 1316 first_unit = unit; 1317 } 1318 if (unit > last_unit) { 1319 pcpu_last_unit_cpu = cpu; 1320 last_unit = unit; 1321 } 1322 } 1323 pcpu_nr_units = last_unit + 1; 1324 pcpu_unit_map = unit_map; 1325 } else { 1326 int *identity_map; 1327 1328 /* #units == #cpus, identity mapped */ 1329 identity_map = alloc_bootmem(nr_cpu_ids * 1330 sizeof(identity_map[0])); 1331 1332 for_each_possible_cpu(cpu) 1333 identity_map[cpu] = cpu; 1334 1335 pcpu_first_unit_cpu = 0; 1336 pcpu_last_unit_cpu = pcpu_nr_units - 1; 1337 pcpu_nr_units = nr_cpu_ids; 1338 pcpu_unit_map = identity_map; 1339 } 1340 1341 /* determine basic parameters */ 1342 pcpu_unit_pages = unit_size >> PAGE_SHIFT; 1343 pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT; 1344 pcpu_chunk_size = pcpu_nr_units * pcpu_unit_size; 1345 pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) + 1346 BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long); 1347 1348 if (dyn_size < 0) 1349 dyn_size = pcpu_unit_size - static_size - reserved_size; 1350 1351 first_vm.flags = VM_ALLOC; 1352 first_vm.size = pcpu_chunk_size; 1353 first_vm.addr = base_addr; 1354 1355 /* 1356 * Allocate chunk slots. The additional last slot is for 1357 * empty chunks. 1358 */ 1359 pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2; 1360 pcpu_slot = alloc_bootmem(pcpu_nr_slots * sizeof(pcpu_slot[0])); 1361 for (i = 0; i < pcpu_nr_slots; i++) 1362 INIT_LIST_HEAD(&pcpu_slot[i]); 1363 1364 /* 1365 * Initialize static chunk. If reserved_size is zero, the 1366 * static chunk covers static area + dynamic allocation area 1367 * in the first chunk. If reserved_size is not zero, it 1368 * covers static area + reserved area (mostly used for module 1369 * static percpu allocation). 
1370 */ 1371 schunk = alloc_bootmem(pcpu_chunk_struct_size); 1372 INIT_LIST_HEAD(&schunk->list); 1373 schunk->vm = &first_vm; 1374 schunk->map = smap; 1375 schunk->map_alloc = ARRAY_SIZE(smap); 1376 schunk->immutable = true; 1377 bitmap_fill(schunk->populated, pcpu_unit_pages); 1378 1379 if (reserved_size) { 1380 schunk->free_size = reserved_size; 1381 pcpu_reserved_chunk = schunk; 1382 pcpu_reserved_chunk_limit = static_size + reserved_size; 1383 } else { 1384 schunk->free_size = dyn_size; 1385 dyn_size = 0; /* dynamic area covered */ 1386 } 1387 schunk->contig_hint = schunk->free_size; 1388 1389 schunk->map[schunk->map_used++] = -static_size; 1390 if (schunk->free_size) 1391 schunk->map[schunk->map_used++] = schunk->free_size; 1392 1393 /* init dynamic chunk if necessary */ 1394 if (dyn_size) { 1395 dchunk = alloc_bootmem(pcpu_chunk_struct_size); 1396 INIT_LIST_HEAD(&dchunk->list); 1397 dchunk->vm = &first_vm; 1398 dchunk->map = dmap; 1399 dchunk->map_alloc = ARRAY_SIZE(dmap); 1400 dchunk->immutable = true; 1401 bitmap_fill(dchunk->populated, pcpu_unit_pages); 1402 1403 dchunk->contig_hint = dchunk->free_size = dyn_size; 1404 dchunk->map[dchunk->map_used++] = -pcpu_reserved_chunk_limit; 1405 dchunk->map[dchunk->map_used++] = dchunk->free_size; 1406 } 1407 1408 /* link the first chunk in */ 1409 pcpu_first_chunk = dchunk ?: schunk; 1410 pcpu_chunk_relocate(pcpu_first_chunk, -1); 1411 1412 /* we're done */ 1413 pcpu_base_addr = schunk->vm->addr; 1414 return pcpu_unit_size; 1415} 1416 1417const char *pcpu_fc_names[PCPU_FC_NR] __initdata = { 1418 [PCPU_FC_AUTO] = "auto", 1419 [PCPU_FC_EMBED] = "embed", 1420 [PCPU_FC_PAGE] = "page", 1421 [PCPU_FC_LPAGE] = "lpage", 1422}; 1423 1424enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO; 1425 1426static int __init percpu_alloc_setup(char *str) 1427{ 1428 if (0) 1429 /* nada */; 1430#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK 1431 else if (!strcmp(str, "embed")) 1432 pcpu_chosen_fc = PCPU_FC_EMBED; 1433#endif 1434#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK 1435 else if (!strcmp(str, "page")) 1436 pcpu_chosen_fc = PCPU_FC_PAGE; 1437#endif 1438#ifdef CONFIG_NEED_PER_CPU_LPAGE_FIRST_CHUNK 1439 else if (!strcmp(str, "lpage")) 1440 pcpu_chosen_fc = PCPU_FC_LPAGE; 1441#endif 1442 else 1443 pr_warning("PERCPU: unknown allocator %s specified\n", str); 1444 1445 return 0; 1446} 1447early_param("percpu_alloc", percpu_alloc_setup); 1448 1449static inline size_t pcpu_calc_fc_sizes(size_t static_size, 1450 size_t reserved_size, 1451 ssize_t *dyn_sizep) 1452{ 1453 size_t size_sum; 1454 1455 size_sum = PFN_ALIGN(static_size + reserved_size + 1456 (*dyn_sizep >= 0 ? *dyn_sizep : 0)); 1457 if (*dyn_sizep != 0) 1458 *dyn_sizep = size_sum - static_size - reserved_size; 1459 1460 return size_sum; 1461} 1462 1463#if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \ 1464 !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) 1465/** 1466 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem 1467 * @static_size: the size of static percpu area in bytes 1468 * @reserved_size: the size of reserved percpu area in bytes 1469 * @dyn_size: free size for dynamic allocation in bytes, -1 for auto 1470 * 1471 * This is a helper to ease setting up embedded first percpu chunk and 1472 * can be called where pcpu_setup_first_chunk() is expected. 1473 * 1474 * If this function is used to setup the first chunk, it is allocated 1475 * as a contiguous area using bootmem allocator and used as-is without 1476 * being mapped into vmalloc area. 
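/*
 * Sketch (userspace model, not kernel code): when the first chunk carries
 * both a reserved and a dynamic region, pcpu_setup_first_chunk() above
 * describes it with two chunks sharing one mapping.  The static chunk's
 * map marks the static area allocated and the reserved area free, while
 * the dynamic chunk's map marks everything up to static + reserved as
 * allocated and the dynamic area free.  The sizes below are made up; the
 * point is only how the two 2-entry maps are derived.
 */
#include <stdio.h>

int main(void)
{
	int static_size = 40960, reserved_size = 8192, dyn_size = 20480;
	int smap[2], dmap[2];

	/* static chunk: static area allocated, reserved area free */
	smap[0] = -static_size;
	smap[1] = reserved_size;

	/* dynamic chunk: static + reserved allocated, dynamic area free */
	dmap[0] = -(static_size + reserved_size);
	dmap[1] = dyn_size;

	printf("smap = { %d, %d }\n", smap[0], smap[1]);
	printf("dmap = { %d, %d }\n", dmap[0], dmap[1]);
	return 0;
}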
This enables the first chunk to 1477 * piggy back on the linear physical mapping which often uses larger 1478 * page size. 1479 * 1480 * When @dyn_size is positive, dynamic area might be larger than 1481 * specified to fill page alignment. When @dyn_size is auto, 1482 * @dyn_size is just big enough to fill page alignment after static 1483 * and reserved areas. 1484 * 1485 * If the needed size is smaller than the minimum or specified unit 1486 * size, the leftover is returned to the bootmem allocator. 1487 * 1488 * RETURNS: 1489 * The determined pcpu_unit_size which can be used to initialize 1490 * percpu access on success, -errno on failure. 1491 */ 1492ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size, 1493 ssize_t dyn_size) 1494{ 1495 size_t size_sum, unit_size, chunk_size; 1496 void *base; 1497 unsigned int cpu; 1498 1499 /* determine parameters and allocate */ 1500 size_sum = pcpu_calc_fc_sizes(static_size, reserved_size, &dyn_size); 1501 1502 unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE); 1503 chunk_size = unit_size * nr_cpu_ids; 1504 1505 base = __alloc_bootmem_nopanic(chunk_size, PAGE_SIZE, 1506 __pa(MAX_DMA_ADDRESS)); 1507 if (!base) { 1508 pr_warning("PERCPU: failed to allocate %zu bytes for " 1509 "embedding\n", chunk_size); 1510 return -ENOMEM; 1511 } 1512 1513 /* return the leftover and copy */ 1514 for (cpu = 0; cpu < nr_cpu_ids; cpu++) { 1515 void *ptr = base + cpu * unit_size; 1516 1517 if (cpu_possible(cpu)) { 1518 free_bootmem(__pa(ptr + size_sum), 1519 unit_size - size_sum); 1520 memcpy(ptr, __per_cpu_load, static_size); 1521 } else 1522 free_bootmem(__pa(ptr), unit_size); 1523 } 1524 1525 /* we're ready, commit */ 1526 pr_info("PERCPU: Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n", 1527 PFN_DOWN(size_sum), base, static_size, reserved_size, dyn_size, 1528 unit_size); 1529 1530 return pcpu_setup_first_chunk(static_size, reserved_size, dyn_size, 1531 unit_size, base, NULL); 1532} 1533#endif /* CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK || 1534 !CONFIG_HAVE_SETUP_PER_CPU_AREA */ 1535 1536#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK 1537/** 1538 * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages 1539 * @static_size: the size of static percpu area in bytes 1540 * @reserved_size: the size of reserved percpu area in bytes 1541 * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE 1542 * @free_fn: funtion to free percpu page, always called with PAGE_SIZE 1543 * @populate_pte_fn: function to populate pte 1544 * 1545 * This is a helper to ease setting up page-remapped first percpu 1546 * chunk and can be called where pcpu_setup_first_chunk() is expected. 1547 * 1548 * This is the basic allocator. Static percpu area is allocated 1549 * page-by-page into vmalloc area. 1550 * 1551 * RETURNS: 1552 * The determined pcpu_unit_size which can be used to initialize 1553 * percpu access on success, -errno on failure. 
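/*
 * Sketch (userspace model, not kernel code): pcpu_embed_first_chunk()
 * above rounds the needed size up to a unit size, grabs nr_cpu_ids units
 * back to back from bootmem, and then returns the unused tail of every
 * possible cpu's unit plus the whole unit of every impossible cpu.  The
 * model below only prints which byte ranges of such a block would be kept
 * or returned; all sizes and the cpu map are made up.
 */
#include <stdio.h>

#define NR_CPU_IDS 4

static int cpu_possible_model[NR_CPU_IDS] = { 1, 1, 1, 0 };	/* cpu3 is a hole */

int main(void)
{
	long size_sum = 40960;		/* static + reserved + dynamic, page aligned */
	long unit_size = 65536;		/* unit size actually allocated per cpu id */
	int cpu;

	for (cpu = 0; cpu < NR_CPU_IDS; cpu++) {
		long start = cpu * unit_size;

		if (cpu_possible_model[cpu])
			printf("cpu%d: keep [%ld, %ld), return [%ld, %ld)\n",
			       cpu, start, start + size_sum,
			       start + size_sum, start + unit_size);
		else
			printf("cpu%d: return whole unit [%ld, %ld)\n",
			       cpu, start, start + unit_size);
	}
	return 0;
}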
1554 */ 1555ssize_t __init pcpu_page_first_chunk(size_t static_size, size_t reserved_size, 1556 pcpu_fc_alloc_fn_t alloc_fn, 1557 pcpu_fc_free_fn_t free_fn, 1558 pcpu_fc_populate_pte_fn_t populate_pte_fn) 1559{ 1560 static struct vm_struct vm; 1561 char psize_str[16]; 1562 int unit_pages; 1563 size_t pages_size; 1564 struct page **pages; 1565 unsigned int cpu; 1566 int i, j; 1567 ssize_t ret; 1568 1569 snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10); 1570 1571 unit_pages = PFN_UP(max_t(size_t, static_size + reserved_size, 1572 PCPU_MIN_UNIT_SIZE)); 1573 1574 /* unaligned allocations can't be freed, round up to page size */ 1575 pages_size = PFN_ALIGN(unit_pages * nr_cpu_ids * sizeof(pages[0])); 1576 pages = alloc_bootmem(pages_size); 1577 1578 /* allocate pages */ 1579 j = 0; 1580 for_each_possible_cpu(cpu) 1581 for (i = 0; i < unit_pages; i++) { 1582 void *ptr; 1583 1584 ptr = alloc_fn(cpu, PAGE_SIZE); 1585 if (!ptr) { 1586 pr_warning("PERCPU: failed to allocate %s page " 1587 "for cpu%u\n", psize_str, cpu); 1588 goto enomem; 1589 } 1590 pages[j++] = virt_to_page(ptr); 1591 } 1592 1593 /* allocate vm area, map the pages and copy static data */ 1594 vm.flags = VM_ALLOC; 1595 vm.size = nr_cpu_ids * unit_pages << PAGE_SHIFT; 1596 vm_area_register_early(&vm, PAGE_SIZE); 1597 1598 for_each_possible_cpu(cpu) { 1599 unsigned long unit_addr = (unsigned long)vm.addr + 1600 (cpu * unit_pages << PAGE_SHIFT); 1601 1602 for (i = 0; i < unit_pages; i++) 1603 populate_pte_fn(unit_addr + (i << PAGE_SHIFT)); 1604 1605 /* pte already populated, the following shouldn't fail */ 1606 ret = __pcpu_map_pages(unit_addr, &pages[cpu * unit_pages], 1607 unit_pages); 1608 if (ret < 0) 1609 panic("failed to map percpu area, err=%zd\n", ret); 1610 1611 /* 1612 * FIXME: Archs with virtual cache should flush local 1613 * cache for the linear mapping here - something 1614 * equivalent to flush_cache_vmap() on the local cpu. 1615 * flush_cache_vmap() can't be used as most supporting 1616 * data structures are not set up yet. 1617 */ 1618 1619 /* copy static data */ 1620 memcpy((void *)unit_addr, __per_cpu_load, static_size); 1621 } 1622 1623 /* we're ready, commit */ 1624 pr_info("PERCPU: %d %s pages/cpu @%p s%zu r%zu\n", 1625 unit_pages, psize_str, vm.addr, static_size, reserved_size); 1626 1627 ret = pcpu_setup_first_chunk(static_size, reserved_size, -1, 1628 unit_pages << PAGE_SHIFT, vm.addr, NULL); 1629 goto out_free_ar; 1630 1631enomem: 1632 while (--j >= 0) 1633 free_fn(page_address(pages[j]), PAGE_SIZE); 1634 ret = -ENOMEM; 1635out_free_ar: 1636 free_bootmem(__pa(pages), pages_size); 1637 return ret; 1638} 1639#endif /* CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK */ 1640 1641#ifdef CONFIG_NEED_PER_CPU_LPAGE_FIRST_CHUNK 1642/** 1643 * pcpu_lpage_build_unit_map - build unit_map for large page remapping 1644 * @static_size: the size of static percpu area in bytes 1645 * @reserved_size: the size of reserved percpu area in bytes 1646 * @dyn_sizep: in/out parameter for dynamic size, -1 for auto 1647 * @unit_sizep: out parameter for unit size 1648 * @unit_map: unit_map to be filled 1649 * @cpu_distance_fn: callback to determine distance between cpus 1650 * 1651 * This function builds cpu -> unit map and determine other parameters 1652 * considering needed percpu size, large page size and distances 1653 * between CPUs in NUMA. 1654 * 1655 * CPUs which are of LOCAL_DISTANCE both ways are grouped together and 1656 * may share units in the same large page. 
The returned configuration 1657 * is guaranteed to have CPUs on different nodes on different large 1658 * pages and >=75% usage of allocated virtual address space. 1659 * 1660 * RETURNS: 1661 * On success, fills in @unit_map, sets *@dyn_sizep, *@unit_sizep and 1662 * returns the number of units to be allocated. -errno on failure. 1663 */ 1664int __init pcpu_lpage_build_unit_map(size_t static_size, size_t reserved_size, 1665 ssize_t *dyn_sizep, size_t *unit_sizep, 1666 size_t lpage_size, int *unit_map, 1667 pcpu_fc_cpu_distance_fn_t cpu_distance_fn) 1668{ 1669 static int group_map[NR_CPUS] __initdata; 1670 static int group_cnt[NR_CPUS] __initdata; 1671 int group_cnt_max = 0; 1672 size_t size_sum, min_unit_size, alloc_size; 1673 int upa, max_upa, uninitialized_var(best_upa); /* units_per_alloc */ 1674 int last_allocs; 1675 unsigned int cpu, tcpu; 1676 int group, unit; 1677 1678 /* 1679 * Determine min_unit_size, alloc_size and max_upa such that 1680 * alloc_size is multiple of lpage_size and is the smallest 1681 * which can accomodate 4k aligned segments which are equal to 1682 * or larger than min_unit_size. 1683 */ 1684 size_sum = pcpu_calc_fc_sizes(static_size, reserved_size, dyn_sizep); 1685 min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE); 1686 1687 alloc_size = roundup(min_unit_size, lpage_size); 1688 upa = alloc_size / min_unit_size; 1689 while (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK)) 1690 upa--; 1691 max_upa = upa; 1692 1693 /* group cpus according to their proximity */ 1694 for_each_possible_cpu(cpu) { 1695 group = 0; 1696 next_group: 1697 for_each_possible_cpu(tcpu) { 1698 if (cpu == tcpu) 1699 break; 1700 if (group_map[tcpu] == group && 1701 (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE || 1702 cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) { 1703 group++; 1704 goto next_group; 1705 } 1706 } 1707 group_map[cpu] = group; 1708 group_cnt[group]++; 1709 group_cnt_max = max(group_cnt_max, group_cnt[group]); 1710 } 1711 1712 /* 1713 * Expand unit size until address space usage goes over 75% 1714 * and then as much as possible without using more address 1715 * space. 1716 */ 1717 last_allocs = INT_MAX; 1718 for (upa = max_upa; upa; upa--) { 1719 int allocs = 0, wasted = 0; 1720 1721 if (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK)) 1722 continue; 1723 1724 for (group = 0; group_cnt[group]; group++) { 1725 int this_allocs = DIV_ROUND_UP(group_cnt[group], upa); 1726 allocs += this_allocs; 1727 wasted += this_allocs * upa - group_cnt[group]; 1728 } 1729 1730 /* 1731 * Don't accept if wastage is over 25%. The 1732 * greater-than comparison ensures upa==1 always 1733 * passes the following check. 
1734 */ 1735 if (wasted > num_possible_cpus() / 3) 1736 continue; 1737 1738 /* and then don't consume more memory */ 1739 if (allocs > last_allocs) 1740 break; 1741 last_allocs = allocs; 1742 best_upa = upa; 1743 } 1744 *unit_sizep = alloc_size / best_upa; 1745 1746 /* assign units to cpus accordingly */ 1747 unit = 0; 1748 for (group = 0; group_cnt[group]; group++) { 1749 for_each_possible_cpu(cpu) 1750 if (group_map[cpu] == group) 1751 unit_map[cpu] = unit++; 1752 unit = roundup(unit, best_upa); 1753 } 1754 1755 return unit; /* unit contains aligned number of units */ 1756} 1757 1758struct pcpul_ent { 1759 void *ptr; 1760 void *map_addr; 1761}; 1762 1763static size_t pcpul_size; 1764static size_t pcpul_lpage_size; 1765static int pcpul_nr_lpages; 1766static struct pcpul_ent *pcpul_map; 1767 1768static bool __init pcpul_unit_to_cpu(int unit, const int *unit_map, 1769 unsigned int *cpup) 1770{ 1771 unsigned int cpu; 1772 1773 for_each_possible_cpu(cpu) 1774 if (unit_map[cpu] == unit) { 1775 if (cpup) 1776 *cpup = cpu; 1777 return true; 1778 } 1779 1780 return false; 1781} 1782 1783static void __init pcpul_lpage_dump_cfg(const char *lvl, size_t static_size, 1784 size_t reserved_size, size_t dyn_size, 1785 size_t unit_size, size_t lpage_size, 1786 const int *unit_map, int nr_units) 1787{ 1788 int width = 1, v = nr_units; 1789 char empty_str[] = "--------"; 1790 int upl, lpl; /* units per lpage, lpage per line */ 1791 unsigned int cpu; 1792 int lpage, unit; 1793 1794 while (v /= 10) 1795 width++; 1796 empty_str[min_t(int, width, sizeof(empty_str) - 1)] = '\0'; 1797 1798 upl = max_t(int, lpage_size / unit_size, 1); 1799 lpl = rounddown_pow_of_two(max_t(int, 60 / (upl * (width + 1) + 2), 1)); 1800 1801 printk("%spcpu-lpage: sta/res/dyn=%zu/%zu/%zu unit=%zu lpage=%zu", lvl, 1802 static_size, reserved_size, dyn_size, unit_size, lpage_size); 1803 1804 for (lpage = 0, unit = 0; unit < nr_units; unit++) { 1805 if (!(unit % upl)) { 1806 if (!(lpage++ % lpl)) { 1807 printk("\n"); 1808 printk("%spcpu-lpage: ", lvl); 1809 } else 1810 printk("| "); 1811 } 1812 if (pcpul_unit_to_cpu(unit, unit_map, &cpu)) 1813 printk("%0*d ", width, cpu); 1814 else 1815 printk("%s ", empty_str); 1816 } 1817 printk("\n"); 1818} 1819 1820/** 1821 * pcpu_lpage_first_chunk - remap the first percpu chunk using large page 1822 * @static_size: the size of static percpu area in bytes 1823 * @reserved_size: the size of reserved percpu area in bytes 1824 * @dyn_size: free size for dynamic allocation in bytes 1825 * @unit_size: unit size in bytes 1826 * @lpage_size: the size of a large page 1827 * @unit_map: cpu -> unit mapping 1828 * @nr_units: the number of units 1829 * @alloc_fn: function to allocate percpu lpage, always called with lpage_size 1830 * @free_fn: function to free percpu memory, @size <= lpage_size 1831 * @map_fn: function to map percpu lpage, always called with lpage_size 1832 * 1833 * This allocator uses large page to build and map the first chunk. 1834 * Unlike other helpers, the caller should always specify @dyn_size 1835 * and @unit_size. These parameters along with @unit_map and 1836 * @nr_units can be determined using pcpu_lpage_build_unit_map(). 1837 * This two stage initialization is to allow arch code to evaluate the 1838 * parameters before committing to it. 1839 * 1840 * Large pages are allocated as directed by @unit_map and other 1841 * parameters and mapped to vmalloc space. Unused holes are returned 1842 * to the page allocator. 
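/*
 * Sketch (userspace model, not kernel code): pcpu_lpage_build_unit_map()
 * above sizes units by choosing a units-per-allocation (upa) value:
 * alloc_size is the smallest large page multiple covering min_unit_size,
 * and upa is lowered from its maximum until wastage stays at or below
 * roughly 25% while the number of large page allocations does not grow.
 * The model below reruns that selection for one made-up NUMA grouping;
 * page size, large page size and the group counts are all assumptions.
 */
#include <limits.h>
#include <stdio.h>

int main(void)
{
	long lpage_size = 2 * 1024 * 1024;	/* hypothetical 2M large page */
	long min_unit_size = 320 * 1024;
	long alloc_size = ((min_unit_size + lpage_size - 1) / lpage_size) * lpage_size;
	int group_cnt[] = { 5, 3 };		/* cpus per proximity group */
	int nr_groups = 2, nr_cpus = 8;
	int upa, max_upa, best_upa = 1, last_allocs = INT_MAX, g;

	/* largest upa whose units evenly divide alloc_size and stay page aligned */
	for (upa = alloc_size / min_unit_size;
	     alloc_size % upa || (alloc_size / upa) % 4096; upa--)
		;
	max_upa = upa;

	for (upa = max_upa; upa; upa--) {
		int allocs = 0, wasted = 0;

		if (alloc_size % upa || (alloc_size / upa) % 4096)
			continue;
		for (g = 0; g < nr_groups; g++) {
			int need = (group_cnt[g] + upa - 1) / upa;

			allocs += need;
			wasted += need * upa - group_cnt[g];
		}
		if (wasted > nr_cpus / 3)	/* more than ~25% waste: reject */
			continue;
		if (allocs > last_allocs)	/* starts costing more lpages: stop */
			break;
		last_allocs = allocs;
		best_upa = upa;
	}
	printf("unit size = %ld bytes (upa = %d)\n", alloc_size / best_upa, best_upa);
	return 0;
}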

/**
 * pcpu_lpage_first_chunk - remap the first percpu chunk using large page
 * @static_size: the size of static percpu area in bytes
 * @reserved_size: the size of reserved percpu area in bytes
 * @dyn_size: free size for dynamic allocation in bytes
 * @unit_size: unit size in bytes
 * @lpage_size: the size of a large page
 * @unit_map: cpu -> unit mapping
 * @nr_units: the number of units
 * @alloc_fn: function to allocate percpu lpage, always called with lpage_size
 * @free_fn: function to free percpu memory, @size <= lpage_size
 * @map_fn: function to map percpu lpage, always called with lpage_size
 *
 * This allocator uses large pages to build and map the first chunk.
 * Unlike the other helpers, the caller should always specify @dyn_size
 * and @unit_size.  These parameters along with @unit_map and @nr_units
 * can be determined using pcpu_lpage_build_unit_map().  This two-stage
 * initialization allows arch code to evaluate the parameters before
 * committing to them.
 *
 * Large pages are allocated as directed by @unit_map and other
 * parameters and mapped to vmalloc space.  Unused holes are returned
 * to the page allocator.  Note that these holes end up being actively
 * mapped twice - once through the linear physical mapping and once
 * through the vmalloc area used for the first percpu chunk.
 * Depending on the architecture, this might cause problems when
 * changing page attributes of the returned areas.  These double
 * mapped areas can be detected using pcpu_lpage_remapped().
 *
 * RETURNS:
 * The determined pcpu_unit_size which can be used to initialize
 * percpu access on success, -errno on failure.
 */
ssize_t __init pcpu_lpage_first_chunk(size_t static_size, size_t reserved_size,
				      size_t dyn_size, size_t unit_size,
				      size_t lpage_size, const int *unit_map,
				      int nr_units,
				      pcpu_fc_alloc_fn_t alloc_fn,
				      pcpu_fc_free_fn_t free_fn,
				      pcpu_fc_map_fn_t map_fn)
{
	static struct vm_struct vm;
	size_t chunk_size = unit_size * nr_units;
	size_t map_size;
	unsigned int cpu;
	ssize_t ret;
	int i, j, unit;

	pcpul_lpage_dump_cfg(KERN_DEBUG, static_size, reserved_size, dyn_size,
			     unit_size, lpage_size, unit_map, nr_units);

	BUG_ON(chunk_size % lpage_size);

	pcpul_size = static_size + reserved_size + dyn_size;
	pcpul_lpage_size = lpage_size;
	pcpul_nr_lpages = chunk_size / lpage_size;

	/* allocate pointer array and alloc large pages */
	map_size = pcpul_nr_lpages * sizeof(pcpul_map[0]);
	pcpul_map = alloc_bootmem(map_size);

	/* allocate all pages */
	for (i = 0; i < pcpul_nr_lpages; i++) {
		size_t offset = i * lpage_size;
		int first_unit = offset / unit_size;
		int last_unit = (offset + lpage_size - 1) / unit_size;
		void *ptr;

		/* find out which cpu is mapped to this unit */
		for (unit = first_unit; unit <= last_unit; unit++)
			if (pcpul_unit_to_cpu(unit, unit_map, &cpu))
				goto found;
		continue;
	found:
		ptr = alloc_fn(cpu, lpage_size);
		if (!ptr) {
			pr_warning("PERCPU: failed to allocate large page "
				   "for cpu%u\n", cpu);
			goto enomem;
		}

		pcpul_map[i].ptr = ptr;
	}

	/* return unused holes */
	for (unit = 0; unit < nr_units; unit++) {
		size_t start = unit * unit_size;
		size_t end = start + unit_size;
		size_t off, next;

		/* don't free used part of occupied unit */
		if (pcpul_unit_to_cpu(unit, unit_map, NULL))
			start += pcpul_size;

		/* unit can span more than one page, punch the holes */
		for (off = start; off < end; off = next) {
			void *ptr = pcpul_map[off / lpage_size].ptr;
			next = min(roundup(off + 1, lpage_size), end);
			if (ptr)
				free_fn(ptr + off % lpage_size, next - off);
		}
	}

	/* allocate address, map and copy */
	vm.flags = VM_ALLOC;
	vm.size = chunk_size;
	vm_area_register_early(&vm, unit_size);

	for (i = 0; i < pcpul_nr_lpages; i++) {
		if (!pcpul_map[i].ptr)
			continue;
		pcpul_map[i].map_addr = vm.addr + i * lpage_size;
		map_fn(pcpul_map[i].ptr, lpage_size, pcpul_map[i].map_addr);
	}

	for_each_possible_cpu(cpu)
		memcpy(vm.addr + unit_map[cpu] * unit_size, __per_cpu_load,
		       static_size);

	/* we're ready, commit */
	pr_info("PERCPU: large pages @%p s%zu r%zu d%zu u%zu\n",
		vm.addr, static_size, reserved_size, dyn_size, unit_size);

	ret = pcpu_setup_first_chunk(static_size, reserved_size, dyn_size,
				     unit_size, vm.addr, unit_map);

	/*
	 * Sort pcpul_map array for pcpu_lpage_remapped().  Unmapped
	 * lpages are pushed to the end and trimmed.
	 */
	for (i = 0; i < pcpul_nr_lpages - 1; i++)
		for (j = i + 1; j < pcpul_nr_lpages; j++) {
			struct pcpul_ent tmp;

			if (!pcpul_map[j].ptr)
				continue;
			if (pcpul_map[i].ptr &&
			    pcpul_map[i].ptr < pcpul_map[j].ptr)
				continue;

			tmp = pcpul_map[i];
			pcpul_map[i] = pcpul_map[j];
			pcpul_map[j] = tmp;
		}

	while (pcpul_nr_lpages && !pcpul_map[pcpul_nr_lpages - 1].ptr)
		pcpul_nr_lpages--;

	return ret;

enomem:
	for (i = 0; i < pcpul_nr_lpages; i++)
		if (pcpul_map[i].ptr)
			free_fn(pcpul_map[i].ptr, lpage_size);
	free_bootmem(__pa(pcpul_map), map_size);
	return -ENOMEM;
}
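
/*
 * Illustrative sketch (not part of this file's build): the three
 * callbacks taken by pcpu_lpage_first_chunk() are supplied by arch
 * code.  The pcpul_example_* names are invented for the example and
 * the mapping step, which is inherently arch specific (e.g. a PMD on
 * x86), is only stubbed out here.
 */
#if 0
static void * __init pcpul_example_alloc(unsigned int cpu, size_t size)
{
	/* node-local boot memory; @size is always lpage_size here */
	return __alloc_bootmem_node(NODE_DATA(cpu_to_node(cpu)), size, size,
				    __pa(MAX_DMA_ADDRESS));
}

static void __init pcpul_example_free(void *ptr, size_t size)
{
	/* @size may be smaller than lpage_size when holes are punched */
	free_bootmem(__pa(ptr), size);
}

static void __init pcpul_example_map(void *ptr, size_t size, void *addr)
{
	/* install a large page mapping of @ptr at @addr; arch specific */
}

static ssize_t __init pcpul_example_first_chunk(size_t static_size,
						size_t reserved_size,
						size_t dyn_size,
						size_t unit_size,
						size_t lpage_size,
						const int *unit_map,
						int nr_units)
{
	return pcpu_lpage_first_chunk(static_size, reserved_size, dyn_size,
				      unit_size, lpage_size, unit_map,
				      nr_units, pcpul_example_alloc,
				      pcpul_example_free, pcpul_example_map);
}
#endif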

/**
 * pcpu_lpage_remapped - determine whether a kaddr is in pcpul recycled area
 * @kaddr: the kernel address in question
 *
 * Determine whether @kaddr falls in the pcpul recycled area.  This is
 * used by pageattr to detect VM aliases and break up the pcpu large
 * page mapping such that the same physical page is not mapped under
 * different attributes.
 *
 * The recycled area is always at the tail of a partially used large
 * page.
 *
 * RETURNS:
 * Address of corresponding remapped pcpu address if match is found;
 * otherwise, NULL.
 */
void *pcpu_lpage_remapped(void *kaddr)
{
	unsigned long lpage_mask = pcpul_lpage_size - 1;
	void *lpage_addr = (void *)((unsigned long)kaddr & ~lpage_mask);
	unsigned long offset = (unsigned long)kaddr & lpage_mask;
	int left = 0, right = pcpul_nr_lpages - 1;
	int pos;

	/* pcpul in use at all? */
	if (!pcpul_map)
		return NULL;

	/* okay, perform binary search */
	while (left <= right) {
		pos = (left + right) / 2;

		if (pcpul_map[pos].ptr < lpage_addr)
			left = pos + 1;
		else if (pcpul_map[pos].ptr > lpage_addr)
			right = pos - 1;
		else
			return pcpul_map[pos].map_addr + offset;
	}

	return NULL;
}
#endif	/* CONFIG_NEED_PER_CPU_LPAGE_FIRST_CHUNK */
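
/*
 * Illustrative sketch (not part of this file's build): a page
 * attribute path can use pcpu_lpage_remapped() to find the vmalloc
 * alias of a recycled page and apply the same change there.  The
 * helper example_change_attr() is invented for the example.
 */
#if 0
static int example_fixup_pcpu_alias(unsigned long addr, int numpages,
				    pgprot_t prot)
{
	void *remapped = pcpu_lpage_remapped((void *)addr);

	/* not part of a recycled pcpu large page, nothing to do */
	if (!remapped)
		return 0;

	/* apply the same attribute change to the vmalloc alias */
	return example_change_attr((unsigned long)remapped, numpages, prot);
}
#endif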

/*
 * Generic percpu area setup.
 *
 * The embedding helper is used because its behavior closely resembles
 * the original non-dynamic generic percpu area setup.  This is
 * important because many archs have addressing restrictions and might
 * fail if the percpu area is located far away from the previous
 * location.  As an added bonus, in non-NUMA cases, embedding is
 * generally a good idea TLB-wise because the percpu area can piggyback
 * on the physical linear memory mapping which uses large page mappings
 * on applicable archs.
 */
#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

void __init setup_per_cpu_areas(void)
{
	size_t static_size = __per_cpu_end - __per_cpu_start;
	ssize_t unit_size;
	unsigned long delta;
	unsigned int cpu;

	/*
	 * Always reserve area for module percpu variables.  That's
	 * what the legacy allocator did.
	 */
	unit_size = pcpu_embed_first_chunk(static_size, PERCPU_MODULE_RESERVE,
					   PERCPU_DYNAMIC_RESERVE);
	if (unit_size < 0)
		panic("Failed to initialize percpu areas.");

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset[cpu] = delta + cpu * unit_size;
}
#endif	/* CONFIG_HAVE_SETUP_PER_CPU_AREA */
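
/*
 * Illustrative sketch (not part of this file's build): once
 * setup_per_cpu_areas() has filled __per_cpu_offset[], a static percpu
 * variable for a given CPU is reached by shifting its link-time
 * address by that CPU's offset, which is what the per_cpu() accessor
 * does.  The variable name example_counter is invented for the
 * example.
 */
#if 0
static DEFINE_PER_CPU(unsigned long, example_counter);

static void example_reset_counters(void)
{
	unsigned int cpu;

	/* per_cpu() applies __per_cpu_offset[cpu] to the variable's address */
	for_each_possible_cpu(cpu)
		per_cpu(example_counter, cpu) = 0;
}
#endif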