percpu.c revision 3c9a024fde58b08745680863859d1483def64f74
1/*
2 * mm/percpu.c - percpu memory allocator
3 *
4 * Copyright (C) 2009		SUSE Linux Products GmbH
5 * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
6 *
7 * This file is released under the GPLv2.
8 *
 9 * This is the percpu allocator which can handle both static and
 10 * dynamic areas.  Percpu areas are allocated in chunks.  Each chunk
 11 * consists of a boot-time determined number of units and the first
 12 * chunk is used for static percpu variables in the kernel image
 13 * (special boot-time alloc/init handling is necessary as these areas
 14 * need to be brought up before allocation services are running).
 15 * Units grow as necessary and all units grow or shrink in unison.
 16 * When a chunk is filled up, another chunk is allocated.
17 *
18 *  c0                           c1                         c2
19 *  -------------------          -------------------        ------------
20 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
21 *  -------------------  ......  -------------------  ....  ------------
22 *
 23 * Allocation is done in offset-size areas of a single unit's space.  I.e.,
24 * an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0,
 25 * c1:u1, c1:u2 and c1:u3.  On UMA, units correspond directly to
26 * cpus.  On NUMA, the mapping can be non-linear and even sparse.
27 * Percpu access can be done by configuring percpu base registers
28 * according to cpu to unit mapping and pcpu_unit_size.
29 *
 30 * There are usually many small percpu allocations, many of them being
31 * as small as 4 bytes.  The allocator organizes chunks into lists
32 * according to free size and tries to allocate from the fullest one.
33 * Each chunk keeps the maximum contiguous area size hint which is
 34 * guaranteed to be equal to or larger than the maximum contiguous
35 * area in the chunk.  This helps the allocator not to iterate the
36 * chunk maps unnecessarily.
37 *
 38 * Allocation state in each chunk is kept using an array of integers
 39 * on chunk->map.  A positive value in the map represents a free
 40 * region and a negative value an allocated one.  Allocation inside a
 41 * chunk is done by scanning this map sequentially and serving the
 42 * first matching entry.  This is mostly copied from the percpu_modalloc()
 43 * allocator.  Chunks can be determined from the address using the
 44 * index field in the page struct, which contains a pointer to the chunk.
45 *
 46 * To use this allocator, arch code should do the following.
47 *
48 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
49 *   regular address to percpu pointer and back if they need to be
50 *   different from the default
51 *
52 * - use pcpu_setup_first_chunk() during percpu area initialization to
53 *   setup the first chunk containing the kernel static percpu area
54 */
55
56#include <linux/bitmap.h>
57#include <linux/bootmem.h>
58#include <linux/err.h>
59#include <linux/list.h>
60#include <linux/log2.h>
61#include <linux/mm.h>
62#include <linux/module.h>
63#include <linux/mutex.h>
64#include <linux/percpu.h>
65#include <linux/pfn.h>
66#include <linux/slab.h>
67#include <linux/spinlock.h>
68#include <linux/vmalloc.h>
69#include <linux/workqueue.h>
70
71#include <asm/cacheflush.h>
72#include <asm/sections.h>
73#include <asm/tlbflush.h>
74#include <asm/io.h>
75
76#define PCPU_SLOT_BASE_SHIFT		5	/* 1-31 shares the same slot */
77#define PCPU_DFL_MAP_ALLOC		16	/* start a map with 16 ents */
78
79#ifdef CONFIG_SMP
80/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
81#ifndef __addr_to_pcpu_ptr
82#define __addr_to_pcpu_ptr(addr)					\
83	(void __percpu *)((unsigned long)(addr) -			\
84			  (unsigned long)pcpu_base_addr	+		\
85			  (unsigned long)__per_cpu_start)
86#endif
87#ifndef __pcpu_ptr_to_addr
88#define __pcpu_ptr_to_addr(ptr)						\
89	(void __force *)((unsigned long)(ptr) +				\
90			 (unsigned long)pcpu_base_addr -		\
91			 (unsigned long)__per_cpu_start)
92#endif
93#else	/* CONFIG_SMP */
94/* on UP, it's always identity mapped */
95#define __addr_to_pcpu_ptr(addr)	(void __percpu *)(addr)
96#define __pcpu_ptr_to_addr(ptr)		(void __force *)(ptr)
97#endif	/* CONFIG_SMP */
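
/*
 * Illustrative example (hypothetical addresses, a sketch only): if
 * __per_cpu_start is 0xc0a00000 and the first chunk is mapped at
 * pcpu_base_addr 0xf8000000, the unit0 address 0xf8000040 translates
 * to the percpu pointer 0xc0a00040 and back.  per_cpu_ptr() and
 * friends later add each cpu's offset to such pointers to reach that
 * cpu's copy.
 */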
98
99struct pcpu_chunk {
100	struct list_head	list;		/* linked to pcpu_slot lists */
101	int			free_size;	/* free bytes in the chunk */
102	int			contig_hint;	/* max contiguous size hint */
103	void			*base_addr;	/* base address of this chunk */
104	int			map_used;	/* # of map entries used */
105	int			map_alloc;	/* # of map entries allocated */
106	int			*map;		/* allocation map */
107	void			*data;		/* chunk data */
108	bool			immutable;	/* no [de]population allowed */
109	unsigned long		populated[];	/* populated bitmap */
110};
111
112static int pcpu_unit_pages __read_mostly;
113static int pcpu_unit_size __read_mostly;
114static int pcpu_nr_units __read_mostly;
115static int pcpu_atom_size __read_mostly;
116static int pcpu_nr_slots __read_mostly;
117static size_t pcpu_chunk_struct_size __read_mostly;
118
119/* cpus with the lowest and highest unit numbers */
120static unsigned int pcpu_first_unit_cpu __read_mostly;
121static unsigned int pcpu_last_unit_cpu __read_mostly;
122
123/* the address of the first chunk which starts with the kernel static area */
124void *pcpu_base_addr __read_mostly;
125EXPORT_SYMBOL_GPL(pcpu_base_addr);
126
127static const int *pcpu_unit_map __read_mostly;		/* cpu -> unit */
128const unsigned long *pcpu_unit_offsets __read_mostly;	/* cpu -> unit offset */
129
130/* group information, used for vm allocation */
131static int pcpu_nr_groups __read_mostly;
132static const unsigned long *pcpu_group_offsets __read_mostly;
133static const size_t *pcpu_group_sizes __read_mostly;
134
135/*
136 * The first chunk which always exists.  Note that unlike other
137 * chunks, this one can be allocated and mapped in several different
138 * ways and thus often doesn't live in the vmalloc area.
139 */
140static struct pcpu_chunk *pcpu_first_chunk;
141
142/*
143 * Optional reserved chunk.  This chunk reserves part of the first
 144 * chunk and serves it for reserved allocations.  The reserved region
 145 * ends at offset pcpu_reserved_chunk_limit within the first chunk.
 146 * When the reserved area doesn't exist, the following variables
 147 * contain NULL and 0 respectively.
148 */
149static struct pcpu_chunk *pcpu_reserved_chunk;
150static int pcpu_reserved_chunk_limit;
151
152/*
153 * Synchronization rules.
154 *
155 * There are two locks - pcpu_alloc_mutex and pcpu_lock.  The former
156 * protects allocation/reclaim paths, chunks, populated bitmap and
157 * vmalloc mapping.  The latter is a spinlock and protects the index
158 * data structures - chunk slots, chunks and area maps in chunks.
159 *
160 * During allocation, pcpu_alloc_mutex is kept locked all the time and
161 * pcpu_lock is grabbed and released as necessary.  All actual memory
162 * allocations are done using GFP_KERNEL with pcpu_lock released.  In
163 * general, percpu memory can't be allocated with irq off but
164 * irqsave/restore are still used in alloc path so that it can be used
165 * from early init path - sched_init() specifically.
166 *
167 * Free path accesses and alters only the index data structures, so it
168 * can be safely called from atomic context.  When memory needs to be
169 * returned to the system, free path schedules reclaim_work which
170 * grabs both pcpu_alloc_mutex and pcpu_lock, unlinks chunks to be
 171 * reclaimed, releases both locks and frees the chunks.  Note that it's
172 * necessary to grab both locks to remove a chunk from circulation as
173 * allocation path might be referencing the chunk with only
174 * pcpu_alloc_mutex locked.
175 */
176static DEFINE_MUTEX(pcpu_alloc_mutex);	/* protects whole alloc and reclaim */
177static DEFINE_SPINLOCK(pcpu_lock);	/* protects index data structures */
178
179static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
180
181/* reclaim work to release fully free chunks, scheduled from free path */
182static void pcpu_reclaim(struct work_struct *work);
183static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim);
184
185static bool pcpu_addr_in_first_chunk(void *addr)
186{
187	void *first_start = pcpu_first_chunk->base_addr;
188
189	return addr >= first_start && addr < first_start + pcpu_unit_size;
190}
191
192static bool pcpu_addr_in_reserved_chunk(void *addr)
193{
194	void *first_start = pcpu_first_chunk->base_addr;
195
196	return addr >= first_start &&
197		addr < first_start + pcpu_reserved_chunk_limit;
198}
199
200static int __pcpu_size_to_slot(int size)
201{
202	int highbit = fls(size);	/* size is in bytes */
203	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
204}
205
206static int pcpu_size_to_slot(int size)
207{
208	if (size == pcpu_unit_size)
209		return pcpu_nr_slots - 1;
210	return __pcpu_size_to_slot(size);
211}
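
/*
 * Illustrative slot values (a sketch derived from the code above): with
 * PCPU_SLOT_BASE_SHIFT == 5, __pcpu_size_to_slot() maps free sizes of
 * 64-127 bytes to slot 4, 128-255 bytes to slot 5, 1024-2047 bytes to
 * slot 8 and so on, one slot per power of two.  pcpu_size_to_slot()
 * additionally places completely free chunks (free_size ==
 * pcpu_unit_size) in the last slot so that pcpu_reclaim() can find
 * them easily.
 */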
212
213static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
214{
215	if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
216		return 0;
217
218	return pcpu_size_to_slot(chunk->free_size);
219}
220
221/* set the pointer to a chunk in a page struct */
222static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
223{
224	page->index = (unsigned long)pcpu;
225}
226
227/* obtain pointer to a chunk from a page struct */
228static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
229{
230	return (struct pcpu_chunk *)page->index;
231}
232
233static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
234{
235	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
236}
237
238static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
239				     unsigned int cpu, int page_idx)
240{
241	return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] +
242		(page_idx << PAGE_SHIFT);
243}
244
245static void __maybe_unused pcpu_next_unpop(struct pcpu_chunk *chunk,
246					   int *rs, int *re, int end)
247{
248	*rs = find_next_zero_bit(chunk->populated, end, *rs);
249	*re = find_next_bit(chunk->populated, end, *rs + 1);
250}
251
252static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
253					 int *rs, int *re, int end)
254{
255	*rs = find_next_bit(chunk->populated, end, *rs);
256	*re = find_next_zero_bit(chunk->populated, end, *rs + 1);
257}
258
259/*
260 * (Un)populated page region iterators.  Iterate over (un)populated
 261 * page regions between @start and @end in @chunk.  @rs and @re should
262 * be integer variables and will be set to start and end page index of
263 * the current region.
264 */
265#define pcpu_for_each_unpop_region(chunk, rs, re, start, end)		    \
266	for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \
267	     (rs) < (re);						    \
268	     (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end)))
269
270#define pcpu_for_each_pop_region(chunk, rs, re, start, end)		    \
271	for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end));   \
272	     (rs) < (re);						    \
273	     (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))
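
/*
 * Usage sketch (illustrative only): chunk management code typically
 * walks the unpopulated page regions overlapping an allocation like
 * this, with page_start and page_end being hypothetical page indices
 * into the chunk.
 *
 *	int rs, re;
 *
 *	pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
 *		... allocate and map pages [rs, re) for each unit ...
 *	}
 */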
274
275/**
276 * pcpu_mem_alloc - allocate memory
277 * @size: bytes to allocate
278 *
279 * Allocate @size bytes.  If @size is smaller than PAGE_SIZE,
280 * kzalloc() is used; otherwise, vmalloc() is used.  The returned
281 * memory is always zeroed.
282 *
283 * CONTEXT:
284 * Does GFP_KERNEL allocation.
285 *
286 * RETURNS:
287 * Pointer to the allocated area on success, NULL on failure.
288 */
289static void *pcpu_mem_alloc(size_t size)
290{
291	if (WARN_ON_ONCE(!slab_is_available()))
292		return NULL;
293
294	if (size <= PAGE_SIZE)
295		return kzalloc(size, GFP_KERNEL);
296	else {
297		void *ptr = vmalloc(size);
298		if (ptr)
299			memset(ptr, 0, size);
300		return ptr;
301	}
302}
303
304/**
305 * pcpu_mem_free - free memory
306 * @ptr: memory to free
307 * @size: size of the area
308 *
309 * Free @ptr.  @ptr should have been allocated using pcpu_mem_alloc().
310 */
311static void pcpu_mem_free(void *ptr, size_t size)
312{
313	if (size <= PAGE_SIZE)
314		kfree(ptr);
315	else
316		vfree(ptr);
317}
318
319/**
320 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
321 * @chunk: chunk of interest
322 * @oslot: the previous slot it was on
323 *
324 * This function is called after an allocation or free changed @chunk.
325 * New slot according to the changed state is determined and @chunk is
326 * moved to the slot.  Note that the reserved chunk is never put on
327 * chunk slots.
328 *
329 * CONTEXT:
330 * pcpu_lock.
331 */
332static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
333{
334	int nslot = pcpu_chunk_slot(chunk);
335
336	if (chunk != pcpu_reserved_chunk && oslot != nslot) {
337		if (oslot < nslot)
338			list_move(&chunk->list, &pcpu_slot[nslot]);
339		else
340			list_move_tail(&chunk->list, &pcpu_slot[nslot]);
341	}
342}
343
344/**
345 * pcpu_need_to_extend - determine whether chunk area map needs to be extended
346 * @chunk: chunk of interest
347 *
348 * Determine whether area map of @chunk needs to be extended to
 349 * accommodate a new allocation.
350 *
351 * CONTEXT:
352 * pcpu_lock.
353 *
354 * RETURNS:
355 * New target map allocation length if extension is necessary, 0
356 * otherwise.
357 */
358static int pcpu_need_to_extend(struct pcpu_chunk *chunk)
359{
360	int new_alloc;
361
362	if (chunk->map_alloc >= chunk->map_used + 2)
363		return 0;
364
365	new_alloc = PCPU_DFL_MAP_ALLOC;
366	while (new_alloc < chunk->map_used + 2)
367		new_alloc *= 2;
368
369	return new_alloc;
370}
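
/*
 * Example (sketch): a chunk whose map has map_alloc == 16 entries with
 * map_used == 15 cannot take the up to two extra entries a split may
 * need, so pcpu_need_to_extend() returns 32 and the caller grows the
 * map via pcpu_extend_area_map() before allocating.
 */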
371
372/**
373 * pcpu_extend_area_map - extend area map of a chunk
374 * @chunk: chunk of interest
375 * @new_alloc: new target allocation length of the area map
376 *
377 * Extend area map of @chunk to have @new_alloc entries.
378 *
379 * CONTEXT:
380 * Does GFP_KERNEL allocation.  Grabs and releases pcpu_lock.
381 *
382 * RETURNS:
383 * 0 on success, -errno on failure.
384 */
385static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
386{
387	int *old = NULL, *new = NULL;
388	size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
389	unsigned long flags;
390
391	new = pcpu_mem_alloc(new_size);
392	if (!new)
393		return -ENOMEM;
394
395	/* acquire pcpu_lock and switch to new area map */
396	spin_lock_irqsave(&pcpu_lock, flags);
397
398	if (new_alloc <= chunk->map_alloc)
399		goto out_unlock;
400
401	old_size = chunk->map_alloc * sizeof(chunk->map[0]);
402	old = chunk->map;
403
404	memcpy(new, old, old_size);
405
406	chunk->map_alloc = new_alloc;
407	chunk->map = new;
408	new = NULL;
409
410out_unlock:
411	spin_unlock_irqrestore(&pcpu_lock, flags);
412
413	/*
414	 * pcpu_mem_free() might end up calling vfree() which uses
415	 * IRQ-unsafe lock and thus can't be called under pcpu_lock.
416	 */
417	pcpu_mem_free(old, old_size);
418	pcpu_mem_free(new, new_size);
419
420	return 0;
421}
422
423/**
424 * pcpu_split_block - split a map block
425 * @chunk: chunk of interest
426 * @i: index of map block to split
427 * @head: head size in bytes (can be 0)
428 * @tail: tail size in bytes (can be 0)
429 *
430 * Split the @i'th map block into two or three blocks.  If @head is
431 * non-zero, @head bytes block is inserted before block @i moving it
432 * to @i+1 and reducing its size by @head bytes.
433 *
434 * If @tail is non-zero, the target block, which can be @i or @i+1
435 * depending on @head, is reduced by @tail bytes and @tail byte block
436 * is inserted after the target block.
437 *
 438 * @chunk->map must have enough free slots to accommodate the split.
439 *
440 * CONTEXT:
441 * pcpu_lock.
442 */
443static void pcpu_split_block(struct pcpu_chunk *chunk, int i,
444			     int head, int tail)
445{
446	int nr_extra = !!head + !!tail;
447
448	BUG_ON(chunk->map_alloc < chunk->map_used + nr_extra);
449
450	/* insert new subblocks */
451	memmove(&chunk->map[i + nr_extra], &chunk->map[i],
452		sizeof(chunk->map[0]) * (chunk->map_used - i));
453	chunk->map_used += nr_extra;
454
455	if (head) {
456		chunk->map[i + 1] = chunk->map[i] - head;
457		chunk->map[i++] = head;
458	}
459	if (tail) {
460		chunk->map[i++] -= tail;
461		chunk->map[i] = tail;
462	}
463}
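
/*
 * Illustrative example (a sketch, not taken from a real trace): carving
 * a 512 byte allocation with a 64 byte alignment head out of a 1024
 * byte free block at index i, i.e. pcpu_split_block(chunk, i, 64, 448),
 * transforms the map as follows.
 *
 *	before:  ..., map[i] = 1024, ...
 *	after:   ..., map[i] = 64, map[i+1] = 512, map[i+2] = 448, ...
 *
 * The caller, pcpu_alloc_area(), then negates the 512 entry to mark it
 * allocated.
 */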
464
465/**
466 * pcpu_alloc_area - allocate area from a pcpu_chunk
467 * @chunk: chunk of interest
468 * @size: wanted size in bytes
469 * @align: wanted align
470 *
471 * Try to allocate @size bytes area aligned at @align from @chunk.
472 * Note that this function only allocates the offset.  It doesn't
473 * populate or map the area.
474 *
475 * @chunk->map must have at least two free slots.
476 *
477 * CONTEXT:
478 * pcpu_lock.
479 *
480 * RETURNS:
481 * Allocated offset in @chunk on success, -1 if no matching area is
482 * found.
483 */
484static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
485{
486	int oslot = pcpu_chunk_slot(chunk);
487	int max_contig = 0;
488	int i, off;
489
490	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) {
491		bool is_last = i + 1 == chunk->map_used;
492		int head, tail;
493
494		/* extra for alignment requirement */
495		head = ALIGN(off, align) - off;
496		BUG_ON(i == 0 && head != 0);
497
498		if (chunk->map[i] < 0)
499			continue;
500		if (chunk->map[i] < head + size) {
501			max_contig = max(chunk->map[i], max_contig);
502			continue;
503		}
504
505		/*
506		 * If head is small or the previous block is free,
507		 * merge'em.  Note that 'small' is defined as smaller
508		 * than sizeof(int), which is very small but isn't too
509		 * uncommon for percpu allocations.
510		 */
511		if (head && (head < sizeof(int) || chunk->map[i - 1] > 0)) {
512			if (chunk->map[i - 1] > 0)
513				chunk->map[i - 1] += head;
514			else {
515				chunk->map[i - 1] -= head;
516				chunk->free_size -= head;
517			}
518			chunk->map[i] -= head;
519			off += head;
520			head = 0;
521		}
522
523		/* if tail is small, just keep it around */
524		tail = chunk->map[i] - head - size;
525		if (tail < sizeof(int))
526			tail = 0;
527
528		/* split if warranted */
529		if (head || tail) {
530			pcpu_split_block(chunk, i, head, tail);
531			if (head) {
532				i++;
533				off += head;
534				max_contig = max(chunk->map[i - 1], max_contig);
535			}
536			if (tail)
537				max_contig = max(chunk->map[i + 1], max_contig);
538		}
539
540		/* update hint and mark allocated */
541		if (is_last)
542			chunk->contig_hint = max_contig; /* fully scanned */
543		else
544			chunk->contig_hint = max(chunk->contig_hint,
545						 max_contig);
546
547		chunk->free_size -= chunk->map[i];
548		chunk->map[i] = -chunk->map[i];
549
550		pcpu_chunk_relocate(chunk, oslot);
551		return off;
552	}
553
554	chunk->contig_hint = max_contig;	/* fully scanned */
555	pcpu_chunk_relocate(chunk, oslot);
556
557	/* tell the upper layer that this chunk has no matching area */
558	return -1;
559}
560
561/**
562 * pcpu_free_area - free area to a pcpu_chunk
563 * @chunk: chunk of interest
564 * @freeme: offset of area to free
565 *
 566 * Free the area at offset @freeme in @chunk.  Note that this function
567 * only modifies the allocation map.  It doesn't depopulate or unmap
568 * the area.
569 *
570 * CONTEXT:
571 * pcpu_lock.
572 */
573static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
574{
575	int oslot = pcpu_chunk_slot(chunk);
576	int i, off;
577
578	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++]))
579		if (off == freeme)
580			break;
581	BUG_ON(off != freeme);
582	BUG_ON(chunk->map[i] > 0);
583
584	chunk->map[i] = -chunk->map[i];
585	chunk->free_size += chunk->map[i];
586
587	/* merge with previous? */
588	if (i > 0 && chunk->map[i - 1] >= 0) {
589		chunk->map[i - 1] += chunk->map[i];
590		chunk->map_used--;
591		memmove(&chunk->map[i], &chunk->map[i + 1],
592			(chunk->map_used - i) * sizeof(chunk->map[0]));
593		i--;
594	}
595	/* merge with next? */
596	if (i + 1 < chunk->map_used && chunk->map[i + 1] >= 0) {
597		chunk->map[i] += chunk->map[i + 1];
598		chunk->map_used--;
599		memmove(&chunk->map[i + 1], &chunk->map[i + 2],
600			(chunk->map_used - (i + 1)) * sizeof(chunk->map[0]));
601	}
602
603	chunk->contig_hint = max(chunk->map[i], chunk->contig_hint);
604	pcpu_chunk_relocate(chunk, oslot);
605}
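
/*
 * Illustrative example (sketch): freeing the 512 byte area from the
 * pcpu_split_block() example above, where the map reads
 * ..., 64, -512, 448, ... and both neighbors are free, first flips
 * -512 back to 512 and then merges it with the neighboring entries,
 * leaving a single 1024 byte free block.
 */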
606
607static struct pcpu_chunk *pcpu_alloc_chunk(void)
608{
609	struct pcpu_chunk *chunk;
610
611	chunk = pcpu_mem_alloc(pcpu_chunk_struct_size);
612	if (!chunk)
613		return NULL;
614
615	chunk->map = pcpu_mem_alloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0]));
616	if (!chunk->map) {
617		kfree(chunk);
618		return NULL;
619	}
620
621	chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
622	chunk->map[chunk->map_used++] = pcpu_unit_size;
623
624	INIT_LIST_HEAD(&chunk->list);
625	chunk->free_size = pcpu_unit_size;
626	chunk->contig_hint = pcpu_unit_size;
627
628	return chunk;
629}
630
631static void pcpu_free_chunk(struct pcpu_chunk *chunk)
632{
633	if (!chunk)
634		return;
635	pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]));
636	kfree(chunk);
637}
638
639/*
640 * Chunk management implementation.
641 *
642 * To allow different implementations, chunk alloc/free and
643 * [de]population are implemented in a separate file which is pulled
644 * into this file and compiled together.  The following functions
645 * should be implemented.
646 *
647 * pcpu_populate_chunk		- populate the specified range of a chunk
648 * pcpu_depopulate_chunk	- depopulate the specified range of a chunk
649 * pcpu_create_chunk		- create a new chunk
650 * pcpu_destroy_chunk		- destroy a chunk, always preceded by full depop
 651 * pcpu_addr_to_page		- translate address to the corresponding struct page
652 * pcpu_verify_alloc_info	- check alloc_info is acceptable during init
653 */
654static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size);
655static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size);
656static struct pcpu_chunk *pcpu_create_chunk(void);
657static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
658static struct page *pcpu_addr_to_page(void *addr);
659static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);
660
661#ifdef CONFIG_NEED_PER_CPU_KM
662#include "percpu-km.c"
663#else
664#include "percpu-vm.c"
665#endif
666
667/**
668 * pcpu_chunk_addr_search - determine chunk containing specified address
669 * @addr: address for which the chunk needs to be determined.
670 *
671 * RETURNS:
672 * The address of the found chunk.
673 */
674static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
675{
676	/* is it in the first chunk? */
677	if (pcpu_addr_in_first_chunk(addr)) {
678		/* is it in the reserved area? */
679		if (pcpu_addr_in_reserved_chunk(addr))
680			return pcpu_reserved_chunk;
681		return pcpu_first_chunk;
682	}
683
684	/*
685	 * The address is relative to unit0 which might be unused and
686	 * thus unmapped.  Offset the address to the unit space of the
687	 * current processor before looking it up in the vmalloc
688	 * space.  Note that any possible cpu id can be used here, so
689	 * there's no need to worry about preemption or cpu hotplug.
690	 */
691	addr += pcpu_unit_offsets[raw_smp_processor_id()];
692	return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
693}
694
695/**
696 * pcpu_alloc - the percpu allocator
697 * @size: size of area to allocate in bytes
698 * @align: alignment of area (max PAGE_SIZE)
699 * @reserved: allocate from the reserved chunk if available
700 *
701 * Allocate percpu area of @size bytes aligned at @align.
702 *
703 * CONTEXT:
704 * Does GFP_KERNEL allocation.
705 *
706 * RETURNS:
707 * Percpu pointer to the allocated area on success, NULL on failure.
708 */
709static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
710{
711	static int warn_limit = 10;
712	struct pcpu_chunk *chunk;
713	const char *err;
714	int slot, off, new_alloc;
715	unsigned long flags;
716
717	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
718		WARN(true, "illegal size (%zu) or align (%zu) for "
719		     "percpu allocation\n", size, align);
720		return NULL;
721	}
722
723	mutex_lock(&pcpu_alloc_mutex);
724	spin_lock_irqsave(&pcpu_lock, flags);
725
726	/* serve reserved allocations from the reserved chunk if available */
727	if (reserved && pcpu_reserved_chunk) {
728		chunk = pcpu_reserved_chunk;
729
730		if (size > chunk->contig_hint) {
731			err = "alloc from reserved chunk failed";
732			goto fail_unlock;
733		}
734
735		while ((new_alloc = pcpu_need_to_extend(chunk))) {
736			spin_unlock_irqrestore(&pcpu_lock, flags);
737			if (pcpu_extend_area_map(chunk, new_alloc) < 0) {
738				err = "failed to extend area map of reserved chunk";
739				goto fail_unlock_mutex;
740			}
741			spin_lock_irqsave(&pcpu_lock, flags);
742		}
743
744		off = pcpu_alloc_area(chunk, size, align);
745		if (off >= 0)
746			goto area_found;
747
748		err = "alloc from reserved chunk failed";
749		goto fail_unlock;
750	}
751
752restart:
753	/* search through normal chunks */
754	for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
755		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
756			if (size > chunk->contig_hint)
757				continue;
758
759			new_alloc = pcpu_need_to_extend(chunk);
760			if (new_alloc) {
761				spin_unlock_irqrestore(&pcpu_lock, flags);
762				if (pcpu_extend_area_map(chunk,
763							 new_alloc) < 0) {
764					err = "failed to extend area map";
765					goto fail_unlock_mutex;
766				}
767				spin_lock_irqsave(&pcpu_lock, flags);
768				/*
769				 * pcpu_lock has been dropped, need to
770				 * restart cpu_slot list walking.
771				 */
772				goto restart;
773			}
774
775			off = pcpu_alloc_area(chunk, size, align);
776			if (off >= 0)
777				goto area_found;
778		}
779	}
780
781	/* hmmm... no space left, create a new chunk */
782	spin_unlock_irqrestore(&pcpu_lock, flags);
783
784	chunk = pcpu_create_chunk();
785	if (!chunk) {
786		err = "failed to allocate new chunk";
787		goto fail_unlock_mutex;
788	}
789
790	spin_lock_irqsave(&pcpu_lock, flags);
791	pcpu_chunk_relocate(chunk, -1);
792	goto restart;
793
794area_found:
795	spin_unlock_irqrestore(&pcpu_lock, flags);
796
797	/* populate, map and clear the area */
798	if (pcpu_populate_chunk(chunk, off, size)) {
799		spin_lock_irqsave(&pcpu_lock, flags);
800		pcpu_free_area(chunk, off);
801		err = "failed to populate";
802		goto fail_unlock;
803	}
804
805	mutex_unlock(&pcpu_alloc_mutex);
806
807	/* return address relative to base address */
808	return __addr_to_pcpu_ptr(chunk->base_addr + off);
809
810fail_unlock:
811	spin_unlock_irqrestore(&pcpu_lock, flags);
812fail_unlock_mutex:
813	mutex_unlock(&pcpu_alloc_mutex);
814	if (warn_limit) {
815		pr_warning("PERCPU: allocation failed, size=%zu align=%zu, "
816			   "%s\n", size, align, err);
817		dump_stack();
818		if (!--warn_limit)
819			pr_info("PERCPU: limit reached, disable warning\n");
820	}
821	return NULL;
822}
823
824/**
825 * __alloc_percpu - allocate dynamic percpu area
826 * @size: size of area to allocate in bytes
827 * @align: alignment of area (max PAGE_SIZE)
828 *
829 * Allocate percpu area of @size bytes aligned at @align.  Might
830 * sleep.  Might trigger writeouts.
831 *
832 * CONTEXT:
833 * Does GFP_KERNEL allocation.
834 *
835 * RETURNS:
836 * Percpu pointer to the allocated area on success, NULL on failure.
837 */
838void __percpu *__alloc_percpu(size_t size, size_t align)
839{
840	return pcpu_alloc(size, align, false);
841}
842EXPORT_SYMBOL_GPL(__alloc_percpu);
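
/*
 * Usage sketch (illustrative, not part of this file).  Most callers use
 * the type-safe alloc_percpu() wrapper rather than calling
 * __alloc_percpu() directly; summing a dynamically allocated percpu
 * counter could look like this.
 *
 *	int __percpu *cnt = __alloc_percpu(sizeof(int), __alignof__(int));
 *	unsigned int cpu;
 *	int sum = 0;
 *
 *	if (cnt) {
 *		for_each_possible_cpu(cpu)
 *			sum += *per_cpu_ptr(cnt, cpu);
 *		free_percpu(cnt);
 *	}
 */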
843
844/**
845 * __alloc_reserved_percpu - allocate reserved percpu area
846 * @size: size of area to allocate in bytes
847 * @align: alignment of area (max PAGE_SIZE)
848 *
849 * Allocate percpu area of @size bytes aligned at @align from reserved
850 * percpu area if arch has set it up; otherwise, allocation is served
851 * from the same dynamic area.  Might sleep.  Might trigger writeouts.
852 *
853 * CONTEXT:
854 * Does GFP_KERNEL allocation.
855 *
856 * RETURNS:
857 * Percpu pointer to the allocated area on success, NULL on failure.
858 */
859void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
860{
861	return pcpu_alloc(size, align, true);
862}
863
864/**
865 * pcpu_reclaim - reclaim fully free chunks, workqueue function
866 * @work: unused
867 *
868 * Reclaim all fully free chunks except for the first one.
869 *
870 * CONTEXT:
871 * workqueue context.
872 */
873static void pcpu_reclaim(struct work_struct *work)
874{
875	LIST_HEAD(todo);
876	struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1];
877	struct pcpu_chunk *chunk, *next;
878
879	mutex_lock(&pcpu_alloc_mutex);
880	spin_lock_irq(&pcpu_lock);
881
882	list_for_each_entry_safe(chunk, next, head, list) {
883		WARN_ON(chunk->immutable);
884
885		/* spare the first one */
886		if (chunk == list_first_entry(head, struct pcpu_chunk, list))
887			continue;
888
889		list_move(&chunk->list, &todo);
890	}
891
892	spin_unlock_irq(&pcpu_lock);
893
894	list_for_each_entry_safe(chunk, next, &todo, list) {
895		pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size);
896		pcpu_destroy_chunk(chunk);
897	}
898
899	mutex_unlock(&pcpu_alloc_mutex);
900}
901
902/**
903 * free_percpu - free percpu area
904 * @ptr: pointer to area to free
905 *
906 * Free percpu area @ptr.
907 *
908 * CONTEXT:
909 * Can be called from atomic context.
910 */
911void free_percpu(void __percpu *ptr)
912{
913	void *addr;
914	struct pcpu_chunk *chunk;
915	unsigned long flags;
916	int off;
917
918	if (!ptr)
919		return;
920
921	addr = __pcpu_ptr_to_addr(ptr);
922
923	spin_lock_irqsave(&pcpu_lock, flags);
924
925	chunk = pcpu_chunk_addr_search(addr);
926	off = addr - chunk->base_addr;
927
928	pcpu_free_area(chunk, off);
929
 930	/* if there is more than one fully free chunk, wake up the grim reaper */
931	if (chunk->free_size == pcpu_unit_size) {
932		struct pcpu_chunk *pos;
933
934		list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
935			if (pos != chunk) {
936				schedule_work(&pcpu_reclaim_work);
937				break;
938			}
939	}
940
941	spin_unlock_irqrestore(&pcpu_lock, flags);
942}
943EXPORT_SYMBOL_GPL(free_percpu);
944
945/**
946 * is_kernel_percpu_address - test whether address is from static percpu area
947 * @addr: address to test
948 *
949 * Test whether @addr belongs to in-kernel static percpu area.  Module
950 * static percpu areas are not considered.  For those, use
951 * is_module_percpu_address().
952 *
953 * RETURNS:
954 * %true if @addr is from in-kernel static percpu area, %false otherwise.
955 */
956bool is_kernel_percpu_address(unsigned long addr)
957{
958#ifdef CONFIG_SMP
959	const size_t static_size = __per_cpu_end - __per_cpu_start;
960	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
961	unsigned int cpu;
962
963	for_each_possible_cpu(cpu) {
964		void *start = per_cpu_ptr(base, cpu);
965
966		if ((void *)addr >= start && (void *)addr < start + static_size)
967			return true;
 968	}
969#endif
970	/* on UP, can't distinguish from other static vars, always false */
971	return false;
972}
973
974/**
975 * per_cpu_ptr_to_phys - convert translated percpu address to physical address
976 * @addr: the address to be converted to physical address
977 *
 978 * Given @addr, which is a dereferenceable address obtained via one of
 979 * the percpu access macros, this function translates it into its physical
980 * address.  The caller is responsible for ensuring @addr stays valid
981 * until this function finishes.
982 *
983 * RETURNS:
984 * The physical address for @addr.
985 */
986phys_addr_t per_cpu_ptr_to_phys(void *addr)
987{
988	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
989	bool in_first_chunk = false;
990	unsigned long first_start, first_end;
991	unsigned int cpu;
992
993	/*
994	 * The following test on first_start/end isn't strictly
995	 * necessary but will speed up lookups of addresses which
996	 * aren't in the first chunk.
997	 */
998	first_start = pcpu_chunk_addr(pcpu_first_chunk, pcpu_first_unit_cpu, 0);
999	first_end = pcpu_chunk_addr(pcpu_first_chunk, pcpu_last_unit_cpu,
1000				    pcpu_unit_pages);
1001	if ((unsigned long)addr >= first_start &&
1002	    (unsigned long)addr < first_end) {
1003		for_each_possible_cpu(cpu) {
1004			void *start = per_cpu_ptr(base, cpu);
1005
1006			if (addr >= start && addr < start + pcpu_unit_size) {
1007				in_first_chunk = true;
1008				break;
1009			}
1010		}
1011	}
1012
1013	if (in_first_chunk) {
1014		if ((unsigned long)addr < VMALLOC_START ||
1015		    (unsigned long)addr >= VMALLOC_END)
1016			return __pa(addr);
1017		else
1018			return page_to_phys(vmalloc_to_page(addr));
1019	} else
1020		return page_to_phys(pcpu_addr_to_page(addr));
1021}
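
/*
 * Usage sketch (illustrative): obtaining the physical address backing
 * one cpu's instance of a percpu object p, e.g. for handing it to a
 * device or hypervisor.
 *
 *	phys_addr_t pa = per_cpu_ptr_to_phys(per_cpu_ptr(p, cpu));
 */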
1022
1023/**
1024 * pcpu_alloc_alloc_info - allocate percpu allocation info
1025 * @nr_groups: the number of groups
1026 * @nr_units: the number of units
1027 *
1028 * Allocate ai which is large enough for @nr_groups groups containing
1029 * @nr_units units.  The returned ai's groups[0].cpu_map points to the
1030 * cpu_map array which is long enough for @nr_units and filled with
1031 * NR_CPUS.  It's the caller's responsibility to initialize cpu_map
1032 * pointer of other groups.
1033 *
1034 * RETURNS:
1035 * Pointer to the allocated pcpu_alloc_info on success, NULL on
1036 * failure.
1037 */
1038struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
1039						      int nr_units)
1040{
1041	struct pcpu_alloc_info *ai;
1042	size_t base_size, ai_size;
1043	void *ptr;
1044	int unit;
1045
1046	base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]),
1047			  __alignof__(ai->groups[0].cpu_map[0]));
1048	ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
1049
1050	ptr = alloc_bootmem_nopanic(PFN_ALIGN(ai_size));
1051	if (!ptr)
1052		return NULL;
1053	ai = ptr;
1054	ptr += base_size;
1055
1056	ai->groups[0].cpu_map = ptr;
1057
1058	for (unit = 0; unit < nr_units; unit++)
1059		ai->groups[0].cpu_map[unit] = NR_CPUS;
1060
1061	ai->nr_groups = nr_groups;
1062	ai->__ai_size = PFN_ALIGN(ai_size);
1063
1064	return ai;
1065}
1066
1067/**
1068 * pcpu_free_alloc_info - free percpu allocation info
1069 * @ai: pcpu_alloc_info to free
1070 *
1071 * Free @ai which was allocated by pcpu_alloc_alloc_info().
1072 */
1073void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
1074{
1075	free_bootmem(__pa(ai), ai->__ai_size);
1076}
1077
1078/**
1079 * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
1080 * @lvl: loglevel
1081 * @ai: allocation info to dump
1082 *
1083 * Print out information about @ai using loglevel @lvl.
1084 */
1085static void pcpu_dump_alloc_info(const char *lvl,
1086				 const struct pcpu_alloc_info *ai)
1087{
1088	int group_width = 1, cpu_width = 1, width;
1089	char empty_str[] = "--------";
1090	int alloc = 0, alloc_end = 0;
1091	int group, v;
1092	int upa, apl;	/* units per alloc, allocs per line */
1093
1094	v = ai->nr_groups;
1095	while (v /= 10)
1096		group_width++;
1097
1098	v = num_possible_cpus();
1099	while (v /= 10)
1100		cpu_width++;
1101	empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';
1102
1103	upa = ai->alloc_size / ai->unit_size;
1104	width = upa * (cpu_width + 1) + group_width + 3;
1105	apl = rounddown_pow_of_two(max(60 / width, 1));
1106
1107	printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
1108	       lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
1109	       ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);
1110
1111	for (group = 0; group < ai->nr_groups; group++) {
1112		const struct pcpu_group_info *gi = &ai->groups[group];
1113		int unit = 0, unit_end = 0;
1114
1115		BUG_ON(gi->nr_units % upa);
1116		for (alloc_end += gi->nr_units / upa;
1117		     alloc < alloc_end; alloc++) {
1118			if (!(alloc % apl)) {
1119				printk("\n");
1120				printk("%spcpu-alloc: ", lvl);
1121			}
1122			printk("[%0*d] ", group_width, group);
1123
1124			for (unit_end += upa; unit < unit_end; unit++)
1125				if (gi->cpu_map[unit] != NR_CPUS)
1126					printk("%0*d ", cpu_width,
1127					       gi->cpu_map[unit]);
1128				else
1129					printk("%s ", empty_str);
1130		}
1131	}
1132	printk("\n");
1133}
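
/*
 * Example output (hypothetical values from a 4-cpu x86_64 embed-style
 * configuration, shown only to illustrate the format printed above):
 *
 *	pcpu-alloc: s114520 r8192 d32936 u524288 alloc=1*2097152
 *	pcpu-alloc: [0] 0 1 2 3
 */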
1134
1135/**
1136 * pcpu_setup_first_chunk - initialize the first percpu chunk
 1137 * @ai: pcpu_alloc_info describing how the percpu area is shaped
1138 * @base_addr: mapped address
1139 *
1140 * Initialize the first percpu chunk which contains the kernel static
 1141 * percpu area.  This function is to be called from arch percpu area
1142 * setup path.
1143 *
1144 * @ai contains all information necessary to initialize the first
1145 * chunk and prime the dynamic percpu allocator.
1146 *
1147 * @ai->static_size is the size of static percpu area.
1148 *
1149 * @ai->reserved_size, if non-zero, specifies the amount of bytes to
1150 * reserve after the static area in the first chunk.  This reserves
1151 * the first chunk such that it's available only through reserved
1152 * percpu allocation.  This is primarily used to serve module percpu
1153 * static areas on architectures where the addressing model has
1154 * limited offset range for symbol relocations to guarantee module
1155 * percpu symbols fall inside the relocatable range.
1156 *
1157 * @ai->dyn_size determines the number of bytes available for dynamic
1158 * allocation in the first chunk.  The area between @ai->static_size +
1159 * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
1160 *
1161 * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
1162 * and equal to or larger than @ai->static_size + @ai->reserved_size +
1163 * @ai->dyn_size.
1164 *
1165 * @ai->atom_size is the allocation atom size and used as alignment
1166 * for vm areas.
1167 *
 1168 * @ai->alloc_size is the allocation size and is always a multiple of
1169 * @ai->atom_size.  This is larger than @ai->atom_size if
1170 * @ai->unit_size is larger than @ai->atom_size.
1171 *
1172 * @ai->nr_groups and @ai->groups describe virtual memory layout of
1173 * percpu areas.  Units which should be colocated are put into the
1174 * same group.  Dynamic VM areas will be allocated according to these
1175 * groupings.  If @ai->nr_groups is zero, a single group containing
1176 * all units is assumed.
1177 *
1178 * The caller should have mapped the first chunk at @base_addr and
1179 * copied static data to each unit.
1180 *
1181 * If the first chunk ends up with both reserved and dynamic areas, it
1182 * is served by two chunks - one to serve the core static and reserved
1183 * areas and the other for the dynamic area.  They share the same vm
 1184 * and page map but use different area allocation maps to stay away
 1185 * from each other.  The latter chunk is circulated in the chunk slots
 1186 * and is available for dynamic allocation like any other chunk.
1187 *
1188 * RETURNS:
1189 * 0 on success, -errno on failure.
1190 */
1191int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
1192				  void *base_addr)
1193{
1194	static char cpus_buf[4096] __initdata;
1195	static int smap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
1196	static int dmap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
1197	size_t dyn_size = ai->dyn_size;
1198	size_t size_sum = ai->static_size + ai->reserved_size + dyn_size;
1199	struct pcpu_chunk *schunk, *dchunk = NULL;
1200	unsigned long *group_offsets;
1201	size_t *group_sizes;
1202	unsigned long *unit_off;
1203	unsigned int cpu;
1204	int *unit_map;
1205	int group, unit, i;
1206
1207	cpumask_scnprintf(cpus_buf, sizeof(cpus_buf), cpu_possible_mask);
1208
1209#define PCPU_SETUP_BUG_ON(cond)	do {					\
1210	if (unlikely(cond)) {						\
1211		pr_emerg("PERCPU: failed to initialize, %s", #cond);	\
1212		pr_emerg("PERCPU: cpu_possible_mask=%s\n", cpus_buf);	\
1213		pcpu_dump_alloc_info(KERN_EMERG, ai);			\
1214		BUG();							\
1215	}								\
1216} while (0)
1217
1218	/* sanity checks */
1219	PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
1220#ifdef CONFIG_SMP
1221	PCPU_SETUP_BUG_ON(!ai->static_size);
1222#endif
1223	PCPU_SETUP_BUG_ON(!base_addr);
1224	PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
1225	PCPU_SETUP_BUG_ON(ai->unit_size & ~PAGE_MASK);
1226	PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
1227	PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE);
1228	PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);
1229
1230	/* process group information and build config tables accordingly */
1231	group_offsets = alloc_bootmem(ai->nr_groups * sizeof(group_offsets[0]));
1232	group_sizes = alloc_bootmem(ai->nr_groups * sizeof(group_sizes[0]));
1233	unit_map = alloc_bootmem(nr_cpu_ids * sizeof(unit_map[0]));
1234	unit_off = alloc_bootmem(nr_cpu_ids * sizeof(unit_off[0]));
1235
1236	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
1237		unit_map[cpu] = UINT_MAX;
1238	pcpu_first_unit_cpu = NR_CPUS;
1239
1240	for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
1241		const struct pcpu_group_info *gi = &ai->groups[group];
1242
1243		group_offsets[group] = gi->base_offset;
1244		group_sizes[group] = gi->nr_units * ai->unit_size;
1245
1246		for (i = 0; i < gi->nr_units; i++) {
1247			cpu = gi->cpu_map[i];
1248			if (cpu == NR_CPUS)
1249				continue;
1250
1251			PCPU_SETUP_BUG_ON(cpu > nr_cpu_ids);
1252			PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
1253			PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);
1254
1255			unit_map[cpu] = unit + i;
1256			unit_off[cpu] = gi->base_offset + i * ai->unit_size;
1257
1258			if (pcpu_first_unit_cpu == NR_CPUS)
1259				pcpu_first_unit_cpu = cpu;
1260		}
1261	}
1262	pcpu_last_unit_cpu = cpu;
1263	pcpu_nr_units = unit;
1264
1265	for_each_possible_cpu(cpu)
1266		PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);
1267
1268	/* we're done parsing the input, undefine BUG macro and dump config */
1269#undef PCPU_SETUP_BUG_ON
1270	pcpu_dump_alloc_info(KERN_INFO, ai);
1271
1272	pcpu_nr_groups = ai->nr_groups;
1273	pcpu_group_offsets = group_offsets;
1274	pcpu_group_sizes = group_sizes;
1275	pcpu_unit_map = unit_map;
1276	pcpu_unit_offsets = unit_off;
1277
1278	/* determine basic parameters */
1279	pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
1280	pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
1281	pcpu_atom_size = ai->atom_size;
1282	pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) +
1283		BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long);
1284
1285	/*
1286	 * Allocate chunk slots.  The additional last slot is for
1287	 * empty chunks.
1288	 */
1289	pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
1290	pcpu_slot = alloc_bootmem(pcpu_nr_slots * sizeof(pcpu_slot[0]));
1291	for (i = 0; i < pcpu_nr_slots; i++)
1292		INIT_LIST_HEAD(&pcpu_slot[i]);
1293
1294	/*
1295	 * Initialize static chunk.  If reserved_size is zero, the
1296	 * static chunk covers static area + dynamic allocation area
1297	 * in the first chunk.  If reserved_size is not zero, it
1298	 * covers static area + reserved area (mostly used for module
1299	 * static percpu allocation).
1300	 */
1301	schunk = alloc_bootmem(pcpu_chunk_struct_size);
1302	INIT_LIST_HEAD(&schunk->list);
1303	schunk->base_addr = base_addr;
1304	schunk->map = smap;
1305	schunk->map_alloc = ARRAY_SIZE(smap);
1306	schunk->immutable = true;
1307	bitmap_fill(schunk->populated, pcpu_unit_pages);
1308
1309	if (ai->reserved_size) {
1310		schunk->free_size = ai->reserved_size;
1311		pcpu_reserved_chunk = schunk;
1312		pcpu_reserved_chunk_limit = ai->static_size + ai->reserved_size;
1313	} else {
1314		schunk->free_size = dyn_size;
1315		dyn_size = 0;			/* dynamic area covered */
1316	}
1317	schunk->contig_hint = schunk->free_size;
1318
1319	schunk->map[schunk->map_used++] = -ai->static_size;
1320	if (schunk->free_size)
1321		schunk->map[schunk->map_used++] = schunk->free_size;
1322
1323	/* init dynamic chunk if necessary */
1324	if (dyn_size) {
1325		dchunk = alloc_bootmem(pcpu_chunk_struct_size);
1326		INIT_LIST_HEAD(&dchunk->list);
1327		dchunk->base_addr = base_addr;
1328		dchunk->map = dmap;
1329		dchunk->map_alloc = ARRAY_SIZE(dmap);
1330		dchunk->immutable = true;
1331		bitmap_fill(dchunk->populated, pcpu_unit_pages);
1332
1333		dchunk->contig_hint = dchunk->free_size = dyn_size;
1334		dchunk->map[dchunk->map_used++] = -pcpu_reserved_chunk_limit;
1335		dchunk->map[dchunk->map_used++] = dchunk->free_size;
1336	}
1337
1338	/* link the first chunk in */
1339	pcpu_first_chunk = dchunk ?: schunk;
1340	pcpu_chunk_relocate(pcpu_first_chunk, -1);
1341
1342	/* we're done */
1343	pcpu_base_addr = base_addr;
1344	return 0;
1345}
1346
1347#ifdef CONFIG_SMP
1348
1349const char *pcpu_fc_names[PCPU_FC_NR] __initdata = {
1350	[PCPU_FC_AUTO]	= "auto",
1351	[PCPU_FC_EMBED]	= "embed",
1352	[PCPU_FC_PAGE]	= "page",
1353};
1354
1355enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;
1356
1357static int __init percpu_alloc_setup(char *str)
1358{
1359	if (0)
1360		/* nada */;
1361#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
1362	else if (!strcmp(str, "embed"))
1363		pcpu_chosen_fc = PCPU_FC_EMBED;
1364#endif
1365#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
1366	else if (!strcmp(str, "page"))
1367		pcpu_chosen_fc = PCPU_FC_PAGE;
1368#endif
1369	else
1370		pr_warning("PERCPU: unknown allocator %s specified\n", str);
1371
1372	return 0;
1373}
1374early_param("percpu_alloc", percpu_alloc_setup);
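
/*
 * For example, booting with "percpu_alloc=page" on the kernel command
 * line selects the page-mapped first chunk allocator on architectures
 * where it is built in.
 */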
1375
1376/*
1377 * pcpu_embed_first_chunk() is used by the generic percpu setup.
 1378 * Build it if it is needed by the arch config or if the generic setup
 1379 * is going to be used.
1380 */
1381#if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
1382	!defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
1383#define BUILD_EMBED_FIRST_CHUNK
1384#endif
1385
1386/* build pcpu_page_first_chunk() iff needed by the arch config */
1387#if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
1388#define BUILD_PAGE_FIRST_CHUNK
1389#endif
1390
1391/* pcpu_build_alloc_info() is used by both embed and page first chunk */
1392#if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK)
1393/**
1394 * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
1395 * @reserved_size: the size of reserved percpu area in bytes
1396 * @dyn_size: minimum free size for dynamic allocation in bytes
1397 * @atom_size: allocation atom size
1398 * @cpu_distance_fn: callback to determine distance between cpus, optional
1399 *
1400 * This function determines grouping of units, their mappings to cpus
1401 * and other parameters considering needed percpu size, allocation
1402 * atom size and distances between CPUs.
1403 *
 1404 * Groups are always multiples of atom size and CPUs which are of
1405 * LOCAL_DISTANCE both ways are grouped together and share space for
1406 * units in the same group.  The returned configuration is guaranteed
1407 * to have CPUs on different nodes on different groups and >=75% usage
1408 * of allocated virtual address space.
1409 *
1410 * RETURNS:
1411 * On success, pointer to the new allocation_info is returned.  On
1412 * failure, ERR_PTR value is returned.
1413 */
1414static struct pcpu_alloc_info * __init pcpu_build_alloc_info(
1415				size_t reserved_size, size_t dyn_size,
1416				size_t atom_size,
1417				pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
1418{
1419	static int group_map[NR_CPUS] __initdata;
1420	static int group_cnt[NR_CPUS] __initdata;
1421	const size_t static_size = __per_cpu_end - __per_cpu_start;
1422	int nr_groups = 1, nr_units = 0;
1423	size_t size_sum, min_unit_size, alloc_size;
1424	int upa, max_upa, uninitialized_var(best_upa);	/* units_per_alloc */
1425	int last_allocs, group, unit;
1426	unsigned int cpu, tcpu;
1427	struct pcpu_alloc_info *ai;
1428	unsigned int *cpu_map;
1429
1430	/* this function may be called multiple times */
1431	memset(group_map, 0, sizeof(group_map));
1432	memset(group_cnt, 0, sizeof(group_cnt));
1433
1434	/* calculate size_sum and ensure dyn_size is enough for early alloc */
1435	size_sum = PFN_ALIGN(static_size + reserved_size +
1436			    max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE));
1437	dyn_size = size_sum - static_size - reserved_size;
1438
1439	/*
1440	 * Determine min_unit_size, alloc_size and max_upa such that
 1441	 * alloc_size is a multiple of atom_size and is the smallest size
 1442	 * which can accommodate 4k aligned segments which are equal to
1443	 * or larger than min_unit_size.
1444	 */
1445	min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
1446
1447	alloc_size = roundup(min_unit_size, atom_size);
1448	upa = alloc_size / min_unit_size;
1449	while (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
1450		upa--;
1451	max_upa = upa;
1452
1453	/* group cpus according to their proximity */
1454	for_each_possible_cpu(cpu) {
1455		group = 0;
1456	next_group:
1457		for_each_possible_cpu(tcpu) {
1458			if (cpu == tcpu)
1459				break;
1460			if (group_map[tcpu] == group && cpu_distance_fn &&
1461			    (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
1462			     cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
1463				group++;
1464				nr_groups = max(nr_groups, group + 1);
1465				goto next_group;
1466			}
1467		}
1468		group_map[cpu] = group;
1469		group_cnt[group]++;
1470	}
1471
1472	/*
1473	 * Expand unit size until address space usage goes over 75%
1474	 * and then as much as possible without using more address
1475	 * space.
1476	 */
1477	last_allocs = INT_MAX;
1478	for (upa = max_upa; upa; upa--) {
1479		int allocs = 0, wasted = 0;
1480
1481		if (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
1482			continue;
1483
1484		for (group = 0; group < nr_groups; group++) {
1485			int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
1486			allocs += this_allocs;
1487			wasted += this_allocs * upa - group_cnt[group];
1488		}
1489
1490		/*
1491		 * Don't accept if wastage is over 1/3.  The
1492		 * greater-than comparison ensures upa==1 always
1493		 * passes the following check.
1494		 */
1495		if (wasted > num_possible_cpus() / 3)
1496			continue;
1497
1498		/* and then don't consume more memory */
1499		if (allocs > last_allocs)
1500			break;
1501		last_allocs = allocs;
1502		best_upa = upa;
1503	}
1504	upa = best_upa;
1505
1506	/* allocate and fill alloc_info */
1507	for (group = 0; group < nr_groups; group++)
1508		nr_units += roundup(group_cnt[group], upa);
1509
1510	ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
1511	if (!ai)
1512		return ERR_PTR(-ENOMEM);
1513	cpu_map = ai->groups[0].cpu_map;
1514
1515	for (group = 0; group < nr_groups; group++) {
1516		ai->groups[group].cpu_map = cpu_map;
1517		cpu_map += roundup(group_cnt[group], upa);
1518	}
1519
1520	ai->static_size = static_size;
1521	ai->reserved_size = reserved_size;
1522	ai->dyn_size = dyn_size;
1523	ai->unit_size = alloc_size / upa;
1524	ai->atom_size = atom_size;
1525	ai->alloc_size = alloc_size;
1526
1527	for (group = 0, unit = 0; group_cnt[group]; group++) {
1528		struct pcpu_group_info *gi = &ai->groups[group];
1529
1530		/*
1531		 * Initialize base_offset as if all groups are located
1532		 * back-to-back.  The caller should update this to
1533		 * reflect actual allocation.
1534		 */
1535		gi->base_offset = unit * ai->unit_size;
1536
1537		for_each_possible_cpu(cpu)
1538			if (group_map[cpu] == group)
1539				gi->cpu_map[gi->nr_units++] = cpu;
1540		gi->nr_units = roundup(gi->nr_units, upa);
1541		unit += gi->nr_units;
1542	}
1543	BUG_ON(unit != nr_units);
1544
1545	return ai;
1546}
1547#endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */
1548
1549#if defined(BUILD_EMBED_FIRST_CHUNK)
1550/**
1551 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
1552 * @reserved_size: the size of reserved percpu area in bytes
1553 * @dyn_size: minimum free size for dynamic allocation in bytes
1554 * @atom_size: allocation atom size
1555 * @cpu_distance_fn: callback to determine distance between cpus, optional
1556 * @alloc_fn: function to allocate percpu page
 1557 * @free_fn: function to free percpu page
1558 *
1559 * This is a helper to ease setting up embedded first percpu chunk and
1560 * can be called where pcpu_setup_first_chunk() is expected.
1561 *
1562 * If this function is used to setup the first chunk, it is allocated
1563 * by calling @alloc_fn and used as-is without being mapped into
1564 * vmalloc area.  Allocations are always whole multiples of @atom_size
1565 * aligned to @atom_size.
1566 *
1567 * This enables the first chunk to piggy back on the linear physical
1568 * mapping which often uses larger page size.  Please note that this
1569 * can result in very sparse cpu->unit mapping on NUMA machines thus
1570 * requiring large vmalloc address space.  Don't use this allocator if
1571 * vmalloc space is not orders of magnitude larger than distances
 1572 * between node memory addresses (i.e. 32bit NUMA machines).
1573 *
1574 * @dyn_size specifies the minimum dynamic area size.
1575 *
1576 * If the needed size is smaller than the minimum or specified unit
1577 * size, the leftover is returned using @free_fn.
1578 *
1579 * RETURNS:
1580 * 0 on success, -errno on failure.
1581 */
1582int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
1583				  size_t atom_size,
1584				  pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
1585				  pcpu_fc_alloc_fn_t alloc_fn,
1586				  pcpu_fc_free_fn_t free_fn)
1587{
1588	void *base = (void *)ULONG_MAX;
1589	void **areas = NULL;
1590	struct pcpu_alloc_info *ai;
1591	size_t size_sum, areas_size, max_distance;
1592	int group, i, rc;
1593
1594	ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
1595				   cpu_distance_fn);
1596	if (IS_ERR(ai))
1597		return PTR_ERR(ai);
1598
1599	size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
1600	areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));
1601
1602	areas = alloc_bootmem_nopanic(areas_size);
1603	if (!areas) {
1604		rc = -ENOMEM;
1605		goto out_free;
1606	}
1607
1608	/* allocate, copy and determine base address */
1609	for (group = 0; group < ai->nr_groups; group++) {
1610		struct pcpu_group_info *gi = &ai->groups[group];
1611		unsigned int cpu = NR_CPUS;
1612		void *ptr;
1613
1614		for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
1615			cpu = gi->cpu_map[i];
1616		BUG_ON(cpu == NR_CPUS);
1617
1618		/* allocate space for the whole group */
1619		ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size);
1620		if (!ptr) {
1621			rc = -ENOMEM;
1622			goto out_free_areas;
1623		}
1624		areas[group] = ptr;
1625
1626		base = min(ptr, base);
1627
1628		for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
1629			if (gi->cpu_map[i] == NR_CPUS) {
1630				/* unused unit, free whole */
1631				free_fn(ptr, ai->unit_size);
1632				continue;
1633			}
1634			/* copy and return the unused part */
1635			memcpy(ptr, __per_cpu_load, ai->static_size);
1636			free_fn(ptr + size_sum, ai->unit_size - size_sum);
1637		}
1638	}
1639
1640	/* base address is now known, determine group base offsets */
1641	max_distance = 0;
1642	for (group = 0; group < ai->nr_groups; group++) {
1643		ai->groups[group].base_offset = areas[group] - base;
1644		max_distance = max_t(size_t, max_distance,
1645				     ai->groups[group].base_offset);
1646	}
1647	max_distance += ai->unit_size;
1648
1649	/* warn if maximum distance is further than 75% of vmalloc space */
1650	if (max_distance > (VMALLOC_END - VMALLOC_START) * 3 / 4) {
1651		pr_warning("PERCPU: max_distance=0x%zx too large for vmalloc "
1652			   "space 0x%lx\n",
1653			   max_distance, VMALLOC_END - VMALLOC_START);
1654#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
1655		/* and fail if we have fallback */
1656		rc = -EINVAL;
1657		goto out_free;
1658#endif
1659	}
1660
1661	pr_info("PERCPU: Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
1662		PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
1663		ai->dyn_size, ai->unit_size);
1664
1665	rc = pcpu_setup_first_chunk(ai, base);
1666	goto out_free;
1667
1668out_free_areas:
1669	for (group = 0; group < ai->nr_groups; group++)
1670		free_fn(areas[group],
1671			ai->groups[group].nr_units * ai->unit_size);
1672out_free:
1673	pcpu_free_alloc_info(ai);
1674	if (areas)
1675		free_bootmem(__pa(areas), areas_size);
1676	return rc;
1677}
1678#endif /* BUILD_EMBED_FIRST_CHUNK */
1679
1680#ifdef BUILD_PAGE_FIRST_CHUNK
1681/**
1682 * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
1683 * @reserved_size: the size of reserved percpu area in bytes
1684 * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
 1685 * @free_fn: function to free percpu page, always called with PAGE_SIZE
1686 * @populate_pte_fn: function to populate pte
1687 *
1688 * This is a helper to ease setting up page-remapped first percpu
1689 * chunk and can be called where pcpu_setup_first_chunk() is expected.
1690 *
 1691 * This is the basic allocator.  The static percpu area is allocated
 1692 * page-by-page into the vmalloc area.
1693 *
1694 * RETURNS:
1695 * 0 on success, -errno on failure.
1696 */
1697int __init pcpu_page_first_chunk(size_t reserved_size,
1698				 pcpu_fc_alloc_fn_t alloc_fn,
1699				 pcpu_fc_free_fn_t free_fn,
1700				 pcpu_fc_populate_pte_fn_t populate_pte_fn)
1701{
1702	static struct vm_struct vm;
1703	struct pcpu_alloc_info *ai;
1704	char psize_str[16];
1705	int unit_pages;
1706	size_t pages_size;
1707	struct page **pages;
1708	int unit, i, j, rc;
1709
1710	snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);
1711
1712	ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL);
1713	if (IS_ERR(ai))
1714		return PTR_ERR(ai);
1715	BUG_ON(ai->nr_groups != 1);
1716	BUG_ON(ai->groups[0].nr_units != num_possible_cpus());
1717
1718	unit_pages = ai->unit_size >> PAGE_SHIFT;
1719
1720	/* unaligned allocations can't be freed, round up to page size */
1721	pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
1722			       sizeof(pages[0]));
1723	pages = alloc_bootmem(pages_size);
1724
1725	/* allocate pages */
1726	j = 0;
1727	for (unit = 0; unit < num_possible_cpus(); unit++)
1728		for (i = 0; i < unit_pages; i++) {
1729			unsigned int cpu = ai->groups[0].cpu_map[unit];
1730			void *ptr;
1731
1732			ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
1733			if (!ptr) {
1734				pr_warning("PERCPU: failed to allocate %s page "
1735					   "for cpu%u\n", psize_str, cpu);
1736				goto enomem;
1737			}
1738			pages[j++] = virt_to_page(ptr);
1739		}
1740
1741	/* allocate vm area, map the pages and copy static data */
1742	vm.flags = VM_ALLOC;
1743	vm.size = num_possible_cpus() * ai->unit_size;
1744	vm_area_register_early(&vm, PAGE_SIZE);
1745
1746	for (unit = 0; unit < num_possible_cpus(); unit++) {
1747		unsigned long unit_addr =
1748			(unsigned long)vm.addr + unit * ai->unit_size;
1749
1750		for (i = 0; i < unit_pages; i++)
1751			populate_pte_fn(unit_addr + (i << PAGE_SHIFT));
1752
1753		/* pte already populated, the following shouldn't fail */
1754		rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
1755				      unit_pages);
1756		if (rc < 0)
1757			panic("failed to map percpu area, err=%d\n", rc);
1758
1759		/*
1760		 * FIXME: Archs with virtual cache should flush local
1761		 * cache for the linear mapping here - something
1762		 * equivalent to flush_cache_vmap() on the local cpu.
1763		 * flush_cache_vmap() can't be used as most supporting
1764		 * data structures are not set up yet.
1765		 */
1766
1767		/* copy static data */
1768		memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
1769	}
1770
1771	/* we're ready, commit */
1772	pr_info("PERCPU: %d %s pages/cpu @%p s%zu r%zu d%zu\n",
1773		unit_pages, psize_str, vm.addr, ai->static_size,
1774		ai->reserved_size, ai->dyn_size);
1775
1776	rc = pcpu_setup_first_chunk(ai, vm.addr);
1777	goto out_free_ar;
1778
1779enomem:
1780	while (--j >= 0)
1781		free_fn(page_address(pages[j]), PAGE_SIZE);
1782	rc = -ENOMEM;
1783out_free_ar:
1784	free_bootmem(__pa(pages), pages_size);
1785	pcpu_free_alloc_info(ai);
1786	return rc;
1787}
1788#endif /* BUILD_PAGE_FIRST_CHUNK */
1789
1790#ifndef	CONFIG_HAVE_SETUP_PER_CPU_AREA
1791/*
1792 * Generic SMP percpu area setup.
1793 *
1794 * The embedding helper is used because its behavior closely resembles
1795 * the original non-dynamic generic percpu area setup.  This is
1796 * important because many archs have addressing restrictions and might
1797 * fail if the percpu area is located far away from the previous
1798 * location.  As an added bonus, in non-NUMA cases, embedding is
1799 * generally a good idea TLB-wise because percpu area can piggy back
1800 * on the physical linear memory mapping which uses large page
1801 * mappings on applicable archs.
1802 */
1803unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
1804EXPORT_SYMBOL(__per_cpu_offset);
1805
1806static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
1807				       size_t align)
1808{
1809	return __alloc_bootmem_nopanic(size, align, __pa(MAX_DMA_ADDRESS));
1810}
1811
1812static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
1813{
1814	free_bootmem(__pa(ptr), size);
1815}
1816
1817void __init setup_per_cpu_areas(void)
1818{
1819	unsigned long delta;
1820	unsigned int cpu;
1821	int rc;
1822
1823	/*
1824	 * Always reserve area for module percpu variables.  That's
1825	 * what the legacy allocator did.
1826	 */
1827	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
1828				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
1829				    pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
1830	if (rc < 0)
1831		panic("Failed to initialize percpu areas.");
1832
1833	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
1834	for_each_possible_cpu(cpu)
1835		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
1836}
1837#endif	/* CONFIG_HAVE_SETUP_PER_CPU_AREA */
1838
1839#else	/* CONFIG_SMP */
1840
1841/*
1842 * UP percpu area setup.
1843 *
1844 * UP always uses km-based percpu allocator with identity mapping.
1845 * Static percpu variables are indistinguishable from the usual static
1846 * variables and don't require any special preparation.
1847 */
1848void __init setup_per_cpu_areas(void)
1849{
1850	const size_t unit_size =
1851		roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE,
1852					 PERCPU_DYNAMIC_RESERVE));
1853	struct pcpu_alloc_info *ai;
1854	void *fc;
1855
1856	ai = pcpu_alloc_alloc_info(1, 1);
1857	fc = __alloc_bootmem(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
1858	if (!ai || !fc)
1859		panic("Failed to allocate memory for percpu areas.");
1860
1861	ai->dyn_size = unit_size;
1862	ai->unit_size = unit_size;
1863	ai->atom_size = unit_size;
1864	ai->alloc_size = unit_size;
1865	ai->groups[0].nr_units = 1;
1866	ai->groups[0].cpu_map[0] = 0;
1867
1868	if (pcpu_setup_first_chunk(ai, fc) < 0)
1869		panic("Failed to initialize percpu areas.");
1870}
1871
1872#endif	/* CONFIG_SMP */
1873
1874/*
1875 * First and reserved chunks are initialized with temporary allocation
1876 * map in initdata so that they can be used before slab is online.
1877 * This function is called after slab is brought up and replaces those
1878 * with properly allocated maps.
1879 */
1880void __init percpu_init_late(void)
1881{
1882	struct pcpu_chunk *target_chunks[] =
1883		{ pcpu_first_chunk, pcpu_reserved_chunk, NULL };
1884	struct pcpu_chunk *chunk;
1885	unsigned long flags;
1886	int i;
1887
1888	for (i = 0; (chunk = target_chunks[i]); i++) {
1889		int *map;
1890		const size_t size = PERCPU_DYNAMIC_EARLY_SLOTS * sizeof(map[0]);
1891
1892		BUILD_BUG_ON(size > PAGE_SIZE);
1893
1894		map = pcpu_mem_alloc(size);
1895		BUG_ON(!map);
1896
1897		spin_lock_irqsave(&pcpu_lock, flags);
1898		memcpy(map, chunk->map, size);
1899		chunk->map = map;
1900		spin_unlock_irqrestore(&pcpu_lock, flags);
1901	}
1902}
1903