percpu.c revision 9f6455325618821dcf6775d7972881fde32e77c5
1/*
2 * mm/percpu.c - percpu memory allocator
3 *
4 * Copyright (C) 2009		SUSE Linux Products GmbH
5 * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
6 *
7 * This file is released under the GPLv2.
8 *
9 * This is the percpu allocator which can handle both static and
10 * dynamic areas.  Percpu areas are allocated in chunks.  Each chunk
11 * consists of a boot-time determined number of units and the first
12 * chunk is used for static percpu variables in the kernel image
13 * (special boot time alloc/init handling is necessary as these areas
14 * need to be brought up before allocation services are running).
15 * Units grow as necessary and all units grow or shrink in unison.
16 * When a chunk is filled up, another chunk is allocated.
17 *
18 *  c0                           c1                         c2
19 *  -------------------          -------------------        ------------
20 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
21 *  -------------------  ......  -------------------  ....  ------------
22 *
23 * Allocation is done in offset-size areas of a single unit's space.
24 * I.e., an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of
25 * c1:u0, c1:u1, c1:u2 and c1:u3.  On UMA, units correspond directly to
26 * cpus.  On NUMA, the mapping can be non-linear and even sparse.
27 * Percpu access can be done by configuring percpu base registers
28 * according to cpu to unit mapping and pcpu_unit_size.
29 *
30 * There are usually many small percpu allocations, many of them being
31 * as small as 4 bytes.  The allocator organizes chunks into lists
32 * according to free size and tries to allocate from the fullest one.
33 * Each chunk keeps a hint of its maximum contiguous area size, which
34 * is guaranteed to be equal to or larger than the maximum contiguous
35 * area in the chunk.  This helps the allocator avoid iterating over
36 * the chunk maps unnecessarily.
37 *
38 * Allocation state in each chunk is kept using an array of integers
39 * on chunk->map.  A positive value in the map represents a free
40 * region and negative allocated.  Allocation inside a chunk is done
41 * by scanning this map sequentially and serving the first matching
42 * entry.  This is mostly copied from the percpu_modalloc() allocator.
43 * Chunks can be determined from the address using the index field
44 * in the page struct. The index field contains a pointer to the chunk.
45 *
46 * To use this allocator, arch code should do the following:
47 *
48 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
49 *   regular address to percpu pointer and back if they need to be
50 *   different from the default
51 *
52 * - use pcpu_setup_first_chunk() during percpu area initialization to
53 *   setup the first chunk containing the kernel static percpu area
54 */
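
/*
 * Illustrative usage sketch, not part of the original file: how a
 * client of the dynamic allocator below typically allocates, accesses
 * and frees percpu memory.  The type and variable names are
 * hypothetical.
 *
 *	struct example_stats {
 *		unsigned long hits;
 *		unsigned long misses;
 *	};
 *	static struct example_stats __percpu *stats;
 *	unsigned int cpu;
 *
 *	stats = alloc_percpu(struct example_stats);
 *	if (!stats)
 *		return -ENOMEM;
 *
 *	for_each_possible_cpu(cpu)
 *		per_cpu_ptr(stats, cpu)->hits = 0;
 *	...
 *	free_percpu(stats);
 */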
55
56#include <linux/bitmap.h>
57#include <linux/bootmem.h>
58#include <linux/err.h>
59#include <linux/list.h>
60#include <linux/log2.h>
61#include <linux/mm.h>
62#include <linux/module.h>
63#include <linux/mutex.h>
64#include <linux/percpu.h>
65#include <linux/pfn.h>
66#include <linux/slab.h>
67#include <linux/spinlock.h>
68#include <linux/vmalloc.h>
69#include <linux/workqueue.h>
70
71#include <asm/cacheflush.h>
72#include <asm/sections.h>
73#include <asm/tlbflush.h>
74#include <asm/io.h>
75
76#define PCPU_SLOT_BASE_SHIFT		5	/* 1-31 shares the same slot */
77#define PCPU_DFL_MAP_ALLOC		16	/* start a map with 16 ents */
78
79/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
80#ifndef __addr_to_pcpu_ptr
81#define __addr_to_pcpu_ptr(addr)					\
82	(void __percpu *)((unsigned long)(addr) -			\
83			  (unsigned long)pcpu_base_addr	+		\
84			  (unsigned long)__per_cpu_start)
85#endif
86#ifndef __pcpu_ptr_to_addr
87#define __pcpu_ptr_to_addr(ptr)						\
88	(void __force *)((unsigned long)(ptr) +				\
89			 (unsigned long)pcpu_base_addr -		\
90			 (unsigned long)__per_cpu_start)
91#endif
92
93struct pcpu_chunk {
94	struct list_head	list;		/* linked to pcpu_slot lists */
95	int			free_size;	/* free bytes in the chunk */
96	int			contig_hint;	/* max contiguous size hint */
97	void			*base_addr;	/* base address of this chunk */
98	int			map_used;	/* # of map entries used */
99	int			map_alloc;	/* # of map entries allocated */
100	int			*map;		/* allocation map */
101	void			*data;		/* chunk data */
102	bool			immutable;	/* no [de]population allowed */
103	unsigned long		populated[];	/* populated bitmap */
104};
105
106static int pcpu_unit_pages __read_mostly;
107static int pcpu_unit_size __read_mostly;
108static int pcpu_nr_units __read_mostly;
109static int pcpu_atom_size __read_mostly;
110static int pcpu_nr_slots __read_mostly;
111static size_t pcpu_chunk_struct_size __read_mostly;
112
113/* cpus with the lowest and highest unit numbers */
114static unsigned int pcpu_first_unit_cpu __read_mostly;
115static unsigned int pcpu_last_unit_cpu __read_mostly;
116
117/* the address of the first chunk which starts with the kernel static area */
118void *pcpu_base_addr __read_mostly;
119EXPORT_SYMBOL_GPL(pcpu_base_addr);
120
121static const int *pcpu_unit_map __read_mostly;		/* cpu -> unit */
122const unsigned long *pcpu_unit_offsets __read_mostly;	/* cpu -> unit offset */
123
124/* group information, used for vm allocation */
125static int pcpu_nr_groups __read_mostly;
126static const unsigned long *pcpu_group_offsets __read_mostly;
127static const size_t *pcpu_group_sizes __read_mostly;
128
129/*
130 * The first chunk which always exists.  Note that unlike other
131 * chunks, this one can be allocated and mapped in several different
132 * ways and thus often doesn't live in the vmalloc area.
133 */
134static struct pcpu_chunk *pcpu_first_chunk;
135
136/*
137 * Optional reserved chunk.  This chunk reserves part of the first
138 * chunk and serves it for reserved allocations.  The end offset of
139 * the reserved region is kept in pcpu_reserved_chunk_limit.  When the
140 * reserved area doesn't exist, the following variables contain NULL
141 * and 0 respectively.
142 */
143static struct pcpu_chunk *pcpu_reserved_chunk;
144static int pcpu_reserved_chunk_limit;
145
146/*
147 * Synchronization rules.
148 *
149 * There are two locks - pcpu_alloc_mutex and pcpu_lock.  The former
150 * protects allocation/reclaim paths, chunks, populated bitmap and
151 * vmalloc mapping.  The latter is a spinlock and protects the index
152 * data structures - chunk slots, chunks and area maps in chunks.
153 *
154 * During allocation, pcpu_alloc_mutex is kept locked all the time and
155 * pcpu_lock is grabbed and released as necessary.  All actual memory
156 * allocations are done using GFP_KERNEL with pcpu_lock released.  In
157 * general, percpu memory can't be allocated with irq off but
158 * irqsave/restore are still used in alloc path so that it can be used
159 * from early init path - sched_init() specifically.
160 *
161 * Free path accesses and alters only the index data structures, so it
162 * can be safely called from atomic context.  When memory needs to be
163 * returned to the system, free path schedules reclaim_work which
164 * grabs both pcpu_alloc_mutex and pcpu_lock, unlinks chunks to be
165 * reclaimed, releases both locks and frees the chunks.  Note that it's
166 * necessary to grab both locks to remove a chunk from circulation as
167 * allocation path might be referencing the chunk with only
168 * pcpu_alloc_mutex locked.
169 */
170static DEFINE_MUTEX(pcpu_alloc_mutex);	/* protects whole alloc and reclaim */
171static DEFINE_SPINLOCK(pcpu_lock);	/* protects index data structures */
172
173static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
174
175/* reclaim work to release fully free chunks, scheduled from free path */
176static void pcpu_reclaim(struct work_struct *work);
177static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim);
178
179static bool pcpu_addr_in_first_chunk(void *addr)
180{
181	void *first_start = pcpu_first_chunk->base_addr;
182
183	return addr >= first_start && addr < first_start + pcpu_unit_size;
184}
185
186static bool pcpu_addr_in_reserved_chunk(void *addr)
187{
188	void *first_start = pcpu_first_chunk->base_addr;
189
190	return addr >= first_start &&
191		addr < first_start + pcpu_reserved_chunk_limit;
192}
193
194static int __pcpu_size_to_slot(int size)
195{
196	int highbit = fls(size);	/* size is in bytes */
197	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
198}
199
200static int pcpu_size_to_slot(int size)
201{
202	if (size == pcpu_unit_size)
203		return pcpu_nr_slots - 1;
204	return __pcpu_size_to_slot(size);
205}
206
207static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
208{
209	if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
210		return 0;
211
212	return pcpu_size_to_slot(chunk->free_size);
213}
214
215/* set the pointer to a chunk in a page struct */
216static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
217{
218	page->index = (unsigned long)pcpu;
219}
220
221/* obtain pointer to a chunk from a page struct */
222static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
223{
224	return (struct pcpu_chunk *)page->index;
225}
226
227static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
228{
229	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
230}
231
232static unsigned long __maybe_unused pcpu_chunk_addr(struct pcpu_chunk *chunk,
233						unsigned int cpu, int page_idx)
234{
235	return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] +
236		(page_idx << PAGE_SHIFT);
237}
238
239static void __maybe_unused pcpu_next_unpop(struct pcpu_chunk *chunk,
240					   int *rs, int *re, int end)
241{
242	*rs = find_next_zero_bit(chunk->populated, end, *rs);
243	*re = find_next_bit(chunk->populated, end, *rs + 1);
244}
245
246static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
247					 int *rs, int *re, int end)
248{
249	*rs = find_next_bit(chunk->populated, end, *rs);
250	*re = find_next_zero_bit(chunk->populated, end, *rs + 1);
251}
252
253/*
254 * (Un)populated page region iterators.  Iterate over (un)populated
255 * page regions between @start and @end in @chunk.  @rs and @re should
256 * be integer variables and will be set to start and end page index of
257 * the current region.
258 */
259#define pcpu_for_each_unpop_region(chunk, rs, re, start, end)		    \
260	for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \
261	     (rs) < (re);						    \
262	     (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end)))
263
264#define pcpu_for_each_pop_region(chunk, rs, re, start, end)		    \
265	for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end));   \
266	     (rs) < (re);						    \
267	     (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))
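
/*
 * Illustrative sketch, not part of the original file: a typical walk
 * over every run of unpopulated pages in a chunk, as done by the chunk
 * management code pulled in from percpu-vm.c.  @rs and @re are plain
 * int variables; each iteration yields the half-open page range
 * [rs, re).
 *
 *	int rs, re;
 *
 *	pcpu_for_each_unpop_region(chunk, rs, re, 0, pcpu_unit_pages)
 *		pr_debug("pages [%d, %d) are unpopulated\n", rs, re);
 */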
268
269/**
270 * pcpu_mem_alloc - allocate memory
271 * @size: bytes to allocate
272 *
273 * Allocate @size bytes.  If @size is smaller than PAGE_SIZE,
274 * kzalloc() is used; otherwise, vmalloc() is used.  The returned
275 * memory is always zeroed.
276 *
277 * CONTEXT:
278 * Does GFP_KERNEL allocation.
279 *
280 * RETURNS:
281 * Pointer to the allocated area on success, NULL on failure.
282 */
283static void *pcpu_mem_alloc(size_t size)
284{
285	if (size <= PAGE_SIZE)
286		return kzalloc(size, GFP_KERNEL);
287	else {
288		void *ptr = vmalloc(size);
289		if (ptr)
290			memset(ptr, 0, size);
291		return ptr;
292	}
293}
294
295/**
296 * pcpu_mem_free - free memory
297 * @ptr: memory to free
298 * @size: size of the area
299 *
300 * Free @ptr.  @ptr should have been allocated using pcpu_mem_alloc().
301 */
302static void pcpu_mem_free(void *ptr, size_t size)
303{
304	if (size <= PAGE_SIZE)
305		kfree(ptr);
306	else
307		vfree(ptr);
308}
309
310/**
311 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
312 * @chunk: chunk of interest
313 * @oslot: the previous slot it was on
314 *
315 * This function is called after an allocation or free changed @chunk.
316 * New slot according to the changed state is determined and @chunk is
317 * moved to the slot.  Note that the reserved chunk is never put on
318 * chunk slots.
319 *
320 * CONTEXT:
321 * pcpu_lock.
322 */
323static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
324{
325	int nslot = pcpu_chunk_slot(chunk);
326
327	if (chunk != pcpu_reserved_chunk && oslot != nslot) {
328		if (oslot < nslot)
329			list_move(&chunk->list, &pcpu_slot[nslot]);
330		else
331			list_move_tail(&chunk->list, &pcpu_slot[nslot]);
332	}
333}
334
335/**
336 * pcpu_need_to_extend - determine whether chunk area map needs to be extended
337 * @chunk: chunk of interest
338 *
339 * Determine whether area map of @chunk needs to be extended to
340 * accommodate a new allocation.
341 *
342 * CONTEXT:
343 * pcpu_lock.
344 *
345 * RETURNS:
346 * New target map allocation length if extension is necessary, 0
347 * otherwise.
348 */
349static int pcpu_need_to_extend(struct pcpu_chunk *chunk)
350{
351	int new_alloc;
352
353	if (chunk->map_alloc >= chunk->map_used + 2)
354		return 0;
355
356	new_alloc = PCPU_DFL_MAP_ALLOC;
357	while (new_alloc < chunk->map_used + 2)
358		new_alloc *= 2;
359
360	return new_alloc;
361}
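
/*
 * Worked example (illustrative): with PCPU_DFL_MAP_ALLOC == 16, a chunk
 * whose map holds 19 entries needs room for at least 21 (map_used + 2),
 * so pcpu_need_to_extend() doubles 16 to 32 and returns 32.  A chunk
 * with 14 entries and map_alloc == 16 returns 0 as no extension is
 * needed.
 */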
362
363/**
364 * pcpu_extend_area_map - extend area map of a chunk
365 * @chunk: chunk of interest
366 * @new_alloc: new target allocation length of the area map
367 *
368 * Extend area map of @chunk to have @new_alloc entries.
369 *
370 * CONTEXT:
371 * Does GFP_KERNEL allocation.  Grabs and releases pcpu_lock.
372 *
373 * RETURNS:
374 * 0 on success, -errno on failure.
375 */
376static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
377{
378	int *old = NULL, *new = NULL;
379	size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
380	unsigned long flags;
381
382	new = pcpu_mem_alloc(new_size);
383	if (!new)
384		return -ENOMEM;
385
386	/* acquire pcpu_lock and switch to new area map */
387	spin_lock_irqsave(&pcpu_lock, flags);
388
389	if (new_alloc <= chunk->map_alloc)
390		goto out_unlock;
391
392	old_size = chunk->map_alloc * sizeof(chunk->map[0]);
393	memcpy(new, chunk->map, old_size);
394
395	/*
396	 * map_alloc < PCPU_DFL_MAP_ALLOC indicates that the chunk is
397	 * one of the first chunks and still using static map.
398	 */
399	if (chunk->map_alloc >= PCPU_DFL_MAP_ALLOC)
400		old = chunk->map;
401
402	chunk->map_alloc = new_alloc;
403	chunk->map = new;
404	new = NULL;
405
406out_unlock:
407	spin_unlock_irqrestore(&pcpu_lock, flags);
408
409	/*
410	 * pcpu_mem_free() might end up calling vfree() which uses
411	 * IRQ-unsafe lock and thus can't be called under pcpu_lock.
412	 */
413	pcpu_mem_free(old, old_size);
414	pcpu_mem_free(new, new_size);
415
416	return 0;
417}
418
419/**
420 * pcpu_split_block - split a map block
421 * @chunk: chunk of interest
422 * @i: index of map block to split
423 * @head: head size in bytes (can be 0)
424 * @tail: tail size in bytes (can be 0)
425 *
426 * Split the @i'th map block into two or three blocks.  If @head is
427 * non-zero, @head bytes block is inserted before block @i moving it
428 * to @i+1 and reducing its size by @head bytes.
429 *
430 * If @tail is non-zero, the target block, which can be @i or @i+1
431 * depending on @head, is reduced by @tail bytes and @tail byte block
432 * is inserted after the target block.
433 *
434 * @chunk->map must have enough free slots to accommodate the split.
435 *
436 * CONTEXT:
437 * pcpu_lock.
438 */
439static void pcpu_split_block(struct pcpu_chunk *chunk, int i,
440			     int head, int tail)
441{
442	int nr_extra = !!head + !!tail;
443
444	BUG_ON(chunk->map_alloc < chunk->map_used + nr_extra);
445
446	/* insert new subblocks */
447	memmove(&chunk->map[i + nr_extra], &chunk->map[i],
448		sizeof(chunk->map[0]) * (chunk->map_used - i));
449	chunk->map_used += nr_extra;
450
451	if (head) {
452		chunk->map[i + 1] = chunk->map[i] - head;
453		chunk->map[i++] = head;
454	}
455	if (tail) {
456		chunk->map[i++] -= tail;
457		chunk->map[i] = tail;
458	}
459}
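
/*
 * Worked example (illustrative): splitting a 512 byte free entry at
 * index i with head == 8 and tail == 100 turns map[i] == 512 into the
 * three entries { 8, 404, 100 }.  The 404 byte middle entry, now at
 * i + 1, is the one pcpu_alloc_area() subsequently marks allocated by
 * negating it.
 */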
460
461/**
462 * pcpu_alloc_area - allocate area from a pcpu_chunk
463 * @chunk: chunk of interest
464 * @size: wanted size in bytes
465 * @align: wanted align
466 *
467 * Try to allocate @size bytes area aligned at @align from @chunk.
468 * Note that this function only allocates the offset.  It doesn't
469 * populate or map the area.
470 *
471 * @chunk->map must have at least two free slots.
472 *
473 * CONTEXT:
474 * pcpu_lock.
475 *
476 * RETURNS:
477 * Allocated offset in @chunk on success, -1 if no matching area is
478 * found.
479 */
480static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
481{
482	int oslot = pcpu_chunk_slot(chunk);
483	int max_contig = 0;
484	int i, off;
485
486	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) {
487		bool is_last = i + 1 == chunk->map_used;
488		int head, tail;
489
490		/* extra for alignment requirement */
491		head = ALIGN(off, align) - off;
492		BUG_ON(i == 0 && head != 0);
493
494		if (chunk->map[i] < 0)
495			continue;
496		if (chunk->map[i] < head + size) {
497			max_contig = max(chunk->map[i], max_contig);
498			continue;
499		}
500
501		/*
502		 * If head is small or the previous block is free,
503		 * merge'em.  Note that 'small' is defined as smaller
504		 * than sizeof(int), which is very small but isn't too
505		 * uncommon for percpu allocations.
506		 */
507		if (head && (head < sizeof(int) || chunk->map[i - 1] > 0)) {
508			if (chunk->map[i - 1] > 0)
509				chunk->map[i - 1] += head;
510			else {
511				chunk->map[i - 1] -= head;
512				chunk->free_size -= head;
513			}
514			chunk->map[i] -= head;
515			off += head;
516			head = 0;
517		}
518
519		/* if tail is small, just keep it around */
520		tail = chunk->map[i] - head - size;
521		if (tail < sizeof(int))
522			tail = 0;
523
524		/* split if warranted */
525		if (head || tail) {
526			pcpu_split_block(chunk, i, head, tail);
527			if (head) {
528				i++;
529				off += head;
530				max_contig = max(chunk->map[i - 1], max_contig);
531			}
532			if (tail)
533				max_contig = max(chunk->map[i + 1], max_contig);
534		}
535
536		/* update hint and mark allocated */
537		if (is_last)
538			chunk->contig_hint = max_contig; /* fully scanned */
539		else
540			chunk->contig_hint = max(chunk->contig_hint,
541						 max_contig);
542
543		chunk->free_size -= chunk->map[i];
544		chunk->map[i] = -chunk->map[i];
545
546		pcpu_chunk_relocate(chunk, oslot);
547		return off;
548	}
549
550	chunk->contig_hint = max_contig;	/* fully scanned */
551	pcpu_chunk_relocate(chunk, oslot);
552
553	/* tell the upper layer that this chunk has no matching area */
554	return -1;
555}
556
557/**
558 * pcpu_free_area - free area to a pcpu_chunk
559 * @chunk: chunk of interest
560 * @freeme: offset of area to free
561 *
562 * Free the area starting at offset @freeme in @chunk.  Note that this
563 * function only modifies the allocation map.  It doesn't depopulate or
564 * unmap the area.
565 *
566 * CONTEXT:
567 * pcpu_lock.
568 */
569static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
570{
571	int oslot = pcpu_chunk_slot(chunk);
572	int i, off;
573
574	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++]))
575		if (off == freeme)
576			break;
577	BUG_ON(off != freeme);
578	BUG_ON(chunk->map[i] > 0);
579
580	chunk->map[i] = -chunk->map[i];
581	chunk->free_size += chunk->map[i];
582
583	/* merge with previous? */
584	if (i > 0 && chunk->map[i - 1] >= 0) {
585		chunk->map[i - 1] += chunk->map[i];
586		chunk->map_used--;
587		memmove(&chunk->map[i], &chunk->map[i + 1],
588			(chunk->map_used - i) * sizeof(chunk->map[0]));
589		i--;
590	}
591	/* merge with next? */
592	if (i + 1 < chunk->map_used && chunk->map[i + 1] >= 0) {
593		chunk->map[i] += chunk->map[i + 1];
594		chunk->map_used--;
595		memmove(&chunk->map[i + 1], &chunk->map[i + 2],
596			(chunk->map_used - (i + 1)) * sizeof(chunk->map[0]));
597	}
598
599	chunk->contig_hint = max(chunk->map[i], chunk->contig_hint);
600	pcpu_chunk_relocate(chunk, oslot);
601}
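
/*
 * Worked example (illustrative): freeing the 128 byte area in a map of
 * { -64, -128, 32 } first flips it to { -64, 128, 32 }; the two
 * trailing free entries are then merged, leaving { -64, 160 }.
 */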
602
603static struct pcpu_chunk *pcpu_alloc_chunk(void)
604{
605	struct pcpu_chunk *chunk;
606
607	chunk = kzalloc(pcpu_chunk_struct_size, GFP_KERNEL);
608	if (!chunk)
609		return NULL;
610
611	chunk->map = pcpu_mem_alloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0]));
612	if (!chunk->map) {
613		kfree(chunk);
614		return NULL;
615	}
616
617	chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
618	chunk->map[chunk->map_used++] = pcpu_unit_size;
619
620	INIT_LIST_HEAD(&chunk->list);
621	chunk->free_size = pcpu_unit_size;
622	chunk->contig_hint = pcpu_unit_size;
623
624	return chunk;
625}
626
627static void pcpu_free_chunk(struct pcpu_chunk *chunk)
628{
629	if (!chunk)
630		return;
631	pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]));
632	kfree(chunk);
633}
634
635/*
636 * Chunk management implementation.
637 *
638 * To allow different implementations, chunk alloc/free and
639 * [de]population are implemented in a separate file which is pulled
640 * into this file and compiled together.  The following functions
641 * should be implemented.
642 *
643 * pcpu_populate_chunk		- populate the specified range of a chunk
644 * pcpu_depopulate_chunk	- depopulate the specified range of a chunk
645 * pcpu_create_chunk		- create a new chunk
646 * pcpu_destroy_chunk		- destroy a chunk, always preceded by full depop
647 * pcpu_addr_to_page		- translate address to the corresponding struct page
648 * pcpu_verify_alloc_info	- check alloc_info is acceptable during init
649 */
650static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size);
651static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size);
652static struct pcpu_chunk *pcpu_create_chunk(void);
653static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
654static struct page *pcpu_addr_to_page(void *addr);
655static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);
656
657#include "percpu-vm.c"
658
659/**
660 * pcpu_chunk_addr_search - determine chunk containing specified address
661 * @addr: address for which the chunk needs to be determined.
662 *
663 * RETURNS:
664 * The address of the found chunk.
665 */
666static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
667{
668	/* is it in the first chunk? */
669	if (pcpu_addr_in_first_chunk(addr)) {
670		/* is it in the reserved area? */
671		if (pcpu_addr_in_reserved_chunk(addr))
672			return pcpu_reserved_chunk;
673		return pcpu_first_chunk;
674	}
675
676	/*
677	 * The address is relative to unit0 which might be unused and
678	 * thus unmapped.  Offset the address to the unit space of the
679	 * current processor before looking it up in the vmalloc
680	 * space.  Note that any possible cpu id can be used here, so
681	 * there's no need to worry about preemption or cpu hotplug.
682	 */
683	addr += pcpu_unit_offsets[raw_smp_processor_id()];
684	return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
685}
686
687/**
688 * pcpu_alloc - the percpu allocator
689 * @size: size of area to allocate in bytes
690 * @align: alignment of area (max PAGE_SIZE)
691 * @reserved: allocate from the reserved chunk if available
692 *
693 * Allocate percpu area of @size bytes aligned at @align.
694 *
695 * CONTEXT:
696 * Does GFP_KERNEL allocation.
697 *
698 * RETURNS:
699 * Percpu pointer to the allocated area on success, NULL on failure.
700 */
701static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
702{
703	static int warn_limit = 10;
704	struct pcpu_chunk *chunk;
705	const char *err;
706	int slot, off, new_alloc;
707	unsigned long flags;
708
709	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
710		WARN(true, "illegal size (%zu) or align (%zu) for "
711		     "percpu allocation\n", size, align);
712		return NULL;
713	}
714
715	mutex_lock(&pcpu_alloc_mutex);
716	spin_lock_irqsave(&pcpu_lock, flags);
717
718	/* serve reserved allocations from the reserved chunk if available */
719	if (reserved && pcpu_reserved_chunk) {
720		chunk = pcpu_reserved_chunk;
721
722		if (size > chunk->contig_hint) {
723			err = "alloc from reserved chunk failed";
724			goto fail_unlock;
725		}
726
727		while ((new_alloc = pcpu_need_to_extend(chunk))) {
728			spin_unlock_irqrestore(&pcpu_lock, flags);
729			if (pcpu_extend_area_map(chunk, new_alloc) < 0) {
730				err = "failed to extend area map of reserved chunk";
731				goto fail_unlock_mutex;
732			}
733			spin_lock_irqsave(&pcpu_lock, flags);
734		}
735
736		off = pcpu_alloc_area(chunk, size, align);
737		if (off >= 0)
738			goto area_found;
739
740		err = "alloc from reserved chunk failed";
741		goto fail_unlock;
742	}
743
744restart:
745	/* search through normal chunks */
746	for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
747		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
748			if (size > chunk->contig_hint)
749				continue;
750
751			new_alloc = pcpu_need_to_extend(chunk);
752			if (new_alloc) {
753				spin_unlock_irqrestore(&pcpu_lock, flags);
754				if (pcpu_extend_area_map(chunk,
755							 new_alloc) < 0) {
756					err = "failed to extend area map";
757					goto fail_unlock_mutex;
758				}
759				spin_lock_irqsave(&pcpu_lock, flags);
760				/*
761				 * pcpu_lock has been dropped, need to
762				 * restart cpu_slot list walking.
763				 */
764				goto restart;
765			}
766
767			off = pcpu_alloc_area(chunk, size, align);
768			if (off >= 0)
769				goto area_found;
770		}
771	}
772
773	/* hmmm... no space left, create a new chunk */
774	spin_unlock_irqrestore(&pcpu_lock, flags);
775
776	chunk = pcpu_create_chunk();
777	if (!chunk) {
778		err = "failed to allocate new chunk";
779		goto fail_unlock_mutex;
780	}
781
782	spin_lock_irqsave(&pcpu_lock, flags);
783	pcpu_chunk_relocate(chunk, -1);
784	goto restart;
785
786area_found:
787	spin_unlock_irqrestore(&pcpu_lock, flags);
788
789	/* populate, map and clear the area */
790	if (pcpu_populate_chunk(chunk, off, size)) {
791		spin_lock_irqsave(&pcpu_lock, flags);
792		pcpu_free_area(chunk, off);
793		err = "failed to populate";
794		goto fail_unlock;
795	}
796
797	mutex_unlock(&pcpu_alloc_mutex);
798
799	/* return address relative to base address */
800	return __addr_to_pcpu_ptr(chunk->base_addr + off);
801
802fail_unlock:
803	spin_unlock_irqrestore(&pcpu_lock, flags);
804fail_unlock_mutex:
805	mutex_unlock(&pcpu_alloc_mutex);
806	if (warn_limit) {
807		pr_warning("PERCPU: allocation failed, size=%zu align=%zu, "
808			   "%s\n", size, align, err);
809		dump_stack();
810		if (!--warn_limit)
811			pr_info("PERCPU: limit reached, disable warning\n");
812	}
813	return NULL;
814}
815
816/**
817 * __alloc_percpu - allocate dynamic percpu area
818 * @size: size of area to allocate in bytes
819 * @align: alignment of area (max PAGE_SIZE)
820 *
821 * Allocate percpu area of @size bytes aligned at @align.  Might
822 * sleep.  Might trigger writeouts.
823 *
824 * CONTEXT:
825 * Does GFP_KERNEL allocation.
826 *
827 * RETURNS:
828 * Percpu pointer to the allocated area on success, NULL on failure.
829 */
830void __percpu *__alloc_percpu(size_t size, size_t align)
831{
832	return pcpu_alloc(size, align, false);
833}
834EXPORT_SYMBOL_GPL(__alloc_percpu);
835
836/**
837 * __alloc_reserved_percpu - allocate reserved percpu area
838 * @size: size of area to allocate in bytes
839 * @align: alignment of area (max PAGE_SIZE)
840 *
841 * Allocate percpu area of @size bytes aligned at @align from reserved
842 * percpu area if arch has set it up; otherwise, allocation is served
843 * from the same dynamic area.  Might sleep.  Might trigger writeouts.
844 *
845 * CONTEXT:
846 * Does GFP_KERNEL allocation.
847 *
848 * RETURNS:
849 * Percpu pointer to the allocated area on success, NULL on failure.
850 */
851void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
852{
853	return pcpu_alloc(size, align, true);
854}
855
856/**
857 * pcpu_reclaim - reclaim fully free chunks, workqueue function
858 * @work: unused
859 *
860 * Reclaim all fully free chunks except for the first one.
861 *
862 * CONTEXT:
863 * workqueue context.
864 */
865static void pcpu_reclaim(struct work_struct *work)
866{
867	LIST_HEAD(todo);
868	struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1];
869	struct pcpu_chunk *chunk, *next;
870
871	mutex_lock(&pcpu_alloc_mutex);
872	spin_lock_irq(&pcpu_lock);
873
874	list_for_each_entry_safe(chunk, next, head, list) {
875		WARN_ON(chunk->immutable);
876
877		/* spare the first one */
878		if (chunk == list_first_entry(head, struct pcpu_chunk, list))
879			continue;
880
881		list_move(&chunk->list, &todo);
882	}
883
884	spin_unlock_irq(&pcpu_lock);
885
886	list_for_each_entry_safe(chunk, next, &todo, list) {
887		pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size);
888		pcpu_destroy_chunk(chunk);
889	}
890
891	mutex_unlock(&pcpu_alloc_mutex);
892}
893
894/**
895 * free_percpu - free percpu area
896 * @ptr: pointer to area to free
897 *
898 * Free percpu area @ptr.
899 *
900 * CONTEXT:
901 * Can be called from atomic context.
902 */
903void free_percpu(void __percpu *ptr)
904{
905	void *addr;
906	struct pcpu_chunk *chunk;
907	unsigned long flags;
908	int off;
909
910	if (!ptr)
911		return;
912
913	addr = __pcpu_ptr_to_addr(ptr);
914
915	spin_lock_irqsave(&pcpu_lock, flags);
916
917	chunk = pcpu_chunk_addr_search(addr);
918	off = addr - chunk->base_addr;
919
920	pcpu_free_area(chunk, off);
921
922	/* if there is more than one fully free chunk, wake up the grim reaper */
923	if (chunk->free_size == pcpu_unit_size) {
924		struct pcpu_chunk *pos;
925
926		list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
927			if (pos != chunk) {
928				schedule_work(&pcpu_reclaim_work);
929				break;
930			}
931	}
932
933	spin_unlock_irqrestore(&pcpu_lock, flags);
934}
935EXPORT_SYMBOL_GPL(free_percpu);
936
937/**
938 * is_kernel_percpu_address - test whether address is from static percpu area
939 * @addr: address to test
940 *
941 * Test whether @addr belongs to in-kernel static percpu area.  Module
942 * static percpu areas are not considered.  For those, use
943 * is_module_percpu_address().
944 *
945 * RETURNS:
946 * %true if @addr is from in-kernel static percpu area, %false otherwise.
947 */
948bool is_kernel_percpu_address(unsigned long addr)
949{
950	const size_t static_size = __per_cpu_end - __per_cpu_start;
951	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
952	unsigned int cpu;
953
954	for_each_possible_cpu(cpu) {
955		void *start = per_cpu_ptr(base, cpu);
956
957		if ((void *)addr >= start && (void *)addr < start + static_size)
958			return true;
959	}
960	return false;
961}
962
963/**
964 * per_cpu_ptr_to_phys - convert translated percpu address to physical address
965 * @addr: the address to be converted to physical address
966 *
967 * Given @addr, which is a dereferenceable address obtained via one of
968 * the percpu access macros, this function translates it into its
969 * physical address.  The caller is responsible for ensuring that @addr
970 * stays valid until this function finishes.
971 *
972 * RETURNS:
973 * The physical address for @addr.
974 */
975phys_addr_t per_cpu_ptr_to_phys(void *addr)
976{
977	if (pcpu_addr_in_first_chunk(addr)) {
978		if ((unsigned long)addr < VMALLOC_START ||
979		    (unsigned long)addr >= VMALLOC_END)
980			return __pa(addr);
981		else
982			return page_to_phys(vmalloc_to_page(addr));
983	} else
984		return page_to_phys(pcpu_addr_to_page(addr));
985}
986
987static inline size_t pcpu_calc_fc_sizes(size_t static_size,
988					size_t reserved_size,
989					ssize_t *dyn_sizep)
990{
991	size_t size_sum;
992
993	size_sum = PFN_ALIGN(static_size + reserved_size +
994			     (*dyn_sizep >= 0 ? *dyn_sizep : 0));
995	if (*dyn_sizep != 0)
996		*dyn_sizep = size_sum - static_size - reserved_size;
997
998	return size_sum;
999}
1000
1001/**
1002 * pcpu_alloc_alloc_info - allocate percpu allocation info
1003 * @nr_groups: the number of groups
1004 * @nr_units: the number of units
1005 *
1006 * Allocate ai which is large enough for @nr_groups groups containing
1007 * @nr_units units.  The returned ai's groups[0].cpu_map points to the
1008 * cpu_map array which is long enough for @nr_units and filled with
1009 * NR_CPUS.  It's the caller's responsibility to initialize cpu_map
1010 * pointer of other groups.
1011 *
1012 * RETURNS:
1013 * Pointer to the allocated pcpu_alloc_info on success, NULL on
1014 * failure.
1015 */
1016struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
1017						      int nr_units)
1018{
1019	struct pcpu_alloc_info *ai;
1020	size_t base_size, ai_size;
1021	void *ptr;
1022	int unit;
1023
1024	base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]),
1025			  __alignof__(ai->groups[0].cpu_map[0]));
1026	ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
1027
1028	ptr = alloc_bootmem_nopanic(PFN_ALIGN(ai_size));
1029	if (!ptr)
1030		return NULL;
1031	ai = ptr;
1032	ptr += base_size;
1033
1034	ai->groups[0].cpu_map = ptr;
1035
1036	for (unit = 0; unit < nr_units; unit++)
1037		ai->groups[0].cpu_map[unit] = NR_CPUS;
1038
1039	ai->nr_groups = nr_groups;
1040	ai->__ai_size = PFN_ALIGN(ai_size);
1041
1042	return ai;
1043}
1044
1045/**
1046 * pcpu_free_alloc_info - free percpu allocation info
1047 * @ai: pcpu_alloc_info to free
1048 *
1049 * Free @ai which was allocated by pcpu_alloc_alloc_info().
1050 */
1051void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
1052{
1053	free_bootmem(__pa(ai), ai->__ai_size);
1054}
1055
1056/**
1057 * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
1058 * @reserved_size: the size of reserved percpu area in bytes
1059 * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
1060 * @atom_size: allocation atom size
1061 * @cpu_distance_fn: callback to determine distance between cpus, optional
1062 *
1063 * This function determines grouping of units, their mappings to cpus
1064 * and other parameters considering needed percpu size, allocation
1065 * atom size and distances between CPUs.
1066 *
1067 * Groups are always multiples of atom size and CPUs which are of
1068 * LOCAL_DISTANCE both ways are grouped together and share space for
1069 * units in the same group.  The returned configuration is guaranteed
1070 * to have CPUs on different nodes in different groups and >=75% usage
1071 * of allocated virtual address space.
1072 *
1073 * RETURNS:
1074 * On success, pointer to the new allocation_info is returned.  On
1075 * failure, ERR_PTR value is returned.
1076 */
1077struct pcpu_alloc_info * __init pcpu_build_alloc_info(
1078				size_t reserved_size, ssize_t dyn_size,
1079				size_t atom_size,
1080				pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
1081{
1082	static int group_map[NR_CPUS] __initdata;
1083	static int group_cnt[NR_CPUS] __initdata;
1084	const size_t static_size = __per_cpu_end - __per_cpu_start;
1085	int group_cnt_max = 0, nr_groups = 1, nr_units = 0;
1086	size_t size_sum, min_unit_size, alloc_size;
1087	int upa, max_upa, uninitialized_var(best_upa);	/* units_per_alloc */
1088	int last_allocs, group, unit;
1089	unsigned int cpu, tcpu;
1090	struct pcpu_alloc_info *ai;
1091	unsigned int *cpu_map;
1092
1093	/* this function may be called multiple times */
1094	memset(group_map, 0, sizeof(group_map));
1095	memset(group_cnt, 0, sizeof(group_cnt));
1096
1097	/*
1098	 * Determine min_unit_size, alloc_size and max_upa such that
1099	 * alloc_size is multiple of atom_size and is the smallest
1100 * which can accommodate 4k aligned segments which are equal to
1101	 * or larger than min_unit_size.
1102	 */
1103	size_sum = pcpu_calc_fc_sizes(static_size, reserved_size, &dyn_size);
1104	min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
1105
1106	alloc_size = roundup(min_unit_size, atom_size);
1107	upa = alloc_size / min_unit_size;
1108	while (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
1109		upa--;
1110	max_upa = upa;
1111
1112	/* group cpus according to their proximity */
1113	for_each_possible_cpu(cpu) {
1114		group = 0;
1115	next_group:
1116		for_each_possible_cpu(tcpu) {
1117			if (cpu == tcpu)
1118				break;
1119			if (group_map[tcpu] == group && cpu_distance_fn &&
1120			    (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
1121			     cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
1122				group++;
1123				nr_groups = max(nr_groups, group + 1);
1124				goto next_group;
1125			}
1126		}
1127		group_map[cpu] = group;
1128		group_cnt[group]++;
1129		group_cnt_max = max(group_cnt_max, group_cnt[group]);
1130	}
1131
1132	/*
1133	 * Expand unit size until address space usage goes over 75%
1134	 * and then as much as possible without using more address
1135	 * space.
1136	 */
1137	last_allocs = INT_MAX;
1138	for (upa = max_upa; upa; upa--) {
1139		int allocs = 0, wasted = 0;
1140
1141		if (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
1142			continue;
1143
1144		for (group = 0; group < nr_groups; group++) {
1145			int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
1146			allocs += this_allocs;
1147			wasted += this_allocs * upa - group_cnt[group];
1148		}
1149
1150		/*
1151		 * Don't accept if wastage is over 1/3.  The
1152		 * greater-than comparison ensures upa==1 always
1153		 * passes the following check.
1154		 */
1155		if (wasted > num_possible_cpus() / 3)
1156			continue;
1157
1158		/* and then don't consume more memory */
1159		if (allocs > last_allocs)
1160			break;
1161		last_allocs = allocs;
1162		best_upa = upa;
1163	}
1164	upa = best_upa;
1165
1166	/* allocate and fill alloc_info */
1167	for (group = 0; group < nr_groups; group++)
1168		nr_units += roundup(group_cnt[group], upa);
1169
1170	ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
1171	if (!ai)
1172		return ERR_PTR(-ENOMEM);
1173	cpu_map = ai->groups[0].cpu_map;
1174
1175	for (group = 0; group < nr_groups; group++) {
1176		ai->groups[group].cpu_map = cpu_map;
1177		cpu_map += roundup(group_cnt[group], upa);
1178	}
1179
1180	ai->static_size = static_size;
1181	ai->reserved_size = reserved_size;
1182	ai->dyn_size = dyn_size;
1183	ai->unit_size = alloc_size / upa;
1184	ai->atom_size = atom_size;
1185	ai->alloc_size = alloc_size;
1186
1187	for (group = 0, unit = 0; group_cnt[group]; group++) {
1188		struct pcpu_group_info *gi = &ai->groups[group];
1189
1190		/*
1191		 * Initialize base_offset as if all groups are located
1192		 * back-to-back.  The caller should update this to
1193		 * reflect actual allocation.
1194		 */
1195		gi->base_offset = unit * ai->unit_size;
1196
1197		for_each_possible_cpu(cpu)
1198			if (group_map[cpu] == group)
1199				gi->cpu_map[gi->nr_units++] = cpu;
1200		gi->nr_units = roundup(gi->nr_units, upa);
1201		unit += gi->nr_units;
1202	}
1203	BUG_ON(unit != nr_units);
1204
1205	return ai;
1206}
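
/*
 * Worked example (illustrative, numbers are hypothetical): if size_sum
 * comes to 44k (and exceeds PCPU_MIN_UNIT_SIZE) and atom_size is 2M,
 * alloc_size is rounded up to 2M.  The initial upa of 2M/44k = 46 is
 * decremented until 2M is divisible by upa and the resulting unit is
 * page aligned, giving max_upa = 32, i.e. a 64k unit.  The wastage loop
 * above may lower upa further so that the number of unused units stays
 * within a third of the number of possible cpus.
 */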
1207
1208/**
1209 * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
1210 * @lvl: loglevel
1211 * @ai: allocation info to dump
1212 *
1213 * Print out information about @ai using loglevel @lvl.
1214 */
1215static void pcpu_dump_alloc_info(const char *lvl,
1216				 const struct pcpu_alloc_info *ai)
1217{
1218	int group_width = 1, cpu_width = 1, width;
1219	char empty_str[] = "--------";
1220	int alloc = 0, alloc_end = 0;
1221	int group, v;
1222	int upa, apl;	/* units per alloc, allocs per line */
1223
1224	v = ai->nr_groups;
1225	while (v /= 10)
1226		group_width++;
1227
1228	v = num_possible_cpus();
1229	while (v /= 10)
1230		cpu_width++;
1231	empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';
1232
1233	upa = ai->alloc_size / ai->unit_size;
1234	width = upa * (cpu_width + 1) + group_width + 3;
1235	apl = rounddown_pow_of_two(max(60 / width, 1));
1236
1237	printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
1238	       lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
1239	       ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);
1240
1241	for (group = 0; group < ai->nr_groups; group++) {
1242		const struct pcpu_group_info *gi = &ai->groups[group];
1243		int unit = 0, unit_end = 0;
1244
1245		BUG_ON(gi->nr_units % upa);
1246		for (alloc_end += gi->nr_units / upa;
1247		     alloc < alloc_end; alloc++) {
1248			if (!(alloc % apl)) {
1249				printk("\n");
1250				printk("%spcpu-alloc: ", lvl);
1251			}
1252			printk("[%0*d] ", group_width, group);
1253
1254			for (unit_end += upa; unit < unit_end; unit++)
1255				if (gi->cpu_map[unit] != NR_CPUS)
1256					printk("%0*d ", cpu_width,
1257					       gi->cpu_map[unit]);
1258				else
1259					printk("%s ", empty_str);
1260		}
1261	}
1262	printk("\n");
1263}
1264
1265/**
1266 * pcpu_setup_first_chunk - initialize the first percpu chunk
1267 * @ai: pcpu_alloc_info describing how the percpu area is shaped
1268 * @base_addr: mapped address
1269 *
1270 * Initialize the first percpu chunk which contains the kernel static
1271 * percpu area.  This function is to be called from arch percpu area
1272 * setup path.
1273 *
1274 * @ai contains all information necessary to initialize the first
1275 * chunk and prime the dynamic percpu allocator.
1276 *
1277 * @ai->static_size is the size of static percpu area.
1278 *
1279 * @ai->reserved_size, if non-zero, specifies the amount of bytes to
1280 * reserve after the static area in the first chunk.  This reserves
1281 * part of the first chunk so that it's available only through reserved
1282 * percpu allocation.  This is primarily used to serve module percpu
1283 * static areas on architectures where the addressing model has
1284 * limited offset range for symbol relocations to guarantee module
1285 * percpu symbols fall inside the relocatable range.
1286 *
1287 * @ai->dyn_size determines the number of bytes available for dynamic
1288 * allocation in the first chunk.  The area between @ai->static_size +
1289 * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
1290 *
1291 * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
1292 * and equal to or larger than @ai->static_size + @ai->reserved_size +
1293 * @ai->dyn_size.
1294 *
1295 * @ai->atom_size is the allocation atom size and used as alignment
1296 * for vm areas.
1297 *
1298 * @ai->alloc_size is the allocation size and always multiple of
1299 * @ai->atom_size.  This is larger than @ai->atom_size if
1300 * @ai->unit_size is larger than @ai->atom_size.
1301 *
1302 * @ai->nr_groups and @ai->groups describe virtual memory layout of
1303 * percpu areas.  Units which should be colocated are put into the
1304 * same group.  Dynamic VM areas will be allocated according to these
1305 * groupings.  If @ai->nr_groups is zero, a single group containing
1306 * all units is assumed.
1307 *
1308 * The caller should have mapped the first chunk at @base_addr and
1309 * copied static data to each unit.
1310 *
1311 * If the first chunk ends up with both reserved and dynamic areas, it
1312 * is served by two chunks - one to serve the core static and reserved
1313 * areas and the other for the dynamic area.  They share the same vm
1314 * and page map but use different area allocation maps to stay away
1315 * from each other.  The latter chunk is circulated in the chunk slots
1316 * and available for dynamic allocation like any other chunks.
1317 *
1318 * RETURNS:
1319 * 0 on success, -errno on failure.
1320 */
1321int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
1322				  void *base_addr)
1323{
1324	static char cpus_buf[4096] __initdata;
1325	static int smap[2], dmap[2];
1326	size_t dyn_size = ai->dyn_size;
1327	size_t size_sum = ai->static_size + ai->reserved_size + dyn_size;
1328	struct pcpu_chunk *schunk, *dchunk = NULL;
1329	unsigned long *group_offsets;
1330	size_t *group_sizes;
1331	unsigned long *unit_off;
1332	unsigned int cpu;
1333	int *unit_map;
1334	int group, unit, i;
1335
1336	cpumask_scnprintf(cpus_buf, sizeof(cpus_buf), cpu_possible_mask);
1337
1338#define PCPU_SETUP_BUG_ON(cond)	do {					\
1339	if (unlikely(cond)) {						\
1340		pr_emerg("PERCPU: failed to initialize, %s\n", #cond);	\
1341		pr_emerg("PERCPU: cpu_possible_mask=%s\n", cpus_buf);	\
1342		pcpu_dump_alloc_info(KERN_EMERG, ai);			\
1343		BUG();							\
1344	}								\
1345} while (0)
1346
1347	/* sanity checks */
1348	BUILD_BUG_ON(ARRAY_SIZE(smap) >= PCPU_DFL_MAP_ALLOC ||
1349		     ARRAY_SIZE(dmap) >= PCPU_DFL_MAP_ALLOC);
1350	PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
1351	PCPU_SETUP_BUG_ON(!ai->static_size);
1352	PCPU_SETUP_BUG_ON(!base_addr);
1353	PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
1354	PCPU_SETUP_BUG_ON(ai->unit_size & ~PAGE_MASK);
1355	PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
1356	PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);
1357
1358	/* process group information and build config tables accordingly */
1359	group_offsets = alloc_bootmem(ai->nr_groups * sizeof(group_offsets[0]));
1360	group_sizes = alloc_bootmem(ai->nr_groups * sizeof(group_sizes[0]));
1361	unit_map = alloc_bootmem(nr_cpu_ids * sizeof(unit_map[0]));
1362	unit_off = alloc_bootmem(nr_cpu_ids * sizeof(unit_off[0]));
1363
1364	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
1365		unit_map[cpu] = UINT_MAX;
1366	pcpu_first_unit_cpu = NR_CPUS;
1367
1368	for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
1369		const struct pcpu_group_info *gi = &ai->groups[group];
1370
1371		group_offsets[group] = gi->base_offset;
1372		group_sizes[group] = gi->nr_units * ai->unit_size;
1373
1374		for (i = 0; i < gi->nr_units; i++) {
1375			cpu = gi->cpu_map[i];
1376			if (cpu == NR_CPUS)
1377				continue;
1378
1379			PCPU_SETUP_BUG_ON(cpu > nr_cpu_ids);
1380			PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
1381			PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);
1382
1383			unit_map[cpu] = unit + i;
1384			unit_off[cpu] = gi->base_offset + i * ai->unit_size;
1385
1386			if (pcpu_first_unit_cpu == NR_CPUS)
1387				pcpu_first_unit_cpu = cpu;
1388		}
1389	}
1390	pcpu_last_unit_cpu = cpu;
1391	pcpu_nr_units = unit;
1392
1393	for_each_possible_cpu(cpu)
1394		PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);
1395
1396	/* we're done parsing the input, undefine BUG macro and dump config */
1397#undef PCPU_SETUP_BUG_ON
1398	pcpu_dump_alloc_info(KERN_INFO, ai);
1399
1400	pcpu_nr_groups = ai->nr_groups;
1401	pcpu_group_offsets = group_offsets;
1402	pcpu_group_sizes = group_sizes;
1403	pcpu_unit_map = unit_map;
1404	pcpu_unit_offsets = unit_off;
1405
1406	/* determine basic parameters */
1407	pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
1408	pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
1409	pcpu_atom_size = ai->atom_size;
1410	pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) +
1411		BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long);
1412
1413	/*
1414	 * Allocate chunk slots.  The additional last slot is for
1415	 * empty chunks.
1416	 */
1417	pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
1418	pcpu_slot = alloc_bootmem(pcpu_nr_slots * sizeof(pcpu_slot[0]));
1419	for (i = 0; i < pcpu_nr_slots; i++)
1420		INIT_LIST_HEAD(&pcpu_slot[i]);
1421
1422	/*
1423	 * Initialize static chunk.  If reserved_size is zero, the
1424	 * static chunk covers static area + dynamic allocation area
1425	 * in the first chunk.  If reserved_size is not zero, it
1426	 * covers static area + reserved area (mostly used for module
1427	 * static percpu allocation).
1428	 */
1429	schunk = alloc_bootmem(pcpu_chunk_struct_size);
1430	INIT_LIST_HEAD(&schunk->list);
1431	schunk->base_addr = base_addr;
1432	schunk->map = smap;
1433	schunk->map_alloc = ARRAY_SIZE(smap);
1434	schunk->immutable = true;
1435	bitmap_fill(schunk->populated, pcpu_unit_pages);
1436
1437	if (ai->reserved_size) {
1438		schunk->free_size = ai->reserved_size;
1439		pcpu_reserved_chunk = schunk;
1440		pcpu_reserved_chunk_limit = ai->static_size + ai->reserved_size;
1441	} else {
1442		schunk->free_size = dyn_size;
1443		dyn_size = 0;			/* dynamic area covered */
1444	}
1445	schunk->contig_hint = schunk->free_size;
1446
1447	schunk->map[schunk->map_used++] = -ai->static_size;
1448	if (schunk->free_size)
1449		schunk->map[schunk->map_used++] = schunk->free_size;
1450
1451	/* init dynamic chunk if necessary */
1452	if (dyn_size) {
1453		dchunk = alloc_bootmem(pcpu_chunk_struct_size);
1454		INIT_LIST_HEAD(&dchunk->list);
1455		dchunk->base_addr = base_addr;
1456		dchunk->map = dmap;
1457		dchunk->map_alloc = ARRAY_SIZE(dmap);
1458		dchunk->immutable = true;
1459		bitmap_fill(dchunk->populated, pcpu_unit_pages);
1460
1461		dchunk->contig_hint = dchunk->free_size = dyn_size;
1462		dchunk->map[dchunk->map_used++] = -pcpu_reserved_chunk_limit;
1463		dchunk->map[dchunk->map_used++] = dchunk->free_size;
1464	}
1465
1466	/* link the first chunk in */
1467	pcpu_first_chunk = dchunk ?: schunk;
1468	pcpu_chunk_relocate(pcpu_first_chunk, -1);
1469
1470	/* we're done */
1471	pcpu_base_addr = base_addr;
1472	return 0;
1473}
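
/*
 * Worked example (illustrative, sizes are hypothetical): with
 * static_size = 40k, reserved_size = 8k, dyn_size = 20k and
 * unit_size = 128k, schunk covers the static and reserved areas with
 * map { -40k, 8k } and becomes pcpu_reserved_chunk, dchunk starts out
 * with map { -48k, 20k } and is linked in as pcpu_first_chunk, and the
 * remaining 60k of each unit stays unused.
 */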
1474
1475const char *pcpu_fc_names[PCPU_FC_NR] __initdata = {
1476	[PCPU_FC_AUTO]	= "auto",
1477	[PCPU_FC_EMBED]	= "embed",
1478	[PCPU_FC_PAGE]	= "page",
1479};
1480
1481enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;
1482
1483static int __init percpu_alloc_setup(char *str)
1484{
1485	if (0)
1486		/* nada */;
1487#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
1488	else if (!strcmp(str, "embed"))
1489		pcpu_chosen_fc = PCPU_FC_EMBED;
1490#endif
1491#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
1492	else if (!strcmp(str, "page"))
1493		pcpu_chosen_fc = PCPU_FC_PAGE;
1494#endif
1495	else
1496		pr_warning("PERCPU: unknown allocator %s specified\n", str);
1497
1498	return 0;
1499}
1500early_param("percpu_alloc", percpu_alloc_setup);
1501
1502#if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
1503	!defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
1504/**
1505 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
1506 * @reserved_size: the size of reserved percpu area in bytes
1507 * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
1508 * @atom_size: allocation atom size
1509 * @cpu_distance_fn: callback to determine distance between cpus, optional
1510 * @alloc_fn: function to allocate percpu page
1511 * @free_fn: function to free percpu page
1512 *
1513 * This is a helper to ease setting up embedded first percpu chunk and
1514 * can be called where pcpu_setup_first_chunk() is expected.
1515 *
1516 * If this function is used to setup the first chunk, it is allocated
1517 * by calling @alloc_fn and used as-is without being mapped into
1518 * vmalloc area.  Allocations are always whole multiples of @atom_size
1519 * aligned to @atom_size.
1520 *
1521 * This enables the first chunk to piggy back on the linear physical
1522 * mapping which often uses larger page size.  Please note that this
1523 * can result in very sparse cpu->unit mapping on NUMA machines thus
1524 * requiring large vmalloc address space.  Don't use this allocator if
1525 * vmalloc space is not orders of magnitude larger than distances
1526 * between node memory addresses (i.e. 32-bit NUMA machines).
1527 *
1528 * When @dyn_size is positive, dynamic area might be larger than
1529 * specified to fill page alignment.  When @dyn_size is auto,
1530 * @dyn_size is just big enough to fill page alignment after static
1531 * and reserved areas.
1532 *
1533 * If the needed size is smaller than the minimum or specified unit
1534 * size, the leftover is returned using @free_fn.
1535 *
1536 * RETURNS:
1537 * 0 on success, -errno on failure.
1538 */
1539int __init pcpu_embed_first_chunk(size_t reserved_size, ssize_t dyn_size,
1540				  size_t atom_size,
1541				  pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
1542				  pcpu_fc_alloc_fn_t alloc_fn,
1543				  pcpu_fc_free_fn_t free_fn)
1544{
1545	void *base = (void *)ULONG_MAX;
1546	void **areas = NULL;
1547	struct pcpu_alloc_info *ai;
1548	size_t size_sum, areas_size, max_distance;
1549	int group, i, rc;
1550
1551	ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
1552				   cpu_distance_fn);
1553	if (IS_ERR(ai))
1554		return PTR_ERR(ai);
1555
1556	size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
1557	areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));
1558
1559	areas = alloc_bootmem_nopanic(areas_size);
1560	if (!areas) {
1561		rc = -ENOMEM;
1562		goto out_free;
1563	}
1564
1565	/* allocate, copy and determine base address */
1566	for (group = 0; group < ai->nr_groups; group++) {
1567		struct pcpu_group_info *gi = &ai->groups[group];
1568		unsigned int cpu = NR_CPUS;
1569		void *ptr;
1570
1571		for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
1572			cpu = gi->cpu_map[i];
1573		BUG_ON(cpu == NR_CPUS);
1574
1575		/* allocate space for the whole group */
1576		ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size);
1577		if (!ptr) {
1578			rc = -ENOMEM;
1579			goto out_free_areas;
1580		}
1581		areas[group] = ptr;
1582
1583		base = min(ptr, base);
1584
1585		for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
1586			if (gi->cpu_map[i] == NR_CPUS) {
1587				/* unused unit, free whole */
1588				free_fn(ptr, ai->unit_size);
1589				continue;
1590			}
1591			/* copy and return the unused part */
1592			memcpy(ptr, __per_cpu_load, ai->static_size);
1593			free_fn(ptr + size_sum, ai->unit_size - size_sum);
1594		}
1595	}
1596
1597	/* base address is now known, determine group base offsets */
1598	max_distance = 0;
1599	for (group = 0; group < ai->nr_groups; group++) {
1600		ai->groups[group].base_offset = areas[group] - base;
1601		max_distance = max_t(size_t, max_distance,
1602				     ai->groups[group].base_offset);
1603	}
1604	max_distance += ai->unit_size;
1605
1606	/* warn if maximum distance is further than 75% of vmalloc space */
1607	if (max_distance > (VMALLOC_END - VMALLOC_START) * 3 / 4) {
1608		pr_warning("PERCPU: max_distance=0x%zx too large for vmalloc "
1609			   "space 0x%lx\n",
1610			   max_distance, VMALLOC_END - VMALLOC_START);
1611#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
1612		/* and fail if we have fallback */
1613		rc = -EINVAL;
1614		goto out_free;
1615#endif
1616	}
1617
1618	pr_info("PERCPU: Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
1619		PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
1620		ai->dyn_size, ai->unit_size);
1621
1622	rc = pcpu_setup_first_chunk(ai, base);
1623	goto out_free;
1624
1625out_free_areas:
1626	for (group = 0; group < ai->nr_groups; group++)
1627		free_fn(areas[group],
1628			ai->groups[group].nr_units * ai->unit_size);
1629out_free:
1630	pcpu_free_alloc_info(ai);
1631	if (areas)
1632		free_bootmem(__pa(areas), areas_size);
1633	return rc;
1634}
1635#endif /* CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK ||
1636	  !CONFIG_HAVE_SETUP_PER_CPU_AREA */
1637
1638#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
1639/**
1640 * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
1641 * @reserved_size: the size of reserved percpu area in bytes
1642 * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
1643 * @free_fn: function to free percpu page, always called with PAGE_SIZE
1644 * @populate_pte_fn: function to populate pte
1645 *
1646 * This is a helper to ease setting up page-remapped first percpu
1647 * chunk and can be called where pcpu_setup_first_chunk() is expected.
1648 *
1649 * This is the basic allocator.  Static percpu area is allocated
1650 * page-by-page into vmalloc area.
1651 *
1652 * RETURNS:
1653 * 0 on success, -errno on failure.
1654 */
1655int __init pcpu_page_first_chunk(size_t reserved_size,
1656				 pcpu_fc_alloc_fn_t alloc_fn,
1657				 pcpu_fc_free_fn_t free_fn,
1658				 pcpu_fc_populate_pte_fn_t populate_pte_fn)
1659{
1660	static struct vm_struct vm;
1661	struct pcpu_alloc_info *ai;
1662	char psize_str[16];
1663	int unit_pages;
1664	size_t pages_size;
1665	struct page **pages;
1666	int unit, i, j, rc;
1667
1668	snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);
1669
1670	ai = pcpu_build_alloc_info(reserved_size, -1, PAGE_SIZE, NULL);
1671	if (IS_ERR(ai))
1672		return PTR_ERR(ai);
1673	BUG_ON(ai->nr_groups != 1);
1674	BUG_ON(ai->groups[0].nr_units != num_possible_cpus());
1675
1676	unit_pages = ai->unit_size >> PAGE_SHIFT;
1677
1678	/* unaligned allocations can't be freed, round up to page size */
1679	pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
1680			       sizeof(pages[0]));
1681	pages = alloc_bootmem(pages_size);
1682
1683	/* allocate pages */
1684	j = 0;
1685	for (unit = 0; unit < num_possible_cpus(); unit++)
1686		for (i = 0; i < unit_pages; i++) {
1687			unsigned int cpu = ai->groups[0].cpu_map[unit];
1688			void *ptr;
1689
1690			ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
1691			if (!ptr) {
1692				pr_warning("PERCPU: failed to allocate %s page "
1693					   "for cpu%u\n", psize_str, cpu);
1694				goto enomem;
1695			}
1696			pages[j++] = virt_to_page(ptr);
1697		}
1698
1699	/* allocate vm area, map the pages and copy static data */
1700	vm.flags = VM_ALLOC;
1701	vm.size = num_possible_cpus() * ai->unit_size;
1702	vm_area_register_early(&vm, PAGE_SIZE);
1703
1704	for (unit = 0; unit < num_possible_cpus(); unit++) {
1705		unsigned long unit_addr =
1706			(unsigned long)vm.addr + unit * ai->unit_size;
1707
1708		for (i = 0; i < unit_pages; i++)
1709			populate_pte_fn(unit_addr + (i << PAGE_SHIFT));
1710
1711		/* pte already populated, the following shouldn't fail */
1712		rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
1713				      unit_pages);
1714		if (rc < 0)
1715			panic("failed to map percpu area, err=%d\n", rc);
1716
1717		/*
1718		 * FIXME: Archs with virtual cache should flush local
1719		 * cache for the linear mapping here - something
1720		 * equivalent to flush_cache_vmap() on the local cpu.
1721		 * flush_cache_vmap() can't be used as most supporting
1722		 * data structures are not set up yet.
1723		 */
1724
1725		/* copy static data */
1726		memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
1727	}
1728
1729	/* we're ready, commit */
1730	pr_info("PERCPU: %d %s pages/cpu @%p s%zu r%zu d%zu\n",
1731		unit_pages, psize_str, vm.addr, ai->static_size,
1732		ai->reserved_size, ai->dyn_size);
1733
1734	rc = pcpu_setup_first_chunk(ai, vm.addr);
1735	goto out_free_ar;
1736
1737enomem:
1738	while (--j >= 0)
1739		free_fn(page_address(pages[j]), PAGE_SIZE);
1740	rc = -ENOMEM;
1741out_free_ar:
1742	free_bootmem(__pa(pages), pages_size);
1743	pcpu_free_alloc_info(ai);
1744	return rc;
1745}
1746#endif /* CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK */
1747
1748/*
1749 * Generic percpu area setup.
1750 *
1751 * The embedding helper is used because its behavior closely resembles
1752 * the original non-dynamic generic percpu area setup.  This is
1753 * important because many archs have addressing restrictions and might
1754 * fail if the percpu area is located far away from the previous
1755 * location.  As an added bonus, in non-NUMA cases, embedding is
1756 * generally a good idea TLB-wise because percpu area can piggy back
1757 * on the physical linear memory mapping which uses large page
1758 * mappings on applicable archs.
1759 */
1760#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
1761unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
1762EXPORT_SYMBOL(__per_cpu_offset);
1763
1764static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
1765				       size_t align)
1766{
1767	return __alloc_bootmem_nopanic(size, align, __pa(MAX_DMA_ADDRESS));
1768}
1769
1770static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
1771{
1772	free_bootmem(__pa(ptr), size);
1773}
1774
1775void __init setup_per_cpu_areas(void)
1776{
1777	unsigned long delta;
1778	unsigned int cpu;
1779	int rc;
1780
1781	/*
1782	 * Always reserve area for module percpu variables.  That's
1783	 * what the legacy allocator did.
1784	 */
1785	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
1786				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
1787				    pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
1788	if (rc < 0)
1789		panic("Failed to initialize percpu areas.");
1790
1791	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
1792	for_each_possible_cpu(cpu)
1793		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
1794}
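
/*
 * Illustrative note, not part of the original file: with
 * __per_cpu_offset[] initialized as above, the generic percpu
 * accessors reach cpu N's copy of a variable by adding
 * __per_cpu_offset[N] to the percpu pointer, roughly
 *
 *	SHIFT_PERCPU_PTR(ptr, __per_cpu_offset[N])
 *
 * in asm-generic/percpu.h terms.
 */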
1795#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
1796