percpu.c revision fb435d5233f8b6f9b93c11d6304d8e98fed03234
1/*
2 * linux/mm/percpu.c - percpu memory allocator
3 *
4 * Copyright (C) 2009		SUSE Linux Products GmbH
5 * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
6 *
7 * This file is released under the GPLv2.
8 *
9 * This is the percpu allocator which can handle both static and dynamic
10 * areas.  Percpu areas are allocated in chunks in the vmalloc area.  Each
11 * chunk consists of a boot-time determined number of units and the
12 * first chunk is used for static percpu variables in the kernel image
13 * (special boot time alloc/init handling is necessary as these areas
14 * need to be brought up before allocation services are running).
15 * Units grow as necessary and all units grow or shrink in unison.
16 * When a chunk is filled up, another chunk is allocated, i.e. in the
17 * vmalloc area:
18 *
19 *  c0                           c1                         c2
20 *  -------------------          -------------------        ------------
21 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
22 *  -------------------  ......  -------------------  ....  ------------
23 *
24 * Allocation is done by offset and size within a single unit's space.
25 * I.e., an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k in
26 * each of c1:u0, c1:u1, c1:u2 and c1:u3.  On UMA, units correspond
27 * directly to cpus.  On NUMA, the mapping can be non-linear and even sparse.
28 * Percpu access can be done by configuring percpu base registers
29 * according to cpu to unit mapping and pcpu_unit_size.
30 *
31 * There are usually many small percpu allocations, many of them as
32 * small as 4 bytes.  The allocator organizes chunks into lists
33 * according to their free size and tries to allocate from the fullest
34 * one.  Each chunk keeps a maximum contiguous area size hint which is
35 * guaranteed to be equal to or larger than the maximum contiguous
36 * area in the chunk.  This helps the allocator avoid iterating over
37 * chunk maps unnecessarily.
38 *
39 * Allocation state in each chunk is kept using an array of integers
40 * in chunk->map.  A positive value in the map represents a free
41 * region and a negative value an allocated one.  Allocation inside a
42 * chunk is done by scanning this map sequentially and serving the
43 * first matching entry.  This is mostly copied from the percpu_modalloc()
44 * allocator.  The chunk an address belongs to can be determined from the
45 * index field in the page struct, which contains a pointer to the chunk.
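 *
 * As an illustrative example (made-up values, not from a real chunk),
 * a map of { -512, 128, -64, 320 } describes a unit which starts with
 * a 512 byte allocated area at offset 0, followed by 128 free bytes, a
 * 64 byte allocated area and finally 320 free bytes.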
46 *
47 * To use this allocator, arch code should do the following:
48 *
49 * - drop CONFIG_HAVE_LEGACY_PER_CPU_AREA
50 *
51 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
52 *   regular address to percpu pointer and back if they need to be
53 *   different from the default
54 *
55 * - use pcpu_setup_first_chunk() during percpu area initialization to
56 *   setup the first chunk containing the kernel static percpu area
57 */
58
59#include <linux/bitmap.h>
60#include <linux/bootmem.h>
61#include <linux/err.h>
62#include <linux/list.h>
63#include <linux/log2.h>
64#include <linux/mm.h>
65#include <linux/module.h>
66#include <linux/mutex.h>
67#include <linux/percpu.h>
68#include <linux/pfn.h>
69#include <linux/slab.h>
70#include <linux/spinlock.h>
71#include <linux/vmalloc.h>
72#include <linux/workqueue.h>
73
74#include <asm/cacheflush.h>
75#include <asm/sections.h>
76#include <asm/tlbflush.h>
77
78#define PCPU_SLOT_BASE_SHIFT		5	/* 1-31 share the same slot */
79#define PCPU_DFL_MAP_ALLOC		16	/* start a map with 16 ents */
80
81/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
82#ifndef __addr_to_pcpu_ptr
83#define __addr_to_pcpu_ptr(addr)					\
84	(void *)((unsigned long)(addr) - (unsigned long)pcpu_base_addr	\
85		 + (unsigned long)__per_cpu_start)
86#endif
87#ifndef __pcpu_ptr_to_addr
88#define __pcpu_ptr_to_addr(ptr)						\
89	(void *)((unsigned long)(ptr) + (unsigned long)pcpu_base_addr	\
90		 - (unsigned long)__per_cpu_start)
91#endif
92
93struct pcpu_chunk {
94	struct list_head	list;		/* linked to pcpu_slot lists */
95	int			free_size;	/* free bytes in the chunk */
96	int			contig_hint;	/* max contiguous size hint */
97	struct vm_struct	*vm;		/* mapped vmalloc region */
98	int			map_used;	/* # of map entries used */
99	int			map_alloc;	/* # of map entries allocated */
100	int			*map;		/* allocation map */
101	bool			immutable;	/* no [de]population allowed */
102	unsigned long		populated[];	/* populated bitmap */
103};
104
105static int pcpu_unit_pages __read_mostly;
106static int pcpu_unit_size __read_mostly;
107static int pcpu_nr_units __read_mostly;
108static int pcpu_chunk_size __read_mostly;
109static int pcpu_nr_slots __read_mostly;
110static size_t pcpu_chunk_struct_size __read_mostly;
111
112/* cpus with the lowest and highest unit numbers */
113static unsigned int pcpu_first_unit_cpu __read_mostly;
114static unsigned int pcpu_last_unit_cpu __read_mostly;
115
116/* the address of the first chunk which starts with the kernel static area */
117void *pcpu_base_addr __read_mostly;
118EXPORT_SYMBOL_GPL(pcpu_base_addr);
119
120static const int *pcpu_unit_map __read_mostly;		/* cpu -> unit */
121const unsigned long *pcpu_unit_offsets __read_mostly;	/* cpu -> unit offset */
122
123/*
124 * The first chunk which always exists.  Note that unlike other
125 * chunks, this one can be allocated and mapped in several different
126 * ways and thus often doesn't live in the vmalloc area.
127 */
128static struct pcpu_chunk *pcpu_first_chunk;
129
130/*
131 * Optional reserved chunk.  This chunk reserves part of the first
132 * chunk and serves it for reserved allocations.  The offset limit of
133 * the reserved area is kept in pcpu_reserved_chunk_limit.  When the
134 * reserved area doesn't exist, the following variables contain NULL
135 * and 0 respectively.
136 */
137static struct pcpu_chunk *pcpu_reserved_chunk;
138static int pcpu_reserved_chunk_limit;
139
140/*
141 * Synchronization rules.
142 *
143 * There are two locks - pcpu_alloc_mutex and pcpu_lock.  The former
144 * protects allocation/reclaim paths, chunks, populated bitmap and
145 * vmalloc mapping.  The latter is a spinlock and protects the index
146 * data structures - chunk slots, chunks and area maps in chunks.
147 *
148 * During allocation, pcpu_alloc_mutex is kept locked all the time and
149 * pcpu_lock is grabbed and released as necessary.  All actual memory
150 * allocations are done using GFP_KERNEL with pcpu_lock released.
151 *
152 * Free path accesses and alters only the index data structures, so it
153 * can be safely called from atomic context.  When memory needs to be
154 * returned to the system, the free path schedules pcpu_reclaim_work,
155 * which grabs both pcpu_alloc_mutex and pcpu_lock, unlinks chunks to
156 * be reclaimed, releases both locks and frees the chunks.  Note that
157 * it's necessary to grab both locks to remove a chunk from circulation
158 * as the allocation path might be referencing the chunk with only
159 * pcpu_alloc_mutex locked.
160 */
161static DEFINE_MUTEX(pcpu_alloc_mutex);	/* protects whole alloc and reclaim */
162static DEFINE_SPINLOCK(pcpu_lock);	/* protects index data structures */
163
164static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
165
166/* reclaim work to release fully free chunks, scheduled from free path */
167static void pcpu_reclaim(struct work_struct *work);
168static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim);
169
170static int __pcpu_size_to_slot(int size)
171{
172	int highbit = fls(size);	/* size is in bytes */
173	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
174}
175
176static int pcpu_size_to_slot(int size)
177{
178	if (size == pcpu_unit_size)
179		return pcpu_nr_slots - 1;
180	return __pcpu_size_to_slot(size);
181}
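
/*
 * Example (illustrative, not in the original source): with
 * PCPU_SLOT_BASE_SHIFT == 5, a free size of 100 bytes has
 * fls(100) == 7 and therefore maps to slot 7 - 5 + 2 == 4, while a
 * fully free chunk (free_size == pcpu_unit_size) always goes to the
 * last slot, pcpu_nr_slots - 1.
 */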
182
183static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
184{
185	if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
186		return 0;
187
188	return pcpu_size_to_slot(chunk->free_size);
189}
190
191static int pcpu_page_idx(unsigned int cpu, int page_idx)
192{
193	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
194}
195
196static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
197				     unsigned int cpu, int page_idx)
198{
199	return (unsigned long)chunk->vm->addr + pcpu_unit_offsets[cpu] +
200		(page_idx << PAGE_SHIFT);
201}
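
/*
 * For example (illustrative values): with 4k pages, page 3 of the unit
 * belonging to a cpu whose pcpu_unit_offsets[cpu] is 0x20000 lives at
 * chunk->vm->addr + 0x20000 + (3 << PAGE_SHIFT), i.e. 0x23000 bytes
 * past the chunk base.
 */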
202
203static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk,
204				    unsigned int cpu, int page_idx)
205{
206	/* must not be used on pre-mapped chunk */
207	WARN_ON(chunk->immutable);
208
209	return vmalloc_to_page((void *)pcpu_chunk_addr(chunk, cpu, page_idx));
210}
211
212/* set the pointer to a chunk in a page struct */
213static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
214{
215	page->index = (unsigned long)pcpu;
216}
217
218/* obtain pointer to a chunk from a page struct */
219static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
220{
221	return (struct pcpu_chunk *)page->index;
222}
223
224static void pcpu_next_unpop(struct pcpu_chunk *chunk, int *rs, int *re, int end)
225{
226	*rs = find_next_zero_bit(chunk->populated, end, *rs);
227	*re = find_next_bit(chunk->populated, end, *rs + 1);
228}
229
230static void pcpu_next_pop(struct pcpu_chunk *chunk, int *rs, int *re, int end)
231{
232	*rs = find_next_bit(chunk->populated, end, *rs);
233	*re = find_next_zero_bit(chunk->populated, end, *rs + 1);
234}
235
236/*
237 * (Un)populated page region iterators.  Iterate over (un)populated
238 * page regions between @start and @end in @chunk.  @rs and @re should
239 * be integer variables and will be set to start and end page index of
240 * the current region.
241 */
242#define pcpu_for_each_unpop_region(chunk, rs, re, start, end)		    \
243	for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \
244	     (rs) < (re);						    \
245	     (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end)))
246
247#define pcpu_for_each_pop_region(chunk, rs, re, start, end)		    \
248	for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end));   \
249	     (rs) < (re);						    \
250	     (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))
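
/*
 * Usage sketch (illustrative only): walking every unpopulated page
 * region of a chunk, similar to what pcpu_populate_chunk() does:
 *
 *	int rs, re;
 *
 *	pcpu_for_each_unpop_region(chunk, rs, re, 0, pcpu_unit_pages)
 *		pr_debug("pages [%d,%d) need populating\n", rs, re);
 */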
251
252/**
253 * pcpu_mem_alloc - allocate memory
254 * @size: bytes to allocate
255 *
256 * Allocate @size bytes.  If @size is smaller than PAGE_SIZE,
257 * kzalloc() is used; otherwise, vmalloc() is used.  The returned
258 * memory is always zeroed.
259 *
260 * CONTEXT:
261 * Does GFP_KERNEL allocation.
262 *
263 * RETURNS:
264 * Pointer to the allocated area on success, NULL on failure.
265 */
266static void *pcpu_mem_alloc(size_t size)
267{
268	if (size <= PAGE_SIZE)
269		return kzalloc(size, GFP_KERNEL);
270	else {
271		void *ptr = vmalloc(size);
272		if (ptr)
273			memset(ptr, 0, size);
274		return ptr;
275	}
276}
277
278/**
279 * pcpu_mem_free - free memory
280 * @ptr: memory to free
281 * @size: size of the area
282 *
283 * Free @ptr.  @ptr should have been allocated using pcpu_mem_alloc().
284 */
285static void pcpu_mem_free(void *ptr, size_t size)
286{
287	if (size <= PAGE_SIZE)
288		kfree(ptr);
289	else
290		vfree(ptr);
291}
292
293/**
294 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
295 * @chunk: chunk of interest
296 * @oslot: the previous slot it was on
297 *
298 * This function is called after an allocation or free changed @chunk.
299 * New slot according to the changed state is determined and @chunk is
300 * moved to the slot.  Note that the reserved chunk is never put on
301 * chunk slots.
302 *
303 * CONTEXT:
304 * pcpu_lock.
305 */
306static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
307{
308	int nslot = pcpu_chunk_slot(chunk);
309
310	if (chunk != pcpu_reserved_chunk && oslot != nslot) {
311		if (oslot < nslot)
312			list_move(&chunk->list, &pcpu_slot[nslot]);
313		else
314			list_move_tail(&chunk->list, &pcpu_slot[nslot]);
315	}
316}
317
318/**
319 * pcpu_chunk_addr_search - determine chunk containing specified address
320 * @addr: address for which the chunk needs to be determined.
321 *
322 * RETURNS:
323 * The address of the found chunk.
324 */
325static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
326{
327	void *first_start = pcpu_first_chunk->vm->addr;
328
329	/* is it in the first chunk? */
330	if (addr >= first_start && addr < first_start + pcpu_unit_size) {
331		/* is it in the reserved area? */
332		if (addr < first_start + pcpu_reserved_chunk_limit)
333			return pcpu_reserved_chunk;
334		return pcpu_first_chunk;
335	}
336
337	/*
338	 * The address is relative to unit0 which might be unused and
339	 * thus unmapped.  Offset the address to the unit space of the
340	 * current processor before looking it up in the vmalloc
341	 * space.  Note that any possible cpu id can be used here, so
342	 * there's no need to worry about preemption or cpu hotplug.
343	 */
344	addr += pcpu_unit_offsets[smp_processor_id()];
345	return pcpu_get_page_chunk(vmalloc_to_page(addr));
346}
347
348/**
349 * pcpu_extend_area_map - extend area map for allocation
350 * @chunk: target chunk
351 *
352 * Extend area map of @chunk so that it can accommodate an allocation.
353 * A single allocation can split an area into three areas, so this
354 * function makes sure that @chunk->map has at least two extra slots.
355 *
356 * CONTEXT:
357 * pcpu_alloc_mutex, pcpu_lock.  pcpu_lock is released and reacquired
358 * if area map is extended.
359 *
360 * RETURNS:
361 * 0 if noop, 1 if successfully extended, -errno on failure.
362 */
363static int pcpu_extend_area_map(struct pcpu_chunk *chunk)
364{
365	int new_alloc;
366	int *new;
367	size_t size;
368
369	/* has enough? */
370	if (chunk->map_alloc >= chunk->map_used + 2)
371		return 0;
372
373	spin_unlock_irq(&pcpu_lock);
374
375	new_alloc = PCPU_DFL_MAP_ALLOC;
376	while (new_alloc < chunk->map_used + 2)
377		new_alloc *= 2;
378
379	new = pcpu_mem_alloc(new_alloc * sizeof(new[0]));
380	if (!new) {
381		spin_lock_irq(&pcpu_lock);
382		return -ENOMEM;
383	}
384
385	/*
386	 * Acquire pcpu_lock and switch to new area map.  Only free
387	 * could have happened in between, so map_used couldn't have
388	 * grown.
389	 */
390	spin_lock_irq(&pcpu_lock);
391	BUG_ON(new_alloc < chunk->map_used + 2);
392
393	size = chunk->map_alloc * sizeof(chunk->map[0]);
394	memcpy(new, chunk->map, size);
395
396	/*
397	 * map_alloc < PCPU_DFL_MAP_ALLOC indicates that the chunk is
398	 * one of the first chunks and still using static map.
399	 */
400	if (chunk->map_alloc >= PCPU_DFL_MAP_ALLOC)
401		pcpu_mem_free(chunk->map, size);
402
403	chunk->map_alloc = new_alloc;
404	chunk->map = new;
405	return 0;
406	return 1;	/* extended, pcpu_lock was dropped and reacquired */
407
408/**
409 * pcpu_split_block - split a map block
410 * @chunk: chunk of interest
411 * @i: index of map block to split
412 * @head: head size in bytes (can be 0)
413 * @tail: tail size in bytes (can be 0)
414 *
415 * Split the @i'th map block into two or three blocks.  If @head is
416 * non-zero, a block of @head bytes is inserted before block @i,
417 * moving it to @i+1 and reducing its size by @head bytes.
418 *
419 * If @tail is non-zero, the target block, which can be @i or @i+1
420 * depending on @head, is reduced by @tail bytes and a block of @tail
421 * bytes is inserted after the target block.
422 *
423 * @chunk->map must have enough free slots to accommodate the split.
424 *
425 * CONTEXT:
426 * pcpu_lock.
427 */
428static void pcpu_split_block(struct pcpu_chunk *chunk, int i,
429			     int head, int tail)
430{
431	int nr_extra = !!head + !!tail;
432
433	BUG_ON(chunk->map_alloc < chunk->map_used + nr_extra);
434
435	/* insert new subblocks */
436	memmove(&chunk->map[i + nr_extra], &chunk->map[i],
437		sizeof(chunk->map[0]) * (chunk->map_used - i));
438	chunk->map_used += nr_extra;
439
440	if (head) {
441		chunk->map[i + 1] = chunk->map[i] - head;
442		chunk->map[i++] = head;
443	}
444	if (tail) {
445		chunk->map[i++] -= tail;
446		chunk->map[i] = tail;
447	}
448}
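
/*
 * Illustrative example (not part of the original source): splitting a
 * 1024 byte free block with @head == 16 and @tail == 8 leaves three
 * free blocks of 16, 1000 and 8 bytes; the caller, pcpu_alloc_area(),
 * then marks the middle block as allocated.
 */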
449
450/**
451 * pcpu_alloc_area - allocate area from a pcpu_chunk
452 * @chunk: chunk of interest
453 * @size: wanted size in bytes
454 * @align: wanted align
455 *
456 * Try to allocate @size bytes area aligned at @align from @chunk.
457 * Note that this function only allocates the offset.  It doesn't
458 * populate or map the area.
459 *
460 * @chunk->map must have at least two free slots.
461 *
462 * CONTEXT:
463 * pcpu_lock.
464 *
465 * RETURNS:
466 * Allocated offset in @chunk on success, -1 if no matching area is
467 * found.
468 */
469static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
470{
471	int oslot = pcpu_chunk_slot(chunk);
472	int max_contig = 0;
473	int i, off;
474
475	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) {
476		bool is_last = i + 1 == chunk->map_used;
477		int head, tail;
478
479		/* extra for alignment requirement */
480		head = ALIGN(off, align) - off;
481		BUG_ON(i == 0 && head != 0);
482
483		if (chunk->map[i] < 0)
484			continue;
485		if (chunk->map[i] < head + size) {
486			max_contig = max(chunk->map[i], max_contig);
487			continue;
488		}
489
490		/*
491		 * If head is small or the previous block is free,
492		 * merge'em.  Note that 'small' is defined as smaller
493		 * than sizeof(int), which is very small but isn't too
494		 * uncommon for percpu allocations.
495		 */
496		if (head && (head < sizeof(int) || chunk->map[i - 1] > 0)) {
497			if (chunk->map[i - 1] > 0)
498				chunk->map[i - 1] += head;
499			else {
500				chunk->map[i - 1] -= head;
501				chunk->free_size -= head;
502			}
503			chunk->map[i] -= head;
504			off += head;
505			head = 0;
506		}
507
508		/* if tail is small, just keep it around */
509		tail = chunk->map[i] - head - size;
510		if (tail < sizeof(int))
511			tail = 0;
512
513		/* split if warranted */
514		if (head || tail) {
515			pcpu_split_block(chunk, i, head, tail);
516			if (head) {
517				i++;
518				off += head;
519				max_contig = max(chunk->map[i - 1], max_contig);
520			}
521			if (tail)
522				max_contig = max(chunk->map[i + 1], max_contig);
523		}
524
525		/* update hint and mark allocated */
526		if (is_last)
527			chunk->contig_hint = max_contig; /* fully scanned */
528		else
529			chunk->contig_hint = max(chunk->contig_hint,
530						 max_contig);
531
532		chunk->free_size -= chunk->map[i];
533		chunk->map[i] = -chunk->map[i];
534
535		pcpu_chunk_relocate(chunk, oslot);
536		return off;
537	}
538
539	chunk->contig_hint = max_contig;	/* fully scanned */
540	pcpu_chunk_relocate(chunk, oslot);
541
542	/* tell the upper layer that this chunk has no matching area */
543	return -1;
544}
545
546/**
547 * pcpu_free_area - free area to a pcpu_chunk
548 * @chunk: chunk of interest
549 * @freeme: offset of area to free
550 *
551 * Free area starting from @freeme to @chunk.  Note that this function
552 * only modifies the allocation map.  It doesn't depopulate or unmap
553 * the area.
554 *
555 * CONTEXT:
556 * pcpu_lock.
557 */
558static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
559{
560	int oslot = pcpu_chunk_slot(chunk);
561	int i, off;
562
563	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++]))
564		if (off == freeme)
565			break;
566	BUG_ON(off != freeme);
567	BUG_ON(chunk->map[i] > 0);
568
569	chunk->map[i] = -chunk->map[i];
570	chunk->free_size += chunk->map[i];
571
572	/* merge with previous? */
573	if (i > 0 && chunk->map[i - 1] >= 0) {
574		chunk->map[i - 1] += chunk->map[i];
575		chunk->map_used--;
576		memmove(&chunk->map[i], &chunk->map[i + 1],
577			(chunk->map_used - i) * sizeof(chunk->map[0]));
578		i--;
579	}
580	/* merge with next? */
581	if (i + 1 < chunk->map_used && chunk->map[i + 1] >= 0) {
582		chunk->map[i] += chunk->map[i + 1];
583		chunk->map_used--;
584		memmove(&chunk->map[i + 1], &chunk->map[i + 2],
585			(chunk->map_used - (i + 1)) * sizeof(chunk->map[0]));
586	}
587
588	chunk->contig_hint = max(chunk->map[i], chunk->contig_hint);
589	pcpu_chunk_relocate(chunk, oslot);
590}
591
592/**
593 * pcpu_get_pages_and_bitmap - get temp pages array and bitmap
594 * @chunk: chunk of interest
595 * @bitmapp: output parameter for bitmap
596 * @may_alloc: may allocate the array
597 *
598 * Returns pointer to array of pointers to struct page and bitmap,
599 * both of which can be indexed with pcpu_page_idx().  The returned
600 * array is cleared to zero and *@bitmapp is copied from
601 * @chunk->populated.  Note that there is only one array and bitmap
602 * and access exclusion is the caller's responsibility.
603 *
604 * CONTEXT:
605 * pcpu_alloc_mutex and does GFP_KERNEL allocation if @may_alloc.
606 * Otherwise, don't care.
607 *
608 * RETURNS:
609 * Pointer to temp pages array on success, NULL on failure.
610 */
611static struct page **pcpu_get_pages_and_bitmap(struct pcpu_chunk *chunk,
612					       unsigned long **bitmapp,
613					       bool may_alloc)
614{
615	static struct page **pages;
616	static unsigned long *bitmap;
617	size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]);
618	size_t bitmap_size = BITS_TO_LONGS(pcpu_unit_pages) *
619			     sizeof(unsigned long);
620
621	if (!pages || !bitmap) {
622		if (may_alloc && !pages)
623			pages = pcpu_mem_alloc(pages_size);
624		if (may_alloc && !bitmap)
625			bitmap = pcpu_mem_alloc(bitmap_size);
626		if (!pages || !bitmap)
627			return NULL;
628	}
629
630	memset(pages, 0, pages_size);
631	bitmap_copy(bitmap, chunk->populated, pcpu_unit_pages);
632
633	*bitmapp = bitmap;
634	return pages;
635}
636
637/**
638 * pcpu_free_pages - free pages which were allocated for @chunk
639 * @chunk: chunk pages were allocated for
640 * @pages: array of pages to be freed, indexed by pcpu_page_idx()
641 * @populated: populated bitmap
642 * @page_start: page index of the first page to be freed
643 * @page_end: page index of the last page to be freed + 1
644 *
645 * Free pages [@page_start, @page_end) in @pages for all units.
646 * The pages were allocated for @chunk.
647 */
648static void pcpu_free_pages(struct pcpu_chunk *chunk,
649			    struct page **pages, unsigned long *populated,
650			    int page_start, int page_end)
651{
652	unsigned int cpu;
653	int i;
654
655	for_each_possible_cpu(cpu) {
656		for (i = page_start; i < page_end; i++) {
657			struct page *page = pages[pcpu_page_idx(cpu, i)];
658
659			if (page)
660				__free_page(page);
661		}
662	}
663}
664
665/**
666 * pcpu_alloc_pages - allocate pages for @chunk
667 * @chunk: target chunk
668 * @pages: array to put the allocated pages into, indexed by pcpu_page_idx()
669 * @populated: populated bitmap
670 * @page_start: page index of the first page to be allocated
671 * @page_end: page index of the last page to be allocated + 1
672 *
673 * Allocate pages [@page_start,@page_end) into @pages for all units.
674 * The allocation is for @chunk.  Percpu core doesn't care about the
675 * content of @pages and will pass it verbatim to pcpu_map_pages().
676 */
677static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
678			    struct page **pages, unsigned long *populated,
679			    int page_start, int page_end)
680{
681	const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
682	unsigned int cpu;
683	int i;
684
685	for_each_possible_cpu(cpu) {
686		for (i = page_start; i < page_end; i++) {
687			struct page **pagep = &pages[pcpu_page_idx(cpu, i)];
688
689			*pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0);
690			if (!*pagep) {
691				pcpu_free_pages(chunk, pages, populated,
692						page_start, page_end);
693				return -ENOMEM;
694			}
695		}
696	}
697	return 0;
698}
699
700/**
701 * pcpu_pre_unmap_flush - flush cache prior to unmapping
702 * @chunk: chunk the regions to be flushed belong to
703 * @page_start: page index of the first page to be flushed
704 * @page_end: page index of the last page to be flushed + 1
705 *
706 * Pages in [@page_start,@page_end) of @chunk are about to be
707 * unmapped.  Flush cache.  As each flushing trial can be very
708 * expensive, issue flush on the whole region at once rather than
709 * doing it for each cpu.  This could be overkill but is more
710 * scalable.
711 */
712static void pcpu_pre_unmap_flush(struct pcpu_chunk *chunk,
713				 int page_start, int page_end)
714{
715	flush_cache_vunmap(
716		pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
717		pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
718}
719
720static void __pcpu_unmap_pages(unsigned long addr, int nr_pages)
721{
722	unmap_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT);
723}
724
725/**
726 * pcpu_unmap_pages - unmap pages out of a pcpu_chunk
727 * @chunk: chunk of interest
728 * @pages: pages array which can be used to pass information to free
729 * @populated: populated bitmap
730 * @page_start: page index of the first page to unmap
731 * @page_end: page index of the last page to unmap + 1
732 *
733 * For each cpu, unmap pages [@page_start,@page_end) out of @chunk.
734 * Corresponding elements in @pages were cleared by the caller and can
735 * be used to carry information to pcpu_free_pages() which will be
736 * called after all unmaps are finished.  The caller should call
737 * proper pre/post flush functions.
738 */
739static void pcpu_unmap_pages(struct pcpu_chunk *chunk,
740			     struct page **pages, unsigned long *populated,
741			     int page_start, int page_end)
742{
743	unsigned int cpu;
744	int i;
745
746	for_each_possible_cpu(cpu) {
747		for (i = page_start; i < page_end; i++) {
748			struct page *page;
749
750			page = pcpu_chunk_page(chunk, cpu, i);
751			WARN_ON(!page);
752			pages[pcpu_page_idx(cpu, i)] = page;
753		}
754		__pcpu_unmap_pages(pcpu_chunk_addr(chunk, cpu, page_start),
755				   page_end - page_start);
756	}
757
758	for (i = page_start; i < page_end; i++)
759		__clear_bit(i, populated);
760}
761
762/**
763 * pcpu_post_unmap_tlb_flush - flush TLB after unmapping
764 * @chunk: pcpu_chunk the regions to be flushed belong to
765 * @page_start: page index of the first page to be flushed
766 * @page_end: page index of the last page to be flushed + 1
767 *
768 * Pages [@page_start,@page_end) of @chunk have been unmapped.  Flush
769 * TLB for the regions.  This can be skipped if the area is to be
770 * returned to vmalloc as vmalloc will handle TLB flushing lazily.
771 *
772 * As with pcpu_pre_unmap_flush(), TLB flushing is also done at once
773 * for the whole region.
774 */
775static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
776				      int page_start, int page_end)
777{
778	flush_tlb_kernel_range(
779		pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
780		pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
781}
782
783static int __pcpu_map_pages(unsigned long addr, struct page **pages,
784			    int nr_pages)
785{
786	return map_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT,
787					PAGE_KERNEL, pages);
788}
789
790/**
791 * pcpu_map_pages - map pages into a pcpu_chunk
792 * @chunk: chunk of interest
793 * @pages: pages array containing pages to be mapped
794 * @populated: populated bitmap
795 * @page_start: page index of the first page to map
796 * @page_end: page index of the last page to map + 1
797 *
798 * For each cpu, map pages [@page_start,@page_end) into @chunk.  The
799 * caller is responsible for calling pcpu_post_map_flush() after all
800 * mappings are complete.
801 *
802 * This function is responsible for setting corresponding bits in
803 * @chunk->populated bitmap and whatever is necessary for reverse
804 * lookup (addr -> chunk).
805 */
806static int pcpu_map_pages(struct pcpu_chunk *chunk,
807			  struct page **pages, unsigned long *populated,
808			  int page_start, int page_end)
809{
810	unsigned int cpu, tcpu;
811	int i, err;
812
813	for_each_possible_cpu(cpu) {
814		err = __pcpu_map_pages(pcpu_chunk_addr(chunk, cpu, page_start),
815				       &pages[pcpu_page_idx(cpu, page_start)],
816				       page_end - page_start);
817		if (err < 0)
818			goto err;
819	}
820
821	/* mapping successful, link chunk and mark populated */
822	for (i = page_start; i < page_end; i++) {
823		for_each_possible_cpu(cpu)
824			pcpu_set_page_chunk(pages[pcpu_page_idx(cpu, i)],
825					    chunk);
826		__set_bit(i, populated);
827	}
828
829	return 0;
830
831err:
832	for_each_possible_cpu(tcpu) {
833		if (tcpu == cpu)
834			break;
835		__pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start),
836				   page_end - page_start);
837	}
838	return err;
839}
840
841/**
842 * pcpu_post_map_flush - flush cache after mapping
843 * @chunk: pcpu_chunk the regions to be flushed belong to
844 * @page_start: page index of the first page to be flushed
845 * @page_end: page index of the last page to be flushed + 1
846 *
847 * Pages [@page_start,@page_end) of @chunk have been mapped.  Flush
848 * cache.
849 *
850 * As with pcpu_pre_unmap_flush(), cache flushing is also done at once
851 * for the whole region.
852 */
853static void pcpu_post_map_flush(struct pcpu_chunk *chunk,
854				int page_start, int page_end)
855{
856	flush_cache_vmap(
857		pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
858		pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
859}
860
861/**
862 * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk
863 * @chunk: chunk to depopulate
864 * @off: offset to the area to depopulate
865 * @size: size of the area to depopulate in bytes
866 *
867 * For each cpu, depopulate and unmap pages [@page_start,@page_end)
868 * from @chunk.  The cache is flushed before unmapping; TLB flushing
869 * is left to vmalloc, which handles it lazily (see the comment in
870 * the function body).
871 *
872 * CONTEXT:
873 * pcpu_alloc_mutex.
874 */
875static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size)
876{
877	int page_start = PFN_DOWN(off);
878	int page_end = PFN_UP(off + size);
879	struct page **pages;
880	unsigned long *populated;
881	int rs, re;
882
883	/* quick path, check whether it's empty already */
884	pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
885		if (rs == page_start && re == page_end)
886			return;
887		break;
888	}
889
890	/* immutable chunks can't be depopulated */
891	WARN_ON(chunk->immutable);
892
893	/*
894	 * If control reaches here, there must have been at least one
895	 * successful population attempt so the temp pages array must
896	 * be available now.
897	 */
898	pages = pcpu_get_pages_and_bitmap(chunk, &populated, false);
899	BUG_ON(!pages);
900
901	/* unmap and free */
902	pcpu_pre_unmap_flush(chunk, page_start, page_end);
903
904	pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end)
905		pcpu_unmap_pages(chunk, pages, populated, rs, re);
906
907	/* no need to flush tlb, vmalloc will handle it lazily */
908
909	pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end)
910		pcpu_free_pages(chunk, pages, populated, rs, re);
911
912	/* commit new bitmap */
913	bitmap_copy(chunk->populated, populated, pcpu_unit_pages);
914}
915
916/**
917 * pcpu_populate_chunk - populate and map an area of a pcpu_chunk
918 * @chunk: chunk of interest
919 * @off: offset to the area to populate
920 * @size: size of the area to populate in bytes
921 *
922 * For each cpu, populate and map pages [@page_start,@page_end) into
923 * @chunk.  The area is cleared on return.
924 *
925 * CONTEXT:
926 * pcpu_alloc_mutex, does GFP_KERNEL allocation.
927 */
928static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
929{
930	int page_start = PFN_DOWN(off);
931	int page_end = PFN_UP(off + size);
932	int free_end = page_start, unmap_end = page_start;
933	struct page **pages;
934	unsigned long *populated;
935	unsigned int cpu;
936	int rs, re, rc;
937
938	/* quick path, check whether all pages are already there */
939	pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end) {
940		if (rs == page_start && re == page_end)
941			goto clear;
942		break;
943	}
944
945	/* need to allocate and map pages, this chunk can't be immutable */
946	WARN_ON(chunk->immutable);
947
948	pages = pcpu_get_pages_and_bitmap(chunk, &populated, true);
949	if (!pages)
950		return -ENOMEM;
951
952	/* alloc and map */
953	pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
954		rc = pcpu_alloc_pages(chunk, pages, populated, rs, re);
955		if (rc)
956			goto err_free;
957		free_end = re;
958	}
959
960	pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
961		rc = pcpu_map_pages(chunk, pages, populated, rs, re);
962		if (rc)
963			goto err_unmap;
964		unmap_end = re;
965	}
966	pcpu_post_map_flush(chunk, page_start, page_end);
967
968	/* commit new bitmap */
969	bitmap_copy(chunk->populated, populated, pcpu_unit_pages);
970clear:
971	for_each_possible_cpu(cpu)
972		memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
973	return 0;
974
975err_unmap:
976	pcpu_pre_unmap_flush(chunk, page_start, unmap_end);
977	pcpu_for_each_unpop_region(chunk, rs, re, page_start, unmap_end)
978		pcpu_unmap_pages(chunk, pages, populated, rs, re);
979	pcpu_post_unmap_tlb_flush(chunk, page_start, unmap_end);
980err_free:
981	pcpu_for_each_unpop_region(chunk, rs, re, page_start, free_end)
982		pcpu_free_pages(chunk, pages, populated, rs, re);
983	return rc;
984}
985
986static void free_pcpu_chunk(struct pcpu_chunk *chunk)
987{
988	if (!chunk)
989		return;
990	if (chunk->vm)
991		free_vm_area(chunk->vm);
992	pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]));
993	kfree(chunk);
994}
995
996static struct pcpu_chunk *alloc_pcpu_chunk(void)
997{
998	struct pcpu_chunk *chunk;
999
1000	chunk = kzalloc(pcpu_chunk_struct_size, GFP_KERNEL);
1001	if (!chunk)
1002		return NULL;
1003
1004	chunk->map = pcpu_mem_alloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0]));
1005	chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
1006	chunk->map[chunk->map_used++] = pcpu_unit_size;
1007
1008	chunk->vm = get_vm_area(pcpu_chunk_size, VM_ALLOC);
1009	if (!chunk->vm) {
1010		free_pcpu_chunk(chunk);
1011		return NULL;
1012	}
1013
1014	INIT_LIST_HEAD(&chunk->list);
1015	chunk->free_size = pcpu_unit_size;
1016	chunk->contig_hint = pcpu_unit_size;
1017
1018	return chunk;
1019}
1020
1021/**
1022 * pcpu_alloc - the percpu allocator
1023 * @size: size of area to allocate in bytes
1024 * @align: alignment of area (max PAGE_SIZE)
1025 * @reserved: allocate from the reserved chunk if available
1026 *
1027 * Allocate percpu area of @size bytes aligned at @align.
1028 *
1029 * CONTEXT:
1030 * Does GFP_KERNEL allocation.
1031 *
1032 * RETURNS:
1033 * Percpu pointer to the allocated area on success, NULL on failure.
1034 */
1035static void *pcpu_alloc(size_t size, size_t align, bool reserved)
1036{
1037	struct pcpu_chunk *chunk;
1038	int slot, off;
1039
1040	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
1041		WARN(true, "illegal size (%zu) or align (%zu) for "
1042		     "percpu allocation\n", size, align);
1043		return NULL;
1044	}
1045
1046	mutex_lock(&pcpu_alloc_mutex);
1047	spin_lock_irq(&pcpu_lock);
1048
1049	/* serve reserved allocations from the reserved chunk if available */
1050	if (reserved && pcpu_reserved_chunk) {
1051		chunk = pcpu_reserved_chunk;
1052		if (size > chunk->contig_hint ||
1053		    pcpu_extend_area_map(chunk) < 0)
1054			goto fail_unlock;
1055		off = pcpu_alloc_area(chunk, size, align);
1056		if (off >= 0)
1057			goto area_found;
1058		goto fail_unlock;
1059	}
1060
1061restart:
1062	/* search through normal chunks */
1063	for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
1064		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
1065			if (size > chunk->contig_hint)
1066				continue;
1067
1068			switch (pcpu_extend_area_map(chunk)) {
1069			case 0:
1070				break;
1071			case 1:
1072				goto restart;	/* pcpu_lock dropped, restart */
1073			default:
1074				goto fail_unlock;
1075			}
1076
1077			off = pcpu_alloc_area(chunk, size, align);
1078			if (off >= 0)
1079				goto area_found;
1080		}
1081	}
1082
1083	/* hmmm... no space left, create a new chunk */
1084	spin_unlock_irq(&pcpu_lock);
1085
1086	chunk = alloc_pcpu_chunk();
1087	if (!chunk)
1088		goto fail_unlock_mutex;
1089
1090	spin_lock_irq(&pcpu_lock);
1091	pcpu_chunk_relocate(chunk, -1);
1092	goto restart;
1093
1094area_found:
1095	spin_unlock_irq(&pcpu_lock);
1096
1097	/* populate, map and clear the area */
1098	if (pcpu_populate_chunk(chunk, off, size)) {
1099		spin_lock_irq(&pcpu_lock);
1100		pcpu_free_area(chunk, off);
1101		goto fail_unlock;
1102	}
1103
1104	mutex_unlock(&pcpu_alloc_mutex);
1105
1106	/* return address relative to unit0 */
1107	return __addr_to_pcpu_ptr(chunk->vm->addr + off);
1108
1109fail_unlock:
1110	spin_unlock_irq(&pcpu_lock);
1111fail_unlock_mutex:
1112	mutex_unlock(&pcpu_alloc_mutex);
1113	return NULL;
1114}
1115
1116/**
1117 * __alloc_percpu - allocate dynamic percpu area
1118 * @size: size of area to allocate in bytes
1119 * @align: alignment of area (max PAGE_SIZE)
1120 *
1121 * Allocate percpu area of @size bytes aligned at @align.  Might
1122 * sleep.  Might trigger writeouts.
1123 *
1124 * CONTEXT:
1125 * Does GFP_KERNEL allocation.
1126 *
1127 * RETURNS:
1128 * Percpu pointer to the allocated area on success, NULL on failure.
1129 */
1130void *__alloc_percpu(size_t size, size_t align)
1131{
1132	return pcpu_alloc(size, align, false);
1133}
1134EXPORT_SYMBOL_GPL(__alloc_percpu);
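
/*
 * Minimal usage sketch (illustrative only, error handling trimmed):
 *
 *	int *cnt = __alloc_percpu(sizeof(int), __alignof__(int));
 *	int cpu;
 *
 *	if (cnt) {
 *		for_each_possible_cpu(cpu)
 *			*per_cpu_ptr(cnt, cpu) = cpu;
 *		free_percpu(cnt);
 *	}
 *
 * Most users go through the alloc_percpu(type) helper in
 * <linux/percpu.h>, which fills in the size and alignment arguments.
 */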
1135
1136/**
1137 * __alloc_reserved_percpu - allocate reserved percpu area
1138 * @size: size of area to allocate in bytes
1139 * @align: alignment of area (max PAGE_SIZE)
1140 *
1141 * Allocate percpu area of @size bytes aligned at @align from reserved
1142 * percpu area if arch has set it up; otherwise, allocation is served
1143 * from the same dynamic area.  Might sleep.  Might trigger writeouts.
1144 *
1145 * CONTEXT:
1146 * Does GFP_KERNEL allocation.
1147 *
1148 * RETURNS:
1149 * Percpu pointer to the allocated area on success, NULL on failure.
1150 */
1151void *__alloc_reserved_percpu(size_t size, size_t align)
1152{
1153	return pcpu_alloc(size, align, true);
1154}
1155
1156/**
1157 * pcpu_reclaim - reclaim fully free chunks, workqueue function
1158 * @work: unused
1159 *
1160 * Reclaim all fully free chunks except for the first one.
1161 *
1162 * CONTEXT:
1163 * workqueue context.
1164 */
1165static void pcpu_reclaim(struct work_struct *work)
1166{
1167	LIST_HEAD(todo);
1168	struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1];
1169	struct pcpu_chunk *chunk, *next;
1170
1171	mutex_lock(&pcpu_alloc_mutex);
1172	spin_lock_irq(&pcpu_lock);
1173
1174	list_for_each_entry_safe(chunk, next, head, list) {
1175		WARN_ON(chunk->immutable);
1176
1177		/* spare the first one */
1178		if (chunk == list_first_entry(head, struct pcpu_chunk, list))
1179			continue;
1180
1181		list_move(&chunk->list, &todo);
1182	}
1183
1184	spin_unlock_irq(&pcpu_lock);
1185
1186	list_for_each_entry_safe(chunk, next, &todo, list) {
1187		pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size);
1188		free_pcpu_chunk(chunk);
1189	}
1190
1191	mutex_unlock(&pcpu_alloc_mutex);
1192}
1193
1194/**
1195 * free_percpu - free percpu area
1196 * @ptr: pointer to area to free
1197 *
1198 * Free percpu area @ptr.
1199 *
1200 * CONTEXT:
1201 * Can be called from atomic context.
1202 */
1203void free_percpu(void *ptr)
1204{
1205	void *addr = __pcpu_ptr_to_addr(ptr);
1206	struct pcpu_chunk *chunk;
1207	unsigned long flags;
1208	int off;
1209
1210	if (!ptr)
1211		return;
1212
1213	spin_lock_irqsave(&pcpu_lock, flags);
1214
1215	chunk = pcpu_chunk_addr_search(addr);
1216	off = addr - chunk->vm->addr;
1217
1218	pcpu_free_area(chunk, off);
1219
1220	/* if there is more than one fully free chunk, wake up the grim reaper */
1221	if (chunk->free_size == pcpu_unit_size) {
1222		struct pcpu_chunk *pos;
1223
1224		list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
1225			if (pos != chunk) {
1226				schedule_work(&pcpu_reclaim_work);
1227				break;
1228			}
1229	}
1230
1231	spin_unlock_irqrestore(&pcpu_lock, flags);
1232}
1233EXPORT_SYMBOL_GPL(free_percpu);
1234
1235static inline size_t pcpu_calc_fc_sizes(size_t static_size,
1236					size_t reserved_size,
1237					ssize_t *dyn_sizep)
1238{
1239	size_t size_sum;
1240
1241	size_sum = PFN_ALIGN(static_size + reserved_size +
1242			     (*dyn_sizep >= 0 ? *dyn_sizep : 0));
1243	if (*dyn_sizep != 0)
1244		*dyn_sizep = size_sum - static_size - reserved_size;
1245
1246	return size_sum;
1247}
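
/*
 * Worked example (illustrative numbers): with a 4k PAGE_SIZE, a static
 * size of 4000 bytes, no reserved area and a requested dynamic size of
 * 20000 bytes, size_sum becomes PFN_ALIGN(24000) == 24576 and
 * *dyn_sizep is bumped to 24576 - 4000 - 0 == 20576 so that the unit
 * ends on a page boundary.
 */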
1248
1249/**
1250 * pcpu_alloc_alloc_info - allocate percpu allocation info
1251 * @nr_groups: the number of groups
1252 * @nr_units: the number of units
1253 *
1254 * Allocate ai which is large enough for @nr_groups groups containing
1255 * @nr_units units.  The returned ai's groups[0].cpu_map points to the
1256 * cpu_map array which is long enough for @nr_units and filled with
1257 * NR_CPUS.  It's the caller's responsibility to initialize cpu_map
1258 * pointer of other groups.
1259 *
1260 * RETURNS:
1261 * Pointer to the allocated pcpu_alloc_info on success, NULL on
1262 * failure.
1263 */
1264struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
1265						      int nr_units)
1266{
1267	struct pcpu_alloc_info *ai;
1268	size_t base_size, ai_size;
1269	void *ptr;
1270	int unit;
1271
1272	base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]),
1273			  __alignof__(ai->groups[0].cpu_map[0]));
1274	ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
1275
1276	ptr = alloc_bootmem_nopanic(PFN_ALIGN(ai_size));
1277	if (!ptr)
1278		return NULL;
1279	ai = ptr;
1280	ptr += base_size;
1281
1282	ai->groups[0].cpu_map = ptr;
1283
1284	for (unit = 0; unit < nr_units; unit++)
1285		ai->groups[0].cpu_map[unit] = NR_CPUS;
1286
1287	ai->nr_groups = nr_groups;
1288	ai->__ai_size = PFN_ALIGN(ai_size);
1289
1290	return ai;
1291}
1292
1293/**
1294 * pcpu_free_alloc_info - free percpu allocation info
1295 * @ai: pcpu_alloc_info to free
1296 *
1297 * Free @ai which was allocated by pcpu_alloc_alloc_info().
1298 */
1299void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
1300{
1301	free_bootmem(__pa(ai), ai->__ai_size);
1302}
1303
1304/**
1305 * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
1306 * @reserved_size: the size of reserved percpu area in bytes
1307 * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
1308 * @atom_size: allocation atom size
1309 * @cpu_distance_fn: callback to determine distance between cpus, optional
1310 *
1311 * This function determines grouping of units, their mappings to cpus
1312 * and other parameters considering needed percpu size, allocation
1313 * atom size and distances between CPUs.
1314 *
1315 * Groups are always multiples of atom size and CPUs which are of
1316 * LOCAL_DISTANCE both ways are grouped together and share space for
1317 * units in the same group.  The returned configuration is guaranteed
1318 * to have CPUs on different nodes in different groups and >=75% usage
1319 * of allocated virtual address space.
1320 *
1321 * RETURNS:
1322 * On success, pointer to the new allocation_info is returned.  On
1323 * failure, ERR_PTR value is returned.
1324 */
1325struct pcpu_alloc_info * __init pcpu_build_alloc_info(
1326				size_t reserved_size, ssize_t dyn_size,
1327				size_t atom_size,
1328				pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
1329{
1330	static int group_map[NR_CPUS] __initdata;
1331	static int group_cnt[NR_CPUS] __initdata;
1332	const size_t static_size = __per_cpu_end - __per_cpu_start;
1333	int group_cnt_max = 0, nr_groups = 1, nr_units = 0;
1334	size_t size_sum, min_unit_size, alloc_size;
1335	int upa, max_upa, uninitialized_var(best_upa);	/* units_per_alloc */
1336	int last_allocs, group, unit;
1337	unsigned int cpu, tcpu;
1338	struct pcpu_alloc_info *ai;
1339	unsigned int *cpu_map;
1340
1341	/*
1342	 * Determine min_unit_size, alloc_size and max_upa such that
1343	 * alloc_size is multiple of atom_size and is the smallest
1344	 * which can accommodate 4k aligned segments which are equal to
1345	 * or larger than min_unit_size.
1346	 */
1347	size_sum = pcpu_calc_fc_sizes(static_size, reserved_size, &dyn_size);
1348	min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
1349
1350	alloc_size = roundup(min_unit_size, atom_size);
1351	upa = alloc_size / min_unit_size;
1352	while (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
1353		upa--;
1354	max_upa = upa;
1355
1356	/* group cpus according to their proximity */
1357	for_each_possible_cpu(cpu) {
1358		group = 0;
1359	next_group:
1360		for_each_possible_cpu(tcpu) {
1361			if (cpu == tcpu)
1362				break;
1363			if (group_map[tcpu] == group && cpu_distance_fn &&
1364			    (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
1365			     cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
1366				group++;
1367				nr_groups = max(nr_groups, group + 1);
1368				goto next_group;
1369			}
1370		}
1371		group_map[cpu] = group;
1372		group_cnt[group]++;
1373		group_cnt_max = max(group_cnt_max, group_cnt[group]);
1374	}
1375
1376	/*
1377	 * Expand unit size until address space usage goes over 75%
1378	 * and then as much as possible without using more address
1379	 * space.
1380	 */
1381	last_allocs = INT_MAX;
1382	for (upa = max_upa; upa; upa--) {
1383		int allocs = 0, wasted = 0;
1384
1385		if (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
1386			continue;
1387
1388		for (group = 0; group < nr_groups; group++) {
1389			int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
1390			allocs += this_allocs;
1391			wasted += this_allocs * upa - group_cnt[group];
1392		}
1393
1394		/*
1395		 * Don't accept if wastage is over 25%.  The
1396		 * greater-than comparison ensures upa==1 always
1397		 * passes the following check.
1398		 */
1399		if (wasted > num_possible_cpus() / 3)
1400			continue;
1401
1402		/* and then don't consume more memory */
1403		if (allocs > last_allocs)
1404			break;
1405		last_allocs = allocs;
1406		best_upa = upa;
1407	}
1408	upa = best_upa;
1409
1410	/* allocate and fill alloc_info */
1411	for (group = 0; group < nr_groups; group++)
1412		nr_units += roundup(group_cnt[group], upa);
1413
1414	ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
1415	if (!ai)
1416		return ERR_PTR(-ENOMEM);
1417	cpu_map = ai->groups[0].cpu_map;
1418
1419	for (group = 0; group < nr_groups; group++) {
1420		ai->groups[group].cpu_map = cpu_map;
1421		cpu_map += roundup(group_cnt[group], upa);
1422	}
1423
1424	ai->static_size = static_size;
1425	ai->reserved_size = reserved_size;
1426	ai->dyn_size = dyn_size;
1427	ai->unit_size = alloc_size / upa;
1428	ai->atom_size = atom_size;
1429	ai->alloc_size = alloc_size;
1430
1431	for (group = 0, unit = 0; group_cnt[group]; group++) {
1432		struct pcpu_group_info *gi = &ai->groups[group];
1433
1434		/*
1435		 * Initialize base_offset as if all groups are located
1436		 * back-to-back.  The caller should update this to
1437		 * reflect actual allocation.
1438		 */
1439		gi->base_offset = unit * ai->unit_size;
1440
1441		for_each_possible_cpu(cpu)
1442			if (group_map[cpu] == group)
1443				gi->cpu_map[gi->nr_units++] = cpu;
1444		gi->nr_units = roundup(gi->nr_units, upa);
1445		unit += gi->nr_units;
1446	}
1447	BUG_ON(unit != nr_units);
1448
1449	return ai;
1450}
1451
1452/**
1453 * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
1454 * @lvl: loglevel
1455 * @ai: allocation info to dump
1456 *
1457 * Print out information about @ai using loglevel @lvl.
1458 */
1459static void pcpu_dump_alloc_info(const char *lvl,
1460				 const struct pcpu_alloc_info *ai)
1461{
1462	int group_width = 1, cpu_width = 1, width;
1463	char empty_str[] = "--------";
1464	int alloc = 0, alloc_end = 0;
1465	int group, v;
1466	int upa, apl;	/* units per alloc, allocs per line */
1467
1468	v = ai->nr_groups;
1469	while (v /= 10)
1470		group_width++;
1471
1472	v = num_possible_cpus();
1473	while (v /= 10)
1474		cpu_width++;
1475	empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';
1476
1477	upa = ai->alloc_size / ai->unit_size;
1478	width = upa * (cpu_width + 1) + group_width + 3;
1479	apl = rounddown_pow_of_two(max(60 / width, 1));
1480
1481	printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
1482	       lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
1483	       ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);
1484
1485	for (group = 0; group < ai->nr_groups; group++) {
1486		const struct pcpu_group_info *gi = &ai->groups[group];
1487		int unit = 0, unit_end = 0;
1488
1489		BUG_ON(gi->nr_units % upa);
1490		for (alloc_end += gi->nr_units / upa;
1491		     alloc < alloc_end; alloc++) {
1492			if (!(alloc % apl)) {
1493				printk("\n");
1494				printk("%spcpu-alloc: ", lvl);
1495			}
1496			printk("[%0*d] ", group_width, group);
1497
1498			for (unit_end += upa; unit < unit_end; unit++)
1499				if (gi->cpu_map[unit] != NR_CPUS)
1500					printk("%0*d ", cpu_width,
1501					       gi->cpu_map[unit]);
1502				else
1503					printk("%s ", empty_str);
1504		}
1505	}
1506	printk("\n");
1507}
1508
1509/**
1510 * pcpu_setup_first_chunk - initialize the first percpu chunk
1511 * @ai: pcpu_alloc_info describing how the percpu area is shaped
1512 * @base_addr: mapped address
1513 *
1514 * Initialize the first percpu chunk which contains the kernel static
1515 * percpu area.  This function is to be called from the arch percpu area
1516 * setup path.
1517 *
1518 * @ai contains all information necessary to initialize the first
1519 * chunk and prime the dynamic percpu allocator.
1520 *
1521 * @ai->static_size is the size of static percpu area.
1522 *
1523 * @ai->reserved_size, if non-zero, specifies the amount of bytes to
1524 * reserve after the static area in the first chunk.  This reserves
1525 * the first chunk such that it's available only through reserved
1526 * percpu allocation.  This is primarily used to serve module percpu
1527 * static areas on architectures where the addressing model has
1528 * limited offset range for symbol relocations to guarantee module
1529 * percpu symbols fall inside the relocatable range.
1530 *
1531 * @ai->dyn_size determines the number of bytes available for dynamic
1532 * allocation in the first chunk.  The area between @ai->static_size +
1533 * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
1534 *
1535 * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
1536 * and equal to or larger than @ai->static_size + @ai->reserved_size +
1537 * @ai->dyn_size.
1538 *
1539 * @ai->atom_size is the allocation atom size and used as alignment
1540 * for vm areas.
1541 *
1542 * @ai->alloc_size is the allocation size and always multiple of
1543 * @ai->atom_size.  This is larger than @ai->atom_size if
1544 * @ai->unit_size is larger than @ai->atom_size.
1545 *
1546 * @ai->nr_groups and @ai->groups describe virtual memory layout of
1547 * percpu areas.  Units which should be colocated are put into the
1548 * same group.  Dynamic VM areas will be allocated according to these
1549 * groupings.  If @ai->nr_groups is zero, a single group containing
1550 * all units is assumed.
1551 *
1552 * The caller should have mapped the first chunk at @base_addr and
1553 * copied static data to each unit.
1554 *
1555 * If the first chunk ends up with both reserved and dynamic areas, it
1556 * is served by two chunks - one to serve the core static and reserved
1557 * areas and the other for the dynamic area.  They share the same vm
1558 * and page map but use different area allocation maps to stay away
1559 * from each other.  The latter chunk is circulated in the chunk slots
1560 * and available for dynamic allocation like any other chunks.
1561 *
1562 * RETURNS:
1563 * 0 on success, -errno on failure.
1564 */
1565int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
1566				  void *base_addr)
1567{
1568	static struct vm_struct first_vm;
1569	static int smap[2], dmap[2];
1570	size_t dyn_size = ai->dyn_size;
1571	size_t size_sum = ai->static_size + ai->reserved_size + dyn_size;
1572	struct pcpu_chunk *schunk, *dchunk = NULL;
1573	unsigned long *unit_off;
1574	unsigned int cpu;
1575	int *unit_map;
1576	int group, unit, i;
1577
1578	/* sanity checks */
1579	BUILD_BUG_ON(ARRAY_SIZE(smap) >= PCPU_DFL_MAP_ALLOC ||
1580		     ARRAY_SIZE(dmap) >= PCPU_DFL_MAP_ALLOC);
1581	BUG_ON(ai->nr_groups <= 0);
1582	BUG_ON(!ai->static_size);
1583	BUG_ON(!base_addr);
1584	BUG_ON(ai->unit_size < size_sum);
1585	BUG_ON(ai->unit_size & ~PAGE_MASK);
1586	BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
1587
1588	pcpu_dump_alloc_info(KERN_DEBUG, ai);
1589
1590	/* determine number of units and initialize unit_map and base */
1591	unit_map = alloc_bootmem(nr_cpu_ids * sizeof(unit_map[0]));
1592	unit_off = alloc_bootmem(nr_cpu_ids * sizeof(unit_off[0]));
1593
1594	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
1595		unit_map[cpu] = NR_CPUS;
1596	pcpu_first_unit_cpu = NR_CPUS;
1597
1598	for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
1599		const struct pcpu_group_info *gi = &ai->groups[group];
1600
1601		for (i = 0; i < gi->nr_units; i++) {
1602			cpu = gi->cpu_map[i];
1603			if (cpu == NR_CPUS)
1604				continue;
1605
1606			BUG_ON(cpu > nr_cpu_ids || !cpu_possible(cpu));
1607			BUG_ON(unit_map[cpu] != NR_CPUS);
1608
1609			unit_map[cpu] = unit + i;
1610			unit_off[cpu] = gi->base_offset + i * ai->unit_size;
1611
1612			if (pcpu_first_unit_cpu == NR_CPUS)
1613				pcpu_first_unit_cpu = cpu;
1614		}
1615	}
1616	pcpu_last_unit_cpu = cpu;
1617	pcpu_nr_units = unit;
1618
1619	for_each_possible_cpu(cpu)
1620		BUG_ON(unit_map[cpu] == NR_CPUS);
1621
1622	pcpu_unit_map = unit_map;
1623	pcpu_unit_offsets = unit_off;
1624
1625	/* determine basic parameters */
1626	pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
1627	pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
1628	pcpu_chunk_size = pcpu_nr_units * pcpu_unit_size;
1629	pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) +
1630		BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long);
1631
1632	first_vm.flags = VM_ALLOC;
1633	first_vm.size = pcpu_chunk_size;
1634	first_vm.addr = base_addr;
1635
1636	/*
1637	 * Allocate chunk slots.  The additional last slot is for
1638	 * empty chunks.
1639	 */
1640	pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
1641	pcpu_slot = alloc_bootmem(pcpu_nr_slots * sizeof(pcpu_slot[0]));
1642	for (i = 0; i < pcpu_nr_slots; i++)
1643		INIT_LIST_HEAD(&pcpu_slot[i]);
1644
1645	/*
1646	 * Initialize static chunk.  If reserved_size is zero, the
1647	 * static chunk covers static area + dynamic allocation area
1648	 * in the first chunk.  If reserved_size is not zero, it
1649	 * covers static area + reserved area (mostly used for module
1650	 * static percpu allocation).
1651	 */
1652	schunk = alloc_bootmem(pcpu_chunk_struct_size);
1653	INIT_LIST_HEAD(&schunk->list);
1654	schunk->vm = &first_vm;
1655	schunk->map = smap;
1656	schunk->map_alloc = ARRAY_SIZE(smap);
1657	schunk->immutable = true;
1658	bitmap_fill(schunk->populated, pcpu_unit_pages);
1659
1660	if (ai->reserved_size) {
1661		schunk->free_size = ai->reserved_size;
1662		pcpu_reserved_chunk = schunk;
1663		pcpu_reserved_chunk_limit = ai->static_size + ai->reserved_size;
1664	} else {
1665		schunk->free_size = dyn_size;
1666		dyn_size = 0;			/* dynamic area covered */
1667	}
1668	schunk->contig_hint = schunk->free_size;
1669
1670	schunk->map[schunk->map_used++] = -ai->static_size;
1671	if (schunk->free_size)
1672		schunk->map[schunk->map_used++] = schunk->free_size;
1673
1674	/* init dynamic chunk if necessary */
1675	if (dyn_size) {
1676		dchunk = alloc_bootmem(pcpu_chunk_struct_size);
1677		INIT_LIST_HEAD(&dchunk->list);
1678		dchunk->vm = &first_vm;
1679		dchunk->map = dmap;
1680		dchunk->map_alloc = ARRAY_SIZE(dmap);
1681		dchunk->immutable = true;
1682		bitmap_fill(dchunk->populated, pcpu_unit_pages);
1683
1684		dchunk->contig_hint = dchunk->free_size = dyn_size;
1685		dchunk->map[dchunk->map_used++] = -pcpu_reserved_chunk_limit;
1686		dchunk->map[dchunk->map_used++] = dchunk->free_size;
1687	}
1688
1689	/* link the first chunk in */
1690	pcpu_first_chunk = dchunk ?: schunk;
1691	pcpu_chunk_relocate(pcpu_first_chunk, -1);
1692
1693	/* we're done */
1694	pcpu_base_addr = schunk->vm->addr;
1695	return 0;
1696}
1697
1698const char *pcpu_fc_names[PCPU_FC_NR] __initdata = {
1699	[PCPU_FC_AUTO]	= "auto",
1700	[PCPU_FC_EMBED]	= "embed",
1701	[PCPU_FC_PAGE]	= "page",
1702	[PCPU_FC_LPAGE]	= "lpage",
1703};
1704
1705enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;
1706
1707static int __init percpu_alloc_setup(char *str)
1708{
1709	if (0)
1710		/* nada */;
1711#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
1712	else if (!strcmp(str, "embed"))
1713		pcpu_chosen_fc = PCPU_FC_EMBED;
1714#endif
1715#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
1716	else if (!strcmp(str, "page"))
1717		pcpu_chosen_fc = PCPU_FC_PAGE;
1718#endif
1719#ifdef CONFIG_NEED_PER_CPU_LPAGE_FIRST_CHUNK
1720	else if (!strcmp(str, "lpage"))
1721		pcpu_chosen_fc = PCPU_FC_LPAGE;
1722#endif
1723	else
1724		pr_warning("PERCPU: unknown allocator %s specified\n", str);
1725
1726	return 0;
1727}
1728early_param("percpu_alloc", percpu_alloc_setup);
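
/*
 * Example: booting with "percpu_alloc=page" on the kernel command line
 * overrides PCPU_FC_AUTO and forces the page-based first chunk
 * allocator; "embed" and "lpage" work the same way.  A value whose
 * CONFIG_NEED_PER_CPU_*_FIRST_CHUNK option isn't enabled, or an unknown
 * value, only triggers the warning above and leaves the default intact.
 */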
1729
1730#if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
1731	!defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
1732/**
1733 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
1734 * @reserved_size: the size of reserved percpu area in bytes
1735 * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
1736 *
1737 * This is a helper to ease setting up embedded first percpu chunk and
1738 * can be called where pcpu_setup_first_chunk() is expected.
1739 *
1740	 * If this function is used to set up the first chunk, it is allocated
1741	 * as a contiguous area using the bootmem allocator and used as-is
1742	 * without being mapped into the vmalloc area.  This enables the first
1743	 * chunk to piggyback on the linear physical mapping, which often uses
1744	 * larger page sizes.
1745 *
1746	 * When @dyn_size is positive, the dynamic area might be larger than
1747	 * specified to fill page alignment.  When @dyn_size is auto (-1), the
1748	 * dynamic area is made just big enough to fill page alignment after
1749	 * the static and reserved areas.
1750 *
1751 * If the needed size is smaller than the minimum or specified unit
1752 * size, the leftover is returned to the bootmem allocator.
1753 *
1754 * RETURNS:
1755 * 0 on success, -errno on failure.
1756 */
1757int __init pcpu_embed_first_chunk(size_t reserved_size, ssize_t dyn_size)
1758{
1759	struct pcpu_alloc_info *ai;
1760	size_t size_sum, chunk_size;
1761	void *base;
1762	int unit;
1763	int rc;
1764
1765	ai = pcpu_build_alloc_info(reserved_size, dyn_size, PAGE_SIZE, NULL);
1766	if (IS_ERR(ai))
1767		return PTR_ERR(ai);
1768	BUG_ON(ai->nr_groups != 1);
1769	BUG_ON(ai->groups[0].nr_units != num_possible_cpus());
1770
1771	size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
1772	chunk_size = ai->unit_size * num_possible_cpus();
1773
1774	base = __alloc_bootmem_nopanic(chunk_size, PAGE_SIZE,
1775				       __pa(MAX_DMA_ADDRESS));
1776	if (!base) {
1777		pr_warning("PERCPU: failed to allocate %zu bytes for "
1778			   "embedding\n", chunk_size);
1779		rc = -ENOMEM;
1780		goto out_free_ai;
1781	}
1782
1783	/* return the leftover and copy */
1784	for (unit = 0; unit < num_possible_cpus(); unit++) {
1785		void *ptr = base + unit * ai->unit_size;
1786
1787		free_bootmem(__pa(ptr + size_sum), ai->unit_size - size_sum);
1788		memcpy(ptr, __per_cpu_load, ai->static_size);
1789	}
1790
1791	/* we're ready, commit */
1792	pr_info("PERCPU: Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
1793		PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
1794		ai->dyn_size, ai->unit_size);
1795
1796	rc = pcpu_setup_first_chunk(ai, base);
1797out_free_ai:
1798	pcpu_free_alloc_info(ai);
1799	return rc;
1800}
1801#endif /* CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK ||
1802	  !CONFIG_HAVE_SETUP_PER_CPU_AREA */
1803
1804#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
1805/**
1806 * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
1807 * @reserved_size: the size of reserved percpu area in bytes
1808 * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
1809	 * @free_fn: function to free percpu page, always called with PAGE_SIZE
1810 * @populate_pte_fn: function to populate pte
1811 *
1812 * This is a helper to ease setting up page-remapped first percpu
1813 * chunk and can be called where pcpu_setup_first_chunk() is expected.
1814 *
1815	 * This is the basic allocator.  The static percpu area is allocated
1816	 * page-by-page into the vmalloc area.
1817 *
1818 * RETURNS:
1819 * 0 on success, -errno on failure.
1820 */
1821int __init pcpu_page_first_chunk(size_t reserved_size,
1822				 pcpu_fc_alloc_fn_t alloc_fn,
1823				 pcpu_fc_free_fn_t free_fn,
1824				 pcpu_fc_populate_pte_fn_t populate_pte_fn)
1825{
1826	static struct vm_struct vm;
1827	struct pcpu_alloc_info *ai;
1828	char psize_str[16];
1829	int unit_pages;
1830	size_t pages_size;
1831	struct page **pages;
1832	int unit, i, j, rc;
1833
1834	snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);
1835
1836	ai = pcpu_build_alloc_info(reserved_size, -1, PAGE_SIZE, NULL);
1837	if (IS_ERR(ai))
1838		return PTR_ERR(ai);
1839	BUG_ON(ai->nr_groups != 1);
1840	BUG_ON(ai->groups[0].nr_units != num_possible_cpus());
1841
1842	unit_pages = ai->unit_size >> PAGE_SHIFT;
1843
1844	/* unaligned allocations can't be freed, round up to page size */
1845	pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
1846			       sizeof(pages[0]));
1847	pages = alloc_bootmem(pages_size);
1848
1849	/* allocate pages */
1850	j = 0;
1851	for (unit = 0; unit < num_possible_cpus(); unit++)
1852		for (i = 0; i < unit_pages; i++) {
1853			unsigned int cpu = ai->groups[0].cpu_map[unit];
1854			void *ptr;
1855
1856			ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
1857			if (!ptr) {
1858				pr_warning("PERCPU: failed to allocate %s page "
1859					   "for cpu%u\n", psize_str, cpu);
1860				goto enomem;
1861			}
1862			pages[j++] = virt_to_page(ptr);
1863		}
1864
1865	/* allocate vm area, map the pages and copy static data */
1866	vm.flags = VM_ALLOC;
1867	vm.size = num_possible_cpus() * ai->unit_size;
1868	vm_area_register_early(&vm, PAGE_SIZE);
1869
1870	for (unit = 0; unit < num_possible_cpus(); unit++) {
1871		unsigned long unit_addr =
1872			(unsigned long)vm.addr + unit * ai->unit_size;
1873
1874		for (i = 0; i < unit_pages; i++)
1875			populate_pte_fn(unit_addr + (i << PAGE_SHIFT));
1876
1877		/* pte already populated, the following shouldn't fail */
1878		rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
1879				      unit_pages);
1880		if (rc < 0)
1881			panic("failed to map percpu area, err=%d\n", rc);
1882
1883		/*
1884		 * FIXME: Archs with virtual cache should flush local
1885		 * cache for the linear mapping here - something
1886		 * equivalent to flush_cache_vmap() on the local cpu.
1887		 * flush_cache_vmap() can't be used as most supporting
1888		 * data structures are not set up yet.
1889		 */
1890
1891		/* copy static data */
1892		memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
1893	}
1894
1895	/* we're ready, commit */
1896	pr_info("PERCPU: %d %s pages/cpu @%p s%zu r%zu d%zu\n",
1897		unit_pages, psize_str, vm.addr, ai->static_size,
1898		ai->reserved_size, ai->dyn_size);
1899
1900	rc = pcpu_setup_first_chunk(ai, vm.addr);
1901	goto out_free_ar;
1902
1903enomem:
1904	while (--j >= 0)
1905		free_fn(page_address(pages[j]), PAGE_SIZE);
1906	rc = -ENOMEM;
1907out_free_ar:
1908	free_bootmem(__pa(pages), pages_size);
1909	pcpu_free_alloc_info(ai);
1910	return rc;
1911}
1912#endif /* CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK */
1913
1914#ifdef CONFIG_NEED_PER_CPU_LPAGE_FIRST_CHUNK
1915struct pcpul_ent {
1916	void		*ptr;
1917	void		*map_addr;
1918};
1919
1920static size_t pcpul_size;
1921static size_t pcpul_lpage_size;
1922static int pcpul_nr_lpages;
1923static struct pcpul_ent *pcpul_map;
1924
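/*
 * pcpul_unit_to_cpu - given unit number @unit, look up the cpu it is
 * mapped to according to @ai's group information.  Fills in *@cpup and
 * returns true on success, false if @unit doesn't fall into any group.
 */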
1925static bool __init pcpul_unit_to_cpu(int unit, const struct pcpu_alloc_info *ai,
1926				     unsigned int *cpup)
1927{
1928	int group, cunit;
1929
1930	for (group = 0, cunit = 0; group < ai->nr_groups; group++) {
1931		const struct pcpu_group_info *gi = &ai->groups[group];
1932
1933		if (unit < cunit + gi->nr_units) {
1934			if (cpup)
1935				*cpup = gi->cpu_map[unit - cunit];
1936			return true;
1937		}
1938		cunit += gi->nr_units;
1939	}
1940
1941	return false;
1942}
1943
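/*
 * pcpul_cpu_to_unit - the inverse of the above: return the unit number
 * assigned to @cpu in @ai.  BUGs if @cpu isn't in any group's cpu map.
 */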
1944static int __init pcpul_cpu_to_unit(int cpu, const struct pcpu_alloc_info *ai)
1945{
1946	int group, unit, i;
1947
1948	for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
1949		const struct pcpu_group_info *gi = &ai->groups[group];
1950
1951		for (i = 0; i < gi->nr_units; i++)
1952			if (gi->cpu_map[i] == cpu)
1953				return unit + i;
1954	}
1955	BUG();
1956}
1957
1958/**
1959 * pcpu_lpage_first_chunk - remap the first percpu chunk using large page
1960 * @ai: pcpu_alloc_info
1961 * @alloc_fn: function to allocate percpu lpage, always called with lpage_size
1962 * @free_fn: function to free percpu memory, @size <= lpage_size
1963 * @map_fn: function to map percpu lpage, always called with lpage_size
1964 *
1965	 * This allocator uses large pages to build and map the first chunk.
1966	 * Unlike the other helpers, the caller should provide a fully
1967	 * initialized @ai.  This can be done using pcpu_build_alloc_info().
1968	 * This two-stage initialization allows arch code to evaluate the
1969	 * parameters before committing to them.
1970 *
1971	 * Large pages are allocated as directed by @ai's unit map and other
1972	 * parameters and mapped into vmalloc space.  Unused holes are returned
1973	 * to the page allocator.  Note that these holes end up being actively
1974	 * mapped twice - once in the linear physical mapping and once in the
1975	 * vmalloc area for the first percpu chunk.  Depending on the
1976	 * architecture, this might cause problems when changing page
1977	 * attributes of the returned area.  Such doubly mapped areas can be
1978	 * detected using pcpu_lpage_remapped().
1979 *
1980 * RETURNS:
1981 * 0 on success, -errno on failure.
1982 */
1983int __init pcpu_lpage_first_chunk(const struct pcpu_alloc_info *ai,
1984				  pcpu_fc_alloc_fn_t alloc_fn,
1985				  pcpu_fc_free_fn_t free_fn,
1986				  pcpu_fc_map_fn_t map_fn)
1987{
1988	static struct vm_struct vm;
1989	const size_t lpage_size = ai->atom_size;
1990	size_t chunk_size, map_size;
1991	unsigned int cpu;
1992	int i, j, unit, nr_units, rc;
1993
1994	nr_units = 0;
1995	for (i = 0; i < ai->nr_groups; i++)
1996		nr_units += ai->groups[i].nr_units;
1997
1998	chunk_size = ai->unit_size * nr_units;
1999	BUG_ON(chunk_size % lpage_size);
2000
2001	pcpul_size = ai->static_size + ai->reserved_size + ai->dyn_size;
2002	pcpul_lpage_size = lpage_size;
2003	pcpul_nr_lpages = chunk_size / lpage_size;
2004
2005	/* allocate pointer array and alloc large pages */
2006	map_size = pcpul_nr_lpages * sizeof(pcpul_map[0]);
2007	pcpul_map = alloc_bootmem(map_size);
2008
2009	/* allocate all pages */
2010	for (i = 0; i < pcpul_nr_lpages; i++) {
2011		size_t offset = i * lpage_size;
2012		int first_unit = offset / ai->unit_size;
2013		int last_unit = (offset + lpage_size - 1) / ai->unit_size;
2014		void *ptr;
2015
2016		/* find out which cpu is mapped to this unit */
2017		for (unit = first_unit; unit <= last_unit; unit++)
2018			if (pcpul_unit_to_cpu(unit, ai, &cpu))
2019				goto found;
2020		continue;
2021	found:
2022		ptr = alloc_fn(cpu, lpage_size, lpage_size);
2023		if (!ptr) {
2024			pr_warning("PERCPU: failed to allocate large page "
2025				   "for cpu%u\n", cpu);
2026			goto enomem;
2027		}
2028
2029		pcpul_map[i].ptr = ptr;
2030	}
2031
2032	/* return unused holes */
2033	for (unit = 0; unit < nr_units; unit++) {
2034		size_t start = unit * ai->unit_size;
2035		size_t end = start + ai->unit_size;
2036		size_t off, next;
2037
2038		/* don't free used part of occupied unit */
2039		if (pcpul_unit_to_cpu(unit, ai, NULL))
2040			start += pcpul_size;
2041
2042		/* unit can span more than one page, punch the holes */
2043		for (off = start; off < end; off = next) {
2044			void *ptr = pcpul_map[off / lpage_size].ptr;
2045			next = min(roundup(off + 1, lpage_size), end);
2046			if (ptr)
2047				free_fn(ptr + off % lpage_size, next - off);
2048		}
2049	}
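
	/*
	 * Illustration with made-up sizes: for unit_size == 1M,
	 * lpage_size == 2M and pcpul_size == 300k (static + reserved +
	 * dyn), every unit that is mapped to a cpu keeps its first 300k
	 * and the [300k, 1M) tail is freed above, while a unit no cpu
	 * maps to gives back everything that was actually allocated for
	 * it, split at lpage boundaries where necessary.
	 */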
2050
2051	/* allocate address, map and copy */
2052	vm.flags = VM_ALLOC;
2053	vm.size = chunk_size;
2054	vm_area_register_early(&vm, ai->unit_size);
2055
2056	for (i = 0; i < pcpul_nr_lpages; i++) {
2057		if (!pcpul_map[i].ptr)
2058			continue;
2059		pcpul_map[i].map_addr = vm.addr + i * lpage_size;
2060		map_fn(pcpul_map[i].ptr, lpage_size, pcpul_map[i].map_addr);
2061	}
2062
2063	for_each_possible_cpu(cpu)
2064		memcpy(vm.addr + pcpul_cpu_to_unit(cpu, ai) * ai->unit_size,
2065		       __per_cpu_load, ai->static_size);
2066
2067	/* we're ready, commit */
2068	pr_info("PERCPU: large pages @%p s%zu r%zu d%zu u%zu\n",
2069		vm.addr, ai->static_size, ai->reserved_size, ai->dyn_size,
2070		ai->unit_size);
2071
2072	rc = pcpu_setup_first_chunk(ai, vm.addr);
2073
2074	/*
2075	 * Sort pcpul_map array for pcpu_lpage_remapped().  Unmapped
2076	 * lpages are pushed to the end and trimmed.
2077	 */
2078	for (i = 0; i < pcpul_nr_lpages - 1; i++)
2079		for (j = i + 1; j < pcpul_nr_lpages; j++) {
2080			struct pcpul_ent tmp;
2081
2082			if (!pcpul_map[j].ptr)
2083				continue;
2084			if (pcpul_map[i].ptr &&
2085			    pcpul_map[i].ptr < pcpul_map[j].ptr)
2086				continue;
2087
2088			tmp = pcpul_map[i];
2089			pcpul_map[i] = pcpul_map[j];
2090			pcpul_map[j] = tmp;
2091		}
2092
2093	while (pcpul_nr_lpages && !pcpul_map[pcpul_nr_lpages - 1].ptr)
2094		pcpul_nr_lpages--;
2095
2096	return rc;
2097
2098enomem:
2099	for (i = 0; i < pcpul_nr_lpages; i++)
2100		if (pcpul_map[i].ptr)
2101			free_fn(pcpul_map[i].ptr, lpage_size);
2102	free_bootmem(__pa(pcpul_map), map_size);
2103	return -ENOMEM;
2104}
2105
2106/**
2107 * pcpu_lpage_remapped - determine whether a kaddr is in pcpul recycled area
2108 * @kaddr: the kernel address in question
2109 *
2110 * Determine whether @kaddr falls in the pcpul recycled area.  This is
2111 * used by pageattr to detect VM aliases and break up the pcpu large
2112 * page mapping such that the same physical page is not mapped under
2113 * different attributes.
2114 *
2115 * The recycled area is always at the tail of a partially used large
2116 * page.
2117 *
2118 * RETURNS:
2119 * Address of corresponding remapped pcpu address if match is found;
2120 * otherwise, NULL.
2121 */
2122void *pcpu_lpage_remapped(void *kaddr)
2123{
2124	unsigned long lpage_mask = pcpul_lpage_size - 1;
2125	void *lpage_addr = (void *)((unsigned long)kaddr & ~lpage_mask);
2126	unsigned long offset = (unsigned long)kaddr & lpage_mask;
2127	int left = 0, right = pcpul_nr_lpages - 1;
2128	int pos;
2129
2130	/* pcpul in use at all? */
2131	if (!pcpul_map)
2132		return NULL;
2133
2134	/* okay, perform binary search */
2135	while (left <= right) {
2136		pos = (left + right) / 2;
2137
2138		if (pcpul_map[pos].ptr < lpage_addr)
2139			left = pos + 1;
2140		else if (pcpul_map[pos].ptr > lpage_addr)
2141			right = pos - 1;
2142		else
2143			return pcpul_map[pos].map_addr + offset;
2144	}
2145
2146	return NULL;
2147}
2148#endif /* CONFIG_NEED_PER_CPU_LPAGE_FIRST_CHUNK */
2149
2150/*
2151 * Generic percpu area setup.
2152 *
2153	 * The embedding helper is used because its behavior closely resembles
2154	 * the original non-dynamic generic percpu area setup.  This is
2155	 * important because many archs have addressing restrictions and might
2156	 * fail if the percpu area is located far away from where it used to
2157	 * be.  As an added bonus, in non-NUMA cases, embedding is generally a
2158	 * good idea TLB-wise because the percpu area can piggyback on the
2159	 * physical linear memory mapping, which uses large page mappings on
2160	 * applicable archs.
2161 */
2162#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
2163unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
2164EXPORT_SYMBOL(__per_cpu_offset);
2165
2166void __init setup_per_cpu_areas(void)
2167{
2168	unsigned long delta;
2169	unsigned int cpu;
2170	int rc;
2171
2172	/*
2173	 * Always reserve area for module percpu variables.  That's
2174	 * what the legacy allocator did.
2175	 */
2176	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
2177				    PERCPU_DYNAMIC_RESERVE);
2178	if (rc < 0)
2179		panic("Failed to initialize percpu areas.");
2180
2181	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
2182	for_each_possible_cpu(cpu)
2183		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
2184}
2185#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
2186