slab.c revision 7ae439ce0c01d7db0c70d1542985969e95ef750d
1/*
2 * linux/mm/slab.c
3 * Written by Mark Hemment, 1996/97.
4 * (markhe@nextd.demon.co.uk)
5 *
6 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
7 *
8 * Major cleanup, different bufctl logic, per-cpu arrays
9 *	(c) 2000 Manfred Spraul
10 *
11 * Cleanup, make the head arrays unconditional, preparation for NUMA
12 * 	(c) 2002 Manfred Spraul
13 *
14 * An implementation of the Slab Allocator as described in outline in;
15 *	UNIX Internals: The New Frontiers by Uresh Vahalia
16 *	Pub: Prentice Hall	ISBN 0-13-101908-2
17 * or with a little more detail in;
18 *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
19 *	Jeff Bonwick (Sun Microsystems).
20 *	Presented at: USENIX Summer 1994 Technical Conference
21 *
22 * The memory is organized in caches, one cache for each object type.
23 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
24 * Each cache consists of many slabs (they are small (usually one
25 * page long) and always contiguous), and each slab contains multiple
26 * initialized objects.
27 *
28 * This means that your constructor is used only for newly allocated
29 * slabs and you must pass objects with the same initializations to
30 * kmem_cache_free.
31 *
32 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
33 * normal). If you need a special memory type, then you must create a new
34 * cache for that memory type.
35 *
36 * In order to reduce fragmentation, the slabs are sorted in 3 groups:
37 *   full slabs with 0 free objects
38 *   partial slabs
39 *   empty slabs with no allocated objects
40 *
41 * If partial slabs exist, then new allocations come from these slabs,
42 * otherwise they come from empty slabs, or new slabs are allocated.
43 *
44 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
45 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
46 *
47 * Each cache has a short per-cpu head array, most allocs
48 * and frees go into that array, and if that array overflows, then 1/2
49 * of the entries in the array are given back into the global cache.
50 * The head array is strictly LIFO and should improve the cache hit rates.
51 * On SMP, it additionally reduces the spinlock operations.
52 *
53 * The c_cpuarray may not be read with local interrupts enabled -
54 * it's changed with a smp_call_function().
55 *
56 * SMP synchronization:
57 *  constructors and destructors are called without any locking.
58 *  Several members in struct kmem_cache and struct slab never change; they
59 *	are accessed without any locking.
60 *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
61 *  	and local interrupts are disabled so slab code is preempt-safe.
62 *  The non-constant members are protected with a per-cache irq spinlock.
63 *
64 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
65 * in 2000 - many ideas in the current implementation are derived from
66 * his patch.
67 *
68 * Further notes from the original documentation:
69 *
70 * 11 April '97.  Started multi-threading - markhe
71 *	The global cache-chain is protected by the mutex 'cache_chain_mutex'.
72 *	The mutex is only needed when accessing/extending the cache-chain, which
73 *	can never happen inside an interrupt (kmem_cache_create(),
74 *	kmem_cache_shrink() and kmem_cache_reap()).
75 *
76 *	At present, each engine can be growing a cache.  This should be blocked.
77 *
78 * 15 March 2005. NUMA slab allocator.
79 *	Shai Fultheim <shai@scalex86.org>.
80 *	Shobhit Dayal <shobhit@calsoftinc.com>
81 *	Alok N Kataria <alokk@calsoftinc.com>
82 *	Christoph Lameter <christoph@lameter.com>
83 *
84 *	Modified the slab allocator to be node aware on NUMA systems.
85 *	Each node has its own list of partial, free and full slabs.
86 *	All object allocations for a node occur from node specific slab lists.
87 */
88
89#include	<linux/slab.h>
90#include	<linux/mm.h>
91#include	<linux/poison.h>
92#include	<linux/swap.h>
93#include	<linux/cache.h>
94#include	<linux/interrupt.h>
95#include	<linux/init.h>
96#include	<linux/compiler.h>
97#include	<linux/cpuset.h>
98#include	<linux/seq_file.h>
99#include	<linux/notifier.h>
100#include	<linux/kallsyms.h>
101#include	<linux/cpu.h>
102#include	<linux/sysctl.h>
103#include	<linux/module.h>
104#include	<linux/rcupdate.h>
105#include	<linux/string.h>
106#include	<linux/uaccess.h>
107#include	<linux/nodemask.h>
108#include	<linux/mempolicy.h>
109#include	<linux/mutex.h>
110#include	<linux/fault-inject.h>
111#include	<linux/rtmutex.h>
112#include	<linux/reciprocal_div.h>
113
114#include	<asm/cacheflush.h>
115#include	<asm/tlbflush.h>
116#include	<asm/page.h>
117
118/*
119 * DEBUG	- 1 for kmem_cache_create() to honour SLAB_RED_ZONE & SLAB_POISON.
120 *		  0 for faster, smaller code (especially in the critical paths).
121 *
122 * STATS	- 1 to collect stats for /proc/slabinfo.
123 *		  0 for faster, smaller code (especially in the critical paths).
124 *
125 * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
126 */
127
128#ifdef CONFIG_DEBUG_SLAB
129#define	DEBUG		1
130#define	STATS		1
131#define	FORCED_DEBUG	1
132#else
133#define	DEBUG		0
134#define	STATS		0
135#define	FORCED_DEBUG	0
136#endif
137
138/* Shouldn't this be in a header file somewhere? */
139#define	BYTES_PER_WORD		sizeof(void *)
140
141#ifndef cache_line_size
142#define cache_line_size()	L1_CACHE_BYTES
143#endif
144
145#ifndef ARCH_KMALLOC_MINALIGN
146/*
147 * Enforce a minimum alignment for the kmalloc caches.
148 * Usually, the kmalloc caches are cache_line_size() aligned, except when
149 * DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned.
150 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
151 * alignment larger than the alignment of a 64-bit integer.
152 * ARCH_KMALLOC_MINALIGN allows that.
153 * Note that increasing this value may disable some debug features.
154 */
155#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
156#endif
157
158#ifndef ARCH_SLAB_MINALIGN
159/*
160 * Enforce a minimum alignment for all caches.
161 * Intended for archs that get misalignment faults even for BYTES_PER_WORD
162 * aligned buffers. Includes ARCH_KMALLOC_MINALIGN.
163 * If possible: Do not enable this flag for CONFIG_DEBUG_SLAB, it disables
164 * some debug features.
165 */
166#define ARCH_SLAB_MINALIGN 0
167#endif
168
169#ifndef ARCH_KMALLOC_FLAGS
170#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
171#endif
172
173/* Legal flag mask for kmem_cache_create(). */
174#if DEBUG
175# define CREATE_MASK	(SLAB_RED_ZONE | \
176			 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
177			 SLAB_CACHE_DMA | \
178			 SLAB_STORE_USER | \
179			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
180			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
181#else
182# define CREATE_MASK	(SLAB_HWCACHE_ALIGN | \
183			 SLAB_CACHE_DMA | \
184			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
185			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
186#endif
187
188/*
189 * kmem_bufctl_t:
190 *
191 * Bufctl's are used for linking objs within a slab via
192 * linked offsets.
193 *
194 * This implementation relies on "struct page" for locating the cache &
195 * slab an object belongs to.
196 * This allows the bufctl structure to be small (one int), but limits
197 * the number of objects a slab (not a cache) can contain when off-slab
198 * bufctls are used. The limit is the size of the largest general cache
199 * that does not use off-slab slabs.
200 * For 32bit archs with 4 kB pages, this is 56.
201 * This is not serious, as it is only for large objects, when it is unwise
202 * to have too many per slab.
203 * Note: This limit can be raised by introducing a general cache whose size
204 * is less than 512 (PAGE_SIZE<<3), but greater than 256.
205 */
206
207typedef unsigned int kmem_bufctl_t;
208#define BUFCTL_END	(((kmem_bufctl_t)(~0U))-0)
209#define BUFCTL_FREE	(((kmem_bufctl_t)(~0U))-1)
210#define	BUFCTL_ACTIVE	(((kmem_bufctl_t)(~0U))-2)
211#define	SLAB_LIMIT	(((kmem_bufctl_t)(~0U))-3)
212
213/*
214 * struct slab
215 *
216 * Manages the objs in a slab. Placed either at the beginning of mem allocated
217 * for a slab, or allocated from a general cache.
218 * Slabs are chained into three lists: fully used, partial, fully free slabs.
219 */
220struct slab {
221	struct list_head list;
222	unsigned long colouroff;
223	void *s_mem;		/* including colour offset */
224	unsigned int inuse;	/* num of objs active in slab */
225	kmem_bufctl_t free;
226	unsigned short nodeid;
227};
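
/*
 * Illustrative sketch (kept out of the build): how the free-object chain in a
 * slab is walked.  It assumes the conventional layout in which the
 * kmem_bufctl_t array sits directly behind struct slab; slabp->free holds the
 * index of the first free object and each array slot holds the index of the
 * next free one, terminated by BUFCTL_END.
 */
#if 0
static unsigned int example_count_free_objs(struct slab *slabp)
{
	kmem_bufctl_t *bufctl = (kmem_bufctl_t *)(slabp + 1);
	kmem_bufctl_t i = slabp->free;
	unsigned int n = 0;

	while (i != BUFCTL_END) {
		n++;
		i = bufctl[i];
	}
	return n;
}
#endif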
228
229/*
230 * struct slab_rcu
231 *
232 * slab_destroy on a SLAB_DESTROY_BY_RCU cache uses this structure to
233 * arrange for kmem_freepages to be called via RCU.  This is useful if
234 * we need to approach a kernel structure obliquely, from its address
235 * obtained without the usual locking.  We can lock the structure to
236 * stabilize it and check it's still at the given address, only if we
237 * can be sure that the memory has not been meanwhile reused for some
238 * other kind of object (which our subsystem's lock might corrupt).
239 *
240 * rcu_read_lock before reading the address, then rcu_read_unlock after
241 * taking the spinlock within the structure expected at that address.
242 *
243 * We assume struct slab_rcu can overlay struct slab when destroying.
244 */
245struct slab_rcu {
246	struct rcu_head head;
247	struct kmem_cache *cachep;
248	void *addr;
249};
250
251/*
252 * struct array_cache
253 *
254 * Purpose:
255 * - LIFO ordering, to hand out cache-warm objects from _alloc
256 * - reduce the number of linked list operations
257 * - reduce spinlock operations
258 *
259 * The limit is stored in the per-cpu structure to reduce the data cache
260 * footprint.
261 *
262 */
263struct array_cache {
264	unsigned int avail;
265	unsigned int limit;
266	unsigned int batchcount;
267	unsigned int touched;
268	spinlock_t lock;
269	void *entry[0];	/*
270			 * Must have this definition in here for the proper
271			 * alignment of array_cache. Also simplifies accessing
272			 * the entries.
273			 * [0] is for gcc 2.95. It should really be [].
274			 */
275};
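
/*
 * Illustrative sketch (kept out of the build): entry[] above is used as a
 * plain LIFO stack, which is what makes the head arrays hand out cache-warm
 * objects.  The per-cpu fast paths in this file do the equivalent of the
 * helpers below, plus statistics and batch refill/flush handling.
 */
#if 0
static inline void *example_ac_pop(struct array_cache *ac)
{
	/* the most recently freed object is handed out first */
	return ac->avail ? ac->entry[--ac->avail] : NULL;
}

static inline int example_ac_push(struct array_cache *ac, void *objp)
{
	if (ac->avail < ac->limit) {
		ac->entry[ac->avail++] = objp;
		return 1;
	}
	return 0;	/* full: a batch must be flushed back to the lists */
}
#endif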
276
277/*
278 * bootstrap: The caches do not work without cpuarrays anymore, but the
279 * cpuarrays are allocated from the generic caches...
280 */
281#define BOOT_CPUCACHE_ENTRIES	1
282struct arraycache_init {
283	struct array_cache cache;
284	void *entries[BOOT_CPUCACHE_ENTRIES];
285};
286
287/*
288 * The slab lists for all objects.
289 */
290struct kmem_list3 {
291	struct list_head slabs_partial;	/* partial list first, better asm code */
292	struct list_head slabs_full;
293	struct list_head slabs_free;
294	unsigned long free_objects;
295	unsigned int free_limit;
296	unsigned int colour_next;	/* Per-node cache coloring */
297	spinlock_t list_lock;
298	struct array_cache *shared;	/* shared per node */
299	struct array_cache **alien;	/* on other nodes */
300	unsigned long next_reap;	/* updated without locking */
301	int free_touched;		/* updated without locking */
302};
303
304/*
305 * Need this for bootstrapping a per node allocator.
306 */
307#define NUM_INIT_LISTS (2 * MAX_NUMNODES + 1)
308struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
309#define	CACHE_CACHE 0
310#define	SIZE_AC 1
311#define	SIZE_L3 (1 + MAX_NUMNODES)
312
313static int drain_freelist(struct kmem_cache *cache,
314			struct kmem_list3 *l3, int tofree);
315static void free_block(struct kmem_cache *cachep, void **objpp, int len,
316			int node);
317static int enable_cpucache(struct kmem_cache *cachep);
318static void cache_reap(struct work_struct *unused);
319
320/*
321 * This function must be completely optimized away if a constant is passed to
322 * it.  Mostly the same as what is in linux/slab.h except it returns an index.
323 */
324static __always_inline int index_of(const size_t size)
325{
326	extern void __bad_size(void);
327
328	if (__builtin_constant_p(size)) {
329		int i = 0;
330
331#define CACHE(x) \
332	if (size <= x) \
333		return i; \
334	else \
335		i++;
336#include "linux/kmalloc_sizes.h"
337#undef CACHE
338		__bad_size();
339	} else
340		__bad_size();
341	return 0;
342}
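
/*
 * Example (the sizes are illustrative; the real table comes from
 * linux/kmalloc_sizes.h and depends on the configuration): with a table
 * beginning 32, 64, 96, 128, ... the call index_of(100) expands to the
 * if/else chain above and folds to the constant 3, i.e. the 128-byte general
 * cache.  A non-constant or over-large size ends up calling __bad_size(),
 * which has no definition and therefore fails at link time.
 */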
343
344static int slab_early_init = 1;
345
346#define INDEX_AC index_of(sizeof(struct arraycache_init))
347#define INDEX_L3 index_of(sizeof(struct kmem_list3))
348
349static void kmem_list3_init(struct kmem_list3 *parent)
350{
351	INIT_LIST_HEAD(&parent->slabs_full);
352	INIT_LIST_HEAD(&parent->slabs_partial);
353	INIT_LIST_HEAD(&parent->slabs_free);
354	parent->shared = NULL;
355	parent->alien = NULL;
356	parent->colour_next = 0;
357	spin_lock_init(&parent->list_lock);
358	parent->free_objects = 0;
359	parent->free_touched = 0;
360}
361
362#define MAKE_LIST(cachep, listp, slab, nodeid)				\
363	do {								\
364		INIT_LIST_HEAD(listp);					\
365		list_splice(&(cachep->nodelists[nodeid]->slab), listp);	\
366	} while (0)
367
368#define	MAKE_ALL_LISTS(cachep, ptr, nodeid)				\
369	do {								\
370	MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid);	\
371	MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
372	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
373	} while (0)
374
375/*
376 * struct kmem_cache
377 *
378 * manages a cache.
379 */
380
381struct kmem_cache {
382/* 1) per-cpu data, touched during every alloc/free */
383	struct array_cache *array[NR_CPUS];
384/* 2) Cache tunables. Protected by cache_chain_mutex */
385	unsigned int batchcount;
386	unsigned int limit;
387	unsigned int shared;
388
389	unsigned int buffer_size;
390	u32 reciprocal_buffer_size;
391/* 3) touched by every alloc & free from the backend */
392
393	unsigned int flags;		/* constant flags */
394	unsigned int num;		/* # of objs per slab */
395
396/* 4) cache_grow/shrink */
397	/* order of pgs per slab (2^n) */
398	unsigned int gfporder;
399
400	/* force GFP flags, e.g. GFP_DMA */
401	gfp_t gfpflags;
402
403	size_t colour;			/* cache colouring range */
404	unsigned int colour_off;	/* colour offset */
405	struct kmem_cache *slabp_cache;
406	unsigned int slab_size;
407	unsigned int dflags;		/* dynamic flags */
408
409	/* constructor func */
410	void (*ctor) (void *, struct kmem_cache *, unsigned long);
411
412	/* de-constructor func */
413	void (*dtor) (void *, struct kmem_cache *, unsigned long);
414
415/* 5) cache creation/removal */
416	const char *name;
417	struct list_head next;
418
419/* 6) statistics */
420#if STATS
421	unsigned long num_active;
422	unsigned long num_allocations;
423	unsigned long high_mark;
424	unsigned long grown;
425	unsigned long reaped;
426	unsigned long errors;
427	unsigned long max_freeable;
428	unsigned long node_allocs;
429	unsigned long node_frees;
430	unsigned long node_overflow;
431	atomic_t allochit;
432	atomic_t allocmiss;
433	atomic_t freehit;
434	atomic_t freemiss;
435#endif
436#if DEBUG
437	/*
438	 * If debugging is enabled, then the allocator can add additional
439	 * fields and/or padding to every object. buffer_size contains the total
440	 * object size including these internal fields, the following two
441	 * variables contain the offset to the user object and its size.
442	 */
443	int obj_offset;
444	int obj_size;
445#endif
446	/*
447	 * We put nodelists[] at the end of kmem_cache, because we want to size
448	 * this array to nr_node_ids slots instead of MAX_NUMNODES
449	 * (see kmem_cache_init())
450	 * We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache
451	 * is statically defined, so we reserve the max number of nodes.
452	 */
453	struct kmem_list3 *nodelists[MAX_NUMNODES];
454	/*
455	 * Do not add fields after nodelists[]
456	 */
457};
458
459#define CFLGS_OFF_SLAB		(0x80000000UL)
460#define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)
461
462#define BATCHREFILL_LIMIT	16
463/*
464 * Optimization question: fewer reaps mean a lower probability of unnecessary
465 * cpucache drain/refill cycles.
466 *
467 * OTOH the cpuarrays can contain lots of objects,
468 * which could lock up otherwise freeable slabs.
469 */
470#define REAPTIMEOUT_CPUC	(2*HZ)
471#define REAPTIMEOUT_LIST3	(4*HZ)
472
473#if STATS
474#define	STATS_INC_ACTIVE(x)	((x)->num_active++)
475#define	STATS_DEC_ACTIVE(x)	((x)->num_active--)
476#define	STATS_INC_ALLOCED(x)	((x)->num_allocations++)
477#define	STATS_INC_GROWN(x)	((x)->grown++)
478#define	STATS_ADD_REAPED(x,y)	((x)->reaped += (y))
479#define	STATS_SET_HIGH(x)						\
480	do {								\
481		if ((x)->num_active > (x)->high_mark)			\
482			(x)->high_mark = (x)->num_active;		\
483	} while (0)
484#define	STATS_INC_ERR(x)	((x)->errors++)
485#define	STATS_INC_NODEALLOCS(x)	((x)->node_allocs++)
486#define	STATS_INC_NODEFREES(x)	((x)->node_frees++)
487#define STATS_INC_ACOVERFLOW(x)   ((x)->node_overflow++)
488#define	STATS_SET_FREEABLE(x, i)					\
489	do {								\
490		if ((x)->max_freeable < i)				\
491			(x)->max_freeable = i;				\
492	} while (0)
493#define STATS_INC_ALLOCHIT(x)	atomic_inc(&(x)->allochit)
494#define STATS_INC_ALLOCMISS(x)	atomic_inc(&(x)->allocmiss)
495#define STATS_INC_FREEHIT(x)	atomic_inc(&(x)->freehit)
496#define STATS_INC_FREEMISS(x)	atomic_inc(&(x)->freemiss)
497#else
498#define	STATS_INC_ACTIVE(x)	do { } while (0)
499#define	STATS_DEC_ACTIVE(x)	do { } while (0)
500#define	STATS_INC_ALLOCED(x)	do { } while (0)
501#define	STATS_INC_GROWN(x)	do { } while (0)
502#define	STATS_ADD_REAPED(x,y)	do { } while (0)
503#define	STATS_SET_HIGH(x)	do { } while (0)
504#define	STATS_INC_ERR(x)	do { } while (0)
505#define	STATS_INC_NODEALLOCS(x)	do { } while (0)
506#define	STATS_INC_NODEFREES(x)	do { } while (0)
507#define STATS_INC_ACOVERFLOW(x)   do { } while (0)
508#define	STATS_SET_FREEABLE(x, i) do { } while (0)
509#define STATS_INC_ALLOCHIT(x)	do { } while (0)
510#define STATS_INC_ALLOCMISS(x)	do { } while (0)
511#define STATS_INC_FREEHIT(x)	do { } while (0)
512#define STATS_INC_FREEMISS(x)	do { } while (0)
513#endif
514
515#if DEBUG
516
517/*
518 * memory layout of objects:
519 * 0		: objp
520 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
521 * 		the end of an object is aligned with the end of the real
522 * 		allocation. Catches writes behind the end of the allocation.
523 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
524 * 		redzone word.
525 * cachep->obj_offset: The real object.
526 * cachep->buffer_size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
527 * cachep->buffer_size - 1* BYTES_PER_WORD: last caller address
528 *					[BYTES_PER_WORD long]
529 */
530static int obj_offset(struct kmem_cache *cachep)
531{
532	return cachep->obj_offset;
533}
534
535static int obj_size(struct kmem_cache *cachep)
536{
537	return cachep->obj_size;
538}
539
540static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
541{
542	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
543	return (unsigned long long*) (objp + obj_offset(cachep) -
544				      sizeof(unsigned long long));
545}
546
547static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
548{
549	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
550	if (cachep->flags & SLAB_STORE_USER)
551		return (unsigned long long *)(objp + cachep->buffer_size -
552					      sizeof(unsigned long long) -
553					      BYTES_PER_WORD);
554	return (unsigned long long *) (objp + cachep->buffer_size -
555				       sizeof(unsigned long long));
556}
557
558static void **dbg_userword(struct kmem_cache *cachep, void *objp)
559{
560	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
561	return (void **)(objp + cachep->buffer_size - BYTES_PER_WORD);
562}
563
564#else
565
566#define obj_offset(x)			0
567#define obj_size(cachep)		(cachep->buffer_size)
568#define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
569#define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
570#define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})
571
572#endif
573
574/*
575 * Maximum size of an obj (in 2^order pages) and absolute limit for the gfp
576 * order.
577 */
578#if defined(CONFIG_LARGE_ALLOCS)
579#define	MAX_OBJ_ORDER	13	/* up to 32Mb */
580#define	MAX_GFP_ORDER	13	/* up to 32Mb */
581#elif defined(CONFIG_MMU)
582#define	MAX_OBJ_ORDER	5	/* 32 pages */
583#define	MAX_GFP_ORDER	5	/* 32 pages */
584#else
585#define	MAX_OBJ_ORDER	8	/* up to 1Mb */
586#define	MAX_GFP_ORDER	8	/* up to 1Mb */
587#endif
588
589/*
590 * Do not go above this order unless 0 objects fit into the slab.
591 */
592#define	BREAK_GFP_ORDER_HI	1
593#define	BREAK_GFP_ORDER_LO	0
594static int slab_break_gfp_order = BREAK_GFP_ORDER_LO;
595
596/*
597 * Functions for storing/retrieving the cachep and/or slab from the page
598 * allocator.  These are used to find the slab an obj belongs to.  With kfree(),
599 * these are used to find the cache to which an obj belongs.
600 */
601static inline void page_set_cache(struct page *page, struct kmem_cache *cache)
602{
603	page->lru.next = (struct list_head *)cache;
604}
605
606static inline struct kmem_cache *page_get_cache(struct page *page)
607{
608	page = compound_head(page);
609	BUG_ON(!PageSlab(page));
610	return (struct kmem_cache *)page->lru.next;
611}
612
613static inline void page_set_slab(struct page *page, struct slab *slab)
614{
615	page->lru.prev = (struct list_head *)slab;
616}
617
618static inline struct slab *page_get_slab(struct page *page)
619{
620	BUG_ON(!PageSlab(page));
621	return (struct slab *)page->lru.prev;
622}
623
624static inline struct kmem_cache *virt_to_cache(const void *obj)
625{
626	struct page *page = virt_to_head_page(obj);
627	return page_get_cache(page);
628}
629
630static inline struct slab *virt_to_slab(const void *obj)
631{
632	struct page *page = virt_to_head_page(obj);
633	return page_get_slab(page);
634}
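
/*
 * Illustrative sketch (kept out of the build): how the helpers above fit
 * together.  When a new slab is grown, the allocator records the owning cache
 * and slab in every page backing it (the real code does the equivalent while
 * mapping pages to the slab, further down in this file), so that a later
 * kfree() or kmem_cache_free(), given only an address, can recover both.
 */
#if 0
static void example_mark_pages(struct kmem_cache *cachep, struct slab *slabp,
			       void *addr, int nr_pages)
{
	struct page *page = virt_to_page(addr);

	while (nr_pages--) {
		page_set_cache(page, cachep);
		page_set_slab(page, slabp);
		page++;
	}
	/* afterwards virt_to_cache(obj) and virt_to_slab(obj) undo the
	 * mapping for any object address within these pages. */
}
#endif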
635
636static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
637				 unsigned int idx)
638{
639	return slab->s_mem + cache->buffer_size * idx;
640}
641
642/*
643 * We want to avoid an expensive divide : (offset / cache->buffer_size)
644 *   Using the fact that buffer_size is a constant for a particular cache,
645 *   we can replace (offset / cache->buffer_size) by
646 *   reciprocal_divide(offset, cache->reciprocal_buffer_size)
647 */
648static inline unsigned int obj_to_index(const struct kmem_cache *cache,
649					const struct slab *slab, void *obj)
650{
651	u32 offset = (obj - slab->s_mem);
652	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
653}
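
/*
 * Illustrative sketch (kept out of the build): obj_to_index() only works
 * because reciprocal_buffer_size is initialized with
 * reciprocal_value(buffer_size) (see kmem_cache_init() below).  For offsets
 * that can occur within one slab the multiply-and-shift is equivalent to the
 * plain division, e.g. buffer_size 256 and offset 768 both yield index 3.
 */
#if 0
static inline unsigned int example_obj_to_index_slow(
		const struct kmem_cache *cache, const struct slab *slab,
		void *obj)
{
	return (obj - slab->s_mem) / cache->buffer_size;
}
#endif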
654
655/*
656 * These are the default caches for kmalloc. Custom caches can have other sizes.
657 */
658struct cache_sizes malloc_sizes[] = {
659#define CACHE(x) { .cs_size = (x) },
660#include <linux/kmalloc_sizes.h>
661	CACHE(ULONG_MAX)
662#undef CACHE
663};
664EXPORT_SYMBOL(malloc_sizes);
665
666/* Must match cache_sizes above. Out of line to keep cache footprint low. */
667struct cache_names {
668	char *name;
669	char *name_dma;
670};
671
672static struct cache_names __initdata cache_names[] = {
673#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
674#include <linux/kmalloc_sizes.h>
675	{NULL,}
676#undef CACHE
677};
678
679static struct arraycache_init initarray_cache __initdata =
680    { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
681static struct arraycache_init initarray_generic =
682    { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
683
684/* internal cache of cache description objs */
685static struct kmem_cache cache_cache = {
686	.batchcount = 1,
687	.limit = BOOT_CPUCACHE_ENTRIES,
688	.shared = 1,
689	.buffer_size = sizeof(struct kmem_cache),
690	.name = "kmem_cache",
691};
692
693#define BAD_ALIEN_MAGIC 0x01020304ul
694
695#ifdef CONFIG_LOCKDEP
696
697/*
698 * Slab sometimes uses the kmalloc slabs to store the slab headers
699 * for other slabs "off slab".
700 * The locking for this is tricky in that it nests within the locks
701 * of all other slabs in a few places; to deal with this special
702 * locking we put on-slab caches into a separate lock-class.
703 *
704 * We set lock class for alien array caches which are up during init.
705 * The lock annotation will be lost if all cpus of a node go down and
706 * then come back up during hotplug.
707 */
708static struct lock_class_key on_slab_l3_key;
709static struct lock_class_key on_slab_alc_key;
710
711static inline void init_lock_keys(void)
712
713{
714	int q;
715	struct cache_sizes *s = malloc_sizes;
716
717	while (s->cs_size != ULONG_MAX) {
718		for_each_node(q) {
719			struct array_cache **alc;
720			int r;
721			struct kmem_list3 *l3 = s->cs_cachep->nodelists[q];
722			if (!l3 || OFF_SLAB(s->cs_cachep))
723				continue;
724			lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
725			alc = l3->alien;
726			/*
727			 * FIXME: This check for BAD_ALIEN_MAGIC
728			 * should go away when common slab code is taught to
729			 * work even without alien caches.
730 *			 * Currently, non-NUMA code returns BAD_ALIEN_MAGIC
731 *			 * from alloc_alien_cache().
732			 */
733			if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
734				continue;
735			for_each_node(r) {
736				if (alc[r])
737					lockdep_set_class(&alc[r]->lock,
738					     &on_slab_alc_key);
739			}
740		}
741		s++;
742	}
743}
744#else
745static inline void init_lock_keys(void)
746{
747}
748#endif
749
750/*
751 * 1. Guard access to the cache-chain.
752 * 2. Protect sanity of cpu_online_map against cpu hotplug events
753 */
754static DEFINE_MUTEX(cache_chain_mutex);
755static struct list_head cache_chain;
756
757/*
758 * chicken and egg problem: delay the per-cpu array allocation
759 * until the general caches are up.
760 */
761static enum {
762	NONE,
763	PARTIAL_AC,
764	PARTIAL_L3,
765	FULL
766} g_cpucache_up;
767
768/*
769 * used by boot code to determine if it can use slab based allocator
770 */
771int slab_is_available(void)
772{
773	return g_cpucache_up == FULL;
774}
775
776static DEFINE_PER_CPU(struct delayed_work, reap_work);
777
778static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
779{
780	return cachep->array[smp_processor_id()];
781}
782
783static inline struct kmem_cache *__find_general_cachep(size_t size,
784							gfp_t gfpflags)
785{
786	struct cache_sizes *csizep = malloc_sizes;
787
788#if DEBUG
789	/* This happens if someone tries to call
790	 * kmem_cache_create(), or __kmalloc(), before
791	 * the generic caches are initialized.
792	 */
793	BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
794#endif
795	while (size > csizep->cs_size)
796		csizep++;
797
798	/*
799	 * Really subtle: The last entry with cs->cs_size==ULONG_MAX
800	 * has cs_{dma,}cachep==NULL. Thus no special case
801	 * for large kmalloc calls required.
802	 */
803#ifdef CONFIG_ZONE_DMA
804	if (unlikely(gfpflags & GFP_DMA))
805		return csizep->cs_dmacachep;
806#endif
807	return csizep->cs_cachep;
808}
809
810static struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
811{
812	return __find_general_cachep(size, gfpflags);
813}
814
815static size_t slab_mgmt_size(size_t nr_objs, size_t align)
816{
817	return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align);
818}
819
820/*
821 * Calculate the number of objects and left-over bytes for a given buffer size.
822 */
823static void cache_estimate(unsigned long gfporder, size_t buffer_size,
824			   size_t align, int flags, size_t *left_over,
825			   unsigned int *num)
826{
827	int nr_objs;
828	size_t mgmt_size;
829	size_t slab_size = PAGE_SIZE << gfporder;
830
831	/*
832	 * The slab management structure can be either off the slab or
833	 * on it. For the latter case, the memory allocated for a
834	 * slab is used for:
835	 *
836	 * - The struct slab
837	 * - One kmem_bufctl_t for each object
838	 * - Padding to respect alignment of @align
839	 * - @buffer_size bytes for each object
840	 *
841	 * If the slab management structure is off the slab, then the
842	 * alignment will already be calculated into the size. Because
843	 * the slabs are all pages aligned, the objects will be at the
844	 * correct alignment when allocated.
845	 */
846	if (flags & CFLGS_OFF_SLAB) {
847		mgmt_size = 0;
848		nr_objs = slab_size / buffer_size;
849
850		if (nr_objs > SLAB_LIMIT)
851			nr_objs = SLAB_LIMIT;
852	} else {
853		/*
854		 * Ignore padding for the initial guess. The padding
855		 * is at most @align-1 bytes, and @buffer_size is at
856		 * least @align. In the worst case, this result will
857		 * be one greater than the number of objects that fit
858		 * into the memory allocation when taking the padding
859		 * into account.
860		 */
861		nr_objs = (slab_size - sizeof(struct slab)) /
862			  (buffer_size + sizeof(kmem_bufctl_t));
863
864		/*
865		 * This calculated number will be either the right
866		 * amount, or one greater than what we want.
867		 */
868		if (slab_mgmt_size(nr_objs, align) + nr_objs*buffer_size
869		       > slab_size)
870			nr_objs--;
871
872		if (nr_objs > SLAB_LIMIT)
873			nr_objs = SLAB_LIMIT;
874
875		mgmt_size = slab_mgmt_size(nr_objs, align);
876	}
877	*num = nr_objs;
878	*left_over = slab_size - nr_objs*buffer_size - mgmt_size;
879}
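
/*
 * Worked example for cache_estimate() (the numbers are illustrative and
 * assume sizeof(struct slab) == 32 and sizeof(kmem_bufctl_t) == 4, both of
 * which are arch dependent): gfporder 0 (4096-byte slab), buffer_size 128,
 * align 32, on-slab management.
 *   initial guess: (4096 - 32) / (128 + 4) = 30 objects
 *   slab_mgmt_size(30, 32) = ALIGN(32 + 30*4, 32) = 160
 *   160 + 30*128 = 4000 <= 4096, so the guess stands:
 *   *num = 30, *left_over = 4096 - 3840 - 160 = 96 bytes (used for colouring)
 */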
880
881#define slab_error(cachep, msg) __slab_error(__FUNCTION__, cachep, msg)
882
883static void __slab_error(const char *function, struct kmem_cache *cachep,
884			char *msg)
885{
886	printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
887	       function, cachep->name, msg);
888	dump_stack();
889}
890
891/*
892 * By default on NUMA we use alien caches to stage the freeing of
893 * objects allocated from other nodes. This causes massive memory
894 * inefficiencies when using a fake NUMA setup to split memory into a
895 * large number of small nodes, so it can be disabled on the command
896 * line
897  */
898
899static int use_alien_caches __read_mostly = 1;
900static int __init noaliencache_setup(char *s)
901{
902	use_alien_caches = 0;
903	return 1;
904}
905__setup("noaliencache", noaliencache_setup);
906
907#ifdef CONFIG_NUMA
908/*
909 * Special reaping functions for NUMA systems called from cache_reap().
910 * These take care of doing round robin flushing of alien caches (containing
911 * objects freed on different nodes from which they were allocated) and the
912 * flushing of remote pcps by calling drain_node_pages.
913 */
914static DEFINE_PER_CPU(unsigned long, reap_node);
915
916static void init_reap_node(int cpu)
917{
918	int node;
919
920	node = next_node(cpu_to_node(cpu), node_online_map);
921	if (node == MAX_NUMNODES)
922		node = first_node(node_online_map);
923
924	per_cpu(reap_node, cpu) = node;
925}
926
927static void next_reap_node(void)
928{
929	int node = __get_cpu_var(reap_node);
930
931	/*
932	 * Also drain per cpu pages on remote zones
933	 */
934	if (node != numa_node_id())
935		drain_node_pages(node);
936
937	node = next_node(node, node_online_map);
938	if (unlikely(node >= MAX_NUMNODES))
939		node = first_node(node_online_map);
940	__get_cpu_var(reap_node) = node;
941}
942
943#else
944#define init_reap_node(cpu) do { } while (0)
945#define next_reap_node(void) do { } while (0)
946#endif
947
948/*
949 * Initiate the reap timer running on the target CPU.  We run at around 1 to 2Hz
950 * via the workqueue/eventd.
951 * Add the CPU number into the expiration time to minimize the possibility of
952 * the CPUs getting into lockstep and contending for the global cache chain
953 * lock.
954 */
955static void __devinit start_cpu_timer(int cpu)
956{
957	struct delayed_work *reap_work = &per_cpu(reap_work, cpu);
958
959	/*
960	 * When this gets called from do_initcalls via cpucache_init(),
961	 * init_workqueues() has already run, so keventd will be setup
962	 * at that time.
963	 */
964	if (keventd_up() && reap_work->work.func == NULL) {
965		init_reap_node(cpu);
966		INIT_DELAYED_WORK(reap_work, cache_reap);
967		schedule_delayed_work_on(cpu, reap_work,
968					__round_jiffies_relative(HZ, cpu));
969	}
970}
971
972static struct array_cache *alloc_arraycache(int node, int entries,
973					    int batchcount)
974{
975	int memsize = sizeof(void *) * entries + sizeof(struct array_cache);
976	struct array_cache *nc = NULL;
977
978	nc = kmalloc_node(memsize, GFP_KERNEL, node);
979	if (nc) {
980		nc->avail = 0;
981		nc->limit = entries;
982		nc->batchcount = batchcount;
983		nc->touched = 0;
984		spin_lock_init(&nc->lock);
985	}
986	return nc;
987}
988
989/*
990 * Transfer objects from one arraycache to another.
991 * Locking must be handled by the caller.
992 *
993 * Return the number of entries transferred.
994 */
995static int transfer_objects(struct array_cache *to,
996		struct array_cache *from, unsigned int max)
997{
998	/* Figure out how many entries to transfer */
999	int nr = min(min(from->avail, max), to->limit - to->avail);
1000
1001	if (!nr)
1002		return 0;
1003
1004	memcpy(to->entry + to->avail, from->entry + from->avail - nr,
1005			sizeof(void *) * nr);
1006
1007	from->avail -= nr;
1008	to->avail += nr;
1009	to->touched = 1;
1010	return nr;
1011}
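
/*
 * Worked example for transfer_objects(): with from->avail == 10, max == 16,
 * to->limit == 12 and to->avail == 8, nr = min(min(10, 16), 12 - 8) = 4, so
 * the four most recently added (cache-warm) entries from->entry[6..9] are
 * copied to to->entry[8..11].
 */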
1012
1013#ifndef CONFIG_NUMA
1014
1015#define drain_alien_cache(cachep, alien) do { } while (0)
1016#define reap_alien(cachep, l3) do { } while (0)
1017
1018static inline struct array_cache **alloc_alien_cache(int node, int limit)
1019{
1020	return (struct array_cache **)BAD_ALIEN_MAGIC;
1021}
1022
1023static inline void free_alien_cache(struct array_cache **ac_ptr)
1024{
1025}
1026
1027static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
1028{
1029	return 0;
1030}
1031
1032static inline void *alternate_node_alloc(struct kmem_cache *cachep,
1033		gfp_t flags)
1034{
1035	return NULL;
1036}
1037
1038static inline void *____cache_alloc_node(struct kmem_cache *cachep,
1039		 gfp_t flags, int nodeid)
1040{
1041	return NULL;
1042}
1043
1044#else	/* CONFIG_NUMA */
1045
1046static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
1047static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
1048
1049static struct array_cache **alloc_alien_cache(int node, int limit)
1050{
1051	struct array_cache **ac_ptr;
1052	int memsize = sizeof(void *) * nr_node_ids;
1053	int i;
1054
1055	if (limit > 1)
1056		limit = 12;
1057	ac_ptr = kmalloc_node(memsize, GFP_KERNEL, node);
1058	if (ac_ptr) {
1059		for_each_node(i) {
1060			if (i == node || !node_online(i)) {
1061				ac_ptr[i] = NULL;
1062				continue;
1063			}
1064			ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d);
1065			if (!ac_ptr[i]) {
1066				for (i--; i >= 0; i--)
1067					kfree(ac_ptr[i]);
1068				kfree(ac_ptr);
1069				return NULL;
1070			}
1071		}
1072	}
1073	return ac_ptr;
1074}
1075
1076static void free_alien_cache(struct array_cache **ac_ptr)
1077{
1078	int i;
1079
1080	if (!ac_ptr)
1081		return;
1082	for_each_node(i)
1083	    kfree(ac_ptr[i]);
1084	kfree(ac_ptr);
1085}
1086
1087static void __drain_alien_cache(struct kmem_cache *cachep,
1088				struct array_cache *ac, int node)
1089{
1090	struct kmem_list3 *rl3 = cachep->nodelists[node];
1091
1092	if (ac->avail) {
1093		spin_lock(&rl3->list_lock);
1094		/*
1095		 * Stuff objects into the remote node's shared array first.
1096		 * That way we could avoid the overhead of putting the objects
1097		 * into the free lists and getting them back later.
1098		 */
1099		if (rl3->shared)
1100			transfer_objects(rl3->shared, ac, ac->limit);
1101
1102		free_block(cachep, ac->entry, ac->avail, node);
1103		ac->avail = 0;
1104		spin_unlock(&rl3->list_lock);
1105	}
1106}
1107
1108/*
1109 * Called from cache_reap() to regularly drain alien caches round robin.
1110 */
1111static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
1112{
1113	int node = __get_cpu_var(reap_node);
1114
1115	if (l3->alien) {
1116		struct array_cache *ac = l3->alien[node];
1117
1118		if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
1119			__drain_alien_cache(cachep, ac, node);
1120			spin_unlock_irq(&ac->lock);
1121		}
1122	}
1123}
1124
1125static void drain_alien_cache(struct kmem_cache *cachep,
1126				struct array_cache **alien)
1127{
1128	int i = 0;
1129	struct array_cache *ac;
1130	unsigned long flags;
1131
1132	for_each_online_node(i) {
1133		ac = alien[i];
1134		if (ac) {
1135			spin_lock_irqsave(&ac->lock, flags);
1136			__drain_alien_cache(cachep, ac, i);
1137			spin_unlock_irqrestore(&ac->lock, flags);
1138		}
1139	}
1140}
1141
1142static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
1143{
1144	struct slab *slabp = virt_to_slab(objp);
1145	int nodeid = slabp->nodeid;
1146	struct kmem_list3 *l3;
1147	struct array_cache *alien = NULL;
1148	int node;
1149
1150	node = numa_node_id();
1151
1152	/*
1153	 * Make sure we are not freeing an object from another node to the array
1154	 * cache on this cpu.
1155	 */
1156	if (likely(slabp->nodeid == node))
1157		return 0;
1158
1159	l3 = cachep->nodelists[node];
1160	STATS_INC_NODEFREES(cachep);
1161	if (l3->alien && l3->alien[nodeid]) {
1162		alien = l3->alien[nodeid];
1163		spin_lock(&alien->lock);
1164		if (unlikely(alien->avail == alien->limit)) {
1165			STATS_INC_ACOVERFLOW(cachep);
1166			__drain_alien_cache(cachep, alien, nodeid);
1167		}
1168		alien->entry[alien->avail++] = objp;
1169		spin_unlock(&alien->lock);
1170	} else {
1171		spin_lock(&(cachep->nodelists[nodeid])->list_lock);
1172		free_block(cachep, &objp, 1, nodeid);
1173		spin_unlock(&(cachep->nodelists[nodeid])->list_lock);
1174	}
1175	return 1;
1176}
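
/*
 * Example of the alien-cache path above: an object that belongs to node 1 but
 * is freed by a CPU on node 0 is parked in node 0's l3->alien[1] array by
 * cache_free_alien().  It is returned to node 1's slab lists either when that
 * alien array fills up (__drain_alien_cache() above) or periodically from
 * cache_reap() via reap_alien().
 */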
1177#endif
1178
1179static int __cpuinit cpuup_callback(struct notifier_block *nfb,
1180				    unsigned long action, void *hcpu)
1181{
1182	long cpu = (long)hcpu;
1183	struct kmem_cache *cachep;
1184	struct kmem_list3 *l3 = NULL;
1185	int node = cpu_to_node(cpu);
1186	int memsize = sizeof(struct kmem_list3);
1187
1188	switch (action) {
1189	case CPU_UP_PREPARE:
1190		mutex_lock(&cache_chain_mutex);
1191		/*
1192		 * We need to do this right in the beginning since
1193		 * the alloc_arraycache() calls are going to use this list.
1194		 * kmalloc_node allows us to add the slab to the right
1195		 * kmem_list3 and not this cpu's kmem_list3
1196		 */
1197
1198		list_for_each_entry(cachep, &cache_chain, next) {
1199			/*
1200			 * Set up the kmem_list3 for the cpu's node before we can
1201			 * begin anything. Make sure some other cpu on this
1202			 * node has not already allocated this
1203			 */
1204			if (!cachep->nodelists[node]) {
1205				l3 = kmalloc_node(memsize, GFP_KERNEL, node);
1206				if (!l3)
1207					goto bad;
1208				kmem_list3_init(l3);
1209				l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
1210				    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
1211
1212				/*
1213				 * The l3s don't come and go as CPUs come and
1214				 * go.  cache_chain_mutex is sufficient
1215				 * protection here.
1216				 */
1217				cachep->nodelists[node] = l3;
1218			}
1219
1220			spin_lock_irq(&cachep->nodelists[node]->list_lock);
1221			cachep->nodelists[node]->free_limit =
1222				(1 + nr_cpus_node(node)) *
1223				cachep->batchcount + cachep->num;
1224			spin_unlock_irq(&cachep->nodelists[node]->list_lock);
1225		}
1226
1227		/*
1228		 * Now we can go ahead with allocating the shared arrays and
1229		 * array caches
1230		 */
1231		list_for_each_entry(cachep, &cache_chain, next) {
1232			struct array_cache *nc;
1233			struct array_cache *shared = NULL;
1234			struct array_cache **alien = NULL;
1235
1236			nc = alloc_arraycache(node, cachep->limit,
1237						cachep->batchcount);
1238			if (!nc)
1239				goto bad;
1240			if (cachep->shared) {
1241				shared = alloc_arraycache(node,
1242					cachep->shared * cachep->batchcount,
1243					0xbaadf00d);
1244				if (!shared)
1245					goto bad;
1246			}
1247			if (use_alien_caches) {
1248				alien = alloc_alien_cache(node, cachep->limit);
1249				if (!alien)
1250					goto bad;
1251			}
1252			cachep->array[cpu] = nc;
1253			l3 = cachep->nodelists[node];
1254			BUG_ON(!l3);
1255
1256			spin_lock_irq(&l3->list_lock);
1257			if (!l3->shared) {
1258				/*
1259				 * We are serialised from CPU_DEAD or
1260				 * CPU_UP_CANCELLED by the cpucontrol lock
1261				 */
1262				l3->shared = shared;
1263				shared = NULL;
1264			}
1265#ifdef CONFIG_NUMA
1266			if (!l3->alien) {
1267				l3->alien = alien;
1268				alien = NULL;
1269			}
1270#endif
1271			spin_unlock_irq(&l3->list_lock);
1272			kfree(shared);
1273			free_alien_cache(alien);
1274		}
1275		break;
1276	case CPU_ONLINE:
1277		mutex_unlock(&cache_chain_mutex);
1278		start_cpu_timer(cpu);
1279		break;
1280#ifdef CONFIG_HOTPLUG_CPU
1281	case CPU_DOWN_PREPARE:
1282		mutex_lock(&cache_chain_mutex);
1283		break;
1284	case CPU_DOWN_FAILED:
1285		mutex_unlock(&cache_chain_mutex);
1286		break;
1287	case CPU_DEAD:
1288		/*
1289		 * Even if all the cpus of a node are down, we don't free the
1290		 * kmem_list3 of any cache. This is to avoid a race between
1291		 * cpu_down, and a kmalloc allocation from another cpu for
1292		 * memory from the node of the cpu going down.  The list3
1293		 * structure is usually allocated from kmem_cache_create() and
1294		 * gets destroyed at kmem_cache_destroy().
1295		 */
1296		/* fall thru */
1297#endif
1298	case CPU_UP_CANCELED:
1299		list_for_each_entry(cachep, &cache_chain, next) {
1300			struct array_cache *nc;
1301			struct array_cache *shared;
1302			struct array_cache **alien;
1303			cpumask_t mask;
1304
1305			mask = node_to_cpumask(node);
1306			/* cpu is dead; no one can alloc from it. */
1307			nc = cachep->array[cpu];
1308			cachep->array[cpu] = NULL;
1309			l3 = cachep->nodelists[node];
1310
1311			if (!l3)
1312				goto free_array_cache;
1313
1314			spin_lock_irq(&l3->list_lock);
1315
1316			/* Free limit for this kmem_list3 */
1317			l3->free_limit -= cachep->batchcount;
1318			if (nc)
1319				free_block(cachep, nc->entry, nc->avail, node);
1320
1321			if (!cpus_empty(mask)) {
1322				spin_unlock_irq(&l3->list_lock);
1323				goto free_array_cache;
1324			}
1325
1326			shared = l3->shared;
1327			if (shared) {
1328				free_block(cachep, shared->entry,
1329					   shared->avail, node);
1330				l3->shared = NULL;
1331			}
1332
1333			alien = l3->alien;
1334			l3->alien = NULL;
1335
1336			spin_unlock_irq(&l3->list_lock);
1337
1338			kfree(shared);
1339			if (alien) {
1340				drain_alien_cache(cachep, alien);
1341				free_alien_cache(alien);
1342			}
1343free_array_cache:
1344			kfree(nc);
1345		}
1346		/*
1347		 * In the previous loop, all the objects were freed to
1348		 * the respective cache's slabs; now we can go ahead and
1349		 * shrink each nodelist to its limit.
1350		 */
1351		list_for_each_entry(cachep, &cache_chain, next) {
1352			l3 = cachep->nodelists[node];
1353			if (!l3)
1354				continue;
1355			drain_freelist(cachep, l3, l3->free_objects);
1356		}
1357		mutex_unlock(&cache_chain_mutex);
1358		break;
1359	}
1360	return NOTIFY_OK;
1361bad:
1362	return NOTIFY_BAD;
1363}
1364
1365static struct notifier_block __cpuinitdata cpucache_notifier = {
1366	&cpuup_callback, NULL, 0
1367};
1368
1369/*
1370 * swap the static kmem_list3 with kmalloced memory
1371 */
1372static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list,
1373			int nodeid)
1374{
1375	struct kmem_list3 *ptr;
1376
1377	ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, nodeid);
1378	BUG_ON(!ptr);
1379
1380	local_irq_disable();
1381	memcpy(ptr, list, sizeof(struct kmem_list3));
1382	/*
1383	 * Do not assume that spinlocks can be initialized via memcpy:
1384	 */
1385	spin_lock_init(&ptr->list_lock);
1386
1387	MAKE_ALL_LISTS(cachep, ptr, nodeid);
1388	cachep->nodelists[nodeid] = ptr;
1389	local_irq_enable();
1390}
1391
1392/*
1393 * Initialisation.  Called after the page allocator has been initialised and
1394 * before smp_init().
1395 */
1396void __init kmem_cache_init(void)
1397{
1398	size_t left_over;
1399	struct cache_sizes *sizes;
1400	struct cache_names *names;
1401	int i;
1402	int order;
1403	int node;
1404
1405	if (num_possible_nodes() == 1)
1406		use_alien_caches = 0;
1407
1408	for (i = 0; i < NUM_INIT_LISTS; i++) {
1409		kmem_list3_init(&initkmem_list3[i]);
1410		if (i < MAX_NUMNODES)
1411			cache_cache.nodelists[i] = NULL;
1412	}
1413
1414	/*
1415	 * Fragmentation resistance on low memory - only use bigger
1416	 * page orders on machines with more than 32MB of memory.
1417	 */
1418	if (num_physpages > (32 << 20) >> PAGE_SHIFT)
1419		slab_break_gfp_order = BREAK_GFP_ORDER_HI;
1420
1421	/* Bootstrap is tricky, because several objects are allocated
1422	 * from caches that do not exist yet:
1423	 * 1) initialize the cache_cache cache: it contains the struct
1424	 *    kmem_cache structures of all caches, except cache_cache itself:
1425	 *    cache_cache is statically allocated.
1426	 *    Initially an __init data area is used for the head array and the
1427	 *    kmem_list3 structures, it's replaced with a kmalloc allocated
1428	 *    array at the end of the bootstrap.
1429	 * 2) Create the first kmalloc cache.
1430	 *    The struct kmem_cache for the new cache is allocated normally.
1431	 *    An __init data area is used for the head array.
1432	 * 3) Create the remaining kmalloc caches, with minimally sized
1433	 *    head arrays.
1434	 * 4) Replace the __init data head arrays for cache_cache and the first
1435	 *    kmalloc cache with kmalloc allocated arrays.
1436	 * 5) Replace the __init data for kmem_list3 for cache_cache and
1437	 *    the other caches with kmalloc allocated memory.
1438	 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
1439	 */
1440
1441	node = numa_node_id();
1442
1443	/* 1) create the cache_cache */
1444	INIT_LIST_HEAD(&cache_chain);
1445	list_add(&cache_cache.next, &cache_chain);
1446	cache_cache.colour_off = cache_line_size();
1447	cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
1448	cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE];
1449
1450	/*
1451	 * struct kmem_cache size depends on nr_node_ids, which
1452	 * can be less than MAX_NUMNODES.
1453	 */
1454	cache_cache.buffer_size = offsetof(struct kmem_cache, nodelists) +
1455				 nr_node_ids * sizeof(struct kmem_list3 *);
1456#if DEBUG
1457	cache_cache.obj_size = cache_cache.buffer_size;
1458#endif
1459	cache_cache.buffer_size = ALIGN(cache_cache.buffer_size,
1460					cache_line_size());
1461	cache_cache.reciprocal_buffer_size =
1462		reciprocal_value(cache_cache.buffer_size);
1463
1464	for (order = 0; order < MAX_ORDER; order++) {
1465		cache_estimate(order, cache_cache.buffer_size,
1466			cache_line_size(), 0, &left_over, &cache_cache.num);
1467		if (cache_cache.num)
1468			break;
1469	}
1470	BUG_ON(!cache_cache.num);
1471	cache_cache.gfporder = order;
1472	cache_cache.colour = left_over / cache_cache.colour_off;
1473	cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
1474				      sizeof(struct slab), cache_line_size());
1475
1476	/* 2+3) create the kmalloc caches */
1477	sizes = malloc_sizes;
1478	names = cache_names;
1479
1480	/*
1481	 * Initialize the caches that provide memory for the array cache and the
1482	 * kmem_list3 structures first.  Without this, further allocations will
1483	 * bug.
1484	 */
1485
1486	sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
1487					sizes[INDEX_AC].cs_size,
1488					ARCH_KMALLOC_MINALIGN,
1489					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
1490					NULL, NULL);
1491
1492	if (INDEX_AC != INDEX_L3) {
1493		sizes[INDEX_L3].cs_cachep =
1494			kmem_cache_create(names[INDEX_L3].name,
1495				sizes[INDEX_L3].cs_size,
1496				ARCH_KMALLOC_MINALIGN,
1497				ARCH_KMALLOC_FLAGS|SLAB_PANIC,
1498				NULL, NULL);
1499	}
1500
1501	slab_early_init = 0;
1502
1503	while (sizes->cs_size != ULONG_MAX) {
1504		/*
1505		 * For performance, all the general caches are L1 aligned.
1506		 * This should be particularly beneficial on SMP boxes, as it
1507		 * eliminates "false sharing".
1508		 * Note: for systems short on memory, removing the alignment will
1509		 * allow tighter packing of the smaller caches.
1510		 */
1511		if (!sizes->cs_cachep) {
1512			sizes->cs_cachep = kmem_cache_create(names->name,
1513					sizes->cs_size,
1514					ARCH_KMALLOC_MINALIGN,
1515					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
1516					NULL, NULL);
1517		}
1518#ifdef CONFIG_ZONE_DMA
1519		sizes->cs_dmacachep = kmem_cache_create(
1520					names->name_dma,
1521					sizes->cs_size,
1522					ARCH_KMALLOC_MINALIGN,
1523					ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA|
1524						SLAB_PANIC,
1525					NULL, NULL);
1526#endif
1527		sizes++;
1528		names++;
1529	}
1530	/* 4) Replace the bootstrap head arrays */
1531	{
1532		struct array_cache *ptr;
1533
1534		ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
1535
1536		local_irq_disable();
1537		BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache);
1538		memcpy(ptr, cpu_cache_get(&cache_cache),
1539		       sizeof(struct arraycache_init));
1540		/*
1541		 * Do not assume that spinlocks can be initialized via memcpy:
1542		 */
1543		spin_lock_init(&ptr->lock);
1544
1545		cache_cache.array[smp_processor_id()] = ptr;
1546		local_irq_enable();
1547
1548		ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
1549
1550		local_irq_disable();
1551		BUG_ON(cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep)
1552		       != &initarray_generic.cache);
1553		memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep),
1554		       sizeof(struct arraycache_init));
1555		/*
1556		 * Do not assume that spinlocks can be initialized via memcpy:
1557		 */
1558		spin_lock_init(&ptr->lock);
1559
1560		malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] =
1561		    ptr;
1562		local_irq_enable();
1563	}
1564	/* 5) Replace the bootstrap kmem_list3's */
1565	{
1566		int nid;
1567
1568		/* Replace the static kmem_list3 structures for the boot cpu */
1569		init_list(&cache_cache, &initkmem_list3[CACHE_CACHE], node);
1570
1571		for_each_online_node(nid) {
1572			init_list(malloc_sizes[INDEX_AC].cs_cachep,
1573				  &initkmem_list3[SIZE_AC + nid], nid);
1574
1575			if (INDEX_AC != INDEX_L3) {
1576				init_list(malloc_sizes[INDEX_L3].cs_cachep,
1577					  &initkmem_list3[SIZE_L3 + nid], nid);
1578			}
1579		}
1580	}
1581
1582	/* 6) resize the head arrays to their final sizes */
1583	{
1584		struct kmem_cache *cachep;
1585		mutex_lock(&cache_chain_mutex);
1586		list_for_each_entry(cachep, &cache_chain, next)
1587			if (enable_cpucache(cachep))
1588				BUG();
1589		mutex_unlock(&cache_chain_mutex);
1590	}
1591
1592	/* Annotate slab for lockdep -- annotate the malloc caches */
1593	init_lock_keys();
1594
1595
1596	/* Done! */
1597	g_cpucache_up = FULL;
1598
1599	/*
1600	 * Register a cpu startup notifier callback that initializes
1601	 * cpu_cache_get for all new cpus
1602	 */
1603	register_cpu_notifier(&cpucache_notifier);
1604
1605	/*
1606	 * The reap timers are started later, with a module init call: That part
1607	 * of the kernel is not yet operational.
1608	 */
1609}
1610
1611static int __init cpucache_init(void)
1612{
1613	int cpu;
1614
1615	/*
1616	 * Register the timers that return unneeded pages to the page allocator
1617	 */
1618	for_each_online_cpu(cpu)
1619		start_cpu_timer(cpu);
1620	return 0;
1621}
1622__initcall(cpucache_init);
1623
1624/*
1625 * Interface to system's page allocator. No need to hold the cache-lock.
1626 *
1627 * If we requested dmaable memory, we will get it. Even if we
1628 * did not request dmaable memory, we might get it, but that
1629 * would be relatively rare and ignorable.
1630 */
1631static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
1632{
1633	struct page *page;
1634	int nr_pages;
1635	int i;
1636
1637#ifndef CONFIG_MMU
1638	/*
1639	 * Nommu uses slabs for process anonymous memory allocations, and thus
1640	 * requires __GFP_COMP to properly refcount higher order allocations.
1641	 */
1642	flags |= __GFP_COMP;
1643#endif
1644
1645	flags |= cachep->gfpflags;
1646
1647	page = alloc_pages_node(nodeid, flags, cachep->gfporder);
1648	if (!page)
1649		return NULL;
1650
1651	nr_pages = (1 << cachep->gfporder);
1652	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1653		add_zone_page_state(page_zone(page),
1654			NR_SLAB_RECLAIMABLE, nr_pages);
1655	else
1656		add_zone_page_state(page_zone(page),
1657			NR_SLAB_UNRECLAIMABLE, nr_pages);
1658	for (i = 0; i < nr_pages; i++)
1659		__SetPageSlab(page + i);
1660	return page_address(page);
1661}
1662
1663/*
1664 * Interface to system's page release.
1665 */
1666static void kmem_freepages(struct kmem_cache *cachep, void *addr)
1667{
1668	unsigned long i = (1 << cachep->gfporder);
1669	struct page *page = virt_to_page(addr);
1670	const unsigned long nr_freed = i;
1671
1672	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1673		sub_zone_page_state(page_zone(page),
1674				NR_SLAB_RECLAIMABLE, nr_freed);
1675	else
1676		sub_zone_page_state(page_zone(page),
1677				NR_SLAB_UNRECLAIMABLE, nr_freed);
1678	while (i--) {
1679		BUG_ON(!PageSlab(page));
1680		__ClearPageSlab(page);
1681		page++;
1682	}
1683	if (current->reclaim_state)
1684		current->reclaim_state->reclaimed_slab += nr_freed;
1685	free_pages((unsigned long)addr, cachep->gfporder);
1686}
1687
1688static void kmem_rcu_free(struct rcu_head *head)
1689{
1690	struct slab_rcu *slab_rcu = (struct slab_rcu *)head;
1691	struct kmem_cache *cachep = slab_rcu->cachep;
1692
1693	kmem_freepages(cachep, slab_rcu->addr);
1694	if (OFF_SLAB(cachep))
1695		kmem_cache_free(cachep->slabp_cache, slab_rcu);
1696}
1697
1698#if DEBUG
1699
1700#ifdef CONFIG_DEBUG_PAGEALLOC
1701static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
1702			    unsigned long caller)
1703{
1704	int size = obj_size(cachep);
1705
1706	addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];
1707
1708	if (size < 5 * sizeof(unsigned long))
1709		return;
1710
1711	*addr++ = 0x12345678;
1712	*addr++ = caller;
1713	*addr++ = smp_processor_id();
1714	size -= 3 * sizeof(unsigned long);
1715	{
1716		unsigned long *sptr = &caller;
1717		unsigned long svalue;
1718
1719		while (!kstack_end(sptr)) {
1720			svalue = *sptr++;
1721			if (kernel_text_address(svalue)) {
1722				*addr++ = svalue;
1723				size -= sizeof(unsigned long);
1724				if (size <= sizeof(unsigned long))
1725					break;
1726			}
1727		}
1728
1729	}
1730	*addr++ = 0x87654321;
1731}
1732#endif
1733
1734static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
1735{
1736	int size = obj_size(cachep);
1737	addr = &((char *)addr)[obj_offset(cachep)];
1738
1739	memset(addr, val, size);
1740	*(unsigned char *)(addr + size - 1) = POISON_END;
1741}
1742
1743static void dump_line(char *data, int offset, int limit)
1744{
1745	int i;
1746	unsigned char error = 0;
1747	int bad_count = 0;
1748
1749	printk(KERN_ERR "%03x:", offset);
1750	for (i = 0; i < limit; i++) {
1751		if (data[offset + i] != POISON_FREE) {
1752			error = data[offset + i];
1753			bad_count++;
1754		}
1755		printk(" %02x", (unsigned char)data[offset + i]);
1756	}
1757	printk("\n");
1758
1759	if (bad_count == 1) {
1760		error ^= POISON_FREE;
1761		if (!(error & (error - 1))) {
1762			printk(KERN_ERR "Single bit error detected. Probably "
1763					"bad RAM.\n");
1764#ifdef CONFIG_X86
1765			printk(KERN_ERR "Run memtest86+ or a similar memory "
1766					"test tool.\n");
1767#else
1768			printk(KERN_ERR "Run a memory test tool.\n");
1769#endif
1770		}
1771	}
1772}
1773#endif
1774
1775#if DEBUG
1776
1777static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
1778{
1779	int i, size;
1780	char *realobj;
1781
1782	if (cachep->flags & SLAB_RED_ZONE) {
1783		printk(KERN_ERR "Redzone: 0x%llx/0x%llx.\n",
1784			*dbg_redzone1(cachep, objp),
1785			*dbg_redzone2(cachep, objp));
1786	}
1787
1788	if (cachep->flags & SLAB_STORE_USER) {
1789		printk(KERN_ERR "Last user: [<%p>]",
1790			*dbg_userword(cachep, objp));
1791		print_symbol("(%s)",
1792				(unsigned long)*dbg_userword(cachep, objp));
1793		printk("\n");
1794	}
1795	realobj = (char *)objp + obj_offset(cachep);
1796	size = obj_size(cachep);
1797	for (i = 0; i < size && lines; i += 16, lines--) {
1798		int limit;
1799		limit = 16;
1800		if (i + limit > size)
1801			limit = size - i;
1802		dump_line(realobj, i, limit);
1803	}
1804}
1805
1806static void check_poison_obj(struct kmem_cache *cachep, void *objp)
1807{
1808	char *realobj;
1809	int size, i;
1810	int lines = 0;
1811
1812	realobj = (char *)objp + obj_offset(cachep);
1813	size = obj_size(cachep);
1814
1815	for (i = 0; i < size; i++) {
1816		char exp = POISON_FREE;
1817		if (i == size - 1)
1818			exp = POISON_END;
1819		if (realobj[i] != exp) {
1820			int limit;
1821			/* Mismatch ! */
1822			/* Print header */
1823			if (lines == 0) {
1824				printk(KERN_ERR
1825					"Slab corruption: %s start=%p, len=%d\n",
1826					cachep->name, realobj, size);
1827				print_objinfo(cachep, objp, 0);
1828			}
1829			/* Hexdump the affected line */
1830			i = (i / 16) * 16;
1831			limit = 16;
1832			if (i + limit > size)
1833				limit = size - i;
1834			dump_line(realobj, i, limit);
1835			i += 16;
1836			lines++;
1837			/* Limit to 5 lines */
1838			if (lines > 5)
1839				break;
1840		}
1841	}
1842	if (lines != 0) {
1843		/* Print some data about the neighboring objects, if they
1844		 * exist:
1845		 */
1846		struct slab *slabp = virt_to_slab(objp);
1847		unsigned int objnr;
1848
1849		objnr = obj_to_index(cachep, slabp, objp);
1850		if (objnr) {
1851			objp = index_to_obj(cachep, slabp, objnr - 1);
1852			realobj = (char *)objp + obj_offset(cachep);
1853			printk(KERN_ERR "Prev obj: start=%p, len=%d\n",
1854			       realobj, size);
1855			print_objinfo(cachep, objp, 2);
1856		}
1857		if (objnr + 1 < cachep->num) {
1858			objp = index_to_obj(cachep, slabp, objnr + 1);
1859			realobj = (char *)objp + obj_offset(cachep);
1860			printk(KERN_ERR "Next obj: start=%p, len=%d\n",
1861			       realobj, size);
1862			print_objinfo(cachep, objp, 2);
1863		}
1864	}
1865}
1866#endif
1867
1868#if DEBUG
1869/**
1870 * slab_destroy_objs - destroy a slab and its objects
1871 * @cachep: cache pointer being destroyed
1872 * @slabp: slab pointer being destroyed
1873 *
1874 * Call the registered destructor for each object in a slab that is being
1875 * destroyed.
1876 */
1877static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
1878{
1879	int i;
1880	for (i = 0; i < cachep->num; i++) {
1881		void *objp = index_to_obj(cachep, slabp, i);
1882
1883		if (cachep->flags & SLAB_POISON) {
1884#ifdef CONFIG_DEBUG_PAGEALLOC
1885			if (cachep->buffer_size % PAGE_SIZE == 0 &&
1886					OFF_SLAB(cachep))
1887				kernel_map_pages(virt_to_page(objp),
1888					cachep->buffer_size / PAGE_SIZE, 1);
1889			else
1890				check_poison_obj(cachep, objp);
1891#else
1892			check_poison_obj(cachep, objp);
1893#endif
1894		}
1895		if (cachep->flags & SLAB_RED_ZONE) {
1896			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
1897				slab_error(cachep, "start of a freed object "
1898					   "was overwritten");
1899			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
1900				slab_error(cachep, "end of a freed object "
1901					   "was overwritten");
1902		}
1903		if (cachep->dtor && !(cachep->flags & SLAB_POISON))
1904			(cachep->dtor) (objp + obj_offset(cachep), cachep, 0);
1905	}
1906}
1907#else
1908static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
1909{
1910	if (cachep->dtor) {
1911		int i;
1912		for (i = 0; i < cachep->num; i++) {
1913			void *objp = index_to_obj(cachep, slabp, i);
1914			(cachep->dtor) (objp, cachep, 0);
1915		}
1916	}
1917}
1918#endif
1919
1920/**
1921 * slab_destroy - destroy and release all objects in a slab
1922 * @cachep: cache pointer being destroyed
1923 * @slabp: slab pointer being destroyed
1924 *
1925 * Destroy all the objs in a slab, and release the mem back to the system.
1926 * Before calling the slab must have been unlinked from the cache.  The
1927 * cache-lock is not held/needed.
1928 */
1929static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
1930{
1931	void *addr = slabp->s_mem - slabp->colouroff;
1932
1933	slab_destroy_objs(cachep, slabp);
1934	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
1935		struct slab_rcu *slab_rcu;
1936
1937		slab_rcu = (struct slab_rcu *)slabp;
1938		slab_rcu->cachep = cachep;
1939		slab_rcu->addr = addr;
1940		call_rcu(&slab_rcu->head, kmem_rcu_free);
1941	} else {
1942		kmem_freepages(cachep, addr);
1943		if (OFF_SLAB(cachep))
1944			kmem_cache_free(cachep->slabp_cache, slabp);
1945	}
1946}
1947
1948/*
1949 * For setting up all the kmem_list3s for a cache whose buffer_size is the
1950 * same as the size of struct kmem_list3.
1951 */
1952static void __init set_up_list3s(struct kmem_cache *cachep, int index)
1953{
1954	int node;
1955
1956	for_each_online_node(node) {
1957		cachep->nodelists[node] = &initkmem_list3[index + node];
1958		cachep->nodelists[node]->next_reap = jiffies +
1959		    REAPTIMEOUT_LIST3 +
1960		    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
1961	}
1962}
1963
1964static void __kmem_cache_destroy(struct kmem_cache *cachep)
1965{
1966	int i;
1967	struct kmem_list3 *l3;
1968
1969	for_each_online_cpu(i)
1970	    kfree(cachep->array[i]);
1971
1972	/* NUMA: free the list3 structures */
1973	for_each_online_node(i) {
1974		l3 = cachep->nodelists[i];
1975		if (l3) {
1976			kfree(l3->shared);
1977			free_alien_cache(l3->alien);
1978			kfree(l3);
1979		}
1980	}
1981	kmem_cache_free(&cache_cache, cachep);
1982}
1983
1984
1985/**
1986 * calculate_slab_order - calculate size (page order) of slabs
1987 * @cachep: pointer to the cache that is being created
1988 * @size: size of objects to be created in this cache.
1989 * @align: required alignment for the objects.
1990 * @flags: slab allocation flags
1991 *
1992 * Also calculates the number of objects per slab.
1993 *
1994 * This could be made much more intelligent.  For now, try to avoid using
1995 * high order pages for slabs.  When the gfp() functions are more friendly
1996 * towards high-order requests, this should be changed.
1997 */
1998static size_t calculate_slab_order(struct kmem_cache *cachep,
1999			size_t size, size_t align, unsigned long flags)
2000{
2001	unsigned long offslab_limit;
2002	size_t left_over = 0;
2003	int gfporder;
2004
2005	for (gfporder = 0; gfporder <= MAX_GFP_ORDER; gfporder++) {
2006		unsigned int num;
2007		size_t remainder;
2008
2009		cache_estimate(gfporder, size, align, flags, &remainder, &num);
2010		if (!num)
2011			continue;
2012
2013		if (flags & CFLGS_OFF_SLAB) {
2014			/*
2015			 * Max number of objs-per-slab for caches which
2016			 * use off-slab slabs. Needed to avoid a possible
2017			 * looping condition in cache_grow().
2018			 */
2019			offslab_limit = size - sizeof(struct slab);
2020			offslab_limit /= sizeof(kmem_bufctl_t);
2021
2022			if (num > offslab_limit)
2023				break;
2024		}
2025
2026		/* Found something acceptable - save it away */
2027		cachep->num = num;
2028		cachep->gfporder = gfporder;
2029		left_over = remainder;
2030
2031		/*
2032		 * A VFS-reclaimable slab tends to have most allocations
2033		 * as GFP_NOFS and we really don't want to have to be allocating
2034		 * higher-order pages when we are unable to shrink dcache.
2035		 */
2036		if (flags & SLAB_RECLAIM_ACCOUNT)
2037			break;
2038
2039		/*
2040		 * A large number of objects is good, but very large slabs are
2041		 * currently bad for the gfp()s.
2042		 */
2043		if (gfporder >= slab_break_gfp_order)
2044			break;
2045
2046		/*
2047		 * Acceptable internal fragmentation?
2048		 */
2049		if (left_over * 8 <= (PAGE_SIZE << gfporder))
2050			break;
2051	}
2052	return left_over;
2053}
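
/*
 * Worked example (illustrative only: it ignores slab-management overhead,
 * debug padding and alignment, and assumes slab_break_gfp_order == 1).
 * With PAGE_SIZE == 4096 and 1100-byte objects, gfporder 0 gives num = 3
 * and left_over = 796.  Since 796 * 8 = 6368 > 4096, the fragmentation
 * check rejects order 0 and the loop tries gfporder 1.  That gives
 * num = 7 and left_over = 492, which is accepted (it reaches the
 * slab_break_gfp_order limit, and 492 * 8 = 3936 <= 8192 would pass the
 * fragmentation check too), so 492 bytes are left over for colouring.
 */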
2054
2055static int setup_cpu_cache(struct kmem_cache *cachep)
2056{
2057	if (g_cpucache_up == FULL)
2058		return enable_cpucache(cachep);
2059
2060	if (g_cpucache_up == NONE) {
2061		/*
2062		 * Note: the first kmem_cache_create must create the cache
2063		 * that's used by kmalloc(24), otherwise the creation of
2064		 * further caches will BUG().
2065		 */
2066		cachep->array[smp_processor_id()] = &initarray_generic.cache;
2067
2068		/*
2069		 * If the cache that's used by kmalloc(sizeof(kmem_list3)) is
2070		 * the first cache, then we need to set up all its list3s,
2071		 * otherwise the creation of further caches will BUG().
2072		 */
2073		set_up_list3s(cachep, SIZE_AC);
2074		if (INDEX_AC == INDEX_L3)
2075			g_cpucache_up = PARTIAL_L3;
2076		else
2077			g_cpucache_up = PARTIAL_AC;
2078	} else {
2079		cachep->array[smp_processor_id()] =
2080			kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
2081
2082		if (g_cpucache_up == PARTIAL_AC) {
2083			set_up_list3s(cachep, SIZE_L3);
2084			g_cpucache_up = PARTIAL_L3;
2085		} else {
2086			int node;
2087			for_each_online_node(node) {
2088				cachep->nodelists[node] =
2089				    kmalloc_node(sizeof(struct kmem_list3),
2090						GFP_KERNEL, node);
2091				BUG_ON(!cachep->nodelists[node]);
2092				kmem_list3_init(cachep->nodelists[node]);
2093			}
2094		}
2095	}
2096	cachep->nodelists[numa_node_id()]->next_reap =
2097			jiffies + REAPTIMEOUT_LIST3 +
2098			((unsigned long)cachep) % REAPTIMEOUT_LIST3;
2099
2100	cpu_cache_get(cachep)->avail = 0;
2101	cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
2102	cpu_cache_get(cachep)->batchcount = 1;
2103	cpu_cache_get(cachep)->touched = 0;
2104	cachep->batchcount = 1;
2105	cachep->limit = BOOT_CPUCACHE_ENTRIES;
2106	return 0;
2107}
2108
2109/**
2110 * kmem_cache_create - Create a cache.
2111 * @name: A string which is used in /proc/slabinfo to identify this cache.
2112 * @size: The size of objects to be created in this cache.
2113 * @align: The required alignment for the objects.
2114 * @flags: SLAB flags
2115 * @ctor: A constructor for the objects.
2116 * @dtor: A destructor for the objects.
2117 *
2118 * Returns a ptr to the cache on success, NULL on failure.
2119 * Cannot be called within an interrupt, but it can be interrupted.
2120 * The @ctor is run when new pages are allocated by the cache
2121 * and the @dtor is run before the pages are handed back.
2122 *
2123 * @name must be valid until the cache is destroyed. This implies that
2124 * the module calling this has to destroy the cache before getting unloaded.
2125 *
2126 * The flags are
2127 *
2128 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
2129 * to catch references to uninitialised memory.
2130 *
2131 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
2132 * for buffer overruns.
2133 *
2134 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
2135 * cacheline.  This can be beneficial if you're counting cycles as closely
2136 * as davem.
2137 */
2138struct kmem_cache *
2139kmem_cache_create (const char *name, size_t size, size_t align,
2140	unsigned long flags,
2141	void (*ctor)(void*, struct kmem_cache *, unsigned long),
2142	void (*dtor)(void*, struct kmem_cache *, unsigned long))
2143{
2144	size_t left_over, slab_size, ralign;
2145	struct kmem_cache *cachep = NULL, *pc;
2146
2147	/*
2148	 * Sanity checks... these are all serious usage bugs.
2149	 */
2150	if (!name || in_interrupt() || (size < BYTES_PER_WORD) ||
2151	    (size > (1 << MAX_OBJ_ORDER) * PAGE_SIZE) || (dtor && !ctor)) {
2152		printk(KERN_ERR "%s: Early error in slab %s\n", __FUNCTION__,
2153				name);
2154		BUG();
2155	}
2156
2157	/*
2158	 * We use cache_chain_mutex to ensure a consistent view of
2159	 * cpu_online_map as well.  Please see cpuup_callback().
2160	 */
2161	mutex_lock(&cache_chain_mutex);
2162
2163	list_for_each_entry(pc, &cache_chain, next) {
2164		char tmp;
2165		int res;
2166
2167		/*
2168		 * This happens when the module gets unloaded and doesn't
2169		 * destroy its slab cache and no-one else reuses the vmalloc
2170		 * area of the module.  Print a warning.
2171		 */
2172		res = probe_kernel_address(pc->name, tmp);
2173		if (res) {
2174			printk(KERN_ERR
2175			       "SLAB: cache with size %d has lost its name\n",
2176			       pc->buffer_size);
2177			continue;
2178		}
2179
2180		if (!strcmp(pc->name, name)) {
2181			printk(KERN_ERR
2182			       "kmem_cache_create: duplicate cache %s\n", name);
2183			dump_stack();
2184			goto oops;
2185		}
2186	}
2187
2188#if DEBUG
2189	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
2190#if FORCED_DEBUG
2191	/*
2192	 * Enable redzoning and last user accounting, except for caches with
2193	 * large objects, if the increased size would increase the object size
2194	 * above the next power of two: caches with object sizes just above a
2195	 * power of two have a significant amount of internal fragmentation.
2196	 */
2197	if (size < 4096 || fls(size - 1) == fls(size-1 + 3 * BYTES_PER_WORD))
2198		flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
2199	if (!(flags & SLAB_DESTROY_BY_RCU))
2200		flags |= SLAB_POISON;
2201#endif
2202	if (flags & SLAB_DESTROY_BY_RCU)
2203		BUG_ON(flags & SLAB_POISON);
2204#endif
2205	if (flags & SLAB_DESTROY_BY_RCU)
2206		BUG_ON(dtor);
2207
2208	/*
2209	 * Always check the flags: a caller might be expecting debug support that
2210	 * isn't available.
2211	 */
2212	BUG_ON(flags & ~CREATE_MASK);
2213
2214	/*
2215	 * Check that size is in terms of words.  This is needed to avoid
2216	 * unaligned accesses for some archs when redzoning is used, and makes
2217	 * sure any on-slab bufctl's are also correctly aligned.
2218	 */
2219	if (size & (BYTES_PER_WORD - 1)) {
2220		size += (BYTES_PER_WORD - 1);
2221		size &= ~(BYTES_PER_WORD - 1);
2222	}
2223
2224	/* calculate the final buffer alignment: */
2225
2226	/* 1) arch recommendation: can be overridden for debug */
2227	if (flags & SLAB_HWCACHE_ALIGN) {
2228		/*
2229		 * Default alignment: as specified by the arch code.  Except if
2230		 * an object is really small, then squeeze multiple objects into
2231		 * one cacheline.
2232		 */
2233		ralign = cache_line_size();
2234		while (size <= ralign / 2)
2235			ralign /= 2;
2236	} else {
2237		ralign = BYTES_PER_WORD;
2238	}
2239
2240	/*
2241	 * Redzoning and user store require unsigned long long alignment.  Note
2242	 * this will be overridden by architecture or caller mandated alignment
2243	 * if either of those is greater.
2244	 */
2245	if (flags & SLAB_RED_ZONE || flags & SLAB_STORE_USER)
2246		ralign = __alignof__(unsigned long long);
2247
2248	/* 2) arch mandated alignment */
2249	if (ralign < ARCH_SLAB_MINALIGN) {
2250		ralign = ARCH_SLAB_MINALIGN;
2251	}
2252	/* 3) caller mandated alignment */
2253	if (ralign < align) {
2254		ralign = align;
2255	}
2256	/* disable debug if necessary */
2257	if (ralign > __alignof__(unsigned long long))
2258		flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
2259	/*
2260	 * 4) Store it.
2261	 */
2262	align = ralign;
2263
2264	/* Get cache's description obj. */
2265	cachep = kmem_cache_zalloc(&cache_cache, GFP_KERNEL);
2266	if (!cachep)
2267		goto oops;
2268
2269#if DEBUG
2270	cachep->obj_size = size;
2271
2272	/*
2273	 * Both debugging options require word-alignment which is calculated
2274	 * into align above.
2275	 */
2276	if (flags & SLAB_RED_ZONE) {
2277		/* add space for red zone words */
2278		cachep->obj_offset += sizeof(unsigned long long);
2279		size += 2 * sizeof(unsigned long long);
2280	}
2281	if (flags & SLAB_STORE_USER) {
2282		/* The user store requires one word of storage behind the end
2283		 * of the real object.
2284		 */
2285		size += BYTES_PER_WORD;
2286	}
2287#if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
2288	if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
2289	    && cachep->obj_size > cache_line_size() && size < PAGE_SIZE) {
2290		cachep->obj_offset += PAGE_SIZE - size;
2291		size = PAGE_SIZE;
2292	}
2293#endif
2294#endif
2295
2296	/*
2297	 * Determine if the slab management is 'on' or 'off' slab.
2298	 * (bootstrapping cannot cope with offslab caches so don't do
2299	 * it too early on.)
2300	 */
2301	if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init)
2302		/*
2303		 * Size is large, so assume it is best to place the slab management obj
2304		 * off-slab (should allow better packing of objs).
2305		 */
2306		flags |= CFLGS_OFF_SLAB;
2307
2308	size = ALIGN(size, align);
2309
2310	left_over = calculate_slab_order(cachep, size, align, flags);
2311
2312	if (!cachep->num) {
2313		printk(KERN_ERR
2314		       "kmem_cache_create: couldn't create cache %s.\n", name);
2315		kmem_cache_free(&cache_cache, cachep);
2316		cachep = NULL;
2317		goto oops;
2318	}
2319	slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
2320			  + sizeof(struct slab), align);
2321
2322	/*
2323	 * If the slab has been placed off-slab and we have enough space, then
2324	 * move it on-slab. This is at the expense of any extra colouring.
2325	 */
2326	if (flags & CFLGS_OFF_SLAB && left_over >= slab_size) {
2327		flags &= ~CFLGS_OFF_SLAB;
2328		left_over -= slab_size;
2329	}
2330
2331	if (flags & CFLGS_OFF_SLAB) {
2332		/* really off slab. No need for manual alignment */
2333		slab_size =
2334		    cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab);
2335	}
2336
2337	cachep->colour_off = cache_line_size();
2338	/* Offset must be a multiple of the alignment. */
2339	if (cachep->colour_off < align)
2340		cachep->colour_off = align;
2341	cachep->colour = left_over / cachep->colour_off;
2342	cachep->slab_size = slab_size;
2343	cachep->flags = flags;
2344	cachep->gfpflags = 0;
2345	if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
2346		cachep->gfpflags |= GFP_DMA;
2347	cachep->buffer_size = size;
2348	cachep->reciprocal_buffer_size = reciprocal_value(size);
2349
2350	if (flags & CFLGS_OFF_SLAB) {
2351		cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u);
2352		/*
2353		 * This lookup could only fail for one of the malloc_sizes caches.
2354		 * But since we go off-slab only for object sizes greater than
2355		 * PAGE_SIZE/8, and the malloc_sizes caches are created in ascending
2356		 * size order, this should never happen.
2357		 * Leave a BUG_ON here anyway, in case it ever does.
2358		 */
2359		BUG_ON(!cachep->slabp_cache);
2360	}
2361	cachep->ctor = ctor;
2362	cachep->dtor = dtor;
2363	cachep->name = name;
2364
2365	if (setup_cpu_cache(cachep)) {
2366		__kmem_cache_destroy(cachep);
2367		cachep = NULL;
2368		goto oops;
2369	}
2370
2371	/* cache setup completed, link it into the list */
2372	list_add(&cachep->next, &cache_chain);
2373oops:
2374	if (!cachep && (flags & SLAB_PANIC))
2375		panic("kmem_cache_create(): failed to create slab `%s'\n",
2376		      name);
2377	mutex_unlock(&cache_chain_mutex);
2378	return cachep;
2379}
2380EXPORT_SYMBOL(kmem_cache_create);
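
/*
 * Usage sketch (illustrative only - struct foo, foo_ctor and foo_cachep
 * are hypothetical names, not part of this file).  A typical module
 * creates its cache once at init time:
 *
 *	static struct kmem_cache *foo_cachep;
 *
 *	static void foo_ctor(void *obj, struct kmem_cache *cachep,
 *			     unsigned long flags)
 *	{
 *		memset(obj, 0, sizeof(struct foo));
 *	}
 *
 *	foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo), 0,
 *				       SLAB_HWCACHE_ALIGN, foo_ctor, NULL);
 *	if (!foo_cachep)
 *		return -ENOMEM;
 */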
2381
2382#if DEBUG
2383static void check_irq_off(void)
2384{
2385	BUG_ON(!irqs_disabled());
2386}
2387
2388static void check_irq_on(void)
2389{
2390	BUG_ON(irqs_disabled());
2391}
2392
2393static void check_spinlock_acquired(struct kmem_cache *cachep)
2394{
2395#ifdef CONFIG_SMP
2396	check_irq_off();
2397	assert_spin_locked(&cachep->nodelists[numa_node_id()]->list_lock);
2398#endif
2399}
2400
2401static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
2402{
2403#ifdef CONFIG_SMP
2404	check_irq_off();
2405	assert_spin_locked(&cachep->nodelists[node]->list_lock);
2406#endif
2407}
2408
2409#else
2410#define check_irq_off()	do { } while(0)
2411#define check_irq_on()	do { } while(0)
2412#define check_spinlock_acquired(x) do { } while(0)
2413#define check_spinlock_acquired_node(x, y) do { } while(0)
2414#endif
2415
2416static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
2417			struct array_cache *ac,
2418			int force, int node);
2419
2420static void do_drain(void *arg)
2421{
2422	struct kmem_cache *cachep = arg;
2423	struct array_cache *ac;
2424	int node = numa_node_id();
2425
2426	check_irq_off();
2427	ac = cpu_cache_get(cachep);
2428	spin_lock(&cachep->nodelists[node]->list_lock);
2429	free_block(cachep, ac->entry, ac->avail, node);
2430	spin_unlock(&cachep->nodelists[node]->list_lock);
2431	ac->avail = 0;
2432}
2433
2434static void drain_cpu_caches(struct kmem_cache *cachep)
2435{
2436	struct kmem_list3 *l3;
2437	int node;
2438
2439	on_each_cpu(do_drain, cachep, 1, 1);
2440	check_irq_on();
2441	for_each_online_node(node) {
2442		l3 = cachep->nodelists[node];
2443		if (l3 && l3->alien)
2444			drain_alien_cache(cachep, l3->alien);
2445	}
2446
2447	for_each_online_node(node) {
2448		l3 = cachep->nodelists[node];
2449		if (l3)
2450			drain_array(cachep, l3, l3->shared, 1, node);
2451	}
2452}
2453
2454/*
2455 * Remove slabs from the list of free slabs.
2456 * Specify the number of slabs to drain in tofree.
2457 *
2458 * Returns the actual number of slabs released.
2459 */
2460static int drain_freelist(struct kmem_cache *cache,
2461			struct kmem_list3 *l3, int tofree)
2462{
2463	struct list_head *p;
2464	int nr_freed;
2465	struct slab *slabp;
2466
2467	nr_freed = 0;
2468	while (nr_freed < tofree && !list_empty(&l3->slabs_free)) {
2469
2470		spin_lock_irq(&l3->list_lock);
2471		p = l3->slabs_free.prev;
2472		if (p == &l3->slabs_free) {
2473			spin_unlock_irq(&l3->list_lock);
2474			goto out;
2475		}
2476
2477		slabp = list_entry(p, struct slab, list);
2478#if DEBUG
2479		BUG_ON(slabp->inuse);
2480#endif
2481		list_del(&slabp->list);
2482		/*
2483		 * Safe to drop the lock. The slab is no longer linked
2484		 * to the cache.
2485		 */
2486		l3->free_objects -= cache->num;
2487		spin_unlock_irq(&l3->list_lock);
2488		slab_destroy(cache, slabp);
2489		nr_freed++;
2490	}
2491out:
2492	return nr_freed;
2493}
2494
2495/* Called with cache_chain_mutex held to protect against cpu hotplug */
2496static int __cache_shrink(struct kmem_cache *cachep)
2497{
2498	int ret = 0, i = 0;
2499	struct kmem_list3 *l3;
2500
2501	drain_cpu_caches(cachep);
2502
2503	check_irq_on();
2504	for_each_online_node(i) {
2505		l3 = cachep->nodelists[i];
2506		if (!l3)
2507			continue;
2508
2509		drain_freelist(cachep, l3, l3->free_objects);
2510
2511		ret += !list_empty(&l3->slabs_full) ||
2512			!list_empty(&l3->slabs_partial);
2513	}
2514	return (ret ? 1 : 0);
2515}
2516
2517/**
2518 * kmem_cache_shrink - Shrink a cache.
2519 * @cachep: The cache to shrink.
2520 *
2521 * Releases as many slabs as possible for a cache.
2522 * To help debugging, a zero exit status indicates all slabs were released.
2523 */
2524int kmem_cache_shrink(struct kmem_cache *cachep)
2525{
2526	int ret;
2527	BUG_ON(!cachep || in_interrupt());
2528
2529	mutex_lock(&cache_chain_mutex);
2530	ret = __cache_shrink(cachep);
2531	mutex_unlock(&cache_chain_mutex);
2532	return ret;
2533}
2534EXPORT_SYMBOL(kmem_cache_shrink);
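
/*
 * Usage sketch (illustrative only; foo_cachep is a hypothetical cache):
 * a low-memory or housekeeping path may hand free slabs back to the page
 * allocator:
 *
 *	if (kmem_cache_shrink(foo_cachep) != 0)
 *		printk(KERN_DEBUG "foo_cache still has objects in use\n");
 */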
2535
2536/**
2537 * kmem_cache_destroy - delete a cache
2538 * @cachep: the cache to destroy
2539 *
2540 * Remove a &struct kmem_cache object from the slab cache.
2541 *
2542 * It is expected this function will be called by a module when it is
2543 * unloaded.  This will remove the cache completely, and avoid a duplicate
2544 * cache being allocated each time a module is loaded and unloaded, if the
2545 * module doesn't have persistent in-kernel storage across loads and unloads.
2546 *
2547 * The cache must be empty before calling this function.
2548 *
2549 * The caller must guarantee that no one will allocate memory from the cache
2550 * during the kmem_cache_destroy().
2551 */
2552void kmem_cache_destroy(struct kmem_cache *cachep)
2553{
2554	BUG_ON(!cachep || in_interrupt());
2555
2556	/* Find the cache in the chain of caches. */
2557	mutex_lock(&cache_chain_mutex);
2558	/*
2559	 * The chain is never empty; cache_cache is never destroyed.
2560	 */
2561	list_del(&cachep->next);
2562	if (__cache_shrink(cachep)) {
2563		slab_error(cachep, "Can't free all objects");
2564		list_add(&cachep->next, &cache_chain);
2565		mutex_unlock(&cache_chain_mutex);
2566		return;
2567	}
2568
2569	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
2570		synchronize_rcu();
2571
2572	__kmem_cache_destroy(cachep);
2573	mutex_unlock(&cache_chain_mutex);
2574}
2575EXPORT_SYMBOL(kmem_cache_destroy);
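
/*
 * Usage sketch (illustrative only; foo_cachep and foo_exit are
 * hypothetical).  A module must free every outstanding object before
 * destroying its cache in the exit path:
 *
 *	static void __exit foo_exit(void)
 *	{
 *		kmem_cache_destroy(foo_cachep);
 *	}
 */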
2576
2577/*
2578 * Get the memory for a slab management obj.
2579 * For a slab cache whose slab descriptor is off-slab, slab descriptors
2580 * always come from the malloc_sizes caches.  The slab descriptor cannot
2581 * come from the same cache that is being created because, when we search
2582 * for an appropriate cache for these descriptors in kmem_cache_create(),
2583 * we search through the malloc_sizes array.  If we were creating a
2584 * malloc_sizes cache here, it would not yet be visible to
2585 * kmem_find_general_cachep() until the initialization is complete.
2586 * Hence slabp_cache can never be the same as the cache being created.
2587 */
2588static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
2589				   int colour_off, gfp_t local_flags,
2590				   int nodeid)
2591{
2592	struct slab *slabp;
2593
2594	if (OFF_SLAB(cachep)) {
2595		/* Slab management obj is off-slab. */
2596		slabp = kmem_cache_alloc_node(cachep->slabp_cache,
2597					      local_flags & ~GFP_THISNODE, nodeid);
2598		if (!slabp)
2599			return NULL;
2600	} else {
2601		slabp = objp + colour_off;
2602		colour_off += cachep->slab_size;
2603	}
2604	slabp->inuse = 0;
2605	slabp->colouroff = colour_off;
2606	slabp->s_mem = objp + colour_off;
2607	slabp->nodeid = nodeid;
2608	return slabp;
2609}
2610
2611static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp)
2612{
2613	return (kmem_bufctl_t *) (slabp + 1);
2614}
2615
2616static void cache_init_objs(struct kmem_cache *cachep,
2617			    struct slab *slabp, unsigned long ctor_flags)
2618{
2619	int i;
2620
2621	for (i = 0; i < cachep->num; i++) {
2622		void *objp = index_to_obj(cachep, slabp, i);
2623#if DEBUG
2624		/* need to poison the objs? */
2625		if (cachep->flags & SLAB_POISON)
2626			poison_obj(cachep, objp, POISON_FREE);
2627		if (cachep->flags & SLAB_STORE_USER)
2628			*dbg_userword(cachep, objp) = NULL;
2629
2630		if (cachep->flags & SLAB_RED_ZONE) {
2631			*dbg_redzone1(cachep, objp) = RED_INACTIVE;
2632			*dbg_redzone2(cachep, objp) = RED_INACTIVE;
2633		}
2634		/*
2635		 * Constructors are not allowed to allocate memory from the same
2636		 * cache which they are a constructor for.  Otherwise, deadlock.
2637		 * They must also be threaded.
2638		 */
2639		if (cachep->ctor && !(cachep->flags & SLAB_POISON))
2640			cachep->ctor(objp + obj_offset(cachep), cachep,
2641				     ctor_flags);
2642
2643		if (cachep->flags & SLAB_RED_ZONE) {
2644			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
2645				slab_error(cachep, "constructor overwrote the"
2646					   " end of an object");
2647			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
2648				slab_error(cachep, "constructor overwrote the"
2649					   " start of an object");
2650		}
2651		if ((cachep->buffer_size % PAGE_SIZE) == 0 &&
2652			    OFF_SLAB(cachep) && cachep->flags & SLAB_POISON)
2653			kernel_map_pages(virt_to_page(objp),
2654					 cachep->buffer_size / PAGE_SIZE, 0);
2655#else
2656		if (cachep->ctor)
2657			cachep->ctor(objp, cachep, ctor_flags);
2658#endif
2659		slab_bufctl(slabp)[i] = i + 1;
2660	}
2661	slab_bufctl(slabp)[i - 1] = BUFCTL_END;
2662	slabp->free = 0;
2663}
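
/*
 * Illustrative freelist state: for a slab with cachep->num == 4,
 * cache_init_objs() leaves slab_bufctl(slabp) == { 1, 2, 3, BUFCTL_END }
 * and slabp->free == 0.  slab_get_obj() below then hands out object 0 and
 * sets free = 1; a later slab_put_obj() on object 0 stores the old free
 * index in bufctl[0] and sets free = 0 again, giving LIFO reuse.
 */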
2664
2665static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
2666{
2667	if (CONFIG_ZONE_DMA_FLAG) {
2668		if (flags & GFP_DMA)
2669			BUG_ON(!(cachep->gfpflags & GFP_DMA));
2670		else
2671			BUG_ON(cachep->gfpflags & GFP_DMA);
2672	}
2673}
2674
2675static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp,
2676				int nodeid)
2677{
2678	void *objp = index_to_obj(cachep, slabp, slabp->free);
2679	kmem_bufctl_t next;
2680
2681	slabp->inuse++;
2682	next = slab_bufctl(slabp)[slabp->free];
2683#if DEBUG
2684	slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
2685	WARN_ON(slabp->nodeid != nodeid);
2686#endif
2687	slabp->free = next;
2688
2689	return objp;
2690}
2691
2692static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
2693				void *objp, int nodeid)
2694{
2695	unsigned int objnr = obj_to_index(cachep, slabp, objp);
2696
2697#if DEBUG
2698	/* Verify that the slab belongs to the intended node */
2699	WARN_ON(slabp->nodeid != nodeid);
2700
2701	if (slab_bufctl(slabp)[objnr] + 1 <= SLAB_LIMIT + 1) {
2702		printk(KERN_ERR "slab: double free detected in cache "
2703				"'%s', objp %p\n", cachep->name, objp);
2704		BUG();
2705	}
2706#endif
2707	slab_bufctl(slabp)[objnr] = slabp->free;
2708	slabp->free = objnr;
2709	slabp->inuse--;
2710}
2711
2712/*
2713 * Map pages beginning at addr to the given cache and slab. This is required
2714 * for the slab allocator to be able to lookup the cache and slab of a
2715 * virtual address for kfree, ksize, kmem_ptr_validate, and slab debugging.
2716 */
2717static void slab_map_pages(struct kmem_cache *cache, struct slab *slab,
2718			   void *addr)
2719{
2720	int nr_pages;
2721	struct page *page;
2722
2723	page = virt_to_page(addr);
2724
2725	nr_pages = 1;
2726	if (likely(!PageCompound(page)))
2727		nr_pages <<= cache->gfporder;
2728
2729	do {
2730		page_set_cache(page, cache);
2731		page_set_slab(page, slab);
2732		page++;
2733	} while (--nr_pages);
2734}
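
/*
 * Illustrative round trip (not new code): once slab_map_pages() has run,
 * a later free can recover the owning cache and slab purely from the
 * object's address, e.g.
 *
 *	page = virt_to_head_page(objp);
 *	cachep = page_get_cache(page);
 *	slabp = page_get_slab(page);
 *
 * which is what kfree() (via virt_to_cache()) and cache_free_debugcheck()
 * rely on.
 */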
2735
2736/*
2737 * Grow (by 1) the number of slabs within a cache.  This is called by
2738 * kmem_cache_alloc() when there are no active objs left in a cache.
2739 */
2740static int cache_grow(struct kmem_cache *cachep,
2741		gfp_t flags, int nodeid, void *objp)
2742{
2743	struct slab *slabp;
2744	size_t offset;
2745	gfp_t local_flags;
2746	unsigned long ctor_flags;
2747	struct kmem_list3 *l3;
2748
2749	/*
2750	 * Be lazy and only check for valid flags here,  keeping it out of the
2751	 * critical path in kmem_cache_alloc().
2752	 */
2753	BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK));
2754
2755	ctor_flags = SLAB_CTOR_CONSTRUCTOR;
2756	local_flags = (flags & GFP_LEVEL_MASK);
2757	/* Take the l3 list lock to change the colour_next on this node */
2758	check_irq_off();
2759	l3 = cachep->nodelists[nodeid];
2760	spin_lock(&l3->list_lock);
2761
2762	/* Get colour for the slab, and calculate the next value. */
2763	offset = l3->colour_next;
2764	l3->colour_next++;
2765	if (l3->colour_next >= cachep->colour)
2766		l3->colour_next = 0;
2767	spin_unlock(&l3->list_lock);
2768
2769	offset *= cachep->colour_off;
2770
2771	if (local_flags & __GFP_WAIT)
2772		local_irq_enable();
2773
2774	/*
2775	 * The test for missing atomic flag is performed here, rather than
2776	 * the more obvious place, simply to reduce the critical path length
2777	 * in kmem_cache_alloc(). If a caller is seriously mis-behaving they
2778	 * will eventually be caught here (where it matters).
2779	 */
2780	kmem_flagcheck(cachep, flags);
2781
2782	/*
2783	 * Get mem for the objs.  Attempt to allocate a physical page from
2784	 * 'nodeid'.
2785	 */
2786	if (!objp)
2787		objp = kmem_getpages(cachep, flags, nodeid);
2788	if (!objp)
2789		goto failed;
2790
2791	/* Get slab management. */
2792	slabp = alloc_slabmgmt(cachep, objp, offset,
2793			local_flags & ~GFP_THISNODE, nodeid);
2794	if (!slabp)
2795		goto opps1;
2796
2797	slabp->nodeid = nodeid;
2798	slab_map_pages(cachep, slabp, objp);
2799
2800	cache_init_objs(cachep, slabp, ctor_flags);
2801
2802	if (local_flags & __GFP_WAIT)
2803		local_irq_disable();
2804	check_irq_off();
2805	spin_lock(&l3->list_lock);
2806
2807	/* Make slab active. */
2808	list_add_tail(&slabp->list, &(l3->slabs_free));
2809	STATS_INC_GROWN(cachep);
2810	l3->free_objects += cachep->num;
2811	spin_unlock(&l3->list_lock);
2812	return 1;
2813opps1:
2814	kmem_freepages(cachep, objp);
2815failed:
2816	if (local_flags & __GFP_WAIT)
2817		local_irq_disable();
2818	return 0;
2819}
2820
2821#if DEBUG
2822
2823/*
2824 * Perform extra freeing checks:
2825 * - detect bad pointers.
2826 * - POISON/RED_ZONE checking
2827 * - destructor calls, for caches with POISON+dtor
2828 */
2829static void kfree_debugcheck(const void *objp)
2830{
2831	if (!virt_addr_valid(objp)) {
2832		printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n",
2833		       (unsigned long)objp);
2834		BUG();
2835	}
2836}
2837
2838static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
2839{
2840	unsigned long long redzone1, redzone2;
2841
2842	redzone1 = *dbg_redzone1(cache, obj);
2843	redzone2 = *dbg_redzone2(cache, obj);
2844
2845	/*
2846	 * Redzone is ok.
2847	 */
2848	if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE)
2849		return;
2850
2851	if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE)
2852		slab_error(cache, "double free detected");
2853	else
2854		slab_error(cache, "memory outside object was overwritten");
2855
2856	printk(KERN_ERR "%p: redzone 1:0x%llx, redzone 2:0x%llx.\n",
2857			obj, redzone1, redzone2);
2858}
2859
2860static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
2861				   void *caller)
2862{
2863	struct page *page;
2864	unsigned int objnr;
2865	struct slab *slabp;
2866
2867	objp -= obj_offset(cachep);
2868	kfree_debugcheck(objp);
2869	page = virt_to_head_page(objp);
2870
2871	slabp = page_get_slab(page);
2872
2873	if (cachep->flags & SLAB_RED_ZONE) {
2874		verify_redzone_free(cachep, objp);
2875		*dbg_redzone1(cachep, objp) = RED_INACTIVE;
2876		*dbg_redzone2(cachep, objp) = RED_INACTIVE;
2877	}
2878	if (cachep->flags & SLAB_STORE_USER)
2879		*dbg_userword(cachep, objp) = caller;
2880
2881	objnr = obj_to_index(cachep, slabp, objp);
2882
2883	BUG_ON(objnr >= cachep->num);
2884	BUG_ON(objp != index_to_obj(cachep, slabp, objnr));
2885
2886	if (cachep->flags & SLAB_POISON && cachep->dtor) {
2887		/* We want to cache-poison the object, so
2888		 * call the destructor callback first.
2889		 */
2890		cachep->dtor(objp + obj_offset(cachep), cachep, 0);
2891	}
2892#ifdef CONFIG_DEBUG_SLAB_LEAK
2893	slab_bufctl(slabp)[objnr] = BUFCTL_FREE;
2894#endif
2895	if (cachep->flags & SLAB_POISON) {
2896#ifdef CONFIG_DEBUG_PAGEALLOC
2897		if ((cachep->buffer_size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
2898			store_stackinfo(cachep, objp, (unsigned long)caller);
2899			kernel_map_pages(virt_to_page(objp),
2900					 cachep->buffer_size / PAGE_SIZE, 0);
2901		} else {
2902			poison_obj(cachep, objp, POISON_FREE);
2903		}
2904#else
2905		poison_obj(cachep, objp, POISON_FREE);
2906#endif
2907	}
2908	return objp;
2909}
2910
2911static void check_slabp(struct kmem_cache *cachep, struct slab *slabp)
2912{
2913	kmem_bufctl_t i;
2914	int entries = 0;
2915
2916	/* Check slab's freelist to see if this obj is there. */
2917	for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) {
2918		entries++;
2919		if (entries > cachep->num || i >= cachep->num)
2920			goto bad;
2921	}
2922	if (entries != cachep->num - slabp->inuse) {
2923bad:
2924		printk(KERN_ERR "slab: Internal list corruption detected in "
2925				"cache '%s'(%d), slabp %p(%d). Hexdump:\n",
2926			cachep->name, cachep->num, slabp, slabp->inuse);
2927		for (i = 0;
2928		     i < sizeof(*slabp) + cachep->num * sizeof(kmem_bufctl_t);
2929		     i++) {
2930			if (i % 16 == 0)
2931				printk("\n%03x:", i);
2932			printk(" %02x", ((unsigned char *)slabp)[i]);
2933		}
2934		printk("\n");
2935		BUG();
2936	}
2937}
2938#else
2939#define kfree_debugcheck(x) do { } while(0)
2940#define cache_free_debugcheck(x,objp,z) (objp)
2941#define check_slabp(x,y) do { } while(0)
2942#endif
2943
2944static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
2945{
2946	int batchcount;
2947	struct kmem_list3 *l3;
2948	struct array_cache *ac;
2949	int node;
2950
2951	node = numa_node_id();
2952
2953	check_irq_off();
2954	ac = cpu_cache_get(cachep);
2955retry:
2956	batchcount = ac->batchcount;
2957	if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
2958		/*
2959		 * If there was little recent activity on this cache, then
2960		 * perform only a partial refill.  Otherwise we could generate
2961		 * refill bouncing.
2962		 */
2963		batchcount = BATCHREFILL_LIMIT;
2964	}
2965	l3 = cachep->nodelists[node];
2966
2967	BUG_ON(ac->avail > 0 || !l3);
2968	spin_lock(&l3->list_lock);
2969
2970	/* See if we can refill from the shared array */
2971	if (l3->shared && transfer_objects(ac, l3->shared, batchcount))
2972		goto alloc_done;
2973
2974	while (batchcount > 0) {
2975		struct list_head *entry;
2976		struct slab *slabp;
2977		/* Get the slab the allocation is to come from. */
2978		entry = l3->slabs_partial.next;
2979		if (entry == &l3->slabs_partial) {
2980			l3->free_touched = 1;
2981			entry = l3->slabs_free.next;
2982			if (entry == &l3->slabs_free)
2983				goto must_grow;
2984		}
2985
2986		slabp = list_entry(entry, struct slab, list);
2987		check_slabp(cachep, slabp);
2988		check_spinlock_acquired(cachep);
2989
2990		/*
2991		 * The slab was either on partial or free list so
2992		 * there must be at least one object available for
2993		 * allocation.
2994		 */
2995		BUG_ON(slabp->inuse < 0 || slabp->inuse >= cachep->num);
2996
2997		while (slabp->inuse < cachep->num && batchcount--) {
2998			STATS_INC_ALLOCED(cachep);
2999			STATS_INC_ACTIVE(cachep);
3000			STATS_SET_HIGH(cachep);
3001
3002			ac->entry[ac->avail++] = slab_get_obj(cachep, slabp,
3003							    node);
3004		}
3005		check_slabp(cachep, slabp);
3006
3007		/* move slabp to the correct slab list: */
3008		list_del(&slabp->list);
3009		if (slabp->free == BUFCTL_END)
3010			list_add(&slabp->list, &l3->slabs_full);
3011		else
3012			list_add(&slabp->list, &l3->slabs_partial);
3013	}
3014
3015must_grow:
3016	l3->free_objects -= ac->avail;
3017alloc_done:
3018	spin_unlock(&l3->list_lock);
3019
3020	if (unlikely(!ac->avail)) {
3021		int x;
3022		x = cache_grow(cachep, flags | GFP_THISNODE, node, NULL);
3023
3024		/* cache_grow can reenable interrupts, then ac could change. */
3025		ac = cpu_cache_get(cachep);
3026		if (!x && ac->avail == 0)	/* no objects in sight? abort */
3027			return NULL;
3028
3029		if (!ac->avail)		/* objects refilled by interrupt? */
3030			goto retry;
3031	}
3032	ac->touched = 1;
3033	return ac->entry[--ac->avail];
3034}
3035
3036static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
3037						gfp_t flags)
3038{
3039	might_sleep_if(flags & __GFP_WAIT);
3040#if DEBUG
3041	kmem_flagcheck(cachep, flags);
3042#endif
3043}
3044
3045#if DEBUG
3046static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
3047				gfp_t flags, void *objp, void *caller)
3048{
3049	if (!objp)
3050		return objp;
3051	if (cachep->flags & SLAB_POISON) {
3052#ifdef CONFIG_DEBUG_PAGEALLOC
3053		if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
3054			kernel_map_pages(virt_to_page(objp),
3055					 cachep->buffer_size / PAGE_SIZE, 1);
3056		else
3057			check_poison_obj(cachep, objp);
3058#else
3059		check_poison_obj(cachep, objp);
3060#endif
3061		poison_obj(cachep, objp, POISON_INUSE);
3062	}
3063	if (cachep->flags & SLAB_STORE_USER)
3064		*dbg_userword(cachep, objp) = caller;
3065
3066	if (cachep->flags & SLAB_RED_ZONE) {
3067		if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
3068				*dbg_redzone2(cachep, objp) != RED_INACTIVE) {
3069			slab_error(cachep, "double free, or memory outside"
3070						" object was overwritten");
3071			printk(KERN_ERR
3072				"%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
3073				objp, *dbg_redzone1(cachep, objp),
3074				*dbg_redzone2(cachep, objp));
3075		}
3076		*dbg_redzone1(cachep, objp) = RED_ACTIVE;
3077		*dbg_redzone2(cachep, objp) = RED_ACTIVE;
3078	}
3079#ifdef CONFIG_DEBUG_SLAB_LEAK
3080	{
3081		struct slab *slabp;
3082		unsigned objnr;
3083
3084		slabp = page_get_slab(virt_to_head_page(objp));
3085		objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
3086		slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE;
3087	}
3088#endif
3089	objp += obj_offset(cachep);
3090	if (cachep->ctor && cachep->flags & SLAB_POISON)
3091		cachep->ctor(objp, cachep, SLAB_CTOR_CONSTRUCTOR);
3092#if ARCH_SLAB_MINALIGN
3093	if ((u32)objp & (ARCH_SLAB_MINALIGN-1)) {
3094		printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
3095		       objp, ARCH_SLAB_MINALIGN);
3096	}
3097#endif
3098	return objp;
3099}
3100#else
3101#define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
3102#endif
3103
3104#ifdef CONFIG_FAILSLAB
3105
3106static struct failslab_attr {
3107
3108	struct fault_attr attr;
3109
3110	u32 ignore_gfp_wait;
3111#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
3112	struct dentry *ignore_gfp_wait_file;
3113#endif
3114
3115} failslab = {
3116	.attr = FAULT_ATTR_INITIALIZER,
3117	.ignore_gfp_wait = 1,
3118};
3119
3120static int __init setup_failslab(char *str)
3121{
3122	return setup_fault_attr(&failslab.attr, str);
3123}
3124__setup("failslab=", setup_failslab);
3125
3126static int should_failslab(struct kmem_cache *cachep, gfp_t flags)
3127{
3128	if (cachep == &cache_cache)
3129		return 0;
3130	if (flags & __GFP_NOFAIL)
3131		return 0;
3132	if (failslab.ignore_gfp_wait && (flags & __GFP_WAIT))
3133		return 0;
3134
3135	return should_fail(&failslab.attr, obj_size(cachep));
3136}
3137
3138#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
3139
3140static int __init failslab_debugfs(void)
3141{
3142	mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
3143	struct dentry *dir;
3144	int err;
3145
3146	err = init_fault_attr_dentries(&failslab.attr, "failslab");
3147	if (err)
3148		return err;
3149	dir = failslab.attr.dentries.dir;
3150
3151	failslab.ignore_gfp_wait_file =
3152		debugfs_create_bool("ignore-gfp-wait", mode, dir,
3153				      &failslab.ignore_gfp_wait);
3154
3155	if (!failslab.ignore_gfp_wait_file) {
3156		err = -ENOMEM;
3157		debugfs_remove(failslab.ignore_gfp_wait_file);
3158		cleanup_fault_attr_dentries(&failslab.attr);
3159	}
3160
3161	return err;
3162}
3163
3164late_initcall(failslab_debugfs);
3165
3166#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
3167
3168#else /* CONFIG_FAILSLAB */
3169
3170static inline int should_failslab(struct kmem_cache *cachep, gfp_t flags)
3171{
3172	return 0;
3173}
3174
3175#endif /* CONFIG_FAILSLAB */
3176
3177static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3178{
3179	void *objp;
3180	struct array_cache *ac;
3181
3182	check_irq_off();
3183
3184	ac = cpu_cache_get(cachep);
3185	if (likely(ac->avail)) {
3186		STATS_INC_ALLOCHIT(cachep);
3187		ac->touched = 1;
3188		objp = ac->entry[--ac->avail];
3189	} else {
3190		STATS_INC_ALLOCMISS(cachep);
3191		objp = cache_alloc_refill(cachep, flags);
3192	}
3193	return objp;
3194}
3195
3196#ifdef CONFIG_NUMA
3197/*
3198 * Try allocating on another node if PF_SPREAD_SLAB or PF_MEMPOLICY is set.
3199 *
3200 * If we are in_interrupt, then process context, including cpusets and
3201 * mempolicy, may not apply and should not be used for allocation policy.
3202 */
3203static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
3204{
3205	int nid_alloc, nid_here;
3206
3207	if (in_interrupt() || (flags & __GFP_THISNODE))
3208		return NULL;
3209	nid_alloc = nid_here = numa_node_id();
3210	if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
3211		nid_alloc = cpuset_mem_spread_node();
3212	else if (current->mempolicy)
3213		nid_alloc = slab_node(current->mempolicy);
3214	if (nid_alloc != nid_here)
3215		return ____cache_alloc_node(cachep, flags, nid_alloc);
3216	return NULL;
3217}
3218
3219/*
3220 * Fallback function if there was no memory available and no objects on a
3221 * certain node and fallback is permitted. First we scan all the
3222 * available nodelists for available objects. If that fails then we
3223 * perform an allocation without specifying a node. This allows the page
3224 * allocator to do its reclaim / fallback magic. We then insert the
3225 * slab into the proper nodelist and then allocate from it.
3226 */
3227static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
3228{
3229	struct zonelist *zonelist;
3230	gfp_t local_flags;
3231	struct zone **z;
3232	void *obj = NULL;
3233	int nid;
3234
3235	if (flags & __GFP_THISNODE)
3236		return NULL;
3237
3238	zonelist = &NODE_DATA(slab_node(current->mempolicy))
3239			->node_zonelists[gfp_zone(flags)];
3240	local_flags = (flags & GFP_LEVEL_MASK);
3241
3242retry:
3243	/*
3244	 * Look through allowed nodes for objects available
3245	 * from existing per node queues.
3246	 */
3247	for (z = zonelist->zones; *z && !obj; z++) {
3248		nid = zone_to_nid(*z);
3249
3250		if (cpuset_zone_allowed_hardwall(*z, flags) &&
3251			cache->nodelists[nid] &&
3252			cache->nodelists[nid]->free_objects)
3253				obj = ____cache_alloc_node(cache,
3254					flags | GFP_THISNODE, nid);
3255	}
3256
3257	if (!obj) {
3258		/*
3259		 * This allocation will be performed within the constraints
3260		 * of the current cpuset / memory policy requirements.
3261		 * We may trigger various forms of reclaim on the allowed
3262		 * set and go into memory reserves if necessary.
3263		 */
3264		if (local_flags & __GFP_WAIT)
3265			local_irq_enable();
3266		kmem_flagcheck(cache, flags);
3267		obj = kmem_getpages(cache, flags, -1);
3268		if (local_flags & __GFP_WAIT)
3269			local_irq_disable();
3270		if (obj) {
3271			/*
3272			 * Insert into the appropriate per node queues
3273			 */
3274			nid = page_to_nid(virt_to_page(obj));
3275			if (cache_grow(cache, flags, nid, obj)) {
3276				obj = ____cache_alloc_node(cache,
3277					flags | GFP_THISNODE, nid);
3278				if (!obj)
3279					/*
3280					 * Another processor may allocate the
3281					 * objects in the slab since we are
3282					 * not holding any locks.
3283					 */
3284					goto retry;
3285			} else {
3286				/* cache_grow already freed obj */
3287				obj = NULL;
3288			}
3289		}
3290	}
3291	return obj;
3292}
3293
3294/*
3295 * An interface to allocate an object from, or grow a slab on, the given node.
3296 */
3297static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
3298				int nodeid)
3299{
3300	struct list_head *entry;
3301	struct slab *slabp;
3302	struct kmem_list3 *l3;
3303	void *obj;
3304	int x;
3305
3306	l3 = cachep->nodelists[nodeid];
3307	BUG_ON(!l3);
3308
3309retry:
3310	check_irq_off();
3311	spin_lock(&l3->list_lock);
3312	entry = l3->slabs_partial.next;
3313	if (entry == &l3->slabs_partial) {
3314		l3->free_touched = 1;
3315		entry = l3->slabs_free.next;
3316		if (entry == &l3->slabs_free)
3317			goto must_grow;
3318	}
3319
3320	slabp = list_entry(entry, struct slab, list);
3321	check_spinlock_acquired_node(cachep, nodeid);
3322	check_slabp(cachep, slabp);
3323
3324	STATS_INC_NODEALLOCS(cachep);
3325	STATS_INC_ACTIVE(cachep);
3326	STATS_SET_HIGH(cachep);
3327
3328	BUG_ON(slabp->inuse == cachep->num);
3329
3330	obj = slab_get_obj(cachep, slabp, nodeid);
3331	check_slabp(cachep, slabp);
3332	l3->free_objects--;
3333	/* move slabp to the correct slab list: */
3334	list_del(&slabp->list);
3335
3336	if (slabp->free == BUFCTL_END)
3337		list_add(&slabp->list, &l3->slabs_full);
3338	else
3339		list_add(&slabp->list, &l3->slabs_partial);
3340
3341	spin_unlock(&l3->list_lock);
3342	goto done;
3343
3344must_grow:
3345	spin_unlock(&l3->list_lock);
3346	x = cache_grow(cachep, flags | GFP_THISNODE, nodeid, NULL);
3347	if (x)
3348		goto retry;
3349
3350	return fallback_alloc(cachep, flags);
3351
3352done:
3353	return obj;
3354}
3355
3356/**
3357 * kmem_cache_alloc_node - Allocate an object on the specified node
3358 * @cachep: The cache to allocate from.
3359 * @flags: See kmalloc().
3360 * @nodeid: node number of the target node.
3361 * @caller: return address of caller, used for debug information
3362 *
3363 * Identical to kmem_cache_alloc but it will allocate memory on the given
3364 * node, which can improve the performance for cpu bound structures.
3365 *
3366 * Fallback to other nodes is possible if __GFP_THISNODE is not set.
3367 */
3368static __always_inline void *
3369__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
3370		   void *caller)
3371{
3372	unsigned long save_flags;
3373	void *ptr;
3374
3375	if (should_failslab(cachep, flags))
3376		return NULL;
3377
3378	cache_alloc_debugcheck_before(cachep, flags);
3379	local_irq_save(save_flags);
3380
3381	if (unlikely(nodeid == -1))
3382		nodeid = numa_node_id();
3383
3384	if (unlikely(!cachep->nodelists[nodeid])) {
3385		/* Node not bootstrapped yet */
3386		ptr = fallback_alloc(cachep, flags);
3387		goto out;
3388	}
3389
3390	if (nodeid == numa_node_id()) {
3391		/*
3392		 * Use the locally cached objects if possible.
3393		 * However ____cache_alloc does not allow fallback
3394		 * to other nodes. It may fail while we still have
3395		 * objects on other nodes available.
3396		 */
3397		ptr = ____cache_alloc(cachep, flags);
3398		if (ptr)
3399			goto out;
3400	}
3401	/* ____cache_alloc_node can fall back to other nodes */
3402	ptr = ____cache_alloc_node(cachep, flags, nodeid);
3403  out:
3404	local_irq_restore(save_flags);
3405	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
3406
3407	return ptr;
3408}
3409
3410static __always_inline void *
3411__do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
3412{
3413	void *objp;
3414
3415	if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) {
3416		objp = alternate_node_alloc(cache, flags);
3417		if (objp)
3418			goto out;
3419	}
3420	objp = ____cache_alloc(cache, flags);
3421
3422	/*
3423	 * We may just have run out of memory on the local node.
3424	 * ____cache_alloc_node() knows how to locate memory on other nodes
3425	 */
3426	if (!objp)
3427		objp = ____cache_alloc_node(cache, flags, numa_node_id());
3428
3429  out:
3430	return objp;
3431}
3432#else
3433
3434static __always_inline void *
3435__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3436{
3437	return ____cache_alloc(cachep, flags);
3438}
3439
3440#endif /* CONFIG_NUMA */
3441
3442static __always_inline void *
3443__cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
3444{
3445	unsigned long save_flags;
3446	void *objp;
3447
3448	if (should_failslab(cachep, flags))
3449		return NULL;
3450
3451	cache_alloc_debugcheck_before(cachep, flags);
3452	local_irq_save(save_flags);
3453	objp = __do_cache_alloc(cachep, flags);
3454	local_irq_restore(save_flags);
3455	objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
3456	prefetchw(objp);
3457
3458	return objp;
3459}
3460
3461/*
3462 * The caller must hold the correct kmem_list3's list_lock.
3463 */
3464static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
3465		       int node)
3466{
3467	int i;
3468	struct kmem_list3 *l3;
3469
3470	for (i = 0; i < nr_objects; i++) {
3471		void *objp = objpp[i];
3472		struct slab *slabp;
3473
3474		slabp = virt_to_slab(objp);
3475		l3 = cachep->nodelists[node];
3476		list_del(&slabp->list);
3477		check_spinlock_acquired_node(cachep, node);
3478		check_slabp(cachep, slabp);
3479		slab_put_obj(cachep, slabp, objp, node);
3480		STATS_DEC_ACTIVE(cachep);
3481		l3->free_objects++;
3482		check_slabp(cachep, slabp);
3483
3484		/* fixup slab chains */
3485		if (slabp->inuse == 0) {
3486			if (l3->free_objects > l3->free_limit) {
3487				l3->free_objects -= cachep->num;
3488				/* No need to drop any previously held
3489				 * lock here; even if we have an off-slab slab
3490				 * descriptor, it is guaranteed to come from
3491				 * a different cache - refer to the comments
3492				 * before alloc_slabmgmt().
3493				 */
3494				slab_destroy(cachep, slabp);
3495			} else {
3496				list_add(&slabp->list, &l3->slabs_free);
3497			}
3498		} else {
3499			/* Unconditionally move a slab to the end of the
3500			 * partial list on free - this gives the other
3501			 * objects the maximum time to be freed, too.
3502			 */
3503			list_add_tail(&slabp->list, &l3->slabs_partial);
3504		}
3505	}
3506}
3507
3508static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
3509{
3510	int batchcount;
3511	struct kmem_list3 *l3;
3512	int node = numa_node_id();
3513
3514	batchcount = ac->batchcount;
3515#if DEBUG
3516	BUG_ON(!batchcount || batchcount > ac->avail);
3517#endif
3518	check_irq_off();
3519	l3 = cachep->nodelists[node];
3520	spin_lock(&l3->list_lock);
3521	if (l3->shared) {
3522		struct array_cache *shared_array = l3->shared;
3523		int max = shared_array->limit - shared_array->avail;
3524		if (max) {
3525			if (batchcount > max)
3526				batchcount = max;
3527			memcpy(&(shared_array->entry[shared_array->avail]),
3528			       ac->entry, sizeof(void *) * batchcount);
3529			shared_array->avail += batchcount;
3530			goto free_done;
3531		}
3532	}
3533
3534	free_block(cachep, ac->entry, batchcount, node);
3535free_done:
3536#if STATS
3537	{
3538		int i = 0;
3539		struct list_head *p;
3540
3541		p = l3->slabs_free.next;
3542		while (p != &(l3->slabs_free)) {
3543			struct slab *slabp;
3544
3545			slabp = list_entry(p, struct slab, list);
3546			BUG_ON(slabp->inuse);
3547
3548			i++;
3549			p = p->next;
3550		}
3551		STATS_SET_FREEABLE(cachep, i);
3552	}
3553#endif
3554	spin_unlock(&l3->list_lock);
3555	ac->avail -= batchcount;
3556	memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
3557}
3558
3559/*
3560 * Release an obj back to its cache. If the obj has a constructed state, it must
3561 * be in this state _before_ it is released.  Called with interrupts disabled.
3562 */
3563static inline void __cache_free(struct kmem_cache *cachep, void *objp)
3564{
3565	struct array_cache *ac = cpu_cache_get(cachep);
3566
3567	check_irq_off();
3568	objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
3569
3570	if (use_alien_caches && cache_free_alien(cachep, objp))
3571		return;
3572
3573	if (likely(ac->avail < ac->limit)) {
3574		STATS_INC_FREEHIT(cachep);
3575		ac->entry[ac->avail++] = objp;
3576		return;
3577	} else {
3578		STATS_INC_FREEMISS(cachep);
3579		cache_flusharray(cachep, ac);
3580		ac->entry[ac->avail++] = objp;
3581	}
3582}
3583
3584/**
3585 * kmem_cache_alloc - Allocate an object
3586 * @cachep: The cache to allocate from.
3587 * @flags: See kmalloc().
3588 *
3589 * Allocate an object from this cache.  The flags are only relevant
3590 * if the cache has no available objects.
3591 */
3592void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3593{
3594	return __cache_alloc(cachep, flags, __builtin_return_address(0));
3595}
3596EXPORT_SYMBOL(kmem_cache_alloc);
3597
3598/**
3599 * kmem_cache_zalloc - Allocate an object. The memory is set to zero.
3600 * @cache: The cache to allocate from.
3601 * @flags: See kmalloc().
3602 *
3603 * Allocate an object from this cache and set the allocated memory to zero.
3604 * The flags are only relevant if the cache has no available objects.
3605 */
3606void *kmem_cache_zalloc(struct kmem_cache *cache, gfp_t flags)
3607{
3608	void *ret = __cache_alloc(cache, flags, __builtin_return_address(0));
3609	if (ret)
3610		memset(ret, 0, obj_size(cache));
3611	return ret;
3612}
3613EXPORT_SYMBOL(kmem_cache_zalloc);
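
/*
 * Usage sketch (illustrative only; bar_cachep is a hypothetical cache
 * created without a constructor): allocate a zeroed object and release it
 * with kmem_cache_free():
 *
 *	struct bar *b = kmem_cache_zalloc(bar_cachep, GFP_KERNEL);
 *	if (!b)
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free(bar_cachep, b);
 */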
3614
3615/**
3616 * kmem_ptr_validate - check if an untrusted pointer might
3617 *	be a slab entry.
3618 * @cachep: the cache we're checking against
3619 * @ptr: pointer to validate
3620 *
3621 * This verifies that the untrusted pointer looks sane:
3622 * it is _not_ a guarantee that the pointer is actually
3623 * part of the slab cache in question, but it at least
3624 * validates that the pointer can be dereferenced and
3625 * looks half-way sane.
3626 *
3627 * Currently only used for dentry validation.
3628 */
3629int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr)
3630{
3631	unsigned long addr = (unsigned long)ptr;
3632	unsigned long min_addr = PAGE_OFFSET;
3633	unsigned long align_mask = BYTES_PER_WORD - 1;
3634	unsigned long size = cachep->buffer_size;
3635	struct page *page;
3636
3637	if (unlikely(addr < min_addr))
3638		goto out;
3639	if (unlikely(addr > (unsigned long)high_memory - size))
3640		goto out;
3641	if (unlikely(addr & align_mask))
3642		goto out;
3643	if (unlikely(!kern_addr_valid(addr)))
3644		goto out;
3645	if (unlikely(!kern_addr_valid(addr + size - 1)))
3646		goto out;
3647	page = virt_to_page(ptr);
3648	if (unlikely(!PageSlab(page)))
3649		goto out;
3650	if (unlikely(page_get_cache(page) != cachep))
3651		goto out;
3652	return 1;
3653out:
3654	return 0;
3655}
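
/*
 * Usage sketch (illustrative only; 'cachep' and 'ptr' stand for some
 * cache and an untrusted pointer obtained elsewhere):
 *
 *	if (!kmem_ptr_validate(cachep, ptr))
 *		return NULL;
 *
 * A nonzero return only means the pointer is plausible, not that it is
 * really an object belonging to that cache.
 */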
3656
3657#ifdef CONFIG_NUMA
3658void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
3659{
3660	return __cache_alloc_node(cachep, flags, nodeid,
3661			__builtin_return_address(0));
3662}
3663EXPORT_SYMBOL(kmem_cache_alloc_node);
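
/*
 * Usage sketch (illustrative only; foo_cachep and 'cpu' are hypothetical):
 * place a per-cpu structure on the memory node closest to that cpu:
 *
 *	p = kmem_cache_alloc_node(foo_cachep, GFP_KERNEL, cpu_to_node(cpu));
 *
 * Passing nodeid == -1 simply allocates on the current node.
 */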
3664
3665static __always_inline void *
3666__do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
3667{
3668	struct kmem_cache *cachep;
3669
3670	cachep = kmem_find_general_cachep(size, flags);
3671	if (unlikely(cachep == NULL))
3672		return NULL;
3673	return kmem_cache_alloc_node(cachep, flags, node);
3674}
3675
3676#ifdef CONFIG_DEBUG_SLAB
3677void *__kmalloc_node(size_t size, gfp_t flags, int node)
3678{
3679	return __do_kmalloc_node(size, flags, node,
3680			__builtin_return_address(0));
3681}
3682EXPORT_SYMBOL(__kmalloc_node);
3683
3684void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
3685		int node, void *caller)
3686{
3687	return __do_kmalloc_node(size, flags, node, caller);
3688}
3689EXPORT_SYMBOL(__kmalloc_node_track_caller);
3690#else
3691void *__kmalloc_node(size_t size, gfp_t flags, int node)
3692{
3693	return __do_kmalloc_node(size, flags, node, NULL);
3694}
3695EXPORT_SYMBOL(__kmalloc_node);
3696#endif /* CONFIG_DEBUG_SLAB */
3697#endif /* CONFIG_NUMA */
3698
3699/**
3700 * __do_kmalloc - allocate memory
3701 * @size: how many bytes of memory are required.
3702 * @flags: the type of memory to allocate (see kmalloc).
3703 * @caller: function caller for debug tracking of the caller
3704 */
3705static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
3706					  void *caller)
3707{
3708	struct kmem_cache *cachep;
3709
3710	/* If you want to save a few bytes of .text space: replace
3711	 * __ with kmem_.
3712	 * Then kmalloc uses the uninlined functions instead of the inline
3713	 * functions.
3714	 */
3715	cachep = __find_general_cachep(size, flags);
3716	if (unlikely(cachep == NULL))
3717		return NULL;
3718	return __cache_alloc(cachep, flags, caller);
3719}
3720
3721
3722#ifdef CONFIG_DEBUG_SLAB
3723void *__kmalloc(size_t size, gfp_t flags)
3724{
3725	return __do_kmalloc(size, flags, __builtin_return_address(0));
3726}
3727EXPORT_SYMBOL(__kmalloc);
3728
3729void *__kmalloc_track_caller(size_t size, gfp_t flags, void *caller)
3730{
3731	return __do_kmalloc(size, flags, caller);
3732}
3733EXPORT_SYMBOL(__kmalloc_track_caller);
3734
3735#else
3736void *__kmalloc(size_t size, gfp_t flags)
3737{
3738	return __do_kmalloc(size, flags, NULL);
3739}
3740EXPORT_SYMBOL(__kmalloc);
3741#endif
3742
3743/**
3744 * krealloc - reallocate memory. The contents will remain unchanged.
3745 * @p: object to reallocate memory for.
3746 * @new_size: how many bytes of memory are required.
3747 * @flags: the type of memory to allocate.
3748 *
3749 * The contents of the object pointed to are preserved up to the
3750 * lesser of the new and old sizes.  If @p is %NULL, krealloc()
3751 * behaves exactly like kmalloc().  If @size is 0 and @p is not a
3752 * %NULL pointer, the object pointed to is freed.
3753 */
3754void *krealloc(const void *p, size_t new_size, gfp_t flags)
3755{
3756	struct kmem_cache *cache, *new_cache;
3757	void *ret;
3758
3759	if (unlikely(!p))
3760		return kmalloc_track_caller(new_size, flags);
3761
3762	if (unlikely(!new_size)) {
3763		kfree(p);
3764		return NULL;
3765	}
3766
3767	cache = virt_to_cache(p);
3768	new_cache = __find_general_cachep(new_size, flags);
3769
3770	/*
3771	 * If new size fits in the current cache, bail out.
3772	 */
3773	if (likely(cache == new_cache))
3774		return (void *)p;
3775
3776	/*
3777	 * We are on the slow-path here so do not use __cache_alloc
3778	 * because it bloats kernel text.
3779	 */
3780	ret = kmalloc_track_caller(new_size, flags);
3781	if (ret) {
3782		memcpy(ret, p, min(new_size, ksize(p)));
3783		kfree(p);
3784	}
3785	return ret;
3786}
3787EXPORT_SYMBOL(krealloc);
3788
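/*
 * Example (an illustrative sketch; "buf", "tmp" and "new_len" are
 * hypothetical names) of the usual krealloc() calling pattern.  Assigning
 * through a temporary avoids leaking the old buffer when the reallocation
 * fails:
 *
 *	tmp = krealloc(buf, new_len, GFP_KERNEL);
 *	if (!tmp)
 *		return -ENOMEM;
 *	buf = tmp;
 *
 * On failure the old buffer is left untouched and remains owned by the
 * caller.
 */
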
3789/**
3790 * kmem_cache_free - Deallocate an object
3791 * @cachep: The cache the allocation was from.
3792 * @objp: The previously allocated object.
3793 *
3794 * Free an object which was previously allocated from this
3795 * cache.
3796 */
3797void kmem_cache_free(struct kmem_cache *cachep, void *objp)
3798{
3799	unsigned long flags;
3800
3801	BUG_ON(virt_to_cache(objp) != cachep);
3802
3803	local_irq_save(flags);
3804	debug_check_no_locks_freed(objp, obj_size(cachep));
3805	__cache_free(cachep, objp);
3806	local_irq_restore(flags);
3807}
3808EXPORT_SYMBOL(kmem_cache_free);
3809
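/*
 * Example (an illustrative sketch; "my_cache" and "obj" are hypothetical
 * names): allocation and free must name the same cache, which is what the
 * BUG_ON() above checks.
 *
 *	obj = kmem_cache_alloc(my_cache, GFP_KERNEL);
 *	if (obj)
 *		kmem_cache_free(my_cache, obj);
 *
 * Memory obtained with kmalloc() is normally returned with kfree() instead.
 */
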
3810/**
3811 * kfree - free previously allocated memory
3812 * @objp: pointer returned by kmalloc.
3813 *
3814 * If @objp is NULL, no operation is performed.
3815 *
3816 * Don't free memory not originally allocated by kmalloc()
3817 * or you will run into trouble.
3818 */
3819void kfree(const void *objp)
3820{
3821	struct kmem_cache *c;
3822	unsigned long flags;
3823
3824	if (unlikely(!objp))
3825		return;
3826	local_irq_save(flags);
3827	kfree_debugcheck(objp);
3828	c = virt_to_cache(objp);
3829	debug_check_no_locks_freed(objp, obj_size(c));
3830	__cache_free(c, (void *)objp);
3831	local_irq_restore(flags);
3832}
3833EXPORT_SYMBOL(kfree);
3834
3835unsigned int kmem_cache_size(struct kmem_cache *cachep)
3836{
3837	return obj_size(cachep);
3838}
3839EXPORT_SYMBOL(kmem_cache_size);
3840
3841const char *kmem_cache_name(struct kmem_cache *cachep)
3842{
3843	return cachep->name;
3844}
3845EXPORT_SYMBOL_GPL(kmem_cache_name);
3846
3847/*
3848 * This initializes kmem_list3 or resizes various caches for all nodes.
3849 */
3850static int alloc_kmemlist(struct kmem_cache *cachep)
3851{
3852	int node;
3853	struct kmem_list3 *l3;
3854	struct array_cache *new_shared;
3855	struct array_cache **new_alien = NULL;
3856
3857	for_each_online_node(node) {
3858
3859		if (use_alien_caches) {
3860			new_alien = alloc_alien_cache(node, cachep->limit);
3861			if (!new_alien)
3862				goto fail;
3863		}
3864
3865		new_shared = NULL;
3866		if (cachep->shared) {
3867			new_shared = alloc_arraycache(node,
3868				cachep->shared*cachep->batchcount,
3869					0xbaadf00d);
3870			if (!new_shared) {
3871				free_alien_cache(new_alien);
3872				goto fail;
3873			}
3874		}
3875
3876		l3 = cachep->nodelists[node];
3877		if (l3) {
3878			struct array_cache *shared = l3->shared;
3879
3880			spin_lock_irq(&l3->list_lock);
3881
3882			if (shared)
3883				free_block(cachep, shared->entry,
3884						shared->avail, node);
3885
3886			l3->shared = new_shared;
3887			if (!l3->alien) {
3888				l3->alien = new_alien;
3889				new_alien = NULL;
3890			}
3891			l3->free_limit = (1 + nr_cpus_node(node)) *
3892					cachep->batchcount + cachep->num;
3893			spin_unlock_irq(&l3->list_lock);
3894			kfree(shared);
3895			free_alien_cache(new_alien);
3896			continue;
3897		}
3898		l3 = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, node);
3899		if (!l3) {
3900			free_alien_cache(new_alien);
3901			kfree(new_shared);
3902			goto fail;
3903		}
3904
3905		kmem_list3_init(l3);
3906		l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
3907				((unsigned long)cachep) % REAPTIMEOUT_LIST3;
3908		l3->shared = new_shared;
3909		l3->alien = new_alien;
3910		l3->free_limit = (1 + nr_cpus_node(node)) *
3911					cachep->batchcount + cachep->num;
3912		cachep->nodelists[node] = l3;
3913	}
3914	return 0;
3915
3916fail:
3917	if (!cachep->next.next) {
3918		/* Cache is not active yet. Roll back what we did */
3919		node--;
3920		while (node >= 0) {
3921			if (cachep->nodelists[node]) {
3922				l3 = cachep->nodelists[node];
3923
3924				kfree(l3->shared);
3925				free_alien_cache(l3->alien);
3926				kfree(l3);
3927				cachep->nodelists[node] = NULL;
3928			}
3929			node--;
3930		}
3931	}
3932	return -ENOMEM;
3933}
3934
3935struct ccupdate_struct {
3936	struct kmem_cache *cachep;
3937	struct array_cache *new[NR_CPUS];
3938};
3939
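/*
 * Runs on each cpu via on_each_cpu() with interrupts disabled: swap in the
 * freshly allocated array_cache for this cpu and park the old one in
 * new->new[smp_processor_id()] so that do_tune_cpucache() can drain and
 * free it afterwards.
 */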
3940static void do_ccupdate_local(void *info)
3941{
3942	struct ccupdate_struct *new = info;
3943	struct array_cache *old;
3944
3945	check_irq_off();
3946	old = cpu_cache_get(new->cachep);
3947
3948	new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()];
3949	new->new[smp_processor_id()] = old;
3950}
3951
3952/* Always called with the cache_chain_mutex held */
3953static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
3954				int batchcount, int shared)
3955{
3956	struct ccupdate_struct *new;
3957	int i;
3958
3959	new = kzalloc(sizeof(*new), GFP_KERNEL);
3960	if (!new)
3961		return -ENOMEM;
3962
3963	for_each_online_cpu(i) {
3964		new->new[i] = alloc_arraycache(cpu_to_node(i), limit,
3965						batchcount);
3966		if (!new->new[i]) {
3967			for (i--; i >= 0; i--)
3968				kfree(new->new[i]);
3969			kfree(new);
3970			return -ENOMEM;
3971		}
3972	}
3973	new->cachep = cachep;
3974
3975	on_each_cpu(do_ccupdate_local, (void *)new, 1, 1);
3976
3977	check_irq_on();
3978	cachep->batchcount = batchcount;
3979	cachep->limit = limit;
3980	cachep->shared = shared;
3981
3982	for_each_online_cpu(i) {
3983		struct array_cache *ccold = new->new[i];
3984		if (!ccold)
3985			continue;
3986		spin_lock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
3987		free_block(cachep, ccold->entry, ccold->avail, cpu_to_node(i));
3988		spin_unlock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
3989		kfree(ccold);
3990	}
3991	kfree(new);
3992	return alloc_kmemlist(cachep);
3993}
3994
3995/* Called with cache_chain_mutex held always */
3996static int enable_cpucache(struct kmem_cache *cachep)
3997{
3998	int err;
3999	int limit, shared;
4000
4001	/*
4002	 * The head array serves three purposes:
4003	 * - create a LIFO ordering, i.e. return objects that are cache-warm
4004	 * - reduce the number of spinlock operations.
4005	 * - reduce the number of linked list operations on the slab and
4006	 *   bufctl chains: array operations are cheaper.
4007	 * The numbers are guessed; we should auto-tune as described by
4008	 * Bonwick.
4009	 */
4010	if (cachep->buffer_size > 131072)
4011		limit = 1;
4012	else if (cachep->buffer_size > PAGE_SIZE)
4013		limit = 8;
4014	else if (cachep->buffer_size > 1024)
4015		limit = 24;
4016	else if (cachep->buffer_size > 256)
4017		limit = 54;
4018	else
4019		limit = 120;
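	/*
	 * For example, with 4 KiB pages the table above gives a 192 byte
	 * cache a limit of 120 and an 8 KiB cache a limit of 8.
	 */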
4020
4021	/*
4022	 * CPU bound tasks (e.g. network routing) can exhibit asymmetric
4023	 * allocation behaviour: most allocs on one cpu, most free operations
4024	 * on another cpu. For these cases, an efficient object passing between
4025	 * cpus is necessary. This is provided by a shared array. The array
4026	 * replaces Bonwick's magazine layer.
4027	 * On uniprocessor, it's functionally equivalent (but less efficient)
4028	 * to a larger limit. Thus disabled by default.
4029	 */
4030	shared = 0;
4031	if (cachep->buffer_size <= PAGE_SIZE && num_possible_cpus() > 1)
4032		shared = 8;
4033
4034#if DEBUG
4035	/*
4036	 * With debugging enabled, a large batchcount leads to excessively long
4037	 * periods with local interrupts disabled. Limit the batchcount.
4038	 */
4039	if (limit > 32)
4040		limit = 32;
4041#endif
4042	err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared);
4043	if (err)
4044		printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
4045		       cachep->name, -err);
4046	return err;
4047}
4048
4049/*
4050 * Drain an array if it contains any elements taking the l3 lock only if
4051 * necessary. Note that the l3 listlock also protects the array_cache
4052 * if drain_array() is used on the shared array.
4053 */
4054void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
4055			 struct array_cache *ac, int force, int node)
4056{
4057	int tofree;
4058
4059	if (!ac || !ac->avail)
4060		return;
4061	if (ac->touched && !force) {
4062		ac->touched = 0;
4063	} else {
4064		spin_lock_irq(&l3->list_lock);
4065		if (ac->avail) {
4066			tofree = force ? ac->avail : (ac->limit + 4) / 5;
4067			if (tofree > ac->avail)
4068				tofree = (ac->avail + 1) / 2;
4069			free_block(cachep, ac->entry, tofree, node);
4070			ac->avail -= tofree;
4071			memmove(ac->entry, &(ac->entry[tofree]),
4072				sizeof(void *) * ac->avail);
4073		}
4074		spin_unlock_irq(&l3->list_lock);
4075	}
4076}
4077
4078/**
4079 * cache_reap - Reclaim memory from caches.
4080 * @w: work descriptor
4081 *
4082 * Called from workqueue/eventd every few seconds.
4083 * Purpose:
4084 * - clear the per-cpu caches for this CPU.
4085 * - return freeable pages to the main free memory pool.
4086 *
4087 * If we cannot acquire the cache chain mutex then just give up - we'll try
4088 * again on the next iteration.
4089 */
4090static void cache_reap(struct work_struct *w)
4091{
4092	struct kmem_cache *searchp;
4093	struct kmem_list3 *l3;
4094	int node = numa_node_id();
4095	struct delayed_work *work =
4096		container_of(w, struct delayed_work, work);
4097
4098	if (!mutex_trylock(&cache_chain_mutex))
4099		/* Give up. Set up the next iteration. */
4100		goto out;
4101
4102	list_for_each_entry(searchp, &cache_chain, next) {
4103		check_irq_on();
4104
4105		/*
4106		 * We only take the l3 lock if absolutely necessary and we
4107		 * have established with reasonable certainty that
4108		 * we can do some work if the lock was obtained.
4109		 */
4110		l3 = searchp->nodelists[node];
4111
4112		reap_alien(searchp, l3);
4113
4114		drain_array(searchp, l3, cpu_cache_get(searchp), 0, node);
4115
4116		/*
4117		 * These are racy checks but it does not matter
4118		 * if we skip one check or scan twice.
4119		 */
4120		if (time_after(l3->next_reap, jiffies))
4121			goto next;
4122
4123		l3->next_reap = jiffies + REAPTIMEOUT_LIST3;
4124
4125		drain_array(searchp, l3, l3->shared, 0, node);
4126
4127		if (l3->free_touched)
4128			l3->free_touched = 0;
4129		else {
4130			int freed;
4131
4132			freed = drain_freelist(searchp, l3, (l3->free_limit +
4133				5 * searchp->num - 1) / (5 * searchp->num));
4134			STATS_ADD_REAPED(searchp, freed);
4135		}
4136next:
4137		cond_resched();
4138	}
4139	check_irq_on();
4140	mutex_unlock(&cache_chain_mutex);
4141	next_reap_node();
4142	refresh_cpu_vm_stats(smp_processor_id());
4143out:
4144	/* Set up the next iteration */
4145	schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_CPUC));
4146}
4147
4148#ifdef CONFIG_PROC_FS
4149
4150static void print_slabinfo_header(struct seq_file *m)
4151{
4152	/*
4153	 * Output format version, so at least we can change it
4154	 * without _too_ many complaints.
4155	 */
4156#if STATS
4157	seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
4158#else
4159	seq_puts(m, "slabinfo - version: 2.1\n");
4160#endif
4161	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> "
4162		 "<objperslab> <pagesperslab>");
4163	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
4164	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
4165#if STATS
4166	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
4167		 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
4168	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
4169#endif
4170	seq_putc(m, '\n');
4171}
4172
4173static void *s_start(struct seq_file *m, loff_t *pos)
4174{
4175	loff_t n = *pos;
4176	struct list_head *p;
4177
4178	mutex_lock(&cache_chain_mutex);
4179	if (!n)
4180		print_slabinfo_header(m);
4181	p = cache_chain.next;
4182	while (n--) {
4183		p = p->next;
4184		if (p == &cache_chain)
4185			return NULL;
4186	}
4187	return list_entry(p, struct kmem_cache, next);
4188}
4189
4190static void *s_next(struct seq_file *m, void *p, loff_t *pos)
4191{
4192	struct kmem_cache *cachep = p;
4193	++*pos;
4194	return cachep->next.next == &cache_chain ?
4195		NULL : list_entry(cachep->next.next, struct kmem_cache, next);
4196}
4197
4198static void s_stop(struct seq_file *m, void *p)
4199{
4200	mutex_unlock(&cache_chain_mutex);
4201}
4202
4203static int s_show(struct seq_file *m, void *p)
4204{
4205	struct kmem_cache *cachep = p;
4206	struct slab *slabp;
4207	unsigned long active_objs;
4208	unsigned long num_objs;
4209	unsigned long active_slabs = 0;
4210	unsigned long num_slabs, free_objects = 0, shared_avail = 0;
4211	const char *name;
4212	char *error = NULL;
4213	int node;
4214	struct kmem_list3 *l3;
4215
4216	active_objs = 0;
4217	num_slabs = 0;
4218	for_each_online_node(node) {
4219		l3 = cachep->nodelists[node];
4220		if (!l3)
4221			continue;
4222
4223		check_irq_on();
4224		spin_lock_irq(&l3->list_lock);
4225
4226		list_for_each_entry(slabp, &l3->slabs_full, list) {
4227			if (slabp->inuse != cachep->num && !error)
4228				error = "slabs_full accounting error";
4229			active_objs += cachep->num;
4230			active_slabs++;
4231		}
4232		list_for_each_entry(slabp, &l3->slabs_partial, list) {
4233			if (slabp->inuse == cachep->num && !error)
4234				error = "slabs_partial inuse accounting error";
4235			if (!slabp->inuse && !error)
4236				error = "slabs_partial/inuse accounting error";
4237			active_objs += slabp->inuse;
4238			active_slabs++;
4239		}
4240		list_for_each_entry(slabp, &l3->slabs_free, list) {
4241			if (slabp->inuse && !error)
4242				error = "slabs_free/inuse accounting error";
4243			num_slabs++;
4244		}
4245		free_objects += l3->free_objects;
4246		if (l3->shared)
4247			shared_avail += l3->shared->avail;
4248
4249		spin_unlock_irq(&l3->list_lock);
4250	}
4251	num_slabs += active_slabs;
4252	num_objs = num_slabs * cachep->num;
4253	if (num_objs - active_objs != free_objects && !error)
4254		error = "free_objects accounting error";
4255
4256	name = cachep->name;
4257	if (error)
4258		printk(KERN_ERR "slab: cache %s error: %s\n", name, error);
4259
4260	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
4261		   name, active_objs, num_objs, cachep->buffer_size,
4262		   cachep->num, (1 << cachep->gfporder));
4263	seq_printf(m, " : tunables %4u %4u %4u",
4264		   cachep->limit, cachep->batchcount, cachep->shared);
4265	seq_printf(m, " : slabdata %6lu %6lu %6lu",
4266		   active_slabs, num_slabs, shared_avail);
4267#if STATS
4268	{			/* list3 stats */
4269		unsigned long high = cachep->high_mark;
4270		unsigned long allocs = cachep->num_allocations;
4271		unsigned long grown = cachep->grown;
4272		unsigned long reaped = cachep->reaped;
4273		unsigned long errors = cachep->errors;
4274		unsigned long max_freeable = cachep->max_freeable;
4275		unsigned long node_allocs = cachep->node_allocs;
4276		unsigned long node_frees = cachep->node_frees;
4277		unsigned long overflows = cachep->node_overflow;
4278
4279		seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu "
4280				"%4lu %4lu %4lu %4lu %4lu", allocs, high, grown,
4281				reaped, errors, max_freeable, node_allocs,
4282				node_frees, overflows);
4283	}
4284	/* cpu stats */
4285	{
4286		unsigned long allochit = atomic_read(&cachep->allochit);
4287		unsigned long allocmiss = atomic_read(&cachep->allocmiss);
4288		unsigned long freehit = atomic_read(&cachep->freehit);
4289		unsigned long freemiss = atomic_read(&cachep->freemiss);
4290
4291		seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
4292			   allochit, allocmiss, freehit, freemiss);
4293	}
4294#endif
4295	seq_putc(m, '\n');
4296	return 0;
4297}
4298
4299/*
4300 * slabinfo_op - iterator that generates /proc/slabinfo
4301 *
4302 * Output layout:
4303 * cache-name
4304 * num-active-objs
4305 * total-objs
4306 * object size
4307 * num-active-slabs
4308 * total-slabs
4309 * num-pages-per-slab
4310 * + further values on SMP and with statistics enabled
4311 */
4312
4313const struct seq_operations slabinfo_op = {
4314	.start = s_start,
4315	.next = s_next,
4316	.stop = s_stop,
4317	.show = s_show,
4318};
4319
4320#define MAX_SLABINFO_WRITE 128
4321/**
4322 * slabinfo_write - Tuning for the slab allocator
4323 * @file: unused
4324 * @buffer: user buffer
4325 * @count: data length
4326 * @ppos: unused
4327 */
4328ssize_t slabinfo_write(struct file *file, const char __user * buffer,
4329		       size_t count, loff_t *ppos)
4330{
4331	char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
4332	int limit, batchcount, shared, res;
4333	struct kmem_cache *cachep;
4334
4335	if (count > MAX_SLABINFO_WRITE)
4336		return -EINVAL;
4337	if (copy_from_user(&kbuf, buffer, count))
4338		return -EFAULT;
4339	kbuf[MAX_SLABINFO_WRITE] = '\0';
4340
4341	tmp = strchr(kbuf, ' ');
4342	if (!tmp)
4343		return -EINVAL;
4344	*tmp = '\0';
4345	tmp++;
4346	if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
4347		return -EINVAL;
4348
4349	/* Find the cache in the chain of caches. */
4350	mutex_lock(&cache_chain_mutex);
4351	res = -EINVAL;
4352	list_for_each_entry(cachep, &cache_chain, next) {
4353		if (!strcmp(cachep->name, kbuf)) {
4354			if (limit < 1 || batchcount < 1 ||
4355					batchcount > limit || shared < 0) {
4356				res = 0;
4357			} else {
4358				res = do_tune_cpucache(cachep, limit,
4359						       batchcount, shared);
4360			}
4361			break;
4362		}
4363	}
4364	mutex_unlock(&cache_chain_mutex);
4365	if (res >= 0)
4366		res = count;
4367	return res;
4368}
4369
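/*
 * Example (an illustrative sketch): the line parsed above has the form
 * "<cache name> <limit> <batchcount> <shared>", so the tunables of e.g.
 * the dentry cache could be set from userspace by writing
 *
 *	dentry 120 60 8
 *
 * to /proc/slabinfo.  Out-of-range values are accepted but ignored
 * (res = 0), so the write still returns @count.
 */
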
4370#ifdef CONFIG_DEBUG_SLAB_LEAK
4371
4372static void *leaks_start(struct seq_file *m, loff_t *pos)
4373{
4374	loff_t n = *pos;
4375	struct list_head *p;
4376
4377	mutex_lock(&cache_chain_mutex);
4378	p = cache_chain.next;
4379	while (n--) {
4380		p = p->next;
4381		if (p == &cache_chain)
4382			return NULL;
4383	}
4384	return list_entry(p, struct kmem_cache, next);
4385}
4386
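/*
 * m->private points at an array of unsigned longs: n[0] is the capacity in
 * entries, n[1] the number of entries in use, followed by n[1] pairs of
 * (caller address, occurrence count) kept sorted by address.  add_caller()
 * binary-searches for the address and either bumps its count or inserts a
 * new pair, returning 0 when the buffer is full so that leaks_show() can
 * grow it and retry.
 */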
4387static inline int add_caller(unsigned long *n, unsigned long v)
4388{
4389	unsigned long *p;
4390	int l;
4391	if (!v)
4392		return 1;
4393	l = n[1];
4394	p = n + 2;
4395	while (l) {
4396		int i = l/2;
4397		unsigned long *q = p + 2 * i;
4398		if (*q == v) {
4399			q[1]++;
4400			return 1;
4401		}
4402		if (*q > v) {
4403			l = i;
4404		} else {
4405			p = q + 2;
4406			l -= i + 1;
4407		}
4408	}
4409	if (++n[1] == n[0])
4410		return 0;
4411	memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n));
4412	p[0] = v;
4413	p[1] = 1;
4414	return 1;
4415}
4416
4417static void handle_slab(unsigned long *n, struct kmem_cache *c, struct slab *s)
4418{
4419	void *p;
4420	int i;
4421	if (n[0] == n[1])
4422		return;
4423	for (i = 0, p = s->s_mem; i < c->num; i++, p += c->buffer_size) {
4424		if (slab_bufctl(s)[i] != BUFCTL_ACTIVE)
4425			continue;
4426		if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
4427			return;
4428	}
4429}
4430
4431static void show_symbol(struct seq_file *m, unsigned long address)
4432{
4433#ifdef CONFIG_KALLSYMS
4434	unsigned long offset, size;
4435	char modname[MODULE_NAME_LEN + 1], name[KSYM_NAME_LEN + 1];
4436
4437	if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) {
4438		seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
4439		if (modname[0])
4440			seq_printf(m, " [%s]", modname);
4441		return;
4442	}
4443#endif
4444	seq_printf(m, "%p", (void *)address);
4445}
4446
4447static int leaks_show(struct seq_file *m, void *p)
4448{
4449	struct kmem_cache *cachep = p;
4450	struct slab *slabp;
4451	struct kmem_list3 *l3;
4452	const char *name;
4453	unsigned long *n = m->private;
4454	int node;
4455	int i;
4456
4457	if (!(cachep->flags & SLAB_STORE_USER))
4458		return 0;
4459	if (!(cachep->flags & SLAB_RED_ZONE))
4460		return 0;
4461
4462	/* OK, we can do it */
4463
4464	n[1] = 0;
4465
4466	for_each_online_node(node) {
4467		l3 = cachep->nodelists[node];
4468		if (!l3)
4469			continue;
4470
4471		check_irq_on();
4472		spin_lock_irq(&l3->list_lock);
4473
4474		list_for_each_entry(slabp, &l3->slabs_full, list)
4475			handle_slab(n, cachep, slabp);
4476		list_for_each_entry(slabp, &l3->slabs_partial, list)
4477			handle_slab(n, cachep, slabp);
4478		spin_unlock_irq(&l3->list_lock);
4479	}
4480	name = cachep->name;
4481	if (n[0] == n[1]) {
4482		/* Increase the buffer size */
4483		mutex_unlock(&cache_chain_mutex);
4484		m->private = kzalloc(n[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
4485		if (!m->private) {
4486			/* Too bad, we are really out */
4487			m->private = n;
4488			mutex_lock(&cache_chain_mutex);
4489			return -ENOMEM;
4490		}
4491		*(unsigned long *)m->private = n[0] * 2;
4492		kfree(n);
4493		mutex_lock(&cache_chain_mutex);
4494		/* Now make sure this entry will be retried */
4495		m->count = m->size;
4496		return 0;
4497	}
4498	for (i = 0; i < n[1]; i++) {
4499		seq_printf(m, "%s: %lu ", name, n[2*i+3]);
4500		show_symbol(m, n[2*i+2]);
4501		seq_putc(m, '\n');
4502	}
4503
4504	return 0;
4505}
4506
4507const struct seq_operations slabstats_op = {
4508	.start = leaks_start,
4509	.next = s_next,
4510	.stop = s_stop,
4511	.show = leaks_show,
4512};
4513#endif
4514#endif
4515
4516/**
4517 * ksize - get the actual amount of memory allocated for a given object
4518 * @objp: Pointer to the object
4519 *
4520 * kmalloc may internally round up allocations and return more memory
4521 * than requested. ksize() can be used to determine the actual amount of
4522 * memory allocated. The caller may use this additional memory, even though
4523 * a smaller amount of memory was initially specified with the kmalloc call.
4524 * The caller must guarantee that objp points to a valid object previously
4525 * allocated with either kmalloc() or kmem_cache_alloc(). The object
4526 * must not be freed for the duration of the call.
4527 */
4528size_t ksize(const void *objp)
4529{
4530	if (unlikely(objp == NULL))
4531		return 0;
4532
4533	return obj_size(virt_to_cache(objp));
4534}
4535
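/*
 * Example (an illustrative sketch; "buf" and "usable" are hypothetical
 * names): because kmalloc rounds a request up to the next general cache
 * size, the caller may use the slack that ksize() reports.
 *
 *	buf = kmalloc(17, GFP_KERNEL);
 *	if (buf)
 *		usable = ksize(buf);
 *
 * Here usable would typically come back as 32 on common configurations,
 * and all of those bytes belong to the allocation and may be used safely.
 */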