slab.c revision 3df1cccdfb3fab6aa9176beb655d802eb384eabc
1/*
2 * linux/mm/slab.c
3 * Written by Mark Hemment, 1996/97.
4 * (markhe@nextd.demon.co.uk)
5 *
6 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
7 *
8 * Major cleanup, different bufctl logic, per-cpu arrays
9 *	(c) 2000 Manfred Spraul
10 *
11 * Cleanup, make the head arrays unconditional, preparation for NUMA
12 * 	(c) 2002 Manfred Spraul
13 *
14 * An implementation of the Slab Allocator as described in outline in;
15 *	UNIX Internals: The New Frontiers by Uresh Vahalia
16 *	Pub: Prentice Hall	ISBN 0-13-101908-2
17 * or with a little more detail in;
18 *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
19 *	Jeff Bonwick (Sun Microsystems).
20 *	Presented at: USENIX Summer 1994 Technical Conference
21 *
22 * The memory is organized in caches, one cache for each object type.
23 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
24 * Each cache consists of many slabs (each slab is small, usually one
25 * page long, and always contiguous), and each slab contains multiple
26 * initialized objects.
27 *
28 * This means that your constructor is used only for newly allocated
29 * slabs and you must pass objects with the same initializations to
30 * kmem_cache_free.
31 *
32 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
33 * normal). If you need a special memory type, then you must create a new
34 * cache for that memory type.
35 *
36 * In order to reduce fragmentation, the slabs are sorted in 3 groups:
37 *   full slabs with 0 free objects
38 *   partial slabs
39 *   empty slabs with no allocated objects
40 *
41 * If partial slabs exist, then new allocations come from these slabs;
42 * otherwise they come from empty slabs, or new slabs are allocated.
43 *
44 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
45 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
46 *
47 * Each cache has a short per-cpu head array; most allocs
48 * and frees go through that array, and if that array overflows, then 1/2
49 * of the entries in the array are given back to the global cache.
50 * The head array is strictly LIFO and should improve the cache hit rates.
51 * On SMP, it additionally reduces the spinlock operations.
52 *
53 * The c_cpuarray may not be read with local interrupts enabled -
54 * it's changed with a smp_call_function().
55 *
56 * SMP synchronization:
57 *  constructors and destructors are called without any locking.
58 *  Several members in struct kmem_cache and struct slab never change, they
59 *	are accessed without any locking.
60 *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
61 *  	and local interrupts are disabled so slab code is preempt-safe.
62 *  The non-constant members are protected with a per-cache irq spinlock.
63 *
64 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
65 * in 2000 - many ideas in the current implementation are derived from
66 * his patch.
67 *
68 * Further notes from the original documentation:
69 *
70 * 11 April '97.  Started multi-threading - markhe
71 *	The global cache-chain is protected by the mutex 'cache_chain_mutex'.
72 *	The sem is only needed when accessing/extending the cache-chain, which
73 *	can never happen inside an interrupt (kmem_cache_create(),
74 *	kmem_cache_shrink() and kmem_cache_reap()).
75 *
76 *	At present, each engine can be growing a cache.  This should be blocked.
77 *
78 * 15 March 2005. NUMA slab allocator.
79 *	Shai Fultheim <shai@scalex86.org>.
80 *	Shobhit Dayal <shobhit@calsoftinc.com>
81 *	Alok N Kataria <alokk@calsoftinc.com>
82 *	Christoph Lameter <christoph@lameter.com>
83 *
84 *	Modified the slab allocator to be node aware on NUMA systems.
85 *	Each node has its own list of partial, free and full slabs.
86 *	All object allocations for a node occur from node specific slab lists.
87 */
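
/*
 * A minimal usage sketch of the API described above ("struct foo" and
 * foo_cachep are hypothetical, for illustration only):
 *
 *	static struct kmem_cache *foo_cachep;
 *
 *	foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo),
 *				       0, SLAB_HWCACHE_ALIGN, NULL);
 *	struct foo *obj = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cachep, obj);	// obj must be back in its
 *						// constructed state
 *	kmem_cache_destroy(foo_cachep);		// no concurrent allocs allowed
 */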
88
89#include	<linux/slab.h>
90#include	<linux/mm.h>
91#include	<linux/poison.h>
92#include	<linux/swap.h>
93#include	<linux/cache.h>
94#include	<linux/interrupt.h>
95#include	<linux/init.h>
96#include	<linux/compiler.h>
97#include	<linux/cpuset.h>
98#include	<linux/proc_fs.h>
99#include	<linux/seq_file.h>
100#include	<linux/notifier.h>
101#include	<linux/kallsyms.h>
102#include	<linux/cpu.h>
103#include	<linux/sysctl.h>
104#include	<linux/module.h>
105#include	<linux/rcupdate.h>
106#include	<linux/string.h>
107#include	<linux/uaccess.h>
108#include	<linux/nodemask.h>
109#include	<linux/kmemleak.h>
110#include	<linux/mempolicy.h>
111#include	<linux/mutex.h>
112#include	<linux/fault-inject.h>
113#include	<linux/rtmutex.h>
114#include	<linux/reciprocal_div.h>
115#include	<linux/debugobjects.h>
116#include	<linux/kmemcheck.h>
117#include	<linux/memory.h>
118#include	<linux/prefetch.h>
119
120#include	<asm/cacheflush.h>
121#include	<asm/tlbflush.h>
122#include	<asm/page.h>
123
124/*
125 * DEBUG	- 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
126 *		  0 for faster, smaller code (especially in the critical paths).
127 *
128 * STATS	- 1 to collect stats for /proc/slabinfo.
129 *		  0 for faster, smaller code (especially in the critical paths).
130 *
131 * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
132 */
133
134#ifdef CONFIG_DEBUG_SLAB
135#define	DEBUG		1
136#define	STATS		1
137#define	FORCED_DEBUG	1
138#else
139#define	DEBUG		0
140#define	STATS		0
141#define	FORCED_DEBUG	0
142#endif
143
144/* Shouldn't this be in a header file somewhere? */
145#define	BYTES_PER_WORD		sizeof(void *)
146#define	REDZONE_ALIGN		max(BYTES_PER_WORD, __alignof__(unsigned long long))
147
148#ifndef ARCH_KMALLOC_FLAGS
149#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
150#endif
151
152/* Legal flag mask for kmem_cache_create(). */
153#if DEBUG
154# define CREATE_MASK	(SLAB_RED_ZONE | \
155			 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
156			 SLAB_CACHE_DMA | \
157			 SLAB_STORE_USER | \
158			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
159			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
160			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
161#else
162# define CREATE_MASK	(SLAB_HWCACHE_ALIGN | \
163			 SLAB_CACHE_DMA | \
164			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
165			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
166			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
167#endif
168
169/*
170 * kmem_bufctl_t:
171 *
172 * Bufctls are used for linking objs within a slab
173 * via linked (index) offsets.
174 *
175 * This implementation relies on "struct page" for locating the cache &
176 * slab an object belongs to.
177 * This allows the bufctl structure to be small (one int), but limits
178 * the number of objects a slab (not a cache) can contain when off-slab
179 * bufctls are used. The limit is the size of the largest general cache
180 * that does not use off-slab slabs.
181 * For 32bit archs with 4 kB pages, this is 56.
182 * This is not serious, as it is only for large objects, when it is unwise
183 * to have too many per slab.
184 * Note: This limit can be raised by introducing a general cache whose size
185 * is less than 512 (PAGE_SIZE<<3), but greater than 256.
186 */
187
188typedef unsigned int kmem_bufctl_t;
189#define BUFCTL_END	(((kmem_bufctl_t)(~0U))-0)
190#define BUFCTL_FREE	(((kmem_bufctl_t)(~0U))-1)
191#define	BUFCTL_ACTIVE	(((kmem_bufctl_t)(~0U))-2)
192#define	SLAB_LIMIT	(((kmem_bufctl_t)(~0U))-3)
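
/*
 * A rough sketch of how the free list is chained with these values (the
 * indices are illustrative): slabp->free holds the index of the first free
 * object, and the per-object kmem_bufctl_t array managed by the
 * slab_bufctl() helper further down in this file links the rest,
 * terminated by BUFCTL_END:
 *
 *	slabp->free = 2;			// object 2 is handed out next
 *	slab_bufctl(slabp)[2] = 5;		// after 2, object 5 is free
 *	slab_bufctl(slabp)[5] = BUFCTL_END;	// end of the free list
 */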
193
194/*
195 * struct slab_rcu
196 *
197 * slab_destroy on a SLAB_DESTROY_BY_RCU cache uses this structure to
198 * arrange for kmem_freepages to be called via RCU.  This is useful if
199 * we need to approach a kernel structure obliquely, from its address
200 * obtained without the usual locking.  We can lock the structure to
201 * stabilize it and check it's still at the given address, only if we
202 * can be sure that the memory has not been meanwhile reused for some
203 * other kind of object (which our subsystem's lock might corrupt).
204 *
205 * rcu_read_lock before reading the address, then rcu_read_unlock after
206 * taking the spinlock within the structure expected at that address.
207 */
208struct slab_rcu {
209	struct rcu_head head;
210	struct kmem_cache *cachep;
211	void *addr;
212};
213
214/*
215 * struct slab
216 *
217 * Manages the objs in a slab. Placed either at the beginning of mem allocated
218 * for a slab, or allocated from a general cache.
219 * Slabs are chained into three lists: fully used, partial, fully free slabs.
220 */
221struct slab {
222	union {
223		struct {
224			struct list_head list;
225			unsigned long colouroff;
226			void *s_mem;		/* including colour offset */
227			unsigned int inuse;	/* num of objs active in slab */
228			kmem_bufctl_t free;
229			unsigned short nodeid;
230		};
231		struct slab_rcu __slab_cover_slab_rcu;
232	};
233};
234
235/*
236 * struct array_cache
237 *
238 * Purpose:
239 * - LIFO ordering, to hand out cache-warm objects from _alloc
240 * - reduce the number of linked list operations
241 * - reduce spinlock operations
242 *
243 * The limit is stored in the per-cpu structure to reduce the data cache
244 * footprint.
245 *
246 */
247struct array_cache {
248	unsigned int avail;
249	unsigned int limit;
250	unsigned int batchcount;
251	unsigned int touched;
252	spinlock_t lock;
253	void *entry[];	/*
254			 * Must have this definition in here for the proper
255			 * alignment of array_cache. Also simplifies accessing
256			 * the entries.
257			 */
258};
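
/*
 * Simplified view of the fast paths (the real ones are ____cache_alloc()
 * and __cache_free() further down): the entry[] array is used as a stack,
 * so the most recently freed, cache-warm object is handed out first:
 *
 *	ac->entry[ac->avail++] = objp;		// free: push
 *	objp = ac->entry[--ac->avail];		// alloc: pop
 */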
259
260/*
261 * bootstrap: The caches do not work without cpuarrays anymore, but the
262 * cpuarrays are allocated from the generic caches...
263 */
264#define BOOT_CPUCACHE_ENTRIES	1
265struct arraycache_init {
266	struct array_cache cache;
267	void *entries[BOOT_CPUCACHE_ENTRIES];
268};
269
270/*
271 * The slab lists for all objects.
272 */
273struct kmem_list3 {
274	struct list_head slabs_partial;	/* partial list first, better asm code */
275	struct list_head slabs_full;
276	struct list_head slabs_free;
277	unsigned long free_objects;
278	unsigned int free_limit;
279	unsigned int colour_next;	/* Per-node cache coloring */
280	spinlock_t list_lock;
281	struct array_cache *shared;	/* shared per node */
282	struct array_cache **alien;	/* on other nodes */
283	unsigned long next_reap;	/* updated without locking */
284	int free_touched;		/* updated without locking */
285};
286
287/*
288 * Need this for bootstrapping a per node allocator.
289 */
290#define NUM_INIT_LISTS (3 * MAX_NUMNODES)
291static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
292#define	CACHE_CACHE 0
293#define	SIZE_AC MAX_NUMNODES
294#define	SIZE_L3 (2 * MAX_NUMNODES)
295
296static int drain_freelist(struct kmem_cache *cache,
297			struct kmem_list3 *l3, int tofree);
298static void free_block(struct kmem_cache *cachep, void **objpp, int len,
299			int node);
300static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
301static void cache_reap(struct work_struct *unused);
302
303/*
304 * This function must be completely optimized away if a constant is passed to
305 * it.  Mostly the same as what is in linux/slab.h except it returns an index.
306 */
307static __always_inline int index_of(const size_t size)
308{
309	extern void __bad_size(void);
310
311	if (__builtin_constant_p(size)) {
312		int i = 0;
313
314#define CACHE(x) \
315	if (size <=x) \
316		return i; \
317	else \
318		i++;
319#include <linux/kmalloc_sizes.h>
320#undef CACHE
321		__bad_size();
322	} else
323		__bad_size();
324	return 0;
325}
326
327static int slab_early_init = 1;
328
329#define INDEX_AC index_of(sizeof(struct arraycache_init))
330#define INDEX_L3 index_of(sizeof(struct kmem_list3))
331
332static void kmem_list3_init(struct kmem_list3 *parent)
333{
334	INIT_LIST_HEAD(&parent->slabs_full);
335	INIT_LIST_HEAD(&parent->slabs_partial);
336	INIT_LIST_HEAD(&parent->slabs_free);
337	parent->shared = NULL;
338	parent->alien = NULL;
339	parent->colour_next = 0;
340	spin_lock_init(&parent->list_lock);
341	parent->free_objects = 0;
342	parent->free_touched = 0;
343}
344
345#define MAKE_LIST(cachep, listp, slab, nodeid)				\
346	do {								\
347		INIT_LIST_HEAD(listp);					\
348		list_splice(&(cachep->nodelists[nodeid]->slab), listp);	\
349	} while (0)
350
351#define	MAKE_ALL_LISTS(cachep, ptr, nodeid)				\
352	do {								\
353	MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid);	\
354	MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
355	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
356	} while (0)
357
358#define CFLGS_OFF_SLAB		(0x80000000UL)
359#define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)
360
361#define BATCHREFILL_LIMIT	16
362/*
363 * Optimization question: fewer reaps mean a lower probability of unnecessary
364 * cpucache drain/refill cycles.
365 *
366 * OTOH the cpuarrays can contain lots of objects,
367 * which could lock up otherwise freeable slabs.
368 */
369#define REAPTIMEOUT_CPUC	(2*HZ)
370#define REAPTIMEOUT_LIST3	(4*HZ)
371
372#if STATS
373#define	STATS_INC_ACTIVE(x)	((x)->num_active++)
374#define	STATS_DEC_ACTIVE(x)	((x)->num_active--)
375#define	STATS_INC_ALLOCED(x)	((x)->num_allocations++)
376#define	STATS_INC_GROWN(x)	((x)->grown++)
377#define	STATS_ADD_REAPED(x,y)	((x)->reaped += (y))
378#define	STATS_SET_HIGH(x)						\
379	do {								\
380		if ((x)->num_active > (x)->high_mark)			\
381			(x)->high_mark = (x)->num_active;		\
382	} while (0)
383#define	STATS_INC_ERR(x)	((x)->errors++)
384#define	STATS_INC_NODEALLOCS(x)	((x)->node_allocs++)
385#define	STATS_INC_NODEFREES(x)	((x)->node_frees++)
386#define STATS_INC_ACOVERFLOW(x)   ((x)->node_overflow++)
387#define	STATS_SET_FREEABLE(x, i)					\
388	do {								\
389		if ((x)->max_freeable < i)				\
390			(x)->max_freeable = i;				\
391	} while (0)
392#define STATS_INC_ALLOCHIT(x)	atomic_inc(&(x)->allochit)
393#define STATS_INC_ALLOCMISS(x)	atomic_inc(&(x)->allocmiss)
394#define STATS_INC_FREEHIT(x)	atomic_inc(&(x)->freehit)
395#define STATS_INC_FREEMISS(x)	atomic_inc(&(x)->freemiss)
396#else
397#define	STATS_INC_ACTIVE(x)	do { } while (0)
398#define	STATS_DEC_ACTIVE(x)	do { } while (0)
399#define	STATS_INC_ALLOCED(x)	do { } while (0)
400#define	STATS_INC_GROWN(x)	do { } while (0)
401#define	STATS_ADD_REAPED(x,y)	do { (void)(y); } while (0)
402#define	STATS_SET_HIGH(x)	do { } while (0)
403#define	STATS_INC_ERR(x)	do { } while (0)
404#define	STATS_INC_NODEALLOCS(x)	do { } while (0)
405#define	STATS_INC_NODEFREES(x)	do { } while (0)
406#define STATS_INC_ACOVERFLOW(x)   do { } while (0)
407#define	STATS_SET_FREEABLE(x, i) do { } while (0)
408#define STATS_INC_ALLOCHIT(x)	do { } while (0)
409#define STATS_INC_ALLOCMISS(x)	do { } while (0)
410#define STATS_INC_FREEHIT(x)	do { } while (0)
411#define STATS_INC_FREEMISS(x)	do { } while (0)
412#endif
413
414#if DEBUG
415
416/*
417 * memory layout of objects:
418 * 0		: objp
419 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
420 * 		the end of an object is aligned with the end of the real
421 * 		allocation. Catches writes behind the end of the allocation.
422 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
423 * 		redzone word.
424 * cachep->obj_offset: The real object.
425 * cachep->buffer_size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
426 * cachep->buffer_size - 1* BYTES_PER_WORD: last caller address
427 *					[BYTES_PER_WORD long]
428 */
429static int obj_offset(struct kmem_cache *cachep)
430{
431	return cachep->obj_offset;
432}
433
434static int obj_size(struct kmem_cache *cachep)
435{
436	return cachep->obj_size;
437}
438
439static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
440{
441	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
442	return (unsigned long long*) (objp + obj_offset(cachep) -
443				      sizeof(unsigned long long));
444}
445
446static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
447{
448	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
449	if (cachep->flags & SLAB_STORE_USER)
450		return (unsigned long long *)(objp + cachep->buffer_size -
451					      sizeof(unsigned long long) -
452					      REDZONE_ALIGN);
453	return (unsigned long long *) (objp + cachep->buffer_size -
454				       sizeof(unsigned long long));
455}
456
457static void **dbg_userword(struct kmem_cache *cachep, void *objp)
458{
459	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
460	return (void **)(objp + cachep->buffer_size - BYTES_PER_WORD);
461}
462
463#else
464
465#define obj_offset(x)			0
466#define obj_size(cachep)		(cachep->buffer_size)
467#define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
468#define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
469#define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})
470
471#endif
472
473#ifdef CONFIG_TRACING
474size_t slab_buffer_size(struct kmem_cache *cachep)
475{
476	return cachep->buffer_size;
477}
478EXPORT_SYMBOL(slab_buffer_size);
479#endif
480
481/*
482 * Do not go above this order unless 0 objects fit into the slab or
483 * it is overridden on the command line.
484 */
485#define	SLAB_MAX_ORDER_HI	1
486#define	SLAB_MAX_ORDER_LO	0
487static int slab_max_order = SLAB_MAX_ORDER_LO;
488static bool slab_max_order_set __initdata;
489
490/*
491 * Functions for storing/retrieving the cachep and/or slab from the page
492 * allocator.  These are used to find the slab an obj belongs to.  With kfree(),
493 * these are used to find the cache to which an obj belongs.
494 */
495static inline void page_set_cache(struct page *page, struct kmem_cache *cache)
496{
497	page->lru.next = (struct list_head *)cache;
498}
499
500static inline struct kmem_cache *page_get_cache(struct page *page)
501{
502	page = compound_head(page);
503	BUG_ON(!PageSlab(page));
504	return (struct kmem_cache *)page->lru.next;
505}
506
507static inline void page_set_slab(struct page *page, struct slab *slab)
508{
509	page->lru.prev = (struct list_head *)slab;
510}
511
512static inline struct slab *page_get_slab(struct page *page)
513{
514	BUG_ON(!PageSlab(page));
515	return (struct slab *)page->lru.prev;
516}
517
518static inline struct kmem_cache *virt_to_cache(const void *obj)
519{
520	struct page *page = virt_to_head_page(obj);
521	return page_get_cache(page);
522}
523
524static inline struct slab *virt_to_slab(const void *obj)
525{
526	struct page *page = virt_to_head_page(obj);
527	return page_get_slab(page);
528}
529
530static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
531				 unsigned int idx)
532{
533	return slab->s_mem + cache->buffer_size * idx;
534}
535
536/*
537 * We want to avoid an expensive divide : (offset / cache->buffer_size)
538 *   Using the fact that buffer_size is a constant for a particular cache,
539 *   we can replace (offset / cache->buffer_size) by
540 *   reciprocal_divide(offset, cache->reciprocal_buffer_size)
541 */
542static inline unsigned int obj_to_index(const struct kmem_cache *cache,
543					const struct slab *slab, void *obj)
544{
545	u32 offset = (obj - slab->s_mem);
546	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
547}
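
/*
 * Example (reciprocal_value() and reciprocal_divide() come from
 * <linux/reciprocal_div.h>): for a cache with buffer_size == 256,
 * reciprocal_buffer_size == reciprocal_value(256), and an object at
 * offset 768 from s_mem yields
 *
 *	reciprocal_divide(768, cache->reciprocal_buffer_size) == 768/256 == 3
 *
 * i.e. a multiply and a shift instead of a divide.
 */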
548
549/*
550 * These are the default caches for kmalloc. Custom caches can have other sizes.
551 */
552struct cache_sizes malloc_sizes[] = {
553#define CACHE(x) { .cs_size = (x) },
554#include <linux/kmalloc_sizes.h>
555	CACHE(ULONG_MAX)
556#undef CACHE
557};
558EXPORT_SYMBOL(malloc_sizes);
559
560/* Must match cache_sizes above. Out of line to keep cache footprint low. */
561struct cache_names {
562	char *name;
563	char *name_dma;
564};
565
566static struct cache_names __initdata cache_names[] = {
567#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
568#include <linux/kmalloc_sizes.h>
569	{NULL,}
570#undef CACHE
571};
572
573static struct arraycache_init initarray_cache __initdata =
574    { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
575static struct arraycache_init initarray_generic =
576    { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
577
578/* internal cache of cache description objs */
579static struct kmem_list3 *cache_cache_nodelists[MAX_NUMNODES];
580static struct kmem_cache cache_cache = {
581	.nodelists = cache_cache_nodelists,
582	.batchcount = 1,
583	.limit = BOOT_CPUCACHE_ENTRIES,
584	.shared = 1,
585	.buffer_size = sizeof(struct kmem_cache),
586	.name = "kmem_cache",
587};
588
589#define BAD_ALIEN_MAGIC 0x01020304ul
590
591/*
592 * chicken and egg problem: delay the per-cpu array allocation
593 * until the general caches are up.
594 */
595static enum {
596	NONE,
597	PARTIAL_AC,
598	PARTIAL_L3,
599	EARLY,
600	FULL
601} g_cpucache_up;
602
603/*
604 * used by boot code to determine if it can use slab based allocator
605 */
606int slab_is_available(void)
607{
608	return g_cpucache_up >= EARLY;
609}
610
611#ifdef CONFIG_LOCKDEP
612
613/*
614 * Slab sometimes uses the kmalloc slabs to store the slab headers
615 * for other slabs "off slab".
616 * The locking for this is tricky in that it nests within the locks
617 * of all other slabs in a few places; to deal with this special
618 * locking we put on-slab caches into a separate lock-class.
619 *
620 * We set lock class for alien array caches which are up during init.
621 * The lock annotation will be lost if all cpus of a node go down and
622 * then come back up during hotplug.
623 */
624static struct lock_class_key on_slab_l3_key;
625static struct lock_class_key on_slab_alc_key;
626
627static struct lock_class_key debugobj_l3_key;
628static struct lock_class_key debugobj_alc_key;
629
630static void slab_set_lock_classes(struct kmem_cache *cachep,
631		struct lock_class_key *l3_key, struct lock_class_key *alc_key,
632		int q)
633{
634	struct array_cache **alc;
635	struct kmem_list3 *l3;
636	int r;
637
638	l3 = cachep->nodelists[q];
639	if (!l3)
640		return;
641
642	lockdep_set_class(&l3->list_lock, l3_key);
643	alc = l3->alien;
644	/*
645	 * FIXME: This check for BAD_ALIEN_MAGIC
646	 * should go away when common slab code is taught to
647	 * work even without alien caches.
648	 * Currently, non NUMA code returns BAD_ALIEN_MAGIC
649	 * for alloc_alien_cache,
650	 */
651	if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
652		return;
653	for_each_node(r) {
654		if (alc[r])
655			lockdep_set_class(&alc[r]->lock, alc_key);
656	}
657}
658
659static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node)
660{
661	slab_set_lock_classes(cachep, &debugobj_l3_key, &debugobj_alc_key, node);
662}
663
664static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
665{
666	int node;
667
668	for_each_online_node(node)
669		slab_set_debugobj_lock_classes_node(cachep, node);
670}
671
672static void init_node_lock_keys(int q)
673{
674	struct cache_sizes *s = malloc_sizes;
675
676	if (g_cpucache_up != FULL)
677		return;
678
679	for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) {
680		struct kmem_list3 *l3;
681
682		l3 = s->cs_cachep->nodelists[q];
683		if (!l3 || OFF_SLAB(s->cs_cachep))
684			continue;
685
686		slab_set_lock_classes(s->cs_cachep, &on_slab_l3_key,
687				&on_slab_alc_key, q);
688	}
689}
690
691static inline void init_lock_keys(void)
692{
693	int node;
694
695	for_each_node(node)
696		init_node_lock_keys(node);
697}
698#else
699static void init_node_lock_keys(int q)
700{
701}
702
703static inline void init_lock_keys(void)
704{
705}
706
707static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node)
708{
709}
710
711static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
712{
713}
714#endif
715
716/*
717 * Guard access to the cache-chain.
718 */
719static DEFINE_MUTEX(cache_chain_mutex);
720static struct list_head cache_chain;
721
722static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
723
724static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
725{
726	return cachep->array[smp_processor_id()];
727}
728
729static inline struct kmem_cache *__find_general_cachep(size_t size,
730							gfp_t gfpflags)
731{
732	struct cache_sizes *csizep = malloc_sizes;
733
734#if DEBUG
735	/* This happens if someone tries to call
736	 * kmem_cache_create(), or __kmalloc(), before
737	 * the generic caches are initialized.
738	 */
739	BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
740#endif
741	if (!size)
742		return ZERO_SIZE_PTR;
743
744	while (size > csizep->cs_size)
745		csizep++;
746
747	/*
748	 * Really subtle: The last entry with cs->cs_size==ULONG_MAX
749	 * has cs_{dma,}cachep==NULL. Thus no special case
750	 * for large kmalloc calls is required.
751	 */
752#ifdef CONFIG_ZONE_DMA
753	if (unlikely(gfpflags & GFP_DMA))
754		return csizep->cs_dmacachep;
755#endif
756	return csizep->cs_cachep;
757}
758
759static struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
760{
761	return __find_general_cachep(size, gfpflags);
762}
763
764static size_t slab_mgmt_size(size_t nr_objs, size_t align)
765{
766	return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align);
767}
768
769/*
770 * Calculate the number of objects and left-over bytes for a given buffer size.
771 */
772static void cache_estimate(unsigned long gfporder, size_t buffer_size,
773			   size_t align, int flags, size_t *left_over,
774			   unsigned int *num)
775{
776	int nr_objs;
777	size_t mgmt_size;
778	size_t slab_size = PAGE_SIZE << gfporder;
779
780	/*
781	 * The slab management structure can be either off the slab or
782	 * on it. For the latter case, the memory allocated for a
783	 * slab is used for:
784	 *
785	 * - The struct slab
786	 * - One kmem_bufctl_t for each object
787	 * - Padding to respect alignment of @align
788	 * - @buffer_size bytes for each object
789	 *
790	 * If the slab management structure is off the slab, then the
791	 * alignment will already be calculated into the size. Because
792	 * the slabs are all pages aligned, the objects will be at the
793	 * correct alignment when allocated.
794	 */
795	if (flags & CFLGS_OFF_SLAB) {
796		mgmt_size = 0;
797		nr_objs = slab_size / buffer_size;
798
799		if (nr_objs > SLAB_LIMIT)
800			nr_objs = SLAB_LIMIT;
801	} else {
802		/*
803		 * Ignore padding for the initial guess. The padding
804		 * is at most @align-1 bytes, and @buffer_size is at
805		 * least @align. In the worst case, this result will
806		 * be one greater than the number of objects that fit
807		 * into the memory allocation when taking the padding
808		 * into account.
809		 */
810		nr_objs = (slab_size - sizeof(struct slab)) /
811			  (buffer_size + sizeof(kmem_bufctl_t));
812
813		/*
814		 * This calculated number will be either the right
815		 * amount, or one greater than what we want.
816		 */
817		if (slab_mgmt_size(nr_objs, align) + nr_objs*buffer_size
818		       > slab_size)
819			nr_objs--;
820
821		if (nr_objs > SLAB_LIMIT)
822			nr_objs = SLAB_LIMIT;
823
824		mgmt_size = slab_mgmt_size(nr_objs, align);
825	}
826	*num = nr_objs;
827	*left_over = slab_size - nr_objs*buffer_size - mgmt_size;
828}
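
/*
 * Worked example with assumed numbers (sizeof(struct slab) taken as 40,
 * sizeof(kmem_bufctl_t) == 4, align == 64, on-slab management): for an
 * order-0 slab of 4096 bytes and 128-byte objects the initial guess is
 * (4096 - 40) / (128 + 4) = 30 objects.  Checking the guess,
 * slab_mgmt_size(30, 64) = ALIGN(40 + 30*4, 64) = 192 and
 * 192 + 30*128 = 4032 <= 4096, so it stands:
 * *num = 30 and *left_over = 4096 - 3840 - 192 = 64 (available for colouring).
 */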
829
830#define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)
831
832static void __slab_error(const char *function, struct kmem_cache *cachep,
833			char *msg)
834{
835	printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
836	       function, cachep->name, msg);
837	dump_stack();
838}
839
840/*
841 * By default on NUMA we use alien caches to stage the freeing of
842 * objects allocated from other nodes. This causes massive memory
843 * inefficiencies when using fake NUMA setup to split memory into a
844 * large number of small nodes, so it can be disabled on the command
845 * line
846  */
847
848static int use_alien_caches __read_mostly = 1;
849static int __init noaliencache_setup(char *s)
850{
851	use_alien_caches = 0;
852	return 1;
853}
854__setup("noaliencache", noaliencache_setup);
855
856static int __init slab_max_order_setup(char *str)
857{
858	get_option(&str, &slab_max_order);
859	slab_max_order = slab_max_order < 0 ? 0 :
860				min(slab_max_order, MAX_ORDER - 1);
861	slab_max_order_set = true;
862
863	return 1;
864}
865__setup("slab_max_order=", slab_max_order_setup);
866
867#ifdef CONFIG_NUMA
868/*
869 * Special reaping functions for NUMA systems called from cache_reap().
870 * These take care of doing round robin flushing of alien caches (containing
871 * objects freed on a different node from the one they were allocated on) and the
872 * flushing of remote pcps by calling drain_node_pages.
873 */
874static DEFINE_PER_CPU(unsigned long, slab_reap_node);
875
876static void init_reap_node(int cpu)
877{
878	int node;
879
880	node = next_node(cpu_to_mem(cpu), node_online_map);
881	if (node == MAX_NUMNODES)
882		node = first_node(node_online_map);
883
884	per_cpu(slab_reap_node, cpu) = node;
885}
886
887static void next_reap_node(void)
888{
889	int node = __this_cpu_read(slab_reap_node);
890
891	node = next_node(node, node_online_map);
892	if (unlikely(node >= MAX_NUMNODES))
893		node = first_node(node_online_map);
894	__this_cpu_write(slab_reap_node, node);
895}
896
897#else
898#define init_reap_node(cpu) do { } while (0)
899#define next_reap_node(void) do { } while (0)
900#endif
901
902/*
903 * Initiate the reap timer running on the target CPU.  We run at around 1 to 2Hz
904 * via the workqueue/eventd.
905 * Add the CPU number into the expiration time to minimize the possibility of
906 * the CPUs getting into lockstep and contending for the global cache chain
907 * lock.
908 */
909static void __cpuinit start_cpu_timer(int cpu)
910{
911	struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);
912
913	/*
914	 * When this gets called from do_initcalls via cpucache_init(),
915	 * init_workqueues() has already run, so keventd will be setup
916	 * at that time.
917	 */
918	if (keventd_up() && reap_work->work.func == NULL) {
919		init_reap_node(cpu);
920		INIT_DELAYED_WORK_DEFERRABLE(reap_work, cache_reap);
921		schedule_delayed_work_on(cpu, reap_work,
922					__round_jiffies_relative(HZ, cpu));
923	}
924}
925
926static struct array_cache *alloc_arraycache(int node, int entries,
927					    int batchcount, gfp_t gfp)
928{
929	int memsize = sizeof(void *) * entries + sizeof(struct array_cache);
930	struct array_cache *nc = NULL;
931
932	nc = kmalloc_node(memsize, gfp, node);
933	/*
934	 * The array_cache structures contain pointers to free objects.
935	 * However, when such objects are allocated or transferred to another
936	 * cache the pointers are not cleared and they could be counted as
937	 * valid references during a kmemleak scan. Therefore, kmemleak must
938	 * not scan such objects.
939	 */
940	kmemleak_no_scan(nc);
941	if (nc) {
942		nc->avail = 0;
943		nc->limit = entries;
944		nc->batchcount = batchcount;
945		nc->touched = 0;
946		spin_lock_init(&nc->lock);
947	}
948	return nc;
949}
950
951/*
952 * Transfer objects from one arraycache to another.
953 * Locking must be handled by the caller.
954 *
955 * Return the number of entries transferred.
956 */
957static int transfer_objects(struct array_cache *to,
958		struct array_cache *from, unsigned int max)
959{
960	/* Figure out how many entries to transfer */
961	int nr = min3(from->avail, max, to->limit - to->avail);
962
963	if (!nr)
964		return 0;
965
966	memcpy(to->entry + to->avail, from->entry + from->avail -nr,
967			sizeof(void *) *nr);
968
969	from->avail -= nr;
970	to->avail += nr;
971	return nr;
972}
973
974#ifndef CONFIG_NUMA
975
976#define drain_alien_cache(cachep, alien) do { } while (0)
977#define reap_alien(cachep, l3) do { } while (0)
978
979static inline struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
980{
981	return (struct array_cache **)BAD_ALIEN_MAGIC;
982}
983
984static inline void free_alien_cache(struct array_cache **ac_ptr)
985{
986}
987
988static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
989{
990	return 0;
991}
992
993static inline void *alternate_node_alloc(struct kmem_cache *cachep,
994		gfp_t flags)
995{
996	return NULL;
997}
998
999static inline void *____cache_alloc_node(struct kmem_cache *cachep,
1000		 gfp_t flags, int nodeid)
1001{
1002	return NULL;
1003}
1004
1005#else	/* CONFIG_NUMA */
1006
1007static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
1008static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
1009
1010static struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
1011{
1012	struct array_cache **ac_ptr;
1013	int memsize = sizeof(void *) * nr_node_ids;
1014	int i;
1015
1016	if (limit > 1)
1017		limit = 12;
1018	ac_ptr = kzalloc_node(memsize, gfp, node);
1019	if (ac_ptr) {
1020		for_each_node(i) {
1021			if (i == node || !node_online(i))
1022				continue;
1023			ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d, gfp);
1024			if (!ac_ptr[i]) {
1025				for (i--; i >= 0; i--)
1026					kfree(ac_ptr[i]);
1027				kfree(ac_ptr);
1028				return NULL;
1029			}
1030		}
1031	}
1032	return ac_ptr;
1033}
1034
1035static void free_alien_cache(struct array_cache **ac_ptr)
1036{
1037	int i;
1038
1039	if (!ac_ptr)
1040		return;
1041	for_each_node(i)
1042	    kfree(ac_ptr[i]);
1043	kfree(ac_ptr);
1044}
1045
1046static void __drain_alien_cache(struct kmem_cache *cachep,
1047				struct array_cache *ac, int node)
1048{
1049	struct kmem_list3 *rl3 = cachep->nodelists[node];
1050
1051	if (ac->avail) {
1052		spin_lock(&rl3->list_lock);
1053		/*
1054		 * Stuff objects into the remote node's shared array first.
1055		 * That way we can avoid the overhead of putting the objects
1056		 * into the free lists and getting them back later.
1057		 */
1058		if (rl3->shared)
1059			transfer_objects(rl3->shared, ac, ac->limit);
1060
1061		free_block(cachep, ac->entry, ac->avail, node);
1062		ac->avail = 0;
1063		spin_unlock(&rl3->list_lock);
1064	}
1065}
1066
1067/*
1068 * Called from cache_reap() to regularly drain alien caches round robin.
1069 */
1070static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
1071{
1072	int node = __this_cpu_read(slab_reap_node);
1073
1074	if (l3->alien) {
1075		struct array_cache *ac = l3->alien[node];
1076
1077		if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
1078			__drain_alien_cache(cachep, ac, node);
1079			spin_unlock_irq(&ac->lock);
1080		}
1081	}
1082}
1083
1084static void drain_alien_cache(struct kmem_cache *cachep,
1085				struct array_cache **alien)
1086{
1087	int i = 0;
1088	struct array_cache *ac;
1089	unsigned long flags;
1090
1091	for_each_online_node(i) {
1092		ac = alien[i];
1093		if (ac) {
1094			spin_lock_irqsave(&ac->lock, flags);
1095			__drain_alien_cache(cachep, ac, i);
1096			spin_unlock_irqrestore(&ac->lock, flags);
1097		}
1098	}
1099}
1100
1101static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
1102{
1103	struct slab *slabp = virt_to_slab(objp);
1104	int nodeid = slabp->nodeid;
1105	struct kmem_list3 *l3;
1106	struct array_cache *alien = NULL;
1107	int node;
1108
1109	node = numa_mem_id();
1110
1111	/*
1112	 * Make sure we are not freeing an object from another node to the array
1113	 * cache on this cpu.
1114	 */
1115	if (likely(slabp->nodeid == node))
1116		return 0;
1117
1118	l3 = cachep->nodelists[node];
1119	STATS_INC_NODEFREES(cachep);
1120	if (l3->alien && l3->alien[nodeid]) {
1121		alien = l3->alien[nodeid];
1122		spin_lock(&alien->lock);
1123		if (unlikely(alien->avail == alien->limit)) {
1124			STATS_INC_ACOVERFLOW(cachep);
1125			__drain_alien_cache(cachep, alien, nodeid);
1126		}
1127		alien->entry[alien->avail++] = objp;
1128		spin_unlock(&alien->lock);
1129	} else {
1130		spin_lock(&(cachep->nodelists[nodeid])->list_lock);
1131		free_block(cachep, &objp, 1, nodeid);
1132		spin_unlock(&(cachep->nodelists[nodeid])->list_lock);
1133	}
1134	return 1;
1135}
1136#endif
1137
1138/*
1139 * Allocates and initializes nodelists for a node on each slab cache, used for
1140 * either memory or cpu hotplug.  If memory is being hot-added, the kmem_list3
1141 * will be allocated off-node since memory is not yet online for the new node.
1142 * When hotplugging memory or a cpu, existing nodelists are not replaced if
1143 * already in use.
1144 *
1145 * Must hold cache_chain_mutex.
1146 */
1147static int init_cache_nodelists_node(int node)
1148{
1149	struct kmem_cache *cachep;
1150	struct kmem_list3 *l3;
1151	const int memsize = sizeof(struct kmem_list3);
1152
1153	list_for_each_entry(cachep, &cache_chain, next) {
1154		/*
1155		 * Set up the size64 kmemlist for cpu before we can
1156		 * begin anything. Make sure some other cpu on this
1157		 * node has not already allocated this
1158		 */
1159		if (!cachep->nodelists[node]) {
1160			l3 = kmalloc_node(memsize, GFP_KERNEL, node);
1161			if (!l3)
1162				return -ENOMEM;
1163			kmem_list3_init(l3);
1164			l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
1165			    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
1166
1167			/*
1168			 * The l3s don't come and go as CPUs come and
1169			 * go.  cache_chain_mutex is sufficient
1170			 * protection here.
1171			 */
1172			cachep->nodelists[node] = l3;
1173		}
1174
1175		spin_lock_irq(&cachep->nodelists[node]->list_lock);
1176		cachep->nodelists[node]->free_limit =
1177			(1 + nr_cpus_node(node)) *
1178			cachep->batchcount + cachep->num;
1179		spin_unlock_irq(&cachep->nodelists[node]->list_lock);
1180	}
1181	return 0;
1182}
1183
1184static void __cpuinit cpuup_canceled(long cpu)
1185{
1186	struct kmem_cache *cachep;
1187	struct kmem_list3 *l3 = NULL;
1188	int node = cpu_to_mem(cpu);
1189	const struct cpumask *mask = cpumask_of_node(node);
1190
1191	list_for_each_entry(cachep, &cache_chain, next) {
1192		struct array_cache *nc;
1193		struct array_cache *shared;
1194		struct array_cache **alien;
1195
1196		/* cpu is dead; no one can alloc from it. */
1197		nc = cachep->array[cpu];
1198		cachep->array[cpu] = NULL;
1199		l3 = cachep->nodelists[node];
1200
1201		if (!l3)
1202			goto free_array_cache;
1203
1204		spin_lock_irq(&l3->list_lock);
1205
1206		/* Free limit for this kmem_list3 */
1207		l3->free_limit -= cachep->batchcount;
1208		if (nc)
1209			free_block(cachep, nc->entry, nc->avail, node);
1210
1211		if (!cpumask_empty(mask)) {
1212			spin_unlock_irq(&l3->list_lock);
1213			goto free_array_cache;
1214		}
1215
1216		shared = l3->shared;
1217		if (shared) {
1218			free_block(cachep, shared->entry,
1219				   shared->avail, node);
1220			l3->shared = NULL;
1221		}
1222
1223		alien = l3->alien;
1224		l3->alien = NULL;
1225
1226		spin_unlock_irq(&l3->list_lock);
1227
1228		kfree(shared);
1229		if (alien) {
1230			drain_alien_cache(cachep, alien);
1231			free_alien_cache(alien);
1232		}
1233free_array_cache:
1234		kfree(nc);
1235	}
1236	/*
1237	 * In the previous loop, all the objects were freed to
1238	 * the respective cache's slabs; now we can go ahead and
1239	 * shrink each nodelist to its limit.
1240	 */
1241	list_for_each_entry(cachep, &cache_chain, next) {
1242		l3 = cachep->nodelists[node];
1243		if (!l3)
1244			continue;
1245		drain_freelist(cachep, l3, l3->free_objects);
1246	}
1247}
1248
1249static int __cpuinit cpuup_prepare(long cpu)
1250{
1251	struct kmem_cache *cachep;
1252	struct kmem_list3 *l3 = NULL;
1253	int node = cpu_to_mem(cpu);
1254	int err;
1255
1256	/*
1257	 * We need to do this right in the beginning since
1258	 * alloc_arraycache's are going to use this list.
1259	 * kmalloc_node allows us to add the slab to the right
1260	 * kmem_list3 and not this cpu's kmem_list3
1261	 */
1262	err = init_cache_nodelists_node(node);
1263	if (err < 0)
1264		goto bad;
1265
1266	/*
1267	 * Now we can go ahead with allocating the shared arrays and
1268	 * array caches
1269	 */
1270	list_for_each_entry(cachep, &cache_chain, next) {
1271		struct array_cache *nc;
1272		struct array_cache *shared = NULL;
1273		struct array_cache **alien = NULL;
1274
1275		nc = alloc_arraycache(node, cachep->limit,
1276					cachep->batchcount, GFP_KERNEL);
1277		if (!nc)
1278			goto bad;
1279		if (cachep->shared) {
1280			shared = alloc_arraycache(node,
1281				cachep->shared * cachep->batchcount,
1282				0xbaadf00d, GFP_KERNEL);
1283			if (!shared) {
1284				kfree(nc);
1285				goto bad;
1286			}
1287		}
1288		if (use_alien_caches) {
1289			alien = alloc_alien_cache(node, cachep->limit, GFP_KERNEL);
1290			if (!alien) {
1291				kfree(shared);
1292				kfree(nc);
1293				goto bad;
1294			}
1295		}
1296		cachep->array[cpu] = nc;
1297		l3 = cachep->nodelists[node];
1298		BUG_ON(!l3);
1299
1300		spin_lock_irq(&l3->list_lock);
1301		if (!l3->shared) {
1302			/*
1303			 * We are serialised from CPU_DEAD or
1304			 * CPU_UP_CANCELLED by the cpucontrol lock
1305			 */
1306			l3->shared = shared;
1307			shared = NULL;
1308		}
1309#ifdef CONFIG_NUMA
1310		if (!l3->alien) {
1311			l3->alien = alien;
1312			alien = NULL;
1313		}
1314#endif
1315		spin_unlock_irq(&l3->list_lock);
1316		kfree(shared);
1317		free_alien_cache(alien);
1318		if (cachep->flags & SLAB_DEBUG_OBJECTS)
1319			slab_set_debugobj_lock_classes_node(cachep, node);
1320	}
1321	init_node_lock_keys(node);
1322
1323	return 0;
1324bad:
1325	cpuup_canceled(cpu);
1326	return -ENOMEM;
1327}
1328
1329static int __cpuinit cpuup_callback(struct notifier_block *nfb,
1330				    unsigned long action, void *hcpu)
1331{
1332	long cpu = (long)hcpu;
1333	int err = 0;
1334
1335	switch (action) {
1336	case CPU_UP_PREPARE:
1337	case CPU_UP_PREPARE_FROZEN:
1338		mutex_lock(&cache_chain_mutex);
1339		err = cpuup_prepare(cpu);
1340		mutex_unlock(&cache_chain_mutex);
1341		break;
1342	case CPU_ONLINE:
1343	case CPU_ONLINE_FROZEN:
1344		start_cpu_timer(cpu);
1345		break;
1346#ifdef CONFIG_HOTPLUG_CPU
1347  	case CPU_DOWN_PREPARE:
1348  	case CPU_DOWN_PREPARE_FROZEN:
1349		/*
1350		 * Shutdown cache reaper. Note that the cache_chain_mutex is
1351		 * held so that if cache_reap() is invoked it cannot do
1352		 * anything expensive but will only modify reap_work
1353		 * and reschedule the timer.
1354		*/
1355		cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu));
1356		/* Now the cache_reaper is guaranteed to be not running. */
1357		per_cpu(slab_reap_work, cpu).work.func = NULL;
1358  		break;
1359  	case CPU_DOWN_FAILED:
1360  	case CPU_DOWN_FAILED_FROZEN:
1361		start_cpu_timer(cpu);
1362  		break;
1363	case CPU_DEAD:
1364	case CPU_DEAD_FROZEN:
1365		/*
1366		 * Even if all the cpus of a node are down, we don't free the
1367		 * kmem_list3 of any cache. This is to avoid a race between
1368		 * cpu_down, and a kmalloc allocation from another cpu for
1369		 * memory from the node of the cpu going down.  The list3
1370		 * structure is usually allocated from kmem_cache_create() and
1371		 * gets destroyed at kmem_cache_destroy().
1372		 */
1373		/* fall through */
1374#endif
1375	case CPU_UP_CANCELED:
1376	case CPU_UP_CANCELED_FROZEN:
1377		mutex_lock(&cache_chain_mutex);
1378		cpuup_canceled(cpu);
1379		mutex_unlock(&cache_chain_mutex);
1380		break;
1381	}
1382	return notifier_from_errno(err);
1383}
1384
1385static struct notifier_block __cpuinitdata cpucache_notifier = {
1386	&cpuup_callback, NULL, 0
1387};
1388
1389#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
1390/*
1391 * Drains freelist for a node on each slab cache, used for memory hot-remove.
1392 * Returns -EBUSY if all objects cannot be drained so that the node is not
1393 * removed.
1394 *
1395 * Must hold cache_chain_mutex.
1396 */
1397static int __meminit drain_cache_nodelists_node(int node)
1398{
1399	struct kmem_cache *cachep;
1400	int ret = 0;
1401
1402	list_for_each_entry(cachep, &cache_chain, next) {
1403		struct kmem_list3 *l3;
1404
1405		l3 = cachep->nodelists[node];
1406		if (!l3)
1407			continue;
1408
1409		drain_freelist(cachep, l3, l3->free_objects);
1410
1411		if (!list_empty(&l3->slabs_full) ||
1412		    !list_empty(&l3->slabs_partial)) {
1413			ret = -EBUSY;
1414			break;
1415		}
1416	}
1417	return ret;
1418}
1419
1420static int __meminit slab_memory_callback(struct notifier_block *self,
1421					unsigned long action, void *arg)
1422{
1423	struct memory_notify *mnb = arg;
1424	int ret = 0;
1425	int nid;
1426
1427	nid = mnb->status_change_nid;
1428	if (nid < 0)
1429		goto out;
1430
1431	switch (action) {
1432	case MEM_GOING_ONLINE:
1433		mutex_lock(&cache_chain_mutex);
1434		ret = init_cache_nodelists_node(nid);
1435		mutex_unlock(&cache_chain_mutex);
1436		break;
1437	case MEM_GOING_OFFLINE:
1438		mutex_lock(&cache_chain_mutex);
1439		ret = drain_cache_nodelists_node(nid);
1440		mutex_unlock(&cache_chain_mutex);
1441		break;
1442	case MEM_ONLINE:
1443	case MEM_OFFLINE:
1444	case MEM_CANCEL_ONLINE:
1445	case MEM_CANCEL_OFFLINE:
1446		break;
1447	}
1448out:
1449	return notifier_from_errno(ret);
1450}
1451#endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */
1452
1453/*
1454 * swap the static kmem_list3 with kmalloced memory
1455 */
1456static void __init init_list(struct kmem_cache *cachep, struct kmem_list3 *list,
1457				int nodeid)
1458{
1459	struct kmem_list3 *ptr;
1460
1461	ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_NOWAIT, nodeid);
1462	BUG_ON(!ptr);
1463
1464	memcpy(ptr, list, sizeof(struct kmem_list3));
1465	/*
1466	 * Do not assume that spinlocks can be initialized via memcpy:
1467	 */
1468	spin_lock_init(&ptr->list_lock);
1469
1470	MAKE_ALL_LISTS(cachep, ptr, nodeid);
1471	cachep->nodelists[nodeid] = ptr;
1472}
1473
1474/*
1475 * For setting up all the kmem_list3s for caches whose buffer_size is the same
1476 * as the size of kmem_list3.
1477 */
1478static void __init set_up_list3s(struct kmem_cache *cachep, int index)
1479{
1480	int node;
1481
1482	for_each_online_node(node) {
1483		cachep->nodelists[node] = &initkmem_list3[index + node];
1484		cachep->nodelists[node]->next_reap = jiffies +
1485		    REAPTIMEOUT_LIST3 +
1486		    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
1487	}
1488}
1489
1490/*
1491 * Initialisation.  Called after the page allocator has been initialised and
1492 * before smp_init().
1493 */
1494void __init kmem_cache_init(void)
1495{
1496	size_t left_over;
1497	struct cache_sizes *sizes;
1498	struct cache_names *names;
1499	int i;
1500	int order;
1501	int node;
1502
1503	if (num_possible_nodes() == 1)
1504		use_alien_caches = 0;
1505
1506	for (i = 0; i < NUM_INIT_LISTS; i++) {
1507		kmem_list3_init(&initkmem_list3[i]);
1508		if (i < MAX_NUMNODES)
1509			cache_cache.nodelists[i] = NULL;
1510	}
1511	set_up_list3s(&cache_cache, CACHE_CACHE);
1512
1513	/*
1514	 * Fragmentation resistance on low memory - only use bigger
1515	 * page orders on machines with more than 32MB of memory if
1516	 * not overridden on the command line.
1517	 */
1518	if (!slab_max_order_set && totalram_pages > (32 << 20) >> PAGE_SHIFT)
1519		slab_max_order = SLAB_MAX_ORDER_HI;
1520
1521	/* Bootstrap is tricky, because several objects are allocated
1522	 * from caches that do not exist yet:
1523	 * 1) initialize the cache_cache cache: it contains the struct
1524	 *    kmem_cache structures of all caches, except cache_cache itself:
1525	 *    cache_cache is statically allocated.
1526	 *    Initially an __init data area is used for the head array and the
1527	 *    kmem_list3 structures, it's replaced with a kmalloc allocated
1528	 *    array at the end of the bootstrap.
1529	 * 2) Create the first kmalloc cache.
1530	 *    The struct kmem_cache for the new cache is allocated normally.
1531	 *    An __init data area is used for the head array.
1532	 * 3) Create the remaining kmalloc caches, with minimally sized
1533	 *    head arrays.
1534	 * 4) Replace the __init data head arrays for cache_cache and the first
1535	 *    kmalloc cache with kmalloc allocated arrays.
1536	 * 5) Replace the __init data for kmem_list3 for cache_cache and
1537	 *    the other caches with kmalloc allocated memory.
1538	 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
1539	 */
1540
1541	node = numa_mem_id();
1542
1543	/* 1) create the cache_cache */
1544	INIT_LIST_HEAD(&cache_chain);
1545	list_add(&cache_cache.next, &cache_chain);
1546	cache_cache.colour_off = cache_line_size();
1547	cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
1548	cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
1549
1550	/*
1551	 * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
1552	 */
1553	cache_cache.buffer_size = offsetof(struct kmem_cache, array[nr_cpu_ids]) +
1554				  nr_node_ids * sizeof(struct kmem_list3 *);
1555#if DEBUG
1556	cache_cache.obj_size = cache_cache.buffer_size;
1557#endif
1558	cache_cache.buffer_size = ALIGN(cache_cache.buffer_size,
1559					cache_line_size());
1560	cache_cache.reciprocal_buffer_size =
1561		reciprocal_value(cache_cache.buffer_size);
1562
1563	for (order = 0; order < MAX_ORDER; order++) {
1564		cache_estimate(order, cache_cache.buffer_size,
1565			cache_line_size(), 0, &left_over, &cache_cache.num);
1566		if (cache_cache.num)
1567			break;
1568	}
1569	BUG_ON(!cache_cache.num);
1570	cache_cache.gfporder = order;
1571	cache_cache.colour = left_over / cache_cache.colour_off;
1572	cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
1573				      sizeof(struct slab), cache_line_size());
1574
1575	/* 2+3) create the kmalloc caches */
1576	sizes = malloc_sizes;
1577	names = cache_names;
1578
1579	/*
1580	 * Initialize the caches that provide memory for the array cache and the
1581	 * kmem_list3 structures first.  Without this, further allocations will
1582	 * bug.
1583	 */
1584
1585	sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
1586					sizes[INDEX_AC].cs_size,
1587					ARCH_KMALLOC_MINALIGN,
1588					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
1589					NULL);
1590
1591	if (INDEX_AC != INDEX_L3) {
1592		sizes[INDEX_L3].cs_cachep =
1593			kmem_cache_create(names[INDEX_L3].name,
1594				sizes[INDEX_L3].cs_size,
1595				ARCH_KMALLOC_MINALIGN,
1596				ARCH_KMALLOC_FLAGS|SLAB_PANIC,
1597				NULL);
1598	}
1599
1600	slab_early_init = 0;
1601
1602	while (sizes->cs_size != ULONG_MAX) {
1603		/*
1604		 * For performance, all the general caches are L1 aligned.
1605		 * This should be particularly beneficial on SMP boxes, as it
1606		 * eliminates "false sharing".
1607		 * Note for systems short on memory removing the alignment will
1608		 * allow tighter packing of the smaller caches.
1609		 */
1610		if (!sizes->cs_cachep) {
1611			sizes->cs_cachep = kmem_cache_create(names->name,
1612					sizes->cs_size,
1613					ARCH_KMALLOC_MINALIGN,
1614					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
1615					NULL);
1616		}
1617#ifdef CONFIG_ZONE_DMA
1618		sizes->cs_dmacachep = kmem_cache_create(
1619					names->name_dma,
1620					sizes->cs_size,
1621					ARCH_KMALLOC_MINALIGN,
1622					ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA|
1623						SLAB_PANIC,
1624					NULL);
1625#endif
1626		sizes++;
1627		names++;
1628	}
1629	/* 4) Replace the bootstrap head arrays */
1630	{
1631		struct array_cache *ptr;
1632
1633		ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
1634
1635		BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache);
1636		memcpy(ptr, cpu_cache_get(&cache_cache),
1637		       sizeof(struct arraycache_init));
1638		/*
1639		 * Do not assume that spinlocks can be initialized via memcpy:
1640		 */
1641		spin_lock_init(&ptr->lock);
1642
1643		cache_cache.array[smp_processor_id()] = ptr;
1644
1645		ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
1646
1647		BUG_ON(cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep)
1648		       != &initarray_generic.cache);
1649		memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep),
1650		       sizeof(struct arraycache_init));
1651		/*
1652		 * Do not assume that spinlocks can be initialized via memcpy:
1653		 */
1654		spin_lock_init(&ptr->lock);
1655
1656		malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] =
1657		    ptr;
1658	}
1659	/* 5) Replace the bootstrap kmem_list3's */
1660	{
1661		int nid;
1662
1663		for_each_online_node(nid) {
1664			init_list(&cache_cache, &initkmem_list3[CACHE_CACHE + nid], nid);
1665
1666			init_list(malloc_sizes[INDEX_AC].cs_cachep,
1667				  &initkmem_list3[SIZE_AC + nid], nid);
1668
1669			if (INDEX_AC != INDEX_L3) {
1670				init_list(malloc_sizes[INDEX_L3].cs_cachep,
1671					  &initkmem_list3[SIZE_L3 + nid], nid);
1672			}
1673		}
1674	}
1675
1676	g_cpucache_up = EARLY;
1677}
1678
1679void __init kmem_cache_init_late(void)
1680{
1681	struct kmem_cache *cachep;
1682
1683	/* Annotate slab for lockdep -- annotate the malloc caches */
1684	init_lock_keys();
1685
1686	/* 6) resize the head arrays to their final sizes */
1687	mutex_lock(&cache_chain_mutex);
1688	list_for_each_entry(cachep, &cache_chain, next)
1689		if (enable_cpucache(cachep, GFP_NOWAIT))
1690			BUG();
1691	mutex_unlock(&cache_chain_mutex);
1692
1693	/* Done! */
1694	g_cpucache_up = FULL;
1695
1696	/*
1697	 * Register a cpu startup notifier callback that initializes
1698	 * cpu_cache_get for all new cpus
1699	 */
1700	register_cpu_notifier(&cpucache_notifier);
1701
1702#ifdef CONFIG_NUMA
1703	/*
1704	 * Register a memory hotplug callback that initializes and frees
1705	 * nodelists.
1706	 */
1707	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
1708#endif
1709
1710	/*
1711	 * The reap timers are started later, with a module init call: That part
1712	 * of the kernel is not yet operational.
1713	 */
1714}
1715
1716static int __init cpucache_init(void)
1717{
1718	int cpu;
1719
1720	/*
1721	 * Register the timers that return unneeded pages to the page allocator
1722	 */
1723	for_each_online_cpu(cpu)
1724		start_cpu_timer(cpu);
1725	return 0;
1726}
1727__initcall(cpucache_init);
1728
1729/*
1730 * Interface to system's page allocator. No need to hold the cache-lock.
1731 *
1732 * If we requested dmaable memory, we will get it. Even if we
1733 * did not request dmaable memory, we might get it, but that
1734 * would be relatively rare and ignorable.
1735 */
1736static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
1737{
1738	struct page *page;
1739	int nr_pages;
1740	int i;
1741
1742#ifndef CONFIG_MMU
1743	/*
1744	 * Nommu uses slabs for process anonymous memory allocations, and thus
1745	 * requires __GFP_COMP to properly refcount higher order allocations
1746	 */
1747	flags |= __GFP_COMP;
1748#endif
1749
1750	flags |= cachep->gfpflags;
1751	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1752		flags |= __GFP_RECLAIMABLE;
1753
1754	page = alloc_pages_exact_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
1755	if (!page)
1756		return NULL;
1757
1758	nr_pages = (1 << cachep->gfporder);
1759	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1760		add_zone_page_state(page_zone(page),
1761			NR_SLAB_RECLAIMABLE, nr_pages);
1762	else
1763		add_zone_page_state(page_zone(page),
1764			NR_SLAB_UNRECLAIMABLE, nr_pages);
1765	for (i = 0; i < nr_pages; i++)
1766		__SetPageSlab(page + i);
1767
1768	if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
1769		kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid);
1770
1771		if (cachep->ctor)
1772			kmemcheck_mark_uninitialized_pages(page, nr_pages);
1773		else
1774			kmemcheck_mark_unallocated_pages(page, nr_pages);
1775	}
1776
1777	return page_address(page);
1778}
1779
1780/*
1781 * Interface to system's page release.
1782 */
1783static void kmem_freepages(struct kmem_cache *cachep, void *addr)
1784{
1785	unsigned long i = (1 << cachep->gfporder);
1786	struct page *page = virt_to_page(addr);
1787	const unsigned long nr_freed = i;
1788
1789	kmemcheck_free_shadow(page, cachep->gfporder);
1790
1791	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1792		sub_zone_page_state(page_zone(page),
1793				NR_SLAB_RECLAIMABLE, nr_freed);
1794	else
1795		sub_zone_page_state(page_zone(page),
1796				NR_SLAB_UNRECLAIMABLE, nr_freed);
1797	while (i--) {
1798		BUG_ON(!PageSlab(page));
1799		__ClearPageSlab(page);
1800		page++;
1801	}
1802	if (current->reclaim_state)
1803		current->reclaim_state->reclaimed_slab += nr_freed;
1804	free_pages((unsigned long)addr, cachep->gfporder);
1805}
1806
1807static void kmem_rcu_free(struct rcu_head *head)
1808{
1809	struct slab_rcu *slab_rcu = (struct slab_rcu *)head;
1810	struct kmem_cache *cachep = slab_rcu->cachep;
1811
1812	kmem_freepages(cachep, slab_rcu->addr);
1813	if (OFF_SLAB(cachep))
1814		kmem_cache_free(cachep->slabp_cache, slab_rcu);
1815}
1816
1817#if DEBUG
1818
1819#ifdef CONFIG_DEBUG_PAGEALLOC
1820static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
1821			    unsigned long caller)
1822{
1823	int size = obj_size(cachep);
1824
1825	addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];
1826
1827	if (size < 5 * sizeof(unsigned long))
1828		return;
1829
1830	*addr++ = 0x12345678;
1831	*addr++ = caller;
1832	*addr++ = smp_processor_id();
1833	size -= 3 * sizeof(unsigned long);
1834	{
1835		unsigned long *sptr = &caller;
1836		unsigned long svalue;
1837
1838		while (!kstack_end(sptr)) {
1839			svalue = *sptr++;
1840			if (kernel_text_address(svalue)) {
1841				*addr++ = svalue;
1842				size -= sizeof(unsigned long);
1843				if (size <= sizeof(unsigned long))
1844					break;
1845			}
1846		}
1847
1848	}
1849	*addr++ = 0x87654321;
1850}
1851#endif
1852
1853static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
1854{
1855	int size = obj_size(cachep);
1856	addr = &((char *)addr)[obj_offset(cachep)];
1857
1858	memset(addr, val, size);
1859	*(unsigned char *)(addr + size - 1) = POISON_END;
1860}
1861
1862static void dump_line(char *data, int offset, int limit)
1863{
1864	int i;
1865	unsigned char error = 0;
1866	int bad_count = 0;
1867
1868	printk(KERN_ERR "%03x: ", offset);
1869	for (i = 0; i < limit; i++) {
1870		if (data[offset + i] != POISON_FREE) {
1871			error = data[offset + i];
1872			bad_count++;
1873		}
1874	}
1875	print_hex_dump(KERN_CONT, "", 0, 16, 1,
1876			&data[offset], limit, 1);
1877
1878	if (bad_count == 1) {
1879		error ^= POISON_FREE;
1880		if (!(error & (error - 1))) {
1881			printk(KERN_ERR "Single bit error detected. Probably "
1882					"bad RAM.\n");
1883#ifdef CONFIG_X86
1884			printk(KERN_ERR "Run memtest86+ or a similar memory "
1885					"test tool.\n");
1886#else
1887			printk(KERN_ERR "Run a memory test tool.\n");
1888#endif
1889		}
1890	}
1891}
1892#endif
1893
1894#if DEBUG
1895
1896static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
1897{
1898	int i, size;
1899	char *realobj;
1900
1901	if (cachep->flags & SLAB_RED_ZONE) {
1902		printk(KERN_ERR "Redzone: 0x%llx/0x%llx.\n",
1903			*dbg_redzone1(cachep, objp),
1904			*dbg_redzone2(cachep, objp));
1905	}
1906
1907	if (cachep->flags & SLAB_STORE_USER) {
1908		printk(KERN_ERR "Last user: [<%p>]",
1909			*dbg_userword(cachep, objp));
1910		print_symbol("(%s)",
1911				(unsigned long)*dbg_userword(cachep, objp));
1912		printk("\n");
1913	}
1914	realobj = (char *)objp + obj_offset(cachep);
1915	size = obj_size(cachep);
1916	for (i = 0; i < size && lines; i += 16, lines--) {
1917		int limit;
1918		limit = 16;
1919		if (i + limit > size)
1920			limit = size - i;
1921		dump_line(realobj, i, limit);
1922	}
1923}
1924
1925static void check_poison_obj(struct kmem_cache *cachep, void *objp)
1926{
1927	char *realobj;
1928	int size, i;
1929	int lines = 0;
1930
1931	realobj = (char *)objp + obj_offset(cachep);
1932	size = obj_size(cachep);
1933
1934	for (i = 0; i < size; i++) {
1935		char exp = POISON_FREE;
1936		if (i == size - 1)
1937			exp = POISON_END;
1938		if (realobj[i] != exp) {
1939			int limit;
1940			/* Mismatch ! */
1941			/* Print header */
1942			if (lines == 0) {
1943				printk(KERN_ERR
1944					"Slab corruption: %s start=%p, len=%d\n",
1945					cachep->name, realobj, size);
1946				print_objinfo(cachep, objp, 0);
1947			}
1948			/* Hexdump the affected line */
1949			i = (i / 16) * 16;
1950			limit = 16;
1951			if (i + limit > size)
1952				limit = size - i;
1953			dump_line(realobj, i, limit);
1954			i += 16;
1955			lines++;
1956			/* Limit to 5 lines */
1957			if (lines > 5)
1958				break;
1959		}
1960	}
1961	if (lines != 0) {
1962		/* Print some data about the neighboring objects, if they
1963		 * exist:
1964		 */
1965		struct slab *slabp = virt_to_slab(objp);
1966		unsigned int objnr;
1967
1968		objnr = obj_to_index(cachep, slabp, objp);
1969		if (objnr) {
1970			objp = index_to_obj(cachep, slabp, objnr - 1);
1971			realobj = (char *)objp + obj_offset(cachep);
1972			printk(KERN_ERR "Prev obj: start=%p, len=%d\n",
1973			       realobj, size);
1974			print_objinfo(cachep, objp, 2);
1975		}
1976		if (objnr + 1 < cachep->num) {
1977			objp = index_to_obj(cachep, slabp, objnr + 1);
1978			realobj = (char *)objp + obj_offset(cachep);
1979			printk(KERN_ERR "Next obj: start=%p, len=%d\n",
1980			       realobj, size);
1981			print_objinfo(cachep, objp, 2);
1982		}
1983	}
1984}
1985#endif
1986
1987#if DEBUG
1988static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slabp)
1989{
1990	int i;
1991	for (i = 0; i < cachep->num; i++) {
1992		void *objp = index_to_obj(cachep, slabp, i);
1993
1994		if (cachep->flags & SLAB_POISON) {
1995#ifdef CONFIG_DEBUG_PAGEALLOC
1996			if (cachep->buffer_size % PAGE_SIZE == 0 &&
1997					OFF_SLAB(cachep))
1998				kernel_map_pages(virt_to_page(objp),
1999					cachep->buffer_size / PAGE_SIZE, 1);
2000			else
2001				check_poison_obj(cachep, objp);
2002#else
2003			check_poison_obj(cachep, objp);
2004#endif
2005		}
2006		if (cachep->flags & SLAB_RED_ZONE) {
2007			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
2008				slab_error(cachep, "start of a freed object "
2009					   "was overwritten");
2010			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
2011				slab_error(cachep, "end of a freed object "
2012					   "was overwritten");
2013		}
2014	}
2015}
2016#else
2017static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slabp)
2018{
2019}
2020#endif
2021
2022/**
2023 * slab_destroy - destroy and release all objects in a slab
 * @cachep: the cache the slab belongs to
 * @slabp: the slab being destroyed
 *
 * Destroy all the objs in a slab, and release the mem back to the system.
 * Before calling, the slab must have been unlinked from the cache.  The
 * cache-lock is not held/needed.
2030 */
2031static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
2032{
2033	void *addr = slabp->s_mem - slabp->colouroff;
2034
2035	slab_destroy_debugcheck(cachep, slabp);
2036	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
2037		struct slab_rcu *slab_rcu;
2038
2039		slab_rcu = (struct slab_rcu *)slabp;
2040		slab_rcu->cachep = cachep;
2041		slab_rcu->addr = addr;
2042		call_rcu(&slab_rcu->head, kmem_rcu_free);
2043	} else {
2044		kmem_freepages(cachep, addr);
2045		if (OFF_SLAB(cachep))
2046			kmem_cache_free(cachep->slabp_cache, slabp);
2047	}
2048}
2049
2050static void __kmem_cache_destroy(struct kmem_cache *cachep)
2051{
2052	int i;
2053	struct kmem_list3 *l3;
2054
2055	for_each_online_cpu(i)
2056	    kfree(cachep->array[i]);
2057
2058	/* NUMA: free the list3 structures */
2059	for_each_online_node(i) {
2060		l3 = cachep->nodelists[i];
2061		if (l3) {
2062			kfree(l3->shared);
2063			free_alien_cache(l3->alien);
2064			kfree(l3);
2065		}
2066	}
2067	kmem_cache_free(&cache_cache, cachep);
2068}
2069
2070
2071/**
2072 * calculate_slab_order - calculate size (page order) of slabs
2073 * @cachep: pointer to the cache that is being created
2074 * @size: size of objects to be created in this cache.
2075 * @align: required alignment for the objects.
2076 * @flags: slab allocation flags
2077 *
2078 * Also calculates the number of objects per slab.
2079 *
2080 * This could be made much more intelligent.  For now, try to avoid using
2081 * high order pages for slabs.  When the gfp() functions are more friendly
2082 * towards high-order requests, this should be changed.
2083 */
2084static size_t calculate_slab_order(struct kmem_cache *cachep,
2085			size_t size, size_t align, unsigned long flags)
2086{
2087	unsigned long offslab_limit;
2088	size_t left_over = 0;
2089	int gfporder;
2090
2091	for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) {
2092		unsigned int num;
2093		size_t remainder;
2094
2095		cache_estimate(gfporder, size, align, flags, &remainder, &num);
2096		if (!num)
2097			continue;
2098
2099		if (flags & CFLGS_OFF_SLAB) {
2100			/*
2101			 * Max number of objs-per-slab for caches which
2102			 * use off-slab slabs. Needed to avoid a possible
2103			 * looping condition in cache_grow().
2104			 */
2105			offslab_limit = size - sizeof(struct slab);
2106			offslab_limit /= sizeof(kmem_bufctl_t);
2107
			if (num > offslab_limit)
2109				break;
2110		}
2111
2112		/* Found something acceptable - save it away */
2113		cachep->num = num;
2114		cachep->gfporder = gfporder;
2115		left_over = remainder;
2116
2117		/*
2118		 * A VFS-reclaimable slab tends to have most allocations
2119		 * as GFP_NOFS and we really don't want to have to be allocating
2120		 * higher-order pages when we are unable to shrink dcache.
2121		 */
2122		if (flags & SLAB_RECLAIM_ACCOUNT)
2123			break;
2124
2125		/*
2126		 * Large number of objects is good, but very large slabs are
2127		 * currently bad for the gfp()s.
2128		 */
2129		if (gfporder >= slab_max_order)
2130			break;
2131
2132		/*
		 * Acceptable internal fragmentation?  Accept this order once
		 * at most 1/8th of the slab would be wasted, e.g. up to
		 * 512 bytes of a 4 KiB order-0 slab.
		 */
2135		if (left_over * 8 <= (PAGE_SIZE << gfporder))
2136			break;
2137	}
2138	return left_over;
2139}
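
/*
 * Worked example of the acceptance test above (illustrative only): at
 * gfporder 0 with 4 KiB pages, "left_over * 8 <= (PAGE_SIZE << gfporder)"
 * tolerates at most 512 bytes of waste per slab; at gfporder 1 up to 1 KiB
 * is tolerated, and so on.  The actual object counts and remainders come
 * from cache_estimate() and depend on the alignment in use.
 */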
2140
2141static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
2142{
2143	if (g_cpucache_up == FULL)
2144		return enable_cpucache(cachep, gfp);
2145
2146	if (g_cpucache_up == NONE) {
2147		/*
2148		 * Note: the first kmem_cache_create must create the cache
2149		 * that's used by kmalloc(24), otherwise the creation of
2150		 * further caches will BUG().
2151		 */
2152		cachep->array[smp_processor_id()] = &initarray_generic.cache;
2153
2154		/*
2155		 * If the cache that's used by kmalloc(sizeof(kmem_list3)) is
2156		 * the first cache, then we need to set up all its list3s,
2157		 * otherwise the creation of further caches will BUG().
2158		 */
2159		set_up_list3s(cachep, SIZE_AC);
2160		if (INDEX_AC == INDEX_L3)
2161			g_cpucache_up = PARTIAL_L3;
2162		else
2163			g_cpucache_up = PARTIAL_AC;
2164	} else {
2165		cachep->array[smp_processor_id()] =
2166			kmalloc(sizeof(struct arraycache_init), gfp);
2167
2168		if (g_cpucache_up == PARTIAL_AC) {
2169			set_up_list3s(cachep, SIZE_L3);
2170			g_cpucache_up = PARTIAL_L3;
2171		} else {
2172			int node;
2173			for_each_online_node(node) {
2174				cachep->nodelists[node] =
2175				    kmalloc_node(sizeof(struct kmem_list3),
2176						gfp, node);
2177				BUG_ON(!cachep->nodelists[node]);
2178				kmem_list3_init(cachep->nodelists[node]);
2179			}
2180		}
2181	}
2182	cachep->nodelists[numa_mem_id()]->next_reap =
2183			jiffies + REAPTIMEOUT_LIST3 +
2184			((unsigned long)cachep) % REAPTIMEOUT_LIST3;
2185
2186	cpu_cache_get(cachep)->avail = 0;
2187	cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
2188	cpu_cache_get(cachep)->batchcount = 1;
2189	cpu_cache_get(cachep)->touched = 0;
2190	cachep->batchcount = 1;
2191	cachep->limit = BOOT_CPUCACHE_ENTRIES;
2192	return 0;
2193}
2194
2195/**
2196 * kmem_cache_create - Create a cache.
2197 * @name: A string which is used in /proc/slabinfo to identify this cache.
2198 * @size: The size of objects to be created in this cache.
2199 * @align: The required alignment for the objects.
2200 * @flags: SLAB flags
2201 * @ctor: A constructor for the objects.
2202 *
2203 * Returns a ptr to the cache on success, NULL on failure.
 * Cannot be called from within an interrupt, but it can be interrupted.
2205 * The @ctor is run when new pages are allocated by the cache.
2206 *
2207 * @name must be valid until the cache is destroyed. This implies that
2208 * the module calling this has to destroy the cache before getting unloaded.
2209 *
2210 * The flags are
2211 *
2212 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
2213 * to catch references to uninitialised memory.
2214 *
2215 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
2216 * for buffer overruns.
2217 *
2218 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
2219 * cacheline.  This can be beneficial if you're counting cycles as closely
2220 * as davem.
2221 */
2222struct kmem_cache *
2223kmem_cache_create (const char *name, size_t size, size_t align,
2224	unsigned long flags, void (*ctor)(void *))
2225{
2226	size_t left_over, slab_size, ralign;
2227	struct kmem_cache *cachep = NULL, *pc;
2228	gfp_t gfp;
2229
2230	/*
2231	 * Sanity checks... these are all serious usage bugs.
2232	 */
2233	if (!name || in_interrupt() || (size < BYTES_PER_WORD) ||
2234	    size > KMALLOC_MAX_SIZE) {
2235		printk(KERN_ERR "%s: Early error in slab %s\n", __func__,
2236				name);
2237		BUG();
2238	}
2239
2240	/*
2241	 * We use cache_chain_mutex to ensure a consistent view of
2242	 * cpu_online_mask as well.  Please see cpuup_callback
2243	 */
2244	if (slab_is_available()) {
2245		get_online_cpus();
2246		mutex_lock(&cache_chain_mutex);
2247	}
2248
2249	list_for_each_entry(pc, &cache_chain, next) {
2250		char tmp;
2251		int res;
2252
2253		/*
2254		 * This happens when the module gets unloaded and doesn't
2255		 * destroy its slab cache and no-one else reuses the vmalloc
2256		 * area of the module.  Print a warning.
2257		 */
2258		res = probe_kernel_address(pc->name, tmp);
2259		if (res) {
2260			printk(KERN_ERR
2261			       "SLAB: cache with size %d has lost its name\n",
2262			       pc->buffer_size);
2263			continue;
2264		}
2265
2266		if (!strcmp(pc->name, name)) {
2267			printk(KERN_ERR
2268			       "kmem_cache_create: duplicate cache %s\n", name);
2269			dump_stack();
2270			goto oops;
2271		}
2272	}
2273
2274#if DEBUG
2275	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
2276#if FORCED_DEBUG
2277	/*
2278	 * Enable redzoning and last user accounting, except for caches with
2279	 * large objects, if the increased size would increase the object size
2280	 * above the next power of two: caches with object sizes just above a
2281	 * power of two have a significant amount of internal fragmentation.
2282	 */
2283	if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN +
2284						2 * sizeof(unsigned long long)))
2285		flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
2286	if (!(flags & SLAB_DESTROY_BY_RCU))
2287		flags |= SLAB_POISON;
2288#endif
2289	if (flags & SLAB_DESTROY_BY_RCU)
2290		BUG_ON(flags & SLAB_POISON);
2291#endif
2292	/*
	 * Always check flags; a caller might be expecting debug support which
	 * isn't available.
2295	 */
2296	BUG_ON(flags & ~CREATE_MASK);
2297
2298	/*
2299	 * Check that size is in terms of words.  This is needed to avoid
2300	 * unaligned accesses for some archs when redzoning is used, and makes
2301	 * sure any on-slab bufctl's are also correctly aligned.
2302	 */
2303	if (size & (BYTES_PER_WORD - 1)) {
2304		size += (BYTES_PER_WORD - 1);
2305		size &= ~(BYTES_PER_WORD - 1);
2306	}
2307
2308	/* calculate the final buffer alignment: */
2309
2310	/* 1) arch recommendation: can be overridden for debug */
2311	if (flags & SLAB_HWCACHE_ALIGN) {
2312		/*
2313		 * Default alignment: as specified by the arch code.  Except if
2314		 * an object is really small, then squeeze multiple objects into
2315		 * one cacheline.
2316		 */
2317		ralign = cache_line_size();
2318		while (size <= ralign / 2)
2319			ralign /= 2;
2320	} else {
2321		ralign = BYTES_PER_WORD;
2322	}
2323
2324	/*
2325	 * Redzoning and user store require word alignment or possibly larger.
2326	 * Note this will be overridden by architecture or caller mandated
2327	 * alignment if either is greater than BYTES_PER_WORD.
2328	 */
2329	if (flags & SLAB_STORE_USER)
2330		ralign = BYTES_PER_WORD;
2331
2332	if (flags & SLAB_RED_ZONE) {
2333		ralign = REDZONE_ALIGN;
2334		/* If redzoning, ensure that the second redzone is suitably
2335		 * aligned, by adjusting the object size accordingly. */
2336		size += REDZONE_ALIGN - 1;
2337		size &= ~(REDZONE_ALIGN - 1);
2338	}
2339
2340	/* 2) arch mandated alignment */
2341	if (ralign < ARCH_SLAB_MINALIGN) {
2342		ralign = ARCH_SLAB_MINALIGN;
2343	}
2344	/* 3) caller mandated alignment */
2345	if (ralign < align) {
2346		ralign = align;
2347	}
2348	/* disable debug if necessary */
2349	if (ralign > __alignof__(unsigned long long))
2350		flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
2351	/*
2352	 * 4) Store it.
2353	 */
2354	align = ralign;
2355
2356	if (slab_is_available())
2357		gfp = GFP_KERNEL;
2358	else
2359		gfp = GFP_NOWAIT;
2360
2361	/* Get cache's description obj. */
2362	cachep = kmem_cache_zalloc(&cache_cache, gfp);
2363	if (!cachep)
2364		goto oops;
2365
2366	cachep->nodelists = (struct kmem_list3 **)&cachep->array[nr_cpu_ids];
2367#if DEBUG
2368	cachep->obj_size = size;
2369
2370	/*
2371	 * Both debugging options require word-alignment which is calculated
2372	 * into align above.
2373	 */
2374	if (flags & SLAB_RED_ZONE) {
2375		/* add space for red zone words */
2376		cachep->obj_offset += sizeof(unsigned long long);
2377		size += 2 * sizeof(unsigned long long);
2378	}
2379	if (flags & SLAB_STORE_USER) {
2380		/* user store requires one word storage behind the end of
2381		 * the real object. But if the second red zone needs to be
2382		 * aligned to 64 bits, we must allow that much space.
2383		 */
2384		if (flags & SLAB_RED_ZONE)
2385			size += REDZONE_ALIGN;
2386		else
2387			size += BYTES_PER_WORD;
2388	}
2389#if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
2390	if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
2391	    && cachep->obj_size > cache_line_size() && ALIGN(size, align) < PAGE_SIZE) {
2392		cachep->obj_offset += PAGE_SIZE - ALIGN(size, align);
2393		size = PAGE_SIZE;
2394	}
2395#endif
2396#endif
2397
2398	/*
	 * Determine if the slab management is 'on' or 'off' slab.
	 * (Bootstrapping cannot cope with off-slab caches, so don't do
	 * it too early on.  Always use on-slab management when
	 * SLAB_NOLEAKTRACE is set, to avoid recursive calls into kmemleak.)
2403	 */
2404	if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init &&
2405	    !(flags & SLAB_NOLEAKTRACE))
2406		/*
2407		 * Size is large, assume best to place the slab management obj
2408		 * off-slab (should allow better packing of objs).
2409		 */
2410		flags |= CFLGS_OFF_SLAB;
2411
2412	size = ALIGN(size, align);
2413
2414	left_over = calculate_slab_order(cachep, size, align, flags);
2415
2416	if (!cachep->num) {
2417		printk(KERN_ERR
2418		       "kmem_cache_create: couldn't create cache %s.\n", name);
2419		kmem_cache_free(&cache_cache, cachep);
2420		cachep = NULL;
2421		goto oops;
2422	}
2423	slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
2424			  + sizeof(struct slab), align);
2425
2426	/*
2427	 * If the slab has been placed off-slab, and we have enough space then
2428	 * move it on-slab. This is at the expense of any extra colouring.
2429	 */
2430	if (flags & CFLGS_OFF_SLAB && left_over >= slab_size) {
2431		flags &= ~CFLGS_OFF_SLAB;
2432		left_over -= slab_size;
2433	}
2434
2435	if (flags & CFLGS_OFF_SLAB) {
2436		/* really off slab. No need for manual alignment */
2437		slab_size =
2438		    cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab);
2439
2440#ifdef CONFIG_PAGE_POISONING
2441		/* If we're going to use the generic kernel_map_pages()
2442		 * poisoning, then it's going to smash the contents of
2443		 * the redzone and userword anyhow, so switch them off.
2444		 */
2445		if (size % PAGE_SIZE == 0 && flags & SLAB_POISON)
2446			flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
2447#endif
2448	}
2449
2450	cachep->colour_off = cache_line_size();
2451	/* Offset must be a multiple of the alignment. */
2452	if (cachep->colour_off < align)
2453		cachep->colour_off = align;
2454	cachep->colour = left_over / cachep->colour_off;
2455	cachep->slab_size = slab_size;
2456	cachep->flags = flags;
2457	cachep->gfpflags = 0;
2458	if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
2459		cachep->gfpflags |= GFP_DMA;
2460	cachep->buffer_size = size;
2461	cachep->reciprocal_buffer_size = reciprocal_value(size);
2462
2463	if (flags & CFLGS_OFF_SLAB) {
2464		cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u);
2465		/*
2466		 * This is a possibility for one of the malloc_sizes caches.
2467		 * But since we go off slab only for object size greater than
2468		 * PAGE_SIZE/8, and malloc_sizes gets created in ascending order,
2469		 * this should not happen at all.
2470		 * But leave a BUG_ON for some lucky dude.
2471		 */
2472		BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache));
2473	}
2474	cachep->ctor = ctor;
2475	cachep->name = name;
2476
2477	if (setup_cpu_cache(cachep, gfp)) {
2478		__kmem_cache_destroy(cachep);
2479		cachep = NULL;
2480		goto oops;
2481	}
2482
2483	if (flags & SLAB_DEBUG_OBJECTS) {
2484		/*
2485		 * Would deadlock through slab_destroy()->call_rcu()->
2486		 * debug_object_activate()->kmem_cache_alloc().
2487		 */
2488		WARN_ON_ONCE(flags & SLAB_DESTROY_BY_RCU);
2489
2490		slab_set_debugobj_lock_classes(cachep);
2491	}
2492
2493	/* cache setup completed, link it into the list */
2494	list_add(&cachep->next, &cache_chain);
2495oops:
2496	if (!cachep && (flags & SLAB_PANIC))
2497		panic("kmem_cache_create(): failed to create slab `%s'\n",
2498		      name);
2499	if (slab_is_available()) {
2500		mutex_unlock(&cache_chain_mutex);
2501		put_online_cpus();
2502	}
2503	return cachep;
2504}
2505EXPORT_SYMBOL(kmem_cache_create);
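
/*
 * Illustrative usage sketch (not part of this file; 'struct foo', foo_ctor()
 * and foo_cachep are hypothetical): a client typically creates one cache per
 * object type, then allocates and frees objects from it, e.g.
 *
 *	static struct kmem_cache *foo_cachep;
 *
 *	static void foo_ctor(void *obj)
 *	{
 *		struct foo *f = obj;
 *
 *		memset(f, 0, sizeof(*f));
 *	}
 *
 *	foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				       SLAB_HWCACHE_ALIGN, foo_ctor);
 *	f = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cachep, f);
 */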
2506
2507#if DEBUG
2508static void check_irq_off(void)
2509{
2510	BUG_ON(!irqs_disabled());
2511}
2512
2513static void check_irq_on(void)
2514{
2515	BUG_ON(irqs_disabled());
2516}
2517
2518static void check_spinlock_acquired(struct kmem_cache *cachep)
2519{
2520#ifdef CONFIG_SMP
2521	check_irq_off();
2522	assert_spin_locked(&cachep->nodelists[numa_mem_id()]->list_lock);
2523#endif
2524}
2525
2526static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
2527{
2528#ifdef CONFIG_SMP
2529	check_irq_off();
2530	assert_spin_locked(&cachep->nodelists[node]->list_lock);
2531#endif
2532}
2533
2534#else
2535#define check_irq_off()	do { } while(0)
2536#define check_irq_on()	do { } while(0)
2537#define check_spinlock_acquired(x) do { } while(0)
2538#define check_spinlock_acquired_node(x, y) do { } while(0)
2539#endif
2540
2541static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
2542			struct array_cache *ac,
2543			int force, int node);
2544
2545static void do_drain(void *arg)
2546{
2547	struct kmem_cache *cachep = arg;
2548	struct array_cache *ac;
2549	int node = numa_mem_id();
2550
2551	check_irq_off();
2552	ac = cpu_cache_get(cachep);
2553	spin_lock(&cachep->nodelists[node]->list_lock);
2554	free_block(cachep, ac->entry, ac->avail, node);
2555	spin_unlock(&cachep->nodelists[node]->list_lock);
2556	ac->avail = 0;
2557}
2558
2559static void drain_cpu_caches(struct kmem_cache *cachep)
2560{
2561	struct kmem_list3 *l3;
2562	int node;
2563
2564	on_each_cpu(do_drain, cachep, 1);
2565	check_irq_on();
2566	for_each_online_node(node) {
2567		l3 = cachep->nodelists[node];
2568		if (l3 && l3->alien)
2569			drain_alien_cache(cachep, l3->alien);
2570	}
2571
2572	for_each_online_node(node) {
2573		l3 = cachep->nodelists[node];
2574		if (l3)
2575			drain_array(cachep, l3, l3->shared, 1, node);
2576	}
2577}
2578
2579/*
2580 * Remove slabs from the list of free slabs.
2581 * Specify the number of slabs to drain in tofree.
2582 *
2583 * Returns the actual number of slabs released.
2584 */
2585static int drain_freelist(struct kmem_cache *cache,
2586			struct kmem_list3 *l3, int tofree)
2587{
2588	struct list_head *p;
2589	int nr_freed;
2590	struct slab *slabp;
2591
2592	nr_freed = 0;
2593	while (nr_freed < tofree && !list_empty(&l3->slabs_free)) {
2594
2595		spin_lock_irq(&l3->list_lock);
2596		p = l3->slabs_free.prev;
2597		if (p == &l3->slabs_free) {
2598			spin_unlock_irq(&l3->list_lock);
2599			goto out;
2600		}
2601
2602		slabp = list_entry(p, struct slab, list);
2603#if DEBUG
2604		BUG_ON(slabp->inuse);
2605#endif
2606		list_del(&slabp->list);
2607		/*
2608		 * Safe to drop the lock. The slab is no longer linked
2609		 * to the cache.
2610		 */
2611		l3->free_objects -= cache->num;
2612		spin_unlock_irq(&l3->list_lock);
2613		slab_destroy(cache, slabp);
2614		nr_freed++;
2615	}
2616out:
2617	return nr_freed;
2618}
2619
2620/* Called with cache_chain_mutex held to protect against cpu hotplug */
2621static int __cache_shrink(struct kmem_cache *cachep)
2622{
2623	int ret = 0, i = 0;
2624	struct kmem_list3 *l3;
2625
2626	drain_cpu_caches(cachep);
2627
2628	check_irq_on();
2629	for_each_online_node(i) {
2630		l3 = cachep->nodelists[i];
2631		if (!l3)
2632			continue;
2633
2634		drain_freelist(cachep, l3, l3->free_objects);
2635
2636		ret += !list_empty(&l3->slabs_full) ||
2637			!list_empty(&l3->slabs_partial);
2638	}
2639	return (ret ? 1 : 0);
2640}
2641
2642/**
2643 * kmem_cache_shrink - Shrink a cache.
2644 * @cachep: The cache to shrink.
2645 *
2646 * Releases as many slabs as possible for a cache.
2647 * To help debugging, a zero exit status indicates all slabs were released.
2648 */
2649int kmem_cache_shrink(struct kmem_cache *cachep)
2650{
2651	int ret;
2652	BUG_ON(!cachep || in_interrupt());
2653
2654	get_online_cpus();
2655	mutex_lock(&cache_chain_mutex);
2656	ret = __cache_shrink(cachep);
2657	mutex_unlock(&cache_chain_mutex);
2658	put_online_cpus();
2659	return ret;
2660}
2661EXPORT_SYMBOL(kmem_cache_shrink);
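
/*
 * Illustrative only (foo_cachep is hypothetical): a subsystem reacting to
 * memory pressure may release its unused slabs explicitly; a non-zero return
 * value means some slabs are still in use.
 *
 *	if (kmem_cache_shrink(foo_cachep) != 0)
 *		pr_debug("foo: not all slabs could be released\n");
 */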
2662
2663/**
2664 * kmem_cache_destroy - delete a cache
2665 * @cachep: the cache to destroy
2666 *
2667 * Remove a &struct kmem_cache object from the slab cache.
2668 *
2669 * It is expected this function will be called by a module when it is
2670 * unloaded.  This will remove the cache completely, and avoid a duplicate
2671 * cache being allocated each time a module is loaded and unloaded, if the
2672 * module doesn't have persistent in-kernel storage across loads and unloads.
2673 *
2674 * The cache must be empty before calling this function.
2675 *
2676 * The caller must guarantee that no one will allocate memory from the cache
2677 * during the kmem_cache_destroy().
2678 */
2679void kmem_cache_destroy(struct kmem_cache *cachep)
2680{
2681	BUG_ON(!cachep || in_interrupt());
2682
2683	/* Find the cache in the chain of caches. */
2684	get_online_cpus();
2685	mutex_lock(&cache_chain_mutex);
2686	/*
2687	 * the chain is never empty, cache_cache is never destroyed
2688	 */
2689	list_del(&cachep->next);
2690	if (__cache_shrink(cachep)) {
2691		slab_error(cachep, "Can't free all objects");
2692		list_add(&cachep->next, &cache_chain);
2693		mutex_unlock(&cache_chain_mutex);
2694		put_online_cpus();
2695		return;
2696	}
2697
2698	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
2699		rcu_barrier();
2700
2701	__kmem_cache_destroy(cachep);
2702	mutex_unlock(&cache_chain_mutex);
2703	put_online_cpus();
2704}
2705EXPORT_SYMBOL(kmem_cache_destroy);
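
/*
 * Illustrative teardown sketch (hypothetical module): every object allocated
 * from the cache must already have been freed, typically from module_exit():
 *
 *	static void __exit foo_exit(void)
 *	{
 *		kmem_cache_destroy(foo_cachep);
 *	}
 *	module_exit(foo_exit);
 */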
2706
2707/*
2708 * Get the memory for a slab management obj.
 * For a slab cache with an off-slab slab descriptor, the descriptors always
 * come from the malloc_sizes caches.  The slab descriptor cannot come from
 * the same cache that is being created because, when we search for an
 * appropriate cache for these descriptors in kmem_cache_create(), we only
 * search through the malloc_sizes array.  If we were creating a malloc_sizes
 * cache here, it would not be visible to kmem_find_general_cachep() until
 * its initialization is complete.  Hence slabp_cache can never be the same
 * as the cache being created.
2717 */
2718static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
2719				   int colour_off, gfp_t local_flags,
2720				   int nodeid)
2721{
2722	struct slab *slabp;
2723
2724	if (OFF_SLAB(cachep)) {
2725		/* Slab management obj is off-slab. */
2726		slabp = kmem_cache_alloc_node(cachep->slabp_cache,
2727					      local_flags, nodeid);
		if (!slabp)
			return NULL;
		/*
		 * If the first object in the slab is leaked (it's allocated
		 * but no one has a reference to it), we want to make sure
		 * kmemleak does not treat the ->s_mem pointer as a reference
		 * to the object. Otherwise we will not report the leak.
		 */
		kmemleak_scan_area(&slabp->list, sizeof(struct list_head),
				   local_flags);
2738	} else {
2739		slabp = objp + colour_off;
2740		colour_off += cachep->slab_size;
2741	}
2742	slabp->inuse = 0;
2743	slabp->colouroff = colour_off;
2744	slabp->s_mem = objp + colour_off;
2745	slabp->nodeid = nodeid;
2746	slabp->free = 0;
2747	return slabp;
2748}
2749
2750static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp)
2751{
2752	return (kmem_bufctl_t *) (slabp + 1);
2753}
2754
2755static void cache_init_objs(struct kmem_cache *cachep,
2756			    struct slab *slabp)
2757{
2758	int i;
2759
2760	for (i = 0; i < cachep->num; i++) {
2761		void *objp = index_to_obj(cachep, slabp, i);
2762#if DEBUG
2763		/* need to poison the objs? */
2764		if (cachep->flags & SLAB_POISON)
2765			poison_obj(cachep, objp, POISON_FREE);
2766		if (cachep->flags & SLAB_STORE_USER)
2767			*dbg_userword(cachep, objp) = NULL;
2768
2769		if (cachep->flags & SLAB_RED_ZONE) {
2770			*dbg_redzone1(cachep, objp) = RED_INACTIVE;
2771			*dbg_redzone2(cachep, objp) = RED_INACTIVE;
2772		}
2773		/*
2774		 * Constructors are not allowed to allocate memory from the same
2775		 * cache which they are a constructor for.  Otherwise, deadlock.
2776		 * They must also be threaded.
2777		 */
2778		if (cachep->ctor && !(cachep->flags & SLAB_POISON))
2779			cachep->ctor(objp + obj_offset(cachep));
2780
2781		if (cachep->flags & SLAB_RED_ZONE) {
2782			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
2783				slab_error(cachep, "constructor overwrote the"
2784					   " end of an object");
2785			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
2786				slab_error(cachep, "constructor overwrote the"
2787					   " start of an object");
2788		}
2789		if ((cachep->buffer_size % PAGE_SIZE) == 0 &&
2790			    OFF_SLAB(cachep) && cachep->flags & SLAB_POISON)
2791			kernel_map_pages(virt_to_page(objp),
2792					 cachep->buffer_size / PAGE_SIZE, 0);
2793#else
2794		if (cachep->ctor)
2795			cachep->ctor(objp);
2796#endif
2797		slab_bufctl(slabp)[i] = i + 1;
2798	}
2799	slab_bufctl(slabp)[i - 1] = BUFCTL_END;
2800}
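
/*
 * Sketch of the free list built above (illustrative): for a slab with
 * cachep->num == 4 the bufctl array ends up as
 *
 *	slab_bufctl(slabp)[0..3] = { 1, 2, 3, BUFCTL_END }
 *
 * and slabp->free == 0, so slab_get_obj() hands out objects in index order
 * until BUFCTL_END is reached, while slab_put_obj() pushes freed indices
 * back onto the head of this list.
 */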
2801
2802static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
2803{
2804	if (CONFIG_ZONE_DMA_FLAG) {
2805		if (flags & GFP_DMA)
2806			BUG_ON(!(cachep->gfpflags & GFP_DMA));
2807		else
2808			BUG_ON(cachep->gfpflags & GFP_DMA);
2809	}
2810}
2811
2812static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp,
2813				int nodeid)
2814{
2815	void *objp = index_to_obj(cachep, slabp, slabp->free);
2816	kmem_bufctl_t next;
2817
2818	slabp->inuse++;
2819	next = slab_bufctl(slabp)[slabp->free];
2820#if DEBUG
2821	slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
2822	WARN_ON(slabp->nodeid != nodeid);
2823#endif
2824	slabp->free = next;
2825
2826	return objp;
2827}
2828
2829static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
2830				void *objp, int nodeid)
2831{
2832	unsigned int objnr = obj_to_index(cachep, slabp, objp);
2833
2834#if DEBUG
2835	/* Verify that the slab belongs to the intended node */
2836	WARN_ON(slabp->nodeid != nodeid);
2837
2838	if (slab_bufctl(slabp)[objnr] + 1 <= SLAB_LIMIT + 1) {
2839		printk(KERN_ERR "slab: double free detected in cache "
2840				"'%s', objp %p\n", cachep->name, objp);
2841		BUG();
2842	}
2843#endif
2844	slab_bufctl(slabp)[objnr] = slabp->free;
2845	slabp->free = objnr;
2846	slabp->inuse--;
2847}
2848
2849/*
2850 * Map pages beginning at addr to the given cache and slab. This is required
2851 * for the slab allocator to be able to lookup the cache and slab of a
2852 * virtual address for kfree, ksize, and slab debugging.
2853 */
2854static void slab_map_pages(struct kmem_cache *cache, struct slab *slab,
2855			   void *addr)
2856{
2857	int nr_pages;
2858	struct page *page;
2859
2860	page = virt_to_page(addr);
2861
2862	nr_pages = 1;
2863	if (likely(!PageCompound(page)))
2864		nr_pages <<= cache->gfporder;
2865
2866	do {
2867		page_set_cache(page, cache);
2868		page_set_slab(page, slab);
2869		page++;
2870	} while (--nr_pages);
2871}
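
/*
 * Illustrative reverse lookup (this is what kfree() and ksize() rely on):
 * given an object pointer, the owning cache and slab can be recovered from
 * the struct page set up above, e.g.
 *
 *	page = virt_to_head_page(objp);
 *	cachep = page_get_cache(page);
 *	slabp = page_get_slab(page);
 */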
2872
2873/*
2874 * Grow (by 1) the number of slabs within a cache.  This is called by
2875 * kmem_cache_alloc() when there are no active objs left in a cache.
2876 */
2877static int cache_grow(struct kmem_cache *cachep,
2878		gfp_t flags, int nodeid, void *objp)
2879{
2880	struct slab *slabp;
2881	size_t offset;
2882	gfp_t local_flags;
2883	struct kmem_list3 *l3;
2884
2885	/*
2886	 * Be lazy and only check for valid flags here,  keeping it out of the
2887	 * critical path in kmem_cache_alloc().
2888	 */
2889	BUG_ON(flags & GFP_SLAB_BUG_MASK);
2890	local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
2891
2892	/* Take the l3 list lock to change the colour_next on this node */
2893	check_irq_off();
2894	l3 = cachep->nodelists[nodeid];
2895	spin_lock(&l3->list_lock);
2896
	/* Get the colour for the slab, and calculate the next value. */
2898	offset = l3->colour_next;
2899	l3->colour_next++;
2900	if (l3->colour_next >= cachep->colour)
2901		l3->colour_next = 0;
2902	spin_unlock(&l3->list_lock);
2903
2904	offset *= cachep->colour_off;
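
	/*
	 * Example (illustrative numbers): with colour_off == 64 and
	 * cachep->colour == 3, successive slabs on this node place their
	 * objects at byte offsets 0, 64 and 128, then wrap back to 0,
	 * spreading otherwise identically laid out objects across
	 * different cache lines.
	 */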
2905
2906	if (local_flags & __GFP_WAIT)
2907		local_irq_enable();
2908
2909	/*
2910	 * The test for missing atomic flag is performed here, rather than
2911	 * the more obvious place, simply to reduce the critical path length
2912	 * in kmem_cache_alloc(). If a caller is seriously mis-behaving they
2913	 * will eventually be caught here (where it matters).
2914	 */
2915	kmem_flagcheck(cachep, flags);
2916
2917	/*
2918	 * Get mem for the objs.  Attempt to allocate a physical page from
2919	 * 'nodeid'.
2920	 */
2921	if (!objp)
2922		objp = kmem_getpages(cachep, local_flags, nodeid);
2923	if (!objp)
2924		goto failed;
2925
2926	/* Get slab management. */
2927	slabp = alloc_slabmgmt(cachep, objp, offset,
2928			local_flags & ~GFP_CONSTRAINT_MASK, nodeid);
2929	if (!slabp)
2930		goto opps1;
2931
2932	slab_map_pages(cachep, slabp, objp);
2933
2934	cache_init_objs(cachep, slabp);
2935
2936	if (local_flags & __GFP_WAIT)
2937		local_irq_disable();
2938	check_irq_off();
2939	spin_lock(&l3->list_lock);
2940
2941	/* Make slab active. */
2942	list_add_tail(&slabp->list, &(l3->slabs_free));
2943	STATS_INC_GROWN(cachep);
2944	l3->free_objects += cachep->num;
2945	spin_unlock(&l3->list_lock);
2946	return 1;
2947opps1:
2948	kmem_freepages(cachep, objp);
2949failed:
2950	if (local_flags & __GFP_WAIT)
2951		local_irq_disable();
2952	return 0;
2953}
2954
2955#if DEBUG
2956
2957/*
2958 * Perform extra freeing checks:
2959 * - detect bad pointers.
2960 * - POISON/RED_ZONE checking
2961 */
2962static void kfree_debugcheck(const void *objp)
2963{
2964	if (!virt_addr_valid(objp)) {
2965		printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n",
2966		       (unsigned long)objp);
2967		BUG();
2968	}
2969}
2970
2971static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
2972{
2973	unsigned long long redzone1, redzone2;
2974
2975	redzone1 = *dbg_redzone1(cache, obj);
2976	redzone2 = *dbg_redzone2(cache, obj);
2977
2978	/*
2979	 * Redzone is ok.
2980	 */
2981	if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE)
2982		return;
2983
2984	if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE)
2985		slab_error(cache, "double free detected");
2986	else
2987		slab_error(cache, "memory outside object was overwritten");
2988
2989	printk(KERN_ERR "%p: redzone 1:0x%llx, redzone 2:0x%llx.\n",
2990			obj, redzone1, redzone2);
2991}
2992
2993static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
2994				   void *caller)
2995{
2996	struct page *page;
2997	unsigned int objnr;
2998	struct slab *slabp;
2999
3000	BUG_ON(virt_to_cache(objp) != cachep);
3001
3002	objp -= obj_offset(cachep);
3003	kfree_debugcheck(objp);
3004	page = virt_to_head_page(objp);
3005
3006	slabp = page_get_slab(page);
3007
3008	if (cachep->flags & SLAB_RED_ZONE) {
3009		verify_redzone_free(cachep, objp);
3010		*dbg_redzone1(cachep, objp) = RED_INACTIVE;
3011		*dbg_redzone2(cachep, objp) = RED_INACTIVE;
3012	}
3013	if (cachep->flags & SLAB_STORE_USER)
3014		*dbg_userword(cachep, objp) = caller;
3015
3016	objnr = obj_to_index(cachep, slabp, objp);
3017
3018	BUG_ON(objnr >= cachep->num);
3019	BUG_ON(objp != index_to_obj(cachep, slabp, objnr));
3020
3021#ifdef CONFIG_DEBUG_SLAB_LEAK
3022	slab_bufctl(slabp)[objnr] = BUFCTL_FREE;
3023#endif
3024	if (cachep->flags & SLAB_POISON) {
3025#ifdef CONFIG_DEBUG_PAGEALLOC
3026		if ((cachep->buffer_size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
3027			store_stackinfo(cachep, objp, (unsigned long)caller);
3028			kernel_map_pages(virt_to_page(objp),
3029					 cachep->buffer_size / PAGE_SIZE, 0);
3030		} else {
3031			poison_obj(cachep, objp, POISON_FREE);
3032		}
3033#else
3034		poison_obj(cachep, objp, POISON_FREE);
3035#endif
3036	}
3037	return objp;
3038}
3039
3040static void check_slabp(struct kmem_cache *cachep, struct slab *slabp)
3041{
3042	kmem_bufctl_t i;
3043	int entries = 0;
3044
3045	/* Check slab's freelist to see if this obj is there. */
3046	for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) {
3047		entries++;
3048		if (entries > cachep->num || i >= cachep->num)
3049			goto bad;
3050	}
3051	if (entries != cachep->num - slabp->inuse) {
3052bad:
3053		printk(KERN_ERR "slab: Internal list corruption detected in "
3054				"cache '%s'(%d), slabp %p(%d). Hexdump:\n",
3055			cachep->name, cachep->num, slabp, slabp->inuse);
3056		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, slabp,
3057			sizeof(*slabp) + cachep->num * sizeof(kmem_bufctl_t),
3058			1);
3059		BUG();
3060	}
3061}
3062#else
3063#define kfree_debugcheck(x) do { } while(0)
3064#define cache_free_debugcheck(x,objp,z) (objp)
3065#define check_slabp(x,y) do { } while(0)
3066#endif
3067
3068static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
3069{
3070	int batchcount;
3071	struct kmem_list3 *l3;
3072	struct array_cache *ac;
3073	int node;
3074
3075retry:
3076	check_irq_off();
3077	node = numa_mem_id();
3078	ac = cpu_cache_get(cachep);
3079	batchcount = ac->batchcount;
3080	if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
3081		/*
3082		 * If there was little recent activity on this cache, then
3083		 * perform only a partial refill.  Otherwise we could generate
3084		 * refill bouncing.
3085		 */
3086		batchcount = BATCHREFILL_LIMIT;
3087	}
3088	l3 = cachep->nodelists[node];
3089
3090	BUG_ON(ac->avail > 0 || !l3);
3091	spin_lock(&l3->list_lock);
3092
3093	/* See if we can refill from the shared array */
3094	if (l3->shared && transfer_objects(ac, l3->shared, batchcount)) {
3095		l3->shared->touched = 1;
3096		goto alloc_done;
3097	}
3098
3099	while (batchcount > 0) {
3100		struct list_head *entry;
3101		struct slab *slabp;
		/* Get the slab that the allocation is to come from. */
3103		entry = l3->slabs_partial.next;
3104		if (entry == &l3->slabs_partial) {
3105			l3->free_touched = 1;
3106			entry = l3->slabs_free.next;
3107			if (entry == &l3->slabs_free)
3108				goto must_grow;
3109		}
3110
3111		slabp = list_entry(entry, struct slab, list);
3112		check_slabp(cachep, slabp);
3113		check_spinlock_acquired(cachep);
3114
3115		/*
3116		 * The slab was either on partial or free list so
3117		 * there must be at least one object available for
3118		 * allocation.
3119		 */
3120		BUG_ON(slabp->inuse >= cachep->num);
3121
3122		while (slabp->inuse < cachep->num && batchcount--) {
3123			STATS_INC_ALLOCED(cachep);
3124			STATS_INC_ACTIVE(cachep);
3125			STATS_SET_HIGH(cachep);
3126
3127			ac->entry[ac->avail++] = slab_get_obj(cachep, slabp,
3128							    node);
3129		}
3130		check_slabp(cachep, slabp);
3131
3132		/* move slabp to correct slabp list: */
3133		list_del(&slabp->list);
3134		if (slabp->free == BUFCTL_END)
3135			list_add(&slabp->list, &l3->slabs_full);
3136		else
3137			list_add(&slabp->list, &l3->slabs_partial);
3138	}
3139
3140must_grow:
3141	l3->free_objects -= ac->avail;
3142alloc_done:
3143	spin_unlock(&l3->list_lock);
3144
3145	if (unlikely(!ac->avail)) {
3146		int x;
3147		x = cache_grow(cachep, flags | GFP_THISNODE, node, NULL);
3148
3149		/* cache_grow can reenable interrupts, then ac could change. */
3150		ac = cpu_cache_get(cachep);
3151		if (!x && ac->avail == 0)	/* no objects in sight? abort */
3152			return NULL;
3153
3154		if (!ac->avail)		/* objects refilled by interrupt? */
3155			goto retry;
3156	}
3157	ac->touched = 1;
3158	return ac->entry[--ac->avail];
3159}
3160
3161static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
3162						gfp_t flags)
3163{
3164	might_sleep_if(flags & __GFP_WAIT);
3165#if DEBUG
3166	kmem_flagcheck(cachep, flags);
3167#endif
3168}
3169
3170#if DEBUG
3171static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
3172				gfp_t flags, void *objp, void *caller)
3173{
3174	if (!objp)
3175		return objp;
3176	if (cachep->flags & SLAB_POISON) {
3177#ifdef CONFIG_DEBUG_PAGEALLOC
3178		if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
3179			kernel_map_pages(virt_to_page(objp),
3180					 cachep->buffer_size / PAGE_SIZE, 1);
3181		else
3182			check_poison_obj(cachep, objp);
3183#else
3184		check_poison_obj(cachep, objp);
3185#endif
3186		poison_obj(cachep, objp, POISON_INUSE);
3187	}
3188	if (cachep->flags & SLAB_STORE_USER)
3189		*dbg_userword(cachep, objp) = caller;
3190
3191	if (cachep->flags & SLAB_RED_ZONE) {
3192		if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
3193				*dbg_redzone2(cachep, objp) != RED_INACTIVE) {
3194			slab_error(cachep, "double free, or memory outside"
3195						" object was overwritten");
3196			printk(KERN_ERR
3197				"%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
3198				objp, *dbg_redzone1(cachep, objp),
3199				*dbg_redzone2(cachep, objp));
3200		}
3201		*dbg_redzone1(cachep, objp) = RED_ACTIVE;
3202		*dbg_redzone2(cachep, objp) = RED_ACTIVE;
3203	}
3204#ifdef CONFIG_DEBUG_SLAB_LEAK
3205	{
3206		struct slab *slabp;
3207		unsigned objnr;
3208
3209		slabp = page_get_slab(virt_to_head_page(objp));
3210		objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
3211		slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE;
3212	}
3213#endif
3214	objp += obj_offset(cachep);
3215	if (cachep->ctor && cachep->flags & SLAB_POISON)
3216		cachep->ctor(objp);
3217	if (ARCH_SLAB_MINALIGN &&
3218	    ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) {
3219		printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
3220		       objp, (int)ARCH_SLAB_MINALIGN);
3221	}
3222	return objp;
3223}
3224#else
3225#define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
3226#endif
3227
3228static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags)
3229{
3230	if (cachep == &cache_cache)
3231		return false;
3232
3233	return should_failslab(obj_size(cachep), flags, cachep->flags);
3234}
3235
3236static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3237{
3238	void *objp;
3239	struct array_cache *ac;
3240
3241	check_irq_off();
3242
3243	ac = cpu_cache_get(cachep);
3244	if (likely(ac->avail)) {
3245		STATS_INC_ALLOCHIT(cachep);
3246		ac->touched = 1;
3247		objp = ac->entry[--ac->avail];
3248	} else {
3249		STATS_INC_ALLOCMISS(cachep);
3250		objp = cache_alloc_refill(cachep, flags);
3251		/*
3252		 * the 'ac' may be updated by cache_alloc_refill(),
3253		 * and kmemleak_erase() requires its correct value.
3254		 */
3255		ac = cpu_cache_get(cachep);
3256	}
3257	/*
3258	 * To avoid a false negative, if an object that is in one of the
3259	 * per-CPU caches is leaked, we need to make sure kmemleak doesn't
3260	 * treat the array pointers as a reference to the object.
3261	 */
3262	if (objp)
3263		kmemleak_erase(&ac->entry[ac->avail]);
3264	return objp;
3265}
3266
3267#ifdef CONFIG_NUMA
3268/*
 * Try allocating on another node if PF_SPREAD_SLAB or PF_MEMPOLICY is set.
 *
 * If we are in_interrupt(), then process context, including cpusets and
 * mempolicy, may not apply and should not be used for allocation policy.
3273 */
3274static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
3275{
3276	int nid_alloc, nid_here;
3277
3278	if (in_interrupt() || (flags & __GFP_THISNODE))
3279		return NULL;
3280	nid_alloc = nid_here = numa_mem_id();
3281	get_mems_allowed();
3282	if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
3283		nid_alloc = cpuset_slab_spread_node();
3284	else if (current->mempolicy)
3285		nid_alloc = slab_node(current->mempolicy);
3286	put_mems_allowed();
3287	if (nid_alloc != nid_here)
3288		return ____cache_alloc_node(cachep, flags, nid_alloc);
3289	return NULL;
3290}
3291
3292/*
3293 * Fallback function if there was no memory available and no objects on a
3294 * certain node and fall back is permitted. First we scan all the
3295 * available nodelists for available objects. If that fails then we
3296 * perform an allocation without specifying a node. This allows the page
3297 * allocator to do its reclaim / fallback magic. We then insert the
3298 * slab into the proper nodelist and then allocate from it.
3299 */
3300static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
3301{
3302	struct zonelist *zonelist;
3303	gfp_t local_flags;
3304	struct zoneref *z;
3305	struct zone *zone;
3306	enum zone_type high_zoneidx = gfp_zone(flags);
3307	void *obj = NULL;
3308	int nid;
3309
3310	if (flags & __GFP_THISNODE)
3311		return NULL;
3312
3313	get_mems_allowed();
3314	zonelist = node_zonelist(slab_node(current->mempolicy), flags);
3315	local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
3316
3317retry:
3318	/*
3319	 * Look through allowed nodes for objects available
3320	 * from existing per node queues.
3321	 */
3322	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
3323		nid = zone_to_nid(zone);
3324
3325		if (cpuset_zone_allowed_hardwall(zone, flags) &&
3326			cache->nodelists[nid] &&
3327			cache->nodelists[nid]->free_objects) {
3328				obj = ____cache_alloc_node(cache,
3329					flags | GFP_THISNODE, nid);
3330				if (obj)
3331					break;
3332		}
3333	}
3334
3335	if (!obj) {
3336		/*
3337		 * This allocation will be performed within the constraints
3338		 * of the current cpuset / memory policy requirements.
3339		 * We may trigger various forms of reclaim on the allowed
3340		 * set and go into memory reserves if necessary.
3341		 */
3342		if (local_flags & __GFP_WAIT)
3343			local_irq_enable();
3344		kmem_flagcheck(cache, flags);
3345		obj = kmem_getpages(cache, local_flags, numa_mem_id());
3346		if (local_flags & __GFP_WAIT)
3347			local_irq_disable();
3348		if (obj) {
3349			/*
3350			 * Insert into the appropriate per node queues
3351			 */
3352			nid = page_to_nid(virt_to_page(obj));
3353			if (cache_grow(cache, flags, nid, obj)) {
3354				obj = ____cache_alloc_node(cache,
3355					flags | GFP_THISNODE, nid);
3356				if (!obj)
3357					/*
3358					 * Another processor may allocate the
3359					 * objects in the slab since we are
3360					 * not holding any locks.
3361					 */
3362					goto retry;
3363			} else {
3364				/* cache_grow already freed obj */
3365				obj = NULL;
3366			}
3367		}
3368	}
3369	put_mems_allowed();
3370	return obj;
3371}
3372
3373/*
 * An interface to enable slab creation on nodeid.
3375 */
3376static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
3377				int nodeid)
3378{
3379	struct list_head *entry;
3380	struct slab *slabp;
3381	struct kmem_list3 *l3;
3382	void *obj;
3383	int x;
3384
3385	l3 = cachep->nodelists[nodeid];
3386	BUG_ON(!l3);
3387
3388retry:
3389	check_irq_off();
3390	spin_lock(&l3->list_lock);
3391	entry = l3->slabs_partial.next;
3392	if (entry == &l3->slabs_partial) {
3393		l3->free_touched = 1;
3394		entry = l3->slabs_free.next;
3395		if (entry == &l3->slabs_free)
3396			goto must_grow;
3397	}
3398
3399	slabp = list_entry(entry, struct slab, list);
3400	check_spinlock_acquired_node(cachep, nodeid);
3401	check_slabp(cachep, slabp);
3402
3403	STATS_INC_NODEALLOCS(cachep);
3404	STATS_INC_ACTIVE(cachep);
3405	STATS_SET_HIGH(cachep);
3406
3407	BUG_ON(slabp->inuse == cachep->num);
3408
3409	obj = slab_get_obj(cachep, slabp, nodeid);
3410	check_slabp(cachep, slabp);
3411	l3->free_objects--;
3412	/* move slabp to correct slabp list: */
3413	list_del(&slabp->list);
3414
3415	if (slabp->free == BUFCTL_END)
3416		list_add(&slabp->list, &l3->slabs_full);
3417	else
3418		list_add(&slabp->list, &l3->slabs_partial);
3419
3420	spin_unlock(&l3->list_lock);
3421	goto done;
3422
3423must_grow:
3424	spin_unlock(&l3->list_lock);
3425	x = cache_grow(cachep, flags | GFP_THISNODE, nodeid, NULL);
3426	if (x)
3427		goto retry;
3428
3429	return fallback_alloc(cachep, flags);
3430
3431done:
3432	return obj;
3433}
3434
3435/**
3436 * kmem_cache_alloc_node - Allocate an object on the specified node
3437 * @cachep: The cache to allocate from.
3438 * @flags: See kmalloc().
3439 * @nodeid: node number of the target node.
3440 * @caller: return address of caller, used for debug information
3441 *
3442 * Identical to kmem_cache_alloc but it will allocate memory on the given
3443 * node, which can improve the performance for cpu bound structures.
3444 *
 * Falling back to other nodes is possible if __GFP_THISNODE is not set.
3446 */
3447static __always_inline void *
3448__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
3449		   void *caller)
3450{
3451	unsigned long save_flags;
3452	void *ptr;
3453	int slab_node = numa_mem_id();
3454
3455	flags &= gfp_allowed_mask;
3456
3457	lockdep_trace_alloc(flags);
3458
3459	if (slab_should_failslab(cachep, flags))
3460		return NULL;
3461
3462	cache_alloc_debugcheck_before(cachep, flags);
3463	local_irq_save(save_flags);
3464
3465	if (nodeid == NUMA_NO_NODE)
3466		nodeid = slab_node;
3467
3468	if (unlikely(!cachep->nodelists[nodeid])) {
3469		/* Node not bootstrapped yet */
3470		ptr = fallback_alloc(cachep, flags);
3471		goto out;
3472	}
3473
3474	if (nodeid == slab_node) {
3475		/*
3476		 * Use the locally cached objects if possible.
3477		 * However ____cache_alloc does not allow fallback
3478		 * to other nodes. It may fail while we still have
3479		 * objects on other nodes available.
3480		 */
3481		ptr = ____cache_alloc(cachep, flags);
3482		if (ptr)
3483			goto out;
3484	}
3485	/* ___cache_alloc_node can fall back to other nodes */
3486	ptr = ____cache_alloc_node(cachep, flags, nodeid);
3487  out:
3488	local_irq_restore(save_flags);
3489	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
3490	kmemleak_alloc_recursive(ptr, obj_size(cachep), 1, cachep->flags,
3491				 flags);
3492
3493	if (likely(ptr))
3494		kmemcheck_slab_alloc(cachep, flags, ptr, obj_size(cachep));
3495
3496	if (unlikely((flags & __GFP_ZERO) && ptr))
3497		memset(ptr, 0, obj_size(cachep));
3498
3499	return ptr;
3500}
3501
3502static __always_inline void *
3503__do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
3504{
3505	void *objp;
3506
3507	if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) {
3508		objp = alternate_node_alloc(cache, flags);
3509		if (objp)
3510			goto out;
3511	}
3512	objp = ____cache_alloc(cache, flags);
3513
3514	/*
3515	 * We may just have run out of memory on the local node.
3516	 * ____cache_alloc_node() knows how to locate memory on other nodes
3517	 */
3518	if (!objp)
3519		objp = ____cache_alloc_node(cache, flags, numa_mem_id());
3520
3521  out:
3522	return objp;
3523}
3524#else
3525
3526static __always_inline void *
3527__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3528{
3529	return ____cache_alloc(cachep, flags);
3530}
3531
3532#endif /* CONFIG_NUMA */
3533
3534static __always_inline void *
3535__cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
3536{
3537	unsigned long save_flags;
3538	void *objp;
3539
3540	flags &= gfp_allowed_mask;
3541
3542	lockdep_trace_alloc(flags);
3543
3544	if (slab_should_failslab(cachep, flags))
3545		return NULL;
3546
3547	cache_alloc_debugcheck_before(cachep, flags);
3548	local_irq_save(save_flags);
3549	objp = __do_cache_alloc(cachep, flags);
3550	local_irq_restore(save_flags);
3551	objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
3552	kmemleak_alloc_recursive(objp, obj_size(cachep), 1, cachep->flags,
3553				 flags);
3554	prefetchw(objp);
3555
3556	if (likely(objp))
3557		kmemcheck_slab_alloc(cachep, flags, objp, obj_size(cachep));
3558
3559	if (unlikely((flags & __GFP_ZERO) && objp))
3560		memset(objp, 0, obj_size(cachep));
3561
3562	return objp;
3563}
3564
3565/*
 * The caller must hold the correct kmem_list3's list_lock.
3567 */
3568static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
3569		       int node)
3570{
3571	int i;
3572	struct kmem_list3 *l3;
3573
3574	for (i = 0; i < nr_objects; i++) {
3575		void *objp = objpp[i];
3576		struct slab *slabp;
3577
3578		slabp = virt_to_slab(objp);
3579		l3 = cachep->nodelists[node];
3580		list_del(&slabp->list);
3581		check_spinlock_acquired_node(cachep, node);
3582		check_slabp(cachep, slabp);
3583		slab_put_obj(cachep, slabp, objp, node);
3584		STATS_DEC_ACTIVE(cachep);
3585		l3->free_objects++;
3586		check_slabp(cachep, slabp);
3587
3588		/* fixup slab chains */
3589		if (slabp->inuse == 0) {
3590			if (l3->free_objects > l3->free_limit) {
3591				l3->free_objects -= cachep->num;
3592				/* No need to drop any previously held
				 * lock here; even if we have an off-slab slab
				 * descriptor, it is guaranteed to come from
				 * a different cache (see the comments before
				 * alloc_slabmgmt()).
3597				 */
3598				slab_destroy(cachep, slabp);
3599			} else {
3600				list_add(&slabp->list, &l3->slabs_free);
3601			}
3602		} else {
3603			/* Unconditionally move a slab to the end of the
3604			 * partial list on free - maximum time for the
3605			 * other objects to be freed, too.
3606			 */
3607			list_add_tail(&slabp->list, &l3->slabs_partial);
3608		}
3609	}
3610}
3611
3612static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
3613{
3614	int batchcount;
3615	struct kmem_list3 *l3;
3616	int node = numa_mem_id();
3617
3618	batchcount = ac->batchcount;
3619#if DEBUG
3620	BUG_ON(!batchcount || batchcount > ac->avail);
3621#endif
3622	check_irq_off();
3623	l3 = cachep->nodelists[node];
3624	spin_lock(&l3->list_lock);
3625	if (l3->shared) {
3626		struct array_cache *shared_array = l3->shared;
3627		int max = shared_array->limit - shared_array->avail;
3628		if (max) {
3629			if (batchcount > max)
3630				batchcount = max;
3631			memcpy(&(shared_array->entry[shared_array->avail]),
3632			       ac->entry, sizeof(void *) * batchcount);
3633			shared_array->avail += batchcount;
3634			goto free_done;
3635		}
3636	}
3637
3638	free_block(cachep, ac->entry, batchcount, node);
3639free_done:
3640#if STATS
3641	{
3642		int i = 0;
3643		struct list_head *p;
3644
3645		p = l3->slabs_free.next;
3646		while (p != &(l3->slabs_free)) {
3647			struct slab *slabp;
3648
3649			slabp = list_entry(p, struct slab, list);
3650			BUG_ON(slabp->inuse);
3651
3652			i++;
3653			p = p->next;
3654		}
3655		STATS_SET_FREEABLE(cachep, i);
3656	}
3657#endif
3658	spin_unlock(&l3->list_lock);
3659	ac->avail -= batchcount;
3660	memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
3661}
3662
3663/*
3664 * Release an obj back to its cache. If the obj has a constructed state, it must
 * be in this state _before_ it is released.  Called with interrupts disabled.
3666 */
3667static inline void __cache_free(struct kmem_cache *cachep, void *objp,
3668    void *caller)
3669{
3670	struct array_cache *ac = cpu_cache_get(cachep);
3671
3672	check_irq_off();
3673	kmemleak_free_recursive(objp, cachep->flags);
3674	objp = cache_free_debugcheck(cachep, objp, caller);
3675
3676	kmemcheck_slab_free(cachep, objp, obj_size(cachep));
3677
3678	/*
	 * Skip calling cache_free_alien() when the platform is not NUMA.
	 * This avoids the cache misses that happen while accessing slabp
	 * (which is a per-page memory reference) to get the nodeid.  Instead,
	 * use a global variable to skip the call; it is most likely to
	 * already be present in the cache.
3684	 */
3685	if (nr_online_nodes > 1 && cache_free_alien(cachep, objp))
3686		return;
3687
3688	if (likely(ac->avail < ac->limit)) {
3689		STATS_INC_FREEHIT(cachep);
3690		ac->entry[ac->avail++] = objp;
3691		return;
3692	} else {
3693		STATS_INC_FREEMISS(cachep);
3694		cache_flusharray(cachep, ac);
3695		ac->entry[ac->avail++] = objp;
3696	}
3697}
3698
3699/**
3700 * kmem_cache_alloc - Allocate an object
3701 * @cachep: The cache to allocate from.
3702 * @flags: See kmalloc().
3703 *
3704 * Allocate an object from this cache.  The flags are only relevant
3705 * if the cache has no available objects.
3706 */
3707void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3708{
3709	void *ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
3710
3711	trace_kmem_cache_alloc(_RET_IP_, ret,
3712			       obj_size(cachep), cachep->buffer_size, flags);
3713
3714	return ret;
3715}
3716EXPORT_SYMBOL(kmem_cache_alloc);
3717
3718#ifdef CONFIG_TRACING
3719void *
3720kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags)
3721{
3722	void *ret;
3723
3724	ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
3725
3726	trace_kmalloc(_RET_IP_, ret,
3727		      size, slab_buffer_size(cachep), flags);
3728	return ret;
3729}
3730EXPORT_SYMBOL(kmem_cache_alloc_trace);
3731#endif
3732
3733#ifdef CONFIG_NUMA
3734void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
3735{
3736	void *ret = __cache_alloc_node(cachep, flags, nodeid,
3737				       __builtin_return_address(0));
3738
3739	trace_kmem_cache_alloc_node(_RET_IP_, ret,
3740				    obj_size(cachep), cachep->buffer_size,
3741				    flags, nodeid);
3742
3743	return ret;
3744}
3745EXPORT_SYMBOL(kmem_cache_alloc_node);
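
/*
 * Illustrative sketch ("foo_cache" is hypothetical): allocate an object from
 * the memory node closest to the current cpu.
 *
 *	int nid = numa_mem_id();
 *	struct foo *p = kmem_cache_alloc_node(foo_cache, GFP_KERNEL, nid);
 */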
3746
3747#ifdef CONFIG_TRACING
3748void *kmem_cache_alloc_node_trace(size_t size,
3749				  struct kmem_cache *cachep,
3750				  gfp_t flags,
3751				  int nodeid)
3752{
3753	void *ret;
3754
3755	ret = __cache_alloc_node(cachep, flags, nodeid,
3756				  __builtin_return_address(0));
3757	trace_kmalloc_node(_RET_IP_, ret,
3758			   size, slab_buffer_size(cachep),
3759			   flags, nodeid);
3760	return ret;
3761}
3762EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
3763#endif
3764
3765static __always_inline void *
3766__do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
3767{
3768	struct kmem_cache *cachep;
3769
3770	cachep = kmem_find_general_cachep(size, flags);
3771	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3772		return cachep;
3773	return kmem_cache_alloc_node_trace(size, cachep, flags, node);
3774}
3775
3776#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
3777void *__kmalloc_node(size_t size, gfp_t flags, int node)
3778{
3779	return __do_kmalloc_node(size, flags, node,
3780			__builtin_return_address(0));
3781}
3782EXPORT_SYMBOL(__kmalloc_node);
3783
3784void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
3785		int node, unsigned long caller)
3786{
3787	return __do_kmalloc_node(size, flags, node, (void *)caller);
3788}
3789EXPORT_SYMBOL(__kmalloc_node_track_caller);
3790#else
3791void *__kmalloc_node(size_t size, gfp_t flags, int node)
3792{
3793	return __do_kmalloc_node(size, flags, node, NULL);
3794}
3795EXPORT_SYMBOL(__kmalloc_node);
3796#endif /* CONFIG_DEBUG_SLAB || CONFIG_TRACING */
3797#endif /* CONFIG_NUMA */
3798
3799/**
3800 * __do_kmalloc - allocate memory
3801 * @size: how many bytes of memory are required.
3802 * @flags: the type of memory to allocate (see kmalloc).
3803 * @caller: function caller for debug tracking of the caller
3804 */
3805static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
3806					  void *caller)
3807{
3808	struct kmem_cache *cachep;
3809	void *ret;
3810
3811	/* If you want to save a few bytes of .text space: replace
3812	 * __ with kmem_.
3813	 * Then kmalloc uses the uninlined functions instead of the
3814	 * inlined ones.
3815	 */
3816	cachep = __find_general_cachep(size, flags);
3817	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3818		return cachep;
3819	ret = __cache_alloc(cachep, flags, caller);
3820
3821	trace_kmalloc((unsigned long) caller, ret,
3822		      size, cachep->buffer_size, flags);
3823
3824	return ret;
3825}
3826
3827
3828#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
3829void *__kmalloc(size_t size, gfp_t flags)
3830{
3831	return __do_kmalloc(size, flags, __builtin_return_address(0));
3832}
3833EXPORT_SYMBOL(__kmalloc);
3834
3835void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
3836{
3837	return __do_kmalloc(size, flags, (void *)caller);
3838}
3839EXPORT_SYMBOL(__kmalloc_track_caller);
3840
3841#else
3842void *__kmalloc(size_t size, gfp_t flags)
3843{
3844	return __do_kmalloc(size, flags, NULL);
3845}
3846EXPORT_SYMBOL(__kmalloc);
3847#endif
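
/*
 * Illustrative sketch: __kmalloc_track_caller() is normally reached through
 * the kmalloc_track_caller() macro, so that thin wrappers attribute their
 * allocations to *their* caller rather than to the wrapper itself.
 * "my_strdup" is a hypothetical helper:
 *
 *	char *my_strdup(const char *s, gfp_t gfp)
 *	{
 *		size_t len = strlen(s) + 1;
 *		char *buf = kmalloc_track_caller(len, gfp);
 *
 *		if (buf)
 *			memcpy(buf, s, len);
 *		return buf;
 *	}
 */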
3848
3849/**
3850 * kmem_cache_free - Deallocate an object
3851 * @cachep: The cache the allocation was from.
3852 * @objp: The previously allocated object.
3853 *
3854 * Free an object which was previously allocated from this
3855 * cache.
3856 */
3857void kmem_cache_free(struct kmem_cache *cachep, void *objp)
3858{
3859	unsigned long flags;
3860
3861	local_irq_save(flags);
3862	debug_check_no_locks_freed(objp, obj_size(cachep));
3863	if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
3864		debug_check_no_obj_freed(objp, obj_size(cachep));
3865	__cache_free(cachep, objp, __builtin_return_address(0));
3866	local_irq_restore(flags);
3867
3868	trace_kmem_cache_free(_RET_IP_, objp);
3869}
3870EXPORT_SYMBOL(kmem_cache_free);
3871
3872/**
3873 * kfree - free previously allocated memory
3874 * @objp: pointer returned by kmalloc.
3875 *
3876 * If @objp is NULL, no operation is performed.
3877 *
3878 * Don't free memory not originally allocated by kmalloc()
3879 * or you will run into trouble.
3880 */
3881void kfree(const void *objp)
3882{
3883	struct kmem_cache *c;
3884	unsigned long flags;
3885
3886	trace_kfree(_RET_IP_, objp);
3887
3888	if (unlikely(ZERO_OR_NULL_PTR(objp)))
3889		return;
3890	local_irq_save(flags);
3891	kfree_debugcheck(objp);
3892	c = virt_to_cache(objp);
3893	debug_check_no_locks_freed(objp, obj_size(c));
3894	debug_check_no_obj_freed(objp, obj_size(c));
3895	__cache_free(c, (void *)objp, __builtin_return_address(0));
3896	local_irq_restore(flags);
3897}
3898EXPORT_SYMBOL(kfree);
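
/*
 * Illustrative sketch of the usual kmalloc()/kfree() pairing.  kfree(NULL)
 * and kfree(ZERO_SIZE_PTR) are no-ops (see the ZERO_OR_NULL_PTR() check
 * above), so error paths need not test the pointer first.
 *
 *	struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL);
 *	if (!p)
 *		return -ENOMEM;
 *	...
 *	kfree(p);
 */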
3899
3900unsigned int kmem_cache_size(struct kmem_cache *cachep)
3901{
3902	return obj_size(cachep);
3903}
3904EXPORT_SYMBOL(kmem_cache_size);
3905
3906/*
3907 * This initializes each node's kmem_list3, or resizes its shared/alien caches.
3908 */
3909static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp)
3910{
3911	int node;
3912	struct kmem_list3 *l3;
3913	struct array_cache *new_shared;
3914	struct array_cache **new_alien = NULL;
3915
3916	for_each_online_node(node) {
3917
3918		if (use_alien_caches) {
3919			new_alien = alloc_alien_cache(node, cachep->limit, gfp);
3920			if (!new_alien)
3921				goto fail;
3922		}
3923
3924		new_shared = NULL;
3925		if (cachep->shared) {
3926			new_shared = alloc_arraycache(node,
3927				cachep->shared*cachep->batchcount,
3928					0xbaadf00d, gfp);
3929			if (!new_shared) {
3930				free_alien_cache(new_alien);
3931				goto fail;
3932			}
3933		}
3934
3935		l3 = cachep->nodelists[node];
3936		if (l3) {
3937			struct array_cache *shared = l3->shared;
3938
3939			spin_lock_irq(&l3->list_lock);
3940
3941			if (shared)
3942				free_block(cachep, shared->entry,
3943						shared->avail, node);
3944
3945			l3->shared = new_shared;
3946			if (!l3->alien) {
3947				l3->alien = new_alien;
3948				new_alien = NULL;
3949			}
3950			l3->free_limit = (1 + nr_cpus_node(node)) *
3951					cachep->batchcount + cachep->num;
3952			spin_unlock_irq(&l3->list_lock);
3953			kfree(shared);
3954			free_alien_cache(new_alien);
3955			continue;
3956		}
3957		l3 = kmalloc_node(sizeof(struct kmem_list3), gfp, node);
3958		if (!l3) {
3959			free_alien_cache(new_alien);
3960			kfree(new_shared);
3961			goto fail;
3962		}
3963
3964		kmem_list3_init(l3);
3965		l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
3966				((unsigned long)cachep) % REAPTIMEOUT_LIST3;
3967		l3->shared = new_shared;
3968		l3->alien = new_alien;
3969		l3->free_limit = (1 + nr_cpus_node(node)) *
3970					cachep->batchcount + cachep->num;
3971		cachep->nodelists[node] = l3;
3972	}
3973	return 0;
3974
3975fail:
3976	if (!cachep->next.next) {
3977		/* Cache is not active yet. Roll back what we did */
3978		node--;
3979		while (node >= 0) {
3980			if (cachep->nodelists[node]) {
3981				l3 = cachep->nodelists[node];
3982
3983				kfree(l3->shared);
3984				free_alien_cache(l3->alien);
3985				kfree(l3);
3986				cachep->nodelists[node] = NULL;
3987			}
3988			node--;
3989		}
3990	}
3991	return -ENOMEM;
3992}
3993
3994struct ccupdate_struct {
3995	struct kmem_cache *cachep;
3996	struct array_cache *new[0];
3997};
3998
3999static void do_ccupdate_local(void *info)
4000{
4001	struct ccupdate_struct *new = info;
4002	struct array_cache *old;
4003
4004	check_irq_off();
4005	old = cpu_cache_get(new->cachep);
4006
4007	new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()];
4008	new->new[smp_processor_id()] = old;
4009}
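
/*
 * do_ccupdate_local() runs on every cpu via on_each_cpu() in
 * do_tune_cpucache().  It installs that cpu's new array_cache and hands the
 * old one back through new->new[cpu], so the caller can drain and kfree()
 * the old arrays afterwards.
 */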
4010
4011/* Always called with the cache_chain_mutex held */
4012static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
4013				int batchcount, int shared, gfp_t gfp)
4014{
4015	struct ccupdate_struct *new;
4016	int i;
4017
4018	new = kzalloc(sizeof(*new) + nr_cpu_ids * sizeof(struct array_cache *),
4019		      gfp);
4020	if (!new)
4021		return -ENOMEM;
4022
4023	for_each_online_cpu(i) {
4024		new->new[i] = alloc_arraycache(cpu_to_mem(i), limit,
4025						batchcount, gfp);
4026		if (!new->new[i]) {
4027			for (i--; i >= 0; i--)
4028				kfree(new->new[i]);
4029			kfree(new);
4030			return -ENOMEM;
4031		}
4032	}
4033	new->cachep = cachep;
4034
4035	on_each_cpu(do_ccupdate_local, (void *)new, 1);
4036
4037	check_irq_on();
4038	cachep->batchcount = batchcount;
4039	cachep->limit = limit;
4040	cachep->shared = shared;
4041
4042	for_each_online_cpu(i) {
4043		struct array_cache *ccold = new->new[i];
4044		if (!ccold)
4045			continue;
4046		spin_lock_irq(&cachep->nodelists[cpu_to_mem(i)]->list_lock);
4047		free_block(cachep, ccold->entry, ccold->avail, cpu_to_mem(i));
4048		spin_unlock_irq(&cachep->nodelists[cpu_to_mem(i)]->list_lock);
4049		kfree(ccold);
4050	}
4051	kfree(new);
4052	return alloc_kmemlist(cachep, gfp);
4053}
4054
4055/* Called with cache_chain_mutex held always */
4056static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
4057{
4058	int err;
4059	int limit, shared;
4060
4061	/*
4062	 * The head array serves three purposes:
4063	 * - create a LIFO ordering, i.e. return objects that are cache-warm
4064	 * - reduce the number of spinlock operations.
4065	 * - reduce the number of linked list operations on the slab and
4066	 *   bufctl chains: array operations are cheaper.
4067	 * The numbers are guessed, we should auto-tune as described by
4068	 * Bonwick.
4069	 */
4070	if (cachep->buffer_size > 131072)
4071		limit = 1;
4072	else if (cachep->buffer_size > PAGE_SIZE)
4073		limit = 8;
4074	else if (cachep->buffer_size > 1024)
4075		limit = 24;
4076	else if (cachep->buffer_size > 256)
4077		limit = 54;
4078	else
4079		limit = 120;
4080
4081	/*
4082	 * CPU bound tasks (e.g. network routing) can exhibit cpu bound
4083	 * CPU bound tasks (e.g. network routing) can exhibit asymmetric
4084	 * allocation behaviour: most allocs on one cpu, most free operations
4085	 * on another cpu. For these cases, efficient object passing between
4086	 * replaces Bonwick's magazine layer.
4087	 * On uniprocessor, it's functionally equivalent (but less efficient)
4088	 * to a larger limit. Thus disabled by default.
4089	 */
4090	shared = 0;
4091	if (cachep->buffer_size <= PAGE_SIZE && num_possible_cpus() > 1)
4092		shared = 8;
4093
4094#if DEBUG
4095	/*
4096	 * With debugging enabled, a large batchcount leads to excessively long
4097	 * periods with local interrupts disabled. Limit the batchcount.
4098	 */
4099	if (limit > 32)
4100		limit = 32;
4101#endif
4102	err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared, gfp);
4103	if (err)
4104		printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
4105		       cachep->name, -err);
4106	return err;
4107}
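
/*
 * Worked example of the tuning above (illustrative): for a cache with
 * buffer_size == 512 the table selects limit = 54, do_tune_cpucache() is
 * then called with batchcount = (54 + 1) / 2 = 27, and on SMP (buffer_size
 * <= PAGE_SIZE) the shared array factor is 8.
 */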
4108
4109/*
4110 * Drain an array if it contains any elements taking the l3 lock only if
4111 * necessary. Note that the l3 listlock also protects the array_cache
4112 * if drain_array() is used on the shared array.
4113 */
4114static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
4115			 struct array_cache *ac, int force, int node)
4116{
4117	int tofree;
4118
4119	if (!ac || !ac->avail)
4120		return;
4121	if (ac->touched && !force) {
4122		ac->touched = 0;
4123	} else {
4124		spin_lock_irq(&l3->list_lock);
4125		if (ac->avail) {
4126			tofree = force ? ac->avail : (ac->limit + 4) / 5;
4127			if (tofree > ac->avail)
4128				tofree = (ac->avail + 1) / 2;
4129			free_block(cachep, ac->entry, tofree, node);
4130			ac->avail -= tofree;
4131			memmove(ac->entry, &(ac->entry[tofree]),
4132				sizeof(void *) * ac->avail);
4133		}
4134		spin_unlock_irq(&l3->list_lock);
4135	}
4136}
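
/*
 * Worked example (illustrative): for a per-cpu array with limit 120 that
 * was not touched since the last reap, a non-forced drain frees
 * (120 + 4) / 5 = 24 objects; if fewer than 24 are held, roughly half of
 * ac->avail is freed instead.
 */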
4137
4138/**
4139 * cache_reap - Reclaim memory from caches.
4140 * @w: work descriptor
4141 *
4142 * Called from workqueue/eventd every few seconds.
4143 * Purpose:
4144 * - clear the per-cpu caches for this CPU.
4145 * - return freeable pages to the main free memory pool.
4146 *
4147 * If we cannot acquire the cache chain mutex then just give up - we'll try
4148 * again on the next iteration.
4149 */
4150static void cache_reap(struct work_struct *w)
4151{
4152	struct kmem_cache *searchp;
4153	struct kmem_list3 *l3;
4154	int node = numa_mem_id();
4155	struct delayed_work *work = to_delayed_work(w);
4156
4157	if (!mutex_trylock(&cache_chain_mutex))
4158		/* Give up. Setup the next iteration. */
4159		/* Give up. Set up the next iteration. */
4160
4161	list_for_each_entry(searchp, &cache_chain, next) {
4162		check_irq_on();
4163
4164		/*
4165		 * We only take the l3 lock if absolutely necessary and we
4166		 * have established with reasonable certainty that
4167		 * we can do some work if the lock was obtained.
4168		 */
4169		l3 = searchp->nodelists[node];
4170
4171		reap_alien(searchp, l3);
4172
4173		drain_array(searchp, l3, cpu_cache_get(searchp), 0, node);
4174
4175		/*
4176		 * These are racy checks but it does not matter
4177		 * if we skip one check or scan twice.
4178		 */
4179		if (time_after(l3->next_reap, jiffies))
4180			goto next;
4181
4182		l3->next_reap = jiffies + REAPTIMEOUT_LIST3;
4183
4184		drain_array(searchp, l3, l3->shared, 0, node);
4185
4186		if (l3->free_touched)
4187			l3->free_touched = 0;
4188		else {
4189			int freed;
4190
4191			freed = drain_freelist(searchp, l3, (l3->free_limit +
4192				5 * searchp->num - 1) / (5 * searchp->num));
4193			STATS_ADD_REAPED(searchp, freed);
4194		}
4195next:
4196		cond_resched();
4197	}
4198	check_irq_on();
4199	mutex_unlock(&cache_chain_mutex);
4200	next_reap_node();
4201out:
4202	/* Set up the next iteration */
4203	schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_CPUC));
4204}
4205
4206#ifdef CONFIG_SLABINFO
4207
4208static void print_slabinfo_header(struct seq_file *m)
4209{
4210	/*
4211	 * Output format version, so at least we can change it
4212	 * without _too_ many complaints.
4213	 */
4214#if STATS
4215	seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
4216#else
4217	seq_puts(m, "slabinfo - version: 2.1\n");
4218#endif
4219	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> "
4220		 "<objperslab> <pagesperslab>");
4221	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
4222	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
4223#if STATS
4224	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
4225		 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
4226	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
4227#endif
4228	seq_putc(m, '\n');
4229}
4230
4231static void *s_start(struct seq_file *m, loff_t *pos)
4232{
4233	loff_t n = *pos;
4234
4235	mutex_lock(&cache_chain_mutex);
4236	if (!n)
4237		print_slabinfo_header(m);
4238
4239	return seq_list_start(&cache_chain, *pos);
4240}
4241
4242static void *s_next(struct seq_file *m, void *p, loff_t *pos)
4243{
4244	return seq_list_next(p, &cache_chain, pos);
4245}
4246
4247static void s_stop(struct seq_file *m, void *p)
4248{
4249	mutex_unlock(&cache_chain_mutex);
4250}
4251
4252static int s_show(struct seq_file *m, void *p)
4253{
4254	struct kmem_cache *cachep = list_entry(p, struct kmem_cache, next);
4255	struct slab *slabp;
4256	unsigned long active_objs;
4257	unsigned long num_objs;
4258	unsigned long active_slabs = 0;
4259	unsigned long num_slabs, free_objects = 0, shared_avail = 0;
4260	const char *name;
4261	char *error = NULL;
4262	int node;
4263	struct kmem_list3 *l3;
4264
4265	active_objs = 0;
4266	num_slabs = 0;
4267	for_each_online_node(node) {
4268		l3 = cachep->nodelists[node];
4269		if (!l3)
4270			continue;
4271
4272		check_irq_on();
4273		spin_lock_irq(&l3->list_lock);
4274
4275		list_for_each_entry(slabp, &l3->slabs_full, list) {
4276			if (slabp->inuse != cachep->num && !error)
4277				error = "slabs_full accounting error";
4278			active_objs += cachep->num;
4279			active_slabs++;
4280		}
4281		list_for_each_entry(slabp, &l3->slabs_partial, list) {
4282			if (slabp->inuse == cachep->num && !error)
4283				error = "slabs_partial inuse accounting error";
4284			if (!slabp->inuse && !error)
4285				error = "slabs_partial/inuse accounting error";
4286			active_objs += slabp->inuse;
4287			active_slabs++;
4288		}
4289		list_for_each_entry(slabp, &l3->slabs_free, list) {
4290			if (slabp->inuse && !error)
4291				error = "slabs_free/inuse accounting error";
4292			num_slabs++;
4293		}
4294		free_objects += l3->free_objects;
4295		if (l3->shared)
4296			shared_avail += l3->shared->avail;
4297
4298		spin_unlock_irq(&l3->list_lock);
4299	}
4300	num_slabs += active_slabs;
4301	num_objs = num_slabs * cachep->num;
4302	if (num_objs - active_objs != free_objects && !error)
4303		error = "free_objects accounting error";
4304
4305	name = cachep->name;
4306	if (error)
4307		printk(KERN_ERR "slab: cache %s error: %s\n", name, error);
4308
4309	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
4310		   name, active_objs, num_objs, cachep->buffer_size,
4311		   cachep->num, (1 << cachep->gfporder));
4312	seq_printf(m, " : tunables %4u %4u %4u",
4313		   cachep->limit, cachep->batchcount, cachep->shared);
4314	seq_printf(m, " : slabdata %6lu %6lu %6lu",
4315		   active_slabs, num_slabs, shared_avail);
4316#if STATS
4317	{			/* list3 stats */
4318		unsigned long high = cachep->high_mark;
4319		unsigned long allocs = cachep->num_allocations;
4320		unsigned long grown = cachep->grown;
4321		unsigned long reaped = cachep->reaped;
4322		unsigned long errors = cachep->errors;
4323		unsigned long max_freeable = cachep->max_freeable;
4324		unsigned long node_allocs = cachep->node_allocs;
4325		unsigned long node_frees = cachep->node_frees;
4326		unsigned long overflows = cachep->node_overflow;
4327
4328		seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu "
4329			   "%4lu %4lu %4lu %4lu %4lu",
4330			   allocs, high, grown,
4331			   reaped, errors, max_freeable, node_allocs,
4332			   node_frees, overflows);
4333	}
4334	/* cpu stats */
4335	{
4336		unsigned long allochit = atomic_read(&cachep->allochit);
4337		unsigned long allocmiss = atomic_read(&cachep->allocmiss);
4338		unsigned long freehit = atomic_read(&cachep->freehit);
4339		unsigned long freemiss = atomic_read(&cachep->freemiss);
4340
4341		seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
4342			   allochit, allocmiss, freehit, freemiss);
4343	}
4344#endif
4345	seq_putc(m, '\n');
4346	return 0;
4347}
4348
4349/*
4350 * slabinfo_op - iterator that generates /proc/slabinfo
4351 *
4352 * Output layout:
4353 * cache-name
4354 * num-active-objs
4355 * total-objs
4356 * object size
4357 * num-active-slabs
4358 * total-slabs
4359 * num-pages-per-slab
4360 * + further values on SMP and with statistics enabled
4361 */
4362
4363static const struct seq_operations slabinfo_op = {
4364	.start = s_start,
4365	.next = s_next,
4366	.stop = s_stop,
4367	.show = s_show,
4368};
4369
4370#define MAX_SLABINFO_WRITE 128
4371/**
4372 * slabinfo_write - Tuning for the slab allocator
4373 * @file: unused
4374 * @buffer: user buffer
4375 * @count: data length
4376 * @ppos: unused
4377 */
4378static ssize_t slabinfo_write(struct file *file, const char __user *buffer,
4379		       size_t count, loff_t *ppos)
4380{
4381	char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
4382	int limit, batchcount, shared, res;
4383	struct kmem_cache *cachep;
4384
4385	if (count > MAX_SLABINFO_WRITE)
4386		return -EINVAL;
4387	if (copy_from_user(&kbuf, buffer, count))
4388		return -EFAULT;
4389	kbuf[MAX_SLABINFO_WRITE] = '\0';
4390
4391	tmp = strchr(kbuf, ' ');
4392	if (!tmp)
4393		return -EINVAL;
4394	*tmp = '\0';
4395	tmp++;
4396	if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
4397		return -EINVAL;
4398
4399	/* Find the cache in the chain of caches. */
4400	mutex_lock(&cache_chain_mutex);
4401	res = -EINVAL;
4402	list_for_each_entry(cachep, &cache_chain, next) {
4403		if (!strcmp(cachep->name, kbuf)) {
4404			if (limit < 1 || batchcount < 1 ||
4405					batchcount > limit || shared < 0) {
4406				res = 0;
4407			} else {
4408				res = do_tune_cpucache(cachep, limit,
4409						       batchcount, shared,
4410						       GFP_KERNEL);
4411			}
4412			break;
4413		}
4414	}
4415	mutex_unlock(&cache_chain_mutex);
4416	if (res >= 0)
4417		res = count;
4418	return res;
4419}
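
/*
 * Illustrative usage: the tunables of a cache can be changed from user
 * space (root only, see the S_IWUSR mode in slab_proc_init()) by writing
 * "cache-name limit batchcount shared" to /proc/slabinfo, e.g.
 *
 *	echo "dentry 128 64 8" > /proc/slabinfo
 */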
4420
4421static int slabinfo_open(struct inode *inode, struct file *file)
4422{
4423	return seq_open(file, &slabinfo_op);
4424}
4425
4426static const struct file_operations proc_slabinfo_operations = {
4427	.open		= slabinfo_open,
4428	.read		= seq_read,
4429	.write		= slabinfo_write,
4430	.llseek		= seq_lseek,
4431	.release	= seq_release,
4432};
4433
4434#ifdef CONFIG_DEBUG_SLAB_LEAK
4435
4436static void *leaks_start(struct seq_file *m, loff_t *pos)
4437{
4438	mutex_lock(&cache_chain_mutex);
4439	return seq_list_start(&cache_chain, *pos);
4440}
4441
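/*
 * Layout of the caller table used by add_caller()/handle_slab() below
 * (inferred from the code): n[0] holds the capacity of the table in
 * entries, n[1] the number of entries in use, and from n[2] onwards the
 * table stores (caller address, hit count) pairs sorted by address.
 * add_caller() binary searches for the address and either bumps its count
 * or inserts a new pair.
 */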
4442static inline int add_caller(unsigned long *n, unsigned long v)
4443{
4444	unsigned long *p;
4445	int l;
4446	if (!v)
4447		return 1;
4448	l = n[1];
4449	p = n + 2;
4450	while (l) {
4451		int i = l/2;
4452		unsigned long *q = p + 2 * i;
4453		if (*q == v) {
4454			q[1]++;
4455			return 1;
4456		}
4457		if (*q > v) {
4458			l = i;
4459		} else {
4460			p = q + 2;
4461			l -= i + 1;
4462		}
4463	}
4464	if (++n[1] == n[0])
4465		return 0;
4466	memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n));
4467	p[0] = v;
4468	p[1] = 1;
4469	return 1;
4470}
4471
4472static void handle_slab(unsigned long *n, struct kmem_cache *c, struct slab *s)
4473{
4474	void *p;
4475	int i;
4476	if (n[0] == n[1])
4477		return;
4478	for (i = 0, p = s->s_mem; i < c->num; i++, p += c->buffer_size) {
4479		if (slab_bufctl(s)[i] != BUFCTL_ACTIVE)
4480			continue;
4481		if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
4482			return;
4483	}
4484}
4485
4486static void show_symbol(struct seq_file *m, unsigned long address)
4487{
4488#ifdef CONFIG_KALLSYMS
4489	unsigned long offset, size;
4490	char modname[MODULE_NAME_LEN], name[KSYM_NAME_LEN];
4491
4492	if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) {
4493		seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
4494		if (modname[0])
4495			seq_printf(m, " [%s]", modname);
4496		return;
4497	}
4498#endif
4499	seq_printf(m, "%p", (void *)address);
4500}
4501
4502static int leaks_show(struct seq_file *m, void *p)
4503{
4504	struct kmem_cache *cachep = list_entry(p, struct kmem_cache, next);
4505	struct slab *slabp;
4506	struct kmem_list3 *l3;
4507	const char *name;
4508	unsigned long *n = m->private;
4509	int node;
4510	int i;
4511
4512	if (!(cachep->flags & SLAB_STORE_USER))
4513		return 0;
4514	if (!(cachep->flags & SLAB_RED_ZONE))
4515		return 0;
4516
4517	/* OK, we can do it */
4518
4519	n[1] = 0;
4520
4521	for_each_online_node(node) {
4522		l3 = cachep->nodelists[node];
4523		if (!l3)
4524			continue;
4525
4526		check_irq_on();
4527		spin_lock_irq(&l3->list_lock);
4528
4529		list_for_each_entry(slabp, &l3->slabs_full, list)
4530			handle_slab(n, cachep, slabp);
4531		list_for_each_entry(slabp, &l3->slabs_partial, list)
4532			handle_slab(n, cachep, slabp);
4533		spin_unlock_irq(&l3->list_lock);
4534	}
4535	name = cachep->name;
4536	if (n[0] == n[1]) {
4537		/* Increase the buffer size */
4538		mutex_unlock(&cache_chain_mutex);
4539		m->private = kzalloc(n[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
4540		if (!m->private) {
4541			/* Too bad, we are really out */
4542			m->private = n;
4543			mutex_lock(&cache_chain_mutex);
4544			return -ENOMEM;
4545		}
4546		*(unsigned long *)m->private = n[0] * 2;
4547		kfree(n);
4548		mutex_lock(&cache_chain_mutex);
4549		/* Now make sure this entry will be retried */
4550		m->count = m->size;
4551		return 0;
4552	}
4553	for (i = 0; i < n[1]; i++) {
4554		seq_printf(m, "%s: %lu ", name, n[2*i+3]);
4555		show_symbol(m, n[2*i+2]);
4556		seq_putc(m, '\n');
4557	}
4558
4559	return 0;
4560}
4561
4562static const struct seq_operations slabstats_op = {
4563	.start = leaks_start,
4564	.next = s_next,
4565	.stop = s_stop,
4566	.show = leaks_show,
4567};
4568
4569static int slabstats_open(struct inode *inode, struct file *file)
4570{
4571	unsigned long *n = kzalloc(PAGE_SIZE, GFP_KERNEL);
4572	int ret = -ENOMEM;
4573	if (n) {
4574		ret = seq_open(file, &slabstats_op);
4575		if (!ret) {
4576			struct seq_file *m = file->private_data;
4577			*n = PAGE_SIZE / (2 * sizeof(unsigned long));
4578			m->private = n;
4579			n = NULL;
4580		}
4581		kfree(n);
4582	}
4583	return ret;
4584}
4585
4586static const struct file_operations proc_slabstats_operations = {
4587	.open		= slabstats_open,
4588	.read		= seq_read,
4589	.llseek		= seq_lseek,
4590	.release	= seq_release_private,
4591};
4592#endif
4593
4594static int __init slab_proc_init(void)
4595{
4596	proc_create("slabinfo",S_IWUSR|S_IRUSR,NULL,&proc_slabinfo_operations);
4597#ifdef CONFIG_DEBUG_SLAB_LEAK
4598	proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
4599#endif
4600	return 0;
4601}
4602module_init(slab_proc_init);
4603#endif
4604
4605/**
4606 * ksize - get the actual amount of memory allocated for a given object
4607 * @objp: Pointer to the object
4608 *
4609 * kmalloc may internally round up allocations and return more memory
4610 * than requested. ksize() can be used to determine the actual amount of
4611 * memory allocated. The caller may use this additional memory, even though
4612 * a smaller amount of memory was initially specified with the kmalloc call.
4613 * The caller must guarantee that objp points to a valid object previously
4614 * allocated with either kmalloc() or kmem_cache_alloc(). The object
4615 * must not be freed during the duration of the call.
4616 */
4617size_t ksize(const void *objp)
4618{
4619	BUG_ON(!objp);
4620	if (unlikely(objp == ZERO_SIZE_PTR))
4621		return 0;
4622
4623	return obj_size(virt_to_cache(objp));
4624}
4625EXPORT_SYMBOL(ksize);
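
/*
 * Illustrative sketch: kmalloc() rounds the request up to the next general
 * cache size, and ksize() reports that usable size.
 *
 *	char *buf = kmalloc(17, GFP_KERNEL);
 *	size_t usable = buf ? ksize(buf) : 0;	(typically 32 here)
 *
 * The caller may use all 'usable' bytes even though only 17 were requested.
 */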
4626