slub.c revision 49bd5221ce8fb55d12c04a3ffd375201c5bbfb7a
1/*
2 * SLUB: A slab allocator that limits cache line use instead of queuing
3 * objects in per cpu and per node lists.
4 *
5 * The allocator synchronizes using per slab locks and only
6 * uses a centralized lock to manage a pool of partial slabs.
7 *
8 * (C) 2007 SGI, Christoph Lameter <clameter@sgi.com>
9 */
10
11#include <linux/mm.h>
12#include <linux/module.h>
13#include <linux/bit_spinlock.h>
14#include <linux/interrupt.h>
15#include <linux/bitops.h>
16#include <linux/slab.h>
17#include <linux/seq_file.h>
18#include <linux/cpu.h>
19#include <linux/cpuset.h>
20#include <linux/mempolicy.h>
21#include <linux/ctype.h>
22#include <linux/kallsyms.h>
23#include <linux/memory.h>
24
25/*
26 * Lock order:
27 *   1. slab_lock(page)
28 *   2. slab->list_lock
29 *
30 *   The slab_lock protects operations on the object of a particular
31 *   slab and its metadata in the page struct. If the slab lock
32 *   has been taken then no allocations nor frees can be performed
33 *   on the objects in the slab nor can the slab be added or removed
34 *   from the partial or full lists since this would mean modifying
35 *   the page struct of the slab.
36 *
37 *   The list_lock protects the partial and full list on each node and
38 *   the partial slab counter. If taken then no slabs may be added to or
39 *   removed from the lists nor can the number of partial slabs be modified.
40 *   (Note that the total number of slabs is an atomic value that may be
41 *   modified without taking the list lock).
42 *
43 *   The list_lock is a centralized lock and thus we avoid taking it as
44 *   much as possible. As long as SLUB does not have to handle partial
45 *   slabs, operations can continue without any centralized lock. F.e.
46 *   allocating a long series of objects that fill up slabs does not require
47 *   the list lock.
48 *
49 *   The lock order is sometimes inverted when we are trying to get a slab
50 *   off a list. We take the list_lock and then look for a page on the list
51 *   to use. While we do that objects in the slabs may be freed. We can
52 *   only operate on the slab if we have also taken the slab_lock. So we use
53 *   a slab_trylock() on the slab. If trylock was successful then no frees
54 *   can occur anymore and we can use the slab for allocations etc. If the
55 *   slab_trylock() does not succeed then frees are in progress in the slab and
56 *   we must stay away from it for a while since we may cause a bouncing
57 *   cacheline if we try to acquire the lock. So go onto the next slab.
58 *   If all pages are busy then we may allocate a new slab instead of reusing
59 *   a partial slab. A new slab has no one operating on it and thus there is
60 *   no danger of cacheline contention.
61 *
62 *   Interrupts are disabled during allocation and deallocation in order to
63 *   make the slab allocator safe to use in the context of an irq. In addition
64 *   interrupts are disabled to ensure that the processor does not change
65 *   while handling per_cpu slabs, due to kernel preemption.
66 *
67 * SLUB assigns one slab for allocation to each processor.
68 * Allocations only occur from these slabs called cpu slabs.
69 *
70 * Slabs with free elements are kept on a partial list and during regular
71 * operations no list for full slabs is used. If an object in a full slab is
72 * freed then the slab will show up again on the partial lists.
73 * We track full slabs for debugging purposes though because otherwise we
74 * cannot scan all objects.
75 *
76 * Slabs are freed when they become empty. Teardown and setup is
77 * minimal so we rely on the page allocators per cpu caches for
78 * fast frees and allocs.
79 *
80 * Overloading of page flags that are otherwise used for LRU management.
81 *
82 * PageActive 		The slab is frozen and exempt from list processing.
83 * 			This means that the slab is dedicated to a purpose
84 * 			such as satisfying allocations for a specific
85 * 			processor. Objects may be freed in the slab while
86 * 			it is frozen but slab_free will then skip the usual
87 * 			list operations. It is up to the processor holding
88 * 			the slab to integrate the slab into the slab lists
89 * 			when the slab is no longer needed.
90 *
91 * 			One use of this flag is to mark slabs that are
92 * 			used for allocations. Then such a slab becomes a cpu
93 * 			slab. The cpu slab may be equipped with an additional
94 * 			freelist that allows lockless access to
95 * 			free objects in addition to the regular freelist
96 * 			that requires the slab lock.
97 *
98 * PageError		Slab requires special handling due to debug
99 * 			options set. This moves slab handling out of
100 * 			the fast path and disables lockless freelists.
101 */
102
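/*
 * A minimal sketch (not compiled) of the inverted lock order described
 * above, i.e. the pattern used by get_partial_node()/lock_and_freeze_slab()
 * further down: with list_lock held, a slab may only be taken with
 * slab_trylock(), never slab_lock(), to avoid deadlock and cacheline
 * bouncing against concurrent frees.
 */
#if 0
static struct page *sketch_take_partial(struct kmem_cache_node *n)
{
	struct page *page;

	spin_lock(&n->list_lock);
	list_for_each_entry(page, &n->partial, lru) {
		if (slab_trylock(page)) {
			list_del(&page->lru);
			n->nr_partial--;
			spin_unlock(&n->list_lock);
			return page;	/* returned with the slab lock held */
		}
	}
	spin_unlock(&n->list_lock);
	return NULL;
}
#endif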
103#define FROZEN (1 << PG_active)
104
105#ifdef CONFIG_SLUB_DEBUG
106#define SLABDEBUG (1 << PG_error)
107#else
108#define SLABDEBUG 0
109#endif
110
111static inline int SlabFrozen(struct page *page)
112{
113	return page->flags & FROZEN;
114}
115
116static inline void SetSlabFrozen(struct page *page)
117{
118	page->flags |= FROZEN;
119}
120
121static inline void ClearSlabFrozen(struct page *page)
122{
123	page->flags &= ~FROZEN;
124}
125
126static inline int SlabDebug(struct page *page)
127{
128	return page->flags & SLABDEBUG;
129}
130
131static inline void SetSlabDebug(struct page *page)
132{
133	page->flags |= SLABDEBUG;
134}
135
136static inline void ClearSlabDebug(struct page *page)
137{
138	page->flags &= ~SLABDEBUG;
139}
140
141/*
142 * Issues still to be resolved:
143 *
144 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
145 *
146 * - Variable sizing of the per node arrays
147 */
148
149/* Enable to test recovery from slab corruption on boot */
150#undef SLUB_RESILIENCY_TEST
151
152#if PAGE_SHIFT <= 12
153
154/*
155 * Small page size. Make sure that we do not fragment memory
156 */
157#define DEFAULT_MAX_ORDER 1
158#define DEFAULT_MIN_OBJECTS 4
159
160#else
161
162/*
163 * Large page machines are customarily able to handle larger
164 * page orders.
165 */
166#define DEFAULT_MAX_ORDER 2
167#define DEFAULT_MIN_OBJECTS 8
168
169#endif
170
171/*
172 * Minimum number of partial slabs. These will be left on the partial
173 * lists even if they are empty. kmem_cache_shrink may reclaim them.
174 */
175#define MIN_PARTIAL 5
176
177/*
178 * Maximum number of desirable partial slabs.
179 * The existence of more partial slabs makes kmem_cache_shrink
180 * sort the partial list by the number of objects in them.
181 */
182#define MAX_PARTIAL 10
183
184#define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \
185				SLAB_POISON | SLAB_STORE_USER)
186
187/*
188 * Set of flags that will prevent slab merging
189 */
190#define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
191		SLAB_TRACE | SLAB_DESTROY_BY_RCU)
192
193#define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
194		SLAB_CACHE_DMA)
195
196#ifndef ARCH_KMALLOC_MINALIGN
197#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
198#endif
199
200#ifndef ARCH_SLAB_MINALIGN
201#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
202#endif
203
204/* Internal SLUB flags */
205#define __OBJECT_POISON		0x80000000 /* Poison object */
206#define __SYSFS_ADD_DEFERRED	0x40000000 /* Not yet visible via sysfs */
207#define __KMALLOC_CACHE		0x20000000 /* objects freed using kfree */
208#define __PAGE_ALLOC_FALLBACK	0x10000000 /* Allow fallback to page alloc */
209
210/* Not all arches define cache_line_size */
211#ifndef cache_line_size
212#define cache_line_size()	L1_CACHE_BYTES
213#endif
214
215static int kmem_size = sizeof(struct kmem_cache);
216
217#ifdef CONFIG_SMP
218static struct notifier_block slab_notifier;
219#endif
220
221static enum {
222	DOWN,		/* No slab functionality available */
223	PARTIAL,	/* kmem_cache_open() works but kmalloc does not */
224	UP,		/* Everything works but does not show up in sysfs */
225	SYSFS		/* Sysfs up */
226} slab_state = DOWN;
227
228/* A list of all slab caches on the system */
229static DECLARE_RWSEM(slub_lock);
230static LIST_HEAD(slab_caches);
231
232/*
233 * Tracking user of a slab.
234 */
235struct track {
236	void *addr;		/* Called from address */
237	int cpu;		/* Was running on cpu */
238	int pid;		/* Pid context */
239	unsigned long when;	/* When did the operation occur */
240};
241
242enum track_item { TRACK_ALLOC, TRACK_FREE };
243
244#if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
245static int sysfs_slab_add(struct kmem_cache *);
246static int sysfs_slab_alias(struct kmem_cache *, const char *);
247static void sysfs_slab_remove(struct kmem_cache *);
248
249#else
250static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
251static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
252							{ return 0; }
253static inline void sysfs_slab_remove(struct kmem_cache *s)
254{
255	kfree(s);
256}
257
258#endif
259
260static inline void stat(struct kmem_cache_cpu *c, enum stat_item si)
261{
262#ifdef CONFIG_SLUB_STATS
263	c->stat[si]++;
264#endif
265}
266
267/********************************************************************
268 * 			Core slab cache functions
269 *******************************************************************/
270
271int slab_is_available(void)
272{
273	return slab_state >= UP;
274}
275
276static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
277{
278#ifdef CONFIG_NUMA
279	return s->node[node];
280#else
281	return &s->local_node;
282#endif
283}
284
285static inline struct kmem_cache_cpu *get_cpu_slab(struct kmem_cache *s, int cpu)
286{
287#ifdef CONFIG_SMP
288	return s->cpu_slab[cpu];
289#else
290	return &s->cpu_slab;
291#endif
292}
293
294/* Verify that a pointer has an address that is valid within a slab page */
295static inline int check_valid_pointer(struct kmem_cache *s,
296				struct page *page, const void *object)
297{
298	void *base;
299
300	if (!object)
301		return 1;
302
303	base = page_address(page);
304	if (object < base || object >= base + s->objects * s->size ||
305		(object - base) % s->size) {
306		return 0;
307	}
308
309	return 1;
310}
311
312/*
313 * Slow version of get and set free pointer.
314 *
315 * This version requires touching the cache lines of kmem_cache which
316 * we avoid touching in the fast alloc/free paths. There we obtain the offset
317 * from the page struct.
318 */
319static inline void *get_freepointer(struct kmem_cache *s, void *object)
320{
321	return *(void **)(object + s->offset);
322}
323
324static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
325{
326	*(void **)(object + s->offset) = fp;
327}
328
329/* Loop over all objects in a slab */
330#define for_each_object(__p, __s, __addr) \
331	for (__p = (__addr); __p < (__addr) + (__s)->objects * (__s)->size;\
332			__p += (__s)->size)
333
334/* Scan freelist */
335#define for_each_free_object(__p, __s, __free) \
336	for (__p = (__free); __p; __p = get_freepointer((__s), __p))
337
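/*
 * Sketch (not compiled): each free object stores the address of the next
 * free object at offset s->offset inside itself, so walking a freelist is
 * pure pointer chasing. Counting the free objects of a page would look
 * like this:
 */
#if 0
static int sketch_count_free(struct kmem_cache *s, struct page *page)
{
	void *p;
	int nr = 0;

	for_each_free_object(p, s, page->freelist)
		nr++;
	return nr;
}
#endif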
338/* Determine object index from a given position */
339static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
340{
341	return (p - addr) / s->size;
342}
343
344#ifdef CONFIG_SLUB_DEBUG
345/*
346 * Debug settings:
347 */
348#ifdef CONFIG_SLUB_DEBUG_ON
349static int slub_debug = DEBUG_DEFAULT_FLAGS;
350#else
351static int slub_debug;
352#endif
353
354static char *slub_debug_slabs;
355
356/*
357 * Object debugging
358 */
359static void print_section(char *text, u8 *addr, unsigned int length)
360{
361	int i, offset;
362	int newline = 1;
363	char ascii[17];
364
365	ascii[16] = 0;
366
367	for (i = 0; i < length; i++) {
368		if (newline) {
369			printk(KERN_ERR "%8s 0x%p: ", text, addr + i);
370			newline = 0;
371		}
372		printk(KERN_CONT " %02x", addr[i]);
373		offset = i % 16;
374		ascii[offset] = isgraph(addr[i]) ? addr[i] : '.';
375		if (offset == 15) {
376			printk(KERN_CONT " %s\n", ascii);
377			newline = 1;
378		}
379	}
380	if (!newline) {
381		i %= 16;
382		while (i < 16) {
383			printk(KERN_CONT "   ");
384			ascii[i] = ' ';
385			i++;
386		}
387		printk(KERN_CONT " %s\n", ascii);
388	}
389}
390
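/*
 * Example of the dump format produced above (illustrative address): 16
 * bytes per line, hex on the left, the printable-ASCII rendering on the
 * right, e.g. for a poisoned (0x6b = 'k') region of a freed object:
 *
 *   Object 0xffff810052f2b000:  6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk
 */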
391static struct track *get_track(struct kmem_cache *s, void *object,
392	enum track_item alloc)
393{
394	struct track *p;
395
396	if (s->offset)
397		p = object + s->offset + sizeof(void *);
398	else
399		p = object + s->inuse;
400
401	return p + alloc;
402}
403
404static void set_track(struct kmem_cache *s, void *object,
405				enum track_item alloc, void *addr)
406{
407	struct track *p;
408
409	if (s->offset)
410		p = object + s->offset + sizeof(void *);
411	else
412		p = object + s->inuse;
413
414	p += alloc;
415	if (addr) {
416		p->addr = addr;
417		p->cpu = smp_processor_id();
418		p->pid = current ? current->pid : -1;
419		p->when = jiffies;
420	} else
421		memset(p, 0, sizeof(struct track));
422}
423
424static void init_tracking(struct kmem_cache *s, void *object)
425{
426	if (!(s->flags & SLAB_STORE_USER))
427		return;
428
429	set_track(s, object, TRACK_FREE, NULL);
430	set_track(s, object, TRACK_ALLOC, NULL);
431}
432
433static void print_track(const char *s, struct track *t)
434{
435	if (!t->addr)
436		return;
437
438	printk(KERN_ERR "INFO: %s in ", s);
439	__print_symbol("%s", (unsigned long)t->addr);
440	printk(" age=%lu cpu=%u pid=%d\n", jiffies - t->when, t->cpu, t->pid);
441}
442
443static void print_tracking(struct kmem_cache *s, void *object)
444{
445	if (!(s->flags & SLAB_STORE_USER))
446		return;
447
448	print_track("Allocated", get_track(s, object, TRACK_ALLOC));
449	print_track("Freed", get_track(s, object, TRACK_FREE));
450}
451
452static void print_page_info(struct page *page)
453{
454	printk(KERN_ERR "INFO: Slab 0x%p used=%u fp=0x%p flags=0x%04lx\n",
455		page, page->inuse, page->freelist, page->flags);
456
457}
458
459static void slab_bug(struct kmem_cache *s, char *fmt, ...)
460{
461	va_list args;
462	char buf[100];
463
464	va_start(args, fmt);
465	vsnprintf(buf, sizeof(buf), fmt, args);
466	va_end(args);
467	printk(KERN_ERR "========================================"
468			"=====================================\n");
469	printk(KERN_ERR "BUG %s: %s\n", s->name, buf);
470	printk(KERN_ERR "----------------------------------------"
471			"-------------------------------------\n\n");
472}
473
474static void slab_fix(struct kmem_cache *s, char *fmt, ...)
475{
476	va_list args;
477	char buf[100];
478
479	va_start(args, fmt);
480	vsnprintf(buf, sizeof(buf), fmt, args);
481	va_end(args);
482	printk(KERN_ERR "FIX %s: %s\n", s->name, buf);
483}
484
485static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
486{
487	unsigned int off;	/* Offset of last byte */
488	u8 *addr = page_address(page);
489
490	print_tracking(s, p);
491
492	print_page_info(page);
493
494	printk(KERN_ERR "INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
495			p, p - addr, get_freepointer(s, p));
496
497	if (p > addr + 16)
498		print_section("Bytes b4", p - 16, 16);
499
500	print_section("Object", p, min(s->objsize, 128));
501
502	if (s->flags & SLAB_RED_ZONE)
503		print_section("Redzone", p + s->objsize,
504			s->inuse - s->objsize);
505
506	if (s->offset)
507		off = s->offset + sizeof(void *);
508	else
509		off = s->inuse;
510
511	if (s->flags & SLAB_STORE_USER)
512		off += 2 * sizeof(struct track);
513
514	if (off != s->size)
515		/* Beginning of the filler is the free pointer */
516		print_section("Padding", p + off, s->size - off);
517
518	dump_stack();
519}
520
521static void object_err(struct kmem_cache *s, struct page *page,
522			u8 *object, char *reason)
523{
524	slab_bug(s, reason);
525	print_trailer(s, page, object);
526}
527
528static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...)
529{
530	va_list args;
531	char buf[100];
532
533	va_start(args, fmt);
534	vsnprintf(buf, sizeof(buf), fmt, args);
535	va_end(args);
536	slab_bug(s, "%s", buf);
537	print_page_info(page);
538	dump_stack();
539}
540
541static void init_object(struct kmem_cache *s, void *object, int active)
542{
543	u8 *p = object;
544
545	if (s->flags & __OBJECT_POISON) {
546		memset(p, POISON_FREE, s->objsize - 1);
547		p[s->objsize - 1] = POISON_END;
548	}
549
550	if (s->flags & SLAB_RED_ZONE)
551		memset(p + s->objsize,
552			active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE,
553			s->inuse - s->objsize);
554}
555
556static u8 *check_bytes(u8 *start, unsigned int value, unsigned int bytes)
557{
558	while (bytes) {
559		if (*start != (u8)value)
560			return start;
561		start++;
562		bytes--;
563	}
564	return NULL;
565}
566
567static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
568						void *from, void *to)
569{
570	slab_fix(s, "Restoring 0x%p-0x%p=0x%x", from, to - 1, data);
571	memset(from, data, to - from);
572}
573
574static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
575			u8 *object, char *what,
576			u8 *start, unsigned int value, unsigned int bytes)
577{
578	u8 *fault;
579	u8 *end;
580
581	fault = check_bytes(start, value, bytes);
582	if (!fault)
583		return 1;
584
585	end = start + bytes;
586	while (end > fault && end[-1] == value)
587		end--;
588
589	slab_bug(s, "%s overwritten", what);
590	printk(KERN_ERR "INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n",
591					fault, end - 1, fault[0], value);
592	print_trailer(s, page, object);
593
594	restore_bytes(s, what, value, fault, end);
595	return 0;
596}
597
598/*
599 * Object layout:
600 *
601 * object address
602 * 	Bytes of the object to be managed.
603 * 	If the freepointer may overlay the object then the free
604 * 	pointer is the first word of the object.
605 *
606 * 	Poisoning uses 0x6b (POISON_FREE) and the last byte is
607 * 	0xa5 (POISON_END)
608 *
609 * object + s->objsize
610 * 	Padding to reach word boundary. This is also used for Redzoning.
611 * 	Padding is extended by another word if Redzoning is enabled and
612 * 	objsize == inuse.
613 *
614 * 	We fill with 0xbb (RED_INACTIVE) for inactive objects and with
615 * 	0xcc (RED_ACTIVE) for objects in use.
616 *
617 * object + s->inuse
618 * 	Meta data starts here.
619 *
620 * 	A. Free pointer (if we cannot overwrite object on free)
621 * 	B. Tracking data for SLAB_STORE_USER
622 * 	C. Padding to reach required alignment boundary or at minimum
623 * 		one word if debugging is on to be able to detect writes
624 * 		before the word boundary.
625 *
626 *	Padding is done using 0x5a (POISON_INUSE)
627 *
628 * object + s->size
629 * 	Nothing is used beyond s->size.
630 *
631 * If slabcaches are merged then the objsize and inuse boundaries are mostly
632 * ignored. And therefore no slab options that rely on these boundaries
633 * may be used with merged slabcaches.
634 */
635
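/*
 * Worked example of the layout above, a sketch assuming a 64-bit build
 * (sizeof(void *) == 8, sizeof(struct track) == 24) and a cache created
 * with objsize = 24 and SLAB_POISON | SLAB_STORE_USER:
 *
 *   object +  0	24 poisoned bytes (0x6b ... 0x6b 0xa5 when free)
 *   object + 24	free pointer (s->offset = 24; poisoning forbids
 *			overlaying it on the object)
 *   object + 32	two struct track entries (alloc, then free)
 *   object + 80	s->size: start of the next object
 */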
636static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
637{
638	unsigned long off = s->inuse;	/* The end of info */
639
640	if (s->offset)
641		/* Freepointer is placed after the object. */
642		off += sizeof(void *);
643
644	if (s->flags & SLAB_STORE_USER)
645		/* We also have user information there */
646		off += 2 * sizeof(struct track);
647
648	if (s->size == off)
649		return 1;
650
651	return check_bytes_and_report(s, page, p, "Object padding",
652				p + off, POISON_INUSE, s->size - off);
653}
654
655static int slab_pad_check(struct kmem_cache *s, struct page *page)
656{
657	u8 *start;
658	u8 *fault;
659	u8 *end;
660	int length;
661	int remainder;
662
663	if (!(s->flags & SLAB_POISON))
664		return 1;
665
666	start = page_address(page);
667	end = start + (PAGE_SIZE << s->order);
668	length = s->objects * s->size;
669	remainder = end - (start + length);
670	if (!remainder)
671		return 1;
672
673	fault = check_bytes(start + length, POISON_INUSE, remainder);
674	if (!fault)
675		return 1;
676	while (end > fault && end[-1] == POISON_INUSE)
677		end--;
678
679	slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
680	print_section("Padding", start, length);
681
682	restore_bytes(s, "slab padding", POISON_INUSE, start, end);
683	return 0;
684}
685
686static int check_object(struct kmem_cache *s, struct page *page,
687					void *object, int active)
688{
689	u8 *p = object;
690	u8 *endobject = object + s->objsize;
691
692	if (s->flags & SLAB_RED_ZONE) {
693		unsigned int red =
694			active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE;
695
696		if (!check_bytes_and_report(s, page, object, "Redzone",
697			endobject, red, s->inuse - s->objsize))
698			return 0;
699	} else {
700		if ((s->flags & SLAB_POISON) && s->objsize < s->inuse) {
701			check_bytes_and_report(s, page, p, "Alignment padding",
702				endobject, POISON_INUSE, s->inuse - s->objsize);
703		}
704	}
705
706	if (s->flags & SLAB_POISON) {
707		if (!active && (s->flags & __OBJECT_POISON) &&
708			(!check_bytes_and_report(s, page, p, "Poison", p,
709					POISON_FREE, s->objsize - 1) ||
710			 !check_bytes_and_report(s, page, p, "Poison",
711				p + s->objsize - 1, POISON_END, 1)))
712			return 0;
713		/*
714		 * check_pad_bytes cleans up on its own.
715		 */
716		check_pad_bytes(s, page, p);
717	}
718
719	if (!s->offset && active)
720		/*
721		 * Object and freepointer overlap. Cannot check
722		 * freepointer while object is allocated.
723		 */
724		return 1;
725
726	/* Check free pointer validity */
727	if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
728		object_err(s, page, p, "Freepointer corrupt");
729		/*
730		 * No choice but to zap it and thus lose the remainder
731		 * of the free objects in this slab. May cause
732		 * another error because the object count is now wrong.
733		 */
734		set_freepointer(s, p, NULL);
735		return 0;
736	}
737	return 1;
738}
739
740static int check_slab(struct kmem_cache *s, struct page *page)
741{
742	VM_BUG_ON(!irqs_disabled());
743
744	if (!PageSlab(page)) {
745		slab_err(s, page, "Not a valid slab page");
746		return 0;
747	}
748	if (page->inuse > s->objects) {
749		slab_err(s, page, "inuse %u > max %u",
750			page->inuse, s->objects);
751		return 0;
752	}
753	/* Slab_pad_check fixes things up after itself */
754	slab_pad_check(s, page);
755	return 1;
756}
757
758/*
759 * Determine if a certain object on a page is on the freelist. Must hold the
760 * slab lock to guarantee that the chains are in a consistent state.
761 */
762static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
763{
764	int nr = 0;
765	void *fp = page->freelist;
766	void *object = NULL;
767
768	while (fp && nr <= s->objects) {
769		if (fp == search)
770			return 1;
771		if (!check_valid_pointer(s, page, fp)) {
772			if (object) {
773				object_err(s, page, object,
774					"Freechain corrupt");
775				set_freepointer(s, object, NULL);
776				break;
777			} else {
778				slab_err(s, page, "Freepointer corrupt");
779				page->freelist = NULL;
780				page->inuse = s->objects;
781				slab_fix(s, "Freelist cleared");
782				return 0;
783			}
784			break;
785		}
786		object = fp;
787		fp = get_freepointer(s, object);
788		nr++;
789	}
790
791	if (page->inuse != s->objects - nr) {
792		slab_err(s, page, "Wrong object count. Counter is %d but "
793			"%d were counted", page->inuse, s->objects - nr);
794		page->inuse = s->objects - nr;
795		slab_fix(s, "Object count adjusted.");
796	}
797	return search == NULL;
798}
799
800static void trace(struct kmem_cache *s, struct page *page, void *object, int alloc)
801{
802	if (s->flags & SLAB_TRACE) {
803		printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
804			s->name,
805			alloc ? "alloc" : "free",
806			object, page->inuse,
807			page->freelist);
808
809		if (!alloc)
810			print_section("Object", (void *)object, s->objsize);
811
812		dump_stack();
813	}
814}
815
816/*
817 * Tracking of fully allocated slabs for debugging purposes.
818 */
819static void add_full(struct kmem_cache_node *n, struct page *page)
820{
821	spin_lock(&n->list_lock);
822	list_add(&page->lru, &n->full);
823	spin_unlock(&n->list_lock);
824}
825
826static void remove_full(struct kmem_cache *s, struct page *page)
827{
828	struct kmem_cache_node *n;
829
830	if (!(s->flags & SLAB_STORE_USER))
831		return;
832
833	n = get_node(s, page_to_nid(page));
834
835	spin_lock(&n->list_lock);
836	list_del(&page->lru);
837	spin_unlock(&n->list_lock);
838}
839
840static void setup_object_debug(struct kmem_cache *s, struct page *page,
841								void *object)
842{
843	if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
844		return;
845
846	init_object(s, object, 0);
847	init_tracking(s, object);
848}
849
850static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
851						void *object, void *addr)
852{
853	if (!check_slab(s, page))
854		goto bad;
855
856	if (!on_freelist(s, page, object)) {
857		object_err(s, page, object, "Object already allocated");
858		goto bad;
859	}
860
861	if (!check_valid_pointer(s, page, object)) {
862		object_err(s, page, object, "Freelist Pointer check fails");
863		goto bad;
864	}
865
866	if (!check_object(s, page, object, 0))
867		goto bad;
868
869	/* Success. Perform special debug activities for allocs */
870	if (s->flags & SLAB_STORE_USER)
871		set_track(s, object, TRACK_ALLOC, addr);
872	trace(s, page, object, 1);
873	init_object(s, object, 1);
874	return 1;
875
876bad:
877	if (PageSlab(page)) {
878		/*
879		 * If this is a slab page then lets do the best we can
880		 * to avoid issues in the future. Marking all objects
881		 * as used avoids touching the remaining objects.
882		 */
883		slab_fix(s, "Marking all objects used");
884		page->inuse = s->objects;
885		page->freelist = NULL;
886	}
887	return 0;
888}
889
890static int free_debug_processing(struct kmem_cache *s, struct page *page,
891						void *object, void *addr)
892{
893	if (!check_slab(s, page))
894		goto fail;
895
896	if (!check_valid_pointer(s, page, object)) {
897		slab_err(s, page, "Invalid object pointer 0x%p", object);
898		goto fail;
899	}
900
901	if (on_freelist(s, page, object)) {
902		object_err(s, page, object, "Object already free");
903		goto fail;
904	}
905
906	if (!check_object(s, page, object, 1))
907		return 0;
908
909	if (unlikely(s != page->slab)) {
910		if (!PageSlab(page)) {
911			slab_err(s, page, "Attempt to free object(0x%p) "
912				"outside of slab", object);
913		} else if (!page->slab) {
914			printk(KERN_ERR
915				"SLUB <none>: no slab for object 0x%p.\n",
916						object);
917			dump_stack();
918		} else
919			object_err(s, page, object,
920					"page slab pointer corrupt.");
921		goto fail;
922	}
923
924	/* Special debug activities for freeing objects */
925	if (!SlabFrozen(page) && !page->freelist)
926		remove_full(s, page);
927	if (s->flags & SLAB_STORE_USER)
928		set_track(s, object, TRACK_FREE, addr);
929	trace(s, page, object, 0);
930	init_object(s, object, 0);
931	return 1;
932
933fail:
934	slab_fix(s, "Object at 0x%p not freed", object);
935	return 0;
936}
937
938static int __init setup_slub_debug(char *str)
939{
940	slub_debug = DEBUG_DEFAULT_FLAGS;
941	if (*str++ != '=' || !*str)
942		/*
943		 * No options specified. Switch on full debugging.
944		 */
945		goto out;
946
947	if (*str == ',')
948		/*
949		 * No options but restriction on slabs. This means full
950		 * debugging for slabs matching a pattern.
951		 */
952		goto check_slabs;
953
954	slub_debug = 0;
955	if (*str == '-')
956		/*
957		 * Switch off all debugging measures.
958		 */
959		goto out;
960
961	/*
962	 * Determine which debug features should be switched on
963	 */
964	for (; *str && *str != ','; str++) {
965		switch (tolower(*str)) {
966		case 'f':
967			slub_debug |= SLAB_DEBUG_FREE;
968			break;
969		case 'z':
970			slub_debug |= SLAB_RED_ZONE;
971			break;
972		case 'p':
973			slub_debug |= SLAB_POISON;
974			break;
975		case 'u':
976			slub_debug |= SLAB_STORE_USER;
977			break;
978		case 't':
979			slub_debug |= SLAB_TRACE;
980			break;
981		default:
982			printk(KERN_ERR "slub_debug option '%c' "
983				"unknown. skipped\n", *str);
984		}
985	}
986
987check_slabs:
988	if (*str == ',')
989		slub_debug_slabs = str + 1;
990out:
991	return 1;
992}
993
994__setup("slub_debug", setup_slub_debug);
995
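/*
 * Example command lines accepted by the parser above:
 *
 *   slub_debug		full debugging (DEBUG_DEFAULT_FLAGS) for all slabs
 *   slub_debug=FZ	only sanity checks and red zoning, for all slabs
 *   slub_debug=-	switch all debugging off
 *   slub_debug=,dentry	full debugging, restricted to the dentry cache
 *
 * The slab name is matched as a prefix (see kmem_cache_flags() below), so
 * slub_debug=,kmalloc covers every kmalloc-<size> cache.
 */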
996static unsigned long kmem_cache_flags(unsigned long objsize,
997	unsigned long flags, const char *name,
998	void (*ctor)(struct kmem_cache *, void *))
999{
1000	/*
1001	 * Enable debugging if selected on the kernel commandline.
1002	 */
1003	if (slub_debug && (!slub_debug_slabs ||
1004	    strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)) == 0))
1005			flags |= slub_debug;
1006
1007	return flags;
1008}
1009#else
1010static inline void setup_object_debug(struct kmem_cache *s,
1011			struct page *page, void *object) {}
1012
1013static inline int alloc_debug_processing(struct kmem_cache *s,
1014	struct page *page, void *object, void *addr) { return 0; }
1015
1016static inline int free_debug_processing(struct kmem_cache *s,
1017	struct page *page, void *object, void *addr) { return 0; }
1018
1019static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
1020			{ return 1; }
1021static inline int check_object(struct kmem_cache *s, struct page *page,
1022			void *object, int active) { return 1; }
1023static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
1024static inline unsigned long kmem_cache_flags(unsigned long objsize,
1025	unsigned long flags, const char *name,
1026	void (*ctor)(struct kmem_cache *, void *))
1027{
1028	return flags;
1029}
1030#define slub_debug 0
1031#endif
1032/*
1033 * Slab allocation and freeing
1034 */
1035static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
1036{
1037	struct page *page;
1038	int pages = 1 << s->order;
1039
1040	flags |= s->allocflags;
1041
1042	if (node == -1)
1043		page = alloc_pages(flags, s->order);
1044	else
1045		page = alloc_pages_node(node, flags, s->order);
1046
1047	if (!page)
1048		return NULL;
1049
1050	mod_zone_page_state(page_zone(page),
1051		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
1052		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
1053		pages);
1054
1055	return page;
1056}
1057
1058static void setup_object(struct kmem_cache *s, struct page *page,
1059				void *object)
1060{
1061	setup_object_debug(s, page, object);
1062	if (unlikely(s->ctor))
1063		s->ctor(s, object);
1064}
1065
1066static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
1067{
1068	struct page *page;
1069	struct kmem_cache_node *n;
1070	void *start;
1071	void *last;
1072	void *p;
1073
1074	BUG_ON(flags & GFP_SLAB_BUG_MASK);
1075
1076	page = allocate_slab(s,
1077		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
1078	if (!page)
1079		goto out;
1080
1081	n = get_node(s, page_to_nid(page));
1082	if (n)
1083		atomic_long_inc(&n->nr_slabs);
1084	page->slab = s;
1085	page->flags |= 1 << PG_slab;
1086	if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
1087			SLAB_STORE_USER | SLAB_TRACE))
1088		SetSlabDebug(page);
1089
1090	start = page_address(page);
1091
1092	if (unlikely(s->flags & SLAB_POISON))
1093		memset(start, POISON_INUSE, PAGE_SIZE << s->order);
1094
1095	last = start;
1096	for_each_object(p, s, start) {
1097		setup_object(s, page, last);
1098		set_freepointer(s, last, p);
1099		last = p;
1100	}
1101	setup_object(s, page, last);
1102	set_freepointer(s, last, NULL);
1103
1104	page->freelist = start;
1105	page->inuse = 0;
1106out:
1107	return page;
1108}
1109
1110static void __free_slab(struct kmem_cache *s, struct page *page)
1111{
1112	int pages = 1 << s->order;
1113
1114	if (unlikely(SlabDebug(page))) {
1115		void *p;
1116
1117		slab_pad_check(s, page);
1118		for_each_object(p, s, page_address(page))
1119			check_object(s, page, p, 0);
1120		ClearSlabDebug(page);
1121	}
1122
1123	mod_zone_page_state(page_zone(page),
1124		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
1125		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
1126		-pages);
1127
1128	__ClearPageSlab(page);
1129	reset_page_mapcount(page);
1130	__free_pages(page, s->order);
1131}
1132
1133static void rcu_free_slab(struct rcu_head *h)
1134{
1135	struct page *page;
1136
1137	page = container_of((struct list_head *)h, struct page, lru);
1138	__free_slab(page->slab, page);
1139}
1140
1141static void free_slab(struct kmem_cache *s, struct page *page)
1142{
1143	if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) {
1144		/*
1145		 * RCU free overloads the RCU head over the LRU
1146		 */
1147		struct rcu_head *head = (void *)&page->lru;
1148
1149		call_rcu(head, rcu_free_slab);
1150	} else
1151		__free_slab(s, page);
1152}
1153
1154static void discard_slab(struct kmem_cache *s, struct page *page)
1155{
1156	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1157
1158	atomic_long_dec(&n->nr_slabs);
1159	free_slab(s, page);
1160}
1161
1162/*
1163 * Per slab locking using the pagelock
1164 */
1165static __always_inline void slab_lock(struct page *page)
1166{
1167	bit_spin_lock(PG_locked, &page->flags);
1168}
1169
1170static __always_inline void slab_unlock(struct page *page)
1171{
1172	__bit_spin_unlock(PG_locked, &page->flags);
1173}
1174
1175static __always_inline int slab_trylock(struct page *page)
1176{
1177	return bit_spin_trylock(PG_locked, &page->flags);
1181}
1182
1183/*
1184 * Management of partially allocated slabs
1185 */
1186static void add_partial(struct kmem_cache_node *n,
1187				struct page *page, int tail)
1188{
1189	spin_lock(&n->list_lock);
1190	n->nr_partial++;
1191	if (tail)
1192		list_add_tail(&page->lru, &n->partial);
1193	else
1194		list_add(&page->lru, &n->partial);
1195	spin_unlock(&n->list_lock);
1196}
1197
1198static void remove_partial(struct kmem_cache *s,
1199						struct page *page)
1200{
1201	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1202
1203	spin_lock(&n->list_lock);
1204	list_del(&page->lru);
1205	n->nr_partial--;
1206	spin_unlock(&n->list_lock);
1207}
1208
1209/*
1210 * Lock slab and remove from the partial list.
1211 *
1212 * Must hold list_lock.
1213 */
1214static inline int lock_and_freeze_slab(struct kmem_cache_node *n, struct page *page)
1215{
1216	if (slab_trylock(page)) {
1217		list_del(&page->lru);
1218		n->nr_partial--;
1219		SetSlabFrozen(page);
1220		return 1;
1221	}
1222	return 0;
1223}
1224
1225/*
1226 * Try to allocate a partial slab from a specific node.
1227 */
1228static struct page *get_partial_node(struct kmem_cache_node *n)
1229{
1230	struct page *page;
1231
1232	/*
1233	 * Racy check. If we mistakenly see no partial slabs then we
1234	 * just allocate an empty slab. If we mistakenly try to get a
1235	 * partial slab and there is none available then get_partial_node()
1236	 * will return NULL.
1237	 */
1238	if (!n || !n->nr_partial)
1239		return NULL;
1240
1241	spin_lock(&n->list_lock);
1242	list_for_each_entry(page, &n->partial, lru)
1243		if (lock_and_freeze_slab(n, page))
1244			goto out;
1245	page = NULL;
1246out:
1247	spin_unlock(&n->list_lock);
1248	return page;
1249}
1250
1251/*
1252 * Get a page from somewhere. Search in increasing NUMA distances.
1253 */
1254static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
1255{
1256#ifdef CONFIG_NUMA
1257	struct zonelist *zonelist;
1258	struct zone **z;
1259	struct page *page;
1260
1261	/*
1262	 * The defrag ratio allows a configuration of the tradeoffs between
1263	 * inter node defragmentation and node local allocations. A lower
1264	 * defrag_ratio increases the tendency to do local allocations
1265	 * instead of attempting to obtain partial slabs from other nodes.
1266	 *
1267	 * If the defrag_ratio is set to 0 then kmalloc() always
1268	 * returns node local objects. If the ratio is higher then kmalloc()
1269	 * may return off node objects because partial slabs are obtained
1270	 * from other nodes and filled up.
1271	 *
1272	 * If /sys/kernel/slab/xx/defrag_ratio is set to 100 (which makes
1273	 * defrag_ratio = 1000) then every (well almost) allocation will
1274	 * first attempt to defrag slab caches on other nodes. This means
1275	 * scanning over all nodes to look for partial slabs which may be
1276	 * expensive if we do it every time we are trying to find a slab
1277	 * with available objects.
1278	 */
1279	if (!s->remote_node_defrag_ratio ||
1280			get_cycles() % 1024 > s->remote_node_defrag_ratio)
1281		return NULL;
1282
1283	zonelist = &NODE_DATA(
1284		slab_node(current->mempolicy))->node_zonelists[gfp_zone(flags)];
1285	for (z = zonelist->zones; *z; z++) {
1286		struct kmem_cache_node *n;
1287
1288		n = get_node(s, zone_to_nid(*z));
1289
1290		if (n && cpuset_zone_allowed_hardwall(*z, flags) &&
1291				n->nr_partial > MIN_PARTIAL) {
1292			page = get_partial_node(n);
1293			if (page)
1294				return page;
1295		}
1296	}
1297#endif
1298	return NULL;
1299}
1300
1301/*
1302 * Get a partial page, lock it and return it.
1303 */
1304static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
1305{
1306	struct page *page;
1307	int searchnode = (node == -1) ? numa_node_id() : node;
1308
1309	page = get_partial_node(get_node(s, searchnode));
1310	if (page || (flags & __GFP_THISNODE))
1311		return page;
1312
1313	return get_any_partial(s, flags);
1314}
1315
1316/*
1317 * Move a page back to the lists.
1318 *
1319 * Must be called with the slab lock held.
1320 *
1321 * On exit the slab lock will have been dropped.
1322 */
1323static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
1324{
1325	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1326	struct kmem_cache_cpu *c = get_cpu_slab(s, smp_processor_id());
1327
1328	ClearSlabFrozen(page);
1329	if (page->inuse) {
1330
1331		if (page->freelist) {
1332			add_partial(n, page, tail);
1333			stat(c, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
1334		} else {
1335			stat(c, DEACTIVATE_FULL);
1336			if (SlabDebug(page) && (s->flags & SLAB_STORE_USER))
1337				add_full(n, page);
1338		}
1339		slab_unlock(page);
1340	} else {
1341		stat(c, DEACTIVATE_EMPTY);
1342		if (n->nr_partial < MIN_PARTIAL) {
1343			/*
1344			 * Adding an empty slab to the partial slabs in order
1345			 * to avoid page allocator overhead. This slab needs
1346			 * to come after the other slabs with objects in them
1347			 * so that the others get filled first. That way the
1348			 * size of the partial list stays small.
1349			 *
1350			 * kmem_cache_shrink can reclaim any empty slabs from the
1351			 * partial list.
1352			 */
1353			add_partial(n, page, 1);
1354			slab_unlock(page);
1355		} else {
1356			slab_unlock(page);
1357			stat(get_cpu_slab(s, raw_smp_processor_id()), FREE_SLAB);
1358			discard_slab(s, page);
1359		}
1360	}
1361}
1362
1363/*
1364 * Remove the cpu slab
1365 */
1366static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
1367{
1368	struct page *page = c->page;
1369	int tail = 1;
1370
1371	if (page->freelist)
1372		stat(c, DEACTIVATE_REMOTE_FREES);
1373	/*
1374	 * Merge cpu freelist into slab freelist. Typically we get here
1375	 * because both freelists are empty. So this is unlikely
1376	 * to occur.
1377	 */
1378	while (unlikely(c->freelist)) {
1379		void **object;
1380
1381		tail = 0;	/* Hot objects. Put the slab first */
1382
1383		/* Retrieve object from cpu_freelist */
1384		object = c->freelist;
1385		c->freelist = c->freelist[c->offset];
1386
1387		/* And put onto the regular freelist */
1388		object[c->offset] = page->freelist;
1389		page->freelist = object;
1390		page->inuse--;
1391	}
1392	c->page = NULL;
1393	unfreeze_slab(s, page, tail);
1394}
1395
1396static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
1397{
1398	stat(c, CPUSLAB_FLUSH);
1399	slab_lock(c->page);
1400	deactivate_slab(s, c);
1401}
1402
1403/*
1404 * Flush cpu slab.
1405 *
1406 * Called from IPI handler with interrupts disabled.
1407 */
1408static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
1409{
1410	struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
1411
1412	if (likely(c && c->page))
1413		flush_slab(s, c);
1414}
1415
1416static void flush_cpu_slab(void *d)
1417{
1418	struct kmem_cache *s = d;
1419
1420	__flush_cpu_slab(s, smp_processor_id());
1421}
1422
1423static void flush_all(struct kmem_cache *s)
1424{
1425#ifdef CONFIG_SMP
1426	on_each_cpu(flush_cpu_slab, s, 1, 1);
1427#else
1428	unsigned long flags;
1429
1430	local_irq_save(flags);
1431	flush_cpu_slab(s);
1432	local_irq_restore(flags);
1433#endif
1434}
1435
1436/*
1437 * Check if the objects in a per cpu structure fit numa
1438 * locality expectations.
1439 */
1440static inline int node_match(struct kmem_cache_cpu *c, int node)
1441{
1442#ifdef CONFIG_NUMA
1443	if (node != -1 && c->node != node)
1444		return 0;
1445#endif
1446	return 1;
1447}
1448
1449/*
1450 * Slow path. The lockless freelist is empty or we need to perform
1451 * debugging duties.
1452 *
1453 * Interrupts are disabled.
1454 *
1455 * Processing is still very fast if new objects have been freed to the
1456 * regular freelist. In that case we simply take over the regular freelist
1457 * as the lockless freelist and zap the regular freelist.
1458 *
1459 * If that is not working then we fall back to the partial lists. We take the
1460 * first element of the freelist as the object to allocate now and move the
1461 * rest of the freelist to the lockless freelist.
1462 *
1463 * And if we were unable to get a new slab from the partial slab lists then
1464 * we need to allocate a new slab. This is the slowest path since it involves
1465 * a call to the page allocator and the setup of a new slab.
1466 */
1467static void *__slab_alloc(struct kmem_cache *s,
1468		gfp_t gfpflags, int node, void *addr, struct kmem_cache_cpu *c)
1469{
1470	void **object;
1471	struct page *new;
1472
1473	/* We handle __GFP_ZERO in the caller */
1474	gfpflags &= ~__GFP_ZERO;
1475
1476	if (!c->page)
1477		goto new_slab;
1478
1479	slab_lock(c->page);
1480	if (unlikely(!node_match(c, node)))
1481		goto another_slab;
1482
1483	stat(c, ALLOC_REFILL);
1484
1485load_freelist:
1486	object = c->page->freelist;
1487	if (unlikely(!object))
1488		goto another_slab;
1489	if (unlikely(SlabDebug(c->page)))
1490		goto debug;
1491
1492	c->freelist = object[c->offset];
1493	c->page->inuse = s->objects;
1494	c->page->freelist = NULL;
1495	c->node = page_to_nid(c->page);
1496unlock_out:
1497	slab_unlock(c->page);
1498	stat(c, ALLOC_SLOWPATH);
1499	return object;
1500
1501another_slab:
1502	deactivate_slab(s, c);
1503
1504new_slab:
1505	new = get_partial(s, gfpflags, node);
1506	if (new) {
1507		c->page = new;
1508		stat(c, ALLOC_FROM_PARTIAL);
1509		goto load_freelist;
1510	}
1511
1512	if (gfpflags & __GFP_WAIT)
1513		local_irq_enable();
1514
1515	new = new_slab(s, gfpflags, node);
1516
1517	if (gfpflags & __GFP_WAIT)
1518		local_irq_disable();
1519
1520	if (new) {
1521		c = get_cpu_slab(s, smp_processor_id());
1522		stat(c, ALLOC_SLAB);
1523		if (c->page)
1524			flush_slab(s, c);
1525		slab_lock(new);
1526		SetSlabFrozen(new);
1527		c->page = new;
1528		goto load_freelist;
1529	}
1530
1531	/*
1532	 * No memory available.
1533	 *
1534	 * If the slab uses higher order allocs but the object is
1535	 * smaller than a page size then we can fall back in emergencies
1536	 * to the page allocator via kmalloc_large. The page allocator may
1537	 * have failed to obtain a higher order page and we can try to
1538	 * allocate a single page if the object fits into a single page.
1539	 * That is only possible if certain conditions are met that are being
1540	 * checked when a slab is created.
1541	 */
1542	if (!(gfpflags & __GFP_NORETRY) &&
1543				(s->flags & __PAGE_ALLOC_FALLBACK)) {
1544		if (gfpflags & __GFP_WAIT)
1545			local_irq_enable();
1546		object = kmalloc_large(s->objsize, gfpflags);
1547		if (gfpflags & __GFP_WAIT)
1548			local_irq_disable();
1549		return object;
1550	}
1551	return NULL;
1552debug:
1553	if (!alloc_debug_processing(s, c->page, object, addr))
1554		goto another_slab;
1555
1556	c->page->inuse++;
1557	c->page->freelist = object[c->offset];
1558	c->node = -1;
1559	goto unlock_out;
1560}
1561
1562/*
1563 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
1564 * have the fastpath folded into their functions. So no function call
1565 * overhead for requests that can be satisfied on the fastpath.
1566 *
1567 * The fastpath works by first checking if the lockless freelist can be used.
1568 * If not then __slab_alloc is called for slow processing.
1569 *
1570 * Otherwise we can simply pick the next object from the lockless free list.
1571 */
1572static __always_inline void *slab_alloc(struct kmem_cache *s,
1573		gfp_t gfpflags, int node, void *addr)
1574{
1575	void **object;
1576	struct kmem_cache_cpu *c;
1577	unsigned long flags;
1578
1579	local_irq_save(flags);
1580	c = get_cpu_slab(s, smp_processor_id());
1581	if (unlikely(!c->freelist || !node_match(c, node)))
1582
1583		object = __slab_alloc(s, gfpflags, node, addr, c);
1584
1585	else {
1586		object = c->freelist;
1587		c->freelist = object[c->offset];
1588		stat(c, ALLOC_FASTPATH);
1589	}
1590	local_irq_restore(flags);
1591
1592	if (unlikely((gfpflags & __GFP_ZERO) && object))
1593		memset(object, 0, c->objsize);
1594
1595	return object;
1596}
1597
1598void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
1599{
1600	return slab_alloc(s, gfpflags, -1, __builtin_return_address(0));
1601}
1602EXPORT_SYMBOL(kmem_cache_alloc);
1603
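/*
 * Sketch (not compiled) of the API exercised by the fastpath above. The
 * "foo" cache and functions are made up for illustration:
 */
#if 0
struct foo {
	int a;
	struct list_head list;
};

static struct kmem_cache *foo_cache;

static int __init foo_init(void)
{
	foo_cache = kmem_cache_create("foo", sizeof(struct foo),
					0, SLAB_HWCACHE_ALIGN, NULL);
	return foo_cache ? 0 : -ENOMEM;
}

static void foo_use(void)
{
	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);

	if (f)
		kmem_cache_free(foo_cache, f);
}
#endif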
1604#ifdef CONFIG_NUMA
1605void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
1606{
1607	return slab_alloc(s, gfpflags, node, __builtin_return_address(0));
1608}
1609EXPORT_SYMBOL(kmem_cache_alloc_node);
1610#endif
1611
1612/*
1613 * Slow path handling. This may still be called frequently since objects
1614 * have a longer lifetime than the cpu slabs in most processing loads.
1615 *
1616 * So we still attempt to reduce cache line usage. Just take the slab
1617 * lock and free the item. If there is no additional partial page
1618 * handling required then we can return immediately.
1619 */
1620static void __slab_free(struct kmem_cache *s, struct page *page,
1621				void *x, void *addr, unsigned int offset)
1622{
1623	void *prior;
1624	void **object = (void *)x;
1625	struct kmem_cache_cpu *c;
1626
1627	c = get_cpu_slab(s, raw_smp_processor_id());
1628	stat(c, FREE_SLOWPATH);
1629	slab_lock(page);
1630
1631	if (unlikely(SlabDebug(page)))
1632		goto debug;
1633
1634checks_ok:
1635	prior = object[offset] = page->freelist;
1636	page->freelist = object;
1637	page->inuse--;
1638
1639	if (unlikely(SlabFrozen(page))) {
1640		stat(c, FREE_FROZEN);
1641		goto out_unlock;
1642	}
1643
1644	if (unlikely(!page->inuse))
1645		goto slab_empty;
1646
1647	/*
1648	 * Objects left in the slab. If it was not on the partial list before
1649	 * then add it.
1650	 */
1651	if (unlikely(!prior)) {
1652		add_partial(get_node(s, page_to_nid(page)), page, 1);
1653		stat(c, FREE_ADD_PARTIAL);
1654	}
1655
1656out_unlock:
1657	slab_unlock(page);
1658	return;
1659
1660slab_empty:
1661	if (prior) {
1662		/*
1663		 * Slab still on the partial list.
1664		 */
1665		remove_partial(s, page);
1666		stat(c, FREE_REMOVE_PARTIAL);
1667	}
1668	slab_unlock(page);
1669	stat(c, FREE_SLAB);
1670	discard_slab(s, page);
1671	return;
1672
1673debug:
1674	if (!free_debug_processing(s, page, x, addr))
1675		goto out_unlock;
1676	goto checks_ok;
1677}
1678
1679/*
1680 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
1681 * can perform fastpath freeing without additional function calls.
1682 *
1683 * The fastpath is only possible if we are freeing to the current cpu slab
1684 * of this processor. This is typically the case if we have just allocated
1685 * the item before.
1686 *
1687 * If fastpath is not possible then fall back to __slab_free where we deal
1688 * with all sorts of special processing.
1689 */
1690static __always_inline void slab_free(struct kmem_cache *s,
1691			struct page *page, void *x, void *addr)
1692{
1693	void **object = (void *)x;
1694	struct kmem_cache_cpu *c;
1695	unsigned long flags;
1696
1697	local_irq_save(flags);
1698	c = get_cpu_slab(s, smp_processor_id());
1699	debug_check_no_locks_freed(object, c->objsize);
1700	if (likely(page == c->page && c->node >= 0)) {
1701		object[c->offset] = c->freelist;
1702		c->freelist = object;
1703		stat(c, FREE_FASTPATH);
1704	} else
1705		__slab_free(s, page, x, addr, c->offset);
1706
1707	local_irq_restore(flags);
1708}
1709
1710void kmem_cache_free(struct kmem_cache *s, void *x)
1711{
1712	struct page *page;
1713
1714	page = virt_to_head_page(x);
1715
1716	slab_free(s, page, x, __builtin_return_address(0));
1717}
1718EXPORT_SYMBOL(kmem_cache_free);
1719
1720/* Figure out on which slab object the object resides */
1721static struct page *get_object_page(const void *x)
1722{
1723	struct page *page = virt_to_head_page(x);
1724
1725	if (!PageSlab(page))
1726		return NULL;
1727
1728	return page;
1729}
1730
1731/*
1732 * Object placement in a slab is made very easy because we always start at
1733 * offset 0. If we tune the size of the object to the alignment then we can
1734 * get the required alignment by putting one properly sized object after
1735 * another.
1736 *
1737 * Notice that the allocation order determines the sizes of the per cpu
1738 * caches. Each processor has always one slab available for allocations.
1739 * Increasing the allocation order reduces the number of times that slabs
1740 * must be moved on and off the partial lists and is therefore a factor in
1741 * locking overhead.
1742 */
1743
1744/*
1745 * Minimum / Maximum order of slab pages. This influences locking overhead
1746 * and slab fragmentation. A higher order reduces the number of partial slabs
1747 * and increases the number of allocations possible without having to
1748 * take the list_lock.
1749 */
1750static int slub_min_order;
1751static int slub_max_order = DEFAULT_MAX_ORDER;
1752static int slub_min_objects = DEFAULT_MIN_OBJECTS;
1753
1754/*
1755 * Merge control. If this is set then no merging of slab caches will occur.
1756 * (Could be removed. This was introduced to pacify the merge skeptics.)
1757 */
1758static int slub_nomerge;
1759
1760/*
1761 * Calculate the order of allocation given a slab object size.
1762 *
1763 * The order of allocation has significant impact on performance and other
1764 * system components. Generally order 0 allocations should be preferred since
1765 * order 0 does not cause fragmentation in the page allocator. Larger objects
1766 * can be problematic to put into order 0 slabs because there may be too much
1767 * unused space left. We go to a higher order if more than 1/8th of the slab
1768 * would be wasted.
1769 *
1770 * In order to reach satisfactory performance we must ensure that a minimum
1771 * number of objects is in one slab. Otherwise we may generate too much
1772 * activity on the partial lists which requires taking the list_lock. This is
1773 * less a concern for large slabs though which are rarely used.
1774 *
1775 * slub_max_order specifies the order where we begin to stop considering the
1776 * number of objects in a slab as critical. If we reach slub_max_order then
1777 * we try to keep the page order as low as possible. So we accept more waste
1778 * of space in favor of a small page order.
1779 *
1780 * Higher order allocations also allow the placement of more objects in a
1781 * slab and thereby reduce object handling overhead. If the user has
1782 * requested a higher minimum order then we start with that one instead of
1783 * the smallest order which will fit the object.
1784 */
1785static inline int slab_order(int size, int min_objects,
1786				int max_order, int fract_leftover)
1787{
1788	int order;
1789	int rem;
1790	int min_order = slub_min_order;
1791
1792	for (order = max(min_order,
1793				fls(min_objects * size - 1) - PAGE_SHIFT);
1794			order <= max_order; order++) {
1795
1796		unsigned long slab_size = PAGE_SIZE << order;
1797
1798		if (slab_size < min_objects * size)
1799			continue;
1800
1801		rem = slab_size % size;
1802
1803		if (rem <= slab_size / fract_leftover)
1804			break;
1805
1806	}
1807
1808	return order;
1809}
1810
1811static inline int calculate_order(int size)
1812{
1813	int order;
1814	int min_objects;
1815	int fraction;
1816
1817	/*
1818	 * Attempt to find the best configuration for a slab. This
1819	 * works by first attempting to generate a layout with
1820	 * the best configuration and backing off gradually.
1821	 *
1822	 * First we reduce the acceptable waste in a slab. Then
1823	 * we reduce the minimum objects required in a slab.
1824	 */
1825	min_objects = slub_min_objects;
1826	while (min_objects > 1) {
1827		fraction = 8;
1828		while (fraction >= 4) {
1829			order = slab_order(size, min_objects,
1830						slub_max_order, fraction);
1831			if (order <= slub_max_order)
1832				return order;
1833			fraction /= 2;
1834		}
1835		min_objects /= 2;
1836	}
1837
1838	/*
1839	 * We were unable to place multiple objects in a slab. Now
1840	 * lets see if we can place a single object there.
1841	 */
1842	order = slab_order(size, 1, slub_max_order, 1);
1843	if (order <= slub_max_order)
1844		return order;
1845
1846	/*
1847	 * Doh, this slab cannot be placed using slub_max_order.
1848	 */
1849	order = slab_order(size, 1, MAX_ORDER, 1);
1850	if (order <= MAX_ORDER)
1851		return order;
1852	return -ENOSYS;
1853}
1854
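/*
 * Worked example for the search above (4K pages, slub_min_objects = 4,
 * slub_max_order = 1): for size = 700, order 0 wastes 4096 % 700 = 596
 * bytes, more than 4096 / 8 = 512, so it is rejected at fraction 8;
 * order 1 wastes 8192 % 700 = 492 bytes, within 8192 / 8 = 1024, so
 * slab_order() settles on order 1 with 11 objects per slab.
 */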
1855/*
1856 * Figure out what the alignment of the objects will be.
1857 */
1858static unsigned long calculate_alignment(unsigned long flags,
1859		unsigned long align, unsigned long size)
1860{
1861	/*
1862	 * If the user wants hardware cache aligned objects then follow that
1863	 * suggestion if the object is sufficiently large.
1864	 *
1865	 * The hardware cache alignment cannot override the specified
1866	 * alignment though. If that is greater then use it.
1867	 */
1868	if (flags & SLAB_HWCACHE_ALIGN) {
1869		unsigned long ralign = cache_line_size();
1870		while (size <= ralign / 2)
1871			ralign /= 2;
1872		align = max(align, ralign);
1873	}
1874
1875	if (align < ARCH_SLAB_MINALIGN)
1876		align = ARCH_SLAB_MINALIGN;
1877
1878	return ALIGN(align, sizeof(void *));
1879}
1880
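/*
 * Example of the halving above, assuming cache_line_size() == 64: a
 * 20-byte object with SLAB_HWCACHE_ALIGN gets ralign 64 -> 32 (20 <= 32)
 * and then stops (20 > 16), so two objects share a cache line instead of
 * each wasting 44 bytes on full cacheline alignment.
 */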
1881static void init_kmem_cache_cpu(struct kmem_cache *s,
1882			struct kmem_cache_cpu *c)
1883{
1884	c->page = NULL;
1885	c->freelist = NULL;
1886	c->node = 0;
1887	c->offset = s->offset / sizeof(void *);
1888	c->objsize = s->objsize;
1889#ifdef CONFIG_SLUB_STATS
1890	memset(c->stat, 0, NR_SLUB_STAT_ITEMS * sizeof(unsigned));
1891#endif
1892}
1893
1894static void init_kmem_cache_node(struct kmem_cache_node *n)
1895{
1896	n->nr_partial = 0;
1897	atomic_long_set(&n->nr_slabs, 0);
1898	spin_lock_init(&n->list_lock);
1899	INIT_LIST_HEAD(&n->partial);
1900#ifdef CONFIG_SLUB_DEBUG
1901	INIT_LIST_HEAD(&n->full);
1902#endif
1903}
1904
1905#ifdef CONFIG_SMP
1906/*
1907 * Per cpu array for per cpu structures.
1908 *
1909 * The per cpu array places all kmem_cache_cpu structures from one processor
1910 * close together meaning that it becomes possible that multiple per cpu
1911 * structures are contained in one cacheline. This may be particularly
1912 * beneficial for the kmalloc caches.
1913 *
1914 * A desktop system typically has around 60-80 slabs. With 100 here we are
1915 * likely able to get per cpu structures for all caches from the array defined
1916 * here. We must be able to cover all kmalloc caches during bootstrap.
1917 *
1918 * If the per cpu array is exhausted then fall back to kmalloc
1919 * of individual cachelines. No sharing is possible then.
1920 */
1921#define NR_KMEM_CACHE_CPU 100
1922
1923static DEFINE_PER_CPU(struct kmem_cache_cpu,
1924				kmem_cache_cpu)[NR_KMEM_CACHE_CPU];
1925
1926static DEFINE_PER_CPU(struct kmem_cache_cpu *, kmem_cache_cpu_free);
1927static cpumask_t kmem_cache_cpu_free_init_once = CPU_MASK_NONE;
1928
1929static struct kmem_cache_cpu *alloc_kmem_cache_cpu(struct kmem_cache *s,
1930							int cpu, gfp_t flags)
1931{
1932	struct kmem_cache_cpu *c = per_cpu(kmem_cache_cpu_free, cpu);
1933
1934	if (c)
1935		per_cpu(kmem_cache_cpu_free, cpu) =
1936				(void *)c->freelist;
1937	else {
1938		/* Table overflow: So allocate ourselves */
1939		c = kmalloc_node(
1940			ALIGN(sizeof(struct kmem_cache_cpu), cache_line_size()),
1941			flags, cpu_to_node(cpu));
1942		if (!c)
1943			return NULL;
1944	}
1945
1946	init_kmem_cache_cpu(s, c);
1947	return c;
1948}
1949
1950static void free_kmem_cache_cpu(struct kmem_cache_cpu *c, int cpu)
1951{
1952	if (c < per_cpu(kmem_cache_cpu, cpu) ||
1953			c > per_cpu(kmem_cache_cpu, cpu) + NR_KMEM_CACHE_CPU) {
1954		kfree(c);
1955		return;
1956	}
1957	c->freelist = (void *)per_cpu(kmem_cache_cpu_free, cpu);
1958	per_cpu(kmem_cache_cpu_free, cpu) = c;
1959}
1960
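/*
 * Note on the recycling scheme above: kmem_cache_cpu_free is the head of
 * a singly linked list threaded through the freelist field of the unused
 * kmem_cache_cpu entries, so the statically reserved array can be handed
 * out and taken back without involving any allocator.
 */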
1961static void free_kmem_cache_cpus(struct kmem_cache *s)
1962{
1963	int cpu;
1964
1965	for_each_online_cpu(cpu) {
1966		struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
1967
1968		if (c) {
1969			s->cpu_slab[cpu] = NULL;
1970			free_kmem_cache_cpu(c, cpu);
1971		}
1972	}
1973}
1974
1975static int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
1976{
1977	int cpu;
1978
1979	for_each_online_cpu(cpu) {
1980		struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
1981
1982		if (c)
1983			continue;
1984
1985		c = alloc_kmem_cache_cpu(s, cpu, flags);
1986		if (!c) {
1987			free_kmem_cache_cpus(s);
1988			return 0;
1989		}
1990		s->cpu_slab[cpu] = c;
1991	}
1992	return 1;
1993}
1994
1995/*
1996 * Initialize the per cpu array.
1997 */
1998static void init_alloc_cpu_cpu(int cpu)
1999{
2000	int i;
2001
2002	if (cpu_isset(cpu, kmem_cache_cpu_free_init_once))
2003		return;
2004
2005	for (i = NR_KMEM_CACHE_CPU - 1; i >= 0; i--)
2006		free_kmem_cache_cpu(&per_cpu(kmem_cache_cpu, cpu)[i], cpu);
2007
2008	cpu_set(cpu, kmem_cache_cpu_free_init_once);
2009}
2010
2011static void __init init_alloc_cpu(void)
2012{
2013	int cpu;
2014
2015	for_each_online_cpu(cpu)
2016		init_alloc_cpu_cpu(cpu);
2017}
2018
2019#else
2020static inline void free_kmem_cache_cpus(struct kmem_cache *s) {}
2021static inline void init_alloc_cpu(void) {}
2022
2023static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
2024{
2025	init_kmem_cache_cpu(s, &s->cpu_slab);
2026	return 1;
2027}
2028#endif
2029
2030#ifdef CONFIG_NUMA
2031/*
2032 * No kmalloc_node yet so do it by hand. We know that this is the first
2033 * slab on the node for this slabcache. There are no concurrent accesses
2034 * possible.
2035 *
2036 * Note that this function is only called for the kmalloc_node_cache,
2037 * i.e. when allocating kmem_cache_node structures for itself. It is used
2038 * to bootstrap memory on a fresh node that has no slab structures yet.
2039 */
2040static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags,
2041							   int node)
2042{
2043	struct page *page;
2044	struct kmem_cache_node *n;
2045	unsigned long flags;
2046
2047	BUG_ON(kmalloc_caches->size < sizeof(struct kmem_cache_node));
2048
2049	page = new_slab(kmalloc_caches, gfpflags, node);
2050
2051	BUG_ON(!page);
2052	if (page_to_nid(page) != node) {
2053		printk(KERN_ERR "SLUB: Unable to allocate memory from "
2054				"node %d\n", node);
2055		printk(KERN_ERR "SLUB: Allocating a useless per node structure "
2056				"in order to be able to continue\n");
2057	}
2058
2059	n = page->freelist;
2060	BUG_ON(!n);
2061	page->freelist = get_freepointer(kmalloc_caches, n);
2062	page->inuse++;
2063	kmalloc_caches->node[node] = n;
2064#ifdef CONFIG_SLUB_DEBUG
2065	init_object(kmalloc_caches, n, 1);
2066	init_tracking(kmalloc_caches, n);
2067#endif
2068	init_kmem_cache_node(n);
2069	atomic_long_inc(&n->nr_slabs);
2070
2071	/*
2072	 * lockdep requires consistent irq usage for each lock
2073	 * so even though there cannot be a race this early in
2074	 * the boot sequence, we still disable irqs.
2075	 */
2076	local_irq_save(flags);
2077	add_partial(n, page, 0);
2078	local_irq_restore(flags);
2079	return n;
2080}
2081
2082static void free_kmem_cache_nodes(struct kmem_cache *s)
2083{
2084	int node;
2085
2086	for_each_node_state(node, N_NORMAL_MEMORY) {
2087		struct kmem_cache_node *n = s->node[node];
2088		if (n && n != &s->local_node)
2089			kmem_cache_free(kmalloc_caches, n);
2090		s->node[node] = NULL;
2091	}
2092}
2093
2094static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
2095{
2096	int node;
2097	int local_node;
2098
2099	if (slab_state >= UP)
2100		local_node = page_to_nid(virt_to_page(s));
2101	else
2102		local_node = 0;
2103
2104	for_each_node_state(node, N_NORMAL_MEMORY) {
2105		struct kmem_cache_node *n;
2106
2107		if (local_node == node)
2108			n = &s->local_node;
2109		else {
2110			if (slab_state == DOWN) {
2111				n = early_kmem_cache_node_alloc(gfpflags,
2112								node);
2113				continue;
2114			}
2115			n = kmem_cache_alloc_node(kmalloc_caches,
2116							gfpflags, node);
2117
2118			if (!n) {
2119				free_kmem_cache_nodes(s);
2120				return 0;
2121			}
2122
2123		}
2124		s->node[node] = n;
2125		init_kmem_cache_node(n);
2126	}
2127	return 1;
2128}
2129#else
2130static void free_kmem_cache_nodes(struct kmem_cache *s)
2131{
2132}
2133
2134static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
2135{
2136	init_kmem_cache_node(&s->local_node);
2137	return 1;
2138}
2139#endif
2140
2141/*
2142 * calculate_sizes() determines the order and the distribution of data within
2143 * a slab object.
2144 */
2145static int calculate_sizes(struct kmem_cache *s)
2146{
2147	unsigned long flags = s->flags;
2148	unsigned long size = s->objsize;
2149	unsigned long align = s->align;
2150
2151	/*
2152	 * Round up object size to the next word boundary. We can only
2153	 * place the free pointer at word boundaries and this determines
2154	 * the possible location of the free pointer.
2155	 */
2156	size = ALIGN(size, sizeof(void *));
2157
2158#ifdef CONFIG_SLUB_DEBUG
2159	/*
2160	 * Determine if we can poison the object itself. If the user of
2161	 * the slab may touch the object after free or before allocation
2162	 * then we should never poison the object itself.
2163	 */
2164	if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) &&
2165			!s->ctor)
2166		s->flags |= __OBJECT_POISON;
2167	else
2168		s->flags &= ~__OBJECT_POISON;
2169
2170
2171	/*
2172	 * If we are Redzoning then check if there is some space between the
2173	 * end of the object and the free pointer. If not then add an
2174	 * additional word to have some bytes to store Redzone information.
2175	 */
2176	if ((flags & SLAB_RED_ZONE) && size == s->objsize)
2177		size += sizeof(void *);
2178#endif
2179
2180	/*
2181	 * With that we have determined the number of bytes in actual use
2182	 * by the object. This is the potential offset to the free pointer.
2183	 */
2184	s->inuse = size;
2185
2186	if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
2187		s->ctor)) {
2188		/*
2189		 * Relocate free pointer after the object if it is not
2190		 * permitted to overwrite the first word of the object on
2191		 * kmem_cache_free.
2192		 *
2193		 * This is the case if we use RCU, have a constructor or
2194		 * are poisoning the objects.
2195		 */
2196		s->offset = size;
2197		size += sizeof(void *);
2198	}
2199
2200#ifdef CONFIG_SLUB_DEBUG
2201	if (flags & SLAB_STORE_USER)
2202		/*
2203		 * Need to store information about allocs and frees after
2204		 * the object.
2205		 */
2206		size += 2 * sizeof(struct track);
2207
2208	if (flags & SLAB_RED_ZONE)
2209		/*
2210		 * Add some empty padding so that we can catch
2211		 * overwrites from earlier objects rather than let
2212		 * tracking information or the free pointer be
2213		 * corrupted if a user writes before the start
2214		 * of the object.
2215		 */
2216		size += sizeof(void *);
2217#endif
2218
2219	/*
2220	 * Determine the alignment based on various parameters that the
2221	 * user specified and the dynamic determination of cache line size
2222	 * on bootup.
2223	 */
2224	align = calculate_alignment(flags, align, s->objsize);
2225
2226	/*
2227	 * SLUB stores one object immediately after another beginning from
2228	 * offset 0. In order to align the objects we have to simply size
2229	 * each object to conform to the alignment.
2230	 */
2231	size = ALIGN(size, align);
2232	s->size = size;
2233
2234	s->allocflags = 0;
2235	if ((flags & __KMALLOC_CACHE) &&
2236			PAGE_SIZE / size < slub_min_objects) {
2237		/*
2238		 * Kmalloc cache that would not have enough objects in
2239		 * an order 0 page. Kmalloc slabs can fall back to order 0
2240		 * page allocator allocations, so take a reasonably large
2241		 * order that allows us a good number of objects.
2242		 */
2243		s->order = max(slub_max_order, PAGE_ALLOC_COSTLY_ORDER);
2244		s->flags |= __PAGE_ALLOC_FALLBACK;
2245		s->allocflags |= __GFP_NOWARN;
2246	} else
2247		s->order = calculate_order(size);
2248
2249	if (s->order < 0)
2250		return 0;
2251
2252	if (s->order)
2253		s->allocflags |= __GFP_COMP;
2254
2255	if (s->flags & SLAB_CACHE_DMA)
2256		s->allocflags |= SLUB_DMA;
2257
2258	if (s->flags & SLAB_RECLAIM_ACCOUNT)
2259		s->allocflags |= __GFP_RECLAIMABLE;
2260
2261	/*
2262	 * Determine the number of objects per slab
2263	 */
2264	s->objects = (PAGE_SIZE << s->order) / size;
2265
2266	return !!s->objects;
2267
2268}
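
/*
 * Layout example (illustrative; assumes 64 bit pointers and the 24 byte
 * struct track defined earlier in this file): objsize 56 with
 * SLAB_POISON | SLAB_RED_ZONE | SLAB_STORE_USER and no constructor gives
 *
 *	  0.. 55	the object itself (poisoned on free)
 *	 56.. 63	red zone word (added since size == objsize)
 *	 64.. 71	free pointer, relocated because of poisoning
 *	 72..119	two struct track entries (alloc and free)
 *	120..127	red zone padding before the next object
 *
 * for a total s->size of 128 bytes.
 */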
2269
2270static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
2271		const char *name, size_t size,
2272		size_t align, unsigned long flags,
2273		void (*ctor)(struct kmem_cache *, void *))
2274{
2275	memset(s, 0, kmem_size);
2276	s->name = name;
2277	s->ctor = ctor;
2278	s->objsize = size;
2279	s->align = align;
2280	s->flags = kmem_cache_flags(size, flags, name, ctor);
2281
2282	if (!calculate_sizes(s))
2283		goto error;
2284
2285	s->refcount = 1;
2286#ifdef CONFIG_NUMA
2287	s->remote_node_defrag_ratio = 100;
2288#endif
2289	if (!init_kmem_cache_nodes(s, gfpflags & ~SLUB_DMA))
2290		goto error;
2291
2292	if (alloc_kmem_cache_cpus(s, gfpflags & ~SLUB_DMA))
2293		return 1;
2294	free_kmem_cache_nodes(s);
2295error:
2296	if (flags & SLAB_PANIC)
2297		panic("Cannot create slab %s size=%lu realsize=%u "
2298			"order=%u offset=%u flags=%lx\n",
2299			s->name, (unsigned long)size, s->size, s->order,
2300			s->offset, flags);
2301	return 0;
2302}
2303
2304/*
2305 * Check if a given pointer is valid
2306 */
2307int kmem_ptr_validate(struct kmem_cache *s, const void *object)
2308{
2309	struct page *page;
2310
2311	page = get_object_page(object);
2312
2313	if (!page || s != page->slab)
2314		/* No slab or wrong slab */
2315		return 0;
2316
2317	if (!check_valid_pointer(s, page, object))
2318		return 0;
2319
2320	/*
2321	 * We could also check if the object is on the slab's freelist.
2322	 * But this would be too expensive and it seems that the main
2323	 * purpose of kmem_ptr_validate() is to check if the object belongs
2324	 * to a certain slab.
2325	 */
2326	return 1;
2327}
2328EXPORT_SYMBOL(kmem_ptr_validate);
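
/*
 * Usage sketch (hypothetical caller): a subsystem holding a pointer of
 * uncertain origin can check it before touching it:
 *
 *	if (kmem_ptr_validate(cache, object))
 *		... object may be treated as belonging to "cache" ...
 */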
2329
2330/*
2331 * Determine the size of a slab object
2332 */
2333unsigned int kmem_cache_size(struct kmem_cache *s)
2334{
2335	return s->objsize;
2336}
2337EXPORT_SYMBOL(kmem_cache_size);
2338
2339const char *kmem_cache_name(struct kmem_cache *s)
2340{
2341	return s->name;
2342}
2343EXPORT_SYMBOL(kmem_cache_name);
2344
2345/*
2346 * Attempt to free all empty slabs on a list. Return the number of slabs
2347 * that are still in use and thus could not be freed.
2348 */
2349static int free_list(struct kmem_cache *s, struct kmem_cache_node *n,
2350			struct list_head *list)
2351{
2352	int slabs_inuse = 0;
2353	unsigned long flags;
2354	struct page *page, *h;
2355
2356	spin_lock_irqsave(&n->list_lock, flags);
2357	list_for_each_entry_safe(page, h, list, lru)
2358		if (!page->inuse) {
2359			list_del(&page->lru);
2360			discard_slab(s, page);
2361		} else
2362			slabs_inuse++;
2363	spin_unlock_irqrestore(&n->list_lock, flags);
2364	return slabs_inuse;
2365}
2366
2367/*
2368 * Release all resources used by a slab cache.
2369 */
2370static inline int kmem_cache_close(struct kmem_cache *s)
2371{
2372	int node;
2373
2374	flush_all(s);
2375
2376	/* Attempt to free all objects */
2377	free_kmem_cache_cpus(s);
2378	for_each_node_state(node, N_NORMAL_MEMORY) {
2379		struct kmem_cache_node *n = get_node(s, node);
2380
2381		n->nr_partial -= free_list(s, n, &n->partial);
2382		if (atomic_long_read(&n->nr_slabs))
2383			return 1;
2384	}
2385	free_kmem_cache_nodes(s);
2386	return 0;
2387}
2388
2389/*
2390 * Close a cache and release the kmem_cache structure
2391 * (must be used for caches created using kmem_cache_create)
2392 */
2393void kmem_cache_destroy(struct kmem_cache *s)
2394{
2395	down_write(&slub_lock);
2396	s->refcount--;
2397	if (!s->refcount) {
2398		list_del(&s->list);
2399		up_write(&slub_lock);
2400		if (kmem_cache_close(s))
2401			WARN_ON(1);
2402		sysfs_slab_remove(s);
2403	} else
2404		up_write(&slub_lock);
2405}
2406EXPORT_SYMBOL(kmem_cache_destroy);
2407
2408/********************************************************************
2409 *		Kmalloc subsystem
2410 *******************************************************************/
2411
2412struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1] __cacheline_aligned;
2413EXPORT_SYMBOL(kmalloc_caches);
2414
2415static int __init setup_slub_min_order(char *str)
2416{
2417	get_option(&str, &slub_min_order);
2418
2419	return 1;
2420}
2421
2422__setup("slub_min_order=", setup_slub_min_order);
2423
2424static int __init setup_slub_max_order(char *str)
2425{
2426	get_option(&str, &slub_max_order);
2427
2428	return 1;
2429}
2430
2431__setup("slub_max_order=", setup_slub_max_order);
2432
2433static int __init setup_slub_min_objects(char *str)
2434{
2435	get_option(&str, &slub_min_objects);
2436
2437	return 1;
2438}
2439
2440__setup("slub_min_objects=", setup_slub_min_objects);
2441
2442static int __init setup_slub_nomerge(char *str)
2443{
2444	slub_nomerge = 1;
2445	return 1;
2446}
2447
2448__setup("slub_nomerge", setup_slub_nomerge);
2449
2450static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
2451		const char *name, int size, gfp_t gfp_flags)
2452{
2453	unsigned int flags = 0;
2454
2455	if (gfp_flags & SLUB_DMA)
2456		flags = SLAB_CACHE_DMA;
2457
2458	down_write(&slub_lock);
2459	if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
2460			flags | __KMALLOC_CACHE, NULL))
2461		goto panic;
2462
2463	list_add(&s->list, &slab_caches);
2464	up_write(&slub_lock);
2465	if (sysfs_slab_add(s))
2466		goto panic;
2467	return s;
2468
2469panic:
2470	panic("Creation of kmalloc slab %s size=%d failed.\n", name, size);
2471}
2472
2473#ifdef CONFIG_ZONE_DMA
2474static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT + 1];
2475
2476static void sysfs_add_func(struct work_struct *w)
2477{
2478	struct kmem_cache *s;
2479
2480	down_write(&slub_lock);
2481	list_for_each_entry(s, &slab_caches, list) {
2482		if (s->flags & __SYSFS_ADD_DEFERRED) {
2483			s->flags &= ~__SYSFS_ADD_DEFERRED;
2484			sysfs_slab_add(s);
2485		}
2486	}
2487	up_write(&slub_lock);
2488}
2489
2490static DECLARE_WORK(sysfs_add_work, sysfs_add_func);
2491
2492static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
2493{
2494	struct kmem_cache *s;
2495	char *text;
2496	size_t realsize;
2497
2498	s = kmalloc_caches_dma[index];
2499	if (s)
2500		return s;
2501
2502	/* Dynamically create dma cache */
2503	if (flags & __GFP_WAIT)
2504		down_write(&slub_lock);
2505	else {
2506		if (!down_write_trylock(&slub_lock))
2507			goto out;
2508	}
2509
2510	if (kmalloc_caches_dma[index])
2511		goto unlock_out;
2512
2513	realsize = kmalloc_caches[index].objsize;
2514	text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d",
2515			 (unsigned int)realsize);
2516	s = kmalloc(kmem_size, flags & ~SLUB_DMA);
2517
2518	if (!s || !text || !kmem_cache_open(s, flags, text,
2519			realsize, ARCH_KMALLOC_MINALIGN,
2520			SLAB_CACHE_DMA|__SYSFS_ADD_DEFERRED, NULL)) {
2521		kfree(s);
2522		kfree(text);
2523		goto unlock_out;
2524	}
2525
2526	list_add(&s->list, &slab_caches);
2527	kmalloc_caches_dma[index] = s;
2528
2529	schedule_work(&sysfs_add_work);
2530
2531unlock_out:
2532	up_write(&slub_lock);
2533out:
2534	return kmalloc_caches_dma[index];
2535}
2536#endif
2537
2538/*
2539 * Conversion table for small slab sizes / 8 to the index in the
2540 * kmalloc array. This is necessary for slabs < 192 since we have non power
2541 * of two cache sizes there. The size of larger slabs can be determined using
2542 * fls.
2543 */
2544static s8 size_index[24] = {
2545	3,	/* 8 */
2546	4,	/* 16 */
2547	5,	/* 24 */
2548	5,	/* 32 */
2549	6,	/* 40 */
2550	6,	/* 48 */
2551	6,	/* 56 */
2552	6,	/* 64 */
2553	1,	/* 72 */
2554	1,	/* 80 */
2555	1,	/* 88 */
2556	1,	/* 96 */
2557	7,	/* 104 */
2558	7,	/* 112 */
2559	7,	/* 120 */
2560	7,	/* 128 */
2561	2,	/* 136 */
2562	2,	/* 144 */
2563	2,	/* 152 */
2564	2,	/* 160 */
2565	2,	/* 168 */
2566	2,	/* 176 */
2567	2,	/* 184 */
2568	2	/* 192 */
2569};
2570
2571static struct kmem_cache *get_slab(size_t size, gfp_t flags)
2572{
2573	int index;
2574
2575	if (size <= 192) {
2576		if (!size)
2577			return ZERO_SIZE_PTR;
2578
2579		index = size_index[(size - 1) / 8];
2580	} else
2581		index = fls(size - 1);
2582
2583#ifdef CONFIG_ZONE_DMA
2584	if (unlikely((flags & SLUB_DMA)))
2585		return dma_kmalloc_cache(index, flags);
2586
2587#endif
2588	return &kmalloc_caches[index];
2589}
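
/*
 * Example (illustrative): a 100 byte request yields
 * size_index[(100 - 1) / 8] == size_index[12] == 7, i.e. the 128 byte
 * kmalloc cache. A 1000 byte request takes the fls() path instead:
 * fls(999) == 10 selects the 1024 byte cache.
 */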
2590
2591void *__kmalloc(size_t size, gfp_t flags)
2592{
2593	struct kmem_cache *s;
2594
2595	if (unlikely(size > PAGE_SIZE))
2596		return kmalloc_large(size, flags);
2597
2598	s = get_slab(size, flags);
2599
2600	if (unlikely(ZERO_OR_NULL_PTR(s)))
2601		return s;
2602
2603	return slab_alloc(s, flags, -1, __builtin_return_address(0));
2604}
2605EXPORT_SYMBOL(__kmalloc);
2606
2607static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
2608{
2609	struct page *page = alloc_pages_node(node, flags | __GFP_COMP,
2610						get_order(size));
2611
2612	if (page)
2613		return page_address(page);
2614	else
2615		return NULL;
2616}
2617
2618#ifdef CONFIG_NUMA
2619void *__kmalloc_node(size_t size, gfp_t flags, int node)
2620{
2621	struct kmem_cache *s;
2622
2623	if (unlikely(size > PAGE_SIZE))
2624		return kmalloc_large_node(size, flags, node);
2625
2626	s = get_slab(size, flags);
2627
2628	if (unlikely(ZERO_OR_NULL_PTR(s)))
2629		return s;
2630
2631	return slab_alloc(s, flags, node, __builtin_return_address(0));
2632}
2633EXPORT_SYMBOL(__kmalloc_node);
2634#endif
2635
2636size_t ksize(const void *object)
2637{
2638	struct page *page;
2639	struct kmem_cache *s;
2640
2641	if (unlikely(object == ZERO_SIZE_PTR))
2642		return 0;
2643
2644	page = virt_to_head_page(object);
2645
2646	if (unlikely(!PageSlab(page)))
2647		return PAGE_SIZE << compound_order(page);
2648
2649	s = page->slab;
2650
2651#ifdef CONFIG_SLUB_DEBUG
2652	/*
2653	 * Debugging requires use of the padding between object
2654	 * and whatever may come after it.
2655	 */
2656	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
2657		return s->objsize;
2658
2659#endif
2660	/*
2661	 * If we have the need to store the freelist pointer
2662	 * back there or track user information then we can
2663	 * only use the space before that information.
2664	 */
2665	if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
2666		return s->inuse;
2667	/*
2668	 * Else we can use all the padding etc for the allocation
2669	 */
2670	return s->size;
2671}
2672EXPORT_SYMBOL(ksize);
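
/*
 * For example, ksize() on a kmalloc(100, GFP_KERNEL) allocation reports
 * the full usable size of the backing 128 byte cache object, unless
 * debugging needs the space after the object as described above.
 */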
2673
2674void kfree(const void *x)
2675{
2676	struct page *page;
2677	void *object = (void *)x;
2678
2679	if (unlikely(ZERO_OR_NULL_PTR(x)))
2680		return;
2681
2682	page = virt_to_head_page(x);
2683	if (unlikely(!PageSlab(page))) {
2684		put_page(page);
2685		return;
2686	}
2687	slab_free(page->slab, page, object, __builtin_return_address(0));
2688}
2689EXPORT_SYMBOL(kfree);
2690
2691/*
2692 * kmem_cache_shrink removes empty slabs from the partial lists and sorts
2693 * the remaining slabs by the number of items in use. The slabs with the
2694 * most items in use come first. New allocations will then fill those up
2695 * and thus they can be removed from the partial lists.
2696 *
2697 * The slabs with the least items in use are placed last. They are thus
2698 * allocated from last, which increases the chance that their remaining
2699 * objects are freed and the slabs can eventually be discarded.
2700 */
2701int kmem_cache_shrink(struct kmem_cache *s)
2702{
2703	int node;
2704	int i;
2705	struct kmem_cache_node *n;
2706	struct page *page;
2707	struct page *t;
2708	struct list_head *slabs_by_inuse =
2709		kmalloc(sizeof(struct list_head) * s->objects, GFP_KERNEL);
2710	unsigned long flags;
2711
2712	if (!slabs_by_inuse)
2713		return -ENOMEM;
2714
2715	flush_all(s);
2716	for_each_node_state(node, N_NORMAL_MEMORY) {
2717		n = get_node(s, node);
2718
2719		if (!n->nr_partial)
2720			continue;
2721
2722		for (i = 0; i < s->objects; i++)
2723			INIT_LIST_HEAD(slabs_by_inuse + i);
2724
2725		spin_lock_irqsave(&n->list_lock, flags);
2726
2727		/*
2728		 * Build lists indexed by the items in use in each slab.
2729		 *
2730		 * Note that concurrent frees may occur while we hold the
2731		 * list_lock. page->inuse here is the upper limit.
2732		 */
2733		list_for_each_entry_safe(page, t, &n->partial, lru) {
2734			if (!page->inuse && slab_trylock(page)) {
2735				/*
2736				 * Must hold slab lock here because slab_free
2737				 * may have freed the last object and be
2738				 * waiting to release the slab.
2739				 */
2740				list_del(&page->lru);
2741				n->nr_partial--;
2742				slab_unlock(page);
2743				discard_slab(s, page);
2744			} else {
2745				list_move(&page->lru,
2746				slabs_by_inuse + page->inuse);
2747			}
2748		}
2749
2750		/*
2751		 * Rebuild the partial list with the slabs filled up most
2752		 * first and the least used slabs at the end.
2753		 */
2754		for (i = s->objects - 1; i >= 0; i--)
2755			list_splice(slabs_by_inuse + i, n->partial.prev);
2756
2757		spin_unlock_irqrestore(&n->list_lock, flags);
2758	}
2759
2760	kfree(slabs_by_inuse);
2761	return 0;
2762}
2763EXPORT_SYMBOL(kmem_cache_shrink);
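
/*
 * Example (illustrative): partial slabs with inuse counts 3, 0, 7 and 1
 * are bucketed by count; the empty slab is discarded and the list is
 * rebuilt in the order 7, 3, 1 so that new allocations fill the fullest
 * slabs first.
 */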
2764
2765#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
2766static int slab_mem_going_offline_callback(void *arg)
2767{
2768	struct kmem_cache *s;
2769
2770	down_read(&slub_lock);
2771	list_for_each_entry(s, &slab_caches, list)
2772		kmem_cache_shrink(s);
2773	up_read(&slub_lock);
2774
2775	return 0;
2776}
2777
2778static void slab_mem_offline_callback(void *arg)
2779{
2780	struct kmem_cache_node *n;
2781	struct kmem_cache *s;
2782	struct memory_notify *marg = arg;
2783	int offline_node;
2784
2785	offline_node = marg->status_change_nid;
2786
2787	/*
2788	 * If the node still has available memory then we still need its
2789	 * kmem_cache_node structure, so there is nothing to tear down.
2790	 */
2791	if (offline_node < 0)
2792		return;
2793
2794	down_read(&slub_lock);
2795	list_for_each_entry(s, &slab_caches, list) {
2796		n = get_node(s, offline_node);
2797		if (n) {
2798			/*
2799			 * if n->nr_slabs > 0, slabs still exist on the node
2800			 * that is going down. We were unable to free them,
2801			 * and the offline_pages() function shouldn't call this
2802			 * callback. So, we must fail.
2803			 */
2804			BUG_ON(atomic_long_read(&n->nr_slabs));
2805
2806			s->node[offline_node] = NULL;
2807			kmem_cache_free(kmalloc_caches, n);
2808		}
2809	}
2810	up_read(&slub_lock);
2811}
2812
2813static int slab_mem_going_online_callback(void *arg)
2814{
2815	struct kmem_cache_node *n;
2816	struct kmem_cache *s;
2817	struct memory_notify *marg = arg;
2818	int nid = marg->status_change_nid;
2819	int ret = 0;
2820
2821	/*
2822	 * If the node's memory is already available, then kmem_cache_node is
2823	 * already created. Nothing to do.
2824	 */
2825	if (nid < 0)
2826		return 0;
2827
2828	/*
2829	 * We are bringing a node online. No memory is available yet. We must
2830	 * allocate a kmem_cache_node structure in order to bring the node
2831	 * online.
2832	 */
2833	down_read(&slub_lock);
2834	list_for_each_entry(s, &slab_caches, list) {
2835		/*
2836		 * XXX: kmem_cache_alloc_node will fallback to other nodes
2837		 *      since memory is not yet available from the node that
2838		 *      is brought up.
2839		 */
2840		n = kmem_cache_alloc(kmalloc_caches, GFP_KERNEL);
2841		if (!n) {
2842			ret = -ENOMEM;
2843			goto out;
2844		}
2845		init_kmem_cache_node(n);
2846		s->node[nid] = n;
2847	}
2848out:
2849	up_read(&slub_lock);
2850	return ret;
2851}
2852
2853static int slab_memory_callback(struct notifier_block *self,
2854				unsigned long action, void *arg)
2855{
2856	int ret = 0;
2857
2858	switch (action) {
2859	case MEM_GOING_ONLINE:
2860		ret = slab_mem_going_online_callback(arg);
2861		break;
2862	case MEM_GOING_OFFLINE:
2863		ret = slab_mem_going_offline_callback(arg);
2864		break;
2865	case MEM_OFFLINE:
2866	case MEM_CANCEL_ONLINE:
2867		slab_mem_offline_callback(arg);
2868		break;
2869	case MEM_ONLINE:
2870	case MEM_CANCEL_OFFLINE:
2871		break;
2872	}
2873
2874	ret = notifier_from_errno(ret);
2875	return ret;
2876}
2877
2878#endif /* CONFIG_MEMORY_HOTPLUG */
2879
2880/********************************************************************
2881 *			Basic setup of slabs
2882 *******************************************************************/
2883
2884void __init kmem_cache_init(void)
2885{
2886	int i;
2887	int caches = 0;
2888
2889	init_alloc_cpu();
2890
2891#ifdef CONFIG_NUMA
2892	/*
2893	 * Must first have the slab cache available for the allocations of the
2894	 * struct kmem_cache_node's. There is special bootstrap code in
2895	 * kmem_cache_open for slab_state == DOWN.
2896	 */
2897	create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
2898		sizeof(struct kmem_cache_node), GFP_KERNEL);
2899	kmalloc_caches[0].refcount = -1;
2900	caches++;
2901
2902	hotplug_memory_notifier(slab_memory_callback, 1);
2903#endif
2904
2905	/* Able to allocate the per node structures */
2906	slab_state = PARTIAL;
2907
2908	/* Caches that are not of the two-to-the-power-of size */
2909	if (KMALLOC_MIN_SIZE <= 64) {
2910		create_kmalloc_cache(&kmalloc_caches[1],
2911				"kmalloc-96", 96, GFP_KERNEL);
2912		caches++;
2913	}
2914	if (KMALLOC_MIN_SIZE <= 128) {
2915		create_kmalloc_cache(&kmalloc_caches[2],
2916				"kmalloc-192", 192, GFP_KERNEL);
2917		caches++;
2918	}
2919
2920	for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++) {
2921		create_kmalloc_cache(&kmalloc_caches[i],
2922			"kmalloc", 1 << i, GFP_KERNEL);
2923		caches++;
2924	}
2925
2926
2927	/*
2928	 * Patch up the size_index table if we have strange large alignment
2929	 * requirements for the kmalloc array. This is only the case for
2930	 * MIPS it seems. The standard arches will not generate any code here.
2931	 *
2932	 * Largest permitted alignment is 256 bytes due to the way we
2933	 * handle the index determination for the smaller caches.
2934	 *
2935	 * Make sure that nothing crazy happens if someone starts tinkering
2936	 * around with ARCH_KMALLOC_MINALIGN.
2937	 */
2938	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
2939		(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
2940
2941	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8)
2942		size_index[(i - 1) / 8] = KMALLOC_SHIFT_LOW;
2943
2944	slab_state = UP;
2945
2946	/* Provide the correct kmalloc names now that the caches are up */
2947	for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++)
2948		kmalloc_caches[i].name =
2949			kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
2950
2951#ifdef CONFIG_SMP
2952	register_cpu_notifier(&slab_notifier);
2953	kmem_size = offsetof(struct kmem_cache, cpu_slab) +
2954				nr_cpu_ids * sizeof(struct kmem_cache_cpu *);
2955#else
2956	kmem_size = sizeof(struct kmem_cache);
2957#endif
2958
2959	printk(KERN_INFO
2960		"SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
2961		" CPUs=%d, Nodes=%d\n",
2962		caches, cache_line_size(),
2963		slub_min_order, slub_max_order, slub_min_objects,
2964		nr_cpu_ids, nr_node_ids);
2965}
2966
2967/*
2968 * Find a mergeable slab cache
2969 */
2970static int slab_unmergeable(struct kmem_cache *s)
2971{
2972	if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
2973		return 1;
2974
2975	if ((s->flags & __PAGE_ALLOC_FALLBACK))
2976		return 1;
2977
2978	if (s->ctor)
2979		return 1;
2980
2981	/*
2982	 * We may have set a slab to be unmergeable during bootstrap.
2983	 */
2984	if (s->refcount < 0)
2985		return 1;
2986
2987	return 0;
2988}
2989
2990static struct kmem_cache *find_mergeable(size_t size,
2991		size_t align, unsigned long flags, const char *name,
2992		void (*ctor)(struct kmem_cache *, void *))
2993{
2994	struct kmem_cache *s;
2995
2996	if (slub_nomerge || (flags & SLUB_NEVER_MERGE))
2997		return NULL;
2998
2999	if (ctor)
3000		return NULL;
3001
3002	size = ALIGN(size, sizeof(void *));
3003	align = calculate_alignment(flags, align, size);
3004	size = ALIGN(size, align);
3005	flags = kmem_cache_flags(size, flags, name, NULL);
3006
3007	list_for_each_entry(s, &slab_caches, list) {
3008		if (slab_unmergeable(s))
3009			continue;
3010
3011		if (size > s->size)
3012			continue;
3013
3014		if ((flags & SLUB_MERGE_SAME) != (s->flags & SLUB_MERGE_SAME))
3015			continue;
3016		/*
3017		 * Check if alignment is compatible.
3018		 * Courtesy of Adrian Drzewiecki
3019		 */
3020		if ((s->size & ~(align - 1)) != s->size)
3021			continue;
3022
3023		if (s->size - size >= sizeof(void *))
3024			continue;
3025
3026		return s;
3027	}
3028	return NULL;
3029}
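
/*
 * Example (sketch): in a non-debug build, kmem_cache_create() of a
 * 190 byte cache with default flags and no constructor can be merged
 * into the existing kmalloc-192 cache: the aligned size is 192, which
 * fits, and no space is wasted.
 */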
3030
3031struct kmem_cache *kmem_cache_create(const char *name, size_t size,
3032		size_t align, unsigned long flags,
3033		void (*ctor)(struct kmem_cache *, void *))
3034{
3035	struct kmem_cache *s;
3036
3037	down_write(&slub_lock);
3038	s = find_mergeable(size, align, flags, name, ctor);
3039	if (s) {
3040		int cpu;
3041
3042		s->refcount++;
3043		/*
3044		 * Adjust the object sizes so that we clear
3045		 * the complete object on kzalloc.
3046		 */
3047		s->objsize = max(s->objsize, (int)size);
3048
3049		/*
3050		 * And then we need to update the object size in the
3051		 * per cpu structures
3052		 */
3053		for_each_online_cpu(cpu)
3054			get_cpu_slab(s, cpu)->objsize = s->objsize;
3055
3056		s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
3057		up_write(&slub_lock);
3058
3059		if (sysfs_slab_alias(s, name))
3060			goto err;
3061		return s;
3062	}
3063
3064	s = kmalloc(kmem_size, GFP_KERNEL);
3065	if (s) {
3066		if (kmem_cache_open(s, GFP_KERNEL, name,
3067				size, align, flags, ctor)) {
3068			list_add(&s->list, &slab_caches);
3069			up_write(&slub_lock);
3070			if (sysfs_slab_add(s))
3071				goto err;
3072			return s;
3073		}
3074		kfree(s);
3075	}
3076	up_write(&slub_lock);
3077
3078err:
3079	if (flags & SLAB_PANIC)
3080		panic("Cannot create slabcache %s\n", name);
3081	else
3082		s = NULL;
3083	return s;
3084}
3085EXPORT_SYMBOL(kmem_cache_create);
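
/*
 * Typical usage (sketch; "struct foo" is a made-up client structure):
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *					SLAB_HWCACHE_ALIGN, NULL);
 *	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, f);
 *	kmem_cache_destroy(foo_cache);
 */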
3086
3087#ifdef CONFIG_SMP
3088/*
3089 * Use the cpu notifier to ensure that the cpu slabs are flushed when
3090 * necessary.
3091 */
3092static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
3093		unsigned long action, void *hcpu)
3094{
3095	long cpu = (long)hcpu;
3096	struct kmem_cache *s;
3097	unsigned long flags;
3098
3099	switch (action) {
3100	case CPU_UP_PREPARE:
3101	case CPU_UP_PREPARE_FROZEN:
3102		init_alloc_cpu_cpu(cpu);
3103		down_read(&slub_lock);
3104		list_for_each_entry(s, &slab_caches, list)
3105			s->cpu_slab[cpu] = alloc_kmem_cache_cpu(s, cpu,
3106							GFP_KERNEL);
3107		up_read(&slub_lock);
3108		break;
3109
3110	case CPU_UP_CANCELED:
3111	case CPU_UP_CANCELED_FROZEN:
3112	case CPU_DEAD:
3113	case CPU_DEAD_FROZEN:
3114		down_read(&slub_lock);
3115		list_for_each_entry(s, &slab_caches, list) {
3116			struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
3117
3118			local_irq_save(flags);
3119			__flush_cpu_slab(s, cpu);
3120			local_irq_restore(flags);
3121			free_kmem_cache_cpu(c, cpu);
3122			s->cpu_slab[cpu] = NULL;
3123		}
3124		up_read(&slub_lock);
3125		break;
3126	default:
3127		break;
3128	}
3129	return NOTIFY_OK;
3130}
3131
3132static struct notifier_block __cpuinitdata slab_notifier = {
3133	.notifier_call = slab_cpuup_callback
3134};
3135
3136#endif
3137
3138void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
3139{
3140	struct kmem_cache *s;
3141
3142	if (unlikely(size > PAGE_SIZE))
3143		return kmalloc_large(size, gfpflags);
3144
3145	s = get_slab(size, gfpflags);
3146
3147	if (unlikely(ZERO_OR_NULL_PTR(s)))
3148		return s;
3149
3150	return slab_alloc(s, gfpflags, -1, caller);
3151}
3152
3153void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
3154					int node, void *caller)
3155{
3156	struct kmem_cache *s;
3157
3158	if (unlikely(size > PAGE_SIZE))
3159		return kmalloc_large_node(size, gfpflags, node);
3160
3161	s = get_slab(size, gfpflags);
3162
3163	if (unlikely(ZERO_OR_NULL_PTR(s)))
3164		return s;
3165
3166	return slab_alloc(s, gfpflags, node, caller);
3167}
3168
3169#if (defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)) || defined(CONFIG_SLABINFO)
3170static unsigned long count_partial(struct kmem_cache_node *n)
3171{
3172	unsigned long flags;
3173	unsigned long x = 0;
3174	struct page *page;
3175
3176	spin_lock_irqsave(&n->list_lock, flags);
3177	list_for_each_entry(page, &n->partial, lru)
3178		x += page->inuse;
3179	spin_unlock_irqrestore(&n->list_lock, flags);
3180	return x;
3181}
3182#endif
3183
3184#if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
3185static int validate_slab(struct kmem_cache *s, struct page *page,
3186						unsigned long *map)
3187{
3188	void *p;
3189	void *addr = page_address(page);
3190
3191	if (!check_slab(s, page) ||
3192			!on_freelist(s, page, NULL))
3193		return 0;
3194
3195	/* Now we know that a valid freelist exists */
3196	bitmap_zero(map, s->objects);
3197
3198	for_each_free_object(p, s, page->freelist) {
3199		set_bit(slab_index(p, s, addr), map);
3200		if (!check_object(s, page, p, 0))
3201			return 0;
3202	}
3203
3204	for_each_object(p, s, addr)
3205		if (!test_bit(slab_index(p, s, addr), map))
3206			if (!check_object(s, page, p, 1))
3207				return 0;
3208	return 1;
3209}
3210
3211static void validate_slab_slab(struct kmem_cache *s, struct page *page,
3212						unsigned long *map)
3213{
3214	if (slab_trylock(page)) {
3215		validate_slab(s, page, map);
3216		slab_unlock(page);
3217	} else
3218		printk(KERN_INFO "SLUB %s: Skipped busy slab 0x%p\n",
3219			s->name, page);
3220
3221	if (s->flags & DEBUG_DEFAULT_FLAGS) {
3222		if (!SlabDebug(page))
3223			printk(KERN_ERR "SLUB %s: SlabDebug not set "
3224				"on slab 0x%p\n", s->name, page);
3225	} else {
3226		if (SlabDebug(page))
3227			printk(KERN_ERR "SLUB %s: SlabDebug set on "
3228				"slab 0x%p\n", s->name, page);
3229	}
3230}
3231
3232static int validate_slab_node(struct kmem_cache *s,
3233		struct kmem_cache_node *n, unsigned long *map)
3234{
3235	unsigned long count = 0;
3236	struct page *page;
3237	unsigned long flags;
3238
3239	spin_lock_irqsave(&n->list_lock, flags);
3240
3241	list_for_each_entry(page, &n->partial, lru) {
3242		validate_slab_slab(s, page, map);
3243		count++;
3244	}
3245	if (count != n->nr_partial)
3246		printk(KERN_ERR "SLUB %s: %ld partial slabs counted but "
3247			"counter=%ld\n", s->name, count, n->nr_partial);
3248
3249	if (!(s->flags & SLAB_STORE_USER))
3250		goto out;
3251
3252	list_for_each_entry(page, &n->full, lru) {
3253		validate_slab_slab(s, page, map);
3254		count++;
3255	}
3256	if (count != atomic_long_read(&n->nr_slabs))
3257		printk(KERN_ERR "SLUB: %s %ld slabs counted but "
3258			"counter=%ld\n", s->name, count,
3259			atomic_long_read(&n->nr_slabs));
3260
3261out:
3262	spin_unlock_irqrestore(&n->list_lock, flags);
3263	return count;
3264}
3265
3266static long validate_slab_cache(struct kmem_cache *s)
3267{
3268	int node;
3269	unsigned long count = 0;
3270	unsigned long *map = kmalloc(BITS_TO_LONGS(s->objects) *
3271				sizeof(unsigned long), GFP_KERNEL);
3272
3273	if (!map)
3274		return -ENOMEM;
3275
3276	flush_all(s);
3277	for_each_node_state(node, N_NORMAL_MEMORY) {
3278		struct kmem_cache_node *n = get_node(s, node);
3279
3280		count += validate_slab_node(s, n, map);
3281	}
3282	kfree(map);
3283	return count;
3284}
3285
3286#ifdef SLUB_RESILIENCY_TEST
3287static void resiliency_test(void)
3288{
3289	u8 *p;
3290
3291	printk(KERN_ERR "SLUB resiliency testing\n");
3292	printk(KERN_ERR "-----------------------\n");
3293	printk(KERN_ERR "A. Corruption after allocation\n");
3294
3295	p = kzalloc(16, GFP_KERNEL);
3296	p[16] = 0x12;
3297	printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer"
3298			" 0x12->0x%p\n\n", p + 16);
3299
3300	validate_slab_cache(kmalloc_caches + 4);
3301
3302	/* Hmmm... The next two are dangerous */
3303	p = kzalloc(32, GFP_KERNEL);
3304	p[32 + sizeof(void *)] = 0x34;
3305	printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab"
3306			" 0x34 -> 0x%p\n", p);
3307	printk(KERN_ERR
3308		"If allocated object is overwritten then not detectable\n\n");
3309
3310	validate_slab_cache(kmalloc_caches + 5);
3311	p = kzalloc(64, GFP_KERNEL);
3312	p += 64 + (get_cycles() & 0xff) * sizeof(void *);
3313	*p = 0x56;
3314	printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
3315									p);
3316	printk(KERN_ERR
3317		"If allocated object is overwritten then not detectable\n\n");
3318	validate_slab_cache(kmalloc_caches + 6);
3319
3320	printk(KERN_ERR "\nB. Corruption after free\n");
3321	p = kzalloc(128, GFP_KERNEL);
3322	kfree(p);
3323	*p = 0x78;
3324	printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
3325	validate_slab_cache(kmalloc_caches + 7);
3326
3327	p = kzalloc(256, GFP_KERNEL);
3328	kfree(p);
3329	p[50] = 0x9a;
3330	printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n",
3331			p);
3332	validate_slab_cache(kmalloc_caches + 8);
3333
3334	p = kzalloc(512, GFP_KERNEL);
3335	kfree(p);
3336	p[512] = 0xab;
3337	printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
3338	validate_slab_cache(kmalloc_caches + 9);
3339}
3340#else
3341static void resiliency_test(void) {}
3342#endif
3343
3344/*
3345 * Generate lists of code addresses where slabcache objects are allocated
3346 * and freed.
3347 */
3348
3349struct location {
3350	unsigned long count;
3351	void *addr;
3352	long long sum_time;
3353	long min_time;
3354	long max_time;
3355	long min_pid;
3356	long max_pid;
3357	cpumask_t cpus;
3358	nodemask_t nodes;
3359};
3360
3361struct loc_track {
3362	unsigned long max;
3363	unsigned long count;
3364	struct location *loc;
3365};
3366
3367static void free_loc_track(struct loc_track *t)
3368{
3369	if (t->max)
3370		free_pages((unsigned long)t->loc,
3371			get_order(sizeof(struct location) * t->max));
3372}
3373
3374static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
3375{
3376	struct location *l;
3377	int order;
3378
3379	order = get_order(sizeof(struct location) * max);
3380
3381	l = (void *)__get_free_pages(flags, order);
3382	if (!l)
3383		return 0;
3384
3385	if (t->count) {
3386		memcpy(l, t->loc, sizeof(struct location) * t->count);
3387		free_loc_track(t);
3388	}
3389	t->max = max;
3390	t->loc = l;
3391	return 1;
3392}
3393
3394static int add_location(struct loc_track *t, struct kmem_cache *s,
3395				const struct track *track)
3396{
3397	long start, end, pos;
3398	struct location *l;
3399	void *caddr;
3400	unsigned long age = jiffies - track->when;
3401
3402	start = -1;
3403	end = t->count;
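
	/*
	 * Binary search for track->addr in the sorted t->loc array.
	 * Invariant: entries at or below "start" have a smaller address,
	 * entries at or above "end" have a larger one or do not exist.
	 */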
3404
3405	for ( ; ; ) {
3406		pos = start + (end - start + 1) / 2;
3407
3408		/*
3409		 * There is nothing at "end". If we end up there
3410		 * we need to insert the new element before "end".
3411		 */
3412		if (pos == end)
3413			break;
3414
3415		caddr = t->loc[pos].addr;
3416		if (track->addr == caddr) {
3417
3418			l = &t->loc[pos];
3419			l->count++;
3420			if (track->when) {
3421				l->sum_time += age;
3422				if (age < l->min_time)
3423					l->min_time = age;
3424				if (age > l->max_time)
3425					l->max_time = age;
3426
3427				if (track->pid < l->min_pid)
3428					l->min_pid = track->pid;
3429				if (track->pid > l->max_pid)
3430					l->max_pid = track->pid;
3431
3432				cpu_set(track->cpu, l->cpus);
3433			}
3434			node_set(page_to_nid(virt_to_page(track)), l->nodes);
3435			return 1;
3436		}
3437
3438		if (track->addr < caddr)
3439			end = pos;
3440		else
3441			start = pos;
3442	}
3443
3444	/*
3445	 * Not found. Insert new tracking element.
3446	 */
3447	if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
3448		return 0;
3449
3450	l = t->loc + pos;
3451	if (pos < t->count)
3452		memmove(l + 1, l,
3453			(t->count - pos) * sizeof(struct location));
3454	t->count++;
3455	l->count = 1;
3456	l->addr = track->addr;
3457	l->sum_time = age;
3458	l->min_time = age;
3459	l->max_time = age;
3460	l->min_pid = track->pid;
3461	l->max_pid = track->pid;
3462	cpus_clear(l->cpus);
3463	cpu_set(track->cpu, l->cpus);
3464	nodes_clear(l->nodes);
3465	node_set(page_to_nid(virt_to_page(track)), l->nodes);
3466	return 1;
3467}
3468
3469static void process_slab(struct loc_track *t, struct kmem_cache *s,
3470		struct page *page, enum track_item alloc)
3471{
3472	void *addr = page_address(page);
3473	DECLARE_BITMAP(map, s->objects);
3474	void *p;
3475
3476	bitmap_zero(map, s->objects);
3477	for_each_free_object(p, s, page->freelist)
3478		set_bit(slab_index(p, s, addr), map);
3479
3480	for_each_object(p, s, addr)
3481		if (!test_bit(slab_index(p, s, addr), map))
3482			add_location(t, s, get_track(s, p, alloc));
3483}
3484
3485static int list_locations(struct kmem_cache *s, char *buf,
3486					enum track_item alloc)
3487{
3488	int len = 0;
3489	unsigned long i;
3490	struct loc_track t = { 0, 0, NULL };
3491	int node;
3492
3493	if (!alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
3494			GFP_TEMPORARY))
3495		return sprintf(buf, "Out of memory\n");
3496
3497	/* Push back cpu slabs */
3498	flush_all(s);
3499
3500	for_each_node_state(node, N_NORMAL_MEMORY) {
3501		struct kmem_cache_node *n = get_node(s, node);
3502		unsigned long flags;
3503		struct page *page;
3504
3505		if (!atomic_long_read(&n->nr_slabs))
3506			continue;
3507
3508		spin_lock_irqsave(&n->list_lock, flags);
3509		list_for_each_entry(page, &n->partial, lru)
3510			process_slab(&t, s, page, alloc);
3511		list_for_each_entry(page, &n->full, lru)
3512			process_slab(&t, s, page, alloc);
3513		spin_unlock_irqrestore(&n->list_lock, flags);
3514	}
3515
3516	for (i = 0; i < t.count; i++) {
3517		struct location *l = &t.loc[i];
3518
3519		if (len > PAGE_SIZE - 100)
3520			break;
3521		len += sprintf(buf + len, "%7ld ", l->count);
3522
3523		if (l->addr)
3524			len += sprint_symbol(buf + len, (unsigned long)l->addr);
3525		else
3526			len += sprintf(buf + len, "<not-available>");
3527
3528		if (l->sum_time != l->min_time) {
3529			unsigned long remainder;
3530
3531			len += sprintf(buf + len, " age=%ld/%ld/%ld",
3532			l->min_time,
3533			div_long_long_rem(l->sum_time, l->count, &remainder),
3534			l->max_time);
3535		} else
3536			len += sprintf(buf + len, " age=%ld",
3537				l->min_time);
3538
3539		if (l->min_pid != l->max_pid)
3540			len += sprintf(buf + len, " pid=%ld-%ld",
3541				l->min_pid, l->max_pid);
3542		else
3543			len += sprintf(buf + len, " pid=%ld",
3544				l->min_pid);
3545
3546		if (num_online_cpus() > 1 && !cpus_empty(l->cpus) &&
3547				len < PAGE_SIZE - 60) {
3548			len += sprintf(buf + len, " cpus=");
3549			len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50,
3550					l->cpus);
3551		}
3552
3553		if (num_online_nodes() > 1 && !nodes_empty(l->nodes) &&
3554				len < PAGE_SIZE - 60) {
3555			len += sprintf(buf + len, " nodes=");
3556			len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50,
3557					l->nodes);
3558		}
3559
3560		len += sprintf(buf + len, "\n");
3561	}
3562
3563	free_loc_track(&t);
3564	if (!t.count)
3565		len += sprintf(buf, "No data\n");
3566	return len;
3567}
3568
3569enum slab_stat_type {
3570	SL_FULL,
3571	SL_PARTIAL,
3572	SL_CPU,
3573	SL_OBJECTS
3574};
3575
3576#define SO_FULL		(1 << SL_FULL)
3577#define SO_PARTIAL	(1 << SL_PARTIAL)
3578#define SO_CPU		(1 << SL_CPU)
3579#define SO_OBJECTS	(1 << SL_OBJECTS)
3580
3581static ssize_t show_slab_objects(struct kmem_cache *s,
3582			    char *buf, unsigned long flags)
3583{
3584	unsigned long total = 0;
3585	int cpu;
3586	int node;
3587	int x;
3588	unsigned long *nodes;
3589	unsigned long *per_cpu;
3590
3591	nodes = kzalloc(2 * sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
3592	if (!nodes)
3593		return -ENOMEM;
3594	per_cpu = nodes + nr_node_ids;
3595
3596	for_each_possible_cpu(cpu) {
3597		struct page *page;
3598		struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
3599
3600		if (!c)
3601			continue;
3602
3603		page = c->page;
3604		node = c->node;
3605		if (node < 0)
3606			continue;
3607		if (page) {
3608			if (flags & SO_CPU) {
3609				if (flags & SO_OBJECTS)
3610					x = page->inuse;
3611				else
3612					x = 1;
3613				total += x;
3614				nodes[node] += x;
3615			}
3616			per_cpu[node]++;
3617		}
3618	}
3619
3620	for_each_node_state(node, N_NORMAL_MEMORY) {
3621		struct kmem_cache_node *n = get_node(s, node);
3622
3623		if (flags & SO_PARTIAL) {
3624			if (flags & SO_OBJECTS)
3625				x = count_partial(n);
3626			else
3627				x = n->nr_partial;
3628			total += x;
3629			nodes[node] += x;
3630		}
3631
3632		if (flags & SO_FULL) {
3633			int full_slabs = atomic_long_read(&n->nr_slabs)
3634					- per_cpu[node]
3635					- n->nr_partial;
3636
3637			if (flags & SO_OBJECTS)
3638				x = full_slabs * s->objects;
3639			else
3640				x = full_slabs;
3641			total += x;
3642			nodes[node] += x;
3643		}
3644	}
3645
3646	x = sprintf(buf, "%lu", total);
3647#ifdef CONFIG_NUMA
3648	for_each_node_state(node, N_NORMAL_MEMORY)
3649		if (nodes[node])
3650			x += sprintf(buf + x, " N%d=%lu",
3651					node, nodes[node]);
3652#endif
3653	kfree(nodes);
3654	return x + sprintf(buf + x, "\n");
3655}
3656
3657static int any_slab_objects(struct kmem_cache *s)
3658{
3659	int node;
3660	int cpu;
3661
3662	for_each_possible_cpu(cpu) {
3663		struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
3664
3665		if (c && c->page)
3666			return 1;
3667	}
3668
3669	for_each_online_node(node) {
3670		struct kmem_cache_node *n = get_node(s, node);
3671
3672		if (!n)
3673			continue;
3674
3675		if (n->nr_partial || atomic_long_read(&n->nr_slabs))
3676			return 1;
3677	}
3678	return 0;
3679}
3680
3681#define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
3682#define to_slab(n) container_of(n, struct kmem_cache, kobj)
3683
3684struct slab_attribute {
3685	struct attribute attr;
3686	ssize_t (*show)(struct kmem_cache *s, char *buf);
3687	ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
3688};
3689
3690#define SLAB_ATTR_RO(_name) \
3691	static struct slab_attribute _name##_attr = __ATTR_RO(_name)
3692
3693#define SLAB_ATTR(_name) \
3694	static struct slab_attribute _name##_attr =  \
3695	__ATTR(_name, 0644, _name##_show, _name##_store)
3696
3697static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
3698{
3699	return sprintf(buf, "%d\n", s->size);
3700}
3701SLAB_ATTR_RO(slab_size);
3702
3703static ssize_t align_show(struct kmem_cache *s, char *buf)
3704{
3705	return sprintf(buf, "%d\n", s->align);
3706}
3707SLAB_ATTR_RO(align);
3708
3709static ssize_t object_size_show(struct kmem_cache *s, char *buf)
3710{
3711	return sprintf(buf, "%d\n", s->objsize);
3712}
3713SLAB_ATTR_RO(object_size);
3714
3715static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
3716{
3717	return sprintf(buf, "%d\n", s->objects);
3718}
3719SLAB_ATTR_RO(objs_per_slab);
3720
3721static ssize_t order_show(struct kmem_cache *s, char *buf)
3722{
3723	return sprintf(buf, "%d\n", s->order);
3724}
3725SLAB_ATTR_RO(order);
3726
3727static ssize_t ctor_show(struct kmem_cache *s, char *buf)
3728{
3729	if (s->ctor) {
3730		int n = sprint_symbol(buf, (unsigned long)s->ctor);
3731
3732		return n + sprintf(buf + n, "\n");
3733	}
3734	return 0;
3735}
3736SLAB_ATTR_RO(ctor);
3737
3738static ssize_t aliases_show(struct kmem_cache *s, char *buf)
3739{
3740	return sprintf(buf, "%d\n", s->refcount - 1);
3741}
3742SLAB_ATTR_RO(aliases);
3743
3744static ssize_t slabs_show(struct kmem_cache *s, char *buf)
3745{
3746	return show_slab_objects(s, buf, SO_FULL|SO_PARTIAL|SO_CPU);
3747}
3748SLAB_ATTR_RO(slabs);
3749
3750static ssize_t partial_show(struct kmem_cache *s, char *buf)
3751{
3752	return show_slab_objects(s, buf, SO_PARTIAL);
3753}
3754SLAB_ATTR_RO(partial);
3755
3756static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
3757{
3758	return show_slab_objects(s, buf, SO_CPU);
3759}
3760SLAB_ATTR_RO(cpu_slabs);
3761
3762static ssize_t objects_show(struct kmem_cache *s, char *buf)
3763{
3764	return show_slab_objects(s, buf, SO_FULL|SO_PARTIAL|SO_CPU|SO_OBJECTS);
3765}
3766SLAB_ATTR_RO(objects);
3767
3768static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
3769{
3770	return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE));
3771}
3772
3773static ssize_t sanity_checks_store(struct kmem_cache *s,
3774				const char *buf, size_t length)
3775{
3776	s->flags &= ~SLAB_DEBUG_FREE;
3777	if (buf[0] == '1')
3778		s->flags |= SLAB_DEBUG_FREE;
3779	return length;
3780}
3781SLAB_ATTR(sanity_checks);
3782
3783static ssize_t trace_show(struct kmem_cache *s, char *buf)
3784{
3785	return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE));
3786}
3787
3788static ssize_t trace_store(struct kmem_cache *s, const char *buf,
3789							size_t length)
3790{
3791	s->flags &= ~SLAB_TRACE;
3792	if (buf[0] == '1')
3793		s->flags |= SLAB_TRACE;
3794	return length;
3795}
3796SLAB_ATTR(trace);
3797
3798static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
3799{
3800	return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
3801}
3802
3803static ssize_t reclaim_account_store(struct kmem_cache *s,
3804				const char *buf, size_t length)
3805{
3806	s->flags &= ~SLAB_RECLAIM_ACCOUNT;
3807	if (buf[0] == '1')
3808		s->flags |= SLAB_RECLAIM_ACCOUNT;
3809	return length;
3810}
3811SLAB_ATTR(reclaim_account);
3812
3813static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
3814{
3815	return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
3816}
3817SLAB_ATTR_RO(hwcache_align);
3818
3819#ifdef CONFIG_ZONE_DMA
3820static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
3821{
3822	return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
3823}
3824SLAB_ATTR_RO(cache_dma);
3825#endif
3826
3827static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
3828{
3829	return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
3830}
3831SLAB_ATTR_RO(destroy_by_rcu);
3832
3833static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
3834{
3835	return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
3836}
3837
3838static ssize_t red_zone_store(struct kmem_cache *s,
3839				const char *buf, size_t length)
3840{
3841	if (any_slab_objects(s))
3842		return -EBUSY;
3843
3844	s->flags &= ~SLAB_RED_ZONE;
3845	if (buf[0] == '1')
3846		s->flags |= SLAB_RED_ZONE;
3847	calculate_sizes(s);
3848	return length;
3849}
3850SLAB_ATTR(red_zone);
3851
3852static ssize_t poison_show(struct kmem_cache *s, char *buf)
3853{
3854	return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON));
3855}
3856
3857static ssize_t poison_store(struct kmem_cache *s,
3858				const char *buf, size_t length)
3859{
3860	if (any_slab_objects(s))
3861		return -EBUSY;
3862
3863	s->flags &= ~SLAB_POISON;
3864	if (buf[0] == '1')
3865		s->flags |= SLAB_POISON;
3866	calculate_sizes(s);
3867	return length;
3868}
3869SLAB_ATTR(poison);
3870
3871static ssize_t store_user_show(struct kmem_cache *s, char *buf)
3872{
3873	return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
3874}
3875
3876static ssize_t store_user_store(struct kmem_cache *s,
3877				const char *buf, size_t length)
3878{
3879	if (any_slab_objects(s))
3880		return -EBUSY;
3881
3882	s->flags &= ~SLAB_STORE_USER;
3883	if (buf[0] == '1')
3884		s->flags |= SLAB_STORE_USER;
3885	calculate_sizes(s);
3886	return length;
3887}
3888SLAB_ATTR(store_user);
3889
3890static ssize_t validate_show(struct kmem_cache *s, char *buf)
3891{
3892	return 0;
3893}
3894
3895static ssize_t validate_store(struct kmem_cache *s,
3896			const char *buf, size_t length)
3897{
3898	int ret = -EINVAL;
3899
3900	if (buf[0] == '1') {
3901		ret = validate_slab_cache(s);
3902		if (ret >= 0)
3903			ret = length;
3904	}
3905	return ret;
3906}
3907SLAB_ATTR(validate);
3908
3909static ssize_t shrink_show(struct kmem_cache *s, char *buf)
3910{
3911	return 0;
3912}
3913
3914static ssize_t shrink_store(struct kmem_cache *s,
3915			const char *buf, size_t length)
3916{
3917	if (buf[0] == '1') {
3918		int rc = kmem_cache_shrink(s);
3919
3920		if (rc)
3921			return rc;
3922	} else
3923		return -EINVAL;
3924	return length;
3925}
3926SLAB_ATTR(shrink);
3927
3928static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
3929{
3930	if (!(s->flags & SLAB_STORE_USER))
3931		return -ENOSYS;
3932	return list_locations(s, buf, TRACK_ALLOC);
3933}
3934SLAB_ATTR_RO(alloc_calls);
3935
3936static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
3937{
3938	if (!(s->flags & SLAB_STORE_USER))
3939		return -ENOSYS;
3940	return list_locations(s, buf, TRACK_FREE);
3941}
3942SLAB_ATTR_RO(free_calls);
3943
3944#ifdef CONFIG_NUMA
3945static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
3946{
3947	return sprintf(buf, "%d\n", s->remote_node_defrag_ratio / 10);
3948}
3949
3950static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
3951				const char *buf, size_t length)
3952{
3953	int n = simple_strtoul(buf, NULL, 10);
3954
3955	if (n < 100)
3956		s->remote_node_defrag_ratio = n * 10;
3957	return length;
3958}
3959SLAB_ATTR(remote_node_defrag_ratio);
3960#endif
3961
3962#ifdef CONFIG_SLUB_STATS
3963static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
3964{
3965	unsigned long sum  = 0;
3966	int cpu;
3967	int len;
3968	int *data = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL);
3969
3970	if (!data)
3971		return -ENOMEM;
3972
3973	for_each_online_cpu(cpu) {
3974		unsigned x = get_cpu_slab(s, cpu)->stat[si];
3975
3976		data[cpu] = x;
3977		sum += x;
3978	}
3979
3980	len = sprintf(buf, "%lu", sum);
3981
3982#ifdef CONFIG_SMP
3983	for_each_online_cpu(cpu) {
3984		if (data[cpu] && len < PAGE_SIZE - 20)
3985			len += sprintf(buf + len, " C%d=%u", cpu, data[cpu]);
3986	}
3987#endif
3988	kfree(data);
3989	return len + sprintf(buf + len, "\n");
3990}
3991
3992#define STAT_ATTR(si, text) 					\
3993static ssize_t text##_show(struct kmem_cache *s, char *buf)	\
3994{								\
3995	return show_stat(s, buf, si);				\
3996}								\
3997SLAB_ATTR_RO(text);
3998
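/*
 * For example, STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath) below expands
 * into an alloc_fastpath_show() wrapper around show_stat() plus the
 * read-only sysfs attribute alloc_fastpath_attr used in slab_attrs[].
 */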
3999STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
4000STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
4001STAT_ATTR(FREE_FASTPATH, free_fastpath);
4002STAT_ATTR(FREE_SLOWPATH, free_slowpath);
4003STAT_ATTR(FREE_FROZEN, free_frozen);
4004STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
4005STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
4006STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
4007STAT_ATTR(ALLOC_SLAB, alloc_slab);
4008STAT_ATTR(ALLOC_REFILL, alloc_refill);
4009STAT_ATTR(FREE_SLAB, free_slab);
4010STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
4011STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
4012STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
4013STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
4014STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
4015STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
4016
4017#endif
4018
4019static struct attribute *slab_attrs[] = {
4020	&slab_size_attr.attr,
4021	&object_size_attr.attr,
4022	&objs_per_slab_attr.attr,
4023	&order_attr.attr,
4024	&objects_attr.attr,
4025	&slabs_attr.attr,
4026	&partial_attr.attr,
4027	&cpu_slabs_attr.attr,
4028	&ctor_attr.attr,
4029	&aliases_attr.attr,
4030	&align_attr.attr,
4031	&sanity_checks_attr.attr,
4032	&trace_attr.attr,
4033	&hwcache_align_attr.attr,
4034	&reclaim_account_attr.attr,
4035	&destroy_by_rcu_attr.attr,
4036	&red_zone_attr.attr,
4037	&poison_attr.attr,
4038	&store_user_attr.attr,
4039	&validate_attr.attr,
4040	&shrink_attr.attr,
4041	&alloc_calls_attr.attr,
4042	&free_calls_attr.attr,
4043#ifdef CONFIG_ZONE_DMA
4044	&cache_dma_attr.attr,
4045#endif
4046#ifdef CONFIG_NUMA
4047	&remote_node_defrag_ratio_attr.attr,
4048#endif
4049#ifdef CONFIG_SLUB_STATS
4050	&alloc_fastpath_attr.attr,
4051	&alloc_slowpath_attr.attr,
4052	&free_fastpath_attr.attr,
4053	&free_slowpath_attr.attr,
4054	&free_frozen_attr.attr,
4055	&free_add_partial_attr.attr,
4056	&free_remove_partial_attr.attr,
4057	&alloc_from_partial_attr.attr,
4058	&alloc_slab_attr.attr,
4059	&alloc_refill_attr.attr,
4060	&free_slab_attr.attr,
4061	&cpuslab_flush_attr.attr,
4062	&deactivate_full_attr.attr,
4063	&deactivate_empty_attr.attr,
4064	&deactivate_to_head_attr.attr,
4065	&deactivate_to_tail_attr.attr,
4066	&deactivate_remote_frees_attr.attr,
4067#endif
4068	NULL
4069};
4070
4071static struct attribute_group slab_attr_group = {
4072	.attrs = slab_attrs,
4073};

static ssize_t slab_attr_show(struct kobject *kobj,
				struct attribute *attr,
				char *buf)
{
	struct slab_attribute *attribute;
	struct kmem_cache *s;
	int err;

	attribute = to_slab_attr(attr);
	s = to_slab(kobj);

	if (!attribute->show)
		return -EIO;

	err = attribute->show(s, buf);

	return err;
}

static ssize_t slab_attr_store(struct kobject *kobj,
				struct attribute *attr,
				const char *buf, size_t len)
{
	struct slab_attribute *attribute;
	struct kmem_cache *s;
	int err;

	attribute = to_slab_attr(attr);
	s = to_slab(kobj);

	if (!attribute->store)
		return -EIO;

	err = attribute->store(s, buf, len);

	return err;
}

static void kmem_cache_release(struct kobject *kobj)
{
	struct kmem_cache *s = to_slab(kobj);

	kfree(s);
}

static struct sysfs_ops slab_sysfs_ops = {
	.show = slab_attr_show,
	.store = slab_attr_store,
};

static struct kobj_type slab_ktype = {
	.sysfs_ops = &slab_sysfs_ops,
	.release = kmem_cache_release
};
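
/*
 * slab_ktype ties the pieces together: reads and writes on any file
 * below /sys/kernel/slab/<cache>/ are dispatched through
 * slab_attr_show()/slab_attr_store() above, and kmem_cache_release()
 * runs once the last reference to the cache's kobject is dropped,
 * freeing the kmem_cache structure itself.
 */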

static int uevent_filter(struct kset *kset, struct kobject *kobj)
{
	struct kobj_type *ktype = get_ktype(kobj);

	if (ktype == &slab_ktype)
		return 1;
	return 0;
}

static struct kset_uevent_ops slab_uevent_ops = {
	.filter = uevent_filter,
};

static struct kset *slab_kset;

#define ID_STR_LENGTH 64

/*
 * Create a unique string id for a slab cache:
 *
 * Format	:[flags-]size
 */
static char *create_unique_id(struct kmem_cache *s)
{
	char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
	char *p = name;

	BUG_ON(!name);

	*p++ = ':';
	/*
	 * First come the flags that affect slabcache operations. We
	 * only get here for aliasable slabs, so we do not need to
	 * support too many flags. The flags here must cover all flags
	 * that are matched during merging to guarantee that the id is
	 * unique.
	 */
	if (s->flags & SLAB_CACHE_DMA)
		*p++ = 'd';
	if (s->flags & SLAB_RECLAIM_ACCOUNT)
		*p++ = 'a';
	if (s->flags & SLAB_DEBUG_FREE)
		*p++ = 'F';
	if (p != name + 1)
		*p++ = '-';
	p += sprintf(p, "%07d", s->size);
	BUG_ON(p > name + ID_STR_LENGTH - 1);
	return name;
}
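
/*
 * Worked example (illustrative): a mergeable cache with SLAB_CACHE_DMA
 * set and s->size == 192 gets the id ":d-0000192", i.e. the leading
 * ':', one character per matched flag, a '-' separator because at
 * least one flag character was emitted, and the zero-padded size.
 * With no matching flags the id is simply ":0000192".
 */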

static int sysfs_slab_add(struct kmem_cache *s)
{
	int err;
	const char *name;
	int unmergeable;

	if (slab_state < SYSFS)
		/* Defer until later */
		return 0;

	unmergeable = slab_unmergeable(s);
	if (unmergeable) {
		/*
		 * The slabcache can never be merged, so we can use the name
		 * proper. This is typically the case in debug situations,
		 * where it lets us catch duplicate names easily.
		 */
		sysfs_remove_link(&slab_kset->kobj, s->name);
		name = s->name;
	} else {
		/*
		 * Create a unique name for the slab as a target
		 * for the symlinks.
		 */
		name = create_unique_id(s);
	}

	s->kobj.kset = slab_kset;
	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
	if (err) {
		kobject_put(&s->kobj);
		goto out;
	}

	err = sysfs_create_group(&s->kobj, &slab_attr_group);
	if (err)
		goto out;
	kobject_uevent(&s->kobj, KOBJ_ADD);
	if (!unmergeable)
		/* Setup first alias */
		sysfs_slab_alias(s, s->name);
out:
	/*
	 * The unique id was kmalloc'ed and the kobject keeps its own copy
	 * of the name, so free it on the error paths as well as on success.
	 */
	if (!unmergeable)
		kfree(name);
	return err;
}
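
/*
 * Net effect for a mergeable cache (illustrative; the names below are
 * examples only): the kobject is registered under the unique id and
 * the human readable name becomes a symlink pointing at it, e.g.
 *
 *	/sys/kernel/slab/:d-0000192
 *	/sys/kernel/slab/kmalloc-dma-192 -> :d-0000192
 *
 * Every further cache that merges into the same kmem_cache merely
 * adds another such symlink via sysfs_slab_alias().
 */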

static void sysfs_slab_remove(struct kmem_cache *s)
{
	kobject_uevent(&s->kobj, KOBJ_REMOVE);
	kobject_del(&s->kobj);
	kobject_put(&s->kobj);
}

/*
 * We need to buffer aliases during bootup until sysfs becomes
 * available, lest we lose that information.
 */
struct saved_alias {
	struct kmem_cache *s;
	const char *name;
	struct saved_alias *next;
};

static struct saved_alias *alias_list;

static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
{
	struct saved_alias *al;

	if (slab_state == SYSFS) {
		/*
		 * If we have a leftover link then remove it.
		 */
		sysfs_remove_link(&slab_kset->kobj, name);
		return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
	}

	al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
	if (!al)
		return -ENOMEM;

	al->s = s;
	al->name = name;
	al->next = alias_list;
	alias_list = al;
	return 0;
}

static int __init slab_sysfs_init(void)
{
	struct kmem_cache *s;
	int err;

	slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
	if (!slab_kset) {
		printk(KERN_ERR "Cannot register slab subsystem.\n");
		return -ENOSYS;
	}

	slab_state = SYSFS;

	list_for_each_entry(s, &slab_caches, list) {
		err = sysfs_slab_add(s);
		if (err)
			printk(KERN_ERR "SLUB: Unable to add boot slab %s"
						" to sysfs\n", s->name);
	}

	while (alias_list) {
		struct saved_alias *al = alias_list;

		alias_list = alias_list->next;
		err = sysfs_slab_alias(al->s, al->name);
		if (err)
			printk(KERN_ERR "SLUB: Unable to add boot slab alias"
					" %s to sysfs\n", al->name);
		kfree(al);
	}

	resiliency_test();
	return 0;
}

__initcall(slab_sysfs_init);
#endif

/*
 * The /proc/slabinfo ABI
 */
#ifdef CONFIG_SLABINFO

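/*
 * SLUB has no runtime tunables in the sense of SLAB's
 * limit/batchcount/shared triple, so any attempt to tune a cache by
 * writing to /proc/slabinfo is rejected.
 */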
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos)
{
	return -EINVAL;
}

static void print_slabinfo_header(struct seq_file *m)
{
	seq_puts(m, "slabinfo - version: 2.1\n");
	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> "
		 "<objperslab> <pagesperslab>");
	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
	seq_putc(m, '\n');
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	loff_t n = *pos;

	down_read(&slub_lock);
	if (!n)
		print_slabinfo_header(m);

	return seq_list_start(&slab_caches, *pos);
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &slab_caches, pos);
}

static void s_stop(struct seq_file *m, void *p)
{
	up_read(&slub_lock);
}
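
/*
 * slub_lock is read-held from s_start() to s_stop(), so caches cannot
 * be created or destroyed while a chunk of /proc/slabinfo is being
 * generated.
 */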

static int s_show(struct seq_file *m, void *p)
{
	unsigned long nr_partials = 0;
	unsigned long nr_slabs = 0;
	unsigned long nr_inuse = 0;
	unsigned long nr_objs;
	struct kmem_cache *s;
	int node;

	s = list_entry(p, struct kmem_cache, list);

	for_each_online_node(node) {
		struct kmem_cache_node *n = get_node(s, node);

		if (!n)
			continue;

		nr_partials += n->nr_partial;
		nr_slabs += atomic_long_read(&n->nr_slabs);
		nr_inuse += count_partial(n);
	}

	nr_objs = nr_slabs * s->objects;
	nr_inuse += (nr_slabs - nr_partials) * s->objects;

	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", s->name, nr_inuse,
		   nr_objs, s->size, s->objects, (1 << s->order));
	seq_printf(m, " : tunables %4u %4u %4u", 0, 0, 0);
	seq_printf(m, " : slabdata %6lu %6lu %6lu", nr_slabs, nr_slabs,
		   0UL);
	seq_putc(m, '\n');
	return 0;
}
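
/*
 * An illustrative line as emitted by the seq_printf() calls above
 * (the numbers are made up, but self-consistent: 200 slabs of 64
 * 64-byte objects in order-0 pages, all of them full):
 *
 * kmalloc-64         12800  12800     64   64    1 : tunables    0    0    0 : slabdata    200    200      0
 */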

const struct seq_operations slabinfo_op = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};

#endif /* CONFIG_SLABINFO */
