slub.c revision 756dee75872a2a764b478e18076360b8a4ec9045
1/*
2 * SLUB: A slab allocator that limits cache line use instead of queuing
3 * objects in per cpu and per node lists.
4 *
5 * The allocator synchronizes using per slab locks and only
6 * uses a centralized lock to manage a pool of partial slabs.
7 *
8 * (C) 2007 SGI, Christoph Lameter
9 */
10
11#include <linux/mm.h>
12#include <linux/swap.h> /* struct reclaim_state */
13#include <linux/module.h>
14#include <linux/bit_spinlock.h>
15#include <linux/interrupt.h>
16#include <linux/bitops.h>
17#include <linux/slab.h>
18#include <linux/proc_fs.h>
19#include <linux/seq_file.h>
20#include <linux/kmemtrace.h>
21#include <linux/kmemcheck.h>
22#include <linux/cpu.h>
23#include <linux/cpuset.h>
24#include <linux/mempolicy.h>
25#include <linux/ctype.h>
26#include <linux/debugobjects.h>
27#include <linux/kallsyms.h>
28#include <linux/memory.h>
29#include <linux/math64.h>
30#include <linux/fault-inject.h>
31
32/*
33 * Lock order:
34 *   1. slab_lock(page)
35 *   2. node->list_lock
36 *
37 *   The slab_lock protects operations on the objects of a particular
38 *   slab and its metadata in the page struct. If the slab lock
39 *   has been taken then no allocations or frees can be performed
40 *   on the objects in the slab, nor can the slab be added to or removed
41 *   from the partial or full lists, since this would mean modifying
42 *   the page struct of the slab.
43 *
44 *   The list_lock protects the partial and full list on each node and
45 *   the partial slab counter. If taken then no new slabs may be added or
46 *   removed from the lists nor may the number of partial slabs be modified.
47 *   (Note that the total number of slabs is an atomic value that may be
48 *   modified without taking the list lock).
49 *
50 *   The list_lock is a centralized lock and thus we avoid taking it as
51 *   much as possible. As long as SLUB does not have to handle partial
52 *   slabs, operations can continue without any centralized lock. F.e.
53 *   allocating a long series of objects that fill up slabs does not require
54 *   the list lock.
55 *
56 *   The lock order is sometimes inverted when we are trying to get a slab
57 *   off a list. We take the list_lock and then look for a page on the list
58 *   to use. While we do that objects in the slabs may be freed. We can
59 *   only operate on the slab if we have also taken the slab_lock. So we use
60 *   a slab_trylock() on the slab. If trylock was successful then no frees
61 *   can occur anymore and we can use the slab for allocations etc. If the
62 *   slab_trylock() does not succeed then frees are in progress in the slab and
63 *   we must stay away from it for a while since we may cause a bouncing
64 *   cacheline if we try to acquire the lock. So go onto the next slab.
65 *   If all pages are busy then we may allocate a new slab instead of reusing
66 *   a partial slab. A new slab has no one operating on it and thus there is
67 *   no danger of cacheline contention.
68 *
69 *   Interrupts are disabled during allocation and deallocation in order to
70 *   make the slab allocator safe to use in the context of an irq. In addition
71 *   interrupts are disabled to ensure that the processor does not change
72 *   while handling per_cpu slabs, due to kernel preemption.
73 *
74 * SLUB assigns one slab for allocation to each processor.
75 * Allocations only occur from these slabs called cpu slabs.
76 *
77 * Slabs with free elements are kept on a partial list and during regular
78 * operations no list for full slabs is used. If an object in a full slab is
79 * freed then the slab will show up again on the partial lists.
80 * We track full slabs for debugging purposes though because otherwise we
81 * cannot scan all objects.
82 *
83 * Slabs are freed when they become empty. Teardown and setup is
84 * minimal so we rely on the page allocators per cpu caches for
85 * fast frees and allocs.
86 *
87 * Overloading of page flags that are otherwise used for LRU management.
88 *
89 * PageActive 		The slab is frozen and exempt from list processing.
90 * 			This means that the slab is dedicated to a purpose
91 * 			such as satisfying allocations for a specific
92 * 			processor. Objects may be freed in the slab while
93 * 			it is frozen but slab_free will then skip the usual
94 * 			list operations. It is up to the processor holding
95 * 			the slab to integrate the slab into the slab lists
96 * 			when the slab is no longer needed.
97 *
98 * 			One use of this flag is to mark slabs that are
99 * 			used for allocations. Then such a slab becomes a cpu
100 * 			slab. The cpu slab may be equipped with an additional
101 * 			freelist that allows lockless access to
102 * 			free objects in addition to the regular freelist
103 * 			that requires the slab lock.
104 *
105 * PageError		Slab requires special handling due to debug
106 * 			options set. This moves	slab handling out of
107 * 			the fast path and disables lockless freelists.
108 */
109
110#ifdef CONFIG_SLUB_DEBUG
111#define SLABDEBUG 1
112#else
113#define SLABDEBUG 0
114#endif
115
116/*
117 * Issues still to be resolved:
118 *
119 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
120 *
121 * - Variable sizing of the per node arrays
122 */
123
124/* Enable to test recovery from slab corruption on boot */
125#undef SLUB_RESILIENCY_TEST
126
127/*
128 * Minimum number of partial slabs. These will be left on the partial
129 * lists even if they are empty. kmem_cache_shrink may reclaim them.
130 */
131#define MIN_PARTIAL 5
132
133/*
134 * Maximum number of desirable partial slabs.
135 * The existence of more partial slabs makes kmem_cache_shrink
136 * sort the partial list by the number of objects in them.
137 */
138#define MAX_PARTIAL 10
139
140#define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \
141				SLAB_POISON | SLAB_STORE_USER)
142
143/*
144 * Debugging flags that require metadata to be stored in the slab.  These get
145 * disabled when slub_debug=O is used and a cache's min order increases with
146 * metadata.
147 */
148#define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
149
150/*
151 * Set of flags that will prevent slab merging
152 */
153#define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
154		SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE)
155
156#define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
157		SLAB_CACHE_DMA | SLAB_NOTRACK)
158
159#ifndef ARCH_KMALLOC_MINALIGN
160#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
161#endif
162
163#ifndef ARCH_SLAB_MINALIGN
164#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
165#endif
166
167#define OO_SHIFT	16
168#define OO_MASK		((1 << OO_SHIFT) - 1)
169#define MAX_OBJS_PER_PAGE	65535 /* since page.objects is u16 */
170
171/* Internal SLUB flags */
172#define __OBJECT_POISON		0x80000000 /* Poison object */
173#define __SYSFS_ADD_DEFERRED	0x40000000 /* Not yet visible via sysfs */
174
175static int kmem_size = sizeof(struct kmem_cache);
176
177#ifdef CONFIG_SMP
178static struct notifier_block slab_notifier;
179#endif
180
181static enum {
182	DOWN,		/* No slab functionality available */
183	PARTIAL,	/* kmem_cache_open() works but kmalloc does not */
184	UP,		/* Everything works but does not show up in sysfs */
185	SYSFS		/* Sysfs up */
186} slab_state = DOWN;
187
188/* A list of all slab caches on the system */
189static DECLARE_RWSEM(slub_lock);
190static LIST_HEAD(slab_caches);
191
192/*
193 * Tracking user of a slab.
194 */
195struct track {
196	unsigned long addr;	/* Called from address */
197	int cpu;		/* Was running on cpu */
198	int pid;		/* Pid context */
199	unsigned long when;	/* When did the operation occur */
200};
201
202enum track_item { TRACK_ALLOC, TRACK_FREE };
203
204#ifdef CONFIG_SLUB_DEBUG
205static int sysfs_slab_add(struct kmem_cache *);
206static int sysfs_slab_alias(struct kmem_cache *, const char *);
207static void sysfs_slab_remove(struct kmem_cache *);
208
209#else
210static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
211static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
212							{ return 0; }
213static inline void sysfs_slab_remove(struct kmem_cache *s)
214{
215	kfree(s);
216}
217
218#endif
219
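/*
 * Per cpu statistics counter. Compiles away to nothing unless
 * CONFIG_SLUB_STATS is enabled.
 */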
220static inline void stat(struct kmem_cache_cpu *c, enum stat_item si)
221{
222#ifdef CONFIG_SLUB_STATS
223	c->stat[si]++;
224#endif
225}
226
227/********************************************************************
228 * 			Core slab cache functions
229 *******************************************************************/
230
231int slab_is_available(void)
232{
233	return slab_state >= UP;
234}
235
236static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
237{
238#ifdef CONFIG_NUMA
239	return s->node[node];
240#else
241	return &s->local_node;
242#endif
243}
244
245/* Verify that a pointer has an address that is valid within a slab page */
246static inline int check_valid_pointer(struct kmem_cache *s,
247				struct page *page, const void *object)
248{
249	void *base;
250
251	if (!object)
252		return 1;
253
254	base = page_address(page);
255	if (object < base || object >= base + page->objects * s->size ||
256		(object - base) % s->size) {
257		return 0;
258	}
259
260	return 1;
261}
262
263/*
264 * Slow version of get and set free pointer.
265 *
266 * This version requires touching the cache lines of kmem_cache which
267 * we avoid doing in the fast alloc and free paths. There we obtain the
268 * offset from the per cpu kmem_cache_cpu structure instead.
269 */
270static inline void *get_freepointer(struct kmem_cache *s, void *object)
271{
272	return *(void **)(object + s->offset);
273}
274
275static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
276{
277	*(void **)(object + s->offset) = fp;
278}
279
280/* Loop over all objects in a slab */
281#define for_each_object(__p, __s, __addr, __objects) \
282	for (__p = (__addr); __p < (__addr) + (__objects) * (__s)->size;\
283			__p += (__s)->size)
284
285/* Scan freelist */
286#define for_each_free_object(__p, __s, __free) \
287	for (__p = (__free); __p; __p = get_freepointer((__s), __p))
288
289/* Determine object index from a given position */
290static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
291{
292	return (p - addr) / s->size;
293}
294
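/*
 * kmem_cache_order_objects packs the page allocation order and the number
 * of objects that fit into a slab of that order into one word: the order
 * sits above OO_SHIFT, the object count in the low OO_MASK bits.
 *
 * Illustrative example (not from the original source): with 4KiB pages,
 * an order-1 slab for 192 byte objects holds 8192 / 192 = 42 objects, so
 * the encoded value would be (1 << 16) + 42.
 */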
295static inline struct kmem_cache_order_objects oo_make(int order,
296						unsigned long size)
297{
298	struct kmem_cache_order_objects x = {
299		(order << OO_SHIFT) + (PAGE_SIZE << order) / size
300	};
301
302	return x;
303}
304
305static inline int oo_order(struct kmem_cache_order_objects x)
306{
307	return x.x >> OO_SHIFT;
308}
309
310static inline int oo_objects(struct kmem_cache_order_objects x)
311{
312	return x.x & OO_MASK;
313}
314
315#ifdef CONFIG_SLUB_DEBUG
316/*
317 * Debug settings:
318 */
319#ifdef CONFIG_SLUB_DEBUG_ON
320static int slub_debug = DEBUG_DEFAULT_FLAGS;
321#else
322static int slub_debug;
323#endif
324
325static char *slub_debug_slabs;
326static int disable_higher_order_debug;
327
328/*
329 * Object debugging
330 */
331static void print_section(char *text, u8 *addr, unsigned int length)
332{
333	int i, offset;
334	int newline = 1;
335	char ascii[17];
336
337	ascii[16] = 0;
338
339	for (i = 0; i < length; i++) {
340		if (newline) {
341			printk(KERN_ERR "%8s 0x%p: ", text, addr + i);
342			newline = 0;
343		}
344		printk(KERN_CONT " %02x", addr[i]);
345		offset = i % 16;
346		ascii[offset] = isgraph(addr[i]) ? addr[i] : '.';
347		if (offset == 15) {
348			printk(KERN_CONT " %s\n", ascii);
349			newline = 1;
350		}
351	}
352	if (!newline) {
353		i %= 16;
354		while (i < 16) {
355			printk(KERN_CONT "   ");
356			ascii[i] = ' ';
357			i++;
358		}
359		printk(KERN_CONT " %s\n", ascii);
360	}
361}
362
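/*
 * Locate the tracking struct for an object. The alloc/free tracking data
 * lives after the free pointer when the free pointer is stored outside the
 * object (s->offset != 0), otherwise directly after the object (s->inuse).
 */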
363static struct track *get_track(struct kmem_cache *s, void *object,
364	enum track_item alloc)
365{
366	struct track *p;
367
368	if (s->offset)
369		p = object + s->offset + sizeof(void *);
370	else
371		p = object + s->inuse;
372
373	return p + alloc;
374}
375
376static void set_track(struct kmem_cache *s, void *object,
377			enum track_item alloc, unsigned long addr)
378{
379	struct track *p = get_track(s, object, alloc);
380
381	if (addr) {
382		p->addr = addr;
383		p->cpu = smp_processor_id();
384		p->pid = current->pid;
385		p->when = jiffies;
386	} else
387		memset(p, 0, sizeof(struct track));
388}
389
390static void init_tracking(struct kmem_cache *s, void *object)
391{
392	if (!(s->flags & SLAB_STORE_USER))
393		return;
394
395	set_track(s, object, TRACK_FREE, 0UL);
396	set_track(s, object, TRACK_ALLOC, 0UL);
397}
398
399static void print_track(const char *s, struct track *t)
400{
401	if (!t->addr)
402		return;
403
404	printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
405		s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
406}
407
408static void print_tracking(struct kmem_cache *s, void *object)
409{
410	if (!(s->flags & SLAB_STORE_USER))
411		return;
412
413	print_track("Allocated", get_track(s, object, TRACK_ALLOC));
414	print_track("Freed", get_track(s, object, TRACK_FREE));
415}
416
417static void print_page_info(struct page *page)
418{
419	printk(KERN_ERR "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
420		page, page->objects, page->inuse, page->freelist, page->flags);
421
422}
423
424static void slab_bug(struct kmem_cache *s, char *fmt, ...)
425{
426	va_list args;
427	char buf[100];
428
429	va_start(args, fmt);
430	vsnprintf(buf, sizeof(buf), fmt, args);
431	va_end(args);
432	printk(KERN_ERR "========================================"
433			"=====================================\n");
434	printk(KERN_ERR "BUG %s: %s\n", s->name, buf);
435	printk(KERN_ERR "----------------------------------------"
436			"-------------------------------------\n\n");
437}
438
439static void slab_fix(struct kmem_cache *s, char *fmt, ...)
440{
441	va_list args;
442	char buf[100];
443
444	va_start(args, fmt);
445	vsnprintf(buf, sizeof(buf), fmt, args);
446	va_end(args);
447	printk(KERN_ERR "FIX %s: %s\n", s->name, buf);
448}
449
450static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
451{
452	unsigned int off;	/* Offset of last byte */
453	u8 *addr = page_address(page);
454
455	print_tracking(s, p);
456
457	print_page_info(page);
458
459	printk(KERN_ERR "INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
460			p, p - addr, get_freepointer(s, p));
461
462	if (p > addr + 16)
463		print_section("Bytes b4", p - 16, 16);
464
465	print_section("Object", p, min_t(unsigned long, s->objsize, PAGE_SIZE));
466
467	if (s->flags & SLAB_RED_ZONE)
468		print_section("Redzone", p + s->objsize,
469			s->inuse - s->objsize);
470
471	if (s->offset)
472		off = s->offset + sizeof(void *);
473	else
474		off = s->inuse;
475
476	if (s->flags & SLAB_STORE_USER)
477		off += 2 * sizeof(struct track);
478
479	if (off != s->size)
480		/* Beginning of the filler is the free pointer */
481		print_section("Padding", p + off, s->size - off);
482
483	dump_stack();
484}
485
486static void object_err(struct kmem_cache *s, struct page *page,
487			u8 *object, char *reason)
488{
489	slab_bug(s, "%s", reason);
490	print_trailer(s, page, object);
491}
492
493static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...)
494{
495	va_list args;
496	char buf[100];
497
498	va_start(args, fmt);
499	vsnprintf(buf, sizeof(buf), fmt, args);
500	va_end(args);
501	slab_bug(s, "%s", buf);
502	print_page_info(page);
503	dump_stack();
504}
505
506static void init_object(struct kmem_cache *s, void *object, int active)
507{
508	u8 *p = object;
509
510	if (s->flags & __OBJECT_POISON) {
511		memset(p, POISON_FREE, s->objsize - 1);
512		p[s->objsize - 1] = POISON_END;
513	}
514
515	if (s->flags & SLAB_RED_ZONE)
516		memset(p + s->objsize,
517			active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE,
518			s->inuse - s->objsize);
519}
520
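/*
 * Scan a byte range for the first byte that does not match the expected
 * value. Returns a pointer to the first mismatch or NULL if all bytes match.
 */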
521static u8 *check_bytes(u8 *start, unsigned int value, unsigned int bytes)
522{
523	while (bytes) {
524		if (*start != (u8)value)
525			return start;
526		start++;
527		bytes--;
528	}
529	return NULL;
530}
531
532static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
533						void *from, void *to)
534{
535	slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
536	memset(from, data, to - from);
537}
538
539static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
540			u8 *object, char *what,
541			u8 *start, unsigned int value, unsigned int bytes)
542{
543	u8 *fault;
544	u8 *end;
545
546	fault = check_bytes(start, value, bytes);
547	if (!fault)
548		return 1;
549
550	end = start + bytes;
551	while (end > fault && end[-1] == value)
552		end--;
553
554	slab_bug(s, "%s overwritten", what);
555	printk(KERN_ERR "INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n",
556					fault, end - 1, fault[0], value);
557	print_trailer(s, page, object);
558
559	restore_bytes(s, what, value, fault, end);
560	return 0;
561}
562
563/*
564 * Object layout:
565 *
566 * object address
567 * 	Bytes of the object to be managed.
568 * 	If the freepointer may overlay the object then the free
569 * 	pointer is the first word of the object.
570 *
571 * 	Poisoning uses 0x6b (POISON_FREE) and the last byte is
572 * 	0xa5 (POISON_END)
573 *
574 * object + s->objsize
575 * 	Padding to reach word boundary. This is also used for Redzoning.
576 * 	Padding is extended by another word if Redzoning is enabled and
577 * 	objsize == inuse.
578 *
579 * 	We fill with 0xbb (RED_INACTIVE) for inactive objects and with
580 * 	0xcc (RED_ACTIVE) for objects in use.
581 *
582 * object + s->inuse
583 * 	Meta data starts here.
584 *
585 * 	A. Free pointer (if we cannot overwrite object on free)
586 * 	B. Tracking data for SLAB_STORE_USER
587 * 	C. Padding to reach required alignment boundary or at minimum
588 * 		one word if debugging is on to be able to detect writes
589 * 		before the word boundary.
590 *
591 *	Padding is done using 0x5a (POISON_INUSE)
592 *
593 * object + s->size
594 * 	Nothing is used beyond s->size.
595 *
596 * If slabcaches are merged then the objsize and inuse boundaries are mostly
597 * ignored. And therefore no slab options that rely on these boundaries
598 * may be used with merged slabcaches.
599 */
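/*
 * Illustrative example (not part of the original source): with red zoning,
 * poisoning and SLAB_STORE_USER all enabled, an object is laid out as
 *
 *	[object, poisoned with 0x6b/0xa5 while free]	objsize bytes
 *	[red zone]					inuse - objsize bytes
 *	[free pointer]					sizeof(void *)
 *	[alloc track][free track]			2 * sizeof(struct track)
 *	[padding filled with 0x5a]			up to s->size
 */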
600
601static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
602{
603	unsigned long off = s->inuse;	/* The end of info */
604
605	if (s->offset)
606		/* Freepointer is placed after the object. */
607		off += sizeof(void *);
608
609	if (s->flags & SLAB_STORE_USER)
610		/* We also have user information there */
611		off += 2 * sizeof(struct track);
612
613	if (s->size == off)
614		return 1;
615
616	return check_bytes_and_report(s, page, p, "Object padding",
617				p + off, POISON_INUSE, s->size - off);
618}
619
620/* Check the pad bytes at the end of a slab page */
621static int slab_pad_check(struct kmem_cache *s, struct page *page)
622{
623	u8 *start;
624	u8 *fault;
625	u8 *end;
626	int length;
627	int remainder;
628
629	if (!(s->flags & SLAB_POISON))
630		return 1;
631
632	start = page_address(page);
633	length = (PAGE_SIZE << compound_order(page));
634	end = start + length;
635	remainder = length % s->size;
636	if (!remainder)
637		return 1;
638
639	fault = check_bytes(end - remainder, POISON_INUSE, remainder);
640	if (!fault)
641		return 1;
642	while (end > fault && end[-1] == POISON_INUSE)
643		end--;
644
645	slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
646	print_section("Padding", end - remainder, remainder);
647
648	restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end);
649	return 0;
650}
651
652static int check_object(struct kmem_cache *s, struct page *page,
653					void *object, int active)
654{
655	u8 *p = object;
656	u8 *endobject = object + s->objsize;
657
658	if (s->flags & SLAB_RED_ZONE) {
659		unsigned int red =
660			active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE;
661
662		if (!check_bytes_and_report(s, page, object, "Redzone",
663			endobject, red, s->inuse - s->objsize))
664			return 0;
665	} else {
666		if ((s->flags & SLAB_POISON) && s->objsize < s->inuse) {
667			check_bytes_and_report(s, page, p, "Alignment padding",
668				endobject, POISON_INUSE, s->inuse - s->objsize);
669		}
670	}
671
672	if (s->flags & SLAB_POISON) {
673		if (!active && (s->flags & __OBJECT_POISON) &&
674			(!check_bytes_and_report(s, page, p, "Poison", p,
675					POISON_FREE, s->objsize - 1) ||
676			 !check_bytes_and_report(s, page, p, "Poison",
677				p + s->objsize - 1, POISON_END, 1)))
678			return 0;
679		/*
680		 * check_pad_bytes cleans up on its own.
681		 */
682		check_pad_bytes(s, page, p);
683	}
684
685	if (!s->offset && active)
686		/*
687		 * Object and freepointer overlap. Cannot check
688		 * freepointer while object is allocated.
689		 */
690		return 1;
691
692	/* Check free pointer validity */
693	if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
694		object_err(s, page, p, "Freepointer corrupt");
695		/*
696		 * No choice but to zap it and thus lose the remainder
697		 * of the free objects in this slab. May cause
698		 * another error because the object count is now wrong.
699		 */
700		set_freepointer(s, p, NULL);
701		return 0;
702	}
703	return 1;
704}
705
706static int check_slab(struct kmem_cache *s, struct page *page)
707{
708	int maxobj;
709
710	VM_BUG_ON(!irqs_disabled());
711
712	if (!PageSlab(page)) {
713		slab_err(s, page, "Not a valid slab page");
714		return 0;
715	}
716
717	maxobj = (PAGE_SIZE << compound_order(page)) / s->size;
718	if (page->objects > maxobj) {
719		slab_err(s, page, "objects %u > max %u",
720			page->objects, maxobj);
721		return 0;
722	}
723	if (page->inuse > page->objects) {
724		slab_err(s, page, "inuse %u > max %u",
725			page->inuse, page->objects);
726		return 0;
727	}
728	/* Slab_pad_check fixes things up after itself */
729	slab_pad_check(s, page);
730	return 1;
731}
732
733/*
734 * Determine if a certain object on a page is on the freelist. Must hold the
735 * slab lock to guarantee that the chains are in a consistent state.
736 */
737static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
738{
739	int nr = 0;
740	void *fp = page->freelist;
741	void *object = NULL;
742	unsigned long max_objects;
743
744	while (fp && nr <= page->objects) {
745		if (fp == search)
746			return 1;
747		if (!check_valid_pointer(s, page, fp)) {
748			if (object) {
749				object_err(s, page, object,
750					"Freechain corrupt");
751				set_freepointer(s, object, NULL);
752				break;
753			} else {
754				slab_err(s, page, "Freepointer corrupt");
755				page->freelist = NULL;
756				page->inuse = page->objects;
757				slab_fix(s, "Freelist cleared");
758				return 0;
759			}
760			break;
761		}
762		object = fp;
763		fp = get_freepointer(s, object);
764		nr++;
765	}
766
767	max_objects = (PAGE_SIZE << compound_order(page)) / s->size;
768	if (max_objects > MAX_OBJS_PER_PAGE)
769		max_objects = MAX_OBJS_PER_PAGE;
770
771	if (page->objects != max_objects) {
772		slab_err(s, page, "Wrong number of objects. Found %d but "
773			"should be %d", page->objects, max_objects);
774		page->objects = max_objects;
775		slab_fix(s, "Number of objects adjusted.");
776	}
777	if (page->inuse != page->objects - nr) {
778		slab_err(s, page, "Wrong object count. Counter is %d but "
779			"counted were %d", page->inuse, page->objects - nr);
780		page->inuse = page->objects - nr;
781		slab_fix(s, "Object count adjusted.");
782	}
783	return search == NULL;
784}
785
786static void trace(struct kmem_cache *s, struct page *page, void *object,
787								int alloc)
788{
789	if (s->flags & SLAB_TRACE) {
790		printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
791			s->name,
792			alloc ? "alloc" : "free",
793			object, page->inuse,
794			page->freelist);
795
796		if (!alloc)
797			print_section("Object", (void *)object, s->objsize);
798
799		dump_stack();
800	}
801}
802
803/*
804 * Tracking of fully allocated slabs for debugging purposes.
805 */
806static void add_full(struct kmem_cache_node *n, struct page *page)
807{
808	spin_lock(&n->list_lock);
809	list_add(&page->lru, &n->full);
810	spin_unlock(&n->list_lock);
811}
812
813static void remove_full(struct kmem_cache *s, struct page *page)
814{
815	struct kmem_cache_node *n;
816
817	if (!(s->flags & SLAB_STORE_USER))
818		return;
819
820	n = get_node(s, page_to_nid(page));
821
822	spin_lock(&n->list_lock);
823	list_del(&page->lru);
824	spin_unlock(&n->list_lock);
825}
826
827/* Tracking of the number of slabs for debugging purposes */
828static inline unsigned long slabs_node(struct kmem_cache *s, int node)
829{
830	struct kmem_cache_node *n = get_node(s, node);
831
832	return atomic_long_read(&n->nr_slabs);
833}
834
835static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
836{
837	return atomic_long_read(&n->nr_slabs);
838}
839
840static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
841{
842	struct kmem_cache_node *n = get_node(s, node);
843
844	/*
845	 * May be called early in order to allocate a slab for the
846	 * kmem_cache_node structure. Solve the chicken-egg
847	 * dilemma by deferring the increment of the count during
848	 * bootstrap (see early_kmem_cache_node_alloc).
849	 */
850	if (!NUMA_BUILD || n) {
851		atomic_long_inc(&n->nr_slabs);
852		atomic_long_add(objects, &n->total_objects);
853	}
854}
855static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
856{
857	struct kmem_cache_node *n = get_node(s, node);
858
859	atomic_long_dec(&n->nr_slabs);
860	atomic_long_sub(objects, &n->total_objects);
861}
862
863/* Object debug checks for alloc/free paths */
864static void setup_object_debug(struct kmem_cache *s, struct page *page,
865								void *object)
866{
867	if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
868		return;
869
870	init_object(s, object, 0);
871	init_tracking(s, object);
872}
873
874static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
875					void *object, unsigned long addr)
876{
877	if (!check_slab(s, page))
878		goto bad;
879
880	if (!on_freelist(s, page, object)) {
881		object_err(s, page, object, "Object already allocated");
882		goto bad;
883	}
884
885	if (!check_valid_pointer(s, page, object)) {
886		object_err(s, page, object, "Freelist Pointer check fails");
887		goto bad;
888	}
889
890	if (!check_object(s, page, object, 0))
891		goto bad;
892
893	/* Success. Perform special debug activities for allocs */
894	if (s->flags & SLAB_STORE_USER)
895		set_track(s, object, TRACK_ALLOC, addr);
896	trace(s, page, object, 1);
897	init_object(s, object, 1);
898	return 1;
899
900bad:
901	if (PageSlab(page)) {
902		/*
903		 * If this is a slab page then lets do the best we can
904		 * to avoid issues in the future. Marking all objects
905		 * as used avoids touching the remaining objects.
906		 */
907		slab_fix(s, "Marking all objects used");
908		page->inuse = page->objects;
909		page->freelist = NULL;
910	}
911	return 0;
912}
913
914static int free_debug_processing(struct kmem_cache *s, struct page *page,
915					void *object, unsigned long addr)
916{
917	if (!check_slab(s, page))
918		goto fail;
919
920	if (!check_valid_pointer(s, page, object)) {
921		slab_err(s, page, "Invalid object pointer 0x%p", object);
922		goto fail;
923	}
924
925	if (on_freelist(s, page, object)) {
926		object_err(s, page, object, "Object already free");
927		goto fail;
928	}
929
930	if (!check_object(s, page, object, 1))
931		return 0;
932
933	if (unlikely(s != page->slab)) {
934		if (!PageSlab(page)) {
935			slab_err(s, page, "Attempt to free object(0x%p) "
936				"outside of slab", object);
937		} else if (!page->slab) {
938			printk(KERN_ERR
939				"SLUB <none>: no slab for object 0x%p.\n",
940						object);
941			dump_stack();
942		} else
943			object_err(s, page, object,
944					"page slab pointer corrupt.");
945		goto fail;
946	}
947
948	/* Special debug activities for freeing objects */
949	if (!PageSlubFrozen(page) && !page->freelist)
950		remove_full(s, page);
951	if (s->flags & SLAB_STORE_USER)
952		set_track(s, object, TRACK_FREE, addr);
953	trace(s, page, object, 0);
954	init_object(s, object, 0);
955	return 1;
956
957fail:
958	slab_fix(s, "Object at 0x%p not freed", object);
959	return 0;
960}
961
962static int __init setup_slub_debug(char *str)
963{
964	slub_debug = DEBUG_DEFAULT_FLAGS;
965	if (*str++ != '=' || !*str)
966		/*
967		 * No options specified. Switch on full debugging.
968		 */
969		goto out;
970
971	if (*str == ',')
972		/*
973		 * No options but restriction on slabs. This means full
974		 * debugging for slabs matching a pattern.
975		 */
976		goto check_slabs;
977
978	if (tolower(*str) == 'o') {
979		/*
980		 * Avoid enabling debugging on caches if its minimum order
981		 * would increase as a result.
982		 */
983		disable_higher_order_debug = 1;
984		goto out;
985	}
986
987	slub_debug = 0;
988	if (*str == '-')
989		/*
990		 * Switch off all debugging measures.
991		 */
992		goto out;
993
994	/*
995	 * Determine which debug features should be switched on
996	 */
997	for (; *str && *str != ','; str++) {
998		switch (tolower(*str)) {
999		case 'f':
1000			slub_debug |= SLAB_DEBUG_FREE;
1001			break;
1002		case 'z':
1003			slub_debug |= SLAB_RED_ZONE;
1004			break;
1005		case 'p':
1006			slub_debug |= SLAB_POISON;
1007			break;
1008		case 'u':
1009			slub_debug |= SLAB_STORE_USER;
1010			break;
1011		case 't':
1012			slub_debug |= SLAB_TRACE;
1013			break;
1014		default:
1015			printk(KERN_ERR "slub_debug option '%c' "
1016				"unknown. skipped\n", *str);
1017		}
1018	}
1019
1020check_slabs:
1021	if (*str == ',')
1022		slub_debug_slabs = str + 1;
1023out:
1024	return 1;
1025}
1026
1027__setup("slub_debug", setup_slub_debug);
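/*
 * Boot parameter examples (illustrative, derived from the option parser
 * above): "slub_debug" alone enables full debugging for all caches,
 * "slub_debug=FZ" enables only sanity checks and red zoning,
 * "slub_debug=,dentry" enables full debugging only for caches whose name
 * starts with "dentry", and "slub_debug=-" switches debugging off.
 */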
1028
1029static unsigned long kmem_cache_flags(unsigned long objsize,
1030	unsigned long flags, const char *name,
1031	void (*ctor)(void *))
1032{
1033	/*
1034	 * Enable debugging if selected on the kernel commandline.
1035	 */
1036	if (slub_debug && (!slub_debug_slabs ||
1037		!strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs))))
1038		flags |= slub_debug;
1039
1040	return flags;
1041}
1042#else
1043static inline void setup_object_debug(struct kmem_cache *s,
1044			struct page *page, void *object) {}
1045
1046static inline int alloc_debug_processing(struct kmem_cache *s,
1047	struct page *page, void *object, unsigned long addr) { return 0; }
1048
1049static inline int free_debug_processing(struct kmem_cache *s,
1050	struct page *page, void *object, unsigned long addr) { return 0; }
1051
1052static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
1053			{ return 1; }
1054static inline int check_object(struct kmem_cache *s, struct page *page,
1055			void *object, int active) { return 1; }
1056static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
1057static inline unsigned long kmem_cache_flags(unsigned long objsize,
1058	unsigned long flags, const char *name,
1059	void (*ctor)(void *))
1060{
1061	return flags;
1062}
1063#define slub_debug 0
1064
1065#define disable_higher_order_debug 0
1066
1067static inline unsigned long slabs_node(struct kmem_cache *s, int node)
1068							{ return 0; }
1069static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
1070							{ return 0; }
1071static inline void inc_slabs_node(struct kmem_cache *s, int node,
1072							int objects) {}
1073static inline void dec_slabs_node(struct kmem_cache *s, int node,
1074							int objects) {}
1075#endif
1076
1077/*
1078 * Slab allocation and freeing
1079 */
1080static inline struct page *alloc_slab_page(gfp_t flags, int node,
1081					struct kmem_cache_order_objects oo)
1082{
1083	int order = oo_order(oo);
1084
1085	flags |= __GFP_NOTRACK;
1086
1087	if (node == -1)
1088		return alloc_pages(flags, order);
1089	else
1090		return alloc_pages_node(node, flags, order);
1091}
1092
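/*
 * Allocate the backing pages for a new slab. The preferred (higher) order
 * from s->oo is tried first without warnings or retries; under memory
 * pressure we fall back to the minimum order in s->min. Also sets up the
 * kmemcheck shadow if needed and records the object count in page->objects.
 */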
1093static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
1094{
1095	struct page *page;
1096	struct kmem_cache_order_objects oo = s->oo;
1097	gfp_t alloc_gfp;
1098
1099	flags |= s->allocflags;
1100
1101	/*
1102	 * Let the initial higher-order allocation fail under memory pressure
1103	 * so we fall back to the minimum order allocation.
1104	 */
1105	alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
1106
1107	page = alloc_slab_page(alloc_gfp, node, oo);
1108	if (unlikely(!page)) {
1109		oo = s->min;
1110		/*
1111		 * Allocation may have failed due to fragmentation.
1112		 * Try a lower order alloc if possible
1113		 */
1114		page = alloc_slab_page(flags, node, oo);
1115		if (!page)
1116			return NULL;
1117
1118		stat(this_cpu_ptr(s->cpu_slab), ORDER_FALLBACK);
1119	}
1120
1121	if (kmemcheck_enabled
1122		&& !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
1123		int pages = 1 << oo_order(oo);
1124
1125		kmemcheck_alloc_shadow(page, oo_order(oo), flags, node);
1126
1127		/*
1128		 * Objects from caches that have a constructor don't get
1129		 * cleared when they're allocated, so we need to do it here.
1130		 */
1131		if (s->ctor)
1132			kmemcheck_mark_uninitialized_pages(page, pages);
1133		else
1134			kmemcheck_mark_unallocated_pages(page, pages);
1135	}
1136
1137	page->objects = oo_objects(oo);
1138	mod_zone_page_state(page_zone(page),
1139		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
1140		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
1141		1 << oo_order(oo));
1142
1143	return page;
1144}
1145
1146static void setup_object(struct kmem_cache *s, struct page *page,
1147				void *object)
1148{
1149	setup_object_debug(s, page, object);
1150	if (unlikely(s->ctor))
1151		s->ctor(object);
1152}
1153
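/*
 * Create a fully set up slab: allocate the pages, mark them as slab pages,
 * optionally poison the storage, run setup_object() (debug setup + ctor) on
 * every object and link all objects into the initial freelist.
 */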
1154static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
1155{
1156	struct page *page;
1157	void *start;
1158	void *last;
1159	void *p;
1160
1161	BUG_ON(flags & GFP_SLAB_BUG_MASK);
1162
1163	page = allocate_slab(s,
1164		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
1165	if (!page)
1166		goto out;
1167
1168	inc_slabs_node(s, page_to_nid(page), page->objects);
1169	page->slab = s;
1170	page->flags |= 1 << PG_slab;
1171	if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
1172			SLAB_STORE_USER | SLAB_TRACE))
1173		__SetPageSlubDebug(page);
1174
1175	start = page_address(page);
1176
1177	if (unlikely(s->flags & SLAB_POISON))
1178		memset(start, POISON_INUSE, PAGE_SIZE << compound_order(page));
1179
1180	last = start;
1181	for_each_object(p, s, start, page->objects) {
1182		setup_object(s, page, last);
1183		set_freepointer(s, last, p);
1184		last = p;
1185	}
1186	setup_object(s, page, last);
1187	set_freepointer(s, last, NULL);
1188
1189	page->freelist = start;
1190	page->inuse = 0;
1191out:
1192	return page;
1193}
1194
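/*
 * Release a slab back to the page allocator. If debugging is active the
 * slab padding and all objects are checked first. Zone slab counters and
 * the reclaim state are updated accordingly.
 */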
1195static void __free_slab(struct kmem_cache *s, struct page *page)
1196{
1197	int order = compound_order(page);
1198	int pages = 1 << order;
1199
1200	if (unlikely(SLABDEBUG && PageSlubDebug(page))) {
1201		void *p;
1202
1203		slab_pad_check(s, page);
1204		for_each_object(p, s, page_address(page),
1205						page->objects)
1206			check_object(s, page, p, 0);
1207		__ClearPageSlubDebug(page);
1208	}
1209
1210	kmemcheck_free_shadow(page, compound_order(page));
1211
1212	mod_zone_page_state(page_zone(page),
1213		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
1214		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
1215		-pages);
1216
1217	__ClearPageSlab(page);
1218	reset_page_mapcount(page);
1219	if (current->reclaim_state)
1220		current->reclaim_state->reclaimed_slab += pages;
1221	__free_pages(page, order);
1222}
1223
1224static void rcu_free_slab(struct rcu_head *h)
1225{
1226	struct page *page;
1227
1228	page = container_of((struct list_head *)h, struct page, lru);
1229	__free_slab(page->slab, page);
1230}
1231
1232static void free_slab(struct kmem_cache *s, struct page *page)
1233{
1234	if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) {
1235		/*
1236		 * RCU free overloads the RCU head over the LRU
1237		 */
1238		struct rcu_head *head = (void *)&page->lru;
1239
1240		call_rcu(head, rcu_free_slab);
1241	} else
1242		__free_slab(s, page);
1243}
1244
1245static void discard_slab(struct kmem_cache *s, struct page *page)
1246{
1247	dec_slabs_node(s, page_to_nid(page), page->objects);
1248	free_slab(s, page);
1249}
1250
1251/*
1252 * Per slab locking using the pagelock
1253 */
1254static __always_inline void slab_lock(struct page *page)
1255{
1256	bit_spin_lock(PG_locked, &page->flags);
1257}
1258
1259static __always_inline void slab_unlock(struct page *page)
1260{
1261	__bit_spin_unlock(PG_locked, &page->flags);
1262}
1263
1264static __always_inline int slab_trylock(struct page *page)
1265{
1266	int rc = 1;
1267
1268	rc = bit_spin_trylock(PG_locked, &page->flags);
1269	return rc;
1270}
1271
1272/*
1273 * Management of partially allocated slabs
1274 */
1275static void add_partial(struct kmem_cache_node *n,
1276				struct page *page, int tail)
1277{
1278	spin_lock(&n->list_lock);
1279	n->nr_partial++;
1280	if (tail)
1281		list_add_tail(&page->lru, &n->partial);
1282	else
1283		list_add(&page->lru, &n->partial);
1284	spin_unlock(&n->list_lock);
1285}
1286
1287static void remove_partial(struct kmem_cache *s, struct page *page)
1288{
1289	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1290
1291	spin_lock(&n->list_lock);
1292	list_del(&page->lru);
1293	n->nr_partial--;
1294	spin_unlock(&n->list_lock);
1295}
1296
1297/*
1298 * Lock slab and remove from the partial list.
1299 *
1300 * Must hold list_lock.
1301 */
1302static inline int lock_and_freeze_slab(struct kmem_cache_node *n,
1303							struct page *page)
1304{
1305	if (slab_trylock(page)) {
1306		list_del(&page->lru);
1307		n->nr_partial--;
1308		__SetPageSlubFrozen(page);
1309		return 1;
1310	}
1311	return 0;
1312}
1313
1314/*
1315 * Try to allocate a partial slab from a specific node.
1316 */
1317static struct page *get_partial_node(struct kmem_cache_node *n)
1318{
1319	struct page *page;
1320
1321	/*
1322	 * Racy check. If we mistakenly see no partial slabs then we
1323	 * just allocate an empty slab. If we mistakenly try to get a
1324	 * partial slab and there is none available then get_partial_node()
1325	 * will return NULL.
1326	 */
1327	if (!n || !n->nr_partial)
1328		return NULL;
1329
1330	spin_lock(&n->list_lock);
1331	list_for_each_entry(page, &n->partial, lru)
1332		if (lock_and_freeze_slab(n, page))
1333			goto out;
1334	page = NULL;
1335out:
1336	spin_unlock(&n->list_lock);
1337	return page;
1338}
1339
1340/*
1341 * Get a page from somewhere. Search in increasing NUMA distances.
1342 */
1343static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
1344{
1345#ifdef CONFIG_NUMA
1346	struct zonelist *zonelist;
1347	struct zoneref *z;
1348	struct zone *zone;
1349	enum zone_type high_zoneidx = gfp_zone(flags);
1350	struct page *page;
1351
1352	/*
1353	 * The defrag ratio allows a configuration of the tradeoffs between
1354	 * inter node defragmentation and node local allocations. A lower
1355	 * defrag_ratio increases the tendency to do local allocations
1356	 * instead of attempting to obtain partial slabs from other nodes.
1357	 *
1358	 * If the defrag_ratio is set to 0 then kmalloc() always
1359	 * returns node local objects. If the ratio is higher then kmalloc()
1360	 * may return off node objects because partial slabs are obtained
1361	 * from other nodes and filled up.
1362	 *
1363	 * If /sys/kernel/slab/xx/defrag_ratio is set to 100 (which makes
1364	 * defrag_ratio = 1000) then every (well almost) allocation will
1365	 * first attempt to defrag slab caches on other nodes. This means
1366	 * scanning over all nodes to look for partial slabs which may be
1367	 * expensive if we do it every time we are trying to find a slab
1368	 * with available objects.
1369	 */
1370	if (!s->remote_node_defrag_ratio ||
1371			get_cycles() % 1024 > s->remote_node_defrag_ratio)
1372		return NULL;
1373
1374	zonelist = node_zonelist(slab_node(current->mempolicy), flags);
1375	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
1376		struct kmem_cache_node *n;
1377
1378		n = get_node(s, zone_to_nid(zone));
1379
1380		if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
1381				n->nr_partial > s->min_partial) {
1382			page = get_partial_node(n);
1383			if (page)
1384				return page;
1385		}
1386	}
1387#endif
1388	return NULL;
1389}
1390
1391/*
1392 * Get a partial page, lock it and return it.
1393 */
1394static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
1395{
1396	struct page *page;
1397	int searchnode = (node == -1) ? numa_node_id() : node;
1398
1399	page = get_partial_node(get_node(s, searchnode));
1400	if (page || (flags & __GFP_THISNODE))
1401		return page;
1402
1403	return get_any_partial(s, flags);
1404}
1405
1406/*
1407 * Move a page back to the lists.
1408 *
1409 * Must be called with the slab lock held.
1410 *
1411 * On exit the slab lock will have been dropped.
1412 */
1413static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
1414{
1415	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1416	struct kmem_cache_cpu *c = this_cpu_ptr(s->cpu_slab);
1417
1418	__ClearPageSlubFrozen(page);
1419	if (page->inuse) {
1420
1421		if (page->freelist) {
1422			add_partial(n, page, tail);
1423			stat(c, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
1424		} else {
1425			stat(c, DEACTIVATE_FULL);
1426			if (SLABDEBUG && PageSlubDebug(page) &&
1427						(s->flags & SLAB_STORE_USER))
1428				add_full(n, page);
1429		}
1430		slab_unlock(page);
1431	} else {
1432		stat(c, DEACTIVATE_EMPTY);
1433		if (n->nr_partial < s->min_partial) {
1434			/*
1435			 * Adding an empty slab to the partial slabs in order
1436			 * to avoid page allocator overhead. This slab needs
1437			 * to come after the other slabs with objects in
1438			 * so that the others get filled first. That way the
1439			 * size of the partial list stays small.
1440			 *
1441			 * kmem_cache_shrink can reclaim any empty slabs from
1442			 * the partial list.
1443			 */
1444			add_partial(n, page, 1);
1445			slab_unlock(page);
1446		} else {
1447			slab_unlock(page);
1448			stat(__this_cpu_ptr(s->cpu_slab), FREE_SLAB);
1449			discard_slab(s, page);
1450		}
1451	}
1452}
1453
1454/*
1455 * Remove the cpu slab
1456 */
1457static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
1458{
1459	struct page *page = c->page;
1460	int tail = 1;
1461
1462	if (page->freelist)
1463		stat(c, DEACTIVATE_REMOTE_FREES);
1464	/*
1465	 * Merge cpu freelist into slab freelist. Typically we get here
1466	 * because both freelists are empty. So this is unlikely
1467	 * to occur.
1468	 */
1469	while (unlikely(c->freelist)) {
1470		void **object;
1471
1472		tail = 0;	/* Hot objects. Put the slab first */
1473
1474		/* Retrieve object from cpu_freelist */
1475		object = c->freelist;
1476		c->freelist = c->freelist[c->offset];
1477
1478		/* And put onto the regular freelist */
1479		object[c->offset] = page->freelist;
1480		page->freelist = object;
1481		page->inuse--;
1482	}
1483	c->page = NULL;
1484	unfreeze_slab(s, page, tail);
1485}
1486
1487static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
1488{
1489	stat(c, CPUSLAB_FLUSH);
1490	slab_lock(c->page);
1491	deactivate_slab(s, c);
1492}
1493
1494/*
1495 * Flush cpu slab.
1496 *
1497 * Called from IPI handler with interrupts disabled.
1498 */
1499static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
1500{
1501	struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
1502
1503	if (likely(c && c->page))
1504		flush_slab(s, c);
1505}
1506
1507static void flush_cpu_slab(void *d)
1508{
1509	struct kmem_cache *s = d;
1510
1511	__flush_cpu_slab(s, smp_processor_id());
1512}
1513
1514static void flush_all(struct kmem_cache *s)
1515{
1516	on_each_cpu(flush_cpu_slab, s, 1);
1517}
1518
1519/*
1520 * Check if the objects in a per cpu structure fit numa
1521 * locality expectations.
1522 */
1523static inline int node_match(struct kmem_cache_cpu *c, int node)
1524{
1525#ifdef CONFIG_NUMA
1526	if (node != -1 && c->node != node)
1527		return 0;
1528#endif
1529	return 1;
1530}
1531
1532static int count_free(struct page *page)
1533{
1534	return page->objects - page->inuse;
1535}
1536
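/*
 * Walk the partial list of a node under list_lock and sum up get_count()
 * over all partial slabs, e.g. the number of free objects via count_free().
 */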
1537static unsigned long count_partial(struct kmem_cache_node *n,
1538					int (*get_count)(struct page *))
1539{
1540	unsigned long flags;
1541	unsigned long x = 0;
1542	struct page *page;
1543
1544	spin_lock_irqsave(&n->list_lock, flags);
1545	list_for_each_entry(page, &n->partial, lru)
1546		x += get_count(page);
1547	spin_unlock_irqrestore(&n->list_lock, flags);
1548	return x;
1549}
1550
1551static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
1552{
1553#ifdef CONFIG_SLUB_DEBUG
1554	return atomic_long_read(&n->total_objects);
1555#else
1556	return 0;
1557#endif
1558}
1559
1560static noinline void
1561slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
1562{
1563	int node;
1564
1565	printk(KERN_WARNING
1566		"SLUB: Unable to allocate memory on node %d (gfp=0x%x)\n",
1567		nid, gfpflags);
1568	printk(KERN_WARNING "  cache: %s, object size: %d, buffer size: %d, "
1569		"default order: %d, min order: %d\n", s->name, s->objsize,
1570		s->size, oo_order(s->oo), oo_order(s->min));
1571
1572	if (oo_order(s->min) > get_order(s->objsize))
1573		printk(KERN_WARNING "  %s debugging increased min order, use "
1574		       "slub_debug=O to disable.\n", s->name);
1575
1576	for_each_online_node(node) {
1577		struct kmem_cache_node *n = get_node(s, node);
1578		unsigned long nr_slabs;
1579		unsigned long nr_objs;
1580		unsigned long nr_free;
1581
1582		if (!n)
1583			continue;
1584
1585		nr_free  = count_partial(n, count_free);
1586		nr_slabs = node_nr_slabs(n);
1587		nr_objs  = node_nr_objs(n);
1588
1589		printk(KERN_WARNING
1590			"  node %d: slabs: %ld, objs: %ld, free: %ld\n",
1591			node, nr_slabs, nr_objs, nr_free);
1592	}
1593}
1594
1595/*
1596 * Slow path. The lockless freelist is empty or we need to perform
1597 * debugging duties.
1598 *
1599 * Interrupts are disabled.
1600 *
1601 * Processing is still very fast if new objects have been freed to the
1602 * regular freelist. In that case we simply take over the regular freelist
1603 * as the lockless freelist and zap the regular freelist.
1604 *
1605 * If that is not working then we fall back to the partial lists. We take the
1606 * first element of the freelist as the object to allocate now and move the
1607 * rest of the freelist to the lockless freelist.
1608 *
1609 * And if we were unable to get a new slab from the partial slab lists then
1610 * we need to allocate a new slab. This is the slowest path since it involves
1611 * a call to the page allocator and the setup of a new slab.
1612 */
1613static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
1614			  unsigned long addr, struct kmem_cache_cpu *c)
1615{
1616	void **object;
1617	struct page *new;
1618
1619	/* We handle __GFP_ZERO in the caller */
1620	gfpflags &= ~__GFP_ZERO;
1621
1622	if (!c->page)
1623		goto new_slab;
1624
1625	slab_lock(c->page);
1626	if (unlikely(!node_match(c, node)))
1627		goto another_slab;
1628
1629	stat(c, ALLOC_REFILL);
1630
1631load_freelist:
1632	object = c->page->freelist;
1633	if (unlikely(!object))
1634		goto another_slab;
1635	if (unlikely(SLABDEBUG && PageSlubDebug(c->page)))
1636		goto debug;
1637
1638	c->freelist = object[c->offset];
1639	c->page->inuse = c->page->objects;
1640	c->page->freelist = NULL;
1641	c->node = page_to_nid(c->page);
1642unlock_out:
1643	slab_unlock(c->page);
1644	stat(c, ALLOC_SLOWPATH);
1645	return object;
1646
1647another_slab:
1648	deactivate_slab(s, c);
1649
1650new_slab:
1651	new = get_partial(s, gfpflags, node);
1652	if (new) {
1653		c->page = new;
1654		stat(c, ALLOC_FROM_PARTIAL);
1655		goto load_freelist;
1656	}
1657
1658	if (gfpflags & __GFP_WAIT)
1659		local_irq_enable();
1660
1661	new = new_slab(s, gfpflags, node);
1662
1663	if (gfpflags & __GFP_WAIT)
1664		local_irq_disable();
1665
1666	if (new) {
1667		c = __this_cpu_ptr(s->cpu_slab);
1668		stat(c, ALLOC_SLAB);
1669		if (c->page)
1670			flush_slab(s, c);
1671		slab_lock(new);
1672		__SetPageSlubFrozen(new);
1673		c->page = new;
1674		goto load_freelist;
1675	}
1676	if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
1677		slab_out_of_memory(s, gfpflags, node);
1678	return NULL;
1679debug:
1680	if (!alloc_debug_processing(s, c->page, object, addr))
1681		goto another_slab;
1682
1683	c->page->inuse++;
1684	c->page->freelist = object[c->offset];
1685	c->node = -1;
1686	goto unlock_out;
1687}
1688
1689/*
1690 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
1691 * have the fastpath folded into their functions. So no function call
1692 * overhead for requests that can be satisfied on the fastpath.
1693 *
1694 * The fastpath works by first checking if the lockless freelist can be used.
1695 * If not then __slab_alloc is called for slow processing.
1696 *
1697 * Otherwise we can simply pick the next object from the lockless free list.
1698 */
1699static __always_inline void *slab_alloc(struct kmem_cache *s,
1700		gfp_t gfpflags, int node, unsigned long addr)
1701{
1702	void **object;
1703	struct kmem_cache_cpu *c;
1704	unsigned long flags;
1705	unsigned long objsize;
1706
1707	gfpflags &= gfp_allowed_mask;
1708
1709	lockdep_trace_alloc(gfpflags);
1710	might_sleep_if(gfpflags & __GFP_WAIT);
1711
1712	if (should_failslab(s->objsize, gfpflags))
1713		return NULL;
1714
1715	local_irq_save(flags);
1716	c = __this_cpu_ptr(s->cpu_slab);
1717	object = c->freelist;
1718	objsize = c->objsize;
1719	if (unlikely(!object || !node_match(c, node)))
1720
1721		object = __slab_alloc(s, gfpflags, node, addr, c);
1722
1723	else {
1724		c->freelist = object[c->offset];
1725		stat(c, ALLOC_FASTPATH);
1726	}
1727	local_irq_restore(flags);
1728
1729	if (unlikely(gfpflags & __GFP_ZERO) && object)
1730		memset(object, 0, objsize);
1731
1732	kmemcheck_slab_alloc(s, gfpflags, object, c->objsize);
1733	kmemleak_alloc_recursive(object, objsize, 1, s->flags, gfpflags);
1734
1735	return object;
1736}
1737
1738void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
1739{
1740	void *ret = slab_alloc(s, gfpflags, -1, _RET_IP_);
1741
1742	trace_kmem_cache_alloc(_RET_IP_, ret, s->objsize, s->size, gfpflags);
1743
1744	return ret;
1745}
1746EXPORT_SYMBOL(kmem_cache_alloc);
1747
1748#ifdef CONFIG_TRACING
1749void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
1750{
1751	return slab_alloc(s, gfpflags, -1, _RET_IP_);
1752}
1753EXPORT_SYMBOL(kmem_cache_alloc_notrace);
1754#endif
1755
1756#ifdef CONFIG_NUMA
1757void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
1758{
1759	void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
1760
1761	trace_kmem_cache_alloc_node(_RET_IP_, ret,
1762				    s->objsize, s->size, gfpflags, node);
1763
1764	return ret;
1765}
1766EXPORT_SYMBOL(kmem_cache_alloc_node);
1767#endif
1768
1769#ifdef CONFIG_TRACING
1770void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
1771				    gfp_t gfpflags,
1772				    int node)
1773{
1774	return slab_alloc(s, gfpflags, node, _RET_IP_);
1775}
1776EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
1777#endif
1778
1779/*
1780 * Slow path handling. This may still be called frequently since objects
1781 * have a longer lifetime than the cpu slabs in most processing loads.
1782 *
1783 * So we still attempt to reduce cache line usage. Just take the slab
1784 * lock and free the item. If there is no additional partial page
1785 * handling required then we can return immediately.
1786 */
1787static void __slab_free(struct kmem_cache *s, struct page *page,
1788			void *x, unsigned long addr, unsigned int offset)
1789{
1790	void *prior;
1791	void **object = (void *)x;
1792	struct kmem_cache_cpu *c;
1793
1794	c = __this_cpu_ptr(s->cpu_slab);
1795	stat(c, FREE_SLOWPATH);
1796	slab_lock(page);
1797
1798	if (unlikely(SLABDEBUG && PageSlubDebug(page)))
1799		goto debug;
1800
1801checks_ok:
1802	prior = object[offset] = page->freelist;
1803	page->freelist = object;
1804	page->inuse--;
1805
1806	if (unlikely(PageSlubFrozen(page))) {
1807		stat(c, FREE_FROZEN);
1808		goto out_unlock;
1809	}
1810
1811	if (unlikely(!page->inuse))
1812		goto slab_empty;
1813
1814	/*
1815	 * Objects left in the slab. If it was not on the partial list before
1816	 * then add it.
1817	 */
1818	if (unlikely(!prior)) {
1819		add_partial(get_node(s, page_to_nid(page)), page, 1);
1820		stat(c, FREE_ADD_PARTIAL);
1821	}
1822
1823out_unlock:
1824	slab_unlock(page);
1825	return;
1826
1827slab_empty:
1828	if (prior) {
1829		/*
1830		 * Slab still on the partial list.
1831		 */
1832		remove_partial(s, page);
1833		stat(c, FREE_REMOVE_PARTIAL);
1834	}
1835	slab_unlock(page);
1836	stat(c, FREE_SLAB);
1837	discard_slab(s, page);
1838	return;
1839
1840debug:
1841	if (!free_debug_processing(s, page, x, addr))
1842		goto out_unlock;
1843	goto checks_ok;
1844}
1845
1846/*
1847 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
1848 * can perform fastpath freeing without additional function calls.
1849 *
1850 * The fastpath is only possible if we are freeing to the current cpu slab
1851 * of this processor. This is typically the case if we have just allocated
1852 * the item before.
1853 *
1854 * If fastpath is not possible then fall back to __slab_free where we deal
1855 * with all sorts of special processing.
1856 */
1857static __always_inline void slab_free(struct kmem_cache *s,
1858			struct page *page, void *x, unsigned long addr)
1859{
1860	void **object = (void *)x;
1861	struct kmem_cache_cpu *c;
1862	unsigned long flags;
1863
1864	kmemleak_free_recursive(x, s->flags);
1865	local_irq_save(flags);
1866	c = __this_cpu_ptr(s->cpu_slab);
1867	kmemcheck_slab_free(s, object, c->objsize);
1868	debug_check_no_locks_freed(object, c->objsize);
1869	if (!(s->flags & SLAB_DEBUG_OBJECTS))
1870		debug_check_no_obj_freed(object, c->objsize);
1871	if (likely(page == c->page && c->node >= 0)) {
1872		object[c->offset] = c->freelist;
1873		c->freelist = object;
1874		stat(c, FREE_FASTPATH);
1875	} else
1876		__slab_free(s, page, x, addr, c->offset);
1877
1878	local_irq_restore(flags);
1879}
1880
1881void kmem_cache_free(struct kmem_cache *s, void *x)
1882{
1883	struct page *page;
1884
1885	page = virt_to_head_page(x);
1886
1887	slab_free(s, page, x, _RET_IP_);
1888
1889	trace_kmem_cache_free(_RET_IP_, x);
1890}
1891EXPORT_SYMBOL(kmem_cache_free);
1892
1893/* Figure out on which slab page the object resides */
1894static struct page *get_object_page(const void *x)
1895{
1896	struct page *page = virt_to_head_page(x);
1897
1898	if (!PageSlab(page))
1899		return NULL;
1900
1901	return page;
1902}
1903
1904/*
1905 * Object placement in a slab is made very easy because we always start at
1906 * offset 0. If we tune the size of the object to the alignment then we can
1907 * get the required alignment by putting one properly sized object after
1908 * another.
1909 *
1910 * Notice that the allocation order determines the sizes of the per cpu
1911 * caches. Each processor has always one slab available for allocations.
1912 * Increasing the allocation order reduces the number of times that slabs
1913 * must be moved on and off the partial lists and is therefore a factor in
1914 * locking overhead.
1915 */
1916
1917/*
1918 * Minimum / Maximum order of slab pages. This influences locking overhead
1919 * and slab fragmentation. A higher order reduces the number of partial slabs
1920 * and increases the number of allocations possible without having to
1921 * take the list_lock.
1922 */
1923static int slub_min_order;
1924static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
1925static int slub_min_objects;
1926
1927/*
1928 * Merge control. If this is set then no merging of slab caches will occur.
1929 * (Could be removed. This was introduced to pacify the merge skeptics.)
1930 */
1931static int slub_nomerge;
1932
1933/*
1934 * Calculate the order of allocation given a slab object size.
1935 *
1936 * The order of allocation has significant impact on performance and other
1937 * system components. Generally order 0 allocations should be preferred since
1938 * order 0 does not cause fragmentation in the page allocator. Larger objects
1939 * be problematic to put into order 0 slabs because there may be too much
1940 * can be problematic to put into order 0 slabs because there may be too much
1941 * would be wasted.
1942 *
1943 * In order to reach satisfactory performance we must ensure that a minimum
1944 * number of objects is in one slab. Otherwise we may generate too much
1945 * activity on the partial lists which requires taking the list_lock. This is
1946 * less a concern for large slabs though which are rarely used.
1947 *
1948 * slub_max_order specifies the order where we begin to stop considering the
1949 * number of objects in a slab as critical. If we reach slub_max_order then
1950 * we try to keep the page order as low as possible. So we accept more waste
1951 * of space in favor of a small page order.
1952 *
1953 * Higher order allocations also allow the placement of more objects in a
1954 * slab and thereby reduce object handling overhead. If the user has
1955 * requested a higher minimum order then we start with that one instead of
1956 * the smallest order which will fit the object.
1957 */
1958static inline int slab_order(int size, int min_objects,
1959				int max_order, int fract_leftover)
1960{
1961	int order;
1962	int rem;
1963	int min_order = slub_min_order;
1964
1965	if ((PAGE_SIZE << min_order) / size > MAX_OBJS_PER_PAGE)
1966		return get_order(size * MAX_OBJS_PER_PAGE) - 1;
1967
1968	for (order = max(min_order,
1969				fls(min_objects * size - 1) - PAGE_SHIFT);
1970			order <= max_order; order++) {
1971
1972		unsigned long slab_size = PAGE_SIZE << order;
1973
1974		if (slab_size < min_objects * size)
1975			continue;
1976
1977		rem = slab_size % size;
1978
1979		if (rem <= slab_size / fract_leftover)
1980			break;
1981
1982	}
1983
1984	return order;
1985}
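
/*
 * Worked example, assuming PAGE_SIZE == 4096 and slub_min_order == 0: for
 * size == 192, min_objects == 16 and fract_leftover == 16, the loop starts
 * at order fls(16 * 192 - 1) - PAGE_SHIFT == 0.  An order 0 slab holds
 * 4096 / 192 == 21 objects with 64 bytes left over, and 64 <= 4096 / 16,
 * so order 0 is accepted.
 */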
1986
1987static inline int calculate_order(int size)
1988{
1989	int order;
1990	int min_objects;
1991	int fraction;
1992	int max_objects;
1993
1994	/*
1995	 * Attempt to find best configuration for a slab. This
1996	 * works by first attempting to generate a layout with
1997	 * the best configuration and backing off gradually.
1998	 *
1999	 * First we reduce the acceptable waste in a slab. Then
2000	 * we reduce the minimum objects required in a slab.
2001	 */
2002	min_objects = slub_min_objects;
2003	if (!min_objects)
2004		min_objects = 4 * (fls(nr_cpu_ids) + 1);
2005	max_objects = (PAGE_SIZE << slub_max_order)/size;
2006	min_objects = min(min_objects, max_objects);
2007
2008	while (min_objects > 1) {
2009		fraction = 16;
2010		while (fraction >= 4) {
2011			order = slab_order(size, min_objects,
2012						slub_max_order, fraction);
2013			if (order <= slub_max_order)
2014				return order;
2015			fraction /= 2;
2016		}
2017		min_objects--;
2018	}
2019
2020	/*
2021	 * We were unable to place multiple objects in a slab. Now
2022	 * let's see if we can place a single object there.
2023	 */
2024	order = slab_order(size, 1, slub_max_order, 1);
2025	if (order <= slub_max_order)
2026		return order;
2027
2028	/*
2029	 * Doh, this slab cannot be placed using slub_max_order.
2030	 */
2031	order = slab_order(size, 1, MAX_ORDER, 1);
2032	if (order < MAX_ORDER)
2033		return order;
2034	return -ENOSYS;
2035}
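
/*
 * Sketch of the back-off, assuming 4 possible CPUs: min_objects starts at
 * 4 * (fls(4) + 1) == 16.  slab_order() is first asked for 16 objects with
 * at most 1/16 of the slab wasted, then 1/8, then 1/4.  If no order within
 * slub_max_order satisfies that, min_objects is decremented and the waste
 * ladder is retried, down to a single object per slab and, as a last
 * resort, an order of up to MAX_ORDER - 1.
 */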
2036
2037/*
2038 * Figure out what the alignment of the objects will be.
2039 */
2040static unsigned long calculate_alignment(unsigned long flags,
2041		unsigned long align, unsigned long size)
2042{
2043	/*
2044	 * If the user wants hardware cache aligned objects then follow that
2045	 * suggestion if the object is sufficiently large.
2046	 *
2047	 * The hardware cache alignment cannot override the specified
2048	 * alignment though. If that is greater, use it.
2049	 */
2050	if (flags & SLAB_HWCACHE_ALIGN) {
2051		unsigned long ralign = cache_line_size();
2052		while (size <= ralign / 2)
2053			ralign /= 2;
2054		align = max(align, ralign);
2055	}
2056
2057	if (align < ARCH_SLAB_MINALIGN)
2058		align = ARCH_SLAB_MINALIGN;
2059
2060	return ALIGN(align, sizeof(void *));
2061}
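
/*
 * For illustration, assuming a 64 byte cache line and ARCH_SLAB_MINALIGN
 * <= 8: a 24 byte object created with SLAB_HWCACHE_ALIGN and align == 8
 * has ralign halved from 64 to 32 (24 <= 32 but 24 > 16), so the result is
 * 32 byte alignment.  Only objects larger than half a cache line end up
 * aligned to a full cache line.
 */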
2062
2063static void init_kmem_cache_cpu(struct kmem_cache *s,
2064			struct kmem_cache_cpu *c)
2065{
2066	c->page = NULL;
2067	c->freelist = NULL;
2068	c->node = 0;
2069	c->offset = s->offset / sizeof(void *);
2070	c->objsize = s->objsize;
2071#ifdef CONFIG_SLUB_STATS
2072	memset(c->stat, 0, NR_SLUB_STAT_ITEMS * sizeof(unsigned));
2073#endif
2074}
2075
2076static void
2077init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
2078{
2079	n->nr_partial = 0;
2080	spin_lock_init(&n->list_lock);
2081	INIT_LIST_HEAD(&n->partial);
2082#ifdef CONFIG_SLUB_DEBUG
2083	atomic_long_set(&n->nr_slabs, 0);
2084	atomic_long_set(&n->total_objects, 0);
2085	INIT_LIST_HEAD(&n->full);
2086#endif
2087}
2088
2089static DEFINE_PER_CPU(struct kmem_cache_cpu, kmalloc_percpu[SLUB_PAGE_SHIFT]);
2090
2091static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
2092{
2093	int cpu;
2094
2095	if (s < kmalloc_caches + KMALLOC_CACHES && s >= kmalloc_caches)
2096		/*
2097		 * Boot time creation of the kmalloc array. Use static per cpu data
2098		 * since the per cpu allocator is not available yet.
2099		 */
2100		s->cpu_slab = per_cpu_var(kmalloc_percpu) + (s - kmalloc_caches);
2101	else
2102		s->cpu_slab =  alloc_percpu(struct kmem_cache_cpu);
2103
2104	if (!s->cpu_slab)
2105		return 0;
2106
2107	for_each_possible_cpu(cpu)
2108		init_kmem_cache_cpu(s, per_cpu_ptr(s->cpu_slab, cpu));
2109	return 1;
2110}
2111
2112#ifdef CONFIG_NUMA
2113/*
2114 * No kmalloc_node yet so do it by hand. We know that this is the first
2115 * slab on the node for this slabcache. There are no concurrent accesses
2116 * possible.
2117 *
2118 * Note that this function only works on the kmalloc_node_cache
2119 * when allocating for the kmalloc_node_cache. This is used for bootstrapping
2120 * memory on a fresh node that has no slab structures yet.
2121 */
2122static void early_kmem_cache_node_alloc(gfp_t gfpflags, int node)
2123{
2124	struct page *page;
2125	struct kmem_cache_node *n;
2126	unsigned long flags;
2127
2128	BUG_ON(kmalloc_caches->size < sizeof(struct kmem_cache_node));
2129
2130	page = new_slab(kmalloc_caches, gfpflags, node);
2131
2132	BUG_ON(!page);
2133	if (page_to_nid(page) != node) {
2134		printk(KERN_ERR "SLUB: Unable to allocate memory from "
2135				"node %d\n", node);
2136		printk(KERN_ERR "SLUB: Allocating a useless per node structure "
2137				"in order to be able to continue\n");
2138	}
2139
2140	n = page->freelist;
2141	BUG_ON(!n);
2142	page->freelist = get_freepointer(kmalloc_caches, n);
2143	page->inuse++;
2144	kmalloc_caches->node[node] = n;
2145#ifdef CONFIG_SLUB_DEBUG
2146	init_object(kmalloc_caches, n, 1);
2147	init_tracking(kmalloc_caches, n);
2148#endif
2149	init_kmem_cache_node(n, kmalloc_caches);
2150	inc_slabs_node(kmalloc_caches, node, page->objects);
2151
2152	/*
2153	 * lockdep requires consistent irq usage for each lock
2154	 * so even though there cannot be a race this early in
2155	 * the boot sequence, we still disable irqs.
2156	 */
2157	local_irq_save(flags);
2158	add_partial(n, page, 0);
2159	local_irq_restore(flags);
2160}
2161
2162static void free_kmem_cache_nodes(struct kmem_cache *s)
2163{
2164	int node;
2165
2166	for_each_node_state(node, N_NORMAL_MEMORY) {
2167		struct kmem_cache_node *n = s->node[node];
2168		if (n && n != &s->local_node)
2169			kmem_cache_free(kmalloc_caches, n);
2170		s->node[node] = NULL;
2171	}
2172}
2173
2174static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
2175{
2176	int node;
2177	int local_node;
2178
2179	if (slab_state >= UP)
2180		local_node = page_to_nid(virt_to_page(s));
2181	else
2182		local_node = 0;
2183
2184	for_each_node_state(node, N_NORMAL_MEMORY) {
2185		struct kmem_cache_node *n;
2186
2187		if (local_node == node)
2188			n = &s->local_node;
2189		else {
2190			if (slab_state == DOWN) {
2191				early_kmem_cache_node_alloc(gfpflags, node);
2192				continue;
2193			}
2194			n = kmem_cache_alloc_node(kmalloc_caches,
2195							gfpflags, node);
2196
2197			if (!n) {
2198				free_kmem_cache_nodes(s);
2199				return 0;
2200			}
2201
2202		}
2203		s->node[node] = n;
2204		init_kmem_cache_node(n, s);
2205	}
2206	return 1;
2207}
2208#else
2209static void free_kmem_cache_nodes(struct kmem_cache *s)
2210{
2211}
2212
2213static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
2214{
2215	init_kmem_cache_node(&s->local_node, s);
2216	return 1;
2217}
2218#endif
2219
2220static void set_min_partial(struct kmem_cache *s, unsigned long min)
2221{
2222	if (min < MIN_PARTIAL)
2223		min = MIN_PARTIAL;
2224	else if (min > MAX_PARTIAL)
2225		min = MAX_PARTIAL;
2226	s->min_partial = min;
2227}
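
/*
 * For illustration, assuming MIN_PARTIAL == 5 and MAX_PARTIAL == 10: a 192
 * byte cache passes ilog2(192) == 7 from kmem_cache_open() and keeps 7,
 * a 16 byte cache (ilog2 == 4) is raised to 5, and no cache ever keeps a
 * minimum of more than 10 partial slabs per node.
 */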
2228
2229/*
2230 * calculate_sizes() determines the order and the distribution of data within
2231 * a slab object.
2232 */
2233static int calculate_sizes(struct kmem_cache *s, int forced_order)
2234{
2235	unsigned long flags = s->flags;
2236	unsigned long size = s->objsize;
2237	unsigned long align = s->align;
2238	int order;
2239
2240	/*
2241	 * Round up object size to the next word boundary. We can only
2242	 * place the free pointer at word boundaries and this determines
2243	 * the possible location of the free pointer.
2244	 */
2245	size = ALIGN(size, sizeof(void *));
2246
2247#ifdef CONFIG_SLUB_DEBUG
2248	/*
2249	 * Determine if we can poison the object itself. If the user of
2250	 * the slab may touch the object after free or before allocation
2251	 * then we should never poison the object itself.
2252	 */
2253	if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) &&
2254			!s->ctor)
2255		s->flags |= __OBJECT_POISON;
2256	else
2257		s->flags &= ~__OBJECT_POISON;
2258
2259
2260	/*
2261	 * If we are Redzoning then check if there is some space between the
2262	 * end of the object and the free pointer. If not then add an
2263	 * additional word to have some bytes to store Redzone information.
2264	 */
2265	if ((flags & SLAB_RED_ZONE) && size == s->objsize)
2266		size += sizeof(void *);
2267#endif
2268
2269	/*
2270	 * With that we have determined the number of bytes in actual use
2271	 * by the object. This is the potential offset to the free pointer.
2272	 */
2273	s->inuse = size;
2274
2275	if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
2276		s->ctor)) {
2277		/*
2278		 * Relocate free pointer after the object if it is not
2279		 * permitted to overwrite the first word of the object on
2280		 * kmem_cache_free.
2281		 *
2282		 * This is the case if we do RCU, have a constructor or
2283		 * destructor or are poisoning the objects.
2284		 */
2285		s->offset = size;
2286		size += sizeof(void *);
2287	}
2288
2289#ifdef CONFIG_SLUB_DEBUG
2290	if (flags & SLAB_STORE_USER)
2291		/*
2292		 * Need to store information about allocs and frees after
2293		 * the object.
2294		 */
2295		size += 2 * sizeof(struct track);
2296
2297	if (flags & SLAB_RED_ZONE)
2298		/*
2299		 * Add some empty padding so that we can catch
2300		 * overwrites from earlier objects rather than let
2301		 * tracking information or the free pointer be
2302		 * corrupted if a user writes before the start
2303		 * of the object.
2304		 */
2305		size += sizeof(void *);
2306#endif
2307
2308	/*
2309	 * Determine the alignment based on various parameters that the
2310	 * user specified and the dynamic determination of cache line size
2311	 * on bootup.
2312	 */
2313	align = calculate_alignment(flags, align, s->objsize);
2314	s->align = align;
2315
2316	/*
2317	 * SLUB stores one object immediately after another beginning from
2318	 * offset 0. In order to align the objects we have to simply size
2319	 * each object to conform to the alignment.
2320	 */
2321	size = ALIGN(size, align);
2322	s->size = size;
2323	if (forced_order >= 0)
2324		order = forced_order;
2325	else
2326		order = calculate_order(size);
2327
2328	if (order < 0)
2329		return 0;
2330
2331	s->allocflags = 0;
2332	if (order)
2333		s->allocflags |= __GFP_COMP;
2334
2335	if (s->flags & SLAB_CACHE_DMA)
2336		s->allocflags |= SLUB_DMA;
2337
2338	if (s->flags & SLAB_RECLAIM_ACCOUNT)
2339		s->allocflags |= __GFP_RECLAIMABLE;
2340
2341	/*
2342	 * Determine the number of objects per slab
2343	 */
2344	s->oo = oo_make(order, size);
2345	s->min = oo_make(get_order(size), size);
2346	if (oo_objects(s->oo) > oo_objects(s->max))
2347		s->max = s->oo;
2348
2349	return !!oo_objects(s->oo);
2350
2351}
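
/*
 * Rough illustration on a 64 bit build with CONFIG_SLUB_DEBUG enabled, for
 * a 24 byte object with only SLAB_RED_ZONE set and no constructor: size is
 * rounded to 24, the red zone word grows it to 32, s->inuse becomes 32, the
 * free pointer stays at offset 0 (overlapping the object is fine here), and
 * the trailing padding word brings the per object footprint to 40 bytes
 * before the final alignment step.
 */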
2352
2353static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
2354		const char *name, size_t size,
2355		size_t align, unsigned long flags,
2356		void (*ctor)(void *))
2357{
2358	memset(s, 0, kmem_size);
2359	s->name = name;
2360	s->ctor = ctor;
2361	s->objsize = size;
2362	s->align = align;
2363	s->flags = kmem_cache_flags(size, flags, name, ctor);
2364
2365	if (!calculate_sizes(s, -1))
2366		goto error;
2367	if (disable_higher_order_debug) {
2368		/*
2369		 * Disable debugging flags that store metadata if the min slab
2370		 * order increased.
2371		 */
2372		if (get_order(s->size) > get_order(s->objsize)) {
2373			s->flags &= ~DEBUG_METADATA_FLAGS;
2374			s->offset = 0;
2375			if (!calculate_sizes(s, -1))
2376				goto error;
2377		}
2378	}
2379
2380	/*
2381	 * The larger the object size is, the more pages we want on the partial
2382	 * list to avoid pounding the page allocator excessively.
2383	 */
2384	set_min_partial(s, ilog2(s->size));
2385	s->refcount = 1;
2386#ifdef CONFIG_NUMA
2387	s->remote_node_defrag_ratio = 1000;
2388#endif
2389	if (!init_kmem_cache_nodes(s, gfpflags & ~SLUB_DMA))
2390		goto error;
2391
2392	if (alloc_kmem_cache_cpus(s, gfpflags & ~SLUB_DMA))
2393		return 1;
2394	free_kmem_cache_nodes(s);
2395error:
2396	if (flags & SLAB_PANIC)
2397		panic("Cannot create slab %s size=%lu realsize=%u "
2398			"order=%u offset=%u flags=%lx\n",
2399			s->name, (unsigned long)size, s->size, oo_order(s->oo),
2400			s->offset, flags);
2401	return 0;
2402}
2403
2404/*
2405 * Check if a given pointer is valid
2406 */
2407int kmem_ptr_validate(struct kmem_cache *s, const void *object)
2408{
2409	struct page *page;
2410
2411	page = get_object_page(object);
2412
2413	if (!page || s != page->slab)
2414		/* No slab or wrong slab */
2415		return 0;
2416
2417	if (!check_valid_pointer(s, page, object))
2418		return 0;
2419
2420	/*
2421	 * We could also check if the object is on the slab's freelist.
2422	 * But this would be too expensive and it seems that the main
2423	 * purpose of kmem_ptr_valid() is to check if the object belongs
2424	 * to a certain slab.
2425	 */
2426	return 1;
2427}
2428EXPORT_SYMBOL(kmem_ptr_validate);
2429
2430/*
2431 * Determine the size of a slab object
2432 */
2433unsigned int kmem_cache_size(struct kmem_cache *s)
2434{
2435	return s->objsize;
2436}
2437EXPORT_SYMBOL(kmem_cache_size);
2438
2439const char *kmem_cache_name(struct kmem_cache *s)
2440{
2441	return s->name;
2442}
2443EXPORT_SYMBOL(kmem_cache_name);
2444
2445static void list_slab_objects(struct kmem_cache *s, struct page *page,
2446							const char *text)
2447{
2448#ifdef CONFIG_SLUB_DEBUG
2449	void *addr = page_address(page);
2450	void *p;
2451	DECLARE_BITMAP(map, page->objects);
2452
2453	bitmap_zero(map, page->objects);
2454	slab_err(s, page, "%s", text);
2455	slab_lock(page);
2456	for_each_free_object(p, s, page->freelist)
2457		set_bit(slab_index(p, s, addr), map);
2458
2459	for_each_object(p, s, addr, page->objects) {
2460
2461		if (!test_bit(slab_index(p, s, addr), map)) {
2462			printk(KERN_ERR "INFO: Object 0x%p @offset=%tu\n",
2463							p, p - addr);
2464			print_tracking(s, p);
2465		}
2466	}
2467	slab_unlock(page);
2468#endif
2469}
2470
2471/*
2472 * Attempt to free all partial slabs on a node.
2473 */
2474static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
2475{
2476	unsigned long flags;
2477	struct page *page, *h;
2478
2479	spin_lock_irqsave(&n->list_lock, flags);
2480	list_for_each_entry_safe(page, h, &n->partial, lru) {
2481		if (!page->inuse) {
2482			list_del(&page->lru);
2483			discard_slab(s, page);
2484			n->nr_partial--;
2485		} else {
2486			list_slab_objects(s, page,
2487				"Objects remaining on kmem_cache_close()");
2488		}
2489	}
2490	spin_unlock_irqrestore(&n->list_lock, flags);
2491}
2492
2493/*
2494 * Release all resources used by a slab cache.
2495 */
2496static inline int kmem_cache_close(struct kmem_cache *s)
2497{
2498	int node;
2499
2500	flush_all(s);
2501	free_percpu(s->cpu_slab);
2502	/* Attempt to free all objects */
2503	for_each_node_state(node, N_NORMAL_MEMORY) {
2504		struct kmem_cache_node *n = get_node(s, node);
2505
2506		free_partial(s, n);
2507		if (n->nr_partial || slabs_node(s, node))
2508			return 1;
2509	}
2510	free_kmem_cache_nodes(s);
2511	return 0;
2512}
2513
2514/*
2515 * Close a cache and release the kmem_cache structure
2516 * (must be used for caches created using kmem_cache_create)
2517 */
2518void kmem_cache_destroy(struct kmem_cache *s)
2519{
2520	down_write(&slub_lock);
2521	s->refcount--;
2522	if (!s->refcount) {
2523		list_del(&s->list);
2524		up_write(&slub_lock);
2525		if (kmem_cache_close(s)) {
2526			printk(KERN_ERR "SLUB %s: %s called for cache that "
2527				"still has objects.\n", s->name, __func__);
2528			dump_stack();
2529		}
2530		if (s->flags & SLAB_DESTROY_BY_RCU)
2531			rcu_barrier();
2532		sysfs_slab_remove(s);
2533	} else
2534		up_write(&slub_lock);
2535}
2536EXPORT_SYMBOL(kmem_cache_destroy);
2537
2538/********************************************************************
2539 *		Kmalloc subsystem
2540 *******************************************************************/
2541
2542struct kmem_cache kmalloc_caches[KMALLOC_CACHES] __cacheline_aligned;
2543EXPORT_SYMBOL(kmalloc_caches);
2544
2545static int __init setup_slub_min_order(char *str)
2546{
2547	get_option(&str, &slub_min_order);
2548
2549	return 1;
2550}
2551
2552__setup("slub_min_order=", setup_slub_min_order);
2553
2554static int __init setup_slub_max_order(char *str)
2555{
2556	get_option(&str, &slub_max_order);
2557	slub_max_order = min(slub_max_order, MAX_ORDER - 1);
2558
2559	return 1;
2560}
2561
2562__setup("slub_max_order=", setup_slub_max_order);
2563
2564static int __init setup_slub_min_objects(char *str)
2565{
2566	get_option(&str, &slub_min_objects);
2567
2568	return 1;
2569}
2570
2571__setup("slub_min_objects=", setup_slub_min_objects);
2572
2573static int __init setup_slub_nomerge(char *str)
2574{
2575	slub_nomerge = 1;
2576	return 1;
2577}
2578
2579__setup("slub_nomerge", setup_slub_nomerge);
2580
2581static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
2582		const char *name, int size, gfp_t gfp_flags)
2583{
2584	unsigned int flags = 0;
2585
2586	if (gfp_flags & SLUB_DMA)
2587		flags = SLAB_CACHE_DMA;
2588
2589	/*
2590	 * This function is called with IRQs disabled during early-boot on
2591	 * a single CPU, so there's no need to take slub_lock here.
2592	 */
2593	if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
2594								flags, NULL))
2595		goto panic;
2596
2597	list_add(&s->list, &slab_caches);
2598
2599	if (sysfs_slab_add(s))
2600		goto panic;
2601	return s;
2602
2603panic:
2604	panic("Creation of kmalloc slab %s size=%d failed.\n", name, size);
2605}
2606
2607#ifdef CONFIG_ZONE_DMA
2608static struct kmem_cache *kmalloc_caches_dma[SLUB_PAGE_SHIFT];
2609
2610static void sysfs_add_func(struct work_struct *w)
2611{
2612	struct kmem_cache *s;
2613
2614	down_write(&slub_lock);
2615	list_for_each_entry(s, &slab_caches, list) {
2616		if (s->flags & __SYSFS_ADD_DEFERRED) {
2617			s->flags &= ~__SYSFS_ADD_DEFERRED;
2618			sysfs_slab_add(s);
2619		}
2620	}
2621	up_write(&slub_lock);
2622}
2623
2624static DECLARE_WORK(sysfs_add_work, sysfs_add_func);
2625
2626static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
2627{
2628	struct kmem_cache *s;
2629	char *text;
2630	size_t realsize;
2631	unsigned long slabflags;
2632	int i;
2633
2634	s = kmalloc_caches_dma[index];
2635	if (s)
2636		return s;
2637
2638	/* Dynamically create dma cache */
2639	if (flags & __GFP_WAIT)
2640		down_write(&slub_lock);
2641	else {
2642		if (!down_write_trylock(&slub_lock))
2643			goto out;
2644	}
2645
2646	if (kmalloc_caches_dma[index])
2647		goto unlock_out;
2648
2649	realsize = kmalloc_caches[index].objsize;
2650	text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d",
2651			 (unsigned int)realsize);
2652
2653	s = NULL;
2654	for (i = 0; i < KMALLOC_CACHES; i++)
2655		if (!kmalloc_caches[i].size)
2656			break;
2657
2658	BUG_ON(i >= KMALLOC_CACHES);
2659	s = kmalloc_caches + i;
2660
2661	/*
2662	 * Must defer sysfs creation to a workqueue because we don't know
2663	 * what context we are called from. Before sysfs comes up, we don't
2664	 * need to do anything because our sysfs initcall will start by
2665	 * adding all existing slabs to sysfs.
2666	 */
2667	slabflags = SLAB_CACHE_DMA|SLAB_NOTRACK;
2668	if (slab_state >= SYSFS)
2669		slabflags |= __SYSFS_ADD_DEFERRED;
2670
2671	if (!s || !text || !kmem_cache_open(s, flags, text,
2672			realsize, ARCH_KMALLOC_MINALIGN, slabflags, NULL)) {
2673		s->size = 0;
2674		kfree(text);
2675		goto unlock_out;
2676	}
2677
2678	list_add(&s->list, &slab_caches);
2679	kmalloc_caches_dma[index] = s;
2680
2681	if (slab_state >= SYSFS)
2682		schedule_work(&sysfs_add_work);
2683
2684unlock_out:
2685	up_write(&slub_lock);
2686out:
2687	return kmalloc_caches_dma[index];
2688}
2689#endif
2690
2691/*
2692 * Conversion table for small slab sizes / 8 to the index in the
2693 * kmalloc array. This is necessary for slabs < 192 since we have non power
2694 * of two cache sizes there. The size of larger slabs can be determined using
2695 * fls.
2696 */
2697static s8 size_index[24] = {
2698	3,	/* 8 */
2699	4,	/* 16 */
2700	5,	/* 24 */
2701	5,	/* 32 */
2702	6,	/* 40 */
2703	6,	/* 48 */
2704	6,	/* 56 */
2705	6,	/* 64 */
2706	1,	/* 72 */
2707	1,	/* 80 */
2708	1,	/* 88 */
2709	1,	/* 96 */
2710	7,	/* 104 */
2711	7,	/* 112 */
2712	7,	/* 120 */
2713	7,	/* 128 */
2714	2,	/* 136 */
2715	2,	/* 144 */
2716	2,	/* 152 */
2717	2,	/* 160 */
2718	2,	/* 168 */
2719	2,	/* 176 */
2720	2,	/* 184 */
2721	2	/* 192 */
2722};
2723
2724static inline int size_index_elem(size_t bytes)
2725{
2726	return (bytes - 1) / 8;
2727}
2728
2729static struct kmem_cache *get_slab(size_t size, gfp_t flags)
2730{
2731	int index;
2732
2733	if (size <= 192) {
2734		if (!size)
2735			return ZERO_SIZE_PTR;
2736
2737		index = size_index[size_index_elem(size)];
2738	} else
2739		index = fls(size - 1);
2740
2741#ifdef CONFIG_ZONE_DMA
2742	if (unlikely((flags & SLUB_DMA)))
2743		return dma_kmalloc_cache(index, flags);
2744
2745#endif
2746	return &kmalloc_caches[index];
2747}
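
/*
 * For illustration, before any boot time patching of size_index:
 * kmalloc(20) maps to size_index[(20 - 1) / 8] == size_index[2] == 5, the
 * 32 byte cache; kmalloc(100) maps to size_index[12] == 7, the 128 byte
 * cache; a 300 byte request is above 192 and uses fls(299) == 9, the 512
 * byte cache.
 */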
2748
2749void *__kmalloc(size_t size, gfp_t flags)
2750{
2751	struct kmem_cache *s;
2752	void *ret;
2753
2754	if (unlikely(size > SLUB_MAX_SIZE))
2755		return kmalloc_large(size, flags);
2756
2757	s = get_slab(size, flags);
2758
2759	if (unlikely(ZERO_OR_NULL_PTR(s)))
2760		return s;
2761
2762	ret = slab_alloc(s, flags, -1, _RET_IP_);
2763
2764	trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
2765
2766	return ret;
2767}
2768EXPORT_SYMBOL(__kmalloc);
2769
2770static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
2771{
2772	struct page *page;
2773	void *ptr = NULL;
2774
2775	flags |= __GFP_COMP | __GFP_NOTRACK;
2776	page = alloc_pages_node(node, flags, get_order(size));
2777	if (page)
2778		ptr = page_address(page);
2779
2780	kmemleak_alloc(ptr, size, 1, flags);
2781	return ptr;
2782}
2783
2784#ifdef CONFIG_NUMA
2785void *__kmalloc_node(size_t size, gfp_t flags, int node)
2786{
2787	struct kmem_cache *s;
2788	void *ret;
2789
2790	if (unlikely(size > SLUB_MAX_SIZE)) {
2791		ret = kmalloc_large_node(size, flags, node);
2792
2793		trace_kmalloc_node(_RET_IP_, ret,
2794				   size, PAGE_SIZE << get_order(size),
2795				   flags, node);
2796
2797		return ret;
2798	}
2799
2800	s = get_slab(size, flags);
2801
2802	if (unlikely(ZERO_OR_NULL_PTR(s)))
2803		return s;
2804
2805	ret = slab_alloc(s, flags, node, _RET_IP_);
2806
2807	trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
2808
2809	return ret;
2810}
2811EXPORT_SYMBOL(__kmalloc_node);
2812#endif
2813
2814size_t ksize(const void *object)
2815{
2816	struct page *page;
2817	struct kmem_cache *s;
2818
2819	if (unlikely(object == ZERO_SIZE_PTR))
2820		return 0;
2821
2822	page = virt_to_head_page(object);
2823
2824	if (unlikely(!PageSlab(page))) {
2825		WARN_ON(!PageCompound(page));
2826		return PAGE_SIZE << compound_order(page);
2827	}
2828	s = page->slab;
2829
2830#ifdef CONFIG_SLUB_DEBUG
2831	/*
2832	 * Debugging requires use of the padding between object
2833	 * and whatever may come after it.
2834	 */
2835	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
2836		return s->objsize;
2837
2838#endif
2839	/*
2840	 * If we have the need to store the freelist pointer
2841	 * back there or track user information then we can
2842	 * only use the space before that information.
2843	 */
2844	if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
2845		return s->inuse;
2846	/*
2847	 * Else we can use all the padding etc for the allocation
2848	 */
2849	return s->size;
2850}
2851EXPORT_SYMBOL(ksize);
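
/*
 * Example: on a cache without debugging, RCU or user tracking, an object
 * obtained via kmalloc(20) reports ksize() == 32, the full size of the
 * kmalloc-32 slot, so callers may use the slack beyond the 20 bytes they
 * asked for.
 */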
2852
2853void kfree(const void *x)
2854{
2855	struct page *page;
2856	void *object = (void *)x;
2857
2858	trace_kfree(_RET_IP_, x);
2859
2860	if (unlikely(ZERO_OR_NULL_PTR(x)))
2861		return;
2862
2863	page = virt_to_head_page(x);
2864	if (unlikely(!PageSlab(page))) {
2865		BUG_ON(!PageCompound(page));
2866		kmemleak_free(x);
2867		put_page(page);
2868		return;
2869	}
2870	slab_free(page->slab, page, object, _RET_IP_);
2871}
2872EXPORT_SYMBOL(kfree);
2873
2874/*
2875 * kmem_cache_shrink removes empty slabs from the partial lists and sorts
2876 * the remaining slabs by the number of items in use. The slabs with the
2877 * most items in use come first. New allocations will then fill those up
2878 * and thus they can be removed from the partial lists.
2879 *
2880 * The slabs with the least items are placed last. This results in them
2881 * being allocated from last, increasing the chance that their remaining
2882 * objects are freed and the slabs can eventually be discarded.
2883 */
2884int kmem_cache_shrink(struct kmem_cache *s)
2885{
2886	int node;
2887	int i;
2888	struct kmem_cache_node *n;
2889	struct page *page;
2890	struct page *t;
2891	int objects = oo_objects(s->max);
2892	struct list_head *slabs_by_inuse =
2893		kmalloc(sizeof(struct list_head) * objects, GFP_KERNEL);
2894	unsigned long flags;
2895
2896	if (!slabs_by_inuse)
2897		return -ENOMEM;
2898
2899	flush_all(s);
2900	for_each_node_state(node, N_NORMAL_MEMORY) {
2901		n = get_node(s, node);
2902
2903		if (!n->nr_partial)
2904			continue;
2905
2906		for (i = 0; i < objects; i++)
2907			INIT_LIST_HEAD(slabs_by_inuse + i);
2908
2909		spin_lock_irqsave(&n->list_lock, flags);
2910
2911		/*
2912		 * Build lists indexed by the items in use in each slab.
2913		 *
2914		 * Note that concurrent frees may occur while we hold the
2915		 * list_lock. page->inuse here is the upper limit.
2916		 */
2917		list_for_each_entry_safe(page, t, &n->partial, lru) {
2918			if (!page->inuse && slab_trylock(page)) {
2919				/*
2920				 * Must hold slab lock here because slab_free
2921				 * may have freed the last object and be
2922				 * waiting to release the slab.
2923				 */
2924				list_del(&page->lru);
2925				n->nr_partial--;
2926				slab_unlock(page);
2927				discard_slab(s, page);
2928			} else {
2929				list_move(&page->lru,
2930				slabs_by_inuse + page->inuse);
2931			}
2932		}
2933
2934		/*
2935		 * Rebuild the partial list with the slabs filled up most
2936		 * first and the least used slabs at the end.
2937		 */
2938		for (i = objects - 1; i >= 0; i--)
2939			list_splice(slabs_by_inuse + i, n->partial.prev);
2940
2941		spin_unlock_irqrestore(&n->list_lock, flags);
2942	}
2943
2944	kfree(slabs_by_inuse);
2945	return 0;
2946}
2947EXPORT_SYMBOL(kmem_cache_shrink);
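
/*
 * For illustration: with 21 objects per slab, a partial slab with
 * page->inuse == 3 lands on slabs_by_inuse[3], one with 20 objects in use
 * on slabs_by_inuse[20], and completely unused slabs whose lock can be
 * taken are discarded outright.  Splicing the buckets back from index
 * objects - 1 down to 0 rebuilds the partial list with the fullest slabs
 * in front.
 */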
2948
2949#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
2950static int slab_mem_going_offline_callback(void *arg)
2951{
2952	struct kmem_cache *s;
2953
2954	down_read(&slub_lock);
2955	list_for_each_entry(s, &slab_caches, list)
2956		kmem_cache_shrink(s);
2957	up_read(&slub_lock);
2958
2959	return 0;
2960}
2961
2962static void slab_mem_offline_callback(void *arg)
2963{
2964	struct kmem_cache_node *n;
2965	struct kmem_cache *s;
2966	struct memory_notify *marg = arg;
2967	int offline_node;
2968
2969	offline_node = marg->status_change_nid;
2970
2971	/*
2972	 * If the node still has available memory, we still need its
2973	 * kmem_cache_node, so there is nothing to do.
2974	 */
2975	if (offline_node < 0)
2976		return;
2977
2978	down_read(&slub_lock);
2979	list_for_each_entry(s, &slab_caches, list) {
2980		n = get_node(s, offline_node);
2981		if (n) {
2982			/*
2983			 * if n->nr_slabs > 0, slabs still exist on the node
2984			 * that is going down. We were unable to free them,
2985			 * and the offline_pages() function shouldn't call this
2986			 * callback. So, we must fail.
2987			 */
2988			BUG_ON(slabs_node(s, offline_node));
2989
2990			s->node[offline_node] = NULL;
2991			kmem_cache_free(kmalloc_caches, n);
2992		}
2993	}
2994	up_read(&slub_lock);
2995}
2996
2997static int slab_mem_going_online_callback(void *arg)
2998{
2999	struct kmem_cache_node *n;
3000	struct kmem_cache *s;
3001	struct memory_notify *marg = arg;
3002	int nid = marg->status_change_nid;
3003	int ret = 0;
3004
3005	/*
3006	 * If the node's memory is already available, then kmem_cache_node is
3007	 * already created. Nothing to do.
3008	 */
3009	if (nid < 0)
3010		return 0;
3011
3012	/*
3013	 * We are bringing a node online. No memory is available yet. We must
3014	 * allocate a kmem_cache_node structure in order to bring the node
3015	 * online.
3016	 */
3017	down_read(&slub_lock);
3018	list_for_each_entry(s, &slab_caches, list) {
3019		/*
3020		 * XXX: kmem_cache_alloc_node will fall back to other nodes
3021		 *      since memory is not yet available from the node that
3022		 *      is brought up.
3023		 */
3024		n = kmem_cache_alloc(kmalloc_caches, GFP_KERNEL);
3025		if (!n) {
3026			ret = -ENOMEM;
3027			goto out;
3028		}
3029		init_kmem_cache_node(n, s);
3030		s->node[nid] = n;
3031	}
3032out:
3033	up_read(&slub_lock);
3034	return ret;
3035}
3036
3037static int slab_memory_callback(struct notifier_block *self,
3038				unsigned long action, void *arg)
3039{
3040	int ret = 0;
3041
3042	switch (action) {
3043	case MEM_GOING_ONLINE:
3044		ret = slab_mem_going_online_callback(arg);
3045		break;
3046	case MEM_GOING_OFFLINE:
3047		ret = slab_mem_going_offline_callback(arg);
3048		break;
3049	case MEM_OFFLINE:
3050	case MEM_CANCEL_ONLINE:
3051		slab_mem_offline_callback(arg);
3052		break;
3053	case MEM_ONLINE:
3054	case MEM_CANCEL_OFFLINE:
3055		break;
3056	}
3057	if (ret)
3058		ret = notifier_from_errno(ret);
3059	else
3060		ret = NOTIFY_OK;
3061	return ret;
3062}
3063
3064#endif /* CONFIG_MEMORY_HOTPLUG */
3065
3066/********************************************************************
3067 *			Basic setup of slabs
3068 *******************************************************************/
3069
3070void __init kmem_cache_init(void)
3071{
3072	int i;
3073	int caches = 0;
3074
3075#ifdef CONFIG_NUMA
3076	/*
3077	 * Must first have the slab cache available for the allocations of the
3078	 * struct kmem_cache_node's. There is special bootstrap code in
3079	 * kmem_cache_open for slab_state == DOWN.
3080	 */
3081	create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
3082		sizeof(struct kmem_cache_node), GFP_NOWAIT);
3083	kmalloc_caches[0].refcount = -1;
3084	caches++;
3085
3086	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
3087#endif
3088
3089	/* Able to allocate the per node structures */
3090	slab_state = PARTIAL;
3091
3092	/* Caches that are not of the two-to-the-power-of size */
3093	if (KMALLOC_MIN_SIZE <= 32) {
3094		create_kmalloc_cache(&kmalloc_caches[1],
3095				"kmalloc-96", 96, GFP_NOWAIT);
3096		caches++;
3097	}
3098	if (KMALLOC_MIN_SIZE <= 64) {
3099		create_kmalloc_cache(&kmalloc_caches[2],
3100				"kmalloc-192", 192, GFP_NOWAIT);
3101		caches++;
3102	}
3103
3104	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
3105		create_kmalloc_cache(&kmalloc_caches[i],
3106			"kmalloc", 1 << i, GFP_NOWAIT);
3107		caches++;
3108	}
3109
3110
3111	/*
3112	 * Patch up the size_index table if we have strange large alignment
3113	 * requirements for the kmalloc array. This is only the case for
3114	 * MIPS it seems. The standard arches will not generate any code here.
3115	 *
3116	 * Largest permitted alignment is 256 bytes due to the way we
3117	 * handle the index determination for the smaller caches.
3118	 *
3119	 * Make sure that nothing crazy happens if someone starts tinkering
3120	 * around with ARCH_KMALLOC_MINALIGN
3121	 */
3122	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
3123		(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
3124
3125	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
3126		int elem = size_index_elem(i);
3127		if (elem >= ARRAY_SIZE(size_index))
3128			break;
3129		size_index[elem] = KMALLOC_SHIFT_LOW;
3130	}
3131
3132	if (KMALLOC_MIN_SIZE == 64) {
3133		/*
3134		 * The 96 byte size cache is not used if the alignment
3135		 * is 64 byte.
3136		 */
3137		for (i = 64 + 8; i <= 96; i += 8)
3138			size_index[size_index_elem(i)] = 7;
3139	} else if (KMALLOC_MIN_SIZE == 128) {
3140		/*
3141		 * The 192 byte sized cache is not used if the alignment
3142		 * is 128 byte. Redirect kmalloc to use the 256 byte cache
3143		 * instead.
3144		 */
3145		for (i = 128 + 8; i <= 192; i += 8)
3146			size_index[size_index_elem(i)] = 8;
3147	}
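
	/*
	 * For illustration, assuming KMALLOC_MIN_SIZE == 64 (and thus
	 * KMALLOC_SHIFT_LOW == 6): the loop above points every 8..56 byte
	 * request at the 64 byte cache, and the 72..96 byte entries are
	 * redirected to the 128 byte cache since the 96 byte cache cannot
	 * provide 64 byte alignment.
	 */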
3148
3149	slab_state = UP;
3150
3151	/* Provide the correct kmalloc names now that the caches are up */
3152	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++)
3153		kmalloc_caches[i].name =
3154			kasprintf(GFP_NOWAIT, "kmalloc-%d", 1 << i);
3155
3156#ifdef CONFIG_SMP
3157	register_cpu_notifier(&slab_notifier);
3158#endif
3159#ifdef CONFIG_NUMA
3160	kmem_size = offsetof(struct kmem_cache, node) +
3161				nr_node_ids * sizeof(struct kmem_cache_node *);
3162#else
3163	kmem_size = sizeof(struct kmem_cache);
3164#endif
3165
3166	printk(KERN_INFO
3167		"SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
3168		" CPUs=%d, Nodes=%d\n",
3169		caches, cache_line_size(),
3170		slub_min_order, slub_max_order, slub_min_objects,
3171		nr_cpu_ids, nr_node_ids);
3172}
3173
3174void __init kmem_cache_init_late(void)
3175{
3176}
3177
3178/*
3179 * Find a mergeable slab cache
3180 */
3181static int slab_unmergeable(struct kmem_cache *s)
3182{
3183	if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
3184		return 1;
3185
3186	if (s->ctor)
3187		return 1;
3188
3189	/*
3190	 * We may have set a slab to be unmergeable during bootstrap.
3191	 */
3192	if (s->refcount < 0)
3193		return 1;
3194
3195	return 0;
3196}
3197
3198static struct kmem_cache *find_mergeable(size_t size,
3199		size_t align, unsigned long flags, const char *name,
3200		void (*ctor)(void *))
3201{
3202	struct kmem_cache *s;
3203
3204	if (slub_nomerge || (flags & SLUB_NEVER_MERGE))
3205		return NULL;
3206
3207	if (ctor)
3208		return NULL;
3209
3210	size = ALIGN(size, sizeof(void *));
3211	align = calculate_alignment(flags, align, size);
3212	size = ALIGN(size, align);
3213	flags = kmem_cache_flags(size, flags, name, NULL);
3214
3215	list_for_each_entry(s, &slab_caches, list) {
3216		if (slab_unmergeable(s))
3217			continue;
3218
3219		if (size > s->size)
3220			continue;
3221
3222		if ((flags & SLUB_MERGE_SAME) != (s->flags & SLUB_MERGE_SAME))
3223				continue;
3224		/*
3225		 * Check if alignment is compatible.
3226		 * Courtesy of Adrian Drzewiecki
3227		 */
3228		if ((s->size & ~(align - 1)) != s->size)
3229			continue;
3230
3231		if (s->size - size >= sizeof(void *))
3232			continue;
3233
3234		return s;
3235	}
3236	return NULL;
3237}
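
/*
 * For illustration: a ctor-less cache created with size == 30, align == 8
 * and no debug flags is rounded up here to a 32 byte, 8 byte aligned
 * layout and will typically merge with the existing kmalloc-32 cache: 32
 * fits within s->size, the alignments are compatible and the size
 * difference is below sizeof(void *).
 */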
3238
3239struct kmem_cache *kmem_cache_create(const char *name, size_t size,
3240		size_t align, unsigned long flags, void (*ctor)(void *))
3241{
3242	struct kmem_cache *s;
3243
3244	if (WARN_ON(!name))
3245		return NULL;
3246
3247	down_write(&slub_lock);
3248	s = find_mergeable(size, align, flags, name, ctor);
3249	if (s) {
3250		int cpu;
3251
3252		s->refcount++;
3253		/*
3254		 * Adjust the object sizes so that we clear
3255		 * the complete object on kzalloc.
3256		 */
3257		s->objsize = max(s->objsize, (int)size);
3258
3259		/*
3260		 * And then we need to update the object size in the
3261		 * per cpu structures
3262		 */
3263		for_each_online_cpu(cpu)
3264			per_cpu_ptr(s->cpu_slab, cpu)->objsize = s->objsize;
3265
3266		s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
3267		up_write(&slub_lock);
3268
3269		if (sysfs_slab_alias(s, name)) {
3270			down_write(&slub_lock);
3271			s->refcount--;
3272			up_write(&slub_lock);
3273			goto err;
3274		}
3275		return s;
3276	}
3277
3278	s = kmalloc(kmem_size, GFP_KERNEL);
3279	if (s) {
3280		if (kmem_cache_open(s, GFP_KERNEL, name,
3281				size, align, flags, ctor)) {
3282			list_add(&s->list, &slab_caches);
3283			up_write(&slub_lock);
3284			if (sysfs_slab_add(s)) {
3285				down_write(&slub_lock);
3286				list_del(&s->list);
3287				up_write(&slub_lock);
3288				kfree(s);
3289				goto err;
3290			}
3291			return s;
3292		}
3293		kfree(s);
3294	}
3295	up_write(&slub_lock);
3296
3297err:
3298	if (flags & SLAB_PANIC)
3299		panic("Cannot create slabcache %s\n", name);
3300	else
3301		s = NULL;
3302	return s;
3303}
3304EXPORT_SYMBOL(kmem_cache_create);
3305
3306#ifdef CONFIG_SMP
3307/*
3308 * Use the cpu notifier to ensure that the cpu slabs are flushed when
3309 * necessary.
3310 */
3311static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
3312		unsigned long action, void *hcpu)
3313{
3314	long cpu = (long)hcpu;
3315	struct kmem_cache *s;
3316	unsigned long flags;
3317
3318	switch (action) {
3319	case CPU_UP_PREPARE:
3320	case CPU_UP_PREPARE_FROZEN:
3321		down_read(&slub_lock);
3322		list_for_each_entry(s, &slab_caches, list)
3323			init_kmem_cache_cpu(s, per_cpu_ptr(s->cpu_slab, cpu));
3324		up_read(&slub_lock);
3325		break;
3326
3327	case CPU_UP_CANCELED:
3328	case CPU_UP_CANCELED_FROZEN:
3329	case CPU_DEAD:
3330	case CPU_DEAD_FROZEN:
3331		down_read(&slub_lock);
3332		list_for_each_entry(s, &slab_caches, list) {
3333			local_irq_save(flags);
3334			__flush_cpu_slab(s, cpu);
3335			local_irq_restore(flags);
3336		}
3337		up_read(&slub_lock);
3338		break;
3339	default:
3340		break;
3341	}
3342	return NOTIFY_OK;
3343}
3344
3345static struct notifier_block __cpuinitdata slab_notifier = {
3346	.notifier_call = slab_cpuup_callback
3347};
3348
3349#endif
3350
3351void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
3352{
3353	struct kmem_cache *s;
3354	void *ret;
3355
3356	if (unlikely(size > SLUB_MAX_SIZE))
3357		return kmalloc_large(size, gfpflags);
3358
3359	s = get_slab(size, gfpflags);
3360
3361	if (unlikely(ZERO_OR_NULL_PTR(s)))
3362		return s;
3363
3364	ret = slab_alloc(s, gfpflags, -1, caller);
3365
3366	/* Honor the call site pointer we received. */
3367	trace_kmalloc(caller, ret, size, s->size, gfpflags);
3368
3369	return ret;
3370}
3371
3372void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
3373					int node, unsigned long caller)
3374{
3375	struct kmem_cache *s;
3376	void *ret;
3377
3378	if (unlikely(size > SLUB_MAX_SIZE))
3379		return kmalloc_large_node(size, gfpflags, node);
3380
3381	s = get_slab(size, gfpflags);
3382
3383	if (unlikely(ZERO_OR_NULL_PTR(s)))
3384		return s;
3385
3386	ret = slab_alloc(s, gfpflags, node, caller);
3387
3388	/* Honor the call site pointer we received. */
3389	trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
3390
3391	return ret;
3392}
3393
3394#ifdef CONFIG_SLUB_DEBUG
3395static int count_inuse(struct page *page)
3396{
3397	return page->inuse;
3398}
3399
3400static int count_total(struct page *page)
3401{
3402	return page->objects;
3403}
3404
3405static int validate_slab(struct kmem_cache *s, struct page *page,
3406						unsigned long *map)
3407{
3408	void *p;
3409	void *addr = page_address(page);
3410
3411	if (!check_slab(s, page) ||
3412			!on_freelist(s, page, NULL))
3413		return 0;
3414
3415	/* Now we know that a valid freelist exists */
3416	bitmap_zero(map, page->objects);
3417
3418	for_each_free_object(p, s, page->freelist) {
3419		set_bit(slab_index(p, s, addr), map);
3420		if (!check_object(s, page, p, 0))
3421			return 0;
3422	}
3423
3424	for_each_object(p, s, addr, page->objects)
3425		if (!test_bit(slab_index(p, s, addr), map))
3426			if (!check_object(s, page, p, 1))
3427				return 0;
3428	return 1;
3429}
3430
3431static void validate_slab_slab(struct kmem_cache *s, struct page *page,
3432						unsigned long *map)
3433{
3434	if (slab_trylock(page)) {
3435		validate_slab(s, page, map);
3436		slab_unlock(page);
3437	} else
3438		printk(KERN_INFO "SLUB %s: Skipped busy slab 0x%p\n",
3439			s->name, page);
3440
3441	if (s->flags & DEBUG_DEFAULT_FLAGS) {
3442		if (!PageSlubDebug(page))
3443			printk(KERN_ERR "SLUB %s: SlubDebug not set "
3444				"on slab 0x%p\n", s->name, page);
3445	} else {
3446		if (PageSlubDebug(page))
3447			printk(KERN_ERR "SLUB %s: SlubDebug set on "
3448				"slab 0x%p\n", s->name, page);
3449	}
3450}
3451
3452static int validate_slab_node(struct kmem_cache *s,
3453		struct kmem_cache_node *n, unsigned long *map)
3454{
3455	unsigned long count = 0;
3456	struct page *page;
3457	unsigned long flags;
3458
3459	spin_lock_irqsave(&n->list_lock, flags);
3460
3461	list_for_each_entry(page, &n->partial, lru) {
3462		validate_slab_slab(s, page, map);
3463		count++;
3464	}
3465	if (count != n->nr_partial)
3466		printk(KERN_ERR "SLUB %s: %ld partial slabs counted but "
3467			"counter=%ld\n", s->name, count, n->nr_partial);
3468
3469	if (!(s->flags & SLAB_STORE_USER))
3470		goto out;
3471
3472	list_for_each_entry(page, &n->full, lru) {
3473		validate_slab_slab(s, page, map);
3474		count++;
3475	}
3476	if (count != atomic_long_read(&n->nr_slabs))
3477		printk(KERN_ERR "SLUB: %s %ld slabs counted but "
3478			"counter=%ld\n", s->name, count,
3479			atomic_long_read(&n->nr_slabs));
3480
3481out:
3482	spin_unlock_irqrestore(&n->list_lock, flags);
3483	return count;
3484}
3485
3486static long validate_slab_cache(struct kmem_cache *s)
3487{
3488	int node;
3489	unsigned long count = 0;
3490	unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
3491				sizeof(unsigned long), GFP_KERNEL);
3492
3493	if (!map)
3494		return -ENOMEM;
3495
3496	flush_all(s);
3497	for_each_node_state(node, N_NORMAL_MEMORY) {
3498		struct kmem_cache_node *n = get_node(s, node);
3499
3500		count += validate_slab_node(s, n, map);
3501	}
3502	kfree(map);
3503	return count;
3504}
3505
3506#ifdef SLUB_RESILIENCY_TEST
3507static void resiliency_test(void)
3508{
3509	u8 *p;
3510
3511	printk(KERN_ERR "SLUB resiliency testing\n");
3512	printk(KERN_ERR "-----------------------\n");
3513	printk(KERN_ERR "A. Corruption after allocation\n");
3514
3515	p = kzalloc(16, GFP_KERNEL);
3516	p[16] = 0x12;
3517	printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer"
3518			" 0x12->0x%p\n\n", p + 16);
3519
3520	validate_slab_cache(kmalloc_caches + 4);
3521
3522	/* Hmmm... The next two are dangerous */
3523	p = kzalloc(32, GFP_KERNEL);
3524	p[32 + sizeof(void *)] = 0x34;
3525	printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab"
3526			" 0x34 -> -0x%p\n", p);
3527	printk(KERN_ERR
3528		"If allocated object is overwritten then not detectable\n\n");
3529
3530	validate_slab_cache(kmalloc_caches + 5);
3531	p = kzalloc(64, GFP_KERNEL);
3532	p += 64 + (get_cycles() & 0xff) * sizeof(void *);
3533	*p = 0x56;
3534	printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
3535									p);
3536	printk(KERN_ERR
3537		"If allocated object is overwritten then not detectable\n\n");
3538	validate_slab_cache(kmalloc_caches + 6);
3539
3540	printk(KERN_ERR "\nB. Corruption after free\n");
3541	p = kzalloc(128, GFP_KERNEL);
3542	kfree(p);
3543	*p = 0x78;
3544	printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
3545	validate_slab_cache(kmalloc_caches + 7);
3546
3547	p = kzalloc(256, GFP_KERNEL);
3548	kfree(p);
3549	p[50] = 0x9a;
3550	printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n",
3551			p);
3552	validate_slab_cache(kmalloc_caches + 8);
3553
3554	p = kzalloc(512, GFP_KERNEL);
3555	kfree(p);
3556	p[512] = 0xab;
3557	printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
3558	validate_slab_cache(kmalloc_caches + 9);
3559}
3560#else
3561static void resiliency_test(void) {};
3562#endif
3563
3564/*
3565 * Generate lists of code addresses where slabcache objects are allocated
3566 * and freed.
3567 */
3568
3569struct location {
3570	unsigned long count;
3571	unsigned long addr;
3572	long long sum_time;
3573	long min_time;
3574	long max_time;
3575	long min_pid;
3576	long max_pid;
3577	DECLARE_BITMAP(cpus, NR_CPUS);
3578	nodemask_t nodes;
3579};
3580
3581struct loc_track {
3582	unsigned long max;
3583	unsigned long count;
3584	struct location *loc;
3585};
3586
3587static void free_loc_track(struct loc_track *t)
3588{
3589	if (t->max)
3590		free_pages((unsigned long)t->loc,
3591			get_order(sizeof(struct location) * t->max));
3592}
3593
3594static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
3595{
3596	struct location *l;
3597	int order;
3598
3599	order = get_order(sizeof(struct location) * max);
3600
3601	l = (void *)__get_free_pages(flags, order);
3602	if (!l)
3603		return 0;
3604
3605	if (t->count) {
3606		memcpy(l, t->loc, sizeof(struct location) * t->count);
3607		free_loc_track(t);
3608	}
3609	t->max = max;
3610	t->loc = l;
3611	return 1;
3612}
3613
3614static int add_location(struct loc_track *t, struct kmem_cache *s,
3615				const struct track *track)
3616{
3617	long start, end, pos;
3618	struct location *l;
3619	unsigned long caddr;
3620	unsigned long age = jiffies - track->when;
3621
3622	start = -1;
3623	end = t->count;
3624
3625	for ( ; ; ) {
3626		pos = start + (end - start + 1) / 2;
3627
3628		/*
3629		 * There is nothing at "end". If we end up there
3630		 * we need to add something to before end.
3631		 * we need to insert the new element before end.
3632		if (pos == end)
3633			break;
3634
3635		caddr = t->loc[pos].addr;
3636		if (track->addr == caddr) {
3637
3638			l = &t->loc[pos];
3639			l->count++;
3640			if (track->when) {
3641				l->sum_time += age;
3642				if (age < l->min_time)
3643					l->min_time = age;
3644				if (age > l->max_time)
3645					l->max_time = age;
3646
3647				if (track->pid < l->min_pid)
3648					l->min_pid = track->pid;
3649				if (track->pid > l->max_pid)
3650					l->max_pid = track->pid;
3651
3652				cpumask_set_cpu(track->cpu,
3653						to_cpumask(l->cpus));
3654			}
3655			node_set(page_to_nid(virt_to_page(track)), l->nodes);
3656			return 1;
3657		}
3658
3659		if (track->addr < caddr)
3660			end = pos;
3661		else
3662			start = pos;
3663	}
3664
3665	/*
3666	 * Not found. Insert new tracking element.
3667	 */
3668	if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
3669		return 0;
3670
3671	l = t->loc + pos;
3672	if (pos < t->count)
3673		memmove(l + 1, l,
3674			(t->count - pos) * sizeof(struct location));
3675	t->count++;
3676	l->count = 1;
3677	l->addr = track->addr;
3678	l->sum_time = age;
3679	l->min_time = age;
3680	l->max_time = age;
3681	l->min_pid = track->pid;
3682	l->max_pid = track->pid;
3683	cpumask_clear(to_cpumask(l->cpus));
3684	cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
3685	nodes_clear(l->nodes);
3686	node_set(page_to_nid(virt_to_page(track)), l->nodes);
3687	return 1;
3688}
3689
3690static void process_slab(struct loc_track *t, struct kmem_cache *s,
3691		struct page *page, enum track_item alloc)
3692{
3693	void *addr = page_address(page);
3694	DECLARE_BITMAP(map, page->objects);
3695	void *p;
3696
3697	bitmap_zero(map, page->objects);
3698	for_each_free_object(p, s, page->freelist)
3699		set_bit(slab_index(p, s, addr), map);
3700
3701	for_each_object(p, s, addr, page->objects)
3702		if (!test_bit(slab_index(p, s, addr), map))
3703			add_location(t, s, get_track(s, p, alloc));
3704}
3705
3706static int list_locations(struct kmem_cache *s, char *buf,
3707					enum track_item alloc)
3708{
3709	int len = 0;
3710	unsigned long i;
3711	struct loc_track t = { 0, 0, NULL };
3712	int node;
3713
3714	if (!alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
3715			GFP_TEMPORARY))
3716		return sprintf(buf, "Out of memory\n");
3717
3718	/* Push back cpu slabs */
3719	flush_all(s);
3720
3721	for_each_node_state(node, N_NORMAL_MEMORY) {
3722		struct kmem_cache_node *n = get_node(s, node);
3723		unsigned long flags;
3724		struct page *page;
3725
3726		if (!atomic_long_read(&n->nr_slabs))
3727			continue;
3728
3729		spin_lock_irqsave(&n->list_lock, flags);
3730		list_for_each_entry(page, &n->partial, lru)
3731			process_slab(&t, s, page, alloc);
3732		list_for_each_entry(page, &n->full, lru)
3733			process_slab(&t, s, page, alloc);
3734		spin_unlock_irqrestore(&n->list_lock, flags);
3735	}
3736
3737	for (i = 0; i < t.count; i++) {
3738		struct location *l = &t.loc[i];
3739
3740		if (len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100)
3741			break;
3742		len += sprintf(buf + len, "%7ld ", l->count);
3743
3744		if (l->addr)
3745			len += sprint_symbol(buf + len, (unsigned long)l->addr);
3746		else
3747			len += sprintf(buf + len, "<not-available>");
3748
3749		if (l->sum_time != l->min_time) {
3750			len += sprintf(buf + len, " age=%ld/%ld/%ld",
3751				l->min_time,
3752				(long)div_u64(l->sum_time, l->count),
3753				l->max_time);
3754		} else
3755			len += sprintf(buf + len, " age=%ld",
3756				l->min_time);
3757
3758		if (l->min_pid != l->max_pid)
3759			len += sprintf(buf + len, " pid=%ld-%ld",
3760				l->min_pid, l->max_pid);
3761		else
3762			len += sprintf(buf + len, " pid=%ld",
3763				l->min_pid);
3764
3765		if (num_online_cpus() > 1 &&
3766				!cpumask_empty(to_cpumask(l->cpus)) &&
3767				len < PAGE_SIZE - 60) {
3768			len += sprintf(buf + len, " cpus=");
3769			len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50,
3770						 to_cpumask(l->cpus));
3771		}
3772
3773		if (nr_online_nodes > 1 && !nodes_empty(l->nodes) &&
3774				len < PAGE_SIZE - 60) {
3775			len += sprintf(buf + len, " nodes=");
3776			len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50,
3777					l->nodes);
3778		}
3779
3780		len += sprintf(buf + len, "\n");
3781	}
3782
3783	free_loc_track(&t);
3784	if (!t.count)
3785		len += sprintf(buf, "No data\n");
3786	return len;
3787}
3788
3789enum slab_stat_type {
3790	SL_ALL,			/* All slabs */
3791	SL_PARTIAL,		/* Only partially allocated slabs */
3792	SL_CPU,			/* Only slabs used for cpu caches */
3793	SL_OBJECTS,		/* Determine allocated objects not slabs */
3794	SL_TOTAL		/* Determine object capacity not slabs */
3795};
3796
3797#define SO_ALL		(1 << SL_ALL)
3798#define SO_PARTIAL	(1 << SL_PARTIAL)
3799#define SO_CPU		(1 << SL_CPU)
3800#define SO_OBJECTS	(1 << SL_OBJECTS)
3801#define SO_TOTAL	(1 << SL_TOTAL)
3802
3803static ssize_t show_slab_objects(struct kmem_cache *s,
3804			    char *buf, unsigned long flags)
3805{
3806	unsigned long total = 0;
3807	int node;
3808	int x;
3809	unsigned long *nodes;
3810	unsigned long *per_cpu;
3811
3812	nodes = kzalloc(2 * sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
3813	if (!nodes)
3814		return -ENOMEM;
3815	per_cpu = nodes + nr_node_ids;
3816
3817	if (flags & SO_CPU) {
3818		int cpu;
3819
3820		for_each_possible_cpu(cpu) {
3821			struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
3822
3823			if (!c || c->node < 0)
3824				continue;
3825
3826			if (c->page) {
3827				if (flags & SO_TOTAL)
3828					x = c->page->objects;
3829				else if (flags & SO_OBJECTS)
3830					x = c->page->inuse;
3831				else
3832					x = 1;
3833
3834				total += x;
3835				nodes[c->node] += x;
3836			}
3837			per_cpu[c->node]++;
3838		}
3839	}
3840
3841	if (flags & SO_ALL) {
3842		for_each_node_state(node, N_NORMAL_MEMORY) {
3843			struct kmem_cache_node *n = get_node(s, node);
3844
3845			if (flags & SO_TOTAL)
3846				x = atomic_long_read(&n->total_objects);
3847			else if (flags & SO_OBJECTS)
3848				x = atomic_long_read(&n->total_objects) -
3849					count_partial(n, count_free);
3850			else
3851				x = atomic_long_read(&n->nr_slabs);
3852
3853			total += x;
3854			nodes[node] += x;
3855		}
3856
3857	} else if (flags & SO_PARTIAL) {
3858		for_each_node_state(node, N_NORMAL_MEMORY) {
3859			struct kmem_cache_node *n = get_node(s, node);
3860
3861			if (flags & SO_TOTAL)
3862				x = count_partial(n, count_total);
3863			else if (flags & SO_OBJECTS)
3864				x = count_partial(n, count_inuse);
3865			else
3866				x = n->nr_partial;
3867			total += x;
3868			nodes[node] += x;
3869		}
3870	}
3871	x = sprintf(buf, "%lu", total);
3872#ifdef CONFIG_NUMA
3873	for_each_node_state(node, N_NORMAL_MEMORY)
3874		if (nodes[node])
3875			x += sprintf(buf + x, " N%d=%lu",
3876					node, nodes[node]);
3877#endif
3878	kfree(nodes);
3879	return x + sprintf(buf + x, "\n");
3880}
3881
3882static int any_slab_objects(struct kmem_cache *s)
3883{
3884	int node;
3885
3886	for_each_online_node(node) {
3887		struct kmem_cache_node *n = get_node(s, node);
3888
3889		if (!n)
3890			continue;
3891
3892		if (atomic_long_read(&n->total_objects))
3893			return 1;
3894	}
3895	return 0;
3896}
3897
3898#define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
3899#define to_slab(n) container_of(n, struct kmem_cache, kobj);
3900
3901struct slab_attribute {
3902	struct attribute attr;
3903	ssize_t (*show)(struct kmem_cache *s, char *buf);
3904	ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
3905};
3906
3907#define SLAB_ATTR_RO(_name) \
3908	static struct slab_attribute _name##_attr = __ATTR_RO(_name)
3909
3910#define SLAB_ATTR(_name) \
3911	static struct slab_attribute _name##_attr =  \
3912	__ATTR(_name, 0644, _name##_show, _name##_store)
3913
3914static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
3915{
3916	return sprintf(buf, "%d\n", s->size);
3917}
3918SLAB_ATTR_RO(slab_size);
3919
3920static ssize_t align_show(struct kmem_cache *s, char *buf)
3921{
3922	return sprintf(buf, "%d\n", s->align);
3923}
3924SLAB_ATTR_RO(align);
3925
3926static ssize_t object_size_show(struct kmem_cache *s, char *buf)
3927{
3928	return sprintf(buf, "%d\n", s->objsize);
3929}
3930SLAB_ATTR_RO(object_size);
3931
3932static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
3933{
3934	return sprintf(buf, "%d\n", oo_objects(s->oo));
3935}
3936SLAB_ATTR_RO(objs_per_slab);
3937
3938static ssize_t order_store(struct kmem_cache *s,
3939				const char *buf, size_t length)
3940{
3941	unsigned long order;
3942	int err;
3943
3944	err = strict_strtoul(buf, 10, &order);
3945	if (err)
3946		return err;
3947
3948	if (order > slub_max_order || order < slub_min_order)
3949		return -EINVAL;
3950
3951	calculate_sizes(s, order);
3952	return length;
3953}
3954
3955static ssize_t order_show(struct kmem_cache *s, char *buf)
3956{
3957	return sprintf(buf, "%d\n", oo_order(s->oo));
3958}
3959SLAB_ATTR(order);
3960
3961static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
3962{
3963	return sprintf(buf, "%lu\n", s->min_partial);
3964}
3965
3966static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
3967				 size_t length)
3968{
3969	unsigned long min;
3970	int err;
3971
3972	err = strict_strtoul(buf, 10, &min);
3973	if (err)
3974		return err;
3975
3976	set_min_partial(s, min);
3977	return length;
3978}
3979SLAB_ATTR(min_partial);
3980
3981static ssize_t ctor_show(struct kmem_cache *s, char *buf)
3982{
3983	if (s->ctor) {
3984		int n = sprint_symbol(buf, (unsigned long)s->ctor);
3985
3986		return n + sprintf(buf + n, "\n");
3987	}
3988	return 0;
3989}
3990SLAB_ATTR_RO(ctor);
3991
3992static ssize_t aliases_show(struct kmem_cache *s, char *buf)
3993{
3994	return sprintf(buf, "%d\n", s->refcount - 1);
3995}
3996SLAB_ATTR_RO(aliases);
3997
3998static ssize_t slabs_show(struct kmem_cache *s, char *buf)
3999{
4000	return show_slab_objects(s, buf, SO_ALL);
4001}
4002SLAB_ATTR_RO(slabs);
4003
4004static ssize_t partial_show(struct kmem_cache *s, char *buf)
4005{
4006	return show_slab_objects(s, buf, SO_PARTIAL);
4007}
4008SLAB_ATTR_RO(partial);
4009
4010static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
4011{
4012	return show_slab_objects(s, buf, SO_CPU);
4013}
4014SLAB_ATTR_RO(cpu_slabs);
4015
4016static ssize_t objects_show(struct kmem_cache *s, char *buf)
4017{
4018	return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS);
4019}
4020SLAB_ATTR_RO(objects);
4021
4022static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
4023{
4024	return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS);
4025}
4026SLAB_ATTR_RO(objects_partial);
4027
4028static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
4029{
4030	return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
4031}
4032SLAB_ATTR_RO(total_objects);
4033
4034static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
4035{
4036	return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE));
4037}
4038
4039static ssize_t sanity_checks_store(struct kmem_cache *s,
4040				const char *buf, size_t length)
4041{
4042	s->flags &= ~SLAB_DEBUG_FREE;
4043	if (buf[0] == '1')
4044		s->flags |= SLAB_DEBUG_FREE;
4045	return length;
4046}
4047SLAB_ATTR(sanity_checks);
4048
4049static ssize_t trace_show(struct kmem_cache *s, char *buf)
4050{
4051	return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE));
4052}
4053
4054static ssize_t trace_store(struct kmem_cache *s, const char *buf,
4055							size_t length)
4056{
4057	s->flags &= ~SLAB_TRACE;
4058	if (buf[0] == '1')
4059		s->flags |= SLAB_TRACE;
4060	return length;
4061}
4062SLAB_ATTR(trace);
4063
4064static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
4065{
4066	return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
4067}
4068
4069static ssize_t reclaim_account_store(struct kmem_cache *s,
4070				const char *buf, size_t length)
4071{
4072	s->flags &= ~SLAB_RECLAIM_ACCOUNT;
4073	if (buf[0] == '1')
4074		s->flags |= SLAB_RECLAIM_ACCOUNT;
4075	return length;
4076}
4077SLAB_ATTR(reclaim_account);
4078
4079static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
4080{
4081	return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
4082}
4083SLAB_ATTR_RO(hwcache_align);
4084
4085#ifdef CONFIG_ZONE_DMA
4086static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
4087{
4088	return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
4089}
4090SLAB_ATTR_RO(cache_dma);
4091#endif
4092
4093static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
4094{
4095	return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
4096}
4097SLAB_ATTR_RO(destroy_by_rcu);
4098
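/*
 * red_zone, poison and store_user change the object layout, so they can
 * only be toggled while the cache holds no objects (otherwise -EBUSY),
 * and every write recomputes the layout with calculate_sizes(s, -1).
 */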
4099static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
4100{
4101	return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
4102}
4103
4104static ssize_t red_zone_store(struct kmem_cache *s,
4105				const char *buf, size_t length)
4106{
4107	if (any_slab_objects(s))
4108		return -EBUSY;
4109
4110	s->flags &= ~SLAB_RED_ZONE;
4111	if (buf[0] == '1')
4112		s->flags |= SLAB_RED_ZONE;
4113	calculate_sizes(s, -1);
4114	return length;
4115}
4116SLAB_ATTR(red_zone);
4117
4118static ssize_t poison_show(struct kmem_cache *s, char *buf)
4119{
4120	return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON));
4121}
4122
4123static ssize_t poison_store(struct kmem_cache *s,
4124				const char *buf, size_t length)
4125{
4126	if (any_slab_objects(s))
4127		return -EBUSY;
4128
4129	s->flags &= ~SLAB_POISON;
4130	if (buf[0] == '1')
4131		s->flags |= SLAB_POISON;
4132	calculate_sizes(s, -1);
4133	return length;
4134}
4135SLAB_ATTR(poison);
4136
4137static ssize_t store_user_show(struct kmem_cache *s, char *buf)
4138{
4139	return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
4140}
4141
4142static ssize_t store_user_store(struct kmem_cache *s,
4143				const char *buf, size_t length)
4144{
4145	if (any_slab_objects(s))
4146		return -EBUSY;
4147
4148	s->flags &= ~SLAB_STORE_USER;
4149	if (buf[0] == '1')
4150		s->flags |= SLAB_STORE_USER;
4151	calculate_sizes(s, -1);
4152	return length;
4153}
4154SLAB_ATTR(store_user);
4155
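/*
 * "validate" is effectively write-only: reading returns an empty file,
 * while writing '1' runs validate_slab_cache() to consistency-check the
 * slabs of this cache.
 */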
4156static ssize_t validate_show(struct kmem_cache *s, char *buf)
4157{
4158	return 0;
4159}
4160
4161static ssize_t validate_store(struct kmem_cache *s,
4162			const char *buf, size_t length)
4163{
4164	int ret = -EINVAL;
4165
4166	if (buf[0] == '1') {
4167		ret = validate_slab_cache(s);
4168		if (ret >= 0)
4169			ret = length;
4170	}
4171	return ret;
4172}
4173SLAB_ATTR(validate);
4174
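/*
 * Writing '1' to "shrink" calls kmem_cache_shrink(), which frees empty
 * slabs and sorts the partial lists; any other input yields -EINVAL.
 */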
4175static ssize_t shrink_show(struct kmem_cache *s, char *buf)
4176{
4177	return 0;
4178}
4179
4180static ssize_t shrink_store(struct kmem_cache *s,
4181			const char *buf, size_t length)
4182{
4183	if (buf[0] == '1') {
4184		int rc = kmem_cache_shrink(s);
4185
4186		if (rc)
4187			return rc;
4188	} else
4189		return -EINVAL;
4190	return length;
4191}
4192SLAB_ATTR(shrink);
4193
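/*
 * alloc_calls and free_calls only have data when call-site tracking is
 * enabled through SLAB_STORE_USER; otherwise reads fail with -ENOSYS.
 */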
4194static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
4195{
4196	if (!(s->flags & SLAB_STORE_USER))
4197		return -ENOSYS;
4198	return list_locations(s, buf, TRACK_ALLOC);
4199}
4200SLAB_ATTR_RO(alloc_calls);
4201
4202static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
4203{
4204	if (!(s->flags & SLAB_STORE_USER))
4205		return -ENOSYS;
4206	return list_locations(s, buf, TRACK_FREE);
4207}
4208SLAB_ATTR_RO(free_calls);
4209
4210#ifdef CONFIG_NUMA
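/*
 * remote_node_defrag_ratio is stored internally scaled by 10 but is
 * exposed to user space as a plain percentage (0-100).  Out-of-range
 * writes are silently ignored rather than rejected.
 */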
4211static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
4212{
4213	return sprintf(buf, "%d\n", s->remote_node_defrag_ratio / 10);
4214}
4215
4216static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
4217				const char *buf, size_t length)
4218{
4219	unsigned long ratio;
4220	int err;
4221
4222	err = strict_strtoul(buf, 10, &ratio);
4223	if (err)
4224		return err;
4225
4226	if (ratio <= 100)
4227		s->remote_node_defrag_ratio = ratio * 10;
4228
4229	return length;
4230}
4231SLAB_ATTR(remote_node_defrag_ratio);
4232#endif
4233
4234#ifdef CONFIG_SLUB_STATS
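/*
 * Statistics attributes report the per-cpu event counters: the summed
 * total comes first and, on SMP, the non-zero per-cpu contributions
 * follow in the form " C<cpu>=<count>".
 */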
4235static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
4236{
4237	unsigned long sum  = 0;
4238	int cpu;
4239	int len;
4240	int *data = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL);
4241
4242	if (!data)
4243		return -ENOMEM;
4244
4245	for_each_online_cpu(cpu) {
4246		unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si];
4247
4248		data[cpu] = x;
4249		sum += x;
4250	}
4251
4252	len = sprintf(buf, "%lu", sum);
4253
4254#ifdef CONFIG_SMP
4255	for_each_online_cpu(cpu) {
4256		if (data[cpu] && len < PAGE_SIZE - 20)
4257			len += sprintf(buf + len, " C%d=%u", cpu, data[cpu]);
4258	}
4259#endif
4260	kfree(data);
4261	return len + sprintf(buf + len, "\n");
4262}
4263
4264static void clear_stat(struct kmem_cache *s, enum stat_item si)
4265{
4266	int cpu;
4267
4268	for_each_online_cpu(cpu)
4269		per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0;
4270}
4271
4272#define STAT_ATTR(si, text) 					\
4273static ssize_t text##_show(struct kmem_cache *s, char *buf)	\
4274{								\
4275	return show_stat(s, buf, si);				\
4276}								\
4277static ssize_t text##_store(struct kmem_cache *s,		\
4278				const char *buf, size_t length)	\
4279{								\
4280	if (buf[0] != '0')					\
4281		return -EINVAL;					\
4282	clear_stat(s, si);					\
4283	return length;						\
4284}								\
4285SLAB_ATTR(text);
4286
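/*
 * Each statistics attribute is read/write: reading goes through
 * show_stat() above, writing "0" clears the counter on every online cpu
 * and any other value returns -EINVAL.
 */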
4287STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
4288STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
4289STAT_ATTR(FREE_FASTPATH, free_fastpath);
4290STAT_ATTR(FREE_SLOWPATH, free_slowpath);
4291STAT_ATTR(FREE_FROZEN, free_frozen);
4292STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
4293STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
4294STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
4295STAT_ATTR(ALLOC_SLAB, alloc_slab);
4296STAT_ATTR(ALLOC_REFILL, alloc_refill);
4297STAT_ATTR(FREE_SLAB, free_slab);
4298STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
4299STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
4300STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
4301STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
4302STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
4303STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
4304STAT_ATTR(ORDER_FALLBACK, order_fallback);
4305#endif
4306
4307static struct attribute *slab_attrs[] = {
4308	&slab_size_attr.attr,
4309	&object_size_attr.attr,
4310	&objs_per_slab_attr.attr,
4311	&order_attr.attr,
4312	&min_partial_attr.attr,
4313	&objects_attr.attr,
4314	&objects_partial_attr.attr,
4315	&total_objects_attr.attr,
4316	&slabs_attr.attr,
4317	&partial_attr.attr,
4318	&cpu_slabs_attr.attr,
4319	&ctor_attr.attr,
4320	&aliases_attr.attr,
4321	&align_attr.attr,
4322	&sanity_checks_attr.attr,
4323	&trace_attr.attr,
4324	&hwcache_align_attr.attr,
4325	&reclaim_account_attr.attr,
4326	&destroy_by_rcu_attr.attr,
4327	&red_zone_attr.attr,
4328	&poison_attr.attr,
4329	&store_user_attr.attr,
4330	&validate_attr.attr,
4331	&shrink_attr.attr,
4332	&alloc_calls_attr.attr,
4333	&free_calls_attr.attr,
4334#ifdef CONFIG_ZONE_DMA
4335	&cache_dma_attr.attr,
4336#endif
4337#ifdef CONFIG_NUMA
4338	&remote_node_defrag_ratio_attr.attr,
4339#endif
4340#ifdef CONFIG_SLUB_STATS
4341	&alloc_fastpath_attr.attr,
4342	&alloc_slowpath_attr.attr,
4343	&free_fastpath_attr.attr,
4344	&free_slowpath_attr.attr,
4345	&free_frozen_attr.attr,
4346	&free_add_partial_attr.attr,
4347	&free_remove_partial_attr.attr,
4348	&alloc_from_partial_attr.attr,
4349	&alloc_slab_attr.attr,
4350	&alloc_refill_attr.attr,
4351	&free_slab_attr.attr,
4352	&cpuslab_flush_attr.attr,
4353	&deactivate_full_attr.attr,
4354	&deactivate_empty_attr.attr,
4355	&deactivate_to_head_attr.attr,
4356	&deactivate_to_tail_attr.attr,
4357	&deactivate_remote_frees_attr.attr,
4358	&order_fallback_attr.attr,
4359#endif
4360	NULL
4361};
4362
4363static struct attribute_group slab_attr_group = {
4364	.attrs = slab_attrs,
4365};
4366
4367static ssize_t slab_attr_show(struct kobject *kobj,
4368				struct attribute *attr,
4369				char *buf)
4370{
4371	struct slab_attribute *attribute;
4372	struct kmem_cache *s;
4373	int err;
4374
4375	attribute = to_slab_attr(attr);
4376	s = to_slab(kobj);
4377
4378	if (!attribute->show)
4379		return -EIO;
4380
4381	err = attribute->show(s, buf);
4382
4383	return err;
4384}
4385
4386static ssize_t slab_attr_store(struct kobject *kobj,
4387				struct attribute *attr,
4388				const char *buf, size_t len)
4389{
4390	struct slab_attribute *attribute;
4391	struct kmem_cache *s;
4392	int err;
4393
4394	attribute = to_slab_attr(attr);
4395	s = to_slab(kobj);
4396
4397	if (!attribute->store)
4398		return -EIO;
4399
4400	err = attribute->store(s, buf, len);
4401
4402	return err;
4403}
4404
4405static void kmem_cache_release(struct kobject *kobj)
4406{
4407	struct kmem_cache *s = to_slab(kobj);
4408
4409	kfree(s);
4410}
4411
4412static struct sysfs_ops slab_sysfs_ops = {
4413	.show = slab_attr_show,
4414	.store = slab_attr_store,
4415};
4416
4417static struct kobj_type slab_ktype = {
4418	.sysfs_ops = &slab_sysfs_ops,
4419	.release = kmem_cache_release
4420};
4421
4422static int uevent_filter(struct kset *kset, struct kobject *kobj)
4423{
4424	struct kobj_type *ktype = get_ktype(kobj);
4425
4426	if (ktype == &slab_ktype)
4427		return 1;
4428	return 0;
4429}
4430
4431static struct kset_uevent_ops slab_uevent_ops = {
4432	.filter = uevent_filter,
4433};
4434
4435static struct kset *slab_kset;
4436
4437#define ID_STR_LENGTH 64
4438
4439/* Create a unique string id for a slab cache:
4440 *
4441 * Format	:[flags-]size
4442 */
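/*
 * For example, a 192 byte DMA cache with SLAB_DEBUG_FREE set and
 * SLAB_NOTRACK clear would end up with an id along the lines of
 * ":dFt-0000192" (illustrative; the exact string follows from the flag
 * tests below).
 */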
4443static char *create_unique_id(struct kmem_cache *s)
4444{
4445	char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
4446	char *p = name;
4447
4448	BUG_ON(!name);
4449
4450	*p++ = ':';
4451	/*
4452	 * First flags affecting slabcache operations. We will only
4453	 * get here for aliasable slabs so we do not need to support
4454	 * too many flags. The flags here must cover all flags that
4455	 * are matched during merging to guarantee that the id is
4456	 * unique.
4457	 */
4458	if (s->flags & SLAB_CACHE_DMA)
4459		*p++ = 'd';
4460	if (s->flags & SLAB_RECLAIM_ACCOUNT)
4461		*p++ = 'a';
4462	if (s->flags & SLAB_DEBUG_FREE)
4463		*p++ = 'F';
4464	if (!(s->flags & SLAB_NOTRACK))
4465		*p++ = 't';
4466	if (p != name + 1)
4467		*p++ = '-';
4468	p += sprintf(p, "%07d", s->size);
4469	BUG_ON(p > name + ID_STR_LENGTH - 1);
4470	return name;
4471}
4472
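/*
 * Register a cache under /sys/kernel/slab.  Unmergeable caches use their
 * own name as the directory name; mergeable caches are registered under
 * the unique ":..." id from create_unique_id() and get a symlink for
 * their proper name through sysfs_slab_alias().
 */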
4473static int sysfs_slab_add(struct kmem_cache *s)
4474{
4475	int err;
4476	const char *name;
4477	int unmergeable;
4478
4479	if (slab_state < SYSFS)
4480		/* Defer until later */
4481		return 0;
4482
4483	unmergeable = slab_unmergeable(s);
4484	if (unmergeable) {
4485		/*
4486		 * This slab cache can never be merged, so we can use its name
4487		 * as-is.  This is typically the case in debug situations, where
4488		 * it also makes duplicate names easy to catch.
4489		 */
4490		sysfs_remove_link(&slab_kset->kobj, s->name);
4491		name = s->name;
4492	} else {
4493		/*
4494		 * Create a unique name for the slab as a target
4495		 * for the symlinks.
4496		 */
4497		name = create_unique_id(s);
4498	}
4499
4500	s->kobj.kset = slab_kset;
4501	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
4502	if (err) {
4503		kobject_put(&s->kobj);
4504		return err;
4505	}
4506
4507	err = sysfs_create_group(&s->kobj, &slab_attr_group);
4508	if (err) {
4509		kobject_del(&s->kobj);
4510		kobject_put(&s->kobj);
4511		return err;
4512	}
4513	kobject_uevent(&s->kobj, KOBJ_ADD);
4514	if (!unmergeable) {
4515		/* Setup first alias */
4516		sysfs_slab_alias(s, s->name);
4517		kfree(name);
4518	}
4519	return 0;
4520}
4521
4522static void sysfs_slab_remove(struct kmem_cache *s)
4523{
4524	kobject_uevent(&s->kobj, KOBJ_REMOVE);
4525	kobject_del(&s->kobj);
4526	kobject_put(&s->kobj);
4527}
4528
4529/*
4530 * Need to buffer aliases during bootup until sysfs becomes
4531 * available lest we lose that information.
4532 */
4533struct saved_alias {
4534	struct kmem_cache *s;
4535	const char *name;
4536	struct saved_alias *next;
4537};
4538
4539static struct saved_alias *alias_list;
4540
4541static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
4542{
4543	struct saved_alias *al;
4544
4545	if (slab_state == SYSFS) {
4546		/*
4547		 * If we have a leftover link then remove it.
4548		 */
4549		sysfs_remove_link(&slab_kset->kobj, name);
4550		return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
4551	}
4552
4553	al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
4554	if (!al)
4555		return -ENOMEM;
4556
4557	al->s = s;
4558	al->name = name;
4559	al->next = alias_list;
4560	alias_list = al;
4561	return 0;
4562}
4563
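/*
 * Late initialization: create the /sys/kernel/slab kset, register all
 * caches created during early boot, and replay the aliases buffered on
 * alias_list while sysfs was not yet available.
 */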
4564static int __init slab_sysfs_init(void)
4565{
4566	struct kmem_cache *s;
4567	int err;
4568
4569	slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
4570	if (!slab_kset) {
4571		printk(KERN_ERR "Cannot register slab subsystem.\n");
4572		return -ENOSYS;
4573	}
4574
4575	slab_state = SYSFS;
4576
4577	list_for_each_entry(s, &slab_caches, list) {
4578		err = sysfs_slab_add(s);
4579		if (err)
4580			printk(KERN_ERR "SLUB: Unable to add boot slab %s"
4581						" to sysfs\n", s->name);
4582	}
4583
4584	while (alias_list) {
4585		struct saved_alias *al = alias_list;
4586
4587		alias_list = alias_list->next;
4588		err = sysfs_slab_alias(al->s, al->name);
4589		if (err)
4590			printk(KERN_ERR "SLUB: Unable to add boot slab alias"
4591					" %s to sysfs\n", al->name);
4592		kfree(al);
4593	}
4594
4595	resiliency_test();
4596	return 0;
4597}
4598
4599__initcall(slab_sysfs_init);
4600#endif
4601
4602/*
4603 * The /proc/slabinfo ABI
4604 */
4605#ifdef CONFIG_SLABINFO
4606static void print_slabinfo_header(struct seq_file *m)
4607{
4608	seq_puts(m, "slabinfo - version: 2.1\n");
4609	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> "
4610		 "<objperslab> <pagesperslab>");
4611	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
4612	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
4613	seq_putc(m, '\n');
4614}
4615
4616static void *s_start(struct seq_file *m, loff_t *pos)
4617{
4618	loff_t n = *pos;
4619
4620	down_read(&slub_lock);
4621	if (!n)
4622		print_slabinfo_header(m);
4623
4624	return seq_list_start(&slab_caches, *pos);
4625}
4626
4627static void *s_next(struct seq_file *m, void *p, loff_t *pos)
4628{
4629	return seq_list_next(p, &slab_caches, pos);
4630}
4631
4632static void s_stop(struct seq_file *m, void *p)
4633{
4634	up_read(&slub_lock);
4635}
4636
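/*
 * Emit one cache line in slabinfo 2.1 format.  The per-node slab, object
 * and free object counts are summed; the tunables and shared fields are
 * reported as zero since SLUB has no queue tunables.
 */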
4637static int s_show(struct seq_file *m, void *p)
4638{
4639	unsigned long nr_partials = 0;
4640	unsigned long nr_slabs = 0;
4641	unsigned long nr_inuse = 0;
4642	unsigned long nr_objs = 0;
4643	unsigned long nr_free = 0;
4644	struct kmem_cache *s;
4645	int node;
4646
4647	s = list_entry(p, struct kmem_cache, list);
4648
4649	for_each_online_node(node) {
4650		struct kmem_cache_node *n = get_node(s, node);
4651
4652		if (!n)
4653			continue;
4654
4655		nr_partials += n->nr_partial;
4656		nr_slabs += atomic_long_read(&n->nr_slabs);
4657		nr_objs += atomic_long_read(&n->total_objects);
4658		nr_free += count_partial(n, count_free);
4659	}
4660
4661	nr_inuse = nr_objs - nr_free;
4662
4663	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", s->name, nr_inuse,
4664		   nr_objs, s->size, oo_objects(s->oo),
4665		   (1 << oo_order(s->oo)));
4666	seq_printf(m, " : tunables %4u %4u %4u", 0, 0, 0);
4667	seq_printf(m, " : slabdata %6lu %6lu %6lu", nr_slabs, nr_slabs,
4668		   0UL);
4669	seq_putc(m, '\n');
4670	return 0;
4671}
4672
4673static const struct seq_operations slabinfo_op = {
4674	.start = s_start,
4675	.next = s_next,
4676	.stop = s_stop,
4677	.show = s_show,
4678};
4679
4680static int slabinfo_open(struct inode *inode, struct file *file)
4681{
4682	return seq_open(file, &slabinfo_op);
4683}
4684
4685static const struct file_operations proc_slabinfo_operations = {
4686	.open		= slabinfo_open,
4687	.read		= seq_read,
4688	.llseek		= seq_lseek,
4689	.release	= seq_release,
4690};
4691
4692static int __init slab_proc_init(void)
4693{
4694	proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
4695	return 0;
4696}
4697module_init(slab_proc_init);
4698#endif /* CONFIG_SLABINFO */
4699