slub.c revision 0094de92a4f1da3a845ccc4ecb12ec0db8e48997
1/*
2 * SLUB: A slab allocator that limits cache line use instead of queuing
3 * objects in per cpu and per node lists.
4 *
5 * The allocator synchronizes using per slab locks and only
6 * uses a centralized lock to manage a pool of partial slabs.
7 *
8 * (C) 2007 SGI, Christoph Lameter
9 */
10
11#include <linux/mm.h>
12#include <linux/module.h>
13#include <linux/bit_spinlock.h>
14#include <linux/interrupt.h>
15#include <linux/bitops.h>
16#include <linux/slab.h>
17#include <linux/proc_fs.h>
18#include <linux/seq_file.h>
19#include <linux/cpu.h>
20#include <linux/cpuset.h>
21#include <linux/mempolicy.h>
22#include <linux/ctype.h>
23#include <linux/debugobjects.h>
24#include <linux/kallsyms.h>
25#include <linux/memory.h>
26#include <linux/math64.h>
27
28/*
29 * Lock order:
30 *   1. slab_lock(page)
31 *   2. slab->list_lock
32 *
33 *   The slab_lock protects operations on the object of a particular
34 *   slab and its metadata in the page struct. If the slab lock
35 *   has been taken then no allocations nor frees can be performed
36 *   on the objects in the slab nor can the slab be added or removed
37 *   from the partial or full lists since this would mean modifying
 38 *   the page struct of the slab.
39 *
40 *   The list_lock protects the partial and full list on each node and
 41 *   the partial slab counter. If taken then no slabs may be added or
 42 *   removed from the lists, nor may the number of partial slabs be modified.
43 *   (Note that the total number of slabs is an atomic value that may be
44 *   modified without taking the list lock).
45 *
46 *   The list_lock is a centralized lock and thus we avoid taking it as
47 *   much as possible. As long as SLUB does not have to handle partial
48 *   slabs, operations can continue without any centralized lock. F.e.
49 *   allocating a long series of objects that fill up slabs does not require
50 *   the list lock.
51 *
52 *   The lock order is sometimes inverted when we are trying to get a slab
53 *   off a list. We take the list_lock and then look for a page on the list
54 *   to use. While we do that objects in the slabs may be freed. We can
55 *   only operate on the slab if we have also taken the slab_lock. So we use
56 *   a slab_trylock() on the slab. If trylock was successful then no frees
57 *   can occur anymore and we can use the slab for allocations etc. If the
58 *   slab_trylock() does not succeed then frees are in progress in the slab and
59 *   we must stay away from it for a while since we may cause a bouncing
60 *   cacheline if we try to acquire the lock. So go onto the next slab.
61 *   If all pages are busy then we may allocate a new slab instead of reusing
 62 *   a partial slab. A new slab has no one operating on it and thus there is
63 *   no danger of cacheline contention.
64 *
65 *   Interrupts are disabled during allocation and deallocation in order to
66 *   make the slab allocator safe to use in the context of an irq. In addition
67 *   interrupts are disabled to ensure that the processor does not change
68 *   while handling per_cpu slabs, due to kernel preemption.
69 *
70 * SLUB assigns one slab for allocation to each processor.
71 * Allocations only occur from these slabs called cpu slabs.
72 *
73 * Slabs with free elements are kept on a partial list and during regular
74 * operations no list for full slabs is used. If an object in a full slab is
75 * freed then the slab will show up again on the partial lists.
76 * We track full slabs for debugging purposes though because otherwise we
77 * cannot scan all objects.
78 *
79 * Slabs are freed when they become empty. Teardown and setup is
80 * minimal so we rely on the page allocators per cpu caches for
81 * fast frees and allocs.
82 *
83 * Overloading of page flags that are otherwise used for LRU management.
84 *
85 * PageActive 		The slab is frozen and exempt from list processing.
86 * 			This means that the slab is dedicated to a purpose
87 * 			such as satisfying allocations for a specific
88 * 			processor. Objects may be freed in the slab while
89 * 			it is frozen but slab_free will then skip the usual
90 * 			list operations. It is up to the processor holding
91 * 			the slab to integrate the slab into the slab lists
92 * 			when the slab is no longer needed.
93 *
94 * 			One use of this flag is to mark slabs that are
95 * 			used for allocations. Then such a slab becomes a cpu
96 * 			slab. The cpu slab may be equipped with an additional
97 * 			freelist that allows lockless access to
98 * 			free objects in addition to the regular freelist
99 * 			that requires the slab lock.
100 *
101 * PageError		Slab requires special handling due to debug
 102 * 			options set. This moves slab handling out of
103 * 			the fast path and disables lockless freelists.
104 */
105
106#ifdef CONFIG_SLUB_DEBUG
107#define SLABDEBUG 1
108#else
109#define SLABDEBUG 0
110#endif
111
112/*
113 * Issues still to be resolved:
114 *
115 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
116 *
117 * - Variable sizing of the per node arrays
118 */
119
120/* Enable to test recovery from slab corruption on boot */
121#undef SLUB_RESILIENCY_TEST
122
123/*
 124 * Minimum number of partial slabs. These will be left on the partial
125 * lists even if they are empty. kmem_cache_shrink may reclaim them.
126 */
127#define MIN_PARTIAL 5
128
129/*
130 * Maximum number of desirable partial slabs.
131 * The existence of more partial slabs makes kmem_cache_shrink
 132 * sort the partial list by the number of objects in use.
133 */
134#define MAX_PARTIAL 10
135
136#define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \
137				SLAB_POISON | SLAB_STORE_USER)
138
139/*
140 * Set of flags that will prevent slab merging
141 */
142#define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
143		SLAB_TRACE | SLAB_DESTROY_BY_RCU)
144
145#define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
146		SLAB_CACHE_DMA)
147
148#ifndef ARCH_KMALLOC_MINALIGN
149#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
150#endif
151
152#ifndef ARCH_SLAB_MINALIGN
153#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
154#endif
155
156#define OO_SHIFT	16
157#define OO_MASK		((1 << OO_SHIFT) - 1)
158#define MAX_OBJS_PER_PAGE	65535 /* since page.objects is u16 */
159
160/* Internal SLUB flags */
161#define __OBJECT_POISON		0x80000000 /* Poison object */
162#define __SYSFS_ADD_DEFERRED	0x40000000 /* Not yet visible via sysfs */
163
164static int kmem_size = sizeof(struct kmem_cache);
165
166#ifdef CONFIG_SMP
167static struct notifier_block slab_notifier;
168#endif
169
170static enum {
171	DOWN,		/* No slab functionality available */
172	PARTIAL,	/* kmem_cache_open() works but kmalloc does not */
173	UP,		/* Everything works but does not show up in sysfs */
174	SYSFS		/* Sysfs up */
175} slab_state = DOWN;
176
177/* A list of all slab caches on the system */
178static DECLARE_RWSEM(slub_lock);
179static LIST_HEAD(slab_caches);
180
181/*
182 * Tracking user of a slab.
183 */
184struct track {
185	unsigned long addr;	/* Called from address */
186	int cpu;		/* Was running on cpu */
187	int pid;		/* Pid context */
188	unsigned long when;	/* When did the operation occur */
189};
190
191enum track_item { TRACK_ALLOC, TRACK_FREE };
192
193#ifdef CONFIG_SLUB_DEBUG
194static int sysfs_slab_add(struct kmem_cache *);
195static int sysfs_slab_alias(struct kmem_cache *, const char *);
196static void sysfs_slab_remove(struct kmem_cache *);
197
198#else
199static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
200static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
201							{ return 0; }
202static inline void sysfs_slab_remove(struct kmem_cache *s)
203{
204	kfree(s);
205}
206
207#endif
208
209static inline void stat(struct kmem_cache_cpu *c, enum stat_item si)
210{
211#ifdef CONFIG_SLUB_STATS
212	c->stat[si]++;
213#endif
214}
215
216/********************************************************************
217 * 			Core slab cache functions
218 *******************************************************************/
219
220int slab_is_available(void)
221{
222	return slab_state >= UP;
223}
224
225static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
226{
227#ifdef CONFIG_NUMA
228	return s->node[node];
229#else
230	return &s->local_node;
231#endif
232}
233
234static inline struct kmem_cache_cpu *get_cpu_slab(struct kmem_cache *s, int cpu)
235{
236#ifdef CONFIG_SMP
237	return s->cpu_slab[cpu];
238#else
239	return &s->cpu_slab;
240#endif
241}
242
243/* Verify that a pointer has an address that is valid within a slab page */
244static inline int check_valid_pointer(struct kmem_cache *s,
245				struct page *page, const void *object)
246{
247	void *base;
248
249	if (!object)
250		return 1;
251
252	base = page_address(page);
253	if (object < base || object >= base + page->objects * s->size ||
254		(object - base) % s->size) {
255		return 0;
256	}
257
258	return 1;
259}
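
/*
 * Editorial note with a worked example (illustrative numbers only, not
 * taken from a real dump): for a slab page at base 0x1000 with
 * s->size == 0x40 and page->objects == 8 the valid object addresses are
 * 0x1000, 0x1040, ... 0x11c0. A pointer such as 0x1050 fails the modulo
 * test and 0x1200 fails the range test, so both are rejected as corrupt.
 */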
260
261/*
262 * Slow version of get and set free pointer.
263 *
264 * This version requires touching the cache lines of kmem_cache which
 265 * we avoid doing in the fast alloc/free paths. There we obtain the offset
266 * from the page struct.
267 */
268static inline void *get_freepointer(struct kmem_cache *s, void *object)
269{
270	return *(void **)(object + s->offset);
271}
272
273static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
274{
275	*(void **)(object + s->offset) = fp;
276}
277
278/* Loop over all objects in a slab */
279#define for_each_object(__p, __s, __addr, __objects) \
280	for (__p = (__addr); __p < (__addr) + (__objects) * (__s)->size;\
281			__p += (__s)->size)
282
283/* Scan freelist */
284#define for_each_free_object(__p, __s, __free) \
285	for (__p = (__free); __p; __p = get_freepointer((__s), __p))
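
/*
 * Illustrative sketch (not part of the allocator): the helpers and macros
 * above chain free objects through their free pointers. Counting the free
 * objects of a slab page, assuming the caller holds the slab lock so the
 * chain cannot change underneath, could look like:
 *
 *	int nr_free = 0;
 *	void *p;
 *
 *	for_each_free_object(p, s, page->freelist)
 *		nr_free++;
 *
 * Pushing an object back onto the head of the chain is the inverse of
 * get_freepointer():
 *
 *	set_freepointer(s, object, page->freelist);
 *	page->freelist = object;
 */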
286
287/* Determine object index from a given position */
288static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
289{
290	return (p - addr) / s->size;
291}
292
293static inline struct kmem_cache_order_objects oo_make(int order,
294						unsigned long size)
295{
296	struct kmem_cache_order_objects x = {
297		(order << OO_SHIFT) + (PAGE_SIZE << order) / size
298	};
299
300	return x;
301}
302
303static inline int oo_order(struct kmem_cache_order_objects x)
304{
305	return x.x >> OO_SHIFT;
306}
307
308static inline int oo_objects(struct kmem_cache_order_objects x)
309{
310	return x.x & OO_MASK;
311}
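
/*
 * Worked example of the packed encoding above (an illustration, not
 * additional functionality): with 4KB pages, an object size of 256 and
 * order 1, oo_make(1, 256) stores
 *
 *	x = (1 << OO_SHIFT) + (PAGE_SIZE << 1) / 256
 *	  = 0x10000 + 32 = 0x10020
 *
 * so that oo_order() recovers order 1 from the high bits and oo_objects()
 * recovers 32 objects per slab from the low OO_MASK bits.
 */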
312
313#ifdef CONFIG_SLUB_DEBUG
314/*
315 * Debug settings:
316 */
317#ifdef CONFIG_SLUB_DEBUG_ON
318static int slub_debug = DEBUG_DEFAULT_FLAGS;
319#else
320static int slub_debug;
321#endif
322
323static char *slub_debug_slabs;
324
325/*
326 * Object debugging
327 */
328static void print_section(char *text, u8 *addr, unsigned int length)
329{
330	int i, offset;
331	int newline = 1;
332	char ascii[17];
333
334	ascii[16] = 0;
335
336	for (i = 0; i < length; i++) {
337		if (newline) {
338			printk(KERN_ERR "%8s 0x%p: ", text, addr + i);
339			newline = 0;
340		}
341		printk(KERN_CONT " %02x", addr[i]);
342		offset = i % 16;
343		ascii[offset] = isgraph(addr[i]) ? addr[i] : '.';
344		if (offset == 15) {
345			printk(KERN_CONT " %s\n", ascii);
346			newline = 1;
347		}
348	}
349	if (!newline) {
350		i %= 16;
351		while (i < 16) {
352			printk(KERN_CONT "   ");
353			ascii[i] = ' ';
354			i++;
355		}
356		printk(KERN_CONT " %s\n", ascii);
357	}
358}
359
360static struct track *get_track(struct kmem_cache *s, void *object,
361	enum track_item alloc)
362{
363	struct track *p;
364
365	if (s->offset)
366		p = object + s->offset + sizeof(void *);
367	else
368		p = object + s->inuse;
369
370	return p + alloc;
371}
372
373static void set_track(struct kmem_cache *s, void *object,
374			enum track_item alloc, unsigned long addr)
375{
376	struct track *p;
377
378	if (s->offset)
379		p = object + s->offset + sizeof(void *);
380	else
381		p = object + s->inuse;
382
383	p += alloc;
384	if (addr) {
385		p->addr = addr;
386		p->cpu = smp_processor_id();
387		p->pid = current->pid;
388		p->when = jiffies;
389	} else
390		memset(p, 0, sizeof(struct track));
391}
392
393static void init_tracking(struct kmem_cache *s, void *object)
394{
395	if (!(s->flags & SLAB_STORE_USER))
396		return;
397
398	set_track(s, object, TRACK_FREE, 0UL);
399	set_track(s, object, TRACK_ALLOC, 0UL);
400}
401
402static void print_track(const char *s, struct track *t)
403{
404	if (!t->addr)
405		return;
406
407	printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
408		s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
409}
410
411static void print_tracking(struct kmem_cache *s, void *object)
412{
413	if (!(s->flags & SLAB_STORE_USER))
414		return;
415
416	print_track("Allocated", get_track(s, object, TRACK_ALLOC));
417	print_track("Freed", get_track(s, object, TRACK_FREE));
418}
419
420static void print_page_info(struct page *page)
421{
422	printk(KERN_ERR "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
423		page, page->objects, page->inuse, page->freelist, page->flags);
424
425}
426
427static void slab_bug(struct kmem_cache *s, char *fmt, ...)
428{
429	va_list args;
430	char buf[100];
431
432	va_start(args, fmt);
433	vsnprintf(buf, sizeof(buf), fmt, args);
434	va_end(args);
435	printk(KERN_ERR "========================================"
436			"=====================================\n");
437	printk(KERN_ERR "BUG %s: %s\n", s->name, buf);
438	printk(KERN_ERR "----------------------------------------"
439			"-------------------------------------\n\n");
440}
441
442static void slab_fix(struct kmem_cache *s, char *fmt, ...)
443{
444	va_list args;
445	char buf[100];
446
447	va_start(args, fmt);
448	vsnprintf(buf, sizeof(buf), fmt, args);
449	va_end(args);
450	printk(KERN_ERR "FIX %s: %s\n", s->name, buf);
451}
452
453static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
454{
455	unsigned int off;	/* Offset of last byte */
456	u8 *addr = page_address(page);
457
458	print_tracking(s, p);
459
460	print_page_info(page);
461
462	printk(KERN_ERR "INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
463			p, p - addr, get_freepointer(s, p));
464
465	if (p > addr + 16)
466		print_section("Bytes b4", p - 16, 16);
467
468	print_section("Object", p, min_t(unsigned long, s->objsize, PAGE_SIZE));
469
470	if (s->flags & SLAB_RED_ZONE)
471		print_section("Redzone", p + s->objsize,
472			s->inuse - s->objsize);
473
474	if (s->offset)
475		off = s->offset + sizeof(void *);
476	else
477		off = s->inuse;
478
479	if (s->flags & SLAB_STORE_USER)
480		off += 2 * sizeof(struct track);
481
482	if (off != s->size)
483		/* Beginning of the filler is the free pointer */
484		print_section("Padding", p + off, s->size - off);
485
486	dump_stack();
487}
488
489static void object_err(struct kmem_cache *s, struct page *page,
490			u8 *object, char *reason)
491{
492	slab_bug(s, "%s", reason);
493	print_trailer(s, page, object);
494}
495
496static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...)
497{
498	va_list args;
499	char buf[100];
500
501	va_start(args, fmt);
502	vsnprintf(buf, sizeof(buf), fmt, args);
503	va_end(args);
504	slab_bug(s, "%s", buf);
505	print_page_info(page);
506	dump_stack();
507}
508
509static void init_object(struct kmem_cache *s, void *object, int active)
510{
511	u8 *p = object;
512
513	if (s->flags & __OBJECT_POISON) {
514		memset(p, POISON_FREE, s->objsize - 1);
515		p[s->objsize - 1] = POISON_END;
516	}
517
518	if (s->flags & SLAB_RED_ZONE)
519		memset(p + s->objsize,
520			active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE,
521			s->inuse - s->objsize);
522}
523
524static u8 *check_bytes(u8 *start, unsigned int value, unsigned int bytes)
525{
526	while (bytes) {
527		if (*start != (u8)value)
528			return start;
529		start++;
530		bytes--;
531	}
532	return NULL;
533}
534
535static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
536						void *from, void *to)
537{
538	slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
539	memset(from, data, to - from);
540}
541
542static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
543			u8 *object, char *what,
544			u8 *start, unsigned int value, unsigned int bytes)
545{
546	u8 *fault;
547	u8 *end;
548
549	fault = check_bytes(start, value, bytes);
550	if (!fault)
551		return 1;
552
553	end = start + bytes;
554	while (end > fault && end[-1] == value)
555		end--;
556
557	slab_bug(s, "%s overwritten", what);
558	printk(KERN_ERR "INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n",
559					fault, end - 1, fault[0], value);
560	print_trailer(s, page, object);
561
562	restore_bytes(s, what, value, fault, end);
563	return 0;
564}
565
566/*
567 * Object layout:
568 *
569 * object address
570 * 	Bytes of the object to be managed.
571 * 	If the freepointer may overlay the object then the free
572 * 	pointer is the first word of the object.
573 *
574 * 	Poisoning uses 0x6b (POISON_FREE) and the last byte is
575 * 	0xa5 (POISON_END)
576 *
577 * object + s->objsize
578 * 	Padding to reach word boundary. This is also used for Redzoning.
579 * 	Padding is extended by another word if Redzoning is enabled and
580 * 	objsize == inuse.
581 *
582 * 	We fill with 0xbb (RED_INACTIVE) for inactive objects and with
583 * 	0xcc (RED_ACTIVE) for objects in use.
584 *
585 * object + s->inuse
586 * 	Meta data starts here.
587 *
588 * 	A. Free pointer (if we cannot overwrite object on free)
589 * 	B. Tracking data for SLAB_STORE_USER
 590 * 	C. Padding to reach required alignment boundary or at minimum
591 * 		one word if debugging is on to be able to detect writes
592 * 		before the word boundary.
593 *
594 *	Padding is done using 0x5a (POISON_INUSE)
595 *
596 * object + s->size
597 * 	Nothing is used beyond s->size.
598 *
599 * If slabcaches are merged then the objsize and inuse boundaries are mostly
600 * ignored. And therefore no slab options that rely on these boundaries
601 * may be used with merged slabcaches.
602 */
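
/*
 * Editorial sketch of the layout above for a hypothetical cache on a
 * 64-bit machine with objsize 24 and the full set of debug flags
 * (red zoning, poisoning, user tracking). The numbers only make the
 * scheme concrete; the real offsets are computed in calculate_sizes()
 * and depend on the architecture and flags:
 *
 *	  0 .. 23	object payload (0x6b/0xa5 poison while free)
 *	 24 .. 31	red zone word (0xbb free, 0xcc active), inuse = 32
 *	 32 .. 39	free pointer (s->offset = 32; poisoning prevents
 *			overlaying it on the object)
 *	 40 .. 87	two struct track entries (alloc and free)
 *	 88 .. 95	padding filled with 0x5a, s->size = 96
 */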
603
604static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
605{
606	unsigned long off = s->inuse;	/* The end of info */
607
608	if (s->offset)
609		/* Freepointer is placed after the object. */
610		off += sizeof(void *);
611
612	if (s->flags & SLAB_STORE_USER)
613		/* We also have user information there */
614		off += 2 * sizeof(struct track);
615
616	if (s->size == off)
617		return 1;
618
619	return check_bytes_and_report(s, page, p, "Object padding",
620				p + off, POISON_INUSE, s->size - off);
621}
622
623/* Check the pad bytes at the end of a slab page */
624static int slab_pad_check(struct kmem_cache *s, struct page *page)
625{
626	u8 *start;
627	u8 *fault;
628	u8 *end;
629	int length;
630	int remainder;
631
632	if (!(s->flags & SLAB_POISON))
633		return 1;
634
635	start = page_address(page);
636	length = (PAGE_SIZE << compound_order(page));
637	end = start + length;
638	remainder = length % s->size;
639	if (!remainder)
640		return 1;
641
642	fault = check_bytes(end - remainder, POISON_INUSE, remainder);
643	if (!fault)
644		return 1;
645	while (end > fault && end[-1] == POISON_INUSE)
646		end--;
647
648	slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
649	print_section("Padding", end - remainder, remainder);
650
651	restore_bytes(s, "slab padding", POISON_INUSE, start, end);
652	return 0;
653}
654
655static int check_object(struct kmem_cache *s, struct page *page,
656					void *object, int active)
657{
658	u8 *p = object;
659	u8 *endobject = object + s->objsize;
660
661	if (s->flags & SLAB_RED_ZONE) {
662		unsigned int red =
663			active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE;
664
665		if (!check_bytes_and_report(s, page, object, "Redzone",
666			endobject, red, s->inuse - s->objsize))
667			return 0;
668	} else {
669		if ((s->flags & SLAB_POISON) && s->objsize < s->inuse) {
670			check_bytes_and_report(s, page, p, "Alignment padding",
671				endobject, POISON_INUSE, s->inuse - s->objsize);
672		}
673	}
674
675	if (s->flags & SLAB_POISON) {
676		if (!active && (s->flags & __OBJECT_POISON) &&
677			(!check_bytes_and_report(s, page, p, "Poison", p,
678					POISON_FREE, s->objsize - 1) ||
679			 !check_bytes_and_report(s, page, p, "Poison",
680				p + s->objsize - 1, POISON_END, 1)))
681			return 0;
682		/*
683		 * check_pad_bytes cleans up on its own.
684		 */
685		check_pad_bytes(s, page, p);
686	}
687
688	if (!s->offset && active)
689		/*
690		 * Object and freepointer overlap. Cannot check
691		 * freepointer while object is allocated.
692		 */
693		return 1;
694
695	/* Check free pointer validity */
696	if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
697		object_err(s, page, p, "Freepointer corrupt");
698		/*
 699		 * No choice but to zap it and thus lose the remainder
700		 * of the free objects in this slab. May cause
701		 * another error because the object count is now wrong.
702		 */
703		set_freepointer(s, p, NULL);
704		return 0;
705	}
706	return 1;
707}
708
709static int check_slab(struct kmem_cache *s, struct page *page)
710{
711	int maxobj;
712
713	VM_BUG_ON(!irqs_disabled());
714
715	if (!PageSlab(page)) {
716		slab_err(s, page, "Not a valid slab page");
717		return 0;
718	}
719
720	maxobj = (PAGE_SIZE << compound_order(page)) / s->size;
721	if (page->objects > maxobj) {
722		slab_err(s, page, "objects %u > max %u",
 723			page->objects, maxobj);
724		return 0;
725	}
726	if (page->inuse > page->objects) {
727		slab_err(s, page, "inuse %u > max %u",
 728			page->inuse, page->objects);
729		return 0;
730	}
731	/* Slab_pad_check fixes things up after itself */
732	slab_pad_check(s, page);
733	return 1;
734}
735
736/*
737 * Determine if a certain object on a page is on the freelist. Must hold the
738 * slab lock to guarantee that the chains are in a consistent state.
739 */
740static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
741{
742	int nr = 0;
743	void *fp = page->freelist;
744	void *object = NULL;
745	unsigned long max_objects;
746
747	while (fp && nr <= page->objects) {
748		if (fp == search)
749			return 1;
750		if (!check_valid_pointer(s, page, fp)) {
751			if (object) {
752				object_err(s, page, object,
753					"Freechain corrupt");
754				set_freepointer(s, object, NULL);
755				break;
756			} else {
757				slab_err(s, page, "Freepointer corrupt");
758				page->freelist = NULL;
759				page->inuse = page->objects;
760				slab_fix(s, "Freelist cleared");
761				return 0;
762			}
763			break;
764		}
765		object = fp;
766		fp = get_freepointer(s, object);
767		nr++;
768	}
769
770	max_objects = (PAGE_SIZE << compound_order(page)) / s->size;
771	if (max_objects > MAX_OBJS_PER_PAGE)
772		max_objects = MAX_OBJS_PER_PAGE;
773
774	if (page->objects != max_objects) {
775		slab_err(s, page, "Wrong number of objects. Found %d but "
776			"should be %d", page->objects, max_objects);
777		page->objects = max_objects;
778		slab_fix(s, "Number of objects adjusted.");
779	}
780	if (page->inuse != page->objects - nr) {
781		slab_err(s, page, "Wrong object count. Counter is %d but "
782			"counted were %d", page->inuse, page->objects - nr);
783		page->inuse = page->objects - nr;
784		slab_fix(s, "Object count adjusted.");
785	}
786	return search == NULL;
787}
788
789static void trace(struct kmem_cache *s, struct page *page, void *object,
790								int alloc)
791{
792	if (s->flags & SLAB_TRACE) {
793		printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
794			s->name,
795			alloc ? "alloc" : "free",
796			object, page->inuse,
797			page->freelist);
798
799		if (!alloc)
800			print_section("Object", (void *)object, s->objsize);
801
802		dump_stack();
803	}
804}
805
806/*
807 * Tracking of fully allocated slabs for debugging purposes.
808 */
809static void add_full(struct kmem_cache_node *n, struct page *page)
810{
811	spin_lock(&n->list_lock);
812	list_add(&page->lru, &n->full);
813	spin_unlock(&n->list_lock);
814}
815
816static void remove_full(struct kmem_cache *s, struct page *page)
817{
818	struct kmem_cache_node *n;
819
820	if (!(s->flags & SLAB_STORE_USER))
821		return;
822
823	n = get_node(s, page_to_nid(page));
824
825	spin_lock(&n->list_lock);
826	list_del(&page->lru);
827	spin_unlock(&n->list_lock);
828}
829
830/* Tracking of the number of slabs for debugging purposes */
831static inline unsigned long slabs_node(struct kmem_cache *s, int node)
832{
833	struct kmem_cache_node *n = get_node(s, node);
834
835	return atomic_long_read(&n->nr_slabs);
836}
837
838static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
839{
840	struct kmem_cache_node *n = get_node(s, node);
841
842	/*
843	 * May be called early in order to allocate a slab for the
844	 * kmem_cache_node structure. Solve the chicken-egg
845	 * dilemma by deferring the increment of the count during
846	 * bootstrap (see early_kmem_cache_node_alloc).
847	 */
848	if (!NUMA_BUILD || n) {
849		atomic_long_inc(&n->nr_slabs);
850		atomic_long_add(objects, &n->total_objects);
851	}
852}
853static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
854{
855	struct kmem_cache_node *n = get_node(s, node);
856
857	atomic_long_dec(&n->nr_slabs);
858	atomic_long_sub(objects, &n->total_objects);
859}
860
861/* Object debug checks for alloc/free paths */
862static void setup_object_debug(struct kmem_cache *s, struct page *page,
863								void *object)
864{
865	if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
866		return;
867
868	init_object(s, object, 0);
869	init_tracking(s, object);
870}
871
872static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
873					void *object, unsigned long addr)
874{
875	if (!check_slab(s, page))
876		goto bad;
877
878	if (!on_freelist(s, page, object)) {
879		object_err(s, page, object, "Object already allocated");
880		goto bad;
881	}
882
883	if (!check_valid_pointer(s, page, object)) {
884		object_err(s, page, object, "Freelist Pointer check fails");
885		goto bad;
886	}
887
888	if (!check_object(s, page, object, 0))
889		goto bad;
890
 891	/* Success. Perform special debug activities for allocs */
892	if (s->flags & SLAB_STORE_USER)
893		set_track(s, object, TRACK_ALLOC, addr);
894	trace(s, page, object, 1);
895	init_object(s, object, 1);
896	return 1;
897
898bad:
899	if (PageSlab(page)) {
900		/*
901		 * If this is a slab page then lets do the best we can
902		 * to avoid issues in the future. Marking all objects
903		 * as used avoids touching the remaining objects.
904		 */
905		slab_fix(s, "Marking all objects used");
906		page->inuse = page->objects;
907		page->freelist = NULL;
908	}
909	return 0;
910}
911
912static int free_debug_processing(struct kmem_cache *s, struct page *page,
913					void *object, unsigned long addr)
914{
915	if (!check_slab(s, page))
916		goto fail;
917
918	if (!check_valid_pointer(s, page, object)) {
919		slab_err(s, page, "Invalid object pointer 0x%p", object);
920		goto fail;
921	}
922
923	if (on_freelist(s, page, object)) {
924		object_err(s, page, object, "Object already free");
925		goto fail;
926	}
927
928	if (!check_object(s, page, object, 1))
929		return 0;
930
931	if (unlikely(s != page->slab)) {
932		if (!PageSlab(page)) {
933			slab_err(s, page, "Attempt to free object(0x%p) "
934				"outside of slab", object);
935		} else if (!page->slab) {
936			printk(KERN_ERR
937				"SLUB <none>: no slab for object 0x%p.\n",
938						object);
939			dump_stack();
940		} else
941			object_err(s, page, object,
942					"page slab pointer corrupt.");
943		goto fail;
944	}
945
946	/* Special debug activities for freeing objects */
947	if (!PageSlubFrozen(page) && !page->freelist)
948		remove_full(s, page);
949	if (s->flags & SLAB_STORE_USER)
950		set_track(s, object, TRACK_FREE, addr);
951	trace(s, page, object, 0);
952	init_object(s, object, 0);
953	return 1;
954
955fail:
956	slab_fix(s, "Object at 0x%p not freed", object);
957	return 0;
958}
959
960static int __init setup_slub_debug(char *str)
961{
962	slub_debug = DEBUG_DEFAULT_FLAGS;
963	if (*str++ != '=' || !*str)
964		/*
965		 * No options specified. Switch on full debugging.
966		 */
967		goto out;
968
969	if (*str == ',')
970		/*
971		 * No options but restriction on slabs. This means full
972		 * debugging for slabs matching a pattern.
973		 */
974		goto check_slabs;
975
976	slub_debug = 0;
977	if (*str == '-')
978		/*
979		 * Switch off all debugging measures.
980		 */
981		goto out;
982
983	/*
984	 * Determine which debug features should be switched on
985	 */
986	for (; *str && *str != ','; str++) {
987		switch (tolower(*str)) {
988		case 'f':
989			slub_debug |= SLAB_DEBUG_FREE;
990			break;
991		case 'z':
992			slub_debug |= SLAB_RED_ZONE;
993			break;
994		case 'p':
995			slub_debug |= SLAB_POISON;
996			break;
997		case 'u':
998			slub_debug |= SLAB_STORE_USER;
999			break;
1000		case 't':
1001			slub_debug |= SLAB_TRACE;
1002			break;
1003		default:
1004			printk(KERN_ERR "slub_debug option '%c' "
1005				"unknown. skipped\n", *str);
1006		}
1007	}
1008
1009check_slabs:
1010	if (*str == ',')
1011		slub_debug_slabs = str + 1;
1012out:
1013	return 1;
1014}
1015
1016__setup("slub_debug", setup_slub_debug);
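
/*
 * Example boot parameters accepted by the parser above (the cache names
 * are just illustrations; matching is a simple prefix compare):
 *
 *	slub_debug		enable all debug options for every cache
 *	slub_debug=-		switch all debugging off
 *	slub_debug=FZ		sanity checks (F) and red zoning (Z) only
 *	slub_debug=,dentry	full debugging, restricted to the dentry cache
 *	slub_debug=P,kmalloc-	poisoning for caches whose name starts
 *				with "kmalloc-"
 */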
1017
1018static unsigned long kmem_cache_flags(unsigned long objsize,
1019	unsigned long flags, const char *name,
1020	void (*ctor)(void *))
1021{
1022	/*
1023	 * Enable debugging if selected on the kernel commandline.
1024	 */
1025	if (slub_debug && (!slub_debug_slabs ||
1026	    strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)) == 0))
1027			flags |= slub_debug;
1028
1029	return flags;
1030}
1031#else
1032static inline void setup_object_debug(struct kmem_cache *s,
1033			struct page *page, void *object) {}
1034
1035static inline int alloc_debug_processing(struct kmem_cache *s,
1036	struct page *page, void *object, unsigned long addr) { return 0; }
1037
1038static inline int free_debug_processing(struct kmem_cache *s,
1039	struct page *page, void *object, unsigned long addr) { return 0; }
1040
1041static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
1042			{ return 1; }
1043static inline int check_object(struct kmem_cache *s, struct page *page,
1044			void *object, int active) { return 1; }
1045static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
1046static inline unsigned long kmem_cache_flags(unsigned long objsize,
1047	unsigned long flags, const char *name,
1048	void (*ctor)(void *))
1049{
1050	return flags;
1051}
1052#define slub_debug 0
1053
1054static inline unsigned long slabs_node(struct kmem_cache *s, int node)
1055							{ return 0; }
1056static inline void inc_slabs_node(struct kmem_cache *s, int node,
1057							int objects) {}
1058static inline void dec_slabs_node(struct kmem_cache *s, int node,
1059							int objects) {}
1060#endif
1061
1062/*
1063 * Slab allocation and freeing
1064 */
1065static inline struct page *alloc_slab_page(gfp_t flags, int node,
1066					struct kmem_cache_order_objects oo)
1067{
1068	int order = oo_order(oo);
1069
1070	if (node == -1)
1071		return alloc_pages(flags, order);
1072	else
1073		return alloc_pages_node(node, flags, order);
1074}
1075
1076static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
1077{
1078	struct page *page;
1079	struct kmem_cache_order_objects oo = s->oo;
1080
1081	flags |= s->allocflags;
1082
1083	page = alloc_slab_page(flags | __GFP_NOWARN | __GFP_NORETRY, node,
1084									oo);
1085	if (unlikely(!page)) {
1086		oo = s->min;
1087		/*
1088		 * Allocation may have failed due to fragmentation.
1089		 * Try a lower order alloc if possible
1090		 */
1091		page = alloc_slab_page(flags, node, oo);
1092		if (!page)
1093			return NULL;
1094
1095		stat(get_cpu_slab(s, raw_smp_processor_id()), ORDER_FALLBACK);
1096	}
1097	page->objects = oo_objects(oo);
1098	mod_zone_page_state(page_zone(page),
1099		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
1100		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
1101		1 << oo_order(oo));
1102
1103	return page;
1104}
1105
1106static void setup_object(struct kmem_cache *s, struct page *page,
1107				void *object)
1108{
1109	setup_object_debug(s, page, object);
1110	if (unlikely(s->ctor))
1111		s->ctor(object);
1112}
1113
1114static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
1115{
1116	struct page *page;
1117	void *start;
1118	void *last;
1119	void *p;
1120
1121	BUG_ON(flags & GFP_SLAB_BUG_MASK);
1122
1123	page = allocate_slab(s,
1124		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
1125	if (!page)
1126		goto out;
1127
1128	inc_slabs_node(s, page_to_nid(page), page->objects);
1129	page->slab = s;
1130	page->flags |= 1 << PG_slab;
1131	if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
1132			SLAB_STORE_USER | SLAB_TRACE))
1133		__SetPageSlubDebug(page);
1134
1135	start = page_address(page);
1136
1137	if (unlikely(s->flags & SLAB_POISON))
1138		memset(start, POISON_INUSE, PAGE_SIZE << compound_order(page));
1139
1140	last = start;
1141	for_each_object(p, s, start, page->objects) {
1142		setup_object(s, page, last);
1143		set_freepointer(s, last, p);
1144		last = p;
1145	}
1146	setup_object(s, page, last);
1147	set_freepointer(s, last, NULL);
1148
1149	page->freelist = start;
1150	page->inuse = 0;
1151out:
1152	return page;
1153}
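
/*
 * Purely an illustration of the construction loop above: for a
 * hypothetical slab holding four objects A, B, C and D laid out back to
 * back, new_slab() leaves the page as
 *
 *	page->freelist: A -> B -> C -> D -> NULL
 *	page->inuse:    0
 *
 * so allocation can proceed by simply popping the head of the chain.
 */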
1154
1155static void __free_slab(struct kmem_cache *s, struct page *page)
1156{
1157	int order = compound_order(page);
1158	int pages = 1 << order;
1159
1160	if (unlikely(SLABDEBUG && PageSlubDebug(page))) {
1161		void *p;
1162
1163		slab_pad_check(s, page);
1164		for_each_object(p, s, page_address(page),
1165						page->objects)
1166			check_object(s, page, p, 0);
1167		__ClearPageSlubDebug(page);
1168	}
1169
1170	mod_zone_page_state(page_zone(page),
1171		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
1172		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
1173		-pages);
1174
1175	__ClearPageSlab(page);
1176	reset_page_mapcount(page);
1177	__free_pages(page, order);
1178}
1179
1180static void rcu_free_slab(struct rcu_head *h)
1181{
1182	struct page *page;
1183
1184	page = container_of((struct list_head *)h, struct page, lru);
1185	__free_slab(page->slab, page);
1186}
1187
1188static void free_slab(struct kmem_cache *s, struct page *page)
1189{
1190	if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) {
1191		/*
1192		 * RCU free overloads the RCU head over the LRU
1193		 */
1194		struct rcu_head *head = (void *)&page->lru;
1195
1196		call_rcu(head, rcu_free_slab);
1197	} else
1198		__free_slab(s, page);
1199}
1200
1201static void discard_slab(struct kmem_cache *s, struct page *page)
1202{
1203	dec_slabs_node(s, page_to_nid(page), page->objects);
1204	free_slab(s, page);
1205}
1206
1207/*
1208 * Per slab locking using the pagelock
1209 */
1210static __always_inline void slab_lock(struct page *page)
1211{
1212	bit_spin_lock(PG_locked, &page->flags);
1213}
1214
1215static __always_inline void slab_unlock(struct page *page)
1216{
1217	__bit_spin_unlock(PG_locked, &page->flags);
1218}
1219
1220static __always_inline int slab_trylock(struct page *page)
1221{
1222	int rc = 1;
1223
1224	rc = bit_spin_trylock(PG_locked, &page->flags);
1225	return rc;
1226}
1227
1228/*
1229 * Management of partially allocated slabs
1230 */
1231static void add_partial(struct kmem_cache_node *n,
1232				struct page *page, int tail)
1233{
1234	spin_lock(&n->list_lock);
1235	n->nr_partial++;
1236	if (tail)
1237		list_add_tail(&page->lru, &n->partial);
1238	else
1239		list_add(&page->lru, &n->partial);
1240	spin_unlock(&n->list_lock);
1241}
1242
1243static void remove_partial(struct kmem_cache *s, struct page *page)
1244{
1245	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1246
1247	spin_lock(&n->list_lock);
1248	list_del(&page->lru);
1249	n->nr_partial--;
1250	spin_unlock(&n->list_lock);
1251}
1252
1253/*
1254 * Lock slab and remove from the partial list.
1255 *
1256 * Must hold list_lock.
1257 */
1258static inline int lock_and_freeze_slab(struct kmem_cache_node *n,
1259							struct page *page)
1260{
1261	if (slab_trylock(page)) {
1262		list_del(&page->lru);
1263		n->nr_partial--;
1264		__SetPageSlubFrozen(page);
1265		return 1;
1266	}
1267	return 0;
1268}
1269
1270/*
1271 * Try to allocate a partial slab from a specific node.
1272 */
1273static struct page *get_partial_node(struct kmem_cache_node *n)
1274{
1275	struct page *page;
1276
1277	/*
1278	 * Racy check. If we mistakenly see no partial slabs then we
1279	 * just allocate an empty slab. If we mistakenly try to get a
 1280	 * partial slab and there is none available then get_partial_node()
1281	 * will return NULL.
1282	 */
1283	if (!n || !n->nr_partial)
1284		return NULL;
1285
1286	spin_lock(&n->list_lock);
1287	list_for_each_entry(page, &n->partial, lru)
1288		if (lock_and_freeze_slab(n, page))
1289			goto out;
1290	page = NULL;
1291out:
1292	spin_unlock(&n->list_lock);
1293	return page;
1294}
1295
1296/*
1297 * Get a page from somewhere. Search in increasing NUMA distances.
1298 */
1299static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
1300{
1301#ifdef CONFIG_NUMA
1302	struct zonelist *zonelist;
1303	struct zoneref *z;
1304	struct zone *zone;
1305	enum zone_type high_zoneidx = gfp_zone(flags);
1306	struct page *page;
1307
1308	/*
1309	 * The defrag ratio allows a configuration of the tradeoffs between
1310	 * inter node defragmentation and node local allocations. A lower
1311	 * defrag_ratio increases the tendency to do local allocations
1312	 * instead of attempting to obtain partial slabs from other nodes.
1313	 *
1314	 * If the defrag_ratio is set to 0 then kmalloc() always
1315	 * returns node local objects. If the ratio is higher then kmalloc()
1316	 * may return off node objects because partial slabs are obtained
1317	 * from other nodes and filled up.
1318	 *
1319	 * If /sys/kernel/slab/xx/defrag_ratio is set to 100 (which makes
1320	 * defrag_ratio = 1000) then every (well almost) allocation will
1321	 * first attempt to defrag slab caches on other nodes. This means
1322	 * scanning over all nodes to look for partial slabs which may be
1323	 * expensive if we do it every time we are trying to find a slab
1324	 * with available objects.
1325	 */
1326	if (!s->remote_node_defrag_ratio ||
1327			get_cycles() % 1024 > s->remote_node_defrag_ratio)
1328		return NULL;
1329
1330	zonelist = node_zonelist(slab_node(current->mempolicy), flags);
1331	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
1332		struct kmem_cache_node *n;
1333
1334		n = get_node(s, zone_to_nid(zone));
1335
1336		if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
1337				n->nr_partial > n->min_partial) {
1338			page = get_partial_node(n);
1339			if (page)
1340				return page;
1341		}
1342	}
1343#endif
1344	return NULL;
1345}
1346
1347/*
1348 * Get a partial page, lock it and return it.
1349 */
1350static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
1351{
1352	struct page *page;
1353	int searchnode = (node == -1) ? numa_node_id() : node;
1354
1355	page = get_partial_node(get_node(s, searchnode));
1356	if (page || (flags & __GFP_THISNODE))
1357		return page;
1358
1359	return get_any_partial(s, flags);
1360}
1361
1362/*
1363 * Move a page back to the lists.
1364 *
1365 * Must be called with the slab lock held.
1366 *
1367 * On exit the slab lock will have been dropped.
1368 */
1369static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
1370{
1371	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1372	struct kmem_cache_cpu *c = get_cpu_slab(s, smp_processor_id());
1373
1374	__ClearPageSlubFrozen(page);
1375	if (page->inuse) {
1376
1377		if (page->freelist) {
1378			add_partial(n, page, tail);
1379			stat(c, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
1380		} else {
1381			stat(c, DEACTIVATE_FULL);
1382			if (SLABDEBUG && PageSlubDebug(page) &&
1383						(s->flags & SLAB_STORE_USER))
1384				add_full(n, page);
1385		}
1386		slab_unlock(page);
1387	} else {
1388		stat(c, DEACTIVATE_EMPTY);
1389		if (n->nr_partial < n->min_partial) {
1390			/*
1391			 * Adding an empty slab to the partial slabs in order
1392			 * to avoid page allocator overhead. This slab needs
1393			 * to come after the other slabs with objects in
1394			 * so that the others get filled first. That way the
1395			 * size of the partial list stays small.
1396			 *
1397			 * kmem_cache_shrink can reclaim any empty slabs from
1398			 * the partial list.
1399			 */
1400			add_partial(n, page, 1);
1401			slab_unlock(page);
1402		} else {
1403			slab_unlock(page);
1404			stat(get_cpu_slab(s, raw_smp_processor_id()), FREE_SLAB);
1405			discard_slab(s, page);
1406		}
1407	}
1408}
1409
1410/*
1411 * Remove the cpu slab
1412 */
1413static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
1414{
1415	struct page *page = c->page;
1416	int tail = 1;
1417
1418	if (page->freelist)
1419		stat(c, DEACTIVATE_REMOTE_FREES);
1420	/*
1421	 * Merge cpu freelist into slab freelist. Typically we get here
1422	 * because both freelists are empty. So this is unlikely
1423	 * to occur.
1424	 */
1425	while (unlikely(c->freelist)) {
1426		void **object;
1427
1428		tail = 0;	/* Hot objects. Put the slab first */
1429
1430		/* Retrieve object from cpu_freelist */
1431		object = c->freelist;
1432		c->freelist = c->freelist[c->offset];
1433
1434		/* And put onto the regular freelist */
1435		object[c->offset] = page->freelist;
1436		page->freelist = object;
1437		page->inuse--;
1438	}
1439	c->page = NULL;
1440	unfreeze_slab(s, page, tail);
1441}
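
/*
 * Example of the freelist merge above (hypothetical state): if the cpu
 * freelist still holds X -> Y and the slab freelist holds Z, the page
 * ends up with
 *
 *	page->freelist: Y -> X -> Z	(cpu-hot objects pushed on top)
 *	page->inuse:    decreased by 2	(X and Y are free again)
 *	c->page = NULL, c->freelist = NULL
 *
 * before unfreeze_slab() puts the slab back onto the node lists.
 */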
1442
1443static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
1444{
1445	stat(c, CPUSLAB_FLUSH);
1446	slab_lock(c->page);
1447	deactivate_slab(s, c);
1448}
1449
1450/*
1451 * Flush cpu slab.
1452 *
1453 * Called from IPI handler with interrupts disabled.
1454 */
1455static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
1456{
1457	struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
1458
1459	if (likely(c && c->page))
1460		flush_slab(s, c);
1461}
1462
1463static void flush_cpu_slab(void *d)
1464{
1465	struct kmem_cache *s = d;
1466
1467	__flush_cpu_slab(s, smp_processor_id());
1468}
1469
1470static void flush_all(struct kmem_cache *s)
1471{
1472	on_each_cpu(flush_cpu_slab, s, 1);
1473}
1474
1475/*
1476 * Check if the objects in a per cpu structure fit numa
1477 * locality expectations.
1478 */
1479static inline int node_match(struct kmem_cache_cpu *c, int node)
1480{
1481#ifdef CONFIG_NUMA
1482	if (node != -1 && c->node != node)
1483		return 0;
1484#endif
1485	return 1;
1486}
1487
1488/*
1489 * Slow path. The lockless freelist is empty or we need to perform
1490 * debugging duties.
1491 *
1492 * Interrupts are disabled.
1493 *
1494 * Processing is still very fast if new objects have been freed to the
1495 * regular freelist. In that case we simply take over the regular freelist
1496 * as the lockless freelist and zap the regular freelist.
1497 *
1498 * If that is not working then we fall back to the partial lists. We take the
1499 * first element of the freelist as the object to allocate now and move the
1500 * rest of the freelist to the lockless freelist.
1501 *
1502 * And if we were unable to get a new slab from the partial slab lists then
1503 * we need to allocate a new slab. This is the slowest path since it involves
1504 * a call to the page allocator and the setup of a new slab.
1505 */
1506static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
1507			  unsigned long addr, struct kmem_cache_cpu *c)
1508{
1509	void **object;
1510	struct page *new;
1511
1512	/* We handle __GFP_ZERO in the caller */
1513	gfpflags &= ~__GFP_ZERO;
1514
1515	if (!c->page)
1516		goto new_slab;
1517
1518	slab_lock(c->page);
1519	if (unlikely(!node_match(c, node)))
1520		goto another_slab;
1521
1522	stat(c, ALLOC_REFILL);
1523
1524load_freelist:
1525	object = c->page->freelist;
1526	if (unlikely(!object))
1527		goto another_slab;
1528	if (unlikely(SLABDEBUG && PageSlubDebug(c->page)))
1529		goto debug;
1530
1531	c->freelist = object[c->offset];
1532	c->page->inuse = c->page->objects;
1533	c->page->freelist = NULL;
1534	c->node = page_to_nid(c->page);
1535unlock_out:
1536	slab_unlock(c->page);
1537	stat(c, ALLOC_SLOWPATH);
1538	return object;
1539
1540another_slab:
1541	deactivate_slab(s, c);
1542
1543new_slab:
1544	new = get_partial(s, gfpflags, node);
1545	if (new) {
1546		c->page = new;
1547		stat(c, ALLOC_FROM_PARTIAL);
1548		goto load_freelist;
1549	}
1550
1551	if (gfpflags & __GFP_WAIT)
1552		local_irq_enable();
1553
1554	new = new_slab(s, gfpflags, node);
1555
1556	if (gfpflags & __GFP_WAIT)
1557		local_irq_disable();
1558
1559	if (new) {
1560		c = get_cpu_slab(s, smp_processor_id());
1561		stat(c, ALLOC_SLAB);
1562		if (c->page)
1563			flush_slab(s, c);
1564		slab_lock(new);
1565		__SetPageSlubFrozen(new);
1566		c->page = new;
1567		goto load_freelist;
1568	}
1569	return NULL;
1570debug:
1571	if (!alloc_debug_processing(s, c->page, object, addr))
1572		goto another_slab;
1573
1574	c->page->inuse++;
1575	c->page->freelist = object[c->offset];
1576	c->node = -1;
1577	goto unlock_out;
1578}
1579
1580/*
1581 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
1582 * have the fastpath folded into their functions. So no function call
1583 * overhead for requests that can be satisfied on the fastpath.
1584 *
1585 * The fastpath works by first checking if the lockless freelist can be used.
1586 * If not then __slab_alloc is called for slow processing.
1587 *
1588 * Otherwise we can simply pick the next object from the lockless free list.
1589 */
1590static __always_inline void *slab_alloc(struct kmem_cache *s,
1591		gfp_t gfpflags, int node, unsigned long addr)
1592{
1593	void **object;
1594	struct kmem_cache_cpu *c;
1595	unsigned long flags;
1596	unsigned int objsize;
1597
1598	local_irq_save(flags);
1599	c = get_cpu_slab(s, smp_processor_id());
1600	objsize = c->objsize;
1601	if (unlikely(!c->freelist || !node_match(c, node)))
1602
1603		object = __slab_alloc(s, gfpflags, node, addr, c);
1604
1605	else {
1606		object = c->freelist;
1607		c->freelist = object[c->offset];
1608		stat(c, ALLOC_FASTPATH);
1609	}
1610	local_irq_restore(flags);
1611
1612	if (unlikely((gfpflags & __GFP_ZERO) && object))
1613		memset(object, 0, objsize);
1614
1615	return object;
1616}
1617
1618void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
1619{
1620	return slab_alloc(s, gfpflags, -1, _RET_IP_);
1621}
1622EXPORT_SYMBOL(kmem_cache_alloc);
1623
1624#ifdef CONFIG_NUMA
1625void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
1626{
1627	return slab_alloc(s, gfpflags, node, _RET_IP_);
1628}
1629EXPORT_SYMBOL(kmem_cache_alloc_node);
1630#endif
1631
1632/*
 1633 * Slow path handling. This may still be called frequently since objects
1634 * have a longer lifetime than the cpu slabs in most processing loads.
1635 *
1636 * So we still attempt to reduce cache line usage. Just take the slab
1637 * lock and free the item. If there is no additional partial page
1638 * handling required then we can return immediately.
1639 */
1640static void __slab_free(struct kmem_cache *s, struct page *page,
1641			void *x, unsigned long addr, unsigned int offset)
1642{
1643	void *prior;
1644	void **object = (void *)x;
1645	struct kmem_cache_cpu *c;
1646
1647	c = get_cpu_slab(s, raw_smp_processor_id());
1648	stat(c, FREE_SLOWPATH);
1649	slab_lock(page);
1650
1651	if (unlikely(SLABDEBUG && PageSlubDebug(page)))
1652		goto debug;
1653
1654checks_ok:
1655	prior = object[offset] = page->freelist;
1656	page->freelist = object;
1657	page->inuse--;
1658
1659	if (unlikely(PageSlubFrozen(page))) {
1660		stat(c, FREE_FROZEN);
1661		goto out_unlock;
1662	}
1663
1664	if (unlikely(!page->inuse))
1665		goto slab_empty;
1666
1667	/*
1668	 * Objects left in the slab. If it was not on the partial list before
1669	 * then add it.
1670	 */
1671	if (unlikely(!prior)) {
1672		add_partial(get_node(s, page_to_nid(page)), page, 1);
1673		stat(c, FREE_ADD_PARTIAL);
1674	}
1675
1676out_unlock:
1677	slab_unlock(page);
1678	return;
1679
1680slab_empty:
1681	if (prior) {
1682		/*
1683		 * Slab still on the partial list.
1684		 */
1685		remove_partial(s, page);
1686		stat(c, FREE_REMOVE_PARTIAL);
1687	}
1688	slab_unlock(page);
1689	stat(c, FREE_SLAB);
1690	discard_slab(s, page);
1691	return;
1692
1693debug:
1694	if (!free_debug_processing(s, page, x, addr))
1695		goto out_unlock;
1696	goto checks_ok;
1697}
1698
1699/*
1700 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
1701 * can perform fastpath freeing without additional function calls.
1702 *
1703 * The fastpath is only possible if we are freeing to the current cpu slab
 1704 * of this processor. This is typically the case if we have just allocated
1705 * the item before.
1706 *
1707 * If fastpath is not possible then fall back to __slab_free where we deal
1708 * with all sorts of special processing.
1709 */
1710static __always_inline void slab_free(struct kmem_cache *s,
1711			struct page *page, void *x, unsigned long addr)
1712{
1713	void **object = (void *)x;
1714	struct kmem_cache_cpu *c;
1715	unsigned long flags;
1716
1717	local_irq_save(flags);
1718	c = get_cpu_slab(s, smp_processor_id());
1719	debug_check_no_locks_freed(object, c->objsize);
1720	if (!(s->flags & SLAB_DEBUG_OBJECTS))
1721		debug_check_no_obj_freed(object, s->objsize);
1722	if (likely(page == c->page && c->node >= 0)) {
1723		object[c->offset] = c->freelist;
1724		c->freelist = object;
1725		stat(c, FREE_FASTPATH);
1726	} else
1727		__slab_free(s, page, x, addr, c->offset);
1728
1729	local_irq_restore(flags);
1730}
1731
1732void kmem_cache_free(struct kmem_cache *s, void *x)
1733{
1734	struct page *page;
1735
1736	page = virt_to_head_page(x);
1737
1738	slab_free(s, page, x, _RET_IP_);
1739}
1740EXPORT_SYMBOL(kmem_cache_free);
1741
1742/* Figure out on which slab page the object resides */
1743static struct page *get_object_page(const void *x)
1744{
1745	struct page *page = virt_to_head_page(x);
1746
1747	if (!PageSlab(page))
1748		return NULL;
1749
1750	return page;
1751}
1752
1753/*
1754 * Object placement in a slab is made very easy because we always start at
1755 * offset 0. If we tune the size of the object to the alignment then we can
1756 * get the required alignment by putting one properly sized object after
1757 * another.
1758 *
1759 * Notice that the allocation order determines the sizes of the per cpu
1760 * caches. Each processor has always one slab available for allocations.
1761 * Increasing the allocation order reduces the number of times that slabs
1762 * must be moved on and off the partial lists and is therefore a factor in
1763 * locking overhead.
1764 */
1765
1766/*
 1767 * Minimum / Maximum order of slab pages. This influences locking overhead
1768 * and slab fragmentation. A higher order reduces the number of partial slabs
1769 * and increases the number of allocations possible without having to
1770 * take the list_lock.
1771 */
1772static int slub_min_order;
1773static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
1774static int slub_min_objects;
1775
1776/*
1777 * Merge control. If this is set then no merging of slab caches will occur.
1778 * (Could be removed. This was introduced to pacify the merge skeptics.)
1779 */
1780static int slub_nomerge;
1781
1782/*
 1783 * Calculate the order of allocation given a slab object size.
1784 *
1785 * The order of allocation has significant impact on performance and other
1786 * system components. Generally order 0 allocations should be preferred since
1787 * order 0 does not cause fragmentation in the page allocator. Larger objects
 1788 * can be problematic to put into order 0 slabs because there may be too much
1789 * unused space left. We go to a higher order if more than 1/16th of the slab
1790 * would be wasted.
1791 *
1792 * In order to reach satisfactory performance we must ensure that a minimum
1793 * number of objects is in one slab. Otherwise we may generate too much
1794 * activity on the partial lists which requires taking the list_lock. This is
1795 * less a concern for large slabs though which are rarely used.
1796 *
1797 * slub_max_order specifies the order where we begin to stop considering the
1798 * number of objects in a slab as critical. If we reach slub_max_order then
1799 * we try to keep the page order as low as possible. So we accept more waste
1800 * of space in favor of a small page order.
1801 *
1802 * Higher order allocations also allow the placement of more objects in a
1803 * slab and thereby reduce object handling overhead. If the user has
 1804 * requested a higher minimum order then we start with that one instead of
1805 * the smallest order which will fit the object.
1806 */
1807static inline int slab_order(int size, int min_objects,
1808				int max_order, int fract_leftover)
1809{
1810	int order;
1811	int rem;
1812	int min_order = slub_min_order;
1813
1814	if ((PAGE_SIZE << min_order) / size > MAX_OBJS_PER_PAGE)
1815		return get_order(size * MAX_OBJS_PER_PAGE) - 1;
1816
1817	for (order = max(min_order,
1818				fls(min_objects * size - 1) - PAGE_SHIFT);
1819			order <= max_order; order++) {
1820
1821		unsigned long slab_size = PAGE_SIZE << order;
1822
1823		if (slab_size < min_objects * size)
1824			continue;
1825
1826		rem = slab_size % size;
1827
1828		if (rem <= slab_size / fract_leftover)
1829			break;
1830
1831	}
1832
1833	return order;
1834}
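
/*
 * Worked example for slab_order() (4KB pages, illustrative numbers):
 * with size = 192, min_objects = 16, max_order = 3 and
 * fract_leftover = 16 the loop starts at
 * order = fls(16 * 192 - 1) - PAGE_SHIFT = 0. An order-0 slab holds
 * 21 objects and wastes 4096 % 192 = 64 bytes, which is within the
 * allowed 4096 / 16 = 256 bytes, so order 0 is returned.
 */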
1835
1836static inline int calculate_order(int size)
1837{
1838	int order;
1839	int min_objects;
1840	int fraction;
1841
1842	/*
1843	 * Attempt to find best configuration for a slab. This
1844	 * works by first attempting to generate a layout with
1845	 * the best configuration and backing off gradually.
1846	 *
1847	 * First we reduce the acceptable waste in a slab. Then
1848	 * we reduce the minimum objects required in a slab.
1849	 */
1850	min_objects = slub_min_objects;
1851	if (!min_objects)
1852		min_objects = 4 * (fls(nr_cpu_ids) + 1);
1853	while (min_objects > 1) {
1854		fraction = 16;
1855		while (fraction >= 4) {
1856			order = slab_order(size, min_objects,
1857						slub_max_order, fraction);
1858			if (order <= slub_max_order)
1859				return order;
1860			fraction /= 2;
1861		}
1862		min_objects /= 2;
1863	}
1864
1865	/*
1866	 * We were unable to place multiple objects in a slab. Now
 1867	 * let's see if we can place a single object there.
1868	 */
1869	order = slab_order(size, 1, slub_max_order, 1);
1870	if (order <= slub_max_order)
1871		return order;
1872
1873	/*
1874	 * Doh this slab cannot be placed using slub_max_order.
1875	 */
1876	order = slab_order(size, 1, MAX_ORDER, 1);
1877	if (order <= MAX_ORDER)
1878		return order;
1879	return -ENOSYS;
1880}
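
/*
 * Example of the backoff above (hypothetical 16-cpu machine): with
 * slub_min_objects unset, min_objects starts at 4 * (fls(16) + 1) = 24.
 * If no order within slub_max_order can hold 24 objects even at the
 * most tolerant 1/4 waste, min_objects is halved to 12, 6, 3 and so on
 * until some (order, fraction) pair fits, with a single object per slab
 * as the final fallback.
 */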
1881
1882/*
1883 * Figure out what the alignment of the objects will be.
1884 */
1885static unsigned long calculate_alignment(unsigned long flags,
1886		unsigned long align, unsigned long size)
1887{
1888	/*
1889	 * If the user wants hardware cache aligned objects then follow that
1890	 * suggestion if the object is sufficiently large.
1891	 *
1892	 * The hardware cache alignment cannot override the specified
1893	 * alignment though. If that is greater then use it.
1894	 */
1895	if (flags & SLAB_HWCACHE_ALIGN) {
1896		unsigned long ralign = cache_line_size();
1897		while (size <= ralign / 2)
1898			ralign /= 2;
1899		align = max(align, ralign);
1900	}
1901
1902	if (align < ARCH_SLAB_MINALIGN)
1903		align = ARCH_SLAB_MINALIGN;
1904
1905	return ALIGN(align, sizeof(void *));
1906}
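
/*
 * Worked example (hypothetical values): with 64 byte cache lines,
 * SLAB_HWCACHE_ALIGN and a 20 byte object, ralign is halved from 64 to
 * 32 (since 20 <= 32) but not further (20 > 16), so two objects share a
 * cache line and the returned alignment is 32. A 100 byte object would
 * keep the full 64 byte alignment.
 */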
1907
1908static void init_kmem_cache_cpu(struct kmem_cache *s,
1909			struct kmem_cache_cpu *c)
1910{
1911	c->page = NULL;
1912	c->freelist = NULL;
1913	c->node = 0;
1914	c->offset = s->offset / sizeof(void *);
1915	c->objsize = s->objsize;
1916#ifdef CONFIG_SLUB_STATS
1917	memset(c->stat, 0, NR_SLUB_STAT_ITEMS * sizeof(unsigned));
1918#endif
1919}
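
/*
 * Note on the scaled offset above (illustration): s->offset is a byte
 * offset while c->offset counts words, because the fast paths index the
 * object as a void * array. With s->offset == 32 on a 64-bit machine
 * c->offset becomes 4, so object[c->offset] and
 * *(void **)(object + s->offset) refer to the same free pointer slot.
 */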
1920
1921static void
1922init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
1923{
1924	n->nr_partial = 0;
1925
1926	/*
1927	 * The larger the object size is, the more pages we want on the partial
1928	 * list to avoid pounding the page allocator excessively.
1929	 */
1930	n->min_partial = ilog2(s->size);
1931	if (n->min_partial < MIN_PARTIAL)
1932		n->min_partial = MIN_PARTIAL;
1933	else if (n->min_partial > MAX_PARTIAL)
1934		n->min_partial = MAX_PARTIAL;
1935
1936	spin_lock_init(&n->list_lock);
1937	INIT_LIST_HEAD(&n->partial);
1938#ifdef CONFIG_SLUB_DEBUG
1939	atomic_long_set(&n->nr_slabs, 0);
1940	atomic_long_set(&n->total_objects, 0);
1941	INIT_LIST_HEAD(&n->full);
1942#endif
1943}
1944
1945#ifdef CONFIG_SMP
1946/*
1947 * Per cpu array for per cpu structures.
1948 *
1949 * The per cpu array places all kmem_cache_cpu structures from one processor
1950 * close together meaning that it becomes possible that multiple per cpu
1951 * structures are contained in one cacheline. This may be particularly
1952 * beneficial for the kmalloc caches.
1953 *
1954 * A desktop system typically has around 60-80 slabs. With 100 here we are
1955 * likely able to get per cpu structures for all caches from the array defined
1956 * here. We must be able to cover all kmalloc caches during bootstrap.
1957 *
1958 * If the per cpu array is exhausted then fall back to kmalloc
1959 * of individual cachelines. No sharing is possible then.
1960 */
1961#define NR_KMEM_CACHE_CPU 100
1962
1963static DEFINE_PER_CPU(struct kmem_cache_cpu,
1964				kmem_cache_cpu)[NR_KMEM_CACHE_CPU];
1965
1966static DEFINE_PER_CPU(struct kmem_cache_cpu *, kmem_cache_cpu_free);
1967static cpumask_t kmem_cach_cpu_free_init_once = CPU_MASK_NONE;
1968
1969static struct kmem_cache_cpu *alloc_kmem_cache_cpu(struct kmem_cache *s,
1970							int cpu, gfp_t flags)
1971{
1972	struct kmem_cache_cpu *c = per_cpu(kmem_cache_cpu_free, cpu);
1973
1974	if (c)
1975		per_cpu(kmem_cache_cpu_free, cpu) =
1976				(void *)c->freelist;
1977	else {
1978		/* Table overflow: So allocate ourselves */
1979		c = kmalloc_node(
1980			ALIGN(sizeof(struct kmem_cache_cpu), cache_line_size()),
1981			flags, cpu_to_node(cpu));
1982		if (!c)
1983			return NULL;
1984	}
1985
1986	init_kmem_cache_cpu(s, c);
1987	return c;
1988}
1989
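/*
 * Return a kmem_cache_cpu structure either to the per cpu freelist (if it
 * came from the static per cpu array above) or to the page allocator via
 * kfree() (if it had to be kmalloc'ed in alloc_kmem_cache_cpu()).
 */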
1990static void free_kmem_cache_cpu(struct kmem_cache_cpu *c, int cpu)
1991{
1992	if (c < per_cpu(kmem_cache_cpu, cpu) ||
1993			c > per_cpu(kmem_cache_cpu, cpu) + NR_KMEM_CACHE_CPU) {
1994		kfree(c);
1995		return;
1996	}
1997	c->freelist = (void *)per_cpu(kmem_cache_cpu_free, cpu);
1998	per_cpu(kmem_cache_cpu_free, cpu) = c;
1999}
2000
2001static void free_kmem_cache_cpus(struct kmem_cache *s)
2002{
2003	int cpu;
2004
2005	for_each_online_cpu(cpu) {
2006		struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
2007
2008		if (c) {
2009			s->cpu_slab[cpu] = NULL;
2010			free_kmem_cache_cpu(c, cpu);
2011		}
2012	}
2013}
2014
2015static int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
2016{
2017	int cpu;
2018
2019	for_each_online_cpu(cpu) {
2020		struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
2021
2022		if (c)
2023			continue;
2024
2025		c = alloc_kmem_cache_cpu(s, cpu, flags);
2026		if (!c) {
2027			free_kmem_cache_cpus(s);
2028			return 0;
2029		}
2030		s->cpu_slab[cpu] = c;
2031	}
2032	return 1;
2033}
2034
2035/*
2036 * Initialize the per cpu array.
2037 */
2038static void init_alloc_cpu_cpu(int cpu)
2039{
2040	int i;
2041
2042	if (cpu_isset(cpu, kmem_cach_cpu_free_init_once))
2043		return;
2044
2045	for (i = NR_KMEM_CACHE_CPU - 1; i >= 0; i--)
2046		free_kmem_cache_cpu(&per_cpu(kmem_cache_cpu, cpu)[i], cpu);
2047
2048	cpu_set(cpu, kmem_cach_cpu_free_init_once);
2049}
2050
2051static void __init init_alloc_cpu(void)
2052{
2053	int cpu;
2054
2055	for_each_online_cpu(cpu)
2056		init_alloc_cpu_cpu(cpu);
2057}
2058
2059#else
2060static inline void free_kmem_cache_cpus(struct kmem_cache *s) {}
2061static inline void init_alloc_cpu(void) {}
2062
2063static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
2064{
2065	init_kmem_cache_cpu(s, &s->cpu_slab);
2066	return 1;
2067}
2068#endif
2069
2070#ifdef CONFIG_NUMA
2071/*
2072 * No kmalloc_node yet so do it by hand. We know that this is the first
2073 * slab on the node for this slabcache. There are no concurrent accesses
2074 * possible.
2075 *
2076 * Note that this function only works for the kmalloc_node_cache, i.e. when
2077 * allocating kmem_cache_node structures for it. This is used for bootstrapping
2078 * memory on a fresh node that has no slab structures yet.
2079 */
2080static void early_kmem_cache_node_alloc(gfp_t gfpflags, int node)
2081{
2082	struct page *page;
2083	struct kmem_cache_node *n;
2084	unsigned long flags;
2085
2086	BUG_ON(kmalloc_caches->size < sizeof(struct kmem_cache_node));
2087
2088	page = new_slab(kmalloc_caches, gfpflags, node);
2089
2090	BUG_ON(!page);
2091	if (page_to_nid(page) != node) {
2092		printk(KERN_ERR "SLUB: Unable to allocate memory from "
2093				"node %d\n", node);
2094		printk(KERN_ERR "SLUB: Allocating a useless per node structure "
2095				"in order to be able to continue\n");
2096	}
2097
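	/*
	 * Carve the first free object out of the new slab by hand and use it
	 * as the kmem_cache_node structure for this node.
	 */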
2098	n = page->freelist;
2099	BUG_ON(!n);
2100	page->freelist = get_freepointer(kmalloc_caches, n);
2101	page->inuse++;
2102	kmalloc_caches->node[node] = n;
2103#ifdef CONFIG_SLUB_DEBUG
2104	init_object(kmalloc_caches, n, 1);
2105	init_tracking(kmalloc_caches, n);
2106#endif
2107	init_kmem_cache_node(n, kmalloc_caches);
2108	inc_slabs_node(kmalloc_caches, node, page->objects);
2109
2110	/*
2111	 * lockdep requires consistent irq usage for each lock
2112	 * so even though there cannot be a race this early in
2113	 * the boot sequence, we still disable irqs.
2114	 */
2115	local_irq_save(flags);
2116	add_partial(n, page, 0);
2117	local_irq_restore(flags);
2118}
2119
2120static void free_kmem_cache_nodes(struct kmem_cache *s)
2121{
2122	int node;
2123
2124	for_each_node_state(node, N_NORMAL_MEMORY) {
2125		struct kmem_cache_node *n = s->node[node];
2126		if (n && n != &s->local_node)
2127			kmem_cache_free(kmalloc_caches, n);
2128		s->node[node] = NULL;
2129	}
2130}
2131
2132static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
2133{
2134	int node;
2135	int local_node;
2136
2137	if (slab_state >= UP)
2138		local_node = page_to_nid(virt_to_page(s));
2139	else
2140		local_node = 0;
2141
2142	for_each_node_state(node, N_NORMAL_MEMORY) {
2143		struct kmem_cache_node *n;
2144
2145		if (local_node == node)
2146			n = &s->local_node;
2147		else {
2148			if (slab_state == DOWN) {
2149				early_kmem_cache_node_alloc(gfpflags, node);
2150				continue;
2151			}
2152			n = kmem_cache_alloc_node(kmalloc_caches,
2153							gfpflags, node);
2154
2155			if (!n) {
2156				free_kmem_cache_nodes(s);
2157				return 0;
2158			}
2159
2160		}
2161		s->node[node] = n;
2162		init_kmem_cache_node(n, s);
2163	}
2164	return 1;
2165}
2166#else
2167static void free_kmem_cache_nodes(struct kmem_cache *s)
2168{
2169}
2170
2171static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
2172{
2173	init_kmem_cache_node(&s->local_node, s);
2174	return 1;
2175}
2176#endif
2177
2178/*
2179 * calculate_sizes() determines the order and the distribution of data within
2180 * a slab object.
2181 */
2182static int calculate_sizes(struct kmem_cache *s, int forced_order)
2183{
2184	unsigned long flags = s->flags;
2185	unsigned long size = s->objsize;
2186	unsigned long align = s->align;
2187	int order;
2188
2189	/*
2190	 * Round up object size to the next word boundary. We can only
2191	 * place the free pointer at word boundaries and this determines
2192	 * the possible location of the free pointer.
2193	 */
2194	size = ALIGN(size, sizeof(void *));
2195
2196#ifdef CONFIG_SLUB_DEBUG
2197	/*
2198	 * Determine if we can poison the object itself. If the user of
2199	 * the slab may touch the object after free or before allocation
2200	 * then we should never poison the object itself.
2201	 */
2202	if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) &&
2203			!s->ctor)
2204		s->flags |= __OBJECT_POISON;
2205	else
2206		s->flags &= ~__OBJECT_POISON;
2207
2208
2209	/*
2210	 * If we are Redzoning then check if there is some space between the
2211	 * end of the object and the free pointer. If not then add an
2212	 * additional word to have some bytes to store Redzone information.
2213	 */
2214	if ((flags & SLAB_RED_ZONE) && size == s->objsize)
2215		size += sizeof(void *);
2216#endif
2217
2218	/*
2219	 * With that we have determined the number of bytes in actual use
2220	 * by the object. This is the potential offset to the free pointer.
2221	 */
2222	s->inuse = size;
2223
2224	if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
2225		s->ctor)) {
2226		/*
2227		 * Relocate free pointer after the object if it is not
2228		 * permitted to overwrite the first word of the object on
2229		 * kmem_cache_free.
2230		 *
2231		 * This is the case if we do RCU, have a constructor or
2232		 * are poisoning the objects.
2233		 */
2234		s->offset = size;
2235		size += sizeof(void *);
2236	}
2237
2238#ifdef CONFIG_SLUB_DEBUG
2239	if (flags & SLAB_STORE_USER)
2240		/*
2241		 * Need to store information about allocs and frees after
2242		 * the object.
2243		 */
2244		size += 2 * sizeof(struct track);
2245
2246	if (flags & SLAB_RED_ZONE)
2247		/*
2248		 * Add some empty padding so that we can catch
2249		 * overwrites from earlier objects rather than let
2250		 * tracking information or the free pointer be
2251		 * corrupted if a user writes before the start
2252		 * of the object.
2253		 */
2254		size += sizeof(void *);
2255#endif
2256
2257	/*
2258	 * Determine the alignment based on various parameters that the
2259	 * user specified and the dynamic determination of cache line size
2260	 * on bootup.
2261	 */
2262	align = calculate_alignment(flags, align, s->objsize);
2263
2264	/*
2265	 * SLUB stores one object immediately after another beginning from
2266	 * offset 0. In order to align the objects we have to simply size
2267	 * each object to conform to the alignment.
2268	 */
2269	size = ALIGN(size, align);
2270	s->size = size;
2271	if (forced_order >= 0)
2272		order = forced_order;
2273	else
2274		order = calculate_order(size);
2275
2276	if (order < 0)
2277		return 0;
2278
2279	s->allocflags = 0;
2280	if (order)
2281		s->allocflags |= __GFP_COMP;
2282
2283	if (s->flags & SLAB_CACHE_DMA)
2284		s->allocflags |= SLUB_DMA;
2285
2286	if (s->flags & SLAB_RECLAIM_ACCOUNT)
2287		s->allocflags |= __GFP_RECLAIMABLE;
2288
2289	/*
2290	 * Determine the number of objects per slab
2291	 */
2292	s->oo = oo_make(order, size);
2293	s->min = oo_make(get_order(size), size);
2294	if (oo_objects(s->oo) > oo_objects(s->max))
2295		s->max = s->oo;
2296
2297	return !!oo_objects(s->oo);
2298
2299}
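
/*
 * Layout example (hypothetical, assuming a 64 bit machine with 8 byte
 * ARCH_SLAB_MINALIGN and no debug flags): a 20 byte object with a
 * constructor is first rounded up to 24 bytes (s->inuse = 24). Because a
 * constructor is present the free pointer cannot share space with the
 * object, so s->offset = 24 and the size grows to 32. After alignment
 * s->size = 32, so an order-0 4KB slab holds 128 objects.
 */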
2300
2301static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
2302		const char *name, size_t size,
2303		size_t align, unsigned long flags,
2304		void (*ctor)(void *))
2305{
2306	memset(s, 0, kmem_size);
2307	s->name = name;
2308	s->ctor = ctor;
2309	s->objsize = size;
2310	s->align = align;
2311	s->flags = kmem_cache_flags(size, flags, name, ctor);
2312
2313	if (!calculate_sizes(s, -1))
2314		goto error;
2315
2316	s->refcount = 1;
2317#ifdef CONFIG_NUMA
2318	s->remote_node_defrag_ratio = 1000;
2319#endif
2320	if (!init_kmem_cache_nodes(s, gfpflags & ~SLUB_DMA))
2321		goto error;
2322
2323	if (alloc_kmem_cache_cpus(s, gfpflags & ~SLUB_DMA))
2324		return 1;
2325	free_kmem_cache_nodes(s);
2326error:
2327	if (flags & SLAB_PANIC)
2328		panic("Cannot create slab %s size=%lu realsize=%u "
2329			"order=%u offset=%u flags=%lx\n",
2330			s->name, (unsigned long)size, s->size, oo_order(s->oo),
2331			s->offset, flags);
2332	return 0;
2333}
2334
2335/*
2336 * Check if a given pointer is valid
2337 */
2338int kmem_ptr_validate(struct kmem_cache *s, const void *object)
2339{
2340	struct page *page;
2341
2342	page = get_object_page(object);
2343
2344	if (!page || s != page->slab)
2345		/* No slab or wrong slab */
2346		return 0;
2347
2348	if (!check_valid_pointer(s, page, object))
2349		return 0;
2350
2351	/*
2352	 * We could also check if the object is on the slabs freelist.
2353	 * But this would be too expensive and it seems that the main
2354 * purpose of kmem_ptr_validate() is to check if the object belongs
2355	 * to a certain slab.
2356	 */
2357	return 1;
2358}
2359EXPORT_SYMBOL(kmem_ptr_validate);
2360
2361/*
2362 * Determine the size of a slab object
2363 */
2364unsigned int kmem_cache_size(struct kmem_cache *s)
2365{
2366	return s->objsize;
2367}
2368EXPORT_SYMBOL(kmem_cache_size);
2369
2370const char *kmem_cache_name(struct kmem_cache *s)
2371{
2372	return s->name;
2373}
2374EXPORT_SYMBOL(kmem_cache_name);
2375
2376static void list_slab_objects(struct kmem_cache *s, struct page *page,
2377							const char *text)
2378{
2379#ifdef CONFIG_SLUB_DEBUG
2380	void *addr = page_address(page);
2381	void *p;
2382	DECLARE_BITMAP(map, page->objects);
2383
2384	bitmap_zero(map, page->objects);
2385	slab_err(s, page, "%s", text);
2386	slab_lock(page);
2387	for_each_free_object(p, s, page->freelist)
2388		set_bit(slab_index(p, s, addr), map);
2389
2390	for_each_object(p, s, addr, page->objects) {
2391
2392		if (!test_bit(slab_index(p, s, addr), map)) {
2393			printk(KERN_ERR "INFO: Object 0x%p @offset=%tu\n",
2394							p, p - addr);
2395			print_tracking(s, p);
2396		}
2397	}
2398	slab_unlock(page);
2399#endif
2400}
2401
2402/*
2403 * Attempt to free all partial slabs on a node.
2404 */
2405static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
2406{
2407	unsigned long flags;
2408	struct page *page, *h;
2409
2410	spin_lock_irqsave(&n->list_lock, flags);
2411	list_for_each_entry_safe(page, h, &n->partial, lru) {
2412		if (!page->inuse) {
2413			list_del(&page->lru);
2414			discard_slab(s, page);
2415			n->nr_partial--;
2416		} else {
2417			list_slab_objects(s, page,
2418				"Objects remaining on kmem_cache_close()");
2419		}
2420	}
2421	spin_unlock_irqrestore(&n->list_lock, flags);
2422}
2423
2424/*
2425 * Release all resources used by a slab cache.
2426 */
2427static inline int kmem_cache_close(struct kmem_cache *s)
2428{
2429	int node;
2430
2431	flush_all(s);
2432
2433	/* Attempt to free all objects */
2434	free_kmem_cache_cpus(s);
2435	for_each_node_state(node, N_NORMAL_MEMORY) {
2436		struct kmem_cache_node *n = get_node(s, node);
2437
2438		free_partial(s, n);
2439		if (n->nr_partial || slabs_node(s, node))
2440			return 1;
2441	}
2442	free_kmem_cache_nodes(s);
2443	return 0;
2444}
2445
2446/*
2447 * Close a cache and release the kmem_cache structure
2448 * (must be used for caches created using kmem_cache_create)
2449 */
2450void kmem_cache_destroy(struct kmem_cache *s)
2451{
2452	down_write(&slub_lock);
2453	s->refcount--;
2454	if (!s->refcount) {
2455		list_del(&s->list);
2456		up_write(&slub_lock);
2457		if (kmem_cache_close(s)) {
2458			printk(KERN_ERR "SLUB %s: %s called for cache that "
2459				"still has objects.\n", s->name, __func__);
2460			dump_stack();
2461		}
2462		sysfs_slab_remove(s);
2463	} else
2464		up_write(&slub_lock);
2465}
2466EXPORT_SYMBOL(kmem_cache_destroy);
2467
2468/********************************************************************
2469 *		Kmalloc subsystem
2470 *******************************************************************/
2471
2472struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1] __cacheline_aligned;
2473EXPORT_SYMBOL(kmalloc_caches);
2474
2475static int __init setup_slub_min_order(char *str)
2476{
2477	get_option(&str, &slub_min_order);
2478
2479	return 1;
2480}
2481
2482__setup("slub_min_order=", setup_slub_min_order);
2483
2484static int __init setup_slub_max_order(char *str)
2485{
2486	get_option(&str, &slub_max_order);
2487
2488	return 1;
2489}
2490
2491__setup("slub_max_order=", setup_slub_max_order);
2492
2493static int __init setup_slub_min_objects(char *str)
2494{
2495	get_option(&str, &slub_min_objects);
2496
2497	return 1;
2498}
2499
2500__setup("slub_min_objects=", setup_slub_min_objects);
2501
2502static int __init setup_slub_nomerge(char *str)
2503{
2504	slub_nomerge = 1;
2505	return 1;
2506}
2507
2508__setup("slub_nomerge", setup_slub_nomerge);
2509
2510static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
2511		const char *name, int size, gfp_t gfp_flags)
2512{
2513	unsigned int flags = 0;
2514
2515	if (gfp_flags & SLUB_DMA)
2516		flags = SLAB_CACHE_DMA;
2517
2518	down_write(&slub_lock);
2519	if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
2520								flags, NULL))
2521		goto panic;
2522
2523	list_add(&s->list, &slab_caches);
2524	up_write(&slub_lock);
2525	if (sysfs_slab_add(s))
2526		goto panic;
2527	return s;
2528
2529panic:
2530	panic("Creation of kmalloc slab %s size=%d failed.\n", name, size);
2531}
2532
2533#ifdef CONFIG_ZONE_DMA
2534static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT + 1];
2535
2536static void sysfs_add_func(struct work_struct *w)
2537{
2538	struct kmem_cache *s;
2539
2540	down_write(&slub_lock);
2541	list_for_each_entry(s, &slab_caches, list) {
2542		if (s->flags & __SYSFS_ADD_DEFERRED) {
2543			s->flags &= ~__SYSFS_ADD_DEFERRED;
2544			sysfs_slab_add(s);
2545		}
2546	}
2547	up_write(&slub_lock);
2548}
2549
2550static DECLARE_WORK(sysfs_add_work, sysfs_add_func);
2551
2552static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
2553{
2554	struct kmem_cache *s;
2555	char *text;
2556	size_t realsize;
2557
2558	s = kmalloc_caches_dma[index];
2559	if (s)
2560		return s;
2561
2562	/* Dynamically create dma cache */
2563	if (flags & __GFP_WAIT)
2564		down_write(&slub_lock);
2565	else {
2566		if (!down_write_trylock(&slub_lock))
2567			goto out;
2568	}
2569
2570	if (kmalloc_caches_dma[index])
2571		goto unlock_out;
2572
2573	realsize = kmalloc_caches[index].objsize;
2574	text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d",
2575			 (unsigned int)realsize);
2576	s = kmalloc(kmem_size, flags & ~SLUB_DMA);
2577
2578	if (!s || !text || !kmem_cache_open(s, flags, text,
2579			realsize, ARCH_KMALLOC_MINALIGN,
2580			SLAB_CACHE_DMA|__SYSFS_ADD_DEFERRED, NULL)) {
2581		kfree(s);
2582		kfree(text);
2583		goto unlock_out;
2584	}
2585
2586	list_add(&s->list, &slab_caches);
2587	kmalloc_caches_dma[index] = s;
2588
2589	schedule_work(&sysfs_add_work);
2590
2591unlock_out:
2592	up_write(&slub_lock);
2593out:
2594	return kmalloc_caches_dma[index];
2595}
2596#endif
2597
2598/*
2599 * Conversion table for small slab sizes / 8 to the index in the
2600 * kmalloc array. This is necessary for slabs < 192 since we have non power
2601 * of two cache sizes there. The size of larger slabs can be determined using
2602 * fls.
2603 */
2604static s8 size_index[24] = {
2605	3,	/* 8 */
2606	4,	/* 16 */
2607	5,	/* 24 */
2608	5,	/* 32 */
2609	6,	/* 40 */
2610	6,	/* 48 */
2611	6,	/* 56 */
2612	6,	/* 64 */
2613	1,	/* 72 */
2614	1,	/* 80 */
2615	1,	/* 88 */
2616	1,	/* 96 */
2617	7,	/* 104 */
2618	7,	/* 112 */
2619	7,	/* 120 */
2620	7,	/* 128 */
2621	2,	/* 136 */
2622	2,	/* 144 */
2623	2,	/* 152 */
2624	2,	/* 160 */
2625	2,	/* 168 */
2626	2,	/* 176 */
2627	2,	/* 184 */
2628	2	/* 192 */
2629};
2630
2631static struct kmem_cache *get_slab(size_t size, gfp_t flags)
2632{
2633	int index;
2634
2635	if (size <= 192) {
2636		if (!size)
2637			return ZERO_SIZE_PTR;
2638
2639		index = size_index[(size - 1) / 8];
2640	} else
2641		index = fls(size - 1);
2642
2643#ifdef CONFIG_ZONE_DMA
2644	if (unlikely((flags & SLUB_DMA)))
2645		return dma_kmalloc_cache(index, flags);
2646
2647#endif
2648	return &kmalloc_caches[index];
2649}
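
/*
 * Examples: kmalloc(100) maps via size_index[(100 - 1) / 8] == size_index[12]
 * == 7 to the 128 byte cache, while kmalloc(300) exceeds 192 and maps via
 * fls(299) == 9 to the 512 byte cache.
 */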
2650
2651void *__kmalloc(size_t size, gfp_t flags)
2652{
2653	struct kmem_cache *s;
2654
2655	if (unlikely(size > PAGE_SIZE))
2656		return kmalloc_large(size, flags);
2657
2658	s = get_slab(size, flags);
2659
2660	if (unlikely(ZERO_OR_NULL_PTR(s)))
2661		return s;
2662
2663	return slab_alloc(s, flags, -1, _RET_IP_);
2664}
2665EXPORT_SYMBOL(__kmalloc);
2666
2667static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
2668{
2669	struct page *page = alloc_pages_node(node, flags | __GFP_COMP,
2670						get_order(size));
2671
2672	if (page)
2673		return page_address(page);
2674	else
2675		return NULL;
2676}
2677
2678#ifdef CONFIG_NUMA
2679void *__kmalloc_node(size_t size, gfp_t flags, int node)
2680{
2681	struct kmem_cache *s;
2682
2683	if (unlikely(size > PAGE_SIZE))
2684		return kmalloc_large_node(size, flags, node);
2685
2686	s = get_slab(size, flags);
2687
2688	if (unlikely(ZERO_OR_NULL_PTR(s)))
2689		return s;
2690
2691	return slab_alloc(s, flags, node, _RET_IP_);
2692}
2693EXPORT_SYMBOL(__kmalloc_node);
2694#endif
2695
2696size_t ksize(const void *object)
2697{
2698	struct page *page;
2699	struct kmem_cache *s;
2700
2701	if (unlikely(object == ZERO_SIZE_PTR))
2702		return 0;
2703
2704	page = virt_to_head_page(object);
2705
2706	if (unlikely(!PageSlab(page))) {
2707		WARN_ON(!PageCompound(page));
2708		return PAGE_SIZE << compound_order(page);
2709	}
2710	s = page->slab;
2711
2712#ifdef CONFIG_SLUB_DEBUG
2713	/*
2714	 * Debugging requires use of the padding between object
2715	 * and whatever may come after it.
2716	 */
2717	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
2718		return s->objsize;
2719
2720#endif
2721	/*
2722	 * If we have the need to store the freelist pointer
2723	 * back there or track user information then we can
2724	 * only use the space before that information.
2725	 */
2726	if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
2727		return s->inuse;
2728	/*
2729	 * Else we can use all the padding etc for the allocation
2730	 */
2731	return s->size;
2732}
2733
2734void kfree(const void *x)
2735{
2736	struct page *page;
2737	void *object = (void *)x;
2738
2739	if (unlikely(ZERO_OR_NULL_PTR(x)))
2740		return;
2741
2742	page = virt_to_head_page(x);
2743	if (unlikely(!PageSlab(page))) {
2744		BUG_ON(!PageCompound(page));
2745		put_page(page);
2746		return;
2747	}
2748	slab_free(page->slab, page, object, _RET_IP_);
2749}
2750EXPORT_SYMBOL(kfree);
2751
2752/*
2753 * kmem_cache_shrink removes empty slabs from the partial lists and sorts
2754 * the remaining slabs by the number of items in use. The slabs with the
2755 * most items in use come first. New allocations will then fill those up
2756 * and thus they can be removed from the partial lists.
2757 *
2758 * The slabs with the fewest items in use are placed last. This results in
2759 * them being allocated from last, increasing the chance that the remaining
2760 * objects in them are freed so the slabs can eventually be discarded.
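 *
 * E.g. partial slabs with inuse counts {3, 0, 7, 1} end up ordered
 * {7, 3, 1} after the empty slab has been discarded.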
2761 */
2762int kmem_cache_shrink(struct kmem_cache *s)
2763{
2764	int node;
2765	int i;
2766	struct kmem_cache_node *n;
2767	struct page *page;
2768	struct page *t;
2769	int objects = oo_objects(s->max);
2770	struct list_head *slabs_by_inuse =
2771		kmalloc(sizeof(struct list_head) * objects, GFP_KERNEL);
2772	unsigned long flags;
2773
2774	if (!slabs_by_inuse)
2775		return -ENOMEM;
2776
2777	flush_all(s);
2778	for_each_node_state(node, N_NORMAL_MEMORY) {
2779		n = get_node(s, node);
2780
2781		if (!n->nr_partial)
2782			continue;
2783
2784		for (i = 0; i < objects; i++)
2785			INIT_LIST_HEAD(slabs_by_inuse + i);
2786
2787		spin_lock_irqsave(&n->list_lock, flags);
2788
2789		/*
2790		 * Build lists indexed by the items in use in each slab.
2791		 *
2792		 * Note that concurrent frees may occur while we hold the
2793		 * list_lock. page->inuse here is the upper limit.
2794		 */
2795		list_for_each_entry_safe(page, t, &n->partial, lru) {
2796			if (!page->inuse && slab_trylock(page)) {
2797				/*
2798				 * Must hold slab lock here because slab_free
2799				 * may have freed the last object and be
2800				 * waiting to release the slab.
2801				 */
2802				list_del(&page->lru);
2803				n->nr_partial--;
2804				slab_unlock(page);
2805				discard_slab(s, page);
2806			} else {
2807				list_move(&page->lru,
2808				slabs_by_inuse + page->inuse);
2809			}
2810		}
2811
2812		/*
2813		 * Rebuild the partial list with the slabs filled up most
2814		 * first and the least used slabs at the end.
2815		 */
2816		for (i = objects - 1; i >= 0; i--)
2817			list_splice(slabs_by_inuse + i, n->partial.prev);
2818
2819		spin_unlock_irqrestore(&n->list_lock, flags);
2820	}
2821
2822	kfree(slabs_by_inuse);
2823	return 0;
2824}
2825EXPORT_SYMBOL(kmem_cache_shrink);
2826
2827#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
2828static int slab_mem_going_offline_callback(void *arg)
2829{
2830	struct kmem_cache *s;
2831
2832	down_read(&slub_lock);
2833	list_for_each_entry(s, &slab_caches, list)
2834		kmem_cache_shrink(s);
2835	up_read(&slub_lock);
2836
2837	return 0;
2838}
2839
2840static void slab_mem_offline_callback(void *arg)
2841{
2842	struct kmem_cache_node *n;
2843	struct kmem_cache *s;
2844	struct memory_notify *marg = arg;
2845	int offline_node;
2846
2847	offline_node = marg->status_change_nid;
2848
2849	/*
2850	 * If the node still has available memory, we still need the
2851	 * kmem_cache_node for it, so there is nothing to do here.
2852	 */
2853	if (offline_node < 0)
2854		return;
2855
2856	down_read(&slub_lock);
2857	list_for_each_entry(s, &slab_caches, list) {
2858		n = get_node(s, offline_node);
2859		if (n) {
2860			/*
2861			 * If n->nr_slabs > 0, slabs still exist on the node
2862			 * that is going down. We were unable to free them,
2863			 * and the offline_pages() function shouldn't have
2864			 * called this callback. So, we must fail.
2865			 */
2866			BUG_ON(slabs_node(s, offline_node));
2867
2868			s->node[offline_node] = NULL;
2869			kmem_cache_free(kmalloc_caches, n);
2870		}
2871	}
2872	up_read(&slub_lock);
2873}
2874
2875static int slab_mem_going_online_callback(void *arg)
2876{
2877	struct kmem_cache_node *n;
2878	struct kmem_cache *s;
2879	struct memory_notify *marg = arg;
2880	int nid = marg->status_change_nid;
2881	int ret = 0;
2882
2883	/*
2884	 * If the node's memory is already available, then kmem_cache_node is
2885	 * already created. Nothing to do.
2886	 */
2887	if (nid < 0)
2888		return 0;
2889
2890	/*
2891	 * We are bringing a node online. No memory is available yet. We must
2892	 * allocate a kmem_cache_node structure in order to bring the node
2893	 * online.
2894	 */
2895	down_read(&slub_lock);
2896	list_for_each_entry(s, &slab_caches, list) {
2897		/*
2898		 * XXX: kmem_cache_alloc_node will fall back to other nodes
2899		 *      since memory is not yet available from the node that
2900		 *      is brought up.
2901		 */
2902		n = kmem_cache_alloc(kmalloc_caches, GFP_KERNEL);
2903		if (!n) {
2904			ret = -ENOMEM;
2905			goto out;
2906		}
2907		init_kmem_cache_node(n, s);
2908		s->node[nid] = n;
2909	}
2910out:
2911	up_read(&slub_lock);
2912	return ret;
2913}
2914
2915static int slab_memory_callback(struct notifier_block *self,
2916				unsigned long action, void *arg)
2917{
2918	int ret = 0;
2919
2920	switch (action) {
2921	case MEM_GOING_ONLINE:
2922		ret = slab_mem_going_online_callback(arg);
2923		break;
2924	case MEM_GOING_OFFLINE:
2925		ret = slab_mem_going_offline_callback(arg);
2926		break;
2927	case MEM_OFFLINE:
2928	case MEM_CANCEL_ONLINE:
2929		slab_mem_offline_callback(arg);
2930		break;
2931	case MEM_ONLINE:
2932	case MEM_CANCEL_OFFLINE:
2933		break;
2934	}
2935
2936	ret = notifier_from_errno(ret);
2937	return ret;
2938}
2939
2940#endif /* CONFIG_MEMORY_HOTPLUG */
2941
2942/********************************************************************
2943 *			Basic setup of slabs
2944 *******************************************************************/
2945
2946void __init kmem_cache_init(void)
2947{
2948	int i;
2949	int caches = 0;
2950
2951	init_alloc_cpu();
2952
2953#ifdef CONFIG_NUMA
2954	/*
2955	 * Must first have the slab cache available for the allocations of the
2956	 * struct kmem_cache_node's. There is special bootstrap code in
2957	 * kmem_cache_open for slab_state == DOWN.
2958	 */
2959	create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
2960		sizeof(struct kmem_cache_node), GFP_KERNEL);
2961	kmalloc_caches[0].refcount = -1;
2962	caches++;
2963
2964	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
2965#endif
2966
2967	/* Able to allocate the per node structures */
2968	slab_state = PARTIAL;
2969
2970	/* Caches that are not of the two-to-the-power-of size */
2971	if (KMALLOC_MIN_SIZE <= 64) {
2972		create_kmalloc_cache(&kmalloc_caches[1],
2973				"kmalloc-96", 96, GFP_KERNEL);
2974		caches++;
2975		create_kmalloc_cache(&kmalloc_caches[2],
2976				"kmalloc-192", 192, GFP_KERNEL);
2977		caches++;
2978	}
2979
2980	for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++) {
2981		create_kmalloc_cache(&kmalloc_caches[i],
2982			"kmalloc", 1 << i, GFP_KERNEL);
2983		caches++;
2984	}
2985
2986
2987	/*
2988	 * Patch up the size_index table if we have strange large alignment
2989	 * requirements for the kmalloc array. This is only the case for
2990	 * MIPS it seems. The standard arches will not generate any code here.
2991	 *
2992	 * Largest permitted alignment is 256 bytes due to the way we
2993	 * handle the index determination for the smaller caches.
2994	 *
2995	 * Make sure that nothing crazy happens if someone starts tinkering
2996	 * around with ARCH_KMALLOC_MINALIGN
2997	 */
2998	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
2999		(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
3000
3001	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8)
3002		size_index[(i - 1) / 8] = KMALLOC_SHIFT_LOW;
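	/*
	 * E.g. on a hypothetical arch where KMALLOC_MIN_SIZE is 64, the loop
	 * above redirects the entries for sizes 8..56 to the
	 * KMALLOC_SHIFT_LOW (64 byte) cache.
	 */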
3003
3004	if (KMALLOC_MIN_SIZE == 128) {
3005		/*
3006		 * The 192 byte sized cache is not used if the alignment
3007		 * is 128 byte. Redirect kmalloc to use the 256 byte cache
3008		 * instead.
3009		 */
3010		for (i = 128 + 8; i <= 192; i += 8)
3011			size_index[(i - 1) / 8] = 8;
3012	}
3013
3014	slab_state = UP;
3015
3016	/* Provide the correct kmalloc names now that the caches are up */
3017	for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++)
3018		kmalloc_caches[i].name =
3019			kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
3020
3021#ifdef CONFIG_SMP
3022	register_cpu_notifier(&slab_notifier);
3023	kmem_size = offsetof(struct kmem_cache, cpu_slab) +
3024				nr_cpu_ids * sizeof(struct kmem_cache_cpu *);
3025#else
3026	kmem_size = sizeof(struct kmem_cache);
3027#endif
3028
3029	printk(KERN_INFO
3030		"SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
3031		" CPUs=%d, Nodes=%d\n",
3032		caches, cache_line_size(),
3033		slub_min_order, slub_max_order, slub_min_objects,
3034		nr_cpu_ids, nr_node_ids);
3035}
3036
3037/*
3038 * Find a mergeable slab cache
3039 */
3040static int slab_unmergeable(struct kmem_cache *s)
3041{
3042	if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
3043		return 1;
3044
3045	if (s->ctor)
3046		return 1;
3047
3048	/*
3049	 * We may have set a slab to be unmergeable during bootstrap.
3050	 */
3051	if (s->refcount < 0)
3052		return 1;
3053
3054	return 0;
3055}
3056
3057static struct kmem_cache *find_mergeable(size_t size,
3058		size_t align, unsigned long flags, const char *name,
3059		void (*ctor)(void *))
3060{
3061	struct kmem_cache *s;
3062
3063	if (slub_nomerge || (flags & SLUB_NEVER_MERGE))
3064		return NULL;
3065
3066	if (ctor)
3067		return NULL;
3068
3069	size = ALIGN(size, sizeof(void *));
3070	align = calculate_alignment(flags, align, size);
3071	size = ALIGN(size, align);
3072	flags = kmem_cache_flags(size, flags, name, NULL);
3073
3074	list_for_each_entry(s, &slab_caches, list) {
3075		if (slab_unmergeable(s))
3076			continue;
3077
3078		if (size > s->size)
3079			continue;
3080
3081		if ((flags & SLUB_MERGE_SAME) != (s->flags & SLUB_MERGE_SAME))
3082			continue;
3083		/*
3084		 * Check if alignment is compatible.
3085		 * Courtesy of Adrian Drzewiecki
3086		 */
3087		if ((s->size & ~(align - 1)) != s->size)
3088			continue;
3089
3090		if (s->size - size >= sizeof(void *))
3091			continue;
3092
3093		return s;
3094	}
3095	return NULL;
3096}
3097
3098struct kmem_cache *kmem_cache_create(const char *name, size_t size,
3099		size_t align, unsigned long flags, void (*ctor)(void *))
3100{
3101	struct kmem_cache *s;
3102
3103	down_write(&slub_lock);
3104	s = find_mergeable(size, align, flags, name, ctor);
3105	if (s) {
3106		int cpu;
3107
3108		s->refcount++;
3109		/*
3110		 * Adjust the object sizes so that we clear
3111		 * the complete object on kzalloc.
3112		 */
3113		s->objsize = max(s->objsize, (int)size);
3114
3115		/*
3116		 * And then we need to update the object size in the
3117		 * per cpu structures
3118		 */
3119		for_each_online_cpu(cpu)
3120			get_cpu_slab(s, cpu)->objsize = s->objsize;
3121
3122		s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
3123		up_write(&slub_lock);
3124
3125		if (sysfs_slab_alias(s, name))
3126			goto err;
3127		return s;
3128	}
3129
3130	s = kmalloc(kmem_size, GFP_KERNEL);
3131	if (s) {
3132		if (kmem_cache_open(s, GFP_KERNEL, name,
3133				size, align, flags, ctor)) {
3134			list_add(&s->list, &slab_caches);
3135			up_write(&slub_lock);
3136			if (sysfs_slab_add(s))
3137				goto err;
3138			return s;
3139		}
3140		kfree(s);
3141	}
3142	up_write(&slub_lock);
3143
3144err:
3145	if (flags & SLAB_PANIC)
3146		panic("Cannot create slabcache %s\n", name);
3147	else
3148		s = NULL;
3149	return s;
3150}
3151EXPORT_SYMBOL(kmem_cache_create);
3152
3153#ifdef CONFIG_SMP
3154/*
3155 * Use the cpu notifier to ensure that the cpu slabs are flushed when
3156 * necessary.
3157 */
3158static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
3159		unsigned long action, void *hcpu)
3160{
3161	long cpu = (long)hcpu;
3162	struct kmem_cache *s;
3163	unsigned long flags;
3164
3165	switch (action) {
3166	case CPU_UP_PREPARE:
3167	case CPU_UP_PREPARE_FROZEN:
3168		init_alloc_cpu_cpu(cpu);
3169		down_read(&slub_lock);
3170		list_for_each_entry(s, &slab_caches, list)
3171			s->cpu_slab[cpu] = alloc_kmem_cache_cpu(s, cpu,
3172							GFP_KERNEL);
3173		up_read(&slub_lock);
3174		break;
3175
3176	case CPU_UP_CANCELED:
3177	case CPU_UP_CANCELED_FROZEN:
3178	case CPU_DEAD:
3179	case CPU_DEAD_FROZEN:
3180		down_read(&slub_lock);
3181		list_for_each_entry(s, &slab_caches, list) {
3182			struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
3183
3184			local_irq_save(flags);
3185			__flush_cpu_slab(s, cpu);
3186			local_irq_restore(flags);
3187			free_kmem_cache_cpu(c, cpu);
3188			s->cpu_slab[cpu] = NULL;
3189		}
3190		up_read(&slub_lock);
3191		break;
3192	default:
3193		break;
3194	}
3195	return NOTIFY_OK;
3196}
3197
3198static struct notifier_block __cpuinitdata slab_notifier = {
3199	.notifier_call = slab_cpuup_callback
3200};
3201
3202#endif
3203
3204void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
3205{
3206	struct kmem_cache *s;
3207
3208	if (unlikely(size > PAGE_SIZE))
3209		return kmalloc_large(size, gfpflags);
3210
3211	s = get_slab(size, gfpflags);
3212
3213	if (unlikely(ZERO_OR_NULL_PTR(s)))
3214		return s;
3215
3216	return slab_alloc(s, gfpflags, -1, caller);
3217}
3218
3219void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
3220					int node, unsigned long caller)
3221{
3222	struct kmem_cache *s;
3223
3224	if (unlikely(size > PAGE_SIZE))
3225		return kmalloc_large_node(size, gfpflags, node);
3226
3227	s = get_slab(size, gfpflags);
3228
3229	if (unlikely(ZERO_OR_NULL_PTR(s)))
3230		return s;
3231
3232	return slab_alloc(s, gfpflags, node, caller);
3233}
3234
3235#ifdef CONFIG_SLUB_DEBUG
3236static unsigned long count_partial(struct kmem_cache_node *n,
3237					int (*get_count)(struct page *))
3238{
3239	unsigned long flags;
3240	unsigned long x = 0;
3241	struct page *page;
3242
3243	spin_lock_irqsave(&n->list_lock, flags);
3244	list_for_each_entry(page, &n->partial, lru)
3245		x += get_count(page);
3246	spin_unlock_irqrestore(&n->list_lock, flags);
3247	return x;
3248}
3249
3250static int count_inuse(struct page *page)
3251{
3252	return page->inuse;
3253}
3254
3255static int count_total(struct page *page)
3256{
3257	return page->objects;
3258}
3259
3260static int count_free(struct page *page)
3261{
3262	return page->objects - page->inuse;
3263}
3264
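/*
 * Verify a single slab: check the slab metadata and freelist, then make
 * sure every free object looks free and every allocated object looks
 * allocated.
 */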
3265static int validate_slab(struct kmem_cache *s, struct page *page,
3266						unsigned long *map)
3267{
3268	void *p;
3269	void *addr = page_address(page);
3270
3271	if (!check_slab(s, page) ||
3272			!on_freelist(s, page, NULL))
3273		return 0;
3274
3275	/* Now we know that a valid freelist exists */
3276	bitmap_zero(map, page->objects);
3277
3278	for_each_free_object(p, s, page->freelist) {
3279		set_bit(slab_index(p, s, addr), map);
3280		if (!check_object(s, page, p, 0))
3281			return 0;
3282	}
3283
3284	for_each_object(p, s, addr, page->objects)
3285		if (!test_bit(slab_index(p, s, addr), map))
3286			if (!check_object(s, page, p, 1))
3287				return 0;
3288	return 1;
3289}
3290
3291static void validate_slab_slab(struct kmem_cache *s, struct page *page,
3292						unsigned long *map)
3293{
3294	if (slab_trylock(page)) {
3295		validate_slab(s, page, map);
3296		slab_unlock(page);
3297	} else
3298		printk(KERN_INFO "SLUB %s: Skipped busy slab 0x%p\n",
3299			s->name, page);
3300
3301	if (s->flags & DEBUG_DEFAULT_FLAGS) {
3302		if (!PageSlubDebug(page))
3303			printk(KERN_ERR "SLUB %s: SlubDebug not set "
3304				"on slab 0x%p\n", s->name, page);
3305	} else {
3306		if (PageSlubDebug(page))
3307			printk(KERN_ERR "SLUB %s: SlubDebug set on "
3308				"slab 0x%p\n", s->name, page);
3309	}
3310}
3311
3312static int validate_slab_node(struct kmem_cache *s,
3313		struct kmem_cache_node *n, unsigned long *map)
3314{
3315	unsigned long count = 0;
3316	struct page *page;
3317	unsigned long flags;
3318
3319	spin_lock_irqsave(&n->list_lock, flags);
3320
3321	list_for_each_entry(page, &n->partial, lru) {
3322		validate_slab_slab(s, page, map);
3323		count++;
3324	}
3325	if (count != n->nr_partial)
3326		printk(KERN_ERR "SLUB %s: %ld partial slabs counted but "
3327			"counter=%ld\n", s->name, count, n->nr_partial);
3328
3329	if (!(s->flags & SLAB_STORE_USER))
3330		goto out;
3331
3332	list_for_each_entry(page, &n->full, lru) {
3333		validate_slab_slab(s, page, map);
3334		count++;
3335	}
3336	if (count != atomic_long_read(&n->nr_slabs))
3337		printk(KERN_ERR "SLUB: %s %ld slabs counted but "
3338			"counter=%ld\n", s->name, count,
3339			atomic_long_read(&n->nr_slabs));
3340
3341out:
3342	spin_unlock_irqrestore(&n->list_lock, flags);
3343	return count;
3344}
3345
3346static long validate_slab_cache(struct kmem_cache *s)
3347{
3348	int node;
3349	unsigned long count = 0;
3350	unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
3351				sizeof(unsigned long), GFP_KERNEL);
3352
3353	if (!map)
3354		return -ENOMEM;
3355
3356	flush_all(s);
3357	for_each_node_state(node, N_NORMAL_MEMORY) {
3358		struct kmem_cache_node *n = get_node(s, node);
3359
3360		count += validate_slab_node(s, n, map);
3361	}
3362	kfree(map);
3363	return count;
3364}
3365
3366#ifdef SLUB_RESILIENCY_TEST
3367static void resiliency_test(void)
3368{
3369	u8 *p;
3370
3371	printk(KERN_ERR "SLUB resiliency testing\n");
3372	printk(KERN_ERR "-----------------------\n");
3373	printk(KERN_ERR "A. Corruption after allocation\n");
3374
3375	p = kzalloc(16, GFP_KERNEL);
3376	p[16] = 0x12;
3377	printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer"
3378			" 0x12->0x%p\n\n", p + 16);
3379
3380	validate_slab_cache(kmalloc_caches + 4);
3381
3382	/* Hmmm... The next two are dangerous */
3383	p = kzalloc(32, GFP_KERNEL);
3384	p[32 + sizeof(void *)] = 0x34;
3385	printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab"
3386			" 0x34 -> 0x%p\n", p);
3387	printk(KERN_ERR
3388		"If allocated object is overwritten then not detectable\n\n");
3389
3390	validate_slab_cache(kmalloc_caches + 5);
3391	p = kzalloc(64, GFP_KERNEL);
3392	p += 64 + (get_cycles() & 0xff) * sizeof(void *);
3393	*p = 0x56;
3394	printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
3395									p);
3396	printk(KERN_ERR
3397		"If allocated object is overwritten then not detectable\n\n");
3398	validate_slab_cache(kmalloc_caches + 6);
3399
3400	printk(KERN_ERR "\nB. Corruption after free\n");
3401	p = kzalloc(128, GFP_KERNEL);
3402	kfree(p);
3403	*p = 0x78;
3404	printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
3405	validate_slab_cache(kmalloc_caches + 7);
3406
3407	p = kzalloc(256, GFP_KERNEL);
3408	kfree(p);
3409	p[50] = 0x9a;
3410	printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n",
3411			p);
3412	validate_slab_cache(kmalloc_caches + 8);
3413
3414	p = kzalloc(512, GFP_KERNEL);
3415	kfree(p);
3416	p[512] = 0xab;
3417	printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
3418	validate_slab_cache(kmalloc_caches + 9);
3419}
3420#else
3421static void resiliency_test(void) {};
3422#endif
3423
3424/*
3425 * Generate lists of code addresses where slabcache objects are allocated
3426 * and freed.
3427 */
3428
3429struct location {
3430	unsigned long count;
3431	unsigned long addr;
3432	long long sum_time;
3433	long min_time;
3434	long max_time;
3435	long min_pid;
3436	long max_pid;
3437	cpumask_t cpus;
3438	nodemask_t nodes;
3439};
3440
3441struct loc_track {
3442	unsigned long max;
3443	unsigned long count;
3444	struct location *loc;
3445};
3446
3447static void free_loc_track(struct loc_track *t)
3448{
3449	if (t->max)
3450		free_pages((unsigned long)t->loc,
3451			get_order(sizeof(struct location) * t->max));
3452}
3453
3454static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
3455{
3456	struct location *l;
3457	int order;
3458
3459	order = get_order(sizeof(struct location) * max);
3460
3461	l = (void *)__get_free_pages(flags, order);
3462	if (!l)
3463		return 0;
3464
3465	if (t->count) {
3466		memcpy(l, t->loc, sizeof(struct location) * t->count);
3467		free_loc_track(t);
3468	}
3469	t->max = max;
3470	t->loc = l;
3471	return 1;
3472}
3473
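/*
 * Record one allocation or free in the sorted location table. Entries are
 * kept sorted by caller address: an existing entry for the address is
 * updated in place, otherwise a new entry is inserted at the position found
 * by the binary search below (growing the table if necessary).
 */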
3474static int add_location(struct loc_track *t, struct kmem_cache *s,
3475				const struct track *track)
3476{
3477	long start, end, pos;
3478	struct location *l;
3479	unsigned long caddr;
3480	unsigned long age = jiffies - track->when;
3481
3482	start = -1;
3483	end = t->count;
3484
3485	for ( ; ; ) {
3486		pos = start + (end - start + 1) / 2;
3487
3488		/*
3489		 * There is nothing at "end". If we end up there
3490		 * we need to insert the new element before end.
3491		 */
3492		if (pos == end)
3493			break;
3494
3495		caddr = t->loc[pos].addr;
3496		if (track->addr == caddr) {
3497
3498			l = &t->loc[pos];
3499			l->count++;
3500			if (track->when) {
3501				l->sum_time += age;
3502				if (age < l->min_time)
3503					l->min_time = age;
3504				if (age > l->max_time)
3505					l->max_time = age;
3506
3507				if (track->pid < l->min_pid)
3508					l->min_pid = track->pid;
3509				if (track->pid > l->max_pid)
3510					l->max_pid = track->pid;
3511
3512				cpu_set(track->cpu, l->cpus);
3513			}
3514			node_set(page_to_nid(virt_to_page(track)), l->nodes);
3515			return 1;
3516		}
3517
3518		if (track->addr < caddr)
3519			end = pos;
3520		else
3521			start = pos;
3522	}
3523
3524	/*
3525	 * Not found. Insert new tracking element.
3526	 */
3527	if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
3528		return 0;
3529
3530	l = t->loc + pos;
3531	if (pos < t->count)
3532		memmove(l + 1, l,
3533			(t->count - pos) * sizeof(struct location));
3534	t->count++;
3535	l->count = 1;
3536	l->addr = track->addr;
3537	l->sum_time = age;
3538	l->min_time = age;
3539	l->max_time = age;
3540	l->min_pid = track->pid;
3541	l->max_pid = track->pid;
3542	cpus_clear(l->cpus);
3543	cpu_set(track->cpu, l->cpus);
3544	nodes_clear(l->nodes);
3545	node_set(page_to_nid(virt_to_page(track)), l->nodes);
3546	return 1;
3547}
3548
3549static void process_slab(struct loc_track *t, struct kmem_cache *s,
3550		struct page *page, enum track_item alloc)
3551{
3552	void *addr = page_address(page);
3553	DECLARE_BITMAP(map, page->objects);
3554	void *p;
3555
3556	bitmap_zero(map, page->objects);
3557	for_each_free_object(p, s, page->freelist)
3558		set_bit(slab_index(p, s, addr), map);
3559
3560	for_each_object(p, s, addr, page->objects)
3561		if (!test_bit(slab_index(p, s, addr), map))
3562			add_location(t, s, get_track(s, p, alloc));
3563}
3564
3565static int list_locations(struct kmem_cache *s, char *buf,
3566					enum track_item alloc)
3567{
3568	int len = 0;
3569	unsigned long i;
3570	struct loc_track t = { 0, 0, NULL };
3571	int node;
3572
3573	if (!alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
3574			GFP_TEMPORARY))
3575		return sprintf(buf, "Out of memory\n");
3576
3577	/* Push back cpu slabs */
3578	flush_all(s);
3579
3580	for_each_node_state(node, N_NORMAL_MEMORY) {
3581		struct kmem_cache_node *n = get_node(s, node);
3582		unsigned long flags;
3583		struct page *page;
3584
3585		if (!atomic_long_read(&n->nr_slabs))
3586			continue;
3587
3588		spin_lock_irqsave(&n->list_lock, flags);
3589		list_for_each_entry(page, &n->partial, lru)
3590			process_slab(&t, s, page, alloc);
3591		list_for_each_entry(page, &n->full, lru)
3592			process_slab(&t, s, page, alloc);
3593		spin_unlock_irqrestore(&n->list_lock, flags);
3594	}
3595
3596	for (i = 0; i < t.count; i++) {
3597		struct location *l = &t.loc[i];
3598
3599		if (len > PAGE_SIZE - 100)
3600			break;
3601		len += sprintf(buf + len, "%7ld ", l->count);
3602
3603		if (l->addr)
3604			len += sprint_symbol(buf + len, (unsigned long)l->addr);
3605		else
3606			len += sprintf(buf + len, "<not-available>");
3607
3608		if (l->sum_time != l->min_time) {
3609			len += sprintf(buf + len, " age=%ld/%ld/%ld",
3610				l->min_time,
3611				(long)div_u64(l->sum_time, l->count),
3612				l->max_time);
3613		} else
3614			len += sprintf(buf + len, " age=%ld",
3615				l->min_time);
3616
3617		if (l->min_pid != l->max_pid)
3618			len += sprintf(buf + len, " pid=%ld-%ld",
3619				l->min_pid, l->max_pid);
3620		else
3621			len += sprintf(buf + len, " pid=%ld",
3622				l->min_pid);
3623
3624		if (num_online_cpus() > 1 && !cpus_empty(l->cpus) &&
3625				len < PAGE_SIZE - 60) {
3626			len += sprintf(buf + len, " cpus=");
3627			len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50,
3628					l->cpus);
3629		}
3630
3631		if (num_online_nodes() > 1 && !nodes_empty(l->nodes) &&
3632				len < PAGE_SIZE - 60) {
3633			len += sprintf(buf + len, " nodes=");
3634			len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50,
3635					l->nodes);
3636		}
3637
3638		len += sprintf(buf + len, "\n");
3639	}
3640
3641	free_loc_track(&t);
3642	if (!t.count)
3643		len += sprintf(buf, "No data\n");
3644	return len;
3645}
3646
3647enum slab_stat_type {
3648	SL_ALL,			/* All slabs */
3649	SL_PARTIAL,		/* Only partially allocated slabs */
3650	SL_CPU,			/* Only slabs used for cpu caches */
3651	SL_OBJECTS,		/* Determine allocated objects not slabs */
3652	SL_TOTAL		/* Determine object capacity not slabs */
3653};
3654
3655#define SO_ALL		(1 << SL_ALL)
3656#define SO_PARTIAL	(1 << SL_PARTIAL)
3657#define SO_CPU		(1 << SL_CPU)
3658#define SO_OBJECTS	(1 << SL_OBJECTS)
3659#define SO_TOTAL	(1 << SL_TOTAL)
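
/*
 * E.g. SO_ALL|SO_OBJECTS reports the objects in use in all slabs,
 * SO_ALL|SO_TOTAL reports the total object capacity, and SO_PARTIAL on its
 * own reports the number of partial slabs per node.
 */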
3660
3661static ssize_t show_slab_objects(struct kmem_cache *s,
3662			    char *buf, unsigned long flags)
3663{
3664	unsigned long total = 0;
3665	int node;
3666	int x;
3667	unsigned long *nodes;
3668	unsigned long *per_cpu;
3669
3670	nodes = kzalloc(2 * sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
3671	if (!nodes)
3672		return -ENOMEM;
3673	per_cpu = nodes + nr_node_ids;
3674
3675	if (flags & SO_CPU) {
3676		int cpu;
3677
3678		for_each_possible_cpu(cpu) {
3679			struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
3680
3681			if (!c || c->node < 0)
3682				continue;
3683
3684			if (c->page) {
3685				if (flags & SO_TOTAL)
3686					x = c->page->objects;
3687				else if (flags & SO_OBJECTS)
3688					x = c->page->inuse;
3689				else
3690					x = 1;
3691
3692				total += x;
3693				nodes[c->node] += x;
3694			}
3695			per_cpu[c->node]++;
3696		}
3697	}
3698
3699	if (flags & SO_ALL) {
3700		for_each_node_state(node, N_NORMAL_MEMORY) {
3701			struct kmem_cache_node *n = get_node(s, node);
3702
3703			if (flags & SO_TOTAL)
3704				x = atomic_long_read(&n->total_objects);
3705			else if (flags & SO_OBJECTS)
3706				x = atomic_long_read(&n->total_objects) -
3707					count_partial(n, count_free);
3708			else
3709				x = atomic_long_read(&n->nr_slabs);
3710
3711			total += x;
3712			nodes[node] += x;
3713		}
3714
3715	} else if (flags & SO_PARTIAL) {
3716		for_each_node_state(node, N_NORMAL_MEMORY) {
3717			struct kmem_cache_node *n = get_node(s, node);
3718
3719			if (flags & SO_TOTAL)
3720				x = count_partial(n, count_total);
3721			else if (flags & SO_OBJECTS)
3722				x = count_partial(n, count_inuse);
3723			else
3724				x = n->nr_partial;
3725			total += x;
3726			nodes[node] += x;
3727		}
3728	}
3729	x = sprintf(buf, "%lu", total);
3730#ifdef CONFIG_NUMA
3731	for_each_node_state(node, N_NORMAL_MEMORY)
3732		if (nodes[node])
3733			x += sprintf(buf + x, " N%d=%lu",
3734					node, nodes[node]);
3735#endif
3736	kfree(nodes);
3737	return x + sprintf(buf + x, "\n");
3738}
3739
3740static int any_slab_objects(struct kmem_cache *s)
3741{
3742	int node;
3743
3744	for_each_online_node(node) {
3745		struct kmem_cache_node *n = get_node(s, node);
3746
3747		if (!n)
3748			continue;
3749
3750		if (atomic_long_read(&n->total_objects))
3751			return 1;
3752	}
3753	return 0;
3754}
3755
3756#define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
3757#define to_slab(n) container_of(n, struct kmem_cache, kobj)
3758
3759struct slab_attribute {
3760	struct attribute attr;
3761	ssize_t (*show)(struct kmem_cache *s, char *buf);
3762	ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
3763};
3764
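/*
 * SLAB_ATTR_RO(name) declares a read-only sysfs attribute backed by
 * name_show(); SLAB_ATTR(name) additionally wires up name_store() and uses
 * 0644 permissions.
 */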
3765#define SLAB_ATTR_RO(_name) \
3766	static struct slab_attribute _name##_attr = __ATTR_RO(_name)
3767
3768#define SLAB_ATTR(_name) \
3769	static struct slab_attribute _name##_attr =  \
3770	__ATTR(_name, 0644, _name##_show, _name##_store)
3771
3772static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
3773{
3774	return sprintf(buf, "%d\n", s->size);
3775}
3776SLAB_ATTR_RO(slab_size);
3777
3778static ssize_t align_show(struct kmem_cache *s, char *buf)
3779{
3780	return sprintf(buf, "%d\n", s->align);
3781}
3782SLAB_ATTR_RO(align);
3783
3784static ssize_t object_size_show(struct kmem_cache *s, char *buf)
3785{
3786	return sprintf(buf, "%d\n", s->objsize);
3787}
3788SLAB_ATTR_RO(object_size);
3789
3790static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
3791{
3792	return sprintf(buf, "%d\n", oo_objects(s->oo));
3793}
3794SLAB_ATTR_RO(objs_per_slab);
3795
3796static ssize_t order_store(struct kmem_cache *s,
3797				const char *buf, size_t length)
3798{
3799	unsigned long order;
3800	int err;
3801
3802	err = strict_strtoul(buf, 10, &order);
3803	if (err)
3804		return err;
3805
3806	if (order > slub_max_order || order < slub_min_order)
3807		return -EINVAL;
3808
3809	calculate_sizes(s, order);
3810	return length;
3811}
3812
3813static ssize_t order_show(struct kmem_cache *s, char *buf)
3814{
3815	return sprintf(buf, "%d\n", oo_order(s->oo));
3816}
3817SLAB_ATTR(order);
3818
3819static ssize_t ctor_show(struct kmem_cache *s, char *buf)
3820{
3821	if (s->ctor) {
3822		int n = sprint_symbol(buf, (unsigned long)s->ctor);
3823
3824		return n + sprintf(buf + n, "\n");
3825	}
3826	return 0;
3827}
3828SLAB_ATTR_RO(ctor);
3829
3830static ssize_t aliases_show(struct kmem_cache *s, char *buf)
3831{
3832	return sprintf(buf, "%d\n", s->refcount - 1);
3833}
3834SLAB_ATTR_RO(aliases);
3835
3836static ssize_t slabs_show(struct kmem_cache *s, char *buf)
3837{
3838	return show_slab_objects(s, buf, SO_ALL);
3839}
3840SLAB_ATTR_RO(slabs);
3841
3842static ssize_t partial_show(struct kmem_cache *s, char *buf)
3843{
3844	return show_slab_objects(s, buf, SO_PARTIAL);
3845}
3846SLAB_ATTR_RO(partial);
3847
3848static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
3849{
3850	return show_slab_objects(s, buf, SO_CPU);
3851}
3852SLAB_ATTR_RO(cpu_slabs);
3853
3854static ssize_t objects_show(struct kmem_cache *s, char *buf)
3855{
3856	return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS);
3857}
3858SLAB_ATTR_RO(objects);
3859
3860static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
3861{
3862	return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS);
3863}
3864SLAB_ATTR_RO(objects_partial);
3865
3866static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
3867{
3868	return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
3869}
3870SLAB_ATTR_RO(total_objects);
3871
3872static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
3873{
3874	return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE));
3875}
3876
3877static ssize_t sanity_checks_store(struct kmem_cache *s,
3878				const char *buf, size_t length)
3879{
3880	s->flags &= ~SLAB_DEBUG_FREE;
3881	if (buf[0] == '1')
3882		s->flags |= SLAB_DEBUG_FREE;
3883	return length;
3884}
3885SLAB_ATTR(sanity_checks);
3886
3887static ssize_t trace_show(struct kmem_cache *s, char *buf)
3888{
3889	return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE));
3890}
3891
3892static ssize_t trace_store(struct kmem_cache *s, const char *buf,
3893							size_t length)
3894{
3895	s->flags &= ~SLAB_TRACE;
3896	if (buf[0] == '1')
3897		s->flags |= SLAB_TRACE;
3898	return length;
3899}
3900SLAB_ATTR(trace);
3901
3902static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
3903{
3904	return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
3905}
3906
3907static ssize_t reclaim_account_store(struct kmem_cache *s,
3908				const char *buf, size_t length)
3909{
3910	s->flags &= ~SLAB_RECLAIM_ACCOUNT;
3911	if (buf[0] == '1')
3912		s->flags |= SLAB_RECLAIM_ACCOUNT;
3913	return length;
3914}
3915SLAB_ATTR(reclaim_account);
3916
3917static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
3918{
3919	return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
3920}
3921SLAB_ATTR_RO(hwcache_align);
3922
3923#ifdef CONFIG_ZONE_DMA
3924static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
3925{
3926	return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
3927}
3928SLAB_ATTR_RO(cache_dma);
3929#endif
3930
3931static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
3932{
3933	return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
3934}
3935SLAB_ATTR_RO(destroy_by_rcu);
3936
3937static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
3938{
3939	return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
3940}
3941
3942static ssize_t red_zone_store(struct kmem_cache *s,
3943				const char *buf, size_t length)
3944{
3945	if (any_slab_objects(s))
3946		return -EBUSY;
3947
3948	s->flags &= ~SLAB_RED_ZONE;
3949	if (buf[0] == '1')
3950		s->flags |= SLAB_RED_ZONE;
3951	calculate_sizes(s, -1);
3952	return length;
3953}
3954SLAB_ATTR(red_zone);
3955
3956static ssize_t poison_show(struct kmem_cache *s, char *buf)
3957{
3958	return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON));
3959}
3960
3961static ssize_t poison_store(struct kmem_cache *s,
3962				const char *buf, size_t length)
3963{
3964	if (any_slab_objects(s))
3965		return -EBUSY;
3966
3967	s->flags &= ~SLAB_POISON;
3968	if (buf[0] == '1')
3969		s->flags |= SLAB_POISON;
3970	calculate_sizes(s, -1);
3971	return length;
3972}
3973SLAB_ATTR(poison);
3974
3975static ssize_t store_user_show(struct kmem_cache *s, char *buf)
3976{
3977	return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
3978}
3979
3980static ssize_t store_user_store(struct kmem_cache *s,
3981				const char *buf, size_t length)
3982{
3983	if (any_slab_objects(s))
3984		return -EBUSY;
3985
3986	s->flags &= ~SLAB_STORE_USER;
3987	if (buf[0] == '1')
3988		s->flags |= SLAB_STORE_USER;
3989	calculate_sizes(s, -1);
3990	return length;
3991}
3992SLAB_ATTR(store_user);
3993
3994static ssize_t validate_show(struct kmem_cache *s, char *buf)
3995{
3996	return 0;
3997}
3998
3999static ssize_t validate_store(struct kmem_cache *s,
4000			const char *buf, size_t length)
4001{
4002	int ret = -EINVAL;
4003
4004	if (buf[0] == '1') {
4005		ret = validate_slab_cache(s);
4006		if (ret >= 0)
4007			ret = length;
4008	}
4009	return ret;
4010}
4011SLAB_ATTR(validate);
4012
4013static ssize_t shrink_show(struct kmem_cache *s, char *buf)
4014{
4015	return 0;
4016}
4017
4018static ssize_t shrink_store(struct kmem_cache *s,
4019			const char *buf, size_t length)
4020{
4021	if (buf[0] == '1') {
4022		int rc = kmem_cache_shrink(s);
4023
4024		if (rc)
4025			return rc;
4026	} else
4027		return -EINVAL;
4028	return length;
4029}
4030SLAB_ATTR(shrink);
4031
4032static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
4033{
4034	if (!(s->flags & SLAB_STORE_USER))
4035		return -ENOSYS;
4036	return list_locations(s, buf, TRACK_ALLOC);
4037}
4038SLAB_ATTR_RO(alloc_calls);
4039
4040static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
4041{
4042	if (!(s->flags & SLAB_STORE_USER))
4043		return -ENOSYS;
4044	return list_locations(s, buf, TRACK_FREE);
4045}
4046SLAB_ATTR_RO(free_calls);
4047
4048#ifdef CONFIG_NUMA
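/*
 * The defrag ratio is presented to user space as a percentage (0-100) but
 * stored internally scaled by a factor of 10, hence the conversions below.
 */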
4049static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
4050{
4051	return sprintf(buf, "%d\n", s->remote_node_defrag_ratio / 10);
4052}
4053
4054static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
4055				const char *buf, size_t length)
4056{
4057	unsigned long ratio;
4058	int err;
4059
4060	err = strict_strtoul(buf, 10, &ratio);
4061	if (err)
4062		return err;
4063
4064	if (ratio <= 100)
4065		s->remote_node_defrag_ratio = ratio * 10;
4066
4067	return length;
4068}
4069SLAB_ATTR(remote_node_defrag_ratio);
4070#endif
4071
4072#ifdef CONFIG_SLUB_STATS
4073static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
4074{
4075	unsigned long sum  = 0;
4076	int cpu;
4077	int len;
4078	int *data = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL);
4079
4080	if (!data)
4081		return -ENOMEM;
4082
4083	for_each_online_cpu(cpu) {
4084		unsigned x = get_cpu_slab(s, cpu)->stat[si];
4085
4086		data[cpu] = x;
4087		sum += x;
4088	}
4089
4090	len = sprintf(buf, "%lu", sum);
4091
4092#ifdef CONFIG_SMP
4093	for_each_online_cpu(cpu) {
4094		if (data[cpu] && len < PAGE_SIZE - 20)
4095			len += sprintf(buf + len, " C%d=%u", cpu, data[cpu]);
4096	}
4097#endif
4098	kfree(data);
4099	return len + sprintf(buf + len, "\n");
4100}
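/*
 * show_stat() prints the total of a per cpu counter followed by the non-zero
 * per cpu contributions, so a statistics file reads e.g. (illustrative
 * numbers):
 *
 *	4523 C0=1200 C1=3323
 *
 * Each STAT_ATTR() below turns one enum stat_item into such a read-only
 * sysfs file.
 */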
4101
4102#define STAT_ATTR(si, text) 					\
4103static ssize_t text##_show(struct kmem_cache *s, char *buf)	\
4104{								\
4105	return show_stat(s, buf, si);				\
4106}								\
4107SLAB_ATTR_RO(text);
4108
4109STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
4110STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
4111STAT_ATTR(FREE_FASTPATH, free_fastpath);
4112STAT_ATTR(FREE_SLOWPATH, free_slowpath);
4113STAT_ATTR(FREE_FROZEN, free_frozen);
4114STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
4115STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
4116STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
4117STAT_ATTR(ALLOC_SLAB, alloc_slab);
4118STAT_ATTR(ALLOC_REFILL, alloc_refill);
4119STAT_ATTR(FREE_SLAB, free_slab);
4120STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
4121STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
4122STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
4123STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
4124STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
4125STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
4126STAT_ATTR(ORDER_FALLBACK, order_fallback);
4127#endif
4128
4129static struct attribute *slab_attrs[] = {
4130	&slab_size_attr.attr,
4131	&object_size_attr.attr,
4132	&objs_per_slab_attr.attr,
4133	&order_attr.attr,
4134	&objects_attr.attr,
4135	&objects_partial_attr.attr,
4136	&total_objects_attr.attr,
4137	&slabs_attr.attr,
4138	&partial_attr.attr,
4139	&cpu_slabs_attr.attr,
4140	&ctor_attr.attr,
4141	&aliases_attr.attr,
4142	&align_attr.attr,
4143	&sanity_checks_attr.attr,
4144	&trace_attr.attr,
4145	&hwcache_align_attr.attr,
4146	&reclaim_account_attr.attr,
4147	&destroy_by_rcu_attr.attr,
4148	&red_zone_attr.attr,
4149	&poison_attr.attr,
4150	&store_user_attr.attr,
4151	&validate_attr.attr,
4152	&shrink_attr.attr,
4153	&alloc_calls_attr.attr,
4154	&free_calls_attr.attr,
4155#ifdef CONFIG_ZONE_DMA
4156	&cache_dma_attr.attr,
4157#endif
4158#ifdef CONFIG_NUMA
4159	&remote_node_defrag_ratio_attr.attr,
4160#endif
4161#ifdef CONFIG_SLUB_STATS
4162	&alloc_fastpath_attr.attr,
4163	&alloc_slowpath_attr.attr,
4164	&free_fastpath_attr.attr,
4165	&free_slowpath_attr.attr,
4166	&free_frozen_attr.attr,
4167	&free_add_partial_attr.attr,
4168	&free_remove_partial_attr.attr,
4169	&alloc_from_partial_attr.attr,
4170	&alloc_slab_attr.attr,
4171	&alloc_refill_attr.attr,
4172	&free_slab_attr.attr,
4173	&cpuslab_flush_attr.attr,
4174	&deactivate_full_attr.attr,
4175	&deactivate_empty_attr.attr,
4176	&deactivate_to_head_attr.attr,
4177	&deactivate_to_tail_attr.attr,
4178	&deactivate_remote_frees_attr.attr,
4179	&order_fallback_attr.attr,
4180#endif
4181	NULL
4182};
4183
4184static struct attribute_group slab_attr_group = {
4185	.attrs = slab_attrs,
4186};
4187
4188static ssize_t slab_attr_show(struct kobject *kobj,
4189				struct attribute *attr,
4190				char *buf)
4191{
4192	struct slab_attribute *attribute;
4193	struct kmem_cache *s;
4194	int err;
4195
4196	attribute = to_slab_attr(attr);
4197	s = to_slab(kobj);
4198
4199	if (!attribute->show)
4200		return -EIO;
4201
4202	err = attribute->show(s, buf);
4203
4204	return err;
4205}
4206
4207static ssize_t slab_attr_store(struct kobject *kobj,
4208				struct attribute *attr,
4209				const char *buf, size_t len)
4210{
4211	struct slab_attribute *attribute;
4212	struct kmem_cache *s;
4213	int err;
4214
4215	attribute = to_slab_attr(attr);
4216	s = to_slab(kobj);
4217
4218	if (!attribute->store)
4219		return -EIO;
4220
4221	err = attribute->store(s, buf, len);
4222
4223	return err;
4224}
4225
4226static void kmem_cache_release(struct kobject *kobj)
4227{
4228	struct kmem_cache *s = to_slab(kobj);
4229
4230	kfree(s);
4231}
4232
4233static struct sysfs_ops slab_sysfs_ops = {
4234	.show = slab_attr_show,
4235	.store = slab_attr_store,
4236};
4237
4238static struct kobj_type slab_ktype = {
4239	.sysfs_ops = &slab_sysfs_ops,
4240	.release = kmem_cache_release
4241};
4242
4243static int uevent_filter(struct kset *kset, struct kobject *kobj)
4244{
4245	struct kobj_type *ktype = get_ktype(kobj);
4246
4247	if (ktype == &slab_ktype)
4248		return 1;
4249	return 0;
4250}
4251
4252static struct kset_uevent_ops slab_uevent_ops = {
4253	.filter = uevent_filter,
4254};
4255
4256static struct kset *slab_kset;
4257
4258#define ID_STR_LENGTH 64
4259
4260/* Create a unique string id for a slab cache:
4261 *
4262 * Format	:[flags-]size
4263 */
4264static char *create_unique_id(struct kmem_cache *s)
4265{
4266	char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
4267	char *p = name;
4268
4269	BUG_ON(!name);
4270
4271	*p++ = ':';
4272	/*
4273	 * First flags affecting slabcache operations. We will only
4274	 * get here for aliasable slabs so we do not need to support
4275	 * too many flags. The flags here must cover all flags that
4276	 * are matched during merging to guarantee that the id is
4277	 * unique.
4278	 */
4279	if (s->flags & SLAB_CACHE_DMA)
4280		*p++ = 'd';
4281	if (s->flags & SLAB_RECLAIM_ACCOUNT)
4282		*p++ = 'a';
4283	if (s->flags & SLAB_DEBUG_FREE)
4284		*p++ = 'F';
4285	if (p != name + 1)
4286		*p++ = '-';
4287	p += sprintf(p, "%07d", s->size);
4288	BUG_ON(p > name + ID_STR_LENGTH - 1);
4289	return name;
4290}
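/*
 * Example (illustrative): a mergeable cache with SLAB_CACHE_DMA and
 * SLAB_RECLAIM_ACCOUNT set and s->size of 192 gets the id ":da-0000192";
 * with none of the flags set the id is simply ":0000192".
 */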
4291
4292static int sysfs_slab_add(struct kmem_cache *s)
4293{
4294	int err;
4295	const char *name;
4296	int unmergeable;
4297
4298	if (slab_state < SYSFS)
4299		/* Defer until later */
4300		return 0;
4301
4302	unmergeable = slab_unmergeable(s);
4303	if (unmergeable) {
4304		/*
4305		 * The slab cache can never be merged, so we can use its name
4306		 * directly. This is typically the case in debug situations,
4307		 * where duplicate names are then easy to catch.
4308		 */
4309		sysfs_remove_link(&slab_kset->kobj, s->name);
4310		name = s->name;
4311	} else {
4312		/*
4313		 * Create a unique name for the slab as a target
4314		 * for the symlinks.
4315		 */
4316		name = create_unique_id(s);
4317	}
4318
4319	s->kobj.kset = slab_kset;
4320	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name);
4321	if (err) {
4322		kobject_put(&s->kobj);
4323		return err;
4324	}
4325
4326	err = sysfs_create_group(&s->kobj, &slab_attr_group);
4327	if (err)
4328		return err;
4329	kobject_uevent(&s->kobj, KOBJ_ADD);
4330	if (!unmergeable) {
4331		/* Setup first alias */
4332		sysfs_slab_alias(s, s->name);
4333		kfree(name);
4334	}
4335	return 0;
4336}
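/*
 * Resulting layout (illustrative): an unmergeable cache appears directly as
 * /sys/kernel/slab/<name>, while a mergeable cache is registered under the
 * unique id from create_unique_id() (e.g. ":0000192") and each alias name
 * becomes a symlink to it via sysfs_slab_alias().
 */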
4337
4338static void sysfs_slab_remove(struct kmem_cache *s)
4339{
4340	kobject_uevent(&s->kobj, KOBJ_REMOVE);
4341	kobject_del(&s->kobj);
4342	kobject_put(&s->kobj);
4343}
4344
4345/*
4346 * Need to buffer aliases during bootup until sysfs becomes
4347 * available, lest we lose that information.
4348 */
4349struct saved_alias {
4350	struct kmem_cache *s;
4351	const char *name;
4352	struct saved_alias *next;
4353};
4354
4355static struct saved_alias *alias_list;
4356
4357static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
4358{
4359	struct saved_alias *al;
4360
4361	if (slab_state == SYSFS) {
4362		/*
4363		 * If we have a leftover link then remove it.
4364		 */
4365		sysfs_remove_link(&slab_kset->kobj, name);
4366		return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
4367	}
4368
4369	al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
4370	if (!al)
4371		return -ENOMEM;
4372
4373	al->s = s;
4374	al->name = name;
4375	al->next = alias_list;
4376	alias_list = al;
4377	return 0;
4378}
4379
4380static int __init slab_sysfs_init(void)
4381{
4382	struct kmem_cache *s;
4383	int err;
4384
4385	slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
4386	if (!slab_kset) {
4387		printk(KERN_ERR "Cannot register slab subsystem.\n");
4388		return -ENOSYS;
4389	}
4390
4391	slab_state = SYSFS;
4392
4393	list_for_each_entry(s, &slab_caches, list) {
4394		err = sysfs_slab_add(s);
4395		if (err)
4396			printk(KERN_ERR "SLUB: Unable to add boot slab %s"
4397						" to sysfs\n", s->name);
4398	}
4399
4400	while (alias_list) {
4401		struct saved_alias *al = alias_list;
4402
4403		alias_list = alias_list->next;
4404		err = sysfs_slab_alias(al->s, al->name);
4405		if (err)
4406			printk(KERN_ERR "SLUB: Unable to add boot slab alias"
4407					" %s to sysfs\n", al->name);
4408		kfree(al);
4409	}
4410
4411	resiliency_test();
4412	return 0;
4413}
4414
4415__initcall(slab_sysfs_init);
4416#endif
4417
4418/*
4419 * The /proc/slabinfo ABI
4420 */
4421#ifdef CONFIG_SLABINFO
4422static void print_slabinfo_header(struct seq_file *m)
4423{
4424	seq_puts(m, "slabinfo - version: 2.1\n");
4425	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> "
4426		 "<objperslab> <pagesperslab>");
4427	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
4428	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
4429	seq_putc(m, '\n');
4430}
4431
4432static void *s_start(struct seq_file *m, loff_t *pos)
4433{
4434	loff_t n = *pos;
4435
4436	down_read(&slub_lock);
4437	if (!n)
4438		print_slabinfo_header(m);
4439
4440	return seq_list_start(&slab_caches, *pos);
4441}
4442
4443static void *s_next(struct seq_file *m, void *p, loff_t *pos)
4444{
4445	return seq_list_next(p, &slab_caches, pos);
4446}
4447
4448static void s_stop(struct seq_file *m, void *p)
4449{
4450	up_read(&slub_lock);
4451}
4452
4453static int s_show(struct seq_file *m, void *p)
4454{
4455	unsigned long nr_partials = 0;
4456	unsigned long nr_slabs = 0;
4457	unsigned long nr_inuse = 0;
4458	unsigned long nr_objs = 0;
4459	unsigned long nr_free = 0;
4460	struct kmem_cache *s;
4461	int node;
4462
4463	s = list_entry(p, struct kmem_cache, list);
4464
4465	for_each_online_node(node) {
4466		struct kmem_cache_node *n = get_node(s, node);
4467
4468		if (!n)
4469			continue;
4470
4471		nr_partials += n->nr_partial;
4472		nr_slabs += atomic_long_read(&n->nr_slabs);
4473		nr_objs += atomic_long_read(&n->total_objects);
4474		nr_free += count_partial(n, count_free);
4475	}
4476
4477	nr_inuse = nr_objs - nr_free;
4478
4479	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", s->name, nr_inuse,
4480		   nr_objs, s->size, oo_objects(s->oo),
4481		   (1 << oo_order(s->oo)));
4482	seq_printf(m, " : tunables %4u %4u %4u", 0, 0, 0);
4483	seq_printf(m, " : slabdata %6lu %6lu %6lu", nr_slabs, nr_slabs,
4484		   0UL);
4485	seq_putc(m, '\n');
4486	return 0;
4487}
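/*
 * A resulting /proc/slabinfo line looks like (illustrative values):
 *
 * kmalloc-192         1024   1155    192   21    1 : tunables    0    0    0 : slabdata     55     55      0
 *
 * SLUB has no per cache tunables and no shared array, so the tunables and
 * <sharedavail> columns are always zero, and <active_slabs> is reported
 * equal to <num_slabs>.
 */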
4488
4489static const struct seq_operations slabinfo_op = {
4490	.start = s_start,
4491	.next = s_next,
4492	.stop = s_stop,
4493	.show = s_show,
4494};
4495
4496static int slabinfo_open(struct inode *inode, struct file *file)
4497{
4498	return seq_open(file, &slabinfo_op);
4499}
4500
4501static const struct file_operations proc_slabinfo_operations = {
4502	.open		= slabinfo_open,
4503	.read		= seq_read,
4504	.llseek		= seq_lseek,
4505	.release	= seq_release,
4506};
4507
4508static int __init slab_proc_init(void)
4509{
4510	proc_create("slabinfo", S_IWUSR|S_IRUGO, NULL, &proc_slabinfo_operations);
4511	return 0;
4512}
4513module_init(slab_proc_init);
4514#endif /* CONFIG_SLABINFO */
4515