slub.c revision dbc55faa64c12f4c9fab6e2bd131d771bc026ed1
1/*
2 * SLUB: A slab allocator that limits cache line use instead of queuing
3 * objects in per cpu and per node lists.
4 *
5 * The allocator synchronizes using per slab locks and only
6 * uses a centralized lock to manage a pool of partial slabs.
7 *
8 * (C) 2007 SGI, Christoph Lameter <clameter@sgi.com>
9 */
10
11#include <linux/mm.h>
12#include <linux/module.h>
13#include <linux/bit_spinlock.h>
14#include <linux/interrupt.h>
15#include <linux/bitops.h>
16#include <linux/slab.h>
17#include <linux/seq_file.h>
18#include <linux/cpu.h>
19#include <linux/cpuset.h>
20#include <linux/mempolicy.h>
21#include <linux/ctype.h>
22#include <linux/kallsyms.h>
23
24/*
25 * Lock order:
26 *   1. slab_lock(page)
27 *   2. slab->list_lock
28 *
29 *   The slab_lock protects operations on the objects of a particular
30 *   slab and its metadata in the page struct. If the slab lock
31 *   has been taken then no allocations nor frees can be performed
32 *   on the objects in the slab nor can the slab be added or removed
33 *   from the partial or full lists since this would mean modifying
34 *   the page struct of the slab.
35 *
36 *   The list_lock protects the partial and full list on each node and
37 *   the partial slab counter. If taken then no new slabs may be added or
38 *   removed from the lists nor may the number of partial slabs be modified.
39 *   (Note that the total number of slabs is an atomic value that may be
40 *   modified without taking the list lock).
41 *
42 *   The list_lock is a centralized lock and thus we avoid taking it as
43 *   much as possible. As long as SLUB does not have to handle partial
44 *   slabs, operations can continue without any centralized lock. F.e.
45 *   allocating a long series of objects that fill up slabs does not require
46 *   the list lock.
47 *
48 *   The lock order is sometimes inverted when we are trying to get a slab
49 *   off a list. We take the list_lock and then look for a page on the list
50 *   to use. While we do that objects in the slabs may be freed. We can
51 *   only operate on the slab if we have also taken the slab_lock. So we use
52 *   a slab_trylock() on the slab. If trylock was successful then no frees
53 *   can occur anymore and we can use the slab for allocations etc. If the
54 *   slab_trylock() does not succeed then frees are in progress in the slab and
55 *   we must stay away from it for a while since we may cause a bouncing
56 *   cacheline if we try to acquire the lock. So go onto the next slab.
57 *   If all pages are busy then we may allocate a new slab instead of reusing
58 *   a partial slab. A new slab has no one operating on it and thus there is
59 *   no danger of cacheline contention.
60 *
61 *   Interrupts are disabled during allocation and deallocation in order to
62 *   make the slab allocator safe to use in the context of an irq. In addition
63 *   interrupts are disabled to ensure that the processor does not change
64 *   while handling per_cpu slabs, due to kernel preemption.
65 *
66 * SLUB assigns one slab for allocation to each processor.
67 * Allocations only occur from these slabs called cpu slabs.
68 *
69 * Slabs with free elements are kept on a partial list and during regular
70 * operations no list for full slabs is used. If an object in a full slab is
71 * freed then the slab will show up again on the partial lists.
72 * We track full slabs for debugging purposes though because otherwise we
73 * cannot scan all objects.
74 *
75 * Slabs are freed when they become empty. Teardown and setup is
76 * minimal so we rely on the page allocators per cpu caches for
77 * fast frees and allocs.
78 *
79 * Overloading of page flags that are otherwise used for LRU management.
80 *
81 * PageActive 		The slab is frozen and exempt from list processing.
82 * 			This means that the slab is dedicated to a purpose
83 * 			such as satisfying allocations for a specific
84 * 			processor. Objects may be freed in the slab while
85 * 			it is frozen but slab_free will then skip the usual
86 * 			list operations. It is up to the processor holding
87 * 			the slab to integrate the slab into the slab lists
88 * 			when the slab is no longer needed.
89 *
90 * 			One use of this flag is to mark slabs that are
91 * 			used for allocations. Then such a slab becomes a cpu
92 * 			slab. The cpu slab may be equipped with an additional
93 * 			lockless_freelist that allows lockless access to
94 * 			free objects in addition to the regular freelist
95 * 			that requires the slab lock.
96 *
97 * PageError		Slab requires special handling due to debug
98 * 			options set. This moves slab handling out of
99 * 			the fast path and disables lockless freelists.
100 */
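
/*
 * Usage sketch (illustration only; cache and type names are
 * hypothetical): a SLUB client creates a cache once and then
 * allocates and frees objects from it.
 *
 *	struct kmem_cache *widget_cache;
 *
 *	widget_cache = kmem_cache_create("widget", sizeof(struct widget),
 *				0, SLAB_HWCACHE_ALIGN, NULL, NULL);
 *	struct widget *w = kmem_cache_alloc(widget_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(widget_cache, w);
 */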
101
102#define FROZEN (1 << PG_active)
103
104#ifdef CONFIG_SLUB_DEBUG
105#define SLABDEBUG (1 << PG_error)
106#else
107#define SLABDEBUG 0
108#endif
109
110static inline int SlabFrozen(struct page *page)
111{
112	return page->flags & FROZEN;
113}
114
115static inline void SetSlabFrozen(struct page *page)
116{
117	page->flags |= FROZEN;
118}
119
120static inline void ClearSlabFrozen(struct page *page)
121{
122	page->flags &= ~FROZEN;
123}
124
125static inline int SlabDebug(struct page *page)
126{
127	return page->flags & SLABDEBUG;
128}
129
130static inline void SetSlabDebug(struct page *page)
131{
132	page->flags |= SLABDEBUG;
133}
134
135static inline void ClearSlabDebug(struct page *page)
136{
137	page->flags &= ~SLABDEBUG;
138}
139
140/*
141 * Issues still to be resolved:
142 *
143 * - The per cpu array is updated for each new slab and is a remote
144 *   cacheline for most nodes. This could become a bouncing cacheline given
145 *   enough frequent updates. There are 16 pointers in a cacheline, so at
146 *   max 16 cpus could compete for the cacheline which may be okay.
147 *
148 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
149 *
150 * - Variable sizing of the per node arrays
151 */
152
153/* Enable to test recovery from slab corruption on boot */
154#undef SLUB_RESILIENCY_TEST
155
156#if PAGE_SHIFT <= 12
157
158/*
159 * Small page size. Make sure that we do not fragment memory
160 */
161#define DEFAULT_MAX_ORDER 1
162#define DEFAULT_MIN_OBJECTS 4
163
164#else
165
166/*
167 * Large page machines are customarily able to handle larger
168 * page orders.
169 */
170#define DEFAULT_MAX_ORDER 2
171#define DEFAULT_MIN_OBJECTS 8
172
173#endif
174
175/*
176 * Minimum number of partial slabs. These will be left on the partial
177 * lists even if they are empty. kmem_cache_shrink may reclaim them.
178 */
179#define MIN_PARTIAL 2
180
181/*
182 * Maximum number of desirable partial slabs.
183 * The existence of more partial slabs makes kmem_cache_shrink
184 * sort the partial list by the number of objects in use.
185 */
186#define MAX_PARTIAL 10
187
188#define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \
189				SLAB_POISON | SLAB_STORE_USER)
190
191/*
192 * Set of flags that will prevent slab merging
193 */
194#define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
195		SLAB_TRACE | SLAB_DESTROY_BY_RCU)
196
197#define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
198		SLAB_CACHE_DMA)
199
200#ifndef ARCH_KMALLOC_MINALIGN
201#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
202#endif
203
204#ifndef ARCH_SLAB_MINALIGN
205#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
206#endif
207
208/* Internal SLUB flags */
209#define __OBJECT_POISON 0x80000000	/* Poison object */
210
211/* Not all arches define cache_line_size */
212#ifndef cache_line_size
213#define cache_line_size()	L1_CACHE_BYTES
214#endif
215
216static int kmem_size = sizeof(struct kmem_cache);
217
218#ifdef CONFIG_SMP
219static struct notifier_block slab_notifier;
220#endif
221
222static enum {
223	DOWN,		/* No slab functionality available */
224	PARTIAL,	/* kmem_cache_open() works but kmalloc does not */
225	UP,		/* Everything works but does not show up in sysfs */
226	SYSFS		/* Sysfs up */
227} slab_state = DOWN;
228
229/* A list of all slab caches on the system */
230static DECLARE_RWSEM(slub_lock);
231LIST_HEAD(slab_caches);
232
233/*
234 * Tracking user of a slab.
235 */
236struct track {
237	void *addr;		/* Called from address */
238	int cpu;		/* Was running on cpu */
239	int pid;		/* Pid context */
240	unsigned long when;	/* When did the operation occur */
241};
242
243enum track_item { TRACK_ALLOC, TRACK_FREE };
244
245#if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
246static int sysfs_slab_add(struct kmem_cache *);
247static int sysfs_slab_alias(struct kmem_cache *, const char *);
248static void sysfs_slab_remove(struct kmem_cache *);
249#else
250static int sysfs_slab_add(struct kmem_cache *s) { return 0; }
251static int sysfs_slab_alias(struct kmem_cache *s, const char *p) { return 0; }
252static void sysfs_slab_remove(struct kmem_cache *s) {}
253#endif
254
255/********************************************************************
256 * 			Core slab cache functions
257 *******************************************************************/
258
259int slab_is_available(void)
260{
261	return slab_state >= UP;
262}
263
264static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
265{
266#ifdef CONFIG_NUMA
267	return s->node[node];
268#else
269	return &s->local_node;
270#endif
271}
272
273static inline int check_valid_pointer(struct kmem_cache *s,
274				struct page *page, const void *object)
275{
276	void *base;
277
278	if (!object)
279		return 1;
280
281	base = page_address(page);
282	if (object < base || object >= base + s->objects * s->size ||
283		(object - base) % s->size) {
284		return 0;
285	}
286
287	return 1;
288}
289
290/*
291 * Slow version of get and set free pointer.
292 *
293 * This version requires touching the cache lines of kmem_cache, which
294 * we avoid in the fast alloc and free paths. There we obtain the offset
295 * from the page struct.
296 */
297static inline void *get_freepointer(struct kmem_cache *s, void *object)
298{
299	return *(void **)(object + s->offset);
300}
301
302static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
303{
304	*(void **)(object + s->offset) = fp;
305}
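
/*
 * Freelist layout sketch (illustration only): with s->offset == 0 the
 * free pointer overlays the first word of each free object, so a slab
 * with three free objects A, B and C might look like:
 *
 *	page->freelist -> A -> B -> C -> NULL
 *
 * where get_freepointer(s, A) == B and set_freepointer(s, C, NULL)
 * terminates the chain.
 */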
306
307/* Loop over all objects in a slab */
308#define for_each_object(__p, __s, __addr) \
309	for (__p = (__addr); __p < (__addr) + (__s)->objects * (__s)->size;\
310			__p += (__s)->size)
311
312/* Scan freelist */
313#define for_each_free_object(__p, __s, __free) \
314	for (__p = (__free); __p; __p = get_freepointer((__s), __p))
315
316/* Determine object index from a given position */
317static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
318{
319	return (p - addr) / s->size;
320}
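
/*
 * Example (illustration only): with s->size == 128 the object at
 * addr + 384 has slab_index() == 3, and for_each_object() visits
 * indexes 0 .. s->objects - 1 in address order.
 */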
321
322#ifdef CONFIG_SLUB_DEBUG
323/*
324 * Debug settings:
325 */
326static int slub_debug;
327
328static char *slub_debug_slabs;
329
330/*
331 * Object debugging
332 */
333static void print_section(char *text, u8 *addr, unsigned int length)
334{
335	int i, offset;
336	int newline = 1;
337	char ascii[17];
338
339	ascii[16] = 0;
340
341	for (i = 0; i < length; i++) {
342		if (newline) {
343			printk(KERN_ERR "%10s 0x%p: ", text, addr + i);
344			newline = 0;
345		}
346		printk(" %02x", addr[i]);
347		offset = i % 16;
348		ascii[offset] = isgraph(addr[i]) ? addr[i] : '.';
349		if (offset == 15) {
350			printk(" %s\n", ascii);
351			newline = 1;
352		}
353	}
354	if (!newline) {
355		i %= 16;
356		while (i < 16) {
357			printk("   ");
358			ascii[i] = ' ';
359			i++;
360		}
361		printk(" %s\n", ascii);
362	}
363}
364
365static struct track *get_track(struct kmem_cache *s, void *object,
366	enum track_item alloc)
367{
368	struct track *p;
369
370	if (s->offset)
371		p = object + s->offset + sizeof(void *);
372	else
373		p = object + s->inuse;
374
375	return p + alloc;
376}
377
378static void set_track(struct kmem_cache *s, void *object,
379				enum track_item alloc, void *addr)
380{
381	struct track *p;
382
383	if (s->offset)
384		p = object + s->offset + sizeof(void *);
385	else
386		p = object + s->inuse;
387
388	p += alloc;
389	if (addr) {
390		p->addr = addr;
391		p->cpu = smp_processor_id();
392		p->pid = current ? current->pid : -1;
393		p->when = jiffies;
394	} else
395		memset(p, 0, sizeof(struct track));
396}
397
398static void init_tracking(struct kmem_cache *s, void *object)
399{
400	if (s->flags & SLAB_STORE_USER) {
401		set_track(s, object, TRACK_FREE, NULL);
402		set_track(s, object, TRACK_ALLOC, NULL);
403	}
404}
405
406static void print_track(const char *s, struct track *t)
407{
408	if (!t->addr)
409		return;
410
411	printk(KERN_ERR "%s: ", s);
412	__print_symbol("%s", (unsigned long)t->addr);
413	printk(" jiffies_ago=%lu cpu=%u pid=%d\n", jiffies - t->when, t->cpu, t->pid);
414}
415
416static void print_trailer(struct kmem_cache *s, u8 *p)
417{
418	unsigned int off;	/* Offset of last byte */
419
420	if (s->flags & SLAB_RED_ZONE)
421		print_section("Redzone", p + s->objsize,
422			s->inuse - s->objsize);
423
424	printk(KERN_ERR "FreePointer 0x%p -> 0x%p\n",
425			p + s->offset,
426			get_freepointer(s, p));
427
428	if (s->offset)
429		off = s->offset + sizeof(void *);
430	else
431		off = s->inuse;
432
433	if (s->flags & SLAB_STORE_USER) {
434		print_track("Last alloc", get_track(s, p, TRACK_ALLOC));
435		print_track("Last free ", get_track(s, p, TRACK_FREE));
436		off += 2 * sizeof(struct track);
437	}
438
439	if (off != s->size)
440		/* Beginning of the filler is the free pointer */
441		print_section("Filler", p + off, s->size - off);
442}
443
444static void object_err(struct kmem_cache *s, struct page *page,
445			u8 *object, char *reason)
446{
447	u8 *addr = page_address(page);
448
449	printk(KERN_ERR "*** SLUB %s: %s@0x%p slab 0x%p\n",
450			s->name, reason, object, page);
451	printk(KERN_ERR "    offset=%tu flags=0x%04lx inuse=%u freelist=0x%p\n",
452		object - addr, page->flags, page->inuse, page->freelist);
453	if (object > addr + 16)
454		print_section("Bytes b4", object - 16, 16);
455	print_section("Object", object, min(s->objsize, 128));
456	print_trailer(s, object);
457	dump_stack();
458}
459
460static void slab_err(struct kmem_cache *s, struct page *page, char *reason, ...)
461{
462	va_list args;
463	char buf[100];
464
465	va_start(args, reason);
466	vsnprintf(buf, sizeof(buf), reason, args);
467	va_end(args);
468	printk(KERN_ERR "*** SLUB %s: %s in slab @0x%p\n", s->name, buf,
469		page);
470	dump_stack();
471}
472
473static void init_object(struct kmem_cache *s, void *object, int active)
474{
475	u8 *p = object;
476
477	if (s->flags & __OBJECT_POISON) {
478		memset(p, POISON_FREE, s->objsize - 1);
479		p[s->objsize - 1] = POISON_END;
480	}
481
482	if (s->flags & SLAB_RED_ZONE)
483		memset(p + s->objsize,
484			active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE,
485			s->inuse - s->objsize);
486}
487
488static int check_bytes(u8 *start, unsigned int value, unsigned int bytes)
489{
490	while (bytes) {
491		if (*start != (u8)value)
492			return 0;
493		start++;
494		bytes--;
495	}
496	return 1;
497}
498
499/*
500 * Object layout:
501 *
502 * object address
503 * 	Bytes of the object to be managed.
504 * 	If the freepointer may overlay the object then the free
505 * 	pointer is the first word of the object.
506 *
507 * 	Poisoning uses 0x6b (POISON_FREE) and the last byte is
508 * 	0xa5 (POISON_END)
509 *
510 * object + s->objsize
511 * 	Padding to reach word boundary. This is also used for Redzoning.
512 * 	Padding is extended by another word if Redzoning is enabled and
513 * 	objsize == inuse.
514 *
515 * 	We fill with 0xbb (RED_INACTIVE) for inactive objects and with
516 * 	0xcc (RED_ACTIVE) for objects in use.
517 *
518 * object + s->inuse
519 * 	Meta data starts here.
520 *
521 * 	A. Free pointer (if we cannot overwrite object on free)
522 * 	B. Tracking data for SLAB_STORE_USER
523 * 	C. Padding to reach required alignment boundary or at minimum
524 * 		one word if debugging is on to be able to detect writes
525 * 		before the word boundary.
526 *
527 *	Padding is done using 0x5a (POISON_INUSE)
528 *
529 * object + s->size
530 * 	Nothing is used beyond s->size.
531 *
532 * If slabcaches are merged then the objsize and inuse boundaries are mostly
533 * ignored, and therefore no slab options that rely on these boundaries
534 * may be used with merged slabcaches.
535 */
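
/*
 * Worked example (a sketch, assuming a 64 bit system and full debugging):
 * for objsize == 52 the object is first padded to a word boundary,
 * giving inuse == 56. Poisoning forbids overwriting the object on free,
 * so the free pointer moves behind the object (offset == 56, one extra
 * word), SLAB_STORE_USER appends two struct track entries and
 * SLAB_RED_ZONE adds one more word of padding. The result is rounded up
 * to the alignment to yield s->size. See calculate_sizes() below.
 */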
536
537static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
538						void *from, void *to)
539{
540	printk(KERN_ERR "@@@ SLUB %s: Restoring %s (0x%x) from 0x%p-0x%p\n",
541		s->name, message, data, from, to - 1);
542	memset(from, data, to - from);
543}
544
545static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
546{
547	unsigned long off = s->inuse;	/* The end of info */
548
549	if (s->offset)
550		/* Freepointer is placed after the object. */
551		off += sizeof(void *);
552
553	if (s->flags & SLAB_STORE_USER)
554		/* We also have user information there */
555		off += 2 * sizeof(struct track);
556
557	if (s->size == off)
558		return 1;
559
560	if (check_bytes(p + off, POISON_INUSE, s->size - off))
561		return 1;
562
563	object_err(s, page, p, "Object padding check fails");
564
565	/*
566	 * Restore padding
567	 */
568	restore_bytes(s, "object padding", POISON_INUSE, p + off, p + s->size);
569	return 0;
570}
571
572static int slab_pad_check(struct kmem_cache *s, struct page *page)
573{
574	u8 *p;
575	int length, remainder;
576
577	if (!(s->flags & SLAB_POISON))
578		return 1;
579
580	p = page_address(page);
581	length = s->objects * s->size;
582	remainder = (PAGE_SIZE << s->order) - length;
583	if (!remainder)
584		return 1;
585
586	if (!check_bytes(p + length, POISON_INUSE, remainder)) {
587		slab_err(s, page, "Padding check failed");
588		restore_bytes(s, "slab padding", POISON_INUSE, p + length,
589			p + length + remainder);
590		return 0;
591	}
592	return 1;
593}
594
595static int check_object(struct kmem_cache *s, struct page *page,
596					void *object, int active)
597{
598	u8 *p = object;
599	u8 *endobject = object + s->objsize;
600
601	if (s->flags & SLAB_RED_ZONE) {
602		unsigned int red =
603			active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE;
604
605		if (!check_bytes(endobject, red, s->inuse - s->objsize)) {
606			object_err(s, page, object,
607			active ? "Redzone Active" : "Redzone Inactive");
608			restore_bytes(s, "redzone", red,
609				endobject, object + s->inuse);
610			return 0;
611		}
612	} else {
613		if ((s->flags & SLAB_POISON) && s->objsize < s->inuse &&
614			!check_bytes(endobject, POISON_INUSE,
615					s->inuse - s->objsize)) {
616			object_err(s, page, p, "Alignment padding check fails");
617			/*
618			 * Fix it so that there will not be another report.
619			 *
620			 * Hmmm... We may be corrupting an object that now expects
621			 * to be longer than allowed.
622			 */
623			restore_bytes(s, "alignment padding", POISON_INUSE,
624				endobject, object + s->inuse);
625		}
626	}
627
628	if (s->flags & SLAB_POISON) {
629		if (!active && (s->flags & __OBJECT_POISON) &&
630			(!check_bytes(p, POISON_FREE, s->objsize - 1) ||
631				p[s->objsize - 1] != POISON_END)) {
632
633			object_err(s, page, p, "Poison check failed");
634			restore_bytes(s, "Poison", POISON_FREE,
635						p, p + s->objsize - 1);
636			restore_bytes(s, "Poison", POISON_END,
637					p + s->objsize - 1, p + s->objsize);
638			return 0;
639		}
640		/*
641		 * check_pad_bytes cleans up on its own.
642		 */
643		check_pad_bytes(s, page, p);
644	}
645
646	if (!s->offset && active)
647		/*
648		 * Object and freepointer overlap. Cannot check
649		 * freepointer while object is allocated.
650		 */
651		return 1;
652
653	/* Check free pointer validity */
654	if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
655		object_err(s, page, p, "Freepointer corrupt");
656		/*
657		 * No choice but to zap it and thus lose the remainder
658		 * of the free objects in this slab. May cause
659		 * another error because the object count is now wrong.
660		 */
661		set_freepointer(s, p, NULL);
662		return 0;
663	}
664	return 1;
665}
666
667static int check_slab(struct kmem_cache *s, struct page *page)
668{
669	VM_BUG_ON(!irqs_disabled());
670
671	if (!PageSlab(page)) {
672		slab_err(s, page, "Not a valid slab page flags=%lx "
673			"mapping=0x%p count=%d", page->flags, page->mapping,
674			page_count(page));
675		return 0;
676	}
677	if (page->offset * sizeof(void *) != s->offset) {
678		slab_err(s, page, "Corrupted offset %lu flags=0x%lx "
679			"mapping=0x%p count=%d",
680			(unsigned long)(page->offset * sizeof(void *)),
681			page->flags,
682			page->mapping,
683			page_count(page));
684		return 0;
685	}
686	if (page->inuse > s->objects) {
687		slab_err(s, page, "inuse %u > max %u flags=%lx "
688			"mapping=0x%p count=%d",
689			page->inuse, s->objects, page->flags,
690			page->mapping, page_count(page));
691		return 0;
692	}
693	/* Slab_pad_check fixes things up after itself */
694	slab_pad_check(s, page);
695	return 1;
696}
697
698/*
699 * Determine if a certain object on a page is on the freelist. Must hold the
700 * slab lock to guarantee that the chains are in a consistent state.
701 */
702static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
703{
704	int nr = 0;
705	void *fp = page->freelist;
706	void *object = NULL;
707
708	while (fp && nr <= s->objects) {
709		if (fp == search)
710			return 1;
711		if (!check_valid_pointer(s, page, fp)) {
712			if (object) {
713				object_err(s, page, object,
714					"Freechain corrupt");
715				set_freepointer(s, object, NULL);
716				break;
717			} else {
718				slab_err(s, page, "Freepointer 0x%p corrupt",
719									fp);
720				page->freelist = NULL;
721				page->inuse = s->objects;
722				printk(KERN_ERR "@@@ SLUB %s: Freelist "
723					"cleared. Slab 0x%p\n",
724					s->name, page);
725				return 0;
726			}
727			break;
728		}
729		object = fp;
730		fp = get_freepointer(s, object);
731		nr++;
732	}
733
734	if (page->inuse != s->objects - nr) {
735		slab_err(s, page, "Wrong object count. Counter is %d but "
736			"counted were %d", page->inuse,
737						s->objects - nr);
738		page->inuse = s->objects - nr;
739		printk(KERN_ERR "@@@ SLUB %s: Object count adjusted. "
740			"Slab @0x%p\n", s->name, page);
741	}
742	return search == NULL;
743}
744
745static void trace(struct kmem_cache *s, struct page *page, void *object, int alloc)
746{
747	if (s->flags & SLAB_TRACE) {
748		printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
749			s->name,
750			alloc ? "alloc" : "free",
751			object, page->inuse,
752			page->freelist);
753
754		if (!alloc)
755			print_section("Object", (void *)object, s->objsize);
756
757		dump_stack();
758	}
759}
760
761/*
762 * Tracking of fully allocated slabs for debugging purposes.
763 */
764static void add_full(struct kmem_cache_node *n, struct page *page)
765{
766	spin_lock(&n->list_lock);
767	list_add(&page->lru, &n->full);
768	spin_unlock(&n->list_lock);
769}
770
771static void remove_full(struct kmem_cache *s, struct page *page)
772{
773	struct kmem_cache_node *n;
774
775	if (!(s->flags & SLAB_STORE_USER))
776		return;
777
778	n = get_node(s, page_to_nid(page));
779
780	spin_lock(&n->list_lock);
781	list_del(&page->lru);
782	spin_unlock(&n->list_lock);
783}
784
785static void setup_object_debug(struct kmem_cache *s, struct page *page,
786								void *object)
787{
788	if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
789		return;
790
791	init_object(s, object, 0);
792	init_tracking(s, object);
793}
794
795static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
796						void *object, void *addr)
797{
798	if (!check_slab(s, page))
799		goto bad;
800
801	if (object && !on_freelist(s, page, object)) {
802		slab_err(s, page, "Object 0x%p already allocated", object);
803		goto bad;
804	}
805
806	if (!check_valid_pointer(s, page, object)) {
807		object_err(s, page, object, "Freelist Pointer check fails");
808		goto bad;
809	}
810
811	if (object && !check_object(s, page, object, 0))
812		goto bad;
813
814	/* Success. Perform special debug activities for allocs */
815	if (s->flags & SLAB_STORE_USER)
816		set_track(s, object, TRACK_ALLOC, addr);
817	trace(s, page, object, 1);
818	init_object(s, object, 1);
819	return 1;
820
821bad:
822	if (PageSlab(page)) {
823		/*
824		 * If this is a slab page then let's do the best we can
825		 * to avoid issues in the future. Marking all objects
826		 * as used avoids touching the remaining objects.
827		 */
828		printk(KERN_ERR "@@@ SLUB: %s slab 0x%p. Marking all objects used.\n",
829			s->name, page);
830		page->inuse = s->objects;
831		page->freelist = NULL;
832		/* Fix up fields that may be corrupted */
833		page->offset = s->offset / sizeof(void *);
834	}
835	return 0;
836}
837
838static int free_debug_processing(struct kmem_cache *s, struct page *page,
839						void *object, void *addr)
840{
841	if (!check_slab(s, page))
842		goto fail;
843
844	if (!check_valid_pointer(s, page, object)) {
845		slab_err(s, page, "Invalid object pointer 0x%p", object);
846		goto fail;
847	}
848
849	if (on_freelist(s, page, object)) {
850		slab_err(s, page, "Object 0x%p already free", object);
851		goto fail;
852	}
853
854	if (!check_object(s, page, object, 1))
855		return 0;
856
857	if (unlikely(s != page->slab)) {
858		if (!PageSlab(page))
859			slab_err(s, page, "Attempt to free object(0x%p) "
860				"outside of slab", object);
861		else
862		if (!page->slab) {
863			printk(KERN_ERR
864				"SLUB <none>: no slab for object 0x%p.\n",
865						object);
866			dump_stack();
867		}
868		else
869			slab_err(s, page, "object at 0x%p belongs "
870				"to slab %s", object, page->slab->name);
871		goto fail;
872	}
873
874	/* Special debug activities for freeing objects */
875	if (!SlabFrozen(page) && !page->freelist)
876		remove_full(s, page);
877	if (s->flags & SLAB_STORE_USER)
878		set_track(s, object, TRACK_FREE, addr);
879	trace(s, page, object, 0);
880	init_object(s, object, 0);
881	return 1;
882
883fail:
884	printk(KERN_ERR "@@@ SLUB: %s slab 0x%p object at 0x%p not freed.\n",
885		s->name, page, object);
886	return 0;
887}
888
889static int __init setup_slub_debug(char *str)
890{
891	if (!str || *str != '=')
892		slub_debug = DEBUG_DEFAULT_FLAGS;
893	else {
894		str++;
895		if (*str == 0 || *str == ',')
896			slub_debug = DEBUG_DEFAULT_FLAGS;
897		else
898		for (; *str && *str != ','; str++)
899			switch (*str) {
900			case 'f' : case 'F' :
901				slub_debug |= SLAB_DEBUG_FREE;
902				break;
903			case 'z' : case 'Z' :
904				slub_debug |= SLAB_RED_ZONE;
905				break;
906			case 'p' : case 'P' :
907				slub_debug |= SLAB_POISON;
908				break;
909			case 'u' : case 'U' :
910				slub_debug |= SLAB_STORE_USER;
911				break;
912			case 't' : case 'T' :
913				slub_debug |= SLAB_TRACE;
914				break;
915			default:
916				printk(KERN_ERR "slub_debug option '%c' "
917					"unknown, skipped\n", *str);
918			}
919	}
920
921	if (*str == ',')
922		slub_debug_slabs = str + 1;
923	return 1;
924}
925
926__setup("slub_debug", setup_slub_debug);
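
/*
 * Boot command line examples (sketch):
 *
 *	slub_debug		enable all debug options
 *	slub_debug=FZ		only sanity checks (F) and red zoning (Z)
 *	slub_debug=,dentry	enable all options, but only for caches
 *				whose name starts with "dentry"
 */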
927
928static void kmem_cache_open_debug_check(struct kmem_cache *s)
929{
930	/*
931	 * The page->offset field is only 16 bit wide. This is an offset
932	 * in units of words from the beginning of an object. If the object
933	 * size is bigger than that limit, we cannot move the free pointer
934	 * behind the object anymore.
935	 *
936	 * On 32 bit platforms the limit is 256k. On 64 bit platforms
937	 * the limit is 512k.
938	 *
939	 * Debugging or ctor may create a need to move the free
940	 * pointer. Fail if this happens.
941	 */
942	if (s->objsize >= 65535 * sizeof(void *)) {
943		BUG_ON(s->flags & (SLAB_RED_ZONE | SLAB_POISON |
944				SLAB_STORE_USER | SLAB_DESTROY_BY_RCU));
945		BUG_ON(s->ctor);
946	}
947	else
948		/*
949		 * Enable debugging if selected on the kernel command line.
950		 */
951		if (slub_debug && (!slub_debug_slabs ||
952		    strncmp(slub_debug_slabs, s->name,
953		    	strlen(slub_debug_slabs)) == 0))
954				s->flags |= slub_debug;
955}
956#else
957static inline void setup_object_debug(struct kmem_cache *s,
958			struct page *page, void *object) {}
959
960static inline int alloc_debug_processing(struct kmem_cache *s,
961	struct page *page, void *object, void *addr) { return 0; }
962
963static inline int free_debug_processing(struct kmem_cache *s,
964	struct page *page, void *object, void *addr) { return 0; }
965
966static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
967			{ return 1; }
968static inline int check_object(struct kmem_cache *s, struct page *page,
969			void *object, int active) { return 1; }
970static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
971static inline void kmem_cache_open_debug_check(struct kmem_cache *s) {}
972#define slub_debug 0
973#endif
974/*
975 * Slab allocation and freeing
976 */
977static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
978{
979	struct page *page;
980	int pages = 1 << s->order;
981
982	if (s->order)
983		flags |= __GFP_COMP;
984
985	if (s->flags & SLAB_CACHE_DMA)
986		flags |= SLUB_DMA;
987
988	if (node == -1)
989		page = alloc_pages(flags, s->order);
990	else
991		page = alloc_pages_node(node, flags, s->order);
992
993	if (!page)
994		return NULL;
995
996	mod_zone_page_state(page_zone(page),
997		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
998		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
999		pages);
1000
1001	return page;
1002}
1003
1004static void setup_object(struct kmem_cache *s, struct page *page,
1005				void *object)
1006{
1007	setup_object_debug(s, page, object);
1008	if (unlikely(s->ctor))
1009		s->ctor(object, s, 0);
1010}
1011
1012static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
1013{
1014	struct page *page;
1015	struct kmem_cache_node *n;
1016	void *start;
1017	void *end;
1018	void *last;
1019	void *p;
1020
1021	BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK));
1022
1023	if (flags & __GFP_WAIT)
1024		local_irq_enable();
1025
1026	page = allocate_slab(s, flags & GFP_LEVEL_MASK, node);
1027	if (!page)
1028		goto out;
1029
1030	n = get_node(s, page_to_nid(page));
1031	if (n)
1032		atomic_long_inc(&n->nr_slabs);
1033	page->offset = s->offset / sizeof(void *);
1034	page->slab = s;
1035	page->flags |= 1 << PG_slab;
1036	if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
1037			SLAB_STORE_USER | SLAB_TRACE))
1038		SetSlabDebug(page);
1039
1040	start = page_address(page);
1041	end = start + s->objects * s->size;
1042
1043	if (unlikely(s->flags & SLAB_POISON))
1044		memset(start, POISON_INUSE, PAGE_SIZE << s->order);
1045
1046	last = start;
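	/*
	 * Build the initial freelist: thread the free pointer through
	 * all objects in address order and terminate the chain at the
	 * last object.
	 */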
1047	for_each_object(p, s, start) {
1048		setup_object(s, page, last);
1049		set_freepointer(s, last, p);
1050		last = p;
1051	}
1052	setup_object(s, page, last);
1053	set_freepointer(s, last, NULL);
1054
1055	page->freelist = start;
1056	page->lockless_freelist = NULL;
1057	page->inuse = 0;
1058out:
1059	if (flags & __GFP_WAIT)
1060		local_irq_disable();
1061	return page;
1062}
1063
1064static void __free_slab(struct kmem_cache *s, struct page *page)
1065{
1066	int pages = 1 << s->order;
1067
1068	if (unlikely(SlabDebug(page))) {
1069		void *p;
1070
1071		slab_pad_check(s, page);
1072		for_each_object(p, s, page_address(page))
1073			check_object(s, page, p, 0);
1074	}
1075
1076	mod_zone_page_state(page_zone(page),
1077		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
1078		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
1079		- pages);
1080
1081	page->mapping = NULL;
1082	__free_pages(page, s->order);
1083}
1084
1085static void rcu_free_slab(struct rcu_head *h)
1086{
1087	struct page *page;
1088
1089	page = container_of((struct list_head *)h, struct page, lru);
1090	__free_slab(page->slab, page);
1091}
1092
1093static void free_slab(struct kmem_cache *s, struct page *page)
1094{
1095	if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) {
1096		/*
1097		 * RCU free overloads the RCU head over the LRU
1098		 */
1099		struct rcu_head *head = (void *)&page->lru;
1100
1101		call_rcu(head, rcu_free_slab);
1102	} else
1103		__free_slab(s, page);
1104}
1105
1106static void discard_slab(struct kmem_cache *s, struct page *page)
1107{
1108	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1109
1110	atomic_long_dec(&n->nr_slabs);
1111	reset_page_mapcount(page);
1112	ClearSlabDebug(page);
1113	__ClearPageSlab(page);
1114	free_slab(s, page);
1115}
1116
1117/*
1118 * Per slab locking using the pagelock
1119 */
1120static __always_inline void slab_lock(struct page *page)
1121{
1122	bit_spin_lock(PG_locked, &page->flags);
1123}
1124
1125static __always_inline void slab_unlock(struct page *page)
1126{
1127	bit_spin_unlock(PG_locked, &page->flags);
1128}
1129
1130static __always_inline int slab_trylock(struct page *page)
1131{
1132	int rc = 1;
1133
1134	rc = bit_spin_trylock(PG_locked, &page->flags);
1135	return rc;
1136}
1137
1138/*
1139 * Management of partially allocated slabs
1140 */
1141static void add_partial_tail(struct kmem_cache_node *n, struct page *page)
1142{
1143	spin_lock(&n->list_lock);
1144	n->nr_partial++;
1145	list_add_tail(&page->lru, &n->partial);
1146	spin_unlock(&n->list_lock);
1147}
1148
1149static void add_partial(struct kmem_cache_node *n, struct page *page)
1150{
1151	spin_lock(&n->list_lock);
1152	n->nr_partial++;
1153	list_add(&page->lru, &n->partial);
1154	spin_unlock(&n->list_lock);
1155}
1156
1157static void remove_partial(struct kmem_cache *s,
1158						struct page *page)
1159{
1160	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1161
1162	spin_lock(&n->list_lock);
1163	list_del(&page->lru);
1164	n->nr_partial--;
1165	spin_unlock(&n->list_lock);
1166}
1167
1168/*
1169 * Lock slab and remove from the partial list.
1170 *
1171 * Must hold list_lock.
1172 */
1173static inline int lock_and_freeze_slab(struct kmem_cache_node *n, struct page *page)
1174{
1175	if (slab_trylock(page)) {
1176		list_del(&page->lru);
1177		n->nr_partial--;
1178		SetSlabFrozen(page);
1179		return 1;
1180	}
1181	return 0;
1182}
1183
1184/*
1185 * Try to allocate a partial slab from a specific node.
1186 */
1187static struct page *get_partial_node(struct kmem_cache_node *n)
1188{
1189	struct page *page;
1190
1191	/*
1192	 * Racy check. If we mistakenly see no partial slabs then we
1193	 * just allocate an empty slab. If we mistakenly try to get a
1194	 * partial slab and there is none available then get_partial_node()
1195	 * will return NULL.
1196	 */
1197	if (!n || !n->nr_partial)
1198		return NULL;
1199
1200	spin_lock(&n->list_lock);
1201	list_for_each_entry(page, &n->partial, lru)
1202		if (lock_and_freeze_slab(n, page))
1203			goto out;
1204	page = NULL;
1205out:
1206	spin_unlock(&n->list_lock);
1207	return page;
1208}
1209
1210/*
1211 * Get a page from somewhere. Search in increasing NUMA distances.
1212 */
1213static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
1214{
1215#ifdef CONFIG_NUMA
1216	struct zonelist *zonelist;
1217	struct zone **z;
1218	struct page *page;
1219
1220	/*
1221	 * The defrag ratio allows a configuration of the tradeoffs between
1222	 * inter node defragmentation and node local allocations. A lower
1223	 * defrag_ratio increases the tendency to do local allocations
1224	 * instead of attempting to obtain partial slabs from other nodes.
1225	 *
1226	 * If the defrag_ratio is set to 0 then kmalloc() always
1227	 * returns node local objects. If the ratio is higher then kmalloc()
1228	 * may return off node objects because partial slabs are obtained
1229	 * from other nodes and filled up.
1230	 *
1231	 * If /sys/slab/xx/defrag_ratio is set to 100 (which makes
1232	 * defrag_ratio = 1000) then every (well almost) allocation will
1233	 * first attempt to defrag slab caches on other nodes. This means
1234	 * scanning over all nodes to look for partial slabs which may be
1235	 * expensive if we do it every time we are trying to find a slab
1236	 * with available objects.
1237	 */
1238	if (!s->defrag_ratio || get_cycles() % 1024 > s->defrag_ratio)
1239		return NULL;
1240
1241	zonelist = &NODE_DATA(slab_node(current->mempolicy))
1242					->node_zonelists[gfp_zone(flags)];
1243	for (z = zonelist->zones; *z; z++) {
1244		struct kmem_cache_node *n;
1245
1246		n = get_node(s, zone_to_nid(*z));
1247
1248		if (n && cpuset_zone_allowed_hardwall(*z, flags) &&
1249				n->nr_partial > MIN_PARTIAL) {
1250			page = get_partial_node(n);
1251			if (page)
1252				return page;
1253		}
1254	}
1255#endif
1256	return NULL;
1257}
1258
1259/*
1260 * Get a partial page, lock it and return it.
1261 */
1262static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
1263{
1264	struct page *page;
1265	int searchnode = (node == -1) ? numa_node_id() : node;
1266
1267	page = get_partial_node(get_node(s, searchnode));
1268	if (page || (flags & __GFP_THISNODE))
1269		return page;
1270
1271	return get_any_partial(s, flags);
1272}
1273
1274/*
1275 * Move a page back to the lists.
1276 *
1277 * Must be called with the slab lock held.
1278 *
1279 * On exit the slab lock will have been dropped.
1280 */
1281static void unfreeze_slab(struct kmem_cache *s, struct page *page)
1282{
1283	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1284
1285	ClearSlabFrozen(page);
1286	if (page->inuse) {
1287
1288		if (page->freelist)
1289			add_partial(n, page);
1290		else if (SlabDebug(page) && (s->flags & SLAB_STORE_USER))
1291			add_full(n, page);
1292		slab_unlock(page);
1293
1294	} else {
1295		if (n->nr_partial < MIN_PARTIAL) {
1296			/*
1297			 * Adding an empty slab to the partial slabs in order
1298			 * to avoid page allocator overhead. This slab needs
1299			 * to come after the other slabs with objects in
1300			 * order to fill them up. That way the size of the
1301			 * partial list stays small. kmem_cache_shrink can
1302			 * reclaim empty slabs from the partial list.
1303			 */
1304			add_partial_tail(n, page);
1305			slab_unlock(page);
1306		} else {
1307			slab_unlock(page);
1308			discard_slab(s, page);
1309		}
1310	}
1311}
1312
1313/*
1314 * Remove the cpu slab
1315 */
1316static void deactivate_slab(struct kmem_cache *s, struct page *page, int cpu)
1317{
1318	/*
1319	 * Merge cpu freelist into slab freelist. Typically we get here
1320	 * because both freelists are empty. So this is unlikely
1321	 * to occur.
1322	 */
1323	while (unlikely(page->lockless_freelist)) {
1324		void **object;
1325
1326		/* Retrieve object from cpu_freelist */
1327		object = page->lockless_freelist;
1328		page->lockless_freelist = page->lockless_freelist[page->offset];
1329
1330		/* And put onto the regular freelist */
1331		object[page->offset] = page->freelist;
1332		page->freelist = object;
1333		page->inuse--;
1334	}
1335	s->cpu_slab[cpu] = NULL;
1336	unfreeze_slab(s, page);
1337}
1338
1339static void flush_slab(struct kmem_cache *s, struct page *page, int cpu)
1340{
1341	slab_lock(page);
1342	deactivate_slab(s, page, cpu);
1343}
1344
1345/*
1346 * Flush cpu slab.
1347 * Called from IPI handler with interrupts disabled.
1348 */
1349static void __flush_cpu_slab(struct kmem_cache *s, int cpu)
1350{
1351	struct page *page = s->cpu_slab[cpu];
1352
1353	if (likely(page))
1354		flush_slab(s, page, cpu);
1355}
1356
1357static void flush_cpu_slab(void *d)
1358{
1359	struct kmem_cache *s = d;
1360	int cpu = smp_processor_id();
1361
1362	__flush_cpu_slab(s, cpu);
1363}
1364
1365static void flush_all(struct kmem_cache *s)
1366{
1367#ifdef CONFIG_SMP
1368	on_each_cpu(flush_cpu_slab, s, 1, 1);
1369#else
1370	unsigned long flags;
1371
1372	local_irq_save(flags);
1373	flush_cpu_slab(s);
1374	local_irq_restore(flags);
1375#endif
1376}
1377
1378/*
1379 * Slow path. The lockless freelist is empty or we need to perform
1380 * debugging duties.
1381 *
1382 * Interrupts are disabled.
1383 *
1384 * Processing is still very fast if new objects have been freed to the
1385 * regular freelist. In that case we simply take over the regular freelist
1386 * as the lockless freelist and zap the regular freelist.
1387 *
1388 * If that is not working then we fall back to the partial lists. We take the
1389 * first element of the freelist as the object to allocate now and move the
1390 * rest of the freelist to the lockless freelist.
1391 *
1392 * And if we were unable to get a new slab from the partial slab lists then
1393 * we need to allocate a new slab. This is the slowest path since we may sleep.
1394 */
1395static void *__slab_alloc(struct kmem_cache *s,
1396		gfp_t gfpflags, int node, void *addr, struct page *page)
1397{
1398	void **object;
1399	int cpu = smp_processor_id();
1400
1401	if (!page)
1402		goto new_slab;
1403
1404	slab_lock(page);
1405	if (unlikely(node != -1 && page_to_nid(page) != node))
1406		goto another_slab;
1407load_freelist:
1408	object = page->freelist;
1409	if (unlikely(!object))
1410		goto another_slab;
1411	if (unlikely(SlabDebug(page)))
1412		goto debug;
1413
1414	object = page->freelist;
1415	page->lockless_freelist = object[page->offset];
1416	page->inuse = s->objects;
1417	page->freelist = NULL;
1418	slab_unlock(page);
1419	return object;
1420
1421another_slab:
1422	deactivate_slab(s, page, cpu);
1423
1424new_slab:
1425	page = get_partial(s, gfpflags, node);
1426	if (page) {
1427		s->cpu_slab[cpu] = page;
1428		goto load_freelist;
1429	}
1430
1431	page = new_slab(s, gfpflags, node);
1432	if (page) {
1433		cpu = smp_processor_id();
1434		if (s->cpu_slab[cpu]) {
1435			/*
1436			 * Someone else populated the cpu_slab while we
1437			 * enabled interrupts, or we have gotten scheduled
1438			 * on another cpu. The page may not be on the
1439			 * requested node even if __GFP_THISNODE was
1440			 * specified. So we need to recheck.
1441			 */
1442			if (node == -1 ||
1443				page_to_nid(s->cpu_slab[cpu]) == node) {
1444				/*
1445				 * Current cpuslab is acceptable and we
1446				 * want the current one since it is cache hot
1447				 */
1448				discard_slab(s, page);
1449				page = s->cpu_slab[cpu];
1450				slab_lock(page);
1451				goto load_freelist;
1452			}
1453			/* New slab does not fit our expectations */
1454			flush_slab(s, s->cpu_slab[cpu], cpu);
1455		}
1456		slab_lock(page);
1457		SetSlabFrozen(page);
1458		s->cpu_slab[cpu] = page;
1459		goto load_freelist;
1460	}
1461	return NULL;
1462debug:
1463	object = page->freelist;
1464	if (!alloc_debug_processing(s, page, object, addr))
1465		goto another_slab;
1466
1467	page->inuse++;
1468	page->freelist = object[page->offset];
1469	slab_unlock(page);
1470	return object;
1471}
1472
1473/*
1474 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
1475 * have the fastpath folded into their functions. So no function call
1476 * overhead for requests that can be satisfied on the fastpath.
1477 *
1478 * The fastpath works by first checking if the lockless freelist can be used.
1479 * If not then __slab_alloc is called for slow processing.
1480 *
1481 * Otherwise we can simply pick the next object from the lockless free list.
1482 */
1483static void __always_inline *slab_alloc(struct kmem_cache *s,
1484				gfp_t gfpflags, int node, void *addr)
1485{
1486	struct page *page;
1487	void **object;
1488	unsigned long flags;
1489
1490	local_irq_save(flags);
1491	page = s->cpu_slab[smp_processor_id()];
1492	if (unlikely(!page || !page->lockless_freelist ||
1493			(node != -1 && page_to_nid(page) != node)))
1494
1495		object = __slab_alloc(s, gfpflags, node, addr, page);
1496
1497	else {
1498		object = page->lockless_freelist;
1499		page->lockless_freelist = object[page->offset];
1500	}
1501	local_irq_restore(flags);
1502	return object;
1503}
1504
1505void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
1506{
1507	return slab_alloc(s, gfpflags, -1, __builtin_return_address(0));
1508}
1509EXPORT_SYMBOL(kmem_cache_alloc);
1510
1511#ifdef CONFIG_NUMA
1512void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
1513{
1514	return slab_alloc(s, gfpflags, node, __builtin_return_address(0));
1515}
1516EXPORT_SYMBOL(kmem_cache_alloc_node);
1517#endif
1518
1519/*
1520 * Slow path handling. This may still be called frequently since objects
1521 * have a longer lifetime than the cpu slabs in most processing loads.
1522 *
1523 * So we still attempt to reduce cache line usage. Just take the slab
1524 * lock and free the item. If there is no additional partial page
1525 * handling required then we can return immediately.
1526 */
1527static void __slab_free(struct kmem_cache *s, struct page *page,
1528					void *x, void *addr)
1529{
1530	void *prior;
1531	void **object = (void *)x;
1532
1533	slab_lock(page);
1534
1535	if (unlikely(SlabDebug(page)))
1536		goto debug;
1537checks_ok:
1538	prior = object[page->offset] = page->freelist;
1539	page->freelist = object;
1540	page->inuse--;
1541
1542	if (unlikely(SlabFrozen(page)))
1543		goto out_unlock;
1544
1545	if (unlikely(!page->inuse))
1546		goto slab_empty;
1547
1548	/*
1549	 * Objects left in the slab. If it
1550	 * was not on the partial list before
1551	 * then add it.
1552	 */
1553	if (unlikely(!prior))
1554		add_partial(get_node(s, page_to_nid(page)), page);
1555
1556out_unlock:
1557	slab_unlock(page);
1558	return;
1559
1560slab_empty:
1561	if (prior)
1562		/*
1563		 * Slab still on the partial list.
1564		 */
1565		remove_partial(s, page);
1566
1567	slab_unlock(page);
1568	discard_slab(s, page);
1569	return;
1570
1571debug:
1572	if (!free_debug_processing(s, page, x, addr))
1573		goto out_unlock;
1574	goto checks_ok;
1575}
1576
1577/*
1578 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
1579 * can perform fastpath freeing without additional function calls.
1580 *
1581 * The fastpath is only possible if we are freeing to the current cpu slab
1582 * of this processor. This is typically the case if we have just allocated
1583 * the item before.
1584 *
1585 * If fastpath is not possible then fall back to __slab_free where we deal
1586 * with all sorts of special processing.
1587 */
1588static void __always_inline slab_free(struct kmem_cache *s,
1589			struct page *page, void *x, void *addr)
1590{
1591	void **object = (void *)x;
1592	unsigned long flags;
1593
1594	local_irq_save(flags);
1595	if (likely(page == s->cpu_slab[smp_processor_id()] &&
1596						!SlabDebug(page))) {
1597		object[page->offset] = page->lockless_freelist;
1598		page->lockless_freelist = object;
1599	} else
1600		__slab_free(s, page, x, addr);
1601
1602	local_irq_restore(flags);
1603}
1604
1605void kmem_cache_free(struct kmem_cache *s, void *x)
1606{
1607	struct page *page;
1608
1609	page = virt_to_head_page(x);
1610
1611	slab_free(s, page, x, __builtin_return_address(0));
1612}
1613EXPORT_SYMBOL(kmem_cache_free);
1614
1615/* Figure out on which slab object the object resides */
1616static struct page *get_object_page(const void *x)
1617{
1618	struct page *page = virt_to_head_page(x);
1619
1620	if (!PageSlab(page))
1621		return NULL;
1622
1623	return page;
1624}
1625
1626/*
1627 * Object placement in a slab is made very easy because we always start at
1628 * offset 0. If we tune the size of the object to the alignment then we can
1629 * get the required alignment by putting one properly sized object after
1630 * another.
1631 *
1632 * Notice that the allocation order determines the sizes of the per cpu
1633 * caches. Each processor has always one slab available for allocations.
1634 * Increasing the allocation order reduces the number of times that slabs
1635 * must be moved on and off the partial lists and is therefore a factor in
1636 * locking overhead.
1637 */
1638
1639/*
1640 * Minimum / Maximum order of slab pages. This influences locking overhead
1641 * and slab fragmentation. A higher order reduces the number of partial slabs
1642 * and increases the number of allocations possible without having to
1643 * take the list_lock.
1644 */
1645static int slub_min_order;
1646static int slub_max_order = DEFAULT_MAX_ORDER;
1647static int slub_min_objects = DEFAULT_MIN_OBJECTS;
1648
1649/*
1650 * Merge control. If this is set then no merging of slab caches will occur.
1651 * (Could be removed. This was introduced to pacify the merge skeptics.)
1652 */
1653static int slub_nomerge;
1654
1655/*
1656 * Calculate the order of allocation given an slab object size.
1657 *
1658 * The order of allocation has significant impact on performance and other
1659 * system components. Generally order 0 allocations should be preferred since
1660 * order 0 does not cause fragmentation in the page allocator. Larger objects
1661 * can be problematic to put into order 0 slabs because there may be too much
1662 * unused space left. We go to a higher order if more than 1/8th of the slab
1663 * would be wasted.
1664 *
1665 * In order to reach satisfactory performance we must ensure that a minimum
1666 * number of objects is in one slab. Otherwise we may generate too much
1667 * activity on the partial lists which requires taking the list_lock. This is
1668 * less a concern for large slabs though which are rarely used.
1669 *
1670 * slub_max_order specifies the order where we begin to stop considering the
1671 * number of objects in a slab as critical. If we reach slub_max_order then
1672 * we try to keep the page order as low as possible. So we accept more waste
1673 * of space in favor of a small page order.
1674 *
1675 * Higher order allocations also allow the placement of more objects in a
1676 * slab and thereby reduce object handling overhead. If the user has
1677 * requested a higher minimum order then we start with that one instead of
1678 * the smallest order which will fit the object.
1679 */
1680static inline int slab_order(int size, int min_objects,
1681				int max_order, int fract_leftover)
1682{
1683	int order;
1684	int rem;
1685
1686	for (order = max(slub_min_order,
1687				fls(min_objects * size - 1) - PAGE_SHIFT);
1688			order <= max_order; order++) {
1689
1690		unsigned long slab_size = PAGE_SIZE << order;
1691
1692		if (slab_size < min_objects * size)
1693			continue;
1694
1695		rem = slab_size % size;
1696
1697		if (rem <= slab_size / fract_leftover)
1698			break;
1699
1700	}
1701
1702	return order;
1703}
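
/*
 * Worked example (a sketch, PAGE_SIZE == 4096): for size == 700 and
 * min_objects == 4 the search starts at order 0. An order 0 slab holds
 * 5 objects and wastes 596 bytes, more than 4096 / 8, so we move on.
 * An order 1 slab holds 11 objects and wastes 492 bytes, below
 * 8192 / 8, so slab_order() returns 1.
 */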
1704
1705static inline int calculate_order(int size)
1706{
1707	int order;
1708	int min_objects;
1709	int fraction;
1710
1711	/*
1712	 * Attempt to find best configuration for a slab. This
1713	 * works by first attempting to generate a layout with
1714	 * the best configuration and backing off gradually.
1715	 *
1716	 * First we reduce the acceptable waste in a slab. Then
1717	 * we reduce the minimum objects required in a slab.
1718	 */
1719	min_objects = slub_min_objects;
1720	while (min_objects > 1) {
1721		fraction = 8;
1722		while (fraction >= 4) {
1723			order = slab_order(size, min_objects,
1724						slub_max_order, fraction);
1725			if (order <= slub_max_order)
1726				return order;
1727			fraction /= 2;
1728		}
1729		min_objects /= 2;
1730	}
1731
1732	/*
1733	 * We were unable to place multiple objects in a slab. Now
1734	 * let's see if we can place a single object there.
1735	 */
1736	order = slab_order(size, 1, slub_max_order, 1);
1737	if (order <= slub_max_order)
1738		return order;
1739
1740	/*
1741	 * Doh this slab cannot be placed using slub_max_order.
1742	 */
1743	order = slab_order(size, 1, MAX_ORDER, 1);
1744	if (order <= MAX_ORDER)
1745		return order;
1746	return -ENOSYS;
1747}
1748
1749/*
1750 * Figure out what the alignment of the objects will be.
1751 */
1752static unsigned long calculate_alignment(unsigned long flags,
1753		unsigned long align, unsigned long size)
1754{
1755	/*
1756	 * If the user wants hardware cache aligned objects then
1757	 * follow that suggestion if the object is sufficiently
1758	 * large.
1759	 *
1760	 * The hardware cache alignment cannot override the
1761	 * specified alignment though. If that is greater
1762	 * then use it.
1763	 */
1764	if ((flags & SLAB_HWCACHE_ALIGN) &&
1765			size > cache_line_size() / 2)
1766		return max_t(unsigned long, align, cache_line_size());
1767
1768	if (align < ARCH_SLAB_MINALIGN)
1769		return ARCH_SLAB_MINALIGN;
1770
1771	return ALIGN(align, sizeof(void *));
1772}
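
/*
 * Example (a sketch, assuming 64 byte cache lines): a 100 byte object
 * created with SLAB_HWCACHE_ALIGN and align == 0 is aligned to 64
 * bytes, while a 24 byte object is not larger than half a cache line
 * and therefore only receives ARCH_SLAB_MINALIGN.
 */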
1773
1774static void init_kmem_cache_node(struct kmem_cache_node *n)
1775{
1776	n->nr_partial = 0;
1777	atomic_long_set(&n->nr_slabs, 0);
1778	spin_lock_init(&n->list_lock);
1779	INIT_LIST_HEAD(&n->partial);
1780	INIT_LIST_HEAD(&n->full);
1781}
1782
1783#ifdef CONFIG_NUMA
1784/*
1785 * No kmalloc_node yet so do it by hand. We know that this is the first
1786 * slab on the node for this slabcache. There are no concurrent accesses
1787 * possible.
1788 *
1789 * Note that this function only works on the kmalloc_node_cache
1790 * when allocating for the kmalloc_node_cache.
1791 */
1792static struct kmem_cache_node * __init early_kmem_cache_node_alloc(gfp_t gfpflags,
1793								int node)
1794{
1795	struct page *page;
1796	struct kmem_cache_node *n;
1797
1798	BUG_ON(kmalloc_caches->size < sizeof(struct kmem_cache_node));
1799
1800	page = new_slab(kmalloc_caches, gfpflags | GFP_THISNODE, node);
1801
1802	BUG_ON(!page);
1803	n = page->freelist;
1804	BUG_ON(!n);
1805	page->freelist = get_freepointer(kmalloc_caches, n);
1806	page->inuse++;
1807	kmalloc_caches->node[node] = n;
1808	setup_object_debug(kmalloc_caches, page, n);
1809	init_kmem_cache_node(n);
1810	atomic_long_inc(&n->nr_slabs);
1811	add_partial(n, page);
1812
1813	/*
1814	 * new_slab() disables interrupts. If we do not reenable interrupts here
1815	 * then bootup would continue with interrupts disabled.
1816	 */
1817	local_irq_enable();
1818	return n;
1819}
1820
1821static void free_kmem_cache_nodes(struct kmem_cache *s)
1822{
1823	int node;
1824
1825	for_each_online_node(node) {
1826		struct kmem_cache_node *n = s->node[node];
1827		if (n && n != &s->local_node)
1828			kmem_cache_free(kmalloc_caches, n);
1829		s->node[node] = NULL;
1830	}
1831}
1832
1833static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
1834{
1835	int node;
1836	int local_node;
1837
1838	if (slab_state >= UP)
1839		local_node = page_to_nid(virt_to_page(s));
1840	else
1841		local_node = 0;
1842
1843	for_each_online_node(node) {
1844		struct kmem_cache_node *n;
1845
1846		if (local_node == node)
1847			n = &s->local_node;
1848		else {
1849			if (slab_state == DOWN) {
1850				n = early_kmem_cache_node_alloc(gfpflags,
1851								node);
1852				continue;
1853			}
1854			n = kmem_cache_alloc_node(kmalloc_caches,
1855							gfpflags, node);
1856
1857			if (!n) {
1858				free_kmem_cache_nodes(s);
1859				return 0;
1860			}
1861
1862		}
1863		s->node[node] = n;
1864		init_kmem_cache_node(n);
1865	}
1866	return 1;
1867}
1868#else
1869static void free_kmem_cache_nodes(struct kmem_cache *s)
1870{
1871}
1872
1873static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
1874{
1875	init_kmem_cache_node(&s->local_node);
1876	return 1;
1877}
1878#endif
1879
1880/*
1881 * calculate_sizes() determines the order and the distribution of data within
1882 * a slab object.
1883 */
1884static int calculate_sizes(struct kmem_cache *s)
1885{
1886	unsigned long flags = s->flags;
1887	unsigned long size = s->objsize;
1888	unsigned long align = s->align;
1889
1890	/*
1891	 * Determine if we can poison the object itself. If the user of
1892	 * the slab may touch the object after free or before allocation
1893	 * then we should never poison the object itself.
1894	 */
1895	if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) &&
1896			!s->ctor)
1897		s->flags |= __OBJECT_POISON;
1898	else
1899		s->flags &= ~__OBJECT_POISON;
1900
1901	/*
1902	 * Round up object size to the next word boundary. We can only
1903	 * place the free pointer at word boundaries and this determines
1904	 * the possible location of the free pointer.
1905	 */
1906	size = ALIGN(size, sizeof(void *));
1907
1908#ifdef CONFIG_SLUB_DEBUG
1909	/*
1910	 * If we are Redzoning then check if there is some space between the
1911	 * end of the object and the free pointer. If not then add an
1912	 * additional word to have some bytes to store Redzone information.
1913	 */
1914	if ((flags & SLAB_RED_ZONE) && size == s->objsize)
1915		size += sizeof(void *);
1916#endif
1917
1918	/*
1919	 * With that we have determined the number of bytes in actual use
1920	 * by the object. This is the potential offset to the free pointer.
1921	 */
1922	s->inuse = size;
1923
1924	if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
1925		s->ctor)) {
1926		/*
1927		 * Relocate free pointer after the object if it is not
1928		 * permitted to overwrite the first word of the object on
1929		 * kmem_cache_free.
1930		 *
1931		 * This is the case if we do RCU, have a constructor or
1932		 * destructor or are poisoning the objects.
1933		 */
1934		s->offset = size;
1935		size += sizeof(void *);
1936	}
1937
1938#ifdef CONFIG_SLUB_DEBUG
1939	if (flags & SLAB_STORE_USER)
1940		/*
1941		 * Need to store information about allocs and frees after
1942		 * the object.
1943		 */
1944		size += 2 * sizeof(struct track);
1945
1946	if (flags & SLAB_RED_ZONE)
1947		/*
1948		 * Add some empty padding so that we can catch
1949		 * overwrites from earlier objects rather than let
1950		 * tracking information or the free pointer be
1951		 * corrupted if a user writes before the start
1952		 * of the object.
1953		 */
1954		size += sizeof(void *);
1955#endif
1956
1957	/*
1958	 * Determine the alignment based on various parameters that the
1959	 * user specified and the dynamic determination of cache line size
1960	 * on bootup.
1961	 */
1962	align = calculate_alignment(flags, align, s->objsize);
1963
1964	/*
1965	 * SLUB stores one object immediately after another beginning from
1966	 * offset 0. In order to align the objects we simply need to size
1967	 * each object to conform to the alignment.
1968	 */
1969	size = ALIGN(size, align);
1970	s->size = size;
1971
1972	s->order = calculate_order(size);
1973	if (s->order < 0)
1974		return 0;
1975
1976	/*
1977	 * Determine the number of objects per slab
1978	 */
1979	s->objects = (PAGE_SIZE << s->order) / size;
1980
1981	/*
1982	 * Verify that the number of objects is within permitted limits.
1983	 * The page->inuse field is only 16 bits wide! So we cannot have
1984	 * more than 64k objects per slab.
1985	 */
1986	if (!s->objects || s->objects > 65535)
1987		return 0;
1988	return 1;
1989}
1991
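/*
 * Layout example (editor's illustration, not part of the original
 * source): for a hypothetical 64-bit cache with objsize == 40 and
 * SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER set, and assuming
 * sizeof(struct track) == 24, calculate_sizes() above produces:
 *
 *	bytes   0.. 39	object (poisoned on free)
 *	bytes  40.. 47	red zone word
 *	bytes  48.. 55	free pointer (relocated because of poisoning)
 *	bytes  56..103	two struct track entries (alloc and free)
 *	bytes 104..111	padding red zone
 *
 * i.e. s->inuse == 48, s->offset == 48 and s->size == 112.
 */
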
1992static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
1993		const char *name, size_t size,
1994		size_t align, unsigned long flags,
1995		void (*ctor)(void *, struct kmem_cache *, unsigned long))
1996{
1997	memset(s, 0, kmem_size);
1998	s->name = name;
1999	s->ctor = ctor;
2000	s->objsize = size;
2001	s->flags = flags;
2002	s->align = align;
2003	kmem_cache_open_debug_check(s);
2004
2005	if (!calculate_sizes(s))
2006		goto error;
2007
2008	s->refcount = 1;
2009#ifdef CONFIG_NUMA
2010	s->defrag_ratio = 100;
2011#endif
2012
2013	if (init_kmem_cache_nodes(s, gfpflags & ~SLUB_DMA))
2014		return 1;
2015error:
2016	if (flags & SLAB_PANIC)
2017		panic("Cannot create slab %s size=%lu realsize=%u "
2018			"order=%u offset=%u flags=%lx\n",
2019			s->name, (unsigned long)size, s->size, s->order,
2020			s->offset, flags);
2021	return 0;
2022}
2023EXPORT_SYMBOL(kmem_cache_open);
2024
2025/*
2026 * Check if a given pointer is valid
2027 */
2028int kmem_ptr_validate(struct kmem_cache *s, const void *object)
2029{
2030	struct page *page;
2031
2032	page = get_object_page(object);
2033
2034	if (!page || s != page->slab)
2035		/* No slab or wrong slab */
2036		return 0;
2037
2038	if (!check_valid_pointer(s, page, object))
2039		return 0;
2040
2041	/*
2042	 * We could also check if the object is on the slab's freelist.
2043	 * But this would be too expensive and it seems that the main
2044	 * purpose of kmem_ptr_validate() is to check if the object belongs
2045	 * to a certain slab.
2046	 */
2047	return 1;
2048}
2049EXPORT_SYMBOL(kmem_ptr_validate);
2050
2051/*
2052 * Determine the size of a slab object
2053 */
2054unsigned int kmem_cache_size(struct kmem_cache *s)
2055{
2056	return s->objsize;
2057}
2058EXPORT_SYMBOL(kmem_cache_size);
2059
2060const char *kmem_cache_name(struct kmem_cache *s)
2061{
2062	return s->name;
2063}
2064EXPORT_SYMBOL(kmem_cache_name);
2065
2066/*
2067 * Attempt to free all slabs on a node. Return the number of slabs we
2068 * were unable to free.
2069 */
2070static int free_list(struct kmem_cache *s, struct kmem_cache_node *n,
2071			struct list_head *list)
2072{
2073	int slabs_inuse = 0;
2074	unsigned long flags;
2075	struct page *page, *h;
2076
2077	spin_lock_irqsave(&n->list_lock, flags);
2078	list_for_each_entry_safe(page, h, list, lru)
2079		if (!page->inuse) {
2080			list_del(&page->lru);
2081			discard_slab(s, page);
2082		} else
2083			slabs_inuse++;
2084	spin_unlock_irqrestore(&n->list_lock, flags);
2085	return slabs_inuse;
2086}
2087
2088/*
2089 * Release all resources used by a slab cache.
2090 */
2091static int kmem_cache_close(struct kmem_cache *s)
2092{
2093	int node;
2094
2095	flush_all(s);
2096
2097	/* Attempt to free all objects */
2098	for_each_online_node(node) {
2099		struct kmem_cache_node *n = get_node(s, node);
2100
2101		n->nr_partial -= free_list(s, n, &n->partial);
2102		if (atomic_long_read(&n->nr_slabs))
2103			return 1;
2104	}
2105	free_kmem_cache_nodes(s);
2106	return 0;
2107}
2108
2109/*
2110 * Close a cache and release the kmem_cache structure
2111 * (must be used for caches created using kmem_cache_create)
2112 */
2113void kmem_cache_destroy(struct kmem_cache *s)
2114{
2115	down_write(&slub_lock);
2116	s->refcount--;
2117	if (!s->refcount) {
2118		list_del(&s->list);
2119		if (kmem_cache_close(s))
2120			WARN_ON(1);
2121		sysfs_slab_remove(s);
2122		kfree(s);
2123	}
2124	up_write(&slub_lock);
2125}
2126EXPORT_SYMBOL(kmem_cache_destroy);
2127
2128/********************************************************************
2129 *		Kmalloc subsystem
2130 *******************************************************************/
2131
2132struct kmem_cache kmalloc_caches[KMALLOC_SHIFT_HIGH + 1] __cacheline_aligned;
2133EXPORT_SYMBOL(kmalloc_caches);
2134
2135#ifdef CONFIG_ZONE_DMA
2136static struct kmem_cache *kmalloc_caches_dma[KMALLOC_SHIFT_HIGH + 1];
2137#endif
2138
2139static int __init setup_slub_min_order(char *str)
2140{
2141	get_option(&str, &slub_min_order);
2142
2143	return 1;
2144}
2145
2146__setup("slub_min_order=", setup_slub_min_order);
2147
2148static int __init setup_slub_max_order(char *str)
2149{
2150	get_option(&str, &slub_max_order);
2151
2152	return 1;
2153}
2154
2155__setup("slub_max_order=", setup_slub_max_order);
2156
2157static int __init setup_slub_min_objects(char *str)
2158{
2159	get_option(&str, &slub_min_objects);
2160
2161	return 1;
2162}
2163
2164__setup("slub_min_objects=", setup_slub_min_objects);
2165
2166static int __init setup_slub_nomerge(char *str)
2167{
2168	slub_nomerge = 1;
2169	return 1;
2170}
2171
2172__setup("slub_nomerge", setup_slub_nomerge);
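
/*
 * Example (editor's note): the parameters above are given on the kernel
 * command line, e.g.
 *
 *	slub_min_order=3 slub_max_order=4 slub_min_objects=16 slub_nomerge
 *
 * to force larger slabs, cap the allocation order and disable the
 * merging of compatible caches.
 */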
2173
2174static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
2175		const char *name, int size, gfp_t gfp_flags)
2176{
2177	unsigned int flags = 0;
2178
2179	if (gfp_flags & SLUB_DMA)
2180		flags = SLAB_CACHE_DMA;
2181
2182	down_write(&slub_lock);
2183	if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
2184			flags, NULL))
2185		goto panic;
2186
2187	list_add(&s->list, &slab_caches);
2188	up_write(&slub_lock);
2189	if (sysfs_slab_add(s))
2190		goto panic;
2191	return s;
2192
2193panic:
2194	panic("Creation of kmalloc slab %s size=%d failed.\n", name, size);
2195}
2196
2197static struct kmem_cache *get_slab(size_t size, gfp_t flags)
2198{
2199	int index = kmalloc_index(size);
2200
2201	if (!index)
2202		return NULL;
2203
2204	/* Allocation too large? */
2205	BUG_ON(index < 0);
2206
2207#ifdef CONFIG_ZONE_DMA
2208	if ((flags & SLUB_DMA)) {
2209		struct kmem_cache *s;
2210		struct kmem_cache *x;
2211		char *text;
2212		size_t realsize;
2213
2214		s = kmalloc_caches_dma[index];
2215		if (s)
2216			return s;
2217
2218		/* Dynamically create dma cache */
2219		x = kmalloc(kmem_size, flags & ~SLUB_DMA);
2220		if (!x)
2221			panic("Unable to allocate memory for dma cache\n");
2222
2223		/* Indices 1 and 2 are the 96 and 192 byte caches */
2224		if (index == 1)
2225			realsize = 96;
2226		else if (index == 2)
2227			realsize = 192;
2228		else
2229			realsize = 1 << index;
2231
2232		text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d",
2233				(unsigned int)realsize);
2234		s = create_kmalloc_cache(x, text, realsize, flags);
2235		kmalloc_caches_dma[index] = s;
2236		return s;
2237	}
2238#endif
2239	return &kmalloc_caches[index];
2240}
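
/*
 * Example (editor's illustration): kmalloc_index() picks the smallest
 * fitting cache, so with the standard size table
 *
 *	get_slab(100, GFP_KERNEL) returns the kmalloc-128 cache (index 7),
 *	get_slab(96, GFP_KERNEL) returns the special kmalloc-96 cache
 *	(index 1) and get_slab(0, GFP_KERNEL) returns NULL, which the
 *	callers below turn into ZERO_SIZE_PTR.
 *
 * A request with SLUB_DMA set returns (and if needed creates) the
 * corresponding kmalloc_dma cache instead.
 */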
2241
2242void *__kmalloc(size_t size, gfp_t flags)
2243{
2244	struct kmem_cache *s = get_slab(size, flags);
2245
2246	if (s)
2247		return slab_alloc(s, flags, -1, __builtin_return_address(0));
2248	return ZERO_SIZE_PTR;
2249}
2250EXPORT_SYMBOL(__kmalloc);
2251
2252#ifdef CONFIG_NUMA
2253void *__kmalloc_node(size_t size, gfp_t flags, int node)
2254{
2255	struct kmem_cache *s = get_slab(size, flags);
2256
2257	if (s)
2258		return slab_alloc(s, flags, node, __builtin_return_address(0));
2259	return ZERO_SIZE_PTR;
2260}
2261EXPORT_SYMBOL(__kmalloc_node);
2262#endif
2263
2264size_t ksize(const void *object)
2265{
2266	struct page *page;
2267	struct kmem_cache *s;
2268
2269	if (object == ZERO_SIZE_PTR)
2270		return 0;
2271
2272	page = get_object_page(object);
2273	BUG_ON(!page);
2274	s = page->slab;
2275	BUG_ON(!s);
2276
2277	/*
2278	 * Debugging requires use of the padding between object
2279	 * and whatever may come after it.
2280	 */
2281	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
2282		return s->objsize;
2283
2284	/*
2285	 * If we have the need to store the freelist pointer
2286	 * back there or track user information then we can
2287	 * only use the space before that information.
2288	 */
2289	if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
2290		return s->inuse;
2291
2292	/*
2293	 * Else we can use all the padding etc for the allocation
2294	 */
2295	return s->size;
2296}
2297EXPORT_SYMBOL(ksize);
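
/*
 * Example (editor's note): after
 *
 *	void *p = kmalloc(100, GFP_KERNEL);
 *
 * the object comes from the kmalloc-128 cache, so without debug flags
 * ksize(p) typically returns 128 and the caller may use all of it.
 */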
2298
2299void kfree(const void *x)
2300{
2301	struct kmem_cache *s;
2302	struct page *page;
2303
2304	/*
2305	 * This has to be an unsigned comparison. According to Linus
2306	 * some gcc versions treat a pointer as a signed entity. Then
2307	 * this comparison would be true for all "negative" pointers
2308	 * (which would cover the whole upper half of the address space).
2309	 */
2310	if ((unsigned long)x <= (unsigned long)ZERO_SIZE_PTR)
2311		return;
2312
2313	page = virt_to_head_page(x);
2314	s = page->slab;
2315
2316	slab_free(s, page, (void *)x, __builtin_return_address(0));
2317}
2318EXPORT_SYMBOL(kfree);
2319
2320/*
2321 * kmem_cache_shrink removes empty slabs from the partial lists and sorts
2322 * the remaining slabs by the number of items in use. The slabs with the
2323 * most items in use come first. New allocations will then fill those up
2324 * and thus they can be removed from the partial lists.
2325 *
2326 * The slabs with the fewest items in use are placed last. This causes
2327 * them to be allocated from last, increasing the chance that their
2328 * remaining objects are freed and the slabs can eventually be discarded.
2329 */
2330int kmem_cache_shrink(struct kmem_cache *s)
2331{
2332	int node;
2333	int i;
2334	struct kmem_cache_node *n;
2335	struct page *page;
2336	struct page *t;
2337	struct list_head *slabs_by_inuse =
2338		kmalloc(sizeof(struct list_head) * s->objects, GFP_KERNEL);
2339	unsigned long flags;
2340
2341	if (!slabs_by_inuse)
2342		return -ENOMEM;
2343
2344	flush_all(s);
2345	for_each_online_node(node) {
2346		n = get_node(s, node);
2347
2348		if (!n->nr_partial)
2349			continue;
2350
2351		for (i = 0; i < s->objects; i++)
2352			INIT_LIST_HEAD(slabs_by_inuse + i);
2353
2354		spin_lock_irqsave(&n->list_lock, flags);
2355
2356		/*
2357		 * Build lists indexed by the items in use in each slab.
2358		 *
2359		 * Note that concurrent frees may occur while we hold the
2360		 * list_lock. page->inuse here is the upper limit.
2361		 */
2362		list_for_each_entry_safe(page, t, &n->partial, lru) {
2363			if (!page->inuse && slab_trylock(page)) {
2364				/*
2365				 * Must hold slab lock here because slab_free
2366				 * may have freed the last object and be
2367				 * waiting to release the slab.
2368				 */
2369				list_del(&page->lru);
2370				n->nr_partial--;
2371				slab_unlock(page);
2372				discard_slab(s, page);
2373			} else {
2374				if (n->nr_partial > MAX_PARTIAL)
2375					list_move(&page->lru,
2376					slabs_by_inuse + page->inuse);
2377			}
2378		}
2379
2380		if (n->nr_partial <= MAX_PARTIAL)
2381			goto out;
2382
2383		/*
2384		 * Rebuild the partial list with the slabs filled up most
2385		 * first and the least used slabs at the end.
2386		 */
2387		for (i = s->objects - 1; i >= 0; i--)
2388			list_splice(slabs_by_inuse + i, n->partial.prev);
2389
2390	out:
2391		spin_unlock_irqrestore(&n->list_lock, flags);
2392	}
2393
2394	kfree(slabs_by_inuse);
2395	return 0;
2396}
2397EXPORT_SYMBOL(kmem_cache_shrink);
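
/*
 * Usage example (editor's note):
 *
 *	err = kmem_cache_shrink(cachep);
 *
 * may be called after a subsystem released a large number of objects.
 * The same pass can be triggered from user space by writing 1 to
 * /sys/slab/<cache>/shrink (see shrink_store() below).
 */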
2398
2399/**
2400 * krealloc - reallocate memory. The contents will remain unchanged.
2401 * @p: object to reallocate memory for.
2402 * @new_size: how many bytes of memory are required.
2403 * @flags: the type of memory to allocate.
2404 *
2405 * The contents of the object pointed to are preserved up to the
2406 * lesser of the new and old sizes.  If @p is %NULL, krealloc()
2407 * behaves exactly like kmalloc().  If @new_size is 0 and @p is not a
2408 * %NULL pointer, the object pointed to is freed.
2409 */
2410void *krealloc(const void *p, size_t new_size, gfp_t flags)
2411{
2412	void *ret;
2413	size_t ks;
2414
2415	if (unlikely(!p || p == ZERO_SIZE_PTR))
2416		return kmalloc(new_size, flags);
2417
2418	if (unlikely(!new_size)) {
2419		kfree(p);
2420		return ZERO_SIZE_PTR;
2421	}
2422
2423	ks = ksize(p);
2424	if (ks >= new_size)
2425		return (void *)p;
2426
2427	ret = kmalloc(new_size, flags);
2428	if (ret) {
2429		memcpy(ret, p, min(new_size, ks));
2430		kfree(p);
2431	}
2432	return ret;
2433}
2434EXPORT_SYMBOL(krealloc);
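
/*
 * Usage example (editor's note): on failure krealloc() returns NULL and
 * leaves the old block allocated, so keep the old pointer until the
 * call has succeeded:
 *
 *	char *tmp = krealloc(buf, 128, GFP_KERNEL);
 *	if (!tmp)
 *		kfree(buf);	(old buffer is still valid here)
 *	else
 *		buf = tmp;
 */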
2435
2436/********************************************************************
2437 *			Basic setup of slabs
2438 *******************************************************************/
2439
2440void __init kmem_cache_init(void)
2441{
2442	int i;
2443	int caches = 0;
2444
2445#ifdef CONFIG_NUMA
2446	/*
2447	 * Must first have the slab cache available for the allocations of the
2448	 * struct kmem_cache_node's. There is special bootstrap code in
2449	 * init_kmem_cache_nodes() for slab_state == DOWN.
2450	 */
2451	create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
2452		sizeof(struct kmem_cache_node), GFP_KERNEL);
2453	kmalloc_caches[0].refcount = -1;
2454	caches++;
2455#endif
2456
2457	/* Able to allocate the per node structures */
2458	slab_state = PARTIAL;
2459
2460	/* Caches that are not of power-of-two size */
2461	if (KMALLOC_MIN_SIZE <= 64) {
2462		create_kmalloc_cache(&kmalloc_caches[1],
2463				"kmalloc-96", 96, GFP_KERNEL);
2464		caches++;
2465	}
2466	if (KMALLOC_MIN_SIZE <= 128) {
2467		create_kmalloc_cache(&kmalloc_caches[2],
2468				"kmalloc-192", 192, GFP_KERNEL);
2469		caches++;
2470	}
2471
2472	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
2473		create_kmalloc_cache(&kmalloc_caches[i],
2474			"kmalloc", 1 << i, GFP_KERNEL);
2475		caches++;
2476	}
2477
2478	slab_state = UP;
2479
2480	/* Provide the correct kmalloc names now that the caches are up */
2481	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
2482		kmalloc_caches[i].name =
2483			kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
2484
2485#ifdef CONFIG_SMP
2486	register_cpu_notifier(&slab_notifier);
2487#endif
2488
2489	kmem_size = offsetof(struct kmem_cache, cpu_slab) +
2490				nr_cpu_ids * sizeof(struct page *);
2491
2492	printk(KERN_INFO "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
2493		" CPUs=%d, Nodes=%d\n",
2494		caches, cache_line_size(),
2495		slub_min_order, slub_max_order, slub_min_objects,
2496		nr_cpu_ids, nr_node_ids);
2497}
2498
2499/*
2500 * Find a mergeable slab cache
2501 */
2502static int slab_unmergeable(struct kmem_cache *s)
2503{
2504	if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
2505		return 1;
2506
2507	if (s->ctor)
2508		return 1;
2509
2510	/*
2511	 * We may have set a slab to be unmergeable during bootstrap.
2512	 */
2513	if (s->refcount < 0)
2514		return 1;
2515
2516	return 0;
2517}
2518
2519static struct kmem_cache *find_mergeable(size_t size,
2520		size_t align, unsigned long flags,
2521		void (*ctor)(void *, struct kmem_cache *, unsigned long))
2522{
2523	struct list_head *h;
2524
2525	if (slub_nomerge || (flags & SLUB_NEVER_MERGE))
2526		return NULL;
2527
2528	if (ctor)
2529		return NULL;
2530
2531	size = ALIGN(size, sizeof(void *));
2532	align = calculate_alignment(flags, align, size);
2533	size = ALIGN(size, align);
2534
2535	list_for_each(h, &slab_caches) {
2536		struct kmem_cache *s =
2537			container_of(h, struct kmem_cache, list);
2538
2539		if (slab_unmergeable(s))
2540			continue;
2541
2542		if (size > s->size)
2543			continue;
2544
2545		if (((flags | slub_debug) & SLUB_MERGE_SAME) !=
2546			(s->flags & SLUB_MERGE_SAME))
2547			continue;
2548		/*
2549		 * Check if alignment is compatible.
2550		 * Courtesy of Adrian Drzewiecki
2551		 */
2552		if ((s->size & ~(align - 1)) != s->size)
2553			continue;
2554
2555		if (s->size - size >= sizeof(void *))
2556			continue;
2557
2558		return s;
2559	}
2560	return NULL;
2561}
2562
2563struct kmem_cache *kmem_cache_create(const char *name, size_t size,
2564		size_t align, unsigned long flags,
2565		void (*ctor)(void *, struct kmem_cache *, unsigned long),
2566		void (*dtor)(void *, struct kmem_cache *, unsigned long))
2567{
2568	struct kmem_cache *s;
2569
2570	BUG_ON(dtor);
2571	down_write(&slub_lock);
2572	s = find_mergeable(size, align, flags, ctor);
2573	if (s) {
2574		s->refcount++;
2575		/*
2576		 * Adjust the object sizes so that we clear
2577		 * the complete object on kzalloc.
2578		 */
2579		s->objsize = max(s->objsize, (int)size);
2580		s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
2581		if (sysfs_slab_alias(s, name))
2582			goto err;
2583	} else {
2584		s = kmalloc(kmem_size, GFP_KERNEL);
2585		if (s && kmem_cache_open(s, GFP_KERNEL, name,
2586				size, align, flags, ctor)) {
2587			if (sysfs_slab_add(s)) {
2588				kfree(s);
2589				goto err;
2590			}
2591			list_add(&s->list, &slab_caches);
2592		} else {
2593			kfree(s);
			goto err;	/* do not return a pointer to freed memory */
		}
2594	}
2595	up_write(&slub_lock);
2596	return s;
2597
2598err:
2599	up_write(&slub_lock);
2600	if (flags & SLAB_PANIC)
2601		panic("Cannot create slabcache %s\n", name);
2602	else
2603		s = NULL;
2604	return s;
2605}
2606EXPORT_SYMBOL(kmem_cache_create);
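
/*
 * Usage example (editor's sketch; struct my_item is hypothetical):
 *
 *	static struct kmem_cache *my_cache;
 *
 *	my_cache = kmem_cache_create("my_item", sizeof(struct my_item),
 *					0, SLAB_HWCACHE_ALIGN, NULL, NULL);
 *	if (!my_cache)
 *		return -ENOMEM;
 *	item = kmem_cache_alloc(my_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(my_cache, item);
 *	kmem_cache_destroy(my_cache);
 *
 * Compatible caches without a ctor may be merged by find_mergeable()
 * and then appear as aliases under /sys/slab.
 */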
2607
2608void *kmem_cache_zalloc(struct kmem_cache *s, gfp_t flags)
2609{
2610	void *x;
2611
2612	x = slab_alloc(s, flags, -1, __builtin_return_address(0));
2613	if (x)
2614		memset(x, 0, s->objsize);
2615	return x;
2616}
2617EXPORT_SYMBOL(kmem_cache_zalloc);
2618
2619#ifdef CONFIG_SMP
2620static void for_all_slabs(void (*func)(struct kmem_cache *, int), int cpu)
2621{
2622	struct list_head *h;
2623
2624	down_read(&slub_lock);
2625	list_for_each(h, &slab_caches) {
2626		struct kmem_cache *s =
2627			container_of(h, struct kmem_cache, list);
2628
2629		func(s, cpu);
2630	}
2631	up_read(&slub_lock);
2632}
2633
2634/*
2635 * Version of __flush_cpu_slab for the case that interrupts
2636 * are enabled.
2637 */
2638static void cpu_slab_flush(struct kmem_cache *s, int cpu)
2639{
2640	unsigned long flags;
2641
2642	local_irq_save(flags);
2643	__flush_cpu_slab(s, cpu);
2644	local_irq_restore(flags);
2645}
2646
2647/*
2648 * Use the cpu notifier to ensure that the cpu slabs are flushed when
2649 * necessary.
2650 */
2651static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
2652		unsigned long action, void *hcpu)
2653{
2654	long cpu = (long)hcpu;
2655
2656	switch (action) {
2657	case CPU_UP_CANCELED:
2658	case CPU_UP_CANCELED_FROZEN:
2659	case CPU_DEAD:
2660	case CPU_DEAD_FROZEN:
2661		for_all_slabs(cpu_slab_flush, cpu);
2662		break;
2663	default:
2664		break;
2665	}
2666	return NOTIFY_OK;
2667}
2668
2669static struct notifier_block __cpuinitdata slab_notifier =
2670	{ &slab_cpuup_callback, NULL, 0 };
2671
2672#endif
2673
2674void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
2675{
2676	struct kmem_cache *s = get_slab(size, gfpflags);
2677
2678	if (!s)
2679		return ZERO_SIZE_PTR;
2680
2681	return slab_alloc(s, gfpflags, -1, caller);
2682}
2683
2684void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
2685					int node, void *caller)
2686{
2687	struct kmem_cache *s = get_slab(size, gfpflags);
2688
2689	if (!s)
2690		return ZERO_SIZE_PTR;
2691
2692	return slab_alloc(s, gfpflags, node, caller);
2693}
2694
2695#if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
2696static int validate_slab(struct kmem_cache *s, struct page *page)
2697{
2698	void *p;
2699	void *addr = page_address(page);
2700	DECLARE_BITMAP(map, s->objects);
2701
2702	if (!check_slab(s, page) ||
2703			!on_freelist(s, page, NULL))
2704		return 0;
2705
2706	/* Now we know that a valid freelist exists */
2707	bitmap_zero(map, s->objects);
2708
2709	for_each_free_object(p, s, page->freelist) {
2710		set_bit(slab_index(p, s, addr), map);
2711		if (!check_object(s, page, p, 0))
2712			return 0;
2713	}
2714
2715	for_each_object(p, s, addr)
2716		if (!test_bit(slab_index(p, s, addr), map))
2717			if (!check_object(s, page, p, 1))
2718				return 0;
2719	return 1;
2720}
2721
2722static void validate_slab_slab(struct kmem_cache *s, struct page *page)
2723{
2724	if (slab_trylock(page)) {
2725		validate_slab(s, page);
2726		slab_unlock(page);
2727	} else
2728		printk(KERN_INFO "SLUB %s: Skipped busy slab 0x%p\n",
2729			s->name, page);
2730
2731	if (s->flags & DEBUG_DEFAULT_FLAGS) {
2732		if (!SlabDebug(page))
2733			printk(KERN_ERR "SLUB %s: SlabDebug not set "
2734				"on slab 0x%p\n", s->name, page);
2735	} else {
2736		if (SlabDebug(page))
2737			printk(KERN_ERR "SLUB %s: SlabDebug set on "
2738				"slab 0x%p\n", s->name, page);
2739	}
2740}
2741
2742static int validate_slab_node(struct kmem_cache *s, struct kmem_cache_node *n)
2743{
2744	unsigned long count = 0;
2745	struct page *page;
2746	unsigned long flags;
2747
2748	spin_lock_irqsave(&n->list_lock, flags);
2749
2750	list_for_each_entry(page, &n->partial, lru) {
2751		validate_slab_slab(s, page);
2752		count++;
2753	}
2754	if (count != n->nr_partial)
2755		printk(KERN_ERR "SLUB %s: %ld partial slabs counted but "
2756			"counter=%ld\n", s->name, count, n->nr_partial);
2757
2758	if (!(s->flags & SLAB_STORE_USER))
2759		goto out;
2760
2761	list_for_each_entry(page, &n->full, lru) {
2762		validate_slab_slab(s, page);
2763		count++;
2764	}
2765	if (count != atomic_long_read(&n->nr_slabs))
2766		printk(KERN_ERR "SLUB: %s %ld slabs counted but "
2767			"counter=%ld\n", s->name, count,
2768			atomic_long_read(&n->nr_slabs));
2769
2770out:
2771	spin_unlock_irqrestore(&n->list_lock, flags);
2772	return count;
2773}
2774
2775static unsigned long validate_slab_cache(struct kmem_cache *s)
2776{
2777	int node;
2778	unsigned long count = 0;
2779
2780	flush_all(s);
2781	for_each_online_node(node) {
2782		struct kmem_cache_node *n = get_node(s, node);
2783
2784		count += validate_slab_node(s, n);
2785	}
2786	return count;
2787}
2788
2789#ifdef SLUB_RESILIENCY_TEST
2790static void resiliency_test(void)
2791{
2792	u8 *p;
2793
2794	printk(KERN_ERR "SLUB resiliency testing\n");
2795	printk(KERN_ERR "-----------------------\n");
2796	printk(KERN_ERR "A. Corruption after allocation\n");
2797
2798	p = kzalloc(16, GFP_KERNEL);
2799	p[16] = 0x12;
2800	printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer"
2801			" 0x12->0x%p\n\n", p + 16);
2802
2803	validate_slab_cache(kmalloc_caches + 4);
2804
2805	/* Hmmm... The next two are dangerous */
2806	p = kzalloc(32, GFP_KERNEL);
2807	p[32 + sizeof(void *)] = 0x34;
2808	printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab"
2809			" 0x34->0x%p\n", p);
2810	printk(KERN_ERR "If allocated object is overwritten then not detectable\n\n");
2811
2812	validate_slab_cache(kmalloc_caches + 5);
2813	p = kzalloc(64, GFP_KERNEL);
2814	p += 64 + (get_cycles() & 0xff) * sizeof(void *);
2815	*p = 0x56;
2816	printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
2817									p);
2818	printk(KERN_ERR "If allocated object is overwritten then not detectable\n\n");
2819	validate_slab_cache(kmalloc_caches + 6);
2820
2821	printk(KERN_ERR "\nB. Corruption after free\n");
2822	p = kzalloc(128, GFP_KERNEL);
2823	kfree(p);
2824	*p = 0x78;
2825	printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
2826	validate_slab_cache(kmalloc_caches + 7);
2827
2828	p = kzalloc(256, GFP_KERNEL);
2829	kfree(p);
2830	p[50] = 0x9a;
2831	printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n", p);
2832	validate_slab_cache(kmalloc_caches + 8);
2833
2834	p = kzalloc(512, GFP_KERNEL);
2835	kfree(p);
2836	p[512] = 0xab;
2837	printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
2838	validate_slab_cache(kmalloc_caches + 9);
2839}
2840#else
2841static void resiliency_test(void) {}
2842#endif
2843
2844/*
2845 * Generate lists of code addresses where slabcache objects are allocated
2846 * and freed.
2847 */
2848
2849struct location {
2850	unsigned long count;
2851	void *addr;
2852	long long sum_time;
2853	long min_time;
2854	long max_time;
2855	long min_pid;
2856	long max_pid;
2857	cpumask_t cpus;
2858	nodemask_t nodes;
2859};
2860
2861struct loc_track {
2862	unsigned long max;
2863	unsigned long count;
2864	struct location *loc;
2865};
2866
2867static void free_loc_track(struct loc_track *t)
2868{
2869	if (t->max)
2870		free_pages((unsigned long)t->loc,
2871			get_order(sizeof(struct location) * t->max));
2872}
2873
2874static int alloc_loc_track(struct loc_track *t, unsigned long max)
2875{
2876	struct location *l;
2877	int order;
2878
2879	if (!max)
2880		max = PAGE_SIZE / sizeof(struct location);
2881
2882	order = get_order(sizeof(struct location) * max);
2883
2884	l = (void *)__get_free_pages(GFP_ATOMIC, order);
2885
2886	if (!l)
2887		return 0;
2888
2889	if (t->count) {
2890		memcpy(l, t->loc, sizeof(struct location) * t->count);
2891		free_loc_track(t);
2892	}
2893	t->max = max;
2894	t->loc = l;
2895	return 1;
2896}
2897
2898static int add_location(struct loc_track *t, struct kmem_cache *s,
2899				const struct track *track)
2900{
2901	long start, end, pos;
2902	struct location *l;
2903	void *caddr;
2904	unsigned long age = jiffies - track->when;
2905
2906	start = -1;
2907	end = t->count;
2908
2909	for ( ; ; ) {
2910		pos = start + (end - start + 1) / 2;
2911
2912		/*
2913		 * There is nothing at "end". If we end up there
2914		 * we need to insert before end.
2915		 */
2916		if (pos == end)
2917			break;
2918
2919		caddr = t->loc[pos].addr;
2920		if (track->addr == caddr) {
2921
2922			l = &t->loc[pos];
2923			l->count++;
2924			if (track->when) {
2925				l->sum_time += age;
2926				if (age < l->min_time)
2927					l->min_time = age;
2928				if (age > l->max_time)
2929					l->max_time = age;
2930
2931				if (track->pid < l->min_pid)
2932					l->min_pid = track->pid;
2933				if (track->pid > l->max_pid)
2934					l->max_pid = track->pid;
2935
2936				cpu_set(track->cpu, l->cpus);
2937			}
2938			node_set(page_to_nid(virt_to_page(track)), l->nodes);
2939			return 1;
2940		}
2941
2942		if (track->addr < caddr)
2943			end = pos;
2944		else
2945			start = pos;
2946	}
2947
2948	/*
2949	 * Not found. Insert new tracking element.
2950	 */
2951	if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max))
2952		return 0;
2953
2954	l = t->loc + pos;
2955	if (pos < t->count)
2956		memmove(l + 1, l,
2957			(t->count - pos) * sizeof(struct location));
2958	t->count++;
2959	l->count = 1;
2960	l->addr = track->addr;
2961	l->sum_time = age;
2962	l->min_time = age;
2963	l->max_time = age;
2964	l->min_pid = track->pid;
2965	l->max_pid = track->pid;
2966	cpus_clear(l->cpus);
2967	cpu_set(track->cpu, l->cpus);
2968	nodes_clear(l->nodes);
2969	node_set(page_to_nid(virt_to_page(track)), l->nodes);
2970	return 1;
2971}
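
/*
 * Example (editor's illustration): t->loc is kept sorted by call site
 * address. With existing entries {A, C} and a new track at address B
 * (A < B < C), the bisection above terminates with pos == 1, the C
 * entry is memmove()d up by one slot and B is inserted at position 1,
 * keeping the array sorted.
 */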
2972
2973static void process_slab(struct loc_track *t, struct kmem_cache *s,
2974		struct page *page, enum track_item alloc)
2975{
2976	void *addr = page_address(page);
2977	DECLARE_BITMAP(map, s->objects);
2978	void *p;
2979
2980	bitmap_zero(map, s->objects);
2981	for_each_free_object(p, s, page->freelist)
2982		set_bit(slab_index(p, s, addr), map);
2983
2984	for_each_object(p, s, addr)
2985		if (!test_bit(slab_index(p, s, addr), map))
2986			add_location(t, s, get_track(s, p, alloc));
2987}
2988
2989static int list_locations(struct kmem_cache *s, char *buf,
2990					enum track_item alloc)
2991{
2992	int n = 0;
2993	unsigned long i;
2994	struct loc_track t;
2995	int node;
2996
2997	t.count = 0;
2998	t.max = 0;
2999
3000	/* Push back cpu slabs */
3001	flush_all(s);
3002
3003	for_each_online_node(node) {
3004		struct kmem_cache_node *n = get_node(s, node);
3005		unsigned long flags;
3006		struct page *page;
3007
3008		if (!atomic_long_read(&n->nr_slabs))
3009			continue;
3010
3011		spin_lock_irqsave(&n->list_lock, flags);
3012		list_for_each_entry(page, &n->partial, lru)
3013			process_slab(&t, s, page, alloc);
3014		list_for_each_entry(page, &n->full, lru)
3015			process_slab(&t, s, page, alloc);
3016		spin_unlock_irqrestore(&n->list_lock, flags);
3017	}
3018
3019	for (i = 0; i < t.count; i++) {
3020		struct location *l = &t.loc[i];
3021
3022		if (n > PAGE_SIZE - 100)
3023			break;
3024		n += sprintf(buf + n, "%7ld ", l->count);
3025
3026		if (l->addr)
3027			n += sprint_symbol(buf + n, (unsigned long)l->addr);
3028		else
3029			n += sprintf(buf + n, "<not-available>");
3030
3031		if (l->sum_time != l->min_time) {
3032			unsigned long remainder;
3033
3034			n += sprintf(buf + n, " age=%ld/%ld/%ld",
3035			l->min_time,
3036			div_long_long_rem(l->sum_time, l->count, &remainder),
3037			l->max_time);
3038		} else
3039			n += sprintf(buf + n, " age=%ld",
3040				l->min_time);
3041
3042		if (l->min_pid != l->max_pid)
3043			n += sprintf(buf + n, " pid=%ld-%ld",
3044				l->min_pid, l->max_pid);
3045		else
3046			n += sprintf(buf + n, " pid=%ld",
3047				l->min_pid);
3048
3049		if (num_online_cpus() > 1 && !cpus_empty(l->cpus) &&
3050				n < PAGE_SIZE - 60) {
3051			n += sprintf(buf + n, " cpus=");
3052			n += cpulist_scnprintf(buf + n, PAGE_SIZE - n - 50,
3053					l->cpus);
3054		}
3055
3056		if (num_online_nodes() > 1 && !nodes_empty(l->nodes) &&
3057				n < PAGE_SIZE - 60) {
3058			n += sprintf(buf + n, " nodes=");
3059			n += nodelist_scnprintf(buf + n, PAGE_SIZE - n - 50,
3060					l->nodes);
3061		}
3062
3063		n += sprintf(buf + n, "\n");
3064	}
3065
3066	free_loc_track(&t);
3067	if (!t.count)
3068		n += sprintf(buf, "No data\n");
3069	return n;
3070}
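
/*
 * Example output line (editor's illustration; the symbol and numbers
 * are made up):
 *
 *	    120 kmem_cache_alloc+0x5b/0x90 age=4/1300/2600 pid=12-400 cpus=0-3 nodes=0
 *
 * i.e. the number of objects, the call site, min/avg/max object age in
 * jiffies, the pid range and the cpus/nodes the events occurred on.
 */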
3071
3072static unsigned long count_partial(struct kmem_cache_node *n)
3073{
3074	unsigned long flags;
3075	unsigned long x = 0;
3076	struct page *page;
3077
3078	spin_lock_irqsave(&n->list_lock, flags);
3079	list_for_each_entry(page, &n->partial, lru)
3080		x += page->inuse;
3081	spin_unlock_irqrestore(&n->list_lock, flags);
3082	return x;
3083}
3084
3085enum slab_stat_type {
3086	SL_FULL,
3087	SL_PARTIAL,
3088	SL_CPU,
3089	SL_OBJECTS
3090};
3091
3092#define SO_FULL		(1 << SL_FULL)
3093#define SO_PARTIAL	(1 << SL_PARTIAL)
3094#define SO_CPU		(1 << SL_CPU)
3095#define SO_OBJECTS	(1 << SL_OBJECTS)
3096
3097static unsigned long slab_objects(struct kmem_cache *s,
3098			char *buf, unsigned long flags)
3099{
3100	unsigned long total = 0;
3101	int cpu;
3102	int node;
3103	int x;
3104	unsigned long *nodes;
3105	unsigned long *per_cpu;
3106
3107	nodes = kzalloc(2 * sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
	if (!nodes)
		return -ENOMEM;
3108	per_cpu = nodes + nr_node_ids;
3109
3110	for_each_possible_cpu(cpu) {
3111		struct page *page = s->cpu_slab[cpu];
3112		int node;
3113
3114		if (page) {
3115			node = page_to_nid(page);
3116			if (flags & SO_CPU) {
3117				int x = 0;
3118
3119				if (flags & SO_OBJECTS)
3120					x = page->inuse;
3121				else
3122					x = 1;
3123				total += x;
3124				nodes[node] += x;
3125			}
3126			per_cpu[node]++;
3127		}
3128	}
3129
3130	for_each_online_node(node) {
3131		struct kmem_cache_node *n = get_node(s, node);
3132
3133		if (flags & SO_PARTIAL) {
3134			if (flags & SO_OBJECTS)
3135				x = count_partial(n);
3136			else
3137				x = n->nr_partial;
3138			total += x;
3139			nodes[node] += x;
3140		}
3141
3142		if (flags & SO_FULL) {
3143			int full_slabs = atomic_long_read(&n->nr_slabs)
3144					- per_cpu[node]
3145					- n->nr_partial;
3146
3147			if (flags & SO_OBJECTS)
3148				x = full_slabs * s->objects;
3149			else
3150				x = full_slabs;
3151			total += x;
3152			nodes[node] += x;
3153		}
3154	}
3155
3156	x = sprintf(buf, "%lu", total);
3157#ifdef CONFIG_NUMA
3158	for_each_online_node(node)
3159		if (nodes[node])
3160			x += sprintf(buf + x, " N%d=%lu",
3161					node, nodes[node]);
3162#endif
3163	kfree(nodes);
3164	return x + sprintf(buf + x, "\n");
3165}
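
/*
 * Example output (editor's illustration): reading the objects attribute
 * of a cache on a two node NUMA machine might return
 *
 *	512 N0=256 N1=256
 *
 * i.e. the total followed by a per node breakdown.
 */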
3166
3167static int any_slab_objects(struct kmem_cache *s)
3168{
3169	int node;
3170	int cpu;
3171
3172	for_each_possible_cpu(cpu)
3173		if (s->cpu_slab[cpu])
3174			return 1;
3175
3176	for_each_node(node) {
3177		struct kmem_cache_node *n = get_node(s, node);
3178
3179		if (n->nr_partial || atomic_long_read(&n->nr_slabs))
3180			return 1;
3181	}
3182	return 0;
3183}
3184
3185#define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
3186#define to_slab(n) container_of(n, struct kmem_cache, kobj)
3187
3188struct slab_attribute {
3189	struct attribute attr;
3190	ssize_t (*show)(struct kmem_cache *s, char *buf);
3191	ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
3192};
3193
3194#define SLAB_ATTR_RO(_name) \
3195	static struct slab_attribute _name##_attr = __ATTR_RO(_name)
3196
3197#define SLAB_ATTR(_name) \
3198	static struct slab_attribute _name##_attr =  \
3199	__ATTR(_name, 0644, _name##_show, _name##_store)
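
/*
 * Example (editor's note): SLAB_ATTR_RO(order) expands to roughly
 *
 *	static struct slab_attribute order_attr = {
 *		.attr = { .name = "order", .mode = 0444 },
 *		.show = order_show,
 *	};
 *
 * so every attribute defined below becomes a file in /sys/slab/<cache>/.
 */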
3200
3201static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
3202{
3203	return sprintf(buf, "%d\n", s->size);
3204}
3205SLAB_ATTR_RO(slab_size);
3206
3207static ssize_t align_show(struct kmem_cache *s, char *buf)
3208{
3209	return sprintf(buf, "%d\n", s->align);
3210}
3211SLAB_ATTR_RO(align);
3212
3213static ssize_t object_size_show(struct kmem_cache *s, char *buf)
3214{
3215	return sprintf(buf, "%d\n", s->objsize);
3216}
3217SLAB_ATTR_RO(object_size);
3218
3219static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
3220{
3221	return sprintf(buf, "%d\n", s->objects);
3222}
3223SLAB_ATTR_RO(objs_per_slab);
3224
3225static ssize_t order_show(struct kmem_cache *s, char *buf)
3226{
3227	return sprintf(buf, "%d\n", s->order);
3228}
3229SLAB_ATTR_RO(order);
3230
3231static ssize_t ctor_show(struct kmem_cache *s, char *buf)
3232{
3233	if (s->ctor) {
3234		int n = sprint_symbol(buf, (unsigned long)s->ctor);
3235
3236		return n + sprintf(buf + n, "\n");
3237	}
3238	return 0;
3239}
3240SLAB_ATTR_RO(ctor);
3241
3242static ssize_t aliases_show(struct kmem_cache *s, char *buf)
3243{
3244	return sprintf(buf, "%d\n", s->refcount - 1);
3245}
3246SLAB_ATTR_RO(aliases);
3247
3248static ssize_t slabs_show(struct kmem_cache *s, char *buf)
3249{
3250	return slab_objects(s, buf, SO_FULL|SO_PARTIAL|SO_CPU);
3251}
3252SLAB_ATTR_RO(slabs);
3253
3254static ssize_t partial_show(struct kmem_cache *s, char *buf)
3255{
3256	return slab_objects(s, buf, SO_PARTIAL);
3257}
3258SLAB_ATTR_RO(partial);
3259
3260static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
3261{
3262	return slab_objects(s, buf, SO_CPU);
3263}
3264SLAB_ATTR_RO(cpu_slabs);
3265
3266static ssize_t objects_show(struct kmem_cache *s, char *buf)
3267{
3268	return slab_objects(s, buf, SO_FULL|SO_PARTIAL|SO_CPU|SO_OBJECTS);
3269}
3270SLAB_ATTR_RO(objects);
3271
3272static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
3273{
3274	return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE));
3275}
3276
3277static ssize_t sanity_checks_store(struct kmem_cache *s,
3278				const char *buf, size_t length)
3279{
3280	s->flags &= ~SLAB_DEBUG_FREE;
3281	if (buf[0] == '1')
3282		s->flags |= SLAB_DEBUG_FREE;
3283	return length;
3284}
3285SLAB_ATTR(sanity_checks);
3286
3287static ssize_t trace_show(struct kmem_cache *s, char *buf)
3288{
3289	return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE));
3290}
3291
3292static ssize_t trace_store(struct kmem_cache *s, const char *buf,
3293							size_t length)
3294{
3295	s->flags &= ~SLAB_TRACE;
3296	if (buf[0] == '1')
3297		s->flags |= SLAB_TRACE;
3298	return length;
3299}
3300SLAB_ATTR(trace);
3301
3302static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
3303{
3304	return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
3305}
3306
3307static ssize_t reclaim_account_store(struct kmem_cache *s,
3308				const char *buf, size_t length)
3309{
3310	s->flags &= ~SLAB_RECLAIM_ACCOUNT;
3311	if (buf[0] == '1')
3312		s->flags |= SLAB_RECLAIM_ACCOUNT;
3313	return length;
3314}
3315SLAB_ATTR(reclaim_account);
3316
3317static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
3318{
3319	return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
3320}
3321SLAB_ATTR_RO(hwcache_align);
3322
3323#ifdef CONFIG_ZONE_DMA
3324static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
3325{
3326	return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
3327}
3328SLAB_ATTR_RO(cache_dma);
3329#endif
3330
3331static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
3332{
3333	return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
3334}
3335SLAB_ATTR_RO(destroy_by_rcu);
3336
3337static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
3338{
3339	return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
3340}
3341
3342static ssize_t red_zone_store(struct kmem_cache *s,
3343				const char *buf, size_t length)
3344{
3345	if (any_slab_objects(s))
3346		return -EBUSY;
3347
3348	s->flags &= ~SLAB_RED_ZONE;
3349	if (buf[0] == '1')
3350		s->flags |= SLAB_RED_ZONE;
3351	calculate_sizes(s);
3352	return length;
3353}
3354SLAB_ATTR(red_zone);
3355
3356static ssize_t poison_show(struct kmem_cache *s, char *buf)
3357{
3358	return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON));
3359}
3360
3361static ssize_t poison_store(struct kmem_cache *s,
3362				const char *buf, size_t length)
3363{
3364	if (any_slab_objects(s))
3365		return -EBUSY;
3366
3367	s->flags &= ~SLAB_POISON;
3368	if (buf[0] == '1')
3369		s->flags |= SLAB_POISON;
3370	calculate_sizes(s);
3371	return length;
3372}
3373SLAB_ATTR(poison);
3374
3375static ssize_t store_user_show(struct kmem_cache *s, char *buf)
3376{
3377	return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
3378}
3379
3380static ssize_t store_user_store(struct kmem_cache *s,
3381				const char *buf, size_t length)
3382{
3383	if (any_slab_objects(s))
3384		return -EBUSY;
3385
3386	s->flags &= ~SLAB_STORE_USER;
3387	if (buf[0] == '1')
3388		s->flags |= SLAB_STORE_USER;
3389	calculate_sizes(s);
3390	return length;
3391}
3392SLAB_ATTR(store_user);
3393
3394static ssize_t validate_show(struct kmem_cache *s, char *buf)
3395{
3396	return 0;
3397}
3398
3399static ssize_t validate_store(struct kmem_cache *s,
3400			const char *buf, size_t length)
3401{
3402	if (buf[0] == '1')
3403		validate_slab_cache(s);
3404	else
3405		return -EINVAL;
3406	return length;
3407}
3408SLAB_ATTR(validate);
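
/*
 * Example (editor's note): a validation pass over all slabs of a cache
 * can be triggered from user space with
 *
 *	echo 1 > /sys/slab/kmalloc-128/validate
 *
 * Any inconsistencies found are reported via printk as shown above.
 */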
3409
3410static ssize_t shrink_show(struct kmem_cache *s, char *buf)
3411{
3412	return 0;
3413}
3414
3415static ssize_t shrink_store(struct kmem_cache *s,
3416			const char *buf, size_t length)
3417{
3418	if (buf[0] == '1') {
3419		int rc = kmem_cache_shrink(s);
3420
3421		if (rc)
3422			return rc;
3423	} else
3424		return -EINVAL;
3425	return length;
3426}
3427SLAB_ATTR(shrink);
3428
3429static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
3430{
3431	if (!(s->flags & SLAB_STORE_USER))
3432		return -ENOSYS;
3433	return list_locations(s, buf, TRACK_ALLOC);
3434}
3435SLAB_ATTR_RO(alloc_calls);
3436
3437static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
3438{
3439	if (!(s->flags & SLAB_STORE_USER))
3440		return -ENOSYS;
3441	return list_locations(s, buf, TRACK_FREE);
3442}
3443SLAB_ATTR_RO(free_calls);
3444
3445#ifdef CONFIG_NUMA
3446static ssize_t defrag_ratio_show(struct kmem_cache *s, char *buf)
3447{
3448	return sprintf(buf, "%d\n", s->defrag_ratio / 10);
3449}
3450
3451static ssize_t defrag_ratio_store(struct kmem_cache *s,
3452				const char *buf, size_t length)
3453{
3454	int n = simple_strtoul(buf, NULL, 10);
3455
3456	if (n < 100)
3457		s->defrag_ratio = n * 10;
3458	return length;
3459}
3460SLAB_ATTR(defrag_ratio);
3461#endif
3462
3463static struct attribute *slab_attrs[] = {
3464	&slab_size_attr.attr,
3465	&object_size_attr.attr,
3466	&objs_per_slab_attr.attr,
3467	&order_attr.attr,
3468	&objects_attr.attr,
3469	&slabs_attr.attr,
3470	&partial_attr.attr,
3471	&cpu_slabs_attr.attr,
3472	&ctor_attr.attr,
3473	&aliases_attr.attr,
3474	&align_attr.attr,
3475	&sanity_checks_attr.attr,
3476	&trace_attr.attr,
3477	&hwcache_align_attr.attr,
3478	&reclaim_account_attr.attr,
3479	&destroy_by_rcu_attr.attr,
3480	&red_zone_attr.attr,
3481	&poison_attr.attr,
3482	&store_user_attr.attr,
3483	&validate_attr.attr,
3484	&shrink_attr.attr,
3485	&alloc_calls_attr.attr,
3486	&free_calls_attr.attr,
3487#ifdef CONFIG_ZONE_DMA
3488	&cache_dma_attr.attr,
3489#endif
3490#ifdef CONFIG_NUMA
3491	&defrag_ratio_attr.attr,
3492#endif
3493	NULL
3494};
3495
3496static struct attribute_group slab_attr_group = {
3497	.attrs = slab_attrs,
3498};
3499
3500static ssize_t slab_attr_show(struct kobject *kobj,
3501				struct attribute *attr,
3502				char *buf)
3503{
3504	struct slab_attribute *attribute;
3505	struct kmem_cache *s;
3506	int err;
3507
3508	attribute = to_slab_attr(attr);
3509	s = to_slab(kobj);
3510
3511	if (!attribute->show)
3512		return -EIO;
3513
3514	err = attribute->show(s, buf);
3515
3516	return err;
3517}
3518
3519static ssize_t slab_attr_store(struct kobject *kobj,
3520				struct attribute *attr,
3521				const char *buf, size_t len)
3522{
3523	struct slab_attribute *attribute;
3524	struct kmem_cache *s;
3525	int err;
3526
3527	attribute = to_slab_attr(attr);
3528	s = to_slab(kobj);
3529
3530	if (!attribute->store)
3531		return -EIO;
3532
3533	err = attribute->store(s, buf, len);
3534
3535	return err;
3536}
3537
3538static struct sysfs_ops slab_sysfs_ops = {
3539	.show = slab_attr_show,
3540	.store = slab_attr_store,
3541};
3542
3543static struct kobj_type slab_ktype = {
3544	.sysfs_ops = &slab_sysfs_ops,
3545};
3546
3547static int uevent_filter(struct kset *kset, struct kobject *kobj)
3548{
3549	struct kobj_type *ktype = get_ktype(kobj);
3550
3551	if (ktype == &slab_ktype)
3552		return 1;
3553	return 0;
3554}
3555
3556static struct kset_uevent_ops slab_uevent_ops = {
3557	.filter = uevent_filter,
3558};
3559
3560decl_subsys(slab, &slab_ktype, &slab_uevent_ops);
3561
3562#define ID_STR_LENGTH 64
3563
3564/* Create a unique string id for a slab cache:
3565 * format
3566 * :[flags-]size
3567 */
3568static char *create_unique_id(struct kmem_cache *s)
3569{
3570	char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
3571	char *p = name;
3572
3573	BUG_ON(!name);
3574
3575	*p++ = ':';
3576	/*
3577	 * First flags affecting slabcache operations. We will only
3578	 * get here for aliasable slabs so we do not need to support
3579	 * too many flags. The flags here must cover all flags that
3580	 * are matched during merging to guarantee that the id is
3581	 * unique.
3582	 */
3583	if (s->flags & SLAB_CACHE_DMA)
3584		*p++ = 'd';
3585	if (s->flags & SLAB_RECLAIM_ACCOUNT)
3586		*p++ = 'a';
3587	if (s->flags & SLAB_DEBUG_FREE)
3588		*p++ = 'F';
3589	if (p != name + 1)
3590		*p++ = '-';
3591	p += sprintf(p, "%07d", s->size);
3592	BUG_ON(p > name + ID_STR_LENGTH - 1);
3593	return name;
3594}
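
/*
 * Example (editor's illustration): a 192 byte cache with SLAB_CACHE_DMA
 * set gets the id ":d-0000192" while a plain mergeable cache of size
 * 4096 gets ":0004096".
 */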
3595
3596static int sysfs_slab_add(struct kmem_cache *s)
3597{
3598	int err;
3599	const char *name;
3600	int unmergeable;
3601
3602	if (slab_state < SYSFS)
3603		/* Defer until later */
3604		return 0;
3605
3606	unmergeable = slab_unmergeable(s);
3607	if (unmergeable) {
3608		/*
3609		 * Slabcache can never be merged so we can use the name proper.
3610		 * This is typically the case for debug situations. In that
3611		 * case we can catch duplicate names easily.
3612		 */
3613		sysfs_remove_link(&slab_subsys.kobj, s->name);
3614		name = s->name;
3615	} else {
3616		/*
3617		 * Create a unique name for the slab as a target
3618		 * for the symlinks.
3619		 */
3620		name = create_unique_id(s);
3621	}
3622
3623	kobj_set_kset_s(s, slab_subsys);
3624	kobject_set_name(&s->kobj, name);
3625	kobject_init(&s->kobj);
3626	err = kobject_add(&s->kobj);
3627	if (err)
3628		return err;
3629
3630	err = sysfs_create_group(&s->kobj, &slab_attr_group);
3631	if (err)
3632		return err;
3633	kobject_uevent(&s->kobj, KOBJ_ADD);
3634	if (!unmergeable) {
3635		/* Setup first alias */
3636		sysfs_slab_alias(s, s->name);
3637		kfree(name);
3638	}
3639	return 0;
3640}
3641
3642static void sysfs_slab_remove(struct kmem_cache *s)
3643{
3644	kobject_uevent(&s->kobj, KOBJ_REMOVE);
3645	kobject_del(&s->kobj);
3646}
3647
3648/*
3649 * Need to buffer aliases during bootup until sysfs becomes
3650 * available lest we lose that information.
3651 */
3652struct saved_alias {
3653	struct kmem_cache *s;
3654	const char *name;
3655	struct saved_alias *next;
3656};
3657
3658struct saved_alias *alias_list;
3659
3660static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
3661{
3662	struct saved_alias *al;
3663
3664	if (slab_state == SYSFS) {
3665		/*
3666		 * If we have a leftover link then remove it.
3667		 */
3668		sysfs_remove_link(&slab_subsys.kobj, name);
3669		return sysfs_create_link(&slab_subsys.kobj,
3670						&s->kobj, name);
3671	}
3672
3673	al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
3674	if (!al)
3675		return -ENOMEM;
3676
3677	al->s = s;
3678	al->name = name;
3679	al->next = alias_list;
3680	alias_list = al;
3681	return 0;
3682}
3683
3684static int __init slab_sysfs_init(void)
3685{
3686	struct list_head *h;
3687	int err;
3688
3689	err = subsystem_register(&slab_subsys);
3690	if (err) {
3691		printk(KERN_ERR "Cannot register slab subsystem.\n");
3692		return -ENOSYS;
3693	}
3694
3695	slab_state = SYSFS;
3696
3697	list_for_each(h, &slab_caches) {
3698		struct kmem_cache *s =
3699			container_of(h, struct kmem_cache, list);
3700
3701		err = sysfs_slab_add(s);
3702		BUG_ON(err);
3703	}
3704
3705	while (alias_list) {
3706		struct saved_alias *al = alias_list;
3707
3708		alias_list = alias_list->next;
3709		err = sysfs_slab_alias(al->s, al->name);
3710		BUG_ON(err);
3711		kfree(al);
3712	}
3713
3714	resiliency_test();
3715	return 0;
3716}
3717
3718__initcall(slab_sysfs_init);
3719#endif
3720