/* slob.c, revision d602dabaeba79df90cc67c32d5fe4ee0d5e2b73a */
/*
 * SLOB Allocator: Simple List Of Blocks
 *
 * Matt Mackall <mpm@selenic.com> 12/30/03
 *
 * NUMA support by Paul Mundt, 2007.
 *
 * How SLOB works:
 *
 * The core of SLOB is a traditional K&R style heap allocator, with
 * support for returning aligned objects. The granularity of this
 * allocator is as little as 2 bytes; however, most architectures will
 * typically require 4 bytes on 32-bit and 8 bytes on 64-bit.
 *
 * The slob heap is a set of linked lists of pages from alloc_pages(),
 * and within each page, there is a singly-linked list of free blocks
 * (slob_t). The heap is grown on demand. To reduce fragmentation,
 * heap pages are segregated into three lists: objects less than
 * 256 bytes, objects less than 1024 bytes, and all other objects.
 *
 * Allocation from the heap involves first searching for a page with
 * sufficient free blocks (using a next-fit-like approach) followed by
 * a first-fit scan of the page. Deallocation inserts objects back
 * into the free list in address order, so this is effectively an
 * address-ordered first fit.
 *
 * Above this is an implementation of kmalloc/kfree. Blocks returned
 * from kmalloc are prepended with a 4-byte header recording the kmalloc
 * size (padded out to the minimum kmalloc alignment). If kmalloc is asked
 * for objects of PAGE_SIZE or larger, it calls alloc_pages() directly,
 * allocating compound pages so the page order does not have to be
 * separately tracked, and also stores the exact allocation size in
 * page->private so that it can be used to accurately provide ksize().
 * These objects are detected in kfree() because PageSlab() is false
 * for them.
 *
 * SLAB is emulated on top of SLOB by simply calling constructors and
 * destructors for every SLAB allocation. Objects are returned with
 * 4-byte alignment unless the SLAB_HWCACHE_ALIGN flag is set, in which
 * case the low-level allocator will fragment blocks to create the proper
 * alignment. Again, objects of page-size or greater are allocated by
 * calling alloc_pages(). As SLAB objects know their size, no separate
 * size bookkeeping is necessary and there is essentially no allocation
 * space overhead, and compound pages aren't needed for multi-page
 * allocations.
 *
 * NUMA support in SLOB is fairly simplistic, pushing most of the real
 * logic down to the page allocator, and simply doing the node accounting
 * on the upper levels. In the event that a node id is explicitly
 * provided, alloc_pages_exact_node() with the specified node id is used
 * instead. The common case (or when the node id isn't explicitly provided)
 * will default to the current node, as per numa_node_id().
 *
 * Node-aware pages are still inserted into the global freelist, and
 * these are scanned for by matching against the node id encoded in the
 * page flags. As a result, block allocations that can be satisfied from
 * the freelist are only made from pages residing on the same node,
 * in order to prevent random node placement.
 */
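
/*
 * Illustrative sketch of a small kmalloc() block as built by
 * __kmalloc_node() below, where 'align' is
 * max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN):
 *
 *	base                           base + align
 *	| size (unsigned int), padding | payload returned to the caller ... |
 *
 * kfree() and ksize() step back 'align' bytes from the pointer they are
 * given to recover the stored size.
 */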

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/swap.h> /* struct reclaim_state */
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/kmemtrace.h>
#include <linux/kmemleak.h>
#include <asm/atomic.h>

/*
 * slob_block has a field 'units', which holds the size of the block if
 * positive, or the negated offset of the next free block if negative
 * (both measured in SLOB_UNITs).
 *
 * Free blocks of size 1 unit simply contain the offset of the next block.
 * Those with larger size contain their size in the first SLOB_UNIT of
 * memory, and the offset of the next free block in the second SLOB_UNIT.
 */
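/*
 * For example, a free block of 3 units whose next free block starts 10
 * units from the beginning of its page is encoded as s[0].units = 3,
 * s[1].units = 10, while a 1-unit free block with the same successor is
 * encoded as s[0].units = -10 (see set_slob() and slob_next() below).
 * A block whose decoded next pointer lands on a page boundary is the
 * last free block in its page (see slob_last()).
 */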
#if PAGE_SIZE <= (32767 * 2)
typedef s16 slobidx_t;
#else
typedef s32 slobidx_t;
#endif

struct slob_block {
	slobidx_t units;
};
typedef struct slob_block slob_t;

/*
 * We use struct page fields to manage some slob allocation aspects,
 * however to avoid the horrible mess in include/linux/mm_types.h, we'll
 * just define our own struct page type variant here.
 */
struct slob_page {
	union {
		struct {
			unsigned long flags;	/* mandatory */
			atomic_t _count;	/* mandatory */
			slobidx_t units;	/* free units left in page */
			unsigned long pad[2];
			slob_t *free;		/* first free slob_t in page */
			struct list_head list;	/* linked list of free pages */
		};
		struct page page;
	};
};
static inline void struct_slob_page_wrong_size(void)
{ BUILD_BUG_ON(sizeof(struct slob_page) != sizeof(struct page)); }

/*
 * free_slob_page: call before a slob_page is returned to the page allocator.
 */
static inline void free_slob_page(struct slob_page *sp)
{
	reset_page_mapcount(&sp->page);
	sp->page.mapping = NULL;
}

/*
 * All partially free slob pages go on these lists.
 */
#define SLOB_BREAK1 256
#define SLOB_BREAK2 1024
static LIST_HEAD(free_slob_small);
static LIST_HEAD(free_slob_medium);
static LIST_HEAD(free_slob_large);

/*
 * is_slob_page: True for all slob pages (false for bigblock pages)
 */
static inline int is_slob_page(struct slob_page *sp)
{
	return PageSlab((struct page *)sp);
}

static inline void set_slob_page(struct slob_page *sp)
{
	__SetPageSlab((struct page *)sp);
}

static inline void clear_slob_page(struct slob_page *sp)
{
	__ClearPageSlab((struct page *)sp);
}

static inline struct slob_page *slob_page(const void *addr)
{
	return (struct slob_page *)virt_to_page(addr);
}

/*
 * slob_page_free: true for pages on free_slob_pages list.
 */
static inline int slob_page_free(struct slob_page *sp)
{
	return PageSlobFree((struct page *)sp);
}

static void set_slob_page_free(struct slob_page *sp, struct list_head *list)
{
	list_add(&sp->list, list);
	__SetPageSlobFree((struct page *)sp);
}

static inline void clear_slob_page_free(struct slob_page *sp)
{
	list_del(&sp->list);
	__ClearPageSlobFree((struct page *)sp);
}

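/*
 * A SLOB unit is one slob_t (2 or 4 bytes depending on slobidx_t above);
 * SLOB_UNITS() rounds a byte count up to whole units, e.g. with a 2-byte
 * slob_t, SLOB_UNITS(5) == 3. SLOB_ALIGN is the alignment handed out for
 * SLAB_HWCACHE_ALIGN caches.
 */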
#define SLOB_UNIT sizeof(slob_t)
#define SLOB_UNITS(size) (((size) + SLOB_UNIT - 1)/SLOB_UNIT)
#define SLOB_ALIGN L1_CACHE_BYTES

/*
 * struct slob_rcu is inserted at the tail of allocated slob blocks, which
 * were created with a SLAB_DESTROY_BY_RCU slab. slob_rcu is used to free
 * the block using call_rcu.
 */
struct slob_rcu {
	struct rcu_head head;
	int size;
};

/*
 * slob_lock protects all slob allocator structures.
 */
static DEFINE_SPINLOCK(slob_lock);

/*
 * Encode the given size and next info into a free slob block s.
 */
static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
{
	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
	slobidx_t offset = next - base;

	if (size > 1) {
		s[0].units = size;
		s[1].units = offset;
	} else
		s[0].units = -offset;
}

/*
 * Return the size of a slob block.
 */
static slobidx_t slob_units(slob_t *s)
{
	if (s->units > 0)
		return s->units;
	return 1;
}

/*
 * Return the next free slob block pointer after this one.
 */
static slob_t *slob_next(slob_t *s)
{
	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
	slobidx_t next;

	if (s[0].units < 0)
		next = -s[0].units;
	else
		next = s[1].units;
	return base+next;
}

/*
 * Returns true if s is the last free block in its page.
 */
static int slob_last(slob_t *s)
{
	return !((unsigned long)slob_next(s) & ~PAGE_MASK);
}

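/*
 * slob_new_pages: grab a fresh page run of the given order from the page
 * allocator, from a specific NUMA node when one is requested (node != -1),
 * and return its kernel virtual address, or NULL on failure.
 */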
static void *slob_new_pages(gfp_t gfp, int order, int node)
{
	void *page;

#ifdef CONFIG_NUMA
	if (node != -1)
		page = alloc_pages_exact_node(node, gfp, order);
	else
#endif
		page = alloc_pages(gfp, order);

	if (!page)
		return NULL;

	return page_address(page);
}

static void slob_free_pages(void *b, int order)
{
	if (current->reclaim_state)
		current->reclaim_state->reclaimed_slab += 1 << order;
	free_pages((unsigned long)b, order);
}

/*
 * Allocate a slob block within a given slob_page sp.
 */
static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
{
	slob_t *prev, *cur, *aligned = NULL;
	int delta = 0, units = SLOB_UNITS(size);

	for (prev = NULL, cur = sp->free; ; prev = cur, cur = slob_next(cur)) {
		slobidx_t avail = slob_units(cur);

		if (align) {
			aligned = (slob_t *)ALIGN((unsigned long)cur, align);
			delta = aligned - cur;
		}
		if (avail >= units + delta) { /* room enough? */
			slob_t *next;

			if (delta) { /* need to fragment head to align? */
				next = slob_next(cur);
				set_slob(aligned, avail - delta, next);
				set_slob(cur, delta, aligned);
				prev = cur;
				cur = aligned;
				avail = slob_units(cur);
			}

			next = slob_next(cur);
			if (avail == units) { /* exact fit? unlink. */
				if (prev)
					set_slob(prev, slob_units(prev), next);
				else
					sp->free = next;
			} else { /* fragment */
				if (prev)
					set_slob(prev, slob_units(prev), cur + units);
				else
					sp->free = cur + units;
				set_slob(cur + units, avail - units, next);
			}

			sp->units -= units;
			if (!sp->units)
				clear_slob_page_free(sp);
			return cur;
		}
		if (slob_last(cur))
			return NULL;
	}
}

/*
 * slob_alloc: entry point into the slob allocator.
 */
static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
{
	struct slob_page *sp;
	struct list_head *prev;
	struct list_head *slob_list;
	slob_t *b = NULL;
	unsigned long flags;

	if (size < SLOB_BREAK1)
		slob_list = &free_slob_small;
	else if (size < SLOB_BREAK2)
		slob_list = &free_slob_medium;
	else
		slob_list = &free_slob_large;

	spin_lock_irqsave(&slob_lock, flags);
	/* Iterate through each partially free page, try to find room */
	list_for_each_entry(sp, slob_list, list) {
#ifdef CONFIG_NUMA
		/*
		 * If there's a node specification, search for a partial
		 * page with a matching node id in the freelist.
		 */
		if (node != -1 && page_to_nid(&sp->page) != node)
			continue;
#endif
		/* Enough room on this page? */
		if (sp->units < SLOB_UNITS(size))
			continue;

		/* Attempt to alloc */
		prev = sp->list.prev;
		b = slob_page_alloc(sp, size, align);
		if (!b)
			continue;

		/* Improve fragment distribution and reduce our average
		 * search time by starting our next search here. (see
		 * Knuth vol 1, sec 2.5, pg 449) */
		if (prev != slob_list->prev &&
				slob_list->next != prev->next)
			list_move_tail(slob_list, prev->next);
		break;
	}
	spin_unlock_irqrestore(&slob_lock, flags);

	/* Not enough space: must allocate a new page */
	if (!b) {
		b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
		if (!b)
			return NULL;
		sp = slob_page(b);
		set_slob_page(sp);

		spin_lock_irqsave(&slob_lock, flags);
		sp->units = SLOB_UNITS(PAGE_SIZE);
		sp->free = b;
		INIT_LIST_HEAD(&sp->list);
		set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
		set_slob_page_free(sp, slob_list);
		b = slob_page_alloc(sp, size, align);
		BUG_ON(!b);
		spin_unlock_irqrestore(&slob_lock, flags);
	}
	if (unlikely((gfp & __GFP_ZERO) && b))
		memset(b, 0, size);
	return b;
}

/*
 * slob_free: entry point into the slob allocator.
 */
static void slob_free(void *block, int size)
{
	struct slob_page *sp;
	slob_t *prev, *next, *b = (slob_t *)block;
	slobidx_t units;
	unsigned long flags;
	struct list_head *slob_list;

	if (unlikely(ZERO_OR_NULL_PTR(block)))
		return;
	BUG_ON(!size);

	sp = slob_page(block);
	units = SLOB_UNITS(size);

	spin_lock_irqsave(&slob_lock, flags);

	if (sp->units + units == SLOB_UNITS(PAGE_SIZE)) {
		/* Go directly to page allocator. Do not pass slob allocator */
		if (slob_page_free(sp))
			clear_slob_page_free(sp);
		spin_unlock_irqrestore(&slob_lock, flags);
		clear_slob_page(sp);
		free_slob_page(sp);
		slob_free_pages(b, 0);
		return;
	}

	if (!slob_page_free(sp)) {
		/* This slob page is about to become partially free. Easy! */
		sp->units = units;
		sp->free = b;
		set_slob(b, units,
			(void *)((unsigned long)(b +
					SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
		if (size < SLOB_BREAK1)
			slob_list = &free_slob_small;
		else if (size < SLOB_BREAK2)
			slob_list = &free_slob_medium;
		else
			slob_list = &free_slob_large;
		set_slob_page_free(sp, slob_list);
		goto out;
	}

	/*
	 * Otherwise the page is already partially free, so find reinsertion
	 * point.
	 */
	sp->units += units;

	if (b < sp->free) {
		if (b + units == sp->free) {
			units += slob_units(sp->free);
			sp->free = slob_next(sp->free);
		}
		set_slob(b, units, sp->free);
		sp->free = b;
	} else {
		prev = sp->free;
		next = slob_next(prev);
		while (b > next) {
			prev = next;
			next = slob_next(prev);
		}

		if (!slob_last(prev) && b + units == next) {
			units += slob_units(next);
			set_slob(b, units, slob_next(next));
		} else
			set_slob(b, units, next);

		if (prev + slob_units(prev) == b) {
			units = slob_units(b) + slob_units(prev);
			set_slob(prev, units, slob_next(b));
		} else
			set_slob(prev, slob_units(prev), b);
	}
out:
	spin_unlock_irqrestore(&slob_lock, flags);
}

/*
 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
 */

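/*
 * __kmalloc_node: small requests get an 'align'-byte header recording the
 * requested size just ahead of the returned pointer; requests of
 * PAGE_SIZE - align bytes and up bypass slob and go straight to the page
 * allocator as compound pages, with the size kept in page->private.
 */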
void *__kmalloc_node(size_t size, gfp_t gfp, int node)
{
	unsigned int *m;
	int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
	void *ret;

	lockdep_trace_alloc(gfp);

	if (size < PAGE_SIZE - align) {
		if (!size)
			return ZERO_SIZE_PTR;

		m = slob_alloc(size + align, gfp, align, node);

		if (!m)
			return NULL;
		*m = size;
		ret = (void *)m + align;

		trace_kmalloc_node(_RET_IP_, ret,
				   size, size + align, gfp, node);
	} else {
		unsigned int order = get_order(size);

		ret = slob_new_pages(gfp | __GFP_COMP, order, node);
		if (ret) {
			struct page *page;
			page = virt_to_page(ret);
			page->private = size;
		}

		trace_kmalloc_node(_RET_IP_, ret,
				   size, PAGE_SIZE << order, gfp, node);
	}

	kmemleak_alloc(ret, size, 1, gfp);
	return ret;
}
EXPORT_SYMBOL(__kmalloc_node);

void kfree(const void *block)
{
	struct slob_page *sp;

	trace_kfree(_RET_IP_, block);

	if (unlikely(ZERO_OR_NULL_PTR(block)))
		return;
	kmemleak_free(block);

	sp = slob_page(block);
	if (is_slob_page(sp)) {
		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
		unsigned int *m = (unsigned int *)(block - align);
		slob_free(m, *m + align);
	} else
		put_page(&sp->page);
}
EXPORT_SYMBOL(kfree);

/* can't use ksize for kmem_cache_alloc memory, only kmalloc */
size_t ksize(const void *block)
{
	struct slob_page *sp;

	BUG_ON(!block);
	if (unlikely(block == ZERO_SIZE_PTR))
		return 0;

	sp = slob_page(block);
	if (is_slob_page(sp)) {
		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
		unsigned int *m = (unsigned int *)(block - align);
		return SLOB_UNITS(*m) * SLOB_UNIT;
	} else
		return sp->page.private;
}
EXPORT_SYMBOL(ksize);

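/*
 * Minimal kmem_cache emulation: a cache is just a size, an alignment, flags,
 * a name and an optional constructor; every allocation simply falls through
 * to slob_alloc() or, for objects of a page or more, the page allocator.
 */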
struct kmem_cache {
	unsigned int size, align;
	unsigned long flags;
	const char *name;
	void (*ctor)(void *);
};

struct kmem_cache *kmem_cache_create(const char *name, size_t size,
	size_t align, unsigned long flags, void (*ctor)(void *))
{
	struct kmem_cache *c;

	c = slob_alloc(sizeof(struct kmem_cache),
		GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);

	if (c) {
		c->name = name;
		c->size = size;
		if (flags & SLAB_DESTROY_BY_RCU) {
			/* leave room for rcu footer at the end of object */
			c->size += sizeof(struct slob_rcu);
		}
		c->flags = flags;
		c->ctor = ctor;
		/* ignore alignment unless it's forced */
		c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
		if (c->align < ARCH_SLAB_MINALIGN)
			c->align = ARCH_SLAB_MINALIGN;
		if (c->align < align)
			c->align = align;
	} else if (flags & SLAB_PANIC)
		panic("Cannot create slab cache %s\n", name);

	kmemleak_alloc(c, sizeof(struct kmem_cache), 1, GFP_KERNEL);
	return c;
}
EXPORT_SYMBOL(kmem_cache_create);

void kmem_cache_destroy(struct kmem_cache *c)
{
	kmemleak_free(c);
	if (c->flags & SLAB_DESTROY_BY_RCU)
		rcu_barrier();
	slob_free(c, sizeof(struct kmem_cache));
}
EXPORT_SYMBOL(kmem_cache_destroy);

void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
{
	void *b;

	if (c->size < PAGE_SIZE) {
		b = slob_alloc(c->size, flags, c->align, node);
		trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
					    SLOB_UNITS(c->size) * SLOB_UNIT,
					    flags, node);
	} else {
		b = slob_new_pages(flags, get_order(c->size), node);
		trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
					    PAGE_SIZE << get_order(c->size),
					    flags, node);
	}

	if (c->ctor)
		c->ctor(b);

	kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags);
	return b;
}
EXPORT_SYMBOL(kmem_cache_alloc_node);

static void __kmem_cache_free(void *b, int size)
{
	if (size < PAGE_SIZE)
		slob_free(b, size);
	else
		slob_free_pages(b, get_order(size));
}

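/*
 * RCU callback for SLAB_DESTROY_BY_RCU caches: the struct slob_rcu footer
 * sits at the very end of the object, so step back by
 * (size - sizeof(struct slob_rcu)) bytes to recover the start of the block
 * before freeing it.
 */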
static void kmem_rcu_free(struct rcu_head *head)
{
	struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
	void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));

	__kmem_cache_free(b, slob_rcu->size);
}

void kmem_cache_free(struct kmem_cache *c, void *b)
{
	kmemleak_free_recursive(b, c->flags);
	if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
		struct slob_rcu *slob_rcu;
		slob_rcu = b + (c->size - sizeof(struct slob_rcu));
		INIT_RCU_HEAD(&slob_rcu->head);
		slob_rcu->size = c->size;
		call_rcu(&slob_rcu->head, kmem_rcu_free);
	} else {
		__kmem_cache_free(b, c->size);
	}

	trace_kmem_cache_free(_RET_IP_, b);
}
EXPORT_SYMBOL(kmem_cache_free);

unsigned int kmem_cache_size(struct kmem_cache *c)
{
	return c->size;
}
EXPORT_SYMBOL(kmem_cache_size);

const char *kmem_cache_name(struct kmem_cache *c)
{
	return c->name;
}
EXPORT_SYMBOL(kmem_cache_name);

int kmem_cache_shrink(struct kmem_cache *d)
{
	return 0;
}
EXPORT_SYMBOL(kmem_cache_shrink);

int kmem_ptr_validate(struct kmem_cache *a, const void *b)
{
	return 0;
}

static unsigned int slob_ready __read_mostly;

int slab_is_available(void)
{
	return slob_ready;
}

void __init kmem_cache_init(void)
{
	slob_ready = 1;
}

void __init kmem_cache_init_late(void)
{
	/* Nothing to do */
}
699