slob.c revision 9023cb7e8564d95a1893f8cb6895a293be9a71fe
/*
 * SLOB Allocator: Simple List Of Blocks
 *
 * Matt Mackall <mpm@selenic.com> 12/30/03
 *
 * NUMA support by Paul Mundt, 2007.
 *
 * How SLOB works:
 *
 * The core of SLOB is a traditional K&R style heap allocator, with
 * support for returning aligned objects. The granularity of this
 * allocator is as little as 2 bytes, though most architectures will
 * typically require 4 bytes on 32-bit and 8 bytes on 64-bit.
 *
 * The slob heap is a set of linked lists of pages from alloc_pages(),
 * and within each page, there is a singly-linked list of free blocks
 * (slob_t). The heap is grown on demand. To reduce fragmentation,
 * heap pages are segregated into three lists, holding objects smaller
 * than 256 bytes, objects smaller than 1024 bytes, and all other objects.
 *
 * Allocation from the heap involves first searching for a page with
 * sufficient free blocks (using a next-fit-like approach), followed by
 * a first-fit scan of the page. Deallocation inserts objects back
 * into the free list in address order, so this is effectively an
 * address-ordered first fit.
 *
 * Above this is an implementation of kmalloc/kfree. Blocks returned
 * from kmalloc are prepended with a small aligned header that records
 * the kmalloc size. If kmalloc is asked for objects of PAGE_SIZE or
 * larger, it calls alloc_pages() directly, allocating compound pages so
 * the page order does not have to be separately tracked, and also stores
 * the exact allocation size in page->private so that it can be used to
 * accurately provide ksize(). These objects are detected in kfree()
 * because slob_page() is false for them.
 *
 * SLAB is emulated on top of SLOB by simply calling constructors for
 * every SLAB allocation. Objects are returned with ARCH_SLAB_MINALIGN
 * alignment (or the caller-requested alignment, if larger) unless the
 * SLAB_HWCACHE_ALIGN flag is set, in which case the low-level allocator
 * will fragment blocks to create the proper alignment. Again, objects of
 * page-size or greater are allocated by calling alloc_pages(). As SLAB
 * objects know their size, no separate size bookkeeping is necessary and
 * there is essentially no allocation space overhead, and compound pages
 * aren't needed for multi-page allocations.
 *
 * NUMA support in SLOB is fairly simplistic, pushing most of the real
 * logic down to the page allocator, and simply doing the node accounting
 * on the upper levels. In the event that a node id is explicitly
 * provided, alloc_pages_node() with the specified node id is used
 * instead. The common case (or when the node id isn't explicitly provided)
 * will default to the current node, as per numa_node_id().
 *
 * Node-aware pages are still inserted into the global freelist, and
 * these are scanned for by matching against the node id encoded in the
 * page flags. As a result, block allocations that can be satisfied from
 * the freelist will only be served from pages residing on the same node,
 * in order to prevent random node placement.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <asm/atomic.h>

/*
 * slob_block has a field 'units', which holds the size of the block if
 * positive, or the offset of the next free block if negative (both in
 * SLOB_UNITs).
 *
 * Free blocks of size 1 unit simply contain the offset of the next block.
 * Those with larger size contain their size in the first SLOB_UNIT of
 * memory, and the offset of the next free block in the second SLOB_UNIT.
 */
#if PAGE_SIZE <= (32767 * 2)
typedef s16 slobidx_t;
#else
typedef s32 slobidx_t;
#endif

struct slob_block {
	slobidx_t units;
};
typedef struct slob_block slob_t;
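
/*
 * Example encoding (assuming a 4K page, so slobidx_t is s16 and
 * SLOB_UNIT is 2 bytes): a free block of 3 units whose next free block
 * begins 40 units from the start of the page is stored as
 * s[0].units = 3, s[1].units = 40.  A 1-unit free block with the same
 * successor is stored as s[0].units = -40.  See set_slob(), slob_units()
 * and slob_next() below.
 */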

/*
 * We use struct page fields to manage some slob allocation aspects;
 * however, to avoid the horrible mess in include/linux/mm_types.h, we'll
 * just define our own struct page type variant here.
 */
struct slob_page {
	union {
		struct {
			unsigned long flags;	/* mandatory */
			atomic_t _count;	/* mandatory */
			slobidx_t units;	/* free units left in page */
			unsigned long pad[2];
			slob_t *free;		/* first free slob_t in page */
			struct list_head list;	/* linked list of free pages */
		};
		struct page page;
	};
};
static inline void struct_slob_page_wrong_size(void)
{ BUILD_BUG_ON(sizeof(struct slob_page) != sizeof(struct page)); }

/*
 * free_slob_page: call before a slob_page is returned to the page allocator.
 */
static inline void free_slob_page(struct slob_page *sp)
{
	reset_page_mapcount(&sp->page);
	sp->page.mapping = NULL;
}

/*
 * All partially free slob pages go on these lists.
 */
#define SLOB_BREAK1 256
#define SLOB_BREAK2 1024
static LIST_HEAD(free_slob_small);
static LIST_HEAD(free_slob_medium);
static LIST_HEAD(free_slob_large);

/*
 * slob_page: True for all slob pages (false for bigblock pages)
 */
static inline int slob_page(struct slob_page *sp)
{
	return PageSlobPage((struct page *)sp);
}

static inline void set_slob_page(struct slob_page *sp)
{
	__SetPageSlobPage((struct page *)sp);
}

static inline void clear_slob_page(struct slob_page *sp)
{
	__ClearPageSlobPage((struct page *)sp);
}

/*
 * slob_page_free: true for pages on one of the free_slob_* lists.
 */
static inline int slob_page_free(struct slob_page *sp)
{
	return PageSlobFree((struct page *)sp);
}

static void set_slob_page_free(struct slob_page *sp, struct list_head *list)
{
	list_add(&sp->list, list);
	__SetPageSlobFree((struct page *)sp);
}

static inline void clear_slob_page_free(struct slob_page *sp)
{
	list_del(&sp->list);
	__ClearPageSlobFree((struct page *)sp);
}

#define SLOB_UNIT sizeof(slob_t)
#define SLOB_UNITS(size) (((size) + SLOB_UNIT - 1)/SLOB_UNIT)
#define SLOB_ALIGN L1_CACHE_BYTES
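
/*
 * For example, with SLOB_UNIT == 2 (the s16 slobidx_t case),
 * SLOB_UNITS(1) == 1, SLOB_UNITS(2) == 1 and SLOB_UNITS(100) == 50:
 * sizes are rounded up to a whole number of slob_t-sized units.
 */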

/*
 * struct slob_rcu is inserted at the tail of allocated slob blocks, which
 * were created with a SLAB_DESTROY_BY_RCU slab. slob_rcu is used to free
 * the block using call_rcu.
 */
struct slob_rcu {
	struct rcu_head head;
	int size;
};
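
/*
 * Layout of an object from a SLAB_DESTROY_BY_RCU cache: kmem_cache_create()
 * grows c->size by sizeof(struct slob_rcu), and kmem_cache_free() writes
 * this footer at b + c->size - sizeof(struct slob_rcu) before call_rcu();
 * the RCU callback then recovers the object start from the footer address
 * and the recorded size.
 *
 *   b                                   b + c->size
 *   |<------ object proper ------>|<-- struct slob_rcu -->|
 */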

/*
 * slob_lock protects all slob allocator structures.
 */
static DEFINE_SPINLOCK(slob_lock);

/*
 * Encode the given size and next info into a free slob block s.
 */
static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
{
	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
	slobidx_t offset = next - base;

	if (size > 1) {
		s[0].units = size;
		s[1].units = offset;
	} else
		s[0].units = -offset;
}

/*
 * Return the size of a slob block.
 */
static slobidx_t slob_units(slob_t *s)
{
	if (s->units > 0)
		return s->units;
	return 1;
}

/*
 * Return the next free slob block pointer after this one.
 */
static slob_t *slob_next(slob_t *s)
{
	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
	slobidx_t next;

	if (s[0].units < 0)
		next = -s[0].units;
	else
		next = s[1].units;
	return base + next;
}

/*
 * Returns true if s is the last free block in its page.
 */
static int slob_last(slob_t *s)
{
	return !((unsigned long)slob_next(s) & ~PAGE_MASK);
}

static void *slob_new_page(gfp_t gfp, int order, int node)
{
	void *page;

#ifdef CONFIG_NUMA
	if (node != -1)
		page = alloc_pages_node(node, gfp, order);
	else
#endif
		page = alloc_pages(gfp, order);

	if (!page)
		return NULL;

	return page_address(page);
}

/*
 * Allocate a slob block within a given slob_page sp.
 */
static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
{
	slob_t *prev, *cur, *aligned = NULL;
	int delta = 0, units = SLOB_UNITS(size);

	for (prev = NULL, cur = sp->free; ; prev = cur, cur = slob_next(cur)) {
		slobidx_t avail = slob_units(cur);

		if (align) {
			aligned = (slob_t *)ALIGN((unsigned long)cur, align);
			delta = aligned - cur;
		}
		if (avail >= units + delta) { /* room enough? */
			slob_t *next;

			if (delta) { /* need to fragment head to align? */
				next = slob_next(cur);
				set_slob(aligned, avail - delta, next);
				set_slob(cur, delta, aligned);
				prev = cur;
				cur = aligned;
				avail = slob_units(cur);
			}

			next = slob_next(cur);
			if (avail == units) { /* exact fit? unlink. */
				if (prev)
					set_slob(prev, slob_units(prev), next);
				else
					sp->free = next;
			} else { /* fragment */
				if (prev)
					set_slob(prev, slob_units(prev), cur + units);
				else
					sp->free = cur + units;
				set_slob(cur + units, avail - units, next);
			}

			sp->units -= units;
			if (!sp->units)
				clear_slob_page_free(sp);
			return cur;
		}
		if (slob_last(cur))
			return NULL;
	}
}
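
/*
 * Alignment example (assuming 2-byte SLOB_UNITs): a request for 2 units
 * with align == 8 that lands on a 5-unit free block starting 2 bytes
 * before an 8-byte boundary first splits off the 1-unit head as its own
 * free block (the "fragment head" case above), then allocates 2 units at
 * the aligned address and leaves a 2-unit tail on the free list.
 */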

/*
 * slob_alloc: entry point into the slob allocator.
 */
static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
{
	struct slob_page *sp;
	struct list_head *prev;
	struct list_head *slob_list;
	slob_t *b = NULL;
	unsigned long flags;

	if (size < SLOB_BREAK1)
		slob_list = &free_slob_small;
	else if (size < SLOB_BREAK2)
		slob_list = &free_slob_medium;
	else
		slob_list = &free_slob_large;

	spin_lock_irqsave(&slob_lock, flags);
	/* Iterate through each partially free page, try to find room */
	list_for_each_entry(sp, slob_list, list) {
#ifdef CONFIG_NUMA
		/*
		 * If there's a node specification, search for a partial
		 * page with a matching node id in the freelist.
		 */
		if (node != -1 && page_to_nid(&sp->page) != node)
			continue;
#endif
		/* Enough room on this page? */
		if (sp->units < SLOB_UNITS(size))
			continue;

		/* Attempt to alloc */
		prev = sp->list.prev;
		b = slob_page_alloc(sp, size, align);
		if (!b)
			continue;

		/* Improve fragment distribution and reduce our average
		 * search time by starting our next search here. (see
		 * Knuth vol 1, sec 2.5, pg 449) */
		if (prev != slob_list->prev &&
				slob_list->next != prev->next)
			list_move_tail(slob_list, prev->next);
		break;
	}
	spin_unlock_irqrestore(&slob_lock, flags);

	/* Not enough space: must allocate a new page */
	if (!b) {
		b = slob_new_page(gfp & ~__GFP_ZERO, 0, node);
		if (!b)
			return NULL;
		sp = (struct slob_page *)virt_to_page(b);
		set_slob_page(sp);

		spin_lock_irqsave(&slob_lock, flags);
		sp->units = SLOB_UNITS(PAGE_SIZE);
		sp->free = b;
		INIT_LIST_HEAD(&sp->list);
		set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
		set_slob_page_free(sp, slob_list);
		b = slob_page_alloc(sp, size, align);
		BUG_ON(!b);
		spin_unlock_irqrestore(&slob_lock, flags);
	}
	if (unlikely((gfp & __GFP_ZERO) && b))
		memset(b, 0, size);
	return b;
}

/*
 * slob_free: entry point into the slob allocator.
 */
static void slob_free(void *block, int size)
{
	struct slob_page *sp;
	slob_t *prev, *next, *b = (slob_t *)block;
	slobidx_t units;
	unsigned long flags;

	if (unlikely(ZERO_OR_NULL_PTR(block)))
		return;
	BUG_ON(!size);

	sp = (struct slob_page *)virt_to_page(block);
	units = SLOB_UNITS(size);

	spin_lock_irqsave(&slob_lock, flags);

	if (sp->units + units == SLOB_UNITS(PAGE_SIZE)) {
		/* Go directly to page allocator. Do not pass slob allocator */
		if (slob_page_free(sp))
			clear_slob_page_free(sp);
		clear_slob_page(sp);
		free_slob_page(sp);
		free_page((unsigned long)b);
		goto out;
	}

	if (!slob_page_free(sp)) {
		/* This slob page is about to become partially free. Easy! */
		sp->units = units;
		sp->free = b;
		set_slob(b, units,
			(void *)((unsigned long)(b +
					SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
		set_slob_page_free(sp, &free_slob_small);
		goto out;
	}

	/*
	 * Otherwise the page is already partially free, so find reinsertion
	 * point.
	 */
	sp->units += units;

	if (b < sp->free) {
		if (b + units == sp->free) {
			units += slob_units(sp->free);
			sp->free = slob_next(sp->free);
		}
		set_slob(b, units, sp->free);
		sp->free = b;
	} else {
		prev = sp->free;
		next = slob_next(prev);
		while (b > next) {
			prev = next;
			next = slob_next(prev);
		}

		if (!slob_last(prev) && b + units == next) {
			units += slob_units(next);
			set_slob(b, units, slob_next(next));
		} else
			set_slob(b, units, next);

		if (prev + slob_units(prev) == b) {
			units = slob_units(b) + slob_units(prev);
			set_slob(prev, units, slob_next(b));
		} else
			set_slob(prev, slob_units(prev), b);
	}
out:
	spin_unlock_irqrestore(&slob_lock, flags);
}
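
/*
 * Note that the reinsertion path above coalesces with both neighbours:
 * a following free block is absorbed by adding its units and taking over
 * its successor, and a preceding free block that ends exactly at b absorbs
 * the freed block the same way, so runs of freed objects collapse back
 * into single large free blocks.
 */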

/*
 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
 */

#ifndef ARCH_KMALLOC_MINALIGN
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long)
#endif

#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
#endif

void *__kmalloc_node(size_t size, gfp_t gfp, int node)
{
	unsigned int *m;
	int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);

	if (size < PAGE_SIZE - align) {
		if (!size)
			return ZERO_SIZE_PTR;

		m = slob_alloc(size + align, gfp, align, node);
		if (!m)
			return NULL;
		*m = size;
		return (void *)m + align;
	} else {
		void *ret;

		ret = slob_new_page(gfp | __GFP_COMP, get_order(size), node);
		if (ret) {
			struct page *page;
			page = virt_to_page(ret);
			page->private = size;
		}
		return ret;
	}
}
EXPORT_SYMBOL(__kmalloc_node);
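
/*
 * Small kmalloc layout: the underlying slob block is size + align bytes,
 * the requested size is recorded at the start of the block, and the caller
 * gets back the address align bytes in; kfree() walks back by the same
 * align to recover the recorded size.  For example, kmalloc(100) with an
 * 8-byte minimum alignment allocates a 108-byte block, stores 100 at its
 * start and returns block + 8.
 */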

void kfree(const void *block)
{
	struct slob_page *sp;

	if (unlikely(ZERO_OR_NULL_PTR(block)))
		return;

	sp = (struct slob_page *)virt_to_page(block);
	if (slob_page(sp)) {
		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
		unsigned int *m = (unsigned int *)(block - align);
		slob_free(m, *m + align);
	} else
		put_page(&sp->page);
}
EXPORT_SYMBOL(kfree);

/* can't use ksize for kmem_cache_alloc memory, only kmalloc */
size_t ksize(const void *block)
{
	struct slob_page *sp;

	BUG_ON(!block);
	if (unlikely(block == ZERO_SIZE_PTR))
		return 0;

	sp = (struct slob_page *)virt_to_page(block);
	if (slob_page(sp)) {
		/* read the size recorded in the kmalloc header (see kfree) */
		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
		unsigned int *m = (unsigned int *)(block - align);
		return *m;
	} else
		return sp->page.private;
}
EXPORT_SYMBOL(ksize);

struct kmem_cache {
	unsigned int size, align;
	unsigned long flags;
	const char *name;
	void (*ctor)(struct kmem_cache *, void *);
};

struct kmem_cache *kmem_cache_create(const char *name, size_t size,
	size_t align, unsigned long flags,
	void (*ctor)(struct kmem_cache *, void *))
{
	struct kmem_cache *c;

	c = slob_alloc(sizeof(struct kmem_cache),
		flags, ARCH_KMALLOC_MINALIGN, -1);

	if (c) {
		c->name = name;
		c->size = size;
		if (flags & SLAB_DESTROY_BY_RCU) {
			/* leave room for rcu footer at the end of object */
			c->size += sizeof(struct slob_rcu);
		}
		c->flags = flags;
		c->ctor = ctor;
		/* ignore alignment unless it's forced */
		c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
		if (c->align < ARCH_SLAB_MINALIGN)
			c->align = ARCH_SLAB_MINALIGN;
		if (c->align < align)
			c->align = align;
	} else if (flags & SLAB_PANIC)
		panic("Cannot create slab cache %s\n", name);

	return c;
}
EXPORT_SYMBOL(kmem_cache_create);

void kmem_cache_destroy(struct kmem_cache *c)
{
	slob_free(c, sizeof(struct kmem_cache));
}
EXPORT_SYMBOL(kmem_cache_destroy);

void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
{
	void *b;

	if (c->size < PAGE_SIZE)
		b = slob_alloc(c->size, flags, c->align, node);
	else
		b = slob_new_page(flags, get_order(c->size), node);

	if (c->ctor)
		c->ctor(c, b);

	return b;
}
EXPORT_SYMBOL(kmem_cache_alloc_node);

static void __kmem_cache_free(void *b, int size)
{
	if (size < PAGE_SIZE)
		slob_free(b, size);
	else
		free_pages((unsigned long)b, get_order(size));
}

static void kmem_rcu_free(struct rcu_head *head)
{
	struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
	void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));

	__kmem_cache_free(b, slob_rcu->size);
}

void kmem_cache_free(struct kmem_cache *c, void *b)
{
	if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
		struct slob_rcu *slob_rcu;
		slob_rcu = b + (c->size - sizeof(struct slob_rcu));
		INIT_RCU_HEAD(&slob_rcu->head);
		slob_rcu->size = c->size;
		call_rcu(&slob_rcu->head, kmem_rcu_free);
	} else {
		__kmem_cache_free(b, c->size);
	}
}
EXPORT_SYMBOL(kmem_cache_free);

unsigned int kmem_cache_size(struct kmem_cache *c)
{
	return c->size;
}
EXPORT_SYMBOL(kmem_cache_size);

const char *kmem_cache_name(struct kmem_cache *c)
{
	return c->name;
}
EXPORT_SYMBOL(kmem_cache_name);

int kmem_cache_shrink(struct kmem_cache *d)
{
	return 0;
}
EXPORT_SYMBOL(kmem_cache_shrink);

int kmem_ptr_validate(struct kmem_cache *a, const void *b)
{
	return 0;
}

static unsigned int slob_ready __read_mostly;

int slab_is_available(void)
{
	return slob_ready;
}

void __init kmem_cache_init(void)
{
	slob_ready = 1;
}