slob.c revision 2e892f43ccb602e8ffad73396a1000f2040c9e0b
/*
 * SLOB Allocator: Simple List Of Blocks
 *
 * Matt Mackall <mpm@selenic.com> 12/30/03
 *
 * How SLOB works:
 *
 * The core of SLOB is a traditional K&R style heap allocator, with
 * support for returning aligned objects. The granularity of this
 * allocator is 8 bytes on x86, though it's perhaps possible to reduce
 * this to 4 if it's deemed worth the effort. The slob heap is a
 * singly-linked list of pages from __get_free_page, grown on demand,
 * and allocation from the heap is currently first-fit.
 *
 * Above this is an implementation of kmalloc/kfree. Blocks returned
 * from kmalloc are 8-byte aligned and prepended with an 8-byte header.
 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
 * __get_free_pages directly so that it can return page-aligned blocks
 * and keeps a linked list of such pages and their orders. These
 * objects are detected in kfree() by their page alignment.
 *
 * SLAB is emulated on top of SLOB by simply calling constructors and
 * destructors for every SLAB allocation. Objects are returned with
 * the 8-byte alignment unless the SLAB_MUST_HWCACHE_ALIGN flag is
 * set, in which case the low-level allocator will fragment blocks to
 * create the proper alignment. Again, objects of page size or greater
 * are allocated by calling __get_free_pages. As SLAB objects know
 * their size, no separate size bookkeeping is necessary and there is
 * essentially no allocation space overhead.
 */
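
/*
 * For example, on a machine with 4K pages and an 8-byte slob_t,
 * kmalloc(100) becomes slob_alloc(108), which carves a 14-unit
 * (112-byte) fragment out of the heap:
 *
 *	+--------------------+----------------------------------+
 *	| slob_t  units = 14 | payload, 104 usable bytes        |
 *	+--------------------+----------------------------------+
 *	^ m                  ^ (void *)(m + 1), what the caller sees
 *
 * kfree() steps back one slob_t to recover the header, and ksize()
 * reads the stored unit count from it.
 */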

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/timer.h>

struct slob_block {
	int units;			/* size of this block, in SLOB_UNITs */
	struct slob_block *next;	/* next block on the free list */
};
typedef struct slob_block slob_t;

#define SLOB_UNIT sizeof(slob_t)
#define SLOB_UNITS(size) (((size) + SLOB_UNIT - 1)/SLOB_UNIT)
#define SLOB_ALIGN L1_CACHE_BYTES
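
/*
 * Example: with the 8-byte slob_t above, SLOB_UNITS() rounds a byte
 * count up to whole 8-byte units:
 *
 *	SLOB_UNITS(1)   == 1	(8 bytes)
 *	SLOB_UNITS(8)   == 1	(8 bytes)
 *	SLOB_UNITS(9)   == 2	(16 bytes)
 *	SLOB_UNITS(100) == 13	(104 bytes)
 */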

struct bigblock {
	int order;		/* page order handed to __get_free_pages */
	void *pages;		/* page-aligned block returned to the caller */
	struct bigblock *next;
};
typedef struct bigblock bigblock_t;

static slob_t arena = { .next = &arena, .units = 1 }; /* dummy head of the circular free list */
static slob_t *slobfree = &arena;
static bigblock_t *bigblocks;
static DEFINE_SPINLOCK(slob_lock);
static DEFINE_SPINLOCK(block_lock);

static void slob_free(void *b, int size);

static void *slob_alloc(size_t size, gfp_t gfp, int align)
{
	slob_t *prev, *cur, *aligned = NULL;
	int delta = 0, units = SLOB_UNITS(size);
	unsigned long flags;

	spin_lock_irqsave(&slob_lock, flags);
	prev = slobfree;
	for (cur = prev->next; ; prev = cur, cur = cur->next) {
		if (align) {
			aligned = (slob_t *)ALIGN((unsigned long)cur, align);
			delta = aligned - cur;	/* slob_t pointer difference, i.e. units */
		}
		if (cur->units >= units + delta) { /* room enough? */
			if (delta) { /* need to fragment head to align? */
				aligned->units = cur->units - delta;
				aligned->next = cur->next;
				cur->next = aligned;
				cur->units = delta;
				prev = cur;
				cur = aligned;
			}

			if (cur->units == units) /* exact fit? */
				prev->next = cur->next; /* unlink */
			else { /* fragment */
				prev->next = cur + units;
				prev->next->units = cur->units - units;
				prev->next->next = cur->next;
				cur->units = units;
			}

			slobfree = prev;
			spin_unlock_irqrestore(&slob_lock, flags);
			return cur;
		}
		if (cur == slobfree) {	/* scanned the whole list; grow the heap */
			spin_unlock_irqrestore(&slob_lock, flags);

			if (size == PAGE_SIZE) /* trying to shrink arena? */
				return NULL;

			cur = (slob_t *)__get_free_page(gfp);
			if (!cur)
				return NULL;

			slob_free(cur, PAGE_SIZE);
			spin_lock_irqsave(&slob_lock, flags);
			cur = slobfree;
		}
	}
}
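
/*
 * Example of the head fragmentation in slob_alloc(), with 8-byte
 * units: a request for units == 4 and align == 32 that lands on a
 * free block at an address ending in 0x08 with cur->units == 8.
 * ALIGN() pushes the start to the next 0x20 boundary, so delta == 3;
 * the 8-unit block is split into a 3-unit head that stays free and a
 * 5-unit aligned tail, and the 4-unit allocation is carved from the
 * tail, leaving a 1-unit fragment on the list.
 */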

static void slob_free(void *block, int size)
{
	slob_t *cur, *b = (slob_t *)block;
	unsigned long flags;

	if (!block)
		return;

	if (size)
		b->units = SLOB_UNITS(size);

	/* Find reinsertion point */
	spin_lock_irqsave(&slob_lock, flags);
	for (cur = slobfree; !(b > cur && b < cur->next); cur = cur->next)
		if (cur >= cur->next && (b > cur || b < cur->next))
			break;	/* wrapped around the circular list */

	if (b + b->units == cur->next) { /* coalesce with upper neighbour */
		b->units += cur->next->units;
		b->next = cur->next->next;
	} else
		b->next = cur->next;

	if (cur + cur->units == b) { /* coalesce with lower neighbour */
		cur->units += b->units;
		cur->next = b->next;
	} else
		cur->next = b;

	slobfree = cur;

	spin_unlock_irqrestore(&slob_lock, flags);
}
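
/*
 * Coalescing example, in 8-byte units: freeing a 4-unit block at an
 * address ending in 0x40, when the free list already holds a 2-unit
 * block ending at 0x40 and a 4-unit block starting at 0x60.  The
 * first test sees b + b->units == cur->next and absorbs the upper
 * neighbour; the second sees cur + cur->units == b and folds the
 * result into the lower one, leaving a single 10-unit block.
 */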

static int FASTCALL(find_order(int size));
static int fastcall find_order(int size)
{
	int order = 0;
	for ( ; size > 4096; size >>= 1)
		order++;
	return order;
}
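
/*
 * find_order() yields the smallest page order that covers size, e.g.
 * find_order(4096) == 0 (one page), find_order(5000) == 1 (two
 * pages), find_order(20000) == 3 (eight pages).
 */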

void *__kmalloc(size_t size, gfp_t gfp)
{
	slob_t *m;
	bigblock_t *bb;
	unsigned long flags;

	if (size < PAGE_SIZE - SLOB_UNIT) { /* fits on the slob heap with its header */
		m = slob_alloc(size + SLOB_UNIT, gfp, 0);
		return m ? (void *)(m + 1) : NULL;
	}

	bb = slob_alloc(sizeof(bigblock_t), gfp, 0);
	if (!bb)
		return NULL;

	bb->order = find_order(size);
	bb->pages = (void *)__get_free_pages(gfp, bb->order);

	if (bb->pages) {
		spin_lock_irqsave(&block_lock, flags);
		bb->next = bigblocks;
		bigblocks = bb;
		spin_unlock_irqrestore(&block_lock, flags);
		return bb->pages;
	}

	slob_free(bb, sizeof(bigblock_t));
	return NULL;
}
EXPORT_SYMBOL(__kmalloc);
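
/*
 * Example of the two paths above, with 4K pages and 8-byte units:
 * kmalloc(100) takes the small path and carves 112 bytes (8-byte
 * header + 104 payload) out of the slob heap, while kmalloc(8000)
 * takes the big path, records an order-1 bigblock_t descriptor on
 * the slob heap and hands back two fresh, page-aligned pages.
 */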

void kfree(const void *block)
{
	bigblock_t *bb, **last = &bigblocks;
	unsigned long flags;

	if (!block)
		return;

	if (!((unsigned long)block & (PAGE_SIZE-1))) {
		/* might be on the big block list */
		spin_lock_irqsave(&block_lock, flags);
		for (bb = bigblocks; bb; last = &bb->next, bb = bb->next) {
			if (bb->pages == block) {
				*last = bb->next;
				spin_unlock_irqrestore(&block_lock, flags);
				free_pages((unsigned long)block, bb->order);
				slob_free(bb, sizeof(bigblock_t));
				return;
			}
		}
		spin_unlock_irqrestore(&block_lock, flags);
	}

	/* small object: step back over the slob_t header */
	slob_free((slob_t *)block - 1, 0);
}
EXPORT_SYMBOL(kfree);
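
/*
 * The page-alignment test above is what keeps the two schemes apart:
 * only a big, __get_free_pages-backed allocation can appear on the
 * bigblocks list, and small objects carry their slob_t header
 * immediately before the returned pointer, so anything not found in
 * the scan is handed back to the slob heap.
 */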

unsigned int ksize(const void *block)
{
	bigblock_t *bb;
	unsigned long flags;

	if (!block)
		return 0;

	if (!((unsigned long)block & (PAGE_SIZE-1))) {
		spin_lock_irqsave(&block_lock, flags);
		for (bb = bigblocks; bb; bb = bb->next)
			if (bb->pages == block) {
				spin_unlock_irqrestore(&block_lock, flags);
				return PAGE_SIZE << bb->order;
			}
		spin_unlock_irqrestore(&block_lock, flags);
	}

	return ((slob_t *)block - 1)->units * SLOB_UNIT;
}
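
/*
 * Note that for small objects ksize() reports the whole fragment,
 * header included: a kmalloc(100) block answers 112 here (14 units
 * of 8 bytes), not 100.
 */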

struct kmem_cache {
	unsigned int size, align;
	const char *name;
	void (*ctor)(void *, struct kmem_cache *, unsigned long);
	void (*dtor)(void *, struct kmem_cache *, unsigned long);
};

struct kmem_cache *kmem_cache_create(const char *name, size_t size,
	size_t align, unsigned long flags,
	void (*ctor)(void*, struct kmem_cache *, unsigned long),
	void (*dtor)(void*, struct kmem_cache *, unsigned long))
{
	struct kmem_cache *c;

	/* 'flags' carries SLAB_* cache flags, not gfp bits */
	c = slob_alloc(sizeof(struct kmem_cache), GFP_KERNEL, 0);

	if (c) {
		c->name = name;
		c->size = size;
		c->ctor = ctor;
		c->dtor = dtor;
		/* ignore alignment unless it's forced */
		c->align = (flags & SLAB_MUST_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
		if (c->align < align)
			c->align = align;
	}

	return c;
}
EXPORT_SYMBOL(kmem_cache_create);
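
/*
 * Typical use of the cache layer, sketched for an illustrative
 * "struct foo" client (error handling elided):
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo),
 *				      0, 0, NULL, NULL);
 *	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, f);
 *	kmem_cache_destroy(foo_cache);
 */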

void kmem_cache_destroy(struct kmem_cache *c)
{
	slob_free(c, sizeof(struct kmem_cache));
}
EXPORT_SYMBOL(kmem_cache_destroy);

void *kmem_cache_alloc(struct kmem_cache *c, gfp_t flags)
{
	void *b;

	if (c->size < PAGE_SIZE)
		b = slob_alloc(c->size, flags, c->align);
	else
		b = (void *)__get_free_pages(flags, find_order(c->size));

	/* don't run the constructor on a failed allocation */
	if (b && c->ctor)
		c->ctor(b, c, SLAB_CTOR_CONSTRUCTOR);

	return b;
}
EXPORT_SYMBOL(kmem_cache_alloc);

void *kmem_cache_zalloc(struct kmem_cache *c, gfp_t flags)
{
	void *ret = kmem_cache_alloc(c, flags);
	if (ret)
		memset(ret, 0, c->size);

	return ret;
}
EXPORT_SYMBOL(kmem_cache_zalloc);

void kmem_cache_free(struct kmem_cache *c, void *b)
{
	if (c->dtor)
		c->dtor(b, c, 0);

	if (c->size < PAGE_SIZE)
		slob_free(b, c->size);
	else
		free_pages((unsigned long)b, find_order(c->size));
}
EXPORT_SYMBOL(kmem_cache_free);

unsigned int kmem_cache_size(struct kmem_cache *c)
{
	return c->size;
}
EXPORT_SYMBOL(kmem_cache_size);

const char *kmem_cache_name(struct kmem_cache *c)
{
	return c->name;
}
EXPORT_SYMBOL(kmem_cache_name);

/*
 * kmem_cache_init() below doubles as a heap trimmer; this timer
 * re-runs it once a second.
 */
static struct timer_list slob_timer = TIMER_INITIALIZER(
	(void (*)(unsigned long))kmem_cache_init, 0, 0);

int kmem_cache_shrink(struct kmem_cache *d)
{
	return 0;	/* SLOB keeps no per-cache free lists, so nothing to shrink */
}
EXPORT_SYMBOL(kmem_cache_shrink);

int kmem_ptr_validate(struct kmem_cache *a, void *b)
{
	return 0;
}

void kmem_cache_init(void)
{
	/*
	 * Trim the heap: ask for a whole aligned page without letting
	 * the arena grow.  Success means the free list had a spare
	 * page, which can go back to the page allocator.
	 */
	void *p = slob_alloc(PAGE_SIZE, 0, PAGE_SIZE-1);

	if (p)
		free_page((unsigned long)p);

	mod_timer(&slob_timer, jiffies + HZ);	/* run again in a second */
}