/*
 * simple memory allocator, backed by mmap() so that it hands out memory
 * that can be shared across processes and threads
 */
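/*
 * Typical usage (a sketch, not part of the allocator itself): call
 * sinit() once up front, after which any process or thread sharing the
 * mapping can allocate and free:
 *
 *	void *p = smalloc(128);
 *	if (p)
 *		sfree(p);
 *
 * scleanup() tears the pools down again.
 */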
#include <sys/mman.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include <unistd.h>
#include <inttypes.h>
#include <sys/types.h>
#include <limits.h>
#include <fcntl.h>

#include "mutex.h"
#include "arch/arch.h"
#include "os/os.h"
#include "smalloc.h"

#define SMALLOC_REDZONE		/* define to detect memory corruption */

#define SMALLOC_BPB	32	/* block size, bytes-per-bit in bitmap */
#define SMALLOC_BPI	(sizeof(unsigned int) * 8)
#define SMALLOC_BPL	(SMALLOC_BPB * SMALLOC_BPI)
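/*
 * One bitmap bit covers SMALLOC_BPB bytes of pool space and one bitmap
 * word covers SMALLOC_BPI bits, so with a 32-bit unsigned int each word
 * accounts for SMALLOC_BPL = 32 * 32 = 1024 bytes.
 */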

#define INITIAL_SIZE	8192*1024	/* new pool size */
#define MAX_POOLS	128		/* maximum number of pools to setup */

#define SMALLOC_PRE_RED		0xdeadbeefU
#define SMALLOC_POST_RED	0x5aa55aa5U

unsigned int smalloc_pool_size = INITIAL_SIZE;
static const int int_mask = sizeof(int) - 1;

struct pool {
	struct fio_mutex *lock;			/* protects this pool */
	void *map;				/* map of blocks */
	unsigned int *bitmap;			/* blocks free/busy map */
	size_t free_blocks;			/* free blocks */
	size_t nr_blocks;			/* total blocks */
	size_t next_non_full;			/* first word that may have free blocks */
	size_t mmap_size;			/* size of the backing mapping */
};

struct block_hdr {
	size_t size;
#ifdef SMALLOC_REDZONE
	unsigned int prered;
#endif
};

static struct pool mp[MAX_POOLS];
static unsigned int nr_pools;
static unsigned int last_pool;
static struct fio_rwlock *lock;

static inline void pool_lock(struct pool *pool)
{
	fio_mutex_down(pool->lock);
}

static inline void pool_unlock(struct pool *pool)
{
	fio_mutex_up(pool->lock);
}

static inline void global_read_lock(void)
{
	fio_rwlock_read(lock);
}

static inline void global_read_unlock(void)
{
	fio_rwlock_unlock(lock);
}

static inline void global_write_lock(void)
{
	fio_rwlock_write(lock);
}

static inline void global_write_unlock(void)
{
	fio_rwlock_unlock(lock);
}

static inline int ptr_valid(struct pool *pool, void *ptr)
{
	unsigned int pool_size = pool->nr_blocks * SMALLOC_BPL;

	return (ptr >= pool->map) && (ptr < pool->map + pool_size);
}

static inline size_t size_to_blocks(size_t size)
{
	return (size + SMALLOC_BPB - 1) / SMALLOC_BPB;
}

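/*
 * Walk 'nr_blocks' blocks of the bitmap, starting at word 'pool_idx',
 * bit 'idx', and apply 'func' to each word with a mask covering the bits
 * that fall inside the range. Stops and returns 0 if 'func' returns 0 or
 * the range runs past the end of the bitmap; returns 1 if the whole range
 * was processed.
 */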
static int blocks_iter(struct pool *pool, unsigned int pool_idx,
		       unsigned int idx, size_t nr_blocks,
		       int (*func)(unsigned int *map, unsigned int mask))
{
	while (nr_blocks) {
		unsigned int this_blocks, mask;
		unsigned int *map;

		if (pool_idx >= pool->nr_blocks)
			return 0;

		map = &pool->bitmap[pool_idx];

		this_blocks = nr_blocks;
		if (this_blocks + idx > SMALLOC_BPI) {
			this_blocks = SMALLOC_BPI - idx;
			idx = SMALLOC_BPI - this_blocks;
		}

		if (this_blocks == SMALLOC_BPI)
			mask = -1U;
		else
			mask = ((1U << this_blocks) - 1) << idx;

		if (!func(map, mask))
			return 0;

		nr_blocks -= this_blocks;
		idx = 0;
		pool_idx++;
	}

	return 1;
}

static int mask_cmp(unsigned int *map, unsigned int mask)
{
	return !(*map & mask);
}

static int mask_clear(unsigned int *map, unsigned int mask)
{
	assert((*map & mask) == mask);
	*map &= ~mask;
	return 1;
}

static int mask_set(unsigned int *map, unsigned int mask)
{
	assert(!(*map & mask));
	*map |= mask;
	return 1;
}

static int blocks_free(struct pool *pool, unsigned int pool_idx,
		       unsigned int idx, size_t nr_blocks)
{
	return blocks_iter(pool, pool_idx, idx, nr_blocks, mask_cmp);
}

static void set_blocks(struct pool *pool, unsigned int pool_idx,
		       unsigned int idx, size_t nr_blocks)
{
	blocks_iter(pool, pool_idx, idx, nr_blocks, mask_set);
}

static void clear_blocks(struct pool *pool, unsigned int pool_idx,
			 unsigned int idx, size_t nr_blocks)
{
	blocks_iter(pool, pool_idx, idx, nr_blocks, mask_clear);
}

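/*
 * Find the first zero bit in 'word' at or after bit 'start'. The caller
 * must not pass an all-ones word; the assert below enforces that.
 */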
static int find_next_zero(int word, int start)
{
	assert(word != -1U);
	word >>= start;
	return ffz(word) + start;
}

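/*
 * Set up one pool: pad the requested size for header/redzone overhead,
 * bump it to at least INITIAL_SIZE, round it to a whole number of bitmap
 * words worth of blocks, and back it with an anonymous shared mapping.
 * The free/busy bitmap lives in the same mapping, right after the block
 * area.
 */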
static int add_pool(struct pool *pool, unsigned int alloc_size)
{
	int bitmap_blocks;
	void *ptr;

#ifdef SMALLOC_REDZONE
	alloc_size += sizeof(unsigned int);
#endif
	alloc_size += sizeof(struct block_hdr);
	if (alloc_size < INITIAL_SIZE)
		alloc_size = INITIAL_SIZE;

	/* round up to nearest full number of blocks */
	alloc_size = (alloc_size + SMALLOC_BPL - 1) & ~(SMALLOC_BPL - 1);
	bitmap_blocks = alloc_size / SMALLOC_BPL;
	alloc_size += bitmap_blocks * sizeof(unsigned int);
	pool->mmap_size = alloc_size;

	pool->nr_blocks = bitmap_blocks;
	pool->free_blocks = bitmap_blocks * SMALLOC_BPB;

	ptr = mmap(NULL, alloc_size, PROT_READ|PROT_WRITE,
			MAP_SHARED | OS_MAP_ANON, -1, 0);
	if (ptr == MAP_FAILED)
		goto out_fail;

	memset(ptr, 0, alloc_size);
	pool->map = ptr;
	pool->bitmap = (void *) ptr + (pool->nr_blocks * SMALLOC_BPL);

	pool->lock = fio_mutex_init(FIO_MUTEX_UNLOCKED);
	if (!pool->lock)
		goto out_fail;

	nr_pools++;
	return 0;
out_fail:
	fprintf(stderr, "smalloc: failed adding pool\n");
	if (pool->map)
		munmap(pool->map, pool->mmap_size);
	return 1;
}

void sinit(void)
{
	int ret;

	lock = fio_rwlock_init();
	ret = add_pool(&mp[0], INITIAL_SIZE);
	assert(!ret);
}

static void cleanup_pool(struct pool *pool)
{
	/*
	 * The pool is backed by an anonymous shared mapping, so unmapping
	 * it is all the cleanup the memory itself needs.
	 */
	munmap(pool->map, pool->mmap_size);

	if (pool->lock)
		fio_mutex_remove(pool->lock);
}

void scleanup(void)
{
	unsigned int i;

	for (i = 0; i < nr_pools; i++)
		cleanup_pool(&mp[i]);

	if (lock)
		fio_rwlock_remove(lock);
}

#ifdef SMALLOC_REDZONE
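/*
 * Redzone layout: the pre redzone sits in the block header in front of
 * the caller-visible memory, the post redzone occupies the last int-sized
 * slot of the allocation. Both are verified when the block is freed.
 */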
static void *postred_ptr(struct block_hdr *hdr)
{
	uintptr_t ptr;

	ptr = (uintptr_t) hdr + hdr->size - sizeof(unsigned int);
	ptr = (ptr + int_mask) & ~int_mask;

	return (void *) ptr;
}

static void fill_redzone(struct block_hdr *hdr)
{
	unsigned int *postred = postred_ptr(hdr);

	hdr->prered = SMALLOC_PRE_RED;
	*postred = SMALLOC_POST_RED;
}

static void sfree_check_redzone(struct block_hdr *hdr)
{
	unsigned int *postred = postred_ptr(hdr);

	if (hdr->prered != SMALLOC_PRE_RED) {
		fprintf(stderr, "smalloc pre redzone destroyed!\n");
		fprintf(stderr, "  ptr=%p, prered=%x, expected %x\n",
				hdr, hdr->prered, SMALLOC_PRE_RED);
		assert(0);
	}
	if (*postred != SMALLOC_POST_RED) {
		fprintf(stderr, "smalloc post redzone destroyed!\n");
		fprintf(stderr, "  ptr=%p, postred=%x, expected %x\n",
				hdr, *postred, SMALLOC_POST_RED);
		assert(0);
	}
}
#else
static void fill_redzone(struct block_hdr *hdr)
{
}

static void sfree_check_redzone(struct block_hdr *hdr)
{
}
#endif

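/*
 * Return one allocation to its pool: step back to the block header,
 * verify the redzones, then clear the matching bitmap bits under the pool
 * lock and pull next_non_full back if an earlier word gained free space.
 */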
static void sfree_pool(struct pool *pool, void *ptr)
{
	struct block_hdr *hdr;
	unsigned int i, idx;
	unsigned long offset;

	if (!ptr)
		return;

	ptr -= sizeof(*hdr);
	hdr = ptr;

	assert(ptr_valid(pool, ptr));

	sfree_check_redzone(hdr);

	offset = ptr - pool->map;
	i = offset / SMALLOC_BPL;
	idx = (offset % SMALLOC_BPL) / SMALLOC_BPB;

	pool_lock(pool);
	clear_blocks(pool, i, idx, size_to_blocks(hdr->size));
	if (i < pool->next_non_full)
		pool->next_non_full = i;
	pool->free_blocks += size_to_blocks(hdr->size);
	pool_unlock(pool);
}

void sfree(void *ptr)
{
	struct pool *pool = NULL;
	unsigned int i;

	if (!ptr)
		return;

	global_read_lock();

	for (i = 0; i < nr_pools; i++) {
		if (ptr_valid(&mp[i], ptr)) {
			pool = &mp[i];
			break;
		}
	}

	global_read_unlock();

	assert(pool);
	sfree_pool(pool, ptr);
}

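/*
 * Core allocation path: first-fit scan of the bitmap, starting from the
 * cached next_non_full word, looking for a run of free blocks big enough
 * for the request. Returns a pointer into the pool, or NULL if no run of
 * that size is available.
 */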
static void *__smalloc_pool(struct pool *pool, size_t size)
{
	size_t nr_blocks;
	unsigned int i;
	unsigned int offset;
	unsigned int last_idx;
	void *ret = NULL;

	pool_lock(pool);

	nr_blocks = size_to_blocks(size);
	if (nr_blocks > pool->free_blocks)
		goto fail;

	i = pool->next_non_full;
	last_idx = 0;
	offset = -1U;
	while (i < pool->nr_blocks) {
		unsigned int idx;

		if (pool->bitmap[i] == -1U) {
			i++;
			pool->next_non_full = i;
			last_idx = 0;
			continue;
		}

		idx = find_next_zero(pool->bitmap[i], last_idx);
		if (!blocks_free(pool, i, idx, nr_blocks)) {
			idx += nr_blocks;
			if (idx < SMALLOC_BPI)
				last_idx = idx;
			else {
				last_idx = 0;
				while (idx >= SMALLOC_BPI) {
					i++;
					idx -= SMALLOC_BPI;
				}
			}
			continue;
		}
		set_blocks(pool, i, idx, nr_blocks);
		offset = i * SMALLOC_BPL + idx * SMALLOC_BPB;
		break;
	}

	if (i < pool->nr_blocks) {
		pool->free_blocks -= nr_blocks;
		ret = pool->map + offset;
	}
fail:
	pool_unlock(pool);
	return ret;
}

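/*
 * Wrapper around __smalloc_pool() that adds room for the block header
 * (and redzone, if enabled), stamps the header and redzones, and zeroes
 * the caller-visible part of the buffer.
 */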
static void *smalloc_pool(struct pool *pool, size_t size)
{
	size_t alloc_size = size + sizeof(struct block_hdr);
	void *ptr;

	/*
	 * Round to int alignment, so that the postred pointer will
	 * be naturally aligned as well.
	 */
#ifdef SMALLOC_REDZONE
	alloc_size += sizeof(unsigned int);
	alloc_size = (alloc_size + int_mask) & ~int_mask;
#endif

	ptr = __smalloc_pool(pool, alloc_size);
	if (ptr) {
		struct block_hdr *hdr = ptr;

		hdr->size = alloc_size;
		fill_redzone(hdr);

		ptr += sizeof(*hdr);
		memset(ptr, 0, size);
	}

	return ptr;
}

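/*
 * Public allocation entry point: try the pools starting from the one that
 * satisfied the last request, wrap around once, and add a new pool (up to
 * MAX_POOLS) if they are all full.
 */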
void *smalloc(size_t size)
{
	unsigned int i;

	if (size != (unsigned int) size)
		return NULL;

	global_write_lock();
	i = last_pool;

	do {
		for (; i < nr_pools; i++) {
			void *ptr = smalloc_pool(&mp[i], size);

			if (ptr) {
				last_pool = i;
				global_write_unlock();
				return ptr;
			}
		}
		if (last_pool) {
			/* wrap around and retry the earlier pools too */
			last_pool = 0;
			i = 0;
			continue;
		}

		if (nr_pools + 1 > MAX_POOLS)
			break;
		else {
			i = nr_pools;
			if (add_pool(&mp[nr_pools], size))
				goto out;
		}
	} while (1);

out:
	global_write_unlock();
	return NULL;
}

char *smalloc_strdup(const char *str)
{
	char *ptr;

	ptr = smalloc(strlen(str) + 1);
	if (ptr)
		strcpy(ptr, str);
	return ptr;
}