/* smalloc.c, revision b8a6582e89999f88c574b905b89743762d8080df */
/*
 * simple memory allocator, backed by mmap() so that it hands out memory
 * that can be shared across processes and threads
 */
#include <sys/mman.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <limits.h>

#include "mutex.h"
#include "arch/arch.h"

#define MP_SAFE			/* define to make thread safe */
#define SMALLOC_REDZONE		/* define to detect memory corruption */

#define SMALLOC_BPB	32	/* block size, bytes-per-bit in bitmap */
#define SMALLOC_BPI	(sizeof(unsigned int) * 8)	/* bits per bitmap word */
#define SMALLOC_BPL	(SMALLOC_BPB * SMALLOC_BPI)	/* bytes covered by one bitmap word */

#define INITIAL_SIZE	1024*1024	/* new pool size */
#define MAX_POOLS	128		/* maximum number of pools to setup */

/* guard values written before/after each allocation when SMALLOC_REDZONE is on */
#define SMALLOC_PRE_RED		0xdeadbeefU
#define SMALLOC_POST_RED	0x5aa55aa5U

unsigned int smalloc_pool_size = INITIAL_SIZE;

/*
 * One mmap()'ed arena. The mapping holds the data blocks first, then the
 * free/busy bitmap (see add_pool() for the layout arithmetic).
 */
struct pool {
	struct fio_mutex *lock;			/* protects this pool */
	void *map;				/* map of blocks */
	unsigned int *bitmap;			/* blocks free/busy map */
	unsigned int free_blocks;		/* free blocks */
	unsigned int nr_blocks;			/* total blocks (bitmap words, each covering SMALLOC_BPL bytes) */
	unsigned int next_non_full;		/* scan hint: first bitmap word that may have a free bit */
	int fd;					/* memory backing fd */
	unsigned int mmap_size;			/* total bytes mapped (blocks + bitmap) */
};

/* header prepended to every allocation; 'size' includes header and redzones */
struct block_hdr {
	unsigned int size;
#ifdef SMALLOC_REDZONE
	unsigned int prered;
#endif
};

static struct pool mp[MAX_POOLS];
static unsigned int nr_pools;
static unsigned int last_pool;		/* pool where the last allocation succeeded */
static struct fio_mutex *lock;		/* rw lock guarding the pool table itself */

/* Take/release a single pool's mutex; no-ops when MP_SAFE is not compiled in. */
static inline void pool_lock(struct pool *pool)
{
	if (pool->lock)
		fio_mutex_down(pool->lock);
}

static inline void pool_unlock(struct pool *pool)
{
	if (pool->lock)
		fio_mutex_up(pool->lock);
}

/* Reader side of the global pool-table lock (used by sfree() lookup). */
static inline void global_read_lock(void)
{
	if (lock)
		fio_mutex_down_read(lock);
}

static inline void global_read_unlock(void)
{
	if (lock)
		fio_mutex_up_read(lock);
}

/* Writer side of the global pool-table lock (used by smalloc(), which may add pools). */
static inline void global_write_lock(void)
{
	if (lock)
		fio_mutex_down_write(lock);
}

static inline void global_write_unlock(void)
{
	if (lock)
		fio_mutex_up_write(lock);
}

/* Return non-zero if 'ptr' lies inside this pool's data area. */
static inline int ptr_valid(struct pool *pool, void *ptr)
{
	unsigned int pool_size = pool->nr_blocks * SMALLOC_BPL;

	return (ptr >= pool->map) && (ptr < pool->map + pool_size);
}

/* Number of SMALLOC_BPB-byte blocks needed to hold 'size' bytes (rounded up). */
static inline unsigned int size_to_blocks(unsigned int size)
{
	return (size + SMALLOC_BPB - 1) / SMALLOC_BPB;
}

/*
 * Walk 'nr_blocks' bitmap bits starting at word 'pool_idx', bit 'idx',
 * applying 'func' to each touched word with a mask of the relevant bits.
 * Stops early (returning 0) if func returns 0 or the pool end is reached;
 * returns 1 when the whole range was processed.
 */
static int blocks_iter(struct pool *pool, unsigned int pool_idx,
		       unsigned int idx, unsigned int nr_blocks,
		       int (*func)(unsigned int *map, unsigned int mask))
{

	while (nr_blocks) {
		unsigned int this_blocks, mask;
		unsigned int *map;

		if (pool_idx >= pool->nr_blocks)
			return 0;

		map = &pool->bitmap[pool_idx];

		/* clamp to the bits remaining in this word */
		this_blocks = nr_blocks;
		if (this_blocks + idx > SMALLOC_BPI) {
			this_blocks = SMALLOC_BPI - idx;
			/*
			 * NOTE(review): after the clamp above this assigns
			 * idx back to its own value (BPI - (BPI - idx)) — a
			 * no-op kept for clarity of intent, presumably.
			 */
			idx = SMALLOC_BPI - this_blocks;
		}

		/* full-word mask; the shift form below would be UB for a 32-bit shift */
		if (this_blocks == SMALLOC_BPI)
			mask = -1U;
		else
			mask = ((1U << this_blocks) - 1) << idx;

		if (!func(map, mask))
			return 0;

		nr_blocks -= this_blocks;
		idx = 0;	/* subsequent words start at bit 0 */
		pool_idx++;
	}

	return 1;
}

/* Callback: non-zero when every bit in 'mask' is clear (blocks free). */
static int mask_cmp(unsigned int *map, unsigned int mask)
{
	return !(*map & mask);
}

/* Callback: clear 'mask' bits; asserts they were all set (catches double free). */
static int mask_clear(unsigned int *map, unsigned int mask)
{
	assert((*map & mask) == mask);
	*map &= ~mask;
	return 1;
}

/* Callback: set 'mask' bits; asserts they were all clear. */
static int mask_set(unsigned int *map, unsigned int mask)
{
	assert(!(*map & mask));
	*map |= mask;
	return 1;
}

/* Non-zero if the given block range is entirely free. */
static int blocks_free(struct pool *pool, unsigned int pool_idx,
		       unsigned int idx, unsigned int nr_blocks)
{
	return blocks_iter(pool, pool_idx, idx, nr_blocks, mask_cmp);
}

/* Mark the given block range busy. */
static void set_blocks(struct pool *pool, unsigned int pool_idx,
		       unsigned int idx, unsigned int nr_blocks)
{
	blocks_iter(pool, pool_idx, idx, nr_blocks, mask_set);
}

/* Mark the given block range free. */
static void clear_blocks(struct pool *pool, unsigned int pool_idx,
			 unsigned int idx, unsigned int nr_blocks)
{
	blocks_iter(pool, pool_idx, idx, nr_blocks, mask_clear);
}

/*
 * Find the first zero bit in 'word' strictly after bit 'start'.
 * NOTE(review): 'word' is signed, so >> is an arithmetic shift, and the
 * assert compares int against -1U — behaves as intended on common
 * two's-complement platforms, but an unsigned type would be cleaner; confirm
 * ffz() semantics in arch/arch.h before touching this.
 */
static int find_next_zero(int word, int start)
{
	assert(word != -1U);
	word >>= (start + 1);
	return ffz(word) + start + 1;
}

/*
 * Create a new pool of at least 'alloc_size' usable bytes, backed by an
 * unlinked temp file. Layout: [data blocks][bitmap]. Returns 0 on success,
 * 1 on failure (with an error printed to stderr).
 */
static int add_pool(struct pool *pool, unsigned int alloc_size)
{
	int fd, bitmap_blocks;
	char file[] = "/tmp/.fio_smalloc.XXXXXX";
	void *ptr;

	fd = mkstemp(file);
	if (fd < 0)
		goto out_close;	/* NOTE(review): this path close()s fd == -1; harmless but sloppy */

	/* reserve room for per-allocation overhead so a max-size request still fits */
#ifdef SMALLOC_REDZONE
	alloc_size += sizeof(unsigned int);
#endif
	alloc_size += sizeof(struct block_hdr);
	if (alloc_size < INITIAL_SIZE)
		alloc_size = INITIAL_SIZE;

	/* round up to nearest full number of blocks */
	alloc_size = (alloc_size + SMALLOC_BPL - 1) & ~(SMALLOC_BPL - 1);
	bitmap_blocks = alloc_size / SMALLOC_BPL;
	/* the bitmap itself lives in the same mapping, after the data area */
	alloc_size += bitmap_blocks * sizeof(unsigned int);
	pool->mmap_size = alloc_size;

	pool->nr_blocks = bitmap_blocks;
	pool->free_blocks = bitmap_blocks * SMALLOC_BPB;

	if (ftruncate(fd, alloc_size) < 0)
		goto out_unlink;

	ptr = mmap(NULL, alloc_size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
	if (ptr == MAP_FAILED)
		goto out_unlink;

	memset(ptr, 0, alloc_size);
	pool->map = ptr;
	pool->bitmap = (void *) ptr + (pool->nr_blocks * SMALLOC_BPL);

#ifdef MP_SAFE
	pool->lock = fio_mutex_init(1);
	if (!pool->lock)
		goto out_unlink;
#endif

	/*
	 * Unlink pool file now. It wont get deleted until the fd is closed,
	 * which happens both for cleanup or unexpected quit. This way we
	 * don't leave temp files around in case of a crash.
	 */
	unlink(file);
	pool->fd = fd;

	nr_pools++;
	return 0;
out_unlink:
	fprintf(stderr, "smalloc: failed adding pool\n");
	if (pool->map)
		munmap(pool->map, pool->mmap_size);
	unlink(file);
out_close:
	close(fd);
	return 1;
}

/* One-time init: set up the global rw lock and the first pool. Aborts on failure. */
void sinit(void)
{
	int ret;

#ifdef MP_SAFE
	lock = fio_mutex_rw_init();
#endif
	ret = add_pool(&mp[0], INITIAL_SIZE);
	assert(!ret);
}

static void cleanup_pool(struct pool *pool)
{
	/*
	 * This will also remove the temporary file we used as a backing
	 * store, it was already unlinked
	 */
	close(pool->fd);
	munmap(pool->map, pool->mmap_size);

	if (pool->lock)
		fio_mutex_remove(pool->lock);
}

/* Tear down every pool and the global lock. */
void scleanup(void)
{
	unsigned int i;

	for (i = 0; i < nr_pools; i++)
		cleanup_pool(&mp[i]);

	if (lock)
		fio_mutex_remove(lock);
}

#ifdef SMALLOC_REDZONE
/* Stamp the guard words before and after the user area of a fresh allocation. */
static void fill_redzone(struct block_hdr *hdr)
{
	unsigned int *postred = (void *) hdr + hdr->size - sizeof(unsigned int);

	hdr->prered = SMALLOC_PRE_RED;
	*postred = SMALLOC_POST_RED;
}

/* On free: verify both guard words, abort loudly if either was overwritten. */
static void sfree_check_redzone(struct block_hdr *hdr)
{
	unsigned int *postred = (void *) hdr + hdr->size - sizeof(unsigned int);

	if (hdr->prered != SMALLOC_PRE_RED) {
		fprintf(stderr, "smalloc pre redzone destroyed!\n");
		fprintf(stderr, "  ptr=%p, prered=%x, expected %x\n",
				hdr, hdr->prered, SMALLOC_PRE_RED);
		assert(0);
	}
	if (*postred != SMALLOC_POST_RED) {
		fprintf(stderr, "smalloc post redzone destroyed!\n");
		fprintf(stderr, "  ptr=%p, postred=%x, expected %x\n",
				hdr, *postred, SMALLOC_POST_RED);
		assert(0);
	}
}
#else
static void fill_redzone(struct block_hdr *hdr)
{
}

static void sfree_check_redzone(struct block_hdr *hdr)
{
}
#endif

/* Free 'ptr' (a user pointer) back into 'pool': check redzones, clear bitmap bits. */
static void sfree_pool(struct pool *pool, void *ptr)
{
	struct block_hdr *hdr;
	unsigned int i, idx;
	unsigned long offset;

	if (!ptr)
		return;

	/* step back to the header that smalloc_pool() prepended */
	ptr -= sizeof(*hdr);
	hdr = ptr;

	assert(ptr_valid(pool, ptr));

	sfree_check_redzone(hdr);

	/* translate byte offset into (bitmap word, bit) coordinates */
	offset = ptr - pool->map;
	i = offset / SMALLOC_BPL;
	idx = (offset % SMALLOC_BPL) / SMALLOC_BPB;

	pool_lock(pool);
	clear_blocks(pool, i, idx, size_to_blocks(hdr->size));
	if (i < pool->next_non_full)
		pool->next_non_full = i;	/* freed space may precede the scan hint */
	pool->free_blocks += size_to_blocks(hdr->size);
	pool_unlock(pool);
}

/* Public free: locate the owning pool by address, then free into it. */
void sfree(void *ptr)
{
	struct pool *pool = NULL;
	unsigned int i;

	if (!ptr)
		return;

	global_read_lock();

	for (i = 0; i < nr_pools; i++) {
		if (ptr_valid(&mp[i], ptr)) {
			pool = &mp[i];
			break;
		}
	}

	global_read_unlock();

	/* freeing a pointer smalloc never handed out is a hard bug */
	assert(pool);
	sfree_pool(pool, ptr);
}

/*
 * Core allocator: first-fit scan of the pool bitmap for a run of
 * size_to_blocks(size) contiguous free blocks. Returns a pointer to the
 * start of the run (header space included in 'size'), or NULL if the pool
 * cannot satisfy the request.
 */
static void *__smalloc_pool(struct pool *pool, unsigned int size)
{
	unsigned int nr_blocks;
	unsigned int i;
	unsigned int offset;
	unsigned int last_idx;
	void *ret = NULL;

	pool_lock(pool);

	nr_blocks = size_to_blocks(size);
	if (nr_blocks > pool->free_blocks)
		goto fail;

	i = pool->next_non_full;	/* skip words known to be full */
	last_idx = 0;
	offset = -1U;
	while (i < pool->nr_blocks) {
		unsigned int idx;

		if (pool->bitmap[i] == -1U) {
			/* word completely busy: advance both index and hint */
			i++;
			pool->next_non_full = i;
			last_idx = 0;
			continue;
		}

		idx = find_next_zero(pool->bitmap[i], last_idx);
		if (!blocks_free(pool, i, idx, nr_blocks)) {
			/* run blocked somewhere inside; resume scanning past it */
			idx += nr_blocks;
			if (idx < SMALLOC_BPI)
				last_idx = idx;
			else {
				last_idx = 0;
				while (idx >= SMALLOC_BPI) {
					i++;
					idx -= SMALLOC_BPI;
				}
			}
			continue;
		}
		set_blocks(pool, i, idx, nr_blocks);
		offset = i * SMALLOC_BPL + idx * SMALLOC_BPB;
		break;
	}

	/* i < nr_blocks only when the break above fired, i.e. offset is valid */
	if (i < pool->nr_blocks) {
		pool->free_blocks -= nr_blocks;
		ret = pool->map + offset;
	}
fail:
	pool_unlock(pool);
	return ret;
}
423static void *smalloc_pool(struct pool *pool, unsigned int size) 424{ 425 unsigned int alloc_size = size + sizeof(struct block_hdr); 426 void *ptr; 427 428#ifdef SMALLOC_REDZONE 429 alloc_size += sizeof(unsigned int); 430#endif 431 432 ptr = __smalloc_pool(pool, alloc_size); 433 if (ptr) { 434 struct block_hdr *hdr = ptr; 435 436 hdr->size = alloc_size; 437 fill_redzone(hdr); 438 439 ptr += sizeof(*hdr); 440 memset(ptr, 0, size); 441 } 442 443 return ptr; 444} 445 446void *smalloc(unsigned int size) 447{ 448 unsigned int i; 449 450 global_write_lock(); 451 i = last_pool; 452 453 do { 454 for (; i < nr_pools; i++) { 455 void *ptr = smalloc_pool(&mp[i], size); 456 457 if (ptr) { 458 last_pool = i; 459 global_write_unlock(); 460 return ptr; 461 } 462 } 463 if (last_pool) { 464 last_pool = 0; 465 continue; 466 } 467 468 if (nr_pools + 1 > MAX_POOLS) 469 break; 470 else { 471 i = nr_pools; 472 if (add_pool(&mp[nr_pools], size)) 473 goto out; 474 } 475 } while (1); 476 477out: 478 global_write_unlock(); 479 return NULL; 480} 481 482char *smalloc_strdup(const char *str) 483{ 484 char *ptr; 485 486 ptr = smalloc(strlen(str) + 1); 487 strcpy(ptr, str); 488 return ptr; 489} 490