arena.h revision ef8897b4b938111fcc9b54725067f1dbb33a4c20
1/******************************************************************************/ 2#ifdef JEMALLOC_H_TYPES 3 4/* 5 * Subpages are an artificially designated partitioning of pages. Their only 6 * purpose is to support subpage-spaced size classes. 7 * 8 * There must be at least 4 subpages per page, due to the way size classes are 9 * handled. 10 */ 11#define LG_SUBPAGE 8 12#define SUBPAGE ((size_t)(1U << LG_SUBPAGE)) 13#define SUBPAGE_MASK (SUBPAGE - 1) 14 15/* Return the smallest subpage multiple that is >= s. */ 16#define SUBPAGE_CEILING(s) \ 17 (((s) + SUBPAGE_MASK) & ~SUBPAGE_MASK) 18 19/* Smallest size class to support. */ 20#define LG_TINY_MIN 3 21#define TINY_MIN (1U << LG_TINY_MIN) 22 23/* 24 * Maximum size class that is a multiple of the quantum, but not (necessarily) 25 * a power of 2. Above this size, allocations are rounded up to the nearest 26 * power of 2. 27 */ 28#define LG_QSPACE_MAX_DEFAULT 7 29 30/* 31 * Maximum size class that is a multiple of the cacheline, but not (necessarily) 32 * a power of 2. Above this size, allocations are rounded up to the nearest 33 * power of 2. 34 */ 35#define LG_CSPACE_MAX_DEFAULT 9 36 37/* 38 * RUN_MAX_OVRHD indicates maximum desired run header overhead. Runs are sized 39 * as small as possible such that this setting is still honored, without 40 * violating other constraints. The goal is to make runs as small as possible 41 * without exceeding a per run external fragmentation threshold. 42 * 43 * We use binary fixed point math for overhead computations, where the binary 44 * point is implicitly RUN_BFP bits to the left. 45 * 46 * Note that it is possible to set RUN_MAX_OVRHD low enough that it cannot be 47 * honored for some/all object sizes, since when heap profiling is enabled 48 * there is one pointer of header overhead per object (plus a constant). This 49 * constraint is relaxed (ignored) for runs that are so small that the 50 * per-region overhead is greater than: 51 * 52 * (RUN_MAX_OVRHD / (reg_size << (3+RUN_BFP)) 53 */ 54#define RUN_BFP 12 55/* \/ Implicit binary fixed point. */ 56#define RUN_MAX_OVRHD 0x0000003dU 57#define RUN_MAX_OVRHD_RELAX 0x00001800U 58 59/* Maximum number of regions in one run. */ 60#define LG_RUN_MAXREGS 11 61#define RUN_MAXREGS (1U << LG_RUN_MAXREGS) 62 63/* 64 * The minimum ratio of active:dirty pages per arena is computed as: 65 * 66 * (nactive >> opt_lg_dirty_mult) >= ndirty 67 * 68 * So, supposing that opt_lg_dirty_mult is 5, there can be no less than 32 69 * times as many active pages as dirty pages. 70 */ 71#define LG_DIRTY_MULT_DEFAULT 5 72 73typedef struct arena_chunk_map_s arena_chunk_map_t; 74typedef struct arena_chunk_s arena_chunk_t; 75typedef struct arena_run_s arena_run_t; 76typedef struct arena_bin_info_s arena_bin_info_t; 77typedef struct arena_bin_s arena_bin_t; 78typedef struct arena_s arena_t; 79 80#endif /* JEMALLOC_H_TYPES */ 81/******************************************************************************/ 82#ifdef JEMALLOC_H_STRUCTS 83 84/* Each element of the chunk map corresponds to one page within the chunk. */ 85struct arena_chunk_map_s { 86#ifndef JEMALLOC_PROF 87 /* 88 * Overlay prof_ctx in order to allow it to be referenced by dead code. 89 * Such antics aren't warranted for per arena data structures, but 90 * chunk map overhead accounts for a percentage of memory, rather than 91 * being just a fixed cost. 92 */ 93 union { 94#endif 95 union { 96 /* 97 * Linkage for run trees. There are two disjoint uses: 98 * 99 * 1) arena_t's runs_avail_{clean,dirty} trees. 100 * 2) arena_run_t conceptually uses this linkage for in-use 101 * non-full runs, rather than directly embedding linkage. 102 */ 103 rb_node(arena_chunk_map_t) rb_link; 104 /* 105 * List of runs currently in purgatory. arena_chunk_purge() 106 * temporarily allocates runs that contain dirty pages while 107 * purging, so that other threads cannot use the runs while the 108 * purging thread is operating without the arena lock held. 109 */ 110 ql_elm(arena_chunk_map_t) ql_link; 111 } u; 112 113 /* Profile counters, used for large object runs. */ 114 prof_ctx_t *prof_ctx; 115#ifndef JEMALLOC_PROF 116 }; /* union { ... }; */ 117#endif 118 119 /* 120 * Run address (or size) and various flags are stored together. The bit 121 * layout looks like (assuming 32-bit system): 122 * 123 * ???????? ???????? ????---- ----dula 124 * 125 * ? : Unallocated: Run address for first/last pages, unset for internal 126 * pages. 127 * Small: Run page offset. 128 * Large: Run size for first page, unset for trailing pages. 129 * - : Unused. 130 * d : dirty? 131 * u : unzeroed? 132 * l : large? 133 * a : allocated? 134 * 135 * Following are example bit patterns for the three types of runs. 136 * 137 * p : run page offset 138 * s : run size 139 * c : (binind+1) for size class (used only if prof_promote is true) 140 * x : don't care 141 * - : 0 142 * + : 1 143 * [DULA] : bit set 144 * [dula] : bit unset 145 * 146 * Unallocated (clean): 147 * ssssssss ssssssss ssss---- ----du-a 148 * xxxxxxxx xxxxxxxx xxxx---- -----Uxx 149 * ssssssss ssssssss ssss---- ----dU-a 150 * 151 * Unallocated (dirty): 152 * ssssssss ssssssss ssss---- ----D--a 153 * xxxxxxxx xxxxxxxx xxxx---- ----xxxx 154 * ssssssss ssssssss ssss---- ----D--a 155 * 156 * Small: 157 * pppppppp pppppppp pppp---- ----d--A 158 * pppppppp pppppppp pppp---- -------A 159 * pppppppp pppppppp pppp---- ----d--A 160 * 161 * Large: 162 * ssssssss ssssssss ssss---- ----D-LA 163 * xxxxxxxx xxxxxxxx xxxx---- ----xxxx 164 * -------- -------- -------- ----D-LA 165 * 166 * Large (sampled, size <= PAGE_SIZE): 167 * ssssssss ssssssss sssscccc ccccD-LA 168 * 169 * Large (not sampled, size == PAGE_SIZE): 170 * ssssssss ssssssss ssss---- ----D-LA 171 */ 172 size_t bits; 173#define CHUNK_MAP_CLASS_SHIFT 4 174#define CHUNK_MAP_CLASS_MASK ((size_t)0xff0U) 175#define CHUNK_MAP_FLAGS_MASK ((size_t)0xfU) 176#define CHUNK_MAP_DIRTY ((size_t)0x8U) 177#define CHUNK_MAP_UNZEROED ((size_t)0x4U) 178#define CHUNK_MAP_LARGE ((size_t)0x2U) 179#define CHUNK_MAP_ALLOCATED ((size_t)0x1U) 180#define CHUNK_MAP_KEY CHUNK_MAP_ALLOCATED 181}; 182typedef rb_tree(arena_chunk_map_t) arena_avail_tree_t; 183typedef rb_tree(arena_chunk_map_t) arena_run_tree_t; 184 185/* Arena chunk header. */ 186struct arena_chunk_s { 187 /* Arena that owns the chunk. */ 188 arena_t *arena; 189 190 /* Linkage for the arena's chunks_dirty list. */ 191 ql_elm(arena_chunk_t) link_dirty; 192 193 /* 194 * True if the chunk is currently in the chunks_dirty list, due to 195 * having at some point contained one or more dirty pages. Removal 196 * from chunks_dirty is lazy, so (dirtied && ndirty == 0) is possible. 197 */ 198 bool dirtied; 199 200 /* Number of dirty pages. */ 201 size_t ndirty; 202 203 /* 204 * Map of pages within chunk that keeps track of free/large/small. The 205 * first map_bias entries are omitted, since the chunk header does not 206 * need to be tracked in the map. This omission saves a header page 207 * for common chunk sizes (e.g. 4 MiB). 208 */ 209 arena_chunk_map_t map[1]; /* Dynamically sized. */ 210}; 211typedef rb_tree(arena_chunk_t) arena_chunk_tree_t; 212 213struct arena_run_s { 214 /* Bin this run is associated with. */ 215 arena_bin_t *bin; 216 217 /* Index of next region that has never been allocated, or nregs. */ 218 uint32_t nextind; 219 220 /* Number of free regions in run. */ 221 unsigned nfree; 222}; 223 224/* 225 * Read-only information associated with each element of arena_t's bins array 226 * is stored separately, partly to reduce memory usage (only one copy, rather 227 * than one per arena), but mainly to avoid false cacheline sharing. 228 */ 229struct arena_bin_info_s { 230 /* Size of regions in a run for this bin's size class. */ 231 size_t reg_size; 232 233 /* Total size of a run for this bin's size class. */ 234 size_t run_size; 235 236 /* Total number of regions in a run for this bin's size class. */ 237 uint32_t nregs; 238 239 /* 240 * Offset of first bitmap_t element in a run header for this bin's size 241 * class. 242 */ 243 uint32_t bitmap_offset; 244 245 /* 246 * Metadata used to manipulate bitmaps for runs associated with this 247 * bin. 248 */ 249 bitmap_info_t bitmap_info; 250 251 /* 252 * Offset of first (prof_ctx_t *) in a run header for this bin's size 253 * class, or 0 if (config_prof == false || opt_prof == false). 254 */ 255 uint32_t ctx0_offset; 256 257 /* Offset of first region in a run for this bin's size class. */ 258 uint32_t reg0_offset; 259}; 260 261struct arena_bin_s { 262 /* 263 * All operations on runcur, runs, and stats require that lock be 264 * locked. Run allocation/deallocation are protected by the arena lock, 265 * which may be acquired while holding one or more bin locks, but not 266 * vise versa. 267 */ 268 malloc_mutex_t lock; 269 270 /* 271 * Current run being used to service allocations of this bin's size 272 * class. 273 */ 274 arena_run_t *runcur; 275 276 /* 277 * Tree of non-full runs. This tree is used when looking for an 278 * existing run when runcur is no longer usable. We choose the 279 * non-full run that is lowest in memory; this policy tends to keep 280 * objects packed well, and it can also help reduce the number of 281 * almost-empty chunks. 282 */ 283 arena_run_tree_t runs; 284 285 /* Bin statistics. */ 286 malloc_bin_stats_t stats; 287}; 288 289struct arena_s { 290 /* This arena's index within the arenas array. */ 291 unsigned ind; 292 293 /* 294 * Number of threads currently assigned to this arena. This field is 295 * protected by arenas_lock. 296 */ 297 unsigned nthreads; 298 299 /* 300 * There are three classes of arena operations from a locking 301 * perspective: 302 * 1) Thread asssignment (modifies nthreads) is protected by 303 * arenas_lock. 304 * 2) Bin-related operations are protected by bin locks. 305 * 3) Chunk- and run-related operations are protected by this mutex. 306 */ 307 malloc_mutex_t lock; 308 309 arena_stats_t stats; 310 /* 311 * List of tcaches for extant threads associated with this arena. 312 * Stats from these are merged incrementally, and at exit. 313 */ 314 ql_head(tcache_t) tcache_ql; 315 316 uint64_t prof_accumbytes; 317 318 /* List of dirty-page-containing chunks this arena manages. */ 319 ql_head(arena_chunk_t) chunks_dirty; 320 321 /* 322 * In order to avoid rapid chunk allocation/deallocation when an arena 323 * oscillates right on the cusp of needing a new chunk, cache the most 324 * recently freed chunk. The spare is left in the arena's chunk trees 325 * until it is deleted. 326 * 327 * There is one spare chunk per arena, rather than one spare total, in 328 * order to avoid interactions between multiple threads that could make 329 * a single spare inadequate. 330 */ 331 arena_chunk_t *spare; 332 333 /* Number of pages in active runs. */ 334 size_t nactive; 335 336 /* 337 * Current count of pages within unused runs that are potentially 338 * dirty, and for which madvise(... MADV_DONTNEED) has not been called. 339 * By tracking this, we can institute a limit on how much dirty unused 340 * memory is mapped for each arena. 341 */ 342 size_t ndirty; 343 344 /* 345 * Approximate number of pages being purged. It is possible for 346 * multiple threads to purge dirty pages concurrently, and they use 347 * npurgatory to indicate the total number of pages all threads are 348 * attempting to purge. 349 */ 350 size_t npurgatory; 351 352 /* 353 * Size/address-ordered trees of this arena's available runs. The trees 354 * are used for first-best-fit run allocation. The dirty tree contains 355 * runs with dirty pages (i.e. very likely to have been touched and 356 * therefore have associated physical pages), whereas the clean tree 357 * contains runs with pages that either have no associated physical 358 * pages, or have pages that the kernel may recycle at any time due to 359 * previous madvise(2) calls. The dirty tree is used in preference to 360 * the clean tree for allocations, because using dirty pages reduces 361 * the amount of dirty purging necessary to keep the active:dirty page 362 * ratio below the purge threshold. 363 */ 364 arena_avail_tree_t runs_avail_clean; 365 arena_avail_tree_t runs_avail_dirty; 366 367 /* 368 * bins is used to store trees of free regions of the following sizes, 369 * assuming a 64-bit system with 16-byte quantum, 4 KiB page size, and 370 * default MALLOC_CONF. 371 * 372 * bins[i] | size | 373 * --------+--------+ 374 * 0 | 8 | 375 * --------+--------+ 376 * 1 | 16 | 377 * 2 | 32 | 378 * 3 | 48 | 379 * : : 380 * 6 | 96 | 381 * 7 | 112 | 382 * 8 | 128 | 383 * --------+--------+ 384 * 9 | 192 | 385 * 10 | 256 | 386 * 11 | 320 | 387 * 12 | 384 | 388 * 13 | 448 | 389 * 14 | 512 | 390 * --------+--------+ 391 * 15 | 768 | 392 * 16 | 1024 | 393 * 17 | 1280 | 394 * : : 395 * 25 | 3328 | 396 * 26 | 3584 | 397 * 27 | 3840 | 398 * --------+--------+ 399 */ 400 arena_bin_t bins[1]; /* Dynamically sized. */ 401}; 402 403#endif /* JEMALLOC_H_STRUCTS */ 404/******************************************************************************/ 405#ifdef JEMALLOC_H_EXTERNS 406 407extern size_t opt_lg_qspace_max; 408extern size_t opt_lg_cspace_max; 409extern ssize_t opt_lg_dirty_mult; 410/* 411 * small_size2bin is a compact lookup table that rounds request sizes up to 412 * size classes. In order to reduce cache footprint, the table is compressed, 413 * and all accesses are via the SMALL_SIZE2BIN macro. 414 */ 415extern uint8_t const *small_size2bin; 416#define SMALL_SIZE2BIN(s) (small_size2bin[(s-1) >> LG_TINY_MIN]) 417 418extern arena_bin_info_t *arena_bin_info; 419 420/* Various bin-related settings. */ 421 /* Number of (2^n)-spaced tiny bins. */ 422#define ntbins ((unsigned)(LG_QUANTUM - LG_TINY_MIN)) 423extern unsigned nqbins; /* Number of quantum-spaced bins. */ 424extern unsigned ncbins; /* Number of cacheline-spaced bins. */ 425extern unsigned nsbins; /* Number of subpage-spaced bins. */ 426extern unsigned nbins; 427#define tspace_max ((size_t)(QUANTUM >> 1)) 428#define qspace_min QUANTUM 429extern size_t qspace_max; 430extern size_t cspace_min; 431extern size_t cspace_max; 432extern size_t sspace_min; 433extern size_t sspace_max; 434#define small_maxclass sspace_max 435 436#define nlclasses (chunk_npages - map_bias) 437 438void arena_purge_all(arena_t *arena); 439void arena_prof_accum(arena_t *arena, uint64_t accumbytes); 440void arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, 441 size_t binind, uint64_t prof_accumbytes); 442void *arena_malloc_small(arena_t *arena, size_t size, bool zero); 443void *arena_malloc_large(arena_t *arena, size_t size, bool zero); 444void *arena_palloc(arena_t *arena, size_t size, size_t alloc_size, 445 size_t alignment, bool zero); 446size_t arena_salloc(const void *ptr); 447void arena_prof_promoted(const void *ptr, size_t size); 448size_t arena_salloc_demote(const void *ptr); 449void arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr, 450 arena_chunk_map_t *mapelm); 451void arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr); 452void arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty, 453 arena_stats_t *astats, malloc_bin_stats_t *bstats, 454 malloc_large_stats_t *lstats); 455void *arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, 456 size_t extra, bool zero); 457void *arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra, 458 size_t alignment, bool zero); 459bool arena_new(arena_t *arena, unsigned ind); 460bool arena_boot(void); 461 462#endif /* JEMALLOC_H_EXTERNS */ 463/******************************************************************************/ 464#ifdef JEMALLOC_H_INLINES 465 466#ifndef JEMALLOC_ENABLE_INLINE 467size_t arena_bin_index(arena_t *arena, arena_bin_t *bin); 468unsigned arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, 469 const void *ptr); 470prof_ctx_t *arena_prof_ctx_get(const void *ptr); 471void arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx); 472void *arena_malloc(size_t size, bool zero); 473void arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr); 474#endif 475 476#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_)) 477JEMALLOC_INLINE size_t 478arena_bin_index(arena_t *arena, arena_bin_t *bin) 479{ 480 size_t binind = bin - arena->bins; 481 assert(binind < nbins); 482 return (binind); 483} 484 485JEMALLOC_INLINE unsigned 486arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr) 487{ 488 unsigned shift, diff, regind; 489 size_t size; 490 491 /* 492 * Freeing a pointer lower than region zero can cause assertion 493 * failure. 494 */ 495 assert((uintptr_t)ptr >= (uintptr_t)run + 496 (uintptr_t)bin_info->reg0_offset); 497 498 /* 499 * Avoid doing division with a variable divisor if possible. Using 500 * actual division here can reduce allocator throughput by over 20%! 501 */ 502 diff = (unsigned)((uintptr_t)ptr - (uintptr_t)run - 503 bin_info->reg0_offset); 504 505 /* Rescale (factor powers of 2 out of the numerator and denominator). */ 506 size = bin_info->reg_size; 507 shift = ffs(size) - 1; 508 diff >>= shift; 509 size >>= shift; 510 511 if (size == 1) { 512 /* The divisor was a power of 2. */ 513 regind = diff; 514 } else { 515 /* 516 * To divide by a number D that is not a power of two we 517 * multiply by (2^21 / D) and then right shift by 21 positions. 518 * 519 * X / D 520 * 521 * becomes 522 * 523 * (X * size_invs[D - 3]) >> SIZE_INV_SHIFT 524 * 525 * We can omit the first three elements, because we never 526 * divide by 0, and 1 and 2 are both powers of two, which are 527 * handled above. 528 */ 529#define SIZE_INV_SHIFT ((sizeof(unsigned) << 3) - LG_RUN_MAXREGS) 530#define SIZE_INV(s) (((1U << SIZE_INV_SHIFT) / (s)) + 1) 531 static const unsigned size_invs[] = { 532 SIZE_INV(3), 533 SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7), 534 SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11), 535 SIZE_INV(12), SIZE_INV(13), SIZE_INV(14), SIZE_INV(15), 536 SIZE_INV(16), SIZE_INV(17), SIZE_INV(18), SIZE_INV(19), 537 SIZE_INV(20), SIZE_INV(21), SIZE_INV(22), SIZE_INV(23), 538 SIZE_INV(24), SIZE_INV(25), SIZE_INV(26), SIZE_INV(27), 539 SIZE_INV(28), SIZE_INV(29), SIZE_INV(30), SIZE_INV(31) 540 }; 541 542 if (size <= ((sizeof(size_invs) / sizeof(unsigned)) + 2)) 543 regind = (diff * size_invs[size - 3]) >> SIZE_INV_SHIFT; 544 else 545 regind = diff / size; 546#undef SIZE_INV 547#undef SIZE_INV_SHIFT 548 } 549 assert(diff == regind * size); 550 assert(regind < bin_info->nregs); 551 552 return (regind); 553} 554 555JEMALLOC_INLINE prof_ctx_t * 556arena_prof_ctx_get(const void *ptr) 557{ 558 prof_ctx_t *ret; 559 arena_chunk_t *chunk; 560 size_t pageind, mapbits; 561 562 cassert(config_prof); 563 assert(ptr != NULL); 564 assert(CHUNK_ADDR2BASE(ptr) != ptr); 565 566 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); 567 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT; 568 mapbits = chunk->map[pageind-map_bias].bits; 569 assert((mapbits & CHUNK_MAP_ALLOCATED) != 0); 570 if ((mapbits & CHUNK_MAP_LARGE) == 0) { 571 if (prof_promote) 572 ret = (prof_ctx_t *)(uintptr_t)1U; 573 else { 574 arena_run_t *run = (arena_run_t *)((uintptr_t)chunk + 575 (uintptr_t)((pageind - (mapbits >> PAGE_SHIFT)) << 576 PAGE_SHIFT)); 577 size_t binind = arena_bin_index(chunk->arena, run->bin); 578 arena_bin_info_t *bin_info = &arena_bin_info[binind]; 579 unsigned regind; 580 581 regind = arena_run_regind(run, bin_info, ptr); 582 ret = *(prof_ctx_t **)((uintptr_t)run + 583 bin_info->ctx0_offset + (regind * 584 sizeof(prof_ctx_t *))); 585 } 586 } else 587 ret = chunk->map[pageind-map_bias].prof_ctx; 588 589 return (ret); 590} 591 592JEMALLOC_INLINE void 593arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx) 594{ 595 arena_chunk_t *chunk; 596 size_t pageind, mapbits; 597 598 cassert(config_prof); 599 assert(ptr != NULL); 600 assert(CHUNK_ADDR2BASE(ptr) != ptr); 601 602 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); 603 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT; 604 mapbits = chunk->map[pageind-map_bias].bits; 605 assert((mapbits & CHUNK_MAP_ALLOCATED) != 0); 606 if ((mapbits & CHUNK_MAP_LARGE) == 0) { 607 if (prof_promote == false) { 608 arena_run_t *run = (arena_run_t *)((uintptr_t)chunk + 609 (uintptr_t)((pageind - (mapbits >> PAGE_SHIFT)) << 610 PAGE_SHIFT)); 611 arena_bin_t *bin = run->bin; 612 size_t binind; 613 arena_bin_info_t *bin_info; 614 unsigned regind; 615 616 binind = arena_bin_index(chunk->arena, bin); 617 bin_info = &arena_bin_info[binind]; 618 regind = arena_run_regind(run, bin_info, ptr); 619 620 *((prof_ctx_t **)((uintptr_t)run + bin_info->ctx0_offset 621 + (regind * sizeof(prof_ctx_t *)))) = ctx; 622 } else 623 assert((uintptr_t)ctx == (uintptr_t)1U); 624 } else 625 chunk->map[pageind-map_bias].prof_ctx = ctx; 626} 627 628JEMALLOC_INLINE void * 629arena_malloc(size_t size, bool zero) 630{ 631 tcache_t *tcache; 632 633 assert(size != 0); 634 assert(QUANTUM_CEILING(size) <= arena_maxclass); 635 636 if (size <= small_maxclass) { 637 if ((tcache = tcache_get()) != NULL) 638 return (tcache_alloc_small(tcache, size, zero)); 639 else 640 return (arena_malloc_small(choose_arena(), size, zero)); 641 } else { 642 if (size <= tcache_maxclass && (tcache = tcache_get()) != NULL) 643 return (tcache_alloc_large(tcache, size, zero)); 644 else 645 return (arena_malloc_large(choose_arena(), size, zero)); 646 } 647} 648 649JEMALLOC_INLINE void 650arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr) 651{ 652 size_t pageind; 653 arena_chunk_map_t *mapelm; 654 tcache_t *tcache = tcache_get(); 655 656 assert(arena != NULL); 657 assert(chunk->arena == arena); 658 assert(ptr != NULL); 659 assert(CHUNK_ADDR2BASE(ptr) != ptr); 660 661 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT; 662 mapelm = &chunk->map[pageind-map_bias]; 663 assert((mapelm->bits & CHUNK_MAP_ALLOCATED) != 0); 664 if ((mapelm->bits & CHUNK_MAP_LARGE) == 0) { 665 /* Small allocation. */ 666 if (tcache != NULL) 667 tcache_dalloc_small(tcache, ptr); 668 else { 669 arena_run_t *run; 670 arena_bin_t *bin; 671 672 run = (arena_run_t *)((uintptr_t)chunk + 673 (uintptr_t)((pageind - (mapelm->bits >> 674 PAGE_SHIFT)) << PAGE_SHIFT)); 675 bin = run->bin; 676 if (config_debug) { 677 size_t binind = arena_bin_index(arena, bin); 678 UNUSED arena_bin_info_t *bin_info = 679 &arena_bin_info[binind]; 680 assert(((uintptr_t)ptr - ((uintptr_t)run + 681 (uintptr_t)bin_info->reg0_offset)) % 682 bin_info->reg_size == 0); 683 } 684 malloc_mutex_lock(&bin->lock); 685 arena_dalloc_bin(arena, chunk, ptr, mapelm); 686 malloc_mutex_unlock(&bin->lock); 687 } 688 } else { 689 size_t size = mapelm->bits & ~PAGE_MASK; 690 691 assert(((uintptr_t)ptr & PAGE_MASK) == 0); 692 693 if (size <= tcache_maxclass && tcache != NULL) { 694 tcache_dalloc_large(tcache, ptr, size); 695 } else { 696 malloc_mutex_lock(&arena->lock); 697 arena_dalloc_large(arena, chunk, ptr); 698 malloc_mutex_unlock(&arena->lock); 699 } 700 } 701} 702#endif 703 704#endif /* JEMALLOC_H_INLINES */ 705/******************************************************************************/ 706