arena.h revision 01b3fe55ff3ac8e4aa689f09fcb0729da8037638
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

/*
 * RUN_MAX_OVRHD indicates maximum desired run header overhead.  Runs are sized
 * as small as possible such that this setting is still honored, without
 * violating other constraints.  The goal is to make runs as small as possible
 * without exceeding a per run external fragmentation threshold.
 *
 * We use binary fixed point math for overhead computations, where the binary
 * point is implicitly RUN_BFP bits to the left.
 *
 * Note that it is possible to set RUN_MAX_OVRHD low enough that it cannot be
 * honored for some/all object sizes, since when heap profiling is enabled
 * there is one pointer of header overhead per object (plus a constant).  This
 * constraint is relaxed (ignored) for runs that are so small that the
 * per-region overhead is greater than:
 *
 *   (RUN_MAX_OVRHD / (reg_size << (3+RUN_BFP)))
 */
#define RUN_BFP			12
/*                                    \/   Implicit binary fixed point. */
#define RUN_MAX_OVRHD		0x0000003dU
#define RUN_MAX_OVRHD_RELAX	0x00001800U
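/*
 * Worked example of the fixed point encoding above (illustrative only): with
 * RUN_BFP == 12, a ratio r is stored as the integer r * 2^12, so
 * RUN_MAX_OVRHD == 0x3d == 61 encodes 61/4096, i.e. roughly 1.5% maximum
 * header overhead per run, and RUN_MAX_OVRHD_RELAX == 0x1800 == 6144 encodes
 * 6144/4096 == 1.5.  One way to express the overhead check in pure integer
 * arithmetic is (hdr_size << RUN_BFP) <= RUN_MAX_OVRHD * run_size; the exact
 * form of the check lives in arena.c.
 */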
/* Maximum number of regions in one run. */
#define LG_RUN_MAXREGS		11
#define RUN_MAXREGS		(1U << LG_RUN_MAXREGS)

/*
 * The minimum ratio of active:dirty pages per arena is computed as:
 *
 *   (nactive >> opt_lg_dirty_mult) >= ndirty
 *
 * So, supposing that opt_lg_dirty_mult is 5, there must be at least 32 times
 * as many active pages as dirty pages.
 */
#define LG_DIRTY_MULT_DEFAULT	5
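/*
 * Illustrative example (not part of the implementation): with the default
 * opt_lg_dirty_mult of 5, an arena with nactive == 10000 active pages
 * tolerates up to (10000 >> 5) == 312 dirty pages; once ndirty grows past
 * that, dirty pages are purged until the invariant above holds again.
 */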
typedef struct arena_chunk_map_s arena_chunk_map_t;
typedef struct arena_chunk_s arena_chunk_t;
typedef struct arena_run_s arena_run_t;
typedef struct arena_bin_info_s arena_bin_info_t;
typedef struct arena_bin_s arena_bin_t;
typedef struct arena_s arena_t;

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

/* Each element of the chunk map corresponds to one page within the chunk. */
struct arena_chunk_map_s {
#ifndef JEMALLOC_PROF
	/*
	 * Overlay prof_ctx in order to allow it to be referenced by dead code.
	 * Such antics aren't warranted for per arena data structures, but
	 * chunk map overhead accounts for a percentage of memory, rather than
	 * being just a fixed cost.
	 */
	union {
#endif
	union {
		/*
		 * Linkage for run trees.  There are two disjoint uses:
		 *
		 * 1) arena_t's runs_avail_{clean,dirty} trees.
		 * 2) arena_run_t conceptually uses this linkage for in-use
		 *    non-full runs, rather than directly embedding linkage.
		 */
		rb_node(arena_chunk_map_t) rb_link;
		/*
		 * List of runs currently in purgatory.  arena_chunk_purge()
		 * temporarily allocates runs that contain dirty pages while
		 * purging, so that other threads cannot use the runs while the
		 * purging thread is operating without the arena lock held.
		 */
		ql_elm(arena_chunk_map_t) ql_link;
	} u;

	/* Profile counters, used for large object runs. */
	prof_ctx_t *prof_ctx;
#ifndef JEMALLOC_PROF
	}; /* union { ... }; */
#endif

	/*
	 * Run address (or size) and various flags are stored together.  The
	 * bit layout looks like (assuming 32-bit system):
	 *
	 *   ???????? ???????? ????---- ----dula
	 *
	 * ? : Unallocated: Run address for first/last pages, unset for
	 *                  internal pages.
	 *     Small: Run page offset.
	 *     Large: Run size for first page, unset for trailing pages.
	 * - : Unused.
	 * d : dirty?
	 * u : unzeroed?
	 * l : large?
	 * a : allocated?
	 *
	 * Following are example bit patterns for the three types of runs.
	 *
	 * p : run page offset
	 * s : run size
	 * c : (binind+1) for size class (used only if prof_promote is true)
	 * x : don't care
	 * - : 0
	 * + : 1
	 * [DULA] : bit set
	 * [dula] : bit unset
	 *
	 *   Unallocated (clean):
	 *     ssssssss ssssssss ssss---- ----du-a
	 *     xxxxxxxx xxxxxxxx xxxx---- -----Uxx
	 *     ssssssss ssssssss ssss---- ----dU-a
	 *
	 *   Unallocated (dirty):
	 *     ssssssss ssssssss ssss---- ----D--a
	 *     xxxxxxxx xxxxxxxx xxxx---- ----xxxx
	 *     ssssssss ssssssss ssss---- ----D--a
	 *
	 *   Small:
	 *     pppppppp pppppppp pppp---- ----d--A
	 *     pppppppp pppppppp pppp---- -------A
	 *     pppppppp pppppppp pppp---- ----d--A
	 *
	 *   Large:
	 *     ssssssss ssssssss ssss---- ----D-LA
	 *     xxxxxxxx xxxxxxxx xxxx---- ----xxxx
	 *     -------- -------- -------- ----D-LA
	 *
	 *   Large (sampled, size <= PAGE):
	 *     ssssssss ssssssss sssscccc ccccD-LA
	 *
	 *   Large (not sampled, size == PAGE):
	 *     ssssssss ssssssss ssss---- ----D-LA
	 */
	size_t bits;
#define CHUNK_MAP_CLASS_SHIFT	4
#define CHUNK_MAP_CLASS_MASK	((size_t)0xff0U)
#define CHUNK_MAP_FLAGS_MASK	((size_t)0xfU)
#define CHUNK_MAP_DIRTY		((size_t)0x8U)
#define CHUNK_MAP_UNZEROED	((size_t)0x4U)
#define CHUNK_MAP_LARGE		((size_t)0x2U)
#define CHUNK_MAP_ALLOCATED	((size_t)0x1U)
#define CHUNK_MAP_KEY		CHUNK_MAP_ALLOCATED
};
typedef rb_tree(arena_chunk_map_t) arena_avail_tree_t;
typedef rb_tree(arena_chunk_map_t) arena_run_tree_t;
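/*
 * Illustrative sketch (not part of the implementation): the inline functions
 * later in this header recover a pointer's map element and flags roughly as
 * follows, where CHUNK_ADDR2BASE(), LG_PAGE, PAGE_MASK, and map_bias are
 * defined elsewhere in jemalloc:
 *
 *	arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
 *	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
 *	size_t bits = chunk->map[pageind - map_bias].bits;
 *
 * If (bits & CHUNK_MAP_LARGE) is set, the high bits of the first page's
 * mapbits hold the run size:
 *
 *	size_t size = bits & ~PAGE_MASK;
 *
 * Otherwise, for an allocated small run, the high bits hold the page offset
 * back to the run's first page:
 *
 *	arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
 *	    ((pageind - (bits >> LG_PAGE)) << LG_PAGE));
 */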
/* Arena chunk header. */
struct arena_chunk_s {
	/* Arena that owns the chunk. */
	arena_t *arena;

	/* Linkage for the arena's chunks_dirty list. */
	ql_elm(arena_chunk_t) link_dirty;

	/*
	 * True if the chunk is currently in the chunks_dirty list, due to
	 * having at some point contained one or more dirty pages.  Removal
	 * from chunks_dirty is lazy, so (dirtied && ndirty == 0) is possible.
	 */
	bool dirtied;

	/* Number of dirty pages. */
	size_t ndirty;

	/*
	 * Map of pages within chunk that keeps track of free/large/small.  The
	 * first map_bias entries are omitted, since the chunk header does not
	 * need to be tracked in the map.  This omission saves a header page
	 * for common chunk sizes (e.g. 4 MiB).
	 */
	arena_chunk_map_t map[1]; /* Dynamically sized. */
};
typedef rb_tree(arena_chunk_t) arena_chunk_tree_t;

struct arena_run_s {
	/* Bin this run is associated with. */
	arena_bin_t *bin;

	/* Index of next region that has never been allocated, or nregs. */
	uint32_t nextind;

	/* Number of free regions in run. */
	unsigned nfree;
};

/*
 * Read-only information associated with each element of arena_t's bins array
 * is stored separately, partly to reduce memory usage (only one copy, rather
 * than one per arena), but mainly to avoid false cacheline sharing.
 */
struct arena_bin_info_s {
	/* Size of regions in a run for this bin's size class. */
	size_t reg_size;

	/* Total size of a run for this bin's size class. */
	size_t run_size;

	/* Total number of regions in a run for this bin's size class. */
	uint32_t nregs;

	/*
	 * Offset of first bitmap_t element in a run header for this bin's size
	 * class.
	 */
	uint32_t bitmap_offset;

	/*
	 * Metadata used to manipulate bitmaps for runs associated with this
	 * bin.
	 */
	bitmap_info_t bitmap_info;

	/*
	 * Offset of first (prof_ctx_t *) in a run header for this bin's size
	 * class, or 0 if (config_prof == false || opt_prof == false).
	 */
	uint32_t ctx0_offset;

	/* Offset of first region in a run for this bin's size class. */
	uint32_t reg0_offset;
};

struct arena_bin_s {
	/*
	 * All operations on runcur, runs, and stats require that lock be
	 * locked.  Run allocation/deallocation are protected by the arena
	 * lock, which may be acquired while holding one or more bin locks,
	 * but not vice versa.
	 */
	malloc_mutex_t lock;

	/*
	 * Current run being used to service allocations of this bin's size
	 * class.
	 */
	arena_run_t *runcur;

	/*
	 * Tree of non-full runs.  This tree is used when looking for an
	 * existing run when runcur is no longer usable.  We choose the
	 * non-full run that is lowest in memory; this policy tends to keep
	 * objects packed well, and it can also help reduce the number of
	 * almost-empty chunks.
	 */
	arena_run_tree_t runs;

	/* Bin statistics. */
	malloc_bin_stats_t stats;
};

struct arena_s {
	/* This arena's index within the arenas array. */
	unsigned ind;

	/*
	 * Number of threads currently assigned to this arena.  This field is
	 * protected by arenas_lock.
	 */
	unsigned nthreads;

	/*
	 * There are three classes of arena operations from a locking
	 * perspective:
	 * 1) Thread assignment (modifies nthreads) is protected by
	 *    arenas_lock.
	 * 2) Bin-related operations are protected by bin locks.
	 * 3) Chunk- and run-related operations are protected by this mutex.
	 */
	malloc_mutex_t lock;

	arena_stats_t stats;
	/*
	 * List of tcaches for extant threads associated with this arena.
	 * Stats from these are merged incrementally, and at exit.
	 */
	ql_head(tcache_t) tcache_ql;

	uint64_t prof_accumbytes;

	/* List of dirty-page-containing chunks this arena manages. */
	ql_head(arena_chunk_t) chunks_dirty;

	/*
	 * In order to avoid rapid chunk allocation/deallocation when an arena
	 * oscillates right on the cusp of needing a new chunk, cache the most
	 * recently freed chunk.  The spare is left in the arena's chunk trees
	 * until it is deleted.
	 *
	 * There is one spare chunk per arena, rather than one spare total, in
	 * order to avoid interactions between multiple threads that could make
	 * a single spare inadequate.
	 */
	arena_chunk_t *spare;

	/* Number of pages in active runs. */
	size_t nactive;

	/*
	 * Current count of pages within unused runs that are potentially
	 * dirty, and for which madvise(... MADV_DONTNEED) has not been called.
	 * By tracking this, we can institute a limit on how much dirty unused
	 * memory is mapped for each arena.
	 */
	size_t ndirty;

	/*
	 * Approximate number of pages being purged.  It is possible for
	 * multiple threads to purge dirty pages concurrently, and they use
	 * npurgatory to indicate the total number of pages all threads are
	 * attempting to purge.
	 */
	size_t npurgatory;

	/*
	 * Size/address-ordered trees of this arena's available runs.  The
	 * trees are used for first-best-fit run allocation.  The dirty tree
	 * contains runs with dirty pages (i.e. very likely to have been
	 * touched and therefore have associated physical pages), whereas the
	 * clean tree contains runs with pages that either have no associated
	 * physical pages, or have pages that the kernel may recycle at any
	 * time due to previous madvise(2) calls.  The dirty tree is used in
	 * preference to the clean tree for allocations, because using dirty
	 * pages reduces the amount of dirty purging necessary to keep the
	 * active:dirty page ratio below the purge threshold.
	 */
	arena_avail_tree_t runs_avail_clean;
	arena_avail_tree_t runs_avail_dirty;

	/* bins is used to store trees of free regions. */
	arena_bin_t bins[NBINS];
};

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

extern ssize_t opt_lg_dirty_mult;
/*
 * small_size2bin is a compact lookup table that rounds request sizes up to
 * size classes.  In order to reduce cache footprint, the table is compressed,
 * and all accesses are via the SMALL_SIZE2BIN macro.
 */
extern uint8_t const small_size2bin[];
#define SMALL_SIZE2BIN(s) (small_size2bin[(s-1) >> LG_TINY_MIN])
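/*
 * Worked example (values are illustrative; LG_TINY_MIN is configuration
 * dependent): with LG_TINY_MIN == 3, a request of 17 bytes yields
 * SMALL_SIZE2BIN(17) == small_size2bin[(17-1) >> 3] == small_size2bin[2],
 * i.e. the table needs only one entry per 8-byte quantum rather than one per
 * byte, which is what keeps its cache footprint small.
 */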
extern arena_bin_info_t arena_bin_info[NBINS];

/* Number of large size classes. */
#define nlclasses (chunk_npages - map_bias)

void arena_purge_all(arena_t *arena);
void arena_prof_accum(arena_t *arena, uint64_t accumbytes);
void arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin,
    size_t binind, uint64_t prof_accumbytes);
void *arena_malloc_small(arena_t *arena, size_t size, bool zero);
void *arena_malloc_large(arena_t *arena, size_t size, bool zero);
void *arena_palloc(arena_t *arena, size_t size, size_t alloc_size,
    size_t alignment, bool zero);
size_t arena_salloc(const void *ptr);
void arena_prof_promoted(const void *ptr, size_t size);
size_t arena_salloc_demote(const void *ptr);
void arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    arena_chunk_map_t *mapelm);
void arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr);
void arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty,
    arena_stats_t *astats, malloc_bin_stats_t *bstats,
    malloc_large_stats_t *lstats);
void *arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
    size_t extra, bool zero);
void *arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
    size_t alignment, bool zero, bool try_tcache);
bool arena_new(arena_t *arena, unsigned ind);
void arena_boot(void);
void arena_prefork(arena_t *arena);
void arena_postfork_parent(arena_t *arena);
void arena_postfork_child(arena_t *arena);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
size_t arena_bin_index(arena_t *arena, arena_bin_t *bin);
unsigned arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
    const void *ptr);
prof_ctx_t *arena_prof_ctx_get(const void *ptr);
void arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx);
void *arena_malloc(arena_t *arena, size_t size, bool zero, bool try_tcache);
void *arena_malloc_prechosen(arena_t *arena, size_t size, bool zero);
void arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    bool try_tcache);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
JEMALLOC_INLINE size_t
arena_bin_index(arena_t *arena, arena_bin_t *bin)
{
	size_t binind = bin - arena->bins;
	assert(binind < NBINS);
	return (binind);
}

JEMALLOC_INLINE unsigned
arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
{
	unsigned shift, diff, regind;
	size_t size;

	/*
	 * Freeing a pointer lower than region zero can cause assertion
	 * failure.
	 */
	assert((uintptr_t)ptr >= (uintptr_t)run +
	    (uintptr_t)bin_info->reg0_offset);

	/*
	 * Avoid doing division with a variable divisor if possible.  Using
	 * actual division here can reduce allocator throughput by over 20%!
	 */
	diff = (unsigned)((uintptr_t)ptr - (uintptr_t)run -
	    bin_info->reg0_offset);

	/* Rescale (factor powers of 2 out of the numerator and denominator). */
	size = bin_info->reg_size;
	shift = ffs(size) - 1;
	diff >>= shift;
	size >>= shift;

	if (size == 1) {
		/* The divisor was a power of 2. */
		regind = diff;
	} else {
		/*
		 * To divide by a number D that is not a power of two we
		 * multiply by (2^21 / D) and then right shift by 21 positions.
		 *
		 *   X / D
		 *
		 * becomes
		 *
		 *   (X * size_invs[D - 3]) >> SIZE_INV_SHIFT
		 *
		 * We can omit the first three elements, because we never
		 * divide by 0, and 1 and 2 are both powers of two, which are
		 * handled above.
		 */
#define SIZE_INV_SHIFT ((sizeof(unsigned) << 3) - LG_RUN_MAXREGS)
#define SIZE_INV(s) (((1U << SIZE_INV_SHIFT) / (s)) + 1)
		static const unsigned size_invs[] = {
			SIZE_INV(3),
			SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7),
			SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11),
			SIZE_INV(12), SIZE_INV(13), SIZE_INV(14), SIZE_INV(15),
			SIZE_INV(16), SIZE_INV(17), SIZE_INV(18), SIZE_INV(19),
			SIZE_INV(20), SIZE_INV(21), SIZE_INV(22), SIZE_INV(23),
			SIZE_INV(24), SIZE_INV(25), SIZE_INV(26), SIZE_INV(27),
			SIZE_INV(28), SIZE_INV(29), SIZE_INV(30), SIZE_INV(31)
		};

		if (size <= ((sizeof(size_invs) / sizeof(unsigned)) + 2))
			regind = (diff * size_invs[size - 3]) >> SIZE_INV_SHIFT;
		else
			regind = diff / size;
#undef SIZE_INV
#undef SIZE_INV_SHIFT
	}
	assert(diff == regind * size);
	assert(regind < bin_info->nregs);

	return (regind);
}
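/*
 * Worked example of the reciprocal multiplication above (illustrative only):
 * for a bin with reg_size == 48 and a region at byte offset 96 from region
 * zero, ffs(48)-1 == 4, so diff becomes 96>>4 == 6 and size becomes
 * 48>>4 == 3.  With a 32-bit unsigned type, SIZE_INV_SHIFT is
 * 32 - LG_RUN_MAXREGS == 21, so size_invs[0] == (1<<21)/3 + 1 == 699051, and
 * (6 * 699051) >> 21 == 2, which matches 96/48 == 2 without a hardware
 * divide.
 */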
JEMALLOC_INLINE prof_ctx_t *
arena_prof_ctx_get(const void *ptr)
{
	prof_ctx_t *ret;
	arena_chunk_t *chunk;
	size_t pageind, mapbits;

	cassert(config_prof);
	assert(ptr != NULL);
	assert(CHUNK_ADDR2BASE(ptr) != ptr);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	mapbits = chunk->map[pageind-map_bias].bits;
	assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
	if ((mapbits & CHUNK_MAP_LARGE) == 0) {
		if (prof_promote)
			ret = (prof_ctx_t *)(uintptr_t)1U;
		else {
			arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
			    (uintptr_t)((pageind - (mapbits >> LG_PAGE)) <<
			    LG_PAGE));
			size_t binind = arena_bin_index(chunk->arena, run->bin);
			arena_bin_info_t *bin_info = &arena_bin_info[binind];
			unsigned regind;

			regind = arena_run_regind(run, bin_info, ptr);
			ret = *(prof_ctx_t **)((uintptr_t)run +
			    bin_info->ctx0_offset + (regind *
			    sizeof(prof_ctx_t *)));
		}
	} else
		ret = chunk->map[pageind-map_bias].prof_ctx;

	return (ret);
}

JEMALLOC_INLINE void
arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
{
	arena_chunk_t *chunk;
	size_t pageind, mapbits;

	cassert(config_prof);
	assert(ptr != NULL);
	assert(CHUNK_ADDR2BASE(ptr) != ptr);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	mapbits = chunk->map[pageind-map_bias].bits;
	assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
	if ((mapbits & CHUNK_MAP_LARGE) == 0) {
		if (prof_promote == false) {
			arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
			    (uintptr_t)((pageind - (mapbits >> LG_PAGE)) <<
			    LG_PAGE));
			arena_bin_t *bin = run->bin;
			size_t binind;
			arena_bin_info_t *bin_info;
			unsigned regind;

			binind = arena_bin_index(chunk->arena, bin);
			bin_info = &arena_bin_info[binind];
			regind = arena_run_regind(run, bin_info, ptr);

			*((prof_ctx_t **)((uintptr_t)run + bin_info->ctx0_offset
			    + (regind * sizeof(prof_ctx_t *)))) = ctx;
		} else
			assert((uintptr_t)ctx == (uintptr_t)1U);
	} else
		chunk->map[pageind-map_bias].prof_ctx = ctx;
}

JEMALLOC_INLINE void *
arena_malloc(arena_t *arena, size_t size, bool zero, bool try_tcache)
{
	tcache_t *tcache;

	assert(size != 0);
	assert(size <= arena_maxclass);

	if (size <= SMALL_MAXCLASS) {
		if (try_tcache && (tcache = tcache_get(true)) != NULL)
			return (tcache_alloc_small(tcache, size, zero));
		else {
			return (arena_malloc_small(choose_arena(arena), size,
			    zero));
		}
	} else {
		/*
		 * Initialize tcache after checking size in order to avoid
		 * infinite recursion during tcache initialization.
		 */
		if (try_tcache && size <= tcache_maxclass && (tcache =
		    tcache_get(true)) != NULL)
			return (tcache_alloc_large(tcache, size, zero));
		else {
			return (arena_malloc_large(choose_arena(arena), size,
			    zero));
		}
	}
}

JEMALLOC_INLINE void *
arena_malloc_prechosen(arena_t *arena, size_t size, bool zero)
{

	assert(size != 0);
	assert(size <= arena_maxclass);

	if (size <= SMALL_MAXCLASS)
		return (arena_malloc_small(arena, size, zero));
	else
		return (arena_malloc_large(arena, size, zero));
}
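/*
 * Hypothetical usage sketch (the caller shown here is illustrative, not part
 * of this header): a malloc-style wrapper would normally pass a NULL arena
 * and allow the thread cache to be used, while deallocation goes through
 * arena_dalloc() below with the owning chunk already computed:
 *
 *	void *p = arena_malloc(NULL, size, false, true);
 *	...
 *	arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(p);
 *	arena_dalloc(chunk->arena, chunk, p, true);
 *
 * arena_malloc_prechosen() above is the tcache-free variant for callers that
 * have already selected an arena (e.g. internal allocations).
 */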
JEMALLOC_INLINE void
arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr, bool try_tcache)
{
	size_t pageind;
	arena_chunk_map_t *mapelm;
	tcache_t *tcache;

	assert(arena != NULL);
	assert(chunk->arena == arena);
	assert(ptr != NULL);
	assert(CHUNK_ADDR2BASE(ptr) != ptr);

	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	mapelm = &chunk->map[pageind-map_bias];
	assert((mapelm->bits & CHUNK_MAP_ALLOCATED) != 0);
	if ((mapelm->bits & CHUNK_MAP_LARGE) == 0) {
		/* Small allocation. */
		if (try_tcache && (tcache = tcache_get(false)) != NULL)
			tcache_dalloc_small(tcache, ptr);
		else {
			arena_run_t *run;
			arena_bin_t *bin;

			run = (arena_run_t *)((uintptr_t)chunk +
			    (uintptr_t)((pageind - (mapelm->bits >> LG_PAGE)) <<
			    LG_PAGE));
			bin = run->bin;
			if (config_debug) {
				size_t binind = arena_bin_index(arena, bin);
				UNUSED arena_bin_info_t *bin_info =
				    &arena_bin_info[binind];
				assert(((uintptr_t)ptr - ((uintptr_t)run +
				    (uintptr_t)bin_info->reg0_offset)) %
				    bin_info->reg_size == 0);
			}
			malloc_mutex_lock(&bin->lock);
			arena_dalloc_bin(arena, chunk, ptr, mapelm);
			malloc_mutex_unlock(&bin->lock);
		}
	} else {
		size_t size = mapelm->bits & ~PAGE_MASK;

		assert(((uintptr_t)ptr & PAGE_MASK) == 0);

		if (try_tcache && size <= tcache_maxclass && (tcache =
		    tcache_get(false)) != NULL) {
			tcache_dalloc_large(tcache, ptr, size);
		} else {
			malloc_mutex_lock(&arena->lock);
			arena_dalloc_large(arena, chunk, ptr);
			malloc_mutex_unlock(&arena->lock);
		}
	}
}
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/