arena.h revision 122449b073bcbaa504c4f592ea2d733503c272d2
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

/*
 * RUN_MAX_OVRHD indicates maximum desired run header overhead.  Runs are sized
 * as small as possible such that this setting is still honored, without
 * violating other constraints.  The goal is to make runs as small as possible
 * without exceeding a per-run external fragmentation threshold.
 *
 * We use binary fixed point math for overhead computations, where the binary
 * point is implicitly RUN_BFP bits to the left.
 *
 * Note that it is possible to set RUN_MAX_OVRHD low enough that it cannot be
 * honored for some/all object sizes, since when heap profiling is enabled
 * there is one pointer of header overhead per object (plus a constant).  This
 * constraint is relaxed (ignored) for runs that are so small that the
 * per-region overhead is greater than:
 *
 *   (RUN_MAX_OVRHD / (reg_interval << (3+RUN_BFP)))
 */
#define RUN_BFP                 12
/*                                    \/   Implicit binary fixed point. */
#define RUN_MAX_OVRHD           0x0000003dU
#define RUN_MAX_OVRHD_RELAX     0x00001800U

/* Maximum number of regions in one run. */
#define LG_RUN_MAXREGS          11
#define RUN_MAXREGS             (1U << LG_RUN_MAXREGS)

/*
 * Minimum redzone size.  Redzones may be larger than this if necessary to
 * preserve region alignment.
 */
#define REDZONE_MINSIZE         16

/*
 * The minimum ratio of active:dirty pages per arena is computed as:
 *
 *   (nactive >> opt_lg_dirty_mult) >= ndirty
 *
 * So, supposing that opt_lg_dirty_mult is 5, there can be no fewer than 32
 * times as many active pages as dirty pages.
 */
#define LG_DIRTY_MULT_DEFAULT   5

typedef struct arena_chunk_map_s arena_chunk_map_t;
typedef struct arena_chunk_s arena_chunk_t;
typedef struct arena_run_s arena_run_t;
typedef struct arena_bin_info_s arena_bin_info_t;
typedef struct arena_bin_s arena_bin_t;
typedef struct arena_s arena_t;

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
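
/*
 * Worked example of the fixed point encoding above (illustrative note, not
 * part of the original header): with RUN_BFP == 12, a value v represents the
 * fraction v/2^12, so RUN_MAX_OVRHD == 0x3d == 61 encodes 61/4096, i.e. a
 * maximum desired header overhead of roughly 1.5% per run.  A run-sizing
 * check based on it might be sketched as:
 *
 *   size_t ovrhd = (hdr_size << RUN_BFP) / run_size;
 *   bool ok = (ovrhd <= RUN_MAX_OVRHD);
 *
 * where hdr_size and run_size are hypothetical names, not fields defined in
 * this header, and the actual run sizing logic lives in arena.c.
 */
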
#ifdef JEMALLOC_H_STRUCTS

/* Each element of the chunk map corresponds to one page within the chunk. */
struct arena_chunk_map_s {
#ifndef JEMALLOC_PROF
	/*
	 * Overlay prof_ctx in order to allow it to be referenced by dead code.
	 * Such antics aren't warranted for per arena data structures, but
	 * chunk map overhead accounts for a percentage of memory, rather than
	 * being just a fixed cost.
	 */
	union {
#endif
	union {
		/*
		 * Linkage for run trees.  There are two disjoint uses:
		 *
		 * 1) arena_t's runs_avail_{clean,dirty} trees.
		 * 2) arena_run_t conceptually uses this linkage for in-use
		 *    non-full runs, rather than directly embedding linkage.
		 */
		rb_node(arena_chunk_map_t)	rb_link;
		/*
		 * List of runs currently in purgatory.  arena_chunk_purge()
		 * temporarily allocates runs that contain dirty pages while
		 * purging, so that other threads cannot use the runs while the
		 * purging thread is operating without the arena lock held.
		 */
		ql_elm(arena_chunk_map_t)	ql_link;
	}				u;

	/* Profile counters, used for large object runs. */
	prof_ctx_t			*prof_ctx;
#ifndef JEMALLOC_PROF
	}; /* union { ... }; */
#endif

	/*
	 * Run address (or size) and various flags are stored together.  The
	 * bit layout looks like (assuming 32-bit system):
	 *
	 *   ???????? ???????? ????---- ----dula
	 *
	 * ? : Unallocated: Run address for first/last pages, unset for
	 *                  internal pages.
	 *     Small: Run page offset.
	 *     Large: Run size for first page, unset for trailing pages.
	 * - : Unused.
	 * d : dirty?
	 * u : unzeroed?
	 * l : large?
	 * a : allocated?
	 *
	 * Following are example bit patterns for the three types of runs.
	 *
	 * p : run page offset
	 * s : run size
	 * c : (binind+1) for size class (used only if prof_promote is true)
	 * x : don't care
	 * - : 0
	 * + : 1
	 * [DULA] : bit set
	 * [dula] : bit unset
	 *
	 *   Unallocated (clean):
	 *     ssssssss ssssssss ssss---- ----du-a
	 *     xxxxxxxx xxxxxxxx xxxx---- -----Uxx
	 *     ssssssss ssssssss ssss---- ----dU-a
	 *
	 *   Unallocated (dirty):
	 *     ssssssss ssssssss ssss---- ----D--a
	 *     xxxxxxxx xxxxxxxx xxxx---- ----xxxx
	 *     ssssssss ssssssss ssss---- ----D--a
	 *
	 *   Small:
	 *     pppppppp pppppppp pppp---- ----d--A
	 *     pppppppp pppppppp pppp---- -------A
	 *     pppppppp pppppppp pppp---- ----d--A
	 *
	 *   Large:
	 *     ssssssss ssssssss ssss---- ----D-LA
	 *     xxxxxxxx xxxxxxxx xxxx---- ----xxxx
	 *     -------- -------- -------- ----D-LA
	 *
	 *   Large (sampled, size <= PAGE):
	 *     ssssssss ssssssss sssscccc ccccD-LA
	 *
	 *   Large (not sampled, size == PAGE):
	 *     ssssssss ssssssss ssss---- ----D-LA
	 */
	size_t				bits;
#define CHUNK_MAP_CLASS_SHIFT   4
#define CHUNK_MAP_CLASS_MASK    ((size_t)0xff0U)
#define CHUNK_MAP_FLAGS_MASK    ((size_t)0xfU)
#define CHUNK_MAP_DIRTY         ((size_t)0x8U)
#define CHUNK_MAP_UNZEROED      ((size_t)0x4U)
#define CHUNK_MAP_LARGE         ((size_t)0x2U)
#define CHUNK_MAP_ALLOCATED     ((size_t)0x1U)
#define CHUNK_MAP_KEY           CHUNK_MAP_ALLOCATED
};
typedef rb_tree(arena_chunk_map_t) arena_avail_tree_t;
typedef rb_tree(arena_chunk_map_t) arena_run_tree_t;

/* Arena chunk header. */
struct arena_chunk_s {
	/* Arena that owns the chunk. */
	arena_t			*arena;

	/* Linkage for the arena's chunks_dirty list. */
	ql_elm(arena_chunk_t)	link_dirty;

	/*
	 * True if the chunk is currently in the chunks_dirty list, due to
	 * having at some point contained one or more dirty pages.  Removal
	 * from chunks_dirty is lazy, so (dirtied && ndirty == 0) is possible.
	 */
	bool			dirtied;

	/* Number of dirty pages. */
	size_t			ndirty;

	/*
	 * Map of pages within chunk that keeps track of free/large/small.  The
	 * first map_bias entries are omitted, since the chunk header does not
	 * need to be tracked in the map.  This omission saves a header page
	 * for common chunk sizes (e.g. 4 MiB).
	 */
	arena_chunk_map_t	map[1]; /* Dynamically sized. */
};
typedef rb_tree(arena_chunk_t) arena_chunk_tree_t;

struct arena_run_s {
	/* Bin this run is associated with. */
	arena_bin_t	*bin;

	/* Index of next region that has never been allocated, or nregs. */
	uint32_t	nextind;

	/* Number of free regions in run. */
	unsigned	nfree;
};
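
/*
 * Illustrative sketch (not part of the original header): decoding the map
 * bits documented above for page pageind of a chunk, assuming chunk and
 * pageind are in scope in some hypothetical surrounding function:
 *
 *   size_t mapbits = chunk->map[pageind - map_bias].bits;
 *   if ((mapbits & CHUNK_MAP_ALLOCATED) == 0) {
 *       size_t run_size = mapbits & ~PAGE_MASK;    -- unallocated run
 *   } else if ((mapbits & CHUNK_MAP_LARGE) == 0) {
 *       size_t run_pgoff = mapbits >> LG_PAGE;     -- small run page offset
 *   } else {
 *       size_t run_size = mapbits & ~PAGE_MASK;    -- large run, first page
 *   }
 */
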
/*
 * Read-only information associated with each element of arena_t's bins array
 * is stored separately, partly to reduce memory usage (only one copy, rather
 * than one per arena), but mainly to avoid false cacheline sharing.
 *
 * Each run has the following layout:
 *
 *               /--------------------\
 *               | arena_run_t header |
 *               | ...                |
 * bitmap_offset | bitmap             |
 *               | ...                |
 *   ctx0_offset | ctx map            |
 *               | ...                |
 *               |--------------------|
 *               | redzone            |
 *   reg0_offset | region 0           |
 *               | redzone            |
 *               |--------------------| \
 *               | redzone            | |
 *               | region 1           |  > reg_interval
 *               | redzone            | /
 *               |--------------------|
 *               | ...                |
 *               | ...                |
 *               | ...                |
 *               |--------------------|
 *               | redzone            |
 *               | region nregs-1     |
 *               | redzone            |
 *               |--------------------|
 *               | alignment pad?     |
 *               \--------------------/
 *
 * reg_interval has at least the same minimum alignment as reg_size; this
 * preserves the alignment constraint that sa2u() depends on.  Alignment pad is
 * either 0 or redzone_size; it is present only if needed to align reg0_offset.
 */
struct arena_bin_info_s {
	/* Size of regions in a run for this bin's size class. */
	size_t		reg_size;

	/* Redzone size. */
	size_t		redzone_size;

	/* Interval between regions (reg_size + (redzone_size << 1)). */
	size_t		reg_interval;

	/* Total size of a run for this bin's size class. */
	size_t		run_size;

	/* Total number of regions in a run for this bin's size class. */
	uint32_t	nregs;

	/*
	 * Offset of first bitmap_t element in a run header for this bin's size
	 * class.
	 */
	uint32_t	bitmap_offset;

	/*
	 * Metadata used to manipulate bitmaps for runs associated with this
	 * bin.
	 */
	bitmap_info_t	bitmap_info;

	/*
	 * Offset of first (prof_ctx_t *) in a run header for this bin's size
	 * class, or 0 if (config_prof == false || opt_prof == false).
	 */
	uint32_t	ctx0_offset;

	/* Offset of first region in a run for this bin's size class. */
	uint32_t	reg0_offset;
};

struct arena_bin_s {
	/*
	 * All operations on runcur, runs, and stats require that lock be
	 * locked.  Run allocation/deallocation are protected by the arena
	 * lock, which may be acquired while holding one or more bin locks,
	 * but not vice versa.
	 */
	malloc_mutex_t	lock;

	/*
	 * Current run being used to service allocations of this bin's size
	 * class.
	 */
	arena_run_t	*runcur;

	/*
	 * Tree of non-full runs.  This tree is used when looking for an
	 * existing run when runcur is no longer usable.  We choose the
	 * non-full run that is lowest in memory; this policy tends to keep
	 * objects packed well, and it can also help reduce the number of
	 * almost-empty chunks.
	 */
	arena_run_tree_t runs;

	/* Bin statistics. */
	malloc_bin_stats_t stats;
};
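
/*
 * Worked example of the run layout above (illustrative note, not part of the
 * original header): the address of region i within a run is
 *
 *   (uintptr_t)run + bin_info->reg0_offset + i * bin_info->reg_interval
 *
 * Since reg_interval == reg_size + (redzone_size << 1), a bin with
 * reg_size == 32 and redzone_size == 16 (hypothetical values) has
 * reg_interval == 64, so region 3 starts 192 bytes past reg0_offset.
 */
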
struct arena_s {
	/* This arena's index within the arenas array. */
	unsigned		ind;

	/*
	 * Number of threads currently assigned to this arena.  This field is
	 * protected by arenas_lock.
	 */
	unsigned		nthreads;

	/*
	 * There are three classes of arena operations from a locking
	 * perspective:
	 * 1) Thread assignment (modifies nthreads) is protected by
	 *    arenas_lock.
	 * 2) Bin-related operations are protected by bin locks.
	 * 3) Chunk- and run-related operations are protected by this mutex.
	 */
	malloc_mutex_t		lock;

	arena_stats_t		stats;
	/*
	 * List of tcaches for extant threads associated with this arena.
	 * Stats from these are merged incrementally, and at exit.
	 */
	ql_head(tcache_t)	tcache_ql;

	uint64_t		prof_accumbytes;

	/* List of dirty-page-containing chunks this arena manages. */
	ql_head(arena_chunk_t)	chunks_dirty;

	/*
	 * In order to avoid rapid chunk allocation/deallocation when an arena
	 * oscillates right on the cusp of needing a new chunk, cache the most
	 * recently freed chunk.  The spare is left in the arena's chunk trees
	 * until it is deleted.
	 *
	 * There is one spare chunk per arena, rather than one spare total, in
	 * order to avoid interactions between multiple threads that could make
	 * a single spare inadequate.
	 */
	arena_chunk_t		*spare;

	/* Number of pages in active runs. */
	size_t			nactive;

	/*
	 * Current count of pages within unused runs that are potentially
	 * dirty, and for which madvise(... MADV_DONTNEED) has not been called.
	 * By tracking this, we can institute a limit on how much dirty unused
	 * memory is mapped for each arena.
	 */
	size_t			ndirty;

	/*
	 * Approximate number of pages being purged.  It is possible for
	 * multiple threads to purge dirty pages concurrently, and they use
	 * npurgatory to indicate the total number of pages all threads are
	 * attempting to purge.
	 */
	size_t			npurgatory;

	/*
	 * Size/address-ordered trees of this arena's available runs.  The
	 * trees are used for first-best-fit run allocation.  The dirty tree
	 * contains runs with dirty pages (i.e. very likely to have been
	 * touched and therefore have associated physical pages), whereas the
	 * clean tree contains runs with pages that either have no associated
	 * physical pages, or have pages that the kernel may recycle at any
	 * time due to previous madvise(2) calls.  The dirty tree is used in
	 * preference to the clean tree for allocations, because using dirty
	 * pages reduces the amount of dirty purging necessary to keep the
	 * active:dirty page ratio below the purge threshold.
	 */
	arena_avail_tree_t	runs_avail_clean;
	arena_avail_tree_t	runs_avail_dirty;

	/* bins is used to store trees of free regions. */
	arena_bin_t		bins[NBINS];
};

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

extern ssize_t	opt_lg_dirty_mult;
/*
 * small_size2bin is a compact lookup table that rounds request sizes up to
 * size classes.  In order to reduce cache footprint, the table is compressed,
 * and all accesses are via the SMALL_SIZE2BIN macro.
 */
extern uint8_t const	small_size2bin[];
#define SMALL_SIZE2BIN(s)	(small_size2bin[(s-1) >> LG_TINY_MIN])

extern arena_bin_info_t	arena_bin_info[NBINS];
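
/*
 * Worked example (illustrative note, not part of the original header):
 * assuming LG_TINY_MIN == 3, i.e. an 8-byte minimum size class, a 13-byte
 * request evaluates SMALL_SIZE2BIN(13) == small_size2bin[(13-1) >> 3] ==
 * small_size2bin[1].  All requests of 9..16 bytes share that entry, which is
 * how indexing at 8-byte granularity keeps the table compact.
 */
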
/* Number of large size classes. */
#define nlclasses	(chunk_npages - map_bias)

void	arena_purge_all(arena_t *arena);
void	arena_prof_accum(arena_t *arena, uint64_t accumbytes);
void	arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin,
    size_t binind, uint64_t prof_accumbytes);
void	arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info,
    bool zero);
void	arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info);
void	*arena_malloc_small(arena_t *arena, size_t size, bool zero);
void	*arena_malloc_large(arena_t *arena, size_t size, bool zero);
void	*arena_palloc(arena_t *arena, size_t size, size_t alloc_size,
    size_t alignment, bool zero);
size_t	arena_salloc(const void *ptr, bool demote);
void	arena_prof_promoted(const void *ptr, size_t size);
void	arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    arena_chunk_map_t *mapelm);
void	arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr);
void	arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty,
    arena_stats_t *astats, malloc_bin_stats_t *bstats,
    malloc_large_stats_t *lstats);
void	*arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
    size_t extra, bool zero);
void	*arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
    size_t alignment, bool zero, bool try_tcache);
bool	arena_new(arena_t *arena, unsigned ind);
void	arena_boot(void);
void	arena_prefork(arena_t *arena);
void	arena_postfork_parent(arena_t *arena);
void	arena_postfork_child(arena_t *arena);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
size_t	arena_bin_index(arena_t *arena, arena_bin_t *bin);
unsigned	arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
    const void *ptr);
prof_ctx_t	*arena_prof_ctx_get(const void *ptr);
void	arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx);
void	*arena_malloc(arena_t *arena, size_t size, bool zero, bool try_tcache);
void	arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    bool try_tcache);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
JEMALLOC_INLINE size_t
arena_bin_index(arena_t *arena, arena_bin_t *bin)
{
	size_t binind = bin - arena->bins;
	assert(binind < NBINS);
	return (binind);
}

JEMALLOC_INLINE unsigned
arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
{
	unsigned shift, diff, regind;
	size_t interval;

	/*
	 * Freeing a pointer lower than region zero can cause assertion
	 * failure.
	 */
	assert((uintptr_t)ptr >= (uintptr_t)run +
	    (uintptr_t)bin_info->reg0_offset);

	/*
	 * Avoid doing division with a variable divisor if possible.  Using
	 * actual division here can reduce allocator throughput by over 20%!
	 */
	diff = (unsigned)((uintptr_t)ptr - (uintptr_t)run -
	    bin_info->reg0_offset);

	/* Rescale (factor powers of 2 out of the numerator and denominator). */
	interval = bin_info->reg_interval;
	shift = ffs(interval) - 1;
	diff >>= shift;
	interval >>= shift;

	if (interval == 1) {
		/* The divisor was a power of 2. */
		regind = diff;
	} else {
		/*
		 * To divide by a number D that is not a power of two we
		 * multiply by (2^SIZE_INV_SHIFT / D) and then right shift by
		 * SIZE_INV_SHIFT positions (21, given 32-bit unsigned and
		 * LG_RUN_MAXREGS == 11).
		 *
		 *   X / D
		 *
		 * becomes
		 *
		 *   (X * interval_invs[D - 3]) >> SIZE_INV_SHIFT
		 *
		 * We can omit the first three elements, because we never
		 * divide by 0, and 1 and 2 are both powers of two, which are
		 * handled above.
		 */
#define SIZE_INV_SHIFT	((sizeof(unsigned) << 3) - LG_RUN_MAXREGS)
#define SIZE_INV(s)	(((1U << SIZE_INV_SHIFT) / (s)) + 1)
		static const unsigned interval_invs[] = {
		    SIZE_INV(3),
		    SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7),
		    SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11),
		    SIZE_INV(12), SIZE_INV(13), SIZE_INV(14), SIZE_INV(15),
		    SIZE_INV(16), SIZE_INV(17), SIZE_INV(18), SIZE_INV(19),
		    SIZE_INV(20), SIZE_INV(21), SIZE_INV(22), SIZE_INV(23),
		    SIZE_INV(24), SIZE_INV(25), SIZE_INV(26), SIZE_INV(27),
		    SIZE_INV(28), SIZE_INV(29), SIZE_INV(30), SIZE_INV(31)
		};

		if (interval <= ((sizeof(interval_invs) / sizeof(unsigned)) +
		    2)) {
			regind = (diff * interval_invs[interval - 3]) >>
			    SIZE_INV_SHIFT;
		} else
			regind = diff / interval;
#undef SIZE_INV
#undef SIZE_INV_SHIFT
	}
	assert(diff == regind * interval);
	assert(regind < bin_info->nregs);

	return (regind);
}
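
/*
 * Worked example of the reciprocal division above (illustrative note, not
 * part of the original header): with 32-bit unsigned and LG_RUN_MAXREGS ==
 * 11, SIZE_INV_SHIFT == 21 and SIZE_INV(3) == (2^21 / 3) + 1 == 699051.
 * For diff == 96, (96 * 699051) >> 21 == 67108896 >> 21 == 32 == 96 / 3,
 * so the multiply-and-shift agrees with true division.
 */
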
JEMALLOC_INLINE prof_ctx_t *
arena_prof_ctx_get(const void *ptr)
{
	prof_ctx_t *ret;
	arena_chunk_t *chunk;
	size_t pageind, mapbits;

	cassert(config_prof);
	assert(ptr != NULL);
	assert(CHUNK_ADDR2BASE(ptr) != ptr);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	mapbits = chunk->map[pageind-map_bias].bits;
	assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
	if ((mapbits & CHUNK_MAP_LARGE) == 0) {
		if (prof_promote)
			ret = (prof_ctx_t *)(uintptr_t)1U;
		else {
			arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
			    (uintptr_t)((pageind - (mapbits >> LG_PAGE)) <<
			    LG_PAGE));
			size_t binind = arena_bin_index(chunk->arena, run->bin);
			arena_bin_info_t *bin_info = &arena_bin_info[binind];
			unsigned regind;

			regind = arena_run_regind(run, bin_info, ptr);
			ret = *(prof_ctx_t **)((uintptr_t)run +
			    bin_info->ctx0_offset + (regind *
			    sizeof(prof_ctx_t *)));
		}
	} else
		ret = chunk->map[pageind-map_bias].prof_ctx;

	return (ret);
}

JEMALLOC_INLINE void
arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
{
	arena_chunk_t *chunk;
	size_t pageind, mapbits;

	cassert(config_prof);
	assert(ptr != NULL);
	assert(CHUNK_ADDR2BASE(ptr) != ptr);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	mapbits = chunk->map[pageind-map_bias].bits;
	assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
	if ((mapbits & CHUNK_MAP_LARGE) == 0) {
		if (prof_promote == false) {
			arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
			    (uintptr_t)((pageind - (mapbits >> LG_PAGE)) <<
			    LG_PAGE));
			arena_bin_t *bin = run->bin;
			size_t binind;
			arena_bin_info_t *bin_info;
			unsigned regind;

			binind = arena_bin_index(chunk->arena, bin);
			bin_info = &arena_bin_info[binind];
			regind = arena_run_regind(run, bin_info, ptr);

			*((prof_ctx_t **)((uintptr_t)run + bin_info->ctx0_offset
			    + (regind * sizeof(prof_ctx_t *)))) = ctx;
		} else
			assert((uintptr_t)ctx == (uintptr_t)1U);
	} else
		chunk->map[pageind-map_bias].prof_ctx = ctx;
}
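
/*
 * Illustrative summary of the two functions above (not part of the original
 * header): for large allocations the prof_ctx_t pointer lives directly in
 * the chunk map element for the run's first page; for small allocations it
 * lives in the run header's ctx map, at
 *
 *   (uintptr_t)run + bin_info->ctx0_offset + regind * sizeof(prof_ctx_t *)
 *
 * unless prof_promote is true, in which case the sentinel value
 * (prof_ctx_t *)1U stands in for a real context on small runs.
 */
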
JEMALLOC_INLINE void *
arena_malloc(arena_t *arena, size_t size, bool zero, bool try_tcache)
{
	tcache_t *tcache;

	assert(size != 0);
	assert(size <= arena_maxclass);

	if (size <= SMALL_MAXCLASS) {
		if (try_tcache && (tcache = tcache_get(true)) != NULL)
			return (tcache_alloc_small(tcache, size, zero));
		else {
			return (arena_malloc_small(choose_arena(arena), size,
			    zero));
		}
	} else {
		/*
		 * Initialize tcache after checking size in order to avoid
		 * infinite recursion during tcache initialization.
		 */
		if (try_tcache && size <= tcache_maxclass && (tcache =
		    tcache_get(true)) != NULL)
			return (tcache_alloc_large(tcache, size, zero));
		else {
			return (arena_malloc_large(choose_arena(arena), size,
			    zero));
		}
	}
}

JEMALLOC_INLINE void
arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr, bool try_tcache)
{
	size_t pageind;
	arena_chunk_map_t *mapelm;
	tcache_t *tcache;

	assert(arena != NULL);
	assert(chunk->arena == arena);
	assert(ptr != NULL);
	assert(CHUNK_ADDR2BASE(ptr) != ptr);

	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	mapelm = &chunk->map[pageind-map_bias];
	assert((mapelm->bits & CHUNK_MAP_ALLOCATED) != 0);
	if ((mapelm->bits & CHUNK_MAP_LARGE) == 0) {
		/* Small allocation. */
		if (try_tcache && (tcache = tcache_get(false)) != NULL)
			tcache_dalloc_small(tcache, ptr);
		else {
			arena_run_t *run;
			arena_bin_t *bin;

			run = (arena_run_t *)((uintptr_t)chunk +
			    (uintptr_t)((pageind - (mapelm->bits >> LG_PAGE)) <<
			    LG_PAGE));
			bin = run->bin;
			if (config_debug) {
				size_t binind = arena_bin_index(arena, bin);
				UNUSED arena_bin_info_t *bin_info =
				    &arena_bin_info[binind];
				assert(((uintptr_t)ptr - ((uintptr_t)run +
				    (uintptr_t)bin_info->reg0_offset)) %
				    bin_info->reg_interval == 0);
			}
			malloc_mutex_lock(&bin->lock);
			arena_dalloc_bin(arena, chunk, ptr, mapelm);
			malloc_mutex_unlock(&bin->lock);
		}
	} else {
		size_t size = mapelm->bits & ~PAGE_MASK;

		assert(((uintptr_t)ptr & PAGE_MASK) == 0);

		if (try_tcache && size <= tcache_maxclass && (tcache =
		    tcache_get(false)) != NULL) {
			tcache_dalloc_large(tcache, ptr, size);
		} else {
			malloc_mutex_lock(&arena->lock);
			arena_dalloc_large(arena, chunk, ptr);
			malloc_mutex_unlock(&arena->lock);
		}
	}
}
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
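
/*
 * Illustrative call sketch (not part of the original header): a caller can
 * pair the fast paths above as
 *
 *   void *p = arena_malloc(NULL, 100, false, true);
 *   arena_chunk_t *c = (arena_chunk_t *)CHUNK_ADDR2BASE(p);
 *   arena_dalloc(c->arena, c, p, true);
 *
 * where a 100-byte request takes the small path (100 <= SMALL_MAXCLASS on
 * common configurations) and is served from the thread cache when one is
 * available.
 */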