arena.h revision 59113bcc94b9fc7549611afb99ca99cad1a7f196
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

/*
 * RUN_MAX_OVRHD indicates maximum desired run header overhead.  Runs are sized
 * as small as possible such that this setting is still honored, without
 * violating other constraints.  The goal is to make runs as small as possible
 * without exceeding a per run external fragmentation threshold.
 *
 * We use binary fixed point math for overhead computations, where the binary
 * point is implicitly RUN_BFP bits to the left.
 *
 * Note that it is possible to set RUN_MAX_OVRHD low enough that it cannot be
 * honored for some/all object sizes, since when heap profiling is enabled
 * there is one pointer of header overhead per object (plus a constant).  This
 * constraint is relaxed (ignored) for runs that are so small that the
 * per-region overhead is greater than:
 *
 *	RUN_MAX_OVRHD / (reg_interval << (3+RUN_BFP))
 */
#define	RUN_BFP			12
/*                                    \/   Implicit binary fixed point. */
#define	RUN_MAX_OVRHD		0x0000003dU
#define	RUN_MAX_OVRHD_RELAX	0x00001800U

/* Maximum number of regions in one run. */
#define	LG_RUN_MAXREGS		11
#define	RUN_MAXREGS		(1U << LG_RUN_MAXREGS)

/*
 * Minimum redzone size.  Redzones may be larger than this if necessary to
 * preserve region alignment.
 */
#define	REDZONE_MINSIZE		16

/*
 * The minimum ratio of active:dirty pages per arena is computed as:
 *
 *	(nactive >> opt_lg_dirty_mult) >= ndirty
 *
 * So, supposing that opt_lg_dirty_mult is 3, there can be no fewer than 8
 * times as many active pages as dirty pages.
 */
#define	LG_DIRTY_MULT_DEFAULT	3
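/*
 * Illustrative sketch (not part of the original header): one way the two
 * thresholds above could be evaluated.  The helper names and parameters are
 * hypothetical; jemalloc's actual run sizing and purging logic lives in
 * arena.c.  Overhead is expressed in binary fixed point with the point
 * RUN_BFP bits to the left, i.e. hdr_size/run_size scaled by 2^RUN_BFP.
 */
static inline bool
example_run_overhead_acceptable(size_t hdr_size, size_t run_size)
{

	/* (hdr_size << RUN_BFP) / run_size is the overhead ratio in fixed point. */
	return (((hdr_size << RUN_BFP) / run_size) <= RUN_MAX_OVRHD);
}

/*
 * Dirty page limit implied by the active:dirty ratio comment above: purging
 * is called for once ndirty exceeds (nactive >> lg_dirty_mult).  For example,
 * with lg_dirty_mult == LG_DIRTY_MULT_DEFAULT (3) and 8192 active pages, up
 * to 1024 dirty pages are tolerated.
 */
static inline bool
example_dirty_limit_exceeded(size_t nactive, size_t ndirty,
    ssize_t lg_dirty_mult)
{

	return (lg_dirty_mult >= 0 && ndirty > (nactive >> lg_dirty_mult));
}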
typedef struct arena_chunk_map_s arena_chunk_map_t;
typedef struct arena_chunk_s arena_chunk_t;
typedef struct arena_run_s arena_run_t;
typedef struct arena_bin_info_s arena_bin_info_t;
typedef struct arena_bin_s arena_bin_t;
typedef struct arena_s arena_t;

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

/* Each element of the chunk map corresponds to one page within the chunk. */
struct arena_chunk_map_s {
#ifndef JEMALLOC_PROF
	/*
	 * Overlay prof_ctx in order to allow it to be referenced by dead code.
	 * Such antics aren't warranted for per arena data structures, but
	 * chunk map overhead accounts for a percentage of memory, rather than
	 * being just a fixed cost.
	 */
	union {
#endif
	union {
		/*
		 * Linkage for run trees.  There are two disjoint uses:
		 *
		 * 1) arena_t's runs_avail tree.
		 * 2) arena_run_t conceptually uses this linkage for in-use
		 *    non-full runs, rather than directly embedding linkage.
		 */
		rb_node(arena_chunk_map_t)	rb_link;
		/*
		 * List of runs currently in purgatory.  arena_chunk_purge()
		 * temporarily allocates runs that contain dirty pages while
		 * purging, so that other threads cannot use the runs while the
		 * purging thread is operating without the arena lock held.
		 */
		ql_elm(arena_chunk_map_t)	ql_link;
	} u;

	/* Profile counters, used for large object runs. */
	prof_ctx_t			*prof_ctx;
#ifndef JEMALLOC_PROF
	}; /* union { ... }; */
#endif

	/*
	 * Run address (or size) and various flags are stored together.  The
	 * bit layout looks like (assuming 32-bit system):
	 *
	 *   ???????? ???????? ????nnnn nnnndula
	 *
	 *   ? : Unallocated: Run address for first/last pages, unset for
	 *       internal pages.
	 *       Small: Run page offset.
	 *       Large: Run size for first page, unset for trailing pages.
	 *   n : binind for small size class, BININD_INVALID for large size
	 *       class.
	 *   d : dirty?
	 *   u : unzeroed?
	 *   l : large?
	 *   a : allocated?
	 *
	 * Following are example bit patterns for the three types of runs.
	 *
	 *   p : run page offset
	 *   s : run size
	 *   n : binind for size class; large objects set these to
	 *       BININD_INVALID
	 *   x : don't care
	 *   - : 0
	 *   + : 1
	 *   [DULA] : bit set
	 *   [dula] : bit unset
	 *
	 *   Unallocated (clean):
	 *     ssssssss ssssssss ssss++++ ++++du-a
	 *     xxxxxxxx xxxxxxxx xxxxxxxx xxxx-Uxx
	 *     ssssssss ssssssss ssss++++ ++++dU-a
	 *
	 *   Unallocated (dirty):
	 *     ssssssss ssssssss ssss++++ ++++D--a
	 *     xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
	 *     ssssssss ssssssss ssss++++ ++++D--a
	 *
	 *   Small:
	 *     pppppppp pppppppp ppppnnnn nnnnd--A
	 *     pppppppp pppppppp ppppnnnn nnnn---A
	 *     pppppppp pppppppp ppppnnnn nnnnd--A
	 *
	 *   Large:
	 *     ssssssss ssssssss ssss++++ ++++D-LA
	 *     xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
	 *     -------- -------- ----++++ ++++D-LA
	 *
	 *   Large (sampled, size <= PAGE):
	 *     ssssssss ssssssss ssssnnnn nnnnD-LA
	 *
	 *   Large (not sampled, size == PAGE):
	 *     ssssssss ssssssss ssss++++ ++++D-LA
	 */
	size_t				bits;
#define	CHUNK_MAP_BININD_SHIFT	4
#define	BININD_INVALID		((size_t)0xffU)
/*     CHUNK_MAP_BININD_MASK == (BININD_INVALID << CHUNK_MAP_BININD_SHIFT) */
#define	CHUNK_MAP_BININD_MASK	((size_t)0xff0U)
#define	CHUNK_MAP_BININD_INVALID CHUNK_MAP_BININD_MASK
#define	CHUNK_MAP_FLAGS_MASK	((size_t)0xcU)
#define	CHUNK_MAP_DIRTY		((size_t)0x8U)
#define	CHUNK_MAP_UNZEROED	((size_t)0x4U)
#define	CHUNK_MAP_LARGE		((size_t)0x2U)
#define	CHUNK_MAP_ALLOCATED	((size_t)0x1U)
#define	CHUNK_MAP_KEY		CHUNK_MAP_ALLOCATED
};
typedef rb_tree(arena_chunk_map_t) arena_avail_tree_t;
typedef rb_tree(arena_chunk_map_t) arena_run_tree_t;
typedef ql_head(arena_chunk_map_t) arena_chunk_mapelms_t;
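/*
 * Illustrative sketch (not part of the original header): decoding a small-run
 * mapbits word according to the layout documented above
 * (pppppppp pppppppp ppppnnnn nnnnd--A).  The canonical accessors are the
 * arena_mapbits_*_get() inlines defined later in this file; this merely
 * restates the bit layout as code.  The function name is hypothetical.
 */
static inline void
example_decode_small_mapbits(size_t mapbits, size_t *runind, size_t *binind,
    bool *dirty)
{

	/* Small runs must be allocated and must not have the large flag set. */
	assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
	assert((mapbits & CHUNK_MAP_LARGE) == 0);

	*runind = mapbits >> LG_PAGE;				/* p bits. */
	*binind = (mapbits & CHUNK_MAP_BININD_MASK) >>
	    CHUNK_MAP_BININD_SHIFT;				/* n bits. */
	*dirty = (mapbits & CHUNK_MAP_DIRTY) != 0;		/* d bit. */
}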
/* Arena chunk header. */
struct arena_chunk_s {
	/* Arena that owns the chunk. */
	arena_t			*arena;

	/* Linkage for tree of arena chunks that contain dirty runs. */
	rb_node(arena_chunk_t)	dirty_link;

	/* Number of dirty pages. */
	size_t			ndirty;

	/* Number of available runs. */
	size_t			nruns_avail;

	/*
	 * Number of available run adjacencies that purging could coalesce.
	 * Clean and dirty available runs are not coalesced, which causes
	 * virtual memory fragmentation.  The ratio of
	 * (nruns_avail-nruns_adjac):nruns_adjac is used for tracking this
	 * fragmentation.
	 */
	size_t			nruns_adjac;

	/*
	 * Map of pages within chunk that keeps track of free/large/small.  The
	 * first map_bias entries are omitted, since the chunk header does not
	 * need to be tracked in the map.  This omission saves a header page
	 * for common chunk sizes (e.g. 4 MiB).
	 */
	arena_chunk_map_t	map[1]; /* Dynamically sized. */
};
typedef rb_tree(arena_chunk_t) arena_chunk_tree_t;

struct arena_run_s {
	/* Bin this run is associated with. */
	arena_bin_t	*bin;

	/* Index of next region that has never been allocated, or nregs. */
	uint32_t	nextind;

	/* Number of free regions in run. */
	unsigned	nfree;
};

/*
 * Read-only information associated with each element of arena_t's bins array
 * is stored separately, partly to reduce memory usage (only one copy, rather
 * than one per arena), but mainly to avoid false cacheline sharing.
 *
 * Each run has the following layout:
 *
 *               /--------------------\
 *               | arena_run_t header |
 *               | ...                |
 * bitmap_offset | bitmap             |
 *               | ...                |
 *               |--------------------|
 *               | redzone            |
 *   reg0_offset | region 0           |
 *               | redzone            |
 *               |--------------------| \
 *               | redzone            | |
 *               | region 1           |  > reg_interval
 *               | redzone            | /
 *               |--------------------|
 *               | ...                |
 *               | ...                |
 *               | ...                |
 *               |--------------------|
 *               | redzone            |
 *               | region nregs-1     |
 *               | redzone            |
 *               |--------------------|
 *               | alignment pad?     |
 *               \--------------------/
 *
 * reg_interval has at least the same minimum alignment as reg_size; this
 * preserves the alignment constraint that sa2u() depends on.  Alignment pad is
 * either 0 or redzone_size; it is present only if needed to align reg0_offset.
 */
struct arena_bin_info_s {
	/* Size of regions in a run for this bin's size class. */
	size_t		reg_size;

	/* Redzone size. */
	size_t		redzone_size;

	/* Interval between regions (reg_size + (redzone_size << 1)). */
	size_t		reg_interval;

	/* Total size of a run for this bin's size class. */
	size_t		run_size;

	/* Total number of regions in a run for this bin's size class. */
	uint32_t	nregs;

	/*
	 * Offset of first bitmap_t element in a run header for this bin's size
	 * class.
	 */
	uint32_t	bitmap_offset;

	/*
	 * Metadata used to manipulate bitmaps for runs associated with this
	 * bin.
	 */
	bitmap_info_t	bitmap_info;

	/* Offset of first region in a run for this bin's size class. */
	uint32_t	reg0_offset;
};
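/*
 * Illustrative sketch (not part of the original header): given the run layout
 * documented above, region regind of a run begins reg0_offset +
 * regind * reg_interval bytes past the run header.  The inverse mapping
 * (pointer -> region index) is arena_run_regind(), defined later in this
 * file.  The function name is hypothetical.
 */
static inline void *
example_run_region(arena_run_t *run, arena_bin_info_t *bin_info,
    unsigned regind)
{

	assert(regind < bin_info->nregs);
	return ((void *)((uintptr_t)run + (uintptr_t)bin_info->reg0_offset +
	    (uintptr_t)(regind * bin_info->reg_interval)));
}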
struct arena_bin_s {
	/*
	 * All operations on runcur, runs, and stats require that lock be
	 * locked.  Run allocation/deallocation are protected by the arena
	 * lock, which may be acquired while holding one or more bin locks,
	 * but not vice versa.
	 */
	malloc_mutex_t		lock;

	/*
	 * Current run being used to service allocations of this bin's size
	 * class.
	 */
	arena_run_t		*runcur;

	/*
	 * Tree of non-full runs.  This tree is used when looking for an
	 * existing run when runcur is no longer usable.  We choose the
	 * non-full run that is lowest in memory; this policy tends to keep
	 * objects packed well, and it can also help reduce the number of
	 * almost-empty chunks.
	 */
	arena_run_tree_t	runs;

	/* Bin statistics. */
	malloc_bin_stats_t	stats;
};

struct arena_s {
	/* This arena's index within the arenas array. */
	unsigned		ind;

	/*
	 * Number of threads currently assigned to this arena.  This field is
	 * protected by arenas_lock.
	 */
	unsigned		nthreads;

	/*
	 * There are three classes of arena operations from a locking
	 * perspective:
	 * 1) Thread assignment (modifies nthreads) is protected by
	 *    arenas_lock.
	 * 2) Bin-related operations are protected by bin locks.
	 * 3) Chunk- and run-related operations are protected by this mutex.
	 */
	malloc_mutex_t		lock;

	arena_stats_t		stats;
	/*
	 * List of tcaches for extant threads associated with this arena.
	 * Stats from these are merged incrementally, and at exit.
	 */
	ql_head(tcache_t)	tcache_ql;

	uint64_t		prof_accumbytes;

	dss_prec_t		dss_prec;

	/* Tree of dirty-page-containing chunks this arena manages. */
	arena_chunk_tree_t	chunks_dirty;

	/*
	 * In order to avoid rapid chunk allocation/deallocation when an arena
	 * oscillates right on the cusp of needing a new chunk, cache the most
	 * recently freed chunk.  The spare is left in the arena's chunk trees
	 * until it is deleted.
	 *
	 * There is one spare chunk per arena, rather than one spare total, in
	 * order to avoid interactions between multiple threads that could make
	 * a single spare inadequate.
	 */
	arena_chunk_t		*spare;

	/* Number of pages in active runs. */
	size_t			nactive;

	/*
	 * Current count of pages within unused runs that are potentially
	 * dirty, and for which madvise(... MADV_DONTNEED) has not been called.
	 * By tracking this, we can institute a limit on how much dirty unused
	 * memory is mapped for each arena.
	 */
	size_t			ndirty;

	/*
	 * Approximate number of pages being purged.  It is possible for
	 * multiple threads to purge dirty pages concurrently, and they use
	 * npurgatory to indicate the total number of pages all threads are
	 * attempting to purge.
	 */
	size_t			npurgatory;

	/*
	 * Size/address-ordered tree of this arena's available runs.  The tree
	 * is used for first-best-fit run allocation.
	 */
	arena_avail_tree_t	runs_avail;

	/*
	 * User-configurable chunk allocation and deallocation functions.
	 */
	chunk_alloc_t		*chunk_alloc;
	chunk_dealloc_t		*chunk_dealloc;

	/* bins is used to store trees of free regions. */
	arena_bin_t		bins[NBINS];
};

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

extern ssize_t	opt_lg_dirty_mult;
/*
 * small_size2bin_tab is a compact lookup table that rounds request sizes up to
 * size classes.  In order to reduce cache footprint, the table is compressed,
 * and all accesses are via small_size2bin().
 */
extern uint8_t const	small_size2bin_tab[];
/*
 * small_bin2size_tab duplicates information in arena_bin_info, but in a const
 * array, for which it is easier for the compiler to optimize repeated
 * dereferences.
 */
extern uint32_t const	small_bin2size_tab[NBINS];

extern arena_bin_info_t	arena_bin_info[NBINS];
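/*
 * Illustrative sketch (not part of the original header): the two tables above
 * compose to round a small request size up to its size class, so that
 * small_bin2size(small_size2bin(size)) >= size.  The indexing formula is the
 * one used by small_size2bin(), defined later in this file; the table is
 * compressed to one entry per 2^LG_TINY_MIN bytes of request size.  The
 * function name is hypothetical.
 */
static inline size_t
example_small_s2u(size_t size)
{
	size_t binind = (size_t)small_size2bin_tab[(size-1) >> LG_TINY_MIN];

	assert(size != 0 && binind < NBINS);
	/* Size class that the request is rounded up to. */
	return ((size_t)small_bin2size_tab[binind]);
}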
/* Number of large size classes. */
#define	nlclasses (chunk_npages - map_bias)

void	arena_purge_all(arena_t *arena);
void	arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin,
    size_t binind, uint64_t prof_accumbytes);
void	arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info,
    bool zero);
#ifdef JEMALLOC_JET
typedef void (arena_redzone_corruption_t)(void *, size_t, bool, size_t,
    uint8_t);
extern arena_redzone_corruption_t *arena_redzone_corruption;
typedef void (arena_dalloc_junk_small_t)(void *, arena_bin_info_t *);
extern arena_dalloc_junk_small_t *arena_dalloc_junk_small;
#else
void	arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info);
#endif
void	arena_quarantine_junk_small(void *ptr, size_t usize);
void	*arena_malloc_small(arena_t *arena, size_t size, bool zero);
void	*arena_malloc_large(arena_t *arena, size_t size, bool zero);
void	*arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero);
void	arena_prof_promoted(const void *ptr, size_t size);
void	arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    arena_chunk_map_t *mapelm);
void	arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    size_t pageind, arena_chunk_map_t *mapelm);
void	arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    size_t pageind);
#ifdef JEMALLOC_JET
typedef void (arena_dalloc_junk_large_t)(void *, size_t);
extern arena_dalloc_junk_large_t *arena_dalloc_junk_large;
#endif
void	arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk,
    void *ptr);
void	arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr);
#ifdef JEMALLOC_JET
typedef void (arena_ralloc_junk_large_t)(void *, size_t, size_t);
extern arena_ralloc_junk_large_t *arena_ralloc_junk_large;
#endif
bool	arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
    size_t extra, bool zero);
void	*arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
    size_t extra, size_t alignment, bool zero, bool try_tcache_alloc,
    bool try_tcache_dalloc);
dss_prec_t	arena_dss_prec_get(arena_t *arena);
bool	arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
void	arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
    size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
    malloc_large_stats_t *lstats);
bool	arena_new(arena_t *arena, unsigned ind);
void	arena_boot(void);
void	arena_prefork(arena_t *arena);
void	arena_postfork_parent(arena_t *arena);
void	arena_postfork_child(arena_t *arena);
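/*
 * Note on the JEMALLOC_JET branches above (not part of the original header):
 * in JET builds the junk-fill entry points are exported as writable function
 * pointers rather than plain functions, so test code can interpose on them.
 * A hypothetical interposition (every name other than
 * arena_dalloc_junk_small_t/arena_dalloc_junk_small is illustrative only)
 * might look like:
 *
 *	static arena_dalloc_junk_small_t *orig_dalloc_junk_small;
 *	static bool saw_junk;
 *
 *	static void
 *	watch_junk_small(void *ptr, arena_bin_info_t *bin_info)
 *	{
 *		saw_junk = true;
 *		orig_dalloc_junk_small(ptr, bin_info);
 *	}
 *	...
 *	orig_dalloc_junk_small = arena_dalloc_junk_small;
 *	arena_dalloc_junk_small = watch_junk_small;
 */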
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
size_t	small_size2bin(size_t size);
size_t	small_bin2size(size_t binind);
arena_chunk_map_t	*arena_mapp_get(arena_chunk_t *chunk, size_t pageind);
size_t	*arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind);
size_t	arena_mapbitsp_read(size_t *mapbitsp);
size_t	arena_mapbits_get(arena_chunk_t *chunk, size_t pageind);
size_t	arena_mapbits_unallocated_size_get(arena_chunk_t *chunk,
    size_t pageind);
size_t	arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind);
size_t	arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind);
size_t	arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind);
size_t	arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind);
size_t	arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind);
size_t	arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind);
size_t	arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind);
void	arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits);
void	arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind,
    size_t size, size_t flags);
void	arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
    size_t size);
void	arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind,
    size_t size, size_t flags);
void	arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
    size_t binind);
void	arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind,
    size_t runind, size_t binind, size_t flags);
void	arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind,
    size_t unzeroed);
bool	arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes);
bool	arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes);
bool	arena_prof_accum(arena_t *arena, uint64_t accumbytes);
size_t	arena_ptr_small_binind_get(const void *ptr, size_t mapbits);
size_t	arena_bin_index(arena_t *arena, arena_bin_t *bin);
unsigned	arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
    const void *ptr);
prof_ctx_t	*arena_prof_ctx_get(const void *ptr);
void	arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx);
void	*arena_malloc(arena_t *arena, size_t size, bool zero, bool try_tcache);
size_t	arena_salloc(const void *ptr, bool demote);
void	arena_dalloc(arena_chunk_t *chunk, void *ptr, bool try_tcache);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
# ifdef JEMALLOC_ARENA_INLINE_A
JEMALLOC_ALWAYS_INLINE size_t
small_size2bin(size_t size)
{

	return ((size_t)(small_size2bin_tab[(size-1) >> LG_TINY_MIN]));
}

JEMALLOC_ALWAYS_INLINE size_t
small_bin2size(size_t binind)
{

	return ((size_t)(small_bin2size_tab[binind]));
}
# endif /* JEMALLOC_ARENA_INLINE_A */

# ifdef JEMALLOC_ARENA_INLINE_B
JEMALLOC_ALWAYS_INLINE arena_chunk_map_t *
arena_mapp_get(arena_chunk_t *chunk, size_t pageind)
{

	assert(pageind >= map_bias);
	assert(pageind < chunk_npages);

	return (&chunk->map[pageind-map_bias]);
}

JEMALLOC_ALWAYS_INLINE size_t *
arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind)
{

	return (&arena_mapp_get(chunk, pageind)->bits);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbitsp_read(size_t *mapbitsp)
{

	return (*mapbitsp);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_get(arena_chunk_t *chunk, size_t pageind)
{

	return (arena_mapbitsp_read(arena_mapbitsp_get(chunk, pageind)));
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, size_t pageind)
{
	size_t mapbits;

	mapbits = arena_mapbits_get(chunk, pageind);
	assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
	return (mapbits & ~PAGE_MASK);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind)
{
	size_t mapbits;

	mapbits = arena_mapbits_get(chunk, pageind);
	assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) ==
	    (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED));
	return (mapbits & ~PAGE_MASK);
}
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind)
{
	size_t mapbits;

	mapbits = arena_mapbits_get(chunk, pageind);
	assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) ==
	    CHUNK_MAP_ALLOCATED);
	return (mapbits >> LG_PAGE);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind)
{
	size_t mapbits;
	size_t binind;

	mapbits = arena_mapbits_get(chunk, pageind);
	binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;
	assert(binind < NBINS || binind == BININD_INVALID);
	return (binind);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind)
{
	size_t mapbits;

	mapbits = arena_mapbits_get(chunk, pageind);
	return (mapbits & CHUNK_MAP_DIRTY);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind)
{
	size_t mapbits;

	mapbits = arena_mapbits_get(chunk, pageind);
	return (mapbits & CHUNK_MAP_UNZEROED);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind)
{
	size_t mapbits;

	mapbits = arena_mapbits_get(chunk, pageind);
	return (mapbits & CHUNK_MAP_LARGE);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind)
{
	size_t mapbits;

	mapbits = arena_mapbits_get(chunk, pageind);
	return (mapbits & CHUNK_MAP_ALLOCATED);
}

JEMALLOC_ALWAYS_INLINE void
arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits)
{

	*mapbitsp = mapbits;
}

JEMALLOC_ALWAYS_INLINE void
arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size,
    size_t flags)
{
	size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);

	assert((size & PAGE_MASK) == 0);
	assert((flags & ~CHUNK_MAP_FLAGS_MASK) == 0);
	assert((flags & (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == flags);
	arena_mapbitsp_write(mapbitsp, size | CHUNK_MAP_BININD_INVALID | flags);
}

JEMALLOC_ALWAYS_INLINE void
arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
    size_t size)
{
	size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
	size_t mapbits = arena_mapbitsp_read(mapbitsp);

	assert((size & PAGE_MASK) == 0);
	assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
	arena_mapbitsp_write(mapbitsp, size | (mapbits & PAGE_MASK));
}

JEMALLOC_ALWAYS_INLINE void
arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size,
    size_t flags)
{
	size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
	size_t mapbits = arena_mapbitsp_read(mapbitsp);
	size_t unzeroed;

	assert((size & PAGE_MASK) == 0);
	assert((flags & CHUNK_MAP_DIRTY) == flags);
	unzeroed = mapbits & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */
	arena_mapbitsp_write(mapbitsp, size | CHUNK_MAP_BININD_INVALID | flags
	    | unzeroed | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED);
}

JEMALLOC_ALWAYS_INLINE void
arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
    size_t binind)
{
	size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
	size_t mapbits = arena_mapbitsp_read(mapbitsp);

	assert(binind <= BININD_INVALID);
	assert(arena_mapbits_large_size_get(chunk, pageind) == PAGE);
	arena_mapbitsp_write(mapbitsp, (mapbits & ~CHUNK_MAP_BININD_MASK) |
	    (binind << CHUNK_MAP_BININD_SHIFT));
}

JEMALLOC_ALWAYS_INLINE void
arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind,
    size_t binind, size_t flags)
{
	size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
	size_t mapbits = arena_mapbitsp_read(mapbitsp);
	size_t unzeroed;

	assert(binind < BININD_INVALID);
	assert(pageind - runind >= map_bias);
	assert((flags & CHUNK_MAP_DIRTY) == flags);
	unzeroed = mapbits & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */
	arena_mapbitsp_write(mapbitsp, (runind << LG_PAGE) | (binind <<
	    CHUNK_MAP_BININD_SHIFT) | flags | unzeroed | CHUNK_MAP_ALLOCATED);
}

JEMALLOC_ALWAYS_INLINE void
arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind,
    size_t unzeroed)
{
	size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
	size_t mapbits = arena_mapbitsp_read(mapbitsp);

	arena_mapbitsp_write(mapbitsp, (mapbits & ~CHUNK_MAP_UNZEROED) |
	    unzeroed);
}

JEMALLOC_INLINE bool
arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes)
{

	cassert(config_prof);
	assert(prof_interval != 0);

	arena->prof_accumbytes += accumbytes;
	if (arena->prof_accumbytes >= prof_interval) {
		arena->prof_accumbytes -= prof_interval;
		return (true);
	}
	return (false);
}

JEMALLOC_INLINE bool
arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes)
{

	cassert(config_prof);

	if (prof_interval == 0)
		return (false);
	return (arena_prof_accum_impl(arena, accumbytes));
}

JEMALLOC_INLINE bool
arena_prof_accum(arena_t *arena, uint64_t accumbytes)
{

	cassert(config_prof);

	if (prof_interval == 0)
		return (false);

	{
		bool ret;

		malloc_mutex_lock(&arena->lock);
		ret = arena_prof_accum_impl(arena, accumbytes);
		malloc_mutex_unlock(&arena->lock);
		return (ret);
	}
}
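/*
 * Usage note (not part of the original header): the arena_prof_accum*()
 * variants above return true each time the per-arena byte count crosses
 * prof_interval; the caller is then expected to trigger an interval-based
 * profile dump, e.g. (illustrative only):
 *
 *	if (arena_prof_accum(arena, usize))
 *		prof_idump();
 *
 * arena_prof_accum() takes the arena lock itself, while the _locked/_impl
 * variants assume the caller already holds it.
 */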
JEMALLOC_ALWAYS_INLINE size_t
arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
{
	size_t binind;

	binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;

	if (config_debug) {
		arena_chunk_t *chunk;
		arena_t *arena;
		size_t pageind;
		size_t actual_mapbits;
		arena_run_t *run;
		arena_bin_t *bin;
		size_t actual_binind;
		arena_bin_info_t *bin_info;

		assert(binind != BININD_INVALID);
		assert(binind < NBINS);
		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
		arena = chunk->arena;
		pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
		actual_mapbits = arena_mapbits_get(chunk, pageind);
		assert(mapbits == actual_mapbits);
		assert(arena_mapbits_large_get(chunk, pageind) == 0);
		assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
		run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
		    (actual_mapbits >> LG_PAGE)) << LG_PAGE));
		bin = run->bin;
		actual_binind = bin - arena->bins;
		assert(binind == actual_binind);
		bin_info = &arena_bin_info[actual_binind];
		assert(((uintptr_t)ptr - ((uintptr_t)run +
		    (uintptr_t)bin_info->reg0_offset)) % bin_info->reg_interval
		    == 0);
	}

	return (binind);
}
# endif /* JEMALLOC_ARENA_INLINE_B */

# ifdef JEMALLOC_ARENA_INLINE_C
JEMALLOC_INLINE size_t
arena_bin_index(arena_t *arena, arena_bin_t *bin)
{
	size_t binind = bin - arena->bins;
	assert(binind < NBINS);
	return (binind);
}

JEMALLOC_INLINE unsigned
arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
{
	unsigned shift, diff, regind;
	size_t interval;

	/*
	 * Freeing a pointer lower than region zero can cause assertion
	 * failure.
	 */
	assert((uintptr_t)ptr >= (uintptr_t)run +
	    (uintptr_t)bin_info->reg0_offset);

	/*
	 * Avoid doing division with a variable divisor if possible.  Using
	 * actual division here can reduce allocator throughput by over 20%!
	 */
	diff = (unsigned)((uintptr_t)ptr - (uintptr_t)run -
	    bin_info->reg0_offset);

	/* Rescale (factor powers of 2 out of the numerator and denominator). */
	interval = bin_info->reg_interval;
	shift = ffs(interval) - 1;
	diff >>= shift;
	interval >>= shift;

	if (interval == 1) {
		/* The divisor was a power of 2. */
		regind = diff;
	} else {
		/*
		 * To divide by a number D that is not a power of two we
		 * multiply by (2^21 / D) and then right shift by 21 positions.
		 *
		 *   X / D
		 *
		 * becomes
		 *
		 *   (X * interval_invs[D - 3]) >> SIZE_INV_SHIFT
		 *
		 * We can omit the first three elements, because we never
		 * divide by 0, and 1 and 2 are both powers of two, which are
		 * handled above.
		 */
#define	SIZE_INV_SHIFT	((sizeof(unsigned) << 3) - LG_RUN_MAXREGS)
#define	SIZE_INV(s)	(((1U << SIZE_INV_SHIFT) / (s)) + 1)
		static const unsigned interval_invs[] = {
		    SIZE_INV(3),
		    SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7),
		    SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11),
		    SIZE_INV(12), SIZE_INV(13), SIZE_INV(14), SIZE_INV(15),
		    SIZE_INV(16), SIZE_INV(17), SIZE_INV(18), SIZE_INV(19),
		    SIZE_INV(20), SIZE_INV(21), SIZE_INV(22), SIZE_INV(23),
		    SIZE_INV(24), SIZE_INV(25), SIZE_INV(26), SIZE_INV(27),
		    SIZE_INV(28), SIZE_INV(29), SIZE_INV(30), SIZE_INV(31)
		};

		if (interval <= ((sizeof(interval_invs) / sizeof(unsigned)) +
		    2)) {
			regind = (diff * interval_invs[interval - 3]) >>
			    SIZE_INV_SHIFT;
		} else
			regind = diff / interval;
#undef SIZE_INV
#undef SIZE_INV_SHIFT
	}
	assert(diff == regind * interval);
	assert(regind < bin_info->nregs);

	return (regind);
}
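/*
 * Worked example (not part of the original header) of the reciprocal trick in
 * arena_run_regind() above, assuming 32-bit unsigned and LG_RUN_MAXREGS == 11,
 * so SIZE_INV_SHIFT == 21:
 *
 *	SIZE_INV(3) == (2^21 / 3) + 1 == 699051
 *
 * A valid region offset is always an exact multiple of the rescaled interval,
 * so diff == 3*k for some region index k, and:
 *
 *	(3*k * 699051) >> 21 == (k*2^21 + k) >> 21 == k	for k < 2^21
 *
 * which is exact for every k < RUN_MAXREGS, i.e. no run contains enough
 * regions for the approximation to drift.
 */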
JEMALLOC_INLINE prof_ctx_t *
arena_prof_ctx_get(const void *ptr)
{
	prof_ctx_t *ret;
	arena_chunk_t *chunk;
	size_t pageind, mapbits;

	cassert(config_prof);
	assert(ptr != NULL);
	assert(CHUNK_ADDR2BASE(ptr) != ptr);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	mapbits = arena_mapbits_get(chunk, pageind);
	assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
	if ((mapbits & CHUNK_MAP_LARGE) == 0)
		ret = (prof_ctx_t *)(uintptr_t)1U;
	else
		ret = arena_mapp_get(chunk, pageind)->prof_ctx;

	return (ret);
}

JEMALLOC_INLINE void
arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
{
	arena_chunk_t *chunk;
	size_t pageind;

	cassert(config_prof);
	assert(ptr != NULL);
	assert(CHUNK_ADDR2BASE(ptr) != ptr);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	assert(arena_mapbits_allocated_get(chunk, pageind) != 0);

	if (arena_mapbits_large_get(chunk, pageind) != 0)
		arena_mapp_get(chunk, pageind)->prof_ctx = ctx;
}

JEMALLOC_ALWAYS_INLINE void *
arena_malloc(arena_t *arena, size_t size, bool zero, bool try_tcache)
{
	tcache_t *tcache;

	assert(size != 0);
	assert(size <= arena_maxclass);

	if (size <= SMALL_MAXCLASS) {
		if (try_tcache && (tcache = tcache_get(true)) != NULL)
			return (tcache_alloc_small(tcache, size, zero));
		else {
			return (arena_malloc_small(choose_arena(arena), size,
			    zero));
		}
	} else {
		/*
		 * Initialize tcache after checking size in order to avoid
		 * infinite recursion during tcache initialization.
		 */
		if (try_tcache && size <= tcache_maxclass && (tcache =
		    tcache_get(true)) != NULL)
			return (tcache_alloc_large(tcache, size, zero));
		else {
			return (arena_malloc_large(choose_arena(arena), size,
			    zero));
		}
	}
}

/* Return the size of the allocation pointed to by ptr. */
JEMALLOC_ALWAYS_INLINE size_t
arena_salloc(const void *ptr, bool demote)
{
	size_t ret;
	arena_chunk_t *chunk;
	size_t pageind, binind;

	assert(ptr != NULL);
	assert(CHUNK_ADDR2BASE(ptr) != ptr);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
	binind = arena_mapbits_binind_get(chunk, pageind);
	if (binind == BININD_INVALID || (config_prof && demote == false &&
	    arena_mapbits_large_get(chunk, pageind) != 0)) {
		/*
		 * Large allocation.  In the common case (demote == true), and
		 * as this is an inline function, most callers will only end up
		 * looking at binind to determine that ptr is a small
		 * allocation.
		 */
		assert(((uintptr_t)ptr & PAGE_MASK) == 0);
		ret = arena_mapbits_large_size_get(chunk, pageind);
		assert(ret != 0);
		assert(pageind + (ret>>LG_PAGE) <= chunk_npages);
		assert(ret == PAGE || arena_mapbits_large_size_get(chunk,
		    pageind+(ret>>LG_PAGE)-1) == 0);
		assert(binind == arena_mapbits_binind_get(chunk,
		    pageind+(ret>>LG_PAGE)-1));
		assert(arena_mapbits_dirty_get(chunk, pageind) ==
		    arena_mapbits_dirty_get(chunk, pageind+(ret>>LG_PAGE)-1));
	} else {
		/* Small allocation (possibly promoted to a large object). */
		assert(arena_mapbits_large_get(chunk, pageind) != 0 ||
		    arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
		    pageind)) == binind);
		ret = small_bin2size(binind);
	}

	return (ret);
}
JEMALLOC_ALWAYS_INLINE void
arena_dalloc(arena_chunk_t *chunk, void *ptr, bool try_tcache)
{
	size_t pageind, mapbits;
	tcache_t *tcache;

	assert(ptr != NULL);
	assert(CHUNK_ADDR2BASE(ptr) != ptr);

	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	mapbits = arena_mapbits_get(chunk, pageind);
	assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
	if ((mapbits & CHUNK_MAP_LARGE) == 0) {
		/* Small allocation. */
		if (try_tcache && (tcache = tcache_get(false)) != NULL) {
			size_t binind;

			binind = arena_ptr_small_binind_get(ptr, mapbits);
			tcache_dalloc_small(tcache, ptr, binind);
		} else
			arena_dalloc_small(chunk->arena, chunk, ptr, pageind);
	} else {
		size_t size = arena_mapbits_large_size_get(chunk, pageind);

		assert(((uintptr_t)ptr & PAGE_MASK) == 0);

		if (try_tcache && size <= tcache_maxclass && (tcache =
		    tcache_get(false)) != NULL) {
			tcache_dalloc_large(tcache, ptr, size);
		} else
			arena_dalloc_large(chunk->arena, chunk, ptr);
	}
}
# endif /* JEMALLOC_ARENA_INLINE_C */
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/