arena.h revision 3541a904d6fb949f3f0aea05418ccce7cbd4b705
1/******************************************************************************/ 2#ifdef JEMALLOC_H_TYPES 3 4/* 5 * RUN_MAX_OVRHD indicates maximum desired run header overhead. Runs are sized 6 * as small as possible such that this setting is still honored, without 7 * violating other constraints. The goal is to make runs as small as possible 8 * without exceeding a per run external fragmentation threshold. 9 * 10 * We use binary fixed point math for overhead computations, where the binary 11 * point is implicitly RUN_BFP bits to the left. 12 * 13 * Note that it is possible to set RUN_MAX_OVRHD low enough that it cannot be 14 * honored for some/all object sizes, since when heap profiling is enabled 15 * there is one pointer of header overhead per object (plus a constant). This 16 * constraint is relaxed (ignored) for runs that are so small that the 17 * per-region overhead is greater than: 18 * 19 * (RUN_MAX_OVRHD / (reg_interval << (3+RUN_BFP)) 20 */ 21#define RUN_BFP 12 22/* \/ Implicit binary fixed point. */ 23#define RUN_MAX_OVRHD 0x0000003dU 24#define RUN_MAX_OVRHD_RELAX 0x00001800U 25 26/* Maximum number of regions in one run. */ 27#define LG_RUN_MAXREGS 11 28#define RUN_MAXREGS (1U << LG_RUN_MAXREGS) 29 30/* 31 * Minimum redzone size. Redzones may be larger than this if necessary to 32 * preserve region alignment. 33 */ 34#define REDZONE_MINSIZE 16 35 36/* 37 * The minimum ratio of active:dirty pages per arena is computed as: 38 * 39 * (nactive >> opt_lg_dirty_mult) >= ndirty 40 * 41 * So, supposing that opt_lg_dirty_mult is 3, there can be no less than 8 times 42 * as many active pages as dirty pages. 43 */ 44#define LG_DIRTY_MULT_DEFAULT 3 45 46typedef struct arena_chunk_map_s arena_chunk_map_t; 47typedef struct arena_chunk_s arena_chunk_t; 48typedef struct arena_run_s arena_run_t; 49typedef struct arena_bin_info_s arena_bin_info_t; 50typedef struct arena_bin_s arena_bin_t; 51typedef struct arena_s arena_t; 52 53#endif /* JEMALLOC_H_TYPES */ 54/******************************************************************************/ 55#ifdef JEMALLOC_H_STRUCTS 56 57/* Each element of the chunk map corresponds to one page within the chunk. */ 58struct arena_chunk_map_s { 59#ifndef JEMALLOC_PROF 60 /* 61 * Overlay prof_ctx in order to allow it to be referenced by dead code. 62 * Such antics aren't warranted for per arena data structures, but 63 * chunk map overhead accounts for a percentage of memory, rather than 64 * being just a fixed cost. 65 */ 66 union { 67#endif 68 union { 69 /* 70 * Linkage for run trees. There are two disjoint uses: 71 * 72 * 1) arena_t's runs_avail tree. 73 * 2) arena_run_t conceptually uses this linkage for in-use 74 * non-full runs, rather than directly embedding linkage. 75 */ 76 rb_node(arena_chunk_map_t) rb_link; 77 /* 78 * List of runs currently in purgatory. arena_chunk_purge() 79 * temporarily allocates runs that contain dirty pages while 80 * purging, so that other threads cannot use the runs while the 81 * purging thread is operating without the arena lock held. 82 */ 83 ql_elm(arena_chunk_map_t) ql_link; 84 } u; 85 86 /* Profile counters, used for large object runs. */ 87 prof_ctx_t *prof_ctx; 88#ifndef JEMALLOC_PROF 89 }; /* union { ... }; */ 90#endif 91 92 /* 93 * Run address (or size) and various flags are stored together. The bit 94 * layout looks like (assuming 32-bit system): 95 * 96 * ???????? ???????? ????nnnn nnnndula 97 * 98 * ? : Unallocated: Run address for first/last pages, unset for internal 99 * pages. 100 * Small: Run page offset. 101 * Large: Run size for first page, unset for trailing pages. 102 * n : binind for small size class, BININD_INVALID for large size class. 103 * d : dirty? 104 * u : unzeroed? 105 * l : large? 106 * a : allocated? 107 * 108 * Following are example bit patterns for the three types of runs. 109 * 110 * p : run page offset 111 * s : run size 112 * n : binind for size class; large objects set these to BININD_INVALID 113 * x : don't care 114 * - : 0 115 * + : 1 116 * [DULA] : bit set 117 * [dula] : bit unset 118 * 119 * Unallocated (clean): 120 * ssssssss ssssssss ssss++++ ++++du-a 121 * xxxxxxxx xxxxxxxx xxxxxxxx xxxx-Uxx 122 * ssssssss ssssssss ssss++++ ++++dU-a 123 * 124 * Unallocated (dirty): 125 * ssssssss ssssssss ssss++++ ++++D--a 126 * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx 127 * ssssssss ssssssss ssss++++ ++++D--a 128 * 129 * Small: 130 * pppppppp pppppppp ppppnnnn nnnnd--A 131 * pppppppp pppppppp ppppnnnn nnnn---A 132 * pppppppp pppppppp ppppnnnn nnnnd--A 133 * 134 * Large: 135 * ssssssss ssssssss ssss++++ ++++D-LA 136 * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx 137 * -------- -------- ----++++ ++++D-LA 138 * 139 * Large (sampled, size <= PAGE): 140 * ssssssss ssssssss ssssnnnn nnnnD-LA 141 * 142 * Large (not sampled, size == PAGE): 143 * ssssssss ssssssss ssss++++ ++++D-LA 144 */ 145 size_t bits; 146#define CHUNK_MAP_BININD_SHIFT 4 147#define BININD_INVALID ((size_t)0xffU) 148/* CHUNK_MAP_BININD_MASK == (BININD_INVALID << CHUNK_MAP_BININD_SHIFT) */ 149#define CHUNK_MAP_BININD_MASK ((size_t)0xff0U) 150#define CHUNK_MAP_BININD_INVALID CHUNK_MAP_BININD_MASK 151#define CHUNK_MAP_FLAGS_MASK ((size_t)0xcU) 152#define CHUNK_MAP_DIRTY ((size_t)0x8U) 153#define CHUNK_MAP_UNZEROED ((size_t)0x4U) 154#define CHUNK_MAP_LARGE ((size_t)0x2U) 155#define CHUNK_MAP_ALLOCATED ((size_t)0x1U) 156#define CHUNK_MAP_KEY CHUNK_MAP_ALLOCATED 157}; 158typedef rb_tree(arena_chunk_map_t) arena_avail_tree_t; 159typedef rb_tree(arena_chunk_map_t) arena_run_tree_t; 160typedef ql_head(arena_chunk_map_t) arena_chunk_mapelms_t; 161 162/* Arena chunk header. */ 163struct arena_chunk_s { 164 /* Arena that owns the chunk. */ 165 arena_t *arena; 166 167 /* Linkage for tree of arena chunks that contain dirty runs. */ 168 rb_node(arena_chunk_t) dirty_link; 169 170 /* Number of dirty pages. */ 171 size_t ndirty; 172 173 /* Number of available runs. */ 174 size_t nruns_avail; 175 176 /* 177 * Number of available run adjacencies that purging could coalesce. 178 * Clean and dirty available runs are not coalesced, which causes 179 * virtual memory fragmentation. The ratio of 180 * (nruns_avail-nruns_adjac):nruns_adjac is used for tracking this 181 * fragmentation. 182 */ 183 size_t nruns_adjac; 184 185 /* 186 * Map of pages within chunk that keeps track of free/large/small. The 187 * first map_bias entries are omitted, since the chunk header does not 188 * need to be tracked in the map. This omission saves a header page 189 * for common chunk sizes (e.g. 4 MiB). 190 */ 191 arena_chunk_map_t map[1]; /* Dynamically sized. */ 192}; 193typedef rb_tree(arena_chunk_t) arena_chunk_tree_t; 194 195struct arena_run_s { 196 /* Bin this run is associated with. */ 197 arena_bin_t *bin; 198 199 /* Index of next region that has never been allocated, or nregs. */ 200 uint32_t nextind; 201 202 /* Number of free regions in run. */ 203 unsigned nfree; 204}; 205 206/* 207 * Read-only information associated with each element of arena_t's bins array 208 * is stored separately, partly to reduce memory usage (only one copy, rather 209 * than one per arena), but mainly to avoid false cacheline sharing. 210 * 211 * Each run has the following layout: 212 * 213 * /--------------------\ 214 * | arena_run_t header | 215 * | ... | 216 * bitmap_offset | bitmap | 217 * | ... | 218 * |--------------------| 219 * | redzone | 220 * reg0_offset | region 0 | 221 * | redzone | 222 * |--------------------| \ 223 * | redzone | | 224 * | region 1 | > reg_interval 225 * | redzone | / 226 * |--------------------| 227 * | ... | 228 * | ... | 229 * | ... | 230 * |--------------------| 231 * | redzone | 232 * | region nregs-1 | 233 * | redzone | 234 * |--------------------| 235 * | alignment pad? | 236 * \--------------------/ 237 * 238 * reg_interval has at least the same minimum alignment as reg_size; this 239 * preserves the alignment constraint that sa2u() depends on. Alignment pad is 240 * either 0 or redzone_size; it is present only if needed to align reg0_offset. 241 */ 242struct arena_bin_info_s { 243 /* Size of regions in a run for this bin's size class. */ 244 size_t reg_size; 245 246 /* Redzone size. */ 247 size_t redzone_size; 248 249 /* Interval between regions (reg_size + (redzone_size << 1)). */ 250 size_t reg_interval; 251 252 /* Total size of a run for this bin's size class. */ 253 size_t run_size; 254 255 /* Total number of regions in a run for this bin's size class. */ 256 uint32_t nregs; 257 258 /* 259 * Offset of first bitmap_t element in a run header for this bin's size 260 * class. 261 */ 262 uint32_t bitmap_offset; 263 264 /* 265 * Metadata used to manipulate bitmaps for runs associated with this 266 * bin. 267 */ 268 bitmap_info_t bitmap_info; 269 270 /* Offset of first region in a run for this bin's size class. */ 271 uint32_t reg0_offset; 272}; 273 274struct arena_bin_s { 275 /* 276 * All operations on runcur, runs, and stats require that lock be 277 * locked. Run allocation/deallocation are protected by the arena lock, 278 * which may be acquired while holding one or more bin locks, but not 279 * vise versa. 280 */ 281 malloc_mutex_t lock; 282 283 /* 284 * Current run being used to service allocations of this bin's size 285 * class. 286 */ 287 arena_run_t *runcur; 288 289 /* 290 * Tree of non-full runs. This tree is used when looking for an 291 * existing run when runcur is no longer usable. We choose the 292 * non-full run that is lowest in memory; this policy tends to keep 293 * objects packed well, and it can also help reduce the number of 294 * almost-empty chunks. 295 */ 296 arena_run_tree_t runs; 297 298 /* Bin statistics. */ 299 malloc_bin_stats_t stats; 300}; 301 302struct arena_s { 303 /* This arena's index within the arenas array. */ 304 unsigned ind; 305 306 /* 307 * Number of threads currently assigned to this arena. This field is 308 * protected by arenas_lock. 309 */ 310 unsigned nthreads; 311 312 /* 313 * There are three classes of arena operations from a locking 314 * perspective: 315 * 1) Thread asssignment (modifies nthreads) is protected by 316 * arenas_lock. 317 * 2) Bin-related operations are protected by bin locks. 318 * 3) Chunk- and run-related operations are protected by this mutex. 319 */ 320 malloc_mutex_t lock; 321 322 arena_stats_t stats; 323 /* 324 * List of tcaches for extant threads associated with this arena. 325 * Stats from these are merged incrementally, and at exit. 326 */ 327 ql_head(tcache_t) tcache_ql; 328 329 uint64_t prof_accumbytes; 330 331 dss_prec_t dss_prec; 332 333 /* Tree of dirty-page-containing chunks this arena manages. */ 334 arena_chunk_tree_t chunks_dirty; 335 336 /* 337 * In order to avoid rapid chunk allocation/deallocation when an arena 338 * oscillates right on the cusp of needing a new chunk, cache the most 339 * recently freed chunk. The spare is left in the arena's chunk trees 340 * until it is deleted. 341 * 342 * There is one spare chunk per arena, rather than one spare total, in 343 * order to avoid interactions between multiple threads that could make 344 * a single spare inadequate. 345 */ 346 arena_chunk_t *spare; 347 348 /* Number of pages in active runs. */ 349 size_t nactive; 350 351 /* 352 * Current count of pages within unused runs that are potentially 353 * dirty, and for which madvise(... MADV_DONTNEED) has not been called. 354 * By tracking this, we can institute a limit on how much dirty unused 355 * memory is mapped for each arena. 356 */ 357 size_t ndirty; 358 359 /* 360 * Approximate number of pages being purged. It is possible for 361 * multiple threads to purge dirty pages concurrently, and they use 362 * npurgatory to indicate the total number of pages all threads are 363 * attempting to purge. 364 */ 365 size_t npurgatory; 366 367 /* 368 * Size/address-ordered trees of this arena's available runs. The trees 369 * are used for first-best-fit run allocation. 370 */ 371 arena_avail_tree_t runs_avail; 372 373 /* bins is used to store trees of free regions. */ 374 arena_bin_t bins[NBINS]; 375}; 376 377#endif /* JEMALLOC_H_STRUCTS */ 378/******************************************************************************/ 379#ifdef JEMALLOC_H_EXTERNS 380 381extern ssize_t opt_lg_dirty_mult; 382/* 383 * small_size2bin_tab is a compact lookup table that rounds request sizes up to 384 * size classes. In order to reduce cache footprint, the table is compressed, 385 * and all accesses are via small_size2bin(). 386 */ 387extern uint8_t const small_size2bin_tab[]; 388/* 389 * small_bin2size_tab duplicates information in arena_bin_info, but in a const 390 * array, for which it is easier for the compiler to optimize repeated 391 * dereferences. 392 */ 393extern uint32_t const small_bin2size_tab[NBINS]; 394 395extern arena_bin_info_t arena_bin_info[NBINS]; 396 397/* Number of large size classes. */ 398#define nlclasses (chunk_npages - map_bias) 399 400void arena_purge_all(arena_t *arena); 401void arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, 402 size_t binind, uint64_t prof_accumbytes); 403void arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, 404 bool zero); 405#ifdef JEMALLOC_JET 406typedef void (arena_redzone_corruption_t)(void *, size_t, bool, size_t, 407 uint8_t); 408extern arena_redzone_corruption_t *arena_redzone_corruption; 409typedef void (arena_dalloc_junk_small_t)(void *, arena_bin_info_t *); 410extern arena_dalloc_junk_small_t *arena_dalloc_junk_small; 411#else 412void arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info); 413#endif 414void arena_quarantine_junk_small(void *ptr, size_t usize); 415void *arena_malloc_small(arena_t *arena, size_t size, bool zero); 416void *arena_malloc_large(arena_t *arena, size_t size, bool zero); 417void *arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero); 418void arena_prof_promoted(const void *ptr, size_t size); 419void arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr, 420 arena_chunk_map_t *mapelm); 421void arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr, 422 size_t pageind, arena_chunk_map_t *mapelm); 423void arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr, 424 size_t pageind); 425#ifdef JEMALLOC_JET 426typedef void (arena_dalloc_junk_large_t)(void *, size_t); 427extern arena_dalloc_junk_large_t *arena_dalloc_junk_large; 428#endif 429void arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk, 430 void *ptr); 431void arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr); 432#ifdef JEMALLOC_JET 433typedef void (arena_ralloc_junk_large_t)(void *, size_t, size_t); 434extern arena_ralloc_junk_large_t *arena_ralloc_junk_large; 435#endif 436bool arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, 437 size_t extra, bool zero); 438void *arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size, 439 size_t extra, size_t alignment, bool zero, bool try_tcache_alloc, 440 bool try_tcache_dalloc); 441dss_prec_t arena_dss_prec_get(arena_t *arena); 442bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec); 443void arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive, 444 size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats, 445 malloc_large_stats_t *lstats); 446bool arena_new(arena_t *arena, unsigned ind); 447void arena_boot(void); 448void arena_prefork(arena_t *arena); 449void arena_postfork_parent(arena_t *arena); 450void arena_postfork_child(arena_t *arena); 451 452#endif /* JEMALLOC_H_EXTERNS */ 453/******************************************************************************/ 454#ifdef JEMALLOC_H_INLINES 455 456#ifndef JEMALLOC_ENABLE_INLINE 457size_t small_size2bin(size_t size); 458size_t small_bin2size(size_t binind); 459arena_chunk_map_t *arena_mapp_get(arena_chunk_t *chunk, size_t pageind); 460size_t *arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind); 461size_t arena_mapbitsp_read(size_t *mapbitsp); 462size_t arena_mapbits_get(arena_chunk_t *chunk, size_t pageind); 463size_t arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, 464 size_t pageind); 465size_t arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind); 466size_t arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind); 467size_t arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind); 468size_t arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind); 469size_t arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind); 470size_t arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind); 471size_t arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind); 472void arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits); 473void arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, 474 size_t size, size_t flags); 475void arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind, 476 size_t size); 477void arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, 478 size_t size, size_t flags); 479void arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind, 480 size_t binind); 481void arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, 482 size_t runind, size_t binind, size_t flags); 483void arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind, 484 size_t unzeroed); 485bool arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes); 486bool arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes); 487bool arena_prof_accum(arena_t *arena, uint64_t accumbytes); 488size_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits); 489size_t arena_bin_index(arena_t *arena, arena_bin_t *bin); 490unsigned arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, 491 const void *ptr); 492prof_ctx_t *arena_prof_ctx_get(const void *ptr); 493void arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx); 494void *arena_malloc(arena_t *arena, size_t size, bool zero, bool try_tcache); 495size_t arena_salloc(const void *ptr, bool demote); 496void arena_dalloc(arena_chunk_t *chunk, void *ptr, bool try_tcache); 497#endif 498 499#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_)) 500# ifdef JEMALLOC_ARENA_INLINE_A 501JEMALLOC_ALWAYS_INLINE size_t 502small_size2bin(size_t size) 503{ 504 505 return ((size_t)(small_size2bin_tab[(size-1) >> LG_TINY_MIN])); 506} 507 508JEMALLOC_ALWAYS_INLINE size_t 509small_bin2size(size_t binind) 510{ 511 512 return ((size_t)(small_bin2size_tab[binind])); 513} 514# endif /* JEMALLOC_ARENA_INLINE_A */ 515 516# ifdef JEMALLOC_ARENA_INLINE_B 517JEMALLOC_ALWAYS_INLINE arena_chunk_map_t * 518arena_mapp_get(arena_chunk_t *chunk, size_t pageind) 519{ 520 521 assert(pageind >= map_bias); 522 assert(pageind < chunk_npages); 523 524 return (&chunk->map[pageind-map_bias]); 525} 526 527JEMALLOC_ALWAYS_INLINE size_t * 528arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind) 529{ 530 531 return (&arena_mapp_get(chunk, pageind)->bits); 532} 533 534JEMALLOC_ALWAYS_INLINE size_t 535arena_mapbitsp_read(size_t *mapbitsp) 536{ 537 538 return (*mapbitsp); 539} 540 541JEMALLOC_ALWAYS_INLINE size_t 542arena_mapbits_get(arena_chunk_t *chunk, size_t pageind) 543{ 544 545 return (arena_mapbitsp_read(arena_mapbitsp_get(chunk, pageind))); 546} 547 548JEMALLOC_ALWAYS_INLINE size_t 549arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, size_t pageind) 550{ 551 size_t mapbits; 552 553 mapbits = arena_mapbits_get(chunk, pageind); 554 assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0); 555 return (mapbits & ~PAGE_MASK); 556} 557 558JEMALLOC_ALWAYS_INLINE size_t 559arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind) 560{ 561 size_t mapbits; 562 563 mapbits = arena_mapbits_get(chunk, pageind); 564 assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 565 (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)); 566 return (mapbits & ~PAGE_MASK); 567} 568 569JEMALLOC_ALWAYS_INLINE size_t 570arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind) 571{ 572 size_t mapbits; 573 574 mapbits = arena_mapbits_get(chunk, pageind); 575 assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 576 CHUNK_MAP_ALLOCATED); 577 return (mapbits >> LG_PAGE); 578} 579 580JEMALLOC_ALWAYS_INLINE size_t 581arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind) 582{ 583 size_t mapbits; 584 size_t binind; 585 586 mapbits = arena_mapbits_get(chunk, pageind); 587 binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT; 588 assert(binind < NBINS || binind == BININD_INVALID); 589 return (binind); 590} 591 592JEMALLOC_ALWAYS_INLINE size_t 593arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind) 594{ 595 size_t mapbits; 596 597 mapbits = arena_mapbits_get(chunk, pageind); 598 return (mapbits & CHUNK_MAP_DIRTY); 599} 600 601JEMALLOC_ALWAYS_INLINE size_t 602arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind) 603{ 604 size_t mapbits; 605 606 mapbits = arena_mapbits_get(chunk, pageind); 607 return (mapbits & CHUNK_MAP_UNZEROED); 608} 609 610JEMALLOC_ALWAYS_INLINE size_t 611arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind) 612{ 613 size_t mapbits; 614 615 mapbits = arena_mapbits_get(chunk, pageind); 616 return (mapbits & CHUNK_MAP_LARGE); 617} 618 619JEMALLOC_ALWAYS_INLINE size_t 620arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind) 621{ 622 size_t mapbits; 623 624 mapbits = arena_mapbits_get(chunk, pageind); 625 return (mapbits & CHUNK_MAP_ALLOCATED); 626} 627 628JEMALLOC_ALWAYS_INLINE void 629arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits) 630{ 631 632 *mapbitsp = mapbits; 633} 634 635JEMALLOC_ALWAYS_INLINE void 636arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size, 637 size_t flags) 638{ 639 size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind); 640 641 assert((size & PAGE_MASK) == 0); 642 assert((flags & ~CHUNK_MAP_FLAGS_MASK) == 0); 643 assert((flags & (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == flags); 644 arena_mapbitsp_write(mapbitsp, size | CHUNK_MAP_BININD_INVALID | flags); 645} 646 647JEMALLOC_ALWAYS_INLINE void 648arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind, 649 size_t size) 650{ 651 size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind); 652 size_t mapbits = arena_mapbitsp_read(mapbitsp); 653 654 assert((size & PAGE_MASK) == 0); 655 assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0); 656 arena_mapbitsp_write(mapbitsp, size | (mapbits & PAGE_MASK)); 657} 658 659JEMALLOC_ALWAYS_INLINE void 660arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size, 661 size_t flags) 662{ 663 size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind); 664 size_t mapbits = arena_mapbitsp_read(mapbitsp); 665 size_t unzeroed; 666 667 assert((size & PAGE_MASK) == 0); 668 assert((flags & CHUNK_MAP_DIRTY) == flags); 669 unzeroed = mapbits & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */ 670 arena_mapbitsp_write(mapbitsp, size | CHUNK_MAP_BININD_INVALID | flags 671 | unzeroed | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED); 672} 673 674JEMALLOC_ALWAYS_INLINE void 675arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind, 676 size_t binind) 677{ 678 size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind); 679 size_t mapbits = arena_mapbitsp_read(mapbitsp); 680 681 assert(binind <= BININD_INVALID); 682 assert(arena_mapbits_large_size_get(chunk, pageind) == PAGE); 683 arena_mapbitsp_write(mapbitsp, (mapbits & ~CHUNK_MAP_BININD_MASK) | 684 (binind << CHUNK_MAP_BININD_SHIFT)); 685} 686 687JEMALLOC_ALWAYS_INLINE void 688arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind, 689 size_t binind, size_t flags) 690{ 691 size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind); 692 size_t mapbits = arena_mapbitsp_read(mapbitsp); 693 size_t unzeroed; 694 695 assert(binind < BININD_INVALID); 696 assert(pageind - runind >= map_bias); 697 assert((flags & CHUNK_MAP_DIRTY) == flags); 698 unzeroed = mapbits & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */ 699 arena_mapbitsp_write(mapbitsp, (runind << LG_PAGE) | (binind << 700 CHUNK_MAP_BININD_SHIFT) | flags | unzeroed | CHUNK_MAP_ALLOCATED); 701} 702 703JEMALLOC_ALWAYS_INLINE void 704arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind, 705 size_t unzeroed) 706{ 707 size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind); 708 size_t mapbits = arena_mapbitsp_read(mapbitsp); 709 710 arena_mapbitsp_write(mapbitsp, (mapbits & ~CHUNK_MAP_UNZEROED) | 711 unzeroed); 712} 713 714JEMALLOC_INLINE bool 715arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes) 716{ 717 718 cassert(config_prof); 719 assert(prof_interval != 0); 720 721 arena->prof_accumbytes += accumbytes; 722 if (arena->prof_accumbytes >= prof_interval) { 723 arena->prof_accumbytes -= prof_interval; 724 return (true); 725 } 726 return (false); 727} 728 729JEMALLOC_INLINE bool 730arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes) 731{ 732 733 cassert(config_prof); 734 735 if (prof_interval == 0) 736 return (false); 737 return (arena_prof_accum_impl(arena, accumbytes)); 738} 739 740JEMALLOC_INLINE bool 741arena_prof_accum(arena_t *arena, uint64_t accumbytes) 742{ 743 744 cassert(config_prof); 745 746 if (prof_interval == 0) 747 return (false); 748 749 { 750 bool ret; 751 752 malloc_mutex_lock(&arena->lock); 753 ret = arena_prof_accum_impl(arena, accumbytes); 754 malloc_mutex_unlock(&arena->lock); 755 return (ret); 756 } 757} 758 759JEMALLOC_ALWAYS_INLINE size_t 760arena_ptr_small_binind_get(const void *ptr, size_t mapbits) 761{ 762 size_t binind; 763 764 binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT; 765 766 if (config_debug) { 767 arena_chunk_t *chunk; 768 arena_t *arena; 769 size_t pageind; 770 size_t actual_mapbits; 771 arena_run_t *run; 772 arena_bin_t *bin; 773 size_t actual_binind; 774 arena_bin_info_t *bin_info; 775 776 assert(binind != BININD_INVALID); 777 assert(binind < NBINS); 778 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); 779 arena = chunk->arena; 780 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; 781 actual_mapbits = arena_mapbits_get(chunk, pageind); 782 assert(mapbits == actual_mapbits); 783 assert(arena_mapbits_large_get(chunk, pageind) == 0); 784 assert(arena_mapbits_allocated_get(chunk, pageind) != 0); 785 run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind - 786 (actual_mapbits >> LG_PAGE)) << LG_PAGE)); 787 bin = run->bin; 788 actual_binind = bin - arena->bins; 789 assert(binind == actual_binind); 790 bin_info = &arena_bin_info[actual_binind]; 791 assert(((uintptr_t)ptr - ((uintptr_t)run + 792 (uintptr_t)bin_info->reg0_offset)) % bin_info->reg_interval 793 == 0); 794 } 795 796 return (binind); 797} 798# endif /* JEMALLOC_ARENA_INLINE_B */ 799 800# ifdef JEMALLOC_ARENA_INLINE_C 801JEMALLOC_INLINE size_t 802arena_bin_index(arena_t *arena, arena_bin_t *bin) 803{ 804 size_t binind = bin - arena->bins; 805 assert(binind < NBINS); 806 return (binind); 807} 808 809JEMALLOC_INLINE unsigned 810arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr) 811{ 812 unsigned shift, diff, regind; 813 size_t interval; 814 815 /* 816 * Freeing a pointer lower than region zero can cause assertion 817 * failure. 818 */ 819 assert((uintptr_t)ptr >= (uintptr_t)run + 820 (uintptr_t)bin_info->reg0_offset); 821 822 /* 823 * Avoid doing division with a variable divisor if possible. Using 824 * actual division here can reduce allocator throughput by over 20%! 825 */ 826 diff = (unsigned)((uintptr_t)ptr - (uintptr_t)run - 827 bin_info->reg0_offset); 828 829 /* Rescale (factor powers of 2 out of the numerator and denominator). */ 830 interval = bin_info->reg_interval; 831 shift = ffs(interval) - 1; 832 diff >>= shift; 833 interval >>= shift; 834 835 if (interval == 1) { 836 /* The divisor was a power of 2. */ 837 regind = diff; 838 } else { 839 /* 840 * To divide by a number D that is not a power of two we 841 * multiply by (2^21 / D) and then right shift by 21 positions. 842 * 843 * X / D 844 * 845 * becomes 846 * 847 * (X * interval_invs[D - 3]) >> SIZE_INV_SHIFT 848 * 849 * We can omit the first three elements, because we never 850 * divide by 0, and 1 and 2 are both powers of two, which are 851 * handled above. 852 */ 853#define SIZE_INV_SHIFT ((sizeof(unsigned) << 3) - LG_RUN_MAXREGS) 854#define SIZE_INV(s) (((1U << SIZE_INV_SHIFT) / (s)) + 1) 855 static const unsigned interval_invs[] = { 856 SIZE_INV(3), 857 SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7), 858 SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11), 859 SIZE_INV(12), SIZE_INV(13), SIZE_INV(14), SIZE_INV(15), 860 SIZE_INV(16), SIZE_INV(17), SIZE_INV(18), SIZE_INV(19), 861 SIZE_INV(20), SIZE_INV(21), SIZE_INV(22), SIZE_INV(23), 862 SIZE_INV(24), SIZE_INV(25), SIZE_INV(26), SIZE_INV(27), 863 SIZE_INV(28), SIZE_INV(29), SIZE_INV(30), SIZE_INV(31) 864 }; 865 866 if (interval <= ((sizeof(interval_invs) / sizeof(unsigned)) + 867 2)) { 868 regind = (diff * interval_invs[interval - 3]) >> 869 SIZE_INV_SHIFT; 870 } else 871 regind = diff / interval; 872#undef SIZE_INV 873#undef SIZE_INV_SHIFT 874 } 875 assert(diff == regind * interval); 876 assert(regind < bin_info->nregs); 877 878 return (regind); 879} 880 881JEMALLOC_INLINE prof_ctx_t * 882arena_prof_ctx_get(const void *ptr) 883{ 884 prof_ctx_t *ret; 885 arena_chunk_t *chunk; 886 size_t pageind, mapbits; 887 888 cassert(config_prof); 889 assert(ptr != NULL); 890 assert(CHUNK_ADDR2BASE(ptr) != ptr); 891 892 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); 893 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; 894 mapbits = arena_mapbits_get(chunk, pageind); 895 assert((mapbits & CHUNK_MAP_ALLOCATED) != 0); 896 if ((mapbits & CHUNK_MAP_LARGE) == 0) 897 ret = (prof_ctx_t *)(uintptr_t)1U; 898 else 899 ret = arena_mapp_get(chunk, pageind)->prof_ctx; 900 901 return (ret); 902} 903 904JEMALLOC_INLINE void 905arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx) 906{ 907 arena_chunk_t *chunk; 908 size_t pageind; 909 910 cassert(config_prof); 911 assert(ptr != NULL); 912 assert(CHUNK_ADDR2BASE(ptr) != ptr); 913 914 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); 915 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; 916 assert(arena_mapbits_allocated_get(chunk, pageind) != 0); 917 918 if (arena_mapbits_large_get(chunk, pageind) != 0) 919 arena_mapp_get(chunk, pageind)->prof_ctx = ctx; 920} 921 922JEMALLOC_ALWAYS_INLINE void * 923arena_malloc(arena_t *arena, size_t size, bool zero, bool try_tcache) 924{ 925 tcache_t *tcache; 926 927 assert(size != 0); 928 assert(size <= arena_maxclass); 929 930 if (size <= SMALL_MAXCLASS) { 931 if (try_tcache && (tcache = tcache_get(true)) != NULL) 932 return (tcache_alloc_small(tcache, size, zero)); 933 else { 934 return (arena_malloc_small(choose_arena(arena), size, 935 zero)); 936 } 937 } else { 938 /* 939 * Initialize tcache after checking size in order to avoid 940 * infinite recursion during tcache initialization. 941 */ 942 if (try_tcache && size <= tcache_maxclass && (tcache = 943 tcache_get(true)) != NULL) 944 return (tcache_alloc_large(tcache, size, zero)); 945 else { 946 return (arena_malloc_large(choose_arena(arena), size, 947 zero)); 948 } 949 } 950} 951 952/* Return the size of the allocation pointed to by ptr. */ 953JEMALLOC_ALWAYS_INLINE size_t 954arena_salloc(const void *ptr, bool demote) 955{ 956 size_t ret; 957 arena_chunk_t *chunk; 958 size_t pageind, binind; 959 960 assert(ptr != NULL); 961 assert(CHUNK_ADDR2BASE(ptr) != ptr); 962 963 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); 964 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; 965 assert(arena_mapbits_allocated_get(chunk, pageind) != 0); 966 binind = arena_mapbits_binind_get(chunk, pageind); 967 if (binind == BININD_INVALID || (config_prof && demote == false && 968 arena_mapbits_large_get(chunk, pageind) != 0)) { 969 /* 970 * Large allocation. In the common case (demote == true), and 971 * as this is an inline function, most callers will only end up 972 * looking at binind to determine that ptr is a small 973 * allocation. 974 */ 975 assert(((uintptr_t)ptr & PAGE_MASK) == 0); 976 ret = arena_mapbits_large_size_get(chunk, pageind); 977 assert(ret != 0); 978 assert(pageind + (ret>>LG_PAGE) <= chunk_npages); 979 assert(ret == PAGE || arena_mapbits_large_size_get(chunk, 980 pageind+(ret>>LG_PAGE)-1) == 0); 981 assert(binind == arena_mapbits_binind_get(chunk, 982 pageind+(ret>>LG_PAGE)-1)); 983 assert(arena_mapbits_dirty_get(chunk, pageind) == 984 arena_mapbits_dirty_get(chunk, pageind+(ret>>LG_PAGE)-1)); 985 } else { 986 /* Small allocation (possibly promoted to a large object). */ 987 assert(arena_mapbits_large_get(chunk, pageind) != 0 || 988 arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk, 989 pageind)) == binind); 990 ret = small_bin2size(binind); 991 } 992 993 return (ret); 994} 995 996JEMALLOC_ALWAYS_INLINE void 997arena_dalloc(arena_chunk_t *chunk, void *ptr, bool try_tcache) 998{ 999 size_t pageind, mapbits; 1000 tcache_t *tcache; 1001 1002 assert(ptr != NULL); 1003 assert(CHUNK_ADDR2BASE(ptr) != ptr); 1004 1005 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; 1006 mapbits = arena_mapbits_get(chunk, pageind); 1007 assert(arena_mapbits_allocated_get(chunk, pageind) != 0); 1008 if ((mapbits & CHUNK_MAP_LARGE) == 0) { 1009 /* Small allocation. */ 1010 if (try_tcache && (tcache = tcache_get(false)) != NULL) { 1011 size_t binind; 1012 1013 binind = arena_ptr_small_binind_get(ptr, mapbits); 1014 tcache_dalloc_small(tcache, ptr, binind); 1015 } else 1016 arena_dalloc_small(chunk->arena, chunk, ptr, pageind); 1017 } else { 1018 size_t size = arena_mapbits_large_size_get(chunk, pageind); 1019 1020 assert(((uintptr_t)ptr & PAGE_MASK) == 0); 1021 1022 if (try_tcache && size <= tcache_maxclass && (tcache = 1023 tcache_get(false)) != NULL) { 1024 tcache_dalloc_large(tcache, ptr, size); 1025 } else 1026 arena_dalloc_large(chunk->arena, chunk, ptr); 1027 } 1028} 1029# endif /* JEMALLOC_ARENA_INLINE_C */ 1030#endif 1031 1032#endif /* JEMALLOC_H_INLINES */ 1033/******************************************************************************/ 1034