arena.h revision 9b0cbf0850b130a9b0a8c58bd10b2926b2083510
1/******************************************************************************/ 2#ifdef JEMALLOC_H_TYPES 3 4/* 5 * RUN_MAX_OVRHD indicates maximum desired run header overhead. Runs are sized 6 * as small as possible such that this setting is still honored, without 7 * violating other constraints. The goal is to make runs as small as possible 8 * without exceeding a per run external fragmentation threshold. 9 * 10 * We use binary fixed point math for overhead computations, where the binary 11 * point is implicitly RUN_BFP bits to the left. 12 * 13 * Note that it is possible to set RUN_MAX_OVRHD low enough that it cannot be 14 * honored for some/all object sizes, since when heap profiling is enabled 15 * there is one pointer of header overhead per object (plus a constant). This 16 * constraint is relaxed (ignored) for runs that are so small that the 17 * per-region overhead is greater than: 18 * 19 * (RUN_MAX_OVRHD / (reg_interval << (3+RUN_BFP)) 20 */ 21#define RUN_BFP 12 22/* \/ Implicit binary fixed point. */ 23#define RUN_MAX_OVRHD 0x0000003dU 24#define RUN_MAX_OVRHD_RELAX 0x00001800U 25 26/* Maximum number of regions in one run. */ 27#define LG_RUN_MAXREGS 11 28#define RUN_MAXREGS (1U << LG_RUN_MAXREGS) 29 30/* 31 * Minimum redzone size. Redzones may be larger than this if necessary to 32 * preserve region alignment. 33 */ 34#define REDZONE_MINSIZE 16 35 36/* 37 * The minimum ratio of active:dirty pages per arena is computed as: 38 * 39 * (nactive >> opt_lg_dirty_mult) >= ndirty 40 * 41 * So, supposing that opt_lg_dirty_mult is 3, there can be no less than 8 times 42 * as many active pages as dirty pages. 43 */ 44#define LG_DIRTY_MULT_DEFAULT 3 45 46typedef struct arena_chunk_map_s arena_chunk_map_t; 47typedef struct arena_chunk_s arena_chunk_t; 48typedef struct arena_run_s arena_run_t; 49typedef struct arena_bin_info_s arena_bin_info_t; 50typedef struct arena_bin_s arena_bin_t; 51typedef struct arena_s arena_t; 52 53#endif /* JEMALLOC_H_TYPES */ 54/******************************************************************************/ 55#ifdef JEMALLOC_H_STRUCTS 56 57/* Each element of the chunk map corresponds to one page within the chunk. */ 58struct arena_chunk_map_s { 59#ifndef JEMALLOC_PROF 60 /* 61 * Overlay prof_ctx in order to allow it to be referenced by dead code. 62 * Such antics aren't warranted for per arena data structures, but 63 * chunk map overhead accounts for a percentage of memory, rather than 64 * being just a fixed cost. 65 */ 66 union { 67#endif 68 union { 69 /* 70 * Linkage for run trees. There are two disjoint uses: 71 * 72 * 1) arena_t's runs_avail tree. 73 * 2) arena_run_t conceptually uses this linkage for in-use 74 * non-full runs, rather than directly embedding linkage. 75 */ 76 rb_node(arena_chunk_map_t) rb_link; 77 /* 78 * List of runs currently in purgatory. arena_chunk_purge() 79 * temporarily allocates runs that contain dirty pages while 80 * purging, so that other threads cannot use the runs while the 81 * purging thread is operating without the arena lock held. 82 */ 83 ql_elm(arena_chunk_map_t) ql_link; 84 } u; 85 86 /* Profile counters, used for large object runs. */ 87 prof_ctx_t *prof_ctx; 88#ifndef JEMALLOC_PROF 89 }; /* union { ... }; */ 90#endif 91 92 /* 93 * Run address (or size) and various flags are stored together. The bit 94 * layout looks like (assuming 32-bit system): 95 * 96 * ???????? ???????? ????nnnn nnnndula 97 * 98 * ? : Unallocated: Run address for first/last pages, unset for internal 99 * pages. 100 * Small: Run page offset. 101 * Large: Run size for first page, unset for trailing pages. 102 * n : binind for small size class, BININD_INVALID for large size class. 103 * d : dirty? 104 * u : unzeroed? 105 * l : large? 106 * a : allocated? 107 * 108 * Following are example bit patterns for the three types of runs. 109 * 110 * p : run page offset 111 * s : run size 112 * n : binind for size class; large objects set these to BININD_INVALID 113 * x : don't care 114 * - : 0 115 * + : 1 116 * [DULA] : bit set 117 * [dula] : bit unset 118 * 119 * Unallocated (clean): 120 * ssssssss ssssssss ssss++++ ++++du-a 121 * xxxxxxxx xxxxxxxx xxxxxxxx xxxx-Uxx 122 * ssssssss ssssssss ssss++++ ++++dU-a 123 * 124 * Unallocated (dirty): 125 * ssssssss ssssssss ssss++++ ++++D--a 126 * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx 127 * ssssssss ssssssss ssss++++ ++++D--a 128 * 129 * Small: 130 * pppppppp pppppppp ppppnnnn nnnnd--A 131 * pppppppp pppppppp ppppnnnn nnnn---A 132 * pppppppp pppppppp ppppnnnn nnnnd--A 133 * 134 * Large: 135 * ssssssss ssssssss ssss++++ ++++D-LA 136 * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx 137 * -------- -------- ----++++ ++++D-LA 138 * 139 * Large (sampled, size <= PAGE): 140 * ssssssss ssssssss ssssnnnn nnnnD-LA 141 * 142 * Large (not sampled, size == PAGE): 143 * ssssssss ssssssss ssss++++ ++++D-LA 144 */ 145 size_t bits; 146#define CHUNK_MAP_BININD_SHIFT 4 147#define BININD_INVALID ((size_t)0xffU) 148/* CHUNK_MAP_BININD_MASK == (BININD_INVALID << CHUNK_MAP_BININD_SHIFT) */ 149#define CHUNK_MAP_BININD_MASK ((size_t)0xff0U) 150#define CHUNK_MAP_BININD_INVALID CHUNK_MAP_BININD_MASK 151#define CHUNK_MAP_FLAGS_MASK ((size_t)0xcU) 152#define CHUNK_MAP_DIRTY ((size_t)0x8U) 153#define CHUNK_MAP_UNZEROED ((size_t)0x4U) 154#define CHUNK_MAP_LARGE ((size_t)0x2U) 155#define CHUNK_MAP_ALLOCATED ((size_t)0x1U) 156#define CHUNK_MAP_KEY CHUNK_MAP_ALLOCATED 157}; 158typedef rb_tree(arena_chunk_map_t) arena_avail_tree_t; 159typedef rb_tree(arena_chunk_map_t) arena_run_tree_t; 160typedef ql_head(arena_chunk_map_t) arena_chunk_mapelms_t; 161 162/* Arena chunk header. */ 163struct arena_chunk_s { 164 /* Arena that owns the chunk. */ 165 arena_t *arena; 166 167 /* Linkage for tree of arena chunks that contain dirty runs. */ 168 rb_node(arena_chunk_t) dirty_link; 169 170 /* Number of dirty pages. */ 171 size_t ndirty; 172 173 /* Number of available runs. */ 174 size_t nruns_avail; 175 176 /* 177 * Number of available run adjacencies that purging could coalesce. 178 * Clean and dirty available runs are not coalesced, which causes 179 * virtual memory fragmentation. The ratio of 180 * (nruns_avail-nruns_adjac):nruns_adjac is used for tracking this 181 * fragmentation. 182 */ 183 size_t nruns_adjac; 184 185 /* 186 * Map of pages within chunk that keeps track of free/large/small. The 187 * first map_bias entries are omitted, since the chunk header does not 188 * need to be tracked in the map. This omission saves a header page 189 * for common chunk sizes (e.g. 4 MiB). 190 */ 191 arena_chunk_map_t map[1]; /* Dynamically sized. */ 192}; 193typedef rb_tree(arena_chunk_t) arena_chunk_tree_t; 194 195struct arena_run_s { 196 /* Bin this run is associated with. */ 197 arena_bin_t *bin; 198 199 /* Index of next region that has never been allocated, or nregs. */ 200 uint32_t nextind; 201 202 /* Number of free regions in run. */ 203 unsigned nfree; 204}; 205 206/* 207 * Read-only information associated with each element of arena_t's bins array 208 * is stored separately, partly to reduce memory usage (only one copy, rather 209 * than one per arena), but mainly to avoid false cacheline sharing. 210 * 211 * Each run has the following layout: 212 * 213 * /--------------------\ 214 * | arena_run_t header | 215 * | ... | 216 * bitmap_offset | bitmap | 217 * | ... | 218 * |--------------------| 219 * | redzone | 220 * reg0_offset | region 0 | 221 * | redzone | 222 * |--------------------| \ 223 * | redzone | | 224 * | region 1 | > reg_interval 225 * | redzone | / 226 * |--------------------| 227 * | ... | 228 * | ... | 229 * | ... | 230 * |--------------------| 231 * | redzone | 232 * | region nregs-1 | 233 * | redzone | 234 * |--------------------| 235 * | alignment pad? | 236 * \--------------------/ 237 * 238 * reg_interval has at least the same minimum alignment as reg_size; this 239 * preserves the alignment constraint that sa2u() depends on. Alignment pad is 240 * either 0 or redzone_size; it is present only if needed to align reg0_offset. 241 */ 242struct arena_bin_info_s { 243 /* Size of regions in a run for this bin's size class. */ 244 size_t reg_size; 245 246 /* Redzone size. */ 247 size_t redzone_size; 248 249 /* Interval between regions (reg_size + (redzone_size << 1)). */ 250 size_t reg_interval; 251 252 /* Total size of a run for this bin's size class. */ 253 size_t run_size; 254 255 /* Total number of regions in a run for this bin's size class. */ 256 uint32_t nregs; 257 258 /* 259 * Offset of first bitmap_t element in a run header for this bin's size 260 * class. 261 */ 262 uint32_t bitmap_offset; 263 264 /* 265 * Metadata used to manipulate bitmaps for runs associated with this 266 * bin. 267 */ 268 bitmap_info_t bitmap_info; 269 270 /* Offset of first region in a run for this bin's size class. */ 271 uint32_t reg0_offset; 272}; 273 274struct arena_bin_s { 275 /* 276 * All operations on runcur, runs, and stats require that lock be 277 * locked. Run allocation/deallocation are protected by the arena lock, 278 * which may be acquired while holding one or more bin locks, but not 279 * vise versa. 280 */ 281 malloc_mutex_t lock; 282 283 /* 284 * Current run being used to service allocations of this bin's size 285 * class. 286 */ 287 arena_run_t *runcur; 288 289 /* 290 * Tree of non-full runs. This tree is used when looking for an 291 * existing run when runcur is no longer usable. We choose the 292 * non-full run that is lowest in memory; this policy tends to keep 293 * objects packed well, and it can also help reduce the number of 294 * almost-empty chunks. 295 */ 296 arena_run_tree_t runs; 297 298 /* Bin statistics. */ 299 malloc_bin_stats_t stats; 300}; 301 302struct arena_s { 303 /* This arena's index within the arenas array. */ 304 unsigned ind; 305 306 /* 307 * Number of threads currently assigned to this arena. This field is 308 * protected by arenas_lock. 309 */ 310 unsigned nthreads; 311 312 /* 313 * There are three classes of arena operations from a locking 314 * perspective: 315 * 1) Thread asssignment (modifies nthreads) is protected by 316 * arenas_lock. 317 * 2) Bin-related operations are protected by bin locks. 318 * 3) Chunk- and run-related operations are protected by this mutex. 319 */ 320 malloc_mutex_t lock; 321 322 arena_stats_t stats; 323 /* 324 * List of tcaches for extant threads associated with this arena. 325 * Stats from these are merged incrementally, and at exit. 326 */ 327 ql_head(tcache_t) tcache_ql; 328 329 uint64_t prof_accumbytes; 330 331 dss_prec_t dss_prec; 332 333 /* Tree of dirty-page-containing chunks this arena manages. */ 334 arena_chunk_tree_t chunks_dirty; 335 336 /* 337 * In order to avoid rapid chunk allocation/deallocation when an arena 338 * oscillates right on the cusp of needing a new chunk, cache the most 339 * recently freed chunk. The spare is left in the arena's chunk trees 340 * until it is deleted. 341 * 342 * There is one spare chunk per arena, rather than one spare total, in 343 * order to avoid interactions between multiple threads that could make 344 * a single spare inadequate. 345 */ 346 arena_chunk_t *spare; 347 348 /* Number of pages in active runs. */ 349 size_t nactive; 350 351 /* 352 * Current count of pages within unused runs that are potentially 353 * dirty, and for which madvise(... MADV_DONTNEED) has not been called. 354 * By tracking this, we can institute a limit on how much dirty unused 355 * memory is mapped for each arena. 356 */ 357 size_t ndirty; 358 359 /* 360 * Approximate number of pages being purged. It is possible for 361 * multiple threads to purge dirty pages concurrently, and they use 362 * npurgatory to indicate the total number of pages all threads are 363 * attempting to purge. 364 */ 365 size_t npurgatory; 366 367 /* 368 * Size/address-ordered trees of this arena's available runs. The trees 369 * are used for first-best-fit run allocation. 370 */ 371 arena_avail_tree_t runs_avail; 372 373 /* bins is used to store trees of free regions. */ 374 arena_bin_t bins[NBINS]; 375}; 376 377#endif /* JEMALLOC_H_STRUCTS */ 378/******************************************************************************/ 379#ifdef JEMALLOC_H_EXTERNS 380 381extern ssize_t opt_lg_dirty_mult; 382/* 383 * small_size2bin is a compact lookup table that rounds request sizes up to 384 * size classes. In order to reduce cache footprint, the table is compressed, 385 * and all accesses are via the SMALL_SIZE2BIN macro. 386 */ 387extern uint8_t const small_size2bin[]; 388#define SMALL_SIZE2BIN(s) (small_size2bin[(s-1) >> LG_TINY_MIN]) 389 390extern arena_bin_info_t arena_bin_info[NBINS]; 391 392/* Number of large size classes. */ 393#define nlclasses (chunk_npages - map_bias) 394 395void arena_purge_all(arena_t *arena); 396void arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, 397 size_t binind, uint64_t prof_accumbytes); 398void arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, 399 bool zero); 400#ifdef JEMALLOC_JET 401typedef void (arena_redzone_corruption_t)(void *, size_t, bool, size_t, 402 uint8_t); 403extern arena_redzone_corruption_t *arena_redzone_corruption; 404typedef void (arena_dalloc_junk_small_t)(void *, arena_bin_info_t *); 405extern arena_dalloc_junk_small_t *arena_dalloc_junk_small; 406#else 407void arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info); 408#endif 409void arena_quarantine_junk_small(void *ptr, size_t usize); 410void *arena_malloc_small(arena_t *arena, size_t size, bool zero); 411void *arena_malloc_large(arena_t *arena, size_t size, bool zero); 412void *arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero); 413void arena_prof_promoted(const void *ptr, size_t size); 414void arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr, 415 arena_chunk_map_t *mapelm); 416void arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr, 417 size_t pageind, arena_chunk_map_t *mapelm); 418void arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr, 419 size_t pageind); 420#ifdef JEMALLOC_JET 421typedef void (arena_dalloc_junk_large_t)(void *, size_t); 422extern arena_dalloc_junk_large_t *arena_dalloc_junk_large; 423#endif 424void arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk, 425 void *ptr); 426void arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr); 427#ifdef JEMALLOC_JET 428typedef void (arena_ralloc_junk_large_t)(void *, size_t, size_t); 429extern arena_ralloc_junk_large_t *arena_ralloc_junk_large; 430#endif 431bool arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, 432 size_t extra, bool zero); 433void *arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size, 434 size_t extra, size_t alignment, bool zero, bool try_tcache_alloc, 435 bool try_tcache_dalloc); 436dss_prec_t arena_dss_prec_get(arena_t *arena); 437void arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec); 438void arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive, 439 size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats, 440 malloc_large_stats_t *lstats); 441bool arena_new(arena_t *arena, unsigned ind); 442void arena_boot(void); 443void arena_prefork(arena_t *arena); 444void arena_postfork_parent(arena_t *arena); 445void arena_postfork_child(arena_t *arena); 446 447#endif /* JEMALLOC_H_EXTERNS */ 448/******************************************************************************/ 449#ifdef JEMALLOC_H_INLINES 450 451#ifndef JEMALLOC_ENABLE_INLINE 452arena_chunk_map_t *arena_mapp_get(arena_chunk_t *chunk, size_t pageind); 453size_t *arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind); 454size_t arena_mapbitsp_read(size_t *mapbitsp); 455size_t arena_mapbits_get(arena_chunk_t *chunk, size_t pageind); 456size_t arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, 457 size_t pageind); 458size_t arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind); 459size_t arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind); 460size_t arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind); 461size_t arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind); 462size_t arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind); 463size_t arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind); 464size_t arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind); 465void arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits); 466void arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, 467 size_t size, size_t flags); 468void arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind, 469 size_t size); 470void arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, 471 size_t size, size_t flags); 472void arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind, 473 size_t binind); 474void arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, 475 size_t runind, size_t binind, size_t flags); 476void arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind, 477 size_t unzeroed); 478bool arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes); 479bool arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes); 480bool arena_prof_accum(arena_t *arena, uint64_t accumbytes); 481size_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits); 482size_t arena_bin_index(arena_t *arena, arena_bin_t *bin); 483unsigned arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, 484 const void *ptr); 485prof_ctx_t *arena_prof_ctx_get(const void *ptr); 486void arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx); 487void *arena_malloc(arena_t *arena, size_t size, bool zero, bool try_tcache); 488size_t arena_salloc(const void *ptr, bool demote); 489void arena_dalloc(arena_chunk_t *chunk, void *ptr, bool try_tcache); 490#endif 491 492#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_)) 493# ifdef JEMALLOC_ARENA_INLINE_A 494JEMALLOC_ALWAYS_INLINE arena_chunk_map_t * 495arena_mapp_get(arena_chunk_t *chunk, size_t pageind) 496{ 497 498 assert(pageind >= map_bias); 499 assert(pageind < chunk_npages); 500 501 return (&chunk->map[pageind-map_bias]); 502} 503 504JEMALLOC_ALWAYS_INLINE size_t * 505arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind) 506{ 507 508 return (&arena_mapp_get(chunk, pageind)->bits); 509} 510 511JEMALLOC_ALWAYS_INLINE size_t 512arena_mapbitsp_read(size_t *mapbitsp) 513{ 514 515 return (*mapbitsp); 516} 517 518JEMALLOC_ALWAYS_INLINE size_t 519arena_mapbits_get(arena_chunk_t *chunk, size_t pageind) 520{ 521 522 return (arena_mapbitsp_read(arena_mapbitsp_get(chunk, pageind))); 523} 524 525JEMALLOC_ALWAYS_INLINE size_t 526arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, size_t pageind) 527{ 528 size_t mapbits; 529 530 mapbits = arena_mapbits_get(chunk, pageind); 531 assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0); 532 return (mapbits & ~PAGE_MASK); 533} 534 535JEMALLOC_ALWAYS_INLINE size_t 536arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind) 537{ 538 size_t mapbits; 539 540 mapbits = arena_mapbits_get(chunk, pageind); 541 assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 542 (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)); 543 return (mapbits & ~PAGE_MASK); 544} 545 546JEMALLOC_ALWAYS_INLINE size_t 547arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind) 548{ 549 size_t mapbits; 550 551 mapbits = arena_mapbits_get(chunk, pageind); 552 assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 553 CHUNK_MAP_ALLOCATED); 554 return (mapbits >> LG_PAGE); 555} 556 557JEMALLOC_ALWAYS_INLINE size_t 558arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind) 559{ 560 size_t mapbits; 561 size_t binind; 562 563 mapbits = arena_mapbits_get(chunk, pageind); 564 binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT; 565 assert(binind < NBINS || binind == BININD_INVALID); 566 return (binind); 567} 568 569JEMALLOC_ALWAYS_INLINE size_t 570arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind) 571{ 572 size_t mapbits; 573 574 mapbits = arena_mapbits_get(chunk, pageind); 575 return (mapbits & CHUNK_MAP_DIRTY); 576} 577 578JEMALLOC_ALWAYS_INLINE size_t 579arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind) 580{ 581 size_t mapbits; 582 583 mapbits = arena_mapbits_get(chunk, pageind); 584 return (mapbits & CHUNK_MAP_UNZEROED); 585} 586 587JEMALLOC_ALWAYS_INLINE size_t 588arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind) 589{ 590 size_t mapbits; 591 592 mapbits = arena_mapbits_get(chunk, pageind); 593 return (mapbits & CHUNK_MAP_LARGE); 594} 595 596JEMALLOC_ALWAYS_INLINE size_t 597arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind) 598{ 599 size_t mapbits; 600 601 mapbits = arena_mapbits_get(chunk, pageind); 602 return (mapbits & CHUNK_MAP_ALLOCATED); 603} 604 605JEMALLOC_ALWAYS_INLINE void 606arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits) 607{ 608 609 *mapbitsp = mapbits; 610} 611 612JEMALLOC_ALWAYS_INLINE void 613arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size, 614 size_t flags) 615{ 616 size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind); 617 618 assert((size & PAGE_MASK) == 0); 619 assert((flags & ~CHUNK_MAP_FLAGS_MASK) == 0); 620 assert((flags & (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == flags); 621 arena_mapbitsp_write(mapbitsp, size | CHUNK_MAP_BININD_INVALID | flags); 622} 623 624JEMALLOC_ALWAYS_INLINE void 625arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind, 626 size_t size) 627{ 628 size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind); 629 size_t mapbits = arena_mapbitsp_read(mapbitsp); 630 631 assert((size & PAGE_MASK) == 0); 632 assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0); 633 arena_mapbitsp_write(mapbitsp, size | (mapbits & PAGE_MASK)); 634} 635 636JEMALLOC_ALWAYS_INLINE void 637arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size, 638 size_t flags) 639{ 640 size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind); 641 size_t mapbits = arena_mapbitsp_read(mapbitsp); 642 size_t unzeroed; 643 644 assert((size & PAGE_MASK) == 0); 645 assert((flags & CHUNK_MAP_DIRTY) == flags); 646 unzeroed = mapbits & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */ 647 arena_mapbitsp_write(mapbitsp, size | CHUNK_MAP_BININD_INVALID | flags 648 | unzeroed | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED); 649} 650 651JEMALLOC_ALWAYS_INLINE void 652arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind, 653 size_t binind) 654{ 655 size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind); 656 size_t mapbits = arena_mapbitsp_read(mapbitsp); 657 658 assert(binind <= BININD_INVALID); 659 assert(arena_mapbits_large_size_get(chunk, pageind) == PAGE); 660 arena_mapbitsp_write(mapbitsp, (mapbits & ~CHUNK_MAP_BININD_MASK) | 661 (binind << CHUNK_MAP_BININD_SHIFT)); 662} 663 664JEMALLOC_ALWAYS_INLINE void 665arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind, 666 size_t binind, size_t flags) 667{ 668 size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind); 669 size_t mapbits = arena_mapbitsp_read(mapbitsp); 670 size_t unzeroed; 671 672 assert(binind < BININD_INVALID); 673 assert(pageind - runind >= map_bias); 674 assert((flags & CHUNK_MAP_DIRTY) == flags); 675 unzeroed = mapbits & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */ 676 arena_mapbitsp_write(mapbitsp, (runind << LG_PAGE) | (binind << 677 CHUNK_MAP_BININD_SHIFT) | flags | unzeroed | CHUNK_MAP_ALLOCATED); 678} 679 680JEMALLOC_ALWAYS_INLINE void 681arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind, 682 size_t unzeroed) 683{ 684 size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind); 685 size_t mapbits = arena_mapbitsp_read(mapbitsp); 686 687 arena_mapbitsp_write(mapbitsp, (mapbits & ~CHUNK_MAP_UNZEROED) | 688 unzeroed); 689} 690 691JEMALLOC_INLINE bool 692arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes) 693{ 694 695 cassert(config_prof); 696 assert(prof_interval != 0); 697 698 arena->prof_accumbytes += accumbytes; 699 if (arena->prof_accumbytes >= prof_interval) { 700 arena->prof_accumbytes -= prof_interval; 701 return (true); 702 } 703 return (false); 704} 705 706JEMALLOC_INLINE bool 707arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes) 708{ 709 710 cassert(config_prof); 711 712 if (prof_interval == 0) 713 return (false); 714 return (arena_prof_accum_impl(arena, accumbytes)); 715} 716 717JEMALLOC_INLINE bool 718arena_prof_accum(arena_t *arena, uint64_t accumbytes) 719{ 720 721 cassert(config_prof); 722 723 if (prof_interval == 0) 724 return (false); 725 726 { 727 bool ret; 728 729 malloc_mutex_lock(&arena->lock); 730 ret = arena_prof_accum_impl(arena, accumbytes); 731 malloc_mutex_unlock(&arena->lock); 732 return (ret); 733 } 734} 735 736JEMALLOC_ALWAYS_INLINE size_t 737arena_ptr_small_binind_get(const void *ptr, size_t mapbits) 738{ 739 size_t binind; 740 741 binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT; 742 743 if (config_debug) { 744 arena_chunk_t *chunk; 745 arena_t *arena; 746 size_t pageind; 747 size_t actual_mapbits; 748 arena_run_t *run; 749 arena_bin_t *bin; 750 size_t actual_binind; 751 arena_bin_info_t *bin_info; 752 753 assert(binind != BININD_INVALID); 754 assert(binind < NBINS); 755 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); 756 arena = chunk->arena; 757 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; 758 actual_mapbits = arena_mapbits_get(chunk, pageind); 759 assert(mapbits == actual_mapbits); 760 assert(arena_mapbits_large_get(chunk, pageind) == 0); 761 assert(arena_mapbits_allocated_get(chunk, pageind) != 0); 762 run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind - 763 (actual_mapbits >> LG_PAGE)) << LG_PAGE)); 764 bin = run->bin; 765 actual_binind = bin - arena->bins; 766 assert(binind == actual_binind); 767 bin_info = &arena_bin_info[actual_binind]; 768 assert(((uintptr_t)ptr - ((uintptr_t)run + 769 (uintptr_t)bin_info->reg0_offset)) % bin_info->reg_interval 770 == 0); 771 } 772 773 return (binind); 774} 775# endif /* JEMALLOC_ARENA_INLINE_A */ 776 777# ifdef JEMALLOC_ARENA_INLINE_B 778JEMALLOC_INLINE size_t 779arena_bin_index(arena_t *arena, arena_bin_t *bin) 780{ 781 size_t binind = bin - arena->bins; 782 assert(binind < NBINS); 783 return (binind); 784} 785 786JEMALLOC_INLINE unsigned 787arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr) 788{ 789 unsigned shift, diff, regind; 790 size_t interval; 791 792 /* 793 * Freeing a pointer lower than region zero can cause assertion 794 * failure. 795 */ 796 assert((uintptr_t)ptr >= (uintptr_t)run + 797 (uintptr_t)bin_info->reg0_offset); 798 799 /* 800 * Avoid doing division with a variable divisor if possible. Using 801 * actual division here can reduce allocator throughput by over 20%! 802 */ 803 diff = (unsigned)((uintptr_t)ptr - (uintptr_t)run - 804 bin_info->reg0_offset); 805 806 /* Rescale (factor powers of 2 out of the numerator and denominator). */ 807 interval = bin_info->reg_interval; 808 shift = ffs(interval) - 1; 809 diff >>= shift; 810 interval >>= shift; 811 812 if (interval == 1) { 813 /* The divisor was a power of 2. */ 814 regind = diff; 815 } else { 816 /* 817 * To divide by a number D that is not a power of two we 818 * multiply by (2^21 / D) and then right shift by 21 positions. 819 * 820 * X / D 821 * 822 * becomes 823 * 824 * (X * interval_invs[D - 3]) >> SIZE_INV_SHIFT 825 * 826 * We can omit the first three elements, because we never 827 * divide by 0, and 1 and 2 are both powers of two, which are 828 * handled above. 829 */ 830#define SIZE_INV_SHIFT ((sizeof(unsigned) << 3) - LG_RUN_MAXREGS) 831#define SIZE_INV(s) (((1U << SIZE_INV_SHIFT) / (s)) + 1) 832 static const unsigned interval_invs[] = { 833 SIZE_INV(3), 834 SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7), 835 SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11), 836 SIZE_INV(12), SIZE_INV(13), SIZE_INV(14), SIZE_INV(15), 837 SIZE_INV(16), SIZE_INV(17), SIZE_INV(18), SIZE_INV(19), 838 SIZE_INV(20), SIZE_INV(21), SIZE_INV(22), SIZE_INV(23), 839 SIZE_INV(24), SIZE_INV(25), SIZE_INV(26), SIZE_INV(27), 840 SIZE_INV(28), SIZE_INV(29), SIZE_INV(30), SIZE_INV(31) 841 }; 842 843 if (interval <= ((sizeof(interval_invs) / sizeof(unsigned)) + 844 2)) { 845 regind = (diff * interval_invs[interval - 3]) >> 846 SIZE_INV_SHIFT; 847 } else 848 regind = diff / interval; 849#undef SIZE_INV 850#undef SIZE_INV_SHIFT 851 } 852 assert(diff == regind * interval); 853 assert(regind < bin_info->nregs); 854 855 return (regind); 856} 857 858JEMALLOC_INLINE prof_ctx_t * 859arena_prof_ctx_get(const void *ptr) 860{ 861 prof_ctx_t *ret; 862 arena_chunk_t *chunk; 863 size_t pageind, mapbits; 864 865 cassert(config_prof); 866 assert(ptr != NULL); 867 assert(CHUNK_ADDR2BASE(ptr) != ptr); 868 869 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); 870 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; 871 mapbits = arena_mapbits_get(chunk, pageind); 872 assert((mapbits & CHUNK_MAP_ALLOCATED) != 0); 873 if ((mapbits & CHUNK_MAP_LARGE) == 0) 874 ret = (prof_ctx_t *)(uintptr_t)1U; 875 else 876 ret = arena_mapp_get(chunk, pageind)->prof_ctx; 877 878 return (ret); 879} 880 881JEMALLOC_INLINE void 882arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx) 883{ 884 arena_chunk_t *chunk; 885 size_t pageind; 886 887 cassert(config_prof); 888 assert(ptr != NULL); 889 assert(CHUNK_ADDR2BASE(ptr) != ptr); 890 891 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); 892 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; 893 assert(arena_mapbits_allocated_get(chunk, pageind) != 0); 894 895 if (arena_mapbits_large_get(chunk, pageind) != 0) 896 arena_mapp_get(chunk, pageind)->prof_ctx = ctx; 897} 898 899JEMALLOC_ALWAYS_INLINE void * 900arena_malloc(arena_t *arena, size_t size, bool zero, bool try_tcache) 901{ 902 tcache_t *tcache; 903 904 assert(size != 0); 905 assert(size <= arena_maxclass); 906 907 if (size <= SMALL_MAXCLASS) { 908 if (try_tcache && (tcache = tcache_get(true)) != NULL) 909 return (tcache_alloc_small(tcache, size, zero)); 910 else { 911 return (arena_malloc_small(choose_arena(arena), size, 912 zero)); 913 } 914 } else { 915 /* 916 * Initialize tcache after checking size in order to avoid 917 * infinite recursion during tcache initialization. 918 */ 919 if (try_tcache && size <= tcache_maxclass && (tcache = 920 tcache_get(true)) != NULL) 921 return (tcache_alloc_large(tcache, size, zero)); 922 else { 923 return (arena_malloc_large(choose_arena(arena), size, 924 zero)); 925 } 926 } 927} 928 929/* Return the size of the allocation pointed to by ptr. */ 930JEMALLOC_ALWAYS_INLINE size_t 931arena_salloc(const void *ptr, bool demote) 932{ 933 size_t ret; 934 arena_chunk_t *chunk; 935 size_t pageind, binind; 936 937 assert(ptr != NULL); 938 assert(CHUNK_ADDR2BASE(ptr) != ptr); 939 940 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); 941 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; 942 assert(arena_mapbits_allocated_get(chunk, pageind) != 0); 943 binind = arena_mapbits_binind_get(chunk, pageind); 944 if (binind == BININD_INVALID || (config_prof && demote == false && 945 arena_mapbits_large_get(chunk, pageind) != 0)) { 946 /* 947 * Large allocation. In the common case (demote == true), and 948 * as this is an inline function, most callers will only end up 949 * looking at binind to determine that ptr is a small 950 * allocation. 951 */ 952 assert(((uintptr_t)ptr & PAGE_MASK) == 0); 953 ret = arena_mapbits_large_size_get(chunk, pageind); 954 assert(ret != 0); 955 assert(pageind + (ret>>LG_PAGE) <= chunk_npages); 956 assert(ret == PAGE || arena_mapbits_large_size_get(chunk, 957 pageind+(ret>>LG_PAGE)-1) == 0); 958 assert(binind == arena_mapbits_binind_get(chunk, 959 pageind+(ret>>LG_PAGE)-1)); 960 assert(arena_mapbits_dirty_get(chunk, pageind) == 961 arena_mapbits_dirty_get(chunk, pageind+(ret>>LG_PAGE)-1)); 962 } else { 963 /* Small allocation (possibly promoted to a large object). */ 964 assert(arena_mapbits_large_get(chunk, pageind) != 0 || 965 arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk, 966 pageind)) == binind); 967 ret = arena_bin_info[binind].reg_size; 968 } 969 970 return (ret); 971} 972 973JEMALLOC_ALWAYS_INLINE void 974arena_dalloc(arena_chunk_t *chunk, void *ptr, bool try_tcache) 975{ 976 size_t pageind, mapbits; 977 tcache_t *tcache; 978 979 assert(ptr != NULL); 980 assert(CHUNK_ADDR2BASE(ptr) != ptr); 981 982 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; 983 mapbits = arena_mapbits_get(chunk, pageind); 984 assert(arena_mapbits_allocated_get(chunk, pageind) != 0); 985 if ((mapbits & CHUNK_MAP_LARGE) == 0) { 986 /* Small allocation. */ 987 if (try_tcache && (tcache = tcache_get(false)) != NULL) { 988 size_t binind; 989 990 binind = arena_ptr_small_binind_get(ptr, mapbits); 991 tcache_dalloc_small(tcache, ptr, binind); 992 } else 993 arena_dalloc_small(chunk->arena, chunk, ptr, pageind); 994 } else { 995 size_t size = arena_mapbits_large_size_get(chunk, pageind); 996 997 assert(((uintptr_t)ptr & PAGE_MASK) == 0); 998 999 if (try_tcache && size <= tcache_maxclass && (tcache = 1000 tcache_get(false)) != NULL) { 1001 tcache_dalloc_large(tcache, ptr, size); 1002 } else 1003 arena_dalloc_large(chunk->arena, chunk, ptr); 1004 } 1005} 1006# endif /* JEMALLOC_ARENA_INLINE_B */ 1007#endif 1008 1009#endif /* JEMALLOC_H_INLINES */ 1010/******************************************************************************/ 1011