arena.c revision 94e7ffa9794792d2ec70269a0ab9c282a32aa2ec
#define	JEMALLOC_ARENA_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

purge_mode_t	opt_purge = PURGE_DEFAULT;
const char	*purge_mode_names[] = {
	"ratio",
	"decay",
	"N/A"
};
ssize_t		opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
static ssize_t	lg_dirty_mult_default;
ssize_t		opt_decay_time = DECAY_TIME_DEFAULT;
static ssize_t	decay_time_default;

arena_bin_info_t	arena_bin_info[NBINS];

size_t		map_bias;
size_t		map_misc_offset;
size_t		arena_maxrun; /* Max run size for arenas. */
size_t		large_maxclass; /* Max large size class. */
unsigned	nlclasses; /* Number of large size classes. */
unsigned	nhclasses; /* Number of huge size classes. */

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void	arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena,
    size_t ndirty_limit);
static void	arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run,
    bool dirty, bool cleaned, bool decommitted);
static void	arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena,
    arena_chunk_t *chunk, arena_run_t *run, arena_bin_t *bin);
static void	arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin);

/******************************************************************************/

JEMALLOC_INLINE_C size_t
arena_miscelm_size_get(const arena_chunk_map_misc_t *miscelm)
{
	arena_chunk_t *chunk;
	size_t pageind, mapbits;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
	pageind = arena_miscelm_to_pageind(miscelm);
	mapbits = arena_mapbits_get(chunk, pageind);
	return (arena_mapbits_size_decode(mapbits));
}

JEMALLOC_INLINE_C int
arena_run_addr_comp(const arena_chunk_map_misc_t *a,
    const arena_chunk_map_misc_t *b)
{
	uintptr_t a_miscelm = (uintptr_t)a;
	uintptr_t b_miscelm = (uintptr_t)b;

	assert(a != NULL);
	assert(b != NULL);

	return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm));
}

/* Generate pairing heap functions. */
ph_gen(static UNUSED, arena_run_heap_, arena_run_heap_t, arena_chunk_map_misc_t,
    ph_link, arena_run_addr_comp)

#ifdef JEMALLOC_JET
#undef run_quantize_floor
#define	run_quantize_floor JEMALLOC_N(n_run_quantize_floor)
#endif
static size_t
run_quantize_floor(size_t size)
{
	size_t ret;
	pszind_t pind;

	assert(size > 0);
	assert(size <= HUGE_MAXCLASS);
	assert((size & PAGE_MASK) == 0);

	assert(size != 0);
	assert(size == PAGE_CEILING(size));

	pind = psz2ind(size - large_pad + 1);
	if (pind == 0) {
		/*
		 * Avoid underflow.  This short-circuit would also do the right
		 * thing for all sizes in the range for which there are
		 * PAGE-spaced size classes, but it's simplest to just handle
		 * the one case that would cause erroneous results.
		 */
		return (size);
	}
	ret = pind2sz(pind - 1) + large_pad;
	assert(ret <= size);
	return (ret);
}
#ifdef JEMALLOC_JET
#undef run_quantize_floor
#define	run_quantize_floor JEMALLOC_N(run_quantize_floor)
run_quantize_t *run_quantize_floor = JEMALLOC_N(n_run_quantize_floor);
#endif

#ifdef JEMALLOC_JET
#undef run_quantize_ceil
#define	run_quantize_ceil JEMALLOC_N(n_run_quantize_ceil)
#endif
static size_t
run_quantize_ceil(size_t size)
{
	size_t ret;

	assert(size > 0);
	assert(size <= HUGE_MAXCLASS);
	assert((size & PAGE_MASK) == 0);

	ret = run_quantize_floor(size);
	if (ret < size) {
		/*
		 * Skip a quantization that may have an adequately large run,
		 * because under-sized runs may be mixed in.  This only happens
		 * when an unusual size is requested, i.e. for aligned
		 * allocation, and is just one of several places where linear
		 * search would potentially find sufficiently aligned available
		 * memory somewhere lower.
		 */
		ret = pind2sz(psz2ind(ret - large_pad + 1)) + large_pad;
	}
	return (ret);
}
#ifdef JEMALLOC_JET
#undef run_quantize_ceil
#define	run_quantize_ceil JEMALLOC_N(run_quantize_ceil)
run_quantize_t *run_quantize_ceil = JEMALLOC_N(n_run_quantize_ceil);
#endif

static void
arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
	pszind_t pind = psz2ind(run_quantize_floor(arena_miscelm_size_get(
	    arena_miscelm_get_const(chunk, pageind))));
	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));
	arena_run_heap_insert(&arena->runs_avail[pind],
	    arena_miscelm_get_mutable(chunk, pageind));
}

static void
arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
	pszind_t pind = psz2ind(run_quantize_floor(arena_miscelm_size_get(
	    arena_miscelm_get_const(chunk, pageind))));
	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));
	arena_run_heap_remove(&arena->runs_avail[pind],
	    arena_miscelm_get_mutable(chunk, pageind));
}

static void
arena_run_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
	arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
	    pageind);

	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));
	assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
	assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
	    CHUNK_MAP_DIRTY);

	qr_new(&miscelm->rd, rd_link);
	qr_meld(&arena->runs_dirty, &miscelm->rd, rd_link);
	arena->ndirty += npages;
}

static void
arena_run_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
	arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
	    pageind);

	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));
	assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
	assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
	    CHUNK_MAP_DIRTY);

	qr_remove(&miscelm->rd, rd_link);
	assert(arena->ndirty >= npages);
	arena->ndirty -= npages;
}

static size_t
arena_chunk_dirty_npages(const extent_node_t *node)
{

	return (extent_node_size_get(node) >> LG_PAGE);
}
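/*
 * Illustrative note (editor's addition, not part of the original source): the
 * run_quantize_floor()/run_quantize_ceil() pair above maps an arbitrary
 * page-aligned run size onto the page-spaced size-class lattice that indexes
 * runs_avail.  A minimal sketch of the expected relationships, assuming the
 * JEMALLOC_JET shims expose both functions:
 *
 *	size_t size = 17 << LG_PAGE;
 *	size_t lo = run_quantize_floor(size);	// largest class <= size
 *	size_t hi = run_quantize_ceil(size);	// a class no smaller than lo
 *	assert(lo <= size);			// mirrors the assert above
 *	assert(run_quantize_floor(lo) == lo);	// floor is idempotent
 *
 * arena_avail_insert()/arena_avail_remove() below index runs by the floor, so
 * a run is filed under the largest class of request it can fully satisfy.
 */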
void
arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node, bool cache)
{

	if (cache) {
		extent_node_dirty_linkage_init(node);
		extent_node_dirty_insert(node, &arena->runs_dirty,
		    &arena->chunks_cache);
		arena->ndirty += arena_chunk_dirty_npages(node);
	}
}

void
arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node, bool dirty)
{

	if (dirty) {
		extent_node_dirty_remove(node);
		assert(arena->ndirty >= arena_chunk_dirty_npages(node));
		arena->ndirty -= arena_chunk_dirty_npages(node);
	}
}

JEMALLOC_INLINE_C void *
arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
{
	void *ret;
	size_t regind;
	arena_chunk_map_misc_t *miscelm;
	void *rpages;

	assert(run->nfree > 0);
	assert(!bitmap_full(run->bitmap, &bin_info->bitmap_info));

	regind = (unsigned)bitmap_sfu(run->bitmap, &bin_info->bitmap_info);
	miscelm = arena_run_to_miscelm(run);
	rpages = arena_miscelm_to_rpages(miscelm);
	ret = (void *)((uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset +
	    (uintptr_t)(bin_info->reg_interval * regind));
	run->nfree--;
	return (ret);
}

JEMALLOC_INLINE_C void
arena_run_reg_dalloc(arena_run_t *run, void *ptr)
{
	arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	size_t mapbits = arena_mapbits_get(chunk, pageind);
	szind_t binind = arena_ptr_small_binind_get(ptr, mapbits);
	arena_bin_info_t *bin_info = &arena_bin_info[binind];
	size_t regind = arena_run_regind(run, bin_info, ptr);

	assert(run->nfree < bin_info->nregs);
	/* Freeing an interior pointer can cause assertion failure. */
	assert(((uintptr_t)ptr -
	    ((uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
	    (uintptr_t)bin_info->reg0_offset)) %
	    (uintptr_t)bin_info->reg_interval == 0);
	assert((uintptr_t)ptr >=
	    (uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
	    (uintptr_t)bin_info->reg0_offset);
	/* Freeing an unallocated pointer can cause assertion failure. */
	assert(bitmap_get(run->bitmap, &bin_info->bitmap_info, regind));

	bitmap_unset(run->bitmap, &bin_info->bitmap_info, regind);
	run->nfree++;
}

JEMALLOC_INLINE_C void
arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages)
{

	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
	    (run_ind << LG_PAGE)), (npages << LG_PAGE));
	memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0,
	    (npages << LG_PAGE));
}

JEMALLOC_INLINE_C void
arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind)
{

	JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind
	    << LG_PAGE)), PAGE);
}

JEMALLOC_INLINE_C void
arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
{
	size_t i;
	UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));

	arena_run_page_mark_zeroed(chunk, run_ind);
	for (i = 0; i < PAGE / sizeof(size_t); i++)
		assert(p[i] == 0);
}

static void
arena_nactive_add(arena_t *arena, size_t add_pages)
{

	if (config_stats) {
		size_t cactive_add = CHUNK_CEILING((arena->nactive +
		    add_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
		    LG_PAGE);
		if (cactive_add != 0)
			stats_cactive_add(cactive_add);
	}
	arena->nactive += add_pages;
}

static void
arena_nactive_sub(arena_t *arena, size_t sub_pages)
{

	if (config_stats) {
		size_t cactive_sub = CHUNK_CEILING(arena->nactive << LG_PAGE) -
		    CHUNK_CEILING((arena->nactive - sub_pages) << LG_PAGE);
		if (cactive_sub != 0)
			stats_cactive_sub(cactive_sub);
	}
	arena->nactive -= sub_pages;
}

static void
arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind,
    size_t flag_dirty, size_t flag_decommitted, size_t need_pages)
{
	size_t total_pages, rem_pages;

	assert(flag_dirty == 0 || flag_decommitted == 0);

	total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >>
	    LG_PAGE;
	assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) ==
	    flag_dirty);
	assert(need_pages <= total_pages);
	rem_pages = total_pages - need_pages;

	arena_avail_remove(arena, chunk, run_ind, total_pages);
	if (flag_dirty != 0)
		arena_run_dirty_remove(arena, chunk, run_ind, total_pages);
	arena_nactive_add(arena, need_pages);

	/* Keep track of trailing unused pages for later use. */
	if (rem_pages > 0) {
		size_t flags = flag_dirty | flag_decommitted;
		size_t flag_unzeroed_mask = (flags == 0) ?  CHUNK_MAP_UNZEROED :
		    0;

		arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
		    (rem_pages << LG_PAGE), flags |
		    (arena_mapbits_unzeroed_get(chunk, run_ind+need_pages) &
		    flag_unzeroed_mask));
		arena_mapbits_unallocated_set(chunk, run_ind+total_pages-1,
		    (rem_pages << LG_PAGE), flags |
		    (arena_mapbits_unzeroed_get(chunk, run_ind+total_pages-1) &
		    flag_unzeroed_mask));
		if (flag_dirty != 0) {
			arena_run_dirty_insert(arena, chunk, run_ind+need_pages,
			    rem_pages);
		}
		arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages);
	}
}

static bool
arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
    bool remove, bool zero)
{
	arena_chunk_t *chunk;
	arena_chunk_map_misc_t *miscelm;
	size_t flag_dirty, flag_decommitted, run_ind, need_pages;
	size_t flag_unzeroed_mask;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	miscelm = arena_run_to_miscelm(run);
	run_ind = arena_miscelm_to_pageind(miscelm);
	flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
	flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
	need_pages = (size >> LG_PAGE);
	assert(need_pages > 0);

	if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize,
	    run_ind << LG_PAGE, size, arena->ind))
		return (true);

	if (remove) {
		arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
		    flag_decommitted, need_pages);
	}

	if (zero) {
		if (flag_decommitted != 0) {
			/* The run is untouched, and therefore zeroed. */
			JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
			    *)((uintptr_t)chunk + (run_ind << LG_PAGE)),
			    (need_pages << LG_PAGE));
		} else if (flag_dirty != 0) {
			/* The run is dirty, so all pages must be zeroed. */
			arena_run_zero(chunk, run_ind, need_pages);
		} else {
			/*
			 * The run is clean, so some pages may be zeroed (i.e.
			 * never before touched).
			 */
			size_t i;
			for (i = 0; i < need_pages; i++) {
				if (arena_mapbits_unzeroed_get(chunk, run_ind+i)
				    != 0)
					arena_run_zero(chunk, run_ind+i, 1);
				else if (config_debug) {
					arena_run_page_validate_zeroed(chunk,
					    run_ind+i);
				} else {
					arena_run_page_mark_zeroed(chunk,
					    run_ind+i);
				}
			}
		}
	} else {
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
		    (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
	}

	/*
	 * Set the last element first, in case the run only contains one page
	 * (i.e. both statements set the same element).
	 */
	flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
	    CHUNK_MAP_UNZEROED : 0;
	arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty |
	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
	    run_ind+need_pages-1)));
	arena_mapbits_large_set(chunk, run_ind, size, flag_dirty |
	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, run_ind)));
	return (false);
}

static bool
arena_run_split_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
{

	return (arena_run_split_large_helper(arena, run, size, true, zero));
}

static bool
arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
{

	return (arena_run_split_large_helper(arena, run, size, false, zero));
}

static bool
arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size,
    szind_t binind)
{
	arena_chunk_t *chunk;
	arena_chunk_map_misc_t *miscelm;
	size_t flag_dirty, flag_decommitted, run_ind, need_pages, i;

	assert(binind != BININD_INVALID);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	miscelm = arena_run_to_miscelm(run);
	run_ind = arena_miscelm_to_pageind(miscelm);
	flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
	flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
	need_pages = (size >> LG_PAGE);
	assert(need_pages > 0);

	if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize,
	    run_ind << LG_PAGE, size, arena->ind))
		return (true);

	arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
	    flag_decommitted, need_pages);

	for (i = 0; i < need_pages; i++) {
		size_t flag_unzeroed = arena_mapbits_unzeroed_get(chunk,
		    run_ind+i);
		arena_mapbits_small_set(chunk, run_ind+i, i, binind,
		    flag_unzeroed);
		if (config_debug && flag_dirty == 0 && flag_unzeroed == 0)
			arena_run_page_validate_zeroed(chunk, run_ind+i);
	}
	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
	    (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
	return (false);
}

static arena_chunk_t *
arena_chunk_init_spare(arena_t *arena)
{
	arena_chunk_t *chunk;

	assert(arena->spare != NULL);

	chunk = arena->spare;
	arena->spare = NULL;

	assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
	assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
	assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
	    arena_maxrun);
	assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
	    arena_maxrun);
	assert(arena_mapbits_dirty_get(chunk, map_bias) ==
	    arena_mapbits_dirty_get(chunk, chunk_npages-1));

	return (chunk);
}

static bool
arena_chunk_register(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
    bool zero)
{

	/*
	 * The extent node notion of "committed" doesn't directly apply to
	 * arena chunks.  Arbitrarily mark them as committed.  The commit state
	 * of runs is tracked individually, and upon chunk deallocation the
	 * entire chunk is in a consistent commit state.
	 */
	extent_node_init(&chunk->node, arena, chunk, chunksize, zero, true);
	extent_node_achunk_set(&chunk->node, true);
	return (chunk_register(tsdn, chunk, &chunk->node));
}

static arena_chunk_t *
arena_chunk_alloc_internal_hard(tsdn_t *tsdn, arena_t *arena,
    chunk_hooks_t *chunk_hooks, bool *zero, bool *commit)
{
	arena_chunk_t *chunk;

	malloc_mutex_unlock(tsdn, &arena->lock);

	chunk = (arena_chunk_t *)chunk_alloc_wrapper(tsdn, arena, chunk_hooks,
	    NULL, chunksize, chunksize, zero, commit);
	if (chunk != NULL && !*commit) {
		/* Commit header. */
		if (chunk_hooks->commit(chunk, chunksize, 0, map_bias <<
		    LG_PAGE, arena->ind)) {
			chunk_dalloc_wrapper(tsdn, arena, chunk_hooks,
			    (void *)chunk, chunksize, *zero, *commit);
			chunk = NULL;
		}
	}
	if (chunk != NULL && arena_chunk_register(tsdn, arena, chunk, *zero)) {
		if (!*commit) {
			/* Undo commit of header. */
			chunk_hooks->decommit(chunk, chunksize, 0, map_bias <<
			    LG_PAGE, arena->ind);
		}
		chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, (void *)chunk,
		    chunksize, *zero, *commit);
		chunk = NULL;
	}

	malloc_mutex_lock(tsdn, &arena->lock);
	return (chunk);
}

static arena_chunk_t *
arena_chunk_alloc_internal(tsdn_t *tsdn, arena_t *arena, bool *zero,
    bool *commit)
{
	arena_chunk_t *chunk;
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;

	chunk = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, chunksize,
	    chunksize, zero, true);
	if (chunk != NULL) {
		if (arena_chunk_register(tsdn, arena, chunk, *zero)) {
			chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk,
			    chunksize, true);
			return (NULL);
		}
		*commit = true;
	}
	if (chunk == NULL) {
		chunk = arena_chunk_alloc_internal_hard(tsdn, arena,
		    &chunk_hooks, zero, commit);
	}

	if (config_stats && chunk != NULL) {
		arena->stats.mapped += chunksize;
		arena->stats.metadata_mapped += (map_bias << LG_PAGE);
	}

	return (chunk);
}

static arena_chunk_t *
arena_chunk_init_hard(tsdn_t *tsdn, arena_t *arena)
{
	arena_chunk_t *chunk;
	bool zero, commit;
	size_t flag_unzeroed, flag_decommitted, i;

	assert(arena->spare == NULL);

	zero = false;
	commit = false;
	chunk = arena_chunk_alloc_internal(tsdn, arena, &zero, &commit);
	if (chunk == NULL)
		return (NULL);

	/*
	 * Initialize the map to contain one maximal free untouched run.  Mark
	 * the pages as zeroed if arena_chunk_alloc_internal() returned a zeroed
	 * or decommitted chunk.
	 */
	flag_unzeroed = (zero || !commit) ? 0 : CHUNK_MAP_UNZEROED;
	flag_decommitted = commit ? 0 : CHUNK_MAP_DECOMMITTED;
	arena_mapbits_unallocated_set(chunk, map_bias, arena_maxrun,
	    flag_unzeroed | flag_decommitted);
	/*
	 * There is no need to initialize the internal page map entries unless
	 * the chunk is not zeroed.
	 */
	if (!zero) {
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
		    (void *)arena_bitselm_get_const(chunk, map_bias+1),
		    (size_t)((uintptr_t)arena_bitselm_get_const(chunk,
		    chunk_npages-1) -
		    (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1)));
		for (i = map_bias+1; i < chunk_npages-1; i++)
			arena_mapbits_internal_set(chunk, i, flag_unzeroed);
	} else {
		JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
		    *)arena_bitselm_get_const(chunk, map_bias+1),
		    (size_t)((uintptr_t)arena_bitselm_get_const(chunk,
		    chunk_npages-1) -
		    (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1)));
		if (config_debug) {
			for (i = map_bias+1; i < chunk_npages-1; i++) {
				assert(arena_mapbits_unzeroed_get(chunk, i) ==
				    flag_unzeroed);
			}
		}
	}
	arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxrun,
	    flag_unzeroed);

	return (chunk);
}

static arena_chunk_t *
arena_chunk_alloc(tsdn_t *tsdn, arena_t *arena)
{
	arena_chunk_t *chunk;

	if (arena->spare != NULL)
		chunk = arena_chunk_init_spare(arena);
	else {
		chunk = arena_chunk_init_hard(tsdn, arena);
		if (chunk == NULL)
			return (NULL);
	}

	ql_elm_new(&chunk->node, ql_link);
	ql_tail_insert(&arena->achunks, &chunk->node, ql_link);
	arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias);

	return (chunk);
}

static void
arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
{
	bool committed;
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;

	chunk_deregister(chunk, &chunk->node);

	committed = (arena_mapbits_decommitted_get(chunk, map_bias) == 0);
	if (!committed) {
		/*
		 * Decommit the header.  Mark the chunk as decommitted even if
		 * header decommit fails, since treating a partially committed
		 * chunk as committed has a high potential for causing later
		 * access of decommitted memory.
		 */
		chunk_hooks = chunk_hooks_get(tsdn, arena);
		chunk_hooks.decommit(chunk, chunksize, 0, map_bias << LG_PAGE,
		    arena->ind);
	}

	chunk_dalloc_cache(tsdn, arena, &chunk_hooks, (void *)chunk, chunksize,
	    committed);

	if (config_stats) {
		arena->stats.mapped -= chunksize;
		arena->stats.metadata_mapped -= (map_bias << LG_PAGE);
	}
}

static void
arena_spare_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *spare)
{

	assert(arena->spare != spare);

	if (arena_mapbits_dirty_get(spare, map_bias) != 0) {
		arena_run_dirty_remove(arena, spare, map_bias,
		    chunk_npages-map_bias);
	}

	arena_chunk_discard(tsdn, arena, spare);
}

static void
arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
{
	arena_chunk_t *spare;

	assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
	assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
	assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
	    arena_maxrun);
	assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
	    arena_maxrun);
	assert(arena_mapbits_dirty_get(chunk, map_bias) ==
	    arena_mapbits_dirty_get(chunk, chunk_npages-1));
	assert(arena_mapbits_decommitted_get(chunk, map_bias) ==
	    arena_mapbits_decommitted_get(chunk, chunk_npages-1));

	/* Remove run from runs_avail, so that the arena does not use it. */
	arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias);

	ql_remove(&arena->achunks, &chunk->node, ql_link);
	spare = arena->spare;
	arena->spare = chunk;
	if (spare != NULL)
		arena_spare_discard(tsdn, arena, spare);
}

static void
arena_huge_malloc_stats_update(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.nmalloc_huge++;
	arena->stats.allocated_huge += usize;
	arena->stats.hstats[index].nmalloc++;
	arena->stats.hstats[index].curhchunks++;
}

static void
arena_huge_malloc_stats_update_undo(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.nmalloc_huge--;
	arena->stats.allocated_huge -= usize;
	arena->stats.hstats[index].nmalloc--;
	arena->stats.hstats[index].curhchunks--;
}

static void
arena_huge_dalloc_stats_update(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.ndalloc_huge++;
	arena->stats.allocated_huge -= usize;
	arena->stats.hstats[index].ndalloc++;
	arena->stats.hstats[index].curhchunks--;
}

static void
arena_huge_reset_stats_cancel(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.ndalloc_huge++;
	arena->stats.hstats[index].ndalloc--;
}

static void
arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.ndalloc_huge--;
	arena->stats.allocated_huge += usize;
	arena->stats.hstats[index].ndalloc--;
	arena->stats.hstats[index].curhchunks++;
}

static void
arena_huge_ralloc_stats_update(arena_t *arena, size_t oldsize, size_t usize)
{

	arena_huge_dalloc_stats_update(arena, oldsize);
	arena_huge_malloc_stats_update(arena, usize);
}

static void
arena_huge_ralloc_stats_update_undo(arena_t *arena, size_t oldsize,
    size_t usize)
{

	arena_huge_dalloc_stats_update_undo(arena, oldsize);
	arena_huge_malloc_stats_update_undo(arena, usize);
}

extent_node_t *
arena_node_alloc(tsdn_t *tsdn, arena_t *arena)
{
	extent_node_t *node;

	malloc_mutex_lock(tsdn, &arena->node_cache_mtx);
	node = ql_last(&arena->node_cache, ql_link);
	if (node == NULL) {
		malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
		return (base_alloc(tsdn, sizeof(extent_node_t)));
	}
	ql_tail_remove(&arena->node_cache, extent_node_t, ql_link);
	malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
	return (node);
}

void
arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node)
{

	malloc_mutex_lock(tsdn, &arena->node_cache_mtx);
	ql_elm_new(node, ql_link);
	ql_tail_insert(&arena->node_cache, node, ql_link);
	malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
}

static void *
arena_chunk_alloc_huge_hard(tsdn_t *tsdn, arena_t *arena,
    chunk_hooks_t *chunk_hooks, size_t usize, size_t alignment, bool *zero,
    size_t csize)
{
	void *ret;
	bool commit = true;

	ret = chunk_alloc_wrapper(tsdn, arena, chunk_hooks, NULL, csize,
	    alignment, zero, &commit);
	if (ret == NULL) {
		/* Revert optimistic stats updates. */
		malloc_mutex_lock(tsdn, &arena->lock);
		if (config_stats) {
			arena_huge_malloc_stats_update_undo(arena, usize);
			arena->stats.mapped -= usize;
		}
		arena_nactive_sub(arena, usize >> LG_PAGE);
		malloc_mutex_unlock(tsdn, &arena->lock);
	}

	return (ret);
}

void *
arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool *zero)
{
	void *ret;
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
	size_t csize = CHUNK_CEILING(usize);

	malloc_mutex_lock(tsdn, &arena->lock);

	/* Optimistically update stats. */
	if (config_stats) {
		arena_huge_malloc_stats_update(arena, usize);
		arena->stats.mapped += usize;
	}
	arena_nactive_add(arena, usize >> LG_PAGE);

	ret = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, csize,
	    alignment, zero, true);
	malloc_mutex_unlock(tsdn, &arena->lock);
	if (ret == NULL) {
		ret = arena_chunk_alloc_huge_hard(tsdn, arena, &chunk_hooks,
		    usize, alignment, zero, csize);
	}

	return (ret);
}

void
arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk, size_t usize)
{
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
	size_t csize;

	csize = CHUNK_CEILING(usize);
	malloc_mutex_lock(tsdn, &arena->lock);
	if (config_stats) {
		arena_huge_dalloc_stats_update(arena, usize);
		arena->stats.mapped -= usize;
	}
	arena_nactive_sub(arena, usize >> LG_PAGE);

	chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk, csize, true);
	malloc_mutex_unlock(tsdn, &arena->lock);
}

void
arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena, void *chunk,
    size_t oldsize, size_t usize)
{

	assert(CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize));
	assert(oldsize != usize);

	malloc_mutex_lock(tsdn, &arena->lock);
	if (config_stats)
		arena_huge_ralloc_stats_update(arena, oldsize, usize);
	if (oldsize < usize)
		arena_nactive_add(arena, (usize - oldsize) >> LG_PAGE);
	else
		arena_nactive_sub(arena, (oldsize - usize) >> LG_PAGE);
	malloc_mutex_unlock(tsdn, &arena->lock);
}

void
arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, void *chunk,
    size_t oldsize, size_t usize)
{
	size_t udiff = oldsize - usize;
	size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);

	malloc_mutex_lock(tsdn, &arena->lock);
	if (config_stats) {
		arena_huge_ralloc_stats_update(arena, oldsize, usize);
		if (cdiff != 0)
			arena->stats.mapped -= cdiff;
	}
	arena_nactive_sub(arena, udiff >> LG_PAGE);

	if (cdiff != 0) {
		chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
		void *nchunk = (void *)((uintptr_t)chunk +
		    CHUNK_CEILING(usize));

		chunk_dalloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff,
		    true);
	}
	malloc_mutex_unlock(tsdn, &arena->lock);
}

static bool
arena_chunk_ralloc_huge_expand_hard(tsdn_t *tsdn, arena_t *arena,
    chunk_hooks_t *chunk_hooks, void *chunk, size_t oldsize, size_t usize,
    bool *zero, void *nchunk, size_t udiff, size_t cdiff)
{
	bool err;
	bool commit = true;

	err = (chunk_alloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff,
	    chunksize, zero, &commit) == NULL);
	if (err) {
		/* Revert optimistic stats updates. */
		malloc_mutex_lock(tsdn, &arena->lock);
		if (config_stats) {
			arena_huge_ralloc_stats_update_undo(arena, oldsize,
			    usize);
			arena->stats.mapped -= cdiff;
		}
		arena_nactive_sub(arena, udiff >> LG_PAGE);
		malloc_mutex_unlock(tsdn, &arena->lock);
	} else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk,
	    cdiff, true, arena->ind)) {
		chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff,
		    *zero, true);
		err = true;
	}
	return (err);
}

bool
arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, void *chunk,
    size_t oldsize, size_t usize, bool *zero)
{
	bool err;
	chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
	void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize));
	size_t udiff = usize - oldsize;
	size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize);

	malloc_mutex_lock(tsdn, &arena->lock);

	/* Optimistically update stats. */
	if (config_stats) {
		arena_huge_ralloc_stats_update(arena, oldsize, usize);
		arena->stats.mapped += cdiff;
	}
	arena_nactive_add(arena, udiff >> LG_PAGE);

	err = (chunk_alloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff,
	    chunksize, zero, true) == NULL);
	malloc_mutex_unlock(tsdn, &arena->lock);
	if (err) {
		err = arena_chunk_ralloc_huge_expand_hard(tsdn, arena,
		    &chunk_hooks, chunk, oldsize, usize, zero, nchunk, udiff,
		    cdiff);
	} else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk,
	    cdiff, true, arena->ind)) {
		chunk_dalloc_wrapper(tsdn, arena, &chunk_hooks, nchunk, cdiff,
		    *zero, true);
		err = true;
	}

	return (err);
}

/*
 * Do first-best-fit run selection, i.e. select the lowest run that best fits.
 * Run sizes are indexed, so not all candidate runs are necessarily exactly the
 * same size.
 */
static arena_run_t *
arena_run_first_best_fit(arena_t *arena, size_t size)
{
	pszind_t pind, i;

	pind = psz2ind(run_quantize_ceil(size));

	for (i = pind; pind2sz(i) <= large_maxclass; i++) {
		arena_chunk_map_misc_t *miscelm = arena_run_heap_first(
		    &arena->runs_avail[i]);
		if (miscelm != NULL)
			return (&miscelm->run);
	}

	return (NULL);
}

static arena_run_t *
arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
{
	arena_run_t *run = arena_run_first_best_fit(arena, s2u(size));
	if (run != NULL) {
		if (arena_run_split_large(arena, run, size, zero))
			run = NULL;
	}
	return (run);
}
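/*
 * Illustrative note (editor's addition, not part of the original source):
 * first-best-fit selection scans the runs_avail heaps upward from the
 * quantized request size and takes the first element of the first non-empty
 * heap.  Conceptually:
 *
 *	for (i = psz2ind(run_quantize_ceil(size));
 *	    pind2sz(i) <= large_maxclass; i++) {
 *		if (arena_run_heap_first(&arena->runs_avail[i]) != NULL)
 *			return the run for that element;
 *	}
 *	return NULL;
 *
 * Because the heaps are ordered by arena_run_addr_comp(), the element returned
 * is the lowest-addressed run within its size class, which keeps allocations
 * packed toward low addresses.
 */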
static arena_run_t *
arena_run_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t size, bool zero)
{
	arena_chunk_t *chunk;
	arena_run_t *run;

	assert(size <= arena_maxrun);
	assert(size == PAGE_CEILING(size));

	/* Search the arena's chunks for the lowest best fit. */
	run = arena_run_alloc_large_helper(arena, size, zero);
	if (run != NULL)
		return (run);

	/*
	 * No usable runs.  Create a new chunk from which to allocate the run.
	 */
	chunk = arena_chunk_alloc(tsdn, arena);
	if (chunk != NULL) {
		run = &arena_miscelm_get_mutable(chunk, map_bias)->run;
		if (arena_run_split_large(arena, run, size, zero))
			run = NULL;
		return (run);
	}

	/*
	 * arena_chunk_alloc() failed, but another thread may have made
	 * sufficient memory available while this one dropped arena->lock in
	 * arena_chunk_alloc(), so search one more time.
	 */
	return (arena_run_alloc_large_helper(arena, size, zero));
}

static arena_run_t *
arena_run_alloc_small_helper(arena_t *arena, size_t size, szind_t binind)
{
	arena_run_t *run = arena_run_first_best_fit(arena, size);
	if (run != NULL) {
		if (arena_run_split_small(arena, run, size, binind))
			run = NULL;
	}
	return (run);
}

static arena_run_t *
arena_run_alloc_small(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t binind)
{
	arena_chunk_t *chunk;
	arena_run_t *run;

	assert(size <= arena_maxrun);
	assert(size == PAGE_CEILING(size));
	assert(binind != BININD_INVALID);

	/* Search the arena's chunks for the lowest best fit. */
	run = arena_run_alloc_small_helper(arena, size, binind);
	if (run != NULL)
		return (run);

	/*
	 * No usable runs.  Create a new chunk from which to allocate the run.
	 */
	chunk = arena_chunk_alloc(tsdn, arena);
	if (chunk != NULL) {
		run = &arena_miscelm_get_mutable(chunk, map_bias)->run;
		if (arena_run_split_small(arena, run, size, binind))
			run = NULL;
		return (run);
	}

	/*
	 * arena_chunk_alloc() failed, but another thread may have made
	 * sufficient memory available while this one dropped arena->lock in
	 * arena_chunk_alloc(), so search one more time.
	 */
	return (arena_run_alloc_small_helper(arena, size, binind));
}

static bool
arena_lg_dirty_mult_valid(ssize_t lg_dirty_mult)
{

	return (lg_dirty_mult >= -1 && lg_dirty_mult < (ssize_t)(sizeof(size_t)
	    << 3));
}

ssize_t
arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena)
{
	ssize_t lg_dirty_mult;

	malloc_mutex_lock(tsdn, &arena->lock);
	lg_dirty_mult = arena->lg_dirty_mult;
	malloc_mutex_unlock(tsdn, &arena->lock);

	return (lg_dirty_mult);
}

bool
arena_lg_dirty_mult_set(tsdn_t *tsdn, arena_t *arena, ssize_t lg_dirty_mult)
{

	if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
		return (true);

	malloc_mutex_lock(tsdn, &arena->lock);
	arena->lg_dirty_mult = lg_dirty_mult;
	arena_maybe_purge(tsdn, arena);
	malloc_mutex_unlock(tsdn, &arena->lock);

	return (false);
}

static void
arena_decay_deadline_init(arena_t *arena)
{

	assert(opt_purge == purge_mode_decay);

	/*
	 * Generate a new deadline that is uniformly random within the next
	 * epoch after the current one.
	 */
	nstime_copy(&arena->decay.deadline, &arena->decay.epoch);
	nstime_add(&arena->decay.deadline, &arena->decay.interval);
	if (arena->decay.time > 0) {
		nstime_t jitter;

		nstime_init(&jitter, prng_range(&arena->decay.jitter_state,
		    nstime_ns(&arena->decay.interval)));
		nstime_add(&arena->decay.deadline, &jitter);
	}
}

static bool
arena_decay_deadline_reached(const arena_t *arena, const nstime_t *time)
{

	assert(opt_purge == purge_mode_decay);

	return (nstime_compare(&arena->decay.deadline, time) <= 0);
}

static size_t
arena_decay_backlog_npages_limit(const arena_t *arena)
{
	static const uint64_t h_steps[] = {
#define	STEP(step, h, x, y) \
		h,
		SMOOTHSTEP
#undef STEP
	};
	uint64_t sum;
	size_t npages_limit_backlog;
	unsigned i;

	assert(opt_purge == purge_mode_decay);

	/*
	 * For each element of decay_backlog, multiply by the corresponding
	 * fixed-point smoothstep decay factor.  Sum the products, then divide
	 * to round down to the nearest whole number of pages.
	 */
	sum = 0;
	for (i = 0; i < SMOOTHSTEP_NSTEPS; i++)
		sum += arena->decay.backlog[i] * h_steps[i];
	npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP);

	return (npages_limit_backlog);
}

static void
arena_decay_epoch_advance(arena_t *arena, const nstime_t *time)
{
	uint64_t nadvance_u64;
	nstime_t delta;
	size_t ndirty_delta;

	assert(opt_purge == purge_mode_decay);
	assert(arena_decay_deadline_reached(arena, time));

	nstime_copy(&delta, time);
	nstime_subtract(&delta, &arena->decay.epoch);
	nadvance_u64 = nstime_divide(&delta, &arena->decay.interval);
	assert(nadvance_u64 > 0);

	/* Add nadvance_u64 decay intervals to epoch. */
	nstime_copy(&delta, &arena->decay.interval);
	nstime_imultiply(&delta, nadvance_u64);
	nstime_add(&arena->decay.epoch, &delta);

	/* Set a new deadline. */
	arena_decay_deadline_init(arena);

	/* Update the backlog. */
	if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) {
		memset(arena->decay.backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
		    sizeof(size_t));
	} else {
		size_t nadvance_z = (size_t)nadvance_u64;

		assert((uint64_t)nadvance_z == nadvance_u64);

		memmove(arena->decay.backlog, &arena->decay.backlog[nadvance_z],
		    (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t));
		if (nadvance_z > 1) {
			memset(&arena->decay.backlog[SMOOTHSTEP_NSTEPS -
			    nadvance_z], 0, (nadvance_z-1) * sizeof(size_t));
		}
	}
	ndirty_delta = (arena->ndirty > arena->decay.ndirty) ? arena->ndirty -
	    arena->decay.ndirty : 0;
	arena->decay.ndirty = arena->ndirty;
	arena->decay.backlog[SMOOTHSTEP_NSTEPS-1] = ndirty_delta;
	arena->decay.backlog_npages_limit =
	    arena_decay_backlog_npages_limit(arena);
}
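/*
 * Illustrative note (editor's addition, not part of the original source): the
 * backlog limit computed above is a fixed-point dot product of the per-epoch
 * dirty-page deltas with the precomputed smoothstep table.  The newest backlog
 * slot (index SMOOTHSTEP_NSTEPS-1) carries a weight of roughly
 * 2^SMOOTHSTEP_BFP (i.e. ~1.0), and the weight shrinks toward 0 as a slot ages
 * toward index 0.  So, for example, an epoch that added 1000 dirty pages
 * initially permits roughly 1000 pages to remain dirty, and permits roughly 0
 * once decay_time has fully elapsed, yielding a smooth rather than stepwise
 * purging schedule.
 */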
static size_t
arena_decay_npages_limit(arena_t *arena)
{
	size_t npages_limit;

	assert(opt_purge == purge_mode_decay);

	npages_limit = arena->decay.backlog_npages_limit;

	/* Add in any dirty pages created during the current epoch. */
	if (arena->ndirty > arena->decay.ndirty)
		npages_limit += arena->ndirty - arena->decay.ndirty;

	return (npages_limit);
}

static void
arena_decay_init(arena_t *arena, ssize_t decay_time)
{

	arena->decay.time = decay_time;
	if (decay_time > 0) {
		nstime_init2(&arena->decay.interval, decay_time, 0);
		nstime_idivide(&arena->decay.interval, SMOOTHSTEP_NSTEPS);
	}

	nstime_init(&arena->decay.epoch, 0);
	nstime_update(&arena->decay.epoch);
	arena->decay.jitter_state = (uint64_t)(uintptr_t)arena;
	arena_decay_deadline_init(arena);
	arena->decay.ndirty = arena->ndirty;
	arena->decay.backlog_npages_limit = 0;
	memset(arena->decay.backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
}

static bool
arena_decay_time_valid(ssize_t decay_time)
{

	if (decay_time < -1)
		return (false);
	if (decay_time == -1 || (uint64_t)decay_time <= NSTIME_SEC_MAX)
		return (true);
	return (false);
}

ssize_t
arena_decay_time_get(tsdn_t *tsdn, arena_t *arena)
{
	ssize_t decay_time;

	malloc_mutex_lock(tsdn, &arena->lock);
	decay_time = arena->decay.time;
	malloc_mutex_unlock(tsdn, &arena->lock);

	return (decay_time);
}

bool
arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time)
{

	if (!arena_decay_time_valid(decay_time))
		return (true);

	malloc_mutex_lock(tsdn, &arena->lock);
	/*
	 * Restart decay backlog from scratch, which may cause many dirty pages
	 * to be immediately purged.  It would conceptually be possible to map
	 * the old backlog onto the new backlog, but there is no justification
	 * for such complexity since decay_time changes are intended to be
	 * infrequent, either between the {-1, 0, >0} states, or a one-time
	 * arbitrary change during initial arena configuration.
	 */
	arena_decay_init(arena, decay_time);
	arena_maybe_purge(tsdn, arena);
	malloc_mutex_unlock(tsdn, &arena->lock);

	return (false);
}

static void
arena_maybe_purge_ratio(tsdn_t *tsdn, arena_t *arena)
{

	assert(opt_purge == purge_mode_ratio);

	/* Don't purge if the option is disabled. */
	if (arena->lg_dirty_mult < 0)
		return;

	/*
	 * Iterate, since preventing recursive purging could otherwise leave too
	 * many dirty pages.
	 */
	while (true) {
		size_t threshold = (arena->nactive >> arena->lg_dirty_mult);
		if (threshold < chunk_npages)
			threshold = chunk_npages;
		/*
		 * Don't purge unless the number of purgeable pages exceeds the
		 * threshold.
		 */
		if (arena->ndirty <= threshold)
			return;
		arena_purge_to_limit(tsdn, arena, threshold);
	}
}

static void
arena_maybe_purge_decay(tsdn_t *tsdn, arena_t *arena)
{
	nstime_t time;
	size_t ndirty_limit;

	assert(opt_purge == purge_mode_decay);

	/* Purge all or nothing if the option is disabled. */
	if (arena->decay.time <= 0) {
		if (arena->decay.time == 0)
			arena_purge_to_limit(tsdn, arena, 0);
		return;
	}

	nstime_copy(&time, &arena->decay.epoch);
	if (unlikely(nstime_update(&time))) {
		/* Time went backwards.  Force an epoch advance. */
		nstime_copy(&time, &arena->decay.deadline);
	}

	if (arena_decay_deadline_reached(arena, &time))
		arena_decay_epoch_advance(arena, &time);

	ndirty_limit = arena_decay_npages_limit(arena);

	/*
	 * Don't try to purge unless the number of purgeable pages exceeds the
	 * current limit.
	 */
	if (arena->ndirty <= ndirty_limit)
		return;
	arena_purge_to_limit(tsdn, arena, ndirty_limit);
}

void
arena_maybe_purge(tsdn_t *tsdn, arena_t *arena)
{

	/* Don't recursively purge. */
	if (arena->purging)
		return;

	if (opt_purge == purge_mode_ratio)
		arena_maybe_purge_ratio(tsdn, arena);
	else
		arena_maybe_purge_decay(tsdn, arena);
}

static size_t
arena_dirty_count(arena_t *arena)
{
	size_t ndirty = 0;
	arena_runs_dirty_link_t *rdelm;
	extent_node_t *chunkselm;

	for (rdelm = qr_next(&arena->runs_dirty, rd_link),
	    chunkselm = qr_next(&arena->chunks_cache, cc_link);
	    rdelm != &arena->runs_dirty; rdelm = qr_next(rdelm, rd_link)) {
		size_t npages;

		if (rdelm == &chunkselm->rd) {
			npages = extent_node_size_get(chunkselm) >> LG_PAGE;
			chunkselm = qr_next(chunkselm, cc_link);
		} else {
			arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
			    rdelm);
			arena_chunk_map_misc_t *miscelm =
			    arena_rd_to_miscelm(rdelm);
			size_t pageind = arena_miscelm_to_pageind(miscelm);
			assert(arena_mapbits_allocated_get(chunk, pageind) ==
			    0);
			assert(arena_mapbits_large_get(chunk, pageind) == 0);
			assert(arena_mapbits_dirty_get(chunk, pageind) != 0);
			npages = arena_mapbits_unallocated_size_get(chunk,
			    pageind) >> LG_PAGE;
		}
		ndirty += npages;
	}

	return (ndirty);
}

static size_t
arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
    size_t ndirty_limit, arena_runs_dirty_link_t *purge_runs_sentinel,
    extent_node_t *purge_chunks_sentinel)
{
	arena_runs_dirty_link_t *rdelm, *rdelm_next;
	extent_node_t *chunkselm;
	size_t nstashed = 0;

	/* Stash runs/chunks according to ndirty_limit. */
	for (rdelm = qr_next(&arena->runs_dirty, rd_link),
	    chunkselm = qr_next(&arena->chunks_cache, cc_link);
	    rdelm != &arena->runs_dirty; rdelm = rdelm_next) {
		size_t npages;
		rdelm_next = qr_next(rdelm, rd_link);

		if (rdelm == &chunkselm->rd) {
			extent_node_t *chunkselm_next;
			bool zero;
			UNUSED void *chunk;

			npages = extent_node_size_get(chunkselm) >> LG_PAGE;
			if (opt_purge == purge_mode_decay && arena->ndirty -
			    (nstashed + npages) < ndirty_limit)
				break;

			chunkselm_next = qr_next(chunkselm, cc_link);
			/*
			 * Allocate.  chunkselm remains valid due to the
			 * dalloc_node=false argument to chunk_alloc_cache().
			 */
			zero = false;
			chunk = chunk_alloc_cache(tsdn, arena, chunk_hooks,
			    extent_node_addr_get(chunkselm),
			    extent_node_size_get(chunkselm), chunksize, &zero,
			    false);
			assert(chunk == extent_node_addr_get(chunkselm));
			assert(zero == extent_node_zeroed_get(chunkselm));
			extent_node_dirty_insert(chunkselm, purge_runs_sentinel,
			    purge_chunks_sentinel);
			assert(npages == (extent_node_size_get(chunkselm) >>
			    LG_PAGE));
			chunkselm = chunkselm_next;
		} else {
			arena_chunk_t *chunk =
			    (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
			arena_chunk_map_misc_t *miscelm =
			    arena_rd_to_miscelm(rdelm);
			size_t pageind = arena_miscelm_to_pageind(miscelm);
			arena_run_t *run = &miscelm->run;
			size_t run_size =
			    arena_mapbits_unallocated_size_get(chunk, pageind);

			npages = run_size >> LG_PAGE;
			if (opt_purge == purge_mode_decay && arena->ndirty -
			    (nstashed + npages) < ndirty_limit)
				break;

			assert(pageind + npages <= chunk_npages);
			assert(arena_mapbits_dirty_get(chunk, pageind) ==
			    arena_mapbits_dirty_get(chunk, pageind+npages-1));

			/*
			 * If purging the spare chunk's run, make it available
			 * prior to allocation.
			 */
			if (chunk == arena->spare)
				arena_chunk_alloc(tsdn, arena);

			/* Temporarily allocate the free dirty run. */
			arena_run_split_large(arena, run, run_size, false);
			/* Stash. */
			if (false)
				qr_new(rdelm, rd_link); /* Redundant. */
			else {
				assert(qr_next(rdelm, rd_link) == rdelm);
				assert(qr_prev(rdelm, rd_link) == rdelm);
			}
			qr_meld(purge_runs_sentinel, rdelm, rd_link);
		}

		nstashed += npages;
		if (opt_purge == purge_mode_ratio && arena->ndirty - nstashed <=
		    ndirty_limit)
			break;
	}

	return (nstashed);
}

static size_t
arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
    arena_runs_dirty_link_t *purge_runs_sentinel,
    extent_node_t *purge_chunks_sentinel)
{
	size_t npurged, nmadvise;
	arena_runs_dirty_link_t *rdelm;
	extent_node_t *chunkselm;

	if (config_stats)
		nmadvise = 0;
	npurged = 0;

	malloc_mutex_unlock(tsdn, &arena->lock);
	for (rdelm = qr_next(purge_runs_sentinel, rd_link),
	    chunkselm = qr_next(purge_chunks_sentinel, cc_link);
	    rdelm != purge_runs_sentinel; rdelm = qr_next(rdelm, rd_link)) {
		size_t npages;

		if (rdelm == &chunkselm->rd) {
			/*
			 * Don't actually purge the chunk here because 1)
			 * chunkselm is embedded in the chunk and must remain
			 * valid, and 2) we deallocate the chunk in
			 * arena_unstash_purged(), where it is destroyed,
			 * decommitted, or purged, depending on chunk
			 * deallocation policy.
			 */
			size_t size = extent_node_size_get(chunkselm);
			npages = size >> LG_PAGE;
			chunkselm = qr_next(chunkselm, cc_link);
		} else {
			size_t pageind, run_size, flag_unzeroed, flags, i;
			bool decommitted;
			arena_chunk_t *chunk =
			    (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
			arena_chunk_map_misc_t *miscelm =
			    arena_rd_to_miscelm(rdelm);
			pageind = arena_miscelm_to_pageind(miscelm);
			run_size = arena_mapbits_large_size_get(chunk, pageind);
			npages = run_size >> LG_PAGE;

			assert(pageind + npages <= chunk_npages);
			assert(!arena_mapbits_decommitted_get(chunk, pageind));
			assert(!arena_mapbits_decommitted_get(chunk,
			    pageind+npages-1));
			decommitted = !chunk_hooks->decommit(chunk, chunksize,
			    pageind << LG_PAGE, npages << LG_PAGE, arena->ind);
			if (decommitted) {
				flag_unzeroed = 0;
				flags = CHUNK_MAP_DECOMMITTED;
			} else {
				flag_unzeroed = chunk_purge_wrapper(tsdn, arena,
				    chunk_hooks, chunk, chunksize, pageind <<
				    LG_PAGE, run_size) ? CHUNK_MAP_UNZEROED : 0;
				flags = flag_unzeroed;
			}
			arena_mapbits_large_set(chunk, pageind+npages-1, 0,
			    flags);
			arena_mapbits_large_set(chunk, pageind, run_size,
			    flags);

			/*
			 * Set the unzeroed flag for internal pages, now that
			 * chunk_purge_wrapper() has returned whether the pages
			 * were zeroed as a side effect of purging.  This chunk
			 * map modification is safe even though the arena mutex
			 * isn't currently owned by this thread, because the run
			 * is marked as allocated, thus protecting it from being
			 * modified by any other thread.  As long as these
			 * writes don't perturb the first and last elements'
			 * CHUNK_MAP_ALLOCATED bits, behavior is well defined.
			 */
			for (i = 1; i < npages-1; i++) {
				arena_mapbits_internal_set(chunk, pageind+i,
				    flag_unzeroed);
			}
		}

		npurged += npages;
		if (config_stats)
			nmadvise++;
	}
	malloc_mutex_lock(tsdn, &arena->lock);

	if (config_stats) {
		arena->stats.nmadvise += nmadvise;
		arena->stats.purged += npurged;
	}

	return (npurged);
}

static void
arena_unstash_purged(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
    arena_runs_dirty_link_t *purge_runs_sentinel,
    extent_node_t *purge_chunks_sentinel)
{
	arena_runs_dirty_link_t *rdelm, *rdelm_next;
	extent_node_t *chunkselm;

	/* Deallocate chunks/runs. */
	for (rdelm = qr_next(purge_runs_sentinel, rd_link),
	    chunkselm = qr_next(purge_chunks_sentinel, cc_link);
	    rdelm != purge_runs_sentinel; rdelm = rdelm_next) {
		rdelm_next = qr_next(rdelm, rd_link);
		if (rdelm == &chunkselm->rd) {
			extent_node_t *chunkselm_next = qr_next(chunkselm,
			    cc_link);
			void *addr = extent_node_addr_get(chunkselm);
			size_t size = extent_node_size_get(chunkselm);
			bool zeroed = extent_node_zeroed_get(chunkselm);
			bool committed = extent_node_committed_get(chunkselm);
			extent_node_dirty_remove(chunkselm);
			arena_node_dalloc(tsdn, arena, chunkselm);
			chunkselm = chunkselm_next;
			chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, addr,
			    size, zeroed, committed);
		} else {
			arena_chunk_t *chunk =
			    (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
			arena_chunk_map_misc_t *miscelm =
			    arena_rd_to_miscelm(rdelm);
			size_t pageind = arena_miscelm_to_pageind(miscelm);
			bool decommitted = (arena_mapbits_decommitted_get(chunk,
			    pageind) != 0);
			arena_run_t *run = &miscelm->run;
			qr_remove(rdelm, rd_link);
			arena_run_dalloc(tsdn, arena, run, false, true,
			    decommitted);
		}
	}
}

/*
 * NB: ndirty_limit is interpreted differently depending on opt_purge:
 *   - purge_mode_ratio: Purge as few dirty run/chunks as possible to reach the
 *                       desired state:
 *                       (arena->ndirty <= ndirty_limit)
 *   - purge_mode_decay: Purge as many dirty runs/chunks as possible without
 *                       violating the invariant:
 *                       (arena->ndirty >= ndirty_limit)
 */
static void
arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, size_t ndirty_limit)
{
	chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
	size_t npurge, npurged;
	arena_runs_dirty_link_t purge_runs_sentinel;
	extent_node_t purge_chunks_sentinel;

	arena->purging = true;

	/*
	 * Calls to arena_dirty_count() are disabled even for debug builds
	 * because overhead grows nonlinearly as memory usage increases.
	 */
	if (false && config_debug) {
		size_t ndirty = arena_dirty_count(arena);
		assert(ndirty == arena->ndirty);
	}
	assert(opt_purge != purge_mode_ratio || (arena->nactive >>
	    arena->lg_dirty_mult) < arena->ndirty || ndirty_limit == 0);

	qr_new(&purge_runs_sentinel, rd_link);
	extent_node_dirty_linkage_init(&purge_chunks_sentinel);

	npurge = arena_stash_dirty(tsdn, arena, &chunk_hooks, ndirty_limit,
	    &purge_runs_sentinel, &purge_chunks_sentinel);
	if (npurge == 0)
		goto label_return;
	npurged = arena_purge_stashed(tsdn, arena, &chunk_hooks,
	    &purge_runs_sentinel, &purge_chunks_sentinel);
	assert(npurged == npurge);
	arena_unstash_purged(tsdn, arena, &chunk_hooks, &purge_runs_sentinel,
	    &purge_chunks_sentinel);

	if (config_stats)
		arena->stats.npurge++;

label_return:
	arena->purging = false;
}

void
arena_purge(tsdn_t *tsdn, arena_t *arena, bool all)
{

	malloc_mutex_lock(tsdn, &arena->lock);
	if (all)
		arena_purge_to_limit(tsdn, arena, 0);
	else
		arena_maybe_purge(tsdn, arena);
	malloc_mutex_unlock(tsdn, &arena->lock);
}

static void
arena_achunk_prof_reset(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk)
{
	size_t pageind, npages;

	cassert(config_prof);
	assert(opt_prof);

	/*
	 * Iterate over the allocated runs and remove profiled allocations from
	 * the sample set.
	 */
	for (pageind = map_bias; pageind < chunk_npages; pageind += npages) {
		if (arena_mapbits_allocated_get(chunk, pageind) != 0) {
			if (arena_mapbits_large_get(chunk, pageind) != 0) {
				void *ptr = (void *)((uintptr_t)chunk + (pageind
				    << LG_PAGE));
				size_t usize = isalloc(tsd_tsdn(tsd), ptr,
				    config_prof);

				prof_free(tsd, ptr, usize);
				npages = arena_mapbits_large_size_get(chunk,
				    pageind) >> LG_PAGE;
			} else {
				/* Skip small run. */
				size_t binind = arena_mapbits_binind_get(chunk,
				    pageind);
				arena_bin_info_t *bin_info =
				    &arena_bin_info[binind];
				npages = bin_info->run_size >> LG_PAGE;
			}
		} else {
			/* Skip unallocated run. */
			npages = arena_mapbits_unallocated_size_get(chunk,
			    pageind) >> LG_PAGE;
		}
		assert(pageind + npages <= chunk_npages);
	}
}

void
arena_reset(tsd_t *tsd, arena_t *arena)
{
	unsigned i;
	extent_node_t *node;

	/*
	 * Locking in this function is unintuitive.  The caller guarantees that
	 * no concurrent operations are happening in this arena, but there are
	 * still reasons that some locking is necessary:
	 *
	 * - Some of the functions in the transitive closure of calls assume
	 *   appropriate locks are held, and in some cases these locks are
	 *   temporarily dropped to avoid lock order reversal or deadlock due to
	 *   reentry.
	 * - mallctl("epoch", ...) may concurrently refresh stats.  While
	 *   strictly speaking this is a "concurrent operation", disallowing
	 *   stats refreshes would impose an inconvenient burden.
	 */

	/* Remove large allocations from prof sample set. */
	if (config_prof && opt_prof) {
		ql_foreach(node, &arena->achunks, ql_link) {
			arena_achunk_prof_reset(tsd, arena,
			    extent_node_addr_get(node));
		}
	}

	/* Reset curruns for large size classes. */
*/ 1832 if (config_stats) { 1833 for (i = 0; i < nlclasses; i++) 1834 arena->stats.lstats[i].curruns = 0; 1835 } 1836 1837 /* Huge allocations. */ 1838 malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx); 1839 for (node = ql_last(&arena->huge, ql_link); node != NULL; node = 1840 ql_last(&arena->huge, ql_link)) { 1841 void *ptr = extent_node_addr_get(node); 1842 size_t usize; 1843 1844 malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx); 1845 if (config_stats || (config_prof && opt_prof)) 1846 usize = isalloc(tsd_tsdn(tsd), ptr, config_prof); 1847 /* Remove huge allocation from prof sample set. */ 1848 if (config_prof && opt_prof) 1849 prof_free(tsd, ptr, usize); 1850 huge_dalloc(tsd_tsdn(tsd), ptr); 1851 malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx); 1852 /* Cancel out unwanted effects on stats. */ 1853 if (config_stats) 1854 arena_huge_reset_stats_cancel(arena, usize); 1855 } 1856 malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx); 1857 1858 malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock); 1859 1860 /* Bins. */ 1861 for (i = 0; i < NBINS; i++) { 1862 arena_bin_t *bin = &arena->bins[i]; 1863 malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); 1864 bin->runcur = NULL; 1865 arena_run_heap_new(&bin->runs); 1866 if (config_stats) { 1867 bin->stats.curregs = 0; 1868 bin->stats.curruns = 0; 1869 } 1870 malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); 1871 } 1872 1873 /* 1874 * Re-initialize runs_dirty such that the chunks_cache and runs_dirty 1875 * chains directly correspond. 1876 */ 1877 qr_new(&arena->runs_dirty, rd_link); 1878 for (node = qr_next(&arena->chunks_cache, cc_link); 1879 node != &arena->chunks_cache; node = qr_next(node, cc_link)) { 1880 qr_new(&node->rd, rd_link); 1881 qr_meld(&arena->runs_dirty, &node->rd, rd_link); 1882 } 1883 1884 /* Arena chunks. */ 1885 for (node = ql_last(&arena->achunks, ql_link); node != NULL; node = 1886 ql_last(&arena->achunks, ql_link)) { 1887 ql_remove(&arena->achunks, node, ql_link); 1888 arena_chunk_discard(tsd_tsdn(tsd), arena, 1889 extent_node_addr_get(node)); 1890 } 1891 1892 /* Spare. */ 1893 if (arena->spare != NULL) { 1894 arena_chunk_discard(tsd_tsdn(tsd), arena, arena->spare); 1895 arena->spare = NULL; 1896 } 1897 1898 assert(!arena->purging); 1899 arena->nactive = 0; 1900 1901 for (i = 0; i < sizeof(arena->runs_avail) / sizeof(arena_run_heap_t); 1902 i++) 1903 arena_run_heap_new(&arena->runs_avail[i]); 1904 1905 malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock); 1906} 1907 1908static void 1909arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size, 1910 size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty, 1911 size_t flag_decommitted) 1912{ 1913 size_t size = *p_size; 1914 size_t run_ind = *p_run_ind; 1915 size_t run_pages = *p_run_pages; 1916 1917 /* Try to coalesce forward. */ 1918 if (run_ind + run_pages < chunk_npages && 1919 arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 && 1920 arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty && 1921 arena_mapbits_decommitted_get(chunk, run_ind+run_pages) == 1922 flag_decommitted) { 1923 size_t nrun_size = arena_mapbits_unallocated_size_get(chunk, 1924 run_ind+run_pages); 1925 size_t nrun_pages = nrun_size >> LG_PAGE; 1926 1927 /* 1928 * Remove successor from runs_avail; the coalesced run is 1929 * inserted later. 
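 *
 * For example, when a 3-page run spanning page indices [10 .. 13) is
 * being freed and pages [13 .. 18) already hold an unallocated 5-page
 * run with matching dirty/decommitted flags, that successor is removed
 * here and the two become a single 8-page run spanning [10 .. 18); only
 * the first and last map entries of the merged run need their
 * unallocated size rewritten below.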
1930 */ 1931 assert(arena_mapbits_unallocated_size_get(chunk, 1932 run_ind+run_pages+nrun_pages-1) == nrun_size); 1933 assert(arena_mapbits_dirty_get(chunk, 1934 run_ind+run_pages+nrun_pages-1) == flag_dirty); 1935 assert(arena_mapbits_decommitted_get(chunk, 1936 run_ind+run_pages+nrun_pages-1) == flag_decommitted); 1937 arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages); 1938 1939 /* 1940 * If the successor is dirty, remove it from the set of dirty 1941 * pages. 1942 */ 1943 if (flag_dirty != 0) { 1944 arena_run_dirty_remove(arena, chunk, run_ind+run_pages, 1945 nrun_pages); 1946 } 1947 1948 size += nrun_size; 1949 run_pages += nrun_pages; 1950 1951 arena_mapbits_unallocated_size_set(chunk, run_ind, size); 1952 arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1, 1953 size); 1954 } 1955 1956 /* Try to coalesce backward. */ 1957 if (run_ind > map_bias && arena_mapbits_allocated_get(chunk, 1958 run_ind-1) == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) == 1959 flag_dirty && arena_mapbits_decommitted_get(chunk, run_ind-1) == 1960 flag_decommitted) { 1961 size_t prun_size = arena_mapbits_unallocated_size_get(chunk, 1962 run_ind-1); 1963 size_t prun_pages = prun_size >> LG_PAGE; 1964 1965 run_ind -= prun_pages; 1966 1967 /* 1968 * Remove predecessor from runs_avail; the coalesced run is 1969 * inserted later. 1970 */ 1971 assert(arena_mapbits_unallocated_size_get(chunk, run_ind) == 1972 prun_size); 1973 assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty); 1974 assert(arena_mapbits_decommitted_get(chunk, run_ind) == 1975 flag_decommitted); 1976 arena_avail_remove(arena, chunk, run_ind, prun_pages); 1977 1978 /* 1979 * If the predecessor is dirty, remove it from the set of dirty 1980 * pages. 1981 */ 1982 if (flag_dirty != 0) { 1983 arena_run_dirty_remove(arena, chunk, run_ind, 1984 prun_pages); 1985 } 1986 1987 size += prun_size; 1988 run_pages += prun_pages; 1989 1990 arena_mapbits_unallocated_size_set(chunk, run_ind, size); 1991 arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1, 1992 size); 1993 } 1994 1995 *p_size = size; 1996 *p_run_ind = run_ind; 1997 *p_run_pages = run_pages; 1998} 1999 2000static size_t 2001arena_run_size_get(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, 2002 size_t run_ind) 2003{ 2004 size_t size; 2005 2006 assert(run_ind >= map_bias); 2007 assert(run_ind < chunk_npages); 2008 2009 if (arena_mapbits_large_get(chunk, run_ind) != 0) { 2010 size = arena_mapbits_large_size_get(chunk, run_ind); 2011 assert(size == PAGE || arena_mapbits_large_size_get(chunk, 2012 run_ind+(size>>LG_PAGE)-1) == 0); 2013 } else { 2014 arena_bin_info_t *bin_info = &arena_bin_info[run->binind]; 2015 size = bin_info->run_size; 2016 } 2017 2018 return (size); 2019} 2020 2021static void 2022arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run, bool dirty, 2023 bool cleaned, bool decommitted) 2024{ 2025 arena_chunk_t *chunk; 2026 arena_chunk_map_misc_t *miscelm; 2027 size_t size, run_ind, run_pages, flag_dirty, flag_decommitted; 2028 2029 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); 2030 miscelm = arena_run_to_miscelm(run); 2031 run_ind = arena_miscelm_to_pageind(miscelm); 2032 assert(run_ind >= map_bias); 2033 assert(run_ind < chunk_npages); 2034 size = arena_run_size_get(arena, chunk, run, run_ind); 2035 run_pages = (size >> LG_PAGE); 2036 arena_nactive_sub(arena, run_pages); 2037 2038 /* 2039 * The run is dirty if the caller claims to have dirtied it, as well as 2040 * if it was already dirty before being allocated and the caller 
2041 * doesn't claim to have cleaned it. 2042 */ 2043 assert(arena_mapbits_dirty_get(chunk, run_ind) == 2044 arena_mapbits_dirty_get(chunk, run_ind+run_pages-1)); 2045 if (!cleaned && !decommitted && arena_mapbits_dirty_get(chunk, run_ind) 2046 != 0) 2047 dirty = true; 2048 flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0; 2049 flag_decommitted = decommitted ? CHUNK_MAP_DECOMMITTED : 0; 2050 2051 /* Mark pages as unallocated in the chunk map. */ 2052 if (dirty || decommitted) { 2053 size_t flags = flag_dirty | flag_decommitted; 2054 arena_mapbits_unallocated_set(chunk, run_ind, size, flags); 2055 arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size, 2056 flags); 2057 } else { 2058 arena_mapbits_unallocated_set(chunk, run_ind, size, 2059 arena_mapbits_unzeroed_get(chunk, run_ind)); 2060 arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size, 2061 arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1)); 2062 } 2063 2064 arena_run_coalesce(arena, chunk, &size, &run_ind, &run_pages, 2065 flag_dirty, flag_decommitted); 2066 2067 /* Insert into runs_avail, now that coalescing is complete. */ 2068 assert(arena_mapbits_unallocated_size_get(chunk, run_ind) == 2069 arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1)); 2070 assert(arena_mapbits_dirty_get(chunk, run_ind) == 2071 arena_mapbits_dirty_get(chunk, run_ind+run_pages-1)); 2072 assert(arena_mapbits_decommitted_get(chunk, run_ind) == 2073 arena_mapbits_decommitted_get(chunk, run_ind+run_pages-1)); 2074 arena_avail_insert(arena, chunk, run_ind, run_pages); 2075 2076 if (dirty) 2077 arena_run_dirty_insert(arena, chunk, run_ind, run_pages); 2078 2079 /* Deallocate chunk if it is now completely unused. */ 2080 if (size == arena_maxrun) { 2081 assert(run_ind == map_bias); 2082 assert(run_pages == (arena_maxrun >> LG_PAGE)); 2083 arena_chunk_dalloc(tsdn, arena, chunk); 2084 } 2085 2086 /* 2087 * It is okay to do dirty page processing here even if the chunk was 2088 * deallocated above, since in that case it is the spare. Waiting 2089 * until after possible chunk deallocation to do dirty processing 2090 * allows for an old spare to be fully deallocated, thus decreasing the 2091 * chances of spuriously crossing the dirty page purging threshold. 2092 */ 2093 if (dirty) 2094 arena_maybe_purge(tsdn, arena); 2095} 2096 2097static void 2098arena_run_trim_head(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, 2099 arena_run_t *run, size_t oldsize, size_t newsize) 2100{ 2101 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); 2102 size_t pageind = arena_miscelm_to_pageind(miscelm); 2103 size_t head_npages = (oldsize - newsize) >> LG_PAGE; 2104 size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind); 2105 size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind); 2106 size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ? 2107 CHUNK_MAP_UNZEROED : 0; 2108 2109 assert(oldsize > newsize); 2110 2111 /* 2112 * Update the chunk map so that arena_run_dalloc() can treat the 2113 * leading run as separately allocated. Set the last element of each 2114 * run first, in case of single-page runs. 
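 *
 * For example, trimming a 6-page run at page index 20 down to
 * newsize == 4 pages marks [20 .. 22) as a separate 2-page large run and
 * [22 .. 26) as the surviving allocation; the 2-page head is then handed
 * to arena_run_dalloc() at the end of this function.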
2115 */ 2116 assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize); 2117 arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty | 2118 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, 2119 pageind+head_npages-1))); 2120 arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty | 2121 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind))); 2122 2123 if (config_debug) { 2124 UNUSED size_t tail_npages = newsize >> LG_PAGE; 2125 assert(arena_mapbits_large_size_get(chunk, 2126 pageind+head_npages+tail_npages-1) == 0); 2127 assert(arena_mapbits_dirty_get(chunk, 2128 pageind+head_npages+tail_npages-1) == flag_dirty); 2129 } 2130 arena_mapbits_large_set(chunk, pageind+head_npages, newsize, 2131 flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, 2132 pageind+head_npages))); 2133 2134 arena_run_dalloc(tsdn, arena, run, false, false, (flag_decommitted != 2135 0)); 2136} 2137 2138static void 2139arena_run_trim_tail(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, 2140 arena_run_t *run, size_t oldsize, size_t newsize, bool dirty) 2141{ 2142 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); 2143 size_t pageind = arena_miscelm_to_pageind(miscelm); 2144 size_t head_npages = newsize >> LG_PAGE; 2145 size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind); 2146 size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind); 2147 size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ? 2148 CHUNK_MAP_UNZEROED : 0; 2149 arena_chunk_map_misc_t *tail_miscelm; 2150 arena_run_t *tail_run; 2151 2152 assert(oldsize > newsize); 2153 2154 /* 2155 * Update the chunk map so that arena_run_dalloc() can treat the 2156 * trailing run as separately allocated. Set the last element of each 2157 * run first, in case of single-page runs. 
2158 */ 2159 assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize); 2160 arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty | 2161 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, 2162 pageind+head_npages-1))); 2163 arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty | 2164 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind))); 2165 2166 if (config_debug) { 2167 UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE; 2168 assert(arena_mapbits_large_size_get(chunk, 2169 pageind+head_npages+tail_npages-1) == 0); 2170 assert(arena_mapbits_dirty_get(chunk, 2171 pageind+head_npages+tail_npages-1) == flag_dirty); 2172 } 2173 arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize, 2174 flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, 2175 pageind+head_npages))); 2176 2177 tail_miscelm = arena_miscelm_get_mutable(chunk, pageind + head_npages); 2178 tail_run = &tail_miscelm->run; 2179 arena_run_dalloc(tsdn, arena, tail_run, dirty, false, (flag_decommitted 2180 != 0)); 2181} 2182 2183static void 2184arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run) 2185{ 2186 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); 2187 2188 arena_run_heap_insert(&bin->runs, miscelm); 2189} 2190 2191static arena_run_t * 2192arena_bin_nonfull_run_tryget(arena_bin_t *bin) 2193{ 2194 arena_chunk_map_misc_t *miscelm; 2195 2196 miscelm = arena_run_heap_remove_first(&bin->runs); 2197 if (miscelm == NULL) 2198 return (NULL); 2199 if (config_stats) 2200 bin->stats.reruns++; 2201 2202 return (&miscelm->run); 2203} 2204 2205static arena_run_t * 2206arena_bin_nonfull_run_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin) 2207{ 2208 arena_run_t *run; 2209 szind_t binind; 2210 arena_bin_info_t *bin_info; 2211 2212 /* Look for a usable run. */ 2213 run = arena_bin_nonfull_run_tryget(bin); 2214 if (run != NULL) 2215 return (run); 2216 /* No existing runs have any space available. */ 2217 2218 binind = arena_bin_index(arena, bin); 2219 bin_info = &arena_bin_info[binind]; 2220 2221 /* Allocate a new run. */ 2222 malloc_mutex_unlock(tsdn, &bin->lock); 2223 /******************************/ 2224 malloc_mutex_lock(tsdn, &arena->lock); 2225 run = arena_run_alloc_small(tsdn, arena, bin_info->run_size, binind); 2226 if (run != NULL) { 2227 /* Initialize run internals. */ 2228 run->binind = binind; 2229 run->nfree = bin_info->nregs; 2230 bitmap_init(run->bitmap, &bin_info->bitmap_info); 2231 } 2232 malloc_mutex_unlock(tsdn, &arena->lock); 2233 /********************************/ 2234 malloc_mutex_lock(tsdn, &bin->lock); 2235 if (run != NULL) { 2236 if (config_stats) { 2237 bin->stats.nruns++; 2238 bin->stats.curruns++; 2239 } 2240 return (run); 2241 } 2242 2243 /* 2244 * arena_run_alloc_small() failed, but another thread may have made 2245 * sufficient memory available while this one dropped bin->lock above, 2246 * so search one more time. 2247 */ 2248 run = arena_bin_nonfull_run_tryget(bin); 2249 if (run != NULL) 2250 return (run); 2251 2252 return (NULL); 2253} 2254 2255/* Re-fill bin->runcur, then call arena_run_reg_alloc(). 
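 *
 * Note that arena_bin_nonfull_run_get() drops bin->lock while it
 * allocates a new run, so another thread may have installed a usable
 * runcur in the meantime; the body below therefore re-checks runcur
 * before committing to the run it just obtained, and returns any
 * surplus run to the bin (or deallocates it) rather than assuming it is
 * still needed.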
*/ 2256static void * 2257arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin) 2258{ 2259 szind_t binind; 2260 arena_bin_info_t *bin_info; 2261 arena_run_t *run; 2262 2263 binind = arena_bin_index(arena, bin); 2264 bin_info = &arena_bin_info[binind]; 2265 bin->runcur = NULL; 2266 run = arena_bin_nonfull_run_get(tsdn, arena, bin); 2267 if (bin->runcur != NULL && bin->runcur->nfree > 0) { 2268 /* 2269 * Another thread updated runcur while this one ran without the 2270 * bin lock in arena_bin_nonfull_run_get(). 2271 */ 2272 void *ret; 2273 assert(bin->runcur->nfree > 0); 2274 ret = arena_run_reg_alloc(bin->runcur, bin_info); 2275 if (run != NULL) { 2276 arena_chunk_t *chunk; 2277 2278 /* 2279 * arena_run_alloc_small() may have allocated run, or 2280 * it may have pulled run from the bin's run tree. 2281 * Therefore it is unsafe to make any assumptions about 2282 * how run has previously been used, and 2283 * arena_bin_lower_run() must be called, as if a region 2284 * were just deallocated from the run. 2285 */ 2286 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); 2287 if (run->nfree == bin_info->nregs) { 2288 arena_dalloc_bin_run(tsdn, arena, chunk, run, 2289 bin); 2290 } else 2291 arena_bin_lower_run(arena, chunk, run, bin); 2292 } 2293 return (ret); 2294 } 2295 2296 if (run == NULL) 2297 return (NULL); 2298 2299 bin->runcur = run; 2300 2301 assert(bin->runcur->nfree > 0); 2302 2303 return (arena_run_reg_alloc(bin->runcur, bin_info)); 2304} 2305 2306void 2307arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_bin_t *tbin, 2308 szind_t binind, uint64_t prof_accumbytes) 2309{ 2310 unsigned i, nfill; 2311 arena_bin_t *bin; 2312 2313 assert(tbin->ncached == 0); 2314 2315 if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes)) 2316 prof_idump(tsdn); 2317 bin = &arena->bins[binind]; 2318 malloc_mutex_lock(tsdn, &bin->lock); 2319 for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >> 2320 tbin->lg_fill_div); i < nfill; i++) { 2321 arena_run_t *run; 2322 void *ptr; 2323 if ((run = bin->runcur) != NULL && run->nfree > 0) 2324 ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]); 2325 else 2326 ptr = arena_bin_malloc_hard(tsdn, arena, bin); 2327 if (ptr == NULL) { 2328 /* 2329 * OOM. tbin->avail isn't yet filled down to its first 2330 * element, so the successful allocations (if any) must 2331 * be moved just before tbin->avail before bailing out. 2332 */ 2333 if (i > 0) { 2334 memmove(tbin->avail - i, tbin->avail - nfill, 2335 i * sizeof(void *)); 2336 } 2337 break; 2338 } 2339 if (config_fill && unlikely(opt_junk_alloc)) { 2340 arena_alloc_junk_small(ptr, &arena_bin_info[binind], 2341 true); 2342 } 2343 /* Insert such that low regions get used first. 
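 *
 * Slots avail[-nfill] .. avail[-1] are filled in ascending region
 * order. If the loop stops early due to OOM, the memmove above shifts
 * the pointers that were obtained into the slots the cache expects:
 * e.g. with nfill == 8 and an OOM after i == 3, the three pointers move
 * from avail[-8 .. -6] to avail[-3 .. -1] and ncached is set to 3 below.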
*/ 2344 *(tbin->avail - nfill + i) = ptr; 2345 } 2346 if (config_stats) { 2347 bin->stats.nmalloc += i; 2348 bin->stats.nrequests += tbin->tstats.nrequests; 2349 bin->stats.curregs += i; 2350 bin->stats.nfills++; 2351 tbin->tstats.nrequests = 0; 2352 } 2353 malloc_mutex_unlock(tsdn, &bin->lock); 2354 tbin->ncached = i; 2355 arena_decay_tick(tsdn, arena); 2356} 2357 2358void 2359arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero) 2360{ 2361 2362 size_t redzone_size = bin_info->redzone_size; 2363 2364 if (zero) { 2365 memset((void *)((uintptr_t)ptr - redzone_size), 2366 JEMALLOC_ALLOC_JUNK, redzone_size); 2367 memset((void *)((uintptr_t)ptr + bin_info->reg_size), 2368 JEMALLOC_ALLOC_JUNK, redzone_size); 2369 } else { 2370 memset((void *)((uintptr_t)ptr - redzone_size), 2371 JEMALLOC_ALLOC_JUNK, bin_info->reg_interval); 2372 } 2373} 2374 2375#ifdef JEMALLOC_JET 2376#undef arena_redzone_corruption 2377#define arena_redzone_corruption JEMALLOC_N(n_arena_redzone_corruption) 2378#endif 2379static void 2380arena_redzone_corruption(void *ptr, size_t usize, bool after, 2381 size_t offset, uint8_t byte) 2382{ 2383 2384 malloc_printf("<jemalloc>: Corrupt redzone %zu byte%s %s %p " 2385 "(size %zu), byte=%#x\n", offset, (offset == 1) ? "" : "s", 2386 after ? "after" : "before", ptr, usize, byte); 2387} 2388#ifdef JEMALLOC_JET 2389#undef arena_redzone_corruption 2390#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption) 2391arena_redzone_corruption_t *arena_redzone_corruption = 2392 JEMALLOC_N(n_arena_redzone_corruption); 2393#endif 2394 2395static void 2396arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset) 2397{ 2398 bool error = false; 2399 2400 if (opt_junk_alloc) { 2401 size_t size = bin_info->reg_size; 2402 size_t redzone_size = bin_info->redzone_size; 2403 size_t i; 2404 2405 for (i = 1; i <= redzone_size; i++) { 2406 uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i); 2407 if (*byte != JEMALLOC_ALLOC_JUNK) { 2408 error = true; 2409 arena_redzone_corruption(ptr, size, false, i, 2410 *byte); 2411 if (reset) 2412 *byte = JEMALLOC_ALLOC_JUNK; 2413 } 2414 } 2415 for (i = 0; i < redzone_size; i++) { 2416 uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i); 2417 if (*byte != JEMALLOC_ALLOC_JUNK) { 2418 error = true; 2419 arena_redzone_corruption(ptr, size, true, i, 2420 *byte); 2421 if (reset) 2422 *byte = JEMALLOC_ALLOC_JUNK; 2423 } 2424 } 2425 } 2426 2427 if (opt_abort && error) 2428 abort(); 2429} 2430 2431#ifdef JEMALLOC_JET 2432#undef arena_dalloc_junk_small 2433#define arena_dalloc_junk_small JEMALLOC_N(n_arena_dalloc_junk_small) 2434#endif 2435void 2436arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info) 2437{ 2438 size_t redzone_size = bin_info->redzone_size; 2439 2440 arena_redzones_validate(ptr, bin_info, false); 2441 memset((void *)((uintptr_t)ptr - redzone_size), JEMALLOC_FREE_JUNK, 2442 bin_info->reg_interval); 2443} 2444#ifdef JEMALLOC_JET 2445#undef arena_dalloc_junk_small 2446#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small) 2447arena_dalloc_junk_small_t *arena_dalloc_junk_small = 2448 JEMALLOC_N(n_arena_dalloc_junk_small); 2449#endif 2450 2451void 2452arena_quarantine_junk_small(void *ptr, size_t usize) 2453{ 2454 szind_t binind; 2455 arena_bin_info_t *bin_info; 2456 cassert(config_fill); 2457 assert(opt_junk_free); 2458 assert(opt_quarantine); 2459 assert(usize <= SMALL_MAXCLASS); 2460 2461 binind = size2index(usize); 2462 bin_info = &arena_bin_info[binind]; 2463 arena_redzones_validate(ptr, 
bin_info, true); 2464} 2465 2466static void * 2467arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) 2468{ 2469 void *ret; 2470 arena_bin_t *bin; 2471 size_t usize; 2472 arena_run_t *run; 2473 2474 assert(binind < NBINS); 2475 bin = &arena->bins[binind]; 2476 usize = index2size(binind); 2477 2478 malloc_mutex_lock(tsdn, &bin->lock); 2479 if ((run = bin->runcur) != NULL && run->nfree > 0) 2480 ret = arena_run_reg_alloc(run, &arena_bin_info[binind]); 2481 else 2482 ret = arena_bin_malloc_hard(tsdn, arena, bin); 2483 2484 if (ret == NULL) { 2485 malloc_mutex_unlock(tsdn, &bin->lock); 2486 return (NULL); 2487 } 2488 2489 if (config_stats) { 2490 bin->stats.nmalloc++; 2491 bin->stats.nrequests++; 2492 bin->stats.curregs++; 2493 } 2494 malloc_mutex_unlock(tsdn, &bin->lock); 2495 if (config_prof && !isthreaded && arena_prof_accum(tsdn, arena, usize)) 2496 prof_idump(tsdn); 2497 2498 if (!zero) { 2499 if (config_fill) { 2500 if (unlikely(opt_junk_alloc)) { 2501 arena_alloc_junk_small(ret, 2502 &arena_bin_info[binind], false); 2503 } else if (unlikely(opt_zero)) 2504 memset(ret, 0, usize); 2505 } 2506 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize); 2507 } else { 2508 if (config_fill && unlikely(opt_junk_alloc)) { 2509 arena_alloc_junk_small(ret, &arena_bin_info[binind], 2510 true); 2511 } 2512 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize); 2513 memset(ret, 0, usize); 2514 } 2515 2516 arena_decay_tick(tsdn, arena); 2517 return (ret); 2518} 2519 2520void * 2521arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) 2522{ 2523 void *ret; 2524 size_t usize; 2525 uintptr_t random_offset; 2526 arena_run_t *run; 2527 arena_chunk_map_misc_t *miscelm; 2528 UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false); 2529 2530 /* Large allocation. */ 2531 usize = index2size(binind); 2532 malloc_mutex_lock(tsdn, &arena->lock); 2533 if (config_cache_oblivious) { 2534 uint64_t r; 2535 2536 /* 2537 * Compute a uniformly distributed offset within the first page 2538 * that is a multiple of the cacheline size, e.g. [0 .. 63) * 64 2539 * for 4 KiB pages and 64-byte cachelines. 
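 *
 * Concretely, with 4 KiB pages and 64-byte cachelines the offset is a
 * cacheline multiple somewhere in [0 .. PAGE), i.e. up to 4032 bytes;
 * the large_pad padding requested along with usize below is what leaves
 * room for the shifted start without spilling past the run.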
2540 */ 2541 r = prng_lg_range(&arena->offset_state, LG_PAGE - LG_CACHELINE); 2542 random_offset = ((uintptr_t)r) << LG_CACHELINE; 2543 } else 2544 random_offset = 0; 2545 run = arena_run_alloc_large(tsdn, arena, usize + large_pad, zero); 2546 if (run == NULL) { 2547 malloc_mutex_unlock(tsdn, &arena->lock); 2548 return (NULL); 2549 } 2550 miscelm = arena_run_to_miscelm(run); 2551 ret = (void *)((uintptr_t)arena_miscelm_to_rpages(miscelm) + 2552 random_offset); 2553 if (config_stats) { 2554 szind_t index = binind - NBINS; 2555 2556 arena->stats.nmalloc_large++; 2557 arena->stats.nrequests_large++; 2558 arena->stats.allocated_large += usize; 2559 arena->stats.lstats[index].nmalloc++; 2560 arena->stats.lstats[index].nrequests++; 2561 arena->stats.lstats[index].curruns++; 2562 } 2563 if (config_prof) 2564 idump = arena_prof_accum_locked(arena, usize); 2565 malloc_mutex_unlock(tsdn, &arena->lock); 2566 if (config_prof && idump) 2567 prof_idump(tsdn); 2568 2569 if (!zero) { 2570 if (config_fill) { 2571 if (unlikely(opt_junk_alloc)) 2572 memset(ret, JEMALLOC_ALLOC_JUNK, usize); 2573 else if (unlikely(opt_zero)) 2574 memset(ret, 0, usize); 2575 } 2576 } 2577 2578 arena_decay_tick(tsdn, arena); 2579 return (ret); 2580} 2581 2582void * 2583arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, 2584 bool zero) 2585{ 2586 2587 assert(!tsdn_null(tsdn) || arena != NULL); 2588 2589 if (likely(!tsdn_null(tsdn))) 2590 arena = arena_choose(tsdn_tsd(tsdn), arena); 2591 if (unlikely(arena == NULL)) 2592 return (NULL); 2593 2594 if (likely(size <= SMALL_MAXCLASS)) 2595 return (arena_malloc_small(tsdn, arena, ind, zero)); 2596 if (likely(size <= large_maxclass)) 2597 return (arena_malloc_large(tsdn, arena, ind, zero)); 2598 return (huge_malloc(tsdn, arena, index2size(ind), zero)); 2599} 2600 2601/* Only handles large allocations that require more than page alignment. 
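 *
 * The strategy is to over-allocate by (alignment - PAGE) bytes, find the
 * first alignment boundary within the run, and return the unneeded
 * leading and trailing pages via arena_run_trim_head() and
 * arena_run_trim_tail(). For example, an 8 KiB request with 16 KiB
 * alignment allocates usize + large_pad + 16 KiB - 4 KiB, trims at most
 * three leading pages to reach the boundary, and trims whatever remains
 * beyond usize + large_pad off the tail.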
*/ 2602static void * 2603arena_palloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, 2604 bool zero) 2605{ 2606 void *ret; 2607 size_t alloc_size, leadsize, trailsize; 2608 arena_run_t *run; 2609 arena_chunk_t *chunk; 2610 arena_chunk_map_misc_t *miscelm; 2611 void *rpages; 2612 2613 assert(!tsdn_null(tsdn) || arena != NULL); 2614 assert(usize == PAGE_CEILING(usize)); 2615 2616 if (likely(!tsdn_null(tsdn))) 2617 arena = arena_choose(tsdn_tsd(tsdn), arena); 2618 if (unlikely(arena == NULL)) 2619 return (NULL); 2620 2621 alignment = PAGE_CEILING(alignment); 2622 alloc_size = usize + large_pad + alignment - PAGE; 2623 2624 malloc_mutex_lock(tsdn, &arena->lock); 2625 run = arena_run_alloc_large(tsdn, arena, alloc_size, false); 2626 if (run == NULL) { 2627 malloc_mutex_unlock(tsdn, &arena->lock); 2628 return (NULL); 2629 } 2630 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); 2631 miscelm = arena_run_to_miscelm(run); 2632 rpages = arena_miscelm_to_rpages(miscelm); 2633 2634 leadsize = ALIGNMENT_CEILING((uintptr_t)rpages, alignment) - 2635 (uintptr_t)rpages; 2636 assert(alloc_size >= leadsize + usize); 2637 trailsize = alloc_size - leadsize - usize - large_pad; 2638 if (leadsize != 0) { 2639 arena_chunk_map_misc_t *head_miscelm = miscelm; 2640 arena_run_t *head_run = run; 2641 2642 miscelm = arena_miscelm_get_mutable(chunk, 2643 arena_miscelm_to_pageind(head_miscelm) + (leadsize >> 2644 LG_PAGE)); 2645 run = &miscelm->run; 2646 2647 arena_run_trim_head(tsdn, arena, chunk, head_run, alloc_size, 2648 alloc_size - leadsize); 2649 } 2650 if (trailsize != 0) { 2651 arena_run_trim_tail(tsdn, arena, chunk, run, usize + large_pad + 2652 trailsize, usize + large_pad, false); 2653 } 2654 if (arena_run_init_large(arena, run, usize + large_pad, zero)) { 2655 size_t run_ind = 2656 arena_miscelm_to_pageind(arena_run_to_miscelm(run)); 2657 bool dirty = (arena_mapbits_dirty_get(chunk, run_ind) != 0); 2658 bool decommitted = (arena_mapbits_decommitted_get(chunk, 2659 run_ind) != 0); 2660 2661 assert(decommitted); /* Cause of OOM. */ 2662 arena_run_dalloc(tsdn, arena, run, dirty, false, decommitted); 2663 malloc_mutex_unlock(tsdn, &arena->lock); 2664 return (NULL); 2665 } 2666 ret = arena_miscelm_to_rpages(miscelm); 2667 2668 if (config_stats) { 2669 szind_t index = size2index(usize) - NBINS; 2670 2671 arena->stats.nmalloc_large++; 2672 arena->stats.nrequests_large++; 2673 arena->stats.allocated_large += usize; 2674 arena->stats.lstats[index].nmalloc++; 2675 arena->stats.lstats[index].nrequests++; 2676 arena->stats.lstats[index].curruns++; 2677 } 2678 malloc_mutex_unlock(tsdn, &arena->lock); 2679 2680 if (config_fill && !zero) { 2681 if (unlikely(opt_junk_alloc)) 2682 memset(ret, JEMALLOC_ALLOC_JUNK, usize); 2683 else if (unlikely(opt_zero)) 2684 memset(ret, 0, usize); 2685 } 2686 arena_decay_tick(tsdn, arena); 2687 return (ret); 2688} 2689 2690void * 2691arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, 2692 bool zero, tcache_t *tcache) 2693{ 2694 void *ret; 2695 2696 if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE 2697 && (usize & PAGE_MASK) == 0))) { 2698 /* Small; alignment doesn't require special run placement. */ 2699 ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero, 2700 tcache, true); 2701 } else if (usize <= large_maxclass && alignment <= PAGE) { 2702 /* 2703 * Large; alignment doesn't require special run placement. 
2704 * However, the cached pointer may be at a random offset from 2705 * the base of the run, so do some bit manipulation to retrieve 2706 * the base. 2707 */ 2708 ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero, 2709 tcache, true); 2710 if (config_cache_oblivious) 2711 ret = (void *)((uintptr_t)ret & ~PAGE_MASK); 2712 } else { 2713 if (likely(usize <= large_maxclass)) { 2714 ret = arena_palloc_large(tsdn, arena, usize, alignment, 2715 zero); 2716 } else if (likely(alignment <= chunksize)) 2717 ret = huge_malloc(tsdn, arena, usize, zero); 2718 else { 2719 ret = huge_palloc(tsdn, arena, usize, alignment, zero); 2720 } 2721 } 2722 return (ret); 2723} 2724 2725void 2726arena_prof_promoted(tsdn_t *tsdn, const void *ptr, size_t size) 2727{ 2728 arena_chunk_t *chunk; 2729 size_t pageind; 2730 szind_t binind; 2731 2732 cassert(config_prof); 2733 assert(ptr != NULL); 2734 assert(CHUNK_ADDR2BASE(ptr) != ptr); 2735 assert(isalloc(tsdn, ptr, false) == LARGE_MINCLASS); 2736 assert(isalloc(tsdn, ptr, true) == LARGE_MINCLASS); 2737 assert(size <= SMALL_MAXCLASS); 2738 2739 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); 2740 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; 2741 binind = size2index(size); 2742 assert(binind < NBINS); 2743 arena_mapbits_large_binind_set(chunk, pageind, binind); 2744 2745 assert(isalloc(tsdn, ptr, false) == LARGE_MINCLASS); 2746 assert(isalloc(tsdn, ptr, true) == size); 2747} 2748 2749static void 2750arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run, 2751 arena_bin_t *bin) 2752{ 2753 2754 /* Dissociate run from bin. */ 2755 if (run == bin->runcur) 2756 bin->runcur = NULL; 2757 else { 2758 szind_t binind = arena_bin_index(extent_node_arena_get( 2759 &chunk->node), bin); 2760 arena_bin_info_t *bin_info = &arena_bin_info[binind]; 2761 2762 /* 2763 * The following block's conditional is necessary because if the 2764 * run only contains one region, then it never gets inserted 2765 * into the non-full runs tree. 2766 */ 2767 if (bin_info->nregs != 1) { 2768 arena_chunk_map_misc_t *miscelm = 2769 arena_run_to_miscelm(run); 2770 2771 arena_run_heap_remove(&bin->runs, miscelm); 2772 } 2773 } 2774} 2775 2776static void 2777arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, 2778 arena_run_t *run, arena_bin_t *bin) 2779{ 2780 2781 assert(run != bin->runcur); 2782 2783 malloc_mutex_unlock(tsdn, &bin->lock); 2784 /******************************/ 2785 malloc_mutex_lock(tsdn, &arena->lock); 2786 arena_run_dalloc(tsdn, arena, run, true, false, false); 2787 malloc_mutex_unlock(tsdn, &arena->lock); 2788 /****************************/ 2789 malloc_mutex_lock(tsdn, &bin->lock); 2790 if (config_stats) 2791 bin->stats.curruns--; 2792} 2793 2794static void 2795arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, 2796 arena_bin_t *bin) 2797{ 2798 2799 /* 2800 * Make sure that if bin->runcur is non-NULL, it refers to the lowest 2801 * non-full run. It is okay to NULL runcur out rather than proactively 2802 * keeping it pointing at the lowest non-full run. 2803 */ 2804 if ((uintptr_t)run < (uintptr_t)bin->runcur) { 2805 /* Switch runcur. 
*/ 2806 if (bin->runcur->nfree > 0) 2807 arena_bin_runs_insert(bin, bin->runcur); 2808 bin->runcur = run; 2809 if (config_stats) 2810 bin->stats.reruns++; 2811 } else 2812 arena_bin_runs_insert(bin, run); 2813} 2814 2815static void 2816arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, 2817 void *ptr, arena_chunk_map_bits_t *bitselm, bool junked) 2818{ 2819 size_t pageind, rpages_ind; 2820 arena_run_t *run; 2821 arena_bin_t *bin; 2822 arena_bin_info_t *bin_info; 2823 szind_t binind; 2824 2825 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; 2826 rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind); 2827 run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run; 2828 binind = run->binind; 2829 bin = &arena->bins[binind]; 2830 bin_info = &arena_bin_info[binind]; 2831 2832 if (!junked && config_fill && unlikely(opt_junk_free)) 2833 arena_dalloc_junk_small(ptr, bin_info); 2834 2835 arena_run_reg_dalloc(run, ptr); 2836 if (run->nfree == bin_info->nregs) { 2837 arena_dissociate_bin_run(chunk, run, bin); 2838 arena_dalloc_bin_run(tsdn, arena, chunk, run, bin); 2839 } else if (run->nfree == 1 && run != bin->runcur) 2840 arena_bin_lower_run(arena, chunk, run, bin); 2841 2842 if (config_stats) { 2843 bin->stats.ndalloc++; 2844 bin->stats.curregs--; 2845 } 2846} 2847 2848void 2849arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, 2850 arena_chunk_t *chunk, void *ptr, arena_chunk_map_bits_t *bitselm) 2851{ 2852 2853 arena_dalloc_bin_locked_impl(tsdn, arena, chunk, ptr, bitselm, true); 2854} 2855 2856void 2857arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, void *ptr, 2858 size_t pageind, arena_chunk_map_bits_t *bitselm) 2859{ 2860 arena_run_t *run; 2861 arena_bin_t *bin; 2862 size_t rpages_ind; 2863 2864 rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind); 2865 run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run; 2866 bin = &arena->bins[run->binind]; 2867 malloc_mutex_lock(tsdn, &bin->lock); 2868 arena_dalloc_bin_locked_impl(tsdn, arena, chunk, ptr, bitselm, false); 2869 malloc_mutex_unlock(tsdn, &bin->lock); 2870} 2871 2872void 2873arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, 2874 void *ptr, size_t pageind) 2875{ 2876 arena_chunk_map_bits_t *bitselm; 2877 2878 if (config_debug) { 2879 /* arena_ptr_small_binind_get() does extra sanity checking. 
*/ 2880 assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk, 2881 pageind)) != BININD_INVALID); 2882 } 2883 bitselm = arena_bitselm_get_mutable(chunk, pageind); 2884 arena_dalloc_bin(tsdn, arena, chunk, ptr, pageind, bitselm); 2885 arena_decay_tick(tsdn, arena); 2886} 2887 2888#ifdef JEMALLOC_JET 2889#undef arena_dalloc_junk_large 2890#define arena_dalloc_junk_large JEMALLOC_N(n_arena_dalloc_junk_large) 2891#endif 2892void 2893arena_dalloc_junk_large(void *ptr, size_t usize) 2894{ 2895 2896 if (config_fill && unlikely(opt_junk_free)) 2897 memset(ptr, JEMALLOC_FREE_JUNK, usize); 2898} 2899#ifdef JEMALLOC_JET 2900#undef arena_dalloc_junk_large 2901#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large) 2902arena_dalloc_junk_large_t *arena_dalloc_junk_large = 2903 JEMALLOC_N(n_arena_dalloc_junk_large); 2904#endif 2905 2906static void 2907arena_dalloc_large_locked_impl(tsdn_t *tsdn, arena_t *arena, 2908 arena_chunk_t *chunk, void *ptr, bool junked) 2909{ 2910 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; 2911 arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk, 2912 pageind); 2913 arena_run_t *run = &miscelm->run; 2914 2915 if (config_fill || config_stats) { 2916 size_t usize = arena_mapbits_large_size_get(chunk, pageind) - 2917 large_pad; 2918 2919 if (!junked) 2920 arena_dalloc_junk_large(ptr, usize); 2921 if (config_stats) { 2922 szind_t index = size2index(usize) - NBINS; 2923 2924 arena->stats.ndalloc_large++; 2925 arena->stats.allocated_large -= usize; 2926 arena->stats.lstats[index].ndalloc++; 2927 arena->stats.lstats[index].curruns--; 2928 } 2929 } 2930 2931 arena_run_dalloc(tsdn, arena, run, true, false, false); 2932} 2933 2934void 2935arena_dalloc_large_junked_locked(tsdn_t *tsdn, arena_t *arena, 2936 arena_chunk_t *chunk, void *ptr) 2937{ 2938 2939 arena_dalloc_large_locked_impl(tsdn, arena, chunk, ptr, true); 2940} 2941 2942void 2943arena_dalloc_large(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, 2944 void *ptr) 2945{ 2946 2947 malloc_mutex_lock(tsdn, &arena->lock); 2948 arena_dalloc_large_locked_impl(tsdn, arena, chunk, ptr, false); 2949 malloc_mutex_unlock(tsdn, &arena->lock); 2950 arena_decay_tick(tsdn, arena); 2951} 2952 2953static void 2954arena_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, 2955 void *ptr, size_t oldsize, size_t size) 2956{ 2957 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; 2958 arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk, 2959 pageind); 2960 arena_run_t *run = &miscelm->run; 2961 2962 assert(size < oldsize); 2963 2964 /* 2965 * Shrink the run, and make trailing pages available for other 2966 * allocations. 
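 *
 * For example, shrinking a 40 KiB large allocation to 16 KiB hands the
 * trailing six pages back to the arena as a dirty run via
 * arena_run_trim_tail() and, under config_stats, in effect moves the
 * allocation from the 40 KiB lstats bin to the 16 KiB bin below.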
2967 */ 2968 malloc_mutex_lock(tsdn, &arena->lock); 2969 arena_run_trim_tail(tsdn, arena, chunk, run, oldsize + large_pad, size + 2970 large_pad, true); 2971 if (config_stats) { 2972 szind_t oldindex = size2index(oldsize) - NBINS; 2973 szind_t index = size2index(size) - NBINS; 2974 2975 arena->stats.ndalloc_large++; 2976 arena->stats.allocated_large -= oldsize; 2977 arena->stats.lstats[oldindex].ndalloc++; 2978 arena->stats.lstats[oldindex].curruns--; 2979 2980 arena->stats.nmalloc_large++; 2981 arena->stats.nrequests_large++; 2982 arena->stats.allocated_large += size; 2983 arena->stats.lstats[index].nmalloc++; 2984 arena->stats.lstats[index].nrequests++; 2985 arena->stats.lstats[index].curruns++; 2986 } 2987 malloc_mutex_unlock(tsdn, &arena->lock); 2988} 2989 2990static bool 2991arena_ralloc_large_grow(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, 2992 void *ptr, size_t oldsize, size_t usize_min, size_t usize_max, bool zero) 2993{ 2994 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; 2995 size_t npages = (oldsize + large_pad) >> LG_PAGE; 2996 size_t followsize; 2997 2998 assert(oldsize == arena_mapbits_large_size_get(chunk, pageind) - 2999 large_pad); 3000 3001 /* Try to extend the run. */ 3002 malloc_mutex_lock(tsdn, &arena->lock); 3003 if (pageind+npages >= chunk_npages || arena_mapbits_allocated_get(chunk, 3004 pageind+npages) != 0) 3005 goto label_fail; 3006 followsize = arena_mapbits_unallocated_size_get(chunk, pageind+npages); 3007 if (oldsize + followsize >= usize_min) { 3008 /* 3009 * The next run is available and sufficiently large. Split the 3010 * following run, then merge the first part with the existing 3011 * allocation. 3012 */ 3013 arena_run_t *run; 3014 size_t usize, splitsize, size, flag_dirty, flag_unzeroed_mask; 3015 3016 usize = usize_max; 3017 while (oldsize + followsize < usize) 3018 usize = index2size(size2index(usize)-1); 3019 assert(usize >= usize_min); 3020 assert(usize >= oldsize); 3021 splitsize = usize - oldsize; 3022 if (splitsize == 0) 3023 goto label_fail; 3024 3025 run = &arena_miscelm_get_mutable(chunk, pageind+npages)->run; 3026 if (arena_run_split_large(arena, run, splitsize, zero)) 3027 goto label_fail; 3028 3029 if (config_cache_oblivious && zero) { 3030 /* 3031 * Zero the trailing bytes of the original allocation's 3032 * last page, since they are in an indeterminate state. 3033 * There will always be trailing bytes, because ptr's 3034 * offset from the beginning of the run is a multiple of 3035 * CACHELINE in [0 .. PAGE). 3036 */ 3037 void *zbase = (void *)((uintptr_t)ptr + oldsize); 3038 void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase + 3039 PAGE)); 3040 size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase; 3041 assert(nzero > 0); 3042 memset(zbase, 0, nzero); 3043 } 3044 3045 size = oldsize + splitsize; 3046 npages = (size + large_pad) >> LG_PAGE; 3047 3048 /* 3049 * Mark the extended run as dirty if either portion of the run 3050 * was dirty before allocation. This is rather pedantic, 3051 * because there's not actually any sequence of events that 3052 * could cause the resulting run to be passed to 3053 * arena_run_dalloc() with the dirty argument set to false 3054 * (which is when dirty flag consistency would really matter). 3055 */ 3056 flag_dirty = arena_mapbits_dirty_get(chunk, pageind) | 3057 arena_mapbits_dirty_get(chunk, pageind+npages-1); 3058 flag_unzeroed_mask = flag_dirty == 0 ? 
CHUNK_MAP_UNZEROED : 0; 3059 arena_mapbits_large_set(chunk, pageind, size + large_pad, 3060 flag_dirty | (flag_unzeroed_mask & 3061 arena_mapbits_unzeroed_get(chunk, pageind))); 3062 arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty | 3063 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, 3064 pageind+npages-1))); 3065 3066 if (config_stats) { 3067 szind_t oldindex = size2index(oldsize) - NBINS; 3068 szind_t index = size2index(size) - NBINS; 3069 3070 arena->stats.ndalloc_large++; 3071 arena->stats.allocated_large -= oldsize; 3072 arena->stats.lstats[oldindex].ndalloc++; 3073 arena->stats.lstats[oldindex].curruns--; 3074 3075 arena->stats.nmalloc_large++; 3076 arena->stats.nrequests_large++; 3077 arena->stats.allocated_large += size; 3078 arena->stats.lstats[index].nmalloc++; 3079 arena->stats.lstats[index].nrequests++; 3080 arena->stats.lstats[index].curruns++; 3081 } 3082 malloc_mutex_unlock(tsdn, &arena->lock); 3083 return (false); 3084 } 3085label_fail: 3086 malloc_mutex_unlock(tsdn, &arena->lock); 3087 return (true); 3088} 3089 3090#ifdef JEMALLOC_JET 3091#undef arena_ralloc_junk_large 3092#define arena_ralloc_junk_large JEMALLOC_N(n_arena_ralloc_junk_large) 3093#endif 3094static void 3095arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize) 3096{ 3097 3098 if (config_fill && unlikely(opt_junk_free)) { 3099 memset((void *)((uintptr_t)ptr + usize), JEMALLOC_FREE_JUNK, 3100 old_usize - usize); 3101 } 3102} 3103#ifdef JEMALLOC_JET 3104#undef arena_ralloc_junk_large 3105#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large) 3106arena_ralloc_junk_large_t *arena_ralloc_junk_large = 3107 JEMALLOC_N(n_arena_ralloc_junk_large); 3108#endif 3109 3110/* 3111 * Try to resize a large allocation, in order to avoid copying. This will 3112 * always fail if growing an object, and the following run is already in use. 3113 */ 3114static bool 3115arena_ralloc_large(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min, 3116 size_t usize_max, bool zero) 3117{ 3118 arena_chunk_t *chunk; 3119 arena_t *arena; 3120 3121 if (oldsize == usize_max) { 3122 /* Current size class is compatible and maximal. */ 3123 return (false); 3124 } 3125 3126 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); 3127 arena = extent_node_arena_get(&chunk->node); 3128 3129 if (oldsize < usize_max) { 3130 bool ret = arena_ralloc_large_grow(tsdn, arena, chunk, ptr, 3131 oldsize, usize_min, usize_max, zero); 3132 if (config_fill && !ret && !zero) { 3133 if (unlikely(opt_junk_alloc)) { 3134 memset((void *)((uintptr_t)ptr + oldsize), 3135 JEMALLOC_ALLOC_JUNK, 3136 isalloc(tsdn, ptr, config_prof) - oldsize); 3137 } else if (unlikely(opt_zero)) { 3138 memset((void *)((uintptr_t)ptr + oldsize), 0, 3139 isalloc(tsdn, ptr, config_prof) - oldsize); 3140 } 3141 } 3142 return (ret); 3143 } 3144 3145 assert(oldsize > usize_max); 3146 /* Fill before shrinking in order avoid a race. */ 3147 arena_ralloc_junk_large(ptr, oldsize, usize_max); 3148 arena_ralloc_large_shrink(tsdn, arena, chunk, ptr, oldsize, usize_max); 3149 return (false); 3150} 3151 3152bool 3153arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, 3154 size_t extra, bool zero) 3155{ 3156 size_t usize_min, usize_max; 3157 3158 /* Calls with non-zero extra had to clamp extra. 
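 *
 * That is, a caller passing extra != 0 is expected to have already done
 * something along the lines of (a sketch of the expected clamping, not a
 * quote of any particular call site):
 *
 *	if (size + extra > HUGE_MAXCLASS)
 *		extra = HUGE_MAXCLASS - size;
 *
 * which is what the assertion below relies on.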
*/ 3159 assert(extra == 0 || size + extra <= HUGE_MAXCLASS); 3160 3161 if (unlikely(size > HUGE_MAXCLASS)) 3162 return (true); 3163 3164 usize_min = s2u(size); 3165 usize_max = s2u(size + extra); 3166 if (likely(oldsize <= large_maxclass && usize_min <= large_maxclass)) { 3167 arena_chunk_t *chunk; 3168 3169 /* 3170 * Avoid moving the allocation if the size class can be left the 3171 * same. 3172 */ 3173 if (oldsize <= SMALL_MAXCLASS) { 3174 assert(arena_bin_info[size2index(oldsize)].reg_size == 3175 oldsize); 3176 if ((usize_max > SMALL_MAXCLASS || 3177 size2index(usize_max) != size2index(oldsize)) && 3178 (size > oldsize || usize_max < oldsize)) 3179 return (true); 3180 } else { 3181 if (usize_max <= SMALL_MAXCLASS) 3182 return (true); 3183 if (arena_ralloc_large(tsdn, ptr, oldsize, usize_min, 3184 usize_max, zero)) 3185 return (true); 3186 } 3187 3188 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); 3189 arena_decay_tick(tsdn, extent_node_arena_get(&chunk->node)); 3190 return (false); 3191 } else { 3192 return (huge_ralloc_no_move(tsdn, ptr, oldsize, usize_min, 3193 usize_max, zero)); 3194 } 3195} 3196 3197static void * 3198arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize, 3199 size_t alignment, bool zero, tcache_t *tcache) 3200{ 3201 3202 if (alignment == 0) 3203 return (arena_malloc(tsdn, arena, usize, size2index(usize), 3204 zero, tcache, true)); 3205 usize = sa2u(usize, alignment); 3206 if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) 3207 return (NULL); 3208 return (ipalloct(tsdn, usize, alignment, zero, tcache, arena)); 3209} 3210 3211void * 3212arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size, 3213 size_t alignment, bool zero, tcache_t *tcache) 3214{ 3215 void *ret; 3216 size_t usize; 3217 3218 usize = s2u(size); 3219 if (unlikely(usize == 0 || size > HUGE_MAXCLASS)) 3220 return (NULL); 3221 3222 if (likely(usize <= large_maxclass)) { 3223 size_t copysize; 3224 3225 /* Try to avoid moving the allocation. */ 3226 if (!arena_ralloc_no_move(tsd_tsdn(tsd), ptr, oldsize, usize, 0, 3227 zero)) 3228 return (ptr); 3229 3230 /* 3231 * size and oldsize are different enough that we need to move 3232 * the object. In that case, fall back to allocating new space 3233 * and copying. 3234 */ 3235 ret = arena_ralloc_move_helper(tsd_tsdn(tsd), arena, usize, 3236 alignment, zero, tcache); 3237 if (ret == NULL) 3238 return (NULL); 3239 3240 /* 3241 * Junk/zero-filling were already done by 3242 * ipalloc()/arena_malloc(). 3243 */ 3244 3245 copysize = (usize < oldsize) ? 
usize : oldsize; 3246 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize); 3247 memcpy(ret, ptr, copysize); 3248 isqalloc(tsd, ptr, oldsize, tcache, true); 3249 } else { 3250 ret = huge_ralloc(tsd, arena, ptr, oldsize, usize, alignment, 3251 zero, tcache); 3252 } 3253 return (ret); 3254} 3255 3256dss_prec_t 3257arena_dss_prec_get(tsdn_t *tsdn, arena_t *arena) 3258{ 3259 dss_prec_t ret; 3260 3261 malloc_mutex_lock(tsdn, &arena->lock); 3262 ret = arena->dss_prec; 3263 malloc_mutex_unlock(tsdn, &arena->lock); 3264 return (ret); 3265} 3266 3267bool 3268arena_dss_prec_set(tsdn_t *tsdn, arena_t *arena, dss_prec_t dss_prec) 3269{ 3270 3271 if (!have_dss) 3272 return (dss_prec != dss_prec_disabled); 3273 malloc_mutex_lock(tsdn, &arena->lock); 3274 arena->dss_prec = dss_prec; 3275 malloc_mutex_unlock(tsdn, &arena->lock); 3276 return (false); 3277} 3278 3279ssize_t 3280arena_lg_dirty_mult_default_get(void) 3281{ 3282 3283 return ((ssize_t)atomic_read_z((size_t *)&lg_dirty_mult_default)); 3284} 3285 3286bool 3287arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult) 3288{ 3289 3290 if (opt_purge != purge_mode_ratio) 3291 return (true); 3292 if (!arena_lg_dirty_mult_valid(lg_dirty_mult)) 3293 return (true); 3294 atomic_write_z((size_t *)&lg_dirty_mult_default, (size_t)lg_dirty_mult); 3295 return (false); 3296} 3297 3298ssize_t 3299arena_decay_time_default_get(void) 3300{ 3301 3302 return ((ssize_t)atomic_read_z((size_t *)&decay_time_default)); 3303} 3304 3305bool 3306arena_decay_time_default_set(ssize_t decay_time) 3307{ 3308 3309 if (opt_purge != purge_mode_decay) 3310 return (true); 3311 if (!arena_decay_time_valid(decay_time)) 3312 return (true); 3313 atomic_write_z((size_t *)&decay_time_default, (size_t)decay_time); 3314 return (false); 3315} 3316 3317static void 3318arena_basic_stats_merge_locked(arena_t *arena, unsigned *nthreads, 3319 const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time, 3320 size_t *nactive, size_t *ndirty) 3321{ 3322 3323 *nthreads += arena_nthreads_get(arena, false); 3324 *dss = dss_prec_names[arena->dss_prec]; 3325 *lg_dirty_mult = arena->lg_dirty_mult; 3326 *decay_time = arena->decay.time; 3327 *nactive += arena->nactive; 3328 *ndirty += arena->ndirty; 3329} 3330 3331void 3332arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, 3333 const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time, 3334 size_t *nactive, size_t *ndirty) 3335{ 3336 3337 malloc_mutex_lock(tsdn, &arena->lock); 3338 arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult, 3339 decay_time, nactive, ndirty); 3340 malloc_mutex_unlock(tsdn, &arena->lock); 3341} 3342 3343void 3344arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, 3345 const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time, 3346 size_t *nactive, size_t *ndirty, arena_stats_t *astats, 3347 malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats, 3348 malloc_huge_stats_t *hstats) 3349{ 3350 unsigned i; 3351 3352 cassert(config_stats); 3353 3354 malloc_mutex_lock(tsdn, &arena->lock); 3355 arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult, 3356 decay_time, nactive, ndirty); 3357 3358 astats->mapped += arena->stats.mapped; 3359 astats->retained += arena->stats.retained; 3360 astats->npurge += arena->stats.npurge; 3361 astats->nmadvise += arena->stats.nmadvise; 3362 astats->purged += arena->stats.purged; 3363 astats->metadata_mapped += arena->stats.metadata_mapped; 3364 astats->metadata_allocated += arena_metadata_allocated_get(arena); 3365 
astats->allocated_large += arena->stats.allocated_large; 3366 astats->nmalloc_large += arena->stats.nmalloc_large; 3367 astats->ndalloc_large += arena->stats.ndalloc_large; 3368 astats->nrequests_large += arena->stats.nrequests_large; 3369 astats->allocated_huge += arena->stats.allocated_huge; 3370 astats->nmalloc_huge += arena->stats.nmalloc_huge; 3371 astats->ndalloc_huge += arena->stats.ndalloc_huge; 3372 3373 for (i = 0; i < nlclasses; i++) { 3374 lstats[i].nmalloc += arena->stats.lstats[i].nmalloc; 3375 lstats[i].ndalloc += arena->stats.lstats[i].ndalloc; 3376 lstats[i].nrequests += arena->stats.lstats[i].nrequests; 3377 lstats[i].curruns += arena->stats.lstats[i].curruns; 3378 } 3379 3380 for (i = 0; i < nhclasses; i++) { 3381 hstats[i].nmalloc += arena->stats.hstats[i].nmalloc; 3382 hstats[i].ndalloc += arena->stats.hstats[i].ndalloc; 3383 hstats[i].curhchunks += arena->stats.hstats[i].curhchunks; 3384 } 3385 malloc_mutex_unlock(tsdn, &arena->lock); 3386 3387 for (i = 0; i < NBINS; i++) { 3388 arena_bin_t *bin = &arena->bins[i]; 3389 3390 malloc_mutex_lock(tsdn, &bin->lock); 3391 bstats[i].nmalloc += bin->stats.nmalloc; 3392 bstats[i].ndalloc += bin->stats.ndalloc; 3393 bstats[i].nrequests += bin->stats.nrequests; 3394 bstats[i].curregs += bin->stats.curregs; 3395 if (config_tcache) { 3396 bstats[i].nfills += bin->stats.nfills; 3397 bstats[i].nflushes += bin->stats.nflushes; 3398 } 3399 bstats[i].nruns += bin->stats.nruns; 3400 bstats[i].reruns += bin->stats.reruns; 3401 bstats[i].curruns += bin->stats.curruns; 3402 malloc_mutex_unlock(tsdn, &bin->lock); 3403 } 3404} 3405 3406unsigned 3407arena_nthreads_get(arena_t *arena, bool internal) 3408{ 3409 3410 return (atomic_read_u(&arena->nthreads[internal])); 3411} 3412 3413void 3414arena_nthreads_inc(arena_t *arena, bool internal) 3415{ 3416 3417 atomic_add_u(&arena->nthreads[internal], 1); 3418} 3419 3420void 3421arena_nthreads_dec(arena_t *arena, bool internal) 3422{ 3423 3424 atomic_sub_u(&arena->nthreads[internal], 1); 3425} 3426 3427arena_t * 3428arena_new(tsdn_t *tsdn, unsigned ind) 3429{ 3430 arena_t *arena; 3431 unsigned i; 3432 3433 /* 3434 * Allocate arena, arena->lstats, and arena->hstats contiguously, mainly 3435 * because there is no way to clean up if base_alloc() OOMs. 
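 *
 * The single block handed back by base_alloc() below is laid out as
 *
 *	[arena_t][pad to CACHELINE][lstats: nlclasses entries]
 *	[pad to QUANTUM][hstats: nhclasses entries]
 *
 * with arena->stats.lstats placed CACHELINE_CEILING(sizeof(arena_t))
 * bytes past the arena base and arena->stats.hstats a further
 * QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t)) bytes beyond
 * that, matching the pointer arithmetic in the config_stats block
 * further down.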
3436 */ 3437 if (config_stats) { 3438 arena = (arena_t *)base_alloc(tsdn, 3439 CACHELINE_CEILING(sizeof(arena_t)) + 3440 QUANTUM_CEILING((nlclasses * sizeof(malloc_large_stats_t)) + 3441 (nhclasses * sizeof(malloc_huge_stats_t)))); 3442 } else 3443 arena = (arena_t *)base_alloc(tsdn, sizeof(arena_t)); 3444 if (arena == NULL) 3445 return (NULL); 3446 3447 arena->ind = ind; 3448 arena->nthreads[0] = arena->nthreads[1] = 0; 3449 if (malloc_mutex_init(&arena->lock, "arena", WITNESS_RANK_ARENA)) 3450 return (NULL); 3451 3452 if (config_stats) { 3453 memset(&arena->stats, 0, sizeof(arena_stats_t)); 3454 arena->stats.lstats = (malloc_large_stats_t *)((uintptr_t)arena 3455 + CACHELINE_CEILING(sizeof(arena_t))); 3456 memset(arena->stats.lstats, 0, nlclasses * 3457 sizeof(malloc_large_stats_t)); 3458 arena->stats.hstats = (malloc_huge_stats_t *)((uintptr_t)arena 3459 + CACHELINE_CEILING(sizeof(arena_t)) + 3460 QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t))); 3461 memset(arena->stats.hstats, 0, nhclasses * 3462 sizeof(malloc_huge_stats_t)); 3463 if (config_tcache) 3464 ql_new(&arena->tcache_ql); 3465 } 3466 3467 if (config_prof) 3468 arena->prof_accumbytes = 0; 3469 3470 if (config_cache_oblivious) { 3471 /* 3472 * A nondeterministic seed based on the address of arena reduces 3473 * the likelihood of lockstep non-uniform cache index 3474 * utilization among identical concurrent processes, but at the 3475 * cost of test repeatability. For debug builds, instead use a 3476 * deterministic seed. 3477 */ 3478 arena->offset_state = config_debug ? ind : 3479 (uint64_t)(uintptr_t)arena; 3480 } 3481 3482 arena->dss_prec = chunk_dss_prec_get(tsdn); 3483 3484 ql_new(&arena->achunks); 3485 3486 arena->spare = NULL; 3487 3488 arena->lg_dirty_mult = arena_lg_dirty_mult_default_get(); 3489 arena->purging = false; 3490 arena->nactive = 0; 3491 arena->ndirty = 0; 3492 3493 for (i = 0; i < sizeof(arena->runs_avail) / sizeof(arena_run_heap_t); 3494 i++) 3495 arena_run_heap_new(&arena->runs_avail[i]); 3496 3497 qr_new(&arena->runs_dirty, rd_link); 3498 qr_new(&arena->chunks_cache, cc_link); 3499 3500 if (opt_purge == purge_mode_decay) 3501 arena_decay_init(arena, arena_decay_time_default_get()); 3502 3503 ql_new(&arena->huge); 3504 if (malloc_mutex_init(&arena->huge_mtx, "arena_huge", 3505 WITNESS_RANK_ARENA_HUGE)) 3506 return (NULL); 3507 3508 extent_tree_szad_new(&arena->chunks_szad_cached); 3509 extent_tree_ad_new(&arena->chunks_ad_cached); 3510 extent_tree_szad_new(&arena->chunks_szad_retained); 3511 extent_tree_ad_new(&arena->chunks_ad_retained); 3512 if (malloc_mutex_init(&arena->chunks_mtx, "arena_chunks", 3513 WITNESS_RANK_ARENA_CHUNKS)) 3514 return (NULL); 3515 ql_new(&arena->node_cache); 3516 if (malloc_mutex_init(&arena->node_cache_mtx, "arena_node_cache", 3517 WITNESS_RANK_ARENA_NODE_CACHE)) 3518 return (NULL); 3519 3520 arena->chunk_hooks = chunk_hooks_default; 3521 3522 /* Initialize bins. 
*/ 3523 for (i = 0; i < NBINS; i++) { 3524 arena_bin_t *bin = &arena->bins[i]; 3525 if (malloc_mutex_init(&bin->lock, "arena_bin", 3526 WITNESS_RANK_ARENA_BIN)) 3527 return (NULL); 3528 bin->runcur = NULL; 3529 arena_run_heap_new(&bin->runs); 3530 if (config_stats) 3531 memset(&bin->stats, 0, sizeof(malloc_bin_stats_t)); 3532 } 3533 3534 return (arena); 3535} 3536 3537/* 3538 * Calculate bin_info->run_size such that it meets the following constraints: 3539 * 3540 * *) bin_info->run_size <= arena_maxrun 3541 * *) bin_info->nregs <= RUN_MAXREGS 3542 * 3543 * bin_info->nregs and bin_info->reg0_offset are also calculated here, since 3544 * these settings are all interdependent. 3545 */ 3546static void 3547bin_info_run_size_calc(arena_bin_info_t *bin_info) 3548{ 3549 size_t pad_size; 3550 size_t try_run_size, perfect_run_size, actual_run_size; 3551 uint32_t try_nregs, perfect_nregs, actual_nregs; 3552 3553 /* 3554 * Determine redzone size based on minimum alignment and minimum 3555 * redzone size. Add padding to the end of the run if it is needed to 3556 * align the regions. The padding allows each redzone to be half the 3557 * minimum alignment; without the padding, each redzone would have to 3558 * be twice as large in order to maintain alignment. 3559 */ 3560 if (config_fill && unlikely(opt_redzone)) { 3561 size_t align_min = ZU(1) << (ffs_zu(bin_info->reg_size) - 1); 3562 if (align_min <= REDZONE_MINSIZE) { 3563 bin_info->redzone_size = REDZONE_MINSIZE; 3564 pad_size = 0; 3565 } else { 3566 bin_info->redzone_size = align_min >> 1; 3567 pad_size = bin_info->redzone_size; 3568 } 3569 } else { 3570 bin_info->redzone_size = 0; 3571 pad_size = 0; 3572 } 3573 bin_info->reg_interval = bin_info->reg_size + 3574 (bin_info->redzone_size << 1); 3575 3576 /* 3577 * Compute run size under ideal conditions (no redzones, no limit on run 3578 * size). 3579 */ 3580 try_run_size = PAGE; 3581 try_nregs = (uint32_t)(try_run_size / bin_info->reg_size); 3582 do { 3583 perfect_run_size = try_run_size; 3584 perfect_nregs = try_nregs; 3585 3586 try_run_size += PAGE; 3587 try_nregs = (uint32_t)(try_run_size / bin_info->reg_size); 3588 } while (perfect_run_size != perfect_nregs * bin_info->reg_size); 3589 assert(perfect_nregs <= RUN_MAXREGS); 3590 3591 actual_run_size = perfect_run_size; 3592 actual_nregs = (uint32_t)((actual_run_size - pad_size) / 3593 bin_info->reg_interval); 3594 3595 /* 3596 * Redzones can require enough padding that not even a single region can 3597 * fit within the number of pages that would normally be dedicated to a 3598 * run for this size class. Increase the run size until at least one 3599 * region fits. 3600 */ 3601 while (actual_nregs == 0) { 3602 assert(config_fill && unlikely(opt_redzone)); 3603 3604 actual_run_size += PAGE; 3605 actual_nregs = (uint32_t)((actual_run_size - pad_size) / 3606 bin_info->reg_interval); 3607 } 3608 3609 /* 3610 * Make sure that the run will fit within an arena chunk. 3611 */ 3612 while (actual_run_size > arena_maxrun) { 3613 actual_run_size -= PAGE; 3614 actual_nregs = (uint32_t)((actual_run_size - pad_size) / 3615 bin_info->reg_interval); 3616 } 3617 assert(actual_nregs > 0); 3618 assert(actual_run_size == s2u(actual_run_size)); 3619 3620 /* Copy final settings. 
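 *
 * As a concrete trace of the computation above (assuming redzones are
 * disabled, so pad_size == 0): for reg_size == 96 the loop tries 4096
 * (42 regions, 4032 bytes used) and 8192 (85 regions, 8160 bytes) before
 * settling on 12288, which divides evenly into 128 regions; that bin's
 * run therefore ends up as three pages holding 128 regions with a
 * reg0_offset of 0.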
	/* Copy final settings. */
	bin_info->run_size = actual_run_size;
	bin_info->nregs = actual_nregs;
	bin_info->reg0_offset = (uint32_t)(actual_run_size - (actual_nregs *
	    bin_info->reg_interval) - pad_size + bin_info->redzone_size);

	assert(bin_info->reg0_offset - bin_info->redzone_size +
	    (bin_info->nregs * bin_info->reg_interval) + pad_size ==
	    bin_info->run_size);
}

static void
bin_info_init(void)
{
	arena_bin_info_t *bin_info;

#define	BIN_INFO_INIT_bin_yes(index, size)				\
	bin_info = &arena_bin_info[index];				\
	bin_info->reg_size = size;					\
	bin_info_run_size_calc(bin_info);				\
	bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
#define	BIN_INFO_INIT_bin_no(index, size)
#define	SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup)	\
	BIN_INFO_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
	SIZE_CLASSES
#undef BIN_INFO_INIT_bin_yes
#undef BIN_INFO_INIT_bin_no
#undef SC
}

void
arena_boot(void)
{
	unsigned i;

	arena_lg_dirty_mult_default_set(opt_lg_dirty_mult);
	arena_decay_time_default_set(opt_decay_time);

	/*
	 * Compute the header size such that it is large enough to contain the
	 * page map.  The page map is biased to omit entries for the header
	 * itself, so some iteration is necessary to compute the map bias.
	 *
	 * 1) Compute safe header_size and map_bias values that include enough
	 *    space for an unbiased page map.
	 * 2) Refine map_bias based on (1) to omit the header pages in the page
	 *    map.  The resulting map_bias may be one too small.
	 * 3) Refine map_bias based on (2).  The result will be >= the result
	 *    from (2), and will always be correct.
	 */
	map_bias = 0;
	for (i = 0; i < 3; i++) {
		size_t header_size = offsetof(arena_chunk_t, map_bits) +
		    ((sizeof(arena_chunk_map_bits_t) +
		    sizeof(arena_chunk_map_misc_t)) * (chunk_npages-map_bias));
		map_bias = (header_size + PAGE_MASK) >> LG_PAGE;
	}
	assert(map_bias > 0);
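
	/*
	 * Sketch of how the iteration above converges (the page counts here
	 * are hypothetical; actual values depend on chunk_npages and the map
	 * element sizes).  Pass 1 sizes the header for a full, unbiased page
	 * map, e.g. 14 pages.  Pass 2 omits those header pages from the map,
	 * so the header shrinks, e.g. to 13 pages; this estimate can be one
	 * too small.  Pass 3 recomputes with the smaller bias and can only
	 * stay the same or grow, so the final map_bias always covers the
	 * header.
	 */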
	map_misc_offset = offsetof(arena_chunk_t, map_bits) +
	    sizeof(arena_chunk_map_bits_t) * (chunk_npages-map_bias);

	arena_maxrun = chunksize - (map_bias << LG_PAGE);
	assert(arena_maxrun > 0);
	large_maxclass = index2size(size2index(chunksize)-1);
	if (large_maxclass > arena_maxrun) {
		/*
		 * For small chunk sizes it's possible for there to be fewer
		 * non-header pages available than are necessary to serve the
		 * size classes just below chunksize.
		 */
		large_maxclass = arena_maxrun;
	}
	assert(large_maxclass > 0);
	nlclasses = size2index(large_maxclass) - size2index(SMALL_MAXCLASS);
	nhclasses = NSIZES - nlclasses - NBINS;

	bin_info_init();
}

void
arena_prefork0(tsdn_t *tsdn, arena_t *arena)
{

	malloc_mutex_prefork(tsdn, &arena->lock);
}

void
arena_prefork1(tsdn_t *tsdn, arena_t *arena)
{

	malloc_mutex_prefork(tsdn, &arena->chunks_mtx);
}

void
arena_prefork2(tsdn_t *tsdn, arena_t *arena)
{

	malloc_mutex_prefork(tsdn, &arena->node_cache_mtx);
}

void
arena_prefork3(tsdn_t *tsdn, arena_t *arena)
{
	unsigned i;

	for (i = 0; i < NBINS; i++)
		malloc_mutex_prefork(tsdn, &arena->bins[i].lock);
	malloc_mutex_prefork(tsdn, &arena->huge_mtx);
}

void
arena_postfork_parent(tsdn_t *tsdn, arena_t *arena)
{
	unsigned i;

	malloc_mutex_postfork_parent(tsdn, &arena->huge_mtx);
	for (i = 0; i < NBINS; i++)
		malloc_mutex_postfork_parent(tsdn, &arena->bins[i].lock);
	malloc_mutex_postfork_parent(tsdn, &arena->node_cache_mtx);
	malloc_mutex_postfork_parent(tsdn, &arena->chunks_mtx);
	malloc_mutex_postfork_parent(tsdn, &arena->lock);
}

void
arena_postfork_child(tsdn_t *tsdn, arena_t *arena)
{
	unsigned i;

	malloc_mutex_postfork_child(tsdn, &arena->huge_mtx);
	for (i = 0; i < NBINS; i++)
		malloc_mutex_postfork_child(tsdn, &arena->bins[i].lock);
	malloc_mutex_postfork_child(tsdn, &arena->node_cache_mtx);
	malloc_mutex_postfork_child(tsdn, &arena->chunks_mtx);
	malloc_mutex_postfork_child(tsdn, &arena->lock);
}