arena.c revision c9a4bf91702b351e73e2cd7cf9125afd076d59fe

#define	JEMALLOC_ARENA_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

purge_mode_t	opt_purge = PURGE_DEFAULT;
const char	*purge_mode_names[] = {
	"ratio",
	"decay",
	"N/A"
};
ssize_t		opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
static ssize_t	lg_dirty_mult_default;
ssize_t		opt_decay_time = DECAY_TIME_DEFAULT;
static ssize_t	decay_time_default;

arena_bin_info_t	arena_bin_info[NBINS];

size_t		map_bias;
size_t		map_misc_offset;
size_t		arena_maxrun; /* Max run size for arenas. */
size_t		large_maxclass; /* Max large size class. */
size_t		run_quantize_max; /* Max run_quantize_*() input. */
static size_t	small_maxrun; /* Max run size for small size classes. */
static bool	*small_run_tab; /* Valid small run page multiples. */
static size_t	*run_quantize_floor_tab; /* run_quantize_floor() memoization. */
static size_t	*run_quantize_ceil_tab; /* run_quantize_ceil() memoization. */
unsigned	nlclasses; /* Number of large size classes. */
unsigned	nhclasses; /* Number of huge size classes. */
static szind_t	runs_avail_bias; /* Size index for first runs_avail tree. */
static szind_t	runs_avail_nclasses; /* Number of runs_avail trees. */

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void	arena_purge_to_limit(tsd_t *tsd, arena_t *arena,
    size_t ndirty_limit);
static void	arena_run_dalloc(tsd_t *tsd, arena_t *arena, arena_run_t *run,
    bool dirty, bool cleaned, bool decommitted);
static void	arena_dalloc_bin_run(tsd_t *tsd, arena_t *arena,
    arena_chunk_t *chunk, arena_run_t *run, arena_bin_t *bin);
static void	arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin);

/******************************************************************************/

JEMALLOC_INLINE_C size_t
arena_miscelm_size_get(const arena_chunk_map_misc_t *miscelm)
{
	arena_chunk_t *chunk;
	size_t pageind, mapbits;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
	pageind = arena_miscelm_to_pageind(miscelm);
	mapbits = arena_mapbits_get(chunk, pageind);
	return (arena_mapbits_size_decode(mapbits));
}

JEMALLOC_INLINE_C int
arena_run_addr_comp(const arena_chunk_map_misc_t *a,
    const arena_chunk_map_misc_t *b)
{
	uintptr_t a_miscelm = (uintptr_t)a;
	uintptr_t b_miscelm = (uintptr_t)b;

	assert(a != NULL);
	assert(b != NULL);

	return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm));
}

/* Generate pairing heap functions. */
ph_gen(static UNUSED, arena_run_heap_, arena_run_heap_t, arena_chunk_map_misc_t,
    ph_link, arena_run_addr_comp)

static size_t
run_quantize_floor_compute(size_t size)
{
	size_t qsize;

	assert(size != 0);
	assert(size == PAGE_CEILING(size));

	/* Don't change sizes that are valid small run sizes. */
	if (size <= small_maxrun && small_run_tab[size >> LG_PAGE])
		return (size);

	/*
	 * Round down to the nearest run size that can actually be requested
	 * during normal large allocation.  Add large_pad so that cache index
	 * randomization can offset the allocation from the page boundary.
	 */
	qsize = index2size(size2index(size - large_pad + 1) - 1) + large_pad;
	if (qsize <= SMALL_MAXCLASS + large_pad)
		return (run_quantize_floor_compute(size - large_pad));
	assert(qsize <= size);
	return (qsize);
}

static size_t
run_quantize_ceil_compute_hard(size_t size)
{
	size_t large_run_size_next;

	assert(size != 0);
	assert(size == PAGE_CEILING(size));

	/*
	 * Return the next quantized size greater than the input size.
	 * Quantized sizes comprise the union of run sizes that back small
	 * region runs, and run sizes that back large regions with no explicit
	 * alignment constraints.
	 */

	if (size > SMALL_MAXCLASS) {
		large_run_size_next = PAGE_CEILING(index2size(size2index(size -
		    large_pad) + 1) + large_pad);
	} else
		large_run_size_next = SIZE_T_MAX;
	if (size >= small_maxrun)
		return (large_run_size_next);

	while (true) {
		size += PAGE;
		assert(size <= small_maxrun);
		if (small_run_tab[size >> LG_PAGE]) {
			if (large_run_size_next < size)
				return (large_run_size_next);
			return (size);
		}
	}
}

static size_t
run_quantize_ceil_compute(size_t size)
{
	size_t qsize = run_quantize_floor_compute(size);

	if (qsize < size) {
		/*
		 * Skip a quantization that may have an adequately large run,
		 * because under-sized runs may be mixed in.  This only happens
		 * when an unusual size is requested, i.e. for aligned
		 * allocation, and is just one of several places where linear
		 * search would potentially find sufficiently aligned available
		 * memory somewhere lower.
		 */
		qsize = run_quantize_ceil_compute_hard(qsize);
	}
	return (qsize);
}
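
/*
 * Illustrative sketch (hypothetical helper, for exposition only): for any
 * nonzero page-aligned size no larger than run_quantize_max, the two
 * computations above bracket their input, and quantized sizes are fixed
 * points of run_quantize_floor_compute().
 */
static UNUSED void
run_quantize_compute_example(size_t size)
{
	UNUSED size_t qfloor = run_quantize_floor_compute(size);
	UNUSED size_t qceil = run_quantize_ceil_compute(size);

	assert(qfloor <= size && size <= qceil);
	assert(run_quantize_floor_compute(qfloor) == qfloor);
}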

#ifdef JEMALLOC_JET
#undef run_quantize_floor
#define	run_quantize_floor JEMALLOC_N(n_run_quantize_floor)
#endif
static size_t
run_quantize_floor(size_t size)
{
	size_t ret;

	assert(size > 0);
	assert(size <= run_quantize_max);
	assert((size & PAGE_MASK) == 0);

	ret = run_quantize_floor_tab[(size >> LG_PAGE) - 1];
	assert(ret == run_quantize_floor_compute(size));
	return (ret);
}
#ifdef JEMALLOC_JET
#undef run_quantize_floor
#define	run_quantize_floor JEMALLOC_N(run_quantize_floor)
run_quantize_t *run_quantize_floor = JEMALLOC_N(n_run_quantize_floor);
#endif

#ifdef JEMALLOC_JET
#undef run_quantize_ceil
#define	run_quantize_ceil JEMALLOC_N(n_run_quantize_ceil)
#endif
static size_t
run_quantize_ceil(size_t size)
{
	size_t ret;

	assert(size > 0);
	assert(size <= run_quantize_max);
	assert((size & PAGE_MASK) == 0);

	ret = run_quantize_ceil_tab[(size >> LG_PAGE) - 1];
	assert(ret == run_quantize_ceil_compute(size));
	return (ret);
}
#ifdef JEMALLOC_JET
#undef run_quantize_ceil
#define	run_quantize_ceil JEMALLOC_N(run_quantize_ceil)
run_quantize_t *run_quantize_ceil = JEMALLOC_N(n_run_quantize_ceil);
#endif

static arena_run_heap_t *
arena_runs_avail_get(arena_t *arena, szind_t ind)
{

	assert(ind >= runs_avail_bias);
	assert(ind - runs_avail_bias < runs_avail_nclasses);

	return (&arena->runs_avail[ind - runs_avail_bias]);
}

static void
arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
	szind_t ind = size2index(run_quantize_floor(arena_miscelm_size_get(
	    arena_miscelm_get_const(chunk, pageind))));
	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));
	arena_run_heap_insert(arena_runs_avail_get(arena, ind),
	    arena_miscelm_get_mutable(chunk, pageind));
}

static void
arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
	szind_t ind = size2index(run_quantize_floor(arena_miscelm_size_get(
	    arena_miscelm_get_const(chunk, pageind))));
	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));
	arena_run_heap_remove(arena_runs_avail_get(arena, ind),
	    arena_miscelm_get_mutable(chunk, pageind));
}

static void
arena_run_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
	arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
	    pageind);

	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));
	assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
	assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
	    CHUNK_MAP_DIRTY);

	qr_new(&miscelm->rd, rd_link);
	qr_meld(&arena->runs_dirty, &miscelm->rd, rd_link);
	arena->ndirty += npages;
}

static void
arena_run_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
	arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
	    pageind);

	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));
	assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
	assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
	    CHUNK_MAP_DIRTY);

	qr_remove(&miscelm->rd, rd_link);
	assert(arena->ndirty >= npages);
	arena->ndirty -= npages;
}

static size_t
arena_chunk_dirty_npages(const extent_node_t *node)
{

	return (extent_node_size_get(node) >> LG_PAGE);
}

void
arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node, bool cache)
{

	if (cache) {
		extent_node_dirty_linkage_init(node);
		extent_node_dirty_insert(node, &arena->runs_dirty,
		    &arena->chunks_cache);
		arena->ndirty += arena_chunk_dirty_npages(node);
	}
}

void
arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node, bool dirty)
{

	if (dirty) {
		extent_node_dirty_remove(node);
		assert(arena->ndirty >= arena_chunk_dirty_npages(node));
		arena->ndirty -= arena_chunk_dirty_npages(node);
	}
}

JEMALLOC_INLINE_C void *
arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
{
	void *ret;
	size_t regind;
	arena_chunk_map_misc_t *miscelm;
	void *rpages;

	assert(run->nfree > 0);
	assert(!bitmap_full(run->bitmap, &bin_info->bitmap_info));

	regind = (unsigned)bitmap_sfu(run->bitmap, &bin_info->bitmap_info);
	miscelm = arena_run_to_miscelm(run);
	rpages = arena_miscelm_to_rpages(miscelm);
	ret = (void *)((uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset +
	    (uintptr_t)(bin_info->reg_interval * regind));
	run->nfree--;
	return (ret);
}

JEMALLOC_INLINE_C void
arena_run_reg_dalloc(arena_run_t *run, void *ptr)
{
	arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	size_t mapbits = arena_mapbits_get(chunk, pageind);
	szind_t binind = arena_ptr_small_binind_get(ptr, mapbits);
	arena_bin_info_t *bin_info = &arena_bin_info[binind];
	size_t regind = arena_run_regind(run, bin_info, ptr);

	assert(run->nfree < bin_info->nregs);
	/* Freeing an interior pointer can cause assertion failure. */
	assert(((uintptr_t)ptr -
	    ((uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
	    (uintptr_t)bin_info->reg0_offset)) %
	    (uintptr_t)bin_info->reg_interval == 0);
	assert((uintptr_t)ptr >=
	    (uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
	    (uintptr_t)bin_info->reg0_offset);
	/* Freeing an unallocated pointer can cause assertion failure. */
	assert(bitmap_get(run->bitmap, &bin_info->bitmap_info, regind));

	bitmap_unset(run->bitmap, &bin_info->bitmap_info, regind);
	run->nfree++;
}
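
/*
 * Illustrative sketch (hypothetical helper, for exposition only): mirrors
 * the region addressing used by arena_run_reg_alloc() above.  Region regind
 * of a run whose usable pages start at rpages lives at
 * rpages + reg0_offset + regind * reg_interval.
 */
JEMALLOC_INLINE_C void *
arena_run_reg_addr_example(void *rpages, arena_bin_info_t *bin_info,
    size_t regind)
{

	assert(regind < bin_info->nregs);
	return ((void *)((uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset +
	    (uintptr_t)(bin_info->reg_interval * regind)));
}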

JEMALLOC_INLINE_C void
arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages)
{

	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
	    (run_ind << LG_PAGE)), (npages << LG_PAGE));
	memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0,
	    (npages << LG_PAGE));
}

JEMALLOC_INLINE_C void
arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind)
{

	JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind
	    << LG_PAGE)), PAGE);
}

JEMALLOC_INLINE_C void
arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
{
	size_t i;
	UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));

	arena_run_page_mark_zeroed(chunk, run_ind);
	for (i = 0; i < PAGE / sizeof(size_t); i++)
		assert(p[i] == 0);
}

static void
arena_nactive_add(arena_t *arena, size_t add_pages)
{

	if (config_stats) {
		size_t cactive_add = CHUNK_CEILING((arena->nactive +
		    add_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
		    LG_PAGE);
		if (cactive_add != 0)
			stats_cactive_add(cactive_add);
	}
	arena->nactive += add_pages;
}

static void
arena_nactive_sub(arena_t *arena, size_t sub_pages)
{

	if (config_stats) {
		size_t cactive_sub = CHUNK_CEILING(arena->nactive << LG_PAGE) -
		    CHUNK_CEILING((arena->nactive - sub_pages) << LG_PAGE);
		if (cactive_sub != 0)
			stats_cactive_sub(cactive_sub);
	}
	arena->nactive -= sub_pages;
}
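
/*
 * Illustrative sketch (hypothetical helper, for exposition only): the
 * cactive delta computed by arena_nactive_add() is chunk-granular, i.e. the
 * stats only change when the active page total crosses a chunk boundary.
 */
static UNUSED size_t
arena_cactive_add_example(size_t nactive, size_t add_pages)
{

	return (CHUNK_CEILING((nactive + add_pages) << LG_PAGE) -
	    CHUNK_CEILING(nactive << LG_PAGE));
}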

static void
arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind,
    size_t flag_dirty, size_t flag_decommitted, size_t need_pages)
{
	size_t total_pages, rem_pages;

	assert(flag_dirty == 0 || flag_decommitted == 0);

	total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >>
	    LG_PAGE;
	assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) ==
	    flag_dirty);
	assert(need_pages <= total_pages);
	rem_pages = total_pages - need_pages;

	arena_avail_remove(arena, chunk, run_ind, total_pages);
	if (flag_dirty != 0)
		arena_run_dirty_remove(arena, chunk, run_ind, total_pages);
	arena_nactive_add(arena, need_pages);

	/* Keep track of trailing unused pages for later use. */
	if (rem_pages > 0) {
		size_t flags = flag_dirty | flag_decommitted;
		size_t flag_unzeroed_mask = (flags == 0) ? CHUNK_MAP_UNZEROED :
		    0;

		arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
		    (rem_pages << LG_PAGE), flags |
		    (arena_mapbits_unzeroed_get(chunk, run_ind+need_pages) &
		    flag_unzeroed_mask));
		arena_mapbits_unallocated_set(chunk, run_ind+total_pages-1,
		    (rem_pages << LG_PAGE), flags |
		    (arena_mapbits_unzeroed_get(chunk, run_ind+total_pages-1) &
		    flag_unzeroed_mask));
		if (flag_dirty != 0) {
			arena_run_dirty_insert(arena, chunk, run_ind+need_pages,
			    rem_pages);
		}
		arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages);
	}
}

static bool
arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
    bool remove, bool zero)
{
	arena_chunk_t *chunk;
	arena_chunk_map_misc_t *miscelm;
	size_t flag_dirty, flag_decommitted, run_ind, need_pages;
	size_t flag_unzeroed_mask;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	miscelm = arena_run_to_miscelm(run);
	run_ind = arena_miscelm_to_pageind(miscelm);
	flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
	flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
	need_pages = (size >> LG_PAGE);
	assert(need_pages > 0);

	if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize,
	    run_ind << LG_PAGE, size, arena->ind))
		return (true);

	if (remove) {
		arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
		    flag_decommitted, need_pages);
	}

	if (zero) {
		if (flag_decommitted != 0) {
			/* The run is untouched, and therefore zeroed. */
			JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
			    *)((uintptr_t)chunk + (run_ind << LG_PAGE)),
			    (need_pages << LG_PAGE));
		} else if (flag_dirty != 0) {
			/* The run is dirty, so all pages must be zeroed. */
			arena_run_zero(chunk, run_ind, need_pages);
		} else {
			/*
			 * The run is clean, so some pages may be zeroed (i.e.
			 * never before touched).
			 */
			size_t i;
			for (i = 0; i < need_pages; i++) {
				if (arena_mapbits_unzeroed_get(chunk, run_ind+i)
				    != 0)
					arena_run_zero(chunk, run_ind+i, 1);
				else if (config_debug) {
					arena_run_page_validate_zeroed(chunk,
					    run_ind+i);
				} else {
					arena_run_page_mark_zeroed(chunk,
					    run_ind+i);
				}
			}
		}
	} else {
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
		    (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
	}

	/*
	 * Set the last element first, in case the run only contains one page
	 * (i.e. both statements set the same element).
	 */
	flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
	    CHUNK_MAP_UNZEROED : 0;
	arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty |
	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
	    run_ind+need_pages-1)));
	arena_mapbits_large_set(chunk, run_ind, size, flag_dirty |
	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, run_ind)));
	return (false);
}

static bool
arena_run_split_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
{

	return (arena_run_split_large_helper(arena, run, size, true, zero));
}

static bool
arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
{

	return (arena_run_split_large_helper(arena, run, size, false, zero));
}

static bool
arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size,
    szind_t binind)
{
	arena_chunk_t *chunk;
	arena_chunk_map_misc_t *miscelm;
	size_t flag_dirty, flag_decommitted, run_ind, need_pages, i;

	assert(binind != BININD_INVALID);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	miscelm = arena_run_to_miscelm(run);
	run_ind = arena_miscelm_to_pageind(miscelm);
	flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
	flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
	need_pages = (size >> LG_PAGE);
	assert(need_pages > 0);

	if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize,
	    run_ind << LG_PAGE, size, arena->ind))
		return (true);

	arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
	    flag_decommitted, need_pages);

	for (i = 0; i < need_pages; i++) {
		size_t flag_unzeroed = arena_mapbits_unzeroed_get(chunk,
		    run_ind+i);
		arena_mapbits_small_set(chunk, run_ind+i, i, binind,
		    flag_unzeroed);
		if (config_debug && flag_dirty == 0 && flag_unzeroed == 0)
			arena_run_page_validate_zeroed(chunk, run_ind+i);
	}
	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
	    (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
	return (false);
}

static arena_chunk_t *
arena_chunk_init_spare(arena_t *arena)
{
	arena_chunk_t *chunk;

	assert(arena->spare != NULL);

	chunk = arena->spare;
	arena->spare = NULL;

	assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
	assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
	assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
	    arena_maxrun);
	assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
	    arena_maxrun);
	assert(arena_mapbits_dirty_get(chunk, map_bias) ==
	    arena_mapbits_dirty_get(chunk, chunk_npages-1));

	return (chunk);
}

static bool
arena_chunk_register(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk,
    bool zero)
{

	/*
	 * The extent node notion of "committed" doesn't directly apply to
	 * arena chunks.  Arbitrarily mark them as committed.  The commit state
	 * of runs is tracked individually, and upon chunk deallocation the
	 * entire chunk is in a consistent commit state.
	 */
	extent_node_init(&chunk->node, arena, chunk, chunksize, zero, true);
	extent_node_achunk_set(&chunk->node, true);
	return (chunk_register(tsd, chunk, &chunk->node));
}

static arena_chunk_t *
arena_chunk_alloc_internal_hard(tsd_t *tsd, arena_t *arena,
    chunk_hooks_t *chunk_hooks, bool *zero, bool *commit)
{
	arena_chunk_t *chunk;

	malloc_mutex_unlock(tsd, &arena->lock);

	chunk = (arena_chunk_t *)chunk_alloc_wrapper(tsd, arena, chunk_hooks,
	    NULL, chunksize, chunksize, zero, commit);
	if (chunk != NULL && !*commit) {
		/* Commit header. */
		if (chunk_hooks->commit(chunk, chunksize, 0, map_bias <<
		    LG_PAGE, arena->ind)) {
			chunk_dalloc_wrapper(tsd, arena, chunk_hooks,
			    (void *)chunk, chunksize, *zero, *commit);
			chunk = NULL;
		}
	}
	if (chunk != NULL && arena_chunk_register(tsd, arena, chunk, *zero)) {
		if (!*commit) {
			/* Undo commit of header. */
			chunk_hooks->decommit(chunk, chunksize, 0, map_bias <<
			    LG_PAGE, arena->ind);
		}
		chunk_dalloc_wrapper(tsd, arena, chunk_hooks, (void *)chunk,
		    chunksize, *zero, *commit);
		chunk = NULL;
	}

	malloc_mutex_lock(tsd, &arena->lock);
	return (chunk);
}

static arena_chunk_t *
arena_chunk_alloc_internal(tsd_t *tsd, arena_t *arena, bool *zero, bool *commit)
{
	arena_chunk_t *chunk;
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;

	chunk = chunk_alloc_cache(tsd, arena, &chunk_hooks, NULL, chunksize,
	    chunksize, zero, true);
	if (chunk != NULL) {
		if (arena_chunk_register(tsd, arena, chunk, *zero)) {
			chunk_dalloc_cache(tsd, arena, &chunk_hooks, chunk,
			    chunksize, true);
			return (NULL);
		}
		*commit = true;
	}
	if (chunk == NULL) {
		chunk = arena_chunk_alloc_internal_hard(tsd, arena,
		    &chunk_hooks, zero, commit);
	}

	if (config_stats && chunk != NULL) {
		arena->stats.mapped += chunksize;
		arena->stats.metadata_mapped += (map_bias << LG_PAGE);
	}

	return (chunk);
}

static arena_chunk_t *
arena_chunk_init_hard(tsd_t *tsd, arena_t *arena)
{
	arena_chunk_t *chunk;
	bool zero, commit;
	size_t flag_unzeroed, flag_decommitted, i;

	assert(arena->spare == NULL);

	zero = false;
	commit = false;
	chunk = arena_chunk_alloc_internal(tsd, arena, &zero, &commit);
	if (chunk == NULL)
		return (NULL);

	/*
	 * Initialize the map to contain one maximal free untouched run.  Mark
	 * the pages as zeroed if arena_chunk_alloc_internal() returned a zeroed
	 * or decommitted chunk.
	 */
	flag_unzeroed = (zero || !commit) ? 0 : CHUNK_MAP_UNZEROED;
	flag_decommitted = commit ? 0 : CHUNK_MAP_DECOMMITTED;
	arena_mapbits_unallocated_set(chunk, map_bias, arena_maxrun,
	    flag_unzeroed | flag_decommitted);
	/*
	 * There is no need to initialize the internal page map entries unless
	 * the chunk is not zeroed.
	 */
	if (!zero) {
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
		    (void *)arena_bitselm_get_const(chunk, map_bias+1),
		    (size_t)((uintptr_t)arena_bitselm_get_const(chunk,
		    chunk_npages-1) -
		    (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1)));
		for (i = map_bias+1; i < chunk_npages-1; i++)
			arena_mapbits_internal_set(chunk, i, flag_unzeroed);
	} else {
		JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
		    *)arena_bitselm_get_const(chunk, map_bias+1),
		    (size_t)((uintptr_t)arena_bitselm_get_const(chunk,
		    chunk_npages-1) -
		    (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1)));
		if (config_debug) {
			for (i = map_bias+1; i < chunk_npages-1; i++) {
				assert(arena_mapbits_unzeroed_get(chunk, i) ==
				    flag_unzeroed);
			}
		}
	}
	arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxrun,
	    flag_unzeroed);

	return (chunk);
}

static arena_chunk_t *
arena_chunk_alloc(tsd_t *tsd, arena_t *arena)
{
	arena_chunk_t *chunk;

	if (arena->spare != NULL)
		chunk = arena_chunk_init_spare(arena);
	else {
		chunk = arena_chunk_init_hard(tsd, arena);
		if (chunk == NULL)
			return (NULL);
	}

	arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias);

	return (chunk);
}

static void
arena_chunk_dalloc(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk)
{

	assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
	assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
	assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
	    arena_maxrun);
	assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
	    arena_maxrun);
	assert(arena_mapbits_dirty_get(chunk, map_bias) ==
	    arena_mapbits_dirty_get(chunk, chunk_npages-1));
	assert(arena_mapbits_decommitted_get(chunk, map_bias) ==
	    arena_mapbits_decommitted_get(chunk, chunk_npages-1));

	/* Remove run from runs_avail, so that the arena does not use it. */
	arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias);

	if (arena->spare != NULL) {
		arena_chunk_t *spare = arena->spare;
		chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
		bool committed;

		arena->spare = chunk;
		if (arena_mapbits_dirty_get(spare, map_bias) != 0) {
			arena_run_dirty_remove(arena, spare, map_bias,
			    chunk_npages-map_bias);
		}

		chunk_deregister(spare, &spare->node);

		committed = (arena_mapbits_decommitted_get(spare, map_bias) ==
		    0);
		if (!committed) {
			/*
			 * Decommit the header.  Mark the chunk as decommitted
			 * even if header decommit fails, since treating a
			 * partially committed chunk as committed has a high
			 * potential for causing later access of decommitted
			 * memory.
			 */
			chunk_hooks = chunk_hooks_get(tsd, arena);
			chunk_hooks.decommit(spare, chunksize, 0, map_bias <<
			    LG_PAGE, arena->ind);
		}

		chunk_dalloc_cache(tsd, arena, &chunk_hooks, (void *)spare,
		    chunksize, committed);

		if (config_stats) {
			arena->stats.mapped -= chunksize;
			arena->stats.metadata_mapped -= (map_bias << LG_PAGE);
		}
	} else
		arena->spare = chunk;
}

static void
arena_huge_malloc_stats_update(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.nmalloc_huge++;
	arena->stats.allocated_huge += usize;
	arena->stats.hstats[index].nmalloc++;
	arena->stats.hstats[index].curhchunks++;
}

static void
arena_huge_malloc_stats_update_undo(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.nmalloc_huge--;
	arena->stats.allocated_huge -= usize;
	arena->stats.hstats[index].nmalloc--;
	arena->stats.hstats[index].curhchunks--;
}

static void
arena_huge_dalloc_stats_update(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.ndalloc_huge++;
	arena->stats.allocated_huge -= usize;
	arena->stats.hstats[index].ndalloc++;
	arena->stats.hstats[index].curhchunks--;
}

static void
arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.ndalloc_huge--;
	arena->stats.allocated_huge += usize;
	arena->stats.hstats[index].ndalloc--;
	arena->stats.hstats[index].curhchunks++;
}

static void
arena_huge_ralloc_stats_update(arena_t *arena, size_t oldsize, size_t usize)
{

	arena_huge_dalloc_stats_update(arena, oldsize);
	arena_huge_malloc_stats_update(arena, usize);
}

static void
arena_huge_ralloc_stats_update_undo(arena_t *arena, size_t oldsize,
    size_t usize)
{

	arena_huge_dalloc_stats_update_undo(arena, oldsize);
	arena_huge_malloc_stats_update_undo(arena, usize);
}

extent_node_t *
arena_node_alloc(tsd_t *tsd, arena_t *arena)
{
	extent_node_t *node;

	malloc_mutex_lock(tsd, &arena->node_cache_mtx);
	node = ql_last(&arena->node_cache, ql_link);
	if (node == NULL) {
		malloc_mutex_unlock(tsd, &arena->node_cache_mtx);
		return (base_alloc(tsd, sizeof(extent_node_t)));
	}
	ql_tail_remove(&arena->node_cache, extent_node_t, ql_link);
	malloc_mutex_unlock(tsd, &arena->node_cache_mtx);
	return (node);
}

void
arena_node_dalloc(tsd_t *tsd, arena_t *arena, extent_node_t *node)
{

	malloc_mutex_lock(tsd, &arena->node_cache_mtx);
	ql_elm_new(node, ql_link);
	ql_tail_insert(&arena->node_cache, node, ql_link);
	malloc_mutex_unlock(tsd, &arena->node_cache_mtx);
}

static void *
arena_chunk_alloc_huge_hard(tsd_t *tsd, arena_t *arena,
    chunk_hooks_t *chunk_hooks, size_t usize, size_t alignment, bool *zero,
    size_t csize)
{
	void *ret;
	bool commit = true;

	ret = chunk_alloc_wrapper(tsd, arena, chunk_hooks, NULL, csize,
	    alignment, zero, &commit);
	if (ret == NULL) {
		/* Revert optimistic stats updates. */
		malloc_mutex_lock(tsd, &arena->lock);
		if (config_stats) {
			arena_huge_malloc_stats_update_undo(arena, usize);
			arena->stats.mapped -= usize;
		}
		arena_nactive_sub(arena, usize >> LG_PAGE);
		malloc_mutex_unlock(tsd, &arena->lock);
	}

	return (ret);
}

void *
arena_chunk_alloc_huge(tsd_t *tsd, arena_t *arena, size_t usize,
    size_t alignment, bool *zero)
{
	void *ret;
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
	size_t csize = CHUNK_CEILING(usize);

	malloc_mutex_lock(tsd, &arena->lock);

	/* Optimistically update stats. */
	if (config_stats) {
		arena_huge_malloc_stats_update(arena, usize);
		arena->stats.mapped += usize;
	}
	arena_nactive_add(arena, usize >> LG_PAGE);

	ret = chunk_alloc_cache(tsd, arena, &chunk_hooks, NULL, csize,
	    alignment, zero, true);
	malloc_mutex_unlock(tsd, &arena->lock);
	if (ret == NULL) {
		ret = arena_chunk_alloc_huge_hard(tsd, arena, &chunk_hooks,
		    usize, alignment, zero, csize);
	}

	return (ret);
}

void
arena_chunk_dalloc_huge(tsd_t *tsd, arena_t *arena, void *chunk, size_t usize)
{
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
	size_t csize;

	csize = CHUNK_CEILING(usize);
	malloc_mutex_lock(tsd, &arena->lock);
	if (config_stats) {
		arena_huge_dalloc_stats_update(arena, usize);
		arena->stats.mapped -= usize;
	}
	arena_nactive_sub(arena, usize >> LG_PAGE);

	chunk_dalloc_cache(tsd, arena, &chunk_hooks, chunk, csize, true);
	malloc_mutex_unlock(tsd, &arena->lock);
}

void
arena_chunk_ralloc_huge_similar(tsd_t *tsd, arena_t *arena, void *chunk,
    size_t oldsize, size_t usize)
{

	assert(CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize));
	assert(oldsize != usize);

	malloc_mutex_lock(tsd, &arena->lock);
	if (config_stats)
		arena_huge_ralloc_stats_update(arena, oldsize, usize);
	if (oldsize < usize)
		arena_nactive_add(arena, (usize - oldsize) >> LG_PAGE);
	else
		arena_nactive_sub(arena, (oldsize - usize) >> LG_PAGE);
	malloc_mutex_unlock(tsd, &arena->lock);
}

void
arena_chunk_ralloc_huge_shrink(tsd_t *tsd, arena_t *arena, void *chunk,
    size_t oldsize, size_t usize)
{
	size_t udiff = oldsize - usize;
	size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);

	malloc_mutex_lock(tsd, &arena->lock);
	if (config_stats) {
		arena_huge_ralloc_stats_update(arena, oldsize, usize);
		if (cdiff != 0)
			arena->stats.mapped -= cdiff;
	}
	arena_nactive_sub(arena, udiff >> LG_PAGE);

	if (cdiff != 0) {
		chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
		void *nchunk = (void *)((uintptr_t)chunk +
		    CHUNK_CEILING(usize));

		chunk_dalloc_cache(tsd, arena, &chunk_hooks, nchunk, cdiff,
		    true);
	}
	malloc_mutex_unlock(tsd, &arena->lock);
}

static bool
arena_chunk_ralloc_huge_expand_hard(tsd_t *tsd, arena_t *arena,
    chunk_hooks_t *chunk_hooks, void *chunk, size_t oldsize, size_t usize,
    bool *zero, void *nchunk, size_t udiff, size_t cdiff)
{
	bool err;
	bool commit = true;

	err = (chunk_alloc_wrapper(tsd, arena, chunk_hooks, nchunk, cdiff,
	    chunksize, zero, &commit) == NULL);
	if (err) {
		/* Revert optimistic stats updates. */
		malloc_mutex_lock(tsd, &arena->lock);
		if (config_stats) {
			arena_huge_ralloc_stats_update_undo(arena, oldsize,
			    usize);
			arena->stats.mapped -= cdiff;
		}
		arena_nactive_sub(arena, udiff >> LG_PAGE);
		malloc_mutex_unlock(tsd, &arena->lock);
	} else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk,
	    cdiff, true, arena->ind)) {
		chunk_dalloc_wrapper(tsd, arena, chunk_hooks, nchunk, cdiff,
		    *zero, true);
		err = true;
	}
	return (err);
}

bool
arena_chunk_ralloc_huge_expand(tsd_t *tsd, arena_t *arena, void *chunk,
    size_t oldsize, size_t usize, bool *zero)
{
	bool err;
	chunk_hooks_t chunk_hooks = chunk_hooks_get(tsd, arena);
	void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize));
	size_t udiff = usize - oldsize;
	size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize);

	malloc_mutex_lock(tsd, &arena->lock);

	/* Optimistically update stats. */
	if (config_stats) {
		arena_huge_ralloc_stats_update(arena, oldsize, usize);
		arena->stats.mapped += cdiff;
	}
	arena_nactive_add(arena, udiff >> LG_PAGE);

	err = (chunk_alloc_cache(tsd, arena, &chunk_hooks, nchunk, cdiff,
	    chunksize, zero, true) == NULL);
	malloc_mutex_unlock(tsd, &arena->lock);
	if (err) {
		err = arena_chunk_ralloc_huge_expand_hard(tsd, arena,
		    &chunk_hooks, chunk, oldsize, usize, zero, nchunk, udiff,
		    cdiff);
	} else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk,
	    cdiff, true, arena->ind)) {
		chunk_dalloc_wrapper(tsd, arena, &chunk_hooks, nchunk, cdiff,
		    *zero, true);
		err = true;
	}

	return (err);
}

/*
 * Do first-best-fit run selection, i.e. select the lowest run that best fits.
 * Run sizes are indexed, so not all candidate runs are necessarily exactly the
 * same size.
 */
static arena_run_t *
arena_run_first_best_fit(arena_t *arena, size_t size)
{
	szind_t ind, i;

	ind = size2index(run_quantize_ceil(size));
	for (i = ind; i < runs_avail_nclasses + runs_avail_bias; i++) {
		arena_chunk_map_misc_t *miscelm = arena_run_heap_first(
		    arena_runs_avail_get(arena, i));
		if (miscelm != NULL)
			return (&miscelm->run);
	}

	return (NULL);
}
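
/*
 * Illustrative sketch (hypothetical helper, for exposition only):
 * arena_run_first_best_fit() scans the runs_avail heaps in increasing
 * size-class order, so the returned run belongs to the smallest size class
 * that currently has an available run.  This helper exposes that class
 * index, returning runs_avail_nclasses + runs_avail_bias when no run fits
 * (the NULL case above).
 */
static UNUSED szind_t
arena_run_first_best_fit_ind_example(arena_t *arena, size_t size)
{
	szind_t i;

	for (i = size2index(run_quantize_ceil(size)); i <
	    runs_avail_nclasses + runs_avail_bias; i++) {
		if (arena_run_heap_first(arena_runs_avail_get(arena, i)) !=
		    NULL)
			return (i);
	}
	return (runs_avail_nclasses + runs_avail_bias);
}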

static arena_run_t *
arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
{
	arena_run_t *run = arena_run_first_best_fit(arena, s2u(size));
	if (run != NULL) {
		if (arena_run_split_large(arena, run, size, zero))
			run = NULL;
	}
	return (run);
}

static arena_run_t *
arena_run_alloc_large(tsd_t *tsd, arena_t *arena, size_t size, bool zero)
{
	arena_chunk_t *chunk;
	arena_run_t *run;

	assert(size <= arena_maxrun);
	assert(size == PAGE_CEILING(size));

	/* Search the arena's chunks for the lowest best fit. */
	run = arena_run_alloc_large_helper(arena, size, zero);
	if (run != NULL)
		return (run);

	/*
	 * No usable runs.  Create a new chunk from which to allocate the run.
	 */
	chunk = arena_chunk_alloc(tsd, arena);
	if (chunk != NULL) {
		run = &arena_miscelm_get_mutable(chunk, map_bias)->run;
		if (arena_run_split_large(arena, run, size, zero))
			run = NULL;
		return (run);
	}

	/*
	 * arena_chunk_alloc() failed, but another thread may have made
	 * sufficient memory available while this one dropped arena->lock in
	 * arena_chunk_alloc(), so search one more time.
	 */
	return (arena_run_alloc_large_helper(arena, size, zero));
}

static arena_run_t *
arena_run_alloc_small_helper(arena_t *arena, size_t size, szind_t binind)
{
	arena_run_t *run = arena_run_first_best_fit(arena, size);
	if (run != NULL) {
		if (arena_run_split_small(arena, run, size, binind))
			run = NULL;
	}
	return (run);
}

static arena_run_t *
arena_run_alloc_small(tsd_t *tsd, arena_t *arena, size_t size, szind_t binind)
{
	arena_chunk_t *chunk;
	arena_run_t *run;

	assert(size <= arena_maxrun);
	assert(size == PAGE_CEILING(size));
	assert(binind != BININD_INVALID);

	/* Search the arena's chunks for the lowest best fit. */
	run = arena_run_alloc_small_helper(arena, size, binind);
	if (run != NULL)
		return (run);

	/*
	 * No usable runs.  Create a new chunk from which to allocate the run.
	 */
	chunk = arena_chunk_alloc(tsd, arena);
	if (chunk != NULL) {
		run = &arena_miscelm_get_mutable(chunk, map_bias)->run;
		if (arena_run_split_small(arena, run, size, binind))
			run = NULL;
		return (run);
	}

	/*
	 * arena_chunk_alloc() failed, but another thread may have made
	 * sufficient memory available while this one dropped arena->lock in
	 * arena_chunk_alloc(), so search one more time.
	 */
	return (arena_run_alloc_small_helper(arena, size, binind));
}

static bool
arena_lg_dirty_mult_valid(ssize_t lg_dirty_mult)
{

	return (lg_dirty_mult >= -1 && lg_dirty_mult < (ssize_t)(sizeof(size_t)
	    << 3));
}

ssize_t
arena_lg_dirty_mult_get(tsd_t *tsd, arena_t *arena)
{
	ssize_t lg_dirty_mult;

	malloc_mutex_lock(tsd, &arena->lock);
	lg_dirty_mult = arena->lg_dirty_mult;
	malloc_mutex_unlock(tsd, &arena->lock);

	return (lg_dirty_mult);
}

bool
arena_lg_dirty_mult_set(tsd_t *tsd, arena_t *arena, ssize_t lg_dirty_mult)
{

	if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
		return (true);

	malloc_mutex_lock(tsd, &arena->lock);
	arena->lg_dirty_mult = lg_dirty_mult;
	arena_maybe_purge(tsd, arena);
	malloc_mutex_unlock(tsd, &arena->lock);

	return (false);
}

static void
arena_decay_deadline_init(arena_t *arena)
{

	assert(opt_purge == purge_mode_decay);

	/*
	 * Generate a new deadline that is uniformly random within the next
	 * epoch after the current one.
	 */
	nstime_copy(&arena->decay_deadline, &arena->decay_epoch);
	nstime_add(&arena->decay_deadline, &arena->decay_interval);
	if (arena->decay_time > 0) {
		nstime_t jitter;

		nstime_init(&jitter, prng_range(&arena->decay_jitter_state,
		    nstime_ns(&arena->decay_interval)));
		nstime_add(&arena->decay_deadline, &jitter);
	}
}

static bool
arena_decay_deadline_reached(const arena_t *arena, const nstime_t *time)
{

	assert(opt_purge == purge_mode_decay);

	return (nstime_compare(&arena->decay_deadline, time) <= 0);
}

static size_t
arena_decay_backlog_npages_limit(const arena_t *arena)
{
	static const uint64_t h_steps[] = {
#define	STEP(step, h, x, y) \
		h,
		SMOOTHSTEP
#undef STEP
	};
	uint64_t sum;
	size_t npages_limit_backlog;
	unsigned i;

	assert(opt_purge == purge_mode_decay);

	/*
	 * For each element of decay_backlog, multiply by the corresponding
	 * fixed-point smoothstep decay factor.  Sum the products, then divide
	 * to round down to the nearest whole number of pages.
	 */
	sum = 0;
	for (i = 0; i < SMOOTHSTEP_NSTEPS; i++)
		sum += arena->decay_backlog[i] * h_steps[i];
	npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP);

	return (npages_limit_backlog);
}
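
/*
 * Illustrative sketch (hypothetical helper, for exposition only): with
 * SMOOTHSTEP_BFP fractional bits, one backlog entry of npages dirty pages
 * contributes floor(npages * h / 2^SMOOTHSTEP_BFP) pages to the limit
 * computed above, where h is the smoothstep factor for that entry's age.
 */
static UNUSED size_t
arena_decay_backlog_contrib_example(size_t npages, uint64_t h)
{

	return ((size_t)(((uint64_t)npages * h) >> SMOOTHSTEP_BFP));
}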

static void
arena_decay_epoch_advance(arena_t *arena, const nstime_t *time)
{
	uint64_t nadvance_u64;
	nstime_t delta;
	size_t ndirty_delta;

	assert(opt_purge == purge_mode_decay);
	assert(arena_decay_deadline_reached(arena, time));

	nstime_copy(&delta, time);
	nstime_subtract(&delta, &arena->decay_epoch);
	nadvance_u64 = nstime_divide(&delta, &arena->decay_interval);
	assert(nadvance_u64 > 0);

	/* Add nadvance_u64 decay intervals to epoch. */
	nstime_copy(&delta, &arena->decay_interval);
	nstime_imultiply(&delta, nadvance_u64);
	nstime_add(&arena->decay_epoch, &delta);

	/* Set a new deadline. */
	arena_decay_deadline_init(arena);

	/* Update the backlog. */
	if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) {
		memset(arena->decay_backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
		    sizeof(size_t));
	} else {
		size_t nadvance_z = (size_t)nadvance_u64;

		assert((uint64_t)nadvance_z == nadvance_u64);

		memmove(arena->decay_backlog, &arena->decay_backlog[nadvance_z],
		    (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t));
		if (nadvance_z > 1) {
			memset(&arena->decay_backlog[SMOOTHSTEP_NSTEPS -
			    nadvance_z], 0, (nadvance_z-1) * sizeof(size_t));
		}
	}
	ndirty_delta = (arena->ndirty > arena->decay_ndirty) ? arena->ndirty -
	    arena->decay_ndirty : 0;
	arena->decay_ndirty = arena->ndirty;
	arena->decay_backlog[SMOOTHSTEP_NSTEPS-1] = ndirty_delta;
	arena->decay_backlog_npages_limit =
	    arena_decay_backlog_npages_limit(arena);
}
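
/*
 * Illustrative sketch (hypothetical helper, for exposition only): the
 * backlog update above, restated on a plain array.  Advancing nadvance
 * epochs discards the oldest nadvance entries, zero-fills the newly exposed
 * slots, and leaves the final slot for the current epoch's dirty-page delta.
 */
static UNUSED void
arena_decay_backlog_shift_example(size_t *backlog, size_t nsteps,
    size_t nadvance, size_t ndirty_delta)
{

	if (nadvance >= nsteps)
		memset(backlog, 0, (nsteps-1) * sizeof(size_t));
	else {
		memmove(backlog, &backlog[nadvance], (nsteps - nadvance) *
		    sizeof(size_t));
		if (nadvance > 1) {
			memset(&backlog[nsteps - nadvance], 0, (nadvance-1) *
			    sizeof(size_t));
		}
	}
	backlog[nsteps-1] = ndirty_delta;
}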

static size_t
arena_decay_npages_limit(arena_t *arena)
{
	size_t npages_limit;

	assert(opt_purge == purge_mode_decay);

	npages_limit = arena->decay_backlog_npages_limit;

	/* Add in any dirty pages created during the current epoch. */
	if (arena->ndirty > arena->decay_ndirty)
		npages_limit += arena->ndirty - arena->decay_ndirty;

	return (npages_limit);
}

static void
arena_decay_init(arena_t *arena, ssize_t decay_time)
{

	arena->decay_time = decay_time;
	if (decay_time > 0) {
		nstime_init2(&arena->decay_interval, decay_time, 0);
		nstime_idivide(&arena->decay_interval, SMOOTHSTEP_NSTEPS);
	}

	nstime_init(&arena->decay_epoch, 0);
	nstime_update(&arena->decay_epoch);
	arena->decay_jitter_state = (uint64_t)(uintptr_t)arena;
	arena_decay_deadline_init(arena);
	arena->decay_ndirty = arena->ndirty;
	arena->decay_backlog_npages_limit = 0;
	memset(arena->decay_backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
}

static bool
arena_decay_time_valid(ssize_t decay_time)
{

	if (decay_time < -1)
		return (false);
	if (decay_time == -1 || (uint64_t)decay_time <= NSTIME_SEC_MAX)
		return (true);
	return (false);
}

ssize_t
arena_decay_time_get(tsd_t *tsd, arena_t *arena)
{
	ssize_t decay_time;

	malloc_mutex_lock(tsd, &arena->lock);
	decay_time = arena->decay_time;
	malloc_mutex_unlock(tsd, &arena->lock);

	return (decay_time);
}

bool
arena_decay_time_set(tsd_t *tsd, arena_t *arena, ssize_t decay_time)
{

	if (!arena_decay_time_valid(decay_time))
		return (true);

	malloc_mutex_lock(tsd, &arena->lock);
	/*
	 * Restart decay backlog from scratch, which may cause many dirty pages
	 * to be immediately purged.  It would conceptually be possible to map
	 * the old backlog onto the new backlog, but there is no justification
	 * for such complexity since decay_time changes are intended to be
	 * infrequent, either between the {-1, 0, >0} states, or a one-time
	 * arbitrary change during initial arena configuration.
	 */
	arena_decay_init(arena, decay_time);
	arena_maybe_purge(tsd, arena);
	malloc_mutex_unlock(tsd, &arena->lock);

	return (false);
}

static void
arena_maybe_purge_ratio(tsd_t *tsd, arena_t *arena)
{

	assert(opt_purge == purge_mode_ratio);

	/* Don't purge if the option is disabled. */
	if (arena->lg_dirty_mult < 0)
		return;

	/*
	 * Iterate, since preventing recursive purging could otherwise leave too
	 * many dirty pages.
	 */
	while (true) {
		size_t threshold = (arena->nactive >> arena->lg_dirty_mult);
		if (threshold < chunk_npages)
			threshold = chunk_npages;
		/*
		 * Don't purge unless the number of purgeable pages exceeds the
		 * threshold.
		 */
		if (arena->ndirty <= threshold)
			return;
		arena_purge_to_limit(tsd, arena, threshold);
	}
}
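
/*
 * Illustrative sketch (hypothetical helper, for exposition only): the
 * ratio-mode threshold computed above.  For example, lg_dirty_mult == 3
 * tolerates up to one dirty page per eight active pages, subject to the
 * chunk_npages floor.
 */
static UNUSED size_t
arena_ratio_threshold_example(size_t nactive, ssize_t lg_dirty_mult)
{
	size_t threshold;

	assert(lg_dirty_mult >= 0);
	threshold = nactive >> lg_dirty_mult;
	return (threshold < chunk_npages ? chunk_npages : threshold);
}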

static void
arena_maybe_purge_decay(tsd_t *tsd, arena_t *arena)
{
	nstime_t time;
	size_t ndirty_limit;

	assert(opt_purge == purge_mode_decay);

	/* Purge all or nothing if the option is disabled. */
	if (arena->decay_time <= 0) {
		if (arena->decay_time == 0)
			arena_purge_to_limit(tsd, arena, 0);
		return;
	}

	nstime_copy(&time, &arena->decay_epoch);
	if (unlikely(nstime_update(&time))) {
		/* Time went backwards.  Force an epoch advance. */
		nstime_copy(&time, &arena->decay_deadline);
	}

	if (arena_decay_deadline_reached(arena, &time))
		arena_decay_epoch_advance(arena, &time);

	ndirty_limit = arena_decay_npages_limit(arena);

	/*
	 * Don't try to purge unless the number of purgeable pages exceeds the
	 * current limit.
	 */
	if (arena->ndirty <= ndirty_limit)
		return;
	arena_purge_to_limit(tsd, arena, ndirty_limit);
}

void
arena_maybe_purge(tsd_t *tsd, arena_t *arena)
{

	/* Don't recursively purge. */
	if (arena->purging)
		return;

	if (opt_purge == purge_mode_ratio)
		arena_maybe_purge_ratio(tsd, arena);
	else
		arena_maybe_purge_decay(tsd, arena);
}

static size_t
arena_dirty_count(arena_t *arena)
{
	size_t ndirty = 0;
	arena_runs_dirty_link_t *rdelm;
	extent_node_t *chunkselm;

	for (rdelm = qr_next(&arena->runs_dirty, rd_link),
	    chunkselm = qr_next(&arena->chunks_cache, cc_link);
	    rdelm != &arena->runs_dirty; rdelm = qr_next(rdelm, rd_link)) {
		size_t npages;

		if (rdelm == &chunkselm->rd) {
			npages = extent_node_size_get(chunkselm) >> LG_PAGE;
			chunkselm = qr_next(chunkselm, cc_link);
		} else {
			arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
			    rdelm);
			arena_chunk_map_misc_t *miscelm =
			    arena_rd_to_miscelm(rdelm);
			size_t pageind = arena_miscelm_to_pageind(miscelm);
			assert(arena_mapbits_allocated_get(chunk, pageind) ==
			    0);
			assert(arena_mapbits_large_get(chunk, pageind) == 0);
			assert(arena_mapbits_dirty_get(chunk, pageind) != 0);
			npages = arena_mapbits_unallocated_size_get(chunk,
			    pageind) >> LG_PAGE;
		}
		ndirty += npages;
	}

	return (ndirty);
}

static size_t
arena_stash_dirty(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
    size_t ndirty_limit, arena_runs_dirty_link_t *purge_runs_sentinel,
    extent_node_t *purge_chunks_sentinel)
{
	arena_runs_dirty_link_t *rdelm, *rdelm_next;
	extent_node_t *chunkselm;
	size_t nstashed = 0;

	/* Stash runs/chunks according to ndirty_limit. */
	for (rdelm = qr_next(&arena->runs_dirty, rd_link),
	    chunkselm = qr_next(&arena->chunks_cache, cc_link);
	    rdelm != &arena->runs_dirty; rdelm = rdelm_next) {
		size_t npages;
		rdelm_next = qr_next(rdelm, rd_link);

		if (rdelm == &chunkselm->rd) {
			extent_node_t *chunkselm_next;
			bool zero;
			UNUSED void *chunk;

			npages = extent_node_size_get(chunkselm) >> LG_PAGE;
			if (opt_purge == purge_mode_decay && arena->ndirty -
			    (nstashed + npages) < ndirty_limit)
				break;

			chunkselm_next = qr_next(chunkselm, cc_link);
			/*
			 * Allocate.  chunkselm remains valid due to the
			 * dalloc_node=false argument to chunk_alloc_cache().
			 */
			zero = false;
			chunk = chunk_alloc_cache(tsd, arena, chunk_hooks,
			    extent_node_addr_get(chunkselm),
			    extent_node_size_get(chunkselm), chunksize, &zero,
			    false);
			assert(chunk == extent_node_addr_get(chunkselm));
			assert(zero == extent_node_zeroed_get(chunkselm));
			extent_node_dirty_insert(chunkselm, purge_runs_sentinel,
			    purge_chunks_sentinel);
			assert(npages == (extent_node_size_get(chunkselm) >>
			    LG_PAGE));
			chunkselm = chunkselm_next;
		} else {
			arena_chunk_t *chunk =
			    (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
			arena_chunk_map_misc_t *miscelm =
			    arena_rd_to_miscelm(rdelm);
			size_t pageind = arena_miscelm_to_pageind(miscelm);
			arena_run_t *run = &miscelm->run;
			size_t run_size =
			    arena_mapbits_unallocated_size_get(chunk, pageind);

			npages = run_size >> LG_PAGE;
			if (opt_purge == purge_mode_decay && arena->ndirty -
			    (nstashed + npages) < ndirty_limit)
				break;

			assert(pageind + npages <= chunk_npages);
			assert(arena_mapbits_dirty_get(chunk, pageind) ==
			    arena_mapbits_dirty_get(chunk, pageind+npages-1));

			/*
			 * If purging the spare chunk's run, make it available
			 * prior to allocation.
			 */
			if (chunk == arena->spare)
				arena_chunk_alloc(tsd, arena);

			/* Temporarily allocate the free dirty run. */
			arena_run_split_large(arena, run, run_size, false);
			/* Stash. */
			if (false)
				qr_new(rdelm, rd_link); /* Redundant. */
			else {
				assert(qr_next(rdelm, rd_link) == rdelm);
				assert(qr_prev(rdelm, rd_link) == rdelm);
			}
			qr_meld(purge_runs_sentinel, rdelm, rd_link);
		}

		nstashed += npages;
		if (opt_purge == purge_mode_ratio && arena->ndirty - nstashed <=
		    ndirty_limit)
			break;
	}

	return (nstashed);
}

static size_t
arena_purge_stashed(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
    arena_runs_dirty_link_t *purge_runs_sentinel,
    extent_node_t *purge_chunks_sentinel)
{
	size_t npurged, nmadvise;
	arena_runs_dirty_link_t *rdelm;
	extent_node_t *chunkselm;

	if (config_stats)
		nmadvise = 0;
	npurged = 0;

	malloc_mutex_unlock(tsd, &arena->lock);
	for (rdelm = qr_next(purge_runs_sentinel, rd_link),
	    chunkselm = qr_next(purge_chunks_sentinel, cc_link);
	    rdelm != purge_runs_sentinel; rdelm = qr_next(rdelm, rd_link)) {
		size_t npages;

		if (rdelm == &chunkselm->rd) {
			/*
			 * Don't actually purge the chunk here because 1)
			 * chunkselm is embedded in the chunk and must remain
			 * valid, and 2) we deallocate the chunk in
			 * arena_unstash_purged(), where it is destroyed,
			 * decommitted, or purged, depending on chunk
			 * deallocation policy.
			 */
			size_t size = extent_node_size_get(chunkselm);
			npages = size >> LG_PAGE;
			chunkselm = qr_next(chunkselm, cc_link);
		} else {
			size_t pageind, run_size, flag_unzeroed, flags, i;
			bool decommitted;
			arena_chunk_t *chunk =
			    (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
			arena_chunk_map_misc_t *miscelm =
			    arena_rd_to_miscelm(rdelm);
			pageind = arena_miscelm_to_pageind(miscelm);
			run_size = arena_mapbits_large_size_get(chunk, pageind);
			npages = run_size >> LG_PAGE;

			assert(pageind + npages <= chunk_npages);
			assert(!arena_mapbits_decommitted_get(chunk, pageind));
			assert(!arena_mapbits_decommitted_get(chunk,
			    pageind+npages-1));
			decommitted = !chunk_hooks->decommit(chunk, chunksize,
			    pageind << LG_PAGE, npages << LG_PAGE, arena->ind);
			if (decommitted) {
				flag_unzeroed = 0;
				flags = CHUNK_MAP_DECOMMITTED;
			} else {
				flag_unzeroed = chunk_purge_wrapper(tsd, arena,
				    chunk_hooks, chunk, chunksize, pageind <<
				    LG_PAGE, run_size) ? CHUNK_MAP_UNZEROED : 0;
				flags = flag_unzeroed;
			}
			arena_mapbits_large_set(chunk, pageind+npages-1, 0,
			    flags);
			arena_mapbits_large_set(chunk, pageind, run_size,
			    flags);

			/*
			 * Set the unzeroed flag for internal pages, now that
			 * chunk_purge_wrapper() has returned whether the pages
			 * were zeroed as a side effect of purging.  This chunk
			 * map modification is safe even though the arena mutex
			 * isn't currently owned by this thread, because the run
			 * is marked as allocated, thus protecting it from being
			 * modified by any other thread.  As long as these
			 * writes don't perturb the first and last elements'
			 * CHUNK_MAP_ALLOCATED bits, behavior is well defined.
			 */
			for (i = 1; i < npages-1; i++) {
				arena_mapbits_internal_set(chunk, pageind+i,
				    flag_unzeroed);
			}
		}

		npurged += npages;
		if (config_stats)
			nmadvise++;
	}
	malloc_mutex_lock(tsd, &arena->lock);

	if (config_stats) {
		arena->stats.nmadvise += nmadvise;
		arena->stats.purged += npurged;
	}

	return (npurged);
}

static void
arena_unstash_purged(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
    arena_runs_dirty_link_t *purge_runs_sentinel,
    extent_node_t *purge_chunks_sentinel)
{
	arena_runs_dirty_link_t *rdelm, *rdelm_next;
	extent_node_t *chunkselm;

	/* Deallocate chunks/runs. */
	for (rdelm = qr_next(purge_runs_sentinel, rd_link),
	    chunkselm = qr_next(purge_chunks_sentinel, cc_link);
	    rdelm != purge_runs_sentinel; rdelm = rdelm_next) {
		rdelm_next = qr_next(rdelm, rd_link);
		if (rdelm == &chunkselm->rd) {
			extent_node_t *chunkselm_next = qr_next(chunkselm,
			    cc_link);
			void *addr = extent_node_addr_get(chunkselm);
			size_t size = extent_node_size_get(chunkselm);
			bool zeroed = extent_node_zeroed_get(chunkselm);
			bool committed = extent_node_committed_get(chunkselm);
			extent_node_dirty_remove(chunkselm);
			arena_node_dalloc(tsd, arena, chunkselm);
			chunkselm = chunkselm_next;
			chunk_dalloc_wrapper(tsd, arena, chunk_hooks, addr,
			    size, zeroed, committed);
		} else {
			arena_chunk_t *chunk =
			    (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
			arena_chunk_map_misc_t *miscelm =
			    arena_rd_to_miscelm(rdelm);
			size_t pageind = arena_miscelm_to_pageind(miscelm);
			bool decommitted = (arena_mapbits_decommitted_get(chunk,
			    pageind) != 0);
			arena_run_t *run = &miscelm->run;
			qr_remove(rdelm, rd_link);
			arena_run_dalloc(tsd, arena, run, false, true,
			    decommitted);
		}
	}
}

/*
 * NB: ndirty_limit is interpreted differently depending on opt_purge:
 *   - purge_mode_ratio: Purge as few dirty run/chunks as possible to reach the
 *     desired state:
 *       (arena->ndirty <= ndirty_limit)
 *   - purge_mode_decay: Purge as many dirty runs/chunks as possible without
 *     violating the invariant:
 *       (arena->ndirty >= ndirty_limit)
 */
static void
arena_purge_to_limit(tsd_t *tsd, arena_t *arena, size_t ndirty_limit)
{
	chunk_hooks_t chunk_hooks = chunk_hooks_get(tsd, arena);
	size_t npurge, npurged;
	arena_runs_dirty_link_t purge_runs_sentinel;
	extent_node_t purge_chunks_sentinel;

	arena->purging = true;

	/*
	 * Calls to arena_dirty_count() are disabled even for debug builds
	 * because overhead grows nonlinearly as memory usage increases.
	 */
	if (false && config_debug) {
		size_t ndirty = arena_dirty_count(arena);
		assert(ndirty == arena->ndirty);
	}
	assert(opt_purge != purge_mode_ratio || (arena->nactive >>
	    arena->lg_dirty_mult) < arena->ndirty || ndirty_limit == 0);

	qr_new(&purge_runs_sentinel, rd_link);
	extent_node_dirty_linkage_init(&purge_chunks_sentinel);

	npurge = arena_stash_dirty(tsd, arena, &chunk_hooks, ndirty_limit,
	    &purge_runs_sentinel, &purge_chunks_sentinel);
	if (npurge == 0)
		goto label_return;
	npurged = arena_purge_stashed(tsd, arena, &chunk_hooks,
	    &purge_runs_sentinel, &purge_chunks_sentinel);
	assert(npurged == npurge);
	arena_unstash_purged(tsd, arena, &chunk_hooks, &purge_runs_sentinel,
	    &purge_chunks_sentinel);

	if (config_stats)
		arena->stats.npurge++;

label_return:
	arena->purging = false;
}

void
arena_purge(tsd_t *tsd, arena_t *arena, bool all)
{

	malloc_mutex_lock(tsd, &arena->lock);
	if (all)
		arena_purge_to_limit(tsd, arena, 0);
	else
		arena_maybe_purge(tsd, arena);
	malloc_mutex_unlock(tsd, &arena->lock);
}

static void
arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size,
    size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty,
    size_t flag_decommitted)
{
	size_t size = *p_size;
	size_t run_ind = *p_run_ind;
	size_t run_pages = *p_run_pages;

	/* Try to coalesce forward. */
	if (run_ind + run_pages < chunk_npages &&
	    arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 &&
	    arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty &&
	    arena_mapbits_decommitted_get(chunk, run_ind+run_pages) ==
	    flag_decommitted) {
		size_t nrun_size = arena_mapbits_unallocated_size_get(chunk,
		    run_ind+run_pages);
		size_t nrun_pages = nrun_size >> LG_PAGE;

		/*
		 * Remove successor from runs_avail; the coalesced run is
		 * inserted later.
		 */
		assert(arena_mapbits_unallocated_size_get(chunk,
		    run_ind+run_pages+nrun_pages-1) == nrun_size);
		assert(arena_mapbits_dirty_get(chunk,
		    run_ind+run_pages+nrun_pages-1) == flag_dirty);
		assert(arena_mapbits_decommitted_get(chunk,
		    run_ind+run_pages+nrun_pages-1) == flag_decommitted);
		arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages);

		/*
		 * If the successor is dirty, remove it from the set of dirty
		 * pages.
		 */
		if (flag_dirty != 0) {
			arena_run_dirty_remove(arena, chunk, run_ind+run_pages,
			    nrun_pages);
		}

		size += nrun_size;
		run_pages += nrun_pages;

		arena_mapbits_unallocated_size_set(chunk, run_ind, size);
		arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
		    size);
	}

	/* Try to coalesce backward. */
	if (run_ind > map_bias && arena_mapbits_allocated_get(chunk,
	    run_ind-1) == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) ==
	    flag_dirty && arena_mapbits_decommitted_get(chunk, run_ind-1) ==
	    flag_decommitted) {
		size_t prun_size = arena_mapbits_unallocated_size_get(chunk,
		    run_ind-1);
		size_t prun_pages = prun_size >> LG_PAGE;

		run_ind -= prun_pages;

		/*
		 * Remove predecessor from runs_avail; the coalesced run is
		 * inserted later.
		 */
		assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
		    prun_size);
		assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty);
		assert(arena_mapbits_decommitted_get(chunk, run_ind) ==
		    flag_decommitted);
		arena_avail_remove(arena, chunk, run_ind, prun_pages);

		/*
		 * If the predecessor is dirty, remove it from the set of dirty
		 * pages.
		 */
		if (flag_dirty != 0) {
			arena_run_dirty_remove(arena, chunk, run_ind,
			    prun_pages);
		}

		size += prun_size;
		run_pages += prun_pages;

		arena_mapbits_unallocated_size_set(chunk, run_ind, size);
		arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
		    size);
	}

	*p_size = size;
	*p_run_ind = run_ind;
	*p_run_pages = run_pages;
}
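
/*
 * Worked example (for exposition only): if pages [a..b) are being freed
 * while [b..c) and [d..a) are already unallocated with matching
 * dirty/decommitted flags, arena_run_coalesce() first grows the run forward
 * to [a..c), then backward to [d..c), so the subsequent runs_avail insertion
 * in arena_run_dalloc() covers one merged run instead of three fragments.
 */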
1867 */ 1868 assert(arena_mapbits_unallocated_size_get(chunk, run_ind) == 1869 prun_size); 1870 assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty); 1871 assert(arena_mapbits_decommitted_get(chunk, run_ind) == 1872 flag_decommitted); 1873 arena_avail_remove(arena, chunk, run_ind, prun_pages); 1874 1875 /* 1876 * If the predecessor is dirty, remove it from the set of dirty 1877 * pages. 1878 */ 1879 if (flag_dirty != 0) { 1880 arena_run_dirty_remove(arena, chunk, run_ind, 1881 prun_pages); 1882 } 1883 1884 size += prun_size; 1885 run_pages += prun_pages; 1886 1887 arena_mapbits_unallocated_size_set(chunk, run_ind, size); 1888 arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1, 1889 size); 1890 } 1891 1892 *p_size = size; 1893 *p_run_ind = run_ind; 1894 *p_run_pages = run_pages; 1895} 1896 1897static size_t 1898arena_run_size_get(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, 1899 size_t run_ind) 1900{ 1901 size_t size; 1902 1903 assert(run_ind >= map_bias); 1904 assert(run_ind < chunk_npages); 1905 1906 if (arena_mapbits_large_get(chunk, run_ind) != 0) { 1907 size = arena_mapbits_large_size_get(chunk, run_ind); 1908 assert(size == PAGE || arena_mapbits_large_size_get(chunk, 1909 run_ind+(size>>LG_PAGE)-1) == 0); 1910 } else { 1911 arena_bin_info_t *bin_info = &arena_bin_info[run->binind]; 1912 size = bin_info->run_size; 1913 } 1914 1915 return (size); 1916} 1917 1918static void 1919arena_run_dalloc(tsd_t *tsd, arena_t *arena, arena_run_t *run, bool dirty, 1920 bool cleaned, bool decommitted) 1921{ 1922 arena_chunk_t *chunk; 1923 arena_chunk_map_misc_t *miscelm; 1924 size_t size, run_ind, run_pages, flag_dirty, flag_decommitted; 1925 1926 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); 1927 miscelm = arena_run_to_miscelm(run); 1928 run_ind = arena_miscelm_to_pageind(miscelm); 1929 assert(run_ind >= map_bias); 1930 assert(run_ind < chunk_npages); 1931 size = arena_run_size_get(arena, chunk, run, run_ind); 1932 run_pages = (size >> LG_PAGE); 1933 arena_nactive_sub(arena, run_pages); 1934 1935 /* 1936 * The run is dirty if the caller claims to have dirtied it, as well as 1937 * if it was already dirty before being allocated and the caller 1938 * doesn't claim to have cleaned it. 1939 */ 1940 assert(arena_mapbits_dirty_get(chunk, run_ind) == 1941 arena_mapbits_dirty_get(chunk, run_ind+run_pages-1)); 1942 if (!cleaned && !decommitted && arena_mapbits_dirty_get(chunk, run_ind) 1943 != 0) 1944 dirty = true; 1945 flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0; 1946 flag_decommitted = decommitted ? CHUNK_MAP_DECOMMITTED : 0; 1947 1948 /* Mark pages as unallocated in the chunk map. */ 1949 if (dirty || decommitted) { 1950 size_t flags = flag_dirty | flag_decommitted; 1951 arena_mapbits_unallocated_set(chunk, run_ind, size, flags); 1952 arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size, 1953 flags); 1954 } else { 1955 arena_mapbits_unallocated_set(chunk, run_ind, size, 1956 arena_mapbits_unzeroed_get(chunk, run_ind)); 1957 arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size, 1958 arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1)); 1959 } 1960 1961 arena_run_coalesce(arena, chunk, &size, &run_ind, &run_pages, 1962 flag_dirty, flag_decommitted); 1963 1964 /* Insert into runs_avail, now that coalescing is complete. 
*/ 1965 assert(arena_mapbits_unallocated_size_get(chunk, run_ind) == 1966 arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1)); 1967 assert(arena_mapbits_dirty_get(chunk, run_ind) == 1968 arena_mapbits_dirty_get(chunk, run_ind+run_pages-1)); 1969 assert(arena_mapbits_decommitted_get(chunk, run_ind) == 1970 arena_mapbits_decommitted_get(chunk, run_ind+run_pages-1)); 1971 arena_avail_insert(arena, chunk, run_ind, run_pages); 1972 1973 if (dirty) 1974 arena_run_dirty_insert(arena, chunk, run_ind, run_pages); 1975 1976 /* Deallocate chunk if it is now completely unused. */ 1977 if (size == arena_maxrun) { 1978 assert(run_ind == map_bias); 1979 assert(run_pages == (arena_maxrun >> LG_PAGE)); 1980 arena_chunk_dalloc(tsd, arena, chunk); 1981 } 1982 1983 /* 1984 * It is okay to do dirty page processing here even if the chunk was 1985 * deallocated above, since in that case it is the spare. Waiting 1986 * until after possible chunk deallocation to do dirty processing 1987 * allows for an old spare to be fully deallocated, thus decreasing the 1988 * chances of spuriously crossing the dirty page purging threshold. 1989 */ 1990 if (dirty) 1991 arena_maybe_purge(tsd, arena); 1992} 1993 1994static void 1995arena_run_trim_head(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, 1996 arena_run_t *run, size_t oldsize, size_t newsize) 1997{ 1998 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); 1999 size_t pageind = arena_miscelm_to_pageind(miscelm); 2000 size_t head_npages = (oldsize - newsize) >> LG_PAGE; 2001 size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind); 2002 size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind); 2003 size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ? 2004 CHUNK_MAP_UNZEROED : 0; 2005 2006 assert(oldsize > newsize); 2007 2008 /* 2009 * Update the chunk map so that arena_run_dalloc() can treat the 2010 * leading run as separately allocated. Set the last element of each 2011 * run first, in case of single-page runs. 2012 */ 2013 assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize); 2014 arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty | 2015 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, 2016 pageind+head_npages-1))); 2017 arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty | 2018 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind))); 2019 2020 if (config_debug) { 2021 UNUSED size_t tail_npages = newsize >> LG_PAGE; 2022 assert(arena_mapbits_large_size_get(chunk, 2023 pageind+head_npages+tail_npages-1) == 0); 2024 assert(arena_mapbits_dirty_get(chunk, 2025 pageind+head_npages+tail_npages-1) == flag_dirty); 2026 } 2027 arena_mapbits_large_set(chunk, pageind+head_npages, newsize, 2028 flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, 2029 pageind+head_npages))); 2030 2031 arena_run_dalloc(tsd, arena, run, false, false, (flag_decommitted != 2032 0)); 2033} 2034 2035static void 2036arena_run_trim_tail(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, 2037 arena_run_t *run, size_t oldsize, size_t newsize, bool dirty) 2038{ 2039 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); 2040 size_t pageind = arena_miscelm_to_pageind(miscelm); 2041 size_t head_npages = newsize >> LG_PAGE; 2042 size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind); 2043 size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind); 2044 size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ? 
2045 CHUNK_MAP_UNZEROED : 0; 2046 arena_chunk_map_misc_t *tail_miscelm; 2047 arena_run_t *tail_run; 2048 2049 assert(oldsize > newsize); 2050 2051 /* 2052 * Update the chunk map so that arena_run_dalloc() can treat the 2053 * trailing run as separately allocated. Set the last element of each 2054 * run first, in case of single-page runs. 2055 */ 2056 assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize); 2057 arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty | 2058 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, 2059 pageind+head_npages-1))); 2060 arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty | 2061 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind))); 2062 2063 if (config_debug) { 2064 UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE; 2065 assert(arena_mapbits_large_size_get(chunk, 2066 pageind+head_npages+tail_npages-1) == 0); 2067 assert(arena_mapbits_dirty_get(chunk, 2068 pageind+head_npages+tail_npages-1) == flag_dirty); 2069 } 2070 arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize, 2071 flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, 2072 pageind+head_npages))); 2073 2074 tail_miscelm = arena_miscelm_get_mutable(chunk, pageind + head_npages); 2075 tail_run = &tail_miscelm->run; 2076 arena_run_dalloc(tsd, arena, tail_run, dirty, false, (flag_decommitted 2077 != 0)); 2078} 2079 2080static void 2081arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run) 2082{ 2083 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); 2084 2085 arena_run_heap_insert(&bin->runs, miscelm); 2086} 2087 2088static arena_run_t * 2089arena_bin_nonfull_run_tryget(arena_bin_t *bin) 2090{ 2091 arena_chunk_map_misc_t *miscelm; 2092 2093 miscelm = arena_run_heap_remove_first(&bin->runs); 2094 if (miscelm == NULL) 2095 return (NULL); 2096 if (config_stats) 2097 bin->stats.reruns++; 2098 2099 return (&miscelm->run); 2100} 2101 2102static arena_run_t * 2103arena_bin_nonfull_run_get(tsd_t *tsd, arena_t *arena, arena_bin_t *bin) 2104{ 2105 arena_run_t *run; 2106 szind_t binind; 2107 arena_bin_info_t *bin_info; 2108 2109 /* Look for a usable run. */ 2110 run = arena_bin_nonfull_run_tryget(bin); 2111 if (run != NULL) 2112 return (run); 2113 /* No existing runs have any space available. */ 2114 2115 binind = arena_bin_index(arena, bin); 2116 bin_info = &arena_bin_info[binind]; 2117 2118 /* Allocate a new run. */ 2119 malloc_mutex_unlock(tsd, &bin->lock); 2120 /******************************/ 2121 malloc_mutex_lock(tsd, &arena->lock); 2122 run = arena_run_alloc_small(tsd, arena, bin_info->run_size, binind); 2123 if (run != NULL) { 2124 /* Initialize run internals. */ 2125 run->binind = binind; 2126 run->nfree = bin_info->nregs; 2127 bitmap_init(run->bitmap, &bin_info->bitmap_info); 2128 } 2129 malloc_mutex_unlock(tsd, &arena->lock); 2130 /********************************/ 2131 malloc_mutex_lock(tsd, &bin->lock); 2132 if (run != NULL) { 2133 if (config_stats) { 2134 bin->stats.nruns++; 2135 bin->stats.curruns++; 2136 } 2137 return (run); 2138 } 2139 2140 /* 2141 * arena_run_alloc_small() failed, but another thread may have made 2142 * sufficient memory available while this one dropped bin->lock above, 2143 * so search one more time. 2144 */ 2145 run = arena_bin_nonfull_run_tryget(bin); 2146 if (run != NULL) 2147 return (run); 2148 2149 return (NULL); 2150} 2151 2152/* Re-fill bin->runcur, then call arena_run_reg_alloc(). 
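 * Note that arena_bin_nonfull_run_get() drops bin->lock while allocating
 * a new run, so bin->runcur may have been refilled by another thread in
 * the interim; the code below re-checks it before installing the new run.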
*/ 2153static void * 2154arena_bin_malloc_hard(tsd_t *tsd, arena_t *arena, arena_bin_t *bin) 2155{ 2156 szind_t binind; 2157 arena_bin_info_t *bin_info; 2158 arena_run_t *run; 2159 2160 binind = arena_bin_index(arena, bin); 2161 bin_info = &arena_bin_info[binind]; 2162 bin->runcur = NULL; 2163 run = arena_bin_nonfull_run_get(tsd, arena, bin); 2164 if (bin->runcur != NULL && bin->runcur->nfree > 0) { 2165 /* 2166 * Another thread updated runcur while this one ran without the 2167 * bin lock in arena_bin_nonfull_run_get(). 2168 */ 2169 void *ret; 2170 assert(bin->runcur->nfree > 0); 2171 ret = arena_run_reg_alloc(bin->runcur, bin_info); 2172 if (run != NULL) { 2173 arena_chunk_t *chunk; 2174 2175 /* 2176 * arena_run_alloc_small() may have allocated run, or 2177 * it may have pulled run from the bin's run tree. 2178 * Therefore it is unsafe to make any assumptions about 2179 * how run has previously been used, and 2180 * arena_bin_lower_run() must be called, as if a region 2181 * were just deallocated from the run. 2182 */ 2183 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); 2184 if (run->nfree == bin_info->nregs) { 2185 arena_dalloc_bin_run(tsd, arena, chunk, run, 2186 bin); 2187 } else 2188 arena_bin_lower_run(arena, chunk, run, bin); 2189 } 2190 return (ret); 2191 } 2192 2193 if (run == NULL) 2194 return (NULL); 2195 2196 bin->runcur = run; 2197 2198 assert(bin->runcur->nfree > 0); 2199 2200 return (arena_run_reg_alloc(bin->runcur, bin_info)); 2201} 2202 2203void 2204arena_tcache_fill_small(tsd_t *tsd, arena_t *arena, tcache_bin_t *tbin, 2205 szind_t binind, uint64_t prof_accumbytes) 2206{ 2207 unsigned i, nfill; 2208 arena_bin_t *bin; 2209 2210 assert(tbin->ncached == 0); 2211 2212 if (config_prof && arena_prof_accum(tsd, arena, prof_accumbytes)) 2213 prof_idump(tsd); 2214 bin = &arena->bins[binind]; 2215 malloc_mutex_lock(tsd, &bin->lock); 2216 for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >> 2217 tbin->lg_fill_div); i < nfill; i++) { 2218 arena_run_t *run; 2219 void *ptr; 2220 if ((run = bin->runcur) != NULL && run->nfree > 0) 2221 ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]); 2222 else 2223 ptr = arena_bin_malloc_hard(tsd, arena, bin); 2224 if (ptr == NULL) { 2225 /* 2226 * OOM. tbin->avail isn't yet filled down to its first 2227 * element, so the successful allocations (if any) must 2228 * be moved just before tbin->avail before bailing out. 2229 */ 2230 if (i > 0) { 2231 memmove(tbin->avail - i, tbin->avail - nfill, 2232 i * sizeof(void *)); 2233 } 2234 break; 2235 } 2236 if (config_fill && unlikely(opt_junk_alloc)) { 2237 arena_alloc_junk_small(ptr, &arena_bin_info[binind], 2238 true); 2239 } 2240 /* Insert such that low regions get used first. 
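 * (The tcache consumes its stack starting at *(avail - ncached), so the
 * pointer stored here at i == 0, taken from the lowest free region, is
 * the first one handed back to the application.)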
*/ 2241 *(tbin->avail - nfill + i) = ptr; 2242 } 2243 if (config_stats) { 2244 bin->stats.nmalloc += i; 2245 bin->stats.nrequests += tbin->tstats.nrequests; 2246 bin->stats.curregs += i; 2247 bin->stats.nfills++; 2248 tbin->tstats.nrequests = 0; 2249 } 2250 malloc_mutex_unlock(tsd, &bin->lock); 2251 tbin->ncached = i; 2252 arena_decay_tick(tsd, arena); 2253} 2254 2255void 2256arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero) 2257{ 2258 2259 size_t redzone_size = bin_info->redzone_size; 2260 2261 if (zero) { 2262 memset((void *)((uintptr_t)ptr - redzone_size), 2263 JEMALLOC_ALLOC_JUNK, redzone_size); 2264 memset((void *)((uintptr_t)ptr + bin_info->reg_size), 2265 JEMALLOC_ALLOC_JUNK, redzone_size); 2266 } else { 2267 memset((void *)((uintptr_t)ptr - redzone_size), 2268 JEMALLOC_ALLOC_JUNK, bin_info->reg_interval); 2269 } 2270} 2271 2272#ifdef JEMALLOC_JET 2273#undef arena_redzone_corruption 2274#define arena_redzone_corruption JEMALLOC_N(n_arena_redzone_corruption) 2275#endif 2276static void 2277arena_redzone_corruption(void *ptr, size_t usize, bool after, 2278 size_t offset, uint8_t byte) 2279{ 2280 2281 malloc_printf("<jemalloc>: Corrupt redzone %zu byte%s %s %p " 2282 "(size %zu), byte=%#x\n", offset, (offset == 1) ? "" : "s", 2283 after ? "after" : "before", ptr, usize, byte); 2284} 2285#ifdef JEMALLOC_JET 2286#undef arena_redzone_corruption 2287#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption) 2288arena_redzone_corruption_t *arena_redzone_corruption = 2289 JEMALLOC_N(n_arena_redzone_corruption); 2290#endif 2291 2292static void 2293arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset) 2294{ 2295 bool error = false; 2296 2297 if (opt_junk_alloc) { 2298 size_t size = bin_info->reg_size; 2299 size_t redzone_size = bin_info->redzone_size; 2300 size_t i; 2301 2302 for (i = 1; i <= redzone_size; i++) { 2303 uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i); 2304 if (*byte != JEMALLOC_ALLOC_JUNK) { 2305 error = true; 2306 arena_redzone_corruption(ptr, size, false, i, 2307 *byte); 2308 if (reset) 2309 *byte = JEMALLOC_ALLOC_JUNK; 2310 } 2311 } 2312 for (i = 0; i < redzone_size; i++) { 2313 uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i); 2314 if (*byte != JEMALLOC_ALLOC_JUNK) { 2315 error = true; 2316 arena_redzone_corruption(ptr, size, true, i, 2317 *byte); 2318 if (reset) 2319 *byte = JEMALLOC_ALLOC_JUNK; 2320 } 2321 } 2322 } 2323 2324 if (opt_abort && error) 2325 abort(); 2326} 2327 2328#ifdef JEMALLOC_JET 2329#undef arena_dalloc_junk_small 2330#define arena_dalloc_junk_small JEMALLOC_N(n_arena_dalloc_junk_small) 2331#endif 2332void 2333arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info) 2334{ 2335 size_t redzone_size = bin_info->redzone_size; 2336 2337 arena_redzones_validate(ptr, bin_info, false); 2338 memset((void *)((uintptr_t)ptr - redzone_size), JEMALLOC_FREE_JUNK, 2339 bin_info->reg_interval); 2340} 2341#ifdef JEMALLOC_JET 2342#undef arena_dalloc_junk_small 2343#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small) 2344arena_dalloc_junk_small_t *arena_dalloc_junk_small = 2345 JEMALLOC_N(n_arena_dalloc_junk_small); 2346#endif 2347 2348void 2349arena_quarantine_junk_small(void *ptr, size_t usize) 2350{ 2351 szind_t binind; 2352 arena_bin_info_t *bin_info; 2353 cassert(config_fill); 2354 assert(opt_junk_free); 2355 assert(opt_quarantine); 2356 assert(usize <= SMALL_MAXCLASS); 2357 2358 binind = size2index(usize); 2359 bin_info = &arena_bin_info[binind]; 2360 arena_redzones_validate(ptr, bin_info, 
true); 2361} 2362 2363static void * 2364arena_malloc_small(tsd_t *tsd, arena_t *arena, szind_t binind, bool zero) 2365{ 2366 void *ret; 2367 arena_bin_t *bin; 2368 size_t usize; 2369 arena_run_t *run; 2370 2371 assert(binind < NBINS); 2372 bin = &arena->bins[binind]; 2373 usize = index2size(binind); 2374 2375 malloc_mutex_lock(tsd, &bin->lock); 2376 if ((run = bin->runcur) != NULL && run->nfree > 0) 2377 ret = arena_run_reg_alloc(run, &arena_bin_info[binind]); 2378 else 2379 ret = arena_bin_malloc_hard(tsd, arena, bin); 2380 2381 if (ret == NULL) { 2382 malloc_mutex_unlock(tsd, &bin->lock); 2383 return (NULL); 2384 } 2385 2386 if (config_stats) { 2387 bin->stats.nmalloc++; 2388 bin->stats.nrequests++; 2389 bin->stats.curregs++; 2390 } 2391 malloc_mutex_unlock(tsd, &bin->lock); 2392 if (config_prof && !isthreaded && arena_prof_accum(tsd, arena, usize)) 2393 prof_idump(tsd); 2394 2395 if (!zero) { 2396 if (config_fill) { 2397 if (unlikely(opt_junk_alloc)) { 2398 arena_alloc_junk_small(ret, 2399 &arena_bin_info[binind], false); 2400 } else if (unlikely(opt_zero)) 2401 memset(ret, 0, usize); 2402 } 2403 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize); 2404 } else { 2405 if (config_fill && unlikely(opt_junk_alloc)) { 2406 arena_alloc_junk_small(ret, &arena_bin_info[binind], 2407 true); 2408 } 2409 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize); 2410 memset(ret, 0, usize); 2411 } 2412 2413 arena_decay_tick(tsd, arena); 2414 return (ret); 2415} 2416 2417void * 2418arena_malloc_large(tsd_t *tsd, arena_t *arena, szind_t binind, bool zero) 2419{ 2420 void *ret; 2421 size_t usize; 2422 uintptr_t random_offset; 2423 arena_run_t *run; 2424 arena_chunk_map_misc_t *miscelm; 2425 UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false); 2426 2427 /* Large allocation. */ 2428 usize = index2size(binind); 2429 malloc_mutex_lock(tsd, &arena->lock); 2430 if (config_cache_oblivious) { 2431 uint64_t r; 2432 2433 /* 2434 * Compute a uniformly distributed offset within the first page 2435 * that is a multiple of the cacheline size, e.g. [0 .. 64) * 64 2436 * for 4 KiB pages and 64-byte cachelines.
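 * (prng_lg_range() returns LG_PAGE - LG_CACHELINE random bits, so with
 * those parameters the offset is one of {0, 64, 128, ..., 4032}.)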
2437 */ 2438 r = prng_lg_range(&arena->offset_state, LG_PAGE - LG_CACHELINE); 2439 random_offset = ((uintptr_t)r) << LG_CACHELINE; 2440 } else 2441 random_offset = 0; 2442 run = arena_run_alloc_large(tsd, arena, usize + large_pad, zero); 2443 if (run == NULL) { 2444 malloc_mutex_unlock(tsd, &arena->lock); 2445 return (NULL); 2446 } 2447 miscelm = arena_run_to_miscelm(run); 2448 ret = (void *)((uintptr_t)arena_miscelm_to_rpages(miscelm) + 2449 random_offset); 2450 if (config_stats) { 2451 szind_t index = binind - NBINS; 2452 2453 arena->stats.nmalloc_large++; 2454 arena->stats.nrequests_large++; 2455 arena->stats.allocated_large += usize; 2456 arena->stats.lstats[index].nmalloc++; 2457 arena->stats.lstats[index].nrequests++; 2458 arena->stats.lstats[index].curruns++; 2459 } 2460 if (config_prof) 2461 idump = arena_prof_accum_locked(arena, usize); 2462 malloc_mutex_unlock(tsd, &arena->lock); 2463 if (config_prof && idump) 2464 prof_idump(tsd); 2465 2466 if (!zero) { 2467 if (config_fill) { 2468 if (unlikely(opt_junk_alloc)) 2469 memset(ret, JEMALLOC_ALLOC_JUNK, usize); 2470 else if (unlikely(opt_zero)) 2471 memset(ret, 0, usize); 2472 } 2473 } 2474 2475 arena_decay_tick(tsd, arena); 2476 return (ret); 2477} 2478 2479void * 2480arena_malloc_hard(tsd_t *tsd, arena_t *arena, size_t size, szind_t ind, 2481 bool zero, tcache_t *tcache) 2482{ 2483 2484 arena = arena_choose(tsd, arena); 2485 if (unlikely(arena == NULL)) 2486 return (NULL); 2487 2488 if (likely(size <= SMALL_MAXCLASS)) 2489 return (arena_malloc_small(tsd, arena, ind, zero)); 2490 if (likely(size <= large_maxclass)) 2491 return (arena_malloc_large(tsd, arena, ind, zero)); 2492 return (huge_malloc(tsd, arena, index2size(ind), zero, tcache)); 2493} 2494 2495/* Only handles large allocations that require more than page alignment. 
*/ 2496static void * 2497arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment, 2498 bool zero) 2499{ 2500 void *ret; 2501 size_t alloc_size, leadsize, trailsize; 2502 arena_run_t *run; 2503 arena_chunk_t *chunk; 2504 arena_chunk_map_misc_t *miscelm; 2505 void *rpages; 2506 2507 assert(usize == PAGE_CEILING(usize)); 2508 2509 arena = arena_choose(tsd, arena); 2510 if (unlikely(arena == NULL)) 2511 return (NULL); 2512 2513 alignment = PAGE_CEILING(alignment); 2514 alloc_size = usize + large_pad + alignment; 2515 2516 malloc_mutex_lock(tsd, &arena->lock); 2517 run = arena_run_alloc_large(tsd, arena, alloc_size, false); 2518 if (run == NULL) { 2519 malloc_mutex_unlock(tsd, &arena->lock); 2520 return (NULL); 2521 } 2522 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); 2523 miscelm = arena_run_to_miscelm(run); 2524 rpages = arena_miscelm_to_rpages(miscelm); 2525 2526 leadsize = ALIGNMENT_CEILING((uintptr_t)rpages, alignment) - 2527 (uintptr_t)rpages; 2528 assert(alloc_size >= leadsize + usize); 2529 trailsize = alloc_size - leadsize - usize - large_pad; 2530 if (leadsize != 0) { 2531 arena_chunk_map_misc_t *head_miscelm = miscelm; 2532 arena_run_t *head_run = run; 2533 2534 miscelm = arena_miscelm_get_mutable(chunk, 2535 arena_miscelm_to_pageind(head_miscelm) + (leadsize >> 2536 LG_PAGE)); 2537 run = &miscelm->run; 2538 2539 arena_run_trim_head(tsd, arena, chunk, head_run, alloc_size, 2540 alloc_size - leadsize); 2541 } 2542 if (trailsize != 0) { 2543 arena_run_trim_tail(tsd, arena, chunk, run, usize + large_pad + 2544 trailsize, usize + large_pad, false); 2545 } 2546 if (arena_run_init_large(arena, run, usize + large_pad, zero)) { 2547 size_t run_ind = 2548 arena_miscelm_to_pageind(arena_run_to_miscelm(run)); 2549 bool dirty = (arena_mapbits_dirty_get(chunk, run_ind) != 0); 2550 bool decommitted = (arena_mapbits_decommitted_get(chunk, 2551 run_ind) != 0); 2552 2553 assert(decommitted); /* Cause of OOM. */ 2554 arena_run_dalloc(tsd, arena, run, dirty, false, decommitted); 2555 malloc_mutex_unlock(tsd, &arena->lock); 2556 return (NULL); 2557 } 2558 ret = arena_miscelm_to_rpages(miscelm); 2559 2560 if (config_stats) { 2561 szind_t index = size2index(usize) - NBINS; 2562 2563 arena->stats.nmalloc_large++; 2564 arena->stats.nrequests_large++; 2565 arena->stats.allocated_large += usize; 2566 arena->stats.lstats[index].nmalloc++; 2567 arena->stats.lstats[index].nrequests++; 2568 arena->stats.lstats[index].curruns++; 2569 } 2570 malloc_mutex_unlock(tsd, &arena->lock); 2571 2572 if (config_fill && !zero) { 2573 if (unlikely(opt_junk_alloc)) 2574 memset(ret, JEMALLOC_ALLOC_JUNK, usize); 2575 else if (unlikely(opt_zero)) 2576 memset(ret, 0, usize); 2577 } 2578 arena_decay_tick(tsd, arena); 2579 return (ret); 2580} 2581 2582void * 2583arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment, 2584 bool zero, tcache_t *tcache) 2585{ 2586 void *ret; 2587 2588 if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE 2589 && (usize & PAGE_MASK) == 0))) { 2590 /* Small; alignment doesn't require special run placement. */ 2591 ret = arena_malloc(tsd, arena, usize, size2index(usize), zero, 2592 tcache, true); 2593 } else if (usize <= large_maxclass && alignment <= PAGE) { 2594 /* 2595 * Large; alignment doesn't require special run placement. 2596 * However, the cached pointer may be at a random offset from 2597 * the base of the run, so do some bit manipulation to retrieve 2598 * the base. 
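 * (The random offset is always smaller than a page, so clearing the low
 * PAGE_MASK bits recovers the run base.)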
2599 */ 2600 ret = arena_malloc(tsd, arena, usize, size2index(usize), zero, 2601 tcache, true); 2602 if (config_cache_oblivious) 2603 ret = (void *)((uintptr_t)ret & ~PAGE_MASK); 2604 } else { 2605 if (likely(usize <= large_maxclass)) { 2606 ret = arena_palloc_large(tsd, arena, usize, alignment, 2607 zero); 2608 } else if (likely(alignment <= chunksize)) 2609 ret = huge_malloc(tsd, arena, usize, zero, tcache); 2610 else { 2611 ret = huge_palloc(tsd, arena, usize, alignment, zero, 2612 tcache); 2613 } 2614 } 2615 return (ret); 2616} 2617 2618void 2619arena_prof_promoted(tsd_t *tsd, const void *ptr, size_t size) 2620{ 2621 arena_chunk_t *chunk; 2622 size_t pageind; 2623 szind_t binind; 2624 2625 cassert(config_prof); 2626 assert(ptr != NULL); 2627 assert(CHUNK_ADDR2BASE(ptr) != ptr); 2628 assert(isalloc(tsd, ptr, false) == LARGE_MINCLASS); 2629 assert(isalloc(tsd, ptr, true) == LARGE_MINCLASS); 2630 assert(size <= SMALL_MAXCLASS); 2631 2632 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); 2633 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; 2634 binind = size2index(size); 2635 assert(binind < NBINS); 2636 arena_mapbits_large_binind_set(chunk, pageind, binind); 2637 2638 assert(isalloc(tsd, ptr, false) == LARGE_MINCLASS); 2639 assert(isalloc(tsd, ptr, true) == size); 2640} 2641 2642static void 2643arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run, 2644 arena_bin_t *bin) 2645{ 2646 2647 /* Dissociate run from bin. */ 2648 if (run == bin->runcur) 2649 bin->runcur = NULL; 2650 else { 2651 szind_t binind = arena_bin_index(extent_node_arena_get( 2652 &chunk->node), bin); 2653 arena_bin_info_t *bin_info = &arena_bin_info[binind]; 2654 2655 /* 2656 * The following block's conditional is necessary because if the 2657 * run only contains one region, then it never gets inserted 2658 * into the non-full runs tree. 2659 */ 2660 if (bin_info->nregs != 1) { 2661 arena_chunk_map_misc_t *miscelm = 2662 arena_run_to_miscelm(run); 2663 2664 arena_run_heap_remove(&bin->runs, miscelm); 2665 } 2666 } 2667} 2668 2669static void 2670arena_dalloc_bin_run(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, 2671 arena_run_t *run, arena_bin_t *bin) 2672{ 2673 2674 assert(run != bin->runcur); 2675 2676 malloc_mutex_unlock(tsd, &bin->lock); 2677 /******************************/ 2678 malloc_mutex_lock(tsd, &arena->lock); 2679 arena_run_dalloc(tsd, arena, run, true, false, false); 2680 malloc_mutex_unlock(tsd, &arena->lock); 2681 /****************************/ 2682 malloc_mutex_lock(tsd, &bin->lock); 2683 if (config_stats) 2684 bin->stats.curruns--; 2685} 2686 2687static void 2688arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, 2689 arena_bin_t *bin) 2690{ 2691 2692 /* 2693 * Make sure that if bin->runcur is non-NULL, it refers to the lowest 2694 * non-full run. It is okay to NULL runcur out rather than proactively 2695 * keeping it pointing at the lowest non-full run. 2696 */ 2697 if ((uintptr_t)run < (uintptr_t)bin->runcur) { 2698 /* Switch runcur. 
*/ 2699 if (bin->runcur->nfree > 0) 2700 arena_bin_runs_insert(bin, bin->runcur); 2701 bin->runcur = run; 2702 if (config_stats) 2703 bin->stats.reruns++; 2704 } else 2705 arena_bin_runs_insert(bin, run); 2706} 2707 2708static void 2709arena_dalloc_bin_locked_impl(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, 2710 void *ptr, arena_chunk_map_bits_t *bitselm, bool junked) 2711{ 2712 size_t pageind, rpages_ind; 2713 arena_run_t *run; 2714 arena_bin_t *bin; 2715 arena_bin_info_t *bin_info; 2716 szind_t binind; 2717 2718 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; 2719 rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind); 2720 run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run; 2721 binind = run->binind; 2722 bin = &arena->bins[binind]; 2723 bin_info = &arena_bin_info[binind]; 2724 2725 if (!junked && config_fill && unlikely(opt_junk_free)) 2726 arena_dalloc_junk_small(ptr, bin_info); 2727 2728 arena_run_reg_dalloc(run, ptr); 2729 if (run->nfree == bin_info->nregs) { 2730 arena_dissociate_bin_run(chunk, run, bin); 2731 arena_dalloc_bin_run(tsd, arena, chunk, run, bin); 2732 } else if (run->nfree == 1 && run != bin->runcur) 2733 arena_bin_lower_run(arena, chunk, run, bin); 2734 2735 if (config_stats) { 2736 bin->stats.ndalloc++; 2737 bin->stats.curregs--; 2738 } 2739} 2740 2741void 2742arena_dalloc_bin_junked_locked(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, 2743 void *ptr, arena_chunk_map_bits_t *bitselm) 2744{ 2745 2746 arena_dalloc_bin_locked_impl(tsd, arena, chunk, ptr, bitselm, true); 2747} 2748 2749void 2750arena_dalloc_bin(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, void *ptr, 2751 size_t pageind, arena_chunk_map_bits_t *bitselm) 2752{ 2753 arena_run_t *run; 2754 arena_bin_t *bin; 2755 size_t rpages_ind; 2756 2757 rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind); 2758 run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run; 2759 bin = &arena->bins[run->binind]; 2760 malloc_mutex_lock(tsd, &bin->lock); 2761 arena_dalloc_bin_locked_impl(tsd, arena, chunk, ptr, bitselm, false); 2762 malloc_mutex_unlock(tsd, &bin->lock); 2763} 2764 2765void 2766arena_dalloc_small(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, void *ptr, 2767 size_t pageind) 2768{ 2769 arena_chunk_map_bits_t *bitselm; 2770 2771 if (config_debug) { 2772 /* arena_ptr_small_binind_get() does extra sanity checking. 
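 * (It recomputes the bin index from the mapbits and cross-checks it
 * against the metadata of the run that contains ptr.)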
*/ 2773 assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk, 2774 pageind)) != BININD_INVALID); 2775 } 2776 bitselm = arena_bitselm_get_mutable(chunk, pageind); 2777 arena_dalloc_bin(tsd, arena, chunk, ptr, pageind, bitselm); 2778 arena_decay_tick(tsd, arena); 2779} 2780 2781#ifdef JEMALLOC_JET 2782#undef arena_dalloc_junk_large 2783#define arena_dalloc_junk_large JEMALLOC_N(n_arena_dalloc_junk_large) 2784#endif 2785void 2786arena_dalloc_junk_large(void *ptr, size_t usize) 2787{ 2788 2789 if (config_fill && unlikely(opt_junk_free)) 2790 memset(ptr, JEMALLOC_FREE_JUNK, usize); 2791} 2792#ifdef JEMALLOC_JET 2793#undef arena_dalloc_junk_large 2794#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large) 2795arena_dalloc_junk_large_t *arena_dalloc_junk_large = 2796 JEMALLOC_N(n_arena_dalloc_junk_large); 2797#endif 2798 2799static void 2800arena_dalloc_large_locked_impl(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, 2801 void *ptr, bool junked) 2802{ 2803 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; 2804 arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk, 2805 pageind); 2806 arena_run_t *run = &miscelm->run; 2807 2808 if (config_fill || config_stats) { 2809 size_t usize = arena_mapbits_large_size_get(chunk, pageind) - 2810 large_pad; 2811 2812 if (!junked) 2813 arena_dalloc_junk_large(ptr, usize); 2814 if (config_stats) { 2815 szind_t index = size2index(usize) - NBINS; 2816 2817 arena->stats.ndalloc_large++; 2818 arena->stats.allocated_large -= usize; 2819 arena->stats.lstats[index].ndalloc++; 2820 arena->stats.lstats[index].curruns--; 2821 } 2822 } 2823 2824 arena_run_dalloc(tsd, arena, run, true, false, false); 2825} 2826 2827void 2828arena_dalloc_large_junked_locked(tsd_t *tsd, arena_t *arena, 2829 arena_chunk_t *chunk, void *ptr) 2830{ 2831 2832 arena_dalloc_large_locked_impl(tsd, arena, chunk, ptr, true); 2833} 2834 2835void 2836arena_dalloc_large(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, void *ptr) 2837{ 2838 2839 malloc_mutex_lock(tsd, &arena->lock); 2840 arena_dalloc_large_locked_impl(tsd, arena, chunk, ptr, false); 2841 malloc_mutex_unlock(tsd, &arena->lock); 2842 arena_decay_tick(tsd, arena); 2843} 2844 2845static void 2846arena_ralloc_large_shrink(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, 2847 void *ptr, size_t oldsize, size_t size) 2848{ 2849 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; 2850 arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk, 2851 pageind); 2852 arena_run_t *run = &miscelm->run; 2853 2854 assert(size < oldsize); 2855 2856 /* 2857 * Shrink the run, and make trailing pages available for other 2858 * allocations. 
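 * (arena_run_trim_tail() splits off the pages beyond the new size and
 * hands them back to the arena as a dirty run.)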
2859 */ 2860 malloc_mutex_lock(tsd, &arena->lock); 2861 arena_run_trim_tail(tsd, arena, chunk, run, oldsize + large_pad, size + 2862 large_pad, true); 2863 if (config_stats) { 2864 szind_t oldindex = size2index(oldsize) - NBINS; 2865 szind_t index = size2index(size) - NBINS; 2866 2867 arena->stats.ndalloc_large++; 2868 arena->stats.allocated_large -= oldsize; 2869 arena->stats.lstats[oldindex].ndalloc++; 2870 arena->stats.lstats[oldindex].curruns--; 2871 2872 arena->stats.nmalloc_large++; 2873 arena->stats.nrequests_large++; 2874 arena->stats.allocated_large += size; 2875 arena->stats.lstats[index].nmalloc++; 2876 arena->stats.lstats[index].nrequests++; 2877 arena->stats.lstats[index].curruns++; 2878 } 2879 malloc_mutex_unlock(tsd, &arena->lock); 2880} 2881 2882static bool 2883arena_ralloc_large_grow(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, 2884 void *ptr, size_t oldsize, size_t usize_min, size_t usize_max, bool zero) 2885{ 2886 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; 2887 size_t npages = (oldsize + large_pad) >> LG_PAGE; 2888 size_t followsize; 2889 2890 assert(oldsize == arena_mapbits_large_size_get(chunk, pageind) - 2891 large_pad); 2892 2893 /* Try to extend the run. */ 2894 malloc_mutex_lock(tsd, &arena->lock); 2895 if (pageind+npages >= chunk_npages || arena_mapbits_allocated_get(chunk, 2896 pageind+npages) != 0) 2897 goto label_fail; 2898 followsize = arena_mapbits_unallocated_size_get(chunk, pageind+npages); 2899 if (oldsize + followsize >= usize_min) { 2900 /* 2901 * The next run is available and sufficiently large. Split the 2902 * following run, then merge the first part with the existing 2903 * allocation. 2904 */ 2905 arena_run_t *run; 2906 size_t usize, splitsize, size, flag_dirty, flag_unzeroed_mask; 2907 2908 usize = usize_max; 2909 while (oldsize + followsize < usize) 2910 usize = index2size(size2index(usize)-1); 2911 assert(usize >= usize_min); 2912 assert(usize >= oldsize); 2913 splitsize = usize - oldsize; 2914 if (splitsize == 0) 2915 goto label_fail; 2916 2917 run = &arena_miscelm_get_mutable(chunk, pageind+npages)->run; 2918 if (arena_run_split_large(arena, run, splitsize, zero)) 2919 goto label_fail; 2920 2921 if (config_cache_oblivious && zero) { 2922 /* 2923 * Zero the trailing bytes of the original allocation's 2924 * last page, since they are in an indeterminate state. 2925 * There will always be trailing bytes, because ptr's 2926 * offset from the beginning of the run is a multiple of 2927 * CACHELINE in [0 .. PAGE). 2928 */ 2929 void *zbase = (void *)((uintptr_t)ptr + oldsize); 2930 void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase + 2931 PAGE)); 2932 size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase; 2933 assert(nzero > 0); 2934 memset(zbase, 0, nzero); 2935 } 2936 2937 size = oldsize + splitsize; 2938 npages = (size + large_pad) >> LG_PAGE; 2939 2940 /* 2941 * Mark the extended run as dirty if either portion of the run 2942 * was dirty before allocation. This is rather pedantic, 2943 * because there's not actually any sequence of events that 2944 * could cause the resulting run to be passed to 2945 * arena_run_dalloc() with the dirty argument set to false 2946 * (which is when dirty flag consistency would really matter). 2947 */ 2948 flag_dirty = arena_mapbits_dirty_get(chunk, pageind) | 2949 arena_mapbits_dirty_get(chunk, pageind+npages-1); 2950 flag_unzeroed_mask = flag_dirty == 0 ? 
CHUNK_MAP_UNZEROED : 0; 2951 arena_mapbits_large_set(chunk, pageind, size + large_pad, 2952 flag_dirty | (flag_unzeroed_mask & 2953 arena_mapbits_unzeroed_get(chunk, pageind))); 2954 arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty | 2955 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, 2956 pageind+npages-1))); 2957 2958 if (config_stats) { 2959 szind_t oldindex = size2index(oldsize) - NBINS; 2960 szind_t index = size2index(size) - NBINS; 2961 2962 arena->stats.ndalloc_large++; 2963 arena->stats.allocated_large -= oldsize; 2964 arena->stats.lstats[oldindex].ndalloc++; 2965 arena->stats.lstats[oldindex].curruns--; 2966 2967 arena->stats.nmalloc_large++; 2968 arena->stats.nrequests_large++; 2969 arena->stats.allocated_large += size; 2970 arena->stats.lstats[index].nmalloc++; 2971 arena->stats.lstats[index].nrequests++; 2972 arena->stats.lstats[index].curruns++; 2973 } 2974 malloc_mutex_unlock(tsd, &arena->lock); 2975 return (false); 2976 } 2977label_fail: 2978 malloc_mutex_unlock(tsd, &arena->lock); 2979 return (true); 2980} 2981 2982#ifdef JEMALLOC_JET 2983#undef arena_ralloc_junk_large 2984#define arena_ralloc_junk_large JEMALLOC_N(n_arena_ralloc_junk_large) 2985#endif 2986static void 2987arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize) 2988{ 2989 2990 if (config_fill && unlikely(opt_junk_free)) { 2991 memset((void *)((uintptr_t)ptr + usize), JEMALLOC_FREE_JUNK, 2992 old_usize - usize); 2993 } 2994} 2995#ifdef JEMALLOC_JET 2996#undef arena_ralloc_junk_large 2997#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large) 2998arena_ralloc_junk_large_t *arena_ralloc_junk_large = 2999 JEMALLOC_N(n_arena_ralloc_junk_large); 3000#endif 3001 3002/* 3003 * Try to resize a large allocation, in order to avoid copying. This will 3004 * always fail when growing an object if the following run is already in use. 3005 */ 3006static bool 3007arena_ralloc_large(tsd_t *tsd, void *ptr, size_t oldsize, size_t usize_min, 3008 size_t usize_max, bool zero) 3009{ 3010 arena_chunk_t *chunk; 3011 arena_t *arena; 3012 3013 if (oldsize == usize_max) { 3014 /* Current size class is compatible and maximal. */ 3015 return (false); 3016 } 3017 3018 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); 3019 arena = extent_node_arena_get(&chunk->node); 3020 3021 if (oldsize < usize_max) { 3022 bool ret = arena_ralloc_large_grow(tsd, arena, chunk, ptr, 3023 oldsize, usize_min, usize_max, zero); 3024 if (config_fill && !ret && !zero) { 3025 if (unlikely(opt_junk_alloc)) { 3026 memset((void *)((uintptr_t)ptr + oldsize), 3027 JEMALLOC_ALLOC_JUNK, 3028 isalloc(tsd, ptr, config_prof) - oldsize); 3029 } else if (unlikely(opt_zero)) { 3030 memset((void *)((uintptr_t)ptr + oldsize), 0, 3031 isalloc(tsd, ptr, config_prof) - oldsize); 3032 } 3033 } 3034 return (ret); 3035 } 3036 3037 assert(oldsize > usize_max); 3038 /* Fill before shrinking in order to avoid a race. */ 3039 arena_ralloc_junk_large(ptr, oldsize, usize_max); 3040 arena_ralloc_large_shrink(tsd, arena, chunk, ptr, oldsize, usize_max); 3041 return (false); 3042} 3043 3044bool 3045arena_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, 3046 size_t extra, bool zero) 3047{ 3048 size_t usize_min, usize_max; 3049 3050 /* Calls with non-zero extra had to clamp extra.
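 * That is, the caller must already have reduced extra far enough that
 * size + extra cannot exceed HUGE_MAXCLASS; the assertion below enforces
 * this.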
*/ 3051 assert(extra == 0 || size + extra <= HUGE_MAXCLASS); 3052 3053 if (unlikely(size > HUGE_MAXCLASS)) 3054 return (true); 3055 3056 usize_min = s2u(size); 3057 usize_max = s2u(size + extra); 3058 if (likely(oldsize <= large_maxclass && usize_min <= large_maxclass)) { 3059 arena_chunk_t *chunk; 3060 3061 /* 3062 * Avoid moving the allocation if the size class can be left the 3063 * same. 3064 */ 3065 if (oldsize <= SMALL_MAXCLASS) { 3066 assert(arena_bin_info[size2index(oldsize)].reg_size == 3067 oldsize); 3068 if ((usize_max > SMALL_MAXCLASS || 3069 size2index(usize_max) != size2index(oldsize)) && 3070 (size > oldsize || usize_max < oldsize)) 3071 return (true); 3072 } else { 3073 if (usize_max <= SMALL_MAXCLASS) 3074 return (true); 3075 if (arena_ralloc_large(tsd, ptr, oldsize, usize_min, 3076 usize_max, zero)) 3077 return (true); 3078 } 3079 3080 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); 3081 arena_decay_tick(tsd, extent_node_arena_get(&chunk->node)); 3082 return (false); 3083 } else { 3084 return (huge_ralloc_no_move(tsd, ptr, oldsize, usize_min, 3085 usize_max, zero)); 3086 } 3087} 3088 3089static void * 3090arena_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize, 3091 size_t alignment, bool zero, tcache_t *tcache) 3092{ 3093 3094 if (alignment == 0) 3095 return (arena_malloc(tsd, arena, usize, size2index(usize), zero, 3096 tcache, true)); 3097 usize = sa2u(usize, alignment); 3098 if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) 3099 return (NULL); 3100 return (ipalloct(tsd, usize, alignment, zero, tcache, arena)); 3101} 3102 3103void * 3104arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size, 3105 size_t alignment, bool zero, tcache_t *tcache) 3106{ 3107 void *ret; 3108 size_t usize; 3109 3110 usize = s2u(size); 3111 if (unlikely(usize == 0 || size > HUGE_MAXCLASS)) 3112 return (NULL); 3113 3114 if (likely(usize <= large_maxclass)) { 3115 size_t copysize; 3116 3117 /* Try to avoid moving the allocation. */ 3118 if (!arena_ralloc_no_move(tsd, ptr, oldsize, usize, 0, zero)) 3119 return (ptr); 3120 3121 /* 3122 * size and oldsize are different enough that we need to move 3123 * the object. In that case, fall back to allocating new space 3124 * and copying. 3125 */ 3126 ret = arena_ralloc_move_helper(tsd, arena, usize, alignment, 3127 zero, tcache); 3128 if (ret == NULL) 3129 return (NULL); 3130 3131 /* 3132 * Junk/zero-filling were already done by 3133 * ipalloc()/arena_malloc(). 3134 */ 3135 3136 copysize = (usize < oldsize) ? 
usize : oldsize; 3137 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize); 3138 memcpy(ret, ptr, copysize); 3139 isqalloc(tsd, ptr, oldsize, tcache); 3140 } else { 3141 ret = huge_ralloc(tsd, arena, ptr, oldsize, usize, alignment, 3142 zero, tcache); 3143 } 3144 return (ret); 3145} 3146 3147dss_prec_t 3148arena_dss_prec_get(tsd_t *tsd, arena_t *arena) 3149{ 3150 dss_prec_t ret; 3151 3152 malloc_mutex_lock(tsd, &arena->lock); 3153 ret = arena->dss_prec; 3154 malloc_mutex_unlock(tsd, &arena->lock); 3155 return (ret); 3156} 3157 3158bool 3159arena_dss_prec_set(tsd_t *tsd, arena_t *arena, dss_prec_t dss_prec) 3160{ 3161 3162 if (!have_dss) 3163 return (dss_prec != dss_prec_disabled); 3164 malloc_mutex_lock(tsd, &arena->lock); 3165 arena->dss_prec = dss_prec; 3166 malloc_mutex_unlock(tsd, &arena->lock); 3167 return (false); 3168} 3169 3170ssize_t 3171arena_lg_dirty_mult_default_get(void) 3172{ 3173 3174 return ((ssize_t)atomic_read_z((size_t *)&lg_dirty_mult_default)); 3175} 3176 3177bool 3178arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult) 3179{ 3180 3181 if (opt_purge != purge_mode_ratio) 3182 return (true); 3183 if (!arena_lg_dirty_mult_valid(lg_dirty_mult)) 3184 return (true); 3185 atomic_write_z((size_t *)&lg_dirty_mult_default, (size_t)lg_dirty_mult); 3186 return (false); 3187} 3188 3189ssize_t 3190arena_decay_time_default_get(void) 3191{ 3192 3193 return ((ssize_t)atomic_read_z((size_t *)&decay_time_default)); 3194} 3195 3196bool 3197arena_decay_time_default_set(ssize_t decay_time) 3198{ 3199 3200 if (opt_purge != purge_mode_decay) 3201 return (true); 3202 if (!arena_decay_time_valid(decay_time)) 3203 return (true); 3204 atomic_write_z((size_t *)&decay_time_default, (size_t)decay_time); 3205 return (false); 3206} 3207 3208static void 3209arena_basic_stats_merge_locked(arena_t *arena, unsigned *nthreads, 3210 const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time, 3211 size_t *nactive, size_t *ndirty) 3212{ 3213 3214 *nthreads += arena_nthreads_get(arena); 3215 *dss = dss_prec_names[arena->dss_prec]; 3216 *lg_dirty_mult = arena->lg_dirty_mult; 3217 *decay_time = arena->decay_time; 3218 *nactive += arena->nactive; 3219 *ndirty += arena->ndirty; 3220} 3221 3222void 3223arena_basic_stats_merge(tsd_t *tsd, arena_t *arena, unsigned *nthreads, 3224 const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time, 3225 size_t *nactive, size_t *ndirty) 3226{ 3227 3228 malloc_mutex_lock(tsd, &arena->lock); 3229 arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult, 3230 decay_time, nactive, ndirty); 3231 malloc_mutex_unlock(tsd, &arena->lock); 3232} 3233 3234void 3235arena_stats_merge(tsd_t *tsd, arena_t *arena, unsigned *nthreads, 3236 const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time, 3237 size_t *nactive, size_t *ndirty, arena_stats_t *astats, 3238 malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats, 3239 malloc_huge_stats_t *hstats) 3240{ 3241 unsigned i; 3242 3243 cassert(config_stats); 3244 3245 malloc_mutex_lock(tsd, &arena->lock); 3246 arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult, 3247 decay_time, nactive, ndirty); 3248 3249 astats->mapped += arena->stats.mapped; 3250 astats->npurge += arena->stats.npurge; 3251 astats->nmadvise += arena->stats.nmadvise; 3252 astats->purged += arena->stats.purged; 3253 astats->metadata_mapped += arena->stats.metadata_mapped; 3254 astats->metadata_allocated += arena_metadata_allocated_get(arena); 3255 astats->allocated_large += arena->stats.allocated_large; 3256 astats->nmalloc_large += 
arena->stats.nmalloc_large; 3257 astats->ndalloc_large += arena->stats.ndalloc_large; 3258 astats->nrequests_large += arena->stats.nrequests_large; 3259 astats->allocated_huge += arena->stats.allocated_huge; 3260 astats->nmalloc_huge += arena->stats.nmalloc_huge; 3261 astats->ndalloc_huge += arena->stats.ndalloc_huge; 3262 3263 for (i = 0; i < nlclasses; i++) { 3264 lstats[i].nmalloc += arena->stats.lstats[i].nmalloc; 3265 lstats[i].ndalloc += arena->stats.lstats[i].ndalloc; 3266 lstats[i].nrequests += arena->stats.lstats[i].nrequests; 3267 lstats[i].curruns += arena->stats.lstats[i].curruns; 3268 } 3269 3270 for (i = 0; i < nhclasses; i++) { 3271 hstats[i].nmalloc += arena->stats.hstats[i].nmalloc; 3272 hstats[i].ndalloc += arena->stats.hstats[i].ndalloc; 3273 hstats[i].curhchunks += arena->stats.hstats[i].curhchunks; 3274 } 3275 malloc_mutex_unlock(tsd, &arena->lock); 3276 3277 for (i = 0; i < NBINS; i++) { 3278 arena_bin_t *bin = &arena->bins[i]; 3279 3280 malloc_mutex_lock(tsd, &bin->lock); 3281 bstats[i].nmalloc += bin->stats.nmalloc; 3282 bstats[i].ndalloc += bin->stats.ndalloc; 3283 bstats[i].nrequests += bin->stats.nrequests; 3284 bstats[i].curregs += bin->stats.curregs; 3285 if (config_tcache) { 3286 bstats[i].nfills += bin->stats.nfills; 3287 bstats[i].nflushes += bin->stats.nflushes; 3288 } 3289 bstats[i].nruns += bin->stats.nruns; 3290 bstats[i].reruns += bin->stats.reruns; 3291 bstats[i].curruns += bin->stats.curruns; 3292 malloc_mutex_unlock(tsd, &bin->lock); 3293 } 3294} 3295 3296unsigned 3297arena_nthreads_get(arena_t *arena) 3298{ 3299 3300 return (atomic_read_u(&arena->nthreads)); 3301} 3302 3303void 3304arena_nthreads_inc(arena_t *arena) 3305{ 3306 3307 atomic_add_u(&arena->nthreads, 1); 3308} 3309 3310void 3311arena_nthreads_dec(arena_t *arena) 3312{ 3313 3314 atomic_sub_u(&arena->nthreads, 1); 3315} 3316 3317arena_t * 3318arena_new(tsd_t *tsd, unsigned ind) 3319{ 3320 arena_t *arena; 3321 size_t arena_size; 3322 unsigned i; 3323 3324 /* Compute arena size to incorporate sufficient runs_avail elements. */ 3325 arena_size = offsetof(arena_t, runs_avail) + (sizeof(arena_run_heap_t) * 3326 runs_avail_nclasses); 3327 /* 3328 * Allocate arena, arena->lstats, and arena->hstats contiguously, mainly 3329 * because there is no way to clean up if base_alloc() OOMs.
 * The arena struct is followed by lstats at the next cacheline boundary,
 * then by hstats at the next quantum boundary after lstats; the size
 * computed here must cover exactly that layout.
3330 */ 3331 if (config_stats) { 3332 arena = (arena_t *)base_alloc(tsd, CACHELINE_CEILING(arena_size) 3333 + QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t)) + 3334 nhclasses * sizeof(malloc_huge_stats_t)); 3335 } else 3336 arena = (arena_t *)base_alloc(tsd, arena_size); 3337 if (arena == NULL) 3338 return (NULL); 3339 3340 arena->ind = ind; 3341 arena->nthreads = 0; 3342 if (malloc_mutex_init(&arena->lock, "arena", WITNESS_RANK_ARENA)) 3343 return (NULL); 3344 3345 if (config_stats) { 3346 memset(&arena->stats, 0, sizeof(arena_stats_t)); 3347 arena->stats.lstats = (malloc_large_stats_t *)((uintptr_t)arena 3348 + CACHELINE_CEILING(arena_size)); 3349 memset(arena->stats.lstats, 0, nlclasses * 3350 sizeof(malloc_large_stats_t)); 3351 arena->stats.hstats = (malloc_huge_stats_t *)((uintptr_t)arena 3352 + CACHELINE_CEILING(arena_size) + 3353 QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t))); 3354 memset(arena->stats.hstats, 0, nhclasses * 3355 sizeof(malloc_huge_stats_t)); 3356 if (config_tcache) 3357 ql_new(&arena->tcache_ql); 3358 } 3359 3360 if (config_prof) 3361 arena->prof_accumbytes = 0; 3362 3363 if (config_cache_oblivious) { 3364 /* 3365 * A nondeterministic seed based on the address of arena reduces 3366 * the likelihood of lockstep non-uniform cache index 3367 * utilization among identical concurrent processes, but at the 3368 * cost of test repeatability. For debug builds, instead use a 3369 * deterministic seed. 3370 */ 3371 arena->offset_state = config_debug ? ind : 3372 (uint64_t)(uintptr_t)arena; 3373 } 3374 3375 arena->dss_prec = chunk_dss_prec_get(tsd); 3376 3377 arena->spare = NULL; 3378 3379 arena->lg_dirty_mult = arena_lg_dirty_mult_default_get(); 3380 arena->purging = false; 3381 arena->nactive = 0; 3382 arena->ndirty = 0; 3383 3384 for (i = 0; i < runs_avail_nclasses; i++) 3385 arena_run_heap_new(&arena->runs_avail[i]); 3386 qr_new(&arena->runs_dirty, rd_link); 3387 qr_new(&arena->chunks_cache, cc_link); 3388 3389 if (opt_purge == purge_mode_decay) 3390 arena_decay_init(arena, arena_decay_time_default_get()); 3391 3392 ql_new(&arena->huge); 3393 if (malloc_mutex_init(&arena->huge_mtx, "arena_huge", 3394 WITNESS_RANK_ARENA_HUGE)) 3395 return (NULL); 3396 3397 extent_tree_szad_new(&arena->chunks_szad_cached); 3398 extent_tree_ad_new(&arena->chunks_ad_cached); 3399 extent_tree_szad_new(&arena->chunks_szad_retained); 3400 extent_tree_ad_new(&arena->chunks_ad_retained); 3401 if (malloc_mutex_init(&arena->chunks_mtx, "arena_chunks", 3402 WITNESS_RANK_ARENA_CHUNKS)) 3403 return (NULL); 3404 ql_new(&arena->node_cache); 3405 if (malloc_mutex_init(&arena->node_cache_mtx, "arena_node_cache", 3406 WITNESS_RANK_ARENA_NODE_CACHE)) 3407 return (NULL); 3408 3409 arena->chunk_hooks = chunk_hooks_default; 3410 3411 /* Initialize bins. */ 3412 for (i = 0; i < NBINS; i++) { 3413 arena_bin_t *bin = &arena->bins[i]; 3414 if (malloc_mutex_init(&bin->lock, "arena_bin", 3415 WITNESS_RANK_ARENA_BIN)) 3416 return (NULL); 3417 bin->runcur = NULL; 3418 arena_run_heap_new(&bin->runs); 3419 if (config_stats) 3420 memset(&bin->stats, 0, sizeof(malloc_bin_stats_t)); 3421 } 3422 3423 return (arena); 3424} 3425 3426/* 3427 * Calculate bin_info->run_size such that it meets the following constraints: 3428 * 3429 * *) bin_info->run_size <= arena_maxrun 3430 * *) bin_info->nregs <= RUN_MAXREGS 3431 * 3432 * bin_info->nregs and bin_info->reg0_offset are also calculated here, since 3433 * these settings are all interdependent.
3434 */ 3435static void 3436bin_info_run_size_calc(arena_bin_info_t *bin_info) 3437{ 3438 size_t pad_size; 3439 size_t try_run_size, perfect_run_size, actual_run_size; 3440 uint32_t try_nregs, perfect_nregs, actual_nregs; 3441 3442 /* 3443 * Determine redzone size based on minimum alignment and minimum 3444 * redzone size. Add padding to the end of the run if it is needed to 3445 * align the regions. The padding allows each redzone to be half the 3446 * minimum alignment; without the padding, each redzone would have to 3447 * be twice as large in order to maintain alignment. 3448 */ 3449 if (config_fill && unlikely(opt_redzone)) { 3450 size_t align_min = ZU(1) << (ffs_zu(bin_info->reg_size) - 1); 3451 if (align_min <= REDZONE_MINSIZE) { 3452 bin_info->redzone_size = REDZONE_MINSIZE; 3453 pad_size = 0; 3454 } else { 3455 bin_info->redzone_size = align_min >> 1; 3456 pad_size = bin_info->redzone_size; 3457 } 3458 } else { 3459 bin_info->redzone_size = 0; 3460 pad_size = 0; 3461 } 3462 bin_info->reg_interval = bin_info->reg_size + 3463 (bin_info->redzone_size << 1); 3464 3465 /* 3466 * Compute run size under ideal conditions (no redzones, no limit on run 3467 * size). 3468 */ 3469 try_run_size = PAGE; 3470 try_nregs = (uint32_t)(try_run_size / bin_info->reg_size); 3471 do { 3472 perfect_run_size = try_run_size; 3473 perfect_nregs = try_nregs; 3474 3475 try_run_size += PAGE; 3476 try_nregs = (uint32_t)(try_run_size / bin_info->reg_size); 3477 } while (perfect_run_size != perfect_nregs * bin_info->reg_size); 3478 assert(perfect_nregs <= RUN_MAXREGS); 3479 3480 actual_run_size = perfect_run_size; 3481 actual_nregs = (uint32_t)((actual_run_size - pad_size) / 3482 bin_info->reg_interval); 3483 3484 /* 3485 * Redzones can require enough padding that not even a single region can 3486 * fit within the number of pages that would normally be dedicated to a 3487 * run for this size class. Increase the run size until at least one 3488 * region fits. 3489 */ 3490 while (actual_nregs == 0) { 3491 assert(config_fill && unlikely(opt_redzone)); 3492 3493 actual_run_size += PAGE; 3494 actual_nregs = (uint32_t)((actual_run_size - pad_size) / 3495 bin_info->reg_interval); 3496 } 3497 3498 /* 3499 * Make sure that the run will fit within an arena chunk. 3500 */ 3501 while (actual_run_size > arena_maxrun) { 3502 actual_run_size -= PAGE; 3503 actual_nregs = (uint32_t)((actual_run_size - pad_size) / 3504 bin_info->reg_interval); 3505 } 3506 assert(actual_nregs > 0); 3507 assert(actual_run_size == s2u(actual_run_size)); 3508 3509 /* Copy final settings. 
*/ 3510 bin_info->run_size = actual_run_size; 3511 bin_info->nregs = actual_nregs; 3512 bin_info->reg0_offset = (uint32_t)(actual_run_size - (actual_nregs * 3513 bin_info->reg_interval) - pad_size + bin_info->redzone_size); 3514 3515 if (actual_run_size > small_maxrun) 3516 small_maxrun = actual_run_size; 3517 3518 assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs 3519 * bin_info->reg_interval) + pad_size == bin_info->run_size); 3520} 3521 3522static void 3523bin_info_init(void) 3524{ 3525 arena_bin_info_t *bin_info; 3526 3527#define BIN_INFO_INIT_bin_yes(index, size) \ 3528 bin_info = &arena_bin_info[index]; \ 3529 bin_info->reg_size = size; \ 3530 bin_info_run_size_calc(bin_info); \ 3531 bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs); 3532#define BIN_INFO_INIT_bin_no(index, size) 3533#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \ 3534 BIN_INFO_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)) 3535 SIZE_CLASSES 3536#undef BIN_INFO_INIT_bin_yes 3537#undef BIN_INFO_INIT_bin_no 3538#undef SC 3539} 3540 3541static bool 3542small_run_size_init(void) 3543{ 3544 3545 assert(small_maxrun != 0); 3546 3547 small_run_tab = (bool *)base_alloc(NULL, sizeof(bool) * (small_maxrun >> 3548 LG_PAGE)); 3549 if (small_run_tab == NULL) 3550 return (true); 3551 3552#define TAB_INIT_bin_yes(index, size) { \ 3553 arena_bin_info_t *bin_info = &arena_bin_info[index]; \ 3554 small_run_tab[bin_info->run_size >> LG_PAGE] = true; \ 3555 } 3556#define TAB_INIT_bin_no(index, size) 3557#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \ 3558 TAB_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)) 3559 SIZE_CLASSES 3560#undef TAB_INIT_bin_yes 3561#undef TAB_INIT_bin_no 3562#undef SC 3563 3564 return (false); 3565} 3566 3567static bool 3568run_quantize_init(void) 3569{ 3570 unsigned i; 3571 3572 run_quantize_max = chunksize + large_pad; 3573 3574 run_quantize_floor_tab = (size_t *)base_alloc(NULL, sizeof(size_t) * 3575 (run_quantize_max >> LG_PAGE)); 3576 if (run_quantize_floor_tab == NULL) 3577 return (true); 3578 3579 run_quantize_ceil_tab = (size_t *)base_alloc(NULL, sizeof(size_t) * 3580 (run_quantize_max >> LG_PAGE)); 3581 if (run_quantize_ceil_tab == NULL) 3582 return (true); 3583 3584 for (i = 1; i <= run_quantize_max >> LG_PAGE; i++) { 3585 size_t run_size = i << LG_PAGE; 3586 3587 run_quantize_floor_tab[i-1] = 3588 run_quantize_floor_compute(run_size); 3589 run_quantize_ceil_tab[i-1] = 3590 run_quantize_ceil_compute(run_size); 3591 } 3592 3593 return (false); 3594} 3595 3596bool 3597arena_boot(void) 3598{ 3599 unsigned i; 3600 3601 arena_lg_dirty_mult_default_set(opt_lg_dirty_mult); 3602 arena_decay_time_default_set(opt_decay_time); 3603 3604 /* 3605 * Compute the header size such that it is large enough to contain the 3606 * page map. The page map is biased to omit entries for the header 3607 * itself, so some iteration is necessary to compute the map bias. 3608 * 3609 * 1) Compute safe header_size and map_bias values that include enough 3610 * space for an unbiased page map. 3611 * 2) Refine map_bias based on (1) to omit the header pages in the page 3612 * map. The resulting map_bias may be one too small. 3613 * 3) Refine map_bias based on (2). The result will be >= the result 3614 * from (2), and will always be correct. 
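 *
 * Each iteration recomputes header_size using the previous map_bias, then
 * derives the new map_bias as the number of pages needed to hold the
 * header.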
3615 */ 3616 map_bias = 0; 3617 for (i = 0; i < 3; i++) { 3618 size_t header_size = offsetof(arena_chunk_t, map_bits) + 3619 ((sizeof(arena_chunk_map_bits_t) + 3620 sizeof(arena_chunk_map_misc_t)) * (chunk_npages-map_bias)); 3621 map_bias = (header_size + PAGE_MASK) >> LG_PAGE; 3622 } 3623 assert(map_bias > 0); 3624 3625 map_misc_offset = offsetof(arena_chunk_t, map_bits) + 3626 sizeof(arena_chunk_map_bits_t) * (chunk_npages-map_bias); 3627 3628 arena_maxrun = chunksize - (map_bias << LG_PAGE); 3629 assert(arena_maxrun > 0); 3630 large_maxclass = index2size(size2index(chunksize)-1); 3631 if (large_maxclass > arena_maxrun) { 3632 /* 3633 * For small chunk sizes it's possible for there to be fewer 3634 * non-header pages available than are necessary to serve the 3635 * size classes just below chunksize. 3636 */ 3637 large_maxclass = arena_maxrun; 3638 } 3639 assert(large_maxclass > 0); 3640 nlclasses = size2index(large_maxclass) - size2index(SMALL_MAXCLASS); 3641 nhclasses = NSIZES - nlclasses - NBINS; 3642 3643 bin_info_init(); 3644 if (small_run_size_init()) 3645 return (true); 3646 if (run_quantize_init()) 3647 return (true); 3648 3649 runs_avail_bias = size2index(PAGE); 3650 runs_avail_nclasses = size2index(run_quantize_max)+1 - runs_avail_bias; 3651 3652 return (false); 3653} 3654 3655void 3656arena_prefork(tsd_t *tsd, arena_t *arena) 3657{ 3658 unsigned i; 3659 3660 malloc_mutex_prefork(tsd, &arena->lock); 3661 malloc_mutex_prefork(tsd, &arena->huge_mtx); 3662 malloc_mutex_prefork(tsd, &arena->chunks_mtx); 3663 malloc_mutex_prefork(tsd, &arena->node_cache_mtx); 3664 for (i = 0; i < NBINS; i++) 3665 malloc_mutex_prefork(tsd, &arena->bins[i].lock); 3666} 3667 3668void 3669arena_postfork_parent(tsd_t *tsd, arena_t *arena) 3670{ 3671 unsigned i; 3672 3673 for (i = 0; i < NBINS; i++) 3674 malloc_mutex_postfork_parent(tsd, &arena->bins[i].lock); 3675 malloc_mutex_postfork_parent(tsd, &arena->node_cache_mtx); 3676 malloc_mutex_postfork_parent(tsd, &arena->chunks_mtx); 3677 malloc_mutex_postfork_parent(tsd, &arena->huge_mtx); 3678 malloc_mutex_postfork_parent(tsd, &arena->lock); 3679} 3680 3681void 3682arena_postfork_child(tsd_t *tsd, arena_t *arena) 3683{ 3684 unsigned i; 3685 3686 for (i = 0; i < NBINS; i++) 3687 malloc_mutex_postfork_child(tsd, &arena->bins[i].lock); 3688 malloc_mutex_postfork_child(tsd, &arena->node_cache_mtx); 3689 malloc_mutex_postfork_child(tsd, &arena->chunks_mtx); 3690 malloc_mutex_postfork_child(tsd, &arena->huge_mtx); 3691 malloc_mutex_postfork_child(tsd, &arena->lock); 3692} 3693