arena.c revision 90827a3f3ef2099dcd480d542aacc9f44a0787e8

#define	JEMALLOC_ARENA_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

purge_mode_t	opt_purge = PURGE_DEFAULT;
const char	*purge_mode_names[] = {
	"ratio",
	"decay",
	"N/A"
};
ssize_t		opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
static ssize_t	lg_dirty_mult_default;
ssize_t		opt_decay_time = DECAY_TIME_DEFAULT;
static ssize_t	decay_time_default;

arena_bin_info_t	arena_bin_info[NBINS];

size_t		map_bias;
size_t		map_misc_offset;
size_t		arena_maxrun; /* Max run size for arenas. */
size_t		large_maxclass; /* Max large size class. */
size_t		run_quantize_max; /* Max run_quantize_*() input. */
static size_t	small_maxrun; /* Max run size for small size classes. */
static bool	*small_run_tab; /* Valid small run page multiples. */
static size_t	*run_quantize_floor_tab; /* run_quantize_floor() memoization. */
static size_t	*run_quantize_ceil_tab; /* run_quantize_ceil() memoization. */
unsigned	nlclasses; /* Number of large size classes. */
unsigned	nhclasses; /* Number of huge size classes. */
static szind_t	runs_avail_bias; /* Size index for first runs_avail tree. */
static szind_t	runs_avail_nclasses; /* Number of runs_avail trees. */

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void	arena_purge_to_limit(tsd_t *tsd, arena_t *arena,
    size_t ndirty_limit);
static void	arena_run_dalloc(tsd_t *tsd, arena_t *arena, arena_run_t *run,
    bool dirty, bool cleaned, bool decommitted);
static void	arena_dalloc_bin_run(tsd_t *tsd, arena_t *arena,
    arena_chunk_t *chunk, arena_run_t *run, arena_bin_t *bin);
static void	arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin);

/******************************************************************************/

JEMALLOC_INLINE_C size_t
arena_miscelm_size_get(const arena_chunk_map_misc_t *miscelm)
{
	arena_chunk_t *chunk;
	size_t pageind, mapbits;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
	pageind = arena_miscelm_to_pageind(miscelm);
	mapbits = arena_mapbits_get(chunk, pageind);
	return (arena_mapbits_size_decode(mapbits));
}

JEMALLOC_INLINE_C int
arena_run_addr_comp(const arena_chunk_map_misc_t *a,
    const arena_chunk_map_misc_t *b)
{
	uintptr_t a_miscelm = (uintptr_t)a;
	uintptr_t b_miscelm = (uintptr_t)b;

	assert(a != NULL);
	assert(b != NULL);

	return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm));
}

/* Generate pairing heap functions. */
ph_gen(static UNUSED, arena_run_heap_, arena_run_heap_t, arena_chunk_map_misc_t,
    ph_link, arena_run_addr_comp)

static size_t
run_quantize_floor_compute(size_t size)
{
	size_t qsize;

	assert(size != 0);
	assert(size == PAGE_CEILING(size));

	/* Don't change sizes that are valid small run sizes. */
	if (size <= small_maxrun && small_run_tab[size >> LG_PAGE])
		return (size);

	/*
	 * Round down to the nearest run size that can actually be requested
	 * during normal large allocation.  Add large_pad so that cache index
	 * randomization can offset the allocation from the page boundary.
	 */
	qsize = index2size(size2index(size - large_pad + 1) - 1) + large_pad;
	if (qsize <= SMALL_MAXCLASS + large_pad)
		return (run_quantize_floor_compute(size - large_pad));
	assert(qsize <= size);
	return (qsize);
}

static size_t
run_quantize_ceil_compute_hard(size_t size)
{
	size_t large_run_size_next;

	assert(size != 0);
	assert(size == PAGE_CEILING(size));

	/*
	 * Return the next quantized size greater than the input size.
	 * Quantized sizes comprise the union of run sizes that back small
	 * region runs, and run sizes that back large regions with no explicit
	 * alignment constraints.
	 */

	if (size > SMALL_MAXCLASS) {
		large_run_size_next = PAGE_CEILING(index2size(size2index(size -
		    large_pad) + 1) + large_pad);
	} else
		large_run_size_next = SIZE_T_MAX;
	if (size >= small_maxrun)
		return (large_run_size_next);

	while (true) {
		size += PAGE;
		assert(size <= small_maxrun);
		if (small_run_tab[size >> LG_PAGE]) {
			if (large_run_size_next < size)
				return (large_run_size_next);
			return (size);
		}
	}
}

static size_t
run_quantize_ceil_compute(size_t size)
{
	size_t qsize = run_quantize_floor_compute(size);

	if (qsize < size) {
		/*
		 * Skip a quantization that may have an adequately large run,
		 * because under-sized runs may be mixed in.  This only happens
		 * when an unusual size is requested, i.e. for aligned
		 * allocation, and is just one of several places where linear
		 * search would potentially find sufficiently aligned available
		 * memory somewhere lower.
		 */
		qsize = run_quantize_ceil_compute_hard(qsize);
	}
	return (qsize);
}

#ifdef JEMALLOC_JET
#undef run_quantize_floor
#define	run_quantize_floor JEMALLOC_N(n_run_quantize_floor)
#endif
static size_t
run_quantize_floor(size_t size)
{
	size_t ret;

	assert(size > 0);
	assert(size <= run_quantize_max);
	assert((size & PAGE_MASK) == 0);

	ret = run_quantize_floor_tab[(size >> LG_PAGE) - 1];
	assert(ret == run_quantize_floor_compute(size));
	return (ret);
}
#ifdef JEMALLOC_JET
#undef run_quantize_floor
#define	run_quantize_floor JEMALLOC_N(run_quantize_floor)
run_quantize_t *run_quantize_floor = JEMALLOC_N(n_run_quantize_floor);
#endif

#ifdef JEMALLOC_JET
#undef run_quantize_ceil
#define	run_quantize_ceil JEMALLOC_N(n_run_quantize_ceil)
#endif
static size_t
run_quantize_ceil(size_t size)
{
	size_t ret;

	assert(size > 0);
	assert(size <= run_quantize_max);
	assert((size & PAGE_MASK) == 0);

	ret = run_quantize_ceil_tab[(size >> LG_PAGE) - 1];
	assert(ret == run_quantize_ceil_compute(size));
	return (ret);
}
#ifdef JEMALLOC_JET
#undef run_quantize_ceil
#define	run_quantize_ceil JEMALLOC_N(run_quantize_ceil)
run_quantize_t *run_quantize_ceil = JEMALLOC_N(n_run_quantize_ceil);
#endif
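
/*
 * Both quantization functions are O(1) in the steady state:
 * run_quantize_floor_tab and run_quantize_ceil_tab are precomputed
 * (indexed by (size >> LG_PAGE) - 1), and the *_compute() variants are
 * only consulted to cross-check the tables in debug builds.
 */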

static arena_run_heap_t *
arena_runs_avail_get(arena_t *arena, szind_t ind)
{

	assert(ind >= runs_avail_bias);
	assert(ind - runs_avail_bias < runs_avail_nclasses);

	return (&arena->runs_avail[ind - runs_avail_bias]);
}

static void
arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
	szind_t ind = size2index(run_quantize_floor(arena_miscelm_size_get(
	    arena_miscelm_get_const(chunk, pageind))));
	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));
	arena_run_heap_insert(arena_runs_avail_get(arena, ind),
	    arena_miscelm_get_mutable(chunk, pageind));
}

static void
arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
	szind_t ind = size2index(run_quantize_floor(arena_miscelm_size_get(
	    arena_miscelm_get_const(chunk, pageind))));
	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));
	arena_run_heap_remove(arena_runs_avail_get(arena, ind),
	    arena_miscelm_get_mutable(chunk, pageind));
}

static void
arena_run_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
	arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
	    pageind);

	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));
	assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
	assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
	    CHUNK_MAP_DIRTY);

	qr_new(&miscelm->rd, rd_link);
	qr_meld(&arena->runs_dirty, &miscelm->rd, rd_link);
	arena->ndirty += npages;
}

static void
arena_run_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
	arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
	    pageind);

	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));
	assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
	assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
	    CHUNK_MAP_DIRTY);

	qr_remove(&miscelm->rd, rd_link);
	assert(arena->ndirty >= npages);
	arena->ndirty -= npages;
}

static size_t
arena_chunk_dirty_npages(const extent_node_t *node)
{

	return (extent_node_size_get(node) >> LG_PAGE);
}

void
arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node, bool cache)
{

	if (cache) {
		extent_node_dirty_linkage_init(node);
		extent_node_dirty_insert(node, &arena->runs_dirty,
		    &arena->chunks_cache);
		arena->ndirty += arena_chunk_dirty_npages(node);
	}
}

void
arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node, bool dirty)
{

	if (dirty) {
		extent_node_dirty_remove(node);
		assert(arena->ndirty >= arena_chunk_dirty_npages(node));
		arena->ndirty -= arena_chunk_dirty_npages(node);
	}
}

JEMALLOC_INLINE_C void *
arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
{
	void *ret;
	size_t regind;
	arena_chunk_map_misc_t *miscelm;
	void *rpages;

	assert(run->nfree > 0);
	assert(!bitmap_full(run->bitmap, &bin_info->bitmap_info));

	regind = (unsigned)bitmap_sfu(run->bitmap, &bin_info->bitmap_info);
	miscelm = arena_run_to_miscelm(run);
	rpages = arena_miscelm_to_rpages(miscelm);
	ret = (void *)((uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset +
	    (uintptr_t)(bin_info->reg_interval * regind));
	run->nfree--;
	return (ret);
}
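
/*
 * Region addressing sketch (values illustrative): a run's pages begin at
 * rpages, regions are reg_interval bytes apart (reg_size plus any redzone
 * padding), and region regind lives at
 *
 *   rpages + reg0_offset + regind * reg_interval
 *
 * so with reg0_offset == 0 and reg_interval == 64, regind 3 maps to
 * rpages + 192.  arena_run_regind() inverts this mapping on dalloc.
 */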

JEMALLOC_INLINE_C void
arena_run_reg_dalloc(arena_run_t *run, void *ptr)
{
	arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	size_t mapbits = arena_mapbits_get(chunk, pageind);
	szind_t binind = arena_ptr_small_binind_get(ptr, mapbits);
	arena_bin_info_t *bin_info = &arena_bin_info[binind];
	size_t regind = arena_run_regind(run, bin_info, ptr);

	assert(run->nfree < bin_info->nregs);
	/* Freeing an interior pointer can cause assertion failure. */
	assert(((uintptr_t)ptr -
	    ((uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
	    (uintptr_t)bin_info->reg0_offset)) %
	    (uintptr_t)bin_info->reg_interval == 0);
	assert((uintptr_t)ptr >=
	    (uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
	    (uintptr_t)bin_info->reg0_offset);
	/* Freeing an unallocated pointer can cause assertion failure. */
	assert(bitmap_get(run->bitmap, &bin_info->bitmap_info, regind));

	bitmap_unset(run->bitmap, &bin_info->bitmap_info, regind);
	run->nfree++;
}

JEMALLOC_INLINE_C void
arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages)
{

	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
	    (run_ind << LG_PAGE)), (npages << LG_PAGE));
	memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0,
	    (npages << LG_PAGE));
}

JEMALLOC_INLINE_C void
arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind)
{

	JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind
	    << LG_PAGE)), PAGE);
}

JEMALLOC_INLINE_C void
arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
{
	size_t i;
	UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));

	arena_run_page_mark_zeroed(chunk, run_ind);
	for (i = 0; i < PAGE / sizeof(size_t); i++)
		assert(p[i] == 0);
}

static void
arena_nactive_add(arena_t *arena, size_t add_pages)
{

	if (config_stats) {
		size_t cactive_add = CHUNK_CEILING((arena->nactive +
		    add_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
		    LG_PAGE);
		if (cactive_add != 0)
			stats_cactive_add(cactive_add);
	}
	arena->nactive += add_pages;
}

static void
arena_nactive_sub(arena_t *arena, size_t sub_pages)
{

	if (config_stats) {
		size_t cactive_sub = CHUNK_CEILING(arena->nactive << LG_PAGE) -
		    CHUNK_CEILING((arena->nactive - sub_pages) << LG_PAGE);
		if (cactive_sub != 0)
			stats_cactive_sub(cactive_sub);
	}
	arena->nactive -= sub_pages;
}
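
/*
 * stats_cactive is maintained at chunk granularity, so the two helpers
 * above only touch it when CHUNK_CEILING(nactive << LG_PAGE) actually
 * moves.  For example, with 4 KiB pages and 2 MiB chunks (illustrative
 * defaults), growing nactive from 511 to 513 pages crosses one chunk
 * boundary and adds a single chunk to cactive.
 */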

static void
arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind,
    size_t flag_dirty, size_t flag_decommitted, size_t need_pages)
{
	size_t total_pages, rem_pages;

	assert(flag_dirty == 0 || flag_decommitted == 0);

	total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >>
	    LG_PAGE;
	assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) ==
	    flag_dirty);
	assert(need_pages <= total_pages);
	rem_pages = total_pages - need_pages;

	arena_avail_remove(arena, chunk, run_ind, total_pages);
	if (flag_dirty != 0)
		arena_run_dirty_remove(arena, chunk, run_ind, total_pages);
	arena_nactive_add(arena, need_pages);

	/* Keep track of trailing unused pages for later use. */
	if (rem_pages > 0) {
		size_t flags = flag_dirty | flag_decommitted;
		size_t flag_unzeroed_mask = (flags == 0) ? CHUNK_MAP_UNZEROED :
		    0;

		arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
		    (rem_pages << LG_PAGE), flags |
		    (arena_mapbits_unzeroed_get(chunk, run_ind+need_pages) &
		    flag_unzeroed_mask));
		arena_mapbits_unallocated_set(chunk, run_ind+total_pages-1,
		    (rem_pages << LG_PAGE), flags |
		    (arena_mapbits_unzeroed_get(chunk, run_ind+total_pages-1) &
		    flag_unzeroed_mask));
		if (flag_dirty != 0) {
			arena_run_dirty_insert(arena, chunk, run_ind+need_pages,
			    rem_pages);
		}
		arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages);
	}
}

static bool
arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
    bool remove, bool zero)
{
	arena_chunk_t *chunk;
	arena_chunk_map_misc_t *miscelm;
	size_t flag_dirty, flag_decommitted, run_ind, need_pages;
	size_t flag_unzeroed_mask;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	miscelm = arena_run_to_miscelm(run);
	run_ind = arena_miscelm_to_pageind(miscelm);
	flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
	flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
	need_pages = (size >> LG_PAGE);
	assert(need_pages > 0);

	if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize,
	    run_ind << LG_PAGE, size, arena->ind))
		return (true);

	if (remove) {
		arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
		    flag_decommitted, need_pages);
	}

	if (zero) {
		if (flag_decommitted != 0) {
			/* The run is untouched, and therefore zeroed. */
			JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
			    *)((uintptr_t)chunk + (run_ind << LG_PAGE)),
			    (need_pages << LG_PAGE));
		} else if (flag_dirty != 0) {
			/* The run is dirty, so all pages must be zeroed. */
			arena_run_zero(chunk, run_ind, need_pages);
		} else {
			/*
			 * The run is clean, so some pages may be zeroed (i.e.
			 * never before touched).
			 */
			size_t i;
			for (i = 0; i < need_pages; i++) {
				if (arena_mapbits_unzeroed_get(chunk, run_ind+i)
				    != 0)
					arena_run_zero(chunk, run_ind+i, 1);
				else if (config_debug) {
					arena_run_page_validate_zeroed(chunk,
					    run_ind+i);
				} else {
					arena_run_page_mark_zeroed(chunk,
					    run_ind+i);
				}
			}
		}
	} else {
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
		    (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
	}

	/*
	 * Set the last element first, in case the run only contains one page
	 * (i.e. both statements set the same element).
	 */
	flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
	    CHUNK_MAP_UNZEROED : 0;
	arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty |
	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
	    run_ind+need_pages-1)));
	arena_mapbits_large_set(chunk, run_ind, size, flag_dirty |
	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, run_ind)));
	return (false);
}

static bool
arena_run_split_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
{

	return (arena_run_split_large_helper(arena, run, size, true, zero));
}

static bool
arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
{

	return (arena_run_split_large_helper(arena, run, size, false, zero));
}

static bool
arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size,
    szind_t binind)
{
	arena_chunk_t *chunk;
	arena_chunk_map_misc_t *miscelm;
	size_t flag_dirty, flag_decommitted, run_ind, need_pages, i;

	assert(binind != BININD_INVALID);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	miscelm = arena_run_to_miscelm(run);
	run_ind = arena_miscelm_to_pageind(miscelm);
	flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
	flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
	need_pages = (size >> LG_PAGE);
	assert(need_pages > 0);

	if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize,
	    run_ind << LG_PAGE, size, arena->ind))
		return (true);

	arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
	    flag_decommitted, need_pages);

	for (i = 0; i < need_pages; i++) {
		size_t flag_unzeroed = arena_mapbits_unzeroed_get(chunk,
		    run_ind+i);
		arena_mapbits_small_set(chunk, run_ind+i, i, binind,
		    flag_unzeroed);
		if (config_debug && flag_dirty == 0 && flag_unzeroed == 0)
			arena_run_page_validate_zeroed(chunk, run_ind+i);
	}
	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
	    (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
	return (false);
}

static arena_chunk_t *
arena_chunk_init_spare(arena_t *arena)
{
	arena_chunk_t *chunk;

	assert(arena->spare != NULL);

	chunk = arena->spare;
	arena->spare = NULL;

	assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
	assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
	assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
	    arena_maxrun);
	assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
	    arena_maxrun);
	assert(arena_mapbits_dirty_get(chunk, map_bias) ==
	    arena_mapbits_dirty_get(chunk, chunk_npages-1));

	return (chunk);
}

static bool
arena_chunk_register(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk,
    bool zero)
{

	/*
	 * The extent node notion of "committed" doesn't directly apply to
	 * arena chunks.  Arbitrarily mark them as committed.  The commit state
	 * of runs is tracked individually, and upon chunk deallocation the
	 * entire chunk is in a consistent commit state.
	 */
	extent_node_init(&chunk->node, arena, chunk, chunksize, zero, true);
	extent_node_achunk_set(&chunk->node, true);
	return (chunk_register(tsd, chunk, &chunk->node));
}
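
/*
 * The first map_bias pages of a chunk hold the header (including the
 * page map), so when the chunk hooks hand back an uncommitted chunk,
 * only that header prefix is committed eagerly below; run pages are
 * committed lazily by arena_run_split_*().
 */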

static arena_chunk_t *
arena_chunk_alloc_internal_hard(tsd_t *tsd, arena_t *arena,
    chunk_hooks_t *chunk_hooks, bool *zero, bool *commit)
{
	arena_chunk_t *chunk;

	malloc_mutex_unlock(tsd, &arena->lock);

	chunk = (arena_chunk_t *)chunk_alloc_wrapper(tsd, arena, chunk_hooks,
	    NULL, chunksize, chunksize, zero, commit);
	if (chunk != NULL && !*commit) {
		/* Commit header. */
		if (chunk_hooks->commit(chunk, chunksize, 0, map_bias <<
		    LG_PAGE, arena->ind)) {
			chunk_dalloc_wrapper(tsd, arena, chunk_hooks,
			    (void *)chunk, chunksize, *zero, *commit);
			chunk = NULL;
		}
	}
	if (chunk != NULL && arena_chunk_register(tsd, arena, chunk, *zero)) {
		if (!*commit) {
			/* Undo commit of header. */
			chunk_hooks->decommit(chunk, chunksize, 0, map_bias <<
			    LG_PAGE, arena->ind);
		}
		chunk_dalloc_wrapper(tsd, arena, chunk_hooks, (void *)chunk,
		    chunksize, *zero, *commit);
		chunk = NULL;
	}

	malloc_mutex_lock(tsd, &arena->lock);
	return (chunk);
}

static arena_chunk_t *
arena_chunk_alloc_internal(tsd_t *tsd, arena_t *arena, bool *zero, bool *commit)
{
	arena_chunk_t *chunk;
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;

	chunk = chunk_alloc_cache(tsd, arena, &chunk_hooks, NULL, chunksize,
	    chunksize, zero, true);
	if (chunk != NULL) {
		if (arena_chunk_register(tsd, arena, chunk, *zero)) {
			chunk_dalloc_cache(tsd, arena, &chunk_hooks, chunk,
			    chunksize, true);
			return (NULL);
		}
		*commit = true;
	}
	if (chunk == NULL) {
		chunk = arena_chunk_alloc_internal_hard(tsd, arena,
		    &chunk_hooks, zero, commit);
	}

	if (config_stats && chunk != NULL) {
		arena->stats.mapped += chunksize;
		arena->stats.metadata_mapped += (map_bias << LG_PAGE);
	}

	return (chunk);
}
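
/*
 * Note that chunks recycled via chunk_alloc_cache() are always
 * committed, which is why *commit is set unconditionally on the
 * cache-hit path above; only the chunk_alloc_wrapper() path in
 * arena_chunk_alloc_internal_hard() can yield an uncommitted chunk.
 */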

static arena_chunk_t *
arena_chunk_init_hard(tsd_t *tsd, arena_t *arena)
{
	arena_chunk_t *chunk;
	bool zero, commit;
	size_t flag_unzeroed, flag_decommitted, i;

	assert(arena->spare == NULL);

	zero = false;
	commit = false;
	chunk = arena_chunk_alloc_internal(tsd, arena, &zero, &commit);
	if (chunk == NULL)
		return (NULL);

	/*
	 * Initialize the map to contain one maximal free untouched run.  Mark
	 * the pages as zeroed if arena_chunk_alloc_internal() returned a zeroed
	 * or decommitted chunk.
	 */
	flag_unzeroed = (zero || !commit) ? 0 : CHUNK_MAP_UNZEROED;
	flag_decommitted = commit ? 0 : CHUNK_MAP_DECOMMITTED;
	arena_mapbits_unallocated_set(chunk, map_bias, arena_maxrun,
	    flag_unzeroed | flag_decommitted);
	/*
	 * There is no need to initialize the internal page map entries unless
	 * the chunk is not zeroed.
	 */
	if (!zero) {
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
		    (void *)arena_bitselm_get_const(chunk, map_bias+1),
		    (size_t)((uintptr_t)arena_bitselm_get_const(chunk,
		    chunk_npages-1) -
		    (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1)));
		for (i = map_bias+1; i < chunk_npages-1; i++)
			arena_mapbits_internal_set(chunk, i, flag_unzeroed);
	} else {
		JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
		    *)arena_bitselm_get_const(chunk, map_bias+1),
		    (size_t)((uintptr_t)arena_bitselm_get_const(chunk,
		    chunk_npages-1) -
		    (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1)));
		if (config_debug) {
			for (i = map_bias+1; i < chunk_npages-1; i++) {
				assert(arena_mapbits_unzeroed_get(chunk, i) ==
				    flag_unzeroed);
			}
		}
	}
	arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxrun,
	    flag_unzeroed);

	return (chunk);
}

static arena_chunk_t *
arena_chunk_alloc(tsd_t *tsd, arena_t *arena)
{
	arena_chunk_t *chunk;

	if (arena->spare != NULL)
		chunk = arena_chunk_init_spare(arena);
	else {
		chunk = arena_chunk_init_hard(tsd, arena);
		if (chunk == NULL)
			return (NULL);
	}

	ql_elm_new(&chunk->node, ql_link);
	ql_tail_insert(&arena->achunks, &chunk->node, ql_link);
	arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias);

	return (chunk);
}

static void
arena_chunk_discard(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk)
{
	bool committed;
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;

	chunk_deregister(chunk, &chunk->node);

	committed = (arena_mapbits_decommitted_get(chunk, map_bias) == 0);
	if (!committed) {
		/*
		 * Decommit the header.  Mark the chunk as decommitted even if
		 * header decommit fails, since treating a partially committed
		 * chunk as committed has a high potential for causing later
		 * access of decommitted memory.
		 */
		chunk_hooks = chunk_hooks_get(tsd, arena);
		chunk_hooks.decommit(chunk, chunksize, 0, map_bias << LG_PAGE,
		    arena->ind);
	}

	chunk_dalloc_cache(tsd, arena, &chunk_hooks, (void *)chunk, chunksize,
	    committed);

	if (config_stats) {
		arena->stats.mapped -= chunksize;
		arena->stats.metadata_mapped -= (map_bias << LG_PAGE);
	}
}

static void
arena_spare_discard(tsd_t *tsd, arena_t *arena, arena_chunk_t *spare)
{

	assert(arena->spare != spare);

	if (arena_mapbits_dirty_get(spare, map_bias) != 0) {
		arena_run_dirty_remove(arena, spare, map_bias,
		    chunk_npages-map_bias);
	}

	arena_chunk_discard(tsd, arena, spare);
}

static void
arena_chunk_dalloc(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk)
{
	arena_chunk_t *spare;

	assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
	assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
	assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
	    arena_maxrun);
	assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
	    arena_maxrun);
	assert(arena_mapbits_dirty_get(chunk, map_bias) ==
	    arena_mapbits_dirty_get(chunk, chunk_npages-1));
	assert(arena_mapbits_decommitted_get(chunk, map_bias) ==
	    arena_mapbits_decommitted_get(chunk, chunk_npages-1));

	/* Remove run from runs_avail, so that the arena does not use it. */
	arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias);

	ql_remove(&arena->achunks, &chunk->node, ql_link);
	spare = arena->spare;
	arena->spare = chunk;
	if (spare != NULL)
		arena_spare_discard(tsd, arena, spare);
}
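
/*
 * Huge stats are recorded per huge size class.  size2index() returns an
 * overall size class index, so subtracting the NBINS small classes and
 * nlclasses large classes below yields a zero-based index into
 * stats.hstats; the smallest huge class maps to index 0.
 */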

static void
arena_huge_malloc_stats_update(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.nmalloc_huge++;
	arena->stats.allocated_huge += usize;
	arena->stats.hstats[index].nmalloc++;
	arena->stats.hstats[index].curhchunks++;
}

static void
arena_huge_malloc_stats_update_undo(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.nmalloc_huge--;
	arena->stats.allocated_huge -= usize;
	arena->stats.hstats[index].nmalloc--;
	arena->stats.hstats[index].curhchunks--;
}

static void
arena_huge_dalloc_stats_update(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.ndalloc_huge++;
	arena->stats.allocated_huge -= usize;
	arena->stats.hstats[index].ndalloc++;
	arena->stats.hstats[index].curhchunks--;
}

static void
arena_huge_reset_stats_cancel(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.ndalloc_huge++;
	arena->stats.hstats[index].ndalloc--;
}

static void
arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.ndalloc_huge--;
	arena->stats.allocated_huge += usize;
	arena->stats.hstats[index].ndalloc--;
	arena->stats.hstats[index].curhchunks++;
}

static void
arena_huge_ralloc_stats_update(arena_t *arena, size_t oldsize, size_t usize)
{

	arena_huge_dalloc_stats_update(arena, oldsize);
	arena_huge_malloc_stats_update(arena, usize);
}

static void
arena_huge_ralloc_stats_update_undo(arena_t *arena, size_t oldsize,
    size_t usize)
{

	arena_huge_dalloc_stats_update_undo(arena, oldsize);
	arena_huge_malloc_stats_update_undo(arena, usize);
}

extent_node_t *
arena_node_alloc(tsd_t *tsd, arena_t *arena)
{
	extent_node_t *node;

	malloc_mutex_lock(tsd, &arena->node_cache_mtx);
	node = ql_last(&arena->node_cache, ql_link);
	if (node == NULL) {
		malloc_mutex_unlock(tsd, &arena->node_cache_mtx);
		return (base_alloc(tsd, sizeof(extent_node_t)));
	}
	ql_tail_remove(&arena->node_cache, extent_node_t, ql_link);
	malloc_mutex_unlock(tsd, &arena->node_cache_mtx);
	return (node);
}

void
arena_node_dalloc(tsd_t *tsd, arena_t *arena, extent_node_t *node)
{

	malloc_mutex_lock(tsd, &arena->node_cache_mtx);
	ql_elm_new(node, ql_link);
	ql_tail_insert(&arena->node_cache, node, ql_link);
	malloc_mutex_unlock(tsd, &arena->node_cache_mtx);
}
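
/*
 * The node_cache above exists because extent_node_t structures come from
 * base_alloc(), which is never freed; parking unused nodes on this list
 * lets huge allocation churn reuse them instead of growing the base
 * allocator monotonically.
 */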

static void *
arena_chunk_alloc_huge_hard(tsd_t *tsd, arena_t *arena,
    chunk_hooks_t *chunk_hooks, size_t usize, size_t alignment, bool *zero,
    size_t csize)
{
	void *ret;
	bool commit = true;

	ret = chunk_alloc_wrapper(tsd, arena, chunk_hooks, NULL, csize,
	    alignment, zero, &commit);
	if (ret == NULL) {
		/* Revert optimistic stats updates. */
		malloc_mutex_lock(tsd, &arena->lock);
		if (config_stats) {
			arena_huge_malloc_stats_update_undo(arena, usize);
			arena->stats.mapped -= usize;
		}
		arena_nactive_sub(arena, usize >> LG_PAGE);
		malloc_mutex_unlock(tsd, &arena->lock);
	}

	return (ret);
}

void *
arena_chunk_alloc_huge(tsd_t *tsd, arena_t *arena, size_t usize,
    size_t alignment, bool *zero)
{
	void *ret;
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
	size_t csize = CHUNK_CEILING(usize);

	malloc_mutex_lock(tsd, &arena->lock);

	/* Optimistically update stats. */
	if (config_stats) {
		arena_huge_malloc_stats_update(arena, usize);
		arena->stats.mapped += usize;
	}
	arena_nactive_add(arena, usize >> LG_PAGE);

	ret = chunk_alloc_cache(tsd, arena, &chunk_hooks, NULL, csize,
	    alignment, zero, true);
	malloc_mutex_unlock(tsd, &arena->lock);
	if (ret == NULL) {
		ret = arena_chunk_alloc_huge_hard(tsd, arena, &chunk_hooks,
		    usize, alignment, zero, csize);
	}

	return (ret);
}

void
arena_chunk_dalloc_huge(tsd_t *tsd, arena_t *arena, void *chunk, size_t usize)
{
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
	size_t csize;

	csize = CHUNK_CEILING(usize);
	malloc_mutex_lock(tsd, &arena->lock);
	if (config_stats) {
		arena_huge_dalloc_stats_update(arena, usize);
		arena->stats.mapped -= usize;
	}
	arena_nactive_sub(arena, usize >> LG_PAGE);

	chunk_dalloc_cache(tsd, arena, &chunk_hooks, chunk, csize, true);
	malloc_mutex_unlock(tsd, &arena->lock);
}

void
arena_chunk_ralloc_huge_similar(tsd_t *tsd, arena_t *arena, void *chunk,
    size_t oldsize, size_t usize)
{

	assert(CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize));
	assert(oldsize != usize);

	malloc_mutex_lock(tsd, &arena->lock);
	if (config_stats)
		arena_huge_ralloc_stats_update(arena, oldsize, usize);
	if (oldsize < usize)
		arena_nactive_add(arena, (usize - oldsize) >> LG_PAGE);
	else
		arena_nactive_sub(arena, (oldsize - usize) >> LG_PAGE);
	malloc_mutex_unlock(tsd, &arena->lock);
}

void
arena_chunk_ralloc_huge_shrink(tsd_t *tsd, arena_t *arena, void *chunk,
    size_t oldsize, size_t usize)
{
	size_t udiff = oldsize - usize;
	size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);

	malloc_mutex_lock(tsd, &arena->lock);
	if (config_stats) {
		arena_huge_ralloc_stats_update(arena, oldsize, usize);
		if (cdiff != 0)
			arena->stats.mapped -= cdiff;
	}
	arena_nactive_sub(arena, udiff >> LG_PAGE);

	if (cdiff != 0) {
		chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
		void *nchunk = (void *)((uintptr_t)chunk +
		    CHUNK_CEILING(usize));

		chunk_dalloc_cache(tsd, arena, &chunk_hooks, nchunk, cdiff,
		    true);
	}
	malloc_mutex_unlock(tsd, &arena->lock);
}
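
/*
 * The huge allocation and ralloc paths in this file share an optimistic
 * pattern: stats and nactive are updated under arena->lock before the
 * chunk operation, the lock is dropped for the potentially slow
 * chunk_alloc_wrapper() call, and the *_undo() helpers roll the updates
 * back only if that call fails.
 */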

static bool
arena_chunk_ralloc_huge_expand_hard(tsd_t *tsd, arena_t *arena,
    chunk_hooks_t *chunk_hooks, void *chunk, size_t oldsize, size_t usize,
    bool *zero, void *nchunk, size_t udiff, size_t cdiff)
{
	bool err;
	bool commit = true;

	err = (chunk_alloc_wrapper(tsd, arena, chunk_hooks, nchunk, cdiff,
	    chunksize, zero, &commit) == NULL);
	if (err) {
		/* Revert optimistic stats updates. */
		malloc_mutex_lock(tsd, &arena->lock);
		if (config_stats) {
			arena_huge_ralloc_stats_update_undo(arena, oldsize,
			    usize);
			arena->stats.mapped -= cdiff;
		}
		arena_nactive_sub(arena, udiff >> LG_PAGE);
		malloc_mutex_unlock(tsd, &arena->lock);
	} else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk,
	    cdiff, true, arena->ind)) {
		chunk_dalloc_wrapper(tsd, arena, chunk_hooks, nchunk, cdiff,
		    *zero, true);
		err = true;
	}
	return (err);
}

bool
arena_chunk_ralloc_huge_expand(tsd_t *tsd, arena_t *arena, void *chunk,
    size_t oldsize, size_t usize, bool *zero)
{
	bool err;
	chunk_hooks_t chunk_hooks = chunk_hooks_get(tsd, arena);
	void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize));
	size_t udiff = usize - oldsize;
	size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize);

	malloc_mutex_lock(tsd, &arena->lock);

	/* Optimistically update stats. */
	if (config_stats) {
		arena_huge_ralloc_stats_update(arena, oldsize, usize);
		arena->stats.mapped += cdiff;
	}
	arena_nactive_add(arena, udiff >> LG_PAGE);

	err = (chunk_alloc_cache(tsd, arena, &chunk_hooks, nchunk, cdiff,
	    chunksize, zero, true) == NULL);
	malloc_mutex_unlock(tsd, &arena->lock);
	if (err) {
		err = arena_chunk_ralloc_huge_expand_hard(tsd, arena,
		    &chunk_hooks, chunk, oldsize, usize, zero, nchunk, udiff,
		    cdiff);
	} else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk,
	    cdiff, true, arena->ind)) {
		chunk_dalloc_wrapper(tsd, arena, &chunk_hooks, nchunk, cdiff,
		    *zero, true);
		err = true;
	}

	return (err);
}

/*
 * Do first-best-fit run selection, i.e. select the lowest run that best fits.
 * Run sizes are indexed, so not all candidate runs are necessarily exactly the
 * same size.
 */
static arena_run_t *
arena_run_first_best_fit(arena_t *arena, size_t size)
{
	szind_t ind, i;

	ind = size2index(run_quantize_ceil(size));
	for (i = ind; i < runs_avail_nclasses + runs_avail_bias; i++) {
		arena_chunk_map_misc_t *miscelm = arena_run_heap_first(
		    arena_runs_avail_get(arena, i));
		if (miscelm != NULL)
			return (&miscelm->run);
	}

	return (NULL);
}
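
/*
 * Search sketch (indices illustrative): if size2index(run_quantize_ceil(
 * size)) selects heap 33 and heaps 33..35 are empty but heap 36 is not,
 * the lowest-addressed run from heap 36 is returned even though it is
 * larger than required; the caller trims the excess via
 * arena_run_split_*().
 */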

static arena_run_t *
arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
{
	arena_run_t *run = arena_run_first_best_fit(arena, s2u(size));
	if (run != NULL) {
		if (arena_run_split_large(arena, run, size, zero))
			run = NULL;
	}
	return (run);
}

static arena_run_t *
arena_run_alloc_large(tsd_t *tsd, arena_t *arena, size_t size, bool zero)
{
	arena_chunk_t *chunk;
	arena_run_t *run;

	assert(size <= arena_maxrun);
	assert(size == PAGE_CEILING(size));

	/* Search the arena's chunks for the lowest best fit. */
	run = arena_run_alloc_large_helper(arena, size, zero);
	if (run != NULL)
		return (run);

	/*
	 * No usable runs.  Create a new chunk from which to allocate the run.
	 */
	chunk = arena_chunk_alloc(tsd, arena);
	if (chunk != NULL) {
		run = &arena_miscelm_get_mutable(chunk, map_bias)->run;
		if (arena_run_split_large(arena, run, size, zero))
			run = NULL;
		return (run);
	}

	/*
	 * arena_chunk_alloc() failed, but another thread may have made
	 * sufficient memory available while this one dropped arena->lock in
	 * arena_chunk_alloc(), so search one more time.
	 */
	return (arena_run_alloc_large_helper(arena, size, zero));
}

static arena_run_t *
arena_run_alloc_small_helper(arena_t *arena, size_t size, szind_t binind)
{
	arena_run_t *run = arena_run_first_best_fit(arena, size);
	if (run != NULL) {
		if (arena_run_split_small(arena, run, size, binind))
			run = NULL;
	}
	return (run);
}

static arena_run_t *
arena_run_alloc_small(tsd_t *tsd, arena_t *arena, size_t size, szind_t binind)
{
	arena_chunk_t *chunk;
	arena_run_t *run;

	assert(size <= arena_maxrun);
	assert(size == PAGE_CEILING(size));
	assert(binind != BININD_INVALID);

	/* Search the arena's chunks for the lowest best fit. */
	run = arena_run_alloc_small_helper(arena, size, binind);
	if (run != NULL)
		return (run);

	/*
	 * No usable runs.  Create a new chunk from which to allocate the run.
	 */
	chunk = arena_chunk_alloc(tsd, arena);
	if (chunk != NULL) {
		run = &arena_miscelm_get_mutable(chunk, map_bias)->run;
		if (arena_run_split_small(arena, run, size, binind))
			run = NULL;
		return (run);
	}

	/*
	 * arena_chunk_alloc() failed, but another thread may have made
	 * sufficient memory available while this one dropped arena->lock in
	 * arena_chunk_alloc(), so search one more time.
	 */
	return (arena_run_alloc_small_helper(arena, size, binind));
}

static bool
arena_lg_dirty_mult_valid(ssize_t lg_dirty_mult)
{

	return (lg_dirty_mult >= -1 && lg_dirty_mult < (ssize_t)(sizeof(size_t)
	    << 3));
}

ssize_t
arena_lg_dirty_mult_get(tsd_t *tsd, arena_t *arena)
{
	ssize_t lg_dirty_mult;

	malloc_mutex_lock(tsd, &arena->lock);
	lg_dirty_mult = arena->lg_dirty_mult;
	malloc_mutex_unlock(tsd, &arena->lock);

	return (lg_dirty_mult);
}

bool
arena_lg_dirty_mult_set(tsd_t *tsd, arena_t *arena, ssize_t lg_dirty_mult)
{

	if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
		return (true);

	malloc_mutex_lock(tsd, &arena->lock);
	arena->lg_dirty_mult = lg_dirty_mult;
	arena_maybe_purge(tsd, arena);
	malloc_mutex_unlock(tsd, &arena->lock);

	return (false);
}
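
/*
 * In ratio mode, lg_dirty_mult bounds dirty pages relative to active
 * pages: purging starts once ndirty exceeds nactive >> lg_dirty_mult
 * (see arena_maybe_purge_ratio()).  For example, lg_dirty_mult == 3
 * tolerates up to one dirty page per eight active pages, and -1
 * disables ratio-based purging entirely.
 */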

static void
arena_decay_deadline_init(arena_t *arena)
{

	assert(opt_purge == purge_mode_decay);

	/*
	 * Generate a new deadline that is uniformly random within the next
	 * epoch after the current one.
	 */
	nstime_copy(&arena->decay_deadline, &arena->decay_epoch);
	nstime_add(&arena->decay_deadline, &arena->decay_interval);
	if (arena->decay_time > 0) {
		nstime_t jitter;

		nstime_init(&jitter, prng_range(&arena->decay_jitter_state,
		    nstime_ns(&arena->decay_interval)));
		nstime_add(&arena->decay_deadline, &jitter);
	}
}

static bool
arena_decay_deadline_reached(const arena_t *arena, const nstime_t *time)
{

	assert(opt_purge == purge_mode_decay);

	return (nstime_compare(&arena->decay_deadline, time) <= 0);
}

static size_t
arena_decay_backlog_npages_limit(const arena_t *arena)
{
	static const uint64_t h_steps[] = {
#define	STEP(step, h, x, y) \
		h,
		SMOOTHSTEP
#undef STEP
	};
	uint64_t sum;
	size_t npages_limit_backlog;
	unsigned i;

	assert(opt_purge == purge_mode_decay);

	/*
	 * For each element of decay_backlog, multiply by the corresponding
	 * fixed-point smoothstep decay factor.  Sum the products, then divide
	 * to round down to the nearest whole number of pages.
	 */
	sum = 0;
	for (i = 0; i < SMOOTHSTEP_NSTEPS; i++)
		sum += arena->decay_backlog[i] * h_steps[i];
	npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP);

	return (npages_limit_backlog);
}

static void
arena_decay_epoch_advance(arena_t *arena, const nstime_t *time)
{
	uint64_t nadvance_u64;
	nstime_t delta;
	size_t ndirty_delta;

	assert(opt_purge == purge_mode_decay);
	assert(arena_decay_deadline_reached(arena, time));

	nstime_copy(&delta, time);
	nstime_subtract(&delta, &arena->decay_epoch);
	nadvance_u64 = nstime_divide(&delta, &arena->decay_interval);
	assert(nadvance_u64 > 0);

	/* Add nadvance_u64 decay intervals to epoch. */
	nstime_copy(&delta, &arena->decay_interval);
	nstime_imultiply(&delta, nadvance_u64);
	nstime_add(&arena->decay_epoch, &delta);

	/* Set a new deadline. */
	arena_decay_deadline_init(arena);

	/* Update the backlog. */
	if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) {
		memset(arena->decay_backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
		    sizeof(size_t));
	} else {
		size_t nadvance_z = (size_t)nadvance_u64;

		assert((uint64_t)nadvance_z == nadvance_u64);

		memmove(arena->decay_backlog, &arena->decay_backlog[nadvance_z],
		    (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t));
		if (nadvance_z > 1) {
			memset(&arena->decay_backlog[SMOOTHSTEP_NSTEPS -
			    nadvance_z], 0, (nadvance_z-1) * sizeof(size_t));
		}
	}
	ndirty_delta = (arena->ndirty > arena->decay_ndirty) ? arena->ndirty -
	    arena->decay_ndirty : 0;
	arena->decay_ndirty = arena->ndirty;
	arena->decay_backlog[SMOOTHSTEP_NSTEPS-1] = ndirty_delta;
	arena->decay_backlog_npages_limit =
	    arena_decay_backlog_npages_limit(arena);
}
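
/*
 * Backlog orientation: decay_backlog[0] holds the dirty-page delta from
 * the oldest tracked epoch and decay_backlog[SMOOTHSTEP_NSTEPS-1] the
 * newest, while h_steps[] ramps from near zero up to ~2^SMOOTHSTEP_BFP.
 * Thus freshly dirtied pages count almost fully toward the allowed
 * limit, and pages dirtied nearly decay_time ago have decayed to almost
 * no allowance.
 */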

static size_t
arena_decay_npages_limit(arena_t *arena)
{
	size_t npages_limit;

	assert(opt_purge == purge_mode_decay);

	npages_limit = arena->decay_backlog_npages_limit;

	/* Add in any dirty pages created during the current epoch. */
	if (arena->ndirty > arena->decay_ndirty)
		npages_limit += arena->ndirty - arena->decay_ndirty;

	return (npages_limit);
}

static void
arena_decay_init(arena_t *arena, ssize_t decay_time)
{

	arena->decay_time = decay_time;
	if (decay_time > 0) {
		nstime_init2(&arena->decay_interval, decay_time, 0);
		nstime_idivide(&arena->decay_interval, SMOOTHSTEP_NSTEPS);
	}

	nstime_init(&arena->decay_epoch, 0);
	nstime_update(&arena->decay_epoch);
	arena->decay_jitter_state = (uint64_t)(uintptr_t)arena;
	arena_decay_deadline_init(arena);
	arena->decay_ndirty = arena->ndirty;
	arena->decay_backlog_npages_limit = 0;
	memset(arena->decay_backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
}

static bool
arena_decay_time_valid(ssize_t decay_time)
{

	if (decay_time < -1)
		return (false);
	if (decay_time == -1 || (uint64_t)decay_time <= NSTIME_SEC_MAX)
		return (true);
	return (false);
}

ssize_t
arena_decay_time_get(tsd_t *tsd, arena_t *arena)
{
	ssize_t decay_time;

	malloc_mutex_lock(tsd, &arena->lock);
	decay_time = arena->decay_time;
	malloc_mutex_unlock(tsd, &arena->lock);

	return (decay_time);
}

bool
arena_decay_time_set(tsd_t *tsd, arena_t *arena, ssize_t decay_time)
{

	if (!arena_decay_time_valid(decay_time))
		return (true);

	malloc_mutex_lock(tsd, &arena->lock);
	/*
	 * Restart decay backlog from scratch, which may cause many dirty pages
	 * to be immediately purged.  It would conceptually be possible to map
	 * the old backlog onto the new backlog, but there is no justification
	 * for such complexity since decay_time changes are intended to be
	 * infrequent, either between the {-1, 0, >0} states, or a one-time
	 * arbitrary change during initial arena configuration.
	 */
	arena_decay_init(arena, decay_time);
	arena_maybe_purge(tsd, arena);
	malloc_mutex_unlock(tsd, &arena->lock);

	return (false);
}
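
/*
 * decay_time has three regimes, handled in arena_maybe_purge_decay()
 * below: -1 disables decay-driven purging, 0 purges all dirty pages
 * immediately, and a positive value lets dirty pages decay smoothly
 * over roughly that many seconds, in SMOOTHSTEP_NSTEPS epochs.
 */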

static void
arena_maybe_purge_ratio(tsd_t *tsd, arena_t *arena)
{

	assert(opt_purge == purge_mode_ratio);

	/* Don't purge if the option is disabled. */
	if (arena->lg_dirty_mult < 0)
		return;

	/*
	 * Iterate, since preventing recursive purging could otherwise leave too
	 * many dirty pages.
	 */
	while (true) {
		size_t threshold = (arena->nactive >> arena->lg_dirty_mult);
		if (threshold < chunk_npages)
			threshold = chunk_npages;
		/*
		 * Don't purge unless the number of purgeable pages exceeds the
		 * threshold.
		 */
		if (arena->ndirty <= threshold)
			return;
		arena_purge_to_limit(tsd, arena, threshold);
	}
}

static void
arena_maybe_purge_decay(tsd_t *tsd, arena_t *arena)
{
	nstime_t time;
	size_t ndirty_limit;

	assert(opt_purge == purge_mode_decay);

	/* Purge all or nothing if the option is disabled. */
	if (arena->decay_time <= 0) {
		if (arena->decay_time == 0)
			arena_purge_to_limit(tsd, arena, 0);
		return;
	}

	nstime_copy(&time, &arena->decay_epoch);
	if (unlikely(nstime_update(&time))) {
		/* Time went backwards.  Force an epoch advance. */
		nstime_copy(&time, &arena->decay_deadline);
	}

	if (arena_decay_deadline_reached(arena, &time))
		arena_decay_epoch_advance(arena, &time);

	ndirty_limit = arena_decay_npages_limit(arena);

	/*
	 * Don't try to purge unless the number of purgeable pages exceeds the
	 * current limit.
	 */
	if (arena->ndirty <= ndirty_limit)
		return;
	arena_purge_to_limit(tsd, arena, ndirty_limit);
}

void
arena_maybe_purge(tsd_t *tsd, arena_t *arena)
{

	/* Don't recursively purge. */
	if (arena->purging)
		return;

	if (opt_purge == purge_mode_ratio)
		arena_maybe_purge_ratio(tsd, arena);
	else
		arena_maybe_purge_decay(tsd, arena);
}

static size_t
arena_dirty_count(arena_t *arena)
{
	size_t ndirty = 0;
	arena_runs_dirty_link_t *rdelm;
	extent_node_t *chunkselm;

	for (rdelm = qr_next(&arena->runs_dirty, rd_link),
	    chunkselm = qr_next(&arena->chunks_cache, cc_link);
	    rdelm != &arena->runs_dirty; rdelm = qr_next(rdelm, rd_link)) {
		size_t npages;

		if (rdelm == &chunkselm->rd) {
			npages = extent_node_size_get(chunkselm) >> LG_PAGE;
			chunkselm = qr_next(chunkselm, cc_link);
		} else {
			arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
			    rdelm);
			arena_chunk_map_misc_t *miscelm =
			    arena_rd_to_miscelm(rdelm);
			size_t pageind = arena_miscelm_to_pageind(miscelm);
			assert(arena_mapbits_allocated_get(chunk, pageind) ==
			    0);
			assert(arena_mapbits_large_get(chunk, pageind) == 0);
			assert(arena_mapbits_dirty_get(chunk, pageind) != 0);
			npages = arena_mapbits_unallocated_size_get(chunk,
			    pageind) >> LG_PAGE;
		}
		ndirty += npages;
	}

	return (ndirty);
}
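
/*
 * arena->runs_dirty and arena->chunks_cache form interleaved rings: each
 * cached chunk's extent node carries an rd linkage that is spliced into
 * runs_dirty, so one traversal (as in arena_dirty_count() above and
 * arena_stash_dirty() below) visits both kinds of element, and the
 * (rdelm == &chunkselm->rd) test tells a cached chunk apart from a dirty
 * run.
 */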

static size_t
arena_stash_dirty(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
    size_t ndirty_limit, arena_runs_dirty_link_t *purge_runs_sentinel,
    extent_node_t *purge_chunks_sentinel)
{
	arena_runs_dirty_link_t *rdelm, *rdelm_next;
	extent_node_t *chunkselm;
	size_t nstashed = 0;

	/* Stash runs/chunks according to ndirty_limit. */
	for (rdelm = qr_next(&arena->runs_dirty, rd_link),
	    chunkselm = qr_next(&arena->chunks_cache, cc_link);
	    rdelm != &arena->runs_dirty; rdelm = rdelm_next) {
		size_t npages;
		rdelm_next = qr_next(rdelm, rd_link);

		if (rdelm == &chunkselm->rd) {
			extent_node_t *chunkselm_next;
			bool zero;
			UNUSED void *chunk;

			npages = extent_node_size_get(chunkselm) >> LG_PAGE;
			if (opt_purge == purge_mode_decay && arena->ndirty -
			    (nstashed + npages) < ndirty_limit)
				break;

			chunkselm_next = qr_next(chunkselm, cc_link);
			/*
			 * Allocate.  chunkselm remains valid due to the
			 * dalloc_node=false argument to chunk_alloc_cache().
			 */
			zero = false;
			chunk = chunk_alloc_cache(tsd, arena, chunk_hooks,
			    extent_node_addr_get(chunkselm),
			    extent_node_size_get(chunkselm), chunksize, &zero,
			    false);
			assert(chunk == extent_node_addr_get(chunkselm));
			assert(zero == extent_node_zeroed_get(chunkselm));
			extent_node_dirty_insert(chunkselm, purge_runs_sentinel,
			    purge_chunks_sentinel);
			assert(npages == (extent_node_size_get(chunkselm) >>
			    LG_PAGE));
			chunkselm = chunkselm_next;
		} else {
			arena_chunk_t *chunk =
			    (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
			arena_chunk_map_misc_t *miscelm =
			    arena_rd_to_miscelm(rdelm);
			size_t pageind = arena_miscelm_to_pageind(miscelm);
			arena_run_t *run = &miscelm->run;
			size_t run_size =
			    arena_mapbits_unallocated_size_get(chunk, pageind);

			npages = run_size >> LG_PAGE;
			if (opt_purge == purge_mode_decay && arena->ndirty -
			    (nstashed + npages) < ndirty_limit)
				break;

			assert(pageind + npages <= chunk_npages);
			assert(arena_mapbits_dirty_get(chunk, pageind) ==
			    arena_mapbits_dirty_get(chunk, pageind+npages-1));

			/*
			 * If purging the spare chunk's run, make it available
			 * prior to allocation.
			 */
			if (chunk == arena->spare)
				arena_chunk_alloc(tsd, arena);

			/* Temporarily allocate the free dirty run. */
			arena_run_split_large(arena, run, run_size, false);
			/* Stash. */
			if (false)
				qr_new(rdelm, rd_link); /* Redundant. */
			else {
				assert(qr_next(rdelm, rd_link) == rdelm);
				assert(qr_prev(rdelm, rd_link) == rdelm);
			}
			qr_meld(purge_runs_sentinel, rdelm, rd_link);
		}

		nstashed += npages;
		if (opt_purge == purge_mode_ratio && arena->ndirty - nstashed <=
		    ndirty_limit)
			break;
	}

	return (nstashed);
}
1662 */ 1663 size_t size = extent_node_size_get(chunkselm); 1664 npages = size >> LG_PAGE; 1665 chunkselm = qr_next(chunkselm, cc_link); 1666 } else { 1667 size_t pageind, run_size, flag_unzeroed, flags, i; 1668 bool decommitted; 1669 arena_chunk_t *chunk = 1670 (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm); 1671 arena_chunk_map_misc_t *miscelm = 1672 arena_rd_to_miscelm(rdelm); 1673 pageind = arena_miscelm_to_pageind(miscelm); 1674 run_size = arena_mapbits_large_size_get(chunk, pageind); 1675 npages = run_size >> LG_PAGE; 1676 1677 assert(pageind + npages <= chunk_npages); 1678 assert(!arena_mapbits_decommitted_get(chunk, pageind)); 1679 assert(!arena_mapbits_decommitted_get(chunk, 1680 pageind+npages-1)); 1681 decommitted = !chunk_hooks->decommit(chunk, chunksize, 1682 pageind << LG_PAGE, npages << LG_PAGE, arena->ind); 1683 if (decommitted) { 1684 flag_unzeroed = 0; 1685 flags = CHUNK_MAP_DECOMMITTED; 1686 } else { 1687 flag_unzeroed = chunk_purge_wrapper(tsd, arena, 1688 chunk_hooks, chunk, chunksize, pageind << 1689 LG_PAGE, run_size) ? CHUNK_MAP_UNZEROED : 0; 1690 flags = flag_unzeroed; 1691 } 1692 arena_mapbits_large_set(chunk, pageind+npages-1, 0, 1693 flags); 1694 arena_mapbits_large_set(chunk, pageind, run_size, 1695 flags); 1696 1697 /* 1698 * Set the unzeroed flag for internal pages, now that 1699 * chunk_purge_wrapper() has returned whether the pages 1700 * were zeroed as a side effect of purging. This chunk 1701 * map modification is safe even though the arena mutex 1702 * isn't currently owned by this thread, because the run 1703 * is marked as allocated, thus protecting it from being 1704 * modified by any other thread. As long as these 1705 * writes don't perturb the first and last elements' 1706 * CHUNK_MAP_ALLOCATED bits, behavior is well defined. 1707 */ 1708 for (i = 1; i < npages-1; i++) { 1709 arena_mapbits_internal_set(chunk, pageind+i, 1710 flag_unzeroed); 1711 } 1712 } 1713 1714 npurged += npages; 1715 if (config_stats) 1716 nmadvise++; 1717 } 1718 malloc_mutex_lock(tsd, &arena->lock); 1719 1720 if (config_stats) { 1721 arena->stats.nmadvise += nmadvise; 1722 arena->stats.purged += npurged; 1723 } 1724 1725 return (npurged); 1726} 1727 1728static void 1729arena_unstash_purged(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks, 1730 arena_runs_dirty_link_t *purge_runs_sentinel, 1731 extent_node_t *purge_chunks_sentinel) 1732{ 1733 arena_runs_dirty_link_t *rdelm, *rdelm_next; 1734 extent_node_t *chunkselm; 1735 1736 /* Deallocate chunks/runs. 

static void
arena_unstash_purged(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
    arena_runs_dirty_link_t *purge_runs_sentinel,
    extent_node_t *purge_chunks_sentinel)
{
	arena_runs_dirty_link_t *rdelm, *rdelm_next;
	extent_node_t *chunkselm;

	/* Deallocate chunks/runs. */
	for (rdelm = qr_next(purge_runs_sentinel, rd_link),
	    chunkselm = qr_next(purge_chunks_sentinel, cc_link);
	    rdelm != purge_runs_sentinel; rdelm = rdelm_next) {
		rdelm_next = qr_next(rdelm, rd_link);
		if (rdelm == &chunkselm->rd) {
			extent_node_t *chunkselm_next = qr_next(chunkselm,
			    cc_link);
			void *addr = extent_node_addr_get(chunkselm);
			size_t size = extent_node_size_get(chunkselm);
			bool zeroed = extent_node_zeroed_get(chunkselm);
			bool committed = extent_node_committed_get(chunkselm);
			extent_node_dirty_remove(chunkselm);
			arena_node_dalloc(tsd, arena, chunkselm);
			chunkselm = chunkselm_next;
			chunk_dalloc_wrapper(tsd, arena, chunk_hooks, addr,
			    size, zeroed, committed);
		} else {
			arena_chunk_t *chunk =
			    (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
			arena_chunk_map_misc_t *miscelm =
			    arena_rd_to_miscelm(rdelm);
			size_t pageind = arena_miscelm_to_pageind(miscelm);
			bool decommitted = (arena_mapbits_decommitted_get(chunk,
			    pageind) != 0);
			arena_run_t *run = &miscelm->run;
			qr_remove(rdelm, rd_link);
			arena_run_dalloc(tsd, arena, run, false, true,
			    decommitted);
		}
	}
}

/*
 * NB: ndirty_limit is interpreted differently depending on opt_purge:
 * - purge_mode_ratio: Purge as few dirty run/chunks as possible to reach the
 *   desired state:
 *   (arena->ndirty <= ndirty_limit)
 * - purge_mode_decay: Purge as many dirty runs/chunks as possible without
 *   violating the invariant:
 *   (arena->ndirty >= ndirty_limit)
 */
static void
arena_purge_to_limit(tsd_t *tsd, arena_t *arena, size_t ndirty_limit)
{
	chunk_hooks_t chunk_hooks = chunk_hooks_get(tsd, arena);
	size_t npurge, npurged;
	arena_runs_dirty_link_t purge_runs_sentinel;
	extent_node_t purge_chunks_sentinel;

	arena->purging = true;

	/*
	 * Calls to arena_dirty_count() are disabled even for debug builds
	 * because overhead grows nonlinearly as memory usage increases.
	 */
	if (false && config_debug) {
		size_t ndirty = arena_dirty_count(arena);
		assert(ndirty == arena->ndirty);
	}
	assert(opt_purge != purge_mode_ratio || (arena->nactive >>
	    arena->lg_dirty_mult) < arena->ndirty || ndirty_limit == 0);

	qr_new(&purge_runs_sentinel, rd_link);
	extent_node_dirty_linkage_init(&purge_chunks_sentinel);

	npurge = arena_stash_dirty(tsd, arena, &chunk_hooks, ndirty_limit,
	    &purge_runs_sentinel, &purge_chunks_sentinel);
	if (npurge == 0)
		goto label_return;
	npurged = arena_purge_stashed(tsd, arena, &chunk_hooks,
	    &purge_runs_sentinel, &purge_chunks_sentinel);
	assert(npurged == npurge);
	arena_unstash_purged(tsd, arena, &chunk_hooks, &purge_runs_sentinel,
	    &purge_chunks_sentinel);

	if (config_stats)
		arena->stats.npurge++;

label_return:
	arena->purging = false;
}

void
arena_purge(tsd_t *tsd, arena_t *arena, bool all)
{

	malloc_mutex_lock(tsd, &arena->lock);
	if (all)
		arena_purge_to_limit(tsd, arena, 0);
	else
		arena_maybe_purge(tsd, arena);
	malloc_mutex_unlock(tsd, &arena->lock);
}
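
/*
 * arena_purge() with all=true (e.g. reached via the "arena.<i>.purge"
 * mallctl) forces a full purge by passing ndirty_limit == 0, regardless
 * of the configured purge mode; with all=false it defers to the
 * mode-specific maybe-purge logic above.
 */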
	 */
	for (pageind = map_bias; pageind < chunk_npages; pageind += npages) {
		if (arena_mapbits_allocated_get(chunk, pageind) != 0) {
			if (arena_mapbits_large_get(chunk, pageind) != 0) {
				void *ptr = (void *)((uintptr_t)chunk + (pageind
				    << LG_PAGE));
				size_t usize = isalloc(tsd, ptr, config_prof);

				prof_free(tsd, ptr, usize);
				npages = arena_mapbits_large_size_get(chunk,
				    pageind) >> LG_PAGE;
			} else {
				/* Skip small run. */
				size_t binind = arena_mapbits_binind_get(chunk,
				    pageind);
				arena_bin_info_t *bin_info =
				    &arena_bin_info[binind];
				npages = bin_info->run_size >> LG_PAGE;
			}
		} else {
			/* Skip unallocated run. */
			npages = arena_mapbits_unallocated_size_get(chunk,
			    pageind) >> LG_PAGE;
		}
		assert(pageind + npages <= chunk_npages);
	}
}

void
arena_reset(tsd_t *tsd, arena_t *arena)
{
	unsigned i;
	extent_node_t *node;

	/*
	 * Locking in this function is unintuitive.  The caller guarantees that
	 * no concurrent operations are happening in this arena, but there are
	 * still reasons that some locking is necessary:
	 *
	 * - Some of the functions in the transitive closure of calls assume
	 *   appropriate locks are held, and in some cases these locks are
	 *   temporarily dropped to avoid lock order reversal or deadlock due to
	 *   reentry.
	 * - mallctl("epoch", ...) may concurrently refresh stats.  While
	 *   strictly speaking this is a "concurrent operation", disallowing
	 *   stats refreshes would impose an inconvenient burden.
	 */

	/* Remove large allocations from prof sample set. */
	if (config_prof && opt_prof) {
		ql_foreach(node, &arena->achunks, ql_link) {
			arena_achunk_prof_reset(tsd, arena,
			    extent_node_addr_get(node));
		}
	}

	/* Reset curruns for large size classes. */
	if (config_stats) {
		for (i = 0; i < nlclasses; i++)
			arena->stats.lstats[i].curruns = 0;
	}

	/* Huge allocations. */
	malloc_mutex_lock(tsd, &arena->huge_mtx);
	for (node = ql_last(&arena->huge, ql_link); node != NULL; node =
	    ql_last(&arena->huge, ql_link)) {
		void *ptr = extent_node_addr_get(node);
		size_t usize;

		malloc_mutex_unlock(tsd, &arena->huge_mtx);
		if (config_stats || (config_prof && opt_prof))
			usize = isalloc(tsd, ptr, config_prof);
		/* Remove huge allocation from prof sample set. */
		if (config_prof && opt_prof)
			prof_free(tsd, ptr, usize);
		huge_dalloc(tsd, ptr);
		malloc_mutex_lock(tsd, &arena->huge_mtx);
		/* Cancel out unwanted effects on stats. */
		if (config_stats)
			arena_huge_reset_stats_cancel(arena, usize);
	}
	malloc_mutex_unlock(tsd, &arena->huge_mtx);

	malloc_mutex_lock(tsd, &arena->lock);

	/* Bins. */
	for (i = 0; i < NBINS; i++) {
		arena_bin_t *bin = &arena->bins[i];
		malloc_mutex_lock(tsd, &bin->lock);
		bin->runcur = NULL;
		arena_run_heap_new(&bin->runs);
		if (config_stats) {
			bin->stats.curregs = 0;
			bin->stats.curruns = 0;
		}
		malloc_mutex_unlock(tsd, &bin->lock);
	}

	/*
	 * Re-initialize runs_dirty such that the chunks_cache and runs_dirty
	 * chains directly correspond.
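	 * After the loop below, runs_dirty contains exactly one rd element per
	 * cached chunk, spliced in via qr_meld() in chunks_cache order, so the
	 * two rings can again be traversed in lockstep by the purge logic.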
1943 */ 1944 qr_new(&arena->runs_dirty, rd_link); 1945 for (node = qr_next(&arena->chunks_cache, cc_link); 1946 node != &arena->chunks_cache; node = qr_next(node, cc_link)) { 1947 qr_new(&node->rd, rd_link); 1948 qr_meld(&arena->runs_dirty, &node->rd, rd_link); 1949 } 1950 1951 /* Arena chunks. */ 1952 for (node = ql_last(&arena->achunks, ql_link); node != NULL; node = 1953 ql_last(&arena->achunks, ql_link)) { 1954 ql_remove(&arena->achunks, node, ql_link); 1955 arena_chunk_discard(tsd, arena, extent_node_addr_get(node)); 1956 } 1957 1958 /* Spare. */ 1959 if (arena->spare != NULL) { 1960 arena_chunk_discard(tsd, arena, arena->spare); 1961 arena->spare = NULL; 1962 } 1963 1964 assert(!arena->purging); 1965 arena->nactive = 0; 1966 1967 for(i = 0; i < runs_avail_nclasses; i++) 1968 arena_run_heap_new(&arena->runs_avail[i]); 1969 1970 malloc_mutex_unlock(tsd, &arena->lock); 1971} 1972 1973static void 1974arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size, 1975 size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty, 1976 size_t flag_decommitted) 1977{ 1978 size_t size = *p_size; 1979 size_t run_ind = *p_run_ind; 1980 size_t run_pages = *p_run_pages; 1981 1982 /* Try to coalesce forward. */ 1983 if (run_ind + run_pages < chunk_npages && 1984 arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 && 1985 arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty && 1986 arena_mapbits_decommitted_get(chunk, run_ind+run_pages) == 1987 flag_decommitted) { 1988 size_t nrun_size = arena_mapbits_unallocated_size_get(chunk, 1989 run_ind+run_pages); 1990 size_t nrun_pages = nrun_size >> LG_PAGE; 1991 1992 /* 1993 * Remove successor from runs_avail; the coalesced run is 1994 * inserted later. 1995 */ 1996 assert(arena_mapbits_unallocated_size_get(chunk, 1997 run_ind+run_pages+nrun_pages-1) == nrun_size); 1998 assert(arena_mapbits_dirty_get(chunk, 1999 run_ind+run_pages+nrun_pages-1) == flag_dirty); 2000 assert(arena_mapbits_decommitted_get(chunk, 2001 run_ind+run_pages+nrun_pages-1) == flag_decommitted); 2002 arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages); 2003 2004 /* 2005 * If the successor is dirty, remove it from the set of dirty 2006 * pages. 2007 */ 2008 if (flag_dirty != 0) { 2009 arena_run_dirty_remove(arena, chunk, run_ind+run_pages, 2010 nrun_pages); 2011 } 2012 2013 size += nrun_size; 2014 run_pages += nrun_pages; 2015 2016 arena_mapbits_unallocated_size_set(chunk, run_ind, size); 2017 arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1, 2018 size); 2019 } 2020 2021 /* Try to coalesce backward. */ 2022 if (run_ind > map_bias && arena_mapbits_allocated_get(chunk, 2023 run_ind-1) == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) == 2024 flag_dirty && arena_mapbits_decommitted_get(chunk, run_ind-1) == 2025 flag_decommitted) { 2026 size_t prun_size = arena_mapbits_unallocated_size_get(chunk, 2027 run_ind-1); 2028 size_t prun_pages = prun_size >> LG_PAGE; 2029 2030 run_ind -= prun_pages; 2031 2032 /* 2033 * Remove predecessor from runs_avail; the coalesced run is 2034 * inserted later. 2035 */ 2036 assert(arena_mapbits_unallocated_size_get(chunk, run_ind) == 2037 prun_size); 2038 assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty); 2039 assert(arena_mapbits_decommitted_get(chunk, run_ind) == 2040 flag_decommitted); 2041 arena_avail_remove(arena, chunk, run_ind, prun_pages); 2042 2043 /* 2044 * If the predecessor is dirty, remove it from the set of dirty 2045 * pages. 
2046 */ 2047 if (flag_dirty != 0) { 2048 arena_run_dirty_remove(arena, chunk, run_ind, 2049 prun_pages); 2050 } 2051 2052 size += prun_size; 2053 run_pages += prun_pages; 2054 2055 arena_mapbits_unallocated_size_set(chunk, run_ind, size); 2056 arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1, 2057 size); 2058 } 2059 2060 *p_size = size; 2061 *p_run_ind = run_ind; 2062 *p_run_pages = run_pages; 2063} 2064 2065static size_t 2066arena_run_size_get(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, 2067 size_t run_ind) 2068{ 2069 size_t size; 2070 2071 assert(run_ind >= map_bias); 2072 assert(run_ind < chunk_npages); 2073 2074 if (arena_mapbits_large_get(chunk, run_ind) != 0) { 2075 size = arena_mapbits_large_size_get(chunk, run_ind); 2076 assert(size == PAGE || arena_mapbits_large_size_get(chunk, 2077 run_ind+(size>>LG_PAGE)-1) == 0); 2078 } else { 2079 arena_bin_info_t *bin_info = &arena_bin_info[run->binind]; 2080 size = bin_info->run_size; 2081 } 2082 2083 return (size); 2084} 2085 2086static void 2087arena_run_dalloc(tsd_t *tsd, arena_t *arena, arena_run_t *run, bool dirty, 2088 bool cleaned, bool decommitted) 2089{ 2090 arena_chunk_t *chunk; 2091 arena_chunk_map_misc_t *miscelm; 2092 size_t size, run_ind, run_pages, flag_dirty, flag_decommitted; 2093 2094 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); 2095 miscelm = arena_run_to_miscelm(run); 2096 run_ind = arena_miscelm_to_pageind(miscelm); 2097 assert(run_ind >= map_bias); 2098 assert(run_ind < chunk_npages); 2099 size = arena_run_size_get(arena, chunk, run, run_ind); 2100 run_pages = (size >> LG_PAGE); 2101 arena_nactive_sub(arena, run_pages); 2102 2103 /* 2104 * The run is dirty if the caller claims to have dirtied it, as well as 2105 * if it was already dirty before being allocated and the caller 2106 * doesn't claim to have cleaned it. 2107 */ 2108 assert(arena_mapbits_dirty_get(chunk, run_ind) == 2109 arena_mapbits_dirty_get(chunk, run_ind+run_pages-1)); 2110 if (!cleaned && !decommitted && arena_mapbits_dirty_get(chunk, run_ind) 2111 != 0) 2112 dirty = true; 2113 flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0; 2114 flag_decommitted = decommitted ? CHUNK_MAP_DECOMMITTED : 0; 2115 2116 /* Mark pages as unallocated in the chunk map. */ 2117 if (dirty || decommitted) { 2118 size_t flags = flag_dirty | flag_decommitted; 2119 arena_mapbits_unallocated_set(chunk, run_ind, size, flags); 2120 arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size, 2121 flags); 2122 } else { 2123 arena_mapbits_unallocated_set(chunk, run_ind, size, 2124 arena_mapbits_unzeroed_get(chunk, run_ind)); 2125 arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size, 2126 arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1)); 2127 } 2128 2129 arena_run_coalesce(arena, chunk, &size, &run_ind, &run_pages, 2130 flag_dirty, flag_decommitted); 2131 2132 /* Insert into runs_avail, now that coalescing is complete. */ 2133 assert(arena_mapbits_unallocated_size_get(chunk, run_ind) == 2134 arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1)); 2135 assert(arena_mapbits_dirty_get(chunk, run_ind) == 2136 arena_mapbits_dirty_get(chunk, run_ind+run_pages-1)); 2137 assert(arena_mapbits_decommitted_get(chunk, run_ind) == 2138 arena_mapbits_decommitted_get(chunk, run_ind+run_pages-1)); 2139 arena_avail_insert(arena, chunk, run_ind, run_pages); 2140 2141 if (dirty) 2142 arena_run_dirty_insert(arena, chunk, run_ind, run_pages); 2143 2144 /* Deallocate chunk if it is now completely unused. 
	 */
	if (size == arena_maxrun) {
		assert(run_ind == map_bias);
		assert(run_pages == (arena_maxrun >> LG_PAGE));
		arena_chunk_dalloc(tsd, arena, chunk);
	}

	/*
	 * It is okay to do dirty page processing here even if the chunk was
	 * deallocated above, since in that case it is the spare.  Waiting
	 * until after possible chunk deallocation to do dirty processing
	 * allows for an old spare to be fully deallocated, thus decreasing the
	 * chances of spuriously crossing the dirty page purging threshold.
	 */
	if (dirty)
		arena_maybe_purge(tsd, arena);
}

static void
arena_run_trim_head(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, size_t oldsize, size_t newsize)
{
	arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
	size_t pageind = arena_miscelm_to_pageind(miscelm);
	size_t head_npages = (oldsize - newsize) >> LG_PAGE;
	size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
	size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind);
	size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
	    CHUNK_MAP_UNZEROED : 0;

	assert(oldsize > newsize);

	/*
	 * Update the chunk map so that arena_run_dalloc() can treat the
	 * leading run as separately allocated.  Set the last element of each
	 * run first, in case of single-page runs.
	 */
	assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
	arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty |
	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
	    pageind+head_npages-1)));
	arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty |
	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind)));

	if (config_debug) {
		UNUSED size_t tail_npages = newsize >> LG_PAGE;
		assert(arena_mapbits_large_size_get(chunk,
		    pageind+head_npages+tail_npages-1) == 0);
		assert(arena_mapbits_dirty_get(chunk,
		    pageind+head_npages+tail_npages-1) == flag_dirty);
	}
	arena_mapbits_large_set(chunk, pageind+head_npages, newsize,
	    flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
	    pageind+head_npages)));

	arena_run_dalloc(tsd, arena, run, false, false, (flag_decommitted !=
	    0));
}

static void
arena_run_trim_tail(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, size_t oldsize, size_t newsize, bool dirty)
{
	arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
	size_t pageind = arena_miscelm_to_pageind(miscelm);
	size_t head_npages = newsize >> LG_PAGE;
	size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
	size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind);
	size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
	    CHUNK_MAP_UNZEROED : 0;
	arena_chunk_map_misc_t *tail_miscelm;
	arena_run_t *tail_run;

	assert(oldsize > newsize);

	/*
	 * Update the chunk map so that arena_run_dalloc() can treat the
	 * trailing run as separately allocated.  Set the last element of each
	 * run first, in case of single-page runs.
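	 * For example, trimming a 5-page run down to 3 pages rewrites the map
	 * so that pages [0..3) remain the allocated head while pages [3..5)
	 * become an independent 2-page large run, which is then released via
	 * arena_run_dalloc() below.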
2223 */ 2224 assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize); 2225 arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty | 2226 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, 2227 pageind+head_npages-1))); 2228 arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty | 2229 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind))); 2230 2231 if (config_debug) { 2232 UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE; 2233 assert(arena_mapbits_large_size_get(chunk, 2234 pageind+head_npages+tail_npages-1) == 0); 2235 assert(arena_mapbits_dirty_get(chunk, 2236 pageind+head_npages+tail_npages-1) == flag_dirty); 2237 } 2238 arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize, 2239 flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, 2240 pageind+head_npages))); 2241 2242 tail_miscelm = arena_miscelm_get_mutable(chunk, pageind + head_npages); 2243 tail_run = &tail_miscelm->run; 2244 arena_run_dalloc(tsd, arena, tail_run, dirty, false, (flag_decommitted 2245 != 0)); 2246} 2247 2248static void 2249arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run) 2250{ 2251 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); 2252 2253 arena_run_heap_insert(&bin->runs, miscelm); 2254} 2255 2256static arena_run_t * 2257arena_bin_nonfull_run_tryget(arena_bin_t *bin) 2258{ 2259 arena_chunk_map_misc_t *miscelm; 2260 2261 miscelm = arena_run_heap_remove_first(&bin->runs); 2262 if (miscelm == NULL) 2263 return (NULL); 2264 if (config_stats) 2265 bin->stats.reruns++; 2266 2267 return (&miscelm->run); 2268} 2269 2270static arena_run_t * 2271arena_bin_nonfull_run_get(tsd_t *tsd, arena_t *arena, arena_bin_t *bin) 2272{ 2273 arena_run_t *run; 2274 szind_t binind; 2275 arena_bin_info_t *bin_info; 2276 2277 /* Look for a usable run. */ 2278 run = arena_bin_nonfull_run_tryget(bin); 2279 if (run != NULL) 2280 return (run); 2281 /* No existing runs have any space available. */ 2282 2283 binind = arena_bin_index(arena, bin); 2284 bin_info = &arena_bin_info[binind]; 2285 2286 /* Allocate a new run. */ 2287 malloc_mutex_unlock(tsd, &bin->lock); 2288 /******************************/ 2289 malloc_mutex_lock(tsd, &arena->lock); 2290 run = arena_run_alloc_small(tsd, arena, bin_info->run_size, binind); 2291 if (run != NULL) { 2292 /* Initialize run internals. */ 2293 run->binind = binind; 2294 run->nfree = bin_info->nregs; 2295 bitmap_init(run->bitmap, &bin_info->bitmap_info); 2296 } 2297 malloc_mutex_unlock(tsd, &arena->lock); 2298 /********************************/ 2299 malloc_mutex_lock(tsd, &bin->lock); 2300 if (run != NULL) { 2301 if (config_stats) { 2302 bin->stats.nruns++; 2303 bin->stats.curruns++; 2304 } 2305 return (run); 2306 } 2307 2308 /* 2309 * arena_run_alloc_small() failed, but another thread may have made 2310 * sufficient memory available while this one dropped bin->lock above, 2311 * so search one more time. 2312 */ 2313 run = arena_bin_nonfull_run_tryget(bin); 2314 if (run != NULL) 2315 return (run); 2316 2317 return (NULL); 2318} 2319 2320/* Re-fill bin->runcur, then call arena_run_reg_alloc(). 
 */
static void *
arena_bin_malloc_hard(tsd_t *tsd, arena_t *arena, arena_bin_t *bin)
{
	szind_t binind;
	arena_bin_info_t *bin_info;
	arena_run_t *run;

	binind = arena_bin_index(arena, bin);
	bin_info = &arena_bin_info[binind];
	bin->runcur = NULL;
	run = arena_bin_nonfull_run_get(tsd, arena, bin);
	if (bin->runcur != NULL && bin->runcur->nfree > 0) {
		/*
		 * Another thread updated runcur while this one ran without the
		 * bin lock in arena_bin_nonfull_run_get().
		 */
		void *ret;
		assert(bin->runcur->nfree > 0);
		ret = arena_run_reg_alloc(bin->runcur, bin_info);
		if (run != NULL) {
			arena_chunk_t *chunk;

			/*
			 * arena_run_alloc_small() may have allocated run, or
			 * it may have pulled run from the bin's run heap.
			 * Therefore it is unsafe to make any assumptions about
			 * how run has previously been used, and
			 * arena_bin_lower_run() must be called, as if a region
			 * were just deallocated from the run.
			 */
			chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
			if (run->nfree == bin_info->nregs) {
				arena_dalloc_bin_run(tsd, arena, chunk, run,
				    bin);
			} else
				arena_bin_lower_run(arena, chunk, run, bin);
		}
		return (ret);
	}

	if (run == NULL)
		return (NULL);

	bin->runcur = run;

	assert(bin->runcur->nfree > 0);

	return (arena_run_reg_alloc(bin->runcur, bin_info));
}

void
arena_tcache_fill_small(tsd_t *tsd, arena_t *arena, tcache_bin_t *tbin,
    szind_t binind, uint64_t prof_accumbytes)
{
	unsigned i, nfill;
	arena_bin_t *bin;

	assert(tbin->ncached == 0);

	if (config_prof && arena_prof_accum(tsd, arena, prof_accumbytes))
		prof_idump(tsd);
	bin = &arena->bins[binind];
	malloc_mutex_lock(tsd, &bin->lock);
	for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
	    tbin->lg_fill_div); i < nfill; i++) {
		arena_run_t *run;
		void *ptr;
		if ((run = bin->runcur) != NULL && run->nfree > 0)
			ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
		else
			ptr = arena_bin_malloc_hard(tsd, arena, bin);
		if (ptr == NULL) {
			/*
			 * OOM.  tbin->avail isn't yet filled down to its first
			 * element, so the successful allocations (if any) must
			 * be moved just before tbin->avail before bailing out.
			 */
			if (i > 0) {
				memmove(tbin->avail - i, tbin->avail - nfill,
				    i * sizeof(void *));
			}
			break;
		}
		if (config_fill && unlikely(opt_junk_alloc)) {
			arena_alloc_junk_small(ptr, &arena_bin_info[binind],
			    true);
		}
		/*
		 * Insert such that low regions get used first.
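		 * Regions come out of arena_run_reg_alloc() lowest address
		 * first, and slot (avail - nfill + i) places the i-th region
		 * so that the tcache's first pop returns the first (lowest)
		 * region filled.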
		 */
		*(tbin->avail - nfill + i) = ptr;
	}
	if (config_stats) {
		bin->stats.nmalloc += i;
		bin->stats.nrequests += tbin->tstats.nrequests;
		bin->stats.curregs += i;
		bin->stats.nfills++;
		tbin->tstats.nrequests = 0;
	}
	malloc_mutex_unlock(tsd, &bin->lock);
	tbin->ncached = i;
	arena_decay_tick(tsd, arena);
}

void
arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
{

	size_t redzone_size = bin_info->redzone_size;

	if (zero) {
		memset((void *)((uintptr_t)ptr - redzone_size),
		    JEMALLOC_ALLOC_JUNK, redzone_size);
		memset((void *)((uintptr_t)ptr + bin_info->reg_size),
		    JEMALLOC_ALLOC_JUNK, redzone_size);
	} else {
		memset((void *)((uintptr_t)ptr - redzone_size),
		    JEMALLOC_ALLOC_JUNK, bin_info->reg_interval);
	}
}

#ifdef JEMALLOC_JET
#undef arena_redzone_corruption
#define arena_redzone_corruption JEMALLOC_N(n_arena_redzone_corruption)
#endif
static void
arena_redzone_corruption(void *ptr, size_t usize, bool after,
    size_t offset, uint8_t byte)
{

	malloc_printf("<jemalloc>: Corrupt redzone %zu byte%s %s %p "
	    "(size %zu), byte=%#x\n", offset, (offset == 1) ? "" : "s",
	    after ? "after" : "before", ptr, usize, byte);
}
#ifdef JEMALLOC_JET
#undef arena_redzone_corruption
#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption)
arena_redzone_corruption_t *arena_redzone_corruption =
    JEMALLOC_N(n_arena_redzone_corruption);
#endif

static void
arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset)
{
	bool error = false;

	if (opt_junk_alloc) {
		size_t size = bin_info->reg_size;
		size_t redzone_size = bin_info->redzone_size;
		size_t i;

		for (i = 1; i <= redzone_size; i++) {
			uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i);
			if (*byte != JEMALLOC_ALLOC_JUNK) {
				error = true;
				arena_redzone_corruption(ptr, size, false, i,
				    *byte);
				if (reset)
					*byte = JEMALLOC_ALLOC_JUNK;
			}
		}
		for (i = 0; i < redzone_size; i++) {
			uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i);
			if (*byte != JEMALLOC_ALLOC_JUNK) {
				error = true;
				arena_redzone_corruption(ptr, size, true, i,
				    *byte);
				if (reset)
					*byte = JEMALLOC_ALLOC_JUNK;
			}
		}
	}

	if (opt_abort && error)
		abort();
}

#ifdef JEMALLOC_JET
#undef arena_dalloc_junk_small
#define arena_dalloc_junk_small JEMALLOC_N(n_arena_dalloc_junk_small)
#endif
void
arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
{
	size_t redzone_size = bin_info->redzone_size;

	arena_redzones_validate(ptr, bin_info, false);
	memset((void *)((uintptr_t)ptr - redzone_size), JEMALLOC_FREE_JUNK,
	    bin_info->reg_interval);
}
#ifdef JEMALLOC_JET
#undef arena_dalloc_junk_small
#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
arena_dalloc_junk_small_t *arena_dalloc_junk_small =
    JEMALLOC_N(n_arena_dalloc_junk_small);
#endif

void
arena_quarantine_junk_small(void *ptr, size_t usize)
{
	szind_t binind;
	arena_bin_info_t *bin_info;
	cassert(config_fill);
	assert(opt_junk_free);
	assert(opt_quarantine);
	assert(usize <= SMALL_MAXCLASS);

	binind = size2index(usize);
	bin_info = &arena_bin_info[binind];
	arena_redzones_validate(ptr, bin_info,
	    true);
}

static void *
arena_malloc_small(tsd_t *tsd, arena_t *arena, szind_t binind, bool zero)
{
	void *ret;
	arena_bin_t *bin;
	size_t usize;
	arena_run_t *run;

	assert(binind < NBINS);
	bin = &arena->bins[binind];
	usize = index2size(binind);

	malloc_mutex_lock(tsd, &bin->lock);
	if ((run = bin->runcur) != NULL && run->nfree > 0)
		ret = arena_run_reg_alloc(run, &arena_bin_info[binind]);
	else
		ret = arena_bin_malloc_hard(tsd, arena, bin);

	if (ret == NULL) {
		malloc_mutex_unlock(tsd, &bin->lock);
		return (NULL);
	}

	if (config_stats) {
		bin->stats.nmalloc++;
		bin->stats.nrequests++;
		bin->stats.curregs++;
	}
	malloc_mutex_unlock(tsd, &bin->lock);
	if (config_prof && !isthreaded && arena_prof_accum(tsd, arena, usize))
		prof_idump(tsd);

	if (!zero) {
		if (config_fill) {
			if (unlikely(opt_junk_alloc)) {
				arena_alloc_junk_small(ret,
				    &arena_bin_info[binind], false);
			} else if (unlikely(opt_zero))
				memset(ret, 0, usize);
		}
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize);
	} else {
		if (config_fill && unlikely(opt_junk_alloc)) {
			arena_alloc_junk_small(ret, &arena_bin_info[binind],
			    true);
		}
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize);
		memset(ret, 0, usize);
	}

	arena_decay_tick(tsd, arena);
	return (ret);
}

void *
arena_malloc_large(tsd_t *tsd, arena_t *arena, szind_t binind, bool zero)
{
	void *ret;
	size_t usize;
	uintptr_t random_offset;
	arena_run_t *run;
	arena_chunk_map_misc_t *miscelm;
	UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false);

	/* Large allocation. */
	usize = index2size(binind);
	malloc_mutex_lock(tsd, &arena->lock);
	if (config_cache_oblivious) {
		uint64_t r;

		/*
		 * Compute a uniformly distributed offset within the first page
		 * that is a multiple of the cacheline size, e.g. [0 .. 64) * 64
		 * for 4 KiB pages and 64-byte cachelines.
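		 * Concretely, with LG_PAGE == 12 and LG_CACHELINE == 6 the
		 * call below reduces to
		 *
		 *   r = prng_lg_range(&arena->offset_state, 6); // r in [0, 64)
		 *   random_offset = (uintptr_t)r << 6;          // 0, 64, ... 4032
		 *
		 * so large allocations start at one of 64 equally likely
		 * cacheline boundaries within the run's first page.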
		 */
		r = prng_lg_range(&arena->offset_state, LG_PAGE - LG_CACHELINE);
		random_offset = ((uintptr_t)r) << LG_CACHELINE;
	} else
		random_offset = 0;
	run = arena_run_alloc_large(tsd, arena, usize + large_pad, zero);
	if (run == NULL) {
		malloc_mutex_unlock(tsd, &arena->lock);
		return (NULL);
	}
	miscelm = arena_run_to_miscelm(run);
	ret = (void *)((uintptr_t)arena_miscelm_to_rpages(miscelm) +
	    random_offset);
	if (config_stats) {
		szind_t index = binind - NBINS;

		arena->stats.nmalloc_large++;
		arena->stats.nrequests_large++;
		arena->stats.allocated_large += usize;
		arena->stats.lstats[index].nmalloc++;
		arena->stats.lstats[index].nrequests++;
		arena->stats.lstats[index].curruns++;
	}
	if (config_prof)
		idump = arena_prof_accum_locked(arena, usize);
	malloc_mutex_unlock(tsd, &arena->lock);
	if (config_prof && idump)
		prof_idump(tsd);

	if (!zero) {
		if (config_fill) {
			if (unlikely(opt_junk_alloc))
				memset(ret, JEMALLOC_ALLOC_JUNK, usize);
			else if (unlikely(opt_zero))
				memset(ret, 0, usize);
		}
	}

	arena_decay_tick(tsd, arena);
	return (ret);
}

void *
arena_malloc_hard(tsd_t *tsd, arena_t *arena, size_t size, szind_t ind,
    bool zero)
{

	arena = arena_choose(tsd, arena);
	if (unlikely(arena == NULL))
		return (NULL);

	if (likely(size <= SMALL_MAXCLASS))
		return (arena_malloc_small(tsd, arena, ind, zero));
	if (likely(size <= large_maxclass))
		return (arena_malloc_large(tsd, arena, ind, zero));
	return (huge_malloc(tsd, arena, index2size(ind), zero));
}

/*
 * Only handles large allocations that require more than page alignment.
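 *
 * The strategy below is to over-allocate (usize + large_pad + alignment)
 * and then trim: the pages before the first suitably aligned address inside
 * the run are returned via arena_run_trim_head(), and any surplus after the
 * aligned region via arena_run_trim_tail().  E.g. a 4-page request with
 * 4-page alignment allocates enough extra that some aligned base must fall
 * within the run.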
 */
static void *
arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
    bool zero)
{
	void *ret;
	size_t alloc_size, leadsize, trailsize;
	arena_run_t *run;
	arena_chunk_t *chunk;
	arena_chunk_map_misc_t *miscelm;
	void *rpages;

	assert(usize == PAGE_CEILING(usize));

	arena = arena_choose(tsd, arena);
	if (unlikely(arena == NULL))
		return (NULL);

	alignment = PAGE_CEILING(alignment);
	alloc_size = usize + large_pad + alignment;

	malloc_mutex_lock(tsd, &arena->lock);
	run = arena_run_alloc_large(tsd, arena, alloc_size, false);
	if (run == NULL) {
		malloc_mutex_unlock(tsd, &arena->lock);
		return (NULL);
	}
	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	miscelm = arena_run_to_miscelm(run);
	rpages = arena_miscelm_to_rpages(miscelm);

	leadsize = ALIGNMENT_CEILING((uintptr_t)rpages, alignment) -
	    (uintptr_t)rpages;
	assert(alloc_size >= leadsize + usize);
	trailsize = alloc_size - leadsize - usize - large_pad;
	if (leadsize != 0) {
		arena_chunk_map_misc_t *head_miscelm = miscelm;
		arena_run_t *head_run = run;

		miscelm = arena_miscelm_get_mutable(chunk,
		    arena_miscelm_to_pageind(head_miscelm) + (leadsize >>
		    LG_PAGE));
		run = &miscelm->run;

		arena_run_trim_head(tsd, arena, chunk, head_run, alloc_size,
		    alloc_size - leadsize);
	}
	if (trailsize != 0) {
		arena_run_trim_tail(tsd, arena, chunk, run, usize + large_pad +
		    trailsize, usize + large_pad, false);
	}
	if (arena_run_init_large(arena, run, usize + large_pad, zero)) {
		size_t run_ind =
		    arena_miscelm_to_pageind(arena_run_to_miscelm(run));
		bool dirty = (arena_mapbits_dirty_get(chunk, run_ind) != 0);
		bool decommitted = (arena_mapbits_decommitted_get(chunk,
		    run_ind) != 0);

		assert(decommitted); /* Cause of OOM. */
		arena_run_dalloc(tsd, arena, run, dirty, false, decommitted);
		malloc_mutex_unlock(tsd, &arena->lock);
		return (NULL);
	}
	ret = arena_miscelm_to_rpages(miscelm);

	if (config_stats) {
		szind_t index = size2index(usize) - NBINS;

		arena->stats.nmalloc_large++;
		arena->stats.nrequests_large++;
		arena->stats.allocated_large += usize;
		arena->stats.lstats[index].nmalloc++;
		arena->stats.lstats[index].nrequests++;
		arena->stats.lstats[index].curruns++;
	}
	malloc_mutex_unlock(tsd, &arena->lock);

	if (config_fill && !zero) {
		if (unlikely(opt_junk_alloc))
			memset(ret, JEMALLOC_ALLOC_JUNK, usize);
		else if (unlikely(opt_zero))
			memset(ret, 0, usize);
	}
	arena_decay_tick(tsd, arena);
	return (ret);
}

void *
arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
    bool zero, tcache_t *tcache)
{
	void *ret;

	if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE
	    && (usize & PAGE_MASK) == 0))) {
		/* Small; alignment doesn't require special run placement. */
		ret = arena_malloc(tsd, arena, usize, size2index(usize), zero,
		    tcache, true);
	} else if (usize <= large_maxclass && alignment <= PAGE) {
		/*
		 * Large; alignment doesn't require special run placement.
		 * However, the cached pointer may be at a random offset from
		 * the base of the run, so do some bit manipulation to retrieve
		 * the base.
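		 * Since the cache-oblivious offset is strictly less than one
		 * page, masking off the low bits (ret & ~PAGE_MASK below) is
		 * sufficient to recover the page-aligned base.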
2767 */ 2768 ret = arena_malloc(tsd, arena, usize, size2index(usize), zero, 2769 tcache, true); 2770 if (config_cache_oblivious) 2771 ret = (void *)((uintptr_t)ret & ~PAGE_MASK); 2772 } else { 2773 if (likely(usize <= large_maxclass)) { 2774 ret = arena_palloc_large(tsd, arena, usize, alignment, 2775 zero); 2776 } else if (likely(alignment <= chunksize)) 2777 ret = huge_malloc(tsd, arena, usize, zero); 2778 else { 2779 ret = huge_palloc(tsd, arena, usize, alignment, zero); 2780 } 2781 } 2782 return (ret); 2783} 2784 2785void 2786arena_prof_promoted(tsd_t *tsd, const void *ptr, size_t size) 2787{ 2788 arena_chunk_t *chunk; 2789 size_t pageind; 2790 szind_t binind; 2791 2792 cassert(config_prof); 2793 assert(ptr != NULL); 2794 assert(CHUNK_ADDR2BASE(ptr) != ptr); 2795 assert(isalloc(tsd, ptr, false) == LARGE_MINCLASS); 2796 assert(isalloc(tsd, ptr, true) == LARGE_MINCLASS); 2797 assert(size <= SMALL_MAXCLASS); 2798 2799 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); 2800 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; 2801 binind = size2index(size); 2802 assert(binind < NBINS); 2803 arena_mapbits_large_binind_set(chunk, pageind, binind); 2804 2805 assert(isalloc(tsd, ptr, false) == LARGE_MINCLASS); 2806 assert(isalloc(tsd, ptr, true) == size); 2807} 2808 2809static void 2810arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run, 2811 arena_bin_t *bin) 2812{ 2813 2814 /* Dissociate run from bin. */ 2815 if (run == bin->runcur) 2816 bin->runcur = NULL; 2817 else { 2818 szind_t binind = arena_bin_index(extent_node_arena_get( 2819 &chunk->node), bin); 2820 arena_bin_info_t *bin_info = &arena_bin_info[binind]; 2821 2822 /* 2823 * The following block's conditional is necessary because if the 2824 * run only contains one region, then it never gets inserted 2825 * into the non-full runs tree. 2826 */ 2827 if (bin_info->nregs != 1) { 2828 arena_chunk_map_misc_t *miscelm = 2829 arena_run_to_miscelm(run); 2830 2831 arena_run_heap_remove(&bin->runs, miscelm); 2832 } 2833 } 2834} 2835 2836static void 2837arena_dalloc_bin_run(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, 2838 arena_run_t *run, arena_bin_t *bin) 2839{ 2840 2841 assert(run != bin->runcur); 2842 2843 malloc_mutex_unlock(tsd, &bin->lock); 2844 /******************************/ 2845 malloc_mutex_lock(tsd, &arena->lock); 2846 arena_run_dalloc(tsd, arena, run, true, false, false); 2847 malloc_mutex_unlock(tsd, &arena->lock); 2848 /****************************/ 2849 malloc_mutex_lock(tsd, &bin->lock); 2850 if (config_stats) 2851 bin->stats.curruns--; 2852} 2853 2854static void 2855arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, 2856 arena_bin_t *bin) 2857{ 2858 2859 /* 2860 * Make sure that if bin->runcur is non-NULL, it refers to the lowest 2861 * non-full run. It is okay to NULL runcur out rather than proactively 2862 * keeping it pointing at the lowest non-full run. 2863 */ 2864 if ((uintptr_t)run < (uintptr_t)bin->runcur) { 2865 /* Switch runcur. 
		 */
		if (bin->runcur->nfree > 0)
			arena_bin_runs_insert(bin, bin->runcur);
		bin->runcur = run;
		if (config_stats)
			bin->stats.reruns++;
	} else
		arena_bin_runs_insert(bin, run);
}

static void
arena_dalloc_bin_locked_impl(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk,
    void *ptr, arena_chunk_map_bits_t *bitselm, bool junked)
{
	size_t pageind, rpages_ind;
	arena_run_t *run;
	arena_bin_t *bin;
	arena_bin_info_t *bin_info;
	szind_t binind;

	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
	run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run;
	binind = run->binind;
	bin = &arena->bins[binind];
	bin_info = &arena_bin_info[binind];

	if (!junked && config_fill && unlikely(opt_junk_free))
		arena_dalloc_junk_small(ptr, bin_info);

	arena_run_reg_dalloc(run, ptr);
	if (run->nfree == bin_info->nregs) {
		arena_dissociate_bin_run(chunk, run, bin);
		arena_dalloc_bin_run(tsd, arena, chunk, run, bin);
	} else if (run->nfree == 1 && run != bin->runcur)
		arena_bin_lower_run(arena, chunk, run, bin);

	if (config_stats) {
		bin->stats.ndalloc++;
		bin->stats.curregs--;
	}
}

void
arena_dalloc_bin_junked_locked(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk,
    void *ptr, arena_chunk_map_bits_t *bitselm)
{

	arena_dalloc_bin_locked_impl(tsd, arena, chunk, ptr, bitselm, true);
}

void
arena_dalloc_bin(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, void *ptr,
    size_t pageind, arena_chunk_map_bits_t *bitselm)
{
	arena_run_t *run;
	arena_bin_t *bin;
	size_t rpages_ind;

	rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
	run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run;
	bin = &arena->bins[run->binind];
	malloc_mutex_lock(tsd, &bin->lock);
	arena_dalloc_bin_locked_impl(tsd, arena, chunk, ptr, bitselm, false);
	malloc_mutex_unlock(tsd, &bin->lock);
}

void
arena_dalloc_small(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, void *ptr,
    size_t pageind)
{
	arena_chunk_map_bits_t *bitselm;

	if (config_debug) {
		/*
		 * arena_ptr_small_binind_get() does extra sanity checking.
		 */
		assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
		    pageind)) != BININD_INVALID);
	}
	bitselm = arena_bitselm_get_mutable(chunk, pageind);
	arena_dalloc_bin(tsd, arena, chunk, ptr, pageind, bitselm);
	arena_decay_tick(tsd, arena);
}

#ifdef JEMALLOC_JET
#undef arena_dalloc_junk_large
#define arena_dalloc_junk_large JEMALLOC_N(n_arena_dalloc_junk_large)
#endif
void
arena_dalloc_junk_large(void *ptr, size_t usize)
{

	if (config_fill && unlikely(opt_junk_free))
		memset(ptr, JEMALLOC_FREE_JUNK, usize);
}
#ifdef JEMALLOC_JET
#undef arena_dalloc_junk_large
#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large)
arena_dalloc_junk_large_t *arena_dalloc_junk_large =
    JEMALLOC_N(n_arena_dalloc_junk_large);
#endif

static void
arena_dalloc_large_locked_impl(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk,
    void *ptr, bool junked)
{
	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
	    pageind);
	arena_run_t *run = &miscelm->run;

	if (config_fill || config_stats) {
		size_t usize = arena_mapbits_large_size_get(chunk, pageind) -
		    large_pad;

		if (!junked)
			arena_dalloc_junk_large(ptr, usize);
		if (config_stats) {
			szind_t index = size2index(usize) - NBINS;

			arena->stats.ndalloc_large++;
			arena->stats.allocated_large -= usize;
			arena->stats.lstats[index].ndalloc++;
			arena->stats.lstats[index].curruns--;
		}
	}

	arena_run_dalloc(tsd, arena, run, true, false, false);
}

void
arena_dalloc_large_junked_locked(tsd_t *tsd, arena_t *arena,
    arena_chunk_t *chunk, void *ptr)
{

	arena_dalloc_large_locked_impl(tsd, arena, chunk, ptr, true);
}

void
arena_dalloc_large(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, void *ptr)
{

	malloc_mutex_lock(tsd, &arena->lock);
	arena_dalloc_large_locked_impl(tsd, arena, chunk, ptr, false);
	malloc_mutex_unlock(tsd, &arena->lock);
	arena_decay_tick(tsd, arena);
}

static void
arena_ralloc_large_shrink(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk,
    void *ptr, size_t oldsize, size_t size)
{
	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
	    pageind);
	arena_run_t *run = &miscelm->run;

	assert(size < oldsize);

	/*
	 * Shrink the run, and make trailing pages available for other
	 * allocations.
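	 * Both sizes are passed to arena_run_trim_tail() with large_pad added
	 * back in, since the run is large_pad bytes longer than the usable
	 * size; e.g. shrinking an 8-page allocation to 5 pages releases the
	 * trailing 3 pages as a dirty run.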
	 */
	malloc_mutex_lock(tsd, &arena->lock);
	arena_run_trim_tail(tsd, arena, chunk, run, oldsize + large_pad, size +
	    large_pad, true);
	if (config_stats) {
		szind_t oldindex = size2index(oldsize) - NBINS;
		szind_t index = size2index(size) - NBINS;

		arena->stats.ndalloc_large++;
		arena->stats.allocated_large -= oldsize;
		arena->stats.lstats[oldindex].ndalloc++;
		arena->stats.lstats[oldindex].curruns--;

		arena->stats.nmalloc_large++;
		arena->stats.nrequests_large++;
		arena->stats.allocated_large += size;
		arena->stats.lstats[index].nmalloc++;
		arena->stats.lstats[index].nrequests++;
		arena->stats.lstats[index].curruns++;
	}
	malloc_mutex_unlock(tsd, &arena->lock);
}

static bool
arena_ralloc_large_grow(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk,
    void *ptr, size_t oldsize, size_t usize_min, size_t usize_max, bool zero)
{
	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	size_t npages = (oldsize + large_pad) >> LG_PAGE;
	size_t followsize;

	assert(oldsize == arena_mapbits_large_size_get(chunk, pageind) -
	    large_pad);

	/* Try to extend the run. */
	malloc_mutex_lock(tsd, &arena->lock);
	if (pageind+npages >= chunk_npages || arena_mapbits_allocated_get(chunk,
	    pageind+npages) != 0)
		goto label_fail;
	followsize = arena_mapbits_unallocated_size_get(chunk, pageind+npages);
	if (oldsize + followsize >= usize_min) {
		/*
		 * The next run is available and sufficiently large.  Split the
		 * following run, then merge the first part with the existing
		 * allocation.
		 */
		arena_run_t *run;
		size_t usize, splitsize, size, flag_dirty, flag_unzeroed_mask;

		usize = usize_max;
		while (oldsize + followsize < usize)
			usize = index2size(size2index(usize)-1);
		assert(usize >= usize_min);
		assert(usize >= oldsize);
		splitsize = usize - oldsize;
		if (splitsize == 0)
			goto label_fail;

		run = &arena_miscelm_get_mutable(chunk, pageind+npages)->run;
		if (arena_run_split_large(arena, run, splitsize, zero))
			goto label_fail;

		if (config_cache_oblivious && zero) {
			/*
			 * Zero the trailing bytes of the original allocation's
			 * last page, since they are in an indeterminate state.
			 * There will always be trailing bytes, because ptr's
			 * offset from the beginning of the run is a multiple of
			 * CACHELINE in [0 .. PAGE).
			 */
			void *zbase = (void *)((uintptr_t)ptr + oldsize);
			void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase +
			    PAGE));
			size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase;
			assert(nzero > 0);
			memset(zbase, 0, nzero);
		}

		size = oldsize + splitsize;
		npages = (size + large_pad) >> LG_PAGE;

		/*
		 * Mark the extended run as dirty if either portion of the run
		 * was dirty before allocation.  This is rather pedantic,
		 * because there's not actually any sequence of events that
		 * could cause the resulting run to be passed to
		 * arena_run_dalloc() with the dirty argument set to false
		 * (which is when dirty flag consistency would really matter).
		 */
		flag_dirty = arena_mapbits_dirty_get(chunk, pageind) |
		    arena_mapbits_dirty_get(chunk, pageind+npages-1);
		flag_unzeroed_mask = flag_dirty == 0 ?
		    CHUNK_MAP_UNZEROED : 0;
		arena_mapbits_large_set(chunk, pageind, size + large_pad,
		    flag_dirty | (flag_unzeroed_mask &
		    arena_mapbits_unzeroed_get(chunk, pageind)));
		arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty |
		    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
		    pageind+npages-1)));

		if (config_stats) {
			szind_t oldindex = size2index(oldsize) - NBINS;
			szind_t index = size2index(size) - NBINS;

			arena->stats.ndalloc_large++;
			arena->stats.allocated_large -= oldsize;
			arena->stats.lstats[oldindex].ndalloc++;
			arena->stats.lstats[oldindex].curruns--;

			arena->stats.nmalloc_large++;
			arena->stats.nrequests_large++;
			arena->stats.allocated_large += size;
			arena->stats.lstats[index].nmalloc++;
			arena->stats.lstats[index].nrequests++;
			arena->stats.lstats[index].curruns++;
		}
		malloc_mutex_unlock(tsd, &arena->lock);
		return (false);
	}
label_fail:
	malloc_mutex_unlock(tsd, &arena->lock);
	return (true);
}

#ifdef JEMALLOC_JET
#undef arena_ralloc_junk_large
#define arena_ralloc_junk_large JEMALLOC_N(n_arena_ralloc_junk_large)
#endif
static void
arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize)
{

	if (config_fill && unlikely(opt_junk_free)) {
		memset((void *)((uintptr_t)ptr + usize), JEMALLOC_FREE_JUNK,
		    old_usize - usize);
	}
}
#ifdef JEMALLOC_JET
#undef arena_ralloc_junk_large
#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large)
arena_ralloc_junk_large_t *arena_ralloc_junk_large =
    JEMALLOC_N(n_arena_ralloc_junk_large);
#endif

/*
 * Try to resize a large allocation, in order to avoid copying.  This will
 * always fail when growing an object if the following run is already in use.
 */
static bool
arena_ralloc_large(tsd_t *tsd, void *ptr, size_t oldsize, size_t usize_min,
    size_t usize_max, bool zero)
{
	arena_chunk_t *chunk;
	arena_t *arena;

	if (oldsize == usize_max) {
		/* Current size class is compatible and maximal. */
		return (false);
	}

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	arena = extent_node_arena_get(&chunk->node);

	if (oldsize < usize_max) {
		bool ret = arena_ralloc_large_grow(tsd, arena, chunk, ptr,
		    oldsize, usize_min, usize_max, zero);
		if (config_fill && !ret && !zero) {
			if (unlikely(opt_junk_alloc)) {
				memset((void *)((uintptr_t)ptr + oldsize),
				    JEMALLOC_ALLOC_JUNK,
				    isalloc(tsd, ptr, config_prof) - oldsize);
			} else if (unlikely(opt_zero)) {
				memset((void *)((uintptr_t)ptr + oldsize), 0,
				    isalloc(tsd, ptr, config_prof) - oldsize);
			}
		}
		return (ret);
	}

	assert(oldsize > usize_max);
	/* Fill before shrinking in order to avoid a race. */
	arena_ralloc_junk_large(ptr, oldsize, usize_max);
	arena_ralloc_large_shrink(tsd, arena, chunk, ptr, oldsize, usize_max);
	return (false);
}

bool
arena_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
    size_t extra, bool zero)
{
	size_t usize_min, usize_max;

	/*
	 * Calls with non-zero extra had to clamp extra.
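	 * usize_min and usize_max below bound the acceptable outcome: the
	 * resize succeeds in place if the allocation can be left at (or grown
	 * or shrunk to) some size class in [s2u(size), s2u(size + extra)]
	 * without moving it.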
	 */
	assert(extra == 0 || size + extra <= HUGE_MAXCLASS);

	if (unlikely(size > HUGE_MAXCLASS))
		return (true);

	usize_min = s2u(size);
	usize_max = s2u(size + extra);
	if (likely(oldsize <= large_maxclass && usize_min <= large_maxclass)) {
		arena_chunk_t *chunk;

		/*
		 * Avoid moving the allocation if the size class can be left the
		 * same.
		 */
		if (oldsize <= SMALL_MAXCLASS) {
			assert(arena_bin_info[size2index(oldsize)].reg_size ==
			    oldsize);
			if ((usize_max > SMALL_MAXCLASS ||
			    size2index(usize_max) != size2index(oldsize)) &&
			    (size > oldsize || usize_max < oldsize))
				return (true);
		} else {
			if (usize_max <= SMALL_MAXCLASS)
				return (true);
			if (arena_ralloc_large(tsd, ptr, oldsize, usize_min,
			    usize_max, zero))
				return (true);
		}

		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
		arena_decay_tick(tsd, extent_node_arena_get(&chunk->node));
		return (false);
	} else {
		return (huge_ralloc_no_move(tsd, ptr, oldsize, usize_min,
		    usize_max, zero));
	}
}

static void *
arena_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize,
    size_t alignment, bool zero, tcache_t *tcache)
{

	if (alignment == 0)
		return (arena_malloc(tsd, arena, usize, size2index(usize), zero,
		    tcache, true));
	usize = sa2u(usize, alignment);
	if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
		return (NULL);
	return (ipalloct(tsd, usize, alignment, zero, tcache, arena));
}

void *
arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
    size_t alignment, bool zero, tcache_t *tcache)
{
	void *ret;
	size_t usize;

	usize = s2u(size);
	if (unlikely(usize == 0 || size > HUGE_MAXCLASS))
		return (NULL);

	if (likely(usize <= large_maxclass)) {
		size_t copysize;

		/* Try to avoid moving the allocation. */
		if (!arena_ralloc_no_move(tsd, ptr, oldsize, usize, 0, zero))
			return (ptr);

		/*
		 * size and oldsize are different enough that we need to move
		 * the object.  In that case, fall back to allocating new space
		 * and copying.
		 */
		ret = arena_ralloc_move_helper(tsd, arena, usize, alignment,
		    zero, tcache);
		if (ret == NULL)
			return (NULL);

		/*
		 * Junk/zero-filling were already done by
		 * ipalloc()/arena_malloc().
		 */

		copysize = (usize < oldsize) ?
		    usize : oldsize;
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
		memcpy(ret, ptr, copysize);
		isqalloc(tsd, ptr, oldsize, tcache);
	} else {
		ret = huge_ralloc(tsd, arena, ptr, oldsize, usize, alignment,
		    zero, tcache);
	}
	return (ret);
}

dss_prec_t
arena_dss_prec_get(tsd_t *tsd, arena_t *arena)
{
	dss_prec_t ret;

	malloc_mutex_lock(tsd, &arena->lock);
	ret = arena->dss_prec;
	malloc_mutex_unlock(tsd, &arena->lock);
	return (ret);
}

bool
arena_dss_prec_set(tsd_t *tsd, arena_t *arena, dss_prec_t dss_prec)
{

	if (!have_dss)
		return (dss_prec != dss_prec_disabled);
	malloc_mutex_lock(tsd, &arena->lock);
	arena->dss_prec = dss_prec;
	malloc_mutex_unlock(tsd, &arena->lock);
	return (false);
}

ssize_t
arena_lg_dirty_mult_default_get(void)
{

	return ((ssize_t)atomic_read_z((size_t *)&lg_dirty_mult_default));
}

bool
arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult)
{

	if (opt_purge != purge_mode_ratio)
		return (true);
	if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
		return (true);
	atomic_write_z((size_t *)&lg_dirty_mult_default, (size_t)lg_dirty_mult);
	return (false);
}

ssize_t
arena_decay_time_default_get(void)
{

	return ((ssize_t)atomic_read_z((size_t *)&decay_time_default));
}

bool
arena_decay_time_default_set(ssize_t decay_time)
{

	if (opt_purge != purge_mode_decay)
		return (true);
	if (!arena_decay_time_valid(decay_time))
		return (true);
	atomic_write_z((size_t *)&decay_time_default, (size_t)decay_time);
	return (false);
}

static void
arena_basic_stats_merge_locked(arena_t *arena, unsigned *nthreads,
    const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
    size_t *nactive, size_t *ndirty)
{

	*nthreads += arena_nthreads_get(arena, false);
	*dss = dss_prec_names[arena->dss_prec];
	*lg_dirty_mult = arena->lg_dirty_mult;
	*decay_time = arena->decay_time;
	*nactive += arena->nactive;
	*ndirty += arena->ndirty;
}

void
arena_basic_stats_merge(tsd_t *tsd, arena_t *arena, unsigned *nthreads,
    const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
    size_t *nactive, size_t *ndirty)
{

	malloc_mutex_lock(tsd, &arena->lock);
	arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult,
	    decay_time, nactive, ndirty);
	malloc_mutex_unlock(tsd, &arena->lock);
}

void
arena_stats_merge(tsd_t *tsd, arena_t *arena, unsigned *nthreads,
    const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
    size_t *nactive, size_t *ndirty, arena_stats_t *astats,
    malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats,
    malloc_huge_stats_t *hstats)
{
	unsigned i;

	cassert(config_stats);

	malloc_mutex_lock(tsd, &arena->lock);
	arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult,
	    decay_time, nactive, ndirty);

	astats->mapped += arena->stats.mapped;
	astats->npurge += arena->stats.npurge;
	astats->nmadvise += arena->stats.nmadvise;
	astats->purged += arena->stats.purged;
	astats->metadata_mapped += arena->stats.metadata_mapped;
	astats->metadata_allocated += arena_metadata_allocated_get(arena);
	astats->allocated_large += arena->stats.allocated_large;
	astats->nmalloc_large +=
	    arena->stats.nmalloc_large;
	astats->ndalloc_large += arena->stats.ndalloc_large;
	astats->nrequests_large += arena->stats.nrequests_large;
	astats->allocated_huge += arena->stats.allocated_huge;
	astats->nmalloc_huge += arena->stats.nmalloc_huge;
	astats->ndalloc_huge += arena->stats.ndalloc_huge;

	for (i = 0; i < nlclasses; i++) {
		lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
		lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
		lstats[i].nrequests += arena->stats.lstats[i].nrequests;
		lstats[i].curruns += arena->stats.lstats[i].curruns;
	}

	for (i = 0; i < nhclasses; i++) {
		hstats[i].nmalloc += arena->stats.hstats[i].nmalloc;
		hstats[i].ndalloc += arena->stats.hstats[i].ndalloc;
		hstats[i].curhchunks += arena->stats.hstats[i].curhchunks;
	}
	malloc_mutex_unlock(tsd, &arena->lock);

	for (i = 0; i < NBINS; i++) {
		arena_bin_t *bin = &arena->bins[i];

		malloc_mutex_lock(tsd, &bin->lock);
		bstats[i].nmalloc += bin->stats.nmalloc;
		bstats[i].ndalloc += bin->stats.ndalloc;
		bstats[i].nrequests += bin->stats.nrequests;
		bstats[i].curregs += bin->stats.curregs;
		if (config_tcache) {
			bstats[i].nfills += bin->stats.nfills;
			bstats[i].nflushes += bin->stats.nflushes;
		}
		bstats[i].nruns += bin->stats.nruns;
		bstats[i].reruns += bin->stats.reruns;
		bstats[i].curruns += bin->stats.curruns;
		malloc_mutex_unlock(tsd, &bin->lock);
	}
}

unsigned
arena_nthreads_get(arena_t *arena, bool internal)
{

	return (atomic_read_u(&arena->nthreads[internal]));
}

void
arena_nthreads_inc(arena_t *arena, bool internal)
{

	atomic_add_u(&arena->nthreads[internal], 1);
}

void
arena_nthreads_dec(arena_t *arena, bool internal)
{

	atomic_sub_u(&arena->nthreads[internal], 1);
}

arena_t *
arena_new(tsd_t *tsd, unsigned ind)
{
	arena_t *arena;
	size_t arena_size;
	unsigned i;

	/* Compute arena size to incorporate sufficient runs_avail elements. */
	arena_size = offsetof(arena_t, runs_avail) + (sizeof(arena_run_heap_t) *
	    runs_avail_nclasses);
	/*
	 * Allocate arena, arena->lstats, and arena->hstats contiguously, mainly
	 * because there is no way to clean up if base_alloc() OOMs.
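	 * The intended layout of the single base_alloc() block is:
	 *
	 *   [arena_t .....] CACHELINE_CEILING(arena_size) bytes
	 *   [lstats ......] QUANTUM_CEILING(nlclasses *
	 *                       sizeof(malloc_large_stats_t)) bytes
	 *   [hstats ......] nhclasses * sizeof(malloc_huge_stats_t) bytes
	 *
	 * which matches the lstats/hstats pointer arithmetic below.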
3497 */ 3498 if (config_stats) { 3499 arena = (arena_t *)base_alloc(tsd, CACHELINE_CEILING(arena_size) 3500 + QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t) + 3501 nhclasses) * sizeof(malloc_huge_stats_t)); 3502 } else 3503 arena = (arena_t *)base_alloc(tsd, arena_size); 3504 if (arena == NULL) 3505 return (NULL); 3506 3507 arena->ind = ind; 3508 arena->nthreads[0] = arena->nthreads[1] = 0; 3509 if (malloc_mutex_init(&arena->lock, "arena", WITNESS_RANK_ARENA)) 3510 return (NULL); 3511 3512 if (config_stats) { 3513 memset(&arena->stats, 0, sizeof(arena_stats_t)); 3514 arena->stats.lstats = (malloc_large_stats_t *)((uintptr_t)arena 3515 + CACHELINE_CEILING(arena_size)); 3516 memset(arena->stats.lstats, 0, nlclasses * 3517 sizeof(malloc_large_stats_t)); 3518 arena->stats.hstats = (malloc_huge_stats_t *)((uintptr_t)arena 3519 + CACHELINE_CEILING(arena_size) + 3520 QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t))); 3521 memset(arena->stats.hstats, 0, nhclasses * 3522 sizeof(malloc_huge_stats_t)); 3523 if (config_tcache) 3524 ql_new(&arena->tcache_ql); 3525 } 3526 3527 if (config_prof) 3528 arena->prof_accumbytes = 0; 3529 3530 if (config_cache_oblivious) { 3531 /* 3532 * A nondeterministic seed based on the address of arena reduces 3533 * the likelihood of lockstep non-uniform cache index 3534 * utilization among identical concurrent processes, but at the 3535 * cost of test repeatability. For debug builds, instead use a 3536 * deterministic seed. 3537 */ 3538 arena->offset_state = config_debug ? ind : 3539 (uint64_t)(uintptr_t)arena; 3540 } 3541 3542 arena->dss_prec = chunk_dss_prec_get(tsd); 3543 3544 ql_new(&arena->achunks); 3545 3546 arena->spare = NULL; 3547 3548 arena->lg_dirty_mult = arena_lg_dirty_mult_default_get(); 3549 arena->purging = false; 3550 arena->nactive = 0; 3551 arena->ndirty = 0; 3552 3553 for(i = 0; i < runs_avail_nclasses; i++) 3554 arena_run_heap_new(&arena->runs_avail[i]); 3555 qr_new(&arena->runs_dirty, rd_link); 3556 qr_new(&arena->chunks_cache, cc_link); 3557 3558 if (opt_purge == purge_mode_decay) 3559 arena_decay_init(arena, arena_decay_time_default_get()); 3560 3561 ql_new(&arena->huge); 3562 if (malloc_mutex_init(&arena->huge_mtx, "arena_huge", 3563 WITNESS_RANK_ARENA_HUGE)) 3564 return (NULL); 3565 3566 extent_tree_szad_new(&arena->chunks_szad_cached); 3567 extent_tree_ad_new(&arena->chunks_ad_cached); 3568 extent_tree_szad_new(&arena->chunks_szad_retained); 3569 extent_tree_ad_new(&arena->chunks_ad_retained); 3570 if (malloc_mutex_init(&arena->chunks_mtx, "arena_chunks", 3571 WITNESS_RANK_ARENA_CHUNKS)) 3572 return (NULL); 3573 ql_new(&arena->node_cache); 3574 if (malloc_mutex_init(&arena->node_cache_mtx, "arena_node_cache", 3575 WITNESS_RANK_ARENA_NODE_CACHE)) 3576 return (NULL); 3577 3578 arena->chunk_hooks = chunk_hooks_default; 3579 3580 /* Initialize bins. */ 3581 for (i = 0; i < NBINS; i++) { 3582 arena_bin_t *bin = &arena->bins[i]; 3583 if (malloc_mutex_init(&bin->lock, "arena_bin", 3584 WITNESS_RANK_ARENA_BIN)) 3585 return (NULL); 3586 bin->runcur = NULL; 3587 arena_run_heap_new(&bin->runs); 3588 if (config_stats) 3589 memset(&bin->stats, 0, sizeof(malloc_bin_stats_t)); 3590 } 3591 3592 return (arena); 3593} 3594 3595/* 3596 * Calculate bin_info->run_size such that it meets the following constraints: 3597 * 3598 * *) bin_info->run_size <= arena_maxrun 3599 * *) bin_info->nregs <= RUN_MAXREGS 3600 * 3601 * bin_info->nregs and bin_info->reg0_offset are also calculated here, since 3602 * these settings are all interdependent. 
3603 */ 3604static void 3605bin_info_run_size_calc(arena_bin_info_t *bin_info) 3606{ 3607 size_t pad_size; 3608 size_t try_run_size, perfect_run_size, actual_run_size; 3609 uint32_t try_nregs, perfect_nregs, actual_nregs; 3610 3611 /* 3612 * Determine redzone size based on minimum alignment and minimum 3613 * redzone size. Add padding to the end of the run if it is needed to 3614 * align the regions. The padding allows each redzone to be half the 3615 * minimum alignment; without the padding, each redzone would have to 3616 * be twice as large in order to maintain alignment. 3617 */ 3618 if (config_fill && unlikely(opt_redzone)) { 3619 size_t align_min = ZU(1) << (ffs_zu(bin_info->reg_size) - 1); 3620 if (align_min <= REDZONE_MINSIZE) { 3621 bin_info->redzone_size = REDZONE_MINSIZE; 3622 pad_size = 0; 3623 } else { 3624 bin_info->redzone_size = align_min >> 1; 3625 pad_size = bin_info->redzone_size; 3626 } 3627 } else { 3628 bin_info->redzone_size = 0; 3629 pad_size = 0; 3630 } 3631 bin_info->reg_interval = bin_info->reg_size + 3632 (bin_info->redzone_size << 1); 3633 3634 /* 3635 * Compute run size under ideal conditions (no redzones, no limit on run 3636 * size). 3637 */ 3638 try_run_size = PAGE; 3639 try_nregs = (uint32_t)(try_run_size / bin_info->reg_size); 3640 do { 3641 perfect_run_size = try_run_size; 3642 perfect_nregs = try_nregs; 3643 3644 try_run_size += PAGE; 3645 try_nregs = (uint32_t)(try_run_size / bin_info->reg_size); 3646 } while (perfect_run_size != perfect_nregs * bin_info->reg_size); 3647 assert(perfect_nregs <= RUN_MAXREGS); 3648 3649 actual_run_size = perfect_run_size; 3650 actual_nregs = (uint32_t)((actual_run_size - pad_size) / 3651 bin_info->reg_interval); 3652 3653 /* 3654 * Redzones can require enough padding that not even a single region can 3655 * fit within the number of pages that would normally be dedicated to a 3656 * run for this size class. Increase the run size until at least one 3657 * region fits. 3658 */ 3659 while (actual_nregs == 0) { 3660 assert(config_fill && unlikely(opt_redzone)); 3661 3662 actual_run_size += PAGE; 3663 actual_nregs = (uint32_t)((actual_run_size - pad_size) / 3664 bin_info->reg_interval); 3665 } 3666 3667 /* 3668 * Make sure that the run will fit within an arena chunk. 3669 */ 3670 while (actual_run_size > arena_maxrun) { 3671 actual_run_size -= PAGE; 3672 actual_nregs = (uint32_t)((actual_run_size - pad_size) / 3673 bin_info->reg_interval); 3674 } 3675 assert(actual_nregs > 0); 3676 assert(actual_run_size == s2u(actual_run_size)); 3677 3678 /* Copy final settings. 
	/* Copy final settings. */
	bin_info->run_size = actual_run_size;
	bin_info->nregs = actual_nregs;
	bin_info->reg0_offset = (uint32_t)(actual_run_size - (actual_nregs *
	    bin_info->reg_interval) - pad_size + bin_info->redzone_size);

	if (actual_run_size > small_maxrun)
		small_maxrun = actual_run_size;

	assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs
	    * bin_info->reg_interval) + pad_size == bin_info->run_size);
}

static void
bin_info_init(void)
{
	arena_bin_info_t *bin_info;

#define BIN_INFO_INIT_bin_yes(index, size)				\
	bin_info = &arena_bin_info[index];				\
	bin_info->reg_size = size;					\
	bin_info_run_size_calc(bin_info);				\
	bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
#define BIN_INFO_INIT_bin_no(index, size)
#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup)	\
	BIN_INFO_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
	SIZE_CLASSES
#undef BIN_INFO_INIT_bin_yes
#undef BIN_INFO_INIT_bin_no
#undef SC
}

static bool
small_run_size_init(void)
{

	assert(small_maxrun != 0);

	/*
	 * Allocate one slot past (small_maxrun >> LG_PAGE), since the table
	 * is indexed directly by page count and small_maxrun itself (the
	 * inclusive maximum) is a valid index.
	 */
	small_run_tab = (bool *)base_alloc(NULL, sizeof(bool) *
	    ((small_maxrun >> LG_PAGE) + 1));
	if (small_run_tab == NULL)
		return (true);

#define TAB_INIT_bin_yes(index, size) {					\
		arena_bin_info_t *bin_info = &arena_bin_info[index];	\
		small_run_tab[bin_info->run_size >> LG_PAGE] = true;	\
	}
#define TAB_INIT_bin_no(index, size)
#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup)	\
	TAB_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
	SIZE_CLASSES
#undef TAB_INIT_bin_yes
#undef TAB_INIT_bin_no
#undef SC

	return (false);
}

static bool
run_quantize_init(void)
{
	unsigned i;

	run_quantize_max = chunksize + large_pad;

	run_quantize_floor_tab = (size_t *)base_alloc(NULL, sizeof(size_t) *
	    (run_quantize_max >> LG_PAGE));
	if (run_quantize_floor_tab == NULL)
		return (true);

	run_quantize_ceil_tab = (size_t *)base_alloc(NULL, sizeof(size_t) *
	    (run_quantize_max >> LG_PAGE));
	if (run_quantize_ceil_tab == NULL)
		return (true);

	for (i = 1; i <= run_quantize_max >> LG_PAGE; i++) {
		size_t run_size = i << LG_PAGE;

		run_quantize_floor_tab[i-1] =
		    run_quantize_floor_compute(run_size);
		run_quantize_ceil_tab[i-1] =
		    run_quantize_ceil_compute(run_size);
	}

	return (false);
}

bool
arena_boot(void)
{
	unsigned i;

	arena_lg_dirty_mult_default_set(opt_lg_dirty_mult);
	arena_decay_time_default_set(opt_decay_time);

	/*
	 * Compute the header size such that it is large enough to contain the
	 * page map.  The page map is biased to omit entries for the header
	 * itself, so some iteration is necessary to compute the map bias.
	 *
	 * 1) Compute safe header_size and map_bias values that include enough
	 *    space for an unbiased page map.
	 * 2) Refine map_bias based on (1) to omit the header pages in the page
	 *    map.  The resulting map_bias may be one too small.
	 * 3) Refine map_bias based on (2).  The result will be >= the result
	 *    from (2), and will always be correct.
	 */
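	/*
	 * Illustrative example (hypothetical sizes): suppose each mapped page
	 * costs 112 bytes of header (map_bits plus map_misc) and chunk_npages
	 * == 512.  Pass 1 sizes the map for all 512 pages, giving map_bias ==
	 * 15; pass 2 re-sizes it for 512 - 15 pages, which rounds down to
	 * map_bias == 14; pass 3 re-computes from 14 and lands on the fixed
	 * point, since shrinking the map can never grow the header.
	 */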
3784 */ 3785 map_bias = 0; 3786 for (i = 0; i < 3; i++) { 3787 size_t header_size = offsetof(arena_chunk_t, map_bits) + 3788 ((sizeof(arena_chunk_map_bits_t) + 3789 sizeof(arena_chunk_map_misc_t)) * (chunk_npages-map_bias)); 3790 map_bias = (header_size + PAGE_MASK) >> LG_PAGE; 3791 } 3792 assert(map_bias > 0); 3793 3794 map_misc_offset = offsetof(arena_chunk_t, map_bits) + 3795 sizeof(arena_chunk_map_bits_t) * (chunk_npages-map_bias); 3796 3797 arena_maxrun = chunksize - (map_bias << LG_PAGE); 3798 assert(arena_maxrun > 0); 3799 large_maxclass = index2size(size2index(chunksize)-1); 3800 if (large_maxclass > arena_maxrun) { 3801 /* 3802 * For small chunk sizes it's possible for there to be fewer 3803 * non-header pages available than are necessary to serve the 3804 * size classes just below chunksize. 3805 */ 3806 large_maxclass = arena_maxrun; 3807 } 3808 assert(large_maxclass > 0); 3809 nlclasses = size2index(large_maxclass) - size2index(SMALL_MAXCLASS); 3810 nhclasses = NSIZES - nlclasses - NBINS; 3811 3812 bin_info_init(); 3813 if (small_run_size_init()) 3814 return (true); 3815 if (run_quantize_init()) 3816 return (true); 3817 3818 runs_avail_bias = size2index(PAGE); 3819 runs_avail_nclasses = size2index(run_quantize_max)+1 - runs_avail_bias; 3820 3821 return (false); 3822} 3823 3824void 3825arena_prefork0(tsd_t *tsd, arena_t *arena) 3826{ 3827 3828 malloc_mutex_prefork(tsd, &arena->lock); 3829} 3830 3831void 3832arena_prefork1(tsd_t *tsd, arena_t *arena) 3833{ 3834 3835 malloc_mutex_prefork(tsd, &arena->chunks_mtx); 3836} 3837 3838void 3839arena_prefork2(tsd_t *tsd, arena_t *arena) 3840{ 3841 3842 malloc_mutex_prefork(tsd, &arena->node_cache_mtx); 3843} 3844 3845void 3846arena_prefork3(tsd_t *tsd, arena_t *arena) 3847{ 3848 unsigned i; 3849 3850 for (i = 0; i < NBINS; i++) 3851 malloc_mutex_prefork(tsd, &arena->bins[i].lock); 3852 malloc_mutex_prefork(tsd, &arena->huge_mtx); 3853} 3854 3855void 3856arena_postfork_parent(tsd_t *tsd, arena_t *arena) 3857{ 3858 unsigned i; 3859 3860 malloc_mutex_postfork_parent(tsd, &arena->huge_mtx); 3861 for (i = 0; i < NBINS; i++) 3862 malloc_mutex_postfork_parent(tsd, &arena->bins[i].lock); 3863 malloc_mutex_postfork_parent(tsd, &arena->node_cache_mtx); 3864 malloc_mutex_postfork_parent(tsd, &arena->chunks_mtx); 3865 malloc_mutex_postfork_parent(tsd, &arena->lock); 3866} 3867 3868void 3869arena_postfork_child(tsd_t *tsd, arena_t *arena) 3870{ 3871 unsigned i; 3872 3873 malloc_mutex_postfork_child(tsd, &arena->huge_mtx); 3874 for (i = 0; i < NBINS; i++) 3875 malloc_mutex_postfork_child(tsd, &arena->bins[i].lock); 3876 malloc_mutex_postfork_child(tsd, &arena->node_cache_mtx); 3877 malloc_mutex_postfork_child(tsd, &arena->chunks_mtx); 3878 malloc_mutex_postfork_child(tsd, &arena->lock); 3879} 3880