arena.c revision bd16ea49c3e36706a52ef9c8f560813c167fa085
1#define JEMALLOC_ARENA_C_ 2#include "jemalloc/internal/jemalloc_internal.h" 3 4/******************************************************************************/ 5/* Data. */ 6 7ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT; 8static ssize_t lg_dirty_mult_default; 9arena_bin_info_t arena_bin_info[NBINS]; 10 11size_t map_bias; 12size_t map_misc_offset; 13size_t arena_maxrun; /* Max run size for arenas. */ 14size_t arena_maxclass; /* Max size class for arenas. */ 15unsigned nlclasses; /* Number of large size classes. */ 16unsigned nhclasses; /* Number of huge size classes. */ 17 18/******************************************************************************/ 19/* 20 * Function prototypes for static functions that are referenced prior to 21 * definition. 22 */ 23 24static void arena_purge(arena_t *arena, bool all); 25static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, 26 bool cleaned); 27static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, 28 arena_run_t *run, arena_bin_t *bin); 29static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, 30 arena_run_t *run, arena_bin_t *bin); 31 32/******************************************************************************/ 33 34JEMALLOC_INLINE_C size_t 35arena_miscelm_to_bits(arena_chunk_map_misc_t *miscelm) 36{ 37 arena_chunk_t *chunk = CHUNK_ADDR2BASE(miscelm); 38 size_t pageind = arena_miscelm_to_pageind(miscelm); 39 40 return (arena_mapbits_get(chunk, pageind)); 41} 42 43JEMALLOC_INLINE_C int 44arena_run_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b) 45{ 46 uintptr_t a_miscelm = (uintptr_t)a; 47 uintptr_t b_miscelm = (uintptr_t)b; 48 49 assert(a != NULL); 50 assert(b != NULL); 51 52 return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm)); 53} 54 55/* Generate red-black tree functions. */ 56rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_misc_t, 57 rb_link, arena_run_comp) 58 59JEMALLOC_INLINE_C int 60arena_avail_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b) 61{ 62 int ret; 63 uintptr_t a_miscelm = (uintptr_t)a; 64 size_t a_size; 65 size_t b_size = arena_miscelm_to_bits(b) & ~PAGE_MASK; 66 index_t a_index, b_index; 67 68 if (a_miscelm & CHUNK_MAP_KEY) { 69 a_size = a_miscelm & ~PAGE_MASK; 70 assert(a_size == s2u(a_size)); 71 } else 72 a_size = arena_miscelm_to_bits(a) & ~PAGE_MASK; 73 74 /* 75 * Compute the index of the largest size class that the run can satisfy 76 * a request for. 77 */ 78 a_index = size2index(a_size + 1) - 1; 79 b_index = size2index(b_size + 1) - 1; 80 81 /* 82 * Compare based on size class index rather than size, in order to 83 * sort equally useful runs only by address. 84 */ 85 ret = (a_index > b_index) - (a_index < b_index); 86 if (ret == 0) { 87 if (!(a_miscelm & CHUNK_MAP_KEY)) { 88 uintptr_t b_miscelm = (uintptr_t)b; 89 90 ret = (a_miscelm > b_miscelm) - (a_miscelm < b_miscelm); 91 } else { 92 /* 93 * Treat keys as if they are lower than anything else. 94 */ 95 ret = -1; 96 } 97 } 98 99 return (ret); 100} 101 102/* Generate red-black tree functions. */ 103rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t, 104 arena_chunk_map_misc_t, rb_link, arena_avail_comp) 105 106static void 107arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind, 108 size_t npages) 109{ 110 111 assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> 112 LG_PAGE)); 113 arena_avail_tree_insert(&arena->runs_avail, arena_miscelm_get(chunk, 114 pageind)); 115} 116 117static void 118arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind, 119 size_t npages) 120{ 121 122 assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> 123 LG_PAGE)); 124 arena_avail_tree_remove(&arena->runs_avail, arena_miscelm_get(chunk, 125 pageind)); 126} 127 128static void 129arena_run_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind, 130 size_t npages) 131{ 132 arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind); 133 134 assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> 135 LG_PAGE)); 136 assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY); 137 assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) == 138 CHUNK_MAP_DIRTY); 139 140 qr_new(&miscelm->rd, rd_link); 141 qr_meld(&arena->runs_dirty, &miscelm->rd, rd_link); 142 arena->ndirty += npages; 143} 144 145static void 146arena_run_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind, 147 size_t npages) 148{ 149 arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind); 150 151 assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> 152 LG_PAGE)); 153 assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY); 154 assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) == 155 CHUNK_MAP_DIRTY); 156 157 qr_remove(&miscelm->rd, rd_link); 158 assert(arena->ndirty >= npages); 159 arena->ndirty -= npages; 160} 161 162static size_t 163arena_chunk_dirty_npages(const extent_node_t *node) 164{ 165 166 return (extent_node_size_get(node) >> LG_PAGE); 167} 168 169void 170arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node, bool cache) 171{ 172 173 if (cache) { 174 extent_node_dirty_linkage_init(node); 175 extent_node_dirty_insert(node, &arena->runs_dirty, 176 &arena->chunks_cache); 177 arena->ndirty += arena_chunk_dirty_npages(node); 178 } 179} 180 181void 182arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node, bool dirty) 183{ 184 185 if (dirty) { 186 extent_node_dirty_remove(node); 187 assert(arena->ndirty >= arena_chunk_dirty_npages(node)); 188 arena->ndirty -= arena_chunk_dirty_npages(node); 189 } 190} 191 192JEMALLOC_INLINE_C void * 193arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info) 194{ 195 void *ret; 196 unsigned regind; 197 arena_chunk_map_misc_t *miscelm; 198 void *rpages; 199 200 assert(run->nfree > 0); 201 assert(!bitmap_full(run->bitmap, &bin_info->bitmap_info)); 202 203 regind = bitmap_sfu(run->bitmap, &bin_info->bitmap_info); 204 miscelm = arena_run_to_miscelm(run); 205 rpages = arena_miscelm_to_rpages(miscelm); 206 ret = (void *)((uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset + 207 (uintptr_t)(bin_info->reg_interval * regind)); 208 run->nfree--; 209 return (ret); 210} 211 212JEMALLOC_INLINE_C void 213arena_run_reg_dalloc(arena_run_t *run, void *ptr) 214{ 215 arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); 216 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; 217 size_t mapbits = arena_mapbits_get(chunk, pageind); 218 index_t binind = arena_ptr_small_binind_get(ptr, mapbits); 219 arena_bin_info_t *bin_info = &arena_bin_info[binind]; 220 unsigned regind = arena_run_regind(run, bin_info, ptr); 221 222 assert(run->nfree < bin_info->nregs); 223 /* Freeing an interior pointer can cause assertion failure. */ 224 assert(((uintptr_t)ptr - 225 ((uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) + 226 (uintptr_t)bin_info->reg0_offset)) % 227 (uintptr_t)bin_info->reg_interval == 0); 228 assert((uintptr_t)ptr >= 229 (uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) + 230 (uintptr_t)bin_info->reg0_offset); 231 /* Freeing an unallocated pointer can cause assertion failure. */ 232 assert(bitmap_get(run->bitmap, &bin_info->bitmap_info, regind)); 233 234 bitmap_unset(run->bitmap, &bin_info->bitmap_info, regind); 235 run->nfree++; 236} 237 238JEMALLOC_INLINE_C void 239arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages) 240{ 241 242 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + 243 (run_ind << LG_PAGE)), (npages << LG_PAGE)); 244 memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0, 245 (npages << LG_PAGE)); 246} 247 248JEMALLOC_INLINE_C void 249arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind) 250{ 251 252 JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind 253 << LG_PAGE)), PAGE); 254} 255 256JEMALLOC_INLINE_C void 257arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind) 258{ 259 size_t i; 260 UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE)); 261 262 arena_run_page_mark_zeroed(chunk, run_ind); 263 for (i = 0; i < PAGE / sizeof(size_t); i++) 264 assert(p[i] == 0); 265} 266 267static void 268arena_cactive_update(arena_t *arena, size_t add_pages, size_t sub_pages) 269{ 270 271 if (config_stats) { 272 ssize_t cactive_diff = CHUNK_CEILING((arena->nactive + add_pages 273 - sub_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive << 274 LG_PAGE); 275 if (cactive_diff != 0) 276 stats_cactive_add(cactive_diff); 277 } 278} 279 280static void 281arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind, 282 size_t flag_dirty, size_t need_pages) 283{ 284 size_t total_pages, rem_pages; 285 286 total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >> 287 LG_PAGE; 288 assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) == 289 flag_dirty); 290 assert(need_pages <= total_pages); 291 rem_pages = total_pages - need_pages; 292 293 arena_avail_remove(arena, chunk, run_ind, total_pages); 294 if (flag_dirty != 0) 295 arena_run_dirty_remove(arena, chunk, run_ind, total_pages); 296 arena_cactive_update(arena, need_pages, 0); 297 arena->nactive += need_pages; 298 299 /* Keep track of trailing unused pages for later use. */ 300 if (rem_pages > 0) { 301 if (flag_dirty != 0) { 302 arena_mapbits_unallocated_set(chunk, 303 run_ind+need_pages, (rem_pages << LG_PAGE), 304 flag_dirty); 305 arena_mapbits_unallocated_set(chunk, 306 run_ind+total_pages-1, (rem_pages << LG_PAGE), 307 flag_dirty); 308 arena_run_dirty_insert(arena, chunk, run_ind+need_pages, 309 rem_pages); 310 } else { 311 arena_mapbits_unallocated_set(chunk, run_ind+need_pages, 312 (rem_pages << LG_PAGE), 313 arena_mapbits_unzeroed_get(chunk, 314 run_ind+need_pages)); 315 arena_mapbits_unallocated_set(chunk, 316 run_ind+total_pages-1, (rem_pages << LG_PAGE), 317 arena_mapbits_unzeroed_get(chunk, 318 run_ind+total_pages-1)); 319 } 320 arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages); 321 } 322} 323 324static void 325arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size, 326 bool remove, bool zero) 327{ 328 arena_chunk_t *chunk; 329 arena_chunk_map_misc_t *miscelm; 330 size_t flag_dirty, run_ind, need_pages, i; 331 332 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); 333 miscelm = arena_run_to_miscelm(run); 334 run_ind = arena_miscelm_to_pageind(miscelm); 335 flag_dirty = arena_mapbits_dirty_get(chunk, run_ind); 336 need_pages = (size >> LG_PAGE); 337 assert(need_pages > 0); 338 339 if (remove) { 340 arena_run_split_remove(arena, chunk, run_ind, flag_dirty, 341 need_pages); 342 } 343 344 if (zero) { 345 if (flag_dirty == 0) { 346 /* 347 * The run is clean, so some pages may be zeroed (i.e. 348 * never before touched). 349 */ 350 for (i = 0; i < need_pages; i++) { 351 if (arena_mapbits_unzeroed_get(chunk, run_ind+i) 352 != 0) 353 arena_run_zero(chunk, run_ind+i, 1); 354 else if (config_debug) { 355 arena_run_page_validate_zeroed(chunk, 356 run_ind+i); 357 } else { 358 arena_run_page_mark_zeroed(chunk, 359 run_ind+i); 360 } 361 } 362 } else { 363 /* The run is dirty, so all pages must be zeroed. */ 364 arena_run_zero(chunk, run_ind, need_pages); 365 } 366 } else { 367 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + 368 (run_ind << LG_PAGE)), (need_pages << LG_PAGE)); 369 } 370 371 /* 372 * Set the last element first, in case the run only contains one page 373 * (i.e. both statements set the same element). 374 */ 375 arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty); 376 arena_mapbits_large_set(chunk, run_ind, size, flag_dirty); 377} 378 379static void 380arena_run_split_large(arena_t *arena, arena_run_t *run, size_t size, bool zero) 381{ 382 383 arena_run_split_large_helper(arena, run, size, true, zero); 384} 385 386static void 387arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero) 388{ 389 390 arena_run_split_large_helper(arena, run, size, false, zero); 391} 392 393static void 394arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size, 395 index_t binind) 396{ 397 arena_chunk_t *chunk; 398 arena_chunk_map_misc_t *miscelm; 399 size_t flag_dirty, run_ind, need_pages, i; 400 401 assert(binind != BININD_INVALID); 402 403 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); 404 miscelm = arena_run_to_miscelm(run); 405 run_ind = arena_miscelm_to_pageind(miscelm); 406 flag_dirty = arena_mapbits_dirty_get(chunk, run_ind); 407 need_pages = (size >> LG_PAGE); 408 assert(need_pages > 0); 409 410 arena_run_split_remove(arena, chunk, run_ind, flag_dirty, need_pages); 411 412 for (i = 0; i < need_pages; i++) { 413 arena_mapbits_small_set(chunk, run_ind+i, i, binind, 0); 414 if (config_debug && flag_dirty == 0 && 415 arena_mapbits_unzeroed_get(chunk, run_ind+i) == 0) 416 arena_run_page_validate_zeroed(chunk, run_ind+i); 417 } 418 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + 419 (run_ind << LG_PAGE)), (need_pages << LG_PAGE)); 420} 421 422static arena_chunk_t * 423arena_chunk_init_spare(arena_t *arena) 424{ 425 arena_chunk_t *chunk; 426 427 assert(arena->spare != NULL); 428 429 chunk = arena->spare; 430 arena->spare = NULL; 431 432 assert(arena_mapbits_allocated_get(chunk, map_bias) == 0); 433 assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0); 434 assert(arena_mapbits_unallocated_size_get(chunk, map_bias) == 435 arena_maxrun); 436 assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) == 437 arena_maxrun); 438 assert(arena_mapbits_dirty_get(chunk, map_bias) == 439 arena_mapbits_dirty_get(chunk, chunk_npages-1)); 440 441 return (chunk); 442} 443 444static bool 445arena_chunk_register(arena_t *arena, arena_chunk_t *chunk, bool zero) 446{ 447 448 extent_node_init(&chunk->node, arena, chunk, chunksize, zero); 449 extent_node_achunk_set(&chunk->node, true); 450 return (chunk_register(chunk, &chunk->node)); 451} 452 453static arena_chunk_t * 454arena_chunk_alloc_internal_hard(arena_t *arena, bool *zero) 455{ 456 arena_chunk_t *chunk; 457 chunk_alloc_t *chunk_alloc = arena->chunk_alloc; 458 chunk_dalloc_t *chunk_dalloc = arena->chunk_dalloc; 459 460 malloc_mutex_unlock(&arena->lock); 461 chunk = (arena_chunk_t *)chunk_alloc_wrapper(arena, chunk_alloc, NULL, 462 chunksize, chunksize, zero); 463 if (chunk != NULL && arena_chunk_register(arena, chunk, *zero)) { 464 chunk_dalloc_wrapper(arena, chunk_dalloc, (void *)chunk, 465 chunksize); 466 chunk = NULL; 467 } 468 malloc_mutex_lock(&arena->lock); 469 470 return (chunk); 471} 472 473static arena_chunk_t * 474arena_chunk_alloc_internal(arena_t *arena, bool *zero) 475{ 476 arena_chunk_t *chunk; 477 478 if (likely(arena->chunk_alloc == chunk_alloc_default)) { 479 chunk = chunk_alloc_cache(arena, NULL, chunksize, chunksize, 480 zero, true); 481 if (chunk != NULL && arena_chunk_register(arena, chunk, 482 *zero)) { 483 chunk_dalloc_cache(arena, chunk, chunksize); 484 return (NULL); 485 } 486 } else 487 chunk = NULL; 488 if (chunk == NULL) 489 chunk = arena_chunk_alloc_internal_hard(arena, zero); 490 491 if (config_stats && chunk != NULL) { 492 arena->stats.mapped += chunksize; 493 arena->stats.metadata_mapped += (map_bias << LG_PAGE); 494 } 495 496 return (chunk); 497} 498 499static arena_chunk_t * 500arena_chunk_init_hard(arena_t *arena) 501{ 502 arena_chunk_t *chunk; 503 bool zero; 504 size_t unzeroed, i; 505 506 assert(arena->spare == NULL); 507 508 zero = false; 509 chunk = arena_chunk_alloc_internal(arena, &zero); 510 if (chunk == NULL) 511 return (NULL); 512 513 /* 514 * Initialize the map to contain one maximal free untouched run. Mark 515 * the pages as zeroed iff chunk_alloc() returned a zeroed chunk. 516 */ 517 unzeroed = zero ? 0 : CHUNK_MAP_UNZEROED; 518 arena_mapbits_unallocated_set(chunk, map_bias, arena_maxrun, unzeroed); 519 /* 520 * There is no need to initialize the internal page map entries unless 521 * the chunk is not zeroed. 522 */ 523 if (!zero) { 524 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED( 525 (void *)arena_bitselm_get(chunk, map_bias+1), 526 (size_t)((uintptr_t) arena_bitselm_get(chunk, 527 chunk_npages-1) - (uintptr_t)arena_bitselm_get(chunk, 528 map_bias+1))); 529 for (i = map_bias+1; i < chunk_npages-1; i++) 530 arena_mapbits_unzeroed_set(chunk, i, unzeroed); 531 } else { 532 JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void 533 *)arena_bitselm_get(chunk, map_bias+1), (size_t)((uintptr_t) 534 arena_bitselm_get(chunk, chunk_npages-1) - 535 (uintptr_t)arena_bitselm_get(chunk, map_bias+1))); 536 if (config_debug) { 537 for (i = map_bias+1; i < chunk_npages-1; i++) { 538 assert(arena_mapbits_unzeroed_get(chunk, i) == 539 unzeroed); 540 } 541 } 542 } 543 arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxrun, 544 unzeroed); 545 546 return (chunk); 547} 548 549static arena_chunk_t * 550arena_chunk_alloc(arena_t *arena) 551{ 552 arena_chunk_t *chunk; 553 554 if (arena->spare != NULL) 555 chunk = arena_chunk_init_spare(arena); 556 else { 557 chunk = arena_chunk_init_hard(arena); 558 if (chunk == NULL) 559 return (NULL); 560 } 561 562 /* Insert the run into the runs_avail tree. */ 563 arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias); 564 565 return (chunk); 566} 567 568static void 569arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk) 570{ 571 572 assert(arena_mapbits_allocated_get(chunk, map_bias) == 0); 573 assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0); 574 assert(arena_mapbits_unallocated_size_get(chunk, map_bias) == 575 arena_maxrun); 576 assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) == 577 arena_maxrun); 578 assert(arena_mapbits_dirty_get(chunk, map_bias) == 579 arena_mapbits_dirty_get(chunk, chunk_npages-1)); 580 581 /* 582 * Remove run from the runs_avail tree, so that the arena does not use 583 * it. 584 */ 585 arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias); 586 587 if (arena->spare != NULL) { 588 arena_chunk_t *spare = arena->spare; 589 chunk_dalloc_t *chunk_dalloc; 590 591 arena->spare = chunk; 592 if (arena_mapbits_dirty_get(spare, map_bias) != 0) { 593 arena_run_dirty_remove(arena, spare, map_bias, 594 chunk_npages-map_bias); 595 } 596 597 chunk_deregister(spare, &spare->node); 598 599 chunk_dalloc = arena->chunk_dalloc; 600 if (likely(chunk_dalloc == chunk_dalloc_default)) 601 chunk_dalloc_cache(arena, (void *)spare, chunksize); 602 else { 603 malloc_mutex_unlock(&arena->lock); 604 chunk_dalloc_wrapper(arena, chunk_dalloc, (void *)spare, 605 chunksize); 606 malloc_mutex_lock(&arena->lock); 607 } 608 609 if (config_stats) { 610 arena->stats.mapped -= chunksize; 611 arena->stats.metadata_mapped -= (map_bias << LG_PAGE); 612 } 613 } else 614 arena->spare = chunk; 615} 616 617static void 618arena_huge_malloc_stats_update(arena_t *arena, size_t usize) 619{ 620 index_t index = size2index(usize) - nlclasses - NBINS; 621 622 cassert(config_stats); 623 624 arena->stats.nmalloc_huge++; 625 arena->stats.allocated_huge += usize; 626 arena->stats.hstats[index].nmalloc++; 627 arena->stats.hstats[index].curhchunks++; 628} 629 630static void 631arena_huge_malloc_stats_update_undo(arena_t *arena, size_t usize) 632{ 633 index_t index = size2index(usize) - nlclasses - NBINS; 634 635 cassert(config_stats); 636 637 arena->stats.nmalloc_huge--; 638 arena->stats.allocated_huge -= usize; 639 arena->stats.hstats[index].nmalloc--; 640 arena->stats.hstats[index].curhchunks--; 641} 642 643static void 644arena_huge_dalloc_stats_update(arena_t *arena, size_t usize) 645{ 646 index_t index = size2index(usize) - nlclasses - NBINS; 647 648 cassert(config_stats); 649 650 arena->stats.ndalloc_huge++; 651 arena->stats.allocated_huge -= usize; 652 arena->stats.hstats[index].ndalloc++; 653 arena->stats.hstats[index].curhchunks--; 654} 655 656static void 657arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize) 658{ 659 index_t index = size2index(usize) - nlclasses - NBINS; 660 661 cassert(config_stats); 662 663 arena->stats.ndalloc_huge--; 664 arena->stats.allocated_huge += usize; 665 arena->stats.hstats[index].ndalloc--; 666 arena->stats.hstats[index].curhchunks++; 667} 668 669static void 670arena_huge_ralloc_stats_update(arena_t *arena, size_t oldsize, size_t usize) 671{ 672 673 arena_huge_dalloc_stats_update(arena, oldsize); 674 arena_huge_malloc_stats_update(arena, usize); 675} 676 677static void 678arena_huge_ralloc_stats_update_undo(arena_t *arena, size_t oldsize, 679 size_t usize) 680{ 681 682 arena_huge_dalloc_stats_update_undo(arena, oldsize); 683 arena_huge_malloc_stats_update_undo(arena, usize); 684} 685 686extent_node_t * 687arena_node_alloc(arena_t *arena) 688{ 689 extent_node_t *node; 690 691 malloc_mutex_lock(&arena->node_cache_mtx); 692 node = ql_last(&arena->node_cache, ql_link); 693 if (node == NULL) { 694 malloc_mutex_unlock(&arena->node_cache_mtx); 695 return (base_alloc(sizeof(extent_node_t))); 696 } 697 ql_tail_remove(&arena->node_cache, extent_node_t, ql_link); 698 malloc_mutex_unlock(&arena->node_cache_mtx); 699 return (node); 700} 701 702void 703arena_node_dalloc(arena_t *arena, extent_node_t *node) 704{ 705 706 malloc_mutex_lock(&arena->node_cache_mtx); 707 ql_elm_new(node, ql_link); 708 ql_tail_insert(&arena->node_cache, node, ql_link); 709 malloc_mutex_unlock(&arena->node_cache_mtx); 710} 711 712static void * 713arena_chunk_alloc_huge_hard(arena_t *arena, chunk_alloc_t *chunk_alloc, 714 size_t usize, size_t alignment, bool *zero, size_t csize) 715{ 716 void *ret; 717 718 ret = chunk_alloc_wrapper(arena, chunk_alloc, NULL, csize, alignment, 719 zero); 720 if (ret == NULL) { 721 /* Revert optimistic stats updates. */ 722 malloc_mutex_lock(&arena->lock); 723 if (config_stats) { 724 arena_huge_malloc_stats_update_undo(arena, usize); 725 arena->stats.mapped -= usize; 726 } 727 arena->nactive -= (usize >> LG_PAGE); 728 malloc_mutex_unlock(&arena->lock); 729 } 730 731 return (ret); 732} 733 734void * 735arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment, 736 bool *zero) 737{ 738 void *ret; 739 chunk_alloc_t *chunk_alloc; 740 size_t csize = CHUNK_CEILING(usize); 741 742 malloc_mutex_lock(&arena->lock); 743 744 /* Optimistically update stats. */ 745 if (config_stats) { 746 arena_huge_malloc_stats_update(arena, usize); 747 arena->stats.mapped += usize; 748 } 749 arena->nactive += (usize >> LG_PAGE); 750 751 chunk_alloc = arena->chunk_alloc; 752 if (likely(chunk_alloc == chunk_alloc_default)) { 753 ret = chunk_alloc_cache(arena, NULL, csize, alignment, zero, 754 true); 755 } else 756 ret = NULL; 757 malloc_mutex_unlock(&arena->lock); 758 if (ret == NULL) { 759 ret = arena_chunk_alloc_huge_hard(arena, chunk_alloc, usize, 760 alignment, zero, csize); 761 } 762 763 if (config_stats && ret != NULL) 764 stats_cactive_add(usize); 765 return (ret); 766} 767 768void 769arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize) 770{ 771 chunk_dalloc_t *chunk_dalloc; 772 size_t csize; 773 774 csize = CHUNK_CEILING(usize); 775 malloc_mutex_lock(&arena->lock); 776 chunk_dalloc = arena->chunk_dalloc; 777 if (config_stats) { 778 arena_huge_dalloc_stats_update(arena, usize); 779 arena->stats.mapped -= usize; 780 stats_cactive_sub(usize); 781 } 782 arena->nactive -= (usize >> LG_PAGE); 783 784 if (likely(chunk_dalloc == chunk_dalloc_default)) { 785 chunk_dalloc_cache(arena, chunk, csize); 786 malloc_mutex_unlock(&arena->lock); 787 } else { 788 malloc_mutex_unlock(&arena->lock); 789 chunk_dalloc_wrapper(arena, chunk_dalloc, chunk, csize); 790 } 791} 792 793void 794arena_chunk_ralloc_huge_similar(arena_t *arena, void *chunk, size_t oldsize, 795 size_t usize) 796{ 797 798 assert(CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize)); 799 assert(oldsize != usize); 800 801 malloc_mutex_lock(&arena->lock); 802 if (config_stats) 803 arena_huge_ralloc_stats_update(arena, oldsize, usize); 804 if (oldsize < usize) { 805 size_t udiff = usize - oldsize; 806 arena->nactive += udiff >> LG_PAGE; 807 if (config_stats) 808 stats_cactive_add(udiff); 809 } else { 810 size_t udiff = oldsize - usize; 811 arena->nactive -= udiff >> LG_PAGE; 812 if (config_stats) 813 stats_cactive_sub(udiff); 814 } 815 malloc_mutex_unlock(&arena->lock); 816} 817 818void 819arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk, size_t oldsize, 820 size_t usize) 821{ 822 size_t udiff = oldsize - usize; 823 size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize); 824 825 malloc_mutex_lock(&arena->lock); 826 if (config_stats) { 827 arena_huge_ralloc_stats_update(arena, oldsize, usize); 828 if (cdiff != 0) { 829 arena->stats.mapped -= cdiff; 830 stats_cactive_sub(udiff); 831 } 832 } 833 arena->nactive -= udiff >> LG_PAGE; 834 835 if (cdiff != 0) { 836 chunk_dalloc_t *chunk_dalloc = arena->chunk_dalloc; 837 void *nchunk = (void *)((uintptr_t)chunk + 838 CHUNK_CEILING(usize)); 839 840 if (likely(chunk_dalloc == chunk_dalloc_default)) { 841 chunk_dalloc_cache(arena, nchunk, cdiff); 842 malloc_mutex_unlock(&arena->lock); 843 } else { 844 malloc_mutex_unlock(&arena->lock); 845 chunk_dalloc_wrapper(arena, chunk_dalloc, nchunk, 846 cdiff); 847 } 848 } else 849 malloc_mutex_unlock(&arena->lock); 850} 851 852bool 853arena_chunk_ralloc_huge_expand_hard(arena_t *arena, chunk_alloc_t *chunk_alloc, 854 size_t oldsize, size_t usize, bool *zero, void *nchunk, size_t udiff, 855 size_t cdiff) 856{ 857 bool err; 858 859 err = (chunk_alloc_wrapper(arena, chunk_alloc, nchunk, cdiff, chunksize, 860 zero) == NULL); 861 if (err) { 862 /* Revert optimistic stats updates. */ 863 malloc_mutex_lock(&arena->lock); 864 if (config_stats) { 865 arena_huge_ralloc_stats_update_undo(arena, oldsize, 866 usize); 867 arena->stats.mapped -= cdiff; 868 } 869 arena->nactive -= (udiff >> LG_PAGE); 870 malloc_mutex_unlock(&arena->lock); 871 } 872 return (err); 873} 874 875bool 876arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk, size_t oldsize, 877 size_t usize, bool *zero) 878{ 879 bool err; 880 chunk_alloc_t *chunk_alloc; 881 void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize)); 882 size_t udiff = usize - oldsize; 883 size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize); 884 885 malloc_mutex_lock(&arena->lock); 886 887 /* Optimistically update stats. */ 888 if (config_stats) { 889 arena_huge_ralloc_stats_update(arena, oldsize, usize); 890 arena->stats.mapped += cdiff; 891 } 892 arena->nactive += (udiff >> LG_PAGE); 893 894 chunk_alloc = arena->chunk_alloc; 895 if (likely(chunk_alloc == chunk_alloc_default)) { 896 err = (chunk_alloc_cache(arena, nchunk, cdiff, chunksize, zero, 897 true) == NULL); 898 } else 899 err = true; 900 malloc_mutex_unlock(&arena->lock); 901 if (err) { 902 err = arena_chunk_ralloc_huge_expand_hard(arena, chunk_alloc, 903 oldsize, usize, zero, nchunk, udiff, cdiff); 904 } 905 906 if (config_stats && !err) 907 stats_cactive_add(udiff); 908 return (err); 909} 910 911/* Do first-fit run selection. */ 912static arena_run_t * 913arena_run_first_fit(arena_t *arena, size_t size) 914{ 915 arena_run_t *run; 916 index_t index, max_index; 917 918 assert(size == s2u(size)); 919 assert(size == PAGE_CEILING(size)); 920 921 /* 922 * Iterate over all size classes that are at least large enough to 923 * satisfy the request, search for the lowest run of each size class, 924 * and choose the lowest of the runs found. 925 */ 926 run = NULL; 927 for (index = size2index(size), max_index = size2index(arena_maxclass); 928 index <= max_index;) { 929 arena_run_t *currun; 930 arena_chunk_t *currun_chunk; 931 size_t currun_pageind, currun_size; 932 size_t usize = PAGE_CEILING(index2size(index)); 933 arena_chunk_map_misc_t *key = (arena_chunk_map_misc_t *)(usize | 934 CHUNK_MAP_KEY); 935 arena_chunk_map_misc_t *miscelm = 936 arena_avail_tree_nsearch(&arena->runs_avail, key); 937 if (miscelm == NULL) 938 break; 939 currun = &miscelm->run; 940 if (run == NULL || (uintptr_t)currun < (uintptr_t)run) 941 run = currun; 942 currun_chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(currun); 943 currun_pageind = arena_miscelm_to_pageind(miscelm); 944 currun_size = arena_mapbits_unallocated_size_get(currun_chunk, 945 currun_pageind); 946 assert(size2index(currun_size) + 1 > index); 947 index = size2index(currun_size) + 1; 948 } 949 950 return (run); 951} 952 953static arena_run_t * 954arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero) 955{ 956 arena_run_t *run = arena_run_first_fit(arena, s2u(size)); 957 if (run != NULL) 958 arena_run_split_large(arena, run, size, zero); 959 return (run); 960} 961 962static arena_run_t * 963arena_run_alloc_large(arena_t *arena, size_t size, bool zero) 964{ 965 arena_chunk_t *chunk; 966 arena_run_t *run; 967 968 assert(size <= arena_maxrun); 969 assert((size & PAGE_MASK) == 0); 970 971 /* Search the arena's chunks for the lowest best fit. */ 972 run = arena_run_alloc_large_helper(arena, size, zero); 973 if (run != NULL) 974 return (run); 975 976 /* 977 * No usable runs. Create a new chunk from which to allocate the run. 978 */ 979 chunk = arena_chunk_alloc(arena); 980 if (chunk != NULL) { 981 run = &arena_miscelm_get(chunk, map_bias)->run; 982 arena_run_split_large(arena, run, size, zero); 983 return (run); 984 } 985 986 /* 987 * arena_chunk_alloc() failed, but another thread may have made 988 * sufficient memory available while this one dropped arena->lock in 989 * arena_chunk_alloc(), so search one more time. 990 */ 991 return (arena_run_alloc_large_helper(arena, size, zero)); 992} 993 994static arena_run_t * 995arena_run_alloc_small_helper(arena_t *arena, size_t size, index_t binind) 996{ 997 arena_run_t *run = arena_run_first_fit(arena, PAGE_CEILING(size)); 998 if (run != NULL) 999 arena_run_split_small(arena, run, size, binind); 1000 return (run); 1001} 1002 1003static arena_run_t * 1004arena_run_alloc_small(arena_t *arena, size_t size, index_t binind) 1005{ 1006 arena_chunk_t *chunk; 1007 arena_run_t *run; 1008 1009 assert(size <= arena_maxrun); 1010 assert((size & PAGE_MASK) == 0); 1011 assert(binind != BININD_INVALID); 1012 1013 /* Search the arena's chunks for the lowest best fit. */ 1014 run = arena_run_alloc_small_helper(arena, size, binind); 1015 if (run != NULL) 1016 return (run); 1017 1018 /* 1019 * No usable runs. Create a new chunk from which to allocate the run. 1020 */ 1021 chunk = arena_chunk_alloc(arena); 1022 if (chunk != NULL) { 1023 run = &arena_miscelm_get(chunk, map_bias)->run; 1024 arena_run_split_small(arena, run, size, binind); 1025 return (run); 1026 } 1027 1028 /* 1029 * arena_chunk_alloc() failed, but another thread may have made 1030 * sufficient memory available while this one dropped arena->lock in 1031 * arena_chunk_alloc(), so search one more time. 1032 */ 1033 return (arena_run_alloc_small_helper(arena, size, binind)); 1034} 1035 1036static bool 1037arena_lg_dirty_mult_valid(ssize_t lg_dirty_mult) 1038{ 1039 1040 return (lg_dirty_mult >= -1 && lg_dirty_mult < (ssize_t)(sizeof(size_t) 1041 << 3)); 1042} 1043 1044ssize_t 1045arena_lg_dirty_mult_get(arena_t *arena) 1046{ 1047 ssize_t lg_dirty_mult; 1048 1049 malloc_mutex_lock(&arena->lock); 1050 lg_dirty_mult = arena->lg_dirty_mult; 1051 malloc_mutex_unlock(&arena->lock); 1052 1053 return (lg_dirty_mult); 1054} 1055 1056bool 1057arena_lg_dirty_mult_set(arena_t *arena, ssize_t lg_dirty_mult) 1058{ 1059 1060 if (!arena_lg_dirty_mult_valid(lg_dirty_mult)) 1061 return (true); 1062 1063 malloc_mutex_lock(&arena->lock); 1064 arena->lg_dirty_mult = lg_dirty_mult; 1065 arena_maybe_purge(arena); 1066 malloc_mutex_unlock(&arena->lock); 1067 1068 return (false); 1069} 1070 1071void 1072arena_maybe_purge(arena_t *arena) 1073{ 1074 size_t threshold; 1075 1076 /* Don't purge if the option is disabled. */ 1077 if (arena->lg_dirty_mult < 0) 1078 return; 1079 threshold = (arena->nactive >> arena->lg_dirty_mult); 1080 threshold = threshold < chunk_npages ? chunk_npages : threshold; 1081 /* 1082 * Don't purge unless the number of purgeable pages exceeds the 1083 * threshold. 1084 */ 1085 if (arena->ndirty <= threshold) 1086 return; 1087 1088 arena_purge(arena, false); 1089} 1090 1091static size_t 1092arena_dirty_count(arena_t *arena) 1093{ 1094 size_t ndirty = 0; 1095 arena_runs_dirty_link_t *rdelm; 1096 extent_node_t *chunkselm; 1097 1098 for (rdelm = qr_next(&arena->runs_dirty, rd_link), 1099 chunkselm = qr_next(&arena->chunks_cache, cc_link); 1100 rdelm != &arena->runs_dirty; rdelm = qr_next(rdelm, rd_link)) { 1101 size_t npages; 1102 1103 if (rdelm == &chunkselm->rd) { 1104 npages = extent_node_size_get(chunkselm) >> LG_PAGE; 1105 chunkselm = qr_next(chunkselm, cc_link); 1106 } else { 1107 arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE( 1108 rdelm); 1109 arena_chunk_map_misc_t *miscelm = 1110 arena_rd_to_miscelm(rdelm); 1111 size_t pageind = arena_miscelm_to_pageind(miscelm); 1112 assert(arena_mapbits_allocated_get(chunk, pageind) == 1113 0); 1114 assert(arena_mapbits_large_get(chunk, pageind) == 0); 1115 assert(arena_mapbits_dirty_get(chunk, pageind) != 0); 1116 npages = arena_mapbits_unallocated_size_get(chunk, 1117 pageind) >> LG_PAGE; 1118 } 1119 ndirty += npages; 1120 } 1121 1122 return (ndirty); 1123} 1124 1125static size_t 1126arena_compute_npurge(arena_t *arena, bool all) 1127{ 1128 size_t npurge; 1129 1130 /* 1131 * Compute the minimum number of pages that this thread should try to 1132 * purge. 1133 */ 1134 if (!all) { 1135 size_t threshold = (arena->nactive >> arena->lg_dirty_mult); 1136 threshold = threshold < chunk_npages ? chunk_npages : threshold; 1137 1138 npurge = arena->ndirty - threshold; 1139 } else 1140 npurge = arena->ndirty; 1141 1142 return (npurge); 1143} 1144 1145static size_t 1146arena_stash_dirty(arena_t *arena, bool all, size_t npurge, 1147 arena_runs_dirty_link_t *purge_runs_sentinel, 1148 extent_node_t *purge_chunks_sentinel) 1149{ 1150 arena_runs_dirty_link_t *rdelm, *rdelm_next; 1151 extent_node_t *chunkselm; 1152 size_t nstashed = 0; 1153 1154 /* Stash at least npurge pages. */ 1155 for (rdelm = qr_next(&arena->runs_dirty, rd_link), 1156 chunkselm = qr_next(&arena->chunks_cache, cc_link); 1157 rdelm != &arena->runs_dirty; rdelm = rdelm_next) { 1158 size_t npages; 1159 rdelm_next = qr_next(rdelm, rd_link); 1160 1161 if (rdelm == &chunkselm->rd) { 1162 extent_node_t *chunkselm_next; 1163 bool zero; 1164 UNUSED void *chunk; 1165 1166 chunkselm_next = qr_next(chunkselm, cc_link); 1167 /* 1168 * Allocate. chunkselm remains valid due to the 1169 * dalloc_node=false argument to chunk_alloc_cache(). 1170 */ 1171 zero = false; 1172 chunk = chunk_alloc_cache(arena, 1173 extent_node_addr_get(chunkselm), 1174 extent_node_size_get(chunkselm), chunksize, &zero, 1175 false); 1176 assert(chunk == extent_node_addr_get(chunkselm)); 1177 assert(zero == extent_node_zeroed_get(chunkselm)); 1178 extent_node_dirty_insert(chunkselm, purge_runs_sentinel, 1179 purge_chunks_sentinel); 1180 npages = extent_node_size_get(chunkselm) >> LG_PAGE; 1181 chunkselm = chunkselm_next; 1182 } else { 1183 arena_chunk_t *chunk = 1184 (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm); 1185 arena_chunk_map_misc_t *miscelm = 1186 arena_rd_to_miscelm(rdelm); 1187 size_t pageind = arena_miscelm_to_pageind(miscelm); 1188 arena_run_t *run = &miscelm->run; 1189 size_t run_size = 1190 arena_mapbits_unallocated_size_get(chunk, pageind); 1191 1192 npages = run_size >> LG_PAGE; 1193 1194 assert(pageind + npages <= chunk_npages); 1195 assert(arena_mapbits_dirty_get(chunk, pageind) == 1196 arena_mapbits_dirty_get(chunk, pageind+npages-1)); 1197 1198 /* 1199 * If purging the spare chunk's run, make it available 1200 * prior to allocation. 1201 */ 1202 if (chunk == arena->spare) 1203 arena_chunk_alloc(arena); 1204 1205 /* Temporarily allocate the free dirty run. */ 1206 arena_run_split_large(arena, run, run_size, false); 1207 /* Stash. */ 1208 if (false) 1209 qr_new(rdelm, rd_link); /* Redundant. */ 1210 else { 1211 assert(qr_next(rdelm, rd_link) == rdelm); 1212 assert(qr_prev(rdelm, rd_link) == rdelm); 1213 } 1214 qr_meld(purge_runs_sentinel, rdelm, rd_link); 1215 } 1216 1217 nstashed += npages; 1218 if (!all && nstashed >= npurge) 1219 break; 1220 } 1221 1222 return (nstashed); 1223} 1224 1225static size_t 1226arena_purge_stashed(arena_t *arena, 1227 arena_runs_dirty_link_t *purge_runs_sentinel, 1228 extent_node_t *purge_chunks_sentinel) 1229{ 1230 size_t npurged, nmadvise; 1231 chunk_purge_t *chunk_purge; 1232 arena_runs_dirty_link_t *rdelm; 1233 extent_node_t *chunkselm; 1234 1235 if (config_stats) 1236 nmadvise = 0; 1237 npurged = 0; 1238 1239 chunk_purge = arena->chunk_purge; 1240 malloc_mutex_unlock(&arena->lock); 1241 for (rdelm = qr_next(purge_runs_sentinel, rd_link), 1242 chunkselm = qr_next(purge_chunks_sentinel, cc_link); 1243 rdelm != purge_runs_sentinel; rdelm = qr_next(rdelm, rd_link)) { 1244 size_t npages; 1245 1246 if (rdelm == &chunkselm->rd) { 1247 size_t size = extent_node_size_get(chunkselm); 1248 void *addr, *chunk; 1249 size_t offset; 1250 bool unzeroed; 1251 1252 npages = size >> LG_PAGE; 1253 addr = extent_node_addr_get(chunkselm); 1254 chunk = CHUNK_ADDR2BASE(addr); 1255 offset = CHUNK_ADDR2OFFSET(addr); 1256 unzeroed = chunk_purge_wrapper(arena, chunk_purge, 1257 chunk, offset, size); 1258 extent_node_zeroed_set(chunkselm, !unzeroed); 1259 chunkselm = qr_next(chunkselm, cc_link); 1260 } else { 1261 size_t pageind, run_size, flag_unzeroed, i; 1262 bool unzeroed; 1263 arena_chunk_t *chunk = (arena_chunk_t 1264 *)CHUNK_ADDR2BASE(rdelm); 1265 arena_chunk_map_misc_t *miscelm = 1266 arena_rd_to_miscelm(rdelm); 1267 pageind = arena_miscelm_to_pageind(miscelm); 1268 run_size = arena_mapbits_large_size_get(chunk, pageind); 1269 npages = run_size >> LG_PAGE; 1270 1271 assert(pageind + npages <= chunk_npages); 1272 unzeroed = chunk_purge_wrapper(arena, chunk_purge, 1273 chunk, pageind << LG_PAGE, run_size); 1274 flag_unzeroed = unzeroed ? CHUNK_MAP_UNZEROED : 0; 1275 1276 /* 1277 * Set the unzeroed flag for all pages, now that 1278 * chunk_purge_wrapper() has returned whether the pages 1279 * were zeroed as a side effect of purging. This chunk 1280 * map modification is safe even though the arena mutex 1281 * isn't currently owned by this thread, because the run 1282 * is marked as allocated, thus protecting it from being 1283 * modified by any other thread. As long as these 1284 * writes don't perturb the first and last elements' 1285 * CHUNK_MAP_ALLOCATED bits, behavior is well defined. 1286 */ 1287 for (i = 0; i < npages; i++) { 1288 arena_mapbits_unzeroed_set(chunk, pageind+i, 1289 flag_unzeroed); 1290 } 1291 } 1292 1293 npurged += npages; 1294 if (config_stats) 1295 nmadvise++; 1296 } 1297 malloc_mutex_lock(&arena->lock); 1298 1299 if (config_stats) { 1300 arena->stats.nmadvise += nmadvise; 1301 arena->stats.purged += npurged; 1302 } 1303 1304 return (npurged); 1305} 1306 1307static void 1308arena_unstash_purged(arena_t *arena, 1309 arena_runs_dirty_link_t *purge_runs_sentinel, 1310 extent_node_t *purge_chunks_sentinel) 1311{ 1312 arena_runs_dirty_link_t *rdelm, *rdelm_next; 1313 extent_node_t *chunkselm; 1314 1315 /* Deallocate runs. */ 1316 for (rdelm = qr_next(purge_runs_sentinel, rd_link), 1317 chunkselm = qr_next(purge_chunks_sentinel, cc_link); 1318 rdelm != purge_runs_sentinel; rdelm = rdelm_next) { 1319 rdelm_next = qr_next(rdelm, rd_link); 1320 if (rdelm == &chunkselm->rd) { 1321 extent_node_t *chunkselm_next = qr_next(chunkselm, 1322 cc_link); 1323 void *addr = extent_node_addr_get(chunkselm); 1324 size_t size = extent_node_size_get(chunkselm); 1325 bool zeroed = extent_node_zeroed_get(chunkselm); 1326 extent_node_dirty_remove(chunkselm); 1327 arena_node_dalloc(arena, chunkselm); 1328 chunkselm = chunkselm_next; 1329 chunk_dalloc_arena(arena, addr, size, zeroed); 1330 } else { 1331 arena_chunk_map_misc_t *miscelm = 1332 arena_rd_to_miscelm(rdelm); 1333 arena_run_t *run = &miscelm->run; 1334 qr_remove(rdelm, rd_link); 1335 arena_run_dalloc(arena, run, false, true); 1336 } 1337 } 1338} 1339 1340static void 1341arena_purge(arena_t *arena, bool all) 1342{ 1343 size_t npurge, npurgeable, npurged; 1344 arena_runs_dirty_link_t purge_runs_sentinel; 1345 extent_node_t purge_chunks_sentinel; 1346 1347 /* 1348 * Calls to arena_dirty_count() are disabled even for debug builds 1349 * because overhead grows nonlinearly as memory usage increases. 1350 */ 1351 if (false && config_debug) { 1352 size_t ndirty = arena_dirty_count(arena); 1353 assert(ndirty == arena->ndirty); 1354 } 1355 assert((arena->nactive >> arena->lg_dirty_mult) < arena->ndirty || all); 1356 1357 if (config_stats) 1358 arena->stats.npurge++; 1359 1360 npurge = arena_compute_npurge(arena, all); 1361 qr_new(&purge_runs_sentinel, rd_link); 1362 extent_node_dirty_linkage_init(&purge_chunks_sentinel); 1363 1364 npurgeable = arena_stash_dirty(arena, all, npurge, &purge_runs_sentinel, 1365 &purge_chunks_sentinel); 1366 assert(npurgeable >= npurge); 1367 npurged = arena_purge_stashed(arena, &purge_runs_sentinel, 1368 &purge_chunks_sentinel); 1369 assert(npurged == npurgeable); 1370 arena_unstash_purged(arena, &purge_runs_sentinel, 1371 &purge_chunks_sentinel); 1372} 1373 1374void 1375arena_purge_all(arena_t *arena) 1376{ 1377 1378 malloc_mutex_lock(&arena->lock); 1379 arena_purge(arena, true); 1380 malloc_mutex_unlock(&arena->lock); 1381} 1382 1383static void 1384arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size, 1385 size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty) 1386{ 1387 size_t size = *p_size; 1388 size_t run_ind = *p_run_ind; 1389 size_t run_pages = *p_run_pages; 1390 1391 /* Try to coalesce forward. */ 1392 if (run_ind + run_pages < chunk_npages && 1393 arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 && 1394 arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty) { 1395 size_t nrun_size = arena_mapbits_unallocated_size_get(chunk, 1396 run_ind+run_pages); 1397 size_t nrun_pages = nrun_size >> LG_PAGE; 1398 1399 /* 1400 * Remove successor from runs_avail; the coalesced run is 1401 * inserted later. 1402 */ 1403 assert(arena_mapbits_unallocated_size_get(chunk, 1404 run_ind+run_pages+nrun_pages-1) == nrun_size); 1405 assert(arena_mapbits_dirty_get(chunk, 1406 run_ind+run_pages+nrun_pages-1) == flag_dirty); 1407 arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages); 1408 1409 /* 1410 * If the successor is dirty, remove it from the set of dirty 1411 * pages. 1412 */ 1413 if (flag_dirty != 0) { 1414 arena_run_dirty_remove(arena, chunk, run_ind+run_pages, 1415 nrun_pages); 1416 } 1417 1418 size += nrun_size; 1419 run_pages += nrun_pages; 1420 1421 arena_mapbits_unallocated_size_set(chunk, run_ind, size); 1422 arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1, 1423 size); 1424 } 1425 1426 /* Try to coalesce backward. */ 1427 if (run_ind > map_bias && arena_mapbits_allocated_get(chunk, 1428 run_ind-1) == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) == 1429 flag_dirty) { 1430 size_t prun_size = arena_mapbits_unallocated_size_get(chunk, 1431 run_ind-1); 1432 size_t prun_pages = prun_size >> LG_PAGE; 1433 1434 run_ind -= prun_pages; 1435 1436 /* 1437 * Remove predecessor from runs_avail; the coalesced run is 1438 * inserted later. 1439 */ 1440 assert(arena_mapbits_unallocated_size_get(chunk, run_ind) == 1441 prun_size); 1442 assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty); 1443 arena_avail_remove(arena, chunk, run_ind, prun_pages); 1444 1445 /* 1446 * If the predecessor is dirty, remove it from the set of dirty 1447 * pages. 1448 */ 1449 if (flag_dirty != 0) { 1450 arena_run_dirty_remove(arena, chunk, run_ind, 1451 prun_pages); 1452 } 1453 1454 size += prun_size; 1455 run_pages += prun_pages; 1456 1457 arena_mapbits_unallocated_size_set(chunk, run_ind, size); 1458 arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1, 1459 size); 1460 } 1461 1462 *p_size = size; 1463 *p_run_ind = run_ind; 1464 *p_run_pages = run_pages; 1465} 1466 1467static void 1468arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned) 1469{ 1470 arena_chunk_t *chunk; 1471 arena_chunk_map_misc_t *miscelm; 1472 size_t size, run_ind, run_pages, flag_dirty; 1473 1474 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); 1475 miscelm = arena_run_to_miscelm(run); 1476 run_ind = arena_miscelm_to_pageind(miscelm); 1477 assert(run_ind >= map_bias); 1478 assert(run_ind < chunk_npages); 1479 if (arena_mapbits_large_get(chunk, run_ind) != 0) { 1480 size = arena_mapbits_large_size_get(chunk, run_ind); 1481 assert(size == PAGE || 1482 arena_mapbits_large_size_get(chunk, 1483 run_ind+(size>>LG_PAGE)-1) == 0); 1484 } else { 1485 arena_bin_info_t *bin_info = &arena_bin_info[run->binind]; 1486 size = bin_info->run_size; 1487 } 1488 run_pages = (size >> LG_PAGE); 1489 arena_cactive_update(arena, 0, run_pages); 1490 arena->nactive -= run_pages; 1491 1492 /* 1493 * The run is dirty if the caller claims to have dirtied it, as well as 1494 * if it was already dirty before being allocated and the caller 1495 * doesn't claim to have cleaned it. 1496 */ 1497 assert(arena_mapbits_dirty_get(chunk, run_ind) == 1498 arena_mapbits_dirty_get(chunk, run_ind+run_pages-1)); 1499 if (!cleaned && arena_mapbits_dirty_get(chunk, run_ind) != 0) 1500 dirty = true; 1501 flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0; 1502 1503 /* Mark pages as unallocated in the chunk map. */ 1504 if (dirty) { 1505 arena_mapbits_unallocated_set(chunk, run_ind, size, 1506 CHUNK_MAP_DIRTY); 1507 arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size, 1508 CHUNK_MAP_DIRTY); 1509 } else { 1510 arena_mapbits_unallocated_set(chunk, run_ind, size, 1511 arena_mapbits_unzeroed_get(chunk, run_ind)); 1512 arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size, 1513 arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1)); 1514 } 1515 1516 arena_run_coalesce(arena, chunk, &size, &run_ind, &run_pages, flag_dirty); 1517 1518 /* Insert into runs_avail, now that coalescing is complete. */ 1519 assert(arena_mapbits_unallocated_size_get(chunk, run_ind) == 1520 arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1)); 1521 assert(arena_mapbits_dirty_get(chunk, run_ind) == 1522 arena_mapbits_dirty_get(chunk, run_ind+run_pages-1)); 1523 arena_avail_insert(arena, chunk, run_ind, run_pages); 1524 1525 if (dirty) 1526 arena_run_dirty_insert(arena, chunk, run_ind, run_pages); 1527 1528 /* Deallocate chunk if it is now completely unused. */ 1529 if (size == arena_maxrun) { 1530 assert(run_ind == map_bias); 1531 assert(run_pages == (arena_maxrun >> LG_PAGE)); 1532 arena_chunk_dalloc(arena, chunk); 1533 } 1534 1535 /* 1536 * It is okay to do dirty page processing here even if the chunk was 1537 * deallocated above, since in that case it is the spare. Waiting 1538 * until after possible chunk deallocation to do dirty processing 1539 * allows for an old spare to be fully deallocated, thus decreasing the 1540 * chances of spuriously crossing the dirty page purging threshold. 1541 */ 1542 if (dirty) 1543 arena_maybe_purge(arena); 1544} 1545 1546static void 1547arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, 1548 size_t oldsize, size_t newsize) 1549{ 1550 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); 1551 size_t pageind = arena_miscelm_to_pageind(miscelm); 1552 size_t head_npages = (oldsize - newsize) >> LG_PAGE; 1553 size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind); 1554 1555 assert(oldsize > newsize); 1556 1557 /* 1558 * Update the chunk map so that arena_run_dalloc() can treat the 1559 * leading run as separately allocated. Set the last element of each 1560 * run first, in case of single-page runs. 1561 */ 1562 assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize); 1563 arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty); 1564 arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty); 1565 1566 if (config_debug) { 1567 UNUSED size_t tail_npages = newsize >> LG_PAGE; 1568 assert(arena_mapbits_large_size_get(chunk, 1569 pageind+head_npages+tail_npages-1) == 0); 1570 assert(arena_mapbits_dirty_get(chunk, 1571 pageind+head_npages+tail_npages-1) == flag_dirty); 1572 } 1573 arena_mapbits_large_set(chunk, pageind+head_npages, newsize, 1574 flag_dirty); 1575 1576 arena_run_dalloc(arena, run, false, false); 1577} 1578 1579static void 1580arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, 1581 size_t oldsize, size_t newsize, bool dirty) 1582{ 1583 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); 1584 size_t pageind = arena_miscelm_to_pageind(miscelm); 1585 size_t head_npages = newsize >> LG_PAGE; 1586 size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind); 1587 arena_chunk_map_misc_t *tail_miscelm; 1588 arena_run_t *tail_run; 1589 1590 assert(oldsize > newsize); 1591 1592 /* 1593 * Update the chunk map so that arena_run_dalloc() can treat the 1594 * trailing run as separately allocated. Set the last element of each 1595 * run first, in case of single-page runs. 1596 */ 1597 assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize); 1598 arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty); 1599 arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty); 1600 1601 if (config_debug) { 1602 UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE; 1603 assert(arena_mapbits_large_size_get(chunk, 1604 pageind+head_npages+tail_npages-1) == 0); 1605 assert(arena_mapbits_dirty_get(chunk, 1606 pageind+head_npages+tail_npages-1) == flag_dirty); 1607 } 1608 arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize, 1609 flag_dirty); 1610 1611 tail_miscelm = arena_miscelm_get(chunk, pageind + head_npages); 1612 tail_run = &tail_miscelm->run; 1613 arena_run_dalloc(arena, tail_run, dirty, false); 1614} 1615 1616static arena_run_t * 1617arena_bin_runs_first(arena_bin_t *bin) 1618{ 1619 arena_chunk_map_misc_t *miscelm = arena_run_tree_first(&bin->runs); 1620 if (miscelm != NULL) 1621 return (&miscelm->run); 1622 1623 return (NULL); 1624} 1625 1626static void 1627arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run) 1628{ 1629 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); 1630 1631 assert(arena_run_tree_search(&bin->runs, miscelm) == NULL); 1632 1633 arena_run_tree_insert(&bin->runs, miscelm); 1634} 1635 1636static void 1637arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run) 1638{ 1639 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); 1640 1641 assert(arena_run_tree_search(&bin->runs, miscelm) != NULL); 1642 1643 arena_run_tree_remove(&bin->runs, miscelm); 1644} 1645 1646static arena_run_t * 1647arena_bin_nonfull_run_tryget(arena_bin_t *bin) 1648{ 1649 arena_run_t *run = arena_bin_runs_first(bin); 1650 if (run != NULL) { 1651 arena_bin_runs_remove(bin, run); 1652 if (config_stats) 1653 bin->stats.reruns++; 1654 } 1655 return (run); 1656} 1657 1658static arena_run_t * 1659arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin) 1660{ 1661 arena_run_t *run; 1662 index_t binind; 1663 arena_bin_info_t *bin_info; 1664 1665 /* Look for a usable run. */ 1666 run = arena_bin_nonfull_run_tryget(bin); 1667 if (run != NULL) 1668 return (run); 1669 /* No existing runs have any space available. */ 1670 1671 binind = arena_bin_index(arena, bin); 1672 bin_info = &arena_bin_info[binind]; 1673 1674 /* Allocate a new run. */ 1675 malloc_mutex_unlock(&bin->lock); 1676 /******************************/ 1677 malloc_mutex_lock(&arena->lock); 1678 run = arena_run_alloc_small(arena, bin_info->run_size, binind); 1679 if (run != NULL) { 1680 /* Initialize run internals. */ 1681 run->binind = binind; 1682 run->nfree = bin_info->nregs; 1683 bitmap_init(run->bitmap, &bin_info->bitmap_info); 1684 } 1685 malloc_mutex_unlock(&arena->lock); 1686 /********************************/ 1687 malloc_mutex_lock(&bin->lock); 1688 if (run != NULL) { 1689 if (config_stats) { 1690 bin->stats.nruns++; 1691 bin->stats.curruns++; 1692 } 1693 return (run); 1694 } 1695 1696 /* 1697 * arena_run_alloc_small() failed, but another thread may have made 1698 * sufficient memory available while this one dropped bin->lock above, 1699 * so search one more time. 1700 */ 1701 run = arena_bin_nonfull_run_tryget(bin); 1702 if (run != NULL) 1703 return (run); 1704 1705 return (NULL); 1706} 1707 1708/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */ 1709static void * 1710arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin) 1711{ 1712 void *ret; 1713 index_t binind; 1714 arena_bin_info_t *bin_info; 1715 arena_run_t *run; 1716 1717 binind = arena_bin_index(arena, bin); 1718 bin_info = &arena_bin_info[binind]; 1719 bin->runcur = NULL; 1720 run = arena_bin_nonfull_run_get(arena, bin); 1721 if (bin->runcur != NULL && bin->runcur->nfree > 0) { 1722 /* 1723 * Another thread updated runcur while this one ran without the 1724 * bin lock in arena_bin_nonfull_run_get(). 1725 */ 1726 assert(bin->runcur->nfree > 0); 1727 ret = arena_run_reg_alloc(bin->runcur, bin_info); 1728 if (run != NULL) { 1729 arena_chunk_t *chunk; 1730 1731 /* 1732 * arena_run_alloc_small() may have allocated run, or 1733 * it may have pulled run from the bin's run tree. 1734 * Therefore it is unsafe to make any assumptions about 1735 * how run has previously been used, and 1736 * arena_bin_lower_run() must be called, as if a region 1737 * were just deallocated from the run. 1738 */ 1739 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); 1740 if (run->nfree == bin_info->nregs) 1741 arena_dalloc_bin_run(arena, chunk, run, bin); 1742 else 1743 arena_bin_lower_run(arena, chunk, run, bin); 1744 } 1745 return (ret); 1746 } 1747 1748 if (run == NULL) 1749 return (NULL); 1750 1751 bin->runcur = run; 1752 1753 assert(bin->runcur->nfree > 0); 1754 1755 return (arena_run_reg_alloc(bin->runcur, bin_info)); 1756} 1757 1758void 1759arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, index_t binind, 1760 uint64_t prof_accumbytes) 1761{ 1762 unsigned i, nfill; 1763 arena_bin_t *bin; 1764 arena_run_t *run; 1765 void *ptr; 1766 1767 assert(tbin->ncached == 0); 1768 1769 if (config_prof && arena_prof_accum(arena, prof_accumbytes)) 1770 prof_idump(); 1771 bin = &arena->bins[binind]; 1772 malloc_mutex_lock(&bin->lock); 1773 for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >> 1774 tbin->lg_fill_div); i < nfill; i++) { 1775 if ((run = bin->runcur) != NULL && run->nfree > 0) 1776 ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]); 1777 else 1778 ptr = arena_bin_malloc_hard(arena, bin); 1779 if (ptr == NULL) { 1780 /* 1781 * OOM. tbin->avail isn't yet filled down to its first 1782 * element, so the successful allocations (if any) must 1783 * be moved to the base of tbin->avail before bailing 1784 * out. 1785 */ 1786 if (i > 0) { 1787 memmove(tbin->avail, &tbin->avail[nfill - i], 1788 i * sizeof(void *)); 1789 } 1790 break; 1791 } 1792 if (config_fill && unlikely(opt_junk_alloc)) { 1793 arena_alloc_junk_small(ptr, &arena_bin_info[binind], 1794 true); 1795 } 1796 /* Insert such that low regions get used first. */ 1797 tbin->avail[nfill - 1 - i] = ptr; 1798 } 1799 if (config_stats) { 1800 bin->stats.nmalloc += i; 1801 bin->stats.nrequests += tbin->tstats.nrequests; 1802 bin->stats.curregs += i; 1803 bin->stats.nfills++; 1804 tbin->tstats.nrequests = 0; 1805 } 1806 malloc_mutex_unlock(&bin->lock); 1807 tbin->ncached = i; 1808} 1809 1810void 1811arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero) 1812{ 1813 1814 if (zero) { 1815 size_t redzone_size = bin_info->redzone_size; 1816 memset((void *)((uintptr_t)ptr - redzone_size), 0xa5, 1817 redzone_size); 1818 memset((void *)((uintptr_t)ptr + bin_info->reg_size), 0xa5, 1819 redzone_size); 1820 } else { 1821 memset((void *)((uintptr_t)ptr - bin_info->redzone_size), 0xa5, 1822 bin_info->reg_interval); 1823 } 1824} 1825 1826#ifdef JEMALLOC_JET 1827#undef arena_redzone_corruption 1828#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption_impl) 1829#endif 1830static void 1831arena_redzone_corruption(void *ptr, size_t usize, bool after, 1832 size_t offset, uint8_t byte) 1833{ 1834 1835 malloc_printf("<jemalloc>: Corrupt redzone %zu byte%s %s %p " 1836 "(size %zu), byte=%#x\n", offset, (offset == 1) ? "" : "s", 1837 after ? "after" : "before", ptr, usize, byte); 1838} 1839#ifdef JEMALLOC_JET 1840#undef arena_redzone_corruption 1841#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption) 1842arena_redzone_corruption_t *arena_redzone_corruption = 1843 JEMALLOC_N(arena_redzone_corruption_impl); 1844#endif 1845 1846static void 1847arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset) 1848{ 1849 size_t size = bin_info->reg_size; 1850 size_t redzone_size = bin_info->redzone_size; 1851 size_t i; 1852 bool error = false; 1853 1854 if (opt_junk_alloc) { 1855 for (i = 1; i <= redzone_size; i++) { 1856 uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i); 1857 if (*byte != 0xa5) { 1858 error = true; 1859 arena_redzone_corruption(ptr, size, false, i, *byte); 1860 if (reset) 1861 *byte = 0xa5; 1862 } 1863 } 1864 for (i = 0; i < redzone_size; i++) { 1865 uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i); 1866 if (*byte != 0xa5) { 1867 error = true; 1868 arena_redzone_corruption(ptr, size, true, i, *byte); 1869 if (reset) 1870 *byte = 0xa5; 1871 } 1872 } 1873 } 1874 1875 if (opt_abort && error) 1876 abort(); 1877} 1878 1879#ifdef JEMALLOC_JET 1880#undef arena_dalloc_junk_small 1881#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small_impl) 1882#endif 1883void 1884arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info) 1885{ 1886 size_t redzone_size = bin_info->redzone_size; 1887 1888 arena_redzones_validate(ptr, bin_info, false); 1889 memset((void *)((uintptr_t)ptr - redzone_size), 0x5a, 1890 bin_info->reg_interval); 1891} 1892#ifdef JEMALLOC_JET 1893#undef arena_dalloc_junk_small 1894#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small) 1895arena_dalloc_junk_small_t *arena_dalloc_junk_small = 1896 JEMALLOC_N(arena_dalloc_junk_small_impl); 1897#endif 1898 1899void 1900arena_quarantine_junk_small(void *ptr, size_t usize) 1901{ 1902 index_t binind; 1903 arena_bin_info_t *bin_info; 1904 cassert(config_fill); 1905 assert(opt_junk_free); 1906 assert(opt_quarantine); 1907 assert(usize <= SMALL_MAXCLASS); 1908 1909 binind = size2index(usize); 1910 bin_info = &arena_bin_info[binind]; 1911 arena_redzones_validate(ptr, bin_info, true); 1912} 1913 1914void * 1915arena_malloc_small(arena_t *arena, size_t size, bool zero) 1916{ 1917 void *ret; 1918 arena_bin_t *bin; 1919 arena_run_t *run; 1920 index_t binind; 1921 1922 binind = size2index(size); 1923 assert(binind < NBINS); 1924 bin = &arena->bins[binind]; 1925 size = index2size(binind); 1926 1927 malloc_mutex_lock(&bin->lock); 1928 if ((run = bin->runcur) != NULL && run->nfree > 0) 1929 ret = arena_run_reg_alloc(run, &arena_bin_info[binind]); 1930 else 1931 ret = arena_bin_malloc_hard(arena, bin); 1932 1933 if (ret == NULL) { 1934 malloc_mutex_unlock(&bin->lock); 1935 return (NULL); 1936 } 1937 1938 if (config_stats) { 1939 bin->stats.nmalloc++; 1940 bin->stats.nrequests++; 1941 bin->stats.curregs++; 1942 } 1943 malloc_mutex_unlock(&bin->lock); 1944 if (config_prof && !isthreaded && arena_prof_accum(arena, size)) 1945 prof_idump(); 1946 1947 if (!zero) { 1948 if (config_fill) { 1949 if (unlikely(opt_junk_alloc)) { 1950 arena_alloc_junk_small(ret, 1951 &arena_bin_info[binind], false); 1952 } else if (unlikely(opt_zero)) 1953 memset(ret, 0, size); 1954 } 1955 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size); 1956 } else { 1957 if (config_fill && unlikely(opt_junk_alloc)) { 1958 arena_alloc_junk_small(ret, &arena_bin_info[binind], 1959 true); 1960 } 1961 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size); 1962 memset(ret, 0, size); 1963 } 1964 1965 return (ret); 1966} 1967 1968void * 1969arena_malloc_large(arena_t *arena, size_t size, bool zero) 1970{ 1971 void *ret; 1972 size_t usize; 1973 arena_run_t *run; 1974 arena_chunk_map_misc_t *miscelm; 1975 UNUSED bool idump; 1976 1977 /* Large allocation. */ 1978 usize = s2u(size); 1979 malloc_mutex_lock(&arena->lock); 1980 run = arena_run_alloc_large(arena, usize, zero); 1981 if (run == NULL) { 1982 malloc_mutex_unlock(&arena->lock); 1983 return (NULL); 1984 } 1985 miscelm = arena_run_to_miscelm(run); 1986 ret = arena_miscelm_to_rpages(miscelm); 1987 if (config_stats) { 1988 index_t index = size2index(usize) - NBINS; 1989 1990 arena->stats.nmalloc_large++; 1991 arena->stats.nrequests_large++; 1992 arena->stats.allocated_large += usize; 1993 arena->stats.lstats[index].nmalloc++; 1994 arena->stats.lstats[index].nrequests++; 1995 arena->stats.lstats[index].curruns++; 1996 } 1997 if (config_prof) 1998 idump = arena_prof_accum_locked(arena, usize); 1999 malloc_mutex_unlock(&arena->lock); 2000 if (config_prof && idump) 2001 prof_idump(); 2002 2003 if (!zero) { 2004 if (config_fill) { 2005 if (unlikely(opt_junk_alloc)) 2006 memset(ret, 0xa5, usize); 2007 else if (unlikely(opt_zero)) 2008 memset(ret, 0, usize); 2009 } 2010 } 2011 2012 return (ret); 2013} 2014 2015/* Only handles large allocations that require more than page alignment. */ 2016static void * 2017arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t size, size_t alignment, 2018 bool zero) 2019{ 2020 void *ret; 2021 size_t alloc_size, leadsize, trailsize; 2022 arena_run_t *run; 2023 arena_chunk_t *chunk; 2024 arena_chunk_map_misc_t *miscelm; 2025 void *rpages; 2026 2027 assert((size & PAGE_MASK) == 0); 2028 2029 arena = arena_choose(tsd, arena); 2030 if (unlikely(arena == NULL)) 2031 return (NULL); 2032 2033 alignment = PAGE_CEILING(alignment); 2034 alloc_size = size + alignment - PAGE; 2035 2036 malloc_mutex_lock(&arena->lock); 2037 run = arena_run_alloc_large(arena, alloc_size, false); 2038 if (run == NULL) { 2039 malloc_mutex_unlock(&arena->lock); 2040 return (NULL); 2041 } 2042 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); 2043 miscelm = arena_run_to_miscelm(run); 2044 rpages = arena_miscelm_to_rpages(miscelm); 2045 2046 leadsize = ALIGNMENT_CEILING((uintptr_t)rpages, alignment) - 2047 (uintptr_t)rpages; 2048 assert(alloc_size >= leadsize + size); 2049 trailsize = alloc_size - leadsize - size; 2050 if (leadsize != 0) { 2051 arena_chunk_map_misc_t *head_miscelm = miscelm; 2052 arena_run_t *head_run = run; 2053 2054 miscelm = arena_miscelm_get(chunk, 2055 arena_miscelm_to_pageind(head_miscelm) + (leadsize >> 2056 LG_PAGE)); 2057 run = &miscelm->run; 2058 2059 arena_run_trim_head(arena, chunk, head_run, alloc_size, 2060 alloc_size - leadsize); 2061 } 2062 if (trailsize != 0) { 2063 arena_run_trim_tail(arena, chunk, run, size + trailsize, size, 2064 false); 2065 } 2066 arena_run_init_large(arena, run, size, zero); 2067 ret = arena_miscelm_to_rpages(miscelm); 2068 2069 if (config_stats) { 2070 index_t index = size2index(size) - NBINS; 2071 2072 arena->stats.nmalloc_large++; 2073 arena->stats.nrequests_large++; 2074 arena->stats.allocated_large += size; 2075 arena->stats.lstats[index].nmalloc++; 2076 arena->stats.lstats[index].nrequests++; 2077 arena->stats.lstats[index].curruns++; 2078 } 2079 malloc_mutex_unlock(&arena->lock); 2080 2081 if (config_fill && !zero) { 2082 if (unlikely(opt_junk_alloc)) 2083 memset(ret, 0xa5, size); 2084 else if (unlikely(opt_zero)) 2085 memset(ret, 0, size); 2086 } 2087 return (ret); 2088} 2089 2090void * 2091arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment, 2092 bool zero, tcache_t *tcache) 2093{ 2094 void *ret; 2095 2096 if (usize <= SMALL_MAXCLASS && alignment < PAGE) 2097 ret = arena_malloc(tsd, arena, usize, zero, tcache); 2098 else { 2099 if (likely(usize <= arena_maxclass)) { 2100 ret = arena_palloc_large(tsd, arena, usize, alignment, 2101 zero); 2102 } else if (likely(alignment <= chunksize)) 2103 ret = huge_malloc(tsd, arena, usize, zero, tcache); 2104 else { 2105 ret = huge_palloc(tsd, arena, usize, alignment, zero, 2106 tcache); 2107 } 2108 } 2109 return (ret); 2110} 2111 2112void 2113arena_prof_promoted(const void *ptr, size_t size) 2114{ 2115 arena_chunk_t *chunk; 2116 size_t pageind; 2117 index_t binind; 2118 2119 cassert(config_prof); 2120 assert(ptr != NULL); 2121 assert(CHUNK_ADDR2BASE(ptr) != ptr); 2122 assert(isalloc(ptr, false) == LARGE_MINCLASS); 2123 assert(isalloc(ptr, true) == LARGE_MINCLASS); 2124 assert(size <= SMALL_MAXCLASS); 2125 2126 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); 2127 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; 2128 binind = size2index(size); 2129 assert(binind < NBINS); 2130 arena_mapbits_large_binind_set(chunk, pageind, binind); 2131 2132 assert(isalloc(ptr, false) == LARGE_MINCLASS); 2133 assert(isalloc(ptr, true) == size); 2134} 2135 2136static void 2137arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run, 2138 arena_bin_t *bin) 2139{ 2140 2141 /* Dissociate run from bin. */ 2142 if (run == bin->runcur) 2143 bin->runcur = NULL; 2144 else { 2145 index_t binind = arena_bin_index(extent_node_arena_get( 2146 &chunk->node), bin); 2147 arena_bin_info_t *bin_info = &arena_bin_info[binind]; 2148 2149 if (bin_info->nregs != 1) { 2150 /* 2151 * This block's conditional is necessary because if the 2152 * run only contains one region, then it never gets 2153 * inserted into the non-full runs tree. 2154 */ 2155 arena_bin_runs_remove(bin, run); 2156 } 2157 } 2158} 2159 2160static void 2161arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, 2162 arena_bin_t *bin) 2163{ 2164 2165 assert(run != bin->runcur); 2166 assert(arena_run_tree_search(&bin->runs, arena_run_to_miscelm(run)) == 2167 NULL); 2168 2169 malloc_mutex_unlock(&bin->lock); 2170 /******************************/ 2171 malloc_mutex_lock(&arena->lock); 2172 arena_run_dalloc(arena, run, true, false); 2173 malloc_mutex_unlock(&arena->lock); 2174 /****************************/ 2175 malloc_mutex_lock(&bin->lock); 2176 if (config_stats) 2177 bin->stats.curruns--; 2178} 2179 2180static void 2181arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, 2182 arena_bin_t *bin) 2183{ 2184 2185 /* 2186 * Make sure that if bin->runcur is non-NULL, it refers to the lowest 2187 * non-full run. It is okay to NULL runcur out rather than proactively 2188 * keeping it pointing at the lowest non-full run. 2189 */ 2190 if ((uintptr_t)run < (uintptr_t)bin->runcur) { 2191 /* Switch runcur. */ 2192 if (bin->runcur->nfree > 0) 2193 arena_bin_runs_insert(bin, bin->runcur); 2194 bin->runcur = run; 2195 if (config_stats) 2196 bin->stats.reruns++; 2197 } else 2198 arena_bin_runs_insert(bin, run); 2199} 2200 2201static void 2202arena_dalloc_bin_locked_impl(arena_t *arena, arena_chunk_t *chunk, void *ptr, 2203 arena_chunk_map_bits_t *bitselm, bool junked) 2204{ 2205 size_t pageind, rpages_ind; 2206 arena_run_t *run; 2207 arena_bin_t *bin; 2208 arena_bin_info_t *bin_info; 2209 index_t binind; 2210 2211 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; 2212 rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind); 2213 run = &arena_miscelm_get(chunk, rpages_ind)->run; 2214 binind = run->binind; 2215 bin = &arena->bins[binind]; 2216 bin_info = &arena_bin_info[binind]; 2217 2218 if (!junked && config_fill && unlikely(opt_junk_free)) 2219 arena_dalloc_junk_small(ptr, bin_info); 2220 2221 arena_run_reg_dalloc(run, ptr); 2222 if (run->nfree == bin_info->nregs) { 2223 arena_dissociate_bin_run(chunk, run, bin); 2224 arena_dalloc_bin_run(arena, chunk, run, bin); 2225 } else if (run->nfree == 1 && run != bin->runcur) 2226 arena_bin_lower_run(arena, chunk, run, bin); 2227 2228 if (config_stats) { 2229 bin->stats.ndalloc++; 2230 bin->stats.curregs--; 2231 } 2232} 2233 2234void 2235arena_dalloc_bin_junked_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr, 2236 arena_chunk_map_bits_t *bitselm) 2237{ 2238 2239 arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, true); 2240} 2241 2242void 2243arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr, 2244 size_t pageind, arena_chunk_map_bits_t *bitselm) 2245{ 2246 arena_run_t *run; 2247 arena_bin_t *bin; 2248 size_t rpages_ind; 2249 2250 rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind); 2251 run = &arena_miscelm_get(chunk, rpages_ind)->run; 2252 bin = &arena->bins[run->binind]; 2253 malloc_mutex_lock(&bin->lock); 2254 arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, false); 2255 malloc_mutex_unlock(&bin->lock); 2256} 2257 2258void 2259arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr, 2260 size_t pageind) 2261{ 2262 arena_chunk_map_bits_t *bitselm; 2263 2264 if (config_debug) { 2265 /* arena_ptr_small_binind_get() does extra sanity checking. */ 2266 assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk, 2267 pageind)) != BININD_INVALID); 2268 } 2269 bitselm = arena_bitselm_get(chunk, pageind); 2270 arena_dalloc_bin(arena, chunk, ptr, pageind, bitselm); 2271} 2272 2273#ifdef JEMALLOC_JET 2274#undef arena_dalloc_junk_large 2275#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large_impl) 2276#endif 2277void 2278arena_dalloc_junk_large(void *ptr, size_t usize) 2279{ 2280 2281 if (config_fill && unlikely(opt_junk_free)) 2282 memset(ptr, 0x5a, usize); 2283} 2284#ifdef JEMALLOC_JET 2285#undef arena_dalloc_junk_large 2286#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large) 2287arena_dalloc_junk_large_t *arena_dalloc_junk_large = 2288 JEMALLOC_N(arena_dalloc_junk_large_impl); 2289#endif 2290 2291void 2292arena_dalloc_large_locked_impl(arena_t *arena, arena_chunk_t *chunk, 2293 void *ptr, bool junked) 2294{ 2295 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; 2296 arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind); 2297 arena_run_t *run = &miscelm->run; 2298 2299 if (config_fill || config_stats) { 2300 size_t usize = arena_mapbits_large_size_get(chunk, pageind); 2301 2302 if (!junked) 2303 arena_dalloc_junk_large(ptr, usize); 2304 if (config_stats) { 2305 index_t index = size2index(usize) - NBINS; 2306 2307 arena->stats.ndalloc_large++; 2308 arena->stats.allocated_large -= usize; 2309 arena->stats.lstats[index].ndalloc++; 2310 arena->stats.lstats[index].curruns--; 2311 } 2312 } 2313 2314 arena_run_dalloc(arena, run, true, false); 2315} 2316 2317void 2318arena_dalloc_large_junked_locked(arena_t *arena, arena_chunk_t *chunk, 2319 void *ptr) 2320{ 2321 2322 arena_dalloc_large_locked_impl(arena, chunk, ptr, true); 2323} 2324 2325void 2326arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr) 2327{ 2328 2329 malloc_mutex_lock(&arena->lock); 2330 arena_dalloc_large_locked_impl(arena, chunk, ptr, false); 2331 malloc_mutex_unlock(&arena->lock); 2332} 2333 2334static void 2335arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr, 2336 size_t oldsize, size_t size) 2337{ 2338 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; 2339 arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind); 2340 arena_run_t *run = &miscelm->run; 2341 2342 assert(size < oldsize); 2343 2344 /* 2345 * Shrink the run, and make trailing pages available for other 2346 * allocations. 2347 */ 2348 malloc_mutex_lock(&arena->lock); 2349 arena_run_trim_tail(arena, chunk, run, oldsize, size, true); 2350 if (config_stats) { 2351 index_t oldindex = size2index(oldsize) - NBINS; 2352 index_t index = size2index(size) - NBINS; 2353 2354 arena->stats.ndalloc_large++; 2355 arena->stats.allocated_large -= oldsize; 2356 arena->stats.lstats[oldindex].ndalloc++; 2357 arena->stats.lstats[oldindex].curruns--; 2358 2359 arena->stats.nmalloc_large++; 2360 arena->stats.nrequests_large++; 2361 arena->stats.allocated_large += size; 2362 arena->stats.lstats[index].nmalloc++; 2363 arena->stats.lstats[index].nrequests++; 2364 arena->stats.lstats[index].curruns++; 2365 } 2366 malloc_mutex_unlock(&arena->lock); 2367} 2368 2369static bool 2370arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr, 2371 size_t oldsize, size_t size, size_t extra, bool zero) 2372{ 2373 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; 2374 size_t npages = oldsize >> LG_PAGE; 2375 size_t followsize; 2376 size_t usize_min = s2u(size); 2377 2378 assert(oldsize == arena_mapbits_large_size_get(chunk, pageind)); 2379 2380 /* Try to extend the run. */ 2381 assert(usize_min > oldsize); 2382 malloc_mutex_lock(&arena->lock); 2383 if (pageind + npages < chunk_npages && 2384 arena_mapbits_allocated_get(chunk, pageind+npages) == 0 && 2385 (followsize = arena_mapbits_unallocated_size_get(chunk, 2386 pageind+npages)) >= usize_min - oldsize) { 2387 /* 2388 * The next run is available and sufficiently large. Split the 2389 * following run, then merge the first part with the existing 2390 * allocation. 2391 */ 2392 arena_run_t *run; 2393 size_t flag_dirty, splitsize, usize; 2394 2395 usize = s2u(size + extra); 2396 while (oldsize + followsize < usize) 2397 usize = index2size(size2index(usize)-1); 2398 assert(usize >= usize_min); 2399 splitsize = usize - oldsize; 2400 2401 run = &arena_miscelm_get(chunk, pageind+npages)->run; 2402 arena_run_split_large(arena, run, splitsize, zero); 2403 2404 size = oldsize + splitsize; 2405 npages = size >> LG_PAGE; 2406 2407 /* 2408 * Mark the extended run as dirty if either portion of the run 2409 * was dirty before allocation. This is rather pedantic, 2410 * because there's not actually any sequence of events that 2411 * could cause the resulting run to be passed to 2412 * arena_run_dalloc() with the dirty argument set to false 2413 * (which is when dirty flag consistency would really matter). 2414 */ 2415 flag_dirty = arena_mapbits_dirty_get(chunk, pageind) | 2416 arena_mapbits_dirty_get(chunk, pageind+npages-1); 2417 arena_mapbits_large_set(chunk, pageind, size, flag_dirty); 2418 arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty); 2419 2420 if (config_stats) { 2421 index_t oldindex = size2index(oldsize) - NBINS; 2422 index_t index = size2index(size) - NBINS; 2423 2424 arena->stats.ndalloc_large++; 2425 arena->stats.allocated_large -= oldsize; 2426 arena->stats.lstats[oldindex].ndalloc++; 2427 arena->stats.lstats[oldindex].curruns--; 2428 2429 arena->stats.nmalloc_large++; 2430 arena->stats.nrequests_large++; 2431 arena->stats.allocated_large += size; 2432 arena->stats.lstats[index].nmalloc++; 2433 arena->stats.lstats[index].nrequests++; 2434 arena->stats.lstats[index].curruns++; 2435 } 2436 malloc_mutex_unlock(&arena->lock); 2437 return (false); 2438 } 2439 malloc_mutex_unlock(&arena->lock); 2440 2441 return (true); 2442} 2443 2444#ifdef JEMALLOC_JET 2445#undef arena_ralloc_junk_large 2446#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large_impl) 2447#endif 2448static void 2449arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize) 2450{ 2451 2452 if (config_fill && unlikely(opt_junk_free)) { 2453 memset((void *)((uintptr_t)ptr + usize), 0x5a, 2454 old_usize - usize); 2455 } 2456} 2457#ifdef JEMALLOC_JET 2458#undef arena_ralloc_junk_large 2459#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large) 2460arena_ralloc_junk_large_t *arena_ralloc_junk_large = 2461 JEMALLOC_N(arena_ralloc_junk_large_impl); 2462#endif 2463 2464/* 2465 * Try to resize a large allocation, in order to avoid copying. This will 2466 * always fail if growing an object, and the following run is already in use. 2467 */ 2468static bool 2469arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra, 2470 bool zero) 2471{ 2472 size_t usize; 2473 2474 /* Make sure extra can't cause size_t overflow. */ 2475 if (unlikely(extra >= arena_maxclass)) 2476 return (true); 2477 2478 usize = s2u(size + extra); 2479 if (usize == oldsize) { 2480 /* Same size class. */ 2481 return (false); 2482 } else { 2483 arena_chunk_t *chunk; 2484 arena_t *arena; 2485 2486 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); 2487 arena = extent_node_arena_get(&chunk->node); 2488 2489 if (usize < oldsize) { 2490 /* Fill before shrinking in order avoid a race. */ 2491 arena_ralloc_junk_large(ptr, oldsize, usize); 2492 arena_ralloc_large_shrink(arena, chunk, ptr, oldsize, 2493 usize); 2494 return (false); 2495 } else { 2496 bool ret = arena_ralloc_large_grow(arena, chunk, ptr, 2497 oldsize, size, extra, zero); 2498 if (config_fill && !ret && !zero) { 2499 if (unlikely(opt_junk_alloc)) { 2500 memset((void *)((uintptr_t)ptr + 2501 oldsize), 0xa5, isalloc(ptr, 2502 config_prof) - oldsize); 2503 } else if (unlikely(opt_zero)) { 2504 memset((void *)((uintptr_t)ptr + 2505 oldsize), 0, isalloc(ptr, 2506 config_prof) - oldsize); 2507 } 2508 } 2509 return (ret); 2510 } 2511 } 2512} 2513 2514bool 2515arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra, 2516 bool zero) 2517{ 2518 2519 if (likely(size <= arena_maxclass)) { 2520 /* 2521 * Avoid moving the allocation if the size class can be left the 2522 * same. 2523 */ 2524 if (likely(oldsize <= arena_maxclass)) { 2525 if (oldsize <= SMALL_MAXCLASS) { 2526 assert( 2527 arena_bin_info[size2index(oldsize)].reg_size 2528 == oldsize); 2529 if ((size + extra <= SMALL_MAXCLASS && 2530 size2index(size + extra) == 2531 size2index(oldsize)) || (size <= oldsize && 2532 size + extra >= oldsize)) 2533 return (false); 2534 } else { 2535 assert(size <= arena_maxclass); 2536 if (size + extra > SMALL_MAXCLASS) { 2537 if (!arena_ralloc_large(ptr, oldsize, 2538 size, extra, zero)) 2539 return (false); 2540 } 2541 } 2542 } 2543 2544 /* Reallocation would require a move. */ 2545 return (true); 2546 } else 2547 return (huge_ralloc_no_move(ptr, oldsize, size, extra, zero)); 2548} 2549 2550void * 2551arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size, 2552 size_t extra, size_t alignment, bool zero, tcache_t *tcache) 2553{ 2554 void *ret; 2555 2556 if (likely(size <= arena_maxclass)) { 2557 size_t copysize; 2558 2559 /* Try to avoid moving the allocation. */ 2560 if (!arena_ralloc_no_move(ptr, oldsize, size, extra, zero)) 2561 return (ptr); 2562 2563 /* 2564 * size and oldsize are different enough that we need to move 2565 * the object. In that case, fall back to allocating new space 2566 * and copying. 2567 */ 2568 if (alignment != 0) { 2569 size_t usize = sa2u(size + extra, alignment); 2570 if (usize == 0) 2571 return (NULL); 2572 ret = ipalloct(tsd, usize, alignment, zero, tcache, 2573 arena); 2574 } else { 2575 ret = arena_malloc(tsd, arena, size + extra, zero, 2576 tcache); 2577 } 2578 2579 if (ret == NULL) { 2580 if (extra == 0) 2581 return (NULL); 2582 /* Try again, this time without extra. */ 2583 if (alignment != 0) { 2584 size_t usize = sa2u(size, alignment); 2585 if (usize == 0) 2586 return (NULL); 2587 ret = ipalloct(tsd, usize, alignment, zero, 2588 tcache, arena); 2589 } else { 2590 ret = arena_malloc(tsd, arena, size, zero, 2591 tcache); 2592 } 2593 2594 if (ret == NULL) 2595 return (NULL); 2596 } 2597 2598 /* 2599 * Junk/zero-filling were already done by 2600 * ipalloc()/arena_malloc(). 2601 */ 2602 2603 /* 2604 * Copy at most size bytes (not size+extra), since the caller 2605 * has no expectation that the extra bytes will be reliably 2606 * preserved. 2607 */ 2608 copysize = (size < oldsize) ? size : oldsize; 2609 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize); 2610 memcpy(ret, ptr, copysize); 2611 isqalloc(tsd, ptr, oldsize, tcache); 2612 } else { 2613 ret = huge_ralloc(tsd, arena, ptr, oldsize, size, extra, 2614 alignment, zero, tcache); 2615 } 2616 return (ret); 2617} 2618 2619dss_prec_t 2620arena_dss_prec_get(arena_t *arena) 2621{ 2622 dss_prec_t ret; 2623 2624 malloc_mutex_lock(&arena->lock); 2625 ret = arena->dss_prec; 2626 malloc_mutex_unlock(&arena->lock); 2627 return (ret); 2628} 2629 2630bool 2631arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec) 2632{ 2633 2634 if (!have_dss) 2635 return (dss_prec != dss_prec_disabled); 2636 malloc_mutex_lock(&arena->lock); 2637 arena->dss_prec = dss_prec; 2638 malloc_mutex_unlock(&arena->lock); 2639 return (false); 2640} 2641 2642ssize_t 2643arena_lg_dirty_mult_default_get(void) 2644{ 2645 2646 return ((ssize_t)atomic_read_z((size_t *)&lg_dirty_mult_default)); 2647} 2648 2649bool 2650arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult) 2651{ 2652 2653 if (!arena_lg_dirty_mult_valid(lg_dirty_mult)) 2654 return (true); 2655 atomic_write_z((size_t *)&lg_dirty_mult_default, (size_t)lg_dirty_mult); 2656 return (false); 2657} 2658 2659void 2660arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive, 2661 size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats, 2662 malloc_large_stats_t *lstats, malloc_huge_stats_t *hstats) 2663{ 2664 unsigned i; 2665 2666 malloc_mutex_lock(&arena->lock); 2667 *dss = dss_prec_names[arena->dss_prec]; 2668 *nactive += arena->nactive; 2669 *ndirty += arena->ndirty; 2670 2671 astats->mapped += arena->stats.mapped; 2672 astats->npurge += arena->stats.npurge; 2673 astats->nmadvise += arena->stats.nmadvise; 2674 astats->purged += arena->stats.purged; 2675 astats->metadata_mapped += arena->stats.metadata_mapped; 2676 astats->metadata_allocated += arena_metadata_allocated_get(arena); 2677 astats->allocated_large += arena->stats.allocated_large; 2678 astats->nmalloc_large += arena->stats.nmalloc_large; 2679 astats->ndalloc_large += arena->stats.ndalloc_large; 2680 astats->nrequests_large += arena->stats.nrequests_large; 2681 astats->allocated_huge += arena->stats.allocated_huge; 2682 astats->nmalloc_huge += arena->stats.nmalloc_huge; 2683 astats->ndalloc_huge += arena->stats.ndalloc_huge; 2684 2685 for (i = 0; i < nlclasses; i++) { 2686 lstats[i].nmalloc += arena->stats.lstats[i].nmalloc; 2687 lstats[i].ndalloc += arena->stats.lstats[i].ndalloc; 2688 lstats[i].nrequests += arena->stats.lstats[i].nrequests; 2689 lstats[i].curruns += arena->stats.lstats[i].curruns; 2690 } 2691 2692 for (i = 0; i < nhclasses; i++) { 2693 hstats[i].nmalloc += arena->stats.hstats[i].nmalloc; 2694 hstats[i].ndalloc += arena->stats.hstats[i].ndalloc; 2695 hstats[i].curhchunks += arena->stats.hstats[i].curhchunks; 2696 } 2697 malloc_mutex_unlock(&arena->lock); 2698 2699 for (i = 0; i < NBINS; i++) { 2700 arena_bin_t *bin = &arena->bins[i]; 2701 2702 malloc_mutex_lock(&bin->lock); 2703 bstats[i].nmalloc += bin->stats.nmalloc; 2704 bstats[i].ndalloc += bin->stats.ndalloc; 2705 bstats[i].nrequests += bin->stats.nrequests; 2706 bstats[i].curregs += bin->stats.curregs; 2707 if (config_tcache) { 2708 bstats[i].nfills += bin->stats.nfills; 2709 bstats[i].nflushes += bin->stats.nflushes; 2710 } 2711 bstats[i].nruns += bin->stats.nruns; 2712 bstats[i].reruns += bin->stats.reruns; 2713 bstats[i].curruns += bin->stats.curruns; 2714 malloc_mutex_unlock(&bin->lock); 2715 } 2716} 2717 2718arena_t * 2719arena_new(unsigned ind) 2720{ 2721 arena_t *arena; 2722 unsigned i; 2723 arena_bin_t *bin; 2724 2725 /* 2726 * Allocate arena, arena->lstats, and arena->hstats contiguously, mainly 2727 * because there is no way to clean up if base_alloc() OOMs. 2728 */ 2729 if (config_stats) { 2730 arena = (arena_t *)base_alloc(CACHELINE_CEILING(sizeof(arena_t)) 2731 + QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t) + 2732 nhclasses) * sizeof(malloc_huge_stats_t)); 2733 } else 2734 arena = (arena_t *)base_alloc(sizeof(arena_t)); 2735 if (arena == NULL) 2736 return (NULL); 2737 2738 arena->ind = ind; 2739 arena->nthreads = 0; 2740 if (malloc_mutex_init(&arena->lock)) 2741 return (NULL); 2742 2743 if (config_stats) { 2744 memset(&arena->stats, 0, sizeof(arena_stats_t)); 2745 arena->stats.lstats = (malloc_large_stats_t *)((uintptr_t)arena 2746 + CACHELINE_CEILING(sizeof(arena_t))); 2747 memset(arena->stats.lstats, 0, nlclasses * 2748 sizeof(malloc_large_stats_t)); 2749 arena->stats.hstats = (malloc_huge_stats_t *)((uintptr_t)arena 2750 + CACHELINE_CEILING(sizeof(arena_t)) + 2751 QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t))); 2752 memset(arena->stats.hstats, 0, nhclasses * 2753 sizeof(malloc_huge_stats_t)); 2754 if (config_tcache) 2755 ql_new(&arena->tcache_ql); 2756 } 2757 2758 if (config_prof) 2759 arena->prof_accumbytes = 0; 2760 2761 arena->dss_prec = chunk_dss_prec_get(); 2762 2763 arena->spare = NULL; 2764 2765 arena->lg_dirty_mult = arena_lg_dirty_mult_default_get(); 2766 arena->nactive = 0; 2767 arena->ndirty = 0; 2768 2769 arena_avail_tree_new(&arena->runs_avail); 2770 qr_new(&arena->runs_dirty, rd_link); 2771 qr_new(&arena->chunks_cache, cc_link); 2772 2773 ql_new(&arena->huge); 2774 if (malloc_mutex_init(&arena->huge_mtx)) 2775 return (NULL); 2776 2777 extent_tree_szad_new(&arena->chunks_szad_cache); 2778 extent_tree_ad_new(&arena->chunks_ad_cache); 2779 extent_tree_szad_new(&arena->chunks_szad_mmap); 2780 extent_tree_ad_new(&arena->chunks_ad_mmap); 2781 extent_tree_szad_new(&arena->chunks_szad_dss); 2782 extent_tree_ad_new(&arena->chunks_ad_dss); 2783 if (malloc_mutex_init(&arena->chunks_mtx)) 2784 return (NULL); 2785 ql_new(&arena->node_cache); 2786 if (malloc_mutex_init(&arena->node_cache_mtx)) 2787 return (NULL); 2788 2789 arena->chunk_alloc = chunk_alloc_default; 2790 arena->chunk_dalloc = chunk_dalloc_default; 2791 arena->chunk_purge = chunk_purge_default; 2792 2793 /* Initialize bins. */ 2794 for (i = 0; i < NBINS; i++) { 2795 bin = &arena->bins[i]; 2796 if (malloc_mutex_init(&bin->lock)) 2797 return (NULL); 2798 bin->runcur = NULL; 2799 arena_run_tree_new(&bin->runs); 2800 if (config_stats) 2801 memset(&bin->stats, 0, sizeof(malloc_bin_stats_t)); 2802 } 2803 2804 return (arena); 2805} 2806 2807/* 2808 * Calculate bin_info->run_size such that it meets the following constraints: 2809 * 2810 * *) bin_info->run_size <= arena_maxrun 2811 * *) bin_info->nregs <= RUN_MAXREGS 2812 * 2813 * bin_info->nregs and bin_info->reg0_offset are also calculated here, since 2814 * these settings are all interdependent. 2815 */ 2816static void 2817bin_info_run_size_calc(arena_bin_info_t *bin_info) 2818{ 2819 size_t pad_size; 2820 size_t try_run_size, perfect_run_size, actual_run_size; 2821 uint32_t try_nregs, perfect_nregs, actual_nregs; 2822 2823 /* 2824 * Determine redzone size based on minimum alignment and minimum 2825 * redzone size. Add padding to the end of the run if it is needed to 2826 * align the regions. The padding allows each redzone to be half the 2827 * minimum alignment; without the padding, each redzone would have to 2828 * be twice as large in order to maintain alignment. 2829 */ 2830 if (config_fill && unlikely(opt_redzone)) { 2831 size_t align_min = ZU(1) << (jemalloc_ffs(bin_info->reg_size) - 2832 1); 2833 if (align_min <= REDZONE_MINSIZE) { 2834 bin_info->redzone_size = REDZONE_MINSIZE; 2835 pad_size = 0; 2836 } else { 2837 bin_info->redzone_size = align_min >> 1; 2838 pad_size = bin_info->redzone_size; 2839 } 2840 } else { 2841 bin_info->redzone_size = 0; 2842 pad_size = 0; 2843 } 2844 bin_info->reg_interval = bin_info->reg_size + 2845 (bin_info->redzone_size << 1); 2846 2847 /* 2848 * Compute run size under ideal conditions (no redzones, no limit on run 2849 * size). 2850 */ 2851 try_run_size = PAGE; 2852 try_nregs = try_run_size / bin_info->reg_size; 2853 do { 2854 perfect_run_size = try_run_size; 2855 perfect_nregs = try_nregs; 2856 2857 try_run_size += PAGE; 2858 try_nregs = try_run_size / bin_info->reg_size; 2859 } while (perfect_run_size != perfect_nregs * bin_info->reg_size); 2860 assert(perfect_nregs <= RUN_MAXREGS); 2861 2862 actual_run_size = perfect_run_size; 2863 actual_nregs = (actual_run_size - pad_size) / bin_info->reg_interval; 2864 2865 /* 2866 * Redzones can require enough padding that not even a single region can 2867 * fit within the number of pages that would normally be dedicated to a 2868 * run for this size class. Increase the run size until at least one 2869 * region fits. 2870 */ 2871 while (actual_nregs == 0) { 2872 assert(config_fill && unlikely(opt_redzone)); 2873 2874 actual_run_size += PAGE; 2875 actual_nregs = (actual_run_size - pad_size) / 2876 bin_info->reg_interval; 2877 } 2878 2879 /* 2880 * Make sure that the run will fit within an arena chunk. 2881 */ 2882 while (actual_run_size > arena_maxrun) { 2883 actual_run_size -= PAGE; 2884 actual_nregs = (actual_run_size - pad_size) / 2885 bin_info->reg_interval; 2886 } 2887 assert(actual_nregs > 0); 2888 assert(actual_run_size == s2u(actual_run_size)); 2889 2890 /* Copy final settings. */ 2891 bin_info->run_size = actual_run_size; 2892 bin_info->nregs = actual_nregs; 2893 bin_info->reg0_offset = actual_run_size - (actual_nregs * 2894 bin_info->reg_interval) - pad_size + bin_info->redzone_size; 2895 2896 assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs 2897 * bin_info->reg_interval) + pad_size == bin_info->run_size); 2898} 2899 2900static void 2901bin_info_init(void) 2902{ 2903 arena_bin_info_t *bin_info; 2904 2905#define BIN_INFO_INIT_bin_yes(index, size) \ 2906 bin_info = &arena_bin_info[index]; \ 2907 bin_info->reg_size = size; \ 2908 bin_info_run_size_calc(bin_info); \ 2909 bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs); 2910#define BIN_INFO_INIT_bin_no(index, size) 2911#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \ 2912 BIN_INFO_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)) 2913 SIZE_CLASSES 2914#undef BIN_INFO_INIT_bin_yes 2915#undef BIN_INFO_INIT_bin_no 2916#undef SC 2917} 2918 2919void 2920arena_boot(void) 2921{ 2922 size_t header_size; 2923 unsigned i; 2924 2925 arena_lg_dirty_mult_default_set(opt_lg_dirty_mult); 2926 2927 /* 2928 * Compute the header size such that it is large enough to contain the 2929 * page map. The page map is biased to omit entries for the header 2930 * itself, so some iteration is necessary to compute the map bias. 2931 * 2932 * 1) Compute safe header_size and map_bias values that include enough 2933 * space for an unbiased page map. 2934 * 2) Refine map_bias based on (1) to omit the header pages in the page 2935 * map. The resulting map_bias may be one too small. 2936 * 3) Refine map_bias based on (2). The result will be >= the result 2937 * from (2), and will always be correct. 2938 */ 2939 map_bias = 0; 2940 for (i = 0; i < 3; i++) { 2941 header_size = offsetof(arena_chunk_t, map_bits) + 2942 ((sizeof(arena_chunk_map_bits_t) + 2943 sizeof(arena_chunk_map_misc_t)) * (chunk_npages-map_bias)); 2944 map_bias = (header_size + PAGE_MASK) >> LG_PAGE; 2945 } 2946 assert(map_bias > 0); 2947 2948 map_misc_offset = offsetof(arena_chunk_t, map_bits) + 2949 sizeof(arena_chunk_map_bits_t) * (chunk_npages-map_bias); 2950 2951 arena_maxrun = chunksize - (map_bias << LG_PAGE); 2952 assert(arena_maxrun > 0); 2953 arena_maxclass = index2size(size2index(chunksize)-1); 2954 if (arena_maxclass > arena_maxrun) { 2955 /* 2956 * For small chunk sizes it's possible for there to be fewer 2957 * non-header pages available than are necessary to serve the 2958 * size classes just below chunksize. 2959 */ 2960 arena_maxclass = arena_maxrun; 2961 } 2962 assert(arena_maxclass > 0); 2963 nlclasses = size2index(arena_maxclass) - size2index(SMALL_MAXCLASS); 2964 nhclasses = NSIZES - nlclasses - NBINS; 2965 2966 bin_info_init(); 2967} 2968 2969void 2970arena_prefork(arena_t *arena) 2971{ 2972 unsigned i; 2973 2974 malloc_mutex_prefork(&arena->lock); 2975 malloc_mutex_prefork(&arena->huge_mtx); 2976 malloc_mutex_prefork(&arena->chunks_mtx); 2977 malloc_mutex_prefork(&arena->node_cache_mtx); 2978 for (i = 0; i < NBINS; i++) 2979 malloc_mutex_prefork(&arena->bins[i].lock); 2980} 2981 2982void 2983arena_postfork_parent(arena_t *arena) 2984{ 2985 unsigned i; 2986 2987 for (i = 0; i < NBINS; i++) 2988 malloc_mutex_postfork_parent(&arena->bins[i].lock); 2989 malloc_mutex_postfork_parent(&arena->node_cache_mtx); 2990 malloc_mutex_postfork_parent(&arena->chunks_mtx); 2991 malloc_mutex_postfork_parent(&arena->huge_mtx); 2992 malloc_mutex_postfork_parent(&arena->lock); 2993} 2994 2995void 2996arena_postfork_child(arena_t *arena) 2997{ 2998 unsigned i; 2999 3000 for (i = 0; i < NBINS; i++) 3001 malloc_mutex_postfork_child(&arena->bins[i].lock); 3002 malloc_mutex_postfork_child(&arena->node_cache_mtx); 3003 malloc_mutex_postfork_child(&arena->chunks_mtx); 3004 malloc_mutex_postfork_child(&arena->huge_mtx); 3005 malloc_mutex_postfork_child(&arena->lock); 3006} 3007