arena.c revision 2195ba4e1f8f262b7e6586106d90f4dc0aea7630
#define JEMALLOC_ARENA_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
arena_bin_info_t arena_bin_info[NBINS];

size_t map_bias;
size_t map_misc_offset;
size_t arena_maxrun; /* Max run size for arenas. */
size_t arena_maxclass; /* Max size class for arenas. */
unsigned nlclasses; /* Number of large size classes. */
unsigned nhclasses; /* Number of huge size classes. */

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk);
static void arena_purge(arena_t *arena, bool all);
static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty,
    bool cleaned);
static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin);
static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin);

/******************************************************************************/

JEMALLOC_INLINE_C size_t
arena_miscelm_to_bits(arena_chunk_map_misc_t *miscelm)
{
    arena_chunk_t *chunk = CHUNK_ADDR2BASE(miscelm);
    size_t pageind = arena_miscelm_to_pageind(miscelm);

    return (arena_mapbits_get(chunk, pageind));
}

JEMALLOC_INLINE_C int
arena_run_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b)
{
    uintptr_t a_miscelm = (uintptr_t)a;
    uintptr_t b_miscelm = (uintptr_t)b;

    assert(a != NULL);
    assert(b != NULL);

    return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm));
}

/* Generate red-black tree functions. */
rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_misc_t,
    rb_link, arena_run_comp)

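/*
 * Order available runs primarily by size and secondarily by map element
 * address. A search key encodes the requested size with CHUNK_MAP_KEY set in
 * the pointer value and compares lower than any real element of equal size,
 * so nsearch() returns the lowest best fit.
 */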
JEMALLOC_INLINE_C int
arena_avail_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b)
{
    int ret;
    size_t a_size;
    size_t b_size = arena_miscelm_to_bits(b) & ~PAGE_MASK;
    uintptr_t a_miscelm = (uintptr_t)a;
    uintptr_t b_miscelm = (uintptr_t)b;

    if (a_miscelm & CHUNK_MAP_KEY)
        a_size = a_miscelm & ~PAGE_MASK;
    else
        a_size = arena_miscelm_to_bits(a) & ~PAGE_MASK;

    ret = (a_size > b_size) - (a_size < b_size);
    if (ret == 0) {
        if (!(a_miscelm & CHUNK_MAP_KEY))
            ret = (a_miscelm > b_miscelm) - (a_miscelm < b_miscelm);
        else {
            /*
             * Treat keys as if they are lower than anything else.
             */
            ret = -1;
        }
    }

    return (ret);
}

/* Generate red-black tree functions. */
rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t,
    arena_chunk_map_misc_t, rb_link, arena_avail_comp)

static void
arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{

    assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
        LG_PAGE));
    arena_avail_tree_insert(&arena->runs_avail, arena_miscelm_get(chunk,
        pageind));
}

static void
arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{

    assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
        LG_PAGE));
    arena_avail_tree_remove(&arena->runs_avail, arena_miscelm_get(chunk,
        pageind));
}

static void
arena_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
    arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);

    assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
        LG_PAGE));
    assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
    assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
        CHUNK_MAP_DIRTY);
    ql_elm_new(miscelm, dr_link);
    ql_tail_insert(&arena->runs_dirty, miscelm, dr_link);
    arena->ndirty += npages;
}

static void
arena_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
    arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);

    assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
        LG_PAGE));
    assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
    assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
        CHUNK_MAP_DIRTY);
    ql_remove(&arena->runs_dirty, miscelm, dr_link);
    arena->ndirty -= npages;
}

JEMALLOC_INLINE_C void *
arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
{
    void *ret;
    unsigned regind;
    arena_chunk_map_misc_t *miscelm;
    void *rpages;

    assert(run->nfree > 0);
    assert(!bitmap_full(run->bitmap, &bin_info->bitmap_info));

    regind = bitmap_sfu(run->bitmap, &bin_info->bitmap_info);
    miscelm = arena_run_to_miscelm(run);
    rpages = arena_miscelm_to_rpages(miscelm);
    ret = (void *)((uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset +
        (uintptr_t)(bin_info->reg_interval * regind));
    run->nfree--;
    return (ret);
}

JEMALLOC_INLINE_C void
arena_run_reg_dalloc(arena_run_t *run, void *ptr)
{
    arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
    size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
    size_t mapbits = arena_mapbits_get(chunk, pageind);
    index_t binind = arena_ptr_small_binind_get(ptr, mapbits);
    arena_bin_info_t *bin_info = &arena_bin_info[binind];
    unsigned regind = arena_run_regind(run, bin_info, ptr);

    assert(run->nfree < bin_info->nregs);
    /* Freeing an interior pointer can cause assertion failure. */
    assert(((uintptr_t)ptr -
        ((uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
        (uintptr_t)bin_info->reg0_offset)) %
        (uintptr_t)bin_info->reg_interval == 0);
    assert((uintptr_t)ptr >=
        (uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
        (uintptr_t)bin_info->reg0_offset);
    /* Freeing an unallocated pointer can cause assertion failure. */
    assert(bitmap_get(run->bitmap, &bin_info->bitmap_info, regind));

    bitmap_unset(run->bitmap, &bin_info->bitmap_info, regind);
    run->nfree++;
}

JEMALLOC_INLINE_C void
arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages)
{

    JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
        (run_ind << LG_PAGE)), (npages << LG_PAGE));
    memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0,
        (npages << LG_PAGE));
}

JEMALLOC_INLINE_C void
arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind)
{

    JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind
        << LG_PAGE)), PAGE);
}

JEMALLOC_INLINE_C void
arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
{
    size_t i;
    UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));

    arena_run_page_mark_zeroed(chunk, run_ind);
    for (i = 0; i < PAGE / sizeof(size_t); i++)
        assert(p[i] == 0);
}

static void
arena_cactive_update(arena_t *arena, size_t add_pages, size_t sub_pages)
{

    if (config_stats) {
        ssize_t cactive_diff = CHUNK_CEILING((arena->nactive + add_pages
            - sub_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
            LG_PAGE);
        if (cactive_diff != 0)
            stats_cactive_add(cactive_diff);
    }
}

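/*
 * Detach the first need_pages pages of an unallocated run from the
 * runs_avail/runs_dirty bookkeeping, and re-register any trailing remainder
 * as a smaller unallocated run.
 */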
static void
arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind,
    size_t flag_dirty, size_t need_pages)
{
    size_t total_pages, rem_pages;

    total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >>
        LG_PAGE;
    assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) ==
        flag_dirty);
    assert(need_pages <= total_pages);
    rem_pages = total_pages - need_pages;

    arena_avail_remove(arena, chunk, run_ind, total_pages);
    if (flag_dirty != 0)
        arena_dirty_remove(arena, chunk, run_ind, total_pages);
    arena_cactive_update(arena, need_pages, 0);
    arena->nactive += need_pages;

    /* Keep track of trailing unused pages for later use. */
    if (rem_pages > 0) {
        if (flag_dirty != 0) {
            arena_mapbits_unallocated_set(chunk,
                run_ind+need_pages, (rem_pages << LG_PAGE),
                flag_dirty);
            arena_mapbits_unallocated_set(chunk,
                run_ind+total_pages-1, (rem_pages << LG_PAGE),
                flag_dirty);
            arena_dirty_insert(arena, chunk, run_ind+need_pages,
                rem_pages);
        } else {
            arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
                (rem_pages << LG_PAGE),
                arena_mapbits_unzeroed_get(chunk,
                run_ind+need_pages));
            arena_mapbits_unallocated_set(chunk,
                run_ind+total_pages-1, (rem_pages << LG_PAGE),
                arena_mapbits_unzeroed_get(chunk,
                run_ind+total_pages-1));
        }
        arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages);
    }
}

static void
arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
    bool remove, bool zero)
{
    arena_chunk_t *chunk;
    arena_chunk_map_misc_t *miscelm;
    size_t flag_dirty, run_ind, need_pages, i;

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
    miscelm = arena_run_to_miscelm(run);
    run_ind = arena_miscelm_to_pageind(miscelm);
    flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
    need_pages = (size >> LG_PAGE);
    assert(need_pages > 0);

    if (remove) {
        arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
            need_pages);
    }

    if (zero) {
        if (flag_dirty == 0) {
            /*
             * The run is clean, so some pages may be zeroed (i.e.
             * never before touched).
             */
            for (i = 0; i < need_pages; i++) {
                if (arena_mapbits_unzeroed_get(chunk, run_ind+i)
                    != 0)
                    arena_run_zero(chunk, run_ind+i, 1);
                else if (config_debug) {
                    arena_run_page_validate_zeroed(chunk,
                        run_ind+i);
                } else {
                    arena_run_page_mark_zeroed(chunk,
                        run_ind+i);
                }
            }
        } else {
            /* The run is dirty, so all pages must be zeroed. */
            arena_run_zero(chunk, run_ind, need_pages);
        }
    } else {
        JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
            (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
    }

    /*
     * Set the last element first, in case the run only contains one page
     * (i.e. both statements set the same element).
     */
325 */ 326 arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty); 327 arena_mapbits_large_set(chunk, run_ind, size, flag_dirty); 328} 329 330static void 331arena_run_split_large(arena_t *arena, arena_run_t *run, size_t size, bool zero) 332{ 333 334 arena_run_split_large_helper(arena, run, size, true, zero); 335} 336 337static void 338arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero) 339{ 340 341 arena_run_split_large_helper(arena, run, size, false, zero); 342} 343 344static void 345arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size, 346 index_t binind) 347{ 348 arena_chunk_t *chunk; 349 arena_chunk_map_misc_t *miscelm; 350 size_t flag_dirty, run_ind, need_pages, i; 351 352 assert(binind != BININD_INVALID); 353 354 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); 355 miscelm = arena_run_to_miscelm(run); 356 run_ind = arena_miscelm_to_pageind(miscelm); 357 flag_dirty = arena_mapbits_dirty_get(chunk, run_ind); 358 need_pages = (size >> LG_PAGE); 359 assert(need_pages > 0); 360 361 arena_run_split_remove(arena, chunk, run_ind, flag_dirty, need_pages); 362 363 for (i = 0; i < need_pages; i++) { 364 arena_mapbits_small_set(chunk, run_ind+i, i, binind, 0); 365 if (config_debug && flag_dirty == 0 && 366 arena_mapbits_unzeroed_get(chunk, run_ind+i) == 0) 367 arena_run_page_validate_zeroed(chunk, run_ind+i); 368 } 369 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + 370 (run_ind << LG_PAGE)), (need_pages << LG_PAGE)); 371} 372 373static arena_chunk_t * 374arena_chunk_init_spare(arena_t *arena) 375{ 376 arena_chunk_t *chunk; 377 378 assert(arena->spare != NULL); 379 380 chunk = arena->spare; 381 arena->spare = NULL; 382 383 assert(arena_mapbits_allocated_get(chunk, map_bias) == 0); 384 assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0); 385 assert(arena_mapbits_unallocated_size_get(chunk, map_bias) == 386 arena_maxrun); 387 assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) == 388 arena_maxrun); 389 assert(arena_mapbits_dirty_get(chunk, map_bias) == 390 arena_mapbits_dirty_get(chunk, chunk_npages-1)); 391 392 return (chunk); 393} 394 395static arena_chunk_t * 396arena_chunk_alloc_internal(arena_t *arena, bool *zero) 397{ 398 arena_chunk_t *chunk; 399 chunk_alloc_t *chunk_alloc; 400 chunk_dalloc_t *chunk_dalloc; 401 402 chunk_alloc = arena->chunk_alloc; 403 chunk_dalloc = arena->chunk_dalloc; 404 malloc_mutex_unlock(&arena->lock); 405 chunk = (arena_chunk_t *)chunk_alloc_arena(chunk_alloc, chunk_dalloc, 406 arena->ind, NULL, chunksize, chunksize, zero); 407 if (chunk != NULL) { 408 chunk->node.arena = arena; 409 chunk->node.addr = chunk; 410 chunk->node.size = 0; /* Indicates this is an arena chunk. */ 411 if (chunk_register(chunk, &chunk->node)) { 412 chunk_dalloc((void *)chunk, chunksize, arena->ind); 413 chunk = NULL; 414 } 415 } 416 malloc_mutex_lock(&arena->lock); 417 if (config_stats && chunk != NULL) { 418 arena->stats.mapped += chunksize; 419 arena->stats.metadata_mapped += (map_bias << LG_PAGE); 420 } 421 422 return (chunk); 423} 424 425static arena_chunk_t * 426arena_chunk_init_hard(arena_t *arena) 427{ 428 arena_chunk_t *chunk; 429 bool zero; 430 size_t unzeroed, i; 431 432 assert(arena->spare == NULL); 433 434 zero = false; 435 chunk = arena_chunk_alloc_internal(arena, &zero); 436 if (chunk == NULL) 437 return (NULL); 438 439 /* 440 * Initialize the map to contain one maximal free untouched run. Mark 441 * the pages as zeroed iff chunk_alloc() returned a zeroed chunk. 
442 */ 443 unzeroed = zero ? 0 : CHUNK_MAP_UNZEROED; 444 arena_mapbits_unallocated_set(chunk, map_bias, arena_maxrun, unzeroed); 445 /* 446 * There is no need to initialize the internal page map entries unless 447 * the chunk is not zeroed. 448 */ 449 if (!zero) { 450 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED( 451 (void *)arena_bitselm_get(chunk, map_bias+1), 452 (size_t)((uintptr_t) arena_bitselm_get(chunk, 453 chunk_npages-1) - (uintptr_t)arena_bitselm_get(chunk, 454 map_bias+1))); 455 for (i = map_bias+1; i < chunk_npages-1; i++) 456 arena_mapbits_unzeroed_set(chunk, i, unzeroed); 457 } else { 458 JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void 459 *)arena_bitselm_get(chunk, map_bias+1), (size_t)((uintptr_t) 460 arena_bitselm_get(chunk, chunk_npages-1) - 461 (uintptr_t)arena_bitselm_get(chunk, map_bias+1))); 462 if (config_debug) { 463 for (i = map_bias+1; i < chunk_npages-1; i++) { 464 assert(arena_mapbits_unzeroed_get(chunk, i) == 465 unzeroed); 466 } 467 } 468 } 469 arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxrun, 470 unzeroed); 471 472 return (chunk); 473} 474 475static arena_chunk_t * 476arena_chunk_alloc(arena_t *arena) 477{ 478 arena_chunk_t *chunk; 479 480 if (arena->spare != NULL) 481 chunk = arena_chunk_init_spare(arena); 482 else { 483 chunk = arena_chunk_init_hard(arena); 484 if (chunk == NULL) 485 return (NULL); 486 } 487 488 /* Insert the run into the runs_avail tree. */ 489 arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias); 490 491 return (chunk); 492} 493 494static void 495arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk) 496{ 497 498 assert(arena_mapbits_allocated_get(chunk, map_bias) == 0); 499 assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0); 500 assert(arena_mapbits_unallocated_size_get(chunk, map_bias) == 501 arena_maxrun); 502 assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) == 503 arena_maxrun); 504 assert(arena_mapbits_dirty_get(chunk, map_bias) == 505 arena_mapbits_dirty_get(chunk, chunk_npages-1)); 506 507 /* 508 * Remove run from the runs_avail tree, so that the arena does not use 509 * it. 
510 */ 511 arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias); 512 513 if (arena->spare != NULL) { 514 arena_chunk_t *spare = arena->spare; 515 chunk_dalloc_t *chunk_dalloc; 516 517 arena->spare = chunk; 518 if (arena_mapbits_dirty_get(spare, map_bias) != 0) { 519 arena_dirty_remove(arena, spare, map_bias, 520 chunk_npages-map_bias); 521 } 522 chunk_dalloc = arena->chunk_dalloc; 523 malloc_mutex_unlock(&arena->lock); 524 chunk_deregister(spare, &spare->node); 525 chunk_dalloc((void *)spare, chunksize, arena->ind); 526 malloc_mutex_lock(&arena->lock); 527 if (config_stats) { 528 arena->stats.mapped -= chunksize; 529 arena->stats.metadata_mapped -= (map_bias << LG_PAGE); 530 } 531 } else 532 arena->spare = chunk; 533} 534 535static void 536arena_huge_malloc_stats_update(arena_t *arena, size_t usize) 537{ 538 index_t index = size2index(usize) - nlclasses - NBINS; 539 540 cassert(config_stats); 541 542 arena->stats.nmalloc_huge++; 543 arena->stats.allocated_huge += usize; 544 arena->stats.hstats[index].nmalloc++; 545 arena->stats.hstats[index].curhchunks++; 546} 547 548static void 549arena_huge_malloc_stats_update_undo(arena_t *arena, size_t usize) 550{ 551 index_t index = size2index(usize) - nlclasses - NBINS; 552 553 cassert(config_stats); 554 555 arena->stats.nmalloc_huge--; 556 arena->stats.allocated_huge -= usize; 557 arena->stats.hstats[index].nmalloc--; 558 arena->stats.hstats[index].curhchunks--; 559} 560 561static void 562arena_huge_dalloc_stats_update(arena_t *arena, size_t usize) 563{ 564 index_t index = size2index(usize) - nlclasses - NBINS; 565 566 cassert(config_stats); 567 568 arena->stats.ndalloc_huge++; 569 arena->stats.allocated_huge -= usize; 570 arena->stats.hstats[index].ndalloc++; 571 arena->stats.hstats[index].curhchunks--; 572} 573 574static void 575arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize) 576{ 577 index_t index = size2index(usize) - nlclasses - NBINS; 578 579 cassert(config_stats); 580 581 arena->stats.ndalloc_huge--; 582 arena->stats.allocated_huge += usize; 583 arena->stats.hstats[index].ndalloc--; 584 arena->stats.hstats[index].curhchunks++; 585} 586 587static void 588arena_huge_ralloc_stats_update(arena_t *arena, size_t oldsize, size_t usize) 589{ 590 591 arena_huge_dalloc_stats_update(arena, oldsize); 592 arena_huge_malloc_stats_update(arena, usize); 593} 594 595static void 596arena_huge_ralloc_stats_update_undo(arena_t *arena, size_t oldsize, 597 size_t usize) 598{ 599 600 arena_huge_dalloc_stats_update_undo(arena, oldsize); 601 arena_huge_malloc_stats_update_undo(arena, usize); 602} 603 604extent_node_t * 605arena_node_alloc(arena_t *arena) 606{ 607 extent_node_t *node; 608 609 malloc_mutex_lock(&arena->node_cache_mtx); 610 node = ql_last(&arena->node_cache, ql_link); 611 if (node == NULL) { 612 malloc_mutex_unlock(&arena->node_cache_mtx); 613 return (base_alloc(sizeof(extent_node_t))); 614 } 615 ql_tail_remove(&arena->node_cache, extent_node_t, ql_link); 616 malloc_mutex_unlock(&arena->node_cache_mtx); 617 return (node); 618} 619 620void 621arena_node_dalloc(arena_t *arena, extent_node_t *node) 622{ 623 624 malloc_mutex_lock(&arena->node_cache_mtx); 625 ql_elm_new(node, ql_link); 626 ql_tail_insert(&arena->node_cache, node, ql_link); 627 malloc_mutex_unlock(&arena->node_cache_mtx); 628} 629 630void * 631arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment, 632 bool *zero) 633{ 634 void *ret; 635 chunk_alloc_t *chunk_alloc; 636 chunk_dalloc_t *chunk_dalloc; 637 size_t csize = CHUNK_CEILING(usize); 638 639 
void *
arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment,
    bool *zero)
{
    void *ret;
    chunk_alloc_t *chunk_alloc;
    chunk_dalloc_t *chunk_dalloc;
    size_t csize = CHUNK_CEILING(usize);

    malloc_mutex_lock(&arena->lock);
    chunk_alloc = arena->chunk_alloc;
    chunk_dalloc = arena->chunk_dalloc;
    if (config_stats) {
        /* Optimistically update stats prior to unlocking. */
        arena_huge_malloc_stats_update(arena, usize);
        arena->stats.mapped += usize;
    }
    arena->nactive += (usize >> LG_PAGE);
    malloc_mutex_unlock(&arena->lock);

    ret = chunk_alloc_arena(chunk_alloc, chunk_dalloc, arena->ind, NULL,
        csize, alignment, zero);
    if (ret == NULL) {
        /* Revert optimistic stats updates. */
        malloc_mutex_lock(&arena->lock);
        if (config_stats) {
            arena_huge_malloc_stats_update_undo(arena, usize);
            arena->stats.mapped -= usize;
        }
        arena->nactive -= (usize >> LG_PAGE);
        malloc_mutex_unlock(&arena->lock);
        return (NULL);
    }

    if (config_stats)
        stats_cactive_add(usize);

    return (ret);
}

void
arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize)
{
    chunk_dalloc_t *chunk_dalloc;

    malloc_mutex_lock(&arena->lock);
    chunk_dalloc = arena->chunk_dalloc;
    if (config_stats) {
        arena_huge_dalloc_stats_update(arena, usize);
        arena->stats.mapped -= usize;
        stats_cactive_sub(usize);
    }
    arena->nactive -= (usize >> LG_PAGE);
    malloc_mutex_unlock(&arena->lock);
    chunk_dalloc(chunk, CHUNK_CEILING(usize), arena->ind);
}

void
arena_chunk_ralloc_huge_similar(arena_t *arena, void *chunk, size_t oldsize,
    size_t usize)
{

    assert(CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize));
    assert(oldsize != usize);

    malloc_mutex_lock(&arena->lock);
    if (config_stats)
        arena_huge_ralloc_stats_update(arena, oldsize, usize);
    if (oldsize < usize) {
        size_t udiff = usize - oldsize;
        arena->nactive += udiff >> LG_PAGE;
        if (config_stats)
            stats_cactive_add(udiff);
    } else {
        size_t udiff = oldsize - usize;
        arena->nactive -= udiff >> LG_PAGE;
        if (config_stats)
            stats_cactive_sub(udiff);
    }
    malloc_mutex_unlock(&arena->lock);
}

void
arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk, size_t oldsize,
    size_t usize)
{
    chunk_dalloc_t *chunk_dalloc;
    size_t udiff = oldsize - usize;
    size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);

    malloc_mutex_lock(&arena->lock);
    chunk_dalloc = arena->chunk_dalloc;
    if (config_stats) {
        arena_huge_ralloc_stats_update(arena, oldsize, usize);
        if (cdiff != 0) {
            arena->stats.mapped -= cdiff;
            stats_cactive_sub(udiff);
        }
    }
    arena->nactive -= udiff >> LG_PAGE;
    malloc_mutex_unlock(&arena->lock);
    if (cdiff != 0) {
        chunk_dalloc((void *)((uintptr_t)chunk + CHUNK_CEILING(usize)),
            cdiff, arena->ind);
    }
}

bool
arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk, size_t oldsize,
    size_t usize, bool *zero)
{
    chunk_alloc_t *chunk_alloc;
    chunk_dalloc_t *chunk_dalloc;
    size_t udiff = usize - oldsize;
    size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize);

    malloc_mutex_lock(&arena->lock);
    chunk_alloc = arena->chunk_alloc;
    chunk_dalloc = arena->chunk_dalloc;
    if (config_stats) {
        /* Optimistically update stats prior to unlocking. */
        arena_huge_ralloc_stats_update(arena, oldsize, usize);
        arena->stats.mapped += cdiff;
    }
    arena->nactive += (udiff >> LG_PAGE);
    malloc_mutex_unlock(&arena->lock);

    if (chunk_alloc_arena(chunk_alloc, chunk_dalloc, arena->ind,
        (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize)), cdiff,
        chunksize, zero) == NULL) {
        /* Revert optimistic stats updates. */
        malloc_mutex_lock(&arena->lock);
        if (config_stats) {
            arena_huge_ralloc_stats_update_undo(arena,
                oldsize, usize);
            arena->stats.mapped -= cdiff;
        }
        arena->nactive -= (udiff >> LG_PAGE);
        malloc_mutex_unlock(&arena->lock);
        return (true);
    }

    if (config_stats)
        stats_cactive_add(udiff);

    return (false);
}

static arena_run_t *
arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
{
    arena_chunk_map_misc_t *miscelm;
    arena_chunk_map_misc_t *key;

    key = (arena_chunk_map_misc_t *)(size | CHUNK_MAP_KEY);
    miscelm = arena_avail_tree_nsearch(&arena->runs_avail, key);
    if (miscelm != NULL) {
        arena_run_t *run = &miscelm->run;
        arena_run_split_large(arena, &miscelm->run, size, zero);
        return (run);
    }

    return (NULL);
}

static arena_run_t *
arena_run_alloc_large(arena_t *arena, size_t size, bool zero)
{
    arena_chunk_t *chunk;
    arena_run_t *run;

    assert(size <= arena_maxrun);
    assert((size & PAGE_MASK) == 0);

    /* Search the arena's chunks for the lowest best fit. */
    run = arena_run_alloc_large_helper(arena, size, zero);
    if (run != NULL)
        return (run);

    /*
     * No usable runs.  Create a new chunk from which to allocate the run.
     */
    chunk = arena_chunk_alloc(arena);
    if (chunk != NULL) {
        run = &arena_miscelm_get(chunk, map_bias)->run;
        arena_run_split_large(arena, run, size, zero);
        return (run);
    }

    /*
     * arena_chunk_alloc() failed, but another thread may have made
     * sufficient memory available while this one dropped arena->lock in
     * arena_chunk_alloc(), so search one more time.
     */
    return (arena_run_alloc_large_helper(arena, size, zero));
}

static arena_run_t *
arena_run_alloc_small_helper(arena_t *arena, size_t size, index_t binind)
{
    arena_run_t *run;
    arena_chunk_map_misc_t *miscelm;
    arena_chunk_map_misc_t *key;

    key = (arena_chunk_map_misc_t *)(size | CHUNK_MAP_KEY);
    miscelm = arena_avail_tree_nsearch(&arena->runs_avail, key);
    if (miscelm != NULL) {
        run = &miscelm->run;
        arena_run_split_small(arena, run, size, binind);
        return (run);
    }

    return (NULL);
}

static arena_run_t *
arena_run_alloc_small(arena_t *arena, size_t size, index_t binind)
{
    arena_chunk_t *chunk;
    arena_run_t *run;

    assert(size <= arena_maxrun);
    assert((size & PAGE_MASK) == 0);
    assert(binind != BININD_INVALID);

    /* Search the arena's chunks for the lowest best fit. */
    run = arena_run_alloc_small_helper(arena, size, binind);
    if (run != NULL)
        return (run);

    /*
     * No usable runs.  Create a new chunk from which to allocate the run.
     */
862 */ 863 chunk = arena_chunk_alloc(arena); 864 if (chunk != NULL) { 865 run = &arena_miscelm_get(chunk, map_bias)->run; 866 arena_run_split_small(arena, run, size, binind); 867 return (run); 868 } 869 870 /* 871 * arena_chunk_alloc() failed, but another thread may have made 872 * sufficient memory available while this one dropped arena->lock in 873 * arena_chunk_alloc(), so search one more time. 874 */ 875 return (arena_run_alloc_small_helper(arena, size, binind)); 876} 877 878JEMALLOC_INLINE_C void 879arena_maybe_purge(arena_t *arena) 880{ 881 size_t threshold; 882 883 /* Don't purge if the option is disabled. */ 884 if (opt_lg_dirty_mult < 0) 885 return; 886 threshold = (arena->nactive >> opt_lg_dirty_mult); 887 threshold = threshold < chunk_npages ? chunk_npages : threshold; 888 /* 889 * Don't purge unless the number of purgeable pages exceeds the 890 * threshold. 891 */ 892 if (arena->ndirty <= threshold) 893 return; 894 895 arena_purge(arena, false); 896} 897 898static size_t 899arena_dirty_count(arena_t *arena) 900{ 901 size_t ndirty = 0; 902 arena_chunk_map_misc_t *miscelm; 903 arena_chunk_t *chunk; 904 size_t pageind, npages; 905 906 ql_foreach(miscelm, &arena->runs_dirty, dr_link) { 907 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm); 908 pageind = arena_miscelm_to_pageind(miscelm); 909 assert(arena_mapbits_allocated_get(chunk, pageind) == 0); 910 assert(arena_mapbits_large_get(chunk, pageind) == 0); 911 assert(arena_mapbits_dirty_get(chunk, pageind) != 0); 912 npages = arena_mapbits_unallocated_size_get(chunk, pageind) >> 913 LG_PAGE; 914 ndirty += npages; 915 } 916 917 return (ndirty); 918} 919 920static size_t 921arena_compute_npurge(arena_t *arena, bool all) 922{ 923 size_t npurge; 924 925 /* 926 * Compute the minimum number of pages that this thread should try to 927 * purge. 928 */ 929 if (!all) { 930 size_t threshold = (arena->nactive >> opt_lg_dirty_mult); 931 threshold = threshold < chunk_npages ? chunk_npages : threshold; 932 933 npurge = arena->ndirty - threshold; 934 } else 935 npurge = arena->ndirty; 936 937 return (npurge); 938} 939 940static size_t 941arena_stash_dirty(arena_t *arena, bool all, size_t npurge, 942 arena_chunk_miscelms_t *miscelms) 943{ 944 arena_chunk_map_misc_t *miscelm; 945 size_t nstashed = 0; 946 947 /* Add at least npurge pages to purge_list. */ 948 for (miscelm = ql_first(&arena->runs_dirty); miscelm != NULL; 949 miscelm = ql_first(&arena->runs_dirty)) { 950 arena_chunk_t *chunk = 951 (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm); 952 size_t pageind = arena_miscelm_to_pageind(miscelm); 953 size_t run_size = arena_mapbits_unallocated_size_get(chunk, 954 pageind); 955 size_t npages = run_size >> LG_PAGE; 956 arena_run_t *run = &miscelm->run; 957 958 assert(pageind + npages <= chunk_npages); 959 assert(arena_mapbits_dirty_get(chunk, pageind) == 960 arena_mapbits_dirty_get(chunk, pageind+npages-1)); 961 962 /* 963 * If purging the spare chunk's run, make it available prior to 964 * allocation. 965 */ 966 if (chunk == arena->spare) 967 arena_chunk_alloc(arena); 968 969 /* Temporarily allocate the free dirty run. */ 970 arena_run_split_large(arena, run, run_size, false); 971 /* Append to purge_list for later processing. 
static size_t
arena_stash_dirty(arena_t *arena, bool all, size_t npurge,
    arena_chunk_miscelms_t *miscelms)
{
    arena_chunk_map_misc_t *miscelm;
    size_t nstashed = 0;

    /* Add at least npurge pages to purge_list. */
    for (miscelm = ql_first(&arena->runs_dirty); miscelm != NULL;
        miscelm = ql_first(&arena->runs_dirty)) {
        arena_chunk_t *chunk =
            (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
        size_t pageind = arena_miscelm_to_pageind(miscelm);
        size_t run_size = arena_mapbits_unallocated_size_get(chunk,
            pageind);
        size_t npages = run_size >> LG_PAGE;
        arena_run_t *run = &miscelm->run;

        assert(pageind + npages <= chunk_npages);
        assert(arena_mapbits_dirty_get(chunk, pageind) ==
            arena_mapbits_dirty_get(chunk, pageind+npages-1));

        /*
         * If purging the spare chunk's run, make it available prior to
         * allocation.
         */
        if (chunk == arena->spare)
            arena_chunk_alloc(arena);

        /* Temporarily allocate the free dirty run. */
        arena_run_split_large(arena, run, run_size, false);
        /* Append to purge_list for later processing. */
        ql_elm_new(miscelm, dr_link);
        ql_tail_insert(miscelms, miscelm, dr_link);

        nstashed += npages;

        if (!all && nstashed >= npurge)
            break;
    }

    return (nstashed);
}

static size_t
arena_purge_stashed(arena_t *arena, arena_chunk_miscelms_t *miscelms)
{
    size_t npurged, nmadvise;
    arena_chunk_map_misc_t *miscelm;

    if (config_stats)
        nmadvise = 0;
    npurged = 0;

    malloc_mutex_unlock(&arena->lock);

    ql_foreach(miscelm, miscelms, dr_link) {
        arena_chunk_t *chunk;
        size_t pageind, run_size, npages, flag_unzeroed, i;
        bool unzeroed;

        chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
        pageind = arena_miscelm_to_pageind(miscelm);
        run_size = arena_mapbits_large_size_get(chunk, pageind);
        npages = run_size >> LG_PAGE;

        assert(pageind + npages <= chunk_npages);
        unzeroed = pages_purge((void *)((uintptr_t)chunk + (pageind <<
            LG_PAGE)), run_size);
        flag_unzeroed = unzeroed ? CHUNK_MAP_UNZEROED : 0;

        /*
         * Set the unzeroed flag for all pages, now that pages_purge()
         * has returned whether the pages were zeroed as a side effect
         * of purging.  This chunk map modification is safe even though
         * the arena mutex isn't currently owned by this thread,
         * because the run is marked as allocated, thus protecting it
         * from being modified by any other thread.  As long as these
         * writes don't perturb the first and last elements'
         * CHUNK_MAP_ALLOCATED bits, behavior is well defined.
         */
        for (i = 0; i < npages; i++) {
            arena_mapbits_unzeroed_set(chunk, pageind+i,
                flag_unzeroed);
        }

        npurged += npages;
        if (config_stats)
            nmadvise++;
    }

    malloc_mutex_lock(&arena->lock);

    if (config_stats) {
        arena->stats.nmadvise += nmadvise;
        arena->stats.purged += npurged;
    }

    return (npurged);
}

static void
arena_unstash_purged(arena_t *arena, arena_chunk_miscelms_t *miscelms)
{
    arena_chunk_map_misc_t *miscelm;

    /* Deallocate runs. */
    for (miscelm = ql_first(miscelms); miscelm != NULL;
        miscelm = ql_first(miscelms)) {
        arena_run_t *run = &miscelm->run;
        ql_remove(miscelms, miscelm, dr_link);
        arena_run_dalloc(arena, run, false, true);
    }
}

void
arena_purge(arena_t *arena, bool all)
{
    size_t npurge, npurgeable, npurged;
    arena_chunk_miscelms_t purge_list;

    /*
     * Calls to arena_dirty_count() are disabled even for debug builds
     * because overhead grows nonlinearly as memory usage increases.
     */
1064 */ 1065 if (false && config_debug) { 1066 size_t ndirty = arena_dirty_count(arena); 1067 assert(ndirty == arena->ndirty); 1068 } 1069 assert((arena->nactive >> opt_lg_dirty_mult) < arena->ndirty || all); 1070 1071 if (config_stats) 1072 arena->stats.npurge++; 1073 1074 npurge = arena_compute_npurge(arena, all); 1075 ql_new(&purge_list); 1076 npurgeable = arena_stash_dirty(arena, all, npurge, &purge_list); 1077 assert(npurgeable >= npurge); 1078 npurged = arena_purge_stashed(arena, &purge_list); 1079 assert(npurged == npurgeable); 1080 arena_unstash_purged(arena, &purge_list); 1081} 1082 1083void 1084arena_purge_all(arena_t *arena) 1085{ 1086 1087 malloc_mutex_lock(&arena->lock); 1088 arena_purge(arena, true); 1089 malloc_mutex_unlock(&arena->lock); 1090} 1091 1092static void 1093arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size, 1094 size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty) 1095{ 1096 size_t size = *p_size; 1097 size_t run_ind = *p_run_ind; 1098 size_t run_pages = *p_run_pages; 1099 1100 /* Try to coalesce forward. */ 1101 if (run_ind + run_pages < chunk_npages && 1102 arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 && 1103 arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty) { 1104 size_t nrun_size = arena_mapbits_unallocated_size_get(chunk, 1105 run_ind+run_pages); 1106 size_t nrun_pages = nrun_size >> LG_PAGE; 1107 1108 /* 1109 * Remove successor from runs_avail; the coalesced run is 1110 * inserted later. 1111 */ 1112 assert(arena_mapbits_unallocated_size_get(chunk, 1113 run_ind+run_pages+nrun_pages-1) == nrun_size); 1114 assert(arena_mapbits_dirty_get(chunk, 1115 run_ind+run_pages+nrun_pages-1) == flag_dirty); 1116 arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages); 1117 1118 /* If the successor is dirty, remove it from runs_dirty. */ 1119 if (flag_dirty != 0) { 1120 arena_dirty_remove(arena, chunk, run_ind+run_pages, 1121 nrun_pages); 1122 } 1123 1124 size += nrun_size; 1125 run_pages += nrun_pages; 1126 1127 arena_mapbits_unallocated_size_set(chunk, run_ind, size); 1128 arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1, 1129 size); 1130 } 1131 1132 /* Try to coalesce backward. */ 1133 if (run_ind > map_bias && arena_mapbits_allocated_get(chunk, 1134 run_ind-1) == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) == 1135 flag_dirty) { 1136 size_t prun_size = arena_mapbits_unallocated_size_get(chunk, 1137 run_ind-1); 1138 size_t prun_pages = prun_size >> LG_PAGE; 1139 1140 run_ind -= prun_pages; 1141 1142 /* 1143 * Remove predecessor from runs_avail; the coalesced run is 1144 * inserted later. 1145 */ 1146 assert(arena_mapbits_unallocated_size_get(chunk, run_ind) == 1147 prun_size); 1148 assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty); 1149 arena_avail_remove(arena, chunk, run_ind, prun_pages); 1150 1151 /* If the predecessor is dirty, remove it from runs_dirty. 
        if (flag_dirty != 0)
            arena_dirty_remove(arena, chunk, run_ind, prun_pages);

        size += prun_size;
        run_pages += prun_pages;

        arena_mapbits_unallocated_size_set(chunk, run_ind, size);
        arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
            size);
    }

    *p_size = size;
    *p_run_ind = run_ind;
    *p_run_pages = run_pages;
}

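/*
 * Return a run to the arena: mark its pages unallocated, coalesce with
 * adjacent unallocated runs of the same dirtiness, reinsert the result into
 * runs_avail (and runs_dirty if dirty), hand the chunk to arena_chunk_dalloc()
 * if the run now spans the whole usable chunk, and possibly trigger purging.
 */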
1242 */ 1243 if (dirty) 1244 arena_maybe_purge(arena); 1245} 1246 1247static void 1248arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, 1249 size_t oldsize, size_t newsize) 1250{ 1251 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); 1252 size_t pageind = arena_miscelm_to_pageind(miscelm); 1253 size_t head_npages = (oldsize - newsize) >> LG_PAGE; 1254 size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind); 1255 1256 assert(oldsize > newsize); 1257 1258 /* 1259 * Update the chunk map so that arena_run_dalloc() can treat the 1260 * leading run as separately allocated. Set the last element of each 1261 * run first, in case of single-page runs. 1262 */ 1263 assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize); 1264 arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty); 1265 arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty); 1266 1267 if (config_debug) { 1268 UNUSED size_t tail_npages = newsize >> LG_PAGE; 1269 assert(arena_mapbits_large_size_get(chunk, 1270 pageind+head_npages+tail_npages-1) == 0); 1271 assert(arena_mapbits_dirty_get(chunk, 1272 pageind+head_npages+tail_npages-1) == flag_dirty); 1273 } 1274 arena_mapbits_large_set(chunk, pageind+head_npages, newsize, 1275 flag_dirty); 1276 1277 arena_run_dalloc(arena, run, false, false); 1278} 1279 1280static void 1281arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, 1282 size_t oldsize, size_t newsize, bool dirty) 1283{ 1284 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); 1285 size_t pageind = arena_miscelm_to_pageind(miscelm); 1286 size_t head_npages = newsize >> LG_PAGE; 1287 size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind); 1288 arena_chunk_map_misc_t *tail_miscelm; 1289 arena_run_t *tail_run; 1290 1291 assert(oldsize > newsize); 1292 1293 /* 1294 * Update the chunk map so that arena_run_dalloc() can treat the 1295 * trailing run as separately allocated. Set the last element of each 1296 * run first, in case of single-page runs. 
1297 */ 1298 assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize); 1299 arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty); 1300 arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty); 1301 1302 if (config_debug) { 1303 UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE; 1304 assert(arena_mapbits_large_size_get(chunk, 1305 pageind+head_npages+tail_npages-1) == 0); 1306 assert(arena_mapbits_dirty_get(chunk, 1307 pageind+head_npages+tail_npages-1) == flag_dirty); 1308 } 1309 arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize, 1310 flag_dirty); 1311 1312 tail_miscelm = arena_miscelm_get(chunk, pageind + head_npages); 1313 tail_run = &tail_miscelm->run; 1314 arena_run_dalloc(arena, tail_run, dirty, false); 1315} 1316 1317static arena_run_t * 1318arena_bin_runs_first(arena_bin_t *bin) 1319{ 1320 arena_chunk_map_misc_t *miscelm = arena_run_tree_first(&bin->runs); 1321 if (miscelm != NULL) 1322 return (&miscelm->run); 1323 1324 return (NULL); 1325} 1326 1327static void 1328arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run) 1329{ 1330 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); 1331 1332 assert(arena_run_tree_search(&bin->runs, miscelm) == NULL); 1333 1334 arena_run_tree_insert(&bin->runs, miscelm); 1335} 1336 1337static void 1338arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run) 1339{ 1340 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); 1341 1342 assert(arena_run_tree_search(&bin->runs, miscelm) != NULL); 1343 1344 arena_run_tree_remove(&bin->runs, miscelm); 1345} 1346 1347static arena_run_t * 1348arena_bin_nonfull_run_tryget(arena_bin_t *bin) 1349{ 1350 arena_run_t *run = arena_bin_runs_first(bin); 1351 if (run != NULL) { 1352 arena_bin_runs_remove(bin, run); 1353 if (config_stats) 1354 bin->stats.reruns++; 1355 } 1356 return (run); 1357} 1358 1359static arena_run_t * 1360arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin) 1361{ 1362 arena_run_t *run; 1363 index_t binind; 1364 arena_bin_info_t *bin_info; 1365 1366 /* Look for a usable run. */ 1367 run = arena_bin_nonfull_run_tryget(bin); 1368 if (run != NULL) 1369 return (run); 1370 /* No existing runs have any space available. */ 1371 1372 binind = arena_bin_index(arena, bin); 1373 bin_info = &arena_bin_info[binind]; 1374 1375 /* Allocate a new run. */ 1376 malloc_mutex_unlock(&bin->lock); 1377 /******************************/ 1378 malloc_mutex_lock(&arena->lock); 1379 run = arena_run_alloc_small(arena, bin_info->run_size, binind); 1380 if (run != NULL) { 1381 /* Initialize run internals. */ 1382 run->binind = binind; 1383 run->nfree = bin_info->nregs; 1384 bitmap_init(run->bitmap, &bin_info->bitmap_info); 1385 } 1386 malloc_mutex_unlock(&arena->lock); 1387 /********************************/ 1388 malloc_mutex_lock(&bin->lock); 1389 if (run != NULL) { 1390 if (config_stats) { 1391 bin->stats.nruns++; 1392 bin->stats.curruns++; 1393 } 1394 return (run); 1395 } 1396 1397 /* 1398 * arena_run_alloc_small() failed, but another thread may have made 1399 * sufficient memory available while this one dropped bin->lock above, 1400 * so search one more time. 1401 */ 1402 run = arena_bin_nonfull_run_tryget(bin); 1403 if (run != NULL) 1404 return (run); 1405 1406 return (NULL); 1407} 1408 1409/* Re-fill bin->runcur, then call arena_run_reg_alloc(). 
static void *
arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
{
    void *ret;
    index_t binind;
    arena_bin_info_t *bin_info;
    arena_run_t *run;

    binind = arena_bin_index(arena, bin);
    bin_info = &arena_bin_info[binind];
    bin->runcur = NULL;
    run = arena_bin_nonfull_run_get(arena, bin);
    if (bin->runcur != NULL && bin->runcur->nfree > 0) {
        /*
         * Another thread updated runcur while this one ran without the
         * bin lock in arena_bin_nonfull_run_get().
         */
        assert(bin->runcur->nfree > 0);
        ret = arena_run_reg_alloc(bin->runcur, bin_info);
        if (run != NULL) {
            arena_chunk_t *chunk;

            /*
             * arena_run_alloc_small() may have allocated run, or
             * it may have pulled run from the bin's run tree.
             * Therefore it is unsafe to make any assumptions about
             * how run has previously been used, and
             * arena_bin_lower_run() must be called, as if a region
             * were just deallocated from the run.
             */
            chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
            if (run->nfree == bin_info->nregs)
                arena_dalloc_bin_run(arena, chunk, run, bin);
            else
                arena_bin_lower_run(arena, chunk, run, bin);
        }
        return (ret);
    }

    if (run == NULL)
        return (NULL);

    bin->runcur = run;

    assert(bin->runcur->nfree > 0);

    return (arena_run_reg_alloc(bin->runcur, bin_info));
}

void
arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, index_t binind,
    uint64_t prof_accumbytes)
{
    unsigned i, nfill;
    arena_bin_t *bin;
    arena_run_t *run;
    void *ptr;

    assert(tbin->ncached == 0);

    if (config_prof && arena_prof_accum(arena, prof_accumbytes))
        prof_idump();
    bin = &arena->bins[binind];
    malloc_mutex_lock(&bin->lock);
    for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
        tbin->lg_fill_div); i < nfill; i++) {
        if ((run = bin->runcur) != NULL && run->nfree > 0)
            ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
        else
            ptr = arena_bin_malloc_hard(arena, bin);
        if (ptr == NULL) {
            /*
             * OOM.  tbin->avail isn't yet filled down to its first
             * element, so the successful allocations (if any) must
             * be moved to the base of tbin->avail before bailing
             * out.
             */
            if (i > 0) {
                memmove(tbin->avail, &tbin->avail[nfill - i],
                    i * sizeof(void *));
            }
            break;
        }
        if (config_fill && unlikely(opt_junk_alloc)) {
            arena_alloc_junk_small(ptr, &arena_bin_info[binind],
                true);
        }
        /* Insert such that low regions get used first. */
        tbin->avail[nfill - 1 - i] = ptr;
    }
    if (config_stats) {
        bin->stats.nmalloc += i;
        bin->stats.nrequests += tbin->tstats.nrequests;
        bin->stats.curregs += i;
        bin->stats.nfills++;
        tbin->tstats.nrequests = 0;
    }
    malloc_mutex_unlock(&bin->lock);
    tbin->ncached = i;
}

void
arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
{

    if (zero) {
        size_t redzone_size = bin_info->redzone_size;
        memset((void *)((uintptr_t)ptr - redzone_size), 0xa5,
            redzone_size);
        memset((void *)((uintptr_t)ptr + bin_info->reg_size), 0xa5,
            redzone_size);
    } else {
        memset((void *)((uintptr_t)ptr - bin_info->redzone_size), 0xa5,
            bin_info->reg_interval);
    }
}

#ifdef JEMALLOC_JET
#undef arena_redzone_corruption
#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption_impl)
#endif
static void
arena_redzone_corruption(void *ptr, size_t usize, bool after,
    size_t offset, uint8_t byte)
{

    malloc_printf("<jemalloc>: Corrupt redzone %zu byte%s %s %p "
        "(size %zu), byte=%#x\n", offset, (offset == 1) ? "" : "s",
        after ? "after" : "before", ptr, usize, byte);
}
#ifdef JEMALLOC_JET
#undef arena_redzone_corruption
#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption)
arena_redzone_corruption_t *arena_redzone_corruption =
    JEMALLOC_N(arena_redzone_corruption_impl);
#endif

static void
arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset)
{
    size_t size = bin_info->reg_size;
    size_t redzone_size = bin_info->redzone_size;
    size_t i;
    bool error = false;

    if (opt_junk_alloc) {
        for (i = 1; i <= redzone_size; i++) {
            uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i);
            if (*byte != 0xa5) {
                error = true;
                arena_redzone_corruption(ptr, size, false, i, *byte);
                if (reset)
                    *byte = 0xa5;
            }
        }
        for (i = 0; i < redzone_size; i++) {
            uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i);
            if (*byte != 0xa5) {
                error = true;
                arena_redzone_corruption(ptr, size, true, i, *byte);
                if (reset)
                    *byte = 0xa5;
            }
        }
    }

    if (opt_abort && error)
        abort();
}

#ifdef JEMALLOC_JET
#undef arena_dalloc_junk_small
#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small_impl)
#endif
void
arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
{
    size_t redzone_size = bin_info->redzone_size;

    arena_redzones_validate(ptr, bin_info, false);
    memset((void *)((uintptr_t)ptr - redzone_size), 0x5a,
        bin_info->reg_interval);
}
#ifdef JEMALLOC_JET
#undef arena_dalloc_junk_small
#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
arena_dalloc_junk_small_t *arena_dalloc_junk_small =
    JEMALLOC_N(arena_dalloc_junk_small_impl);
#endif

void
arena_quarantine_junk_small(void *ptr, size_t usize)
{
    index_t binind;
    arena_bin_info_t *bin_info;
    cassert(config_fill);
    assert(opt_junk_free);
    assert(opt_quarantine);
    assert(usize <= SMALL_MAXCLASS);

    binind = size2index(usize);
    bin_info = &arena_bin_info[binind];
    arena_redzones_validate(ptr, bin_info, true);
}

void *
arena_malloc_small(arena_t *arena, size_t size, bool zero)
{
    void *ret;
    arena_bin_t *bin;
    arena_run_t *run;
    index_t binind;

    binind = size2index(size);
    assert(binind < NBINS);
    bin = &arena->bins[binind];
    size = index2size(binind);

    malloc_mutex_lock(&bin->lock);
    if ((run = bin->runcur) != NULL && run->nfree > 0)
        ret = arena_run_reg_alloc(run, &arena_bin_info[binind]);
    else
        ret = arena_bin_malloc_hard(arena, bin);

    if (ret == NULL) {
        malloc_mutex_unlock(&bin->lock);
        return (NULL);
    }

    if (config_stats) {
        bin->stats.nmalloc++;
        bin->stats.nrequests++;
        bin->stats.curregs++;
    }
    malloc_mutex_unlock(&bin->lock);
    if (config_prof && !isthreaded && arena_prof_accum(arena, size))
        prof_idump();

    if (!zero) {
        if (config_fill) {
            if (unlikely(opt_junk_alloc)) {
                arena_alloc_junk_small(ret,
                    &arena_bin_info[binind], false);
            } else if (unlikely(opt_zero))
                memset(ret, 0, size);
        }
        JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
    } else {
        if (config_fill && unlikely(opt_junk_alloc)) {
            arena_alloc_junk_small(ret, &arena_bin_info[binind],
                true);
        }
        JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
        memset(ret, 0, size);
    }

    return (ret);
}

void *
arena_malloc_large(arena_t *arena, size_t size, bool zero)
{
    void *ret;
    size_t usize;
    arena_run_t *run;
    arena_chunk_map_misc_t *miscelm;
    UNUSED bool idump;

    /* Large allocation. */
    usize = s2u(size);
    malloc_mutex_lock(&arena->lock);
    run = arena_run_alloc_large(arena, usize, zero);
    if (run == NULL) {
        malloc_mutex_unlock(&arena->lock);
        return (NULL);
    }
    miscelm = arena_run_to_miscelm(run);
    ret = arena_miscelm_to_rpages(miscelm);
    if (config_stats) {
        index_t index = size2index(usize) - NBINS;

        arena->stats.nmalloc_large++;
        arena->stats.nrequests_large++;
        arena->stats.allocated_large += usize;
        arena->stats.lstats[index].nmalloc++;
        arena->stats.lstats[index].nrequests++;
        arena->stats.lstats[index].curruns++;
    }
    if (config_prof)
        idump = arena_prof_accum_locked(arena, usize);
    malloc_mutex_unlock(&arena->lock);
    if (config_prof && idump)
        prof_idump();

    if (!zero) {
        if (config_fill) {
            if (unlikely(opt_junk_alloc))
                memset(ret, 0xa5, usize);
            else if (unlikely(opt_zero))
                memset(ret, 0, usize);
        }
    }

    return (ret);
}

/* Only handles large allocations that require more than page alignment. */
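/*
 * Alignment is achieved by over-allocating (size + alignment - PAGE), then
 * trimming the leading and trailing pages with arena_run_trim_head() and
 * arena_run_trim_tail() so that the returned run starts at an aligned address.
 */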
static void *
arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t size, size_t alignment,
    bool zero)
{
    void *ret;
    size_t alloc_size, leadsize, trailsize;
    arena_run_t *run;
    arena_chunk_t *chunk;
    arena_chunk_map_misc_t *miscelm;
    void *rpages;

    assert((size & PAGE_MASK) == 0);

    arena = arena_choose(tsd, arena);
    if (unlikely(arena == NULL))
        return (NULL);

    alignment = PAGE_CEILING(alignment);
    alloc_size = size + alignment - PAGE;

    malloc_mutex_lock(&arena->lock);
    run = arena_run_alloc_large(arena, alloc_size, false);
    if (run == NULL) {
        malloc_mutex_unlock(&arena->lock);
        return (NULL);
    }
    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
    miscelm = arena_run_to_miscelm(run);
    rpages = arena_miscelm_to_rpages(miscelm);

    leadsize = ALIGNMENT_CEILING((uintptr_t)rpages, alignment) -
        (uintptr_t)rpages;
    assert(alloc_size >= leadsize + size);
    trailsize = alloc_size - leadsize - size;
    if (leadsize != 0) {
        arena_chunk_map_misc_t *head_miscelm = miscelm;
        arena_run_t *head_run = run;

        miscelm = arena_miscelm_get(chunk,
            arena_miscelm_to_pageind(head_miscelm) + (leadsize >>
            LG_PAGE));
        run = &miscelm->run;

        arena_run_trim_head(arena, chunk, head_run, alloc_size,
            alloc_size - leadsize);
    }
    if (trailsize != 0) {
        arena_run_trim_tail(arena, chunk, run, size + trailsize, size,
            false);
    }
    arena_run_init_large(arena, run, size, zero);
    ret = arena_miscelm_to_rpages(miscelm);

    if (config_stats) {
        index_t index = size2index(size) - NBINS;

        arena->stats.nmalloc_large++;
        arena->stats.nrequests_large++;
        arena->stats.allocated_large += size;
        arena->stats.lstats[index].nmalloc++;
        arena->stats.lstats[index].nrequests++;
        arena->stats.lstats[index].curruns++;
    }
    malloc_mutex_unlock(&arena->lock);

    if (config_fill && !zero) {
        if (unlikely(opt_junk_alloc))
            memset(ret, 0xa5, size);
        else if (unlikely(opt_zero))
            memset(ret, 0, size);
    }
    return (ret);
}

void *
arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
    bool zero, tcache_t *tcache)
{
    void *ret;

    if (usize <= SMALL_MAXCLASS && alignment < PAGE)
        ret = arena_malloc(tsd, arena, usize, zero, tcache);
    else {
        if (likely(usize <= arena_maxclass)) {
            ret = arena_palloc_large(tsd, arena, usize, alignment,
                zero);
        } else if (likely(alignment <= chunksize))
            ret = huge_malloc(tsd, arena, usize, zero, tcache);
        else {
            ret = huge_palloc(tsd, arena, usize, alignment, zero,
                tcache);
        }
    }
    return (ret);
}

void
arena_prof_promoted(const void *ptr, size_t size)
{
    arena_chunk_t *chunk;
    size_t pageind;
    index_t binind;

    cassert(config_prof);
    assert(ptr != NULL);
    assert(CHUNK_ADDR2BASE(ptr) != ptr);
    assert(isalloc(ptr, false) == LARGE_MINCLASS);
    assert(isalloc(ptr, true) == LARGE_MINCLASS);
    assert(size <= SMALL_MAXCLASS);

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
    binind = size2index(size);
    assert(binind < NBINS);
    arena_mapbits_large_binind_set(chunk, pageind, binind);

    assert(isalloc(ptr, false) == LARGE_MINCLASS);
    assert(isalloc(ptr, true) == size);
}

static void
arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
    arena_bin_t *bin)
{

    /* Dissociate run from bin. */
    if (run == bin->runcur)
        bin->runcur = NULL;
    else {
        index_t binind = arena_bin_index(chunk->node.arena, bin);
        arena_bin_info_t *bin_info = &arena_bin_info[binind];

        if (bin_info->nregs != 1) {
            /*
             * This block's conditional is necessary because if the
             * run only contains one region, then it never gets
             * inserted into the non-full runs tree.
             */
            arena_bin_runs_remove(bin, run);
        }
    }
}

static void
arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
    arena_bin_t *bin)
{

    assert(run != bin->runcur);
    assert(arena_run_tree_search(&bin->runs, arena_run_to_miscelm(run)) ==
        NULL);

    malloc_mutex_unlock(&bin->lock);
    /******************************/
    malloc_mutex_lock(&arena->lock);
    arena_run_dalloc(arena, run, true, false);
    malloc_mutex_unlock(&arena->lock);
    /****************************/
    malloc_mutex_lock(&bin->lock);
    if (config_stats)
        bin->stats.curruns--;
}

static void
arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
    arena_bin_t *bin)
{

    /*
     * Make sure that if bin->runcur is non-NULL, it refers to the lowest
     * non-full run.  It is okay to NULL runcur out rather than proactively
     * keeping it pointing at the lowest non-full run.
     */
    if ((uintptr_t)run < (uintptr_t)bin->runcur) {
        /* Switch runcur. */
        if (bin->runcur->nfree > 0)
            arena_bin_runs_insert(bin, bin->runcur);
        bin->runcur = run;
        if (config_stats)
            bin->stats.reruns++;
    } else
        arena_bin_runs_insert(bin, run);
}

static void
arena_dalloc_bin_locked_impl(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    arena_chunk_map_bits_t *bitselm, bool junked)
{
    size_t pageind, rpages_ind;
    arena_run_t *run;
    arena_bin_t *bin;
    arena_bin_info_t *bin_info;
    index_t binind;

    pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
    rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
    run = &arena_miscelm_get(chunk, rpages_ind)->run;
    binind = run->binind;
    bin = &arena->bins[binind];
    bin_info = &arena_bin_info[binind];

    if (!junked && config_fill && unlikely(opt_junk_free))
        arena_dalloc_junk_small(ptr, bin_info);

    arena_run_reg_dalloc(run, ptr);
    if (run->nfree == bin_info->nregs) {
        arena_dissociate_bin_run(chunk, run, bin);
        arena_dalloc_bin_run(arena, chunk, run, bin);
    } else if (run->nfree == 1 && run != bin->runcur)
        arena_bin_lower_run(arena, chunk, run, bin);

    if (config_stats) {
        bin->stats.ndalloc++;
        bin->stats.curregs--;
    }
}

void
arena_dalloc_bin_junked_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    arena_chunk_map_bits_t *bitselm)
{

    arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, true);
}

void
arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    size_t pageind, arena_chunk_map_bits_t *bitselm)
{
    arena_run_t *run;
    arena_bin_t *bin;
    size_t rpages_ind;

    rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
    run = &arena_miscelm_get(chunk, rpages_ind)->run;
    bin = &arena->bins[run->binind];
void
arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    size_t pageind, arena_chunk_map_bits_t *bitselm)
{
	arena_run_t *run;
	arena_bin_t *bin;
	size_t rpages_ind;

	rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
	run = &arena_miscelm_get(chunk, rpages_ind)->run;
	bin = &arena->bins[run->binind];
	malloc_mutex_lock(&bin->lock);
	arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, false);
	malloc_mutex_unlock(&bin->lock);
}

void
arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    size_t pageind)
{
	arena_chunk_map_bits_t *bitselm;

	if (config_debug) {
		/* arena_ptr_small_binind_get() does extra sanity checking. */
		assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
		    pageind)) != BININD_INVALID);
	}
	bitselm = arena_bitselm_get(chunk, pageind);
	arena_dalloc_bin(arena, chunk, ptr, pageind, bitselm);
}

#ifdef JEMALLOC_JET
#undef arena_dalloc_junk_large
#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large_impl)
#endif
void
arena_dalloc_junk_large(void *ptr, size_t usize)
{

	if (config_fill && unlikely(opt_junk_free))
		memset(ptr, 0x5a, usize);
}
#ifdef JEMALLOC_JET
#undef arena_dalloc_junk_large
#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large)
arena_dalloc_junk_large_t *arena_dalloc_junk_large =
    JEMALLOC_N(arena_dalloc_junk_large_impl);
#endif

void
arena_dalloc_large_locked_impl(arena_t *arena, arena_chunk_t *chunk,
    void *ptr, bool junked)
{
	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
	arena_run_t *run = &miscelm->run;

	if (config_fill || config_stats) {
		size_t usize = arena_mapbits_large_size_get(chunk, pageind);

		if (!junked)
			arena_dalloc_junk_large(ptr, usize);
		if (config_stats) {
			index_t index = size2index(usize) - NBINS;

			arena->stats.ndalloc_large++;
			arena->stats.allocated_large -= usize;
			arena->stats.lstats[index].ndalloc++;
			arena->stats.lstats[index].curruns--;
		}
	}

	arena_run_dalloc(arena, run, true, false);
}

void
arena_dalloc_large_junked_locked(arena_t *arena, arena_chunk_t *chunk,
    void *ptr)
{

	arena_dalloc_large_locked_impl(arena, chunk, ptr, true);
}

void
arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
{

	malloc_mutex_lock(&arena->lock);
	arena_dalloc_large_locked_impl(arena, chunk, ptr, false);
	malloc_mutex_unlock(&arena->lock);
}

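/*
 * The two helpers below implement in-place resizing of large allocations:
 * arena_ralloc_large_shrink() trims trailing pages off an existing run, and
 * arena_ralloc_large_grow() attempts to absorb the immediately following run.
 * Both update the large-allocation stats as if the old allocation had been
 * freed and a new one of the final size had been allocated.
 */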
static void
arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    size_t oldsize, size_t size)
{
	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
	arena_run_t *run = &miscelm->run;

	assert(size < oldsize);

	/*
	 * Shrink the run, and make trailing pages available for other
	 * allocations.
	 */
	malloc_mutex_lock(&arena->lock);
	arena_run_trim_tail(arena, chunk, run, oldsize, size, true);
	if (config_stats) {
		index_t oldindex = size2index(oldsize) - NBINS;
		index_t index = size2index(size) - NBINS;

		arena->stats.ndalloc_large++;
		arena->stats.allocated_large -= oldsize;
		arena->stats.lstats[oldindex].ndalloc++;
		arena->stats.lstats[oldindex].curruns--;

		arena->stats.nmalloc_large++;
		arena->stats.nrequests_large++;
		arena->stats.allocated_large += size;
		arena->stats.lstats[index].nmalloc++;
		arena->stats.lstats[index].nrequests++;
		arena->stats.lstats[index].curruns++;
	}
	malloc_mutex_unlock(&arena->lock);
}

static bool
arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    size_t oldsize, size_t size, size_t extra, bool zero)
{
	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	size_t npages = oldsize >> LG_PAGE;
	size_t followsize;
	size_t usize_min = s2u(size);

	assert(oldsize == arena_mapbits_large_size_get(chunk, pageind));

	/* Try to extend the run. */
	assert(usize_min > oldsize);
	malloc_mutex_lock(&arena->lock);
	if (pageind + npages < chunk_npages &&
	    arena_mapbits_allocated_get(chunk, pageind+npages) == 0 &&
	    (followsize = arena_mapbits_unallocated_size_get(chunk,
	    pageind+npages)) >= usize_min - oldsize) {
		/*
		 * The next run is available and sufficiently large.  Split the
		 * following run, then merge the first part with the existing
		 * allocation.
		 */
		arena_run_t *run;
		size_t flag_dirty, splitsize, usize;

		usize = s2u(size + extra);
		while (oldsize + followsize < usize)
			usize = index2size(size2index(usize)-1);
		assert(usize >= usize_min);
		splitsize = usize - oldsize;

		run = &arena_miscelm_get(chunk, pageind+npages)->run;
		arena_run_split_large(arena, run, splitsize, zero);

		size = oldsize + splitsize;
		npages = size >> LG_PAGE;

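		/*
		 * usize started as s2u(size + extra) and was stepped down one
		 * size class at a time (in the loop above) until it fit within
		 * oldsize + followsize; splitsize bytes have now been split
		 * off of the following run and merged into this allocation.
		 */
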
		/*
		 * Mark the extended run as dirty if either portion of the run
		 * was dirty before allocation.  This is rather pedantic,
		 * because there's not actually any sequence of events that
		 * could cause the resulting run to be passed to
		 * arena_run_dalloc() with the dirty argument set to false
		 * (which is when dirty flag consistency would really matter).
		 */
		flag_dirty = arena_mapbits_dirty_get(chunk, pageind) |
		    arena_mapbits_dirty_get(chunk, pageind+npages-1);
		arena_mapbits_large_set(chunk, pageind, size, flag_dirty);
		arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty);

		if (config_stats) {
			index_t oldindex = size2index(oldsize) - NBINS;
			index_t index = size2index(size) - NBINS;

			arena->stats.ndalloc_large++;
			arena->stats.allocated_large -= oldsize;
			arena->stats.lstats[oldindex].ndalloc++;
			arena->stats.lstats[oldindex].curruns--;

			arena->stats.nmalloc_large++;
			arena->stats.nrequests_large++;
			arena->stats.allocated_large += size;
			arena->stats.lstats[index].nmalloc++;
			arena->stats.lstats[index].nrequests++;
			arena->stats.lstats[index].curruns++;
		}
		malloc_mutex_unlock(&arena->lock);
		return (false);
	}
	malloc_mutex_unlock(&arena->lock);

	return (true);
}

#ifdef JEMALLOC_JET
#undef arena_ralloc_junk_large
#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large_impl)
#endif
static void
arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize)
{

	if (config_fill && unlikely(opt_junk_free)) {
		memset((void *)((uintptr_t)ptr + usize), 0x5a,
		    old_usize - usize);
	}
}
#ifdef JEMALLOC_JET
#undef arena_ralloc_junk_large
#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large)
arena_ralloc_junk_large_t *arena_ralloc_junk_large =
    JEMALLOC_N(arena_ralloc_junk_large_impl);
#endif

/*
 * Try to resize a large allocation, in order to avoid copying.  This will
 * always fail when growing an object if the run that follows it is already in
 * use.
 */
static bool
arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
    bool zero)
{
	size_t usize;

	/* Make sure extra can't cause size_t overflow. */
	if (unlikely(extra >= arena_maxclass))
		return (true);

	usize = s2u(size + extra);
	if (usize == oldsize) {
		/* Same size class. */
		return (false);
	} else {
		arena_chunk_t *chunk;
		arena_t *arena;

		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
		arena = chunk->node.arena;

		if (usize < oldsize) {
			/* Fill before shrinking in order to avoid a race. */
			arena_ralloc_junk_large(ptr, oldsize, usize);
			arena_ralloc_large_shrink(arena, chunk, ptr, oldsize,
			    usize);
			return (false);
		} else {
			bool ret = arena_ralloc_large_grow(arena, chunk, ptr,
			    oldsize, size, extra, zero);
			if (config_fill && !ret && !zero) {
				if (unlikely(opt_junk_alloc)) {
					memset((void *)((uintptr_t)ptr +
					    oldsize), 0xa5, isalloc(ptr,
					    config_prof) - oldsize);
				} else if (unlikely(opt_zero)) {
					memset((void *)((uintptr_t)ptr +
					    oldsize), 0, isalloc(ptr,
					    config_prof) - oldsize);
				}
			}
			return (ret);
		}
	}
}

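/*
 * Try to satisfy the resize request without moving the allocation: return
 * false if the existing allocation now satisfies the request (resizing large
 * runs in place via arena_ralloc_large() as needed), or true if reallocation
 * would require a move.  Sizes beyond arena_maxclass are delegated to
 * huge_ralloc_no_move().
 */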
bool
arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
    bool zero)
{

	if (likely(size <= arena_maxclass)) {
		/*
		 * Avoid moving the allocation if the size class can be left
		 * the same.
		 */
		if (likely(oldsize <= arena_maxclass)) {
			if (oldsize <= SMALL_MAXCLASS) {
				assert(
				    arena_bin_info[size2index(oldsize)].reg_size
				    == oldsize);
				if ((size + extra <= SMALL_MAXCLASS &&
				    size2index(size + extra) ==
				    size2index(oldsize)) || (size <= oldsize &&
				    size + extra >= oldsize))
					return (false);
			} else {
				assert(size <= arena_maxclass);
				if (size + extra > SMALL_MAXCLASS) {
					if (!arena_ralloc_large(ptr, oldsize,
					    size, extra, zero))
						return (false);
				}
			}
		}

		/* Reallocation would require a move. */
		return (true);
	} else
		return (huge_ralloc_no_move(ptr, oldsize, size, extra, zero));
}

void *
arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
    size_t extra, size_t alignment, bool zero, tcache_t *tcache)
{
	void *ret;

	if (likely(size <= arena_maxclass)) {
		size_t copysize;

		/* Try to avoid moving the allocation. */
		if (!arena_ralloc_no_move(ptr, oldsize, size, extra, zero))
			return (ptr);

		/*
		 * size and oldsize are different enough that we need to move
		 * the object.  In that case, fall back to allocating new space
		 * and copying.
		 */
		if (alignment != 0) {
			size_t usize = sa2u(size + extra, alignment);
			if (usize == 0)
				return (NULL);
			ret = ipalloct(tsd, usize, alignment, zero, tcache,
			    arena);
		} else {
			ret = arena_malloc(tsd, arena, size + extra, zero,
			    tcache);
		}

		if (ret == NULL) {
			if (extra == 0)
				return (NULL);
			/* Try again, this time without extra. */
			if (alignment != 0) {
				size_t usize = sa2u(size, alignment);
				if (usize == 0)
					return (NULL);
				ret = ipalloct(tsd, usize, alignment, zero,
				    tcache, arena);
			} else {
				ret = arena_malloc(tsd, arena, size, zero,
				    tcache);
			}

			if (ret == NULL)
				return (NULL);
		}

		/*
		 * Junk/zero-filling were already done by
		 * ipalloc()/arena_malloc().
		 */

		/*
		 * Copy at most size bytes (not size+extra), since the caller
		 * has no expectation that the extra bytes will be reliably
		 * preserved.
		 */
		copysize = (size < oldsize) ? size : oldsize;
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
		memcpy(ret, ptr, copysize);
		isqalloc(tsd, ptr, oldsize, tcache);
	} else {
		ret = huge_ralloc(tsd, arena, ptr, oldsize, size, extra,
		    alignment, zero, tcache);
	}
	return (ret);
}

dss_prec_t
arena_dss_prec_get(arena_t *arena)
{
	dss_prec_t ret;

	malloc_mutex_lock(&arena->lock);
	ret = arena->dss_prec;
	malloc_mutex_unlock(&arena->lock);
	return (ret);
}

bool
arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec)
{

	if (!have_dss)
		return (dss_prec != dss_prec_disabled);
	malloc_mutex_lock(&arena->lock);
	arena->dss_prec = dss_prec;
	malloc_mutex_unlock(&arena->lock);
	return (false);
}

void
arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
    size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
    malloc_large_stats_t *lstats, malloc_huge_stats_t *hstats)
{
	unsigned i;

	malloc_mutex_lock(&arena->lock);
	*dss = dss_prec_names[arena->dss_prec];
	*nactive += arena->nactive;
	*ndirty += arena->ndirty;

	astats->mapped += arena->stats.mapped;
	astats->npurge += arena->stats.npurge;
	astats->nmadvise += arena->stats.nmadvise;
	astats->purged += arena->stats.purged;
	astats->metadata_mapped += arena->stats.metadata_mapped;
	astats->metadata_allocated += arena_metadata_allocated_get(arena);
	astats->allocated_large += arena->stats.allocated_large;
	astats->nmalloc_large += arena->stats.nmalloc_large;
	astats->ndalloc_large += arena->stats.ndalloc_large;
	astats->nrequests_large += arena->stats.nrequests_large;
	astats->allocated_huge += arena->stats.allocated_huge;
	astats->nmalloc_huge += arena->stats.nmalloc_huge;
	astats->ndalloc_huge += arena->stats.ndalloc_huge;

	for (i = 0; i < nlclasses; i++) {
		lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
		lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
		lstats[i].nrequests += arena->stats.lstats[i].nrequests;
		lstats[i].curruns += arena->stats.lstats[i].curruns;
	}

	for (i = 0; i < nhclasses; i++) {
		hstats[i].nmalloc += arena->stats.hstats[i].nmalloc;
		hstats[i].ndalloc += arena->stats.hstats[i].ndalloc;
		hstats[i].curhchunks += arena->stats.hstats[i].curhchunks;
	}
	malloc_mutex_unlock(&arena->lock);

	for (i = 0; i < NBINS; i++) {
		arena_bin_t *bin = &arena->bins[i];

		malloc_mutex_lock(&bin->lock);
		bstats[i].nmalloc += bin->stats.nmalloc;
		bstats[i].ndalloc += bin->stats.ndalloc;
		bstats[i].nrequests += bin->stats.nrequests;
		bstats[i].curregs += bin->stats.curregs;
		if (config_tcache) {
			bstats[i].nfills += bin->stats.nfills;
			bstats[i].nflushes += bin->stats.nflushes;
		}
		bstats[i].nruns += bin->stats.nruns;
		bstats[i].reruns += bin->stats.reruns;
		bstats[i].curruns += bin->stats.curruns;
		malloc_mutex_unlock(&bin->lock);
	}
}

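/*
 * When stats are enabled, arena_new() carves its stats arrays out of a single
 * base allocation laid out as:
 *
 *   [arena_t (padded to a cacheline boundary)]
 *   [nlclasses * malloc_large_stats_t (padded to a quantum boundary)]
 *   [nhclasses * malloc_huge_stats_t]
 *
 * which matches the lstats/hstats pointer arithmetic below.
 */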
arena_t *
arena_new(unsigned ind)
{
	arena_t *arena;
	unsigned i;
	arena_bin_t *bin;

	/*
	 * Allocate arena, arena->lstats, and arena->hstats contiguously, mainly
	 * because there is no way to clean up if base_alloc() OOMs.
	 */
	if (config_stats) {
		arena = (arena_t *)base_alloc(CACHELINE_CEILING(sizeof(arena_t))
		    + QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t) +
		    nhclasses) * sizeof(malloc_huge_stats_t));
	} else
		arena = (arena_t *)base_alloc(sizeof(arena_t));
	if (arena == NULL)
		return (NULL);

	arena->ind = ind;
	arena->nthreads = 0;
	if (malloc_mutex_init(&arena->lock))
		return (NULL);
	arena->chunk_alloc = chunk_alloc_default;
	arena->chunk_dalloc = chunk_dalloc_default;
	ql_new(&arena->huge);
	if (malloc_mutex_init(&arena->huge_mtx))
		return (NULL);
	extent_tree_szad_new(&arena->chunks_szad_mmap);
	extent_tree_ad_new(&arena->chunks_ad_mmap);
	extent_tree_szad_new(&arena->chunks_szad_dss);
	extent_tree_ad_new(&arena->chunks_ad_dss);
	ql_new(&arena->node_cache);
	if (malloc_mutex_init(&arena->chunks_mtx))
		return (NULL);
	if (malloc_mutex_init(&arena->node_cache_mtx))
		return (NULL);

	if (config_stats) {
		memset(&arena->stats, 0, sizeof(arena_stats_t));
		arena->stats.lstats = (malloc_large_stats_t *)((uintptr_t)arena
		    + CACHELINE_CEILING(sizeof(arena_t)));
		memset(arena->stats.lstats, 0, nlclasses *
		    sizeof(malloc_large_stats_t));
		arena->stats.hstats = (malloc_huge_stats_t *)((uintptr_t)arena
		    + CACHELINE_CEILING(sizeof(arena_t)) +
		    QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t)));
		memset(arena->stats.hstats, 0, nhclasses *
		    sizeof(malloc_huge_stats_t));
		if (config_tcache)
			ql_new(&arena->tcache_ql);
	}

	if (config_prof)
		arena->prof_accumbytes = 0;

	arena->dss_prec = chunk_dss_prec_get();

	arena->spare = NULL;

	arena->nactive = 0;
	arena->ndirty = 0;

	arena_avail_tree_new(&arena->runs_avail);
	ql_new(&arena->runs_dirty);

	/* Initialize bins. */
	for (i = 0; i < NBINS; i++) {
		bin = &arena->bins[i];
		if (malloc_mutex_init(&bin->lock))
			return (NULL);
		bin->runcur = NULL;
		arena_run_tree_new(&bin->runs);
		if (config_stats)
			memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
	}

	return (arena);
}

/*
 * Calculate bin_info->run_size such that it meets the following constraints:
 *
 * *) bin_info->run_size <= arena_maxrun
 * *) bin_info->nregs <= RUN_MAXREGS
 *
 * bin_info->nregs and bin_info->reg0_offset are also calculated here, since
 * these settings are all interdependent.
 */
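/*
 * For example, assuming 4 KiB pages and no redzones, reg_size == 80 yields
 * perfect_run_size == 20480 (5 pages), the smallest page multiple that
 * 80-byte regions divide evenly, giving 256 regions per run.
 */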
static void
bin_info_run_size_calc(arena_bin_info_t *bin_info)
{
	size_t pad_size;
	size_t try_run_size, perfect_run_size, actual_run_size;
	uint32_t try_nregs, perfect_nregs, actual_nregs;

	/*
	 * Determine redzone size based on minimum alignment and minimum
	 * redzone size.  Add padding to the end of the run if it is needed to
	 * align the regions.  The padding allows each redzone to be half the
	 * minimum alignment; without the padding, each redzone would have to
	 * be twice as large in order to maintain alignment.
	 */
	if (config_fill && unlikely(opt_redzone)) {
		size_t align_min = ZU(1) << (jemalloc_ffs(bin_info->reg_size) -
		    1);
		if (align_min <= REDZONE_MINSIZE) {
			bin_info->redzone_size = REDZONE_MINSIZE;
			pad_size = 0;
		} else {
			bin_info->redzone_size = align_min >> 1;
			pad_size = bin_info->redzone_size;
		}
	} else {
		bin_info->redzone_size = 0;
		pad_size = 0;
	}
	bin_info->reg_interval = bin_info->reg_size +
	    (bin_info->redzone_size << 1);

	/*
	 * Compute run size under ideal conditions (no redzones, no limit on run
	 * size).
	 */
	try_run_size = PAGE;
	try_nregs = try_run_size / bin_info->reg_size;
	do {
		perfect_run_size = try_run_size;
		perfect_nregs = try_nregs;

		try_run_size += PAGE;
		try_nregs = try_run_size / bin_info->reg_size;
	} while (perfect_run_size != perfect_nregs * bin_info->reg_size);
	assert(perfect_nregs <= RUN_MAXREGS);

	actual_run_size = perfect_run_size;
	actual_nregs = (actual_run_size - pad_size) / bin_info->reg_interval;

	/*
	 * Redzones can require enough padding that not even a single region can
	 * fit within the number of pages that would normally be dedicated to a
	 * run for this size class.  Increase the run size until at least one
	 * region fits.
	 */
	while (actual_nregs == 0) {
		assert(config_fill && unlikely(opt_redzone));

		actual_run_size += PAGE;
		actual_nregs = (actual_run_size - pad_size) /
		    bin_info->reg_interval;
	}

	/*
	 * Make sure that the run will fit within an arena chunk.
	 */
	while (actual_run_size > arena_maxrun) {
		actual_run_size -= PAGE;
		actual_nregs = (actual_run_size - pad_size) /
		    bin_info->reg_interval;
	}
	assert(actual_nregs > 0);

	/* Copy final settings. */
	bin_info->run_size = actual_run_size;
	bin_info->nregs = actual_nregs;
	bin_info->reg0_offset = actual_run_size - (actual_nregs *
	    bin_info->reg_interval) - pad_size + bin_info->redzone_size;

	assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs
	    * bin_info->reg_interval) + pad_size == bin_info->run_size);
}

static void
bin_info_init(void)
{
	arena_bin_info_t *bin_info;

#define BIN_INFO_INIT_bin_yes(index, size)				\
	bin_info = &arena_bin_info[index];				\
	bin_info->reg_size = size;					\
	bin_info_run_size_calc(bin_info);				\
	bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
#define BIN_INFO_INIT_bin_no(index, size)
#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup)	\
	BIN_INFO_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
	SIZE_CLASSES
#undef BIN_INFO_INIT_bin_yes
#undef BIN_INFO_INIT_bin_no
#undef SC
}

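/*
 * Note that arena_boot() calls bin_info_init() only after arena_maxrun has
 * been computed, since bin_info_run_size_calc() clamps run sizes to
 * arena_maxrun.
 */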
void
arena_boot(void)
{
	size_t header_size;
	unsigned i;

	/*
	 * Compute the header size such that it is large enough to contain the
	 * page map.  The page map is biased to omit entries for the header
	 * itself, so some iteration is necessary to compute the map bias.
	 *
	 * 1) Compute safe header_size and map_bias values that include enough
	 *    space for an unbiased page map.
	 * 2) Refine map_bias based on (1) to omit the header pages in the page
	 *    map.  The resulting map_bias may be one too small.
	 * 3) Refine map_bias based on (2).  The result will be >= the result
	 *    from (2), and will always be correct.
	 */
	map_bias = 0;
	for (i = 0; i < 3; i++) {
		header_size = offsetof(arena_chunk_t, map_bits) +
		    ((sizeof(arena_chunk_map_bits_t) +
		    sizeof(arena_chunk_map_misc_t)) * (chunk_npages-map_bias));
		map_bias = (header_size + PAGE_MASK) >> LG_PAGE;
	}
	assert(map_bias > 0);

	map_misc_offset = offsetof(arena_chunk_t, map_bits) +
	    sizeof(arena_chunk_map_bits_t) * (chunk_npages-map_bias);

	arena_maxrun = chunksize - (map_bias << LG_PAGE);
	assert(arena_maxrun > 0);
	arena_maxclass = index2size(size2index(chunksize)-1);
	if (arena_maxclass > arena_maxrun) {
		/*
		 * For small chunk sizes it's possible for there to be fewer
		 * non-header pages available than are necessary to serve the
		 * size classes just below chunksize.
		 */
		arena_maxclass = arena_maxrun;
	}
	assert(arena_maxclass > 0);
	nlclasses = size2index(arena_maxclass) - size2index(SMALL_MAXCLASS);
	nhclasses = NSIZES - nlclasses - NBINS;

	bin_info_init();
}

void
arena_prefork(arena_t *arena)
{
	unsigned i;

	malloc_mutex_prefork(&arena->lock);
	malloc_mutex_prefork(&arena->huge_mtx);
	malloc_mutex_prefork(&arena->chunks_mtx);
	malloc_mutex_prefork(&arena->node_cache_mtx);
	for (i = 0; i < NBINS; i++)
		malloc_mutex_prefork(&arena->bins[i].lock);
}

void
arena_postfork_parent(arena_t *arena)
{
	unsigned i;

	for (i = 0; i < NBINS; i++)
		malloc_mutex_postfork_parent(&arena->bins[i].lock);
	malloc_mutex_postfork_parent(&arena->node_cache_mtx);
	malloc_mutex_postfork_parent(&arena->chunks_mtx);
	malloc_mutex_postfork_parent(&arena->huge_mtx);
	malloc_mutex_postfork_parent(&arena->lock);
}

void
arena_postfork_child(arena_t *arena)
{
	unsigned i;

	for (i = 0; i < NBINS; i++)
		malloc_mutex_postfork_child(&arena->bins[i].lock);
	malloc_mutex_postfork_child(&arena->node_cache_mtx);
	malloc_mutex_postfork_child(&arena->chunks_mtx);
	malloc_mutex_postfork_child(&arena->huge_mtx);
	malloc_mutex_postfork_child(&arena->lock);
}