arena.c revision 9b41ac909facf4f09bb1b637b78ba647348e572e
1#define JEMALLOC_ARENA_C_ 2#include "jemalloc/internal/jemalloc_internal.h" 3 4/******************************************************************************/ 5/* Data. */ 6 7ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT; 8arena_bin_info_t arena_bin_info[NBINS]; 9 10size_t map_bias; 11size_t map_misc_offset; 12size_t arena_maxrun; /* Max run size for arenas. */ 13size_t arena_maxclass; /* Max size class for arenas. */ 14unsigned nlclasses; /* Number of large size classes. */ 15unsigned nhclasses; /* Number of huge size classes. */ 16 17/******************************************************************************/ 18/* 19 * Function prototypes for static functions that are referenced prior to 20 * definition. 21 */ 22 23static void arena_purge(arena_t *arena, bool all); 24static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, 25 bool cleaned); 26static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, 27 arena_run_t *run, arena_bin_t *bin); 28static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, 29 arena_run_t *run, arena_bin_t *bin); 30 31/******************************************************************************/ 32 33JEMALLOC_INLINE_C size_t 34arena_miscelm_to_bits(arena_chunk_map_misc_t *miscelm) 35{ 36 arena_chunk_t *chunk = CHUNK_ADDR2BASE(miscelm); 37 size_t pageind = arena_miscelm_to_pageind(miscelm); 38 39 return arena_mapbits_get(chunk, pageind); 40} 41 42static inline int 43arena_run_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b) 44{ 45 uintptr_t a_miscelm = (uintptr_t)a; 46 uintptr_t b_miscelm = (uintptr_t)b; 47 48 assert(a != NULL); 49 assert(b != NULL); 50 51 return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm)); 52} 53 54/* Generate red-black tree functions. */ 55rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_misc_t, 56 rb_link, arena_run_comp) 57 58static inline int 59arena_avail_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b) 60{ 61 int ret; 62 size_t a_size; 63 size_t b_size = arena_miscelm_to_bits(b) & ~PAGE_MASK; 64 uintptr_t a_miscelm = (uintptr_t)a; 65 uintptr_t b_miscelm = (uintptr_t)b; 66 67 if (a_miscelm & CHUNK_MAP_KEY) 68 a_size = a_miscelm & ~PAGE_MASK; 69 else 70 a_size = arena_miscelm_to_bits(a) & ~PAGE_MASK; 71 72 ret = (a_size > b_size) - (a_size < b_size); 73 if (ret == 0) { 74 if (!(a_miscelm & CHUNK_MAP_KEY)) 75 ret = (a_miscelm > b_miscelm) - (a_miscelm < b_miscelm); 76 else { 77 /* 78 * Treat keys as if they are lower than anything else. 79 */ 80 ret = -1; 81 } 82 } 83 84 return (ret); 85} 86 87/* Generate red-black tree functions. */ 88rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t, 89 arena_chunk_map_misc_t, rb_link, arena_avail_comp) 90 91static void 92arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind, 93 size_t npages) 94{ 95 96 assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> 97 LG_PAGE)); 98 arena_avail_tree_insert(&arena->runs_avail, arena_miscelm_get(chunk, 99 pageind)); 100} 101 102static void 103arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind, 104 size_t npages) 105{ 106 107 assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> 108 LG_PAGE)); 109 arena_avail_tree_remove(&arena->runs_avail, arena_miscelm_get(chunk, 110 pageind)); 111} 112 113static void 114arena_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind, 115 size_t npages) 116{ 117 arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind); 118 assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> 119 LG_PAGE)); 120 assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY); 121 assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) == 122 CHUNK_MAP_DIRTY); 123 ql_elm_new(miscelm, dr_link); 124 ql_tail_insert(&arena->runs_dirty, miscelm, dr_link); 125 arena->ndirty += npages; 126} 127 128static void 129arena_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind, 130 size_t npages) 131{ 132 arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind); 133 assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> 134 LG_PAGE)); 135 assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY); 136 assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) == 137 CHUNK_MAP_DIRTY); 138 ql_remove(&arena->runs_dirty, miscelm, dr_link); 139 arena->ndirty -= npages; 140} 141 142static inline void * 143arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info) 144{ 145 void *ret; 146 unsigned regind; 147 arena_chunk_map_misc_t *miscelm; 148 void *rpages; 149 150 assert(run->nfree > 0); 151 assert(!bitmap_full(run->bitmap, &bin_info->bitmap_info)); 152 153 regind = bitmap_sfu(run->bitmap, &bin_info->bitmap_info); 154 miscelm = arena_run_to_miscelm(run); 155 rpages = arena_miscelm_to_rpages(miscelm); 156 ret = (void *)((uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset + 157 (uintptr_t)(bin_info->reg_interval * regind)); 158 run->nfree--; 159 return (ret); 160} 161 162static inline void 163arena_run_reg_dalloc(arena_run_t *run, void *ptr) 164{ 165 arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); 166 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; 167 size_t mapbits = arena_mapbits_get(chunk, pageind); 168 index_t binind = arena_ptr_small_binind_get(ptr, mapbits); 169 arena_bin_info_t *bin_info = &arena_bin_info[binind]; 170 unsigned regind = arena_run_regind(run, bin_info, ptr); 171 172 assert(run->nfree < bin_info->nregs); 173 /* Freeing an interior pointer can cause assertion failure. */ 174 assert(((uintptr_t)ptr - 175 ((uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) + 176 (uintptr_t)bin_info->reg0_offset)) % 177 (uintptr_t)bin_info->reg_interval == 0); 178 assert((uintptr_t)ptr >= 179 (uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) + 180 (uintptr_t)bin_info->reg0_offset); 181 /* Freeing an unallocated pointer can cause assertion failure. */ 182 assert(bitmap_get(run->bitmap, &bin_info->bitmap_info, regind)); 183 184 bitmap_unset(run->bitmap, &bin_info->bitmap_info, regind); 185 run->nfree++; 186} 187 188static inline void 189arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages) 190{ 191 192 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + 193 (run_ind << LG_PAGE)), (npages << LG_PAGE)); 194 memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0, 195 (npages << LG_PAGE)); 196} 197 198static inline void 199arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind) 200{ 201 202 JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind 203 << LG_PAGE)), PAGE); 204} 205 206static inline void 207arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind) 208{ 209 size_t i; 210 UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE)); 211 212 arena_run_page_mark_zeroed(chunk, run_ind); 213 for (i = 0; i < PAGE / sizeof(size_t); i++) 214 assert(p[i] == 0); 215} 216 217static void 218arena_cactive_update(arena_t *arena, size_t add_pages, size_t sub_pages) 219{ 220 221 if (config_stats) { 222 ssize_t cactive_diff = CHUNK_CEILING((arena->nactive + add_pages 223 - sub_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive << 224 LG_PAGE); 225 if (cactive_diff != 0) 226 stats_cactive_add(cactive_diff); 227 } 228} 229 230static void 231arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind, 232 size_t flag_dirty, size_t need_pages) 233{ 234 size_t total_pages, rem_pages; 235 236 total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >> 237 LG_PAGE; 238 assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) == 239 flag_dirty); 240 assert(need_pages <= total_pages); 241 rem_pages = total_pages - need_pages; 242 243 arena_avail_remove(arena, chunk, run_ind, total_pages); 244 if (flag_dirty != 0) 245 arena_dirty_remove(arena, chunk, run_ind, total_pages); 246 arena_cactive_update(arena, need_pages, 0); 247 arena->nactive += need_pages; 248 249 /* Keep track of trailing unused pages for later use. */ 250 if (rem_pages > 0) { 251 if (flag_dirty != 0) { 252 arena_mapbits_unallocated_set(chunk, 253 run_ind+need_pages, (rem_pages << LG_PAGE), 254 flag_dirty); 255 arena_mapbits_unallocated_set(chunk, 256 run_ind+total_pages-1, (rem_pages << LG_PAGE), 257 flag_dirty); 258 arena_dirty_insert(arena, chunk, run_ind+need_pages, 259 rem_pages); 260 } else { 261 arena_mapbits_unallocated_set(chunk, run_ind+need_pages, 262 (rem_pages << LG_PAGE), 263 arena_mapbits_unzeroed_get(chunk, 264 run_ind+need_pages)); 265 arena_mapbits_unallocated_set(chunk, 266 run_ind+total_pages-1, (rem_pages << LG_PAGE), 267 arena_mapbits_unzeroed_get(chunk, 268 run_ind+total_pages-1)); 269 } 270 arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages); 271 } 272} 273 274static void 275arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size, 276 bool remove, bool zero) 277{ 278 arena_chunk_t *chunk; 279 arena_chunk_map_misc_t *miscelm; 280 size_t flag_dirty, run_ind, need_pages, i; 281 282 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); 283 miscelm = arena_run_to_miscelm(run); 284 run_ind = arena_miscelm_to_pageind(miscelm); 285 flag_dirty = arena_mapbits_dirty_get(chunk, run_ind); 286 need_pages = (size >> LG_PAGE); 287 assert(need_pages > 0); 288 289 if (remove) { 290 arena_run_split_remove(arena, chunk, run_ind, flag_dirty, 291 need_pages); 292 } 293 294 if (zero) { 295 if (flag_dirty == 0) { 296 /* 297 * The run is clean, so some pages may be zeroed (i.e. 298 * never before touched). 299 */ 300 for (i = 0; i < need_pages; i++) { 301 if (arena_mapbits_unzeroed_get(chunk, run_ind+i) 302 != 0) 303 arena_run_zero(chunk, run_ind+i, 1); 304 else if (config_debug) { 305 arena_run_page_validate_zeroed(chunk, 306 run_ind+i); 307 } else { 308 arena_run_page_mark_zeroed(chunk, 309 run_ind+i); 310 } 311 } 312 } else { 313 /* The run is dirty, so all pages must be zeroed. */ 314 arena_run_zero(chunk, run_ind, need_pages); 315 } 316 } else { 317 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + 318 (run_ind << LG_PAGE)), (need_pages << LG_PAGE)); 319 } 320 321 /* 322 * Set the last element first, in case the run only contains one page 323 * (i.e. both statements set the same element). 324 */ 325 arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty); 326 arena_mapbits_large_set(chunk, run_ind, size, flag_dirty); 327} 328 329static void 330arena_run_split_large(arena_t *arena, arena_run_t *run, size_t size, bool zero) 331{ 332 333 arena_run_split_large_helper(arena, run, size, true, zero); 334} 335 336static void 337arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero) 338{ 339 340 arena_run_split_large_helper(arena, run, size, false, zero); 341} 342 343static void 344arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size, 345 index_t binind) 346{ 347 arena_chunk_t *chunk; 348 arena_chunk_map_misc_t *miscelm; 349 size_t flag_dirty, run_ind, need_pages, i; 350 351 assert(binind != BININD_INVALID); 352 353 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); 354 miscelm = arena_run_to_miscelm(run); 355 run_ind = arena_miscelm_to_pageind(miscelm); 356 flag_dirty = arena_mapbits_dirty_get(chunk, run_ind); 357 need_pages = (size >> LG_PAGE); 358 assert(need_pages > 0); 359 360 arena_run_split_remove(arena, chunk, run_ind, flag_dirty, need_pages); 361 362 for (i = 0; i < need_pages; i++) { 363 arena_mapbits_small_set(chunk, run_ind+i, i, binind, 0); 364 if (config_debug && flag_dirty == 0 && 365 arena_mapbits_unzeroed_get(chunk, run_ind+i) == 0) 366 arena_run_page_validate_zeroed(chunk, run_ind+i); 367 } 368 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + 369 (run_ind << LG_PAGE)), (need_pages << LG_PAGE)); 370} 371 372static arena_chunk_t * 373arena_chunk_init_spare(arena_t *arena) 374{ 375 arena_chunk_t *chunk; 376 377 assert(arena->spare != NULL); 378 379 chunk = arena->spare; 380 arena->spare = NULL; 381 382 assert(arena_mapbits_allocated_get(chunk, map_bias) == 0); 383 assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0); 384 assert(arena_mapbits_unallocated_size_get(chunk, map_bias) == 385 arena_maxrun); 386 assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) == 387 arena_maxrun); 388 assert(arena_mapbits_dirty_get(chunk, map_bias) == 389 arena_mapbits_dirty_get(chunk, chunk_npages-1)); 390 391 return (chunk); 392} 393 394static arena_chunk_t * 395arena_chunk_alloc_internal(arena_t *arena, size_t size, size_t alignment, 396 bool *zero) 397{ 398 arena_chunk_t *chunk; 399 chunk_alloc_t *chunk_alloc; 400 chunk_dalloc_t *chunk_dalloc; 401 402 chunk_alloc = arena->chunk_alloc; 403 chunk_dalloc = arena->chunk_dalloc; 404 malloc_mutex_unlock(&arena->lock); 405 chunk = (arena_chunk_t *)chunk_alloc_arena(chunk_alloc, chunk_dalloc, 406 arena->ind, NULL, size, alignment, zero); 407 malloc_mutex_lock(&arena->lock); 408 if (config_stats && chunk != NULL) 409 arena->stats.mapped += chunksize; 410 411 return (chunk); 412} 413 414static arena_chunk_t * 415arena_chunk_init_hard(arena_t *arena) 416{ 417 arena_chunk_t *chunk; 418 bool zero; 419 size_t unzeroed, i; 420 421 assert(arena->spare == NULL); 422 423 zero = false; 424 chunk = arena_chunk_alloc_internal(arena, chunksize, chunksize, &zero); 425 if (chunk == NULL) 426 return (NULL); 427 428 chunk->arena = arena; 429 430 /* 431 * Initialize the map to contain one maximal free untouched run. Mark 432 * the pages as zeroed iff chunk_alloc() returned a zeroed chunk. 433 */ 434 unzeroed = zero ? 0 : CHUNK_MAP_UNZEROED; 435 arena_mapbits_unallocated_set(chunk, map_bias, arena_maxrun, unzeroed); 436 /* 437 * There is no need to initialize the internal page map entries unless 438 * the chunk is not zeroed. 439 */ 440 if (!zero) { 441 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED( 442 (void *)arena_bitselm_get(chunk, map_bias+1), 443 (size_t)((uintptr_t) arena_bitselm_get(chunk, 444 chunk_npages-1) - (uintptr_t)arena_bitselm_get(chunk, 445 map_bias+1))); 446 for (i = map_bias+1; i < chunk_npages-1; i++) 447 arena_mapbits_unzeroed_set(chunk, i, unzeroed); 448 } else { 449 JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void 450 *)arena_bitselm_get(chunk, map_bias+1), (size_t)((uintptr_t) 451 arena_bitselm_get(chunk, chunk_npages-1) - 452 (uintptr_t)arena_bitselm_get(chunk, map_bias+1))); 453 if (config_debug) { 454 for (i = map_bias+1; i < chunk_npages-1; i++) { 455 assert(arena_mapbits_unzeroed_get(chunk, i) == 456 unzeroed); 457 } 458 } 459 } 460 arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxrun, 461 unzeroed); 462 463 return (chunk); 464} 465 466static arena_chunk_t * 467arena_chunk_alloc(arena_t *arena) 468{ 469 arena_chunk_t *chunk; 470 471 if (arena->spare != NULL) 472 chunk = arena_chunk_init_spare(arena); 473 else { 474 chunk = arena_chunk_init_hard(arena); 475 if (chunk == NULL) 476 return (NULL); 477 } 478 479 /* Insert the run into the runs_avail tree. */ 480 arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias); 481 482 return (chunk); 483} 484 485static void 486arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk) 487{ 488 489 assert(arena_mapbits_allocated_get(chunk, map_bias) == 0); 490 assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0); 491 assert(arena_mapbits_unallocated_size_get(chunk, map_bias) == 492 arena_maxrun); 493 assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) == 494 arena_maxrun); 495 assert(arena_mapbits_dirty_get(chunk, map_bias) == 496 arena_mapbits_dirty_get(chunk, chunk_npages-1)); 497 498 /* 499 * Remove run from the runs_avail tree, so that the arena does not use 500 * it. 501 */ 502 arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias); 503 504 if (arena->spare != NULL) { 505 arena_chunk_t *spare = arena->spare; 506 chunk_dalloc_t *chunk_dalloc; 507 508 arena->spare = chunk; 509 if (arena_mapbits_dirty_get(spare, map_bias) != 0) { 510 arena_dirty_remove(arena, spare, map_bias, 511 chunk_npages-map_bias); 512 } 513 chunk_dalloc = arena->chunk_dalloc; 514 malloc_mutex_unlock(&arena->lock); 515 chunk_dalloc((void *)spare, chunksize, arena->ind); 516 malloc_mutex_lock(&arena->lock); 517 if (config_stats) 518 arena->stats.mapped -= chunksize; 519 } else 520 arena->spare = chunk; 521} 522 523static void 524arena_huge_malloc_stats_update(arena_t *arena, size_t usize) 525{ 526 index_t index = size2index(usize) - nlclasses - NBINS; 527 528 cassert(config_stats); 529 530 arena->stats.nmalloc_huge++; 531 arena->stats.allocated_huge += usize; 532 arena->stats.hstats[index].nmalloc++; 533 arena->stats.hstats[index].curhchunks++; 534} 535 536static void 537arena_huge_malloc_stats_update_undo(arena_t *arena, size_t usize) 538{ 539 index_t index = size2index(usize) - nlclasses - NBINS; 540 541 cassert(config_stats); 542 543 arena->stats.nmalloc_huge--; 544 arena->stats.allocated_huge -= usize; 545 arena->stats.hstats[index].nmalloc--; 546 arena->stats.hstats[index].curhchunks--; 547} 548 549static void 550arena_huge_dalloc_stats_update(arena_t *arena, size_t usize) 551{ 552 index_t index = size2index(usize) - nlclasses - NBINS; 553 554 cassert(config_stats); 555 556 arena->stats.ndalloc_huge++; 557 arena->stats.allocated_huge -= usize; 558 arena->stats.hstats[index].ndalloc++; 559 arena->stats.hstats[index].curhchunks--; 560} 561 562static void 563arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize) 564{ 565 index_t index = size2index(usize) - nlclasses - NBINS; 566 567 cassert(config_stats); 568 569 arena->stats.ndalloc_huge--; 570 arena->stats.allocated_huge += usize; 571 arena->stats.hstats[index].ndalloc--; 572 arena->stats.hstats[index].curhchunks++; 573} 574 575static void 576arena_huge_ralloc_stats_update(arena_t *arena, size_t oldsize, size_t usize) 577{ 578 579 arena_huge_dalloc_stats_update(arena, oldsize); 580 arena_huge_malloc_stats_update(arena, usize); 581} 582 583static void 584arena_huge_ralloc_stats_update_undo(arena_t *arena, size_t oldsize, 585 size_t usize) 586{ 587 588 arena_huge_dalloc_stats_update_undo(arena, oldsize); 589 arena_huge_malloc_stats_update_undo(arena, usize); 590} 591 592void * 593arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment, 594 bool *zero) 595{ 596 void *ret; 597 chunk_alloc_t *chunk_alloc; 598 chunk_dalloc_t *chunk_dalloc; 599 size_t csize = CHUNK_CEILING(usize); 600 601 malloc_mutex_lock(&arena->lock); 602 chunk_alloc = arena->chunk_alloc; 603 chunk_dalloc = arena->chunk_dalloc; 604 if (config_stats) { 605 /* Optimistically update stats prior to unlocking. */ 606 arena_huge_malloc_stats_update(arena, usize); 607 arena->stats.mapped += usize; 608 } 609 arena->nactive += (usize >> LG_PAGE); 610 malloc_mutex_unlock(&arena->lock); 611 612 ret = chunk_alloc_arena(chunk_alloc, chunk_dalloc, arena->ind, NULL, 613 csize, alignment, zero); 614 if (ret == NULL) { 615 /* Revert optimistic stats updates. */ 616 malloc_mutex_lock(&arena->lock); 617 if (config_stats) { 618 arena_huge_malloc_stats_update_undo(arena, usize); 619 arena->stats.mapped -= usize; 620 } 621 arena->nactive -= (usize >> LG_PAGE); 622 malloc_mutex_unlock(&arena->lock); 623 return (NULL); 624 } 625 626 if (config_stats) 627 stats_cactive_add(usize); 628 629 return (ret); 630} 631 632void 633arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize) 634{ 635 chunk_dalloc_t *chunk_dalloc; 636 637 malloc_mutex_lock(&arena->lock); 638 chunk_dalloc = arena->chunk_dalloc; 639 if (config_stats) { 640 arena_huge_dalloc_stats_update(arena, usize); 641 arena->stats.mapped -= usize; 642 stats_cactive_sub(usize); 643 } 644 arena->nactive -= (usize >> LG_PAGE); 645 malloc_mutex_unlock(&arena->lock); 646 chunk_dalloc(chunk, CHUNK_CEILING(usize), arena->ind); 647} 648 649void 650arena_chunk_ralloc_huge_similar(arena_t *arena, void *chunk, size_t oldsize, 651 size_t usize) 652{ 653 654 assert(CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize)); 655 assert(oldsize != usize); 656 657 malloc_mutex_lock(&arena->lock); 658 if (config_stats) 659 arena_huge_ralloc_stats_update(arena, oldsize, usize); 660 if (oldsize < usize) { 661 size_t udiff = usize - oldsize; 662 arena->nactive += udiff >> LG_PAGE; 663 if (config_stats) 664 stats_cactive_add(udiff); 665 } else { 666 size_t udiff = oldsize - usize; 667 arena->nactive -= udiff >> LG_PAGE; 668 if (config_stats) 669 stats_cactive_sub(udiff); 670 } 671 malloc_mutex_unlock(&arena->lock); 672} 673 674void 675arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk, size_t oldsize, 676 size_t usize) 677{ 678 chunk_dalloc_t *chunk_dalloc; 679 size_t udiff = oldsize - usize; 680 size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize); 681 682 malloc_mutex_lock(&arena->lock); 683 chunk_dalloc = arena->chunk_dalloc; 684 if (config_stats) { 685 arena_huge_ralloc_stats_update(arena, oldsize, usize); 686 if (cdiff != 0) { 687 arena->stats.mapped -= cdiff; 688 stats_cactive_sub(udiff); 689 } 690 } 691 arena->nactive -= udiff >> LG_PAGE; 692 malloc_mutex_unlock(&arena->lock); 693 if (cdiff != 0) 694 chunk_dalloc(chunk + CHUNK_CEILING(usize), cdiff, arena->ind); 695} 696 697bool 698arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk, size_t oldsize, 699 size_t usize, bool *zero) 700{ 701 chunk_alloc_t *chunk_alloc; 702 chunk_dalloc_t *chunk_dalloc; 703 size_t udiff = usize - oldsize; 704 size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize); 705 706 malloc_mutex_lock(&arena->lock); 707 chunk_alloc = arena->chunk_alloc; 708 chunk_dalloc = arena->chunk_dalloc; 709 if (config_stats) { 710 /* Optimistically update stats prior to unlocking. */ 711 arena_huge_ralloc_stats_update(arena, oldsize, usize); 712 arena->stats.mapped += cdiff; 713 } 714 arena->nactive += (udiff >> LG_PAGE); 715 malloc_mutex_unlock(&arena->lock); 716 717 if (chunk_alloc_arena(chunk_alloc, chunk_dalloc, arena->ind, chunk + 718 CHUNK_CEILING(oldsize), cdiff, chunksize, zero) == NULL) { 719 /* Revert optimistic stats updates. */ 720 malloc_mutex_lock(&arena->lock); 721 if (config_stats) { 722 arena_huge_ralloc_stats_update_undo(arena, 723 oldsize, usize); 724 arena->stats.mapped -= cdiff; 725 } 726 arena->nactive -= (udiff >> LG_PAGE); 727 malloc_mutex_unlock(&arena->lock); 728 return (true); 729 } 730 731 if (config_stats) 732 stats_cactive_add(udiff); 733 734 return (false); 735} 736 737static arena_run_t * 738arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero) 739{ 740 arena_chunk_map_misc_t *miscelm; 741 arena_chunk_map_misc_t *key; 742 743 key = (arena_chunk_map_misc_t *)(size | CHUNK_MAP_KEY); 744 miscelm = arena_avail_tree_nsearch(&arena->runs_avail, key); 745 if (miscelm != NULL) { 746 arena_run_t *run = &miscelm->run; 747 arena_run_split_large(arena, &miscelm->run, size, zero); 748 return (run); 749 } 750 751 return (NULL); 752} 753 754static arena_run_t * 755arena_run_alloc_large(arena_t *arena, size_t size, bool zero) 756{ 757 arena_chunk_t *chunk; 758 arena_run_t *run; 759 760 assert(size <= arena_maxrun); 761 assert((size & PAGE_MASK) == 0); 762 763 /* Search the arena's chunks for the lowest best fit. */ 764 run = arena_run_alloc_large_helper(arena, size, zero); 765 if (run != NULL) 766 return (run); 767 768 /* 769 * No usable runs. Create a new chunk from which to allocate the run. 770 */ 771 chunk = arena_chunk_alloc(arena); 772 if (chunk != NULL) { 773 run = &arena_miscelm_get(chunk, map_bias)->run; 774 arena_run_split_large(arena, run, size, zero); 775 return (run); 776 } 777 778 /* 779 * arena_chunk_alloc() failed, but another thread may have made 780 * sufficient memory available while this one dropped arena->lock in 781 * arena_chunk_alloc(), so search one more time. 782 */ 783 return (arena_run_alloc_large_helper(arena, size, zero)); 784} 785 786static arena_run_t * 787arena_run_alloc_small_helper(arena_t *arena, size_t size, index_t binind) 788{ 789 arena_run_t *run; 790 arena_chunk_map_misc_t *miscelm; 791 arena_chunk_map_misc_t *key; 792 793 key = (arena_chunk_map_misc_t *)(size | CHUNK_MAP_KEY); 794 miscelm = arena_avail_tree_nsearch(&arena->runs_avail, key); 795 if (miscelm != NULL) { 796 run = &miscelm->run; 797 arena_run_split_small(arena, run, size, binind); 798 return (run); 799 } 800 801 return (NULL); 802} 803 804static arena_run_t * 805arena_run_alloc_small(arena_t *arena, size_t size, index_t binind) 806{ 807 arena_chunk_t *chunk; 808 arena_run_t *run; 809 810 assert(size <= arena_maxrun); 811 assert((size & PAGE_MASK) == 0); 812 assert(binind != BININD_INVALID); 813 814 /* Search the arena's chunks for the lowest best fit. */ 815 run = arena_run_alloc_small_helper(arena, size, binind); 816 if (run != NULL) 817 return (run); 818 819 /* 820 * No usable runs. Create a new chunk from which to allocate the run. 821 */ 822 chunk = arena_chunk_alloc(arena); 823 if (chunk != NULL) { 824 run = &arena_miscelm_get(chunk, map_bias)->run; 825 arena_run_split_small(arena, run, size, binind); 826 return (run); 827 } 828 829 /* 830 * arena_chunk_alloc() failed, but another thread may have made 831 * sufficient memory available while this one dropped arena->lock in 832 * arena_chunk_alloc(), so search one more time. 833 */ 834 return (arena_run_alloc_small_helper(arena, size, binind)); 835} 836 837static inline void 838arena_maybe_purge(arena_t *arena) 839{ 840 size_t threshold; 841 842 /* Don't purge if the option is disabled. */ 843 if (opt_lg_dirty_mult < 0) 844 return; 845 threshold = (arena->nactive >> opt_lg_dirty_mult); 846 /* 847 * Don't purge unless the number of purgeable pages exceeds the 848 * threshold. 849 */ 850 if (arena->ndirty <= threshold) 851 return; 852 853 arena_purge(arena, false); 854} 855 856static size_t 857arena_dirty_count(arena_t *arena) 858{ 859 size_t ndirty = 0; 860 arena_chunk_map_misc_t *miscelm; 861 arena_chunk_t *chunk; 862 size_t pageind, npages; 863 864 ql_foreach(miscelm, &arena->runs_dirty, dr_link) { 865 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm); 866 pageind = arena_miscelm_to_pageind(miscelm); 867 assert(arena_mapbits_allocated_get(chunk, pageind) == 0); 868 assert(arena_mapbits_large_get(chunk, pageind) == 0); 869 assert(arena_mapbits_dirty_get(chunk, pageind) != 0); 870 npages = arena_mapbits_unallocated_size_get(chunk, pageind) >> 871 LG_PAGE; 872 ndirty += npages; 873 } 874 875 return (ndirty); 876} 877 878static size_t 879arena_compute_npurge(arena_t *arena, bool all) 880{ 881 size_t npurge; 882 883 /* 884 * Compute the minimum number of pages that this thread should try to 885 * purge. 886 */ 887 if (!all) { 888 size_t threshold = (arena->nactive >> opt_lg_dirty_mult); 889 890 npurge = arena->ndirty - threshold; 891 } else 892 npurge = arena->ndirty; 893 894 return (npurge); 895} 896 897static size_t 898arena_stash_dirty(arena_t *arena, bool all, size_t npurge, 899 arena_chunk_miscelms_t *miscelms) 900{ 901 arena_chunk_map_misc_t *miscelm; 902 size_t nstashed = 0; 903 904 /* Add at least npurge pages to purge_list. */ 905 for (miscelm = ql_first(&arena->runs_dirty); miscelm != NULL; 906 miscelm = ql_first(&arena->runs_dirty)) { 907 arena_chunk_t *chunk = 908 (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm); 909 size_t pageind = arena_miscelm_to_pageind(miscelm); 910 size_t run_size = arena_mapbits_unallocated_size_get(chunk, 911 pageind); 912 size_t npages = run_size >> LG_PAGE; 913 arena_run_t *run = &miscelm->run; 914 915 assert(pageind + npages <= chunk_npages); 916 assert(arena_mapbits_dirty_get(chunk, pageind) == 917 arena_mapbits_dirty_get(chunk, pageind+npages-1)); 918 919 /* 920 * If purging the spare chunk's run, make it available prior to 921 * allocation. 922 */ 923 if (chunk == arena->spare) 924 arena_chunk_alloc(arena); 925 926 /* Temporarily allocate the free dirty run. */ 927 arena_run_split_large(arena, run, run_size, false); 928 /* Append to purge_list for later processing. */ 929 ql_elm_new(miscelm, dr_link); 930 ql_tail_insert(miscelms, miscelm, dr_link); 931 932 nstashed += npages; 933 934 if (!all && nstashed >= npurge) 935 break; 936 } 937 938 return (nstashed); 939} 940 941static size_t 942arena_purge_stashed(arena_t *arena, arena_chunk_miscelms_t *miscelms) 943{ 944 size_t npurged, nmadvise; 945 arena_chunk_map_misc_t *miscelm; 946 947 if (config_stats) 948 nmadvise = 0; 949 npurged = 0; 950 951 malloc_mutex_unlock(&arena->lock); 952 953 ql_foreach(miscelm, miscelms, dr_link) { 954 arena_chunk_t *chunk; 955 size_t pageind, run_size, npages, flag_unzeroed, i; 956 bool unzeroed; 957 958 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm); 959 pageind = arena_miscelm_to_pageind(miscelm); 960 run_size = arena_mapbits_large_size_get(chunk, pageind); 961 npages = run_size >> LG_PAGE; 962 963 assert(pageind + npages <= chunk_npages); 964 unzeroed = pages_purge((void *)((uintptr_t)chunk + (pageind << 965 LG_PAGE)), run_size); 966 flag_unzeroed = unzeroed ? CHUNK_MAP_UNZEROED : 0; 967 968 /* 969 * Set the unzeroed flag for all pages, now that pages_purge() 970 * has returned whether the pages were zeroed as a side effect 971 * of purging. This chunk map modification is safe even though 972 * the arena mutex isn't currently owned by this thread, 973 * because the run is marked as allocated, thus protecting it 974 * from being modified by any other thread. As long as these 975 * writes don't perturb the first and last elements' 976 * CHUNK_MAP_ALLOCATED bits, behavior is well defined. 977 */ 978 for (i = 0; i < npages; i++) { 979 arena_mapbits_unzeroed_set(chunk, pageind+i, 980 flag_unzeroed); 981 } 982 983 npurged += npages; 984 if (config_stats) 985 nmadvise++; 986 } 987 988 malloc_mutex_lock(&arena->lock); 989 990 if (config_stats) { 991 arena->stats.nmadvise += nmadvise; 992 arena->stats.purged += npurged; 993 } 994 995 return (npurged); 996} 997 998static void 999arena_unstash_purged(arena_t *arena, arena_chunk_miscelms_t *miscelms) 1000{ 1001 arena_chunk_map_misc_t *miscelm; 1002 1003 /* Deallocate runs. */ 1004 for (miscelm = ql_first(miscelms); miscelm != NULL; 1005 miscelm = ql_first(miscelms)) { 1006 arena_run_t *run = &miscelm->run; 1007 ql_remove(miscelms, miscelm, dr_link); 1008 arena_run_dalloc(arena, run, false, true); 1009 } 1010} 1011 1012void 1013arena_purge(arena_t *arena, bool all) 1014{ 1015 size_t npurge, npurgeable, npurged; 1016 arena_chunk_miscelms_t purge_list; 1017 1018 if (config_debug) { 1019 size_t ndirty = arena_dirty_count(arena); 1020 assert(ndirty == arena->ndirty); 1021 } 1022 assert((arena->nactive >> opt_lg_dirty_mult) < arena->ndirty || all); 1023 1024 if (config_stats) 1025 arena->stats.npurge++; 1026 1027 npurge = arena_compute_npurge(arena, all); 1028 ql_new(&purge_list); 1029 npurgeable = arena_stash_dirty(arena, all, npurge, &purge_list); 1030 assert(npurgeable >= npurge); 1031 npurged = arena_purge_stashed(arena, &purge_list); 1032 assert(npurged == npurgeable); 1033 arena_unstash_purged(arena, &purge_list); 1034} 1035 1036void 1037arena_purge_all(arena_t *arena) 1038{ 1039 1040 malloc_mutex_lock(&arena->lock); 1041 arena_purge(arena, true); 1042 malloc_mutex_unlock(&arena->lock); 1043} 1044 1045static void 1046arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size, 1047 size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty) 1048{ 1049 size_t size = *p_size; 1050 size_t run_ind = *p_run_ind; 1051 size_t run_pages = *p_run_pages; 1052 1053 /* Try to coalesce forward. */ 1054 if (run_ind + run_pages < chunk_npages && 1055 arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 && 1056 arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty) { 1057 size_t nrun_size = arena_mapbits_unallocated_size_get(chunk, 1058 run_ind+run_pages); 1059 size_t nrun_pages = nrun_size >> LG_PAGE; 1060 1061 /* 1062 * Remove successor from runs_avail; the coalesced run is 1063 * inserted later. 1064 */ 1065 assert(arena_mapbits_unallocated_size_get(chunk, 1066 run_ind+run_pages+nrun_pages-1) == nrun_size); 1067 assert(arena_mapbits_dirty_get(chunk, 1068 run_ind+run_pages+nrun_pages-1) == flag_dirty); 1069 arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages); 1070 1071 /* If the successor is dirty, remove it from runs_dirty. */ 1072 if (flag_dirty != 0) { 1073 arena_dirty_remove(arena, chunk, run_ind+run_pages, 1074 nrun_pages); 1075 } 1076 1077 size += nrun_size; 1078 run_pages += nrun_pages; 1079 1080 arena_mapbits_unallocated_size_set(chunk, run_ind, size); 1081 arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1, 1082 size); 1083 } 1084 1085 /* Try to coalesce backward. */ 1086 if (run_ind > map_bias && arena_mapbits_allocated_get(chunk, 1087 run_ind-1) == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) == 1088 flag_dirty) { 1089 size_t prun_size = arena_mapbits_unallocated_size_get(chunk, 1090 run_ind-1); 1091 size_t prun_pages = prun_size >> LG_PAGE; 1092 1093 run_ind -= prun_pages; 1094 1095 /* 1096 * Remove predecessor from runs_avail; the coalesced run is 1097 * inserted later. 1098 */ 1099 assert(arena_mapbits_unallocated_size_get(chunk, run_ind) == 1100 prun_size); 1101 assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty); 1102 arena_avail_remove(arena, chunk, run_ind, prun_pages); 1103 1104 /* If the predecessor is dirty, remove it from runs_dirty. */ 1105 if (flag_dirty != 0) 1106 arena_dirty_remove(arena, chunk, run_ind, prun_pages); 1107 1108 size += prun_size; 1109 run_pages += prun_pages; 1110 1111 arena_mapbits_unallocated_size_set(chunk, run_ind, size); 1112 arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1, 1113 size); 1114 } 1115 1116 *p_size = size; 1117 *p_run_ind = run_ind; 1118 *p_run_pages = run_pages; 1119} 1120 1121static void 1122arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned) 1123{ 1124 arena_chunk_t *chunk; 1125 arena_chunk_map_misc_t *miscelm; 1126 size_t size, run_ind, run_pages, flag_dirty; 1127 1128 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); 1129 miscelm = arena_run_to_miscelm(run); 1130 run_ind = arena_miscelm_to_pageind(miscelm); 1131 assert(run_ind >= map_bias); 1132 assert(run_ind < chunk_npages); 1133 if (arena_mapbits_large_get(chunk, run_ind) != 0) { 1134 size = arena_mapbits_large_size_get(chunk, run_ind); 1135 assert(size == PAGE || 1136 arena_mapbits_large_size_get(chunk, 1137 run_ind+(size>>LG_PAGE)-1) == 0); 1138 } else { 1139 arena_bin_info_t *bin_info = &arena_bin_info[run->binind]; 1140 size = bin_info->run_size; 1141 } 1142 run_pages = (size >> LG_PAGE); 1143 arena_cactive_update(arena, 0, run_pages); 1144 arena->nactive -= run_pages; 1145 1146 /* 1147 * The run is dirty if the caller claims to have dirtied it, as well as 1148 * if it was already dirty before being allocated and the caller 1149 * doesn't claim to have cleaned it. 1150 */ 1151 assert(arena_mapbits_dirty_get(chunk, run_ind) == 1152 arena_mapbits_dirty_get(chunk, run_ind+run_pages-1)); 1153 if (!cleaned && arena_mapbits_dirty_get(chunk, run_ind) != 0) 1154 dirty = true; 1155 flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0; 1156 1157 /* Mark pages as unallocated in the chunk map. */ 1158 if (dirty) { 1159 arena_mapbits_unallocated_set(chunk, run_ind, size, 1160 CHUNK_MAP_DIRTY); 1161 arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size, 1162 CHUNK_MAP_DIRTY); 1163 } else { 1164 arena_mapbits_unallocated_set(chunk, run_ind, size, 1165 arena_mapbits_unzeroed_get(chunk, run_ind)); 1166 arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size, 1167 arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1)); 1168 } 1169 1170 arena_run_coalesce(arena, chunk, &size, &run_ind, &run_pages, flag_dirty); 1171 1172 /* Insert into runs_avail, now that coalescing is complete. */ 1173 assert(arena_mapbits_unallocated_size_get(chunk, run_ind) == 1174 arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1)); 1175 assert(arena_mapbits_dirty_get(chunk, run_ind) == 1176 arena_mapbits_dirty_get(chunk, run_ind+run_pages-1)); 1177 arena_avail_insert(arena, chunk, run_ind, run_pages); 1178 1179 if (dirty) 1180 arena_dirty_insert(arena, chunk, run_ind, run_pages); 1181 1182 /* Deallocate chunk if it is now completely unused. */ 1183 if (size == arena_maxrun) { 1184 assert(run_ind == map_bias); 1185 assert(run_pages == (arena_maxrun >> LG_PAGE)); 1186 arena_chunk_dalloc(arena, chunk); 1187 } 1188 1189 /* 1190 * It is okay to do dirty page processing here even if the chunk was 1191 * deallocated above, since in that case it is the spare. Waiting 1192 * until after possible chunk deallocation to do dirty processing 1193 * allows for an old spare to be fully deallocated, thus decreasing the 1194 * chances of spuriously crossing the dirty page purging threshold. 1195 */ 1196 if (dirty) 1197 arena_maybe_purge(arena); 1198} 1199 1200static void 1201arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, 1202 size_t oldsize, size_t newsize) 1203{ 1204 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); 1205 size_t pageind = arena_miscelm_to_pageind(miscelm); 1206 size_t head_npages = (oldsize - newsize) >> LG_PAGE; 1207 size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind); 1208 1209 assert(oldsize > newsize); 1210 1211 /* 1212 * Update the chunk map so that arena_run_dalloc() can treat the 1213 * leading run as separately allocated. Set the last element of each 1214 * run first, in case of single-page runs. 1215 */ 1216 assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize); 1217 arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty); 1218 arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty); 1219 1220 if (config_debug) { 1221 UNUSED size_t tail_npages = newsize >> LG_PAGE; 1222 assert(arena_mapbits_large_size_get(chunk, 1223 pageind+head_npages+tail_npages-1) == 0); 1224 assert(arena_mapbits_dirty_get(chunk, 1225 pageind+head_npages+tail_npages-1) == flag_dirty); 1226 } 1227 arena_mapbits_large_set(chunk, pageind+head_npages, newsize, 1228 flag_dirty); 1229 1230 arena_run_dalloc(arena, run, false, false); 1231} 1232 1233static void 1234arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, 1235 size_t oldsize, size_t newsize, bool dirty) 1236{ 1237 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); 1238 size_t pageind = arena_miscelm_to_pageind(miscelm); 1239 size_t head_npages = newsize >> LG_PAGE; 1240 size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind); 1241 arena_chunk_map_misc_t *tail_miscelm; 1242 arena_run_t *tail_run; 1243 1244 assert(oldsize > newsize); 1245 1246 /* 1247 * Update the chunk map so that arena_run_dalloc() can treat the 1248 * trailing run as separately allocated. Set the last element of each 1249 * run first, in case of single-page runs. 1250 */ 1251 assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize); 1252 arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty); 1253 arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty); 1254 1255 if (config_debug) { 1256 UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE; 1257 assert(arena_mapbits_large_size_get(chunk, 1258 pageind+head_npages+tail_npages-1) == 0); 1259 assert(arena_mapbits_dirty_get(chunk, 1260 pageind+head_npages+tail_npages-1) == flag_dirty); 1261 } 1262 arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize, 1263 flag_dirty); 1264 1265 tail_miscelm = arena_miscelm_get(chunk, pageind + head_npages); 1266 tail_run = &tail_miscelm->run; 1267 arena_run_dalloc(arena, tail_run, dirty, false); 1268} 1269 1270static arena_run_t * 1271arena_bin_runs_first(arena_bin_t *bin) 1272{ 1273 arena_chunk_map_misc_t *miscelm = arena_run_tree_first(&bin->runs); 1274 if (miscelm != NULL) 1275 return (&miscelm->run); 1276 1277 return (NULL); 1278} 1279 1280static void 1281arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run) 1282{ 1283 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); 1284 1285 assert(arena_run_tree_search(&bin->runs, miscelm) == NULL); 1286 1287 arena_run_tree_insert(&bin->runs, miscelm); 1288} 1289 1290static void 1291arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run) 1292{ 1293 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); 1294 1295 assert(arena_run_tree_search(&bin->runs, miscelm) != NULL); 1296 1297 arena_run_tree_remove(&bin->runs, miscelm); 1298} 1299 1300static arena_run_t * 1301arena_bin_nonfull_run_tryget(arena_bin_t *bin) 1302{ 1303 arena_run_t *run = arena_bin_runs_first(bin); 1304 if (run != NULL) { 1305 arena_bin_runs_remove(bin, run); 1306 if (config_stats) 1307 bin->stats.reruns++; 1308 } 1309 return (run); 1310} 1311 1312static arena_run_t * 1313arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin) 1314{ 1315 arena_run_t *run; 1316 index_t binind; 1317 arena_bin_info_t *bin_info; 1318 1319 /* Look for a usable run. */ 1320 run = arena_bin_nonfull_run_tryget(bin); 1321 if (run != NULL) 1322 return (run); 1323 /* No existing runs have any space available. */ 1324 1325 binind = arena_bin_index(arena, bin); 1326 bin_info = &arena_bin_info[binind]; 1327 1328 /* Allocate a new run. */ 1329 malloc_mutex_unlock(&bin->lock); 1330 /******************************/ 1331 malloc_mutex_lock(&arena->lock); 1332 run = arena_run_alloc_small(arena, bin_info->run_size, binind); 1333 if (run != NULL) { 1334 /* Initialize run internals. */ 1335 run->binind = binind; 1336 run->nfree = bin_info->nregs; 1337 bitmap_init(run->bitmap, &bin_info->bitmap_info); 1338 } 1339 malloc_mutex_unlock(&arena->lock); 1340 /********************************/ 1341 malloc_mutex_lock(&bin->lock); 1342 if (run != NULL) { 1343 if (config_stats) { 1344 bin->stats.nruns++; 1345 bin->stats.curruns++; 1346 } 1347 return (run); 1348 } 1349 1350 /* 1351 * arena_run_alloc_small() failed, but another thread may have made 1352 * sufficient memory available while this one dropped bin->lock above, 1353 * so search one more time. 1354 */ 1355 run = arena_bin_nonfull_run_tryget(bin); 1356 if (run != NULL) 1357 return (run); 1358 1359 return (NULL); 1360} 1361 1362/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */ 1363static void * 1364arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin) 1365{ 1366 void *ret; 1367 index_t binind; 1368 arena_bin_info_t *bin_info; 1369 arena_run_t *run; 1370 1371 binind = arena_bin_index(arena, bin); 1372 bin_info = &arena_bin_info[binind]; 1373 bin->runcur = NULL; 1374 run = arena_bin_nonfull_run_get(arena, bin); 1375 if (bin->runcur != NULL && bin->runcur->nfree > 0) { 1376 /* 1377 * Another thread updated runcur while this one ran without the 1378 * bin lock in arena_bin_nonfull_run_get(). 1379 */ 1380 assert(bin->runcur->nfree > 0); 1381 ret = arena_run_reg_alloc(bin->runcur, bin_info); 1382 if (run != NULL) { 1383 arena_chunk_t *chunk; 1384 1385 /* 1386 * arena_run_alloc_small() may have allocated run, or 1387 * it may have pulled run from the bin's run tree. 1388 * Therefore it is unsafe to make any assumptions about 1389 * how run has previously been used, and 1390 * arena_bin_lower_run() must be called, as if a region 1391 * were just deallocated from the run. 1392 */ 1393 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); 1394 if (run->nfree == bin_info->nregs) 1395 arena_dalloc_bin_run(arena, chunk, run, bin); 1396 else 1397 arena_bin_lower_run(arena, chunk, run, bin); 1398 } 1399 return (ret); 1400 } 1401 1402 if (run == NULL) 1403 return (NULL); 1404 1405 bin->runcur = run; 1406 1407 assert(bin->runcur->nfree > 0); 1408 1409 return (arena_run_reg_alloc(bin->runcur, bin_info)); 1410} 1411 1412void 1413arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, index_t binind, 1414 uint64_t prof_accumbytes) 1415{ 1416 unsigned i, nfill; 1417 arena_bin_t *bin; 1418 arena_run_t *run; 1419 void *ptr; 1420 1421 assert(tbin->ncached == 0); 1422 1423 if (config_prof && arena_prof_accum(arena, prof_accumbytes)) 1424 prof_idump(); 1425 bin = &arena->bins[binind]; 1426 malloc_mutex_lock(&bin->lock); 1427 for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >> 1428 tbin->lg_fill_div); i < nfill; i++) { 1429 if ((run = bin->runcur) != NULL && run->nfree > 0) 1430 ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]); 1431 else 1432 ptr = arena_bin_malloc_hard(arena, bin); 1433 if (ptr == NULL) { 1434 /* 1435 * OOM. tbin->avail isn't yet filled down to its first 1436 * element, so the successful allocations (if any) must 1437 * be moved to the base of tbin->avail before bailing 1438 * out. 1439 */ 1440 if (i > 0) { 1441 memmove(tbin->avail, &tbin->avail[nfill - i], 1442 i * sizeof(void *)); 1443 } 1444 break; 1445 } 1446 if (config_fill && unlikely(opt_junk)) { 1447 arena_alloc_junk_small(ptr, &arena_bin_info[binind], 1448 true); 1449 } 1450 /* Insert such that low regions get used first. */ 1451 tbin->avail[nfill - 1 - i] = ptr; 1452 } 1453 if (config_stats) { 1454 bin->stats.nmalloc += i; 1455 bin->stats.nrequests += tbin->tstats.nrequests; 1456 bin->stats.curregs += i; 1457 bin->stats.nfills++; 1458 tbin->tstats.nrequests = 0; 1459 } 1460 malloc_mutex_unlock(&bin->lock); 1461 tbin->ncached = i; 1462} 1463 1464void 1465arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero) 1466{ 1467 1468 if (zero) { 1469 size_t redzone_size = bin_info->redzone_size; 1470 memset((void *)((uintptr_t)ptr - redzone_size), 0xa5, 1471 redzone_size); 1472 memset((void *)((uintptr_t)ptr + bin_info->reg_size), 0xa5, 1473 redzone_size); 1474 } else { 1475 memset((void *)((uintptr_t)ptr - bin_info->redzone_size), 0xa5, 1476 bin_info->reg_interval); 1477 } 1478} 1479 1480#ifdef JEMALLOC_JET 1481#undef arena_redzone_corruption 1482#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption_impl) 1483#endif 1484static void 1485arena_redzone_corruption(void *ptr, size_t usize, bool after, 1486 size_t offset, uint8_t byte) 1487{ 1488 1489 malloc_printf("<jemalloc>: Corrupt redzone %zu byte%s %s %p " 1490 "(size %zu), byte=%#x\n", offset, (offset == 1) ? "" : "s", 1491 after ? "after" : "before", ptr, usize, byte); 1492} 1493#ifdef JEMALLOC_JET 1494#undef arena_redzone_corruption 1495#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption) 1496arena_redzone_corruption_t *arena_redzone_corruption = 1497 JEMALLOC_N(arena_redzone_corruption_impl); 1498#endif 1499 1500static void 1501arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset) 1502{ 1503 size_t size = bin_info->reg_size; 1504 size_t redzone_size = bin_info->redzone_size; 1505 size_t i; 1506 bool error = false; 1507 1508 for (i = 1; i <= redzone_size; i++) { 1509 uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i); 1510 if (*byte != 0xa5) { 1511 error = true; 1512 arena_redzone_corruption(ptr, size, false, i, *byte); 1513 if (reset) 1514 *byte = 0xa5; 1515 } 1516 } 1517 for (i = 0; i < redzone_size; i++) { 1518 uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i); 1519 if (*byte != 0xa5) { 1520 error = true; 1521 arena_redzone_corruption(ptr, size, true, i, *byte); 1522 if (reset) 1523 *byte = 0xa5; 1524 } 1525 } 1526 if (opt_abort && error) 1527 abort(); 1528} 1529 1530#ifdef JEMALLOC_JET 1531#undef arena_dalloc_junk_small 1532#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small_impl) 1533#endif 1534void 1535arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info) 1536{ 1537 size_t redzone_size = bin_info->redzone_size; 1538 1539 arena_redzones_validate(ptr, bin_info, false); 1540 memset((void *)((uintptr_t)ptr - redzone_size), 0x5a, 1541 bin_info->reg_interval); 1542} 1543#ifdef JEMALLOC_JET 1544#undef arena_dalloc_junk_small 1545#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small) 1546arena_dalloc_junk_small_t *arena_dalloc_junk_small = 1547 JEMALLOC_N(arena_dalloc_junk_small_impl); 1548#endif 1549 1550void 1551arena_quarantine_junk_small(void *ptr, size_t usize) 1552{ 1553 index_t binind; 1554 arena_bin_info_t *bin_info; 1555 cassert(config_fill); 1556 assert(opt_junk); 1557 assert(opt_quarantine); 1558 assert(usize <= SMALL_MAXCLASS); 1559 1560 binind = size2index(usize); 1561 bin_info = &arena_bin_info[binind]; 1562 arena_redzones_validate(ptr, bin_info, true); 1563} 1564 1565void * 1566arena_malloc_small(arena_t *arena, size_t size, bool zero) 1567{ 1568 void *ret; 1569 arena_bin_t *bin; 1570 arena_run_t *run; 1571 index_t binind; 1572 1573 binind = size2index(size); 1574 assert(binind < NBINS); 1575 bin = &arena->bins[binind]; 1576 size = index2size(binind); 1577 1578 malloc_mutex_lock(&bin->lock); 1579 if ((run = bin->runcur) != NULL && run->nfree > 0) 1580 ret = arena_run_reg_alloc(run, &arena_bin_info[binind]); 1581 else 1582 ret = arena_bin_malloc_hard(arena, bin); 1583 1584 if (ret == NULL) { 1585 malloc_mutex_unlock(&bin->lock); 1586 return (NULL); 1587 } 1588 1589 if (config_stats) { 1590 bin->stats.nmalloc++; 1591 bin->stats.nrequests++; 1592 bin->stats.curregs++; 1593 } 1594 malloc_mutex_unlock(&bin->lock); 1595 if (config_prof && !isthreaded && arena_prof_accum(arena, size)) 1596 prof_idump(); 1597 1598 if (!zero) { 1599 if (config_fill) { 1600 if (unlikely(opt_junk)) { 1601 arena_alloc_junk_small(ret, 1602 &arena_bin_info[binind], false); 1603 } else if (unlikely(opt_zero)) 1604 memset(ret, 0, size); 1605 } 1606 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size); 1607 } else { 1608 if (config_fill && unlikely(opt_junk)) { 1609 arena_alloc_junk_small(ret, &arena_bin_info[binind], 1610 true); 1611 } 1612 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size); 1613 memset(ret, 0, size); 1614 } 1615 1616 return (ret); 1617} 1618 1619void * 1620arena_malloc_large(arena_t *arena, size_t size, bool zero) 1621{ 1622 void *ret; 1623 size_t usize; 1624 arena_run_t *run; 1625 arena_chunk_map_misc_t *miscelm; 1626 UNUSED bool idump; 1627 1628 /* Large allocation. */ 1629 usize = s2u(size); 1630 malloc_mutex_lock(&arena->lock); 1631 run = arena_run_alloc_large(arena, usize, zero); 1632 if (run == NULL) { 1633 malloc_mutex_unlock(&arena->lock); 1634 return (NULL); 1635 } 1636 miscelm = arena_run_to_miscelm(run); 1637 ret = arena_miscelm_to_rpages(miscelm); 1638 if (config_stats) { 1639 index_t index = size2index(usize) - NBINS; 1640 1641 arena->stats.nmalloc_large++; 1642 arena->stats.nrequests_large++; 1643 arena->stats.allocated_large += usize; 1644 arena->stats.lstats[index].nmalloc++; 1645 arena->stats.lstats[index].nrequests++; 1646 arena->stats.lstats[index].curruns++; 1647 } 1648 if (config_prof) 1649 idump = arena_prof_accum_locked(arena, usize); 1650 malloc_mutex_unlock(&arena->lock); 1651 if (config_prof && idump) 1652 prof_idump(); 1653 1654 if (!zero) { 1655 if (config_fill) { 1656 if (unlikely(opt_junk)) 1657 memset(ret, 0xa5, usize); 1658 else if (unlikely(opt_zero)) 1659 memset(ret, 0, usize); 1660 } 1661 } 1662 1663 return (ret); 1664} 1665 1666/* Only handles large allocations that require more than page alignment. */ 1667void * 1668arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero) 1669{ 1670 void *ret; 1671 size_t alloc_size, leadsize, trailsize; 1672 arena_run_t *run; 1673 arena_chunk_t *chunk; 1674 arena_chunk_map_misc_t *miscelm; 1675 void *rpages; 1676 1677 assert((size & PAGE_MASK) == 0); 1678 1679 alignment = PAGE_CEILING(alignment); 1680 alloc_size = size + alignment - PAGE; 1681 1682 malloc_mutex_lock(&arena->lock); 1683 run = arena_run_alloc_large(arena, alloc_size, false); 1684 if (run == NULL) { 1685 malloc_mutex_unlock(&arena->lock); 1686 return (NULL); 1687 } 1688 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); 1689 miscelm = arena_run_to_miscelm(run); 1690 rpages = arena_miscelm_to_rpages(miscelm); 1691 1692 leadsize = ALIGNMENT_CEILING((uintptr_t)rpages, alignment) - 1693 (uintptr_t)rpages; 1694 assert(alloc_size >= leadsize + size); 1695 trailsize = alloc_size - leadsize - size; 1696 if (leadsize != 0) { 1697 arena_chunk_map_misc_t *head_miscelm = miscelm; 1698 arena_run_t *head_run = run; 1699 1700 miscelm = arena_miscelm_get(chunk, 1701 arena_miscelm_to_pageind(head_miscelm) + (leadsize >> 1702 LG_PAGE)); 1703 run = &miscelm->run; 1704 1705 arena_run_trim_head(arena, chunk, head_run, alloc_size, 1706 alloc_size - leadsize); 1707 } 1708 if (trailsize != 0) { 1709 arena_run_trim_tail(arena, chunk, run, size + trailsize, size, 1710 false); 1711 } 1712 arena_run_init_large(arena, run, size, zero); 1713 ret = arena_miscelm_to_rpages(miscelm); 1714 1715 if (config_stats) { 1716 index_t index = size2index(size) - NBINS; 1717 1718 arena->stats.nmalloc_large++; 1719 arena->stats.nrequests_large++; 1720 arena->stats.allocated_large += size; 1721 arena->stats.lstats[index].nmalloc++; 1722 arena->stats.lstats[index].nrequests++; 1723 arena->stats.lstats[index].curruns++; 1724 } 1725 malloc_mutex_unlock(&arena->lock); 1726 1727 if (config_fill && !zero) { 1728 if (unlikely(opt_junk)) 1729 memset(ret, 0xa5, size); 1730 else if (unlikely(opt_zero)) 1731 memset(ret, 0, size); 1732 } 1733 return (ret); 1734} 1735 1736void 1737arena_prof_promoted(const void *ptr, size_t size) 1738{ 1739 arena_chunk_t *chunk; 1740 size_t pageind; 1741 index_t binind; 1742 1743 cassert(config_prof); 1744 assert(ptr != NULL); 1745 assert(CHUNK_ADDR2BASE(ptr) != ptr); 1746 assert(isalloc(ptr, false) == LARGE_MINCLASS); 1747 assert(isalloc(ptr, true) == LARGE_MINCLASS); 1748 assert(size <= SMALL_MAXCLASS); 1749 1750 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); 1751 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; 1752 binind = size2index(size); 1753 assert(binind < NBINS); 1754 arena_mapbits_large_binind_set(chunk, pageind, binind); 1755 1756 assert(isalloc(ptr, false) == LARGE_MINCLASS); 1757 assert(isalloc(ptr, true) == size); 1758} 1759 1760static void 1761arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run, 1762 arena_bin_t *bin) 1763{ 1764 1765 /* Dissociate run from bin. */ 1766 if (run == bin->runcur) 1767 bin->runcur = NULL; 1768 else { 1769 index_t binind = arena_bin_index(chunk->arena, bin); 1770 arena_bin_info_t *bin_info = &arena_bin_info[binind]; 1771 1772 if (bin_info->nregs != 1) { 1773 /* 1774 * This block's conditional is necessary because if the 1775 * run only contains one region, then it never gets 1776 * inserted into the non-full runs tree. 1777 */ 1778 arena_bin_runs_remove(bin, run); 1779 } 1780 } 1781} 1782 1783static void 1784arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, 1785 arena_bin_t *bin) 1786{ 1787 1788 assert(run != bin->runcur); 1789 assert(arena_run_tree_search(&bin->runs, arena_run_to_miscelm(run)) == 1790 NULL); 1791 1792 malloc_mutex_unlock(&bin->lock); 1793 /******************************/ 1794 malloc_mutex_lock(&arena->lock); 1795 arena_run_dalloc(arena, run, true, false); 1796 malloc_mutex_unlock(&arena->lock); 1797 /****************************/ 1798 malloc_mutex_lock(&bin->lock); 1799 if (config_stats) 1800 bin->stats.curruns--; 1801} 1802 1803static void 1804arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, 1805 arena_bin_t *bin) 1806{ 1807 1808 /* 1809 * Make sure that if bin->runcur is non-NULL, it refers to the lowest 1810 * non-full run. It is okay to NULL runcur out rather than proactively 1811 * keeping it pointing at the lowest non-full run. 1812 */ 1813 if ((uintptr_t)run < (uintptr_t)bin->runcur) { 1814 /* Switch runcur. */ 1815 if (bin->runcur->nfree > 0) 1816 arena_bin_runs_insert(bin, bin->runcur); 1817 bin->runcur = run; 1818 if (config_stats) 1819 bin->stats.reruns++; 1820 } else 1821 arena_bin_runs_insert(bin, run); 1822} 1823 1824static void 1825arena_dalloc_bin_locked_impl(arena_t *arena, arena_chunk_t *chunk, void *ptr, 1826 arena_chunk_map_bits_t *bitselm, bool junked) 1827{ 1828 size_t pageind, rpages_ind; 1829 arena_run_t *run; 1830 arena_bin_t *bin; 1831 arena_bin_info_t *bin_info; 1832 index_t binind; 1833 1834 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; 1835 rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind); 1836 run = &arena_miscelm_get(chunk, rpages_ind)->run; 1837 binind = run->binind; 1838 bin = &arena->bins[binind]; 1839 bin_info = &arena_bin_info[binind]; 1840 1841 if (!junked && config_fill && unlikely(opt_junk)) 1842 arena_dalloc_junk_small(ptr, bin_info); 1843 1844 arena_run_reg_dalloc(run, ptr); 1845 if (run->nfree == bin_info->nregs) { 1846 arena_dissociate_bin_run(chunk, run, bin); 1847 arena_dalloc_bin_run(arena, chunk, run, bin); 1848 } else if (run->nfree == 1 && run != bin->runcur) 1849 arena_bin_lower_run(arena, chunk, run, bin); 1850 1851 if (config_stats) { 1852 bin->stats.ndalloc++; 1853 bin->stats.curregs--; 1854 } 1855} 1856 1857void 1858arena_dalloc_bin_junked_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr, 1859 arena_chunk_map_bits_t *bitselm) 1860{ 1861 1862 arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, true); 1863} 1864 1865void 1866arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr, 1867 size_t pageind, arena_chunk_map_bits_t *bitselm) 1868{ 1869 arena_run_t *run; 1870 arena_bin_t *bin; 1871 size_t rpages_ind; 1872 1873 rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind); 1874 run = &arena_miscelm_get(chunk, rpages_ind)->run; 1875 bin = &arena->bins[run->binind]; 1876 malloc_mutex_lock(&bin->lock); 1877 arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, false); 1878 malloc_mutex_unlock(&bin->lock); 1879} 1880 1881void 1882arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr, 1883 size_t pageind) 1884{ 1885 arena_chunk_map_bits_t *bitselm; 1886 1887 if (config_debug) { 1888 /* arena_ptr_small_binind_get() does extra sanity checking. */ 1889 assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk, 1890 pageind)) != BININD_INVALID); 1891 } 1892 bitselm = arena_bitselm_get(chunk, pageind); 1893 arena_dalloc_bin(arena, chunk, ptr, pageind, bitselm); 1894} 1895 1896#ifdef JEMALLOC_JET 1897#undef arena_dalloc_junk_large 1898#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large_impl) 1899#endif 1900void 1901arena_dalloc_junk_large(void *ptr, size_t usize) 1902{ 1903 1904 if (config_fill && unlikely(opt_junk)) 1905 memset(ptr, 0x5a, usize); 1906} 1907#ifdef JEMALLOC_JET 1908#undef arena_dalloc_junk_large 1909#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large) 1910arena_dalloc_junk_large_t *arena_dalloc_junk_large = 1911 JEMALLOC_N(arena_dalloc_junk_large_impl); 1912#endif 1913 1914void 1915arena_dalloc_large_locked_impl(arena_t *arena, arena_chunk_t *chunk, 1916 void *ptr, bool junked) 1917{ 1918 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; 1919 arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind); 1920 arena_run_t *run = &miscelm->run; 1921 1922 if (config_fill || config_stats) { 1923 size_t usize = arena_mapbits_large_size_get(chunk, pageind); 1924 1925 if (!junked) 1926 arena_dalloc_junk_large(ptr, usize); 1927 if (config_stats) { 1928 index_t index = size2index(usize) - NBINS; 1929 1930 arena->stats.ndalloc_large++; 1931 arena->stats.allocated_large -= usize; 1932 arena->stats.lstats[index].ndalloc++; 1933 arena->stats.lstats[index].curruns--; 1934 } 1935 } 1936 1937 arena_run_dalloc(arena, run, true, false); 1938} 1939 1940void 1941arena_dalloc_large_junked_locked(arena_t *arena, arena_chunk_t *chunk, 1942 void *ptr) 1943{ 1944 1945 arena_dalloc_large_locked_impl(arena, chunk, ptr, true); 1946} 1947 1948void 1949arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr) 1950{ 1951 1952 malloc_mutex_lock(&arena->lock); 1953 arena_dalloc_large_locked_impl(arena, chunk, ptr, false); 1954 malloc_mutex_unlock(&arena->lock); 1955} 1956 1957static void 1958arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr, 1959 size_t oldsize, size_t size) 1960{ 1961 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; 1962 arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind); 1963 arena_run_t *run = &miscelm->run; 1964 1965 assert(size < oldsize); 1966 1967 /* 1968 * Shrink the run, and make trailing pages available for other 1969 * allocations. 1970 */ 1971 malloc_mutex_lock(&arena->lock); 1972 arena_run_trim_tail(arena, chunk, run, oldsize, size, true); 1973 if (config_stats) { 1974 index_t oldindex = size2index(oldsize) - NBINS; 1975 index_t index = size2index(size) - NBINS; 1976 1977 arena->stats.ndalloc_large++; 1978 arena->stats.allocated_large -= oldsize; 1979 arena->stats.lstats[oldindex].ndalloc++; 1980 arena->stats.lstats[oldindex].curruns--; 1981 1982 arena->stats.nmalloc_large++; 1983 arena->stats.nrequests_large++; 1984 arena->stats.allocated_large += size; 1985 arena->stats.lstats[index].nmalloc++; 1986 arena->stats.lstats[index].nrequests++; 1987 arena->stats.lstats[index].curruns++; 1988 } 1989 malloc_mutex_unlock(&arena->lock); 1990} 1991 1992static bool 1993arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr, 1994 size_t oldsize, size_t size, size_t extra, bool zero) 1995{ 1996 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; 1997 size_t npages = oldsize >> LG_PAGE; 1998 size_t followsize; 1999 size_t usize_min = s2u(size); 2000 2001 assert(oldsize == arena_mapbits_large_size_get(chunk, pageind)); 2002 2003 /* Try to extend the run. */ 2004 assert(usize_min > oldsize); 2005 malloc_mutex_lock(&arena->lock); 2006 if (pageind + npages < chunk_npages && 2007 arena_mapbits_allocated_get(chunk, pageind+npages) == 0 && 2008 (followsize = arena_mapbits_unallocated_size_get(chunk, 2009 pageind+npages)) >= usize_min - oldsize) { 2010 /* 2011 * The next run is available and sufficiently large. Split the 2012 * following run, then merge the first part with the existing 2013 * allocation. 2014 */ 2015 size_t flag_dirty, splitsize, usize; 2016 2017 usize = s2u(size + extra); 2018 while (oldsize + followsize < usize) 2019 usize = index2size(size2index(usize)-1); 2020 assert(usize >= usize_min); 2021 splitsize = usize - oldsize; 2022 2023 arena_run_t *run = &arena_miscelm_get(chunk, 2024 pageind+npages)->run; 2025 arena_run_split_large(arena, run, splitsize, zero); 2026 2027 size = oldsize + splitsize; 2028 npages = size >> LG_PAGE; 2029 2030 /* 2031 * Mark the extended run as dirty if either portion of the run 2032 * was dirty before allocation. This is rather pedantic, 2033 * because there's not actually any sequence of events that 2034 * could cause the resulting run to be passed to 2035 * arena_run_dalloc() with the dirty argument set to false 2036 * (which is when dirty flag consistency would really matter). 2037 */ 2038 flag_dirty = arena_mapbits_dirty_get(chunk, pageind) | 2039 arena_mapbits_dirty_get(chunk, pageind+npages-1); 2040 arena_mapbits_large_set(chunk, pageind, size, flag_dirty); 2041 arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty); 2042 2043 if (config_stats) { 2044 index_t oldindex = size2index(oldsize) - NBINS; 2045 index_t index = size2index(size) - NBINS; 2046 2047 arena->stats.ndalloc_large++; 2048 arena->stats.allocated_large -= oldsize; 2049 arena->stats.lstats[oldindex].ndalloc++; 2050 arena->stats.lstats[oldindex].curruns--; 2051 2052 arena->stats.nmalloc_large++; 2053 arena->stats.nrequests_large++; 2054 arena->stats.allocated_large += size; 2055 arena->stats.lstats[index].nmalloc++; 2056 arena->stats.lstats[index].nrequests++; 2057 arena->stats.lstats[index].curruns++; 2058 } 2059 malloc_mutex_unlock(&arena->lock); 2060 return (false); 2061 } 2062 malloc_mutex_unlock(&arena->lock); 2063 2064 return (true); 2065} 2066 2067#ifdef JEMALLOC_JET 2068#undef arena_ralloc_junk_large 2069#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large_impl) 2070#endif 2071static void 2072arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize) 2073{ 2074 2075 if (config_fill && unlikely(opt_junk)) { 2076 memset((void *)((uintptr_t)ptr + usize), 0x5a, 2077 old_usize - usize); 2078 } 2079} 2080#ifdef JEMALLOC_JET 2081#undef arena_ralloc_junk_large 2082#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large) 2083arena_ralloc_junk_large_t *arena_ralloc_junk_large = 2084 JEMALLOC_N(arena_ralloc_junk_large_impl); 2085#endif 2086 2087/* 2088 * Try to resize a large allocation, in order to avoid copying. This will 2089 * always fail if growing an object, and the following run is already in use. 2090 */ 2091static bool 2092arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra, 2093 bool zero) 2094{ 2095 size_t usize; 2096 2097 /* Make sure extra can't cause size_t overflow. */ 2098 if (extra >= arena_maxclass) 2099 return (true); 2100 2101 usize = s2u(size + extra); 2102 if (usize == oldsize) { 2103 /* Same size class. */ 2104 return (false); 2105 } else { 2106 arena_chunk_t *chunk; 2107 arena_t *arena; 2108 2109 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); 2110 arena = chunk->arena; 2111 2112 if (usize < oldsize) { 2113 /* Fill before shrinking in order avoid a race. */ 2114 arena_ralloc_junk_large(ptr, oldsize, usize); 2115 arena_ralloc_large_shrink(arena, chunk, ptr, oldsize, 2116 usize); 2117 return (false); 2118 } else { 2119 bool ret = arena_ralloc_large_grow(arena, chunk, ptr, 2120 oldsize, size, extra, zero); 2121 if (config_fill && !ret && !zero) { 2122 if (unlikely(opt_junk)) { 2123 memset((void *)((uintptr_t)ptr + 2124 oldsize), 0xa5, isalloc(ptr, 2125 config_prof) - oldsize); 2126 } else if (unlikely(opt_zero)) { 2127 memset((void *)((uintptr_t)ptr + 2128 oldsize), 0, isalloc(ptr, 2129 config_prof) - oldsize); 2130 } 2131 } 2132 return (ret); 2133 } 2134 } 2135} 2136 2137bool 2138arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra, 2139 bool zero) 2140{ 2141 2142 /* 2143 * Avoid moving the allocation if the size class can be left the same. 2144 */ 2145 if (oldsize <= arena_maxclass) { 2146 if (oldsize <= SMALL_MAXCLASS) { 2147 assert(arena_bin_info[size2index(oldsize)].reg_size 2148 == oldsize); 2149 if ((size + extra <= SMALL_MAXCLASS && size2index(size + 2150 extra) == size2index(oldsize)) || (size <= oldsize 2151 && size + extra >= oldsize)) 2152 return (false); 2153 } else { 2154 assert(size <= arena_maxclass); 2155 if (size + extra > SMALL_MAXCLASS) { 2156 if (!arena_ralloc_large(ptr, oldsize, size, 2157 extra, zero)) 2158 return (false); 2159 } 2160 } 2161 } 2162 2163 /* Reallocation would require a move. */ 2164 return (true); 2165} 2166 2167void * 2168arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size, 2169 size_t extra, size_t alignment, bool zero, bool try_tcache_alloc, 2170 bool try_tcache_dalloc) 2171{ 2172 void *ret; 2173 size_t copysize; 2174 2175 /* Try to avoid moving the allocation. */ 2176 if (!arena_ralloc_no_move(ptr, oldsize, size, extra, zero)) 2177 return (ptr); 2178 2179 /* 2180 * size and oldsize are different enough that we need to move the 2181 * object. In that case, fall back to allocating new space and 2182 * copying. 2183 */ 2184 if (alignment != 0) { 2185 size_t usize = sa2u(size + extra, alignment); 2186 if (usize == 0) 2187 return (NULL); 2188 ret = ipalloct(tsd, usize, alignment, zero, try_tcache_alloc, 2189 arena); 2190 } else { 2191 ret = arena_malloc(tsd, arena, size + extra, zero, 2192 try_tcache_alloc); 2193 } 2194 2195 if (ret == NULL) { 2196 if (extra == 0) 2197 return (NULL); 2198 /* Try again, this time without extra. */ 2199 if (alignment != 0) { 2200 size_t usize = sa2u(size, alignment); 2201 if (usize == 0) 2202 return (NULL); 2203 ret = ipalloct(tsd, usize, alignment, zero, 2204 try_tcache_alloc, arena); 2205 } else { 2206 ret = arena_malloc(tsd, arena, size, zero, 2207 try_tcache_alloc); 2208 } 2209 2210 if (ret == NULL) 2211 return (NULL); 2212 } 2213 2214 /* Junk/zero-filling were already done by ipalloc()/arena_malloc(). */ 2215 2216 /* 2217 * Copy at most size bytes (not size+extra), since the caller has no 2218 * expectation that the extra bytes will be reliably preserved. 2219 */ 2220 copysize = (size < oldsize) ? size : oldsize; 2221 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize); 2222 memcpy(ret, ptr, copysize); 2223 iqalloc(tsd, ptr, try_tcache_dalloc); 2224 return (ret); 2225} 2226 2227dss_prec_t 2228arena_dss_prec_get(arena_t *arena) 2229{ 2230 dss_prec_t ret; 2231 2232 malloc_mutex_lock(&arena->lock); 2233 ret = arena->dss_prec; 2234 malloc_mutex_unlock(&arena->lock); 2235 return (ret); 2236} 2237 2238bool 2239arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec) 2240{ 2241 2242 if (!have_dss) 2243 return (dss_prec != dss_prec_disabled); 2244 malloc_mutex_lock(&arena->lock); 2245 arena->dss_prec = dss_prec; 2246 malloc_mutex_unlock(&arena->lock); 2247 return (false); 2248} 2249 2250void 2251arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive, 2252 size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats, 2253 malloc_large_stats_t *lstats, malloc_huge_stats_t *hstats) 2254{ 2255 unsigned i; 2256 2257 malloc_mutex_lock(&arena->lock); 2258 *dss = dss_prec_names[arena->dss_prec]; 2259 *nactive += arena->nactive; 2260 *ndirty += arena->ndirty; 2261 2262 astats->mapped += arena->stats.mapped; 2263 astats->npurge += arena->stats.npurge; 2264 astats->nmadvise += arena->stats.nmadvise; 2265 astats->purged += arena->stats.purged; 2266 astats->allocated_large += arena->stats.allocated_large; 2267 astats->nmalloc_large += arena->stats.nmalloc_large; 2268 astats->ndalloc_large += arena->stats.ndalloc_large; 2269 astats->nrequests_large += arena->stats.nrequests_large; 2270 astats->allocated_huge += arena->stats.allocated_huge; 2271 astats->nmalloc_huge += arena->stats.nmalloc_huge; 2272 astats->ndalloc_huge += arena->stats.ndalloc_huge; 2273 2274 for (i = 0; i < nlclasses; i++) { 2275 lstats[i].nmalloc += arena->stats.lstats[i].nmalloc; 2276 lstats[i].ndalloc += arena->stats.lstats[i].ndalloc; 2277 lstats[i].nrequests += arena->stats.lstats[i].nrequests; 2278 lstats[i].curruns += arena->stats.lstats[i].curruns; 2279 } 2280 2281 for (i = 0; i < nhclasses; i++) { 2282 hstats[i].nmalloc += arena->stats.hstats[i].nmalloc; 2283 hstats[i].ndalloc += arena->stats.hstats[i].ndalloc; 2284 hstats[i].curhchunks += arena->stats.hstats[i].curhchunks; 2285 } 2286 malloc_mutex_unlock(&arena->lock); 2287 2288 for (i = 0; i < NBINS; i++) { 2289 arena_bin_t *bin = &arena->bins[i]; 2290 2291 malloc_mutex_lock(&bin->lock); 2292 bstats[i].nmalloc += bin->stats.nmalloc; 2293 bstats[i].ndalloc += bin->stats.ndalloc; 2294 bstats[i].nrequests += bin->stats.nrequests; 2295 bstats[i].curregs += bin->stats.curregs; 2296 if (config_tcache) { 2297 bstats[i].nfills += bin->stats.nfills; 2298 bstats[i].nflushes += bin->stats.nflushes; 2299 } 2300 bstats[i].nruns += bin->stats.nruns; 2301 bstats[i].reruns += bin->stats.reruns; 2302 bstats[i].curruns += bin->stats.curruns; 2303 malloc_mutex_unlock(&bin->lock); 2304 } 2305} 2306 2307arena_t * 2308arena_new(unsigned ind) 2309{ 2310 arena_t *arena; 2311 unsigned i; 2312 arena_bin_t *bin; 2313 2314 /* 2315 * Allocate arena, arena->lstats, and arena->hstats contiguously, mainly 2316 * because there is no way to clean up if base_alloc() OOMs. 2317 */ 2318 if (config_stats) { 2319 arena = (arena_t *)base_alloc(CACHELINE_CEILING(sizeof(arena_t)) 2320 + QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t) + 2321 nhclasses) * sizeof(malloc_huge_stats_t)); 2322 } else 2323 arena = (arena_t *)base_alloc(sizeof(arena_t)); 2324 if (arena == NULL) 2325 return (NULL); 2326 2327 arena->ind = ind; 2328 arena->nthreads = 0; 2329 arena->chunk_alloc = chunk_alloc_default; 2330 arena->chunk_dalloc = chunk_dalloc_default; 2331 2332 if (malloc_mutex_init(&arena->lock)) 2333 return (NULL); 2334 2335 if (config_stats) { 2336 memset(&arena->stats, 0, sizeof(arena_stats_t)); 2337 arena->stats.lstats = (malloc_large_stats_t *)(((void *)arena) + 2338 CACHELINE_CEILING(sizeof(arena_t))); 2339 memset(arena->stats.lstats, 0, nlclasses * 2340 sizeof(malloc_large_stats_t)); 2341 arena->stats.hstats = (malloc_huge_stats_t *)(((void *)arena) + 2342 CACHELINE_CEILING(sizeof(arena_t)) + 2343 QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t))); 2344 memset(arena->stats.hstats, 0, nhclasses * 2345 sizeof(malloc_huge_stats_t)); 2346 if (config_tcache) 2347 ql_new(&arena->tcache_ql); 2348 } 2349 2350 if (config_prof) 2351 arena->prof_accumbytes = 0; 2352 2353 arena->dss_prec = chunk_dss_prec_get(); 2354 2355 arena->spare = NULL; 2356 2357 arena->nactive = 0; 2358 arena->ndirty = 0; 2359 2360 arena_avail_tree_new(&arena->runs_avail); 2361 ql_new(&arena->runs_dirty); 2362 2363 /* Initialize bins. */ 2364 for (i = 0; i < NBINS; i++) { 2365 bin = &arena->bins[i]; 2366 if (malloc_mutex_init(&bin->lock)) 2367 return (NULL); 2368 bin->runcur = NULL; 2369 arena_run_tree_new(&bin->runs); 2370 if (config_stats) 2371 memset(&bin->stats, 0, sizeof(malloc_bin_stats_t)); 2372 } 2373 2374 return (arena); 2375} 2376 2377/* 2378 * Calculate bin_info->run_size such that it meets the following constraints: 2379 * 2380 * *) bin_info->run_size <= arena_maxrun 2381 * *) bin_info->nregs <= RUN_MAXREGS 2382 * 2383 * bin_info->nregs and bin_info->reg0_offset are also calculated here, since 2384 * these settings are all interdependent. 2385 */ 2386static void 2387bin_info_run_size_calc(arena_bin_info_t *bin_info) 2388{ 2389 size_t pad_size; 2390 size_t try_run_size, perfect_run_size, actual_run_size; 2391 uint32_t try_nregs, perfect_nregs, actual_nregs; 2392 2393 /* 2394 * Determine redzone size based on minimum alignment and minimum 2395 * redzone size. Add padding to the end of the run if it is needed to 2396 * align the regions. The padding allows each redzone to be half the 2397 * minimum alignment; without the padding, each redzone would have to 2398 * be twice as large in order to maintain alignment. 2399 */ 2400 if (config_fill && unlikely(opt_redzone)) { 2401 size_t align_min = ZU(1) << (jemalloc_ffs(bin_info->reg_size) - 2402 1); 2403 if (align_min <= REDZONE_MINSIZE) { 2404 bin_info->redzone_size = REDZONE_MINSIZE; 2405 pad_size = 0; 2406 } else { 2407 bin_info->redzone_size = align_min >> 1; 2408 pad_size = bin_info->redzone_size; 2409 } 2410 } else { 2411 bin_info->redzone_size = 0; 2412 pad_size = 0; 2413 } 2414 bin_info->reg_interval = bin_info->reg_size + 2415 (bin_info->redzone_size << 1); 2416 2417 /* 2418 * Compute run size under ideal conditions (no redzones, no limit on run 2419 * size). 2420 */ 2421 try_run_size = PAGE; 2422 try_nregs = try_run_size / bin_info->reg_size; 2423 do { 2424 perfect_run_size = try_run_size; 2425 perfect_nregs = try_nregs; 2426 2427 try_run_size += PAGE; 2428 try_nregs = try_run_size / bin_info->reg_size; 2429 } while (perfect_run_size != perfect_nregs * bin_info->reg_size); 2430 assert(perfect_nregs <= RUN_MAXREGS); 2431 2432 actual_run_size = perfect_run_size; 2433 actual_nregs = (actual_run_size - pad_size) / bin_info->reg_interval; 2434 2435 /* 2436 * Redzones can require enough padding that not even a single region can 2437 * fit within the number of pages that would normally be dedicated to a 2438 * run for this size class. Increase the run size until at least one 2439 * region fits. 2440 */ 2441 while (actual_nregs == 0) { 2442 assert(config_fill && unlikely(opt_redzone)); 2443 2444 actual_run_size += PAGE; 2445 actual_nregs = (actual_run_size - pad_size) / 2446 bin_info->reg_interval; 2447 } 2448 2449 /* 2450 * Make sure that the run will fit within an arena chunk. 2451 */ 2452 while (actual_run_size > arena_maxrun) { 2453 actual_run_size -= PAGE; 2454 actual_nregs = (actual_run_size - pad_size) / 2455 bin_info->reg_interval; 2456 } 2457 assert(actual_nregs > 0); 2458 2459 /* Copy final settings. */ 2460 bin_info->run_size = actual_run_size; 2461 bin_info->nregs = actual_nregs; 2462 bin_info->reg0_offset = actual_run_size - (actual_nregs * 2463 bin_info->reg_interval) - pad_size + bin_info->redzone_size; 2464 2465 assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs 2466 * bin_info->reg_interval) + pad_size == bin_info->run_size); 2467} 2468 2469static void 2470bin_info_init(void) 2471{ 2472 arena_bin_info_t *bin_info; 2473 2474#define BIN_INFO_INIT_bin_yes(index, size) \ 2475 bin_info = &arena_bin_info[index]; \ 2476 bin_info->reg_size = size; \ 2477 bin_info_run_size_calc(bin_info); \ 2478 bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs); 2479#define BIN_INFO_INIT_bin_no(index, size) 2480#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \ 2481 BIN_INFO_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)) 2482 SIZE_CLASSES 2483#undef BIN_INFO_INIT_bin_yes 2484#undef BIN_INFO_INIT_bin_no 2485#undef SC 2486} 2487 2488void 2489arena_boot(void) 2490{ 2491 size_t header_size; 2492 unsigned i; 2493 2494 /* 2495 * Compute the header size such that it is large enough to contain the 2496 * page map. The page map is biased to omit entries for the header 2497 * itself, so some iteration is necessary to compute the map bias. 2498 * 2499 * 1) Compute safe header_size and map_bias values that include enough 2500 * space for an unbiased page map. 2501 * 2) Refine map_bias based on (1) to omit the header pages in the page 2502 * map. The resulting map_bias may be one too small. 2503 * 3) Refine map_bias based on (2). The result will be >= the result 2504 * from (2), and will always be correct. 2505 */ 2506 map_bias = 0; 2507 for (i = 0; i < 3; i++) { 2508 header_size = offsetof(arena_chunk_t, map_bits) + 2509 ((sizeof(arena_chunk_map_bits_t) + 2510 sizeof(arena_chunk_map_misc_t)) * (chunk_npages-map_bias)); 2511 map_bias = (header_size + PAGE_MASK) >> LG_PAGE; 2512 } 2513 assert(map_bias > 0); 2514 2515 map_misc_offset = offsetof(arena_chunk_t, map_bits) + 2516 sizeof(arena_chunk_map_bits_t) * (chunk_npages-map_bias); 2517 2518 arena_maxrun = chunksize - (map_bias << LG_PAGE); 2519 assert(arena_maxrun > 0); 2520 arena_maxclass = index2size(size2index(chunksize)-1); 2521 if (arena_maxclass > arena_maxrun) { 2522 /* 2523 * For small chunk sizes it's possible for there to be fewer 2524 * non-header pages available than are necessary to serve the 2525 * size classes just below chunksize. 2526 */ 2527 arena_maxclass = arena_maxrun; 2528 } 2529 assert(arena_maxclass > 0); 2530 nlclasses = size2index(arena_maxclass) - size2index(SMALL_MAXCLASS); 2531 nhclasses = NSIZES - nlclasses - NBINS; 2532 2533 bin_info_init(); 2534} 2535 2536void 2537arena_prefork(arena_t *arena) 2538{ 2539 unsigned i; 2540 2541 malloc_mutex_prefork(&arena->lock); 2542 for (i = 0; i < NBINS; i++) 2543 malloc_mutex_prefork(&arena->bins[i].lock); 2544} 2545 2546void 2547arena_postfork_parent(arena_t *arena) 2548{ 2549 unsigned i; 2550 2551 for (i = 0; i < NBINS; i++) 2552 malloc_mutex_postfork_parent(&arena->bins[i].lock); 2553 malloc_mutex_postfork_parent(&arena->lock); 2554} 2555 2556void 2557arena_postfork_child(arena_t *arena) 2558{ 2559 unsigned i; 2560 2561 for (i = 0; i < NBINS; i++) 2562 malloc_mutex_postfork_child(&arena->bins[i].lock); 2563 malloc_mutex_postfork_child(&arena->lock); 2564} 2565