arena.c revision 9c6a8d3b0cc14fd26b119ad08f190e537771464f
1#define JEMALLOC_ARENA_C_ 2#include "jemalloc/internal/jemalloc_internal.h" 3 4/******************************************************************************/ 5/* Data. */ 6 7ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT; 8arena_bin_info_t arena_bin_info[NBINS]; 9 10size_t map_bias; 11size_t map_misc_offset; 12size_t arena_maxrun; /* Max run size for arenas. */ 13size_t arena_maxclass; /* Max size class for arenas. */ 14unsigned nlclasses; /* Number of large size classes. */ 15unsigned nhclasses; /* Number of huge size classes. */ 16 17/******************************************************************************/ 18/* 19 * Function prototypes for static functions that are referenced prior to 20 * definition. 21 */ 22 23static void arena_purge(arena_t *arena, bool all); 24static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, 25 bool cleaned); 26static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, 27 arena_run_t *run, arena_bin_t *bin); 28static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, 29 arena_run_t *run, arena_bin_t *bin); 30 31/******************************************************************************/ 32 33JEMALLOC_INLINE_C size_t 34arena_miscelm_to_bits(arena_chunk_map_misc_t *miscelm) 35{ 36 arena_chunk_t *chunk = CHUNK_ADDR2BASE(miscelm); 37 size_t pageind = arena_miscelm_to_pageind(miscelm); 38 39 return (arena_mapbits_get(chunk, pageind)); 40} 41 42JEMALLOC_INLINE_C int 43arena_run_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b) 44{ 45 uintptr_t a_miscelm = (uintptr_t)a; 46 uintptr_t b_miscelm = (uintptr_t)b; 47 48 assert(a != NULL); 49 assert(b != NULL); 50 51 return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm)); 52} 53 54/* Generate red-black tree functions. */ 55rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_misc_t, 56 rb_link, arena_run_comp) 57 58JEMALLOC_INLINE_C int 59arena_avail_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b) 60{ 61 int ret; 62 size_t a_size; 63 size_t b_size = arena_miscelm_to_bits(b) & ~PAGE_MASK; 64 uintptr_t a_miscelm = (uintptr_t)a; 65 uintptr_t b_miscelm = (uintptr_t)b; 66 67 if (a_miscelm & CHUNK_MAP_KEY) 68 a_size = a_miscelm & ~PAGE_MASK; 69 else 70 a_size = arena_miscelm_to_bits(a) & ~PAGE_MASK; 71 72 ret = (a_size > b_size) - (a_size < b_size); 73 if (ret == 0) { 74 if (!(a_miscelm & CHUNK_MAP_KEY)) 75 ret = (a_miscelm > b_miscelm) - (a_miscelm < b_miscelm); 76 else { 77 /* 78 * Treat keys as if they are lower than anything else. 79 */ 80 ret = -1; 81 } 82 } 83 84 return (ret); 85} 86 87/* Generate red-black tree functions. */ 88rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t, 89 arena_chunk_map_misc_t, rb_link, arena_avail_comp) 90 91static void 92arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind, 93 size_t npages) 94{ 95 96 assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> 97 LG_PAGE)); 98 arena_avail_tree_insert(&arena->runs_avail, arena_miscelm_get(chunk, 99 pageind)); 100} 101 102static void 103arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind, 104 size_t npages) 105{ 106 107 assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> 108 LG_PAGE)); 109 arena_avail_tree_remove(&arena->runs_avail, arena_miscelm_get(chunk, 110 pageind)); 111} 112 113static void 114arena_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind, 115 size_t npages) 116{ 117 arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind); 118 assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> 119 LG_PAGE)); 120 assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY); 121 assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) == 122 CHUNK_MAP_DIRTY); 123 ql_elm_new(miscelm, dr_link); 124 ql_tail_insert(&arena->runs_dirty, miscelm, dr_link); 125 arena->ndirty += npages; 126} 127 128static void 129arena_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind, 130 size_t npages) 131{ 132 arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind); 133 assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> 134 LG_PAGE)); 135 assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY); 136 assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) == 137 CHUNK_MAP_DIRTY); 138 ql_remove(&arena->runs_dirty, miscelm, dr_link); 139 arena->ndirty -= npages; 140} 141 142JEMALLOC_INLINE_C void * 143arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info) 144{ 145 void *ret; 146 unsigned regind; 147 arena_chunk_map_misc_t *miscelm; 148 void *rpages; 149 150 assert(run->nfree > 0); 151 assert(!bitmap_full(run->bitmap, &bin_info->bitmap_info)); 152 153 regind = bitmap_sfu(run->bitmap, &bin_info->bitmap_info); 154 miscelm = arena_run_to_miscelm(run); 155 rpages = arena_miscelm_to_rpages(miscelm); 156 ret = (void *)((uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset + 157 (uintptr_t)(bin_info->reg_interval * regind)); 158 run->nfree--; 159 return (ret); 160} 161 162JEMALLOC_INLINE_C void 163arena_run_reg_dalloc(arena_run_t *run, void *ptr) 164{ 165 arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); 166 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; 167 size_t mapbits = arena_mapbits_get(chunk, pageind); 168 index_t binind = arena_ptr_small_binind_get(ptr, mapbits); 169 arena_bin_info_t *bin_info = &arena_bin_info[binind]; 170 unsigned regind = arena_run_regind(run, bin_info, ptr); 171 172 assert(run->nfree < bin_info->nregs); 173 /* Freeing an interior pointer can cause assertion failure. */ 174 assert(((uintptr_t)ptr - 175 ((uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) + 176 (uintptr_t)bin_info->reg0_offset)) % 177 (uintptr_t)bin_info->reg_interval == 0); 178 assert((uintptr_t)ptr >= 179 (uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) + 180 (uintptr_t)bin_info->reg0_offset); 181 /* Freeing an unallocated pointer can cause assertion failure. */ 182 assert(bitmap_get(run->bitmap, &bin_info->bitmap_info, regind)); 183 184 bitmap_unset(run->bitmap, &bin_info->bitmap_info, regind); 185 run->nfree++; 186} 187 188JEMALLOC_INLINE_C void 189arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages) 190{ 191 192 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + 193 (run_ind << LG_PAGE)), (npages << LG_PAGE)); 194 memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0, 195 (npages << LG_PAGE)); 196} 197 198JEMALLOC_INLINE_C void 199arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind) 200{ 201 202 JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind 203 << LG_PAGE)), PAGE); 204} 205 206JEMALLOC_INLINE_C void 207arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind) 208{ 209 size_t i; 210 UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE)); 211 212 arena_run_page_mark_zeroed(chunk, run_ind); 213 for (i = 0; i < PAGE / sizeof(size_t); i++) 214 assert(p[i] == 0); 215} 216 217static void 218arena_cactive_update(arena_t *arena, size_t add_pages, size_t sub_pages) 219{ 220 221 if (config_stats) { 222 ssize_t cactive_diff = CHUNK_CEILING((arena->nactive + add_pages 223 - sub_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive << 224 LG_PAGE); 225 if (cactive_diff != 0) 226 stats_cactive_add(cactive_diff); 227 } 228} 229 230static void 231arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind, 232 size_t flag_dirty, size_t need_pages) 233{ 234 size_t total_pages, rem_pages; 235 236 total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >> 237 LG_PAGE; 238 assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) == 239 flag_dirty); 240 assert(need_pages <= total_pages); 241 rem_pages = total_pages - need_pages; 242 243 arena_avail_remove(arena, chunk, run_ind, total_pages); 244 if (flag_dirty != 0) 245 arena_dirty_remove(arena, chunk, run_ind, total_pages); 246 arena_cactive_update(arena, need_pages, 0); 247 arena->nactive += need_pages; 248 249 /* Keep track of trailing unused pages for later use. */ 250 if (rem_pages > 0) { 251 if (flag_dirty != 0) { 252 arena_mapbits_unallocated_set(chunk, 253 run_ind+need_pages, (rem_pages << LG_PAGE), 254 flag_dirty); 255 arena_mapbits_unallocated_set(chunk, 256 run_ind+total_pages-1, (rem_pages << LG_PAGE), 257 flag_dirty); 258 arena_dirty_insert(arena, chunk, run_ind+need_pages, 259 rem_pages); 260 } else { 261 arena_mapbits_unallocated_set(chunk, run_ind+need_pages, 262 (rem_pages << LG_PAGE), 263 arena_mapbits_unzeroed_get(chunk, 264 run_ind+need_pages)); 265 arena_mapbits_unallocated_set(chunk, 266 run_ind+total_pages-1, (rem_pages << LG_PAGE), 267 arena_mapbits_unzeroed_get(chunk, 268 run_ind+total_pages-1)); 269 } 270 arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages); 271 } 272} 273 274static void 275arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size, 276 bool remove, bool zero) 277{ 278 arena_chunk_t *chunk; 279 arena_chunk_map_misc_t *miscelm; 280 size_t flag_dirty, run_ind, need_pages, i; 281 282 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); 283 miscelm = arena_run_to_miscelm(run); 284 run_ind = arena_miscelm_to_pageind(miscelm); 285 flag_dirty = arena_mapbits_dirty_get(chunk, run_ind); 286 need_pages = (size >> LG_PAGE); 287 assert(need_pages > 0); 288 289 if (remove) { 290 arena_run_split_remove(arena, chunk, run_ind, flag_dirty, 291 need_pages); 292 } 293 294 if (zero) { 295 if (flag_dirty == 0) { 296 /* 297 * The run is clean, so some pages may be zeroed (i.e. 298 * never before touched). 299 */ 300 for (i = 0; i < need_pages; i++) { 301 if (arena_mapbits_unzeroed_get(chunk, run_ind+i) 302 != 0) 303 arena_run_zero(chunk, run_ind+i, 1); 304 else if (config_debug) { 305 arena_run_page_validate_zeroed(chunk, 306 run_ind+i); 307 } else { 308 arena_run_page_mark_zeroed(chunk, 309 run_ind+i); 310 } 311 } 312 } else { 313 /* The run is dirty, so all pages must be zeroed. */ 314 arena_run_zero(chunk, run_ind, need_pages); 315 } 316 } else { 317 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + 318 (run_ind << LG_PAGE)), (need_pages << LG_PAGE)); 319 } 320 321 /* 322 * Set the last element first, in case the run only contains one page 323 * (i.e. both statements set the same element). 324 */ 325 arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty); 326 arena_mapbits_large_set(chunk, run_ind, size, flag_dirty); 327} 328 329static void 330arena_run_split_large(arena_t *arena, arena_run_t *run, size_t size, bool zero) 331{ 332 333 arena_run_split_large_helper(arena, run, size, true, zero); 334} 335 336static void 337arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero) 338{ 339 340 arena_run_split_large_helper(arena, run, size, false, zero); 341} 342 343static void 344arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size, 345 index_t binind) 346{ 347 arena_chunk_t *chunk; 348 arena_chunk_map_misc_t *miscelm; 349 size_t flag_dirty, run_ind, need_pages, i; 350 351 assert(binind != BININD_INVALID); 352 353 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); 354 miscelm = arena_run_to_miscelm(run); 355 run_ind = arena_miscelm_to_pageind(miscelm); 356 flag_dirty = arena_mapbits_dirty_get(chunk, run_ind); 357 need_pages = (size >> LG_PAGE); 358 assert(need_pages > 0); 359 360 arena_run_split_remove(arena, chunk, run_ind, flag_dirty, need_pages); 361 362 for (i = 0; i < need_pages; i++) { 363 arena_mapbits_small_set(chunk, run_ind+i, i, binind, 0); 364 if (config_debug && flag_dirty == 0 && 365 arena_mapbits_unzeroed_get(chunk, run_ind+i) == 0) 366 arena_run_page_validate_zeroed(chunk, run_ind+i); 367 } 368 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + 369 (run_ind << LG_PAGE)), (need_pages << LG_PAGE)); 370} 371 372static arena_chunk_t * 373arena_chunk_init_spare(arena_t *arena) 374{ 375 arena_chunk_t *chunk; 376 377 assert(arena->spare != NULL); 378 379 chunk = arena->spare; 380 arena->spare = NULL; 381 382 assert(arena_mapbits_allocated_get(chunk, map_bias) == 0); 383 assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0); 384 assert(arena_mapbits_unallocated_size_get(chunk, map_bias) == 385 arena_maxrun); 386 assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) == 387 arena_maxrun); 388 assert(arena_mapbits_dirty_get(chunk, map_bias) == 389 arena_mapbits_dirty_get(chunk, chunk_npages-1)); 390 391 return (chunk); 392} 393 394static arena_chunk_t * 395arena_chunk_alloc_internal(arena_t *arena, size_t size, size_t alignment, 396 bool *zero) 397{ 398 arena_chunk_t *chunk; 399 chunk_alloc_t *chunk_alloc; 400 chunk_dalloc_t *chunk_dalloc; 401 402 chunk_alloc = arena->chunk_alloc; 403 chunk_dalloc = arena->chunk_dalloc; 404 malloc_mutex_unlock(&arena->lock); 405 chunk = (arena_chunk_t *)chunk_alloc_arena(chunk_alloc, chunk_dalloc, 406 arena->ind, NULL, size, alignment, zero); 407 malloc_mutex_lock(&arena->lock); 408 if (config_stats && chunk != NULL) 409 arena->stats.mapped += chunksize; 410 411 return (chunk); 412} 413 414static arena_chunk_t * 415arena_chunk_init_hard(arena_t *arena) 416{ 417 arena_chunk_t *chunk; 418 bool zero; 419 size_t unzeroed, i; 420 421 assert(arena->spare == NULL); 422 423 zero = false; 424 chunk = arena_chunk_alloc_internal(arena, chunksize, chunksize, &zero); 425 if (chunk == NULL) 426 return (NULL); 427 428 chunk->arena = arena; 429 430 /* 431 * Initialize the map to contain one maximal free untouched run. Mark 432 * the pages as zeroed iff chunk_alloc() returned a zeroed chunk. 433 */ 434 unzeroed = zero ? 0 : CHUNK_MAP_UNZEROED; 435 arena_mapbits_unallocated_set(chunk, map_bias, arena_maxrun, unzeroed); 436 /* 437 * There is no need to initialize the internal page map entries unless 438 * the chunk is not zeroed. 439 */ 440 if (!zero) { 441 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED( 442 (void *)arena_bitselm_get(chunk, map_bias+1), 443 (size_t)((uintptr_t) arena_bitselm_get(chunk, 444 chunk_npages-1) - (uintptr_t)arena_bitselm_get(chunk, 445 map_bias+1))); 446 for (i = map_bias+1; i < chunk_npages-1; i++) 447 arena_mapbits_unzeroed_set(chunk, i, unzeroed); 448 } else { 449 JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void 450 *)arena_bitselm_get(chunk, map_bias+1), (size_t)((uintptr_t) 451 arena_bitselm_get(chunk, chunk_npages-1) - 452 (uintptr_t)arena_bitselm_get(chunk, map_bias+1))); 453 if (config_debug) { 454 for (i = map_bias+1; i < chunk_npages-1; i++) { 455 assert(arena_mapbits_unzeroed_get(chunk, i) == 456 unzeroed); 457 } 458 } 459 } 460 arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxrun, 461 unzeroed); 462 463 return (chunk); 464} 465 466static arena_chunk_t * 467arena_chunk_alloc(arena_t *arena) 468{ 469 arena_chunk_t *chunk; 470 471 if (arena->spare != NULL) 472 chunk = arena_chunk_init_spare(arena); 473 else { 474 chunk = arena_chunk_init_hard(arena); 475 if (chunk == NULL) 476 return (NULL); 477 } 478 479 /* Insert the run into the runs_avail tree. */ 480 arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias); 481 482 return (chunk); 483} 484 485static void 486arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk) 487{ 488 489 assert(arena_mapbits_allocated_get(chunk, map_bias) == 0); 490 assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0); 491 assert(arena_mapbits_unallocated_size_get(chunk, map_bias) == 492 arena_maxrun); 493 assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) == 494 arena_maxrun); 495 assert(arena_mapbits_dirty_get(chunk, map_bias) == 496 arena_mapbits_dirty_get(chunk, chunk_npages-1)); 497 498 /* 499 * Remove run from the runs_avail tree, so that the arena does not use 500 * it. 501 */ 502 arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias); 503 504 if (arena->spare != NULL) { 505 arena_chunk_t *spare = arena->spare; 506 chunk_dalloc_t *chunk_dalloc; 507 508 arena->spare = chunk; 509 if (arena_mapbits_dirty_get(spare, map_bias) != 0) { 510 arena_dirty_remove(arena, spare, map_bias, 511 chunk_npages-map_bias); 512 } 513 chunk_dalloc = arena->chunk_dalloc; 514 malloc_mutex_unlock(&arena->lock); 515 chunk_dalloc((void *)spare, chunksize, arena->ind); 516 malloc_mutex_lock(&arena->lock); 517 if (config_stats) 518 arena->stats.mapped -= chunksize; 519 } else 520 arena->spare = chunk; 521} 522 523static void 524arena_huge_malloc_stats_update(arena_t *arena, size_t usize) 525{ 526 index_t index = size2index(usize) - nlclasses - NBINS; 527 528 cassert(config_stats); 529 530 arena->stats.nmalloc_huge++; 531 arena->stats.allocated_huge += usize; 532 arena->stats.hstats[index].nmalloc++; 533 arena->stats.hstats[index].curhchunks++; 534} 535 536static void 537arena_huge_malloc_stats_update_undo(arena_t *arena, size_t usize) 538{ 539 index_t index = size2index(usize) - nlclasses - NBINS; 540 541 cassert(config_stats); 542 543 arena->stats.nmalloc_huge--; 544 arena->stats.allocated_huge -= usize; 545 arena->stats.hstats[index].nmalloc--; 546 arena->stats.hstats[index].curhchunks--; 547} 548 549static void 550arena_huge_dalloc_stats_update(arena_t *arena, size_t usize) 551{ 552 index_t index = size2index(usize) - nlclasses - NBINS; 553 554 cassert(config_stats); 555 556 arena->stats.ndalloc_huge++; 557 arena->stats.allocated_huge -= usize; 558 arena->stats.hstats[index].ndalloc++; 559 arena->stats.hstats[index].curhchunks--; 560} 561 562static void 563arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize) 564{ 565 index_t index = size2index(usize) - nlclasses - NBINS; 566 567 cassert(config_stats); 568 569 arena->stats.ndalloc_huge--; 570 arena->stats.allocated_huge += usize; 571 arena->stats.hstats[index].ndalloc--; 572 arena->stats.hstats[index].curhchunks++; 573} 574 575static void 576arena_huge_ralloc_stats_update(arena_t *arena, size_t oldsize, size_t usize) 577{ 578 579 arena_huge_dalloc_stats_update(arena, oldsize); 580 arena_huge_malloc_stats_update(arena, usize); 581} 582 583static void 584arena_huge_ralloc_stats_update_undo(arena_t *arena, size_t oldsize, 585 size_t usize) 586{ 587 588 arena_huge_dalloc_stats_update_undo(arena, oldsize); 589 arena_huge_malloc_stats_update_undo(arena, usize); 590} 591 592void * 593arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment, 594 bool *zero) 595{ 596 void *ret; 597 chunk_alloc_t *chunk_alloc; 598 chunk_dalloc_t *chunk_dalloc; 599 size_t csize = CHUNK_CEILING(usize); 600 601 malloc_mutex_lock(&arena->lock); 602 chunk_alloc = arena->chunk_alloc; 603 chunk_dalloc = arena->chunk_dalloc; 604 if (config_stats) { 605 /* Optimistically update stats prior to unlocking. */ 606 arena_huge_malloc_stats_update(arena, usize); 607 arena->stats.mapped += usize; 608 } 609 arena->nactive += (usize >> LG_PAGE); 610 malloc_mutex_unlock(&arena->lock); 611 612 ret = chunk_alloc_arena(chunk_alloc, chunk_dalloc, arena->ind, NULL, 613 csize, alignment, zero); 614 if (ret == NULL) { 615 /* Revert optimistic stats updates. */ 616 malloc_mutex_lock(&arena->lock); 617 if (config_stats) { 618 arena_huge_malloc_stats_update_undo(arena, usize); 619 arena->stats.mapped -= usize; 620 } 621 arena->nactive -= (usize >> LG_PAGE); 622 malloc_mutex_unlock(&arena->lock); 623 return (NULL); 624 } 625 626 if (config_stats) 627 stats_cactive_add(usize); 628 629 return (ret); 630} 631 632void 633arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize) 634{ 635 chunk_dalloc_t *chunk_dalloc; 636 637 malloc_mutex_lock(&arena->lock); 638 chunk_dalloc = arena->chunk_dalloc; 639 if (config_stats) { 640 arena_huge_dalloc_stats_update(arena, usize); 641 arena->stats.mapped -= usize; 642 stats_cactive_sub(usize); 643 } 644 arena->nactive -= (usize >> LG_PAGE); 645 malloc_mutex_unlock(&arena->lock); 646 chunk_dalloc(chunk, CHUNK_CEILING(usize), arena->ind); 647} 648 649void 650arena_chunk_ralloc_huge_similar(arena_t *arena, void *chunk, size_t oldsize, 651 size_t usize) 652{ 653 654 assert(CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize)); 655 assert(oldsize != usize); 656 657 malloc_mutex_lock(&arena->lock); 658 if (config_stats) 659 arena_huge_ralloc_stats_update(arena, oldsize, usize); 660 if (oldsize < usize) { 661 size_t udiff = usize - oldsize; 662 arena->nactive += udiff >> LG_PAGE; 663 if (config_stats) 664 stats_cactive_add(udiff); 665 } else { 666 size_t udiff = oldsize - usize; 667 arena->nactive -= udiff >> LG_PAGE; 668 if (config_stats) 669 stats_cactive_sub(udiff); 670 } 671 malloc_mutex_unlock(&arena->lock); 672} 673 674void 675arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk, size_t oldsize, 676 size_t usize) 677{ 678 chunk_dalloc_t *chunk_dalloc; 679 size_t udiff = oldsize - usize; 680 size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize); 681 682 malloc_mutex_lock(&arena->lock); 683 chunk_dalloc = arena->chunk_dalloc; 684 if (config_stats) { 685 arena_huge_ralloc_stats_update(arena, oldsize, usize); 686 if (cdiff != 0) { 687 arena->stats.mapped -= cdiff; 688 stats_cactive_sub(udiff); 689 } 690 } 691 arena->nactive -= udiff >> LG_PAGE; 692 malloc_mutex_unlock(&arena->lock); 693 if (cdiff != 0) { 694 chunk_dalloc((void *)((uintptr_t)chunk + CHUNK_CEILING(usize)), 695 cdiff, arena->ind); 696 } 697} 698 699bool 700arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk, size_t oldsize, 701 size_t usize, bool *zero) 702{ 703 chunk_alloc_t *chunk_alloc; 704 chunk_dalloc_t *chunk_dalloc; 705 size_t udiff = usize - oldsize; 706 size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize); 707 708 malloc_mutex_lock(&arena->lock); 709 chunk_alloc = arena->chunk_alloc; 710 chunk_dalloc = arena->chunk_dalloc; 711 if (config_stats) { 712 /* Optimistically update stats prior to unlocking. */ 713 arena_huge_ralloc_stats_update(arena, oldsize, usize); 714 arena->stats.mapped += cdiff; 715 } 716 arena->nactive += (udiff >> LG_PAGE); 717 malloc_mutex_unlock(&arena->lock); 718 719 if (chunk_alloc_arena(chunk_alloc, chunk_dalloc, arena->ind, 720 (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize)), cdiff, 721 chunksize, zero) == NULL) { 722 /* Revert optimistic stats updates. */ 723 malloc_mutex_lock(&arena->lock); 724 if (config_stats) { 725 arena_huge_ralloc_stats_update_undo(arena, 726 oldsize, usize); 727 arena->stats.mapped -= cdiff; 728 } 729 arena->nactive -= (udiff >> LG_PAGE); 730 malloc_mutex_unlock(&arena->lock); 731 return (true); 732 } 733 734 if (config_stats) 735 stats_cactive_add(udiff); 736 737 return (false); 738} 739 740static arena_run_t * 741arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero) 742{ 743 arena_chunk_map_misc_t *miscelm; 744 arena_chunk_map_misc_t *key; 745 746 key = (arena_chunk_map_misc_t *)(size | CHUNK_MAP_KEY); 747 miscelm = arena_avail_tree_nsearch(&arena->runs_avail, key); 748 if (miscelm != NULL) { 749 arena_run_t *run = &miscelm->run; 750 arena_run_split_large(arena, &miscelm->run, size, zero); 751 return (run); 752 } 753 754 return (NULL); 755} 756 757static arena_run_t * 758arena_run_alloc_large(arena_t *arena, size_t size, bool zero) 759{ 760 arena_chunk_t *chunk; 761 arena_run_t *run; 762 763 assert(size <= arena_maxrun); 764 assert((size & PAGE_MASK) == 0); 765 766 /* Search the arena's chunks for the lowest best fit. */ 767 run = arena_run_alloc_large_helper(arena, size, zero); 768 if (run != NULL) 769 return (run); 770 771 /* 772 * No usable runs. Create a new chunk from which to allocate the run. 773 */ 774 chunk = arena_chunk_alloc(arena); 775 if (chunk != NULL) { 776 run = &arena_miscelm_get(chunk, map_bias)->run; 777 arena_run_split_large(arena, run, size, zero); 778 return (run); 779 } 780 781 /* 782 * arena_chunk_alloc() failed, but another thread may have made 783 * sufficient memory available while this one dropped arena->lock in 784 * arena_chunk_alloc(), so search one more time. 785 */ 786 return (arena_run_alloc_large_helper(arena, size, zero)); 787} 788 789static arena_run_t * 790arena_run_alloc_small_helper(arena_t *arena, size_t size, index_t binind) 791{ 792 arena_run_t *run; 793 arena_chunk_map_misc_t *miscelm; 794 arena_chunk_map_misc_t *key; 795 796 key = (arena_chunk_map_misc_t *)(size | CHUNK_MAP_KEY); 797 miscelm = arena_avail_tree_nsearch(&arena->runs_avail, key); 798 if (miscelm != NULL) { 799 run = &miscelm->run; 800 arena_run_split_small(arena, run, size, binind); 801 return (run); 802 } 803 804 return (NULL); 805} 806 807static arena_run_t * 808arena_run_alloc_small(arena_t *arena, size_t size, index_t binind) 809{ 810 arena_chunk_t *chunk; 811 arena_run_t *run; 812 813 assert(size <= arena_maxrun); 814 assert((size & PAGE_MASK) == 0); 815 assert(binind != BININD_INVALID); 816 817 /* Search the arena's chunks for the lowest best fit. */ 818 run = arena_run_alloc_small_helper(arena, size, binind); 819 if (run != NULL) 820 return (run); 821 822 /* 823 * No usable runs. Create a new chunk from which to allocate the run. 824 */ 825 chunk = arena_chunk_alloc(arena); 826 if (chunk != NULL) { 827 run = &arena_miscelm_get(chunk, map_bias)->run; 828 arena_run_split_small(arena, run, size, binind); 829 return (run); 830 } 831 832 /* 833 * arena_chunk_alloc() failed, but another thread may have made 834 * sufficient memory available while this one dropped arena->lock in 835 * arena_chunk_alloc(), so search one more time. 836 */ 837 return (arena_run_alloc_small_helper(arena, size, binind)); 838} 839 840JEMALLOC_INLINE_C void 841arena_maybe_purge(arena_t *arena) 842{ 843 size_t threshold; 844 845 /* Don't purge if the option is disabled. */ 846 if (opt_lg_dirty_mult < 0) 847 return; 848 threshold = (arena->nactive >> opt_lg_dirty_mult); 849 /* 850 * Don't purge unless the number of purgeable pages exceeds the 851 * threshold. 852 */ 853 if (arena->ndirty <= threshold) 854 return; 855 856 arena_purge(arena, false); 857} 858 859static size_t 860arena_dirty_count(arena_t *arena) 861{ 862 size_t ndirty = 0; 863 arena_chunk_map_misc_t *miscelm; 864 arena_chunk_t *chunk; 865 size_t pageind, npages; 866 867 ql_foreach(miscelm, &arena->runs_dirty, dr_link) { 868 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm); 869 pageind = arena_miscelm_to_pageind(miscelm); 870 assert(arena_mapbits_allocated_get(chunk, pageind) == 0); 871 assert(arena_mapbits_large_get(chunk, pageind) == 0); 872 assert(arena_mapbits_dirty_get(chunk, pageind) != 0); 873 npages = arena_mapbits_unallocated_size_get(chunk, pageind) >> 874 LG_PAGE; 875 ndirty += npages; 876 } 877 878 return (ndirty); 879} 880 881static size_t 882arena_compute_npurge(arena_t *arena, bool all) 883{ 884 size_t npurge; 885 886 /* 887 * Compute the minimum number of pages that this thread should try to 888 * purge. 889 */ 890 if (!all) { 891 size_t threshold = (arena->nactive >> opt_lg_dirty_mult); 892 893 npurge = arena->ndirty - threshold; 894 } else 895 npurge = arena->ndirty; 896 897 return (npurge); 898} 899 900static size_t 901arena_stash_dirty(arena_t *arena, bool all, size_t npurge, 902 arena_chunk_miscelms_t *miscelms) 903{ 904 arena_chunk_map_misc_t *miscelm; 905 size_t nstashed = 0; 906 907 /* Add at least npurge pages to purge_list. */ 908 for (miscelm = ql_first(&arena->runs_dirty); miscelm != NULL; 909 miscelm = ql_first(&arena->runs_dirty)) { 910 arena_chunk_t *chunk = 911 (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm); 912 size_t pageind = arena_miscelm_to_pageind(miscelm); 913 size_t run_size = arena_mapbits_unallocated_size_get(chunk, 914 pageind); 915 size_t npages = run_size >> LG_PAGE; 916 arena_run_t *run = &miscelm->run; 917 918 assert(pageind + npages <= chunk_npages); 919 assert(arena_mapbits_dirty_get(chunk, pageind) == 920 arena_mapbits_dirty_get(chunk, pageind+npages-1)); 921 922 /* 923 * If purging the spare chunk's run, make it available prior to 924 * allocation. 925 */ 926 if (chunk == arena->spare) 927 arena_chunk_alloc(arena); 928 929 /* Temporarily allocate the free dirty run. */ 930 arena_run_split_large(arena, run, run_size, false); 931 /* Append to purge_list for later processing. */ 932 ql_elm_new(miscelm, dr_link); 933 ql_tail_insert(miscelms, miscelm, dr_link); 934 935 nstashed += npages; 936 937 if (!all && nstashed >= npurge) 938 break; 939 } 940 941 return (nstashed); 942} 943 944static size_t 945arena_purge_stashed(arena_t *arena, arena_chunk_miscelms_t *miscelms) 946{ 947 size_t npurged, nmadvise; 948 arena_chunk_map_misc_t *miscelm; 949 950 if (config_stats) 951 nmadvise = 0; 952 npurged = 0; 953 954 malloc_mutex_unlock(&arena->lock); 955 956 ql_foreach(miscelm, miscelms, dr_link) { 957 arena_chunk_t *chunk; 958 size_t pageind, run_size, npages, flag_unzeroed, i; 959 bool unzeroed; 960 961 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm); 962 pageind = arena_miscelm_to_pageind(miscelm); 963 run_size = arena_mapbits_large_size_get(chunk, pageind); 964 npages = run_size >> LG_PAGE; 965 966 assert(pageind + npages <= chunk_npages); 967 unzeroed = pages_purge((void *)((uintptr_t)chunk + (pageind << 968 LG_PAGE)), run_size); 969 flag_unzeroed = unzeroed ? CHUNK_MAP_UNZEROED : 0; 970 971 /* 972 * Set the unzeroed flag for all pages, now that pages_purge() 973 * has returned whether the pages were zeroed as a side effect 974 * of purging. This chunk map modification is safe even though 975 * the arena mutex isn't currently owned by this thread, 976 * because the run is marked as allocated, thus protecting it 977 * from being modified by any other thread. As long as these 978 * writes don't perturb the first and last elements' 979 * CHUNK_MAP_ALLOCATED bits, behavior is well defined. 980 */ 981 for (i = 0; i < npages; i++) { 982 arena_mapbits_unzeroed_set(chunk, pageind+i, 983 flag_unzeroed); 984 } 985 986 npurged += npages; 987 if (config_stats) 988 nmadvise++; 989 } 990 991 malloc_mutex_lock(&arena->lock); 992 993 if (config_stats) { 994 arena->stats.nmadvise += nmadvise; 995 arena->stats.purged += npurged; 996 } 997 998 return (npurged); 999} 1000 1001static void 1002arena_unstash_purged(arena_t *arena, arena_chunk_miscelms_t *miscelms) 1003{ 1004 arena_chunk_map_misc_t *miscelm; 1005 1006 /* Deallocate runs. */ 1007 for (miscelm = ql_first(miscelms); miscelm != NULL; 1008 miscelm = ql_first(miscelms)) { 1009 arena_run_t *run = &miscelm->run; 1010 ql_remove(miscelms, miscelm, dr_link); 1011 arena_run_dalloc(arena, run, false, true); 1012 } 1013} 1014 1015void 1016arena_purge(arena_t *arena, bool all) 1017{ 1018 size_t npurge, npurgeable, npurged; 1019 arena_chunk_miscelms_t purge_list; 1020 1021 /* 1022 * Calls to arena_dirty_count() are disabled even for debug builds 1023 * because overhead grows nonlinearly as memory usage increases. 1024 */ 1025 if (false && config_debug) { 1026 size_t ndirty = arena_dirty_count(arena); 1027 assert(ndirty == arena->ndirty); 1028 } 1029 assert((arena->nactive >> opt_lg_dirty_mult) < arena->ndirty || all); 1030 1031 if (config_stats) 1032 arena->stats.npurge++; 1033 1034 npurge = arena_compute_npurge(arena, all); 1035 ql_new(&purge_list); 1036 npurgeable = arena_stash_dirty(arena, all, npurge, &purge_list); 1037 assert(npurgeable >= npurge); 1038 npurged = arena_purge_stashed(arena, &purge_list); 1039 assert(npurged == npurgeable); 1040 arena_unstash_purged(arena, &purge_list); 1041} 1042 1043void 1044arena_purge_all(arena_t *arena) 1045{ 1046 1047 malloc_mutex_lock(&arena->lock); 1048 arena_purge(arena, true); 1049 malloc_mutex_unlock(&arena->lock); 1050} 1051 1052static void 1053arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size, 1054 size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty) 1055{ 1056 size_t size = *p_size; 1057 size_t run_ind = *p_run_ind; 1058 size_t run_pages = *p_run_pages; 1059 1060 /* Try to coalesce forward. */ 1061 if (run_ind + run_pages < chunk_npages && 1062 arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 && 1063 arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty) { 1064 size_t nrun_size = arena_mapbits_unallocated_size_get(chunk, 1065 run_ind+run_pages); 1066 size_t nrun_pages = nrun_size >> LG_PAGE; 1067 1068 /* 1069 * Remove successor from runs_avail; the coalesced run is 1070 * inserted later. 1071 */ 1072 assert(arena_mapbits_unallocated_size_get(chunk, 1073 run_ind+run_pages+nrun_pages-1) == nrun_size); 1074 assert(arena_mapbits_dirty_get(chunk, 1075 run_ind+run_pages+nrun_pages-1) == flag_dirty); 1076 arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages); 1077 1078 /* If the successor is dirty, remove it from runs_dirty. */ 1079 if (flag_dirty != 0) { 1080 arena_dirty_remove(arena, chunk, run_ind+run_pages, 1081 nrun_pages); 1082 } 1083 1084 size += nrun_size; 1085 run_pages += nrun_pages; 1086 1087 arena_mapbits_unallocated_size_set(chunk, run_ind, size); 1088 arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1, 1089 size); 1090 } 1091 1092 /* Try to coalesce backward. */ 1093 if (run_ind > map_bias && arena_mapbits_allocated_get(chunk, 1094 run_ind-1) == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) == 1095 flag_dirty) { 1096 size_t prun_size = arena_mapbits_unallocated_size_get(chunk, 1097 run_ind-1); 1098 size_t prun_pages = prun_size >> LG_PAGE; 1099 1100 run_ind -= prun_pages; 1101 1102 /* 1103 * Remove predecessor from runs_avail; the coalesced run is 1104 * inserted later. 1105 */ 1106 assert(arena_mapbits_unallocated_size_get(chunk, run_ind) == 1107 prun_size); 1108 assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty); 1109 arena_avail_remove(arena, chunk, run_ind, prun_pages); 1110 1111 /* If the predecessor is dirty, remove it from runs_dirty. */ 1112 if (flag_dirty != 0) 1113 arena_dirty_remove(arena, chunk, run_ind, prun_pages); 1114 1115 size += prun_size; 1116 run_pages += prun_pages; 1117 1118 arena_mapbits_unallocated_size_set(chunk, run_ind, size); 1119 arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1, 1120 size); 1121 } 1122 1123 *p_size = size; 1124 *p_run_ind = run_ind; 1125 *p_run_pages = run_pages; 1126} 1127 1128static void 1129arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned) 1130{ 1131 arena_chunk_t *chunk; 1132 arena_chunk_map_misc_t *miscelm; 1133 size_t size, run_ind, run_pages, flag_dirty; 1134 1135 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); 1136 miscelm = arena_run_to_miscelm(run); 1137 run_ind = arena_miscelm_to_pageind(miscelm); 1138 assert(run_ind >= map_bias); 1139 assert(run_ind < chunk_npages); 1140 if (arena_mapbits_large_get(chunk, run_ind) != 0) { 1141 size = arena_mapbits_large_size_get(chunk, run_ind); 1142 assert(size == PAGE || 1143 arena_mapbits_large_size_get(chunk, 1144 run_ind+(size>>LG_PAGE)-1) == 0); 1145 } else { 1146 arena_bin_info_t *bin_info = &arena_bin_info[run->binind]; 1147 size = bin_info->run_size; 1148 } 1149 run_pages = (size >> LG_PAGE); 1150 arena_cactive_update(arena, 0, run_pages); 1151 arena->nactive -= run_pages; 1152 1153 /* 1154 * The run is dirty if the caller claims to have dirtied it, as well as 1155 * if it was already dirty before being allocated and the caller 1156 * doesn't claim to have cleaned it. 1157 */ 1158 assert(arena_mapbits_dirty_get(chunk, run_ind) == 1159 arena_mapbits_dirty_get(chunk, run_ind+run_pages-1)); 1160 if (!cleaned && arena_mapbits_dirty_get(chunk, run_ind) != 0) 1161 dirty = true; 1162 flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0; 1163 1164 /* Mark pages as unallocated in the chunk map. */ 1165 if (dirty) { 1166 arena_mapbits_unallocated_set(chunk, run_ind, size, 1167 CHUNK_MAP_DIRTY); 1168 arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size, 1169 CHUNK_MAP_DIRTY); 1170 } else { 1171 arena_mapbits_unallocated_set(chunk, run_ind, size, 1172 arena_mapbits_unzeroed_get(chunk, run_ind)); 1173 arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size, 1174 arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1)); 1175 } 1176 1177 arena_run_coalesce(arena, chunk, &size, &run_ind, &run_pages, flag_dirty); 1178 1179 /* Insert into runs_avail, now that coalescing is complete. */ 1180 assert(arena_mapbits_unallocated_size_get(chunk, run_ind) == 1181 arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1)); 1182 assert(arena_mapbits_dirty_get(chunk, run_ind) == 1183 arena_mapbits_dirty_get(chunk, run_ind+run_pages-1)); 1184 arena_avail_insert(arena, chunk, run_ind, run_pages); 1185 1186 if (dirty) 1187 arena_dirty_insert(arena, chunk, run_ind, run_pages); 1188 1189 /* Deallocate chunk if it is now completely unused. */ 1190 if (size == arena_maxrun) { 1191 assert(run_ind == map_bias); 1192 assert(run_pages == (arena_maxrun >> LG_PAGE)); 1193 arena_chunk_dalloc(arena, chunk); 1194 } 1195 1196 /* 1197 * It is okay to do dirty page processing here even if the chunk was 1198 * deallocated above, since in that case it is the spare. Waiting 1199 * until after possible chunk deallocation to do dirty processing 1200 * allows for an old spare to be fully deallocated, thus decreasing the 1201 * chances of spuriously crossing the dirty page purging threshold. 1202 */ 1203 if (dirty) 1204 arena_maybe_purge(arena); 1205} 1206 1207static void 1208arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, 1209 size_t oldsize, size_t newsize) 1210{ 1211 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); 1212 size_t pageind = arena_miscelm_to_pageind(miscelm); 1213 size_t head_npages = (oldsize - newsize) >> LG_PAGE; 1214 size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind); 1215 1216 assert(oldsize > newsize); 1217 1218 /* 1219 * Update the chunk map so that arena_run_dalloc() can treat the 1220 * leading run as separately allocated. Set the last element of each 1221 * run first, in case of single-page runs. 1222 */ 1223 assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize); 1224 arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty); 1225 arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty); 1226 1227 if (config_debug) { 1228 UNUSED size_t tail_npages = newsize >> LG_PAGE; 1229 assert(arena_mapbits_large_size_get(chunk, 1230 pageind+head_npages+tail_npages-1) == 0); 1231 assert(arena_mapbits_dirty_get(chunk, 1232 pageind+head_npages+tail_npages-1) == flag_dirty); 1233 } 1234 arena_mapbits_large_set(chunk, pageind+head_npages, newsize, 1235 flag_dirty); 1236 1237 arena_run_dalloc(arena, run, false, false); 1238} 1239 1240static void 1241arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, 1242 size_t oldsize, size_t newsize, bool dirty) 1243{ 1244 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); 1245 size_t pageind = arena_miscelm_to_pageind(miscelm); 1246 size_t head_npages = newsize >> LG_PAGE; 1247 size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind); 1248 arena_chunk_map_misc_t *tail_miscelm; 1249 arena_run_t *tail_run; 1250 1251 assert(oldsize > newsize); 1252 1253 /* 1254 * Update the chunk map so that arena_run_dalloc() can treat the 1255 * trailing run as separately allocated. Set the last element of each 1256 * run first, in case of single-page runs. 1257 */ 1258 assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize); 1259 arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty); 1260 arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty); 1261 1262 if (config_debug) { 1263 UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE; 1264 assert(arena_mapbits_large_size_get(chunk, 1265 pageind+head_npages+tail_npages-1) == 0); 1266 assert(arena_mapbits_dirty_get(chunk, 1267 pageind+head_npages+tail_npages-1) == flag_dirty); 1268 } 1269 arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize, 1270 flag_dirty); 1271 1272 tail_miscelm = arena_miscelm_get(chunk, pageind + head_npages); 1273 tail_run = &tail_miscelm->run; 1274 arena_run_dalloc(arena, tail_run, dirty, false); 1275} 1276 1277static arena_run_t * 1278arena_bin_runs_first(arena_bin_t *bin) 1279{ 1280 arena_chunk_map_misc_t *miscelm = arena_run_tree_first(&bin->runs); 1281 if (miscelm != NULL) 1282 return (&miscelm->run); 1283 1284 return (NULL); 1285} 1286 1287static void 1288arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run) 1289{ 1290 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); 1291 1292 assert(arena_run_tree_search(&bin->runs, miscelm) == NULL); 1293 1294 arena_run_tree_insert(&bin->runs, miscelm); 1295} 1296 1297static void 1298arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run) 1299{ 1300 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); 1301 1302 assert(arena_run_tree_search(&bin->runs, miscelm) != NULL); 1303 1304 arena_run_tree_remove(&bin->runs, miscelm); 1305} 1306 1307static arena_run_t * 1308arena_bin_nonfull_run_tryget(arena_bin_t *bin) 1309{ 1310 arena_run_t *run = arena_bin_runs_first(bin); 1311 if (run != NULL) { 1312 arena_bin_runs_remove(bin, run); 1313 if (config_stats) 1314 bin->stats.reruns++; 1315 } 1316 return (run); 1317} 1318 1319static arena_run_t * 1320arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin) 1321{ 1322 arena_run_t *run; 1323 index_t binind; 1324 arena_bin_info_t *bin_info; 1325 1326 /* Look for a usable run. */ 1327 run = arena_bin_nonfull_run_tryget(bin); 1328 if (run != NULL) 1329 return (run); 1330 /* No existing runs have any space available. */ 1331 1332 binind = arena_bin_index(arena, bin); 1333 bin_info = &arena_bin_info[binind]; 1334 1335 /* Allocate a new run. */ 1336 malloc_mutex_unlock(&bin->lock); 1337 /******************************/ 1338 malloc_mutex_lock(&arena->lock); 1339 run = arena_run_alloc_small(arena, bin_info->run_size, binind); 1340 if (run != NULL) { 1341 /* Initialize run internals. */ 1342 run->binind = binind; 1343 run->nfree = bin_info->nregs; 1344 bitmap_init(run->bitmap, &bin_info->bitmap_info); 1345 } 1346 malloc_mutex_unlock(&arena->lock); 1347 /********************************/ 1348 malloc_mutex_lock(&bin->lock); 1349 if (run != NULL) { 1350 if (config_stats) { 1351 bin->stats.nruns++; 1352 bin->stats.curruns++; 1353 } 1354 return (run); 1355 } 1356 1357 /* 1358 * arena_run_alloc_small() failed, but another thread may have made 1359 * sufficient memory available while this one dropped bin->lock above, 1360 * so search one more time. 1361 */ 1362 run = arena_bin_nonfull_run_tryget(bin); 1363 if (run != NULL) 1364 return (run); 1365 1366 return (NULL); 1367} 1368 1369/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */ 1370static void * 1371arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin) 1372{ 1373 void *ret; 1374 index_t binind; 1375 arena_bin_info_t *bin_info; 1376 arena_run_t *run; 1377 1378 binind = arena_bin_index(arena, bin); 1379 bin_info = &arena_bin_info[binind]; 1380 bin->runcur = NULL; 1381 run = arena_bin_nonfull_run_get(arena, bin); 1382 if (bin->runcur != NULL && bin->runcur->nfree > 0) { 1383 /* 1384 * Another thread updated runcur while this one ran without the 1385 * bin lock in arena_bin_nonfull_run_get(). 1386 */ 1387 assert(bin->runcur->nfree > 0); 1388 ret = arena_run_reg_alloc(bin->runcur, bin_info); 1389 if (run != NULL) { 1390 arena_chunk_t *chunk; 1391 1392 /* 1393 * arena_run_alloc_small() may have allocated run, or 1394 * it may have pulled run from the bin's run tree. 1395 * Therefore it is unsafe to make any assumptions about 1396 * how run has previously been used, and 1397 * arena_bin_lower_run() must be called, as if a region 1398 * were just deallocated from the run. 1399 */ 1400 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); 1401 if (run->nfree == bin_info->nregs) 1402 arena_dalloc_bin_run(arena, chunk, run, bin); 1403 else 1404 arena_bin_lower_run(arena, chunk, run, bin); 1405 } 1406 return (ret); 1407 } 1408 1409 if (run == NULL) 1410 return (NULL); 1411 1412 bin->runcur = run; 1413 1414 assert(bin->runcur->nfree > 0); 1415 1416 return (arena_run_reg_alloc(bin->runcur, bin_info)); 1417} 1418 1419void 1420arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, index_t binind, 1421 uint64_t prof_accumbytes) 1422{ 1423 unsigned i, nfill; 1424 arena_bin_t *bin; 1425 arena_run_t *run; 1426 void *ptr; 1427 1428 assert(tbin->ncached == 0); 1429 1430 if (config_prof && arena_prof_accum(arena, prof_accumbytes)) 1431 prof_idump(); 1432 bin = &arena->bins[binind]; 1433 malloc_mutex_lock(&bin->lock); 1434 for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >> 1435 tbin->lg_fill_div); i < nfill; i++) { 1436 if ((run = bin->runcur) != NULL && run->nfree > 0) 1437 ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]); 1438 else 1439 ptr = arena_bin_malloc_hard(arena, bin); 1440 if (ptr == NULL) { 1441 /* 1442 * OOM. tbin->avail isn't yet filled down to its first 1443 * element, so the successful allocations (if any) must 1444 * be moved to the base of tbin->avail before bailing 1445 * out. 1446 */ 1447 if (i > 0) { 1448 memmove(tbin->avail, &tbin->avail[nfill - i], 1449 i * sizeof(void *)); 1450 } 1451 break; 1452 } 1453 if (config_fill && unlikely(opt_junk_alloc)) { 1454 arena_alloc_junk_small(ptr, &arena_bin_info[binind], 1455 true); 1456 } 1457 /* Insert such that low regions get used first. */ 1458 tbin->avail[nfill - 1 - i] = ptr; 1459 } 1460 if (config_stats) { 1461 bin->stats.nmalloc += i; 1462 bin->stats.nrequests += tbin->tstats.nrequests; 1463 bin->stats.curregs += i; 1464 bin->stats.nfills++; 1465 tbin->tstats.nrequests = 0; 1466 } 1467 malloc_mutex_unlock(&bin->lock); 1468 tbin->ncached = i; 1469} 1470 1471void 1472arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero) 1473{ 1474 1475 if (zero) { 1476 size_t redzone_size = bin_info->redzone_size; 1477 memset((void *)((uintptr_t)ptr - redzone_size), 0xa5, 1478 redzone_size); 1479 memset((void *)((uintptr_t)ptr + bin_info->reg_size), 0xa5, 1480 redzone_size); 1481 } else { 1482 memset((void *)((uintptr_t)ptr - bin_info->redzone_size), 0xa5, 1483 bin_info->reg_interval); 1484 } 1485} 1486 1487#ifdef JEMALLOC_JET 1488#undef arena_redzone_corruption 1489#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption_impl) 1490#endif 1491static void 1492arena_redzone_corruption(void *ptr, size_t usize, bool after, 1493 size_t offset, uint8_t byte) 1494{ 1495 1496 malloc_printf("<jemalloc>: Corrupt redzone %zu byte%s %s %p " 1497 "(size %zu), byte=%#x\n", offset, (offset == 1) ? "" : "s", 1498 after ? "after" : "before", ptr, usize, byte); 1499} 1500#ifdef JEMALLOC_JET 1501#undef arena_redzone_corruption 1502#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption) 1503arena_redzone_corruption_t *arena_redzone_corruption = 1504 JEMALLOC_N(arena_redzone_corruption_impl); 1505#endif 1506 1507static void 1508arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset) 1509{ 1510 size_t size = bin_info->reg_size; 1511 size_t redzone_size = bin_info->redzone_size; 1512 size_t i; 1513 bool error = false; 1514 1515 if (opt_junk_alloc) { 1516 for (i = 1; i <= redzone_size; i++) { 1517 uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i); 1518 if (*byte != 0xa5) { 1519 error = true; 1520 arena_redzone_corruption(ptr, size, false, i, *byte); 1521 if (reset) 1522 *byte = 0xa5; 1523 } 1524 } 1525 for (i = 0; i < redzone_size; i++) { 1526 uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i); 1527 if (*byte != 0xa5) { 1528 error = true; 1529 arena_redzone_corruption(ptr, size, true, i, *byte); 1530 if (reset) 1531 *byte = 0xa5; 1532 } 1533 } 1534 } 1535 1536 if (opt_abort && error) 1537 abort(); 1538} 1539 1540#ifdef JEMALLOC_JET 1541#undef arena_dalloc_junk_small 1542#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small_impl) 1543#endif 1544void 1545arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info) 1546{ 1547 size_t redzone_size = bin_info->redzone_size; 1548 1549 arena_redzones_validate(ptr, bin_info, false); 1550 memset((void *)((uintptr_t)ptr - redzone_size), 0x5a, 1551 bin_info->reg_interval); 1552} 1553#ifdef JEMALLOC_JET 1554#undef arena_dalloc_junk_small 1555#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small) 1556arena_dalloc_junk_small_t *arena_dalloc_junk_small = 1557 JEMALLOC_N(arena_dalloc_junk_small_impl); 1558#endif 1559 1560void 1561arena_quarantine_junk_small(void *ptr, size_t usize) 1562{ 1563 index_t binind; 1564 arena_bin_info_t *bin_info; 1565 cassert(config_fill); 1566 assert(opt_junk_free); 1567 assert(opt_quarantine); 1568 assert(usize <= SMALL_MAXCLASS); 1569 1570 binind = size2index(usize); 1571 bin_info = &arena_bin_info[binind]; 1572 arena_redzones_validate(ptr, bin_info, true); 1573} 1574 1575void * 1576arena_malloc_small(arena_t *arena, size_t size, bool zero) 1577{ 1578 void *ret; 1579 arena_bin_t *bin; 1580 arena_run_t *run; 1581 index_t binind; 1582 1583 binind = size2index(size); 1584 assert(binind < NBINS); 1585 bin = &arena->bins[binind]; 1586 size = index2size(binind); 1587 1588 malloc_mutex_lock(&bin->lock); 1589 if ((run = bin->runcur) != NULL && run->nfree > 0) 1590 ret = arena_run_reg_alloc(run, &arena_bin_info[binind]); 1591 else 1592 ret = arena_bin_malloc_hard(arena, bin); 1593 1594 if (ret == NULL) { 1595 malloc_mutex_unlock(&bin->lock); 1596 return (NULL); 1597 } 1598 1599 if (config_stats) { 1600 bin->stats.nmalloc++; 1601 bin->stats.nrequests++; 1602 bin->stats.curregs++; 1603 } 1604 malloc_mutex_unlock(&bin->lock); 1605 if (config_prof && !isthreaded && arena_prof_accum(arena, size)) 1606 prof_idump(); 1607 1608 if (!zero) { 1609 if (config_fill) { 1610 if (unlikely(opt_junk_alloc)) { 1611 arena_alloc_junk_small(ret, 1612 &arena_bin_info[binind], false); 1613 } else if (unlikely(opt_zero)) 1614 memset(ret, 0, size); 1615 } 1616 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size); 1617 } else { 1618 if (config_fill && unlikely(opt_junk_alloc)) { 1619 arena_alloc_junk_small(ret, &arena_bin_info[binind], 1620 true); 1621 } 1622 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size); 1623 memset(ret, 0, size); 1624 } 1625 1626 return (ret); 1627} 1628 1629void * 1630arena_malloc_large(arena_t *arena, size_t size, bool zero) 1631{ 1632 void *ret; 1633 size_t usize; 1634 arena_run_t *run; 1635 arena_chunk_map_misc_t *miscelm; 1636 UNUSED bool idump; 1637 1638 /* Large allocation. */ 1639 usize = s2u(size); 1640 malloc_mutex_lock(&arena->lock); 1641 run = arena_run_alloc_large(arena, usize, zero); 1642 if (run == NULL) { 1643 malloc_mutex_unlock(&arena->lock); 1644 return (NULL); 1645 } 1646 miscelm = arena_run_to_miscelm(run); 1647 ret = arena_miscelm_to_rpages(miscelm); 1648 if (config_stats) { 1649 index_t index = size2index(usize) - NBINS; 1650 1651 arena->stats.nmalloc_large++; 1652 arena->stats.nrequests_large++; 1653 arena->stats.allocated_large += usize; 1654 arena->stats.lstats[index].nmalloc++; 1655 arena->stats.lstats[index].nrequests++; 1656 arena->stats.lstats[index].curruns++; 1657 } 1658 if (config_prof) 1659 idump = arena_prof_accum_locked(arena, usize); 1660 malloc_mutex_unlock(&arena->lock); 1661 if (config_prof && idump) 1662 prof_idump(); 1663 1664 if (!zero) { 1665 if (config_fill) { 1666 if (unlikely(opt_junk_alloc)) 1667 memset(ret, 0xa5, usize); 1668 else if (unlikely(opt_zero)) 1669 memset(ret, 0, usize); 1670 } 1671 } 1672 1673 return (ret); 1674} 1675 1676/* Only handles large allocations that require more than page alignment. */ 1677void * 1678arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero) 1679{ 1680 void *ret; 1681 size_t alloc_size, leadsize, trailsize; 1682 arena_run_t *run; 1683 arena_chunk_t *chunk; 1684 arena_chunk_map_misc_t *miscelm; 1685 void *rpages; 1686 1687 assert((size & PAGE_MASK) == 0); 1688 1689 alignment = PAGE_CEILING(alignment); 1690 alloc_size = size + alignment - PAGE; 1691 1692 malloc_mutex_lock(&arena->lock); 1693 run = arena_run_alloc_large(arena, alloc_size, false); 1694 if (run == NULL) { 1695 malloc_mutex_unlock(&arena->lock); 1696 return (NULL); 1697 } 1698 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); 1699 miscelm = arena_run_to_miscelm(run); 1700 rpages = arena_miscelm_to_rpages(miscelm); 1701 1702 leadsize = ALIGNMENT_CEILING((uintptr_t)rpages, alignment) - 1703 (uintptr_t)rpages; 1704 assert(alloc_size >= leadsize + size); 1705 trailsize = alloc_size - leadsize - size; 1706 if (leadsize != 0) { 1707 arena_chunk_map_misc_t *head_miscelm = miscelm; 1708 arena_run_t *head_run = run; 1709 1710 miscelm = arena_miscelm_get(chunk, 1711 arena_miscelm_to_pageind(head_miscelm) + (leadsize >> 1712 LG_PAGE)); 1713 run = &miscelm->run; 1714 1715 arena_run_trim_head(arena, chunk, head_run, alloc_size, 1716 alloc_size - leadsize); 1717 } 1718 if (trailsize != 0) { 1719 arena_run_trim_tail(arena, chunk, run, size + trailsize, size, 1720 false); 1721 } 1722 arena_run_init_large(arena, run, size, zero); 1723 ret = arena_miscelm_to_rpages(miscelm); 1724 1725 if (config_stats) { 1726 index_t index = size2index(size) - NBINS; 1727 1728 arena->stats.nmalloc_large++; 1729 arena->stats.nrequests_large++; 1730 arena->stats.allocated_large += size; 1731 arena->stats.lstats[index].nmalloc++; 1732 arena->stats.lstats[index].nrequests++; 1733 arena->stats.lstats[index].curruns++; 1734 } 1735 malloc_mutex_unlock(&arena->lock); 1736 1737 if (config_fill && !zero) { 1738 if (unlikely(opt_junk_alloc)) 1739 memset(ret, 0xa5, size); 1740 else if (unlikely(opt_zero)) 1741 memset(ret, 0, size); 1742 } 1743 return (ret); 1744} 1745 1746void 1747arena_prof_promoted(const void *ptr, size_t size) 1748{ 1749 arena_chunk_t *chunk; 1750 size_t pageind; 1751 index_t binind; 1752 1753 cassert(config_prof); 1754 assert(ptr != NULL); 1755 assert(CHUNK_ADDR2BASE(ptr) != ptr); 1756 assert(isalloc(ptr, false) == LARGE_MINCLASS); 1757 assert(isalloc(ptr, true) == LARGE_MINCLASS); 1758 assert(size <= SMALL_MAXCLASS); 1759 1760 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); 1761 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; 1762 binind = size2index(size); 1763 assert(binind < NBINS); 1764 arena_mapbits_large_binind_set(chunk, pageind, binind); 1765 1766 assert(isalloc(ptr, false) == LARGE_MINCLASS); 1767 assert(isalloc(ptr, true) == size); 1768} 1769 1770static void 1771arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run, 1772 arena_bin_t *bin) 1773{ 1774 1775 /* Dissociate run from bin. */ 1776 if (run == bin->runcur) 1777 bin->runcur = NULL; 1778 else { 1779 index_t binind = arena_bin_index(chunk->arena, bin); 1780 arena_bin_info_t *bin_info = &arena_bin_info[binind]; 1781 1782 if (bin_info->nregs != 1) { 1783 /* 1784 * This block's conditional is necessary because if the 1785 * run only contains one region, then it never gets 1786 * inserted into the non-full runs tree. 1787 */ 1788 arena_bin_runs_remove(bin, run); 1789 } 1790 } 1791} 1792 1793static void 1794arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, 1795 arena_bin_t *bin) 1796{ 1797 1798 assert(run != bin->runcur); 1799 assert(arena_run_tree_search(&bin->runs, arena_run_to_miscelm(run)) == 1800 NULL); 1801 1802 malloc_mutex_unlock(&bin->lock); 1803 /******************************/ 1804 malloc_mutex_lock(&arena->lock); 1805 arena_run_dalloc(arena, run, true, false); 1806 malloc_mutex_unlock(&arena->lock); 1807 /****************************/ 1808 malloc_mutex_lock(&bin->lock); 1809 if (config_stats) 1810 bin->stats.curruns--; 1811} 1812 1813static void 1814arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, 1815 arena_bin_t *bin) 1816{ 1817 1818 /* 1819 * Make sure that if bin->runcur is non-NULL, it refers to the lowest 1820 * non-full run. It is okay to NULL runcur out rather than proactively 1821 * keeping it pointing at the lowest non-full run. 1822 */ 1823 if ((uintptr_t)run < (uintptr_t)bin->runcur) { 1824 /* Switch runcur. */ 1825 if (bin->runcur->nfree > 0) 1826 arena_bin_runs_insert(bin, bin->runcur); 1827 bin->runcur = run; 1828 if (config_stats) 1829 bin->stats.reruns++; 1830 } else 1831 arena_bin_runs_insert(bin, run); 1832} 1833 1834static void 1835arena_dalloc_bin_locked_impl(arena_t *arena, arena_chunk_t *chunk, void *ptr, 1836 arena_chunk_map_bits_t *bitselm, bool junked) 1837{ 1838 size_t pageind, rpages_ind; 1839 arena_run_t *run; 1840 arena_bin_t *bin; 1841 arena_bin_info_t *bin_info; 1842 index_t binind; 1843 1844 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; 1845 rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind); 1846 run = &arena_miscelm_get(chunk, rpages_ind)->run; 1847 binind = run->binind; 1848 bin = &arena->bins[binind]; 1849 bin_info = &arena_bin_info[binind]; 1850 1851 if (!junked && config_fill && unlikely(opt_junk_free)) 1852 arena_dalloc_junk_small(ptr, bin_info); 1853 1854 arena_run_reg_dalloc(run, ptr); 1855 if (run->nfree == bin_info->nregs) { 1856 arena_dissociate_bin_run(chunk, run, bin); 1857 arena_dalloc_bin_run(arena, chunk, run, bin); 1858 } else if (run->nfree == 1 && run != bin->runcur) 1859 arena_bin_lower_run(arena, chunk, run, bin); 1860 1861 if (config_stats) { 1862 bin->stats.ndalloc++; 1863 bin->stats.curregs--; 1864 } 1865} 1866 1867void 1868arena_dalloc_bin_junked_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr, 1869 arena_chunk_map_bits_t *bitselm) 1870{ 1871 1872 arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, true); 1873} 1874 1875void 1876arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr, 1877 size_t pageind, arena_chunk_map_bits_t *bitselm) 1878{ 1879 arena_run_t *run; 1880 arena_bin_t *bin; 1881 size_t rpages_ind; 1882 1883 rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind); 1884 run = &arena_miscelm_get(chunk, rpages_ind)->run; 1885 bin = &arena->bins[run->binind]; 1886 malloc_mutex_lock(&bin->lock); 1887 arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, false); 1888 malloc_mutex_unlock(&bin->lock); 1889} 1890 1891void 1892arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr, 1893 size_t pageind) 1894{ 1895 arena_chunk_map_bits_t *bitselm; 1896 1897 if (config_debug) { 1898 /* arena_ptr_small_binind_get() does extra sanity checking. */ 1899 assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk, 1900 pageind)) != BININD_INVALID); 1901 } 1902 bitselm = arena_bitselm_get(chunk, pageind); 1903 arena_dalloc_bin(arena, chunk, ptr, pageind, bitselm); 1904} 1905 1906#ifdef JEMALLOC_JET 1907#undef arena_dalloc_junk_large 1908#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large_impl) 1909#endif 1910void 1911arena_dalloc_junk_large(void *ptr, size_t usize) 1912{ 1913 1914 if (config_fill && unlikely(opt_junk_free)) 1915 memset(ptr, 0x5a, usize); 1916} 1917#ifdef JEMALLOC_JET 1918#undef arena_dalloc_junk_large 1919#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large) 1920arena_dalloc_junk_large_t *arena_dalloc_junk_large = 1921 JEMALLOC_N(arena_dalloc_junk_large_impl); 1922#endif 1923 1924void 1925arena_dalloc_large_locked_impl(arena_t *arena, arena_chunk_t *chunk, 1926 void *ptr, bool junked) 1927{ 1928 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; 1929 arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind); 1930 arena_run_t *run = &miscelm->run; 1931 1932 if (config_fill || config_stats) { 1933 size_t usize = arena_mapbits_large_size_get(chunk, pageind); 1934 1935 if (!junked) 1936 arena_dalloc_junk_large(ptr, usize); 1937 if (config_stats) { 1938 index_t index = size2index(usize) - NBINS; 1939 1940 arena->stats.ndalloc_large++; 1941 arena->stats.allocated_large -= usize; 1942 arena->stats.lstats[index].ndalloc++; 1943 arena->stats.lstats[index].curruns--; 1944 } 1945 } 1946 1947 arena_run_dalloc(arena, run, true, false); 1948} 1949 1950void 1951arena_dalloc_large_junked_locked(arena_t *arena, arena_chunk_t *chunk, 1952 void *ptr) 1953{ 1954 1955 arena_dalloc_large_locked_impl(arena, chunk, ptr, true); 1956} 1957 1958void 1959arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr) 1960{ 1961 1962 malloc_mutex_lock(&arena->lock); 1963 arena_dalloc_large_locked_impl(arena, chunk, ptr, false); 1964 malloc_mutex_unlock(&arena->lock); 1965} 1966 1967static void 1968arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr, 1969 size_t oldsize, size_t size) 1970{ 1971 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; 1972 arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind); 1973 arena_run_t *run = &miscelm->run; 1974 1975 assert(size < oldsize); 1976 1977 /* 1978 * Shrink the run, and make trailing pages available for other 1979 * allocations. 1980 */ 1981 malloc_mutex_lock(&arena->lock); 1982 arena_run_trim_tail(arena, chunk, run, oldsize, size, true); 1983 if (config_stats) { 1984 index_t oldindex = size2index(oldsize) - NBINS; 1985 index_t index = size2index(size) - NBINS; 1986 1987 arena->stats.ndalloc_large++; 1988 arena->stats.allocated_large -= oldsize; 1989 arena->stats.lstats[oldindex].ndalloc++; 1990 arena->stats.lstats[oldindex].curruns--; 1991 1992 arena->stats.nmalloc_large++; 1993 arena->stats.nrequests_large++; 1994 arena->stats.allocated_large += size; 1995 arena->stats.lstats[index].nmalloc++; 1996 arena->stats.lstats[index].nrequests++; 1997 arena->stats.lstats[index].curruns++; 1998 } 1999 malloc_mutex_unlock(&arena->lock); 2000} 2001 2002static bool 2003arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr, 2004 size_t oldsize, size_t size, size_t extra, bool zero) 2005{ 2006 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; 2007 size_t npages = oldsize >> LG_PAGE; 2008 size_t followsize; 2009 size_t usize_min = s2u(size); 2010 2011 assert(oldsize == arena_mapbits_large_size_get(chunk, pageind)); 2012 2013 /* Try to extend the run. */ 2014 assert(usize_min > oldsize); 2015 malloc_mutex_lock(&arena->lock); 2016 if (pageind + npages < chunk_npages && 2017 arena_mapbits_allocated_get(chunk, pageind+npages) == 0 && 2018 (followsize = arena_mapbits_unallocated_size_get(chunk, 2019 pageind+npages)) >= usize_min - oldsize) { 2020 /* 2021 * The next run is available and sufficiently large. Split the 2022 * following run, then merge the first part with the existing 2023 * allocation. 2024 */ 2025 arena_run_t *run; 2026 size_t flag_dirty, splitsize, usize; 2027 2028 usize = s2u(size + extra); 2029 while (oldsize + followsize < usize) 2030 usize = index2size(size2index(usize)-1); 2031 assert(usize >= usize_min); 2032 splitsize = usize - oldsize; 2033 2034 run = &arena_miscelm_get(chunk, pageind+npages)->run; 2035 arena_run_split_large(arena, run, splitsize, zero); 2036 2037 size = oldsize + splitsize; 2038 npages = size >> LG_PAGE; 2039 2040 /* 2041 * Mark the extended run as dirty if either portion of the run 2042 * was dirty before allocation. This is rather pedantic, 2043 * because there's not actually any sequence of events that 2044 * could cause the resulting run to be passed to 2045 * arena_run_dalloc() with the dirty argument set to false 2046 * (which is when dirty flag consistency would really matter). 2047 */ 2048 flag_dirty = arena_mapbits_dirty_get(chunk, pageind) | 2049 arena_mapbits_dirty_get(chunk, pageind+npages-1); 2050 arena_mapbits_large_set(chunk, pageind, size, flag_dirty); 2051 arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty); 2052 2053 if (config_stats) { 2054 index_t oldindex = size2index(oldsize) - NBINS; 2055 index_t index = size2index(size) - NBINS; 2056 2057 arena->stats.ndalloc_large++; 2058 arena->stats.allocated_large -= oldsize; 2059 arena->stats.lstats[oldindex].ndalloc++; 2060 arena->stats.lstats[oldindex].curruns--; 2061 2062 arena->stats.nmalloc_large++; 2063 arena->stats.nrequests_large++; 2064 arena->stats.allocated_large += size; 2065 arena->stats.lstats[index].nmalloc++; 2066 arena->stats.lstats[index].nrequests++; 2067 arena->stats.lstats[index].curruns++; 2068 } 2069 malloc_mutex_unlock(&arena->lock); 2070 return (false); 2071 } 2072 malloc_mutex_unlock(&arena->lock); 2073 2074 return (true); 2075} 2076 2077#ifdef JEMALLOC_JET 2078#undef arena_ralloc_junk_large 2079#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large_impl) 2080#endif 2081static void 2082arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize) 2083{ 2084 2085 if (config_fill && unlikely(opt_junk_free)) { 2086 memset((void *)((uintptr_t)ptr + usize), 0x5a, 2087 old_usize - usize); 2088 } 2089} 2090#ifdef JEMALLOC_JET 2091#undef arena_ralloc_junk_large 2092#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large) 2093arena_ralloc_junk_large_t *arena_ralloc_junk_large = 2094 JEMALLOC_N(arena_ralloc_junk_large_impl); 2095#endif 2096 2097/* 2098 * Try to resize a large allocation, in order to avoid copying. This will 2099 * always fail if growing an object, and the following run is already in use. 2100 */ 2101static bool 2102arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra, 2103 bool zero) 2104{ 2105 size_t usize; 2106 2107 /* Make sure extra can't cause size_t overflow. */ 2108 if (unlikely(extra >= arena_maxclass)) 2109 return (true); 2110 2111 usize = s2u(size + extra); 2112 if (usize == oldsize) { 2113 /* Same size class. */ 2114 return (false); 2115 } else { 2116 arena_chunk_t *chunk; 2117 arena_t *arena; 2118 2119 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); 2120 arena = chunk->arena; 2121 2122 if (usize < oldsize) { 2123 /* Fill before shrinking in order avoid a race. */ 2124 arena_ralloc_junk_large(ptr, oldsize, usize); 2125 arena_ralloc_large_shrink(arena, chunk, ptr, oldsize, 2126 usize); 2127 return (false); 2128 } else { 2129 bool ret = arena_ralloc_large_grow(arena, chunk, ptr, 2130 oldsize, size, extra, zero); 2131 if (config_fill && !ret && !zero) { 2132 if (unlikely(opt_junk_alloc)) { 2133 memset((void *)((uintptr_t)ptr + 2134 oldsize), 0xa5, isalloc(ptr, 2135 config_prof) - oldsize); 2136 } else if (unlikely(opt_zero)) { 2137 memset((void *)((uintptr_t)ptr + 2138 oldsize), 0, isalloc(ptr, 2139 config_prof) - oldsize); 2140 } 2141 } 2142 return (ret); 2143 } 2144 } 2145} 2146 2147bool 2148arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra, 2149 bool zero) 2150{ 2151 2152 /* 2153 * Avoid moving the allocation if the size class can be left the same. 2154 */ 2155 if (likely(oldsize <= arena_maxclass)) { 2156 if (oldsize <= SMALL_MAXCLASS) { 2157 assert(arena_bin_info[size2index(oldsize)].reg_size 2158 == oldsize); 2159 if ((size + extra <= SMALL_MAXCLASS && size2index(size + 2160 extra) == size2index(oldsize)) || (size <= oldsize 2161 && size + extra >= oldsize)) 2162 return (false); 2163 } else { 2164 assert(size <= arena_maxclass); 2165 if (size + extra > SMALL_MAXCLASS) { 2166 if (!arena_ralloc_large(ptr, oldsize, size, 2167 extra, zero)) 2168 return (false); 2169 } 2170 } 2171 } 2172 2173 /* Reallocation would require a move. */ 2174 return (true); 2175} 2176 2177void * 2178arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size, 2179 size_t extra, size_t alignment, bool zero, bool try_tcache_alloc, 2180 bool try_tcache_dalloc) 2181{ 2182 void *ret; 2183 size_t copysize; 2184 2185 /* Try to avoid moving the allocation. */ 2186 if (!arena_ralloc_no_move(ptr, oldsize, size, extra, zero)) 2187 return (ptr); 2188 2189 /* 2190 * size and oldsize are different enough that we need to move the 2191 * object. In that case, fall back to allocating new space and 2192 * copying. 2193 */ 2194 if (alignment != 0) { 2195 size_t usize = sa2u(size + extra, alignment); 2196 if (usize == 0) 2197 return (NULL); 2198 ret = ipalloct(tsd, usize, alignment, zero, try_tcache_alloc, 2199 arena); 2200 } else { 2201 ret = arena_malloc(tsd, arena, size + extra, zero, 2202 try_tcache_alloc); 2203 } 2204 2205 if (ret == NULL) { 2206 if (extra == 0) 2207 return (NULL); 2208 /* Try again, this time without extra. */ 2209 if (alignment != 0) { 2210 size_t usize = sa2u(size, alignment); 2211 if (usize == 0) 2212 return (NULL); 2213 ret = ipalloct(tsd, usize, alignment, zero, 2214 try_tcache_alloc, arena); 2215 } else { 2216 ret = arena_malloc(tsd, arena, size, zero, 2217 try_tcache_alloc); 2218 } 2219 2220 if (ret == NULL) 2221 return (NULL); 2222 } 2223 2224 /* Junk/zero-filling were already done by ipalloc()/arena_malloc(). */ 2225 2226 /* 2227 * Copy at most size bytes (not size+extra), since the caller has no 2228 * expectation that the extra bytes will be reliably preserved. 2229 */ 2230 copysize = (size < oldsize) ? size : oldsize; 2231 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize); 2232 memcpy(ret, ptr, copysize); 2233 isqalloc(tsd, ptr, oldsize, try_tcache_dalloc); 2234 return (ret); 2235} 2236 2237dss_prec_t 2238arena_dss_prec_get(arena_t *arena) 2239{ 2240 dss_prec_t ret; 2241 2242 malloc_mutex_lock(&arena->lock); 2243 ret = arena->dss_prec; 2244 malloc_mutex_unlock(&arena->lock); 2245 return (ret); 2246} 2247 2248bool 2249arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec) 2250{ 2251 2252 if (!have_dss) 2253 return (dss_prec != dss_prec_disabled); 2254 malloc_mutex_lock(&arena->lock); 2255 arena->dss_prec = dss_prec; 2256 malloc_mutex_unlock(&arena->lock); 2257 return (false); 2258} 2259 2260void 2261arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive, 2262 size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats, 2263 malloc_large_stats_t *lstats, malloc_huge_stats_t *hstats) 2264{ 2265 unsigned i; 2266 2267 malloc_mutex_lock(&arena->lock); 2268 *dss = dss_prec_names[arena->dss_prec]; 2269 *nactive += arena->nactive; 2270 *ndirty += arena->ndirty; 2271 2272 astats->mapped += arena->stats.mapped; 2273 astats->npurge += arena->stats.npurge; 2274 astats->nmadvise += arena->stats.nmadvise; 2275 astats->purged += arena->stats.purged; 2276 astats->allocated_large += arena->stats.allocated_large; 2277 astats->nmalloc_large += arena->stats.nmalloc_large; 2278 astats->ndalloc_large += arena->stats.ndalloc_large; 2279 astats->nrequests_large += arena->stats.nrequests_large; 2280 astats->allocated_huge += arena->stats.allocated_huge; 2281 astats->nmalloc_huge += arena->stats.nmalloc_huge; 2282 astats->ndalloc_huge += arena->stats.ndalloc_huge; 2283 2284 for (i = 0; i < nlclasses; i++) { 2285 lstats[i].nmalloc += arena->stats.lstats[i].nmalloc; 2286 lstats[i].ndalloc += arena->stats.lstats[i].ndalloc; 2287 lstats[i].nrequests += arena->stats.lstats[i].nrequests; 2288 lstats[i].curruns += arena->stats.lstats[i].curruns; 2289 } 2290 2291 for (i = 0; i < nhclasses; i++) { 2292 hstats[i].nmalloc += arena->stats.hstats[i].nmalloc; 2293 hstats[i].ndalloc += arena->stats.hstats[i].ndalloc; 2294 hstats[i].curhchunks += arena->stats.hstats[i].curhchunks; 2295 } 2296 malloc_mutex_unlock(&arena->lock); 2297 2298 for (i = 0; i < NBINS; i++) { 2299 arena_bin_t *bin = &arena->bins[i]; 2300 2301 malloc_mutex_lock(&bin->lock); 2302 bstats[i].nmalloc += bin->stats.nmalloc; 2303 bstats[i].ndalloc += bin->stats.ndalloc; 2304 bstats[i].nrequests += bin->stats.nrequests; 2305 bstats[i].curregs += bin->stats.curregs; 2306 if (config_tcache) { 2307 bstats[i].nfills += bin->stats.nfills; 2308 bstats[i].nflushes += bin->stats.nflushes; 2309 } 2310 bstats[i].nruns += bin->stats.nruns; 2311 bstats[i].reruns += bin->stats.reruns; 2312 bstats[i].curruns += bin->stats.curruns; 2313 malloc_mutex_unlock(&bin->lock); 2314 } 2315} 2316 2317arena_t * 2318arena_new(unsigned ind) 2319{ 2320 arena_t *arena; 2321 unsigned i; 2322 arena_bin_t *bin; 2323 2324 /* 2325 * Allocate arena, arena->lstats, and arena->hstats contiguously, mainly 2326 * because there is no way to clean up if base_alloc() OOMs. 2327 */ 2328 if (config_stats) { 2329 arena = (arena_t *)base_alloc(CACHELINE_CEILING(sizeof(arena_t)) 2330 + QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t) + 2331 nhclasses) * sizeof(malloc_huge_stats_t)); 2332 } else 2333 arena = (arena_t *)base_alloc(sizeof(arena_t)); 2334 if (arena == NULL) 2335 return (NULL); 2336 2337 arena->ind = ind; 2338 arena->nthreads = 0; 2339 arena->chunk_alloc = chunk_alloc_default; 2340 arena->chunk_dalloc = chunk_dalloc_default; 2341 2342 if (malloc_mutex_init(&arena->lock)) 2343 return (NULL); 2344 2345 if (config_stats) { 2346 memset(&arena->stats, 0, sizeof(arena_stats_t)); 2347 arena->stats.lstats = (malloc_large_stats_t *)((uintptr_t)arena 2348 + CACHELINE_CEILING(sizeof(arena_t))); 2349 memset(arena->stats.lstats, 0, nlclasses * 2350 sizeof(malloc_large_stats_t)); 2351 arena->stats.hstats = (malloc_huge_stats_t *)((uintptr_t)arena 2352 + CACHELINE_CEILING(sizeof(arena_t)) + 2353 QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t))); 2354 memset(arena->stats.hstats, 0, nhclasses * 2355 sizeof(malloc_huge_stats_t)); 2356 if (config_tcache) 2357 ql_new(&arena->tcache_ql); 2358 } 2359 2360 if (config_prof) 2361 arena->prof_accumbytes = 0; 2362 2363 arena->dss_prec = chunk_dss_prec_get(); 2364 2365 arena->spare = NULL; 2366 2367 arena->nactive = 0; 2368 arena->ndirty = 0; 2369 2370 arena_avail_tree_new(&arena->runs_avail); 2371 ql_new(&arena->runs_dirty); 2372 2373 /* Initialize bins. */ 2374 for (i = 0; i < NBINS; i++) { 2375 bin = &arena->bins[i]; 2376 if (malloc_mutex_init(&bin->lock)) 2377 return (NULL); 2378 bin->runcur = NULL; 2379 arena_run_tree_new(&bin->runs); 2380 if (config_stats) 2381 memset(&bin->stats, 0, sizeof(malloc_bin_stats_t)); 2382 } 2383 2384 return (arena); 2385} 2386 2387/* 2388 * Calculate bin_info->run_size such that it meets the following constraints: 2389 * 2390 * *) bin_info->run_size <= arena_maxrun 2391 * *) bin_info->nregs <= RUN_MAXREGS 2392 * 2393 * bin_info->nregs and bin_info->reg0_offset are also calculated here, since 2394 * these settings are all interdependent. 2395 */ 2396static void 2397bin_info_run_size_calc(arena_bin_info_t *bin_info) 2398{ 2399 size_t pad_size; 2400 size_t try_run_size, perfect_run_size, actual_run_size; 2401 uint32_t try_nregs, perfect_nregs, actual_nregs; 2402 2403 /* 2404 * Determine redzone size based on minimum alignment and minimum 2405 * redzone size. Add padding to the end of the run if it is needed to 2406 * align the regions. The padding allows each redzone to be half the 2407 * minimum alignment; without the padding, each redzone would have to 2408 * be twice as large in order to maintain alignment. 2409 */ 2410 if (config_fill && unlikely(opt_redzone)) { 2411 size_t align_min = ZU(1) << (jemalloc_ffs(bin_info->reg_size) - 2412 1); 2413 if (align_min <= REDZONE_MINSIZE) { 2414 bin_info->redzone_size = REDZONE_MINSIZE; 2415 pad_size = 0; 2416 } else { 2417 bin_info->redzone_size = align_min >> 1; 2418 pad_size = bin_info->redzone_size; 2419 } 2420 } else { 2421 bin_info->redzone_size = 0; 2422 pad_size = 0; 2423 } 2424 bin_info->reg_interval = bin_info->reg_size + 2425 (bin_info->redzone_size << 1); 2426 2427 /* 2428 * Compute run size under ideal conditions (no redzones, no limit on run 2429 * size). 2430 */ 2431 try_run_size = PAGE; 2432 try_nregs = try_run_size / bin_info->reg_size; 2433 do { 2434 perfect_run_size = try_run_size; 2435 perfect_nregs = try_nregs; 2436 2437 try_run_size += PAGE; 2438 try_nregs = try_run_size / bin_info->reg_size; 2439 } while (perfect_run_size != perfect_nregs * bin_info->reg_size); 2440 assert(perfect_nregs <= RUN_MAXREGS); 2441 2442 actual_run_size = perfect_run_size; 2443 actual_nregs = (actual_run_size - pad_size) / bin_info->reg_interval; 2444 2445 /* 2446 * Redzones can require enough padding that not even a single region can 2447 * fit within the number of pages that would normally be dedicated to a 2448 * run for this size class. Increase the run size until at least one 2449 * region fits. 2450 */ 2451 while (actual_nregs == 0) { 2452 assert(config_fill && unlikely(opt_redzone)); 2453 2454 actual_run_size += PAGE; 2455 actual_nregs = (actual_run_size - pad_size) / 2456 bin_info->reg_interval; 2457 } 2458 2459 /* 2460 * Make sure that the run will fit within an arena chunk. 2461 */ 2462 while (actual_run_size > arena_maxrun) { 2463 actual_run_size -= PAGE; 2464 actual_nregs = (actual_run_size - pad_size) / 2465 bin_info->reg_interval; 2466 } 2467 assert(actual_nregs > 0); 2468 2469 /* Copy final settings. */ 2470 bin_info->run_size = actual_run_size; 2471 bin_info->nregs = actual_nregs; 2472 bin_info->reg0_offset = actual_run_size - (actual_nregs * 2473 bin_info->reg_interval) - pad_size + bin_info->redzone_size; 2474 2475 assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs 2476 * bin_info->reg_interval) + pad_size == bin_info->run_size); 2477} 2478 2479static void 2480bin_info_init(void) 2481{ 2482 arena_bin_info_t *bin_info; 2483 2484#define BIN_INFO_INIT_bin_yes(index, size) \ 2485 bin_info = &arena_bin_info[index]; \ 2486 bin_info->reg_size = size; \ 2487 bin_info_run_size_calc(bin_info); \ 2488 bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs); 2489#define BIN_INFO_INIT_bin_no(index, size) 2490#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \ 2491 BIN_INFO_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)) 2492 SIZE_CLASSES 2493#undef BIN_INFO_INIT_bin_yes 2494#undef BIN_INFO_INIT_bin_no 2495#undef SC 2496} 2497 2498void 2499arena_boot(void) 2500{ 2501 size_t header_size; 2502 unsigned i; 2503 2504 /* 2505 * Compute the header size such that it is large enough to contain the 2506 * page map. The page map is biased to omit entries for the header 2507 * itself, so some iteration is necessary to compute the map bias. 2508 * 2509 * 1) Compute safe header_size and map_bias values that include enough 2510 * space for an unbiased page map. 2511 * 2) Refine map_bias based on (1) to omit the header pages in the page 2512 * map. The resulting map_bias may be one too small. 2513 * 3) Refine map_bias based on (2). The result will be >= the result 2514 * from (2), and will always be correct. 2515 */ 2516 map_bias = 0; 2517 for (i = 0; i < 3; i++) { 2518 header_size = offsetof(arena_chunk_t, map_bits) + 2519 ((sizeof(arena_chunk_map_bits_t) + 2520 sizeof(arena_chunk_map_misc_t)) * (chunk_npages-map_bias)); 2521 map_bias = (header_size + PAGE_MASK) >> LG_PAGE; 2522 } 2523 assert(map_bias > 0); 2524 2525 map_misc_offset = offsetof(arena_chunk_t, map_bits) + 2526 sizeof(arena_chunk_map_bits_t) * (chunk_npages-map_bias); 2527 2528 arena_maxrun = chunksize - (map_bias << LG_PAGE); 2529 assert(arena_maxrun > 0); 2530 arena_maxclass = index2size(size2index(chunksize)-1); 2531 if (arena_maxclass > arena_maxrun) { 2532 /* 2533 * For small chunk sizes it's possible for there to be fewer 2534 * non-header pages available than are necessary to serve the 2535 * size classes just below chunksize. 2536 */ 2537 arena_maxclass = arena_maxrun; 2538 } 2539 assert(arena_maxclass > 0); 2540 nlclasses = size2index(arena_maxclass) - size2index(SMALL_MAXCLASS); 2541 nhclasses = NSIZES - nlclasses - NBINS; 2542 2543 bin_info_init(); 2544} 2545 2546void 2547arena_prefork(arena_t *arena) 2548{ 2549 unsigned i; 2550 2551 malloc_mutex_prefork(&arena->lock); 2552 for (i = 0; i < NBINS; i++) 2553 malloc_mutex_prefork(&arena->bins[i].lock); 2554} 2555 2556void 2557arena_postfork_parent(arena_t *arena) 2558{ 2559 unsigned i; 2560 2561 for (i = 0; i < NBINS; i++) 2562 malloc_mutex_postfork_parent(&arena->bins[i].lock); 2563 malloc_mutex_postfork_parent(&arena->lock); 2564} 2565 2566void 2567arena_postfork_child(arena_t *arena) 2568{ 2569 unsigned i; 2570 2571 for (i = 0; i < NBINS; i++) 2572 malloc_mutex_postfork_child(&arena->bins[i].lock); 2573 malloc_mutex_postfork_child(&arena->lock); 2574} 2575