jemalloc.c revision dd6ef0302f3980200ed602ec600e211f55e58694
#define JEMALLOC_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

malloc_tsd_data(, arenas, arena_t *, NULL)
malloc_tsd_data(, thread_allocated, thread_allocated_t,
    THREAD_ALLOCATED_INITIALIZER)

/* Runtime configuration options. */
const char	*je_malloc_conf;
bool	opt_abort =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
bool	opt_junk =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
    true
#else
    false
#endif
    ;
size_t	opt_quarantine = ZU(0);
bool	opt_redzone = false;
bool	opt_utrace = false;
bool	opt_valgrind = false;
bool	opt_xmalloc = false;
bool	opt_zero = false;
size_t	opt_narenas = 0;

unsigned	ncpus;

malloc_mutex_t	arenas_lock;
arena_t		**arenas;
unsigned	narenas_total;
unsigned	narenas_auto;

/* Set to true once the allocator has been initialized. */
static bool	malloc_initialized = false;

#ifdef JEMALLOC_THREADED_INIT
/* Used to let the initializing thread recursively allocate. */
# define NO_INITIALIZER	((unsigned long)0)
# define INITIALIZER	pthread_self()
# define IS_INITIALIZER	(malloc_initializer == pthread_self())
static pthread_t	malloc_initializer = NO_INITIALIZER;
#else
# define NO_INITIALIZER	false
# define INITIALIZER	true
# define IS_INITIALIZER	malloc_initializer
static bool	malloc_initializer = NO_INITIALIZER;
#endif

/* Used to avoid initialization races. */
#ifdef _WIN32
static malloc_mutex_t	init_lock;

JEMALLOC_ATTR(constructor)
static void WINAPI
_init_init_lock(void)
{

	malloc_mutex_init(&init_lock);
}

#ifdef _MSC_VER
# pragma section(".CRT$XCU", read)
JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
#endif

#else
static malloc_mutex_t	init_lock = MALLOC_MUTEX_INITIALIZER;
#endif

typedef struct {
	void	*p;	/* Input pointer (as in realloc(p, s)). */
	size_t	s;	/* Request size. */
	void	*r;	/* Result pointer. */
} malloc_utrace_t;

#ifdef JEMALLOC_UTRACE
# define UTRACE(a, b, c) do {						\
	if (opt_utrace) {						\
		int utrace_serrno = errno;				\
		malloc_utrace_t ut;					\
		ut.p = (a);						\
		ut.s = (b);						\
		ut.r = (c);						\
		utrace(&ut, sizeof(ut));				\
		errno = utrace_serrno;					\
	}								\
} while (0)
#else
# define UTRACE(a, b, c)
#endif
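
/*
 * Illustration (based on the call sites below): a successful je_malloc(42)
 * that returns p emits UTRACE(0, 42, p), i.e. a record {p = NULL, s = 42,
 * r = p}, while je_free(p) emits UTRACE(p, 0, 0).  errno is saved and
 * restored around utrace() so that tracing never perturbs the errno value
 * the caller observes.
 */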

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void	stats_print_atexit(void);
static unsigned	malloc_ncpus(void);
static bool	malloc_conf_next(char const **opts_p, char const **k_p,
    size_t *klen_p, char const **v_p, size_t *vlen_p);
static void	malloc_conf_error(const char *msg, const char *k, size_t klen,
    const char *v, size_t vlen);
static void	malloc_conf_init(void);
static bool	malloc_init_hard(void);
static int	imemalign(void **memptr, size_t alignment, size_t size,
    size_t min_alignment);

/******************************************************************************/
/*
 * Begin miscellaneous support functions.
 */

/* Create a new arena and insert it into the arenas array at index ind. */
arena_t *
arenas_extend(unsigned ind)
{
	arena_t *ret;

	ret = (arena_t *)base_alloc(sizeof(arena_t));
	if (ret != NULL && arena_new(ret, ind) == false) {
		arenas[ind] = ret;
		return (ret);
	}
	/* Only reached if there is an OOM error. */

	/*
	 * OOM here is quite inconvenient to propagate, since dealing with it
	 * would require a check for failure in the fast path.  Instead, punt
	 * by using arenas[0].  In practice, this is an extremely unlikely
	 * failure.
	 */
	malloc_write("<jemalloc>: Error initializing arena\n");
	if (opt_abort)
		abort();

	return (arenas[0]);
}

/* Slow path, called only by choose_arena(). */
arena_t *
choose_arena_hard(void)
{
	arena_t *ret;

	if (narenas_auto > 1) {
		unsigned i, choose, first_null;

		choose = 0;
		first_null = narenas_auto;
		malloc_mutex_lock(&arenas_lock);
		assert(arenas[0] != NULL);
		for (i = 1; i < narenas_auto; i++) {
			if (arenas[i] != NULL) {
				/*
				 * Choose the first arena that has the lowest
				 * number of threads assigned to it.
				 */
				if (arenas[i]->nthreads <
				    arenas[choose]->nthreads)
					choose = i;
			} else if (first_null == narenas_auto) {
				/*
				 * Record the index of the first uninitialized
				 * arena, in case all extant arenas are in use.
				 *
				 * NB: It is possible for there to be
				 * discontinuities in terms of initialized
				 * versus uninitialized arenas, due to the
				 * "thread.arena" mallctl.
				 */
				first_null = i;
			}
		}

		if (arenas[choose]->nthreads == 0
		    || first_null == narenas_auto) {
			/*
			 * Use an unloaded arena, or the least loaded arena if
			 * all arenas are already initialized.
			 */
			ret = arenas[choose];
		} else {
			/* Initialize a new arena. */
			ret = arenas_extend(first_null);
		}
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	} else {
		ret = arenas[0];
		malloc_mutex_lock(&arenas_lock);
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	}

	arenas_tsd_set(&ret);

	return (ret);
}
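
/*
 * Example of the policy above: with narenas_auto == 4 and per-arena thread
 * counts {3, 1, 2, <uninitialized>}, the scan picks choose == 1 (lowest
 * load) and first_null == 3; since arenas[1]->nthreads != 0 and an
 * uninitialized slot exists, arenas_extend(3) creates a fresh arena for the
 * calling thread instead of sharing arenas[1].
 */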

static void
stats_print_atexit(void)
{

	if (config_tcache && config_stats) {
		unsigned narenas, i;

		/*
		 * Merge stats from extant threads.  This is racy, since
		 * individual threads do not lock when recording tcache stats
		 * events.  As a consequence, the final stats may be slightly
		 * out of date by the time they are reported, if other threads
		 * continue to allocate.
		 */
		for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
			arena_t *arena = arenas[i];
			if (arena != NULL) {
				tcache_t *tcache;

				/*
				 * tcache_stats_merge() locks bins, so if any
				 * code is introduced that acquires both arena
				 * and bin locks in the opposite order,
				 * deadlocks may result.
				 */
				malloc_mutex_lock(&arena->lock);
				ql_foreach(tcache, &arena->tcache_ql, link) {
					tcache_stats_merge(tcache, arena);
				}
				malloc_mutex_unlock(&arena->lock);
			}
		}
	}
	je_malloc_stats_print(NULL, NULL, NULL);
}

/*
 * End miscellaneous support functions.
 */
/******************************************************************************/
/*
 * Begin initialization functions.
 */

static unsigned
malloc_ncpus(void)
{
	unsigned ret;
	long result;

#ifdef _WIN32
	SYSTEM_INFO si;
	GetSystemInfo(&si);
	result = si.dwNumberOfProcessors;
#else
	result = sysconf(_SC_NPROCESSORS_ONLN);
#endif
	if (result == -1) {
		/* Error. */
		ret = 1;
	} else {
		ret = (unsigned)result;
	}

	return (ret);
}

void
arenas_cleanup(void *arg)
{
	arena_t *arena = *(arena_t **)arg;

	malloc_mutex_lock(&arenas_lock);
	arena->nthreads--;
	malloc_mutex_unlock(&arenas_lock);
}

static JEMALLOC_ATTR(always_inline) void
malloc_thread_init(void)
{

	/*
	 * TSD initialization can't be safely done as a side effect of
	 * deallocation, because it is possible for a thread to do nothing but
	 * deallocate its TLS data via free(), in which case writing to TLS
	 * would cause write-after-free memory corruption.  The quarantine
	 * facility *only* gets used as a side effect of deallocation, so make
	 * a best effort attempt at initializing its TSD by hooking all
	 * allocation events.
	 */
	if (config_fill && opt_quarantine)
		quarantine_alloc_hook();
}

static JEMALLOC_ATTR(always_inline) bool
malloc_init(void)
{

	if (malloc_initialized == false && malloc_init_hard())
		return (true);
	malloc_thread_init();

	return (false);
}

static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p)
{
	bool accept;
	const char *opts = *opts_p;

	*k_p = opts;

	for (accept = false; accept == false;) {
		switch (*opts) {
		case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
		case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
		case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
		case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
		case 'Y': case 'Z':
		case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
		case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
		case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
		case 's': case 't': case 'u': case 'v': case 'w': case 'x':
		case 'y': case 'z':
		case '0': case '1': case '2': case '3': case '4': case '5':
		case '6': case '7': case '8': case '9':
		case '_':
			opts++;
			break;
		case ':':
			opts++;
			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
			*v_p = opts;
			accept = true;
			break;
		case '\0':
			if (opts != *opts_p) {
				malloc_write("<jemalloc>: Conf string ends "
				    "with key\n");
			}
			return (true);
		default:
			malloc_write("<jemalloc>: Malformed conf string\n");
			return (true);
		}
	}

	for (accept = false; accept == false;) {
		switch (*opts) {
		case ',':
			opts++;
			/*
			 * Look ahead one character here, because the next time
			 * this function is called, it will assume that end of
			 * input has been cleanly reached if no input remains,
			 * but we have optimistically already consumed the
			 * comma if one exists.
			 */
			if (*opts == '\0') {
				malloc_write("<jemalloc>: Conf string ends "
				    "with comma\n");
			}
			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
			accept = true;
			break;
		case '\0':
			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
			accept = true;
			break;
		default:
			opts++;
			break;
		}
	}

	*opts_p = opts;
	return (false);
}
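
/*
 * The parser above accepts a comma-separated list of <key>:<value> pairs
 * whose keys are built from [A-Za-z0-9_], e.g. "abort:true,narenas:4" yields
 * the pairs ("abort", "true") and ("narenas", "4").  A trailing key without
 * a value or a trailing comma is diagnosed, but pairs already consumed are
 * still applied.
 */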

static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
    size_t vlen)
{

	malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
	    (int)vlen, v);
}

static void
malloc_conf_init(void)
{
	unsigned i;
	char buf[PATH_MAX + 1];
	const char *opts, *k, *v;
	size_t klen, vlen;

	/*
	 * Automatically configure valgrind before processing options.  The
	 * valgrind option remains in jemalloc 3.x for compatibility reasons.
	 */
	if (config_valgrind) {
		opt_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false;
		if (config_fill && opt_valgrind) {
			opt_junk = false;
			assert(opt_zero == false);
			opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
			opt_redzone = true;
		}
		if (config_tcache && opt_valgrind)
			opt_tcache = false;
	}

	for (i = 0; i < 3; i++) {
		/* Get runtime configuration. */
		switch (i) {
		case 0:
			if (je_malloc_conf != NULL) {
				/*
				 * Use options that were compiled into the
				 * program.
				 */
				opts = je_malloc_conf;
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		case 1: {
			int linklen = 0;
#ifndef _WIN32
			int saved_errno = errno;
			const char *linkname =
# ifdef JEMALLOC_PREFIX
			    "/etc/"JEMALLOC_PREFIX"malloc.conf"
# else
			    "/etc/malloc.conf"
# endif
			    ;

			/*
			 * Try to use the contents of the "/etc/malloc.conf"
			 * symbolic link's name.
			 */
			linklen = readlink(linkname, buf, sizeof(buf) - 1);
			if (linklen == -1) {
				/* No configuration specified. */
				linklen = 0;
				/* restore errno */
				set_errno(saved_errno);
			}
#endif
			buf[linklen] = '\0';
			opts = buf;
			break;
		} case 2: {
			const char *envname =
#ifdef JEMALLOC_PREFIX
			    JEMALLOC_CPREFIX"MALLOC_CONF"
#else
			    "MALLOC_CONF"
#endif
			    ;

			if ((opts = getenv(envname)) != NULL) {
				/*
				 * Do nothing; opts is already initialized to
				 * the value of the MALLOC_CONF environment
				 * variable.
				 */
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		} default:
			/* NOTREACHED */
			assert(false);
			buf[0] = '\0';
			opts = buf;
		}
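
		/*
		 * Example: given the sources above, an administrator can set
		 * system-wide defaults via the symlink target, e.g.
		 *     ln -s 'stats_print:true' /etc/malloc.conf
		 * and a user can override per process via the environment,
		 * e.g. MALLOC_CONF=narenas:1, since later passes (larger i)
		 * re-parse and overwrite values set by earlier ones.
		 */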

		while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
		    &vlen) == false) {
#define	CONF_HANDLE_BOOL(o, n)						\
			if (sizeof(n)-1 == klen && strncmp(n, k,	\
			    klen) == 0) {				\
				if (strncmp("true", v, vlen) == 0 &&	\
				    vlen == sizeof("true")-1)		\
					o = true;			\
				else if (strncmp("false", v, vlen) ==	\
				    0 && vlen == sizeof("false")-1)	\
					o = false;			\
				else {					\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				}					\
				continue;				\
			}
#define	CONF_HANDLE_SIZE_T(o, n, min, max, clip)			\
			if (sizeof(n)-1 == klen && strncmp(n, k,	\
			    klen) == 0) {				\
				uintmax_t um;				\
				char *end;				\
									\
				set_errno(0);				\
				um = malloc_strtoumax(v, &end, 0);	\
				if (get_errno() != 0 || (uintptr_t)end -\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (clip) {			\
					if (um < min)			\
						o = min;		\
					else if (um > max)		\
						o = max;		\
					else				\
						o = um;			\
				} else {				\
					if (um < min || um > max) {	\
						malloc_conf_error(	\
						    "Out-of-range "	\
						    "conf value",	\
						    k, klen, v, vlen);	\
					} else				\
						o = um;			\
				}					\
				continue;				\
			}
#define	CONF_HANDLE_SSIZE_T(o, n, min, max)				\
			if (sizeof(n)-1 == klen && strncmp(n, k,	\
			    klen) == 0) {				\
				long l;					\
				char *end;				\
									\
				set_errno(0);				\
				l = strtol(v, &end, 0);			\
				if (get_errno() != 0 || (uintptr_t)end -\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (l < (ssize_t)min || l >	\
				    (ssize_t)max) {			\
					malloc_conf_error(		\
					    "Out-of-range conf value",	\
					    k, klen, v, vlen);		\
				} else					\
					o = l;				\
				continue;				\
			}
#define	CONF_HANDLE_CHAR_P(o, n, d)					\
			if (sizeof(n)-1 == klen && strncmp(n, k,	\
			    klen) == 0) {				\
				size_t cpylen = (vlen <=		\
				    sizeof(o)-1) ? vlen :		\
				    sizeof(o)-1;			\
				strncpy(o, v, cpylen);			\
				o[cpylen] = '\0';			\
				continue;				\
			}
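
			/*
			 * Each CONF_HANDLE_* use below expands to a key
			 * comparison plus value parsing; e.g.
			 * CONF_HANDLE_BOOL(opt_abort, "abort") matches the
			 * pair abort:true, stores true into opt_abort, and
			 * continues with the next pair.
			 */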

			CONF_HANDLE_BOOL(opt_abort, "abort")
			/*
			 * Chunks always require at least one header page, plus
			 * one data page in the absence of redzones, or three
			 * pages in the presence of redzones.  In order to
			 * simplify options processing, fix the limit based on
			 * config_fill.
			 */
			CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
			    (config_fill ? 2 : 1), (sizeof(size_t) << 3) - 1,
			    true)
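
			/*
			 * Concretely, with 4 KiB pages (LG_PAGE == 12) the
			 * minimum accepted lg_chunk is 14 (a 16 KiB chunk:
			 * the header page plus three redzoned data pages)
			 * when config_fill, else 13 (8 KiB).  Because clip is
			 * true, out-of-range values are clamped to the limit
			 * rather than rejected.
			 */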

			if (strncmp("dss", k, klen) == 0) {
				int i;
				bool match = false;
				for (i = 0; i < dss_prec_limit; i++) {
					if (strncmp(dss_prec_names[i], v, vlen)
					    == 0) {
						if (chunk_dss_prec_set(i)) {
							malloc_conf_error(
							    "Error setting dss",
							    k, klen, v, vlen);
						} else {
							opt_dss =
							    dss_prec_names[i];
							match = true;
							break;
						}
					}
				}
				if (match == false) {
					malloc_conf_error("Invalid conf value",
					    k, klen, v, vlen);
				}
				continue;
			}
			CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1,
			    SIZE_T_MAX, false)
			CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
			    -1, (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
			if (config_fill) {
				CONF_HANDLE_BOOL(opt_junk, "junk")
				CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
				    0, SIZE_T_MAX, false)
				CONF_HANDLE_BOOL(opt_redzone, "redzone")
				CONF_HANDLE_BOOL(opt_zero, "zero")
			}
			if (config_utrace) {
				CONF_HANDLE_BOOL(opt_utrace, "utrace")
			}
			if (config_valgrind) {
				CONF_HANDLE_BOOL(opt_valgrind, "valgrind")
			}
			if (config_xmalloc) {
				CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
			}
			if (config_tcache) {
				CONF_HANDLE_BOOL(opt_tcache, "tcache")
				CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
				    "lg_tcache_max", -1,
				    (sizeof(size_t) << 3) - 1)
			}
			if (config_prof) {
				CONF_HANDLE_BOOL(opt_prof, "prof")
				CONF_HANDLE_CHAR_P(opt_prof_prefix,
				    "prof_prefix", "jeprof")
				CONF_HANDLE_BOOL(opt_prof_active, "prof_active")
				CONF_HANDLE_SSIZE_T(opt_lg_prof_sample,
				    "lg_prof_sample", 0,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum")
				CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
				    "lg_prof_interval", -1,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump")
				CONF_HANDLE_BOOL(opt_prof_final, "prof_final")
				CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak")
			}
			malloc_conf_error("Invalid conf pair", k, klen, v,
			    vlen);
#undef CONF_HANDLE_BOOL
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
		}
	}
}

static bool
malloc_init_hard(void)
{
	arena_t *init_arenas[1];

	malloc_mutex_lock(&init_lock);
	if (malloc_initialized || IS_INITIALIZER) {
		/*
		 * Another thread initialized the allocator before this one
		 * acquired init_lock, or this thread is the initializing
		 * thread, and it is recursively allocating.
		 */
		malloc_mutex_unlock(&init_lock);
		return (false);
	}
#ifdef JEMALLOC_THREADED_INIT
	if (malloc_initializer != NO_INITIALIZER && IS_INITIALIZER == false) {
		/* Busy-wait until the initializing thread completes. */
		do {
			malloc_mutex_unlock(&init_lock);
			CPU_SPINWAIT;
			malloc_mutex_lock(&init_lock);
		} while (malloc_initialized == false);
		malloc_mutex_unlock(&init_lock);
		return (false);
	}
#endif
	malloc_initializer = INITIALIZER;

	malloc_tsd_boot();
	if (config_prof)
		prof_boot0();

	malloc_conf_init();

#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
    && !defined(_WIN32))
	/* Register fork handlers. */
	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
	    jemalloc_postfork_child) != 0) {
		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
		if (opt_abort)
			abort();
	}
#endif

	if (opt_stats_print) {
		/* Print statistics at exit. */
		if (atexit(stats_print_atexit) != 0) {
			malloc_write("<jemalloc>: Error in atexit()\n");
			if (opt_abort)
				abort();
		}
	}

	if (base_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (chunk_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (ctl_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_prof)
		prof_boot1();

	arena_boot();

	if (config_tcache && tcache_boot0()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (huge_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (malloc_mutex_init(&arenas_lock))
		return (true);

	/*
	 * Create enough scaffolding to allow recursive allocation in
	 * malloc_ncpus().
	 */
	narenas_total = narenas_auto = 1;
	arenas = init_arenas;
	memset(arenas, 0, sizeof(arena_t *) * narenas_auto);

	/*
	 * Initialize one arena here.  The rest are lazily created in
	 * choose_arena_hard().
	 */
	arenas_extend(0);
	if (arenas[0] == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/* Initialize allocation counters before any allocations can occur. */
	if (config_stats && thread_allocated_tsd_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (arenas_tsd_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_tcache && tcache_boot1()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_fill && quarantine_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_prof && prof_boot2()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/* Get number of CPUs. */
	malloc_mutex_unlock(&init_lock);
	ncpus = malloc_ncpus();
	malloc_mutex_lock(&init_lock);

	if (mutex_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (opt_narenas == 0) {
		/*
		 * For SMP systems, create more than one arena per CPU by
		 * default.
		 */
		if (ncpus > 1)
			opt_narenas = ncpus << 2;
		else
			opt_narenas = 1;
	}
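
	/*
	 * For example, on an 8-CPU SMP system the default is 8 << 2 == 32
	 * arenas; spreading threads across several arenas per CPU trades
	 * some per-arena overhead for reduced lock contention.
	 */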
	narenas_auto = opt_narenas;
	/*
	 * Make sure that the arenas array can be allocated.  In practice, this
	 * limit is enough to allow the allocator to function, but the ctl
	 * machinery will fail to allocate memory at far lower limits.
	 */
	if (narenas_auto > chunksize / sizeof(arena_t *)) {
		narenas_auto = chunksize / sizeof(arena_t *);
		malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
		    narenas_auto);
	}
	narenas_total = narenas_auto;

	/* Allocate and initialize arenas. */
	arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas_total);
	if (arenas == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}
	/*
	 * Zero the array.  In practice, this should always be pre-zeroed,
	 * since it was just mmap()ed, but let's be sure.
	 */
	memset(arenas, 0, sizeof(arena_t *) * narenas_total);
	/* Copy the pointer to the one arena that was already initialized. */
	arenas[0] = init_arenas[0];

	malloc_initialized = true;
	malloc_mutex_unlock(&init_lock);
	return (false);
}

/*
 * End initialization functions.
 */
/******************************************************************************/
/*
 * Begin malloc(3)-compatible functions.
 */

void *
je_malloc(size_t size)
{
	void *ret;
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

	if (malloc_init()) {
		ret = NULL;
		goto label_oom;
	}

	if (size == 0)
		size = 1;

	if (config_prof && opt_prof) {
		usize = s2u(size);
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL) {
			ret = NULL;
			goto label_oom;
		}
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
		    SMALL_MAXCLASS) {
			ret = imalloc(SMALL_MAXCLASS+1);
			if (ret != NULL)
				arena_prof_promoted(ret, usize);
		} else
			ret = imalloc(size);
	} else {
		if (config_stats || (config_valgrind && opt_valgrind))
			usize = s2u(size);
		ret = imalloc(size);
	}

label_oom:
	if (ret == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in malloc(): "
			    "out of memory\n");
			abort();
		}
		set_errno(ENOMEM);
	}
	if (config_prof && opt_prof && ret != NULL)
		prof_malloc(ret, usize, cnt);
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret, config_prof));
		thread_allocated_tsd_get()->allocated += usize;
	}
	UTRACE(0, size, ret);
	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
	return (ret);
}
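
/*
 * A note on the prof_promote pattern above (it recurs in calloc(), realloc(),
 * and the experimental functions below): when heap profiling decides to
 * sample a small allocation, the request is promoted to SMALL_MAXCLASS+1 so
 * that it is served from a large (page-aligned) run, which gives the
 * profiler a per-allocation place to store its context;
 * arena_prof_promoted() then records the true usize so that size accounting
 * stays exact.
 */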

JEMALLOC_ATTR(nonnull(1))
#ifdef JEMALLOC_PROF
/*
 * Avoid any uncertainty as to how many backtrace frames to ignore in
 * PROF_ALLOC_PREP().
 */
JEMALLOC_NOINLINE
#endif
static int
imemalign(void **memptr, size_t alignment, size_t size,
    size_t min_alignment)
{
	int ret;
	size_t usize;
	void *result;
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

	assert(min_alignment != 0);

	if (malloc_init())
		result = NULL;
	else {
		if (size == 0)
			size = 1;

		/* Make sure that alignment is a large enough power of 2. */
		if (((alignment - 1) & alignment) != 0
		    || (alignment < min_alignment)) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error allocating "
				    "aligned memory: invalid alignment\n");
				abort();
			}
			result = NULL;
			ret = EINVAL;
			goto label_return;
		}

		usize = sa2u(size, alignment);
		if (usize == 0) {
			result = NULL;
			ret = ENOMEM;
			goto label_return;
		}

		if (config_prof && opt_prof) {
			PROF_ALLOC_PREP(2, usize, cnt);
			if (cnt == NULL) {
				result = NULL;
				ret = EINVAL;
			} else {
				if (prof_promote && (uintptr_t)cnt !=
				    (uintptr_t)1U && usize <= SMALL_MAXCLASS) {
					assert(sa2u(SMALL_MAXCLASS+1,
					    alignment) != 0);
					result = ipalloc(sa2u(SMALL_MAXCLASS+1,
					    alignment), alignment, false);
					if (result != NULL) {
						arena_prof_promoted(result,
						    usize);
					}
				} else {
					result = ipalloc(usize, alignment,
					    false);
				}
			}
		} else
			result = ipalloc(usize, alignment, false);
	}

	if (result == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error allocating aligned "
			    "memory: out of memory\n");
			abort();
		}
		ret = ENOMEM;
		goto label_return;
	}

	*memptr = result;
	ret = 0;

label_return:
	if (config_stats && result != NULL) {
		assert(usize == isalloc(result, config_prof));
		thread_allocated_tsd_get()->allocated += usize;
	}
	if (config_prof && opt_prof && result != NULL)
		prof_malloc(result, usize, cnt);
	UTRACE(0, size, result);
	return (ret);
}
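
/*
 * The ((alignment - 1) & alignment) test above is the usual power-of-two
 * check: e.g. alignment 16 gives 0x0f & 0x10 == 0 (accepted), while
 * alignment 24 gives 0x17 & 0x18 == 0x10 != 0 (EINVAL).
 */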

int
je_posix_memalign(void **memptr, size_t alignment, size_t size)
{
	int ret = imemalign(memptr, alignment, size, sizeof(void *));
	JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr,
	    config_prof), false);
	return (ret);
}

void *
je_aligned_alloc(size_t alignment, size_t size)
{
	void *ret;
	int err;

	if ((err = imemalign(&ret, alignment, size, 1)) != 0) {
		ret = NULL;
		set_errno(err);
	}
	JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof),
	    false);
	return (ret);
}

void *
je_calloc(size_t num, size_t size)
{
	void *ret;
	size_t num_size;
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

	if (malloc_init()) {
		num_size = 0;
		ret = NULL;
		goto label_return;
	}

	num_size = num * size;
	if (num_size == 0) {
		if (num == 0 || size == 0)
			num_size = 1;
		else {
			ret = NULL;
			goto label_return;
		}
	/*
	 * Try to avoid division here.  We know that it isn't possible to
	 * overflow during multiplication if neither operand uses any of the
	 * most significant half of the bits in a size_t.
	 */
	} else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
	    && (num_size / size != num)) {
		/* size_t overflow. */
		ret = NULL;
		goto label_return;
	}

	if (config_prof && opt_prof) {
		usize = s2u(num_size);
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL) {
			ret = NULL;
			goto label_return;
		}
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize
		    <= SMALL_MAXCLASS) {
			ret = icalloc(SMALL_MAXCLASS+1);
			if (ret != NULL)
				arena_prof_promoted(ret, usize);
		} else
			ret = icalloc(num_size);
	} else {
		if (config_stats || (config_valgrind && opt_valgrind))
			usize = s2u(num_size);
		ret = icalloc(num_size);
	}

label_return:
	if (ret == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in calloc(): out of "
			    "memory\n");
			abort();
		}
		set_errno(ENOMEM);
	}

	if (config_prof && opt_prof && ret != NULL)
		prof_malloc(ret, usize, cnt);
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret, config_prof));
		thread_allocated_tsd_get()->allocated += usize;
	}
	UTRACE(0, num_size, ret);
	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true);
	return (ret);
}
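
/*
 * Worked example of the calloc() overflow check above, on a 64-bit system:
 * SIZE_T_MAX << 32 masks the upper half of a size_t, so
 * (num | size) & mask == 0 proves both operands fit in 32 bits, in which
 * case num * size cannot overflow; the division-based check
 * num_size / size != num only runs in the rare case that an operand uses a
 * high bit.
 */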

void *
je_realloc(void *ptr, size_t size)
{
	void *ret;
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
	size_t old_size = 0;
	size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
	prof_ctx_t *old_ctx JEMALLOC_CC_SILENCE_INIT(NULL);

	if (size == 0) {
		if (ptr != NULL) {
			/* realloc(ptr, 0) is equivalent to free(p). */
			assert(malloc_initialized || IS_INITIALIZER);
			if (config_prof) {
				old_size = isalloc(ptr, true);
				if (config_valgrind && opt_valgrind)
					old_rzsize = p2rz(ptr);
			} else if (config_stats) {
				old_size = isalloc(ptr, false);
				if (config_valgrind && opt_valgrind)
					old_rzsize = u2rz(old_size);
			} else if (config_valgrind && opt_valgrind) {
				old_size = isalloc(ptr, false);
				old_rzsize = u2rz(old_size);
			}
			if (config_prof && opt_prof) {
				old_ctx = prof_ctx_get(ptr);
				cnt = NULL;
			}
			iqalloc(ptr);
			ret = NULL;
			goto label_return;
		} else
			size = 1;
	}

	if (ptr != NULL) {
		assert(malloc_initialized || IS_INITIALIZER);
		malloc_thread_init();

		if (config_prof) {
			old_size = isalloc(ptr, true);
			if (config_valgrind && opt_valgrind)
				old_rzsize = p2rz(ptr);
		} else if (config_stats) {
			old_size = isalloc(ptr, false);
			if (config_valgrind && opt_valgrind)
				old_rzsize = u2rz(old_size);
		} else if (config_valgrind && opt_valgrind) {
			old_size = isalloc(ptr, false);
			old_rzsize = u2rz(old_size);
		}
		if (config_prof && opt_prof) {
			usize = s2u(size);
			old_ctx = prof_ctx_get(ptr);
			PROF_ALLOC_PREP(1, usize, cnt);
			if (cnt == NULL) {
				old_ctx = NULL;
				ret = NULL;
				goto label_oom;
			}
			if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U &&
			    usize <= SMALL_MAXCLASS) {
				ret = iralloc(ptr, SMALL_MAXCLASS+1, 0, 0,
				    false, false);
				if (ret != NULL)
					arena_prof_promoted(ret, usize);
				else
					old_ctx = NULL;
			} else {
				ret = iralloc(ptr, size, 0, 0, false, false);
				if (ret == NULL)
					old_ctx = NULL;
			}
		} else {
			if (config_stats || (config_valgrind && opt_valgrind))
				usize = s2u(size);
			ret = iralloc(ptr, size, 0, 0, false, false);
		}

label_oom:
		if (ret == NULL) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error in realloc(): "
				    "out of memory\n");
				abort();
			}
			set_errno(ENOMEM);
		}
	} else {
		/* realloc(NULL, size) is equivalent to malloc(size). */
		if (config_prof && opt_prof)
			old_ctx = NULL;
		if (malloc_init()) {
			if (config_prof && opt_prof)
				cnt = NULL;
			ret = NULL;
		} else {
			if (config_prof && opt_prof) {
				usize = s2u(size);
				PROF_ALLOC_PREP(1, usize, cnt);
				if (cnt == NULL)
					ret = NULL;
				else {
					if (prof_promote && (uintptr_t)cnt !=
					    (uintptr_t)1U && usize <=
					    SMALL_MAXCLASS) {
						ret = imalloc(SMALL_MAXCLASS+1);
						if (ret != NULL) {
							arena_prof_promoted(ret,
							    usize);
						}
					} else
						ret = imalloc(size);
				}
			} else {
				if (config_stats || (config_valgrind &&
				    opt_valgrind))
					usize = s2u(size);
				ret = imalloc(size);
			}
		}

		if (ret == NULL) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error in realloc(): "
				    "out of memory\n");
				abort();
			}
			set_errno(ENOMEM);
		}
	}

label_return:
	if (config_prof && opt_prof)
		prof_realloc(ret, usize, cnt, old_size, old_ctx);
	if (config_stats && ret != NULL) {
		thread_allocated_t *ta;
		assert(usize == isalloc(ret, config_prof));
		ta = thread_allocated_tsd_get();
		ta->allocated += usize;
		ta->deallocated += old_size;
	}
	UTRACE(ptr, size, ret);
	JEMALLOC_VALGRIND_REALLOC(ret, usize, ptr, old_size, old_rzsize, false);
	return (ret);
}

void
je_free(void *ptr)
{

	UTRACE(ptr, 0, 0);
	if (ptr != NULL) {
		size_t usize;
		size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);

		assert(malloc_initialized || IS_INITIALIZER);

		if (config_prof && opt_prof) {
			usize = isalloc(ptr, config_prof);
			prof_free(ptr, usize);
		} else if (config_stats || config_valgrind)
			usize = isalloc(ptr, config_prof);
		if (config_stats)
			thread_allocated_tsd_get()->deallocated += usize;
		if (config_valgrind && opt_valgrind)
			rzsize = p2rz(ptr);
		iqalloc(ptr);
		JEMALLOC_VALGRIND_FREE(ptr, rzsize);
	}
}

/*
 * End malloc(3)-compatible functions.
 */
/******************************************************************************/
/*
 * Begin non-standard override functions.
 */

#ifdef JEMALLOC_OVERRIDE_MEMALIGN
void *
je_memalign(size_t alignment, size_t size)
{
	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
	imemalign(&ret, alignment, size, 1);
	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
	return (ret);
}
#endif

#ifdef JEMALLOC_OVERRIDE_VALLOC
void *
je_valloc(size_t size)
{
	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
	imemalign(&ret, PAGE, size, 1);
	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
	return (ret);
}
#endif

/*
 * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
 * #define je_malloc malloc
 */
#define	malloc_is_malloc 1
#define	is_malloc_(a) malloc_is_ ## a
#define	is_malloc(a) is_malloc_(a)

#if ((is_malloc(je_malloc) == 1) && defined(__GLIBC__) && !defined(__UCLIBC__))
/*
 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
 * to inconsistently reference libc's malloc(3)-compatible functions
 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
 *
 * These definitions interpose hooks in glibc.  The functions are actually
 * passed an extra argument for the caller return address, which will be
 * ignored.
 */
JEMALLOC_EXPORT void (* __free_hook)(void *ptr) = je_free;
JEMALLOC_EXPORT void *(* __malloc_hook)(size_t size) = je_malloc;
JEMALLOC_EXPORT void *(* __realloc_hook)(void *ptr, size_t size) = je_realloc;
JEMALLOC_EXPORT void *(* __memalign_hook)(size_t alignment, size_t size) =
    je_memalign;
#endif
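
/*
 * Net effect of the hooks above, when compiled in: calls that reach glibc's
 * own malloc/realloc/free/memalign entry points -- for example from a shared
 * object loaded with dlopen(..., RTLD_DEEPBIND) -- are redirected to the
 * je_* implementations, so one allocator consistently manages every
 * allocation and deallocation in the process.
 */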

/*
 * End non-standard override functions.
 */
/******************************************************************************/
/*
 * Begin non-standard functions.
 */

size_t
je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
{
	size_t ret;

	assert(malloc_initialized || IS_INITIALIZER);
	malloc_thread_init();

	if (config_ivsalloc)
		ret = ivsalloc(ptr, config_prof);
	else
		ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0;

	return (ret);
}

void
je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *opts)
{

	stats_print(write_cb, cbopaque, opts);
}

int
je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_byname(name, oldp, oldlenp, newp, newlen));
}

int
je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_nametomib(name, mibp, miblenp));
}

int
je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
}

/*
 * End non-standard functions.
 */
/******************************************************************************/
/*
 * Begin experimental functions.
 */
#ifdef JEMALLOC_EXPERIMENTAL

static JEMALLOC_ATTR(always_inline) void *
iallocm(size_t usize, size_t alignment, bool zero, bool try_tcache,
    arena_t *arena)
{

	assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize,
	    alignment)));

	if (alignment != 0)
		return (ipallocx(usize, alignment, zero, try_tcache, arena));
	else if (zero)
		return (icallocx(usize, try_tcache, arena));
	else
		return (imallocx(usize, try_tcache, arena));
}
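
/*
 * Flags decoding used by the experimental functions below (see the ALLOCM_*
 * definitions in jemalloc.h): the low bits carry lg(alignment), so
 * ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK) recovers the alignment, and the
 * & (SIZE_T_MAX-1) mask turns a lg_align of 0 into alignment 0 (none);
 * ALLOCM_ZERO and ALLOCM_NO_MOVE are single flag bits; bits 8 and up hold
 * (arena index + 1), so ((unsigned)(flags >> 8)) - 1 yields UINT_MAX when no
 * explicit arena was requested.
 */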

int
je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
{
	void *p;
	size_t usize;
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));
	bool zero = flags & ALLOCM_ZERO;
	unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
	arena_t *arena;
	bool try_tcache;

	assert(ptr != NULL);
	assert(size != 0);

	if (malloc_init())
		goto label_oom;

	if (arena_ind != UINT_MAX) {
		arena = arenas[arena_ind];
		try_tcache = false;
	} else {
		arena = NULL;
		try_tcache = true;
	}

	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
	if (usize == 0)
		goto label_oom;

	if (config_prof && opt_prof) {
		prof_thr_cnt_t *cnt;

		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL)
			goto label_oom;
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
		    SMALL_MAXCLASS) {
			size_t usize_promoted = (alignment == 0) ?
			    s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1,
			    alignment);
			assert(usize_promoted != 0);
			p = iallocm(usize_promoted, alignment, zero,
			    try_tcache, arena);
			if (p == NULL)
				goto label_oom;
			arena_prof_promoted(p, usize);
		} else {
			p = iallocm(usize, alignment, zero, try_tcache, arena);
			if (p == NULL)
				goto label_oom;
		}
		prof_malloc(p, usize, cnt);
	} else {
		p = iallocm(usize, alignment, zero, try_tcache, arena);
		if (p == NULL)
			goto label_oom;
	}
	if (rsize != NULL)
		*rsize = usize;

	*ptr = p;
	if (config_stats) {
		assert(usize == isalloc(p, config_prof));
		thread_allocated_tsd_get()->allocated += usize;
	}
	UTRACE(0, size, p);
	JEMALLOC_VALGRIND_MALLOC(true, p, usize, zero);
	return (ALLOCM_SUCCESS);
label_oom:
	if (config_xmalloc && opt_xmalloc) {
		malloc_write("<jemalloc>: Error in allocm(): "
		    "out of memory\n");
		abort();
	}
	*ptr = NULL;
	UTRACE(0, size, 0);
	return (ALLOCM_ERR_OOM);
}
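
/*
 * Illustrative call (assuming the ALLOCM_ALIGN()/ALLOCM_ZERO macros from
 * jemalloc.h): allocm(&p, &sz, 4096, ALLOCM_ALIGN(4096) | ALLOCM_ZERO)
 * returns ALLOCM_SUCCESS with p 4096-byte aligned and zeroed, and sz set to
 * the usable size, which is at least 4096.
 */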

int
je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
{
	void *p, *q;
	size_t usize;
	size_t old_size;
	size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));
	bool zero = flags & ALLOCM_ZERO;
	bool no_move = flags & ALLOCM_NO_MOVE;
	unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
	bool try_tcache_alloc, try_tcache_dalloc;
	arena_t *arena;

	assert(ptr != NULL);
	assert(*ptr != NULL);
	assert(size != 0);
	assert(SIZE_T_MAX - size >= extra);
	assert(malloc_initialized || IS_INITIALIZER);
	malloc_thread_init();

	if (arena_ind != UINT_MAX) {
		arena_chunk_t *chunk;
		try_tcache_alloc = true;
		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(*ptr);
		try_tcache_dalloc = (chunk == *ptr || chunk->arena !=
		    arenas[arena_ind]);
		arena = arenas[arena_ind];
	} else {
		try_tcache_alloc = true;
		try_tcache_dalloc = true;
		arena = NULL;
	}

	p = *ptr;
	if (config_prof && opt_prof) {
		prof_thr_cnt_t *cnt;

		/*
		 * usize isn't knowable before iralloc() returns when extra is
		 * non-zero.  Therefore, compute its maximum possible value and
		 * use that in PROF_ALLOC_PREP() to decide whether to capture a
		 * backtrace.  prof_realloc() will use the actual usize to
		 * decide whether to sample.
		 */
		size_t max_usize = (alignment == 0) ? s2u(size+extra) :
		    sa2u(size+extra, alignment);
		prof_ctx_t *old_ctx = prof_ctx_get(p);
		old_size = isalloc(p, true);
		if (config_valgrind && opt_valgrind)
			old_rzsize = p2rz(p);
		PROF_ALLOC_PREP(1, max_usize, cnt);
		if (cnt == NULL)
			goto label_oom;
		/*
		 * Use minimum usize to determine whether promotion may happen.
		 */
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
		    && ((alignment == 0) ? s2u(size) : sa2u(size, alignment))
		    <= SMALL_MAXCLASS) {
			q = irallocx(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
			    size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
			    alignment, zero, no_move, try_tcache_alloc,
			    try_tcache_dalloc, arena);
			if (q == NULL)
				goto label_err;
			if (max_usize < PAGE) {
				usize = max_usize;
				arena_prof_promoted(q, usize);
			} else
				usize = isalloc(q, config_prof);
		} else {
			q = irallocx(p, size, extra, alignment, zero, no_move,
			    try_tcache_alloc, try_tcache_dalloc, arena);
			if (q == NULL)
				goto label_err;
			usize = isalloc(q, config_prof);
		}
		prof_realloc(q, usize, cnt, old_size, old_ctx);
		if (rsize != NULL)
			*rsize = usize;
	} else {
		if (config_stats) {
			old_size = isalloc(p, false);
			if (config_valgrind && opt_valgrind)
				old_rzsize = u2rz(old_size);
		} else if (config_valgrind && opt_valgrind) {
			old_size = isalloc(p, false);
			old_rzsize = u2rz(old_size);
		}
		q = irallocx(p, size, extra, alignment, zero, no_move,
		    try_tcache_alloc, try_tcache_dalloc, arena);
		if (q == NULL)
			goto label_err;
		if (config_stats)
			usize = isalloc(q, config_prof);
		if (rsize != NULL) {
			if (config_stats == false)
				usize = isalloc(q, config_prof);
			*rsize = usize;
		}
	}

	*ptr = q;
	if (config_stats) {
		thread_allocated_t *ta;
		ta = thread_allocated_tsd_get();
		ta->allocated += usize;
		ta->deallocated += old_size;
	}
	UTRACE(p, size, q);
	JEMALLOC_VALGRIND_REALLOC(q, usize, p, old_size, old_rzsize, zero);
	return (ALLOCM_SUCCESS);
label_err:
	if (no_move) {
		UTRACE(p, size, q);
		return (ALLOCM_ERR_NOT_MOVED);
	}
label_oom:
	if (config_xmalloc && opt_xmalloc) {
		malloc_write("<jemalloc>: Error in rallocm(): "
		    "out of memory\n");
		abort();
	}
	UTRACE(p, size, 0);
	return (ALLOCM_ERR_OOM);
}
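
/*
 * With ALLOCM_NO_MOVE, a resize that cannot be satisfied in place takes the
 * label_err path above and returns ALLOCM_ERR_NOT_MOVED, leaving *ptr and
 * its contents untouched; callers can therefore attempt cheap in-place
 * growth first and fall back to an ordinary (moving) reallocation
 * themselves.
 */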

int
je_sallocm(const void *ptr, size_t *rsize, int flags)
{
	size_t sz;

	assert(malloc_initialized || IS_INITIALIZER);
	malloc_thread_init();

	if (config_ivsalloc)
		sz = ivsalloc(ptr, config_prof);
	else {
		assert(ptr != NULL);
		sz = isalloc(ptr, config_prof);
	}
	assert(rsize != NULL);
	*rsize = sz;

	return (ALLOCM_SUCCESS);
}

int
je_dallocm(void *ptr, int flags)
{
	size_t usize;
	size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
	unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
	bool try_tcache;

	assert(ptr != NULL);
	assert(malloc_initialized || IS_INITIALIZER);

	if (arena_ind != UINT_MAX) {
		arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
		try_tcache = (chunk == ptr || chunk->arena !=
		    arenas[arena_ind]);
	} else
		try_tcache = true;

	UTRACE(ptr, 0, 0);
	if (config_stats || config_valgrind)
		usize = isalloc(ptr, config_prof);
	if (config_prof && opt_prof) {
		if (config_stats == false && config_valgrind == false)
			usize = isalloc(ptr, config_prof);
		prof_free(ptr, usize);
	}
	if (config_stats)
		thread_allocated_tsd_get()->deallocated += usize;
	if (config_valgrind && opt_valgrind)
		rzsize = p2rz(ptr);
	iqallocx(ptr, try_tcache);
	JEMALLOC_VALGRIND_FREE(ptr, rzsize);

	return (ALLOCM_SUCCESS);
}

int
je_nallocm(size_t *rsize, size_t size, int flags)
{
	size_t usize;
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));

	assert(size != 0);

	if (malloc_init())
		return (ALLOCM_ERR_OOM);

	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
	if (usize == 0)
		return (ALLOCM_ERR_OOM);

	if (rsize != NULL)
		*rsize = usize;
	return (ALLOCM_SUCCESS);
}

#endif
/*
 * End experimental functions.
 */
/******************************************************************************/
/*
 * The following functions are used by threading libraries for protection of
 * malloc during fork().
 */

/*
 * If an application creates a thread before doing any allocation in the main
 * thread, then calls fork(2) in the main thread followed by memory allocation
 * in the child process, a race can occur that results in deadlock within the
 * child: the main thread may have forked while the created thread had
 * partially initialized the allocator.  Ordinarily jemalloc prevents
 * fork/malloc races via the following functions it registers during
 * initialization using pthread_atfork(), but of course that does no good if
 * the allocator isn't fully initialized at fork time.  The following library
 * constructor is a partial solution to this problem.  It may still be
 * possible to trigger the deadlock described above, but doing so would
 * involve forking via a library constructor that runs before jemalloc's runs.
 */
JEMALLOC_ATTR(constructor)
static void
jemalloc_constructor(void)
{

	malloc_init();
}

#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_prefork(void)
#else
JEMALLOC_EXPORT void
_malloc_prefork(void)
#endif
{
	unsigned i;

#ifdef JEMALLOC_MUTEX_INIT_CB
	if (malloc_initialized == false)
		return;
#endif
	assert(malloc_initialized);

	/* Acquire all mutexes in a safe order. */
	ctl_prefork();
	prof_prefork();
	malloc_mutex_prefork(&arenas_lock);
	for (i = 0; i < narenas_total; i++) {
		if (arenas[i] != NULL)
			arena_prefork(arenas[i]);
	}
	chunk_prefork();
	base_prefork();
	huge_prefork();
}
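
/*
 * The postfork handlers below release in exactly the reverse of the
 * acquisition order used above (ctl, prof, arenas_lock, arenas, chunk, base,
 * huge acquired; huge through ctl released), so that parent and child both
 * resume with every allocator mutex unlocked and the lock hierarchy intact.
 */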

#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_postfork_parent(void)
#else
JEMALLOC_EXPORT void
_malloc_postfork(void)
#endif
{
	unsigned i;

#ifdef JEMALLOC_MUTEX_INIT_CB
	if (malloc_initialized == false)
		return;
#endif
	assert(malloc_initialized);

	/* Release all mutexes, now that fork() has completed. */
	huge_postfork_parent();
	base_postfork_parent();
	chunk_postfork_parent();
	for (i = 0; i < narenas_total; i++) {
		if (arenas[i] != NULL)
			arena_postfork_parent(arenas[i]);
	}
	malloc_mutex_postfork_parent(&arenas_lock);
	prof_postfork_parent();
	ctl_postfork_parent();
}

void
jemalloc_postfork_child(void)
{
	unsigned i;

	assert(malloc_initialized);

	/* Release all mutexes, now that fork() has completed. */
	huge_postfork_child();
	base_postfork_child();
	chunk_postfork_child();
	for (i = 0; i < narenas_total; i++) {
		if (arenas[i] != NULL)
			arena_postfork_child(arenas[i]);
	}
	malloc_mutex_postfork_child(&arenas_lock);
	prof_postfork_child();
	ctl_postfork_child();
}

/******************************************************************************/
/*
 * The following functions are used for TLS allocation/deallocation in static
 * binaries on FreeBSD.  The primary difference between these and i[mcd]alloc()
 * is that these avoid accessing TLS variables.
 */

static void *
a0alloc(size_t size, bool zero)
{

	if (malloc_init())
		return (NULL);

	if (size == 0)
		size = 1;

	if (size <= arena_maxclass)
		return (arena_malloc(arenas[0], size, zero, false));
	else
		return (huge_malloc(size, zero));
}

void *
a0malloc(size_t size)
{

	return (a0alloc(size, false));
}

void *
a0calloc(size_t num, size_t size)
{

	return (a0alloc(num * size, true));
}

void
a0free(void *ptr)
{
	arena_chunk_t *chunk;

	if (ptr == NULL)
		return;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr)
		arena_dalloc(chunk->arena, chunk, ptr, false);
	else
		huge_dalloc(ptr, true);
}

/******************************************************************************/