jemalloc.c revision 9b0cbf0850b130a9b0a8c58bd10b2926b2083510
#define	JEMALLOC_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

malloc_tsd_data(, arenas, arena_t *, NULL)
malloc_tsd_data(, thread_allocated, thread_allocated_t,
    THREAD_ALLOCATED_INITIALIZER)

/* Runtime configuration options. */
const char	*je_malloc_conf;
bool	opt_abort =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
bool	opt_junk =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
    true
#else
    false
#endif
    ;
size_t	opt_quarantine = ZU(0);
bool	opt_redzone = false;
bool	opt_utrace = false;
bool	opt_valgrind = false;
bool	opt_xmalloc = false;
bool	opt_zero = false;
size_t	opt_narenas = 0;

unsigned	ncpus;

malloc_mutex_t		arenas_lock;
arena_t			**arenas;
unsigned		narenas_total;
unsigned		narenas_auto;

/* Set to true once the allocator has been initialized. */
static bool		malloc_initialized = false;

#ifdef JEMALLOC_THREADED_INIT
/* Used to let the initializing thread recursively allocate. */
#  define NO_INITIALIZER	((unsigned long)0)
#  define INITIALIZER		pthread_self()
#  define IS_INITIALIZER	(malloc_initializer == pthread_self())
static pthread_t		malloc_initializer = NO_INITIALIZER;
#else
#  define NO_INITIALIZER	false
#  define INITIALIZER		true
#  define IS_INITIALIZER	malloc_initializer
static bool			malloc_initializer = NO_INITIALIZER;
#endif

/* Used to avoid initialization races. */
#ifdef _WIN32
static malloc_mutex_t	init_lock;

JEMALLOC_ATTR(constructor)
static void WINAPI
_init_init_lock(void)
{

	malloc_mutex_init(&init_lock);
}

#ifdef _MSC_VER
#  pragma section(".CRT$XCU", read)
JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
#endif

#else
static malloc_mutex_t	init_lock = MALLOC_MUTEX_INITIALIZER;
#endif

typedef struct {
	void	*p;	/* Input pointer (as in realloc(p, s)). */
	size_t	s;	/* Request size. */
	void	*r;	/* Result pointer. */
} malloc_utrace_t;

#ifdef JEMALLOC_UTRACE
#  define UTRACE(a, b, c) do {						\
	if (opt_utrace) {						\
		int utrace_serrno = errno;				\
		malloc_utrace_t ut;					\
		ut.p = (a);						\
		ut.s = (b);						\
		ut.r = (c);						\
		utrace(&ut, sizeof(ut));				\
		errno = utrace_serrno;					\
	}								\
} while (0)
#else
#  define UTRACE(a, b, c)
#endif
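/*
 * Example (illustrative, derived from the UTRACE() call sites below): with
 * opt_utrace enabled, allocation events are logged as (input ptr, request
 * size, result ptr) triples:
 *
 *	p = malloc(42);		->	{ NULL, 42, p }
 *	q = realloc(p, 100);	->	{ p, 100, q }
 *	free(q);		->	{ q, 0, NULL }
 */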
/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static bool	malloc_init_hard(void);

/******************************************************************************/
/*
 * Begin miscellaneous support functions.
 */

/* Create a new arena and insert it into the arenas array at index ind. */
arena_t *
arenas_extend(unsigned ind)
{
	arena_t *ret;

	ret = (arena_t *)base_alloc(sizeof(arena_t));
	if (ret != NULL && arena_new(ret, ind) == false) {
		arenas[ind] = ret;
		return (ret);
	}
	/* Only reached if there is an OOM error. */

	/*
	 * OOM here is quite inconvenient to propagate, since dealing with it
	 * would require a check for failure in the fast path.  Instead, punt
	 * by using arenas[0].  In practice, this is an extremely unlikely
	 * failure.
	 */
	malloc_write("<jemalloc>: Error initializing arena\n");
	if (opt_abort)
		abort();

	return (arenas[0]);
}

/* Slow path, called only by choose_arena(). */
arena_t *
choose_arena_hard(void)
{
	arena_t *ret;

	if (narenas_auto > 1) {
		unsigned i, choose, first_null;

		choose = 0;
		first_null = narenas_auto;
		malloc_mutex_lock(&arenas_lock);
		assert(arenas[0] != NULL);
		for (i = 1; i < narenas_auto; i++) {
			if (arenas[i] != NULL) {
				/*
				 * Choose the first arena that has the lowest
				 * number of threads assigned to it.
				 */
				if (arenas[i]->nthreads <
				    arenas[choose]->nthreads)
					choose = i;
			} else if (first_null == narenas_auto) {
				/*
				 * Record the index of the first uninitialized
				 * arena, in case all extant arenas are in use.
				 *
				 * NB: It is possible for there to be
				 * discontinuities in terms of initialized
				 * versus uninitialized arenas, due to the
				 * "thread.arena" mallctl.
				 */
				first_null = i;
			}
		}

		if (arenas[choose]->nthreads == 0
		    || first_null == narenas_auto) {
			/*
			 * Use an unloaded arena, or the least loaded arena if
			 * all arenas are already initialized.
			 */
			ret = arenas[choose];
		} else {
			/* Initialize a new arena. */
			ret = arenas_extend(first_null);
		}
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	} else {
		ret = arenas[0];
		malloc_mutex_lock(&arenas_lock);
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	}

	arenas_tsd_set(&ret);

	return (ret);
}
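/*
 * Example (illustrative): suppose narenas_auto == 4, all four arenas are
 * initialized, and the per-arena thread counts are {3, 1, 2, 1}.  The scan
 * above settles on choose == 1 (the first minimum); arenas[1]->nthreads is
 * nonzero, but first_null == narenas_auto, so the caller is assigned
 * arenas[1] rather than a new arena.
 */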
static void
stats_print_atexit(void)
{

	if (config_tcache && config_stats) {
		unsigned narenas, i;

		/*
		 * Merge stats from extant threads.  This is racy, since
		 * individual threads do not lock when recording tcache stats
		 * events.  As a consequence, the final stats may be slightly
		 * out of date by the time they are reported, if other threads
		 * continue to allocate.
		 */
		for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
			arena_t *arena = arenas[i];
			if (arena != NULL) {
				tcache_t *tcache;

				/*
				 * tcache_stats_merge() locks bins, so if any
				 * code is introduced that acquires both arena
				 * and bin locks in the opposite order,
				 * deadlocks may result.
				 */
				malloc_mutex_lock(&arena->lock);
				ql_foreach(tcache, &arena->tcache_ql, link) {
					tcache_stats_merge(tcache, arena);
				}
				malloc_mutex_unlock(&arena->lock);
			}
		}
	}
	je_malloc_stats_print(NULL, NULL, NULL);
}

/*
 * End miscellaneous support functions.
 */
/******************************************************************************/
/*
 * Begin initialization functions.
 */

static unsigned
malloc_ncpus(void)
{
	long result;

#ifdef _WIN32
	SYSTEM_INFO si;
	GetSystemInfo(&si);
	result = si.dwNumberOfProcessors;
#else
	result = sysconf(_SC_NPROCESSORS_ONLN);
#endif
	return ((result == -1) ? 1 : (unsigned)result);
}

void
arenas_cleanup(void *arg)
{
	arena_t *arena = *(arena_t **)arg;

	malloc_mutex_lock(&arenas_lock);
	arena->nthreads--;
	malloc_mutex_unlock(&arenas_lock);
}

JEMALLOC_ALWAYS_INLINE_C void
malloc_thread_init(void)
{

	/*
	 * TSD initialization can't be safely done as a side effect of
	 * deallocation, because it is possible for a thread to do nothing but
	 * deallocate its TLS data via free(), in which case writing to TLS
	 * would cause write-after-free memory corruption.  The quarantine
	 * facility *only* gets used as a side effect of deallocation, so make
	 * a best effort attempt at initializing its TSD by hooking all
	 * allocation events.
	 */
	if (config_fill && opt_quarantine)
		quarantine_alloc_hook();
}

JEMALLOC_ALWAYS_INLINE_C bool
malloc_init(void)
{

	if (malloc_initialized == false && malloc_init_hard())
		return (true);
	malloc_thread_init();

	return (false);
}

static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p)
{
	bool accept;
	const char *opts = *opts_p;

	*k_p = opts;

	for (accept = false; accept == false;) {
		switch (*opts) {
		case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
		case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
		case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
		case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
		case 'Y': case 'Z':
		case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
		case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
		case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
		case 's': case 't': case 'u': case 'v': case 'w': case 'x':
		case 'y': case 'z':
		case '0': case '1': case '2': case '3': case '4': case '5':
		case '6': case '7': case '8': case '9':
		case '_':
			opts++;
			break;
		case ':':
			opts++;
			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
			*v_p = opts;
			accept = true;
			break;
		case '\0':
			if (opts != *opts_p) {
				malloc_write("<jemalloc>: Conf string ends "
				    "with key\n");
			}
			return (true);
		default:
			malloc_write("<jemalloc>: Malformed conf string\n");
			return (true);
		}
	}

	for (accept = false; accept == false;) {
		switch (*opts) {
		case ',':
			opts++;
			/*
			 * Look ahead one character here, because the next time
			 * this function is called, it will assume that end of
			 * input has been cleanly reached if no input remains,
			 * but we have optimistically already consumed the
			 * comma if one exists.
			 */
			if (*opts == '\0') {
				malloc_write("<jemalloc>: Conf string ends "
				    "with comma\n");
			}
			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
			accept = true;
			break;
		case '\0':
			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
			accept = true;
			break;
		default:
			opts++;
			break;
		}
	}

	*opts_p = opts;
	return (false);
}

static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
    size_t vlen)
{

	malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
	    (int)vlen, v);
}
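/*
 * Example (illustrative): given the conf string "lg_chunk:24,narenas:4",
 * successive malloc_conf_next() calls yield
 *
 *	k = "lg_chunk", klen = 8, v = "24", vlen = 2
 *	k = "narenas",  klen = 7, v = "4",  vlen = 1
 *
 * after which *opts points at the terminating '\0' and option processing
 * stops.
 */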
static void
malloc_conf_init(void)
{
	unsigned i;
	char buf[PATH_MAX + 1];
	const char *opts, *k, *v;
	size_t klen, vlen;

	/*
	 * Automatically configure valgrind before processing options.  The
	 * valgrind option remains in jemalloc 3.x for compatibility reasons.
	 */
	if (config_valgrind) {
		opt_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false;
		if (config_fill && opt_valgrind) {
			opt_junk = false;
			assert(opt_zero == false);
			opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
			opt_redzone = true;
		}
		if (config_tcache && opt_valgrind)
			opt_tcache = false;
	}

	for (i = 0; i < 3; i++) {
		/* Get runtime configuration. */
		switch (i) {
		case 0:
			if (je_malloc_conf != NULL) {
				/*
				 * Use options that were compiled into the
				 * program.
				 */
				opts = je_malloc_conf;
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		case 1: {
			int linklen = 0;
#ifndef _WIN32
			int saved_errno = errno;
			const char *linkname =
#  ifdef JEMALLOC_PREFIX
			    "/etc/"JEMALLOC_PREFIX"malloc.conf"
#  else
			    "/etc/malloc.conf"
#  endif
			    ;

			/*
			 * Try to use the contents of the "/etc/malloc.conf"
			 * symbolic link's name.
			 */
			linklen = readlink(linkname, buf, sizeof(buf) - 1);
			if (linklen == -1) {
				/* No configuration specified. */
				linklen = 0;
				/* restore errno */
				set_errno(saved_errno);
			}
#endif
			buf[linklen] = '\0';
			opts = buf;
			break;
		} case 2: {
			const char *envname =
#ifdef JEMALLOC_PREFIX
			    JEMALLOC_CPREFIX"MALLOC_CONF"
#else
			    "MALLOC_CONF"
#endif
			    ;

			if ((opts = getenv(envname)) != NULL) {
				/*
				 * Do nothing; opts is already initialized to
				 * the value of the MALLOC_CONF environment
				 * variable.
				 */
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		} default:
			not_reached();
			buf[0] = '\0';
			opts = buf;
		}

		while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
		    &vlen) == false) {
#define	CONF_HANDLE_BOOL(o, n)						\
			if (sizeof(n)-1 == klen && strncmp(n, k,	\
			    klen) == 0) {				\
				if (strncmp("true", v, vlen) == 0 &&	\
				    vlen == sizeof("true")-1)		\
					o = true;			\
				else if (strncmp("false", v, vlen) ==	\
				    0 && vlen == sizeof("false")-1)	\
					o = false;			\
				else {					\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				}					\
				continue;				\
			}
#define	CONF_HANDLE_SIZE_T(o, n, min, max, clip)			\
			if (sizeof(n)-1 == klen && strncmp(n, k,	\
			    klen) == 0) {				\
				uintmax_t um;				\
				char *end;				\
									\
				set_errno(0);				\
				um = malloc_strtoumax(v, &end, 0);	\
				if (get_errno() != 0 || (uintptr_t)end -\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (clip) {			\
					if (min != 0 && um < min)	\
						o = min;		\
					else if (um > max)		\
						o = max;		\
					else				\
						o = um;			\
				} else {				\
					if ((min != 0 && um < min) ||	\
					    um > max) {			\
						malloc_conf_error(	\
						    "Out-of-range "	\
						    "conf value",	\
						    k, klen, v, vlen);	\
					} else				\
						o = um;			\
				}					\
				continue;				\
			}
#define	CONF_HANDLE_SSIZE_T(o, n, min, max)				\
			if (sizeof(n)-1 == klen && strncmp(n, k,	\
			    klen) == 0) {				\
				long l;					\
				char *end;				\
									\
				set_errno(0);				\
				l = strtol(v, &end, 0);			\
				if (get_errno() != 0 || (uintptr_t)end -\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (l < (ssize_t)min || l >	\
				    (ssize_t)max) {			\
					malloc_conf_error(		\
					    "Out-of-range conf value",	\
					    k, klen, v, vlen);		\
				} else					\
					o = l;				\
				continue;				\
			}
#define	CONF_HANDLE_CHAR_P(o, n, d)					\
			if (sizeof(n)-1 == klen && strncmp(n, k,	\
			    klen) == 0) {				\
				size_t cpylen = (vlen <=		\
				    sizeof(o)-1) ? vlen :		\
				    sizeof(o)-1;			\
				strncpy(o, v, cpylen);			\
				o[cpylen] = '\0';			\
				continue;				\
			}

			CONF_HANDLE_BOOL(opt_abort, "abort")
			/*
			 * Chunks always require at least one header page, plus
			 * one data page in the absence of redzones, or three
			 * pages in the presence of redzones.  In order to
			 * simplify options processing, fix the limit based on
			 * config_fill.
			 */
			CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
			    (config_fill ? 2 : 1), (sizeof(size_t) << 3) - 1,
			    true)
			if (strncmp("dss", k, klen) == 0) {
				int i;
				bool match = false;
				for (i = 0; i < dss_prec_limit; i++) {
					if (strncmp(dss_prec_names[i], v, vlen)
					    == 0) {
						if (chunk_dss_prec_set(i)) {
							malloc_conf_error(
							    "Error setting dss",
							    k, klen, v, vlen);
						} else {
							opt_dss =
							    dss_prec_names[i];
							match = true;
							break;
						}
					}
				}
				if (match == false) {
					malloc_conf_error("Invalid conf value",
					    k, klen, v, vlen);
				}
				continue;
			}
			CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1,
			    SIZE_T_MAX, false)
			CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
			    -1, (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
			if (config_fill) {
				CONF_HANDLE_BOOL(opt_junk, "junk")
				CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
				    0, SIZE_T_MAX, false)
				CONF_HANDLE_BOOL(opt_redzone, "redzone")
				CONF_HANDLE_BOOL(opt_zero, "zero")
			}
			if (config_utrace) {
				CONF_HANDLE_BOOL(opt_utrace, "utrace")
			}
			if (config_valgrind) {
				CONF_HANDLE_BOOL(opt_valgrind, "valgrind")
			}
			if (config_xmalloc) {
				CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
			}
			if (config_tcache) {
				CONF_HANDLE_BOOL(opt_tcache, "tcache")
				CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
				    "lg_tcache_max", -1,
				    (sizeof(size_t) << 3) - 1)
			}
			if (config_prof) {
				CONF_HANDLE_BOOL(opt_prof, "prof")
				CONF_HANDLE_CHAR_P(opt_prof_prefix,
				    "prof_prefix", "jeprof")
				CONF_HANDLE_BOOL(opt_prof_active, "prof_active")
				CONF_HANDLE_SSIZE_T(opt_lg_prof_sample,
				    "lg_prof_sample", 0,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum")
				CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
				    "lg_prof_interval", -1,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump")
				CONF_HANDLE_BOOL(opt_prof_final, "prof_final")
				CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak")
			}
			malloc_conf_error("Invalid conf pair", k, klen, v,
			    vlen);
#undef CONF_HANDLE_BOOL
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
		}
	}
}
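/*
 * Example (illustrative): the three configuration sources above are parsed in
 * order, so a later source overrides an earlier one for any key they share.
 * For instance, running
 *
 *	MALLOC_CONF="narenas:2,lg_chunk:24" ./a.out
 *
 * takes precedence over both a compiled-in je_malloc_conf string and an
 * /etc/malloc.conf symlink for the "narenas" and "lg_chunk" options, simply
 * because its assignments are applied last.
 */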
static bool
malloc_init_hard(void)
{
	arena_t *init_arenas[1];

	malloc_mutex_lock(&init_lock);
	if (malloc_initialized || IS_INITIALIZER) {
		/*
		 * Another thread initialized the allocator before this one
		 * acquired init_lock, or this thread is the initializing
		 * thread, and it is recursively allocating.
		 */
		malloc_mutex_unlock(&init_lock);
		return (false);
	}
#ifdef JEMALLOC_THREADED_INIT
	if (malloc_initializer != NO_INITIALIZER && IS_INITIALIZER == false) {
		/* Busy-wait until the initializing thread completes. */
		do {
			malloc_mutex_unlock(&init_lock);
			CPU_SPINWAIT;
			malloc_mutex_lock(&init_lock);
		} while (malloc_initialized == false);
		malloc_mutex_unlock(&init_lock);
		return (false);
	}
#endif
	malloc_initializer = INITIALIZER;

	malloc_tsd_boot();
	if (config_prof)
		prof_boot0();

	malloc_conf_init();

	if (opt_stats_print) {
		/* Print statistics at exit. */
		if (atexit(stats_print_atexit) != 0) {
			malloc_write("<jemalloc>: Error in atexit()\n");
			if (opt_abort)
				abort();
		}
	}

	if (base_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (chunk_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (ctl_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_prof)
		prof_boot1();

	arena_boot();

	if (config_tcache && tcache_boot0()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (huge_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (malloc_mutex_init(&arenas_lock)) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/*
	 * Create enough scaffolding to allow recursive allocation in
	 * malloc_ncpus().
	 */
	narenas_total = narenas_auto = 1;
	arenas = init_arenas;
	memset(arenas, 0, sizeof(arena_t *) * narenas_auto);

	/*
	 * Initialize one arena here.  The rest are lazily created in
	 * choose_arena_hard().
	 */
	arenas_extend(0);
	if (arenas[0] == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/* Initialize allocation counters before any allocations can occur. */
	if (config_stats && thread_allocated_tsd_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (arenas_tsd_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_tcache && tcache_boot1()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_fill && quarantine_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_prof && prof_boot2()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	malloc_mutex_unlock(&init_lock);
	/**********************************************************************/
	/* Recursive allocation may follow. */

	ncpus = malloc_ncpus();

#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
    && !defined(_WIN32))
	/* LinuxThreads's pthread_atfork() allocates. */
	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
	    jemalloc_postfork_child) != 0) {
		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
		if (opt_abort)
			abort();
	}
#endif

	/* Done recursively allocating. */
	/**********************************************************************/
	malloc_mutex_lock(&init_lock);

	if (mutex_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (opt_narenas == 0) {
		/*
		 * For SMP systems, create more than one arena per CPU by
		 * default.
		 */
		if (ncpus > 1)
			opt_narenas = ncpus << 2;
		else
			opt_narenas = 1;
	}
	narenas_auto = opt_narenas;
	/*
	 * Make sure that the arenas array can be allocated.  In practice, this
	 * limit is enough to allow the allocator to function, but the ctl
	 * machinery will fail to allocate memory at far lower limits.
	 */
	if (narenas_auto > chunksize / sizeof(arena_t *)) {
		narenas_auto = chunksize / sizeof(arena_t *);
		malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
		    narenas_auto);
	}
	narenas_total = narenas_auto;

	/* Allocate and initialize arenas. */
	arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas_total);
	if (arenas == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}
	/*
	 * Zero the array.  In practice, this should always be pre-zeroed,
	 * since it was just mmap()ed, but let's be sure.
	 */
	memset(arenas, 0, sizeof(arena_t *) * narenas_total);
	/* Copy the pointer to the one arena that was already initialized. */
	arenas[0] = init_arenas[0];

	malloc_initialized = true;
	malloc_mutex_unlock(&init_lock);

	return (false);
}
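/*
 * Example (illustrative): on an 8-CPU system with "narenas" unset, the
 * default above yields opt_narenas = 8 << 2 = 32.  With the typical 4 MiB
 * chunk size and 8-byte pointers, the clamp allows up to
 * 4194304 / 8 = 524288 arenas, so it only matters for pathological "narenas"
 * settings.
 */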
/*
 * End initialization functions.
 */
/******************************************************************************/
/*
 * Begin malloc(3)-compatible functions.
 */

static void *
imalloc_prof_sample(size_t usize, prof_thr_cnt_t *cnt)
{
	void *p;

	if (cnt == NULL)
		return (NULL);
	if (usize <= SMALL_MAXCLASS) {
		p = imalloc(SMALL_MAXCLASS+1);
		if (p == NULL)
			return (NULL);
		arena_prof_promoted(p, usize);
	} else
		p = imalloc(usize);

	return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
imalloc_prof(size_t usize, prof_thr_cnt_t *cnt)
{
	void *p;

	if ((uintptr_t)cnt != (uintptr_t)1U)
		p = imalloc_prof_sample(usize, cnt);
	else
		p = imalloc(usize);
	if (p == NULL)
		return (NULL);
	prof_malloc(p, usize, cnt);

	return (p);
}

/*
 * MALLOC_BODY() is a macro rather than a function because its contents are in
 * the fast path, but inlining would cause reliability issues when determining
 * how many frames to discard from heap profiling backtraces.
 */
#define	MALLOC_BODY(ret, size, usize) do {				\
	if (malloc_init())						\
		ret = NULL;						\
	else {								\
		if (config_prof && opt_prof) {				\
			prof_thr_cnt_t *cnt;				\
									\
			usize = s2u(size);				\
			/*						\
			 * Call PROF_ALLOC_PREP() here rather than in	\
			 * imalloc_prof() so that imalloc_prof() can be	\
			 * inlined without introducing uncertainty	\
			 * about the number of backtrace frames to	\
			 * ignore.  imalloc_prof() is in the fast path	\
			 * when heap profiling is enabled, so inlining	\
			 * is critical to performance.  (For		\
			 * consistency all callers of PROF_ALLOC_PREP()	\
			 * are structured similarly, even though e.g.	\
			 * realloc() isn't called enough for inlining	\
			 * to be critical.)				\
			 */						\
			PROF_ALLOC_PREP(1, usize, cnt);			\
			ret = imalloc_prof(usize, cnt);			\
		} else {						\
			if (config_stats || (config_valgrind &&	\
			    opt_valgrind))				\
				usize = s2u(size);			\
			ret = imalloc(size);				\
		}							\
	}								\
} while (0)

void *
je_malloc(size_t size)
{
	void *ret;
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);

	if (size == 0)
		size = 1;

	MALLOC_BODY(ret, size, usize);

	if (ret == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in malloc(): "
			    "out of memory\n");
			abort();
		}
		set_errno(ENOMEM);
	}
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret, config_prof));
		thread_allocated_tsd_get()->allocated += usize;
	}
	UTRACE(0, size, ret);
	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
	return (ret);
}
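/*
 * Reading aid (inferred from PROF_ALLOC_PREP()'s usage in this file): the cnt
 * pointer passed to the *_prof() helpers encodes three states.  NULL means
 * preparation failed, (prof_thr_cnt_t *)(uintptr_t)1U means "do not sample
 * this allocation", and any other value is a live counter for a sampled
 * allocation.  Sampled small requests are promoted to SMALL_MAXCLASS+1 bytes
 * so that arena_prof_promoted() can record the true usize for the sample.
 */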
static void *
imemalign_prof_sample(size_t alignment, size_t usize, prof_thr_cnt_t *cnt)
{
	void *p;

	if (cnt == NULL)
		return (NULL);
	if (usize <= SMALL_MAXCLASS) {
		assert(sa2u(SMALL_MAXCLASS+1, alignment) != 0);
		p = ipalloc(sa2u(SMALL_MAXCLASS+1, alignment), alignment,
		    false);
		if (p == NULL)
			return (NULL);
		arena_prof_promoted(p, usize);
	} else
		p = ipalloc(usize, alignment, false);

	return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
imemalign_prof(size_t alignment, size_t usize, prof_thr_cnt_t *cnt)
{
	void *p;

	if ((uintptr_t)cnt != (uintptr_t)1U)
		p = imemalign_prof_sample(alignment, usize, cnt);
	else
		p = ipalloc(usize, alignment, false);
	if (p == NULL)
		return (NULL);
	prof_malloc(p, usize, cnt);

	return (p);
}

JEMALLOC_ATTR(nonnull(1))
#ifdef JEMALLOC_PROF
/*
 * Avoid any uncertainty as to how many backtrace frames to ignore in
 * PROF_ALLOC_PREP().
 */
JEMALLOC_NOINLINE
#endif
static int
imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
{
	int ret;
	size_t usize;
	void *result;

	assert(min_alignment != 0);

	if (malloc_init()) {
		result = NULL;
		goto label_oom;
	} else {
		if (size == 0)
			size = 1;

		/* Make sure that alignment is a large enough power of 2. */
		if (((alignment - 1) & alignment) != 0
		    || (alignment < min_alignment)) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error allocating "
				    "aligned memory: invalid alignment\n");
				abort();
			}
			result = NULL;
			ret = EINVAL;
			goto label_return;
		}

		usize = sa2u(size, alignment);
		if (usize == 0) {
			result = NULL;
			goto label_oom;
		}

		if (config_prof && opt_prof) {
			prof_thr_cnt_t *cnt;

			PROF_ALLOC_PREP(2, usize, cnt);
			result = imemalign_prof(alignment, usize, cnt);
		} else
			result = ipalloc(usize, alignment, false);
		if (result == NULL)
			goto label_oom;
	}

	*memptr = result;
	ret = 0;
label_return:
	if (config_stats && result != NULL) {
		assert(usize == isalloc(result, config_prof));
		thread_allocated_tsd_get()->allocated += usize;
	}
	UTRACE(0, size, result);
	return (ret);
label_oom:
	assert(result == NULL);
	if (config_xmalloc && opt_xmalloc) {
		malloc_write("<jemalloc>: Error allocating aligned memory: "
		    "out of memory\n");
		abort();
	}
	ret = ENOMEM;
	goto label_return;
}
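/*
 * Example (illustrative): the ((alignment - 1) & alignment) expression above
 * is the usual power-of-two test.  For alignment == 16, 15 & 16 == 0, so the
 * value is accepted; for alignment == 24, 23 & 24 == 16 != 0, so
 * posix_memalign() reports EINVAL, as it does for alignment == 4 on a 64-bit
 * system, where min_alignment == sizeof(void *) == 8.
 */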
int
je_posix_memalign(void **memptr, size_t alignment, size_t size)
{
	int ret = imemalign(memptr, alignment, size, sizeof(void *));
	JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr,
	    config_prof), false);
	return (ret);
}

void *
je_aligned_alloc(size_t alignment, size_t size)
{
	void *ret;
	int err;

	if ((err = imemalign(&ret, alignment, size, 1)) != 0) {
		ret = NULL;
		set_errno(err);
	}
	JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof),
	    false);
	return (ret);
}

static void *
icalloc_prof_sample(size_t usize, prof_thr_cnt_t *cnt)
{
	void *p;

	if (cnt == NULL)
		return (NULL);
	if (usize <= SMALL_MAXCLASS) {
		p = icalloc(SMALL_MAXCLASS+1);
		if (p == NULL)
			return (NULL);
		arena_prof_promoted(p, usize);
	} else
		p = icalloc(usize);

	return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
icalloc_prof(size_t usize, prof_thr_cnt_t *cnt)
{
	void *p;

	if ((uintptr_t)cnt != (uintptr_t)1U)
		p = icalloc_prof_sample(usize, cnt);
	else
		p = icalloc(usize);
	if (p == NULL)
		return (NULL);
	prof_malloc(p, usize, cnt);

	return (p);
}

void *
je_calloc(size_t num, size_t size)
{
	void *ret;
	size_t num_size;
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);

	if (malloc_init()) {
		num_size = 0;
		ret = NULL;
		goto label_return;
	}

	num_size = num * size;
	if (num_size == 0) {
		if (num == 0 || size == 0)
			num_size = 1;
		else {
			ret = NULL;
			goto label_return;
		}
	/*
	 * Try to avoid division here.  We know that it isn't possible to
	 * overflow during multiplication if neither operand uses any of the
	 * most significant half of the bits in a size_t.
	 */
	} else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
	    && (num_size / size != num)) {
		/* size_t overflow. */
		ret = NULL;
		goto label_return;
	}

	if (config_prof && opt_prof) {
		prof_thr_cnt_t *cnt;

		usize = s2u(num_size);
		PROF_ALLOC_PREP(1, usize, cnt);
		ret = icalloc_prof(usize, cnt);
	} else {
		if (config_stats || (config_valgrind && opt_valgrind))
			usize = s2u(num_size);
		ret = icalloc(num_size);
	}

label_return:
	if (ret == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in calloc(): out of "
			    "memory\n");
			abort();
		}
		set_errno(ENOMEM);
	}
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret, config_prof));
		thread_allocated_tsd_get()->allocated += usize;
	}
	UTRACE(0, num_size, ret);
	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true);
	return (ret);
}
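/*
 * Example (illustrative): on a 64-bit system the mask in je_calloc() above is
 * SIZE_T_MAX << 32, i.e. the upper halves of num and size.  For
 * num == size == 1000 no masked bit is set, so the division is skipped; for
 * num == SIZE_T_MAX / 2 + 2 and size == 2 the product wraps to 2, and
 * num_size / size != num exposes the overflow.
 */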
static void *
irealloc_prof_sample(void *oldptr, size_t usize, prof_thr_cnt_t *cnt)
{
	void *p;

	if (cnt == NULL)
		return (NULL);
	if (usize <= SMALL_MAXCLASS) {
		p = iralloc(oldptr, SMALL_MAXCLASS+1, 0, 0, false);
		if (p == NULL)
			return (NULL);
		arena_prof_promoted(p, usize);
	} else
		p = iralloc(oldptr, usize, 0, 0, false);

	return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
irealloc_prof(void *oldptr, size_t old_usize, size_t usize, prof_thr_cnt_t *cnt)
{
	void *p;
	prof_ctx_t *old_ctx;

	old_ctx = prof_ctx_get(oldptr);
	if ((uintptr_t)cnt != (uintptr_t)1U)
		p = irealloc_prof_sample(oldptr, usize, cnt);
	else
		p = iralloc(oldptr, usize, 0, 0, false);
	if (p == NULL)
		return (NULL);
	prof_realloc(p, usize, cnt, old_usize, old_ctx);

	return (p);
}

JEMALLOC_INLINE_C void
ifree(void *ptr)
{
	size_t usize;
	UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);

	assert(ptr != NULL);
	assert(malloc_initialized || IS_INITIALIZER);

	if (config_prof && opt_prof) {
		usize = isalloc(ptr, config_prof);
		prof_free(ptr, usize);
	} else if (config_stats || config_valgrind)
		usize = isalloc(ptr, config_prof);
	if (config_stats)
		thread_allocated_tsd_get()->deallocated += usize;
	if (config_valgrind && opt_valgrind)
		rzsize = p2rz(ptr);
	iqalloc(ptr);
	JEMALLOC_VALGRIND_FREE(ptr, rzsize);
}

void *
je_realloc(void *ptr, size_t size)
{
	void *ret;
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
	size_t old_usize = 0;
	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);

	if (size == 0) {
		if (ptr != NULL) {
			/* realloc(ptr, 0) is equivalent to free(ptr). */
			UTRACE(ptr, 0, 0);
			ifree(ptr);
			return (NULL);
		}
		size = 1;
	}

	if (ptr != NULL) {
		assert(malloc_initialized || IS_INITIALIZER);
		malloc_thread_init();

		if ((config_prof && opt_prof) || config_stats ||
		    (config_valgrind && opt_valgrind))
			old_usize = isalloc(ptr, config_prof);
		if (config_valgrind && opt_valgrind)
			old_rzsize = config_prof ? p2rz(ptr) : u2rz(old_usize);

		if (config_prof && opt_prof) {
			prof_thr_cnt_t *cnt;

			usize = s2u(size);
			PROF_ALLOC_PREP(1, usize, cnt);
			ret = irealloc_prof(ptr, old_usize, usize, cnt);
		} else {
			if (config_stats || (config_valgrind && opt_valgrind))
				usize = s2u(size);
			ret = iralloc(ptr, size, 0, 0, false);
		}
	} else {
		/* realloc(NULL, size) is equivalent to malloc(size). */
		MALLOC_BODY(ret, size, usize);
	}

	if (ret == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in realloc(): "
			    "out of memory\n");
			abort();
		}
		set_errno(ENOMEM);
	}
	if (config_stats && ret != NULL) {
		thread_allocated_t *ta;
		assert(usize == isalloc(ret, config_prof));
		ta = thread_allocated_tsd_get();
		ta->allocated += usize;
		ta->deallocated += old_usize;
	}
	UTRACE(ptr, size, ret);
	JEMALLOC_VALGRIND_REALLOC(ret, usize, ptr, old_usize, old_rzsize,
	    false);
	return (ret);
}

void
je_free(void *ptr)
{

	UTRACE(ptr, 0, 0);
	if (ptr != NULL)
		ifree(ptr);
}
/*
 * End malloc(3)-compatible functions.
 */
/******************************************************************************/
/*
 * Begin non-standard override functions.
 */

#ifdef JEMALLOC_OVERRIDE_MEMALIGN
void *
je_memalign(size_t alignment, size_t size)
{
	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
	imemalign(&ret, alignment, size, 1);
	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
	return (ret);
}
#endif

#ifdef JEMALLOC_OVERRIDE_VALLOC
void *
je_valloc(size_t size)
{
	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
	imemalign(&ret, PAGE, size, 1);
	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
	return (ret);
}
#endif

/*
 * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
 * #define je_malloc malloc
 */
#define	malloc_is_malloc 1
#define	is_malloc_(a) malloc_is_ ## a
#define	is_malloc(a) is_malloc_(a)

#if ((is_malloc(je_malloc) == 1) && defined(__GLIBC__) && !defined(__UCLIBC__))
/*
 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
 * to inconsistently reference libc's malloc(3)-compatible functions
 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
 *
 * These definitions interpose hooks in glibc.  The functions are actually
 * passed an extra argument for the caller return address, which will be
 * ignored.
 */
JEMALLOC_EXPORT void (* __free_hook)(void *ptr) = je_free;
JEMALLOC_EXPORT void *(* __malloc_hook)(size_t size) = je_malloc;
JEMALLOC_EXPORT void *(* __realloc_hook)(void *ptr, size_t size) = je_realloc;
JEMALLOC_EXPORT void *(* __memalign_hook)(size_t alignment, size_t size) =
    je_memalign;
#endif

/*
 * End non-standard override functions.
 */
/******************************************************************************/
/*
 * Begin non-standard functions.
 */

JEMALLOC_ALWAYS_INLINE_C void *
imallocx(size_t usize, size_t alignment, bool zero, bool try_tcache,
    arena_t *arena)
{

	assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize,
	    alignment)));

	if (alignment != 0)
		return (ipalloct(usize, alignment, zero, try_tcache, arena));
	else if (zero)
		return (icalloct(usize, try_tcache, arena));
	else
		return (imalloct(usize, try_tcache, arena));
}
static void *
imallocx_prof_sample(size_t usize, size_t alignment, bool zero, bool try_tcache,
    arena_t *arena, prof_thr_cnt_t *cnt)
{
	void *p;

	if (cnt == NULL)
		return (NULL);
	if (usize <= SMALL_MAXCLASS) {
		size_t usize_promoted = (alignment == 0) ?
		    s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1, alignment);
		assert(usize_promoted != 0);
		p = imallocx(usize_promoted, alignment, zero, try_tcache,
		    arena);
		if (p == NULL)
			return (NULL);
		arena_prof_promoted(p, usize);
	} else
		p = imallocx(usize, alignment, zero, try_tcache, arena);

	return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
imallocx_prof(size_t usize, size_t alignment, bool zero, bool try_tcache,
    arena_t *arena, prof_thr_cnt_t *cnt)
{
	void *p;

	if ((uintptr_t)cnt != (uintptr_t)1U) {
		p = imallocx_prof_sample(usize, alignment, zero, try_tcache,
		    arena, cnt);
	} else
		p = imallocx(usize, alignment, zero, try_tcache, arena);
	if (p == NULL)
		return (NULL);
	prof_malloc(p, usize, cnt);

	return (p);
}

void *
je_mallocx(size_t size, int flags)
{
	void *p;
	size_t usize;
	size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));
	bool zero = flags & MALLOCX_ZERO;
	unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
	arena_t *arena;
	bool try_tcache;

	assert(size != 0);

	if (malloc_init())
		goto label_oom;

	if (arena_ind != UINT_MAX) {
		arena = arenas[arena_ind];
		try_tcache = false;
	} else {
		arena = NULL;
		try_tcache = true;
	}

	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
	assert(usize != 0);

	if (config_prof && opt_prof) {
		prof_thr_cnt_t *cnt;

		PROF_ALLOC_PREP(1, usize, cnt);
		p = imallocx_prof(usize, alignment, zero, try_tcache, arena,
		    cnt);
	} else
		p = imallocx(usize, alignment, zero, try_tcache, arena);
	if (p == NULL)
		goto label_oom;

	if (config_stats) {
		assert(usize == isalloc(p, config_prof));
		thread_allocated_tsd_get()->allocated += usize;
	}
	UTRACE(0, size, p);
	JEMALLOC_VALGRIND_MALLOC(true, p, usize, zero);
	return (p);
label_oom:
	if (config_xmalloc && opt_xmalloc) {
		malloc_write("<jemalloc>: Error in mallocx(): out of memory\n");
		abort();
	}
	UTRACE(0, size, 0);
	return (NULL);
}
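/*
 * Example (illustrative sketch of the flags decoding above, assuming the
 * public MALLOCX_ALIGN(), MALLOCX_ZERO, and MALLOCX_ARENA() macros from the
 * jemalloc header): the low bits carry lg(alignment), MALLOCX_ZERO requests
 * zeroed memory, and bits 8 and up carry (arena index + 1).  A 64-byte
 * aligned, zeroed allocation from arena 3 would be requested as
 *
 *	p = mallocx(4096, MALLOCX_ALIGN(64) | MALLOCX_ZERO |
 *	    MALLOCX_ARENA(3));
 *
 * which decodes here as alignment == 64, zero == true, arena_ind == 3.
 */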
static void *
irallocx_prof_sample(void *oldptr, size_t size, size_t alignment, size_t usize,
    bool zero, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena,
    prof_thr_cnt_t *cnt)
{
	void *p;

	if (cnt == NULL)
		return (NULL);
	if (usize <= SMALL_MAXCLASS) {
		p = iralloct(oldptr, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
		    size) ? 0 : size - (SMALL_MAXCLASS+1), alignment, zero,
		    try_tcache_alloc, try_tcache_dalloc, arena);
		if (p == NULL)
			return (NULL);
		arena_prof_promoted(p, usize);
	} else {
		p = iralloct(oldptr, size, 0, alignment, zero,
		    try_tcache_alloc, try_tcache_dalloc, arena);
	}

	return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
irallocx_prof(void *oldptr, size_t old_usize, size_t size, size_t alignment,
    size_t *usize, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc,
    arena_t *arena, prof_thr_cnt_t *cnt)
{
	void *p;
	prof_ctx_t *old_ctx;

	old_ctx = prof_ctx_get(oldptr);
	if ((uintptr_t)cnt != (uintptr_t)1U)
		p = irallocx_prof_sample(oldptr, size, alignment, *usize, zero,
		    try_tcache_alloc, try_tcache_dalloc, arena, cnt);
	else {
		p = iralloct(oldptr, size, 0, alignment, zero,
		    try_tcache_alloc, try_tcache_dalloc, arena);
	}
	if (p == NULL)
		return (NULL);

	if (p == oldptr && alignment != 0) {
		/*
		 * The allocation did not move, so it is possible that the size
		 * class is smaller than would guarantee the requested
		 * alignment, and that the alignment constraint was
		 * serendipitously satisfied.  Additionally, old_usize may not
		 * be the same as the current usize because of in-place large
		 * reallocation.  Therefore, query the actual value of usize.
		 */
		*usize = isalloc(p, config_prof);
	}
	prof_realloc(p, *usize, cnt, old_usize, old_ctx);

	return (p);
}
void *
je_rallocx(void *ptr, size_t size, int flags)
{
	void *p;
	size_t usize, old_usize;
	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
	size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));
	bool zero = flags & MALLOCX_ZERO;
	unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
	bool try_tcache_alloc, try_tcache_dalloc;
	arena_t *arena;

	assert(ptr != NULL);
	assert(size != 0);
	assert(malloc_initialized || IS_INITIALIZER);
	malloc_thread_init();

	if (arena_ind != UINT_MAX) {
		arena_chunk_t *chunk;
		try_tcache_alloc = false;
		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
		try_tcache_dalloc = (chunk == ptr || chunk->arena !=
		    arenas[arena_ind]);
		arena = arenas[arena_ind];
	} else {
		try_tcache_alloc = true;
		try_tcache_dalloc = true;
		arena = NULL;
	}

	if ((config_prof && opt_prof) || config_stats ||
	    (config_valgrind && opt_valgrind))
		old_usize = isalloc(ptr, config_prof);
	if (config_valgrind && opt_valgrind)
		old_rzsize = u2rz(old_usize);

	if (config_prof && opt_prof) {
		prof_thr_cnt_t *cnt;

		usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
		assert(usize != 0);
		PROF_ALLOC_PREP(1, usize, cnt);
		p = irallocx_prof(ptr, old_usize, size, alignment, &usize, zero,
		    try_tcache_alloc, try_tcache_dalloc, arena, cnt);
		if (p == NULL)
			goto label_oom;
	} else {
		p = iralloct(ptr, size, 0, alignment, zero, try_tcache_alloc,
		    try_tcache_dalloc, arena);
		if (p == NULL)
			goto label_oom;
		if (config_stats || (config_valgrind && opt_valgrind))
			usize = isalloc(p, config_prof);
	}

	if (config_stats) {
		thread_allocated_t *ta;
		ta = thread_allocated_tsd_get();
		ta->allocated += usize;
		ta->deallocated += old_usize;
	}
	UTRACE(ptr, size, p);
	JEMALLOC_VALGRIND_REALLOC(p, usize, ptr, old_usize, old_rzsize, zero);
	return (p);
label_oom:
	if (config_xmalloc && opt_xmalloc) {
		malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
		abort();
	}
	UTRACE(ptr, size, 0);
	return (NULL);
}
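/*
 * Example (illustrative usage sketch): unlike realloc(), rallocx() can carry
 * an alignment constraint across a resize:
 *
 *	void *p = mallocx(1024, MALLOCX_ALIGN(64));
 *	p = rallocx(p, 4096, MALLOCX_ALIGN(64));
 *
 * (error handling elided; on failure rallocx() returns NULL and leaves the
 * original allocation intact).
 */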
JEMALLOC_ALWAYS_INLINE_C size_t
ixallocx_helper(void *ptr, size_t old_usize, size_t size, size_t extra,
    size_t alignment, bool zero, arena_t *arena)
{
	size_t usize;

	if (ixalloc(ptr, size, extra, alignment, zero))
		return (old_usize);
	usize = isalloc(ptr, config_prof);

	return (usize);
}

static size_t
ixallocx_prof_sample(void *ptr, size_t old_usize, size_t size, size_t extra,
    size_t alignment, size_t max_usize, bool zero, arena_t *arena,
    prof_thr_cnt_t *cnt)
{
	size_t usize;

	if (cnt == NULL)
		return (old_usize);
	/* Use minimum usize to determine whether promotion may happen. */
	if (((alignment == 0) ? s2u(size) : sa2u(size, alignment)) <=
	    SMALL_MAXCLASS) {
		if (ixalloc(ptr, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
		    size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
		    alignment, zero))
			return (old_usize);
		usize = isalloc(ptr, config_prof);
		if (max_usize < PAGE)
			arena_prof_promoted(ptr, usize);
	} else {
		usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
		    zero, arena);
	}

	return (usize);
}

JEMALLOC_ALWAYS_INLINE_C size_t
ixallocx_prof(void *ptr, size_t old_usize, size_t size, size_t extra,
    size_t alignment, size_t max_usize, bool zero, arena_t *arena,
    prof_thr_cnt_t *cnt)
{
	size_t usize;
	prof_ctx_t *old_ctx;

	old_ctx = prof_ctx_get(ptr);
	if ((uintptr_t)cnt != (uintptr_t)1U) {
		usize = ixallocx_prof_sample(ptr, old_usize, size, extra,
		    alignment, max_usize, zero, arena, cnt);
	} else {
		usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
		    zero, arena);
	}
	if (usize == old_usize)
		return (usize);
	prof_realloc(ptr, usize, cnt, old_usize, old_ctx);

	return (usize);
}
size_t
je_xallocx(void *ptr, size_t size, size_t extra, int flags)
{
	size_t usize, old_usize;
	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
	size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));
	bool zero = flags & MALLOCX_ZERO;
	unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
	arena_t *arena;

	assert(ptr != NULL);
	assert(size != 0);
	assert(SIZE_T_MAX - size >= extra);
	assert(malloc_initialized || IS_INITIALIZER);
	malloc_thread_init();

	if (arena_ind != UINT_MAX)
		arena = arenas[arena_ind];
	else
		arena = NULL;

	old_usize = isalloc(ptr, config_prof);
	if (config_valgrind && opt_valgrind)
		old_rzsize = u2rz(old_usize);

	if (config_prof && opt_prof) {
		prof_thr_cnt_t *cnt;
		/*
		 * usize isn't knowable before ixalloc() returns when extra is
		 * non-zero.  Therefore, compute its maximum possible value and
		 * use that in PROF_ALLOC_PREP() to decide whether to capture a
		 * backtrace.  prof_realloc() will use the actual usize to
		 * decide whether to sample.
		 */
		size_t max_usize = (alignment == 0) ? s2u(size+extra) :
		    sa2u(size+extra, alignment);
		PROF_ALLOC_PREP(1, max_usize, cnt);
		usize = ixallocx_prof(ptr, old_usize, size, extra, alignment,
		    max_usize, zero, arena, cnt);
	} else {
		usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
		    zero, arena);
	}
	if (usize == old_usize)
		goto label_not_resized;

	if (config_stats) {
		thread_allocated_t *ta;
		ta = thread_allocated_tsd_get();
		ta->allocated += usize;
		ta->deallocated += old_usize;
	}
	JEMALLOC_VALGRIND_REALLOC(ptr, usize, ptr, old_usize, old_rzsize, zero);
label_not_resized:
	UTRACE(ptr, size, ptr);
	return (usize);
}

size_t
je_sallocx(const void *ptr, int flags)
{
	size_t usize;

	assert(malloc_initialized || IS_INITIALIZER);
	malloc_thread_init();

	if (config_ivsalloc)
		usize = ivsalloc(ptr, config_prof);
	else {
		assert(ptr != NULL);
		usize = isalloc(ptr, config_prof);
	}

	return (usize);
}
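/*
 * Example (illustrative usage sketch): xallocx() resizes strictly in place
 * and returns the resulting usable size, so callers detect failure by
 * comparing against the requested size:
 *
 *	if (xallocx(ptr, new_size, 0, 0) < new_size) {
 *		... could not grow in place; fall back to rallocx() ...
 *	}
 *
 * je_rallocm() below relies on exactly this comparison.
 */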
void
je_dallocx(void *ptr, int flags)
{
	size_t usize;
	UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
	unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
	bool try_tcache;

	assert(ptr != NULL);
	assert(malloc_initialized || IS_INITIALIZER);

	if (arena_ind != UINT_MAX) {
		arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
		try_tcache = (chunk == ptr || chunk->arena !=
		    arenas[arena_ind]);
	} else
		try_tcache = true;

	UTRACE(ptr, 0, 0);
	if (config_stats || config_valgrind)
		usize = isalloc(ptr, config_prof);
	if (config_prof && opt_prof) {
		if (config_stats == false && config_valgrind == false)
			usize = isalloc(ptr, config_prof);
		prof_free(ptr, usize);
	}
	if (config_stats)
		thread_allocated_tsd_get()->deallocated += usize;
	if (config_valgrind && opt_valgrind)
		rzsize = p2rz(ptr);
	iqalloct(ptr, try_tcache);
	JEMALLOC_VALGRIND_FREE(ptr, rzsize);
}

size_t
je_nallocx(size_t size, int flags)
{
	size_t usize;
	size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));

	assert(size != 0);

	if (malloc_init())
		return (0);

	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
	assert(usize != 0);
	return (usize);
}
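/*
 * Example (illustrative usage sketch): nallocx() reports the usable size that
 * a mallocx() call with the same arguments would produce, without allocating.
 * With typical size classes nallocx(500, 0) returns 512, so a caller can
 * request the full size class up front:
 *
 *	size_t usable = nallocx(500, 0);
 *	void *buf = mallocx(usable, 0);
 */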
int
je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_byname(name, oldp, oldlenp, newp, newlen));
}

int
je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_nametomib(name, mibp, miblenp));
}

int
je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
}

void
je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *opts)
{

	stats_print(write_cb, cbopaque, opts);
}

size_t
je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
{
	size_t ret;

	assert(malloc_initialized || IS_INITIALIZER);
	malloc_thread_init();

	if (config_ivsalloc)
		ret = ivsalloc(ptr, config_prof);
	else
		ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0;

	return (ret);
}
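/*
 * Example (illustrative usage sketch): the mallctl*() interface reads and
 * writes named controls.  Refreshing the stats epoch and then reading the
 * total number of bytes allocated looks like:
 *
 *	uint64_t epoch = 1;
 *	size_t allocated, sz = sizeof(allocated);
 *	mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch));
 *	mallctl("stats.allocated", &allocated, &sz, NULL, 0);
 */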
/*
 * End non-standard functions.
 */
/******************************************************************************/
/*
 * Begin experimental functions.
 */
#ifdef JEMALLOC_EXPERIMENTAL

int
je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
{
	void *p;

	assert(ptr != NULL);

	p = je_mallocx(size, flags);
	if (p == NULL)
		return (ALLOCM_ERR_OOM);
	if (rsize != NULL)
		*rsize = isalloc(p, config_prof);
	*ptr = p;
	return (ALLOCM_SUCCESS);
}

int
je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
{
	int ret;
	bool no_move = flags & ALLOCM_NO_MOVE;

	assert(ptr != NULL);
	assert(*ptr != NULL);
	assert(size != 0);
	assert(SIZE_T_MAX - size >= extra);

	if (no_move) {
		size_t usize = je_xallocx(*ptr, size, extra, flags);
		ret = (usize >= size) ? ALLOCM_SUCCESS : ALLOCM_ERR_NOT_MOVED;
		if (rsize != NULL)
			*rsize = usize;
	} else {
		void *p = je_rallocx(*ptr, size+extra, flags);
		if (p != NULL) {
			*ptr = p;
			ret = ALLOCM_SUCCESS;
		} else
			ret = ALLOCM_ERR_OOM;
		if (rsize != NULL)
			*rsize = isalloc(*ptr, config_prof);
	}
	return (ret);
}

int
je_sallocm(const void *ptr, size_t *rsize, int flags)
{

	assert(rsize != NULL);
	*rsize = je_sallocx(ptr, flags);
	return (ALLOCM_SUCCESS);
}

int
je_dallocm(void *ptr, int flags)
{

	je_dallocx(ptr, flags);
	return (ALLOCM_SUCCESS);
}

int
je_nallocm(size_t *rsize, size_t size, int flags)
{
	size_t usize;

	usize = je_nallocx(size, flags);
	if (usize == 0)
		return (ALLOCM_ERR_OOM);
	if (rsize != NULL)
		*rsize = usize;
	return (ALLOCM_SUCCESS);
}

#endif
/*
 * End experimental functions.
 */
/******************************************************************************/
/*
 * The following functions are used by threading libraries for protection of
 * malloc during fork().
 */

/*
 * If an application creates a thread before doing any allocation in the main
 * thread, then calls fork(2) in the main thread followed by memory allocation
 * in the child process, a race can occur that results in deadlock within the
 * child: the main thread may have forked while the created thread had
 * partially initialized the allocator.  Ordinarily jemalloc prevents
 * fork/malloc races via the following functions it registers during
 * initialization using pthread_atfork(), but of course that does no good if
 * the allocator isn't fully initialized at fork time.  The following library
 * constructor is a partial solution to this problem.  It may still be
 * possible to trigger the deadlock described above, but doing so would
 * involve forking via a library constructor that runs before jemalloc's runs.
 */
JEMALLOC_ATTR(constructor)
static void
jemalloc_constructor(void)
{

	malloc_init();
}

#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_prefork(void)
#else
JEMALLOC_EXPORT void
_malloc_prefork(void)
#endif
{
	unsigned i;

#ifdef JEMALLOC_MUTEX_INIT_CB
	if (malloc_initialized == false)
		return;
#endif
	assert(malloc_initialized);

	/* Acquire all mutexes in a safe order. */
	ctl_prefork();
	prof_prefork();
	malloc_mutex_prefork(&arenas_lock);
	for (i = 0; i < narenas_total; i++) {
		if (arenas[i] != NULL)
			arena_prefork(arenas[i]);
	}
	chunk_prefork();
	base_prefork();
	huge_prefork();
}

#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_postfork_parent(void)
#else
JEMALLOC_EXPORT void
_malloc_postfork(void)
#endif
{
	unsigned i;

#ifdef JEMALLOC_MUTEX_INIT_CB
	if (malloc_initialized == false)
		return;
#endif
	assert(malloc_initialized);

	/* Release all mutexes, now that fork() has completed. */
	huge_postfork_parent();
	base_postfork_parent();
	chunk_postfork_parent();
	for (i = 0; i < narenas_total; i++) {
		if (arenas[i] != NULL)
			arena_postfork_parent(arenas[i]);
	}
	malloc_mutex_postfork_parent(&arenas_lock);
	prof_postfork_parent();
	ctl_postfork_parent();
}

void
jemalloc_postfork_child(void)
{
	unsigned i;

	assert(malloc_initialized);

	/* Release all mutexes, now that fork() has completed. */
	huge_postfork_child();
	base_postfork_child();
	chunk_postfork_child();
	for (i = 0; i < narenas_total; i++) {
		if (arenas[i] != NULL)
			arena_postfork_child(arenas[i]);
	}
	malloc_mutex_postfork_child(&arenas_lock);
	prof_postfork_child();
	ctl_postfork_child();
}
/******************************************************************************/
/*
 * The following functions are used for TLS allocation/deallocation in static
 * binaries on FreeBSD.  The primary difference between these and i[mcd]alloc()
 * is that these avoid accessing TLS variables.
 */

static void *
a0alloc(size_t size, bool zero)
{

	if (malloc_init())
		return (NULL);

	if (size == 0)
		size = 1;

	if (size <= arena_maxclass)
		return (arena_malloc(arenas[0], size, zero, false));
	else
		return (huge_malloc(size, zero, huge_dss_prec_get(arenas[0])));
}

void *
a0malloc(size_t size)
{

	return (a0alloc(size, false));
}

void *
a0calloc(size_t num, size_t size)
{

	return (a0alloc(num * size, true));
}

void
a0free(void *ptr)
{
	arena_chunk_t *chunk;

	if (ptr == NULL)
		return;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr)
		arena_dalloc(chunk, ptr, false);
	else
		huge_dalloc(ptr, true);
}

/******************************************************************************/