jemalloc.c revision 94ed6812bc04a6171d1a801f2740355f458d5c9c
#define JEMALLOC_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

malloc_tsd_data(, arenas, arena_t *, NULL)
malloc_tsd_data(, thread_allocated, thread_allocated_t,
    THREAD_ALLOCATED_INITIALIZER)

/* Runtime configuration options. */
const char *je_malloc_conf;
bool opt_abort =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
bool opt_junk =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
    true
#else
    false
#endif
    ;
size_t opt_quarantine = ZU(0);
bool opt_redzone = false;
bool opt_utrace = false;
bool opt_xmalloc = false;
bool opt_zero = false;
size_t opt_narenas = 0;

/* Initialized to true if the process is running inside Valgrind. */
bool in_valgrind;

unsigned ncpus;

malloc_mutex_t arenas_lock;
arena_t **arenas;
unsigned narenas_total;
unsigned narenas_auto;

/* Set to true once the allocator has been initialized. */
static bool malloc_initialized = false;

#ifdef JEMALLOC_THREADED_INIT
/* Used to let the initializing thread recursively allocate. */
# define NO_INITIALIZER ((unsigned long)0)
# define INITIALIZER pthread_self()
# define IS_INITIALIZER (malloc_initializer == pthread_self())
static pthread_t malloc_initializer = NO_INITIALIZER;
#else
# define NO_INITIALIZER false
# define INITIALIZER true
# define IS_INITIALIZER malloc_initializer
static bool malloc_initializer = NO_INITIALIZER;
#endif

/* Used to avoid initialization races. */
#ifdef _WIN32
static malloc_mutex_t init_lock;

JEMALLOC_ATTR(constructor)
static void WINAPI
_init_init_lock(void)
{

	malloc_mutex_init(&init_lock);
}

#ifdef _MSC_VER
# pragma section(".CRT$XCU", read)
JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
#endif

#else
static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER;
#endif

typedef struct {
	void *p;	/* Input pointer (as in realloc(p, s)). */
	size_t s;	/* Request size. */
	void *r;	/* Result pointer. */
} malloc_utrace_t;

#ifdef JEMALLOC_UTRACE
# define UTRACE(a, b, c) do { \
	if (opt_utrace) { \
		int utrace_serrno = errno; \
		malloc_utrace_t ut; \
		ut.p = (a); \
		ut.s = (b); \
		ut.r = (c); \
		utrace(&ut, sizeof(ut)); \
		errno = utrace_serrno; \
	} \
} while (0)
#else
# define UTRACE(a, b, c)
#endif

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static bool malloc_init_hard(void);

/******************************************************************************/
/*
 * Begin miscellaneous support functions.
 */

/* Create a new arena and insert it into the arenas array at index ind. */
arena_t *
arenas_extend(unsigned ind)
{
	arena_t *ret;

	ret = (arena_t *)base_alloc(sizeof(arena_t));
	if (ret != NULL && arena_new(ret, ind) == false) {
		arenas[ind] = ret;
		return (ret);
	}
	/* Only reached if there is an OOM error. */

	/*
	 * OOM here is quite inconvenient to propagate, since dealing with it
	 * would require a check for failure in the fast path. Instead, punt
	 * by using arenas[0]. In practice, this is an extremely unlikely
	 * failure.
	 */
	malloc_write("<jemalloc>: Error initializing arena\n");
	if (opt_abort)
		abort();

	return (arenas[0]);
}

/* Slow path, called only by choose_arena(). */
arena_t *
choose_arena_hard(void)
{
	arena_t *ret;

	if (narenas_auto > 1) {
		unsigned i, choose, first_null;

		choose = 0;
		first_null = narenas_auto;
		malloc_mutex_lock(&arenas_lock);
		assert(arenas[0] != NULL);
		for (i = 1; i < narenas_auto; i++) {
			if (arenas[i] != NULL) {
				/*
				 * Choose the first arena that has the lowest
				 * number of threads assigned to it.
				 */
				if (arenas[i]->nthreads <
				    arenas[choose]->nthreads)
					choose = i;
			} else if (first_null == narenas_auto) {
				/*
				 * Record the index of the first uninitialized
				 * arena, in case all extant arenas are in use.
				 *
				 * NB: It is possible for there to be
				 * discontinuities in terms of initialized
				 * versus uninitialized arenas, due to the
				 * "thread.arena" mallctl.
				 */
				first_null = i;
			}
		}

		if (arenas[choose]->nthreads == 0
		    || first_null == narenas_auto) {
			/*
			 * Use an unloaded arena, or the least loaded arena if
			 * all arenas are already initialized.
			 */
			ret = arenas[choose];
		} else {
			/* Initialize a new arena. */
			ret = arenas_extend(first_null);
		}
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	} else {
		ret = arenas[0];
		malloc_mutex_lock(&arenas_lock);
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	}

	arenas_tsd_set(&ret);

	return (ret);
}

static void
stats_print_atexit(void)
{

	if (config_tcache && config_stats) {
		unsigned narenas, i;

		/*
		 * Merge stats from extant threads. This is racy, since
		 * individual threads do not lock when recording tcache stats
		 * events. As a consequence, the final stats may be slightly
		 * out of date by the time they are reported, if other threads
		 * continue to allocate.
		 */
		for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
			arena_t *arena = arenas[i];
			if (arena != NULL) {
				tcache_t *tcache;

				/*
				 * tcache_stats_merge() locks bins, so if any
				 * code is introduced that acquires both arena
				 * and bin locks in the opposite order,
				 * deadlocks may result.
				 */
				malloc_mutex_lock(&arena->lock);
				ql_foreach(tcache, &arena->tcache_ql, link) {
					tcache_stats_merge(tcache, arena);
				}
				malloc_mutex_unlock(&arena->lock);
			}
		}
	}
	je_malloc_stats_print(NULL, NULL, NULL);
}

/*
 * End miscellaneous support functions.
 */
/******************************************************************************/
/*
 * Begin initialization functions.
 */

static unsigned
malloc_ncpus(void)
{
	long result;

#ifdef _WIN32
	SYSTEM_INFO si;
	GetSystemInfo(&si);
	result = si.dwNumberOfProcessors;
#else
	result = sysconf(_SC_NPROCESSORS_ONLN);
#endif
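	/* Fall back to one CPU if the CPU count cannot be determined. */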
	return ((result == -1) ? 1 : (unsigned)result);
}

void
arenas_cleanup(void *arg)
{
	arena_t *arena = *(arena_t **)arg;

	malloc_mutex_lock(&arenas_lock);
	arena->nthreads--;
	malloc_mutex_unlock(&arenas_lock);
}

JEMALLOC_ALWAYS_INLINE_C void
malloc_thread_init(void)
{

	/*
	 * TSD initialization can't be safely done as a side effect of
	 * deallocation, because it is possible for a thread to do nothing but
	 * deallocate its TLS data via free(), in which case writing to TLS
	 * would cause write-after-free memory corruption. The quarantine
	 * facility *only* gets used as a side effect of deallocation, so make
	 * a best effort attempt at initializing its TSD by hooking all
	 * allocation events.
	 */
	if (config_fill && opt_quarantine)
		quarantine_alloc_hook();
}

JEMALLOC_ALWAYS_INLINE_C bool
malloc_init(void)
{

	if (malloc_initialized == false && malloc_init_hard())
		return (true);
	malloc_thread_init();

	return (false);
}

static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p)
{
	bool accept;
	const char *opts = *opts_p;

	*k_p = opts;

	for (accept = false; accept == false;) {
		switch (*opts) {
		case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
		case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
		case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
		case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
		case 'Y': case 'Z':
		case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
		case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
		case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
		case 's': case 't': case 'u': case 'v': case 'w': case 'x':
		case 'y': case 'z':
		case '0': case '1': case '2': case '3': case '4': case '5':
		case '6': case '7': case '8': case '9':
		case '_':
			opts++;
			break;
		case ':':
			opts++;
			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
			*v_p = opts;
			accept = true;
			break;
		case '\0':
			if (opts != *opts_p) {
				malloc_write("<jemalloc>: Conf string ends "
				    "with key\n");
			}
			return (true);
		default:
			malloc_write("<jemalloc>: Malformed conf string\n");
			return (true);
		}
	}

	for (accept = false; accept == false;) {
		switch (*opts) {
		case ',':
			opts++;
			/*
			 * Look ahead one character here, because the next time
			 * this function is called, it will assume that end of
			 * input has been cleanly reached if no input remains,
			 * but we have optimistically already consumed the
			 * comma if one exists.
			 */
			if (*opts == '\0') {
				malloc_write("<jemalloc>: Conf string ends "
				    "with comma\n");
			}
			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
			accept = true;
			break;
		case '\0':
			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
			accept = true;
			break;
		default:
			opts++;
			break;
		}
	}

	*opts_p = opts;
	return (false);
}

static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
    size_t vlen)
{

	malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
	    (int)vlen, v);
}

static void
malloc_conf_init(void)
{
	unsigned i;
	char buf[PATH_MAX + 1];
	const char *opts, *k, *v;
	size_t klen, vlen;

	/*
	 * Automatically configure valgrind before processing options. The
	 * valgrind option remains in jemalloc 3.x for compatibility reasons.
	 */
	if (config_valgrind) {
		in_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false;
		if (config_fill && in_valgrind) {
			opt_junk = false;
			assert(opt_zero == false);
			opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
			opt_redzone = true;
		}
		if (config_tcache && in_valgrind)
			opt_tcache = false;
	}

	for (i = 0; i < 3; i++) {
		/* Get runtime configuration. */
		switch (i) {
		case 0:
			if (je_malloc_conf != NULL) {
				/*
				 * Use options that were compiled into the
				 * program.
				 */
				opts = je_malloc_conf;
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		case 1: {
			int linklen = 0;
#ifndef _WIN32
			int saved_errno = errno;
			const char *linkname =
# ifdef JEMALLOC_PREFIX
			    "/etc/"JEMALLOC_PREFIX"malloc.conf"
# else
			    "/etc/malloc.conf"
# endif
			    ;

			/*
			 * Try to use the contents of the "/etc/malloc.conf"
			 * symbolic link's name.
			 */
			linklen = readlink(linkname, buf, sizeof(buf) - 1);
			if (linklen == -1) {
				/* No configuration specified. */
				linklen = 0;
				/* restore errno */
				set_errno(saved_errno);
			}
#endif
			buf[linklen] = '\0';
			opts = buf;
			break;
		} case 2: {
			const char *envname =
#ifdef JEMALLOC_PREFIX
			    JEMALLOC_CPREFIX"MALLOC_CONF"
#else
			    "MALLOC_CONF"
#endif
			    ;

			if ((opts = getenv(envname)) != NULL) {
				/*
				 * Do nothing; opts is already initialized to
				 * the value of the MALLOC_CONF environment
				 * variable.
				 */
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		} default:
			not_reached();
			buf[0] = '\0';
			opts = buf;
		}

		while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
		    &vlen) == false) {
#define CONF_MATCH(n) \
	(sizeof(n)-1 == klen && strncmp(n, k, klen) == 0)
#define CONF_HANDLE_BOOL(o, n, cont) \
			if (CONF_MATCH(n)) { \
				if (strncmp("true", v, vlen) == 0 && \
				    vlen == sizeof("true")-1) \
					o = true; \
				else if (strncmp("false", v, vlen) == \
				    0 && vlen == sizeof("false")-1) \
					o = false; \
				else { \
					malloc_conf_error( \
					    "Invalid conf value", \
					    k, klen, v, vlen); \
				} \
				if (cont) \
					continue; \
			}
#define CONF_HANDLE_SIZE_T(o, n, min, max, clip) \
			if (CONF_MATCH(n)) { \
				uintmax_t um; \
				char *end; \
\
				set_errno(0); \
				um = malloc_strtoumax(v, &end, 0); \
				if (get_errno() != 0 || (uintptr_t)end - \
				    (uintptr_t)v != vlen) { \
					malloc_conf_error( \
					    "Invalid conf value", \
					    k, klen, v, vlen); \
				} else if (clip) { \
					if (min != 0 && um < min) \
						o = min; \
					else if (um > max) \
						o = max; \
					else \
						o = um; \
				} else { \
					if ((min != 0 && um < min) || \
					    um > max) { \
						malloc_conf_error( \
						    "Out-of-range " \
						    "conf value", \
						    k, klen, v, vlen); \
					} else \
						o = um; \
				} \
				continue; \
			}
#define CONF_HANDLE_SSIZE_T(o, n, min, max) \
			if (CONF_MATCH(n)) { \
				long l; \
				char *end; \
\
				set_errno(0); \
				l = strtol(v, &end, 0); \
				if (get_errno() != 0 || (uintptr_t)end - \
				    (uintptr_t)v != vlen) { \
					malloc_conf_error( \
					    "Invalid conf value", \
					    k, klen, v, vlen); \
				} else if (l < (ssize_t)min || l > \
				    (ssize_t)max) { \
					malloc_conf_error( \
					    "Out-of-range conf value", \
					    k, klen, v, vlen); \
				} else \
					o = l; \
				continue; \
			}
#define CONF_HANDLE_CHAR_P(o, n, d) \
			if (CONF_MATCH(n)) { \
				size_t cpylen = (vlen <= \
				    sizeof(o)-1) ? vlen : \
				    sizeof(o)-1; \
				strncpy(o, v, cpylen); \
				o[cpylen] = '\0'; \
				continue; \
			}

			CONF_HANDLE_BOOL(opt_abort, "abort", true)
			/*
			 * Chunks always require at least one header page, plus
			 * one data page in the absence of redzones, or three
			 * pages in the presence of redzones. In order to
			 * simplify options processing, fix the limit based on
			 * config_fill.
			 */
			CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
			    (config_fill ? 2 : 1), (sizeof(size_t) << 3) - 1,
			    true)
			if (strncmp("dss", k, klen) == 0) {
				int i;
				bool match = false;
				for (i = 0; i < dss_prec_limit; i++) {
					if (strncmp(dss_prec_names[i], v, vlen)
					    == 0) {
						if (chunk_dss_prec_set(i)) {
							malloc_conf_error(
							    "Error setting dss",
							    k, klen, v, vlen);
						} else {
							opt_dss =
							    dss_prec_names[i];
							match = true;
							break;
						}
					}
				}
				if (match == false) {
					malloc_conf_error("Invalid conf value",
					    k, klen, v, vlen);
				}
				continue;
			}
			CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1,
			    SIZE_T_MAX, false)
			CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
			    -1, (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_BOOL(opt_stats_print, "stats_print", true)
			if (config_fill) {
				CONF_HANDLE_BOOL(opt_junk, "junk", true)
				CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
				    0, SIZE_T_MAX, false)
				CONF_HANDLE_BOOL(opt_redzone, "redzone", true)
				CONF_HANDLE_BOOL(opt_zero, "zero", true)
			}
			if (config_utrace) {
				CONF_HANDLE_BOOL(opt_utrace, "utrace", true)
			}
			if (config_xmalloc) {
				CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc", true)
			}
			if (config_tcache) {
				CONF_HANDLE_BOOL(opt_tcache, "tcache",
				    !config_valgrind || !in_valgrind)
				if (CONF_MATCH("tcache")) {
					assert(config_valgrind && in_valgrind);
					if (opt_tcache) {
						opt_tcache = false;
						malloc_conf_error(
						    "tcache cannot be enabled "
						    "while running inside Valgrind",
						    k, klen, v, vlen);
					}
					continue;
				}
				CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
				    "lg_tcache_max", -1,
				    (sizeof(size_t) << 3) - 1)
			}
			if (config_prof) {
				CONF_HANDLE_BOOL(opt_prof, "prof", true)
				CONF_HANDLE_CHAR_P(opt_prof_prefix,
				    "prof_prefix", "jeprof")
				CONF_HANDLE_BOOL(opt_prof_active, "prof_active",
				    true)
				CONF_HANDLE_SSIZE_T(opt_lg_prof_sample,
				    "lg_prof_sample", 0,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum",
				    true)
				CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
				    "lg_prof_interval", -1,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump",
				    true)
				CONF_HANDLE_BOOL(opt_prof_final, "prof_final",
				    true)
				CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak",
				    true)
			}
			malloc_conf_error("Invalid conf pair", k, klen, v,
			    vlen);
#undef CONF_MATCH
#undef CONF_HANDLE_BOOL
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
		}
	}
}

static bool
malloc_init_hard(void)
{
	arena_t *init_arenas[1];

	malloc_mutex_lock(&init_lock);
	if (malloc_initialized || IS_INITIALIZER) {
		/*
		 * Another thread initialized the allocator before this one
		 * acquired init_lock, or this thread is the initializing
		 * thread, and it is recursively allocating.
		 */
		malloc_mutex_unlock(&init_lock);
		return (false);
	}
#ifdef JEMALLOC_THREADED_INIT
	if (malloc_initializer != NO_INITIALIZER && IS_INITIALIZER == false) {
		/* Busy-wait until the initializing thread completes. */
		do {
			malloc_mutex_unlock(&init_lock);
			CPU_SPINWAIT;
			malloc_mutex_lock(&init_lock);
		} while (malloc_initialized == false);
		malloc_mutex_unlock(&init_lock);
		return (false);
	}
#endif
	malloc_initializer = INITIALIZER;

	malloc_tsd_boot();
	if (config_prof)
		prof_boot0();

	malloc_conf_init();

	if (opt_stats_print) {
		/* Print statistics at exit. */
		if (atexit(stats_print_atexit) != 0) {
			malloc_write("<jemalloc>: Error in atexit()\n");
			if (opt_abort)
				abort();
		}
	}

	if (base_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (chunk_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (ctl_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_prof)
		prof_boot1();

	arena_boot();

	if (config_tcache && tcache_boot0()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (huge_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (malloc_mutex_init(&arenas_lock)) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/*
	 * Create enough scaffolding to allow recursive allocation in
	 * malloc_ncpus().
	 */
	narenas_total = narenas_auto = 1;
	arenas = init_arenas;
	memset(arenas, 0, sizeof(arena_t *) * narenas_auto);

	/*
	 * Initialize one arena here. The rest are lazily created in
	 * choose_arena_hard().
	 */
	arenas_extend(0);
	if (arenas[0] == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/* Initialize allocation counters before any allocations can occur. */
	if (config_stats && thread_allocated_tsd_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (arenas_tsd_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_tcache && tcache_boot1()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_fill && quarantine_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_prof && prof_boot2()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	malloc_mutex_unlock(&init_lock);
	/**********************************************************************/
	/* Recursive allocation may follow. */

	ncpus = malloc_ncpus();

#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
    && !defined(_WIN32) && !defined(__native_client__))
	/* LinuxThreads's pthread_atfork() allocates. */
	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
	    jemalloc_postfork_child) != 0) {
		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
		if (opt_abort)
			abort();
	}
#endif

	/* Done recursively allocating. */
	/**********************************************************************/
	malloc_mutex_lock(&init_lock);

	if (mutex_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (opt_narenas == 0) {
		/*
		 * For SMP systems, create more than one arena per CPU by
		 * default.
		 */
		if (ncpus > 1)
			opt_narenas = ncpus << 2;
		else
			opt_narenas = 1;
	}
	narenas_auto = opt_narenas;
	/*
	 * Make sure that the arenas array can be allocated. In practice, this
	 * limit is enough to allow the allocator to function, but the ctl
	 * machinery will fail to allocate memory at far lower limits.
	 */
	if (narenas_auto > chunksize / sizeof(arena_t *)) {
		narenas_auto = chunksize / sizeof(arena_t *);
		malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
		    narenas_auto);
	}
	narenas_total = narenas_auto;

	/* Allocate and initialize arenas. */
	arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas_total);
	if (arenas == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}
	/*
	 * Zero the array. In practice, this should always be pre-zeroed,
	 * since it was just mmap()ed, but let's be sure.
	 */
	memset(arenas, 0, sizeof(arena_t *) * narenas_total);
	/* Copy the pointer to the one arena that was already initialized. */
	arenas[0] = init_arenas[0];

	malloc_initialized = true;
	malloc_mutex_unlock(&init_lock);

	return (false);
}

/*
 * End initialization functions.
 */
/******************************************************************************/
/*
 * Begin malloc(3)-compatible functions.
 */

static void *
imalloc_prof_sample(size_t usize, prof_thr_cnt_t *cnt)
{
	void *p;

	if (cnt == NULL)
		return (NULL);
	if (usize <= SMALL_MAXCLASS) {
		p = imalloc(SMALL_MAXCLASS+1);
		if (p == NULL)
			return (NULL);
		arena_prof_promoted(p, usize);
	} else
		p = imalloc(usize);

	return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
imalloc_prof(size_t usize)
{
	void *p;
	prof_thr_cnt_t *cnt;

	PROF_ALLOC_PREP(usize, cnt);
	if ((uintptr_t)cnt != (uintptr_t)1U)
		p = imalloc_prof_sample(usize, cnt);
	else
		p = imalloc(usize);
	if (p == NULL)
		return (NULL);
	prof_malloc(p, usize, cnt);

	return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
imalloc_body(size_t size, size_t *usize)
{

	if (malloc_init())
		return (NULL);

	if (config_prof && opt_prof) {
		*usize = s2u(size);
		return (imalloc_prof(*usize));
	}

	if (config_stats || (config_valgrind && in_valgrind))
		*usize = s2u(size);
	return (imalloc(size));
}

void *
je_malloc(size_t size)
{
	void *ret;
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);

	if (size == 0)
		size = 1;

	ret = imalloc_body(size, &usize);
	if (ret == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in malloc(): "
			    "out of memory\n");
			abort();
		}
		set_errno(ENOMEM);
	}
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret, config_prof));
		thread_allocated_tsd_get()->allocated += usize;
	}
	UTRACE(0, size, ret);
	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
	return (ret);
}

static void *
imemalign_prof_sample(size_t alignment, size_t usize, prof_thr_cnt_t *cnt)
{
	void *p;

	if (cnt == NULL)
		return (NULL);
	if (usize <= SMALL_MAXCLASS) {
		assert(sa2u(SMALL_MAXCLASS+1, alignment) != 0);
		p = ipalloc(sa2u(SMALL_MAXCLASS+1, alignment), alignment,
		    false);
		if (p == NULL)
			return (NULL);
		arena_prof_promoted(p, usize);
	} else
		p = ipalloc(usize, alignment, false);

	return (p);
}

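/*
 * Aligned allocation with heap profiling: take the sampled path when the
 * profiler selected this allocation, then record it via prof_malloc().
 */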
JEMALLOC_ALWAYS_INLINE_C void *
imemalign_prof(size_t alignment, size_t usize, prof_thr_cnt_t *cnt)
{
	void *p;

	if ((uintptr_t)cnt != (uintptr_t)1U)
		p = imemalign_prof_sample(alignment, usize, cnt);
	else
		p = ipalloc(usize, alignment, false);
	if (p == NULL)
		return (NULL);
	prof_malloc(p, usize, cnt);

	return (p);
}

JEMALLOC_ATTR(nonnull(1))
static int
imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
{
	int ret;
	size_t usize;
	void *result;

	assert(min_alignment != 0);

	if (malloc_init()) {
		result = NULL;
		goto label_oom;
	} else {
		if (size == 0)
			size = 1;

		/* Make sure that alignment is a large enough power of 2. */
		if (((alignment - 1) & alignment) != 0
		    || (alignment < min_alignment)) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error allocating "
				    "aligned memory: invalid alignment\n");
				abort();
			}
			result = NULL;
			ret = EINVAL;
			goto label_return;
		}

		usize = sa2u(size, alignment);
		if (usize == 0) {
			result = NULL;
			goto label_oom;
		}

		if (config_prof && opt_prof) {
			prof_thr_cnt_t *cnt;

			PROF_ALLOC_PREP(usize, cnt);
			result = imemalign_prof(alignment, usize, cnt);
		} else
			result = ipalloc(usize, alignment, false);
		if (result == NULL)
			goto label_oom;
	}

	*memptr = result;
	ret = 0;
label_return:
	if (config_stats && result != NULL) {
		assert(usize == isalloc(result, config_prof));
		thread_allocated_tsd_get()->allocated += usize;
	}
	UTRACE(0, size, result);
	return (ret);
label_oom:
	assert(result == NULL);
	if (config_xmalloc && opt_xmalloc) {
		malloc_write("<jemalloc>: Error allocating aligned memory: "
		    "out of memory\n");
		abort();
	}
	ret = ENOMEM;
	goto label_return;
}

int
je_posix_memalign(void **memptr, size_t alignment, size_t size)
{
	int ret = imemalign(memptr, alignment, size, sizeof(void *));
	JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr,
	    config_prof), false);
	return (ret);
}

void *
je_aligned_alloc(size_t alignment, size_t size)
{
	void *ret;
	int err;

	if ((err = imemalign(&ret, alignment, size, 1)) != 0) {
		ret = NULL;
		set_errno(err);
	}
	JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof),
	    false);
	return (ret);
}

static void *
icalloc_prof_sample(size_t usize, prof_thr_cnt_t *cnt)
{
	void *p;

	if (cnt == NULL)
		return (NULL);
	if (usize <= SMALL_MAXCLASS) {
		p = icalloc(SMALL_MAXCLASS+1);
		if (p == NULL)
			return (NULL);
		arena_prof_promoted(p, usize);
	} else
		p = icalloc(usize);

	return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
icalloc_prof(size_t usize, prof_thr_cnt_t *cnt)
{
	void *p;

	if ((uintptr_t)cnt != (uintptr_t)1U)
		p = icalloc_prof_sample(usize, cnt);
	else
		p = icalloc(usize);
	if (p == NULL)
		return (NULL);
	prof_malloc(p, usize, cnt);

	return (p);
}

void *
je_calloc(size_t num, size_t size)
{
	void *ret;
	size_t num_size;
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);

	if (malloc_init()) {
		num_size = 0;
		ret = NULL;
		goto label_return;
	}

	num_size = num * size;
	if (num_size == 0) {
		if (num == 0 || size == 0)
			num_size = 1;
		else {
			ret = NULL;
			goto label_return;
		}
	/*
	 * Try to avoid division here. We know that it isn't possible to
	 * overflow during multiplication if neither operand uses any of the
	 * most significant half of the bits in a size_t.
	 */
	} else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
	    && (num_size / size != num)) {
		/* size_t overflow. */
		ret = NULL;
		goto label_return;
	}

	if (config_prof && opt_prof) {
		prof_thr_cnt_t *cnt;

		usize = s2u(num_size);
		PROF_ALLOC_PREP(usize, cnt);
		ret = icalloc_prof(usize, cnt);
	} else {
		if (config_stats || (config_valgrind && in_valgrind))
			usize = s2u(num_size);
		ret = icalloc(num_size);
	}

label_return:
	if (ret == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in calloc(): out of "
			    "memory\n");
			abort();
		}
		set_errno(ENOMEM);
	}
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret, config_prof));
		thread_allocated_tsd_get()->allocated += usize;
	}
	UTRACE(0, num_size, ret);
	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true);
	return (ret);
}

static void *
irealloc_prof_sample(void *oldptr, size_t usize, prof_thr_cnt_t *cnt)
{
	void *p;

	if (cnt == NULL)
		return (NULL);
	if (usize <= SMALL_MAXCLASS) {
		p = iralloc(oldptr, SMALL_MAXCLASS+1, 0, 0, false);
		if (p == NULL)
			return (NULL);
		arena_prof_promoted(p, usize);
	} else
		p = iralloc(oldptr, usize, 0, 0, false);

	return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
irealloc_prof(void *oldptr, size_t old_usize, size_t usize, prof_thr_cnt_t *cnt)
{
	void *p;
	prof_ctx_t *old_ctx;

	old_ctx = prof_ctx_get(oldptr);
	if ((uintptr_t)cnt != (uintptr_t)1U)
		p = irealloc_prof_sample(oldptr, usize, cnt);
	else
		p = iralloc(oldptr, usize, 0, 0, false);
	if (p == NULL)
		return (NULL);
	prof_realloc(p, usize, cnt, old_usize, old_ctx);

	return (p);
}

JEMALLOC_INLINE_C void
ifree(void *ptr)
{
	size_t usize;
	UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);

	assert(ptr != NULL);
	assert(malloc_initialized || IS_INITIALIZER);

	if (config_prof && opt_prof) {
		usize = isalloc(ptr, config_prof);
		prof_free(ptr, usize);
	} else if (config_stats || config_valgrind)
		usize = isalloc(ptr, config_prof);
	if (config_stats)
		thread_allocated_tsd_get()->deallocated += usize;
	if (config_valgrind && in_valgrind)
		rzsize = p2rz(ptr);
	iqalloc(ptr);
	JEMALLOC_VALGRIND_FREE(ptr, rzsize);
}

void *
je_realloc(void *ptr, size_t size)
{
	void *ret;
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
	size_t old_usize = 0;
	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);

	if (size == 0) {
		if (ptr != NULL) {
			/* realloc(ptr, 0) is equivalent to free(ptr). */
			UTRACE(ptr, 0, 0);
			ifree(ptr);
			return (NULL);
		}
		size = 1;
	}

	if (ptr != NULL) {
		assert(malloc_initialized || IS_INITIALIZER);
		malloc_thread_init();

		if ((config_prof && opt_prof) || config_stats ||
		    (config_valgrind && in_valgrind))
			old_usize = isalloc(ptr, config_prof);
		if (config_valgrind && in_valgrind)
			old_rzsize = config_prof ? p2rz(ptr) : u2rz(old_usize);

		if (config_prof && opt_prof) {
			prof_thr_cnt_t *cnt;

			usize = s2u(size);
			PROF_ALLOC_PREP(usize, cnt);
			ret = irealloc_prof(ptr, old_usize, usize, cnt);
		} else {
			if (config_stats || (config_valgrind && in_valgrind))
				usize = s2u(size);
			ret = iralloc(ptr, size, 0, 0, false);
		}
	} else {
		/* realloc(NULL, size) is equivalent to malloc(size). */
		ret = imalloc_body(size, &usize);
	}

	if (ret == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in realloc(): "
			    "out of memory\n");
			abort();
		}
		set_errno(ENOMEM);
	}
	if (config_stats && ret != NULL) {
		thread_allocated_t *ta;
		assert(usize == isalloc(ret, config_prof));
		ta = thread_allocated_tsd_get();
		ta->allocated += usize;
		ta->deallocated += old_usize;
	}
	UTRACE(ptr, size, ret);
	JEMALLOC_VALGRIND_REALLOC(true, ret, usize, true, ptr, old_usize,
	    old_rzsize, true, false);
	return (ret);
}

void
je_free(void *ptr)
{

	UTRACE(ptr, 0, 0);
	if (ptr != NULL)
		ifree(ptr);
}

/*
 * End malloc(3)-compatible functions.
 */
/******************************************************************************/
/*
 * Begin non-standard override functions.
 */

#ifdef JEMALLOC_OVERRIDE_MEMALIGN
void *
je_memalign(size_t alignment, size_t size)
{
	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
	imemalign(&ret, alignment, size, 1);
	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
	return (ret);
}
#endif

#ifdef JEMALLOC_OVERRIDE_VALLOC
void *
je_valloc(size_t size)
{
	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
	imemalign(&ret, PAGE, size, 1);
	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
	return (ret);
}
#endif

/*
 * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
 * #define je_malloc malloc
 */
#define malloc_is_malloc 1
#define is_malloc_(a) malloc_is_ ## a
#define is_malloc(a) is_malloc_(a)

#if ((is_malloc(je_malloc) == 1) && defined(__GLIBC__) && !defined(__UCLIBC__))
/*
 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
 * to inconsistently reference libc's malloc(3)-compatible functions
 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
 *
 * These definitions interpose hooks in glibc. The functions are actually
 * passed an extra argument for the caller return address, which will be
 * ignored.
 */
JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free;
JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc;
JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc;
JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) =
    je_memalign;
#endif

/*
 * End non-standard override functions.
 */
/******************************************************************************/
/*
 * Begin non-standard functions.
 */

JEMALLOC_ALWAYS_INLINE_C void *
imallocx(size_t usize, size_t alignment, bool zero, bool try_tcache,
    arena_t *arena)
{

	assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize,
	    alignment)));

	if (alignment != 0)
		return (ipalloct(usize, alignment, zero, try_tcache, arena));
	else if (zero)
		return (icalloct(usize, try_tcache, arena));
	else
		return (imalloct(usize, try_tcache, arena));
}

static void *
imallocx_prof_sample(size_t usize, size_t alignment, bool zero, bool try_tcache,
    arena_t *arena, prof_thr_cnt_t *cnt)
{
	void *p;

	if (cnt == NULL)
		return (NULL);
	if (usize <= SMALL_MAXCLASS) {
		size_t usize_promoted = (alignment == 0) ?
		    s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1, alignment);
		assert(usize_promoted != 0);
		p = imallocx(usize_promoted, alignment, zero, try_tcache,
		    arena);
		if (p == NULL)
			return (NULL);
		arena_prof_promoted(p, usize);
	} else
		p = imallocx(usize, alignment, zero, try_tcache, arena);

	return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
imallocx_prof(size_t usize, size_t alignment, bool zero, bool try_tcache,
    arena_t *arena, prof_thr_cnt_t *cnt)
{
	void *p;

	if ((uintptr_t)cnt != (uintptr_t)1U) {
		p = imallocx_prof_sample(usize, alignment, zero, try_tcache,
		    arena, cnt);
	} else
		p = imallocx(usize, alignment, zero, try_tcache, arena);
	if (p == NULL)
		return (NULL);
	prof_malloc(p, usize, cnt);

	return (p);
}

void *
je_mallocx(size_t size, int flags)
{
	void *p;
	size_t usize;
	size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));
	bool zero = flags & MALLOCX_ZERO;
	unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
	arena_t *arena;
	bool try_tcache;

	assert(size != 0);

	if (malloc_init())
		goto label_oom;

	if (arena_ind != UINT_MAX) {
		arena = arenas[arena_ind];
		try_tcache = false;
	} else {
		arena = NULL;
		try_tcache = true;
	}

	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
	assert(usize != 0);

	if (config_prof && opt_prof) {
		prof_thr_cnt_t *cnt;

		PROF_ALLOC_PREP(usize, cnt);
		p = imallocx_prof(usize, alignment, zero, try_tcache, arena,
		    cnt);
	} else
		p = imallocx(usize, alignment, zero, try_tcache, arena);
	if (p == NULL)
		goto label_oom;

	if (config_stats) {
		assert(usize == isalloc(p, config_prof));
		thread_allocated_tsd_get()->allocated += usize;
	}
	UTRACE(0, size, p);
	JEMALLOC_VALGRIND_MALLOC(true, p, usize, zero);
	return (p);
label_oom:
	if (config_xmalloc && opt_xmalloc) {
		malloc_write("<jemalloc>: Error in mallocx(): out of memory\n");
		abort();
	}
	UTRACE(0, size, 0);
	return (NULL);
}

static void *
irallocx_prof_sample(void *oldptr, size_t size, size_t alignment, size_t usize,
    bool zero, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena,
    prof_thr_cnt_t *cnt)
{
	void *p;

	if (cnt == NULL)
		return (NULL);
	if (usize <= SMALL_MAXCLASS) {
		p = iralloct(oldptr, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
		    size) ? 0 : size - (SMALL_MAXCLASS+1), alignment, zero,
		    try_tcache_alloc, try_tcache_dalloc, arena);
		if (p == NULL)
			return (NULL);
		arena_prof_promoted(p, usize);
	} else {
		p = iralloct(oldptr, size, 0, alignment, zero,
		    try_tcache_alloc, try_tcache_dalloc, arena);
	}

	return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
irallocx_prof(void *oldptr, size_t old_usize, size_t size, size_t alignment,
    size_t *usize, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc,
    arena_t *arena, prof_thr_cnt_t *cnt)
{
	void *p;
	prof_ctx_t *old_ctx;

	old_ctx = prof_ctx_get(oldptr);
	if ((uintptr_t)cnt != (uintptr_t)1U)
		p = irallocx_prof_sample(oldptr, size, alignment, *usize, zero,
		    try_tcache_alloc, try_tcache_dalloc, arena, cnt);
	else {
		p = iralloct(oldptr, size, 0, alignment, zero,
		    try_tcache_alloc, try_tcache_dalloc, arena);
	}
	if (p == NULL)
		return (NULL);

	if (p == oldptr && alignment != 0) {
		/*
		 * The allocation did not move, so it is possible that the size
		 * class is smaller than would guarantee the requested
		 * alignment, and that the alignment constraint was
		 * serendipitously satisfied. Additionally, old_usize may not
		 * be the same as the current usize because of in-place large
		 * reallocation. Therefore, query the actual value of usize.
		 */
		*usize = isalloc(p, config_prof);
	}
	prof_realloc(p, *usize, cnt, old_usize, old_ctx);

	return (p);
}

void *
je_rallocx(void *ptr, size_t size, int flags)
{
	void *p;
	size_t usize, old_usize;
	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
	size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));
	bool zero = flags & MALLOCX_ZERO;
	unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
	bool try_tcache_alloc, try_tcache_dalloc;
	arena_t *arena;

	assert(ptr != NULL);
	assert(size != 0);
	assert(malloc_initialized || IS_INITIALIZER);
	malloc_thread_init();

	if (arena_ind != UINT_MAX) {
		arena_chunk_t *chunk;
		try_tcache_alloc = false;
		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
		try_tcache_dalloc = (chunk == ptr || chunk->arena !=
		    arenas[arena_ind]);
		arena = arenas[arena_ind];
	} else {
		try_tcache_alloc = true;
		try_tcache_dalloc = true;
		arena = NULL;
	}

	if ((config_prof && opt_prof) || config_stats ||
	    (config_valgrind && in_valgrind))
		old_usize = isalloc(ptr, config_prof);
	if (config_valgrind && in_valgrind)
		old_rzsize = u2rz(old_usize);

	if (config_prof && opt_prof) {
		prof_thr_cnt_t *cnt;

		usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
		assert(usize != 0);
		PROF_ALLOC_PREP(usize, cnt);
		p = irallocx_prof(ptr, old_usize, size, alignment, &usize, zero,
		    try_tcache_alloc, try_tcache_dalloc, arena, cnt);
		if (p == NULL)
			goto label_oom;
	} else {
		p = iralloct(ptr, size, 0, alignment, zero, try_tcache_alloc,
		    try_tcache_dalloc, arena);
		if (p == NULL)
			goto label_oom;
		if (config_stats || (config_valgrind && in_valgrind))
			usize = isalloc(p, config_prof);
	}

	if (config_stats) {
		thread_allocated_t *ta;
		ta = thread_allocated_tsd_get();
		ta->allocated += usize;
		ta->deallocated += old_usize;
	}
	UTRACE(ptr, size, p);
	JEMALLOC_VALGRIND_REALLOC(true, p, usize, false, ptr, old_usize,
	    old_rzsize, false, zero);
	return (p);
label_oom:
	if (config_xmalloc && opt_xmalloc) {
		malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
		abort();
	}
	UTRACE(ptr, size, 0);
	return (NULL);
}

JEMALLOC_ALWAYS_INLINE_C size_t
ixallocx_helper(void *ptr, size_t old_usize, size_t size, size_t extra,
    size_t alignment, bool zero, arena_t *arena)
{
	size_t usize;

	if (ixalloc(ptr, size, extra, alignment, zero))
		return (old_usize);
	usize = isalloc(ptr, config_prof);

	return (usize);
}

static size_t
ixallocx_prof_sample(void *ptr, size_t old_usize, size_t size, size_t extra,
    size_t alignment, size_t max_usize, bool zero, arena_t *arena,
    prof_thr_cnt_t *cnt)
{
	size_t usize;

	if (cnt == NULL)
		return (old_usize);
	/* Use minimum usize to determine whether promotion may happen. */
	if (((alignment == 0) ? s2u(size) : sa2u(size, alignment)) <=
	    SMALL_MAXCLASS) {
		if (ixalloc(ptr, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
		    size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
		    alignment, zero))
			return (old_usize);
		usize = isalloc(ptr, config_prof);
		if (max_usize < PAGE)
			arena_prof_promoted(ptr, usize);
	} else {
		usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
		    zero, arena);
	}

	return (usize);
}

JEMALLOC_ALWAYS_INLINE_C size_t
ixallocx_prof(void *ptr, size_t old_usize, size_t size, size_t extra,
    size_t alignment, size_t max_usize, bool zero, arena_t *arena,
    prof_thr_cnt_t *cnt)
{
	size_t usize;
	prof_ctx_t *old_ctx;

	old_ctx = prof_ctx_get(ptr);
	if ((uintptr_t)cnt != (uintptr_t)1U) {
		usize = ixallocx_prof_sample(ptr, old_usize, size, extra,
		    alignment, zero, max_usize, arena, cnt);
	} else {
		usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
		    zero, arena);
	}
	if (usize == old_usize)
		return (usize);
	prof_realloc(ptr, usize, cnt, old_usize, old_ctx);

	return (usize);
}

size_t
je_xallocx(void *ptr, size_t size, size_t extra, int flags)
{
	size_t usize, old_usize;
	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
	size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));
	bool zero = flags & MALLOCX_ZERO;
	unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
	arena_t *arena;

	assert(ptr != NULL);
	assert(size != 0);
	assert(SIZE_T_MAX - size >= extra);
	assert(malloc_initialized || IS_INITIALIZER);
	malloc_thread_init();

	if (arena_ind != UINT_MAX)
		arena = arenas[arena_ind];
	else
		arena = NULL;

	old_usize = isalloc(ptr, config_prof);
	if (config_valgrind && in_valgrind)
		old_rzsize = u2rz(old_usize);

	if (config_prof && opt_prof) {
		prof_thr_cnt_t *cnt;
		/*
		 * usize isn't knowable before ixalloc() returns when extra is
		 * non-zero. Therefore, compute its maximum possible value and
		 * use that in PROF_ALLOC_PREP() to decide whether to capture a
		 * backtrace. prof_realloc() will use the actual usize to
		 * decide whether to sample.
		 */
		size_t max_usize = (alignment == 0) ? s2u(size+extra) :
		    sa2u(size+extra, alignment);
		PROF_ALLOC_PREP(max_usize, cnt);
		usize = ixallocx_prof(ptr, old_usize, size, extra, alignment,
		    max_usize, zero, arena, cnt);
	} else {
		usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
		    zero, arena);
	}
	if (usize == old_usize)
		goto label_not_resized;

	if (config_stats) {
		thread_allocated_t *ta;
		ta = thread_allocated_tsd_get();
		ta->allocated += usize;
		ta->deallocated += old_usize;
	}
	JEMALLOC_VALGRIND_REALLOC(false, ptr, usize, false, ptr, old_usize,
	    old_rzsize, false, zero);
label_not_resized:
	UTRACE(ptr, size, ptr);
	return (usize);
}

size_t
je_sallocx(const void *ptr, int flags)
{
	size_t usize;

	assert(malloc_initialized || IS_INITIALIZER);
	malloc_thread_init();

	if (config_ivsalloc)
		usize = ivsalloc(ptr, config_prof);
	else {
		assert(ptr != NULL);
		usize = isalloc(ptr, config_prof);
	}

	return (usize);
}

void
je_dallocx(void *ptr, int flags)
{
	size_t usize;
	UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
	unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
	bool try_tcache;

	assert(ptr != NULL);
	assert(malloc_initialized || IS_INITIALIZER);

	if (arena_ind != UINT_MAX) {
		arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
		try_tcache = (chunk == ptr || chunk->arena !=
		    arenas[arena_ind]);
	} else
		try_tcache = true;

	UTRACE(ptr, 0, 0);
	if (config_stats || config_valgrind)
		usize = isalloc(ptr, config_prof);
	if (config_prof && opt_prof) {
		if (config_stats == false && config_valgrind == false)
			usize = isalloc(ptr, config_prof);
		prof_free(ptr, usize);
	}
	if (config_stats)
		thread_allocated_tsd_get()->deallocated += usize;
	if (config_valgrind && in_valgrind)
		rzsize = p2rz(ptr);
	iqalloct(ptr, try_tcache);
	JEMALLOC_VALGRIND_FREE(ptr, rzsize);
}

size_t
je_nallocx(size_t size, int flags)
{
	size_t usize;
	size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));

	assert(size != 0);

	if (malloc_init())
		return (0);

	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
	assert(usize != 0);
	return (usize);
}

int
je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_byname(name, oldp, oldlenp, newp, newlen));
}

int
je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_nametomib(name, mibp, miblenp));
}

int
je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
}

void
je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *opts)
{

	stats_print(write_cb, cbopaque, opts);
}

size_t
je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
{
	size_t ret;

	assert(malloc_initialized || IS_INITIALIZER);
	malloc_thread_init();

	if (config_ivsalloc)
		ret = ivsalloc(ptr, config_prof);
	else
		ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0;

	return (ret);
}

/*
 * End non-standard functions.
 */
/******************************************************************************/
/*
 * The following functions are used by threading libraries for protection of
 * malloc during fork().
 */

/*
 * If an application creates a thread before doing any allocation in the main
 * thread, then calls fork(2) in the main thread followed by memory allocation
 * in the child process, a race can occur that results in deadlock within the
 * child: the main thread may have forked while the created thread had
 * partially initialized the allocator. Ordinarily jemalloc prevents
 * fork/malloc races via the following functions it registers during
 * initialization using pthread_atfork(), but of course that does no good if
 * the allocator isn't fully initialized at fork time. The following library
 * constructor is a partial solution to this problem. It may still be possible
 * to trigger the deadlock described above, but doing so would involve forking
 * via a library constructor that runs before jemalloc's runs.
 */
JEMALLOC_ATTR(constructor)
static void
jemalloc_constructor(void)
{

	malloc_init();
}

#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_prefork(void)
#else
JEMALLOC_EXPORT void
_malloc_prefork(void)
#endif
{
	unsigned i;

#ifdef JEMALLOC_MUTEX_INIT_CB
	if (malloc_initialized == false)
		return;
#endif
	assert(malloc_initialized);

	/* Acquire all mutexes in a safe order. */
	ctl_prefork();
	prof_prefork();
	malloc_mutex_prefork(&arenas_lock);
	for (i = 0; i < narenas_total; i++) {
		if (arenas[i] != NULL)
			arena_prefork(arenas[i]);
	}
	chunk_prefork();
	base_prefork();
	huge_prefork();
}

#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_postfork_parent(void)
#else
JEMALLOC_EXPORT void
_malloc_postfork(void)
#endif
{
	unsigned i;

#ifdef JEMALLOC_MUTEX_INIT_CB
	if (malloc_initialized == false)
		return;
#endif
	assert(malloc_initialized);

	/* Release all mutexes, now that fork() has completed. */
	huge_postfork_parent();
	base_postfork_parent();
	chunk_postfork_parent();
	for (i = 0; i < narenas_total; i++) {
		if (arenas[i] != NULL)
			arena_postfork_parent(arenas[i]);
	}
	malloc_mutex_postfork_parent(&arenas_lock);
	prof_postfork_parent();
	ctl_postfork_parent();
}

void
jemalloc_postfork_child(void)
{
	unsigned i;

	assert(malloc_initialized);

	/* Release all mutexes, now that fork() has completed. */
	huge_postfork_child();
	base_postfork_child();
	chunk_postfork_child();
	for (i = 0; i < narenas_total; i++) {
		if (arenas[i] != NULL)
			arena_postfork_child(arenas[i]);
	}
	malloc_mutex_postfork_child(&arenas_lock);
	prof_postfork_child();
	ctl_postfork_child();
}

/******************************************************************************/
/*
 * The following functions are used for TLS allocation/deallocation in static
 * binaries on FreeBSD. The primary difference between these and i[mcd]alloc()
 * is that these avoid accessing TLS variables.
 */

static void *
a0alloc(size_t size, bool zero)
{

	if (malloc_init())
		return (NULL);

	if (size == 0)
		size = 1;

	if (size <= arena_maxclass)
		return (arena_malloc(arenas[0], size, zero, false));
	else
		return (huge_malloc(NULL, size, zero));
}

void *
a0malloc(size_t size)
{

	return (a0alloc(size, false));
}

void *
a0calloc(size_t num, size_t size)
{

	return (a0alloc(num * size, true));
}

void
a0free(void *ptr)
{
	arena_chunk_t *chunk;

	if (ptr == NULL)
		return;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr)
		arena_dalloc(chunk, ptr, false);
	else
		huge_dalloc(ptr);
}

/******************************************************************************/