jemalloc.c revision 174b70efb4942be112b1ea38db1e5c6ca7599e5d
1#define JEMALLOC_C_ 2#include "jemalloc/internal/jemalloc_internal.h" 3 4/******************************************************************************/ 5/* Data. */ 6 7malloc_tsd_data(, arenas, arena_t *, NULL) 8malloc_tsd_data(, thread_allocated, thread_allocated_t, 9 THREAD_ALLOCATED_INITIALIZER) 10 11/* Runtime configuration options. */ 12const char *je_malloc_conf; 13#ifdef JEMALLOC_DEBUG 14bool opt_abort = true; 15# ifdef JEMALLOC_FILL 16bool opt_junk = true; 17# else 18bool opt_junk = false; 19# endif 20#else 21bool opt_abort = false; 22bool opt_junk = false; 23#endif 24size_t opt_quarantine = ZU(0); 25bool opt_redzone = false; 26bool opt_utrace = false; 27bool opt_valgrind = false; 28bool opt_xmalloc = false; 29bool opt_zero = false; 30size_t opt_narenas = 0; 31 32unsigned ncpus; 33 34malloc_mutex_t arenas_lock; 35arena_t **arenas; 36unsigned narenas; 37 38/* Set to true once the allocator has been initialized. */ 39static bool malloc_initialized = false; 40 41#ifdef JEMALLOC_THREADED_INIT 42/* Used to let the initializing thread recursively allocate. */ 43# define NO_INITIALIZER ((unsigned long)0) 44# define INITIALIZER pthread_self() 45# define IS_INITIALIZER (malloc_initializer == pthread_self()) 46static pthread_t malloc_initializer = NO_INITIALIZER; 47#else 48# define NO_INITIALIZER false 49# define INITIALIZER true 50# define IS_INITIALIZER malloc_initializer 51static bool malloc_initializer = NO_INITIALIZER; 52#endif 53 54/* Used to avoid initialization races. */ 55#ifdef _WIN32 56static malloc_mutex_t init_lock; 57 58JEMALLOC_ATTR(constructor) 59static void WINAPI 60_init_init_lock(void) 61{ 62 63 malloc_mutex_init(&init_lock); 64} 65 66#ifdef _MSC_VER 67# pragma section(".CRT$XCU", read) 68JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used) 69static const void (WINAPI *init_init_lock)(void) = _init_init_lock; 70#endif 71 72#else 73static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER; 74#endif 75 76typedef struct { 77 void *p; /* Input pointer (as in realloc(p, s)). */ 78 size_t s; /* Request size. */ 79 void *r; /* Result pointer. */ 80} malloc_utrace_t; 81 82#ifdef JEMALLOC_UTRACE 83# define UTRACE(a, b, c) do { \ 84 if (opt_utrace) { \ 85 malloc_utrace_t ut; \ 86 ut.p = (a); \ 87 ut.s = (b); \ 88 ut.r = (c); \ 89 utrace(&ut, sizeof(ut)); \ 90 } \ 91} while (0) 92#else 93# define UTRACE(a, b, c) 94#endif 95 96/******************************************************************************/ 97/* Function prototypes for non-inline static functions. */ 98 99static void stats_print_atexit(void); 100static unsigned malloc_ncpus(void); 101static bool malloc_conf_next(char const **opts_p, char const **k_p, 102 size_t *klen_p, char const **v_p, size_t *vlen_p); 103static void malloc_conf_error(const char *msg, const char *k, size_t klen, 104 const char *v, size_t vlen); 105static void malloc_conf_init(void); 106static bool malloc_init_hard(void); 107static int imemalign(void **memptr, size_t alignment, size_t size, 108 size_t min_alignment); 109 110/******************************************************************************/ 111/* 112 * Begin miscellaneous support functions. 113 */ 114 115/* Create a new arena and insert it into the arenas array at index ind. */ 116arena_t * 117arenas_extend(unsigned ind) 118{ 119 arena_t *ret; 120 121 ret = (arena_t *)base_alloc(sizeof(arena_t)); 122 if (ret != NULL && arena_new(ret, ind) == false) { 123 arenas[ind] = ret; 124 return (ret); 125 } 126 /* Only reached if there is an OOM error. */ 127 128 /* 129 * OOM here is quite inconvenient to propagate, since dealing with it 130 * would require a check for failure in the fast path. Instead, punt 131 * by using arenas[0]. In practice, this is an extremely unlikely 132 * failure. 133 */ 134 malloc_write("<jemalloc>: Error initializing arena\n"); 135 if (opt_abort) 136 abort(); 137 138 return (arenas[0]); 139} 140 141/* Slow path, called only by choose_arena(). */ 142arena_t * 143choose_arena_hard(void) 144{ 145 arena_t *ret; 146 147 if (narenas > 1) { 148 unsigned i, choose, first_null; 149 150 choose = 0; 151 first_null = narenas; 152 malloc_mutex_lock(&arenas_lock); 153 assert(arenas[0] != NULL); 154 for (i = 1; i < narenas; i++) { 155 if (arenas[i] != NULL) { 156 /* 157 * Choose the first arena that has the lowest 158 * number of threads assigned to it. 159 */ 160 if (arenas[i]->nthreads < 161 arenas[choose]->nthreads) 162 choose = i; 163 } else if (first_null == narenas) { 164 /* 165 * Record the index of the first uninitialized 166 * arena, in case all extant arenas are in use. 167 * 168 * NB: It is possible for there to be 169 * discontinuities in terms of initialized 170 * versus uninitialized arenas, due to the 171 * "thread.arena" mallctl. 172 */ 173 first_null = i; 174 } 175 } 176 177 if (arenas[choose]->nthreads == 0 || first_null == narenas) { 178 /* 179 * Use an unloaded arena, or the least loaded arena if 180 * all arenas are already initialized. 181 */ 182 ret = arenas[choose]; 183 } else { 184 /* Initialize a new arena. */ 185 ret = arenas_extend(first_null); 186 } 187 ret->nthreads++; 188 malloc_mutex_unlock(&arenas_lock); 189 } else { 190 ret = arenas[0]; 191 malloc_mutex_lock(&arenas_lock); 192 ret->nthreads++; 193 malloc_mutex_unlock(&arenas_lock); 194 } 195 196 arenas_tsd_set(&ret); 197 198 return (ret); 199} 200 201static void 202stats_print_atexit(void) 203{ 204 205 if (config_tcache && config_stats) { 206 unsigned i; 207 208 /* 209 * Merge stats from extant threads. This is racy, since 210 * individual threads do not lock when recording tcache stats 211 * events. As a consequence, the final stats may be slightly 212 * out of date by the time they are reported, if other threads 213 * continue to allocate. 214 */ 215 for (i = 0; i < narenas; i++) { 216 arena_t *arena = arenas[i]; 217 if (arena != NULL) { 218 tcache_t *tcache; 219 220 /* 221 * tcache_stats_merge() locks bins, so if any 222 * code is introduced that acquires both arena 223 * and bin locks in the opposite order, 224 * deadlocks may result. 225 */ 226 malloc_mutex_lock(&arena->lock); 227 ql_foreach(tcache, &arena->tcache_ql, link) { 228 tcache_stats_merge(tcache, arena); 229 } 230 malloc_mutex_unlock(&arena->lock); 231 } 232 } 233 } 234 je_malloc_stats_print(NULL, NULL, NULL); 235} 236 237/* 238 * End miscellaneous support functions. 239 */ 240/******************************************************************************/ 241/* 242 * Begin initialization functions. 243 */ 244 245static unsigned 246malloc_ncpus(void) 247{ 248 unsigned ret; 249 long result; 250 251#ifdef _WIN32 252 SYSTEM_INFO si; 253 GetSystemInfo(&si); 254 result = si.dwNumberOfProcessors; 255#else 256 result = sysconf(_SC_NPROCESSORS_ONLN); 257 if (result == -1) { 258 /* Error. */ 259 ret = 1; 260 } 261#endif 262 ret = (unsigned)result; 263 264 return (ret); 265} 266 267void 268arenas_cleanup(void *arg) 269{ 270 arena_t *arena = *(arena_t **)arg; 271 272 malloc_mutex_lock(&arenas_lock); 273 arena->nthreads--; 274 malloc_mutex_unlock(&arenas_lock); 275} 276 277static inline bool 278malloc_init(void) 279{ 280 281 if (malloc_initialized == false) 282 return (malloc_init_hard()); 283 284 return (false); 285} 286 287static bool 288malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p, 289 char const **v_p, size_t *vlen_p) 290{ 291 bool accept; 292 const char *opts = *opts_p; 293 294 *k_p = opts; 295 296 for (accept = false; accept == false;) { 297 switch (*opts) { 298 case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': 299 case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': 300 case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': 301 case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': 302 case 'Y': case 'Z': 303 case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': 304 case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': 305 case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': 306 case 's': case 't': case 'u': case 'v': case 'w': case 'x': 307 case 'y': case 'z': 308 case '0': case '1': case '2': case '3': case '4': case '5': 309 case '6': case '7': case '8': case '9': 310 case '_': 311 opts++; 312 break; 313 case ':': 314 opts++; 315 *klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p; 316 *v_p = opts; 317 accept = true; 318 break; 319 case '\0': 320 if (opts != *opts_p) { 321 malloc_write("<jemalloc>: Conf string ends " 322 "with key\n"); 323 } 324 return (true); 325 default: 326 malloc_write("<jemalloc>: Malformed conf string\n"); 327 return (true); 328 } 329 } 330 331 for (accept = false; accept == false;) { 332 switch (*opts) { 333 case ',': 334 opts++; 335 /* 336 * Look ahead one character here, because the next time 337 * this function is called, it will assume that end of 338 * input has been cleanly reached if no input remains, 339 * but we have optimistically already consumed the 340 * comma if one exists. 341 */ 342 if (*opts == '\0') { 343 malloc_write("<jemalloc>: Conf string ends " 344 "with comma\n"); 345 } 346 *vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p; 347 accept = true; 348 break; 349 case '\0': 350 *vlen_p = (uintptr_t)opts - (uintptr_t)*v_p; 351 accept = true; 352 break; 353 default: 354 opts++; 355 break; 356 } 357 } 358 359 *opts_p = opts; 360 return (false); 361} 362 363static void 364malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v, 365 size_t vlen) 366{ 367 368 malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k, 369 (int)vlen, v); 370} 371 372static void 373malloc_conf_init(void) 374{ 375 unsigned i; 376 char buf[PATH_MAX + 1]; 377 const char *opts, *k, *v; 378 size_t klen, vlen; 379 380 /* 381 * Automatically configure valgrind before processing options. The 382 * valgrind option remains in jemalloc 3.x for compatibility reasons. 383 */ 384 if (config_valgrind) { 385 opt_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false; 386 if (config_fill && opt_valgrind) { 387 opt_junk = false; 388 assert(opt_zero == false); 389 opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT; 390 opt_redzone = true; 391 } 392 if (config_tcache && opt_valgrind) 393 opt_tcache = false; 394 } 395 396 for (i = 0; i < 3; i++) { 397 /* Get runtime configuration. */ 398 switch (i) { 399 case 0: 400 if (je_malloc_conf != NULL) { 401 /* 402 * Use options that were compiled into the 403 * program. 404 */ 405 opts = je_malloc_conf; 406 } else { 407 /* No configuration specified. */ 408 buf[0] = '\0'; 409 opts = buf; 410 } 411 break; 412 case 1: { 413#ifndef _WIN32 414 int linklen; 415 const char *linkname = 416# ifdef JEMALLOC_PREFIX 417 "/etc/"JEMALLOC_PREFIX"malloc.conf" 418# else 419 "/etc/malloc.conf" 420# endif 421 ; 422 423 if ((linklen = readlink(linkname, buf, 424 sizeof(buf) - 1)) != -1) { 425 /* 426 * Use the contents of the "/etc/malloc.conf" 427 * symbolic link's name. 428 */ 429 buf[linklen] = '\0'; 430 opts = buf; 431 } else 432#endif 433 { 434 /* No configuration specified. */ 435 buf[0] = '\0'; 436 opts = buf; 437 } 438 break; 439 } case 2: { 440 const char *envname = 441#ifdef JEMALLOC_PREFIX 442 JEMALLOC_CPREFIX"MALLOC_CONF" 443#else 444 "MALLOC_CONF" 445#endif 446 ; 447 448 if ((opts = getenv(envname)) != NULL) { 449 /* 450 * Do nothing; opts is already initialized to 451 * the value of the MALLOC_CONF environment 452 * variable. 453 */ 454 } else { 455 /* No configuration specified. */ 456 buf[0] = '\0'; 457 opts = buf; 458 } 459 break; 460 } default: 461 /* NOTREACHED */ 462 assert(false); 463 buf[0] = '\0'; 464 opts = buf; 465 } 466 467 while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v, 468 &vlen) == false) { 469#define CONF_HANDLE_BOOL_HIT(o, n, hit) \ 470 if (sizeof(n)-1 == klen && strncmp(n, k, \ 471 klen) == 0) { \ 472 if (strncmp("true", v, vlen) == 0 && \ 473 vlen == sizeof("true")-1) \ 474 o = true; \ 475 else if (strncmp("false", v, vlen) == \ 476 0 && vlen == sizeof("false")-1) \ 477 o = false; \ 478 else { \ 479 malloc_conf_error( \ 480 "Invalid conf value", \ 481 k, klen, v, vlen); \ 482 } \ 483 hit = true; \ 484 } else \ 485 hit = false; 486#define CONF_HANDLE_BOOL(o, n) { \ 487 bool hit; \ 488 CONF_HANDLE_BOOL_HIT(o, n, hit); \ 489 if (hit) \ 490 continue; \ 491} 492#define CONF_HANDLE_SIZE_T(o, n, min, max) \ 493 if (sizeof(n)-1 == klen && strncmp(n, k, \ 494 klen) == 0) { \ 495 uintmax_t um; \ 496 char *end; \ 497 \ 498 set_errno(0); \ 499 um = malloc_strtoumax(v, &end, 0); \ 500 if (get_errno() != 0 || (uintptr_t)end -\ 501 (uintptr_t)v != vlen) { \ 502 malloc_conf_error( \ 503 "Invalid conf value", \ 504 k, klen, v, vlen); \ 505 } else if (um < min || um > max) { \ 506 malloc_conf_error( \ 507 "Out-of-range conf value", \ 508 k, klen, v, vlen); \ 509 } else \ 510 o = um; \ 511 continue; \ 512 } 513#define CONF_HANDLE_SSIZE_T(o, n, min, max) \ 514 if (sizeof(n)-1 == klen && strncmp(n, k, \ 515 klen) == 0) { \ 516 long l; \ 517 char *end; \ 518 \ 519 set_errno(0); \ 520 l = strtol(v, &end, 0); \ 521 if (get_errno() != 0 || (uintptr_t)end -\ 522 (uintptr_t)v != vlen) { \ 523 malloc_conf_error( \ 524 "Invalid conf value", \ 525 k, klen, v, vlen); \ 526 } else if (l < (ssize_t)min || l > \ 527 (ssize_t)max) { \ 528 malloc_conf_error( \ 529 "Out-of-range conf value", \ 530 k, klen, v, vlen); \ 531 } else \ 532 o = l; \ 533 continue; \ 534 } 535#define CONF_HANDLE_CHAR_P(o, n, d) \ 536 if (sizeof(n)-1 == klen && strncmp(n, k, \ 537 klen) == 0) { \ 538 size_t cpylen = (vlen <= \ 539 sizeof(o)-1) ? vlen : \ 540 sizeof(o)-1; \ 541 strncpy(o, v, cpylen); \ 542 o[cpylen] = '\0'; \ 543 continue; \ 544 } 545 546 CONF_HANDLE_BOOL(opt_abort, "abort") 547 /* 548 * Chunks always require at least one header page, plus 549 * one data page in the absence of redzones, or three 550 * pages in the presence of redzones. In order to 551 * simplify options processing, fix the limit based on 552 * config_fill. 553 */ 554 CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE + 555 (config_fill ? 2 : 1), (sizeof(size_t) << 3) - 1) 556 CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1, 557 SIZE_T_MAX) 558 CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult", 559 -1, (sizeof(size_t) << 3) - 1) 560 CONF_HANDLE_BOOL(opt_stats_print, "stats_print") 561 if (config_fill) { 562 CONF_HANDLE_BOOL(opt_junk, "junk") 563 CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine", 564 0, SIZE_T_MAX) 565 CONF_HANDLE_BOOL(opt_redzone, "redzone") 566 CONF_HANDLE_BOOL(opt_zero, "zero") 567 } 568 if (config_utrace) { 569 CONF_HANDLE_BOOL(opt_utrace, "utrace") 570 } 571 if (config_valgrind) { 572 CONF_HANDLE_BOOL(opt_valgrind, "valgrind") 573 } 574 if (config_xmalloc) { 575 CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc") 576 } 577 if (config_tcache) { 578 CONF_HANDLE_BOOL(opt_tcache, "tcache") 579 CONF_HANDLE_SSIZE_T(opt_lg_tcache_max, 580 "lg_tcache_max", -1, 581 (sizeof(size_t) << 3) - 1) 582 } 583 if (config_prof) { 584 CONF_HANDLE_BOOL(opt_prof, "prof") 585 CONF_HANDLE_CHAR_P(opt_prof_prefix, 586 "prof_prefix", "jeprof") 587 CONF_HANDLE_BOOL(opt_prof_active, "prof_active") 588 CONF_HANDLE_SSIZE_T(opt_lg_prof_sample, 589 "lg_prof_sample", 0, 590 (sizeof(uint64_t) << 3) - 1) 591 CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum") 592 CONF_HANDLE_SSIZE_T(opt_lg_prof_interval, 593 "lg_prof_interval", -1, 594 (sizeof(uint64_t) << 3) - 1) 595 CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump") 596 CONF_HANDLE_BOOL(opt_prof_final, "prof_final") 597 CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak") 598 } 599 malloc_conf_error("Invalid conf pair", k, klen, v, 600 vlen); 601#undef CONF_HANDLE_BOOL 602#undef CONF_HANDLE_SIZE_T 603#undef CONF_HANDLE_SSIZE_T 604#undef CONF_HANDLE_CHAR_P 605 } 606 } 607} 608 609static bool 610malloc_init_hard(void) 611{ 612 arena_t *init_arenas[1]; 613 614 malloc_mutex_lock(&init_lock); 615 if (malloc_initialized || IS_INITIALIZER) { 616 /* 617 * Another thread initialized the allocator before this one 618 * acquired init_lock, or this thread is the initializing 619 * thread, and it is recursively allocating. 620 */ 621 malloc_mutex_unlock(&init_lock); 622 return (false); 623 } 624#ifdef JEMALLOC_THREADED_INIT 625 if (malloc_initializer != NO_INITIALIZER && IS_INITIALIZER == false) { 626 /* Busy-wait until the initializing thread completes. */ 627 do { 628 malloc_mutex_unlock(&init_lock); 629 CPU_SPINWAIT; 630 malloc_mutex_lock(&init_lock); 631 } while (malloc_initialized == false); 632 malloc_mutex_unlock(&init_lock); 633 return (false); 634 } 635#endif 636 malloc_initializer = INITIALIZER; 637 638 malloc_tsd_boot(); 639 if (config_prof) 640 prof_boot0(); 641 642 malloc_conf_init(); 643 644#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \ 645 && !defined(_WIN32)) 646 /* Register fork handlers. */ 647 if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent, 648 jemalloc_postfork_child) != 0) { 649 malloc_write("<jemalloc>: Error in pthread_atfork()\n"); 650 if (opt_abort) 651 abort(); 652 } 653#endif 654 655 if (opt_stats_print) { 656 /* Print statistics at exit. */ 657 if (atexit(stats_print_atexit) != 0) { 658 malloc_write("<jemalloc>: Error in atexit()\n"); 659 if (opt_abort) 660 abort(); 661 } 662 } 663 664 if (base_boot()) { 665 malloc_mutex_unlock(&init_lock); 666 return (true); 667 } 668 669 if (chunk_boot()) { 670 malloc_mutex_unlock(&init_lock); 671 return (true); 672 } 673 674 if (ctl_boot()) { 675 malloc_mutex_unlock(&init_lock); 676 return (true); 677 } 678 679 if (config_prof) 680 prof_boot1(); 681 682 arena_boot(); 683 684 if (config_tcache && tcache_boot0()) { 685 malloc_mutex_unlock(&init_lock); 686 return (true); 687 } 688 689 if (huge_boot()) { 690 malloc_mutex_unlock(&init_lock); 691 return (true); 692 } 693 694 if (malloc_mutex_init(&arenas_lock)) 695 return (true); 696 697 /* 698 * Create enough scaffolding to allow recursive allocation in 699 * malloc_ncpus(). 700 */ 701 narenas = 1; 702 arenas = init_arenas; 703 memset(arenas, 0, sizeof(arena_t *) * narenas); 704 705 /* 706 * Initialize one arena here. The rest are lazily created in 707 * choose_arena_hard(). 708 */ 709 arenas_extend(0); 710 if (arenas[0] == NULL) { 711 malloc_mutex_unlock(&init_lock); 712 return (true); 713 } 714 715 /* Initialize allocation counters before any allocations can occur. */ 716 if (config_stats && thread_allocated_tsd_boot()) { 717 malloc_mutex_unlock(&init_lock); 718 return (true); 719 } 720 721 if (arenas_tsd_boot()) { 722 malloc_mutex_unlock(&init_lock); 723 return (true); 724 } 725 726 if (config_tcache && tcache_boot1()) { 727 malloc_mutex_unlock(&init_lock); 728 return (true); 729 } 730 731 if (config_fill && quarantine_boot()) { 732 malloc_mutex_unlock(&init_lock); 733 return (true); 734 } 735 736 if (config_prof && prof_boot2()) { 737 malloc_mutex_unlock(&init_lock); 738 return (true); 739 } 740 741 /* Get number of CPUs. */ 742 malloc_mutex_unlock(&init_lock); 743 ncpus = malloc_ncpus(); 744 malloc_mutex_lock(&init_lock); 745 746 if (mutex_boot()) { 747 malloc_mutex_unlock(&init_lock); 748 return (true); 749 } 750 751 if (opt_narenas == 0) { 752 /* 753 * For SMP systems, create more than one arena per CPU by 754 * default. 755 */ 756 if (ncpus > 1) 757 opt_narenas = ncpus << 2; 758 else 759 opt_narenas = 1; 760 } 761 narenas = opt_narenas; 762 /* 763 * Make sure that the arenas array can be allocated. In practice, this 764 * limit is enough to allow the allocator to function, but the ctl 765 * machinery will fail to allocate memory at far lower limits. 766 */ 767 if (narenas > chunksize / sizeof(arena_t *)) { 768 narenas = chunksize / sizeof(arena_t *); 769 malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n", 770 narenas); 771 } 772 773 /* Allocate and initialize arenas. */ 774 arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas); 775 if (arenas == NULL) { 776 malloc_mutex_unlock(&init_lock); 777 return (true); 778 } 779 /* 780 * Zero the array. In practice, this should always be pre-zeroed, 781 * since it was just mmap()ed, but let's be sure. 782 */ 783 memset(arenas, 0, sizeof(arena_t *) * narenas); 784 /* Copy the pointer to the one arena that was already initialized. */ 785 arenas[0] = init_arenas[0]; 786 787 malloc_initialized = true; 788 malloc_mutex_unlock(&init_lock); 789 return (false); 790} 791 792/* 793 * End initialization functions. 794 */ 795/******************************************************************************/ 796/* 797 * Begin malloc(3)-compatible functions. 798 */ 799 800void * 801je_malloc(size_t size) 802{ 803 void *ret; 804 size_t usize JEMALLOC_CC_SILENCE_INIT(0); 805 prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL); 806 807 if (malloc_init()) { 808 ret = NULL; 809 goto label_oom; 810 } 811 812 if (size == 0) 813 size = 1; 814 815 if (config_prof && opt_prof) { 816 usize = s2u(size); 817 PROF_ALLOC_PREP(1, usize, cnt); 818 if (cnt == NULL) { 819 ret = NULL; 820 goto label_oom; 821 } 822 if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <= 823 SMALL_MAXCLASS) { 824 ret = imalloc(SMALL_MAXCLASS+1); 825 if (ret != NULL) 826 arena_prof_promoted(ret, usize); 827 } else 828 ret = imalloc(size); 829 } else { 830 if (config_stats || (config_valgrind && opt_valgrind)) 831 usize = s2u(size); 832 ret = imalloc(size); 833 } 834 835label_oom: 836 if (ret == NULL) { 837 if (config_xmalloc && opt_xmalloc) { 838 malloc_write("<jemalloc>: Error in malloc(): " 839 "out of memory\n"); 840 abort(); 841 } 842 set_errno(ENOMEM); 843 } 844 if (config_prof && opt_prof && ret != NULL) 845 prof_malloc(ret, usize, cnt); 846 if (config_stats && ret != NULL) { 847 assert(usize == isalloc(ret, config_prof)); 848 thread_allocated_tsd_get()->allocated += usize; 849 } 850 UTRACE(0, size, ret); 851 JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false); 852 return (ret); 853} 854 855JEMALLOC_ATTR(nonnull(1)) 856#ifdef JEMALLOC_PROF 857/* 858 * Avoid any uncertainty as to how many backtrace frames to ignore in 859 * PROF_ALLOC_PREP(). 860 */ 861JEMALLOC_ATTR(noinline) 862#endif 863static int 864imemalign(void **memptr, size_t alignment, size_t size, 865 size_t min_alignment) 866{ 867 int ret; 868 size_t usize; 869 void *result; 870 prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL); 871 872 assert(min_alignment != 0); 873 874 if (malloc_init()) 875 result = NULL; 876 else { 877 if (size == 0) 878 size = 1; 879 880 /* Make sure that alignment is a large enough power of 2. */ 881 if (((alignment - 1) & alignment) != 0 882 || (alignment < min_alignment)) { 883 if (config_xmalloc && opt_xmalloc) { 884 malloc_write("<jemalloc>: Error allocating " 885 "aligned memory: invalid alignment\n"); 886 abort(); 887 } 888 result = NULL; 889 ret = EINVAL; 890 goto label_return; 891 } 892 893 usize = sa2u(size, alignment); 894 if (usize == 0) { 895 result = NULL; 896 ret = ENOMEM; 897 goto label_return; 898 } 899 900 if (config_prof && opt_prof) { 901 PROF_ALLOC_PREP(2, usize, cnt); 902 if (cnt == NULL) { 903 result = NULL; 904 ret = EINVAL; 905 } else { 906 if (prof_promote && (uintptr_t)cnt != 907 (uintptr_t)1U && usize <= SMALL_MAXCLASS) { 908 assert(sa2u(SMALL_MAXCLASS+1, 909 alignment) != 0); 910 result = ipalloc(sa2u(SMALL_MAXCLASS+1, 911 alignment), alignment, false); 912 if (result != NULL) { 913 arena_prof_promoted(result, 914 usize); 915 } 916 } else { 917 result = ipalloc(usize, alignment, 918 false); 919 } 920 } 921 } else 922 result = ipalloc(usize, alignment, false); 923 } 924 925 if (result == NULL) { 926 if (config_xmalloc && opt_xmalloc) { 927 malloc_write("<jemalloc>: Error allocating aligned " 928 "memory: out of memory\n"); 929 abort(); 930 } 931 ret = ENOMEM; 932 goto label_return; 933 } 934 935 *memptr = result; 936 ret = 0; 937 938label_return: 939 if (config_stats && result != NULL) { 940 assert(usize == isalloc(result, config_prof)); 941 thread_allocated_tsd_get()->allocated += usize; 942 } 943 if (config_prof && opt_prof && result != NULL) 944 prof_malloc(result, usize, cnt); 945 UTRACE(0, size, result); 946 return (ret); 947} 948 949int 950je_posix_memalign(void **memptr, size_t alignment, size_t size) 951{ 952 int ret = imemalign(memptr, alignment, size, sizeof(void *)); 953 JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr, 954 config_prof), false); 955 return (ret); 956} 957 958void * 959je_aligned_alloc(size_t alignment, size_t size) 960{ 961 void *ret; 962 int err; 963 964 if ((err = imemalign(&ret, alignment, size, 1)) != 0) { 965 ret = NULL; 966 set_errno(err); 967 } 968 JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof), 969 false); 970 return (ret); 971} 972 973void * 974je_calloc(size_t num, size_t size) 975{ 976 void *ret; 977 size_t num_size; 978 size_t usize JEMALLOC_CC_SILENCE_INIT(0); 979 prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL); 980 981 if (malloc_init()) { 982 num_size = 0; 983 ret = NULL; 984 goto label_return; 985 } 986 987 num_size = num * size; 988 if (num_size == 0) { 989 if (num == 0 || size == 0) 990 num_size = 1; 991 else { 992 ret = NULL; 993 goto label_return; 994 } 995 /* 996 * Try to avoid division here. We know that it isn't possible to 997 * overflow during multiplication if neither operand uses any of the 998 * most significant half of the bits in a size_t. 999 */ 1000 } else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2))) 1001 && (num_size / size != num)) { 1002 /* size_t overflow. */ 1003 ret = NULL; 1004 goto label_return; 1005 } 1006 1007 if (config_prof && opt_prof) { 1008 usize = s2u(num_size); 1009 PROF_ALLOC_PREP(1, usize, cnt); 1010 if (cnt == NULL) { 1011 ret = NULL; 1012 goto label_return; 1013 } 1014 if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize 1015 <= SMALL_MAXCLASS) { 1016 ret = icalloc(SMALL_MAXCLASS+1); 1017 if (ret != NULL) 1018 arena_prof_promoted(ret, usize); 1019 } else 1020 ret = icalloc(num_size); 1021 } else { 1022 if (config_stats || (config_valgrind && opt_valgrind)) 1023 usize = s2u(num_size); 1024 ret = icalloc(num_size); 1025 } 1026 1027label_return: 1028 if (ret == NULL) { 1029 if (config_xmalloc && opt_xmalloc) { 1030 malloc_write("<jemalloc>: Error in calloc(): out of " 1031 "memory\n"); 1032 abort(); 1033 } 1034 set_errno(ENOMEM); 1035 } 1036 1037 if (config_prof && opt_prof && ret != NULL) 1038 prof_malloc(ret, usize, cnt); 1039 if (config_stats && ret != NULL) { 1040 assert(usize == isalloc(ret, config_prof)); 1041 thread_allocated_tsd_get()->allocated += usize; 1042 } 1043 UTRACE(0, num_size, ret); 1044 JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true); 1045 return (ret); 1046} 1047 1048void * 1049je_realloc(void *ptr, size_t size) 1050{ 1051 void *ret; 1052 size_t usize JEMALLOC_CC_SILENCE_INIT(0); 1053 size_t old_size = 0; 1054 size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0); 1055 prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL); 1056 prof_ctx_t *old_ctx JEMALLOC_CC_SILENCE_INIT(NULL); 1057 1058 if (size == 0) { 1059 if (ptr != NULL) { 1060 /* realloc(ptr, 0) is equivalent to free(p). */ 1061 if (config_prof) { 1062 old_size = isalloc(ptr, true); 1063 if (config_valgrind && opt_valgrind) 1064 old_rzsize = p2rz(ptr); 1065 } else if (config_stats) { 1066 old_size = isalloc(ptr, false); 1067 if (config_valgrind && opt_valgrind) 1068 old_rzsize = u2rz(old_size); 1069 } else if (config_valgrind && opt_valgrind) { 1070 old_size = isalloc(ptr, false); 1071 old_rzsize = u2rz(old_size); 1072 } 1073 if (config_prof && opt_prof) { 1074 old_ctx = prof_ctx_get(ptr); 1075 cnt = NULL; 1076 } 1077 iqalloc(ptr); 1078 ret = NULL; 1079 goto label_return; 1080 } else 1081 size = 1; 1082 } 1083 1084 if (ptr != NULL) { 1085 assert(malloc_initialized || IS_INITIALIZER); 1086 1087 if (config_prof) { 1088 old_size = isalloc(ptr, true); 1089 if (config_valgrind && opt_valgrind) 1090 old_rzsize = p2rz(ptr); 1091 } else if (config_stats) { 1092 old_size = isalloc(ptr, false); 1093 if (config_valgrind && opt_valgrind) 1094 old_rzsize = u2rz(old_size); 1095 } else if (config_valgrind && opt_valgrind) { 1096 old_size = isalloc(ptr, false); 1097 old_rzsize = u2rz(old_size); 1098 } 1099 if (config_prof && opt_prof) { 1100 usize = s2u(size); 1101 old_ctx = prof_ctx_get(ptr); 1102 PROF_ALLOC_PREP(1, usize, cnt); 1103 if (cnt == NULL) { 1104 old_ctx = NULL; 1105 ret = NULL; 1106 goto label_oom; 1107 } 1108 if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && 1109 usize <= SMALL_MAXCLASS) { 1110 ret = iralloc(ptr, SMALL_MAXCLASS+1, 0, 0, 1111 false, false); 1112 if (ret != NULL) 1113 arena_prof_promoted(ret, usize); 1114 else 1115 old_ctx = NULL; 1116 } else { 1117 ret = iralloc(ptr, size, 0, 0, false, false); 1118 if (ret == NULL) 1119 old_ctx = NULL; 1120 } 1121 } else { 1122 if (config_stats || (config_valgrind && opt_valgrind)) 1123 usize = s2u(size); 1124 ret = iralloc(ptr, size, 0, 0, false, false); 1125 } 1126 1127label_oom: 1128 if (ret == NULL) { 1129 if (config_xmalloc && opt_xmalloc) { 1130 malloc_write("<jemalloc>: Error in realloc(): " 1131 "out of memory\n"); 1132 abort(); 1133 } 1134 set_errno(ENOMEM); 1135 } 1136 } else { 1137 /* realloc(NULL, size) is equivalent to malloc(size). */ 1138 if (config_prof && opt_prof) 1139 old_ctx = NULL; 1140 if (malloc_init()) { 1141 if (config_prof && opt_prof) 1142 cnt = NULL; 1143 ret = NULL; 1144 } else { 1145 if (config_prof && opt_prof) { 1146 usize = s2u(size); 1147 PROF_ALLOC_PREP(1, usize, cnt); 1148 if (cnt == NULL) 1149 ret = NULL; 1150 else { 1151 if (prof_promote && (uintptr_t)cnt != 1152 (uintptr_t)1U && usize <= 1153 SMALL_MAXCLASS) { 1154 ret = imalloc(SMALL_MAXCLASS+1); 1155 if (ret != NULL) { 1156 arena_prof_promoted(ret, 1157 usize); 1158 } 1159 } else 1160 ret = imalloc(size); 1161 } 1162 } else { 1163 if (config_stats || (config_valgrind && 1164 opt_valgrind)) 1165 usize = s2u(size); 1166 ret = imalloc(size); 1167 } 1168 } 1169 1170 if (ret == NULL) { 1171 if (config_xmalloc && opt_xmalloc) { 1172 malloc_write("<jemalloc>: Error in realloc(): " 1173 "out of memory\n"); 1174 abort(); 1175 } 1176 set_errno(ENOMEM); 1177 } 1178 } 1179 1180label_return: 1181 if (config_prof && opt_prof) 1182 prof_realloc(ret, usize, cnt, old_size, old_ctx); 1183 if (config_stats && ret != NULL) { 1184 thread_allocated_t *ta; 1185 assert(usize == isalloc(ret, config_prof)); 1186 ta = thread_allocated_tsd_get(); 1187 ta->allocated += usize; 1188 ta->deallocated += old_size; 1189 } 1190 UTRACE(ptr, size, ret); 1191 JEMALLOC_VALGRIND_REALLOC(ret, usize, ptr, old_size, old_rzsize, false); 1192 return (ret); 1193} 1194 1195void 1196je_free(void *ptr) 1197{ 1198 1199 UTRACE(ptr, 0, 0); 1200 if (ptr != NULL) { 1201 size_t usize; 1202 size_t rzsize JEMALLOC_CC_SILENCE_INIT(0); 1203 1204 assert(malloc_initialized || IS_INITIALIZER); 1205 1206 if (config_prof && opt_prof) { 1207 usize = isalloc(ptr, config_prof); 1208 prof_free(ptr, usize); 1209 } else if (config_stats || config_valgrind) 1210 usize = isalloc(ptr, config_prof); 1211 if (config_stats) 1212 thread_allocated_tsd_get()->deallocated += usize; 1213 if (config_valgrind && opt_valgrind) 1214 rzsize = p2rz(ptr); 1215 iqalloc(ptr); 1216 JEMALLOC_VALGRIND_FREE(ptr, rzsize); 1217 } 1218} 1219 1220/* 1221 * End malloc(3)-compatible functions. 1222 */ 1223/******************************************************************************/ 1224/* 1225 * Begin non-standard override functions. 1226 */ 1227 1228#ifdef JEMALLOC_OVERRIDE_MEMALIGN 1229void * 1230je_memalign(size_t alignment, size_t size) 1231{ 1232 void *ret JEMALLOC_CC_SILENCE_INIT(NULL); 1233 imemalign(&ret, alignment, size, 1); 1234 JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false); 1235 return (ret); 1236} 1237#endif 1238 1239#ifdef JEMALLOC_OVERRIDE_VALLOC 1240void * 1241je_valloc(size_t size) 1242{ 1243 void *ret JEMALLOC_CC_SILENCE_INIT(NULL); 1244 imemalign(&ret, PAGE, size, 1); 1245 JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false); 1246 return (ret); 1247} 1248#endif 1249 1250/* 1251 * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has 1252 * #define je_malloc malloc 1253 */ 1254#define malloc_is_malloc 1 1255#define is_malloc_(a) malloc_is_ ## a 1256#define is_malloc(a) is_malloc_(a) 1257 1258#if ((is_malloc(je_malloc) == 1) && defined(__GLIBC__) && !defined(__UCLIBC__)) 1259/* 1260 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible 1261 * to inconsistently reference libc's malloc(3)-compatible functions 1262 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541). 1263 * 1264 * These definitions interpose hooks in glibc. The functions are actually 1265 * passed an extra argument for the caller return address, which will be 1266 * ignored. 1267 */ 1268JEMALLOC_EXPORT void (* const __free_hook)(void *ptr) = je_free; 1269JEMALLOC_EXPORT void *(* const __malloc_hook)(size_t size) = je_malloc; 1270JEMALLOC_EXPORT void *(* const __realloc_hook)(void *ptr, size_t size) = 1271 je_realloc; 1272JEMALLOC_EXPORT void *(* const __memalign_hook)(size_t alignment, size_t size) = 1273 je_memalign; 1274#endif 1275 1276/* 1277 * End non-standard override functions. 1278 */ 1279/******************************************************************************/ 1280/* 1281 * Begin non-standard functions. 1282 */ 1283 1284size_t 1285je_malloc_usable_size(const void *ptr) 1286{ 1287 size_t ret; 1288 1289 assert(malloc_initialized || IS_INITIALIZER); 1290 1291 if (config_ivsalloc) 1292 ret = ivsalloc(ptr, config_prof); 1293 else 1294 ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0; 1295 1296 return (ret); 1297} 1298 1299void 1300je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque, 1301 const char *opts) 1302{ 1303 1304 stats_print(write_cb, cbopaque, opts); 1305} 1306 1307int 1308je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp, 1309 size_t newlen) 1310{ 1311 1312 if (malloc_init()) 1313 return (EAGAIN); 1314 1315 return (ctl_byname(name, oldp, oldlenp, newp, newlen)); 1316} 1317 1318int 1319je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) 1320{ 1321 1322 if (malloc_init()) 1323 return (EAGAIN); 1324 1325 return (ctl_nametomib(name, mibp, miblenp)); 1326} 1327 1328int 1329je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, 1330 void *newp, size_t newlen) 1331{ 1332 1333 if (malloc_init()) 1334 return (EAGAIN); 1335 1336 return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen)); 1337} 1338 1339/* 1340 * End non-standard functions. 1341 */ 1342/******************************************************************************/ 1343/* 1344 * Begin experimental functions. 1345 */ 1346#ifdef JEMALLOC_EXPERIMENTAL 1347 1348JEMALLOC_INLINE void * 1349iallocm(size_t usize, size_t alignment, bool zero) 1350{ 1351 1352 assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize, 1353 alignment))); 1354 1355 if (alignment != 0) 1356 return (ipalloc(usize, alignment, zero)); 1357 else if (zero) 1358 return (icalloc(usize)); 1359 else 1360 return (imalloc(usize)); 1361} 1362 1363int 1364je_allocm(void **ptr, size_t *rsize, size_t size, int flags) 1365{ 1366 void *p; 1367 size_t usize; 1368 size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK) 1369 & (SIZE_T_MAX-1)); 1370 bool zero = flags & ALLOCM_ZERO; 1371 1372 assert(ptr != NULL); 1373 assert(size != 0); 1374 1375 if (malloc_init()) 1376 goto label_oom; 1377 1378 usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment); 1379 if (usize == 0) 1380 goto label_oom; 1381 1382 if (config_prof && opt_prof) { 1383 prof_thr_cnt_t *cnt; 1384 1385 PROF_ALLOC_PREP(1, usize, cnt); 1386 if (cnt == NULL) 1387 goto label_oom; 1388 if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <= 1389 SMALL_MAXCLASS) { 1390 size_t usize_promoted = (alignment == 0) ? 1391 s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1, 1392 alignment); 1393 assert(usize_promoted != 0); 1394 p = iallocm(usize_promoted, alignment, zero); 1395 if (p == NULL) 1396 goto label_oom; 1397 arena_prof_promoted(p, usize); 1398 } else { 1399 p = iallocm(usize, alignment, zero); 1400 if (p == NULL) 1401 goto label_oom; 1402 } 1403 prof_malloc(p, usize, cnt); 1404 } else { 1405 p = iallocm(usize, alignment, zero); 1406 if (p == NULL) 1407 goto label_oom; 1408 } 1409 if (rsize != NULL) 1410 *rsize = usize; 1411 1412 *ptr = p; 1413 if (config_stats) { 1414 assert(usize == isalloc(p, config_prof)); 1415 thread_allocated_tsd_get()->allocated += usize; 1416 } 1417 UTRACE(0, size, p); 1418 JEMALLOC_VALGRIND_MALLOC(true, p, usize, zero); 1419 return (ALLOCM_SUCCESS); 1420label_oom: 1421 if (config_xmalloc && opt_xmalloc) { 1422 malloc_write("<jemalloc>: Error in allocm(): " 1423 "out of memory\n"); 1424 abort(); 1425 } 1426 *ptr = NULL; 1427 UTRACE(0, size, 0); 1428 return (ALLOCM_ERR_OOM); 1429} 1430 1431int 1432je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags) 1433{ 1434 void *p, *q; 1435 size_t usize; 1436 size_t old_size; 1437 size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0); 1438 size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK) 1439 & (SIZE_T_MAX-1)); 1440 bool zero = flags & ALLOCM_ZERO; 1441 bool no_move = flags & ALLOCM_NO_MOVE; 1442 1443 assert(ptr != NULL); 1444 assert(*ptr != NULL); 1445 assert(size != 0); 1446 assert(SIZE_T_MAX - size >= extra); 1447 assert(malloc_initialized || IS_INITIALIZER); 1448 1449 p = *ptr; 1450 if (config_prof && opt_prof) { 1451 prof_thr_cnt_t *cnt; 1452 1453 /* 1454 * usize isn't knowable before iralloc() returns when extra is 1455 * non-zero. Therefore, compute its maximum possible value and 1456 * use that in PROF_ALLOC_PREP() to decide whether to capture a 1457 * backtrace. prof_realloc() will use the actual usize to 1458 * decide whether to sample. 1459 */ 1460 size_t max_usize = (alignment == 0) ? s2u(size+extra) : 1461 sa2u(size+extra, alignment); 1462 prof_ctx_t *old_ctx = prof_ctx_get(p); 1463 old_size = isalloc(p, true); 1464 if (config_valgrind && opt_valgrind) 1465 old_rzsize = p2rz(p); 1466 PROF_ALLOC_PREP(1, max_usize, cnt); 1467 if (cnt == NULL) 1468 goto label_oom; 1469 /* 1470 * Use minimum usize to determine whether promotion may happen. 1471 */ 1472 if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U 1473 && ((alignment == 0) ? s2u(size) : sa2u(size, alignment)) 1474 <= SMALL_MAXCLASS) { 1475 q = iralloc(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >= 1476 size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1), 1477 alignment, zero, no_move); 1478 if (q == NULL) 1479 goto label_err; 1480 if (max_usize < PAGE) { 1481 usize = max_usize; 1482 arena_prof_promoted(q, usize); 1483 } else 1484 usize = isalloc(q, config_prof); 1485 } else { 1486 q = iralloc(p, size, extra, alignment, zero, no_move); 1487 if (q == NULL) 1488 goto label_err; 1489 usize = isalloc(q, config_prof); 1490 } 1491 prof_realloc(q, usize, cnt, old_size, old_ctx); 1492 if (rsize != NULL) 1493 *rsize = usize; 1494 } else { 1495 if (config_stats) { 1496 old_size = isalloc(p, false); 1497 if (config_valgrind && opt_valgrind) 1498 old_rzsize = u2rz(old_size); 1499 } else if (config_valgrind && opt_valgrind) { 1500 old_size = isalloc(p, false); 1501 old_rzsize = u2rz(old_size); 1502 } 1503 q = iralloc(p, size, extra, alignment, zero, no_move); 1504 if (q == NULL) 1505 goto label_err; 1506 if (config_stats) 1507 usize = isalloc(q, config_prof); 1508 if (rsize != NULL) { 1509 if (config_stats == false) 1510 usize = isalloc(q, config_prof); 1511 *rsize = usize; 1512 } 1513 } 1514 1515 *ptr = q; 1516 if (config_stats) { 1517 thread_allocated_t *ta; 1518 ta = thread_allocated_tsd_get(); 1519 ta->allocated += usize; 1520 ta->deallocated += old_size; 1521 } 1522 UTRACE(p, size, q); 1523 JEMALLOC_VALGRIND_REALLOC(q, usize, p, old_size, old_rzsize, zero); 1524 return (ALLOCM_SUCCESS); 1525label_err: 1526 if (no_move) { 1527 UTRACE(p, size, q); 1528 return (ALLOCM_ERR_NOT_MOVED); 1529 } 1530label_oom: 1531 if (config_xmalloc && opt_xmalloc) { 1532 malloc_write("<jemalloc>: Error in rallocm(): " 1533 "out of memory\n"); 1534 abort(); 1535 } 1536 UTRACE(p, size, 0); 1537 return (ALLOCM_ERR_OOM); 1538} 1539 1540int 1541je_sallocm(const void *ptr, size_t *rsize, int flags) 1542{ 1543 size_t sz; 1544 1545 assert(malloc_initialized || IS_INITIALIZER); 1546 1547 if (config_ivsalloc) 1548 sz = ivsalloc(ptr, config_prof); 1549 else { 1550 assert(ptr != NULL); 1551 sz = isalloc(ptr, config_prof); 1552 } 1553 assert(rsize != NULL); 1554 *rsize = sz; 1555 1556 return (ALLOCM_SUCCESS); 1557} 1558 1559int 1560je_dallocm(void *ptr, int flags) 1561{ 1562 size_t usize; 1563 size_t rzsize JEMALLOC_CC_SILENCE_INIT(0); 1564 1565 assert(ptr != NULL); 1566 assert(malloc_initialized || IS_INITIALIZER); 1567 1568 UTRACE(ptr, 0, 0); 1569 if (config_stats || config_valgrind) 1570 usize = isalloc(ptr, config_prof); 1571 if (config_prof && opt_prof) { 1572 if (config_stats == false && config_valgrind == false) 1573 usize = isalloc(ptr, config_prof); 1574 prof_free(ptr, usize); 1575 } 1576 if (config_stats) 1577 thread_allocated_tsd_get()->deallocated += usize; 1578 if (config_valgrind && opt_valgrind) 1579 rzsize = p2rz(ptr); 1580 iqalloc(ptr); 1581 JEMALLOC_VALGRIND_FREE(ptr, rzsize); 1582 1583 return (ALLOCM_SUCCESS); 1584} 1585 1586int 1587je_nallocm(size_t *rsize, size_t size, int flags) 1588{ 1589 size_t usize; 1590 size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK) 1591 & (SIZE_T_MAX-1)); 1592 1593 assert(size != 0); 1594 1595 if (malloc_init()) 1596 return (ALLOCM_ERR_OOM); 1597 1598 usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment); 1599 if (usize == 0) 1600 return (ALLOCM_ERR_OOM); 1601 1602 if (rsize != NULL) 1603 *rsize = usize; 1604 return (ALLOCM_SUCCESS); 1605} 1606 1607#endif 1608/* 1609 * End experimental functions. 1610 */ 1611/******************************************************************************/ 1612/* 1613 * The following functions are used by threading libraries for protection of 1614 * malloc during fork(). 1615 */ 1616 1617#ifndef JEMALLOC_MUTEX_INIT_CB 1618void 1619jemalloc_prefork(void) 1620#else 1621JEMALLOC_EXPORT void 1622_malloc_prefork(void) 1623#endif 1624{ 1625 unsigned i; 1626 1627#ifdef JEMALLOC_MUTEX_INIT_CB 1628 if (malloc_initialized == false) 1629 return; 1630#endif 1631 assert(malloc_initialized); 1632 1633 /* Acquire all mutexes in a safe order. */ 1634 malloc_mutex_prefork(&arenas_lock); 1635 for (i = 0; i < narenas; i++) { 1636 if (arenas[i] != NULL) 1637 arena_prefork(arenas[i]); 1638 } 1639 base_prefork(); 1640 huge_prefork(); 1641 chunk_dss_prefork(); 1642} 1643 1644#ifndef JEMALLOC_MUTEX_INIT_CB 1645void 1646jemalloc_postfork_parent(void) 1647#else 1648JEMALLOC_EXPORT void 1649_malloc_postfork(void) 1650#endif 1651{ 1652 unsigned i; 1653 1654#ifdef JEMALLOC_MUTEX_INIT_CB 1655 if (malloc_initialized == false) 1656 return; 1657#endif 1658 assert(malloc_initialized); 1659 1660 /* Release all mutexes, now that fork() has completed. */ 1661 chunk_dss_postfork_parent(); 1662 huge_postfork_parent(); 1663 base_postfork_parent(); 1664 for (i = 0; i < narenas; i++) { 1665 if (arenas[i] != NULL) 1666 arena_postfork_parent(arenas[i]); 1667 } 1668 malloc_mutex_postfork_parent(&arenas_lock); 1669} 1670 1671void 1672jemalloc_postfork_child(void) 1673{ 1674 unsigned i; 1675 1676 assert(malloc_initialized); 1677 1678 /* Release all mutexes, now that fork() has completed. */ 1679 chunk_dss_postfork_child(); 1680 huge_postfork_child(); 1681 base_postfork_child(); 1682 for (i = 0; i < narenas; i++) { 1683 if (arenas[i] != NULL) 1684 arena_postfork_child(arenas[i]); 1685 } 1686 malloc_mutex_postfork_child(&arenas_lock); 1687} 1688 1689/******************************************************************************/ 1690/* 1691 * The following functions are used for TLS allocation/deallocation in static 1692 * binaries on FreeBSD. The primary difference between these and i[mcd]alloc() 1693 * is that these avoid accessing TLS variables. 1694 */ 1695 1696static void * 1697a0alloc(size_t size, bool zero) 1698{ 1699 1700 if (malloc_init()) 1701 return (NULL); 1702 1703 if (size == 0) 1704 size = 1; 1705 1706 if (size <= arena_maxclass) 1707 return (arena_malloc(arenas[0], size, zero, false)); 1708 else 1709 return (huge_malloc(size, zero)); 1710} 1711 1712void * 1713a0malloc(size_t size) 1714{ 1715 1716 return (a0alloc(size, false)); 1717} 1718 1719void * 1720a0calloc(size_t num, size_t size) 1721{ 1722 1723 return (a0alloc(num * size, true)); 1724} 1725 1726void 1727a0free(void *ptr) 1728{ 1729 arena_chunk_t *chunk; 1730 1731 if (ptr == NULL) 1732 return; 1733 1734 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); 1735 if (chunk != ptr) 1736 arena_dalloc(chunk->arena, chunk, ptr, false); 1737 else 1738 huge_dalloc(ptr, true); 1739} 1740 1741/******************************************************************************/ 1742