jemalloc.c revision 606f1fdc3cdbc700717133ca56685313caea24bb
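/*
 * Runtime options are read by malloc_conf_init() from three sources, in
 * increasing priority: the compiled-in je_malloc_conf string, the name that
 * the /etc/malloc.conf symbolic link points to, and the MALLOC_CONF
 * environment variable (or their JEMALLOC_PREFIX-qualified variants).  Each
 * source is a comma-separated list of key:value pairs, where the keys are the
 * option names handled below ("abort", "narenas", "lg_chunk", ...).  A
 * plausible example string (values illustrative only, not defaults):
 *
 *     MALLOC_CONF="abort:true,narenas:4,lg_chunk:22,stats_print:true"
 */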
#define JEMALLOC_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

malloc_tsd_data(, arenas, arena_t *, NULL)
malloc_tsd_data(, thread_allocated, thread_allocated_t,
    THREAD_ALLOCATED_INITIALIZER)

/* Runtime configuration options. */
const char *je_malloc_conf JEMALLOC_ATTR(visibility("default"));
#ifdef JEMALLOC_DEBUG
bool opt_abort = true;
# ifdef JEMALLOC_FILL
bool opt_junk = true;
# else
bool opt_junk = false;
# endif
#else
bool opt_abort = false;
bool opt_junk = false;
#endif
size_t opt_quarantine = ZU(0);
bool opt_redzone = false;
bool opt_utrace = false;
bool opt_valgrind = false;
bool opt_xmalloc = false;
bool opt_zero = false;
size_t opt_narenas = 0;

unsigned ncpus;

malloc_mutex_t arenas_lock;
arena_t **arenas;
unsigned narenas;

/* Set to true once the allocator has been initialized. */
static bool malloc_initialized = false;

#ifdef JEMALLOC_THREADED_INIT
/* Used to let the initializing thread recursively allocate. */
# define NO_INITIALIZER ((unsigned long)0)
# define INITIALIZER pthread_self()
# define IS_INITIALIZER (malloc_initializer == pthread_self())
static pthread_t malloc_initializer = NO_INITIALIZER;
#else
# define NO_INITIALIZER false
# define INITIALIZER true
# define IS_INITIALIZER malloc_initializer
static bool malloc_initializer = NO_INITIALIZER;
#endif

/* Used to avoid initialization races. */
static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER;

typedef struct {
    void   *p;  /* Input pointer (as in realloc(p, s)). */
    size_t  s;  /* Request size. */
    void   *r;  /* Result pointer. */
} malloc_utrace_t;

#ifdef JEMALLOC_UTRACE
# define UTRACE(a, b, c) do { \
    if (opt_utrace) { \
        malloc_utrace_t ut; \
        ut.p = (a); \
        ut.s = (b); \
        ut.r = (c); \
        utrace(&ut, sizeof(ut)); \
    } \
} while (0)
#else
# define UTRACE(a, b, c)
#endif

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void stats_print_atexit(void);
static unsigned malloc_ncpus(void);
static bool malloc_conf_next(char const **opts_p, char const **k_p,
    size_t *klen_p, char const **v_p, size_t *vlen_p);
static void malloc_conf_error(const char *msg, const char *k, size_t klen,
    const char *v, size_t vlen);
static void malloc_conf_init(void);
static bool malloc_init_hard(void);
static int imemalign(void **memptr, size_t alignment, size_t size,
    size_t min_alignment);

/******************************************************************************/
/*
 * Begin miscellaneous support functions.
 */

/* Create a new arena and insert it into the arenas array at index ind. */
arena_t *
arenas_extend(unsigned ind)
{
    arena_t *ret;

    ret = (arena_t *)base_alloc(sizeof(arena_t));
    if (ret != NULL && arena_new(ret, ind) == false) {
        arenas[ind] = ret;
        return (ret);
    }
    /* Only reached if there is an OOM error. */

    /*
     * OOM here is quite inconvenient to propagate, since dealing with it
     * would require a check for failure in the fast path.  Instead, punt
     * by using arenas[0].  In practice, this is an extremely unlikely
     * failure.
     */
    malloc_write("<jemalloc>: Error initializing arena\n");
    if (opt_abort)
        abort();

    return (arenas[0]);
}

/* Slow path, called only by choose_arena(). */
arena_t *
choose_arena_hard(void)
{
    arena_t *ret;

    if (narenas > 1) {
        unsigned i, choose, first_null;

        choose = 0;
        first_null = narenas;
        malloc_mutex_lock(&arenas_lock);
        assert(arenas[0] != NULL);
        for (i = 1; i < narenas; i++) {
            if (arenas[i] != NULL) {
                /*
                 * Choose the first arena that has the lowest
                 * number of threads assigned to it.
                 */
                if (arenas[i]->nthreads <
                    arenas[choose]->nthreads)
                    choose = i;
            } else if (first_null == narenas) {
                /*
                 * Record the index of the first uninitialized
                 * arena, in case all extant arenas are in use.
                 *
                 * NB: It is possible for there to be
                 * discontinuities in terms of initialized
                 * versus uninitialized arenas, due to the
                 * "thread.arena" mallctl.
                 */
                first_null = i;
            }
        }

        if (arenas[choose]->nthreads == 0 || first_null == narenas) {
            /*
             * Use an unloaded arena, or the least loaded arena if
             * all arenas are already initialized.
             */
            ret = arenas[choose];
        } else {
            /* Initialize a new arena. */
            ret = arenas_extend(first_null);
        }
        ret->nthreads++;
        malloc_mutex_unlock(&arenas_lock);
    } else {
        ret = arenas[0];
        malloc_mutex_lock(&arenas_lock);
        ret->nthreads++;
        malloc_mutex_unlock(&arenas_lock);
    }

    arenas_tsd_set(&ret);

    return (ret);
}

static void
stats_print_atexit(void)
{

    if (config_tcache && config_stats) {
        unsigned i;

        /*
         * Merge stats from extant threads.  This is racy, since
         * individual threads do not lock when recording tcache stats
         * events.  As a consequence, the final stats may be slightly
         * out of date by the time they are reported, if other threads
         * continue to allocate.
         */
        for (i = 0; i < narenas; i++) {
            arena_t *arena = arenas[i];
            if (arena != NULL) {
                tcache_t *tcache;

                /*
                 * tcache_stats_merge() locks bins, so if any
                 * code is introduced that acquires both arena
                 * and bin locks in the opposite order,
                 * deadlocks may result.
                 */
                malloc_mutex_lock(&arena->lock);
                ql_foreach(tcache, &arena->tcache_ql, link) {
                    tcache_stats_merge(tcache, arena);
                }
                malloc_mutex_unlock(&arena->lock);
            }
        }
    }
    je_malloc_stats_print(NULL, NULL, NULL);
}

/*
 * End miscellaneous support functions.
 */
/******************************************************************************/
/*
 * Begin initialization functions.
 */

static unsigned
malloc_ncpus(void)
{
    unsigned ret;
    long result;

    result = sysconf(_SC_NPROCESSORS_ONLN);
    if (result == -1) {
        /* Error. */
        ret = 1;
    } else
        ret = (unsigned)result;

    return (ret);
}

void
arenas_cleanup(void *arg)
{
    arena_t *arena = *(arena_t **)arg;

    malloc_mutex_lock(&arenas_lock);
    arena->nthreads--;
    malloc_mutex_unlock(&arenas_lock);
}

static inline bool
malloc_init(void)
{

    if (malloc_initialized == false)
        return (malloc_init_hard());

    return (false);
}

static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p)
{
    bool accept;
    const char *opts = *opts_p;

    *k_p = opts;

    for (accept = false; accept == false;) {
        switch (*opts) {
        case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
        case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
        case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
        case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
        case 'Y': case 'Z':
        case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
        case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
        case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
        case 's': case 't': case 'u': case 'v': case 'w': case 'x':
        case 'y': case 'z':
        case '0': case '1': case '2': case '3': case '4': case '5':
        case '6': case '7': case '8': case '9':
        case '_':
            opts++;
            break;
        case ':':
            opts++;
            *klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
            *v_p = opts;
            accept = true;
            break;
        case '\0':
            if (opts != *opts_p) {
                malloc_write("<jemalloc>: Conf string ends "
                    "with key\n");
            }
            return (true);
        default:
            malloc_write("<jemalloc>: Malformed conf string\n");
            return (true);
        }
    }

    for (accept = false; accept == false;) {
        switch (*opts) {
        case ',':
            opts++;
            /*
             * Look ahead one character here, because the next time
             * this function is called, it will assume that end of
             * input has been cleanly reached if no input remains,
             * but we have optimistically already consumed the
             * comma if one exists.
             */
            if (*opts == '\0') {
                malloc_write("<jemalloc>: Conf string ends "
                    "with comma\n");
            }
            *vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
            accept = true;
            break;
        case '\0':
            *vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
            accept = true;
            break;
        default:
            opts++;
            break;
        }
    }

    *opts_p = opts;
    return (false);
}

static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
    size_t vlen)
{

    malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
        (int)vlen, v);
}

static void
malloc_conf_init(void)
{
    unsigned i;
    char buf[PATH_MAX + 1];
    const char *opts, *k, *v;
    size_t klen, vlen;

    for (i = 0; i < 3; i++) {
        /* Get runtime configuration. */
        switch (i) {
        case 0:
            if (je_malloc_conf != NULL) {
                /*
                 * Use options that were compiled into the
                 * program.
                 */
                opts = je_malloc_conf;
            } else {
                /* No configuration specified. */
                buf[0] = '\0';
                opts = buf;
            }
            break;
        case 1: {
            int linklen;
            const char *linkname =
#ifdef JEMALLOC_PREFIX
                "/etc/"JEMALLOC_PREFIX"malloc.conf"
#else
                "/etc/malloc.conf"
#endif
                ;

            if ((linklen = readlink(linkname, buf,
                sizeof(buf) - 1)) != -1) {
                /*
                 * Use the contents of the "/etc/malloc.conf"
                 * symbolic link's name.
                 */
                buf[linklen] = '\0';
                opts = buf;
            } else {
                /* No configuration specified. */
                buf[0] = '\0';
                opts = buf;
            }
            break;
        } case 2: {
            const char *envname =
#ifdef JEMALLOC_PREFIX
                JEMALLOC_CPREFIX"MALLOC_CONF"
#else
                "MALLOC_CONF"
#endif
                ;

            if ((opts = getenv(envname)) != NULL) {
                /*
                 * Do nothing; opts is already initialized to
                 * the value of the MALLOC_CONF environment
                 * variable.
                 */
            } else {
                /* No configuration specified. */
                buf[0] = '\0';
                opts = buf;
            }
            break;
        } default:
            /* NOTREACHED */
            assert(false);
            buf[0] = '\0';
            opts = buf;
        }

        while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
            &vlen) == false) {
#define CONF_HANDLE_BOOL_HIT(o, n, hit) \
            if (sizeof(n)-1 == klen && strncmp(n, k, \
                klen) == 0) { \
                if (strncmp("true", v, vlen) == 0 && \
                    vlen == sizeof("true")-1) \
                    o = true; \
                else if (strncmp("false", v, vlen) == \
                    0 && vlen == sizeof("false")-1) \
                    o = false; \
                else { \
                    malloc_conf_error( \
                        "Invalid conf value", \
                        k, klen, v, vlen); \
                } \
                hit = true; \
            } else \
                hit = false;
#define CONF_HANDLE_BOOL(o, n) { \
            bool hit; \
            CONF_HANDLE_BOOL_HIT(o, n, hit); \
            if (hit) \
                continue; \
}
#define CONF_HANDLE_SIZE_T(o, n, min, max) \
            if (sizeof(n)-1 == klen && strncmp(n, k, \
                klen) == 0) { \
                uintmax_t um; \
                char *end; \
 \
                errno = 0; \
                um = malloc_strtoumax(v, &end, 0); \
                if (errno != 0 || (uintptr_t)end - \
                    (uintptr_t)v != vlen) { \
                    malloc_conf_error( \
                        "Invalid conf value", \
                        k, klen, v, vlen); \
                } else if (um < min || um > max) { \
                    malloc_conf_error( \
                        "Out-of-range conf value", \
                        k, klen, v, vlen); \
                } else \
                    o = um; \
                continue; \
            }
#define CONF_HANDLE_SSIZE_T(o, n, min, max) \
            if (sizeof(n)-1 == klen && strncmp(n, k, \
                klen) == 0) { \
                long l; \
                char *end; \
 \
                errno = 0; \
                l = strtol(v, &end, 0); \
                if (errno != 0 || (uintptr_t)end - \
                    (uintptr_t)v != vlen) { \
                    malloc_conf_error( \
                        "Invalid conf value", \
                        k, klen, v, vlen); \
                } else if (l < (ssize_t)min || l > \
                    (ssize_t)max) { \
                    malloc_conf_error( \
                        "Out-of-range conf value", \
                        k, klen, v, vlen); \
                } else \
                    o = l; \
                continue; \
            }
#define CONF_HANDLE_CHAR_P(o, n, d) \
            if (sizeof(n)-1 == klen && strncmp(n, k, \
                klen) == 0) { \
                size_t cpylen = (vlen <= \
                    sizeof(o)-1) ? vlen : \
                    sizeof(o)-1; \
                strncpy(o, v, cpylen); \
                o[cpylen] = '\0'; \
                continue; \
            }

            CONF_HANDLE_BOOL(opt_abort, "abort")
            /*
             * Chunks always require at least one header page, plus
             * one data page in the absence of redzones, or three
             * pages in the presence of redzones.  In order to
             * simplify options processing, fix the limit based on
             * config_fill.
             */
            CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
                (config_fill ? 2 : 1), (sizeof(size_t) << 3) - 1)
            CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1,
                SIZE_T_MAX)
            CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
                -1, (sizeof(size_t) << 3) - 1)
            CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
            if (config_fill) {
                CONF_HANDLE_BOOL(opt_junk, "junk")
                CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
                    0, SIZE_T_MAX)
                CONF_HANDLE_BOOL(opt_redzone, "redzone")
                CONF_HANDLE_BOOL(opt_zero, "zero")
            }
            if (config_utrace) {
                CONF_HANDLE_BOOL(opt_utrace, "utrace")
            }
            if (config_valgrind) {
                bool hit;
                CONF_HANDLE_BOOL_HIT(opt_valgrind,
                    "valgrind", hit)
                if (config_fill && opt_valgrind && hit) {
                    opt_junk = false;
                    opt_zero = false;
                    if (opt_quarantine == 0) {
                        opt_quarantine =
                            JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
                    }
                    opt_redzone = true;
                }
                if (hit)
                    continue;
            }
            if (config_xmalloc) {
                CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
            }
            if (config_tcache) {
                CONF_HANDLE_BOOL(opt_tcache, "tcache")
                CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
                    "lg_tcache_max", -1,
                    (sizeof(size_t) << 3) - 1)
            }
            if (config_prof) {
                CONF_HANDLE_BOOL(opt_prof, "prof")
                CONF_HANDLE_CHAR_P(opt_prof_prefix,
                    "prof_prefix", "jeprof")
                CONF_HANDLE_BOOL(opt_prof_active, "prof_active")
                CONF_HANDLE_SSIZE_T(opt_lg_prof_sample,
                    "lg_prof_sample", 0,
                    (sizeof(uint64_t) << 3) - 1)
                CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum")
                CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
                    "lg_prof_interval", -1,
                    (sizeof(uint64_t) << 3) - 1)
                CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump")
                CONF_HANDLE_BOOL(opt_prof_final, "prof_final")
                CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak")
            }
            malloc_conf_error("Invalid conf pair", k, klen, v,
                vlen);
#undef CONF_HANDLE_BOOL
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
        }
    }
}

static bool
malloc_init_hard(void)
{
    arena_t *init_arenas[1];

    malloc_mutex_lock(&init_lock);
    if (malloc_initialized || IS_INITIALIZER) {
        /*
         * Another thread initialized the allocator before this one
         * acquired init_lock, or this thread is the initializing
         * thread, and it is recursively allocating.
         */
        malloc_mutex_unlock(&init_lock);
        return (false);
    }
#ifdef JEMALLOC_THREADED_INIT
    if (malloc_initializer != NO_INITIALIZER && IS_INITIALIZER == false) {
        /* Busy-wait until the initializing thread completes. */
        do {
            malloc_mutex_unlock(&init_lock);
            CPU_SPINWAIT;
            malloc_mutex_lock(&init_lock);
        } while (malloc_initialized == false);
        malloc_mutex_unlock(&init_lock);
        return (false);
    }
#endif
    malloc_initializer = INITIALIZER;

    malloc_tsd_boot();
    if (config_prof)
        prof_boot0();

    malloc_conf_init();

#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE))
    /* Register fork handlers. */
    if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
        jemalloc_postfork_child) != 0) {
        malloc_write("<jemalloc>: Error in pthread_atfork()\n");
        if (opt_abort)
            abort();
    }
#endif

    if (opt_stats_print) {
        /* Print statistics at exit. */
        if (atexit(stats_print_atexit) != 0) {
            malloc_write("<jemalloc>: Error in atexit()\n");
            if (opt_abort)
                abort();
        }
    }

    if (base_boot()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (chunk_boot0()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (ctl_boot()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (config_prof)
        prof_boot1();

    arena_boot();

    if (config_tcache && tcache_boot0()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (huge_boot()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (malloc_mutex_init(&arenas_lock)) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    /*
     * Create enough scaffolding to allow recursive allocation in
     * malloc_ncpus().
     */
    narenas = 1;
    arenas = init_arenas;
    memset(arenas, 0, sizeof(arena_t *) * narenas);

    /*
     * Initialize one arena here.  The rest are lazily created in
     * choose_arena_hard().
     */
    arenas_extend(0);
    if (arenas[0] == NULL) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    /* Initialize allocation counters before any allocations can occur. */
    if (config_stats && thread_allocated_tsd_boot()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (arenas_tsd_boot()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (config_tcache && tcache_boot1()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (config_fill && quarantine_boot()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (config_prof && prof_boot2()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    /* Get number of CPUs. */
    malloc_mutex_unlock(&init_lock);
    ncpus = malloc_ncpus();
    malloc_mutex_lock(&init_lock);

    if (chunk_boot1()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (mutex_boot()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (opt_narenas == 0) {
        /*
         * For SMP systems, create more than one arena per CPU by
         * default.
         */
        if (ncpus > 1)
            opt_narenas = ncpus << 2;
        else
            opt_narenas = 1;
    }
    narenas = opt_narenas;
    /*
     * Make sure that the arenas array can be allocated.  In practice, this
     * limit is enough to allow the allocator to function, but the ctl
     * machinery will fail to allocate memory at far lower limits.
     */
    if (narenas > chunksize / sizeof(arena_t *)) {
        narenas = chunksize / sizeof(arena_t *);
        malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
            narenas);
    }

    /* Allocate and initialize arenas. */
    arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
    if (arenas == NULL) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }
    /*
     * Zero the array.  In practice, this should always be pre-zeroed,
     * since it was just mmap()ed, but let's be sure.
     */
    memset(arenas, 0, sizeof(arena_t *) * narenas);
    /* Copy the pointer to the one arena that was already initialized. */
    arenas[0] = init_arenas[0];

    malloc_initialized = true;
    malloc_mutex_unlock(&init_lock);
    return (false);
}

/*
 * End initialization functions.
 */
/******************************************************************************/
/*
 * Begin malloc(3)-compatible functions.
 */
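/*
 * A note on the opt_prof paths below: when PROF_ALLOC_PREP() hands back a
 * real counter (cnt != (uintptr_t)1U) and prof_promote is true, sampled small
 * requests are allocated as SMALL_MAXCLASS+1 bytes so that they land in a
 * large size class, and arena_prof_promoted() then records the true usable
 * size.  The promotion is what allows a profiling context to be associated
 * with the allocation.
 */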
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_malloc(size_t size)
{
    void *ret;
    size_t usize;
    prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

    if (malloc_init()) {
        ret = NULL;
        goto label_oom;
    }

    if (size == 0)
        size = 1;

    if (config_prof && opt_prof) {
        usize = s2u(size);
        PROF_ALLOC_PREP(1, usize, cnt);
        if (cnt == NULL) {
            ret = NULL;
            goto label_oom;
        }
        if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
            SMALL_MAXCLASS) {
            ret = imalloc(SMALL_MAXCLASS+1);
            if (ret != NULL)
                arena_prof_promoted(ret, usize);
        } else
            ret = imalloc(size);
    } else {
        if (config_stats || (config_valgrind && opt_valgrind))
            usize = s2u(size);
        ret = imalloc(size);
    }

label_oom:
    if (ret == NULL) {
        if (config_xmalloc && opt_xmalloc) {
            malloc_write("<jemalloc>: Error in malloc(): "
                "out of memory\n");
            abort();
        }
        errno = ENOMEM;
    }
    if (config_prof && opt_prof && ret != NULL)
        prof_malloc(ret, usize, cnt);
    if (config_stats && ret != NULL) {
        assert(usize == isalloc(ret, config_prof));
        thread_allocated_tsd_get()->allocated += usize;
    }
    UTRACE(0, size, ret);
    JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
    return (ret);
}

JEMALLOC_ATTR(nonnull(1))
#ifdef JEMALLOC_PROF
/*
 * Avoid any uncertainty as to how many backtrace frames to ignore in
 * PROF_ALLOC_PREP().
 */
JEMALLOC_ATTR(noinline)
#endif
static int
imemalign(void **memptr, size_t alignment, size_t size,
    size_t min_alignment)
{
    int ret;
    size_t usize;
    void *result;
    prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

    assert(min_alignment != 0);

    if (malloc_init())
        result = NULL;
    else {
        if (size == 0)
            size = 1;

        /* Make sure that alignment is a large enough power of 2. */
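        /*
         * min_alignment is sizeof(void *) when called from
         * je_posix_memalign() and 1 when called from je_aligned_alloc(),
         * je_memalign(), and je_valloc().
         */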
        if (((alignment - 1) & alignment) != 0
            || (alignment < min_alignment)) {
            if (config_xmalloc && opt_xmalloc) {
                malloc_write("<jemalloc>: Error allocating "
                    "aligned memory: invalid alignment\n");
                abort();
            }
            result = NULL;
            ret = EINVAL;
            goto label_return;
        }

        usize = sa2u(size, alignment);
        if (usize == 0) {
            result = NULL;
            ret = ENOMEM;
            goto label_return;
        }

        if (config_prof && opt_prof) {
            PROF_ALLOC_PREP(2, usize, cnt);
            if (cnt == NULL) {
                result = NULL;
                ret = EINVAL;
            } else {
                if (prof_promote && (uintptr_t)cnt !=
                    (uintptr_t)1U && usize <= SMALL_MAXCLASS) {
                    assert(sa2u(SMALL_MAXCLASS+1,
                        alignment) != 0);
                    result = ipalloc(sa2u(SMALL_MAXCLASS+1,
                        alignment), alignment, false);
                    if (result != NULL) {
                        arena_prof_promoted(result,
                            usize);
                    }
                } else {
                    result = ipalloc(usize, alignment,
                        false);
                }
            }
        } else
            result = ipalloc(usize, alignment, false);
    }

    if (result == NULL) {
        if (config_xmalloc && opt_xmalloc) {
            malloc_write("<jemalloc>: Error allocating aligned "
                "memory: out of memory\n");
            abort();
        }
        ret = ENOMEM;
        goto label_return;
    }

    *memptr = result;
    ret = 0;

label_return:
    if (config_stats && result != NULL) {
        assert(usize == isalloc(result, config_prof));
        thread_allocated_tsd_get()->allocated += usize;
    }
    if (config_prof && opt_prof && result != NULL)
        prof_malloc(result, usize, cnt);
    UTRACE(0, size, result);
    return (ret);
}

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_posix_memalign(void **memptr, size_t alignment, size_t size)
{
    int ret = imemalign(memptr, alignment, size, sizeof(void *));
    JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr,
        config_prof), false);
    return (ret);
}

JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_aligned_alloc(size_t alignment, size_t size)
{
    void *ret;
    int err;

    if ((err = imemalign(&ret, alignment, size, 1)) != 0) {
        ret = NULL;
        errno = err;
    }
    JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof),
        false);
    return (ret);
}

JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_calloc(size_t num, size_t size)
{
    void *ret;
    size_t num_size;
    size_t usize;
    prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

    if (malloc_init()) {
        num_size = 0;
        ret = NULL;
        goto label_return;
    }

    num_size = num * size;
    if (num_size == 0) {
        if (num == 0 || size == 0)
            num_size = 1;
        else {
            ret = NULL;
            goto label_return;
        }
    /*
     * Try to avoid division here.  We know that it isn't possible to
     * overflow during multiplication if neither operand uses any of the
     * most significant half of the bits in a size_t.
     */
    } else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
        && (num_size / size != num)) {
        /* size_t overflow. */
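        /*
         * For example, with a 64-bit size_t the mask covers the upper
         * 32 bits of each operand, so the division above only runs
         * when num or size is at least 2^32.
         */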
        ret = NULL;
        goto label_return;
    }

    if (config_prof && opt_prof) {
        usize = s2u(num_size);
        PROF_ALLOC_PREP(1, usize, cnt);
        if (cnt == NULL) {
            ret = NULL;
            goto label_return;
        }
        if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize
            <= SMALL_MAXCLASS) {
            ret = icalloc(SMALL_MAXCLASS+1);
            if (ret != NULL)
                arena_prof_promoted(ret, usize);
        } else
            ret = icalloc(num_size);
    } else {
        if (config_stats || (config_valgrind && opt_valgrind))
            usize = s2u(num_size);
        ret = icalloc(num_size);
    }

label_return:
    if (ret == NULL) {
        if (config_xmalloc && opt_xmalloc) {
            malloc_write("<jemalloc>: Error in calloc(): out of "
                "memory\n");
            abort();
        }
        errno = ENOMEM;
    }

    if (config_prof && opt_prof && ret != NULL)
        prof_malloc(ret, usize, cnt);
    if (config_stats && ret != NULL) {
        assert(usize == isalloc(ret, config_prof));
        thread_allocated_tsd_get()->allocated += usize;
    }
    UTRACE(0, num_size, ret);
    JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true);
    return (ret);
}

JEMALLOC_ATTR(visibility("default"))
void *
je_realloc(void *ptr, size_t size)
{
    void *ret;
    size_t usize;
    size_t old_size = 0;
    size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
    prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
    prof_ctx_t *old_ctx JEMALLOC_CC_SILENCE_INIT(NULL);

    if (size == 0) {
        if (ptr != NULL) {
            /* realloc(ptr, 0) is equivalent to free(ptr). */
            if (config_prof) {
                old_size = isalloc(ptr, true);
                if (config_valgrind && opt_valgrind)
                    old_rzsize = p2rz(ptr);
            } else if (config_stats) {
                old_size = isalloc(ptr, false);
                if (config_valgrind && opt_valgrind)
                    old_rzsize = u2rz(old_size);
            } else if (config_valgrind && opt_valgrind) {
                old_size = isalloc(ptr, false);
                old_rzsize = u2rz(old_size);
            }
            if (config_prof && opt_prof) {
                old_ctx = prof_ctx_get(ptr);
                cnt = NULL;
            }
            iqalloc(ptr);
            ret = NULL;
            goto label_return;
        } else
            size = 1;
    }

    if (ptr != NULL) {
        assert(malloc_initialized || IS_INITIALIZER);

        if (config_prof) {
            old_size = isalloc(ptr, true);
            if (config_valgrind && opt_valgrind)
                old_rzsize = p2rz(ptr);
        } else if (config_stats) {
            old_size = isalloc(ptr, false);
            if (config_valgrind && opt_valgrind)
                old_rzsize = u2rz(old_size);
        } else if (config_valgrind && opt_valgrind) {
            old_size = isalloc(ptr, false);
            old_rzsize = u2rz(old_size);
        }
        if (config_prof && opt_prof) {
            usize = s2u(size);
            old_ctx = prof_ctx_get(ptr);
            PROF_ALLOC_PREP(1, usize, cnt);
            if (cnt == NULL) {
                old_ctx = NULL;
                ret = NULL;
                goto label_oom;
            }
            if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U &&
                usize <= SMALL_MAXCLASS) {
                ret = iralloc(ptr, SMALL_MAXCLASS+1, 0, 0,
                    false, false);
                if (ret != NULL)
                    arena_prof_promoted(ret, usize);
                else
                    old_ctx = NULL;
            } else {
                ret = iralloc(ptr, size, 0, 0, false, false);
                if (ret == NULL)
                    old_ctx = NULL;
            }
        } else {
            if (config_stats || (config_valgrind && opt_valgrind))
                usize = s2u(size);
            ret = iralloc(ptr, size, 0, 0, false, false);
        }

label_oom:
        if (ret == NULL) {
            if (config_xmalloc && opt_xmalloc) {
                malloc_write("<jemalloc>: Error in realloc(): "
                    "out of memory\n");
1113 "out of memory\n"); 1114 abort(); 1115 } 1116 errno = ENOMEM; 1117 } 1118 } else { 1119 /* realloc(NULL, size) is equivalent to malloc(size). */ 1120 if (config_prof && opt_prof) 1121 old_ctx = NULL; 1122 if (malloc_init()) { 1123 if (config_prof && opt_prof) 1124 cnt = NULL; 1125 ret = NULL; 1126 } else { 1127 if (config_prof && opt_prof) { 1128 usize = s2u(size); 1129 PROF_ALLOC_PREP(1, usize, cnt); 1130 if (cnt == NULL) 1131 ret = NULL; 1132 else { 1133 if (prof_promote && (uintptr_t)cnt != 1134 (uintptr_t)1U && usize <= 1135 SMALL_MAXCLASS) { 1136 ret = imalloc(SMALL_MAXCLASS+1); 1137 if (ret != NULL) { 1138 arena_prof_promoted(ret, 1139 usize); 1140 } 1141 } else 1142 ret = imalloc(size); 1143 } 1144 } else { 1145 if (config_stats || (config_valgrind && 1146 opt_valgrind)) 1147 usize = s2u(size); 1148 ret = imalloc(size); 1149 } 1150 } 1151 1152 if (ret == NULL) { 1153 if (config_xmalloc && opt_xmalloc) { 1154 malloc_write("<jemalloc>: Error in realloc(): " 1155 "out of memory\n"); 1156 abort(); 1157 } 1158 errno = ENOMEM; 1159 } 1160 } 1161 1162label_return: 1163 if (config_prof && opt_prof) 1164 prof_realloc(ret, usize, cnt, old_size, old_ctx); 1165 if (config_stats && ret != NULL) { 1166 thread_allocated_t *ta; 1167 assert(usize == isalloc(ret, config_prof)); 1168 ta = thread_allocated_tsd_get(); 1169 ta->allocated += usize; 1170 ta->deallocated += old_size; 1171 } 1172 UTRACE(ptr, size, ret); 1173 JEMALLOC_VALGRIND_REALLOC(ret, usize, ptr, old_size, old_rzsize, false); 1174 return (ret); 1175} 1176 1177JEMALLOC_ATTR(visibility("default")) 1178void 1179je_free(void *ptr) 1180{ 1181 1182 UTRACE(ptr, 0, 0); 1183 if (ptr != NULL) { 1184 size_t usize; 1185 size_t rzsize JEMALLOC_CC_SILENCE_INIT(0); 1186 1187 assert(malloc_initialized || IS_INITIALIZER); 1188 1189 if (config_prof && opt_prof) { 1190 usize = isalloc(ptr, config_prof); 1191 prof_free(ptr, usize); 1192 } else if (config_stats || config_valgrind) 1193 usize = isalloc(ptr, config_prof); 1194 if (config_stats) 1195 thread_allocated_tsd_get()->deallocated += usize; 1196 if (config_valgrind && opt_valgrind) 1197 rzsize = p2rz(ptr); 1198 iqalloc(ptr); 1199 JEMALLOC_VALGRIND_FREE(ptr, rzsize); 1200 } 1201} 1202 1203/* 1204 * End malloc(3)-compatible functions. 1205 */ 1206/******************************************************************************/ 1207/* 1208 * Begin non-standard override functions. 
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_memalign(size_t alignment, size_t size)
{
    void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
    imemalign(&ret, alignment, size, 1);
    JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
    return (ret);
}
#endif

#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_valloc(size_t size)
{
    void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
    imemalign(&ret, PAGE, size, 1);
    JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
    return (ret);
}
#endif

/*
 * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
 * #define je_malloc malloc
 */
#define malloc_is_malloc 1
#define is_malloc_(a) malloc_is_ ## a
#define is_malloc(a) is_malloc_(a)

#if ((is_malloc(je_malloc) == 1) && defined(__GLIBC__) && !defined(__UCLIBC__))
/*
 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
 * to inconsistently reference libc's malloc(3)-compatible functions
 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
 *
 * These definitions interpose hooks in glibc.  The functions are actually
 * passed an extra argument for the caller return address, which will be
 * ignored.
 */
JEMALLOC_ATTR(visibility("default"))
void (* const __free_hook)(void *ptr) = je_free;

JEMALLOC_ATTR(visibility("default"))
void *(* const __malloc_hook)(size_t size) = je_malloc;

JEMALLOC_ATTR(visibility("default"))
void *(* const __realloc_hook)(void *ptr, size_t size) = je_realloc;

JEMALLOC_ATTR(visibility("default"))
void *(* const __memalign_hook)(size_t alignment, size_t size) = je_memalign;
#endif

/*
 * End non-standard override functions.
 */
/******************************************************************************/
/*
 * Begin non-standard functions.
 */

JEMALLOC_ATTR(visibility("default"))
size_t
je_malloc_usable_size(const void *ptr)
{
    size_t ret;

    assert(malloc_initialized || IS_INITIALIZER);

    if (config_ivsalloc)
        ret = ivsalloc(ptr, config_prof);
    else
        ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0;

    return (ret);
}

JEMALLOC_ATTR(visibility("default"))
void
je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *opts)
{

    stats_print(write_cb, cbopaque, opts);
}

JEMALLOC_ATTR(visibility("default"))
int
je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{

    if (malloc_init())
        return (EAGAIN);

    return (ctl_byname(name, oldp, oldlenp, newp, newlen));
}

JEMALLOC_ATTR(visibility("default"))
int
je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
{

    if (malloc_init())
        return (EAGAIN);

    return (ctl_nametomib(name, mibp, miblenp));
}

JEMALLOC_ATTR(visibility("default"))
int
je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{

    if (malloc_init())
        return (EAGAIN);

    return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
}

/*
 * End non-standard functions.
 */
/******************************************************************************/
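/*
 * A sketch of how the experimental *allocm() API below is meant to be used.
 * This file only decodes the low lg(alignment) bits of the flags word via
 * ALLOCM_LG_ALIGN_MASK; the ALLOCM_LG_ALIGN()/ALLOCM_ALIGN() flag
 * constructors are assumed to be provided by jemalloc.h:
 *
 *     void *p;
 *     size_t rsize;
 *     if (je_allocm(&p, &rsize, 4096, ALLOCM_LG_ALIGN(6) | ALLOCM_ZERO) ==
 *         ALLOCM_SUCCESS) {
 *         // p is zeroed and 64-byte-aligned; rsize holds its usable size.
 *         je_dallocm(p, 0);
 *     }
 */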
/*
 * Begin experimental functions.
 */
#ifdef JEMALLOC_EXPERIMENTAL

JEMALLOC_INLINE void *
iallocm(size_t usize, size_t alignment, bool zero)
{

    assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize,
        alignment)));

    if (alignment != 0)
        return (ipalloc(usize, alignment, zero));
    else if (zero)
        return (icalloc(usize));
    else
        return (imalloc(usize));
}

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
{
    void *p;
    size_t usize;
    size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
        & (SIZE_T_MAX-1));
    bool zero = flags & ALLOCM_ZERO;
    prof_thr_cnt_t *cnt;

    assert(ptr != NULL);
    assert(size != 0);

    if (malloc_init())
        goto label_oom;

    usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
    if (usize == 0)
        goto label_oom;

    if (config_prof && opt_prof) {
        PROF_ALLOC_PREP(1, usize, cnt);
        if (cnt == NULL)
            goto label_oom;
        if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
            SMALL_MAXCLASS) {
            size_t usize_promoted = (alignment == 0) ?
                s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1,
                alignment);
            assert(usize_promoted != 0);
            p = iallocm(usize_promoted, alignment, zero);
            if (p == NULL)
                goto label_oom;
            arena_prof_promoted(p, usize);
        } else {
            p = iallocm(usize, alignment, zero);
            if (p == NULL)
                goto label_oom;
        }
        prof_malloc(p, usize, cnt);
    } else {
        p = iallocm(usize, alignment, zero);
        if (p == NULL)
            goto label_oom;
    }
    if (rsize != NULL)
        *rsize = usize;

    *ptr = p;
    if (config_stats) {
        assert(usize == isalloc(p, config_prof));
        thread_allocated_tsd_get()->allocated += usize;
    }
    UTRACE(0, size, p);
    JEMALLOC_VALGRIND_MALLOC(true, p, usize, zero);
    return (ALLOCM_SUCCESS);
label_oom:
    if (config_xmalloc && opt_xmalloc) {
        malloc_write("<jemalloc>: Error in allocm(): "
            "out of memory\n");
        abort();
    }
    *ptr = NULL;
    UTRACE(0, size, 0);
    return (ALLOCM_ERR_OOM);
}

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
{
    void *p, *q;
    size_t usize;
    size_t old_size;
    size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
    size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
        & (SIZE_T_MAX-1));
    bool zero = flags & ALLOCM_ZERO;
    bool no_move = flags & ALLOCM_NO_MOVE;
    prof_thr_cnt_t *cnt;

    assert(ptr != NULL);
    assert(*ptr != NULL);
    assert(size != 0);
    assert(SIZE_T_MAX - size >= extra);
    assert(malloc_initialized || IS_INITIALIZER);

    p = *ptr;
    if (config_prof && opt_prof) {
        /*
         * usize isn't knowable before iralloc() returns when extra is
         * non-zero.  Therefore, compute its maximum possible value and
         * use that in PROF_ALLOC_PREP() to decide whether to capture a
         * backtrace.  prof_realloc() will use the actual usize to
         * decide whether to sample.
         */
        size_t max_usize = (alignment == 0) ? s2u(size+extra) :
            sa2u(size+extra, alignment);
        prof_ctx_t *old_ctx = prof_ctx_get(p);
        old_size = isalloc(p, true);
        if (config_valgrind && opt_valgrind)
            old_rzsize = p2rz(p);
        PROF_ALLOC_PREP(1, max_usize, cnt);
        if (cnt == NULL)
            goto label_oom;
        /*
         * Use minimum usize to determine whether promotion may happen.
         */
        if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
            && ((alignment == 0) ? s2u(size) : sa2u(size, alignment))
            <= SMALL_MAXCLASS) {
            q = iralloc(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
                size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
                alignment, zero, no_move);
            if (q == NULL)
                goto label_err;
            if (max_usize < PAGE) {
                usize = max_usize;
                arena_prof_promoted(q, usize);
            } else
                usize = isalloc(q, config_prof);
        } else {
            q = iralloc(p, size, extra, alignment, zero, no_move);
            if (q == NULL)
                goto label_err;
            usize = isalloc(q, config_prof);
        }
        prof_realloc(q, usize, cnt, old_size, old_ctx);
        if (rsize != NULL)
            *rsize = usize;
    } else {
        if (config_stats) {
            old_size = isalloc(p, false);
            if (config_valgrind && opt_valgrind)
                old_rzsize = u2rz(old_size);
        } else if (config_valgrind && opt_valgrind) {
            old_size = isalloc(p, false);
            old_rzsize = u2rz(old_size);
        }
        q = iralloc(p, size, extra, alignment, zero, no_move);
        if (q == NULL)
            goto label_err;
        if (config_stats)
            usize = isalloc(q, config_prof);
        if (rsize != NULL) {
            if (config_stats == false)
                usize = isalloc(q, config_prof);
            *rsize = usize;
        }
    }

    *ptr = q;
    if (config_stats) {
        thread_allocated_t *ta;
        ta = thread_allocated_tsd_get();
        ta->allocated += usize;
        ta->deallocated += old_size;
    }
    UTRACE(p, size, q);
    JEMALLOC_VALGRIND_REALLOC(q, usize, p, old_size, old_rzsize, zero);
    return (ALLOCM_SUCCESS);
label_err:
    if (no_move) {
        UTRACE(p, size, q);
        return (ALLOCM_ERR_NOT_MOVED);
    }
label_oom:
    if (config_xmalloc && opt_xmalloc) {
        malloc_write("<jemalloc>: Error in rallocm(): "
            "out of memory\n");
        abort();
    }
    UTRACE(p, size, 0);
    return (ALLOCM_ERR_OOM);
}

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_sallocm(const void *ptr, size_t *rsize, int flags)
{
    size_t sz;

    assert(malloc_initialized || IS_INITIALIZER);

    if (config_ivsalloc)
        sz = ivsalloc(ptr, config_prof);
    else {
        assert(ptr != NULL);
        sz = isalloc(ptr, config_prof);
    }
    assert(rsize != NULL);
    *rsize = sz;

    return (ALLOCM_SUCCESS);
}

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_dallocm(void *ptr, int flags)
{
    size_t usize;
    size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);

    assert(ptr != NULL);
    assert(malloc_initialized || IS_INITIALIZER);

    UTRACE(ptr, 0, 0);
    if (config_stats || config_valgrind)
        usize = isalloc(ptr, config_prof);
    if (config_prof && opt_prof) {
        if (config_stats == false && config_valgrind == false)
            usize = isalloc(ptr, config_prof);
        prof_free(ptr, usize);
    }
    if (config_stats)
        thread_allocated_tsd_get()->deallocated += usize;
    if (config_valgrind && opt_valgrind)
        rzsize = p2rz(ptr);
    iqalloc(ptr);
    JEMALLOC_VALGRIND_FREE(ptr, rzsize);

    return (ALLOCM_SUCCESS);
1587} 1588 1589JEMALLOC_ATTR(visibility("default")) 1590int 1591je_nallocm(size_t *rsize, size_t size, int flags) 1592{ 1593 size_t usize; 1594 size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK) 1595 & (SIZE_T_MAX-1)); 1596 1597 assert(size != 0); 1598 1599 if (malloc_init()) 1600 return (ALLOCM_ERR_OOM); 1601 1602 usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment); 1603 if (usize == 0) 1604 return (ALLOCM_ERR_OOM); 1605 1606 if (rsize != NULL) 1607 *rsize = usize; 1608 return (ALLOCM_SUCCESS); 1609} 1610 1611#endif 1612/* 1613 * End experimental functions. 1614 */ 1615/******************************************************************************/ 1616/* 1617 * The following functions are used by threading libraries for protection of 1618 * malloc during fork(). 1619 */ 1620 1621#ifndef JEMALLOC_MUTEX_INIT_CB 1622void 1623jemalloc_prefork(void) 1624#else 1625JEMALLOC_ATTR(visibility("default")) 1626void 1627_malloc_prefork(void) 1628#endif 1629{ 1630 unsigned i; 1631 1632 /* Acquire all mutexes in a safe order. */ 1633 malloc_mutex_prefork(&arenas_lock); 1634 for (i = 0; i < narenas; i++) { 1635 if (arenas[i] != NULL) 1636 arena_prefork(arenas[i]); 1637 } 1638 base_prefork(); 1639 huge_prefork(); 1640 chunk_dss_prefork(); 1641} 1642 1643#ifndef JEMALLOC_MUTEX_INIT_CB 1644void 1645jemalloc_postfork_parent(void) 1646#else 1647JEMALLOC_ATTR(visibility("default")) 1648void 1649_malloc_postfork(void) 1650#endif 1651{ 1652 unsigned i; 1653 1654 /* Release all mutexes, now that fork() has completed. */ 1655 chunk_dss_postfork_parent(); 1656 huge_postfork_parent(); 1657 base_postfork_parent(); 1658 for (i = 0; i < narenas; i++) { 1659 if (arenas[i] != NULL) 1660 arena_postfork_parent(arenas[i]); 1661 } 1662 malloc_mutex_postfork_parent(&arenas_lock); 1663} 1664 1665void 1666jemalloc_postfork_child(void) 1667{ 1668 unsigned i; 1669 1670 /* Release all mutexes, now that fork() has completed. */ 1671 chunk_dss_postfork_child(); 1672 huge_postfork_child(); 1673 base_postfork_child(); 1674 for (i = 0; i < narenas; i++) { 1675 if (arenas[i] != NULL) 1676 arena_postfork_child(arenas[i]); 1677 } 1678 malloc_mutex_postfork_child(&arenas_lock); 1679} 1680 1681/******************************************************************************/ 1682/* 1683 * The following functions are used for TLS allocation/deallocation in static 1684 * binaries on FreeBSD. The primary difference between these and i[mcd]alloc() 1685 * is that these avoid accessing TLS variables. 1686 */ 1687 1688static void * 1689a0alloc(size_t size, bool zero) 1690{ 1691 1692 if (malloc_init()) 1693 return (NULL); 1694 1695 if (size == 0) 1696 size = 1; 1697 1698 if (size <= arena_maxclass) 1699 return (arena_malloc(arenas[0], size, zero, false)); 1700 else 1701 return (huge_malloc(size, zero)); 1702} 1703 1704void * 1705a0malloc(size_t size) 1706{ 1707 1708 return (a0alloc(size, false)); 1709} 1710 1711void * 1712a0calloc(size_t num, size_t size) 1713{ 1714 1715 return (a0alloc(num * size, true)); 1716} 1717 1718void 1719a0free(void *ptr) 1720{ 1721 arena_chunk_t *chunk; 1722 1723 if (ptr == NULL) 1724 return; 1725 1726 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); 1727 if (chunk != ptr) 1728 arena_dalloc(chunk->arena, chunk, ptr, false); 1729 else 1730 huge_dalloc(ptr, true); 1731} 1732 1733/******************************************************************************/ 1734