jemalloc.c revision 4507f34628dfae26e6b0a6faa13e5f9a49600616
#define	JEMALLOC_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

malloc_mutex_t	arenas_lock;
arena_t		**arenas;
unsigned	narenas;

pthread_key_t	arenas_tsd;
#ifndef NO_TLS
__thread arena_t	*arenas_tls JEMALLOC_ATTR(tls_model("initial-exec"));
#endif

#ifndef NO_TLS
__thread thread_allocated_t	thread_allocated_tls;
#endif
pthread_key_t	thread_allocated_tsd;

/* Set to true once the allocator has been initialized. */
static bool	malloc_initialized = false;

/* Used to let the initializing thread recursively allocate. */
static pthread_t	malloc_initializer = (unsigned long)0;

/* Used to avoid initialization races. */
static malloc_mutex_t	init_lock = MALLOC_MUTEX_INITIALIZER;

#ifdef DYNAMIC_PAGE_SHIFT
size_t	pagesize;
size_t	pagesize_mask;
size_t	lg_pagesize;
#endif

unsigned	ncpus;

/* Runtime configuration options. */
const char	*je_malloc_conf JEMALLOC_ATTR(visibility("default"));
#ifdef JEMALLOC_DEBUG
bool	opt_abort = true;
# ifdef JEMALLOC_FILL
bool	opt_junk = true;
# else
bool	opt_junk = false;
# endif
#else
bool	opt_abort = false;
bool	opt_junk = false;
#endif
bool	opt_xmalloc = false;
bool	opt_zero = false;
size_t	opt_narenas = 0;

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void	wrtmessage(void *cbopaque, const char *s);
static void	stats_print_atexit(void);
static unsigned	malloc_ncpus(void);
static void	arenas_cleanup(void *arg);
#ifdef NO_TLS
static void	thread_allocated_cleanup(void *arg);
#endif
static bool	malloc_conf_next(char const **opts_p, char const **k_p,
    size_t *klen_p, char const **v_p, size_t *vlen_p);
static void	malloc_conf_error(const char *msg, const char *k, size_t klen,
    const char *v, size_t vlen);
static void	malloc_conf_init(void);
static bool	malloc_init_hard(void);
static int	imemalign(void **memptr, size_t alignment, size_t size,
    bool enforce_min_alignment);

/******************************************************************************/
/* malloc_message() setup. */

JEMALLOC_CATTR(visibility("hidden"), static)
void
wrtmessage(void *cbopaque, const char *s)
{
	UNUSED int result = write(STDERR_FILENO, s, strlen(s));
}

void	(*je_malloc_message)(void *, const char *s)
    JEMALLOC_ATTR(visibility("default")) = wrtmessage;

/******************************************************************************/
/*
 * Begin miscellaneous support functions.
 */

/* Create a new arena and insert it into the arenas array at index ind. */
arena_t *
arenas_extend(unsigned ind)
{
	arena_t *ret;

	ret = (arena_t *)base_alloc(sizeof(arena_t));
	if (ret != NULL && arena_new(ret, ind) == false) {
		arenas[ind] = ret;
		return (ret);
	}
	/* Only reached if there is an OOM error. */

	/*
	 * OOM here is quite inconvenient to propagate, since dealing with it
	 * would require a check for failure in the fast path.  Instead, punt
	 * by using arenas[0].  In practice, this is an extremely unlikely
	 * failure.
	 */
	malloc_write("<jemalloc>: Error initializing arena\n");
	if (opt_abort)
		abort();

	return (arenas[0]);
}

/*
 * Choose an arena based on a per-thread value (slow-path code only, called
 * only by choose_arena()).
 */
arena_t *
choose_arena_hard(void)
{
	arena_t *ret;

	if (narenas > 1) {
		unsigned i, choose, first_null;

		choose = 0;
		first_null = narenas;
		malloc_mutex_lock(&arenas_lock);
		assert(arenas[0] != NULL);
		for (i = 1; i < narenas; i++) {
			if (arenas[i] != NULL) {
				/*
				 * Choose the first arena that has the lowest
				 * number of threads assigned to it.
				 */
				if (arenas[i]->nthreads <
				    arenas[choose]->nthreads)
					choose = i;
			} else if (first_null == narenas) {
				/*
				 * Record the index of the first uninitialized
				 * arena, in case all extant arenas are in use.
				 *
				 * NB: It is possible for there to be
				 * discontinuities in terms of initialized
				 * versus uninitialized arenas, due to the
				 * "thread.arena" mallctl.
				 */
				first_null = i;
			}
		}

		if (arenas[choose]->nthreads == 0 || first_null == narenas) {
			/*
			 * Use an unloaded arena, or the least loaded arena if
			 * all arenas are already initialized.
			 */
			ret = arenas[choose];
		} else {
			/* Initialize a new arena. */
			ret = arenas_extend(first_null);
		}
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	} else {
		ret = arenas[0];
		malloc_mutex_lock(&arenas_lock);
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	}

	ARENA_SET(ret);

	return (ret);
}

/*
 * glibc provides a non-standard strerror_r() when _GNU_SOURCE is defined, so
 * provide a wrapper.
 */
int
buferror(int errnum, char *buf, size_t buflen)
{
#ifdef _GNU_SOURCE
	char *b = strerror_r(errno, buf, buflen);
	if (b != buf) {
		strncpy(buf, b, buflen);
		buf[buflen-1] = '\0';
	}
	return (0);
#else
	return (strerror_r(errno, buf, buflen));
#endif
}

static void
stats_print_atexit(void)
{

	if (config_tcache && config_stats) {
		unsigned i;

		/*
		 * Merge stats from extant threads.  This is racy, since
		 * individual threads do not lock when recording tcache stats
		 * events.  As a consequence, the final stats may be slightly
		 * out of date by the time they are reported, if other threads
		 * continue to allocate.
		 */
		for (i = 0; i < narenas; i++) {
			arena_t *arena = arenas[i];
			if (arena != NULL) {
				tcache_t *tcache;

				/*
				 * tcache_stats_merge() locks bins, so if any
				 * code is introduced that acquires both arena
				 * and bin locks in the opposite order,
				 * deadlocks may result.
				 */
				malloc_mutex_lock(&arena->lock);
				ql_foreach(tcache, &arena->tcache_ql, link) {
					tcache_stats_merge(tcache, arena);
				}
				malloc_mutex_unlock(&arena->lock);
			}
		}
	}
	je_malloc_stats_print(NULL, NULL, NULL);
}

thread_allocated_t *
thread_allocated_get_hard(void)
{
	thread_allocated_t *thread_allocated = (thread_allocated_t *)
	    imalloc(sizeof(thread_allocated_t));
	if (thread_allocated == NULL) {
		static thread_allocated_t static_thread_allocated = {0, 0};
		malloc_write("<jemalloc>: Error allocating TSD;"
		    " mallctl(\"thread.{de,}allocated[p]\", ...)"
		    " will be inaccurate\n");
		if (opt_abort)
			abort();
		return (&static_thread_allocated);
	}
	pthread_setspecific(thread_allocated_tsd, thread_allocated);
	thread_allocated->allocated = 0;
	thread_allocated->deallocated = 0;
	return (thread_allocated);
}

/*
 * End miscellaneous support functions.
 */
/******************************************************************************/
/*
 * Begin initialization functions.
 */

static unsigned
malloc_ncpus(void)
{
	unsigned ret;
	long result;

	result = sysconf(_SC_NPROCESSORS_ONLN);
	if (result == -1) {
		/* Error. */
		ret = 1;
	} else
		ret = (unsigned)result;

	return (ret);
}

static void
arenas_cleanup(void *arg)
{
	arena_t *arena = (arena_t *)arg;

	malloc_mutex_lock(&arenas_lock);
	arena->nthreads--;
	malloc_mutex_unlock(&arenas_lock);
}

#ifdef NO_TLS
static void
thread_allocated_cleanup(void *arg)
{
	uint64_t *allocated = (uint64_t *)arg;

	if (allocated != NULL)
		idalloc(allocated);
}
#endif

/*
 * FreeBSD's pthreads implementation calls malloc(3), so the malloc
 * implementation has to take pains to avoid infinite recursion during
 * initialization.
 */
static inline bool
malloc_init(void)
{

	if (malloc_initialized == false)
		return (malloc_init_hard());

	return (false);
}

static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p)
{
	bool accept;
	const char *opts = *opts_p;

	*k_p = opts;

	for (accept = false; accept == false;) {
		switch (*opts) {
		case 'A': case 'B': case 'C': case 'D': case 'E':
		case 'F': case 'G': case 'H': case 'I': case 'J':
		case 'K': case 'L': case 'M': case 'N': case 'O':
		case 'P': case 'Q': case 'R': case 'S': case 'T':
		case 'U': case 'V': case 'W': case 'X': case 'Y':
		case 'Z':
		case 'a': case 'b': case 'c': case 'd': case 'e':
		case 'f': case 'g': case 'h': case 'i': case 'j':
		case 'k': case 'l': case 'm': case 'n': case 'o':
		case 'p': case 'q': case 'r': case 's': case 't':
		case 'u': case 'v': case 'w': case 'x': case 'y':
		case 'z':
		case '0': case '1': case '2': case '3': case '4':
		case '5': case '6': case '7': case '8': case '9':
		case '_':
			opts++;
			break;
		case ':':
			opts++;
			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
			*v_p = opts;
			accept = true;
			break;
		case '\0':
			if (opts != *opts_p) {
				malloc_write("<jemalloc>: Conf string "
				    "ends with key\n");
			}
			return (true);
		default:
			malloc_write("<jemalloc>: Malformed conf "
			    "string\n");
			return (true);
		}
	}

	for (accept = false; accept == false;) {
		switch (*opts) {
		case ',':
			opts++;
			/*
			 * Look ahead one character here, because the
			 * next time this function is called, it will
			 * assume that end of input has been cleanly
			 * reached if no input remains, but we have
			 * optimistically already consumed the comma if
			 * one exists.
			 */
			if (*opts == '\0') {
				malloc_write("<jemalloc>: Conf string "
				    "ends with comma\n");
			}
			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
			accept = true;
			break;
		case '\0':
			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
			accept = true;
			break;
		default:
			opts++;
			break;
		}
	}

	*opts_p = opts;
	return (false);
}

static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
    size_t vlen)
{
	char buf[PATH_MAX + 1];

	malloc_write("<jemalloc>: ");
	malloc_write(msg);
	malloc_write(": ");
	memcpy(buf, k, klen);
	memcpy(&buf[klen], ":", 1);
	memcpy(&buf[klen+1], v, vlen);
	buf[klen+1+vlen] = '\0';
	malloc_write(buf);
	malloc_write("\n");
}
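
/*
 * The configuration grammar accepted by malloc_conf_next() is a
 * comma-separated list of key:value pairs, where keys are drawn from
 * [A-Za-z0-9_].  For example, a setting such as
 *
 *	MALLOC_CONF="lg_chunk:22,narenas:4,stats_print:true"
 *
 * (whether compiled in via je_malloc_conf, read from the /etc/malloc.conf
 * symbolic link, or taken from the MALLOC_CONF environment variable) is
 * parsed into three pairs by the loop in malloc_conf_init() below.
 */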

static void
malloc_conf_init(void)
{
	unsigned i;
	char buf[PATH_MAX + 1];
	const char *opts, *k, *v;
	size_t klen, vlen;

	for (i = 0; i < 3; i++) {
		/* Get runtime configuration. */
		switch (i) {
		case 0:
			if (je_malloc_conf != NULL) {
				/*
				 * Use options that were compiled into the
				 * program.
				 */
				opts = je_malloc_conf;
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		case 1: {
			int linklen;
			const char *linkname =
#ifdef JEMALLOC_PREFIX
			    "/etc/"JEMALLOC_PREFIX"malloc.conf"
#else
			    "/etc/malloc.conf"
#endif
			    ;

			if ((linklen = readlink(linkname, buf,
			    sizeof(buf) - 1)) != -1) {
				/*
				 * Use the contents of the "/etc/malloc.conf"
				 * symbolic link's name.
				 */
				buf[linklen] = '\0';
				opts = buf;
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		}
		case 2: {
			const char *envname =
#ifdef JEMALLOC_PREFIX
			    JEMALLOC_CPREFIX"MALLOC_CONF"
#else
			    "MALLOC_CONF"
#endif
			    ;

			if ((opts = getenv(envname)) != NULL) {
				/*
				 * Do nothing; opts is already initialized to
				 * the value of the MALLOC_CONF environment
				 * variable.
				 */
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		}
		default:
			/* NOTREACHED */
			assert(false);
			buf[0] = '\0';
			opts = buf;
		}

		while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
		    &vlen) == false) {
#define	CONF_HANDLE_BOOL(n)						\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				if (strncmp("true", v, vlen) == 0 &&	\
				    vlen == sizeof("true")-1)		\
					opt_##n = true;			\
				else if (strncmp("false", v, vlen) ==	\
				    0 && vlen == sizeof("false")-1)	\
					opt_##n = false;		\
				else {					\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				}					\
				continue;				\
			}
#define	CONF_HANDLE_SIZE_T(n, min, max)					\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				unsigned long ul;			\
				char *end;				\
									\
				errno = 0;				\
				ul = strtoul(v, &end, 0);		\
				if (errno != 0 || (uintptr_t)end -	\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (ul < min || ul > max) {	\
					malloc_conf_error(		\
					    "Out-of-range conf value",	\
					    k, klen, v, vlen);		\
				} else					\
					opt_##n = ul;			\
				continue;				\
			}
#define	CONF_HANDLE_SSIZE_T(n, min, max)				\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				long l;					\
				char *end;				\
									\
				errno = 0;				\
				l = strtol(v, &end, 0);			\
				if (errno != 0 || (uintptr_t)end -	\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (l < (ssize_t)min || l >	\
				    (ssize_t)max) {			\
					malloc_conf_error(		\
					    "Out-of-range conf value",	\
					    k, klen, v, vlen);		\
				} else					\
					opt_##n = l;			\
				continue;				\
			}
#define	CONF_HANDLE_CHAR_P(n, d)					\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				size_t cpylen = (vlen <=		\
				    sizeof(opt_##n)-1) ? vlen :		\
				    sizeof(opt_##n)-1;			\
				strncpy(opt_##n, v, cpylen);		\
				opt_##n[cpylen] = '\0';			\
				continue;				\
			}

			CONF_HANDLE_BOOL(abort)
			/*
			 * Chunks always require at least one header page,
			 * plus one data page.
			 */
			CONF_HANDLE_SIZE_T(lg_chunk, PAGE_SHIFT+1,
			    (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_SIZE_T(narenas, 1, SIZE_T_MAX)
			CONF_HANDLE_SSIZE_T(lg_dirty_mult, -1,
			    (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_BOOL(stats_print)
			if (config_fill) {
				CONF_HANDLE_BOOL(junk)
				CONF_HANDLE_BOOL(zero)
			}
			if (config_xmalloc) {
				CONF_HANDLE_BOOL(xmalloc)
			}
			if (config_tcache) {
				CONF_HANDLE_BOOL(tcache)
				CONF_HANDLE_SSIZE_T(lg_tcache_max, -1,
				    (sizeof(size_t) << 3) - 1)
			}
			if (config_prof) {
				CONF_HANDLE_BOOL(prof)
				CONF_HANDLE_CHAR_P(prof_prefix, "jeprof")
				CONF_HANDLE_BOOL(prof_active)
				CONF_HANDLE_SSIZE_T(lg_prof_sample, 0,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(prof_accum)
				CONF_HANDLE_SSIZE_T(lg_prof_interval, -1,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(prof_gdump)
				CONF_HANDLE_BOOL(prof_leak)
			}
			malloc_conf_error("Invalid conf pair", k, klen, v,
			    vlen);
#undef CONF_HANDLE_BOOL
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
		}
	}
}

static bool
malloc_init_hard(void)
{
	arena_t *init_arenas[1];

	malloc_mutex_lock(&init_lock);
	if (malloc_initialized || malloc_initializer == pthread_self()) {
		/*
		 * Another thread initialized the allocator before this one
		 * acquired init_lock, or this thread is the initializing
		 * thread, and it is recursively allocating.
		 */
		malloc_mutex_unlock(&init_lock);
		return (false);
	}
	if (malloc_initializer != (unsigned long)0) {
		/* Busy-wait until the initializing thread completes. */
		do {
			malloc_mutex_unlock(&init_lock);
			CPU_SPINWAIT;
			malloc_mutex_lock(&init_lock);
		} while (malloc_initialized == false);
		malloc_mutex_unlock(&init_lock);
		return (false);
	}

#ifdef DYNAMIC_PAGE_SHIFT
	/* Get page size. */
	{
		long result;

		result = sysconf(_SC_PAGESIZE);
		assert(result != -1);
		pagesize = (size_t)result;

		/*
		 * We assume that pagesize is a power of 2 when calculating
		 * pagesize_mask and lg_pagesize.
		 */
		assert(((result - 1) & result) == 0);
		pagesize_mask = result - 1;
		lg_pagesize = ffs((int)result) - 1;
	}
#endif

	if (config_prof)
		prof_boot0();

	malloc_conf_init();

	/* Register fork handlers. */
	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork,
	    jemalloc_postfork) != 0) {
		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
		if (opt_abort)
			abort();
	}

	if (ctl_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (opt_stats_print) {
		/* Print statistics at exit. */
		if (atexit(stats_print_atexit) != 0) {
			malloc_write("<jemalloc>: Error in atexit()\n");
			if (opt_abort)
				abort();
		}
	}

	if (chunk_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (base_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_prof)
		prof_boot1();

	arena_boot();

	if (config_tcache && tcache_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (huge_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

#ifdef NO_TLS
	/* Initialize allocation counters before any allocations can occur. */
	if (config_stats && pthread_key_create(&thread_allocated_tsd,
	    thread_allocated_cleanup) != 0) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}
#endif

	if (malloc_mutex_init(&arenas_lock)) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (pthread_key_create(&arenas_tsd, arenas_cleanup) != 0) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/*
	 * Create enough scaffolding to allow recursive allocation in
	 * malloc_ncpus().
	 */
	narenas = 1;
	arenas = init_arenas;
	memset(arenas, 0, sizeof(arena_t *) * narenas);

	/*
	 * Initialize one arena here.  The rest are lazily created in
	 * choose_arena_hard().
	 */
	arenas_extend(0);
	if (arenas[0] == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/*
	 * Assign the initial arena to the initial thread, in order to avoid
	 * spurious creation of an extra arena if the application switches to
	 * threaded mode.
	 */
	ARENA_SET(arenas[0]);
	arenas[0]->nthreads++;

	if (config_prof && prof_boot2()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/* Get number of CPUs. */
	malloc_initializer = pthread_self();
	malloc_mutex_unlock(&init_lock);
	ncpus = malloc_ncpus();
	malloc_mutex_lock(&init_lock);

	if (opt_narenas == 0) {
		/*
		 * For SMP systems, create more than one arena per CPU by
		 * default.
		 */
		if (ncpus > 1)
			opt_narenas = ncpus << 2;
		else
			opt_narenas = 1;
	}
	narenas = opt_narenas;
	/*
	 * Make sure that the arenas array can be allocated.  In practice, this
	 * limit is enough to allow the allocator to function, but the ctl
	 * machinery will fail to allocate memory at far lower limits.
	 */
	if (narenas > chunksize / sizeof(arena_t *)) {
		char buf[UMAX2S_BUFSIZE];

		narenas = chunksize / sizeof(arena_t *);
		malloc_write("<jemalloc>: Reducing narenas to limit (");
		malloc_write(u2s(narenas, 10, buf));
		malloc_write(")\n");
	}

	/* Allocate and initialize arenas. */
	arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
	if (arenas == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}
	/*
	 * Zero the array.  In practice, this should always be pre-zeroed,
	 * since it was just mmap()ed, but let's be sure.
	 */
	memset(arenas, 0, sizeof(arena_t *) * narenas);
	/* Copy the pointer to the one arena that was already initialized. */
	arenas[0] = init_arenas[0];

#ifdef JEMALLOC_ZONE
	/* Register the custom zone. */
	malloc_zone_register(create_zone());

	/*
	 * Convert the default szone to an "overlay zone" that is capable of
	 * deallocating szone-allocated objects, but allocating new objects
	 * from jemalloc.
	 */
	szone2ozone(malloc_default_zone());
#endif

	malloc_initialized = true;
	malloc_mutex_unlock(&init_lock);
	return (false);
}

#ifdef JEMALLOC_ZONE
JEMALLOC_ATTR(constructor)
void
jemalloc_darwin_init(void)
{

	if (malloc_init_hard())
		abort();
}
#endif

/*
 * End initialization functions.
 */
/******************************************************************************/
/*
 * Begin malloc(3)-compatible functions.
 */
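
/*
 * Note on the profiling paths below: when opt_prof is enabled,
 * PROF_ALLOC_PREP() sets cnt to NULL on failure.  When prof_promote is
 * enabled, cnt is not the sentinel value (uintptr_t)1U, and the requested
 * usize is no larger than SMALL_MAXCLASS, the allocation is performed with
 * size SMALL_MAXCLASS+1 and arena_prof_promoted() records the requested
 * usize.  The same pattern recurs in imemalign(), je_calloc(), je_realloc(),
 * and the experimental entry points.
 */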

JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_malloc(size_t size)
{
	void *ret;
	size_t usize;
	prof_thr_cnt_t *cnt
#ifdef JEMALLOC_CC_SILENCE
	    = NULL
#endif
	    ;

	if (malloc_init()) {
		ret = NULL;
		goto OOM;
	}

	if (size == 0)
		size = 1;

	if (config_prof && opt_prof) {
		usize = s2u(size);
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL) {
			ret = NULL;
			goto OOM;
		}
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
		    SMALL_MAXCLASS) {
			ret = imalloc(SMALL_MAXCLASS+1);
			if (ret != NULL)
				arena_prof_promoted(ret, usize);
		} else
			ret = imalloc(size);
	} else {
		if (config_stats)
			usize = s2u(size);
		ret = imalloc(size);
	}

OOM:
	if (ret == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in malloc(): "
			    "out of memory\n");
			abort();
		}
		errno = ENOMEM;
	}
	if (config_prof && opt_prof && ret != NULL)
		prof_malloc(ret, usize, cnt);
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret));
		ALLOCATED_ADD(usize, 0);
	}
	return (ret);
}

JEMALLOC_ATTR(nonnull(1))
#ifdef JEMALLOC_PROF
/*
 * Avoid any uncertainty as to how many backtrace frames to ignore in
 * PROF_ALLOC_PREP().
 */
JEMALLOC_ATTR(noinline)
#endif
static int
imemalign(void **memptr, size_t alignment, size_t size,
    bool enforce_min_alignment)
{
	int ret;
	size_t usize;
	void *result;
	prof_thr_cnt_t *cnt
#ifdef JEMALLOC_CC_SILENCE
	    = NULL
#endif
	    ;

	if (malloc_init())
		result = NULL;
	else {
		if (size == 0)
			size = 1;

		/* Make sure that alignment is a large enough power of 2. */
		if (((alignment - 1) & alignment) != 0
		    || (enforce_min_alignment && alignment < sizeof(void *))) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error in "
				    "posix_memalign(): invalid alignment\n");
				abort();
			}
			result = NULL;
			ret = EINVAL;
			goto RETURN;
		}

		usize = sa2u(size, alignment, NULL);
		if (usize == 0) {
			result = NULL;
			ret = ENOMEM;
			goto RETURN;
		}

		if (config_prof && opt_prof) {
			PROF_ALLOC_PREP(2, usize, cnt);
			if (cnt == NULL) {
				result = NULL;
				ret = EINVAL;
			} else {
				if (prof_promote && (uintptr_t)cnt !=
				    (uintptr_t)1U && usize <= SMALL_MAXCLASS) {
					assert(sa2u(SMALL_MAXCLASS+1,
					    alignment, NULL) != 0);
					result = ipalloc(sa2u(SMALL_MAXCLASS+1,
					    alignment, NULL), alignment, false);
					if (result != NULL) {
						arena_prof_promoted(result,
						    usize);
					}
				} else {
					result = ipalloc(usize, alignment,
					    false);
				}
			}
		} else
			result = ipalloc(usize, alignment, false);
	}

	if (result == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in posix_memalign(): "
			    "out of memory\n");
			abort();
		}
		ret = ENOMEM;
		goto RETURN;
	}

	*memptr = result;
	ret = 0;

RETURN:
	if (config_stats && result != NULL) {
		assert(usize == isalloc(result));
		ALLOCATED_ADD(usize, 0);
	}
	if (config_prof && opt_prof && result != NULL)
		prof_malloc(result, usize, cnt);
	return (ret);
}

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_posix_memalign(void **memptr, size_t alignment, size_t size)
{

	return imemalign(memptr, alignment, size, true);
}

JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_calloc(size_t num, size_t size)
{
	void *ret;
	size_t num_size;
	size_t usize;
	prof_thr_cnt_t *cnt
#ifdef JEMALLOC_CC_SILENCE
	    = NULL
#endif
	    ;

	if (malloc_init()) {
		num_size = 0;
		ret = NULL;
		goto RETURN;
	}

	num_size = num * size;
	if (num_size == 0) {
		if (num == 0 || size == 0)
			num_size = 1;
		else {
			ret = NULL;
			goto RETURN;
		}
	/*
	 * Try to avoid division here.  We know that it isn't possible to
	 * overflow during multiplication if neither operand uses any of the
	 * most significant half of the bits in a size_t.
	 */
	} else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
	    && (num_size / size != num)) {
		/* size_t overflow. */
		ret = NULL;
		goto RETURN;
	}

	if (config_prof && opt_prof) {
		usize = s2u(num_size);
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL) {
			ret = NULL;
			goto RETURN;
		}
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize
		    <= SMALL_MAXCLASS) {
			ret = icalloc(SMALL_MAXCLASS+1);
			if (ret != NULL)
				arena_prof_promoted(ret, usize);
		} else
			ret = icalloc(num_size);
	} else {
		if (config_stats)
			usize = s2u(num_size);
		ret = icalloc(num_size);
	}

RETURN:
	if (ret == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in calloc(): out of "
			    "memory\n");
			abort();
		}
		errno = ENOMEM;
	}

	if (config_prof && opt_prof && ret != NULL)
		prof_malloc(ret, usize, cnt);
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret));
		ALLOCATED_ADD(usize, 0);
	}
	return (ret);
}

JEMALLOC_ATTR(visibility("default"))
void *
je_realloc(void *ptr, size_t size)
{
	void *ret;
	size_t usize;
	size_t old_size = 0;
	prof_thr_cnt_t *cnt
#ifdef JEMALLOC_CC_SILENCE
	    = NULL
#endif
	    ;
	prof_ctx_t *old_ctx
#ifdef JEMALLOC_CC_SILENCE
	    = NULL
#endif
	    ;

	if (size == 0) {
		if (ptr != NULL) {
			/* realloc(ptr, 0) is equivalent to free(ptr). */
			if (config_prof || config_stats)
				old_size = isalloc(ptr);
			if (config_prof && opt_prof) {
				old_ctx = prof_ctx_get(ptr);
				cnt = NULL;
			}
			idalloc(ptr);
			ret = NULL;
			goto RETURN;
		} else
			size = 1;
	}

	if (ptr != NULL) {
		assert(malloc_initialized || malloc_initializer ==
		    pthread_self());

		if (config_prof || config_stats)
			old_size = isalloc(ptr);
		if (config_prof && opt_prof) {
			usize = s2u(size);
			old_ctx = prof_ctx_get(ptr);
			PROF_ALLOC_PREP(1, usize, cnt);
			if (cnt == NULL) {
				old_ctx = NULL;
				ret = NULL;
				goto OOM;
			}
			if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U &&
			    usize <= SMALL_MAXCLASS) {
				ret = iralloc(ptr, SMALL_MAXCLASS+1, 0, 0,
				    false, false);
				if (ret != NULL)
					arena_prof_promoted(ret, usize);
				else
					old_ctx = NULL;
			} else {
				ret = iralloc(ptr, size, 0, 0, false, false);
				if (ret == NULL)
					old_ctx = NULL;
			}
		} else {
			if (config_stats)
				usize = s2u(size);
			ret = iralloc(ptr, size, 0, 0, false, false);
		}

OOM:
		if (ret == NULL) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error in realloc(): "
				    "out of memory\n");
				abort();
			}
			errno = ENOMEM;
		}
	} else {
		/* realloc(NULL, size) is equivalent to malloc(size). */
		if (config_prof && opt_prof)
			old_ctx = NULL;
		if (malloc_init()) {
			if (config_prof && opt_prof)
				cnt = NULL;
			ret = NULL;
		} else {
			if (config_prof && opt_prof) {
				usize = s2u(size);
				PROF_ALLOC_PREP(1, usize, cnt);
				if (cnt == NULL)
					ret = NULL;
				else {
					if (prof_promote && (uintptr_t)cnt !=
					    (uintptr_t)1U && usize <=
					    SMALL_MAXCLASS) {
						ret = imalloc(SMALL_MAXCLASS+1);
						if (ret != NULL) {
							arena_prof_promoted(ret,
							    usize);
						}
					} else
						ret = imalloc(size);
				}
			} else {
				if (config_stats)
					usize = s2u(size);
				ret = imalloc(size);
			}
		}

		if (ret == NULL) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error in realloc(): "
				    "out of memory\n");
				abort();
			}
			errno = ENOMEM;
		}
	}

RETURN:
	if (config_prof && opt_prof)
		prof_realloc(ret, usize, cnt, old_size, old_ctx);
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret));
		ALLOCATED_ADD(usize, old_size);
	}
	return (ret);
}

JEMALLOC_ATTR(visibility("default"))
void
je_free(void *ptr)
{

	if (ptr != NULL) {
		size_t usize;

		assert(malloc_initialized || malloc_initializer ==
		    pthread_self());

		if (config_prof && opt_prof) {
			usize = isalloc(ptr);
			prof_free(ptr, usize);
		} else if (config_stats) {
			usize = isalloc(ptr);
		}
		if (config_stats)
			ALLOCATED_ADD(0, usize);
		idalloc(ptr);
	}
}

/*
 * End malloc(3)-compatible functions.
 */
/******************************************************************************/
/*
 * Begin non-standard override functions.
 */

#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_memalign(size_t alignment, size_t size)
{
	void *ret
#ifdef JEMALLOC_CC_SILENCE
	    = NULL
#endif
	    ;
	imemalign(&ret, alignment, size, false);
	return (ret);
}
#endif

#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_valloc(size_t size)
{
	void *ret
#ifdef JEMALLOC_CC_SILENCE
	    = NULL
#endif
	    ;
	imemalign(&ret, PAGE_SIZE, size, false);
	return (ret);
}
#endif

#if (!defined(JEMALLOC_PREFIX) && defined(__GLIBC__) && !defined(__UCLIBC__))
/*
 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
 * to inconsistently reference libc's malloc(3)-compatible functions
 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
 *
 * These definitions interpose hooks in glibc.  The functions are actually
 * passed an extra argument for the caller return address, which will be
 * ignored.
 */
JEMALLOC_ATTR(visibility("default"))
void (* const __free_hook)(void *ptr) = je_free;

JEMALLOC_ATTR(visibility("default"))
void *(* const __malloc_hook)(size_t size) = je_malloc;

JEMALLOC_ATTR(visibility("default"))
void *(* const __realloc_hook)(void *ptr, size_t size) = je_realloc;

JEMALLOC_ATTR(visibility("default"))
void *(* const __memalign_hook)(size_t alignment, size_t size) = je_memalign;
#endif

/*
 * End non-standard override functions.
 */
/******************************************************************************/
/*
 * Begin non-standard functions.
 */

JEMALLOC_ATTR(visibility("default"))
size_t
je_malloc_usable_size(const void *ptr)
{
	size_t ret;

	assert(malloc_initialized || malloc_initializer == pthread_self());

	if (config_ivsalloc)
		ret = ivsalloc(ptr);
	else {
		assert(ptr != NULL);
		ret = isalloc(ptr);
	}

	return (ret);
}

JEMALLOC_ATTR(visibility("default"))
void
je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *opts)
{

	stats_print(write_cb, cbopaque, opts);
}

JEMALLOC_ATTR(visibility("default"))
int
je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_byname(name, oldp, oldlenp, newp, newlen));
}

JEMALLOC_ATTR(visibility("default"))
int
je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_nametomib(name, mibp, miblenp));
}

JEMALLOC_ATTR(visibility("default"))
int
je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
}

/*
 * End non-standard functions.
 */
/******************************************************************************/
/*
 * Begin experimental functions.
 */
#ifdef JEMALLOC_EXPERIMENTAL

JEMALLOC_INLINE void *
iallocm(size_t usize, size_t alignment, bool zero)
{

	assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize, alignment,
	    NULL)));

	if (alignment != 0)
		return (ipalloc(usize, alignment, zero));
	else if (zero)
		return (icalloc(usize));
	else
		return (imalloc(usize));
}
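
/*
 * In the entry points below, the low bits of the flags argument encode the
 * base-2 logarithm of the requested alignment:
 *
 *	alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)) & (SIZE_T_MAX-1)
 *
 * For example, low bits of 6 request 64-byte alignment, while low bits of 0
 * yield alignment == 0 (no alignment constraint, since 1 & (SIZE_T_MAX-1) ==
 * 0).
 */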

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
{
	void *p;
	size_t usize;
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));
	bool zero = flags & ALLOCM_ZERO;
	prof_thr_cnt_t *cnt;

	assert(ptr != NULL);
	assert(size != 0);

	if (malloc_init())
		goto OOM;

	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment, NULL);
	if (usize == 0)
		goto OOM;

	if (config_prof && opt_prof) {
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL)
			goto OOM;
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
		    SMALL_MAXCLASS) {
			size_t usize_promoted = (alignment == 0) ?
			    s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1,
			    alignment, NULL);
			assert(usize_promoted != 0);
			p = iallocm(usize_promoted, alignment, zero);
			if (p == NULL)
				goto OOM;
			arena_prof_promoted(p, usize);
		} else {
			p = iallocm(usize, alignment, zero);
			if (p == NULL)
				goto OOM;
		}
		prof_malloc(p, usize, cnt);
	} else {
		p = iallocm(usize, alignment, zero);
		if (p == NULL)
			goto OOM;
	}
	if (rsize != NULL)
		*rsize = usize;

	*ptr = p;
	if (config_stats) {
		assert(usize == isalloc(p));
		ALLOCATED_ADD(usize, 0);
	}
	return (ALLOCM_SUCCESS);
OOM:
	if (config_xmalloc && opt_xmalloc) {
		malloc_write("<jemalloc>: Error in allocm(): "
		    "out of memory\n");
		abort();
	}
	*ptr = NULL;
	return (ALLOCM_ERR_OOM);
}
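
/*
 * Illustrative use of the experimental allocation API (a sketch only,
 * assuming the ALLOCM_ALIGN() convenience macro from the public jemalloc
 * header):
 *
 *	void *p;
 *	size_t usize;
 *
 *	if (je_allocm(&p, &usize, 4096, ALLOCM_ALIGN(64) | ALLOCM_ZERO) ==
 *	    ALLOCM_SUCCESS) {
 *		... p is 64-byte aligned and zeroed; usize bytes are usable ...
 *		je_dallocm(p, 0);
 *	}
 */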

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
{
	void *p, *q;
	size_t usize;
	size_t old_size;
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));
	bool zero = flags & ALLOCM_ZERO;
	bool no_move = flags & ALLOCM_NO_MOVE;
	prof_thr_cnt_t *cnt;

	assert(ptr != NULL);
	assert(*ptr != NULL);
	assert(size != 0);
	assert(SIZE_T_MAX - size >= extra);
	assert(malloc_initialized || malloc_initializer == pthread_self());

	p = *ptr;
	if (config_prof && opt_prof) {
		/*
		 * usize isn't knowable before iralloc() returns when extra is
		 * non-zero.  Therefore, compute its maximum possible value and
		 * use that in PROF_ALLOC_PREP() to decide whether to capture a
		 * backtrace.  prof_realloc() will use the actual usize to
		 * decide whether to sample.
		 */
		size_t max_usize = (alignment == 0) ? s2u(size+extra) :
		    sa2u(size+extra, alignment, NULL);
		prof_ctx_t *old_ctx = prof_ctx_get(p);
		old_size = isalloc(p);
		PROF_ALLOC_PREP(1, max_usize, cnt);
		if (cnt == NULL)
			goto OOM;
		/*
		 * Use minimum usize to determine whether promotion may happen.
		 */
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
		    && ((alignment == 0) ? s2u(size) : sa2u(size,
		    alignment, NULL)) <= SMALL_MAXCLASS) {
			q = iralloc(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
			    size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
			    alignment, zero, no_move);
			if (q == NULL)
				goto ERR;
			if (max_usize < PAGE_SIZE) {
				usize = max_usize;
				arena_prof_promoted(q, usize);
			} else
				usize = isalloc(q);
		} else {
			q = iralloc(p, size, extra, alignment, zero, no_move);
			if (q == NULL)
				goto ERR;
			usize = isalloc(q);
		}
		prof_realloc(q, usize, cnt, old_size, old_ctx);
		if (rsize != NULL)
			*rsize = usize;
	} else {
		if (config_stats)
			old_size = isalloc(p);
		q = iralloc(p, size, extra, alignment, zero, no_move);
		if (q == NULL)
			goto ERR;
		if (config_stats)
			usize = isalloc(q);
		if (rsize != NULL) {
			if (config_stats == false)
				usize = isalloc(q);
			*rsize = usize;
		}
	}

	*ptr = q;
	if (config_stats)
		ALLOCATED_ADD(usize, old_size);
	return (ALLOCM_SUCCESS);
ERR:
	if (no_move)
		return (ALLOCM_ERR_NOT_MOVED);
OOM:
	if (config_xmalloc && opt_xmalloc) {
		malloc_write("<jemalloc>: Error in rallocm(): "
		    "out of memory\n");
		abort();
	}
	return (ALLOCM_ERR_OOM);
}

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_sallocm(const void *ptr, size_t *rsize, int flags)
{
	size_t sz;

	assert(malloc_initialized || malloc_initializer == pthread_self());

	if (config_ivsalloc)
		sz = ivsalloc(ptr);
	else {
		assert(ptr != NULL);
		sz = isalloc(ptr);
	}
	assert(rsize != NULL);
	*rsize = sz;

	return (ALLOCM_SUCCESS);
}

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_dallocm(void *ptr, int flags)
{
	size_t usize;

	assert(ptr != NULL);
	assert(malloc_initialized || malloc_initializer == pthread_self());

	if (config_stats)
		usize = isalloc(ptr);
	if (config_prof && opt_prof) {
		if (config_stats == false)
			usize = isalloc(ptr);
		prof_free(ptr, usize);
	}
	if (config_stats)
		ALLOCATED_ADD(0, usize);
	idalloc(ptr);

	return (ALLOCM_SUCCESS);
}

JEMALLOC_ATTR(visibility("default"))
int
je_nallocm(size_t *rsize, size_t size, int flags)
{
	size_t usize;
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));

	assert(size != 0);

	if (malloc_init())
		return (ALLOCM_ERR_OOM);

	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment, NULL);
	if (usize == 0)
		return (ALLOCM_ERR_OOM);

	if (rsize != NULL)
		*rsize = usize;
	return (ALLOCM_SUCCESS);
}

#endif
/*
 * End experimental functions.
 */
/******************************************************************************/

/*
 * The following functions are used by threading libraries for protection of
 * malloc during fork().
 */

void
jemalloc_prefork(void)
{
	unsigned i;

	/* Acquire all mutexes in a safe order. */

	malloc_mutex_lock(&arenas_lock);
	for (i = 0; i < narenas; i++) {
		if (arenas[i] != NULL)
			malloc_mutex_lock(&arenas[i]->lock);
	}

	malloc_mutex_lock(&base_mtx);

	malloc_mutex_lock(&huge_mtx);

	if (config_dss)
		malloc_mutex_lock(&dss_mtx);
}

void
jemalloc_postfork(void)
{
	unsigned i;

	/* Release all mutexes, now that fork() has completed. */

	if (config_dss)
		malloc_mutex_unlock(&dss_mtx);

	malloc_mutex_unlock(&huge_mtx);

	malloc_mutex_unlock(&base_mtx);

	for (i = 0; i < narenas; i++) {
		if (arenas[i] != NULL)
			malloc_mutex_unlock(&arenas[i]->lock);
	}
	malloc_mutex_unlock(&arenas_lock);
}

/******************************************************************************/