/* jemalloc_internal.h.in revision 1a0e7770243e0539fa8fef7bb1512f784f93389f */
/*
 * System and C standard library headers.  This file is a configure template
 * (note the @install_suffix@ substitution below), so it must stay portable
 * across the supported platforms.
 */
#include <sys/mman.h>
#include <sys/param.h>
#include <sys/syscall.h>
#if !defined(SYS_write) && defined(__NR_write)
#define SYS_write __NR_write
#endif
#include <sys/time.h>
#include <sys/types.h>
#include <sys/uio.h>

#include <errno.h>
#include <limits.h>
#ifndef SIZE_T_MAX
#  define SIZE_T_MAX SIZE_MAX
#endif
#include <pthread.h>
#include <sched.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>
#ifndef offsetof
#  define offsetof(type, member) ((size_t)&(((type *)NULL)->member))
#endif
#include <inttypes.h>
#include <string.h>
#include <strings.h>
#include <ctype.h>
#include <unistd.h>
#include <fcntl.h>
#include <pthread.h>
#include <math.h>

#define JEMALLOC_NO_DEMANGLE
#include "../jemalloc@install_suffix@.h"

#include "jemalloc/internal/private_namespace.h"

#ifdef JEMALLOC_CC_SILENCE
#define UNUSED JEMALLOC_ATTR(unused)
#else
#define UNUSED
#endif

/*
 * Compile-time feature flags.  Each config_* constant mirrors whether the
 * corresponding JEMALLOC_* cpp macro was defined by the configure script,
 * which lets feature-conditional code be written as ordinary C conditionals
 * instead of #ifdef blocks.
 */
static const bool config_debug =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
static const bool config_dss =
#ifdef JEMALLOC_DSS
    true
#else
    false
#endif
    ;
static const bool config_dynamic_page_shift =
#ifdef JEMALLOC_DYNAMIC_PAGE_SHIFT
    true
#else
    false
#endif
    ;
static const bool config_fill =
#ifdef JEMALLOC_FILL
    true
#else
    false
#endif
    ;
static const bool config_lazy_lock =
#ifdef JEMALLOC_LAZY_LOCK
    true
#else
    false
#endif
    ;
static const bool config_prof =
#ifdef JEMALLOC_PROF
    true
#else
    false
#endif
    ;
static const bool config_prof_libgcc =
#ifdef JEMALLOC_PROF_LIBGCC
    true
#else
    false
#endif
    ;
static const bool config_prof_libunwind =
#ifdef JEMALLOC_PROF_LIBUNWIND
    true
#else
    false
#endif
    ;
static const bool config_stats =
#ifdef JEMALLOC_STATS
    true
#else
    false
#endif
    ;
static const bool
config_tcache =
#ifdef JEMALLOC_TCACHE
    true
#else
    false
#endif
    ;
static const bool config_tls =
#ifdef JEMALLOC_TLS
    true
#else
    false
#endif
    ;
static const bool config_xmalloc =
#ifdef JEMALLOC_XMALLOC
    true
#else
    false
#endif
    ;
static const bool config_ivsalloc =
#ifdef JEMALLOC_IVSALLOC
    true
#else
    false
#endif
    ;

/* Darwin atomic/spinlock support (libkern). */
#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
#include <libkern/OSAtomic.h>
#endif

/* Darwin malloc zone integration. */
#ifdef JEMALLOC_ZONE
#include <mach/mach_error.h>
#include <mach/mach_init.h>
#include <mach/vm_map.h>
#include <malloc/malloc.h>
#endif

/* Intrusive data structure templates: red-black tree, ring, list. */
#define RB_COMPACT
#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/qr.h"
#include "jemalloc/internal/ql.h"

/*
 * jemalloc can conceptually be broken into components (arena, tcache, etc.),
 * but there are circular dependencies that cannot be broken without
 * substantial performance degradation.  In order to reduce the effect on
 * visual code flow, read the header files in multiple passes, with one of the
 * following cpp variables defined during each pass:
 *
 *   JEMALLOC_H_TYPES   : Preprocessor-defined constants and pseudo-opaque data
 *                        types.
 *   JEMALLOC_H_STRUCTS : Data structures.
 *   JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
 *   JEMALLOC_H_INLINES : Inline functions.
 */
/******************************************************************************/
#define JEMALLOC_H_TYPES

#define ALLOCM_LG_ALIGN_MASK ((int)0x3f)

/* Cast a constant to size_t. */
#define ZU(z) ((size_t)z)

#ifndef __DECONST
#  define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
#endif

#ifdef JEMALLOC_DEBUG
   /* Disable inlining to make debugging easier.
*/
#  define JEMALLOC_INLINE
#  define inline
#else
#  define JEMALLOC_ENABLE_INLINE
#  define JEMALLOC_INLINE static inline
#endif

/* Smallest size class to support. */
#define LG_TINY_MIN 3
#define TINY_MIN (1U << LG_TINY_MIN)

/*
 * Minimum alignment of allocations is 2^LG_QUANTUM bytes (ignoring tiny size
 * classes).
 */
#ifndef LG_QUANTUM
#  ifdef __i386__
#    define LG_QUANTUM 4
#  endif
#  ifdef __ia64__
#    define LG_QUANTUM 4
#  endif
#  ifdef __alpha__
#    define LG_QUANTUM 4
#  endif
#  ifdef __sparc64__
#    define LG_QUANTUM 4
#  endif
#  if (defined(__amd64__) || defined(__x86_64__))
#    define LG_QUANTUM 4
#  endif
#  ifdef __arm__
#    define LG_QUANTUM 3
#  endif
#  ifdef __mips__
#    define LG_QUANTUM 3
#  endif
#  ifdef __powerpc__
#    define LG_QUANTUM 4
#  endif
#  ifdef __s390x__
#    define LG_QUANTUM 4
#  endif
#  ifdef __SH4__
#    define LG_QUANTUM 4
#  endif
#  ifdef __tile__
#    define LG_QUANTUM 4
#  endif
#  ifndef LG_QUANTUM
#    error "No LG_QUANTUM definition for architecture; specify via CPPFLAGS"
#  endif
#endif

#define QUANTUM ((size_t)(1U << LG_QUANTUM))
#define QUANTUM_MASK (QUANTUM - 1)

/* Return the smallest quantum multiple that is >= a. */
#define QUANTUM_CEILING(a) \
    (((a) + QUANTUM_MASK) & ~QUANTUM_MASK)

#define LONG ((size_t)(1U << LG_SIZEOF_LONG))
#define LONG_MASK (LONG - 1)

/* Return the smallest long multiple that is >= a. */
#define LONG_CEILING(a) \
    (((a) + LONG_MASK) & ~LONG_MASK)

#define SIZEOF_PTR (1U << LG_SIZEOF_PTR)
#define PTR_MASK (SIZEOF_PTR - 1)

/* Return the smallest (void *) multiple that is >= a. */
#define PTR_CEILING(a) \
    (((a) + PTR_MASK) & ~PTR_MASK)

/*
 * Maximum size of L1 cache line.  This is used to avoid cache line aliasing.
 * In addition, this controls the spacing of cacheline-spaced size classes.
*/
#define LG_CACHELINE 6
#define CACHELINE ((size_t)(1U << LG_CACHELINE))
#define CACHELINE_MASK (CACHELINE - 1)

/* Return the smallest cacheline multiple that is >= s. */
#define CACHELINE_CEILING(s) \
    (((s) + CACHELINE_MASK) & ~CACHELINE_MASK)

/* Page size.  STATIC_PAGE_SHIFT is determined by the configure script. */
#define STATIC_PAGE_SIZE ((size_t)(1U << STATIC_PAGE_SHIFT))
#define STATIC_PAGE_MASK ((size_t)(STATIC_PAGE_SIZE - 1))
/* System headers may define PAGE_*; replace them with jemalloc's values. */
#ifdef PAGE_SHIFT
#  undef PAGE_SHIFT
#endif
#ifdef PAGE_SIZE
#  undef PAGE_SIZE
#endif
#ifdef PAGE_MASK
#  undef PAGE_MASK
#endif
#define PAGE_SHIFT STATIC_PAGE_SHIFT
#define PAGE_SIZE STATIC_PAGE_SIZE
#define PAGE_MASK STATIC_PAGE_MASK

/* Return the smallest pagesize multiple that is >= s. */
#define PAGE_CEILING(s) \
    (((s) + PAGE_MASK) & ~PAGE_MASK)

/* Pass 1: JEMALLOC_H_TYPES (constants and pseudo-opaque types). */
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#ifdef JEMALLOC_ZONE
#include "jemalloc/internal/zone.h"
#endif
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_TYPES
/******************************************************************************/
#define JEMALLOC_H_STRUCTS

/* Pass 2: JEMALLOC_H_STRUCTS (data structures). */
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#ifdef JEMALLOC_ZONE
#include "jemalloc/internal/zone.h"
#endif
#include "jemalloc/internal/prof.h"

/* Per-thread running byte totals, stored in the thread_allocated tsd. */
typedef struct {
    uint64_t allocated;
    uint64_t deallocated;
} thread_allocated_t;
/*
 * The JEMALLOC_CONCAT() wrapper is necessary to pass {0, 0} via a cpp macro
 * argument.
 */
#define THREAD_ALLOCATED_INITIALIZER JEMALLOC_CONCAT({0, 0})

#undef JEMALLOC_H_STRUCTS
/******************************************************************************/
#define JEMALLOC_H_EXTERNS

/* Runtime option flags (opt.* mallctl namespace). */
extern bool opt_abort;
extern bool opt_junk;
extern bool opt_xmalloc;
extern bool opt_zero;
extern size_t opt_narenas;

#ifdef DYNAMIC_PAGE_SHIFT
extern size_t pagesize;
extern size_t pagesize_mask;
extern size_t lg_pagesize;
#endif

/* Number of CPUs. */
extern unsigned ncpus;

extern malloc_mutex_t arenas_lock; /* Protects arenas initialization. */
/*
 * Arenas that are used to service external requests.  Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
*/
extern arena_t **arenas;
extern unsigned narenas;

extern bool malloc_initialized;

arena_t *arenas_extend(unsigned ind);
void arenas_cleanup(void *arg);
arena_t *choose_arena_hard(void);
void jemalloc_prefork(void);
void jemalloc_postfork_parent(void);
void jemalloc_postfork_child(void);

/* Pass 3: JEMALLOC_H_EXTERNS (extern data and function prototypes). */
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#ifdef JEMALLOC_ZONE
#include "jemalloc/internal/zone.h"
#endif
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_EXTERNS
/******************************************************************************/
#define JEMALLOC_H_INLINES

/*
 * Pass 4: JEMALLOC_H_INLINES (inline functions).  The first group below has
 * no dependence on this file's own inlines; the remaining headers are
 * included further down, after s2u()/sa2u()/choose_arena() are defined.
 */
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"

#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), arenas,
arena_t *)

size_t s2u(size_t size);
size_t sa2u(size_t size, size_t alignment, size_t *run_size_p);
arena_t *choose_arena(void);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
/*
 * Map of pthread_self() --> arenas[???], used for selecting an arena to use
 * for allocations.
 */
malloc_tsd_externs(arenas, arena_t *)
malloc_tsd_funcs(JEMALLOC_INLINE, arenas, arena_t *, NULL, arenas_cleanup)

/*
 * Compute usable size that would result from allocating an object with the
 * specified size.  Small requests map to their bin's region size, large
 * requests to a page multiple, and anything beyond arena_maxclass to a
 * chunk multiple.
 */
JEMALLOC_INLINE size_t
s2u(size_t size)
{

    if (size <= SMALL_MAXCLASS)
        return (arena_bin_info[SMALL_SIZE2BIN(size)].reg_size);
    if (size <= arena_maxclass)
        return (PAGE_CEILING(size));
    return (CHUNK_CEILING(size));
}

/*
 * Compute usable size that would result from allocating an object with the
 * specified size and alignment.  Returns 0 on size_t overflow.  If
 * run_size_p is non-NULL, it receives the over-size run size needed for
 * large aligned allocations.
 */
JEMALLOC_INLINE size_t
sa2u(size_t size, size_t alignment, size_t *run_size_p)
{
    size_t usize;

    /*
     * Round size up to the nearest multiple of alignment.
     *
     * This done, we can take advantage of the fact that for each small
     * size class, every object is aligned at the smallest power of two
     * that is non-zero in the base two representation of the size.  For
     * example:
     *
     *   Size |   Base 2 | Minimum alignment
     *   -----+----------+------------------
     *     96 |  1100000 |  32
     *    144 | 10100000 |  32
     *    192 | 11000000 |  64
     */
    usize = (size + (alignment - 1)) & (-alignment);
    /*
     * (usize < size) protects against the combination of maximal
     * alignment and size greater than maximal alignment.
     */
    if (usize < size) {
        /* size_t overflow.
*/
        return (0);
    }

    if (usize <= arena_maxclass && alignment <= PAGE_SIZE) {
        if (usize <= SMALL_MAXCLASS)
            return (arena_bin_info[SMALL_SIZE2BIN(usize)].reg_size);
        return (PAGE_CEILING(usize));
    } else {
        size_t run_size;

        /*
         * We can't achieve subpage alignment, so round up alignment
         * permanently; it makes later calculations simpler.
         */
        alignment = PAGE_CEILING(alignment);
        usize = PAGE_CEILING(size);
        /*
         * (usize < size) protects against very large sizes within
         * PAGE_SIZE of SIZE_T_MAX.
         *
         * (usize + alignment < usize) protects against the
         * combination of maximal alignment and usize large enough
         * to cause overflow.  This is similar to the first overflow
         * check above, but it needs to be repeated due to the new
         * usize value, which may now be *equal* to maximal
         * alignment, whereas before we only detected overflow if the
         * original size was *greater* than maximal alignment.
         */
        if (usize < size || usize + alignment < usize) {
            /* size_t overflow. */
            return (0);
        }

        /*
         * Calculate the size of the over-size run that arena_palloc()
         * would need to allocate in order to guarantee the alignment.
         */
        if (usize >= alignment)
            run_size = usize + alignment - PAGE_SIZE;
        else {
            /*
             * It is possible that (alignment << 1) will cause
             * overflow, but it doesn't matter because we also
             * subtract PAGE_SIZE, which in the case of overflow
             * leaves us with a very large run_size.  That causes
             * the first conditional below to fail, which means
             * that the bogus run_size value never gets used for
             * anything important.
             */
            run_size = (alignment << 1) - PAGE_SIZE;
        }
        if (run_size_p != NULL)
            *run_size_p = run_size;

        if (run_size <= arena_maxclass)
            return (PAGE_CEILING(usize));
        return (CHUNK_CEILING(usize));
    }
}

/* Choose an arena based on a per-thread value.
*/
JEMALLOC_INLINE arena_t *
choose_arena(void)
{
    arena_t *ret;

    /* Fast path: arena cached in tsd; otherwise assign one lazily. */
    if ((ret = *arenas_tsd_get()) == NULL) {
        ret = choose_arena_hard();
        assert(ret != NULL);
    }

    return (ret);
}
#endif

/* Remainder of the INLINES pass; these may use the inlines defined above. */
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/hash.h"
#ifdef JEMALLOC_ZONE
#include "jemalloc/internal/zone.h"
#endif

#ifndef JEMALLOC_ENABLE_INLINE
void *imalloc(size_t size);
void *icalloc(size_t size);
void *ipalloc(size_t usize, size_t alignment, bool zero);
size_t isalloc(const void *ptr);
size_t ivsalloc(const void *ptr);
void idalloc(void *ptr);
void *iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
    bool zero, bool no_move);
malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t)
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
/* Allocate size bytes, routed to arena or huge machinery by size. */
JEMALLOC_INLINE void *
imalloc(size_t size)
{

    assert(size != 0);

    if (size <= arena_maxclass)
        return (arena_malloc(size, false));
    else
        return (huge_malloc(size, false));
}

/* Like imalloc(), but the allocation is zeroed. */
JEMALLOC_INLINE void *
icalloc(size_t size)
{

    if (size <= arena_maxclass)
        return (arena_malloc(size, true));
    else
        return (huge_malloc(size, true));
}

/*
 * Aligned allocation.  usize must already be the result of
 * sa2u(usize, alignment, NULL), as asserted below.
 */
JEMALLOC_INLINE void *
ipalloc(size_t usize, size_t alignment, bool zero)
{
    void *ret;

    assert(usize != 0);
    assert(usize == sa2u(usize, alignment, NULL));

    if (usize <= arena_maxclass && alignment <= PAGE_SIZE)
        ret = arena_malloc(usize, zero);
    else {
        size_t run_size JEMALLOC_CC_SILENCE_INIT(0);

        /*
         * Ideally we would only ever call sa2u() once per aligned
         * allocation request, and the caller of this function has
         * already done so once.
However, it's rather burdensome to
         * require every caller to pass in run_size, especially given
         * that it's only relevant to large allocations.  Therefore,
         * just call it again here in order to get run_size.
         */
        sa2u(usize, alignment, &run_size);
        if (run_size <= arena_maxclass) {
            ret = arena_palloc(choose_arena(), usize, run_size,
                alignment, zero);
        } else if (alignment <= chunksize)
            ret = huge_malloc(usize, zero);
        else
            ret = huge_palloc(usize, alignment, zero);
    }

    assert(((uintptr_t)ret & (alignment - 1)) == 0);
    return (ret);
}

/* Compute the usable size of the allocation that ptr refers to. */
JEMALLOC_INLINE size_t
isalloc(const void *ptr)
{
    size_t ret;
    arena_chunk_t *chunk;

    assert(ptr != NULL);

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    if (chunk != ptr) {
        /* Region. */
        if (config_prof)
            ret = arena_salloc_demote(ptr);
        else
            ret = arena_salloc(ptr);
    } else
        ret = huge_salloc(ptr);

    return (ret);
}

/* Validating variant of isalloc(). */
JEMALLOC_INLINE size_t
ivsalloc(const void *ptr)
{

    /* Return 0 if ptr is not within a chunk managed by jemalloc. */
    if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == NULL)
        return (0);

    return (isalloc(ptr));
}

/* Deallocate ptr, routed to arena or huge machinery. */
JEMALLOC_INLINE void
idalloc(void *ptr)
{
    arena_chunk_t *chunk;

    assert(ptr != NULL);

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    if (chunk != ptr)
        arena_dalloc(chunk->arena, chunk, ptr);
    else
        huge_dalloc(ptr, true);
}

/*
 * Reallocate ptr to at least size bytes (extra is best-effort additional
 * space).  If no_move is true, fail (return NULL) rather than relocating
 * the object.
 */
JEMALLOC_INLINE void *
iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
    bool no_move)
{
    void *ret;
    size_t oldsize;

    assert(ptr != NULL);
    assert(size != 0);

    oldsize = isalloc(ptr);

    if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
        != 0) {
        size_t usize, copysize;

        /*
         * Existing object alignment is inadequate; allocate new space
         * and copy.
*/
        if (no_move)
            return (NULL);
        usize = sa2u(size + extra, alignment, NULL);
        if (usize == 0)
            return (NULL);
        ret = ipalloc(usize, alignment, zero);
        if (ret == NULL) {
            if (extra == 0)
                return (NULL);
            /* Try again, without extra this time. */
            usize = sa2u(size, alignment, NULL);
            if (usize == 0)
                return (NULL);
            ret = ipalloc(usize, alignment, zero);
            if (ret == NULL)
                return (NULL);
        }
        /*
         * Copy at most size bytes (not size+extra), since the caller
         * has no expectation that the extra bytes will be reliably
         * preserved.
         */
        copysize = (size < oldsize) ? size : oldsize;
        memcpy(ret, ptr, copysize);
        idalloc(ptr);
        return (ret);
    }

    if (no_move) {
        /* In-place-only resize; may fail (NULL) without moving ptr. */
        if (size <= arena_maxclass) {
            return (arena_ralloc_no_move(ptr, oldsize, size,
                extra, zero));
        } else {
            return (huge_ralloc_no_move(ptr, oldsize, size,
                extra));
        }
    } else {
        if (size + extra <= arena_maxclass) {
            return (arena_ralloc(ptr, oldsize, size, extra,
                alignment, zero));
        } else {
            return (huge_ralloc(ptr, oldsize, size, extra,
                alignment, zero));
        }
    }
}

malloc_tsd_externs(thread_allocated, thread_allocated_t)
malloc_tsd_funcs(JEMALLOC_INLINE, thread_allocated, thread_allocated_t,
    THREAD_ALLOCATED_INITIALIZER, malloc_tsd_no_cleanup)
#endif

#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_INLINES
/******************************************************************************/