jemalloc_internal.h.in revision 7372b15a31c63ac5cb9ed8aeabc2a0a3c005e8bf
#include <sys/mman.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/uio.h>

#include <errno.h>
#include <limits.h>
#ifndef SIZE_T_MAX
#  define SIZE_T_MAX	SIZE_MAX
#endif
#include <pthread.h>
#include <sched.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>
#ifndef offsetof
#  define offsetof(type, member)	((size_t)&(((type *)NULL)->member))
#endif
#include <inttypes.h>
#include <string.h>
#include <strings.h>
#include <ctype.h>
#include <unistd.h>
#include <fcntl.h>
#include <pthread.h>
#include <math.h>

#define	JEMALLOC_MANGLE
#include "../jemalloc@install_suffix@.h"

#include "jemalloc/internal/private_namespace.h"

#ifdef JEMALLOC_CC_SILENCE
#define	UNUSED JEMALLOC_ATTR(unused)
#else
#define	UNUSED
#endif

static const bool config_debug =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
static const bool config_dss =
#ifdef JEMALLOC_DSS
    true
#else
    false
#endif
    ;
static const bool config_dynamic_page_shift =
#ifdef JEMALLOC_DYNAMIC_PAGE_SHIFT
    true
#else
    false
#endif
    ;
static const bool config_fill =
#ifdef JEMALLOC_FILL
    true
#else
    false
#endif
    ;
static const bool config_lazy_lock =
#ifdef JEMALLOC_LAZY_LOCK
    true
#else
    false
#endif
    ;
static const bool config_prof =
#ifdef JEMALLOC_PROF
    true
#else
    false
#endif
    ;
static const bool config_prof_libgcc =
#ifdef JEMALLOC_PROF_LIBGCC
    true
#else
    false
#endif
    ;
static const bool config_prof_libunwind =
#ifdef JEMALLOC_PROF_LIBUNWIND
    true
#else
    false
#endif
    ;
static const bool config_stats =
#ifdef JEMALLOC_STATS
    true
#else
    false
#endif
    ;
static const bool config_swap =
#ifdef JEMALLOC_SWAP
    true
#else
    false
#endif
    ;
static const bool config_sysv =
#ifdef JEMALLOC_SYSV
    true
#else
    false
#endif
    ;
static const bool config_tcache =
#ifdef JEMALLOC_TCACHE
    true
#else
    false
#endif
    ;
static const bool config_tiny =
#ifdef JEMALLOC_TINY
    true
#else
    false
#endif
    ;
static const bool config_tls =
#ifdef JEMALLOC_TLS
    true
#else
    false
#endif
    ;
static const bool config_xmalloc =
#ifdef JEMALLOC_XMALLOC
    true
#else
    false
#endif
    ;
static const bool config_ivsalloc =
#ifdef JEMALLOC_IVSALLOC
    true
#else
    false
#endif
    ;
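
/*
 * Illustrative note (not part of the upstream header): because the config_*
 * flags above are compile-time constants, feature-conditional code can be
 * written as ordinary branches that the compiler folds away when a feature
 * is disabled, e.g.:
 *
 *	if (config_stats)
 *		bin->stats.nrequests++;	/- hypothetical stats field -/
 *
 * rather than wrapping every such statement in #ifdef JEMALLOC_STATS blocks.
 */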

#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
#include <libkern/OSAtomic.h>
#endif

#ifdef JEMALLOC_ZONE
#include <mach/mach_error.h>
#include <mach/mach_init.h>
#include <mach/vm_map.h>
#include <malloc/malloc.h>
#endif

#ifdef JEMALLOC_LAZY_LOCK
#include <dlfcn.h>
#endif

#define	RB_COMPACT
#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/qr.h"
#include "jemalloc/internal/ql.h"

extern void	(*JEMALLOC_P(malloc_message))(void *wcbopaque, const char *s);

/*
 * Define a custom assert() in order to reduce the chances of deadlock during
 * assertion failure.
 */
#ifndef assert
#  ifdef JEMALLOC_DEBUG
#    define assert(e) do {						\
	if (!(e)) {							\
		char line_buf[UMAX2S_BUFSIZE];				\
		malloc_write("<jemalloc>: ");				\
		malloc_write(__FILE__);					\
		malloc_write(":");					\
		malloc_write(u2s(__LINE__, 10, line_buf));		\
		malloc_write(": Failed assertion: ");			\
		malloc_write("\"");					\
		malloc_write(#e);					\
		malloc_write("\"\n");					\
		abort();						\
	}								\
} while (0)
#  else
#    define assert(e)
#  endif
#endif

/* Use to assert a particular configuration, e.g., cassert(config_debug). */
#define	cassert(c) do {							\
	if ((c) == false)						\
		assert(false);						\
} while (0)

/*
 * jemalloc can conceptually be broken into components (arena, tcache, etc.),
 * but there are circular dependencies that cannot be broken without
 * substantial performance degradation.  In order to reduce the effect on
 * visual code flow, read the header files in multiple passes, with one of the
 * following cpp variables defined during each pass:
 *
 *   JEMALLOC_H_TYPES   : Preprocessor-defined constants and pseudo-opaque
 *                        data types.
 *   JEMALLOC_H_STRUCTS : Data structures.
 *   JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
 *   JEMALLOC_H_INLINES : Inline functions.
 */
/******************************************************************************/
#define	JEMALLOC_H_TYPES

#define	ALLOCM_LG_ALIGN_MASK	((int)0x3f)

#define	ZU(z)	((size_t)z)

#ifndef __DECONST
#  define __DECONST(type, var)	((type)(uintptr_t)(const void *)(var))
#endif

#ifdef JEMALLOC_DEBUG
   /* Disable inlining to make debugging easier. */
#  define JEMALLOC_INLINE
#  define inline
#else
#  define JEMALLOC_ENABLE_INLINE
#  define JEMALLOC_INLINE static inline
#endif

/* Size of stack-allocated buffer passed to buferror(). */
#define	BUFERROR_BUF	64

/* Minimum alignment of allocations is 2^LG_QUANTUM bytes. */
#ifdef __i386__
#  define LG_QUANTUM	4
#endif
#ifdef __ia64__
#  define LG_QUANTUM	4
#endif
#ifdef __alpha__
#  define LG_QUANTUM	4
#endif
#ifdef __sparc64__
#  define LG_QUANTUM	4
#endif
#if (defined(__amd64__) || defined(__x86_64__))
#  define LG_QUANTUM	4
#endif
#ifdef __arm__
#  define LG_QUANTUM	3
#endif
#ifdef __mips__
#  define LG_QUANTUM	3
#endif
#ifdef __powerpc__
#  define LG_QUANTUM	4
#endif
#ifdef __s390x__
#  define LG_QUANTUM	4
#endif

#define	QUANTUM		((size_t)(1U << LG_QUANTUM))
#define	QUANTUM_MASK	(QUANTUM - 1)

/* Return the smallest quantum multiple that is >= a. */
#define	QUANTUM_CEILING(a)						\
	(((a) + QUANTUM_MASK) & ~QUANTUM_MASK)

#define	LONG		((size_t)(1U << LG_SIZEOF_LONG))
#define	LONG_MASK	(LONG - 1)

/* Return the smallest long multiple that is >= a. */
#define	LONG_CEILING(a)							\
	(((a) + LONG_MASK) & ~LONG_MASK)

#define	SIZEOF_PTR	(1U << LG_SIZEOF_PTR)
#define	PTR_MASK	(SIZEOF_PTR - 1)

/* Return the smallest (void *) multiple that is >= a. */
#define	PTR_CEILING(a)							\
	(((a) + PTR_MASK) & ~PTR_MASK)

/*
 * Maximum size of L1 cache line.  This is used to avoid cache line aliasing.
 * In addition, this controls the spacing of cacheline-spaced size classes.
 */
#define	LG_CACHELINE		6
#define	CACHELINE		((size_t)(1U << LG_CACHELINE))
#define	CACHELINE_MASK		(CACHELINE - 1)

/* Return the smallest cacheline multiple that is >= s. */
#define	CACHELINE_CEILING(s)						\
	(((s) + CACHELINE_MASK) & ~CACHELINE_MASK)
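
/*
 * Illustrative note (not part of the upstream header): each *_CEILING()
 * macro uses the same power-of-two round-up identity, (a + mask) & ~mask.
 * For example, with LG_QUANTUM == 4 (QUANTUM == 16, QUANTUM_MASK == 0x0f):
 *
 *	QUANTUM_CEILING(17) == (17 + 0x0f) & ~0x0f == 32
 *	QUANTUM_CEILING(32) == (32 + 0x0f) & ~0x0f == 32
 *
 * The identity is only valid when rounding to a power of two, which holds
 * for QUANTUM, LONG, SIZEOF_PTR, and CACHELINE by construction.
 */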

/*
 * Page size.  STATIC_PAGE_SHIFT is determined by the configure script.  If
 * DYNAMIC_PAGE_SHIFT is enabled, only use the STATIC_PAGE_* macros where
 * compile-time values are required for the purposes of defining data
 * structures.
 */
#define	STATIC_PAGE_SIZE ((size_t)(1U << STATIC_PAGE_SHIFT))
#define	STATIC_PAGE_MASK ((size_t)(STATIC_PAGE_SIZE - 1))

#ifdef PAGE_SHIFT
#  undef PAGE_SHIFT
#endif
#ifdef PAGE_SIZE
#  undef PAGE_SIZE
#endif
#ifdef PAGE_MASK
#  undef PAGE_MASK
#endif

#ifdef DYNAMIC_PAGE_SHIFT
#  define PAGE_SHIFT	lg_pagesize
#  define PAGE_SIZE	pagesize
#  define PAGE_MASK	pagesize_mask
#else
#  define PAGE_SHIFT	STATIC_PAGE_SHIFT
#  define PAGE_SIZE	STATIC_PAGE_SIZE
#  define PAGE_MASK	STATIC_PAGE_MASK
#endif

/* Return the smallest pagesize multiple that is >= s. */
#define	PAGE_CEILING(s)							\
	(((s) + PAGE_MASK) & ~PAGE_MASK)

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prn.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#ifdef JEMALLOC_ZONE
#include "jemalloc/internal/zone.h"
#endif
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_TYPES
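
/*
 * Illustrative sketch (not part of the upstream header): under the
 * multi-pass scheme described above, each component header is expected to be
 * structured roughly as follows, so that repeated inclusion emits only the
 * section matching the pass currently in effect:
 *
 *	#ifdef JEMALLOC_H_TYPES
 *	typedef struct example_s example_t;	(hypothetical component)
 *	#endif
 *	#ifdef JEMALLOC_H_STRUCTS
 *	struct example_s { ... };
 *	#endif
 *	#ifdef JEMALLOC_H_EXTERNS
 *	extern example_t *example;
 *	#endif
 *	#ifdef JEMALLOC_H_INLINES
 *	(inline functions)
 *	#endif
 */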
/******************************************************************************/
#define	JEMALLOC_H_STRUCTS

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prn.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#ifdef JEMALLOC_ZONE
#include "jemalloc/internal/zone.h"
#endif
#include "jemalloc/internal/prof.h"

typedef struct {
	uint64_t	allocated;
	uint64_t	deallocated;
} thread_allocated_t;

#undef JEMALLOC_H_STRUCTS
/******************************************************************************/
#define	JEMALLOC_H_EXTERNS

extern bool	opt_abort;
extern bool	opt_junk;
extern bool	opt_sysv;
extern bool	opt_xmalloc;
extern bool	opt_zero;
extern size_t	opt_narenas;

#ifdef DYNAMIC_PAGE_SHIFT
extern size_t	pagesize;
extern size_t	pagesize_mask;
extern size_t	lg_pagesize;
#endif

/* Number of CPUs. */
extern unsigned	ncpus;

extern malloc_mutex_t	arenas_lock;	/* Protects arenas initialization. */
extern pthread_key_t	arenas_tsd;
#ifndef NO_TLS
/*
 * Map of pthread_self() --> arenas[???], used for selecting an arena to use
 * for allocations.
 */
extern __thread arena_t	*arenas_tls JEMALLOC_ATTR(tls_model("initial-exec"));
#  define ARENA_GET()	arenas_tls
#  define ARENA_SET(v)	do {						\
	arenas_tls = (v);						\
	pthread_setspecific(arenas_tsd, (void *)(v));			\
} while (0)
#else
#  define ARENA_GET()	((arena_t *)pthread_getspecific(arenas_tsd))
#  define ARENA_SET(v)	do {						\
	pthread_setspecific(arenas_tsd, (void *)(v));			\
} while (0)
#endif

/*
 * Arenas that are used to service external requests.  Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 */
extern arena_t	**arenas;
extern unsigned	narenas;

#ifndef NO_TLS
extern __thread thread_allocated_t	thread_allocated_tls;
#  define ALLOCATED_GET()	(thread_allocated_tls.allocated)
#  define ALLOCATEDP_GET()	(&thread_allocated_tls.allocated)
#  define DEALLOCATED_GET()	(thread_allocated_tls.deallocated)
#  define DEALLOCATEDP_GET()	(&thread_allocated_tls.deallocated)
#  define ALLOCATED_ADD(a, d)	do {					\
	thread_allocated_tls.allocated += a;				\
	thread_allocated_tls.deallocated += d;				\
} while (0)
#else
#  define ALLOCATED_GET()	(thread_allocated_get()->allocated)
#  define ALLOCATEDP_GET()	(&thread_allocated_get()->allocated)
#  define DEALLOCATED_GET()	(thread_allocated_get()->deallocated)
#  define DEALLOCATEDP_GET()	(&thread_allocated_get()->deallocated)
#  define ALLOCATED_ADD(a, d)	do {					\
	thread_allocated_t *thread_allocated = thread_allocated_get();	\
	thread_allocated->allocated += (a);				\
	thread_allocated->deallocated += (d);				\
} while (0)
#endif
extern pthread_key_t	thread_allocated_tsd;
thread_allocated_t	*thread_allocated_get_hard(void);
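
/*
 * Illustrative note (not part of the upstream header): callers update the
 * per-thread counters through the macros above without caring which
 * implementation is in effect, e.g.:
 *
 *	ALLOCATED_ADD(usize, 0);	(after a successful allocation)
 *	ALLOCATED_ADD(0, usize);	(after a deallocation)
 *
 * With TLS these expand to direct __thread accesses; without it they fall
 * back to pthread_getspecific() via thread_allocated_get().
 */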

arena_t	*arenas_extend(unsigned ind);
arena_t	*choose_arena_hard(void);
int	buferror(int errnum, char *buf, size_t buflen);
void	jemalloc_prefork(void);
void	jemalloc_postfork(void);

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prn.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#ifdef JEMALLOC_ZONE
#include "jemalloc/internal/zone.h"
#endif
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_EXTERNS
/******************************************************************************/
#define	JEMALLOC_H_INLINES

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prn.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"

#ifndef JEMALLOC_ENABLE_INLINE
size_t	pow2_ceil(size_t x);
size_t	s2u(size_t size);
size_t	sa2u(size_t size, size_t alignment, size_t *run_size_p);
void	malloc_write(const char *s);
arena_t	*choose_arena(void);
thread_allocated_t	*thread_allocated_get(void);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
/* Compute the smallest power of 2 that is >= x. */
JEMALLOC_INLINE size_t
pow2_ceil(size_t x)
{

	x--;
	x |= x >> 1;
	x |= x >> 2;
	x |= x >> 4;
	x |= x >> 8;
	x |= x >> 16;
#if (LG_SIZEOF_PTR == 3)
	x |= x >> 32;
#endif
	x++;
	return (x);
}
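
/*
 * Illustrative note (not part of the upstream header): the decrement/smear/
 * increment sequence in pow2_ceil() first fills in every bit below the
 * highest set bit, then adds one to reach the next power of two.  For
 * example, pow2_ceil(37):
 *
 *	37 - 1 == 36 == 0b100100
 *	smear:         0b111111 == 63
 *	63 + 1 == 64
 *
 * The initial decrement makes exact powers of two map to themselves
 * (pow2_ceil(32) == 32), and the LG_SIZEOF_PTR check adds the x >> 32 step
 * only on platforms where size_t is wider than 32 bits.
 */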

/*
 * Compute usable size that would result from allocating an object with the
 * specified size.
 */
JEMALLOC_INLINE size_t
s2u(size_t size)
{

	if (size <= small_maxclass)
		return (arena_bin_info[SMALL_SIZE2BIN(size)].reg_size);
	if (size <= arena_maxclass)
		return (PAGE_CEILING(size));
	return (CHUNK_CEILING(size));
}

/*
 * Compute usable size that would result from allocating an object with the
 * specified size and alignment.
 */
JEMALLOC_INLINE size_t
sa2u(size_t size, size_t alignment, size_t *run_size_p)
{
	size_t usize;

	/*
	 * Round size up to the nearest multiple of alignment.
	 *
	 * This done, we can take advantage of the fact that for each small
	 * size class, every object is aligned at the smallest power of two
	 * that is non-zero in the base two representation of the size.  For
	 * example:
	 *
	 *   Size |   Base 2 | Minimum alignment
	 *   -----+----------+------------------
	 *     96 |  1100000 |                32
	 *    144 | 10100000 |                32
	 *    192 | 11000000 |                64
	 *
	 * Depending on runtime settings, it is possible that arena_malloc()
	 * will further round up to a power of two, but that never causes
	 * correctness issues.
	 */
	usize = (size + (alignment - 1)) & (-alignment);
	/*
	 * (usize < size) protects against the combination of maximal
	 * alignment and size greater than maximal alignment.
	 */
	if (usize < size) {
		/* size_t overflow. */
		return (0);
	}

	if (usize <= arena_maxclass && alignment <= PAGE_SIZE) {
		if (usize <= small_maxclass)
			return (arena_bin_info[SMALL_SIZE2BIN(usize)].reg_size);
		return (PAGE_CEILING(usize));
	} else {
		size_t run_size;

		/*
		 * We can't achieve subpage alignment, so round up alignment
		 * permanently; it makes later calculations simpler.
		 */
		alignment = PAGE_CEILING(alignment);
		usize = PAGE_CEILING(size);
		/*
		 * (usize < size) protects against very large sizes within
		 * PAGE_SIZE of SIZE_T_MAX.
		 *
		 * (usize + alignment < usize) protects against the
		 * combination of maximal alignment and usize large enough to
		 * cause overflow.  This is similar to the first overflow
		 * check above, but it needs to be repeated due to the new
		 * usize value, which may now be *equal* to maximal alignment,
		 * whereas before we only detected overflow if the original
		 * size was *greater* than maximal alignment.
		 */
		if (usize < size || usize + alignment < usize) {
			/* size_t overflow. */
			return (0);
		}

		/*
		 * Calculate the size of the over-size run that arena_palloc()
		 * would need to allocate in order to guarantee the alignment.
		 */
		if (usize >= alignment)
			run_size = usize + alignment - PAGE_SIZE;
		else {
			/*
			 * It is possible that (alignment << 1) will cause
			 * overflow, but it doesn't matter because we also
			 * subtract PAGE_SIZE, which in the case of overflow
			 * leaves us with a very large run_size.  That causes
			 * the first conditional below to fail, which means
			 * that the bogus run_size value never gets used for
			 * anything important.
			 */
			run_size = (alignment << 1) - PAGE_SIZE;
		}
		if (run_size_p != NULL)
			*run_size_p = run_size;

		if (run_size <= arena_maxclass)
			return (PAGE_CEILING(usize));
		return (CHUNK_CEILING(usize));
	}
}
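
/*
 * Illustrative note (not part of the upstream header): the initial rounding
 * in sa2u() relies on -alignment being the two's complement of a power of
 * two, i.e. a mask that clears the low bits.  For size == 100,
 * alignment == 64:
 *
 *	(100 + 63) & (-64) == 163 & ~63 == 128
 *
 * so the 100-byte request is rounded to 128, the smallest multiple of 64
 * that is >= 100.
 */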

/*
 * Wrapper around malloc_message() that avoids the need for
 * JEMALLOC_P(malloc_message)(...) throughout the code.
 */
JEMALLOC_INLINE void
malloc_write(const char *s)
{

	JEMALLOC_P(malloc_message)(NULL, s);
}

/*
 * Choose an arena based on a per-thread value (fast-path code, calls
 * slow-path code if necessary).
 */
JEMALLOC_INLINE arena_t *
choose_arena(void)
{
	arena_t *ret;

	ret = ARENA_GET();
	if (ret == NULL) {
		ret = choose_arena_hard();
		assert(ret != NULL);
	}

	return (ret);
}

JEMALLOC_INLINE thread_allocated_t *
thread_allocated_get(void)
{
	thread_allocated_t *thread_allocated = (thread_allocated_t *)
	    pthread_getspecific(thread_allocated_tsd);

	if (thread_allocated == NULL)
		return (thread_allocated_get_hard());
	return (thread_allocated);
}
#endif

#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/hash.h"
#ifdef JEMALLOC_ZONE
#include "jemalloc/internal/zone.h"
#endif

#ifndef JEMALLOC_ENABLE_INLINE
void	*imalloc(size_t size);
void	*icalloc(size_t size);
void	*ipalloc(size_t usize, size_t alignment, bool zero);
size_t	isalloc(const void *ptr);
size_t	ivsalloc(const void *ptr);
void	idalloc(void *ptr);
void	*iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
    bool zero, bool no_move);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_INLINE void *
imalloc(size_t size)
{

	assert(size != 0);

	if (size <= arena_maxclass)
		return (arena_malloc(size, false));
	else
		return (huge_malloc(size, false));
}

JEMALLOC_INLINE void *
icalloc(size_t size)
{

	if (size <= arena_maxclass)
		return (arena_malloc(size, true));
	else
		return (huge_malloc(size, true));
}

JEMALLOC_INLINE void *
ipalloc(size_t usize, size_t alignment, bool zero)
{
	void *ret;

	assert(usize != 0);
	assert(usize == sa2u(usize, alignment, NULL));

	if (usize <= arena_maxclass && alignment <= PAGE_SIZE)
		ret = arena_malloc(usize, zero);
	else {
		size_t run_size
#ifdef JEMALLOC_CC_SILENCE
		    = 0
#endif
		    ;

		/*
		 * Ideally we would only ever call sa2u() once per aligned
		 * allocation request, and the caller of this function has
		 * already done so once.  However, it's rather burdensome to
		 * require every caller to pass in run_size, especially given
		 * that it's only relevant to large allocations.  Therefore,
		 * just call it again here in order to get run_size.
		 */
		sa2u(usize, alignment, &run_size);
		if (run_size <= arena_maxclass) {
			ret = arena_palloc(choose_arena(), usize, run_size,
			    alignment, zero);
		} else if (alignment <= chunksize)
			ret = huge_malloc(usize, zero);
		else
			ret = huge_palloc(usize, alignment, zero);
	}

	assert(((uintptr_t)ret & (alignment - 1)) == 0);
	return (ret);
}

JEMALLOC_INLINE size_t
isalloc(const void *ptr)
{
	size_t ret;
	arena_chunk_t *chunk;

	assert(ptr != NULL);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr) {
		/* Region. */
		assert(chunk->arena->magic == ARENA_MAGIC);

		if (config_prof)
			ret = arena_salloc_demote(ptr);
		else
			ret = arena_salloc(ptr);
	} else
		ret = huge_salloc(ptr);

	return (ret);
}

JEMALLOC_INLINE size_t
ivsalloc(const void *ptr)
{

	/* Return 0 if ptr is not within a chunk managed by jemalloc. */
	if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == NULL)
		return (0);

	return (isalloc(ptr));
}

JEMALLOC_INLINE void
idalloc(void *ptr)
{
	arena_chunk_t *chunk;

	assert(ptr != NULL);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr)
		arena_dalloc(chunk->arena, chunk, ptr);
	else
		huge_dalloc(ptr, true);
}

JEMALLOC_INLINE void *
iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
    bool no_move)
{
	void *ret;
	size_t oldsize;

	assert(ptr != NULL);
	assert(size != 0);

	oldsize = isalloc(ptr);

	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
	    != 0) {
		size_t usize, copysize;

		/*
		 * Existing object alignment is inadequate; allocate new space
		 * and copy.
		 */
		if (no_move)
			return (NULL);
		usize = sa2u(size + extra, alignment, NULL);
		if (usize == 0)
			return (NULL);
		ret = ipalloc(usize, alignment, zero);
		if (ret == NULL) {
			if (extra == 0)
				return (NULL);
			/* Try again, without extra this time. */
			usize = sa2u(size, alignment, NULL);
			if (usize == 0)
				return (NULL);
			ret = ipalloc(usize, alignment, zero);
			if (ret == NULL)
				return (NULL);
		}
		/*
		 * Copy at most size bytes (not size+extra), since the caller
		 * has no expectation that the extra bytes will be reliably
		 * preserved.
		 */
		copysize = (size < oldsize) ? size : oldsize;
		memcpy(ret, ptr, copysize);
		idalloc(ptr);
		return (ret);
	}

	if (no_move) {
		if (size <= arena_maxclass) {
			return (arena_ralloc_no_move(ptr, oldsize, size,
			    extra, zero));
		} else {
			return (huge_ralloc_no_move(ptr, oldsize, size,
			    extra));
		}
	} else {
		if (size + extra <= arena_maxclass) {
			return (arena_ralloc(ptr, oldsize, size, extra,
			    alignment, zero));
		} else {
			return (huge_ralloc(ptr, oldsize, size, extra,
			    alignment, zero));
		}
	}
}
#endif

#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_INLINES
/******************************************************************************/
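
/*
 * Illustrative note (not part of the upstream header): the inline bodies
 * above are compiled either when inlining is enabled
 * (JEMALLOC_ENABLE_INLINE) or when JEMALLOC_C_ is defined.  In a debug
 * build, JEMALLOC_INLINE expands to nothing, so exactly one translation
 * unit is expected to define JEMALLOC_C_ before including this header in
 * order to provide the out-of-line definitions:
 *
 *	#define JEMALLOC_C_
 *	#include "jemalloc/internal/jemalloc_internal.h"
 */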