jemalloc_internal.h.in revision 4bb09830133ffa8b27a95bc3727558007722c152

#include <sys/mman.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/uio.h>

#include <errno.h>
#include <limits.h>
#ifndef SIZE_T_MAX
#  define SIZE_T_MAX	SIZE_MAX
#endif
#include <pthread.h>
#include <sched.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>
#ifndef offsetof
#  define offsetof(type, member)	((size_t)&(((type *)NULL)->member))
#endif
#include <inttypes.h>
#include <string.h>
#include <strings.h>
#include <ctype.h>
#include <unistd.h>
#include <fcntl.h>
#include <pthread.h>
#include <math.h>

#define	JEMALLOC_MANGLE
#include "../jemalloc@install_suffix@.h"

#include "jemalloc/internal/private_namespace.h"

#ifdef JEMALLOC_CC_SILENCE
#define	UNUSED JEMALLOC_ATTR(unused)
#else
#define	UNUSED
#endif

static const bool config_debug =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
static const bool config_dss =
#ifdef JEMALLOC_DSS
    true
#else
    false
#endif
    ;
static const bool config_dynamic_page_shift =
#ifdef JEMALLOC_DYNAMIC_PAGE_SHIFT
    true
#else
    false
#endif
    ;
static const bool config_fill =
#ifdef JEMALLOC_FILL
    true
#else
    false
#endif
    ;
static const bool config_lazy_lock =
#ifdef JEMALLOC_LAZY_LOCK
    true
#else
    false
#endif
    ;
static const bool config_prof =
#ifdef JEMALLOC_PROF
    true
#else
    false
#endif
    ;
static const bool config_prof_libgcc =
#ifdef JEMALLOC_PROF_LIBGCC
    true
#else
    false
#endif
    ;
static const bool config_prof_libunwind =
#ifdef JEMALLOC_PROF_LIBUNWIND
    true
#else
    false
#endif
    ;
static const bool config_stats =
#ifdef JEMALLOC_STATS
    true
#else
    false
#endif
    ;
static const bool config_tcache =
#ifdef JEMALLOC_TCACHE
    true
#else
    false
#endif
    ;
static const bool config_tls =
#ifdef JEMALLOC_TLS
    true
#else
    false
#endif
    ;
static const bool config_xmalloc =
#ifdef JEMALLOC_XMALLOC
    true
#else
    false
#endif
    ;
static const bool config_ivsalloc =
#ifdef JEMALLOC_IVSALLOC
    true
#else
    false
#endif
    ;

#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
#include <libkern/OSAtomic.h>
#endif

#ifdef JEMALLOC_ZONE
#include <mach/mach_error.h>
#include <mach/mach_init.h>
#include <mach/vm_map.h>
#include <malloc/malloc.h>
#endif

#define	RB_COMPACT
#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/qr.h"
#include "jemalloc/internal/ql.h"

extern void	(*JEMALLOC_P(malloc_message))(void *wcbopaque, const char *s);

/*
 * Define a custom assert() in order to reduce the chances of deadlock during
 * assertion failure.
 */
#ifndef assert
#  ifdef JEMALLOC_DEBUG
#    define assert(e) do {						\
	if (!(e)) {							\
		char line_buf[UMAX2S_BUFSIZE];				\
		malloc_write("<jemalloc>: ");				\
		malloc_write(__FILE__);					\
		malloc_write(":");					\
		malloc_write(u2s(__LINE__, 10, line_buf));		\
		malloc_write(": Failed assertion: ");			\
		malloc_write("\"");					\
		malloc_write(#e);					\
		malloc_write("\"\n");					\
		abort();						\
	}								\
} while (0)
#  else
#    define assert(e)
#  endif
#endif
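
/*
 * Illustrative failure output (the file/line below are made up): if
 * assert(ptr != NULL) failed at foo.c:42, the macro above would emit
 *
 *	<jemalloc>: foo.c:42: Failed assertion: "ptr != NULL"
 *
 * and then call abort().  The message is assembled piecewise with
 * malloc_write() and u2s() rather than printf-style formatting, consistent
 * with the deadlock-avoidance goal described above.
 */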

/*
 * Use to assert a particular configuration, e.g., cassert(config_debug).
 */
#define	cassert(c) do {							\
	if ((c) == false)						\
		assert(false);						\
} while (0)

/*
 * jemalloc can conceptually be broken into components (arena, tcache, etc.),
 * but there are circular dependencies that cannot be broken without
 * substantial performance degradation.  In order to reduce the effect on
 * visual code flow, read the header files in multiple passes, with one of the
 * following cpp variables defined during each pass:
 *
 *   JEMALLOC_H_TYPES   : Preprocessor-defined constants and pseudo-opaque
 *                        data types.
 *   JEMALLOC_H_STRUCTS : Data structures.
 *   JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
 *   JEMALLOC_H_INLINES : Inline functions.
 */
/******************************************************************************/
#define JEMALLOC_H_TYPES

#define	ALLOCM_LG_ALIGN_MASK	((int)0x3f)

#define	ZU(z)	((size_t)z)

#ifndef __DECONST
#  define __DECONST(type, var)	((type)(uintptr_t)(const void *)(var))
#endif

#ifdef JEMALLOC_DEBUG
   /* Disable inlining to make debugging easier. */
#  define JEMALLOC_INLINE
#  define inline
#else
#  define JEMALLOC_ENABLE_INLINE
#  define JEMALLOC_INLINE static inline
#endif

/* Size of stack-allocated buffer passed to buferror(). */
#define	BUFERROR_BUF	64

/* Smallest size class to support. */
#define	LG_TINY_MIN	3
#define	TINY_MIN	(1U << LG_TINY_MIN)

/*
 * Minimum alignment of allocations is 2^LG_QUANTUM bytes (ignoring tiny size
 * classes).
 */
#ifndef LG_QUANTUM
#  ifdef __i386__
#    define LG_QUANTUM	4
#  endif
#  ifdef __ia64__
#    define LG_QUANTUM	4
#  endif
#  ifdef __alpha__
#    define LG_QUANTUM	4
#  endif
#  ifdef __sparc64__
#    define LG_QUANTUM	4
#  endif
#  if (defined(__amd64__) || defined(__x86_64__))
#    define LG_QUANTUM	4
#  endif
#  ifdef __arm__
#    define LG_QUANTUM	3
#  endif
#  ifdef __mips__
#    define LG_QUANTUM	3
#  endif
#  ifdef __powerpc__
#    define LG_QUANTUM	4
#  endif
#  ifdef __s390x__
#    define LG_QUANTUM	4
#  endif
#  ifdef __tile__
#    define LG_QUANTUM	4
#  endif
#  ifndef LG_QUANTUM
#    error "No LG_QUANTUM definition for architecture; specify via CPPFLAGS"
#  endif
#endif

#define	QUANTUM		((size_t)(1U << LG_QUANTUM))
#define	QUANTUM_MASK	(QUANTUM - 1)

/* Return the smallest quantum multiple that is >= a. */
#define	QUANTUM_CEILING(a)						\
	(((a) + QUANTUM_MASK) & ~QUANTUM_MASK)

#define	LONG		((size_t)(1U << LG_SIZEOF_LONG))
#define	LONG_MASK	(LONG - 1)

/* Return the smallest long multiple that is >= a. */
#define	LONG_CEILING(a)							\
	(((a) + LONG_MASK) & ~LONG_MASK)

#define	SIZEOF_PTR	(1U << LG_SIZEOF_PTR)
#define	PTR_MASK	(SIZEOF_PTR - 1)

/* Return the smallest (void *) multiple that is >= a. */
#define	PTR_CEILING(a)							\
	(((a) + PTR_MASK) & ~PTR_MASK)

/*
 * Maximum size of L1 cache line.  This is used to avoid cache line aliasing.
 * In addition, this controls the spacing of cacheline-spaced size classes.
 */
#define	LG_CACHELINE	6
#define	CACHELINE	((size_t)(1U << LG_CACHELINE))
#define	CACHELINE_MASK	(CACHELINE - 1)

/* Return the smallest cacheline multiple that is >= s. */
#define	CACHELINE_CEILING(s)						\
	(((s) + CACHELINE_MASK) & ~CACHELINE_MASK)
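
/*
 * Worked example of the *_CEILING() idiom above (illustrative): each macro
 * rounds up to a power-of-two multiple by adding (multiple - 1) and then
 * clearing the low bits with the mask.  With LG_QUANTUM == 4 (QUANTUM == 16):
 *
 *	QUANTUM_CEILING(17) == (17 + 15) & ~15 == 32
 *	QUANTUM_CEILING(32) == (32 + 15) & ~15 == 32
 *
 * and with LG_CACHELINE == 6, CACHELINE_CEILING(65) == 128.
 */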

/*
 * Page size.  STATIC_PAGE_SHIFT is determined by the configure script.
 */
#define	STATIC_PAGE_SIZE ((size_t)(1U << STATIC_PAGE_SHIFT))
#define	STATIC_PAGE_MASK ((size_t)(STATIC_PAGE_SIZE - 1))
#ifdef PAGE_SHIFT
#  undef PAGE_SHIFT
#endif
#ifdef PAGE_SIZE
#  undef PAGE_SIZE
#endif
#ifdef PAGE_MASK
#  undef PAGE_MASK
#endif
#define	PAGE_SHIFT	STATIC_PAGE_SHIFT
#define	PAGE_SIZE	STATIC_PAGE_SIZE
#define	PAGE_MASK	STATIC_PAGE_MASK

/* Return the smallest pagesize multiple that is >= s. */
#define	PAGE_CEILING(s)							\
	(((s) + PAGE_MASK) & ~PAGE_MASK)

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prn.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#ifdef JEMALLOC_ZONE
#include "jemalloc/internal/zone.h"
#endif
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_TYPES
/******************************************************************************/
#define JEMALLOC_H_STRUCTS

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prn.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#ifdef JEMALLOC_ZONE
#include "jemalloc/internal/zone.h"
#endif
#include "jemalloc/internal/prof.h"

typedef struct {
	uint64_t	allocated;
	uint64_t	deallocated;
} thread_allocated_t;

#undef JEMALLOC_H_STRUCTS
/******************************************************************************/
#define JEMALLOC_H_EXTERNS

extern bool	opt_abort;
extern bool	opt_junk;
extern bool	opt_xmalloc;
extern bool	opt_zero;
extern size_t	opt_narenas;

#ifdef DYNAMIC_PAGE_SHIFT
extern size_t	pagesize;
extern size_t	pagesize_mask;
extern size_t	lg_pagesize;
#endif

/* Number of CPUs. */
extern unsigned	ncpus;

extern malloc_mutex_t	arenas_lock; /* Protects arenas initialization. */
extern pthread_key_t	arenas_tsd;
#ifndef NO_TLS
/*
 * Map of pthread_self() --> arenas[???], used for selecting an arena to use
 * for allocations.
 */
extern __thread arena_t	*arenas_tls JEMALLOC_ATTR(tls_model("initial-exec"));
#  define ARENA_GET()	arenas_tls
#  define ARENA_SET(v)	do {						\
	arenas_tls = (v);						\
	pthread_setspecific(arenas_tsd, (void *)(v));			\
} while (0)
#else
#  define ARENA_GET()	((arena_t *)pthread_getspecific(arenas_tsd))
#  define ARENA_SET(v)	do {						\
	pthread_setspecific(arenas_tsd, (void *)(v));			\
} while (0)
#endif

/*
 * Arenas that are used to service external requests.  Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 */
extern arena_t	**arenas;
extern unsigned	narenas;

#ifndef NO_TLS
extern __thread thread_allocated_t	thread_allocated_tls;
#  define ALLOCATED_GET()	(thread_allocated_tls.allocated)
#  define ALLOCATEDP_GET()	(&thread_allocated_tls.allocated)
#  define DEALLOCATED_GET()	(thread_allocated_tls.deallocated)
#  define DEALLOCATEDP_GET()	(&thread_allocated_tls.deallocated)
#  define ALLOCATED_ADD(a, d)	do {					\
	thread_allocated_tls.allocated += (a);				\
	thread_allocated_tls.deallocated += (d);			\
} while (0)
#else
#  define ALLOCATED_GET()	(thread_allocated_get()->allocated)
#  define ALLOCATEDP_GET()	(&thread_allocated_get()->allocated)
#  define DEALLOCATED_GET()	(thread_allocated_get()->deallocated)
#  define DEALLOCATEDP_GET()	(&thread_allocated_get()->deallocated)
#  define ALLOCATED_ADD(a, d)	do {					\
	thread_allocated_t *thread_allocated = thread_allocated_get();	\
	thread_allocated->allocated += (a);				\
	thread_allocated->deallocated += (d);				\
} while (0)
#endif
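
/*
 * Usage sketch (illustrative; not part of the original header): an allocation
 * path that has just produced an object of usize bytes would account for it
 * with
 *
 *	ALLOCATED_ADD(usize, 0);
 *
 * and the deallocation path mirrors it with ALLOCATED_ADD(0, usize).  Both
 * expansions touch only the calling thread's counters (TLS or TSD), so no
 * locking is needed.
 */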

extern pthread_key_t	thread_allocated_tsd;
thread_allocated_t	*thread_allocated_get_hard(void);

arena_t	*arenas_extend(unsigned ind);
arena_t	*choose_arena_hard(void);
int	buferror(int errnum, char *buf, size_t buflen);
void	jemalloc_prefork(void);
void	jemalloc_postfork(void);

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prn.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#ifdef JEMALLOC_ZONE
#include "jemalloc/internal/zone.h"
#endif
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_EXTERNS
/******************************************************************************/
#define JEMALLOC_H_INLINES

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prn.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"

#ifndef JEMALLOC_ENABLE_INLINE
size_t	pow2_ceil(size_t x);
size_t	s2u(size_t size);
size_t	sa2u(size_t size, size_t alignment, size_t *run_size_p);
void	malloc_write(const char *s);
arena_t	*choose_arena(void);
thread_allocated_t	*thread_allocated_get(void);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
/* Compute the smallest power of 2 that is >= x. */
JEMALLOC_INLINE size_t
pow2_ceil(size_t x)
{

	x--;
	x |= x >> 1;
	x |= x >> 2;
	x |= x >> 4;
	x |= x >> 8;
	x |= x >> 16;
#if (LG_SIZEOF_PTR == 3)
	x |= x >> 32;
#endif
	x++;
	return (x);
}
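
/*
 * Worked example (illustrative): pow2_ceil(37).  After x--, x == 36
 * (0b100100); the shift/OR cascade smears the highest set bit into every
 * lower bit position, giving 0b111111 (63); x++ then yields 64, the smallest
 * power of 2 >= 37.  Note that pow2_ceil(0) == 0 by this construction, since
 * the decrement wraps to SIZE_T_MAX and the final increment wraps back.
 */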

/*
 * Compute usable size that would result from allocating an object with the
 * specified size.
 */
JEMALLOC_INLINE size_t
s2u(size_t size)
{

	if (size <= SMALL_MAXCLASS)
		return (arena_bin_info[SMALL_SIZE2BIN(size)].reg_size);
	if (size <= arena_maxclass)
		return (PAGE_CEILING(size));
	return (CHUNK_CEILING(size));
}
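
/*
 * Illustrative values only; actual results depend on the configured size
 * classes and page size.  With common quantum-spaced small classes
 * (..., 96, 112, 128, ...), s2u(100) == 112; with 4 KiB pages, a large
 * request such as s2u(5000) rounds to 8192; and anything above
 * arena_maxclass rounds up to a chunk multiple via CHUNK_CEILING().
 */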

/*
 * Compute usable size that would result from allocating an object with the
 * specified size and alignment.
 */
JEMALLOC_INLINE size_t
sa2u(size_t size, size_t alignment, size_t *run_size_p)
{
	size_t usize;

	/*
	 * Round size up to the nearest multiple of alignment.
	 *
	 * This done, we can take advantage of the fact that for each small
	 * size class, every object is aligned at the smallest power of two
	 * that is non-zero in the base two representation of the size.  For
	 * example:
	 *
	 *   Size |   Base 2 | Minimum alignment
	 *   -----+----------+------------------
	 *     96 |  1100000 |                32
	 *    144 | 10100000 |                32
	 *    192 | 11000000 |                64
	 *
	 * Depending on runtime settings, it is possible that arena_malloc()
	 * will further round up to a power of two, but that never causes
	 * correctness issues.
	 */
	usize = (size + (alignment - 1)) & (-alignment);
	/*
	 * (usize < size) protects against the combination of maximal
	 * alignment and size greater than maximal alignment.
	 */
	if (usize < size) {
		/* size_t overflow. */
		return (0);
	}

	if (usize <= arena_maxclass && alignment <= PAGE_SIZE) {
		if (usize <= SMALL_MAXCLASS)
			return (arena_bin_info[SMALL_SIZE2BIN(usize)].reg_size);
		return (PAGE_CEILING(usize));
	} else {
		size_t run_size;

		/*
		 * We can't achieve subpage alignment, so round up alignment
		 * permanently; it makes later calculations simpler.
		 */
		alignment = PAGE_CEILING(alignment);
		usize = PAGE_CEILING(size);
		/*
		 * (usize < size) protects against very large sizes within
		 * PAGE_SIZE of SIZE_T_MAX.
		 *
		 * (usize + alignment < usize) protects against the
		 * combination of maximal alignment and usize large enough
		 * to cause overflow.  This is similar to the first overflow
		 * check above, but it needs to be repeated due to the new
		 * usize value, which may now be *equal* to maximal
		 * alignment, whereas before we only detected overflow if the
		 * original size was *greater* than maximal alignment.
		 */
		if (usize < size || usize + alignment < usize) {
			/* size_t overflow. */
			return (0);
		}

		/*
		 * Calculate the size of the over-size run that arena_palloc()
		 * would need to allocate in order to guarantee the alignment.
		 */
		if (usize >= alignment)
			run_size = usize + alignment - PAGE_SIZE;
		else {
			/*
			 * It is possible that (alignment << 1) will cause
			 * overflow, but it doesn't matter because we also
			 * subtract PAGE_SIZE, which in the case of overflow
			 * leaves us with a very large run_size.  That causes
			 * the first conditional below to fail, which means
			 * that the bogus run_size value never gets used for
			 * anything important.
			 */
			run_size = (alignment << 1) - PAGE_SIZE;
		}
		if (run_size_p != NULL)
			*run_size_p = run_size;

		if (run_size <= arena_maxclass)
			return (PAGE_CEILING(usize));
		return (CHUNK_CEILING(usize));
	}
}
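
/*
 * Worked example (illustrative): the round-up expression
 * (size + (alignment - 1)) & (-alignment) with size == 100 and
 * alignment == 64 gives (100 + 63) & ~63 == 128.  For the over-size run:
 * with 4 KiB pages, usize == 4096 and alignment == 16384 take the
 * usize < alignment branch, so run_size == (16384 << 1) - 4096 == 28672,
 * which is enough for arena_palloc() to carve out a 16384-aligned 4096-byte
 * region wherever the run happens to start.
 */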

/*
 * Wrapper around malloc_message() that avoids the need for
 * JEMALLOC_P(malloc_message)(...) throughout the code.
 */
JEMALLOC_INLINE void
malloc_write(const char *s)
{

	JEMALLOC_P(malloc_message)(NULL, s);
}

/*
 * Choose an arena based on a per-thread value (fast-path code, calls slow-path
 * code if necessary).
 */
JEMALLOC_INLINE arena_t *
choose_arena(void)
{
	arena_t *ret;

	ret = ARENA_GET();
	if (ret == NULL) {
		ret = choose_arena_hard();
		assert(ret != NULL);
	}

	return (ret);
}

JEMALLOC_INLINE thread_allocated_t *
thread_allocated_get(void)
{
	thread_allocated_t *thread_allocated = (thread_allocated_t *)
	    pthread_getspecific(thread_allocated_tsd);

	if (thread_allocated == NULL)
		return (thread_allocated_get_hard());
	return (thread_allocated);
}
#endif

#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/hash.h"
#ifdef JEMALLOC_ZONE
#include "jemalloc/internal/zone.h"
#endif

#ifndef JEMALLOC_ENABLE_INLINE
void	*imalloc(size_t size);
void	*icalloc(size_t size);
void	*ipalloc(size_t usize, size_t alignment, bool zero);
size_t	isalloc(const void *ptr);
size_t	ivsalloc(const void *ptr);
void	idalloc(void *ptr);
void	*iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
    bool zero, bool no_move);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_INLINE void *
imalloc(size_t size)
{

	assert(size != 0);

	if (size <= arena_maxclass)
		return (arena_malloc(size, false));
	else
		return (huge_malloc(size, false));
}

JEMALLOC_INLINE void *
icalloc(size_t size)
{

	if (size <= arena_maxclass)
		return (arena_malloc(size, true));
	else
		return (huge_malloc(size, true));
}

JEMALLOC_INLINE void *
ipalloc(size_t usize, size_t alignment, bool zero)
{
	void *ret;

	assert(usize != 0);
	assert(usize == sa2u(usize, alignment, NULL));

	if (usize <= arena_maxclass && alignment <= PAGE_SIZE)
		ret = arena_malloc(usize, zero);
	else {
		size_t run_size
#ifdef JEMALLOC_CC_SILENCE
		    = 0
#endif
		    ;

		/*
		 * Ideally we would only ever call sa2u() once per aligned
		 * allocation request, and the caller of this function has
		 * already done so once.  However, it's rather burdensome to
		 * require every caller to pass in run_size, especially given
		 * that it's only relevant to large allocations.  Therefore,
		 * just call it again here in order to get run_size.
		 */
		sa2u(usize, alignment, &run_size);
		if (run_size <= arena_maxclass) {
			ret = arena_palloc(choose_arena(), usize, run_size,
			    alignment, zero);
		} else if (alignment <= chunksize)
			ret = huge_malloc(usize, zero);
		else
			ret = huge_palloc(usize, alignment, zero);
	}

	assert(((uintptr_t)ret & (alignment - 1)) == 0);
	return (ret);
}

JEMALLOC_INLINE size_t
isalloc(const void *ptr)
{
	size_t ret;
	arena_chunk_t *chunk;

	assert(ptr != NULL);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr) {
		/* Region. */
		if (config_prof)
			ret = arena_salloc_demote(ptr);
		else
			ret = arena_salloc(ptr);
	} else
		ret = huge_salloc(ptr);

	return (ret);
}

JEMALLOC_INLINE size_t
ivsalloc(const void *ptr)
{

	/* Return 0 if ptr is not within a chunk managed by jemalloc. */
	if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == NULL)
		return (0);

	return (isalloc(ptr));
}

JEMALLOC_INLINE void
idalloc(void *ptr)
{
	arena_chunk_t *chunk;

	assert(ptr != NULL);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr)
		arena_dalloc(chunk->arena, chunk, ptr);
	else
		huge_dalloc(ptr, true);
}

JEMALLOC_INLINE void *
iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
    bool no_move)
{
	void *ret;
	size_t oldsize;

	assert(ptr != NULL);
	assert(size != 0);

	oldsize = isalloc(ptr);

	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
	    != 0) {
		size_t usize, copysize;

		/*
		 * Existing object alignment is inadequate; allocate new space
		 * and copy.
		 */
		if (no_move)
			return (NULL);
		usize = sa2u(size + extra, alignment, NULL);
		if (usize == 0)
			return (NULL);
		ret = ipalloc(usize, alignment, zero);
		if (ret == NULL) {
			if (extra == 0)
				return (NULL);
			/* Try again, without extra this time. */
			usize = sa2u(size, alignment, NULL);
			if (usize == 0)
				return (NULL);
			ret = ipalloc(usize, alignment, zero);
			if (ret == NULL)
				return (NULL);
		}
		/*
		 * Copy at most size bytes (not size+extra), since the caller
		 * has no expectation that the extra bytes will be reliably
		 * preserved.
		 */
		copysize = (size < oldsize) ? size : oldsize;
		memcpy(ret, ptr, copysize);
		idalloc(ptr);
		return (ret);
	}

	if (no_move) {
		if (size <= arena_maxclass) {
			return (arena_ralloc_no_move(ptr, oldsize, size,
			    extra, zero));
		} else {
			return (huge_ralloc_no_move(ptr, oldsize, size,
			    extra));
		}
	} else {
		if (size + extra <= arena_maxclass) {
			return (arena_ralloc(ptr, oldsize, size, extra,
			    alignment, zero));
		} else {
			return (huge_ralloc(ptr, oldsize, size, extra,
			    alignment, zero));
		}
	}
}
#endif

#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_INLINES
/******************************************************************************/