jemalloc_internal.h.in revision 3492daf1ce6fb85040d28dfd4dcb51cbf6b0da51
#include <sys/mman.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/uio.h>

#include <errno.h>
#include <limits.h>
#ifndef SIZE_T_MAX
#  define SIZE_T_MAX SIZE_MAX
#endif
#include <pthread.h>
#include <sched.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>
#ifndef offsetof
#  define offsetof(type, member) ((size_t)&(((type *)NULL)->member))
#endif
#include <inttypes.h>
#include <string.h>
#include <strings.h>
#include <ctype.h>
#include <unistd.h>
#include <fcntl.h>
#include <math.h>

#define JEMALLOC_NO_DEMANGLE
#include "../jemalloc@install_suffix@.h"

#include "jemalloc/internal/private_namespace.h"

#ifdef JEMALLOC_CC_SILENCE
#define UNUSED JEMALLOC_ATTR(unused)
#else
#define UNUSED
#endif

static const bool config_debug =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
static const bool config_dss =
#ifdef JEMALLOC_DSS
    true
#else
    false
#endif
    ;
static const bool config_dynamic_page_shift =
#ifdef JEMALLOC_DYNAMIC_PAGE_SHIFT
    true
#else
    false
#endif
    ;
static const bool config_fill =
#ifdef JEMALLOC_FILL
    true
#else
    false
#endif
    ;
static const bool config_lazy_lock =
#ifdef JEMALLOC_LAZY_LOCK
    true
#else
    false
#endif
    ;
static const bool config_prof =
#ifdef JEMALLOC_PROF
    true
#else
    false
#endif
    ;
static const bool config_prof_libgcc =
#ifdef JEMALLOC_PROF_LIBGCC
    true
#else
    false
#endif
    ;
static const bool config_prof_libunwind =
#ifdef JEMALLOC_PROF_LIBUNWIND
    true
#else
    false
#endif
    ;
static const bool config_stats =
#ifdef JEMALLOC_STATS
    true
#else
    false
#endif
    ;
static const bool config_tcache =
#ifdef JEMALLOC_TCACHE
    true
#else
    false
#endif
    ;
static const bool config_tls =
#ifdef JEMALLOC_TLS
    true
#else
    false
#endif
    ;
static const bool config_xmalloc =
#ifdef JEMALLOC_XMALLOC
    true
#else
    false
#endif
    ;
static const bool config_ivsalloc =
#ifdef JEMALLOC_IVSALLOC
    true
#else
    false
#endif
    ;

#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
#include <libkern/OSAtomic.h>
#endif

#ifdef JEMALLOC_ZONE
#include <mach/mach_error.h>
#include <mach/mach_init.h>
#include <mach/vm_map.h>
#include <malloc/malloc.h>
#endif

#define RB_COMPACT
#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/qr.h"
#include "jemalloc/internal/ql.h"

extern void (*je_malloc_message)(void *wcbopaque, const char *s);

/*
 * Define a custom assert() in order to reduce the chances of deadlock during
 * assertion failure.
 */
#ifndef assert
#  ifdef JEMALLOC_DEBUG
#    define assert(e) do {                                              \
        if (!(e)) {                                                     \
            char line_buf[UMAX2S_BUFSIZE];                              \
            malloc_write("<jemalloc>: ");                               \
            malloc_write(__FILE__);                                     \
            malloc_write(":");                                          \
            malloc_write(u2s(__LINE__, 10, line_buf));                  \
            malloc_write(": Failed assertion: ");                       \
            malloc_write("\"");                                         \
            malloc_write(#e);                                           \
            malloc_write("\"\n");                                       \
            abort();                                                    \
        }                                                               \
} while (0)
#  else
#    define assert(e)
#  endif
#endif
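/*
 * The config_* constants above replace scattered #ifdef tests with ordinary
 * C conditionals: both branches of "if (config_foo)" are always parsed and
 * type-checked, and because the condition is a compile-time constant, the
 * compiler eliminates the dead branch, so there is no runtime cost.  A
 * minimal standalone sketch of the idiom (the identifiers here are
 * illustrative, not part of this header):
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

static const bool config_example =
#ifdef EXAMPLE_FEATURE
    true
#else
    false
#endif
    ;

static void
report(void)
{

    /*
     * Compiled away when EXAMPLE_FEATURE is undefined, yet still
     * type-checked in every build.
     */
    if (config_example)
        fprintf(stderr, "feature enabled\n");
}
#endif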
/* Use to assert a particular configuration, e.g., cassert(config_debug). */
#define cassert(c) do {                                                 \
    if ((c) == false)                                                   \
        assert(false);                                                  \
} while (0)

/*
 * jemalloc can conceptually be broken into components (arena, tcache, etc.),
 * but there are circular dependencies that cannot be broken without
 * substantial performance degradation.  In order to reduce the effect on
 * visual code flow, read the header files in multiple passes, with one of the
 * following cpp variables defined during each pass:
 *
 *   JEMALLOC_H_TYPES   : Preprocessor-defined constants and pseudo-opaque
 *                        data types.
 *   JEMALLOC_H_STRUCTS : Data structures.
 *   JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
 *   JEMALLOC_H_INLINES : Inline functions.
 */
/******************************************************************************/
#define JEMALLOC_H_TYPES

#define ALLOCM_LG_ALIGN_MASK ((int)0x3f)

#define ZU(z) ((size_t)z)

#ifndef __DECONST
#  define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
#endif

#ifdef JEMALLOC_DEBUG
   /* Disable inlining to make debugging easier. */
#  define JEMALLOC_INLINE
#  define inline
#else
#  define JEMALLOC_ENABLE_INLINE
#  define JEMALLOC_INLINE static inline
#endif

/* Size of stack-allocated buffer passed to buferror(). */
#define BUFERROR_BUF 64

/* Smallest size class to support. */
#define LG_TINY_MIN 3
#define TINY_MIN (1U << LG_TINY_MIN)

/*
 * Minimum alignment of allocations is 2^LG_QUANTUM bytes (ignoring tiny size
 * classes).
 */
#ifndef LG_QUANTUM
#  ifdef __i386__
#    define LG_QUANTUM 4
#  endif
#  ifdef __ia64__
#    define LG_QUANTUM 4
#  endif
#  ifdef __alpha__
#    define LG_QUANTUM 4
#  endif
#  ifdef __sparc64__
#    define LG_QUANTUM 4
#  endif
#  if (defined(__amd64__) || defined(__x86_64__))
#    define LG_QUANTUM 4
#  endif
#  ifdef __arm__
#    define LG_QUANTUM 3
#  endif
#  ifdef __mips__
#    define LG_QUANTUM 3
#  endif
#  ifdef __powerpc__
#    define LG_QUANTUM 4
#  endif
#  ifdef __s390x__
#    define LG_QUANTUM 4
#  endif
#  ifdef __SH4__
#    define LG_QUANTUM 4
#  endif
#  ifdef __tile__
#    define LG_QUANTUM 4
#  endif
#  ifndef LG_QUANTUM
#    error "No LG_QUANTUM definition for architecture; specify via CPPFLAGS"
#  endif
#endif

#define QUANTUM       ((size_t)(1U << LG_QUANTUM))
#define QUANTUM_MASK  (QUANTUM - 1)

/* Return the smallest quantum multiple that is >= a. */
#define QUANTUM_CEILING(a)                                              \
    (((a) + QUANTUM_MASK) & ~QUANTUM_MASK)

#define LONG       ((size_t)(1U << LG_SIZEOF_LONG))
#define LONG_MASK  (LONG - 1)

/* Return the smallest long multiple that is >= a. */
#define LONG_CEILING(a)                                                 \
    (((a) + LONG_MASK) & ~LONG_MASK)

#define SIZEOF_PTR  (1U << LG_SIZEOF_PTR)
#define PTR_MASK    (SIZEOF_PTR - 1)

/* Return the smallest (void *) multiple that is >= a. */
#define PTR_CEILING(a)                                                  \
    (((a) + PTR_MASK) & ~PTR_MASK)

/*
 * Maximum size of L1 cache line.  This is used to avoid cache line aliasing.
 * In addition, this controls the spacing of cacheline-spaced size classes.
 */
#define LG_CACHELINE    6
#define CACHELINE       ((size_t)(1U << LG_CACHELINE))
#define CACHELINE_MASK  (CACHELINE - 1)

/* Return the smallest cacheline multiple that is >= s. */
#define CACHELINE_CEILING(s)                                            \
    (((s) + CACHELINE_MASK) & ~CACHELINE_MASK)
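/*
 * QUANTUM_CEILING(), LONG_CEILING(), PTR_CEILING(), and CACHELINE_CEILING()
 * above (and PAGE_CEILING() below) all round up with the same two-step bit
 * trick: add (alignment - 1), then clear the low bits with the mask.  This
 * only works when the alignment is a power of two, and it silently wraps if
 * the addition overflows size_t.  A standalone sketch of the arithmetic
 * (illustrative only, not part of this header):
 */
#if 0
#include <assert.h>
#include <stddef.h>

/* Round s up to the nearest multiple of the power-of-two align. */
static size_t
align_ceiling(size_t s, size_t align)
{
    size_t mask = align - 1;

    assert((align & mask) == 0);    /* align must be a power of two. */
    return ((s + mask) & ~mask);
}

int
main(void)
{

    assert(align_ceiling(1, 64) == 64);
    assert(align_ceiling(64, 64) == 64);
    assert(align_ceiling(65, 64) == 128);
    return (0);
}
#endif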
/* Page size.  STATIC_PAGE_SHIFT is determined by the configure script. */
#define STATIC_PAGE_SIZE ((size_t)(1U << STATIC_PAGE_SHIFT))
#define STATIC_PAGE_MASK ((size_t)(STATIC_PAGE_SIZE - 1))
#ifdef PAGE_SHIFT
#  undef PAGE_SHIFT
#endif
#ifdef PAGE_SIZE
#  undef PAGE_SIZE
#endif
#ifdef PAGE_MASK
#  undef PAGE_MASK
#endif
#define PAGE_SHIFT STATIC_PAGE_SHIFT
#define PAGE_SIZE  STATIC_PAGE_SIZE
#define PAGE_MASK  STATIC_PAGE_MASK

/* Return the smallest pagesize multiple that is >= s. */
#define PAGE_CEILING(s)                                                 \
    (((s) + PAGE_MASK) & ~PAGE_MASK)

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#ifdef JEMALLOC_ZONE
#include "jemalloc/internal/zone.h"
#endif
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_TYPES
/******************************************************************************/
#define JEMALLOC_H_STRUCTS

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#ifdef JEMALLOC_ZONE
#include "jemalloc/internal/zone.h"
#endif
#include "jemalloc/internal/prof.h"

typedef struct {
    uint64_t allocated;
    uint64_t deallocated;
} thread_allocated_t;

#undef JEMALLOC_H_STRUCTS
/******************************************************************************/
#define JEMALLOC_H_EXTERNS

extern bool   opt_abort;
extern bool   opt_junk;
extern bool   opt_xmalloc;
extern bool   opt_zero;
extern size_t opt_narenas;

#ifdef DYNAMIC_PAGE_SHIFT
extern size_t pagesize;
extern size_t pagesize_mask;
extern size_t lg_pagesize;
#endif

/* Number of CPUs. */
extern unsigned ncpus;

extern malloc_mutex_t arenas_lock; /* Protects arenas initialization. */
extern pthread_key_t arenas_tsd;
#ifndef NO_TLS
/*
 * Map of pthread_self() --> arenas[???], used for selecting an arena to use
 * for allocations.
 */
extern __thread arena_t *arenas_tls JEMALLOC_ATTR(tls_model("initial-exec"));
#  define ARENA_GET() arenas_tls
#  define ARENA_SET(v) do {                                             \
    arenas_tls = (v);                                                   \
    pthread_setspecific(arenas_tsd, (void *)(v));                       \
} while (0)
#else
#  define ARENA_GET() ((arena_t *)pthread_getspecific(arenas_tsd))
#  define ARENA_SET(v) do {                                             \
    pthread_setspecific(arenas_tsd, (void *)(v));                       \
} while (0)
#endif
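/*
 * When TLS is available, ARENA_SET() above stores the arena pointer twice:
 * the __thread variable makes ARENA_GET() a plain memory load, while the
 * pthread_setspecific() mirror keeps the TSD copy in sync (presumably so
 * that the key's destructor still fires at thread exit).  A standalone
 * sketch of this double-bookkeeping pattern (identifiers illustrative):
 */
#if 0
#include <pthread.h>

static pthread_key_t example_tsd;   /* Created with a destructor. */
static __thread void *example_tls;

static void
example_set(void *v)
{

    example_tls = v;                        /* Fast path for reads. */
    pthread_setspecific(example_tsd, v);    /* Thread-exit cleanup hook. */
}

static void *
example_get(void)
{

    return (example_tls);
}
#endif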
/*
 * Arenas that are used to service external requests.  Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 */
extern arena_t **arenas;
extern unsigned narenas;

#ifndef NO_TLS
extern __thread thread_allocated_t thread_allocated_tls;
#  define ALLOCATED_GET() (thread_allocated_tls.allocated)
#  define ALLOCATEDP_GET() (&thread_allocated_tls.allocated)
#  define DEALLOCATED_GET() (thread_allocated_tls.deallocated)
#  define DEALLOCATEDP_GET() (&thread_allocated_tls.deallocated)
#  define ALLOCATED_ADD(a, d) do {                                      \
    thread_allocated_tls.allocated += (a);                              \
    thread_allocated_tls.deallocated += (d);                            \
} while (0)
#else
#  define ALLOCATED_GET() (thread_allocated_get()->allocated)
#  define ALLOCATEDP_GET() (&thread_allocated_get()->allocated)
#  define DEALLOCATED_GET() (thread_allocated_get()->deallocated)
#  define DEALLOCATEDP_GET() (&thread_allocated_get()->deallocated)
#  define ALLOCATED_ADD(a, d) do {                                      \
    thread_allocated_t *thread_allocated = thread_allocated_get();      \
    thread_allocated->allocated += (a);                                 \
    thread_allocated->deallocated += (d);                               \
} while (0)
#endif
extern pthread_key_t thread_allocated_tsd;
thread_allocated_t *thread_allocated_get_hard(void);

arena_t *arenas_extend(unsigned ind);
arena_t *choose_arena_hard(void);
int buferror(int errnum, char *buf, size_t buflen);
void jemalloc_prefork(void);
void jemalloc_postfork(void);

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#ifdef JEMALLOC_ZONE
#include "jemalloc/internal/zone.h"
#endif
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_EXTERNS
/******************************************************************************/
#define JEMALLOC_H_INLINES

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"

#ifndef JEMALLOC_ENABLE_INLINE
size_t pow2_ceil(size_t x);
size_t s2u(size_t size);
size_t sa2u(size_t size, size_t alignment, size_t *run_size_p);
void malloc_write(const char *s);
arena_t *choose_arena(void);
thread_allocated_t *thread_allocated_get(void);
#endif
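/*
 * In the NO_TLS variant of ALLOCATED_ADD() above, the result of
 * thread_allocated_get() is cached in a local so that a single TSD lookup
 * covers both counter updates; the TLS variant needs no lookup at all.  The
 * same idea written as a function (a sketch only; thread_allocated_get() is
 * defined in the inline section below):
 */
#if 0
static void
example_allocated_add(uint64_t a, uint64_t d)
{
    /* One pthread_getspecific() lookup, two counter updates. */
    thread_allocated_t *ta = thread_allocated_get();

    ta->allocated += a;
    ta->deallocated += d;
}
#endif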
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
/* Compute the smallest power of 2 that is >= x. */
JEMALLOC_INLINE size_t
pow2_ceil(size_t x)
{

    x--;
    x |= x >> 1;
    x |= x >> 2;
    x |= x >> 4;
    x |= x >> 8;
    x |= x >> 16;
#if (LG_SIZEOF_PTR == 3)
    x |= x >> 32;
#endif
    x++;
    return (x);
}

/*
 * Compute usable size that would result from allocating an object with the
 * specified size.
 */
JEMALLOC_INLINE size_t
s2u(size_t size)
{

    if (size <= SMALL_MAXCLASS)
        return (arena_bin_info[SMALL_SIZE2BIN(size)].reg_size);
    if (size <= arena_maxclass)
        return (PAGE_CEILING(size));
    return (CHUNK_CEILING(size));
}
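/*
 * pow2_ceil() above relies on bit smearing: after the shift/or cascade,
 * every bit below the highest set bit is 1, so the final increment produces
 * the next power of two, and the initial decrement makes exact powers of two
 * map to themselves.  A standalone check of a few values (illustrative;
 * assumes a build in which pow2_ceil() is available, and x > 0):
 */
#if 0
#include <assert.h>

int
main(void)
{

    assert(pow2_ceil(1) == 1);
    assert(pow2_ceil(2) == 2);
    assert(pow2_ceil(3) == 4);
    assert(pow2_ceil(5) == 8);
    assert(pow2_ceil(4096) == 4096);
    assert(pow2_ceil(4097) == 8192);
    return (0);
}
#endif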
/*
 * Compute usable size that would result from allocating an object with the
 * specified size and alignment.
 */
JEMALLOC_INLINE size_t
sa2u(size_t size, size_t alignment, size_t *run_size_p)
{
    size_t usize;

    /*
     * Round size up to the nearest multiple of alignment.
     *
     * This done, we can take advantage of the fact that for each small
     * size class, every object is aligned at the smallest power of two
     * that is non-zero in the base two representation of the size.  For
     * example:
     *
     *   Size |   Base 2 | Minimum alignment
     *   -----+----------+------------------
     *     96 |  1100000 |                32
     *    160 | 10100000 |                32
     *    192 | 11000000 |                64
     */
    usize = (size + (alignment - 1)) & (-alignment);
    /*
     * (usize < size) protects against the combination of maximal
     * alignment and size greater than maximal alignment.
     */
    if (usize < size) {
        /* size_t overflow. */
        return (0);
    }

    if (usize <= arena_maxclass && alignment <= PAGE_SIZE) {
        if (usize <= SMALL_MAXCLASS)
            return (arena_bin_info[SMALL_SIZE2BIN(usize)].reg_size);
        return (PAGE_CEILING(usize));
    } else {
        size_t run_size;

        /*
         * We can't achieve subpage alignment, so round up alignment
         * permanently; it makes later calculations simpler.
         */
        alignment = PAGE_CEILING(alignment);
        usize = PAGE_CEILING(size);
        /*
         * (usize < size) protects against very large sizes within
         * PAGE_SIZE of SIZE_T_MAX.
         *
         * (usize + alignment < usize) protects against the
         * combination of maximal alignment and usize large enough
         * to cause overflow.  This is similar to the first overflow
         * check above, but it needs to be repeated due to the new
         * usize value, which may now be *equal* to maximal
         * alignment, whereas before we only detected overflow if the
         * original size was *greater* than maximal alignment.
         */
        if (usize < size || usize + alignment < usize) {
            /* size_t overflow. */
            return (0);
        }

        /*
         * Calculate the size of the over-size run that arena_palloc()
         * would need to allocate in order to guarantee the alignment.
         */
        if (usize >= alignment)
            run_size = usize + alignment - PAGE_SIZE;
        else {
            /*
             * It is possible that (alignment << 1) will cause
             * overflow, but it doesn't matter because we also
             * subtract PAGE_SIZE, which in the case of overflow
             * leaves us with a very large run_size.  That causes
             * the first conditional below to fail, which means
             * that the bogus run_size value never gets used for
             * anything important.
             */
            run_size = (alignment << 1) - PAGE_SIZE;
        }
        if (run_size_p != NULL)
            *run_size_p = run_size;

        if (run_size <= arena_maxclass)
            return (PAGE_CEILING(usize));
        return (CHUNK_CEILING(usize));
    }
}

/*
 * Wrapper around malloc_message() that avoids the need for
 * je_malloc_message(...) throughout the code.
 */
JEMALLOC_INLINE void
malloc_write(const char *s)
{

    je_malloc_message(NULL, s);
}

/*
 * Choose an arena based on a per-thread value (fast-path code, calls slow-path
 * code if necessary).
 */
JEMALLOC_INLINE arena_t *
choose_arena(void)
{
    arena_t *ret;

    ret = ARENA_GET();
    if (ret == NULL) {
        ret = choose_arena_hard();
        assert(ret != NULL);
    }

    return (ret);
}

JEMALLOC_INLINE thread_allocated_t *
thread_allocated_get(void)
{
    thread_allocated_t *thread_allocated = (thread_allocated_t *)
        pthread_getspecific(thread_allocated_tsd);

    if (thread_allocated == NULL)
        return (thread_allocated_get_hard());
    return (thread_allocated);
}
#endif

#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/hash.h"
#ifdef JEMALLOC_ZONE
#include "jemalloc/internal/zone.h"
#endif

#ifndef JEMALLOC_ENABLE_INLINE
void *imalloc(size_t size);
void *icalloc(size_t size);
void *ipalloc(size_t usize, size_t alignment, bool zero);
size_t isalloc(const void *ptr);
size_t ivsalloc(const void *ptr);
void idalloc(void *ptr);
void *iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
    bool zero, bool no_move);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_INLINE void *
imalloc(size_t size)
{

    assert(size != 0);

    if (size <= arena_maxclass)
        return (arena_malloc(size, false));
    else
        return (huge_malloc(size, false));
}

JEMALLOC_INLINE void *
icalloc(size_t size)
{

    if (size <= arena_maxclass)
        return (arena_malloc(size, true));
    else
        return (huge_malloc(size, true));
}

JEMALLOC_INLINE void *
ipalloc(size_t usize, size_t alignment, bool zero)
{
    void *ret;

    assert(usize != 0);
    assert(usize == sa2u(usize, alignment, NULL));

    if (usize <= arena_maxclass && alignment <= PAGE_SIZE)
        ret = arena_malloc(usize, zero);
    else {
        size_t run_size
#ifdef JEMALLOC_CC_SILENCE
            = 0
#endif
            ;

        /*
         * Ideally we would only ever call sa2u() once per aligned
         * allocation request, and the caller of this function has
         * already done so once.  However, it's rather burdensome to
         * require every caller to pass in run_size, especially given
         * that it's only relevant to large allocations.  Therefore,
         * just call it again here in order to get run_size.
         */
        sa2u(usize, alignment, &run_size);
        if (run_size <= arena_maxclass) {
            ret = arena_palloc(choose_arena(), usize, run_size,
                alignment, zero);
        } else if (alignment <= chunksize)
            ret = huge_malloc(usize, zero);
        else
            ret = huge_palloc(usize, alignment, zero);
    }

    assert(((uintptr_t)ret & (alignment - 1)) == 0);
    return (ret);
}
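/*
 * Note the contract encoded in ipalloc()'s second assertion: the caller is
 * expected to have translated its request through sa2u() already, and a zero
 * return from sa2u() signals size_t overflow.  iralloc() below follows this
 * convention exactly.  A minimal sketch of a conforming caller (illustrative
 * only):
 */
#if 0
static void *
example_aligned_alloc(size_t size, size_t alignment)
{
    size_t usize;

    usize = sa2u(size, alignment, NULL);
    if (usize == 0)
        return (NULL);  /* size_t overflow. */
    return (ipalloc(usize, alignment, false));
}
#endif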
JEMALLOC_INLINE size_t
isalloc(const void *ptr)
{
    size_t ret;
    arena_chunk_t *chunk;

    assert(ptr != NULL);

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    if (chunk != ptr) {
        /* Region. */
        if (config_prof)
            ret = arena_salloc_demote(ptr);
        else
            ret = arena_salloc(ptr);
    } else
        ret = huge_salloc(ptr);

    return (ret);
}

JEMALLOC_INLINE size_t
ivsalloc(const void *ptr)
{

    /* Return 0 if ptr is not within a chunk managed by jemalloc. */
    if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == NULL)
        return (0);

    return (isalloc(ptr));
}

JEMALLOC_INLINE void
idalloc(void *ptr)
{
    arena_chunk_t *chunk;

    assert(ptr != NULL);

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    if (chunk != ptr)
        arena_dalloc(chunk->arena, chunk, ptr);
    else
        huge_dalloc(ptr, true);
}

JEMALLOC_INLINE void *
iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
    bool no_move)
{
    void *ret;
    size_t oldsize;

    assert(ptr != NULL);
    assert(size != 0);

    oldsize = isalloc(ptr);

    if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
        != 0) {
        size_t usize, copysize;

        /*
         * Existing object alignment is inadequate; allocate new space
         * and copy.
         */
        if (no_move)
            return (NULL);
        usize = sa2u(size + extra, alignment, NULL);
        if (usize == 0)
            return (NULL);
        ret = ipalloc(usize, alignment, zero);
        if (ret == NULL) {
            if (extra == 0)
                return (NULL);
            /* Try again, without extra this time. */
            usize = sa2u(size, alignment, NULL);
            if (usize == 0)
                return (NULL);
            ret = ipalloc(usize, alignment, zero);
            if (ret == NULL)
                return (NULL);
        }
        /*
         * Copy at most size bytes (not size+extra), since the caller
         * has no expectation that the extra bytes will be reliably
         * preserved.
         */
        copysize = (size < oldsize) ? size : oldsize;
        memcpy(ret, ptr, copysize);
        idalloc(ptr);
        return (ret);
    }

    if (no_move) {
        if (size <= arena_maxclass) {
            return (arena_ralloc_no_move(ptr, oldsize, size,
                extra, zero));
        } else {
            return (huge_ralloc_no_move(ptr, oldsize, size,
                extra));
        }
    } else {
        if (size + extra <= arena_maxclass) {
            return (arena_ralloc(ptr, oldsize, size, extra,
                alignment, zero));
        } else {
            return (huge_ralloc(ptr, oldsize, size, extra,
                alignment, zero));
        }
    }
}
#endif

#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_INLINES
/******************************************************************************/
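/*
 * isalloc(), idalloc(), and iralloc() above classify a pointer by comparing
 * it with CHUNK_ADDR2BASE(ptr): huge allocations start exactly at a chunk
 * boundary, whereas arena-managed regions live at a nonzero offset within a
 * chunk.  Assuming CHUNK_ADDR2BASE() is the usual mask of the low
 * chunk-offset bits (it is defined in chunk.h, which is not shown here), the
 * test reduces to the sketch below (illustrative only):
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

/* True iff ptr sits exactly on a chunk boundary, i.e. is a huge allocation. */
static bool
example_is_huge(const void *ptr, uintptr_t chunksize_mask)
{

    return (((uintptr_t)ptr & chunksize_mask) == 0);
}
#endif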