jemalloc_internal.h.in revision f7088e6c992d079bc3162e0c48ed4dc5def6d263
#ifndef JEMALLOC_INTERNAL_H
#define JEMALLOC_INTERNAL_H
#include <sys/mman.h>
#include <sys/param.h>
#include <sys/syscall.h>
#if !defined(SYS_write) && defined(__NR_write)
#define SYS_write __NR_write
#endif
#include <sys/time.h>
#include <sys/types.h>
#include <sys/uio.h>

#include <errno.h>
#include <limits.h>
#ifndef SIZE_T_MAX
# define SIZE_T_MAX SIZE_MAX
#endif
#include <pthread.h>
#include <sched.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>
#ifndef offsetof
# define offsetof(type, member) ((size_t)&(((type *)NULL)->member))
#endif
#include <inttypes.h>
#include <string.h>
#include <strings.h>
#include <ctype.h>
#include <unistd.h>
#include <fcntl.h>
#include <math.h>

#define JEMALLOC_NO_DEMANGLE
#include "../jemalloc@install_suffix@.h"

#ifdef JEMALLOC_UTRACE
#include <sys/ktrace.h>
#endif

#ifdef JEMALLOC_VALGRIND
#include <valgrind/valgrind.h>
#include <valgrind/memcheck.h>
#endif

#include "jemalloc/internal/private_namespace.h"

#ifdef JEMALLOC_CC_SILENCE
#define UNUSED JEMALLOC_ATTR(unused)
#else
#define UNUSED
#endif

static const bool config_debug =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
static const bool config_dss =
#ifdef JEMALLOC_DSS
    true
#else
    false
#endif
    ;
static const bool config_fill =
#ifdef JEMALLOC_FILL
    true
#else
    false
#endif
    ;
static const bool config_lazy_lock =
#ifdef JEMALLOC_LAZY_LOCK
    true
#else
    false
#endif
    ;
static const bool config_prof =
#ifdef JEMALLOC_PROF
    true
#else
    false
#endif
    ;
static const bool config_prof_libgcc =
#ifdef JEMALLOC_PROF_LIBGCC
    true
#else
    false
#endif
    ;
static const bool config_prof_libunwind =
#ifdef JEMALLOC_PROF_LIBUNWIND
    true
#else
    false
#endif
    ;
static const bool config_munmap =
#ifdef JEMALLOC_MUNMAP
    true
#else
    false
#endif
    ;
static const bool config_stats =
#ifdef JEMALLOC_STATS
    true
#else
    false
#endif
    ;
static const bool config_tcache =
#ifdef JEMALLOC_TCACHE
    true
#else
    false
#endif
    ;
static const bool config_tls =
#ifdef JEMALLOC_TLS
    true
#else
    false
#endif
    ;
static const bool config_utrace =
#ifdef JEMALLOC_UTRACE
    true
#else
    false
#endif
    ;
static const bool config_valgrind =
#ifdef JEMALLOC_VALGRIND
    true
#else
    false
#endif
    ;
static const bool config_xmalloc =
#ifdef JEMALLOC_XMALLOC
    true
#else
    false
#endif
    ;
static const bool config_ivsalloc =
#ifdef JEMALLOC_IVSALLOC
    true
#else
    false
#endif
    ;
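/*
 * The config_* constants above are intended to be tested with plain if
 * statements rather than #ifdef, so that code for disabled features is still
 * compiled and type-checked while the optimizer discards the dead branches.
 * A hypothetical call site (illustrative sketch only; stats_update() is not
 * a real function in this codebase):
 *
 *	if (config_stats)
 *		stats_update(ptr, usize);
 */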
#ifdef JEMALLOC_ATOMIC9
#include <machine/atomic.h>
#endif

#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
#include <libkern/OSAtomic.h>
#endif

#ifdef JEMALLOC_ZONE
#include <mach/mach_error.h>
#include <mach/mach_init.h>
#include <mach/vm_map.h>
#include <malloc/malloc.h>
#endif

#define RB_COMPACT
#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/qr.h"
#include "jemalloc/internal/ql.h"

/*
 * jemalloc can conceptually be broken into components (arena, tcache, etc.),
 * but there are circular dependencies that cannot be broken without
 * substantial performance degradation.  In order to reduce the effect on
 * visual code flow, read the header files in multiple passes, with one of the
 * following cpp variables defined during each pass:
 *
 *   JEMALLOC_H_TYPES   : Preprocessor-defined constants and pseudo-opaque
 *                        data types.
 *   JEMALLOC_H_STRUCTS : Data structures.
 *   JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
 *   JEMALLOC_H_INLINES : Inline functions.
 */
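/*
 * Under that convention, each internal header is laid out roughly as follows
 * (illustrative sketch, not an actual header):
 *
 *	#ifdef JEMALLOC_H_TYPES
 *	...typedefs and cpp constants...
 *	#endif
 *	#ifdef JEMALLOC_H_STRUCTS
 *	...struct definitions...
 *	#endif
 *	#ifdef JEMALLOC_H_EXTERNS
 *	...extern declarations and prototypes...
 *	#endif
 *	#ifdef JEMALLOC_H_INLINES
 *	...inline function definitions...
 *	#endif
 */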
/******************************************************************************/
#define JEMALLOC_H_TYPES

#define ALLOCM_LG_ALIGN_MASK ((int)0x3f)

#define ZU(z) ((size_t)z)

#ifndef __DECONST
# define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
#endif

#ifdef JEMALLOC_DEBUG
/* Disable inlining to make debugging easier. */
# define JEMALLOC_INLINE
# define inline
#else
# define JEMALLOC_ENABLE_INLINE
# define JEMALLOC_INLINE static inline
#endif

/* Smallest size class to support. */
#define LG_TINY_MIN 3
#define TINY_MIN (1U << LG_TINY_MIN)

/*
 * Minimum alignment of allocations is 2^LG_QUANTUM bytes (ignoring tiny size
 * classes).
 */
#ifndef LG_QUANTUM
# ifdef __i386__
#  define LG_QUANTUM 4
# endif
# ifdef __ia64__
#  define LG_QUANTUM 4
# endif
# ifdef __alpha__
#  define LG_QUANTUM 4
# endif
# ifdef __sparc64__
#  define LG_QUANTUM 4
# endif
# if (defined(__amd64__) || defined(__x86_64__))
#  define LG_QUANTUM 4
# endif
# ifdef __arm__
#  define LG_QUANTUM 3
# endif
# ifdef __mips__
#  define LG_QUANTUM 3
# endif
# ifdef __powerpc__
#  define LG_QUANTUM 4
# endif
# ifdef __s390x__
#  define LG_QUANTUM 4
# endif
# ifdef __SH4__
#  define LG_QUANTUM 4
# endif
# ifdef __tile__
#  define LG_QUANTUM 4
# endif
# ifndef LG_QUANTUM
#  error "No LG_QUANTUM definition for architecture; specify via CPPFLAGS"
# endif
#endif

#define QUANTUM ((size_t)(1U << LG_QUANTUM))
#define QUANTUM_MASK (QUANTUM - 1)

/* Return the smallest quantum multiple that is >= a. */
#define QUANTUM_CEILING(a) \
    (((a) + QUANTUM_MASK) & ~QUANTUM_MASK)

#define LONG ((size_t)(1U << LG_SIZEOF_LONG))
#define LONG_MASK (LONG - 1)

/* Return the smallest long multiple that is >= a. */
#define LONG_CEILING(a) \
    (((a) + LONG_MASK) & ~LONG_MASK)

#define SIZEOF_PTR (1U << LG_SIZEOF_PTR)
#define PTR_MASK (SIZEOF_PTR - 1)

/* Return the smallest (void *) multiple that is >= a. */
#define PTR_CEILING(a) \
    (((a) + PTR_MASK) & ~PTR_MASK)

/*
 * Maximum size of L1 cache line.  This is used to avoid cache line aliasing.
 * In addition, this controls the spacing of cacheline-spaced size classes.
 */
#define LG_CACHELINE 6
#define CACHELINE ((size_t)(1U << LG_CACHELINE))
#define CACHELINE_MASK (CACHELINE - 1)

/* Return the smallest cacheline multiple that is >= s. */
#define CACHELINE_CEILING(s) \
    (((s) + CACHELINE_MASK) & ~CACHELINE_MASK)

/* Page size.  STATIC_PAGE_SHIFT is determined by the configure script. */
#ifdef PAGE_MASK
# undef PAGE_MASK
#endif
#define LG_PAGE STATIC_PAGE_SHIFT
#define PAGE ((size_t)(1U << STATIC_PAGE_SHIFT))
#define PAGE_MASK ((size_t)(PAGE - 1))

/* Return the smallest pagesize multiple that is >= s. */
#define PAGE_CEILING(s) \
    (((s) + PAGE_MASK) & ~PAGE_MASK)
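/*
 * Worked example of the add-then-mask ceiling pattern used above, assuming
 * LG_QUANTUM == 4 (QUANTUM == 16, QUANTUM_MASK == 0xf):
 *
 *	QUANTUM_CEILING(1)  == (1 + 0xf) & ~0xf  == 16
 *	QUANTUM_CEILING(16) == (16 + 0xf) & ~0xf == 16
 *	QUANTUM_CEILING(17) == (17 + 0xf) & ~0xf == 32
 *
 * LONG_CEILING(), PTR_CEILING(), CACHELINE_CEILING(), and PAGE_CEILING()
 * follow the same pattern with their respective power-of-two masks.
 */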
/* Return the nearest aligned address at or below a. */
#define ALIGNMENT_ADDR2BASE(a, alignment) \
    ((void *)((uintptr_t)(a) & (-(alignment))))

/* Return the offset between a and the nearest aligned address at or below a. */
#define ALIGNMENT_ADDR2OFFSET(a, alignment) \
    ((size_t)((uintptr_t)(a) & (alignment - 1)))

/* Return the smallest alignment multiple that is >= s. */
#define ALIGNMENT_CEILING(s, alignment) \
    (((s) + (alignment - 1)) & (-(alignment)))
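/*
 * Illustrative values, assuming alignment == 64 and a == 0x1030.  Since
 * -(alignment) for a power of two equals ~(alignment - 1), it acts as a mask
 * that clears the low bits:
 *
 *	ALIGNMENT_ADDR2BASE(0x1030, 64)   == 0x1000
 *	ALIGNMENT_ADDR2OFFSET(0x1030, 64) == 0x30
 *	ALIGNMENT_CEILING(0x1030, 64)     == 0x1040
 */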
#ifdef JEMALLOC_VALGRIND
/*
 * The JEMALLOC_VALGRIND_*() macros must be macros rather than functions, so
 * that when Valgrind reports errors, there are no extra stack frames in the
 * backtraces.
 *
 * The size that is reported to valgrind must be consistent through a chain of
 * malloc..realloc..realloc calls.  Request size isn't recorded anywhere in
 * jemalloc, so it is critical that all callers of these macros provide usize
 * rather than request size.  As a result, buffer overflow detection is
 * technically weakened for the standard API, though it is generally accepted
 * practice to consider any extra bytes reported by malloc_usable_size() as
 * usable space.
 */
#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do { \
    if (config_valgrind && opt_valgrind && cond) \
        VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(ptr), zero); \
} while (0)
#define JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize, \
    old_rzsize, zero) do { \
    if (config_valgrind && opt_valgrind) { \
        size_t rzsize = p2rz(ptr); \
 \
        if (ptr == old_ptr) { \
            VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize, \
                usize, rzsize); \
            if (zero && old_usize < usize) { \
                VALGRIND_MAKE_MEM_DEFINED( \
                    (void *)((uintptr_t)ptr + \
                    old_usize), usize - old_usize); \
            } \
        } else { \
            if (old_ptr != NULL) { \
                VALGRIND_FREELIKE_BLOCK(old_ptr, \
                    old_rzsize); \
            } \
            if (ptr != NULL) { \
                size_t copy_size = (old_usize < usize) \
                    ? old_usize : usize; \
                size_t tail_size = usize - copy_size; \
                VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, \
                    rzsize, false); \
                if (copy_size > 0) { \
                    VALGRIND_MAKE_MEM_DEFINED(ptr, \
                        copy_size); \
                } \
                if (zero && tail_size > 0) { \
                    VALGRIND_MAKE_MEM_DEFINED( \
                        (void *)((uintptr_t)ptr + \
                        copy_size), tail_size); \
                } \
            } \
        } \
    } \
} while (0)
#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do { \
    if (config_valgrind && opt_valgrind) \
        VALGRIND_FREELIKE_BLOCK(ptr, rzsize); \
} while (0)
#else
#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed)
#define VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB)
#define VALGRIND_FREELIKE_BLOCK(addr, rzB)
#define VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr, _qzz_len)
#define VALGRIND_MAKE_MEM_DEFINED(_qzz_addr, _qzz_len)
#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero)
#define JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize, \
    old_rzsize, zero)
#define JEMALLOC_VALGRIND_FREE(ptr, rzsize)
#endif
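/*
 * Hypothetical call sites (illustrative only): allocation paths report the
 * new block once usize is known, and deallocation paths retire it; both use
 * usize/redzone values, never the original request size:
 *
 *	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
 *	...
 *	JEMALLOC_VALGRIND_FREE(ptr, rzsize);
 */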
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_TYPES
/******************************************************************************/
#define JEMALLOC_H_STRUCTS

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

typedef struct {
    uint64_t allocated;
    uint64_t deallocated;
} thread_allocated_t;
/*
 * The JEMALLOC_CONCAT() wrapper is necessary to pass {0, 0} via a cpp macro
 * argument.
 */
#define THREAD_ALLOCATED_INITIALIZER JEMALLOC_CONCAT({0, 0})

#undef JEMALLOC_H_STRUCTS
/******************************************************************************/
#define JEMALLOC_H_EXTERNS

extern bool opt_abort;
extern bool opt_junk;
extern size_t opt_quarantine;
extern bool opt_redzone;
extern bool opt_utrace;
extern bool opt_valgrind;
extern bool opt_xmalloc;
extern bool opt_zero;
extern size_t opt_narenas;

/* Number of CPUs. */
extern unsigned ncpus;

extern malloc_mutex_t arenas_lock; /* Protects arenas initialization. */
/*
 * Arenas that are used to service external requests.  Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 */
extern arena_t **arenas;
extern unsigned narenas;

arena_t *arenas_extend(unsigned ind);
void arenas_cleanup(void *arg);
arena_t *choose_arena_hard(void);
void jemalloc_prefork(void);
void jemalloc_postfork_parent(void);
void jemalloc_postfork_child(void);

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_EXTERNS
/******************************************************************************/
#define JEMALLOC_H_INLINES

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"

#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), arenas, arena_t *)

size_t s2u(size_t size);
size_t sa2u(size_t size, size_t alignment);
arena_t *choose_arena(arena_t *arena);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
/*
 * Map of pthread_self() --> arenas[???], used for selecting an arena to use
 * for allocations.
 */
malloc_tsd_externs(arenas, arena_t *)
malloc_tsd_funcs(JEMALLOC_INLINE, arenas, arena_t *, NULL, arenas_cleanup)

/*
 * Compute usable size that would result from allocating an object with the
 * specified size.
 */
JEMALLOC_INLINE size_t
s2u(size_t size)
{

    if (size <= SMALL_MAXCLASS)
        return (arena_bin_info[SMALL_SIZE2BIN(size)].reg_size);
    if (size <= arena_maxclass)
        return (PAGE_CEILING(size));
    return (CHUNK_CEILING(size));
}
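/*
 * Illustrative s2u() behavior (assuming 4 KiB pages and the default size
 * classes): a size that is itself a small size class is returned unchanged;
 * a size just above SMALL_MAXCLASS falls through to PAGE_CEILING(), i.e.
 * s2u(SMALL_MAXCLASS + 1) rounds up to the next page multiple; and sizes
 * above arena_maxclass round up to a chunk multiple via CHUNK_CEILING().
 */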
/*
 * Compute usable size that would result from allocating an object with the
 * specified size and alignment.
 */
JEMALLOC_INLINE size_t
sa2u(size_t size, size_t alignment)
{
    size_t usize;

    assert(alignment != 0 && ((alignment - 1) & alignment) == 0);

    /*
     * Round size up to the nearest multiple of alignment.
     *
     * This done, we can take advantage of the fact that for each small
     * size class, every object is aligned at the smallest power of two
     * that is non-zero in the base two representation of the size.  For
     * example:
     *
     *   Size |   Base 2 | Minimum alignment
     *   -----+----------+------------------
     *     96 |  1100000 |                32
     *    144 | 10100000 |                32
     *    192 | 11000000 |                64
     */
    usize = ALIGNMENT_CEILING(size, alignment);
    /*
     * (usize < size) protects against the combination of maximal
     * alignment and size greater than maximal alignment.
     */
    if (usize < size) {
        /* size_t overflow. */
        return (0);
    }

    if (usize <= arena_maxclass && alignment <= PAGE) {
        if (usize <= SMALL_MAXCLASS)
            return (arena_bin_info[SMALL_SIZE2BIN(usize)].reg_size);
        return (PAGE_CEILING(usize));
    } else {
        size_t run_size;

        /*
         * We can't achieve subpage alignment, so round up alignment
         * permanently; it makes later calculations simpler.
         */
        alignment = PAGE_CEILING(alignment);
        usize = PAGE_CEILING(size);
        /*
         * (usize < size) protects against very large sizes within
         * PAGE of SIZE_T_MAX.
         *
         * (usize + alignment < usize) protects against the
         * combination of maximal alignment and usize large enough
         * to cause overflow.  This is similar to the first overflow
         * check above, but it needs to be repeated due to the new
         * usize value, which may now be *equal* to maximal
         * alignment, whereas before we only detected overflow if the
         * original size was *greater* than maximal alignment.
         */
        if (usize < size || usize + alignment < usize) {
            /* size_t overflow. */
            return (0);
        }

        /*
         * Calculate the size of the over-size run that arena_palloc()
         * would need to allocate in order to guarantee the alignment.
         * If the run wouldn't fit within a chunk, round up to a huge
         * allocation size.
         */
        run_size = usize + alignment - PAGE;
        if (run_size <= arena_maxclass)
            return (PAGE_CEILING(usize));
        return (CHUNK_CEILING(usize));
    }
}

/* Choose an arena based on a per-thread value. */
JEMALLOC_INLINE arena_t *
choose_arena(arena_t *arena)
{
    arena_t *ret;

    if (arena != NULL)
        return (arena);

    if ((ret = *arenas_tsd_get()) == NULL) {
        ret = choose_arena_hard();
        assert(ret != NULL);
    }

    return (ret);
}
#endif

#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"

#ifndef JEMALLOC_ENABLE_INLINE
void *imalloc(size_t size);
void *icalloc(size_t size);
void *ipalloc(size_t usize, size_t alignment, bool zero);
size_t isalloc(const void *ptr, bool demote);
size_t ivsalloc(const void *ptr, bool demote);
size_t u2rz(size_t usize);
size_t p2rz(const void *ptr);
void idalloc(void *ptr);
void iqalloc(void *ptr);
void *iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
    bool zero, bool no_move);
malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t)
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_INLINE void *
imalloc(size_t size)
{

    assert(size != 0);

    if (size <= arena_maxclass)
        return (arena_malloc(NULL, size, false, true));
    else
        return (huge_malloc(size, false));
}

JEMALLOC_INLINE void *
icalloc(size_t size)
{

    if (size <= arena_maxclass)
        return (arena_malloc(NULL, size, true, true));
    else
        return (huge_malloc(size, true));
}

JEMALLOC_INLINE void *
ipalloc(size_t usize, size_t alignment, bool zero)
{
    void *ret;

    assert(usize != 0);
    assert(usize == sa2u(usize, alignment));

    if (usize <= arena_maxclass && alignment <= PAGE)
        ret = arena_malloc(NULL, usize, zero, true);
    else {
        if (usize <= arena_maxclass) {
            ret = arena_palloc(choose_arena(NULL), usize, alignment,
                zero);
        } else if (alignment <= chunksize)
            ret = huge_malloc(usize, zero);
        else
            ret = huge_palloc(usize, alignment, zero);
    }

    assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
    return (ret);
}
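/*
 * Illustrative ipalloc() call pattern (hypothetical caller): per the
 * assertions above, usize must already be the result of sa2u() for the same
 * alignment, and a zero result from sa2u() (size_t overflow) must be
 * handled first:
 *
 *	size_t usize = sa2u(size, alignment);
 *	if (usize == 0)
 *		return (NULL);
 *	void *p = ipalloc(usize, alignment, false);
 */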
/*
 * Typical usage:
 *   void *ptr = [...]
 *   size_t sz = isalloc(ptr, config_prof);
 */
JEMALLOC_INLINE size_t
isalloc(const void *ptr, bool demote)
{
    size_t ret;
    arena_chunk_t *chunk;

    assert(ptr != NULL);
    /* Demotion only makes sense if config_prof is true. */
    assert(config_prof || demote == false);

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    if (chunk != ptr)
        ret = arena_salloc(ptr, demote);
    else
        ret = huge_salloc(ptr);

    return (ret);
}

JEMALLOC_INLINE size_t
ivsalloc(const void *ptr, bool demote)
{

    /* Return 0 if ptr is not within a chunk managed by jemalloc. */
    if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == NULL)
        return (0);

    return (isalloc(ptr, demote));
}

JEMALLOC_INLINE size_t
u2rz(size_t usize)
{
    size_t ret;

    if (usize <= SMALL_MAXCLASS) {
        size_t binind = SMALL_SIZE2BIN(usize);
        ret = arena_bin_info[binind].redzone_size;
    } else
        ret = 0;

    return (ret);
}

JEMALLOC_INLINE size_t
p2rz(const void *ptr)
{
    size_t usize = isalloc(ptr, false);

    return (u2rz(usize));
}

JEMALLOC_INLINE void
idalloc(void *ptr)
{
    arena_chunk_t *chunk;

    assert(ptr != NULL);

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    if (chunk != ptr)
        arena_dalloc(chunk->arena, chunk, ptr, true);
    else
        huge_dalloc(ptr, true);
}

JEMALLOC_INLINE void
iqalloc(void *ptr)
{

    if (config_fill && opt_quarantine)
        quarantine(ptr);
    else
        idalloc(ptr);
}

JEMALLOC_INLINE void *
iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
    bool no_move)
{
    void *ret;
    size_t oldsize;

    assert(ptr != NULL);
    assert(size != 0);

    oldsize = isalloc(ptr, config_prof);

    if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
        != 0) {
        size_t usize, copysize;

        /*
         * Existing object alignment is inadequate; allocate new space
         * and copy.
         */
        if (no_move)
            return (NULL);
        usize = sa2u(size + extra, alignment);
        if (usize == 0)
            return (NULL);
        ret = ipalloc(usize, alignment, zero);
        if (ret == NULL) {
            if (extra == 0)
                return (NULL);
            /* Try again, without extra this time. */
            usize = sa2u(size, alignment);
            if (usize == 0)
                return (NULL);
            ret = ipalloc(usize, alignment, zero);
            if (ret == NULL)
                return (NULL);
        }
        /*
         * Copy at most size bytes (not size+extra), since the caller
         * has no expectation that the extra bytes will be reliably
         * preserved.
         */
        copysize = (size < oldsize) ? size : oldsize;
        memcpy(ret, ptr, copysize);
        iqalloc(ptr);
        return (ret);
    }

    if (no_move) {
        if (size <= arena_maxclass) {
            return (arena_ralloc_no_move(ptr, oldsize, size,
                extra, zero));
        } else {
            return (huge_ralloc_no_move(ptr, oldsize, size,
                extra));
        }
    } else {
        if (size + extra <= arena_maxclass) {
            return (arena_ralloc(ptr, oldsize, size, extra,
                alignment, zero, true));
        } else {
            return (huge_ralloc(ptr, oldsize, size, extra,
                alignment, zero));
        }
    }
}

malloc_tsd_externs(thread_allocated, thread_allocated_t)
malloc_tsd_funcs(JEMALLOC_INLINE, thread_allocated, thread_allocated_t,
    THREAD_ALLOCATED_INITIALIZER, malloc_tsd_no_cleanup)
#endif

#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_INLINES
/******************************************************************************/
#endif /* JEMALLOC_INTERNAL_H */