/* jemalloc_internal.h.in revision 88393cb0eb9a046000d20809809d4adac11957ab */
1#ifndef JEMALLOC_INTERNAL_H 2#define JEMALLOC_INTERNAL_H 3#include <math.h> 4#ifdef _WIN32 5# include <windows.h> 6# define ENOENT ERROR_PATH_NOT_FOUND 7# define EINVAL ERROR_BAD_ARGUMENTS 8# define EAGAIN ERROR_OUTOFMEMORY 9# define EPERM ERROR_WRITE_FAULT 10# define EFAULT ERROR_INVALID_ADDRESS 11# define ENOMEM ERROR_NOT_ENOUGH_MEMORY 12# undef ERANGE 13# define ERANGE ERROR_INVALID_DATA 14#else 15# include <sys/param.h> 16# include <sys/mman.h> 17# include <sys/syscall.h> 18# if !defined(SYS_write) && defined(__NR_write) 19# define SYS_write __NR_write 20# endif 21# include <sys/uio.h> 22# include <pthread.h> 23# include <errno.h> 24#endif 25#include <sys/types.h> 26 27#include <limits.h> 28#ifndef SIZE_T_MAX 29# define SIZE_T_MAX SIZE_MAX 30#endif 31#include <stdarg.h> 32#include <stdbool.h> 33#include <stdio.h> 34#include <stdlib.h> 35#include <stdint.h> 36#include <stddef.h> 37#ifndef offsetof 38# define offsetof(type, member) ((size_t)&(((type *)NULL)->member)) 39#endif 40#include <inttypes.h> 41#include <string.h> 42#include <strings.h> 43#include <ctype.h> 44#ifdef _MSC_VER 45# include <io.h> 46typedef intptr_t ssize_t; 47# define PATH_MAX 1024 48# define STDERR_FILENO 2 49# define __func__ __FUNCTION__ 50/* Disable warnings about deprecated system functions */ 51# pragma warning(disable: 4996) 52#else 53# include <unistd.h> 54#endif 55#include <fcntl.h> 56 57#define JEMALLOC_NO_DEMANGLE 58#include "../jemalloc@install_suffix@.h" 59 60#ifdef JEMALLOC_UTRACE 61#include <sys/ktrace.h> 62#endif 63 64#ifdef JEMALLOC_VALGRIND 65#include <valgrind/valgrind.h> 66#include <valgrind/memcheck.h> 67#endif 68 69#include "jemalloc/internal/private_namespace.h" 70 71#ifdef JEMALLOC_CC_SILENCE 72#define UNUSED JEMALLOC_ATTR(unused) 73#else 74#define UNUSED 75#endif 76 77static const bool config_debug = 78#ifdef JEMALLOC_DEBUG 79 true 80#else 81 false 82#endif 83 ; 84static const bool config_dss = 85#ifdef JEMALLOC_DSS 86 true 87#else 88 false 89#endif 90 ; 91static 
const bool config_fill = 92#ifdef JEMALLOC_FILL 93 true 94#else 95 false 96#endif 97 ; 98static const bool config_lazy_lock = 99#ifdef JEMALLOC_LAZY_LOCK 100 true 101#else 102 false 103#endif 104 ; 105static const bool config_prof = 106#ifdef JEMALLOC_PROF 107 true 108#else 109 false 110#endif 111 ; 112static const bool config_prof_libgcc = 113#ifdef JEMALLOC_PROF_LIBGCC 114 true 115#else 116 false 117#endif 118 ; 119static const bool config_prof_libunwind = 120#ifdef JEMALLOC_PROF_LIBUNWIND 121 true 122#else 123 false 124#endif 125 ; 126static const bool config_mremap = 127#ifdef JEMALLOC_MREMAP 128 true 129#else 130 false 131#endif 132 ; 133static const bool config_munmap = 134#ifdef JEMALLOC_MUNMAP 135 true 136#else 137 false 138#endif 139 ; 140static const bool config_stats = 141#ifdef JEMALLOC_STATS 142 true 143#else 144 false 145#endif 146 ; 147static const bool config_tcache = 148#ifdef JEMALLOC_TCACHE 149 true 150#else 151 false 152#endif 153 ; 154static const bool config_tls = 155#ifdef JEMALLOC_TLS 156 true 157#else 158 false 159#endif 160 ; 161static const bool config_utrace = 162#ifdef JEMALLOC_UTRACE 163 true 164#else 165 false 166#endif 167 ; 168static const bool config_valgrind = 169#ifdef JEMALLOC_VALGRIND 170 true 171#else 172 false 173#endif 174 ; 175static const bool config_xmalloc = 176#ifdef JEMALLOC_XMALLOC 177 true 178#else 179 false 180#endif 181 ; 182static const bool config_ivsalloc = 183#ifdef JEMALLOC_IVSALLOC 184 true 185#else 186 false 187#endif 188 ; 189 190#ifdef JEMALLOC_ATOMIC9 191#include <machine/atomic.h> 192#endif 193 194#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN)) 195#include <libkern/OSAtomic.h> 196#endif 197 198#ifdef JEMALLOC_ZONE 199#include <mach/mach_error.h> 200#include <mach/mach_init.h> 201#include <mach/vm_map.h> 202#include <malloc/malloc.h> 203#endif 204 205#define RB_COMPACT 206#include "jemalloc/internal/rb.h" 207#include "jemalloc/internal/qr.h" 208#include "jemalloc/internal/ql.h" 209 210/* 211 
* jemalloc can conceptually be broken into components (arena, tcache, etc.), 212 * but there are circular dependencies that cannot be broken without 213 * substantial performance degradation. In order to reduce the effect on 214 * visual code flow, read the header files in multiple passes, with one of the 215 * following cpp variables defined during each pass: 216 * 217 * JEMALLOC_H_TYPES : Preprocessor-defined constants and psuedo-opaque data 218 * types. 219 * JEMALLOC_H_STRUCTS : Data structures. 220 * JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes. 221 * JEMALLOC_H_INLINES : Inline functions. 222 */ 223/******************************************************************************/ 224#define JEMALLOC_H_TYPES 225 226#define ALLOCM_LG_ALIGN_MASK ((int)0x3f) 227 228#define ZU(z) ((size_t)z) 229 230#ifndef __DECONST 231# define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var)) 232#endif 233 234#ifdef JEMALLOC_DEBUG 235 /* Disable inlining to make debugging easier. */ 236# define JEMALLOC_ALWAYS_INLINE 237# define JEMALLOC_INLINE 238# define inline 239#else 240# define JEMALLOC_ENABLE_INLINE 241# ifdef JEMALLOC_HAVE_ATTR 242# define JEMALLOC_ALWAYS_INLINE \ 243 static JEMALLOC_ATTR(unused) JEMALLOC_ATTR(always_inline) 244# else 245# define JEMALLOC_ALWAYS_INLINE static inline 246# endif 247# define JEMALLOC_INLINE static inline 248# ifdef _MSC_VER 249# define inline _inline 250# endif 251#endif 252 253/* Smallest size class to support. */ 254#define LG_TINY_MIN 3 255#define TINY_MIN (1U << LG_TINY_MIN) 256 257/* 258 * Minimum alignment of allocations is 2^LG_QUANTUM bytes (ignoring tiny size 259 * classes). 
260 */ 261#ifndef LG_QUANTUM 262# if (defined(__i386__) || defined(_M_IX86)) 263# define LG_QUANTUM 4 264# endif 265# ifdef __ia64__ 266# define LG_QUANTUM 4 267# endif 268# ifdef __alpha__ 269# define LG_QUANTUM 4 270# endif 271# ifdef __sparc64__ 272# define LG_QUANTUM 4 273# endif 274# if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64)) 275# define LG_QUANTUM 4 276# endif 277# ifdef __arm__ 278# define LG_QUANTUM 3 279# endif 280# ifdef __hppa__ 281# define LG_QUANTUM 4 282# endif 283# ifdef __mips__ 284# define LG_QUANTUM 3 285# endif 286# ifdef __powerpc__ 287# define LG_QUANTUM 4 288# endif 289# ifdef __s390x__ 290# define LG_QUANTUM 4 291# endif 292# ifdef __SH4__ 293# define LG_QUANTUM 4 294# endif 295# ifdef __tile__ 296# define LG_QUANTUM 4 297# endif 298# ifndef LG_QUANTUM 299# error "No LG_QUANTUM definition for architecture; specify via CPPFLAGS" 300# endif 301#endif 302 303#define QUANTUM ((size_t)(1U << LG_QUANTUM)) 304#define QUANTUM_MASK (QUANTUM - 1) 305 306/* Return the smallest quantum multiple that is >= a. */ 307#define QUANTUM_CEILING(a) \ 308 (((a) + QUANTUM_MASK) & ~QUANTUM_MASK) 309 310#define LONG ((size_t)(1U << LG_SIZEOF_LONG)) 311#define LONG_MASK (LONG - 1) 312 313/* Return the smallest long multiple that is >= a. */ 314#define LONG_CEILING(a) \ 315 (((a) + LONG_MASK) & ~LONG_MASK) 316 317#define SIZEOF_PTR (1U << LG_SIZEOF_PTR) 318#define PTR_MASK (SIZEOF_PTR - 1) 319 320/* Return the smallest (void *) multiple that is >= a. */ 321#define PTR_CEILING(a) \ 322 (((a) + PTR_MASK) & ~PTR_MASK) 323 324/* 325 * Maximum size of L1 cache line. This is used to avoid cache line aliasing. 326 * In addition, this controls the spacing of cacheline-spaced size classes. 327 * 328 * CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can 329 * only handle raw constants. 
330 */ 331#define LG_CACHELINE 6 332#define CACHELINE 64 333#define CACHELINE_MASK (CACHELINE - 1) 334 335/* Return the smallest cacheline multiple that is >= s. */ 336#define CACHELINE_CEILING(s) \ 337 (((s) + CACHELINE_MASK) & ~CACHELINE_MASK) 338 339/* Page size. STATIC_PAGE_SHIFT is determined by the configure script. */ 340#ifdef PAGE_MASK 341# undef PAGE_MASK 342#endif 343#define LG_PAGE STATIC_PAGE_SHIFT 344#define PAGE ((size_t)(1U << STATIC_PAGE_SHIFT)) 345#define PAGE_MASK ((size_t)(PAGE - 1)) 346 347/* Return the smallest pagesize multiple that is >= s. */ 348#define PAGE_CEILING(s) \ 349 (((s) + PAGE_MASK) & ~PAGE_MASK) 350 351/* Return the nearest aligned address at or below a. */ 352#define ALIGNMENT_ADDR2BASE(a, alignment) \ 353 ((void *)((uintptr_t)(a) & (-(alignment)))) 354 355/* Return the offset between a and the nearest aligned address at or below a. */ 356#define ALIGNMENT_ADDR2OFFSET(a, alignment) \ 357 ((size_t)((uintptr_t)(a) & (alignment - 1))) 358 359/* Return the smallest alignment multiple that is >= s. */ 360#define ALIGNMENT_CEILING(s, alignment) \ 361 (((s) + (alignment - 1)) & (-(alignment))) 362 363/* Declare a variable length array */ 364#if __STDC_VERSION__ < 199901L 365# ifdef _MSC_VER 366# include <malloc.h> 367# define alloca _alloca 368# else 369# ifdef JEMALLOC_HAS_ALLOCA_H 370# include <alloca.h> 371# else 372# include <stdlib.h> 373# endif 374# endif 375# define VARIABLE_ARRAY(type, name, count) \ 376 type *name = alloca(sizeof(type) * count) 377#else 378# define VARIABLE_ARRAY(type, name, count) type name[count] 379#endif 380 381#ifdef JEMALLOC_VALGRIND 382/* 383 * The JEMALLOC_VALGRIND_*() macros must be macros rather than functions 384 * so that when Valgrind reports errors, there are no extra stack frames 385 * in the backtraces. 386 * 387 * The size that is reported to valgrind must be consistent through a chain of 388 * malloc..realloc..realloc calls. 
Request size isn't recorded anywhere in 389 * jemalloc, so it is critical that all callers of these macros provide usize 390 * rather than request size. As a result, buffer overflow detection is 391 * technically weakened for the standard API, though it is generally accepted 392 * practice to consider any extra bytes reported by malloc_usable_size() as 393 * usable space. 394 */ 395#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do { \ 396 if (config_valgrind && opt_valgrind && cond) \ 397 VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(ptr), zero); \ 398} while (0) 399#define JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize, \ 400 old_rzsize, zero) do { \ 401 if (config_valgrind && opt_valgrind) { \ 402 size_t rzsize = p2rz(ptr); \ 403 \ 404 if (ptr == old_ptr) { \ 405 VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize, \ 406 usize, rzsize); \ 407 if (zero && old_usize < usize) { \ 408 VALGRIND_MAKE_MEM_DEFINED( \ 409 (void *)((uintptr_t)ptr + \ 410 old_usize), usize - old_usize); \ 411 } \ 412 } else { \ 413 if (old_ptr != NULL) { \ 414 VALGRIND_FREELIKE_BLOCK(old_ptr, \ 415 old_rzsize); \ 416 } \ 417 if (ptr != NULL) { \ 418 size_t copy_size = (old_usize < usize) \ 419 ? 
old_usize : usize; \ 420 size_t tail_size = usize - copy_size; \ 421 VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, \ 422 rzsize, false); \ 423 if (copy_size > 0) { \ 424 VALGRIND_MAKE_MEM_DEFINED(ptr, \ 425 copy_size); \ 426 } \ 427 if (zero && tail_size > 0) { \ 428 VALGRIND_MAKE_MEM_DEFINED( \ 429 (void *)((uintptr_t)ptr + \ 430 copy_size), tail_size); \ 431 } \ 432 } \ 433 } \ 434 } \ 435} while (0) 436#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do { \ 437 if (config_valgrind && opt_valgrind) \ 438 VALGRIND_FREELIKE_BLOCK(ptr, rzsize); \ 439} while (0) 440#else 441#define RUNNING_ON_VALGRIND ((unsigned)0) 442#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed) 443#define VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB) 444#define VALGRIND_FREELIKE_BLOCK(addr, rzB) 445#define VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr, _qzz_len) 446#define VALGRIND_MAKE_MEM_DEFINED(_qzz_addr, _qzz_len) 447#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) 448#define JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize, \ 449 old_rzsize, zero) 450#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) 451#endif 452 453#include "jemalloc/internal/util.h" 454#include "jemalloc/internal/atomic.h" 455#include "jemalloc/internal/prng.h" 456#include "jemalloc/internal/ckh.h" 457#include "jemalloc/internal/size_classes.h" 458#include "jemalloc/internal/stats.h" 459#include "jemalloc/internal/ctl.h" 460#include "jemalloc/internal/mutex.h" 461#include "jemalloc/internal/tsd.h" 462#include "jemalloc/internal/mb.h" 463#include "jemalloc/internal/extent.h" 464#include "jemalloc/internal/arena.h" 465#include "jemalloc/internal/bitmap.h" 466#include "jemalloc/internal/base.h" 467#include "jemalloc/internal/chunk.h" 468#include "jemalloc/internal/huge.h" 469#include "jemalloc/internal/rtree.h" 470#include "jemalloc/internal/tcache.h" 471#include "jemalloc/internal/hash.h" 472#include "jemalloc/internal/quarantine.h" 473#include "jemalloc/internal/prof.h" 474 475#undef JEMALLOC_H_TYPES 
476/******************************************************************************/ 477#define JEMALLOC_H_STRUCTS 478 479#include "jemalloc/internal/util.h" 480#include "jemalloc/internal/atomic.h" 481#include "jemalloc/internal/prng.h" 482#include "jemalloc/internal/ckh.h" 483#include "jemalloc/internal/size_classes.h" 484#include "jemalloc/internal/stats.h" 485#include "jemalloc/internal/ctl.h" 486#include "jemalloc/internal/mutex.h" 487#include "jemalloc/internal/tsd.h" 488#include "jemalloc/internal/mb.h" 489#include "jemalloc/internal/bitmap.h" 490#include "jemalloc/internal/extent.h" 491#include "jemalloc/internal/arena.h" 492#include "jemalloc/internal/base.h" 493#include "jemalloc/internal/chunk.h" 494#include "jemalloc/internal/huge.h" 495#include "jemalloc/internal/rtree.h" 496#include "jemalloc/internal/tcache.h" 497#include "jemalloc/internal/hash.h" 498#include "jemalloc/internal/quarantine.h" 499#include "jemalloc/internal/prof.h" 500 501typedef struct { 502 uint64_t allocated; 503 uint64_t deallocated; 504} thread_allocated_t; 505/* 506 * The JEMALLOC_CONCAT() wrapper is necessary to pass {0, 0} via a cpp macro 507 * argument. 508 */ 509#define THREAD_ALLOCATED_INITIALIZER JEMALLOC_CONCAT({0, 0}) 510 511#undef JEMALLOC_H_STRUCTS 512/******************************************************************************/ 513#define JEMALLOC_H_EXTERNS 514 515extern bool opt_abort; 516extern bool opt_junk; 517extern size_t opt_quarantine; 518extern bool opt_redzone; 519extern bool opt_utrace; 520extern bool opt_valgrind; 521extern bool opt_xmalloc; 522extern bool opt_zero; 523extern size_t opt_narenas; 524 525/* Number of CPUs. */ 526extern unsigned ncpus; 527 528/* Protects arenas initialization (arenas, arenas_total). */ 529extern malloc_mutex_t arenas_lock; 530/* 531 * Arenas that are used to service external requests. Not all elements of the 532 * arenas array are necessarily used; arenas are created lazily as needed. 
533 * 534 * arenas[0..narenas_auto) are used for automatic multiplexing of threads and 535 * arenas. arenas[narenas_auto..narenas_total) are only used if the application 536 * takes some action to create them and allocate from them. 537 */ 538extern arena_t **arenas; 539extern unsigned narenas_total; 540extern unsigned narenas_auto; /* Read-only after initialization. */ 541 542arena_t *arenas_extend(unsigned ind); 543void arenas_cleanup(void *arg); 544arena_t *choose_arena_hard(void); 545void jemalloc_prefork(void); 546void jemalloc_postfork_parent(void); 547void jemalloc_postfork_child(void); 548 549#include "jemalloc/internal/util.h" 550#include "jemalloc/internal/atomic.h" 551#include "jemalloc/internal/prng.h" 552#include "jemalloc/internal/ckh.h" 553#include "jemalloc/internal/size_classes.h" 554#include "jemalloc/internal/stats.h" 555#include "jemalloc/internal/ctl.h" 556#include "jemalloc/internal/mutex.h" 557#include "jemalloc/internal/tsd.h" 558#include "jemalloc/internal/mb.h" 559#include "jemalloc/internal/bitmap.h" 560#include "jemalloc/internal/extent.h" 561#include "jemalloc/internal/arena.h" 562#include "jemalloc/internal/base.h" 563#include "jemalloc/internal/chunk.h" 564#include "jemalloc/internal/huge.h" 565#include "jemalloc/internal/rtree.h" 566#include "jemalloc/internal/tcache.h" 567#include "jemalloc/internal/hash.h" 568#include "jemalloc/internal/quarantine.h" 569#include "jemalloc/internal/prof.h" 570 571#undef JEMALLOC_H_EXTERNS 572/******************************************************************************/ 573#define JEMALLOC_H_INLINES 574 575#include "jemalloc/internal/util.h" 576#include "jemalloc/internal/atomic.h" 577#include "jemalloc/internal/prng.h" 578#include "jemalloc/internal/ckh.h" 579#include "jemalloc/internal/size_classes.h" 580#include "jemalloc/internal/stats.h" 581#include "jemalloc/internal/ctl.h" 582#include "jemalloc/internal/mutex.h" 583#include "jemalloc/internal/tsd.h" 584#include "jemalloc/internal/mb.h" 
585#include "jemalloc/internal/extent.h" 586#include "jemalloc/internal/base.h" 587#include "jemalloc/internal/chunk.h" 588#include "jemalloc/internal/huge.h" 589 590#ifndef JEMALLOC_ENABLE_INLINE 591malloc_tsd_protos(JEMALLOC_ATTR(unused), arenas, arena_t *) 592 593size_t s2u(size_t size); 594size_t sa2u(size_t size, size_t alignment); 595unsigned narenas_total_get(void); 596arena_t *choose_arena(arena_t *arena); 597#endif 598 599#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_)) 600/* 601 * Map of pthread_self() --> arenas[???], used for selecting an arena to use 602 * for allocations. 603 */ 604malloc_tsd_externs(arenas, arena_t *) 605malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, arenas, arena_t *, NULL, 606 arenas_cleanup) 607 608/* 609 * Compute usable size that would result from allocating an object with the 610 * specified size. 611 */ 612JEMALLOC_ALWAYS_INLINE size_t 613s2u(size_t size) 614{ 615 616 if (size <= SMALL_MAXCLASS) 617 return (arena_bin_info[SMALL_SIZE2BIN(size)].reg_size); 618 if (size <= arena_maxclass) 619 return (PAGE_CEILING(size)); 620 return (CHUNK_CEILING(size)); 621} 622 623/* 624 * Compute usable size that would result from allocating an object with the 625 * specified size and alignment. 626 */ 627JEMALLOC_ALWAYS_INLINE size_t 628sa2u(size_t size, size_t alignment) 629{ 630 size_t usize; 631 632 assert(alignment != 0 && ((alignment - 1) & alignment) == 0); 633 634 /* 635 * Round size up to the nearest multiple of alignment. 636 * 637 * This done, we can take advantage of the fact that for each small 638 * size class, every object is aligned at the smallest power of two 639 * that is non-zero in the base two representation of the size. 
For 640 * example: 641 * 642 * Size | Base 2 | Minimum alignment 643 * -----+----------+------------------ 644 * 96 | 1100000 | 32 645 * 144 | 10100000 | 32 646 * 192 | 11000000 | 64 647 */ 648 usize = ALIGNMENT_CEILING(size, alignment); 649 /* 650 * (usize < size) protects against the combination of maximal 651 * alignment and size greater than maximal alignment. 652 */ 653 if (usize < size) { 654 /* size_t overflow. */ 655 return (0); 656 } 657 658 if (usize <= arena_maxclass && alignment <= PAGE) { 659 if (usize <= SMALL_MAXCLASS) 660 return (arena_bin_info[SMALL_SIZE2BIN(usize)].reg_size); 661 return (PAGE_CEILING(usize)); 662 } else { 663 size_t run_size; 664 665 /* 666 * We can't achieve subpage alignment, so round up alignment 667 * permanently; it makes later calculations simpler. 668 */ 669 alignment = PAGE_CEILING(alignment); 670 usize = PAGE_CEILING(size); 671 /* 672 * (usize < size) protects against very large sizes within 673 * PAGE of SIZE_T_MAX. 674 * 675 * (usize + alignment < usize) protects against the 676 * combination of maximal alignment and usize large enough 677 * to cause overflow. This is similar to the first overflow 678 * check above, but it needs to be repeated due to the new 679 * usize value, which may now be *equal* to maximal 680 * alignment, whereas before we only detected overflow if the 681 * original size was *greater* than maximal alignment. 682 */ 683 if (usize < size || usize + alignment < usize) { 684 /* size_t overflow. */ 685 return (0); 686 } 687 688 /* 689 * Calculate the size of the over-size run that arena_palloc() 690 * would need to allocate in order to guarantee the alignment. 691 * If the run wouldn't fit within a chunk, round up to a huge 692 * allocation size. 
693 */ 694 run_size = usize + alignment - PAGE; 695 if (run_size <= arena_maxclass) 696 return (PAGE_CEILING(usize)); 697 return (CHUNK_CEILING(usize)); 698 } 699} 700 701JEMALLOC_INLINE unsigned 702narenas_total_get(void) 703{ 704 unsigned narenas; 705 706 malloc_mutex_lock(&arenas_lock); 707 narenas = narenas_total; 708 malloc_mutex_unlock(&arenas_lock); 709 710 return (narenas); 711} 712 713/* Choose an arena based on a per-thread value. */ 714JEMALLOC_INLINE arena_t * 715choose_arena(arena_t *arena) 716{ 717 arena_t *ret; 718 719 if (arena != NULL) 720 return (arena); 721 722 if ((ret = *arenas_tsd_get()) == NULL) { 723 ret = choose_arena_hard(); 724 assert(ret != NULL); 725 } 726 727 return (ret); 728} 729#endif 730 731#include "jemalloc/internal/bitmap.h" 732#include "jemalloc/internal/rtree.h" 733/* 734 * Include arena.h twice in order to resolve circular dependencies with 735 * tcache.h. 736 */ 737#define JEMALLOC_ARENA_INLINE_A 738#include "jemalloc/internal/arena.h" 739#undef JEMALLOC_ARENA_INLINE_A 740#include "jemalloc/internal/tcache.h" 741#define JEMALLOC_ARENA_INLINE_B 742#include "jemalloc/internal/arena.h" 743#undef JEMALLOC_ARENA_INLINE_B 744#include "jemalloc/internal/hash.h" 745#include "jemalloc/internal/quarantine.h" 746 747#ifndef JEMALLOC_ENABLE_INLINE 748void *imallocx(size_t size, bool try_tcache, arena_t *arena); 749void *imalloc(size_t size); 750void *icallocx(size_t size, bool try_tcache, arena_t *arena); 751void *icalloc(size_t size); 752void *ipallocx(size_t usize, size_t alignment, bool zero, bool try_tcache, 753 arena_t *arena); 754void *ipalloc(size_t usize, size_t alignment, bool zero); 755size_t isalloc(const void *ptr, bool demote); 756size_t ivsalloc(const void *ptr, bool demote); 757size_t u2rz(size_t usize); 758size_t p2rz(const void *ptr); 759void idallocx(void *ptr, bool try_tcache); 760void idalloc(void *ptr); 761void iqallocx(void *ptr, bool try_tcache); 762void iqalloc(void *ptr); 763void *irallocx(void *ptr, size_t 
size, size_t extra, size_t alignment, 764 bool zero, bool no_move, bool try_tcache_alloc, bool try_tcache_dalloc, 765 arena_t *arena); 766void *iralloc(void *ptr, size_t size, size_t extra, size_t alignment, 767 bool zero, bool no_move); 768malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t) 769#endif 770 771#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_)) 772JEMALLOC_ALWAYS_INLINE void * 773imallocx(size_t size, bool try_tcache, arena_t *arena) 774{ 775 776 assert(size != 0); 777 778 if (size <= arena_maxclass) 779 return (arena_malloc(arena, size, false, try_tcache)); 780 else 781 return (huge_malloc(size, false)); 782} 783 784JEMALLOC_ALWAYS_INLINE void * 785imalloc(size_t size) 786{ 787 788 return (imallocx(size, true, NULL)); 789} 790 791JEMALLOC_ALWAYS_INLINE void * 792icallocx(size_t size, bool try_tcache, arena_t *arena) 793{ 794 795 if (size <= arena_maxclass) 796 return (arena_malloc(arena, size, true, try_tcache)); 797 else 798 return (huge_malloc(size, true)); 799} 800 801JEMALLOC_ALWAYS_INLINE void * 802icalloc(size_t size) 803{ 804 805 return (icallocx(size, true, NULL)); 806} 807 808JEMALLOC_ALWAYS_INLINE void * 809ipallocx(size_t usize, size_t alignment, bool zero, bool try_tcache, 810 arena_t *arena) 811{ 812 void *ret; 813 814 assert(usize != 0); 815 assert(usize == sa2u(usize, alignment)); 816 817 if (usize <= arena_maxclass && alignment <= PAGE) 818 ret = arena_malloc(arena, usize, zero, try_tcache); 819 else { 820 if (usize <= arena_maxclass) { 821 ret = arena_palloc(choose_arena(arena), usize, 822 alignment, zero); 823 } else if (alignment <= chunksize) 824 ret = huge_malloc(usize, zero); 825 else 826 ret = huge_palloc(usize, alignment, zero); 827 } 828 829 assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret); 830 return (ret); 831} 832 833JEMALLOC_ALWAYS_INLINE void * 834ipalloc(size_t usize, size_t alignment, bool zero) 835{ 836 837 return (ipallocx(usize, alignment, zero, true, NULL)); 838} 839 840/* 
841 * Typical usage: 842 * void *ptr = [...] 843 * size_t sz = isalloc(ptr, config_prof); 844 */ 845JEMALLOC_ALWAYS_INLINE size_t 846isalloc(const void *ptr, bool demote) 847{ 848 size_t ret; 849 arena_chunk_t *chunk; 850 851 assert(ptr != NULL); 852 /* Demotion only makes sense if config_prof is true. */ 853 assert(config_prof || demote == false); 854 855 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); 856 if (chunk != ptr) 857 ret = arena_salloc(ptr, demote); 858 else 859 ret = huge_salloc(ptr); 860 861 return (ret); 862} 863 864JEMALLOC_ALWAYS_INLINE size_t 865ivsalloc(const void *ptr, bool demote) 866{ 867 868 /* Return 0 if ptr is not within a chunk managed by jemalloc. */ 869 if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == NULL) 870 return (0); 871 872 return (isalloc(ptr, demote)); 873} 874 875JEMALLOC_INLINE size_t 876u2rz(size_t usize) 877{ 878 size_t ret; 879 880 if (usize <= SMALL_MAXCLASS) { 881 size_t binind = SMALL_SIZE2BIN(usize); 882 ret = arena_bin_info[binind].redzone_size; 883 } else 884 ret = 0; 885 886 return (ret); 887} 888 889JEMALLOC_INLINE size_t 890p2rz(const void *ptr) 891{ 892 size_t usize = isalloc(ptr, false); 893 894 return (u2rz(usize)); 895} 896 897JEMALLOC_ALWAYS_INLINE void 898idallocx(void *ptr, bool try_tcache) 899{ 900 arena_chunk_t *chunk; 901 902 assert(ptr != NULL); 903 904 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); 905 if (chunk != ptr) 906 arena_dalloc(chunk->arena, chunk, ptr, try_tcache); 907 else 908 huge_dalloc(ptr, true); 909} 910 911JEMALLOC_ALWAYS_INLINE void 912idalloc(void *ptr) 913{ 914 915 idallocx(ptr, true); 916} 917 918JEMALLOC_ALWAYS_INLINE void 919iqallocx(void *ptr, bool try_tcache) 920{ 921 922 if (config_fill && opt_quarantine) 923 quarantine(ptr); 924 else 925 idallocx(ptr, try_tcache); 926} 927 928JEMALLOC_ALWAYS_INLINE void 929iqalloc(void *ptr) 930{ 931 932 iqallocx(ptr, true); 933} 934 935JEMALLOC_ALWAYS_INLINE void * 936irallocx(void *ptr, size_t size, size_t extra, size_t 
alignment, bool zero, 937 bool no_move, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena) 938{ 939 void *ret; 940 size_t oldsize; 941 942 assert(ptr != NULL); 943 assert(size != 0); 944 945 oldsize = isalloc(ptr, config_prof); 946 947 if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1)) 948 != 0) { 949 size_t usize, copysize; 950 951 /* 952 * Existing object alignment is inadequate; allocate new space 953 * and copy. 954 */ 955 if (no_move) 956 return (NULL); 957 usize = sa2u(size + extra, alignment); 958 if (usize == 0) 959 return (NULL); 960 ret = ipallocx(usize, alignment, zero, try_tcache_alloc, arena); 961 if (ret == NULL) { 962 if (extra == 0) 963 return (NULL); 964 /* Try again, without extra this time. */ 965 usize = sa2u(size, alignment); 966 if (usize == 0) 967 return (NULL); 968 ret = ipallocx(usize, alignment, zero, try_tcache_alloc, 969 arena); 970 if (ret == NULL) 971 return (NULL); 972 } 973 /* 974 * Copy at most size bytes (not size+extra), since the caller 975 * has no expectation that the extra bytes will be reliably 976 * preserved. 977 */ 978 copysize = (size < oldsize) ? 
size : oldsize; 979 memcpy(ret, ptr, copysize); 980 iqallocx(ptr, try_tcache_dalloc); 981 return (ret); 982 } 983 984 if (no_move) { 985 if (size <= arena_maxclass) { 986 return (arena_ralloc_no_move(ptr, oldsize, size, 987 extra, zero)); 988 } else { 989 return (huge_ralloc_no_move(ptr, oldsize, size, 990 extra)); 991 } 992 } else { 993 if (size + extra <= arena_maxclass) { 994 return (arena_ralloc(arena, ptr, oldsize, size, extra, 995 alignment, zero, try_tcache_alloc, 996 try_tcache_dalloc)); 997 } else { 998 return (huge_ralloc(ptr, oldsize, size, extra, 999 alignment, zero, try_tcache_dalloc)); 1000 } 1001 } 1002} 1003 1004JEMALLOC_ALWAYS_INLINE void * 1005iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero, 1006 bool no_move) 1007{ 1008 1009 return (irallocx(ptr, size, extra, alignment, zero, no_move, true, true, 1010 NULL)); 1011} 1012 1013malloc_tsd_externs(thread_allocated, thread_allocated_t) 1014malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, thread_allocated, thread_allocated_t, 1015 THREAD_ALLOCATED_INITIALIZER, malloc_tsd_no_cleanup) 1016#endif 1017 1018#include "jemalloc/internal/prof.h" 1019 1020#undef JEMALLOC_H_INLINES 1021/******************************************************************************/ 1022#endif /* JEMALLOC_INTERNAL_H */ 1023