jemalloc_internal.h.in revision 543abf7e6c7de06fe9654e91190b5c44a11b065e
#ifndef JEMALLOC_INTERNAL_H
#define JEMALLOC_INTERNAL_H
#include <math.h>
#ifdef _WIN32
#  include <windows.h>
#  define ENOENT ERROR_PATH_NOT_FOUND
#  define EINVAL ERROR_BAD_ARGUMENTS
#  define EAGAIN ERROR_OUTOFMEMORY
#  define EPERM ERROR_WRITE_FAULT
#  define EFAULT ERROR_INVALID_ADDRESS
#  define ENOMEM ERROR_NOT_ENOUGH_MEMORY
#  undef ERANGE
#  define ERANGE ERROR_INVALID_DATA
#else
#  include <sys/param.h>
#  include <sys/mman.h>
#  include <sys/syscall.h>
#  if !defined(SYS_write) && defined(__NR_write)
#    define SYS_write __NR_write
#  endif
#  include <sys/uio.h>
#  include <pthread.h>
#  include <errno.h>
#endif
#include <sys/types.h>

#include <limits.h>
#ifndef SIZE_T_MAX
#  define SIZE_T_MAX SIZE_MAX
#endif
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>
#ifndef offsetof
#  define offsetof(type, member) ((size_t)&(((type *)NULL)->member))
#endif
#include <inttypes.h>
#include <string.h>
#include <strings.h>
#include <ctype.h>
#ifdef _MSC_VER
#  include <io.h>
typedef intptr_t ssize_t;
#  define PATH_MAX 1024
#  define STDERR_FILENO 2
#  define __func__ __FUNCTION__
/* Disable warnings about deprecated system functions. */
#  pragma warning(disable: 4996)
#else
#  include <unistd.h>
#endif
#include <fcntl.h>

#define JEMALLOC_NO_DEMANGLE
#include "../jemalloc@install_suffix@.h"

#ifdef JEMALLOC_UTRACE
#include <sys/ktrace.h>
#endif

#ifdef JEMALLOC_VALGRIND
#include <valgrind/valgrind.h>
#include <valgrind/memcheck.h>
#endif

#include "jemalloc/internal/private_namespace.h"

#ifdef JEMALLOC_CC_SILENCE
#define UNUSED JEMALLOC_ATTR(unused)
#else
#define UNUSED
#endif

static const bool config_debug =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
static const bool config_dss =
#ifdef JEMALLOC_DSS
    true
#else
    false
#endif
    ;
static const bool config_fill =
#ifdef JEMALLOC_FILL
    true
#else
    false
#endif
    ;
static const bool config_lazy_lock =
#ifdef JEMALLOC_LAZY_LOCK
    true
#else
    false
#endif
    ;
static const bool config_prof =
#ifdef JEMALLOC_PROF
    true
#else
    false
#endif
    ;
static const bool config_prof_libgcc =
#ifdef JEMALLOC_PROF_LIBGCC
    true
#else
    false
#endif
    ;
static const bool config_prof_libunwind =
#ifdef JEMALLOC_PROF_LIBUNWIND
    true
#else
    false
#endif
    ;
static const bool config_mremap =
#ifdef JEMALLOC_MREMAP
    true
#else
    false
#endif
    ;
static const bool config_munmap =
#ifdef JEMALLOC_MUNMAP
    true
#else
    false
#endif
    ;
static const bool config_stats =
#ifdef JEMALLOC_STATS
    true
#else
    false
#endif
    ;
static const bool config_tcache =
#ifdef JEMALLOC_TCACHE
    true
#else
    false
#endif
    ;
static const bool config_tls =
#ifdef JEMALLOC_TLS
    true
#else
    false
#endif
    ;
static const bool config_utrace =
#ifdef JEMALLOC_UTRACE
    true
#else
    false
#endif
    ;
static const bool config_valgrind =
#ifdef JEMALLOC_VALGRIND
    true
#else
    false
#endif
    ;
static const bool config_xmalloc =
#ifdef JEMALLOC_XMALLOC
    true
#else
    false
#endif
    ;
static const bool config_ivsalloc =
#ifdef JEMALLOC_IVSALLOC
    true
#else
    false
#endif
    ;
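
/*
 * The config_* constants above mirror the JEMALLOC_* feature macros chosen by
 * the configure script.  They let feature-conditional code be written as
 * ordinary C branches, e.g.
 *
 *     if (config_stats)
 *         update_stats();
 *
 * rather than as #ifdef blocks: the compiler discards the dead branch, and
 * code for disabled features still gets syntax-checked.  (update_stats() is a
 * hypothetical function, shown only to illustrate the idiom.)
 */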

#ifdef JEMALLOC_ATOMIC9
#include <machine/atomic.h>
#endif

#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
#include <libkern/OSAtomic.h>
#endif

#ifdef JEMALLOC_ZONE
#include <mach/mach_error.h>
#include <mach/mach_init.h>
#include <mach/vm_map.h>
#include <malloc/malloc.h>
#endif

#define RB_COMPACT
#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/qr.h"
#include "jemalloc/internal/ql.h"

/*
 * jemalloc can conceptually be broken into components (arena, tcache, etc.),
 * but there are circular dependencies that cannot be broken without
 * substantial performance degradation.  In order to reduce the effect on
 * visual code flow, read the header files in multiple passes, with one of the
 * following cpp variables defined during each pass:
 *
 *   JEMALLOC_H_TYPES   : Preprocessor-defined constants and pseudo-opaque
 *                        data types.
 *   JEMALLOC_H_STRUCTS : Data structures.
 *   JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
 *   JEMALLOC_H_INLINES : Inline functions.
 */
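
/*
 * Sketch of the resulting pattern ("foo" is a hypothetical component, shown
 * only for illustration): each internal header included below partitions its
 * contents with these guards, so that one textual inclusion per pass picks up
 * only the matching section:
 *
 *     #ifdef JEMALLOC_H_TYPES
 *     typedef struct foo_s foo_t;
 *     #endif
 *     #ifdef JEMALLOC_H_STRUCTS
 *     struct foo_s { int bar; };
 *     #endif
 *     #ifdef JEMALLOC_H_EXTERNS
 *     void foo_init(foo_t *foo);
 *     #endif
 *     #ifdef JEMALLOC_H_INLINES
 *     JEMALLOC_INLINE int foo_bar(foo_t *foo) { return (foo->bar); }
 *     #endif
 */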
/******************************************************************************/
#define JEMALLOC_H_TYPES

#define ALLOCM_LG_ALIGN_MASK ((int)0x3f)

#define ZU(z) ((size_t)z)
#define QU(q) ((uint64_t)q)

#ifndef __DECONST
#  define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
#endif

/*
 * JEMALLOC_ALWAYS_INLINE is used within header files for functions that are
 * static inline functions if inlining is enabled, and single-definition
 * library-private functions if inlining is disabled.
 *
 * JEMALLOC_ALWAYS_INLINE_C is for use in .c files, in which case the denoted
 * functions are always static, regardless of whether inlining is enabled.
 */
#ifdef JEMALLOC_DEBUG
   /* Disable inlining to make debugging easier. */
#  define JEMALLOC_ALWAYS_INLINE
#  define JEMALLOC_ALWAYS_INLINE_C static
#  define JEMALLOC_INLINE
#  define inline
#else
#  define JEMALLOC_ENABLE_INLINE
#  ifdef JEMALLOC_HAVE_ATTR
#    define JEMALLOC_ALWAYS_INLINE \
        static inline JEMALLOC_ATTR(unused) JEMALLOC_ATTR(always_inline)
#    define JEMALLOC_ALWAYS_INLINE_C \
        static inline JEMALLOC_ATTR(always_inline)
#  else
#    define JEMALLOC_ALWAYS_INLINE static inline
#    define JEMALLOC_ALWAYS_INLINE_C static inline
#  endif
#  define JEMALLOC_INLINE static inline
#  ifdef _MSC_VER
#    define inline _inline
#  endif
#endif
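
/*
 * Sketch of how these macros are used (a hypothetical function, for
 * illustration; the real instances appear in the JEMALLOC_H_INLINES sections
 * below):
 *
 *     #ifndef JEMALLOC_ENABLE_INLINE
 *     size_t foo(size_t size);
 *     #endif
 *     #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
 *     JEMALLOC_INLINE size_t
 *     foo(size_t size)
 *     {
 *         ...
 *     }
 *     #endif
 *
 * With inlining disabled, JEMALLOC_INLINE expands to nothing, so the body is
 * compiled exactly once, in the translation unit that defines JEMALLOC_C_;
 * all other translation units see only the prototype.
 */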

/* Smallest size class to support. */
#define LG_TINY_MIN 3
#define TINY_MIN (1U << LG_TINY_MIN)

/*
 * Minimum alignment of allocations is 2^LG_QUANTUM bytes (ignoring tiny size
 * classes).
 */
#ifndef LG_QUANTUM
#  if (defined(__i386__) || defined(_M_IX86))
#    define LG_QUANTUM 4
#  endif
#  ifdef __ia64__
#    define LG_QUANTUM 4
#  endif
#  ifdef __alpha__
#    define LG_QUANTUM 4
#  endif
#  ifdef __sparc64__
#    define LG_QUANTUM 4
#  endif
#  if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64))
#    define LG_QUANTUM 4
#  endif
#  ifdef __arm__
#    define LG_QUANTUM 3
#  endif
#  ifdef __aarch64__
#    define LG_QUANTUM 4
#  endif
#  ifdef __hppa__
#    define LG_QUANTUM 4
#  endif
#  ifdef __mips__
#    define LG_QUANTUM 3
#  endif
#  ifdef __powerpc__
#    define LG_QUANTUM 4
#  endif
#  ifdef __s390__
#    define LG_QUANTUM 4
#  endif
#  ifdef __SH4__
#    define LG_QUANTUM 4
#  endif
#  ifdef __tile__
#    define LG_QUANTUM 4
#  endif
#  ifndef LG_QUANTUM
#    error "No LG_QUANTUM definition for architecture; specify via CPPFLAGS"
#  endif
#endif

#define QUANTUM ((size_t)(1U << LG_QUANTUM))
#define QUANTUM_MASK (QUANTUM - 1)

/* Return the smallest quantum multiple that is >= a. */
#define QUANTUM_CEILING(a) \
    (((a) + QUANTUM_MASK) & ~QUANTUM_MASK)

#define LONG ((size_t)(1U << LG_SIZEOF_LONG))
#define LONG_MASK (LONG - 1)

/* Return the smallest long multiple that is >= a. */
#define LONG_CEILING(a) \
    (((a) + LONG_MASK) & ~LONG_MASK)

#define SIZEOF_PTR (1U << LG_SIZEOF_PTR)
#define PTR_MASK (SIZEOF_PTR - 1)

/* Return the smallest (void *) multiple that is >= a. */
#define PTR_CEILING(a) \
    (((a) + PTR_MASK) & ~PTR_MASK)

/*
 * Maximum size of L1 cache line.  This is used to avoid cache line aliasing.
 * In addition, this controls the spacing of cacheline-spaced size classes.
 *
 * CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can
 * only handle raw constants.
 */
#define LG_CACHELINE 6
#define CACHELINE 64
#define CACHELINE_MASK (CACHELINE - 1)

/* Return the smallest cacheline multiple that is >= s. */
#define CACHELINE_CEILING(s) \
    (((s) + CACHELINE_MASK) & ~CACHELINE_MASK)

/* Page size.  STATIC_PAGE_SHIFT is determined by the configure script. */
#ifdef PAGE_MASK
#  undef PAGE_MASK
#endif
#define LG_PAGE STATIC_PAGE_SHIFT
#define PAGE ((size_t)(1U << STATIC_PAGE_SHIFT))
#define PAGE_MASK ((size_t)(PAGE - 1))

/* Return the smallest pagesize multiple that is >= s. */
#define PAGE_CEILING(s) \
    (((s) + PAGE_MASK) & ~PAGE_MASK)
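
/*
 * Worked example of the *_CEILING bit trick above (illustrative numbers,
 * assuming LG_QUANTUM == 4, i.e. QUANTUM == 16):
 *
 *     QUANTUM_CEILING(1)  == (1  + 15) & ~15 == 16
 *     QUANTUM_CEILING(16) == (16 + 15) & ~15 == 16
 *     QUANTUM_CEILING(17) == (17 + 15) & ~15 == 32
 *
 * The trick relies on each *_MASK being one less than a power of two.
 */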

/* Return the nearest aligned address at or below a. */
#define ALIGNMENT_ADDR2BASE(a, alignment) \
    ((void *)((uintptr_t)(a) & (-(alignment))))

/* Return the offset between a and the nearest aligned address at or below a. */
#define ALIGNMENT_ADDR2OFFSET(a, alignment) \
    ((size_t)((uintptr_t)(a) & (alignment - 1)))

/* Return the smallest alignment multiple that is >= s. */
#define ALIGNMENT_CEILING(s, alignment) \
    (((s) + (alignment - 1)) & (-(alignment)))

/* Declare a variable-length array. */
#if __STDC_VERSION__ < 199901L
#  ifdef _MSC_VER
#    include <malloc.h>
#    define alloca _alloca
#  else
#    ifdef JEMALLOC_HAS_ALLOCA_H
#      include <alloca.h>
#    else
#      include <stdlib.h>
#    endif
#  endif
#  define VARIABLE_ARRAY(type, name, count) \
    type *name = alloca(sizeof(type) * count)
#else
#  define VARIABLE_ARRAY(type, name, count) type name[count]
#endif

#ifdef JEMALLOC_VALGRIND
/*
 * The JEMALLOC_VALGRIND_*() macros must be macros rather than functions
 * so that when Valgrind reports errors, there are no extra stack frames
 * in the backtraces.
 *
 * The size that is reported to valgrind must be consistent through a chain of
 * malloc..realloc..realloc calls.  Request size isn't recorded anywhere in
 * jemalloc, so it is critical that all callers of these macros provide usize
 * rather than request size.  As a result, buffer overflow detection is
 * technically weakened for the standard API, though it is generally accepted
 * practice to consider any extra bytes reported by malloc_usable_size() as
 * usable space.
 */
#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do { \
    if (config_valgrind && opt_valgrind && cond) \
        VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(ptr), zero); \
} while (0)
#define JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize, \
    old_rzsize, zero) do { \
    if (config_valgrind && opt_valgrind) { \
        size_t rzsize = p2rz(ptr); \
\
        if (ptr == old_ptr) { \
            VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize, \
                usize, rzsize); \
            if (zero && old_usize < usize) { \
                VALGRIND_MAKE_MEM_DEFINED( \
                    (void *)((uintptr_t)ptr + \
                    old_usize), usize - old_usize); \
            } \
        } else { \
            if (old_ptr != NULL) { \
                VALGRIND_FREELIKE_BLOCK(old_ptr, \
                    old_rzsize); \
            } \
            if (ptr != NULL) { \
                size_t copy_size = (old_usize < usize) \
                    ? old_usize : usize; \
                size_t tail_size = usize - copy_size; \
                VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, \
                    rzsize, false); \
                if (copy_size > 0) { \
                    VALGRIND_MAKE_MEM_DEFINED(ptr, \
                        copy_size); \
                } \
                if (zero && tail_size > 0) { \
                    VALGRIND_MAKE_MEM_DEFINED( \
                        (void *)((uintptr_t)ptr + \
                        copy_size), tail_size); \
                } \
            } \
        } \
    } \
} while (0)
#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do { \
    if (config_valgrind && opt_valgrind) \
        VALGRIND_FREELIKE_BLOCK(ptr, rzsize); \
} while (0)
#else
#define RUNNING_ON_VALGRIND ((unsigned)0)
#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed) \
    do {} while (0)
#define VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB) \
    do {} while (0)
#define VALGRIND_FREELIKE_BLOCK(addr, rzB) do {} while (0)
#define VALGRIND_MAKE_MEM_NOACCESS(_qzz_addr, _qzz_len) do {} while (0)
#define VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr, _qzz_len) do {} while (0)
#define VALGRIND_MAKE_MEM_DEFINED(_qzz_addr, _qzz_len) do {} while (0)
#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do {} while (0)
#define JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize, \
    old_rzsize, zero) do {} while (0)
#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {} while (0)
#endif
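
/*
 * Usage sketch (mirrors how the allocation paths in jemalloc.c are expected
 * to invoke these hooks; note that it is the usable size, not the request
 * size, that gets reported):
 *
 *     usize = s2u(size);
 *     ret = imalloc(size);
 *     JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
 */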

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_TYPES
/******************************************************************************/
#define JEMALLOC_H_STRUCTS

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

typedef struct {
    uint64_t allocated;
    uint64_t deallocated;
} thread_allocated_t;
/*
 * The JEMALLOC_CONCAT() wrapper is necessary to pass {0, 0} via a cpp macro
 * argument.
 */
#define THREAD_ALLOCATED_INITIALIZER JEMALLOC_CONCAT({0, 0})
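
/*
 * Illustration: a brace-enclosed initializer contains a comma, which the
 * preprocessor would otherwise treat as an argument separator.  Wrapping it
 * in JEMALLOC_CONCAT() (defined in the public jemalloc headers) lets the
 * initializer reach malloc_tsd_funcs() below as a single macro argument:
 *
 *     malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, thread_allocated,
 *         thread_allocated_t, THREAD_ALLOCATED_INITIALIZER,
 *         malloc_tsd_no_cleanup)
 */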

#undef JEMALLOC_H_STRUCTS
/******************************************************************************/
#define JEMALLOC_H_EXTERNS

extern bool opt_abort;
extern bool opt_junk;
extern size_t opt_quarantine;
extern bool opt_redzone;
extern bool opt_utrace;
extern bool opt_valgrind;
extern bool opt_xmalloc;
extern bool opt_zero;
extern size_t opt_narenas;

/* Number of CPUs. */
extern unsigned ncpus;

/* Protects arenas initialization (arenas, arenas_total). */
extern malloc_mutex_t arenas_lock;
/*
 * Arenas that are used to service external requests.  Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 *
 * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
 * arenas.  arenas[narenas_auto..narenas_total) are only used if the
 * application takes some action to create them and allocate from them.
 */
extern arena_t **arenas;
extern unsigned narenas_total;
extern unsigned narenas_auto; /* Read-only after initialization. */

arena_t *arenas_extend(unsigned ind);
void arenas_cleanup(void *arg);
arena_t *choose_arena_hard(void);
void jemalloc_prefork(void);
void jemalloc_postfork_parent(void);
void jemalloc_postfork_child(void);

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_EXTERNS
/******************************************************************************/
#define JEMALLOC_H_INLINES

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"

#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), arenas, arena_t *)

size_t s2u(size_t size);
size_t sa2u(size_t size, size_t alignment);
unsigned narenas_total_get(void);
arena_t *choose_arena(arena_t *arena);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
/*
 * Map of pthread_self() --> arenas[???], used for selecting an arena to use
 * for allocations.
 */
malloc_tsd_externs(arenas, arena_t *)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, arenas, arena_t *, NULL,
    arenas_cleanup)

/*
 * Compute usable size that would result from allocating an object with the
 * specified size.
 */
JEMALLOC_ALWAYS_INLINE size_t
s2u(size_t size)
{

    if (size <= SMALL_MAXCLASS)
        return (arena_bin_info[SMALL_SIZE2BIN(size)].reg_size);
    if (size <= arena_maxclass)
        return (PAGE_CEILING(size));
    return (CHUNK_CEILING(size));
}

/*
 * Compute usable size that would result from allocating an object with the
 * specified size and alignment.
 */
JEMALLOC_ALWAYS_INLINE size_t
sa2u(size_t size, size_t alignment)
{
    size_t usize;

    assert(alignment != 0 && ((alignment - 1) & alignment) == 0);

    /*
     * Round size up to the nearest multiple of alignment.
     *
     * This done, we can take advantage of the fact that for each small
     * size class, every object is aligned at the smallest power of two
     * that is non-zero in the base two representation of the size.  For
     * example:
     *
     *   Size |   Base 2 | Minimum alignment
     *   -----+----------+------------------
     *     96 |  1100000 |                32
     *    160 | 10100000 |                32
     *    192 | 11000000 |                64
     */
    usize = ALIGNMENT_CEILING(size, alignment);
    /*
     * (usize < size) protects against the combination of maximal
     * alignment and size greater than maximal alignment.
     */
    if (usize < size) {
        /* size_t overflow. */
        return (0);
    }

    if (usize <= arena_maxclass && alignment <= PAGE) {
        if (usize <= SMALL_MAXCLASS)
            return (arena_bin_info[SMALL_SIZE2BIN(usize)].reg_size);
        return (PAGE_CEILING(usize));
    } else {
        size_t run_size;

        /*
         * We can't achieve subpage alignment, so round up alignment
         * permanently; it makes later calculations simpler.
         */
        alignment = PAGE_CEILING(alignment);
        usize = PAGE_CEILING(size);
        /*
         * (usize < size) protects against very large sizes within
         * PAGE of SIZE_T_MAX.
         *
         * (usize + alignment < usize) protects against the
         * combination of maximal alignment and usize large enough
         * to cause overflow.  This is similar to the first overflow
         * check above, but it needs to be repeated due to the new
         * usize value, which may now be *equal* to maximal
         * alignment, whereas before we only detected overflow if the
         * original size was *greater* than maximal alignment.
         */
        if (usize < size || usize + alignment < usize) {
            /* size_t overflow. */
            return (0);
        }

        /*
         * Calculate the size of the over-size run that arena_palloc()
         * would need to allocate in order to guarantee the alignment.
         * If the run wouldn't fit within a chunk, round up to a huge
         * allocation size.
         */
        run_size = usize + alignment - PAGE;
        if (run_size <= arena_maxclass)
            return (PAGE_CEILING(usize));
        return (CHUNK_CEILING(usize));
    }
}
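
/*
 * Worked examples (illustrative; assumes the default 4 KiB page size and the
 * default small size classes):
 *
 *     s2u(100)        == 112   (next small size class up)
 *     sa2u(100, 64)   == 128   (rounded to a multiple of 64, landing in a
 *                               class whose minimum alignment is >= 64)
 *     sa2u(100, 4096) == 4096  (page alignment; one whole page)
 */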

JEMALLOC_INLINE unsigned
narenas_total_get(void)
{
    unsigned narenas;

    malloc_mutex_lock(&arenas_lock);
    narenas = narenas_total;
    malloc_mutex_unlock(&arenas_lock);

    return (narenas);
}

/* Choose an arena based on a per-thread value. */
JEMALLOC_INLINE arena_t *
choose_arena(arena_t *arena)
{
    arena_t *ret;

    if (arena != NULL)
        return (arena);

    if ((ret = *arenas_tsd_get()) == NULL) {
        ret = choose_arena_hard();
        assert(ret != NULL);
    }

    return (ret);
}
#endif

#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/rtree.h"
/*
 * Include arena.h twice in order to resolve circular dependencies with
 * tcache.h.
 */
#define JEMALLOC_ARENA_INLINE_A
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_A
#include "jemalloc/internal/tcache.h"
#define JEMALLOC_ARENA_INLINE_B
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_B
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"

#ifndef JEMALLOC_ENABLE_INLINE
void *imallocx(size_t size, bool try_tcache, arena_t *arena);
void *imalloc(size_t size);
void *icallocx(size_t size, bool try_tcache, arena_t *arena);
void *icalloc(size_t size);
void *ipallocx(size_t usize, size_t alignment, bool zero, bool try_tcache,
    arena_t *arena);
void *ipalloc(size_t usize, size_t alignment, bool zero);
size_t isalloc(const void *ptr, bool demote);
size_t ivsalloc(const void *ptr, bool demote);
size_t u2rz(size_t usize);
size_t p2rz(const void *ptr);
void idallocx(void *ptr, bool try_tcache);
void idalloc(void *ptr);
void iqallocx(void *ptr, bool try_tcache);
void iqalloc(void *ptr);
void *irallocx(void *ptr, size_t size, size_t extra, size_t alignment,
    bool zero, bool no_move, bool try_tcache_alloc, bool try_tcache_dalloc,
    arena_t *arena);
void *iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
    bool zero, bool no_move);
malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t)
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_ALWAYS_INLINE void *
imallocx(size_t size, bool try_tcache, arena_t *arena)
{

    assert(size != 0);

    if (size <= arena_maxclass)
        return (arena_malloc(arena, size, false, try_tcache));
    else
        return (huge_malloc(size, false));
}

JEMALLOC_ALWAYS_INLINE void *
imalloc(size_t size)
{

    return (imallocx(size, true, NULL));
}

JEMALLOC_ALWAYS_INLINE void *
icallocx(size_t size, bool try_tcache, arena_t *arena)
{

    if (size <= arena_maxclass)
        return (arena_malloc(arena, size, true, try_tcache));
    else
        return (huge_malloc(size, true));
}

JEMALLOC_ALWAYS_INLINE void *
icalloc(size_t size)
{

    return (icallocx(size, true, NULL));
}

JEMALLOC_ALWAYS_INLINE void *
ipallocx(size_t usize, size_t alignment, bool zero, bool try_tcache,
    arena_t *arena)
{
    void *ret;

    assert(usize != 0);
    assert(usize == sa2u(usize, alignment));

    if (usize <= arena_maxclass && alignment <= PAGE)
        ret = arena_malloc(arena, usize, zero, try_tcache);
    else {
        if (usize <= arena_maxclass) {
            ret = arena_palloc(choose_arena(arena), usize,
                alignment, zero);
        } else if (alignment <= chunksize)
            ret = huge_malloc(usize, zero);
        else
            ret = huge_palloc(usize, alignment, zero);
    }

    assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
    return (ret);
}

JEMALLOC_ALWAYS_INLINE void *
ipalloc(size_t usize, size_t alignment, bool zero)
{

    return (ipallocx(usize, alignment, zero, true, NULL));
}
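
/*
 * Illustrative caller sequence (mirrors how aligned allocation entry points
 * are expected to use sa2u()/ipalloc(); ipallocx() asserts that usize was
 * precomputed this way):
 *
 *     usize = sa2u(size, alignment);
 *     if (usize == 0)
 *         return (NULL);  (size_t overflow)
 *     p = ipalloc(usize, alignment, false);
 */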

/*
 * Typical usage:
 *   void *ptr = [...]
 *   size_t sz = isalloc(ptr, config_prof);
 */
JEMALLOC_ALWAYS_INLINE size_t
isalloc(const void *ptr, bool demote)
{
    size_t ret;
    arena_chunk_t *chunk;

    assert(ptr != NULL);
    /* Demotion only makes sense if config_prof is true. */
    assert(config_prof || demote == false);

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    if (chunk != ptr)
        ret = arena_salloc(ptr, demote);
    else
        ret = huge_salloc(ptr);

    return (ret);
}

JEMALLOC_ALWAYS_INLINE size_t
ivsalloc(const void *ptr, bool demote)
{

    /* Return 0 if ptr is not within a chunk managed by jemalloc. */
    if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == NULL)
        return (0);

    return (isalloc(ptr, demote));
}

JEMALLOC_INLINE size_t
u2rz(size_t usize)
{
    size_t ret;

    if (usize <= SMALL_MAXCLASS) {
        size_t binind = SMALL_SIZE2BIN(usize);
        ret = arena_bin_info[binind].redzone_size;
    } else
        ret = 0;

    return (ret);
}

JEMALLOC_INLINE size_t
p2rz(const void *ptr)
{
    size_t usize = isalloc(ptr, false);

    return (u2rz(usize));
}

JEMALLOC_ALWAYS_INLINE void
idallocx(void *ptr, bool try_tcache)
{
    arena_chunk_t *chunk;

    assert(ptr != NULL);

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    if (chunk != ptr)
        arena_dalloc(chunk->arena, chunk, ptr, try_tcache);
    else
        huge_dalloc(ptr, true);
}

JEMALLOC_ALWAYS_INLINE void
idalloc(void *ptr)
{

    idallocx(ptr, true);
}

JEMALLOC_ALWAYS_INLINE void
iqallocx(void *ptr, bool try_tcache)
{

    if (config_fill && opt_quarantine)
        quarantine(ptr);
    else
        idallocx(ptr, try_tcache);
}

JEMALLOC_ALWAYS_INLINE void
iqalloc(void *ptr)
{

    iqallocx(ptr, true);
}

JEMALLOC_ALWAYS_INLINE void *
irallocx(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
    bool no_move, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena)
{
    void *ret;
    size_t oldsize;

    assert(ptr != NULL);
    assert(size != 0);

    oldsize = isalloc(ptr, config_prof);

    if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1)) != 0) {
        size_t usize, copysize;

        /*
         * Existing object alignment is inadequate; allocate new space
         * and copy.
         */
        if (no_move)
            return (NULL);
        usize = sa2u(size + extra, alignment);
        if (usize == 0)
            return (NULL);
        ret = ipallocx(usize, alignment, zero, try_tcache_alloc, arena);
        if (ret == NULL) {
            if (extra == 0)
                return (NULL);
            /* Try again, without extra this time. */
            usize = sa2u(size, alignment);
            if (usize == 0)
                return (NULL);
            ret = ipallocx(usize, alignment, zero, try_tcache_alloc,
                arena);
            if (ret == NULL)
                return (NULL);
        }
        /*
         * Copy at most size bytes (not size+extra), since the caller
         * has no expectation that the extra bytes will be reliably
         * preserved.
         */
        copysize = (size < oldsize) ? size : oldsize;
        memcpy(ret, ptr, copysize);
        iqallocx(ptr, try_tcache_dalloc);
        return (ret);
    }

    if (no_move) {
        if (size <= arena_maxclass) {
            return (arena_ralloc_no_move(ptr, oldsize, size,
                extra, zero));
        } else {
            return (huge_ralloc_no_move(ptr, oldsize, size,
                extra));
        }
    } else {
        if (size + extra <= arena_maxclass) {
            return (arena_ralloc(arena, ptr, oldsize, size, extra,
                alignment, zero, try_tcache_alloc,
                try_tcache_dalloc));
        } else {
            return (huge_ralloc(ptr, oldsize, size, extra,
                alignment, zero, try_tcache_dalloc));
        }
    }
}
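
/*
 * Illustration: plain realloc() semantics correspond to iralloc() (defined
 * below) called as
 *
 *     ret = iralloc(ptr, size, 0, 0, false, false);
 *
 * i.e. no extra trailing space, no alignment constraint, no zeroing, and
 * moving the object is permitted.
 */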

JEMALLOC_ALWAYS_INLINE void *
iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
    bool no_move)
{

    return (irallocx(ptr, size, extra, alignment, zero, no_move, true, true,
        NULL));
}

malloc_tsd_externs(thread_allocated, thread_allocated_t)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, thread_allocated, thread_allocated_t,
    THREAD_ALLOCATED_INITIALIZER, malloc_tsd_no_cleanup)
#endif

#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_INLINES
/******************************************************************************/
#endif /* JEMALLOC_INTERNAL_H */