jemalloc_internal.h.in revision b954bc5d3a65966df0ce7801cd6102542b5e894b
#ifndef JEMALLOC_INTERNAL_H
#define JEMALLOC_INTERNAL_H
#include <math.h>
#ifdef _WIN32
#  include <windows.h>
#  define ENOENT ERROR_PATH_NOT_FOUND
#  define EINVAL ERROR_BAD_ARGUMENTS
#  define EAGAIN ERROR_OUTOFMEMORY
#  define EPERM ERROR_WRITE_FAULT
#  define EFAULT ERROR_INVALID_ADDRESS
#  define ENOMEM ERROR_NOT_ENOUGH_MEMORY
#  undef ERANGE
#  define ERANGE ERROR_INVALID_DATA
#else
#  include <sys/param.h>
#  include <sys/mman.h>
#  include <sys/syscall.h>
#  if !defined(SYS_write) && defined(__NR_write)
#    define SYS_write __NR_write
#  endif
#  include <sys/uio.h>
#  include <pthread.h>
#  include <errno.h>
#endif
#include <sys/types.h>

#include <limits.h>
#ifndef SIZE_T_MAX
#  define SIZE_T_MAX SIZE_MAX
#endif
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>
#ifndef offsetof
#  define offsetof(type, member) ((size_t)&(((type *)NULL)->member))
#endif
#include <inttypes.h>
#include <string.h>
#include <strings.h>
#include <ctype.h>
#ifdef _MSC_VER
#  include <io.h>
typedef intptr_t ssize_t;
#  define PATH_MAX 1024
#  define STDERR_FILENO 2
#  define __func__ __FUNCTION__
/* Disable warnings about deprecated system functions. */
#  pragma warning(disable: 4996)
#else
#  include <unistd.h>
#endif
#include <fcntl.h>

#include "jemalloc_internal_defs.h"

#ifdef JEMALLOC_UTRACE
#include <sys/ktrace.h>
#endif

#ifdef JEMALLOC_VALGRIND
#include <valgrind/valgrind.h>
#include <valgrind/memcheck.h>
#endif

#define JEMALLOC_NO_DEMANGLE
#ifdef JEMALLOC_JET
#  define JEMALLOC_N(n) jet_##n
#  include "jemalloc/internal/public_namespace.h"
#  define JEMALLOC_NO_RENAME
#  include "../jemalloc@install_suffix@.h"
#else
#  define JEMALLOC_N(n) @private_namespace@##n
#  include "../jemalloc@install_suffix@.h"
#endif
#include "jemalloc/internal/private_namespace.h"

static const bool config_debug =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
static const bool config_dss =
#ifdef JEMALLOC_DSS
    true
#else
    false
#endif
    ;
static const bool config_fill =
#ifdef JEMALLOC_FILL
    true
#else
    false
#endif
    ;
static const bool config_lazy_lock =
#ifdef JEMALLOC_LAZY_LOCK
    true
#else
    false
#endif
    ;
static const bool config_prof =
#ifdef JEMALLOC_PROF
    true
#else
    false
#endif
    ;
static const bool config_prof_libgcc =
#ifdef JEMALLOC_PROF_LIBGCC
    true
#else
    false
#endif
    ;
static const bool config_prof_libunwind =
#ifdef JEMALLOC_PROF_LIBUNWIND
    true
#else
    false
#endif
    ;
static const bool config_mremap =
#ifdef JEMALLOC_MREMAP
    true
#else
    false
#endif
    ;
static const bool config_munmap =
#ifdef JEMALLOC_MUNMAP
    true
#else
    false
#endif
    ;
static const bool config_stats =
#ifdef JEMALLOC_STATS
    true
#else
    false
#endif
    ;
static const bool config_tcache =
#ifdef JEMALLOC_TCACHE
    true
#else
    false
#endif
    ;
static const bool config_tls =
#ifdef JEMALLOC_TLS
    true
#else
    false
#endif
    ;
static const bool config_utrace =
#ifdef JEMALLOC_UTRACE
    true
#else
    false
#endif
    ;
static const bool config_valgrind =
#ifdef JEMALLOC_VALGRIND
    true
#else
    false
#endif
    ;
static const bool config_xmalloc =
#ifdef JEMALLOC_XMALLOC
    true
#else
    false
#endif
    ;
static const bool config_ivsalloc =
#ifdef JEMALLOC_IVSALLOC
    true
#else
    false
#endif
    ;
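
/*
 * The config_* constants above allow feature tests to be written as ordinary
 * C conditionals rather than #ifdef blocks, e.g.:
 *
 *   if (config_debug)
 *       assert(p != NULL);
 *
 * When the corresponding JEMALLOC_* symbol is undefined, the constant is
 * false and the compiler eliminates the dead branch, so this costs nothing
 * at run time while keeping all code paths visible to the compiler.
 */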

#ifdef JEMALLOC_ATOMIC9
#include <machine/atomic.h>
#endif

#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
#include <libkern/OSAtomic.h>
#endif

#ifdef JEMALLOC_ZONE
#include <mach/mach_error.h>
#include <mach/mach_init.h>
#include <mach/vm_map.h>
#include <malloc/malloc.h>
#endif

#define RB_COMPACT
#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/qr.h"
#include "jemalloc/internal/ql.h"

/*
 * jemalloc can conceptually be broken into components (arena, tcache, etc.),
 * but there are circular dependencies that cannot be broken without
 * substantial performance degradation.  In order to reduce the effect on
 * visual code flow, read the header files in multiple passes, with one of the
 * following cpp variables defined during each pass:
 *
 *   JEMALLOC_H_TYPES   : Preprocessor-defined constants and pseudo-opaque data
 *                        types.
 *   JEMALLOC_H_STRUCTS : Data structures.
 *   JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
 *   JEMALLOC_H_INLINES : Inline functions.
 */
/******************************************************************************/
#define JEMALLOC_H_TYPES

#include "jemalloc/internal/jemalloc_internal_macros.h"

#define MALLOCX_LG_ALIGN_MASK ((int)0x3f)
#define ALLOCM_LG_ALIGN_MASK ((int)0x3f)

/* Smallest size class to support. */
#define LG_TINY_MIN 3
#define TINY_MIN (1U << LG_TINY_MIN)

/*
 * Minimum alignment of allocations is 2^LG_QUANTUM bytes (ignoring tiny size
 * classes).
 */
#ifndef LG_QUANTUM
#  if (defined(__i386__) || defined(_M_IX86))
#    define LG_QUANTUM 4
#  endif
#  ifdef __ia64__
#    define LG_QUANTUM 4
#  endif
#  ifdef __alpha__
#    define LG_QUANTUM 4
#  endif
#  ifdef __sparc64__
#    define LG_QUANTUM 4
#  endif
#  if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64))
#    define LG_QUANTUM 4
#  endif
#  ifdef __arm__
#    define LG_QUANTUM 3
#  endif
#  ifdef __aarch64__
#    define LG_QUANTUM 4
#  endif
#  ifdef __hppa__
#    define LG_QUANTUM 4
#  endif
#  ifdef __mips__
#    define LG_QUANTUM 3
#  endif
#  ifdef __powerpc__
#    define LG_QUANTUM 4
#  endif
#  ifdef __s390__
#    define LG_QUANTUM 4
#  endif
#  ifdef __SH4__
#    define LG_QUANTUM 4
#  endif
#  ifdef __tile__
#    define LG_QUANTUM 4
#  endif
#  ifndef LG_QUANTUM
#    error "No LG_QUANTUM definition for architecture; specify via CPPFLAGS"
#  endif
#endif

#define QUANTUM ((size_t)(1U << LG_QUANTUM))
#define QUANTUM_MASK (QUANTUM - 1)

/* Return the smallest quantum multiple that is >= a. */
#define QUANTUM_CEILING(a) \
    (((a) + QUANTUM_MASK) & ~QUANTUM_MASK)
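
/*
 * For example, with LG_QUANTUM == 4 (QUANTUM == 16, QUANTUM_MASK == 0xf):
 *
 *   QUANTUM_CEILING(1)  == 16
 *   QUANTUM_CEILING(16) == 16
 *   QUANTUM_CEILING(17) == 32
 */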

#define LONG ((size_t)(1U << LG_SIZEOF_LONG))
#define LONG_MASK (LONG - 1)

/* Return the smallest long multiple that is >= a. */
#define LONG_CEILING(a) \
    (((a) + LONG_MASK) & ~LONG_MASK)

#define SIZEOF_PTR (1U << LG_SIZEOF_PTR)
#define PTR_MASK (SIZEOF_PTR - 1)

/* Return the smallest (void *) multiple that is >= a. */
#define PTR_CEILING(a) \
    (((a) + PTR_MASK) & ~PTR_MASK)

/*
 * Maximum size of L1 cache line.  This is used to avoid cache line aliasing.
 * In addition, this controls the spacing of cacheline-spaced size classes.
 *
 * CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can
 * only handle raw constants.
 */
#define LG_CACHELINE 6
#define CACHELINE 64
#define CACHELINE_MASK (CACHELINE - 1)

/* Return the smallest cacheline multiple that is >= s. */
#define CACHELINE_CEILING(s) \
    (((s) + CACHELINE_MASK) & ~CACHELINE_MASK)

/* Page size.  STATIC_PAGE_SHIFT is determined by the configure script. */
#ifdef PAGE_MASK
#  undef PAGE_MASK
#endif
#define LG_PAGE STATIC_PAGE_SHIFT
#define PAGE ((size_t)(1U << STATIC_PAGE_SHIFT))
#define PAGE_MASK ((size_t)(PAGE - 1))

/* Return the smallest pagesize multiple that is >= s. */
#define PAGE_CEILING(s) \
    (((s) + PAGE_MASK) & ~PAGE_MASK)

/* Return the nearest aligned address at or below a. */
#define ALIGNMENT_ADDR2BASE(a, alignment) \
    ((void *)((uintptr_t)(a) & (-(alignment))))

/* Return the offset between a and the nearest aligned address at or below a. */
#define ALIGNMENT_ADDR2OFFSET(a, alignment) \
    ((size_t)((uintptr_t)(a) & (alignment - 1)))

/* Return the smallest alignment multiple that is >= s. */
#define ALIGNMENT_CEILING(s, alignment) \
    (((s) + (alignment - 1)) & (-(alignment)))
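
/*
 * All three macros assume that alignment is a power of two.  For example,
 * with alignment == 0x100:
 *
 *   ALIGNMENT_ADDR2BASE(0x1234, 0x100)   == (void *)0x1200
 *   ALIGNMENT_ADDR2OFFSET(0x1234, 0x100) == 0x34
 *   ALIGNMENT_CEILING(0x1234, 0x100)     == 0x1300
 */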

/* Declare a variable-length array. */
#if __STDC_VERSION__ < 199901L
#  ifdef _MSC_VER
#    include <malloc.h>
#    define alloca _alloca
#  else
#    ifdef JEMALLOC_HAS_ALLOCA_H
#      include <alloca.h>
#    else
#      include <stdlib.h>
#    endif
#  endif
#  define VARIABLE_ARRAY(type, name, count) \
    type *name = alloca(sizeof(type) * count)
#else
#  define VARIABLE_ARRAY(type, name, count) type name[count]
#endif
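
/*
 * For example,
 *
 *   VARIABLE_ARRAY(bool, flags, nflags);
 *
 * expands to "bool flags[nflags]" under C99, and to an alloca()-backed
 * pointer declaration otherwise.  In both cases the storage has automatic
 * lifetime, so the macro is only suitable for function-local buffers.
 */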

#ifdef JEMALLOC_VALGRIND
/*
 * The JEMALLOC_VALGRIND_*() macros must be macros rather than functions
 * so that when Valgrind reports errors, there are no extra stack frames
 * in the backtraces.
 *
 * The size that is reported to valgrind must be consistent through a chain of
 * malloc..realloc..realloc calls.  Request size isn't recorded anywhere in
 * jemalloc, so it is critical that all callers of these macros provide usize
 * rather than request size.  As a result, buffer overflow detection is
 * technically weakened for the standard API, though it is generally accepted
 * practice to consider any extra bytes reported by malloc_usable_size() as
 * usable space.
 */
#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do { \
	if (config_valgrind && opt_valgrind && cond) \
		VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(ptr), zero); \
} while (0)
#define JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize, \
    old_rzsize, zero) do { \
	if (config_valgrind && opt_valgrind) { \
		size_t rzsize = p2rz(ptr); \
 \
		if (ptr == old_ptr) { \
			VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize, \
			    usize, rzsize); \
			if (zero && old_usize < usize) { \
				VALGRIND_MAKE_MEM_DEFINED( \
				    (void *)((uintptr_t)ptr + \
				    old_usize), usize - old_usize); \
			} \
		} else { \
			if (old_ptr != NULL) { \
				VALGRIND_FREELIKE_BLOCK(old_ptr, \
				    old_rzsize); \
			} \
			if (ptr != NULL) { \
				size_t copy_size = (old_usize < usize) \
				    ? old_usize : usize; \
				size_t tail_size = usize - copy_size; \
				VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, \
				    rzsize, false); \
				if (copy_size > 0) { \
					VALGRIND_MAKE_MEM_DEFINED(ptr, \
					    copy_size); \
				} \
				if (zero && tail_size > 0) { \
					VALGRIND_MAKE_MEM_DEFINED( \
					    (void *)((uintptr_t)ptr + \
					    copy_size), tail_size); \
				} \
			} \
		} \
	} \
} while (0)
#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do { \
	if (config_valgrind && opt_valgrind) \
		VALGRIND_FREELIKE_BLOCK(ptr, rzsize); \
} while (0)
#else
#define RUNNING_ON_VALGRIND ((unsigned)0)
#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed) \
    do {} while (0)
#define VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB) \
    do {} while (0)
#define VALGRIND_FREELIKE_BLOCK(addr, rzB) do {} while (0)
#define VALGRIND_MAKE_MEM_NOACCESS(_qzz_addr, _qzz_len) do {} while (0)
#define VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr, _qzz_len) do {} while (0)
#define VALGRIND_MAKE_MEM_DEFINED(_qzz_addr, _qzz_len) do {} while (0)
#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do {} while (0)
#define JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize, \
    old_rzsize, zero) do {} while (0)
#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {} while (0)
#endif

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_TYPES
/******************************************************************************/
#define JEMALLOC_H_STRUCTS

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

typedef struct {
	uint64_t allocated;
	uint64_t deallocated;
} thread_allocated_t;
/*
 * The JEMALLOC_ARG_CONCAT() wrapper is necessary to pass {0, 0} via a cpp macro
 * argument.
 */
#define THREAD_ALLOCATED_INITIALIZER JEMALLOC_ARG_CONCAT({0, 0})

#undef JEMALLOC_H_STRUCTS
/******************************************************************************/
#define JEMALLOC_H_EXTERNS

extern bool opt_abort;
extern bool opt_junk;
extern size_t opt_quarantine;
extern bool opt_redzone;
extern bool opt_utrace;
extern bool opt_valgrind;
extern bool opt_xmalloc;
extern bool opt_zero;
extern size_t opt_narenas;

/* Number of CPUs. */
extern unsigned ncpus;

/* Protects arenas initialization (arenas, arenas_total). */
extern malloc_mutex_t arenas_lock;
/*
 * Arenas that are used to service external requests.  Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 *
 * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
 * arenas.  arenas[narenas_auto..narenas_total) are only used if the application
 * takes some action to create them and allocate from them.
 */
extern arena_t **arenas;
extern unsigned narenas_total;
extern unsigned narenas_auto; /* Read-only after initialization. */

arena_t *arenas_extend(unsigned ind);
void arenas_cleanup(void *arg);
arena_t *choose_arena_hard(void);
void jemalloc_prefork(void);
void jemalloc_postfork_parent(void);
void jemalloc_postfork_child(void);

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_EXTERNS
/******************************************************************************/
#define JEMALLOC_H_INLINES

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"

#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), arenas, arena_t *)

size_t s2u(size_t size);
size_t sa2u(size_t size, size_t alignment);
unsigned narenas_total_get(void);
arena_t *choose_arena(arena_t *arena);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
/*
 * Map of pthread_self() --> arenas[???], used for selecting an arena to use
 * for allocations.
 */
malloc_tsd_externs(arenas, arena_t *)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, arenas, arena_t *, NULL,
    arenas_cleanup)

/*
 * Compute usable size that would result from allocating an object with the
 * specified size.
 */
JEMALLOC_ALWAYS_INLINE size_t
s2u(size_t size)
{

	if (size <= SMALL_MAXCLASS)
		return (arena_bin_info[SMALL_SIZE2BIN(size)].reg_size);
	if (size <= arena_maxclass)
		return (PAGE_CEILING(size));
	return (CHUNK_CEILING(size));
}
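
/*
 * For example, assuming 4 KiB pages and the default size classes: s2u(100)
 * returns 112 (the smallest small size class >= 100), and s2u(5000) returns
 * 8192 (two pages).  Sizes beyond arena_maxclass round up to a chunk
 * multiple via CHUNK_CEILING().
 */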

/*
 * Compute usable size that would result from allocating an object with the
 * specified size and alignment.
 */
JEMALLOC_ALWAYS_INLINE size_t
sa2u(size_t size, size_t alignment)
{
	size_t usize;

	assert(alignment != 0 && ((alignment - 1) & alignment) == 0);

	/*
	 * Round size up to the nearest multiple of alignment.
	 *
	 * This done, we can take advantage of the fact that for each small
	 * size class, every object is aligned at the smallest power of two
	 * that is non-zero in the base two representation of the size.  For
	 * example:
	 *
	 *   Size |   Base 2 | Minimum alignment
	 *   -----+----------+------------------
	 *     96 |  1100000 |                32
	 *    144 | 10010000 |                16
	 *    192 | 11000000 |                64
	 */
	usize = ALIGNMENT_CEILING(size, alignment);
	/*
	 * (usize < size) protects against the combination of maximal
	 * alignment and size greater than maximal alignment.
	 */
	if (usize < size) {
		/* size_t overflow. */
		return (0);
	}

	if (usize <= arena_maxclass && alignment <= PAGE) {
		if (usize <= SMALL_MAXCLASS)
			return (arena_bin_info[SMALL_SIZE2BIN(usize)].reg_size);
		return (PAGE_CEILING(usize));
	} else {
		size_t run_size;

		/*
		 * We can't achieve subpage alignment, so round up alignment
		 * permanently; it makes later calculations simpler.
		 */
		alignment = PAGE_CEILING(alignment);
		usize = PAGE_CEILING(size);
		/*
		 * (usize < size) protects against very large sizes within
		 * PAGE of SIZE_T_MAX.
		 *
		 * (usize + alignment < usize) protects against the
		 * combination of maximal alignment and usize large enough
		 * to cause overflow.  This is similar to the first overflow
		 * check above, but it needs to be repeated due to the new
		 * usize value, which may now be *equal* to maximal
		 * alignment, whereas before we only detected overflow if the
		 * original size was *greater* than maximal alignment.
		 */
		if (usize < size || usize + alignment < usize) {
			/* size_t overflow. */
			return (0);
		}

		/*
		 * Calculate the size of the over-size run that arena_palloc()
		 * would need to allocate in order to guarantee the alignment.
		 * If the run wouldn't fit within a chunk, round up to a huge
		 * allocation size.
		 */
		run_size = usize + alignment - PAGE;
		if (run_size <= arena_maxclass)
			return (PAGE_CEILING(usize));
		return (CHUNK_CEILING(usize));
	}
}
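
/*
 * For example, with 4 KiB pages and the default size classes: sa2u(100, 64)
 * rounds 100 up to 128, a small size class whose objects are all 64-byte
 * aligned, and returns 128; sa2u(100, 8192) cannot be satisfied by a small
 * class because the alignment exceeds PAGE, so it returns
 * PAGE_CEILING(100) == 4096 after verifying that the over-size run still
 * fits within a chunk.
 */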

JEMALLOC_INLINE unsigned
narenas_total_get(void)
{
	unsigned narenas;

	malloc_mutex_lock(&arenas_lock);
	narenas = narenas_total;
	malloc_mutex_unlock(&arenas_lock);

	return (narenas);
}

/* Choose an arena based on a per-thread value. */
JEMALLOC_INLINE arena_t *
choose_arena(arena_t *arena)
{
	arena_t *ret;

	if (arena != NULL)
		return (arena);

	if ((ret = *arenas_tsd_get()) == NULL) {
		ret = choose_arena_hard();
		assert(ret != NULL);
	}

	return (ret);
}
#endif

#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/rtree.h"
/*
 * Include arena.h twice in order to resolve circular dependencies with
 * tcache.h.
 */
#define JEMALLOC_ARENA_INLINE_A
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_A
#include "jemalloc/internal/tcache.h"
#define JEMALLOC_ARENA_INLINE_B
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_B
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"

#ifndef JEMALLOC_ENABLE_INLINE
void *imalloct(size_t size, bool try_tcache, arena_t *arena);
void *imalloc(size_t size);
void *icalloct(size_t size, bool try_tcache, arena_t *arena);
void *icalloc(size_t size);
void *ipalloct(size_t usize, size_t alignment, bool zero, bool try_tcache,
    arena_t *arena);
void *ipalloc(size_t usize, size_t alignment, bool zero);
size_t isalloc(const void *ptr, bool demote);
size_t ivsalloc(const void *ptr, bool demote);
size_t u2rz(size_t usize);
size_t p2rz(const void *ptr);
void idalloct(void *ptr, bool try_tcache);
void idalloc(void *ptr);
void iqalloct(void *ptr, bool try_tcache);
void iqalloc(void *ptr);
void *iralloct(void *ptr, size_t size, size_t extra, size_t alignment,
    bool zero, bool no_move, bool try_tcache_alloc, bool try_tcache_dalloc,
    arena_t *arena);
void *iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
    bool zero, bool no_move);
malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t)
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_ALWAYS_INLINE void *
imalloct(size_t size, bool try_tcache, arena_t *arena)
{

	assert(size != 0);

	if (size <= arena_maxclass)
		return (arena_malloc(arena, size, false, try_tcache));
	else
		return (huge_malloc(size, false));
}

JEMALLOC_ALWAYS_INLINE void *
imalloc(size_t size)
{

	return (imalloct(size, true, NULL));
}

JEMALLOC_ALWAYS_INLINE void *
icalloct(size_t size, bool try_tcache, arena_t *arena)
{

	if (size <= arena_maxclass)
		return (arena_malloc(arena, size, true, try_tcache));
	else
		return (huge_malloc(size, true));
}

JEMALLOC_ALWAYS_INLINE void *
icalloc(size_t size)
{

	return (icalloct(size, true, NULL));
}

JEMALLOC_ALWAYS_INLINE void *
ipalloct(size_t usize, size_t alignment, bool zero, bool try_tcache,
    arena_t *arena)
{
	void *ret;

	assert(usize != 0);
	assert(usize == sa2u(usize, alignment));

	if (usize <= arena_maxclass && alignment <= PAGE)
		ret = arena_malloc(arena, usize, zero, try_tcache);
	else {
		if (usize <= arena_maxclass) {
			ret = arena_palloc(choose_arena(arena), usize,
			    alignment, zero);
		} else if (alignment <= chunksize)
			ret = huge_malloc(usize, zero);
		else
			ret = huge_palloc(usize, alignment, zero);
	}

	assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
	return (ret);
}

JEMALLOC_ALWAYS_INLINE void *
ipalloc(size_t usize, size_t alignment, bool zero)
{

	return (ipalloct(usize, alignment, zero, true, NULL));
}
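
/*
 * Callers are expected to derive usize via sa2u() so that the
 * usize == sa2u(usize, alignment) precondition holds, e.g.:
 *
 *   size_t usize = sa2u(size, alignment);
 *   void *ret = (usize != 0) ? ipalloc(usize, alignment, false) : NULL;
 */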

/*
 * Typical usage:
 *   void *ptr = [...]
 *   size_t sz = isalloc(ptr, config_prof);
 */
JEMALLOC_ALWAYS_INLINE size_t
isalloc(const void *ptr, bool demote)
{
	size_t ret;
	arena_chunk_t *chunk;

	assert(ptr != NULL);
	/* Demotion only makes sense if config_prof is true. */
	assert(config_prof || demote == false);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr)
		ret = arena_salloc(ptr, demote);
	else
		ret = huge_salloc(ptr);

	return (ret);
}

JEMALLOC_ALWAYS_INLINE size_t
ivsalloc(const void *ptr, bool demote)
{

	/* Return 0 if ptr is not within a chunk managed by jemalloc. */
	if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == 0)
		return (0);

	return (isalloc(ptr, demote));
}

JEMALLOC_INLINE size_t
u2rz(size_t usize)
{
	size_t ret;

	if (usize <= SMALL_MAXCLASS) {
		size_t binind = SMALL_SIZE2BIN(usize);
		ret = arena_bin_info[binind].redzone_size;
	} else
		ret = 0;

	return (ret);
}

JEMALLOC_INLINE size_t
p2rz(const void *ptr)
{
	size_t usize = isalloc(ptr, false);

	return (u2rz(usize));
}

JEMALLOC_ALWAYS_INLINE void
idalloct(void *ptr, bool try_tcache)
{
	arena_chunk_t *chunk;

	assert(ptr != NULL);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr)
		arena_dalloc(chunk->arena, chunk, ptr, try_tcache);
	else
		huge_dalloc(ptr, true);
}

JEMALLOC_ALWAYS_INLINE void
idalloc(void *ptr)
{

	idalloct(ptr, true);
}

JEMALLOC_ALWAYS_INLINE void
iqalloct(void *ptr, bool try_tcache)
{

	if (config_fill && opt_quarantine)
		quarantine(ptr);
	else
		idalloct(ptr, try_tcache);
}

JEMALLOC_ALWAYS_INLINE void
iqalloc(void *ptr)
{

	iqalloct(ptr, true);
}

JEMALLOC_ALWAYS_INLINE void *
iralloct(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
    bool no_move, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena)
{
	void *ret;
	size_t oldsize;

	assert(ptr != NULL);
	assert(size != 0);

	oldsize = isalloc(ptr, config_prof);

	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
	    != 0) {
		size_t usize, copysize;

		/*
		 * Existing object alignment is inadequate; allocate new space
		 * and copy.
		 */
		if (no_move)
			return (NULL);
		usize = sa2u(size + extra, alignment);
		if (usize == 0)
			return (NULL);
		ret = ipalloct(usize, alignment, zero, try_tcache_alloc, arena);
		if (ret == NULL) {
			if (extra == 0)
				return (NULL);
			/* Try again, without extra this time. */
			usize = sa2u(size, alignment);
			if (usize == 0)
				return (NULL);
			ret = ipalloct(usize, alignment, zero, try_tcache_alloc,
			    arena);
			if (ret == NULL)
				return (NULL);
		}
		/*
		 * Copy at most size bytes (not size+extra), since the caller
		 * has no expectation that the extra bytes will be reliably
		 * preserved.
		 */
		copysize = (size < oldsize) ? size : oldsize;
		memcpy(ret, ptr, copysize);
		iqalloct(ptr, try_tcache_dalloc);
		return (ret);
	}

	if (no_move) {
		if (size <= arena_maxclass) {
			return (arena_ralloc_no_move(ptr, oldsize, size,
			    extra, zero));
		} else {
			return (huge_ralloc_no_move(ptr, oldsize, size,
			    extra));
		}
	} else {
		if (size + extra <= arena_maxclass) {
			return (arena_ralloc(arena, ptr, oldsize, size, extra,
			    alignment, zero, try_tcache_alloc,
			    try_tcache_dalloc));
		} else {
			return (huge_ralloc(ptr, oldsize, size, extra,
			    alignment, zero, try_tcache_dalloc));
		}
	}
}

JEMALLOC_ALWAYS_INLINE void *
iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
    bool no_move)
{

	return (iralloct(ptr, size, extra, alignment, zero, no_move, true, true,
	    NULL));
}
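
/*
 * For example, an in-place resize attempt (no_move == true) that falls back
 * to a moving reallocation on failure:
 *
 *   void *q = iralloc(p, newsize, 0, 0, false, true);
 *   if (q == NULL)
 *       q = iralloc(p, newsize, 0, 0, false, false);
 */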

malloc_tsd_externs(thread_allocated, thread_allocated_t)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, thread_allocated, thread_allocated_t,
    THREAD_ALLOCATED_INITIALIZER, malloc_tsd_no_cleanup)
#endif

#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_INLINES
/******************************************************************************/
#endif /* JEMALLOC_INTERNAL_H */