jemalloc_internal.h.in revision ecd3e59ca351d7111ec72a327fe0c009f2aa69a0
#ifndef JEMALLOC_INTERNAL_H
#define JEMALLOC_INTERNAL_H
#include <math.h>
#ifdef _WIN32
# include <windows.h>
# define ENOENT ERROR_PATH_NOT_FOUND
# define EINVAL ERROR_BAD_ARGUMENTS
# define EAGAIN ERROR_OUTOFMEMORY
# define EPERM ERROR_WRITE_FAULT
# define EFAULT ERROR_INVALID_ADDRESS
# define ENOMEM ERROR_NOT_ENOUGH_MEMORY
# undef ERANGE
# define ERANGE ERROR_INVALID_DATA
#else
# include <sys/param.h>
# include <sys/mman.h>
# include <sys/syscall.h>
# if !defined(SYS_write) && defined(__NR_write)
#  define SYS_write __NR_write
# endif
# include <sys/uio.h>
# include <pthread.h>
# include <errno.h>
#endif
#include <sys/types.h>

#include <limits.h>
#ifndef SIZE_T_MAX
# define SIZE_T_MAX SIZE_MAX
#endif
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>
#ifndef offsetof
# define offsetof(type, member) ((size_t)&(((type *)NULL)->member))
#endif
#include <inttypes.h>
#include <string.h>
#include <strings.h>
#include <ctype.h>
#ifdef _MSC_VER
# include <io.h>
typedef intptr_t ssize_t;
# define PATH_MAX 1024
# define STDERR_FILENO 2
# define __func__ __FUNCTION__
/* Disable warnings about deprecated system functions */
# pragma warning(disable: 4996)
#else
# include <unistd.h>
#endif
#include <fcntl.h>

#include "jemalloc_internal_defs.h"

#ifdef JEMALLOC_UTRACE
#include <sys/ktrace.h>
#endif

#ifdef JEMALLOC_VALGRIND
#include <valgrind/valgrind.h>
#include <valgrind/memcheck.h>
#endif

#define JEMALLOC_NO_DEMANGLE
#ifdef JEMALLOC_JET
# define JEMALLOC_N(n) jet_##n
# include "jemalloc/internal/public_namespace.h"
# define JEMALLOC_NO_RENAME
# include "../jemalloc@install_suffix@.h"
# undef JEMALLOC_NO_RENAME
#else
# define JEMALLOC_N(n) @private_namespace@##n
# include "../jemalloc@install_suffix@.h"
#endif
#include "jemalloc/internal/private_namespace.h"

static const bool config_debug =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
static const bool have_dss =
#ifdef JEMALLOC_DSS
    true
#else
    false
#endif
    ;
static const bool config_fill =
#ifdef JEMALLOC_FILL
    true
#else
    false
#endif
    ;
static const bool config_lazy_lock =
#ifdef JEMALLOC_LAZY_LOCK
    true
#else
    false
#endif
    ;
static const bool config_prof =
#ifdef JEMALLOC_PROF
    true
#else
    false
#endif
    ;
static const bool config_prof_libgcc =
#ifdef JEMALLOC_PROF_LIBGCC
    true
#else
    false
#endif
    ;
static const bool config_prof_libunwind =
#ifdef JEMALLOC_PROF_LIBUNWIND
    true
#else
    false
#endif
    ;
static const bool config_mremap =
#ifdef JEMALLOC_MREMAP
    true
#else
    false
#endif
    ;
static const bool config_munmap =
#ifdef JEMALLOC_MUNMAP
    true
#else
    false
#endif
    ;
static const bool config_stats =
#ifdef JEMALLOC_STATS
    true
#else
    false
#endif
    ;
static const bool config_tcache =
#ifdef JEMALLOC_TCACHE
    true
#else
    false
#endif
    ;
static const bool config_tls =
#ifdef JEMALLOC_TLS
    true
#else
    false
#endif
    ;
static const bool config_utrace =
#ifdef JEMALLOC_UTRACE
    true
#else
    false
#endif
    ;
static const bool config_valgrind =
#ifdef JEMALLOC_VALGRIND
    true
#else
    false
#endif
    ;
static const bool config_xmalloc =
#ifdef JEMALLOC_XMALLOC
    true
#else
    false
#endif
    ;
static const bool config_ivsalloc =
#ifdef JEMALLOC_IVSALLOC
    true
#else
    false
#endif
    ;
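
/*
 * These always-defined booleans mirror the JEMALLOC_* feature macros so
 * that feature checks can be written as plain C conditionals, e.g. (an
 * illustrative sketch, not code from this header):
 *
 *   if (config_stats)
 *           stats_update(...);
 *
 * When the feature is compiled out, the constant is false and the compiler
 * eliminates the dead branch, which keeps the code free of scattered
 * #ifdef blocks at no run-time cost.
 */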

#ifdef JEMALLOC_ATOMIC9
#include <machine/atomic.h>
#endif

#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
#include <libkern/OSAtomic.h>
#endif

#ifdef JEMALLOC_ZONE
#include <mach/mach_error.h>
#include <mach/mach_init.h>
#include <mach/vm_map.h>
#include <malloc/malloc.h>
#endif

#define RB_COMPACT
#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/qr.h"
#include "jemalloc/internal/ql.h"

/*
 * jemalloc can conceptually be broken into components (arena, tcache, etc.),
 * but there are circular dependencies that cannot be broken without
 * substantial performance degradation.  In order to reduce the effect on
 * visual code flow, read the header files in multiple passes, with one of the
 * following cpp variables defined during each pass:
 *
 *   JEMALLOC_H_TYPES   : Preprocessor-defined constants and pseudo-opaque
 *                        data types.
 *   JEMALLOC_H_STRUCTS : Data structures.
 *   JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
 *   JEMALLOC_H_INLINES : Inline functions.
 */
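/*
 * Concretely, each internal header is guarded per pass; a minimal sketch of
 * the expected layout (hypothetical example.h, not an actual header):
 *
 *   #ifdef JEMALLOC_H_TYPES
 *   typedef struct example_s example_t;
 *   #endif
 *   #ifdef JEMALLOC_H_STRUCTS
 *   struct example_s { ... };
 *   #endif
 *   #ifdef JEMALLOC_H_EXTERNS
 *   bool example_boot(void);
 *   #endif
 *   #ifdef JEMALLOC_H_INLINES
 *   ...inline functions...
 *   #endif
 *
 * so that including it four times, once per pass, emits each kind of
 * declaration in dependency-safe order.
 */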
/******************************************************************************/
#define JEMALLOC_H_TYPES

#include "jemalloc/internal/jemalloc_internal_macros.h"

#define MALLOCX_LG_ALIGN_MASK ((int)0x3f)

/* Smallest size class to support. */
#define LG_TINY_MIN 3
#define TINY_MIN (1U << LG_TINY_MIN)

/*
 * Minimum alignment of allocations is 2^LG_QUANTUM bytes (ignoring tiny size
 * classes).
 */
#ifndef LG_QUANTUM
# if (defined(__i386__) || defined(_M_IX86))
#  define LG_QUANTUM 4
# endif
# ifdef __ia64__
#  define LG_QUANTUM 4
# endif
# ifdef __alpha__
#  define LG_QUANTUM 4
# endif
# ifdef __sparc64__
#  define LG_QUANTUM 4
# endif
# if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64))
#  define LG_QUANTUM 4
# endif
# ifdef __arm__
#  define LG_QUANTUM 3
# endif
# ifdef __aarch64__
#  define LG_QUANTUM 4
# endif
# ifdef __hppa__
#  define LG_QUANTUM 4
# endif
# ifdef __mips__
#  define LG_QUANTUM 3
# endif
# ifdef __powerpc__
#  define LG_QUANTUM 4
# endif
# ifdef __s390__
#  define LG_QUANTUM 4
# endif
# ifdef __SH4__
#  define LG_QUANTUM 4
# endif
# ifdef __tile__
#  define LG_QUANTUM 4
# endif
# ifndef LG_QUANTUM
#  error "No LG_QUANTUM definition for architecture; specify via CPPFLAGS"
# endif
#endif

#define QUANTUM ((size_t)(1U << LG_QUANTUM))
#define QUANTUM_MASK (QUANTUM - 1)

/* Return the smallest quantum multiple that is >= a. */
#define QUANTUM_CEILING(a) \
        (((a) + QUANTUM_MASK) & ~QUANTUM_MASK)

#define LONG ((size_t)(1U << LG_SIZEOF_LONG))
#define LONG_MASK (LONG - 1)

/* Return the smallest long multiple that is >= a. */
#define LONG_CEILING(a) \
        (((a) + LONG_MASK) & ~LONG_MASK)

#define SIZEOF_PTR (1U << LG_SIZEOF_PTR)
#define PTR_MASK (SIZEOF_PTR - 1)

/* Return the smallest (void *) multiple that is >= a. */
#define PTR_CEILING(a) \
        (((a) + PTR_MASK) & ~PTR_MASK)

/*
 * Maximum size of L1 cache line.  This is used to avoid cache line aliasing.
 * In addition, this controls the spacing of cacheline-spaced size classes.
 *
 * CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can
 * only handle raw constants.
 */
#define LG_CACHELINE 6
#define CACHELINE 64
#define CACHELINE_MASK (CACHELINE - 1)

/* Return the smallest cacheline multiple that is >= s. */
#define CACHELINE_CEILING(s) \
        (((s) + CACHELINE_MASK) & ~CACHELINE_MASK)

/* Page size.  STATIC_PAGE_SHIFT is determined by the configure script. */
#ifdef PAGE_MASK
# undef PAGE_MASK
#endif
#define LG_PAGE STATIC_PAGE_SHIFT
#define PAGE ((size_t)(1U << STATIC_PAGE_SHIFT))
#define PAGE_MASK ((size_t)(PAGE - 1))

/* Return the smallest pagesize multiple that is >= s. */
#define PAGE_CEILING(s) \
        (((s) + PAGE_MASK) & ~PAGE_MASK)

/* Return the nearest aligned address at or below a. */
#define ALIGNMENT_ADDR2BASE(a, alignment) \
        ((void *)((uintptr_t)(a) & (-(alignment))))

/* Return the offset between a and the nearest aligned address at or below a. */
#define ALIGNMENT_ADDR2OFFSET(a, alignment) \
        ((size_t)((uintptr_t)(a) & (alignment - 1)))

/* Return the smallest alignment multiple that is >= s. */
#define ALIGNMENT_CEILING(s, alignment) \
        (((s) + (alignment - 1)) & (-(alignment)))

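/*
 * All of the *_CEILING() and ALIGNMENT_*() macros rely on the rounding
 * quantity being a power of two.  Worked example, assuming LG_QUANTUM == 4
 * (QUANTUM == 16, QUANTUM_MASK == 0xf):
 *
 *   QUANTUM_CEILING(17)               == (17 + 15) & ~15 == 32
 *   QUANTUM_CEILING(32)               == (32 + 15) & ~15 == 32
 *   ALIGNMENT_ADDR2BASE(0x1234, 64)   == 0x1200
 *   ALIGNMENT_ADDR2OFFSET(0x1234, 64) == 0x34
 *
 * Adding the mask carries into the next multiple whenever any low bit is
 * set; masking then clears the remainder.
 */
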
/* Declare a variable length array. */
#if __STDC_VERSION__ < 199901L
# ifdef _MSC_VER
#  include <malloc.h>
#  define alloca _alloca
# else
#  ifdef JEMALLOC_HAS_ALLOCA_H
#   include <alloca.h>
#  else
#   include <stdlib.h>
#  endif
# endif
# define VARIABLE_ARRAY(type, name, count) \
        type *name = alloca(sizeof(type) * count)
#else
# define VARIABLE_ARRAY(type, name, count) type name[count]
#endif
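
/*
 * Usage sketch for VARIABLE_ARRAY (hypothetical caller):
 *
 *   void
 *   f(unsigned n)
 *   {
 *           VARIABLE_ARRAY(uint64_t, vals, n);
 *           ...
 *   }
 *
 * Under C99 this expands to a true variable length array; under older
 * compilers it falls back to alloca(), whose storage is released on
 * function return rather than on scope exit.
 */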

#ifdef JEMALLOC_VALGRIND
/*
 * The JEMALLOC_VALGRIND_*() macros must be macros rather than functions
 * so that when Valgrind reports errors, there are no extra stack frames
 * in the backtraces.
 *
 * The size that is reported to valgrind must be consistent through a chain of
 * malloc..realloc..realloc calls.  Request size isn't recorded anywhere in
 * jemalloc, so it is critical that all callers of these macros provide usize
 * rather than request size.  As a result, buffer overflow detection is
 * technically weakened for the standard API, though it is generally accepted
 * practice to consider any extra bytes reported by malloc_usable_size() as
 * usable space.
 */
#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do { \
        if (config_valgrind && in_valgrind && cond) \
                VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(ptr), zero); \
} while (0)
#define JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize, \
    old_rzsize, zero) do { \
        if (config_valgrind && in_valgrind) { \
                size_t rzsize = p2rz(ptr); \
\
                if (ptr == old_ptr) { \
                        VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize, \
                            usize, rzsize); \
                        if (zero && old_usize < usize) { \
                                VALGRIND_MAKE_MEM_DEFINED( \
                                    (void *)((uintptr_t)ptr + \
                                    old_usize), usize - old_usize); \
                        } \
                } else { \
                        if (old_ptr != NULL) { \
                                VALGRIND_FREELIKE_BLOCK(old_ptr, \
                                    old_rzsize); \
                        } \
                        if (ptr != NULL) { \
                                size_t copy_size = (old_usize < usize) \
                                    ? old_usize : usize; \
                                size_t tail_size = usize - copy_size; \
                                VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, \
                                    rzsize, false); \
                                if (copy_size > 0) { \
                                        VALGRIND_MAKE_MEM_DEFINED(ptr, \
                                            copy_size); \
                                } \
                                if (zero && tail_size > 0) { \
                                        VALGRIND_MAKE_MEM_DEFINED( \
                                            (void *)((uintptr_t)ptr + \
                                            copy_size), tail_size); \
                                } \
                        } \
                } \
        } \
} while (0)
#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do { \
        if (config_valgrind && in_valgrind) \
                VALGRIND_FREELIKE_BLOCK(ptr, rzsize); \
} while (0)
#else
#define RUNNING_ON_VALGRIND ((unsigned)0)
#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed) \
        do {} while (0)
#define VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB) \
        do {} while (0)
#define VALGRIND_FREELIKE_BLOCK(addr, rzB) do {} while (0)
#define VALGRIND_MAKE_MEM_NOACCESS(_qzz_addr, _qzz_len) do {} while (0)
#define VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr, _qzz_len) do {} while (0)
#define VALGRIND_MAKE_MEM_DEFINED(_qzz_addr, _qzz_len) do {} while (0)
#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do {} while (0)
#define JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize, \
    old_rzsize, zero) do {} while (0)
#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {} while (0)
#endif

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_TYPES
/******************************************************************************/
#define JEMALLOC_H_STRUCTS

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

typedef struct {
        uint64_t allocated;
        uint64_t deallocated;
} thread_allocated_t;
/*
 * The JEMALLOC_ARG_CONCAT() wrapper is necessary to pass {0, 0} via a cpp macro
 * argument.
 */
#define THREAD_ALLOCATED_INITIALIZER JEMALLOC_ARG_CONCAT({0, 0})

#undef JEMALLOC_H_STRUCTS
/******************************************************************************/
#define JEMALLOC_H_EXTERNS

extern bool opt_abort;
extern bool opt_junk;
extern size_t opt_quarantine;
extern bool opt_redzone;
extern bool opt_utrace;
extern bool opt_xmalloc;
extern bool opt_zero;
extern size_t opt_narenas;

extern bool in_valgrind;

/* Number of CPUs. */
extern unsigned ncpus;

/* Protects arenas initialization (arenas, narenas_total). */
extern malloc_mutex_t arenas_lock;
/*
 * Arenas that are used to service external requests.  Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 *
 * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
 * arenas.  arenas[narenas_auto..narenas_total) are only used if the application
 * takes some action to create them and allocate from them.
 */
extern arena_t **arenas;
extern unsigned narenas_total;
extern unsigned narenas_auto; /* Read-only after initialization. */

arena_t *arenas_extend(unsigned ind);
void arenas_cleanup(void *arg);
arena_t *choose_arena_hard(void);
void jemalloc_prefork(void);
void jemalloc_postfork_parent(void);
void jemalloc_postfork_child(void);

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_EXTERNS
/******************************************************************************/
#define JEMALLOC_H_INLINES

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"

#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), arenas, arena_t *)

size_t s2u(size_t size);
size_t sa2u(size_t size, size_t alignment);
unsigned narenas_total_get(void);
arena_t *choose_arena(arena_t *arena);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
/*
 * Map of pthread_self() --> arenas[???], used for selecting an arena to use
 * for allocations.
 */
malloc_tsd_externs(arenas, arena_t *)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, arenas, arena_t *, NULL,
    arenas_cleanup)

/*
 * Compute usable size that would result from allocating an object with the
 * specified size.
 */
JEMALLOC_ALWAYS_INLINE size_t
s2u(size_t size)
{

        if (size <= SMALL_MAXCLASS)
                return (arena_bin_info[SMALL_SIZE2BIN(size)].reg_size);
        if (size <= arena_maxclass)
                return (PAGE_CEILING(size));
        return (CHUNK_CEILING(size));
}
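
/*
 * Worked example, assuming 4 KiB pages, a 16-byte quantum, and the default
 * size classes: s2u(100) returns 112 (the smallest small size class that
 * can hold 100 bytes), s2u(5000) returns 8192 (PAGE_CEILING(5000)), and
 * any size above arena_maxclass is rounded up to a multiple of the chunk
 * size by CHUNK_CEILING().
 */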

/*
 * Compute usable size that would result from allocating an object with the
 * specified size and alignment.
 */
JEMALLOC_ALWAYS_INLINE size_t
sa2u(size_t size, size_t alignment)
{
        size_t usize;

        assert(alignment != 0 && ((alignment - 1) & alignment) == 0);

        /*
         * Round size up to the nearest multiple of alignment.
         *
         * This done, we can take advantage of the fact that for each small
         * size class, every object is aligned at the smallest power of two
         * that is non-zero in the base two representation of the size.  For
         * example:
         *
         *   Size |   Base 2 | Minimum alignment
         *   -----+----------+------------------
         *     96 |  1100000 |                32
         *    144 | 10100000 |                32
         *    192 | 11000000 |                64
         */
        usize = ALIGNMENT_CEILING(size, alignment);
        /*
         * (usize < size) protects against the combination of maximal
         * alignment and size greater than maximal alignment.
         */
        if (usize < size) {
                /* size_t overflow. */
                return (0);
        }

        if (usize <= arena_maxclass && alignment <= PAGE) {
                if (usize <= SMALL_MAXCLASS)
                        return (arena_bin_info[SMALL_SIZE2BIN(usize)].reg_size);
                return (PAGE_CEILING(usize));
        } else {
                size_t run_size;

                /*
                 * We can't achieve subpage alignment, so round up alignment
                 * permanently; it makes later calculations simpler.
                 */
                alignment = PAGE_CEILING(alignment);
                usize = PAGE_CEILING(size);
                /*
                 * (usize < size) protects against very large sizes within
                 * PAGE of SIZE_T_MAX.
                 *
                 * (usize + alignment < usize) protects against the
                 * combination of maximal alignment and usize large enough
                 * to cause overflow.  This is similar to the first overflow
                 * check above, but it needs to be repeated due to the new
                 * usize value, which may now be *equal* to maximal
                 * alignment, whereas before we only detected overflow if the
                 * original size was *greater* than maximal alignment.
                 */
                if (usize < size || usize + alignment < usize) {
                        /* size_t overflow. */
                        return (0);
                }

                /*
                 * Calculate the size of the over-size run that arena_palloc()
                 * would need to allocate in order to guarantee the alignment.
                 * If the run wouldn't fit within a chunk, round up to a huge
                 * allocation size.
                 */
                run_size = usize + alignment - PAGE;
                if (run_size <= arena_maxclass)
                        return (PAGE_CEILING(usize));
                return (CHUNK_CEILING(usize));
        }
}

JEMALLOC_INLINE unsigned
narenas_total_get(void)
{
        unsigned narenas;

        malloc_mutex_lock(&arenas_lock);
        narenas = narenas_total;
        malloc_mutex_unlock(&arenas_lock);

        return (narenas);
}

/* Choose an arena based on a per-thread value. */
JEMALLOC_INLINE arena_t *
choose_arena(arena_t *arena)
{
        arena_t *ret;

        if (arena != NULL)
                return (arena);

        if ((ret = *arenas_tsd_get()) == NULL) {
                ret = choose_arena_hard();
                assert(ret != NULL);
        }

        return (ret);
}
#endif

#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/rtree.h"
/*
 * Include arena.h twice in order to resolve circular dependencies with
 * tcache.h.
 */
#define JEMALLOC_ARENA_INLINE_A
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_A
#include "jemalloc/internal/tcache.h"
#define JEMALLOC_ARENA_INLINE_B
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_B
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"

#ifndef JEMALLOC_ENABLE_INLINE
void *imalloct(size_t size, bool try_tcache, arena_t *arena);
void *imalloc(size_t size);
void *icalloct(size_t size, bool try_tcache, arena_t *arena);
void *icalloc(size_t size);
void *ipalloct(size_t usize, size_t alignment, bool zero, bool try_tcache,
    arena_t *arena);
void *ipalloc(size_t usize, size_t alignment, bool zero);
size_t isalloc(const void *ptr, bool demote);
size_t ivsalloc(const void *ptr, bool demote);
size_t u2rz(size_t usize);
size_t p2rz(const void *ptr);
void idalloct(void *ptr, bool try_tcache);
void idalloc(void *ptr);
void iqalloct(void *ptr, bool try_tcache);
void iqalloc(void *ptr);
void *iralloct_realign(void *ptr, size_t oldsize, size_t size, size_t extra,
    size_t alignment, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc,
    arena_t *arena);
void *iralloct(void *ptr, size_t size, size_t extra, size_t alignment,
    bool zero, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena);
void *iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
    bool zero);
bool ixalloc(void *ptr, size_t size, size_t extra, size_t alignment,
    bool zero);
malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t)
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_ALWAYS_INLINE void *
imalloct(size_t size, bool try_tcache, arena_t *arena)
{

        assert(size != 0);

        if (size <= arena_maxclass)
                return (arena_malloc(arena, size, false, try_tcache));
        else
                return (huge_malloc(size, false, huge_dss_prec_get(arena)));
}

JEMALLOC_ALWAYS_INLINE void *
imalloc(size_t size)
{

        return (imalloct(size, true, NULL));
}

JEMALLOC_ALWAYS_INLINE void *
icalloct(size_t size, bool try_tcache, arena_t *arena)
{

        if (size <= arena_maxclass)
                return (arena_malloc(arena, size, true, try_tcache));
        else
                return (huge_malloc(size, true, huge_dss_prec_get(arena)));
}

JEMALLOC_ALWAYS_INLINE void *
icalloc(size_t size)
{

        return (icalloct(size, true, NULL));
}

JEMALLOC_ALWAYS_INLINE void *
ipalloct(size_t usize, size_t alignment, bool zero, bool try_tcache,
    arena_t *arena)
{
        void *ret;

        assert(usize != 0);
        assert(usize == sa2u(usize, alignment));

        if (usize <= arena_maxclass && alignment <= PAGE)
                ret = arena_malloc(arena, usize, zero, try_tcache);
        else {
                if (usize <= arena_maxclass) {
                        ret = arena_palloc(choose_arena(arena), usize,
                            alignment, zero);
                } else if (alignment <= chunksize)
                        ret = huge_malloc(usize, zero,
                            huge_dss_prec_get(arena));
                else
                        ret = huge_palloc(usize, alignment, zero,
                            huge_dss_prec_get(arena));
        }

        assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
        return (ret);
}

JEMALLOC_ALWAYS_INLINE void *
ipalloc(size_t usize, size_t alignment, bool zero)
{

        return (ipalloct(usize, alignment, zero, true, NULL));
}

/*
 * Typical usage:
 *   void *ptr = [...]
 *   size_t sz = isalloc(ptr, config_prof);
 */
JEMALLOC_ALWAYS_INLINE size_t
isalloc(const void *ptr, bool demote)
{
        size_t ret;
        arena_chunk_t *chunk;

        assert(ptr != NULL);
        /* Demotion only makes sense if config_prof is true. */
        assert(config_prof || demote == false);

        chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
        if (chunk != ptr)
                ret = arena_salloc(ptr, demote);
        else
                ret = huge_salloc(ptr);

        return (ret);
}

JEMALLOC_ALWAYS_INLINE size_t
ivsalloc(const void *ptr, bool demote)
{

        /* Return 0 if ptr is not within a chunk managed by jemalloc. */
        if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == 0)
                return (0);

        return (isalloc(ptr, demote));
}

JEMALLOC_INLINE size_t
u2rz(size_t usize)
{
        size_t ret;

        if (usize <= SMALL_MAXCLASS) {
                size_t binind = SMALL_SIZE2BIN(usize);
                ret = arena_bin_info[binind].redzone_size;
        } else
                ret = 0;

        return (ret);
}

JEMALLOC_INLINE size_t
p2rz(const void *ptr)
{
        size_t usize = isalloc(ptr, false);

        return (u2rz(usize));
}

JEMALLOC_ALWAYS_INLINE void
idalloct(void *ptr, bool try_tcache)
{
        arena_chunk_t *chunk;

        assert(ptr != NULL);

        chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
        if (chunk != ptr)
                arena_dalloc(chunk, ptr, try_tcache);
        else
                huge_dalloc(ptr, true);
}

JEMALLOC_ALWAYS_INLINE void
idalloc(void *ptr)
{

        idalloct(ptr, true);
}

JEMALLOC_ALWAYS_INLINE void
iqalloct(void *ptr, bool try_tcache)
{

        if (config_fill && opt_quarantine)
                quarantine(ptr);
        else
                idalloct(ptr, try_tcache);
}

JEMALLOC_ALWAYS_INLINE void
iqalloc(void *ptr)
{

        iqalloct(ptr, true);
}
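
/*
 * Naming note: the trailing-'t' variants above (imalloct(), idalloct(),
 * iqalloct(), ...) expose the try_tcache/arena knobs, while the short
 * forms are thin wrappers that supply the defaults, e.g.:
 *
 *   void *p = imalloc(size);   ...equivalent to imalloct(size, true, NULL)
 *   idalloc(p);                ...equivalent to idalloct(p, true)
 */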

JEMALLOC_ALWAYS_INLINE void *
iralloct_realign(void *ptr, size_t oldsize, size_t size, size_t extra,
    size_t alignment, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc,
    arena_t *arena)
{
        void *p;
        size_t usize, copysize;

        usize = sa2u(size + extra, alignment);
        if (usize == 0)
                return (NULL);
        p = ipalloct(usize, alignment, zero, try_tcache_alloc, arena);
        if (p == NULL) {
                if (extra == 0)
                        return (NULL);
                /* Try again, without extra this time. */
                usize = sa2u(size, alignment);
                if (usize == 0)
                        return (NULL);
                p = ipalloct(usize, alignment, zero, try_tcache_alloc, arena);
                if (p == NULL)
                        return (NULL);
        }
        /*
         * Copy at most size bytes (not size+extra), since the caller has no
         * expectation that the extra bytes will be reliably preserved.
         */
        copysize = (size < oldsize) ? size : oldsize;
        memcpy(p, ptr, copysize);
        iqalloct(ptr, try_tcache_dalloc);
        return (p);
}

JEMALLOC_ALWAYS_INLINE void *
iralloct(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
    bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena)
{
        size_t oldsize;

        assert(ptr != NULL);
        assert(size != 0);

        oldsize = isalloc(ptr, config_prof);

        if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
            != 0) {
                /*
                 * Existing object alignment is inadequate; allocate new space
                 * and copy.
                 */
                return (iralloct_realign(ptr, oldsize, size, extra, alignment,
                    zero, try_tcache_alloc, try_tcache_dalloc, arena));
        }

        if (size + extra <= arena_maxclass) {
                return (arena_ralloc(arena, ptr, oldsize, size, extra,
                    alignment, zero, try_tcache_alloc,
                    try_tcache_dalloc));
        } else {
                return (huge_ralloc(ptr, oldsize, size, extra,
                    alignment, zero, try_tcache_dalloc,
                    huge_dss_prec_get(arena)));
        }
}

JEMALLOC_ALWAYS_INLINE void *
iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero)
{

        return (iralloct(ptr, size, extra, alignment, zero, true, true, NULL));
}

JEMALLOC_ALWAYS_INLINE bool
ixalloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero)
{
        size_t oldsize;

        assert(ptr != NULL);
        assert(size != 0);

        oldsize = isalloc(ptr, config_prof);
        if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
            != 0) {
                /* Existing object alignment is inadequate. */
                return (true);
        }

        if (size <= arena_maxclass)
                return (arena_ralloc_no_move(ptr, oldsize, size, extra, zero));
        else
                return (huge_ralloc_no_move(ptr, oldsize, size, extra));
}

/*
 * Note that ixalloc() follows the jemalloc convention of returning false on
 * success: it attempts to resize *in place* and returns true when the
 * existing object cannot satisfy the request (e.g. inadequate alignment),
 * in which case the caller is expected to fall back to iralloc() and move
 * the object.
 */

malloc_tsd_externs(thread_allocated, thread_allocated_t)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, thread_allocated, thread_allocated_t,
    THREAD_ALLOCATED_INITIALIZER, malloc_tsd_no_cleanup)
#endif

#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_INLINES
/******************************************************************************/
#endif /* JEMALLOC_INTERNAL_H */