jemalloc_internal.h.in revision 609ae595f0358157b19311b0f9f9591db7cee705
#ifndef JEMALLOC_INTERNAL_H
#define JEMALLOC_INTERNAL_H
#include <math.h>
#ifdef _WIN32
#  include <windows.h>
#  define ENOENT ERROR_PATH_NOT_FOUND
#  define EINVAL ERROR_BAD_ARGUMENTS
#  define EAGAIN ERROR_OUTOFMEMORY
#  define EPERM ERROR_WRITE_FAULT
#  define EFAULT ERROR_INVALID_ADDRESS
#  define ENOMEM ERROR_NOT_ENOUGH_MEMORY
#  undef ERANGE
#  define ERANGE ERROR_INVALID_DATA
#else
#  include <sys/param.h>
#  include <sys/mman.h>
#  include <sys/syscall.h>
#  if !defined(SYS_write) && defined(__NR_write)
#    define SYS_write __NR_write
#  endif
#  include <sys/uio.h>
#  include <pthread.h>
#  include <errno.h>
#endif
#include <sys/types.h>

#include <limits.h>
#ifndef SIZE_T_MAX
#  define SIZE_T_MAX	SIZE_MAX
#endif
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>
#ifndef offsetof
#  define offsetof(type, member)	((size_t)&(((type *)NULL)->member))
#endif
#include <inttypes.h>
#include <string.h>
#include <strings.h>
#include <ctype.h>
#ifdef _MSC_VER
#  include <io.h>
typedef intptr_t ssize_t;
#  define PATH_MAX 1024
#  define STDERR_FILENO 2
#  define __func__ __FUNCTION__
/* Disable warnings about deprecated system functions */
#  pragma warning(disable: 4996)
#else
#  include <unistd.h>
#endif
#include <fcntl.h>

#define JEMALLOC_NO_DEMANGLE
#include "../jemalloc@install_suffix@.h"

#ifdef JEMALLOC_UTRACE
#include <sys/ktrace.h>
#endif

#ifdef JEMALLOC_VALGRIND
#include <valgrind/valgrind.h>
#include <valgrind/memcheck.h>
#endif

#include "jemalloc/internal/private_namespace.h"

#ifdef JEMALLOC_CC_SILENCE
#define UNUSED JEMALLOC_ATTR(unused)
#else
#define UNUSED
#endif

static const bool config_debug =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
static const bool config_dss =
#ifdef JEMALLOC_DSS
    true
#else
    false
#endif
    ;
static const bool config_fill =
#ifdef JEMALLOC_FILL
    true
#else
    false
#endif
    ;
static const bool config_lazy_lock =
#ifdef JEMALLOC_LAZY_LOCK
    true
#else
    false
#endif
    ;
static const bool config_prof =
#ifdef JEMALLOC_PROF
    true
#else
    false
#endif
    ;
static const bool config_prof_libgcc =
#ifdef JEMALLOC_PROF_LIBGCC
    true
#else
    false
#endif
    ;
static const bool config_prof_libunwind =
#ifdef JEMALLOC_PROF_LIBUNWIND
    true
#else
    false
#endif
    ;
static const bool config_mremap =
#ifdef JEMALLOC_MREMAP
    true
#else
    false
#endif
    ;
static const bool config_munmap =
#ifdef JEMALLOC_MUNMAP
    true
#else
    false
#endif
    ;
static const bool config_stats =
#ifdef JEMALLOC_STATS
    true
#else
    false
#endif
    ;
static const bool config_tcache =
#ifdef JEMALLOC_TCACHE
    true
#else
    false
#endif
    ;
static const bool config_tls =
#ifdef JEMALLOC_TLS
    true
#else
    false
#endif
    ;
static const bool config_utrace =
#ifdef JEMALLOC_UTRACE
    true
#else
    false
#endif
    ;
static const bool config_valgrind =
#ifdef JEMALLOC_VALGRIND
    true
#else
    false
#endif
    ;
static const bool config_xmalloc =
#ifdef JEMALLOC_XMALLOC
    true
#else
    false
#endif
    ;
static const bool config_ivsalloc =
#ifdef JEMALLOC_IVSALLOC
    true
#else
    false
#endif
    ;
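/*
 * Illustrative note (not part of the original header): each config_* flag is
 * a static const bool rather than a bare #define so that code conditional on
 * a feature is always parsed and type-checked, while the optimizer still
 * discards the dead branch.  A minimal usage sketch, with a hypothetical
 * counter standing in for real jemalloc state:
 *
 *	if (config_stats)
 *		counter++;	// constant-folded away when JEMALLOC_STATS
 *				// is undefined, but still compiled
 */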
#ifdef JEMALLOC_ATOMIC9
#include <machine/atomic.h>
#endif

#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
#include <libkern/OSAtomic.h>
#endif

#ifdef JEMALLOC_ZONE
#include <mach/mach_error.h>
#include <mach/mach_init.h>
#include <mach/vm_map.h>
#include <malloc/malloc.h>
#endif

#define RB_COMPACT
#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/qr.h"
#include "jemalloc/internal/ql.h"

/*
 * jemalloc can conceptually be broken into components (arena, tcache, etc.),
 * but there are circular dependencies that cannot be broken without
 * substantial performance degradation.  In order to reduce the effect on
 * visual code flow, read the header files in multiple passes, with one of the
 * following cpp variables defined during each pass:
 *
 *   JEMALLOC_H_TYPES   : Preprocessor-defined constants and pseudo-opaque
 *                        data types.
 *   JEMALLOC_H_STRUCTS : Data structures.
 *   JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
 *   JEMALLOC_H_INLINES : Inline functions.
 */
/******************************************************************************/
#define JEMALLOC_H_TYPES

#define ALLOCM_LG_ALIGN_MASK	((int)0x3f)

#define ZU(z)	((size_t)z)

#ifndef __DECONST
#  define __DECONST(type, var)	((type)(uintptr_t)(const void *)(var))
#endif

#ifdef JEMALLOC_DEBUG
   /* Disable inlining to make debugging easier. */
#  define JEMALLOC_INLINE
#  define inline
#else
#  define JEMALLOC_ENABLE_INLINE
#  define JEMALLOC_INLINE static inline
#  ifdef _MSC_VER
#    define inline _inline
#  endif
#endif

/* Smallest size class to support. */
#define LG_TINY_MIN	3
#define TINY_MIN	(1U << LG_TINY_MIN)

/*
 * Minimum alignment of allocations is 2^LG_QUANTUM bytes (ignoring tiny size
 * classes).
 */
#ifndef LG_QUANTUM
#  if (defined(__i386__) || defined(_M_IX86))
#    define LG_QUANTUM	4
#  endif
#  ifdef __ia64__
#    define LG_QUANTUM	4
#  endif
#  ifdef __alpha__
#    define LG_QUANTUM	4
#  endif
#  ifdef __sparc64__
#    define LG_QUANTUM	4
#  endif
#  if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64))
#    define LG_QUANTUM	4
#  endif
#  ifdef __arm__
#    define LG_QUANTUM	3
#  endif
#  ifdef __hppa__
#    define LG_QUANTUM	4
#  endif
#  ifdef __mips__
#    define LG_QUANTUM	3
#  endif
#  ifdef __powerpc__
#    define LG_QUANTUM	4
#  endif
#  ifdef __s390x__
#    define LG_QUANTUM	4
#  endif
#  ifdef __SH4__
#    define LG_QUANTUM	4
#  endif
#  ifdef __tile__
#    define LG_QUANTUM	4
#  endif
#  ifndef LG_QUANTUM
#    error "No LG_QUANTUM definition for architecture; specify via CPPFLAGS"
#  endif
#endif

#define QUANTUM		((size_t)(1U << LG_QUANTUM))
#define QUANTUM_MASK	(QUANTUM - 1)

/* Return the smallest quantum multiple that is >= a. */
#define QUANTUM_CEILING(a)						\
	(((a) + QUANTUM_MASK) & ~QUANTUM_MASK)

#define LONG		((size_t)(1U << LG_SIZEOF_LONG))
#define LONG_MASK	(LONG - 1)

/* Return the smallest long multiple that is >= a. */
#define LONG_CEILING(a)							\
	(((a) + LONG_MASK) & ~LONG_MASK)

#define SIZEOF_PTR	(1U << LG_SIZEOF_PTR)
#define PTR_MASK	(SIZEOF_PTR - 1)

/* Return the smallest (void *) multiple that is >= a. */
#define PTR_CEILING(a)							\
	(((a) + PTR_MASK) & ~PTR_MASK)
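/*
 * Worked example (not from the original header): with LG_QUANTUM == 4,
 * QUANTUM == 16 and QUANTUM_MASK == 0xf, so
 *
 *	QUANTUM_CEILING(37) == (37 + 15) & ~15 == 52 & ~15 == 48
 *
 * i.e. the value is rounded up to the next multiple of 16.  The same
 * add-then-mask trick underlies LONG_CEILING and PTR_CEILING; it is valid
 * only because the alignment is a power of two.
 */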
/*
 * Maximum size of L1 cache line.  This is used to avoid cache line aliasing.
 * In addition, this controls the spacing of cacheline-spaced size classes.
 *
 * CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can
 * only handle raw constants.
 */
#define LG_CACHELINE	6
#define CACHELINE	64
#define CACHELINE_MASK	(CACHELINE - 1)

/* Return the smallest cacheline multiple that is >= s. */
#define CACHELINE_CEILING(s)						\
	(((s) + CACHELINE_MASK) & ~CACHELINE_MASK)

/* Page size.  STATIC_PAGE_SHIFT is determined by the configure script. */
#ifdef PAGE_MASK
#  undef PAGE_MASK
#endif
#define LG_PAGE		STATIC_PAGE_SHIFT
#define PAGE		((size_t)(1U << STATIC_PAGE_SHIFT))
#define PAGE_MASK	((size_t)(PAGE - 1))

/* Return the smallest pagesize multiple that is >= s. */
#define PAGE_CEILING(s)							\
	(((s) + PAGE_MASK) & ~PAGE_MASK)

/* Return the nearest aligned address at or below a. */
#define ALIGNMENT_ADDR2BASE(a, alignment)				\
	((void *)((uintptr_t)(a) & (-(alignment))))

/* Return the offset between a and the nearest aligned address at or below a. */
#define ALIGNMENT_ADDR2OFFSET(a, alignment)				\
	((size_t)((uintptr_t)(a) & (alignment - 1)))

/* Return the smallest alignment multiple that is >= s. */
#define ALIGNMENT_CEILING(s, alignment)					\
	(((s) + (alignment - 1)) & (-(alignment)))
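/*
 * Worked example (not from the original header): for an address whose
 * integer value is 0x1234 and alignment == 0x100,
 *
 *	ALIGNMENT_ADDR2BASE(0x1234, 0x100)   == 0x1200
 *	ALIGNMENT_ADDR2OFFSET(0x1234, 0x100) == 0x34
 *
 * Negating a power-of-two alignment yields the same bits as ~(alignment - 1)
 * in two's complement, so (-(alignment)) clears exactly the low
 * lg(alignment) bits.
 */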
/* Declare a variable-length array. */
#if __STDC_VERSION__ < 199901L
#  ifdef _MSC_VER
#    include <malloc.h>
#    define alloca _alloca
#  else
#    include <alloca.h>
#  endif
#  define VARIABLE_ARRAY(type, name, count)				\
	type *name = alloca(sizeof(type) * count)
#else
#  define VARIABLE_ARRAY(type, name, count) type name[count]
#endif

#ifdef JEMALLOC_VALGRIND
/*
 * The JEMALLOC_VALGRIND_*() macros must be macros rather than functions
 * so that when Valgrind reports errors, there are no extra stack frames
 * in the backtraces.
 *
 * The size that is reported to Valgrind must be consistent through a chain of
 * malloc..realloc..realloc calls.  Request size isn't recorded anywhere in
 * jemalloc, so it is critical that all callers of these macros provide usize
 * rather than request size.  As a result, buffer overflow detection is
 * technically weakened for the standard API, though it is generally accepted
 * practice to consider any extra bytes reported by malloc_usable_size() as
 * usable space.
 */
#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do {		\
	if (config_valgrind && opt_valgrind && cond)			\
		VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(ptr), zero);	\
} while (0)
#define JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize,	\
    old_rzsize, zero) do {						\
	if (config_valgrind && opt_valgrind) {				\
		size_t rzsize = p2rz(ptr);				\
									\
		if (ptr == old_ptr) {					\
			VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize,	\
			    usize, rzsize);				\
			if (zero && old_usize < usize) {		\
				VALGRIND_MAKE_MEM_DEFINED(		\
				    (void *)((uintptr_t)ptr +		\
				    old_usize), usize - old_usize);	\
			}						\
		} else {						\
			if (old_ptr != NULL) {				\
				VALGRIND_FREELIKE_BLOCK(old_ptr,	\
				    old_rzsize);			\
			}						\
			if (ptr != NULL) {				\
				size_t copy_size = (old_usize < usize)	\
				    ? old_usize : usize;		\
				size_t tail_size = usize - copy_size;	\
				VALGRIND_MALLOCLIKE_BLOCK(ptr, usize,	\
				    rzsize, false);			\
				if (copy_size > 0) {			\
					VALGRIND_MAKE_MEM_DEFINED(ptr,	\
					    copy_size);			\
				}					\
				if (zero && tail_size > 0) {		\
					VALGRIND_MAKE_MEM_DEFINED(	\
					    (void *)((uintptr_t)ptr +	\
					    copy_size), tail_size);	\
				}					\
			}						\
		}							\
	}								\
} while (0)
#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {			\
	if (config_valgrind && opt_valgrind)				\
		VALGRIND_FREELIKE_BLOCK(ptr, rzsize);			\
} while (0)
#else
#define RUNNING_ON_VALGRIND	((unsigned)0)
#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed)
#define VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB)
#define VALGRIND_FREELIKE_BLOCK(addr, rzB)
#define VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr, _qzz_len)
#define VALGRIND_MAKE_MEM_DEFINED(_qzz_addr, _qzz_len)
#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero)
#define JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize,	\
    old_rzsize, zero)
#define JEMALLOC_VALGRIND_FREE(ptr, rzsize)
#endif
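/*
 * Usage sketch (not from the original header): a caller that has just
 * allocated a block and computed its usable size would report it as
 *
 *	ret = imalloc(usize);
 *	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
 *
 * and pair it with JEMALLOC_VALGRIND_FREE(ptr, rzsize) before the memory is
 * actually released.  Passing usize rather than the request size keeps the
 * reported sizes consistent across a malloc..realloc chain, as required by
 * the comment above.
 */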
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_TYPES
/******************************************************************************/
#define JEMALLOC_H_STRUCTS

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

typedef struct {
	uint64_t	allocated;
	uint64_t	deallocated;
} thread_allocated_t;
/*
 * The JEMALLOC_CONCAT() wrapper is necessary to pass {0, 0} via a cpp macro
 * argument.
 */
#define THREAD_ALLOCATED_INITIALIZER	JEMALLOC_CONCAT({0, 0})
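/*
 * Illustrative note (not part of the original header): writing
 * malloc_tsd_funcs(..., {0, 0}, ...) directly would not preprocess, because
 * macro arguments are split on commas that are not protected by parentheses,
 * so the initializer would arrive as the two fragments "{0" and "0}".
 * JEMALLOC_CONCAT stitches the fragments back together; it is assumed here
 * to be a variadic pass-through defined in the public jemalloc headers along
 * the lines of
 *
 *	#define JEMALLOC_CONCAT(...) __VA_ARGS__
 */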
#undef JEMALLOC_H_STRUCTS
/******************************************************************************/
#define JEMALLOC_H_EXTERNS

extern bool	opt_abort;
extern bool	opt_junk;
extern size_t	opt_quarantine;
extern bool	opt_redzone;
extern bool	opt_utrace;
extern bool	opt_valgrind;
extern bool	opt_xmalloc;
extern bool	opt_zero;
extern size_t	opt_narenas;

/* Number of CPUs. */
extern unsigned	ncpus;

/* Protects arenas initialization (arenas, arenas_total). */
extern malloc_mutex_t	arenas_lock;
/*
 * Arenas that are used to service external requests.  Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 *
 * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
 * arenas.  arenas[narenas_auto..narenas_total) are only used if the
 * application takes some action to create them and allocate from them.
 */
extern arena_t	**arenas;
extern unsigned	narenas_total;
extern unsigned	narenas_auto;	/* Read-only after initialization. */

arena_t	*arenas_extend(unsigned ind);
void	arenas_cleanup(void *arg);
arena_t	*choose_arena_hard(void);
void	jemalloc_prefork(void);
void	jemalloc_postfork_parent(void);
void	jemalloc_postfork_child(void);

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_EXTERNS
/******************************************************************************/
#define JEMALLOC_H_INLINES

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"

#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), arenas, arena_t *)

size_t	s2u(size_t size);
size_t	sa2u(size_t size, size_t alignment);
unsigned	narenas_total_get(void);
arena_t	*choose_arena(arena_t *arena);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
/*
 * Map of pthread_self() --> arenas[???], used for selecting an arena to use
 * for allocations.
 */
malloc_tsd_externs(arenas, arena_t *)
malloc_tsd_funcs(JEMALLOC_INLINE, arenas, arena_t *, NULL, arenas_cleanup)

/*
 * Compute usable size that would result from allocating an object with the
 * specified size.
 */
JEMALLOC_INLINE size_t
s2u(size_t size)
{

	if (size <= SMALL_MAXCLASS)
		return (arena_bin_info[SMALL_SIZE2BIN(size)].reg_size);
	if (size <= arena_maxclass)
		return (PAGE_CEILING(size));
	return (CHUNK_CEILING(size));
}

/*
 * Compute usable size that would result from allocating an object with the
 * specified size and alignment.
 */
JEMALLOC_INLINE size_t
sa2u(size_t size, size_t alignment)
{
	size_t usize;

	assert(alignment != 0 && ((alignment - 1) & alignment) == 0);

	/*
	 * Round size up to the nearest multiple of alignment.
	 *
	 * This done, we can take advantage of the fact that for each small
	 * size class, every object is aligned at the smallest power of two
	 * that is non-zero in the base two representation of the size.  For
	 * example:
	 *
	 *   Size |   Base 2 | Minimum alignment
	 *   -----+----------+------------------
	 *     96 |  1100000 |                32
	 *    160 | 10100000 |                32
	 *    192 | 11000000 |                64
	 */
	usize = ALIGNMENT_CEILING(size, alignment);
	/*
	 * (usize < size) protects against the combination of maximal
	 * alignment and size greater than maximal alignment.
	 */
	if (usize < size) {
		/* size_t overflow. */
		return (0);
	}

	if (usize <= arena_maxclass && alignment <= PAGE) {
		if (usize <= SMALL_MAXCLASS)
			return (arena_bin_info[SMALL_SIZE2BIN(usize)].reg_size);
		return (PAGE_CEILING(usize));
	} else {
		size_t run_size;

		/*
		 * We can't achieve subpage alignment, so round up alignment
		 * permanently; it makes later calculations simpler.
		 */
		alignment = PAGE_CEILING(alignment);
		usize = PAGE_CEILING(size);
		/*
		 * (usize < size) protects against very large sizes within
		 * PAGE of SIZE_T_MAX.
		 *
		 * (usize + alignment < usize) protects against the
		 * combination of maximal alignment and usize large enough
		 * to cause overflow.  This is similar to the first overflow
		 * check above, but it needs to be repeated due to the new
		 * usize value, which may now be *equal* to maximal
		 * alignment, whereas before we only detected overflow if the
		 * original size was *greater* than maximal alignment.
		 */
		if (usize < size || usize + alignment < usize) {
			/* size_t overflow. */
			return (0);
		}

		/*
		 * Calculate the size of the over-size run that arena_palloc()
		 * would need to allocate in order to guarantee the alignment.
		 * If the run wouldn't fit within a chunk, round up to a huge
		 * allocation size.
		 */
		run_size = usize + alignment - PAGE;
		if (run_size <= arena_maxclass)
			return (PAGE_CEILING(usize));
		return (CHUNK_CEILING(usize));
	}
}
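/*
 * Worked example (not from the original header): for size == 70 and
 * alignment == 32, ALIGNMENT_CEILING(70, 32) == 96.  Since 96 == 0b1100000,
 * every object in the 96-byte small size class is already 32-byte aligned,
 * so sa2u() simply returns that class's reg_size.  The over-sized run
 * calculation (run_size = usize + alignment - PAGE) only comes into play
 * when alignment exceeds PAGE or usize exceeds arena_maxclass.
 */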
JEMALLOC_INLINE unsigned
narenas_total_get(void)
{
	unsigned narenas;

	malloc_mutex_lock(&arenas_lock);
	narenas = narenas_total;
	malloc_mutex_unlock(&arenas_lock);

	return (narenas);
}

/* Choose an arena based on a per-thread value. */
JEMALLOC_INLINE arena_t *
choose_arena(arena_t *arena)
{
	arena_t *ret;

	if (arena != NULL)
		return (arena);

	if ((ret = *arenas_tsd_get()) == NULL) {
		ret = choose_arena_hard();
		assert(ret != NULL);
	}

	return (ret);
}
#endif

#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/rtree.h"
/*
 * Include arena.h twice in order to resolve circular dependencies with
 * tcache.h.
 */
#define JEMALLOC_ARENA_INLINE_A
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_A
#include "jemalloc/internal/tcache.h"
#define JEMALLOC_ARENA_INLINE_B
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_B
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"

#ifndef JEMALLOC_ENABLE_INLINE
void	*imallocx(size_t size, bool try_tcache, arena_t *arena);
void	*imalloc(size_t size);
void	*icallocx(size_t size, bool try_tcache, arena_t *arena);
void	*icalloc(size_t size);
void	*ipallocx(size_t usize, size_t alignment, bool zero, bool try_tcache,
    arena_t *arena);
void	*ipalloc(size_t usize, size_t alignment, bool zero);
size_t	isalloc(const void *ptr, bool demote);
size_t	ivsalloc(const void *ptr, bool demote);
size_t	u2rz(size_t usize);
size_t	p2rz(const void *ptr);
void	idallocx(void *ptr, bool try_tcache);
void	idalloc(void *ptr);
void	iqallocx(void *ptr, bool try_tcache);
void	iqalloc(void *ptr);
void	*irallocx(void *ptr, size_t size, size_t extra, size_t alignment,
    bool zero, bool no_move, bool try_tcache_alloc, bool try_tcache_dalloc,
    arena_t *arena);
void	*iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
    bool zero, bool no_move);
malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t)
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_INLINE void *
imallocx(size_t size, bool try_tcache, arena_t *arena)
{

	assert(size != 0);

	if (size <= arena_maxclass)
		return (arena_malloc(arena, size, false, try_tcache));
	else
		return (huge_malloc(size, false));
}

JEMALLOC_INLINE void *
imalloc(size_t size)
{

	return (imallocx(size, true, NULL));
}

JEMALLOC_INLINE void *
icallocx(size_t size, bool try_tcache, arena_t *arena)
{

	if (size <= arena_maxclass)
		return (arena_malloc(arena, size, true, try_tcache));
	else
		return (huge_malloc(size, true));
}

JEMALLOC_INLINE void *
icalloc(size_t size)
{

	return (icallocx(size, true, NULL));
}

JEMALLOC_INLINE void *
ipallocx(size_t usize, size_t alignment, bool zero, bool try_tcache,
    arena_t *arena)
{
	void *ret;

	assert(usize != 0);
	assert(usize == sa2u(usize, alignment));

	if (usize <= arena_maxclass && alignment <= PAGE)
		ret = arena_malloc(arena, usize, zero, try_tcache);
	else {
		if (usize <= arena_maxclass) {
			ret = arena_palloc(choose_arena(arena), usize,
			    alignment, zero);
		} else if (alignment <= chunksize)
			ret = huge_malloc(usize, zero);
		else
			ret = huge_palloc(usize, alignment, zero);
	}

	assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
	return (ret);
}

JEMALLOC_INLINE void *
ipalloc(size_t usize, size_t alignment, bool zero)
{

	return (ipallocx(usize, alignment, zero, true, NULL));
}
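/*
 * Call-pattern sketch (not from the original header): ipallocx() asserts
 * usize == sa2u(usize, alignment), so callers canonicalize the request size
 * first and treat a zero result as size_t overflow:
 *
 *	size_t usize = sa2u(size, alignment);
 *	if (usize == 0)
 *		return (NULL);	// overflow
 *	void *p = ipalloc(usize, alignment, false);
 */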
/*
 * Typical usage:
 *   void *ptr = [...]
 *   size_t sz = isalloc(ptr, config_prof);
 */
JEMALLOC_INLINE size_t
isalloc(const void *ptr, bool demote)
{
	size_t ret;
	arena_chunk_t *chunk;

	assert(ptr != NULL);
	/* Demotion only makes sense if config_prof is true. */
	assert(config_prof || demote == false);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr)
		ret = arena_salloc(ptr, demote);
	else
		ret = huge_salloc(ptr);

	return (ret);
}

JEMALLOC_INLINE size_t
ivsalloc(const void *ptr, bool demote)
{

	/* Return 0 if ptr is not within a chunk managed by jemalloc. */
	if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == NULL)
		return (0);

	return (isalloc(ptr, demote));
}

JEMALLOC_INLINE size_t
u2rz(size_t usize)
{
	size_t ret;

	if (usize <= SMALL_MAXCLASS) {
		size_t binind = SMALL_SIZE2BIN(usize);
		ret = arena_bin_info[binind].redzone_size;
	} else
		ret = 0;

	return (ret);
}

JEMALLOC_INLINE size_t
p2rz(const void *ptr)
{
	size_t usize = isalloc(ptr, false);

	return (u2rz(usize));
}

JEMALLOC_INLINE void
idallocx(void *ptr, bool try_tcache)
{
	arena_chunk_t *chunk;

	assert(ptr != NULL);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr)
		arena_dalloc(chunk->arena, chunk, ptr, try_tcache);
	else
		huge_dalloc(ptr, true);
}

JEMALLOC_INLINE void
idalloc(void *ptr)
{

	idallocx(ptr, true);
}

JEMALLOC_INLINE void
iqallocx(void *ptr, bool try_tcache)
{

	if (config_fill && opt_quarantine)
		quarantine(ptr);
	else
		idallocx(ptr, try_tcache);
}

JEMALLOC_INLINE void
iqalloc(void *ptr)
{

	iqallocx(ptr, true);
}

JEMALLOC_INLINE void *
irallocx(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
    bool no_move, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena)
{
	void *ret;
	size_t oldsize;

	assert(ptr != NULL);
	assert(size != 0);

	oldsize = isalloc(ptr, config_prof);

	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
	    != 0) {
		size_t usize, copysize;

		/*
		 * Existing object alignment is inadequate; allocate new space
		 * and copy.
		 */
		if (no_move)
			return (NULL);
		usize = sa2u(size + extra, alignment);
		if (usize == 0)
			return (NULL);
		ret = ipallocx(usize, alignment, zero, try_tcache_alloc, arena);
		if (ret == NULL) {
			if (extra == 0)
				return (NULL);
			/* Try again, without extra this time. */
			usize = sa2u(size, alignment);
			if (usize == 0)
				return (NULL);
			ret = ipallocx(usize, alignment, zero,
			    try_tcache_alloc, arena);
			if (ret == NULL)
				return (NULL);
		}
		/*
		 * Copy at most size bytes (not size+extra), since the caller
		 * has no expectation that the extra bytes will be reliably
		 * preserved.
		 */
		copysize = (size < oldsize) ? size : oldsize;
		memcpy(ret, ptr, copysize);
		iqallocx(ptr, try_tcache_dalloc);
		return (ret);
	}

	if (no_move) {
		if (size <= arena_maxclass) {
			return (arena_ralloc_no_move(ptr, oldsize, size,
			    extra, zero));
		} else {
			return (huge_ralloc_no_move(ptr, oldsize, size,
			    extra));
		}
	} else {
		if (size + extra <= arena_maxclass) {
			return (arena_ralloc(arena, ptr, oldsize, size, extra,
			    alignment, zero, try_tcache_alloc,
			    try_tcache_dalloc));
		} else {
			return (huge_ralloc(ptr, oldsize, size, extra,
			    alignment, zero, try_tcache_dalloc));
		}
	}
}
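/*
 * Usage sketch (not from the original header): with no_move == true,
 * irallocx()/iralloc() act as a try-resize-in-place probe, returning NULL
 * whenever satisfying the request would require relocating the object:
 *
 *	if (iralloc(ptr, newsize, 0, 0, false, true) != NULL) {
 *		// resized in place; ptr remains valid at newsize
 *	}
 */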
JEMALLOC_INLINE void *
iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
    bool no_move)
{

	return (irallocx(ptr, size, extra, alignment, zero, no_move, true,
	    true, NULL));
}

malloc_tsd_externs(thread_allocated, thread_allocated_t)
malloc_tsd_funcs(JEMALLOC_INLINE, thread_allocated, thread_allocated_t,
    THREAD_ALLOCATED_INITIALIZER, malloc_tsd_no_cleanup)
#endif

#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_INLINES
/******************************************************************************/
#endif /* JEMALLOC_INTERNAL_H */