jemalloc_internal.h.in revision 8d5865eb578e99369382d90bdd1e557e5b233277
#ifndef JEMALLOC_INTERNAL_H
#define JEMALLOC_INTERNAL_H
#include <math.h>
#ifdef _WIN32
#  include <windows.h>
#  define ENOENT ERROR_PATH_NOT_FOUND
#  define EINVAL ERROR_BAD_ARGUMENTS
#  define EAGAIN ERROR_OUTOFMEMORY
#  define EPERM ERROR_WRITE_FAULT
#  define EFAULT ERROR_INVALID_ADDRESS
#  define ENOMEM ERROR_NOT_ENOUGH_MEMORY
#  undef ERANGE
#  define ERANGE ERROR_INVALID_DATA
#else
#  include <sys/param.h>
#  include <sys/mman.h>
#  include <sys/syscall.h>
#  if !defined(SYS_write) && defined(__NR_write)
#    define SYS_write __NR_write
#  endif
#  include <sys/uio.h>
#  include <pthread.h>
#  include <errno.h>
#endif
#include <sys/types.h>

#include <limits.h>
#ifndef SIZE_T_MAX
#  define SIZE_T_MAX SIZE_MAX
#endif
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>
#ifndef offsetof
#  define offsetof(type, member) ((size_t)&(((type *)NULL)->member))
#endif
#include <inttypes.h>
#include <string.h>
#include <strings.h>
#include <ctype.h>
#ifdef _MSC_VER
#  include <io.h>
typedef intptr_t ssize_t;
#  define PATH_MAX 1024
#  define STDERR_FILENO 2
#  define __func__ __FUNCTION__
/* Disable warnings about deprecated system functions */
#  pragma warning(disable: 4996)
#else
#  include <unistd.h>
#endif
#include <fcntl.h>

#define JEMALLOC_NO_DEMANGLE
#include "../jemalloc@install_suffix@.h"

#ifdef JEMALLOC_UTRACE
#include <sys/ktrace.h>
#endif

#ifdef JEMALLOC_VALGRIND
#include <valgrind/valgrind.h>
#include <valgrind/memcheck.h>
#endif

#include "jemalloc/internal/private_namespace.h"

#ifdef JEMALLOC_CC_SILENCE
#define UNUSED JEMALLOC_ATTR(unused)
#else
#define UNUSED
#endif

static const bool config_debug =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
static const bool config_dss =
#ifdef JEMALLOC_DSS
    true
#else
    false
#endif
    ;
static const bool config_fill =
#ifdef JEMALLOC_FILL
    true
#else
    false
#endif
    ;
static const bool config_lazy_lock =
#ifdef JEMALLOC_LAZY_LOCK
    true
#else
    false
#endif
    ;
static const bool config_prof =
#ifdef JEMALLOC_PROF
    true
#else
    false
#endif
    ;
static const bool config_prof_libgcc =
#ifdef JEMALLOC_PROF_LIBGCC
    true
#else
    false
#endif
    ;
static const bool config_prof_libunwind =
#ifdef JEMALLOC_PROF_LIBUNWIND
    true
#else
    false
#endif
    ;
static const bool config_munmap =
#ifdef JEMALLOC_MUNMAP
    true
#else
    false
#endif
    ;
static const bool config_stats =
#ifdef JEMALLOC_STATS
    true
#else
    false
#endif
    ;
static const bool config_tcache =
#ifdef JEMALLOC_TCACHE
    true
#else
    false
#endif
    ;
static const bool config_tls =
#ifdef JEMALLOC_TLS
    true
#else
    false
#endif
    ;
static const bool config_utrace =
#ifdef JEMALLOC_UTRACE
    true
#else
    false
#endif
    ;
static const bool config_valgrind =
#ifdef JEMALLOC_VALGRIND
    true
#else
    false
#endif
    ;
static const bool config_xmalloc =
#ifdef JEMALLOC_XMALLOC
    true
#else
    false
#endif
    ;
static const bool config_ivsalloc =
#ifdef JEMALLOC_IVSALLOC
    true
#else
    false
#endif
    ;

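/*
 * Usage sketch (illustrative): because the config_* flags are compile-time
 * constants, feature-specific code can be written as ordinary branches that
 * the compiler strips from builds where the feature is disabled, while the
 * code still gets type-checked in every configuration, e.g.:
 *
 *   if (config_stats)
 *       bin->stats.nrequests++;
 */
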
#ifdef JEMALLOC_ATOMIC9
#include <machine/atomic.h>
#endif

#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
#include <libkern/OSAtomic.h>
#endif

#ifdef JEMALLOC_ZONE
#include <mach/mach_error.h>
#include <mach/mach_init.h>
#include <mach/vm_map.h>
#include <malloc/malloc.h>
#endif

#define RB_COMPACT
#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/qr.h"
#include "jemalloc/internal/ql.h"

/*
 * jemalloc can conceptually be broken into components (arena, tcache, etc.),
 * but there are circular dependencies that cannot be broken without
 * substantial performance degradation.  In order to reduce the effect on
 * visual code flow, read the header files in multiple passes, with one of the
 * following cpp variables defined during each pass:
 *
 *   JEMALLOC_H_TYPES   : Preprocessor-defined constants and pseudo-opaque
 *                        data types.
 *   JEMALLOC_H_STRUCTS : Data structures.
 *   JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
 *   JEMALLOC_H_INLINES : Inline functions.
 */
/******************************************************************************/
#define JEMALLOC_H_TYPES

#define ALLOCM_LG_ALIGN_MASK ((int)0x3f)

#define ZU(z) ((size_t)z)

#ifndef __DECONST
#  define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
#endif

#ifdef JEMALLOC_DEBUG
   /* Disable inlining to make debugging easier. */
#  define JEMALLOC_INLINE
#  define inline
#else
#  define JEMALLOC_ENABLE_INLINE
#  define JEMALLOC_INLINE static inline
#  ifdef _MSC_VER
#    define inline _inline
#  endif
#endif

/* Smallest size class to support. */
#define LG_TINY_MIN 3
#define TINY_MIN (1U << LG_TINY_MIN)

/*
 * Minimum alignment of allocations is 2^LG_QUANTUM bytes (ignoring tiny size
 * classes).
 */
#ifndef LG_QUANTUM
#  if (defined(__i386__) || defined(_M_IX86))
#    define LG_QUANTUM 4
#  endif
#  ifdef __ia64__
#    define LG_QUANTUM 4
#  endif
#  ifdef __alpha__
#    define LG_QUANTUM 4
#  endif
#  ifdef __sparc64__
#    define LG_QUANTUM 4
#  endif
#  if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64))
#    define LG_QUANTUM 4
#  endif
#  ifdef __arm__
#    define LG_QUANTUM 3
#  endif
#  ifdef __mips__
#    define LG_QUANTUM 3
#  endif
#  ifdef __powerpc__
#    define LG_QUANTUM 4
#  endif
#  ifdef __s390x__
#    define LG_QUANTUM 4
#  endif
#  ifdef __SH4__
#    define LG_QUANTUM 4
#  endif
#  ifdef __tile__
#    define LG_QUANTUM 4
#  endif
#  ifndef LG_QUANTUM
#    error "No LG_QUANTUM definition for architecture; specify via CPPFLAGS"
#  endif
#endif

#define QUANTUM ((size_t)(1U << LG_QUANTUM))
#define QUANTUM_MASK (QUANTUM - 1)

/* Return the smallest quantum multiple that is >= a. */
#define QUANTUM_CEILING(a) \
    (((a) + QUANTUM_MASK) & ~QUANTUM_MASK)

#define LONG ((size_t)(1U << LG_SIZEOF_LONG))
#define LONG_MASK (LONG - 1)

/* Return the smallest long multiple that is >= a. */
#define LONG_CEILING(a) \
    (((a) + LONG_MASK) & ~LONG_MASK)

#define SIZEOF_PTR (1U << LG_SIZEOF_PTR)
#define PTR_MASK (SIZEOF_PTR - 1)

/* Return the smallest (void *) multiple that is >= a. */
#define PTR_CEILING(a) \
    (((a) + PTR_MASK) & ~PTR_MASK)

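/*
 * Worked example (illustrative; assumes LG_QUANTUM == 4, so QUANTUM == 16):
 *
 *   QUANTUM_CEILING(1)  == 16
 *   QUANTUM_CEILING(16) == 16
 *   QUANTUM_CEILING(17) == 32
 *
 * Because QUANTUM is a power of two, adding QUANTUM_MASK and then clearing
 * the low bits rounds up without a division.  LONG_CEILING() and
 * PTR_CEILING() use the same identity.
 */
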
/*
 * Maximum size of L1 cache line.  This is used to avoid cache line aliasing.
 * In addition, this controls the spacing of cacheline-spaced size classes.
 *
 * CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can
 * only handle raw constants.
 */
#define LG_CACHELINE 6
#define CACHELINE 64
#define CACHELINE_MASK (CACHELINE - 1)

/* Return the smallest cacheline multiple that is >= s. */
#define CACHELINE_CEILING(s) \
    (((s) + CACHELINE_MASK) & ~CACHELINE_MASK)

/* Page size.  STATIC_PAGE_SHIFT is determined by the configure script. */
#ifdef PAGE_MASK
#  undef PAGE_MASK
#endif
#define LG_PAGE STATIC_PAGE_SHIFT
#define PAGE ((size_t)(1U << STATIC_PAGE_SHIFT))
#define PAGE_MASK ((size_t)(PAGE - 1))

/* Return the smallest pagesize multiple that is >= s. */
#define PAGE_CEILING(s) \
    (((s) + PAGE_MASK) & ~PAGE_MASK)

/* Return the nearest aligned address at or below a. */
#define ALIGNMENT_ADDR2BASE(a, alignment) \
    ((void *)((uintptr_t)(a) & (-(alignment))))

/* Return the offset between a and the nearest aligned address at or below a. */
#define ALIGNMENT_ADDR2OFFSET(a, alignment) \
    ((size_t)((uintptr_t)(a) & (alignment - 1)))

/* Return the smallest alignment multiple that is >= s. */
#define ALIGNMENT_CEILING(s, alignment) \
    (((s) + (alignment - 1)) & (-(alignment)))

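/*
 * Worked example (illustrative; alignment must be a power of two):
 *
 *   ALIGNMENT_ADDR2BASE(0x1234, 64)   == (void *)0x1200
 *   ALIGNMENT_ADDR2OFFSET(0x1234, 64) == 0x34
 *   ALIGNMENT_CEILING(0x1234, 64)     == 0x1240
 */
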
/* Declare a variable-length array. */
#if __STDC_VERSION__ < 199901L
#  ifdef _MSC_VER
#    include <malloc.h>
#    define alloca _alloca
#  else
#    include <alloca.h>
#  endif
#  define VARIABLE_ARRAY(type, name, count) \
    type *name = alloca(sizeof(type) * count)
#else
#  define VARIABLE_ARRAY(type, name, count) type name[count]
#endif

#ifdef JEMALLOC_VALGRIND
/*
 * The JEMALLOC_VALGRIND_*() macros must be macros rather than functions
 * so that when Valgrind reports errors, there are no extra stack frames
 * in the backtraces.
 *
 * The size that is reported to Valgrind must be consistent through a chain of
 * malloc..realloc..realloc calls.  Request size isn't recorded anywhere in
 * jemalloc, so it is critical that all callers of these macros provide usize
 * rather than request size.  As a result, buffer overflow detection is
 * technically weakened for the standard API, though it is generally accepted
 * practice to consider any extra bytes reported by malloc_usable_size() as
 * usable space.
 */
#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do { \
    if (config_valgrind && opt_valgrind && cond) \
        VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(ptr), zero); \
} while (0)
#define JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize, \
    old_rzsize, zero) do { \
    if (config_valgrind && opt_valgrind) { \
        size_t rzsize = p2rz(ptr); \
 \
        if (ptr == old_ptr) { \
            VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize, \
                usize, rzsize); \
            if (zero && old_usize < usize) { \
                VALGRIND_MAKE_MEM_DEFINED( \
                    (void *)((uintptr_t)ptr + \
                    old_usize), usize - old_usize); \
            } \
        } else { \
            if (old_ptr != NULL) { \
                VALGRIND_FREELIKE_BLOCK(old_ptr, \
                    old_rzsize); \
            } \
            if (ptr != NULL) { \
                size_t copy_size = (old_usize < usize) \
                    ? old_usize : usize; \
                size_t tail_size = usize - copy_size; \
                VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, \
                    rzsize, false); \
                if (copy_size > 0) { \
                    VALGRIND_MAKE_MEM_DEFINED(ptr, \
                        copy_size); \
                } \
                if (zero && tail_size > 0) { \
                    VALGRIND_MAKE_MEM_DEFINED( \
                        (void *)((uintptr_t)ptr + \
                        copy_size), tail_size); \
                } \
            } \
        } \
    } \
} while (0)
#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do { \
    if (config_valgrind && opt_valgrind) \
        VALGRIND_FREELIKE_BLOCK(ptr, rzsize); \
} while (0)
#else
#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed)
#define VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB)
#define VALGRIND_FREELIKE_BLOCK(addr, rzB)
#define VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr, _qzz_len)
#define VALGRIND_MAKE_MEM_DEFINED(_qzz_addr, _qzz_len)
#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero)
#define JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize, \
    old_rzsize, zero)
#define JEMALLOC_VALGRIND_FREE(ptr, rzsize)
#endif

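/*
 * Sketch of the usize invariant described above (illustrative only): a
 * caller reports the usable size, not the request size, at every step so
 * that Valgrind's view stays consistent across a malloc..realloc chain:
 *
 *   usize = s2u(size);
 *   ret = imalloc(size);
 *   JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
 */
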
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_TYPES
/******************************************************************************/
#define JEMALLOC_H_STRUCTS

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

typedef struct {
    uint64_t allocated;
    uint64_t deallocated;
} thread_allocated_t;
/*
 * The JEMALLOC_CONCAT() wrapper is necessary to pass {0, 0} via a cpp macro
 * argument.
 */
#define THREAD_ALLOCATED_INITIALIZER JEMALLOC_CONCAT({0, 0})

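/*
 * Illustration: the preprocessor splits macro arguments on commas that are
 * not protected by parentheses, and braces do not protect them, so passing
 * the initializer directly would change the argument count:
 *
 *   malloc_tsd_funcs(..., {0, 0}, ...)                       // wrong arity
 *   malloc_tsd_funcs(..., THREAD_ALLOCATED_INITIALIZER, ...) // OK
 */
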
#undef JEMALLOC_H_STRUCTS
/******************************************************************************/
#define JEMALLOC_H_EXTERNS

extern bool opt_abort;
extern bool opt_junk;
extern size_t opt_quarantine;
extern bool opt_redzone;
extern bool opt_utrace;
extern bool opt_valgrind;
extern bool opt_xmalloc;
extern bool opt_zero;
extern size_t opt_narenas;

/* Number of CPUs. */
extern unsigned ncpus;

extern malloc_mutex_t arenas_lock; /* Protects arenas initialization. */
/*
 * Arenas that are used to service external requests.  Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 */
extern arena_t **arenas;
extern unsigned narenas;

arena_t *arenas_extend(unsigned ind);
void arenas_cleanup(void *arg);
arena_t *choose_arena_hard(void);
void jemalloc_prefork(void);
void jemalloc_postfork_parent(void);
void jemalloc_postfork_child(void);

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_EXTERNS
/******************************************************************************/
#define JEMALLOC_H_INLINES

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"

#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), arenas, arena_t *)

size_t s2u(size_t size);
size_t sa2u(size_t size, size_t alignment);
arena_t *choose_arena(arena_t *arena);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
/*
 * Map of pthread_self() --> arenas[???], used for selecting an arena to use
 * for allocations.
 */
malloc_tsd_externs(arenas, arena_t *)
malloc_tsd_funcs(JEMALLOC_INLINE, arenas, arena_t *, NULL, arenas_cleanup)

/*
 * Compute usable size that would result from allocating an object with the
 * specified size.
 */
JEMALLOC_INLINE size_t
s2u(size_t size)
{

    if (size <= SMALL_MAXCLASS)
        return (arena_bin_info[SMALL_SIZE2BIN(size)].reg_size);
    if (size <= arena_maxclass)
        return (PAGE_CEILING(size));
    return (CHUNK_CEILING(size));
}

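/*
 * Worked example (illustrative; assumes 4 KiB pages and the default
 * 16-byte quantum, for which the small size classes include ..., 96,
 * 112, ...):
 *
 *   s2u(100)  == 112     (small: rounded up to a small size class)
 *   s2u(5000) == 8192    (large: rounded up to a page multiple)
 */
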
/*
 * Compute usable size that would result from allocating an object with the
 * specified size and alignment.
 */
JEMALLOC_INLINE size_t
sa2u(size_t size, size_t alignment)
{
    size_t usize;

    assert(alignment != 0 && ((alignment - 1) & alignment) == 0);

    /*
     * Round size up to the nearest multiple of alignment.
     *
     * This done, we can take advantage of the fact that for each small
     * size class, every object is aligned at the smallest power of two
     * that is non-zero in the base two representation of the size.  For
     * example:
     *
     *   Size |   Base 2 | Minimum alignment
     *   -----+----------+------------------
     *     96 |  1100000 |                32
     *    144 | 10100000 |                32
     *    192 | 11000000 |                64
     */
    usize = ALIGNMENT_CEILING(size, alignment);
    /*
     * (usize < size) protects against the combination of maximal
     * alignment and size greater than maximal alignment.
     */
    if (usize < size) {
        /* size_t overflow. */
        return (0);
    }

    if (usize <= arena_maxclass && alignment <= PAGE) {
        if (usize <= SMALL_MAXCLASS)
            return (arena_bin_info[SMALL_SIZE2BIN(usize)].reg_size);
        return (PAGE_CEILING(usize));
    } else {
        size_t run_size;

        /*
         * We can't achieve subpage alignment, so round up alignment
         * permanently; it makes later calculations simpler.
         */
        alignment = PAGE_CEILING(alignment);
        usize = PAGE_CEILING(size);
        /*
         * (usize < size) protects against very large sizes within
         * PAGE of SIZE_T_MAX.
         *
         * (usize + alignment < usize) protects against the
         * combination of maximal alignment and usize large enough
         * to cause overflow.  This is similar to the first overflow
         * check above, but it needs to be repeated due to the new
         * usize value, which may now be *equal* to maximal
         * alignment, whereas before we only detected overflow if the
         * original size was *greater* than maximal alignment.
         */
        if (usize < size || usize + alignment < usize) {
            /* size_t overflow. */
            return (0);
        }

        /*
         * Calculate the size of the over-size run that arena_palloc()
         * would need to allocate in order to guarantee the alignment.
         * If the run wouldn't fit within a chunk, round up to a huge
         * allocation size.
         */
        run_size = usize + alignment - PAGE;
        if (run_size <= arena_maxclass)
            return (PAGE_CEILING(usize));
        return (CHUNK_CEILING(usize));
    }
}

/* Choose an arena based on a per-thread value. */
JEMALLOC_INLINE arena_t *
choose_arena(arena_t *arena)
{
    arena_t *ret;

    if (arena != NULL)
        return (arena);

    if ((ret = *arenas_tsd_get()) == NULL) {
        ret = choose_arena_hard();
        assert(ret != NULL);
    }

    return (ret);
}
#endif

#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/rtree.h"
/*
 * Include arena.h twice in order to resolve circular dependencies with
 * tcache.h.
 */
#define JEMALLOC_ARENA_INLINE_A
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_A
#include "jemalloc/internal/tcache.h"
#define JEMALLOC_ARENA_INLINE_B
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_B
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"

#ifndef JEMALLOC_ENABLE_INLINE
void *imalloc(size_t size);
void *icalloc(size_t size);
void *ipalloc(size_t usize, size_t alignment, bool zero);
size_t isalloc(const void *ptr, bool demote);
size_t ivsalloc(const void *ptr, bool demote);
size_t u2rz(size_t usize);
size_t p2rz(const void *ptr);
void idalloc(void *ptr);
void iqalloc(void *ptr);
void *iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
    bool zero, bool no_move);
malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t)
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_INLINE void *
imalloc(size_t size)
{

    assert(size != 0);

    if (size <= arena_maxclass)
        return (arena_malloc(NULL, size, false, true));
    else
        return (huge_malloc(size, false));
}

JEMALLOC_INLINE void *
icalloc(size_t size)
{

    if (size <= arena_maxclass)
        return (arena_malloc(NULL, size, true, true));
    else
        return (huge_malloc(size, true));
}

JEMALLOC_INLINE void *
ipalloc(size_t usize, size_t alignment, bool zero)
{
    void *ret;

    assert(usize != 0);
    assert(usize == sa2u(usize, alignment));

    if (usize <= arena_maxclass && alignment <= PAGE)
        ret = arena_malloc(NULL, usize, zero, true);
    else {
        if (usize <= arena_maxclass) {
            ret = arena_palloc(choose_arena(NULL), usize, alignment,
                zero);
        } else if (alignment <= chunksize)
            ret = huge_malloc(usize, zero);
        else
            ret = huge_palloc(usize, alignment, zero);
    }

    assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
    return (ret);
}

/*
 * Typical usage:
 *   void *ptr = [...]
 *   size_t sz = isalloc(ptr, config_prof);
 */
JEMALLOC_INLINE size_t
isalloc(const void *ptr, bool demote)
{
    size_t ret;
    arena_chunk_t *chunk;

    assert(ptr != NULL);
    /* Demotion only makes sense if config_prof is true. */
    assert(config_prof || demote == false);

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    if (chunk != ptr)
        ret = arena_salloc(ptr, demote);
    else
        ret = huge_salloc(ptr);

    return (ret);
}

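/*
 * Note on the chunk test above: CHUNK_ADDR2BASE(ptr) equals ptr only for
 * chunk-aligned allocations, and in this layout only huge allocations are
 * chunk-aligned; small/large objects always sit past the arena chunk
 * header.  idalloc() below relies on the same distinction.
 */
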
JEMALLOC_INLINE size_t
ivsalloc(const void *ptr, bool demote)
{

    /* Return 0 if ptr is not within a chunk managed by jemalloc. */
    if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == NULL)
        return (0);

    return (isalloc(ptr, demote));
}

JEMALLOC_INLINE size_t
u2rz(size_t usize)
{
    size_t ret;

    if (usize <= SMALL_MAXCLASS) {
        size_t binind = SMALL_SIZE2BIN(usize);
        ret = arena_bin_info[binind].redzone_size;
    } else
        ret = 0;

    return (ret);
}

JEMALLOC_INLINE size_t
p2rz(const void *ptr)
{
    size_t usize = isalloc(ptr, false);

    return (u2rz(usize));
}

JEMALLOC_INLINE void
idalloc(void *ptr)
{
    arena_chunk_t *chunk;

    assert(ptr != NULL);

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    if (chunk != ptr)
        arena_dalloc(chunk->arena, chunk, ptr, true);
    else
        huge_dalloc(ptr, true);
}

JEMALLOC_INLINE void
iqalloc(void *ptr)
{

    if (config_fill && opt_quarantine)
        quarantine(ptr);
    else
        idalloc(ptr);
}

JEMALLOC_INLINE void *
iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
    bool no_move)
{
    void *ret;
    size_t oldsize;

    assert(ptr != NULL);
    assert(size != 0);

    oldsize = isalloc(ptr, config_prof);

    if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
        != 0) {
        size_t usize, copysize;

        /*
         * Existing object alignment is inadequate; allocate new space
         * and copy.
         */
        if (no_move)
            return (NULL);
        usize = sa2u(size + extra, alignment);
        if (usize == 0)
            return (NULL);
        ret = ipalloc(usize, alignment, zero);
        if (ret == NULL) {
            if (extra == 0)
                return (NULL);
            /* Try again, without extra this time. */
            usize = sa2u(size, alignment);
            if (usize == 0)
                return (NULL);
            ret = ipalloc(usize, alignment, zero);
            if (ret == NULL)
                return (NULL);
        }
        /*
         * Copy at most size bytes (not size+extra), since the caller
         * has no expectation that the extra bytes will be reliably
         * preserved.
         */
        copysize = (size < oldsize) ? size : oldsize;
        memcpy(ret, ptr, copysize);
        iqalloc(ptr);
        return (ret);
    }

    if (no_move) {
        if (size <= arena_maxclass) {
            return (arena_ralloc_no_move(ptr, oldsize, size,
                extra, zero));
        } else {
            return (huge_ralloc_no_move(ptr, oldsize, size,
                extra));
        }
    } else {
        if (size + extra <= arena_maxclass) {
            return (arena_ralloc(ptr, oldsize, size, extra,
                alignment, zero, true));
        } else {
            return (huge_ralloc(ptr, oldsize, size, extra,
                alignment, zero));
        }
    }
}

malloc_tsd_externs(thread_allocated, thread_allocated_t)
malloc_tsd_funcs(JEMALLOC_INLINE, thread_allocated, thread_allocated_t,
    THREAD_ALLOCATED_INITIALIZER, malloc_tsd_no_cleanup)
#endif

#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_INLINES
/******************************************************************************/
#endif /* JEMALLOC_INTERNAL_H */