jemalloc_internal.h.in revision fd97b1dfc76647c3f90f28dc63cc987041fe20df
#ifndef JEMALLOC_INTERNAL_H
#define JEMALLOC_INTERNAL_H
#include <math.h>
#ifdef _WIN32
#  include <windows.h>
#  define ENOENT ERROR_PATH_NOT_FOUND
#  define EINVAL ERROR_BAD_ARGUMENTS
#  define EAGAIN ERROR_OUTOFMEMORY
#  define EPERM ERROR_WRITE_FAULT
#  define EFAULT ERROR_INVALID_ADDRESS
#  define ENOMEM ERROR_NOT_ENOUGH_MEMORY
#  undef ERANGE
#  define ERANGE ERROR_INVALID_DATA
#else
#  include <sys/param.h>
#  include <sys/mman.h>
#  include <sys/syscall.h>
#  if !defined(SYS_write) && defined(__NR_write)
#    define SYS_write __NR_write
#  endif
#  include <sys/uio.h>
#  include <pthread.h>
#  include <errno.h>
#endif
#include <sys/types.h>

#include <limits.h>
#ifndef SIZE_T_MAX
#  define SIZE_T_MAX SIZE_MAX
#endif
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>
#ifndef offsetof
#  define offsetof(type, member) ((size_t)&(((type *)NULL)->member))
#endif
#include <inttypes.h>
#include <string.h>
#include <strings.h>
#include <ctype.h>
#ifdef _MSC_VER
#  include <io.h>
typedef intptr_t ssize_t;
#  define PATH_MAX 1024
#  define STDERR_FILENO 2
#  define __func__ __FUNCTION__
/* Disable warnings about deprecated system functions */
#  pragma warning(disable: 4996)
#else
#  include <unistd.h>
#endif
#include <fcntl.h>

#define JEMALLOC_NO_DEMANGLE
#include "../jemalloc@install_suffix@.h"

#ifdef JEMALLOC_UTRACE
#include <sys/ktrace.h>
#endif

#ifdef JEMALLOC_VALGRIND
#include <valgrind/valgrind.h>
#include <valgrind/memcheck.h>
#endif

#include "jemalloc/internal/private_namespace.h"

#ifdef JEMALLOC_CC_SILENCE
#define UNUSED JEMALLOC_ATTR(unused)
#else
#define UNUSED
#endif

static const bool config_debug =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
static const bool config_dss =
#ifdef JEMALLOC_DSS
    true
#else
    false
#endif
    ;
static const bool config_fill =
#ifdef JEMALLOC_FILL
    true
#else
    false
#endif
    ;
static const bool config_lazy_lock =
#ifdef JEMALLOC_LAZY_LOCK
    true
#else
    false
#endif
    ;
static const bool config_prof =
#ifdef JEMALLOC_PROF
    true
#else
    false
#endif
    ;
static const bool config_prof_libgcc =
#ifdef JEMALLOC_PROF_LIBGCC
    true
#else
    false
#endif
    ;
static const bool config_prof_libunwind =
#ifdef JEMALLOC_PROF_LIBUNWIND
    true
#else
    false
#endif
    ;
static const bool config_munmap =
#ifdef JEMALLOC_MUNMAP
    true
#else
    false
#endif
    ;
static const bool config_stats =
#ifdef JEMALLOC_STATS
    true
#else
    false
#endif
    ;
static const bool config_tcache =
#ifdef JEMALLOC_TCACHE
    true
#else
    false
#endif
    ;
static const bool config_tls =
#ifdef JEMALLOC_TLS
    true
#else
    false
#endif
    ;
static const bool config_utrace =
#ifdef JEMALLOC_UTRACE
    true
#else
    false
#endif
    ;
static const bool config_valgrind =
#ifdef JEMALLOC_VALGRIND
    true
#else
    false
#endif
    ;
static const bool config_xmalloc =
#ifdef JEMALLOC_XMALLOC
    true
#else
    false
#endif
    ;
static const bool config_ivsalloc =
#ifdef JEMALLOC_IVSALLOC
    true
#else
    false
#endif
    ;
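
/*
 * Because the config_* flags above are compile-time constants, option-
 * dependent code can be written as ordinary branches that the optimizer
 * removes entirely when a feature is disabled.  A minimal sketch (the helper
 * below is hypothetical; malloc_printf() comes from util.h):
 */
#if 0
static void
example_debug_note(const char *msg)
{

    /* Whole branch is dead-code-eliminated unless JEMALLOC_DEBUG is set. */
    if (config_debug)
        malloc_printf("<jemalloc>: %s\n", msg);
}
#endif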

#ifdef JEMALLOC_ATOMIC9
#include <machine/atomic.h>
#endif

#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
#include <libkern/OSAtomic.h>
#endif

#ifdef JEMALLOC_ZONE
#include <mach/mach_error.h>
#include <mach/mach_init.h>
#include <mach/vm_map.h>
#include <malloc/malloc.h>
#endif

#define RB_COMPACT
#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/qr.h"
#include "jemalloc/internal/ql.h"

/*
 * jemalloc can conceptually be broken into components (arena, tcache, etc.),
 * but there are circular dependencies that cannot be broken without
 * substantial performance degradation.  In order to reduce the effect on
 * visual code flow, read the header files in multiple passes, with one of the
 * following cpp variables defined during each pass:
 *
 *   JEMALLOC_H_TYPES   : Preprocessor-defined constants and pseudo-opaque
 *                        data types.
 *   JEMALLOC_H_STRUCTS : Data structures.
 *   JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
 *   JEMALLOC_H_INLINES : Inline functions.
 */
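
/*
 * Condensed sketch of the shape each component header follows so that the
 * four-pass scheme above works (the "example" component is hypothetical):
 */
#if 0
/* jemalloc/internal/example.h */
#ifdef JEMALLOC_H_TYPES
typedef struct example_s example_t;
#endif /* JEMALLOC_H_TYPES */
#ifdef JEMALLOC_H_STRUCTS
struct example_s {
    int field;
};
#endif /* JEMALLOC_H_STRUCTS */
#ifdef JEMALLOC_H_EXTERNS
bool example_boot(void);
#endif /* JEMALLOC_H_EXTERNS */
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
int example_get(example_t *ex);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_EXAMPLE_C_))
JEMALLOC_INLINE int
example_get(example_t *ex)
{

    return (ex->field);
}
#endif
#endif /* JEMALLOC_H_INLINES */
#endif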

/******************************************************************************/
#define JEMALLOC_H_TYPES

#define ALLOCM_LG_ALIGN_MASK ((int)0x3f)

#define ZU(z) ((size_t)z)

#ifndef __DECONST
#  define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
#endif

#ifdef JEMALLOC_DEBUG
   /* Disable inlining to make debugging easier. */
#  define JEMALLOC_INLINE
#  define inline
#else
#  define JEMALLOC_ENABLE_INLINE
#  define JEMALLOC_INLINE static inline
#  ifdef _MSC_VER
#    define inline _inline
#  endif
#endif

/* Smallest size class to support. */
#define LG_TINY_MIN 3
#define TINY_MIN (1U << LG_TINY_MIN)

/*
 * Minimum alignment of allocations is 2^LG_QUANTUM bytes (ignoring tiny size
 * classes).
 */
#ifndef LG_QUANTUM
#  if (defined(__i386__) || defined(_M_IX86))
#    define LG_QUANTUM 4
#  endif
#  ifdef __ia64__
#    define LG_QUANTUM 4
#  endif
#  ifdef __alpha__
#    define LG_QUANTUM 4
#  endif
#  ifdef __sparc64__
#    define LG_QUANTUM 4
#  endif
#  if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64))
#    define LG_QUANTUM 4
#  endif
#  ifdef __arm__
#    define LG_QUANTUM 3
#  endif
#  ifdef __mips__
#    define LG_QUANTUM 3
#  endif
#  ifdef __powerpc__
#    define LG_QUANTUM 4
#  endif
#  ifdef __s390x__
#    define LG_QUANTUM 4
#  endif
#  ifdef __SH4__
#    define LG_QUANTUM 4
#  endif
#  ifdef __tile__
#    define LG_QUANTUM 4
#  endif
#  ifndef LG_QUANTUM
#    error "No LG_QUANTUM definition for architecture; specify via CPPFLAGS"
#  endif
#endif

#define QUANTUM ((size_t)(1U << LG_QUANTUM))
#define QUANTUM_MASK (QUANTUM - 1)

/* Return the smallest quantum multiple that is >= a. */
#define QUANTUM_CEILING(a) \
    (((a) + QUANTUM_MASK) & ~QUANTUM_MASK)

#define LONG ((size_t)(1U << LG_SIZEOF_LONG))
#define LONG_MASK (LONG - 1)

/* Return the smallest long multiple that is >= a. */
#define LONG_CEILING(a) \
    (((a) + LONG_MASK) & ~LONG_MASK)

#define SIZEOF_PTR (1U << LG_SIZEOF_PTR)
#define PTR_MASK (SIZEOF_PTR - 1)

/* Return the smallest (void *) multiple that is >= a. */
#define PTR_CEILING(a) \
    (((a) + PTR_MASK) & ~PTR_MASK)

/*
 * Maximum size of L1 cache line.  This is used to avoid cache line aliasing.
 * In addition, this controls the spacing of cacheline-spaced size classes.
 */
#define LG_CACHELINE 6
#define CACHELINE ((size_t)(1U << LG_CACHELINE))
#define CACHELINE_MASK (CACHELINE - 1)

/* Return the smallest cacheline multiple that is >= s. */
#define CACHELINE_CEILING(s) \
    (((s) + CACHELINE_MASK) & ~CACHELINE_MASK)

/* Page size.  STATIC_PAGE_SHIFT is determined by the configure script. */
#ifdef PAGE_MASK
#  undef PAGE_MASK
#endif
#define LG_PAGE STATIC_PAGE_SHIFT
#define PAGE ((size_t)(1U << STATIC_PAGE_SHIFT))
#define PAGE_MASK ((size_t)(PAGE - 1))

/* Return the smallest pagesize multiple that is >= s. */
#define PAGE_CEILING(s) \
    (((s) + PAGE_MASK) & ~PAGE_MASK)

/* Return the nearest aligned address at or below a. */
#define ALIGNMENT_ADDR2BASE(a, alignment) \
    ((void *)((uintptr_t)(a) & (-(alignment))))

/* Return the offset between a and the nearest aligned address at or below a. */
#define ALIGNMENT_ADDR2OFFSET(a, alignment) \
    ((size_t)((uintptr_t)(a) & (alignment - 1)))

/* Return the smallest alignment multiple that is >= s. */
#define ALIGNMENT_CEILING(s, alignment) \
    (((s) + (alignment - 1)) & (-(alignment)))
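
/*
 * Worked example of the mask arithmetic above, assuming LG_QUANTUM == 4
 * (i.e. QUANTUM == 16):
 */
#if 0
assert(QUANTUM_CEILING(17) == 32);                  /* (17 + 15) & ~15 */
assert(ALIGNMENT_ADDR2BASE((void *)0x1234, 0x100) == (void *)0x1200);
assert(ALIGNMENT_ADDR2OFFSET((void *)0x1234, 0x100) == 0x34);
assert(ALIGNMENT_CEILING(0x34, 0x10) == 0x40);      /* (0x34 + 0xf) & -0x10 */
#endif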

/* Declare a variable length array. */
#if __STDC_VERSION__ < 199901L
#  ifdef _MSC_VER
#    include <malloc.h>
#    define alloca _alloca
#  else
#    include <alloca.h>
#  endif
#  define VARIABLE_ARRAY(type, name, count) \
    type *name = alloca(sizeof(type) * count)
#else
#  define VARIABLE_ARRAY(type, name, count) type name[count]
#endif

#ifdef JEMALLOC_VALGRIND
/*
 * The JEMALLOC_VALGRIND_*() macros must be macros rather than functions
 * so that when Valgrind reports errors, there are no extra stack frames
 * in the backtraces.
 *
 * The size that is reported to valgrind must be consistent through a chain of
 * malloc..realloc..realloc calls.  Request size isn't recorded anywhere in
 * jemalloc, so it is critical that all callers of these macros provide usize
 * rather than request size.  As a result, buffer overflow detection is
 * technically weakened for the standard API, though it is generally accepted
 * practice to consider any extra bytes reported by malloc_usable_size() as
 * usable space.
 */
#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do { \
    if (config_valgrind && opt_valgrind && cond) \
        VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(ptr), zero); \
} while (0)
#define JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize, \
    old_rzsize, zero) do { \
    if (config_valgrind && opt_valgrind) { \
        size_t rzsize = p2rz(ptr); \
 \
        if (ptr == old_ptr) { \
            VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize, \
                usize, rzsize); \
            if (zero && old_usize < usize) { \
                VALGRIND_MAKE_MEM_DEFINED( \
                    (void *)((uintptr_t)ptr + \
                    old_usize), usize - old_usize); \
            } \
        } else { \
            if (old_ptr != NULL) { \
                VALGRIND_FREELIKE_BLOCK(old_ptr, \
                    old_rzsize); \
            } \
            if (ptr != NULL) { \
                size_t copy_size = (old_usize < usize) \
                    ? old_usize : usize; \
                size_t tail_size = usize - copy_size; \
                VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, \
                    rzsize, false); \
                if (copy_size > 0) { \
                    VALGRIND_MAKE_MEM_DEFINED(ptr, \
                        copy_size); \
                } \
                if (zero && tail_size > 0) { \
                    VALGRIND_MAKE_MEM_DEFINED( \
                        (void *)((uintptr_t)ptr + \
                        copy_size), tail_size); \
                } \
            } \
        } \
    } \
} while (0)
#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do { \
    if (config_valgrind && opt_valgrind) \
        VALGRIND_FREELIKE_BLOCK(ptr, rzsize); \
} while (0)
#else
#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed)
#define VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB)
#define VALGRIND_FREELIKE_BLOCK(addr, rzB)
#define VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr, _qzz_len)
#define VALGRIND_MAKE_MEM_DEFINED(_qzz_addr, _qzz_len)
#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero)
#define JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize, \
    old_rzsize, zero)
#define JEMALLOC_VALGRIND_FREE(ptr, rzsize)
#endif
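
/*
 * Condensed sketch of how an allocation path uses these annotations: the
 * usable size (usize), never the request size, is what gets reported, so
 * that a later JEMALLOC_VALGRIND_REALLOC() agrees with the matching
 * JEMALLOC_VALGRIND_MALLOC().  (Illustrative only; the real call sites live
 * in src/jemalloc.c.)
 */
#if 0
void *ret = imalloc(size);
size_t usize = isalloc(ret, config_prof);
JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
/* ... later, on deallocation ... */
JEMALLOC_VALGRIND_FREE(ret, p2rz(ret));
#endif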

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_TYPES
/******************************************************************************/
#define JEMALLOC_H_STRUCTS

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

typedef struct {
    uint64_t allocated;
    uint64_t deallocated;
} thread_allocated_t;
/*
 * The JEMALLOC_CONCAT() wrapper is necessary to pass {0, 0} via a cpp macro
 * argument.
 */
#define THREAD_ALLOCATED_INITIALIZER JEMALLOC_CONCAT({0, 0})

#undef JEMALLOC_H_STRUCTS
/******************************************************************************/
#define JEMALLOC_H_EXTERNS

extern bool opt_abort;
extern bool opt_junk;
extern size_t opt_quarantine;
extern bool opt_redzone;
extern bool opt_utrace;
extern bool opt_valgrind;
extern bool opt_xmalloc;
extern bool opt_zero;
extern size_t opt_narenas;

/* Number of CPUs. */
extern unsigned ncpus;

extern malloc_mutex_t arenas_lock; /* Protects arenas initialization. */
/*
 * Arenas that are used to service external requests.  Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 */
extern arena_t **arenas;
extern unsigned narenas;

arena_t *arenas_extend(unsigned ind);
void arenas_cleanup(void *arg);
arena_t *choose_arena_hard(void);
void jemalloc_prefork(void);
void jemalloc_postfork_parent(void);
void jemalloc_postfork_child(void);

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_EXTERNS
/******************************************************************************/
#define JEMALLOC_H_INLINES

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"

#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), arenas, arena_t *)

size_t s2u(size_t size);
size_t sa2u(size_t size, size_t alignment);
arena_t *choose_arena(arena_t *arena);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
/*
 * Map of pthread_self() --> arenas[???], used for selecting an arena to use
 * for allocations.
 */
malloc_tsd_externs(arenas, arena_t *)
malloc_tsd_funcs(JEMALLOC_INLINE, arenas, arena_t *, NULL, arenas_cleanup)

/*
 * Compute usable size that would result from allocating an object with the
 * specified size.
 */
JEMALLOC_INLINE size_t
s2u(size_t size)
{

    if (size <= SMALL_MAXCLASS)
        return (arena_bin_info[SMALL_SIZE2BIN(size)].reg_size);
    if (size <= arena_maxclass)
        return (PAGE_CEILING(size));
    return (CHUNK_CEILING(size));
}
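
/*
 * Worked example for s2u(), assuming 4 KiB pages, LG_QUANTUM == 4, and the
 * default size classes:
 */
#if 0
assert(s2u(100) == 112);   /* Rounds up to a quantum-spaced small class. */
assert(s2u(5000) == 8192); /* PAGE_CEILING(5000): a page-rounded large size. */
assert(s2u(arena_maxclass + 1) == CHUNK_CEILING(arena_maxclass + 1)); /* Huge. */
#endif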

/*
 * Compute usable size that would result from allocating an object with the
 * specified size and alignment.
 */
JEMALLOC_INLINE size_t
sa2u(size_t size, size_t alignment)
{
    size_t usize;

    assert(alignment != 0 && ((alignment - 1) & alignment) == 0);

    /*
     * Round size up to the nearest multiple of alignment.
     *
     * This done, we can take advantage of the fact that for each small
     * size class, every object is aligned at the smallest power of two
     * that is non-zero in the base two representation of the size.  For
     * example:
     *
     *   Size |   Base 2 | Minimum alignment
     *   -----+----------+------------------
     *     96 |  1100000 |                32
     *    160 | 10100000 |                32
     *    192 | 11000000 |                64
     */
    usize = ALIGNMENT_CEILING(size, alignment);
    /*
     * (usize < size) protects against the combination of maximal
     * alignment and size greater than maximal alignment.
     */
    if (usize < size) {
        /* size_t overflow. */
        return (0);
    }

    if (usize <= arena_maxclass && alignment <= PAGE) {
        if (usize <= SMALL_MAXCLASS)
            return (arena_bin_info[SMALL_SIZE2BIN(usize)].reg_size);
        return (PAGE_CEILING(usize));
    } else {
        size_t run_size;

        /*
         * We can't achieve subpage alignment, so round up alignment
         * permanently; it makes later calculations simpler.
         */
        alignment = PAGE_CEILING(alignment);
        usize = PAGE_CEILING(size);
        /*
         * (usize < size) protects against very large sizes within
         * PAGE of SIZE_T_MAX.
         *
         * (usize + alignment < usize) protects against the
         * combination of maximal alignment and usize large enough
         * to cause overflow.  This is similar to the first overflow
         * check above, but it needs to be repeated due to the new
         * usize value, which may now be *equal* to maximal
         * alignment, whereas before we only detected overflow if the
         * original size was *greater* than maximal alignment.
         */
        if (usize < size || usize + alignment < usize) {
            /* size_t overflow. */
            return (0);
        }

        /*
         * Calculate the size of the over-size run that arena_palloc()
         * would need to allocate in order to guarantee the alignment.
         * If the run wouldn't fit within a chunk, round up to a huge
         * allocation size.
         */
        run_size = usize + alignment - PAGE;
        if (run_size <= arena_maxclass)
            return (PAGE_CEILING(usize));
        return (CHUNK_CEILING(usize));
    }
}
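
/*
 * Worked example for the table above, assuming the default small size
 * classes (where 32, 96, and 128 are all small classes):
 */
#if 0
assert(sa2u(1, 32) == 32);    /* Size rounds up to the alignment itself. */
assert(sa2u(96, 32) == 96);   /* 96 == 1100000b; its regions are 32-aligned. */
assert(sa2u(100, 32) == 128); /* Rounds to 128, whose regions are 128-aligned. */
#endif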

/* Choose an arena based on a per-thread value. */
JEMALLOC_INLINE arena_t *
choose_arena(arena_t *arena)
{
    arena_t *ret;

    if (arena != NULL)
        return (arena);

    if ((ret = *arenas_tsd_get()) == NULL) {
        ret = choose_arena_hard();
        assert(ret != NULL);
    }

    return (ret);
}
#endif

#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"

#ifndef JEMALLOC_ENABLE_INLINE
void *imalloc(size_t size);
void *icalloc(size_t size);
void *ipalloc(size_t usize, size_t alignment, bool zero);
size_t isalloc(const void *ptr, bool demote);
size_t ivsalloc(const void *ptr, bool demote);
size_t u2rz(size_t usize);
size_t p2rz(const void *ptr);
void idalloc(void *ptr);
void iqalloc(void *ptr);
void *iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
    bool zero, bool no_move);
malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t)
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_INLINE void *
imalloc(size_t size)
{

    assert(size != 0);

    if (size <= arena_maxclass)
        return (arena_malloc(NULL, size, false, true));
    else
        return (huge_malloc(size, false));
}

JEMALLOC_INLINE void *
icalloc(size_t size)
{

    if (size <= arena_maxclass)
        return (arena_malloc(NULL, size, true, true));
    else
        return (huge_malloc(size, true));
}

JEMALLOC_INLINE void *
ipalloc(size_t usize, size_t alignment, bool zero)
{
    void *ret;

    assert(usize != 0);
    assert(usize == sa2u(usize, alignment));

    if (usize <= arena_maxclass && alignment <= PAGE)
        ret = arena_malloc(NULL, usize, zero, true);
    else {
        if (usize <= arena_maxclass) {
            ret = arena_palloc(choose_arena(NULL), usize, alignment,
                zero);
        } else if (alignment <= chunksize)
            ret = huge_malloc(usize, zero);
        else
            ret = huge_palloc(usize, alignment, zero);
    }

    assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
    return (ret);
}
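
/*
 * Sketch of the caller contract for ipalloc(): the usable size must be
 * computed with sa2u() first (satisfying the assertion above), and a zero
 * result signals size_t overflow.
 */
#if 0
size_t usize = sa2u(size, alignment);
if (usize == 0)
    return (NULL); /* size_t overflow. */
void *ret = ipalloc(usize, alignment, false);
#endif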

/*
 * Typical usage:
 *   void *ptr = [...]
 *   size_t sz = isalloc(ptr, config_prof);
 */
JEMALLOC_INLINE size_t
isalloc(const void *ptr, bool demote)
{
    size_t ret;
    arena_chunk_t *chunk;

    assert(ptr != NULL);
    /* Demotion only makes sense if config_prof is true. */
    assert(config_prof || demote == false);

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    if (chunk != ptr)
        ret = arena_salloc(ptr, demote);
    else
        ret = huge_salloc(ptr);

    return (ret);
}

JEMALLOC_INLINE size_t
ivsalloc(const void *ptr, bool demote)
{

    /* Return 0 if ptr is not within a chunk managed by jemalloc. */
    if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == NULL)
        return (0);

    return (isalloc(ptr, demote));
}

JEMALLOC_INLINE size_t
u2rz(size_t usize)
{
    size_t ret;

    if (usize <= SMALL_MAXCLASS) {
        size_t binind = SMALL_SIZE2BIN(usize);
        ret = arena_bin_info[binind].redzone_size;
    } else
        ret = 0;

    return (ret);
}

JEMALLOC_INLINE size_t
p2rz(const void *ptr)
{
    size_t usize = isalloc(ptr, false);

    return (u2rz(usize));
}

JEMALLOC_INLINE void
idalloc(void *ptr)
{
    arena_chunk_t *chunk;

    assert(ptr != NULL);

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    if (chunk != ptr)
        arena_dalloc(chunk->arena, chunk, ptr, true);
    else
        huge_dalloc(ptr, true);
}

JEMALLOC_INLINE void
iqalloc(void *ptr)
{

    if (config_fill && opt_quarantine)
        quarantine(ptr);
    else
        idalloc(ptr);
}

JEMALLOC_INLINE void *
iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
    bool no_move)
{
    void *ret;
    size_t oldsize;

    assert(ptr != NULL);
    assert(size != 0);

    oldsize = isalloc(ptr, config_prof);

    if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
        != 0) {
        size_t usize, copysize;

        /*
         * Existing object alignment is inadequate; allocate new space
         * and copy.
         */
        if (no_move)
            return (NULL);
        usize = sa2u(size + extra, alignment);
        if (usize == 0)
            return (NULL);
        ret = ipalloc(usize, alignment, zero);
        if (ret == NULL) {
            if (extra == 0)
                return (NULL);
            /* Try again, without extra this time. */
            usize = sa2u(size, alignment);
            if (usize == 0)
                return (NULL);
            ret = ipalloc(usize, alignment, zero);
            if (ret == NULL)
                return (NULL);
        }
        /*
         * Copy at most size bytes (not size+extra), since the caller
         * has no expectation that the extra bytes will be reliably
         * preserved.
         */
        copysize = (size < oldsize) ? size : oldsize;
        memcpy(ret, ptr, copysize);
        iqalloc(ptr);
        return (ret);
    }

    if (no_move) {
        if (size <= arena_maxclass) {
            return (arena_ralloc_no_move(ptr, oldsize, size,
                extra, zero));
        } else {
            return (huge_ralloc_no_move(ptr, oldsize, size,
                extra));
        }
    } else {
        if (size + extra <= arena_maxclass) {
            return (arena_ralloc(ptr, oldsize, size, extra,
                alignment, zero, true));
        } else {
            return (huge_ralloc(ptr, oldsize, size, extra,
                alignment, zero));
        }
    }
}

malloc_tsd_externs(thread_allocated, thread_allocated_t)
malloc_tsd_funcs(JEMALLOC_INLINE, thread_allocated, thread_allocated_t,
    THREAD_ALLOCATED_INITIALIZER, malloc_tsd_no_cleanup)
#endif

#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_INLINES
/******************************************************************************/
#endif /* JEMALLOC_INTERNAL_H */