/* jemalloc_internal.h.in, revision 4e2e3dd9cf19ed5991938a708a8b50611aa5bbf8. */
1#include <sys/mman.h> 2#include <sys/param.h> 3#include <sys/time.h> 4#include <sys/types.h> 5#include <sys/sysctl.h> 6#include <sys/uio.h> 7 8#include <errno.h> 9#include <limits.h> 10#ifndef SIZE_T_MAX 11# define SIZE_T_MAX SIZE_MAX 12#endif 13#include <pthread.h> 14#include <sched.h> 15#include <stdarg.h> 16#include <stdbool.h> 17#include <stdio.h> 18#include <stdlib.h> 19#include <stdint.h> 20#include <stddef.h> 21#ifndef offsetof 22# define offsetof(type, member) ((size_t)&(((type *)NULL)->member)) 23#endif 24#include <inttypes.h> 25#include <string.h> 26#include <strings.h> 27#include <ctype.h> 28#include <unistd.h> 29#include <fcntl.h> 30#include <pthread.h> 31#include <math.h> 32 33#define JEMALLOC_NO_DEMANGLE 34#include "../jemalloc@install_suffix@.h" 35 36#include "jemalloc/internal/private_namespace.h" 37 38#ifdef JEMALLOC_CC_SILENCE 39#define UNUSED JEMALLOC_ATTR(unused) 40#else 41#define UNUSED 42#endif 43 44static const bool config_debug = 45#ifdef JEMALLOC_DEBUG 46 true 47#else 48 false 49#endif 50 ; 51static const bool config_dss = 52#ifdef JEMALLOC_DSS 53 true 54#else 55 false 56#endif 57 ; 58static const bool config_dynamic_page_shift = 59#ifdef JEMALLOC_DYNAMIC_PAGE_SHIFT 60 true 61#else 62 false 63#endif 64 ; 65static const bool config_fill = 66#ifdef JEMALLOC_FILL 67 true 68#else 69 false 70#endif 71 ; 72static const bool config_lazy_lock = 73#ifdef JEMALLOC_LAZY_LOCK 74 true 75#else 76 false 77#endif 78 ; 79static const bool config_prof = 80#ifdef JEMALLOC_PROF 81 true 82#else 83 false 84#endif 85 ; 86static const bool config_prof_libgcc = 87#ifdef JEMALLOC_PROF_LIBGCC 88 true 89#else 90 false 91#endif 92 ; 93static const bool config_prof_libunwind = 94#ifdef JEMALLOC_PROF_LIBUNWIND 95 true 96#else 97 false 98#endif 99 ; 100static const bool config_stats = 101#ifdef JEMALLOC_STATS 102 true 103#else 104 false 105#endif 106 ; 107static const bool config_tcache = 108#ifdef JEMALLOC_TCACHE 109 true 110#else 111 false 112#endif 113 ; 114static 
const bool config_tls = 115#ifdef JEMALLOC_TLS 116 true 117#else 118 false 119#endif 120 ; 121static const bool config_xmalloc = 122#ifdef JEMALLOC_XMALLOC 123 true 124#else 125 false 126#endif 127 ; 128static const bool config_ivsalloc = 129#ifdef JEMALLOC_IVSALLOC 130 true 131#else 132 false 133#endif 134 ; 135 136#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN)) 137#include <libkern/OSAtomic.h> 138#endif 139 140#ifdef JEMALLOC_ZONE 141#include <mach/mach_error.h> 142#include <mach/mach_init.h> 143#include <mach/vm_map.h> 144#include <malloc/malloc.h> 145#endif 146 147#define RB_COMPACT 148#include "jemalloc/internal/rb.h" 149#include "jemalloc/internal/qr.h" 150#include "jemalloc/internal/ql.h" 151 152/* 153 * jemalloc can conceptually be broken into components (arena, tcache, etc.), 154 * but there are circular dependencies that cannot be broken without 155 * substantial performance degradation. In order to reduce the effect on 156 * visual code flow, read the header files in multiple passes, with one of the 157 * following cpp variables defined during each pass: 158 * 159 * JEMALLOC_H_TYPES : Preprocessor-defined constants and psuedo-opaque data 160 * types. 161 * JEMALLOC_H_STRUCTS : Data structures. 162 * JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes. 163 * JEMALLOC_H_INLINES : Inline functions. 164 */ 165/******************************************************************************/ 166#define JEMALLOC_H_TYPES 167 168#define ALLOCM_LG_ALIGN_MASK ((int)0x3f) 169 170#define ZU(z) ((size_t)z) 171 172#ifndef __DECONST 173# define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var)) 174#endif 175 176#ifdef JEMALLOC_DEBUG 177 /* Disable inlining to make debugging easier. */ 178# define JEMALLOC_INLINE 179# define inline 180#else 181# define JEMALLOC_ENABLE_INLINE 182# define JEMALLOC_INLINE static inline 183#endif 184 185/* Smallest size class to support. 
*/ 186#define LG_TINY_MIN 3 187#define TINY_MIN (1U << LG_TINY_MIN) 188 189/* 190 * Minimum alignment of allocations is 2^LG_QUANTUM bytes (ignoring tiny size 191 * classes). 192 */ 193#ifndef LG_QUANTUM 194# ifdef __i386__ 195# define LG_QUANTUM 4 196# endif 197# ifdef __ia64__ 198# define LG_QUANTUM 4 199# endif 200# ifdef __alpha__ 201# define LG_QUANTUM 4 202# endif 203# ifdef __sparc64__ 204# define LG_QUANTUM 4 205# endif 206# if (defined(__amd64__) || defined(__x86_64__)) 207# define LG_QUANTUM 4 208# endif 209# ifdef __arm__ 210# define LG_QUANTUM 3 211# endif 212# ifdef __mips__ 213# define LG_QUANTUM 3 214# endif 215# ifdef __powerpc__ 216# define LG_QUANTUM 4 217# endif 218# ifdef __s390x__ 219# define LG_QUANTUM 4 220# endif 221# ifdef __SH4__ 222# define LG_QUANTUM 4 223# endif 224# ifdef __tile__ 225# define LG_QUANTUM 4 226# endif 227# ifndef LG_QUANTUM 228# error "No LG_QUANTUM definition for architecture; specify via CPPFLAGS" 229# endif 230#endif 231 232#define QUANTUM ((size_t)(1U << LG_QUANTUM)) 233#define QUANTUM_MASK (QUANTUM - 1) 234 235/* Return the smallest quantum multiple that is >= a. */ 236#define QUANTUM_CEILING(a) \ 237 (((a) + QUANTUM_MASK) & ~QUANTUM_MASK) 238 239#define LONG ((size_t)(1U << LG_SIZEOF_LONG)) 240#define LONG_MASK (LONG - 1) 241 242/* Return the smallest long multiple that is >= a. */ 243#define LONG_CEILING(a) \ 244 (((a) + LONG_MASK) & ~LONG_MASK) 245 246#define SIZEOF_PTR (1U << LG_SIZEOF_PTR) 247#define PTR_MASK (SIZEOF_PTR - 1) 248 249/* Return the smallest (void *) multiple that is >= a. */ 250#define PTR_CEILING(a) \ 251 (((a) + PTR_MASK) & ~PTR_MASK) 252 253/* 254 * Maximum size of L1 cache line. This is used to avoid cache line aliasing. 255 * In addition, this controls the spacing of cacheline-spaced size classes. 256 */ 257#define LG_CACHELINE 6 258#define CACHELINE ((size_t)(1U << LG_CACHELINE)) 259#define CACHELINE_MASK (CACHELINE - 1) 260 261/* Return the smallest cacheline multiple that is >= s. 
*/ 262#define CACHELINE_CEILING(s) \ 263 (((s) + CACHELINE_MASK) & ~CACHELINE_MASK) 264 265/* Page size. STATIC_PAGE_SHIFT is determined by the configure script. */ 266#define STATIC_PAGE_SIZE ((size_t)(1U << STATIC_PAGE_SHIFT)) 267#define STATIC_PAGE_MASK ((size_t)(STATIC_PAGE_SIZE - 1)) 268#ifdef PAGE_SHIFT 269# undef PAGE_SHIFT 270#endif 271#ifdef PAGE_SIZE 272# undef PAGE_SIZE 273#endif 274#ifdef PAGE_MASK 275# undef PAGE_MASK 276#endif 277#define PAGE_SHIFT STATIC_PAGE_SHIFT 278#define PAGE_SIZE STATIC_PAGE_SIZE 279#define PAGE_MASK STATIC_PAGE_MASK 280 281/* Return the smallest pagesize multiple that is >= s. */ 282#define PAGE_CEILING(s) \ 283 (((s) + PAGE_MASK) & ~PAGE_MASK) 284 285#include "jemalloc/internal/util.h" 286#include "jemalloc/internal/atomic.h" 287#include "jemalloc/internal/prng.h" 288#include "jemalloc/internal/ckh.h" 289#include "jemalloc/internal/size_classes.h" 290#include "jemalloc/internal/stats.h" 291#include "jemalloc/internal/ctl.h" 292#include "jemalloc/internal/mutex.h" 293#include "jemalloc/internal/mb.h" 294#include "jemalloc/internal/extent.h" 295#include "jemalloc/internal/arena.h" 296#include "jemalloc/internal/bitmap.h" 297#include "jemalloc/internal/base.h" 298#include "jemalloc/internal/chunk.h" 299#include "jemalloc/internal/huge.h" 300#include "jemalloc/internal/rtree.h" 301#include "jemalloc/internal/tcache.h" 302#include "jemalloc/internal/hash.h" 303#ifdef JEMALLOC_ZONE 304#include "jemalloc/internal/zone.h" 305#endif 306#include "jemalloc/internal/prof.h" 307 308#undef JEMALLOC_H_TYPES 309/******************************************************************************/ 310#define JEMALLOC_H_STRUCTS 311 312#include "jemalloc/internal/util.h" 313#include "jemalloc/internal/atomic.h" 314#include "jemalloc/internal/prng.h" 315#include "jemalloc/internal/ckh.h" 316#include "jemalloc/internal/size_classes.h" 317#include "jemalloc/internal/stats.h" 318#include "jemalloc/internal/ctl.h" 319#include "jemalloc/internal/mutex.h" 
320#include "jemalloc/internal/mb.h" 321#include "jemalloc/internal/bitmap.h" 322#include "jemalloc/internal/extent.h" 323#include "jemalloc/internal/arena.h" 324#include "jemalloc/internal/base.h" 325#include "jemalloc/internal/chunk.h" 326#include "jemalloc/internal/huge.h" 327#include "jemalloc/internal/rtree.h" 328#include "jemalloc/internal/tcache.h" 329#include "jemalloc/internal/hash.h" 330#ifdef JEMALLOC_ZONE 331#include "jemalloc/internal/zone.h" 332#endif 333#include "jemalloc/internal/prof.h" 334 335typedef struct { 336 uint64_t allocated; 337 uint64_t deallocated; 338} thread_allocated_t; 339 340#undef JEMALLOC_H_STRUCTS 341/******************************************************************************/ 342#define JEMALLOC_H_EXTERNS 343 344extern bool opt_abort; 345extern bool opt_junk; 346extern bool opt_xmalloc; 347extern bool opt_zero; 348extern size_t opt_narenas; 349 350#ifdef DYNAMIC_PAGE_SHIFT 351extern size_t pagesize; 352extern size_t pagesize_mask; 353extern size_t lg_pagesize; 354#endif 355 356/* Number of CPUs. */ 357extern unsigned ncpus; 358 359extern malloc_mutex_t arenas_lock; /* Protects arenas initialization. */ 360extern pthread_key_t arenas_tsd; 361#ifndef NO_TLS 362/* 363 * Map of pthread_self() --> arenas[???], used for selecting an arena to use 364 * for allocations. 365 */ 366extern __thread arena_t *arenas_tls JEMALLOC_ATTR(tls_model("initial-exec")); 367# define ARENA_GET() arenas_tls 368# define ARENA_SET(v) do { \ 369 arenas_tls = (v); \ 370 pthread_setspecific(arenas_tsd, (void *)(v)); \ 371} while (0) 372#else 373# define ARENA_GET() ((arena_t *)pthread_getspecific(arenas_tsd)) 374# define ARENA_SET(v) do { \ 375 pthread_setspecific(arenas_tsd, (void *)(v)); \ 376} while (0) 377#endif 378 379/* 380 * Arenas that are used to service external requests. Not all elements of the 381 * arenas array are necessarily used; arenas are created lazily as needed. 
382 */ 383extern arena_t **arenas; 384extern unsigned narenas; 385 386#ifndef NO_TLS 387extern __thread thread_allocated_t thread_allocated_tls; 388# define ALLOCATED_GET() (thread_allocated_tls.allocated) 389# define ALLOCATEDP_GET() (&thread_allocated_tls.allocated) 390# define DEALLOCATED_GET() (thread_allocated_tls.deallocated) 391# define DEALLOCATEDP_GET() (&thread_allocated_tls.deallocated) 392# define ALLOCATED_ADD(a, d) do { \ 393 thread_allocated_tls.allocated += a; \ 394 thread_allocated_tls.deallocated += d; \ 395} while (0) 396#else 397# define ALLOCATED_GET() (thread_allocated_get()->allocated) 398# define ALLOCATEDP_GET() (&thread_allocated_get()->allocated) 399# define DEALLOCATED_GET() (thread_allocated_get()->deallocated) 400# define DEALLOCATEDP_GET() (&thread_allocated_get()->deallocated) 401# define ALLOCATED_ADD(a, d) do { \ 402 thread_allocated_t *thread_allocated = thread_allocated_get(); \ 403 thread_allocated->allocated += (a); \ 404 thread_allocated->deallocated += (d); \ 405} while (0) 406#endif 407extern pthread_key_t thread_allocated_tsd; 408thread_allocated_t *thread_allocated_get_hard(void); 409 410arena_t *arenas_extend(unsigned ind); 411arena_t *choose_arena_hard(void); 412void jemalloc_prefork(void); 413void jemalloc_postfork_parent(void); 414void jemalloc_postfork_child(void); 415 416#include "jemalloc/internal/util.h" 417#include "jemalloc/internal/atomic.h" 418#include "jemalloc/internal/prng.h" 419#include "jemalloc/internal/ckh.h" 420#include "jemalloc/internal/size_classes.h" 421#include "jemalloc/internal/stats.h" 422#include "jemalloc/internal/ctl.h" 423#include "jemalloc/internal/mutex.h" 424#include "jemalloc/internal/mb.h" 425#include "jemalloc/internal/bitmap.h" 426#include "jemalloc/internal/extent.h" 427#include "jemalloc/internal/arena.h" 428#include "jemalloc/internal/base.h" 429#include "jemalloc/internal/chunk.h" 430#include "jemalloc/internal/huge.h" 431#include "jemalloc/internal/rtree.h" 432#include 
"jemalloc/internal/tcache.h" 433#include "jemalloc/internal/hash.h" 434#ifdef JEMALLOC_ZONE 435#include "jemalloc/internal/zone.h" 436#endif 437#include "jemalloc/internal/prof.h" 438 439#undef JEMALLOC_H_EXTERNS 440/******************************************************************************/ 441#define JEMALLOC_H_INLINES 442 443#include "jemalloc/internal/util.h" 444#include "jemalloc/internal/atomic.h" 445#include "jemalloc/internal/prng.h" 446#include "jemalloc/internal/ckh.h" 447#include "jemalloc/internal/size_classes.h" 448#include "jemalloc/internal/stats.h" 449#include "jemalloc/internal/ctl.h" 450#include "jemalloc/internal/mutex.h" 451#include "jemalloc/internal/mb.h" 452#include "jemalloc/internal/extent.h" 453#include "jemalloc/internal/base.h" 454#include "jemalloc/internal/chunk.h" 455#include "jemalloc/internal/huge.h" 456 457#ifndef JEMALLOC_ENABLE_INLINE 458size_t s2u(size_t size); 459size_t sa2u(size_t size, size_t alignment, size_t *run_size_p); 460arena_t *choose_arena(void); 461thread_allocated_t *thread_allocated_get(void); 462#endif 463 464#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_)) 465/* 466 * Compute usable size that would result from allocating an object with the 467 * specified size. 468 */ 469JEMALLOC_INLINE size_t 470s2u(size_t size) 471{ 472 473 if (size <= SMALL_MAXCLASS) 474 return (arena_bin_info[SMALL_SIZE2BIN(size)].reg_size); 475 if (size <= arena_maxclass) 476 return (PAGE_CEILING(size)); 477 return (CHUNK_CEILING(size)); 478} 479 480/* 481 * Compute usable size that would result from allocating an object with the 482 * specified size and alignment. 483 */ 484JEMALLOC_INLINE size_t 485sa2u(size_t size, size_t alignment, size_t *run_size_p) 486{ 487 size_t usize; 488 489 /* 490 * Round size up to the nearest multiple of alignment. 
491 * 492 * This done, we can take advantage of the fact that for each small 493 * size class, every object is aligned at the smallest power of two 494 * that is non-zero in the base two representation of the size. For 495 * example: 496 * 497 * Size | Base 2 | Minimum alignment 498 * -----+----------+------------------ 499 * 96 | 1100000 | 32 500 * 144 | 10100000 | 32 501 * 192 | 11000000 | 64 502 */ 503 usize = (size + (alignment - 1)) & (-alignment); 504 /* 505 * (usize < size) protects against the combination of maximal 506 * alignment and size greater than maximal alignment. 507 */ 508 if (usize < size) { 509 /* size_t overflow. */ 510 return (0); 511 } 512 513 if (usize <= arena_maxclass && alignment <= PAGE_SIZE) { 514 if (usize <= SMALL_MAXCLASS) 515 return (arena_bin_info[SMALL_SIZE2BIN(usize)].reg_size); 516 return (PAGE_CEILING(usize)); 517 } else { 518 size_t run_size; 519 520 /* 521 * We can't achieve subpage alignment, so round up alignment 522 * permanently; it makes later calculations simpler. 523 */ 524 alignment = PAGE_CEILING(alignment); 525 usize = PAGE_CEILING(size); 526 /* 527 * (usize < size) protects against very large sizes within 528 * PAGE_SIZE of SIZE_T_MAX. 529 * 530 * (usize + alignment < usize) protects against the 531 * combination of maximal alignment and usize large enough 532 * to cause overflow. This is similar to the first overflow 533 * check above, but it needs to be repeated due to the new 534 * usize value, which may now be *equal* to maximal 535 * alignment, whereas before we only detected overflow if the 536 * original size was *greater* than maximal alignment. 537 */ 538 if (usize < size || usize + alignment < usize) { 539 /* size_t overflow. */ 540 return (0); 541 } 542 543 /* 544 * Calculate the size of the over-size run that arena_palloc() 545 * would need to allocate in order to guarantee the alignment. 
546 */ 547 if (usize >= alignment) 548 run_size = usize + alignment - PAGE_SIZE; 549 else { 550 /* 551 * It is possible that (alignment << 1) will cause 552 * overflow, but it doesn't matter because we also 553 * subtract PAGE_SIZE, which in the case of overflow 554 * leaves us with a very large run_size. That causes 555 * the first conditional below to fail, which means 556 * that the bogus run_size value never gets used for 557 * anything important. 558 */ 559 run_size = (alignment << 1) - PAGE_SIZE; 560 } 561 if (run_size_p != NULL) 562 *run_size_p = run_size; 563 564 if (run_size <= arena_maxclass) 565 return (PAGE_CEILING(usize)); 566 return (CHUNK_CEILING(usize)); 567 } 568} 569 570/* Choose an arena based on a per-thread value. */ 571JEMALLOC_INLINE arena_t * 572choose_arena(void) 573{ 574 arena_t *ret; 575 576 ret = ARENA_GET(); 577 if (ret == NULL) { 578 ret = choose_arena_hard(); 579 assert(ret != NULL); 580 } 581 582 return (ret); 583} 584 585JEMALLOC_INLINE thread_allocated_t * 586thread_allocated_get(void) 587{ 588 thread_allocated_t *thread_allocated = (thread_allocated_t *) 589 pthread_getspecific(thread_allocated_tsd); 590 591 if (thread_allocated == NULL) 592 return (thread_allocated_get_hard()); 593 return (thread_allocated); 594} 595#endif 596 597#include "jemalloc/internal/bitmap.h" 598#include "jemalloc/internal/rtree.h" 599#include "jemalloc/internal/tcache.h" 600#include "jemalloc/internal/arena.h" 601#include "jemalloc/internal/hash.h" 602#ifdef JEMALLOC_ZONE 603#include "jemalloc/internal/zone.h" 604#endif 605 606#ifndef JEMALLOC_ENABLE_INLINE 607void *imalloc(size_t size); 608void *icalloc(size_t size); 609void *ipalloc(size_t usize, size_t alignment, bool zero); 610size_t isalloc(const void *ptr); 611size_t ivsalloc(const void *ptr); 612void idalloc(void *ptr); 613void *iralloc(void *ptr, size_t size, size_t extra, size_t alignment, 614 bool zero, bool no_move); 615#endif 616 617#if (defined(JEMALLOC_ENABLE_INLINE) || 
defined(JEMALLOC_C_)) 618JEMALLOC_INLINE void * 619imalloc(size_t size) 620{ 621 622 assert(size != 0); 623 624 if (size <= arena_maxclass) 625 return (arena_malloc(size, false)); 626 else 627 return (huge_malloc(size, false)); 628} 629 630JEMALLOC_INLINE void * 631icalloc(size_t size) 632{ 633 634 if (size <= arena_maxclass) 635 return (arena_malloc(size, true)); 636 else 637 return (huge_malloc(size, true)); 638} 639 640JEMALLOC_INLINE void * 641ipalloc(size_t usize, size_t alignment, bool zero) 642{ 643 void *ret; 644 645 assert(usize != 0); 646 assert(usize == sa2u(usize, alignment, NULL)); 647 648 if (usize <= arena_maxclass && alignment <= PAGE_SIZE) 649 ret = arena_malloc(usize, zero); 650 else { 651 size_t run_size 652#ifdef JEMALLOC_CC_SILENCE 653 = 0 654#endif 655 ; 656 657 /* 658 * Ideally we would only ever call sa2u() once per aligned 659 * allocation request, and the caller of this function has 660 * already done so once. However, it's rather burdensome to 661 * require every caller to pass in run_size, especially given 662 * that it's only relevant to large allocations. Therefore, 663 * just call it again here in order to get run_size. 664 */ 665 sa2u(usize, alignment, &run_size); 666 if (run_size <= arena_maxclass) { 667 ret = arena_palloc(choose_arena(), usize, run_size, 668 alignment, zero); 669 } else if (alignment <= chunksize) 670 ret = huge_malloc(usize, zero); 671 else 672 ret = huge_palloc(usize, alignment, zero); 673 } 674 675 assert(((uintptr_t)ret & (alignment - 1)) == 0); 676 return (ret); 677} 678 679JEMALLOC_INLINE size_t 680isalloc(const void *ptr) 681{ 682 size_t ret; 683 arena_chunk_t *chunk; 684 685 assert(ptr != NULL); 686 687 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); 688 if (chunk != ptr) { 689 /* Region. 
*/ 690 if (config_prof) 691 ret = arena_salloc_demote(ptr); 692 else 693 ret = arena_salloc(ptr); 694 } else 695 ret = huge_salloc(ptr); 696 697 return (ret); 698} 699 700JEMALLOC_INLINE size_t 701ivsalloc(const void *ptr) 702{ 703 704 /* Return 0 if ptr is not within a chunk managed by jemalloc. */ 705 if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == NULL) 706 return (0); 707 708 return (isalloc(ptr)); 709} 710 711JEMALLOC_INLINE void 712idalloc(void *ptr) 713{ 714 arena_chunk_t *chunk; 715 716 assert(ptr != NULL); 717 718 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); 719 if (chunk != ptr) 720 arena_dalloc(chunk->arena, chunk, ptr); 721 else 722 huge_dalloc(ptr, true); 723} 724 725JEMALLOC_INLINE void * 726iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero, 727 bool no_move) 728{ 729 void *ret; 730 size_t oldsize; 731 732 assert(ptr != NULL); 733 assert(size != 0); 734 735 oldsize = isalloc(ptr); 736 737 if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1)) 738 != 0) { 739 size_t usize, copysize; 740 741 /* 742 * Existing object alignment is inadquate; allocate new space 743 * and copy. 744 */ 745 if (no_move) 746 return (NULL); 747 usize = sa2u(size + extra, alignment, NULL); 748 if (usize == 0) 749 return (NULL); 750 ret = ipalloc(usize, alignment, zero); 751 if (ret == NULL) { 752 if (extra == 0) 753 return (NULL); 754 /* Try again, without extra this time. */ 755 usize = sa2u(size, alignment, NULL); 756 if (usize == 0) 757 return (NULL); 758 ret = ipalloc(usize, alignment, zero); 759 if (ret == NULL) 760 return (NULL); 761 } 762 /* 763 * Copy at most size bytes (not size+extra), since the caller 764 * has no expectation that the extra bytes will be reliably 765 * preserved. 766 */ 767 copysize = (size < oldsize) ? 
size : oldsize; 768 memcpy(ret, ptr, copysize); 769 idalloc(ptr); 770 return (ret); 771 } 772 773 if (no_move) { 774 if (size <= arena_maxclass) { 775 return (arena_ralloc_no_move(ptr, oldsize, size, 776 extra, zero)); 777 } else { 778 return (huge_ralloc_no_move(ptr, oldsize, size, 779 extra)); 780 } 781 } else { 782 if (size + extra <= arena_maxclass) { 783 return (arena_ralloc(ptr, oldsize, size, extra, 784 alignment, zero)); 785 } else { 786 return (huge_ralloc(ptr, oldsize, size, extra, 787 alignment, zero)); 788 } 789 } 790} 791#endif 792 793#include "jemalloc/internal/prof.h" 794 795#undef JEMALLOC_H_INLINES 796/******************************************************************************/ 797