/* jemalloc_internal.h.in, revision 7e15dab94d3f008b0a6c296ad7afec9ed47ff1ac */
#include <sys/mman.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/uio.h>

#include <errno.h>
#include <limits.h>
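/*
 * SIZE_T_MAX is a BSD <limits.h> extension; fall back to C99's SIZE_MAX on
 * systems that do not provide it.
 */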
#ifndef SIZE_T_MAX
#  define SIZE_T_MAX	SIZE_MAX
#endif
#include <pthread.h>
#include <sched.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>
#ifndef offsetof
#  define offsetof(type, member)	((size_t)&(((type *)NULL)->member))
#endif
#include <inttypes.h>
#include <string.h>
#include <strings.h>
#include <ctype.h>
#include <unistd.h>
#include <fcntl.h>
#include <pthread.h>
#include <math.h>

#define	JEMALLOC_MANGLE
#include "../jemalloc@install_suffix@.h"

#include "jemalloc/internal/private_namespace.h"

#ifdef JEMALLOC_CC_SILENCE
#define	UNUSED JEMALLOC_ATTR(unused)
#else
#define	UNUSED
#endif

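/*
 * Each compile-time option is exposed as a static const bool so that
 * option-dependent code can be written as ordinary C conditionals, e.g.
 * "if (config_stats) { ... }"; the compiler still type-checks every branch,
 * then dead-code-eliminates the disabled ones.
 */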
static const bool config_debug =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
static const bool config_dss =
#ifdef JEMALLOC_DSS
    true
#else
    false
#endif
    ;
static const bool config_dynamic_page_shift =
#ifdef JEMALLOC_DYNAMIC_PAGE_SHIFT
    true
#else
    false
#endif
    ;
static const bool config_fill =
#ifdef JEMALLOC_FILL
    true
#else
    false
#endif
    ;
static const bool config_lazy_lock =
#ifdef JEMALLOC_LAZY_LOCK
    true
#else
    false
#endif
    ;
static const bool config_prof =
#ifdef JEMALLOC_PROF
    true
#else
    false
#endif
    ;
static const bool config_prof_libgcc =
#ifdef JEMALLOC_PROF_LIBGCC
    true
#else
    false
#endif
    ;
static const bool config_prof_libunwind =
#ifdef JEMALLOC_PROF_LIBUNWIND
    true
#else
    false
#endif
    ;
static const bool config_stats =
#ifdef JEMALLOC_STATS
    true
#else
    false
#endif
    ;
static const bool config_tcache =
#ifdef JEMALLOC_TCACHE
    true
#else
    false
#endif
    ;
static const bool config_tls =
#ifdef JEMALLOC_TLS
    true
#else
    false
#endif
    ;
static const bool config_xmalloc =
#ifdef JEMALLOC_XMALLOC
    true
#else
    false
#endif
    ;
static const bool config_ivsalloc =
#ifdef JEMALLOC_IVSALLOC
    true
#else
    false
#endif
    ;

#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
#include <libkern/OSAtomic.h>
#endif

#ifdef JEMALLOC_ZONE
#include <mach/mach_error.h>
#include <mach/mach_init.h>
#include <mach/vm_map.h>
#include <malloc/malloc.h>
#endif

#define	RB_COMPACT
#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/qr.h"
#include "jemalloc/internal/ql.h"

extern void	(*JEMALLOC_P(malloc_message))(void *wcbopaque, const char *s);

/*
 * Define a custom assert() in order to reduce the chances of deadlock during
 * assertion failure.
 */
#ifndef assert
#  ifdef JEMALLOC_DEBUG
#    define assert(e) do {						\
	if (!(e)) {							\
		char line_buf[UMAX2S_BUFSIZE];				\
		malloc_write("<jemalloc>: ");				\
		malloc_write(__FILE__);					\
		malloc_write(":");					\
		malloc_write(u2s(__LINE__, 10, line_buf));		\
		malloc_write(": Failed assertion: ");			\
		malloc_write("\"");					\
		malloc_write(#e);					\
		malloc_write("\"\n");					\
		abort();						\
	}								\
} while (0)
#  else
#    define assert(e)
#  endif
#endif

/* Use to assert a particular configuration, e.g., cassert(config_debug). */
#define	cassert(c) do {							\
	if ((c) == false)						\
		assert(false);						\
} while (0)

/*
 * jemalloc can conceptually be broken into components (arena, tcache, etc.),
 * but there are circular dependencies that cannot be broken without
 * substantial performance degradation.  In order to reduce the effect on
 * visual code flow, read the header files in multiple passes, with one of the
 * following cpp variables defined during each pass:
 *
 *   JEMALLOC_H_TYPES   : Preprocessor-defined constants and pseudo-opaque data
 *                        types.
 *   JEMALLOC_H_STRUCTS : Data structures.
 *   JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
 *   JEMALLOC_H_INLINES : Inline functions.
 */
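/*
 * A component header is expected to provide one section per pass; as a rough
 * sketch (see, e.g., jemalloc/internal/arena.h for the real layout):
 *
 *   #ifdef JEMALLOC_H_TYPES
 *   ...typedefs and cpp constants...
 *   #endif
 *   #ifdef JEMALLOC_H_STRUCTS
 *   ...struct definitions...
 *   #endif
 *   #ifdef JEMALLOC_H_EXTERNS
 *   ...extern declarations and prototypes...
 *   #endif
 *   #ifdef JEMALLOC_H_INLINES
 *   ...inline function definitions...
 *   #endif
 */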
/******************************************************************************/
#define JEMALLOC_H_TYPES

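/*
 * Mask for extracting the lg(alignment) that allocm() flags encode in their
 * low bits.
 */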
#define	ALLOCM_LG_ALIGN_MASK	((int)0x3f)

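/* Abbreviated size_t cast, mainly for keeping constant expressions terse. */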
#define	ZU(z)	((size_t)z)

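/*
 * __DECONST casts away const (a FreeBSD <sys/cdefs.h> macro); define it here
 * for platforms that lack it.
 */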
#ifndef __DECONST
#  define	__DECONST(type, var)	((type)(uintptr_t)(const void *)(var))
#endif

#ifdef JEMALLOC_DEBUG
   /* Disable inlining to make debugging easier. */
#  define JEMALLOC_INLINE
#  define inline
#else
#  define JEMALLOC_ENABLE_INLINE
#  define JEMALLOC_INLINE static inline
#endif

/* Size of stack-allocated buffer passed to buferror(). */
#define	BUFERROR_BUF		64

/* Smallest size class to support. */
#define	LG_TINY_MIN		3
#define	TINY_MIN		(1U << LG_TINY_MIN)

/*
 * Minimum alignment of allocations is 2^LG_QUANTUM bytes (ignoring tiny size
 * classes).
 */
#ifndef LG_QUANTUM
#  ifdef __i386__
#    define LG_QUANTUM		4
#  endif
#  ifdef __ia64__
#    define LG_QUANTUM		4
#  endif
#  ifdef __alpha__
#    define LG_QUANTUM		4
#  endif
#  ifdef __sparc64__
#    define LG_QUANTUM		4
#  endif
#  if (defined(__amd64__) || defined(__x86_64__))
#    define LG_QUANTUM		4
#  endif
#  ifdef __arm__
#    define LG_QUANTUM		3
#  endif
#  ifdef __mips__
#    define LG_QUANTUM		3
#  endif
#  ifdef __powerpc__
#    define LG_QUANTUM		4
#  endif
#  ifdef __s390x__
#    define LG_QUANTUM		4
#  endif
#  ifdef __tile__
#    define LG_QUANTUM		4
#  endif
#  ifndef LG_QUANTUM
#    error "No LG_QUANTUM definition for architecture; specify via CPPFLAGS"
#  endif
#endif

#define	QUANTUM			((size_t)(1U << LG_QUANTUM))
#define	QUANTUM_MASK		(QUANTUM - 1)

/* Return the smallest quantum multiple that is >= a. */
#define	QUANTUM_CEILING(a)						\
	(((a) + QUANTUM_MASK) & ~QUANTUM_MASK)
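/*
 * For example, with LG_QUANTUM == 4 (QUANTUM == 16), QUANTUM_CEILING(17) is
 * 32 and QUANTUM_CEILING(16) is 16; the same mask-and-add pattern is used by
 * the other *_CEILING() macros below.
 */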

#define	LONG			((size_t)(1U << LG_SIZEOF_LONG))
#define	LONG_MASK		(LONG - 1)

/* Return the smallest long multiple that is >= a. */
#define	LONG_CEILING(a)						\
	(((a) + LONG_MASK) & ~LONG_MASK)

#define	SIZEOF_PTR		(1U << LG_SIZEOF_PTR)
#define	PTR_MASK		(SIZEOF_PTR - 1)

/* Return the smallest (void *) multiple that is >= a. */
#define	PTR_CEILING(a)						\
	(((a) + PTR_MASK) & ~PTR_MASK)

/*
 * Maximum size of L1 cache line.  This is used to avoid cache line aliasing.
 * In addition, this controls the spacing of cacheline-spaced size classes.
 */
#define	LG_CACHELINE		6
#define	CACHELINE		((size_t)(1U << LG_CACHELINE))
#define	CACHELINE_MASK		(CACHELINE - 1)

/* Return the smallest cacheline multiple that is >= s. */
#define	CACHELINE_CEILING(s)						\
	(((s) + CACHELINE_MASK) & ~CACHELINE_MASK)

/* Page size.  STATIC_PAGE_SHIFT is determined by the configure script. */
#define	STATIC_PAGE_SIZE ((size_t)(1U << STATIC_PAGE_SHIFT))
#define	STATIC_PAGE_MASK ((size_t)(STATIC_PAGE_SIZE - 1))
#ifdef PAGE_SHIFT
#  undef PAGE_SHIFT
#endif
#ifdef PAGE_SIZE
#  undef PAGE_SIZE
#endif
#ifdef PAGE_MASK
#  undef PAGE_MASK
#endif
#define	PAGE_SHIFT	STATIC_PAGE_SHIFT
#define	PAGE_SIZE	STATIC_PAGE_SIZE
#define	PAGE_MASK	STATIC_PAGE_MASK

/* Return the smallest pagesize multiple that is >= s. */
#define	PAGE_CEILING(s)							\
	(((s) + PAGE_MASK) & ~PAGE_MASK)

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prn.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#ifdef JEMALLOC_ZONE
#include "jemalloc/internal/zone.h"
#endif
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_TYPES
/******************************************************************************/
#define JEMALLOC_H_STRUCTS

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prn.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#ifdef JEMALLOC_ZONE
#include "jemalloc/internal/zone.h"
#endif
#include "jemalloc/internal/prof.h"

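/*
 * Per-thread counters of cumulative allocated/deallocated bytes; these back
 * the "thread.allocated" and "thread.deallocated" mallctl queries.
 */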
typedef struct {
	uint64_t	allocated;
	uint64_t	deallocated;
} thread_allocated_t;

#undef JEMALLOC_H_STRUCTS
/******************************************************************************/
#define JEMALLOC_H_EXTERNS

extern bool	opt_abort;
extern bool	opt_junk;
extern bool	opt_xmalloc;
extern bool	opt_zero;
extern size_t	opt_narenas;

#ifdef DYNAMIC_PAGE_SHIFT
extern size_t		pagesize;
extern size_t		pagesize_mask;
extern size_t		lg_pagesize;
#endif

/* Number of CPUs. */
extern unsigned		ncpus;

extern malloc_mutex_t	arenas_lock; /* Protects arenas initialization. */
extern pthread_key_t	arenas_tsd;
#ifndef NO_TLS
/*
 * Map of pthread_self() --> arenas[???], used for selecting an arena to use
 * for allocations.
 */
extern __thread arena_t	*arenas_tls JEMALLOC_ATTR(tls_model("initial-exec"));
#  define ARENA_GET()	arenas_tls
#  define ARENA_SET(v)	do {						\
	arenas_tls = (v);						\
	pthread_setspecific(arenas_tsd, (void *)(v));			\
} while (0)
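/*
 * Note that even the TLS variant of ARENA_SET() records the arena via
 * pthread_setspecific(); presumably this is what keeps the arenas_tsd
 * destructor firing at thread exit, while reads go through the faster
 * __thread variable.
 */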
#else
#  define ARENA_GET()	((arena_t *)pthread_getspecific(arenas_tsd))
#  define ARENA_SET(v)	do {						\
	pthread_setspecific(arenas_tsd, (void *)(v));			\
} while (0)
#endif

/*
 * Arenas that are used to service external requests.  Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 */
extern arena_t		**arenas;
extern unsigned		narenas;

#ifndef NO_TLS
extern __thread thread_allocated_t	thread_allocated_tls;
#    define ALLOCATED_GET() (thread_allocated_tls.allocated)
#    define ALLOCATEDP_GET() (&thread_allocated_tls.allocated)
#    define DEALLOCATED_GET() (thread_allocated_tls.deallocated)
#    define DEALLOCATEDP_GET() (&thread_allocated_tls.deallocated)
#    define ALLOCATED_ADD(a, d) do {					\
	thread_allocated_tls.allocated += (a);				\
	thread_allocated_tls.deallocated += (d);			\
} while (0)
#else
#    define ALLOCATED_GET() (thread_allocated_get()->allocated)
#    define ALLOCATEDP_GET() (&thread_allocated_get()->allocated)
#    define DEALLOCATED_GET() (thread_allocated_get()->deallocated)
#    define DEALLOCATEDP_GET() (&thread_allocated_get()->deallocated)
#    define ALLOCATED_ADD(a, d) do {					\
	thread_allocated_t *thread_allocated = thread_allocated_get();	\
	thread_allocated->allocated += (a);				\
	thread_allocated->deallocated += (d);				\
} while (0)
#endif
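/*
 * The accounting macros above sit on the allocation fast path (e.g.
 * ALLOCATED_ADD(usize, 0) after a successful allocation when config_stats is
 * enabled), so they must remain cheap.
 */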
extern pthread_key_t	thread_allocated_tsd;
thread_allocated_t	*thread_allocated_get_hard(void);

arena_t	*arenas_extend(unsigned ind);
arena_t	*choose_arena_hard(void);
int	buferror(int errnum, char *buf, size_t buflen);
void	jemalloc_prefork(void);
void	jemalloc_postfork(void);

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prn.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#ifdef JEMALLOC_ZONE
#include "jemalloc/internal/zone.h"
#endif
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_EXTERNS
/******************************************************************************/
#define JEMALLOC_H_INLINES

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prn.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"

#ifndef JEMALLOC_ENABLE_INLINE
size_t	pow2_ceil(size_t x);
size_t	s2u(size_t size);
size_t	sa2u(size_t size, size_t alignment, size_t *run_size_p);
void	malloc_write(const char *s);
arena_t	*choose_arena(void);
thread_allocated_t	*thread_allocated_get(void);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
/* Compute the smallest power of 2 that is >= x. */
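/*
 * The bit-smearing below propagates the highest set bit of (x-1) into every
 * lower bit position, so the final increment yields the next power of two.
 * For example, pow2_ceil(42): 41 is 0b101001, which smears to 0b111111, and
 * adding 1 gives 64.
 */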
JEMALLOC_INLINE size_t
pow2_ceil(size_t x)
{

	x--;
	x |= x >> 1;
	x |= x >> 2;
	x |= x >> 4;
	x |= x >> 8;
	x |= x >> 16;
#if (LG_SIZEOF_PTR == 3)
	x |= x >> 32;
#endif
	x++;
	return (x);
}

/*
 * Compute usable size that would result from allocating an object with the
 * specified size.
 */
JEMALLOC_INLINE size_t
s2u(size_t size)
{

	if (size <= SMALL_MAXCLASS)
		return (arena_bin_info[SMALL_SIZE2BIN(size)].reg_size);
	if (size <= arena_maxclass)
		return (PAGE_CEILING(size));
	return (CHUNK_CEILING(size));
}

/*
 * Compute usable size that would result from allocating an object with the
 * specified size and alignment.
 */
JEMALLOC_INLINE size_t
sa2u(size_t size, size_t alignment, size_t *run_size_p)
{
	size_t usize;

	/*
	 * Round size up to the nearest multiple of alignment.
	 *
	 * This done, we can take advantage of the fact that for each small
	 * size class, every object is aligned at the smallest power of two
	 * that is non-zero in the base two representation of the size.  For
	 * example:
	 *
	 *   Size |   Base 2 | Minimum alignment
	 *   -----+----------+------------------
	 *     96 |  1100000 |  32
	 *    144 | 10100000 |  32
	 *    192 | 11000000 |  64
	 */
	usize = (size + (alignment - 1)) & (-alignment);
	/*
	 * (usize < size) protects against the combination of maximal
	 * alignment and size greater than maximal alignment.
	 */
	if (usize < size) {
		/* size_t overflow. */
		return (0);
	}

	if (usize <= arena_maxclass && alignment <= PAGE_SIZE) {
		if (usize <= SMALL_MAXCLASS)
			return (arena_bin_info[SMALL_SIZE2BIN(usize)].reg_size);
		return (PAGE_CEILING(usize));
	} else {
		size_t run_size;

		/*
		 * We can't achieve subpage alignment, so round up alignment
		 * permanently; it makes later calculations simpler.
		 */
		alignment = PAGE_CEILING(alignment);
		usize = PAGE_CEILING(size);
		/*
		 * (usize < size) protects against very large sizes within
		 * PAGE_SIZE of SIZE_T_MAX.
		 *
		 * (usize + alignment < usize) protects against the
		 * combination of maximal alignment and usize large enough
		 * to cause overflow.  This is similar to the first overflow
		 * check above, but it needs to be repeated due to the new
		 * usize value, which may now be *equal* to maximal
		 * alignment, whereas before we only detected overflow if the
		 * original size was *greater* than maximal alignment.
		 */
		if (usize < size || usize + alignment < usize) {
			/* size_t overflow. */
			return (0);
		}

		/*
		 * Calculate the size of the over-size run that arena_palloc()
		 * would need to allocate in order to guarantee the alignment.
		 */
		if (usize >= alignment)
			run_size = usize + alignment - PAGE_SIZE;
		else {
			/*
			 * It is possible that (alignment << 1) will cause
			 * overflow, but it doesn't matter because we also
			 * subtract PAGE_SIZE, which in the case of overflow
			 * leaves us with a very large run_size.  That causes
			 * the first conditional below to fail, which means
			 * that the bogus run_size value never gets used for
			 * anything important.
			 */
			run_size = (alignment << 1) - PAGE_SIZE;
		}
		if (run_size_p != NULL)
			*run_size_p = run_size;

		if (run_size <= arena_maxclass)
			return (PAGE_CEILING(usize));
		return (CHUNK_CEILING(usize));
	}
}

/*
 * Wrapper around malloc_message() that avoids the need for
 * JEMALLOC_P(malloc_message)(...) throughout the code.
 */
JEMALLOC_INLINE void
malloc_write(const char *s)
{

	JEMALLOC_P(malloc_message)(NULL, s);
}

/*
 * Choose an arena based on a per-thread value (fast-path code, calls slow-path
 * code if necessary).
 */
JEMALLOC_INLINE arena_t *
choose_arena(void)
{
	arena_t *ret;

	ret = ARENA_GET();
	if (ret == NULL) {
		ret = choose_arena_hard();
		assert(ret != NULL);
	}

	return (ret);
}

JEMALLOC_INLINE thread_allocated_t *
thread_allocated_get(void)
{
	thread_allocated_t *thread_allocated = (thread_allocated_t *)
	    pthread_getspecific(thread_allocated_tsd);

	if (thread_allocated == NULL)
		return (thread_allocated_get_hard());
	return (thread_allocated);
}
#endif

#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/hash.h"
#ifdef JEMALLOC_ZONE
#include "jemalloc/internal/zone.h"
#endif

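/*
 * The i*() inlines below are the internal allocation entry points that the
 * public API wrappers funnel into: ipalloc() serves aligned requests,
 * ivsalloc() additionally verifies that the pointer is jemalloc-managed, and
 * iralloc() implements resizing (optionally in place, via no_move).
 */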
#ifndef JEMALLOC_ENABLE_INLINE
void	*imalloc(size_t size);
void	*icalloc(size_t size);
void	*ipalloc(size_t usize, size_t alignment, bool zero);
size_t	isalloc(const void *ptr);
size_t	ivsalloc(const void *ptr);
void	idalloc(void *ptr);
void	*iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
    bool zero, bool no_move);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_INLINE void *
imalloc(size_t size)
{

	assert(size != 0);

	if (size <= arena_maxclass)
		return (arena_malloc(size, false));
	else
		return (huge_malloc(size, false));
}

JEMALLOC_INLINE void *
icalloc(size_t size)
{

	if (size <= arena_maxclass)
		return (arena_malloc(size, true));
	else
		return (huge_malloc(size, true));
}

JEMALLOC_INLINE void *
ipalloc(size_t usize, size_t alignment, bool zero)
{
	void *ret;

	assert(usize != 0);
	assert(usize == sa2u(usize, alignment, NULL));

	if (usize <= arena_maxclass && alignment <= PAGE_SIZE)
		ret = arena_malloc(usize, zero);
	else {
		size_t run_size
#ifdef JEMALLOC_CC_SILENCE
		    = 0
#endif
		    ;

		/*
		 * Ideally we would only ever call sa2u() once per aligned
		 * allocation request, and the caller of this function has
		 * already done so once.  However, it's rather burdensome to
		 * require every caller to pass in run_size, especially given
		 * that it's only relevant to large allocations.  Therefore,
		 * just call it again here in order to get run_size.
		 */
		sa2u(usize, alignment, &run_size);
		if (run_size <= arena_maxclass) {
			ret = arena_palloc(choose_arena(), usize, run_size,
			    alignment, zero);
		} else if (alignment <= chunksize)
			ret = huge_malloc(usize, zero);
		else
			ret = huge_palloc(usize, alignment, zero);
	}

	assert(((uintptr_t)ret & (alignment - 1)) == 0);
	return (ret);
}

JEMALLOC_INLINE size_t
isalloc(const void *ptr)
{
	size_t ret;
	arena_chunk_t *chunk;

	assert(ptr != NULL);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr) {
		/* Region. */
		if (config_prof)
			ret = arena_salloc_demote(ptr);
		else
			ret = arena_salloc(ptr);
	} else
		ret = huge_salloc(ptr);

	return (ret);
}

JEMALLOC_INLINE size_t
ivsalloc(const void *ptr)
{

	/* Return 0 if ptr is not within a chunk managed by jemalloc. */
	if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == NULL)
		return (0);

	return (isalloc(ptr));
}

JEMALLOC_INLINE void
idalloc(void *ptr)
{
	arena_chunk_t *chunk;

	assert(ptr != NULL);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr)
		arena_dalloc(chunk->arena, chunk, ptr);
	else
		huge_dalloc(ptr, true);
}

JEMALLOC_INLINE void *
iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
    bool no_move)
{
	void *ret;
	size_t oldsize;

	assert(ptr != NULL);
	assert(size != 0);

	oldsize = isalloc(ptr);

	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
	    != 0) {
		size_t usize, copysize;

		/*
		 * Existing object alignment is inadequate; allocate new space
		 * and copy.
		 */
		if (no_move)
			return (NULL);
		usize = sa2u(size + extra, alignment, NULL);
		if (usize == 0)
			return (NULL);
		ret = ipalloc(usize, alignment, zero);
		if (ret == NULL) {
			if (extra == 0)
				return (NULL);
			/* Try again, without extra this time. */
			usize = sa2u(size, alignment, NULL);
			if (usize == 0)
				return (NULL);
			ret = ipalloc(usize, alignment, zero);
			if (ret == NULL)
				return (NULL);
		}
		/*
		 * Copy at most size bytes (not size+extra), since the caller
		 * has no expectation that the extra bytes will be reliably
		 * preserved.
		 */
		copysize = (size < oldsize) ? size : oldsize;
		memcpy(ret, ptr, copysize);
		idalloc(ptr);
		return (ret);
	}

	if (no_move) {
		if (size <= arena_maxclass) {
			return (arena_ralloc_no_move(ptr, oldsize, size,
			    extra, zero));
		} else {
			return (huge_ralloc_no_move(ptr, oldsize, size,
			    extra));
		}
	} else {
		if (size + extra <= arena_maxclass) {
			return (arena_ralloc(ptr, oldsize, size, extra,
			    alignment, zero));
		} else {
			return (huge_ralloc(ptr, oldsize, size, extra,
			    alignment, zero));
		}
	}
}
#endif

#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_INLINES
/******************************************************************************/
