#ifndef JEMALLOC_INTERNAL_H
#define	JEMALLOC_INTERNAL_H

#include "jemalloc_internal_defs.h"
#include "jemalloc/internal/jemalloc_internal_decls.h"

#ifdef JEMALLOC_UTRACE
#include <sys/ktrace.h>
#endif

#define	JEMALLOC_NO_DEMANGLE
#ifdef JEMALLOC_JET
#  define JEMALLOC_N(n) jet_##n
#  include "jemalloc/internal/public_namespace.h"
#  define JEMALLOC_NO_RENAME
#  include "../jemalloc.h"
#  undef JEMALLOC_NO_RENAME
#else
#  define JEMALLOC_N(n) je_##n
#  include "../jemalloc.h"
#endif
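/*
 * Illustratively: private_namespace.h (included next) maps each internal
 * symbol through JEMALLOC_N(), so e.g. arena_malloc becomes je_arena_malloc
 * in normal builds and jet_arena_malloc in JEMALLOC_JET (testing) builds,
 * which lets tests link against and interpose on internal functions.
 */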
#include "jemalloc/internal/private_namespace.h"

static const bool config_debug =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
static const bool have_dss =
#ifdef JEMALLOC_DSS
    true
#else
    false
#endif
    ;
static const bool config_fill =
#ifdef JEMALLOC_FILL
    true
#else
    false
#endif
    ;
static const bool config_lazy_lock =
#ifdef JEMALLOC_LAZY_LOCK
    true
#else
    false
#endif
    ;
static const bool config_prof =
#ifdef JEMALLOC_PROF
    true
#else
    false
#endif
    ;
static const bool config_prof_libgcc =
#ifdef JEMALLOC_PROF_LIBGCC
    true
#else
    false
#endif
    ;
static const bool config_prof_libunwind =
#ifdef JEMALLOC_PROF_LIBUNWIND
    true
#else
    false
#endif
    ;
static const bool config_munmap =
#ifdef JEMALLOC_MUNMAP
    true
#else
    false
#endif
    ;
static const bool config_stats =
#ifdef JEMALLOC_STATS
    true
#else
    false
#endif
    ;
static const bool config_tcache =
#ifdef JEMALLOC_TCACHE
    true
#else
    false
#endif
    ;
static const bool config_tls =
#ifdef JEMALLOC_TLS
    true
#else
    false
#endif
    ;
static const bool config_utrace =
#ifdef JEMALLOC_UTRACE
    true
#else
    false
#endif
    ;
static const bool config_valgrind =
#ifdef JEMALLOC_VALGRIND
    true
#else
    false
#endif
    ;
static const bool config_xmalloc =
#ifdef JEMALLOC_XMALLOC
    true
#else
    false
#endif
    ;
static const bool config_ivsalloc =
#ifdef JEMALLOC_IVSALLOC
    true
#else
    false
#endif
    ;
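/*
 * Exposing the settings above as const booleans (rather than bare cpp
 * conditionals) allows configuration checks to be written as ordinary C
 * conditionals, with the compiler discarding the dead branch.  A minimal
 * sketch of the intended style:
 *
 *	if (config_debug)
 *		assert(ptr != NULL);
 */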

#ifdef JEMALLOC_ATOMIC9
#include <machine/atomic.h>
#endif

#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
#include <libkern/OSAtomic.h>
#endif

#ifdef JEMALLOC_ZONE
#include <mach/mach_error.h>
#include <mach/mach_init.h>
#include <mach/vm_map.h>
#include <malloc/malloc.h>
#endif

#define	RB_COMPACT
#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/qr.h"
#include "jemalloc/internal/ql.h"
/*
 * jemalloc can conceptually be broken into components (arena, tcache, etc.),
 * but there are circular dependencies that cannot be broken without
 * substantial performance degradation.  In order to reduce the effect on
 * visual code flow, read the header files in multiple passes, with one of the
 * following cpp variables defined during each pass:
 *
 *   JEMALLOC_H_TYPES   : Preprocessor-defined constants and pseudo-opaque data
 *                        types.
 *   JEMALLOC_H_STRUCTS : Data structures.
 *   JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
 *   JEMALLOC_H_INLINES : Inline functions.
 */
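/*
 * Concretely, each component header repeats this pattern by guarding one
 * section per pass; a sketch (the typedef is illustrative):
 *
 *	#ifdef JEMALLOC_H_TYPES
 *	typedef struct arena_s arena_t;
 *	#endif
 */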
/******************************************************************************/
#define	JEMALLOC_H_TYPES

#include "jemalloc/internal/jemalloc_internal_macros.h"

#define	MALLOCX_LG_ALIGN_MASK	((int)0x3f)

/* Smallest size class to support. */
#define	LG_TINY_MIN		3
#define	TINY_MIN		(1U << LG_TINY_MIN)

/*
 * Minimum alignment of allocations is 2^LG_QUANTUM bytes (ignoring tiny size
 * classes).
 */
#ifndef LG_QUANTUM
#  if (defined(__i386__) || defined(_M_IX86))
#    define LG_QUANTUM		4
#  endif
#  ifdef __ia64__
#    define LG_QUANTUM		4
#  endif
#  ifdef __alpha__
#    define LG_QUANTUM		4
#  endif
#  ifdef __sparc64__
#    define LG_QUANTUM		4
#  endif
#  if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64))
#    define LG_QUANTUM		4
#  endif
#  ifdef __arm__
#    define LG_QUANTUM		3
#  endif
#  ifdef __aarch64__
#    define LG_QUANTUM		4
#  endif
#  ifdef __hppa__
#    define LG_QUANTUM		4
#  endif
#  ifdef __mips__
#    define LG_QUANTUM		3
#  endif
#  ifdef __powerpc__
#    define LG_QUANTUM		4
#  endif
#  ifdef __s390__
#    define LG_QUANTUM		4
#  endif
#  ifdef __SH4__
#    define LG_QUANTUM		4
#  endif
#  ifdef __tile__
#    define LG_QUANTUM		4
#  endif
#  ifdef __le32__
#    define LG_QUANTUM		4
#  endif
#  ifndef LG_QUANTUM
#    error "No LG_QUANTUM definition for architecture; specify via CPPFLAGS"
#  endif
#endif

#define	QUANTUM			((size_t)(1U << LG_QUANTUM))
#define	QUANTUM_MASK		(QUANTUM - 1)

/* Return the smallest quantum multiple that is >= a. */
#define	QUANTUM_CEILING(a)						\
	(((a) + QUANTUM_MASK) & ~QUANTUM_MASK)
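/*
 * For example, with LG_QUANTUM == 4 (QUANTUM == 16): QUANTUM_CEILING(1) == 16,
 * QUANTUM_CEILING(17) == ((17 + 15) & ~15) == 32, and QUANTUM_CEILING(32) ==
 * 32.  The same mask-based rounding is used by LONG_CEILING(), PTR_CEILING(),
 * CACHELINE_CEILING(), and PAGE_CEILING() below.
 */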

#define	LONG			((size_t)(1U << LG_SIZEOF_LONG))
#define	LONG_MASK		(LONG - 1)

/* Return the smallest long multiple that is >= a. */
#define	LONG_CEILING(a)							\
	(((a) + LONG_MASK) & ~LONG_MASK)

#define	SIZEOF_PTR		(1U << LG_SIZEOF_PTR)
#define	PTR_MASK		(SIZEOF_PTR - 1)

/* Return the smallest (void *) multiple that is >= a. */
#define	PTR_CEILING(a)							\
	(((a) + PTR_MASK) & ~PTR_MASK)

/*
 * Maximum size of L1 cache line.  This is used to avoid cache line aliasing.
 * In addition, this controls the spacing of cacheline-spaced size classes.
 *
 * CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can
 * only handle raw constants.
 */
#define	LG_CACHELINE		6
#define	CACHELINE		64
#define	CACHELINE_MASK		(CACHELINE - 1)

/* Return the smallest cacheline multiple that is >= s. */
#define	CACHELINE_CEILING(s)						\
	(((s) + CACHELINE_MASK) & ~CACHELINE_MASK)

/* Page size.  STATIC_PAGE_SHIFT is determined by the configure script. */
#ifdef PAGE_MASK
#  undef PAGE_MASK
#endif
#define	LG_PAGE		STATIC_PAGE_SHIFT
#define	PAGE		((size_t)(1U << STATIC_PAGE_SHIFT))
#define	PAGE_MASK	((size_t)(PAGE - 1))

/* Return the smallest pagesize multiple that is >= s. */
#define	PAGE_CEILING(s)							\
	(((s) + PAGE_MASK) & ~PAGE_MASK)

/* Return the nearest aligned address at or below a. */
#define	ALIGNMENT_ADDR2BASE(a, alignment)				\
	((void *)((uintptr_t)(a) & (-(alignment))))

/* Return the offset between a and the nearest aligned address at or below a. */
#define	ALIGNMENT_ADDR2OFFSET(a, alignment)				\
	((size_t)((uintptr_t)(a) & (alignment - 1)))

/* Return the smallest alignment multiple that is >= s. */
#define	ALIGNMENT_CEILING(s, alignment)					\
	(((s) + (alignment - 1)) & (-(alignment)))
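/*
 * For example, with alignment == 16 and a == 0x1007:
 * ALIGNMENT_ADDR2BASE(a, 16) == (void *)0x1000, ALIGNMENT_ADDR2OFFSET(a, 16)
 * == 0x7, and ALIGNMENT_CEILING(0x1007, 16) == 0x1010.  All three assume that
 * alignment is a power of two.
 */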

/* Declare a variable-length array. */
#if __STDC_VERSION__ < 199901L
#  ifdef _MSC_VER
#    include <malloc.h>
#    define alloca _alloca
#  else
#    ifdef JEMALLOC_HAS_ALLOCA_H
#      include <alloca.h>
#    else
#      include <stdlib.h>
#    endif
#  endif
#  define VARIABLE_ARRAY(type, name, count) \
	type *name = alloca(sizeof(type) * (count))
#else
#  define VARIABLE_ARRAY(type, name, count) type name[(count)]
#endif
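/*
 * Illustrative use (the names are hypothetical):
 *
 *	VARIABLE_ARRAY(size_t, sizes, nitems);
 *
 * Note that the pre-C99 fallback allocates via alloca(), whose storage is
 * released at function return rather than at block exit, unlike a true VLA.
 */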

#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_TYPES
/******************************************************************************/
#define	JEMALLOC_H_STRUCTS

#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

typedef struct {
	uint64_t	allocated;
	uint64_t	deallocated;
} thread_allocated_t;
/*
 * The JEMALLOC_ARG_CONCAT() wrapper is necessary to pass {0, 0} via a cpp macro
 * argument.
 */
#define	THREAD_ALLOCATED_INITIALIZER	JEMALLOC_ARG_CONCAT({0, 0})
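/*
 * Without the wrapper, the comma inside {0, 0} would be treated as a macro
 * argument separator; e.g. a single-parameter FOO({0, 0}) would receive "{0"
 * and "0}" as two arguments and fail to expand.
 */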

#undef JEMALLOC_H_STRUCTS
/******************************************************************************/
#define	JEMALLOC_H_EXTERNS

extern bool	opt_abort;
extern bool	opt_junk;
extern size_t	opt_quarantine;
extern bool	opt_redzone;
extern bool	opt_utrace;
extern bool	opt_xmalloc;
extern bool	opt_zero;
extern size_t	opt_narenas;

extern bool	in_valgrind;

/* Number of CPUs. */
extern unsigned		ncpus;

/* Protects arenas initialization (arenas, arenas_total). */
extern malloc_mutex_t	arenas_lock;
/*
 * Arenas that are used to service external requests.  Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 *
 * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
 * arenas.  arenas[narenas_auto..narenas_total) are only used if the application
 * takes some action to create them and allocate from them.
 */
extern arena_t		**arenas;
extern unsigned		narenas_total;
extern unsigned		narenas_auto; /* Read-only after initialization. */

arena_t	*arenas_extend(unsigned ind);
void	arenas_cleanup(void *arg);
arena_t	*choose_arena_hard(void);
void	jemalloc_prefork(void);
void	jemalloc_postfork_parent(void);
void	jemalloc_postfork_child(void);

#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_EXTERNS
/******************************************************************************/
#define	JEMALLOC_H_INLINES

#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"

/*
 * Include arena.h the first time in order to provide inline functions for this
 * header's inlines.
 */
#define	JEMALLOC_ARENA_INLINE_A
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_A

#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), arenas, arena_t *)

size_t	s2u(size_t size);
size_t	sa2u(size_t size, size_t alignment);
unsigned	narenas_total_get(void);
arena_t	*choose_arena(arena_t *arena);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
/*
 * Map of pthread_self() --> arenas[???], used for selecting an arena to use
 * for allocations.
 */
malloc_tsd_externs(arenas, arena_t *)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, arenas, arena_t *, NULL,
    arenas_cleanup)

/*
 * Compute usable size that would result from allocating an object with the
 * specified size.
 */
JEMALLOC_ALWAYS_INLINE size_t
s2u(size_t size)
{

	if (size <= SMALL_MAXCLASS)
		return (small_s2u(size));
	if (size <= arena_maxclass)
		return (PAGE_CEILING(size));
	return (CHUNK_CEILING(size));
}
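/*
 * Illustrative values, assuming 4 KiB pages and the default size classes
 * (actual boundaries come from size_classes.h): s2u(100) == 112 (rounded up
 * to a small size class) and s2u(5000) == 8192 (rounded up to a page
 * multiple).
 */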

/*
 * Compute usable size that would result from allocating an object with the
 * specified size and alignment.
 */
JEMALLOC_ALWAYS_INLINE size_t
sa2u(size_t size, size_t alignment)
{
	size_t usize;

	assert(alignment != 0 && ((alignment - 1) & alignment) == 0);

	/*
	 * Round size up to the nearest multiple of alignment.
	 *
	 * This done, we can take advantage of the fact that for each small
	 * size class, every object is aligned at the smallest power of two
	 * that is non-zero in the base two representation of the size.  For
	 * example:
	 *
	 *   Size |   Base 2 | Minimum alignment
	 *   -----+----------+------------------
	 *     96 |  1100000 |  32
	 *    144 | 10100000 |  32
	 *    192 | 11000000 |  64
	 */
	usize = ALIGNMENT_CEILING(size, alignment);
	/*
	 * (usize < size) protects against the combination of maximal
	 * alignment and size greater than maximal alignment.
	 */
	if (usize < size) {
		/* size_t overflow. */
		return (0);
	}

	if (usize <= arena_maxclass && alignment <= PAGE) {
		if (usize <= SMALL_MAXCLASS)
			return (small_s2u(usize));
		return (PAGE_CEILING(usize));
	} else {
		size_t run_size;

		/*
		 * We can't achieve subpage alignment, so round up alignment
		 * permanently; it makes later calculations simpler.
		 */
		alignment = PAGE_CEILING(alignment);
		usize = PAGE_CEILING(size);
		/*
		 * (usize < size) protects against very large sizes within
		 * PAGE of SIZE_T_MAX.
		 *
		 * (usize + alignment < usize) protects against the
		 * combination of maximal alignment and usize large enough
		 * to cause overflow.  This is similar to the first overflow
		 * check above, but it needs to be repeated due to the new
		 * usize value, which may now be *equal* to maximal
		 * alignment, whereas before we only detected overflow if the
		 * original size was *greater* than maximal alignment.
		 */
		if (usize < size || usize + alignment < usize) {
			/* size_t overflow. */
			return (0);
		}

		/*
		 * Calculate the size of the over-size run that arena_palloc()
		 * would need to allocate in order to guarantee the alignment.
		 * If the run wouldn't fit within a chunk, round up to a huge
		 * allocation size.
		 */
		run_size = usize + alignment - PAGE;
		if (run_size <= arena_maxclass)
			return (PAGE_CEILING(usize));
		return (CHUNK_CEILING(usize));
	}
}
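/*
 * Illustrative, under the same assumptions as the s2u() example above:
 * sa2u(1, 4096) rounds size up to the alignment (usize == 4096); the
 * alignment does not exceed PAGE and 4096 exceeds SMALL_MAXCLASS, so the
 * result is PAGE_CEILING(4096) == 4096.
 */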

JEMALLOC_INLINE unsigned
narenas_total_get(void)
{
	unsigned narenas;

	malloc_mutex_lock(&arenas_lock);
	narenas = narenas_total;
	malloc_mutex_unlock(&arenas_lock);

	return (narenas);
}

/* Choose an arena based on a per-thread value. */
JEMALLOC_INLINE arena_t *
choose_arena(arena_t *arena)
{
	arena_t *ret;

	if (arena != NULL)
		return (arena);

	if ((ret = *arenas_tsd_get()) == NULL) {
		ret = choose_arena_hard();
		assert(ret != NULL);
	}

	return (ret);
}
#endif

#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/rtree.h"
/*
 * Include arena.h the second and third times in order to resolve circular
 * dependencies with tcache.h.
 */
#define	JEMALLOC_ARENA_INLINE_B
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_B
#include "jemalloc/internal/tcache.h"
#define	JEMALLOC_ARENA_INLINE_C
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_C
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"

#ifndef JEMALLOC_ENABLE_INLINE
void	*imalloct(size_t size, bool try_tcache, arena_t *arena);
void	*imalloc(size_t size);
void	*icalloct(size_t size, bool try_tcache, arena_t *arena);
void	*icalloc(size_t size);
void	*ipalloct(size_t usize, size_t alignment, bool zero, bool try_tcache,
    arena_t *arena);
void	*ipalloc(size_t usize, size_t alignment, bool zero);
size_t	isalloc(const void *ptr, bool demote);
size_t	ivsalloc(const void *ptr, bool demote);
size_t	u2rz(size_t usize);
size_t	p2rz(const void *ptr);
void	idalloct(void *ptr, bool try_tcache);
void	idalloc(void *ptr);
void	iqalloct(void *ptr, bool try_tcache);
void	iqalloc(void *ptr);
void	*iralloct_realign(void *ptr, size_t oldsize, size_t size, size_t extra,
    size_t alignment, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc,
    arena_t *arena);
void	*iralloct(void *ptr, size_t size, size_t extra, size_t alignment,
    bool zero, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena);
void	*iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
    bool zero);
bool	ixalloc(void *ptr, size_t size, size_t extra, size_t alignment,
    bool zero);
malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t)
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_ALWAYS_INLINE void *
imalloct(size_t size, bool try_tcache, arena_t *arena)
{

	assert(size != 0);

	if (size <= arena_maxclass)
		return (arena_malloc(arena, size, false, try_tcache));
	else
		return (huge_malloc(arena, size, false));
}

JEMALLOC_ALWAYS_INLINE void *
imalloc(size_t size)
{

	return (imalloct(size, true, NULL));
}

JEMALLOC_ALWAYS_INLINE void *
icalloct(size_t size, bool try_tcache, arena_t *arena)
{

	if (size <= arena_maxclass)
		return (arena_malloc(arena, size, true, try_tcache));
	else
		return (huge_malloc(arena, size, true));
}

JEMALLOC_ALWAYS_INLINE void *
icalloc(size_t size)
{

	return (icalloct(size, true, NULL));
}

JEMALLOC_ALWAYS_INLINE void *
ipalloct(size_t usize, size_t alignment, bool zero, bool try_tcache,
    arena_t *arena)
{
	void *ret;

	assert(usize != 0);
	assert(usize == sa2u(usize, alignment));

	if (usize <= arena_maxclass && alignment <= PAGE)
		ret = arena_malloc(arena, usize, zero, try_tcache);
	else {
		if (usize <= arena_maxclass) {
			ret = arena_palloc(choose_arena(arena), usize,
			    alignment, zero);
		} else if (alignment <= chunksize)
			ret = huge_malloc(arena, usize, zero);
		else
			ret = huge_palloc(arena, usize, alignment, zero);
	}

	assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
	return (ret);
}

JEMALLOC_ALWAYS_INLINE void *
ipalloc(size_t usize, size_t alignment, bool zero)
{

	return (ipalloct(usize, alignment, zero, true, NULL));
}

/*
 * Typical usage:
 *   void *ptr = [...]
 *   size_t sz = isalloc(ptr, config_prof);
 */
JEMALLOC_ALWAYS_INLINE size_t
isalloc(const void *ptr, bool demote)
{
	size_t ret;
	arena_chunk_t *chunk;

	assert(ptr != NULL);
	/* Demotion only makes sense if config_prof is true. */
	assert(config_prof || demote == false);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr)
		ret = arena_salloc(ptr, demote);
	else
		ret = huge_salloc(ptr);

	return (ret);
}

JEMALLOC_ALWAYS_INLINE size_t
ivsalloc(const void *ptr, bool demote)
{

	/* Return 0 if ptr is not within a chunk managed by jemalloc. */
	if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == 0)
		return (0);

	return (isalloc(ptr, demote));
}
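/*
 * Unlike isalloc(), ivsalloc() tolerates arbitrary pointers; e.g. passing the
 * address of a static variable (hypothetical) simply misses in chunks_rtree
 * and yields 0.
 */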

JEMALLOC_INLINE size_t
u2rz(size_t usize)
{
	size_t ret;

	if (usize <= SMALL_MAXCLASS) {
		size_t binind = small_size2bin(usize);
		ret = arena_bin_info[binind].redzone_size;
	} else
		ret = 0;

	return (ret);
}

JEMALLOC_INLINE size_t
p2rz(const void *ptr)
{
	size_t usize = isalloc(ptr, false);

	return (u2rz(usize));
}

JEMALLOC_ALWAYS_INLINE void
idalloct(void *ptr, bool try_tcache)
{
	arena_chunk_t *chunk;

	assert(ptr != NULL);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr)
		arena_dalloc(chunk, ptr, try_tcache);
	else
		huge_dalloc(ptr);
}

JEMALLOC_ALWAYS_INLINE void
idalloc(void *ptr)
{

	idalloct(ptr, true);
}

JEMALLOC_ALWAYS_INLINE void
iqalloct(void *ptr, bool try_tcache)
{

	if (config_fill && opt_quarantine)
		quarantine(ptr);
	else
		idalloct(ptr, try_tcache);
}

JEMALLOC_ALWAYS_INLINE void
iqalloc(void *ptr)
{

	iqalloct(ptr, true);
}

JEMALLOC_ALWAYS_INLINE void *
iralloct_realign(void *ptr, size_t oldsize, size_t size, size_t extra,
    size_t alignment, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc,
    arena_t *arena)
{
	void *p;
	size_t usize, copysize;

	usize = sa2u(size + extra, alignment);
	if (usize == 0)
		return (NULL);
	p = ipalloct(usize, alignment, zero, try_tcache_alloc, arena);
	if (p == NULL) {
		if (extra == 0)
			return (NULL);
		/* Try again, without extra this time. */
		usize = sa2u(size, alignment);
		if (usize == 0)
			return (NULL);
		p = ipalloct(usize, alignment, zero, try_tcache_alloc, arena);
		if (p == NULL)
			return (NULL);
	}
	/*
	 * Copy at most size bytes (not size+extra), since the caller has no
	 * expectation that the extra bytes will be reliably preserved.
	 */
	copysize = (size < oldsize) ? size : oldsize;
	memcpy(p, ptr, copysize);
	iqalloct(ptr, try_tcache_dalloc);
	return (p);
}

JEMALLOC_ALWAYS_INLINE void *
iralloct(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
    bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena)
{
	size_t oldsize;

	assert(ptr != NULL);
	assert(size != 0);

	oldsize = isalloc(ptr, config_prof);

	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
	    != 0) {
		/*
		 * Existing object alignment is inadequate; allocate new space
		 * and copy.
		 */
		return (iralloct_realign(ptr, oldsize, size, extra, alignment,
		    zero, try_tcache_alloc, try_tcache_dalloc, arena));
	}

	if (size + extra <= arena_maxclass) {
		return (arena_ralloc(arena, ptr, oldsize, size, extra,
		    alignment, zero, try_tcache_alloc,
		    try_tcache_dalloc));
	} else {
		return (huge_ralloc(arena, ptr, oldsize, size, extra,
		    alignment, zero, try_tcache_dalloc));
	}
}

JEMALLOC_ALWAYS_INLINE void *
iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero)
{

	return (iralloct(ptr, size, extra, alignment, zero, true, true, NULL));
}

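/*
 * ixalloc() attempts to resize in place; per the convention used throughout
 * jemalloc, it returns false on success and true if the object cannot be
 * resized without moving it.
 */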
JEMALLOC_ALWAYS_INLINE bool
ixalloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero)
{
	size_t oldsize;

	assert(ptr != NULL);
	assert(size != 0);

	oldsize = isalloc(ptr, config_prof);
	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
	    != 0) {
		/* Existing object alignment is inadequate. */
		return (true);
	}

	if (size <= arena_maxclass)
		return (arena_ralloc_no_move(ptr, oldsize, size, extra, zero));
	else
		return (huge_ralloc_no_move(ptr, oldsize, size, extra));
}

malloc_tsd_externs(thread_allocated, thread_allocated_t)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, thread_allocated, thread_allocated_t,
    THREAD_ALLOCATED_INITIALIZER, malloc_tsd_no_cleanup)
#endif

#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_INLINES
/******************************************************************************/
#endif /* JEMALLOC_INTERNAL_H */