jemalloc_internal.h.in revision 7ca0fdfb85b2a9fc7a112e158892c098e004385b
#include <sys/mman.h>
#include <sys/param.h>
#include <sys/syscall.h>
#if !defined(SYS_write) && defined(__NR_write)
#define	SYS_write __NR_write
#endif
#include <sys/time.h>
#include <sys/types.h>
#include <sys/uio.h>

#include <errno.h>
#include <limits.h>
#ifndef SIZE_T_MAX
#  define SIZE_T_MAX	SIZE_MAX
#endif
#include <pthread.h>
#include <sched.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>
#ifndef offsetof
#  define offsetof(type, member)	((size_t)&(((type *)NULL)->member))
#endif
#include <inttypes.h>
#include <string.h>
#include <strings.h>
#include <ctype.h>
#include <unistd.h>
#include <fcntl.h>
#include <pthread.h>
#include <math.h>

#define	JEMALLOC_NO_DEMANGLE
#include "../jemalloc@install_suffix@.h"

#ifdef JEMALLOC_UTRACE
#include <sys/ktrace.h>
#endif

#ifdef JEMALLOC_VALGRIND
#include <valgrind/valgrind.h>
#include <valgrind/memcheck.h>
#endif

#include "jemalloc/internal/private_namespace.h"

#ifdef JEMALLOC_CC_SILENCE
#define	UNUSED JEMALLOC_ATTR(unused)
#else
#define	UNUSED
#endif
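/*
 * Each JEMALLOC_* configuration macro is mirrored below as a static const
 * bool so that option-dependent code can be written as ordinary C branches
 * instead of #ifdef blocks; the compiler still sees (and type-checks) both
 * sides and discards the dead one.  Illustrative sketch (hypothetical call
 * site, not part of this header):
 *
 *	if (config_stats)
 *		arena->stats.nmalloc++;
 *
 * rather than wrapping the statement in #ifdef JEMALLOC_STATS ... #endif.
 */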

static const bool config_debug =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
static const bool config_dss =
#ifdef JEMALLOC_DSS
    true
#else
    false
#endif
    ;
static const bool config_fill =
#ifdef JEMALLOC_FILL
    true
#else
    false
#endif
    ;
static const bool config_lazy_lock =
#ifdef JEMALLOC_LAZY_LOCK
    true
#else
    false
#endif
    ;
static const bool config_prof =
#ifdef JEMALLOC_PROF
    true
#else
    false
#endif
    ;
static const bool config_prof_libgcc =
#ifdef JEMALLOC_PROF_LIBGCC
    true
#else
    false
#endif
    ;
static const bool config_prof_libunwind =
#ifdef JEMALLOC_PROF_LIBUNWIND
    true
#else
    false
#endif
    ;
static const bool config_munmap =
#ifdef JEMALLOC_MUNMAP
    true
#else
    false
#endif
    ;
static const bool config_stats =
#ifdef JEMALLOC_STATS
    true
#else
    false
#endif
    ;
static const bool config_tcache =
#ifdef JEMALLOC_TCACHE
    true
#else
    false
#endif
    ;
static const bool config_tls =
#ifdef JEMALLOC_TLS
    true
#else
    false
#endif
    ;
static const bool config_utrace =
#ifdef JEMALLOC_UTRACE
    true
#else
    false
#endif
    ;
static const bool config_valgrind =
#ifdef JEMALLOC_VALGRIND
    true
#else
    false
#endif
    ;
static const bool config_xmalloc =
#ifdef JEMALLOC_XMALLOC
    true
#else
    false
#endif
    ;
static const bool config_ivsalloc =
#ifdef JEMALLOC_IVSALLOC
    true
#else
    false
#endif
    ;

#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
#include <libkern/OSAtomic.h>
#endif

#ifdef JEMALLOC_ZONE
#include <mach/mach_error.h>
#include <mach/mach_init.h>
#include <mach/vm_map.h>
#include <malloc/malloc.h>
#endif

#define	RB_COMPACT
#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/qr.h"
#include "jemalloc/internal/ql.h"

/*
 * jemalloc can conceptually be broken into components (arena, tcache, etc.),
 * but there are circular dependencies that cannot be broken without
 * substantial performance degradation.  In order to reduce the effect on
 * visual code flow, read the header files in multiple passes, with one of the
 * following cpp variables defined during each pass:
 *
 *   JEMALLOC_H_TYPES   : Preprocessor-defined constants and pseudo-opaque data
 *                        types.
 *   JEMALLOC_H_STRUCTS : Data structures.
 *   JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
 *   JEMALLOC_H_INLINES : Inline functions.
 */
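/*
 * Roughly, each internal header cooperates with these passes by guarding its
 * sections on the pass macros.  Abridged sketch (hypothetical header with
 * made-up contents):
 *
 *	#ifdef JEMALLOC_H_TYPES
 *	typedef struct foo_s foo_t;
 *	#endif
 *	#ifdef JEMALLOC_H_STRUCTS
 *	struct foo_s { int bar; };
 *	#endif
 *	#ifdef JEMALLOC_H_EXTERNS
 *	void foo_boot(void);
 *	#endif
 *	#ifdef JEMALLOC_H_INLINES
 *	JEMALLOC_INLINE int foo_bar(foo_t *foo);
 *	#endif
 */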
/******************************************************************************/
#define JEMALLOC_H_TYPES

#define	ALLOCM_LG_ALIGN_MASK	((int)0x3f)

#define	ZU(z)	((size_t)z)

#ifndef __DECONST
#  define	__DECONST(type, var)	((type)(uintptr_t)(const void *)(var))
#endif

#ifdef JEMALLOC_DEBUG
   /* Disable inlining to make debugging easier. */
#  define JEMALLOC_INLINE
#  define inline
#else
#  define JEMALLOC_ENABLE_INLINE
#  define JEMALLOC_INLINE static inline
#endif

/* Smallest size class to support. */
#define	LG_TINY_MIN		3
#define	TINY_MIN		(1U << LG_TINY_MIN)

/*
 * Minimum alignment of allocations is 2^LG_QUANTUM bytes (ignoring tiny size
 * classes).
 */
#ifndef LG_QUANTUM
#  ifdef __i386__
#    define LG_QUANTUM		4
#  endif
#  ifdef __ia64__
#    define LG_QUANTUM		4
#  endif
#  ifdef __alpha__
#    define LG_QUANTUM		4
#  endif
#  ifdef __sparc64__
#    define LG_QUANTUM		4
#  endif
#  if (defined(__amd64__) || defined(__x86_64__))
#    define LG_QUANTUM		4
#  endif
#  ifdef __arm__
#    define LG_QUANTUM		3
#  endif
#  ifdef __mips__
#    define LG_QUANTUM		3
#  endif
#  ifdef __powerpc__
#    define LG_QUANTUM		4
#  endif
#  ifdef __s390x__
#    define LG_QUANTUM		4
#  endif
#  ifdef __SH4__
#    define LG_QUANTUM		4
#  endif
#  ifdef __tile__
#    define LG_QUANTUM		4
#  endif
#  ifndef LG_QUANTUM
#    error "No LG_QUANTUM definition for architecture; specify via CPPFLAGS"
#  endif
#endif

#define	QUANTUM			((size_t)(1U << LG_QUANTUM))
#define	QUANTUM_MASK		(QUANTUM - 1)

/* Return the smallest quantum multiple that is >= a. */
#define	QUANTUM_CEILING(a)						\
	(((a) + QUANTUM_MASK) & ~QUANTUM_MASK)
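/*
 * Worked example, assuming LG_QUANTUM == 4 (QUANTUM == 16, QUANTUM_MASK ==
 * 0xf):
 *
 *	QUANTUM_CEILING(1)  == 16	((1 + 15) & ~15)
 *	QUANTUM_CEILING(16) == 16	((16 + 15) & ~15)
 *	QUANTUM_CEILING(17) == 32	((17 + 15) & ~15)
 */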

#define	LONG			((size_t)(1U << LG_SIZEOF_LONG))
#define	LONG_MASK		(LONG - 1)

/* Return the smallest long multiple that is >= a. */
#define	LONG_CEILING(a)							\
	(((a) + LONG_MASK) & ~LONG_MASK)

#define	SIZEOF_PTR		(1U << LG_SIZEOF_PTR)
#define	PTR_MASK		(SIZEOF_PTR - 1)

/* Return the smallest (void *) multiple that is >= a. */
#define	PTR_CEILING(a)							\
	(((a) + PTR_MASK) & ~PTR_MASK)

/*
 * Maximum size of L1 cache line.  This is used to avoid cache line aliasing.
 * In addition, this controls the spacing of cacheline-spaced size classes.
 */
#define	LG_CACHELINE		6
#define	CACHELINE		((size_t)(1U << LG_CACHELINE))
#define	CACHELINE_MASK		(CACHELINE - 1)

/* Return the smallest cacheline multiple that is >= s. */
#define	CACHELINE_CEILING(s)						\
	(((s) + CACHELINE_MASK) & ~CACHELINE_MASK)

/* Page size.  STATIC_PAGE_SHIFT is determined by the configure script. */
#ifdef PAGE_MASK
#  undef PAGE_MASK
#endif
#define	LG_PAGE		STATIC_PAGE_SHIFT
#define	PAGE		((size_t)(1U << STATIC_PAGE_SHIFT))
#define	PAGE_MASK	((size_t)(PAGE - 1))

/* Return the smallest pagesize multiple that is >= s. */
#define	PAGE_CEILING(s)							\
	(((s) + PAGE_MASK) & ~PAGE_MASK)

/* Return the nearest aligned address at or below a. */
#define	ALIGNMENT_ADDR2BASE(a, alignment)				\
	((void *)((uintptr_t)(a) & (-(alignment))))

/* Return the offset between a and the nearest aligned address at or below a. */
#define	ALIGNMENT_ADDR2OFFSET(a, alignment)				\
	((size_t)((uintptr_t)(a) & (alignment - 1)))

/* Return the smallest alignment multiple that is >= s. */
#define	ALIGNMENT_CEILING(s, alignment)					\
	(((s) + (alignment - 1)) & (-(alignment)))
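/*
 * Worked example for the three alignment macros; alignment must be a power
 * of two, and in two's complement -(alignment) == ~(alignment - 1), i.e. a
 * mask that clears the low bits.  Assuming alignment == 64 and a == 0x1234:
 *
 *	ALIGNMENT_ADDR2BASE(0x1234, 64)   == (void *)0x1200
 *	ALIGNMENT_ADDR2OFFSET(0x1234, 64) == 0x34
 *	ALIGNMENT_CEILING(0x1234, 64)     == 0x1240
 */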

#ifdef JEMALLOC_VALGRIND
/*
 * The JEMALLOC_VALGRIND_*() macros must be macros rather than functions
 * so that when Valgrind reports errors, there are no extra stack frames
 * in the backtraces.
 *
 * The size that is reported to valgrind must be consistent through a chain of
 * malloc..realloc..realloc calls.  Request size isn't recorded anywhere in
 * jemalloc, so it is critical that all callers of these macros provide usize
 * rather than request size.  As a result, buffer overflow detection is
 * technically weakened for the standard API, though it is generally accepted
 * practice to consider any extra bytes reported by malloc_usable_size() as
 * usable space.
 */
#define	JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do {		\
	if (config_valgrind && opt_valgrind && cond)			\
		VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(ptr), zero);	\
} while (0)
#define	JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize,	\
    old_rzsize, zero)  do {						\
	if (config_valgrind && opt_valgrind) {				\
		size_t rzsize = p2rz(ptr);				\
									\
		if (ptr == old_ptr) {					\
			VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize,	\
			    usize, rzsize);				\
			if (zero && old_usize < usize) {		\
				VALGRIND_MAKE_MEM_DEFINED(		\
				    (void *)((uintptr_t)ptr +		\
				    old_usize), usize - old_usize);	\
			}						\
		} else {						\
			if (old_ptr != NULL) {				\
				VALGRIND_FREELIKE_BLOCK(old_ptr,	\
				    old_rzsize);			\
			}						\
			if (ptr != NULL) {				\
				size_t copy_size = (old_usize < usize)	\
				    ?  old_usize : usize;		\
				size_t tail_size = usize - copy_size;	\
				VALGRIND_MALLOCLIKE_BLOCK(ptr, usize,	\
				    rzsize, false);			\
				if (copy_size > 0) {			\
					VALGRIND_MAKE_MEM_DEFINED(ptr,	\
					    copy_size);			\
				}					\
				if (zero && tail_size > 0) {		\
					VALGRIND_MAKE_MEM_DEFINED(	\
					    (void *)((uintptr_t)ptr +	\
					    copy_size), tail_size);	\
				}					\
			}						\
		}							\
	}								\
} while (0)
#define	JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {			\
	if (config_valgrind && opt_valgrind)				\
		VALGRIND_FREELIKE_BLOCK(ptr, rzsize);			\
} while (0)
#else
#define	VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed)
#define	VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB)
#define	VALGRIND_FREELIKE_BLOCK(addr, rzB)
#define	VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr, _qzz_len)
#define	VALGRIND_MAKE_MEM_DEFINED(_qzz_addr, _qzz_len)
#define	JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero)
#define	JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize,	\
    old_rzsize, zero)
#define	JEMALLOC_VALGRIND_FREE(ptr, rzsize)
#endif
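/*
 * Hedged usage sketch (hypothetical call site, not part of this header): an
 * allocation path reports the usable size, not the request size, e.g.
 *
 *	void *p = imalloc(usize);
 *	JEMALLOC_VALGRIND_MALLOC(p != NULL, p, usize, false);
 *
 * and the corresponding deallocation path pairs it with
 * JEMALLOC_VALGRIND_FREE(p, rzsize).  When JEMALLOC_VALGRIND is not defined,
 * all of these macros expand to nothing.
 */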

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_TYPES
/******************************************************************************/
#define JEMALLOC_H_STRUCTS

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

typedef struct {
	uint64_t	allocated;
	uint64_t	deallocated;
} thread_allocated_t;
/*
 * The JEMALLOC_CONCAT() wrapper is necessary to pass {0, 0} via a cpp macro
 * argument.
 */
#define	THREAD_ALLOCATED_INITIALIZER	JEMALLOC_CONCAT({0, 0})
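/*
 * Background for the wrapper (sketch): a braced initializer such as {0, 0}
 * contains a comma, so once it appears in a macro expansion it would be
 * split into two arguments if re-scanned as part of another macro's argument
 * list.  Routing it through JEMALLOC_CONCAT() keeps it usable as a single
 * initializer argument, e.g. in the malloc_tsd_funcs() invocation near the
 * end of this header.
 */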

#undef JEMALLOC_H_STRUCTS
/******************************************************************************/
#define JEMALLOC_H_EXTERNS

extern bool	opt_abort;
extern bool	opt_junk;
extern size_t	opt_quarantine;
extern bool	opt_redzone;
extern bool	opt_utrace;
extern bool	opt_valgrind;
extern bool	opt_xmalloc;
extern bool	opt_zero;
extern size_t	opt_narenas;

/* Number of CPUs. */
extern unsigned		ncpus;

extern malloc_mutex_t	arenas_lock; /* Protects arenas initialization. */
/*
 * Arenas that are used to service external requests.  Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 */
extern arena_t		**arenas;
extern unsigned		narenas;

arena_t	*arenas_extend(unsigned ind);
void	arenas_cleanup(void *arg);
arena_t	*choose_arena_hard(void);
void	jemalloc_prefork(void);
void	jemalloc_postfork_parent(void);
void	jemalloc_postfork_child(void);

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_EXTERNS
/******************************************************************************/
#define JEMALLOC_H_INLINES

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"

#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), arenas, arena_t *)

size_t	s2u(size_t size);
size_t	sa2u(size_t size, size_t alignment);
arena_t	*choose_arena(arena_t *arena);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
/*
 * Map of pthread_self() --> arenas[???], used for selecting an arena to use
 * for allocations.
 */
malloc_tsd_externs(arenas, arena_t *)
malloc_tsd_funcs(JEMALLOC_INLINE, arenas, arena_t *, NULL, arenas_cleanup)

/*
 * Compute usable size that would result from allocating an object with the
 * specified size.
 */
JEMALLOC_INLINE size_t
s2u(size_t size)
{

	if (size <= SMALL_MAXCLASS)
		return (arena_bin_info[SMALL_SIZE2BIN(size)].reg_size);
	if (size <= arena_maxclass)
		return (PAGE_CEILING(size));
	return (CHUNK_CEILING(size));
}
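/*
 * Illustrative results (exact class boundaries depend on the generated
 * size_classes.h and on LG_PAGE): a small request maps to its bin's
 * reg_size, so s2u(100) might return 112 with 16-byte quantum spacing; a
 * large request rounds to a page multiple, so with 4 KiB pages
 * s2u(5000) == 8192; anything above arena_maxclass rounds to a chunk
 * multiple via CHUNK_CEILING().
 */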

/*
 * Compute usable size that would result from allocating an object with the
 * specified size and alignment.
 */
JEMALLOC_INLINE size_t
sa2u(size_t size, size_t alignment)
{
	size_t usize;

	assert(alignment != 0 && ((alignment - 1) & alignment) == 0);

	/*
	 * Round size up to the nearest multiple of alignment.
	 *
	 * This done, we can take advantage of the fact that for each small
	 * size class, every object is aligned at the smallest power of two
	 * that is non-zero in the base two representation of the size.  For
	 * example:
	 *
	 *   Size |   Base 2 | Minimum alignment
	 *   -----+----------+------------------
	 *     96 |  1100000 |  32
	 *    144 | 10100000 |  32
	 *    192 | 11000000 |  64
	 */
	usize = ALIGNMENT_CEILING(size, alignment);
	/*
	 * (usize < size) protects against the combination of maximal
	 * alignment and size greater than maximal alignment.
	 */
	if (usize < size) {
		/* size_t overflow. */
		return (0);
	}

	if (usize <= arena_maxclass && alignment <= PAGE) {
		if (usize <= SMALL_MAXCLASS)
			return (arena_bin_info[SMALL_SIZE2BIN(usize)].reg_size);
		return (PAGE_CEILING(usize));
	} else {
		size_t run_size;

		/*
		 * We can't achieve subpage alignment, so round up alignment
		 * permanently; it makes later calculations simpler.
		 */
		alignment = PAGE_CEILING(alignment);
		usize = PAGE_CEILING(size);
		/*
		 * (usize < size) protects against very large sizes within
		 * PAGE of SIZE_T_MAX.
		 *
		 * (usize + alignment < usize) protects against the
		 * combination of maximal alignment and usize large enough
		 * to cause overflow.  This is similar to the first overflow
		 * check above, but it needs to be repeated due to the new
		 * usize value, which may now be *equal* to maximal
		 * alignment, whereas before we only detected overflow if the
		 * original size was *greater* than maximal alignment.
		 */
		if (usize < size || usize + alignment < usize) {
			/* size_t overflow. */
			return (0);
		}

		/*
		 * Calculate the size of the over-size run that arena_palloc()
		 * would need to allocate in order to guarantee the alignment.
		 * If the run wouldn't fit within a chunk, round up to a huge
		 * allocation size.
		 */
		run_size = usize + alignment - PAGE;
		if (run_size <= arena_maxclass)
			return (PAGE_CEILING(usize));
		return (CHUNK_CEILING(usize));
	}
}
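/*
 * Typical pairing with ipalloc() (sketch): callers compute the usable size
 * first, check for overflow, and pass the result along, e.g.
 *
 *	size_t usize = sa2u(size, alignment);
 *	if (usize == 0)
 *		return (NULL);
 *	void *p = ipalloc(usize, alignment, false);
 *
 * where the zero return indicates size_t overflow, and ipalloc() asserts
 * that usize == sa2u(usize, alignment).
 */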

/* Choose an arena based on a per-thread value. */
JEMALLOC_INLINE arena_t *
choose_arena(arena_t *arena)
{
	arena_t *ret;

	if (arena != NULL)
		return (arena);

	if ((ret = *arenas_tsd_get()) == NULL) {
		ret = choose_arena_hard();
		assert(ret != NULL);
	}

	return (ret);
}
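/*
 * Sketch of the common call pattern: allocation paths with no explicit arena
 * pass NULL, e.g. arena_t *a = choose_arena(NULL); the per-thread default is
 * returned, and only a thread whose TSD slot is still unset falls through to
 * choose_arena_hard().
 */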
#endif

#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"

#ifndef JEMALLOC_ENABLE_INLINE
void	*imalloc(size_t size);
void	*icalloc(size_t size);
void	*ipalloc(size_t usize, size_t alignment, bool zero);
size_t	isalloc(const void *ptr, bool demote);
size_t	ivsalloc(const void *ptr, bool demote);
size_t	u2rz(size_t usize);
size_t	p2rz(const void *ptr);
void	idalloc(void *ptr);
void	iqalloc(void *ptr);
void	*iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
    bool zero, bool no_move);
malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t)
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_INLINE void *
imalloc(size_t size)
{

	assert(size != 0);

	if (size <= arena_maxclass)
		return (arena_malloc(NULL, size, false, true));
	else
		return (huge_malloc(size, false));
}

JEMALLOC_INLINE void *
icalloc(size_t size)
{

	if (size <= arena_maxclass)
		return (arena_malloc(NULL, size, true, true));
	else
		return (huge_malloc(size, true));
}

JEMALLOC_INLINE void *
ipalloc(size_t usize, size_t alignment, bool zero)
{
	void *ret;

	assert(usize != 0);
	assert(usize == sa2u(usize, alignment));

	if (usize <= arena_maxclass && alignment <= PAGE)
		ret = arena_malloc(NULL, usize, zero, true);
	else {
		if (usize <= arena_maxclass) {
			ret = arena_palloc(choose_arena(NULL), usize, alignment,
			    zero);
		} else if (alignment <= chunksize)
			ret = huge_malloc(usize, zero);
		else
			ret = huge_palloc(usize, alignment, zero);
	}

	assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
	return (ret);
}

/*
 * Typical usage:
 *   void *ptr = [...]
 *   size_t sz = isalloc(ptr, config_prof);
 */
JEMALLOC_INLINE size_t
isalloc(const void *ptr, bool demote)
{
	size_t ret;
	arena_chunk_t *chunk;

	assert(ptr != NULL);
	/* Demotion only makes sense if config_prof is true. */
	assert(config_prof || demote == false);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr) {
		/* Region. */
		ret = arena_salloc(ptr, demote);
	} else
		ret = huge_salloc(ptr);

	return (ret);
}

JEMALLOC_INLINE size_t
ivsalloc(const void *ptr, bool demote)
{

	/* Return 0 if ptr is not within a chunk managed by jemalloc. */
	if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == NULL)
		return (0);

	return (isalloc(ptr, demote));
}

JEMALLOC_INLINE size_t
u2rz(size_t usize)
{
	size_t ret;

	if (usize <= SMALL_MAXCLASS) {
		size_t binind = SMALL_SIZE2BIN(usize);
		ret = arena_bin_info[binind].redzone_size;
	} else
		ret = 0;

	return (ret);
}

JEMALLOC_INLINE size_t
p2rz(const void *ptr)
{
	size_t usize = isalloc(ptr, false);

	return (u2rz(usize));
}

JEMALLOC_INLINE void
idalloc(void *ptr)
{
	arena_chunk_t *chunk;

	assert(ptr != NULL);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr)
		arena_dalloc(chunk->arena, chunk, ptr, true);
	else
		huge_dalloc(ptr, true);
}

JEMALLOC_INLINE void
iqalloc(void *ptr)
{

	if (config_fill && opt_quarantine)
		quarantine(ptr);
	else
		idalloc(ptr);
}

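/*
 * Resize strategy (summary of the cases below): if the existing object
 * already satisfies the requested alignment, resizing is attempted in place
 * (no_move) or delegated to arena_ralloc()/huge_ralloc(); otherwise a new,
 * suitably aligned object is obtained via ipalloc(), at most size bytes are
 * copied, and the old object is released through iqalloc() so that
 * quarantine (when enabled) is honored.
 */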
JEMALLOC_INLINE void *
iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
    bool no_move)
{
	void *ret;
	size_t oldsize;

	assert(ptr != NULL);
	assert(size != 0);

	oldsize = isalloc(ptr, config_prof);

	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
	    != 0) {
		size_t usize, copysize;

		/*
		 * Existing object alignment is inadequate; allocate new space
		 * and copy.
		 */
		if (no_move)
			return (NULL);
		usize = sa2u(size + extra, alignment);
		if (usize == 0)
			return (NULL);
		ret = ipalloc(usize, alignment, zero);
		if (ret == NULL) {
			if (extra == 0)
				return (NULL);
			/* Try again, without extra this time. */
			usize = sa2u(size, alignment);
			if (usize == 0)
				return (NULL);
			ret = ipalloc(usize, alignment, zero);
			if (ret == NULL)
				return (NULL);
		}
		/*
		 * Copy at most size bytes (not size+extra), since the caller
		 * has no expectation that the extra bytes will be reliably
		 * preserved.
		 */
		copysize = (size < oldsize) ? size : oldsize;
		memcpy(ret, ptr, copysize);
		iqalloc(ptr);
		return (ret);
	}

	if (no_move) {
		if (size <= arena_maxclass) {
			return (arena_ralloc_no_move(ptr, oldsize, size,
			    extra, zero));
		} else {
			return (huge_ralloc_no_move(ptr, oldsize, size,
			    extra));
		}
	} else {
		if (size + extra <= arena_maxclass) {
			return (arena_ralloc(ptr, oldsize, size, extra,
			    alignment, zero, true));
		} else {
			return (huge_ralloc(ptr, oldsize, size, extra,
			    alignment, zero));
		}
	}
}

malloc_tsd_externs(thread_allocated, thread_allocated_t)
malloc_tsd_funcs(JEMALLOC_INLINE, thread_allocated, thread_allocated_t,
    THREAD_ALLOCATED_INITIALIZER, malloc_tsd_no_cleanup)
#endif

#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_INLINES
/******************************************************************************/
