jemalloc_internal.h.in revision 122449b073bcbaa504c4f592ea2d733503c272d2
#include <sys/mman.h>
#include <sys/param.h>
#include <sys/syscall.h>
#if !defined(SYS_write) && defined(__NR_write)
#define	SYS_write __NR_write
#endif
#include <sys/time.h>
#include <sys/types.h>
#include <sys/uio.h>

#include <errno.h>
#include <limits.h>
#ifndef SIZE_T_MAX
#  define SIZE_T_MAX	SIZE_MAX
#endif
#include <pthread.h>
#include <sched.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>
#ifndef offsetof
#  define offsetof(type, member)	((size_t)&(((type *)NULL)->member))
#endif
#include <inttypes.h>
#include <string.h>
#include <strings.h>
#include <ctype.h>
#include <unistd.h>
#include <fcntl.h>
#include <pthread.h>
#include <math.h>

#define	JEMALLOC_NO_DEMANGLE
#include "../jemalloc@install_suffix@.h"

#ifdef JEMALLOC_UTRACE
#include <sys/ktrace.h>
#endif

#ifdef JEMALLOC_VALGRIND
#include <valgrind/valgrind.h>
#include <valgrind/memcheck.h>
#endif

#include "jemalloc/internal/private_namespace.h"

#ifdef JEMALLOC_CC_SILENCE
#define	UNUSED JEMALLOC_ATTR(unused)
#else
#define	UNUSED
#endif

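/*
 * Each of the config_* constants below reflects one JEMALLOC_* configuration
 * macro as a compile-time boolean.  This lets feature-dependent code be
 * written as an ordinary conditional, e.g. (illustrative, not a real call
 * site):
 *
 *   if (config_debug)
 *           assert(ret != NULL);
 *
 * so the optimizer drops the dead branch while the disabled code still gets
 * compiled and type-checked, unlike an #ifdef block.
 */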
static const bool config_debug =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
static const bool config_dss =
#ifdef JEMALLOC_DSS
    true
#else
    false
#endif
    ;
static const bool config_fill =
#ifdef JEMALLOC_FILL
    true
#else
    false
#endif
    ;
static const bool config_lazy_lock =
#ifdef JEMALLOC_LAZY_LOCK
    true
#else
    false
#endif
    ;
static const bool config_prof =
#ifdef JEMALLOC_PROF
    true
#else
    false
#endif
    ;
static const bool config_prof_libgcc =
#ifdef JEMALLOC_PROF_LIBGCC
    true
#else
    false
#endif
    ;
static const bool config_prof_libunwind =
#ifdef JEMALLOC_PROF_LIBUNWIND
    true
#else
    false
#endif
    ;
static const bool config_stats =
#ifdef JEMALLOC_STATS
    true
#else
    false
#endif
    ;
static const bool config_tcache =
#ifdef JEMALLOC_TCACHE
    true
#else
    false
#endif
    ;
static const bool config_tls =
#ifdef JEMALLOC_TLS
    true
#else
    false
#endif
    ;
static const bool config_utrace =
#ifdef JEMALLOC_UTRACE
    true
#else
    false
#endif
    ;
static const bool config_valgrind =
#ifdef JEMALLOC_VALGRIND
    true
#else
    false
#endif
    ;
static const bool config_xmalloc =
#ifdef JEMALLOC_XMALLOC
    true
#else
    false
#endif
    ;
static const bool config_ivsalloc =
#ifdef JEMALLOC_IVSALLOC
    true
#else
    false
#endif
    ;

#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
#include <libkern/OSAtomic.h>
#endif

#ifdef JEMALLOC_ZONE
#include <mach/mach_error.h>
#include <mach/mach_init.h>
#include <mach/vm_map.h>
#include <malloc/malloc.h>
#endif

#define	RB_COMPACT
#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/qr.h"
#include "jemalloc/internal/ql.h"

/*
 * jemalloc can conceptually be broken into components (arena, tcache, etc.),
 * but there are circular dependencies that cannot be broken without
 * substantial performance degradation.  In order to reduce the effect on
 * visual code flow, read the header files in multiple passes, with one of the
 * following cpp variables defined during each pass:
 *
 *   JEMALLOC_H_TYPES   : Preprocessor-defined constants and pseudo-opaque data
 *                        types.
 *   JEMALLOC_H_STRUCTS : Data structures.
 *   JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
 *   JEMALLOC_H_INLINES : Inline functions.
 */
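/*
 * As an illustrative sketch (names abbreviated; see the individual headers,
 * e.g. arena.h, for the real contents), each component header is laid out so
 * that each pass picks up exactly one section:
 *
 *   #ifdef JEMALLOC_H_TYPES
 *   typedef struct arena_s arena_t;           constants, pseudo-opaque types
 *   #endif
 *   #ifdef JEMALLOC_H_STRUCTS
 *   struct arena_s { ... };                   full structure definitions
 *   #endif
 *   #ifdef JEMALLOC_H_EXTERNS
 *   extern data and function prototypes
 *   #endif
 *   #ifdef JEMALLOC_H_INLINES
 *   JEMALLOC_INLINE function definitions
 *   #endif
 */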
/******************************************************************************/
#define JEMALLOC_H_TYPES

#define	ALLOCM_LG_ALIGN_MASK	((int)0x3f)

#define	ZU(z)	((size_t)z)

#ifndef __DECONST
#  define	__DECONST(type, var)	((type)(uintptr_t)(const void *)(var))
#endif

#ifdef JEMALLOC_DEBUG
   /* Disable inlining to make debugging easier. */
#  define JEMALLOC_INLINE
#  define inline
#else
#  define JEMALLOC_ENABLE_INLINE
#  define JEMALLOC_INLINE static inline
#endif

/* Smallest size class to support. */
#define	LG_TINY_MIN		3
#define	TINY_MIN		(1U << LG_TINY_MIN)

/*
 * Minimum alignment of allocations is 2^LG_QUANTUM bytes (ignoring tiny size
 * classes).
 */
#ifndef LG_QUANTUM
#  ifdef __i386__
#    define LG_QUANTUM		4
#  endif
#  ifdef __ia64__
#    define LG_QUANTUM		4
#  endif
#  ifdef __alpha__
#    define LG_QUANTUM		4
#  endif
#  ifdef __sparc64__
#    define LG_QUANTUM		4
#  endif
#  if (defined(__amd64__) || defined(__x86_64__))
#    define LG_QUANTUM		4
#  endif
#  ifdef __arm__
#    define LG_QUANTUM		3
#  endif
#  ifdef __mips__
#    define LG_QUANTUM		3
#  endif
#  ifdef __powerpc__
#    define LG_QUANTUM		4
#  endif
#  ifdef __s390x__
#    define LG_QUANTUM		4
#  endif
#  ifdef __SH4__
#    define LG_QUANTUM		4
#  endif
#  ifdef __tile__
#    define LG_QUANTUM		4
#  endif
#  ifndef LG_QUANTUM
#    error "No LG_QUANTUM definition for architecture; specify via CPPFLAGS"
#  endif
#endif

#define	QUANTUM			((size_t)(1U << LG_QUANTUM))
#define	QUANTUM_MASK		(QUANTUM - 1)

/* Return the smallest quantum multiple that is >= a. */
#define	QUANTUM_CEILING(a)						\
	(((a) + QUANTUM_MASK) & ~QUANTUM_MASK)
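/*
 * For example, with LG_QUANTUM == 4 (e.g. x86_64 above), QUANTUM == 16, so
 * QUANTUM_CEILING(17) == (17 + 15) & ~15 == 32, while QUANTUM_CEILING(16)
 * stays 16.
 */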

#define	LONG			((size_t)(1U << LG_SIZEOF_LONG))
#define	LONG_MASK		(LONG - 1)

/* Return the smallest long multiple that is >= a. */
#define	LONG_CEILING(a)							\
	(((a) + LONG_MASK) & ~LONG_MASK)

#define	SIZEOF_PTR		(1U << LG_SIZEOF_PTR)
#define	PTR_MASK		(SIZEOF_PTR - 1)

/* Return the smallest (void *) multiple that is >= a. */
#define	PTR_CEILING(a)							\
	(((a) + PTR_MASK) & ~PTR_MASK)

/*
 * Maximum size of L1 cache line.  This is used to avoid cache line aliasing.
 * In addition, this controls the spacing of cacheline-spaced size classes.
 */
#define	LG_CACHELINE		6
#define	CACHELINE		((size_t)(1U << LG_CACHELINE))
#define	CACHELINE_MASK		(CACHELINE - 1)

/* Return the smallest cacheline multiple that is >= s. */
#define	CACHELINE_CEILING(s)						\
	(((s) + CACHELINE_MASK) & ~CACHELINE_MASK)

/* Page size.  STATIC_PAGE_SHIFT is determined by the configure script. */
#ifdef PAGE_MASK
#  undef PAGE_MASK
#endif
#define	LG_PAGE		STATIC_PAGE_SHIFT
#define	PAGE		((size_t)(1U << STATIC_PAGE_SHIFT))
#define	PAGE_MASK	((size_t)(PAGE - 1))

/* Return the smallest pagesize multiple that is >= s. */
#define	PAGE_CEILING(s)							\
	(((s) + PAGE_MASK) & ~PAGE_MASK)

#ifdef JEMALLOC_VALGRIND
/*
 * The JEMALLOC_VALGRIND_*() macros must be macros rather than functions
 * so that when Valgrind reports errors, there are no extra stack frames
 * in the backtraces.
 *
 * The size that is reported to valgrind must be consistent through a chain of
 * malloc..realloc..realloc calls.  Request size isn't recorded anywhere in
 * jemalloc, so it is critical that all callers of these macros provide usize
 * rather than request size.  As a result, buffer overflow detection is
 * technically weakened for the standard API, though it is generally accepted
 * practice to consider any extra bytes reported by malloc_usable_size() as
 * usable space.
 */
#define	JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do {		\
	if (config_valgrind && opt_valgrind && cond)			\
		VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(ptr), zero);	\
} while (0)
#define	JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize,	\
    old_rzsize, zero)  do {						\
	if (config_valgrind && opt_valgrind) {				\
		size_t rzsize = p2rz(ptr);				\
									\
		if (ptr == old_ptr) {					\
			VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize,	\
			    usize, rzsize);				\
			if (zero && old_usize < usize) {		\
				VALGRIND_MAKE_MEM_DEFINED(		\
				    (void *)((uintptr_t)ptr +		\
				    old_usize), usize - old_usize);	\
			}						\
		} else {						\
			if (old_ptr != NULL) {				\
				VALGRIND_FREELIKE_BLOCK(old_ptr,	\
				    old_rzsize);			\
			}						\
			if (ptr != NULL) {				\
				size_t copy_size = (old_usize < usize)	\
				    ?  old_usize : usize;		\
				size_t tail_size = usize - copy_size;	\
				VALGRIND_MALLOCLIKE_BLOCK(ptr, usize,	\
				    rzsize, false);			\
				if (copy_size > 0) {			\
					VALGRIND_MAKE_MEM_DEFINED(ptr,	\
					    copy_size);			\
				}					\
				if (zero && tail_size > 0) {		\
					VALGRIND_MAKE_MEM_DEFINED(	\
					    (void *)((uintptr_t)ptr +	\
					    copy_size), tail_size);	\
				}					\
			}						\
		}							\
	}								\
} while (0)
#define	JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {			\
	if (config_valgrind && opt_valgrind)				\
		VALGRIND_FREELIKE_BLOCK(ptr, rzsize);			\
} while (0)
#else
#define	VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed)
#define	VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB)
#define	VALGRIND_FREELIKE_BLOCK(addr, rzB)
#define	VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr, _qzz_len)
#define	VALGRIND_MAKE_MEM_DEFINED(_qzz_addr, _qzz_len)
#define	JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero)
#define	JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize,	\
    old_rzsize, zero)
#define	JEMALLOC_VALGRIND_FREE(ptr, rzsize)
#endif
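/*
 * Illustrative call pattern (a sketch, not copied from jemalloc.c): a caller
 * that has already computed usize pairs the allocation with its annotation,
 *
 *   ret = imalloc(usize);
 *   JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
 *
 * and, on deallocation, records the redzone size via p2rz() while the
 * allocation is still live, then calls JEMALLOC_VALGRIND_FREE(ptr, rzsize)
 * after freeing.
 */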

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_TYPES
/******************************************************************************/
#define JEMALLOC_H_STRUCTS

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

typedef struct {
	uint64_t	allocated;
	uint64_t	deallocated;
} thread_allocated_t;
/*
 * The JEMALLOC_CONCAT() wrapper is necessary to pass {0, 0} via a cpp macro
 * argument.
 */
#define	THREAD_ALLOCATED_INITIALIZER	JEMALLOC_CONCAT({0, 0})
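/*
 * For example, writing malloc_tsd_funcs(..., {0, 0}, ...) with a literal
 * brace initializer would not work: the preprocessor splits macro arguments
 * at the comma inside the braces, since only parentheses (not braces)
 * protect commas.
 */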

#undef JEMALLOC_H_STRUCTS
/******************************************************************************/
#define JEMALLOC_H_EXTERNS

extern bool	opt_abort;
extern bool	opt_junk;
extern size_t	opt_quarantine;
extern bool	opt_redzone;
extern bool	opt_utrace;
extern bool	opt_valgrind;
extern bool	opt_xmalloc;
extern bool	opt_zero;
extern size_t	opt_narenas;

/* Number of CPUs. */
extern unsigned		ncpus;

extern malloc_mutex_t	arenas_lock; /* Protects arenas initialization. */
/*
 * Arenas that are used to service external requests.  Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 */
extern arena_t		**arenas;
extern unsigned		narenas;

arena_t	*arenas_extend(unsigned ind);
void	arenas_cleanup(void *arg);
arena_t	*choose_arena_hard(void);
void	jemalloc_prefork(void);
void	jemalloc_postfork_parent(void);
void	jemalloc_postfork_child(void);

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_EXTERNS
/******************************************************************************/
#define JEMALLOC_H_INLINES

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"

#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), arenas, arena_t *)

size_t	s2u(size_t size);
size_t	sa2u(size_t size, size_t alignment, size_t *run_size_p);
arena_t	*choose_arena(arena_t *arena);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
/*
 * Map of pthread_self() --> arenas[???], used for selecting an arena to use
 * for allocations.
 */
malloc_tsd_externs(arenas, arena_t *)
malloc_tsd_funcs(JEMALLOC_INLINE, arenas, arena_t *, NULL, arenas_cleanup)

/*
 * Compute usable size that would result from allocating an object with the
 * specified size.
 */
JEMALLOC_INLINE size_t
s2u(size_t size)
{

	if (size <= SMALL_MAXCLASS)
		return (arena_bin_info[SMALL_SIZE2BIN(size)].reg_size);
	if (size <= arena_maxclass)
		return (PAGE_CEILING(size));
	return (CHUNK_CEILING(size));
}
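/*
 * For example (assuming 4 KiB pages and the default size classes), s2u(100)
 * returns the region size of the smallest small size class that can hold 100
 * bytes, s2u(5000) returns PAGE_CEILING(5000) == 8192, and anything larger
 * than arena_maxclass rounds up to a multiple of the chunk size.
 */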

/*
 * Compute usable size that would result from allocating an object with the
 * specified size and alignment.
 */
JEMALLOC_INLINE size_t
sa2u(size_t size, size_t alignment, size_t *run_size_p)
{
	size_t usize;

	/*
	 * Round size up to the nearest multiple of alignment.
	 *
	 * This done, we can take advantage of the fact that for each small
	 * size class, every object is aligned at the smallest power of two
	 * that is non-zero in the base two representation of the size.  For
	 * example:
	 *
	 *   Size |   Base 2 | Minimum alignment
	 *   -----+----------+------------------
	 *     96 |  1100000 |  32
	 *    144 | 10100000 |  32
	 *    192 | 11000000 |  64
	 */
	usize = (size + (alignment - 1)) & (-alignment);
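	/*
	 * For example, size == 100 and alignment == 32 (a power of two, so
	 * -alignment == ~(alignment - 1)) give usize == (100 + 31) & ~31 ==
	 * 128.
	 */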
	/*
	 * (usize < size) protects against the combination of maximal
	 * alignment and size greater than maximal alignment.
	 */
	if (usize < size) {
		/* size_t overflow. */
		return (0);
	}

	if (usize <= arena_maxclass && alignment <= PAGE) {
		if (usize <= SMALL_MAXCLASS)
			return (arena_bin_info[SMALL_SIZE2BIN(usize)].reg_size);
		return (PAGE_CEILING(usize));
	} else {
		size_t run_size;

		/*
		 * We can't achieve subpage alignment, so round up alignment
		 * permanently; it makes later calculations simpler.
		 */
		alignment = PAGE_CEILING(alignment);
		usize = PAGE_CEILING(size);
		/*
		 * (usize < size) protects against very large sizes within
		 * PAGE of SIZE_T_MAX.
		 *
		 * (usize + alignment < usize) protects against the
		 * combination of maximal alignment and usize large enough
		 * to cause overflow.  This is similar to the first overflow
		 * check above, but it needs to be repeated due to the new
		 * usize value, which may now be *equal* to maximal
		 * alignment, whereas before we only detected overflow if the
		 * original size was *greater* than maximal alignment.
		 */
		if (usize < size || usize + alignment < usize) {
			/* size_t overflow. */
			return (0);
		}

		/*
		 * Calculate the size of the over-size run that arena_palloc()
		 * would need to allocate in order to guarantee the alignment.
		 */
		if (usize >= alignment)
			run_size = usize + alignment - PAGE;
		else {
			/*
			 * It is possible that (alignment << 1) will cause
			 * overflow, but it doesn't matter because we also
			 * subtract PAGE, which in the case of overflow leaves
			 * us with a very large run_size.  That causes the
			 * first conditional below to fail, which means that
			 * the bogus run_size value never gets used for
			 * anything important.
			 */
			run_size = (alignment << 1) - PAGE;
		}
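		/*
		 * For example (assuming 4 KiB pages), usize == 16384 with
		 * alignment == 8192 gives run_size == 16384 + 8192 - 4096 ==
		 * 20480: any page-aligned run of that size contains an
		 * 8192-aligned 16384-byte region.
		 */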
		if (run_size_p != NULL)
			*run_size_p = run_size;

		if (run_size <= arena_maxclass)
			return (PAGE_CEILING(usize));
		return (CHUNK_CEILING(usize));
	}
}

/* Choose an arena based on a per-thread value. */
JEMALLOC_INLINE arena_t *
choose_arena(arena_t *arena)
{
	arena_t *ret;

	if (arena != NULL)
		return (arena);

	if ((ret = *arenas_tsd_get()) == NULL) {
		ret = choose_arena_hard();
		assert(ret != NULL);
	}

	return (ret);
}
#endif

#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"

#ifndef JEMALLOC_ENABLE_INLINE
void	*imalloc(size_t size);
void	*icalloc(size_t size);
void	*ipalloc(size_t usize, size_t alignment, bool zero);
size_t	isalloc(const void *ptr, bool demote);
size_t	ivsalloc(const void *ptr, bool demote);
size_t	u2rz(size_t usize);
size_t	p2rz(const void *ptr);
void	idalloc(void *ptr);
void	iqalloc(void *ptr);
void	*iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
    bool zero, bool no_move);
malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t)
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
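/*
 * The allocation wrappers below share a simple routing rule: requests no
 * larger than arena_maxclass are served by an arena, and larger requests go
 * to the huge allocator, which deals in chunk-aligned allocations.
 */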
JEMALLOC_INLINE void *
imalloc(size_t size)
{

	assert(size != 0);

	if (size <= arena_maxclass)
		return (arena_malloc(NULL, size, false, true));
	else
		return (huge_malloc(size, false));
}

JEMALLOC_INLINE void *
icalloc(size_t size)
{

	if (size <= arena_maxclass)
		return (arena_malloc(NULL, size, true, true));
	else
		return (huge_malloc(size, true));
}

JEMALLOC_INLINE void *
ipalloc(size_t usize, size_t alignment, bool zero)
{
	void *ret;

	assert(usize != 0);
	assert(usize == sa2u(usize, alignment, NULL));

	if (usize <= arena_maxclass && alignment <= PAGE)
		ret = arena_malloc(NULL, usize, zero, true);
	else {
		size_t run_size JEMALLOC_CC_SILENCE_INIT(0);

		/*
		 * Ideally we would only ever call sa2u() once per aligned
		 * allocation request, and the caller of this function has
		 * already done so once.  However, it's rather burdensome to
		 * require every caller to pass in run_size, especially given
		 * that it's only relevant to large allocations.  Therefore,
		 * just call it again here in order to get run_size.
		 */
		sa2u(usize, alignment, &run_size);
		if (run_size <= arena_maxclass) {
			ret = arena_palloc(choose_arena(NULL), usize, run_size,
			    alignment, zero);
		} else if (alignment <= chunksize)
			ret = huge_malloc(usize, zero);
		else
			ret = huge_palloc(usize, alignment, zero);
	}

	assert(((uintptr_t)ret & (alignment - 1)) == 0);
	return (ret);
}

/*
 * Typical usage:
 *   void *ptr = [...]
 *   size_t sz = isalloc(ptr, config_prof);
 */
JEMALLOC_INLINE size_t
isalloc(const void *ptr, bool demote)
{
	size_t ret;
	arena_chunk_t *chunk;

	assert(ptr != NULL);
	/* Demotion only makes sense if config_prof is true. */
	assert(config_prof || demote == false);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr) {
		/* Region. */
		ret = arena_salloc(ptr, demote);
	} else
		ret = huge_salloc(ptr);

	return (ret);
}

JEMALLOC_INLINE size_t
ivsalloc(const void *ptr, bool demote)
{

	/* Return 0 if ptr is not within a chunk managed by jemalloc. */
	if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == NULL)
		return (0);

	return (isalloc(ptr, demote));
}

JEMALLOC_INLINE size_t
u2rz(size_t usize)
{
	size_t ret;

	if (usize <= SMALL_MAXCLASS) {
		size_t binind = SMALL_SIZE2BIN(usize);
		ret = arena_bin_info[binind].redzone_size;
	} else
		ret = 0;

	return (ret);
}

JEMALLOC_INLINE size_t
p2rz(const void *ptr)
{
	size_t usize = isalloc(ptr, false);

	return (u2rz(usize));
}

JEMALLOC_INLINE void
idalloc(void *ptr)
{
	arena_chunk_t *chunk;

	assert(ptr != NULL);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr)
		arena_dalloc(chunk->arena, chunk, ptr, true);
	else
		huge_dalloc(ptr, true);
}

JEMALLOC_INLINE void
iqalloc(void *ptr)
{

	if (config_fill && opt_quarantine)
		quarantine(ptr);
	else
		idalloc(ptr);
}

JEMALLOC_INLINE void *
iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
    bool no_move)
{
	void *ret;
	size_t oldsize;

	assert(ptr != NULL);
	assert(size != 0);

	oldsize = isalloc(ptr, config_prof);

	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
	    != 0) {
		size_t usize, copysize;

		/*
		 * Existing object alignment is inadequate; allocate new space
		 * and copy.
		 */
		if (no_move)
			return (NULL);
		usize = sa2u(size + extra, alignment, NULL);
		if (usize == 0)
			return (NULL);
		ret = ipalloc(usize, alignment, zero);
		if (ret == NULL) {
			if (extra == 0)
				return (NULL);
			/* Try again, without extra this time. */
			usize = sa2u(size, alignment, NULL);
			if (usize == 0)
				return (NULL);
			ret = ipalloc(usize, alignment, zero);
			if (ret == NULL)
				return (NULL);
		}
		/*
		 * Copy at most size bytes (not size+extra), since the caller
		 * has no expectation that the extra bytes will be reliably
		 * preserved.
		 */
		copysize = (size < oldsize) ? size : oldsize;
		memcpy(ret, ptr, copysize);
		iqalloc(ptr);
		return (ret);
	}

	if (no_move) {
		if (size <= arena_maxclass) {
			return (arena_ralloc_no_move(ptr, oldsize, size,
			    extra, zero));
		} else {
			return (huge_ralloc_no_move(ptr, oldsize, size,
			    extra));
		}
	} else {
		if (size + extra <= arena_maxclass) {
			return (arena_ralloc(ptr, oldsize, size, extra,
			    alignment, zero, true));
		} else {
			return (huge_ralloc(ptr, oldsize, size, extra,
			    alignment, zero));
		}
	}
}

malloc_tsd_externs(thread_allocated, thread_allocated_t)
malloc_tsd_funcs(JEMALLOC_INLINE, thread_allocated, thread_allocated_t,
    THREAD_ALLOCATED_INITIALIZER, malloc_tsd_no_cleanup)
#endif

#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_INLINES
/******************************************************************************/
