/* jemalloc_internal.h.in, revision a14bce85e885f83c96116cc5438ae52d740f3727 */
#ifndef JEMALLOC_INTERNAL_H
#define JEMALLOC_INTERNAL_H
#include <sys/param.h>
#include <math.h>
#ifdef _WIN32
#  include <windows.h>
#  define ENOENT ERROR_PATH_NOT_FOUND
#  define EINVAL ERROR_BAD_ARGUMENTS
#  define EAGAIN ERROR_OUTOFMEMORY
#  define EPERM  ERROR_WRITE_FAULT
#  define EFAULT ERROR_INVALID_ADDRESS
#  define ENOMEM ERROR_NOT_ENOUGH_MEMORY
#  undef ERANGE
#  define ERANGE ERROR_INVALID_DATA
#else
#  include <sys/mman.h>
#  include <sys/syscall.h>
#  if !defined(SYS_write) && defined(__NR_write)
#    define SYS_write __NR_write
#  endif
#  include <sys/uio.h>
#  include <pthread.h>
#  include <errno.h>
#endif
#include <sys/types.h>

#include <limits.h>
#ifndef SIZE_T_MAX
#  define SIZE_T_MAX	SIZE_MAX
#endif
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>
#ifndef offsetof
#  define offsetof(type, member)	((size_t)&(((type *)NULL)->member))
#endif
#include <inttypes.h>
#include <string.h>
#include <strings.h>
#include <ctype.h>
#include <unistd.h>
#include <fcntl.h>

#define	JEMALLOC_NO_DEMANGLE
#include "../jemalloc@install_suffix@.h"

#ifdef JEMALLOC_UTRACE
#include <sys/ktrace.h>
#endif

#ifdef JEMALLOC_VALGRIND
#include <valgrind/valgrind.h>
#include <valgrind/memcheck.h>
#endif

#include "jemalloc/internal/private_namespace.h"

#ifdef JEMALLOC_CC_SILENCE
#define	UNUSED JEMALLOC_ATTR(unused)
#else
#define	UNUSED
#endif

static const bool config_debug =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
static const bool config_dss =
#ifdef JEMALLOC_DSS
    true
#else
    false
#endif
    ;
static const bool config_fill =
#ifdef JEMALLOC_FILL
    true
#else
    false
#endif
    ;
static const bool config_lazy_lock =
#ifdef JEMALLOC_LAZY_LOCK
    true
#else
    false
#endif
    ;
static const bool config_prof =
#ifdef JEMALLOC_PROF
    true
#else
    false
#endif
    ;
static const bool config_prof_libgcc =
#ifdef JEMALLOC_PROF_LIBGCC
    true
#else
    false
#endif
    ;
static const bool config_prof_libunwind =
#ifdef JEMALLOC_PROF_LIBUNWIND
    true
#else
    false
#endif
    ;
static const bool config_munmap =
#ifdef JEMALLOC_MUNMAP
    true
#else
    false
#endif
    ;
static const bool config_stats =
#ifdef JEMALLOC_STATS
    true
#else
    false
#endif
    ;
static const bool config_tcache =
#ifdef JEMALLOC_TCACHE
    true
#else
    false
#endif
    ;
static const bool config_tls =
#ifdef JEMALLOC_TLS
    true
#else
    false
#endif
    ;
static const bool config_utrace =
#ifdef JEMALLOC_UTRACE
    true
#else
    false
#endif
    ;
static const bool config_valgrind =
#ifdef JEMALLOC_VALGRIND
    true
#else
    false
#endif
    ;
static const bool config_xmalloc =
#ifdef JEMALLOC_XMALLOC
    true
#else
    false
#endif
    ;
static const bool config_ivsalloc =
#ifdef JEMALLOC_IVSALLOC
    true
#else
    false
#endif
    ;

#ifdef JEMALLOC_ATOMIC9
#include <machine/atomic.h>
#endif

#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
#include <libkern/OSAtomic.h>
#endif

#ifdef JEMALLOC_ZONE
#include <mach/mach_error.h>
#include <mach/mach_init.h>
#include <mach/vm_map.h>
#include <malloc/malloc.h>
#endif

#define	RB_COMPACT
#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/qr.h"
#include "jemalloc/internal/ql.h"

/*
 * jemalloc can conceptually be broken into components (arena, tcache, etc.),
 * but there are circular dependencies that cannot be broken without
 * substantial performance degradation.  In order to reduce the effect on
 * visual code flow, read the header files in multiple passes, with one of the
 * following cpp variables defined during each pass:
 *
 *   JEMALLOC_H_TYPES   : Preprocessor-defined constants and pseudo-opaque data
 *                        types.
 *   JEMALLOC_H_STRUCTS : Data structures.
 *   JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
 *   JEMALLOC_H_INLINES : Inline functions.
 */
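/*
 * Illustrative sketch of a hypothetical component header ("foo" is not a real
 * component) showing how the passes are expected to be consumed; each internal
 * header guards its sections with the corresponding cpp variable so that only
 * one kind of content is emitted per pass:
 *
 *   #ifdef JEMALLOC_H_TYPES
 *   typedef struct foo_s foo_t;
 *   #endif
 *   #ifdef JEMALLOC_H_STRUCTS
 *   struct foo_s { int bar; };
 *   #endif
 *   #ifdef JEMALLOC_H_EXTERNS
 *   void foo_boot(void);
 *   #endif
 *   #ifdef JEMALLOC_H_INLINES
 *   JEMALLOC_INLINE int foo_bar_get(foo_t *foo) { return (foo->bar); }
 *   #endif
 */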
/******************************************************************************/
#define JEMALLOC_H_TYPES

#define	ALLOCM_LG_ALIGN_MASK	((int)0x3f)

#define	ZU(z)	((size_t)z)

#ifndef __DECONST
#  define	__DECONST(type, var)	((type)(uintptr_t)(const void *)(var))
#endif

#ifdef JEMALLOC_DEBUG
   /* Disable inlining to make debugging easier. */
#  define JEMALLOC_INLINE
#  define inline
#else
#  define JEMALLOC_ENABLE_INLINE
#  define JEMALLOC_INLINE static inline
#endif

/* Smallest size class to support. */
#define	LG_TINY_MIN		3
#define	TINY_MIN		(1U << LG_TINY_MIN)

/*
 * Minimum alignment of allocations is 2^LG_QUANTUM bytes (ignoring tiny size
 * classes).
 */
#ifndef LG_QUANTUM
#  ifdef __i386__
#    define LG_QUANTUM		4
#  endif
#  ifdef __ia64__
#    define LG_QUANTUM		4
#  endif
#  ifdef __alpha__
#    define LG_QUANTUM		4
#  endif
#  ifdef __sparc64__
#    define LG_QUANTUM		4
#  endif
#  if (defined(__amd64__) || defined(__x86_64__))
#    define LG_QUANTUM		4
#  endif
#  ifdef __arm__
#    define LG_QUANTUM		3
#  endif
#  ifdef __mips__
#    define LG_QUANTUM		3
#  endif
#  ifdef __powerpc__
#    define LG_QUANTUM		4
#  endif
#  ifdef __s390x__
#    define LG_QUANTUM		4
#  endif
#  ifdef __SH4__
#    define LG_QUANTUM		4
#  endif
#  ifdef __tile__
#    define LG_QUANTUM		4
#  endif
#  ifndef LG_QUANTUM
#    error "No LG_QUANTUM definition for architecture; specify via CPPFLAGS"
#  endif
#endif

#define	QUANTUM			((size_t)(1U << LG_QUANTUM))
#define	QUANTUM_MASK		(QUANTUM - 1)

/* Return the smallest quantum multiple that is >= a. */
#define	QUANTUM_CEILING(a)						\
	(((a) + QUANTUM_MASK) & ~QUANTUM_MASK)
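/*
 * Worked example (assuming LG_QUANTUM == 4, i.e. QUANTUM == 16):
 *   QUANTUM_CEILING(1)  == 16
 *   QUANTUM_CEILING(16) == 16
 *   QUANTUM_CEILING(17) == 32
 */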

#define	LONG			((size_t)(1U << LG_SIZEOF_LONG))
#define	LONG_MASK		(LONG - 1)

/* Return the smallest long multiple that is >= a. */
#define	LONG_CEILING(a)							\
	(((a) + LONG_MASK) & ~LONG_MASK)

#define	SIZEOF_PTR		(1U << LG_SIZEOF_PTR)
#define	PTR_MASK		(SIZEOF_PTR - 1)

/* Return the smallest (void *) multiple that is >= a. */
#define	PTR_CEILING(a)							\
	(((a) + PTR_MASK) & ~PTR_MASK)

/*
 * Maximum size of L1 cache line.  This is used to avoid cache line aliasing.
 * In addition, this controls the spacing of cacheline-spaced size classes.
 */
#define	LG_CACHELINE		6
#define	CACHELINE		((size_t)(1U << LG_CACHELINE))
#define	CACHELINE_MASK		(CACHELINE - 1)

/* Return the smallest cacheline multiple that is >= s. */
#define	CACHELINE_CEILING(s)						\
	(((s) + CACHELINE_MASK) & ~CACHELINE_MASK)

/* Page size.  STATIC_PAGE_SHIFT is determined by the configure script. */
#ifdef PAGE_MASK
#  undef PAGE_MASK
#endif
#define	LG_PAGE		STATIC_PAGE_SHIFT
#define	PAGE		((size_t)(1U << STATIC_PAGE_SHIFT))
#define	PAGE_MASK	((size_t)(PAGE - 1))

/* Return the smallest pagesize multiple that is >= s. */
#define	PAGE_CEILING(s)							\
	(((s) + PAGE_MASK) & ~PAGE_MASK)

/* Return the nearest aligned address at or below a. */
#define	ALIGNMENT_ADDR2BASE(a, alignment)				\
	((void *)((uintptr_t)(a) & (-(alignment))))

/* Return the offset between a and the nearest aligned address at or below a. */
#define	ALIGNMENT_ADDR2OFFSET(a, alignment)				\
	((size_t)((uintptr_t)(a) & (alignment - 1)))

/* Return the smallest alignment multiple that is >= s. */
#define	ALIGNMENT_CEILING(s, alignment)					\
	(((s) + (alignment - 1)) & (-(alignment)))
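/*
 * Worked example for the three alignment macros, using a hypothetical address
 * and alignment == 64 (all of them assume a power-of-two alignment, as
 * asserted by callers such as sa2u()):
 *   ALIGNMENT_ADDR2BASE(0x1234, 64)   == (void *)0x1200
 *   ALIGNMENT_ADDR2OFFSET(0x1234, 64) == 0x34
 *   ALIGNMENT_CEILING(100, 64)        == 128
 */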

/* Declare a variable length array */
#if __STDC_VERSION__ < 199901L
#  ifdef _MSC_VER
#    include <malloc.h>
#    define alloca _alloca
#  else
#    include <alloca.h>
#  endif
#  define VARIABLE_ARRAY(type, name, count) \
	type *name = alloca(sizeof(type) * count)
#else
#  define VARIABLE_ARRAY(type, name, count) type name[count]
#endif
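/*
 * Hypothetical usage:
 *   VARIABLE_ARRAY(bool, dirty, nregs);
 * On C99 compilers this declares a stack-allocated VLA; on older compilers it
 * falls back to an alloca()-backed pointer, so either way the storage lives on
 * the stack and indexing works the same.
 */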

#ifdef JEMALLOC_VALGRIND
/*
 * The JEMALLOC_VALGRIND_*() macros must be macros rather than functions
 * so that when Valgrind reports errors, there are no extra stack frames
 * in the backtraces.
 *
 * The size that is reported to valgrind must be consistent through a chain of
 * malloc..realloc..realloc calls.  Request size isn't recorded anywhere in
 * jemalloc, so it is critical that all callers of these macros provide usize
 * rather than request size.  As a result, buffer overflow detection is
 * technically weakened for the standard API, though it is generally accepted
 * practice to consider any extra bytes reported by malloc_usable_size() as
 * usable space.
 */
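/*
 * Illustrative chain (sizes hypothetical): if a malloc(100) request is served
 * from a size class whose usize is 112, the MALLOCLIKE report below must say
 * 112, and a later realloc of that object must pass old_usize == 112 rather
 * than the original request size of 100; otherwise Valgrind's view of the
 * block diverges from jemalloc's.
 */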
#define	JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do {		\
	if (config_valgrind && opt_valgrind && cond)			\
		VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(ptr), zero);	\
} while (0)
#define	JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize,	\
    old_rzsize, zero)  do {						\
	if (config_valgrind && opt_valgrind) {				\
		size_t rzsize = p2rz(ptr);				\
									\
		if (ptr == old_ptr) {					\
			VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize,	\
			    usize, rzsize);				\
			if (zero && old_usize < usize) {		\
				VALGRIND_MAKE_MEM_DEFINED(		\
				    (void *)((uintptr_t)ptr +		\
				    old_usize), usize - old_usize);	\
			}						\
		} else {						\
			if (old_ptr != NULL) {				\
				VALGRIND_FREELIKE_BLOCK(old_ptr,	\
				    old_rzsize);			\
			}						\
			if (ptr != NULL) {				\
				size_t copy_size = (old_usize < usize)	\
				    ?  old_usize : usize;		\
				size_t tail_size = usize - copy_size;	\
				VALGRIND_MALLOCLIKE_BLOCK(ptr, usize,	\
				    rzsize, false);			\
				if (copy_size > 0) {			\
					VALGRIND_MAKE_MEM_DEFINED(ptr,	\
					    copy_size);			\
				}					\
				if (zero && tail_size > 0) {		\
					VALGRIND_MAKE_MEM_DEFINED(	\
					    (void *)((uintptr_t)ptr +	\
					    copy_size), tail_size);	\
				}					\
			}						\
		}							\
	}								\
} while (0)
#define	JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {			\
	if (config_valgrind && opt_valgrind)				\
		VALGRIND_FREELIKE_BLOCK(ptr, rzsize);			\
} while (0)
#else
#define	VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed)
#define	VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB)
#define	VALGRIND_FREELIKE_BLOCK(addr, rzB)
#define	VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr, _qzz_len)
#define	VALGRIND_MAKE_MEM_DEFINED(_qzz_addr, _qzz_len)
#define	JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero)
#define	JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize,	\
    old_rzsize, zero)
#define	JEMALLOC_VALGRIND_FREE(ptr, rzsize)
#endif

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_TYPES
/******************************************************************************/
#define JEMALLOC_H_STRUCTS

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

typedef struct {
	uint64_t	allocated;
	uint64_t	deallocated;
} thread_allocated_t;
/*
 * The JEMALLOC_CONCAT() wrapper is necessary to pass {0, 0} via a cpp macro
 * argument.
 */
#define	THREAD_ALLOCATED_INITIALIZER	JEMALLOC_CONCAT({0, 0})
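/*
 * Written literally as a macro argument, the braced initializer would be split
 * at its embedded comma into the two pieces "{0" and "0}"; routing it through
 * JEMALLOC_CONCAT() keeps it intact when it is forwarded to malloc_tsd_funcs()
 * near the end of this header.
 */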

#undef JEMALLOC_H_STRUCTS
/******************************************************************************/
#define JEMALLOC_H_EXTERNS

extern bool	opt_abort;
extern bool	opt_junk;
extern size_t	opt_quarantine;
extern bool	opt_redzone;
extern bool	opt_utrace;
extern bool	opt_valgrind;
extern bool	opt_xmalloc;
extern bool	opt_zero;
extern size_t	opt_narenas;

/* Number of CPUs. */
extern unsigned		ncpus;

extern malloc_mutex_t	arenas_lock; /* Protects arenas initialization. */
/*
 * Arenas that are used to service external requests.  Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 */
extern arena_t		**arenas;
extern unsigned		narenas;

arena_t	*arenas_extend(unsigned ind);
void	arenas_cleanup(void *arg);
arena_t	*choose_arena_hard(void);
void	jemalloc_prefork(void);
void	jemalloc_postfork_parent(void);
void	jemalloc_postfork_child(void);

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_EXTERNS
/******************************************************************************/
#define JEMALLOC_H_INLINES

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"

#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), arenas, arena_t *)

size_t	s2u(size_t size);
size_t	sa2u(size_t size, size_t alignment);
arena_t	*choose_arena(arena_t *arena);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
/*
 * Map of pthread_self() --> arenas[???], used for selecting an arena to use
 * for allocations.
 */
malloc_tsd_externs(arenas, arena_t *)
malloc_tsd_funcs(JEMALLOC_INLINE, arenas, arena_t *, NULL, arenas_cleanup)

/*
 * Compute usable size that would result from allocating an object with the
 * specified size.
 */
JEMALLOC_INLINE size_t
s2u(size_t size)
{

	if (size <= SMALL_MAXCLASS)
		return (arena_bin_info[SMALL_SIZE2BIN(size)].reg_size);
	if (size <= arena_maxclass)
		return (PAGE_CEILING(size));
	return (CHUNK_CEILING(size));
}
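/*
 * Worked example (assuming 4 KiB pages and the default size classes): a small
 * request such as 100 bytes maps to its bin's reg_size, a large request such
 * as 5000 bytes maps to PAGE_CEILING(5000) == 8192, and anything larger than
 * arena_maxclass is rounded up to a multiple of the chunk size by
 * CHUNK_CEILING().
 */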

/*
 * Compute usable size that would result from allocating an object with the
 * specified size and alignment.
 */
JEMALLOC_INLINE size_t
sa2u(size_t size, size_t alignment)
{
	size_t usize;

	assert(alignment != 0 && ((alignment - 1) & alignment) == 0);

	/*
	 * Round size up to the nearest multiple of alignment.
	 *
	 * This done, we can take advantage of the fact that for each small
	 * size class, every object is aligned at the smallest power of two
	 * that is non-zero in the base two representation of the size.  For
	 * example:
	 *
	 *   Size |   Base 2 | Minimum alignment
	 *   -----+----------+------------------
	 *     96 |  1100000 |  32
	 *    160 | 10100000 |  32
	 *    192 | 11000000 |  64
	 */
	usize = ALIGNMENT_CEILING(size, alignment);
	/*
	 * (usize < size) protects against the combination of maximal
	 * alignment and size greater than maximal alignment.
	 */
	if (usize < size) {
		/* size_t overflow. */
		return (0);
	}

	if (usize <= arena_maxclass && alignment <= PAGE) {
		if (usize <= SMALL_MAXCLASS)
			return (arena_bin_info[SMALL_SIZE2BIN(usize)].reg_size);
		return (PAGE_CEILING(usize));
	} else {
		size_t run_size;

		/*
		 * We can't achieve subpage alignment, so round up alignment
		 * permanently; it makes later calculations simpler.
		 */
		alignment = PAGE_CEILING(alignment);
		usize = PAGE_CEILING(size);
		/*
		 * (usize < size) protects against very large sizes within
		 * PAGE of SIZE_T_MAX.
		 *
		 * (usize + alignment < usize) protects against the
		 * combination of maximal alignment and usize large enough
		 * to cause overflow.  This is similar to the first overflow
		 * check above, but it needs to be repeated due to the new
		 * usize value, which may now be *equal* to maximal
		 * alignment, whereas before we only detected overflow if the
		 * original size was *greater* than maximal alignment.
		 */
		if (usize < size || usize + alignment < usize) {
			/* size_t overflow. */
			return (0);
		}

		/*
		 * Calculate the size of the over-size run that arena_palloc()
		 * would need to allocate in order to guarantee the alignment.
		 * If the run wouldn't fit within a chunk, round up to a huge
		 * allocation size.
		 */
		run_size = usize + alignment - PAGE;
		if (run_size <= arena_maxclass)
			return (PAGE_CEILING(usize));
		return (CHUNK_CEILING(usize));
	}
}
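/*
 * Callers such as ipalloc() and iralloc() below compute usize = sa2u(size,
 * alignment) up front, treat a return of 0 as size_t overflow, and then
 * allocate exactly usize bytes; that convention is why ipalloc() can assert
 * usize == sa2u(usize, alignment).
 */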

/* Choose an arena based on a per-thread value. */
JEMALLOC_INLINE arena_t *
choose_arena(arena_t *arena)
{
	arena_t *ret;

	if (arena != NULL)
		return (arena);

	if ((ret = *arenas_tsd_get()) == NULL) {
		ret = choose_arena_hard();
		assert(ret != NULL);
	}

	return (ret);
}
#endif

#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"

#ifndef JEMALLOC_ENABLE_INLINE
void	*imalloc(size_t size);
void	*icalloc(size_t size);
void	*ipalloc(size_t usize, size_t alignment, bool zero);
size_t	isalloc(const void *ptr, bool demote);
size_t	ivsalloc(const void *ptr, bool demote);
size_t	u2rz(size_t usize);
size_t	p2rz(const void *ptr);
void	idalloc(void *ptr);
void	iqalloc(void *ptr);
void	*iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
    bool zero, bool no_move);
malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t)
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_INLINE void *
imalloc(size_t size)
{

	assert(size != 0);

	if (size <= arena_maxclass)
		return (arena_malloc(NULL, size, false, true));
	else
		return (huge_malloc(size, false));
}

JEMALLOC_INLINE void *
icalloc(size_t size)
{

	if (size <= arena_maxclass)
		return (arena_malloc(NULL, size, true, true));
	else
		return (huge_malloc(size, true));
}

JEMALLOC_INLINE void *
ipalloc(size_t usize, size_t alignment, bool zero)
{
	void *ret;

	assert(usize != 0);
	assert(usize == sa2u(usize, alignment));

	if (usize <= arena_maxclass && alignment <= PAGE)
		ret = arena_malloc(NULL, usize, zero, true);
	else {
		if (usize <= arena_maxclass) {
			ret = arena_palloc(choose_arena(NULL), usize, alignment,
			    zero);
		} else if (alignment <= chunksize)
			ret = huge_malloc(usize, zero);
		else
			ret = huge_palloc(usize, alignment, zero);
	}

	assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
	return (ret);
}

/*
 * Typical usage:
 *   void *ptr = [...]
 *   size_t sz = isalloc(ptr, config_prof);
 */
JEMALLOC_INLINE size_t
isalloc(const void *ptr, bool demote)
{
	size_t ret;
	arena_chunk_t *chunk;

	assert(ptr != NULL);
	/* Demotion only makes sense if config_prof is true. */
	assert(config_prof || demote == false);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr)
		ret = arena_salloc(ptr, demote);
	else
		ret = huge_salloc(ptr);

	return (ret);
}

JEMALLOC_INLINE size_t
ivsalloc(const void *ptr, bool demote)
{

	/* Return 0 if ptr is not within a chunk managed by jemalloc. */
	if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == NULL)
		return (0);

	return (isalloc(ptr, demote));
}

JEMALLOC_INLINE size_t
u2rz(size_t usize)
{
	size_t ret;

	if (usize <= SMALL_MAXCLASS) {
		size_t binind = SMALL_SIZE2BIN(usize);
		ret = arena_bin_info[binind].redzone_size;
	} else
		ret = 0;

	return (ret);
}

JEMALLOC_INLINE size_t
p2rz(const void *ptr)
{
	size_t usize = isalloc(ptr, false);

	return (u2rz(usize));
}

JEMALLOC_INLINE void
idalloc(void *ptr)
{
	arena_chunk_t *chunk;

	assert(ptr != NULL);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr)
		arena_dalloc(chunk->arena, chunk, ptr, true);
	else
		huge_dalloc(ptr, true);
}

JEMALLOC_INLINE void
iqalloc(void *ptr)
{

	if (config_fill && opt_quarantine)
		quarantine(ptr);
	else
		idalloc(ptr);
}

JEMALLOC_INLINE void *
iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
    bool no_move)
{
	void *ret;
	size_t oldsize;

	assert(ptr != NULL);
	assert(size != 0);

	oldsize = isalloc(ptr, config_prof);

	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
	    != 0) {
		size_t usize, copysize;

		/*
		 * Existing object alignment is inadequate; allocate new space
		 * and copy.
		 */
		if (no_move)
			return (NULL);
		usize = sa2u(size + extra, alignment);
		if (usize == 0)
			return (NULL);
		ret = ipalloc(usize, alignment, zero);
		if (ret == NULL) {
			if (extra == 0)
				return (NULL);
			/* Try again, without extra this time. */
			usize = sa2u(size, alignment);
			if (usize == 0)
				return (NULL);
			ret = ipalloc(usize, alignment, zero);
			if (ret == NULL)
				return (NULL);
		}
		/*
		 * Copy at most size bytes (not size+extra), since the caller
		 * has no expectation that the extra bytes will be reliably
		 * preserved.
		 */
		copysize = (size < oldsize) ? size : oldsize;
		memcpy(ret, ptr, copysize);
		iqalloc(ptr);
		return (ret);
	}

	if (no_move) {
		if (size <= arena_maxclass) {
			return (arena_ralloc_no_move(ptr, oldsize, size,
			    extra, zero));
		} else {
			return (huge_ralloc_no_move(ptr, oldsize, size,
			    extra));
		}
	} else {
		if (size + extra <= arena_maxclass) {
			return (arena_ralloc(ptr, oldsize, size, extra,
			    alignment, zero, true));
		} else {
			return (huge_ralloc(ptr, oldsize, size, extra,
			    alignment, zero));
		}
	}
}

malloc_tsd_externs(thread_allocated, thread_allocated_t)
malloc_tsd_funcs(JEMALLOC_INLINE, thread_allocated, thread_allocated_t,
    THREAD_ALLOCATED_INITIALIZER, malloc_tsd_no_cleanup)
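/*
 * malloc_tsd_funcs() generates the thread_allocated_tsd_get() accessor (the
 * same naming scheme as arenas_tsd_get() above).  A sketch of how the stats
 * code is expected to bump the per-thread counters when config_stats is
 * enabled:
 *
 *	if (config_stats)
 *		thread_allocated_tsd_get()->allocated += usize;
 */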
#endif

#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_INLINES
/******************************************************************************/
#endif /* JEMALLOC_INTERNAL_H */