jemalloc_internal.h.in revision bd87b01999416ec7418ff8bdb504d9b6c009ff68
1#ifndef JEMALLOC_INTERNAL_H
2#define	JEMALLOC_INTERNAL_H
3#include <math.h>
4#ifdef _WIN32
5#  include <windows.h>
6#  define ENOENT ERROR_PATH_NOT_FOUND
7#  define EINVAL ERROR_BAD_ARGUMENTS
8#  define EAGAIN ERROR_OUTOFMEMORY
9#  define EPERM  ERROR_WRITE_FAULT
10#  define EFAULT ERROR_INVALID_ADDRESS
11#  define ENOMEM ERROR_NOT_ENOUGH_MEMORY
12#  undef ERANGE
13#  define ERANGE ERROR_INVALID_DATA
14#else
15#  include <sys/param.h>
16#  include <sys/mman.h>
17#  include <sys/syscall.h>
18#  if !defined(SYS_write) && defined(__NR_write)
19#    define SYS_write __NR_write
20#  endif
21#  include <sys/uio.h>
22#  include <pthread.h>
23#  include <errno.h>
24#endif
25#include <sys/types.h>
26
27#include <limits.h>
28#ifndef SIZE_T_MAX
29#  define SIZE_T_MAX	SIZE_MAX
30#endif
31#include <stdarg.h>
32#include <stdbool.h>
33#include <stdio.h>
34#include <stdlib.h>
35#include <stdint.h>
36#include <stddef.h>
37#ifndef offsetof
38#  define offsetof(type, member)	((size_t)&(((type *)NULL)->member))
39#endif
40#include <inttypes.h>
41#include <string.h>
42#include <strings.h>
43#include <ctype.h>
44#ifdef _MSC_VER
45#  include <io.h>
46typedef intptr_t ssize_t;
47#  define PATH_MAX 1024
48#  define STDERR_FILENO 2
49#  define __func__ __FUNCTION__
50/* Disable warnings about deprecated system functions */
51#  pragma warning(disable: 4996)
52#else
53#  include <unistd.h>
54#endif
55#include <fcntl.h>
56
57#include "jemalloc_internal_defs.h"
58
59#ifdef JEMALLOC_UTRACE
60#include <sys/ktrace.h>
61#endif
62
63#define	JEMALLOC_NO_DEMANGLE
64#ifdef JEMALLOC_JET
65#  define JEMALLOC_N(n) jet_##n
66#  include "jemalloc/internal/public_namespace.h"
67#  define JEMALLOC_NO_RENAME
68#  include "../jemalloc@install_suffix@.h"
69#  undef JEMALLOC_NO_RENAME
70#else
71#  define JEMALLOC_N(n) @private_namespace@##n
72#  include "../jemalloc@install_suffix@.h"
73#endif
74#include "jemalloc/internal/private_namespace.h"
75
76static const bool config_debug =
77#ifdef JEMALLOC_DEBUG
78    true
79#else
80    false
81#endif
82    ;
83static const bool have_dss =
84#ifdef JEMALLOC_DSS
85    true
86#else
87    false
88#endif
89    ;
90static const bool config_fill =
91#ifdef JEMALLOC_FILL
92    true
93#else
94    false
95#endif
96    ;
97static const bool config_lazy_lock =
98#ifdef JEMALLOC_LAZY_LOCK
99    true
100#else
101    false
102#endif
103    ;
104static const bool config_prof =
105#ifdef JEMALLOC_PROF
106    true
107#else
108    false
109#endif
110    ;
111static const bool config_prof_libgcc =
112#ifdef JEMALLOC_PROF_LIBGCC
113    true
114#else
115    false
116#endif
117    ;
118static const bool config_prof_libunwind =
119#ifdef JEMALLOC_PROF_LIBUNWIND
120    true
121#else
122    false
123#endif
124    ;
125static const bool config_mremap =
126#ifdef JEMALLOC_MREMAP
127    true
128#else
129    false
130#endif
131    ;
132static const bool config_munmap =
133#ifdef JEMALLOC_MUNMAP
134    true
135#else
136    false
137#endif
138    ;
139static const bool config_stats =
140#ifdef JEMALLOC_STATS
141    true
142#else
143    false
144#endif
145    ;
146static const bool config_tcache =
147#ifdef JEMALLOC_TCACHE
148    true
149#else
150    false
151#endif
152    ;
153static const bool config_tls =
154#ifdef JEMALLOC_TLS
155    true
156#else
157    false
158#endif
159    ;
160static const bool config_utrace =
161#ifdef JEMALLOC_UTRACE
162    true
163#else
164    false
165#endif
166    ;
167static const bool config_valgrind =
168#ifdef JEMALLOC_VALGRIND
169    true
170#else
171    false
172#endif
173    ;
174static const bool config_xmalloc =
175#ifdef JEMALLOC_XMALLOC
176    true
177#else
178    false
179#endif
180    ;
181static const bool config_ivsalloc =
182#ifdef JEMALLOC_IVSALLOC
183    true
184#else
185    false
186#endif
187    ;
188
189#ifdef JEMALLOC_ATOMIC9
190#include <machine/atomic.h>
191#endif
192
193#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
194#include <libkern/OSAtomic.h>
195#endif
196
197#ifdef JEMALLOC_ZONE
198#include <mach/mach_error.h>
199#include <mach/mach_init.h>
200#include <mach/vm_map.h>
201#include <malloc/malloc.h>
202#endif
203
204#define	RB_COMPACT
205#include "jemalloc/internal/rb.h"
206#include "jemalloc/internal/qr.h"
207#include "jemalloc/internal/ql.h"
208
209/*
210 * jemalloc can conceptually be broken into components (arena, tcache, etc.),
211 * but there are circular dependencies that cannot be broken without
212 * substantial performance degradation.  In order to reduce the effect on
213 * visual code flow, read the header files in multiple passes, with one of the
214 * following cpp variables defined during each pass:
215 *
216 *   JEMALLOC_H_TYPES   : Preprocessor-defined constants and psuedo-opaque data
217 *                        types.
218 *   JEMALLOC_H_STRUCTS : Data structures.
219 *   JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
220 *   JEMALLOC_H_INLINES : Inline functions.
221 */
222/******************************************************************************/
223#define	JEMALLOC_H_TYPES
224
225#include "jemalloc/internal/jemalloc_internal_macros.h"
226
227#define	MALLOCX_LG_ALIGN_MASK	((int)0x3f)
228
229/* Smallest size class to support. */
230#define	LG_TINY_MIN		3
231#define	TINY_MIN		(1U << LG_TINY_MIN)
232
233/*
234 * Minimum alignment of allocations is 2^LG_QUANTUM bytes (ignoring tiny size
235 * classes).
236 */
237#ifndef LG_QUANTUM
238#  if (defined(__i386__) || defined(_M_IX86))
239#    define LG_QUANTUM		4
240#  endif
241#  ifdef __ia64__
242#    define LG_QUANTUM		4
243#  endif
244#  ifdef __alpha__
245#    define LG_QUANTUM		4
246#  endif
247#  ifdef __sparc64__
248#    define LG_QUANTUM		4
249#  endif
250#  if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64))
251#    define LG_QUANTUM		4
252#  endif
253#  ifdef __arm__
254#    define LG_QUANTUM		3
255#  endif
256#  ifdef __aarch64__
257#    define LG_QUANTUM		4
258#  endif
259#  ifdef __hppa__
260#    define LG_QUANTUM		4
261#  endif
262#  ifdef __mips__
263#    define LG_QUANTUM		3
264#  endif
265#  ifdef __powerpc__
266#    define LG_QUANTUM		4
267#  endif
268#  ifdef __s390__
269#    define LG_QUANTUM		4
270#  endif
271#  ifdef __SH4__
272#    define LG_QUANTUM		4
273#  endif
274#  ifdef __tile__
275#    define LG_QUANTUM		4
276#  endif
277#  ifndef LG_QUANTUM
278#    error "No LG_QUANTUM definition for architecture; specify via CPPFLAGS"
279#  endif
280#endif
281
282#define	QUANTUM			((size_t)(1U << LG_QUANTUM))
283#define	QUANTUM_MASK		(QUANTUM - 1)
284
285/* Return the smallest quantum multiple that is >= a. */
286#define	QUANTUM_CEILING(a)						\
287	(((a) + QUANTUM_MASK) & ~QUANTUM_MASK)
288
289#define	LONG			((size_t)(1U << LG_SIZEOF_LONG))
290#define	LONG_MASK		(LONG - 1)
291
292/* Return the smallest long multiple that is >= a. */
293#define	LONG_CEILING(a)							\
294	(((a) + LONG_MASK) & ~LONG_MASK)
295
296#define	SIZEOF_PTR		(1U << LG_SIZEOF_PTR)
297#define	PTR_MASK		(SIZEOF_PTR - 1)
298
299/* Return the smallest (void *) multiple that is >= a. */
300#define	PTR_CEILING(a)							\
301	(((a) + PTR_MASK) & ~PTR_MASK)
302
303/*
304 * Maximum size of L1 cache line.  This is used to avoid cache line aliasing.
305 * In addition, this controls the spacing of cacheline-spaced size classes.
306 *
307 * CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can
308 * only handle raw constants.
309 */
310#define	LG_CACHELINE		6
311#define	CACHELINE		64
312#define	CACHELINE_MASK		(CACHELINE - 1)
313
314/* Return the smallest cacheline multiple that is >= s. */
315#define	CACHELINE_CEILING(s)						\
316	(((s) + CACHELINE_MASK) & ~CACHELINE_MASK)
317
318/* Page size.  STATIC_PAGE_SHIFT is determined by the configure script. */
319#ifdef PAGE_MASK
320#  undef PAGE_MASK
321#endif
322#define	LG_PAGE		STATIC_PAGE_SHIFT
323#define	PAGE		((size_t)(1U << STATIC_PAGE_SHIFT))
324#define	PAGE_MASK	((size_t)(PAGE - 1))
325
326/* Return the smallest pagesize multiple that is >= s. */
327#define	PAGE_CEILING(s)							\
328	(((s) + PAGE_MASK) & ~PAGE_MASK)
329
330/* Return the nearest aligned address at or below a. */
331#define	ALIGNMENT_ADDR2BASE(a, alignment)				\
332	((void *)((uintptr_t)(a) & (-(alignment))))
333
334/* Return the offset between a and the nearest aligned address at or below a. */
335#define	ALIGNMENT_ADDR2OFFSET(a, alignment)				\
336	((size_t)((uintptr_t)(a) & (alignment - 1)))
337
338/* Return the smallest alignment multiple that is >= s. */
339#define	ALIGNMENT_CEILING(s, alignment)					\
340	(((s) + (alignment - 1)) & (-(alignment)))
341
342/* Declare a variable length array */
343#if __STDC_VERSION__ < 199901L
344#  ifdef _MSC_VER
345#    include <malloc.h>
346#    define alloca _alloca
347#  else
348#    ifdef JEMALLOC_HAS_ALLOCA_H
349#      include <alloca.h>
350#    else
351#      include <stdlib.h>
352#    endif
353#  endif
354#  define VARIABLE_ARRAY(type, name, count) \
355	type *name = alloca(sizeof(type) * count)
356#else
357#  define VARIABLE_ARRAY(type, name, count) type name[count]
358#endif
359
360#include "jemalloc/internal/valgrind.h"
361#include "jemalloc/internal/util.h"
362#include "jemalloc/internal/atomic.h"
363#include "jemalloc/internal/prng.h"
364#include "jemalloc/internal/ckh.h"
365#include "jemalloc/internal/size_classes.h"
366#include "jemalloc/internal/stats.h"
367#include "jemalloc/internal/ctl.h"
368#include "jemalloc/internal/mutex.h"
369#include "jemalloc/internal/tsd.h"
370#include "jemalloc/internal/mb.h"
371#include "jemalloc/internal/extent.h"
372#include "jemalloc/internal/arena.h"
373#include "jemalloc/internal/bitmap.h"
374#include "jemalloc/internal/base.h"
375#include "jemalloc/internal/chunk.h"
376#include "jemalloc/internal/huge.h"
377#include "jemalloc/internal/rtree.h"
378#include "jemalloc/internal/tcache.h"
379#include "jemalloc/internal/hash.h"
380#include "jemalloc/internal/quarantine.h"
381#include "jemalloc/internal/prof.h"
382
383#undef JEMALLOC_H_TYPES
384/******************************************************************************/
385#define	JEMALLOC_H_STRUCTS
386
387#include "jemalloc/internal/valgrind.h"
388#include "jemalloc/internal/util.h"
389#include "jemalloc/internal/atomic.h"
390#include "jemalloc/internal/prng.h"
391#include "jemalloc/internal/ckh.h"
392#include "jemalloc/internal/size_classes.h"
393#include "jemalloc/internal/stats.h"
394#include "jemalloc/internal/ctl.h"
395#include "jemalloc/internal/mutex.h"
396#include "jemalloc/internal/tsd.h"
397#include "jemalloc/internal/mb.h"
398#include "jemalloc/internal/bitmap.h"
399#include "jemalloc/internal/extent.h"
400#include "jemalloc/internal/arena.h"
401#include "jemalloc/internal/base.h"
402#include "jemalloc/internal/chunk.h"
403#include "jemalloc/internal/huge.h"
404#include "jemalloc/internal/rtree.h"
405#include "jemalloc/internal/tcache.h"
406#include "jemalloc/internal/hash.h"
407#include "jemalloc/internal/quarantine.h"
408#include "jemalloc/internal/prof.h"
409
410typedef struct {
411	uint64_t	allocated;
412	uint64_t	deallocated;
413} thread_allocated_t;
414/*
415 * The JEMALLOC_ARG_CONCAT() wrapper is necessary to pass {0, 0} via a cpp macro
416 * argument.
417 */
418#define	THREAD_ALLOCATED_INITIALIZER	JEMALLOC_ARG_CONCAT({0, 0})
419
420#undef JEMALLOC_H_STRUCTS
421/******************************************************************************/
422#define	JEMALLOC_H_EXTERNS
423
424extern bool	opt_abort;
425extern bool	opt_junk;
426extern size_t	opt_quarantine;
427extern bool	opt_redzone;
428extern bool	opt_utrace;
429extern bool	opt_xmalloc;
430extern bool	opt_zero;
431extern size_t	opt_narenas;
432
433extern bool	in_valgrind;
434
435/* Number of CPUs. */
436extern unsigned		ncpus;
437
438/* Protects arenas initialization (arenas, arenas_total). */
439extern malloc_mutex_t	arenas_lock;
440/*
441 * Arenas that are used to service external requests.  Not all elements of the
442 * arenas array are necessarily used; arenas are created lazily as needed.
443 *
444 * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
445 * arenas.  arenas[narenas_auto..narenas_total) are only used if the application
446 * takes some action to create them and allocate from them.
447 */
448extern arena_t		**arenas;
449extern unsigned		narenas_total;
450extern unsigned		narenas_auto; /* Read-only after initialization. */
451
452arena_t	*arenas_extend(unsigned ind);
453void	arenas_cleanup(void *arg);
454arena_t	*choose_arena_hard(void);
455void	jemalloc_prefork(void);
456void	jemalloc_postfork_parent(void);
457void	jemalloc_postfork_child(void);
458
459#include "jemalloc/internal/valgrind.h"
460#include "jemalloc/internal/util.h"
461#include "jemalloc/internal/atomic.h"
462#include "jemalloc/internal/prng.h"
463#include "jemalloc/internal/ckh.h"
464#include "jemalloc/internal/size_classes.h"
465#include "jemalloc/internal/stats.h"
466#include "jemalloc/internal/ctl.h"
467#include "jemalloc/internal/mutex.h"
468#include "jemalloc/internal/tsd.h"
469#include "jemalloc/internal/mb.h"
470#include "jemalloc/internal/bitmap.h"
471#include "jemalloc/internal/extent.h"
472#include "jemalloc/internal/arena.h"
473#include "jemalloc/internal/base.h"
474#include "jemalloc/internal/chunk.h"
475#include "jemalloc/internal/huge.h"
476#include "jemalloc/internal/rtree.h"
477#include "jemalloc/internal/tcache.h"
478#include "jemalloc/internal/hash.h"
479#include "jemalloc/internal/quarantine.h"
480#include "jemalloc/internal/prof.h"
481
482#undef JEMALLOC_H_EXTERNS
483/******************************************************************************/
484#define	JEMALLOC_H_INLINES
485
486#include "jemalloc/internal/valgrind.h"
487#include "jemalloc/internal/util.h"
488#include "jemalloc/internal/atomic.h"
489#include "jemalloc/internal/prng.h"
490#include "jemalloc/internal/ckh.h"
491#include "jemalloc/internal/size_classes.h"
492#include "jemalloc/internal/stats.h"
493#include "jemalloc/internal/ctl.h"
494#include "jemalloc/internal/mutex.h"
495#include "jemalloc/internal/tsd.h"
496#include "jemalloc/internal/mb.h"
497#include "jemalloc/internal/extent.h"
498#include "jemalloc/internal/base.h"
499#include "jemalloc/internal/chunk.h"
500#include "jemalloc/internal/huge.h"
501
502#ifndef JEMALLOC_ENABLE_INLINE
503malloc_tsd_protos(JEMALLOC_ATTR(unused), arenas, arena_t *)
504
505size_t	s2u(size_t size);
506size_t	sa2u(size_t size, size_t alignment);
507unsigned	narenas_total_get(void);
508arena_t	*choose_arena(arena_t *arena);
509#endif
510
511#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
512/*
513 * Map of pthread_self() --> arenas[???], used for selecting an arena to use
514 * for allocations.
515 */
516malloc_tsd_externs(arenas, arena_t *)
517malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, arenas, arena_t *, NULL,
518    arenas_cleanup)
519
520/*
521 * Compute usable size that would result from allocating an object with the
522 * specified size.
523 */
524JEMALLOC_ALWAYS_INLINE size_t
525s2u(size_t size)
526{
527
528	if (size <= SMALL_MAXCLASS)
529		return (arena_bin_info[SMALL_SIZE2BIN(size)].reg_size);
530	if (size <= arena_maxclass)
531		return (PAGE_CEILING(size));
532	return (CHUNK_CEILING(size));
533}
534
535/*
536 * Compute usable size that would result from allocating an object with the
537 * specified size and alignment.
538 */
539JEMALLOC_ALWAYS_INLINE size_t
540sa2u(size_t size, size_t alignment)
541{
542	size_t usize;
543
544	assert(alignment != 0 && ((alignment - 1) & alignment) == 0);
545
546	/*
547	 * Round size up to the nearest multiple of alignment.
548	 *
549	 * This done, we can take advantage of the fact that for each small
550	 * size class, every object is aligned at the smallest power of two
551	 * that is non-zero in the base two representation of the size.  For
552	 * example:
553	 *
554	 *   Size |   Base 2 | Minimum alignment
555	 *   -----+----------+------------------
556	 *     96 |  1100000 |  32
557	 *    144 | 10100000 |  32
558	 *    192 | 11000000 |  64
559	 */
560	usize = ALIGNMENT_CEILING(size, alignment);
561	/*
562	 * (usize < size) protects against the combination of maximal
563	 * alignment and size greater than maximal alignment.
564	 */
565	if (usize < size) {
566		/* size_t overflow. */
567		return (0);
568	}
569
570	if (usize <= arena_maxclass && alignment <= PAGE) {
571		if (usize <= SMALL_MAXCLASS)
572			return (arena_bin_info[SMALL_SIZE2BIN(usize)].reg_size);
573		return (PAGE_CEILING(usize));
574	} else {
575		size_t run_size;
576
577		/*
578		 * We can't achieve subpage alignment, so round up alignment
579		 * permanently; it makes later calculations simpler.
580		 */
581		alignment = PAGE_CEILING(alignment);
582		usize = PAGE_CEILING(size);
583		/*
584		 * (usize < size) protects against very large sizes within
585		 * PAGE of SIZE_T_MAX.
586		 *
587		 * (usize + alignment < usize) protects against the
588		 * combination of maximal alignment and usize large enough
589		 * to cause overflow.  This is similar to the first overflow
590		 * check above, but it needs to be repeated due to the new
591		 * usize value, which may now be *equal* to maximal
592		 * alignment, whereas before we only detected overflow if the
593		 * original size was *greater* than maximal alignment.
594		 */
595		if (usize < size || usize + alignment < usize) {
596			/* size_t overflow. */
597			return (0);
598		}
599
600		/*
601		 * Calculate the size of the over-size run that arena_palloc()
602		 * would need to allocate in order to guarantee the alignment.
603		 * If the run wouldn't fit within a chunk, round up to a huge
604		 * allocation size.
605		 */
606		run_size = usize + alignment - PAGE;
607		if (run_size <= arena_maxclass)
608			return (PAGE_CEILING(usize));
609		return (CHUNK_CEILING(usize));
610	}
611}
612
613JEMALLOC_INLINE unsigned
614narenas_total_get(void)
615{
616	unsigned narenas;
617
618	malloc_mutex_lock(&arenas_lock);
619	narenas = narenas_total;
620	malloc_mutex_unlock(&arenas_lock);
621
622	return (narenas);
623}
624
625/* Choose an arena based on a per-thread value. */
626JEMALLOC_INLINE arena_t *
627choose_arena(arena_t *arena)
628{
629	arena_t *ret;
630
631	if (arena != NULL)
632		return (arena);
633
634	if ((ret = *arenas_tsd_get()) == NULL) {
635		ret = choose_arena_hard();
636		assert(ret != NULL);
637	}
638
639	return (ret);
640}
641#endif
642
643#include "jemalloc/internal/bitmap.h"
644#include "jemalloc/internal/rtree.h"
645/*
646 * Include arena.h twice in order to resolve circular dependencies with
647 * tcache.h.
648 */
649#define	JEMALLOC_ARENA_INLINE_A
650#include "jemalloc/internal/arena.h"
651#undef JEMALLOC_ARENA_INLINE_A
652#include "jemalloc/internal/tcache.h"
653#define	JEMALLOC_ARENA_INLINE_B
654#include "jemalloc/internal/arena.h"
655#undef JEMALLOC_ARENA_INLINE_B
656#include "jemalloc/internal/hash.h"
657#include "jemalloc/internal/quarantine.h"
658
659#ifndef JEMALLOC_ENABLE_INLINE
660void	*imalloct(size_t size, bool try_tcache, arena_t *arena);
661void	*imalloc(size_t size);
662void	*icalloct(size_t size, bool try_tcache, arena_t *arena);
663void	*icalloc(size_t size);
664void	*ipalloct(size_t usize, size_t alignment, bool zero, bool try_tcache,
665    arena_t *arena);
666void	*ipalloc(size_t usize, size_t alignment, bool zero);
667size_t	isalloc(const void *ptr, bool demote);
668size_t	ivsalloc(const void *ptr, bool demote);
669size_t	u2rz(size_t usize);
670size_t	p2rz(const void *ptr);
671void	idalloct(void *ptr, bool try_tcache);
672void	idalloc(void *ptr);
673void	iqalloct(void *ptr, bool try_tcache);
674void	iqalloc(void *ptr);
675void	*iralloct_realign(void *ptr, size_t oldsize, size_t size, size_t extra,
676    size_t alignment, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc,
677    arena_t *arena);
678void	*iralloct(void *ptr, size_t size, size_t extra, size_t alignment,
679    bool zero, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena);
680void	*iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
681    bool zero);
682bool	ixalloc(void *ptr, size_t size, size_t extra, size_t alignment,
683    bool zero);
684malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t)
685#endif
686
687#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
688JEMALLOC_ALWAYS_INLINE void *
689imalloct(size_t size, bool try_tcache, arena_t *arena)
690{
691
692	assert(size != 0);
693
694	if (size <= arena_maxclass)
695		return (arena_malloc(arena, size, false, try_tcache));
696	else
697		return (huge_malloc(size, false, huge_dss_prec_get(arena)));
698}
699
700JEMALLOC_ALWAYS_INLINE void *
701imalloc(size_t size)
702{
703
704	return (imalloct(size, true, NULL));
705}
706
707JEMALLOC_ALWAYS_INLINE void *
708icalloct(size_t size, bool try_tcache, arena_t *arena)
709{
710
711	if (size <= arena_maxclass)
712		return (arena_malloc(arena, size, true, try_tcache));
713	else
714		return (huge_malloc(size, true, huge_dss_prec_get(arena)));
715}
716
717JEMALLOC_ALWAYS_INLINE void *
718icalloc(size_t size)
719{
720
721	return (icalloct(size, true, NULL));
722}
723
724JEMALLOC_ALWAYS_INLINE void *
725ipalloct(size_t usize, size_t alignment, bool zero, bool try_tcache,
726    arena_t *arena)
727{
728	void *ret;
729
730	assert(usize != 0);
731	assert(usize == sa2u(usize, alignment));
732
733	if (usize <= arena_maxclass && alignment <= PAGE)
734		ret = arena_malloc(arena, usize, zero, try_tcache);
735	else {
736		if (usize <= arena_maxclass) {
737			ret = arena_palloc(choose_arena(arena), usize,
738			    alignment, zero);
739		} else if (alignment <= chunksize)
740			ret = huge_malloc(usize, zero, huge_dss_prec_get(arena));
741		else
742			ret = huge_palloc(usize, alignment, zero, huge_dss_prec_get(arena));
743	}
744
745	assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
746	return (ret);
747}
748
749JEMALLOC_ALWAYS_INLINE void *
750ipalloc(size_t usize, size_t alignment, bool zero)
751{
752
753	return (ipalloct(usize, alignment, zero, true, NULL));
754}
755
756/*
757 * Typical usage:
758 *   void *ptr = [...]
759 *   size_t sz = isalloc(ptr, config_prof);
760 */
761JEMALLOC_ALWAYS_INLINE size_t
762isalloc(const void *ptr, bool demote)
763{
764	size_t ret;
765	arena_chunk_t *chunk;
766
767	assert(ptr != NULL);
768	/* Demotion only makes sense if config_prof is true. */
769	assert(config_prof || demote == false);
770
771	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
772	if (chunk != ptr)
773		ret = arena_salloc(ptr, demote);
774	else
775		ret = huge_salloc(ptr);
776
777	return (ret);
778}
779
780JEMALLOC_ALWAYS_INLINE size_t
781ivsalloc(const void *ptr, bool demote)
782{
783
784	/* Return 0 if ptr is not within a chunk managed by jemalloc. */
785	if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == 0)
786		return (0);
787
788	return (isalloc(ptr, demote));
789}
790
791JEMALLOC_INLINE size_t
792u2rz(size_t usize)
793{
794	size_t ret;
795
796	if (usize <= SMALL_MAXCLASS) {
797		size_t binind = SMALL_SIZE2BIN(usize);
798		ret = arena_bin_info[binind].redzone_size;
799	} else
800		ret = 0;
801
802	return (ret);
803}
804
805JEMALLOC_INLINE size_t
806p2rz(const void *ptr)
807{
808	size_t usize = isalloc(ptr, false);
809
810	return (u2rz(usize));
811}
812
813JEMALLOC_ALWAYS_INLINE void
814idalloct(void *ptr, bool try_tcache)
815{
816	arena_chunk_t *chunk;
817
818	assert(ptr != NULL);
819
820	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
821	if (chunk != ptr)
822		arena_dalloc(chunk, ptr, try_tcache);
823	else
824		huge_dalloc(ptr, true);
825}
826
827JEMALLOC_ALWAYS_INLINE void
828idalloc(void *ptr)
829{
830
831	idalloct(ptr, true);
832}
833
834JEMALLOC_ALWAYS_INLINE void
835iqalloct(void *ptr, bool try_tcache)
836{
837
838	if (config_fill && opt_quarantine)
839		quarantine(ptr);
840	else
841		idalloct(ptr, try_tcache);
842}
843
844JEMALLOC_ALWAYS_INLINE void
845iqalloc(void *ptr)
846{
847
848	iqalloct(ptr, true);
849}
850
851JEMALLOC_ALWAYS_INLINE void *
852iralloct_realign(void *ptr, size_t oldsize, size_t size, size_t extra,
853    size_t alignment, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc,
854    arena_t *arena)
855{
856	void *p;
857	size_t usize, copysize;
858
859	usize = sa2u(size + extra, alignment);
860	if (usize == 0)
861		return (NULL);
862	p = ipalloct(usize, alignment, zero, try_tcache_alloc, arena);
863	if (p == NULL) {
864		if (extra == 0)
865			return (NULL);
866		/* Try again, without extra this time. */
867		usize = sa2u(size, alignment);
868		if (usize == 0)
869			return (NULL);
870		p = ipalloct(usize, alignment, zero, try_tcache_alloc, arena);
871		if (p == NULL)
872			return (NULL);
873	}
874	/*
875	 * Copy at most size bytes (not size+extra), since the caller has no
876	 * expectation that the extra bytes will be reliably preserved.
877	 */
878	copysize = (size < oldsize) ? size : oldsize;
879	memcpy(p, ptr, copysize);
880	iqalloct(ptr, try_tcache_dalloc);
881	return (p);
882}
883
884JEMALLOC_ALWAYS_INLINE void *
885iralloct(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
886    bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena)
887{
888	size_t oldsize;
889
890	assert(ptr != NULL);
891	assert(size != 0);
892
893	oldsize = isalloc(ptr, config_prof);
894
895	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
896	    != 0) {
897		/*
898		 * Existing object alignment is inadequate; allocate new space
899		 * and copy.
900		 */
901		return (iralloct_realign(ptr, oldsize, size, extra, alignment,
902		    zero, try_tcache_alloc, try_tcache_dalloc, arena));
903	}
904
905	if (size + extra <= arena_maxclass) {
906		return (arena_ralloc(arena, ptr, oldsize, size, extra,
907		    alignment, zero, try_tcache_alloc,
908		    try_tcache_dalloc));
909	} else {
910		return (huge_ralloc(ptr, oldsize, size, extra,
911		    alignment, zero, try_tcache_dalloc, huge_dss_prec_get(arena)));
912	}
913}
914
915JEMALLOC_ALWAYS_INLINE void *
916iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero)
917{
918
919	return (iralloct(ptr, size, extra, alignment, zero, true, true, NULL));
920}
921
922JEMALLOC_ALWAYS_INLINE bool
923ixalloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero)
924{
925	size_t oldsize;
926
927	assert(ptr != NULL);
928	assert(size != 0);
929
930	oldsize = isalloc(ptr, config_prof);
931	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
932	    != 0) {
933		/* Existing object alignment is inadequate. */
934		return (true);
935	}
936
937	if (size <= arena_maxclass)
938		return (arena_ralloc_no_move(ptr, oldsize, size, extra, zero));
939	else
940		return (huge_ralloc_no_move(ptr, oldsize, size, extra));
941}
942
943malloc_tsd_externs(thread_allocated, thread_allocated_t)
944malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, thread_allocated, thread_allocated_t,
945    THREAD_ALLOCATED_INITIALIZER, malloc_tsd_no_cleanup)
946#endif
947
948#include "jemalloc/internal/prof.h"
949
950#undef JEMALLOC_H_INLINES
951/******************************************************************************/
952#endif /* JEMALLOC_INTERNAL_H */
953