jemalloc_internal.h.in revision dc1bed62272045651e4bbf2cd85f6fccaf7b1331
#ifndef JEMALLOC_INTERNAL_H
#define JEMALLOC_INTERNAL_H
#include <math.h>
#ifdef _WIN32
#  include <windows.h>
#  define ENOENT ERROR_PATH_NOT_FOUND
#  define EINVAL ERROR_BAD_ARGUMENTS
#  define EAGAIN ERROR_OUTOFMEMORY
#  define EPERM  ERROR_WRITE_FAULT
#  define EFAULT ERROR_INVALID_ADDRESS
#  define ENOMEM ERROR_NOT_ENOUGH_MEMORY
#  undef ERANGE
#  define ERANGE ERROR_INVALID_DATA
#else
#  include <sys/param.h>
#  include <sys/mman.h>
#  include <sys/syscall.h>
#  if !defined(SYS_write) && defined(__NR_write)
#    define SYS_write __NR_write
#  endif
#  include <sys/uio.h>
#  include <pthread.h>
#  include <errno.h>
#endif
#include <sys/types.h>

#include <limits.h>
#ifndef SIZE_T_MAX
#  define SIZE_T_MAX	SIZE_MAX
#endif
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>
#ifndef offsetof
#  define offsetof(type, member)	((size_t)&(((type *)NULL)->member))
#endif
#include <inttypes.h>
#include <string.h>
#include <strings.h>
#include <ctype.h>
#ifdef _MSC_VER
#  include <io.h>
typedef intptr_t ssize_t;
#  define PATH_MAX 1024
#  define STDERR_FILENO 2
#  define __func__ __FUNCTION__
/* Disable warnings about deprecated system functions */
#  pragma warning(disable: 4996)
#else
#  include <unistd.h>
#endif
#include <fcntl.h>

#include "jemalloc_internal_defs.h"

#ifdef JEMALLOC_UTRACE
#include <sys/ktrace.h>
#endif

#ifdef JEMALLOC_VALGRIND
#include <valgrind/valgrind.h>
#include <valgrind/memcheck.h>
#endif

#define	JEMALLOC_NO_DEMANGLE
#ifdef JEMALLOC_JET
#  define JEMALLOC_N(n) jet_##n
#  include "jemalloc/internal/public_namespace.h"
#  define JEMALLOC_NO_RENAME
#  include "../jemalloc@install_suffix@.h"
#else
#  define JEMALLOC_N(n) @private_namespace@##n
#  include "../jemalloc@install_suffix@.h"
#endif
#include "jemalloc/internal/private_namespace.h"

static const bool config_debug =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
static const bool config_dss =
#ifdef JEMALLOC_DSS
    true
#else
    false
#endif
    ;
static const bool config_fill =
#ifdef JEMALLOC_FILL
    true
#else
    false
#endif
    ;
static const bool config_lazy_lock =
#ifdef JEMALLOC_LAZY_LOCK
    true
#else
    false
#endif
    ;
static const bool config_prof =
#ifdef JEMALLOC_PROF
    true
#else
    false
#endif
    ;
static const bool config_prof_libgcc =
#ifdef JEMALLOC_PROF_LIBGCC
    true
#else
    false
#endif
    ;
static const bool config_prof_libunwind =
#ifdef JEMALLOC_PROF_LIBUNWIND
    true
#else
    false
#endif
    ;
static const bool config_mremap =
#ifdef JEMALLOC_MREMAP
    true
#else
    false
#endif
    ;
static const bool config_munmap =
#ifdef JEMALLOC_MUNMAP
    true
#else
    false
#endif
    ;
static const bool config_stats =
#ifdef JEMALLOC_STATS
    true
#else
    false
#endif
    ;
static const bool config_tcache =
#ifdef JEMALLOC_TCACHE
    true
#else
    false
#endif
    ;
static const bool config_tls =
#ifdef JEMALLOC_TLS
    true
#else
    false
#endif
    ;
static const bool config_utrace =
#ifdef JEMALLOC_UTRACE
    true
#else
    false
#endif
    ;
static const bool config_valgrind =
#ifdef JEMALLOC_VALGRIND
    true
#else
    false
#endif
    ;
static const bool config_xmalloc =
#ifdef JEMALLOC_XMALLOC
    true
#else
    false
#endif
    ;
static const bool config_ivsalloc =
#ifdef JEMALLOC_IVSALLOC
    true
#else
    false
#endif
    ;

#ifdef JEMALLOC_ATOMIC9
#include <machine/atomic.h>
#endif

#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
#include <libkern/OSAtomic.h>
#endif

#ifdef JEMALLOC_ZONE
#include <mach/mach_error.h>
#include <mach/mach_init.h>
#include <mach/vm_map.h>
#include <malloc/malloc.h>
#endif

#define	RB_COMPACT
#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/qr.h"
#include "jemalloc/internal/ql.h"

/*
 * jemalloc can conceptually be broken into components (arena, tcache, etc.),
 * but there are circular dependencies that cannot be broken without
 * substantial performance degradation.  In order to reduce the effect on
 * visual code flow, read the header files in multiple passes, with one of the
 * following cpp variables defined during each pass:
 *
 *   JEMALLOC_H_TYPES   : Preprocessor-defined constants and pseudo-opaque data
 *                        types.
 *   JEMALLOC_H_STRUCTS : Data structures.
 *   JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
 *   JEMALLOC_H_INLINES : Inline functions.
 */
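/*
 * Illustrative sketch (not part of the original header): under this scheme,
 * each component header is laid out so that each pass picks up exactly one
 * section, e.g.:
 *
 *   #ifdef JEMALLOC_H_TYPES
 *   ...constants and typedefs...
 *   #endif
 *   #ifdef JEMALLOC_H_STRUCTS
 *   ...struct definitions...
 *   #endif
 *   #ifdef JEMALLOC_H_EXTERNS
 *   ...extern data and prototypes...
 *   #endif
 *   #ifdef JEMALLOC_H_INLINES
 *   ...inline functions...
 *   #endif
 */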
/******************************************************************************/
#define JEMALLOC_H_TYPES

#include "jemalloc/internal/jemalloc_internal_macros.h"

#define	ALLOCM_LG_ALIGN_MASK	((int)0x3f)

/* Smallest size class to support. */
#define	LG_TINY_MIN		3
#define	TINY_MIN		(1U << LG_TINY_MIN)

/*
 * Minimum alignment of allocations is 2^LG_QUANTUM bytes (ignoring tiny size
 * classes).
 */
#ifndef LG_QUANTUM
#  if (defined(__i386__) || defined(_M_IX86))
#    define LG_QUANTUM		4
#  endif
#  ifdef __ia64__
#    define LG_QUANTUM		4
#  endif
#  ifdef __alpha__
#    define LG_QUANTUM		4
#  endif
#  ifdef __sparc64__
#    define LG_QUANTUM		4
#  endif
#  if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64))
#    define LG_QUANTUM		4
#  endif
#  ifdef __arm__
#    define LG_QUANTUM		3
#  endif
#  ifdef __aarch64__
#    define LG_QUANTUM		4
#  endif
#  ifdef __hppa__
#    define LG_QUANTUM		4
#  endif
#  ifdef __mips__
#    define LG_QUANTUM		3
#  endif
#  ifdef __powerpc__
#    define LG_QUANTUM		4
#  endif
#  ifdef __s390__
#    define LG_QUANTUM		4
#  endif
#  ifdef __SH4__
#    define LG_QUANTUM		4
#  endif
#  ifdef __tile__
#    define LG_QUANTUM		4
#  endif
#  ifndef LG_QUANTUM
#    error "No LG_QUANTUM definition for architecture; specify via CPPFLAGS"
#  endif
#endif

#define	QUANTUM			((size_t)(1U << LG_QUANTUM))
#define	QUANTUM_MASK		(QUANTUM - 1)

/* Return the smallest quantum multiple that is >= a. */
#define	QUANTUM_CEILING(a)						\
	(((a) + QUANTUM_MASK) & ~QUANTUM_MASK)
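
/*
 * Worked example (illustrative): with LG_QUANTUM == 4, QUANTUM == 16 and
 * QUANTUM_MASK == 0xf, so QUANTUM_CEILING(17) == (17 + 15) & ~15 == 32,
 * while QUANTUM_CEILING(16) stays 16.  The mask trick relies on QUANTUM
 * being a power of two; the *_CEILING() macros below all use the same
 * pattern.
 */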

#define	LONG			((size_t)(1U << LG_SIZEOF_LONG))
#define	LONG_MASK		(LONG - 1)

/* Return the smallest long multiple that is >= a. */
#define	LONG_CEILING(a)							\
	(((a) + LONG_MASK) & ~LONG_MASK)

#define	SIZEOF_PTR		(1U << LG_SIZEOF_PTR)
#define	PTR_MASK		(SIZEOF_PTR - 1)

/* Return the smallest (void *) multiple that is >= a. */
#define	PTR_CEILING(a)							\
	(((a) + PTR_MASK) & ~PTR_MASK)

/*
 * Maximum size of L1 cache line.  This is used to avoid cache line aliasing.
 * In addition, this controls the spacing of cacheline-spaced size classes.
 *
 * CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can
 * only handle raw constants.
 */
#define	LG_CACHELINE		6
#define	CACHELINE		64
#define	CACHELINE_MASK		(CACHELINE - 1)

/* Return the smallest cacheline multiple that is >= s. */
#define	CACHELINE_CEILING(s)						\
	(((s) + CACHELINE_MASK) & ~CACHELINE_MASK)

/* Page size.  STATIC_PAGE_SHIFT is determined by the configure script. */
#ifdef PAGE_MASK
#  undef PAGE_MASK
#endif
#define	LG_PAGE		STATIC_PAGE_SHIFT
#define	PAGE		((size_t)(1U << STATIC_PAGE_SHIFT))
#define	PAGE_MASK	((size_t)(PAGE - 1))

/* Return the smallest pagesize multiple that is >= s. */
#define	PAGE_CEILING(s)							\
	(((s) + PAGE_MASK) & ~PAGE_MASK)

/* Return the nearest aligned address at or below a. */
#define	ALIGNMENT_ADDR2BASE(a, alignment)				\
	((void *)((uintptr_t)(a) & (-(alignment))))

/* Return the offset between a and the nearest aligned address at or below a. */
#define	ALIGNMENT_ADDR2OFFSET(a, alignment)				\
	((size_t)((uintptr_t)(a) & (alignment - 1)))

/* Return the smallest alignment multiple that is >= s. */
#define	ALIGNMENT_CEILING(s, alignment)					\
	(((s) + (alignment - 1)) & (-(alignment)))
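
/*
 * Worked example (illustrative): for a == 0x1234 and a power-of-two
 * alignment of 0x100, ALIGNMENT_ADDR2BASE(a, 0x100) == 0x1200,
 * ALIGNMENT_ADDR2OFFSET(a, 0x100) == 0x34, and
 * ALIGNMENT_CEILING(0x1234, 0x100) == 0x1300.  All three assume alignment
 * is a power of two, which sa2u() below asserts.
 */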

/* Declare a variable length array */
#if __STDC_VERSION__ < 199901L
#  ifdef _MSC_VER
#    include <malloc.h>
#    define alloca _alloca
#  else
#    ifdef JEMALLOC_HAS_ALLOCA_H
#      include <alloca.h>
#    else
#      include <stdlib.h>
#    endif
#  endif
#  define VARIABLE_ARRAY(type, name, count) \
	type *name = alloca(sizeof(type) * count)
#else
#  define VARIABLE_ARRAY(type, name, count) type name[count]
#endif
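
/*
 * Usage sketch (illustrative): VARIABLE_ARRAY(int, vec, n) expands to
 * "int vec[n]" under C99, and to "int *vec = alloca(sizeof(int) * n)"
 * otherwise.  In both cases the storage has automatic lifetime, so it must
 * not be freed or returned to the caller.
 */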

#ifdef JEMALLOC_VALGRIND
/*
 * The JEMALLOC_VALGRIND_*() macros must be macros rather than functions
 * so that when Valgrind reports errors, there are no extra stack frames
 * in the backtraces.
 *
 * The size that is reported to valgrind must be consistent through a chain of
 * malloc..realloc..realloc calls.  Request size isn't recorded anywhere in
 * jemalloc, so it is critical that all callers of these macros provide usize
 * rather than request size.  As a result, buffer overflow detection is
 * technically weakened for the standard API, though it is generally accepted
 * practice to consider any extra bytes reported by malloc_usable_size() as
 * usable space.
 */
#define	JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do {		\
	if (config_valgrind && opt_valgrind && cond)			\
		VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(ptr), zero);	\
} while (0)
#define	JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize,	\
    old_rzsize, zero)  do {						\
	if (config_valgrind && opt_valgrind) {				\
		size_t rzsize = p2rz(ptr);				\
									\
		if (ptr == old_ptr) {					\
			VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize,	\
			    usize, rzsize);				\
			if (zero && old_usize < usize) {		\
				VALGRIND_MAKE_MEM_DEFINED(		\
				    (void *)((uintptr_t)ptr +		\
				    old_usize), usize - old_usize);	\
			}						\
		} else {						\
			if (old_ptr != NULL) {				\
				VALGRIND_FREELIKE_BLOCK(old_ptr,	\
				    old_rzsize);			\
			}						\
			if (ptr != NULL) {				\
				size_t copy_size = (old_usize < usize)	\
				    ?  old_usize : usize;		\
				size_t tail_size = usize - copy_size;	\
				VALGRIND_MALLOCLIKE_BLOCK(ptr, usize,	\
				    rzsize, false);			\
				if (copy_size > 0) {			\
					VALGRIND_MAKE_MEM_DEFINED(ptr,	\
					    copy_size);			\
				}					\
				if (zero && tail_size > 0) {		\
					VALGRIND_MAKE_MEM_DEFINED(	\
					    (void *)((uintptr_t)ptr +	\
					    copy_size), tail_size);	\
				}					\
			}						\
		}							\
	}								\
} while (0)
#define	JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {			\
	if (config_valgrind && opt_valgrind)				\
		VALGRIND_FREELIKE_BLOCK(ptr, rzsize);			\
} while (0)
#else
#define	RUNNING_ON_VALGRIND	((unsigned)0)
#define	VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed) \
    do {} while (0)
#define	VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB) \
    do {} while (0)
#define	VALGRIND_FREELIKE_BLOCK(addr, rzB) do {} while (0)
#define	VALGRIND_MAKE_MEM_NOACCESS(_qzz_addr, _qzz_len) do {} while (0)
#define	VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr, _qzz_len) do {} while (0)
#define	VALGRIND_MAKE_MEM_DEFINED(_qzz_addr, _qzz_len) do {} while (0)
#define	JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do {} while (0)
#define	JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize,	\
    old_rzsize, zero) do {} while (0)
#define	JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {} while (0)
#endif
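
/*
 * Usage sketch (illustrative, not from the original source): callers are
 * expected to bracket the underlying operations with these macros, always
 * passing the usable size, and to capture the redzone size before freeing:
 *
 *   void *ret = imalloc(usize);
 *   JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
 *   ...
 *   size_t rzsize = p2rz(ret);
 *   iqalloc(ret);
 *   JEMALLOC_VALGRIND_FREE(ret, rzsize);
 */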

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_TYPES
/******************************************************************************/
#define JEMALLOC_H_STRUCTS

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

typedef struct {
	uint64_t	allocated;
	uint64_t	deallocated;
} thread_allocated_t;
/*
 * The JEMALLOC_ARG_CONCAT() wrapper is necessary to pass {0, 0} via a cpp macro
 * argument.
 */
#define	THREAD_ALLOCATED_INITIALIZER	JEMALLOC_ARG_CONCAT({0, 0})
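
/*
 * Illustrative note (not from the original source): without the wrapper, the
 * comma in {0, 0} would be parsed as a macro argument separator when the
 * initializer is passed through another macro, as done near the end of this
 * header:
 *
 *   malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, thread_allocated,
 *       thread_allocated_t, THREAD_ALLOCATED_INITIALIZER,
 *       malloc_tsd_no_cleanup)
 */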

#undef JEMALLOC_H_STRUCTS
/******************************************************************************/
#define JEMALLOC_H_EXTERNS

extern bool	opt_abort;
extern bool	opt_junk;
extern size_t	opt_quarantine;
extern bool	opt_redzone;
extern bool	opt_utrace;
extern bool	opt_valgrind;
extern bool	opt_xmalloc;
extern bool	opt_zero;
extern size_t	opt_narenas;

/* Number of CPUs. */
extern unsigned		ncpus;

/* Protects arenas initialization (arenas, arenas_total). */
extern malloc_mutex_t	arenas_lock;
/*
 * Arenas that are used to service external requests.  Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 *
 * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
 * arenas.  arenas[narenas_auto..narenas_total) are only used if the application
 * takes some action to create them and allocate from them.
 */
extern arena_t		**arenas;
extern unsigned		narenas_total;
extern unsigned		narenas_auto; /* Read-only after initialization. */

arena_t	*arenas_extend(unsigned ind);
void	arenas_cleanup(void *arg);
arena_t	*choose_arena_hard(void);
void	jemalloc_prefork(void);
void	jemalloc_postfork_parent(void);
void	jemalloc_postfork_child(void);

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_EXTERNS
/******************************************************************************/
#define JEMALLOC_H_INLINES

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"

#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), arenas, arena_t *)

size_t	s2u(size_t size);
size_t	sa2u(size_t size, size_t alignment);
unsigned	narenas_total_get(void);
arena_t	*choose_arena(arena_t *arena);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
/*
 * Map of pthread_self() --> arenas[???], used for selecting an arena to use
 * for allocations.
 */
malloc_tsd_externs(arenas, arena_t *)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, arenas, arena_t *, NULL,
    arenas_cleanup)

/*
 * Compute usable size that would result from allocating an object with the
 * specified size.
 */
JEMALLOC_ALWAYS_INLINE size_t
s2u(size_t size)
{

	if (size <= SMALL_MAXCLASS)
		return (arena_bin_info[SMALL_SIZE2BIN(size)].reg_size);
	if (size <= arena_maxclass)
		return (PAGE_CEILING(size));
	return (CHUNK_CEILING(size));
}
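
/*
 * Worked example (illustrative, assuming 4 KiB pages and the default
 * quantum-spaced size classes): s2u(100) returns a small class size such as
 * 112; s2u(5000) returns PAGE_CEILING(5000) == 8192; requests above
 * arena_maxclass are rounded up to a multiple of the chunk size.
 */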

/*
 * Compute usable size that would result from allocating an object with the
 * specified size and alignment.
 */
JEMALLOC_ALWAYS_INLINE size_t
sa2u(size_t size, size_t alignment)
{
	size_t usize;

	assert(alignment != 0 && ((alignment - 1) & alignment) == 0);

	/*
	 * Round size up to the nearest multiple of alignment.
	 *
	 * This done, we can take advantage of the fact that for each small
	 * size class, every object is aligned at the smallest power of two
	 * that is non-zero in the base two representation of the size.  For
	 * example:
	 *
	 *   Size |   Base 2 | Minimum alignment
	 *   -----+----------+------------------
	 *     96 |  1100000 |  32
	 *    144 | 10100000 |  32
	 *    192 | 11000000 |  64
	 */
	usize = ALIGNMENT_CEILING(size, alignment);
	/*
	 * (usize < size) protects against the combination of maximal
	 * alignment and size greater than maximal alignment.
	 */
	if (usize < size) {
		/* size_t overflow. */
		return (0);
	}

	if (usize <= arena_maxclass && alignment <= PAGE) {
		if (usize <= SMALL_MAXCLASS)
			return (arena_bin_info[SMALL_SIZE2BIN(usize)].reg_size);
		return (PAGE_CEILING(usize));
	} else {
		size_t run_size;

		/*
		 * We can't achieve subpage alignment, so round up alignment
		 * permanently; it makes later calculations simpler.
		 */
		alignment = PAGE_CEILING(alignment);
		usize = PAGE_CEILING(size);
		/*
		 * (usize < size) protects against very large sizes within
		 * PAGE of SIZE_T_MAX.
		 *
		 * (usize + alignment < usize) protects against the
		 * combination of maximal alignment and usize large enough
		 * to cause overflow.  This is similar to the first overflow
		 * check above, but it needs to be repeated due to the new
		 * usize value, which may now be *equal* to maximal
		 * alignment, whereas before we only detected overflow if the
		 * original size was *greater* than maximal alignment.
		 */
		if (usize < size || usize + alignment < usize) {
			/* size_t overflow. */
			return (0);
		}

		/*
		 * Calculate the size of the over-size run that arena_palloc()
		 * would need to allocate in order to guarantee the alignment.
		 * If the run wouldn't fit within a chunk, round up to a huge
		 * allocation size.
		 */
		run_size = usize + alignment - PAGE;
		if (run_size <= arena_maxclass)
			return (PAGE_CEILING(usize));
		return (CHUNK_CEILING(usize));
	}
}
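
/*
 * Worked example (illustrative): sa2u(100, 64) rounds 100 up to
 * ALIGNMENT_CEILING(100, 64) == 128; since 128 is a small size class whose
 * objects are naturally 128-byte aligned, the usable size is 128.  Only when
 * alignment exceeds PAGE does the over-size run arithmetic above come into
 * play.
 */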

JEMALLOC_INLINE unsigned
narenas_total_get(void)
{
	unsigned narenas;

	malloc_mutex_lock(&arenas_lock);
	narenas = narenas_total;
	malloc_mutex_unlock(&arenas_lock);

	return (narenas);
}

/* Choose an arena based on a per-thread value. */
JEMALLOC_INLINE arena_t *
choose_arena(arena_t *arena)
{
	arena_t *ret;

	if (arena != NULL)
		return (arena);

	if ((ret = *arenas_tsd_get()) == NULL) {
		ret = choose_arena_hard();
		assert(ret != NULL);
	}

	return (ret);
}
#endif

#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/rtree.h"
/*
 * Include arena.h twice in order to resolve circular dependencies with
 * tcache.h.
 */
#define	JEMALLOC_ARENA_INLINE_A
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_A
#include "jemalloc/internal/tcache.h"
#define	JEMALLOC_ARENA_INLINE_B
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_B
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"

#ifndef JEMALLOC_ENABLE_INLINE
void	*imallocx(size_t size, bool try_tcache, arena_t *arena);
void	*imalloc(size_t size);
void	*icallocx(size_t size, bool try_tcache, arena_t *arena);
void	*icalloc(size_t size);
void	*ipallocx(size_t usize, size_t alignment, bool zero, bool try_tcache,
    arena_t *arena);
void	*ipalloc(size_t usize, size_t alignment, bool zero);
size_t	isalloc(const void *ptr, bool demote);
size_t	ivsalloc(const void *ptr, bool demote);
size_t	u2rz(size_t usize);
size_t	p2rz(const void *ptr);
void	idallocx(void *ptr, bool try_tcache);
void	idalloc(void *ptr);
void	iqallocx(void *ptr, bool try_tcache);
void	iqalloc(void *ptr);
void	*irallocx(void *ptr, size_t size, size_t extra, size_t alignment,
    bool zero, bool no_move, bool try_tcache_alloc, bool try_tcache_dalloc,
    arena_t *arena);
void	*iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
    bool zero, bool no_move);
malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t)
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_ALWAYS_INLINE void *
imallocx(size_t size, bool try_tcache, arena_t *arena)
{

	assert(size != 0);

	if (size <= arena_maxclass)
		return (arena_malloc(arena, size, false, try_tcache));
	else
		return (huge_malloc(size, false));
}

JEMALLOC_ALWAYS_INLINE void *
imalloc(size_t size)
{

	return (imallocx(size, true, NULL));
}

JEMALLOC_ALWAYS_INLINE void *
icallocx(size_t size, bool try_tcache, arena_t *arena)
{

	if (size <= arena_maxclass)
		return (arena_malloc(arena, size, true, try_tcache));
	else
		return (huge_malloc(size, true));
}

JEMALLOC_ALWAYS_INLINE void *
icalloc(size_t size)
{

	return (icallocx(size, true, NULL));
}

JEMALLOC_ALWAYS_INLINE void *
ipallocx(size_t usize, size_t alignment, bool zero, bool try_tcache,
    arena_t *arena)
{
	void *ret;

	assert(usize != 0);
	assert(usize == sa2u(usize, alignment));

	if (usize <= arena_maxclass && alignment <= PAGE)
		ret = arena_malloc(arena, usize, zero, try_tcache);
	else {
		if (usize <= arena_maxclass) {
			ret = arena_palloc(choose_arena(arena), usize,
			    alignment, zero);
		} else if (alignment <= chunksize)
			ret = huge_malloc(usize, zero);
		else
			ret = huge_palloc(usize, alignment, zero);
	}

	assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
	return (ret);
}

JEMALLOC_ALWAYS_INLINE void *
ipalloc(size_t usize, size_t alignment, bool zero)
{

	return (ipallocx(usize, alignment, zero, true, NULL));
}

/*
 * Typical usage:
 *   void *ptr = [...]
 *   size_t sz = isalloc(ptr, config_prof);
 */
JEMALLOC_ALWAYS_INLINE size_t
isalloc(const void *ptr, bool demote)
{
	size_t ret;
	arena_chunk_t *chunk;

	assert(ptr != NULL);
	/* Demotion only makes sense if config_prof is true. */
	assert(config_prof || demote == false);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr)
		ret = arena_salloc(ptr, demote);
	else
		ret = huge_salloc(ptr);

	return (ret);
}

JEMALLOC_ALWAYS_INLINE size_t
ivsalloc(const void *ptr, bool demote)
{

	/* Return 0 if ptr is not within a chunk managed by jemalloc. */
	if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == NULL)
		return (0);

	return (isalloc(ptr, demote));
}

JEMALLOC_INLINE size_t
u2rz(size_t usize)
{
	size_t ret;

	if (usize <= SMALL_MAXCLASS) {
		size_t binind = SMALL_SIZE2BIN(usize);
		ret = arena_bin_info[binind].redzone_size;
	} else
		ret = 0;

	return (ret);
}

JEMALLOC_INLINE size_t
p2rz(const void *ptr)
{
	size_t usize = isalloc(ptr, false);

	return (u2rz(usize));
}

JEMALLOC_ALWAYS_INLINE void
idallocx(void *ptr, bool try_tcache)
{
	arena_chunk_t *chunk;

	assert(ptr != NULL);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr)
		arena_dalloc(chunk->arena, chunk, ptr, try_tcache);
	else
		huge_dalloc(ptr, true);
}

JEMALLOC_ALWAYS_INLINE void
idalloc(void *ptr)
{

	idallocx(ptr, true);
}

JEMALLOC_ALWAYS_INLINE void
iqallocx(void *ptr, bool try_tcache)
{

	if (config_fill && opt_quarantine)
		quarantine(ptr);
	else
		idallocx(ptr, try_tcache);
}

JEMALLOC_ALWAYS_INLINE void
iqalloc(void *ptr)
{

	iqallocx(ptr, true);
}

JEMALLOC_ALWAYS_INLINE void *
irallocx(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
    bool no_move, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena)
{
	void *ret;
	size_t oldsize;

	assert(ptr != NULL);
	assert(size != 0);

	oldsize = isalloc(ptr, config_prof);

	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
	    != 0) {
		size_t usize, copysize;

		/*
		 * Existing object alignment is inadequate; allocate new space
		 * and copy.
		 */
		if (no_move)
			return (NULL);
		usize = sa2u(size + extra, alignment);
		if (usize == 0)
			return (NULL);
		ret = ipallocx(usize, alignment, zero, try_tcache_alloc, arena);
		if (ret == NULL) {
			if (extra == 0)
				return (NULL);
			/* Try again, without extra this time. */
			usize = sa2u(size, alignment);
			if (usize == 0)
				return (NULL);
			ret = ipallocx(usize, alignment, zero, try_tcache_alloc,
			    arena);
			if (ret == NULL)
				return (NULL);
		}
		/*
		 * Copy at most size bytes (not size+extra), since the caller
		 * has no expectation that the extra bytes will be reliably
		 * preserved.
		 */
		copysize = (size < oldsize) ? size : oldsize;
		memcpy(ret, ptr, copysize);
		iqallocx(ptr, try_tcache_dalloc);
		return (ret);
	}

	if (no_move) {
		if (size <= arena_maxclass) {
			return (arena_ralloc_no_move(ptr, oldsize, size,
			    extra, zero));
		} else {
			return (huge_ralloc_no_move(ptr, oldsize, size,
			    extra));
		}
	} else {
		if (size + extra <= arena_maxclass) {
			return (arena_ralloc(arena, ptr, oldsize, size, extra,
			    alignment, zero, try_tcache_alloc,
			    try_tcache_dalloc));
		} else {
			return (huge_ralloc(ptr, oldsize, size, extra,
			    alignment, zero, try_tcache_dalloc));
		}
	}
}

JEMALLOC_ALWAYS_INLINE void *
iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
    bool no_move)
{

	return (irallocx(ptr, size, extra, alignment, zero, no_move, true, true,
	    NULL));
}

malloc_tsd_externs(thread_allocated, thread_allocated_t)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, thread_allocated, thread_allocated_t,
    THREAD_ALLOCATED_INITIALIZER, malloc_tsd_no_cleanup)
#endif

#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_INLINES
/******************************************************************************/
#endif /* JEMALLOC_INTERNAL_H */