jemalloc_internal.h.in revision 14990b83d1dffe04638df0c09eb1a5c3b1118462
#ifndef JEMALLOC_INTERNAL_H
#define JEMALLOC_INTERNAL_H
#include <math.h>
#ifdef _WIN32
#  include <windows.h>
#  define ENOENT ERROR_PATH_NOT_FOUND
#  define EINVAL ERROR_BAD_ARGUMENTS
#  define EAGAIN ERROR_OUTOFMEMORY
#  define EPERM  ERROR_WRITE_FAULT
#  define EFAULT ERROR_INVALID_ADDRESS
#  define ENOMEM ERROR_NOT_ENOUGH_MEMORY
#  undef ERANGE
#  define ERANGE ERROR_INVALID_DATA
#else
#  include <sys/param.h>
#  include <sys/mman.h>
#  include <sys/syscall.h>
#  if !defined(SYS_write) && defined(__NR_write)
#    define SYS_write __NR_write
#  endif
#  include <sys/uio.h>
#  include <pthread.h>
#  include <errno.h>
#endif
#include <sys/types.h>

#include <limits.h>
#ifndef SIZE_T_MAX
#  define SIZE_T_MAX	SIZE_MAX
#endif
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>
#ifndef offsetof
#  define offsetof(type, member)	((size_t)&(((type *)NULL)->member))
#endif
#include <inttypes.h>
#include <string.h>
#include <strings.h>
#include <ctype.h>
#ifdef _MSC_VER
#  include <io.h>
typedef intptr_t ssize_t;
#  define PATH_MAX 1024
#  define STDERR_FILENO 2
#  define __func__ __FUNCTION__
/* Disable warnings about deprecated system functions */
#  pragma warning(disable: 4996)
#else
#  include <unistd.h>
#endif
#include <fcntl.h>

#include "jemalloc_internal_defs.h"

#ifdef JEMALLOC_UTRACE
#include <sys/ktrace.h>
#endif

#ifdef JEMALLOC_VALGRIND
#include <valgrind/valgrind.h>
#include <valgrind/memcheck.h>
#endif

#define	JEMALLOC_NO_DEMANGLE
#ifdef JEMALLOC_JET
#  define JEMALLOC_N(n) jet_##n
#  include "jemalloc/internal/public_namespace.h"
#  define JEMALLOC_NO_RENAME
#  include "../jemalloc@install_suffix@.h"
#else
#  define JEMALLOC_N(n) @private_namespace@##n
#  include "../jemalloc@install_suffix@.h"
#endif
#include "jemalloc/internal/private_namespace.h"

#ifdef JEMALLOC_CC_SILENCE
#define	UNUSED JEMALLOC_ATTR(unused)
#else
#define	UNUSED
#endif

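/*
 * Compile-time feature flags.  Each JEMALLOC_* cpp define is mirrored as a
 * static const bool so that feature-conditional code can use ordinary C
 * conditionals (e.g. "if (config_stats) { ... }") instead of #ifdef blocks;
 * the compiler still type-checks the disabled branch, then discards it as
 * dead code.
 */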
static const bool config_debug =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
static const bool config_dss =
#ifdef JEMALLOC_DSS
    true
#else
    false
#endif
    ;
static const bool config_fill =
#ifdef JEMALLOC_FILL
    true
#else
    false
#endif
    ;
static const bool config_lazy_lock =
#ifdef JEMALLOC_LAZY_LOCK
    true
#else
    false
#endif
    ;
static const bool config_prof =
#ifdef JEMALLOC_PROF
    true
#else
    false
#endif
    ;
static const bool config_prof_libgcc =
#ifdef JEMALLOC_PROF_LIBGCC
    true
#else
    false
#endif
    ;
static const bool config_prof_libunwind =
#ifdef JEMALLOC_PROF_LIBUNWIND
    true
#else
    false
#endif
    ;
static const bool config_mremap =
#ifdef JEMALLOC_MREMAP
    true
#else
    false
#endif
    ;
static const bool config_munmap =
#ifdef JEMALLOC_MUNMAP
    true
#else
    false
#endif
    ;
static const bool config_stats =
#ifdef JEMALLOC_STATS
    true
#else
    false
#endif
    ;
static const bool config_tcache =
#ifdef JEMALLOC_TCACHE
    true
#else
    false
#endif
    ;
static const bool config_tls =
#ifdef JEMALLOC_TLS
    true
#else
    false
#endif
    ;
static const bool config_utrace =
#ifdef JEMALLOC_UTRACE
    true
#else
    false
#endif
    ;
static const bool config_valgrind =
#ifdef JEMALLOC_VALGRIND
    true
#else
    false
#endif
    ;
static const bool config_xmalloc =
#ifdef JEMALLOC_XMALLOC
    true
#else
    false
#endif
    ;
static const bool config_ivsalloc =
#ifdef JEMALLOC_IVSALLOC
    true
#else
    false
#endif
    ;

#ifdef JEMALLOC_ATOMIC9
#include <machine/atomic.h>
#endif

#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
#include <libkern/OSAtomic.h>
#endif

#ifdef JEMALLOC_ZONE
#include <mach/mach_error.h>
#include <mach/mach_init.h>
#include <mach/vm_map.h>
#include <malloc/malloc.h>
#endif

#define	RB_COMPACT
#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/qr.h"
#include "jemalloc/internal/ql.h"

/*
 * jemalloc can conceptually be broken into components (arena, tcache, etc.),
 * but there are circular dependencies that cannot be broken without
 * substantial performance degradation.  In order to reduce the effect on
 * visual code flow, read the header files in multiple passes, with one of the
 * following cpp variables defined during each pass:
 *
 *   JEMALLOC_H_TYPES   : Preprocessor-defined constants and pseudo-opaque data
 *                        types.
 *   JEMALLOC_H_STRUCTS : Data structures.
 *   JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
 *   JEMALLOC_H_INLINES : Inline functions.
 */
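/*
 * For illustration only (a hypothetical component header, not part of
 * jemalloc), each internal header contains one section per pass, so that
 * including it once per pass assembles the pieces in dependency order:
 *
 *   #ifdef JEMALLOC_H_TYPES
 *   typedef struct foo_s foo_t;
 *   #endif
 *   #ifdef JEMALLOC_H_STRUCTS
 *   struct foo_s { foo_t *next; };
 *   #endif
 *   #ifdef JEMALLOC_H_EXTERNS
 *   extern foo_t *foo_head;
 *   #endif
 *   #ifdef JEMALLOC_H_INLINES
 *   JEMALLOC_INLINE foo_t *foo_first(void) { return (foo_head); }
 *   #endif
 */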
/******************************************************************************/
#define JEMALLOC_H_TYPES

#define	ALLOCM_LG_ALIGN_MASK	((int)0x3f)

#define	ZU(z)	((size_t)(z))
#define	QU(q)	((uint64_t)(q))

#ifndef __DECONST
#  define	__DECONST(type, var)	((type)(uintptr_t)(const void *)(var))
#endif

/* Smallest size class to support. */
#define	LG_TINY_MIN		3
#define	TINY_MIN		(1U << LG_TINY_MIN)

/*
 * Minimum alignment of allocations is 2^LG_QUANTUM bytes (ignoring tiny size
 * classes).
 */
#ifndef LG_QUANTUM
#  if (defined(__i386__) || defined(_M_IX86))
#    define LG_QUANTUM		4
#  endif
#  ifdef __ia64__
#    define LG_QUANTUM		4
#  endif
#  ifdef __alpha__
#    define LG_QUANTUM		4
#  endif
#  ifdef __sparc64__
#    define LG_QUANTUM		4
#  endif
#  if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64))
#    define LG_QUANTUM		4
#  endif
#  ifdef __arm__
#    define LG_QUANTUM		3
#  endif
#  ifdef __aarch64__
#    define LG_QUANTUM		4
#  endif
#  ifdef __hppa__
#    define LG_QUANTUM		4
#  endif
#  ifdef __mips__
#    define LG_QUANTUM		3
#  endif
#  ifdef __powerpc__
#    define LG_QUANTUM		4
#  endif
#  ifdef __s390__
#    define LG_QUANTUM		4
#  endif
#  ifdef __SH4__
#    define LG_QUANTUM		4
#  endif
#  ifdef __tile__
#    define LG_QUANTUM		4
#  endif
#  ifndef LG_QUANTUM
#    error "No LG_QUANTUM definition for architecture; specify via CPPFLAGS"
#  endif
#endif

#define	QUANTUM			((size_t)(1U << LG_QUANTUM))
#define	QUANTUM_MASK		(QUANTUM - 1)

/* Return the smallest quantum multiple that is >= a. */
#define	QUANTUM_CEILING(a)						\
	(((a) + QUANTUM_MASK) & ~QUANTUM_MASK)
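/*
 * Worked example, assuming LG_QUANTUM == 4 (QUANTUM == 16, as on x86_64):
 * QUANTUM_CEILING(17) == (17 + 15) & ~15 == 32, while an exact multiple is
 * unchanged: QUANTUM_CEILING(16) == 16.
 */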

#define	LONG			((size_t)(1U << LG_SIZEOF_LONG))
#define	LONG_MASK		(LONG - 1)

/* Return the smallest long multiple that is >= a. */
#define	LONG_CEILING(a)							\
	(((a) + LONG_MASK) & ~LONG_MASK)

#define	SIZEOF_PTR		(1U << LG_SIZEOF_PTR)
#define	PTR_MASK		(SIZEOF_PTR - 1)

/* Return the smallest (void *) multiple that is >= a. */
#define	PTR_CEILING(a)							\
	(((a) + PTR_MASK) & ~PTR_MASK)

/*
 * Maximum size of L1 cache line.  This is used to avoid cache line aliasing.
 * In addition, this controls the spacing of cacheline-spaced size classes.
 *
 * CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can
 * only handle raw constants.
 */
#define	LG_CACHELINE		6
#define	CACHELINE		64
#define	CACHELINE_MASK		(CACHELINE - 1)

/* Return the smallest cacheline multiple that is >= s. */
#define	CACHELINE_CEILING(s)						\
	(((s) + CACHELINE_MASK) & ~CACHELINE_MASK)

/* Page size.  STATIC_PAGE_SHIFT is determined by the configure script. */
#ifdef PAGE_MASK
#  undef PAGE_MASK
#endif
#define	LG_PAGE		STATIC_PAGE_SHIFT
#define	PAGE		((size_t)(1U << STATIC_PAGE_SHIFT))
#define	PAGE_MASK	((size_t)(PAGE - 1))

/* Return the smallest pagesize multiple that is >= s. */
#define	PAGE_CEILING(s)							\
	(((s) + PAGE_MASK) & ~PAGE_MASK)

/* Return the nearest aligned address at or below a. */
#define	ALIGNMENT_ADDR2BASE(a, alignment)				\
	((void *)((uintptr_t)(a) & (-(alignment))))

/* Return the offset between a and the nearest aligned address at or below a. */
#define	ALIGNMENT_ADDR2OFFSET(a, alignment)				\
	((size_t)((uintptr_t)(a) & ((alignment) - 1)))

/* Return the smallest alignment multiple that is >= s. */
#define	ALIGNMENT_CEILING(s, alignment)					\
	(((s) + ((alignment) - 1)) & (-(alignment)))
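/*
 * Worked example with alignment == 64 (alignment must be a power of two):
 * for a == 0x1234,
 *   ALIGNMENT_ADDR2BASE(a, 64)   == 0x1200
 *   ALIGNMENT_ADDR2OFFSET(a, 64) == 0x34
 * and ALIGNMENT_CEILING(0x1234, 64) == 0x1240.
 */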

/* Declare a variable-length array. */
#if __STDC_VERSION__ < 199901L
#  ifdef _MSC_VER
#    include <malloc.h>
#    define alloca _alloca
#  else
#    ifdef JEMALLOC_HAS_ALLOCA_H
#      include <alloca.h>
#    else
#      include <stdlib.h>
#    endif
#  endif
#  define VARIABLE_ARRAY(type, name, count) \
	type *name = alloca(sizeof(type) * (count))
#else
#  define VARIABLE_ARRAY(type, name, count) type name[count]
#endif
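/*
 * Usage sketch (hypothetical caller): the declaration reads the same whether
 * it expands to a true C99 VLA or to an alloca() allocation:
 *
 *   void
 *   zero_n(size_t n)
 *   {
 *   	VARIABLE_ARRAY(int, vals, n);
 *   	size_t i;
 *
 *   	for (i = 0; i < n; i++)
 *   		vals[i] = 0;
 *   }
 */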

#ifdef JEMALLOC_VALGRIND
/*
 * The JEMALLOC_VALGRIND_*() macros must be macros rather than functions
 * so that when Valgrind reports errors, there are no extra stack frames
 * in the backtraces.
 *
 * The size that is reported to valgrind must be consistent through a chain of
 * malloc..realloc..realloc calls.  Request size isn't recorded anywhere in
 * jemalloc, so it is critical that all callers of these macros provide usize
 * rather than request size.  As a result, buffer overflow detection is
 * technically weakened for the standard API, though it is generally accepted
 * practice to consider any extra bytes reported by malloc_usable_size() as
 * usable space.
 */
#define	JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do {		\
	if (config_valgrind && opt_valgrind && cond)			\
		VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(ptr), zero);	\
} while (0)
#define	JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize,	\
    old_rzsize, zero)  do {						\
	if (config_valgrind && opt_valgrind) {				\
		size_t rzsize = p2rz(ptr);				\
									\
		if (ptr == old_ptr) {					\
			VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize,	\
			    usize, rzsize);				\
			if (zero && old_usize < usize) {		\
				VALGRIND_MAKE_MEM_DEFINED(		\
				    (void *)((uintptr_t)ptr +		\
				    old_usize), usize - old_usize);	\
			}						\
		} else {						\
			if (old_ptr != NULL) {				\
				VALGRIND_FREELIKE_BLOCK(old_ptr,	\
				    old_rzsize);			\
			}						\
			if (ptr != NULL) {				\
				size_t copy_size = (old_usize < usize)	\
				    ?  old_usize : usize;		\
				size_t tail_size = usize - copy_size;	\
				VALGRIND_MALLOCLIKE_BLOCK(ptr, usize,	\
				    rzsize, false);			\
				if (copy_size > 0) {			\
					VALGRIND_MAKE_MEM_DEFINED(ptr,	\
					    copy_size);			\
				}					\
				if (zero && tail_size > 0) {		\
					VALGRIND_MAKE_MEM_DEFINED(	\
					    (void *)((uintptr_t)ptr +	\
					    copy_size), tail_size);	\
				}					\
			}						\
		}							\
	}								\
} while (0)
#define	JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {			\
	if (config_valgrind && opt_valgrind)				\
		VALGRIND_FREELIKE_BLOCK(ptr, rzsize);			\
} while (0)
#else
#define	RUNNING_ON_VALGRIND	((unsigned)0)
#define	VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed) \
    do {} while (0)
#define	VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB) \
    do {} while (0)
#define	VALGRIND_FREELIKE_BLOCK(addr, rzB) do {} while (0)
#define	VALGRIND_MAKE_MEM_NOACCESS(_qzz_addr, _qzz_len) do {} while (0)
#define	VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr, _qzz_len) do {} while (0)
#define	VALGRIND_MAKE_MEM_DEFINED(_qzz_addr, _qzz_len) do {} while (0)
#define	JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do {} while (0)
#define	JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize,	\
    old_rzsize, zero) do {} while (0)
#define	JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {} while (0)
#endif
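/*
 * Call-site sketch (hypothetical; the real call sites live in the allocation
 * paths of jemalloc.c): an entry point that produced ret with usable size
 * usize would report
 *
 *   JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
 *
 * and a deallocation path would compute the redzone size while the allocator
 * metadata is still readable, then report
 *
 *   size_t rzsize = p2rz(ptr);
 *   ...free ptr...
 *   JEMALLOC_VALGRIND_FREE(ptr, rzsize);
 */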

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_TYPES
/******************************************************************************/
#define JEMALLOC_H_STRUCTS

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

typedef struct {
	uint64_t	allocated;
	uint64_t	deallocated;
} thread_allocated_t;
/*
 * The JEMALLOC_ARG_CONCAT() wrapper is necessary to pass {0, 0} via a cpp macro
 * argument.
 */
#define	THREAD_ALLOCATED_INITIALIZER	JEMALLOC_ARG_CONCAT({0, 0})
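/*
 * Example of why the wrapper is needed: in a use such as
 *   malloc_tsd_funcs(..., thread_allocated, thread_allocated_t,
 *       THREAD_ALLOCATED_INITIALIZER, ...)
 * a bare {0, 0} would be split by the preprocessor into the two arguments
 * "{0" and "0}", since braces (unlike parentheses) do not protect commas;
 * the JEMALLOC_ARG_CONCAT() wrapper (a variadic pass-through macro) rejoins
 * them into a single initializer.
 */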

#undef JEMALLOC_H_STRUCTS
/******************************************************************************/
#define JEMALLOC_H_EXTERNS

extern bool	opt_abort;
extern bool	opt_junk;
extern size_t	opt_quarantine;
extern bool	opt_redzone;
extern bool	opt_utrace;
extern bool	opt_valgrind;
extern bool	opt_xmalloc;
extern bool	opt_zero;
extern size_t	opt_narenas;

/* Number of CPUs. */
extern unsigned		ncpus;

/* Protects arenas initialization (arenas, narenas_total). */
extern malloc_mutex_t	arenas_lock;
/*
 * Arenas that are used to service external requests.  Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 *
 * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
 * arenas.  arenas[narenas_auto..narenas_total) are only used if the application
 * takes some action to create them and allocate from them.
 */
extern arena_t		**arenas;
extern unsigned		narenas_total;
extern unsigned		narenas_auto; /* Read-only after initialization. */

arena_t	*arenas_extend(unsigned ind);
void	arenas_cleanup(void *arg);
arena_t	*choose_arena_hard(void);
void	jemalloc_prefork(void);
void	jemalloc_postfork_parent(void);
void	jemalloc_postfork_child(void);

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_EXTERNS
/******************************************************************************/
#define JEMALLOC_H_INLINES

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"

#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), arenas, arena_t *)

size_t	s2u(size_t size);
size_t	sa2u(size_t size, size_t alignment);
unsigned	narenas_total_get(void);
arena_t	*choose_arena(arena_t *arena);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
/*
 * Map of pthread_self() --> arenas[???], used for selecting an arena to use
 * for allocations.
 */
malloc_tsd_externs(arenas, arena_t *)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, arenas, arena_t *, NULL,
    arenas_cleanup)

/*
 * Compute usable size that would result from allocating an object with the
 * specified size.
 */
JEMALLOC_ALWAYS_INLINE size_t
s2u(size_t size)
{

	if (size <= SMALL_MAXCLASS)
		return (arena_bin_info[SMALL_SIZE2BIN(size)].reg_size);
	if (size <= arena_maxclass)
		return (PAGE_CEILING(size));
	return (CHUNK_CEILING(size));
}
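/*
 * For example, assuming 4 KiB pages and the default quantum-spaced size
 * classes: s2u(100) returns the small size class 112, s2u(5000) returns
 * PAGE_CEILING(5000) == 8192, and anything larger than arena_maxclass is
 * rounded up to a multiple of the chunk size.
 */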

/*
 * Compute usable size that would result from allocating an object with the
 * specified size and alignment.
 */
JEMALLOC_ALWAYS_INLINE size_t
sa2u(size_t size, size_t alignment)
{
	size_t usize;

	assert(alignment != 0 && ((alignment - 1) & alignment) == 0);

	/*
	 * Round size up to the nearest multiple of alignment.
	 *
	 * This done, we can take advantage of the fact that for each small
	 * size class, every object is aligned at the smallest power of two
	 * that is non-zero in the base two representation of the size.  For
	 * example:
	 *
	 *   Size |   Base 2 | Minimum alignment
	 *   -----+----------+------------------
	 *     96 |  1100000 |  32
	 *    160 | 10100000 |  32
	 *    192 | 11000000 |  64
	 */
	usize = ALIGNMENT_CEILING(size, alignment);
	/*
	 * (usize < size) protects against the combination of maximal
	 * alignment and size greater than maximal alignment.
	 */
	if (usize < size) {
		/* size_t overflow. */
		return (0);
	}

	if (usize <= arena_maxclass && alignment <= PAGE) {
		if (usize <= SMALL_MAXCLASS)
			return (arena_bin_info[SMALL_SIZE2BIN(usize)].reg_size);
		return (PAGE_CEILING(usize));
	} else {
		size_t run_size;

		/*
		 * We can't achieve subpage alignment, so round up alignment
		 * permanently; it makes later calculations simpler.
		 */
		alignment = PAGE_CEILING(alignment);
		usize = PAGE_CEILING(size);
		/*
		 * (usize < size) protects against very large sizes within
		 * PAGE of SIZE_T_MAX.
		 *
		 * (usize + alignment < usize) protects against the
		 * combination of maximal alignment and usize large enough
		 * to cause overflow.  This is similar to the first overflow
		 * check above, but it needs to be repeated due to the new
		 * usize value, which may now be *equal* to maximal
		 * alignment, whereas before we only detected overflow if the
		 * original size was *greater* than maximal alignment.
		 */
		if (usize < size || usize + alignment < usize) {
			/* size_t overflow. */
			return (0);
		}

		/*
		 * Calculate the size of the over-size run that arena_palloc()
		 * would need to allocate in order to guarantee the alignment.
		 * If the run wouldn't fit within a chunk, round up to a huge
		 * allocation size.
		 */
		run_size = usize + alignment - PAGE;
		if (run_size <= arena_maxclass)
			return (PAGE_CEILING(usize));
		return (CHUNK_CEILING(usize));
	}
}

JEMALLOC_INLINE unsigned
narenas_total_get(void)
{
	unsigned narenas;

	malloc_mutex_lock(&arenas_lock);
	narenas = narenas_total;
	malloc_mutex_unlock(&arenas_lock);

	return (narenas);
}

/* Choose an arena based on a per-thread value. */
JEMALLOC_INLINE arena_t *
choose_arena(arena_t *arena)
{
	arena_t *ret;

	if (arena != NULL)
		return (arena);

	if ((ret = *arenas_tsd_get()) == NULL) {
		ret = choose_arena_hard();
		assert(ret != NULL);
	}

	return (ret);
}
#endif

#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/rtree.h"
/*
 * Include arena.h twice in order to resolve circular dependencies with
 * tcache.h.
 */
#define	JEMALLOC_ARENA_INLINE_A
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_A
#include "jemalloc/internal/tcache.h"
#define	JEMALLOC_ARENA_INLINE_B
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_B
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"

#ifndef JEMALLOC_ENABLE_INLINE
void	*imallocx(size_t size, bool try_tcache, arena_t *arena);
void	*imalloc(size_t size);
void	*icallocx(size_t size, bool try_tcache, arena_t *arena);
void	*icalloc(size_t size);
void	*ipallocx(size_t usize, size_t alignment, bool zero, bool try_tcache,
    arena_t *arena);
void	*ipalloc(size_t usize, size_t alignment, bool zero);
size_t	isalloc(const void *ptr, bool demote);
size_t	ivsalloc(const void *ptr, bool demote);
size_t	u2rz(size_t usize);
size_t	p2rz(const void *ptr);
void	idallocx(void *ptr, bool try_tcache);
void	idalloc(void *ptr);
void	iqallocx(void *ptr, bool try_tcache);
void	iqalloc(void *ptr);
void	*irallocx(void *ptr, size_t size, size_t extra, size_t alignment,
    bool zero, bool no_move, bool try_tcache_alloc, bool try_tcache_dalloc,
    arena_t *arena);
void	*iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
    bool zero, bool no_move);
malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t)
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_ALWAYS_INLINE void *
imallocx(size_t size, bool try_tcache, arena_t *arena)
{

	assert(size != 0);

	if (size <= arena_maxclass)
		return (arena_malloc(arena, size, false, try_tcache));
	else
		return (huge_malloc(size, false));
}

JEMALLOC_ALWAYS_INLINE void *
imalloc(size_t size)
{

	return (imallocx(size, true, NULL));
}

JEMALLOC_ALWAYS_INLINE void *
icallocx(size_t size, bool try_tcache, arena_t *arena)
{

	if (size <= arena_maxclass)
		return (arena_malloc(arena, size, true, try_tcache));
	else
		return (huge_malloc(size, true));
}

JEMALLOC_ALWAYS_INLINE void *
icalloc(size_t size)
{

	return (icallocx(size, true, NULL));
}

JEMALLOC_ALWAYS_INLINE void *
ipallocx(size_t usize, size_t alignment, bool zero, bool try_tcache,
    arena_t *arena)
{
	void *ret;

	assert(usize != 0);
	assert(usize == sa2u(usize, alignment));

	if (usize <= arena_maxclass && alignment <= PAGE)
		ret = arena_malloc(arena, usize, zero, try_tcache);
	else {
		if (usize <= arena_maxclass) {
			ret = arena_palloc(choose_arena(arena), usize,
			    alignment, zero);
		} else if (alignment <= chunksize)
			ret = huge_malloc(usize, zero);
		else
			ret = huge_palloc(usize, alignment, zero);
	}

	assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
	return (ret);
}

JEMALLOC_ALWAYS_INLINE void *
ipalloc(size_t usize, size_t alignment, bool zero)
{

	return (ipallocx(usize, alignment, zero, true, NULL));
}

/*
 * Typical usage:
 *   void *ptr = [...]
 *   size_t sz = isalloc(ptr, config_prof);
 */
JEMALLOC_ALWAYS_INLINE size_t
isalloc(const void *ptr, bool demote)
{
	size_t ret;
	arena_chunk_t *chunk;

	assert(ptr != NULL);
	/* Demotion only makes sense if config_prof is true. */
	assert(config_prof || demote == false);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr)
		ret = arena_salloc(ptr, demote);
	else
		ret = huge_salloc(ptr);

	return (ret);
}

JEMALLOC_ALWAYS_INLINE size_t
ivsalloc(const void *ptr, bool demote)
{

	/* Return 0 if ptr is not within a chunk managed by jemalloc. */
	if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == NULL)
		return (0);

	return (isalloc(ptr, demote));
}

JEMALLOC_INLINE size_t
u2rz(size_t usize)
{
	size_t ret;

	if (usize <= SMALL_MAXCLASS) {
		size_t binind = SMALL_SIZE2BIN(usize);
		ret = arena_bin_info[binind].redzone_size;
	} else
		ret = 0;

	return (ret);
}

JEMALLOC_INLINE size_t
p2rz(const void *ptr)
{
	size_t usize = isalloc(ptr, false);

	return (u2rz(usize));
}

JEMALLOC_ALWAYS_INLINE void
idallocx(void *ptr, bool try_tcache)
{
	arena_chunk_t *chunk;

	assert(ptr != NULL);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr)
		arena_dalloc(chunk->arena, chunk, ptr, try_tcache);
	else
		huge_dalloc(ptr, true);
}

JEMALLOC_ALWAYS_INLINE void
idalloc(void *ptr)
{

	idallocx(ptr, true);
}

JEMALLOC_ALWAYS_INLINE void
iqallocx(void *ptr, bool try_tcache)
{

	if (config_fill && opt_quarantine)
		quarantine(ptr);
	else
		idallocx(ptr, try_tcache);
}

JEMALLOC_ALWAYS_INLINE void
iqalloc(void *ptr)
{

	iqallocx(ptr, true);
}

JEMALLOC_ALWAYS_INLINE void *
irallocx(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
    bool no_move, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena)
{
	void *ret;
	size_t oldsize;

	assert(ptr != NULL);
	assert(size != 0);

	oldsize = isalloc(ptr, config_prof);

	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
	    != 0) {
		size_t usize, copysize;

		/*
		 * Existing object alignment is inadequate; allocate new space
		 * and copy.
		 */
		if (no_move)
			return (NULL);
		usize = sa2u(size + extra, alignment);
		if (usize == 0)
			return (NULL);
		ret = ipallocx(usize, alignment, zero, try_tcache_alloc, arena);
		if (ret == NULL) {
			if (extra == 0)
				return (NULL);
			/* Try again, without extra this time. */
			usize = sa2u(size, alignment);
			if (usize == 0)
				return (NULL);
			ret = ipallocx(usize, alignment, zero, try_tcache_alloc,
			    arena);
			if (ret == NULL)
				return (NULL);
		}
		/*
		 * Copy at most size bytes (not size+extra), since the caller
		 * has no expectation that the extra bytes will be reliably
		 * preserved.
		 */
		copysize = (size < oldsize) ? size : oldsize;
		memcpy(ret, ptr, copysize);
		iqallocx(ptr, try_tcache_dalloc);
		return (ret);
	}

	if (no_move) {
		if (size <= arena_maxclass) {
			return (arena_ralloc_no_move(ptr, oldsize, size,
			    extra, zero));
		} else {
			return (huge_ralloc_no_move(ptr, oldsize, size,
			    extra));
		}
	} else {
		if (size + extra <= arena_maxclass) {
			return (arena_ralloc(arena, ptr, oldsize, size, extra,
			    alignment, zero, try_tcache_alloc,
			    try_tcache_dalloc));
		} else {
			return (huge_ralloc(ptr, oldsize, size, extra,
			    alignment, zero, try_tcache_dalloc));
		}
	}
}

JEMALLOC_ALWAYS_INLINE void *
iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
    bool no_move)
{

	return (irallocx(ptr, size, extra, alignment, zero, no_move, true, true,
	    NULL));
}

malloc_tsd_externs(thread_allocated, thread_allocated_t)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, thread_allocated, thread_allocated_t,
    THREAD_ALLOCATED_INITIALIZER, malloc_tsd_no_cleanup)
#endif

#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_INLINES
/******************************************************************************/
#endif /* JEMALLOC_INTERNAL_H */