jemalloc_internal.h.in revision 9f9897ad4275e540cf1bea5a6de762c809b7695c
#ifndef JEMALLOC_INTERNAL_H
#define JEMALLOC_INTERNAL_H
#include <math.h>
#ifdef _WIN32
#  include <windows.h>
#  define ENOENT ERROR_PATH_NOT_FOUND
#  define EINVAL ERROR_BAD_ARGUMENTS
#  define EAGAIN ERROR_OUTOFMEMORY
#  define EPERM  ERROR_WRITE_FAULT
#  define EFAULT ERROR_INVALID_ADDRESS
#  define ENOMEM ERROR_NOT_ENOUGH_MEMORY
#  undef ERANGE
#  define ERANGE ERROR_INVALID_DATA
#else
#  include <sys/param.h>
#  include <sys/mman.h>
#  include <sys/syscall.h>
#  if !defined(SYS_write) && defined(__NR_write)
#    define SYS_write __NR_write
#  endif
#  include <sys/uio.h>
#  include <pthread.h>
#  include <errno.h>
#endif
#include <sys/types.h>

#include <limits.h>
#ifndef SIZE_T_MAX
#  define SIZE_T_MAX	SIZE_MAX
#endif
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>
#ifndef offsetof
#  define offsetof(type, member)	((size_t)&(((type *)NULL)->member))
#endif
#include <inttypes.h>
#include <string.h>
#include <strings.h>
#include <ctype.h>
#ifdef _MSC_VER
#  include <io.h>
typedef intptr_t ssize_t;
#  define PATH_MAX 1024
#  define STDERR_FILENO 2
#  define __func__ __FUNCTION__
/* Disable warnings about deprecated system functions. */
#  pragma warning(disable: 4996)
#else
#  include <unistd.h>
#endif
#include <fcntl.h>

#define	JEMALLOC_NO_DEMANGLE
#include "../jemalloc@install_suffix@.h"

#ifdef JEMALLOC_UTRACE
#include <sys/ktrace.h>
#endif

#ifdef JEMALLOC_VALGRIND
#include <valgrind/valgrind.h>
#include <valgrind/memcheck.h>
#endif

#include "jemalloc/internal/private_namespace.h"

#ifdef JEMALLOC_CC_SILENCE
#define	UNUSED JEMALLOC_ATTR(unused)
#else
#define	UNUSED
#endif

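/*
 * The build-time options below are exposed as static const booleans rather
 * than bare cpp conditionals so that option-dependent code can be written
 * as ordinary C and type-checked in every configuration, e.g.
 * (illustrative sketch):
 *
 *   if (config_debug)
 *       assert(ptr != NULL);
 *
 * The compiler can then discard the dead branch in optimized builds.
 */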
static const bool config_debug =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
static const bool config_dss =
#ifdef JEMALLOC_DSS
    true
#else
    false
#endif
    ;
static const bool config_fill =
#ifdef JEMALLOC_FILL
    true
#else
    false
#endif
    ;
static const bool config_lazy_lock =
#ifdef JEMALLOC_LAZY_LOCK
    true
#else
    false
#endif
    ;
static const bool config_prof =
#ifdef JEMALLOC_PROF
    true
#else
    false
#endif
    ;
static const bool config_prof_libgcc =
#ifdef JEMALLOC_PROF_LIBGCC
    true
#else
    false
#endif
    ;
static const bool config_prof_libunwind =
#ifdef JEMALLOC_PROF_LIBUNWIND
    true
#else
    false
#endif
    ;
static const bool config_mremap =
#ifdef JEMALLOC_MREMAP
    true
#else
    false
#endif
    ;
static const bool config_munmap =
#ifdef JEMALLOC_MUNMAP
    true
#else
    false
#endif
    ;
static const bool config_stats =
#ifdef JEMALLOC_STATS
    true
#else
    false
#endif
    ;
static const bool config_tcache =
#ifdef JEMALLOC_TCACHE
    true
#else
    false
#endif
    ;
static const bool config_tls =
#ifdef JEMALLOC_TLS
    true
#else
    false
#endif
    ;
static const bool config_utrace =
#ifdef JEMALLOC_UTRACE
    true
#else
    false
#endif
    ;
static const bool config_valgrind =
#ifdef JEMALLOC_VALGRIND
    true
#else
    false
#endif
    ;
static const bool config_xmalloc =
#ifdef JEMALLOC_XMALLOC
    true
#else
    false
#endif
    ;
static const bool config_ivsalloc =
#ifdef JEMALLOC_IVSALLOC
    true
#else
    false
#endif
    ;

#ifdef JEMALLOC_ATOMIC9
#include <machine/atomic.h>
#endif

#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
#include <libkern/OSAtomic.h>
#endif

#ifdef JEMALLOC_ZONE
#include <mach/mach_error.h>
#include <mach/mach_init.h>
#include <mach/vm_map.h>
#include <malloc/malloc.h>
#endif

#define	RB_COMPACT
#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/qr.h"
#include "jemalloc/internal/ql.h"

/*
 * jemalloc can conceptually be broken into components (arena, tcache, etc.),
 * but there are circular dependencies that cannot be broken without
 * substantial performance degradation.  In order to reduce the effect on
 * visual code flow, read the header files in multiple passes, with one of the
 * following cpp variables defined during each pass:
 *
 *   JEMALLOC_H_TYPES   : Preprocessor-defined constants and pseudo-opaque data
 *                        types.
 *   JEMALLOC_H_STRUCTS : Data structures.
 *   JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
 *   JEMALLOC_H_INLINES : Inline functions.
 */
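/*
 * Each internal header included below is correspondingly split into guarded
 * sections; e.g. (illustrative sketch, not any header's literal contents):
 *
 *   #ifdef JEMALLOC_H_TYPES
 *   typedef struct arena_s arena_t;
 *   #endif
 *   #ifdef JEMALLOC_H_STRUCTS
 *   struct arena_s { ... };
 *   #endif
 *
 * so that four passes over the same include list pick up types first, then
 * structs, then externs, and finally inlines.
 */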
/******************************************************************************/
#define JEMALLOC_H_TYPES

#define	ALLOCM_LG_ALIGN_MASK	((int)0x3f)

#define	ZU(z)	((size_t)(z))
#define	QU(q)	((uint64_t)(q))

#ifndef __DECONST
#  define	__DECONST(type, var)	((type)(uintptr_t)(const void *)(var))
#endif

#ifdef JEMALLOC_DEBUG
   /* Disable inlining to make debugging easier. */
#  define JEMALLOC_ALWAYS_INLINE
#  define JEMALLOC_INLINE
#  define inline
#else
#  define JEMALLOC_ENABLE_INLINE
#  ifdef JEMALLOC_HAVE_ATTR
#    define JEMALLOC_ALWAYS_INLINE \
	 static inline JEMALLOC_ATTR(unused) JEMALLOC_ATTR(always_inline)
#  else
#    define JEMALLOC_ALWAYS_INLINE static inline
#  endif
#  define JEMALLOC_INLINE static inline
#  ifdef _MSC_VER
#    define inline _inline
#  endif
#endif

/* Smallest size class to support. */
#define	LG_TINY_MIN		3
#define	TINY_MIN		(1U << LG_TINY_MIN)

/*
 * Minimum alignment of allocations is 2^LG_QUANTUM bytes (ignoring tiny size
 * classes).
 */
#ifndef LG_QUANTUM
#  if (defined(__i386__) || defined(_M_IX86))
#    define LG_QUANTUM		4
#  endif
#  ifdef __ia64__
#    define LG_QUANTUM		4
#  endif
#  ifdef __alpha__
#    define LG_QUANTUM		4
#  endif
#  ifdef __sparc64__
#    define LG_QUANTUM		4
#  endif
#  if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64))
#    define LG_QUANTUM		4
#  endif
#  ifdef __arm__
#    define LG_QUANTUM		3
#  endif
#  ifdef __hppa__
#    define LG_QUANTUM		4
#  endif
#  ifdef __mips__
#    define LG_QUANTUM		3
#  endif
#  ifdef __powerpc__
#    define LG_QUANTUM		4
#  endif
#  ifdef __s390__
#    define LG_QUANTUM		4
#  endif
#  ifdef __SH4__
#    define LG_QUANTUM		4
#  endif
#  ifdef __tile__
#    define LG_QUANTUM		4
#  endif
#  ifndef LG_QUANTUM
#    error "No LG_QUANTUM definition for architecture; specify via CPPFLAGS"
#  endif
#endif

#define	QUANTUM			((size_t)(1U << LG_QUANTUM))
#define	QUANTUM_MASK		(QUANTUM - 1)

/* Return the smallest quantum multiple that is >= a. */
#define	QUANTUM_CEILING(a)						\
	(((a) + QUANTUM_MASK) & ~QUANTUM_MASK)
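/*
 * Worked example (assuming LG_QUANTUM == 4, i.e. QUANTUM == 16 and
 * QUANTUM_MASK == 0xf): QUANTUM_CEILING(100) == (100 + 15) & ~15 == 112,
 * and QUANTUM_CEILING(112) == 112.  The same add-then-mask rounding trick
 * is used by the other *_CEILING macros below; it requires that the
 * rounding granularity be a power of two.
 */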

#define	LONG			((size_t)(1U << LG_SIZEOF_LONG))
#define	LONG_MASK		(LONG - 1)

/* Return the smallest long multiple that is >= a. */
#define	LONG_CEILING(a)							\
	(((a) + LONG_MASK) & ~LONG_MASK)

#define	SIZEOF_PTR		(1U << LG_SIZEOF_PTR)
#define	PTR_MASK		(SIZEOF_PTR - 1)

/* Return the smallest (void *) multiple that is >= a. */
#define	PTR_CEILING(a)							\
	(((a) + PTR_MASK) & ~PTR_MASK)

/*
 * Maximum size of L1 cache line.  This is used to avoid cache line aliasing.
 * In addition, this controls the spacing of cacheline-spaced size classes.
 *
 * CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can
 * only handle raw constants.
 */
#define	LG_CACHELINE		6
#define	CACHELINE		64
#define	CACHELINE_MASK		(CACHELINE - 1)
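/*
 * E.g. (illustrative): "__declspec(align(64))" is accepted, whereas
 * "__declspec(align(1U << 6))" is not, hence the raw 64 above.
 */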

/* Return the smallest cacheline multiple that is >= s. */
#define	CACHELINE_CEILING(s)						\
	(((s) + CACHELINE_MASK) & ~CACHELINE_MASK)

/* Page size.  STATIC_PAGE_SHIFT is determined by the configure script. */
#ifdef PAGE_MASK
#  undef PAGE_MASK
#endif
#define	LG_PAGE		STATIC_PAGE_SHIFT
#define	PAGE		((size_t)(1U << STATIC_PAGE_SHIFT))
#define	PAGE_MASK	((size_t)(PAGE - 1))

/* Return the smallest pagesize multiple that is >= s. */
#define	PAGE_CEILING(s)							\
	(((s) + PAGE_MASK) & ~PAGE_MASK)

/* Return the nearest aligned address at or below a. */
#define	ALIGNMENT_ADDR2BASE(a, alignment)				\
	((void *)((uintptr_t)(a) & (-(alignment))))

/* Return the offset between a and the nearest aligned address at or below a. */
#define	ALIGNMENT_ADDR2OFFSET(a, alignment)				\
	((size_t)((uintptr_t)(a) & ((alignment) - 1)))

/* Return the smallest alignment multiple that is >= s. */
#define	ALIGNMENT_CEILING(s, alignment)					\
	(((s) + ((alignment) - 1)) & (-(alignment)))
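/*
 * Worked example (alignment == 64): given a == 0x12345,
 * ALIGNMENT_ADDR2BASE(a, 64) == 0x12340, ALIGNMENT_ADDR2OFFSET(a, 64) == 5,
 * and ALIGNMENT_CEILING(100, 64) == 128.  As with the *_CEILING macros
 * above, alignment must be a power of two.
 */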

/* Declare a variable-length array. */
#if __STDC_VERSION__ < 199901L
#  ifdef _MSC_VER
#    include <malloc.h>
#    define alloca _alloca
#  else
#    ifdef JEMALLOC_HAS_ALLOCA_H
#      include <alloca.h>
#    else
#      include <stdlib.h>
#    endif
#  endif
#  define VARIABLE_ARRAY(type, name, count) \
	type *name = alloca(sizeof(type) * (count))
#else
#  define VARIABLE_ARRAY(type, name, count) type name[count]
#endif
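/*
 * Illustrative use (hypothetical; nbins stands in for any runtime count):
 *
 *   VARIABLE_ARRAY(uint64_t, counts, nbins);
 *
 * Under C99 this declares "uint64_t counts[nbins]"; otherwise it declares an
 * alloca()-backed pointer, whose storage is released when the enclosing
 * function (not the enclosing block) returns.
 */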

#ifdef JEMALLOC_VALGRIND
/*
 * The JEMALLOC_VALGRIND_*() macros must be macros rather than functions
 * so that when Valgrind reports errors, there are no extra stack frames
 * in the backtraces.
 *
 * The size that is reported to valgrind must be consistent through a chain of
 * malloc..realloc..realloc calls.  Request size isn't recorded anywhere in
 * jemalloc, so it is critical that all callers of these macros provide usize
 * rather than request size.  As a result, buffer overflow detection is
 * technically weakened for the standard API, though it is generally accepted
 * practice to consider any extra bytes reported by malloc_usable_size() as
 * usable space.
 */
#define	JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do {		\
	if (config_valgrind && opt_valgrind && cond)			\
		VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(ptr), zero);	\
} while (0)
#define	JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize,	\
    old_rzsize, zero)  do {						\
	if (config_valgrind && opt_valgrind) {				\
		size_t rzsize = p2rz(ptr);				\
									\
		if (ptr == old_ptr) {					\
			VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize,	\
			    usize, rzsize);				\
			if (zero && old_usize < usize) {		\
				VALGRIND_MAKE_MEM_DEFINED(		\
				    (void *)((uintptr_t)ptr +		\
				    old_usize), usize - old_usize);	\
			}						\
		} else {						\
			if (old_ptr != NULL) {				\
				VALGRIND_FREELIKE_BLOCK(old_ptr,	\
				    old_rzsize);			\
			}						\
			if (ptr != NULL) {				\
				size_t copy_size = (old_usize < usize)	\
				    ?  old_usize : usize;		\
				size_t tail_size = usize - copy_size;	\
				VALGRIND_MALLOCLIKE_BLOCK(ptr, usize,	\
				    rzsize, false);			\
				if (copy_size > 0) {			\
					VALGRIND_MAKE_MEM_DEFINED(ptr,	\
					    copy_size);			\
				}					\
				if (zero && tail_size > 0) {		\
					VALGRIND_MAKE_MEM_DEFINED(	\
					    (void *)((uintptr_t)ptr +	\
					    copy_size), tail_size);	\
				}					\
			}						\
		}							\
	}								\
} while (0)
#define	JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {			\
	if (config_valgrind && opt_valgrind)				\
		VALGRIND_FREELIKE_BLOCK(ptr, rzsize);			\
} while (0)
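/*
 * Illustrative call sequence (a sketch; the real call sites live elsewhere,
 * e.g. jemalloc.c, and pass the usable size, per the comment above):
 *
 *   void *p = imalloc(usize);
 *   JEMALLOC_VALGRIND_MALLOC(p != NULL, p, usize, false);
 *   ...
 *   size_t rzsize = p2rz(p);
 *   idalloc(p);
 *   JEMALLOC_VALGRIND_FREE(p, rzsize);
 */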
#else
#define	RUNNING_ON_VALGRIND	((unsigned)0)
#define	VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed)
#define	VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB)
#define	VALGRIND_FREELIKE_BLOCK(addr, rzB)
#define	VALGRIND_MAKE_MEM_NOACCESS(_qzz_addr, _qzz_len)
#define	VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr, _qzz_len)
#define	VALGRIND_MAKE_MEM_DEFINED(_qzz_addr, _qzz_len)
#define	JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero)
#define	JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize,	\
    old_rzsize, zero)
#define	JEMALLOC_VALGRIND_FREE(ptr, rzsize)
#endif

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_TYPES
/******************************************************************************/
#define JEMALLOC_H_STRUCTS

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

typedef struct {
	uint64_t	allocated;
	uint64_t	deallocated;
} thread_allocated_t;
/*
 * The JEMALLOC_CONCAT() wrapper is necessary to pass {0, 0} via a cpp macro
 * argument.
 */
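/*
 * E.g. (illustrative): in the malloc_tsd_funcs(...) use near the end of
 * this header, a bare {0, 0} argument would be split at its internal comma
 * into two cpp arguments.
 */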
#define	THREAD_ALLOCATED_INITIALIZER	JEMALLOC_CONCAT({0, 0})

#undef JEMALLOC_H_STRUCTS
/******************************************************************************/
#define JEMALLOC_H_EXTERNS

extern bool	opt_abort;
extern bool	opt_junk;
extern size_t	opt_quarantine;
extern bool	opt_redzone;
extern bool	opt_utrace;
extern bool	opt_valgrind;
extern bool	opt_xmalloc;
extern bool	opt_zero;
extern size_t	opt_narenas;

/* Number of CPUs. */
extern unsigned		ncpus;

/* Protects arenas initialization (arenas, narenas_total). */
extern malloc_mutex_t	arenas_lock;
/*
 * Arenas that are used to service external requests.  Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 *
 * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
 * arenas.  arenas[narenas_auto..narenas_total) are only used if the application
 * takes some action to create them and allocate from them.
 */
extern arena_t		**arenas;
extern unsigned		narenas_total;
extern unsigned		narenas_auto; /* Read-only after initialization. */

arena_t	*arenas_extend(unsigned ind);
void	arenas_cleanup(void *arg);
arena_t	*choose_arena_hard(void);
void	jemalloc_prefork(void);
void	jemalloc_postfork_parent(void);
void	jemalloc_postfork_child(void);

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_EXTERNS
/******************************************************************************/
#define JEMALLOC_H_INLINES

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"

#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), arenas, arena_t *)

size_t	s2u(size_t size);
size_t	sa2u(size_t size, size_t alignment);
unsigned	narenas_total_get(void);
arena_t	*choose_arena(arena_t *arena);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
/*
 * Map of pthread_self() --> arenas[???], used for selecting an arena to use
 * for allocations.
 */
malloc_tsd_externs(arenas, arena_t *)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, arenas, arena_t *, NULL,
    arenas_cleanup)

/*
 * Compute usable size that would result from allocating an object with the
 * specified size.
 */
JEMALLOC_ALWAYS_INLINE size_t
s2u(size_t size)
{

	if (size <= SMALL_MAXCLASS)
		return (arena_bin_info[SMALL_SIZE2BIN(size)].reg_size);
	if (size <= arena_maxclass)
		return (PAGE_CEILING(size));
	return (CHUNK_CEILING(size));
}
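/*
 * Worked example (assuming the default 16-byte quantum size classes):
 * s2u(100) returns 112, the smallest small size class that holds 100 bytes;
 * sizes above SMALL_MAXCLASS round up to a page multiple, and sizes above
 * arena_maxclass round up to a chunk multiple.
 */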

/*
 * Compute usable size that would result from allocating an object with the
 * specified size and alignment.
 */
JEMALLOC_ALWAYS_INLINE size_t
sa2u(size_t size, size_t alignment)
{
	size_t usize;

	assert(alignment != 0 && ((alignment - 1) & alignment) == 0);

	/*
	 * Round size up to the nearest multiple of alignment.
	 *
	 * This done, we can take advantage of the fact that for each small
	 * size class, every object is aligned at the smallest power of two
	 * that is non-zero in the base two representation of the size.  For
	 * example:
	 *
	 *   Size |   Base 2 | Minimum alignment
	 *   -----+----------+------------------
	 *     96 |  1100000 |  32
	 *    160 | 10100000 |  32
	 *    192 | 11000000 |  64
	 */
	usize = ALIGNMENT_CEILING(size, alignment);
	/*
	 * (usize < size) protects against the combination of maximal
	 * alignment and size greater than maximal alignment.
	 */
	if (usize < size) {
		/* size_t overflow. */
		return (0);
	}

	if (usize <= arena_maxclass && alignment <= PAGE) {
		if (usize <= SMALL_MAXCLASS)
			return (arena_bin_info[SMALL_SIZE2BIN(usize)].reg_size);
		return (PAGE_CEILING(usize));
	} else {
		size_t run_size;

		/*
		 * We can't achieve subpage alignment, so round up alignment
		 * permanently; it makes later calculations simpler.
		 */
		alignment = PAGE_CEILING(alignment);
		usize = PAGE_CEILING(size);
		/*
		 * (usize < size) protects against very large sizes within
		 * PAGE of SIZE_T_MAX.
		 *
		 * (usize + alignment < usize) protects against the
		 * combination of maximal alignment and usize large enough
		 * to cause overflow.  This is similar to the first overflow
		 * check above, but it needs to be repeated due to the new
		 * usize value, which may now be *equal* to maximal
		 * alignment, whereas before we only detected overflow if the
		 * original size was *greater* than maximal alignment.
		 */
		if (usize < size || usize + alignment < usize) {
			/* size_t overflow. */
			return (0);
		}

		/*
		 * Calculate the size of the over-size run that arena_palloc()
		 * would need to allocate in order to guarantee the alignment.
		 * If the run wouldn't fit within a chunk, round up to a huge
		 * allocation size.
		 */
		run_size = usize + alignment - PAGE;
		if (run_size <= arena_maxclass)
			return (PAGE_CEILING(usize));
		return (CHUNK_CEILING(usize));
	}
}

JEMALLOC_INLINE unsigned
narenas_total_get(void)
{
	unsigned narenas;

	malloc_mutex_lock(&arenas_lock);
	narenas = narenas_total;
	malloc_mutex_unlock(&arenas_lock);

	return (narenas);
}

/* Choose an arena based on a per-thread value. */
JEMALLOC_INLINE arena_t *
choose_arena(arena_t *arena)
{
	arena_t *ret;

	if (arena != NULL)
		return (arena);

	if ((ret = *arenas_tsd_get()) == NULL) {
		ret = choose_arena_hard();
		assert(ret != NULL);
	}

	return (ret);
}
#endif

#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/rtree.h"
/*
 * Include arena.h twice in order to resolve circular dependencies with
 * tcache.h.
 */
#define	JEMALLOC_ARENA_INLINE_A
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_A
#include "jemalloc/internal/tcache.h"
#define	JEMALLOC_ARENA_INLINE_B
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_B
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"

#ifndef JEMALLOC_ENABLE_INLINE
void	*imallocx(size_t size, bool try_tcache, arena_t *arena);
void	*imalloc(size_t size);
void	*icallocx(size_t size, bool try_tcache, arena_t *arena);
void	*icalloc(size_t size);
void	*ipallocx(size_t usize, size_t alignment, bool zero, bool try_tcache,
    arena_t *arena);
void	*ipalloc(size_t usize, size_t alignment, bool zero);
size_t	isalloc(const void *ptr, bool demote);
size_t	ivsalloc(const void *ptr, bool demote);
size_t	u2rz(size_t usize);
size_t	p2rz(const void *ptr);
void	idallocx(void *ptr, bool try_tcache);
void	idalloc(void *ptr);
void	iqallocx(void *ptr, bool try_tcache);
void	iqalloc(void *ptr);
void	*irallocx(void *ptr, size_t size, size_t extra, size_t alignment,
    bool zero, bool no_move, bool try_tcache_alloc, bool try_tcache_dalloc,
    arena_t *arena);
void	*iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
    bool zero, bool no_move);
malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t)
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_ALWAYS_INLINE void *
imallocx(size_t size, bool try_tcache, arena_t *arena)
{

	assert(size != 0);

	if (size <= arena_maxclass)
		return (arena_malloc(arena, size, false, try_tcache));
	else
		return (huge_malloc(size, false));
}

JEMALLOC_ALWAYS_INLINE void *
imalloc(size_t size)
{

	return (imallocx(size, true, NULL));
}

JEMALLOC_ALWAYS_INLINE void *
icallocx(size_t size, bool try_tcache, arena_t *arena)
{

	if (size <= arena_maxclass)
		return (arena_malloc(arena, size, true, try_tcache));
	else
		return (huge_malloc(size, true));
}

JEMALLOC_ALWAYS_INLINE void *
icalloc(size_t size)
{

	return (icallocx(size, true, NULL));
}

JEMALLOC_ALWAYS_INLINE void *
ipallocx(size_t usize, size_t alignment, bool zero, bool try_tcache,
    arena_t *arena)
{
	void *ret;

	assert(usize != 0);
	assert(usize == sa2u(usize, alignment));

	if (usize <= arena_maxclass && alignment <= PAGE)
		ret = arena_malloc(arena, usize, zero, try_tcache);
	else {
		if (usize <= arena_maxclass) {
			ret = arena_palloc(choose_arena(arena), usize,
			    alignment, zero);
		} else if (alignment <= chunksize)
			ret = huge_malloc(usize, zero);
		else
			ret = huge_palloc(usize, alignment, zero);
	}

	assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
	return (ret);
}

JEMALLOC_ALWAYS_INLINE void *
ipalloc(size_t usize, size_t alignment, bool zero)
{

	return (ipallocx(usize, alignment, zero, true, NULL));
}

/*
 * Typical usage:
 *   void *ptr = [...]
 *   size_t sz = isalloc(ptr, config_prof);
 */
JEMALLOC_ALWAYS_INLINE size_t
isalloc(const void *ptr, bool demote)
{
	size_t ret;
	arena_chunk_t *chunk;

	assert(ptr != NULL);
	/* Demotion only makes sense if config_prof is true. */
	assert(config_prof || demote == false);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr)
		ret = arena_salloc(ptr, demote);
	else
		ret = huge_salloc(ptr);

	return (ret);
}

JEMALLOC_ALWAYS_INLINE size_t
ivsalloc(const void *ptr, bool demote)
{

	/* Return 0 if ptr is not within a chunk managed by jemalloc. */
	if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == NULL)
		return (0);

	return (isalloc(ptr, demote));
}

JEMALLOC_INLINE size_t
u2rz(size_t usize)
{
	size_t ret;

	if (usize <= SMALL_MAXCLASS) {
		size_t binind = SMALL_SIZE2BIN(usize);
		ret = arena_bin_info[binind].redzone_size;
	} else
		ret = 0;

	return (ret);
}

JEMALLOC_INLINE size_t
p2rz(const void *ptr)
{
	size_t usize = isalloc(ptr, false);

	return (u2rz(usize));
}

JEMALLOC_ALWAYS_INLINE void
idallocx(void *ptr, bool try_tcache)
{
	arena_chunk_t *chunk;

	assert(ptr != NULL);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr)
		arena_dalloc(chunk->arena, chunk, ptr, try_tcache);
	else
		huge_dalloc(ptr, true);
}

JEMALLOC_ALWAYS_INLINE void
idalloc(void *ptr)
{

	idallocx(ptr, true);
}

JEMALLOC_ALWAYS_INLINE void
iqallocx(void *ptr, bool try_tcache)
{

	if (config_fill && opt_quarantine)
		quarantine(ptr);
	else
		idallocx(ptr, try_tcache);
}

JEMALLOC_ALWAYS_INLINE void
iqalloc(void *ptr)
{

	iqallocx(ptr, true);
}

JEMALLOC_ALWAYS_INLINE void *
irallocx(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
    bool no_move, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena)
{
	void *ret;
	size_t oldsize;

	assert(ptr != NULL);
	assert(size != 0);

	oldsize = isalloc(ptr, config_prof);

	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
	    != 0) {
		size_t usize, copysize;

		/*
		 * Existing object alignment is inadequate; allocate new space
		 * and copy.
		 */
		if (no_move)
			return (NULL);
		usize = sa2u(size + extra, alignment);
		if (usize == 0)
			return (NULL);
		ret = ipallocx(usize, alignment, zero, try_tcache_alloc, arena);
		if (ret == NULL) {
			if (extra == 0)
				return (NULL);
			/* Try again, without extra this time. */
			usize = sa2u(size, alignment);
			if (usize == 0)
				return (NULL);
			ret = ipallocx(usize, alignment, zero, try_tcache_alloc,
			    arena);
			if (ret == NULL)
				return (NULL);
		}
		/*
		 * Copy at most size bytes (not size+extra), since the caller
		 * has no expectation that the extra bytes will be reliably
		 * preserved.
		 */
		copysize = (size < oldsize) ? size : oldsize;
		memcpy(ret, ptr, copysize);
		iqallocx(ptr, try_tcache_dalloc);
		return (ret);
	}

	if (no_move) {
		if (size <= arena_maxclass) {
			return (arena_ralloc_no_move(ptr, oldsize, size,
			    extra, zero));
		} else {
			return (huge_ralloc_no_move(ptr, oldsize, size,
			    extra));
		}
	} else {
		if (size + extra <= arena_maxclass) {
			return (arena_ralloc(arena, ptr, oldsize, size, extra,
			    alignment, zero, try_tcache_alloc,
			    try_tcache_dalloc));
		} else {
			return (huge_ralloc(ptr, oldsize, size, extra,
			    alignment, zero, try_tcache_dalloc));
		}
	}
}

JEMALLOC_ALWAYS_INLINE void *
iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
    bool no_move)
{

	return (irallocx(ptr, size, extra, alignment, zero, no_move, true, true,
	    NULL));
}
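/*
 * Illustrative (hypothetical) caller pattern for the no_move flag: first
 * attempt an in-place resize, then fall back to a moving reallocation:
 *
 *   if ((q = iralloc(p, size, 0, 0, false, true)) == NULL)
 *       q = iralloc(p, size, 0, 0, false, false);
 */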

malloc_tsd_externs(thread_allocated, thread_allocated_t)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, thread_allocated, thread_allocated_t,
    THREAD_ALLOCATED_INITIALIZER, malloc_tsd_no_cleanup)
#endif

#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_INLINES
/******************************************************************************/
#endif /* JEMALLOC_INTERNAL_H */
