jemalloc_internal.h.in revision fbb31029a5c2f556f39e04a8781340d4ee4cf16c
#ifndef JEMALLOC_INTERNAL_H
#define	JEMALLOC_INTERNAL_H
#include <math.h>
#ifdef _WIN32
#  include <windows.h>
#  define ENOENT ERROR_PATH_NOT_FOUND
#  define EINVAL ERROR_BAD_ARGUMENTS
#  define EAGAIN ERROR_OUTOFMEMORY
#  define EPERM  ERROR_WRITE_FAULT
#  define EFAULT ERROR_INVALID_ADDRESS
#  define ENOMEM ERROR_NOT_ENOUGH_MEMORY
#  undef ERANGE
#  define ERANGE ERROR_INVALID_DATA
#else
#  include <sys/param.h>
#  include <sys/mman.h>
#  include <sys/syscall.h>
#  if !defined(SYS_write) && defined(__NR_write)
#    define SYS_write __NR_write
#  endif
#  include <sys/uio.h>
#  include <pthread.h>
#  include <errno.h>
#endif
#include <sys/types.h>

#include <limits.h>
#ifndef SIZE_T_MAX
#  define SIZE_T_MAX	SIZE_MAX
#endif
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>
#ifndef offsetof
#  define offsetof(type, member)	((size_t)&(((type *)NULL)->member))
#endif
#include <inttypes.h>
#include <string.h>
#include <strings.h>
#include <ctype.h>
#ifdef _MSC_VER
#  include <io.h>
typedef intptr_t ssize_t;
#  define PATH_MAX 1024
#  define STDERR_FILENO 2
#  define __func__ __FUNCTION__
/* Disable warnings about deprecated system functions */
#  pragma warning(disable: 4996)
#else
#  include <unistd.h>
#endif
#include <fcntl.h>

#include "jemalloc_internal_defs.h"

#ifdef JEMALLOC_UTRACE
#include <sys/ktrace.h>
#endif

#ifdef JEMALLOC_VALGRIND
#include <valgrind/valgrind.h>
#include <valgrind/memcheck.h>
#endif

#define	JEMALLOC_NO_DEMANGLE
#ifdef JEMALLOC_JET
#  define JEMALLOC_N(n) jet_##n
#  include "jemalloc/internal/public_namespace.h"
#  define JEMALLOC_NO_RENAME
#  include "../jemalloc@install_suffix@.h"
#  undef JEMALLOC_NO_RENAME
#else
#  define JEMALLOC_N(n) @private_namespace@##n
#  include "../jemalloc@install_suffix@.h"
#endif
#include "jemalloc/internal/private_namespace.h"

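/*
 * Compile-time configuration is exposed as const bools rather than bare cpp
 * conditionals so that code which depends on a feature is always seen and
 * type-checked by the compiler, while branches that are statically false are
 * still removed by the optimizer.
 */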
static const bool config_debug =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
static const bool config_dss =
#ifdef JEMALLOC_DSS
    true
#else
    false
#endif
    ;
static const bool config_fill =
#ifdef JEMALLOC_FILL
    true
#else
    false
#endif
    ;
static const bool config_lazy_lock =
#ifdef JEMALLOC_LAZY_LOCK
    true
#else
    false
#endif
    ;
static const bool config_prof =
#ifdef JEMALLOC_PROF
    true
#else
    false
#endif
    ;
static const bool config_prof_libgcc =
#ifdef JEMALLOC_PROF_LIBGCC
    true
#else
    false
#endif
    ;
static const bool config_prof_libunwind =
#ifdef JEMALLOC_PROF_LIBUNWIND
    true
#else
    false
#endif
    ;
static const bool config_mremap =
#ifdef JEMALLOC_MREMAP
    true
#else
    false
#endif
    ;
static const bool config_munmap =
#ifdef JEMALLOC_MUNMAP
    true
#else
    false
#endif
    ;
static const bool config_stats =
#ifdef JEMALLOC_STATS
    true
#else
    false
#endif
    ;
static const bool config_tcache =
#ifdef JEMALLOC_TCACHE
    true
#else
    false
#endif
    ;
static const bool config_tls =
#ifdef JEMALLOC_TLS
    true
#else
    false
#endif
    ;
static const bool config_utrace =
#ifdef JEMALLOC_UTRACE
    true
#else
    false
#endif
    ;
static const bool config_valgrind =
#ifdef JEMALLOC_VALGRIND
    true
#else
    false
#endif
    ;
static const bool config_xmalloc =
#ifdef JEMALLOC_XMALLOC
    true
#else
    false
#endif
    ;
static const bool config_ivsalloc =
#ifdef JEMALLOC_IVSALLOC
    true
#else
    false
#endif
    ;

#ifdef JEMALLOC_ATOMIC9
#include <machine/atomic.h>
#endif

#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
#include <libkern/OSAtomic.h>
#endif

#ifdef JEMALLOC_ZONE
#include <mach/mach_error.h>
#include <mach/mach_init.h>
#include <mach/vm_map.h>
#include <malloc/malloc.h>
#endif

#define	RB_COMPACT
#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/qr.h"
#include "jemalloc/internal/ql.h"

/*
 * jemalloc can conceptually be broken into components (arena, tcache, etc.),
 * but there are circular dependencies that cannot be broken without
 * substantial performance degradation.  In order to reduce the effect on
 * visual code flow, read the header files in multiple passes, with one of the
 * following cpp variables defined during each pass:
 *
 *   JEMALLOC_H_TYPES   : Preprocessor-defined constants and pseudo-opaque data
 *                        types.
 *   JEMALLOC_H_STRUCTS : Data structures.
 *   JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
 *   JEMALLOC_H_INLINES : Inline functions.
 */
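/*
 * Illustrative sketch (foo is hypothetical, not an actual jemalloc
 * component) of how a component header cooperates with these passes:
 *
 *   #ifdef JEMALLOC_H_TYPES
 *   typedef struct foo_s foo_t;
 *   #endif
 *   #ifdef JEMALLOC_H_STRUCTS
 *   struct foo_s { ... };
 *   #endif
 *   #ifdef JEMALLOC_H_EXTERNS
 *   bool foo_boot(void);
 *   #endif
 *   #ifdef JEMALLOC_H_INLINES
 *   ...inline accessors...
 *   #endif
 */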
/******************************************************************************/
#define	JEMALLOC_H_TYPES

#include "jemalloc/internal/jemalloc_internal_macros.h"

#define	MALLOCX_LG_ALIGN_MASK	((int)0x3f)
#define	ALLOCM_LG_ALIGN_MASK	((int)0x3f)

/* Smallest size class to support. */
#define	LG_TINY_MIN		3
#define	TINY_MIN		(1U << LG_TINY_MIN)

/*
 * Minimum alignment of allocations is 2^LG_QUANTUM bytes (ignoring tiny size
 * classes).
 */
#ifndef LG_QUANTUM
#  if (defined(__i386__) || defined(_M_IX86))
#    define LG_QUANTUM		4
#  endif
#  ifdef __ia64__
#    define LG_QUANTUM		4
#  endif
#  ifdef __alpha__
#    define LG_QUANTUM		4
#  endif
#  ifdef __sparc64__
#    define LG_QUANTUM		4
#  endif
#  if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64))
#    define LG_QUANTUM		4
#  endif
#  ifdef __arm__
#    define LG_QUANTUM		3
#  endif
#  ifdef __aarch64__
#    define LG_QUANTUM		4
#  endif
#  ifdef __hppa__
#    define LG_QUANTUM		4
#  endif
#  ifdef __mips__
#    define LG_QUANTUM		3
#  endif
#  ifdef __powerpc__
#    define LG_QUANTUM		4
#  endif
#  ifdef __s390__
#    define LG_QUANTUM		4
#  endif
#  ifdef __SH4__
#    define LG_QUANTUM		4
#  endif
#  ifdef __tile__
#    define LG_QUANTUM		4
#  endif
#  ifndef LG_QUANTUM
#    error "No LG_QUANTUM definition for architecture; specify via CPPFLAGS"
#  endif
#endif

#define	QUANTUM			((size_t)(1U << LG_QUANTUM))
#define	QUANTUM_MASK		(QUANTUM - 1)

/* Return the smallest quantum multiple that is >= a. */
#define	QUANTUM_CEILING(a)						\
	(((a) + QUANTUM_MASK) & ~QUANTUM_MASK)
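/*
 * Worked example: with LG_QUANTUM == 4 (QUANTUM == 16, QUANTUM_MASK == 0xf),
 * QUANTUM_CEILING(13) == (13 + 15) & ~15 == 28 & ~15 == 16.  The mask trick
 * is valid only because QUANTUM is a power of two.
 */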

#define	LONG			((size_t)(1U << LG_SIZEOF_LONG))
#define	LONG_MASK		(LONG - 1)

/* Return the smallest long multiple that is >= a. */
#define	LONG_CEILING(a)							\
	(((a) + LONG_MASK) & ~LONG_MASK)

#define	SIZEOF_PTR		(1U << LG_SIZEOF_PTR)
#define	PTR_MASK		(SIZEOF_PTR - 1)

/* Return the smallest (void *) multiple that is >= a. */
#define	PTR_CEILING(a)							\
	(((a) + PTR_MASK) & ~PTR_MASK)

/*
 * Maximum size of L1 cache line.  This is used to avoid cache line aliasing.
 * In addition, this controls the spacing of cacheline-spaced size classes.
 *
 * CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can
 * only handle raw constants.
 */
#define	LG_CACHELINE		6
#define	CACHELINE		64
#define	CACHELINE_MASK		(CACHELINE - 1)

/* Return the smallest cacheline multiple that is >= s. */
#define	CACHELINE_CEILING(s)						\
	(((s) + CACHELINE_MASK) & ~CACHELINE_MASK)

/* Page size.  STATIC_PAGE_SHIFT is determined by the configure script. */
#ifdef PAGE_MASK
#  undef PAGE_MASK
#endif
#define	LG_PAGE		STATIC_PAGE_SHIFT
#define	PAGE		((size_t)(1U << STATIC_PAGE_SHIFT))
#define	PAGE_MASK	((size_t)(PAGE - 1))

/* Return the smallest pagesize multiple that is >= s. */
#define	PAGE_CEILING(s)							\
	(((s) + PAGE_MASK) & ~PAGE_MASK)

/* Return the nearest aligned address at or below a. */
#define	ALIGNMENT_ADDR2BASE(a, alignment)				\
	((void *)((uintptr_t)(a) & (-(alignment))))

/* Return the offset between a and the nearest aligned address at or below a. */
#define	ALIGNMENT_ADDR2OFFSET(a, alignment)				\
	((size_t)((uintptr_t)(a) & (alignment - 1)))

/* Return the smallest alignment multiple that is >= s. */
#define	ALIGNMENT_CEILING(s, alignment)					\
	(((s) + (alignment - 1)) & (-(alignment)))
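/*
 * Worked example: with alignment == 64, ALIGNMENT_CEILING(100, 64) ==
 * (100 + 63) & -64 == 163 & ~63 == 128; for a pointer a == 0x1234,
 * ALIGNMENT_ADDR2BASE(a, 64) == 0x1200 and ALIGNMENT_ADDR2OFFSET(a, 64) ==
 * 0x34.  All of these macros assume alignment is a power of two.
 */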

/* Declare a variable length array. */
#if __STDC_VERSION__ < 199901L
#  ifdef _MSC_VER
#    include <malloc.h>
#    define alloca _alloca
#  else
#    ifdef JEMALLOC_HAS_ALLOCA_H
#      include <alloca.h>
#    else
#      include <stdlib.h>
#    endif
#  endif
#  define VARIABLE_ARRAY(type, name, count) \
	type *name = alloca(sizeof(type) * (count))
#else
#  define VARIABLE_ARRAY(type, name, count) type name[count]
#endif
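/*
 * Typical usage (illustrative; nregs is a hypothetical count):
 *   VARIABLE_ARRAY(size_t, sizes, nregs);
 *   sizes[0] = 0;
 * Note that the alloca() fallback allocates from the current stack frame, so
 * its lifetime ends when the enclosing function (not block) returns.
 */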

#ifdef JEMALLOC_VALGRIND
/*
 * The JEMALLOC_VALGRIND_*() macros must be macros rather than functions
 * so that when Valgrind reports errors, there are no extra stack frames
 * in the backtraces.
 *
 * The size that is reported to valgrind must be consistent through a chain of
 * malloc..realloc..realloc calls.  Request size isn't recorded anywhere in
 * jemalloc, so it is critical that all callers of these macros provide usize
 * rather than request size.  As a result, buffer overflow detection is
 * technically weakened for the standard API, though it is generally accepted
 * practice to consider any extra bytes reported by malloc_usable_size() as
 * usable space.
 */
#define	JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do {		\
	if (config_valgrind && opt_valgrind && cond)			\
		VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(ptr), zero);	\
} while (0)
#define	JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize,	\
    old_rzsize, zero) do {						\
	if (config_valgrind && opt_valgrind) {				\
		size_t rzsize = p2rz(ptr);				\
									\
		if (ptr == old_ptr) {					\
			VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize,	\
			    usize, rzsize);				\
			if (zero && old_usize < usize) {		\
				VALGRIND_MAKE_MEM_DEFINED(		\
				    (void *)((uintptr_t)ptr +		\
				    old_usize), usize - old_usize);	\
			}						\
		} else {						\
			if (old_ptr != NULL) {				\
				VALGRIND_FREELIKE_BLOCK(old_ptr,	\
				    old_rzsize);			\
			}						\
			if (ptr != NULL) {				\
				size_t copy_size = (old_usize < usize)	\
				    ?  old_usize : usize;		\
				size_t tail_size = usize - copy_size;	\
				VALGRIND_MALLOCLIKE_BLOCK(ptr, usize,	\
				    rzsize, false);			\
				if (copy_size > 0) {			\
					VALGRIND_MAKE_MEM_DEFINED(ptr,	\
					    copy_size);			\
				}					\
				if (zero && tail_size > 0) {		\
					VALGRIND_MAKE_MEM_DEFINED(	\
					    (void *)((uintptr_t)ptr +	\
					    copy_size), tail_size);	\
				}					\
			}						\
		}							\
	}								\
} while (0)
#define	JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {			\
	if (config_valgrind && opt_valgrind)				\
		VALGRIND_FREELIKE_BLOCK(ptr, rzsize);			\
} while (0)
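/*
 * Illustrative chain (hypothetical caller) showing that usize, not the
 * request size, is what must be passed consistently:
 *   usize = s2u(4000);     (e.g. 4096, assuming 4 KiB pages)
 *   JEMALLOC_VALGRIND_MALLOC(true, ret, usize, false);
 *   ...later, on realloc of ret to 8000 bytes...
 *   JEMALLOC_VALGRIND_REALLOC(ret2, s2u(8000), ret, usize, old_rzsize,
 *       false);
 */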
#else
#define	RUNNING_ON_VALGRIND	((unsigned)0)
#define	VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed) \
    do {} while (0)
#define	VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB) \
    do {} while (0)
#define	VALGRIND_FREELIKE_BLOCK(addr, rzB) do {} while (0)
#define	VALGRIND_MAKE_MEM_NOACCESS(_qzz_addr, _qzz_len) do {} while (0)
#define	VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr, _qzz_len) do {} while (0)
#define	VALGRIND_MAKE_MEM_DEFINED(_qzz_addr, _qzz_len) do {} while (0)
#define	JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do {} while (0)
#define	JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize,	\
    old_rzsize, zero) do {} while (0)
#define	JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {} while (0)
#endif

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_TYPES
/******************************************************************************/
#define	JEMALLOC_H_STRUCTS

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

typedef struct {
	uint64_t	allocated;
	uint64_t	deallocated;
} thread_allocated_t;
/*
 * The JEMALLOC_ARG_CONCAT() wrapper is necessary to pass {0, 0} via a cpp
 * macro argument; without it, the preprocessor would split the initializer
 * at the comma and treat it as two separate arguments.
 */
#define	THREAD_ALLOCATED_INITIALIZER	JEMALLOC_ARG_CONCAT({0, 0})

#undef JEMALLOC_H_STRUCTS
/******************************************************************************/
#define	JEMALLOC_H_EXTERNS

extern bool	opt_abort;
extern bool	opt_junk;
extern size_t	opt_quarantine;
extern bool	opt_redzone;
extern bool	opt_utrace;
extern bool	opt_valgrind;
extern bool	opt_xmalloc;
extern bool	opt_zero;
extern size_t	opt_narenas;

/* Number of CPUs. */
extern unsigned		ncpus;

/* Protects arenas initialization (arenas, arenas_total). */
extern malloc_mutex_t	arenas_lock;
/*
 * Arenas that are used to service external requests.  Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 *
 * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
 * arenas.  arenas[narenas_auto..narenas_total) are only used if the application
 * takes some action to create them and allocate from them.
 */
extern arena_t		**arenas;
extern unsigned		narenas_total;
extern unsigned		narenas_auto; /* Read-only after initialization. */

arena_t	*arenas_extend(unsigned ind);
void	arenas_cleanup(void *arg);
arena_t	*choose_arena_hard(void);
void	jemalloc_prefork(void);
void	jemalloc_postfork_parent(void);
void	jemalloc_postfork_child(void);

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_EXTERNS
/******************************************************************************/
#define	JEMALLOC_H_INLINES

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"

#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), arenas, arena_t *)

size_t	s2u(size_t size);
size_t	sa2u(size_t size, size_t alignment);
unsigned	narenas_total_get(void);
arena_t	*choose_arena(arena_t *arena);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
/*
 * Map of pthread_self() --> arenas[???], used for selecting an arena to use
 * for allocations.
 */
malloc_tsd_externs(arenas, arena_t *)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, arenas, arena_t *, NULL,
    arenas_cleanup)

/*
 * Compute usable size that would result from allocating an object with the
 * specified size.
 */
JEMALLOC_ALWAYS_INLINE size_t
s2u(size_t size)
{

	if (size <= SMALL_MAXCLASS)
		return (arena_bin_info[SMALL_SIZE2BIN(size)].reg_size);
	if (size <= arena_maxclass)
		return (PAGE_CEILING(size));
	return (CHUNK_CEILING(size));
}
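/*
 * For example (assuming 4 KiB pages and default size classes), s2u(100)
 * returns the regular size of the smallest small class that fits 100 bytes,
 * s2u(5000) returns PAGE_CEILING(5000) == 8192, and a size beyond
 * arena_maxclass is rounded up to a multiple of the chunk size.
 */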

/*
 * Compute usable size that would result from allocating an object with the
 * specified size and alignment.
 */
JEMALLOC_ALWAYS_INLINE size_t
sa2u(size_t size, size_t alignment)
{
	size_t usize;

	assert(alignment != 0 && ((alignment - 1) & alignment) == 0);

	/*
	 * Round size up to the nearest multiple of alignment.
	 *
	 * This done, we can take advantage of the fact that for each small
	 * size class, every object is aligned at the smallest power of two
	 * that is non-zero in the base two representation of the size.  For
	 * example:
	 *
	 *   Size |   Base 2 | Minimum alignment
	 *   -----+----------+------------------
	 *     96 |  1100000 |  32
	 *    160 | 10100000 |  32
	 *    192 | 11000000 |  64
	 */
	usize = ALIGNMENT_CEILING(size, alignment);
	/*
	 * (usize < size) protects against the combination of maximal
	 * alignment and size greater than maximal alignment.
	 */
	if (usize < size) {
		/* size_t overflow. */
		return (0);
	}

	if (usize <= arena_maxclass && alignment <= PAGE) {
		if (usize <= SMALL_MAXCLASS)
			return (arena_bin_info[SMALL_SIZE2BIN(usize)].reg_size);
		return (PAGE_CEILING(usize));
	} else {
		size_t run_size;

		/*
		 * We can't achieve subpage alignment, so round up alignment
		 * permanently; it makes later calculations simpler.
		 */
		alignment = PAGE_CEILING(alignment);
		usize = PAGE_CEILING(size);
		/*
		 * (usize < size) protects against very large sizes within
		 * PAGE of SIZE_T_MAX.
		 *
		 * (usize + alignment < usize) protects against the
		 * combination of maximal alignment and usize large enough
		 * to cause overflow.  This is similar to the first overflow
		 * check above, but it needs to be repeated due to the new
		 * usize value, which may now be *equal* to maximal
		 * alignment, whereas before we only detected overflow if the
		 * original size was *greater* than maximal alignment.
		 */
		if (usize < size || usize + alignment < usize) {
			/* size_t overflow. */
			return (0);
		}

		/*
		 * Calculate the size of the over-size run that arena_palloc()
		 * would need to allocate in order to guarantee the alignment.
		 * If the run wouldn't fit within a chunk, round up to a huge
		 * allocation size.
		 */
		run_size = usize + alignment - PAGE;
		if (run_size <= arena_maxclass)
			return (PAGE_CEILING(usize));
		return (CHUNK_CEILING(usize));
	}
}
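/*
 * For example (assuming 4 KiB pages), sa2u(100, 64) rounds 100 up to 128; a
 * 128-byte small class already guarantees 64-byte alignment (the lowest set
 * bit of 128 is >= 64), so no padding is needed.  sa2u(100, 8192), by
 * contrast, takes the subpage-infeasible path and sizes an over-aligned run.
 */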

JEMALLOC_INLINE unsigned
narenas_total_get(void)
{
	unsigned narenas;

	malloc_mutex_lock(&arenas_lock);
	narenas = narenas_total;
	malloc_mutex_unlock(&arenas_lock);

	return (narenas);
}

/* Choose an arena based on a per-thread value. */
JEMALLOC_INLINE arena_t *
choose_arena(arena_t *arena)
{
	arena_t *ret;

	if (arena != NULL)
		return (arena);

	if ((ret = *arenas_tsd_get()) == NULL) {
		ret = choose_arena_hard();
		assert(ret != NULL);
	}

	return (ret);
}
#endif

#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/rtree.h"
/*
 * Include arena.h twice in order to resolve circular dependencies with
 * tcache.h.
 */
#define	JEMALLOC_ARENA_INLINE_A
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_A
#include "jemalloc/internal/tcache.h"
#define	JEMALLOC_ARENA_INLINE_B
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_B
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"

#ifndef JEMALLOC_ENABLE_INLINE
void	*imalloct(size_t size, bool try_tcache, arena_t *arena);
void	*imalloc(size_t size);
void	*icalloct(size_t size, bool try_tcache, arena_t *arena);
void	*icalloc(size_t size);
void	*ipalloct(size_t usize, size_t alignment, bool zero, bool try_tcache,
    arena_t *arena);
void	*ipalloc(size_t usize, size_t alignment, bool zero);
size_t	isalloc(const void *ptr, bool demote);
size_t	ivsalloc(const void *ptr, bool demote);
size_t	u2rz(size_t usize);
size_t	p2rz(const void *ptr);
void	idalloct(void *ptr, bool try_tcache);
void	idalloc(void *ptr);
void	iqalloct(void *ptr, bool try_tcache);
void	iqalloc(void *ptr);
void	*iralloct_realign(void *ptr, size_t oldsize, size_t size, size_t extra,
    size_t alignment, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc,
    arena_t *arena);
void	*iralloct(void *ptr, size_t size, size_t extra, size_t alignment,
    bool zero, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena);
void	*iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
    bool zero);
bool	ixalloc(void *ptr, size_t size, size_t extra, size_t alignment,
    bool zero);
malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t)
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_ALWAYS_INLINE void *
imalloct(size_t size, bool try_tcache, arena_t *arena)
{

	assert(size != 0);

	if (size <= arena_maxclass)
		return (arena_malloc(arena, size, false, try_tcache));
	else
		return (huge_malloc(size, false, huge_dss_prec_get(arena)));
}

JEMALLOC_ALWAYS_INLINE void *
imalloc(size_t size)
{

	return (imalloct(size, true, NULL));
}

JEMALLOC_ALWAYS_INLINE void *
icalloct(size_t size, bool try_tcache, arena_t *arena)
{

	if (size <= arena_maxclass)
		return (arena_malloc(arena, size, true, try_tcache));
	else
		return (huge_malloc(size, true, huge_dss_prec_get(arena)));
}

JEMALLOC_ALWAYS_INLINE void *
icalloc(size_t size)
{

	return (icalloct(size, true, NULL));
}

JEMALLOC_ALWAYS_INLINE void *
ipalloct(size_t usize, size_t alignment, bool zero, bool try_tcache,
    arena_t *arena)
{
	void *ret;

	assert(usize != 0);
	assert(usize == sa2u(usize, alignment));

	if (usize <= arena_maxclass && alignment <= PAGE)
		ret = arena_malloc(arena, usize, zero, try_tcache);
	else {
		if (usize <= arena_maxclass) {
			ret = arena_palloc(choose_arena(arena), usize,
			    alignment, zero);
		} else if (alignment <= chunksize) {
			ret = huge_malloc(usize, zero,
			    huge_dss_prec_get(arena));
		} else {
			ret = huge_palloc(usize, alignment, zero,
			    huge_dss_prec_get(arena));
		}
	}

	assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
	return (ret);
}

JEMALLOC_ALWAYS_INLINE void *
ipalloc(size_t usize, size_t alignment, bool zero)
{

	return (ipalloct(usize, alignment, zero, true, NULL));
}
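/*
 * Illustrative caller contract: usize must come from sa2u(), e.g.
 *   size_t usize = sa2u(size, alignment);
 *   void *p = (usize != 0) ? ipalloc(usize, alignment, false) : NULL;
 */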

/*
 * Typical usage:
 *   void *ptr = [...]
 *   size_t sz = isalloc(ptr, config_prof);
 */
JEMALLOC_ALWAYS_INLINE size_t
isalloc(const void *ptr, bool demote)
{
	size_t ret;
	arena_chunk_t *chunk;

	assert(ptr != NULL);
	/* Demotion only makes sense if config_prof is true. */
	assert(config_prof || demote == false);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr)
		ret = arena_salloc(ptr, demote);
	else
		ret = huge_salloc(ptr);

	return (ret);
}

JEMALLOC_ALWAYS_INLINE size_t
ivsalloc(const void *ptr, bool demote)
{

	/* Return 0 if ptr is not within a chunk managed by jemalloc. */
	if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == 0)
		return (0);

	return (isalloc(ptr, demote));
}

JEMALLOC_INLINE size_t
u2rz(size_t usize)
{
	size_t ret;

	if (usize <= SMALL_MAXCLASS) {
		size_t binind = SMALL_SIZE2BIN(usize);
		ret = arena_bin_info[binind].redzone_size;
	} else
		ret = 0;

	return (ret);
}

JEMALLOC_INLINE size_t
p2rz(const void *ptr)
{
	size_t usize = isalloc(ptr, false);

	return (u2rz(usize));
}

JEMALLOC_ALWAYS_INLINE void
idalloct(void *ptr, bool try_tcache)
{
	arena_chunk_t *chunk;

	assert(ptr != NULL);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr)
		arena_dalloc(chunk->arena, chunk, ptr, try_tcache);
	else
		huge_dalloc(ptr, true);
}

JEMALLOC_ALWAYS_INLINE void
idalloc(void *ptr)
{

	idalloct(ptr, true);
}

JEMALLOC_ALWAYS_INLINE void
iqalloct(void *ptr, bool try_tcache)
{

	if (config_fill && opt_quarantine)
		quarantine(ptr);
	else
		idalloct(ptr, try_tcache);
}

JEMALLOC_ALWAYS_INLINE void
iqalloc(void *ptr)
{

	iqalloct(ptr, true);
}

JEMALLOC_ALWAYS_INLINE void *
iralloct_realign(void *ptr, size_t oldsize, size_t size, size_t extra,
    size_t alignment, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc,
    arena_t *arena)
{
	void *p;
	size_t usize, copysize;

	usize = sa2u(size + extra, alignment);
	if (usize == 0)
		return (NULL);
	p = ipalloct(usize, alignment, zero, try_tcache_alloc, arena);
	if (p == NULL) {
		if (extra == 0)
			return (NULL);
		/* Try again, without extra this time. */
		usize = sa2u(size, alignment);
		if (usize == 0)
			return (NULL);
		p = ipalloct(usize, alignment, zero, try_tcache_alloc, arena);
		if (p == NULL)
			return (NULL);
	}
	/*
	 * Copy at most size bytes (not size+extra), since the caller has no
	 * expectation that the extra bytes will be reliably preserved.
	 */
	copysize = (size < oldsize) ? size : oldsize;
	memcpy(p, ptr, copysize);
	iqalloct(ptr, try_tcache_dalloc);
	return (p);
}

JEMALLOC_ALWAYS_INLINE void *
iralloct(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
    bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena)
{
	size_t oldsize;

	assert(ptr != NULL);
	assert(size != 0);

	oldsize = isalloc(ptr, config_prof);

	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
	    != 0) {
		/*
		 * Existing object alignment is inadequate; allocate new space
		 * and copy.
		 */
		return (iralloct_realign(ptr, oldsize, size, extra, alignment,
		    zero, try_tcache_alloc, try_tcache_dalloc, arena));
	}

	if (size + extra <= arena_maxclass) {
		return (arena_ralloc(arena, ptr, oldsize, size, extra,
		    alignment, zero, try_tcache_alloc,
		    try_tcache_dalloc));
	} else {
		return (huge_ralloc(ptr, oldsize, size, extra,
		    alignment, zero, try_tcache_dalloc,
		    huge_dss_prec_get(arena)));
	}
}

JEMALLOC_ALWAYS_INLINE void *
iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero)
{

	return (iralloct(ptr, size, extra, alignment, zero, true, true, NULL));
}

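/*
 * Resize ptr in place if possible.  Returns false on success, true if the
 * object cannot be enlarged/shrunk without being moved.
 */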
JEMALLOC_ALWAYS_INLINE bool
ixalloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero)
{
	size_t oldsize;

	assert(ptr != NULL);
	assert(size != 0);

	oldsize = isalloc(ptr, config_prof);
	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
	    != 0) {
		/* Existing object alignment is inadequate. */
		return (true);
	}

	if (size <= arena_maxclass)
		return (arena_ralloc_no_move(ptr, oldsize, size, extra, zero));
	else
		return (huge_ralloc_no_move(ptr, oldsize, size, extra));
}

malloc_tsd_externs(thread_allocated, thread_allocated_t)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, thread_allocated, thread_allocated_t,
    THREAD_ALLOCATED_INITIALIZER, malloc_tsd_no_cleanup)
#endif

#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_INLINES
/******************************************************************************/
#endif /* JEMALLOC_INTERNAL_H */