jemalloc_internal.h.in revision 2e671ffbadc02fc7de8cbafdd1031e3b0ad73c5b
#ifndef JEMALLOC_INTERNAL_H
#define JEMALLOC_INTERNAL_H
#include <math.h>
#ifdef _WIN32
#  include <windows.h>
#  define ENOENT ERROR_PATH_NOT_FOUND
#  define EINVAL ERROR_BAD_ARGUMENTS
#  define EAGAIN ERROR_OUTOFMEMORY
#  define EPERM  ERROR_WRITE_FAULT
#  define EFAULT ERROR_INVALID_ADDRESS
#  define ENOMEM ERROR_NOT_ENOUGH_MEMORY
#  undef ERANGE
#  define ERANGE ERROR_INVALID_DATA
#else
#  include <sys/param.h>
#  include <sys/mman.h>
#  include <sys/syscall.h>
#  if !defined(SYS_write) && defined(__NR_write)
#    define SYS_write __NR_write
#  endif
#  include <sys/uio.h>
#  include <pthread.h>
#  include <errno.h>
#endif
#include <sys/types.h>

#include <limits.h>
#ifndef SIZE_T_MAX
#  define SIZE_T_MAX	SIZE_MAX
#endif
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>
#ifndef offsetof
#  define offsetof(type, member)	((size_t)&(((type *)NULL)->member))
#endif
#include <inttypes.h>
#include <string.h>
#include <strings.h>
#include <ctype.h>
#ifdef _MSC_VER
#  include <io.h>
typedef intptr_t ssize_t;
#  define PATH_MAX 1024
#  define STDERR_FILENO 2
#  define __func__ __FUNCTION__
/* Disable warnings about deprecated system functions */
#  pragma warning(disable: 4996)
#else
#  include <unistd.h>
#endif
#include <fcntl.h>

#define	JEMALLOC_NO_DEMANGLE
#include "../jemalloc@install_suffix@.h"

#ifdef JEMALLOC_UTRACE
#include <sys/ktrace.h>
#endif

#ifdef JEMALLOC_VALGRIND
#include <valgrind/valgrind.h>
#include <valgrind/memcheck.h>
#endif

#include "jemalloc/internal/private_namespace.h"

#ifdef JEMALLOC_CC_SILENCE
#define	UNUSED JEMALLOC_ATTR(unused)
#else
#define	UNUSED
#endif

static const bool config_debug =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
static const bool config_dss =
#ifdef JEMALLOC_DSS
    true
#else
    false
#endif
    ;
static const bool config_fill =
#ifdef JEMALLOC_FILL
    true
#else
    false
#endif
    ;
static const bool config_lazy_lock =
#ifdef JEMALLOC_LAZY_LOCK
    true
#else
    false
#endif
    ;
static const bool config_prof =
#ifdef JEMALLOC_PROF
    true
#else
    false
#endif
    ;
static const bool config_prof_libgcc =
#ifdef JEMALLOC_PROF_LIBGCC
    true
#else
    false
#endif
    ;
static const bool config_prof_libunwind =
#ifdef JEMALLOC_PROF_LIBUNWIND
    true
#else
    false
#endif
    ;
static const bool config_mremap =
#ifdef JEMALLOC_MREMAP
    true
#else
    false
#endif
    ;
static const bool config_munmap =
#ifdef JEMALLOC_MUNMAP
    true
#else
    false
#endif
    ;
static const bool config_stats =
#ifdef JEMALLOC_STATS
    true
#else
    false
#endif
    ;
static const bool config_tcache =
#ifdef JEMALLOC_TCACHE
    true
#else
    false
#endif
    ;
static const bool config_tls =
#ifdef JEMALLOC_TLS
    true
#else
    false
#endif
    ;
static const bool config_utrace =
#ifdef JEMALLOC_UTRACE
    true
#else
    false
#endif
    ;
static const bool config_valgrind =
#ifdef JEMALLOC_VALGRIND
    true
#else
    false
#endif
    ;
static const bool config_xmalloc =
#ifdef JEMALLOC_XMALLOC
    true
#else
    false
#endif
    ;
static const bool config_ivsalloc =
#ifdef JEMALLOC_IVSALLOC
    true
#else
    false
#endif
    ;

#ifdef JEMALLOC_ATOMIC9
#include <machine/atomic.h>
#endif

#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
#include <libkern/OSAtomic.h>
#endif

#ifdef JEMALLOC_ZONE
#include <mach/mach_error.h>
#include <mach/mach_init.h>
#include <mach/vm_map.h>
#include <malloc/malloc.h>
#endif

#define	RB_COMPACT
#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/qr.h"
#include "jemalloc/internal/ql.h"

/*
 * jemalloc can conceptually be broken into components (arena, tcache, etc.),
 * but there are circular dependencies that cannot be broken without
 * substantial performance degradation.  In order to reduce the effect on
 * visual code flow, read the header files in multiple passes, with one of the
 * following cpp variables defined during each pass:
 *
 *   JEMALLOC_H_TYPES   : Preprocessor-defined constants and pseudo-opaque data
 *                        types.
 *   JEMALLOC_H_STRUCTS : Data structures.
 *   JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
 *   JEMALLOC_H_INLINES : Inline functions.
 */
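/*
 * Illustrative sketch only (not the literal contents of any header): each
 * component header, e.g. arena.h, is laid out so that one section matches
 * each pass:
 *
 *   #ifdef JEMALLOC_H_TYPES
 *   typedef struct arena_s arena_t;      (pass 1: constants, opaque types)
 *   #endif
 *   #ifdef JEMALLOC_H_STRUCTS
 *   struct arena_s { ... };              (pass 2: full definitions)
 *   #endif
 *   #ifdef JEMALLOC_H_EXTERNS
 *   void arena_boot(void);               (pass 3: prototypes, extern data)
 *   #endif
 *   #ifdef JEMALLOC_H_INLINES
 *   JEMALLOC_INLINE helpers              (pass 4: inline functions)
 *   #endif
 */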
/******************************************************************************/
#define JEMALLOC_H_TYPES

#define	ALLOCM_LG_ALIGN_MASK	((int)0x3f)

#define	ZU(z)	((size_t)z)

#ifndef __DECONST
#  define	__DECONST(type, var)	((type)(uintptr_t)(const void *)(var))
#endif

#ifdef JEMALLOC_DEBUG
   /* Disable inlining to make debugging easier. */
#  define JEMALLOC_INLINE
#  define inline
#else
#  define JEMALLOC_ENABLE_INLINE
#  define JEMALLOC_INLINE static inline
#  ifdef _MSC_VER
#    define inline _inline
#  endif
#endif

/* Smallest size class to support. */
#define	LG_TINY_MIN		3
#define	TINY_MIN		(1U << LG_TINY_MIN)

/*
 * Minimum alignment of allocations is 2^LG_QUANTUM bytes (ignoring tiny size
 * classes).
 */
#ifndef LG_QUANTUM
#  if (defined(__i386__) || defined(_M_IX86))
#    define LG_QUANTUM		4
#  endif
#  ifdef __ia64__
#    define LG_QUANTUM		4
#  endif
#  ifdef __alpha__
#    define LG_QUANTUM		4
#  endif
#  ifdef __sparc64__
#    define LG_QUANTUM		4
#  endif
#  if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64))
#    define LG_QUANTUM		4
#  endif
#  ifdef __arm__
#    define LG_QUANTUM		3
#  endif
#  ifdef __mips__
#    define LG_QUANTUM		3
#  endif
#  ifdef __powerpc__
#    define LG_QUANTUM		4
#  endif
#  ifdef __s390x__
#    define LG_QUANTUM		4
#  endif
#  ifdef __SH4__
#    define LG_QUANTUM		4
#  endif
#  ifdef __tile__
#    define LG_QUANTUM		4
#  endif
#  ifndef LG_QUANTUM
#    error "No LG_QUANTUM definition for architecture; specify via CPPFLAGS"
#  endif
#endif

#define	QUANTUM			((size_t)(1U << LG_QUANTUM))
#define	QUANTUM_MASK		(QUANTUM - 1)

/* Return the smallest quantum multiple that is >= a. */
#define	QUANTUM_CEILING(a)						\
	(((a) + QUANTUM_MASK) & ~QUANTUM_MASK)
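/*
 * Example (illustrative only, assuming LG_QUANTUM == 4 so QUANTUM == 16):
 *   QUANTUM_CEILING(1)  == (1  + 15) & ~15 == 16
 *   QUANTUM_CEILING(16) == (16 + 15) & ~15 == 16
 *   QUANTUM_CEILING(17) == (17 + 15) & ~15 == 32
 * The same add-then-mask pattern is used by the other *_CEILING() macros
 * below.
 */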

#define	LONG			((size_t)(1U << LG_SIZEOF_LONG))
#define	LONG_MASK		(LONG - 1)

/* Return the smallest long multiple that is >= a. */
#define	LONG_CEILING(a)							\
	(((a) + LONG_MASK) & ~LONG_MASK)

#define	SIZEOF_PTR		(1U << LG_SIZEOF_PTR)
#define	PTR_MASK		(SIZEOF_PTR - 1)

/* Return the smallest (void *) multiple that is >= a. */
#define	PTR_CEILING(a)							\
	(((a) + PTR_MASK) & ~PTR_MASK)

/*
 * Maximum size of L1 cache line.  This is used to avoid cache line aliasing.
 * In addition, this controls the spacing of cacheline-spaced size classes.
 *
 * CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can
 * only handle raw constants.
 */
#define	LG_CACHELINE		6
#define	CACHELINE		64
#define	CACHELINE_MASK		(CACHELINE - 1)

/* Return the smallest cacheline multiple that is >= s. */
#define	CACHELINE_CEILING(s)						\
	(((s) + CACHELINE_MASK) & ~CACHELINE_MASK)

/* Page size.  STATIC_PAGE_SHIFT is determined by the configure script. */
#ifdef PAGE_MASK
#  undef PAGE_MASK
#endif
#define	LG_PAGE		STATIC_PAGE_SHIFT
#define	PAGE		((size_t)(1U << STATIC_PAGE_SHIFT))
#define	PAGE_MASK	((size_t)(PAGE - 1))

/* Return the smallest pagesize multiple that is >= s. */
#define	PAGE_CEILING(s)							\
	(((s) + PAGE_MASK) & ~PAGE_MASK)

/* Return the nearest aligned address at or below a. */
#define	ALIGNMENT_ADDR2BASE(a, alignment)				\
	((void *)((uintptr_t)(a) & (-(alignment))))

/* Return the offset between a and the nearest aligned address at or below a. */
#define	ALIGNMENT_ADDR2OFFSET(a, alignment)				\
	((size_t)((uintptr_t)(a) & (alignment - 1)))

/* Return the smallest alignment multiple that is >= s. */
#define	ALIGNMENT_CEILING(s, alignment)					\
	(((s) + (alignment - 1)) & (-(alignment)))
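/*
 * Example (illustrative only): with alignment == 64,
 *   ALIGNMENT_ADDR2BASE(0x1234, 64)   == (void *)0x1200
 *   ALIGNMENT_ADDR2OFFSET(0x1234, 64) == 0x34
 *   ALIGNMENT_CEILING(100, 64)        == 128
 * These macros assume alignment is a power of two, as asserted by callers
 * such as sa2u().
 */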

/* Declare a variable length array */
#if __STDC_VERSION__ < 199901L
#  ifdef _MSC_VER
#    include <malloc.h>
#    define alloca _alloca
#  else
#    include <alloca.h>
#  endif
#  define VARIABLE_ARRAY(type, name, count) \
	type *name = alloca(sizeof(type) * count)
#else
#  define VARIABLE_ARRAY(type, name, count) type name[count]
#endif
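/*
 * Typical usage (a sketch): declare a stack-backed scratch buffer whose
 * length is only known at run time, e.g.
 *   VARIABLE_ARRAY(size_t, offsets, count);
 * On C99 compilers this expands to a true VLA; otherwise it falls back to
 * alloca(), in which case the memory is released when the enclosing function
 * returns rather than when the enclosing block ends.
 */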

#ifdef JEMALLOC_VALGRIND
/*
 * The JEMALLOC_VALGRIND_*() macros must be macros rather than functions
 * so that when Valgrind reports errors, there are no extra stack frames
 * in the backtraces.
 *
 * The size that is reported to valgrind must be consistent through a chain of
 * malloc..realloc..realloc calls.  Request size isn't recorded anywhere in
 * jemalloc, so it is critical that all callers of these macros provide usize
 * rather than request size.  As a result, buffer overflow detection is
 * technically weakened for the standard API, though it is generally accepted
 * practice to consider any extra bytes reported by malloc_usable_size() as
 * usable space.
 */
#define	JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do {		\
	if (config_valgrind && opt_valgrind && cond)			\
		VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(ptr), zero);	\
} while (0)
#define	JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize,	\
    old_rzsize, zero)  do {						\
	if (config_valgrind && opt_valgrind) {				\
		size_t rzsize = p2rz(ptr);				\
									\
		if (ptr == old_ptr) {					\
			VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize,	\
			    usize, rzsize);				\
			if (zero && old_usize < usize) {		\
				VALGRIND_MAKE_MEM_DEFINED(		\
				    (void *)((uintptr_t)ptr +		\
				    old_usize), usize - old_usize);	\
			}						\
		} else {						\
			if (old_ptr != NULL) {				\
				VALGRIND_FREELIKE_BLOCK(old_ptr,	\
				    old_rzsize);			\
			}						\
			if (ptr != NULL) {				\
				size_t copy_size = (old_usize < usize)	\
				    ?  old_usize : usize;		\
				size_t tail_size = usize - copy_size;	\
				VALGRIND_MALLOCLIKE_BLOCK(ptr, usize,	\
				    rzsize, false);			\
				if (copy_size > 0) {			\
					VALGRIND_MAKE_MEM_DEFINED(ptr,	\
					    copy_size);			\
				}					\
				if (zero && tail_size > 0) {		\
					VALGRIND_MAKE_MEM_DEFINED(	\
					    (void *)((uintptr_t)ptr +	\
					    copy_size), tail_size);	\
				}					\
			}						\
		}							\
	}								\
} while (0)
#define	JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {			\
	if (config_valgrind && opt_valgrind)				\
		VALGRIND_FREELIKE_BLOCK(ptr, rzsize);			\
} while (0)
#else
#define	VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed)
#define	VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB)
#define	VALGRIND_FREELIKE_BLOCK(addr, rzB)
#define	VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr, _qzz_len)
#define	VALGRIND_MAKE_MEM_DEFINED(_qzz_addr, _qzz_len)
#define	JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero)
#define	JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize,	\
    old_rzsize, zero)
#define	JEMALLOC_VALGRIND_FREE(ptr, rzsize)
#endif
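/*
 * Usage sketch (illustrative, not the literal call sites): allocation paths
 * report the usable size, never the request size, so that a chain of
 * malloc/realloc/free events stays consistent for Valgrind:
 *
 *   void *p = imalloc(size);
 *   JEMALLOC_VALGRIND_MALLOC(p != NULL, p, isalloc(p, config_prof), false);
 *   ...
 *   size_t rzsize = p2rz(p);
 *   iqalloc(p);
 *   JEMALLOC_VALGRIND_FREE(p, rzsize);
 */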

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_TYPES
/******************************************************************************/
#define JEMALLOC_H_STRUCTS

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

typedef struct {
	uint64_t	allocated;
	uint64_t	deallocated;
} thread_allocated_t;
/*
 * The JEMALLOC_CONCAT() wrapper is necessary to pass {0, 0} via a cpp macro
 * argument.
 */
#define	THREAD_ALLOCATED_INITIALIZER	JEMALLOC_CONCAT({0, 0})

#undef JEMALLOC_H_STRUCTS
/******************************************************************************/
#define JEMALLOC_H_EXTERNS

extern bool	opt_abort;
extern bool	opt_junk;
extern size_t	opt_quarantine;
extern bool	opt_redzone;
extern bool	opt_utrace;
extern bool	opt_valgrind;
extern bool	opt_xmalloc;
extern bool	opt_zero;
extern size_t	opt_narenas;

/* Number of CPUs. */
extern unsigned		ncpus;

extern malloc_mutex_t	arenas_lock; /* Protects arenas initialization. */
/*
 * Arenas that are used to service external requests.  Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 */
extern arena_t		**arenas;
extern unsigned		narenas;

arena_t	*arenas_extend(unsigned ind);
void	arenas_cleanup(void *arg);
arena_t	*choose_arena_hard(void);
void	jemalloc_prefork(void);
void	jemalloc_postfork_parent(void);
void	jemalloc_postfork_child(void);

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_EXTERNS
/******************************************************************************/
#define JEMALLOC_H_INLINES

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"

#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), arenas, arena_t *)

size_t	s2u(size_t size);
size_t	sa2u(size_t size, size_t alignment);
arena_t	*choose_arena(arena_t *arena);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
/*
 * Map of pthread_self() --> arenas[???], used for selecting an arena to use
 * for allocations.
 */
malloc_tsd_externs(arenas, arena_t *)
malloc_tsd_funcs(JEMALLOC_INLINE, arenas, arena_t *, NULL, arenas_cleanup)

/*
 * Compute usable size that would result from allocating an object with the
 * specified size.
 */
JEMALLOC_INLINE size_t
s2u(size_t size)
{

	if (size <= SMALL_MAXCLASS)
		return (arena_bin_info[SMALL_SIZE2BIN(size)].reg_size);
	if (size <= arena_maxclass)
		return (PAGE_CEILING(size));
	return (CHUNK_CEILING(size));
}
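/*
 * Example (illustrative; actual class boundaries depend on size_classes.h and
 * the configured page size): on a common configuration with 4 KiB pages,
 * s2u(100) rounds up to the containing small size class (112), s2u(5000)
 * rounds up to the page multiple 8192, and a request larger than
 * arena_maxclass rounds up to a chunk multiple.
 */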

/*
 * Compute usable size that would result from allocating an object with the
 * specified size and alignment.
 */
JEMALLOC_INLINE size_t
sa2u(size_t size, size_t alignment)
{
	size_t usize;

	assert(alignment != 0 && ((alignment - 1) & alignment) == 0);

	/*
	 * Round size up to the nearest multiple of alignment.
	 *
	 * This done, we can take advantage of the fact that for each small
	 * size class, every object is aligned at the smallest power of two
	 * that is non-zero in the base two representation of the size.  For
	 * example:
	 *
	 *   Size |   Base 2 | Minimum alignment
	 *   -----+----------+------------------
	 *     96 |  1100000 |  32
	 *    144 | 10100000 |  32
	 *    192 | 11000000 |  64
	 */
	usize = ALIGNMENT_CEILING(size, alignment);
	/*
	 * (usize < size) protects against the combination of maximal
	 * alignment and size greater than maximal alignment.
	 */
	if (usize < size) {
		/* size_t overflow. */
		return (0);
	}

	if (usize <= arena_maxclass && alignment <= PAGE) {
		if (usize <= SMALL_MAXCLASS)
			return (arena_bin_info[SMALL_SIZE2BIN(usize)].reg_size);
		return (PAGE_CEILING(usize));
	} else {
		size_t run_size;

		/*
		 * We can't achieve subpage alignment, so round up alignment
		 * permanently; it makes later calculations simpler.
		 */
		alignment = PAGE_CEILING(alignment);
		usize = PAGE_CEILING(size);
		/*
		 * (usize < size) protects against very large sizes within
		 * PAGE of SIZE_T_MAX.
		 *
		 * (usize + alignment < usize) protects against the
		 * combination of maximal alignment and usize large enough
		 * to cause overflow.  This is similar to the first overflow
		 * check above, but it needs to be repeated due to the new
		 * usize value, which may now be *equal* to maximal
		 * alignment, whereas before we only detected overflow if the
		 * original size was *greater* than maximal alignment.
		 */
		if (usize < size || usize + alignment < usize) {
			/* size_t overflow. */
			return (0);
		}

		/*
		 * Calculate the size of the over-size run that arena_palloc()
		 * would need to allocate in order to guarantee the alignment.
		 * If the run wouldn't fit within a chunk, round up to a huge
		 * allocation size.
		 */
		run_size = usize + alignment - PAGE;
		if (run_size <= arena_maxclass)
			return (PAGE_CEILING(usize));
		return (CHUNK_CEILING(usize));
	}
}

/* Choose an arena based on a per-thread value. */
JEMALLOC_INLINE arena_t *
choose_arena(arena_t *arena)
{
	arena_t *ret;

	if (arena != NULL)
		return (arena);

	if ((ret = *arenas_tsd_get()) == NULL) {
		ret = choose_arena_hard();
		assert(ret != NULL);
	}

	return (ret);
}
#endif

#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/rtree.h"
/*
 * Include arena.h twice in order to resolve circular dependencies with
 * tcache.h.
 */
#define	JEMALLOC_ARENA_INLINE_A
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_A
#include "jemalloc/internal/tcache.h"
#define	JEMALLOC_ARENA_INLINE_B
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_B
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"

#ifndef JEMALLOC_ENABLE_INLINE
void	*imalloc(size_t size);
void	*icalloc(size_t size);
void	*ipalloc(size_t usize, size_t alignment, bool zero);
size_t	isalloc(const void *ptr, bool demote);
size_t	ivsalloc(const void *ptr, bool demote);
size_t	u2rz(size_t usize);
size_t	p2rz(const void *ptr);
void	idalloc(void *ptr);
void	iqalloc(void *ptr);
void	*iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
    bool zero, bool no_move);
malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t)
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_INLINE void *
imalloc(size_t size)
{

	assert(size != 0);

	if (size <= arena_maxclass)
		return (arena_malloc(NULL, size, false, true));
	else
		return (huge_malloc(size, false));
}

JEMALLOC_INLINE void *
icalloc(size_t size)
{

	if (size <= arena_maxclass)
		return (arena_malloc(NULL, size, true, true));
	else
		return (huge_malloc(size, true));
}

JEMALLOC_INLINE void *
ipalloc(size_t usize, size_t alignment, bool zero)
{
	void *ret;

	assert(usize != 0);
	assert(usize == sa2u(usize, alignment));

	if (usize <= arena_maxclass && alignment <= PAGE)
		ret = arena_malloc(NULL, usize, zero, true);
	else {
		if (usize <= arena_maxclass) {
			ret = arena_palloc(choose_arena(NULL), usize, alignment,
			    zero);
		} else if (alignment <= chunksize)
			ret = huge_malloc(usize, zero);
		else
			ret = huge_palloc(usize, alignment, zero);
	}

	assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
	return (ret);
}
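/*
 * Typical caller pattern (a sketch; see the aligned-allocation paths in
 * src/jemalloc.c for the real logic): the caller converts (size, alignment)
 * to a usable size with sa2u(), checks for overflow, and only then calls
 * ipalloc(), which is why the assertion above can require
 * usize == sa2u(usize, alignment):
 *
 *   size_t usize = sa2u(size, alignment);
 *   if (usize == 0)
 *       return (NULL);       (size_t overflow)
 *   void *p = ipalloc(usize, alignment, false);
 */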

/*
 * Typical usage:
 *   void *ptr = [...]
 *   size_t sz = isalloc(ptr, config_prof);
 */
JEMALLOC_INLINE size_t
isalloc(const void *ptr, bool demote)
{
	size_t ret;
	arena_chunk_t *chunk;

	assert(ptr != NULL);
	/* Demotion only makes sense if config_prof is true. */
	assert(config_prof || demote == false);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr)
		ret = arena_salloc(ptr, demote);
	else
		ret = huge_salloc(ptr);

	return (ret);
}

JEMALLOC_INLINE size_t
ivsalloc(const void *ptr, bool demote)
{

	/* Return 0 if ptr is not within a chunk managed by jemalloc. */
	if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == NULL)
		return (0);

	return (isalloc(ptr, demote));
}

JEMALLOC_INLINE size_t
u2rz(size_t usize)
{
	size_t ret;

	if (usize <= SMALL_MAXCLASS) {
		size_t binind = SMALL_SIZE2BIN(usize);
		ret = arena_bin_info[binind].redzone_size;
	} else
		ret = 0;

	return (ret);
}

JEMALLOC_INLINE size_t
p2rz(const void *ptr)
{
	size_t usize = isalloc(ptr, false);

	return (u2rz(usize));
}

JEMALLOC_INLINE void
idalloc(void *ptr)
{
	arena_chunk_t *chunk;

	assert(ptr != NULL);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr)
		arena_dalloc(chunk->arena, chunk, ptr, true);
	else
		huge_dalloc(ptr, true);
}

JEMALLOC_INLINE void
iqalloc(void *ptr)
{

	if (config_fill && opt_quarantine)
		quarantine(ptr);
	else
		idalloc(ptr);
}

JEMALLOC_INLINE void *
iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
    bool no_move)
{
	void *ret;
	size_t oldsize;

	assert(ptr != NULL);
	assert(size != 0);

	oldsize = isalloc(ptr, config_prof);

	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
	    != 0) {
		size_t usize, copysize;

		/*
		 * Existing object alignment is inadequate; allocate new space
		 * and copy.
		 */
		if (no_move)
			return (NULL);
		usize = sa2u(size + extra, alignment);
		if (usize == 0)
			return (NULL);
		ret = ipalloc(usize, alignment, zero);
		if (ret == NULL) {
			if (extra == 0)
				return (NULL);
			/* Try again, without extra this time. */
			usize = sa2u(size, alignment);
			if (usize == 0)
				return (NULL);
			ret = ipalloc(usize, alignment, zero);
			if (ret == NULL)
				return (NULL);
		}
		/*
		 * Copy at most size bytes (not size+extra), since the caller
		 * has no expectation that the extra bytes will be reliably
		 * preserved.
		 */
		copysize = (size < oldsize) ? size : oldsize;
		memcpy(ret, ptr, copysize);
		iqalloc(ptr);
		return (ret);
	}

	if (no_move) {
		if (size <= arena_maxclass) {
			return (arena_ralloc_no_move(ptr, oldsize, size,
			    extra, zero));
		} else {
			return (huge_ralloc_no_move(ptr, oldsize, size,
			    extra));
		}
	} else {
		if (size + extra <= arena_maxclass) {
			return (arena_ralloc(ptr, oldsize, size, extra,
			    alignment, zero, true));
		} else {
			return (huge_ralloc(ptr, oldsize, size, extra,
			    alignment, zero));
		}
	}
}
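/*
 * Parameter sketch (illustrative): a plain realloc-style resize passes
 * alignment == 0, extra == 0, no_move == false,
 *
 *   void *q = iralloc(p, new_size, 0, 0, false, false);
 *
 * whereas the rallocm()-style paths may pass a nonzero alignment, a nonzero
 * extra (grow opportunistically beyond new_size), or no_move == true to
 * forbid relocation, in which case NULL means the object could not be
 * resized in place.
 */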

malloc_tsd_externs(thread_allocated, thread_allocated_t)
malloc_tsd_funcs(JEMALLOC_INLINE, thread_allocated, thread_allocated_t,
    THREAD_ALLOCATED_INITIALIZER, malloc_tsd_no_cleanup)
#endif

#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_INLINES
/******************************************************************************/
#endif /* JEMALLOC_INTERNAL_H */