/* jemalloc_internal.h, revision 5daf4e4a8d52ac2d5b40b0d12ce5721c6b9676e7. */
#ifndef JEMALLOC_INTERNAL_H
#define	JEMALLOC_INTERNAL_H
#include <math.h>
#ifdef _WIN32
#  include <windows.h>
#  define ENOENT ERROR_PATH_NOT_FOUND
#  define EINVAL ERROR_BAD_ARGUMENTS
#  define EAGAIN ERROR_OUTOFMEMORY
#  define EPERM  ERROR_WRITE_FAULT
#  define EFAULT ERROR_INVALID_ADDRESS
#  define ENOMEM ERROR_NOT_ENOUGH_MEMORY
#  undef ERANGE
#  define ERANGE ERROR_INVALID_DATA
#else
#  include <sys/param.h>
#  include <sys/mman.h>
#  include <sys/syscall.h>
#  if !defined(SYS_write) && defined(__NR_write)
#    define SYS_write __NR_write
#  endif
#  include <sys/uio.h>
#  include <pthread.h>
#  include <errno.h>
#endif
#include <sys/types.h>

#include <limits.h>
#ifndef SIZE_T_MAX
#  define SIZE_T_MAX	SIZE_MAX
#endif
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>
#ifndef offsetof
#  define offsetof(type, member)	((size_t)&(((type *)NULL)->member))
#endif
#include <inttypes.h>
#include <string.h>
#include <strings.h>
#include <ctype.h>
#ifdef _MSC_VER
#  include <io.h>
typedef intptr_t ssize_t;
#  define PATH_MAX 1024
#  define STDERR_FILENO 2
#  define __func__ __FUNCTION__
/* Disable warnings about deprecated system functions */
#  pragma warning(disable: 4996)
#else
#  include <unistd.h>
#endif
#include <fcntl.h>

#include "jemalloc_internal_defs.h"

#ifdef JEMALLOC_UTRACE
#include <sys/ktrace.h>
#endif

#define	JEMALLOC_NO_DEMANGLE
#ifdef JEMALLOC_JET
#  define JEMALLOC_N(n) jet_##n
#  include "jemalloc/internal/public_namespace.h"
#  define JEMALLOC_NO_RENAME
#  include "../jemalloc.h"
#  undef JEMALLOC_NO_RENAME
#else
#  define JEMALLOC_N(n) je_##n
#  include "../jemalloc.h"
#endif
#include "jemalloc/internal/private_namespace.h"

static const bool config_debug =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
static const bool have_dss =
#ifdef JEMALLOC_DSS
    true
#else
    false
#endif
    ;
static const bool config_fill =
#ifdef JEMALLOC_FILL
    true
#else
    false
#endif
    ;
static const bool config_lazy_lock =
#ifdef JEMALLOC_LAZY_LOCK
    true
#else
    false
#endif
    ;
static const bool config_prof =
#ifdef JEMALLOC_PROF
    true
#else
    false
#endif
    ;
static const bool config_prof_libgcc =
#ifdef JEMALLOC_PROF_LIBGCC
    true
#else
    false
#endif
    ;
static const bool config_prof_libunwind =
#ifdef JEMALLOC_PROF_LIBUNWIND
    true
#else
    false
#endif
    ;
static const bool config_mremap =
#ifdef JEMALLOC_MREMAP
    true
#else
    false
#endif
    ;
static const bool config_munmap =
#ifdef JEMALLOC_MUNMAP
    true
#else
    false
#endif
    ;
static const bool config_stats =
#ifdef JEMALLOC_STATS
    true
#else
    false
#endif
    ;
static const bool config_tcache =
#ifdef JEMALLOC_TCACHE
    true
#else
    false
#endif
    ;
static const bool config_tls =
#ifdef JEMALLOC_TLS
    true
#else
    false
#endif
    ;
static const bool config_utrace =
#ifdef JEMALLOC_UTRACE
    true
#else
    false
#endif
    ;
static const bool config_valgrind =
#ifdef JEMALLOC_VALGRIND
    true
#else
    false
#endif
    ;
static const bool config_xmalloc =
#ifdef JEMALLOC_XMALLOC
    true
#else
    false
#endif
    ;
static const bool config_ivsalloc =
#ifdef JEMALLOC_IVSALLOC
    true
#else
    false
#endif
    ;

#ifdef JEMALLOC_ATOMIC9
#include <machine/atomic.h>
#endif

#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
#include <libkern/OSAtomic.h>
#endif

#ifdef JEMALLOC_ZONE
#include <mach/mach_error.h>
#include <mach/mach_init.h>
#include <mach/vm_map.h>
#include <malloc/malloc.h>
#endif

#define	RB_COMPACT
#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/qr.h"
#include "jemalloc/internal/ql.h"

/*
 * jemalloc can conceptually be broken into components (arena, tcache, etc.),
 * but there are circular dependencies that cannot be broken without
 * substantial performance degradation.  In order to reduce the effect on
 * visual code flow, read the header files in multiple passes, with one of the
 * following cpp variables defined during each pass:
 *
 *   JEMALLOC_H_TYPES   : Preprocessor-defined constants and pseudo-opaque data
 *                        types.
 *   JEMALLOC_H_STRUCTS : Data structures.
 *   JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
 *   JEMALLOC_H_INLINES : Inline functions.
 */
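/*
 * For example, a component header written to this convention is laid out
 * roughly as follows (an illustrative sketch only; "foo" is a hypothetical
 * component, not an actual jemalloc header):
 *
 *   #ifdef JEMALLOC_H_TYPES
 *   typedef struct foo_s foo_t;
 *   #endif
 *   #ifdef JEMALLOC_H_STRUCTS
 *   struct foo_s {
 *   	foo_t	*next;
 *   };
 *   #endif
 *   #ifdef JEMALLOC_H_EXTERNS
 *   bool	foo_boot(void);
 *   #endif
 *   #ifdef JEMALLOC_H_INLINES
 *   JEMALLOC_INLINE foo_t	*foo_next(foo_t *foo);
 *   #endif
 */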
/******************************************************************************/
#define	JEMALLOC_H_TYPES

#include "jemalloc/internal/jemalloc_internal_macros.h"

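/*
 * Mask of the bits in the flags argument to mallocx() and related functions
 * that encode the requested lg(alignment), as set via MALLOCX_LG_ALIGN() in
 * the public jemalloc.h.
 */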
#define	MALLOCX_LG_ALIGN_MASK	((int)0x3f)

/* Smallest size class to support. */
#define	LG_TINY_MIN		3
#define	TINY_MIN		(1U << LG_TINY_MIN)

/*
 * Minimum alignment of allocations is 2^LG_QUANTUM bytes (ignoring tiny size
 * classes).
 */
#ifndef LG_QUANTUM
#  if (defined(__i386__) || defined(_M_IX86))
#    define LG_QUANTUM		4
#  endif
#  ifdef __ia64__
#    define LG_QUANTUM		4
#  endif
#  ifdef __alpha__
#    define LG_QUANTUM		4
#  endif
#  ifdef __sparc64__
#    define LG_QUANTUM		4
#  endif
#  if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64))
#    define LG_QUANTUM		4
#  endif
#  ifdef __arm__
#    define LG_QUANTUM		3
#  endif
#  ifdef __aarch64__
#    define LG_QUANTUM		4
#  endif
#  ifdef __hppa__
#    define LG_QUANTUM		4
#  endif
#  ifdef __mips__
#    define LG_QUANTUM		3
#  endif
#  ifdef __powerpc__
#    define LG_QUANTUM		4
#  endif
#  ifdef __s390__
#    define LG_QUANTUM		4
#  endif
#  ifdef __SH4__
#    define LG_QUANTUM		4
#  endif
#  ifdef __tile__
#    define LG_QUANTUM		4
#  endif
#  ifndef LG_QUANTUM
#    error "No LG_QUANTUM definition for architecture; specify via CPPFLAGS"
#  endif
#endif

#define	QUANTUM			((size_t)(1U << LG_QUANTUM))
#define	QUANTUM_MASK		(QUANTUM - 1)

/* Return the smallest quantum multiple that is >= a. */
#define	QUANTUM_CEILING(a)						\
	(((a) + QUANTUM_MASK) & ~QUANTUM_MASK)
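/*
 * For example, with LG_QUANTUM == 4 (QUANTUM == 16):
 *
 *   QUANTUM_CEILING(1)  == 16
 *   QUANTUM_CEILING(17) == (17 + 15) & ~15 == 32
 *   QUANTUM_CEILING(32) == 32
 *
 * The LONG_, PTR_, CACHELINE_, and PAGE_CEILING() macros below follow the
 * same power-of-two rounding pattern.
 */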

#define	LONG			((size_t)(1U << LG_SIZEOF_LONG))
#define	LONG_MASK		(LONG - 1)

/* Return the smallest long multiple that is >= a. */
#define	LONG_CEILING(a)							\
	(((a) + LONG_MASK) & ~LONG_MASK)

#define	SIZEOF_PTR		(1U << LG_SIZEOF_PTR)
#define	PTR_MASK		(SIZEOF_PTR - 1)

/* Return the smallest (void *) multiple that is >= a. */
#define	PTR_CEILING(a)							\
	(((a) + PTR_MASK) & ~PTR_MASK)

/*
 * Maximum size of L1 cache line.  This is used to avoid cache line aliasing.
 * In addition, this controls the spacing of cacheline-spaced size classes.
 *
 * CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can
 * only handle raw constants.
 */
#define	LG_CACHELINE		6
#define	CACHELINE		64
#define	CACHELINE_MASK		(CACHELINE - 1)

/* Return the smallest cacheline multiple that is >= s. */
#define	CACHELINE_CEILING(s)						\
	(((s) + CACHELINE_MASK) & ~CACHELINE_MASK)

/* Page size.  STATIC_PAGE_SHIFT is determined by the configure script. */
#ifdef PAGE_MASK
#  undef PAGE_MASK
#endif
#define	LG_PAGE		STATIC_PAGE_SHIFT
#define	PAGE		((size_t)(1U << STATIC_PAGE_SHIFT))
#define	PAGE_MASK	((size_t)(PAGE - 1))

/* Return the smallest pagesize multiple that is >= s. */
#define	PAGE_CEILING(s)							\
	(((s) + PAGE_MASK) & ~PAGE_MASK)

/* Return the nearest aligned address at or below a. */
#define	ALIGNMENT_ADDR2BASE(a, alignment)				\
	((void *)((uintptr_t)(a) & (-(alignment))))

/* Return the offset between a and the nearest aligned address at or below a. */
#define	ALIGNMENT_ADDR2OFFSET(a, alignment)				\
	((size_t)((uintptr_t)(a) & (alignment - 1)))

/* Return the smallest alignment multiple that is >= s. */
#define	ALIGNMENT_CEILING(s, alignment)					\
	(((s) + (alignment - 1)) & (-(alignment)))
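/*
 * For example, with alignment == 0x100:
 *
 *   ALIGNMENT_ADDR2BASE(0x1234, 0x100)   == (void *)0x1200
 *   ALIGNMENT_ADDR2OFFSET(0x1234, 0x100) == 0x34
 *   ALIGNMENT_CEILING(0x1234, 0x100)     == 0x1300
 */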

/* Declare a variable-length array. */
#if __STDC_VERSION__ < 199901L
#  ifdef _MSC_VER
#    include <malloc.h>
#    define alloca _alloca
#  else
#    ifdef JEMALLOC_HAS_ALLOCA_H
#      include <alloca.h>
#    else
#      include <stdlib.h>
#    endif
#  endif
#  define VARIABLE_ARRAY(type, name, count) \
	type *name = alloca(sizeof(type) * (count))
#else
#  define VARIABLE_ARRAY(type, name, count) type name[(count)]
#endif
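/*
 * For example (a hypothetical caller):
 *
 *   VARIABLE_ARRAY(uint8_t, buf, nbytes);
 *
 * declares buf as a C99 VLA where available, and as a pointer to alloca()ed
 * storage otherwise.  In both cases the storage is automatically released
 * (at block exit for a VLA, at function return for alloca()), so it must not
 * escape the declaring function.
 */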

#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_TYPES
/******************************************************************************/
#define	JEMALLOC_H_STRUCTS

#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

typedef struct {
	uint64_t	allocated;
	uint64_t	deallocated;
} thread_allocated_t;
/*
 * The JEMALLOC_ARG_CONCAT() wrapper is necessary to pass {0, 0} via a cpp macro
 * argument.
 */
#define	THREAD_ALLOCATED_INITIALIZER	JEMALLOC_ARG_CONCAT({0, 0})
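/*
 * Without the wrapper, cpp would split the braced initializer at its comma;
 * e.g. FOO({0, 0}) would be parsed as a two-argument invocation of a
 * hypothetical macro FOO, with arguments "{0" and "0}".
 */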

#undef JEMALLOC_H_STRUCTS
/******************************************************************************/
#define	JEMALLOC_H_EXTERNS

extern bool	opt_abort;
extern bool	opt_junk;
extern size_t	opt_quarantine;
extern bool	opt_redzone;
extern bool	opt_utrace;
extern bool	opt_xmalloc;
extern bool	opt_zero;
extern size_t	opt_narenas;

extern bool	in_valgrind;

/* Number of CPUs. */
extern unsigned		ncpus;

/* Protects arenas initialization (arenas, narenas_total). */
extern malloc_mutex_t	arenas_lock;
/*
 * Arenas that are used to service external requests.  Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 *
 * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
 * arenas.  arenas[narenas_auto..narenas_total) are only used if the application
 * takes some action to create them and allocate from them.
 */
extern arena_t		**arenas;
extern unsigned		narenas_total;
extern unsigned		narenas_auto; /* Read-only after initialization. */

arena_t	*arenas_extend(unsigned ind);
void	arenas_cleanup(void *arg);
arena_t	*choose_arena_hard(void);
void	jemalloc_prefork(void);
void	jemalloc_postfork_parent(void);
void	jemalloc_postfork_child(void);

#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_EXTERNS
/******************************************************************************/
#define	JEMALLOC_H_INLINES

#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"

/*
 * Include arena.h the first time in order to provide inline functions for this
 * header's inlines.
 */
#define	JEMALLOC_ARENA_INLINE_A
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_A

#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), arenas, arena_t *)

size_t	s2u(size_t size);
size_t	sa2u(size_t size, size_t alignment);
unsigned	narenas_total_get(void);
arena_t	*choose_arena(arena_t *arena);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
/*
 * Map of pthread_self() --> arenas[???], used for selecting an arena to use
 * for allocations.
 */
malloc_tsd_externs(arenas, arena_t *)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, arenas, arena_t *, NULL,
    arenas_cleanup)

/*
 * Compute usable size that would result from allocating an object with the
 * specified size.
 */
JEMALLOC_ALWAYS_INLINE size_t
s2u(size_t size)
{

	if (size <= SMALL_MAXCLASS)
		return (small_bin2size(small_size2bin(size)));
	if (size <= arena_maxclass)
		return (PAGE_CEILING(size));
	return (CHUNK_CEILING(size));
}
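/*
 * For example, under the default 4 KiB page configuration (so that
 * SMALL_MAXCLASS < 5000 <= arena_maxclass):
 *
 *   s2u(5000) == PAGE_CEILING(5000) == 8192
 */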

/*
 * Compute usable size that would result from allocating an object with the
 * specified size and alignment.
 */
JEMALLOC_ALWAYS_INLINE size_t
sa2u(size_t size, size_t alignment)
{
	size_t usize;

	assert(alignment != 0 && ((alignment - 1) & alignment) == 0);

	/*
	 * Round size up to the nearest multiple of alignment.
	 *
	 * This done, we can take advantage of the fact that for each small
	 * size class, every object is aligned at the smallest power of two
	 * that is non-zero in the base two representation of the size.  For
	 * example:
	 *
	 *   Size |   Base 2 | Minimum alignment
	 *   -----+----------+------------------
	 *     96 |  1100000 |  32
	 *    160 | 10100000 |  32
	 *    192 | 11000000 |  64
	 */
	usize = ALIGNMENT_CEILING(size, alignment);
	/*
	 * (usize < size) protects against the combination of maximal
	 * alignment and size greater than maximal alignment.
	 */
	if (usize < size) {
		/* size_t overflow. */
		return (0);
	}

	if (usize <= arena_maxclass && alignment <= PAGE) {
		if (usize <= SMALL_MAXCLASS)
			return (small_bin2size(small_size2bin(usize)));
		return (PAGE_CEILING(usize));
	} else {
		size_t run_size;

		/*
		 * We can't achieve subpage alignment, so round up alignment
		 * permanently; it makes later calculations simpler.
		 */
		alignment = PAGE_CEILING(alignment);
		usize = PAGE_CEILING(size);
		/*
		 * (usize < size) protects against very large sizes within
		 * PAGE of SIZE_T_MAX.
		 *
		 * (usize + alignment < usize) protects against the
		 * combination of maximal alignment and usize large enough
		 * to cause overflow.  This is similar to the first overflow
		 * check above, but it needs to be repeated due to the new
		 * usize value, which may now be *equal* to maximal
		 * alignment, whereas before we only detected overflow if the
		 * original size was *greater* than maximal alignment.
		 */
		if (usize < size || usize + alignment < usize) {
			/* size_t overflow. */
			return (0);
		}

		/*
		 * Calculate the size of the over-size run that arena_palloc()
		 * would need to allocate in order to guarantee the alignment.
		 * If the run wouldn't fit within a chunk, round up to a huge
		 * allocation size.
		 */
		run_size = usize + alignment - PAGE;
		if (run_size <= arena_maxclass)
			return (PAGE_CEILING(usize));
		return (CHUNK_CEILING(usize));
	}
}
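/*
 * For example, sa2u(100, 64) rounds 100 up to the nearest multiple of the
 * alignment (128); 128 fits in a small size class and 64 <= PAGE, so the
 * result is the usable size of the small class that holds 128 bytes (128
 * itself under the default size class configuration).
 */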

JEMALLOC_INLINE unsigned
narenas_total_get(void)
{
	unsigned narenas;

	malloc_mutex_lock(&arenas_lock);
	narenas = narenas_total;
	malloc_mutex_unlock(&arenas_lock);

	return (narenas);
}

/* Choose an arena based on a per-thread value. */
JEMALLOC_INLINE arena_t *
choose_arena(arena_t *arena)
{
	arena_t *ret;

	if (arena != NULL)
		return (arena);

	if ((ret = *arenas_tsd_get()) == NULL) {
		ret = choose_arena_hard();
		assert(ret != NULL);
	}

	return (ret);
}
#endif

#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/rtree.h"
/*
 * Include arena.h the second and third times in order to resolve circular
 * dependencies with tcache.h.
 */
#define	JEMALLOC_ARENA_INLINE_B
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_B
#include "jemalloc/internal/tcache.h"
#define	JEMALLOC_ARENA_INLINE_C
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_C
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"

#ifndef JEMALLOC_ENABLE_INLINE
void	*imalloct(size_t size, bool try_tcache, arena_t *arena);
void	*imalloc(size_t size);
void	*icalloct(size_t size, bool try_tcache, arena_t *arena);
void	*icalloc(size_t size);
void	*ipalloct(size_t usize, size_t alignment, bool zero, bool try_tcache,
    arena_t *arena);
void	*ipalloc(size_t usize, size_t alignment, bool zero);
size_t	isalloc(const void *ptr, bool demote);
size_t	ivsalloc(const void *ptr, bool demote);
size_t	u2rz(size_t usize);
size_t	p2rz(const void *ptr);
void	idalloct(void *ptr, bool try_tcache);
void	idalloc(void *ptr);
void	iqalloct(void *ptr, bool try_tcache);
void	iqalloc(void *ptr);
void	*iralloct_realign(void *ptr, size_t oldsize, size_t size, size_t extra,
    size_t alignment, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc,
    arena_t *arena);
void	*iralloct(void *ptr, size_t size, size_t extra, size_t alignment,
    bool zero, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena);
void	*iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
    bool zero);
bool	ixalloc(void *ptr, size_t size, size_t extra, size_t alignment,
    bool zero);
malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t)
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_ALWAYS_INLINE void *
imalloct(size_t size, bool try_tcache, arena_t *arena)
{

	assert(size != 0);

	if (size <= arena_maxclass)
		return (arena_malloc(arena, size, false, try_tcache));
	else
		return (huge_malloc(size, false, huge_dss_prec_get(arena)));
}

JEMALLOC_ALWAYS_INLINE void *
imalloc(size_t size)
{

	return (imalloct(size, true, NULL));
}

JEMALLOC_ALWAYS_INLINE void *
icalloct(size_t size, bool try_tcache, arena_t *arena)
{

	if (size <= arena_maxclass)
		return (arena_malloc(arena, size, true, try_tcache));
	else
		return (huge_malloc(size, true, huge_dss_prec_get(arena)));
}

JEMALLOC_ALWAYS_INLINE void *
icalloc(size_t size)
{

	return (icalloct(size, true, NULL));
}

JEMALLOC_ALWAYS_INLINE void *
ipalloct(size_t usize, size_t alignment, bool zero, bool try_tcache,
    arena_t *arena)
{
	void *ret;

	assert(usize != 0);
	assert(usize == sa2u(usize, alignment));

	if (usize <= arena_maxclass && alignment <= PAGE)
		ret = arena_malloc(arena, usize, zero, try_tcache);
	else {
		if (usize <= arena_maxclass) {
			ret = arena_palloc(choose_arena(arena), usize,
			    alignment, zero);
		} else if (alignment <= chunksize)
			ret = huge_malloc(usize, zero, huge_dss_prec_get(arena));
		else
			ret = huge_palloc(usize, alignment, zero, huge_dss_prec_get(arena));
	}

	assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
	return (ret);
}

JEMALLOC_ALWAYS_INLINE void *
ipalloc(size_t usize, size_t alignment, bool zero)
{

	return (ipalloct(usize, alignment, zero, true, NULL));
}

/*
 * Typical usage:
 *   void *ptr = [...]
 *   size_t sz = isalloc(ptr, config_prof);
 */
JEMALLOC_ALWAYS_INLINE size_t
isalloc(const void *ptr, bool demote)
{
	size_t ret;
	arena_chunk_t *chunk;

	assert(ptr != NULL);
	/* Demotion only makes sense if config_prof is true. */
	assert(config_prof || demote == false);

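	/*
	 * Chunks are chunksize-aligned, and a huge allocation begins exactly
	 * at a chunk base, so a pointer that coincides with its chunk base
	 * must be huge; a small/large pointer instead points into the
	 * interior of an arena chunk, past the chunk header.
	 */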
	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr)
		ret = arena_salloc(ptr, demote);
	else
		ret = huge_salloc(ptr);

	return (ret);
}

JEMALLOC_ALWAYS_INLINE size_t
ivsalloc(const void *ptr, bool demote)
{

	/* Return 0 if ptr is not within a chunk managed by jemalloc. */
	if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == 0)
		return (0);

	return (isalloc(ptr, demote));
}

JEMALLOC_INLINE size_t
u2rz(size_t usize)
{
	size_t ret;

	if (usize <= SMALL_MAXCLASS) {
		size_t binind = small_size2bin(usize);
		ret = arena_bin_info[binind].redzone_size;
	} else
		ret = 0;

	return (ret);
}

JEMALLOC_INLINE size_t
p2rz(const void *ptr)
{
	size_t usize = isalloc(ptr, false);

	return (u2rz(usize));
}

JEMALLOC_ALWAYS_INLINE void
idalloct(void *ptr, bool try_tcache)
{
	arena_chunk_t *chunk;

	assert(ptr != NULL);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr)
		arena_dalloc(chunk, ptr, try_tcache);
	else
		huge_dalloc(ptr, true);
}

JEMALLOC_ALWAYS_INLINE void
idalloc(void *ptr)
{

	idalloct(ptr, true);
}

JEMALLOC_ALWAYS_INLINE void
iqalloct(void *ptr, bool try_tcache)
{

	if (config_fill && opt_quarantine)
		quarantine(ptr);
	else
		idalloct(ptr, try_tcache);
}

JEMALLOC_ALWAYS_INLINE void
iqalloc(void *ptr)
{

	iqalloct(ptr, true);
}

JEMALLOC_ALWAYS_INLINE void *
iralloct_realign(void *ptr, size_t oldsize, size_t size, size_t extra,
    size_t alignment, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc,
    arena_t *arena)
{
	void *p;
	size_t usize, copysize;

	usize = sa2u(size + extra, alignment);
	if (usize == 0)
		return (NULL);
	p = ipalloct(usize, alignment, zero, try_tcache_alloc, arena);
	if (p == NULL) {
		if (extra == 0)
			return (NULL);
		/* Try again, without extra this time. */
		usize = sa2u(size, alignment);
		if (usize == 0)
			return (NULL);
		p = ipalloct(usize, alignment, zero, try_tcache_alloc, arena);
		if (p == NULL)
			return (NULL);
	}
	/*
	 * Copy at most size bytes (not size+extra), since the caller has no
	 * expectation that the extra bytes will be reliably preserved.
	 */
	copysize = (size < oldsize) ? size : oldsize;
	memcpy(p, ptr, copysize);
	iqalloct(ptr, try_tcache_dalloc);
	return (p);
}

JEMALLOC_ALWAYS_INLINE void *
iralloct(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
    bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena)
{
	size_t oldsize;

	assert(ptr != NULL);
	assert(size != 0);

	oldsize = isalloc(ptr, config_prof);

	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
	    != 0) {
		/*
		 * Existing object alignment is inadequate; allocate new space
		 * and copy.
		 */
		return (iralloct_realign(ptr, oldsize, size, extra, alignment,
		    zero, try_tcache_alloc, try_tcache_dalloc, arena));
	}

	if (size + extra <= arena_maxclass) {
		return (arena_ralloc(arena, ptr, oldsize, size, extra,
		    alignment, zero, try_tcache_alloc,
		    try_tcache_dalloc));
	} else {
		return (huge_ralloc(ptr, oldsize, size, extra,
		    alignment, zero, try_tcache_dalloc, huge_dss_prec_get(arena)));
	}
}

JEMALLOC_ALWAYS_INLINE void *
iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero)
{

	return (iralloct(ptr, size, extra, alignment, zero, true, true, NULL));
}

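/*
 * Resize the allocation at ptr in place if possible.  Following the
 * convention of the *_ralloc_no_move() functions it wraps, ixalloc() returns
 * false on success, or true if the resize could not be performed without
 * moving the object.
 */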
JEMALLOC_ALWAYS_INLINE bool
ixalloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero)
{
	size_t oldsize;

	assert(ptr != NULL);
	assert(size != 0);

	oldsize = isalloc(ptr, config_prof);
	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
	    != 0) {
		/* Existing object alignment is inadequate. */
		return (true);
	}

	if (size <= arena_maxclass)
		return (arena_ralloc_no_move(ptr, oldsize, size, extra, zero));
	else
		return (huge_ralloc_no_move(ptr, oldsize, size, extra));
}

malloc_tsd_externs(thread_allocated, thread_allocated_t)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, thread_allocated, thread_allocated_t,
    THREAD_ALLOCATED_INITIALIZER, malloc_tsd_no_cleanup)
#endif

#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_INLINES
/******************************************************************************/
#endif /* JEMALLOC_INTERNAL_H */