jemalloc.c revision b2c0d6322d2307458ae2b28545f8a5c9903d7ef5
1#define	JEMALLOC_C_
2#include "jemalloc/internal/jemalloc_internal.h"
3
4/******************************************************************************/
5/* Data. */
6
7/* Runtime configuration options. */
8const char	*je_malloc_conf JEMALLOC_ATTR(weak);
9bool	opt_abort =
10#ifdef JEMALLOC_DEBUG
11    true
12#else
13    false
14#endif
15    ;
16const char	*opt_junk =
17#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
18    "true"
19#else
20    "false"
21#endif
22    ;
23bool	opt_junk_alloc =
24#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
25    true
26#else
27    false
28#endif
29    ;
30bool	opt_junk_free =
31#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
32    true
33#else
34    false
35#endif
36    ;
37
38size_t	opt_quarantine = ZU(0);
39bool	opt_redzone = false;
40bool	opt_utrace = false;
41bool	opt_xmalloc = false;
42bool	opt_zero = false;
43unsigned	opt_narenas = 0;
44
45/* Initialized to true if the process is running inside Valgrind. */
46bool	in_valgrind;
47
48unsigned	ncpus;
49
50/* Protects arenas initialization. */
51static malloc_mutex_t	arenas_lock;
52/*
53 * Arenas that are used to service external requests.  Not all elements of the
54 * arenas array are necessarily used; arenas are created lazily as needed.
55 *
56 * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
57 * arenas.  arenas[narenas_auto..narenas_total) are only used if the application
58 * takes some action to create them and allocate from them.
59 */
60arena_t			**arenas;
61static unsigned		narenas_total; /* Use narenas_total_*(). */
62static arena_t		*a0; /* arenas[0]; read-only after initialization. */
63static unsigned		narenas_auto; /* Read-only after initialization. */
64
65typedef enum {
66	malloc_init_uninitialized	= 3,
67	malloc_init_a0_initialized	= 2,
68	malloc_init_recursible		= 1,
69	malloc_init_initialized		= 0 /* Common case --> jnz. */
70} malloc_init_t;
71static malloc_init_t	malloc_init_state = malloc_init_uninitialized;
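/*
 * Rough initialization order (driven by malloc_init_hard() and its helpers
 * below): uninitialized -> a0_initialized (bootstrap arena 0 is usable) ->
 * recursible (TSD/ncpus setup may allocate recursively) -> initialized.
 */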
72
73/* 0 should be the common case.  Set to true to trigger initialization. */
74static bool	malloc_slow = true;
75
76/* The flag bits below record which options force malloc_slow (sanity check). */
77enum {
78	flag_opt_junk_alloc	= (1U),
79	flag_opt_junk_free	= (1U << 1),
80	flag_opt_quarantine	= (1U << 2),
81	flag_opt_zero		= (1U << 3),
82	flag_opt_utrace		= (1U << 4),
83	flag_in_valgrind	= (1U << 5),
84	flag_opt_xmalloc	= (1U << 6)
85};
86static uint8_t	malloc_slow_flags;
87
88/* Last entry for overflow detection only.  */
89JEMALLOC_ALIGNED(CACHELINE)
90const size_t	index2size_tab[NSIZES+1] = {
91#define	SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
92	((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)),
93	SIZE_CLASSES
94#undef SC
95	ZU(0)
96};
97
98JEMALLOC_ALIGNED(CACHELINE)
99const uint8_t	size2index_tab[] = {
100#if LG_TINY_MIN == 0
101#warning "Dangerous LG_TINY_MIN"
102#define	S2B_0(i)	i,
103#elif LG_TINY_MIN == 1
104#warning "Dangerous LG_TINY_MIN"
105#define	S2B_1(i)	i,
106#elif LG_TINY_MIN == 2
107#warning "Dangerous LG_TINY_MIN"
108#define	S2B_2(i)	i,
109#elif LG_TINY_MIN == 3
110#define	S2B_3(i)	i,
111#elif LG_TINY_MIN == 4
112#define	S2B_4(i)	i,
113#elif LG_TINY_MIN == 5
114#define	S2B_5(i)	i,
115#elif LG_TINY_MIN == 6
116#define	S2B_6(i)	i,
117#elif LG_TINY_MIN == 7
118#define	S2B_7(i)	i,
119#elif LG_TINY_MIN == 8
120#define	S2B_8(i)	i,
121#elif LG_TINY_MIN == 9
122#define	S2B_9(i)	i,
123#elif LG_TINY_MIN == 10
124#define	S2B_10(i)	i,
125#elif LG_TINY_MIN == 11
126#define	S2B_11(i)	i,
127#else
128#error "Unsupported LG_TINY_MIN"
129#endif
130#if LG_TINY_MIN < 1
131#define	S2B_1(i)	S2B_0(i) S2B_0(i)
132#endif
133#if LG_TINY_MIN < 2
134#define	S2B_2(i)	S2B_1(i) S2B_1(i)
135#endif
136#if LG_TINY_MIN < 3
137#define	S2B_3(i)	S2B_2(i) S2B_2(i)
138#endif
139#if LG_TINY_MIN < 4
140#define	S2B_4(i)	S2B_3(i) S2B_3(i)
141#endif
142#if LG_TINY_MIN < 5
143#define	S2B_5(i)	S2B_4(i) S2B_4(i)
144#endif
145#if LG_TINY_MIN < 6
146#define	S2B_6(i)	S2B_5(i) S2B_5(i)
147#endif
148#if LG_TINY_MIN < 7
149#define	S2B_7(i)	S2B_6(i) S2B_6(i)
150#endif
151#if LG_TINY_MIN < 8
152#define	S2B_8(i)	S2B_7(i) S2B_7(i)
153#endif
154#if LG_TINY_MIN < 9
155#define	S2B_9(i)	S2B_8(i) S2B_8(i)
156#endif
157#if LG_TINY_MIN < 10
158#define	S2B_10(i)	S2B_9(i) S2B_9(i)
159#endif
160#if LG_TINY_MIN < 11
161#define	S2B_11(i)	S2B_10(i) S2B_10(i)
162#endif
163#define	S2B_no(i)
164#define	SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
165	S2B_##lg_delta_lookup(index)
166	SIZE_CLASSES
167#undef S2B_3
168#undef S2B_4
169#undef S2B_5
170#undef S2B_6
171#undef S2B_7
172#undef S2B_8
173#undef S2B_9
174#undef S2B_10
175#undef S2B_11
176#undef S2B_no
177#undef SC
178};
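/*
 * Illustrative reading of the table above: each S2B_<lg>(i) macro emits
 * 2^(lg - LG_TINY_MIN) copies of index i, so every LG_TINY_MIN-granular size
 * in a class's delta range maps to that class.  E.g. with LG_TINY_MIN == 3, a
 * class whose lg_delta_lookup is 4 occupies two consecutive slots, and the
 * lookup is roughly size2index_tab[(size - 1) >> LG_TINY_MIN].
 */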
179
180#ifdef JEMALLOC_THREADED_INIT
181/* Used to let the initializing thread recursively allocate. */
182#  define NO_INITIALIZER	((unsigned long)0)
183#  define INITIALIZER		pthread_self()
184#  define IS_INITIALIZER	(malloc_initializer == pthread_self())
185static pthread_t		malloc_initializer = NO_INITIALIZER;
186#else
187#  define NO_INITIALIZER	false
188#  define INITIALIZER		true
189#  define IS_INITIALIZER	malloc_initializer
190static bool			malloc_initializer = NO_INITIALIZER;
191#endif
192
193/* Used to avoid initialization races. */
194#ifdef _WIN32
195#if _WIN32_WINNT >= 0x0600
196static malloc_mutex_t	init_lock = SRWLOCK_INIT;
197#else
198static malloc_mutex_t	init_lock;
199static bool init_lock_initialized = false;
200
201JEMALLOC_ATTR(constructor)
202static void WINAPI
203_init_init_lock(void)
204{
205
206	/* If another constructor in the same binary is using mallctl to
207	 * e.g. set up chunk hooks, it may end up running before this one,
208	 * and malloc_init_hard will crash trying to lock the uninitialized
209	 * lock. So we force an initialization of the lock in
210	 * malloc_init_hard as well. We don't try to care about atomicity
211	 * of the accesses to the init_lock_initialized boolean, since it
212	 * really only matters early in the process creation, before any
213	 * separate thread normally starts doing anything. */
214	if (!init_lock_initialized)
215		malloc_mutex_init(&init_lock, "init", WITNESS_RANK_INIT);
216	init_lock_initialized = true;
217}
218
219#ifdef _MSC_VER
220#  pragma section(".CRT$XCU", read)
221JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
222static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
223#endif
224#endif
225#else
226static malloc_mutex_t	init_lock = MALLOC_MUTEX_INITIALIZER;
227#endif
228
229typedef struct {
230	void	*p;	/* Input pointer (as in realloc(p, s)). */
231	size_t	s;	/* Request size. */
232	void	*r;	/* Result pointer. */
233} malloc_utrace_t;
234
235#ifdef JEMALLOC_UTRACE
236#  define UTRACE(a, b, c) do {						\
237	if (unlikely(opt_utrace)) {					\
238		int utrace_serrno = errno;				\
239		malloc_utrace_t ut;					\
240		ut.p = (a);						\
241		ut.s = (b);						\
242		ut.r = (c);						\
243		utrace(&ut, sizeof(ut));				\
244		errno = utrace_serrno;					\
245	}								\
246} while (0)
247#else
248#  define UTRACE(a, b, c)
249#endif
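/*
 * E.g. with opt_utrace enabled, je_realloc(p, 31) records a malloc_utrace_t
 * of {p, 31, <new pointer>} via utrace(), leaving errno untouched.
 */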
250
251/******************************************************************************/
252/*
253 * Function prototypes for static functions that are referenced prior to
254 * definition.
255 */
256
257static bool	malloc_init_hard_a0(tsd_t *tsd);
258static bool	malloc_init_hard(void);
259
260/******************************************************************************/
261/*
262 * Begin miscellaneous support functions.
263 */
264
265JEMALLOC_ALWAYS_INLINE_C bool
266malloc_initialized(void)
267{
268
269	return (malloc_init_state == malloc_init_initialized);
270}
271
272JEMALLOC_ALWAYS_INLINE_C void
273malloc_thread_init(void)
274{
275
276	/*
277	 * TSD initialization can't be safely done as a side effect of
278	 * deallocation, because it is possible for a thread to do nothing but
279	 * deallocate its TLS data via free(), in which case writing to TLS
280	 * would cause write-after-free memory corruption.  The quarantine
281	 * facility *only* gets used as a side effect of deallocation, so make
282	 * a best effort attempt at initializing its TSD by hooking all
283	 * allocation events.
284	 */
285	if (config_fill && unlikely(opt_quarantine))
286		quarantine_alloc_hook();
287}
288
289JEMALLOC_ALWAYS_INLINE_C bool
290malloc_init_a0(void)
291{
292
293	if (unlikely(malloc_init_state == malloc_init_uninitialized))
294		return (malloc_init_hard_a0(NULL));
295	return (false);
296}
297
298JEMALLOC_ALWAYS_INLINE_C bool
299malloc_init(void)
300{
301
302	if (unlikely(!malloc_initialized()) && malloc_init_hard())
303		return (true);
304	malloc_thread_init();
305
306	return (false);
307}
308
309/*
310 * The a0*() functions are used instead of i[mcd]alloc() in situations that
311 * cannot tolerate TLS variable access.
312 */
313
314static void *
315a0ialloc(size_t size, bool zero, bool is_metadata)
316{
317
318	if (unlikely(malloc_init_a0()))
319		return (NULL);
320
321	return (iallocztm(NULL, size, size2index(size), zero, false,
322	    is_metadata, arena_get(NULL, 0, false), true));
323}
324
325static void
326a0idalloc(void *ptr, bool is_metadata)
327{
328
329	idalloctm(NULL, ptr, false, is_metadata, true);
330}
331
332void *
333a0malloc(size_t size)
334{
335
336	return (a0ialloc(size, false, true));
337}
338
339void
340a0dalloc(void *ptr)
341{
342
343	a0idalloc(ptr, true);
344}
345
346/*
347 * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-sensitive
348 * situations that cannot tolerate TLS variable access (TLS allocation and very
349 * early internal data structure initialization).
350 */
351
352void *
353bootstrap_malloc(size_t size)
354{
355
356	if (unlikely(size == 0))
357		size = 1;
358
359	return (a0ialloc(size, false, false));
360}
361
362void *
363bootstrap_calloc(size_t num, size_t size)
364{
365	size_t num_size;
366
367	num_size = num * size;
368	if (unlikely(num_size == 0)) {
369		assert(num == 0 || size == 0);
370		num_size = 1;
371	}
372
373	return (a0ialloc(num_size, true, false));
374}
375
376void
377bootstrap_free(void *ptr)
378{
379
380	if (unlikely(ptr == NULL))
381		return;
382
383	a0idalloc(ptr, false);
384}
385
386static void
387arena_set(unsigned ind, arena_t *arena)
388{
389
390	atomic_write_p((void **)&arenas[ind], arena);
391}
392
393static void
394narenas_total_set(unsigned narenas)
395{
396
397	atomic_write_u(&narenas_total, narenas);
398}
399
400static void
401narenas_total_inc(void)
402{
403
404	atomic_add_u(&narenas_total, 1);
405}
406
407unsigned
408narenas_total_get(void)
409{
410
411	return (atomic_read_u(&narenas_total));
412}
413
414/* Create a new arena and insert it into the arenas array at index ind. */
415static arena_t *
416arena_init_locked(tsd_t *tsd, unsigned ind)
417{
418	arena_t *arena;
419
420	assert(ind <= narenas_total_get());
421	if (ind > MALLOCX_ARENA_MAX)
422		return (NULL);
423	if (ind == narenas_total_get())
424		narenas_total_inc();
425
426	/*
427	 * Another thread may have already initialized arenas[ind] if it's an
428	 * auto arena.
429	 */
430	arena = arena_get(tsd, ind, false);
431	if (arena != NULL) {
432		assert(ind < narenas_auto);
433		return (arena);
434	}
435
436	/* Actually initialize the arena. */
437	arena = arena_new(tsd, ind);
438	arena_set(ind, arena);
439	return (arena);
440}
441
442arena_t *
443arena_init(tsd_t *tsd, unsigned ind)
444{
445	arena_t *arena;
446
447	malloc_mutex_lock(tsd, &arenas_lock);
448	arena = arena_init_locked(tsd, ind);
449	malloc_mutex_unlock(tsd, &arenas_lock);
450	return (arena);
451}
452
453static void
454arena_bind(tsd_t *tsd, unsigned ind)
455{
456	arena_t *arena;
457
458	arena = arena_get(tsd, ind, false);
459	arena_nthreads_inc(arena);
460
461	if (tsd_nominal(tsd))
462		tsd_arena_set(tsd, arena);
463}
464
465void
466arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind)
467{
468	arena_t *oldarena, *newarena;
469
470	oldarena = arena_get(tsd, oldind, false);
471	newarena = arena_get(tsd, newind, false);
472	arena_nthreads_dec(oldarena);
473	arena_nthreads_inc(newarena);
474	tsd_arena_set(tsd, newarena);
475}
476
477static void
478arena_unbind(tsd_t *tsd, unsigned ind)
479{
480	arena_t *arena;
481
482	arena = arena_get(tsd, ind, false);
483	arena_nthreads_dec(arena);
484	tsd_arena_set(tsd, NULL);
485}
486
487arena_tdata_t *
488arena_tdata_get_hard(tsd_t *tsd, unsigned ind)
489{
490	arena_tdata_t *tdata, *arenas_tdata_old;
491	arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
492	unsigned narenas_tdata_old, i;
493	unsigned narenas_tdata = tsd_narenas_tdata_get(tsd);
494	unsigned narenas_actual = narenas_total_get();
495
496	/*
497	 * Dissociate old tdata array (and set up for deallocation upon return)
498	 * if it's too small.
499	 */
500	if (arenas_tdata != NULL && narenas_tdata < narenas_actual) {
501		arenas_tdata_old = arenas_tdata;
502		narenas_tdata_old = narenas_tdata;
503		arenas_tdata = NULL;
504		narenas_tdata = 0;
505		tsd_arenas_tdata_set(tsd, arenas_tdata);
506		tsd_narenas_tdata_set(tsd, narenas_tdata);
507	} else {
508		arenas_tdata_old = NULL;
509		narenas_tdata_old = 0;
510	}
511
512	/* Allocate tdata array if it's missing. */
513	if (arenas_tdata == NULL) {
514		bool *arenas_tdata_bypassp = tsd_arenas_tdata_bypassp_get(tsd);
515		narenas_tdata = (ind < narenas_actual) ? narenas_actual : ind+1;
516
517		if (tsd_nominal(tsd) && !*arenas_tdata_bypassp) {
518			*arenas_tdata_bypassp = true;
519			arenas_tdata = (arena_tdata_t *)a0malloc(
520			    sizeof(arena_tdata_t) * narenas_tdata);
521			*arenas_tdata_bypassp = false;
522		}
523		if (arenas_tdata == NULL) {
524			tdata = NULL;
525			goto label_return;
526		}
527		assert(tsd_nominal(tsd) && !*arenas_tdata_bypassp);
528		tsd_arenas_tdata_set(tsd, arenas_tdata);
529		tsd_narenas_tdata_set(tsd, narenas_tdata);
530	}
531
532	/*
533	 * Copy to tdata array.  It's possible that the actual number of arenas
534	 * has increased since narenas_total_get() was called above, but that
535	 * causes no correctness issues unless two threads concurrently execute
536	 * the arenas.extend mallctl, which we trust mallctl synchronization to
537	 * prevent.
538	 */
539
540	/* Copy/initialize tickers. */
541	for (i = 0; i < narenas_actual; i++) {
542		if (i < narenas_tdata_old) {
543			ticker_copy(&arenas_tdata[i].decay_ticker,
544			    &arenas_tdata_old[i].decay_ticker);
545		} else {
546			ticker_init(&arenas_tdata[i].decay_ticker,
547			    DECAY_NTICKS_PER_UPDATE);
548		}
549	}
550	if (narenas_tdata > narenas_actual) {
551		memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t)
552		    * (narenas_tdata - narenas_actual));
553	}
554
555	/* Read the refreshed tdata array. */
556	tdata = &arenas_tdata[ind];
557label_return:
558	if (arenas_tdata_old != NULL)
559		a0dalloc(arenas_tdata_old);
560	return (tdata);
561}
562
563/* Slow path, called only by arena_choose(). */
564arena_t *
565arena_choose_hard(tsd_t *tsd)
566{
567	arena_t *ret;
568
569	if (narenas_auto > 1) {
570		unsigned i, choose, first_null;
571
572		choose = 0;
573		first_null = narenas_auto;
574		malloc_mutex_lock(tsd, &arenas_lock);
575		assert(arena_get(tsd, 0, false) != NULL);
576		for (i = 1; i < narenas_auto; i++) {
577			if (arena_get(tsd, i, false) != NULL) {
578				/*
579				 * Choose the first arena that has the lowest
580				 * number of threads assigned to it.
581				 */
582				if (arena_nthreads_get(arena_get(tsd, i, false))
583				    < arena_nthreads_get(arena_get(tsd, choose,
584				    false)))
585					choose = i;
586			} else if (first_null == narenas_auto) {
587				/*
588				 * Record the index of the first uninitialized
589				 * arena, in case all extant arenas are in use.
590				 *
591				 * NB: It is possible for there to be
592				 * discontinuities in terms of initialized
593				 * versus uninitialized arenas, due to the
594				 * "thread.arena" mallctl.
595				 */
596				first_null = i;
597			}
598		}
599
600		if (arena_nthreads_get(arena_get(tsd, choose, false)) == 0
601		    || first_null == narenas_auto) {
602			/*
603			 * Use an unloaded arena, or the least loaded arena if
604			 * all arenas are already initialized.
605			 */
606			ret = arena_get(tsd, choose, false);
607		} else {
608			/* Initialize a new arena. */
609			choose = first_null;
610			ret = arena_init_locked(tsd, choose);
611			if (ret == NULL) {
612				malloc_mutex_unlock(tsd, &arenas_lock);
613				return (NULL);
614			}
615		}
616		arena_bind(tsd, choose);
617		malloc_mutex_unlock(tsd, &arenas_lock);
618	} else {
619		ret = arena_get(tsd, 0, false);
620		arena_bind(tsd, 0);
621	}
622
623	return (ret);
624}
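/*
 * Net effect (illustrative): an automatic arena with no bound threads is
 * reused, a new arena is created while uninitialized slots remain, and
 * otherwise the thread binds to the least loaded initialized arena.
 */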
625
626void
627thread_allocated_cleanup(tsd_t *tsd)
628{
629
630	/* Do nothing. */
631}
632
633void
634thread_deallocated_cleanup(tsd_t *tsd)
635{
636
637	/* Do nothing. */
638}
639
640void
641arena_cleanup(tsd_t *tsd)
642{
643	arena_t *arena;
644
645	arena = tsd_arena_get(tsd);
646	if (arena != NULL)
647		arena_unbind(tsd, arena->ind);
648}
649
650void
651arenas_tdata_cleanup(tsd_t *tsd)
652{
653	arena_tdata_t *arenas_tdata;
654
655	/* Prevent tsd->arenas_tdata from being (re)created. */
656	*tsd_arenas_tdata_bypassp_get(tsd) = true;
657
658	arenas_tdata = tsd_arenas_tdata_get(tsd);
659	if (arenas_tdata != NULL) {
660		tsd_arenas_tdata_set(tsd, NULL);
661		a0dalloc(arenas_tdata);
662	}
663}
664
665void
666narenas_tdata_cleanup(tsd_t *tsd)
667{
668
669	/* Do nothing. */
670}
671
672void
673arenas_tdata_bypass_cleanup(tsd_t *tsd)
674{
675
676	/* Do nothing. */
677}
678
679static void
680stats_print_atexit(void)
681{
682
683	if (config_tcache && config_stats) {
684		tsd_t *tsd;
685		unsigned narenas, i;
686
687		tsd = tsd_fetch();
688
689		/*
690		 * Merge stats from extant threads.  This is racy, since
691		 * individual threads do not lock when recording tcache stats
692		 * events.  As a consequence, the final stats may be slightly
693		 * out of date by the time they are reported, if other threads
694		 * continue to allocate.
695		 */
696		for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
697			arena_t *arena = arena_get(tsd, i, false);
698			if (arena != NULL) {
699				tcache_t *tcache;
700
701				/*
702				 * tcache_stats_merge() locks bins, so if any
703				 * code is introduced that acquires both arena
704				 * and bin locks in the opposite order,
705				 * deadlocks may result.
706				 */
707				malloc_mutex_lock(tsd, &arena->lock);
708				ql_foreach(tcache, &arena->tcache_ql, link) {
709					tcache_stats_merge(tsd, tcache, arena);
710				}
711				malloc_mutex_unlock(tsd, &arena->lock);
712			}
713		}
714	}
715	je_malloc_stats_print(NULL, NULL, NULL);
716}
717
718/*
719 * End miscellaneous support functions.
720 */
721/******************************************************************************/
722/*
723 * Begin initialization functions.
724 */
725
726#ifndef JEMALLOC_HAVE_SECURE_GETENV
727static char *
728secure_getenv(const char *name)
729{
730
731#  ifdef JEMALLOC_HAVE_ISSETUGID
732	if (issetugid() != 0)
733		return (NULL);
734#  endif
735	return (getenv(name));
736}
737#endif
738
739static unsigned
740malloc_ncpus(void)
741{
742	long result;
743
744#ifdef _WIN32
745	SYSTEM_INFO si;
746	GetSystemInfo(&si);
747	result = si.dwNumberOfProcessors;
748#else
749	result = sysconf(_SC_NPROCESSORS_ONLN);
750#endif
751	return ((result == -1) ? 1 : (unsigned)result);
752}
753
754static bool
755malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
756    char const **v_p, size_t *vlen_p)
757{
758	bool accept;
759	const char *opts = *opts_p;
760
761	*k_p = opts;
762
763	for (accept = false; !accept;) {
764		switch (*opts) {
765		case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
766		case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
767		case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
768		case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
769		case 'Y': case 'Z':
770		case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
771		case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
772		case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
773		case 's': case 't': case 'u': case 'v': case 'w': case 'x':
774		case 'y': case 'z':
775		case '0': case '1': case '2': case '3': case '4': case '5':
776		case '6': case '7': case '8': case '9':
777		case '_':
778			opts++;
779			break;
780		case ':':
781			opts++;
782			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
783			*v_p = opts;
784			accept = true;
785			break;
786		case '\0':
787			if (opts != *opts_p) {
788				malloc_write("<jemalloc>: Conf string ends "
789				    "with key\n");
790			}
791			return (true);
792		default:
793			malloc_write("<jemalloc>: Malformed conf string\n");
794			return (true);
795		}
796	}
797
798	for (accept = false; !accept;) {
799		switch (*opts) {
800		case ',':
801			opts++;
802			/*
803			 * Look ahead one character here, because the next time
804			 * this function is called, it will assume that end of
805			 * input has been cleanly reached if no input remains,
806			 * but we have optimistically already consumed the
807			 * comma if one exists.
808			 */
809			if (*opts == '\0') {
810				malloc_write("<jemalloc>: Conf string ends "
811				    "with comma\n");
812			}
813			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
814			accept = true;
815			break;
816		case '\0':
817			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
818			accept = true;
819			break;
820		default:
821			opts++;
822			break;
823		}
824	}
825
826	*opts_p = opts;
827	return (false);
828}
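/*
 * Example (illustrative): with *opts_p = "abort:true,narenas:4", the first
 * call returns k/klen = "abort", v/vlen = "true" and consumes the comma; the
 * second returns "narenas"/"4" and leaves *opts_p at the terminating '\0'.
 */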
829
830static void
831malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
832    size_t vlen)
833{
834
835	malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
836	    (int)vlen, v);
837}
838
839static void
840malloc_slow_flag_init(void)
841{
842	/*
843	 * Combine the runtime options into malloc_slow for the fast path.  Called
844	 * after processing all the options.
845	 */
846	malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0)
847	    | (opt_junk_free ? flag_opt_junk_free : 0)
848	    | (opt_quarantine ? flag_opt_quarantine : 0)
849	    | (opt_zero ? flag_opt_zero : 0)
850	    | (opt_utrace ? flag_opt_utrace : 0)
851	    | (opt_xmalloc ? flag_opt_xmalloc : 0);
852
853	if (config_valgrind)
854		malloc_slow_flags |= (in_valgrind ? flag_in_valgrind : 0);
855
856	malloc_slow = (malloc_slow_flags != 0);
857}
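/*
 * E.g. junk filling plus utrace leaves malloc_slow_flags ==
 * (flag_opt_junk_alloc | flag_opt_junk_free | flag_opt_utrace), so
 * malloc_slow is true and je_malloc() et al. take their slow paths.
 */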
858
859static void
860malloc_conf_init(void)
861{
862	unsigned i;
863	char buf[PATH_MAX + 1];
864	const char *opts, *k, *v;
865	size_t klen, vlen;
866
867	/*
868	 * Automatically configure valgrind before processing options.  The
869	 * valgrind option remains in jemalloc 3.x for compatibility reasons.
870	 */
871	if (config_valgrind) {
872		in_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false;
873		if (config_fill && unlikely(in_valgrind)) {
874			opt_junk = "false";
875			opt_junk_alloc = false;
876			opt_junk_free = false;
877			assert(!opt_zero);
878			opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
879			opt_redzone = true;
880		}
881		if (config_tcache && unlikely(in_valgrind))
882			opt_tcache = false;
883	}
884
885	for (i = 0; i < 4; i++) {
886		/* Get runtime configuration. */
887		switch (i) {
888		case 0:
889			opts = config_malloc_conf;
890			break;
891		case 1:
892			if (je_malloc_conf != NULL) {
893				/*
894				 * Use options that were compiled into the
895				 * program.
896				 */
897				opts = je_malloc_conf;
898			} else {
899				/* No configuration specified. */
900				buf[0] = '\0';
901				opts = buf;
902			}
903			break;
904		case 2: {
905			ssize_t linklen = 0;
906#ifndef _WIN32
907			int saved_errno = errno;
908			const char *linkname =
909#  ifdef JEMALLOC_PREFIX
910			    "/etc/"JEMALLOC_PREFIX"malloc.conf"
911#  else
912			    "/etc/malloc.conf"
913#  endif
914			    ;
915
916			/*
917			 * Try to use the contents of the "/etc/malloc.conf"
918			 * symbolic link's name.
919			 */
920			linklen = readlink(linkname, buf, sizeof(buf) - 1);
921			if (linklen == -1) {
922				/* No configuration specified. */
923				linklen = 0;
924				/* Restore errno. */
925				set_errno(saved_errno);
926			}
927#endif
928			buf[linklen] = '\0';
929			opts = buf;
930			break;
931		} case 3: {
932			const char *envname =
933#ifdef JEMALLOC_PREFIX
934			    JEMALLOC_CPREFIX"MALLOC_CONF"
935#else
936			    "MALLOC_CONF"
937#endif
938			    ;
939
940			if ((opts = secure_getenv(envname)) != NULL) {
941				/*
942				 * Do nothing; opts is already initialized to
943				 * the value of the MALLOC_CONF environment
944				 * variable.
945				 */
946			} else {
947				/* No configuration specified. */
948				buf[0] = '\0';
949				opts = buf;
950			}
951			break;
952		} default:
953			not_reached();
954			buf[0] = '\0';
955			opts = buf;
956		}
957
958		while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v,
959		    &vlen)) {
960#define	CONF_MATCH(n)							\
961	(sizeof(n)-1 == klen && strncmp(n, k, klen) == 0)
962#define	CONF_MATCH_VALUE(n)						\
963	(sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0)
964#define	CONF_HANDLE_BOOL(o, n, cont)					\
965			if (CONF_MATCH(n)) {				\
966				if (CONF_MATCH_VALUE("true"))		\
967					o = true;			\
968				else if (CONF_MATCH_VALUE("false"))	\
969					o = false;			\
970				else {					\
971					malloc_conf_error(		\
972					    "Invalid conf value",	\
973					    k, klen, v, vlen);		\
974				}					\
975				if (cont)				\
976					continue;			\
977			}
978#define	CONF_HANDLE_T_U(t, o, n, min, max, clip)			\
979			if (CONF_MATCH(n)) {				\
980				uintmax_t um;				\
981				char *end;				\
982									\
983				set_errno(0);				\
984				um = malloc_strtoumax(v, &end, 0);	\
985				if (get_errno() != 0 || (uintptr_t)end -\
986				    (uintptr_t)v != vlen) {		\
987					malloc_conf_error(		\
988					    "Invalid conf value",	\
989					    k, klen, v, vlen);		\
990				} else if (clip) {			\
991					if ((min) != 0 && um < (min))	\
992						o = (t)(min);		\
993					else if (um > (max))		\
994						o = (t)(max);		\
995					else				\
996						o = (t)um;		\
997				} else {				\
998					if (((min) != 0 && um < (min))	\
999					    || um > (max)) {		\
1000						malloc_conf_error(	\
1001						    "Out-of-range "	\
1002						    "conf value",	\
1003						    k, klen, v, vlen);	\
1004					} else				\
1005						o = (t)um;		\
1006				}					\
1007				continue;				\
1008			}
1009#define	CONF_HANDLE_UNSIGNED(o, n, min, max, clip)			\
1010			CONF_HANDLE_T_U(unsigned, o, n, min, max, clip)
1011#define	CONF_HANDLE_SIZE_T(o, n, min, max, clip)			\
1012			CONF_HANDLE_T_U(size_t, o, n, min, max, clip)
1013#define	CONF_HANDLE_SSIZE_T(o, n, min, max)				\
1014			if (CONF_MATCH(n)) {				\
1015				long l;					\
1016				char *end;				\
1017									\
1018				set_errno(0);				\
1019				l = strtol(v, &end, 0);			\
1020				if (get_errno() != 0 || (uintptr_t)end -\
1021				    (uintptr_t)v != vlen) {		\
1022					malloc_conf_error(		\
1023					    "Invalid conf value",	\
1024					    k, klen, v, vlen);		\
1025				} else if (l < (ssize_t)(min) || l >	\
1026				    (ssize_t)(max)) {			\
1027					malloc_conf_error(		\
1028					    "Out-of-range conf value",	\
1029					    k, klen, v, vlen);		\
1030				} else					\
1031					o = l;				\
1032				continue;				\
1033			}
1034#define	CONF_HANDLE_CHAR_P(o, n, d)					\
1035			if (CONF_MATCH(n)) {				\
1036				size_t cpylen = (vlen <=		\
1037				    sizeof(o)-1) ? vlen :		\
1038				    sizeof(o)-1;			\
1039				strncpy(o, v, cpylen);			\
1040				o[cpylen] = '\0';			\
1041				continue;				\
1042			}
1043
1044			CONF_HANDLE_BOOL(opt_abort, "abort", true)
1045			/*
1046			 * Chunks always require at least one header page,
1047			 * as many as 2^(LG_SIZE_CLASS_GROUP+1) data pages, and
1048			 * possibly an additional page in the presence of
1049			 * redzones.  In order to simplify options processing,
1050			 * use a conservative bound that accommodates all these
1051			 * constraints.
1052			 */
1053			CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
1054			    LG_SIZE_CLASS_GROUP + (config_fill ? 2 : 1),
1055			    (sizeof(size_t) << 3) - 1, true)
1056			if (strncmp("dss", k, klen) == 0) {
1057				int i;
1058				bool match = false;
1059				for (i = 0; i < dss_prec_limit; i++) {
1060					if (strncmp(dss_prec_names[i], v, vlen)
1061					    == 0) {
1062						if (chunk_dss_prec_set(NULL,
1063						   i)) {
1064							malloc_conf_error(
1065							    "Error setting dss",
1066							    k, klen, v, vlen);
1067						} else {
1068							opt_dss =
1069							    dss_prec_names[i];
1070							match = true;
1071							break;
1072						}
1073					}
1074				}
1075				if (!match) {
1076					malloc_conf_error("Invalid conf value",
1077					    k, klen, v, vlen);
1078				}
1079				continue;
1080			}
1081			CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1,
1082			    UINT_MAX, false)
1083			if (strncmp("purge", k, klen) == 0) {
1084				int i;
1085				bool match = false;
1086				for (i = 0; i < purge_mode_limit; i++) {
1087					if (strncmp(purge_mode_names[i], v,
1088					    vlen) == 0) {
1089						opt_purge = (purge_mode_t)i;
1090						match = true;
1091						break;
1092					}
1093				}
1094				if (!match) {
1095					malloc_conf_error("Invalid conf value",
1096					    k, klen, v, vlen);
1097				}
1098				continue;
1099			}
1100			CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
1101			    -1, (sizeof(size_t) << 3) - 1)
1102			CONF_HANDLE_SSIZE_T(opt_decay_time, "decay_time", -1,
1103			    NSTIME_SEC_MAX);
1104			CONF_HANDLE_BOOL(opt_stats_print, "stats_print", true)
1105			if (config_fill) {
1106				if (CONF_MATCH("junk")) {
1107					if (CONF_MATCH_VALUE("true")) {
1108						opt_junk = "true";
1109						opt_junk_alloc = opt_junk_free =
1110						    true;
1111					} else if (CONF_MATCH_VALUE("false")) {
1112						opt_junk = "false";
1113						opt_junk_alloc = opt_junk_free =
1114						    false;
1115					} else if (CONF_MATCH_VALUE("alloc")) {
1116						opt_junk = "alloc";
1117						opt_junk_alloc = true;
1118						opt_junk_free = false;
1119					} else if (CONF_MATCH_VALUE("free")) {
1120						opt_junk = "free";
1121						opt_junk_alloc = false;
1122						opt_junk_free = true;
1123					} else {
1124						malloc_conf_error(
1125						    "Invalid conf value", k,
1126						    klen, v, vlen);
1127					}
1128					continue;
1129				}
1130				CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
1131				    0, SIZE_T_MAX, false)
1132				CONF_HANDLE_BOOL(opt_redzone, "redzone", true)
1133				CONF_HANDLE_BOOL(opt_zero, "zero", true)
1134			}
1135			if (config_utrace) {
1136				CONF_HANDLE_BOOL(opt_utrace, "utrace", true)
1137			}
1138			if (config_xmalloc) {
1139				CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc", true)
1140			}
1141			if (config_tcache) {
1142				CONF_HANDLE_BOOL(opt_tcache, "tcache",
1143				    !config_valgrind || !in_valgrind)
1144				if (CONF_MATCH("tcache")) {
1145					assert(config_valgrind && in_valgrind);
1146					if (opt_tcache) {
1147						opt_tcache = false;
1148						malloc_conf_error(
1149						"tcache cannot be enabled "
1150						"while running inside Valgrind",
1151						k, klen, v, vlen);
1152					}
1153					continue;
1154				}
1155				CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
1156				    "lg_tcache_max", -1,
1157				    (sizeof(size_t) << 3) - 1)
1158			}
1159			if (config_prof) {
1160				CONF_HANDLE_BOOL(opt_prof, "prof", true)
1161				CONF_HANDLE_CHAR_P(opt_prof_prefix,
1162				    "prof_prefix", "jeprof")
1163				CONF_HANDLE_BOOL(opt_prof_active, "prof_active",
1164				    true)
1165				CONF_HANDLE_BOOL(opt_prof_thread_active_init,
1166				    "prof_thread_active_init", true)
1167				CONF_HANDLE_SIZE_T(opt_lg_prof_sample,
1168				    "lg_prof_sample", 0,
1169				    (sizeof(uint64_t) << 3) - 1, true)
1170				CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum",
1171				    true)
1172				CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
1173				    "lg_prof_interval", -1,
1174				    (sizeof(uint64_t) << 3) - 1)
1175				CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump",
1176				    true)
1177				CONF_HANDLE_BOOL(opt_prof_final, "prof_final",
1178				    true)
1179				CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak",
1180				    true)
1181			}
1182			malloc_conf_error("Invalid conf pair", k, klen, v,
1183			    vlen);
1184#undef CONF_MATCH
1185#undef CONF_HANDLE_BOOL
1186#undef CONF_HANDLE_SIZE_T
1187#undef CONF_HANDLE_SSIZE_T
1188#undef CONF_HANDLE_CHAR_P
1189		}
1190	}
1191}
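/*
 * The four passes above are applied in order, so later sources override
 * earlier ones: compile-time config_malloc_conf, the je_malloc_conf symbol,
 * the /etc/malloc.conf symlink target, and finally the (possibly prefixed)
 * MALLOC_CONF environment variable, e.g. MALLOC_CONF="narenas:2,lg_chunk:22".
 */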
1192
1193static bool
1194malloc_init_hard_needed(void)
1195{
1196
1197	if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state ==
1198	    malloc_init_recursible)) {
1199		/*
1200		 * Another thread initialized the allocator before this one
1201		 * acquired init_lock, or this thread is the initializing
1202		 * thread, and it is recursively allocating.
1203		 */
1204		return (false);
1205	}
1206#ifdef JEMALLOC_THREADED_INIT
1207	if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) {
1208		/* Busy-wait until the initializing thread completes. */
1209		do {
1210			malloc_mutex_unlock(NULL, &init_lock);
1211			CPU_SPINWAIT;
1212			malloc_mutex_lock(NULL, &init_lock);
1213		} while (!malloc_initialized());
1214		return (false);
1215	}
1216#endif
1217	return (true);
1218}
1219
1220static bool
1221malloc_init_hard_a0_locked(tsd_t *tsd)
1222{
1223
1224	malloc_initializer = INITIALIZER;
1225
1226	if (config_prof)
1227		prof_boot0();
1228	malloc_conf_init();
1229	if (opt_stats_print) {
1230		/* Print statistics at exit. */
1231		if (atexit(stats_print_atexit) != 0) {
1232			malloc_write("<jemalloc>: Error in atexit()\n");
1233			if (opt_abort)
1234				abort();
1235		}
1236	}
1237	if (base_boot())
1238		return (true);
1239	if (chunk_boot())
1240		return (true);
1241	if (ctl_boot())
1242		return (true);
1243	if (config_prof)
1244		prof_boot1();
1245	if (arena_boot())
1246		return (true);
1247	if (config_tcache && tcache_boot(tsd))
1248		return (true);
1249	if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS))
1250		return (true);
1251	/*
1252	 * Create enough scaffolding to allow recursive allocation in
1253	 * malloc_ncpus().
1254	 */
1255	narenas_auto = 1;
1256	narenas_total_set(narenas_auto);
1257	arenas = &a0;
1258	memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
1259	/*
1260	 * Initialize one arena here.  The rest are lazily created in
1261	 * arena_choose_hard().
1262	 */
1263	if (arena_init(tsd, 0) == NULL)
1264		return (true);
1265	malloc_init_state = malloc_init_a0_initialized;
1266	return (false);
1267}
1268
1269static bool
1270malloc_init_hard_a0(tsd_t *tsd)
1271{
1272	bool ret;
1273
1274	malloc_mutex_lock(tsd, &init_lock);
1275	ret = malloc_init_hard_a0_locked(tsd);
1276	malloc_mutex_unlock(tsd, &init_lock);
1277	return (ret);
1278}
1279
1280/* Initialize data structures which may trigger recursive allocation. */
1281static bool
1282malloc_init_hard_recursible(tsd_t **tsd)
1283{
1284	bool ret;
1285
1286	malloc_init_state = malloc_init_recursible;
1287	malloc_mutex_unlock(*tsd, &init_lock);
1288
1289	/* LinuxThreads' pthread_setspecific() allocates. */
1290	*tsd = malloc_tsd_boot0();
1291	if (*tsd == NULL) {
1292		ret = true;
1293		goto label_return;
1294	}
1295
1296	ncpus = malloc_ncpus();
1297
1298#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
1299    && !defined(_WIN32) && !defined(__native_client__))
1300	/* LinuxThreads' pthread_atfork() allocates. */
1301	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
1302	    jemalloc_postfork_child) != 0) {
1303		ret = true;
1304		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
1305		if (opt_abort)
1306			abort();
1307	}
1308#endif
1309
1310	ret = false;
1311label_return:
1312	malloc_mutex_lock(*tsd, &init_lock);
1313	return (ret);
1314}
1315
1316static bool
1317malloc_init_hard_finish(tsd_t *tsd)
1318{
1319
1320	if (malloc_mutex_boot())
1321		return (true);
1322
1323	if (opt_narenas == 0) {
1324		/*
1325		 * For SMP systems, create more than one arena per CPU by
1326		 * default.
1327		 */
1328		if (ncpus > 1)
1329			opt_narenas = ncpus << 2;
1330		else
1331			opt_narenas = 1;
1332	}
1333	narenas_auto = opt_narenas;
1334	/*
1335	 * Limit the number of arenas to the indexing range of MALLOCX_ARENA().
1336	 */
1337	if (narenas_auto > MALLOCX_ARENA_MAX) {
1338		narenas_auto = MALLOCX_ARENA_MAX;
1339		malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
1340		    narenas_auto);
1341	}
1342	narenas_total_set(narenas_auto);
1343
1344	/* Allocate and initialize arenas. */
1345	arenas = (arena_t **)base_alloc(tsd, sizeof(arena_t *) *
1346	    (MALLOCX_ARENA_MAX+1));
1347	if (arenas == NULL)
1348		return (true);
1349	/* Copy the pointer to the one arena that was already initialized. */
1350	arena_set(0, a0);
1351
1352	malloc_init_state = malloc_init_initialized;
1353	malloc_slow_flag_init();
1354
1355	return (false);
1356}
1357
1358static bool
1359malloc_init_hard(void)
1360{
1361	tsd_t *tsd = NULL;
1362
1363#if defined(_WIN32) && _WIN32_WINNT < 0x0600
1364	_init_init_lock();
1365#endif
1366	malloc_mutex_lock(tsd, &init_lock);
1367	if (!malloc_init_hard_needed()) {
1368		malloc_mutex_unlock(tsd, &init_lock);
1369		return (false);
1370	}
1371
1372	if (malloc_init_state != malloc_init_a0_initialized &&
1373	    malloc_init_hard_a0_locked(tsd)) {
1374		malloc_mutex_unlock(tsd, &init_lock);
1375		return (true);
1376	}
1377
1378	if (malloc_init_hard_recursible(&tsd)) {
1379		malloc_mutex_unlock(tsd, &init_lock);
1380		return (true);
1381	}
1382
1383	if (config_prof && prof_boot2(tsd)) {
1384		malloc_mutex_unlock(tsd, &init_lock);
1385		return (true);
1386	}
1387
1388	if (malloc_init_hard_finish(tsd)) {
1389		malloc_mutex_unlock(tsd, &init_lock);
1390		return (true);
1391	}
1392
1393	malloc_mutex_unlock(tsd, &init_lock);
1394	malloc_tsd_boot1();
1395	return (false);
1396}
1397
1398/*
1399 * End initialization functions.
1400 */
1401/******************************************************************************/
1402/*
1403 * Begin malloc(3)-compatible functions.
1404 */
1405
1406static void *
1407imalloc_prof_sample(tsd_t *tsd, size_t usize, szind_t ind,
1408    prof_tctx_t *tctx, bool slow_path)
1409{
1410	void *p;
1411
1412	if (tctx == NULL)
1413		return (NULL);
1414	if (usize <= SMALL_MAXCLASS) {
1415		szind_t ind_large = size2index(LARGE_MINCLASS);
1416		p = imalloc(tsd, LARGE_MINCLASS, ind_large, slow_path);
1417		if (p == NULL)
1418			return (NULL);
1419		arena_prof_promoted(tsd, p, usize);
1420	} else
1421		p = imalloc(tsd, usize, ind, slow_path);
1422
1423	return (p);
1424}
1425
1426JEMALLOC_ALWAYS_INLINE_C void *
1427imalloc_prof(tsd_t *tsd, size_t usize, szind_t ind, bool slow_path)
1428{
1429	void *p;
1430	prof_tctx_t *tctx;
1431
1432	tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
1433	if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
1434		p = imalloc_prof_sample(tsd, usize, ind, tctx, slow_path);
1435	else
1436		p = imalloc(tsd, usize, ind, slow_path);
1437	if (unlikely(p == NULL)) {
1438		prof_alloc_rollback(tsd, tctx, true);
1439		return (NULL);
1440	}
1441	prof_malloc(tsd, p, usize, tctx);
1442
1443	return (p);
1444}
1445
1446JEMALLOC_ALWAYS_INLINE_C void *
1447imalloc_body(size_t size, tsd_t **tsd, size_t *usize, bool slow_path)
1448{
1449	szind_t ind;
1450
1451	if (slow_path && unlikely(malloc_init()))
1452		return (NULL);
1453
1454	*tsd = tsd_fetch();
1455
1456	witness_assert_lockless(*tsd);
1457
1458	ind = size2index(size);
1459	if (unlikely(ind >= NSIZES))
1460		return (NULL);
1461
1462	if (config_stats || (config_prof && opt_prof) || (slow_path &&
1463	    config_valgrind && unlikely(in_valgrind))) {
1464		*usize = index2size(ind);
1465		assert(*usize > 0 && *usize <= HUGE_MAXCLASS);
1466	}
1467
1468	if (config_prof && opt_prof)
1469		return (imalloc_prof(*tsd, *usize, ind, slow_path));
1470
1471	return (imalloc(*tsd, size, ind, slow_path));
1472}
1473
1474JEMALLOC_ALWAYS_INLINE_C void
1475imalloc_post_check(void *ret, tsd_t *tsd, size_t usize, bool slow_path)
1476{
1477	if (unlikely(ret == NULL)) {
1478		if (slow_path && config_xmalloc && unlikely(opt_xmalloc)) {
1479			malloc_write("<jemalloc>: Error in malloc(): "
1480			    "out of memory\n");
1481			abort();
1482		}
1483		set_errno(ENOMEM);
1484	}
1485	if (config_stats && likely(ret != NULL)) {
1486		assert(usize == isalloc(tsd, ret, config_prof));
1487		*tsd_thread_allocatedp_get(tsd) += usize;
1488	}
1489}
1490
1491JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1492void JEMALLOC_NOTHROW *
1493JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
1494je_malloc(size_t size)
1495{
1496	void *ret;
1497	tsd_t *tsd;
1498	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
1499
1500	if (size == 0)
1501		size = 1;
1502
1503	if (likely(!malloc_slow)) {
1504		/*
1505		 * imalloc_body() is inlined so that fast and slow paths are
1506		 * generated separately with statically known slow_path.
1507		 */
1508		ret = imalloc_body(size, &tsd, &usize, false);
1509		imalloc_post_check(ret, tsd, usize, false);
1510	} else {
1511		ret = imalloc_body(size, &tsd, &usize, true);
1512		imalloc_post_check(ret, tsd, usize, true);
1513		UTRACE(0, size, ret);
1514		JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsd, ret, usize, false);
1515	}
1516
1517	witness_assert_lockless(tsd);
1518	return (ret);
1519}
1520
1521static void *
1522imemalign_prof_sample(tsd_t *tsd, size_t alignment, size_t usize,
1523    prof_tctx_t *tctx)
1524{
1525	void *p;
1526
1527	if (tctx == NULL)
1528		return (NULL);
1529	if (usize <= SMALL_MAXCLASS) {
1530		assert(sa2u(LARGE_MINCLASS, alignment) == LARGE_MINCLASS);
1531		p = ipalloc(tsd, LARGE_MINCLASS, alignment, false);
1532		if (p == NULL)
1533			return (NULL);
1534		arena_prof_promoted(tsd, p, usize);
1535	} else
1536		p = ipalloc(tsd, usize, alignment, false);
1537
1538	return (p);
1539}
1540
1541JEMALLOC_ALWAYS_INLINE_C void *
1542imemalign_prof(tsd_t *tsd, size_t alignment, size_t usize)
1543{
1544	void *p;
1545	prof_tctx_t *tctx;
1546
1547	tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
1548	if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
1549		p = imemalign_prof_sample(tsd, alignment, usize, tctx);
1550	else
1551		p = ipalloc(tsd, usize, alignment, false);
1552	if (unlikely(p == NULL)) {
1553		prof_alloc_rollback(tsd, tctx, true);
1554		return (NULL);
1555	}
1556	prof_malloc(tsd, p, usize, tctx);
1557
1558	return (p);
1559}
1560
1561JEMALLOC_ATTR(nonnull(1))
1562static int
1563imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
1564{
1565	int ret;
1566	tsd_t *tsd;
1567	size_t usize;
1568	void *result;
1569
1570	assert(min_alignment != 0);
1571
1572	if (unlikely(malloc_init())) {
1573		tsd = NULL;
1574		result = NULL;
1575		goto label_oom;
1576	}
1577	tsd = tsd_fetch();
1578	witness_assert_lockless(tsd);
1579	if (size == 0)
1580		size = 1;
1581
1582	/* Make sure that alignment is a large enough power of 2. */
1583	if (unlikely(((alignment - 1) & alignment) != 0
1584	    || (alignment < min_alignment))) {
1585		if (config_xmalloc && unlikely(opt_xmalloc)) {
1586			malloc_write("<jemalloc>: Error allocating "
1587			    "aligned memory: invalid alignment\n");
1588			abort();
1589		}
1590		result = NULL;
1591		ret = EINVAL;
1592		goto label_return;
1593	}
1594
1595	usize = sa2u(size, alignment);
1596	if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) {
1597		result = NULL;
1598		goto label_oom;
1599	}
1600
1601	if (config_prof && opt_prof)
1602		result = imemalign_prof(tsd, alignment, usize);
1603	else
1604		result = ipalloc(tsd, usize, alignment, false);
1605	if (unlikely(result == NULL))
1606		goto label_oom;
1607	assert(((uintptr_t)result & (alignment - 1)) == ZU(0));
1608
1609	*memptr = result;
1610	ret = 0;
1611label_return:
1612	if (config_stats && likely(result != NULL)) {
1613		assert(usize == isalloc(tsd, result, config_prof));
1614		*tsd_thread_allocatedp_get(tsd) += usize;
1615	}
1616	UTRACE(0, size, result);
1617	JEMALLOC_VALGRIND_MALLOC(result != NULL, tsd, result, usize, false);
1618	witness_assert_lockless(tsd);
1619	return (ret);
1620label_oom:
1621	assert(result == NULL);
1622	if (config_xmalloc && unlikely(opt_xmalloc)) {
1623		malloc_write("<jemalloc>: Error allocating aligned memory: "
1624		    "out of memory\n");
1625		abort();
1626	}
1627	ret = ENOMEM;
1628	witness_assert_lockless(tsd);
1629	goto label_return;
1630}
1631
1632JEMALLOC_EXPORT int JEMALLOC_NOTHROW
1633JEMALLOC_ATTR(nonnull(1))
1634je_posix_memalign(void **memptr, size_t alignment, size_t size)
1635{
1636	int ret;
1637
1638	ret = imemalign(memptr, alignment, size, sizeof(void *));
1639
1640	return (ret);
1641}
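/*
 * E.g. posix_memalign(&p, 64, 100) succeeds (64 is a power of two and at
 * least sizeof(void *)), whereas alignment 48 fails the power-of-two check in
 * imemalign() and returns EINVAL.
 */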
1642
1643JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1644void JEMALLOC_NOTHROW *
1645JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2)
1646je_aligned_alloc(size_t alignment, size_t size)
1647{
1648	void *ret;
1649	int err;
1650
1651	if (unlikely((err = imemalign(&ret, alignment, size, 1)) != 0)) {
1652		ret = NULL;
1653		set_errno(err);
1654	}
1655
1656	return (ret);
1657}
1658
1659static void *
1660icalloc_prof_sample(tsd_t *tsd, size_t usize, szind_t ind, prof_tctx_t *tctx)
1661{
1662	void *p;
1663
1664	if (tctx == NULL)
1665		return (NULL);
1666	if (usize <= SMALL_MAXCLASS) {
1667		szind_t ind_large = size2index(LARGE_MINCLASS);
1668		p = icalloc(tsd, LARGE_MINCLASS, ind_large);
1669		if (p == NULL)
1670			return (NULL);
1671		arena_prof_promoted(tsd, p, usize);
1672	} else
1673		p = icalloc(tsd, usize, ind);
1674
1675	return (p);
1676}
1677
1678JEMALLOC_ALWAYS_INLINE_C void *
1679icalloc_prof(tsd_t *tsd, size_t usize, szind_t ind)
1680{
1681	void *p;
1682	prof_tctx_t *tctx;
1683
1684	tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
1685	if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
1686		p = icalloc_prof_sample(tsd, usize, ind, tctx);
1687	else
1688		p = icalloc(tsd, usize, ind);
1689	if (unlikely(p == NULL)) {
1690		prof_alloc_rollback(tsd, tctx, true);
1691		return (NULL);
1692	}
1693	prof_malloc(tsd, p, usize, tctx);
1694
1695	return (p);
1696}
1697
1698JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1699void JEMALLOC_NOTHROW *
1700JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2)
1701je_calloc(size_t num, size_t size)
1702{
1703	void *ret;
1704	tsd_t *tsd;
1705	size_t num_size;
1706	szind_t ind;
1707	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
1708
1709	if (unlikely(malloc_init())) {
1710		tsd = NULL;
1711		num_size = 0;
1712		ret = NULL;
1713		goto label_return;
1714	}
1715	tsd = tsd_fetch();
1716	witness_assert_lockless(tsd);
1717
1718	num_size = num * size;
1719	if (unlikely(num_size == 0)) {
1720		if (num == 0 || size == 0)
1721			num_size = 1;
1722		else {
1723			ret = NULL;
1724			goto label_return;
1725		}
1726	/*
1727	 * Try to avoid division here.  We know that it isn't possible to
1728	 * overflow during multiplication if neither operand uses any of the
1729	 * most significant half of the bits in a size_t.
1730	 */
1731	} else if (unlikely(((num | size) & (SIZE_T_MAX << (sizeof(size_t) <<
1732	    2))) && (num_size / size != num))) {
1733		/* size_t overflow. */
1734		ret = NULL;
1735		goto label_return;
1736	}
1737
1738	ind = size2index(num_size);
1739	if (unlikely(ind >= NSIZES)) {
1740		ret = NULL;
1741		goto label_return;
1742	}
1743	if (config_prof && opt_prof) {
1744		usize = index2size(ind);
1745		ret = icalloc_prof(tsd, usize, ind);
1746	} else {
1747		if (config_stats || (config_valgrind && unlikely(in_valgrind)))
1748			usize = index2size(ind);
1749		ret = icalloc(tsd, num_size, ind);
1750	}
1751
1752label_return:
1753	if (unlikely(ret == NULL)) {
1754		if (config_xmalloc && unlikely(opt_xmalloc)) {
1755			malloc_write("<jemalloc>: Error in calloc(): out of "
1756			    "memory\n");
1757			abort();
1758		}
1759		set_errno(ENOMEM);
1760	}
1761	if (config_stats && likely(ret != NULL)) {
1762		assert(usize == isalloc(tsd, ret, config_prof));
1763		*tsd_thread_allocatedp_get(tsd) += usize;
1764	}
1765	UTRACE(0, num_size, ret);
1766	JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsd, ret, usize, true);
1767	witness_assert_lockless(tsd);
1768	return (ret);
1769}
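/*
 * Worked example of the overflow check above: on a 64-bit system the mask is
 * SIZE_T_MAX << 32, so if both num and size fit in 32 bits their product
 * cannot overflow and the division is skipped; otherwise num_size / size !=
 * num detects wraparound.
 */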
1770
1771static void *
1772irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
1773    prof_tctx_t *tctx)
1774{
1775	void *p;
1776
1777	if (tctx == NULL)
1778		return (NULL);
1779	if (usize <= SMALL_MAXCLASS) {
1780		p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false);
1781		if (p == NULL)
1782			return (NULL);
1783		arena_prof_promoted(tsd, p, usize);
1784	} else
1785		p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
1786
1787	return (p);
1788}
1789
1790JEMALLOC_ALWAYS_INLINE_C void *
1791irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize)
1792{
1793	void *p;
1794	bool prof_active;
1795	prof_tctx_t *old_tctx, *tctx;
1796
1797	prof_active = prof_active_get_unlocked();
1798	old_tctx = prof_tctx_get(tsd, old_ptr);
1799	tctx = prof_alloc_prep(tsd, usize, prof_active, true);
1800	if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
1801		p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx);
1802	else
1803		p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
1804	if (unlikely(p == NULL)) {
1805		prof_alloc_rollback(tsd, tctx, true);
1806		return (NULL);
1807	}
1808	prof_realloc(tsd, p, usize, tctx, prof_active, true, old_ptr, old_usize,
1809	    old_tctx);
1810
1811	return (p);
1812}
1813
1814JEMALLOC_INLINE_C void
1815ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
1816{
1817	size_t usize;
1818	UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
1819
1820	witness_assert_lockless(tsd);
1821
1822	assert(ptr != NULL);
1823	assert(malloc_initialized() || IS_INITIALIZER);
1824
1825	if (config_prof && opt_prof) {
1826		usize = isalloc(tsd, ptr, config_prof);
1827		prof_free(tsd, ptr, usize);
1828	} else if (config_stats || config_valgrind)
1829		usize = isalloc(tsd, ptr, config_prof);
1830	if (config_stats)
1831		*tsd_thread_deallocatedp_get(tsd) += usize;
1832
1833	if (likely(!slow_path))
1834		iqalloc(tsd, ptr, tcache, false);
1835	else {
1836		if (config_valgrind && unlikely(in_valgrind))
1837			rzsize = p2rz(tsd, ptr);
1838		iqalloc(tsd, ptr, tcache, true);
1839		JEMALLOC_VALGRIND_FREE(ptr, rzsize);
1840	}
1841}
1842
1843JEMALLOC_INLINE_C void
1844isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache)
1845{
1846	UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
1847
1848	witness_assert_lockless(tsd);
1849
1850	assert(ptr != NULL);
1851	assert(malloc_initialized() || IS_INITIALIZER);
1852
1853	if (config_prof && opt_prof)
1854		prof_free(tsd, ptr, usize);
1855	if (config_stats)
1856		*tsd_thread_deallocatedp_get(tsd) += usize;
1857	if (config_valgrind && unlikely(in_valgrind))
1858		rzsize = p2rz(tsd, ptr);
1859	isqalloc(tsd, ptr, usize, tcache);
1860	JEMALLOC_VALGRIND_FREE(ptr, rzsize);
1861}
1862
1863JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1864void JEMALLOC_NOTHROW *
1865JEMALLOC_ALLOC_SIZE(2)
1866je_realloc(void *ptr, size_t size)
1867{
1868	void *ret;
1869	tsd_t *tsd JEMALLOC_CC_SILENCE_INIT(NULL);
1870	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
1871	size_t old_usize = 0;
1872	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
1873
1874	if (unlikely(size == 0)) {
1875		if (ptr != NULL) {
1876			/* realloc(ptr, 0) is equivalent to free(ptr). */
1877			UTRACE(ptr, 0, 0);
1878			tsd = tsd_fetch();
1879			ifree(tsd, ptr, tcache_get(tsd, false), true);
1880			return (NULL);
1881		}
1882		size = 1;
1883	}
1884
1885	if (likely(ptr != NULL)) {
1886		assert(malloc_initialized() || IS_INITIALIZER);
1887		malloc_thread_init();
1888		tsd = tsd_fetch();
1889		witness_assert_lockless(tsd);
1890
1891		old_usize = isalloc(tsd, ptr, config_prof);
1892		if (config_valgrind && unlikely(in_valgrind)) {
1893			old_rzsize = config_prof ? p2rz(tsd, ptr) :
1894			    u2rz(old_usize);
1895		}
1896
1897		if (config_prof && opt_prof) {
1898			usize = s2u(size);
1899			ret = unlikely(usize == 0 || usize > HUGE_MAXCLASS) ?
1900			    NULL : irealloc_prof(tsd, ptr, old_usize, usize);
1901		} else {
1902			if (config_stats || (config_valgrind &&
1903			    unlikely(in_valgrind)))
1904				usize = s2u(size);
1905			ret = iralloc(tsd, ptr, old_usize, size, 0, false);
1906		}
1907	} else {
1908		/* realloc(NULL, size) is equivalent to malloc(size). */
1909		if (likely(!malloc_slow))
1910			ret = imalloc_body(size, &tsd, &usize, false);
1911		else
1912			ret = imalloc_body(size, &tsd, &usize, true);
1913	}
1914
1915	if (unlikely(ret == NULL)) {
1916		if (config_xmalloc && unlikely(opt_xmalloc)) {
1917			malloc_write("<jemalloc>: Error in realloc(): "
1918			    "out of memory\n");
1919			abort();
1920		}
1921		set_errno(ENOMEM);
1922	}
1923	if (config_stats && likely(ret != NULL)) {
1924		assert(usize == isalloc(tsd, ret, config_prof));
1925		*tsd_thread_allocatedp_get(tsd) += usize;
1926		*tsd_thread_deallocatedp_get(tsd) += old_usize;
1927	}
1928	UTRACE(ptr, size, ret);
1929	JEMALLOC_VALGRIND_REALLOC(true, tsd, ret, usize, true, ptr, old_usize,
1930	    old_rzsize, true, false);
1931	witness_assert_lockless(tsd);
1932	return (ret);
1933}
1934
1935JEMALLOC_EXPORT void JEMALLOC_NOTHROW
1936je_free(void *ptr)
1937{
1938
1939	UTRACE(ptr, 0, 0);
1940	if (likely(ptr != NULL)) {
1941		tsd_t *tsd = tsd_fetch();
1942		if (likely(!malloc_slow))
1943			ifree(tsd, ptr, tcache_get(tsd, false), false);
1944		else
1945			ifree(tsd, ptr, tcache_get(tsd, false), true);
1946		witness_assert_lockless(tsd);
1947	}
1948}
1949
1950/*
1951 * End malloc(3)-compatible functions.
1952 */
1953/******************************************************************************/
1954/*
1955 * Begin non-standard override functions.
1956 */
1957
1958#ifdef JEMALLOC_OVERRIDE_MEMALIGN
1959JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1960void JEMALLOC_NOTHROW *
1961JEMALLOC_ATTR(malloc)
1962je_memalign(size_t alignment, size_t size)
1963{
1964	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
1965	if (unlikely(imemalign(&ret, alignment, size, 1) != 0))
1966		ret = NULL;
1967	return (ret);
1968}
1969#endif
1970
1971#ifdef JEMALLOC_OVERRIDE_VALLOC
1972JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1973void JEMALLOC_NOTHROW *
1974JEMALLOC_ATTR(malloc)
1975je_valloc(size_t size)
1976{
1977	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
1978	if (unlikely(imemalign(&ret, PAGE, size, 1) != 0))
1979		ret = NULL;
1980	return (ret);
1981}
1982#endif
1983
1984/*
1985 * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
1986 * #define je_malloc malloc
1987 */
1988#define	malloc_is_malloc 1
1989#define	is_malloc_(a) malloc_is_ ## a
1990#define	is_malloc(a) is_malloc_(a)
1991
1992#if ((is_malloc(je_malloc) == 1) && defined(JEMALLOC_GLIBC_MALLOC_HOOK))
1993/*
1994 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
1995 * to inconsistently reference libc's malloc(3)-compatible functions
1996 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
1997 *
1998 * These definitions interpose hooks in glibc.  The functions are actually
1999 * passed an extra argument for the caller return address, which will be
2000 * ignored.
2001 */
2002JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free;
2003JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc;
2004JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc;
2005# ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK
2006JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) =
2007    je_memalign;
2008# endif
2009#endif
2010
2011/*
2012 * End non-standard override functions.
2013 */
2014/******************************************************************************/
2015/*
2016 * Begin non-standard functions.
2017 */
2018
2019JEMALLOC_ALWAYS_INLINE_C bool
2020imallocx_flags_decode_hard(tsd_t *tsd, size_t size, int flags, size_t *usize,
2021    size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena)
2022{
2023
2024	if ((flags & MALLOCX_LG_ALIGN_MASK) == 0) {
2025		*alignment = 0;
2026		*usize = s2u(size);
2027	} else {
2028		*alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
2029		*usize = sa2u(size, *alignment);
2030	}
2031	if (unlikely(*usize == 0 || *usize > HUGE_MAXCLASS))
2032		return (true);
2033	*zero = MALLOCX_ZERO_GET(flags);
2034	if ((flags & MALLOCX_TCACHE_MASK) != 0) {
2035		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
2036			*tcache = NULL;
2037		else
2038			*tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2039	} else
2040		*tcache = tcache_get(tsd, true);
2041	if ((flags & MALLOCX_ARENA_MASK) != 0) {
2042		unsigned arena_ind = MALLOCX_ARENA_GET(flags);
2043		*arena = arena_get(tsd, arena_ind, true);
2044		if (unlikely(*arena == NULL))
2045			return (true);
2046	} else
2047		*arena = NULL;
2048	return (false);
2049}
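/*
 * Example (illustrative): flags == MALLOCX_ALIGN(64) | MALLOCX_ZERO |
 * MALLOCX_TCACHE_NONE decodes to *alignment == 64, *zero == true,
 * *tcache == NULL, and *arena == NULL (no explicit arena requested).
 */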
2050
2051JEMALLOC_ALWAYS_INLINE_C bool
2052imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize,
2053    size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena)
2054{
2055
2056	if (likely(flags == 0)) {
2057		*usize = s2u(size);
2058		if (unlikely(*usize == 0 || *usize > HUGE_MAXCLASS))
2059			return (true);
2060		*alignment = 0;
2061		*zero = false;
2062		*tcache = tcache_get(tsd, true);
2063		*arena = NULL;
2064		return (false);
2065	} else {
2066		return (imallocx_flags_decode_hard(tsd, size, flags, usize,
2067		    alignment, zero, tcache, arena));
2068	}
2069}
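
/*
 * Editorial sketch: how a caller of mallocx() composes the MALLOCX_* flags
 * that imallocx_flags_decode*() pull apart above.  The macros are the public
 * ones from jemalloc.h; arena_ind is assumed to have been obtained earlier,
 * e.g. via the "arenas.extend" mallctl.
 *
 *	int flags = MALLOCX_ALIGN(64) | MALLOCX_ZERO | MALLOCX_TCACHE_NONE |
 *	    MALLOCX_ARENA(arena_ind);
 *	void *p = mallocx(4096, flags);
 */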
2070
2071JEMALLOC_ALWAYS_INLINE_C void *
2072imallocx_flags(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
2073    tcache_t *tcache, arena_t *arena)
2074{
2075	szind_t ind;
2076
2077	if (unlikely(alignment != 0))
2078		return (ipalloct(tsd, usize, alignment, zero, tcache, arena));
2079	ind = size2index(usize);
2080	assert(ind < NSIZES);
2081	if (unlikely(zero))
2082		return (icalloct(tsd, usize, ind, tcache, arena));
2083	return (imalloct(tsd, usize, ind, tcache, arena));
2084}
2085
2086static void *
2087imallocx_prof_sample(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
2088    tcache_t *tcache, arena_t *arena)
2089{
2090	void *p;
2091
2092	if (usize <= SMALL_MAXCLASS) {
2093		assert(((alignment == 0) ? s2u(LARGE_MINCLASS) :
2094		    sa2u(LARGE_MINCLASS, alignment)) == LARGE_MINCLASS);
2095		p = imallocx_flags(tsd, LARGE_MINCLASS, alignment, zero, tcache,
2096		    arena);
2097		if (p == NULL)
2098			return (NULL);
2099		arena_prof_promoted(tsd, p, usize);
2100	} else
2101		p = imallocx_flags(tsd, usize, alignment, zero, tcache, arena);
2102
2103	return (p);
2104}
2105
2106JEMALLOC_ALWAYS_INLINE_C void *
2107imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize)
2108{
2109	void *p;
2110	size_t alignment;
2111	bool zero;
2112	tcache_t *tcache;
2113	arena_t *arena;
2114	prof_tctx_t *tctx;
2115
2116	if (unlikely(imallocx_flags_decode(tsd, size, flags, usize, &alignment,
2117	    &zero, &tcache, &arena)))
2118		return (NULL);
2119	tctx = prof_alloc_prep(tsd, *usize, prof_active_get_unlocked(), true);
2120	if (likely((uintptr_t)tctx == (uintptr_t)1U))
2121		p = imallocx_flags(tsd, *usize, alignment, zero, tcache, arena);
2122	else if ((uintptr_t)tctx > (uintptr_t)1U) {
2123		p = imallocx_prof_sample(tsd, *usize, alignment, zero, tcache,
2124		    arena);
2125	} else
2126		p = NULL;
2127	if (unlikely(p == NULL)) {
2128		prof_alloc_rollback(tsd, tctx, true);
2129		return (NULL);
2130	}
2131	prof_malloc(tsd, p, *usize, tctx);
2132
2133	assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
2134	return (p);
2135}
2136
2137JEMALLOC_ALWAYS_INLINE_C void *
2138imallocx_no_prof(tsd_t *tsd, size_t size, int flags, size_t *usize)
2139{
2140	void *p;
2141	size_t alignment;
2142	bool zero;
2143	tcache_t *tcache;
2144	arena_t *arena;
2145
2146	if (likely(flags == 0)) {
2147		szind_t ind = size2index(size);
2148		if (unlikely(ind >= NSIZES))
2149			return (NULL);
2150		if (config_stats || (config_valgrind &&
2151		    unlikely(in_valgrind))) {
2152			*usize = index2size(ind);
2153			assert(*usize > 0 && *usize <= HUGE_MAXCLASS);
2154		}
2155		return (imalloc(tsd, size, ind, true));
2156	}
2157
2158	if (unlikely(imallocx_flags_decode_hard(tsd, size, flags, usize,
2159	    &alignment, &zero, &tcache, &arena)))
2160		return (NULL);
2161	p = imallocx_flags(tsd, *usize, alignment, zero, tcache, arena);
2162	assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
2163	return (p);
2164}
2165
2166JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2167void JEMALLOC_NOTHROW *
2168JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
2169je_mallocx(size_t size, int flags)
2170{
2171	tsd_t *tsd;
2172	void *p;
2173	size_t usize;
2174
2175	assert(size != 0);
2176
2177	if (unlikely(malloc_init())) {
2178		tsd = NULL;
2179		goto label_oom;
2180	}
2181	tsd = tsd_fetch();
2182	witness_assert_lockless(tsd);
2183
2184	if (config_prof && opt_prof)
2185		p = imallocx_prof(tsd, size, flags, &usize);
2186	else
2187		p = imallocx_no_prof(tsd, size, flags, &usize);
2188	if (unlikely(p == NULL))
2189		goto label_oom;
2190
2191	if (config_stats) {
2192		assert(usize == isalloc(tsd, p, config_prof));
2193		*tsd_thread_allocatedp_get(tsd) += usize;
2194	}
2195	UTRACE(0, size, p);
2196	JEMALLOC_VALGRIND_MALLOC(true, tsd, p, usize, MALLOCX_ZERO_GET(flags));
2197	witness_assert_lockless(tsd);
2198	return (p);
2199label_oom:
2200	if (config_xmalloc && unlikely(opt_xmalloc)) {
2201		malloc_write("<jemalloc>: Error in mallocx(): out of memory\n");
2202		abort();
2203	}
2204	UTRACE(0, size, 0);
2205	witness_assert_lockless(tsd);
2206	return (NULL);
2207}
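
/*
 * Editorial usage sketch: a minimal mallocx()/dallocx() pair, assuming an
 * unprefixed build in which je_mallocx() is exposed as mallocx().
 *
 *	#include <jemalloc/jemalloc.h>
 *
 *	void *buf = mallocx(1 << 20, MALLOCX_ALIGN(4096) | MALLOCX_ZERO);
 *	if (buf != NULL)
 *		dallocx(buf, 0);
 */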
2208
2209static void *
2210irallocx_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize,
2211    size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena,
2212    prof_tctx_t *tctx)
2213{
2214	void *p;
2215
2216	if (tctx == NULL)
2217		return (NULL);
2218	if (usize <= SMALL_MAXCLASS) {
2219		p = iralloct(tsd, old_ptr, old_usize, LARGE_MINCLASS, alignment,
2220		    zero, tcache, arena);
2221		if (p == NULL)
2222			return (NULL);
2223		arena_prof_promoted(tsd, p, usize);
2224	} else {
2225		p = iralloct(tsd, old_ptr, old_usize, usize, alignment, zero,
2226		    tcache, arena);
2227	}
2228
2229	return (p);
2230}
2231
2232JEMALLOC_ALWAYS_INLINE_C void *
2233irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
2234    size_t alignment, size_t *usize, bool zero, tcache_t *tcache,
2235    arena_t *arena)
2236{
2237	void *p;
2238	bool prof_active;
2239	prof_tctx_t *old_tctx, *tctx;
2240
2241	prof_active = prof_active_get_unlocked();
2242	old_tctx = prof_tctx_get(tsd, old_ptr);
2243	tctx = prof_alloc_prep(tsd, *usize, prof_active, true);
2244	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
2245		p = irallocx_prof_sample(tsd, old_ptr, old_usize, *usize,
2246		    alignment, zero, tcache, arena, tctx);
2247	} else {
2248		p = iralloct(tsd, old_ptr, old_usize, size, alignment, zero,
2249		    tcache, arena);
2250	}
2251	if (unlikely(p == NULL)) {
2252		prof_alloc_rollback(tsd, tctx, true);
2253		return (NULL);
2254	}
2255
2256	if (p == old_ptr && alignment != 0) {
2257		/*
2258		 * The allocation did not move, so it is possible that the size
2259		 * class is smaller than would guarantee the requested
2260		 * alignment, and that the alignment constraint was
2261		 * serendipitously satisfied.  Additionally, old_usize may not
2262		 * be the same as the current usize because of in-place large
2263		 * reallocation.  Therefore, query the actual value of usize.
2264		 */
2265		*usize = isalloc(tsd, p, config_prof);
2266	}
2267	prof_realloc(tsd, p, *usize, tctx, prof_active, true, old_ptr,
2268	    old_usize, old_tctx);
2269
2270	return (p);
2271}
2272
2273JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2274void JEMALLOC_NOTHROW *
2275JEMALLOC_ALLOC_SIZE(2)
2276je_rallocx(void *ptr, size_t size, int flags)
2277{
2278	void *p;
2279	tsd_t *tsd;
2280	size_t usize;
2281	size_t old_usize;
2282	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
2283	size_t alignment = MALLOCX_ALIGN_GET(flags);
2284	bool zero = flags & MALLOCX_ZERO;
2285	arena_t *arena;
2286	tcache_t *tcache;
2287
2288	assert(ptr != NULL);
2289	assert(size != 0);
2290	assert(malloc_initialized() || IS_INITIALIZER);
2291	malloc_thread_init();
2292	tsd = tsd_fetch();
2293	witness_assert_lockless(tsd);
2294
2295	if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
2296		unsigned arena_ind = MALLOCX_ARENA_GET(flags);
2297		arena = arena_get(tsd, arena_ind, true);
2298		if (unlikely(arena == NULL))
2299			goto label_oom;
2300	} else
2301		arena = NULL;
2302
2303	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2304		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
2305			tcache = NULL;
2306		else
2307			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2308	} else
2309		tcache = tcache_get(tsd, true);
2310
2311	old_usize = isalloc(tsd, ptr, config_prof);
2312	if (config_valgrind && unlikely(in_valgrind))
2313		old_rzsize = u2rz(old_usize);
2314
2315	if (config_prof && opt_prof) {
2316		usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
2317		if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
2318			goto label_oom;
2319		p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize,
2320		    zero, tcache, arena);
2321		if (unlikely(p == NULL))
2322			goto label_oom;
2323	} else {
2324		p = iralloct(tsd, ptr, old_usize, size, alignment, zero,
2325		     tcache, arena);
2326		if (unlikely(p == NULL))
2327			goto label_oom;
2328		if (config_stats || (config_valgrind && unlikely(in_valgrind)))
2329			usize = isalloc(tsd, p, config_prof);
2330	}
2331	assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
2332
2333	if (config_stats) {
2334		*tsd_thread_allocatedp_get(tsd) += usize;
2335		*tsd_thread_deallocatedp_get(tsd) += old_usize;
2336	}
2337	UTRACE(ptr, size, p);
2338	JEMALLOC_VALGRIND_REALLOC(true, tsd, p, usize, false, ptr, old_usize,
2339	    old_rzsize, false, zero);
2340	witness_assert_lockless(tsd);
2341	return (p);
2342label_oom:
2343	if (config_xmalloc && unlikely(opt_xmalloc)) {
2344		malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
2345		abort();
2346	}
2347	UTRACE(ptr, size, 0);
2348	witness_assert_lockless(tsd);
2349	return (NULL);
2350}
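
/*
 * Editorial usage sketch: rallocx() accepts the same flags as mallocx(), so a
 * caller that needs a particular alignment should request it on every resize;
 * a moved allocation is only guaranteed the alignment requested in that call.
 *
 *	void *p = mallocx(1024, MALLOCX_ALIGN(64));
 *	if (p != NULL) {
 *		void *q = rallocx(p, 4096, MALLOCX_ALIGN(64));
 *		if (q != NULL)
 *			p = q;
 *	}
 */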
2351
2352JEMALLOC_ALWAYS_INLINE_C size_t
2353ixallocx_helper(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
2354    size_t extra, size_t alignment, bool zero)
2355{
2356	size_t usize;
2357
2358	if (ixalloc(tsd, ptr, old_usize, size, extra, alignment, zero))
2359		return (old_usize);
2360	usize = isalloc(tsd, ptr, config_prof);
2361
2362	return (usize);
2363}
2364
2365static size_t
2366ixallocx_prof_sample(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
2367    size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx)
2368{
2369	size_t usize;
2370
2371	if (tctx == NULL)
2372		return (old_usize);
2373	usize = ixallocx_helper(tsd, ptr, old_usize, size, extra, alignment,
2374	    zero);
2375
2376	return (usize);
2377}
2378
2379JEMALLOC_ALWAYS_INLINE_C size_t
2380ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
2381    size_t extra, size_t alignment, bool zero)
2382{
2383	size_t usize_max, usize;
2384	bool prof_active;
2385	prof_tctx_t *old_tctx, *tctx;
2386
2387	prof_active = prof_active_get_unlocked();
2388	old_tctx = prof_tctx_get(tsd, ptr);
2389	/*
2390	 * usize isn't knowable before ixalloc() returns when extra is non-zero.
2391	 * Therefore, compute its maximum possible value and use that in
2392	 * prof_alloc_prep() to decide whether to capture a backtrace.
2393	 * prof_realloc() will use the actual usize to decide whether to sample.
2394	 */
2395	if (alignment == 0) {
2396		usize_max = s2u(size+extra);
2397		assert(usize_max > 0 && usize_max <= HUGE_MAXCLASS);
2398	} else {
2399		usize_max = sa2u(size+extra, alignment);
2400		if (unlikely(usize_max == 0 || usize_max > HUGE_MAXCLASS)) {
2401			/*
2402			 * usize_max is out of range, and chances are that
2403			 * allocation will fail, but use the maximum possible
2404			 * value and carry on with prof_alloc_prep(), just in
2405			 * case allocation succeeds.
2406			 */
2407			usize_max = HUGE_MAXCLASS;
2408		}
2409	}
2410	tctx = prof_alloc_prep(tsd, usize_max, prof_active, false);
2411
2412	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
2413		usize = ixallocx_prof_sample(tsd, ptr, old_usize, size, extra,
2414		    alignment, zero, tctx);
2415	} else {
2416		usize = ixallocx_helper(tsd, ptr, old_usize, size, extra,
2417		    alignment, zero);
2418	}
2419	if (usize == old_usize) {
2420		prof_alloc_rollback(tsd, tctx, false);
2421		return (usize);
2422	}
2423	prof_realloc(tsd, ptr, usize, tctx, prof_active, false, ptr, old_usize,
2424	    old_tctx);
2425
2426	return (usize);
2427}
2428
2429JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2430je_xallocx(void *ptr, size_t size, size_t extra, int flags)
2431{
2432	tsd_t *tsd;
2433	size_t usize, old_usize;
2434	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
2435	size_t alignment = MALLOCX_ALIGN_GET(flags);
2436	bool zero = flags & MALLOCX_ZERO;
2437
2438	assert(ptr != NULL);
2439	assert(size != 0);
2440	assert(SIZE_T_MAX - size >= extra);
2441	assert(malloc_initialized() || IS_INITIALIZER);
2442	malloc_thread_init();
2443	tsd = tsd_fetch();
2444	witness_assert_lockless(tsd);
2445
2446	old_usize = isalloc(tsd, ptr, config_prof);
2447
2448	/*
2449	 * The API explicitly absolves itself of protecting against (size +
2450	 * extra) numerical overflow, but we may need to clamp extra to avoid
2451	 * exceeding HUGE_MAXCLASS.
2452	 *
2453	 * Ordinarily, size limit checking is handled deeper down, but here we
2454	 * have to check as part of (size + extra) clamping, since we need the
2455	 * clamped value in the above helper functions.
2456	 */
2457	if (unlikely(size > HUGE_MAXCLASS)) {
2458		usize = old_usize;
2459		goto label_not_resized;
2460	}
2461	if (unlikely(HUGE_MAXCLASS - size < extra))
2462		extra = HUGE_MAXCLASS - size;
2463
2464	if (config_valgrind && unlikely(in_valgrind))
2465		old_rzsize = u2rz(old_usize);
2466
2467	if (config_prof && opt_prof) {
2468		usize = ixallocx_prof(tsd, ptr, old_usize, size, extra,
2469		    alignment, zero);
2470	} else {
2471		usize = ixallocx_helper(tsd, ptr, old_usize, size, extra,
2472		    alignment, zero);
2473	}
2474	if (unlikely(usize == old_usize))
2475		goto label_not_resized;
2476
2477	if (config_stats) {
2478		*tsd_thread_allocatedp_get(tsd) += usize;
2479		*tsd_thread_deallocatedp_get(tsd) += old_usize;
2480	}
2481	JEMALLOC_VALGRIND_REALLOC(false, tsd, ptr, usize, false, ptr, old_usize,
2482	    old_rzsize, false, zero);
2483label_not_resized:
2484	UTRACE(ptr, size, ptr);
2485	witness_assert_lockless(tsd);
2486	return (usize);
2487}
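
/*
 * Editorial usage sketch: xallocx() never moves the allocation; it returns the
 * resulting usable size, so a caller checks whether the in-place resize
 * reached the target and falls back to rallocx() (which may move) otherwise.
 * Here p is assumed to be an existing jemalloc allocation.
 *
 *	size_t target = 64 * 1024;
 *	if (xallocx(p, target, 0, 0) < target) {
 *		// Could not grow in place; use rallocx() if moving the
 *		// allocation is acceptable.
 *	}
 */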
2488
2489JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2490JEMALLOC_ATTR(pure)
2491je_sallocx(const void *ptr, int flags)
2492{
2493	size_t usize;
2494	tsd_t *tsd;
2495
2496	assert(malloc_initialized() || IS_INITIALIZER);
2497	malloc_thread_init();
2498
2499	tsd = tsd_fetch();
2500	witness_assert_lockless(tsd);
2501
2502	if (config_ivsalloc)
2503		usize = ivsalloc(tsd, ptr, config_prof);
2504	else
2505		usize = isalloc(tsd, ptr, config_prof);
2506
2507	witness_assert_lockless(tsd);
2508	return (usize);
2509}
2510
2511JEMALLOC_EXPORT void JEMALLOC_NOTHROW
2512je_dallocx(void *ptr, int flags)
2513{
2514	tsd_t *tsd;
2515	tcache_t *tcache;
2516
2517	assert(ptr != NULL);
2518	assert(malloc_initialized() || IS_INITIALIZER);
2519
2520	tsd = tsd_fetch();
2521	witness_assert_lockless(tsd);
2522	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2523		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
2524			tcache = NULL;
2525		else
2526			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2527	} else
2528		tcache = tcache_get(tsd, false);
2529
2530	UTRACE(ptr, 0, 0);
2531	ifree(tsd, ptr, tcache, true);
2532	witness_assert_lockless(tsd);
2533}
2534
2535JEMALLOC_ALWAYS_INLINE_C size_t
2536inallocx(tsd_t *tsd, size_t size, int flags)
2537{
2538	size_t usize;
2539
2540	witness_assert_lockless(tsd);
2541
2542	if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0))
2543		usize = s2u(size);
2544	else
2545		usize = sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
2546	witness_assert_lockless(tsd);
2547	return (usize);
2548}
2549
2550JEMALLOC_EXPORT void JEMALLOC_NOTHROW
2551je_sdallocx(void *ptr, size_t size, int flags)
2552{
2553	tsd_t *tsd;
2554	tcache_t *tcache;
2555	size_t usize;
2556
2557	assert(ptr != NULL);
2558	assert(malloc_initialized() || IS_INITIALIZER);
2559	tsd = tsd_fetch();
2560	usize = inallocx(tsd, size, flags);
2561	assert(usize == isalloc(tsd, ptr, config_prof));
2562
2563	witness_assert_lockless(tsd);
2564	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2565		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
2566			tcache = NULL;
2567		else
2568			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2569	} else
2570		tcache = tcache_get(tsd, false);
2571
2572	UTRACE(ptr, 0, 0);
2573	isfree(tsd, ptr, usize, tcache);
2574	witness_assert_lockless(tsd);
2575}
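
/*
 * Editorial usage sketch: sdallocx() is the sized counterpart of dallocx().
 * The size/flags passed at deallocation must map to the same size class as
 * the original request, which the inallocx()/isalloc() assertion above checks
 * in debug builds.
 *
 *	void *p = mallocx(100, 0);
 *	if (p != NULL)
 *		sdallocx(p, 100, 0);
 */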
2576
2577JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2578JEMALLOC_ATTR(pure)
2579je_nallocx(size_t size, int flags)
2580{
2581	size_t usize;
2582	tsd_t *tsd;
2583
2584	assert(size != 0);
2585
2586	if (unlikely(malloc_init()))
2587		return (0);
2588
2589	tsd = tsd_fetch();
2590	witness_assert_lockless(tsd);
2591
2592	usize = inallocx(tsd, size, flags);
2593	if (unlikely(usize > HUGE_MAXCLASS))
2594		return (0);
2595
2596	witness_assert_lockless(tsd);
2597	return (usize);
2598}
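
/*
 * Editorial usage sketch: nallocx() reports the usable size a request would
 * map to without allocating, which lets a caller size buffers to a full size
 * class up front.  The 112-byte figure assumes the default size classes.
 *
 *	size_t usable = nallocx(100, 0);	// 112 with default classes
 *	void *p = mallocx(usable, 0);
 *	// For a successful allocation, sallocx(p, 0) == usable.
 */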
2599
2600JEMALLOC_EXPORT int JEMALLOC_NOTHROW
2601je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
2602    size_t newlen)
2603{
2604	int ret;
2605	tsd_t *tsd;
2606
2607	if (unlikely(malloc_init()))
2608		return (EAGAIN);
2609
2610	tsd = tsd_fetch();
2611	witness_assert_lockless(tsd);
2612	ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen);
2613	witness_assert_lockless(tsd);
2614	return (ret);
2615}
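
/*
 * Editorial usage sketch: reading a statistic through mallctl().  Per the
 * jemalloc manual, statistics are refreshed by writing to "epoch" first; the
 * names used here are from the public mallctl namespace.
 *
 *	uint64_t epoch = 1;
 *	size_t sz = sizeof(epoch);
 *	mallctl("epoch", &epoch, &sz, &epoch, sz);
 *
 *	size_t allocated;
 *	sz = sizeof(allocated);
 *	if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0)
 *		printf("allocated: %zu\n", allocated);
 */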
2616
2617JEMALLOC_EXPORT int JEMALLOC_NOTHROW
2618je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
2619{
2620	int ret;
2621	tsd_t *tsd;
2622
2623	if (unlikely(malloc_init()))
2624		return (EAGAIN);
2625
2626	tsd = tsd_fetch();
2627	witness_assert_lockless(tsd);
2628	ret = ctl_nametomib(tsd, name, mibp, miblenp);
2629	witness_assert_lockless(tsd);
2630	return (ret);
2631}
2632
2633JEMALLOC_EXPORT int JEMALLOC_NOTHROW
2634je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
2635  void *newp, size_t newlen)
2636{
2637	int ret;
2638	tsd_t *tsd;
2639
2640	if (unlikely(malloc_init()))
2641		return (EAGAIN);
2642
2643	tsd = tsd_fetch();
2644	witness_assert_lockless(tsd);
2645	ret = ctl_bymib(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
2646	witness_assert_lockless(tsd);
2647	return (ret);
2648}
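
/*
 * Editorial usage sketch: the mib interface amortizes name lookup when the
 * same tree node is queried repeatedly, e.g. iterating over bins (adapted
 * from the pattern in the jemalloc manual; nbins is assumed to come from the
 * "arenas.nbins" mallctl, and mib[2] is the bin index).
 *
 *	size_t mib[4], miblen = 4, bin_size, sz = sizeof(bin_size);
 *	unsigned i;
 *
 *	mallctlnametomib("arenas.bin.0.size", mib, &miblen);
 *	for (i = 0; i < nbins; i++) {
 *		mib[2] = i;
 *		mallctlbymib(mib, miblen, &bin_size, &sz, NULL, 0);
 *	}
 */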
2649
2650JEMALLOC_EXPORT void JEMALLOC_NOTHROW
2651je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
2652    const char *opts)
2653{
2654	tsd_t *tsd;
2655
2656	tsd = tsd_fetch();
2657	witness_assert_lockless(tsd);
2658	stats_print(write_cb, cbopaque, opts);
2659	witness_assert_lockless(tsd);
2660}
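
/*
 * Editorial usage sketch: passing a NULL write callback sends the report to
 * the default malloc_message() sink (stderr unless overridden); a callback
 * plus opaque pointer can capture it instead.  The opts string filters
 * sections as documented in the manual.
 *
 *	malloc_stats_print(NULL, NULL, NULL);	// full report
 */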
2661
2662JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2663je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
2664{
2665	size_t ret;
2666	tsd_t *tsd;
2667
2668	assert(malloc_initialized() || IS_INITIALIZER);
2669	malloc_thread_init();
2670
2671	tsd = tsd_fetch();
2672	witness_assert_lockless(tsd);
2673
2674	if (config_ivsalloc)
2675		ret = ivsalloc(tsd, ptr, config_prof);
2676	else
2677		ret = (ptr == NULL) ? 0 : isalloc(tsd, ptr, config_prof);
2678
2679	witness_assert_lockless(tsd);
2680	return (ret);
2681}
2682
2683/*
2684 * End non-standard functions.
2685 */
2686/******************************************************************************/
2687/*
2688 * The following functions are used by threading libraries for protection of
2689 * malloc during fork().
2690 */
2691
2692/*
2693 * If an application creates a thread before doing any allocation in the main
2694 * thread, then calls fork(2) in the main thread followed by memory allocation
2695 * in the child process, a race can occur that results in deadlock within the
2696 * child: the main thread may have forked while the created thread had
2697 * partially initialized the allocator.  Ordinarily jemalloc prevents
2698 * fork/malloc races via the following functions it registers during
2699 * initialization using pthread_atfork(), but that provides no protection if
2700 * the allocator isn't fully initialized at fork time.  The following library
2701 * constructor is a partial solution to this problem.  It may still be possible
2702 * to trigger the deadlock described above, but doing so would involve forking
2703 * via a library constructor that runs before jemalloc's constructor runs.
2704 */
2705JEMALLOC_ATTR(constructor)
2706static void
2707jemalloc_constructor(void)
2708{
2709
2710	malloc_init();
2711}
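
/*
 * Editorial sketch: the registration the comment above refers to.  On
 * platforms without JEMALLOC_MUTEX_INIT_CB-style callbacks, initialization
 * (earlier in this file) registers the fork handlers below roughly as
 * follows:
 *
 *	pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
 *	    jemalloc_postfork_child);
 */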
2712
2713#ifndef JEMALLOC_MUTEX_INIT_CB
2714void
2715jemalloc_prefork(void)
2716#else
2717JEMALLOC_EXPORT void
2718_malloc_prefork(void)
2719#endif
2720{
2721	tsd_t *tsd;
2722	unsigned i, narenas;
2723
2724#ifdef JEMALLOC_MUTEX_INIT_CB
2725	if (!malloc_initialized())
2726		return;
2727#endif
2728	assert(malloc_initialized());
2729
2730	tsd = tsd_fetch();
2731
2732	/* Acquire all mutexes in a safe order. */
2733	ctl_prefork(tsd);
2734	prof_prefork(tsd);
2735	malloc_mutex_prefork(tsd, &arenas_lock);
2736	for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
2737		arena_t *arena;
2738
2739		if ((arena = arena_get(tsd, i, false)) != NULL)
2740			arena_prefork(tsd, arena);
2741	}
2742	chunk_prefork(tsd);
2743	base_prefork(tsd);
2744}
2745
2746#ifndef JEMALLOC_MUTEX_INIT_CB
2747void
2748jemalloc_postfork_parent(void)
2749#else
2750JEMALLOC_EXPORT void
2751_malloc_postfork(void)
2752#endif
2753{
2754	tsd_t *tsd;
2755	unsigned i, narenas;
2756
2757#ifdef JEMALLOC_MUTEX_INIT_CB
2758	if (!malloc_initialized())
2759		return;
2760#endif
2761	assert(malloc_initialized());
2762
2763	tsd = tsd_fetch();
2764
2765	/* Release all mutexes, now that fork() has completed. */
2766	base_postfork_parent(tsd);
2767	chunk_postfork_parent(tsd);
2768	for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
2769		arena_t *arena;
2770
2771		if ((arena = arena_get(tsd, i, false)) != NULL)
2772			arena_postfork_parent(tsd, arena);
2773	}
2774	malloc_mutex_postfork_parent(tsd, &arenas_lock);
2775	prof_postfork_parent(tsd);
2776	ctl_postfork_parent(tsd);
2777}
2778
2779void
2780jemalloc_postfork_child(void)
2781{
2782	tsd_t *tsd;
2783	unsigned i, narenas;
2784
2785	assert(malloc_initialized());
2786
2787	tsd = tsd_fetch();
2788
2789	/* Release all mutexes, now that fork() has completed. */
2790	base_postfork_child(tsd);
2791	chunk_postfork_child(tsd);
2792	for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
2793		arena_t *arena;
2794
2795		if ((arena = arena_get(tsd, i, false)) != NULL)
2796			arena_postfork_child(tsd, arena);
2797	}
2798	malloc_mutex_postfork_child(tsd, &arenas_lock);
2799	prof_postfork_child(tsd);
2800	ctl_postfork_child(tsd);
2801}
2802
2803/******************************************************************************/
2804