jemalloc.c revision 58ad1e4956affe0f9949445dce4410ad70b4cdac
#define	JEMALLOC_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

malloc_tsd_data(, arenas, arena_t *, NULL)
malloc_tsd_data(, thread_allocated, thread_allocated_t,
    THREAD_ALLOCATED_INITIALIZER)

/* Runtime configuration options. */
const char	*je_malloc_conf;
#ifdef JEMALLOC_DEBUG
bool	opt_abort = true;
#  ifdef JEMALLOC_FILL
bool	opt_junk = true;
#  else
bool	opt_junk = false;
#  endif
#else
bool	opt_abort = false;
bool	opt_junk = false;
#endif
size_t	opt_quarantine = ZU(0);
bool	opt_redzone = false;
bool	opt_utrace = false;
bool	opt_valgrind = false;
bool	opt_xmalloc = false;
bool	opt_zero = false;
size_t	opt_narenas = 0;

unsigned	ncpus;

malloc_mutex_t		arenas_lock;
arena_t			**arenas;
unsigned		narenas;

/* Set to true once the allocator has been initialized. */
static bool		malloc_initialized = false;

#ifdef JEMALLOC_THREADED_INIT
/* Used to let the initializing thread recursively allocate. */
#  define NO_INITIALIZER	((unsigned long)0)
#  define INITIALIZER		pthread_self()
#  define IS_INITIALIZER	(malloc_initializer == pthread_self())
static pthread_t		malloc_initializer = NO_INITIALIZER;
#else
#  define NO_INITIALIZER	false
#  define INITIALIZER		true
#  define IS_INITIALIZER	malloc_initializer
static bool			malloc_initializer = NO_INITIALIZER;
#endif

/* Used to avoid initialization races. */
#ifdef _WIN32
static malloc_mutex_t	init_lock;

JEMALLOC_ATTR(constructor)
static void WINAPI
_init_init_lock(void)
{

	malloc_mutex_init(&init_lock);
}

#ifdef _MSC_VER
#  pragma section(".CRT$XCU", read)
JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
#endif

#else
static malloc_mutex_t	init_lock = MALLOC_MUTEX_INITIALIZER;
#endif

typedef struct {
	void	*p;	/* Input pointer (as in realloc(p, s)). */
	size_t	s;	/* Request size. */
	void	*r;	/* Result pointer. */
} malloc_utrace_t;

#ifdef JEMALLOC_UTRACE
#  define UTRACE(a, b, c) do {						\
	if (opt_utrace) {						\
		malloc_utrace_t ut;					\
		ut.p = (a);						\
		ut.s = (b);						\
		ut.r = (c);						\
		utrace(&ut, sizeof(ut));				\
	}								\
} while (0)
#else
#  define UTRACE(a, b, c)
#endif
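
/*
 * Illustrative note (an editorial addition, not part of the original
 * source): with JEMALLOC_UTRACE defined and the "utrace" option enabled,
 * each entry point records one malloc_utrace_t via utrace(2).  For
 * example, a malloc(4096) call returning p is traced by je_malloc() as
 * UTRACE(0, 4096, p), i.e. {p = NULL, s = 4096, r = p}, and the matching
 * free(p) is traced by je_free() as UTRACE(p, 0, 0).
 */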

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void	stats_print_atexit(void);
static unsigned	malloc_ncpus(void);
static bool	malloc_conf_next(char const **opts_p, char const **k_p,
    size_t *klen_p, char const **v_p, size_t *vlen_p);
static void	malloc_conf_error(const char *msg, const char *k, size_t klen,
    const char *v, size_t vlen);
static void	malloc_conf_init(void);
static bool	malloc_init_hard(void);
static int	imemalign(void **memptr, size_t alignment, size_t size,
    size_t min_alignment);

/******************************************************************************/
/*
 * Begin miscellaneous support functions.
 */

/* Create a new arena and insert it into the arenas array at index ind. */
arena_t *
arenas_extend(unsigned ind)
{
	arena_t *ret;

	ret = (arena_t *)base_alloc(sizeof(arena_t));
	if (ret != NULL && arena_new(ret, ind) == false) {
		arenas[ind] = ret;
		return (ret);
	}
	/* Only reached if there is an OOM error. */

	/*
	 * OOM here is quite inconvenient to propagate, since dealing with it
	 * would require a check for failure in the fast path.  Instead, punt
	 * by using arenas[0].  In practice, this is an extremely unlikely
	 * failure.
	 */
	malloc_write("<jemalloc>: Error initializing arena\n");
	if (opt_abort)
		abort();

	return (arenas[0]);
}

/* Slow path, called only by choose_arena(). */
arena_t *
choose_arena_hard(void)
{
	arena_t *ret;

	if (narenas > 1) {
		unsigned i, choose, first_null;

		choose = 0;
		first_null = narenas;
		malloc_mutex_lock(&arenas_lock);
		assert(arenas[0] != NULL);
		for (i = 1; i < narenas; i++) {
			if (arenas[i] != NULL) {
				/*
				 * Choose the first arena that has the lowest
				 * number of threads assigned to it.
				 */
				if (arenas[i]->nthreads <
				    arenas[choose]->nthreads)
					choose = i;
			} else if (first_null == narenas) {
				/*
				 * Record the index of the first uninitialized
				 * arena, in case all extant arenas are in use.
				 *
				 * NB: It is possible for there to be
				 * discontinuities in terms of initialized
				 * versus uninitialized arenas, due to the
				 * "thread.arena" mallctl.
				 */
				first_null = i;
			}
		}

		if (arenas[choose]->nthreads == 0 || first_null == narenas) {
			/*
			 * Use an unloaded arena, or the least loaded arena if
			 * all arenas are already initialized.
			 */
			ret = arenas[choose];
		} else {
			/* Initialize a new arena. */
			ret = arenas_extend(first_null);
		}
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	} else {
		ret = arenas[0];
		malloc_mutex_lock(&arenas_lock);
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	}

	arenas_tsd_set(&ret);

	return (ret);
}

static void
stats_print_atexit(void)
{

	if (config_tcache && config_stats) {
		unsigned i;

		/*
		 * Merge stats from extant threads.  This is racy, since
		 * individual threads do not lock when recording tcache stats
		 * events.  As a consequence, the final stats may be slightly
		 * out of date by the time they are reported, if other threads
		 * continue to allocate.
		 */
		for (i = 0; i < narenas; i++) {
			arena_t *arena = arenas[i];
			if (arena != NULL) {
				tcache_t *tcache;

				/*
				 * tcache_stats_merge() locks bins, so if any
				 * code is introduced that acquires both arena
				 * and bin locks in the opposite order,
				 * deadlocks may result.
				 */
				malloc_mutex_lock(&arena->lock);
				ql_foreach(tcache, &arena->tcache_ql, link) {
					tcache_stats_merge(tcache, arena);
				}
				malloc_mutex_unlock(&arena->lock);
			}
		}
	}
	je_malloc_stats_print(NULL, NULL, NULL);
}

/*
 * End miscellaneous support functions.
 */
/******************************************************************************/
/*
 * Begin initialization functions.
 */

static unsigned
malloc_ncpus(void)
{
	unsigned ret;
	long result;

#ifdef _WIN32
	SYSTEM_INFO si;
	GetSystemInfo(&si);
	result = si.dwNumberOfProcessors;
#else
	result = sysconf(_SC_NPROCESSORS_ONLN);
	if (result == -1) {
		/* Error; fall back to a single CPU. */
		result = 1;
	}
#endif
	ret = (unsigned)result;

	return (ret);
}

void
arenas_cleanup(void *arg)
{
	arena_t *arena = *(arena_t **)arg;

	malloc_mutex_lock(&arenas_lock);
	arena->nthreads--;
	malloc_mutex_unlock(&arenas_lock);
}

static inline bool
malloc_init(void)
{

	if (malloc_initialized == false)
		return (malloc_init_hard());

	return (false);
}

static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p)
{
	bool accept;
	const char *opts = *opts_p;

	*k_p = opts;

	for (accept = false; accept == false;) {
		switch (*opts) {
		case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
		case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
		case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
		case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
		case 'Y': case 'Z':
		case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
		case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
		case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
		case 's': case 't': case 'u': case 'v': case 'w': case 'x':
		case 'y': case 'z':
		case '0': case '1': case '2': case '3': case '4': case '5':
		case '6': case '7': case '8': case '9':
		case '_':
			opts++;
			break;
		case ':':
			opts++;
			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
			*v_p = opts;
			accept = true;
			break;
		case '\0':
			if (opts != *opts_p) {
				malloc_write("<jemalloc>: Conf string ends "
				    "with key\n");
			}
			return (true);
		default:
			malloc_write("<jemalloc>: Malformed conf string\n");
			return (true);
		}
	}

	for (accept = false; accept == false;) {
		switch (*opts) {
		case ',':
			opts++;
			/*
			 * Look ahead one character here, because the next time
			 * this function is called, it will assume that end of
			 * input has been cleanly reached if no input remains,
			 * but we have optimistically already consumed the
			 * comma if one exists.
			 */
			if (*opts == '\0') {
				malloc_write("<jemalloc>: Conf string ends "
				    "with comma\n");
			}
			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
			accept = true;
			break;
		case '\0':
			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
			accept = true;
			break;
		default:
			opts++;
			break;
		}
	}

	*opts_p = opts;
	return (false);
}
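
/*
 * Parsing example (an editorial addition, not part of the original
 * source): given the conf string "abort:true,narenas:4", successive calls
 * to malloc_conf_next() yield:
 *
 *	k = "abort",   klen = 5, v = "true", vlen = 4
 *	k = "narenas", klen = 7, v = "4",    vlen = 1
 *
 * Keys are runs of [A-Za-z0-9_], ':' separates a key from its value, and
 * ',' separates key/value pairs.  Note that k and v point into the conf
 * string and are not NUL-terminated; klen/vlen delimit them.
 */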

static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
    size_t vlen)
{

	malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
	    (int)vlen, v);
}

static void
malloc_conf_init(void)
{
	unsigned i;
	char buf[PATH_MAX + 1];
	const char *opts, *k, *v;
	size_t klen, vlen;

	for (i = 0; i < 3; i++) {
		/* Get runtime configuration. */
		switch (i) {
		case 0:
			if (je_malloc_conf != NULL) {
				/*
				 * Use options that were compiled into the
				 * program.
				 */
				opts = je_malloc_conf;
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		case 1: {
#ifndef _WIN32
			int linklen;
			const char *linkname =
#  ifdef JEMALLOC_PREFIX
			    "/etc/"JEMALLOC_PREFIX"malloc.conf"
#  else
			    "/etc/malloc.conf"
#  endif
			    ;

			if ((linklen = readlink(linkname, buf,
			    sizeof(buf) - 1)) != -1) {
				/*
				 * Use the contents of the "/etc/malloc.conf"
				 * symbolic link's name.
				 */
				buf[linklen] = '\0';
				opts = buf;
			} else
#endif
			{
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		} case 2: {
			const char *envname =
#ifdef JEMALLOC_PREFIX
			    JEMALLOC_CPREFIX"MALLOC_CONF"
#else
			    "MALLOC_CONF"
#endif
			    ;

			if ((opts = getenv(envname)) != NULL) {
				/*
				 * Do nothing; opts is already initialized to
				 * the value of the MALLOC_CONF environment
				 * variable.
				 */
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		} default:
			/* NOTREACHED */
			assert(false);
			buf[0] = '\0';
			opts = buf;
		}

		while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
		    &vlen) == false) {
#define	CONF_HANDLE_BOOL_HIT(o, n, hit)					\
			if (sizeof(n)-1 == klen && strncmp(n, k,	\
			    klen) == 0) {				\
				if (strncmp("true", v, vlen) == 0 &&	\
				    vlen == sizeof("true")-1)		\
					o = true;			\
				else if (strncmp("false", v, vlen) ==	\
				    0 && vlen == sizeof("false")-1)	\
					o = false;			\
				else {					\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				}					\
				hit = true;				\
			} else						\
				hit = false;
#define	CONF_HANDLE_BOOL(o, n) {					\
			bool hit;					\
			CONF_HANDLE_BOOL_HIT(o, n, hit);		\
			if (hit)					\
				continue;				\
}
#define	CONF_HANDLE_SIZE_T(o, n, min, max)				\
			if (sizeof(n)-1 == klen && strncmp(n, k,	\
			    klen) == 0) {				\
				uintmax_t um;				\
				char *end;				\
									\
				set_errno(0);				\
				um = malloc_strtoumax(v, &end, 0);	\
				if (get_errno() != 0 || (uintptr_t)end -\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (um < min || um > max) {	\
					malloc_conf_error(		\
					    "Out-of-range conf value",	\
					    k, klen, v, vlen);		\
				} else					\
					o = um;				\
				continue;				\
			}
#define	CONF_HANDLE_SSIZE_T(o, n, min, max)				\
			if (sizeof(n)-1 == klen && strncmp(n, k,	\
			    klen) == 0) {				\
				long l;					\
				char *end;				\
									\
				set_errno(0);				\
				l = strtol(v, &end, 0);			\
				if (get_errno() != 0 || (uintptr_t)end -\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (l < (ssize_t)min || l >	\
				    (ssize_t)max) {			\
					malloc_conf_error(		\
					    "Out-of-range conf value",	\
					    k, klen, v, vlen);		\
				} else					\
					o = l;				\
				continue;				\
			}
#define	CONF_HANDLE_CHAR_P(o, n, d)					\
			if (sizeof(n)-1 == klen && strncmp(n, k,	\
			    klen) == 0) {				\
				size_t cpylen = (vlen <=		\
				    sizeof(o)-1) ? vlen :		\
				    sizeof(o)-1;			\
				strncpy(o, v, cpylen);			\
				o[cpylen] = '\0';			\
				continue;				\
			}

			CONF_HANDLE_BOOL(opt_abort, "abort")
			/*
			 * Chunks always require at least one header page, plus
			 * one data page in the absence of redzones, or three
			 * pages in the presence of redzones.  In order to
			 * simplify options processing, fix the limit based on
			 * config_fill.
			 */
			CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
			    (config_fill ? 2 : 1), (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1,
			    SIZE_T_MAX)
			CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
			    -1, (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
			if (config_fill) {
				CONF_HANDLE_BOOL(opt_junk, "junk")
				CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
				    0, SIZE_T_MAX)
				CONF_HANDLE_BOOL(opt_redzone, "redzone")
				CONF_HANDLE_BOOL(opt_zero, "zero")
			}
			if (config_utrace) {
				CONF_HANDLE_BOOL(opt_utrace, "utrace")
			}
			if (config_valgrind) {
				bool hit;
				CONF_HANDLE_BOOL_HIT(opt_valgrind,
				    "valgrind", hit)
				if (config_fill && opt_valgrind && hit) {
					opt_junk = false;
					opt_zero = false;
					if (opt_quarantine == 0) {
						opt_quarantine =
						    JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
					}
					opt_redzone = true;
				}
				if (hit)
					continue;
			}
			if (config_xmalloc) {
				CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
			}
			if (config_tcache) {
				CONF_HANDLE_BOOL(opt_tcache, "tcache")
				CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
				    "lg_tcache_max", -1,
				    (sizeof(size_t) << 3) - 1)
			}
			if (config_prof) {
				CONF_HANDLE_BOOL(opt_prof, "prof")
				CONF_HANDLE_CHAR_P(opt_prof_prefix,
				    "prof_prefix", "jeprof")
				CONF_HANDLE_BOOL(opt_prof_active, "prof_active")
				CONF_HANDLE_SSIZE_T(opt_lg_prof_sample,
				    "lg_prof_sample", 0,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum")
				CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
				    "lg_prof_interval", -1,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump")
				CONF_HANDLE_BOOL(opt_prof_final, "prof_final")
				CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak")
			}
			malloc_conf_error("Invalid conf pair", k, klen, v,
			    vlen);
#undef CONF_HANDLE_BOOL_HIT
#undef CONF_HANDLE_BOOL
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
		}
	}
}
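
/*
 * Usage sketch (an editorial addition, not part of the original source):
 * the three sources above are parsed in order, so a later source
 * overrides an earlier one for any option both specify.  Assuming no
 * JEMALLOC_PREFIX, running
 *
 *	MALLOC_CONF="abort:true,narenas:4,lg_chunk:24" ./a.out
 *
 * enables abort-on-error, limits the allocator to 4 arenas, and selects
 * 16MiB (1 << 24 byte) chunks, overriding any je_malloc_conf or
 * /etc/malloc.conf settings for those options.
 */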

static bool
malloc_init_hard(void)
{
	arena_t *init_arenas[1];

	malloc_mutex_lock(&init_lock);
	if (malloc_initialized || IS_INITIALIZER) {
		/*
		 * Another thread initialized the allocator before this one
		 * acquired init_lock, or this thread is the initializing
		 * thread, and it is recursively allocating.
		 */
		malloc_mutex_unlock(&init_lock);
		return (false);
	}
#ifdef JEMALLOC_THREADED_INIT
	if (malloc_initializer != NO_INITIALIZER && IS_INITIALIZER == false) {
		/* Busy-wait until the initializing thread completes. */
		do {
			malloc_mutex_unlock(&init_lock);
			CPU_SPINWAIT;
			malloc_mutex_lock(&init_lock);
		} while (malloc_initialized == false);
		malloc_mutex_unlock(&init_lock);
		return (false);
	}
#endif
	malloc_initializer = INITIALIZER;

	malloc_tsd_boot();
	if (config_prof)
		prof_boot0();

	malloc_conf_init();

#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
    && !defined(_WIN32))
	/* Register fork handlers. */
	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
	    jemalloc_postfork_child) != 0) {
		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
		if (opt_abort)
			abort();
	}
#endif

	if (opt_stats_print) {
		/* Print statistics at exit. */
		if (atexit(stats_print_atexit) != 0) {
			malloc_write("<jemalloc>: Error in atexit()\n");
			if (opt_abort)
				abort();
		}
	}

	if (base_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (chunk_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (ctl_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_prof)
		prof_boot1();

	arena_boot();

	if (config_tcache && tcache_boot0()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (huge_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (malloc_mutex_init(&arenas_lock)) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/*
	 * Create enough scaffolding to allow recursive allocation in
	 * malloc_ncpus().
	 */
	narenas = 1;
	arenas = init_arenas;
	memset(arenas, 0, sizeof(arena_t *) * narenas);

	/*
	 * Initialize one arena here.  The rest are lazily created in
	 * choose_arena_hard().
	 */
	arenas_extend(0);
	if (arenas[0] == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/* Initialize allocation counters before any allocations can occur. */
	if (config_stats && thread_allocated_tsd_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (arenas_tsd_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_tcache && tcache_boot1()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_fill && quarantine_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_prof && prof_boot2()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/* Get number of CPUs. */
	malloc_mutex_unlock(&init_lock);
	ncpus = malloc_ncpus();
	malloc_mutex_lock(&init_lock);

	if (mutex_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (opt_narenas == 0) {
		/*
		 * For SMP systems, create more than one arena per CPU by
		 * default.
		 */
		if (ncpus > 1)
			opt_narenas = ncpus << 2;
		else
			opt_narenas = 1;
	}
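	/*
	 * Illustrative example (an editorial addition, not part of the
	 * original source): on an 8-CPU system with no "narenas" option,
	 * the default is ncpus << 2 == 32 arenas, which spreads threads
	 * across arenas to reduce lock contention.
	 */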
	narenas = opt_narenas;
	/*
	 * Make sure that the arenas array can be allocated.  In practice, this
	 * limit is enough to allow the allocator to function, but the ctl
	 * machinery will fail to allocate memory at far lower limits.
	 */
	if (narenas > chunksize / sizeof(arena_t *)) {
		narenas = chunksize / sizeof(arena_t *);
		malloc_printf("<jemalloc>: Reducing narenas to limit (%u)\n",
		    narenas);
	}

	/* Allocate and initialize arenas. */
	arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
	if (arenas == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}
	/*
	 * Zero the array.  In practice, this should always be pre-zeroed,
	 * since it was just mmap()ed, but let's be sure.
	 */
	memset(arenas, 0, sizeof(arena_t *) * narenas);
	/* Copy the pointer to the one arena that was already initialized. */
	arenas[0] = init_arenas[0];

	malloc_initialized = true;
	malloc_mutex_unlock(&init_lock);
	return (false);
}

/*
 * End initialization functions.
 */
/******************************************************************************/
/*
 * Begin malloc(3)-compatible functions.
 */

void *
je_malloc(size_t size)
{
	void *ret;
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

	if (malloc_init()) {
		ret = NULL;
		goto label_oom;
	}

	if (size == 0)
		size = 1;

	if (config_prof && opt_prof) {
		usize = s2u(size);
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL) {
			ret = NULL;
			goto label_oom;
		}
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
		    SMALL_MAXCLASS) {
			ret = imalloc(SMALL_MAXCLASS+1);
			if (ret != NULL)
				arena_prof_promoted(ret, usize);
		} else
			ret = imalloc(size);
	} else {
		if (config_stats || (config_valgrind && opt_valgrind))
			usize = s2u(size);
		ret = imalloc(size);
	}

label_oom:
	if (ret == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in malloc(): "
			    "out of memory\n");
			abort();
		}
		set_errno(ENOMEM);
	}
	if (config_prof && opt_prof && ret != NULL)
		prof_malloc(ret, usize, cnt);
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret, config_prof));
		thread_allocated_tsd_get()->allocated += usize;
	}
	UTRACE(0, size, ret);
	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
	return (ret);
}

JEMALLOC_ATTR(nonnull(1))
#ifdef JEMALLOC_PROF
/*
 * Avoid any uncertainty as to how many backtrace frames to ignore in
 * PROF_ALLOC_PREP().
 */
JEMALLOC_ATTR(noinline)
#endif
static int
imemalign(void **memptr, size_t alignment, size_t size,
    size_t min_alignment)
{
	int ret;
	size_t usize;
	void *result;
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

	assert(min_alignment != 0);

	if (malloc_init())
		result = NULL;
	else {
		if (size == 0)
			size = 1;

		/* Make sure that alignment is a large enough power of 2. */
		if (((alignment - 1) & alignment) != 0
		    || (alignment < min_alignment)) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error allocating "
				    "aligned memory: invalid alignment\n");
				abort();
			}
			result = NULL;
			ret = EINVAL;
			goto label_return;
		}

		usize = sa2u(size, alignment);
		if (usize == 0) {
			result = NULL;
			ret = ENOMEM;
			goto label_return;
		}

		if (config_prof && opt_prof) {
			PROF_ALLOC_PREP(2, usize, cnt);
			if (cnt == NULL) {
				result = NULL;
				ret = EINVAL;
			} else {
				if (prof_promote && (uintptr_t)cnt !=
				    (uintptr_t)1U && usize <= SMALL_MAXCLASS) {
					assert(sa2u(SMALL_MAXCLASS+1,
					    alignment) != 0);
					result = ipalloc(sa2u(SMALL_MAXCLASS+1,
					    alignment), alignment, false);
					if (result != NULL) {
						arena_prof_promoted(result,
						    usize);
					}
				} else {
					result = ipalloc(usize, alignment,
					    false);
				}
			}
		} else
			result = ipalloc(usize, alignment, false);
	}

	if (result == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error allocating aligned "
			    "memory: out of memory\n");
			abort();
		}
		ret = ENOMEM;
		goto label_return;
	}

	*memptr = result;
	ret = 0;

label_return:
	if (config_stats && result != NULL) {
		assert(usize == isalloc(result, config_prof));
		thread_allocated_tsd_get()->allocated += usize;
	}
	if (config_prof && opt_prof && result != NULL)
		prof_malloc(result, usize, cnt);
	UTRACE(0, size, result);
	return (ret);
}

int
je_posix_memalign(void **memptr, size_t alignment, size_t size)
{
	int ret = imemalign(memptr, alignment, size, sizeof(void *));
	JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr,
	    config_prof), false);
	return (ret);
}

void *
je_aligned_alloc(size_t alignment, size_t size)
{
	void *ret;
	int err;

	if ((err = imemalign(&ret, alignment, size, 1)) != 0) {
		ret = NULL;
		set_errno(err);
	}
	JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof),
	    false);
	return (ret);
}

void *
je_calloc(size_t num, size_t size)
{
	void *ret;
	size_t num_size;
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

	if (malloc_init()) {
		num_size = 0;
		ret = NULL;
		goto label_return;
	}

	num_size = num * size;
	if (num_size == 0) {
		if (num == 0 || size == 0)
			num_size = 1;
		else {
			ret = NULL;
			goto label_return;
		}
	/*
	 * Try to avoid division here.  We know that it isn't possible to
	 * overflow during multiplication if neither operand uses any of the
	 * most significant half of the bits in a size_t.
	 */
	} else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
	    && (num_size / size != num)) {
		/* size_t overflow. */
		ret = NULL;
		goto label_return;
	}
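	/*
	 * Worked example (an editorial addition, not part of the original
	 * source): with a 32-bit size_t, SIZE_T_MAX << 16 == 0xffff0000.
	 * For calloc(0x10001, 0x10000), num_size wraps to 0x10000;
	 * (num | size) has high bits set and num_size / size == 1 !=
	 * 0x10001, so the overflow is detected.  For calloc(100, 200),
	 * neither operand touches the high half, overflow is impossible,
	 * and the division is skipped.
	 */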

	if (config_prof && opt_prof) {
		usize = s2u(num_size);
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL) {
			ret = NULL;
			goto label_return;
		}
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize
		    <= SMALL_MAXCLASS) {
			ret = icalloc(SMALL_MAXCLASS+1);
			if (ret != NULL)
				arena_prof_promoted(ret, usize);
		} else
			ret = icalloc(num_size);
	} else {
		if (config_stats || (config_valgrind && opt_valgrind))
			usize = s2u(num_size);
		ret = icalloc(num_size);
	}

label_return:
	if (ret == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in calloc(): out of "
			    "memory\n");
			abort();
		}
		set_errno(ENOMEM);
	}

	if (config_prof && opt_prof && ret != NULL)
		prof_malloc(ret, usize, cnt);
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret, config_prof));
		thread_allocated_tsd_get()->allocated += usize;
	}
	UTRACE(0, num_size, ret);
	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true);
	return (ret);
}

void *
je_realloc(void *ptr, size_t size)
{
	void *ret;
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
	size_t old_size = 0;
	size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
	prof_ctx_t *old_ctx JEMALLOC_CC_SILENCE_INIT(NULL);

	if (size == 0) {
		if (ptr != NULL) {
			/* realloc(ptr, 0) is equivalent to free(ptr). */
			if (config_prof) {
				old_size = isalloc(ptr, true);
				if (config_valgrind && opt_valgrind)
					old_rzsize = p2rz(ptr);
			} else if (config_stats) {
				old_size = isalloc(ptr, false);
				if (config_valgrind && opt_valgrind)
					old_rzsize = u2rz(old_size);
			} else if (config_valgrind && opt_valgrind) {
				old_size = isalloc(ptr, false);
				old_rzsize = u2rz(old_size);
			}
			if (config_prof && opt_prof) {
				old_ctx = prof_ctx_get(ptr);
				cnt = NULL;
			}
			iqalloc(ptr);
			ret = NULL;
			goto label_return;
		} else
			size = 1;
	}

	if (ptr != NULL) {
		assert(malloc_initialized || IS_INITIALIZER);

		if (config_prof) {
			old_size = isalloc(ptr, true);
			if (config_valgrind && opt_valgrind)
				old_rzsize = p2rz(ptr);
		} else if (config_stats) {
			old_size = isalloc(ptr, false);
			if (config_valgrind && opt_valgrind)
				old_rzsize = u2rz(old_size);
		} else if (config_valgrind && opt_valgrind) {
			old_size = isalloc(ptr, false);
			old_rzsize = u2rz(old_size);
		}
		if (config_prof && opt_prof) {
			usize = s2u(size);
			old_ctx = prof_ctx_get(ptr);
			PROF_ALLOC_PREP(1, usize, cnt);
			if (cnt == NULL) {
				old_ctx = NULL;
				ret = NULL;
				goto label_oom;
			}
			if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U &&
			    usize <= SMALL_MAXCLASS) {
				ret = iralloc(ptr, SMALL_MAXCLASS+1, 0, 0,
				    false, false);
				if (ret != NULL)
					arena_prof_promoted(ret, usize);
				else
					old_ctx = NULL;
			} else {
				ret = iralloc(ptr, size, 0, 0, false, false);
				if (ret == NULL)
					old_ctx = NULL;
			}
		} else {
			if (config_stats || (config_valgrind && opt_valgrind))
				usize = s2u(size);
			ret = iralloc(ptr, size, 0, 0, false, false);
		}

label_oom:
		if (ret == NULL) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error in realloc(): "
				    "out of memory\n");
				abort();
			}
			set_errno(ENOMEM);
		}
	} else {
		/* realloc(NULL, size) is equivalent to malloc(size). */
		if (config_prof && opt_prof)
			old_ctx = NULL;
		if (malloc_init()) {
			if (config_prof && opt_prof)
				cnt = NULL;
			ret = NULL;
		} else {
			if (config_prof && opt_prof) {
				usize = s2u(size);
				PROF_ALLOC_PREP(1, usize, cnt);
				if (cnt == NULL)
					ret = NULL;
				else {
					if (prof_promote && (uintptr_t)cnt !=
					    (uintptr_t)1U && usize <=
					    SMALL_MAXCLASS) {
						ret = imalloc(SMALL_MAXCLASS+1);
						if (ret != NULL) {
							arena_prof_promoted(ret,
							    usize);
						}
					} else
						ret = imalloc(size);
				}
			} else {
				if (config_stats || (config_valgrind &&
				    opt_valgrind))
					usize = s2u(size);
				ret = imalloc(size);
			}
		}

		if (ret == NULL) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error in realloc(): "
				    "out of memory\n");
				abort();
			}
			set_errno(ENOMEM);
		}
	}

label_return:
	if (config_prof && opt_prof)
		prof_realloc(ret, usize, cnt, old_size, old_ctx);
	if (config_stats && ret != NULL) {
		thread_allocated_t *ta;
		assert(usize == isalloc(ret, config_prof));
		ta = thread_allocated_tsd_get();
		ta->allocated += usize;
		ta->deallocated += old_size;
	}
	UTRACE(ptr, size, ret);
	JEMALLOC_VALGRIND_REALLOC(ret, usize, ptr, old_size, old_rzsize, false);
	return (ret);
}

void
je_free(void *ptr)
{

	UTRACE(ptr, 0, 0);
	if (ptr != NULL) {
		size_t usize;
		size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);

		assert(malloc_initialized || IS_INITIALIZER);

		if (config_prof && opt_prof) {
			usize = isalloc(ptr, config_prof);
			prof_free(ptr, usize);
		} else if (config_stats || config_valgrind)
			usize = isalloc(ptr, config_prof);
		if (config_stats)
			thread_allocated_tsd_get()->deallocated += usize;
		if (config_valgrind && opt_valgrind)
			rzsize = p2rz(ptr);
		iqalloc(ptr);
		JEMALLOC_VALGRIND_FREE(ptr, rzsize);
	}
}

/*
 * End malloc(3)-compatible functions.
 */
/******************************************************************************/
/*
 * Begin non-standard override functions.
 */

#ifdef JEMALLOC_OVERRIDE_MEMALIGN
void *
je_memalign(size_t alignment, size_t size)
{
	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
	imemalign(&ret, alignment, size, 1);
	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
	return (ret);
}
#endif

#ifdef JEMALLOC_OVERRIDE_VALLOC
void *
je_valloc(size_t size)
{
	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
	imemalign(&ret, PAGE, size, 1);
	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
	return (ret);
}
#endif

/*
 * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
 * #define je_malloc malloc
 */
#define	malloc_is_malloc 1
#define	is_malloc_(a) malloc_is_ ## a
#define	is_malloc(a) is_malloc_(a)

#if ((is_malloc(je_malloc) == 1) && defined(__GLIBC__) && !defined(__UCLIBC__))
/*
 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
 * to inconsistently reference libc's malloc(3)-compatible functions
 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
 *
 * These definitions interpose hooks in glibc.  The functions are actually
 * passed an extra argument for the caller return address, which will be
 * ignored.
 */
JEMALLOC_EXPORT void (* const __free_hook)(void *ptr) = je_free;
JEMALLOC_EXPORT void *(* const __malloc_hook)(size_t size) = je_malloc;
JEMALLOC_EXPORT void *(* const __realloc_hook)(void *ptr, size_t size) =
    je_realloc;
JEMALLOC_EXPORT void *(* const __memalign_hook)(size_t alignment, size_t size) =
    je_memalign;
#endif

/*
 * End non-standard override functions.
 */
/******************************************************************************/
/*
 * Begin non-standard functions.
 */

size_t
je_malloc_usable_size(const void *ptr)
{
	size_t ret;

	assert(malloc_initialized || IS_INITIALIZER);

	if (config_ivsalloc)
		ret = ivsalloc(ptr, config_prof);
	else
		ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0;

	return (ret);
}

void
je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *opts)
{

	stats_print(write_cb, cbopaque, opts);
}

int
je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_byname(name, oldp, oldlenp, newp, newlen));
}

int
je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_nametomib(name, mibp, miblenp));
}

int
je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
  void *newp, size_t newlen)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
}
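
/*
 * Usage sketch (an editorial addition, not part of the original source):
 * reading an allocator statistic through the ctl machinery, assuming a
 * build with config_stats enabled:
 *
 *	size_t allocated, sz = sizeof(allocated);
 *	if (je_mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0)
 *		malloc_printf("allocated: %zu\n", allocated);
 *
 * For hot paths, je_mallctlnametomib() translates a name to a mib once,
 * and je_mallctlbymib() is then invoked repeatedly with the cached mib,
 * avoiding the per-call name lookup.
 */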

/*
 * End non-standard functions.
 */
/******************************************************************************/
/*
 * Begin experimental functions.
 */
#ifdef JEMALLOC_EXPERIMENTAL

JEMALLOC_INLINE void *
iallocm(size_t usize, size_t alignment, bool zero)
{

	assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize,
	    alignment)));

	if (alignment != 0)
		return (ipalloc(usize, alignment, zero));
	else if (zero)
		return (icalloc(usize));
	else
		return (imalloc(usize));
}

int
je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
{
	void *p;
	size_t usize;
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));
	bool zero = flags & ALLOCM_ZERO;

	assert(ptr != NULL);
	assert(size != 0);

	if (malloc_init())
		goto label_oom;

	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
	if (usize == 0)
		goto label_oom;

	if (config_prof && opt_prof) {
		prof_thr_cnt_t *cnt;

		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL)
			goto label_oom;
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
		    SMALL_MAXCLASS) {
			size_t usize_promoted = (alignment == 0) ?
			    s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1,
			    alignment);
			assert(usize_promoted != 0);
			p = iallocm(usize_promoted, alignment, zero);
			if (p == NULL)
				goto label_oom;
			arena_prof_promoted(p, usize);
		} else {
			p = iallocm(usize, alignment, zero);
			if (p == NULL)
				goto label_oom;
		}
		prof_malloc(p, usize, cnt);
	} else {
		p = iallocm(usize, alignment, zero);
		if (p == NULL)
			goto label_oom;
	}
	if (rsize != NULL)
		*rsize = usize;

	*ptr = p;
	if (config_stats) {
		assert(usize == isalloc(p, config_prof));
		thread_allocated_tsd_get()->allocated += usize;
	}
	UTRACE(0, size, p);
	JEMALLOC_VALGRIND_MALLOC(true, p, usize, zero);
	return (ALLOCM_SUCCESS);
label_oom:
	if (config_xmalloc && opt_xmalloc) {
		malloc_write("<jemalloc>: Error in allocm(): "
		    "out of memory\n");
		abort();
	}
	*ptr = NULL;
	UTRACE(0, size, 0);
	return (ALLOCM_ERR_OOM);
}
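
/*
 * Usage sketch (an editorial addition, not part of the original source):
 * the experimental API encodes the base-2 logarithm of the alignment in
 * the low bits of flags.  Allocating 4096 zeroed bytes on a 4KiB boundary
 * might look like:
 *
 *	void *p;
 *	size_t rsize;
 *	if (je_allocm(&p, &rsize, 4096, ALLOCM_LG_ALIGN(12) | ALLOCM_ZERO)
 *	    != ALLOCM_SUCCESS)
 *		abort();
 */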

int
je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
{
	void *p, *q;
	size_t usize;
	size_t old_size;
	size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));
	bool zero = flags & ALLOCM_ZERO;
	bool no_move = flags & ALLOCM_NO_MOVE;

	assert(ptr != NULL);
	assert(*ptr != NULL);
	assert(size != 0);
	assert(SIZE_T_MAX - size >= extra);
	assert(malloc_initialized || IS_INITIALIZER);

	p = *ptr;
	if (config_prof && opt_prof) {
		prof_thr_cnt_t *cnt;

		/*
		 * usize isn't knowable before iralloc() returns when extra is
		 * non-zero.  Therefore, compute its maximum possible value and
		 * use that in PROF_ALLOC_PREP() to decide whether to capture a
		 * backtrace.  prof_realloc() will use the actual usize to
		 * decide whether to sample.
		 */
		size_t max_usize = (alignment == 0) ? s2u(size+extra) :
		    sa2u(size+extra, alignment);
		prof_ctx_t *old_ctx = prof_ctx_get(p);
		old_size = isalloc(p, true);
		if (config_valgrind && opt_valgrind)
			old_rzsize = p2rz(p);
		PROF_ALLOC_PREP(1, max_usize, cnt);
		if (cnt == NULL)
			goto label_oom;
		/*
		 * Use minimum usize to determine whether promotion may happen.
		 */
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
		    && ((alignment == 0) ? s2u(size) : sa2u(size, alignment))
		    <= SMALL_MAXCLASS) {
			q = iralloc(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
			    size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
			    alignment, zero, no_move);
			if (q == NULL)
				goto label_err;
			if (max_usize < PAGE) {
				usize = max_usize;
				arena_prof_promoted(q, usize);
			} else
				usize = isalloc(q, config_prof);
		} else {
			q = iralloc(p, size, extra, alignment, zero, no_move);
			if (q == NULL)
				goto label_err;
			usize = isalloc(q, config_prof);
		}
		prof_realloc(q, usize, cnt, old_size, old_ctx);
		if (rsize != NULL)
			*rsize = usize;
	} else {
		if (config_stats) {
			old_size = isalloc(p, false);
			if (config_valgrind && opt_valgrind)
				old_rzsize = u2rz(old_size);
		} else if (config_valgrind && opt_valgrind) {
			old_size = isalloc(p, false);
			old_rzsize = u2rz(old_size);
		}
		q = iralloc(p, size, extra, alignment, zero, no_move);
		if (q == NULL)
			goto label_err;
		if (config_stats)
			usize = isalloc(q, config_prof);
		if (rsize != NULL) {
			if (config_stats == false)
				usize = isalloc(q, config_prof);
			*rsize = usize;
		}
	}

	*ptr = q;
	if (config_stats) {
		thread_allocated_t *ta;
		ta = thread_allocated_tsd_get();
		ta->allocated += usize;
		ta->deallocated += old_size;
	}
	UTRACE(p, size, q);
	JEMALLOC_VALGRIND_REALLOC(q, usize, p, old_size, old_rzsize, zero);
	return (ALLOCM_SUCCESS);
label_err:
	if (no_move) {
		UTRACE(p, size, q);
		return (ALLOCM_ERR_NOT_MOVED);
	}
label_oom:
	if (config_xmalloc && opt_xmalloc) {
		malloc_write("<jemalloc>: Error in rallocm(): "
		    "out of memory\n");
		abort();
	}
	UTRACE(p, size, 0);
	return (ALLOCM_ERR_OOM);
}
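
/*
 * Usage sketch (an editorial addition, not part of the original source):
 * ALLOCM_NO_MOVE restricts je_rallocm() to in-place resizing, so a caller
 * can probe whether a buffer can grow without copying and fall back to an
 * allocate/copy/je_dallocm sequence when ALLOCM_ERR_NOT_MOVED is
 * returned:
 *
 *	if (je_rallocm(&p, &rsize, newsize, 0, ALLOCM_NO_MOVE) ==
 *	    ALLOCM_ERR_NOT_MOVED)
 *		grow_by_copying(&p, newsize);	(hypothetical fallback)
 */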

int
je_sallocm(const void *ptr, size_t *rsize, int flags)
{
	size_t sz;

	assert(malloc_initialized || IS_INITIALIZER);

	if (config_ivsalloc)
		sz = ivsalloc(ptr, config_prof);
	else {
		assert(ptr != NULL);
		sz = isalloc(ptr, config_prof);
	}
	assert(rsize != NULL);
	*rsize = sz;

	return (ALLOCM_SUCCESS);
}

int
je_dallocm(void *ptr, int flags)
{
	size_t usize;
	size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);

	assert(ptr != NULL);
	assert(malloc_initialized || IS_INITIALIZER);

	UTRACE(ptr, 0, 0);
	if (config_stats || config_valgrind)
		usize = isalloc(ptr, config_prof);
	if (config_prof && opt_prof) {
		if (config_stats == false && config_valgrind == false)
			usize = isalloc(ptr, config_prof);
		prof_free(ptr, usize);
	}
	if (config_stats)
		thread_allocated_tsd_get()->deallocated += usize;
	if (config_valgrind && opt_valgrind)
		rzsize = p2rz(ptr);
	iqalloc(ptr);
	JEMALLOC_VALGRIND_FREE(ptr, rzsize);

	return (ALLOCM_SUCCESS);
}

int
je_nallocm(size_t *rsize, size_t size, int flags)
{
	size_t usize;
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));

	assert(size != 0);

	if (malloc_init())
		return (ALLOCM_ERR_OOM);

	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
	if (usize == 0)
		return (ALLOCM_ERR_OOM);

	if (rsize != NULL)
		*rsize = usize;
	return (ALLOCM_SUCCESS);
}

#endif
/*
 * End experimental functions.
 */
/******************************************************************************/
/*
 * The following functions are used by threading libraries for protection of
 * malloc during fork().
 */

#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_prefork(void)
#else
JEMALLOC_EXPORT void
_malloc_prefork(void)
#endif
{
	unsigned i;

#ifdef JEMALLOC_MUTEX_INIT_CB
	if (malloc_initialized == false)
		return;
#endif
	assert(malloc_initialized);

	/* Acquire all mutexes in a safe order. */
	malloc_mutex_prefork(&arenas_lock);
	for (i = 0; i < narenas; i++) {
		if (arenas[i] != NULL)
			arena_prefork(arenas[i]);
	}
	base_prefork();
	huge_prefork();
	chunk_dss_prefork();
}

#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_postfork_parent(void)
#else
JEMALLOC_EXPORT void
_malloc_postfork(void)
#endif
{
	unsigned i;

#ifdef JEMALLOC_MUTEX_INIT_CB
	if (malloc_initialized == false)
		return;
#endif
	assert(malloc_initialized);

	/* Release all mutexes, now that fork() has completed. */
	chunk_dss_postfork_parent();
	huge_postfork_parent();
	base_postfork_parent();
	for (i = 0; i < narenas; i++) {
		if (arenas[i] != NULL)
			arena_postfork_parent(arenas[i]);
	}
	malloc_mutex_postfork_parent(&arenas_lock);
}

void
jemalloc_postfork_child(void)
{
	unsigned i;

	assert(malloc_initialized);

	/* Release all mutexes, now that fork() has completed. */
	chunk_dss_postfork_child();
	huge_postfork_child();
	base_postfork_child();
	for (i = 0; i < narenas; i++) {
		if (arenas[i] != NULL)
			arena_postfork_child(arenas[i]);
	}
	malloc_mutex_postfork_child(&arenas_lock);
}

/******************************************************************************/
/*
 * The following functions are used for TLS allocation/deallocation in static
 * binaries on FreeBSD.  The primary difference between these and i[mcd]alloc()
 * is that these avoid accessing TLS variables.
 */

static void *
a0alloc(size_t size, bool zero)
{

	if (malloc_init())
		return (NULL);

	if (size == 0)
		size = 1;

	if (size <= arena_maxclass)
		return (arena_malloc(arenas[0], size, zero, false));
	else
		return (huge_malloc(size, zero));
}

void *
a0malloc(size_t size)
{

	return (a0alloc(size, false));
}

void *
a0calloc(size_t num, size_t size)
{

	return (a0alloc(num * size, true));
}

void
a0free(void *ptr)
{
	arena_chunk_t *chunk;

	if (ptr == NULL)
		return;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr)
		arena_dalloc(chunk->arena, chunk, ptr, false);
	else
		huge_dalloc(ptr, true);
}

/******************************************************************************/
1739