jemalloc.c revision 80b25932ca52e9506d4e2b8ee0fa58aa5ae3306d
1#define	JEMALLOC_C_
2#include "jemalloc/internal/jemalloc_internal.h"
3
4/******************************************************************************/
5/* Data. */
6
7malloc_tsd_data(, arenas, arena_t *, NULL)
8malloc_tsd_data(, thread_allocated, thread_allocated_t,
9    THREAD_ALLOCATED_INITIALIZER)
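/*
 * The malloc_tsd_data() invocations above create the per-thread backing
 * storage for two pieces of state used throughout this file: the calling
 * thread's cached arena pointer (accessed via the arenas_tsd_get()/
 * arenas_tsd_set() helpers) and, when statistics are enabled, its
 * allocated/deallocated byte counters (thread_allocated_tsd_get()).
 */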
10
11/* Runtime configuration options. */
12const char	*je_malloc_conf JEMALLOC_ATTR(visibility("default"));
13#ifdef JEMALLOC_DEBUG
14bool	opt_abort = true;
15#  ifdef JEMALLOC_FILL
16bool	opt_junk = true;
17#  else
18bool	opt_junk = false;
19#  endif
20#else
21bool	opt_abort = false;
22bool	opt_junk = false;
23#endif
24bool	opt_xmalloc = false;
25bool	opt_zero = false;
26size_t	opt_narenas = 0;
27
28#ifdef DYNAMIC_PAGE_SHIFT
29size_t		pagesize;
30size_t		pagesize_mask;
31size_t		lg_pagesize;
32#endif
33
34unsigned	ncpus;
35
36malloc_mutex_t		arenas_lock;
37arena_t			**arenas;
38unsigned		narenas;
39
40/* Set to true once the allocator has been initialized. */
41static bool		malloc_initialized = false;
42
43#ifdef JEMALLOC_THREADED_INIT
44/* Used to let the initializing thread recursively allocate. */
45static pthread_t	malloc_initializer = (unsigned long)0;
46#  define INITIALIZER		pthread_self()
47#  define IS_INITIALIZER	(malloc_initializer == pthread_self())
48#else
49static bool		malloc_initializer = false;
50#  define INITIALIZER		true
51#  define IS_INITIALIZER	malloc_initializer
52#endif
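/*
 * INITIALIZER/IS_INITIALIZER let malloc_init_hard() tell the thread that is
 * currently bootstrapping the allocator apart from every other thread.  With
 * JEMALLOC_THREADED_INIT the initializer records its pthread_self(), so that
 * recursive allocation during bootstrap (see malloc_init_hard()) falls
 * through instead of deadlocking while other threads spin until
 * initialization completes; without it a plain boolean suffices, on the
 * assumption that no other thread can race the initializer.
 */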
53
54/* Used to avoid initialization races. */
55static malloc_mutex_t	init_lock = MALLOC_MUTEX_INITIALIZER;
56
57/******************************************************************************/
58/* Function prototypes for non-inline static functions. */
59
60static void	stats_print_atexit(void);
61static unsigned	malloc_ncpus(void);
62static bool	malloc_conf_next(char const **opts_p, char const **k_p,
63    size_t *klen_p, char const **v_p, size_t *vlen_p);
64static void	malloc_conf_error(const char *msg, const char *k, size_t klen,
65    const char *v, size_t vlen);
66static void	malloc_conf_init(void);
67static bool	malloc_init_hard(void);
68static int	imemalign(void **memptr, size_t alignment, size_t size,
69    size_t min_alignment);
70
71/******************************************************************************/
72/*
73 * Begin miscellaneous support functions.
74 */
75
76/* Create a new arena and insert it into the arenas array at index ind. */
77arena_t *
78arenas_extend(unsigned ind)
79{
80	arena_t *ret;
81
82	ret = (arena_t *)base_alloc(sizeof(arena_t));
83	if (ret != NULL && arena_new(ret, ind) == false) {
84		arenas[ind] = ret;
85		return (ret);
86	}
87	/* Only reached if there is an OOM error. */
88
89	/*
90	 * OOM here is quite inconvenient to propagate, since dealing with it
91	 * would require a check for failure in the fast path.  Instead, punt
92	 * by using arenas[0].  In practice, this is an extremely unlikely
93	 * failure.
94	 */
95	malloc_write("<jemalloc>: Error initializing arena\n");
96	if (opt_abort)
97		abort();
98
99	return (arenas[0]);
100}
101
102/* Slow path, called only by choose_arena(). */
103arena_t *
104choose_arena_hard(void)
105{
106	arena_t *ret;
107
108	if (narenas > 1) {
109		unsigned i, choose, first_null;
110
111		choose = 0;
112		first_null = narenas;
113		malloc_mutex_lock(&arenas_lock);
114		assert(arenas[0] != NULL);
115		for (i = 1; i < narenas; i++) {
116			if (arenas[i] != NULL) {
117				/*
118				 * Choose the first arena that has the lowest
119				 * number of threads assigned to it.
120				 */
121				if (arenas[i]->nthreads <
122				    arenas[choose]->nthreads)
123					choose = i;
124			} else if (first_null == narenas) {
125				/*
126				 * Record the index of the first uninitialized
127				 * arena, in case all extant arenas are in use.
128				 *
129				 * NB: It is possible for there to be
130				 * discontinuities in terms of initialized
131				 * versus uninitialized arenas, due to the
132				 * "thread.arena" mallctl.
133				 */
134				first_null = i;
135			}
136		}
137
138		if (arenas[choose]->nthreads == 0 || first_null == narenas) {
139			/*
140			 * Use an unloaded arena, or the least loaded arena if
141			 * all arenas are already initialized.
142			 */
143			ret = arenas[choose];
144		} else {
145			/* Initialize a new arena. */
146			ret = arenas_extend(first_null);
147		}
148		ret->nthreads++;
149		malloc_mutex_unlock(&arenas_lock);
150	} else {
151		ret = arenas[0];
152		malloc_mutex_lock(&arenas_lock);
153		ret->nthreads++;
154		malloc_mutex_unlock(&arenas_lock);
155	}
156
157	arenas_tsd_set(&ret);
158
159	return (ret);
160}
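/*
 * Summary of the policy above: with more than one arena configured, a thread
 * is assigned either the least-loaded already-initialized arena (when some
 * arena is completely idle, or when every slot is initialized) or a freshly
 * created arena in the first unused slot.  The choice is cached in TSD via
 * arenas_tsd_set(), so this slow path runs only on a thread's first
 * allocation; later allocations use the cached pointer from choose_arena().
 */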
161
162static void
163stats_print_atexit(void)
164{
165
166	if (config_tcache && config_stats) {
167		unsigned i;
168
169		/*
170		 * Merge stats from extant threads.  This is racy, since
171		 * individual threads do not lock when recording tcache stats
172		 * events.  As a consequence, the final stats may be slightly
173		 * out of date by the time they are reported, if other threads
174		 * continue to allocate.
175		 */
176		for (i = 0; i < narenas; i++) {
177			arena_t *arena = arenas[i];
178			if (arena != NULL) {
179				tcache_t *tcache;
180
181				/*
182				 * tcache_stats_merge() locks bins, so if any
183				 * code is introduced that acquires both arena
184				 * and bin locks in the opposite order,
185				 * deadlocks may result.
186				 */
187				malloc_mutex_lock(&arena->lock);
188				ql_foreach(tcache, &arena->tcache_ql, link) {
189					tcache_stats_merge(tcache, arena);
190				}
191				malloc_mutex_unlock(&arena->lock);
192			}
193		}
194	}
195	je_malloc_stats_print(NULL, NULL, NULL);
196}
197
198/*
199 * End miscellaneous support functions.
200 */
201/******************************************************************************/
202/*
203 * Begin initialization functions.
204 */
205
206static unsigned
207malloc_ncpus(void)
208{
209	unsigned ret;
210	long result;
211
212	result = sysconf(_SC_NPROCESSORS_ONLN);
213	if (result == -1) {
214		/* Error. */
215		ret = 1;
216	} else
217		ret = (unsigned)result;
218
219	return (ret);
220}
221
222void
223arenas_cleanup(void *arg)
224{
225	arena_t *arena = *(arena_t **)arg;
226
227	malloc_mutex_lock(&arenas_lock);
228	arena->nthreads--;
229	malloc_mutex_unlock(&arenas_lock);
230}
231
232static inline bool
233malloc_init(void)
234{
235
236	if (malloc_initialized == false)
237		return (malloc_init_hard());
238
239	return (false);
240}
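/*
 * Note on the error convention used by the boot functions here and below:
 * they return false on success and true on failure, so callers are written
 * as "if (malloc_init()) ... handle error".  malloc_init() is the inlined
 * fast path; all of the expensive work lives in malloc_init_hard().
 */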
241
242static bool
243malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
244    char const **v_p, size_t *vlen_p)
245{
246	bool accept;
247	const char *opts = *opts_p;
248
249	*k_p = opts;
250
251	for (accept = false; accept == false;) {
252		switch (*opts) {
253		case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
254		case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
255		case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
256		case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
257		case 'Y': case 'Z':
258		case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
259		case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
260		case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
261		case 's': case 't': case 'u': case 'v': case 'w': case 'x':
262		case 'y': case 'z':
263		case '0': case '1': case '2': case '3': case '4': case '5':
264		case '6': case '7': case '8': case '9':
265		case '_':
266			opts++;
267			break;
268		case ':':
269			opts++;
270			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
271			*v_p = opts;
272			accept = true;
273			break;
274		case '\0':
275			if (opts != *opts_p) {
276				malloc_write("<jemalloc>: Conf string ends "
277				    "with key\n");
278			}
279			return (true);
280		default:
281			malloc_write("<jemalloc>: Malformed conf string\n");
282			return (true);
283		}
284	}
285
286	for (accept = false; accept == false;) {
287		switch (*opts) {
288		case ',':
289			opts++;
290			/*
291			 * Look ahead one character here, because the next time
292			 * this function is called, it will assume that end of
293			 * input has been cleanly reached if no input remains,
294			 * but we have optimistically already consumed the
295			 * comma if one exists.
296			 */
297			if (*opts == '\0') {
298				malloc_write("<jemalloc>: Conf string ends "
299				    "with comma\n");
300			}
301			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
302			accept = true;
303			break;
304		case '\0':
305			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
306			accept = true;
307			break;
308		default:
309			opts++;
310			break;
311		}
312	}
313
314	*opts_p = opts;
315	return (false);
316}
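/*
 * malloc_conf_next() extracts one "key:value" pair per call from a
 * comma-separated option string.  Keys are limited to [A-Za-z0-9_]; values
 * run to the next comma or to the end of the string.  For example, using
 * option names handled in malloc_conf_init() below, the string
 * "abort:true,narenas:4,lg_chunk:22" yields the pairs (abort, true),
 * (narenas, 4), and (lg_chunk, 22).
 */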
317
318static void
319malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
320    size_t vlen)
321{
322
323	malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
324	    (int)vlen, v);
325}
326
327static void
328malloc_conf_init(void)
329{
330	unsigned i;
331	char buf[PATH_MAX + 1];
332	const char *opts, *k, *v;
333	size_t klen, vlen;
334
335	for (i = 0; i < 3; i++) {
336		/* Get runtime configuration. */
337		switch (i) {
338		case 0:
339			if (je_malloc_conf != NULL) {
340				/*
341				 * Use options that were compiled into the
342				 * program.
343				 */
344				opts = je_malloc_conf;
345			} else {
346				/* No configuration specified. */
347				buf[0] = '\0';
348				opts = buf;
349			}
350			break;
351		case 1: {
352			int linklen;
353			const char *linkname =
354#ifdef JEMALLOC_PREFIX
355			    "/etc/"JEMALLOC_PREFIX"malloc.conf"
356#else
357			    "/etc/malloc.conf"
358#endif
359			    ;
360
361			if ((linklen = readlink(linkname, buf,
362			    sizeof(buf) - 1)) != -1) {
363				/*
364				 * Use the contents of the "/etc/malloc.conf"
365				 * symbolic link's name.
366				 */
367				buf[linklen] = '\0';
368				opts = buf;
369			} else {
370				/* No configuration specified. */
371				buf[0] = '\0';
372				opts = buf;
373			}
374			break;
375		} case 2: {
376			const char *envname =
377#ifdef JEMALLOC_PREFIX
378			    JEMALLOC_CPREFIX"MALLOC_CONF"
379#else
380			    "MALLOC_CONF"
381#endif
382			    ;
383
384			if ((opts = getenv(envname)) != NULL) {
385				/*
386				 * Do nothing; opts is already initialized to
387				 * the value of the MALLOC_CONF environment
388				 * variable.
389				 */
390			} else {
391				/* No configuration specified. */
392				buf[0] = '\0';
393				opts = buf;
394			}
395			break;
396		} default:
397			/* NOTREACHED */
398			assert(false);
399			buf[0] = '\0';
400			opts = buf;
401		}
402
403		while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
404		    &vlen) == false) {
405#define	CONF_HANDLE_BOOL(o, n)						\
406			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
407			    klen) == 0) {				\
408				if (strncmp("true", v, vlen) == 0 &&	\
409				    vlen == sizeof("true")-1)		\
410					o = true;			\
411				else if (strncmp("false", v, vlen) ==	\
412				    0 && vlen == sizeof("false")-1)	\
413					o = false;			\
414				else {					\
415					malloc_conf_error(		\
416					    "Invalid conf value",	\
417					    k, klen, v, vlen);		\
418				}					\
419				continue;				\
420			}
421#define	CONF_HANDLE_SIZE_T(o, n, min, max)				\
422			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
423			    klen) == 0) {				\
424				uintmax_t um;			\
425				char *end;				\
426									\
427				errno = 0;				\
428				um = malloc_strtoumax(v, &end, 0);	\
429				if (errno != 0 || (uintptr_t)end -	\
430				    (uintptr_t)v != vlen) {		\
431					malloc_conf_error(		\
432					    "Invalid conf value",	\
433					    k, klen, v, vlen);		\
434				} else if (um < min || um > max) {	\
435					malloc_conf_error(		\
436					    "Out-of-range conf value",	\
437					    k, klen, v, vlen);		\
438				} else					\
439					o = um;				\
440				continue;				\
441			}
442#define	CONF_HANDLE_SSIZE_T(o, n, min, max)				\
443			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
444			    klen) == 0) {				\
445				long l;					\
446				char *end;				\
447									\
448				errno = 0;				\
449				l = strtol(v, &end, 0);			\
450				if (errno != 0 || (uintptr_t)end -	\
451				    (uintptr_t)v != vlen) {		\
452					malloc_conf_error(		\
453					    "Invalid conf value",	\
454					    k, klen, v, vlen);		\
455				} else if (l < (ssize_t)min || l >	\
456				    (ssize_t)max) {			\
457					malloc_conf_error(		\
458					    "Out-of-range conf value",	\
459					    k, klen, v, vlen);		\
460				} else					\
461					o = l;				\
462				continue;				\
463			}
464#define	CONF_HANDLE_CHAR_P(o, n, d)					\
465			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
466			    klen) == 0) {				\
467				size_t cpylen = (vlen <=		\
468				    sizeof(o)-1) ? vlen :		\
469				    sizeof(o)-1;			\
470				strncpy(o, v, cpylen);			\
471				o[cpylen] = '\0';			\
472				continue;				\
473			}
474
475			CONF_HANDLE_BOOL(opt_abort, abort)
476			/*
477			 * Chunks always require at least one header page,
478			 * plus one data page.
479			 */
480			CONF_HANDLE_SIZE_T(opt_lg_chunk, lg_chunk, PAGE_SHIFT+1,
481			    (sizeof(size_t) << 3) - 1)
482			CONF_HANDLE_SIZE_T(opt_narenas, narenas, 1, SIZE_T_MAX)
483			CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, lg_dirty_mult,
484			    -1, (sizeof(size_t) << 3) - 1)
485			CONF_HANDLE_BOOL(opt_stats_print, stats_print)
486			if (config_fill) {
487				CONF_HANDLE_BOOL(opt_junk, junk)
488				CONF_HANDLE_BOOL(opt_zero, zero)
489			}
490			if (config_xmalloc) {
491				CONF_HANDLE_BOOL(opt_xmalloc, xmalloc)
492			}
493			if (config_tcache) {
494				CONF_HANDLE_BOOL(opt_tcache, tcache)
495				CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
496				    lg_tcache_max, -1,
497				    (sizeof(size_t) << 3) - 1)
498			}
499			if (config_prof) {
500				CONF_HANDLE_BOOL(opt_prof, prof)
501				CONF_HANDLE_CHAR_P(opt_prof_prefix, prof_prefix,
502				    "jeprof")
503				CONF_HANDLE_BOOL(opt_prof_active, prof_active)
504				CONF_HANDLE_SSIZE_T(opt_lg_prof_sample,
505				    lg_prof_sample, 0,
506				    (sizeof(uint64_t) << 3) - 1)
507				CONF_HANDLE_BOOL(opt_prof_accum, prof_accum)
508				CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
509				    lg_prof_interval, -1,
510				    (sizeof(uint64_t) << 3) - 1)
511				CONF_HANDLE_BOOL(opt_prof_gdump, prof_gdump)
512				CONF_HANDLE_BOOL(opt_prof_leak, prof_leak)
513			}
514			malloc_conf_error("Invalid conf pair", k, klen, v,
515			    vlen);
516#undef CONF_HANDLE_BOOL
517#undef CONF_HANDLE_SIZE_T
518#undef CONF_HANDLE_SSIZE_T
519#undef CONF_HANDLE_CHAR_P
520		}
521	}
522}
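/*
 * The loop above reads configuration from three sources, in order: the
 * compiled-in je_malloc_conf string, the name of the /etc/malloc.conf
 * symbolic link, and the MALLOC_CONF environment variable (the latter two
 * optionally prefixed via JEMALLOC_PREFIX).  Later sources are parsed after
 * earlier ones, so they override earlier settings for the same key.  A
 * typical shell invocation (illustrative; junk requires fill support, and
 * stats_print triggers the atexit() dump registered in malloc_init_hard()):
 *
 *   MALLOC_CONF="junk:true,stats_print:true" ./a.out
 */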
523
524static bool
525malloc_init_hard(void)
526{
527	arena_t *init_arenas[1];
528
529	malloc_mutex_lock(&init_lock);
530	if (malloc_initialized || IS_INITIALIZER) {
531		/*
532		 * Another thread initialized the allocator before this one
533		 * acquired init_lock, or this thread is the initializing
534		 * thread, and it is recursively allocating.
535		 */
536		malloc_mutex_unlock(&init_lock);
537		return (false);
538	}
539#ifdef JEMALLOC_THREADED_INIT
540	if (IS_INITIALIZER == false) {
541		/* Busy-wait until the initializing thread completes. */
542		do {
543			malloc_mutex_unlock(&init_lock);
544			CPU_SPINWAIT;
545			malloc_mutex_lock(&init_lock);
546		} while (malloc_initialized == false);
547		malloc_mutex_unlock(&init_lock);
548		return (false);
549	}
550#endif
551	malloc_initializer = INITIALIZER;
552
553#ifdef DYNAMIC_PAGE_SHIFT
554	/* Get page size. */
555	{
556		long result;
557
558		result = sysconf(_SC_PAGESIZE);
559		assert(result != -1);
560		pagesize = (size_t)result;
561
562		/*
563		 * We assume that pagesize is a power of 2 when calculating
564		 * pagesize_mask and lg_pagesize.
565		 */
566		assert(((result - 1) & result) == 0);
567		pagesize_mask = result - 1;
568		lg_pagesize = ffs((int)result) - 1;
569	}
570#endif
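	/*
	 * Worked example for the DYNAMIC_PAGE_SHIFT code above: with 4 KiB
	 * pages, result == 4096 == 1<<12, so pagesize_mask == 0xfff and
	 * ffs(4096) returns 13, giving lg_pagesize == 12.
	 */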
571
572	malloc_tsd_boot();
573	if (config_prof)
574		prof_boot0();
575
576	malloc_conf_init();
577
578#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE))
579	/* Register fork handlers. */
580	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
581	    jemalloc_postfork_child) != 0) {
582		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
583		if (opt_abort)
584			abort();
585	}
586#endif
587
588	if (opt_stats_print) {
589		/* Print statistics at exit. */
590		if (atexit(stats_print_atexit) != 0) {
591			malloc_write("<jemalloc>: Error in atexit()\n");
592			if (opt_abort)
593				abort();
594		}
595	}
596
597	if (chunk_boot0()) {
598		malloc_mutex_unlock(&init_lock);
599		return (true);
600	}
601
602	if (base_boot()) {
603		malloc_mutex_unlock(&init_lock);
604		return (true);
605	}
606
607	if (ctl_boot()) {
608		malloc_mutex_unlock(&init_lock);
609		return (true);
610	}
611
612	if (config_prof)
613		prof_boot1();
614
615	arena_boot();
616
617	if (config_tcache && tcache_boot0()) {
618		malloc_mutex_unlock(&init_lock);
619		return (true);
620	}
621
622	if (huge_boot()) {
623		malloc_mutex_unlock(&init_lock);
624		return (true);
625	}
626
627	if (malloc_mutex_init(&arenas_lock))
628		return (true);
629
630	/*
631	 * Create enough scaffolding to allow recursive allocation in
632	 * malloc_ncpus().
633	 */
634	narenas = 1;
635	arenas = init_arenas;
636	memset(arenas, 0, sizeof(arena_t *) * narenas);
637
638	/*
639	 * Initialize one arena here.  The rest are lazily created in
640	 * choose_arena_hard().
641	 */
642	arenas_extend(0);
643	if (arenas[0] == NULL) {
644		malloc_mutex_unlock(&init_lock);
645		return (true);
646	}
647
648	/* Initialize allocation counters before any allocations can occur. */
649	if (config_stats && thread_allocated_tsd_boot()) {
650		malloc_mutex_unlock(&init_lock);
651		return (true);
652	}
653
654	if (arenas_tsd_boot()) {
655		malloc_mutex_unlock(&init_lock);
656		return (true);
657	}
658
659	if (config_tcache && tcache_boot1()) {
660		malloc_mutex_unlock(&init_lock);
661		return (true);
662	}
663
664	if (config_prof && prof_boot2()) {
665		malloc_mutex_unlock(&init_lock);
666		return (true);
667	}
668
669	/* Get number of CPUs. */
670	malloc_mutex_unlock(&init_lock);
671	ncpus = malloc_ncpus();
672	malloc_mutex_lock(&init_lock);
673
674	if (chunk_boot1()) {
675		malloc_mutex_unlock(&init_lock);
676		return (true);
677	}
678
679	if (opt_narenas == 0) {
680		/*
681		 * For SMP systems, create more than one arena per CPU by
682		 * default.
683		 */
684		if (ncpus > 1)
685			opt_narenas = ncpus << 2;
686		else
687			opt_narenas = 1;
688	}
689	narenas = opt_narenas;
690	/*
691	 * Make sure that the arenas array can be allocated.  In practice, this
692	 * limit is enough to allow the allocator to function, but the ctl
693	 * machinery will fail to allocate memory at far lower limits.
694	 */
695	if (narenas > chunksize / sizeof(arena_t *)) {
696		narenas = chunksize / sizeof(arena_t *);
697		malloc_printf("<jemalloc>: Reducing narenas to limit (%u)\n",
698		    narenas);
699	}
700
701	/* Allocate and initialize arenas. */
702	arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
703	if (arenas == NULL) {
704		malloc_mutex_unlock(&init_lock);
705		return (true);
706	}
707	/*
708	 * Zero the array.  In practice, this should always be pre-zeroed,
709	 * since it was just mmap()ed, but let's be sure.
710	 */
711	memset(arenas, 0, sizeof(arena_t *) * narenas);
712	/* Copy the pointer to the one arena that was already initialized. */
713	arenas[0] = init_arenas[0];
714
715	malloc_initialized = true;
716	malloc_mutex_unlock(&init_lock);
717	return (false);
718}
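/*
 * Bootstrapping notes for malloc_init_hard(): TSD, option parsing, and the
 * various *_boot() routines must run before the first arena is created, and
 * the statically sized init_arenas[] array stands in for the real arenas
 * array so that early allocations have somewhere to go.  init_lock is
 * dropped around malloc_ncpus() because that call may allocate and thus
 * re-enter the allocator; the IS_INITIALIZER check at the top of the
 * function lets such recursion proceed.
 */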
719
720/*
721 * End initialization functions.
722 */
723/******************************************************************************/
724/*
725 * Begin malloc(3)-compatible functions.
726 */
727
728JEMALLOC_ATTR(malloc)
729JEMALLOC_ATTR(visibility("default"))
730void *
731je_malloc(size_t size)
732{
733	void *ret;
734	size_t usize;
735	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
736
737	if (malloc_init()) {
738		ret = NULL;
739		goto OOM;
740	}
741
742	if (size == 0)
743		size = 1;
744
745	if (config_prof && opt_prof) {
746		usize = s2u(size);
747		PROF_ALLOC_PREP(1, usize, cnt);
748		if (cnt == NULL) {
749			ret = NULL;
750			goto OOM;
751		}
752		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
753		    SMALL_MAXCLASS) {
754			ret = imalloc(SMALL_MAXCLASS+1);
755			if (ret != NULL)
756				arena_prof_promoted(ret, usize);
757		} else
758			ret = imalloc(size);
759	} else {
760		if (config_stats)
761			usize = s2u(size);
762		ret = imalloc(size);
763	}
764
765OOM:
766	if (ret == NULL) {
767		if (config_xmalloc && opt_xmalloc) {
768			malloc_write("<jemalloc>: Error in malloc(): "
769			    "out of memory\n");
770			abort();
771		}
772		errno = ENOMEM;
773	}
774	if (config_prof && opt_prof && ret != NULL)
775		prof_malloc(ret, usize, cnt);
776	if (config_stats && ret != NULL) {
777		assert(usize == isalloc(ret));
778		thread_allocated_tsd_get()->allocated += usize;
779	}
780	return (ret);
781}
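/*
 * About the prof_promote branch above (and its analogues in calloc, realloc,
 * and allocm): when heap profiling samples an allocation that would land in
 * a small size class, the request is bumped to SMALL_MAXCLASS+1 and
 * arena_prof_promoted() records the originally requested usize.  This
 * appears to exist so that sampled objects carry per-allocation profiling
 * context; unsampled requests, for which PROF_ALLOC_PREP() yields the
 * sentinel value (prof_thr_cnt_t *)(uintptr_t)1U, take the ordinary path.
 */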
782
783JEMALLOC_ATTR(nonnull(1))
784#ifdef JEMALLOC_PROF
785/*
786 * Avoid any uncertainty as to how many backtrace frames to ignore in
787 * PROF_ALLOC_PREP().
788 */
789JEMALLOC_ATTR(noinline)
790#endif
791static int
792imemalign(void **memptr, size_t alignment, size_t size,
793    size_t min_alignment)
794{
795	int ret;
796	size_t usize;
797	void *result;
798	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
799
800	assert(min_alignment != 0);
801
802	if (malloc_init())
803		result = NULL;
804	else {
805		if (size == 0)
806			size = 1;
807
808		/* Make sure that alignment is a large enough power of 2. */
809		if (((alignment - 1) & alignment) != 0
810		    || (alignment < min_alignment)) {
811			if (config_xmalloc && opt_xmalloc) {
812				malloc_write("<jemalloc>: Error allocating "
813				    "aligned memory: invalid alignment\n");
814				abort();
815			}
816			result = NULL;
817			ret = EINVAL;
818			goto RETURN;
819		}
820
821		usize = sa2u(size, alignment, NULL);
822		if (usize == 0) {
823			result = NULL;
824			ret = ENOMEM;
825			goto RETURN;
826		}
827
828		if (config_prof && opt_prof) {
829			PROF_ALLOC_PREP(2, usize, cnt);
830			if (cnt == NULL) {
831				result = NULL;
832				ret = EINVAL;
833			} else {
834				if (prof_promote && (uintptr_t)cnt !=
835				    (uintptr_t)1U && usize <= SMALL_MAXCLASS) {
836					assert(sa2u(SMALL_MAXCLASS+1,
837					    alignment, NULL) != 0);
838					result = ipalloc(sa2u(SMALL_MAXCLASS+1,
839					    alignment, NULL), alignment, false);
840					if (result != NULL) {
841						arena_prof_promoted(result,
842						    usize);
843					}
844				} else {
845					result = ipalloc(usize, alignment,
846					    false);
847				}
848			}
849		} else
850			result = ipalloc(usize, alignment, false);
851	}
852
853	if (result == NULL) {
854		if (config_xmalloc && opt_xmalloc) {
855			malloc_write("<jemalloc>: Error allocating aligned "
856			    "memory: out of memory\n");
857			abort();
858		}
859		ret = ENOMEM;
860		goto RETURN;
861	}
862
863	*memptr = result;
864	ret = 0;
865
866RETURN:
867	if (config_stats && result != NULL) {
868		assert(usize == isalloc(result));
869		thread_allocated_tsd_get()->allocated += usize;
870	}
871	if (config_prof && opt_prof && result != NULL)
872		prof_malloc(result, usize, cnt);
873	return (ret);
874}
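/*
 * The min_alignment parameter exists because the callers below impose
 * different lower bounds: posix_memalign() requires the alignment to be at
 * least sizeof(void *) (per POSIX), while aligned_alloc(), memalign(), and
 * valloc() pass 1 and rely only on the power-of-two check.
 */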
875
876JEMALLOC_ATTR(nonnull(1))
877JEMALLOC_ATTR(visibility("default"))
878int
879je_posix_memalign(void **memptr, size_t alignment, size_t size)
880{
881
882	return imemalign(memptr, alignment, size, sizeof(void *));
883}
884
885JEMALLOC_ATTR(malloc)
886JEMALLOC_ATTR(visibility("default"))
887void *
888je_aligned_alloc(size_t alignment, size_t size)
889{
890	void *ret;
891	int err;
892
893	if ((err = imemalign(&ret, alignment, size, 1)) != 0) {
894		ret = NULL;
895		errno = err;
896	}
897	return (ret);
898}
899
900JEMALLOC_ATTR(malloc)
901JEMALLOC_ATTR(visibility("default"))
902void *
903je_calloc(size_t num, size_t size)
904{
905	void *ret;
906	size_t num_size;
907	size_t usize;
908	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
909
910	if (malloc_init()) {
911		num_size = 0;
912		ret = NULL;
913		goto RETURN;
914	}
915
916	num_size = num * size;
917	if (num_size == 0) {
918		if (num == 0 || size == 0)
919			num_size = 1;
920		else {
921			ret = NULL;
922			goto RETURN;
923		}
924	/*
925	 * Try to avoid division here.  We know that it isn't possible to
926	 * overflow during multiplication if neither operand uses any of the
927	 * most significant half of the bits in a size_t.
928	 */
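	/*
	 * Concretely: with a 64-bit size_t, if num and size are both below
	 * 2^32 then num * size < 2^64 and cannot wrap, so the division-based
	 * check runs only when at least one operand has a bit set in its
	 * upper half.
	 */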
929	} else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
930	    && (num_size / size != num)) {
931		/* size_t overflow. */
932		ret = NULL;
933		goto RETURN;
934	}
935
936	if (config_prof && opt_prof) {
937		usize = s2u(num_size);
938		PROF_ALLOC_PREP(1, usize, cnt);
939		if (cnt == NULL) {
940			ret = NULL;
941			goto RETURN;
942		}
943		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize
944		    <= SMALL_MAXCLASS) {
945			ret = icalloc(SMALL_MAXCLASS+1);
946			if (ret != NULL)
947				arena_prof_promoted(ret, usize);
948		} else
949			ret = icalloc(num_size);
950	} else {
951		if (config_stats)
952			usize = s2u(num_size);
953		ret = icalloc(num_size);
954	}
955
956RETURN:
957	if (ret == NULL) {
958		if (config_xmalloc && opt_xmalloc) {
959			malloc_write("<jemalloc>: Error in calloc(): out of "
960			    "memory\n");
961			abort();
962		}
963		errno = ENOMEM;
964	}
965
966	if (config_prof && opt_prof && ret != NULL)
967		prof_malloc(ret, usize, cnt);
968	if (config_stats && ret != NULL) {
969		assert(usize == isalloc(ret));
970		thread_allocated_tsd_get()->allocated += usize;
971	}
972	return (ret);
973}
974
975JEMALLOC_ATTR(visibility("default"))
976void *
977je_realloc(void *ptr, size_t size)
978{
979	void *ret;
980	size_t usize;
981	size_t old_size = 0;
982	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
983	prof_ctx_t *old_ctx JEMALLOC_CC_SILENCE_INIT(NULL);
984
985	if (size == 0) {
986		if (ptr != NULL) {
987			/* realloc(ptr, 0) is equivalent to free(ptr). */
988			if (config_prof || config_stats)
989				old_size = isalloc(ptr);
990			if (config_prof && opt_prof) {
991				old_ctx = prof_ctx_get(ptr);
992				cnt = NULL;
993			}
994			idalloc(ptr);
995			ret = NULL;
996			goto RETURN;
997		} else
998			size = 1;
999	}
1000
1001	if (ptr != NULL) {
1002		assert(malloc_initialized || IS_INITIALIZER);
1003
1004		if (config_prof || config_stats)
1005			old_size = isalloc(ptr);
1006		if (config_prof && opt_prof) {
1007			usize = s2u(size);
1008			old_ctx = prof_ctx_get(ptr);
1009			PROF_ALLOC_PREP(1, usize, cnt);
1010			if (cnt == NULL) {
1011				old_ctx = NULL;
1012				ret = NULL;
1013				goto OOM;
1014			}
1015			if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U &&
1016			    usize <= SMALL_MAXCLASS) {
1017				ret = iralloc(ptr, SMALL_MAXCLASS+1, 0, 0,
1018				    false, false);
1019				if (ret != NULL)
1020					arena_prof_promoted(ret, usize);
1021				else
1022					old_ctx = NULL;
1023			} else {
1024				ret = iralloc(ptr, size, 0, 0, false, false);
1025				if (ret == NULL)
1026					old_ctx = NULL;
1027			}
1028		} else {
1029			if (config_stats)
1030				usize = s2u(size);
1031			ret = iralloc(ptr, size, 0, 0, false, false);
1032		}
1033
1034OOM:
1035		if (ret == NULL) {
1036			if (config_xmalloc && opt_xmalloc) {
1037				malloc_write("<jemalloc>: Error in realloc(): "
1038				    "out of memory\n");
1039				abort();
1040			}
1041			errno = ENOMEM;
1042		}
1043	} else {
1044		/* realloc(NULL, size) is equivalent to malloc(size). */
1045		if (config_prof && opt_prof)
1046			old_ctx = NULL;
1047		if (malloc_init()) {
1048			if (config_prof && opt_prof)
1049				cnt = NULL;
1050			ret = NULL;
1051		} else {
1052			if (config_prof && opt_prof) {
1053				usize = s2u(size);
1054				PROF_ALLOC_PREP(1, usize, cnt);
1055				if (cnt == NULL)
1056					ret = NULL;
1057				else {
1058					if (prof_promote && (uintptr_t)cnt !=
1059					    (uintptr_t)1U && usize <=
1060					    SMALL_MAXCLASS) {
1061						ret = imalloc(SMALL_MAXCLASS+1);
1062						if (ret != NULL) {
1063							arena_prof_promoted(ret,
1064							    usize);
1065						}
1066					} else
1067						ret = imalloc(size);
1068				}
1069			} else {
1070				if (config_stats)
1071					usize = s2u(size);
1072				ret = imalloc(size);
1073			}
1074		}
1075
1076		if (ret == NULL) {
1077			if (config_xmalloc && opt_xmalloc) {
1078				malloc_write("<jemalloc>: Error in realloc(): "
1079				    "out of memory\n");
1080				abort();
1081			}
1082			errno = ENOMEM;
1083		}
1084	}
1085
1086RETURN:
1087	if (config_prof && opt_prof)
1088		prof_realloc(ret, usize, cnt, old_size, old_ctx);
1089	if (config_stats && ret != NULL) {
1090		thread_allocated_t *ta;
1091		assert(usize == isalloc(ret));
1092		ta = thread_allocated_tsd_get();
1093		ta->allocated += usize;
1094		ta->deallocated += old_size;
1095	}
1096	return (ret);
1097}
1098
1099JEMALLOC_ATTR(visibility("default"))
1100void
1101je_free(void *ptr)
1102{
1103
1104	if (ptr != NULL) {
1105		size_t usize;
1106
1107		assert(malloc_initialized || IS_INITIALIZER);
1108
1109		if (config_prof && opt_prof) {
1110			usize = isalloc(ptr);
1111			prof_free(ptr, usize);
1112		} else if (config_stats) {
1113			usize = isalloc(ptr);
1114		}
1115		if (config_stats)
1116			thread_allocated_tsd_get()->deallocated += usize;
1117		idalloc(ptr);
1118	}
1119}
1120
1121/*
1122 * End malloc(3)-compatible functions.
1123 */
1124/******************************************************************************/
1125/*
1126 * Begin non-standard override functions.
1127 */
1128
1129#ifdef JEMALLOC_OVERRIDE_MEMALIGN
1130JEMALLOC_ATTR(malloc)
1131JEMALLOC_ATTR(visibility("default"))
1132void *
1133je_memalign(size_t alignment, size_t size)
1134{
1135	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
1136	imemalign(&ret, alignment, size, 1);
1137	return (ret);
1138}
1139#endif
1140
1141#ifdef JEMALLOC_OVERRIDE_VALLOC
1142JEMALLOC_ATTR(malloc)
1143JEMALLOC_ATTR(visibility("default"))
1144void *
1145je_valloc(size_t size)
1146{
1147	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
1148	imemalign(&ret, PAGE_SIZE, size, 1);
1149	return (ret);
1150}
1151#endif
1152
1153/*
1154 * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
1155 * #define je_malloc malloc
1156 */
1157#define	malloc_is_malloc 1
1158#define	is_malloc_(a) malloc_is_ ## a
1159#define	is_malloc(a) is_malloc_(a)
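/*
 * How the test works: the extra level of indirection in is_malloc() lets the
 * preprocessor expand its argument first.  If jemalloc_defs.h defines
 * je_malloc as malloc, is_malloc(je_malloc) becomes malloc_is_malloc, which
 * is defined to 1 above; otherwise it becomes malloc_is_je_malloc, which is
 * undefined and therefore evaluates to 0 in the #if below.
 */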
1160
1161#if ((is_malloc(je_malloc) == 1) && defined(__GLIBC__) && !defined(__UCLIBC__))
1162/*
1163 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
1164 * to inconsistently reference libc's malloc(3)-compatible functions
1165 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
1166 *
1167 * These definitions interpose hooks in glibc.  The functions are actually
1168 * passed an extra argument for the caller return address, which will be
1169 * ignored.
1170 */
1171JEMALLOC_ATTR(visibility("default"))
1172void (* const __free_hook)(void *ptr) = je_free;
1173
1174JEMALLOC_ATTR(visibility("default"))
1175void *(* const __malloc_hook)(size_t size) = je_malloc;
1176
1177JEMALLOC_ATTR(visibility("default"))
1178void *(* const __realloc_hook)(void *ptr, size_t size) = je_realloc;
1179
1180JEMALLOC_ATTR(visibility("default"))
1181void *(* const __memalign_hook)(size_t alignment, size_t size) = je_memalign;
1182#endif
1183
1184/*
1185 * End non-standard override functions.
1186 */
1187/******************************************************************************/
1188/*
1189 * Begin non-standard functions.
1190 */
1191
1192JEMALLOC_ATTR(visibility("default"))
1193size_t
1194je_malloc_usable_size(const void *ptr)
1195{
1196	size_t ret;
1197
1198	assert(malloc_initialized || IS_INITIALIZER);
1199
1200	if (config_ivsalloc)
1201		ret = ivsalloc(ptr);
1202	else
1203		ret = (ptr != NULL) ? isalloc(ptr) : 0;
1204
1205	return (ret);
1206}
1207
1208JEMALLOC_ATTR(visibility("default"))
1209void
1210je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
1211    const char *opts)
1212{
1213
1214	stats_print(write_cb, cbopaque, opts);
1215}
1216
1217JEMALLOC_ATTR(visibility("default"))
1218int
1219je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
1220    size_t newlen)
1221{
1222
1223	if (malloc_init())
1224		return (EAGAIN);
1225
1226	return (ctl_byname(name, oldp, oldlenp, newp, newlen));
1227}
1228
1229JEMALLOC_ATTR(visibility("default"))
1230int
1231je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
1232{
1233
1234	if (malloc_init())
1235		return (EAGAIN);
1236
1237	return (ctl_nametomib(name, mibp, miblenp));
1238}
1239
1240JEMALLOC_ATTR(visibility("default"))
1241int
1242je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
1243  void *newp, size_t newlen)
1244{
1245
1246	if (malloc_init())
1247		return (EAGAIN);
1248
1249	return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
1250}
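/*
 * Illustrative mallctl usage (a sketch, not code from this file; the
 * "stats.allocated" name assumes statistics support is compiled in):
 *
 *   size_t allocated, sz = sizeof(allocated);
 *   je_mallctl("stats.allocated", &allocated, &sz, NULL, 0);
 *
 * On success, allocated holds the total number of bytes currently allocated
 * by the application.  je_mallctlnametomib()/je_mallctlbymib() serve
 * repeated queries by translating the name to a MIB once and reusing it.
 */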
1251
1252/*
1253 * End non-standard functions.
1254 */
1255/******************************************************************************/
1256/*
1257 * Begin experimental functions.
1258 */
1259#ifdef JEMALLOC_EXPERIMENTAL
1260
1261JEMALLOC_INLINE void *
1262iallocm(size_t usize, size_t alignment, bool zero)
1263{
1264
1265	assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize, alignment,
1266	    NULL)));
1267
1268	if (alignment != 0)
1269		return (ipalloc(usize, alignment, zero));
1270	else if (zero)
1271		return (icalloc(usize));
1272	else
1273		return (imalloc(usize));
1274}
1275
1276JEMALLOC_ATTR(nonnull(1))
1277JEMALLOC_ATTR(visibility("default"))
1278int
1279je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
1280{
1281	void *p;
1282	size_t usize;
1283	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
1284	    & (SIZE_T_MAX-1));
1285	bool zero = flags & ALLOCM_ZERO;
1286	prof_thr_cnt_t *cnt;
1287
1288	assert(ptr != NULL);
1289	assert(size != 0);
1290
1291	if (malloc_init())
1292		goto OOM;
1293
1294	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment, NULL);
1295	if (usize == 0)
1296		goto OOM;
1297
1298	if (config_prof && opt_prof) {
1299		PROF_ALLOC_PREP(1, usize, cnt);
1300		if (cnt == NULL)
1301			goto OOM;
1302		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
1303		    SMALL_MAXCLASS) {
1304			size_t usize_promoted = (alignment == 0) ?
1305			    s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1,
1306			    alignment, NULL);
1307			assert(usize_promoted != 0);
1308			p = iallocm(usize_promoted, alignment, zero);
1309			if (p == NULL)
1310				goto OOM;
1311			arena_prof_promoted(p, usize);
1312		} else {
1313			p = iallocm(usize, alignment, zero);
1314			if (p == NULL)
1315				goto OOM;
1316		}
1317		prof_malloc(p, usize, cnt);
1318	} else {
1319		p = iallocm(usize, alignment, zero);
1320		if (p == NULL)
1321			goto OOM;
1322	}
1323	if (rsize != NULL)
1324		*rsize = usize;
1325
1326	*ptr = p;
1327	if (config_stats) {
1328		assert(usize == isalloc(p));
1329		thread_allocated_tsd_get()->allocated += usize;
1330	}
1331	return (ALLOCM_SUCCESS);
1332OOM:
1333	if (config_xmalloc && opt_xmalloc) {
1334		malloc_write("<jemalloc>: Error in allocm(): "
1335		    "out of memory\n");
1336		abort();
1337	}
1338	*ptr = NULL;
1339	return (ALLOCM_ERR_OOM);
1340}
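/*
 * Flag decoding in the experimental *allocm() API, as implemented above: the
 * low bits of "flags" hold lg(alignment) (masked by ALLOCM_LG_ALIGN_MASK,
 * with zero meaning no alignment constraint), ALLOCM_ZERO requests zeroed
 * memory, and ALLOCM_NO_MOVE (honored by je_rallocm()) forbids relocating
 * the allocation.  An illustrative call, assuming the public header's
 * ALLOCM_LG_ALIGN() helper for encoding the alignment bits:
 *
 *   void *p;
 *   size_t rsize;
 *   je_allocm(&p, &rsize, 4096, ALLOCM_ZERO | ALLOCM_LG_ALIGN(6));
 *
 * requests at least 4096 zeroed bytes aligned to 64 bytes, with the usable
 * size reported in rsize.
 */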
1341
1342JEMALLOC_ATTR(nonnull(1))
1343JEMALLOC_ATTR(visibility("default"))
1344int
1345je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
1346{
1347	void *p, *q;
1348	size_t usize;
1349	size_t old_size;
1350	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
1351	    & (SIZE_T_MAX-1));
1352	bool zero = flags & ALLOCM_ZERO;
1353	bool no_move = flags & ALLOCM_NO_MOVE;
1354	prof_thr_cnt_t *cnt;
1355
1356	assert(ptr != NULL);
1357	assert(*ptr != NULL);
1358	assert(size != 0);
1359	assert(SIZE_T_MAX - size >= extra);
1360	assert(malloc_initialized || IS_INITIALIZER);
1361
1362	p = *ptr;
1363	if (config_prof && opt_prof) {
1364		/*
1365		 * usize isn't knowable before iralloc() returns when extra is
1366		 * non-zero.  Therefore, compute its maximum possible value and
1367		 * use that in PROF_ALLOC_PREP() to decide whether to capture a
1368		 * backtrace.  prof_realloc() will use the actual usize to
1369		 * decide whether to sample.
1370		 */
1371		size_t max_usize = (alignment == 0) ? s2u(size+extra) :
1372		    sa2u(size+extra, alignment, NULL);
1373		prof_ctx_t *old_ctx = prof_ctx_get(p);
1374		old_size = isalloc(p);
1375		PROF_ALLOC_PREP(1, max_usize, cnt);
1376		if (cnt == NULL)
1377			goto OOM;
1378		/*
1379		 * Use minimum usize to determine whether promotion may happen.
1380		 */
1381		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
1382		    && ((alignment == 0) ? s2u(size) : sa2u(size,
1383		    alignment, NULL)) <= SMALL_MAXCLASS) {
1384			q = iralloc(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
1385			    size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
1386			    alignment, zero, no_move);
1387			if (q == NULL)
1388				goto ERR;
1389			if (max_usize < PAGE_SIZE) {
1390				usize = max_usize;
1391				arena_prof_promoted(q, usize);
1392			} else
1393				usize = isalloc(q);
1394		} else {
1395			q = iralloc(p, size, extra, alignment, zero, no_move);
1396			if (q == NULL)
1397				goto ERR;
1398			usize = isalloc(q);
1399		}
1400		prof_realloc(q, usize, cnt, old_size, old_ctx);
1401		if (rsize != NULL)
1402			*rsize = usize;
1403	} else {
1404		if (config_stats)
1405			old_size = isalloc(p);
1406		q = iralloc(p, size, extra, alignment, zero, no_move);
1407		if (q == NULL)
1408			goto ERR;
1409		if (config_stats)
1410			usize = isalloc(q);
1411		if (rsize != NULL) {
1412			if (config_stats == false)
1413				usize = isalloc(q);
1414			*rsize = usize;
1415		}
1416	}
1417
1418	*ptr = q;
1419	if (config_stats) {
1420		thread_allocated_t *ta;
1421		ta = thread_allocated_tsd_get();
1422		ta->allocated += usize;
1423		ta->deallocated += old_size;
1424	}
1425	return (ALLOCM_SUCCESS);
1426ERR:
1427	if (no_move)
1428		return (ALLOCM_ERR_NOT_MOVED);
1429OOM:
1430	if (config_xmalloc && opt_xmalloc) {
1431		malloc_write("<jemalloc>: Error in rallocm(): "
1432		    "out of memory\n");
1433		abort();
1434	}
1435	return (ALLOCM_ERR_OOM);
1436}
1437
1438JEMALLOC_ATTR(nonnull(1))
1439JEMALLOC_ATTR(visibility("default"))
1440int
1441je_sallocm(const void *ptr, size_t *rsize, int flags)
1442{
1443	size_t sz;
1444
1445	assert(malloc_initialized || IS_INITIALIZER);
1446
1447	if (config_ivsalloc)
1448		sz = ivsalloc(ptr);
1449	else {
1450		assert(ptr != NULL);
1451		sz = isalloc(ptr);
1452	}
1453	assert(rsize != NULL);
1454	*rsize = sz;
1455
1456	return (ALLOCM_SUCCESS);
1457}
1458
1459JEMALLOC_ATTR(nonnull(1))
1460JEMALLOC_ATTR(visibility("default"))
1461int
1462je_dallocm(void *ptr, int flags)
1463{
1464	size_t usize;
1465
1466	assert(ptr != NULL);
1467	assert(malloc_initialized || IS_INITIALIZER);
1468
1469	if (config_stats)
1470		usize = isalloc(ptr);
1471	if (config_prof && opt_prof) {
1472		if (config_stats == false)
1473			usize = isalloc(ptr);
1474		prof_free(ptr, usize);
1475	}
1476	if (config_stats)
1477		thread_allocated_tsd_get()->deallocated += usize;
1478	idalloc(ptr);
1479
1480	return (ALLOCM_SUCCESS);
1481}
1482
1483JEMALLOC_ATTR(visibility("default"))
1484int
1485je_nallocm(size_t *rsize, size_t size, int flags)
1486{
1487	size_t usize;
1488	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
1489	    & (SIZE_T_MAX-1));
1490
1491	assert(size != 0);
1492
1493	if (malloc_init())
1494		return (ALLOCM_ERR_OOM);
1495
1496	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment, NULL);
1497	if (usize == 0)
1498		return (ALLOCM_ERR_OOM);
1499
1500	if (rsize != NULL)
1501		*rsize = usize;
1502	return (ALLOCM_SUCCESS);
1503}
1504
1505#endif
1506/*
1507 * End experimental functions.
1508 */
1509/******************************************************************************/
1510
1511/*
1512 * The following functions are used by threading libraries for protection of
1513 * malloc during fork().
1514 */
1515
1516#ifndef JEMALLOC_MUTEX_INIT_CB
1517void
1518jemalloc_prefork(void)
1519#else
1520void
1521_malloc_prefork(void)
1522#endif
1523{
1524	unsigned i;
1525
1526	/* Acquire all mutexes in a safe order. */
1527	malloc_mutex_prefork(&arenas_lock);
1528	for (i = 0; i < narenas; i++) {
1529		if (arenas[i] != NULL)
1530			arena_prefork(arenas[i]);
1531	}
1532	base_prefork();
1533	huge_prefork();
1534	chunk_dss_prefork();
1535}
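/*
 * The prefork/postfork handlers make fork() in a multithreaded process safe
 * with respect to the allocator: every allocator mutex is acquired before
 * the fork, in a fixed order (arenas_lock, per-arena locks, base, huge,
 * DSS), and released afterwards in both parent and child, so the child can
 * never inherit a lock that some other thread held mid-operation.
 */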
1536
1537#ifndef JEMALLOC_MUTEX_INIT_CB
1538void
1539jemalloc_postfork_parent(void)
1540#else
1541void
1542_malloc_postfork(void)
1543#endif
1544{
1545	unsigned i;
1546
1547	/* Release all mutexes, now that fork() has completed. */
1548	chunk_dss_postfork_parent();
1549	huge_postfork_parent();
1550	base_postfork_parent();
1551	for (i = 0; i < narenas; i++) {
1552		if (arenas[i] != NULL)
1553			arena_postfork_parent(arenas[i]);
1554	}
1555	malloc_mutex_postfork_parent(&arenas_lock);
1556}
1557
1558void
1559jemalloc_postfork_child(void)
1560{
1561	unsigned i;
1562
1563	/* Release all mutexes, now that fork() has completed. */
1564	chunk_dss_postfork_child();
1565	huge_postfork_child();
1566	base_postfork_child();
1567	for (i = 0; i < narenas; i++) {
1568		if (arenas[i] != NULL)
1569			arena_postfork_child(arenas[i]);
1570	}
1571	malloc_mutex_postfork_child(&arenas_lock);
1572}
1573
1574/******************************************************************************/
1575