jemalloc.c revision 183ba50c1940a95080f6cf890ae4ae40200301e7
#define	JEMALLOC_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

malloc_mutex_t		arenas_lock;
arena_t			**arenas;
unsigned		narenas;

pthread_key_t		arenas_tsd;
#ifndef NO_TLS
__thread arena_t	*arenas_tls JEMALLOC_ATTR(tls_model("initial-exec"));
#endif

#ifdef JEMALLOC_STATS
#  ifndef NO_TLS
__thread thread_allocated_t	thread_allocated_tls;
#  else
pthread_key_t		thread_allocated_tsd;
#  endif
#endif

/* Set to true once the allocator has been initialized. */
static bool		malloc_initialized = false;

/* Used to let the initializing thread recursively allocate. */
static pthread_t	malloc_initializer = (unsigned long)0;

/* Used to avoid initialization races. */
static malloc_mutex_t	init_lock =
#ifdef JEMALLOC_OSSPIN
    0
#else
    MALLOC_MUTEX_INITIALIZER
#endif
    ;

#ifdef DYNAMIC_PAGE_SHIFT
size_t		pagesize;
size_t		pagesize_mask;
size_t		lg_pagesize;
#endif

unsigned	ncpus;

/* Runtime configuration options. */
const char	*JEMALLOC_P(malloc_conf) JEMALLOC_ATTR(visibility("default"));
#ifdef JEMALLOC_DEBUG
bool	opt_abort = true;
#  ifdef JEMALLOC_FILL
bool	opt_junk = true;
#  endif
#else
bool	opt_abort = false;
#  ifdef JEMALLOC_FILL
bool	opt_junk = false;
#  endif
#endif
#ifdef JEMALLOC_SYSV
bool	opt_sysv = false;
#endif
#ifdef JEMALLOC_XMALLOC
bool	opt_xmalloc = false;
#endif
#ifdef JEMALLOC_FILL
bool	opt_zero = false;
#endif
size_t	opt_narenas = 0;

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void	wrtmessage(void *cbopaque, const char *s);
static void	stats_print_atexit(void);
static unsigned	malloc_ncpus(void);
static void	arenas_cleanup(void *arg);
#if (defined(JEMALLOC_STATS) && defined(NO_TLS))
static void	thread_allocated_cleanup(void *arg);
#endif
static bool	malloc_conf_next(char const **opts_p, char const **k_p,
    size_t *klen_p, char const **v_p, size_t *vlen_p);
static void	malloc_conf_error(const char *msg, const char *k, size_t klen,
    const char *v, size_t vlen);
static void	malloc_conf_init(void);
static bool	malloc_init_hard(void);

/******************************************************************************/
/* malloc_message() setup. */

#ifdef JEMALLOC_HAVE_ATTR
JEMALLOC_ATTR(visibility("hidden"))
#else
static
#endif
void
wrtmessage(void *cbopaque, const char *s)
{
#ifdef JEMALLOC_CC_SILENCE
	int result =
#endif
	    write(STDERR_FILENO, s, strlen(s));
#ifdef JEMALLOC_CC_SILENCE
	if (result < 0)
		result = errno;
#endif
}

void	(*JEMALLOC_P(malloc_message))(void *, const char *s)
    JEMALLOC_ATTR(visibility("default")) = wrtmessage;

/******************************************************************************/
/*
 * Begin miscellaneous support functions.
 */

/* Create a new arena and insert it into the arenas array at index ind. */
arena_t *
arenas_extend(unsigned ind)
{
	arena_t *ret;

	/* Allocate enough space for trailing bins. */
	ret = (arena_t *)base_alloc(offsetof(arena_t, bins)
	    + (sizeof(arena_bin_t) * nbins));
	if (ret != NULL && arena_new(ret, ind) == false) {
		arenas[ind] = ret;
		return (ret);
	}
	/* Only reached if there is an OOM error. */

	/*
	 * OOM here is quite inconvenient to propagate, since dealing with it
	 * would require a check for failure in the fast path.  Instead, punt
	 * by using arenas[0].  In practice, this is an extremely unlikely
	 * failure.
	 */
	malloc_write("<jemalloc>: Error initializing arena\n");
	if (opt_abort)
		abort();

	return (arenas[0]);
}

/*
 * Choose an arena based on a per-thread value (slow-path code only, called
 * only by choose_arena()).
 */
arena_t *
choose_arena_hard(void)
{
	arena_t *ret;

	if (narenas > 1) {
		unsigned i, choose, first_null;

		choose = 0;
		first_null = narenas;
		malloc_mutex_lock(&arenas_lock);
		assert(arenas[0] != NULL);
		for (i = 1; i < narenas; i++) {
			if (arenas[i] != NULL) {
				/*
				 * Choose the first arena that has the lowest
				 * number of threads assigned to it.
				 */
				if (arenas[i]->nthreads <
				    arenas[choose]->nthreads)
					choose = i;
			} else if (first_null == narenas) {
				/*
				 * Record the index of the first uninitialized
				 * arena, in case all extant arenas are in use.
				 *
				 * NB: It is possible for there to be
				 * discontinuities in terms of initialized
				 * versus uninitialized arenas, due to the
				 * "thread.arena" mallctl.
				 */
				first_null = i;
			}
		}

		if (arenas[choose]->nthreads == 0 || first_null == narenas) {
			/*
			 * Use an unloaded arena, or the least loaded arena if
			 * all arenas are already initialized.
			 */
			ret = arenas[choose];
		} else {
			/* Initialize a new arena. */
			ret = arenas_extend(first_null);
		}
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	} else {
		ret = arenas[0];
		malloc_mutex_lock(&arenas_lock);
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	}

	ARENA_SET(ret);

	return (ret);
}

/*
 * glibc provides a non-standard strerror_r() when _GNU_SOURCE is defined, so
 * provide a wrapper.
 */
int
buferror(int errnum, char *buf, size_t buflen)
{
#ifdef _GNU_SOURCE
	char *b = strerror_r(errnum, buf, buflen);
	if (b != buf) {
		strncpy(buf, b, buflen);
		buf[buflen-1] = '\0';
	}
	return (0);
#else
	return (strerror_r(errnum, buf, buflen));
#endif
}
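
/*
 * Illustrative (hypothetical) caller, not part of this file: format an error
 * number into a caller-supplied buffer regardless of which strerror_r()
 * variant is in effect:
 *
 *	char ebuf[128];
 *	buferror(ENOMEM, ebuf, sizeof(ebuf));
 *	malloc_write(ebuf);
 */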

static void
stats_print_atexit(void)
{

#if (defined(JEMALLOC_TCACHE) && defined(JEMALLOC_STATS))
	unsigned i;

	/*
	 * Merge stats from extant threads.  This is racy, since individual
	 * threads do not lock when recording tcache stats events.  As a
	 * consequence, the final stats may be slightly out of date by the time
	 * they are reported, if other threads continue to allocate.
	 */
	for (i = 0; i < narenas; i++) {
		arena_t *arena = arenas[i];
		if (arena != NULL) {
			tcache_t *tcache;

			/*
			 * tcache_stats_merge() locks bins, so if any code is
			 * introduced that acquires both arena and bin locks in
			 * the opposite order, deadlocks may result.
			 */
			malloc_mutex_lock(&arena->lock);
			ql_foreach(tcache, &arena->tcache_ql, link) {
				tcache_stats_merge(tcache, arena);
			}
			malloc_mutex_unlock(&arena->lock);
		}
	}
#endif
	JEMALLOC_P(malloc_stats_print)(NULL, NULL, NULL);
}

#if (defined(JEMALLOC_STATS) && defined(NO_TLS))
thread_allocated_t *
thread_allocated_get_hard(void)
{
	thread_allocated_t *thread_allocated = (thread_allocated_t *)
	    imalloc(sizeof(thread_allocated_t));
	if (thread_allocated == NULL) {
		static thread_allocated_t static_thread_allocated = {0, 0};
		malloc_write("<jemalloc>: Error allocating TSD;"
		    " mallctl(\"thread.{de,}allocated[p]\", ...)"
		    " will be inaccurate\n");
		if (opt_abort)
			abort();
		return (&static_thread_allocated);
	}
	pthread_setspecific(thread_allocated_tsd, thread_allocated);
	thread_allocated->allocated = 0;
	thread_allocated->deallocated = 0;
	return (thread_allocated);
}
#endif

/*
 * End miscellaneous support functions.
 */
/******************************************************************************/
/*
 * Begin initialization functions.
 */

static unsigned
malloc_ncpus(void)
{
	unsigned ret;
	long result;

	result = sysconf(_SC_NPROCESSORS_ONLN);
	if (result == -1) {
		/* Error. */
		ret = 1;
	} else
		ret = (unsigned)result;

	return (ret);
}

static void
arenas_cleanup(void *arg)
{
	arena_t *arena = (arena_t *)arg;

	malloc_mutex_lock(&arenas_lock);
	arena->nthreads--;
	malloc_mutex_unlock(&arenas_lock);
}

#if (defined(JEMALLOC_STATS) && defined(NO_TLS))
static void
thread_allocated_cleanup(void *arg)
{
	uint64_t *allocated = (uint64_t *)arg;

	if (allocated != NULL)
		idalloc(allocated);
}
#endif

/*
 * FreeBSD's pthreads implementation calls malloc(3), so the malloc
 * implementation has to take pains to avoid infinite recursion during
 * initialization.
 */
static inline bool
malloc_init(void)
{

	if (malloc_initialized == false)
		return (malloc_init_hard());

	return (false);
}

static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p)
{
	bool accept;
	const char *opts = *opts_p;

	*k_p = opts;

	for (accept = false; accept == false;) {
		switch (*opts) {
			case 'A': case 'B': case 'C': case 'D': case 'E':
			case 'F': case 'G': case 'H': case 'I': case 'J':
			case 'K': case 'L': case 'M': case 'N': case 'O':
			case 'P': case 'Q': case 'R': case 'S': case 'T':
			case 'U': case 'V': case 'W': case 'X': case 'Y':
			case 'Z':
			case 'a': case 'b': case 'c': case 'd': case 'e':
			case 'f': case 'g': case 'h': case 'i': case 'j':
			case 'k': case 'l': case 'm': case 'n': case 'o':
			case 'p': case 'q': case 'r': case 's': case 't':
			case 'u': case 'v': case 'w': case 'x': case 'y':
			case 'z':
			case '0': case '1': case '2': case '3': case '4':
			case '5': case '6': case '7': case '8': case '9':
			case '_':
				opts++;
				break;
			case ':':
				opts++;
				*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
				*v_p = opts;
				accept = true;
				break;
			case '\0':
				if (opts != *opts_p) {
					malloc_write("<jemalloc>: Conf string "
					    "ends with key\n");
				}
				return (true);
			default:
				malloc_write("<jemalloc>: Malformed conf "
				    "string\n");
				return (true);
		}
	}

	for (accept = false; accept == false;) {
		switch (*opts) {
			case ',':
				opts++;
				/*
				 * Look ahead one character here, because the
				 * next time this function is called, it will
				 * assume that end of input has been cleanly
				 * reached if no input remains, but we have
				 * optimistically already consumed the comma if
				 * one exists.
				 */
				if (*opts == '\0') {
					malloc_write("<jemalloc>: Conf string "
					    "ends with comma\n");
				}
				*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
				accept = true;
				break;
			case '\0':
				*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
				accept = true;
				break;
			default:
				opts++;
				break;
		}
	}

	*opts_p = opts;
	return (false);
}
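
/*
 * A minimal sketch of the intended calling pattern (it mirrors the parse loop
 * in malloc_conf_init(); the conf string here is illustrative):
 *
 *	const char *opts = "abort:true,narenas:8";
 *	const char *k, *v;
 *	size_t klen, vlen;
 *
 *	while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
 *	    &vlen) == false) {
 *		First pass: k points at "abort", klen == 5, v points at
 *		"true", vlen == 4.  Second pass: k points at "narenas",
 *		klen == 7, v points at "8", vlen == 1.
 *	}
 *
 * Note that k and v point into the conf string and are not NUL-terminated;
 * klen and vlen delimit them.
 */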

static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
    size_t vlen)
{
	char buf[PATH_MAX + 1];

	malloc_write("<jemalloc>: ");
	malloc_write(msg);
	malloc_write(": ");
	memcpy(buf, k, klen);
	memcpy(&buf[klen], ":", 1);
	memcpy(&buf[klen+1], v, vlen);
	buf[klen+1+vlen] = '\0';
	malloc_write(buf);
	malloc_write("\n");
}

static void
malloc_conf_init(void)
{
	unsigned i;
	char buf[PATH_MAX + 1];
	const char *opts, *k, *v;
	size_t klen, vlen;

	for (i = 0; i < 3; i++) {
		/* Get runtime configuration. */
		switch (i) {
		case 0:
			if (JEMALLOC_P(malloc_conf) != NULL) {
				/*
				 * Use options that were compiled into the
				 * program.
				 */
				opts = JEMALLOC_P(malloc_conf);
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		case 1: {
			int linklen;
			const char *linkname =
#ifdef JEMALLOC_PREFIX
			    "/etc/"JEMALLOC_PREFIX"malloc.conf"
#else
			    "/etc/malloc.conf"
#endif
			    ;

			if ((linklen = readlink(linkname, buf,
			    sizeof(buf) - 1)) != -1) {
				/*
				 * Use the contents of the "/etc/malloc.conf"
				 * symbolic link's name.
				 */
				buf[linklen] = '\0';
				opts = buf;
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		}
		case 2: {
			const char *envname =
#ifdef JEMALLOC_PREFIX
			    JEMALLOC_CPREFIX"MALLOC_CONF"
#else
			    "MALLOC_CONF"
#endif
			    ;

			if ((opts = getenv(envname)) != NULL) {
				/*
				 * Do nothing; opts is already initialized to
				 * the value of the MALLOC_CONF environment
				 * variable.
				 */
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		}
		default:
			/* NOTREACHED */
			assert(false);
			buf[0] = '\0';
			opts = buf;
		}

		while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
		    &vlen) == false) {
#define	CONF_HANDLE_BOOL(n)						\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				if (strncmp("true", v, vlen) == 0 &&	\
				    vlen == sizeof("true")-1)		\
					opt_##n = true;			\
				else if (strncmp("false", v, vlen) ==	\
				    0 && vlen == sizeof("false")-1)	\
					opt_##n = false;		\
				else {					\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				}					\
				continue;				\
			}
#define	CONF_HANDLE_SIZE_T(n, min, max)					\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				unsigned long ul;			\
				char *end;				\
									\
				errno = 0;				\
				ul = strtoul(v, &end, 0);		\
				if (errno != 0 || (uintptr_t)end -	\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (ul < min || ul > max) {	\
					malloc_conf_error(		\
					    "Out-of-range conf value",	\
					    k, klen, v, vlen);		\
				} else					\
					opt_##n = ul;			\
				continue;				\
			}
#define	CONF_HANDLE_SSIZE_T(n, min, max)				\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				long l;					\
				char *end;				\
									\
				errno = 0;				\
				l = strtol(v, &end, 0);			\
				if (errno != 0 || (uintptr_t)end -	\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (l < (ssize_t)min || l >	\
				    (ssize_t)max) {			\
					malloc_conf_error(		\
					    "Out-of-range conf value",	\
					    k, klen, v, vlen);		\
				} else					\
					opt_##n = l;			\
				continue;				\
			}
#define	CONF_HANDLE_CHAR_P(n, d)					\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				size_t cpylen = (vlen <=		\
				    sizeof(opt_##n)-1) ? vlen :		\
				    sizeof(opt_##n)-1;			\
				strncpy(opt_##n, v, cpylen);		\
				opt_##n[cpylen] = '\0';			\
				continue;				\
			}

			CONF_HANDLE_BOOL(abort)
			CONF_HANDLE_SIZE_T(lg_qspace_max, LG_QUANTUM,
			    PAGE_SHIFT-1)
			CONF_HANDLE_SIZE_T(lg_cspace_max, LG_QUANTUM,
			    PAGE_SHIFT-1)
			/*
			 * Chunks always require at least one header page, plus
			 * one data page.
			 */
			CONF_HANDLE_SIZE_T(lg_chunk, PAGE_SHIFT+1,
			    (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_SIZE_T(narenas, 1, SIZE_T_MAX)
			CONF_HANDLE_SSIZE_T(lg_dirty_mult, -1,
			    (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_BOOL(stats_print)
#ifdef JEMALLOC_FILL
			CONF_HANDLE_BOOL(junk)
			CONF_HANDLE_BOOL(zero)
#endif
#ifdef JEMALLOC_SYSV
			CONF_HANDLE_BOOL(sysv)
#endif
#ifdef JEMALLOC_XMALLOC
			CONF_HANDLE_BOOL(xmalloc)
#endif
#ifdef JEMALLOC_TCACHE
			CONF_HANDLE_BOOL(tcache)
			CONF_HANDLE_SSIZE_T(lg_tcache_gc_sweep, -1,
			    (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_SSIZE_T(lg_tcache_max, -1,
			    (sizeof(size_t) << 3) - 1)
#endif
#ifdef JEMALLOC_PROF
			CONF_HANDLE_BOOL(prof)
			CONF_HANDLE_CHAR_P(prof_prefix, "jeprof")
			CONF_HANDLE_SIZE_T(lg_prof_bt_max, 0, LG_PROF_BT_MAX)
			CONF_HANDLE_BOOL(prof_active)
			CONF_HANDLE_SSIZE_T(lg_prof_sample, 0,
			    (sizeof(uint64_t) << 3) - 1)
			CONF_HANDLE_BOOL(prof_accum)
			CONF_HANDLE_SSIZE_T(lg_prof_tcmax, -1,
			    (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_SSIZE_T(lg_prof_interval, -1,
			    (sizeof(uint64_t) << 3) - 1)
			CONF_HANDLE_BOOL(prof_gdump)
			CONF_HANDLE_BOOL(prof_leak)
#endif
#ifdef JEMALLOC_SWAP
			CONF_HANDLE_BOOL(overcommit)
#endif
			malloc_conf_error("Invalid conf pair", k, klen, v,
			    vlen);
#undef CONF_HANDLE_BOOL
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
		}

		/* Validate configuration of options that are inter-related. */
		if (opt_lg_qspace_max+1 >= opt_lg_cspace_max) {
			malloc_write("<jemalloc>: Invalid lg_[qc]space_max "
			    "relationship; restoring defaults\n");
			opt_lg_qspace_max = LG_QSPACE_MAX_DEFAULT;
			opt_lg_cspace_max = LG_CSPACE_MAX_DEFAULT;
		}
	}
}
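
/*
 * The three sources above are parsed in order, so a setting from a later
 * source overrides the same setting from an earlier one.  Illustrative
 * examples of each source (option names as handled above; unprefixed names
 * assume no JEMALLOC_PREFIX):
 *
 *	1) Compiled-in, assigned by the application before the first
 *	   allocation:
 *		malloc_conf = "narenas:2";
 *	2) The name (target) of the /etc/malloc.conf symbolic link:
 *		ln -s 'lg_chunk:24' /etc/malloc.conf
 *	3) Environment:
 *		export MALLOC_CONF="abort:true,stats_print:true"
 */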

static bool
malloc_init_hard(void)
{
	arena_t *init_arenas[1];

	malloc_mutex_lock(&init_lock);
	if (malloc_initialized || malloc_initializer == pthread_self()) {
		/*
		 * Another thread initialized the allocator before this one
		 * acquired init_lock, or this thread is the initializing
		 * thread, and it is recursively allocating.
		 */
		malloc_mutex_unlock(&init_lock);
		return (false);
	}
	if (malloc_initializer != (unsigned long)0) {
		/* Busy-wait until the initializing thread completes. */
		do {
			malloc_mutex_unlock(&init_lock);
			CPU_SPINWAIT;
			malloc_mutex_lock(&init_lock);
		} while (malloc_initialized == false);
		malloc_mutex_unlock(&init_lock);
		return (false);
	}

#ifdef DYNAMIC_PAGE_SHIFT
	/* Get page size. */
	{
		long result;

		result = sysconf(_SC_PAGESIZE);
		assert(result != -1);
		pagesize = (unsigned)result;

		/*
		 * We assume that pagesize is a power of 2 when calculating
		 * pagesize_mask and lg_pagesize.
		 */
		assert(((result - 1) & result) == 0);
		pagesize_mask = result - 1;
		lg_pagesize = ffs((int)result) - 1;
	}
#endif

#ifdef JEMALLOC_PROF
	prof_boot0();
#endif

	malloc_conf_init();

	/* Register fork handlers. */
	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork,
	    jemalloc_postfork) != 0) {
		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
		if (opt_abort)
			abort();
	}

	if (ctl_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (opt_stats_print) {
		/* Print statistics at exit. */
		if (atexit(stats_print_atexit) != 0) {
			malloc_write("<jemalloc>: Error in atexit()\n");
			if (opt_abort)
				abort();
		}
	}

	if (chunk_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (base_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

#ifdef JEMALLOC_PROF
	prof_boot1();
#endif

	if (arena_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

#ifdef JEMALLOC_TCACHE
	if (tcache_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}
#endif

	if (huge_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

#if (defined(JEMALLOC_STATS) && defined(NO_TLS))
	/* Initialize allocation counters before any allocations can occur. */
	if (pthread_key_create(&thread_allocated_tsd, thread_allocated_cleanup)
	    != 0) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}
#endif

	/*
	 * Create enough scaffolding to allow recursive allocation in
	 * malloc_ncpus().
	 */
	narenas = 1;
	arenas = init_arenas;
	memset(arenas, 0, sizeof(arena_t *) * narenas);

	/*
	 * Initialize one arena here.  The rest are lazily created in
	 * choose_arena_hard().
	 */
	arenas_extend(0);
	if (arenas[0] == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/*
	 * Assign the initial arena to the initial thread, in order to avoid
	 * spurious creation of an extra arena if the application switches to
	 * threaded mode.
	 */
	ARENA_SET(arenas[0]);
	arenas[0]->nthreads++;

	if (malloc_mutex_init(&arenas_lock)) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (pthread_key_create(&arenas_tsd, arenas_cleanup) != 0) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

#ifdef JEMALLOC_PROF
	if (prof_boot2()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}
#endif

	/* Get number of CPUs. */
	malloc_initializer = pthread_self();
	malloc_mutex_unlock(&init_lock);
	ncpus = malloc_ncpus();
	malloc_mutex_lock(&init_lock);

	if (opt_narenas == 0) {
		/*
		 * For SMP systems, create more than one arena per CPU by
		 * default.
		 */
		if (ncpus > 1)
			opt_narenas = ncpus << 2;
		else
			opt_narenas = 1;
	}
	narenas = opt_narenas;
	/*
	 * Make sure that the arenas array can be allocated.  In practice, this
	 * limit is enough to allow the allocator to function, but the ctl
	 * machinery will fail to allocate memory at far lower limits.
	 */
	if (narenas > chunksize / sizeof(arena_t *)) {
		char buf[UMAX2S_BUFSIZE];

		narenas = chunksize / sizeof(arena_t *);
		malloc_write("<jemalloc>: Reducing narenas to limit (");
		malloc_write(u2s(narenas, 10, buf));
		malloc_write(")\n");
	}

	/* Allocate and initialize arenas. */
	arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
	if (arenas == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}
	/*
	 * Zero the array.  In practice, this should always be pre-zeroed,
	 * since it was just mmap()ed, but let's be sure.
	 */
	memset(arenas, 0, sizeof(arena_t *) * narenas);
	/* Copy the pointer to the one arena that was already initialized. */
	arenas[0] = init_arenas[0];

#ifdef JEMALLOC_ZONE
	/* Register the custom zone. */
	malloc_zone_register(create_zone());

	/*
	 * Convert the default szone to an "overlay zone" that is capable of
	 * deallocating szone-allocated objects, but allocating new objects
	 * from jemalloc.
	 */
	szone2ozone(malloc_default_zone());
#endif

	malloc_initialized = true;
	malloc_mutex_unlock(&init_lock);
	return (false);
}

#ifdef JEMALLOC_ZONE
JEMALLOC_ATTR(constructor)
void
jemalloc_darwin_init(void)
{

	if (malloc_init_hard())
		abort();
}
#endif

/*
 * End initialization functions.
 */
/******************************************************************************/
/*
 * Begin malloc(3)-compatible functions.
 */

JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(malloc)(size_t size)
{
	void *ret;
#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
	size_t usize
#  ifdef JEMALLOC_CC_SILENCE
	    = 0
#  endif
	    ;
#endif
#ifdef JEMALLOC_PROF
	prof_thr_cnt_t *cnt
#  ifdef JEMALLOC_CC_SILENCE
	    = NULL
#  endif
	    ;
#endif

	if (malloc_init()) {
		ret = NULL;
		goto OOM;
	}

	if (size == 0) {
#ifdef JEMALLOC_SYSV
		if (opt_sysv == false)
#endif
			size = 1;
#ifdef JEMALLOC_SYSV
		else {
#  ifdef JEMALLOC_XMALLOC
			if (opt_xmalloc) {
				malloc_write("<jemalloc>: Error in malloc(): "
				    "invalid size 0\n");
				abort();
			}
#  endif
			ret = NULL;
			goto RETURN;
		}
#endif
	}

#ifdef JEMALLOC_PROF
	if (opt_prof) {
		usize = s2u(size);
		if ((cnt = prof_alloc_prep(usize)) == NULL) {
			ret = NULL;
			goto OOM;
		}
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
		    small_maxclass) {
			ret = imalloc(small_maxclass+1);
			if (ret != NULL)
				arena_prof_promoted(ret, usize);
		} else
			ret = imalloc(size);
	} else
#endif
	{
#ifdef JEMALLOC_STATS
		usize = s2u(size);
#endif
		ret = imalloc(size);
	}

OOM:
	if (ret == NULL) {
#ifdef JEMALLOC_XMALLOC
		if (opt_xmalloc) {
			malloc_write("<jemalloc>: Error in malloc(): "
			    "out of memory\n");
			abort();
		}
#endif
		errno = ENOMEM;
	}

#ifdef JEMALLOC_SYSV
RETURN:
#endif
#ifdef JEMALLOC_PROF
	if (opt_prof && ret != NULL)
		prof_malloc(ret, usize, cnt);
#endif
#ifdef JEMALLOC_STATS
	if (ret != NULL) {
		assert(usize == isalloc(ret));
		ALLOCATED_ADD(usize, 0);
	}
#endif
	return (ret);
}

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(posix_memalign)(void **memptr, size_t alignment, size_t size)
{
	int ret;
	size_t usize
#ifdef JEMALLOC_CC_SILENCE
	    = 0
#endif
	    ;
	void *result;
#ifdef JEMALLOC_PROF
	prof_thr_cnt_t *cnt
#  ifdef JEMALLOC_CC_SILENCE
	    = NULL
#  endif
	    ;
#endif

	if (malloc_init())
		result = NULL;
	else {
		if (size == 0) {
#ifdef JEMALLOC_SYSV
			if (opt_sysv == false)
#endif
				size = 1;
#ifdef JEMALLOC_SYSV
			else {
#  ifdef JEMALLOC_XMALLOC
				if (opt_xmalloc) {
					malloc_write("<jemalloc>: Error in "
					    "posix_memalign(): invalid size "
					    "0\n");
					abort();
				}
#  endif
				result = NULL;
				*memptr = NULL;
				ret = 0;
				goto RETURN;
			}
#endif
		}

		/* Make sure that alignment is a large enough power of 2. */
		if (((alignment - 1) & alignment) != 0
		    || alignment < sizeof(void *)) {
#ifdef JEMALLOC_XMALLOC
			if (opt_xmalloc) {
				malloc_write("<jemalloc>: Error in "
				    "posix_memalign(): invalid alignment\n");
				abort();
			}
#endif
			result = NULL;
			ret = EINVAL;
			goto RETURN;
		}

		usize = sa2u(size, alignment, NULL);
		if (usize == 0) {
			result = NULL;
			ret = ENOMEM;
			goto RETURN;
		}

#ifdef JEMALLOC_PROF
		if (opt_prof) {
			if ((cnt = prof_alloc_prep(usize)) == NULL) {
				result = NULL;
				ret = EINVAL;
			} else {
				if (prof_promote && (uintptr_t)cnt !=
				    (uintptr_t)1U && usize <= small_maxclass) {
					assert(sa2u(small_maxclass+1,
					    alignment, NULL) != 0);
					result = ipalloc(sa2u(small_maxclass+1,
					    alignment, NULL), alignment, false);
					if (result != NULL) {
						arena_prof_promoted(result,
						    usize);
					}
				} else {
					result = ipalloc(usize, alignment,
					    false);
				}
			}
		} else
#endif
			result = ipalloc(usize, alignment, false);
	}

	if (result == NULL) {
#ifdef JEMALLOC_XMALLOC
		if (opt_xmalloc) {
			malloc_write("<jemalloc>: Error in posix_memalign(): "
			    "out of memory\n");
			abort();
		}
#endif
		ret = ENOMEM;
		goto RETURN;
	}

	*memptr = result;
	ret = 0;

RETURN:
#ifdef JEMALLOC_STATS
	if (result != NULL) {
		assert(usize == isalloc(result));
		ALLOCATED_ADD(usize, 0);
	}
#endif
#ifdef JEMALLOC_PROF
	if (opt_prof && result != NULL)
		prof_malloc(result, usize, cnt);
#endif
	return (ret);
}

JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(calloc)(size_t num, size_t size)
{
	void *ret;
	size_t num_size;
#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
	size_t usize
#  ifdef JEMALLOC_CC_SILENCE
	    = 0
#  endif
	    ;
#endif
#ifdef JEMALLOC_PROF
	prof_thr_cnt_t *cnt
#  ifdef JEMALLOC_CC_SILENCE
	    = NULL
#  endif
	    ;
#endif

	if (malloc_init()) {
		num_size = 0;
		ret = NULL;
		goto RETURN;
	}

	num_size = num * size;
	if (num_size == 0) {
#ifdef JEMALLOC_SYSV
		if ((opt_sysv == false) && ((num == 0) || (size == 0)))
#endif
			num_size = 1;
#ifdef JEMALLOC_SYSV
		else {
			ret = NULL;
			goto RETURN;
		}
#endif
	/*
	 * Try to avoid division here.  We know that it isn't possible to
	 * overflow during multiplication if neither operand uses any of the
	 * most significant half of the bits in a size_t.
	 */
	} else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
	    && (num_size / size != num)) {
		/* size_t overflow. */
		ret = NULL;
		goto RETURN;
	}

#ifdef JEMALLOC_PROF
	if (opt_prof) {
		usize = s2u(num_size);
		if ((cnt = prof_alloc_prep(usize)) == NULL) {
			ret = NULL;
			goto RETURN;
		}
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize
		    <= small_maxclass) {
			ret = icalloc(small_maxclass+1);
			if (ret != NULL)
				arena_prof_promoted(ret, usize);
		} else
			ret = icalloc(num_size);
	} else
#endif
	{
#ifdef JEMALLOC_STATS
		usize = s2u(num_size);
#endif
		ret = icalloc(num_size);
	}

RETURN:
	if (ret == NULL) {
#ifdef JEMALLOC_XMALLOC
		if (opt_xmalloc) {
			malloc_write("<jemalloc>: Error in calloc(): out of "
			    "memory\n");
			abort();
		}
#endif
		errno = ENOMEM;
	}

#ifdef JEMALLOC_PROF
	if (opt_prof && ret != NULL)
		prof_malloc(ret, usize, cnt);
#endif
#ifdef JEMALLOC_STATS
	if (ret != NULL) {
		assert(usize == isalloc(ret));
		ALLOCATED_ADD(usize, 0);
	}
#endif
	return (ret);
}

JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(realloc)(void *ptr, size_t size)
{
	void *ret;
#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
	size_t usize
#  ifdef JEMALLOC_CC_SILENCE
	    = 0
#  endif
	    ;
	size_t old_size = 0;
#endif
#ifdef JEMALLOC_PROF
	prof_thr_cnt_t *cnt
#  ifdef JEMALLOC_CC_SILENCE
	    = NULL
#  endif
	    ;
	prof_ctx_t *old_ctx
#  ifdef JEMALLOC_CC_SILENCE
	    = NULL
#  endif
	    ;
#endif

	if (size == 0) {
#ifdef JEMALLOC_SYSV
		if (opt_sysv == false)
#endif
			size = 1;
#ifdef JEMALLOC_SYSV
		else {
			if (ptr != NULL) {
#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
				old_size = isalloc(ptr);
#endif
#ifdef JEMALLOC_PROF
				if (opt_prof) {
					old_ctx = prof_ctx_get(ptr);
					cnt = NULL;
				}
#endif
				idalloc(ptr);
			}
#ifdef JEMALLOC_PROF
			else if (opt_prof) {
				old_ctx = NULL;
				cnt = NULL;
			}
#endif
			ret = NULL;
			goto RETURN;
		}
#endif
	}

	if (ptr != NULL) {
		assert(malloc_initialized || malloc_initializer ==
		    pthread_self());

#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
		old_size = isalloc(ptr);
#endif
#ifdef JEMALLOC_PROF
		if (opt_prof) {
			usize = s2u(size);
			old_ctx = prof_ctx_get(ptr);
			if ((cnt = prof_alloc_prep(usize)) == NULL) {
				ret = NULL;
				goto OOM;
			}
			if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U &&
			    usize <= small_maxclass) {
				ret = iralloc(ptr, small_maxclass+1, 0, 0,
				    false, false);
				if (ret != NULL)
					arena_prof_promoted(ret, usize);
			} else
				ret = iralloc(ptr, size, 0, 0, false, false);
		} else
#endif
		{
#ifdef JEMALLOC_STATS
			usize = s2u(size);
#endif
			ret = iralloc(ptr, size, 0, 0, false, false);
		}

#ifdef JEMALLOC_PROF
OOM:
#endif
		if (ret == NULL) {
#ifdef JEMALLOC_XMALLOC
			if (opt_xmalloc) {
				malloc_write("<jemalloc>: Error in realloc(): "
				    "out of memory\n");
				abort();
			}
#endif
			errno = ENOMEM;
		}
	} else {
#ifdef JEMALLOC_PROF
		if (opt_prof)
			old_ctx = NULL;
#endif
		if (malloc_init()) {
#ifdef JEMALLOC_PROF
			if (opt_prof)
				cnt = NULL;
#endif
			ret = NULL;
		} else {
#ifdef JEMALLOC_PROF
			if (opt_prof) {
				usize = s2u(size);
				if ((cnt = prof_alloc_prep(usize)) == NULL)
					ret = NULL;
				else {
					if (prof_promote && (uintptr_t)cnt !=
					    (uintptr_t)1U && usize <=
					    small_maxclass) {
						ret = imalloc(small_maxclass+1);
						if (ret != NULL) {
							arena_prof_promoted(ret,
							    usize);
						}
					} else
						ret = imalloc(size);
				}
			} else
#endif
			{
#ifdef JEMALLOC_STATS
				usize = s2u(size);
#endif
				ret = imalloc(size);
			}
		}

		if (ret == NULL) {
#ifdef JEMALLOC_XMALLOC
			if (opt_xmalloc) {
				malloc_write("<jemalloc>: Error in realloc(): "
				    "out of memory\n");
				abort();
			}
#endif
			errno = ENOMEM;
		}
	}

#ifdef JEMALLOC_SYSV
RETURN:
#endif
#ifdef JEMALLOC_PROF
	if (opt_prof)
		prof_realloc(ret, usize, cnt, old_size, old_ctx);
#endif
#ifdef JEMALLOC_STATS
	if (ret != NULL) {
		assert(usize == isalloc(ret));
		ALLOCATED_ADD(usize, old_size);
	}
#endif
	return (ret);
}

JEMALLOC_ATTR(visibility("default"))
void
JEMALLOC_P(free)(void *ptr)
{

	if (ptr != NULL) {
#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
		size_t usize;
#endif

		assert(malloc_initialized || malloc_initializer ==
		    pthread_self());

#ifdef JEMALLOC_STATS
		usize = isalloc(ptr);
#endif
#ifdef JEMALLOC_PROF
		if (opt_prof) {
#  ifndef JEMALLOC_STATS
			usize = isalloc(ptr);
#  endif
			prof_free(ptr, usize);
		}
#endif
#ifdef JEMALLOC_STATS
		ALLOCATED_ADD(0, usize);
#endif
		idalloc(ptr);
	}
}

/*
 * End malloc(3)-compatible functions.
 */
/******************************************************************************/
/*
 * Begin non-standard override functions.
 *
 * These overrides are omitted if the JEMALLOC_PREFIX is defined, since the
 * entire point is to avoid accidental mixed allocator usage.
 */
#ifndef JEMALLOC_PREFIX

#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(memalign)(size_t alignment, size_t size)
{
	void *ret;
#ifdef JEMALLOC_CC_SILENCE
	int result =
#endif
	    JEMALLOC_P(posix_memalign)(&ret, alignment, size);
#ifdef JEMALLOC_CC_SILENCE
	if (result != 0)
		return (NULL);
#endif
	return (ret);
}
#endif

#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(valloc)(size_t size)
{
	void *ret;
#ifdef JEMALLOC_CC_SILENCE
	int result =
#endif
	    JEMALLOC_P(posix_memalign)(&ret, PAGE_SIZE, size);
#ifdef JEMALLOC_CC_SILENCE
	if (result != 0)
		return (NULL);
#endif
	return (ret);
}
#endif

#endif /* JEMALLOC_PREFIX */
/*
 * End non-standard override functions.
 */
/******************************************************************************/
/*
 * Begin non-standard functions.
 */

JEMALLOC_ATTR(visibility("default"))
size_t
JEMALLOC_P(malloc_usable_size)(const void *ptr)
{
	size_t ret;

	assert(malloc_initialized || malloc_initializer == pthread_self());

#ifdef JEMALLOC_IVSALLOC
	ret = ivsalloc(ptr);
#else
	assert(ptr != NULL);
	ret = isalloc(ptr);
#endif

	return (ret);
}

JEMALLOC_ATTR(visibility("default"))
void
JEMALLOC_P(malloc_stats_print)(void (*write_cb)(void *, const char *),
    void *cbopaque, const char *opts)
{

	stats_print(write_cb, cbopaque, opts);
}
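
/*
 * Illustrative call (names assume no JEMALLOC_PREFIX): dump human-readable
 * statistics via the default malloc_message() callback, which writes to
 * stderr.  A non-NULL opts string can suppress portions of the output; see
 * stats_print() for the recognized option characters.
 *
 *	malloc_stats_print(NULL, NULL, NULL);
 */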

JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(mallctl)(const char *name, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_byname(name, oldp, oldlenp, newp, newlen));
}

JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(mallctlnametomib)(const char *name, size_t *mibp, size_t *miblenp)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_nametomib(name, mibp, miblenp));
}

JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(mallctlbymib)(const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
}
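
/*
 * Illustrative mallctl() usage from application code (names assume no
 * JEMALLOC_PREFIX; "stats.allocated" requires JEMALLOC_STATS).  Writing to
 * "epoch" refreshes the cached statistics that the stats.* names report:
 *
 *	uint64_t epoch = 1;
 *	size_t sz = sizeof(epoch);
 *	mallctl("epoch", &epoch, &sz, &epoch, sz);
 *
 *	size_t allocated;
 *	sz = sizeof(allocated);
 *	mallctl("stats.allocated", &allocated, &sz, NULL, 0);
 *
 * The MIB variants amortize the name lookup across repeated queries:
 *
 *	size_t mib[2], miblen = 2;
 *	mallctlnametomib("stats.allocated", mib, &miblen);
 *	mallctlbymib(mib, miblen, &allocated, &sz, NULL, 0);
 */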

JEMALLOC_INLINE void *
iallocm(size_t usize, size_t alignment, bool zero)
{

	assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize, alignment,
	    NULL)));

	if (alignment != 0)
		return (ipalloc(usize, alignment, zero));
	else if (zero)
		return (icalloc(usize));
	else
		return (imalloc(usize));
}

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(allocm)(void **ptr, size_t *rsize, size_t size, int flags)
{
	void *p;
	size_t usize;
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));
	bool zero = flags & ALLOCM_ZERO;
#ifdef JEMALLOC_PROF
	prof_thr_cnt_t *cnt;
#endif

	assert(ptr != NULL);
	assert(size != 0);

	if (malloc_init())
		goto OOM;

	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment,
	    NULL);
	if (usize == 0)
		goto OOM;

#ifdef JEMALLOC_PROF
	if (opt_prof) {
		if ((cnt = prof_alloc_prep(usize)) == NULL)
			goto OOM;
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
		    small_maxclass) {
			size_t usize_promoted = (alignment == 0) ?
			    s2u(small_maxclass+1) : sa2u(small_maxclass+1,
			    alignment, NULL);
			assert(usize_promoted != 0);
			p = iallocm(usize_promoted, alignment, zero);
			if (p == NULL)
				goto OOM;
			arena_prof_promoted(p, usize);
		} else {
			p = iallocm(usize, alignment, zero);
			if (p == NULL)
				goto OOM;
		}

		if (rsize != NULL)
			*rsize = usize;
	} else
#endif
	{
		p = iallocm(usize, alignment, zero);
		if (p == NULL)
			goto OOM;
#ifndef JEMALLOC_STATS
		if (rsize != NULL)
#endif
		{
#ifdef JEMALLOC_STATS
			if (rsize != NULL)
#endif
				*rsize = usize;
		}
	}

	*ptr = p;
#ifdef JEMALLOC_STATS
	assert(usize == isalloc(p));
	ALLOCATED_ADD(usize, 0);
#endif
	return (ALLOCM_SUCCESS);
OOM:
#ifdef JEMALLOC_XMALLOC
	if (opt_xmalloc) {
		malloc_write("<jemalloc>: Error in allocm(): "
		    "out of memory\n");
		abort();
	}
#endif
	*ptr = NULL;
	return (ALLOCM_ERR_OOM);
}
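
/*
 * Illustrative allocm() usage (experimental API; names assume no
 * JEMALLOC_PREFIX): allocate at least 4096 zeroed bytes with 64-byte
 * alignment, and report the usable size:
 *
 *	void *p;
 *	size_t rsize;
 *	if (allocm(&p, &rsize, 4096, ALLOCM_LG_ALIGN(6) | ALLOCM_ZERO)
 *	    != ALLOCM_SUCCESS) {
 *		... handle OOM ...
 *	}
 */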

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(rallocm)(void **ptr, size_t *rsize, size_t size, size_t extra,
    int flags)
{
	void *p, *q;
	size_t usize;
#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
	size_t old_size;
#endif
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));
	bool zero = flags & ALLOCM_ZERO;
	bool no_move = flags & ALLOCM_NO_MOVE;
#ifdef JEMALLOC_PROF
	prof_thr_cnt_t *cnt;
	prof_ctx_t *old_ctx;
#endif

	assert(ptr != NULL);
	assert(*ptr != NULL);
	assert(size != 0);
	assert(SIZE_T_MAX - size >= extra);
	assert(malloc_initialized || malloc_initializer == pthread_self());

	p = *ptr;
#ifdef JEMALLOC_PROF
	if (opt_prof) {
		/*
		 * usize isn't knowable before iralloc() returns when extra is
		 * non-zero.  Therefore, compute its maximum possible value and
		 * use that in prof_alloc_prep() to decide whether to capture a
		 * backtrace.  prof_realloc() will use the actual usize to
		 * decide whether to sample.
		 */
		size_t max_usize = (alignment == 0) ? s2u(size+extra) :
		    sa2u(size+extra, alignment, NULL);
		old_size = isalloc(p);
		old_ctx = prof_ctx_get(p);
		if ((cnt = prof_alloc_prep(max_usize)) == NULL)
			goto OOM;
		/*
		 * Use minimum usize to determine whether promotion may happen.
		 */
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
		    && ((alignment == 0) ? s2u(size) : sa2u(size,
		    alignment, NULL)) <= small_maxclass) {
			q = iralloc(p, small_maxclass+1, (small_maxclass+1 >=
			    size+extra) ? 0 : size+extra - (small_maxclass+1),
			    alignment, zero, no_move);
			if (q == NULL)
				goto ERR;
			usize = isalloc(q);
			if (max_usize < PAGE_SIZE) {
				usize = max_usize;
				arena_prof_promoted(q, usize);
			}
		} else {
			q = iralloc(p, size, extra, alignment, zero, no_move);
			if (q == NULL)
				goto ERR;
			usize = isalloc(q);
		}
		prof_realloc(q, usize, cnt, old_size, old_ctx);
		if (rsize != NULL)
			*rsize = usize;
	} else
#endif
	{
#ifdef JEMALLOC_STATS
		old_size = isalloc(p);
#endif
		q = iralloc(p, size, extra, alignment, zero, no_move);
		if (q == NULL)
			goto ERR;
#ifndef JEMALLOC_STATS
		if (rsize != NULL)
#endif
		{
			usize = isalloc(q);
#ifdef JEMALLOC_STATS
			if (rsize != NULL)
#endif
				*rsize = usize;
		}
	}

	*ptr = q;
#ifdef JEMALLOC_STATS
	ALLOCATED_ADD(usize, old_size);
#endif
	return (ALLOCM_SUCCESS);
ERR:
	if (no_move)
		return (ALLOCM_ERR_NOT_MOVED);
#ifdef JEMALLOC_PROF
OOM:
#endif
#ifdef JEMALLOC_XMALLOC
	if (opt_xmalloc) {
		malloc_write("<jemalloc>: Error in rallocm(): "
		    "out of memory\n");
		abort();
	}
#endif
	return (ALLOCM_ERR_OOM);
}

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(sallocm)(const void *ptr, size_t *rsize, int flags)
{
	size_t sz;

	assert(malloc_initialized || malloc_initializer == pthread_self());

#ifdef JEMALLOC_IVSALLOC
	sz = ivsalloc(ptr);
#else
	assert(ptr != NULL);
	sz = isalloc(ptr);
#endif
	assert(rsize != NULL);
	*rsize = sz;

	return (ALLOCM_SUCCESS);
}

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(dallocm)(void *ptr, int flags)
{
#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
	size_t usize;
#endif

	assert(ptr != NULL);
	assert(malloc_initialized || malloc_initializer == pthread_self());

#ifdef JEMALLOC_STATS
	usize = isalloc(ptr);
#endif
#ifdef JEMALLOC_PROF
	if (opt_prof) {
#  ifndef JEMALLOC_STATS
		usize = isalloc(ptr);
#  endif
		prof_free(ptr, usize);
	}
#endif
#ifdef JEMALLOC_STATS
	ALLOCATED_ADD(0, usize);
#endif
	idalloc(ptr);

	return (ALLOCM_SUCCESS);
}
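
/*
 * Illustrative usage of the remaining *allocm() entry points, continuing the
 * allocm() example above (experimental API; names assume no JEMALLOC_PREFIX).
 * ALLOCM_NO_MOVE requests in-place resizing only; on ALLOCM_ERR_NOT_MOVED the
 * caller can retry with a moving resize:
 *
 *	if (rallocm(&p, &rsize, 8192, 0, ALLOCM_NO_MOVE) ==
 *	    ALLOCM_ERR_NOT_MOVED)
 *		rallocm(&p, &rsize, 8192, 0, 0);
 *
 *	sallocm(p, &rsize, 0);
 *	dallocm(p, 0);
 */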

/*
 * End non-standard functions.
 */
/******************************************************************************/

/*
 * The following functions are used by threading libraries for protection of
 * malloc during fork().
 */

void
jemalloc_prefork(void)
{
	unsigned i;

	/* Acquire all mutexes in a safe order. */

	malloc_mutex_lock(&arenas_lock);
	for (i = 0; i < narenas; i++) {
		if (arenas[i] != NULL)
			malloc_mutex_lock(&arenas[i]->lock);
	}

	malloc_mutex_lock(&base_mtx);

	malloc_mutex_lock(&huge_mtx);

#ifdef JEMALLOC_DSS
	malloc_mutex_lock(&dss_mtx);
#endif

#ifdef JEMALLOC_SWAP
	malloc_mutex_lock(&swap_mtx);
#endif
}

void
jemalloc_postfork(void)
{
	unsigned i;

	/* Release all mutexes, now that fork() has completed. */

#ifdef JEMALLOC_SWAP
	malloc_mutex_unlock(&swap_mtx);
#endif

#ifdef JEMALLOC_DSS
	malloc_mutex_unlock(&dss_mtx);
#endif

	malloc_mutex_unlock(&huge_mtx);

	malloc_mutex_unlock(&base_mtx);

	for (i = 0; i < narenas; i++) {
		if (arenas[i] != NULL)
			malloc_mutex_unlock(&arenas[i]->lock);
	}
	malloc_mutex_unlock(&arenas_lock);
}

/******************************************************************************/