jemalloc.c revision 7ca0fdfb85b2a9fc7a112e158892c098e004385b
1#define	JEMALLOC_C_
2#include "jemalloc/internal/jemalloc_internal.h"
3
4/******************************************************************************/
5/* Data. */
6
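/*
 * Thread-specific data: each thread's arena binding and, when stats are
 * enabled, its cumulative allocated/deallocated byte counters, accessed
 * below via the *_tsd_get()/*_tsd_set() helpers.
 */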
7malloc_tsd_data(, arenas, arena_t *, NULL)
8malloc_tsd_data(, thread_allocated, thread_allocated_t,
9    THREAD_ALLOCATED_INITIALIZER)
10
11/* Runtime configuration options. */
12const char	*je_malloc_conf JEMALLOC_ATTR(visibility("default"));
13#ifdef JEMALLOC_DEBUG
14bool	opt_abort = true;
15#  ifdef JEMALLOC_FILL
16bool	opt_junk = true;
17#  else
18bool	opt_junk = false;
19#  endif
20#else
21bool	opt_abort = false;
22bool	opt_junk = false;
23#endif
24size_t	opt_quarantine = ZU(0);
25bool	opt_redzone = false;
26bool	opt_utrace = false;
27bool	opt_valgrind = false;
28bool	opt_xmalloc = false;
29bool	opt_zero = false;
30size_t	opt_narenas = 0;
31
32unsigned	ncpus;
33
34malloc_mutex_t		arenas_lock;
35arena_t			**arenas;
36unsigned		narenas;
37
38/* Set to true once the allocator has been initialized. */
39static bool		malloc_initialized = false;
40
41#ifdef JEMALLOC_THREADED_INIT
42/* Used to let the initializing thread recursively allocate. */
43#  define NO_INITIALIZER	((unsigned long)0)
44#  define INITIALIZER		pthread_self()
45#  define IS_INITIALIZER	(malloc_initializer == pthread_self())
46static pthread_t		malloc_initializer = NO_INITIALIZER;
47#else
48#  define NO_INITIALIZER	false
49#  define INITIALIZER		true
50#  define IS_INITIALIZER	malloc_initializer
51static bool			malloc_initializer = NO_INITIALIZER;
52#endif
53
54/* Used to avoid initialization races. */
55static malloc_mutex_t	init_lock = MALLOC_MUTEX_INITIALIZER;
56
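/*
 * Record type and macro for utrace(2)-based allocation tracing: when the
 * "utrace" option is enabled, UTRACE() logs (input pointer, request size,
 * result pointer) for each traced call on systems that provide utrace(2).
 */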
57typedef struct {
58	void	*p;	/* Input pointer (as in realloc(p, s)). */
59	size_t	s;	/* Request size. */
60	void	*r;	/* Result pointer. */
61} malloc_utrace_t;
62
63#ifdef JEMALLOC_UTRACE
64#  define UTRACE(a, b, c) do {						\
65	if (opt_utrace) {						\
66		malloc_utrace_t ut;					\
67		ut.p = (a);						\
68		ut.s = (b);						\
69		ut.r = (c);						\
70		utrace(&ut, sizeof(ut));				\
71	}								\
72} while (0)
73#else
74#  define UTRACE(a, b, c)
75#endif
76
77/******************************************************************************/
78/* Function prototypes for non-inline static functions. */
79
80static void	stats_print_atexit(void);
81static unsigned	malloc_ncpus(void);
82static bool	malloc_conf_next(char const **opts_p, char const **k_p,
83    size_t *klen_p, char const **v_p, size_t *vlen_p);
84static void	malloc_conf_error(const char *msg, const char *k, size_t klen,
85    const char *v, size_t vlen);
86static void	malloc_conf_init(void);
87static bool	malloc_init_hard(void);
88static int	imemalign(void **memptr, size_t alignment, size_t size,
89    size_t min_alignment);
90
91/******************************************************************************/
92/*
93 * Begin miscellaneous support functions.
94 */
95
96/* Create a new arena and insert it into the arenas array at index ind. */
97arena_t *
98arenas_extend(unsigned ind)
99{
100	arena_t *ret;
101
102	ret = (arena_t *)base_alloc(sizeof(arena_t));
103	if (ret != NULL && arena_new(ret, ind) == false) {
104		arenas[ind] = ret;
105		return (ret);
106	}
107	/* Only reached if there is an OOM error. */
108
109	/*
110	 * OOM here is quite inconvenient to propagate, since dealing with it
111	 * would require a check for failure in the fast path.  Instead, punt
112	 * by using arenas[0].  In practice, this is an extremely unlikely
113	 * failure.
114	 */
115	malloc_write("<jemalloc>: Error initializing arena\n");
116	if (opt_abort)
117		abort();
118
119	return (arenas[0]);
120}
121
122/* Slow path, called only by choose_arena(). */
123arena_t *
124choose_arena_hard(void)
125{
126	arena_t *ret;
127
128	if (narenas > 1) {
129		unsigned i, choose, first_null;
130
131		choose = 0;
132		first_null = narenas;
133		malloc_mutex_lock(&arenas_lock);
134		assert(arenas[0] != NULL);
135		for (i = 1; i < narenas; i++) {
136			if (arenas[i] != NULL) {
137				/*
138				 * Choose the first arena that has the lowest
139				 * number of threads assigned to it.
140				 */
141				if (arenas[i]->nthreads <
142				    arenas[choose]->nthreads)
143					choose = i;
144			} else if (first_null == narenas) {
145				/*
146				 * Record the index of the first uninitialized
147				 * arena, in case all extant arenas are in use.
148				 *
149				 * NB: It is possible for there to be
150				 * discontinuities in terms of initialized
151				 * versus uninitialized arenas, due to the
152				 * "thread.arena" mallctl.
153				 */
154				first_null = i;
155			}
156		}
157
158		if (arenas[choose]->nthreads == 0 || first_null == narenas) {
159			/*
160			 * Use an unloaded arena, or the least loaded arena if
161			 * all arenas are already initialized.
162			 */
163			ret = arenas[choose];
164		} else {
165			/* Initialize a new arena. */
166			ret = arenas_extend(first_null);
167		}
168		ret->nthreads++;
169		malloc_mutex_unlock(&arenas_lock);
170	} else {
171		ret = arenas[0];
172		malloc_mutex_lock(&arenas_lock);
173		ret->nthreads++;
174		malloc_mutex_unlock(&arenas_lock);
175	}
176
177	arenas_tsd_set(&ret);
178
179	return (ret);
180}
181
182static void
183stats_print_atexit(void)
184{
185
186	if (config_tcache && config_stats) {
187		unsigned i;
188
189		/*
190		 * Merge stats from extant threads.  This is racy, since
191		 * individual threads do not lock when recording tcache stats
192		 * events.  As a consequence, the final stats may be slightly
193		 * out of date by the time they are reported, if other threads
194		 * continue to allocate.
195		 */
196		for (i = 0; i < narenas; i++) {
197			arena_t *arena = arenas[i];
198			if (arena != NULL) {
199				tcache_t *tcache;
200
201				/*
202				 * tcache_stats_merge() locks bins, so if any
203				 * code is introduced that acquires both arena
204				 * and bin locks in the opposite order,
205				 * deadlocks may result.
206				 */
207				malloc_mutex_lock(&arena->lock);
208				ql_foreach(tcache, &arena->tcache_ql, link) {
209					tcache_stats_merge(tcache, arena);
210				}
211				malloc_mutex_unlock(&arena->lock);
212			}
213		}
214	}
215	je_malloc_stats_print(NULL, NULL, NULL);
216}
217
218/*
219 * End miscellaneous support functions.
220 */
221/******************************************************************************/
222/*
223 * Begin initialization functions.
224 */
225
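/* Query the number of CPUs that are currently online. */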
226static unsigned
227malloc_ncpus(void)
228{
229	unsigned ret;
230	long result;
231
232	result = sysconf(_SC_NPROCESSORS_ONLN);
233	if (result == -1) {
234		/* Error. */
235		ret = 1;
236	} else
237		ret = (unsigned)result;
238
239	return (ret);
240}
241
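/*
 * Cleanup hook for a thread's arena binding: when the thread exits, drop its
 * contribution to the owning arena's thread count.
 */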
242void
243arenas_cleanup(void *arg)
244{
245	arena_t *arena = *(arena_t **)arg;
246
247	malloc_mutex_lock(&arenas_lock);
248	arena->nthreads--;
249	malloc_mutex_unlock(&arenas_lock);
250}
251
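/*
 * Fast-path initialization check; returns false once the allocator is fully
 * initialized, and otherwise defers to malloc_init_hard().
 */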
252static inline bool
253malloc_init(void)
254{
255
256	if (malloc_initialized == false)
257		return (malloc_init_hard());
258
259	return (false);
260}
261
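/*
 * Extract the next "key:value" pair from the options string at *opts_p,
 * returning pointers into the string (and lengths) for the key and value.
 * Returns true if parsing must stop, i.e. on end of input or malformed input.
 */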
262static bool
263malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
264    char const **v_p, size_t *vlen_p)
265{
266	bool accept;
267	const char *opts = *opts_p;
268
269	*k_p = opts;
270
271	for (accept = false; accept == false;) {
272		switch (*opts) {
273		case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
274		case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
275		case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
276		case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
277		case 'Y': case 'Z':
278		case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
279		case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
280		case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
281		case 's': case 't': case 'u': case 'v': case 'w': case 'x':
282		case 'y': case 'z':
283		case '0': case '1': case '2': case '3': case '4': case '5':
284		case '6': case '7': case '8': case '9':
285		case '_':
286			opts++;
287			break;
288		case ':':
289			opts++;
290			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
291			*v_p = opts;
292			accept = true;
293			break;
294		case '\0':
295			if (opts != *opts_p) {
296				malloc_write("<jemalloc>: Conf string ends "
297				    "with key\n");
298			}
299			return (true);
300		default:
301			malloc_write("<jemalloc>: Malformed conf string\n");
302			return (true);
303		}
304	}
305
306	for (accept = false; accept == false;) {
307		switch (*opts) {
308		case ',':
309			opts++;
310			/*
311			 * Look ahead one character here, because the next time
312			 * this function is called, it will assume that end of
313			 * input has been cleanly reached if no input remains,
314			 * but we have optimistically already consumed the
315			 * comma if one exists.
316			 */
317			if (*opts == '\0') {
318				malloc_write("<jemalloc>: Conf string ends "
319				    "with comma\n");
320			}
321			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
322			accept = true;
323			break;
324		case '\0':
325			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
326			accept = true;
327			break;
328		default:
329			opts++;
330			break;
331		}
332	}
333
334	*opts_p = opts;
335	return (false);
336}
337
338static void
339malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
340    size_t vlen)
341{
342
343	malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
344	    (int)vlen, v);
345}
346
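/*
 * Assemble runtime option settings from, in order of increasing priority:
 * (1) the compiled-in je_malloc_conf string, (2) the name of the
 * /etc/malloc.conf symbolic link, and (3) the MALLOC_CONF environment
 * variable (names possibly prefixed, depending on JEMALLOC_PREFIX).
 */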
347static void
348malloc_conf_init(void)
349{
350	unsigned i;
351	char buf[PATH_MAX + 1];
352	const char *opts, *k, *v;
353	size_t klen, vlen;
354
355	for (i = 0; i < 3; i++) {
356		/* Get runtime configuration. */
357		switch (i) {
358		case 0:
359			if (je_malloc_conf != NULL) {
360				/*
361				 * Use options that were compiled into the
362				 * program.
363				 */
364				opts = je_malloc_conf;
365			} else {
366				/* No configuration specified. */
367				buf[0] = '\0';
368				opts = buf;
369			}
370			break;
371		case 1: {
372			int linklen;
373			const char *linkname =
374#ifdef JEMALLOC_PREFIX
375			    "/etc/"JEMALLOC_PREFIX"malloc.conf"
376#else
377			    "/etc/malloc.conf"
378#endif
379			    ;
380
381			if ((linklen = readlink(linkname, buf,
382			    sizeof(buf) - 1)) != -1) {
383				/*
384				 * Use the target name of the "/etc/malloc.conf"
385				 * symbolic link as the options string.
386				 */
387				buf[linklen] = '\0';
388				opts = buf;
389			} else {
390				/* No configuration specified. */
391				buf[0] = '\0';
392				opts = buf;
393			}
394			break;
395		} case 2: {
396			const char *envname =
397#ifdef JEMALLOC_PREFIX
398			    JEMALLOC_CPREFIX"MALLOC_CONF"
399#else
400			    "MALLOC_CONF"
401#endif
402			    ;
403
404			if ((opts = getenv(envname)) != NULL) {
405				/*
406				 * Do nothing; opts is already initialized to
407				 * the value of the MALLOC_CONF environment
408				 * variable.
409				 */
410			} else {
411				/* No configuration specified. */
412				buf[0] = '\0';
413				opts = buf;
414			}
415			break;
416		} default:
417			/* NOTREACHED */
418			assert(false);
419			buf[0] = '\0';
420			opts = buf;
421		}
422
423		while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
424		    &vlen) == false) {
425#define	CONF_HANDLE_BOOL_HIT(o, n, hit)					\
426			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
427			    klen) == 0) {				\
428				if (strncmp("true", v, vlen) == 0 &&	\
429				    vlen == sizeof("true")-1)		\
430					o = true;			\
431				else if (strncmp("false", v, vlen) ==	\
432				    0 && vlen == sizeof("false")-1)	\
433					o = false;			\
434				else {					\
435					malloc_conf_error(		\
436					    "Invalid conf value",	\
437					    k, klen, v, vlen);		\
438				}					\
439				hit = true;				\
440			} else						\
441				hit = false;
442#define	CONF_HANDLE_BOOL(o, n) {					\
443			bool hit;					\
444			CONF_HANDLE_BOOL_HIT(o, n, hit);		\
445			if (hit)					\
446				continue;				\
447}
448#define	CONF_HANDLE_SIZE_T(o, n, min, max)				\
449			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
450			    klen) == 0) {				\
451				uintmax_t um;				\
452				char *end;				\
453									\
454				errno = 0;				\
455				um = malloc_strtoumax(v, &end, 0);	\
456				if (errno != 0 || (uintptr_t)end -	\
457				    (uintptr_t)v != vlen) {		\
458					malloc_conf_error(		\
459					    "Invalid conf value",	\
460					    k, klen, v, vlen);		\
461				} else if (um < min || um > max) {	\
462					malloc_conf_error(		\
463					    "Out-of-range conf value",	\
464					    k, klen, v, vlen);		\
465				} else					\
466					o = um;				\
467				continue;				\
468			}
469#define	CONF_HANDLE_SSIZE_T(o, n, min, max)				\
470			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
471			    klen) == 0) {				\
472				long l;					\
473				char *end;				\
474									\
475				errno = 0;				\
476				l = strtol(v, &end, 0);			\
477				if (errno != 0 || (uintptr_t)end -	\
478				    (uintptr_t)v != vlen) {		\
479					malloc_conf_error(		\
480					    "Invalid conf value",	\
481					    k, klen, v, vlen);		\
482				} else if (l < (ssize_t)min || l >	\
483				    (ssize_t)max) {			\
484					malloc_conf_error(		\
485					    "Out-of-range conf value",	\
486					    k, klen, v, vlen);		\
487				} else					\
488					o = l;				\
489				continue;				\
490			}
491#define	CONF_HANDLE_CHAR_P(o, n, d)					\
492			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
493			    klen) == 0) {				\
494				size_t cpylen = (vlen <=		\
495				    sizeof(o)-1) ? vlen :		\
496				    sizeof(o)-1;			\
497				strncpy(o, v, cpylen);			\
498				o[cpylen] = '\0';			\
499				continue;				\
500			}
501
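			/*
			 * Example: an options string such as
			 * "abort:true,narenas:4,lg_chunk:22" is consumed one
			 * key:value pair at a time by the handlers below.
			 */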
502			CONF_HANDLE_BOOL(opt_abort, abort)
503			/*
504			 * Chunks always require at least one header page, plus
505			 * one data page in the absence of redzones, or three
506			 * pages in the presence of redzones.  In order to
507			 * simplify options processing, fix the limit based on
508			 * config_fill.
509			 */
510			CONF_HANDLE_SIZE_T(opt_lg_chunk, lg_chunk, LG_PAGE +
511			    (config_fill ? 2 : 1), (sizeof(size_t) << 3) - 1)
512			CONF_HANDLE_SIZE_T(opt_narenas, narenas, 1, SIZE_T_MAX)
513			CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, lg_dirty_mult,
514			    -1, (sizeof(size_t) << 3) - 1)
515			CONF_HANDLE_BOOL(opt_stats_print, stats_print)
516			if (config_fill) {
517				CONF_HANDLE_BOOL(opt_junk, junk)
518				CONF_HANDLE_SIZE_T(opt_quarantine, quarantine,
519				    0, SIZE_T_MAX)
520				CONF_HANDLE_BOOL(opt_redzone, redzone)
521				CONF_HANDLE_BOOL(opt_zero, zero)
522			}
523			if (config_utrace) {
524				CONF_HANDLE_BOOL(opt_utrace, utrace)
525			}
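			/*
			 * Running under Valgrind: disable junk/zero filling,
			 * and enable redzones plus a quarantine so that
			 * Valgrind can flag out-of-bounds and use-after-free
			 * accesses.
			 */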
526			if (config_valgrind) {
527				bool hit;
528				CONF_HANDLE_BOOL_HIT(opt_valgrind,
529				    valgrind, hit)
530				if (config_fill && opt_valgrind && hit) {
531					opt_junk = false;
532					opt_zero = false;
533					if (opt_quarantine == 0) {
534						opt_quarantine =
535						    JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
536					}
537					opt_redzone = true;
538				}
539				if (hit)
540					continue;
541			}
542			if (config_xmalloc) {
543				CONF_HANDLE_BOOL(opt_xmalloc, xmalloc)
544			}
545			if (config_tcache) {
546				CONF_HANDLE_BOOL(opt_tcache, tcache)
547				CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
548				    lg_tcache_max, -1,
549				    (sizeof(size_t) << 3) - 1)
550			}
551			if (config_prof) {
552				CONF_HANDLE_BOOL(opt_prof, prof)
553				CONF_HANDLE_CHAR_P(opt_prof_prefix, prof_prefix,
554				    "jeprof")
555				CONF_HANDLE_BOOL(opt_prof_active, prof_active)
556				CONF_HANDLE_SSIZE_T(opt_lg_prof_sample,
557				    lg_prof_sample, 0,
558				    (sizeof(uint64_t) << 3) - 1)
559				CONF_HANDLE_BOOL(opt_prof_accum, prof_accum)
560				CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
561				    lg_prof_interval, -1,
562				    (sizeof(uint64_t) << 3) - 1)
563				CONF_HANDLE_BOOL(opt_prof_gdump, prof_gdump)
564				CONF_HANDLE_BOOL(opt_prof_leak, prof_leak)
565			}
566			malloc_conf_error("Invalid conf pair", k, klen, v,
567			    vlen);
568#undef CONF_HANDLE_BOOL
569#undef CONF_HANDLE_SIZE_T
570#undef CONF_HANDLE_SSIZE_T
571#undef CONF_HANDLE_CHAR_P
572		}
573	}
574}
575
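/*
 * Slow-path bootstrap, serialized by init_lock: runs once, brings up each
 * subsystem in dependency order, and allocates and sizes the arenas array.
 */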
576static bool
577malloc_init_hard(void)
578{
579	arena_t *init_arenas[1];
580
581	malloc_mutex_lock(&init_lock);
582	if (malloc_initialized || IS_INITIALIZER) {
583		/*
584		 * Another thread initialized the allocator before this one
585		 * acquired init_lock, or this thread is the initializing
586		 * thread, and it is recursively allocating.
587		 */
588		malloc_mutex_unlock(&init_lock);
589		return (false);
590	}
591#ifdef JEMALLOC_THREADED_INIT
592	if (malloc_initializer != NO_INITIALIZER && IS_INITIALIZER == false) {
593		/* Busy-wait until the initializing thread completes. */
594		do {
595			malloc_mutex_unlock(&init_lock);
596			CPU_SPINWAIT;
597			malloc_mutex_lock(&init_lock);
598		} while (malloc_initialized == false);
599		malloc_mutex_unlock(&init_lock);
600		return (false);
601	}
602#endif
603	malloc_initializer = INITIALIZER;
604
605	malloc_tsd_boot();
606	if (config_prof)
607		prof_boot0();
608
609	malloc_conf_init();
610
611#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE))
612	/* Register fork handlers. */
613	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
614	    jemalloc_postfork_child) != 0) {
615		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
616		if (opt_abort)
617			abort();
618	}
619#endif
620
621	if (opt_stats_print) {
622		/* Print statistics at exit. */
623		if (atexit(stats_print_atexit) != 0) {
624			malloc_write("<jemalloc>: Error in atexit()\n");
625			if (opt_abort)
626				abort();
627		}
628	}
629
630	if (base_boot()) {
631		malloc_mutex_unlock(&init_lock);
632		return (true);
633	}
634
635	if (chunk_boot0()) {
636		malloc_mutex_unlock(&init_lock);
637		return (true);
638	}
639
640	if (ctl_boot()) {
641		malloc_mutex_unlock(&init_lock);
642		return (true);
643	}
644
645	if (config_prof)
646		prof_boot1();
647
648	arena_boot();
649
650	if (config_tcache && tcache_boot0()) {
651		malloc_mutex_unlock(&init_lock);
652		return (true);
653	}
654
655	if (huge_boot()) {
656		malloc_mutex_unlock(&init_lock);
657		return (true);
658	}
659
660	if (malloc_mutex_init(&arenas_lock))
661		return (true);
662
663	/*
664	 * Create enough scaffolding to allow recursive allocation in
665	 * malloc_ncpus().
666	 */
667	narenas = 1;
668	arenas = init_arenas;
669	memset(arenas, 0, sizeof(arena_t *) * narenas);
670
671	/*
672	 * Initialize one arena here.  The rest are lazily created in
673	 * choose_arena_hard().
674	 */
675	arenas_extend(0);
676	if (arenas[0] == NULL) {
677		malloc_mutex_unlock(&init_lock);
678		return (true);
679	}
680
681	/* Initialize allocation counters before any allocations can occur. */
682	if (config_stats && thread_allocated_tsd_boot()) {
683		malloc_mutex_unlock(&init_lock);
684		return (true);
685	}
686
687	if (arenas_tsd_boot()) {
688		malloc_mutex_unlock(&init_lock);
689		return (true);
690	}
691
692	if (config_tcache && tcache_boot1()) {
693		malloc_mutex_unlock(&init_lock);
694		return (true);
695	}
696
697	if (config_fill && quarantine_boot()) {
698		malloc_mutex_unlock(&init_lock);
699		return (true);
700	}
701
702	if (config_prof && prof_boot2()) {
703		malloc_mutex_unlock(&init_lock);
704		return (true);
705	}
706
707	/* Get number of CPUs. */
708	malloc_mutex_unlock(&init_lock);
709	ncpus = malloc_ncpus();
710	malloc_mutex_lock(&init_lock);
711
712	if (chunk_boot1()) {
713		malloc_mutex_unlock(&init_lock);
714		return (true);
715	}
716
717	if (mutex_boot()) {
718		malloc_mutex_unlock(&init_lock);
719		return (true);
720	}
721
722	if (opt_narenas == 0) {
723		/*
724		 * For SMP systems, create more than one arena per CPU by
725		 * default.
726		 */
727		if (ncpus > 1)
728			opt_narenas = ncpus << 2;
729		else
730			opt_narenas = 1;
731	}
732	narenas = opt_narenas;
733	/*
734	 * Make sure that the arenas array can be allocated.  In practice, this
735	 * limit is enough to allow the allocator to function, but the ctl
736	 * machinery would run out of memory well before narenas approached it.
737	 */
738	if (narenas > chunksize / sizeof(arena_t *)) {
739		narenas = chunksize / sizeof(arena_t *);
740		malloc_printf("<jemalloc>: Reducing narenas to limit (%u)\n",
741		    narenas);
742	}
743
744	/* Allocate and initialize arenas. */
745	arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
746	if (arenas == NULL) {
747		malloc_mutex_unlock(&init_lock);
748		return (true);
749	}
750	/*
751	 * Zero the array.  In practice, this should always be pre-zeroed,
752	 * since it was just mmap()ed, but let's be sure.
753	 */
754	memset(arenas, 0, sizeof(arena_t *) * narenas);
755	/* Copy the pointer to the one arena that was already initialized. */
756	arenas[0] = init_arenas[0];
757
758	malloc_initialized = true;
759	malloc_mutex_unlock(&init_lock);
760	return (false);
761}
762
763/*
764 * End initialization functions.
765 */
766/******************************************************************************/
767/*
768 * Begin malloc(3)-compatible functions.
769 */
770
771JEMALLOC_ATTR(malloc)
772JEMALLOC_ATTR(visibility("default"))
773void *
774je_malloc(size_t size)
775{
776	void *ret;
777	size_t usize;
778	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
779
780	if (malloc_init()) {
781		ret = NULL;
782		goto label_oom;
783	}
784
785	if (size == 0)
786		size = 1;
787
788	if (config_prof && opt_prof) {
789		usize = s2u(size);
790		PROF_ALLOC_PREP(1, usize, cnt);
791		if (cnt == NULL) {
792			ret = NULL;
793			goto label_oom;
794		}
795		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
796		    SMALL_MAXCLASS) {
797			ret = imalloc(SMALL_MAXCLASS+1);
798			if (ret != NULL)
799				arena_prof_promoted(ret, usize);
800		} else
801			ret = imalloc(size);
802	} else {
803		if (config_stats || (config_valgrind && opt_valgrind))
804			usize = s2u(size);
805		ret = imalloc(size);
806	}
807
808label_oom:
809	if (ret == NULL) {
810		if (config_xmalloc && opt_xmalloc) {
811			malloc_write("<jemalloc>: Error in malloc(): "
812			    "out of memory\n");
813			abort();
814		}
815		errno = ENOMEM;
816	}
817	if (config_prof && opt_prof && ret != NULL)
818		prof_malloc(ret, usize, cnt);
819	if (config_stats && ret != NULL) {
820		assert(usize == isalloc(ret, config_prof));
821		thread_allocated_tsd_get()->allocated += usize;
822	}
823	UTRACE(0, size, ret);
824	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
825	return (ret);
826}
827
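/*
 * Shared implementation for posix_memalign(), aligned_alloc(), memalign(),
 * and valloc(); min_alignment is the smallest alignment each caller permits.
 */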
828JEMALLOC_ATTR(nonnull(1))
829#ifdef JEMALLOC_PROF
830/*
831 * Avoid any uncertainty as to how many backtrace frames to ignore in
832 * PROF_ALLOC_PREP().
833 */
834JEMALLOC_ATTR(noinline)
835#endif
836static int
837imemalign(void **memptr, size_t alignment, size_t size,
838    size_t min_alignment)
839{
840	int ret;
841	size_t usize;
842	void *result;
843	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
844
845	assert(min_alignment != 0);
846
847	if (malloc_init())
848		result = NULL;
849	else {
850		if (size == 0)
851			size = 1;
852
853		/* Make sure that alignment is a large enough power of 2. */
854		if (((alignment - 1) & alignment) != 0
855		    || (alignment < min_alignment)) {
856			if (config_xmalloc && opt_xmalloc) {
857				malloc_write("<jemalloc>: Error allocating "
858				    "aligned memory: invalid alignment\n");
859				abort();
860			}
861			result = NULL;
862			ret = EINVAL;
863			goto label_return;
864		}
865
866		usize = sa2u(size, alignment);
867		if (usize == 0) {
868			result = NULL;
869			ret = ENOMEM;
870			goto label_return;
871		}
872
873		if (config_prof && opt_prof) {
874			PROF_ALLOC_PREP(2, usize, cnt);
875			if (cnt == NULL) {
876				result = NULL;
877				ret = EINVAL;
878			} else {
879				if (prof_promote && (uintptr_t)cnt !=
880				    (uintptr_t)1U && usize <= SMALL_MAXCLASS) {
881					assert(sa2u(SMALL_MAXCLASS+1,
882					    alignment) != 0);
883					result = ipalloc(sa2u(SMALL_MAXCLASS+1,
884					    alignment), alignment, false);
885					if (result != NULL) {
886						arena_prof_promoted(result,
887						    usize);
888					}
889				} else {
890					result = ipalloc(usize, alignment,
891					    false);
892				}
893			}
894		} else
895			result = ipalloc(usize, alignment, false);
896	}
897
898	if (result == NULL) {
899		if (config_xmalloc && opt_xmalloc) {
900			malloc_write("<jemalloc>: Error allocating aligned "
901			    "memory: out of memory\n");
902			abort();
903		}
904		ret = ENOMEM;
905		goto label_return;
906	}
907
908	*memptr = result;
909	ret = 0;
910
911label_return:
912	if (config_stats && result != NULL) {
913		assert(usize == isalloc(result, config_prof));
914		thread_allocated_tsd_get()->allocated += usize;
915	}
916	if (config_prof && opt_prof && result != NULL)
917		prof_malloc(result, usize, cnt);
918	UTRACE(0, size, result);
919	return (ret);
920}
921
922JEMALLOC_ATTR(nonnull(1))
923JEMALLOC_ATTR(visibility("default"))
924int
925je_posix_memalign(void **memptr, size_t alignment, size_t size)
926{
927	int ret = imemalign(memptr, alignment, size, sizeof(void *));
928	JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr,
929	    config_prof), false);
930	return (ret);
931}
932
933JEMALLOC_ATTR(malloc)
934JEMALLOC_ATTR(visibility("default"))
935void *
936je_aligned_alloc(size_t alignment, size_t size)
937{
938	void *ret;
939	int err;
940
941	if ((err = imemalign(&ret, alignment, size, 1)) != 0) {
942		ret = NULL;
943		errno = err;
944	}
945	JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof),
946	    false);
947	return (ret);
948}
949
950JEMALLOC_ATTR(malloc)
951JEMALLOC_ATTR(visibility("default"))
952void *
953je_calloc(size_t num, size_t size)
954{
955	void *ret;
956	size_t num_size;
957	size_t usize;
958	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
959
960	if (malloc_init()) {
961		num_size = 0;
962		ret = NULL;
963		goto label_return;
964	}
965
966	num_size = num * size;
967	if (num_size == 0) {
968		if (num == 0 || size == 0)
969			num_size = 1;
970		else {
971			ret = NULL;
972			goto label_return;
973		}
974	/*
975	 * Try to avoid division here.  We know that it isn't possible to
976	 * overflow during multiplication if neither operand uses any of the
977	 * most significant half of the bits in a size_t.
978	 */
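	/*
	 * For example, with a 64-bit size_t, if num and size both fit in the
	 * low 32 bits, then num * size fits in 64 bits and cannot wrap, so the
	 * division-based check is skipped entirely.
	 */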
979	} else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
980	    && (num_size / size != num)) {
981		/* size_t overflow. */
982		ret = NULL;
983		goto label_return;
984	}
985
986	if (config_prof && opt_prof) {
987		usize = s2u(num_size);
988		PROF_ALLOC_PREP(1, usize, cnt);
989		if (cnt == NULL) {
990			ret = NULL;
991			goto label_return;
992		}
993		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize
994		    <= SMALL_MAXCLASS) {
995			ret = icalloc(SMALL_MAXCLASS+1);
996			if (ret != NULL)
997				arena_prof_promoted(ret, usize);
998		} else
999			ret = icalloc(num_size);
1000	} else {
1001		if (config_stats || (config_valgrind && opt_valgrind))
1002			usize = s2u(num_size);
1003		ret = icalloc(num_size);
1004	}
1005
1006label_return:
1007	if (ret == NULL) {
1008		if (config_xmalloc && opt_xmalloc) {
1009			malloc_write("<jemalloc>: Error in calloc(): out of "
1010			    "memory\n");
1011			abort();
1012		}
1013		errno = ENOMEM;
1014	}
1015
1016	if (config_prof && opt_prof && ret != NULL)
1017		prof_malloc(ret, usize, cnt);
1018	if (config_stats && ret != NULL) {
1019		assert(usize == isalloc(ret, config_prof));
1020		thread_allocated_tsd_get()->allocated += usize;
1021	}
1022	UTRACE(0, num_size, ret);
1023	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true);
1024	return (ret);
1025}
1026
1027JEMALLOC_ATTR(visibility("default"))
1028void *
1029je_realloc(void *ptr, size_t size)
1030{
1031	void *ret;
1032	size_t usize;
1033	size_t old_size = 0;
1034	size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
1035	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
1036	prof_ctx_t *old_ctx JEMALLOC_CC_SILENCE_INIT(NULL);
1037
1038	if (size == 0) {
1039		if (ptr != NULL) {
1040			/* realloc(ptr, 0) is equivalent to free(ptr). */
1041			if (config_prof) {
1042				old_size = isalloc(ptr, true);
1043				if (config_valgrind && opt_valgrind)
1044					old_rzsize = p2rz(ptr);
1045			} else if (config_stats) {
1046				old_size = isalloc(ptr, false);
1047				if (config_valgrind && opt_valgrind)
1048					old_rzsize = u2rz(old_size);
1049			} else if (config_valgrind && opt_valgrind) {
1050				old_size = isalloc(ptr, false);
1051				old_rzsize = u2rz(old_size);
1052			}
1053			if (config_prof && opt_prof) {
1054				old_ctx = prof_ctx_get(ptr);
1055				cnt = NULL;
1056			}
1057			iqalloc(ptr);
1058			ret = NULL;
1059			goto label_return;
1060		} else
1061			size = 1;
1062	}
1063
1064	if (ptr != NULL) {
1065		assert(malloc_initialized || IS_INITIALIZER);
1066
1067		if (config_prof) {
1068			old_size = isalloc(ptr, true);
1069			if (config_valgrind && opt_valgrind)
1070				old_rzsize = p2rz(ptr);
1071		} else if (config_stats) {
1072			old_size = isalloc(ptr, false);
1073			if (config_valgrind && opt_valgrind)
1074				old_rzsize = u2rz(old_size);
1075		} else if (config_valgrind && opt_valgrind) {
1076			old_size = isalloc(ptr, false);
1077			old_rzsize = u2rz(old_size);
1078		}
1079		if (config_prof && opt_prof) {
1080			usize = s2u(size);
1081			old_ctx = prof_ctx_get(ptr);
1082			PROF_ALLOC_PREP(1, usize, cnt);
1083			if (cnt == NULL) {
1084				old_ctx = NULL;
1085				ret = NULL;
1086				goto label_oom;
1087			}
1088			if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U &&
1089			    usize <= SMALL_MAXCLASS) {
1090				ret = iralloc(ptr, SMALL_MAXCLASS+1, 0, 0,
1091				    false, false);
1092				if (ret != NULL)
1093					arena_prof_promoted(ret, usize);
1094				else
1095					old_ctx = NULL;
1096			} else {
1097				ret = iralloc(ptr, size, 0, 0, false, false);
1098				if (ret == NULL)
1099					old_ctx = NULL;
1100			}
1101		} else {
1102			if (config_stats || (config_valgrind && opt_valgrind))
1103				usize = s2u(size);
1104			ret = iralloc(ptr, size, 0, 0, false, false);
1105		}
1106
1107label_oom:
1108		if (ret == NULL) {
1109			if (config_xmalloc && opt_xmalloc) {
1110				malloc_write("<jemalloc>: Error in realloc(): "
1111				    "out of memory\n");
1112				abort();
1113			}
1114			errno = ENOMEM;
1115		}
1116	} else {
1117		/* realloc(NULL, size) is equivalent to malloc(size). */
1118		if (config_prof && opt_prof)
1119			old_ctx = NULL;
1120		if (malloc_init()) {
1121			if (config_prof && opt_prof)
1122				cnt = NULL;
1123			ret = NULL;
1124		} else {
1125			if (config_prof && opt_prof) {
1126				usize = s2u(size);
1127				PROF_ALLOC_PREP(1, usize, cnt);
1128				if (cnt == NULL)
1129					ret = NULL;
1130				else {
1131					if (prof_promote && (uintptr_t)cnt !=
1132					    (uintptr_t)1U && usize <=
1133					    SMALL_MAXCLASS) {
1134						ret = imalloc(SMALL_MAXCLASS+1);
1135						if (ret != NULL) {
1136							arena_prof_promoted(ret,
1137							    usize);
1138						}
1139					} else
1140						ret = imalloc(size);
1141				}
1142			} else {
1143				if (config_stats || (config_valgrind &&
1144				    opt_valgrind))
1145					usize = s2u(size);
1146				ret = imalloc(size);
1147			}
1148		}
1149
1150		if (ret == NULL) {
1151			if (config_xmalloc && opt_xmalloc) {
1152				malloc_write("<jemalloc>: Error in realloc(): "
1153				    "out of memory\n");
1154				abort();
1155			}
1156			errno = ENOMEM;
1157		}
1158	}
1159
1160label_return:
1161	if (config_prof && opt_prof)
1162		prof_realloc(ret, usize, cnt, old_size, old_ctx);
1163	if (config_stats && ret != NULL) {
1164		thread_allocated_t *ta;
1165		assert(usize == isalloc(ret, config_prof));
1166		ta = thread_allocated_tsd_get();
1167		ta->allocated += usize;
1168		ta->deallocated += old_size;
1169	}
1170	UTRACE(ptr, size, ret);
1171	JEMALLOC_VALGRIND_REALLOC(ret, usize, ptr, old_size, old_rzsize, false);
1172	return (ret);
1173}
1174
1175JEMALLOC_ATTR(visibility("default"))
1176void
1177je_free(void *ptr)
1178{
1179
1180	UTRACE(ptr, 0, 0);
1181	if (ptr != NULL) {
1182		size_t usize;
1183		size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
1184
1185		assert(malloc_initialized || IS_INITIALIZER);
1186
1187		if (config_prof && opt_prof) {
1188			usize = isalloc(ptr, config_prof);
1189			prof_free(ptr, usize);
1190		} else if (config_stats || config_valgrind)
1191			usize = isalloc(ptr, config_prof);
1192		if (config_stats)
1193			thread_allocated_tsd_get()->deallocated += usize;
1194		if (config_valgrind && opt_valgrind)
1195			rzsize = p2rz(ptr);
1196		iqalloc(ptr);
1197		JEMALLOC_VALGRIND_FREE(ptr, rzsize);
1198	}
1199}
1200
1201/*
1202 * End malloc(3)-compatible functions.
1203 */
1204/******************************************************************************/
1205/*
1206 * Begin non-standard override functions.
1207 */
1208
1209#ifdef JEMALLOC_OVERRIDE_MEMALIGN
1210JEMALLOC_ATTR(malloc)
1211JEMALLOC_ATTR(visibility("default"))
1212void *
1213je_memalign(size_t alignment, size_t size)
1214{
1215	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
1216	imemalign(&ret, alignment, size, 1);
1217	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
1218	return (ret);
1219}
1220#endif
1221
1222#ifdef JEMALLOC_OVERRIDE_VALLOC
1223JEMALLOC_ATTR(malloc)
1224JEMALLOC_ATTR(visibility("default"))
1225void *
1226je_valloc(size_t size)
1227{
1228	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
1229	imemalign(&ret, PAGE, size, 1);
1230	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
1231	return (ret);
1232}
1233#endif
1234
1235/*
1236 * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
1237 * #define je_malloc malloc
1238 */
1239#define	malloc_is_malloc 1
1240#define	is_malloc_(a) malloc_is_ ## a
1241#define	is_malloc(a) is_malloc_(a)
1242
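/*
 * When that mapping is in effect, is_malloc(je_malloc) expands via
 * is_malloc_(malloc) to malloc_is_malloc, i.e. 1; otherwise it expands to the
 * undefined token malloc_is_je_malloc, which the #if below evaluates as 0.
 */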
1243#if ((is_malloc(je_malloc) == 1) && defined(__GLIBC__) && !defined(__UCLIBC__))
1244/*
1245 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
1246 * to inconsistently reference libc's malloc(3)-compatible functions
1247 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
1248 *
1249 * These definitions interpose hooks in glibc.  The functions are actually
1250 * passed an extra argument for the caller return address, which will be
1251 * ignored.
1252 */
1253JEMALLOC_ATTR(visibility("default"))
1254void (* const __free_hook)(void *ptr) = je_free;
1255
1256JEMALLOC_ATTR(visibility("default"))
1257void *(* const __malloc_hook)(size_t size) = je_malloc;
1258
1259JEMALLOC_ATTR(visibility("default"))
1260void *(* const __realloc_hook)(void *ptr, size_t size) = je_realloc;
1261
1262JEMALLOC_ATTR(visibility("default"))
1263void *(* const __memalign_hook)(size_t alignment, size_t size) = je_memalign;
1264#endif
1265
1266/*
1267 * End non-standard override functions.
1268 */
1269/******************************************************************************/
1270/*
1271 * Begin non-standard functions.
1272 */
1273
1274JEMALLOC_ATTR(visibility("default"))
1275size_t
1276je_malloc_usable_size(const void *ptr)
1277{
1278	size_t ret;
1279
1280	assert(malloc_initialized || IS_INITIALIZER);
1281
1282	if (config_ivsalloc)
1283		ret = ivsalloc(ptr, config_prof);
1284	else
1285		ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0;
1286
1287	return (ret);
1288}
1289
1290JEMALLOC_ATTR(visibility("default"))
1291void
1292je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
1293    const char *opts)
1294{
1295
1296	stats_print(write_cb, cbopaque, opts);
1297}
1298
1299JEMALLOC_ATTR(visibility("default"))
1300int
1301je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
1302    size_t newlen)
1303{
1304
1305	if (malloc_init())
1306		return (EAGAIN);
1307
1308	return (ctl_byname(name, oldp, oldlenp, newp, newlen));
1309}
1310
1311JEMALLOC_ATTR(visibility("default"))
1312int
1313je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
1314{
1315
1316	if (malloc_init())
1317		return (EAGAIN);
1318
1319	return (ctl_nametomib(name, mibp, miblenp));
1320}
1321
1322JEMALLOC_ATTR(visibility("default"))
1323int
1324je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
1325  void *newp, size_t newlen)
1326{
1327
1328	if (malloc_init())
1329		return (EAGAIN);
1330
1331	return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
1332}
1333
1334/*
1335 * End non-standard functions.
1336 */
1337/******************************************************************************/
1338/*
1339 * Begin experimental functions.
1340 */
1341#ifdef JEMALLOC_EXPERIMENTAL
1342
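/*
 * Internal dispatcher for the experimental *allocm() entry points; usize must
 * already be the usable size computed via s2u()/sa2u(), as asserted below.
 */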
1343JEMALLOC_INLINE void *
1344iallocm(size_t usize, size_t alignment, bool zero)
1345{
1346
1347	assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize,
1348	    alignment)));
1349
1350	if (alignment != 0)
1351		return (ipalloc(usize, alignment, zero));
1352	else if (zero)
1353		return (icalloc(usize));
1354	else
1355		return (imalloc(usize));
1356}
1357
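/*
 * Experimental allocation entry point, e.g. allocm(&p, &rsize, size,
 * ALLOCM_ZERO) for zeroed memory; the low bits of flags encode lg(alignment),
 * typically supplied via the experimental API's alignment macros (not shown
 * here).
 */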
1358JEMALLOC_ATTR(nonnull(1))
1359JEMALLOC_ATTR(visibility("default"))
1360int
1361je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
1362{
1363	void *p;
1364	size_t usize;
1365	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
1366	    & (SIZE_T_MAX-1));
1367	bool zero = flags & ALLOCM_ZERO;
1368	prof_thr_cnt_t *cnt;
1369
1370	assert(ptr != NULL);
1371	assert(size != 0);
1372
1373	if (malloc_init())
1374		goto label_oom;
1375
1376	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
1377	if (usize == 0)
1378		goto label_oom;
1379
1380	if (config_prof && opt_prof) {
1381		PROF_ALLOC_PREP(1, usize, cnt);
1382		if (cnt == NULL)
1383			goto label_oom;
1384		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
1385		    SMALL_MAXCLASS) {
1386			size_t usize_promoted = (alignment == 0) ?
1387			    s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1,
1388			    alignment);
1389			assert(usize_promoted != 0);
1390			p = iallocm(usize_promoted, alignment, zero);
1391			if (p == NULL)
1392				goto label_oom;
1393			arena_prof_promoted(p, usize);
1394		} else {
1395			p = iallocm(usize, alignment, zero);
1396			if (p == NULL)
1397				goto label_oom;
1398		}
1399		prof_malloc(p, usize, cnt);
1400	} else {
1401		p = iallocm(usize, alignment, zero);
1402		if (p == NULL)
1403			goto label_oom;
1404	}
1405	if (rsize != NULL)
1406		*rsize = usize;
1407
1408	*ptr = p;
1409	if (config_stats) {
1410		assert(usize == isalloc(p, config_prof));
1411		thread_allocated_tsd_get()->allocated += usize;
1412	}
1413	UTRACE(0, size, p);
1414	JEMALLOC_VALGRIND_MALLOC(true, p, usize, zero);
1415	return (ALLOCM_SUCCESS);
1416label_oom:
1417	if (config_xmalloc && opt_xmalloc) {
1418		malloc_write("<jemalloc>: Error in allocm(): "
1419		    "out of memory\n");
1420		abort();
1421	}
1422	*ptr = NULL;
1423	UTRACE(0, size, 0);
1424	return (ALLOCM_ERR_OOM);
1425}
1426
1427JEMALLOC_ATTR(nonnull(1))
1428JEMALLOC_ATTR(visibility("default"))
1429int
1430je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
1431{
1432	void *p, *q;
1433	size_t usize;
1434	size_t old_size;
1435	size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
1436	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
1437	    & (SIZE_T_MAX-1));
1438	bool zero = flags & ALLOCM_ZERO;
1439	bool no_move = flags & ALLOCM_NO_MOVE;
1440	prof_thr_cnt_t *cnt;
1441
1442	assert(ptr != NULL);
1443	assert(*ptr != NULL);
1444	assert(size != 0);
1445	assert(SIZE_T_MAX - size >= extra);
1446	assert(malloc_initialized || IS_INITIALIZER);
1447
1448	p = *ptr;
1449	if (config_prof && opt_prof) {
1450		/*
1451		 * usize isn't knowable before iralloc() returns when extra is
1452		 * non-zero.  Therefore, compute its maximum possible value and
1453		 * use that in PROF_ALLOC_PREP() to decide whether to capture a
1454		 * backtrace.  prof_realloc() will use the actual usize to
1455		 * decide whether to sample.
1456		 */
1457		size_t max_usize = (alignment == 0) ? s2u(size+extra) :
1458		    sa2u(size+extra, alignment);
1459		prof_ctx_t *old_ctx = prof_ctx_get(p);
1460		old_size = isalloc(p, true);
1461		if (config_valgrind && opt_valgrind)
1462			old_rzsize = p2rz(p);
1463		PROF_ALLOC_PREP(1, max_usize, cnt);
1464		if (cnt == NULL)
1465			goto label_oom;
1466		/*
1467		 * Use minimum usize to determine whether promotion may happen.
1468		 */
1469		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
1470		    && ((alignment == 0) ? s2u(size) : sa2u(size, alignment))
1471		    <= SMALL_MAXCLASS) {
1472			q = iralloc(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
1473			    size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
1474			    alignment, zero, no_move);
1475			if (q == NULL)
1476				goto label_err;
1477			if (max_usize < PAGE) {
1478				usize = max_usize;
1479				arena_prof_promoted(q, usize);
1480			} else
1481				usize = isalloc(q, config_prof);
1482		} else {
1483			q = iralloc(p, size, extra, alignment, zero, no_move);
1484			if (q == NULL)
1485				goto label_err;
1486			usize = isalloc(q, config_prof);
1487		}
1488		prof_realloc(q, usize, cnt, old_size, old_ctx);
1489		if (rsize != NULL)
1490			*rsize = usize;
1491	} else {
1492		if (config_stats) {
1493			old_size = isalloc(p, false);
1494			if (config_valgrind && opt_valgrind)
1495				old_rzsize = u2rz(old_size);
1496		} else if (config_valgrind && opt_valgrind) {
1497			old_size = isalloc(p, false);
1498			old_rzsize = u2rz(old_size);
1499		}
1500		q = iralloc(p, size, extra, alignment, zero, no_move);
1501		if (q == NULL)
1502			goto label_err;
1503		if (config_stats)
1504			usize = isalloc(q, config_prof);
1505		if (rsize != NULL) {
1506			if (config_stats == false)
1507				usize = isalloc(q, config_prof);
1508			*rsize = usize;
1509		}
1510	}
1511
1512	*ptr = q;
1513	if (config_stats) {
1514		thread_allocated_t *ta;
1515		ta = thread_allocated_tsd_get();
1516		ta->allocated += usize;
1517		ta->deallocated += old_size;
1518	}
1519	UTRACE(p, size, q);
1520	JEMALLOC_VALGRIND_REALLOC(q, usize, p, old_size, old_rzsize, zero);
1521	return (ALLOCM_SUCCESS);
1522label_err:
1523	if (no_move) {
1524		UTRACE(p, size, q);
1525		return (ALLOCM_ERR_NOT_MOVED);
1526	}
1527label_oom:
1528	if (config_xmalloc && opt_xmalloc) {
1529		malloc_write("<jemalloc>: Error in rallocm(): "
1530		    "out of memory\n");
1531		abort();
1532	}
1533	UTRACE(p, size, 0);
1534	return (ALLOCM_ERR_OOM);
1535}
1536
1537JEMALLOC_ATTR(nonnull(1))
1538JEMALLOC_ATTR(visibility("default"))
1539int
1540je_sallocm(const void *ptr, size_t *rsize, int flags)
1541{
1542	size_t sz;
1543
1544	assert(malloc_initialized || IS_INITIALIZER);
1545
1546	if (config_ivsalloc)
1547		sz = ivsalloc(ptr, config_prof);
1548	else {
1549		assert(ptr != NULL);
1550		sz = isalloc(ptr, config_prof);
1551	}
1552	assert(rsize != NULL);
1553	*rsize = sz;
1554
1555	return (ALLOCM_SUCCESS);
1556}
1557
1558JEMALLOC_ATTR(nonnull(1))
1559JEMALLOC_ATTR(visibility("default"))
1560int
1561je_dallocm(void *ptr, int flags)
1562{
1563	size_t usize;
1564	size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
1565
1566	assert(ptr != NULL);
1567	assert(malloc_initialized || IS_INITIALIZER);
1568
1569	UTRACE(ptr, 0, 0);
1570	if (config_stats || config_valgrind)
1571		usize = isalloc(ptr, config_prof);
1572	if (config_prof && opt_prof) {
1573		if (config_stats == false && config_valgrind == false)
1574			usize = isalloc(ptr, config_prof);
1575		prof_free(ptr, usize);
1576	}
1577	if (config_stats)
1578		thread_allocated_tsd_get()->deallocated += usize;
1579	if (config_valgrind && opt_valgrind)
1580		rzsize = p2rz(ptr);
1581	iqalloc(ptr);
1582	JEMALLOC_VALGRIND_FREE(ptr, rzsize);
1583
1584	return (ALLOCM_SUCCESS);
1585}
1586
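/*
 * Compute the usable size that an equivalent allocm() call would report,
 * without allocating anything.
 */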
1587JEMALLOC_ATTR(visibility("default"))
1588int
1589je_nallocm(size_t *rsize, size_t size, int flags)
1590{
1591	size_t usize;
1592	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
1593	    & (SIZE_T_MAX-1));
1594
1595	assert(size != 0);
1596
1597	if (malloc_init())
1598		return (ALLOCM_ERR_OOM);
1599
1600	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
1601	if (usize == 0)
1602		return (ALLOCM_ERR_OOM);
1603
1604	if (rsize != NULL)
1605		*rsize = usize;
1606	return (ALLOCM_SUCCESS);
1607}
1608
1609#endif
1610/*
1611 * End experimental functions.
1612 */
1613/******************************************************************************/
1614/*
1615 * The following functions are used by threading libraries for protection of
1616 * malloc during fork().
1617 */
1618
1619#ifndef JEMALLOC_MUTEX_INIT_CB
1620void
1621jemalloc_prefork(void)
1622#else
1623void
1624_malloc_prefork(void)
1625#endif
1626{
1627	unsigned i;
1628
1629	/* Acquire all mutexes in a safe order. */
1630	malloc_mutex_prefork(&arenas_lock);
1631	for (i = 0; i < narenas; i++) {
1632		if (arenas[i] != NULL)
1633			arena_prefork(arenas[i]);
1634	}
1635	base_prefork();
1636	huge_prefork();
1637	chunk_dss_prefork();
1638}
1639
1640#ifndef JEMALLOC_MUTEX_INIT_CB
1641void
1642jemalloc_postfork_parent(void)
1643#else
1644void
1645_malloc_postfork(void)
1646#endif
1647{
1648	unsigned i;
1649
1650	/* Release all mutexes, now that fork() has completed. */
1651	chunk_dss_postfork_parent();
1652	huge_postfork_parent();
1653	base_postfork_parent();
1654	for (i = 0; i < narenas; i++) {
1655		if (arenas[i] != NULL)
1656			arena_postfork_parent(arenas[i]);
1657	}
1658	malloc_mutex_postfork_parent(&arenas_lock);
1659}
1660
1661void
1662jemalloc_postfork_child(void)
1663{
1664	unsigned i;
1665
1666	/* Release all mutexes, now that fork() has completed. */
1667	chunk_dss_postfork_child();
1668	huge_postfork_child();
1669	base_postfork_child();
1670	for (i = 0; i < narenas; i++) {
1671		if (arenas[i] != NULL)
1672			arena_postfork_child(arenas[i]);
1673	}
1674	malloc_mutex_postfork_child(&arenas_lock);
1675}
1676
1677/******************************************************************************/
1678/*
1679 * The following functions are used for TLS allocation/deallocation in static
1680 * binaries on FreeBSD.  The primary difference between these and i[mcd]alloc()
1681 * is that these avoid accessing TLS variables.
1682 */
1683
1684static void *
1685a0alloc(size_t size, bool zero)
1686{
1687
1688	if (malloc_init())
1689		return (NULL);
1690
1691	if (size == 0)
1692		size = 1;
1693
1694	if (size <= arena_maxclass)
1695		return (arena_malloc(arenas[0], size, zero, false));
1696	else
1697		return (huge_malloc(size, zero));
1698}
1699
1700void *
1701a0malloc(size_t size)
1702{
1703
1704	return (a0alloc(size, false));
1705}
1706
1707void *
1708a0calloc(size_t num, size_t size)
1709{
1710
1711	return (a0alloc(num * size, true));
1712}
1713
1714void
1715a0free(void *ptr)
1716{
1717	arena_chunk_t *chunk;
1718
1719	if (ptr == NULL)
1720		return;
1721
1722	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
1723	if (chunk != ptr)
1724		arena_dalloc(chunk->arena, chunk, ptr, false);
1725	else
1726		huge_dalloc(ptr, true);
1727}
1728
1729/******************************************************************************/
1730