jemalloc_internal.h.in revision 41b6afb834b1f5250223678c52bd4f013d4234f6
#include <sys/mman.h>
#include <sys/param.h>
#include <sys/syscall.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/uio.h>

#include <errno.h>
#include <limits.h>
#ifndef SIZE_T_MAX
#  define SIZE_T_MAX	SIZE_MAX
#endif
#include <pthread.h>
#include <sched.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>
#ifndef offsetof
#  define offsetof(type, member)	((size_t)&(((type *)NULL)->member))
#endif
#include <inttypes.h>
#include <string.h>
#include <strings.h>
#include <ctype.h>
#include <unistd.h>
#include <fcntl.h>
#include <pthread.h>
#include <math.h>

#define	JEMALLOC_NO_DEMANGLE
#include "../jemalloc@install_suffix@.h"

#include "jemalloc/internal/private_namespace.h"

#ifdef JEMALLOC_CC_SILENCE
#define	UNUSED JEMALLOC_ATTR(unused)
#else
#define	UNUSED
#endif

static const bool config_debug =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
static const bool config_dss =
#ifdef JEMALLOC_DSS
    true
#else
    false
#endif
    ;
static const bool config_dynamic_page_shift =
#ifdef JEMALLOC_DYNAMIC_PAGE_SHIFT
    true
#else
    false
#endif
    ;
static const bool config_fill =
#ifdef JEMALLOC_FILL
    true
#else
    false
#endif
    ;
static const bool config_lazy_lock =
#ifdef JEMALLOC_LAZY_LOCK
    true
#else
    false
#endif
    ;
static const bool config_prof =
#ifdef JEMALLOC_PROF
    true
#else
    false
#endif
    ;
static const bool config_prof_libgcc =
#ifdef JEMALLOC_PROF_LIBGCC
    true
#else
    false
#endif
    ;
static const bool config_prof_libunwind =
#ifdef JEMALLOC_PROF_LIBUNWIND
    true
#else
    false
#endif
    ;
static const bool config_stats =
#ifdef JEMALLOC_STATS
    true
#else
    false
#endif
    ;
static const bool config_tcache =
#ifdef JEMALLOC_TCACHE
    true
#else
    false
#endif
    ;
static const bool config_tls =
#ifdef JEMALLOC_TLS
    true
#else
    false
#endif
    ;
static const bool config_xmalloc =
#ifdef JEMALLOC_XMALLOC
    true
#else
    false
#endif
    ;
static const bool config_ivsalloc =
#ifdef JEMALLOC_IVSALLOC
    true
#else
    false
#endif
    ;

#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
#include <libkern/OSAtomic.h>
#endif

#ifdef JEMALLOC_ZONE
#include <mach/mach_error.h>
#include <mach/mach_init.h>
#include <mach/vm_map.h>
#include <malloc/malloc.h>
#endif

#define	RB_COMPACT
#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/qr.h"
#include "jemalloc/internal/ql.h"

/*
 * jemalloc can conceptually be broken into components (arena, tcache, etc.),
 * but there are circular dependencies that cannot be broken without
 * substantial performance degradation.  In order to reduce the effect on
 * visual code flow, read the header files in multiple passes, with one of the
 * following cpp variables defined during each pass:
 *
 *   JEMALLOC_H_TYPES   : Preprocessor-defined constants and pseudo-opaque data
 *                        types.
 *   JEMALLOC_H_STRUCTS : Data structures.
 *   JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
 *   JEMALLOC_H_INLINES : Inline functions.
 */
/******************************************************************************/
#define JEMALLOC_H_TYPES

#define	ALLOCM_LG_ALIGN_MASK	((int)0x3f)

#define	ZU(z)	((size_t)z)

#ifndef __DECONST
#  define	__DECONST(type, var)	((type)(uintptr_t)(const void *)(var))
#endif

#ifdef JEMALLOC_DEBUG
   /* Disable inlining to make debugging easier. */
#  define JEMALLOC_INLINE
#  define inline
#else
#  define JEMALLOC_ENABLE_INLINE
#  define JEMALLOC_INLINE static inline
#endif

/* Smallest size class to support. */
#define	LG_TINY_MIN		3
#define	TINY_MIN		(1U << LG_TINY_MIN)

/*
 * Minimum alignment of allocations is 2^LG_QUANTUM bytes (ignoring tiny size
 * classes).
 */
#ifndef LG_QUANTUM
#  ifdef __i386__
#    define LG_QUANTUM		4
#  endif
#  ifdef __ia64__
#    define LG_QUANTUM		4
#  endif
#  ifdef __alpha__
#    define LG_QUANTUM		4
#  endif
#  ifdef __sparc64__
#    define LG_QUANTUM		4
#  endif
#  if (defined(__amd64__) || defined(__x86_64__))
#    define LG_QUANTUM		4
#  endif
#  ifdef __arm__
#    define LG_QUANTUM		3
#  endif
#  ifdef __mips__
#    define LG_QUANTUM		3
#  endif
#  ifdef __powerpc__
#    define LG_QUANTUM		4
#  endif
#  ifdef __s390x__
#    define LG_QUANTUM		4
#  endif
#  ifdef __SH4__
#    define LG_QUANTUM		4
#  endif
#  ifdef __tile__
#    define LG_QUANTUM		4
#  endif
#  ifndef LG_QUANTUM
#    error "No LG_QUANTUM definition for architecture; specify via CPPFLAGS"
#  endif
#endif

#define	QUANTUM			((size_t)(1U << LG_QUANTUM))
#define	QUANTUM_MASK		(QUANTUM - 1)

/* Return the smallest quantum multiple that is >= a. */
#define	QUANTUM_CEILING(a)						\
	(((a) + QUANTUM_MASK) & ~QUANTUM_MASK)
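/* For example, with LG_QUANTUM == 4 (16-byte quantum), QUANTUM_CEILING(17) == 32. */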

#define	LONG			((size_t)(1U << LG_SIZEOF_LONG))
#define	LONG_MASK		(LONG - 1)

/* Return the smallest long multiple that is >= a. */
#define	LONG_CEILING(a)							\
	(((a) + LONG_MASK) & ~LONG_MASK)

#define	SIZEOF_PTR		(1U << LG_SIZEOF_PTR)
#define	PTR_MASK		(SIZEOF_PTR - 1)

/* Return the smallest (void *) multiple that is >= a. */
#define	PTR_CEILING(a)							\
	(((a) + PTR_MASK) & ~PTR_MASK)

/*
 * Maximum size of L1 cache line.  This is used to avoid cache line aliasing.
 * In addition, this controls the spacing of cacheline-spaced size classes.
 */
#define	LG_CACHELINE		6
#define	CACHELINE		((size_t)(1U << LG_CACHELINE))
#define	CACHELINE_MASK		(CACHELINE - 1)

/* Return the smallest cacheline multiple that is >= s. */
#define	CACHELINE_CEILING(s)						\
	(((s) + CACHELINE_MASK) & ~CACHELINE_MASK)

/* Page size.  STATIC_PAGE_SHIFT is determined by the configure script. */
#define	STATIC_PAGE_SIZE ((size_t)(1U << STATIC_PAGE_SHIFT))
#define	STATIC_PAGE_MASK ((size_t)(STATIC_PAGE_SIZE - 1))
#ifdef PAGE_SHIFT
#  undef PAGE_SHIFT
#endif
#ifdef PAGE_SIZE
#  undef PAGE_SIZE
#endif
#ifdef PAGE_MASK
#  undef PAGE_MASK
#endif
#define	PAGE_SHIFT	STATIC_PAGE_SHIFT
#define	PAGE_SIZE	STATIC_PAGE_SIZE
#define	PAGE_MASK	STATIC_PAGE_MASK

/* Return the smallest pagesize multiple that is >= s. */
#define	PAGE_CEILING(s)							\
	(((s) + PAGE_MASK) & ~PAGE_MASK)
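/*
 * For example, assuming STATIC_PAGE_SHIFT == 12 (4 KiB pages),
 * PAGE_CEILING(5000) == 8192.
 */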

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#ifdef JEMALLOC_ZONE
#include "jemalloc/internal/zone.h"
#endif
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_TYPES
/******************************************************************************/
#define JEMALLOC_H_STRUCTS

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#ifdef JEMALLOC_ZONE
#include "jemalloc/internal/zone.h"
#endif
#include "jemalloc/internal/prof.h"

typedef struct {
	uint64_t	allocated;
	uint64_t	deallocated;
} thread_allocated_t;
/*
 * The JEMALLOC_CONCAT() wrapper is necessary to pass {0, 0} via a cpp macro
 * argument.
 */
#define	THREAD_ALLOCATED_INITIALIZER	JEMALLOC_CONCAT({0, 0})
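/*
 * Without the wrapper, the comma inside {0, 0} would be treated as a cpp
 * argument separator when THREAD_ALLOCATED_INITIALIZER is itself passed to a
 * macro (e.g. malloc_tsd_funcs() below).
 */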

#undef JEMALLOC_H_STRUCTS
/******************************************************************************/
#define JEMALLOC_H_EXTERNS

extern bool	opt_abort;
extern bool	opt_junk;
extern bool	opt_xmalloc;
extern bool	opt_zero;
extern size_t	opt_narenas;

#ifdef DYNAMIC_PAGE_SHIFT
extern size_t		pagesize;
extern size_t		pagesize_mask;
extern size_t		lg_pagesize;
#endif

/* Number of CPUs. */
extern unsigned		ncpus;

extern malloc_mutex_t	arenas_lock; /* Protects arenas initialization. */
/*
 * Arenas that are used to service external requests.  Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 */
extern arena_t		**arenas;
extern unsigned		narenas;

extern bool		malloc_initialized;

arena_t	*arenas_extend(unsigned ind);
void	arenas_cleanup(void *arg);
arena_t	*choose_arena_hard(void);
void	jemalloc_prefork(void);
void	jemalloc_postfork_parent(void);
void	jemalloc_postfork_child(void);

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#ifdef JEMALLOC_ZONE
#include "jemalloc/internal/zone.h"
#endif
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_EXTERNS
/******************************************************************************/
#define JEMALLOC_H_INLINES

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"

#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), arenas, arena_t *)

size_t	s2u(size_t size);
size_t	sa2u(size_t size, size_t alignment, size_t *run_size_p);
arena_t	*choose_arena(void);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
/*
 * Map of pthread_self() --> arenas[???], used for selecting an arena to use
 * for allocations.
 */
malloc_tsd_externs(arenas, arena_t *)
malloc_tsd_funcs(JEMALLOC_INLINE, arenas, arena_t *, NULL, arenas_cleanup)

/*
 * Compute usable size that would result from allocating an object with the
 * specified size.
 */
JEMALLOC_INLINE size_t
s2u(size_t size)
{

	if (size <= SMALL_MAXCLASS)
		return (arena_bin_info[SMALL_SIZE2BIN(size)].reg_size);
	if (size <= arena_maxclass)
		return (PAGE_CEILING(size));
	return (CHUNK_CEILING(size));
}

/*
 * Compute usable size that would result from allocating an object with the
 * specified size and alignment.
 */
JEMALLOC_INLINE size_t
sa2u(size_t size, size_t alignment, size_t *run_size_p)
{
	size_t usize;

	/*
	 * Round size up to the nearest multiple of alignment.
	 *
	 * This done, we can take advantage of the fact that for each small
	 * size class, every object is aligned at the smallest power of two
	 * that is non-zero in the base two representation of the size.  For
	 * example:
	 *
	 *   Size |   Base 2 | Minimum alignment
	 *   -----+----------+------------------
	 *     96 |  1100000 |  32
	 *    144 | 10100000 |  32
	 *    192 | 11000000 |  64
	 */
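	/* For example, size 100 with alignment 32: (100 + 31) & -32 == 128. */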
	usize = (size + (alignment - 1)) & (-alignment);
	/*
	 * (usize < size) protects against the combination of maximal
	 * alignment and size greater than maximal alignment.
	 */
	if (usize < size) {
		/* size_t overflow. */
		return (0);
	}

	if (usize <= arena_maxclass && alignment <= PAGE_SIZE) {
		if (usize <= SMALL_MAXCLASS)
			return (arena_bin_info[SMALL_SIZE2BIN(usize)].reg_size);
		return (PAGE_CEILING(usize));
	} else {
		size_t run_size;

		/*
		 * We can't achieve subpage alignment, so round up alignment
		 * permanently; it makes later calculations simpler.
		 */
		alignment = PAGE_CEILING(alignment);
		usize = PAGE_CEILING(size);
		/*
		 * (usize < size) protects against very large sizes within
		 * PAGE_SIZE of SIZE_T_MAX.
		 *
		 * (usize + alignment < usize) protects against the
		 * combination of maximal alignment and usize large enough
		 * to cause overflow.  This is similar to the first overflow
		 * check above, but it needs to be repeated due to the new
		 * usize value, which may now be *equal* to maximal
		 * alignment, whereas before we only detected overflow if the
		 * original size was *greater* than maximal alignment.
		 */
		if (usize < size || usize + alignment < usize) {
			/* size_t overflow. */
			return (0);
		}

		/*
		 * Calculate the size of the over-size run that arena_palloc()
		 * would need to allocate in order to guarantee the alignment.
		 */
		if (usize >= alignment)
			run_size = usize + alignment - PAGE_SIZE;
		else {
			/*
			 * It is possible that (alignment << 1) will cause
			 * overflow, but it doesn't matter because we also
			 * subtract PAGE_SIZE, which in the case of overflow
			 * leaves us with a very large run_size.  That causes
			 * the first conditional below to fail, which means
			 * that the bogus run_size value never gets used for
			 * anything important.
			 */
			run_size = (alignment << 1) - PAGE_SIZE;
		}
		if (run_size_p != NULL)
			*run_size_p = run_size;

		if (run_size <= arena_maxclass)
			return (PAGE_CEILING(usize));
		return (CHUNK_CEILING(usize));
	}
}

/* Choose an arena based on a per-thread value. */
JEMALLOC_INLINE arena_t *
choose_arena(void)
{
	arena_t *ret;

	if ((ret = *arenas_tsd_get()) == NULL) {
		ret = choose_arena_hard();
		assert(ret != NULL);
	}

	return (ret);
}
#endif

#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/hash.h"
#ifdef JEMALLOC_ZONE
#include "jemalloc/internal/zone.h"
#endif

#ifndef JEMALLOC_ENABLE_INLINE
void	*imalloc(size_t size);
void	*icalloc(size_t size);
void	*ipalloc(size_t usize, size_t alignment, bool zero);
size_t	isalloc(const void *ptr);
size_t	ivsalloc(const void *ptr);
void	idalloc(void *ptr);
void	*iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
    bool zero, bool no_move);
malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t)
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_INLINE void *
imalloc(size_t size)
{

	assert(size != 0);

	if (size <= arena_maxclass)
		return (arena_malloc(size, false));
	else
		return (huge_malloc(size, false));
}

JEMALLOC_INLINE void *
icalloc(size_t size)
{

	if (size <= arena_maxclass)
		return (arena_malloc(size, true));
	else
		return (huge_malloc(size, true));
}

JEMALLOC_INLINE void *
ipalloc(size_t usize, size_t alignment, bool zero)
{
	void *ret;

	assert(usize != 0);
	assert(usize == sa2u(usize, alignment, NULL));

	if (usize <= arena_maxclass && alignment <= PAGE_SIZE)
		ret = arena_malloc(usize, zero);
	else {
		size_t run_size JEMALLOC_CC_SILENCE_INIT(0);

		/*
		 * Ideally we would only ever call sa2u() once per aligned
		 * allocation request, and the caller of this function has
		 * already done so once.  However, it's rather burdensome to
		 * require every caller to pass in run_size, especially given
		 * that it's only relevant to large allocations.  Therefore,
		 * just call it again here in order to get run_size.
		 */
		sa2u(usize, alignment, &run_size);
		if (run_size <= arena_maxclass) {
			ret = arena_palloc(choose_arena(), usize, run_size,
			    alignment, zero);
		} else if (alignment <= chunksize)
			ret = huge_malloc(usize, zero);
		else
			ret = huge_palloc(usize, alignment, zero);
	}

	assert(((uintptr_t)ret & (alignment - 1)) == 0);
	return (ret);
}

JEMALLOC_INLINE size_t
isalloc(const void *ptr)
{
	size_t ret;
	arena_chunk_t *chunk;

	assert(ptr != NULL);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr) {
		/* Region. */
		if (config_prof)
			ret = arena_salloc_demote(ptr);
		else
			ret = arena_salloc(ptr);
	} else
		ret = huge_salloc(ptr);

	return (ret);
}

JEMALLOC_INLINE size_t
ivsalloc(const void *ptr)
{

	/* Return 0 if ptr is not within a chunk managed by jemalloc. */
	if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == NULL)
		return (0);

	return (isalloc(ptr));
}

JEMALLOC_INLINE void
idalloc(void *ptr)
{
	arena_chunk_t *chunk;

	assert(ptr != NULL);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr)
		arena_dalloc(chunk->arena, chunk, ptr);
	else
		huge_dalloc(ptr, true);
}

JEMALLOC_INLINE void *
iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
    bool no_move)
{
	void *ret;
	size_t oldsize;

	assert(ptr != NULL);
	assert(size != 0);

	oldsize = isalloc(ptr);

	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
	    != 0) {
		size_t usize, copysize;

		/*
		 * Existing object alignment is inadequate; allocate new space
		 * and copy.
		 */
		if (no_move)
			return (NULL);
		usize = sa2u(size + extra, alignment, NULL);
		if (usize == 0)
			return (NULL);
		ret = ipalloc(usize, alignment, zero);
		if (ret == NULL) {
			if (extra == 0)
				return (NULL);
			/* Try again, without extra this time. */
			usize = sa2u(size, alignment, NULL);
			if (usize == 0)
				return (NULL);
			ret = ipalloc(usize, alignment, zero);
			if (ret == NULL)
				return (NULL);
		}
		/*
		 * Copy at most size bytes (not size+extra), since the caller
		 * has no expectation that the extra bytes will be reliably
		 * preserved.
		 */
		copysize = (size < oldsize) ? size : oldsize;
		memcpy(ret, ptr, copysize);
		idalloc(ptr);
		return (ret);
	}

	if (no_move) {
		if (size <= arena_maxclass) {
			return (arena_ralloc_no_move(ptr, oldsize, size,
			    extra, zero));
		} else {
			return (huge_ralloc_no_move(ptr, oldsize, size,
			    extra));
		}
	} else {
		if (size + extra <= arena_maxclass) {
			return (arena_ralloc(ptr, oldsize, size, extra,
			    alignment, zero));
		} else {
			return (huge_ralloc(ptr, oldsize, size, extra,
			    alignment, zero));
		}
	}
}

malloc_tsd_externs(thread_allocated, thread_allocated_t)
malloc_tsd_funcs(JEMALLOC_INLINE, thread_allocated, thread_allocated_t,
    THREAD_ALLOCATED_INITIALIZER, malloc_tsd_no_cleanup)
#endif

#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_INLINES
/******************************************************************************/