jemalloc_internal.h.in revision cd9a1346e96f71bdecdc654ea50fc62d76371e74
#include <sys/mman.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/uio.h>

#include <errno.h>
#include <limits.h>
#ifndef SIZE_T_MAX
#  define SIZE_T_MAX	SIZE_MAX
#endif
#include <pthread.h>
#include <sched.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>
#ifndef offsetof
#  define offsetof(type, member)	((size_t)&(((type *)NULL)->member))
#endif
#include <inttypes.h>
#include <string.h>
#include <strings.h>
#include <ctype.h>
#include <unistd.h>
#include <fcntl.h>
#include <pthread.h>
#include <math.h>

#define	JEMALLOC_NO_DEMANGLE
#include "../jemalloc@install_suffix@.h"

#include "jemalloc/internal/private_namespace.h"

#ifdef JEMALLOC_CC_SILENCE
#define	UNUSED JEMALLOC_ATTR(unused)
#else
#define	UNUSED
#endif

static const bool config_debug =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
static const bool config_dss =
#ifdef JEMALLOC_DSS
    true
#else
    false
#endif
    ;
static const bool config_dynamic_page_shift =
#ifdef JEMALLOC_DYNAMIC_PAGE_SHIFT
    true
#else
    false
#endif
    ;
static const bool config_fill =
#ifdef JEMALLOC_FILL
    true
#else
    false
#endif
    ;
static const bool config_lazy_lock =
#ifdef JEMALLOC_LAZY_LOCK
    true
#else
    false
#endif
    ;
static const bool config_prof =
#ifdef JEMALLOC_PROF
    true
#else
    false
#endif
    ;
static const bool config_prof_libgcc =
#ifdef JEMALLOC_PROF_LIBGCC
    true
#else
    false
#endif
    ;
static const bool config_prof_libunwind =
#ifdef JEMALLOC_PROF_LIBUNWIND
    true
#else
    false
#endif
    ;
static const bool config_stats =
#ifdef JEMALLOC_STATS
    true
#else
    false
#endif
    ;
static const bool config_tcache =
#ifdef JEMALLOC_TCACHE
    true
#else
    false
#endif
    ;
static const bool config_tls =
#ifdef JEMALLOC_TLS
    true
#else
    false
#endif
    ;
static const bool config_xmalloc =
#ifdef JEMALLOC_XMALLOC
    true
#else
    false
#endif
    ;
static const bool config_ivsalloc =
#ifdef JEMALLOC_IVSALLOC
    true
#else
    false
#endif
    ;

#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
#include <libkern/OSAtomic.h>
#endif

#ifdef JEMALLOC_ZONE
#include <mach/mach_error.h>
#include <mach/mach_init.h>
#include <mach/vm_map.h>
#include <malloc/malloc.h>
#endif

#define	RB_COMPACT
#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/qr.h"
#include "jemalloc/internal/ql.h"

/*
 * jemalloc can conceptually be broken into components (arena, tcache, etc.),
 * but there are circular dependencies that cannot be broken without
 * substantial performance degradation.  In order to reduce the effect on
 * visual code flow, read the header files in multiple passes, with one of the
 * following cpp variables defined during each pass:
 *
 *   JEMALLOC_H_TYPES   : Preprocessor-defined constants and pseudo-opaque data
 *                        types.
 *   JEMALLOC_H_STRUCTS : Data structures.
 *   JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
 *   JEMALLOC_H_INLINES : Inline functions.
 */
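/*
 * Illustrative sketch (not a literal copy of any one header): each component
 * header is expected to provide one section per pass, guarded as follows, so
 * that including it repeatedly with a different JEMALLOC_H_* macro defined
 * pulls in just that layer:
 *
 *   #ifdef JEMALLOC_H_TYPES
 *   ...constants and typedefs...
 *   #endif
 *   #ifdef JEMALLOC_H_STRUCTS
 *   ...struct definitions...
 *   #endif
 *   #ifdef JEMALLOC_H_EXTERNS
 *   ...extern data and prototypes...
 *   #endif
 *   #ifdef JEMALLOC_H_INLINES
 *   ...inline functions...
 *   #endif
 */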
/******************************************************************************/
#define JEMALLOC_H_TYPES

#define	ALLOCM_LG_ALIGN_MASK	((int)0x3f)

#define	ZU(z)	((size_t)z)

#ifndef __DECONST
#  define	__DECONST(type, var)	((type)(uintptr_t)(const void *)(var))
#endif

#ifdef JEMALLOC_DEBUG
   /* Disable inlining to make debugging easier. */
#  define JEMALLOC_INLINE
#  define inline
#else
#  define JEMALLOC_ENABLE_INLINE
#  define JEMALLOC_INLINE static inline
#endif

/* Smallest size class to support. */
#define	LG_TINY_MIN		3
#define	TINY_MIN		(1U << LG_TINY_MIN)

/*
 * Minimum alignment of allocations is 2^LG_QUANTUM bytes (ignoring tiny size
 * classes).
 */
#ifndef LG_QUANTUM
#  ifdef __i386__
#    define LG_QUANTUM		4
#  endif
#  ifdef __ia64__
#    define LG_QUANTUM		4
#  endif
#  ifdef __alpha__
#    define LG_QUANTUM		4
#  endif
#  ifdef __sparc64__
#    define LG_QUANTUM		4
#  endif
#  if (defined(__amd64__) || defined(__x86_64__))
#    define LG_QUANTUM		4
#  endif
#  ifdef __arm__
#    define LG_QUANTUM		3
#  endif
#  ifdef __mips__
#    define LG_QUANTUM		3
#  endif
#  ifdef __powerpc__
#    define LG_QUANTUM		4
#  endif
#  ifdef __s390x__
#    define LG_QUANTUM		4
#  endif
#  ifdef __SH4__
#    define LG_QUANTUM		4
#  endif
#  ifdef __tile__
#    define LG_QUANTUM		4
#  endif
#  ifndef LG_QUANTUM
#    error "No LG_QUANTUM definition for architecture; specify via CPPFLAGS"
#  endif
#endif

#define	QUANTUM			((size_t)(1U << LG_QUANTUM))
#define	QUANTUM_MASK		(QUANTUM - 1)

/* Return the smallest quantum multiple that is >= a. */
#define	QUANTUM_CEILING(a)						\
	(((a) + QUANTUM_MASK) & ~QUANTUM_MASK)
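/*
 * Worked example (assuming LG_QUANTUM == 4, so QUANTUM == 16):
 *   QUANTUM_CEILING(17) == (17 + 15) & ~15 == 32
 * LONG_CEILING(), PTR_CEILING(), CACHELINE_CEILING(), and PAGE_CEILING()
 * below follow the same mask-based round-up pattern, which requires the
 * granularity to be a power of two.
 */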

#define	LONG			((size_t)(1U << LG_SIZEOF_LONG))
#define	LONG_MASK		(LONG - 1)

/* Return the smallest long multiple that is >= a. */
#define	LONG_CEILING(a)							\
	(((a) + LONG_MASK) & ~LONG_MASK)

#define	SIZEOF_PTR		(1U << LG_SIZEOF_PTR)
#define	PTR_MASK		(SIZEOF_PTR - 1)

/* Return the smallest (void *) multiple that is >= a. */
#define	PTR_CEILING(a)							\
	(((a) + PTR_MASK) & ~PTR_MASK)

/*
 * Maximum size of L1 cache line.  This is used to avoid cache line aliasing.
 * In addition, this controls the spacing of cacheline-spaced size classes.
 */
#define	LG_CACHELINE		6
#define	CACHELINE		((size_t)(1U << LG_CACHELINE))
#define	CACHELINE_MASK		(CACHELINE - 1)

/* Return the smallest cacheline multiple that is >= s. */
#define	CACHELINE_CEILING(s)						\
	(((s) + CACHELINE_MASK) & ~CACHELINE_MASK)

/* Page size.  STATIC_PAGE_SHIFT is determined by the configure script. */
#define	STATIC_PAGE_SIZE ((size_t)(1U << STATIC_PAGE_SHIFT))
#define	STATIC_PAGE_MASK ((size_t)(STATIC_PAGE_SIZE - 1))
#ifdef PAGE_SHIFT
#  undef PAGE_SHIFT
#endif
#ifdef PAGE_SIZE
#  undef PAGE_SIZE
#endif
#ifdef PAGE_MASK
#  undef PAGE_MASK
#endif
#define	PAGE_SHIFT	STATIC_PAGE_SHIFT
#define	PAGE_SIZE	STATIC_PAGE_SIZE
#define	PAGE_MASK	STATIC_PAGE_MASK

/* Return the smallest pagesize multiple that is >= s. */
#define	PAGE_CEILING(s)							\
	(((s) + PAGE_MASK) & ~PAGE_MASK)
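/*
 * Example (assuming STATIC_PAGE_SHIFT == 12, i.e. 4 KiB pages):
 *   PAGE_CEILING(5000) == (5000 + 4095) & ~4095 == 8192
 */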

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#ifdef JEMALLOC_ZONE
#include "jemalloc/internal/zone.h"
#endif
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_TYPES
/******************************************************************************/
#define JEMALLOC_H_STRUCTS

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#ifdef JEMALLOC_ZONE
#include "jemalloc/internal/zone.h"
#endif
#include "jemalloc/internal/prof.h"

typedef struct {
	uint64_t	allocated;
	uint64_t	deallocated;
} thread_allocated_t;
/*
 * The JEMALLOC_CONCAT() wrapper is necessary to pass {0, 0} via a cpp macro
 * argument.
 */
#define	THREAD_ALLOCATED_INITIALIZER	JEMALLOC_CONCAT({0, 0})
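/*
 * For example, passing THREAD_ALLOCATED_INITIALIZER to malloc_tsd_funcs()
 * hands the brace list {0, 0} through as a single macro argument; without
 * the JEMALLOC_CONCAT() wrapper the comma inside the braces would be treated
 * as an argument separator by the preprocessor.
 */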

#undef JEMALLOC_H_STRUCTS
/******************************************************************************/
#define JEMALLOC_H_EXTERNS

extern bool	opt_abort;
extern bool	opt_junk;
extern bool	opt_xmalloc;
extern bool	opt_zero;
extern size_t	opt_narenas;

#ifdef DYNAMIC_PAGE_SHIFT
extern size_t		pagesize;
extern size_t		pagesize_mask;
extern size_t		lg_pagesize;
#endif

/* Number of CPUs. */
extern unsigned		ncpus;

extern malloc_mutex_t	arenas_lock; /* Protects arenas initialization. */
/*
 * Arenas that are used to service external requests.  Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 */
extern arena_t		**arenas;
extern unsigned		narenas;

arena_t	*arenas_extend(unsigned ind);
void	arenas_cleanup(void *arg);
arena_t	*choose_arena_hard(void);
void	jemalloc_prefork(void);
void	jemalloc_postfork_parent(void);
void	jemalloc_postfork_child(void);

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#ifdef JEMALLOC_ZONE
#include "jemalloc/internal/zone.h"
#endif
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_EXTERNS
/******************************************************************************/
#define JEMALLOC_H_INLINES

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"

#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), arenas, arena_t *)

size_t	s2u(size_t size);
size_t	sa2u(size_t size, size_t alignment, size_t *run_size_p);
arena_t	*choose_arena(void);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
/*
 * Map of pthread_self() --> arenas[???], used for selecting an arena to use
 * for allocations.
 */
malloc_tsd_externs(arenas, arena_t *)
malloc_tsd_funcs(JEMALLOC_INLINE, arenas, arena_t *, NULL, arenas_cleanup)

/*
 * Compute usable size that would result from allocating an object with the
 * specified size.
 */
JEMALLOC_INLINE size_t
s2u(size_t size)
{

	if (size <= SMALL_MAXCLASS)
		return (arena_bin_info[SMALL_SIZE2BIN(size)].reg_size);
	if (size <= arena_maxclass)
		return (PAGE_CEILING(size));
	return (CHUNK_CEILING(size));
}
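/*
 * Illustrative results (actual boundaries depend on the configured size
 * classes, page size, and chunk size):
 *   s2u(100)                -> reg_size of the smallest small class >= 100
 *   s2u(3 * PAGE_SIZE / 2)  -> PAGE_CEILING(...), i.e. two pages (large)
 *   s2u(arena_maxclass + 1) -> CHUNK_CEILING(...), i.e. whole chunks (huge)
 */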

/*
 * Compute usable size that would result from allocating an object with the
 * specified size and alignment.
 */
JEMALLOC_INLINE size_t
sa2u(size_t size, size_t alignment, size_t *run_size_p)
{
	size_t usize;

	/*
	 * Round size up to the nearest multiple of alignment.
	 *
	 * This done, we can take advantage of the fact that for each small
	 * size class, every object is aligned at the smallest power of two
	 * that is non-zero in the base two representation of the size.  For
	 * example:
	 *
	 *   Size |   Base 2 | Minimum alignment
	 *   -----+----------+------------------
	 *     96 |  1100000 |  32
	 *    160 | 10100000 |  32
	 *    192 | 11000000 |  64
	 */
	usize = (size + (alignment - 1)) & (-alignment);
	/*
	 * (usize < size) protects against the combination of maximal
	 * alignment and size greater than maximal alignment.
	 */
	if (usize < size) {
		/* size_t overflow. */
		return (0);
	}

	if (usize <= arena_maxclass && alignment <= PAGE_SIZE) {
		if (usize <= SMALL_MAXCLASS)
			return (arena_bin_info[SMALL_SIZE2BIN(usize)].reg_size);
		return (PAGE_CEILING(usize));
	} else {
		size_t run_size;

		/*
		 * We can't achieve subpage alignment, so round up alignment
		 * permanently; it makes later calculations simpler.
		 */
		alignment = PAGE_CEILING(alignment);
		usize = PAGE_CEILING(size);
		/*
		 * (usize < size) protects against very large sizes within
		 * PAGE_SIZE of SIZE_T_MAX.
		 *
		 * (usize + alignment < usize) protects against the
		 * combination of maximal alignment and usize large enough
		 * to cause overflow.  This is similar to the first overflow
		 * check above, but it needs to be repeated due to the new
		 * usize value, which may now be *equal* to maximal
		 * alignment, whereas before we only detected overflow if the
		 * original size was *greater* than maximal alignment.
		 */
		if (usize < size || usize + alignment < usize) {
			/* size_t overflow. */
			return (0);
		}

		/*
		 * Calculate the size of the over-size run that arena_palloc()
		 * would need to allocate in order to guarantee the alignment.
		 */
		if (usize >= alignment)
			run_size = usize + alignment - PAGE_SIZE;
		else {
			/*
			 * It is possible that (alignment << 1) will cause
			 * overflow, but it doesn't matter because we also
			 * subtract PAGE_SIZE, which in the case of overflow
			 * leaves us with a very large run_size.  That causes
			 * the first conditional below to fail, which means
			 * that the bogus run_size value never gets used for
			 * anything important.
			 */
			run_size = (alignment << 1) - PAGE_SIZE;
		}
		if (run_size_p != NULL)
			*run_size_p = run_size;

		if (run_size <= arena_maxclass)
			return (PAGE_CEILING(usize));
		return (CHUNK_CEILING(usize));
	}
}
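/*
 * Worked examples (assuming 4 KiB pages):
 *   sa2u(100, 64, NULL): usize rounds up to 128, a small size class whose
 *   objects are naturally 64-byte aligned, so its reg_size (128) is returned.
 *   sa2u(100, 8192, &run_size): alignment exceeds PAGE_SIZE, so usize becomes
 *   PAGE_CEILING(100) == 4096; since usize < alignment, run_size is set to
 *   (8192 << 1) - 4096 == 12288, enough for arena_palloc() to carve out an
 *   8192-aligned, 4096-byte region.
 */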

/* Choose an arena based on a per-thread value. */
JEMALLOC_INLINE arena_t *
choose_arena(void)
{
	arena_t *ret;

	if ((ret = *arenas_tsd_get()) == NULL) {
		ret = choose_arena_hard();
		assert(ret != NULL);
	}

	return (ret);
}
#endif

#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/hash.h"
#ifdef JEMALLOC_ZONE
#include "jemalloc/internal/zone.h"
#endif

#ifndef JEMALLOC_ENABLE_INLINE
void	*imalloc(size_t size);
void	*icalloc(size_t size);
void	*ipalloc(size_t usize, size_t alignment, bool zero);
size_t	isalloc(const void *ptr);
size_t	ivsalloc(const void *ptr);
void	idalloc(void *ptr);
void	*iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
    bool zero, bool no_move);
malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t)
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_INLINE void *
imalloc(size_t size)
{

	assert(size != 0);

	if (size <= arena_maxclass)
		return (arena_malloc(size, false));
	else
		return (huge_malloc(size, false));
}

JEMALLOC_INLINE void *
icalloc(size_t size)
{

	if (size <= arena_maxclass)
		return (arena_malloc(size, true));
	else
		return (huge_malloc(size, true));
}

JEMALLOC_INLINE void *
ipalloc(size_t usize, size_t alignment, bool zero)
{
	void *ret;

	assert(usize != 0);
	assert(usize == sa2u(usize, alignment, NULL));

	if (usize <= arena_maxclass && alignment <= PAGE_SIZE)
		ret = arena_malloc(usize, zero);
	else {
		size_t run_size
#ifdef JEMALLOC_CC_SILENCE
		    = 0
#endif
		    ;

		/*
		 * Ideally we would only ever call sa2u() once per aligned
		 * allocation request, and the caller of this function has
		 * already done so once.  However, it's rather burdensome to
		 * require every caller to pass in run_size, especially given
		 * that it's only relevant to large allocations.  Therefore,
		 * just call it again here in order to get run_size.
		 */
		sa2u(usize, alignment, &run_size);
		if (run_size <= arena_maxclass) {
			ret = arena_palloc(choose_arena(), usize, run_size,
			    alignment, zero);
		} else if (alignment <= chunksize)
			ret = huge_malloc(usize, zero);
		else
			ret = huge_palloc(usize, alignment, zero);
	}

	assert(((uintptr_t)ret & (alignment - 1)) == 0);
	return (ret);
}

JEMALLOC_INLINE size_t
isalloc(const void *ptr)
{
	size_t ret;
	arena_chunk_t *chunk;

	assert(ptr != NULL);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr) {
		/* Region. */
		if (config_prof)
			ret = arena_salloc_demote(ptr);
		else
			ret = arena_salloc(ptr);
	} else
		ret = huge_salloc(ptr);

	return (ret);
}

JEMALLOC_INLINE size_t
ivsalloc(const void *ptr)
{

	/* Return 0 if ptr is not within a chunk managed by jemalloc. */
	if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == NULL)
		return (0);

	return (isalloc(ptr));
}

JEMALLOC_INLINE void
idalloc(void *ptr)
{
	arena_chunk_t *chunk;

	assert(ptr != NULL);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr)
		arena_dalloc(chunk->arena, chunk, ptr);
	else
		huge_dalloc(ptr, true);
}

JEMALLOC_INLINE void *
iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
    bool no_move)
{
	void *ret;
	size_t oldsize;

	assert(ptr != NULL);
	assert(size != 0);

	oldsize = isalloc(ptr);

	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
	    != 0) {
		size_t usize, copysize;

		/*
		 * Existing object alignment is inadequate; allocate new space
		 * and copy.
		 */
		if (no_move)
			return (NULL);
		usize = sa2u(size + extra, alignment, NULL);
		if (usize == 0)
			return (NULL);
		ret = ipalloc(usize, alignment, zero);
		if (ret == NULL) {
			if (extra == 0)
				return (NULL);
			/* Try again, without extra this time. */
			usize = sa2u(size, alignment, NULL);
			if (usize == 0)
				return (NULL);
			ret = ipalloc(usize, alignment, zero);
			if (ret == NULL)
				return (NULL);
		}
		/*
		 * Copy at most size bytes (not size+extra), since the caller
		 * has no expectation that the extra bytes will be reliably
		 * preserved.
		 */
		copysize = (size < oldsize) ? size : oldsize;
		memcpy(ret, ptr, copysize);
		idalloc(ptr);
		return (ret);
	}

	if (no_move) {
		if (size <= arena_maxclass) {
			return (arena_ralloc_no_move(ptr, oldsize, size,
			    extra, zero));
		} else {
			return (huge_ralloc_no_move(ptr, oldsize, size,
			    extra));
		}
	} else {
		if (size + extra <= arena_maxclass) {
			return (arena_ralloc(ptr, oldsize, size, extra,
			    alignment, zero));
		} else {
			return (huge_ralloc(ptr, oldsize, size, extra,
			    alignment, zero));
		}
	}
}
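/*
 * Illustrative cases for iralloc() (not exhaustive):
 *   - iralloc(p, 100, 0, 0, false, false): no alignment constraint, so the
 *     request goes straight to arena_ralloc()/huge_ralloc() based on size.
 *   - iralloc(p, 100, 0, 4096, false, false) where p is not page-aligned:
 *     the alignment check fails, so a new aligned object is obtained via
 *     ipalloc() and the old contents are copied before freeing p.
 *   - no_move == true never relocates; NULL is returned if the request
 *     cannot be satisfied in place.
 */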

malloc_tsd_externs(thread_allocated, thread_allocated_t)
malloc_tsd_funcs(JEMALLOC_INLINE, thread_allocated, thread_allocated_t,
    THREAD_ALLOCATED_INITIALIZER, malloc_tsd_no_cleanup)
#endif

#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_INLINES
/******************************************************************************/
764