jemalloc_internal.h.in revision 96d4120ac08db3f2d566e8e5c3bc134a24aa0afc
#include <sys/mman.h>
#include <sys/param.h>
#include <sys/syscall.h>
#if !defined(SYS_write) && defined(__NR_write)
#define	SYS_write __NR_write
#endif
#include <sys/time.h>
#include <sys/types.h>
#include <sys/uio.h>

#include <errno.h>
#include <limits.h>
#ifndef SIZE_T_MAX
#  define SIZE_T_MAX	SIZE_MAX
#endif
#include <pthread.h>
#include <sched.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>
#ifndef offsetof
#  define offsetof(type, member)	((size_t)&(((type *)NULL)->member))
#endif
#include <inttypes.h>
#include <string.h>
#include <strings.h>
#include <ctype.h>
#include <unistd.h>
#include <fcntl.h>
#include <pthread.h>
#include <math.h>

#define	JEMALLOC_NO_DEMANGLE
#include "../jemalloc@install_suffix@.h"

#include "jemalloc/internal/private_namespace.h"

#ifdef JEMALLOC_CC_SILENCE
#define	UNUSED JEMALLOC_ATTR(unused)
#else
#define	UNUSED
#endif

static const bool config_debug =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
static const bool config_dss =
#ifdef JEMALLOC_DSS
    true
#else
    false
#endif
    ;
static const bool config_dynamic_page_shift =
#ifdef JEMALLOC_DYNAMIC_PAGE_SHIFT
    true
#else
    false
#endif
    ;
static const bool config_fill =
#ifdef JEMALLOC_FILL
    true
#else
    false
#endif
    ;
static const bool config_lazy_lock =
#ifdef JEMALLOC_LAZY_LOCK
    true
#else
    false
#endif
    ;
static const bool config_prof =
#ifdef JEMALLOC_PROF
    true
#else
    false
#endif
    ;
static const bool config_prof_libgcc =
#ifdef JEMALLOC_PROF_LIBGCC
    true
#else
    false
#endif
    ;
static const bool config_prof_libunwind =
#ifdef JEMALLOC_PROF_LIBUNWIND
    true
#else
    false
#endif
    ;
static const bool config_stats =
#ifdef JEMALLOC_STATS
    true
#else
    false
#endif
    ;
static const bool config_tcache =
#ifdef JEMALLOC_TCACHE
    true
#else
    false
#endif
    ;
static const bool config_tls =
#ifdef JEMALLOC_TLS
    true
#else
    false
#endif
    ;
static const bool config_xmalloc =
#ifdef JEMALLOC_XMALLOC
    true
#else
    false
#endif
    ;
static const bool config_ivsalloc =
#ifdef JEMALLOC_IVSALLOC
    true
#else
    false
#endif
    ;

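/*
 * The config_* constants above expose compile-time features as ordinary
 * booleans so that feature-conditional code can be written as plain C
 * branches rather than #ifdef blocks.  A representative (purely
 * illustrative) use, where stats_update() stands in for a hypothetical
 * helper rather than a real jemalloc function:
 *
 *	if (config_stats)
 *		stats_update(usize);
 *
 * When the corresponding JEMALLOC_* macro is undefined, the constant is
 * false and the compiler eliminates the branch as dead code, so disabled
 * features cost nothing at run time.
 */
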
#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
#include <libkern/OSAtomic.h>
#endif

#ifdef JEMALLOC_ZONE
#include <mach/mach_error.h>
#include <mach/mach_init.h>
#include <mach/vm_map.h>
#include <malloc/malloc.h>
#endif

#define	RB_COMPACT
#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/qr.h"
#include "jemalloc/internal/ql.h"

/*
 * jemalloc can conceptually be broken into components (arena, tcache, etc.),
 * but there are circular dependencies that cannot be broken without
 * substantial performance degradation.  In order to reduce the effect on
 * visual code flow, read the header files in multiple passes, with one of the
 * following cpp variables defined during each pass:
 *
 *   JEMALLOC_H_TYPES   : Preprocessor-defined constants and pseudo-opaque data
 *                        types.
 *   JEMALLOC_H_STRUCTS : Data structures.
 *   JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
 *   JEMALLOC_H_INLINES : Inline functions.
 */
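/*
 * Each internal header therefore contains one section per pass, guarded by
 * the corresponding cpp variable.  A sketch of the expected layout (the
 * example_t/example_boot names below are placeholders for illustration, not
 * real identifiers):
 *
 *	#ifdef JEMALLOC_H_TYPES
 *	typedef struct example_s example_t;
 *	#endif
 *	#ifdef JEMALLOC_H_STRUCTS
 *	struct example_s { ... };
 *	#endif
 *	#ifdef JEMALLOC_H_EXTERNS
 *	bool example_boot(void);
 *	#endif
 *	#ifdef JEMALLOC_H_INLINES
 *	... inline functions ...
 *	#endif
 */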
/******************************************************************************/
#define JEMALLOC_H_TYPES

#define	ALLOCM_LG_ALIGN_MASK	((int)0x3f)

#define	ZU(z)	((size_t)z)

#ifndef __DECONST
#  define	__DECONST(type, var)	((type)(uintptr_t)(const void *)(var))
#endif

#ifdef JEMALLOC_DEBUG
   /* Disable inlining to make debugging easier. */
#  define JEMALLOC_INLINE
#  define inline
#else
#  define JEMALLOC_ENABLE_INLINE
#  define JEMALLOC_INLINE static inline
#endif

/* Smallest size class to support. */
#define	LG_TINY_MIN		3
#define	TINY_MIN		(1U << LG_TINY_MIN)

/*
 * Minimum alignment of allocations is 2^LG_QUANTUM bytes (ignoring tiny size
 * classes).
 */
#ifndef LG_QUANTUM
#  ifdef __i386__
#    define LG_QUANTUM		4
#  endif
#  ifdef __ia64__
#    define LG_QUANTUM		4
#  endif
#  ifdef __alpha__
#    define LG_QUANTUM		4
#  endif
#  ifdef __sparc64__
#    define LG_QUANTUM		4
#  endif
#  if (defined(__amd64__) || defined(__x86_64__))
#    define LG_QUANTUM		4
#  endif
#  ifdef __arm__
#    define LG_QUANTUM		3
#  endif
#  ifdef __mips__
#    define LG_QUANTUM		3
#  endif
#  ifdef __powerpc__
#    define LG_QUANTUM		4
#  endif
#  ifdef __s390x__
#    define LG_QUANTUM		4
#  endif
#  ifdef __SH4__
#    define LG_QUANTUM		4
#  endif
#  ifdef __tile__
#    define LG_QUANTUM		4
#  endif
#  ifndef LG_QUANTUM
#    error "No LG_QUANTUM definition for architecture; specify via CPPFLAGS"
#  endif
#endif

#define	QUANTUM			((size_t)(1U << LG_QUANTUM))
#define	QUANTUM_MASK		(QUANTUM - 1)

/* Return the smallest quantum multiple that is >= a. */
#define	QUANTUM_CEILING(a)						\
	(((a) + QUANTUM_MASK) & ~QUANTUM_MASK)
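/*
 * Worked example (assuming LG_QUANTUM == 4, i.e. QUANTUM == 16):
 * QUANTUM_CEILING(17) == (17 + 15) & ~15 == 32, while QUANTUM_CEILING(16)
 * stays 16.  LONG_CEILING(), PTR_CEILING(), CACHELINE_CEILING(), and
 * PAGE_CEILING() below round up in the same mask-and-clear fashion.
 */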

#define	LONG			((size_t)(1U << LG_SIZEOF_LONG))
#define	LONG_MASK		(LONG - 1)

/* Return the smallest long multiple that is >= a. */
#define	LONG_CEILING(a)							\
	(((a) + LONG_MASK) & ~LONG_MASK)

#define	SIZEOF_PTR		(1U << LG_SIZEOF_PTR)
#define	PTR_MASK		(SIZEOF_PTR - 1)

/* Return the smallest (void *) multiple that is >= a. */
#define	PTR_CEILING(a)							\
	(((a) + PTR_MASK) & ~PTR_MASK)

/*
 * Maximum size of L1 cache line.  This is used to avoid cache line aliasing.
 * In addition, this controls the spacing of cacheline-spaced size classes.
 */
#define	LG_CACHELINE		6
#define	CACHELINE		((size_t)(1U << LG_CACHELINE))
#define	CACHELINE_MASK		(CACHELINE - 1)

/* Return the smallest cacheline multiple that is >= s. */
#define	CACHELINE_CEILING(s)						\
	(((s) + CACHELINE_MASK) & ~CACHELINE_MASK)

/* Page size.  STATIC_PAGE_SHIFT is determined by the configure script. */
#define	STATIC_PAGE_SIZE ((size_t)(1U << STATIC_PAGE_SHIFT))
#define	STATIC_PAGE_MASK ((size_t)(STATIC_PAGE_SIZE - 1))
#ifdef PAGE_SHIFT
#  undef PAGE_SHIFT
#endif
#ifdef PAGE_SIZE
#  undef PAGE_SIZE
#endif
#ifdef PAGE_MASK
#  undef PAGE_MASK
#endif
#define	PAGE_SHIFT	STATIC_PAGE_SHIFT
#define	PAGE_SIZE	STATIC_PAGE_SIZE
#define	PAGE_MASK	STATIC_PAGE_MASK

/* Return the smallest pagesize multiple that is >= s. */
#define	PAGE_CEILING(s)							\
	(((s) + PAGE_MASK) & ~PAGE_MASK)

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_TYPES
/******************************************************************************/
#define JEMALLOC_H_STRUCTS

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/prof.h"

typedef struct {
	uint64_t	allocated;
	uint64_t	deallocated;
} thread_allocated_t;
/*
 * The JEMALLOC_CONCAT() wrapper is necessary to pass {0, 0} via a cpp macro
 * argument.
 */
#define	THREAD_ALLOCATED_INITIALIZER	JEMALLOC_CONCAT({0, 0})
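/*
 * Without the wrapper, the comma inside {0, 0} would split the initializer
 * into two macro arguments: a hypothetical FOO(THREAD_ALLOCATED_INITIALIZER)
 * would expand to FOO with the arguments "{0" and "0}" rather than a single
 * brace-enclosed initializer, since braces do not protect commas from the
 * preprocessor.
 */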

#undef JEMALLOC_H_STRUCTS
/******************************************************************************/
#define JEMALLOC_H_EXTERNS

extern bool	opt_abort;
extern bool	opt_junk;
extern bool	opt_xmalloc;
extern bool	opt_zero;
extern size_t	opt_narenas;

#ifdef DYNAMIC_PAGE_SHIFT
extern size_t		pagesize;
extern size_t		pagesize_mask;
extern size_t		lg_pagesize;
#endif

/* Number of CPUs. */
extern unsigned		ncpus;

extern malloc_mutex_t	arenas_lock; /* Protects arenas initialization. */
/*
 * Arenas that are used to service external requests.  Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 */
extern arena_t		**arenas;
extern unsigned		narenas;

arena_t	*arenas_extend(unsigned ind);
void	arenas_cleanup(void *arg);
arena_t	*choose_arena_hard(void);
void	jemalloc_prefork(void);
void	jemalloc_postfork_parent(void);
void	jemalloc_postfork_child(void);

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_EXTERNS
/******************************************************************************/
#define JEMALLOC_H_INLINES

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"

#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), arenas, arena_t *)

size_t	s2u(size_t size);
size_t	sa2u(size_t size, size_t alignment, size_t *run_size_p);
arena_t	*choose_arena(void);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
/*
 * Map of pthread_self() --> arenas[???], used for selecting an arena to use
 * for allocations.
 */
malloc_tsd_externs(arenas, arena_t *)
malloc_tsd_funcs(JEMALLOC_INLINE, arenas, arena_t *, NULL, arenas_cleanup)

/*
 * Compute usable size that would result from allocating an object with the
 * specified size.
 */
JEMALLOC_INLINE size_t
s2u(size_t size)
{

	if (size <= SMALL_MAXCLASS)
		return (arena_bin_info[SMALL_SIZE2BIN(size)].reg_size);
	if (size <= arena_maxclass)
		return (PAGE_CEILING(size));
	return (CHUNK_CEILING(size));
}
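/*
 * Worked example (illustrative; assumes 4 KiB pages and default,
 * quantum-spaced small size classes, which may differ in a given build):
 * s2u(100) returns the regular size of the small class that holds 100 bytes
 * (112 with a 16-byte quantum), s2u(20000) returns
 * PAGE_CEILING(20000) == 20480 for a large allocation, and sizes beyond
 * arena_maxclass round up to a chunk multiple.
 */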

/*
 * Compute usable size that would result from allocating an object with the
 * specified size and alignment.
 */
JEMALLOC_INLINE size_t
sa2u(size_t size, size_t alignment, size_t *run_size_p)
{
	size_t usize;

	/*
	 * Round size up to the nearest multiple of alignment.
	 *
	 * This done, we can take advantage of the fact that for each small
	 * size class, every object is aligned at the smallest power of two
	 * that is non-zero in the base two representation of the size.  For
	 * example:
	 *
	 *   Size |   Base 2 | Minimum alignment
	 *   -----+----------+------------------
	 *     96 |  1100000 |  32
	 *    160 | 10100000 |  32
	 *    192 | 11000000 |  64
	 */
	usize = (size + (alignment - 1)) & (-alignment);
	/*
	 * (usize < size) protects against the combination of maximal
	 * alignment and size greater than maximal alignment.
	 */
	if (usize < size) {
		/* size_t overflow. */
		return (0);
	}

	if (usize <= arena_maxclass && alignment <= PAGE_SIZE) {
		if (usize <= SMALL_MAXCLASS)
			return (arena_bin_info[SMALL_SIZE2BIN(usize)].reg_size);
		return (PAGE_CEILING(usize));
	} else {
		size_t run_size;

		/*
		 * We can't achieve subpage alignment, so round up alignment
		 * permanently; it makes later calculations simpler.
		 */
		alignment = PAGE_CEILING(alignment);
		usize = PAGE_CEILING(size);
		/*
		 * (usize < size) protects against very large sizes within
		 * PAGE_SIZE of SIZE_T_MAX.
		 *
		 * (usize + alignment < usize) protects against the
		 * combination of maximal alignment and usize large enough
		 * to cause overflow.  This is similar to the first overflow
		 * check above, but it needs to be repeated due to the new
		 * usize value, which may now be *equal* to maximal
		 * alignment, whereas before we only detected overflow if the
		 * original size was *greater* than maximal alignment.
		 */
		if (usize < size || usize + alignment < usize) {
			/* size_t overflow. */
			return (0);
		}

		/*
		 * Calculate the size of the over-size run that arena_palloc()
		 * would need to allocate in order to guarantee the alignment.
		 */
		if (usize >= alignment)
			run_size = usize + alignment - PAGE_SIZE;
		else {
			/*
			 * It is possible that (alignment << 1) will cause
			 * overflow, but it doesn't matter because we also
			 * subtract PAGE_SIZE, which in the case of overflow
			 * leaves us with a very large run_size.  That causes
			 * the first conditional below to fail, which means
			 * that the bogus run_size value never gets used for
			 * anything important.
			 */
			run_size = (alignment << 1) - PAGE_SIZE;
		}
		if (run_size_p != NULL)
			*run_size_p = run_size;

		if (run_size <= arena_maxclass)
			return (PAGE_CEILING(usize));
		return (CHUNK_CEILING(usize));
	}
}
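/*
 * Worked example for the large-alignment path (illustrative; assumes 4 KiB
 * pages): sa2u(20000, 8192, &run_size) takes the else branch because the
 * alignment exceeds PAGE_SIZE, recomputes usize = PAGE_CEILING(20000) ==
 * 20480, and since usize >= alignment sets run_size = 20480 + 8192 - 4096 ==
 * 24576.  Any page-aligned run of 24576 bytes is guaranteed to contain an
 * 8192-aligned region of 20480 bytes, which is what arena_palloc() relies
 * on; the function itself returns the usable size, 20480.
 */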

/* Choose an arena based on a per-thread value. */
JEMALLOC_INLINE arena_t *
choose_arena(void)
{
	arena_t *ret;

	if ((ret = *arenas_tsd_get()) == NULL) {
		ret = choose_arena_hard();
		assert(ret != NULL);
	}

	return (ret);
}
#endif

#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/hash.h"

#ifndef JEMALLOC_ENABLE_INLINE
void	*imalloc(size_t size);
void	*icalloc(size_t size);
void	*ipalloc(size_t usize, size_t alignment, bool zero);
size_t	isalloc(const void *ptr);
size_t	ivsalloc(const void *ptr);
void	idalloc(void *ptr);
void	*iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
    bool zero, bool no_move);
malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t)
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_INLINE void *
imalloc(size_t size)
{

	assert(size != 0);

	if (size <= arena_maxclass)
		return (arena_malloc(size, false));
	else
		return (huge_malloc(size, false));
}

JEMALLOC_INLINE void *
icalloc(size_t size)
{

	if (size <= arena_maxclass)
		return (arena_malloc(size, true));
	else
		return (huge_malloc(size, true));
}

JEMALLOC_INLINE void *
ipalloc(size_t usize, size_t alignment, bool zero)
{
	void *ret;

	assert(usize != 0);
	assert(usize == sa2u(usize, alignment, NULL));

	if (usize <= arena_maxclass && alignment <= PAGE_SIZE)
		ret = arena_malloc(usize, zero);
	else {
		size_t run_size JEMALLOC_CC_SILENCE_INIT(0);

		/*
		 * Ideally we would only ever call sa2u() once per aligned
		 * allocation request, and the caller of this function has
		 * already done so once.  However, it's rather burdensome to
		 * require every caller to pass in run_size, especially given
		 * that it's only relevant to large allocations.  Therefore,
		 * just call it again here in order to get run_size.
		 */
		sa2u(usize, alignment, &run_size);
		if (run_size <= arena_maxclass) {
			ret = arena_palloc(choose_arena(), usize, run_size,
			    alignment, zero);
		} else if (alignment <= chunksize)
			ret = huge_malloc(usize, zero);
		else
			ret = huge_palloc(usize, alignment, zero);
	}

	assert(((uintptr_t)ret & (alignment - 1)) == 0);
	return (ret);
}

JEMALLOC_INLINE size_t
isalloc(const void *ptr)
{
	size_t ret;
	arena_chunk_t *chunk;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr) {
		/* Region. */
		if (config_prof)
			ret = arena_salloc_demote(ptr);
		else
			ret = arena_salloc(ptr);
	} else if (ptr != NULL)
		ret = huge_salloc(ptr);
	else
		ret = 0;

	return (ret);
}

JEMALLOC_INLINE size_t
ivsalloc(const void *ptr)
{

	/* Return 0 if ptr is not within a chunk managed by jemalloc. */
	if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == NULL)
		return (0);

	return (isalloc(ptr));
}

JEMALLOC_INLINE void
idalloc(void *ptr)
{
	arena_chunk_t *chunk;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr)
		arena_dalloc(chunk->arena, chunk, ptr);
	else if (ptr != NULL)
		huge_dalloc(ptr, true);
}

JEMALLOC_INLINE void *
iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
    bool no_move)
{
	void *ret;
	size_t oldsize;

	assert(ptr != NULL);
	assert(size != 0);

	oldsize = isalloc(ptr);

	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
	    != 0) {
		size_t usize, copysize;

		/*
		 * Existing object alignment is inadequate; allocate new space
		 * and copy.
		 */
		if (no_move)
			return (NULL);
		usize = sa2u(size + extra, alignment, NULL);
		if (usize == 0)
			return (NULL);
		ret = ipalloc(usize, alignment, zero);
		if (ret == NULL) {
			if (extra == 0)
				return (NULL);
			/* Try again, without extra this time. */
			usize = sa2u(size, alignment, NULL);
			if (usize == 0)
				return (NULL);
			ret = ipalloc(usize, alignment, zero);
			if (ret == NULL)
				return (NULL);
		}
		/*
		 * Copy at most size bytes (not size+extra), since the caller
		 * has no expectation that the extra bytes will be reliably
		 * preserved.
		 */
		copysize = (size < oldsize) ? size : oldsize;
		memcpy(ret, ptr, copysize);
		idalloc(ptr);
		return (ret);
	}

	if (no_move) {
		if (size <= arena_maxclass) {
			return (arena_ralloc_no_move(ptr, oldsize, size,
			    extra, zero));
		} else {
			return (huge_ralloc_no_move(ptr, oldsize, size,
			    extra));
		}
	} else {
		if (size + extra <= arena_maxclass) {
			return (arena_ralloc(ptr, oldsize, size, extra,
			    alignment, zero));
		} else {
			return (huge_ralloc(ptr, oldsize, size, extra,
			    alignment, zero));
		}
	}
}
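/*
 * Usage note (illustrative): iralloc(ptr, size, 0, 0, false, false) is the
 * plain realloc-style path; passing no_move == true requests an in-place
 * resize only and yields NULL rather than relocating the object, e.g. so
 * that the experimental rallocm() entry point can honor ALLOCM_NO_MOVE.
 */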

malloc_tsd_externs(thread_allocated, thread_allocated_t)
malloc_tsd_funcs(JEMALLOC_INLINE, thread_allocated, thread_allocated_t,
    THREAD_ALLOCATED_INITIALIZER, malloc_tsd_no_cleanup)
#endif

#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_INLINES
/******************************************************************************/
