jemalloc_internal.h.in revision 04ca1efe35349a6114523b37abbd4ca066cd17fa
#include <sys/mman.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/uio.h>

#include <errno.h>
#include <limits.h>
#ifndef SIZE_T_MAX
#  define SIZE_T_MAX	SIZE_MAX
#endif
#include <pthread.h>
#include <sched.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>
#ifndef offsetof
#  define offsetof(type, member)	((size_t)&(((type *)NULL)->member))
#endif
#include <inttypes.h>
#include <string.h>
#include <strings.h>
#include <ctype.h>
#include <unistd.h>
#include <fcntl.h>
#include <math.h>

#define	JEMALLOC_MANGLE
#include "../jemalloc@install_suffix@.h"

#include "jemalloc/internal/private_namespace.h"

#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
#include <libkern/OSAtomic.h>
#endif

#ifdef JEMALLOC_ZONE
#include <mach/mach_error.h>
#include <mach/mach_init.h>
#include <mach/vm_map.h>
#include <malloc/malloc.h>
#endif

#ifdef JEMALLOC_LAZY_LOCK
#include <dlfcn.h>
#endif

#define	RB_COMPACT
#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/qr.h"
#include "jemalloc/internal/ql.h"

extern void	(*JEMALLOC_P(malloc_message))(void *wcbopaque, const char *s);

/*
 * Define a custom assert() in order to reduce the chances of deadlock during
 * assertion failure.
 */
#ifndef assert
#  ifdef JEMALLOC_DEBUG
#    define assert(e) do {						\
	if (!(e)) {							\
		char line_buf[UMAX2S_BUFSIZE];				\
		malloc_write("<jemalloc>: ");				\
		malloc_write(__FILE__);					\
		malloc_write(":");					\
		malloc_write(u2s(__LINE__, 10, line_buf));		\
		malloc_write(": Failed assertion: ");			\
		malloc_write("\"");					\
		malloc_write(#e);					\
		malloc_write("\"\n");					\
		abort();						\
	}								\
} while (0)
#  else
#    define assert(e)
#  endif
#endif
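
/*
 * For illustration, a failed assertion in a debug build emits a message of
 * the form (file and line hypothetical):
 *
 *   <jemalloc>: src/arena.c:123: Failed assertion: "ret != NULL"
 *
 * Each fragment is written separately via malloc_write() so that no
 * allocation or formatting buffer is needed on the failure path.
 */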

#ifdef JEMALLOC_DEBUG
#  define dassert(e) assert(e)
#else
#  define dassert(e)
#endif

/*
 * jemalloc can conceptually be broken into components (arena, tcache, etc.),
 * but there are circular dependencies that cannot be broken without
 * substantial performance degradation.  In order to reduce the effect on
 * visual code flow, read the header files in multiple passes, with one of the
 * following cpp variables defined during each pass:
 *
 *   JEMALLOC_H_TYPES   : Preprocessor-defined constants and pseudo-opaque data
 *                        types.
 *   JEMALLOC_H_STRUCTS : Data structures.
 *   JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
 *   JEMALLOC_H_INLINES : Inline functions.
 */
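/*
 * Sketch of the pattern each internal header follows (names hypothetical),
 * with one section exposed per pass:
 *
 *   #ifdef JEMALLOC_H_TYPES
 *   typedef struct foo_s foo_t;
 *   #endif
 *   #ifdef JEMALLOC_H_STRUCTS
 *   struct foo_s { ... };
 *   #endif
 *   #ifdef JEMALLOC_H_EXTERNS
 *   void foo_init(void);
 *   #endif
 *   #ifdef JEMALLOC_H_INLINES
 *   JEMALLOC_INLINE void foo(void) { ... }
 *   #endif
 */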
/******************************************************************************/
#define JEMALLOC_H_TYPES

#define	ALLOCM_LG_ALIGN_MASK	((int)0x3f)

#define	ZU(z)	((size_t)z)

#ifndef __DECONST
#  define	__DECONST(type, var)	((type)(uintptr_t)(const void *)(var))
#endif

#ifdef JEMALLOC_DEBUG
   /* Disable inlining to make debugging easier. */
#  define JEMALLOC_INLINE
#  define inline
#else
#  define JEMALLOC_ENABLE_INLINE
#  define JEMALLOC_INLINE static inline
#endif

/* Size of stack-allocated buffer passed to buferror(). */
#define	BUFERROR_BUF		64

/* Minimum alignment of allocations is 2^LG_QUANTUM bytes. */
#ifdef __i386__
#  define LG_QUANTUM		4
#endif
#ifdef __ia64__
#  define LG_QUANTUM		4
#endif
#ifdef __alpha__
#  define LG_QUANTUM		4
#endif
#ifdef __sparc64__
#  define LG_QUANTUM		4
#endif
#if (defined(__amd64__) || defined(__x86_64__))
#  define LG_QUANTUM		4
#endif
#ifdef __arm__
#  define LG_QUANTUM		3
#endif
#ifdef __mips__
#  define LG_QUANTUM		3
#endif
#ifdef __powerpc__
#  define LG_QUANTUM		4
#endif
#ifdef __s390x__
#  define LG_QUANTUM		4
#endif

#define	QUANTUM			((size_t)(1U << LG_QUANTUM))
#define	QUANTUM_MASK		(QUANTUM - 1)

/* Return the smallest quantum multiple that is >= a. */
#define	QUANTUM_CEILING(a)						\
	(((a) + QUANTUM_MASK) & ~QUANTUM_MASK)
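
/*
 * Worked example: with LG_QUANTUM == 4 (QUANTUM == 16, QUANTUM_MASK == 0xf),
 * QUANTUM_CEILING(17) == (17 + 15) & ~15 == 32, while QUANTUM_CEILING(16)
 * stays 16.  The *_CEILING() macros below all use this same mask idiom.
 */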

#define	LONG			((size_t)(1U << LG_SIZEOF_LONG))
#define	LONG_MASK		(LONG - 1)

/* Return the smallest long multiple that is >= a. */
#define	LONG_CEILING(a)						\
	(((a) + LONG_MASK) & ~LONG_MASK)

#define	SIZEOF_PTR		(1U << LG_SIZEOF_PTR)
#define	PTR_MASK		(SIZEOF_PTR - 1)

/* Return the smallest (void *) multiple that is >= a. */
#define	PTR_CEILING(a)						\
	(((a) + PTR_MASK) & ~PTR_MASK)

/*
 * Maximum size of L1 cache line.  This is used to avoid cache line aliasing.
 * In addition, this controls the spacing of cacheline-spaced size classes.
 */
#define	LG_CACHELINE		6
#define	CACHELINE		((size_t)(1U << LG_CACHELINE))
#define	CACHELINE_MASK		(CACHELINE - 1)

/* Return the smallest cacheline multiple that is >= s. */
#define	CACHELINE_CEILING(s)						\
	(((s) + CACHELINE_MASK) & ~CACHELINE_MASK)

/*
 * Page size.  STATIC_PAGE_SHIFT is determined by the configure script.  If
 * DYNAMIC_PAGE_SHIFT is enabled, only use the STATIC_PAGE_* macros where
 * compile-time values are required for the purposes of defining data
 * structures.
 */
#define	STATIC_PAGE_SIZE ((size_t)(1U << STATIC_PAGE_SHIFT))
#define	STATIC_PAGE_MASK ((size_t)(STATIC_PAGE_SIZE - 1))

#ifdef PAGE_SHIFT
#  undef PAGE_SHIFT
#endif
#ifdef PAGE_SIZE
#  undef PAGE_SIZE
#endif
#ifdef PAGE_MASK
#  undef PAGE_MASK
#endif

#ifdef DYNAMIC_PAGE_SHIFT
#  define PAGE_SHIFT	lg_pagesize
#  define PAGE_SIZE	pagesize
#  define PAGE_MASK	pagesize_mask
#else
#  define PAGE_SHIFT	STATIC_PAGE_SHIFT
#  define PAGE_SIZE	STATIC_PAGE_SIZE
#  define PAGE_MASK	STATIC_PAGE_MASK
#endif

/* Return the smallest pagesize multiple that is >= s. */
#define	PAGE_CEILING(s)							\
	(((s) + PAGE_MASK) & ~PAGE_MASK)

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prn.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#ifdef JEMALLOC_ZONE
#include "jemalloc/internal/zone.h"
#endif
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_TYPES
/******************************************************************************/
#define JEMALLOC_H_STRUCTS

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prn.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#ifdef JEMALLOC_ZONE
#include "jemalloc/internal/zone.h"
#endif
#include "jemalloc/internal/prof.h"

#ifdef JEMALLOC_STATS
typedef struct {
	uint64_t	allocated;
	uint64_t	deallocated;
} thread_allocated_t;
#endif

#undef JEMALLOC_H_STRUCTS
/******************************************************************************/
#define JEMALLOC_H_EXTERNS

extern bool	opt_abort;
#ifdef JEMALLOC_FILL
extern bool	opt_junk;
#endif
#ifdef JEMALLOC_SYSV
extern bool	opt_sysv;
#endif
#ifdef JEMALLOC_XMALLOC
extern bool	opt_xmalloc;
#endif
#ifdef JEMALLOC_FILL
extern bool	opt_zero;
#endif
extern size_t	opt_narenas;

#ifdef DYNAMIC_PAGE_SHIFT
extern size_t		pagesize;
extern size_t		pagesize_mask;
extern size_t		lg_pagesize;
#endif

/* Number of CPUs. */
extern unsigned		ncpus;

extern malloc_mutex_t	arenas_lock; /* Protects arenas initialization. */
extern pthread_key_t	arenas_tsd;
#ifndef NO_TLS
/*
 * Map of pthread_self() --> arenas[???], used for selecting an arena to use
 * for allocations.
 */
extern __thread arena_t	*arenas_tls JEMALLOC_ATTR(tls_model("initial-exec"));
#  define ARENA_GET()	arenas_tls
#  define ARENA_SET(v)	do {						\
	arenas_tls = (v);						\
	pthread_setspecific(arenas_tsd, (void *)(v));			\
} while (0)
#else
#  define ARENA_GET()	((arena_t *)pthread_getspecific(arenas_tsd))
#  define ARENA_SET(v)	do {						\
	pthread_setspecific(arenas_tsd, (void *)(v));			\
} while (0)
#endif
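
/*
 * Note that even in the TLS case, ARENA_SET() mirrors the value into
 * arenas_tsd; the likely intent (an assumption, not stated here) is that the
 * pthreads TSD destructor still fires at thread exit, so per-thread cleanup
 * runs regardless of which storage mechanism serves reads.
 */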

/*
 * Arenas that are used to service external requests.  Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 */
extern arena_t		**arenas;
extern unsigned		narenas;

#ifdef JEMALLOC_STATS
#  ifndef NO_TLS
extern __thread thread_allocated_t	thread_allocated_tls;
#    define ALLOCATED_GET() (thread_allocated_tls.allocated)
#    define ALLOCATEDP_GET() (&thread_allocated_tls.allocated)
#    define DEALLOCATED_GET() (thread_allocated_tls.deallocated)
#    define DEALLOCATEDP_GET() (&thread_allocated_tls.deallocated)
#    define ALLOCATED_ADD(a, d) do {					\
	thread_allocated_tls.allocated += (a);				\
	thread_allocated_tls.deallocated += (d);			\
} while (0)
#  else
extern pthread_key_t	thread_allocated_tsd;
thread_allocated_t	*thread_allocated_get_hard(void);

#    define ALLOCATED_GET() (thread_allocated_get()->allocated)
#    define ALLOCATEDP_GET() (&thread_allocated_get()->allocated)
#    define DEALLOCATED_GET() (thread_allocated_get()->deallocated)
#    define DEALLOCATEDP_GET() (&thread_allocated_get()->deallocated)
#    define ALLOCATED_ADD(a, d) do {					\
	thread_allocated_t *thread_allocated = thread_allocated_get();	\
	thread_allocated->allocated += (a);				\
	thread_allocated->deallocated += (d);				\
} while (0)
#  endif
#endif
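
/*
 * Usage sketch (hypothetical call site): after a successful allocation of
 * usize bytes, the fast path records it via
 *
 *   ALLOCATED_ADD(usize, 0);
 *
 * and a deallocation of usize bytes via ALLOCATED_ADD(0, usize); both
 * counters only ever grow, so current usage is their difference.
 */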

arena_t	*arenas_extend(unsigned ind);
arena_t	*choose_arena_hard(void);
int	buferror(int errnum, char *buf, size_t buflen);
void	jemalloc_prefork(void);
void	jemalloc_postfork(void);

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prn.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#ifdef JEMALLOC_ZONE
#include "jemalloc/internal/zone.h"
#endif
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_EXTERNS
/******************************************************************************/
#define JEMALLOC_H_INLINES

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prn.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"

#ifndef JEMALLOC_ENABLE_INLINE
size_t	pow2_ceil(size_t x);
size_t	s2u(size_t size);
size_t	sa2u(size_t size, size_t alignment, size_t *run_size_p);
void	malloc_write(const char *s);
arena_t	*choose_arena(void);
#  if (defined(JEMALLOC_STATS) && defined(NO_TLS))
thread_allocated_t	*thread_allocated_get(void);
#  endif
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
/* Compute the smallest power of 2 that is >= x. */
JEMALLOC_INLINE size_t
pow2_ceil(size_t x)
{

	x--;
	x |= x >> 1;
	x |= x >> 2;
	x |= x >> 4;
	x |= x >> 8;
	x |= x >> 16;
#if (LG_SIZEOF_PTR == 3)
	x |= x >> 32;
#endif
	x++;
	return (x);
}
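
/*
 * The decrement/smear/increment idiom above propagates the highest set bit
 * into all lower positions, then rounds up.  Worked example: pow2_ceil(36)
 * computes 35 == 0b100011, smears it to 0b111111 == 63, and returns 64;
 * pow2_ceil(32) returns 32 unchanged because the initial decrement makes
 * exact powers of two a fixed point.
 */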

/*
 * Compute usable size that would result from allocating an object with the
 * specified size.
 */
JEMALLOC_INLINE size_t
s2u(size_t size)
{

	if (size <= small_maxclass)
		return (arena_bin_info[SMALL_SIZE2BIN(size)].reg_size);
	if (size <= arena_maxclass)
		return (PAGE_CEILING(size));
	return (CHUNK_CEILING(size));
}
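
/*
 * For example (assuming a 16-byte quantum and the default size classes),
 * s2u(17) returns the 32-byte small class's reg_size; a large request such
 * as s2u(5000) rounds to a page multiple; and anything above arena_maxclass
 * rounds to a chunk multiple.
 */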

/*
 * Compute usable size that would result from allocating an object with the
 * specified size and alignment.
 */
JEMALLOC_INLINE size_t
sa2u(size_t size, size_t alignment, size_t *run_size_p)
{
	size_t usize;

	/*
	 * Round size up to the nearest multiple of alignment.
	 *
	 * This done, we can take advantage of the fact that for each small
	 * size class, every object is aligned at the smallest power of two
	 * that is non-zero in the base two representation of the size.  For
	 * example:
	 *
	 *   Size |   Base 2 | Minimum alignment
	 *   -----+----------+------------------
	 *     96 |  1100000 |  32
	 *    144 | 10010000 |  16
	 *    192 | 11000000 |  64
	 *
	 * Depending on runtime settings, it is possible that arena_malloc()
	 * will further round up to a power of two, but that never causes
	 * correctness issues.
	 */
	usize = (size + (alignment - 1)) & (-alignment);
	/*
	 * (usize < size) protects against the combination of maximal
	 * alignment and size greater than maximal alignment.
	 */
	if (usize < size) {
		/* size_t overflow. */
		return (0);
	}

	if (usize <= arena_maxclass && alignment <= PAGE_SIZE) {
		if (usize <= small_maxclass)
			return (arena_bin_info[SMALL_SIZE2BIN(usize)].reg_size);
		return (PAGE_CEILING(usize));
	} else {
		size_t run_size;

		/*
		 * We can't achieve subpage alignment, so round up alignment
		 * permanently; it makes later calculations simpler.
		 */
		alignment = PAGE_CEILING(alignment);
		usize = PAGE_CEILING(size);
		/*
		 * (usize < size) protects against very large sizes within
		 * PAGE_SIZE of SIZE_T_MAX.
		 *
		 * (usize + alignment < usize) protects against the
		 * combination of maximal alignment and usize large enough
		 * to cause overflow.  This is similar to the first overflow
		 * check above, but it needs to be repeated due to the new
		 * usize value, which may now be *equal* to maximal
		 * alignment, whereas before we only detected overflow if the
		 * original size was *greater* than maximal alignment.
		 */
		if (usize < size || usize + alignment < usize) {
			/* size_t overflow. */
			return (0);
		}

		/*
		 * Calculate the size of the over-size run that arena_palloc()
		 * would need to allocate in order to guarantee the alignment.
		 */
		if (usize >= alignment)
			run_size = usize + alignment - PAGE_SIZE;
		else {
			/*
			 * It is possible that (alignment << 1) will cause
			 * overflow, but it doesn't matter because we also
			 * subtract PAGE_SIZE, which in the case of overflow
			 * leaves us with a very large run_size.  That causes
			 * the first conditional below to fail, which means
			 * that the bogus run_size value never gets used for
			 * anything important.
			 */
			run_size = (alignment << 1) - PAGE_SIZE;
		}
		if (run_size_p != NULL)
			*run_size_p = run_size;

		if (run_size <= arena_maxclass)
			return (PAGE_CEILING(usize));
		return (CHUNK_CEILING(usize));
	}
}
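
/*
 * Worked example (assuming 4 KiB pages and default size classes):
 * sa2u(100, 64, NULL) rounds 100 up to 128, a small size class whose layout
 * already guarantees 64-byte alignment.  With alignment > PAGE_SIZE, e.g.
 * sa2u(100, 8192, &run_size), control falls through to the over-size run
 * computation above.
 */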

/*
 * Wrapper around malloc_message() that avoids the need for
 * JEMALLOC_P(malloc_message)(...) throughout the code.
 */
JEMALLOC_INLINE void
malloc_write(const char *s)
{

	JEMALLOC_P(malloc_message)(NULL, s);
}

/*
 * Choose an arena based on a per-thread value (fast-path code, calls slow-path
 * code if necessary).
 */
JEMALLOC_INLINE arena_t *
choose_arena(void)
{
	arena_t *ret;

	ret = ARENA_GET();
	if (ret == NULL) {
		ret = choose_arena_hard();
		assert(ret != NULL);
	}

	return (ret);
}

#if (defined(JEMALLOC_STATS) && defined(NO_TLS))
JEMALLOC_INLINE thread_allocated_t *
thread_allocated_get(void)
{
	thread_allocated_t *thread_allocated = (thread_allocated_t *)
	    pthread_getspecific(thread_allocated_tsd);

	if (thread_allocated == NULL)
		return (thread_allocated_get_hard());
	return (thread_allocated);
}
#endif
#endif

#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/hash.h"
#ifdef JEMALLOC_ZONE
#include "jemalloc/internal/zone.h"
#endif

#ifndef JEMALLOC_ENABLE_INLINE
void	*imalloc(size_t size);
void	*icalloc(size_t size);
void	*ipalloc(size_t usize, size_t alignment, bool zero);
size_t	isalloc(const void *ptr);
#  ifdef JEMALLOC_IVSALLOC
size_t	ivsalloc(const void *ptr);
#  endif
void	idalloc(void *ptr);
void	*iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
    bool zero, bool no_move);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_INLINE void *
imalloc(size_t size)
{

	assert(size != 0);

	if (size <= arena_maxclass)
		return (arena_malloc(size, false));
	else
		return (huge_malloc(size, false));
}

JEMALLOC_INLINE void *
icalloc(size_t size)
{

	if (size <= arena_maxclass)
		return (arena_malloc(size, true));
	else
		return (huge_malloc(size, true));
}

JEMALLOC_INLINE void *
ipalloc(size_t usize, size_t alignment, bool zero)
{
	void *ret;

	assert(usize != 0);
	assert(usize == sa2u(usize, alignment, NULL));

	if (usize <= arena_maxclass && alignment <= PAGE_SIZE)
		ret = arena_malloc(usize, zero);
	else {
		size_t run_size
#ifdef JEMALLOC_CC_SILENCE
		    = 0
#endif
		    ;

		/*
		 * Ideally we would only ever call sa2u() once per aligned
		 * allocation request, and the caller of this function has
		 * already done so once.  However, it's rather burdensome to
		 * require every caller to pass in run_size, especially given
		 * that it's only relevant to large allocations.  Therefore,
		 * just call it again here in order to get run_size.
		 */
		sa2u(usize, alignment, &run_size);
		if (run_size <= arena_maxclass) {
			ret = arena_palloc(choose_arena(), usize, run_size,
			    alignment, zero);
		} else if (alignment <= chunksize)
			ret = huge_malloc(usize, zero);
		else
			ret = huge_palloc(usize, alignment, zero);
	}

	assert(((uintptr_t)ret & (alignment - 1)) == 0);
	return (ret);
}
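
/*
 * Callers pair ipalloc() with sa2u(), per the assertion above; a sketch of
 * the intended pattern (a zero usize signals size_t overflow):
 *
 *   usize = sa2u(size, alignment, NULL);
 *   if (usize == 0)
 *           return (NULL);
 *   p = ipalloc(usize, alignment, false);
 *
 * iralloc() below follows exactly this sequence.
 */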

JEMALLOC_INLINE size_t
isalloc(const void *ptr)
{
	size_t ret;
	arena_chunk_t *chunk;

	assert(ptr != NULL);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
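	/*
	 * Chunks are chunk-aligned and huge allocations are returned at a
	 * chunk base, so if rounding ptr down to its chunk base yields ptr
	 * itself, the allocation is huge; otherwise it is a region within an
	 * arena chunk.
	 */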
	if (chunk != ptr) {
		/* Region. */
		dassert(chunk->arena->magic == ARENA_MAGIC);

#ifdef JEMALLOC_PROF
		ret = arena_salloc_demote(ptr);
#else
		ret = arena_salloc(ptr);
#endif
	} else
		ret = huge_salloc(ptr);

	return (ret);
}

#ifdef JEMALLOC_IVSALLOC
JEMALLOC_INLINE size_t
ivsalloc(const void *ptr)
{

	/* Return 0 if ptr is not within a chunk managed by jemalloc. */
	if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == NULL)
		return (0);

	return (isalloc(ptr));
}
#endif

JEMALLOC_INLINE void
idalloc(void *ptr)
{
	arena_chunk_t *chunk;

	assert(ptr != NULL);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr)
		arena_dalloc(chunk->arena, chunk, ptr);
	else
		huge_dalloc(ptr, true);
}

JEMALLOC_INLINE void *
iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
    bool no_move)
{
	void *ret;
	size_t oldsize;

	assert(ptr != NULL);
	assert(size != 0);

	oldsize = isalloc(ptr);

	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
	    != 0) {
		size_t usize, copysize;

		/*
		 * Existing object alignment is inadequate; allocate new space
		 * and copy.
		 */
		if (no_move)
			return (NULL);
		usize = sa2u(size + extra, alignment, NULL);
		if (usize == 0)
			return (NULL);
		ret = ipalloc(usize, alignment, zero);
		if (ret == NULL) {
			if (extra == 0)
				return (NULL);
			/* Try again, without extra this time. */
			usize = sa2u(size, alignment, NULL);
			if (usize == 0)
				return (NULL);
			ret = ipalloc(usize, alignment, zero);
			if (ret == NULL)
				return (NULL);
		}
		/*
		 * Copy at most size bytes (not size+extra), since the caller
		 * has no expectation that the extra bytes will be reliably
		 * preserved.
		 */
		copysize = (size < oldsize) ? size : oldsize;
		memcpy(ret, ptr, copysize);
		idalloc(ptr);
		return (ret);
	}

	if (no_move) {
		if (size <= arena_maxclass) {
			return (arena_ralloc_no_move(ptr, oldsize, size,
			    extra, zero));
		} else {
			return (huge_ralloc_no_move(ptr, oldsize, size,
			    extra));
		}
	} else {
		if (size + extra <= arena_maxclass) {
			return (arena_ralloc(ptr, oldsize, size, extra,
			    alignment, zero));
		} else {
			return (huge_ralloc(ptr, oldsize, size, extra,
			    alignment, zero));
		}
	}
}
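
/*
 * Usage sketch: pass no_move=true to attempt an in-place resize only;
 * iralloc() then returns NULL rather than relocating the object, e.g.:
 *
 *   if (iralloc(ptr, size, 0, 0, false, true) == NULL)
 *           ... fall back to an explicit allocate/copy/free ...
 */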
#endif

#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_INLINES
/******************************************************************************/