jemalloc_internal.h.in revision 4c2faa8a7c42a47a6bea509f5a23234bc5a66d40
#include <sys/mman.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/uio.h>

#include <errno.h>
#include <limits.h>
#ifndef SIZE_T_MAX
#  define SIZE_T_MAX	SIZE_MAX
#endif
#include <pthread.h>
#include <sched.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>
#ifndef offsetof
#  define offsetof(type, member)	((size_t)&(((type *)NULL)->member))
#endif
#include <inttypes.h>
#include <string.h>
#include <strings.h>
#include <ctype.h>
#include <unistd.h>
#include <fcntl.h>
#include <pthread.h>
#include <math.h>

#define	JEMALLOC_NO_DEMANGLE
#include "../jemalloc@install_suffix@.h"

#include "jemalloc/internal/private_namespace.h"

#ifdef JEMALLOC_CC_SILENCE
#define	UNUSED JEMALLOC_ATTR(unused)
#else
#define	UNUSED
#endif

static const bool config_debug =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
static const bool config_dss =
#ifdef JEMALLOC_DSS
    true
#else
    false
#endif
    ;
static const bool config_dynamic_page_shift =
#ifdef JEMALLOC_DYNAMIC_PAGE_SHIFT
    true
#else
    false
#endif
    ;
static const bool config_fill =
#ifdef JEMALLOC_FILL
    true
#else
    false
#endif
    ;
static const bool config_lazy_lock =
#ifdef JEMALLOC_LAZY_LOCK
    true
#else
    false
#endif
    ;
static const bool config_prof =
#ifdef JEMALLOC_PROF
    true
#else
    false
#endif
    ;
static const bool config_prof_libgcc =
#ifdef JEMALLOC_PROF_LIBGCC
    true
#else
    false
#endif
    ;
static const bool config_prof_libunwind =
#ifdef JEMALLOC_PROF_LIBUNWIND
    true
#else
    false
#endif
    ;
static const bool config_stats =
#ifdef JEMALLOC_STATS
    true
#else
    false
#endif
    ;
static const bool config_tcache =
#ifdef JEMALLOC_TCACHE
    true
#else
    false
#endif
    ;
static const bool config_tls =
#ifdef JEMALLOC_TLS
    true
#else
    false
#endif
    ;
static const bool config_xmalloc =
#ifdef JEMALLOC_XMALLOC
    true
#else
    false
#endif
    ;
static const bool config_ivsalloc =
#ifdef JEMALLOC_IVSALLOC
    true
#else
    false
#endif
    ;

#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
#include <libkern/OSAtomic.h>
#endif

#ifdef JEMALLOC_ZONE
#include <mach/mach_error.h>
#include <mach/mach_init.h>
#include <mach/vm_map.h>
#include <malloc/malloc.h>
#endif

#define	RB_COMPACT
#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/qr.h"
#include "jemalloc/internal/ql.h"

/*
 * jemalloc can conceptually be broken into components (arena, tcache, etc.),
 * but there are circular dependencies that cannot be broken without
 * substantial performance degradation.  In order to reduce the effect on
 * visual code flow, read the header files in multiple passes, with one of the
 * following cpp variables defined during each pass:
 *
 *   JEMALLOC_H_TYPES   : Preprocessor-defined constants and pseudo-opaque data
 *                        types.
 *   JEMALLOC_H_STRUCTS : Data structures.
 *   JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
 *   JEMALLOC_H_INLINES : Inline functions.
 */
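/*
 * As an illustrative sketch only (foo is a hypothetical component; the real
 * headers included below follow this same pattern), each internal header is
 * expected to wrap its contents in the four pass-specific sections:
 *
 *   #ifdef JEMALLOC_H_TYPES
 *   typedef struct foo_s foo_t;
 *   #endif
 *   #ifdef JEMALLOC_H_STRUCTS
 *   struct foo_s {
 *   	size_t	len;
 *   };
 *   #endif
 *   #ifdef JEMALLOC_H_EXTERNS
 *   bool	foo_boot(void);
 *   #endif
 *   #ifdef JEMALLOC_H_INLINES
 *   JEMALLOC_INLINE size_t
 *   foo_len(const foo_t *foo)
 *   {
 *
 *   	return (foo->len);
 *   }
 *   #endif
 */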
/******************************************************************************/
#define JEMALLOC_H_TYPES

#define	ALLOCM_LG_ALIGN_MASK	((int)0x3f)

#define	ZU(z)	((size_t)z)

#ifndef __DECONST
#  define	__DECONST(type, var)	((type)(uintptr_t)(const void *)(var))
#endif

#ifdef JEMALLOC_DEBUG
   /* Disable inlining to make debugging easier. */
#  define JEMALLOC_INLINE
#  define inline
#else
#  define JEMALLOC_ENABLE_INLINE
#  define JEMALLOC_INLINE static inline
#endif

/* Smallest size class to support. */
#define	LG_TINY_MIN		3
#define	TINY_MIN		(1U << LG_TINY_MIN)

/*
 * Minimum alignment of allocations is 2^LG_QUANTUM bytes (ignoring tiny size
 * classes).
 */
#ifndef LG_QUANTUM
#  ifdef __i386__
#    define LG_QUANTUM		4
#  endif
#  ifdef __ia64__
#    define LG_QUANTUM		4
#  endif
#  ifdef __alpha__
#    define LG_QUANTUM		4
#  endif
#  ifdef __sparc64__
#    define LG_QUANTUM		4
#  endif
#  if (defined(__amd64__) || defined(__x86_64__))
#    define LG_QUANTUM		4
#  endif
#  ifdef __arm__
#    define LG_QUANTUM		3
#  endif
#  ifdef __mips__
#    define LG_QUANTUM		3
#  endif
#  ifdef __powerpc__
#    define LG_QUANTUM		4
#  endif
#  ifdef __s390x__
#    define LG_QUANTUM		4
#  endif
#  ifdef __SH4__
#    define LG_QUANTUM		4
#  endif
#  ifdef __tile__
#    define LG_QUANTUM		4
#  endif
#  ifndef LG_QUANTUM
#    error "No LG_QUANTUM definition for architecture; specify via CPPFLAGS"
#  endif
#endif

#define	QUANTUM			((size_t)(1U << LG_QUANTUM))
#define	QUANTUM_MASK		(QUANTUM - 1)

/* Return the smallest quantum multiple that is >= a. */
#define	QUANTUM_CEILING(a)						\
	(((a) + QUANTUM_MASK) & ~QUANTUM_MASK)
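/*
 * Worked example (illustrative arithmetic only): with LG_QUANTUM == 4 the
 * quantum is 16 bytes, so QUANTUM_CEILING(1) == 16, QUANTUM_CEILING(16) == 16,
 * and QUANTUM_CEILING(17) == 32.  The other *_CEILING() macros below use the
 * same mask-based rounding.
 */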

#define	LONG			((size_t)(1U << LG_SIZEOF_LONG))
#define	LONG_MASK		(LONG - 1)

/* Return the smallest long multiple that is >= a. */
#define	LONG_CEILING(a)							\
	(((a) + LONG_MASK) & ~LONG_MASK)

#define	SIZEOF_PTR		(1U << LG_SIZEOF_PTR)
#define	PTR_MASK		(SIZEOF_PTR - 1)

/* Return the smallest (void *) multiple that is >= a. */
#define	PTR_CEILING(a)							\
	(((a) + PTR_MASK) & ~PTR_MASK)

/*
 * Maximum size of L1 cache line.  This is used to avoid cache line aliasing.
 * In addition, this controls the spacing of cacheline-spaced size classes.
 */
#define	LG_CACHELINE		6
#define	CACHELINE		((size_t)(1U << LG_CACHELINE))
#define	CACHELINE_MASK		(CACHELINE - 1)

/* Return the smallest cacheline multiple that is >= s. */
#define	CACHELINE_CEILING(s)						\
	(((s) + CACHELINE_MASK) & ~CACHELINE_MASK)

/* Page size.  STATIC_PAGE_SHIFT is determined by the configure script. */
#define	STATIC_PAGE_SIZE ((size_t)(1U << STATIC_PAGE_SHIFT))
#define	STATIC_PAGE_MASK ((size_t)(STATIC_PAGE_SIZE - 1))
#ifdef PAGE_SHIFT
#  undef PAGE_SHIFT
#endif
#ifdef PAGE_SIZE
#  undef PAGE_SIZE
#endif
#ifdef PAGE_MASK
#  undef PAGE_MASK
#endif
#define	PAGE_SHIFT	STATIC_PAGE_SHIFT
#define	PAGE_SIZE	STATIC_PAGE_SIZE
#define	PAGE_MASK	STATIC_PAGE_MASK

/* Return the smallest pagesize multiple that is >= s. */
#define	PAGE_CEILING(s)							\
	(((s) + PAGE_MASK) & ~PAGE_MASK)

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#ifdef JEMALLOC_ZONE
#include "jemalloc/internal/zone.h"
#endif
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_TYPES
/******************************************************************************/
#define JEMALLOC_H_STRUCTS

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#ifdef JEMALLOC_ZONE
#include "jemalloc/internal/zone.h"
#endif
#include "jemalloc/internal/prof.h"

typedef struct {
	uint64_t	allocated;
	uint64_t	deallocated;
} thread_allocated_t;

#undef JEMALLOC_H_STRUCTS
/******************************************************************************/
#define JEMALLOC_H_EXTERNS

extern bool	opt_abort;
extern bool	opt_junk;
extern bool	opt_xmalloc;
extern bool	opt_zero;
extern size_t	opt_narenas;

#ifdef DYNAMIC_PAGE_SHIFT
extern size_t		pagesize;
extern size_t		pagesize_mask;
extern size_t		lg_pagesize;
#endif

/* Number of CPUs. */
extern unsigned		ncpus;

extern malloc_mutex_t	arenas_lock; /* Protects arenas initialization. */
extern pthread_key_t	arenas_tsd;
#ifndef NO_TLS
/*
 * Map of pthread_self() --> arenas[???], used for selecting an arena to use
 * for allocations.
 */
extern __thread arena_t	*arenas_tls JEMALLOC_ATTR(tls_model("initial-exec"));
#  define ARENA_GET()	arenas_tls
#  define ARENA_SET(v)	do {						\
	arenas_tls = (v);						\
	pthread_setspecific(arenas_tsd, (void *)(v));			\
} while (0)
#else
#  define ARENA_GET()	((arena_t *)pthread_getspecific(arenas_tsd))
#  define ARENA_SET(v)	do {						\
	pthread_setspecific(arenas_tsd, (void *)(v));			\
} while (0)
#endif

/*
 * Arenas that are used to service external requests.  Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 */
extern arena_t		**arenas;
extern unsigned		narenas;

#ifndef NO_TLS
extern __thread thread_allocated_t	thread_allocated_tls;
#    define ALLOCATED_GET() (thread_allocated_tls.allocated)
#    define ALLOCATEDP_GET() (&thread_allocated_tls.allocated)
#    define DEALLOCATED_GET() (thread_allocated_tls.deallocated)
#    define DEALLOCATEDP_GET() (&thread_allocated_tls.deallocated)
#    define ALLOCATED_ADD(a, d) do {					\
	thread_allocated_tls.allocated += a;				\
	thread_allocated_tls.deallocated += d;				\
} while (0)
#else
#    define ALLOCATED_GET() (thread_allocated_get()->allocated)
#    define ALLOCATEDP_GET() (&thread_allocated_get()->allocated)
#    define DEALLOCATED_GET() (thread_allocated_get()->deallocated)
#    define DEALLOCATEDP_GET() (&thread_allocated_get()->deallocated)
#    define ALLOCATED_ADD(a, d) do {					\
	thread_allocated_t *thread_allocated = thread_allocated_get();	\
	thread_allocated->allocated += (a);				\
	thread_allocated->deallocated += (d);				\
} while (0)
#endif
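/*
 * Illustrative usage sketch (hypothetical call sites, not part of this
 * header's API): an allocation path that has just computed the usable size of
 * a new object would account for it with
 *
 *   ALLOCATED_ADD(usize, 0);
 *
 * and a deallocation path with ALLOCATED_ADD(0, usize).  The TLS and
 * pthread_getspecific() variants above are interchangeable at the call site.
 */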
extern pthread_key_t	thread_allocated_tsd;
thread_allocated_t	*thread_allocated_get_hard(void);

arena_t	*arenas_extend(unsigned ind);
arena_t	*choose_arena_hard(void);
void	jemalloc_prefork(void);
void	jemalloc_postfork(void);

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#ifdef JEMALLOC_ZONE
#include "jemalloc/internal/zone.h"
#endif
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_EXTERNS
/******************************************************************************/
#define JEMALLOC_H_INLINES

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"

#ifndef JEMALLOC_ENABLE_INLINE
size_t	s2u(size_t size);
size_t	sa2u(size_t size, size_t alignment, size_t *run_size_p);
arena_t	*choose_arena(void);
thread_allocated_t	*thread_allocated_get(void);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
/*
 * Compute usable size that would result from allocating an object with the
 * specified size.
 */
JEMALLOC_INLINE size_t
s2u(size_t size)
{

	if (size <= SMALL_MAXCLASS)
		return (arena_bin_info[SMALL_SIZE2BIN(size)].reg_size);
	if (size <= arena_maxclass)
		return (PAGE_CEILING(size));
	return (CHUNK_CEILING(size));
}
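/*
 * For example (assuming the default configuration, in which SMALL_MAXCLASS is
 * smaller than a page), s2u(PAGE_SIZE + 1) returns 2 * PAGE_SIZE, and any size
 * larger than arena_maxclass is rounded up to a multiple of the chunk size by
 * CHUNK_CEILING().
 */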

/*
 * Compute usable size that would result from allocating an object with the
 * specified size and alignment.
 */
JEMALLOC_INLINE size_t
sa2u(size_t size, size_t alignment, size_t *run_size_p)
{
	size_t usize;

	/*
	 * Round size up to the nearest multiple of alignment.
	 *
	 * This done, we can take advantage of the fact that for each small
	 * size class, every object is aligned at the smallest power of two
	 * that is non-zero in the base two representation of the size.  For
	 * example:
	 *
	 *   Size |   Base 2 | Minimum alignment
	 *   -----+----------+------------------
	 *     96 |  1100000 |  32
	 *    144 | 10100000 |  32
	 *    192 | 11000000 |  64
	 */
	usize = (size + (alignment - 1)) & (-alignment);
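	/*
	 * Illustrative example (hypothetical values): size == 100 and
	 * alignment == 32 give usize == (100 + 31) & ~31 == 128; per the rule
	 * illustrated above, every object in the 128 byte size class is at
	 * least 128 byte aligned, which satisfies the requested 32 byte
	 * alignment.
	 */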
	/*
	 * (usize < size) protects against the combination of maximal
	 * alignment and size greater than maximal alignment.
	 */
	if (usize < size) {
		/* size_t overflow. */
		return (0);
	}

	if (usize <= arena_maxclass && alignment <= PAGE_SIZE) {
		if (usize <= SMALL_MAXCLASS)
			return (arena_bin_info[SMALL_SIZE2BIN(usize)].reg_size);
		return (PAGE_CEILING(usize));
	} else {
		size_t run_size;

		/*
		 * We can't achieve subpage alignment, so round up alignment
		 * permanently; it makes later calculations simpler.
		 */
		alignment = PAGE_CEILING(alignment);
		usize = PAGE_CEILING(size);
		/*
		 * (usize < size) protects against very large sizes within
		 * PAGE_SIZE of SIZE_T_MAX.
		 *
		 * (usize + alignment < usize) protects against the
		 * combination of maximal alignment and usize large enough
		 * to cause overflow.  This is similar to the first overflow
		 * check above, but it needs to be repeated due to the new
		 * usize value, which may now be *equal* to maximal
		 * alignment, whereas before we only detected overflow if the
		 * original size was *greater* than maximal alignment.
		 */
		if (usize < size || usize + alignment < usize) {
			/* size_t overflow. */
			return (0);
		}

		/*
		 * Calculate the size of the over-size run that arena_palloc()
		 * would need to allocate in order to guarantee the alignment.
		 */
		if (usize >= alignment)
			run_size = usize + alignment - PAGE_SIZE;
		else {
			/*
			 * It is possible that (alignment << 1) will cause
			 * overflow, but it doesn't matter because we also
			 * subtract PAGE_SIZE, which in the case of overflow
			 * leaves us with a very large run_size.  That causes
			 * the first conditional below to fail, which means
			 * that the bogus run_size value never gets used for
			 * anything important.
			 */
			run_size = (alignment << 1) - PAGE_SIZE;
		}
		if (run_size_p != NULL)
			*run_size_p = run_size;

		if (run_size <= arena_maxclass)
			return (PAGE_CEILING(usize));
		return (CHUNK_CEILING(usize));
	}
}
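/*
 * Worked example of the large-alignment path (illustrative values, assuming
 * the default chunk size): sa2u(PAGE_SIZE, 2 * PAGE_SIZE, &run_size) computes
 * usize == PAGE_SIZE; since usize < alignment, run_size == (alignment << 1) -
 * PAGE_SIZE == 3 * PAGE_SIZE, which fits in an arena run, so the usable size
 * returned is PAGE_SIZE.
 */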

/* Choose an arena based on a per-thread value. */
JEMALLOC_INLINE arena_t *
choose_arena(void)
{
	arena_t *ret;

	ret = ARENA_GET();
	if (ret == NULL) {
		ret = choose_arena_hard();
		assert(ret != NULL);
	}

	return (ret);
}

JEMALLOC_INLINE thread_allocated_t *
thread_allocated_get(void)
{
	thread_allocated_t *thread_allocated = (thread_allocated_t *)
	    pthread_getspecific(thread_allocated_tsd);

	if (thread_allocated == NULL)
		return (thread_allocated_get_hard());
	return (thread_allocated);
}
#endif

#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/hash.h"
#ifdef JEMALLOC_ZONE
#include "jemalloc/internal/zone.h"
#endif

#ifndef JEMALLOC_ENABLE_INLINE
void	*imalloc(size_t size);
void	*icalloc(size_t size);
void	*ipalloc(size_t usize, size_t alignment, bool zero);
size_t	isalloc(const void *ptr);
size_t	ivsalloc(const void *ptr);
void	idalloc(void *ptr);
void	*iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
    bool zero, bool no_move);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_INLINE void *
imalloc(size_t size)
{

	assert(size != 0);

	if (size <= arena_maxclass)
		return (arena_malloc(size, false));
	else
		return (huge_malloc(size, false));
}

JEMALLOC_INLINE void *
icalloc(size_t size)
{

	if (size <= arena_maxclass)
		return (arena_malloc(size, true));
	else
		return (huge_malloc(size, true));
}

JEMALLOC_INLINE void *
ipalloc(size_t usize, size_t alignment, bool zero)
{
	void *ret;

	assert(usize != 0);
	assert(usize == sa2u(usize, alignment, NULL));

	if (usize <= arena_maxclass && alignment <= PAGE_SIZE)
		ret = arena_malloc(usize, zero);
	else {
		size_t run_size
#ifdef JEMALLOC_CC_SILENCE
		    = 0
#endif
		    ;

		/*
		 * Ideally we would only ever call sa2u() once per aligned
		 * allocation request, and the caller of this function has
		 * already done so once.  However, it's rather burdensome to
		 * require every caller to pass in run_size, especially given
		 * that it's only relevant to large allocations.  Therefore,
		 * just call it again here in order to get run_size.
		 */
		sa2u(usize, alignment, &run_size);
		if (run_size <= arena_maxclass) {
			ret = arena_palloc(choose_arena(), usize, run_size,
			    alignment, zero);
		} else if (alignment <= chunksize)
			ret = huge_malloc(usize, zero);
		else
			ret = huge_palloc(usize, alignment, zero);
	}

	assert(((uintptr_t)ret & (alignment - 1)) == 0);
	return (ret);
}
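/*
 * Illustrative caller sketch (hypothetical code, not part of this header): an
 * aligned allocation entry point is expected to canonicalize the request via
 * sa2u() before calling ipalloc(), e.g.
 *
 *   usize = sa2u(size, alignment, NULL);
 *   if (usize == 0)
 *   	return (NULL);
 *   p = ipalloc(usize, alignment, false);
 *
 * which is the contract that the assertions at the top of ipalloc() are meant
 * to check.
 */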

JEMALLOC_INLINE size_t
isalloc(const void *ptr)
{
	size_t ret;
	arena_chunk_t *chunk;

	assert(ptr != NULL);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr) {
		/* Region. */
		if (config_prof)
			ret = arena_salloc_demote(ptr);
		else
			ret = arena_salloc(ptr);
	} else
		ret = huge_salloc(ptr);

	return (ret);
}
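/*
 * Note: the CHUNK_ADDR2BASE() comparison distinguishes huge allocations, which
 * are always chunk-aligned, from arena-managed regions, which never coincide
 * with the base of their chunk because the arena chunk header lives there.
 */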

JEMALLOC_INLINE size_t
ivsalloc(const void *ptr)
{

	/* Return 0 if ptr is not within a chunk managed by jemalloc. */
	if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == NULL)
		return (0);

	return (isalloc(ptr));
}

JEMALLOC_INLINE void
idalloc(void *ptr)
{
	arena_chunk_t *chunk;

	assert(ptr != NULL);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr)
		arena_dalloc(chunk->arena, chunk, ptr);
	else
		huge_dalloc(ptr, true);
}

JEMALLOC_INLINE void *
iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
    bool no_move)
{
	void *ret;
	size_t oldsize;

	assert(ptr != NULL);
	assert(size != 0);

	oldsize = isalloc(ptr);

	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
	    != 0) {
		size_t usize, copysize;

		/*
		 * Existing object alignment is inadequate; allocate new space
		 * and copy.
		 */
		if (no_move)
			return (NULL);
		usize = sa2u(size + extra, alignment, NULL);
		if (usize == 0)
			return (NULL);
		ret = ipalloc(usize, alignment, zero);
		if (ret == NULL) {
			if (extra == 0)
				return (NULL);
			/* Try again, without extra this time. */
			usize = sa2u(size, alignment, NULL);
			if (usize == 0)
				return (NULL);
			ret = ipalloc(usize, alignment, zero);
			if (ret == NULL)
				return (NULL);
		}
		/*
		 * Copy at most size bytes (not size+extra), since the caller
		 * has no expectation that the extra bytes will be reliably
		 * preserved.
		 */
		copysize = (size < oldsize) ? size : oldsize;
		memcpy(ret, ptr, copysize);
		idalloc(ptr);
		return (ret);
	}

	if (no_move) {
		if (size <= arena_maxclass) {
			return (arena_ralloc_no_move(ptr, oldsize, size,
			    extra, zero));
		} else {
			return (huge_ralloc_no_move(ptr, oldsize, size,
			    extra));
		}
	} else {
		if (size + extra <= arena_maxclass) {
			return (arena_ralloc(ptr, oldsize, size, extra,
			    alignment, zero));
		} else {
			return (huge_ralloc(ptr, oldsize, size, extra,
			    alignment, zero));
		}
	}
}
#endif

#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_INLINES
/******************************************************************************/