jemalloc_internal.h.in revision e24c7af35d1e9d24d02166ac98cfca7cf807ff13
#include <sys/mman.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/uio.h>

#include <errno.h>
#include <limits.h>
#ifndef SIZE_T_MAX
#  define SIZE_T_MAX	SIZE_MAX
#endif
#include <pthread.h>
#include <sched.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>
#ifndef offsetof
#  define offsetof(type, member)	((size_t)&(((type *)NULL)->member))
#endif
#include <inttypes.h>
#include <string.h>
#include <strings.h>
#include <ctype.h>
#include <unistd.h>
#include <fcntl.h>
#include <pthread.h>
#include <math.h>

#define	JEMALLOC_NO_DEMANGLE
#include "../jemalloc@install_suffix@.h"

#include "jemalloc/internal/private_namespace.h"

#ifdef JEMALLOC_CC_SILENCE
#define	UNUSED JEMALLOC_ATTR(unused)
#else
#define	UNUSED
#endif

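/*
 * Each JEMALLOC_* feature macro is mirrored below by a config_* constant so
 * that feature tests can be written as ordinary C conditionals (e.g.
 * "if (config_debug) { ... }") rather than cpp conditionals; the compiler
 * eliminates the dead branch when the feature is disabled.
 */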
static const bool config_debug =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
static const bool config_dss =
#ifdef JEMALLOC_DSS
    true
#else
    false
#endif
    ;
static const bool config_dynamic_page_shift =
#ifdef JEMALLOC_DYNAMIC_PAGE_SHIFT
    true
#else
    false
#endif
    ;
static const bool config_fill =
#ifdef JEMALLOC_FILL
    true
#else
    false
#endif
    ;
static const bool config_lazy_lock =
#ifdef JEMALLOC_LAZY_LOCK
    true
#else
    false
#endif
    ;
static const bool config_prof =
#ifdef JEMALLOC_PROF
    true
#else
    false
#endif
    ;
static const bool config_prof_libgcc =
#ifdef JEMALLOC_PROF_LIBGCC
    true
#else
    false
#endif
    ;
static const bool config_prof_libunwind =
#ifdef JEMALLOC_PROF_LIBUNWIND
    true
#else
    false
#endif
    ;
static const bool config_stats =
#ifdef JEMALLOC_STATS
    true
#else
    false
#endif
    ;
static const bool config_tcache =
#ifdef JEMALLOC_TCACHE
    true
#else
    false
#endif
    ;
static const bool config_tls =
#ifdef JEMALLOC_TLS
    true
#else
    false
#endif
    ;
static const bool config_xmalloc =
#ifdef JEMALLOC_XMALLOC
    true
#else
    false
#endif
    ;
static const bool config_ivsalloc =
#ifdef JEMALLOC_IVSALLOC
    true
#else
    false
#endif
    ;

#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
#include <libkern/OSAtomic.h>
#endif

#ifdef JEMALLOC_ZONE
#include <mach/mach_error.h>
#include <mach/mach_init.h>
#include <mach/vm_map.h>
#include <malloc/malloc.h>
#endif

#define	RB_COMPACT
#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/qr.h"
#include "jemalloc/internal/ql.h"

/*
 * jemalloc can conceptually be broken into components (arena, tcache, etc.),
 * but there are circular dependencies that cannot be broken without
 * substantial performance degradation.  In order to reduce the effect on
 * visual code flow, read the header files in multiple passes, with one of the
 * following cpp variables defined during each pass:
 *
 *   JEMALLOC_H_TYPES   : Preprocessor-defined constants and pseudo-opaque data
 *                        types.
 *   JEMALLOC_H_STRUCTS : Data structures.
 *   JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
 *   JEMALLOC_H_INLINES : Inline functions.
 */
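/*
 * As a sketch (not an actual header), each internal header is laid out so
 * that each pass picks up only the matching section:
 *
 *   #ifdef JEMALLOC_H_TYPES
 *   ...constants and typedefs...
 *   #endif
 *   #ifdef JEMALLOC_H_STRUCTS
 *   ...struct definitions...
 *   #endif
 *   #ifdef JEMALLOC_H_EXTERNS
 *   ...declarations and prototypes...
 *   #endif
 *   #ifdef JEMALLOC_H_INLINES
 *   ...inline functions...
 *   #endif
 */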
/******************************************************************************/
#define JEMALLOC_H_TYPES

#define	ALLOCM_LG_ALIGN_MASK	((int)0x3f)

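/* Use ZU() to construct constant expressions of type size_t. */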
#define	ZU(z)	((size_t)z)

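/* __DECONST() casts away const; BSD systems provide it, so only define a fallback. */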
#ifndef __DECONST
#  define	__DECONST(type, var)	((type)(uintptr_t)(const void *)(var))
#endif

#ifdef JEMALLOC_DEBUG
   /* Disable inlining to make debugging easier. */
#  define JEMALLOC_INLINE
#  define inline
#else
#  define JEMALLOC_ENABLE_INLINE
#  define JEMALLOC_INLINE static inline
#endif

/* Smallest size class to support. */
#define	LG_TINY_MIN		3
#define	TINY_MIN		(1U << LG_TINY_MIN)

/*
 * Minimum alignment of allocations is 2^LG_QUANTUM bytes (ignoring tiny size
 * classes).
 */
#ifndef LG_QUANTUM
#  ifdef __i386__
#    define LG_QUANTUM		4
#  endif
#  ifdef __ia64__
#    define LG_QUANTUM		4
#  endif
#  ifdef __alpha__
#    define LG_QUANTUM		4
#  endif
#  ifdef __sparc64__
#    define LG_QUANTUM		4
#  endif
#  if (defined(__amd64__) || defined(__x86_64__))
#    define LG_QUANTUM		4
#  endif
#  ifdef __arm__
#    define LG_QUANTUM		3
#  endif
#  ifdef __mips__
#    define LG_QUANTUM		3
#  endif
#  ifdef __powerpc__
#    define LG_QUANTUM		4
#  endif
#  ifdef __s390x__
#    define LG_QUANTUM		4
#  endif
#  ifdef __SH4__
#    define LG_QUANTUM		4
#  endif
#  ifdef __tile__
#    define LG_QUANTUM		4
#  endif
#  ifndef LG_QUANTUM
#    error "No LG_QUANTUM definition for architecture; specify via CPPFLAGS"
#  endif
#endif

#define	QUANTUM			((size_t)(1U << LG_QUANTUM))
#define	QUANTUM_MASK		(QUANTUM - 1)

/* Return the smallest quantum multiple that is >= a. */
#define	QUANTUM_CEILING(a)						\
	(((a) + QUANTUM_MASK) & ~QUANTUM_MASK)
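/*
 * For example, with LG_QUANTUM == 4 (QUANTUM == 16):
 *   QUANTUM_CEILING(17) == (17 + 15) & ~15 == 32
 *   QUANTUM_CEILING(32) == 32
 */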

#define	LONG			((size_t)(1U << LG_SIZEOF_LONG))
#define	LONG_MASK		(LONG - 1)

/* Return the smallest long multiple that is >= a. */
#define	LONG_CEILING(a)							\
	(((a) + LONG_MASK) & ~LONG_MASK)

#define	SIZEOF_PTR		(1U << LG_SIZEOF_PTR)
#define	PTR_MASK		(SIZEOF_PTR - 1)

/* Return the smallest (void *) multiple that is >= a. */
#define	PTR_CEILING(a)							\
	(((a) + PTR_MASK) & ~PTR_MASK)

/*
 * Maximum size of L1 cache line.  This is used to avoid cache line aliasing.
 * In addition, this controls the spacing of cacheline-spaced size classes.
 */
#define	LG_CACHELINE		6
#define	CACHELINE		((size_t)(1U << LG_CACHELINE))
#define	CACHELINE_MASK		(CACHELINE - 1)

/* Return the smallest cacheline multiple that is >= s. */
#define	CACHELINE_CEILING(s)						\
	(((s) + CACHELINE_MASK) & ~CACHELINE_MASK)

/* Page size.  STATIC_PAGE_SHIFT is determined by the configure script. */
#define	STATIC_PAGE_SIZE ((size_t)(1U << STATIC_PAGE_SHIFT))
#define	STATIC_PAGE_MASK ((size_t)(STATIC_PAGE_SIZE - 1))
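/*
 * Some system headers (e.g. <sys/param.h>) define PAGE_SIZE and friends;
 * discard any such definitions in favor of the jemalloc equivalents.
 */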
#ifdef PAGE_SHIFT
#  undef PAGE_SHIFT
#endif
#ifdef PAGE_SIZE
#  undef PAGE_SIZE
#endif
#ifdef PAGE_MASK
#  undef PAGE_MASK
#endif
#define	PAGE_SHIFT	STATIC_PAGE_SHIFT
#define	PAGE_SIZE	STATIC_PAGE_SIZE
#define	PAGE_MASK	STATIC_PAGE_MASK

/* Return the smallest pagesize multiple that is >= s. */
#define	PAGE_CEILING(s)							\
	(((s) + PAGE_MASK) & ~PAGE_MASK)

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#ifdef JEMALLOC_ZONE
#include "jemalloc/internal/zone.h"
#endif
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_TYPES
/******************************************************************************/
#define JEMALLOC_H_STRUCTS

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#ifdef JEMALLOC_ZONE
#include "jemalloc/internal/zone.h"
#endif
#include "jemalloc/internal/prof.h"

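/*
 * Per thread counters of cumulative allocated/deallocated bytes, as reported
 * by the "thread.allocated" and "thread.deallocated" mallctls.
 */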
typedef struct {
	uint64_t	allocated;
	uint64_t	deallocated;
} thread_allocated_t;

#undef JEMALLOC_H_STRUCTS
/******************************************************************************/
#define JEMALLOC_H_EXTERNS

extern bool	opt_abort;
extern bool	opt_junk;
extern bool	opt_xmalloc;
extern bool	opt_zero;
extern size_t	opt_narenas;

#ifdef DYNAMIC_PAGE_SHIFT
extern size_t		pagesize;
extern size_t		pagesize_mask;
extern size_t		lg_pagesize;
#endif

/* Number of CPUs. */
extern unsigned		ncpus;

extern malloc_mutex_t	arenas_lock; /* Protects arenas initialization. */
extern pthread_key_t	arenas_tsd;
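/*
 * The arena association is always mirrored into pthread TSD; when TLS is
 * available, ARENA_GET() reads the faster __thread variable and ARENA_SET()
 * keeps the two copies in sync.
 */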
#ifdef JEMALLOC_TLS
/*
 * Map of pthread_self() --> arenas[???], used for selecting an arena to use
 * for allocations.
 */
extern __thread arena_t	*arenas_tls JEMALLOC_ATTR(tls_model("initial-exec"));
#  define ARENA_GET()	arenas_tls
#  define ARENA_SET(v)	do {						\
	arenas_tls = (v);						\
	pthread_setspecific(arenas_tsd, (void *)(v));			\
} while (0)
#else
#  define ARENA_GET()	((arena_t *)pthread_getspecific(arenas_tsd))
#  define ARENA_SET(v)	do {						\
	pthread_setspecific(arenas_tsd, (void *)(v));			\
} while (0)
#endif

/*
 * Arenas that are used to service external requests.  Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 */
extern arena_t		**arenas;
extern unsigned		narenas;

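/*
 * Track cumulative bytes allocated/deallocated by the current thread, using
 * TLS directly when available and falling back to pthread TSD otherwise.
 */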
#ifdef JEMALLOC_TLS
extern __thread thread_allocated_t	thread_allocated_tls;
#    define ALLOCATED_GET() (thread_allocated_tls.allocated)
#    define ALLOCATEDP_GET() (&thread_allocated_tls.allocated)
#    define DEALLOCATED_GET() (thread_allocated_tls.deallocated)
#    define DEALLOCATEDP_GET() (&thread_allocated_tls.deallocated)
#    define ALLOCATED_ADD(a, d) do {					\
	thread_allocated_tls.allocated += (a);				\
	thread_allocated_tls.deallocated += (d);			\
} while (0)
#else
#    define ALLOCATED_GET() (thread_allocated_get()->allocated)
#    define ALLOCATEDP_GET() (&thread_allocated_get()->allocated)
#    define DEALLOCATED_GET() (thread_allocated_get()->deallocated)
#    define DEALLOCATEDP_GET() (&thread_allocated_get()->deallocated)
#    define ALLOCATED_ADD(a, d) do {					\
	thread_allocated_t *thread_allocated = thread_allocated_get();	\
	thread_allocated->allocated += (a);				\
	thread_allocated->deallocated += (d);				\
} while (0)
#endif
extern pthread_key_t	thread_allocated_tsd;
thread_allocated_t	*thread_allocated_get_hard(void);

arena_t	*arenas_extend(unsigned ind);
arena_t	*choose_arena_hard(void);
void	jemalloc_prefork(void);
void	jemalloc_postfork_parent(void);
void	jemalloc_postfork_child(void);

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#ifdef JEMALLOC_ZONE
#include "jemalloc/internal/zone.h"
#endif
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_EXTERNS
/******************************************************************************/
#define JEMALLOC_H_INLINES

#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"

#ifndef JEMALLOC_ENABLE_INLINE
size_t	s2u(size_t size);
size_t	sa2u(size_t size, size_t alignment, size_t *run_size_p);
arena_t	*choose_arena(void);
thread_allocated_t	*thread_allocated_get(void);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
/*
 * Compute usable size that would result from allocating an object with the
 * specified size.
 */
JEMALLOC_INLINE size_t
s2u(size_t size)
{

	if (size <= SMALL_MAXCLASS)
		return (arena_bin_info[SMALL_SIZE2BIN(size)].reg_size);
	if (size <= arena_maxclass)
		return (PAGE_CEILING(size));
	return (CHUNK_CEILING(size));
}

/*
 * Compute usable size that would result from allocating an object with the
 * specified size and alignment.
 */
JEMALLOC_INLINE size_t
sa2u(size_t size, size_t alignment, size_t *run_size_p)
{
	size_t usize;

	/*
	 * Round size up to the nearest multiple of alignment.
	 *
	 * This done, we can take advantage of the fact that for each small
	 * size class, every object is aligned at the smallest power of two
	 * that is non-zero in the base two representation of the size.  For
	 * example:
	 *
	 *   Size |   Base 2 | Minimum alignment
	 *   -----+----------+------------------
	 *     96 |  1100000 |  32
	 *    144 | 10100000 |  32
	 *    192 | 11000000 |  64
	 */
	usize = (size + (alignment - 1)) & (-alignment);
	/*
	 * (usize < size) protects against the combination of maximal
	 * alignment and size greater than maximal alignment.
	 */
	if (usize < size) {
		/* size_t overflow. */
		return (0);
	}

	if (usize <= arena_maxclass && alignment <= PAGE_SIZE) {
		if (usize <= SMALL_MAXCLASS)
			return (arena_bin_info[SMALL_SIZE2BIN(usize)].reg_size);
		return (PAGE_CEILING(usize));
	} else {
		size_t run_size;

		/*
		 * We can't achieve subpage alignment, so round up alignment
		 * permanently; it makes later calculations simpler.
		 */
		alignment = PAGE_CEILING(alignment);
		usize = PAGE_CEILING(size);
		/*
		 * (usize < size) protects against very large sizes within
		 * PAGE_SIZE of SIZE_T_MAX.
		 *
		 * (usize + alignment < usize) protects against the
		 * combination of maximal alignment and usize large enough
		 * to cause overflow.  This is similar to the first overflow
		 * check above, but it needs to be repeated due to the new
		 * usize value, which may now be *equal* to maximal
		 * alignment, whereas before we only detected overflow if the
		 * original size was *greater* than maximal alignment.
		 */
		if (usize < size || usize + alignment < usize) {
			/* size_t overflow. */
			return (0);
		}

		/*
		 * Calculate the size of the over-size run that arena_palloc()
		 * would need to allocate in order to guarantee the alignment.
		 */
		if (usize >= alignment)
			run_size = usize + alignment - PAGE_SIZE;
		else {
			/*
			 * It is possible that (alignment << 1) will cause
			 * overflow, but it doesn't matter because we also
			 * subtract PAGE_SIZE, which in the case of overflow
			 * leaves us with a very large run_size.  That causes
			 * the first conditional below to fail, which means
			 * that the bogus run_size value never gets used for
			 * anything important.
			 */
			run_size = (alignment << 1) - PAGE_SIZE;
		}
		if (run_size_p != NULL)
			*run_size_p = run_size;

		if (run_size <= arena_maxclass)
			return (PAGE_CEILING(usize));
		return (CHUNK_CEILING(usize));
	}
}

/* Choose an arena based on a per-thread value. */
JEMALLOC_INLINE arena_t *
choose_arena(void)
{
	arena_t *ret;

	ret = ARENA_GET();
	if (ret == NULL) {
		ret = choose_arena_hard();
		assert(ret != NULL);
	}

	return (ret);
}

JEMALLOC_INLINE thread_allocated_t *
thread_allocated_get(void)
{
	thread_allocated_t *thread_allocated = (thread_allocated_t *)
	    pthread_getspecific(thread_allocated_tsd);

	if (thread_allocated == NULL)
		return (thread_allocated_get_hard());
	return (thread_allocated);
}
#endif

#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/hash.h"
#ifdef JEMALLOC_ZONE
#include "jemalloc/internal/zone.h"
#endif

#ifndef JEMALLOC_ENABLE_INLINE
void	*imalloc(size_t size);
void	*icalloc(size_t size);
void	*ipalloc(size_t usize, size_t alignment, bool zero);
size_t	isalloc(const void *ptr);
size_t	ivsalloc(const void *ptr);
void	idalloc(void *ptr);
void	*iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
    bool zero, bool no_move);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_INLINE void *
imalloc(size_t size)
{

	assert(size != 0);

	if (size <= arena_maxclass)
		return (arena_malloc(size, false));
	else
		return (huge_malloc(size, false));
}

JEMALLOC_INLINE void *
icalloc(size_t size)
{

	if (size <= arena_maxclass)
		return (arena_malloc(size, true));
	else
		return (huge_malloc(size, true));
}

JEMALLOC_INLINE void *
ipalloc(size_t usize, size_t alignment, bool zero)
{
	void *ret;

	assert(usize != 0);
	assert(usize == sa2u(usize, alignment, NULL));

	if (usize <= arena_maxclass && alignment <= PAGE_SIZE)
		ret = arena_malloc(usize, zero);
	else {
		size_t run_size
#ifdef JEMALLOC_CC_SILENCE
		    = 0
#endif
		    ;

		/*
		 * Ideally we would only ever call sa2u() once per aligned
		 * allocation request, and the caller of this function has
		 * already done so once.  However, it's rather burdensome to
		 * require every caller to pass in run_size, especially given
		 * that it's only relevant to large allocations.  Therefore,
		 * just call it again here in order to get run_size.
		 */
		sa2u(usize, alignment, &run_size);
		if (run_size <= arena_maxclass) {
			ret = arena_palloc(choose_arena(), usize, run_size,
			    alignment, zero);
		} else if (alignment <= chunksize)
			ret = huge_malloc(usize, zero);
		else
			ret = huge_palloc(usize, alignment, zero);
	}

	assert(((uintptr_t)ret & (alignment - 1)) == 0);
	return (ret);
}

JEMALLOC_INLINE size_t
isalloc(const void *ptr)
{
	size_t ret;
	arena_chunk_t *chunk;

	assert(ptr != NULL);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr) {
		/* Region. */
		if (config_prof)
			ret = arena_salloc_demote(ptr);
		else
			ret = arena_salloc(ptr);
	} else
		ret = huge_salloc(ptr);

	return (ret);
}

JEMALLOC_INLINE size_t
ivsalloc(const void *ptr)
{

	/* Return 0 if ptr is not within a chunk managed by jemalloc. */
	if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == NULL)
		return (0);

	return (isalloc(ptr));
}

JEMALLOC_INLINE void
idalloc(void *ptr)
{
	arena_chunk_t *chunk;

	assert(ptr != NULL);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
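	/*
	 * ptr coincides with the chunk base only for huge allocations; arena
	 * chunks start with an arena_chunk_t header.
	 */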
	if (chunk != ptr)
		arena_dalloc(chunk->arena, chunk, ptr);
	else
		huge_dalloc(ptr, true);
}

JEMALLOC_INLINE void *
iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
    bool no_move)
{
	void *ret;
	size_t oldsize;

	assert(ptr != NULL);
	assert(size != 0);

	oldsize = isalloc(ptr);

	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
	    != 0) {
		size_t usize, copysize;

		/*
		 * Existing object alignment is inadequate; allocate new space
		 * and copy.
		 */
		if (no_move)
			return (NULL);
		usize = sa2u(size + extra, alignment, NULL);
		if (usize == 0)
			return (NULL);
		ret = ipalloc(usize, alignment, zero);
		if (ret == NULL) {
			if (extra == 0)
				return (NULL);
			/* Try again, without extra this time. */
			usize = sa2u(size, alignment, NULL);
			if (usize == 0)
				return (NULL);
			ret = ipalloc(usize, alignment, zero);
			if (ret == NULL)
				return (NULL);
		}
		/*
		 * Copy at most size bytes (not size+extra), since the caller
		 * has no expectation that the extra bytes will be reliably
		 * preserved.
		 */
		copysize = (size < oldsize) ? size : oldsize;
		memcpy(ret, ptr, copysize);
		idalloc(ptr);
		return (ret);
	}

	if (no_move) {
		if (size <= arena_maxclass) {
			return (arena_ralloc_no_move(ptr, oldsize, size,
			    extra, zero));
		} else {
			return (huge_ralloc_no_move(ptr, oldsize, size,
			    extra));
		}
	} else {
		if (size + extra <= arena_maxclass) {
			return (arena_ralloc(ptr, oldsize, size, extra,
			    alignment, zero));
		} else {
			return (huge_ralloc(ptr, oldsize, size, extra,
			    alignment, zero));
		}
	}
}
#endif

#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_INLINES
/******************************************************************************/
