/* arena.h revision 609ae595f0358157b19311b0f9f9591db7cee705 */
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

/*
 * RUN_MAX_OVRHD indicates maximum desired run header overhead.  Runs are sized
 * as small as possible such that this setting is still honored, without
 * violating other constraints.  The goal is to make runs as small as possible
 * without exceeding a per run external fragmentation threshold.
 *
 * We use binary fixed point math for overhead computations, where the binary
 * point is implicitly RUN_BFP bits to the left.
 *
 * Note that it is possible to set RUN_MAX_OVRHD low enough that it cannot be
 * honored for some/all object sizes, since when heap profiling is enabled
 * there is one pointer of header overhead per object (plus a constant).  This
 * constraint is relaxed (ignored) for runs that are so small that the
 * per-region overhead is greater than:
 *
 *   (RUN_MAX_OVRHD / (reg_interval << (3+RUN_BFP))
 */
#define	RUN_BFP			12
/*                                    \/   Implicit binary fixed point. */
#define	RUN_MAX_OVRHD		0x0000003dU
#define	RUN_MAX_OVRHD_RELAX	0x00001800U

/* Maximum number of regions in one run. */
#define	LG_RUN_MAXREGS		11
#define	RUN_MAXREGS		(1U << LG_RUN_MAXREGS)

/*
 * Minimum redzone size.  Redzones may be larger than this if necessary to
 * preserve region alignment.
 */
#define	REDZONE_MINSIZE		16

/*
 * The minimum ratio of active:dirty pages per arena is computed as:
 *
 *   (nactive >> opt_lg_dirty_mult) >= ndirty
 *
 * So, supposing that opt_lg_dirty_mult is 5, there can be no less than 32
 * times as many active pages as dirty pages.
 */
#define	LG_DIRTY_MULT_DEFAULT	5

typedef struct arena_chunk_map_s arena_chunk_map_t;
typedef struct arena_chunk_s arena_chunk_t;
typedef struct arena_run_s arena_run_t;
typedef struct arena_bin_info_s arena_bin_info_t;
typedef struct arena_bin_s arena_bin_t;
typedef struct arena_s arena_t;

#endif /* JEMALLOC_H_TYPES */
545821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)/******************************************************************************/
555821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)#ifdef JEMALLOC_H_STRUCTS
565821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)
575821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)/* Each element of the chunk map corresponds to one page within the chunk. */
585821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)struct arena_chunk_map_s {
595821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)#ifndef JEMALLOC_PROF
605821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	/*
615821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 * Overlay prof_ctx in order to allow it to be referenced by dead code.
625821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 * Such antics aren't warranted for per arena data structures, but
635821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 * chunk map overhead accounts for a percentage of memory, rather than
645821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 * being just a fixed cost.
655821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 */
665821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	union {
675821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)#endif
685821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	union {
695821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)		/*
705821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)		 * Linkage for run trees.  There are two disjoint uses:
715821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)		 *
725821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)		 * 1) arena_t's runs_avail_{clean,dirty} trees.
735821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)		 * 2) arena_run_t conceptually uses this linkage for in-use
745821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)		 *    non-full runs, rather than directly embedding linkage.
755821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)		 */
765821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)		rb_node(arena_chunk_map_t)	rb_link;
775821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)		/*
785821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)		 * List of runs currently in purgatory.  arena_chunk_purge()
795821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)		 * temporarily allocates runs that contain dirty pages while
805821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)		 * purging, so that other threads cannot use the runs while the
815821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)		 * purging thread is operating without the arena lock held.
825821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)		 */
835821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)		ql_elm(arena_chunk_map_t)	ql_link;
845821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	}				u;
855821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)
865821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	/* Profile counters, used for large object runs. */
875821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	prof_ctx_t			*prof_ctx;
885821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)#ifndef JEMALLOC_PROF
895821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	}; /* union { ... }; */
905821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)#endif
915821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)
925821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	/*
935821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 * Run address (or size) and various flags are stored together.  The bit
945821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 * layout looks like (assuming 32-bit system):
955821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 *
965821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 *   ???????? ???????? ????nnnn nnnndula
975821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 *
985821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 * ? : Unallocated: Run address for first/last pages, unset for internal
995821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 *                  pages.
1005821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 *     Small: Run page offset.
1015821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 *     Large: Run size for first page, unset for trailing pages.
1025821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 * n : binind for small size class, BININD_INVALID for large size class.
1035821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 * d : dirty?
1045821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 * u : unzeroed?
1055821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 * l : large?
1065821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 * a : allocated?
1075821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 *
1085821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 * Following are example bit patterns for the three types of runs.
1095821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 *
1105821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 * p : run page offset
1115821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 * s : run size
1125821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 * n : binind for size class; large objects set these to BININD_INVALID
1135821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 *     except for promoted allocations (see prof_promote)
1145821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 * x : don't care
1155821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 * - : 0
1165821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 * + : 1
1175821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 * [DULA] : bit set
1185821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 * [dula] : bit unset
1195821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 *
1205821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 *   Unallocated (clean):
1215821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 *     ssssssss ssssssss ssss++++ ++++du-a
1225821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 *     xxxxxxxx xxxxxxxx xxxxxxxx xxxx-Uxx
1235821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 *     ssssssss ssssssss ssss++++ ++++dU-a
1245821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 *
1255821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 *   Unallocated (dirty):
1265821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 *     ssssssss ssssssss ssss++++ ++++D--a
1275821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 *     xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
1285821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 *     ssssssss ssssssss ssss++++ ++++D--a
1295821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 *
1305821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 *   Small:
1315821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 *     pppppppp pppppppp ppppnnnn nnnnd--A
1325821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 *     pppppppp pppppppp ppppnnnn nnnn---A
1335821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 *     pppppppp pppppppp ppppnnnn nnnnd--A
1345821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 *
1355821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 *   Large:
1365821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 *     ssssssss ssssssss ssss++++ ++++D-LA
1375821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 *     xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
1385821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 *     -------- -------- ----++++ ++++D-LA
1395821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 *
1405821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 *   Large (sampled, size <= PAGE):
1415821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 *     ssssssss ssssssss ssssnnnn nnnnD-LA
1425821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 *
1435821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 *   Large (not sampled, size == PAGE):
1445821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 *     ssssssss ssssssss ssss++++ ++++D-LA
1455821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 */
1465821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	size_t				bits;
1475821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)#define	CHUNK_MAP_BININD_SHIFT	4
1485821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)#define	BININD_INVALID		((size_t)0xffU)
1495821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)/*     CHUNK_MAP_BININD_MASK == (BININD_INVALID << CHUNK_MAP_BININD_SHIFT) */
1505821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)#define	CHUNK_MAP_BININD_MASK	((size_t)0xff0U)
1515821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)#define	CHUNK_MAP_BININD_INVALID CHUNK_MAP_BININD_MASK
1525821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)#define	CHUNK_MAP_FLAGS_MASK	((size_t)0xcU)
1535821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)#define	CHUNK_MAP_DIRTY		((size_t)0x8U)
1545821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)#define	CHUNK_MAP_UNZEROED	((size_t)0x4U)
1555821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)#define	CHUNK_MAP_LARGE		((size_t)0x2U)
1565821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)#define	CHUNK_MAP_ALLOCATED	((size_t)0x1U)
1575821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)#define	CHUNK_MAP_KEY		CHUNK_MAP_ALLOCATED
1585821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)};
1595821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)typedef rb_tree(arena_chunk_map_t) arena_avail_tree_t;
1605821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)typedef rb_tree(arena_chunk_map_t) arena_run_tree_t;
1615821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)
1625821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)/* Arena chunk header. */
1635821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)struct arena_chunk_s {
1645821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	/* Arena that owns the chunk. */
1655821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	arena_t		*arena;
1665821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)
1675821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	/* Linkage for the arena's chunks_dirty list. */
1685821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	ql_elm(arena_chunk_t) link_dirty;
1695821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)
1705821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	/*
1715821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 * True if the chunk is currently in the chunks_dirty list, due to
1725821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 * having at some point contained one or more dirty pages.  Removal
1735821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 * from chunks_dirty is lazy, so (dirtied && ndirty == 0) is possible.
1745821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 */
1755821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	bool		dirtied;
1765821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)
1775821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	/* Number of dirty pages. */
1785821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	size_t		ndirty;
1795821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)
1805821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	/*
1815821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 * Map of pages within chunk that keeps track of free/large/small.  The
1825821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 * first map_bias entries are omitted, since the chunk header does not
1835821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 * need to be tracked in the map.  This omission saves a header page
1845821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 * for common chunk sizes (e.g. 4 MiB).
1855821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 */
1865821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	arena_chunk_map_t map[1]; /* Dynamically sized. */
1875821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)};
1885821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)typedef rb_tree(arena_chunk_t) arena_chunk_tree_t;
1895821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)
1905821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)struct arena_run_s {
1915821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	/* Bin this run is associated with. */
1925821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	arena_bin_t	*bin;
1935821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)
1945821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	/* Index of next region that has never been allocated, or nregs. */
1955821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	uint32_t	nextind;
1965821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)
1975821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	/* Number of free regions in run. */
1985821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	unsigned	nfree;
1995821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)};
2005821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)
2015821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)/*
2025821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles) * Read-only information associated with each element of arena_t's bins array
2035821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles) * is stored separately, partly to reduce memory usage (only one copy, rather
2045821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles) * than one per arena), but mainly to avoid false cacheline sharing.
2055821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles) *
2065821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles) * Each run has the following layout:
2075821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles) *
2085821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles) *               /--------------------\
2095821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles) *               | arena_run_t header |
2105821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles) *               | ...                |
2115821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles) * bitmap_offset | bitmap             |
2125821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles) *               | ...                |
2135821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles) *   ctx0_offset | ctx map            |
2145821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles) *               | ...                |
2155821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles) *               |--------------------|
2165821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles) *               | redzone            |
2175821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles) *   reg0_offset | region 0           |
2185821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles) *               | redzone            |
2195821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles) *               |--------------------| \
2205821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles) *               | redzone            | |
2215821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles) *               | region 1           |  > reg_interval
2225821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles) *               | redzone            | /
2235821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles) *               |--------------------|
2245821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles) *               | ...                |
2255821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles) *               | ...                |
2265821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles) *               | ...                |
2275821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles) *               |--------------------|
2285821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles) *               | redzone            |
2295821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles) *               | region nregs-1     |
2305821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles) *               | redzone            |
2315821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles) *               |--------------------|
2325821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles) *               | alignment pad?     |
2335821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles) *               \--------------------/
2345821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles) *
2355821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles) * reg_interval has at least the same minimum alignment as reg_size; this
2365821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles) * preserves the alignment constraint that sa2u() depends on.  Alignment pad is
2375821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles) * either 0 or redzone_size; it is present only if needed to align reg0_offset.
2385821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles) */
2395821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)struct arena_bin_info_s {
2405821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	/* Size of regions in a run for this bin's size class. */
2415821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	size_t		reg_size;
2425821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)
2435821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	/* Redzone size. */
2445821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	size_t		redzone_size;
2455821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)
2465821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	/* Interval between regions (reg_size + (redzone_size << 1)). */
2475821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	size_t		reg_interval;
2485821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)
2495821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	/* Total size of a run for this bin's size class. */
2505821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	size_t		run_size;
2515821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)
2525821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	/* Total number of regions in a run for this bin's size class. */
2535821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	uint32_t	nregs;
2545821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)
2555821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	/*
2565821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 * Offset of first bitmap_t element in a run header for this bin's size
2575821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 * class.
2585821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 */
2595821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	uint32_t	bitmap_offset;
2605821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)
2615821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	/*
2625821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 * Metadata used to manipulate bitmaps for runs associated with this
2635821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 * bin.
2645821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 */
2655821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	bitmap_info_t	bitmap_info;
2665821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)
2675821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	/*
2685821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 * Offset of first (prof_ctx_t *) in a run header for this bin's size
2695821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 * class, or 0 if (config_prof == false || opt_prof == false).
2705821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 */
2715821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	uint32_t	ctx0_offset;
2725821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)
2735821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	/* Offset of first region in a run for this bin's size class. */
2745821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	uint32_t	reg0_offset;
2755821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)};
2765821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)
2775821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)struct arena_bin_s {
2785821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	/*
2795821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 * All operations on runcur, runs, and stats require that lock be
2805821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 * locked.  Run allocation/deallocation are protected by the arena lock,
2815821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 * which may be acquired while holding one or more bin locks, but not
2825821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 * vise versa.
2835821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 */
2845821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	malloc_mutex_t	lock;
2855821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)
2865821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	/*
2875821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 * Current run being used to service allocations of this bin's size
2885821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 * class.
2895821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 */
2905821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	arena_run_t	*runcur;
2915821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)
2925821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	/*
2935821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 * Tree of non-full runs.  This tree is used when looking for an
2945821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 * existing run when runcur is no longer usable.  We choose the
2955821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 * non-full run that is lowest in memory; this policy tends to keep
2965821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 * objects packed well, and it can also help reduce the number of
2975821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 * almost-empty chunks.
2985821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 */
2995821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	arena_run_tree_t runs;
3005821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)
3015821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	/* Bin statistics. */
3025821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	malloc_bin_stats_t stats;
3035821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)};
3045821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)
3055821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)struct arena_s {
3065821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	/* This arena's index within the arenas array. */
3075821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	unsigned		ind;
3085821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)
3095821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	/*
3105821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 * Number of threads currently assigned to this arena.  This field is
3115821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 * protected by arenas_lock.
3125821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 */
3135821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	unsigned		nthreads;
3145821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)
3155821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	/*
3165821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 * There are three classes of arena operations from a locking
3175821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 * perspective:
3185821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 * 1) Thread asssignment (modifies nthreads) is protected by
3195821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 *    arenas_lock.
3205821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 * 2) Bin-related operations are protected by bin locks.
3215821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 * 3) Chunk- and run-related operations are protected by this mutex.
3225821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 */
3235821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	malloc_mutex_t		lock;
3245821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)
3255821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	arena_stats_t		stats;
3265821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	/*
3275821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 * List of tcaches for extant threads associated with this arena.
3285821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 * Stats from these are merged incrementally, and at exit.
3295821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 */
3305821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	ql_head(tcache_t)	tcache_ql;
3315821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)
3325821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	uint64_t		prof_accumbytes;
3335821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)
3345821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	dss_prec_t		dss_prec;
3355821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)
3365821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	/* List of dirty-page-containing chunks this arena manages. */
3375821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	ql_head(arena_chunk_t)	chunks_dirty;
3385821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)
3395821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	/*
3405821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 * In order to avoid rapid chunk allocation/deallocation when an arena
3415821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 * oscillates right on the cusp of needing a new chunk, cache the most
3425821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 * recently freed chunk.  The spare is left in the arena's chunk trees
3435821806d5e7f356e8fa4b058a389a808ea183019Torne (Richard Coles)	 * until it is deleted.
344	 *
345	 * There is one spare chunk per arena, rather than one spare total, in
346	 * order to avoid interactions between multiple threads that could make
347	 * a single spare inadequate.
348	 */
349	arena_chunk_t		*spare;
350
351	/* Number of pages in active runs. */
352	size_t			nactive;
353
354	/*
355	 * Current count of pages within unused runs that are potentially
356	 * dirty, and for which madvise(... MADV_DONTNEED) has not been called.
357	 * By tracking this, we can institute a limit on how much dirty unused
358	 * memory is mapped for each arena.
359	 */
360	size_t			ndirty;
361
362	/*
363	 * Approximate number of pages being purged.  It is possible for
364	 * multiple threads to purge dirty pages concurrently, and they use
365	 * npurgatory to indicate the total number of pages all threads are
366	 * attempting to purge.
367	 */
368	size_t			npurgatory;
369
370	/*
371	 * Size/address-ordered trees of this arena's available runs.  The trees
372	 * are used for first-best-fit run allocation.  The dirty tree contains
373	 * runs with dirty pages (i.e. very likely to have been touched and
374	 * therefore have associated physical pages), whereas the clean tree
375	 * contains runs with pages that either have no associated physical
376	 * pages, or have pages that the kernel may recycle at any time due to
377	 * previous madvise(2) calls.  The dirty tree is used in preference to
378	 * the clean tree for allocations, because using dirty pages reduces
379	 * the amount of dirty purging necessary to keep the active:dirty page
380	 * ratio below the purge threshold.
381	 */
382	arena_avail_tree_t	runs_avail_clean;
383	arena_avail_tree_t	runs_avail_dirty;
384
385	/* bins is used to store trees of free regions. */
386	arena_bin_t		bins[NBINS];
387};
388
389#endif /* JEMALLOC_H_STRUCTS */
390/******************************************************************************/
391#ifdef JEMALLOC_H_EXTERNS
392
393extern ssize_t	opt_lg_dirty_mult;
394/*
395 * small_size2bin is a compact lookup table that rounds request sizes up to
396 * size classes.  In order to reduce cache footprint, the table is compressed,
397 * and all accesses are via the SMALL_SIZE2BIN macro.
398 */
399extern uint8_t const	small_size2bin[];
/* Parenthesize the argument so that expression arguments group correctly. */
#define	SMALL_SIZE2BIN(s)	(small_size2bin[((s)-1) >> LG_TINY_MIN])
401
402extern arena_bin_info_t	arena_bin_info[NBINS];
403
404/* Number of large size classes. */
405#define			nlclasses (chunk_npages - map_bias)
406
407void	arena_purge_all(arena_t *arena);
408void	arena_prof_accum(arena_t *arena, uint64_t accumbytes);
409void	arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin,
410    size_t binind, uint64_t prof_accumbytes);
411void	arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info,
412    bool zero);
413void	arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info);
414void	*arena_malloc_small(arena_t *arena, size_t size, bool zero);
415void	*arena_malloc_large(arena_t *arena, size_t size, bool zero);
416void	*arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero);
417void	arena_prof_promoted(const void *ptr, size_t size);
418void	arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
419    arena_chunk_map_t *mapelm);
420void	arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
421    size_t pageind, arena_chunk_map_t *mapelm);
422void	arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
423    size_t pageind);
424void	arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk,
425    void *ptr);
426void	arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr);
427void	*arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
428    size_t extra, bool zero);
429void	*arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
430    size_t extra, size_t alignment, bool zero, bool try_tcache_alloc,
431    bool try_tcache_dalloc);
432dss_prec_t	arena_dss_prec_get(arena_t *arena);
433void	arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
434void	arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
435    size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
436    malloc_large_stats_t *lstats);
437bool	arena_new(arena_t *arena, unsigned ind);
438void	arena_boot(void);
439void	arena_prefork(arena_t *arena);
440void	arena_postfork_parent(arena_t *arena);
441void	arena_postfork_child(arena_t *arena);
442
443#endif /* JEMALLOC_H_EXTERNS */
444/******************************************************************************/
445#ifdef JEMALLOC_H_INLINES
446
447#ifndef JEMALLOC_ENABLE_INLINE
448arena_chunk_map_t	*arena_mapp_get(arena_chunk_t *chunk, size_t pageind);
449size_t	*arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind);
450size_t	arena_mapbits_get(arena_chunk_t *chunk, size_t pageind);
451size_t	arena_mapbits_unallocated_size_get(arena_chunk_t *chunk,
452    size_t pageind);
453size_t	arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind);
454size_t	arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind);
455size_t	arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind);
456size_t	arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind);
457size_t	arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind);
458size_t	arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind);
459size_t	arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind);
460void	arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind,
461    size_t size, size_t flags);
462void	arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
463    size_t size);
464void	arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind,
465    size_t size, size_t flags);
466void	arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
467    size_t binind);
468void	arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind,
469    size_t runind, size_t binind, size_t flags);
470void	arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind,
471    size_t unzeroed);
472size_t	arena_ptr_small_binind_get(const void *ptr, size_t mapbits);
473size_t	arena_bin_index(arena_t *arena, arena_bin_t *bin);
474unsigned	arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
475    const void *ptr);
476prof_ctx_t	*arena_prof_ctx_get(const void *ptr);
477void	arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx);
478void	*arena_malloc(arena_t *arena, size_t size, bool zero, bool try_tcache);
479size_t	arena_salloc(const void *ptr, bool demote);
480void	arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr,
481    bool try_tcache);
482#endif
483
484#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
485#  ifdef JEMALLOC_ARENA_INLINE_A
486JEMALLOC_INLINE arena_chunk_map_t *
487arena_mapp_get(arena_chunk_t *chunk, size_t pageind)
488{
489
490	assert(pageind >= map_bias);
491	assert(pageind < chunk_npages);
492
493	return (&chunk->map[pageind-map_bias]);
494}
495
496JEMALLOC_INLINE size_t *
497arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind)
498{
499
500	return (&arena_mapp_get(chunk, pageind)->bits);
501}
502
503JEMALLOC_INLINE size_t
504arena_mapbits_get(arena_chunk_t *chunk, size_t pageind)
505{
506
507	return (*arena_mapbitsp_get(chunk, pageind));
508}
509
510JEMALLOC_INLINE size_t
511arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, size_t pageind)
512{
513	size_t mapbits;
514
515	mapbits = arena_mapbits_get(chunk, pageind);
516	assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
517	return (mapbits & ~PAGE_MASK);
518}
519
520JEMALLOC_INLINE size_t
521arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind)
522{
523	size_t mapbits;
524
525	mapbits = arena_mapbits_get(chunk, pageind);
526	assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) ==
527	    (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED));
528	return (mapbits & ~PAGE_MASK);
529}
530
531JEMALLOC_INLINE size_t
532arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind)
533{
534	size_t mapbits;
535
536	mapbits = arena_mapbits_get(chunk, pageind);
537	assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) ==
538	    CHUNK_MAP_ALLOCATED);
539	return (mapbits >> LG_PAGE);
540}
541
542JEMALLOC_INLINE size_t
543arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind)
544{
545	size_t mapbits;
546	size_t binind;
547
548	mapbits = arena_mapbits_get(chunk, pageind);
549	binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;
550	assert(binind < NBINS || binind == BININD_INVALID);
551	return (binind);
552}
553
554JEMALLOC_INLINE size_t
555arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind)
556{
557	size_t mapbits;
558
559	mapbits = arena_mapbits_get(chunk, pageind);
560	return (mapbits & CHUNK_MAP_DIRTY);
561}
562
563JEMALLOC_INLINE size_t
564arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind)
565{
566	size_t mapbits;
567
568	mapbits = arena_mapbits_get(chunk, pageind);
569	return (mapbits & CHUNK_MAP_UNZEROED);
570}
571
572JEMALLOC_INLINE size_t
573arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind)
574{
575	size_t mapbits;
576
577	mapbits = arena_mapbits_get(chunk, pageind);
578	return (mapbits & CHUNK_MAP_LARGE);
579}
580
581JEMALLOC_INLINE size_t
582arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind)
583{
584	size_t mapbits;
585
586	mapbits = arena_mapbits_get(chunk, pageind);
587	return (mapbits & CHUNK_MAP_ALLOCATED);
588}
589
590JEMALLOC_INLINE void
591arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size,
592    size_t flags)
593{
594	size_t *mapbitsp;
595
596	mapbitsp = arena_mapbitsp_get(chunk, pageind);
597	assert((size & PAGE_MASK) == 0);
598	assert((flags & ~CHUNK_MAP_FLAGS_MASK) == 0);
599	assert((flags & (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == flags);
600	*mapbitsp = size | CHUNK_MAP_BININD_INVALID | flags;
601}
602
603JEMALLOC_INLINE void
604arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
605    size_t size)
606{
607	size_t *mapbitsp;
608
609	mapbitsp = arena_mapbitsp_get(chunk, pageind);
610	assert((size & PAGE_MASK) == 0);
611	assert((*mapbitsp & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
612	*mapbitsp = size | (*mapbitsp & PAGE_MASK);
613}
614
615JEMALLOC_INLINE void
616arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size,
617    size_t flags)
618{
619	size_t *mapbitsp;
620	size_t unzeroed;
621
622	mapbitsp = arena_mapbitsp_get(chunk, pageind);
623	assert((size & PAGE_MASK) == 0);
624	assert((flags & CHUNK_MAP_DIRTY) == flags);
625	unzeroed = *mapbitsp & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */
626	*mapbitsp = size | CHUNK_MAP_BININD_INVALID | flags | unzeroed |
627	    CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
628}
629
630JEMALLOC_INLINE void
631arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
632    size_t binind)
633{
634	size_t *mapbitsp;
635
636	assert(binind <= BININD_INVALID);
637	mapbitsp = arena_mapbitsp_get(chunk, pageind);
638	assert(arena_mapbits_large_size_get(chunk, pageind) == PAGE);
639	*mapbitsp = (*mapbitsp & ~CHUNK_MAP_BININD_MASK) | (binind <<
640	    CHUNK_MAP_BININD_SHIFT);
641}
642
643JEMALLOC_INLINE void
644arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind,
645    size_t binind, size_t flags)
646{
647	size_t *mapbitsp;
648	size_t unzeroed;
649
650	assert(binind < BININD_INVALID);
651	mapbitsp = arena_mapbitsp_get(chunk, pageind);
652	assert(pageind - runind >= map_bias);
653	assert((flags & CHUNK_MAP_DIRTY) == flags);
654	unzeroed = *mapbitsp & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */
655	*mapbitsp = (runind << LG_PAGE) | (binind << CHUNK_MAP_BININD_SHIFT) |
656	    flags | unzeroed | CHUNK_MAP_ALLOCATED;
657}
658
659JEMALLOC_INLINE void
660arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind,
661    size_t unzeroed)
662{
663	size_t *mapbitsp;
664
665	mapbitsp = arena_mapbitsp_get(chunk, pageind);
666	*mapbitsp = (*mapbitsp & ~CHUNK_MAP_UNZEROED) | unzeroed;
667}
668
/*
 * Extract the small bin index encoded in a page's mapbits.  In debug
 * builds, independently recompute the index from the run backing ptr and
 * assert that the two agree.
 */
JEMALLOC_INLINE size_t
arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
{
	size_t binind;

	binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;

	if (config_debug) {
		arena_chunk_t *chunk;
		arena_t *arena;
		size_t pageind;
		size_t actual_mapbits;
		arena_run_t *run;
		arena_bin_t *bin;
		size_t actual_binind;
		arena_bin_info_t *bin_info;

		assert(binind != BININD_INVALID);
		assert(binind < NBINS);
		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
		arena = chunk->arena;
		pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
		actual_mapbits = arena_mapbits_get(chunk, pageind);
		/* Caller must pass the page's current mapbits. */
		assert(mapbits == actual_mapbits);
		assert(arena_mapbits_large_get(chunk, pageind) == 0);
		assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
		/* Back up by the encoded page offset to find the run base. */
		run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
		    (actual_mapbits >> LG_PAGE)) << LG_PAGE));
		bin = run->bin;
		/* Bins are stored contiguously; pointer distance is index. */
		actual_binind = bin - arena->bins;
		assert(binind == actual_binind);
		bin_info = &arena_bin_info[actual_binind];
		/* ptr must lie on a region boundary within the run. */
		assert(((uintptr_t)ptr - ((uintptr_t)run +
		    (uintptr_t)bin_info->reg0_offset)) % bin_info->reg_interval
		    == 0);
	}

	return (binind);
}
708#  endif /* JEMALLOC_ARENA_INLINE_A */
709
710#  ifdef JEMALLOC_ARENA_INLINE_B
711JEMALLOC_INLINE size_t
712arena_bin_index(arena_t *arena, arena_bin_t *bin)
713{
714	size_t binind = bin - arena->bins;
715	assert(binind < NBINS);
716	return (binind);
717}
718
/*
 * Compute the region index of ptr within run, given the bin's layout
 * metadata.  Division by the region interval is replaced with a
 * precomputed multiplicative inverse where possible, because this is a
 * hot path.
 */
JEMALLOC_INLINE unsigned
arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
{
	unsigned shift, diff, regind;
	size_t interval;

	/*
	 * Freeing a pointer lower than region zero can cause assertion
	 * failure.
	 */
	assert((uintptr_t)ptr >= (uintptr_t)run +
	    (uintptr_t)bin_info->reg0_offset);

	/*
	 * Avoid doing division with a variable divisor if possible.  Using
	 * actual division here can reduce allocator throughput by over 20%!
	 */
	diff = (unsigned)((uintptr_t)ptr - (uintptr_t)run -
	    bin_info->reg0_offset);

	/* Rescale (factor powers of 2 out of the numerator and denominator). */
	interval = bin_info->reg_interval;
	/* ffs() locates the lowest set bit, i.e. the power-of-two factor. */
	shift = ffs(interval) - 1;
	diff >>= shift;
	interval >>= shift;

	if (interval == 1) {
		/* The divisor was a power of 2. */
		regind = diff;
	} else {
		/*
		 * To divide by a number D that is not a power of two we
		 * multiply by (2^21 / D) and then right shift by 21 positions.
		 *
		 *   X / D
		 *
		 * becomes
		 *
		 *   (X * interval_invs[D - 3]) >> SIZE_INV_SHIFT
		 *
		 * We can omit the first three elements, because we never
		 * divide by 0, and 1 and 2 are both powers of two, which are
		 * handled above.
		 */
#define	SIZE_INV_SHIFT	((sizeof(unsigned) << 3) - LG_RUN_MAXREGS)
#define	SIZE_INV(s)	(((1U << SIZE_INV_SHIFT) / (s)) + 1)
		static const unsigned interval_invs[] = {
		    SIZE_INV(3),
		    SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7),
		    SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11),
		    SIZE_INV(12), SIZE_INV(13), SIZE_INV(14), SIZE_INV(15),
		    SIZE_INV(16), SIZE_INV(17), SIZE_INV(18), SIZE_INV(19),
		    SIZE_INV(20), SIZE_INV(21), SIZE_INV(22), SIZE_INV(23),
		    SIZE_INV(24), SIZE_INV(25), SIZE_INV(26), SIZE_INV(27),
		    SIZE_INV(28), SIZE_INV(29), SIZE_INV(30), SIZE_INV(31)
		};

		/* Fall back to real division for intervals past the table. */
		if (interval <= ((sizeof(interval_invs) / sizeof(unsigned)) +
		    2)) {
			regind = (diff * interval_invs[interval - 3]) >>
			    SIZE_INV_SHIFT;
		} else
			regind = diff / interval;
#undef SIZE_INV
#undef SIZE_INV_SHIFT
	}
	/* The inverse-multiply must agree with exact division. */
	assert(diff == regind * interval);
	assert(regind < bin_info->nregs);

	return (regind);
}
790
/*
 * Look up the profiling context associated with the allocation at ptr.
 * NOTE(review): when prof_promote is set, small allocations return the
 * sentinel ctx (uintptr_t)1U rather than a stored context -- confirm
 * against the prof module.
 */
JEMALLOC_INLINE prof_ctx_t *
arena_prof_ctx_get(const void *ptr)
{
	prof_ctx_t *ret;
	arena_chunk_t *chunk;
	size_t pageind, mapbits;

	cassert(config_prof);
	assert(ptr != NULL);
	assert(CHUNK_ADDR2BASE(ptr) != ptr);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	mapbits = arena_mapbits_get(chunk, pageind);
	assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
	if ((mapbits & CHUNK_MAP_LARGE) == 0) {
		if (prof_promote)
			ret = (prof_ctx_t *)(uintptr_t)1U;
		else {
			/* Back up to the run base via the encoded offset. */
			arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
			    (uintptr_t)((pageind - (mapbits >> LG_PAGE)) <<
			    LG_PAGE));
			size_t binind = arena_ptr_small_binind_get(ptr,
			    mapbits);
			arena_bin_info_t *bin_info = &arena_bin_info[binind];
			unsigned regind;

			/* Per-region ctx pointers live in-run at ctx0_offset. */
			regind = arena_run_regind(run, bin_info, ptr);
			ret = *(prof_ctx_t **)((uintptr_t)run +
			    bin_info->ctx0_offset + (regind *
			    sizeof(prof_ctx_t *)));
		}
	} else
		/* Large runs store the ctx in the map entry itself. */
		ret = arena_mapp_get(chunk, pageind)->prof_ctx;

	return (ret);
}
828
/*
 * Associate profiling context ctx with the allocation at ptr.  Mirrors
 * arena_prof_ctx_get(): small regions store the ctx in-run, large runs
 * store it in the map entry, and promoted small allocations
 * (prof_promote) only accept the sentinel (uintptr_t)1U.
 */
JEMALLOC_INLINE void
arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
{
	arena_chunk_t *chunk;
	size_t pageind, mapbits;

	cassert(config_prof);
	assert(ptr != NULL);
	assert(CHUNK_ADDR2BASE(ptr) != ptr);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	mapbits = arena_mapbits_get(chunk, pageind);
	assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
	if ((mapbits & CHUNK_MAP_LARGE) == 0) {
		if (prof_promote == false) {
			/* Back up to the run base via the encoded offset. */
			arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
			    (uintptr_t)((pageind - (mapbits >> LG_PAGE)) <<
			    LG_PAGE));
			size_t binind;
			arena_bin_info_t *bin_info;
			unsigned regind;

			binind = arena_ptr_small_binind_get(ptr, mapbits);
			bin_info = &arena_bin_info[binind];
			regind = arena_run_regind(run, bin_info, ptr);

			/* Store into the region's slot at ctx0_offset. */
			*((prof_ctx_t **)((uintptr_t)run + bin_info->ctx0_offset
			    + (regind * sizeof(prof_ctx_t *)))) = ctx;
		} else
			assert((uintptr_t)ctx == (uintptr_t)1U);
	} else
		arena_mapp_get(chunk, pageind)->prof_ctx = ctx;
}
863
864JEMALLOC_INLINE void *
865arena_malloc(arena_t *arena, size_t size, bool zero, bool try_tcache)
866{
867	tcache_t *tcache;
868
869	assert(size != 0);
870	assert(size <= arena_maxclass);
871
872	if (size <= SMALL_MAXCLASS) {
873		if (try_tcache && (tcache = tcache_get(true)) != NULL)
874			return (tcache_alloc_small(tcache, size, zero));
875		else {
876			return (arena_malloc_small(choose_arena(arena), size,
877			    zero));
878		}
879	} else {
880		/*
881		 * Initialize tcache after checking size in order to avoid
882		 * infinite recursion during tcache initialization.
883		 */
884		if (try_tcache && size <= tcache_maxclass && (tcache =
885		    tcache_get(true)) != NULL)
886			return (tcache_alloc_large(tcache, size, zero));
887		else {
888			return (arena_malloc_large(choose_arena(arena), size,
889			    zero));
890		}
891	}
892}
893
/*
 * Return the size of the allocation pointed to by ptr.  When heap
 * profiling promoted a small allocation to a large run (prof_promote),
 * demote selects which size is reported: true reports the original
 * small size class, false reports the promoted (large) size.
 */
JEMALLOC_INLINE size_t
arena_salloc(const void *ptr, bool demote)
{
	size_t ret;
	arena_chunk_t *chunk;
	size_t pageind, binind;

	assert(ptr != NULL);
	assert(CHUNK_ADDR2BASE(ptr) != ptr);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
	binind = arena_mapbits_binind_get(chunk, pageind);
	if (binind == BININD_INVALID || (config_prof && demote == false &&
	    prof_promote && arena_mapbits_large_get(chunk, pageind) != 0)) {
		/*
		 * Large allocation.  In the common case (demote == true), and
		 * as this is an inline function, most callers will only end up
		 * looking at binind to determine that ptr is a small
		 * allocation.
		 */
		assert(((uintptr_t)ptr & PAGE_MASK) == 0);
		ret = arena_mapbits_large_size_get(chunk, pageind);
		assert(ret != 0);
		/* The run must fit within the chunk. */
		assert(pageind + (ret>>LG_PAGE) <= chunk_npages);
		/* The run's last page must agree with its head page. */
		assert(ret == PAGE || arena_mapbits_large_size_get(chunk,
		    pageind+(ret>>LG_PAGE)-1) == 0);
		assert(binind == arena_mapbits_binind_get(chunk,
		    pageind+(ret>>LG_PAGE)-1));
		assert(arena_mapbits_dirty_get(chunk, pageind) ==
		    arena_mapbits_dirty_get(chunk, pageind+(ret>>LG_PAGE)-1));
	} else {
		/*
		 * Small allocation (possibly promoted to a large object due to
		 * prof_promote).
		 */
		assert(arena_mapbits_large_get(chunk, pageind) != 0 ||
		    arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
		    pageind)) == binind);
		ret = arena_bin_info[binind].reg_size;
	}

	return (ret);
}
940
/*
 * Deallocate ptr, which must belong to chunk (owned by arena).  Small
 * and cacheable large allocations are returned to the thread cache when
 * try_tcache is set; otherwise they go back to the arena directly.
 */
JEMALLOC_INLINE void
arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr, bool try_tcache)
{
	size_t pageind, mapbits;
	tcache_t *tcache;

	assert(arena != NULL);
	assert(chunk->arena == arena);
	assert(ptr != NULL);
	assert(CHUNK_ADDR2BASE(ptr) != ptr);

	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	mapbits = arena_mapbits_get(chunk, pageind);
	assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
	if ((mapbits & CHUNK_MAP_LARGE) == 0) {
		/* Small allocation. */
		if (try_tcache && (tcache = tcache_get(false)) != NULL) {
			size_t binind;

			binind = arena_ptr_small_binind_get(ptr, mapbits);
			tcache_dalloc_small(tcache, ptr, binind);
		} else
			arena_dalloc_small(arena, chunk, ptr, pageind);
	} else {
		/* Large allocation. */
		size_t size = arena_mapbits_large_size_get(chunk, pageind);

		/* Large allocations are always page-aligned. */
		assert(((uintptr_t)ptr & PAGE_MASK) == 0);

		if (try_tcache && size <= tcache_maxclass && (tcache =
		    tcache_get(false)) != NULL) {
			tcache_dalloc_large(tcache, ptr, size);
		} else
			arena_dalloc_large(arena, chunk, ptr);
	}
}
976#  endif /* JEMALLOC_ARENA_INLINE_B */
977#endif
978
979#endif /* JEMALLOC_H_INLINES */
980/******************************************************************************/
981