1/*
2 * This radix tree implementation is tailored to the singular purpose of
3 * tracking which chunks are currently owned by jemalloc.  This functionality
4 * is mandatory for OS X, where jemalloc must be able to respond to object
5 * ownership queries.
6 *
7 *******************************************************************************
8 */
#ifdef JEMALLOC_H_TYPES

typedef struct rtree_s rtree_t;

/*
 * Size of each radix tree node (must be a power of 2).  This impacts tree
 * depth.
 */
#define	RTREE_NODESIZE (1U << 16)

/* Allocation hooks stored in the tree; see rtree_new()/struct rtree_s. */
typedef void *(rtree_alloc_t)(size_t);
typedef void (rtree_dalloc_t)(void *);

#endif /* JEMALLOC_H_TYPES */
23/******************************************************************************/
24#ifdef JEMALLOC_H_STRUCTS
25
struct rtree_s {
	rtree_alloc_t	*alloc;		/* Allocates nodes/leaves (see rtree_set()). */
	rtree_dalloc_t	*dalloc;	/* Presumably frees nodes in rtree_delete() -- impl not visible here. */
	malloc_mutex_t	mutex;		/* Serializes writers; readers lock only in debug builds. */
	void		**root;		/* Top-level node; walked in rtree_get()/rtree_set(). */
	unsigned	height;		/* Number of tree levels (last level is a uint8_t leaf). */
	/*
	 * Bits of key consumed at each level, indexed 0..height-1.
	 * NOTE(review): pre-C99 "[1]" trailing-array idiom; layout is relied on
	 * by the (unseen) allocation code, so do not convert to a C99 flexible
	 * array member without auditing rtree.c.
	 */
	unsigned	level2bits[1]; /* Dynamically sized. */
};
34
35#endif /* JEMALLOC_H_STRUCTS */
36/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

/*
 * Create a tree resolving keys of the given bit width; alloc/dalloc are the
 * node allocation hooks retained in the tree.
 */
rtree_t	*rtree_new(unsigned bits, rtree_alloc_t *alloc, rtree_dalloc_t *dalloc);
/* Destroy the tree; presumably releases nodes via dalloc (impl not visible). */
void	rtree_delete(rtree_t *rtree);
/* fork(2) synchronization hooks for the tree's mutex. */
void	rtree_prefork(rtree_t *rtree);
void	rtree_postfork_parent(rtree_t *rtree);
void	rtree_postfork_child(rtree_t *rtree);

#endif /* JEMALLOC_H_EXTERNS */
46/******************************************************************************/
47#ifdef JEMALLOC_H_INLINES
48
/* Prototypes for the functions below, used when inlining is disabled. */
#ifndef JEMALLOC_ENABLE_INLINE
#ifdef JEMALLOC_DEBUG
uint8_t rtree_get_locked(rtree_t *rtree, uintptr_t key);
#endif
uint8_t	rtree_get(rtree_t *rtree, uintptr_t key);
bool	rtree_set(rtree_t *rtree, uintptr_t key, uint8_t val);
#endif
56
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_))
/*
 * RTREE_GET_GENERATE(f) expands to the definition of a lookup function named
 * f.  Three helper macros must be defined at expansion time:
 *   RTREE_LOCK(l)/RTREE_UNLOCK(l) -- acquire/release the tree mutex (each may
 *     expand to nothing, yielding an unsynchronized variant), and
 *   RTREE_GET_VALIDATE -- optional post-lookup sanity check (may be empty).
 * The walk consumes level2bits[i] key bits per level, extracting each subkey
 * by shifting already-consumed bits off the top of the key.  A NULL child
 * means the key was never inserted, so 0 is returned.  The final level is a
 * uint8_t leaf array rather than a node-pointer array.
 */
#define	RTREE_GET_GENERATE(f)						\
/* The least significant bits of the key are ignored. */		\
JEMALLOC_INLINE uint8_t							\
f(rtree_t *rtree, uintptr_t key)					\
{									\
	uint8_t ret;							\
	uintptr_t subkey;						\
	unsigned i, lshift, height, bits;				\
	void **node, **child;						\
									\
	RTREE_LOCK(&rtree->mutex);					\
	for (i = lshift = 0, height = rtree->height, node = rtree->root;\
	    i < height - 1;						\
	    i++, lshift += bits, node = child) {			\
		bits = rtree->level2bits[i];				\
		subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR +	\
		    3)) - bits);					\
		child = (void**)node[subkey];				\
		if (child == NULL) {					\
			RTREE_UNLOCK(&rtree->mutex);			\
			return (0);					\
		}							\
	}								\
									\
	/*								\
	 * node is a leaf, so it contains values rather than node	\
	 * pointers.							\
	 */								\
	bits = rtree->level2bits[i];					\
	subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) -	\
	    bits);							\
	{								\
		uint8_t *leaf = (uint8_t *)node;			\
		ret = leaf[subkey];					\
	}								\
	RTREE_UNLOCK(&rtree->mutex);					\
									\
	RTREE_GET_VALIDATE						\
	return (ret);							\
}
98
/*
 * Locked variant, generated only in debug builds.  It exists so the unlocked
 * rtree_get() result can be cross-checked under the mutex (see
 * RTREE_GET_VALIDATE below).
 */
#ifdef JEMALLOC_DEBUG
#  define RTREE_LOCK(l)		malloc_mutex_lock(l)
#  define RTREE_UNLOCK(l)	malloc_mutex_unlock(l)
#  define RTREE_GET_VALIDATE
RTREE_GET_GENERATE(rtree_get_locked)
#  undef RTREE_LOCK
#  undef RTREE_UNLOCK
#  undef RTREE_GET_VALIDATE
#endif

/* rtree_get(): lock/unlock expand to nothing, so reads are unsynchronized. */
#define	RTREE_LOCK(l)
#define	RTREE_UNLOCK(l)
#ifdef JEMALLOC_DEBUG
   /*
    * Suppose that it were possible for a jemalloc-allocated chunk to be
    * munmap()ped, followed by a different allocator in another thread re-using
    * overlapping virtual memory, all without invalidating the cached rtree
    * value.  The result would be a false positive (the rtree would claim that
    * jemalloc owns memory that it had actually discarded).  This scenario
    * seems impossible, but the following assertion is a prudent sanity check.
    */
#  define RTREE_GET_VALIDATE						\
	assert(rtree_get_locked(rtree, key) == ret);
#else
#  define RTREE_GET_VALIDATE
#endif
RTREE_GET_GENERATE(rtree_get)
#undef RTREE_LOCK
#undef RTREE_UNLOCK
#undef RTREE_GET_VALIDATE
129
130JEMALLOC_INLINE bool
131rtree_set(rtree_t *rtree, uintptr_t key, uint8_t val)
132{
133	uintptr_t subkey;
134	unsigned i, lshift, height, bits;
135	void **node, **child;
136
137	malloc_mutex_lock(&rtree->mutex);
138	for (i = lshift = 0, height = rtree->height, node = rtree->root;
139	    i < height - 1;
140	    i++, lshift += bits, node = child) {
141		bits = rtree->level2bits[i];
142		subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) -
143		    bits);
144		child = (void**)node[subkey];
145		if (child == NULL) {
146			size_t size = ((i + 1 < height - 1) ? sizeof(void *)
147			    : (sizeof(uint8_t))) << rtree->level2bits[i+1];
148			child = (void**)rtree->alloc(size);
149			if (child == NULL) {
150				malloc_mutex_unlock(&rtree->mutex);
151				return (true);
152			}
153			memset(child, 0, size);
154			node[subkey] = child;
155		}
156	}
157
158	/* node is a leaf, so it contains values rather than node pointers. */
159	bits = rtree->level2bits[i];
160	subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - bits);
161	{
162		uint8_t *leaf = (uint8_t *)node;
163		leaf[subkey] = val;
164	}
165	malloc_mutex_unlock(&rtree->mutex);
166
167	return (false);
168}
169#endif
170
171#endif /* JEMALLOC_H_INLINES */
172/******************************************************************************/
173