/* rtree.h revision b980cc774a9ccb208a82f4e9ccdcc695d06a960a */
1/*
2 * This radix tree implementation is tailored to the singular purpose of
3 * tracking which chunks are currently owned by jemalloc.  This functionality
4 * is mandatory for OS X, where jemalloc must be able to respond to object
5 * ownership queries.
6 *
7 *******************************************************************************
8 */
#ifdef JEMALLOC_H_TYPES

typedef struct rtree_s rtree_t;

/*
 * Size of each radix tree node (must be a power of 2).  This impacts tree
 * depth.
 */
#if (LG_SIZEOF_PTR == 2)
/* 32-bit pointers: use a large (16 KiB) node so the tree stays shallow. */
#  define RTREE_NODESIZE (1U << 14)
#else
/* 64-bit pointers: one cacheline per node to bound per-node overhead. */
#  define RTREE_NODESIZE CACHELINE
#endif

/*
 * Allocation/deallocation hooks for internal node storage, supplied to
 * rtree_new() so the tree does not hard-code a particular allocator.
 */
typedef void *(rtree_alloc_t)(size_t);
typedef void (rtree_dalloc_t)(void *);

#endif /* JEMALLOC_H_TYPES */
27/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

struct rtree_s {
	rtree_alloc_t	*alloc;		/* Allocates internal nodes (rtree_set). */
	rtree_dalloc_t	*dalloc;	/* Frees internal nodes. */
	malloc_mutex_t	mutex;		/* Guards writes; reads lock only in debug. */
	void		**root;		/* Top-level node of the radix tree. */
	unsigned	height;		/* Number of tree levels. */
	/*
	 * level2bits[i] is the number of key bits consumed at level i.
	 * Declared [1] but allocated with height entries ("dynamically
	 * sized" trailing-array idiom; sizing is done in rtree.c).
	 */
	unsigned	level2bits[1]; /* Dynamically sized. */
};

#endif /* JEMALLOC_H_STRUCTS */
40/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

/*
 * Construct a tree using the given allocation hooks for node storage.
 * NOTE(review): [bits] is presumably the number of significant key bits to
 * discriminate on — confirm against rtree_new() in rtree.c.
 */
rtree_t	*rtree_new(unsigned bits, rtree_alloc_t *alloc, rtree_dalloc_t *dalloc);
void	rtree_delete(rtree_t *rtree);
/* Fork-safety hooks for the embedded mutex. */
void	rtree_prefork(rtree_t *rtree);
void	rtree_postfork_parent(rtree_t *rtree);
void	rtree_postfork_child(rtree_t *rtree);

#endif /* JEMALLOC_H_EXTERNS */
50/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
/*
 * When inlining is disabled, the definitions below are emitted as real
 * functions in rtree.c (JEMALLOC_RTREE_C_); these are their prototypes.
 */
#ifdef JEMALLOC_DEBUG
void	*rtree_get_locked(rtree_t *rtree, uintptr_t key);
#endif
void	*rtree_get(rtree_t *rtree, uintptr_t key);
bool	rtree_set(rtree_t *rtree, uintptr_t key, void *val);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_))
/*
 * Generate the body of an rtree lookup function named f().  Locking and
 * post-lookup validation come from whatever RTREE_LOCK/RTREE_UNLOCK and
 * RTREE_GET_VALIDATE expand to at instantiation time, so the same body
 * yields both the locked (debug) and lock-free variants below.  Returns
 * the stored value, or NULL if no interior node exists along key's path.
 */
#define	RTREE_GET_GENERATE(f)						\
/* The least significant bits of the key are ignored. */		\
JEMALLOC_INLINE void *							\
f(rtree_t *rtree, uintptr_t key)					\
{									\
	void *ret;							\
	uintptr_t subkey;						\
	unsigned i, lshift, height, bits;				\
	void **node, **child;						\
									\
	RTREE_LOCK(&rtree->mutex);					\
	for (i = lshift = 0, height = rtree->height, node = rtree->root;\
	    i < height - 1;						\
	    i++, lshift += bits, node = child) {			\
		/* Consume the next level2bits[i] high-order key bits. */\
		bits = rtree->level2bits[i];				\
		subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR +	\
		    3)) - bits);					\
		child = (void**)node[subkey];				\
		if (child == NULL) {					\
			RTREE_UNLOCK(&rtree->mutex);			\
			return (NULL);					\
		}							\
	}								\
									\
	/*								\
	 * node is a leaf, so it contains values rather than node	\
	 * pointers.							\
	 */								\
	bits = rtree->level2bits[i];					\
	subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) -	\
	    bits);							\
	ret = node[subkey];						\
	RTREE_UNLOCK(&rtree->mutex);					\
									\
	RTREE_GET_VALIDATE						\
	return (ret);							\
}
99
#ifdef JEMALLOC_DEBUG
/*
 * Debug builds additionally emit a fully locked lookup variant; rtree_get()
 * uses it below to cross-check its own lock-free result.
 */
#  define RTREE_LOCK(l)		malloc_mutex_lock(l)
#  define RTREE_UNLOCK(l)	malloc_mutex_unlock(l)
#  define RTREE_GET_VALIDATE
RTREE_GET_GENERATE(rtree_get_locked)
#  undef RTREE_LOCK
#  undef RTREE_UNLOCK
#  undef RTREE_GET_VALIDATE
#endif

/* Lock-free variant: the lock hooks expand to nothing. */
#define	RTREE_LOCK(l)
#define	RTREE_UNLOCK(l)
#ifdef JEMALLOC_DEBUG
   /*
    * Suppose that it were possible for a jemalloc-allocated chunk to be
    * munmap()ped, followed by a different allocator in another thread re-using
    * overlapping virtual memory, all without invalidating the cached rtree
    * value.  The result would be a false positive (the rtree would claim that
    * jemalloc owns memory that it had actually discarded).  This scenario
    * seems impossible, but the following assertion is a prudent sanity check.
    */
#  define RTREE_GET_VALIDATE						\
	assert(rtree_get_locked(rtree, key) == ret);
#else
#  define RTREE_GET_VALIDATE
#endif
RTREE_GET_GENERATE(rtree_get)
#undef RTREE_LOCK
#undef RTREE_UNLOCK
#undef RTREE_GET_VALIDATE
130
131JEMALLOC_INLINE bool
132rtree_set(rtree_t *rtree, uintptr_t key, void *val)
133{
134	uintptr_t subkey;
135	unsigned i, lshift, height, bits;
136	void **node, **child;
137
138	malloc_mutex_lock(&rtree->mutex);
139	for (i = lshift = 0, height = rtree->height, node = rtree->root;
140	    i < height - 1;
141	    i++, lshift += bits, node = child) {
142		bits = rtree->level2bits[i];
143		subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) -
144		    bits);
145		child = (void**)node[subkey];
146		if (child == NULL) {
147			child = (void**)rtree->alloc(sizeof(void *) <<
148			    rtree->level2bits[i+1]);
149			if (child == NULL) {
150				malloc_mutex_unlock(&rtree->mutex);
151				return (true);
152			}
153			memset(child, 0, sizeof(void *) <<
154			    rtree->level2bits[i+1]);
155			node[subkey] = child;
156		}
157	}
158
159	/* node is a leaf, so it contains values rather than node pointers. */
160	bits = rtree->level2bits[i];
161	subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - bits);
162	node[subkey] = val;
163	malloc_mutex_unlock(&rtree->mutex);
164
165	return (false);
166}
167#endif
168
169#endif /* JEMALLOC_H_INLINES */
170/******************************************************************************/
171