#define	JEMALLOC_CHUNK_DSS_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Data. */

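/*
 * Human-readable names for dss_prec_t values, indexed by enum value; "N/A"
 * presumably corresponds to the dss_prec_limit sentinel.
 */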
const char	*dss_prec_names[] = {
	"disabled",
	"primary",
	"secondary",
	"N/A"
};

/* Current dss precedence default, used when creating new arenas. */
static dss_prec_t	dss_prec_default = DSS_PREC_DEFAULT;

/*
 * Protects sbrk() calls.  This avoids malloc races among threads, though it
 * does not protect against races with threads that call sbrk() directly.
 */
static malloc_mutex_t	dss_mtx;

/* Base address of the DSS. */
static void		*dss_base;
/* Current end of the DSS, or ((void *)-1) if the DSS is exhausted. */
static void		*dss_prev;
/* Current upper limit on DSS addresses. */
static void		*dss_max;

/******************************************************************************/

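/*
 * Thin wrapper around sbrk() so that callers need not repeat the
 * JEMALLOC_DSS conditional; sbrk(0) merely probes the current break without
 * moving it.
 */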
static void *
chunk_dss_sbrk(intptr_t increment)
{

#ifdef JEMALLOC_DSS
	return (sbrk(increment));
#else
	not_implemented();
	return (NULL);
#endif
}

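/*
 * Return the current default dss precedence, reading it under dss_mtx;
 * always dss_prec_disabled when DSS support is compiled out.
 */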
dss_prec_t
chunk_dss_prec_get(void)
{
	dss_prec_t ret;

	if (have_dss == false)
		return (dss_prec_disabled);
	malloc_mutex_lock(&dss_mtx);
	ret = dss_prec_default;
	malloc_mutex_unlock(&dss_mtx);
	return (ret);
}

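/*
 * Set the default dss precedence.  Following the usual jemalloc convention,
 * the return value is true on error: requesting any precedence other than
 * disabled fails when DSS support is unavailable.
 */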
bool
chunk_dss_prec_set(dss_prec_t dss_prec)
{

	if (have_dss == false)
		return (dss_prec != dss_prec_disabled);
	malloc_mutex_lock(&dss_mtx);
	dss_prec_default = dss_prec;
	malloc_mutex_unlock(&dss_mtx);
	return (false);
}

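/*
 * Attempt to allocate a size-byte chunk from the DSS, aligned to alignment
 * (both are expected to be chunk-size multiples, per the assertions below).
 * Returns NULL on failure; on success, the memory is explicitly zeroed when
 * *zero is set.
 */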
void *
chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
{
	void *ret;

	cassert(have_dss);
	assert(size > 0 && (size & chunksize_mask) == 0);
	assert(alignment > 0 && (alignment & chunksize_mask) == 0);

	/*
	 * sbrk() uses a signed increment argument, so take care not to
	 * interpret a huge allocation request as a negative increment (e.g.,
	 * on LP64, any request of 2^63 bytes or more has its sign bit set
	 * when cast to intptr_t).
	 */
	if ((intptr_t)size < 0)
		return (NULL);

	malloc_mutex_lock(&dss_mtx);
	if (dss_prev != (void *)-1) {
		size_t gap_size, cpad_size;
		void *cpad, *dss_next;
		intptr_t incr;

		/*
		 * The loop is necessary to recover from races with other
		 * threads that are using the DSS for something other than
		 * malloc.
		 */
		do {
			/* Get the current end of the DSS. */
			dss_max = chunk_dss_sbrk(0);
			/*
			 * Calculate how much padding is necessary to
			 * chunk-align the end of the DSS.
			 */
			gap_size = (chunksize - CHUNK_ADDR2OFFSET(dss_max)) &
			    chunksize_mask;
			/*
			 * Compute how much chunk-aligned pad space (if any) is
			 * necessary to satisfy alignment.  This space can be
			 * recycled for later use.
			 */
			cpad = (void *)((uintptr_t)dss_max + gap_size);
			ret = (void *)ALIGNMENT_CEILING((uintptr_t)dss_max,
			    alignment);
			cpad_size = (uintptr_t)ret - (uintptr_t)cpad;
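			/*
			 * Worked example (values assumed for illustration):
			 * with 4 MiB chunks and 8 MiB alignment, dss_max =
			 * 0x1801000 gives gap_size = 0x3ff000 (chunk-aligning
			 * the break to 0x1c00000), ret = 0x2000000, and
			 * cpad_size = 0x400000: one chunk of recyclable pad.
			 */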
			dss_next = (void *)((uintptr_t)ret + size);
			if ((uintptr_t)ret < (uintptr_t)dss_max ||
			    (uintptr_t)dss_next < (uintptr_t)dss_max) {
				/* Wrap-around. */
				malloc_mutex_unlock(&dss_mtx);
				return (NULL);
			}
			incr = gap_size + cpad_size + size;
			dss_prev = chunk_dss_sbrk(incr);
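			/*
			 * sbrk() returns the prior break on success and
			 * ((void *)-1) on failure.  If the prior break is not
			 * the value probed above, another thread grew the DSS
			 * in the interim; retry against the new break.
			 */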
			if (dss_prev == dss_max) {
				/* Success. */
				dss_max = dss_next;
				malloc_mutex_unlock(&dss_mtx);
				if (cpad_size != 0)
					chunk_unmap(cpad, cpad_size);
				if (*zero) {
					JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
					    ret, size);
					memset(ret, 0, size);
				}
				return (ret);
			}
		} while (dss_prev != (void *)-1);
	}
	malloc_mutex_unlock(&dss_mtx);

	return (NULL);
}

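/*
 * Report whether chunk lies within [dss_base, dss_max), i.e., whether it
 * came from the DSS rather than from mmap().
 */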
bool
chunk_in_dss(void *chunk)
{
	bool ret;

	cassert(have_dss);

	malloc_mutex_lock(&dss_mtx);
	if ((uintptr_t)chunk >= (uintptr_t)dss_base
	    && (uintptr_t)chunk < (uintptr_t)dss_max)
		ret = true;
	else
		ret = false;
	malloc_mutex_unlock(&dss_mtx);

	return (ret);
}

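/*
 * Initialize DSS bookkeeping during bootstrap.  Returns true on error; here
 * only mutex initialization can fail.
 */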
bool
chunk_dss_boot(void)
{

	cassert(have_dss);

	if (malloc_mutex_init(&dss_mtx))
		return (true);
	dss_base = chunk_dss_sbrk(0);
	dss_prev = dss_base;
	dss_max = dss_base;

	return (false);
}

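/*
 * Fork hooks: acquire dss_mtx before fork() and release it in both parent
 * and child afterward, so that a child process never inherits the mutex in a
 * locked (and then unownable) state.
 */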
void
chunk_dss_prefork(void)
{

	if (have_dss)
		malloc_mutex_prefork(&dss_mtx);
}

void
chunk_dss_postfork_parent(void)
{

	if (have_dss)
		malloc_mutex_postfork_parent(&dss_mtx);
}

void
chunk_dss_postfork_child(void)
{

	if (have_dss)
		malloc_mutex_postfork_child(&dss_mtx);
}

/******************************************************************************/