chunk_mmap.c revision cd9a1346e96f71bdecdc654ea50fc62d76371e74
#define	JEMALLOC_CHUNK_MMAP_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

/*
 * Used by chunk_alloc_mmap() to decide whether to attempt the fast path and
 * potentially avoid some system calls.
 */
malloc_tsd_data(static, mmap_unaligned, bool, false)
malloc_tsd_funcs(JEMALLOC_INLINE, mmap_unaligned, bool, false,
    malloc_tsd_no_cleanup)

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void	*pages_map(void *addr, size_t size, bool noreserve);
static void	pages_unmap(void *addr, size_t size);
static void	*chunk_alloc_mmap_slow(size_t size, bool unaligned,
    bool noreserve);
static void	*chunk_alloc_mmap_internal(size_t size, bool noreserve);

/******************************************************************************/

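/*
 * Create an anonymous private mapping of the requested size.  addr is only a
 * hint; if the kernel places the mapping elsewhere, the mapping is discarded
 * and NULL is returned.  When noreserve is true and MAP_NORESERVE is
 * available, no swap space is reserved for the mapping.
 */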
static void *
pages_map(void *addr, size_t size, bool noreserve)
{
	void *ret;

	/*
	 * We don't use MAP_FIXED here, because it can cause the *replacement*
	 * of existing mappings, and we only want to create new mappings.
	 */
	int flags = MAP_PRIVATE | MAP_ANON;
#ifdef MAP_NORESERVE
	if (noreserve)
		flags |= MAP_NORESERVE;
#endif
	ret = mmap(addr, size, PROT_READ | PROT_WRITE, flags, -1, 0);
	assert(ret != NULL);

	if (ret == MAP_FAILED)
		ret = NULL;
	else if (addr != NULL && ret != addr) {
		/*
		 * We succeeded in mapping memory, but not in the right place.
		 */
		if (munmap(ret, size) == -1) {
			char buf[BUFERROR_BUF];

			buferror(errno, buf, sizeof(buf));
			malloc_printf("<jemalloc>: Error in munmap(): %s\n",
			    buf);
			if (opt_abort)
				abort();
		}
		ret = NULL;
	}

	assert(ret == NULL || (addr == NULL && ret != addr)
	    || (addr != NULL && ret == addr));
	return (ret);
}

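/*
 * Unmap the given range.  On munmap() failure, report the error via
 * malloc_printf() and abort if opt_abort is set.
 */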
static void
pages_unmap(void *addr, size_t size)
{

	if (munmap(addr, size) == -1) {
		char buf[BUFERROR_BUF];

		buferror(errno, buf, sizeof(buf));
		malloc_printf("<jemalloc>: Error in munmap(): %s\n", buf);
		if (opt_abort)
			abort();
	}
}

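/*
 * Reliable but relatively expensive allocation path: over-allocate by
 * chunksize, then unmap the leading and trailing excess so that the returned
 * region is chunk-aligned.  If no unaligned mapping was observed, clear
 * mmap_unaligned so that the next chunk_alloc_mmap() call retries the fast
 * path.
 */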
static void *
chunk_alloc_mmap_slow(size_t size, bool unaligned, bool noreserve)
{
	void *ret;
	size_t offset;

	/* Beware size_t wrap-around. */
	if (size + chunksize <= size)
		return (NULL);

	ret = pages_map(NULL, size + chunksize, noreserve);
	if (ret == NULL)
		return (NULL);

	/* Clean up unneeded leading/trailing space. */
	offset = CHUNK_ADDR2OFFSET(ret);
	if (offset != 0) {
		/* Note that mmap() returned an unaligned mapping. */
		unaligned = true;

		/* Leading space. */
		pages_unmap(ret, chunksize - offset);

		ret = (void *)((uintptr_t)ret +
		    (chunksize - offset));

		/* Trailing space. */
		pages_unmap((void *)((uintptr_t)ret + size),
		    offset);
	} else {
		/* Trailing space only. */
		pages_unmap((void *)((uintptr_t)ret + size),
		    chunksize);
	}

	/*
	 * If mmap() returned an aligned mapping, reset mmap_unaligned so that
	 * the next chunk_alloc_mmap() execution tries the fast allocation
	 * method.
	 */
	if (unaligned == false && mmap_unaligned_booted) {
		bool mu = false;
		mmap_unaligned_tsd_set(&mu);
	}

	return (ret);
}

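/*
 * Allocate a chunk-aligned region of the given size, choosing between the
 * optimistic fast path and the reliable slow path described in the comment
 * below.  As an illustration of the fast path (hypothetical addresses,
 * assuming a 4 MiB chunksize): if mmap() returns 0x7f1234500000 for a 4 MiB
 * request, the offset past the chunk boundary is 0x100000, so 0x300000
 * additional bytes are mapped at the end, the leading 0x300000 bytes are
 * unmapped, and the chunk-aligned address 0x7f1234800000 is returned.
 */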
static void *
chunk_alloc_mmap_internal(size_t size, bool noreserve)
{
	void *ret;

	/*
	 * Ideally, there would be a way to specify alignment to mmap() (like
	 * NetBSD has), but in the absence of such a feature, we have to work
	 * hard to efficiently create aligned mappings.  The reliable, but
	 * slow method is to create a mapping that is over-sized, then trim the
	 * excess.  However, that always results in at least one call to
	 * pages_unmap().
	 *
	 * A more optimistic approach is to try mapping precisely the right
	 * amount, then try to append another mapping if alignment is off.  In
	 * practice, this works out well as long as the application is not
	 * interleaving mappings via direct mmap() calls.  If we do run into a
	 * situation where there is an interleaved mapping and we are unable to
	 * extend an unaligned mapping, our best option is to switch to the
	 * slow method until mmap() returns another aligned mapping.  This will
	 * tend to leave a gap in the memory map that is too small to cause
	 * later problems for the optimistic method.
	 *
	 * Another possible confounding factor is address space layout
	 * randomization (ASLR), which causes mmap(2) to disregard the
	 * requested address.  mmap_unaligned tracks whether the previous
	 * chunk_alloc_mmap() execution received any unaligned or relocated
	 * mappings, and if so, the current execution will immediately fall
	 * back to the slow method.  However, we keep track of whether the fast
	 * method would have succeeded, and if so, we make a note to try the
	 * fast method next time.
	 */

	if (mmap_unaligned_booted && *mmap_unaligned_tsd_get() == false) {
		size_t offset;

		ret = pages_map(NULL, size, noreserve);
		if (ret == NULL)
			return (NULL);

		offset = CHUNK_ADDR2OFFSET(ret);
		if (offset != 0) {
			bool mu = true;
			mmap_unaligned_tsd_set(&mu);
			/* Try to extend chunk boundary. */
			if (pages_map((void *)((uintptr_t)ret + size),
			    chunksize - offset, noreserve) == NULL) {
				/*
				 * Extension failed.  Clean up, then revert to
				 * the reliable-but-expensive method.
				 */
				pages_unmap(ret, size);
				ret = chunk_alloc_mmap_slow(size, true,
				    noreserve);
			} else {
				/* Clean up unneeded leading space. */
				pages_unmap(ret, chunksize - offset);
				ret = (void *)((uintptr_t)ret + (chunksize -
				    offset));
			}
		}
	} else
		ret = chunk_alloc_mmap_slow(size, false, noreserve);

	return (ret);
}

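/* Allocate a chunk-aligned mapping of the given size. */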
void *
chunk_alloc_mmap(size_t size)
{

	return (chunk_alloc_mmap_internal(size, false));
}

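/*
 * Same as chunk_alloc_mmap(), except that MAP_NORESERVE is requested when the
 * platform supports it, so no swap space is reserved for the mapping.
 */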
void *
chunk_alloc_mmap_noreserve(size_t size)
{

	return (chunk_alloc_mmap_internal(size, true));
}

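/*
 * Unmap a chunk previously allocated by chunk_alloc_mmap() or
 * chunk_alloc_mmap_noreserve().
 */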
void
chunk_dealloc_mmap(void *chunk, size_t size)
{

	pages_unmap(chunk, size);
}

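/*
 * Initialize the mmap_unaligned thread-specific data used by the fast
 * allocation path.  Returns true on error.
 */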
bool
chunk_mmap_boot(void)
{

	/*
	 * XXX For the non-TLS implementation of tsd, the first access from
	 * each thread causes memory allocation.  The result is a bootstrapping
	 * problem for this particular use case, so for now just disable it by
	 * leaving it in an unbooted state.
	 */
#ifdef JEMALLOC_TLS
	if (mmap_unaligned_tsd_boot())
		return (true);
#endif

	return (false);
}