chunk_mmap.c revision a19e87fbad020e8dd3d26682032929e8e5ae71c1
#define	JEMALLOC_CHUNK_MMAP_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void	*pages_map(void *addr, size_t size);
static void	pages_unmap(void *addr, size_t size);
static void	*chunk_alloc_mmap_slow(size_t size, size_t alignment,
    bool *zero);

/******************************************************************************/

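/*
 * Create a new anonymous read/write mapping of size bytes.  If addr is
 * non-NULL it is used as a hint; the result is either a mapping at exactly
 * addr or NULL, never a mapping at some other address.
 */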
static void *
pages_map(void *addr, size_t size)
{
	void *ret;

#ifdef _WIN32
	/*
	 * If VirtualAlloc can't allocate at the given address when one is
	 * given, it fails and returns NULL.
	 */
	ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE,
	    PAGE_READWRITE);
#else
	/*
	 * We don't use MAP_FIXED here, because it can cause the *replacement*
	 * of existing mappings, and we only want to create new mappings.
	 */
	ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
	    -1, 0);
	assert(ret != NULL);

	if (ret == MAP_FAILED)
		ret = NULL;
	else if (addr != NULL && ret != addr) {
		/*
		 * We succeeded in mapping memory, but not in the right place.
		 */
		if (munmap(ret, size) == -1) {
			char buf[BUFERROR_BUF];

			buferror(errno, buf, sizeof(buf));
			malloc_printf("<jemalloc>: Error in munmap(): %s\n",
			    buf);
			if (opt_abort)
				abort();
		}
		ret = NULL;
	}
#endif
	assert(ret == NULL || (addr == NULL && ret != addr)
	    || (addr != NULL && ret == addr));
	return (ret);
}

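/*
 * Unmap the region at [addr, addr+size).  On Windows, VirtualFree() with
 * MEM_RELEASE releases the entire allocation that starts at addr, so size is
 * not passed to it.  Failures are reported, and abort() if opt_abort is set.
 */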
static void
pages_unmap(void *addr, size_t size)
{

#ifdef _WIN32
	if (VirtualFree(addr, 0, MEM_RELEASE) == 0)
#else
	if (munmap(addr, size) == -1)
#endif
	{
		char buf[BUFERROR_BUF];

		buferror(errno, buf, sizeof(buf));
		malloc_printf("<jemalloc>: Error in "
#ifdef _WIN32
		              "VirtualFree"
#else
		              "munmap"
#endif
		              "(): %s\n", buf);
		if (opt_abort)
			abort();
	}
}

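/*
 * Trim an over-sized mapping of alloc_size bytes at addr down to the
 * size-byte region that begins leadsize bytes in.  On Windows the whole
 * mapping must be released and the desired subrange re-mapped, which can
 * fail; elsewhere the leading and trailing excess is simply munmap()ed.
 */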
static void *
pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size)
{
	void *ret = (void *)((uintptr_t)addr + leadsize);

	assert(alloc_size >= leadsize + size);
#ifdef _WIN32
	{
		void *new_addr;

		pages_unmap(addr, alloc_size);
		new_addr = pages_map(ret, size);
		if (new_addr == ret)
			return (ret);
		if (new_addr)
			pages_unmap(new_addr, size);
		return (NULL);
	}
#else
	{
		size_t trailsize = alloc_size - leadsize - size;

		if (leadsize != 0)
			pages_unmap(addr, leadsize);
		if (trailsize != 0)
			pages_unmap((void *)((uintptr_t)ret + size), trailsize);
		return (ret);
	}
#endif
}

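/*
 * Inform the kernel that the contents of the pages in [addr, addr+length)
 * need not be preserved, via MEM_RESET on Windows or madvise() elsewhere.
 */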
void
pages_purge(void *addr, size_t length)
{

#ifdef _WIN32
	VirtualAlloc(addr, length, MEM_RESET, PAGE_READWRITE);
#else
#  ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
#    define JEMALLOC_MADV_PURGE MADV_DONTNEED
#  elif defined(JEMALLOC_PURGE_MADVISE_FREE)
#    define JEMALLOC_MADV_PURGE MADV_FREE
#  else
#    error "No method defined for purging unused dirty pages."
#  endif
	madvise(addr, length, JEMALLOC_MADV_PURGE);
#endif
}

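/*
 * Reliable-but-slow allocation path: over-allocate by (alignment - PAGE)
 * bytes so that an aligned size-byte region must exist within the mapping,
 * then trim the excess.  The loop only repeats if pages_trim() fails, which
 * can happen on Windows where trimming requires re-mapping.
 */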
static void *
chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero)
{
	void *ret, *pages;
	size_t alloc_size, leadsize;

	alloc_size = size + alignment - PAGE;
	/* Beware size_t wrap-around. */
	if (alloc_size < size)
		return (NULL);
	do {
		pages = pages_map(NULL, alloc_size);
		if (pages == NULL)
			return (NULL);
		leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
		    (uintptr_t)pages;
		ret = pages_trim(pages, alloc_size, leadsize, size);
	} while (ret == NULL);

	assert(ret != NULL);
	*zero = true;
	return (ret);
}

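/*
 * Allocate a size-byte mapping aligned to alignment.  *zero is set to true
 * because freshly mapped pages are zero-filled.
 */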
void *
chunk_alloc_mmap(size_t size, size_t alignment, bool *zero)
{
	void *ret;
	size_t offset;

	/*
	 * Ideally, there would be a way to specify alignment to mmap() (like
	 * NetBSD has), but in the absence of such a feature, we have to work
	 * hard to efficiently create aligned mappings.  The reliable, but
	 * slow method is to create a mapping that is over-sized, then trim the
	 * excess.  However, that always results in at least one call to
	 * pages_unmap().
	 *
	 * A more optimistic approach is to try mapping precisely the right
	 * amount, then try to append another mapping if alignment is off.  In
	 * practice, this works out well as long as the application is not
	 * interleaving mappings via direct mmap() calls.  If we do run into a
	 * situation where there is an interleaved mapping and we are unable to
	 * extend an unaligned mapping, our best option is to switch to the
	 * slow method until mmap() returns another aligned mapping.  This will
	 * tend to leave a gap in the memory map that is too small to cause
	 * later problems for the optimistic method.
	 *
	 * Another possible confounding factor is address space layout
	 * randomization (ASLR), which causes mmap(2) to disregard the
	 * requested address.  As such, repeatedly trying to extend unaligned
	 * mappings could result in an infinite loop, so if extension fails,
	 * immediately fall back to the reliable method of over-allocation
	 * followed by trimming.
	 */

	ret = pages_map(NULL, size);
	if (ret == NULL)
		return (NULL);

	offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
	if (offset != 0) {
#ifdef _WIN32
		return (chunk_alloc_mmap_slow(size, alignment, zero));
#else
		/* Try to extend chunk boundary. */
		if (pages_map((void *)((uintptr_t)ret + size), chunksize -
		    offset) == NULL) {
			/*
			 * Extension failed.  Clean up, then fall back to the
			 * reliable-but-expensive method.
			 */
			pages_unmap(ret, size);
			return (chunk_alloc_mmap_slow(size, alignment, zero));
		} else {
			/* Clean up unneeded leading space. */
			pages_unmap(ret, chunksize - offset);
			ret = (void *)((uintptr_t)ret + (chunksize - offset));
		}
#endif
	}

	assert(ret != NULL);
	*zero = true;
	return (ret);
}

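/*
 * Unmap a chunk if the build configuration permits munmap().  Return false
 * if the chunk was unmapped, true if it was left intact because munmap() is
 * disabled.
 */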
bool
chunk_dealloc_mmap(void *chunk, size_t size)
{

	if (config_munmap)
		pages_unmap(chunk, size);

	return (config_munmap == false);
}