chunk_mmap.c revision 551ebc43647521bdd0bc78558b106762b3388928
#define	JEMALLOC_CHUNK_MMAP_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void	*pages_map(void *addr, size_t size);
static void	pages_unmap(void *addr, size_t size);
static void	*chunk_alloc_mmap_slow(size_t size, size_t alignment,
    bool *zero);

/******************************************************************************/

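/*
 * Create a new anonymous read/write mapping.  If addr is non-NULL it is
 * treated as a requirement rather than a hint: if the kernel maps the region
 * somewhere else, the mapping is torn down and NULL is returned.
 */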
static void *
pages_map(void *addr, size_t size)
{
	void *ret;

	assert(size != 0);

#ifdef _WIN32
	/*
	 * If VirtualAlloc can't allocate at the given address when one is
	 * given, it fails and returns NULL.
	 */
	ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE,
	    PAGE_READWRITE);
#else
	/*
	 * We don't use MAP_FIXED here, because it can cause the *replacement*
	 * of existing mappings, and we only want to create new mappings.
	 */
	ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
	    -1, 0);
	assert(ret != NULL);

	if (ret == MAP_FAILED)
		ret = NULL;
	else if (addr != NULL && ret != addr) {
		/*
		 * We succeeded in mapping memory, but not in the right place.
		 */
		if (munmap(ret, size) == -1) {
			char buf[BUFERROR_BUF];

			buferror(get_errno(), buf, sizeof(buf));
			malloc_printf("<jemalloc>: Error in munmap(): %s\n",
			    buf);
			if (opt_abort)
				abort();
		}
		ret = NULL;
	}
#endif
	assert(ret == NULL || (addr == NULL && ret != addr)
	    || (addr != NULL && ret == addr));
	return (ret);
}

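/*
 * Unmap a region created by pages_map().  Failure is logged, and is fatal if
 * opt_abort is set.
 */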
static void
pages_unmap(void *addr, size_t size)
{

#ifdef _WIN32
	if (VirtualFree(addr, 0, MEM_RELEASE) == 0)
#else
	if (munmap(addr, size) == -1)
#endif
	{
		char buf[BUFERROR_BUF];

		buferror(get_errno(), buf, sizeof(buf));
		malloc_printf("<jemalloc>: Error in "
#ifdef _WIN32
		              "VirtualFree"
#else
		              "munmap"
#endif
		              "(): %s\n", buf);
		if (opt_abort)
			abort();
	}
}

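/*
 * Trim an alloc_size-byte mapping down to the size-byte region that starts
 * leadsize bytes past addr.  On Windows the original mapping must be released
 * and re-created at the target address, which can fail; on POSIX systems the
 * leading and trailing excess is simply unmapped in place.
 */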
static void *
pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size)
{
	void *ret = (void *)((uintptr_t)addr + leadsize);

	assert(alloc_size >= leadsize + size);
#ifdef _WIN32
	{
		void *new_addr;

		pages_unmap(addr, alloc_size);
		new_addr = pages_map(ret, size);
		if (new_addr == ret)
			return (ret);
		if (new_addr)
			pages_unmap(new_addr, size);
		return (NULL);
	}
#else
	{
		size_t trailsize = alloc_size - leadsize - size;

		if (leadsize != 0)
			pages_unmap(addr, leadsize);
		if (trailsize != 0)
			pages_unmap((void *)((uintptr_t)ret + size), trailsize);
		return (ret);
	}
#endif
}

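/*
 * Inform the kernel that the pages in [addr, addr+length) no longer contain
 * useful data.  Returns true if the pages may still hold non-zero contents
 * afterward ("unzeroed"), false if they are guaranteed to read back as zeros.
 */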
bool
pages_purge(void *addr, size_t length)
{
	bool unzeroed;

#ifdef _WIN32
	VirtualAlloc(addr, length, MEM_RESET, PAGE_READWRITE);
	unzeroed = true;
#elif defined(JEMALLOC_HAVE_MADVISE)
#  ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
#    define JEMALLOC_MADV_PURGE MADV_DONTNEED
#    define JEMALLOC_MADV_ZEROS true
#  elif defined(JEMALLOC_PURGE_MADVISE_FREE)
#    define JEMALLOC_MADV_PURGE MADV_FREE
#    define JEMALLOC_MADV_ZEROS false
#  else
#    error "No madvise(2) flag defined for purging unused dirty pages."
#  endif
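	/*
	 * MADV_DONTNEED (Linux) guarantees that purged pages read back as
	 * zeros, whereas MADV_FREE (BSD) may return their previous contents,
	 * so only the former permits reporting the range as zeroed.
	 */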
	int err = madvise(addr, length, JEMALLOC_MADV_PURGE);
	unzeroed = (!JEMALLOC_MADV_ZEROS || err != 0);
#  undef JEMALLOC_MADV_PURGE
#  undef JEMALLOC_MADV_ZEROS
#else
	/* Last resort no-op. */
	unzeroed = true;
#endif
	return (unzeroed);
}

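/*
 * Reliable but slow path for aligned allocation: over-allocate so that an
 * alignment-aligned, size-byte region must lie within the mapping, then trim
 * the excess.  The loop only iterates on Windows, where pages_trim() can fail.
 */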
static void *
chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero)
{
	void *ret, *pages;
	size_t alloc_size, leadsize;

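	/*
	 * alignment - PAGE extra bytes suffice: mmap() returns page-aligned
	 * addresses, so at most alignment - PAGE leading bytes are needed to
	 * reach the next alignment boundary.
	 */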
	alloc_size = size + alignment - PAGE;
	/* Beware size_t wrap-around. */
	if (alloc_size < size)
		return (NULL);
	do {
		pages = pages_map(NULL, alloc_size);
		if (pages == NULL)
			return (NULL);
		leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
		    (uintptr_t)pages;
		ret = pages_trim(pages, alloc_size, leadsize, size);
	} while (ret == NULL);

	assert(ret != NULL);
	*zero = true;
	return (ret);
}

void *
chunk_alloc_mmap(size_t size, size_t alignment, bool *zero)
{
	void *ret;
	size_t offset;

	/*
	 * Ideally, there would be a way to specify alignment to mmap() (like
	 * NetBSD has), but in the absence of such a feature, we have to work
	 * hard to efficiently create aligned mappings.  The reliable, but
	 * slow method is to create a mapping that is over-sized, then trim the
	 * excess.  However, that always results in one or two calls to
	 * pages_unmap().
	 *
	 * Optimistically try mapping precisely the right amount before falling
	 * back to the slow method, with the expectation that the optimistic
	 * approach works most of the time.
	 */

	assert(alignment != 0);
	assert((alignment & chunksize_mask) == 0);

	ret = pages_map(NULL, size);
	if (ret == NULL)
		return (NULL);
	offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
	if (offset != 0) {
		pages_unmap(ret, size);
		return (chunk_alloc_mmap_slow(size, alignment, zero));
	}

	assert(ret != NULL);
	*zero = true;
	return (ret);
}

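/*
 * Unmap the chunk if the build supports munmap(); otherwise return true to
 * signal that the chunk was not deallocated, so that the caller can retain
 * the virtual memory for later reuse.
 */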
bool
chunk_dalloc_mmap(void *chunk, size_t size)
{

	if (config_munmap)
		pages_unmap(chunk, size);

	return (!config_munmap);
}
