chunk_mmap.c revision ef0a0cc3283ea561a40b33f4325d54bbc351de21
#define	JEMALLOC_CHUNK_MMAP_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void	*pages_map(void *addr, size_t size);
static void	pages_unmap(void *addr, size_t size);
static void	*chunk_alloc_mmap_slow(size_t size, size_t alignment,
    bool *zero);

/******************************************************************************/

static void *
pages_map(void *addr, size_t size)
{
	void *ret;

	assert(size != 0);

#ifdef _WIN32
	/*
	 * If VirtualAlloc can't allocate at the given address when one is
	 * given, it fails and returns NULL.
	 */
	ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE,
	    PAGE_READWRITE);
#else
	/*
	 * We don't use MAP_FIXED here, because it can cause the *replacement*
	 * of existing mappings, and we only want to create new mappings.
	 */
	ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
	    -1, 0);
	assert(ret != NULL);

	if (ret == MAP_FAILED)
		ret = NULL;
	else if (addr != NULL && ret != addr) {
		/*
		 * We succeeded in mapping memory, but not in the right place.
		 */
		pages_unmap(ret, size);
		ret = NULL;
	}
#endif
	assert(ret == NULL || (addr == NULL && ret != addr)
	    || (addr != NULL && ret == addr));
	return (ret);
}

static void
pages_unmap(void *addr, size_t size)
{

#ifdef _WIN32
	if (VirtualFree(addr, 0, MEM_RELEASE) == 0)
#else
	if (munmap(addr, size) == -1)
#endif
	{
		char buf[BUFERROR_BUF];

		buferror(get_errno(), buf, sizeof(buf));
		malloc_printf("<jemalloc>: Error in "
#ifdef _WIN32
		              "VirtualFree"
#else
		              "munmap"
#endif
		              "(): %s\n", buf);
		if (opt_abort)
			abort();
	}
}

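/*
 * Trim an over-sized mapping at addr down to the size-byte region that begins
 * leadsize bytes in.  On Windows the entire mapping must be released and the
 * target region re-mapped, because VirtualFree() cannot release part of an
 * allocation; on other systems the leading and trailing excess are simply
 * unmapped.
 */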
static void *
pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size)
{
	void *ret = (void *)((uintptr_t)addr + leadsize);

	assert(alloc_size >= leadsize + size);
#ifdef _WIN32
	{
		void *new_addr;

		pages_unmap(addr, alloc_size);
		new_addr = pages_map(ret, size);
		if (new_addr == ret)
			return (ret);
		if (new_addr)
			pages_unmap(new_addr, size);
		return (NULL);
	}
#else
	{
		size_t trailsize = alloc_size - leadsize - size;

		if (leadsize != 0)
			pages_unmap(addr, leadsize);
		if (trailsize != 0)
			pages_unmap((void *)((uintptr_t)ret + size), trailsize);
		return (ret);
	}
#endif
}

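/*
 * Purge unused dirty pages in [addr, addr+length).  Returns true if the pages
 * may be non-zero afterward, i.e. the caller cannot assume they read as zero.
 */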
bool
pages_purge(void *addr, size_t length)
{
	bool unzeroed;

#ifdef _WIN32
	VirtualAlloc(addr, length, MEM_RESET, PAGE_READWRITE);
	unzeroed = true;
#elif defined(JEMALLOC_HAVE_MADVISE)
#  ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
#    define JEMALLOC_MADV_PURGE MADV_DONTNEED
#    define JEMALLOC_MADV_ZEROS true
#  elif defined(JEMALLOC_PURGE_MADVISE_FREE)
#    define JEMALLOC_MADV_PURGE MADV_FREE
#    define JEMALLOC_MADV_ZEROS false
#  else
#    error "No madvise(2) flag defined for purging unused dirty pages."
#  endif
	int err = madvise(addr, length, JEMALLOC_MADV_PURGE);
	unzeroed = (!JEMALLOC_MADV_ZEROS || err != 0);
#  undef JEMALLOC_MADV_PURGE
#  undef JEMALLOC_MADV_ZEROS
#else
	/* Last resort no-op. */
	unzeroed = true;
#endif
	return (unzeroed);
}

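/*
 * Reliable (but slow) fallback for aligned allocation: map alignment - PAGE
 * extra bytes, then trim the excess so that a size-byte, alignment-aligned
 * region remains.  Retries if pages_trim() loses a race while re-mapping the
 * trimmed region on Windows.
 */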
static void *
chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero)
{
	void *ret, *pages;
	size_t alloc_size, leadsize;

	alloc_size = size + alignment - PAGE;
	/* Beware size_t wrap-around. */
	if (alloc_size < size)
		return (NULL);
	do {
		pages = pages_map(NULL, alloc_size);
		if (pages == NULL)
			return (NULL);
		leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
		    (uintptr_t)pages;
		ret = pages_trim(pages, alloc_size, leadsize, size);
	} while (ret == NULL);

	assert(ret != NULL);
	*zero = true;
	return (ret);
}

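/*
 * Allocate a size-byte chunk aligned to alignment.  *zero is set to true on
 * success because freshly created anonymous mappings are zero-filled.
 */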
void *
chunk_alloc_mmap(size_t size, size_t alignment, bool *zero)
{
	void *ret;
	size_t offset;

	/*
	 * Ideally, there would be a way to specify alignment to mmap() (like
	 * NetBSD has), but in the absence of such a feature, we have to work
	 * hard to efficiently create aligned mappings.  The reliable, but
	 * slow method is to create a mapping that is over-sized, then trim the
	 * excess.  However, that always results in one or two calls to
	 * pages_unmap().
	 *
	 * Optimistically try mapping precisely the right amount before falling
	 * back to the slow method, with the expectation that the optimistic
	 * approach works most of the time.
	 */

	assert(alignment != 0);
	assert((alignment & chunksize_mask) == 0);

	ret = pages_map(NULL, size);
	if (ret == NULL)
		return (NULL);
	offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
	if (offset != 0) {
		pages_unmap(ret, size);
		return (chunk_alloc_mmap_slow(size, alignment, zero));
	}

	assert(ret != NULL);
	*zero = true;
	return (ret);
}

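/*
 * Deallocate a chunk obtained from chunk_alloc_mmap().  When munmap() is
 * disabled (config_munmap is false), the mapping is left intact and true is
 * returned so that the caller retains the chunk for later reuse.
 */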
bool
chunk_dalloc_mmap(void *chunk, size_t size)
{

	if (config_munmap)
		pages_unmap(chunk, size);

	return (!config_munmap);
}