huge.c revision 0c516a00c4cb28cff55ce0995f756b5aae074c9e
#define	JEMALLOC_HUGE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/

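/*
 * Each huge allocation is tracked by an extent_node_t registered in the
 * global chunk registry.  These helpers wrap lookup, registration, and
 * deregistration of that node for a given huge allocation.
 */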
static extent_node_t *
huge_node_get(const void *ptr)
{
	extent_node_t *node;

	node = chunk_lookup(ptr, true);
	assert(!extent_node_achunk_get(node));

	return (node);
}

static bool
huge_node_set(const void *ptr, extent_node_t *node)
{

	assert(extent_node_addr_get(node) == ptr);
	assert(!extent_node_achunk_get(node));
	return (chunk_register(ptr, node));
}

static void
huge_node_unset(const void *ptr, const extent_node_t *node)
{

	chunk_deregister(ptr, node);
}

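/* Allocate a huge region with the minimum (chunksize) alignment. */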
void *
huge_malloc(tsd_t *tsd, arena_t *arena, size_t usize, bool zero,
    tcache_t *tcache)
{

	assert(usize == s2u(usize));

	return (huge_palloc(tsd, arena, usize, chunksize, zero, tcache));
}

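/*
 * Allocate a chunk-aligned huge region: reserve an extent node to track it,
 * allocate the chunks from the arena, register the address-to-node mapping,
 * and link the node into the arena's list of huge allocations.
 */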
void *
huge_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
    bool zero, tcache_t *tcache)
{
	void *ret;
	size_t ausize;
	extent_node_t *node;
	bool is_zeroed;

	/* Allocate one or more contiguous chunks for this request. */

	ausize = sa2u(usize, alignment);
	if (unlikely(ausize == 0 || ausize > HUGE_MAXCLASS))
		return (NULL);
	assert(ausize >= chunksize);

	/* Allocate an extent node with which to track the chunk. */
	node = ipallocztm(tsd, CACHELINE_CEILING(sizeof(extent_node_t)),
	    CACHELINE, false, tcache, true, arena);
	if (node == NULL)
		return (NULL);

	/*
	 * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
	 * it is possible to make correct junk/zero fill decisions below.
	 */
	is_zeroed = zero;
	arena = arena_choose(tsd, arena);
	if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(arena,
	    usize, alignment, &is_zeroed)) == NULL) {
		idalloctm(tsd, node, tcache, true, true);
		return (NULL);
	}

	extent_node_init(node, arena, ret, usize, is_zeroed, true);

	if (huge_node_set(ret, node)) {
		arena_chunk_dalloc_huge(arena, ret, usize);
		idalloctm(tsd, node, tcache, true, true);
		return (NULL);
	}

	/* Insert node into huge. */
	malloc_mutex_lock(&arena->huge_mtx);
	ql_elm_new(node, ql_link);
	ql_tail_insert(&arena->huge, node, ql_link);
	malloc_mutex_unlock(&arena->huge_mtx);

	if (zero || (config_fill && unlikely(opt_zero))) {
		if (!is_zeroed)
			memset(ret, 0, usize);
	} else if (config_fill && unlikely(opt_junk_alloc))
		memset(ret, 0xa5, usize);

	arena_decay_tick(tsd, arena);
	return (ret);
}

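/*
 * When built with JEMALLOC_JET, huge_dalloc_junk is exposed as a function
 * pointer so that tests can interpose on junk filling.
 */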
#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define	huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk_impl)
#endif
static void
huge_dalloc_junk(void *ptr, size_t usize)
{

	if (config_fill && have_dss && unlikely(opt_junk_free)) {
		/*
		 * Only bother junk filling if the chunk isn't about to be
		 * unmapped.
		 */
		if (!config_munmap || (have_dss && chunk_in_dss(ptr)))
			memset(ptr, 0x5a, usize);
	}
}
#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define	huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk)
huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
#endif

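/*
 * Resize in place without changing the number of backing chunks; only the
 * junk/zero fill, the recorded size, and arena statistics are updated.
 */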
static void
huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize_min,
    size_t usize_max, bool zero)
{
	size_t usize, usize_next;
	extent_node_t *node;
	arena_t *arena;
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
	bool pre_zeroed, post_zeroed;

	/* Increase usize to incorporate extra. */
	for (usize = usize_min; usize < usize_max && (usize_next = s2u(usize+1))
	    <= oldsize; usize = usize_next)
		; /* Do nothing. */

	if (oldsize == usize)
		return;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	pre_zeroed = extent_node_zeroed_get(node);

	/* Fill if necessary (shrinking). */
	if (oldsize > usize) {
		size_t sdiff = oldsize - usize;
		if (config_fill && unlikely(opt_junk_free)) {
			memset((void *)((uintptr_t)ptr + usize), 0x5a, sdiff);
			post_zeroed = false;
		} else {
			post_zeroed = !chunk_purge_wrapper(arena, &chunk_hooks,
			    ptr, CHUNK_CEILING(oldsize), usize, sdiff);
		}
	} else
		post_zeroed = pre_zeroed;

	malloc_mutex_lock(&arena->huge_mtx);
	/* Update the size of the huge allocation. */
	assert(extent_node_size_get(node) != usize);
	extent_node_size_set(node, usize);
	/* Update zeroed. */
	extent_node_zeroed_set(node, post_zeroed);
	malloc_mutex_unlock(&arena->huge_mtx);

	arena_chunk_ralloc_huge_similar(arena, ptr, oldsize, usize);

	/* Fill if necessary (growing). */
	if (oldsize < usize) {
		if (zero || (config_fill && unlikely(opt_zero))) {
			if (!pre_zeroed) {
				memset((void *)((uintptr_t)ptr + oldsize), 0,
				    usize - oldsize);
			}
		} else if (config_fill && unlikely(opt_junk_alloc)) {
			memset((void *)((uintptr_t)ptr + oldsize), 0xa5, usize -
			    oldsize);
		}
	}
}

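/*
 * Shrink in place by splitting off the excess trailing chunks and returning
 * them to the arena; the discarded tail is junk filled or purged.
 */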
static bool
huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
{
	extent_node_t *node;
	arena_t *arena;
	chunk_hooks_t chunk_hooks;
	size_t cdiff;
	bool pre_zeroed, post_zeroed;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	pre_zeroed = extent_node_zeroed_get(node);
	chunk_hooks = chunk_hooks_get(arena);

	assert(oldsize > usize);

	/* Split excess chunks. */
	cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
	if (cdiff != 0 && chunk_hooks.split(ptr, CHUNK_CEILING(oldsize),
	    CHUNK_CEILING(usize), cdiff, true, arena->ind))
		return (true);

	if (oldsize > usize) {
		size_t sdiff = oldsize - usize;
		if (config_fill && unlikely(opt_junk_free)) {
			huge_dalloc_junk((void *)((uintptr_t)ptr + usize),
			    sdiff);
			post_zeroed = false;
		} else {
			post_zeroed = !chunk_purge_wrapper(arena, &chunk_hooks,
			    CHUNK_ADDR2BASE((uintptr_t)ptr + usize),
			    CHUNK_CEILING(oldsize),
			    CHUNK_ADDR2OFFSET((uintptr_t)ptr + usize), sdiff);
		}
	} else
		post_zeroed = pre_zeroed;

	malloc_mutex_lock(&arena->huge_mtx);
	/* Update the size of the huge allocation. */
	extent_node_size_set(node, usize);
	/* Update zeroed. */
	extent_node_zeroed_set(node, post_zeroed);
	malloc_mutex_unlock(&arena->huge_mtx);

	/* Zap the excess chunks. */
	arena_chunk_ralloc_huge_shrink(arena, ptr, oldsize, usize);

	return (false);
}

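/*
 * Grow in place by having the arena extend the allocation with chunks mapped
 * immediately after the existing ones, then zero/junk fill the new space as
 * needed.
 */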
static bool
huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t usize, bool zero)
{
	extent_node_t *node;
	arena_t *arena;
	bool is_zeroed_subchunk, is_zeroed_chunk;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	malloc_mutex_lock(&arena->huge_mtx);
	is_zeroed_subchunk = extent_node_zeroed_get(node);
	malloc_mutex_unlock(&arena->huge_mtx);

	/*
	 * Copy zero into is_zeroed_chunk and pass the copy to chunk_alloc(), so
	 * that it is possible to make correct junk/zero fill decisions below.
	 */
	is_zeroed_chunk = zero;

	if (arena_chunk_ralloc_huge_expand(arena, ptr, oldsize, usize,
	     &is_zeroed_chunk))
		return (true);

	malloc_mutex_lock(&arena->huge_mtx);
	/* Update the size of the huge allocation. */
	extent_node_size_set(node, usize);
	malloc_mutex_unlock(&arena->huge_mtx);

	if (zero || (config_fill && unlikely(opt_zero))) {
		if (!is_zeroed_subchunk) {
			memset((void *)((uintptr_t)ptr + oldsize), 0,
			    CHUNK_CEILING(oldsize) - oldsize);
		}
		if (!is_zeroed_chunk) {
			memset((void *)((uintptr_t)ptr +
			    CHUNK_CEILING(oldsize)), 0, usize -
			    CHUNK_CEILING(oldsize));
		}
	} else if (config_fill && unlikely(opt_junk_alloc)) {
		memset((void *)((uintptr_t)ptr + oldsize), 0xa5, usize -
		    oldsize);
	}

	return (false);
}

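/*
 * Attempt to satisfy a resize request without moving the allocation.
 * Returns false on success, true if the caller must fall back to
 * allocate/copy/free.
 */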
bool
huge_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t usize_min,
    size_t usize_max, bool zero)
{

	assert(s2u(oldsize) == oldsize);
	/* The following should have been caught by callers. */
	assert(usize_min > 0 && usize_max <= HUGE_MAXCLASS);

	/* Both allocations must be huge to avoid a move. */
	if (oldsize < chunksize || usize_max < chunksize)
		return (true);

	if (CHUNK_CEILING(usize_max) > CHUNK_CEILING(oldsize)) {
		/* Attempt to expand the allocation in-place. */
		if (!huge_ralloc_no_move_expand(ptr, oldsize, usize_max,
		    zero)) {
			arena_decay_tick(tsd, huge_aalloc(ptr));
			return (false);
		}
		/* Try again, this time with usize_min. */
		if (usize_min < usize_max && CHUNK_CEILING(usize_min) >
		    CHUNK_CEILING(oldsize) && !huge_ralloc_no_move_expand(ptr,
		    oldsize, usize_min, zero)) {
			arena_decay_tick(tsd, huge_aalloc(ptr));
			return (false);
		}
	}

	/*
	 * Avoid moving the allocation if the existing chunk size accommodates
	 * the new size.
	 */
	if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize_min)
	    && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(usize_max)) {
		huge_ralloc_no_move_similar(ptr, oldsize, usize_min, usize_max,
		    zero);
		arena_decay_tick(tsd, huge_aalloc(ptr));
		return (false);
	}

	/* Attempt to shrink the allocation in-place. */
	if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(usize_max)) {
		if (!huge_ralloc_no_move_shrink(ptr, oldsize, usize_max)) {
			arena_decay_tick(tsd, huge_aalloc(ptr));
			return (false);
		}
	}
	return (true);
}

static void *
huge_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize,
    size_t alignment, bool zero, tcache_t *tcache)
{

	if (alignment <= chunksize)
		return (huge_malloc(tsd, arena, usize, zero, tcache));
	return (huge_palloc(tsd, arena, usize, alignment, zero, tcache));
}

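/*
 * Reallocate a huge region, moving it only if it cannot be resized in place.
 */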
void *
huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t usize,
    size_t alignment, bool zero, tcache_t *tcache)
{
	void *ret;
	size_t copysize;

	/* The following should have been caught by callers. */
	assert(usize > 0 && usize <= HUGE_MAXCLASS);

	/* Try to avoid moving the allocation. */
	if (!huge_ralloc_no_move(tsd, ptr, oldsize, usize, usize, zero))
		return (ptr);

	/*
	 * usize and oldsize are different enough that we need to use a
	 * different size class.  In that case, fall back to allocating new
	 * space and copying.
	 */
	ret = huge_ralloc_move_helper(tsd, arena, usize, alignment, zero,
	    tcache);
	if (ret == NULL)
		return (NULL);

	copysize = (usize < oldsize) ? usize : oldsize;
	memcpy(ret, ptr, copysize);
	isqalloc(tsd, ptr, oldsize, tcache);
	return (ret);
}

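/*
 * Deallocate a huge region: deregister it, unlink its extent node from the
 * arena's huge list, junk fill if enabled, and return both the chunks and
 * the node to the arena.
 */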
void
huge_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
{
	extent_node_t *node;
	arena_t *arena;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	huge_node_unset(ptr, node);
	malloc_mutex_lock(&arena->huge_mtx);
	ql_remove(&arena->huge, node, ql_link);
	malloc_mutex_unlock(&arena->huge_mtx);

	huge_dalloc_junk(extent_node_addr_get(node),
	    extent_node_size_get(node));
	arena_chunk_dalloc_huge(extent_node_arena_get(node),
	    extent_node_addr_get(node), extent_node_size_get(node));
	idalloctm(tsd, node, tcache, true, true);

	arena_decay_tick(tsd, arena);
}

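/*
 * Accessors.  Reads and writes of a huge allocation's size and profiling
 * context are protected by the owning arena's huge_mtx, since they can
 * change during in-place reallocation.
 */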
arena_t *
huge_aalloc(const void *ptr)
{

	return (extent_node_arena_get(huge_node_get(ptr)));
}

size_t
huge_salloc(const void *ptr)
{
	size_t size;
	extent_node_t *node;
	arena_t *arena;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	malloc_mutex_lock(&arena->huge_mtx);
	size = extent_node_size_get(node);
	malloc_mutex_unlock(&arena->huge_mtx);

	return (size);
}

prof_tctx_t *
huge_prof_tctx_get(const void *ptr)
{
	prof_tctx_t *tctx;
	extent_node_t *node;
	arena_t *arena;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	malloc_mutex_lock(&arena->huge_mtx);
	tctx = extent_node_prof_tctx_get(node);
	malloc_mutex_unlock(&arena->huge_mtx);

	return (tctx);
}

void
huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
{
	extent_node_t *node;
	arena_t *arena;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	malloc_mutex_lock(&arena->huge_mtx);
	extent_node_prof_tctx_set(node, tctx);
	malloc_mutex_unlock(&arena->huge_mtx);
}

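/*
 * The sentinel tctx value (prof_tctx_t *)(uintptr_t)1U marks the allocation
 * as not sampled by the heap profiler.
 */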
void
huge_prof_tctx_reset(const void *ptr)
{

	huge_prof_tctx_set(ptr, (prof_tctx_t *)(uintptr_t)1U);
}