arena.c revision 78f7352259768f670f8e1f9b000388dd32b62493
1#define	JEMALLOC_ARENA_C_
2#include "jemalloc/internal/jemalloc_internal.h"
3
4/******************************************************************************/
5/* Data. */
6
7ssize_t		opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
8arena_bin_info_t	arena_bin_info[NBINS];
9
10JEMALLOC_ATTR(aligned(CACHELINE))
11const uint8_t	small_size2bin[] = {
12#define	S2B_8(i)	i,
13#define	S2B_16(i)	S2B_8(i) S2B_8(i)
14#define	S2B_32(i)	S2B_16(i) S2B_16(i)
15#define	S2B_64(i)	S2B_32(i) S2B_32(i)
16#define	S2B_128(i)	S2B_64(i) S2B_64(i)
17#define	S2B_256(i)	S2B_128(i) S2B_128(i)
18#define	S2B_512(i)	S2B_256(i) S2B_256(i)
19#define	S2B_1024(i)	S2B_512(i) S2B_512(i)
20#define	S2B_2048(i)	S2B_1024(i) S2B_1024(i)
21#define	S2B_4096(i)	S2B_2048(i) S2B_2048(i)
22#define	S2B_8192(i)	S2B_4096(i) S2B_4096(i)
23#define	SIZE_CLASS(bin, delta, size)					\
24	S2B_##delta(bin)
25	SIZE_CLASSES
26#undef S2B_8
27#undef S2B_16
28#undef S2B_32
29#undef S2B_64
30#undef S2B_128
31#undef S2B_256
32#undef S2B_512
33#undef S2B_1024
34#undef S2B_2048
35#undef S2B_4096
36#undef S2B_8192
37#undef SIZE_CLASS
38};
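/*
 * The S2B_* macros above expand SIZE_CLASSES into a dense lookup table: each
 * SIZE_CLASS(bin, delta, size) entry emits delta/8 copies of its bin index,
 * one per 8-byte size quantum that the class covers.  Illustrative example
 * (actual classes depend on configuration): a 24-byte class with delta 8
 * contributes a single entry, and SMALL_SIZE2BIN() maps any request in
 * (16, 24] to that bin with one array lookup rather than a search.
 */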
39
40/******************************************************************************/
41/* Function prototypes for non-inline static functions. */
42
43static void	arena_run_split(arena_t *arena, arena_run_t *run, size_t size,
44    bool large, bool zero);
45static arena_chunk_t *arena_chunk_alloc(arena_t *arena);
46static void	arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk);
47static arena_run_t *arena_run_alloc(arena_t *arena, size_t size, bool large,
48    bool zero);
49static void	arena_purge(arena_t *arena, bool all);
50static void	arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty);
51static void	arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk,
52    arena_run_t *run, size_t oldsize, size_t newsize);
53static void	arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk,
54    arena_run_t *run, size_t oldsize, size_t newsize, bool dirty);
55static arena_run_t	*arena_bin_runs_first(arena_bin_t *bin);
56static void	arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run);
57static void	arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run);
58static arena_run_t *arena_bin_nonfull_run_tryget(arena_bin_t *bin);
59static arena_run_t *arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin);
60static void	*arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin);
61static void	arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
62    arena_bin_t *bin);
63static void	arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk,
64    arena_run_t *run, arena_bin_t *bin);
65static void	arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
66    arena_run_t *run, arena_bin_t *bin);
67static void	arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk,
68    void *ptr, size_t oldsize, size_t size);
69static bool	arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk,
70    void *ptr, size_t oldsize, size_t size, size_t extra, bool zero);
71static bool	arena_ralloc_large(void *ptr, size_t oldsize, size_t size,
72    size_t extra, bool zero);
73static size_t	bin_info_run_size_calc(arena_bin_info_t *bin_info,
74    size_t min_run_size);
75static void	bin_info_init(void);
76
77/******************************************************************************/
78
79static inline int
80arena_run_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
81{
82	uintptr_t a_mapelm = (uintptr_t)a;
83	uintptr_t b_mapelm = (uintptr_t)b;
84
85	assert(a != NULL);
86	assert(b != NULL);
87
88	return ((a_mapelm > b_mapelm) - (a_mapelm < b_mapelm));
89}
90
91/* Generate red-black tree functions. */
92rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_t,
93    u.rb_link, arena_run_comp)
94
95static inline int
96arena_avail_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
97{
98	int ret;
99	size_t a_size = a->bits & ~PAGE_MASK;
100	size_t b_size = b->bits & ~PAGE_MASK;
101
102	assert((a->bits & CHUNK_MAP_KEY) == CHUNK_MAP_KEY || (a->bits &
103	    CHUNK_MAP_DIRTY) == (b->bits & CHUNK_MAP_DIRTY));
104
105	ret = (a_size > b_size) - (a_size < b_size);
106	if (ret == 0) {
107		uintptr_t a_mapelm, b_mapelm;
108
109		if ((a->bits & CHUNK_MAP_KEY) != CHUNK_MAP_KEY)
110			a_mapelm = (uintptr_t)a;
111		else {
112			/*
113			 * Treat keys as though they are lower than anything
114			 * else.
115			 */
116			a_mapelm = 0;
117		}
118		b_mapelm = (uintptr_t)b;
119
120		ret = (a_mapelm > b_mapelm) - (a_mapelm < b_mapelm);
121	}
122
123	return (ret);
124}
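/*
 * Available runs are therefore ordered primarily by size and secondarily by
 * map element address.  Because a CHUNK_MAP_KEY sorts before any real map
 * element of equal size, arena_avail_tree_nsearch() on a key returns the
 * smallest run that is large enough and, among equally sized candidates, the
 * one at the lowest address.
 */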
125
126/* Generate red-black tree functions. */
127rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t, arena_chunk_map_t,
128    u.rb_link, arena_avail_comp)
129
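/*
 * Small run layout (see arena_bin_info_t): the arena_run_t header sits at the
 * base of the run, the allocation bitmap starts at bitmap_offset, and the
 * regions begin at reg0_offset, spaced reg_interval bytes apart.  Allocation
 * takes the lowest unset bitmap bit via bitmap_sfu(), and run->nextind tracks
 * the highest region index ever handed out, which later lets
 * arena_dalloc_bin_run() treat never-touched trailing pages as clean.
 */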
130static inline void *
131arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
132{
133	void *ret;
134	unsigned regind;
135	bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
136	    (uintptr_t)bin_info->bitmap_offset);
137
138	assert(run->nfree > 0);
139	assert(bitmap_full(bitmap, &bin_info->bitmap_info) == false);
140
141	regind = bitmap_sfu(bitmap, &bin_info->bitmap_info);
142	ret = (void *)((uintptr_t)run + (uintptr_t)bin_info->reg0_offset +
143	    (uintptr_t)(bin_info->reg_interval * regind));
144	run->nfree--;
145	if (regind == run->nextind)
146		run->nextind++;
147	assert(regind < run->nextind);
148	return (ret);
149}
150
151static inline void
152arena_run_reg_dalloc(arena_run_t *run, void *ptr)
153{
154	arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
155	size_t binind = arena_bin_index(chunk->arena, run->bin);
156	arena_bin_info_t *bin_info = &arena_bin_info[binind];
157	unsigned regind = arena_run_regind(run, bin_info, ptr);
158	bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
159	    (uintptr_t)bin_info->bitmap_offset);
160
161	assert(run->nfree < bin_info->nregs);
	/* The following assertion catches frees of interior pointers. */
163	assert(((uintptr_t)ptr - ((uintptr_t)run +
164	    (uintptr_t)bin_info->reg0_offset)) %
165	    (uintptr_t)bin_info->reg_interval == 0);
166	assert((uintptr_t)ptr >= (uintptr_t)run +
167	    (uintptr_t)bin_info->reg0_offset);
	/* The following assertion catches frees of unallocated pointers. */
169	assert(bitmap_get(bitmap, &bin_info->bitmap_info, regind));
170
171	bitmap_unset(bitmap, &bin_info->bitmap_info, regind);
172	run->nfree++;
173}
174
175static inline void
176arena_chunk_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
177{
178	size_t i;
179	UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));
180
181	for (i = 0; i < PAGE / sizeof(size_t); i++)
182		assert(p[i] == 0);
183}
184
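/*
 * Each chunk map entry packs per-page flags (CHUNK_MAP_DIRTY,
 * CHUNK_MAP_UNZEROED, CHUNK_MAP_LARGE, CHUNK_MAP_ALLOCATED) into the low bits
 * and uses the bits above PAGE_MASK for a size or offset: free runs and the
 * first page of a large run record the run size, while interior pages of a
 * small run record their page offset into the run, which is how
 * arena_dalloc_bin() walks from an interior page back to the run header.
 * map_bias accounts for the chunk header pages, so page index p corresponds
 * to map element p-map_bias.
 */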
185static void
186arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
187    bool zero)
188{
189	arena_chunk_t *chunk;
190	size_t run_ind, total_pages, need_pages, rem_pages, i;
191	size_t flag_dirty;
192	arena_avail_tree_t *runs_avail;
193
194	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
195	run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
196	flag_dirty = chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY;
197	runs_avail = (flag_dirty != 0) ? &arena->runs_avail_dirty :
198	    &arena->runs_avail_clean;
199	total_pages = (chunk->map[run_ind-map_bias].bits & ~PAGE_MASK) >>
200	    LG_PAGE;
201	assert((chunk->map[run_ind+total_pages-1-map_bias].bits &
202	    CHUNK_MAP_DIRTY) == flag_dirty);
203	need_pages = (size >> LG_PAGE);
204	assert(need_pages > 0);
205	assert(need_pages <= total_pages);
206	rem_pages = total_pages - need_pages;
207
208	arena_avail_tree_remove(runs_avail, &chunk->map[run_ind-map_bias]);
209	if (config_stats) {
210		/*
211		 * Update stats_cactive if nactive is crossing a chunk
212		 * multiple.
213		 */
214		size_t cactive_diff = CHUNK_CEILING((arena->nactive +
215		    need_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
216		    LG_PAGE);
217		if (cactive_diff != 0)
218			stats_cactive_add(cactive_diff);
219	}
220	arena->nactive += need_pages;
221
222	/* Keep track of trailing unused pages for later use. */
223	if (rem_pages > 0) {
224		if (flag_dirty != 0) {
225			chunk->map[run_ind+need_pages-map_bias].bits =
226			    (rem_pages << LG_PAGE) | CHUNK_MAP_DIRTY;
227			chunk->map[run_ind+total_pages-1-map_bias].bits =
228			    (rem_pages << LG_PAGE) | CHUNK_MAP_DIRTY;
229		} else {
230			chunk->map[run_ind+need_pages-map_bias].bits =
231			    (rem_pages << LG_PAGE) |
232			    (chunk->map[run_ind+need_pages-map_bias].bits &
233			    CHUNK_MAP_UNZEROED);
234			chunk->map[run_ind+total_pages-1-map_bias].bits =
235			    (rem_pages << LG_PAGE) |
236			    (chunk->map[run_ind+total_pages-1-map_bias].bits &
237			    CHUNK_MAP_UNZEROED);
238		}
239		arena_avail_tree_insert(runs_avail,
240		    &chunk->map[run_ind+need_pages-map_bias]);
241	}
242
243	/* Update dirty page accounting. */
244	if (flag_dirty != 0) {
245		chunk->ndirty -= need_pages;
246		arena->ndirty -= need_pages;
247	}
248
249	/*
250	 * Update the page map separately for large vs. small runs, since it is
251	 * possible to avoid iteration for large mallocs.
252	 */
253	if (large) {
254		if (zero) {
255			if (flag_dirty == 0) {
256				/*
257				 * The run is clean, so some pages may be
258				 * zeroed (i.e. never before touched).
259				 */
260				for (i = 0; i < need_pages; i++) {
261					if ((chunk->map[run_ind+i-map_bias].bits
262					    & CHUNK_MAP_UNZEROED) != 0) {
263						VALGRIND_MAKE_MEM_UNDEFINED(
264						    (void *)((uintptr_t)
265						    chunk + ((run_ind+i) <<
266						    LG_PAGE)), PAGE);
267						memset((void *)((uintptr_t)
268						    chunk + ((run_ind+i) <<
269						    LG_PAGE)), 0, PAGE);
270					} else if (config_debug) {
271						VALGRIND_MAKE_MEM_DEFINED(
272						    (void *)((uintptr_t)
273						    chunk + ((run_ind+i) <<
274						    LG_PAGE)), PAGE);
275						arena_chunk_validate_zeroed(
276						    chunk, run_ind+i);
277					}
278				}
279			} else {
280				/*
281				 * The run is dirty, so all pages must be
282				 * zeroed.
283				 */
284				VALGRIND_MAKE_MEM_UNDEFINED((void
285				    *)((uintptr_t)chunk + (run_ind <<
286				    LG_PAGE)), (need_pages << LG_PAGE));
287				memset((void *)((uintptr_t)chunk + (run_ind <<
288				    LG_PAGE)), 0, (need_pages << LG_PAGE));
289			}
290		}
291
292		/*
293		 * Set the last element first, in case the run only contains one
294		 * page (i.e. both statements set the same element).
295		 */
296		chunk->map[run_ind+need_pages-1-map_bias].bits =
297		    CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED | flag_dirty;
298		chunk->map[run_ind-map_bias].bits = size | flag_dirty |
299		    CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
300	} else {
301		assert(zero == false);
302		/*
303		 * Propagate the dirty and unzeroed flags to the allocated
304		 * small run, so that arena_dalloc_bin_run() has the ability to
305		 * conditionally trim clean pages.
306		 */
307		chunk->map[run_ind-map_bias].bits =
308		    (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_UNZEROED) |
309		    CHUNK_MAP_ALLOCATED | flag_dirty;
310		/*
311		 * The first page will always be dirtied during small run
312		 * initialization, so a validation failure here would not
313		 * actually cause an observable failure.
314		 */
315		if (config_debug && flag_dirty == 0 &&
316		    (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_UNZEROED)
317		    == 0)
318			arena_chunk_validate_zeroed(chunk, run_ind);
319		for (i = 1; i < need_pages - 1; i++) {
320			chunk->map[run_ind+i-map_bias].bits = (i << LG_PAGE)
321			    | (chunk->map[run_ind+i-map_bias].bits &
322			    CHUNK_MAP_UNZEROED) | CHUNK_MAP_ALLOCATED;
323			if (config_debug && flag_dirty == 0 &&
324			    (chunk->map[run_ind+i-map_bias].bits &
325			    CHUNK_MAP_UNZEROED) == 0)
326				arena_chunk_validate_zeroed(chunk, run_ind+i);
327		}
328		chunk->map[run_ind+need_pages-1-map_bias].bits = ((need_pages
329		    - 1) << LG_PAGE) |
330		    (chunk->map[run_ind+need_pages-1-map_bias].bits &
331		    CHUNK_MAP_UNZEROED) | CHUNK_MAP_ALLOCATED | flag_dirty;
332		if (config_debug && flag_dirty == 0 &&
333		    (chunk->map[run_ind+need_pages-1-map_bias].bits &
334		    CHUNK_MAP_UNZEROED) == 0) {
335			arena_chunk_validate_zeroed(chunk,
336			    run_ind+need_pages-1);
337		}
338	}
339}
340
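/*
 * A new chunk starts life as a single maximal free run of arena_maxclass
 * bytes.  Free-run bookkeeping lives in the first and last map elements of
 * each run; the interior elements only need an accurate CHUNK_MAP_UNZEROED
 * flag, which is why the initialization loop below can be skipped when
 * chunk_alloc() returns a zeroed chunk (all map bits already zero).
 */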
341static arena_chunk_t *
342arena_chunk_alloc(arena_t *arena)
343{
344	arena_chunk_t *chunk;
345	size_t i;
346
347	if (arena->spare != NULL) {
348		arena_avail_tree_t *runs_avail;
349
350		chunk = arena->spare;
351		arena->spare = NULL;
352
353		/* Insert the run into the appropriate runs_avail_* tree. */
354		if ((chunk->map[0].bits & CHUNK_MAP_DIRTY) == 0)
355			runs_avail = &arena->runs_avail_clean;
356		else
357			runs_avail = &arena->runs_avail_dirty;
358		assert((chunk->map[0].bits & ~PAGE_MASK) == arena_maxclass);
359		assert((chunk->map[chunk_npages-1-map_bias].bits & ~PAGE_MASK)
360		    == arena_maxclass);
361		assert((chunk->map[0].bits & CHUNK_MAP_DIRTY) ==
362		    (chunk->map[chunk_npages-1-map_bias].bits &
363		    CHUNK_MAP_DIRTY));
364		arena_avail_tree_insert(runs_avail, &chunk->map[0]);
365	} else {
366		bool zero;
367		size_t unzeroed;
368
369		zero = false;
370		malloc_mutex_unlock(&arena->lock);
371		chunk = (arena_chunk_t *)chunk_alloc(chunksize, chunksize,
372		    false, &zero);
373		malloc_mutex_lock(&arena->lock);
374		if (chunk == NULL)
375			return (NULL);
376		if (config_stats)
377			arena->stats.mapped += chunksize;
378
379		chunk->arena = arena;
380		ql_elm_new(chunk, link_dirty);
381		chunk->dirtied = false;
382
383		/*
384		 * Claim that no pages are in use, since the header is merely
385		 * overhead.
386		 */
387		chunk->ndirty = 0;
388
389		/*
390		 * Initialize the map to contain one maximal free untouched run.
391		 * Mark the pages as zeroed iff chunk_alloc() returned a zeroed
392		 * chunk.
393		 */
394		unzeroed = zero ? 0 : CHUNK_MAP_UNZEROED;
395		chunk->map[0].bits = arena_maxclass | unzeroed;
		/*
		 * The internal page map entries need to be initialized only
		 * when the chunk is not zeroed.
		 */
400		if (zero == false) {
401			for (i = map_bias+1; i < chunk_npages-1; i++)
402				chunk->map[i-map_bias].bits = unzeroed;
403		} else if (config_debug) {
404			for (i = map_bias+1; i < chunk_npages-1; i++)
405				assert(chunk->map[i-map_bias].bits == unzeroed);
406		}
407		chunk->map[chunk_npages-1-map_bias].bits = arena_maxclass |
408		    unzeroed;
409
410		/* Insert the run into the runs_avail_clean tree. */
411		arena_avail_tree_insert(&arena->runs_avail_clean,
412		    &chunk->map[0]);
413	}
414
415	return (chunk);
416}
417
418static void
419arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
420{
421	arena_avail_tree_t *runs_avail;
422
423	/*
424	 * Remove run from the appropriate runs_avail_* tree, so that the arena
425	 * does not use it.
426	 */
427	if ((chunk->map[0].bits & CHUNK_MAP_DIRTY) == 0)
428		runs_avail = &arena->runs_avail_clean;
429	else
430		runs_avail = &arena->runs_avail_dirty;
431	arena_avail_tree_remove(runs_avail, &chunk->map[0]);
432
433	if (arena->spare != NULL) {
434		arena_chunk_t *spare = arena->spare;
435
436		arena->spare = chunk;
437		if (spare->dirtied) {
438			ql_remove(&chunk->arena->chunks_dirty, spare,
439			    link_dirty);
440			arena->ndirty -= spare->ndirty;
441		}
442		malloc_mutex_unlock(&arena->lock);
443		chunk_dealloc((void *)spare, chunksize, true);
444		malloc_mutex_lock(&arena->lock);
445		if (config_stats)
446			arena->stats.mapped -= chunksize;
447	} else
448		arena->spare = chunk;
449}
450
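/*
 * Run allocation searches the dirty tree before the clean tree, which reuses
 * already-touched pages before dirtying clean ones; within each tree the
 * CHUNK_MAP_KEY nsearch yields a lowest-address best fit.  If neither tree
 * has a fit, a fresh chunk is allocated, followed by one more search because
 * arena->lock is dropped inside arena_chunk_alloc().
 */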
451static arena_run_t *
452arena_run_alloc(arena_t *arena, size_t size, bool large, bool zero)
453{
454	arena_chunk_t *chunk;
455	arena_run_t *run;
456	arena_chunk_map_t *mapelm, key;
457
458	assert(size <= arena_maxclass);
459	assert((size & PAGE_MASK) == 0);
460
461	/* Search the arena's chunks for the lowest best fit. */
462	key.bits = size | CHUNK_MAP_KEY;
463	mapelm = arena_avail_tree_nsearch(&arena->runs_avail_dirty, &key);
464	if (mapelm != NULL) {
465		arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
466		size_t pageind = (((uintptr_t)mapelm -
467		    (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
468		    + map_bias;
469
470		run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
471		    LG_PAGE));
472		arena_run_split(arena, run, size, large, zero);
473		return (run);
474	}
475	mapelm = arena_avail_tree_nsearch(&arena->runs_avail_clean, &key);
476	if (mapelm != NULL) {
477		arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
478		size_t pageind = (((uintptr_t)mapelm -
479		    (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
480		    + map_bias;
481
482		run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
483		    LG_PAGE));
484		arena_run_split(arena, run, size, large, zero);
485		return (run);
486	}
487
488	/*
489	 * No usable runs.  Create a new chunk from which to allocate the run.
490	 */
491	chunk = arena_chunk_alloc(arena);
492	if (chunk != NULL) {
493		run = (arena_run_t *)((uintptr_t)chunk + (map_bias << LG_PAGE));
494		arena_run_split(arena, run, size, large, zero);
495		return (run);
496	}
497
498	/*
499	 * arena_chunk_alloc() failed, but another thread may have made
500	 * sufficient memory available while this one dropped arena->lock in
501	 * arena_chunk_alloc(), so search one more time.
502	 */
503	mapelm = arena_avail_tree_nsearch(&arena->runs_avail_dirty, &key);
504	if (mapelm != NULL) {
505		arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
506		size_t pageind = (((uintptr_t)mapelm -
507		    (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
508		    + map_bias;
509
510		run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
511		    LG_PAGE));
512		arena_run_split(arena, run, size, large, zero);
513		return (run);
514	}
515	mapelm = arena_avail_tree_nsearch(&arena->runs_avail_clean, &key);
516	if (mapelm != NULL) {
517		arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
518		size_t pageind = (((uintptr_t)mapelm -
519		    (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
520		    + map_bias;
521
522		run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
523		    LG_PAGE));
524		arena_run_split(arena, run, size, large, zero);
525		return (run);
526	}
527
528	return (NULL);
529}
530
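/*
 * Purging triggers when the dirty pages not already committed to purging
 * exceed both chunk_npages and the active:dirty ratio implied by
 * opt_lg_dirty_mult.  For example, with opt_lg_dirty_mult == 3 an arena with
 * 8192 active pages tolerates up to 1024 uncommitted dirty pages (or
 * chunk_npages, whichever is larger) before arena_purge() runs.
 */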
531static inline void
532arena_maybe_purge(arena_t *arena)
533{
534
535	/* Enforce opt_lg_dirty_mult. */
536	if (opt_lg_dirty_mult >= 0 && arena->ndirty > arena->npurgatory &&
537	    (arena->ndirty - arena->npurgatory) > chunk_npages &&
538	    (arena->nactive >> opt_lg_dirty_mult) < (arena->ndirty -
539	    arena->npurgatory))
540		arena_purge(arena, false);
541}
542
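/*
 * The free dirty runs below are temporarily marked allocated before
 * arena->lock is dropped for the madvise() calls, so that no other thread can
 * hand them out while their pages are being purged; arena_run_dalloc() then
 * returns them, now clean, to the appropriate runs_avail tree.
 */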
543static inline void
544arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
545{
546	ql_head(arena_chunk_map_t) mapelms;
547	arena_chunk_map_t *mapelm;
548	size_t pageind, flag_unzeroed;
549	size_t ndirty;
550	size_t nmadvise;
551
552	ql_new(&mapelms);
553
554	flag_unzeroed =
555#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
	/*
	 * madvise(..., MADV_DONTNEED) results in zero-filled pages for
	 * anonymous mappings, but not for file-backed mappings.
	 */
560	    0
561#else
562	    CHUNK_MAP_UNZEROED
563#endif
564	    ;
565
566	/*
567	 * If chunk is the spare, temporarily re-allocate it, 1) so that its
568	 * run is reinserted into runs_avail_dirty, and 2) so that it cannot be
569	 * completely discarded by another thread while arena->lock is dropped
570	 * by this thread.  Note that the arena_run_dalloc() call will
571	 * implicitly deallocate the chunk, so no explicit action is required
572	 * in this function to deallocate the chunk.
573	 *
574	 * Note that once a chunk contains dirty pages, it cannot again contain
575	 * a single run unless 1) it is a dirty run, or 2) this function purges
576	 * dirty pages and causes the transition to a single clean run.  Thus
577	 * (chunk == arena->spare) is possible, but it is not possible for
578	 * this function to be called on the spare unless it contains a dirty
579	 * run.
580	 */
581	if (chunk == arena->spare) {
582		assert((chunk->map[0].bits & CHUNK_MAP_DIRTY) != 0);
583		arena_chunk_alloc(arena);
584	}
585
586	/* Temporarily allocate all free dirty runs within chunk. */
587	for (pageind = map_bias; pageind < chunk_npages;) {
588		mapelm = &chunk->map[pageind-map_bias];
589		if ((mapelm->bits & CHUNK_MAP_ALLOCATED) == 0) {
590			size_t npages;
591
592			npages = mapelm->bits >> LG_PAGE;
593			assert(pageind + npages <= chunk_npages);
594			if (mapelm->bits & CHUNK_MAP_DIRTY) {
595				size_t i;
596
597				arena_avail_tree_remove(
598				    &arena->runs_avail_dirty, mapelm);
599
600				mapelm->bits = (npages << LG_PAGE) |
601				    flag_unzeroed | CHUNK_MAP_LARGE |
602				    CHUNK_MAP_ALLOCATED;
603				/*
604				 * Update internal elements in the page map, so
605				 * that CHUNK_MAP_UNZEROED is properly set.
606				 */
607				for (i = 1; i < npages - 1; i++) {
608					chunk->map[pageind+i-map_bias].bits =
609					    flag_unzeroed;
610				}
611				if (npages > 1) {
612					chunk->map[
613					    pageind+npages-1-map_bias].bits =
614					    flag_unzeroed | CHUNK_MAP_LARGE |
615					    CHUNK_MAP_ALLOCATED;
616				}
617
618				if (config_stats) {
619					/*
620					 * Update stats_cactive if nactive is
621					 * crossing a chunk multiple.
622					 */
623					size_t cactive_diff =
624					    CHUNK_CEILING((arena->nactive +
625					    npages) << LG_PAGE) -
626					    CHUNK_CEILING(arena->nactive <<
627					    LG_PAGE);
628					if (cactive_diff != 0)
629						stats_cactive_add(cactive_diff);
630				}
631				arena->nactive += npages;
632				/* Append to list for later processing. */
633				ql_elm_new(mapelm, u.ql_link);
634				ql_tail_insert(&mapelms, mapelm, u.ql_link);
635			}
636
637			pageind += npages;
638		} else {
639			/* Skip allocated run. */
640			if (mapelm->bits & CHUNK_MAP_LARGE)
641				pageind += mapelm->bits >> LG_PAGE;
642			else {
643				arena_run_t *run = (arena_run_t *)((uintptr_t)
644				    chunk + (uintptr_t)(pageind << LG_PAGE));
645
646				assert((mapelm->bits >> LG_PAGE) == 0);
647				size_t binind = arena_bin_index(arena,
648				    run->bin);
649				arena_bin_info_t *bin_info =
650				    &arena_bin_info[binind];
651				pageind += bin_info->run_size >> LG_PAGE;
652			}
653		}
654	}
655	assert(pageind == chunk_npages);
656
657	if (config_debug)
658		ndirty = chunk->ndirty;
659	if (config_stats)
660		arena->stats.purged += chunk->ndirty;
661	arena->ndirty -= chunk->ndirty;
662	chunk->ndirty = 0;
663	ql_remove(&arena->chunks_dirty, chunk, link_dirty);
664	chunk->dirtied = false;
665
666	malloc_mutex_unlock(&arena->lock);
667	if (config_stats)
668		nmadvise = 0;
669	ql_foreach(mapelm, &mapelms, u.ql_link) {
670		size_t pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
671		    sizeof(arena_chunk_map_t)) + map_bias;
672		size_t npages = mapelm->bits >> LG_PAGE;
673
674		assert(pageind + npages <= chunk_npages);
675		assert(ndirty >= npages);
676		if (config_debug)
677			ndirty -= npages;
678
679		madvise((void *)((uintptr_t)chunk + (pageind << LG_PAGE)),
680		    (npages << LG_PAGE), JEMALLOC_MADV_PURGE);
681		if (config_stats)
682			nmadvise++;
683	}
684	assert(ndirty == 0);
685	malloc_mutex_lock(&arena->lock);
686	if (config_stats)
687		arena->stats.nmadvise += nmadvise;
688
689	/* Deallocate runs. */
690	for (mapelm = ql_first(&mapelms); mapelm != NULL;
691	    mapelm = ql_first(&mapelms)) {
692		size_t pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
693		    sizeof(arena_chunk_map_t)) + map_bias;
694		arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
695		    (uintptr_t)(pageind << LG_PAGE));
696
697		ql_remove(&mapelms, mapelm, u.ql_link);
698		arena_run_dalloc(arena, run, false);
699	}
700}
701
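/*
 * arena->npurgatory counts dirty pages that some thread has already committed
 * to purging.  Each call claims its own share (the local npurgatory) up
 * front, so concurrent callers do not all chase the same pages and purge more
 * than the threshold requires.
 */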
702static void
703arena_purge(arena_t *arena, bool all)
704{
705	arena_chunk_t *chunk;
706	size_t npurgatory;
707	if (config_debug) {
708		size_t ndirty = 0;
709
710		ql_foreach(chunk, &arena->chunks_dirty, link_dirty) {
711		    assert(chunk->dirtied);
712		    ndirty += chunk->ndirty;
713		}
714		assert(ndirty == arena->ndirty);
715	}
716	assert(arena->ndirty > arena->npurgatory || all);
717	assert(arena->ndirty - arena->npurgatory > chunk_npages || all);
718	assert((arena->nactive >> opt_lg_dirty_mult) < (arena->ndirty -
719	    arena->npurgatory) || all);
720
721	if (config_stats)
722		arena->stats.npurge++;
723
724	/*
725	 * Compute the minimum number of pages that this thread should try to
726	 * purge, and add the result to arena->npurgatory.  This will keep
727	 * multiple threads from racing to reduce ndirty below the threshold.
728	 */
729	npurgatory = arena->ndirty - arena->npurgatory;
730	if (all == false) {
731		assert(npurgatory >= arena->nactive >> opt_lg_dirty_mult);
732		npurgatory -= arena->nactive >> opt_lg_dirty_mult;
733	}
734	arena->npurgatory += npurgatory;
735
736	while (npurgatory > 0) {
737		/* Get next chunk with dirty pages. */
738		chunk = ql_first(&arena->chunks_dirty);
739		if (chunk == NULL) {
740			/*
741			 * This thread was unable to purge as many pages as
742			 * originally intended, due to races with other threads
743			 * that either did some of the purging work, or re-used
744			 * dirty pages.
745			 */
746			arena->npurgatory -= npurgatory;
747			return;
748		}
749		while (chunk->ndirty == 0) {
750			ql_remove(&arena->chunks_dirty, chunk, link_dirty);
751			chunk->dirtied = false;
752			chunk = ql_first(&arena->chunks_dirty);
753			if (chunk == NULL) {
				/* Same logic as above. */
755				arena->npurgatory -= npurgatory;
756				return;
757			}
758		}
759
760		if (chunk->ndirty > npurgatory) {
761			/*
762			 * This thread will, at a minimum, purge all the dirty
763			 * pages in chunk, so set npurgatory to reflect this
764			 * thread's commitment to purge the pages.  This tends
765			 * to reduce the chances of the following scenario:
766			 *
767			 * 1) This thread sets arena->npurgatory such that
768			 *    (arena->ndirty - arena->npurgatory) is at the
769			 *    threshold.
770			 * 2) This thread drops arena->lock.
771			 * 3) Another thread causes one or more pages to be
772			 *    dirtied, and immediately determines that it must
773			 *    purge dirty pages.
774			 *
775			 * If this scenario *does* play out, that's okay,
776			 * because all of the purging work being done really
777			 * needs to happen.
778			 */
779			arena->npurgatory += chunk->ndirty - npurgatory;
780			npurgatory = chunk->ndirty;
781		}
782
783		arena->npurgatory -= chunk->ndirty;
784		npurgatory -= chunk->ndirty;
785		arena_chunk_purge(arena, chunk);
786	}
787}
788
789void
790arena_purge_all(arena_t *arena)
791{
792
793	malloc_mutex_lock(&arena->lock);
794	arena_purge(arena, true);
795	malloc_mutex_unlock(&arena->lock);
796}
797
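/*
 * Freed runs coalesce with free neighbors of matching dirtiness, and the
 * merged run is reinserted into the corresponding runs_avail tree.  If the
 * result is a single unallocated run spanning the whole chunk, the chunk
 * itself is released via arena_chunk_dealloc().
 */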
798static void
799arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
800{
801	arena_chunk_t *chunk;
802	size_t size, run_ind, run_pages, flag_dirty;
803	arena_avail_tree_t *runs_avail;
804
805	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
806	run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
807	assert(run_ind >= map_bias);
808	assert(run_ind < chunk_npages);
809	if ((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_LARGE) != 0) {
810		size = chunk->map[run_ind-map_bias].bits & ~PAGE_MASK;
811		assert(size == PAGE ||
812		    (chunk->map[run_ind+(size>>LG_PAGE)-1-map_bias].bits &
813		    ~PAGE_MASK) == 0);
814		assert((chunk->map[run_ind+(size>>LG_PAGE)-1-map_bias].bits &
815		    CHUNK_MAP_LARGE) != 0);
816		assert((chunk->map[run_ind+(size>>LG_PAGE)-1-map_bias].bits &
817		    CHUNK_MAP_ALLOCATED) != 0);
818	} else {
819		size_t binind = arena_bin_index(arena, run->bin);
820		arena_bin_info_t *bin_info = &arena_bin_info[binind];
821		size = bin_info->run_size;
822	}
823	run_pages = (size >> LG_PAGE);
824	if (config_stats) {
825		/*
826		 * Update stats_cactive if nactive is crossing a chunk
827		 * multiple.
828		 */
829		size_t cactive_diff = CHUNK_CEILING(arena->nactive << LG_PAGE) -
830		    CHUNK_CEILING((arena->nactive - run_pages) << LG_PAGE);
831		if (cactive_diff != 0)
832			stats_cactive_sub(cactive_diff);
833	}
834	arena->nactive -= run_pages;
835
	/*
	 * The run is dirty if the caller claims to have dirtied it, or if it
	 * was already dirty before being allocated.
	 */
840	if ((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY) != 0)
841		dirty = true;
842	flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;
843	runs_avail = dirty ? &arena->runs_avail_dirty :
844	    &arena->runs_avail_clean;
845
846	/* Mark pages as unallocated in the chunk map. */
847	if (dirty) {
848		chunk->map[run_ind-map_bias].bits = size | CHUNK_MAP_DIRTY;
849		chunk->map[run_ind+run_pages-1-map_bias].bits = size |
850		    CHUNK_MAP_DIRTY;
851
852		chunk->ndirty += run_pages;
853		arena->ndirty += run_pages;
854	} else {
855		chunk->map[run_ind-map_bias].bits = size |
856		    (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_UNZEROED);
857		chunk->map[run_ind+run_pages-1-map_bias].bits = size |
858		    (chunk->map[run_ind+run_pages-1-map_bias].bits &
859		    CHUNK_MAP_UNZEROED);
860	}
861
862	/* Try to coalesce forward. */
863	if (run_ind + run_pages < chunk_npages &&
864	    (chunk->map[run_ind+run_pages-map_bias].bits & CHUNK_MAP_ALLOCATED)
865	    == 0 && (chunk->map[run_ind+run_pages-map_bias].bits &
866	    CHUNK_MAP_DIRTY) == flag_dirty) {
867		size_t nrun_size = chunk->map[run_ind+run_pages-map_bias].bits &
868		    ~PAGE_MASK;
869		size_t nrun_pages = nrun_size >> LG_PAGE;
870
871		/*
872		 * Remove successor from runs_avail; the coalesced run is
873		 * inserted later.
874		 */
875		assert((chunk->map[run_ind+run_pages+nrun_pages-1-map_bias].bits
876		    & ~PAGE_MASK) == nrun_size);
877		assert((chunk->map[run_ind+run_pages+nrun_pages-1-map_bias].bits
878		    & CHUNK_MAP_ALLOCATED) == 0);
879		assert((chunk->map[run_ind+run_pages+nrun_pages-1-map_bias].bits
880		    & CHUNK_MAP_DIRTY) == flag_dirty);
881		arena_avail_tree_remove(runs_avail,
882		    &chunk->map[run_ind+run_pages-map_bias]);
883
884		size += nrun_size;
885		run_pages += nrun_pages;
886
887		chunk->map[run_ind-map_bias].bits = size |
888		    (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_FLAGS_MASK);
889		chunk->map[run_ind+run_pages-1-map_bias].bits = size |
890		    (chunk->map[run_ind+run_pages-1-map_bias].bits &
891		    CHUNK_MAP_FLAGS_MASK);
892	}
893
894	/* Try to coalesce backward. */
895	if (run_ind > map_bias && (chunk->map[run_ind-1-map_bias].bits &
896	    CHUNK_MAP_ALLOCATED) == 0 && (chunk->map[run_ind-1-map_bias].bits &
897	    CHUNK_MAP_DIRTY) == flag_dirty) {
898		size_t prun_size = chunk->map[run_ind-1-map_bias].bits &
899		    ~PAGE_MASK;
900		size_t prun_pages = prun_size >> LG_PAGE;
901
902		run_ind -= prun_pages;
903
904		/*
905		 * Remove predecessor from runs_avail; the coalesced run is
906		 * inserted later.
907		 */
908		assert((chunk->map[run_ind-map_bias].bits & ~PAGE_MASK)
909		    == prun_size);
910		assert((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_ALLOCATED)
911		    == 0);
912		assert((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY)
913		    == flag_dirty);
914		arena_avail_tree_remove(runs_avail,
915		    &chunk->map[run_ind-map_bias]);
916
917		size += prun_size;
918		run_pages += prun_pages;
919
920		chunk->map[run_ind-map_bias].bits = size |
921		    (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_FLAGS_MASK);
922		chunk->map[run_ind+run_pages-1-map_bias].bits = size |
923		    (chunk->map[run_ind+run_pages-1-map_bias].bits &
924		    CHUNK_MAP_FLAGS_MASK);
925	}
926
927	/* Insert into runs_avail, now that coalescing is complete. */
928	assert((chunk->map[run_ind-map_bias].bits & ~PAGE_MASK) ==
929	    (chunk->map[run_ind+run_pages-1-map_bias].bits & ~PAGE_MASK));
930	assert((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY) ==
931	    (chunk->map[run_ind+run_pages-1-map_bias].bits & CHUNK_MAP_DIRTY));
932	arena_avail_tree_insert(runs_avail, &chunk->map[run_ind-map_bias]);
933
934	if (dirty) {
935		/*
936		 * Insert into chunks_dirty before potentially calling
937		 * arena_chunk_dealloc(), so that chunks_dirty and
938		 * arena->ndirty are consistent.
939		 */
940		if (chunk->dirtied == false) {
941			ql_tail_insert(&arena->chunks_dirty, chunk, link_dirty);
942			chunk->dirtied = true;
943		}
944	}
945
946	/*
947	 * Deallocate chunk if it is now completely unused.  The bit
948	 * manipulation checks whether the first run is unallocated and extends
949	 * to the end of the chunk.
950	 */
951	if ((chunk->map[0].bits & (~PAGE_MASK | CHUNK_MAP_ALLOCATED)) ==
952	    arena_maxclass)
953		arena_chunk_dealloc(arena, chunk);
954
955	/*
956	 * It is okay to do dirty page processing here even if the chunk was
957	 * deallocated above, since in that case it is the spare.  Waiting
958	 * until after possible chunk deallocation to do dirty processing
959	 * allows for an old spare to be fully deallocated, thus decreasing the
960	 * chances of spuriously crossing the dirty page purging threshold.
961	 */
962	if (dirty)
963		arena_maybe_purge(arena);
964}
965
966static void
967arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
968    size_t oldsize, size_t newsize)
969{
970	size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
971	size_t head_npages = (oldsize - newsize) >> LG_PAGE;
972	size_t flag_dirty = chunk->map[pageind-map_bias].bits & CHUNK_MAP_DIRTY;
973
974	assert(oldsize > newsize);
975
976	/*
977	 * Update the chunk map so that arena_run_dalloc() can treat the
978	 * leading run as separately allocated.  Set the last element of each
979	 * run first, in case of single-page runs.
980	 */
981	assert((chunk->map[pageind-map_bias].bits & CHUNK_MAP_LARGE) != 0);
982	assert((chunk->map[pageind-map_bias].bits & CHUNK_MAP_ALLOCATED) != 0);
983	chunk->map[pageind+head_npages-1-map_bias].bits = flag_dirty |
984	    (chunk->map[pageind+head_npages-1-map_bias].bits &
985	    CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
986	chunk->map[pageind-map_bias].bits = (oldsize - newsize)
987	    | flag_dirty | (chunk->map[pageind-map_bias].bits &
988	    CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
989
990	if (config_debug) {
991		UNUSED size_t tail_npages = newsize >> LG_PAGE;
992		assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias]
993		    .bits & ~PAGE_MASK) == 0);
994		assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias]
995		    .bits & CHUNK_MAP_DIRTY) == flag_dirty);
996		assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias]
997		    .bits & CHUNK_MAP_LARGE) != 0);
998		assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias]
999		    .bits & CHUNK_MAP_ALLOCATED) != 0);
1000	}
1001	chunk->map[pageind+head_npages-map_bias].bits = newsize | flag_dirty |
1002	    (chunk->map[pageind+head_npages-map_bias].bits &
1003	    CHUNK_MAP_FLAGS_MASK) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
1004
1005	arena_run_dalloc(arena, run, false);
1006}
1007
1008static void
1009arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
1010    size_t oldsize, size_t newsize, bool dirty)
1011{
1012	size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
1013	size_t head_npages = newsize >> LG_PAGE;
1014	size_t tail_npages = (oldsize - newsize) >> LG_PAGE;
1015	size_t flag_dirty = chunk->map[pageind-map_bias].bits &
1016	    CHUNK_MAP_DIRTY;
1017
1018	assert(oldsize > newsize);
1019
1020	/*
1021	 * Update the chunk map so that arena_run_dalloc() can treat the
1022	 * trailing run as separately allocated.  Set the last element of each
1023	 * run first, in case of single-page runs.
1024	 */
1025	assert((chunk->map[pageind-map_bias].bits & CHUNK_MAP_LARGE) != 0);
1026	assert((chunk->map[pageind-map_bias].bits & CHUNK_MAP_ALLOCATED) != 0);
1027	chunk->map[pageind+head_npages-1-map_bias].bits = flag_dirty |
1028	    (chunk->map[pageind+head_npages-1-map_bias].bits &
1029	    CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
1030	chunk->map[pageind-map_bias].bits = newsize | flag_dirty |
1031	    (chunk->map[pageind-map_bias].bits & CHUNK_MAP_UNZEROED) |
1032	    CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
1033
1034	assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits &
1035	    ~PAGE_MASK) == 0);
1036	assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits &
1037	    CHUNK_MAP_LARGE) != 0);
1038	assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits &
1039	    CHUNK_MAP_ALLOCATED) != 0);
1040	chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits =
1041	    flag_dirty |
1042	    (chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits &
1043	    CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
1044	chunk->map[pageind+head_npages-map_bias].bits = (oldsize - newsize) |
1045	    flag_dirty | (chunk->map[pageind+head_npages-map_bias].bits &
1046	    CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
1047
1048	arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)run + newsize),
1049	    dirty);
1050}
1051
1052static arena_run_t *
1053arena_bin_runs_first(arena_bin_t *bin)
1054{
1055	arena_chunk_map_t *mapelm = arena_run_tree_first(&bin->runs);
1056	if (mapelm != NULL) {
1057		arena_chunk_t *chunk;
1058		size_t pageind;
1059
1060		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(mapelm);
1061		pageind = ((((uintptr_t)mapelm - (uintptr_t)chunk->map) /
1062		    sizeof(arena_chunk_map_t))) + map_bias;
1063		arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
1064		    (uintptr_t)((pageind - (mapelm->bits >> LG_PAGE)) <<
1065		    LG_PAGE));
1066		return (run);
1067	}
1068
1069	return (NULL);
1070}
1071
1072static void
1073arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run)
1074{
1075	arena_chunk_t *chunk = CHUNK_ADDR2BASE(run);
1076	size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
1077	arena_chunk_map_t *mapelm = &chunk->map[pageind-map_bias];
1078
1079	assert(arena_run_tree_search(&bin->runs, mapelm) == NULL);
1080
1081	arena_run_tree_insert(&bin->runs, mapelm);
1082}
1083
1084static void
1085arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run)
1086{
1087	arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
1088	size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
1089	arena_chunk_map_t *mapelm = &chunk->map[pageind-map_bias];
1090
1091	assert(arena_run_tree_search(&bin->runs, mapelm) != NULL);
1092
1093	arena_run_tree_remove(&bin->runs, mapelm);
1094}
1095
1096static arena_run_t *
1097arena_bin_nonfull_run_tryget(arena_bin_t *bin)
1098{
1099	arena_run_t *run = arena_bin_runs_first(bin);
1100	if (run != NULL) {
1101		arena_bin_runs_remove(bin, run);
1102		if (config_stats)
1103			bin->stats.reruns++;
1104	}
1105	return (run);
1106}
1107
1108static arena_run_t *
1109arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
1110{
1111	arena_run_t *run;
1112	size_t binind;
1113	arena_bin_info_t *bin_info;
1114
1115	/* Look for a usable run. */
1116	run = arena_bin_nonfull_run_tryget(bin);
1117	if (run != NULL)
1118		return (run);
1119	/* No existing runs have any space available. */
1120
1121	binind = arena_bin_index(arena, bin);
1122	bin_info = &arena_bin_info[binind];
1123
1124	/* Allocate a new run. */
1125	malloc_mutex_unlock(&bin->lock);
1126	/******************************/
1127	malloc_mutex_lock(&arena->lock);
1128	run = arena_run_alloc(arena, bin_info->run_size, false, false);
1129	if (run != NULL) {
1130		bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
1131		    (uintptr_t)bin_info->bitmap_offset);
1132
1133		/* Initialize run internals. */
1134		run->bin = bin;
1135		run->nextind = 0;
1136		run->nfree = bin_info->nregs;
1137		bitmap_init(bitmap, &bin_info->bitmap_info);
1138	}
1139	malloc_mutex_unlock(&arena->lock);
1140	/********************************/
1141	malloc_mutex_lock(&bin->lock);
1142	if (run != NULL) {
1143		if (config_stats) {
1144			bin->stats.nruns++;
1145			bin->stats.curruns++;
1146		}
1147		return (run);
1148	}
1149
1150	/*
1151	 * arena_run_alloc() failed, but another thread may have made
1152	 * sufficient memory available while this one dropped bin->lock above,
1153	 * so search one more time.
1154	 */
1155	run = arena_bin_nonfull_run_tryget(bin);
1156	if (run != NULL)
1157		return (run);
1158
1159	return (NULL);
1160}
1161
1162/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */
1163static void *
1164arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
1165{
1166	void *ret;
1167	size_t binind;
1168	arena_bin_info_t *bin_info;
1169	arena_run_t *run;
1170
1171	binind = arena_bin_index(arena, bin);
1172	bin_info = &arena_bin_info[binind];
1173	bin->runcur = NULL;
1174	run = arena_bin_nonfull_run_get(arena, bin);
1175	if (bin->runcur != NULL && bin->runcur->nfree > 0) {
1176		/*
1177		 * Another thread updated runcur while this one ran without the
1178		 * bin lock in arena_bin_nonfull_run_get().
1179		 */
1180		assert(bin->runcur->nfree > 0);
1181		ret = arena_run_reg_alloc(bin->runcur, bin_info);
1182		if (run != NULL) {
1183			arena_chunk_t *chunk;
1184
1185			/*
1186			 * arena_run_alloc() may have allocated run, or it may
1187			 * have pulled run from the bin's run tree.  Therefore
1188			 * it is unsafe to make any assumptions about how run
1189			 * has previously been used, and arena_bin_lower_run()
1190			 * must be called, as if a region were just deallocated
1191			 * from the run.
1192			 */
1193			chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
1194			if (run->nfree == bin_info->nregs)
1195				arena_dalloc_bin_run(arena, chunk, run, bin);
1196			else
1197				arena_bin_lower_run(arena, chunk, run, bin);
1198		}
1199		return (ret);
1200	}
1201
1202	if (run == NULL)
1203		return (NULL);
1204
1205	bin->runcur = run;
1206
1207	assert(bin->runcur->nfree > 0);
1208
1209	return (arena_run_reg_alloc(bin->runcur, bin_info));
1210}
1211
1212void
1213arena_prof_accum(arena_t *arena, uint64_t accumbytes)
1214{
1215
1216	cassert(config_prof);
1217
1218	if (config_prof && prof_interval != 0) {
1219		arena->prof_accumbytes += accumbytes;
1220		if (arena->prof_accumbytes >= prof_interval) {
1221			prof_idump();
1222			arena->prof_accumbytes -= prof_interval;
1223		}
1224	}
1225}
1226
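/*
 * The fill count is ncached_max >> tbin->lg_fill_div, which lets the tcache
 * layer tune how aggressively each bin is refilled.  Illustrative numbers
 * (actual limits are configuration dependent): with ncached_max == 200 and
 * lg_fill_div == 2, one pass below grabs 50 regions under a single bin lock
 * acquisition.
 */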
1227void
1228arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind,
1229    uint64_t prof_accumbytes)
1230{
1231	unsigned i, nfill;
1232	arena_bin_t *bin;
1233	arena_run_t *run;
1234	void *ptr;
1235
1236	assert(tbin->ncached == 0);
1237
1238	if (config_prof) {
1239		malloc_mutex_lock(&arena->lock);
1240		arena_prof_accum(arena, prof_accumbytes);
1241		malloc_mutex_unlock(&arena->lock);
1242	}
1243	bin = &arena->bins[binind];
1244	malloc_mutex_lock(&bin->lock);
1245	for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
1246	    tbin->lg_fill_div); i < nfill; i++) {
1247		if ((run = bin->runcur) != NULL && run->nfree > 0)
1248			ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
1249		else
1250			ptr = arena_bin_malloc_hard(arena, bin);
1251		if (ptr == NULL)
1252			break;
1253		if (config_fill && opt_junk) {
1254			arena_alloc_junk_small(ptr, &arena_bin_info[binind],
1255			    true);
1256		}
1257		/* Insert such that low regions get used first. */
1258		tbin->avail[nfill - 1 - i] = ptr;
1259	}
1260	if (config_stats) {
1261		bin->stats.allocated += i * arena_bin_info[binind].reg_size;
1262		bin->stats.nmalloc += i;
1263		bin->stats.nrequests += tbin->tstats.nrequests;
1264		bin->stats.nfills++;
1265		tbin->tstats.nrequests = 0;
1266	}
1267	malloc_mutex_unlock(&bin->lock);
1268	tbin->ncached = i;
1269}
1270
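/*
 * With redzones enabled each region is laid out as
 * [redzone | reg_size usable bytes | redzone], and reg_interval spans all
 * three.  Allocation paints the redzones 0xa5 (and the usable bytes too when
 * junk-filling a non-zeroed allocation); deallocation verifies that the
 * redzones still read 0xa5 before repainting the whole span 0x5a.
 */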
1271void
1272arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
1273{
1274
1275	if (zero) {
1276		size_t redzone_size = bin_info->redzone_size;
1277		memset((void *)((uintptr_t)ptr - redzone_size), 0xa5,
1278		    redzone_size);
1279		memset((void *)((uintptr_t)ptr + bin_info->reg_size), 0xa5,
1280		    redzone_size);
1281	} else {
1282		memset((void *)((uintptr_t)ptr - bin_info->redzone_size), 0xa5,
1283		    bin_info->reg_interval);
1284	}
1285}
1286
1287void
1288arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
1289{
1290	size_t size = bin_info->reg_size;
1291	size_t redzone_size = bin_info->redzone_size;
1292	size_t i;
1293	bool error = false;
1294
1295	for (i = 1; i <= redzone_size; i++) {
1296		unsigned byte;
1297		if ((byte = *(uint8_t *)((uintptr_t)ptr - i)) != 0xa5) {
1298			error = true;
1299			malloc_printf("<jemalloc>: Corrupt redzone "
1300			    "%zu byte%s before %p (size %zu), byte=%#x\n", i,
1301			    (i == 1) ? "" : "s", ptr, size, byte);
1302		}
1303	}
1304	for (i = 0; i < redzone_size; i++) {
1305		unsigned byte;
1306		if ((byte = *(uint8_t *)((uintptr_t)ptr + size + i)) != 0xa5) {
1307			error = true;
1308			malloc_printf("<jemalloc>: Corrupt redzone "
1309			    "%zu byte%s after end of %p (size %zu), byte=%#x\n",
1310			    i, (i == 1) ? "" : "s", ptr, size, byte);
1311		}
1312	}
1313	if (opt_abort && error)
1314		abort();
1315
1316	memset((void *)((uintptr_t)ptr - redzone_size), 0x5a,
1317	    bin_info->reg_interval);
1318}
1319
1320void *
1321arena_malloc_small(arena_t *arena, size_t size, bool zero)
1322{
1323	void *ret;
1324	arena_bin_t *bin;
1325	arena_run_t *run;
1326	size_t binind;
1327
1328	binind = SMALL_SIZE2BIN(size);
1329	assert(binind < NBINS);
1330	bin = &arena->bins[binind];
1331	size = arena_bin_info[binind].reg_size;
1332
1333	malloc_mutex_lock(&bin->lock);
1334	if ((run = bin->runcur) != NULL && run->nfree > 0)
1335		ret = arena_run_reg_alloc(run, &arena_bin_info[binind]);
1336	else
1337		ret = arena_bin_malloc_hard(arena, bin);
1338
1339	if (ret == NULL) {
1340		malloc_mutex_unlock(&bin->lock);
1341		return (NULL);
1342	}
1343
1344	if (config_stats) {
1345		bin->stats.allocated += size;
1346		bin->stats.nmalloc++;
1347		bin->stats.nrequests++;
1348	}
1349	malloc_mutex_unlock(&bin->lock);
1350	if (config_prof && isthreaded == false) {
1351		malloc_mutex_lock(&arena->lock);
1352		arena_prof_accum(arena, size);
1353		malloc_mutex_unlock(&arena->lock);
1354	}
1355
1356	if (zero == false) {
1357		if (config_fill) {
1358			if (opt_junk) {
1359				arena_alloc_junk_small(ret,
1360				    &arena_bin_info[binind], false);
1361			} else if (opt_zero)
1362				memset(ret, 0, size);
1363		}
1364	} else {
1365		if (config_fill && opt_junk) {
1366			arena_alloc_junk_small(ret, &arena_bin_info[binind],
1367			    true);
1368		}
1369		VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
1370		memset(ret, 0, size);
1371	}
1372
1373	return (ret);
1374}
1375
1376void *
1377arena_malloc_large(arena_t *arena, size_t size, bool zero)
1378{
1379	void *ret;
1380
1381	/* Large allocation. */
1382	size = PAGE_CEILING(size);
1383	malloc_mutex_lock(&arena->lock);
1384	ret = (void *)arena_run_alloc(arena, size, true, zero);
1385	if (ret == NULL) {
1386		malloc_mutex_unlock(&arena->lock);
1387		return (NULL);
1388	}
1389	if (config_stats) {
1390		arena->stats.nmalloc_large++;
1391		arena->stats.nrequests_large++;
1392		arena->stats.allocated_large += size;
1393		arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
1394		arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
1395		arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
1396	}
1397	if (config_prof)
1398		arena_prof_accum(arena, size);
1399	malloc_mutex_unlock(&arena->lock);
1400
1401	if (zero == false) {
1402		if (config_fill) {
1403			if (opt_junk)
1404				memset(ret, 0xa5, size);
1405			else if (opt_zero)
1406				memset(ret, 0, size);
1407		}
1408	}
1409
1410	return (ret);
1411}
1412
1413/* Only handles large allocations that require more than page alignment. */
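/*
 * The strategy is to over-allocate by alignment - PAGE bytes and then trim:
 * the unaligned head (leadsize) is given back via arena_run_trim_head() and
 * any excess tail (trailsize) via arena_run_trim_tail().  For example, with
 * sizes expressed in pages (purely illustrative), a 4-page request with
 * 4-page alignment allocates 7 pages and returns the head and tail around
 * the aligned 4-page window.
 */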
1414void *
1415arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
1416{
1417	void *ret;
1418	size_t alloc_size, leadsize, trailsize;
1419	arena_run_t *run;
1420	arena_chunk_t *chunk;
1421
1422	assert((size & PAGE_MASK) == 0);
1423
1424	alignment = PAGE_CEILING(alignment);
1425	alloc_size = size + alignment - PAGE;
1426
1427	malloc_mutex_lock(&arena->lock);
1428	run = arena_run_alloc(arena, alloc_size, true, zero);
1429	if (run == NULL) {
1430		malloc_mutex_unlock(&arena->lock);
1431		return (NULL);
1432	}
1433	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
1434
1435	leadsize = ALIGNMENT_CEILING((uintptr_t)run, alignment) -
1436	    (uintptr_t)run;
1437	assert(alloc_size >= leadsize + size);
1438	trailsize = alloc_size - leadsize - size;
1439	ret = (void *)((uintptr_t)run + leadsize);
1440	if (leadsize != 0) {
1441		arena_run_trim_head(arena, chunk, run, alloc_size, alloc_size -
1442		    leadsize);
1443	}
1444	if (trailsize != 0) {
1445		arena_run_trim_tail(arena, chunk, ret, size + trailsize, size,
1446		    false);
1447	}
1448
1449	if (config_stats) {
1450		arena->stats.nmalloc_large++;
1451		arena->stats.nrequests_large++;
1452		arena->stats.allocated_large += size;
1453		arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
1454		arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
1455		arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
1456	}
1457	malloc_mutex_unlock(&arena->lock);
1458
1459	if (config_fill && zero == false) {
1460		if (opt_junk)
1461			memset(ret, 0xa5, size);
1462		else if (opt_zero)
1463			memset(ret, 0, size);
1464	}
1465	return (ret);
1466}
1467
1468/* Return the size of the allocation pointed to by ptr. */
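/*
 * When heap profiling promotes a small request to a PAGE-sized large run, the
 * original small size class is stashed in the CHUNK_MAP_CLASS bits (see
 * arena_prof_promoted()); passing demote=true reports that underlying small
 * class rather than PAGE.
 */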
1469size_t
1470arena_salloc(const void *ptr, bool demote)
1471{
1472	size_t ret;
1473	arena_chunk_t *chunk;
1474	size_t pageind, mapbits;
1475
1476	assert(ptr != NULL);
1477	assert(CHUNK_ADDR2BASE(ptr) != ptr);
1478
1479	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
1480	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
1481	mapbits = chunk->map[pageind-map_bias].bits;
1482	assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
1483	if ((mapbits & CHUNK_MAP_LARGE) == 0) {
1484		arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
1485		    (uintptr_t)((pageind - (mapbits >> LG_PAGE)) << LG_PAGE));
1486		size_t binind = arena_bin_index(chunk->arena, run->bin);
1487		arena_bin_info_t *bin_info = &arena_bin_info[binind];
1488		assert(((uintptr_t)ptr - ((uintptr_t)run +
1489		    (uintptr_t)bin_info->reg0_offset)) % bin_info->reg_interval
1490		    == 0);
1491		ret = bin_info->reg_size;
1492	} else {
1493		assert(((uintptr_t)ptr & PAGE_MASK) == 0);
1494		ret = mapbits & ~PAGE_MASK;
1495		if (config_prof && demote && prof_promote && ret == PAGE &&
1496		    (mapbits & CHUNK_MAP_CLASS_MASK) != 0) {
1497			size_t binind = ((mapbits & CHUNK_MAP_CLASS_MASK) >>
1498			    CHUNK_MAP_CLASS_SHIFT) - 1;
1499			assert(binind < NBINS);
1500			ret = arena_bin_info[binind].reg_size;
1501		}
1502		assert(ret != 0);
1503	}
1504
1505	return (ret);
1506}
1507
1508void
1509arena_prof_promoted(const void *ptr, size_t size)
1510{
1511	arena_chunk_t *chunk;
1512	size_t pageind, binind;
1513
1514	cassert(config_prof);
1515	assert(ptr != NULL);
1516	assert(CHUNK_ADDR2BASE(ptr) != ptr);
1517	assert(isalloc(ptr, false) == PAGE);
1518	assert(isalloc(ptr, true) == PAGE);
1519	assert(size <= SMALL_MAXCLASS);
1520
1521	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
1522	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
1523	binind = SMALL_SIZE2BIN(size);
1524	assert(binind < NBINS);
1525	chunk->map[pageind-map_bias].bits = (chunk->map[pageind-map_bias].bits &
1526	    ~CHUNK_MAP_CLASS_MASK) | ((binind+1) << CHUNK_MAP_CLASS_SHIFT);
1527
1528	assert(isalloc(ptr, false) == PAGE);
1529	assert(isalloc(ptr, true) == size);
1530}
1531
1532static void
1533arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
1534    arena_bin_t *bin)
1535{
1536
1537	/* Dissociate run from bin. */
1538	if (run == bin->runcur)
1539		bin->runcur = NULL;
1540	else {
1541		size_t binind = arena_bin_index(chunk->arena, bin);
1542		arena_bin_info_t *bin_info = &arena_bin_info[binind];
1543
1544		if (bin_info->nregs != 1) {
1545			/*
1546			 * This block's conditional is necessary because if the
1547			 * run only contains one region, then it never gets
1548			 * inserted into the non-full runs tree.
1549			 */
1550			arena_bin_runs_remove(bin, run);
1551		}
1552	}
1553}
1554
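/*
 * When an originally clean run is freed, pages beyond the high-water mark
 * implied by run->nextind were never written, so they are split off as a
 * still-clean tail before the touched portion is passed to arena_run_dalloc()
 * as dirty.  This keeps never-used pages out of the dirty page count.
 */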
1555static void
1556arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
1557    arena_bin_t *bin)
1558{
1559	size_t binind;
1560	arena_bin_info_t *bin_info;
1561	size_t npages, run_ind, past;
1562
1563	assert(run != bin->runcur);
1564	assert(arena_run_tree_search(&bin->runs, &chunk->map[
1565	    (((uintptr_t)run-(uintptr_t)chunk)>>LG_PAGE)-map_bias]) == NULL);
1566
1567	binind = arena_bin_index(chunk->arena, run->bin);
1568	bin_info = &arena_bin_info[binind];
1569
1570	malloc_mutex_unlock(&bin->lock);
1571	/******************************/
1572	npages = bin_info->run_size >> LG_PAGE;
1573	run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
1574	past = (size_t)(PAGE_CEILING((uintptr_t)run +
1575	    (uintptr_t)bin_info->reg0_offset + (uintptr_t)(run->nextind *
1576	    bin_info->reg_interval - bin_info->redzone_size) -
1577	    (uintptr_t)chunk) >> LG_PAGE);
1578	malloc_mutex_lock(&arena->lock);
1579
1580	/*
1581	 * If the run was originally clean, and some pages were never touched,
1582	 * trim the clean pages before deallocating the dirty portion of the
1583	 * run.
1584	 */
1585	if ((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY) == 0 && past
1586	    - run_ind < npages) {
1587		/*
1588		 * Trim clean pages.  Convert to large run beforehand.  Set the
1589		 * last map element first, in case this is a one-page run.
1590		 */
1591		chunk->map[run_ind+npages-1-map_bias].bits = CHUNK_MAP_LARGE |
1592		    (chunk->map[run_ind+npages-1-map_bias].bits &
1593		    CHUNK_MAP_FLAGS_MASK);
1594		chunk->map[run_ind-map_bias].bits = bin_info->run_size |
1595		    CHUNK_MAP_LARGE | (chunk->map[run_ind-map_bias].bits &
1596		    CHUNK_MAP_FLAGS_MASK);
1597		arena_run_trim_tail(arena, chunk, run, (npages << LG_PAGE),
1598		    ((past - run_ind) << LG_PAGE), false);
1599		/* npages = past - run_ind; */
1600	}
1601	arena_run_dalloc(arena, run, true);
1602	malloc_mutex_unlock(&arena->lock);
1603	/****************************/
1604	malloc_mutex_lock(&bin->lock);
1605	if (config_stats)
1606		bin->stats.curruns--;
1607}
1608
1609static void
1610arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
1611    arena_bin_t *bin)
1612{
1613
1614	/*
1615	 * Make sure that if bin->runcur is non-NULL, it refers to the lowest
1616	 * non-full run.  It is okay to NULL runcur out rather than proactively
1617	 * keeping it pointing at the lowest non-full run.
1618	 */
1619	if ((uintptr_t)run < (uintptr_t)bin->runcur) {
1620		/* Switch runcur. */
1621		if (bin->runcur->nfree > 0)
1622			arena_bin_runs_insert(bin, bin->runcur);
1623		bin->runcur = run;
1624		if (config_stats)
1625			bin->stats.reruns++;
1626	} else
1627		arena_bin_runs_insert(bin, run);
1628}
1629
1630void
1631arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
1632    arena_chunk_map_t *mapelm)
1633{
1634	size_t pageind;
1635	arena_run_t *run;
1636	arena_bin_t *bin;
1637	size_t size;
1638
1639	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
1640	run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
1641	    (mapelm->bits >> LG_PAGE)) << LG_PAGE));
1642	bin = run->bin;
1643	size_t binind = arena_bin_index(arena, bin);
1644	arena_bin_info_t *bin_info = &arena_bin_info[binind];
1645	if (config_fill || config_stats)
1646		size = bin_info->reg_size;
1647
1648	if (config_fill && opt_junk)
1649		arena_dalloc_junk_small(ptr, bin_info);
1650
1651	arena_run_reg_dalloc(run, ptr);
1652	if (run->nfree == bin_info->nregs) {
1653		arena_dissociate_bin_run(chunk, run, bin);
1654		arena_dalloc_bin_run(arena, chunk, run, bin);
1655	} else if (run->nfree == 1 && run != bin->runcur)
1656		arena_bin_lower_run(arena, chunk, run, bin);
1657
1658	if (config_stats) {
1659		bin->stats.allocated -= size;
1660		bin->stats.ndalloc++;
1661	}
1662}
1663
1664void
1665arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty,
1666    arena_stats_t *astats, malloc_bin_stats_t *bstats,
1667    malloc_large_stats_t *lstats)
1668{
1669	unsigned i;
1670
1671	malloc_mutex_lock(&arena->lock);
1672	*nactive += arena->nactive;
1673	*ndirty += arena->ndirty;
1674
1675	astats->mapped += arena->stats.mapped;
1676	astats->npurge += arena->stats.npurge;
1677	astats->nmadvise += arena->stats.nmadvise;
1678	astats->purged += arena->stats.purged;
1679	astats->allocated_large += arena->stats.allocated_large;
1680	astats->nmalloc_large += arena->stats.nmalloc_large;
1681	astats->ndalloc_large += arena->stats.ndalloc_large;
1682	astats->nrequests_large += arena->stats.nrequests_large;
1683
1684	for (i = 0; i < nlclasses; i++) {
1685		lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
1686		lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
1687		lstats[i].nrequests += arena->stats.lstats[i].nrequests;
1688		lstats[i].curruns += arena->stats.lstats[i].curruns;
1689	}
1690	malloc_mutex_unlock(&arena->lock);
1691
1692	for (i = 0; i < NBINS; i++) {
1693		arena_bin_t *bin = &arena->bins[i];
1694
1695		malloc_mutex_lock(&bin->lock);
1696		bstats[i].allocated += bin->stats.allocated;
1697		bstats[i].nmalloc += bin->stats.nmalloc;
1698		bstats[i].ndalloc += bin->stats.ndalloc;
1699		bstats[i].nrequests += bin->stats.nrequests;
1700		if (config_tcache) {
1701			bstats[i].nfills += bin->stats.nfills;
1702			bstats[i].nflushes += bin->stats.nflushes;
1703		}
1704		bstats[i].nruns += bin->stats.nruns;
1705		bstats[i].reruns += bin->stats.reruns;
1706		bstats[i].curruns += bin->stats.curruns;
1707		malloc_mutex_unlock(&bin->lock);
1708	}
1709}
1710
1711void
1712arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
1713{
1714
1715	if (config_fill || config_stats) {
1716		size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
1717		size_t size = chunk->map[pageind-map_bias].bits & ~PAGE_MASK;
1718
1719		if (config_fill && opt_junk)
1720			memset(ptr, 0x5a, size);
1721		if (config_stats) {
1722			arena->stats.ndalloc_large++;
1723			arena->stats.allocated_large -= size;
1724			arena->stats.lstats[(size >> LG_PAGE) - 1].ndalloc++;
1725			arena->stats.lstats[(size >> LG_PAGE) - 1].curruns--;
1726		}
1727	}
1728
1729	arena_run_dalloc(arena, (arena_run_t *)ptr, true);
1730}
1731
1732static void
1733arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
1734    size_t oldsize, size_t size)
1735{
1736
1737	assert(size < oldsize);
1738
1739	/*
1740	 * Shrink the run, and make trailing pages available for other
1741	 * allocations.
1742	 */
1743	malloc_mutex_lock(&arena->lock);
1744	arena_run_trim_tail(arena, chunk, (arena_run_t *)ptr, oldsize, size,
1745	    true);
1746	if (config_stats) {
1747		arena->stats.ndalloc_large++;
1748		arena->stats.allocated_large -= oldsize;
1749		arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++;
1750		arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--;
1751
1752		arena->stats.nmalloc_large++;
1753		arena->stats.nrequests_large++;
1754		arena->stats.allocated_large += size;
1755		arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
1756		arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
1757		arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
1758	}
1759	malloc_mutex_unlock(&arena->lock);
1760}
1761
1762static bool
1763arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
1764    size_t oldsize, size_t size, size_t extra, bool zero)
1765{
1766	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
1767	size_t npages = oldsize >> LG_PAGE;
1768	size_t followsize;
1769
1770	assert(oldsize == (chunk->map[pageind-map_bias].bits & ~PAGE_MASK));
1771
1772	/* Try to extend the run. */
1773	assert(size + extra > oldsize);
1774	malloc_mutex_lock(&arena->lock);
1775	if (pageind + npages < chunk_npages &&
1776	    (chunk->map[pageind+npages-map_bias].bits
1777	    & CHUNK_MAP_ALLOCATED) == 0 && (followsize =
1778	    chunk->map[pageind+npages-map_bias].bits & ~PAGE_MASK) >= size -
1779	    oldsize) {
1780		/*
1781		 * The next run is available and sufficiently large.  Split the
1782		 * following run, then merge the first part with the existing
1783		 * allocation.
1784		 */
1785		size_t flag_dirty;
1786		size_t splitsize = (oldsize + followsize <= size + extra)
1787		    ? followsize : size + extra - oldsize;
1788		arena_run_split(arena, (arena_run_t *)((uintptr_t)chunk +
1789		    ((pageind+npages) << LG_PAGE)), splitsize, true, zero);
1790
1791		size = oldsize + splitsize;
1792		npages = size >> LG_PAGE;
1793
1794		/*
1795		 * Mark the extended run as dirty if either portion of the run
1796		 * was dirty before allocation.  This is rather pedantic,
1797		 * because there's not actually any sequence of events that
1798		 * could cause the resulting run to be passed to
1799		 * arena_run_dalloc() with the dirty argument set to false
1800		 * (which is when dirty flag consistency would really matter).
1801		 */
1802		flag_dirty = (chunk->map[pageind-map_bias].bits &
1803		    CHUNK_MAP_DIRTY) |
1804		    (chunk->map[pageind+npages-1-map_bias].bits &
1805		    CHUNK_MAP_DIRTY);
1806		chunk->map[pageind-map_bias].bits = size | flag_dirty
1807		    | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
1808		chunk->map[pageind+npages-1-map_bias].bits = flag_dirty |
1809		    CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
1810
1811		if (config_stats) {
1812			arena->stats.ndalloc_large++;
1813			arena->stats.allocated_large -= oldsize;
1814			arena->stats.lstats[(oldsize >> LG_PAGE)
1815			    - 1].ndalloc++;
1816			arena->stats.lstats[(oldsize >> LG_PAGE)
1817			    - 1].curruns--;
1818
1819			arena->stats.nmalloc_large++;
1820			arena->stats.nrequests_large++;
1821			arena->stats.allocated_large += size;
1822			arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
1823			arena->stats.lstats[(size >> LG_PAGE)
1824			    - 1].nrequests++;
1825			arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
1826		}
1827		malloc_mutex_unlock(&arena->lock);
1828		return (false);
1829	}
1830	malloc_mutex_unlock(&arena->lock);
1831
1832	return (true);
1833}
1834
1835/*
1836 * Try to resize a large allocation, in order to avoid copying.  This will
1837 * always fail when growing an object if the following run is already in use.
1838 */
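/*
 * Example (assuming 4 KiB pages): growing an 8 KiB (two-page) allocation to
 * 12 KiB can only succeed in place when the pages immediately following the
 * run are unallocated and cover at least the extra 4 KiB; otherwise this
 * returns true and the caller falls back to allocate/copy/free.
 */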
1839static bool
1840arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
1841    bool zero)
1842{
1843	size_t psize;
1844
1845	psize = PAGE_CEILING(size + extra);
1846	if (psize == oldsize) {
1847		/* Same size class. */
1848		if (config_fill && opt_junk && size < oldsize) {
1849			memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize -
1850			    size);
1851		}
1852		return (false);
1853	} else {
1854		arena_chunk_t *chunk;
1855		arena_t *arena;
1856
1857		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
1858		arena = chunk->arena;
1859
1860		if (psize < oldsize) {
1861			/* Fill before shrinking in order to avoid a race. */
1862			if (config_fill && opt_junk) {
1863				memset((void *)((uintptr_t)ptr + size), 0x5a,
1864				    oldsize - size);
1865			}
1866			arena_ralloc_large_shrink(arena, chunk, ptr, oldsize,
1867			    psize);
1868			return (false);
1869		} else {
1870			bool ret = arena_ralloc_large_grow(arena, chunk, ptr,
1871			    oldsize, PAGE_CEILING(size),
1872			    psize - PAGE_CEILING(size), zero);
1873			if (config_fill && ret == false && zero == false &&
1874			    opt_zero) {
1875				memset((void *)((uintptr_t)ptr + oldsize), 0,
1876				    size - oldsize);
1877			}
1878			return (ret);
1879		}
1880	}
1881}
1882
1883void *
1884arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
1885    bool zero)
1886{
1887
1888	/*
1889	 * Avoid moving the allocation if the size class can be left the same.
1890	 */
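	/*
	 * For example, with the default 16-byte quantum, shrinking from the
	 * (hypothetical) 48-byte class to a 33-byte request maps to the same
	 * bin, so the small-size branch below returns ptr without copying.
	 */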
1891	if (oldsize <= arena_maxclass) {
1892		if (oldsize <= SMALL_MAXCLASS) {
1893			assert(arena_bin_info[SMALL_SIZE2BIN(oldsize)].reg_size
1894			    == oldsize);
1895			if ((size + extra <= SMALL_MAXCLASS &&
1896			    SMALL_SIZE2BIN(size + extra) ==
1897			    SMALL_SIZE2BIN(oldsize)) || (size <= oldsize &&
1898			    size + extra >= oldsize)) {
1899				if (config_fill && opt_junk && size < oldsize) {
1900					memset((void *)((uintptr_t)ptr + size),
1901					    0x5a, oldsize - size);
1902				}
1903				return (ptr);
1904			}
1905		} else {
1906			assert(size <= arena_maxclass);
1907			if (size + extra > SMALL_MAXCLASS) {
1908				if (arena_ralloc_large(ptr, oldsize, size,
1909				    extra, zero) == false)
1910					return (ptr);
1911			}
1912		}
1913	}
1914
1915	/* Reallocation would require a move. */
1916	return (NULL);
1917}
1918
1919void *
1920arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
1921    size_t alignment, bool zero, bool try_tcache)
1922{
1923	void *ret;
1924	size_t copysize;
1925
1926	/* Try to avoid moving the allocation. */
1927	ret = arena_ralloc_no_move(ptr, oldsize, size, extra, zero);
1928	if (ret != NULL)
1929		return (ret);
1930
1931	/*
1932	 * size and oldsize are different enough that we need to move the
1933	 * object.  In that case, fall back to allocating new space and
1934	 * copying.
1935	 */
1936	if (alignment != 0) {
1937		size_t usize = sa2u(size + extra, alignment);
1938		if (usize == 0)
1939			return (NULL);
1940		ret = ipalloc(usize, alignment, zero);
1941	} else
1942		ret = arena_malloc(NULL, size + extra, zero, try_tcache);
1943
1944	if (ret == NULL) {
1945		if (extra == 0)
1946			return (NULL);
1947		/* Try again, this time without extra. */
1948		if (alignment != 0) {
1949			size_t usize = sa2u(size, alignment);
1950			if (usize == 0)
1951				return (NULL);
1952			ret = ipalloc(usize, alignment, zero);
1953		} else
1954			ret = arena_malloc(NULL, size, zero, try_tcache);
1955
1956		if (ret == NULL)
1957			return (NULL);
1958	}
1959
1960	/* Junk/zero-filling was already done by ipalloc()/arena_malloc(). */
1961
1962	/*
1963	 * Copy at most size bytes (not size+extra), since the caller has no
1964	 * expectation that the extra bytes will be reliably preserved.
1965	 */
1966	copysize = (size < oldsize) ? size : oldsize;
1967	memcpy(ret, ptr, copysize);
1968	iqalloc(ptr);
1969	return (ret);
1970}
1971
1972bool
1973arena_new(arena_t *arena, unsigned ind)
1974{
1975	unsigned i;
1976	arena_bin_t *bin;
1977
1978	arena->ind = ind;
1979	arena->nthreads = 0;
1980
1981	if (malloc_mutex_init(&arena->lock))
1982		return (true);
1983
1984	if (config_stats) {
1985		memset(&arena->stats, 0, sizeof(arena_stats_t));
1986		arena->stats.lstats =
1987		    (malloc_large_stats_t *)base_alloc(nlclasses *
1988		    sizeof(malloc_large_stats_t));
1989		if (arena->stats.lstats == NULL)
1990			return (true);
1991		memset(arena->stats.lstats, 0, nlclasses *
1992		    sizeof(malloc_large_stats_t));
1993		if (config_tcache)
1994			ql_new(&arena->tcache_ql);
1995	}
1996
1997	if (config_prof)
1998		arena->prof_accumbytes = 0;
1999
2000	/* Initialize chunks. */
2001	ql_new(&arena->chunks_dirty);
2002	arena->spare = NULL;
2003
2004	arena->nactive = 0;
2005	arena->ndirty = 0;
2006	arena->npurgatory = 0;
2007
2008	arena_avail_tree_new(&arena->runs_avail_clean);
2009	arena_avail_tree_new(&arena->runs_avail_dirty);
2010
2011	/* Initialize bins. */
2012	for (i = 0; i < NBINS; i++) {
2013		bin = &arena->bins[i];
2014		if (malloc_mutex_init(&bin->lock))
2015			return (true);
2016		bin->runcur = NULL;
2017		arena_run_tree_new(&bin->runs);
2018		if (config_stats)
2019			memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
2020	}
2021
2022	return (false);
2023}
2024
2025/*
2026 * Calculate bin_info->run_size such that it meets the following constraints:
2027 *
2028 *   *) bin_info->run_size >= min_run_size
2029 *   *) bin_info->run_size <= arena_maxclass
2030 *   *) run header overhead <= RUN_MAX_OVRHD (or header overhead relaxed).
2031 *   *) bin_info->nregs <= RUN_MAXREGS
2032 *
2033 * bin_info->nregs, bin_info->bitmap_offset, and bin_info->reg0_offset are also
2034 * calculated here, since these settings are all interdependent.
2035 */
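/*
 * With the default fixed-point constants (a build may override them), the
 * overhead bound is RUN_MAX_OVRHD/2^RUN_BFP == 0x3d/4096, roughly 1.5%.  The
 * expansion loop below keeps adding pages to run_size until the bytes lost to
 * the run header fall under that fraction of the run, the bound is relaxed
 * for the smallest regions (see RUN_MAX_OVRHD_RELAX), nregs reaches
 * RUN_MAXREGS, or run_size would exceed arena_maxclass.
 */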
2036static size_t
2037bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
2038{
2039	size_t pad_size;
2040	size_t try_run_size, good_run_size;
2041	uint32_t try_nregs, good_nregs;
2042	uint32_t try_hdr_size, good_hdr_size;
2043	uint32_t try_bitmap_offset, good_bitmap_offset;
2044	uint32_t try_ctx0_offset, good_ctx0_offset;
2045	uint32_t try_redzone0_offset, good_redzone0_offset;
2046
2047	assert(min_run_size >= PAGE);
2048	assert(min_run_size <= arena_maxclass);
2049
2050	/*
2051	 * Determine redzone size based on minimum alignment and minimum
2052	 * redzone size.  Add padding to the end of the run if it is needed to
2053	 * align the regions.  The padding allows each redzone to be half the
2054	 * minimum alignment; without the padding, each redzone would have to
2055	 * be twice as large in order to maintain alignment.
2056	 */
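	/*
	 * Worked example (hypothetical 192-byte class, REDZONE_MINSIZE of
	 * 16): the minimum alignment implied by the size is 64, so each
	 * redzone is 32 bytes, pad_size is 32, and reg_interval below becomes
	 * 192 + 2*32 = 256.
	 */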
2057	if (config_fill && opt_redzone) {
2058		size_t align_min = ZU(1) << (ffs(bin_info->reg_size) - 1);
2059		if (align_min <= REDZONE_MINSIZE) {
2060			bin_info->redzone_size = REDZONE_MINSIZE;
2061			pad_size = 0;
2062		} else {
2063			bin_info->redzone_size = align_min >> 1;
2064			pad_size = bin_info->redzone_size;
2065		}
2066	} else {
2067		bin_info->redzone_size = 0;
2068		pad_size = 0;
2069	}
2070	bin_info->reg_interval = bin_info->reg_size +
2071	    (bin_info->redzone_size << 1);
2072
2073	/*
2074	 * Calculate known-valid settings before entering the run_size
2075	 * expansion loop, so that the first part of the loop always copies
2076	 * valid settings.
2077	 *
2078	 * The do..while loop iteratively reduces the number of regions until
2079	 * the run header and the regions no longer overlap.  A closed formula
2080	 * would be quite messy, since there is an interdependency between the
2081	 * header's mask length and the number of regions.
2082	 */
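	/*
	 * Rough example with made-up numbers: if the run header plus bitmap
	 * end up needing 136 bytes, reg_interval is 64, pad_size is 0, and
	 * the run is one 4 KiB page, the loop settles on the largest nregs
	 * with 4096 - 64*nregs >= 136, i.e. nregs == 61.
	 */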
2083	try_run_size = min_run_size;
2084	try_nregs = ((try_run_size - sizeof(arena_run_t)) /
2085	    bin_info->reg_interval)
2086	    + 1; /* Counter-act try_nregs-- in loop. */
2087	if (try_nregs > RUN_MAXREGS) {
2088		try_nregs = RUN_MAXREGS
2089		    + 1; /* Counter-act try_nregs-- in loop. */
2090	}
2091	do {
2092		try_nregs--;
2093		try_hdr_size = sizeof(arena_run_t);
2094		/* Pad to a long boundary. */
2095		try_hdr_size = LONG_CEILING(try_hdr_size);
2096		try_bitmap_offset = try_hdr_size;
2097		/* Add space for bitmap. */
2098		try_hdr_size += bitmap_size(try_nregs);
2099		if (config_prof && opt_prof && prof_promote == false) {
2100			/* Pad to a quantum boundary. */
2101			try_hdr_size = QUANTUM_CEILING(try_hdr_size);
2102			try_ctx0_offset = try_hdr_size;
2103			/* Add space for one (prof_ctx_t *) per region. */
2104			try_hdr_size += try_nregs * sizeof(prof_ctx_t *);
2105		} else
2106			try_ctx0_offset = 0;
2107		try_redzone0_offset = try_run_size - (try_nregs *
2108		    bin_info->reg_interval) - pad_size;
2109	} while (try_hdr_size > try_redzone0_offset);
2110
2111	/* run_size expansion loop. */
2112	do {
2113		/*
2114		 * Copy valid settings before trying more aggressive settings.
2115		 */
2116		good_run_size = try_run_size;
2117		good_nregs = try_nregs;
2118		good_hdr_size = try_hdr_size;
2119		good_bitmap_offset = try_bitmap_offset;
2120		good_ctx0_offset = try_ctx0_offset;
2121		good_redzone0_offset = try_redzone0_offset;
2122
2123		/* Try more aggressive settings. */
2124		try_run_size += PAGE;
2125		try_nregs = ((try_run_size - sizeof(arena_run_t) - pad_size) /
2126		    bin_info->reg_interval)
2127		    + 1; /* Counter-act try_nregs-- in loop. */
2128		if (try_nregs > RUN_MAXREGS) {
2129			try_nregs = RUN_MAXREGS
2130			    + 1; /* Counter-act try_nregs-- in loop. */
2131		}
2132		do {
2133			try_nregs--;
2134			try_hdr_size = sizeof(arena_run_t);
2135			/* Pad to a long boundary. */
2136			try_hdr_size = LONG_CEILING(try_hdr_size);
2137			try_bitmap_offset = try_hdr_size;
2138			/* Add space for bitmap. */
2139			try_hdr_size += bitmap_size(try_nregs);
2140			if (config_prof && opt_prof && prof_promote == false) {
2141				/* Pad to a quantum boundary. */
2142				try_hdr_size = QUANTUM_CEILING(try_hdr_size);
2143				try_ctx0_offset = try_hdr_size;
2144				/*
2145				 * Add space for one (prof_ctx_t *) per region.
2146				 */
2147				try_hdr_size += try_nregs *
2148				    sizeof(prof_ctx_t *);
2149			}
2150			try_redzone0_offset = try_run_size - (try_nregs *
2151			    bin_info->reg_interval) - pad_size;
2152		} while (try_hdr_size > try_redzone0_offset);
2153	} while (try_run_size <= arena_maxclass
2155	    && RUN_MAX_OVRHD * (bin_info->reg_interval << 3) >
2156	    RUN_MAX_OVRHD_RELAX
2157	    && (try_redzone0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size
2158	    && try_nregs < RUN_MAXREGS);
2159
2160	assert(good_hdr_size <= good_redzone0_offset);
2161
2162	/* Copy final settings. */
2163	bin_info->run_size = good_run_size;
2164	bin_info->nregs = good_nregs;
2165	bin_info->bitmap_offset = good_bitmap_offset;
2166	bin_info->ctx0_offset = good_ctx0_offset;
2167	bin_info->reg0_offset = good_redzone0_offset + bin_info->redzone_size;
2168
2169	assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs
2170	    * bin_info->reg_interval) + pad_size == bin_info->run_size);
2171
2172	return (good_run_size);
2173}
2174
2175static void
2176bin_info_init(void)
2177{
2178	arena_bin_info_t *bin_info;
2179	size_t prev_run_size = PAGE;
2180
2181#define	SIZE_CLASS(bin, delta, size)					\
2182	bin_info = &arena_bin_info[bin];				\
2183	bin_info->reg_size = size;					\
2184	prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size);\
2185	bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
2186	SIZE_CLASSES
2187#undef SIZE_CLASS
2188}
2189
2190void
2191arena_boot(void)
2192{
2193	size_t header_size;
2194	unsigned i;
2195
2196	/*
2197	 * Compute the header size such that it is large enough to contain the
2198	 * page map.  The page map is biased to omit entries for the header
2199	 * itself, so some iteration is necessary to compute the map bias.
2200	 *
2201	 * 1) Compute safe header_size and map_bias values that include enough
2202	 *    space for an unbiased page map.
2203	 * 2) Refine map_bias based on (1) to omit the header pages in the page
2204	 *    map.  The resulting map_bias may be one too small.
2205	 * 3) Refine map_bias based on (2).  The result will be >= the result
2206	 *    from (2), and will always be correct.
2207	 */
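	/*
	 * Illustration with made-up sizes (4 KiB pages, 1024-page chunks, a
	 * 128-byte header before the map, 32 bytes per map element):
	 *   pass 1: 128 + 1024*32 == 32896 bytes --> map_bias == 9
	 *   pass 2: 128 + 1015*32 == 32608 bytes --> map_bias == 8
	 *   pass 3: 128 + 1016*32 == 32640 bytes --> map_bias == 8 (stable)
	 */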
2208	map_bias = 0;
2209	for (i = 0; i < 3; i++) {
2210		header_size = offsetof(arena_chunk_t, map) +
2211		    (sizeof(arena_chunk_map_t) * (chunk_npages-map_bias));
2212		map_bias = (header_size >> LG_PAGE) + ((header_size & PAGE_MASK)
2213		    != 0);
2214	}
2215	assert(map_bias > 0);
2216
2217	arena_maxclass = chunksize - (map_bias << LG_PAGE);
2218
2219	bin_info_init();
2220}
2221
2222void
2223arena_prefork(arena_t *arena)
2224{
2225	unsigned i;
2226
2227	malloc_mutex_prefork(&arena->lock);
2228	for (i = 0; i < NBINS; i++)
2229		malloc_mutex_prefork(&arena->bins[i].lock);
2230}
2231
2232void
2233arena_postfork_parent(arena_t *arena)
2234{
2235	unsigned i;
2236
2237	for (i = 0; i < NBINS; i++)
2238		malloc_mutex_postfork_parent(&arena->bins[i].lock);
2239	malloc_mutex_postfork_parent(&arena->lock);
2240}
2241
2242void
2243arena_postfork_child(arena_t *arena)
2244{
2245	unsigned i;
2246
2247	for (i = 0; i < NBINS; i++)
2248		malloc_mutex_postfork_child(&arena->bins[i].lock);
2249	malloc_mutex_postfork_child(&arena->lock);
2250}
2251