tcache.c revision 58799f6d1c1f58053f4aac1b100ce9049c868039
#define	JEMALLOC_TCACHE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

malloc_tsd_data(, tcache, tcache_t *, NULL)
malloc_tsd_data(, tcache_enabled, tcache_enabled_t, tcache_enabled_default)

bool	opt_tcache = true;
ssize_t	opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT;

tcache_bin_info_t	*tcache_bin_info;
static unsigned		stack_nelms; /* Total stack elms per tcache. */

size_t			nhbins;
size_t			tcache_maxclass;

/******************************************************************************/

size_t
tcache_salloc(const void *ptr)
{

	return (arena_salloc(ptr, false));
}

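/*
 * Slow path for the periodic tcache GC event: flush ~3/4 of the objects below
 * the current bin's low water mark, adjust the bin's fill count, then advance
 * to the next bin in round-robin order.
 */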
void
tcache_event_hard(tcache_t *tcache)
{
	size_t binind = tcache->next_gc_bin;
	tcache_bin_t *tbin = &tcache->tbins[binind];
	tcache_bin_info_t *tbin_info = &tcache_bin_info[binind];

	if (tbin->low_water > 0) {
		/*
		 * Flush (ceiling) 3/4 of the objects below the low water mark.
		 */
		if (binind < NBINS) {
			tcache_bin_flush_small(tbin, binind, tbin->ncached -
			    tbin->low_water + (tbin->low_water >> 2), tcache);
		} else {
			tcache_bin_flush_large(tbin, binind, tbin->ncached -
			    tbin->low_water + (tbin->low_water >> 2), tcache);
		}
		/*
		 * Reduce fill count by 2X.  Limit lg_fill_div such that the
		 * fill count is always at least 1.
		 */
		if ((tbin_info->ncached_max >> (tbin->lg_fill_div+1)) >= 1)
			tbin->lg_fill_div++;
	} else if (tbin->low_water < 0) {
		/*
		 * Increase fill count by 2X.  Make sure lg_fill_div stays
		 * greater than 0.
		 */
		if (tbin->lg_fill_div > 1)
			tbin->lg_fill_div--;
	}
	tbin->low_water = tbin->ncached;

	tcache->next_gc_bin++;
	if (tcache->next_gc_bin == nhbins)
		tcache->next_gc_bin = 0;
	tcache->ev_cnt = 0;
}

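/*
 * Called when a small allocation misses in the tcache: refill the bin from
 * the arena, then retry the easy (cached) allocation path.
 */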
void *
tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin, size_t binind)
{
	void *ret;

	arena_tcache_fill_small(tcache->arena, tbin, binind,
	    config_prof ? tcache->prof_accumbytes : 0);
	if (config_prof)
		tcache->prof_accumbytes = 0;
	ret = tcache_alloc_easy(tbin);

	return (ret);
}

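/*
 * Flush cached small objects back to their arena bins until only rem objects
 * remain cached.  Objects may belong to different arenas, so each pass locks
 * the arena bin that owns the first remaining object and defers the rest to a
 * later pass.
 */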
void
tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
    tcache_t *tcache)
{
	void *ptr;
	unsigned i, nflush, ndeferred;
	bool merged_stats = false;

	assert(binind < NBINS);
	assert(rem <= tbin->ncached);

	for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
		/* Lock the arena bin associated with the first object. */
		arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
		    tbin->avail[0]);
		arena_t *arena = chunk->arena;
		arena_bin_t *bin = &arena->bins[binind];

		if (config_prof && arena == tcache->arena) {
			if (arena_prof_accum(arena, tcache->prof_accumbytes))
				prof_idump();
			tcache->prof_accumbytes = 0;
		}

		malloc_mutex_lock(&bin->lock);
		if (config_stats && arena == tcache->arena) {
			assert(merged_stats == false);
			merged_stats = true;
			bin->stats.nflushes++;
			bin->stats.nrequests += tbin->tstats.nrequests;
			tbin->tstats.nrequests = 0;
		}
		ndeferred = 0;
		for (i = 0; i < nflush; i++) {
			ptr = tbin->avail[i];
			assert(ptr != NULL);
			chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
			if (chunk->arena == arena) {
				size_t pageind = ((uintptr_t)ptr -
				    (uintptr_t)chunk) >> LG_PAGE;
				arena_chunk_map_t *mapelm =
				    arena_mapp_get(chunk, pageind);
				arena_dalloc_bin_locked(arena, chunk, ptr,
				    mapelm);
			} else {
				/*
				 * This object was allocated via a different
				 * arena bin than the one that is currently
				 * locked.  Stash the object, so that it can be
				 * handled in a future pass.
				 */
				tbin->avail[ndeferred] = ptr;
				ndeferred++;
			}
		}
		malloc_mutex_unlock(&bin->lock);
	}
	if (config_stats && merged_stats == false) {
		/*
		 * The flush loop didn't happen to flush to this thread's
		 * arena, so the stats didn't get merged.  Manually do so now.
		 */
		arena_bin_t *bin = &tcache->arena->bins[binind];
		malloc_mutex_lock(&bin->lock);
		bin->stats.nflushes++;
		bin->stats.nrequests += tbin->tstats.nrequests;
		tbin->tstats.nrequests = 0;
		malloc_mutex_unlock(&bin->lock);
	}

	memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
	    rem * sizeof(void *));
	tbin->ncached = rem;
	if ((int)tbin->ncached < tbin->low_water)
		tbin->low_water = tbin->ncached;
}

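/*
 * Flush cached large objects back to their arenas until only rem objects
 * remain cached.  Mirrors tcache_bin_flush_small(), but operates under the
 * arena lock rather than a per-bin lock.
 */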
void
tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
    tcache_t *tcache)
{
	void *ptr;
	unsigned i, nflush, ndeferred;
	bool merged_stats = false;

	assert(binind < nhbins);
	assert(rem <= tbin->ncached);

	for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
		/* Lock the arena associated with the first object. */
		arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
		    tbin->avail[0]);
		arena_t *arena = chunk->arena;
		UNUSED bool idump;

		if (config_prof)
			idump = false;
		malloc_mutex_lock(&arena->lock);
		if ((config_prof || config_stats) && arena == tcache->arena) {
			if (config_prof) {
				idump = arena_prof_accum_locked(arena,
				    tcache->prof_accumbytes);
				tcache->prof_accumbytes = 0;
			}
			if (config_stats) {
				merged_stats = true;
				arena->stats.nrequests_large +=
				    tbin->tstats.nrequests;
				arena->stats.lstats[binind - NBINS].nrequests +=
				    tbin->tstats.nrequests;
				tbin->tstats.nrequests = 0;
			}
		}
		ndeferred = 0;
		for (i = 0; i < nflush; i++) {
			ptr = tbin->avail[i];
			assert(ptr != NULL);
			chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
			if (chunk->arena == arena)
				arena_dalloc_large_locked(arena, chunk, ptr);
			else {
				/*
				 * This object was allocated via a different
				 * arena than the one that is currently locked.
				 * Stash the object, so that it can be handled
				 * in a future pass.
				 */
				tbin->avail[ndeferred] = ptr;
				ndeferred++;
			}
		}
		malloc_mutex_unlock(&arena->lock);
		if (config_prof && idump)
			prof_idump();
	}
	if (config_stats && merged_stats == false) {
		/*
		 * The flush loop didn't happen to flush to this thread's
		 * arena, so the stats didn't get merged.  Manually do so now.
		 */
		arena_t *arena = tcache->arena;
		malloc_mutex_lock(&arena->lock);
		arena->stats.nrequests_large += tbin->tstats.nrequests;
		arena->stats.lstats[binind - NBINS].nrequests +=
		    tbin->tstats.nrequests;
		tbin->tstats.nrequests = 0;
		malloc_mutex_unlock(&arena->lock);
	}

	memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
	    rem * sizeof(void *));
	tbin->ncached = rem;
	if ((int)tbin->ncached < tbin->low_water)
		tbin->low_water = tbin->ncached;
}

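/*
 * Associate tcache with arena; if stats are enabled, also link it into the
 * arena's list of extant tcaches.
 */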
void
tcache_arena_associate(tcache_t *tcache, arena_t *arena)
{

	if (config_stats) {
		/* Link into list of extant tcaches. */
		malloc_mutex_lock(&arena->lock);
		ql_elm_new(tcache, link);
		ql_tail_insert(&arena->tcache_ql, tcache, link);
		malloc_mutex_unlock(&arena->lock);
	}
	tcache->arena = arena;
}

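/*
 * Dissociate tcache from its arena; if stats are enabled, unlink it from the
 * arena's tcache list and merge its stats into the arena.
 */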
void
tcache_arena_dissociate(tcache_t *tcache)
{

	if (config_stats) {
		/* Unlink from list of extant tcaches. */
		malloc_mutex_lock(&tcache->arena->lock);
		ql_remove(&tcache->arena->tcache_ql, tcache, link);
		tcache_stats_merge(tcache, tcache->arena);
		malloc_mutex_unlock(&tcache->arena->lock);
	}
}

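/*
 * Slow path for tcache_get(): handle the NULL, purgatory, and reincarnated
 * TSD states, creating a tcache only when it is both safe and enabled to do
 * so.
 */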
tcache_t *
tcache_get_hard(tcache_t *tcache, bool create)
{

	if (tcache == NULL) {
		if (create == false) {
			/*
			 * Creating a tcache here would cause
			 * allocation as a side effect of free().
			 * Ordinarily that would be okay since
			 * tcache_create() failure is a soft failure
			 * that doesn't propagate.  However, if TLS
			 * data are freed via free() as in glibc,
			 * subtle corruption could result from setting
			 * a TLS variable after its backing memory is
			 * freed.
			 */
			return (NULL);
		}
		if (tcache_enabled_get() == false) {
			tcache_enabled_set(false); /* Memoize. */
			return (NULL);
		}
		return (tcache_create(choose_arena(NULL)));
	}
	if (tcache == TCACHE_STATE_PURGATORY) {
		/*
		 * Make a note that an allocator function was called
		 * after tcache_thread_cleanup() was called.
		 */
		tcache = TCACHE_STATE_REINCARNATED;
		tcache_tsd_set(&tcache);
		return (NULL);
	}
	if (tcache == TCACHE_STATE_REINCARNATED)
		return (NULL);
	not_reached();
	return (NULL);
}

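/*
 * Allocate and initialize a tcache for the given arena, with the per-bin
 * pointer stacks laid out contiguously after the tcache_t header.
 */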
tcache_t *
tcache_create(arena_t *arena)
{
	tcache_t *tcache;
	size_t size, stack_offset;
	unsigned i;

	size = offsetof(tcache_t, tbins) + (sizeof(tcache_bin_t) * nhbins);
	/* Naturally align the pointer stacks. */
	size = PTR_CEILING(size);
	stack_offset = size;
	size += stack_nelms * sizeof(void *);
	/*
	 * Round up to the nearest multiple of the cacheline size, in order to
	 * avoid the possibility of false cacheline sharing.
	 *
	 * That this works relies on the same logic as in ipalloc(), but we
	 * cannot directly call ipalloc() here due to tcache bootstrapping
	 * issues.
	 */
	size = (size + CACHELINE_MASK) & (-CACHELINE);

	if (size <= SMALL_MAXCLASS)
		tcache = (tcache_t *)arena_malloc_small(arena, size, true);
	else if (size <= tcache_maxclass)
		tcache = (tcache_t *)arena_malloc_large(arena, size, true);
	else
		tcache = (tcache_t *)icalloct(size, false, arena);

	if (tcache == NULL)
		return (NULL);

	tcache_arena_associate(tcache, arena);

	assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
	for (i = 0; i < nhbins; i++) {
		tcache->tbins[i].lg_fill_div = 1;
		tcache->tbins[i].avail = (void **)((uintptr_t)tcache +
		    (uintptr_t)stack_offset);
		stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
	}

	tcache_tsd_set(&tcache);

	return (tcache);
}

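/*
 * Flush all cached objects, merge any remaining stats and profiling counts
 * into the arena, and release the tcache's own backing memory.
 */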
void
tcache_destroy(tcache_t *tcache)
{
	unsigned i;
	size_t tcache_size;

	tcache_arena_dissociate(tcache);

	for (i = 0; i < NBINS; i++) {
		tcache_bin_t *tbin = &tcache->tbins[i];
		tcache_bin_flush_small(tbin, i, 0, tcache);

		if (config_stats && tbin->tstats.nrequests != 0) {
			arena_t *arena = tcache->arena;
			arena_bin_t *bin = &arena->bins[i];
			malloc_mutex_lock(&bin->lock);
			bin->stats.nrequests += tbin->tstats.nrequests;
			malloc_mutex_unlock(&bin->lock);
		}
	}

	for (; i < nhbins; i++) {
		tcache_bin_t *tbin = &tcache->tbins[i];
		tcache_bin_flush_large(tbin, i, 0, tcache);

		if (config_stats && tbin->tstats.nrequests != 0) {
			arena_t *arena = tcache->arena;
			malloc_mutex_lock(&arena->lock);
			arena->stats.nrequests_large += tbin->tstats.nrequests;
			arena->stats.lstats[i - NBINS].nrequests +=
			    tbin->tstats.nrequests;
			malloc_mutex_unlock(&arena->lock);
		}
	}

	if (config_prof && tcache->prof_accumbytes > 0 &&
	    arena_prof_accum(tcache->arena, tcache->prof_accumbytes))
		prof_idump();

	tcache_size = arena_salloc(tcache, false);
	if (tcache_size <= SMALL_MAXCLASS) {
		arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache);
		arena_t *arena = chunk->arena;
		size_t pageind = ((uintptr_t)tcache - (uintptr_t)chunk) >>
		    LG_PAGE;
		arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);

		arena_dalloc_bin(arena, chunk, tcache, pageind, mapelm);
	} else if (tcache_size <= tcache_maxclass) {
		arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache);
		arena_t *arena = chunk->arena;

		arena_dalloc_large(arena, chunk, tcache);
	} else
		idalloct(tcache, false);
}

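/*
 * TSD destructor: tear down the thread's tcache, using the purgatory and
 * reincarnated sentinel states to cope with allocator calls made by other
 * destructors that run after this one.
 */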
void
tcache_thread_cleanup(void *arg)
{
	tcache_t *tcache = *(tcache_t **)arg;

	if (tcache == TCACHE_STATE_DISABLED) {
		/* Do nothing. */
	} else if (tcache == TCACHE_STATE_REINCARNATED) {
		/*
		 * Another destructor called an allocator function after this
		 * destructor was called.  Reset tcache to
		 * TCACHE_STATE_PURGATORY in order to receive another callback.
		 */
		tcache = TCACHE_STATE_PURGATORY;
		tcache_tsd_set(&tcache);
	} else if (tcache == TCACHE_STATE_PURGATORY) {
		/*
		 * The previous time this destructor was called, we set the key
		 * to TCACHE_STATE_PURGATORY so that other destructors wouldn't
		 * cause re-creation of the tcache.  This time, do nothing, so
		 * that the destructor will not be called again.
		 */
	} else if (tcache != NULL) {
		assert(tcache != TCACHE_STATE_PURGATORY);
		tcache_destroy(tcache);
		tcache = TCACHE_STATE_PURGATORY;
		tcache_tsd_set(&tcache);
	}
}

/* Caller must own arena->lock. */
void
tcache_stats_merge(tcache_t *tcache, arena_t *arena)
{
	unsigned i;

	cassert(config_stats);

	/* Merge and reset tcache stats. */
	for (i = 0; i < NBINS; i++) {
		arena_bin_t *bin = &arena->bins[i];
		tcache_bin_t *tbin = &tcache->tbins[i];
		malloc_mutex_lock(&bin->lock);
		bin->stats.nrequests += tbin->tstats.nrequests;
		malloc_mutex_unlock(&bin->lock);
		tbin->tstats.nrequests = 0;
	}

	for (; i < nhbins; i++) {
		malloc_large_stats_t *lstats = &arena->stats.lstats[i - NBINS];
		tcache_bin_t *tbin = &tcache->tbins[i];
		arena->stats.nrequests_large += tbin->tstats.nrequests;
		lstats->nrequests += tbin->tstats.nrequests;
		tbin->tstats.nrequests = 0;
	}
}

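/*
 * First bootstrap phase: clamp tcache_maxclass, compute nhbins, and
 * initialize tcache_bin_info along with the total per-tcache stack element
 * count.
 */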
bool
tcache_boot0(void)
{
	unsigned i;

	/*
	 * If necessary, clamp opt_lg_tcache_max, now that arena_maxclass is
	 * known.
	 */
	if (opt_lg_tcache_max < 0 || (1U << opt_lg_tcache_max) < SMALL_MAXCLASS)
		tcache_maxclass = SMALL_MAXCLASS;
	else if ((1U << opt_lg_tcache_max) > arena_maxclass)
		tcache_maxclass = arena_maxclass;
	else
		tcache_maxclass = (1U << opt_lg_tcache_max);

	nhbins = NBINS + (tcache_maxclass >> LG_PAGE);

	/* Initialize tcache_bin_info. */
	tcache_bin_info = (tcache_bin_info_t *)base_alloc(nhbins *
	    sizeof(tcache_bin_info_t));
	if (tcache_bin_info == NULL)
		return (true);
	stack_nelms = 0;
	for (i = 0; i < NBINS; i++) {
		if ((arena_bin_info[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MAX) {
			tcache_bin_info[i].ncached_max =
			    (arena_bin_info[i].nregs << 1);
		} else {
			tcache_bin_info[i].ncached_max =
			    TCACHE_NSLOTS_SMALL_MAX;
		}
		stack_nelms += tcache_bin_info[i].ncached_max;
	}
	for (; i < nhbins; i++) {
		tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_LARGE;
		stack_nelms += tcache_bin_info[i].ncached_max;
	}

	return (false);
}

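/* Second bootstrap phase: initialize the tcache-related TSD keys. */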
bool
tcache_boot1(void)
{

	if (tcache_tsd_boot() || tcache_enabled_tsd_boot())
		return (true);

	return (false);
}