/* tcache.c -- revision 064dbfbaf76617643bbbe66cbcc880e7ee9ec00f */
#define	JEMALLOC_TCACHE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

bool	opt_tcache = true;
ssize_t	opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT;

tcache_bin_info_t	*tcache_bin_info;
static unsigned		stack_nelms; /* Total stack elms per tcache. */

size_t			nhbins;
size_t			tcache_maxclass;

tcaches_t		*tcaches;

/* Index of first element within tcaches that has never been used. */
static unsigned		tcaches_past;

/* Head of singly linked list tracking available tcaches elements. */
static tcaches_t	*tcaches_avail;

/******************************************************************************/

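/*
 * Return the usable size of a tcache-eligible allocation.  tcache objects
 * are always arena-backed, so this simply defers to arena_salloc().
 */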
size_t
tcache_salloc(const void *ptr)
{

	return (arena_salloc(ptr, false));
}

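/*
 * Slow path of the incremental GC event: visit one bin per call, in
 * round-robin order via next_gc_bin.  Flush ~3/4 of the objects below the
 * bin's low water mark and halve the fill count; a negative low water mark
 * (presumably set when the bin went empty) instead doubles the fill count.
 */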
void
tcache_event_hard(tsd_t *tsd, tcache_t *tcache)
{
	index_t binind = tcache->next_gc_bin;
	tcache_bin_t *tbin = &tcache->tbins[binind];
	tcache_bin_info_t *tbin_info = &tcache_bin_info[binind];

	if (tbin->low_water > 0) {
		/*
		 * Flush (ceiling) 3/4 of the objects below the low water mark.
		 */
		if (binind < NBINS) {
			tcache_bin_flush_small(tsd, tbin, binind, tbin->ncached
			    - tbin->low_water + (tbin->low_water >> 2), tcache);
		} else {
			tcache_bin_flush_large(tsd, tbin, binind, tbin->ncached
			    - tbin->low_water + (tbin->low_water >> 2), tcache);
		}
		/*
		 * Reduce fill count by 2X.  Limit lg_fill_div such that the
		 * fill count is always at least 1.
		 */
		if ((tbin_info->ncached_max >> (tbin->lg_fill_div+1)) >= 1)
			tbin->lg_fill_div++;
	} else if (tbin->low_water < 0) {
		/*
		 * Increase fill count by 2X.  Make sure lg_fill_div stays
		 * greater than 0.
		 */
		if (tbin->lg_fill_div > 1)
			tbin->lg_fill_div--;
	}
	tbin->low_water = tbin->ncached;

	tcache->next_gc_bin++;
	if (tcache->next_gc_bin == nhbins)
		tcache->next_gc_bin = 0;
	tcache->ev_cnt = 0;
}

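/*
 * Slow path for small allocation: the bin's stack was empty, so refill it
 * from the chosen arena (transferring accumulated prof bytes), then retry
 * the easy path.  Can still return NULL if the arena supplied no objects.
 */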
void *
tcache_alloc_small_hard(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
    index_t binind)
{
	void *ret;

	arena_tcache_fill_small(arena_choose(tsd, NULL), tbin, binind,
	    config_prof ? tcache->prof_accumbytes : 0);
	if (config_prof)
		tcache->prof_accumbytes = 0;
	ret = tcache_alloc_easy(tbin);

	return (ret);
}

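/*
 * Return cached small objects to their arena bins until only rem remain.
 * Each pass locks the bin that owns the first unflushed object; objects
 * owned by other arenas are stashed for a later pass.  Thread-local
 * request stats are merged into this thread's arena along the way.
 */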
void
tcache_bin_flush_small(tsd_t *tsd, tcache_bin_t *tbin, index_t binind,
    unsigned rem, tcache_t *tcache)
{
	arena_t *arena;
	void *ptr;
	unsigned i, nflush, ndeferred;
	bool merged_stats = false;

	assert(binind < NBINS);
	assert(rem <= tbin->ncached);

	arena = arena_choose(tsd, NULL);
	assert(arena != NULL);
	for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
		/* Lock the arena bin associated with the first object. */
		arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
		    tbin->avail[0]);
		arena_t *bin_arena = chunk->arena;
		arena_bin_t *bin = &bin_arena->bins[binind];

		if (config_prof && bin_arena == arena) {
			if (arena_prof_accum(arena, tcache->prof_accumbytes))
				prof_idump();
			tcache->prof_accumbytes = 0;
		}

		malloc_mutex_lock(&bin->lock);
		if (config_stats && bin_arena == arena) {
			assert(!merged_stats);
			merged_stats = true;
			bin->stats.nflushes++;
			bin->stats.nrequests += tbin->tstats.nrequests;
			tbin->tstats.nrequests = 0;
		}
		ndeferred = 0;
		for (i = 0; i < nflush; i++) {
			ptr = tbin->avail[i];
			assert(ptr != NULL);
			chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
			if (chunk->arena == bin_arena) {
				size_t pageind = ((uintptr_t)ptr -
				    (uintptr_t)chunk) >> LG_PAGE;
				arena_chunk_map_bits_t *bitselm =
				    arena_bitselm_get(chunk, pageind);
				arena_dalloc_bin_junked_locked(bin_arena, chunk,
				    ptr, bitselm);
			} else {
				/*
				 * This object was allocated via a different
				 * arena bin than the one that is currently
				 * locked.  Stash the object, so that it can be
				 * handled in a future pass.
				 */
				tbin->avail[ndeferred] = ptr;
				ndeferred++;
			}
		}
		malloc_mutex_unlock(&bin->lock);
	}
	if (config_stats && !merged_stats) {
		/*
		 * The flush loop didn't happen to flush to this thread's
		 * arena, so the stats didn't get merged.  Manually do so now.
		 */
		arena_bin_t *bin = &arena->bins[binind];
		malloc_mutex_lock(&bin->lock);
		bin->stats.nflushes++;
		bin->stats.nrequests += tbin->tstats.nrequests;
		tbin->tstats.nrequests = 0;
		malloc_mutex_unlock(&bin->lock);
	}

	memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
	    rem * sizeof(void *));
	tbin->ncached = rem;
	if ((int)tbin->ncached < tbin->low_water)
		tbin->low_water = tbin->ncached;
}

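/*
 * Return cached large objects to their arenas until only rem remain.
 * Analogous to tcache_bin_flush_small(), but serializes on the arena lock
 * rather than a bin lock, and defers prof_idump() until after the lock is
 * dropped.
 */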
void
tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, index_t binind,
    unsigned rem, tcache_t *tcache)
{
	arena_t *arena;
	void *ptr;
	unsigned i, nflush, ndeferred;
	bool merged_stats = false;

	assert(binind < nhbins);
	assert(rem <= tbin->ncached);

	arena = arena_choose(tsd, NULL);
	assert(arena != NULL);
	for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
		/* Lock the arena associated with the first object. */
		arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
		    tbin->avail[0]);
		arena_t *locked_arena = chunk->arena;
		UNUSED bool idump;

		if (config_prof)
			idump = false;
		malloc_mutex_lock(&locked_arena->lock);
		if ((config_prof || config_stats) && locked_arena == arena) {
			if (config_prof) {
				idump = arena_prof_accum_locked(arena,
				    tcache->prof_accumbytes);
				tcache->prof_accumbytes = 0;
			}
			if (config_stats) {
				merged_stats = true;
				arena->stats.nrequests_large +=
				    tbin->tstats.nrequests;
				arena->stats.lstats[binind - NBINS].nrequests +=
				    tbin->tstats.nrequests;
				tbin->tstats.nrequests = 0;
			}
		}
		ndeferred = 0;
		for (i = 0; i < nflush; i++) {
			ptr = tbin->avail[i];
			assert(ptr != NULL);
			chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
			if (chunk->arena == locked_arena) {
				arena_dalloc_large_junked_locked(locked_arena,
				    chunk, ptr);
			} else {
				/*
				 * This object was allocated via a different
				 * arena than the one that is currently locked.
				 * Stash the object, so that it can be handled
				 * in a future pass.
				 */
				tbin->avail[ndeferred] = ptr;
				ndeferred++;
			}
		}
		malloc_mutex_unlock(&locked_arena->lock);
		if (config_prof && idump)
			prof_idump();
	}
	if (config_stats && !merged_stats) {
		/*
		 * The flush loop didn't happen to flush to this thread's
		 * arena, so the stats didn't get merged.  Manually do so now.
		 */
		malloc_mutex_lock(&arena->lock);
		arena->stats.nrequests_large += tbin->tstats.nrequests;
		arena->stats.lstats[binind - NBINS].nrequests +=
		    tbin->tstats.nrequests;
		tbin->tstats.nrequests = 0;
		malloc_mutex_unlock(&arena->lock);
	}

	memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
	    rem * sizeof(void *));
	tbin->ncached = rem;
	if ((int)tbin->ncached < tbin->low_water)
		tbin->low_water = tbin->ncached;
}

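/* Associate tcache with arena (stats builds track extant tcaches). */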
void
tcache_arena_associate(tcache_t *tcache, arena_t *arena)
{

	if (config_stats) {
		/* Link into list of extant tcaches. */
		malloc_mutex_lock(&arena->lock);
		ql_elm_new(tcache, link);
		ql_tail_insert(&arena->tcache_ql, tcache, link);
		malloc_mutex_unlock(&arena->lock);
	}
}

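/* Move tcache from oldarena's tcache list to newarena's. */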
void
tcache_arena_reassociate(tcache_t *tcache, arena_t *oldarena, arena_t *newarena)
{

	tcache_arena_dissociate(tcache, oldarena);
	tcache_arena_associate(tcache, newarena);
}

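/*
 * Dissociate tcache from arena, merging the tcache's stats into the arena
 * before the link is severed.  Debug builds assert that the tcache is
 * actually present in the arena's list.
 */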
void
tcache_arena_dissociate(tcache_t *tcache, arena_t *arena)
{

	if (config_stats) {
		/* Unlink from list of extant tcaches. */
		malloc_mutex_lock(&arena->lock);
		if (config_debug) {
			bool in_ql = false;
			tcache_t *iter;
			ql_foreach(iter, &arena->tcache_ql, link) {
				if (iter == tcache) {
					in_ql = true;
					break;
				}
			}
			assert(in_ql);
		}
		ql_remove(&arena->tcache_ql, tcache, link);
		tcache_stats_merge(tcache, arena);
		malloc_mutex_unlock(&arena->lock);
	}
}

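/*
 * Slow path for acquiring this thread's tcache: memoize "disabled" if
 * tcaches are turned off, otherwise create a tcache bound to the thread's
 * chosen arena.
 */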
tcache_t *
tcache_get_hard(tsd_t *tsd)
{
	arena_t *arena;

	if (!tcache_enabled_get()) {
		if (tsd_nominal(tsd))
			tcache_enabled_set(false); /* Memoize. */
		return (NULL);
	}
	arena = arena_choose(tsd, NULL);
	if (unlikely(arena == NULL))
		return (NULL);
	return (tcache_create(tsd, arena));
}

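/*
 * Allocate and initialize a tcache.  The tcache_t header, per-bin
 * metadata, and all pointer stacks share one contiguous, cacheline-aligned
 * allocation; stack_offset walks the stack region while each bin's avail
 * pointer is assigned.
 */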
tcache_t *
tcache_create(tsd_t *tsd, arena_t *arena)
{
	tcache_t *tcache;
	size_t size, stack_offset;
	unsigned i;

	size = offsetof(tcache_t, tbins) + (sizeof(tcache_bin_t) * nhbins);
	/* Naturally align the pointer stacks. */
	size = PTR_CEILING(size);
	stack_offset = size;
	size += stack_nelms * sizeof(void *);
	/* Avoid false cacheline sharing. */
	size = sa2u(size, CACHELINE);

	tcache = ipallocztm(tsd, size, CACHELINE, true, false, true, a0get());
	if (tcache == NULL)
		return (NULL);

	tcache_arena_associate(tcache, arena);

	assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
	for (i = 0; i < nhbins; i++) {
		tcache->tbins[i].lg_fill_div = 1;
		tcache->tbins[i].avail = (void **)((uintptr_t)tcache +
		    (uintptr_t)stack_offset);
		stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
	}

	return (tcache);
}

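/*
 * Tear down a tcache: flush every bin back to this thread's arena, merge
 * any residual stats and prof data, then free the tcache itself.
 */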
static void
tcache_destroy(tsd_t *tsd, tcache_t *tcache)
{
	arena_t *arena;
	unsigned i;

	arena = arena_choose(tsd, NULL);
	tcache_arena_dissociate(tcache, arena);

	for (i = 0; i < NBINS; i++) {
		tcache_bin_t *tbin = &tcache->tbins[i];
		tcache_bin_flush_small(tsd, tbin, i, 0, tcache);

		if (config_stats && tbin->tstats.nrequests != 0) {
			arena_bin_t *bin = &arena->bins[i];
			malloc_mutex_lock(&bin->lock);
			bin->stats.nrequests += tbin->tstats.nrequests;
			malloc_mutex_unlock(&bin->lock);
		}
	}

	for (; i < nhbins; i++) {
		tcache_bin_t *tbin = &tcache->tbins[i];
		tcache_bin_flush_large(tsd, tbin, i, 0, tcache);

		if (config_stats && tbin->tstats.nrequests != 0) {
			malloc_mutex_lock(&arena->lock);
			arena->stats.nrequests_large += tbin->tstats.nrequests;
			arena->stats.lstats[i - NBINS].nrequests +=
			    tbin->tstats.nrequests;
			malloc_mutex_unlock(&arena->lock);
		}
	}

	if (config_prof && tcache->prof_accumbytes > 0 &&
	    arena_prof_accum(arena, tcache->prof_accumbytes))
		prof_idump();

	idalloctm(tsd, tcache, false, true);
}

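/* TSD cleanup hook: destroy this thread's tcache, if one exists. */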
void
tcache_cleanup(tsd_t *tsd)
{
	tcache_t *tcache;

	if (!config_tcache)
		return;

	if ((tcache = tsd_tcache_get(tsd)) != NULL) {
		tcache_destroy(tsd, tcache);
		tsd_tcache_set(tsd, NULL);
	}
}

void
tcache_enabled_cleanup(tsd_t *tsd)
{

	/* Do nothing. */
}

/* Caller must own arena->lock. */
void
tcache_stats_merge(tcache_t *tcache, arena_t *arena)
{
	unsigned i;

	cassert(config_stats);

	/* Merge and reset tcache stats. */
	for (i = 0; i < NBINS; i++) {
		arena_bin_t *bin = &arena->bins[i];
		tcache_bin_t *tbin = &tcache->tbins[i];
		malloc_mutex_lock(&bin->lock);
		bin->stats.nrequests += tbin->tstats.nrequests;
		malloc_mutex_unlock(&bin->lock);
		tbin->tstats.nrequests = 0;
	}

	for (; i < nhbins; i++) {
		malloc_large_stats_t *lstats = &arena->stats.lstats[i - NBINS];
		tcache_bin_t *tbin = &tcache->tbins[i];
		arena->stats.nrequests_large += tbin->tstats.nrequests;
		lstats->nrequests += tbin->tstats.nrequests;
		tbin->tstats.nrequests = 0;
	}
}

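/*
 * Create an explicit tcache, lazily allocating the global tcaches array on
 * first use.  Slots recycled by tcaches_destroy() are reused from the
 * tcaches_avail free list before fresh slots are consumed from
 * tcaches_past.  Returns true on error.
 */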
bool
tcaches_create(tsd_t *tsd, unsigned *r_ind)
{
	tcache_t *tcache;
	tcaches_t *elm;

	if (tcaches == NULL) {
		tcaches = base_alloc(sizeof(tcache_t *) *
		    (MALLOCX_TCACHE_MAX+1));
		if (tcaches == NULL)
			return (true);
	}

	if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX)
		return (true);
	tcache = tcache_create(tsd, a0get());
	if (tcache == NULL)
		return (true);

	if (tcaches_avail != NULL) {
		elm = tcaches_avail;
		tcaches_avail = tcaches_avail->next;
		elm->tcache = tcache;
		*r_ind = elm - tcaches;
	} else {
		elm = &tcaches[tcaches_past];
		elm->tcache = tcache;
		*r_ind = tcaches_past;
		tcaches_past++;
	}

	return (false);
}

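/* If the slot holds a tcache, destroy it and clear the slot. */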
static void
tcaches_elm_flush(tsd_t *tsd, tcaches_t *elm)
{

	if (elm->tcache == NULL)
		return;
	tcache_destroy(tsd, elm->tcache);
	elm->tcache = NULL;
}

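/*
 * Flush the explicit tcache at index ind; the slot itself stays allocated
 * (it is not returned to the tcaches_avail free list).
 */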
void
tcaches_flush(tsd_t *tsd, unsigned ind)
{

	tcaches_elm_flush(tsd, &tcaches[ind]);
}

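/* Destroy the explicit tcache at index ind and recycle its slot. */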
void
tcaches_destroy(tsd_t *tsd, unsigned ind)
{
	tcaches_t *elm = &tcaches[ind];
	tcaches_elm_flush(tsd, elm);
	elm->next = tcaches_avail;
	tcaches_avail = elm;
}

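/*
 * Global bootstrap: clamp tcache_maxclass to [SMALL_MAXCLASS,
 * arena_maxclass], derive nhbins, and size each bin's cache (twice nregs
 * for small bins, capped at TCACHE_NSLOTS_SMALL_MAX; TCACHE_NSLOTS_LARGE
 * for large bins).  Returns true on OOM.
 */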
bool
tcache_boot(void)
{
	unsigned i;

	/*
	 * If necessary, clamp opt_lg_tcache_max, now that arena_maxclass is
	 * known.
	 */
	if (opt_lg_tcache_max < 0 || (1U << opt_lg_tcache_max) < SMALL_MAXCLASS)
		tcache_maxclass = SMALL_MAXCLASS;
	else if ((1U << opt_lg_tcache_max) > arena_maxclass)
		tcache_maxclass = arena_maxclass;
	else
		tcache_maxclass = (1U << opt_lg_tcache_max);

	nhbins = NBINS + (tcache_maxclass >> LG_PAGE);

	/* Initialize tcache_bin_info. */
	tcache_bin_info = (tcache_bin_info_t *)base_alloc(nhbins *
	    sizeof(tcache_bin_info_t));
	if (tcache_bin_info == NULL)
		return (true);
	stack_nelms = 0;
	for (i = 0; i < NBINS; i++) {
		if ((arena_bin_info[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MAX) {
			tcache_bin_info[i].ncached_max =
			    (arena_bin_info[i].nregs << 1);
		} else {
			tcache_bin_info[i].ncached_max =
			    TCACHE_NSLOTS_SMALL_MAX;
		}
		stack_nelms += tcache_bin_info[i].ncached_max;
	}
	for (; i < nhbins; i++) {
		tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_LARGE;
		stack_nelms += tcache_bin_info[i].ncached_max;
	}

	return (false);
}