tcache.h revision bd87b01999416ec7418ff8bdb504d9b6c009ff68
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

typedef struct tcache_bin_info_s tcache_bin_info_t;
typedef struct tcache_bin_s tcache_bin_t;
typedef struct tcache_s tcache_t;

/*
 * tcache pointers close to NULL are used to encode state information that is
 * used for two purposes: preventing thread caching on a per-thread basis and
 * cleaning up during thread shutdown.
 */
#define	TCACHE_STATE_DISABLED		((tcache_t *)(uintptr_t)1)
#define	TCACHE_STATE_REINCARNATED	((tcache_t *)(uintptr_t)2)
#define	TCACHE_STATE_PURGATORY		((tcache_t *)(uintptr_t)3)
#define	TCACHE_STATE_MAX		TCACHE_STATE_PURGATORY
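/*
 * Any tcache pointer value less than or equal to TCACHE_STATE_MAX therefore
 * denotes a state marker rather than a usable cache.  The inline functions
 * below (e.g. tcache_get(), tcache_flush()) test for this as:
 *
 *	if ((uintptr_t)tcache <= (uintptr_t)TCACHE_STATE_MAX)
 *		... handle NULL or a state value ...
 */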

/*
 * Absolute maximum number of cache slots for each small bin in the thread
 * cache.  This is an additional constraint beyond the one otherwise imposed:
 * twice the number of regions per run for the size class.
 *
 * This constant must be an even number.
 */
#define	TCACHE_NSLOTS_SMALL_MAX		200
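/*
 * Illustration (a sketch, not normative): the effective per-bin limit is the
 * smaller of the two constraints above, i.e. roughly
 *
 *	ncached_max = min(2 * nregs_per_run, TCACHE_NSLOTS_SMALL_MAX)
 *
 * so size classes whose runs hold few regions get proportionally smaller
 * caches, while TCACHE_NSLOTS_SMALL_MAX caps the rest.
 */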

/* Number of cache slots for large size classes. */
#define	TCACHE_NSLOTS_LARGE		20

/* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */
#define	LG_TCACHE_MAXCLASS_DEFAULT	15

/*
 * TCACHE_GC_SWEEP is the approximate number of allocation events between
 * full GC sweeps.  Integer rounding may cause the actual number to be
 * slightly higher, since GC is performed incrementally.
 */
#define	TCACHE_GC_SWEEP			8192

/* Number of tcache allocation/deallocation events between incremental GCs. */
#define	TCACHE_GC_INCR							\
    ((TCACHE_GC_SWEEP / NBINS) + ((TCACHE_GC_SWEEP / NBINS == 0) ? 0 : 1))
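/*
 * Worked example (NBINS is configuration-dependent; 36 is used here only for
 * illustration): TCACHE_GC_INCR = 8192/36 + 1 = 228, so one bin is considered
 * for GC every 228 events, and a full sweep of all bins completes after about
 * 36 * 228 = 8208 events, slightly more than TCACHE_GC_SWEEP due to rounding.
 */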

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

typedef enum {
	tcache_enabled_false   = 0, /* Enable cast to/from bool. */
	tcache_enabled_true    = 1,
	tcache_enabled_default = 2
} tcache_enabled_t;

/*
 * Read-only information associated with each element of tcache_t's tbins array
 * is stored separately, mainly to reduce memory usage.
 */
struct tcache_bin_info_s {
	unsigned	ncached_max;	/* Upper limit on ncached. */
};

struct tcache_bin_s {
	tcache_bin_stats_t tstats;
	int		low_water;	/* Min # cached since last GC. */
	unsigned	lg_fill_div;	/* Fill (ncached_max >> lg_fill_div). */
	unsigned	ncached;	/* # of cached objects. */
	void		**avail;	/* Stack of available objects. */
};

struct tcache_s {
	ql_elm(tcache_t) link;		/* Used for aggregating stats. */
	uint64_t	prof_accumbytes;/* Cleared after arena_prof_accum() */
	arena_t		*arena;		/* This thread's arena. */
	unsigned	ev_cnt;		/* Event count since incremental GC. */
	unsigned	next_gc_bin;	/* Next bin to GC. */
	tcache_bin_t	tbins[1];	/* Dynamically sized. */
	/*
	 * The pointer stacks associated with tbins follow as a contiguous
	 * array.  During tcache initialization, the avail pointer in each
	 * element of tbins is initialized to point to the proper offset within
	 * this array.
	 */
};
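/*
 * Illustrative memory layout of a tcache allocation (a sketch; the exact
 * sizing is performed in tcache_create(), not in this header):
 *
 *	| tcache_t fields | tbins[0..nhbins-1] | avail stack 0 | avail stack 1 | ...
 *
 * Each tbins[i].avail points into the trailing region, with room for
 * tcache_bin_info[i].ncached_max object pointers.
 */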

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

extern bool	opt_tcache;
extern ssize_t	opt_lg_tcache_max;

extern tcache_bin_info_t	*tcache_bin_info;

/*
 * Number of tcache bins.  There are NBINS small-object bins, plus 0 or more
 * large-object bins.
 */
extern size_t			nhbins;

/* Maximum cached size class. */
extern size_t			tcache_maxclass;

size_t	tcache_salloc(const void *ptr);
void	tcache_event_hard(tcache_t *tcache);
void	*tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin,
    size_t binind);
void	tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
    tcache_t *tcache);
void	tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
    tcache_t *tcache);
void	tcache_arena_associate(tcache_t *tcache, arena_t *arena);
void	tcache_arena_dissociate(tcache_t *tcache);
tcache_t *tcache_create(arena_t *arena);
void	tcache_destroy(tcache_t *tcache);
void	tcache_thread_cleanup(void *arg);
void	tcache_stats_merge(tcache_t *tcache, arena_t *arena);
bool	tcache_boot0(void);
bool	tcache_boot1(void);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), tcache, tcache_t *)
malloc_tsd_protos(JEMALLOC_ATTR(unused), tcache_enabled, tcache_enabled_t)

void	tcache_event(tcache_t *tcache);
void	tcache_flush(void);
bool	tcache_enabled_get(void);
tcache_t *tcache_get(bool create);
void	tcache_enabled_set(bool enabled);
void	*tcache_alloc_easy(tcache_bin_t *tbin);
void	*tcache_alloc_small(tcache_t *tcache, size_t size, bool zero);
void	*tcache_alloc_large(tcache_t *tcache, size_t size, bool zero);
void	tcache_dalloc_small(tcache_t *tcache, void *ptr, size_t binind);
void	tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TCACHE_C_))
/* Map of thread-specific caches. */
malloc_tsd_externs(tcache, tcache_t *)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, tcache, tcache_t *, NULL,
    tcache_thread_cleanup)
/* Per-thread flag that allows thread caches to be disabled. */
malloc_tsd_externs(tcache_enabled, tcache_enabled_t)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, tcache_enabled, tcache_enabled_t,
    tcache_enabled_default, malloc_tsd_no_cleanup)

JEMALLOC_INLINE void
tcache_flush(void)
{
	tcache_t *tcache;

	cassert(config_tcache);

	tcache = *tcache_tsd_get();
	if ((uintptr_t)tcache <= (uintptr_t)TCACHE_STATE_MAX)
		return;
	tcache_destroy(tcache);
	tcache = NULL;
	tcache_tsd_set(&tcache);
}

JEMALLOC_INLINE bool
tcache_enabled_get(void)
{
	tcache_enabled_t tcache_enabled;

	cassert(config_tcache);

	tcache_enabled = *tcache_enabled_tsd_get();
	if (tcache_enabled == tcache_enabled_default) {
		tcache_enabled = (tcache_enabled_t)opt_tcache;
		tcache_enabled_tsd_set(&tcache_enabled);
	}

	return ((bool)tcache_enabled);
}

JEMALLOC_INLINE void
tcache_enabled_set(bool enabled)
{
	tcache_enabled_t tcache_enabled;
	tcache_t *tcache;

	cassert(config_tcache);

	tcache_enabled = (tcache_enabled_t)enabled;
	tcache_enabled_tsd_set(&tcache_enabled);
	tcache = *tcache_tsd_get();
	if (enabled) {
		if (tcache == TCACHE_STATE_DISABLED) {
			tcache = NULL;
			tcache_tsd_set(&tcache);
		}
	} else /* disabled */ {
		if (tcache > TCACHE_STATE_MAX) {
			tcache_destroy(tcache);
			tcache = NULL;
		}
		if (tcache == NULL) {
			tcache = TCACHE_STATE_DISABLED;
			tcache_tsd_set(&tcache);
		}
	}
}

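/*
 * Return this thread's tcache, creating one on first use if create is true.
 * Summary of the state handling below: NULL with create false (or with
 * caching disabled) yields NULL; NULL with create true allocates a cache via
 * tcache_create(); TCACHE_STATE_DISABLED always yields NULL; and
 * TCACHE_STATE_PURGATORY is changed to TCACHE_STATE_REINCARNATED to record
 * that an allocator function ran after tcache_thread_cleanup().
 */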
JEMALLOC_ALWAYS_INLINE tcache_t *
tcache_get(bool create)
{
	tcache_t *tcache;

	if (config_tcache == false)
		return (NULL);
	if (config_lazy_lock && isthreaded == false)
		return (NULL);

	tcache = *tcache_tsd_get();
	if ((uintptr_t)tcache <= (uintptr_t)TCACHE_STATE_MAX) {
		if (tcache == TCACHE_STATE_DISABLED)
			return (NULL);
		if (tcache == NULL) {
			if (create == false) {
				/*
				 * Creating a tcache here would cause
				 * allocation as a side effect of free().
				 * Ordinarily that would be okay since
				 * tcache_create() failure is a soft failure
				 * that doesn't propagate.  However, if TLS
				 * data are freed via free() as in glibc,
				 * subtle corruption could result from setting
				 * a TLS variable after its backing memory is
				 * freed.
				 */
				return (NULL);
			}
			if (tcache_enabled_get() == false) {
				tcache_enabled_set(false); /* Memoize. */
				return (NULL);
			}
			return (tcache_create(choose_arena(NULL)));
		}
		if (tcache == TCACHE_STATE_PURGATORY) {
			/*
			 * Make a note that an allocator function was called
			 * after tcache_thread_cleanup() was called.
			 */
			tcache = TCACHE_STATE_REINCARNATED;
			tcache_tsd_set(&tcache);
			return (NULL);
		}
		if (tcache == TCACHE_STATE_REINCARNATED)
			return (NULL);
		not_reached();
	}

	return (tcache);
}

JEMALLOC_ALWAYS_INLINE void
tcache_event(tcache_t *tcache)
{

	if (TCACHE_GC_INCR == 0)
		return;

	tcache->ev_cnt++;
	assert(tcache->ev_cnt <= TCACHE_GC_INCR);
	if (tcache->ev_cnt == TCACHE_GC_INCR)
		tcache_event_hard(tcache);
}

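/*
 * Pop an object from tbin's avail stack, or return NULL if the bin is empty.
 * low_water tracks the minimum ncached observed since the last GC pass, and
 * is set to -1 once the bin has run empty.
 */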
JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_easy(tcache_bin_t *tbin)
{
	void *ret;

	if (tbin->ncached == 0) {
		tbin->low_water = -1;
		return (NULL);
	}
	tbin->ncached--;
	if ((int)tbin->ncached < tbin->low_water)
		tbin->low_water = tbin->ncached;
	ret = tbin->avail[tbin->ncached];
	return (ret);
}

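/*
 * Allocate a small object of the given size from the thread cache, falling
 * back to tcache_alloc_small_hard() when the bin is empty.  The returned
 * memory is junk- or zero-filled according to the fill options, and zeroed
 * when zero is true.
 */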
JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
{
	void *ret;
	size_t binind;
	tcache_bin_t *tbin;

	binind = SMALL_SIZE2BIN(size);
	assert(binind < NBINS);
	tbin = &tcache->tbins[binind];
	size = arena_bin_info[binind].reg_size;
	ret = tcache_alloc_easy(tbin);
	if (ret == NULL) {
		ret = tcache_alloc_small_hard(tcache, tbin, binind);
		if (ret == NULL)
			return (NULL);
	}
	assert(tcache_salloc(ret) == arena_bin_info[binind].reg_size);

	if (zero == false) {
		if (config_fill) {
			if (opt_junk) {
				arena_alloc_junk_small(ret,
				    &arena_bin_info[binind], false);
			} else if (opt_zero)
				memset(ret, 0, size);
		}
	} else {
		if (config_fill && opt_junk) {
			arena_alloc_junk_small(ret, &arena_bin_info[binind],
			    true);
		}
		memset(ret, 0, size);
	}

	if (config_stats)
		tbin->tstats.nrequests++;
	if (config_prof)
		tcache->prof_accumbytes += arena_bin_info[binind].reg_size;
	tcache_event(tcache);
	return (ret);
}

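/*
 * Allocate a large object of the given (page-aligned) size from the thread
 * cache.  The bin index is derived from the size, e.g. size == PAGE maps to
 * binind == NBINS, size == 2 * PAGE to NBINS + 1, and so on.  On a cache miss
 * the object is allocated directly from the arena rather than refilling the
 * bin, since large objects are expensive to create and might go unused.
 */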
JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
{
	void *ret;
	size_t binind;
	tcache_bin_t *tbin;

	size = PAGE_CEILING(size);
	assert(size <= tcache_maxclass);
	binind = NBINS + (size >> LG_PAGE) - 1;
	assert(binind < nhbins);
	tbin = &tcache->tbins[binind];
	ret = tcache_alloc_easy(tbin);
	if (ret == NULL) {
		/*
		 * Only allocate one large object at a time, because it's quite
		 * expensive to create one and not use it.
		 */
		ret = arena_malloc_large(tcache->arena, size, zero);
		if (ret == NULL)
			return (NULL);
	} else {
		if (config_prof && size == PAGE) {
			arena_chunk_t *chunk =
			    (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
			size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >>
			    LG_PAGE);
			arena_mapbits_large_binind_set(chunk, pageind,
			    BININD_INVALID);
		}
		if (zero == false) {
			if (config_fill) {
				if (opt_junk)
					memset(ret, 0xa5, size);
				else if (opt_zero)
					memset(ret, 0, size);
			}
		} else
			memset(ret, 0, size);

		if (config_stats)
			tbin->tstats.nrequests++;
		if (config_prof)
			tcache->prof_accumbytes += size;
	}

	tcache_event(tcache);
	return (ret);
}

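/*
 * Return a small object to the thread cache.  If the bin is already full
 * (ncached == ncached_max), flush half of its contents back to the arena via
 * tcache_bin_flush_small() before caching the new pointer.
 */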
JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_small(tcache_t *tcache, void *ptr, size_t binind)
{
	tcache_bin_t *tbin;
	tcache_bin_info_t *tbin_info;

	assert(tcache_salloc(ptr) <= SMALL_MAXCLASS);

	if (config_fill && opt_junk)
		arena_dalloc_junk_small(ptr, &arena_bin_info[binind]);

	tbin = &tcache->tbins[binind];
	tbin_info = &tcache_bin_info[binind];
	if (tbin->ncached == tbin_info->ncached_max) {
		tcache_bin_flush_small(tbin, binind, (tbin_info->ncached_max >>
		    1), tcache);
	}
	assert(tbin->ncached < tbin_info->ncached_max);
	tbin->avail[tbin->ncached] = ptr;
	tbin->ncached++;

	tcache_event(tcache);
}

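/*
 * Return a large object to the thread cache.  The size must be a multiple of
 * the page size and within (SMALL_MAXCLASS, tcache_maxclass]; as in
 * tcache_dalloc_small(), a full bin is flushed to half capacity via
 * tcache_bin_flush_large() before the new pointer is cached.
 */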
JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size)
{
	size_t binind;
	tcache_bin_t *tbin;
	tcache_bin_info_t *tbin_info;

	assert((size & PAGE_MASK) == 0);
	assert(tcache_salloc(ptr) > SMALL_MAXCLASS);
	assert(tcache_salloc(ptr) <= tcache_maxclass);

	binind = NBINS + (size >> LG_PAGE) - 1;

	if (config_fill && opt_junk)
		memset(ptr, 0x5a, size);

	tbin = &tcache->tbins[binind];
	tbin_info = &tcache_bin_info[binind];
	if (tbin->ncached == tbin_info->ncached_max) {
		tcache_bin_flush_large(tbin, binind, (tbin_info->ncached_max >>
		    1), tcache);
	}
	assert(tbin->ncached < tbin_info->ncached_max);
	tbin->avail[tbin->ncached] = ptr;
	tbin->ncached++;

	tcache_event(tcache);
}
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/