tcache.h revision fb795867f0b3aa28bbdf177e1026f3e3408e0338
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

typedef struct tcache_bin_info_s tcache_bin_info_t;
typedef struct tcache_bin_s tcache_bin_t;
typedef struct tcache_s tcache_t;

/*
 * tcache pointers close to NULL are used to encode state information for two
 * purposes: preventing thread caching on a per thread basis, and cleaning up
 * during thread shutdown.
 */
#define	TCACHE_STATE_DISABLED		((tcache_t *)(uintptr_t)1)
#define	TCACHE_STATE_REINCARNATED	((tcache_t *)(uintptr_t)2)
#define	TCACHE_STATE_PURGATORY		((tcache_t *)(uintptr_t)3)
#define	TCACHE_STATE_MAX		TCACHE_STATE_PURGATORY

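/*
 * Because every valid tcache_t pointer compares greater than
 * TCACHE_STATE_MAX, the inline functions below (e.g. tcache_get() and
 * tcache_flush()) can distinguish an encoded state from a live cache with a
 * single comparison:
 *
 *     if ((uintptr_t)tcache <= (uintptr_t)TCACHE_STATE_MAX)
 *         ... tcache holds a state value, not a cache ...
 */
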
/*
 * Absolute maximum number of cache slots for each small bin in the thread
 * cache.  This is an additional constraint beyond the limit computed per size
 * class (twice the number of regions per run for that size class).
 *
 * This constant must be an even number.
 */
#if defined(ANDROID_TCACHE_NSLOTS_SMALL_MAX)
#define	TCACHE_NSLOTS_SMALL_MAX		ANDROID_TCACHE_NSLOTS_SMALL_MAX
#else
#define	TCACHE_NSLOTS_SMALL_MAX		200
#endif

/* Number of cache slots for large size classes. */
#if defined(ANDROID_TCACHE_NSLOTS_LARGE)
#define	TCACHE_NSLOTS_LARGE		ANDROID_TCACHE_NSLOTS_LARGE
#else
#define	TCACHE_NSLOTS_LARGE		20
#endif

/* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */
#if defined(ANDROID_LG_TCACHE_MAXCLASS_DEFAULT)
#define	LG_TCACHE_MAXCLASS_DEFAULT	ANDROID_LG_TCACHE_MAXCLASS_DEFAULT
#else
#define	LG_TCACHE_MAXCLASS_DEFAULT	15
#endif

/*
 * TCACHE_GC_SWEEP is the approximate number of allocation events between
 * full GC sweeps.  Integer rounding may cause the actual number to be
 * slightly higher, since GC is performed incrementally.
 */
#define	TCACHE_GC_SWEEP			8192

/* Number of tcache allocation/deallocation events between incremental GCs. */
#define	TCACHE_GC_INCR							\
    ((TCACHE_GC_SWEEP / NBINS) + ((TCACHE_GC_SWEEP / NBINS == 0) ? 0 : 1))
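
/*
 * Worked example (hypothetical; NBINS depends on the size class
 * configuration): with NBINS == 28, TCACHE_GC_SWEEP / NBINS == 8192 / 28 ==
 * 292 by integer division, so TCACHE_GC_INCR == 293 and one full sweep of
 * all bins spans about 293 * 28 == 8204 events, slightly above
 * TCACHE_GC_SWEEP as the comment above anticipates.
 */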

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

typedef enum {
	tcache_enabled_false   = 0, /* Enable cast to/from bool. */
	tcache_enabled_true    = 1,
	tcache_enabled_default = 2
} tcache_enabled_t;

/*
 * Read-only information associated with each element of tcache_t's tbins array
 * is stored separately, mainly to reduce memory usage.
 */
struct tcache_bin_info_s {
	unsigned	ncached_max;	/* Upper limit on ncached. */
};

struct tcache_bin_s {
	tcache_bin_stats_t tstats;
	int		low_water;	/* Min # cached since last GC. */
	unsigned	lg_fill_div;	/* Fill (ncached_max >> lg_fill_div). */
	unsigned	ncached;	/* # of cached objects. */
	void		**avail;	/* Stack of available objects. */
};

struct tcache_s {
	ql_elm(tcache_t) link;		/* Used for aggregating stats. */
	uint64_t	prof_accumbytes;/* Cleared after arena_prof_accum(). */
	arena_t		*arena;		/* This thread's arena. */
	unsigned	ev_cnt;		/* Event count since incremental GC. */
	unsigned	next_gc_bin;	/* Next bin to GC. */
	tcache_bin_t	tbins[1];	/* Dynamically sized. */
	/*
	 * The pointer stacks associated with tbins follow as a contiguous
	 * array.  During tcache initialization, the avail pointer in each
	 * element of tbins is initialized to point to the proper offset within
	 * this array.
	 */
};
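
/*
 * Layout sketch (illustrative; the actual setup is performed by
 * tcache_create()): the allocation backing a tcache_t is sized roughly as
 *
 *     sizeof(tcache_t) + sizeof(tcache_bin_t) * (nhbins - 1)
 *         + sizeof(void *) * (sum of tcache_bin_info[i].ncached_max)
 *
 * after which each tbins[i].avail is pointed at that bin's slice of the
 * trailing pointer array.
 */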

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

extern bool	opt_tcache;
extern ssize_t	opt_lg_tcache_max;

extern tcache_bin_info_t	*tcache_bin_info;

/*
 * Number of tcache bins.  There are NBINS small-object bins, plus 0 or more
 * large-object bins.
 */
extern size_t			nhbins;

/* Maximum cached size class. */
extern size_t			tcache_maxclass;

size_t	tcache_salloc(const void *ptr);
void	tcache_event_hard(tcache_t *tcache);
void	*tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin,
    size_t binind);
void	tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
    tcache_t *tcache);
void	tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
    tcache_t *tcache);
void	tcache_arena_associate(tcache_t *tcache, arena_t *arena);
void	tcache_arena_dissociate(tcache_t *tcache);
tcache_t *tcache_get_hard(tcache_t *tcache, bool create);
tcache_t *tcache_create(arena_t *arena);
void	tcache_destroy(tcache_t *tcache);
void	tcache_thread_cleanup(void *arg);
void	tcache_stats_merge(tcache_t *tcache, arena_t *arena);
bool	tcache_boot0(void);
bool	tcache_boot1(void);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), tcache, tcache_t *)
malloc_tsd_protos(JEMALLOC_ATTR(unused), tcache_enabled, tcache_enabled_t)

void	tcache_event(tcache_t *tcache);
void	tcache_flush(void);
bool	tcache_enabled_get(void);
tcache_t *tcache_get(bool create);
void	tcache_enabled_set(bool enabled);
void	*tcache_alloc_easy(tcache_bin_t *tbin);
void	*tcache_alloc_small(tcache_t *tcache, size_t size, bool zero);
void	*tcache_alloc_large(tcache_t *tcache, size_t size, bool zero);
void	tcache_dalloc_small(tcache_t *tcache, void *ptr, size_t binind);
void	tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TCACHE_C_))
/* Map of thread-specific caches. */
malloc_tsd_externs(tcache, tcache_t *)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, tcache, tcache_t *, NULL,
    tcache_thread_cleanup)
/* Per thread flag that allows thread caches to be disabled. */
malloc_tsd_externs(tcache_enabled, tcache_enabled_t)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, tcache_enabled, tcache_enabled_t,
    tcache_enabled_default, malloc_tsd_no_cleanup)
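
/*
 * Note: the malloc_tsd_externs()/malloc_tsd_funcs() invocations above
 * generate the tcache_tsd_get()/tcache_tsd_set() and
 * tcache_enabled_tsd_get()/tcache_enabled_tsd_set() accessors used by the
 * inline functions below.
 */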

JEMALLOC_INLINE void
tcache_flush(void)
{
	tcache_t *tcache;

	cassert(config_tcache);

	tcache = *tcache_tsd_get();
	if ((uintptr_t)tcache <= (uintptr_t)TCACHE_STATE_MAX)
		return;
	tcache_destroy(tcache);
	tcache = NULL;
	tcache_tsd_set(&tcache);
}

JEMALLOC_INLINE bool
tcache_enabled_get(void)
{
	tcache_enabled_t tcache_enabled;

	cassert(config_tcache);

	tcache_enabled = *tcache_enabled_tsd_get();
	if (tcache_enabled == tcache_enabled_default) {
		tcache_enabled = (tcache_enabled_t)opt_tcache;
		tcache_enabled_tsd_set(&tcache_enabled);
	}

	return ((bool)tcache_enabled);
}

JEMALLOC_INLINE void
tcache_enabled_set(bool enabled)
{
	tcache_enabled_t tcache_enabled;
	tcache_t *tcache;

	cassert(config_tcache);

	tcache_enabled = (tcache_enabled_t)enabled;
	tcache_enabled_tsd_set(&tcache_enabled);
	tcache = *tcache_tsd_get();
	if (enabled) {
		if (tcache == TCACHE_STATE_DISABLED) {
			tcache = NULL;
			tcache_tsd_set(&tcache);
		}
	} else /* disabled */ {
		if (tcache > TCACHE_STATE_MAX) {
			tcache_destroy(tcache);
			tcache = NULL;
		}
		if (tcache == NULL) {
			tcache = TCACHE_STATE_DISABLED;
			tcache_tsd_set(&tcache);
		}
	}
}

JEMALLOC_ALWAYS_INLINE tcache_t *
tcache_get(bool create)
{
	tcache_t *tcache;

	if (config_tcache == false)
		return (NULL);
	if (config_lazy_lock && isthreaded == false)
		return (NULL);

	tcache = *tcache_tsd_get();
	if ((uintptr_t)tcache <= (uintptr_t)TCACHE_STATE_MAX) {
		if (tcache == TCACHE_STATE_DISABLED)
			return (NULL);
		tcache = tcache_get_hard(tcache, create);
	}

	return (tcache);
}

JEMALLOC_ALWAYS_INLINE void
tcache_event(tcache_t *tcache)
{

	if (TCACHE_GC_INCR == 0)
		return;

	tcache->ev_cnt++;
	assert(tcache->ev_cnt <= TCACHE_GC_INCR);
	if (tcache->ev_cnt == TCACHE_GC_INCR)
		tcache_event_hard(tcache);
}

JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_easy(tcache_bin_t *tbin)
{
	void *ret;

	if (tbin->ncached == 0) {
		tbin->low_water = -1;
		return (NULL);
	}
	tbin->ncached--;
	if ((int)tbin->ncached < tbin->low_water)
		tbin->low_water = tbin->ncached;
	ret = tbin->avail[tbin->ncached];
	return (ret);
}
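
/*
 * tcache_alloc_easy() treats avail as a LIFO stack: ncached indexes one past
 * the most recently cached object, so a pop is simply avail[--ncached].
 * Setting low_water to -1 on an empty bin records that the bin was
 * exhausted, which tcache_event_hard() consults when adjusting lg_fill_div
 * (see tcache_bin_s above).
 */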

JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
{
	void *ret;
	size_t binind;
	tcache_bin_t *tbin;

	binind = small_size2bin(size);
	assert(binind < NBINS);
	tbin = &tcache->tbins[binind];
	size = small_bin2size(binind);
	ret = tcache_alloc_easy(tbin);
	if (ret == NULL) {
		ret = tcache_alloc_small_hard(tcache, tbin, binind);
		if (ret == NULL)
			return (NULL);
	}
	assert(tcache_salloc(ret) == size);

	if (zero == false) {
		if (config_fill) {
			if (opt_junk) {
				arena_alloc_junk_small(ret,
				    &arena_bin_info[binind], false);
			} else if (opt_zero)
				memset(ret, 0, size);
		}
	} else {
		if (config_fill && opt_junk) {
			arena_alloc_junk_small(ret, &arena_bin_info[binind],
			    true);
		}
		memset(ret, 0, size);
	}

	if (config_stats)
		tbin->tstats.nrequests++;
	if (config_prof)
		tcache->prof_accumbytes += size;
	tcache_event(tcache);
	return (ret);
}

JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
{
	void *ret;
	size_t binind;
	tcache_bin_t *tbin;

	size = PAGE_CEILING(size);
	assert(size <= tcache_maxclass);
	binind = NBINS + (size >> LG_PAGE) - 1;
	assert(binind < nhbins);
	tbin = &tcache->tbins[binind];
	ret = tcache_alloc_easy(tbin);
	if (ret == NULL) {
		/*
		 * Only allocate one large object at a time, because it's quite
		 * expensive to create one and not use it.
		 */
		ret = arena_malloc_large(tcache->arena, size, zero);
		if (ret == NULL)
			return (NULL);
	} else {
		if (config_prof && size == PAGE) {
			arena_chunk_t *chunk =
			    (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
			size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >>
			    LG_PAGE);
			arena_mapbits_large_binind_set(chunk, pageind,
			    BININD_INVALID);
		}
		if (zero == false) {
			if (config_fill) {
				if (opt_junk)
					memset(ret, 0xa5, size);
				else if (opt_zero)
					memset(ret, 0, size);
			}
		} else
			memset(ret, 0, size);

		if (config_stats)
			tbin->tstats.nrequests++;
		if (config_prof)
			tcache->prof_accumbytes += size;
	}

	tcache_event(tcache);
	return (ret);
}
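
/*
 * Large bin index mapping used above and in tcache_dalloc_large(): for a
 * request of k pages, binind == NBINS + k - 1, so a single-page allocation
 * (size == PAGE) maps to bin NBINS, the first large bin, and the assert
 * above guarantees binind < nhbins for every cacheable large size.
 */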

JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_small(tcache_t *tcache, void *ptr, size_t binind)
{
	tcache_bin_t *tbin;
	tcache_bin_info_t *tbin_info;

	assert(tcache_salloc(ptr) <= SMALL_MAXCLASS);

	if (config_fill && opt_junk)
		arena_dalloc_junk_small(ptr, &arena_bin_info[binind]);

	tbin = &tcache->tbins[binind];
	tbin_info = &tcache_bin_info[binind];
	if (tbin->ncached == tbin_info->ncached_max) {
		tcache_bin_flush_small(tbin, binind, (tbin_info->ncached_max >>
		    1), tcache);
	}
	assert(tbin->ncached < tbin_info->ncached_max);
	tbin->avail[tbin->ncached] = ptr;
	tbin->ncached++;

	tcache_event(tcache);
}
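
/*
 * Flush-on-full sketch: when a bin reaches ncached_max, half of its cached
 * objects are returned to the arena (the rem argument to
 * tcache_bin_flush_small() is the number of objects that remain cached),
 * making room for the new pointer.  For example, with ncached_max == 200 a
 * full bin is flushed down to 100 before caching ptr.
 */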

JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size)
{
	size_t binind;
	tcache_bin_t *tbin;
	tcache_bin_info_t *tbin_info;

	assert((size & PAGE_MASK) == 0);
	assert(tcache_salloc(ptr) > SMALL_MAXCLASS);
	assert(tcache_salloc(ptr) <= tcache_maxclass);

	binind = NBINS + (size >> LG_PAGE) - 1;

	if (config_fill && opt_junk)
		memset(ptr, 0x5a, size);

	tbin = &tcache->tbins[binind];
	tbin_info = &tcache_bin_info[binind];
	if (tbin->ncached == tbin_info->ncached_max) {
		tcache_bin_flush_large(tbin, binind, (tbin_info->ncached_max >>
		    1), tcache);
	}
	assert(tbin->ncached < tbin_info->ncached_max);
	tbin->avail[tbin->ncached] = ptr;
	tbin->ncached++;

	tcache_event(tcache);
}
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/