tcache.h revision e24c7af35d1e9d24d02166ac98cfca7cf807ff13
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

typedef struct tcache_bin_info_s tcache_bin_info_t;
typedef struct tcache_bin_s tcache_bin_t;
typedef struct tcache_s tcache_t;

/*
 * Absolute maximum number of cache slots for each small bin in the thread
 * cache.  This is an additional constraint beyond the limit of twice the
 * number of regions per run for this size class.
 *
 * This constant must be an even number.
 */
#define	TCACHE_NSLOTS_SMALL_MAX		200
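/*
 * E.g. a small size class whose runs hold 64 regions would thus be limited to
 * min(2 * 64, TCACHE_NSLOTS_SMALL_MAX) == 128 slots; the clamp itself is
 * presumably applied where tcache_bin_info is initialized (tcache_boot()).
 */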

/* Number of cache slots for large size classes. */
#define	TCACHE_NSLOTS_LARGE		20

/* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */
#define	LG_TCACHE_MAXCLASS_DEFAULT	15

/*
 * TCACHE_GC_SWEEP is the approximate number of allocation events between
 * full GC sweeps.  Integer rounding may cause the actual number to be
 * slightly higher, since GC is performed incrementally.
 */
#define	TCACHE_GC_SWEEP			8192

/* Number of tcache allocation/deallocation events between incremental GCs. */
#define	TCACHE_GC_INCR							\
    ((TCACHE_GC_SWEEP / NBINS) + ((TCACHE_GC_SWEEP / NBINS == 0) ? 0 : 1))
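/*
 * Worked example (NBINS value chosen for illustration only): with NBINS == 28,
 * TCACHE_GC_INCR == (8192 / 28) + 1 == 293, so a full pass over the small bins
 * takes 28 * 293 == 8204 events -- slightly more than TCACHE_GC_SWEEP, as the
 * comment above warns.
 */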

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

/*
 * Read-only information associated with each element of tcache_t's tbins array
 * is stored separately, mainly to reduce memory usage.
 */
struct tcache_bin_info_s {
	unsigned	ncached_max;	/* Upper limit on ncached. */
};

struct tcache_bin_s {
	tcache_bin_stats_t tstats;
	int		low_water;	/* Min # cached since last GC. */
	unsigned	lg_fill_div;	/* Fill (ncached_max >> lg_fill_div). */
	unsigned	ncached;	/* # of cached objects. */
	void		**avail;	/* Stack of available objects. */
};

struct tcache_s {
	ql_elm(tcache_t) link;		/* Used for aggregating stats. */
	uint64_t	prof_accumbytes;/* Cleared after arena_prof_accum() */
	arena_t		*arena;		/* This thread's arena. */
	unsigned	ev_cnt;		/* Event count since incremental GC. */
	unsigned	next_gc_bin;	/* Next bin to GC. */
	tcache_bin_t	tbins[1];	/* Dynamically sized. */
	/*
	 * The pointer stacks associated with tbins follow as a contiguous
	 * array.  During tcache initialization, the avail pointer in each
	 * element of tbins is initialized to point to the proper offset within
	 * this array.
	 */
};
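
/*
 * Illustrative layout (a sketch only; the actual computation lives in
 * tcache_create()):
 *
 *	tcache_t header | tbins[0 .. nhbins-1] | stack 0 | stack 1 | ...
 *
 * i.e. roughly:
 *
 *	size = offsetof(tcache_t, tbins) + nhbins * sizeof(tcache_bin_t);
 *	for (i = 0; i < nhbins; i++)
 *		size += tcache_bin_info[i].ncached_max * sizeof(void *);
 *
 * with each tbins[i].avail pointed at its slice of the trailing array.
 */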

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

extern bool	opt_tcache;
extern ssize_t	opt_lg_tcache_max;

extern tcache_bin_info_t	*tcache_bin_info;

/* Map of thread-specific caches. */
#ifdef JEMALLOC_TLS
extern __thread tcache_t	*tcache_tls
    JEMALLOC_ATTR(tls_model("initial-exec"));
#  define TCACHE_GET()	tcache_tls
#  define TCACHE_SET(v)	do {						\
	tcache_tls = (tcache_t *)(v);					\
	pthread_setspecific(tcache_tsd, (void *)(v));			\
} while (0)
#else
#  define TCACHE_GET()	((tcache_t *)pthread_getspecific(tcache_tsd))
#  define TCACHE_SET(v)	do {						\
	pthread_setspecific(tcache_tsd, (void *)(v));			\
} while (0)
#endif
extern pthread_key_t		tcache_tsd;
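
/*
 * Note: in the JEMALLOC_TLS case above, TCACHE_SET() writes the TSD slot as
 * well as the TLS variable so that the thread-exit destructor still fires.
 * A sketch of the registration (assumed to live in tcache_boot(), with the
 * destructor defined in tcache.c):
 *
 *	if (pthread_key_create(&tcache_tsd, tcache_thread_cleanup) != 0)
 *		return (true);
 */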

/*
 * Number of tcache bins.  There are NBINS small-object bins, plus 0 or more
 * large-object bins.
 */
extern size_t			nhbins;

/* Maximum cached size class. */
extern size_t			tcache_maxclass;

void	tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
    tcache_t *tcache);
void	tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
    tcache_t *tcache);
tcache_t *tcache_create(arena_t *arena);
void	*tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin,
    size_t binind);
void	tcache_destroy(tcache_t *tcache);
void	tcache_stats_merge(tcache_t *tcache, arena_t *arena);
bool	tcache_boot(void);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
void	tcache_event(tcache_t *tcache);
tcache_t *tcache_get(void);
void	*tcache_alloc_easy(tcache_bin_t *tbin);
void	*tcache_alloc_small(tcache_t *tcache, size_t size, bool zero);
void	*tcache_alloc_large(tcache_t *tcache, size_t size, bool zero);
void	tcache_dalloc_small(tcache_t *tcache, void *ptr);
void	tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TCACHE_C_))
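/*
 * The cached pointer doubles as a small state machine (states inferred from
 * the code below and the tcache_thread_cleanup() protocol):
 *
 *	NULL		No tcache yet; create one lazily.
 *	(uintptr_t)1	tcache_thread_cleanup() already ran; do not recreate.
 *	(uintptr_t)2	An allocator function ran after cleanup; recorded so
 *			that cleanup can make another pass.
 */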
JEMALLOC_INLINE tcache_t *
tcache_get(void)
{
	tcache_t *tcache;

	if (config_tcache == false)
		return (NULL);
	if (config_lazy_lock && (isthreaded & opt_tcache) == false)
		return (NULL);
	else if (opt_tcache == false)
		return (NULL);

	tcache = TCACHE_GET();
	if ((uintptr_t)tcache <= (uintptr_t)2) {
		if (tcache == NULL) {
			tcache = tcache_create(choose_arena());
			if (tcache == NULL)
				return (NULL);
		} else {
			if (tcache == (void *)(uintptr_t)1) {
				/*
				 * Make a note that an allocator function was
				 * called after tcache_thread_cleanup() was
				 * called.
				 */
				TCACHE_SET((uintptr_t)2);
			}
			return (NULL);
		}
	}

	return (tcache);
}

JEMALLOC_INLINE void
tcache_event(tcache_t *tcache)
{

	if (TCACHE_GC_INCR == 0)
		return;

	tcache->ev_cnt++;
	assert(tcache->ev_cnt <= TCACHE_GC_INCR);
	if (tcache->ev_cnt == TCACHE_GC_INCR) {
		size_t binind = tcache->next_gc_bin;
		tcache_bin_t *tbin = &tcache->tbins[binind];
		tcache_bin_info_t *tbin_info = &tcache_bin_info[binind];

		if (tbin->low_water > 0) {
			/*
			 * Flush (ceiling) 3/4 of the objects below the low
			 * water mark.
			 */
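			/*
			 * E.g. with ncached == 20 and low_water == 8, the
			 * remainder passed below is 20 - 8 + (8 >> 2) == 14,
			 * so 6 == ceil(3/4 * 8) objects get flushed.
			 */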
			if (binind < NBINS) {
				tcache_bin_flush_small(tbin, binind,
				    tbin->ncached - tbin->low_water +
				    (tbin->low_water >> 2), tcache);
			} else {
				tcache_bin_flush_large(tbin, binind,
				    tbin->ncached - tbin->low_water +
				    (tbin->low_water >> 2), tcache);
			}
			/*
			 * Reduce fill count by 2X.  Limit lg_fill_div such that
			 * the fill count is always at least 1.
			 */
			if ((tbin_info->ncached_max >> (tbin->lg_fill_div+1))
			    >= 1)
				tbin->lg_fill_div++;
		} else if (tbin->low_water < 0) {
			/*
			 * Increase fill count by 2X.  Make sure lg_fill_div
			 * stays greater than 0.
			 */
			if (tbin->lg_fill_div > 1)
				tbin->lg_fill_div--;
		}
		tbin->low_water = tbin->ncached;

		tcache->next_gc_bin++;
		if (tcache->next_gc_bin == nhbins)
			tcache->next_gc_bin = 0;
		tcache->ev_cnt = 0;
	}
}

JEMALLOC_INLINE void *
tcache_alloc_easy(tcache_bin_t *tbin)
{
	void *ret;

	if (tbin->ncached == 0) {
		tbin->low_water = -1;
		return (NULL);
	}
	tbin->ncached--;
	if ((int)tbin->ncached < tbin->low_water)
		tbin->low_water = tbin->ncached;
	ret = tbin->avail[tbin->ncached];
	return (ret);
}

JEMALLOC_INLINE void *
tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
{
	void *ret;
	size_t binind;
	tcache_bin_t *tbin;

	binind = SMALL_SIZE2BIN(size);
	assert(binind < NBINS);
	tbin = &tcache->tbins[binind];
	ret = tcache_alloc_easy(tbin);
	if (ret == NULL) {
		ret = tcache_alloc_small_hard(tcache, tbin, binind);
		if (ret == NULL)
			return (NULL);
	}
	assert(arena_salloc(ret) == arena_bin_info[binind].reg_size);

	if (zero == false) {
		if (config_fill) {
			if (opt_junk)
				memset(ret, 0xa5, size);
			else if (opt_zero)
				memset(ret, 0, size);
		}
	} else
		memset(ret, 0, size);

	if (config_stats)
		tbin->tstats.nrequests++;
	if (config_prof)
		tcache->prof_accumbytes += arena_bin_info[binind].reg_size;
	tcache_event(tcache);
	return (ret);
}

JEMALLOC_INLINE void *
tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
{
	void *ret;
	size_t binind;
	tcache_bin_t *tbin;

	size = PAGE_CEILING(size);
	assert(size <= tcache_maxclass);
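	/*
	 * Large bins follow the NBINS small bins and are indexed by page
	 * count: a one-page object maps to bin NBINS, a two-page object to
	 * bin NBINS + 1, and so on up to tcache_maxclass.
	 */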
	binind = NBINS + (size >> PAGE_SHIFT) - 1;
	assert(binind < nhbins);
	tbin = &tcache->tbins[binind];
	ret = tcache_alloc_easy(tbin);
	if (ret == NULL) {
		/*
		 * Only allocate one large object at a time, because it's quite
		 * expensive to create one and not use it.
		 */
		ret = arena_malloc_large(tcache->arena, size, zero);
		if (ret == NULL)
			return (NULL);
	} else {
		if (config_prof) {
			arena_chunk_t *chunk =
			    (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
			size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >>
			    PAGE_SHIFT);
			chunk->map[pageind-map_bias].bits &=
			    ~CHUNK_MAP_CLASS_MASK;
		}
		if (zero == false) {
			if (config_fill) {
				if (opt_junk)
					memset(ret, 0xa5, size);
				else if (opt_zero)
					memset(ret, 0, size);
			}
		} else
			memset(ret, 0, size);

		if (config_stats)
			tbin->tstats.nrequests++;
		if (config_prof)
			tcache->prof_accumbytes += size;
	}

	tcache_event(tcache);
	return (ret);
}

JEMALLOC_INLINE void
tcache_dalloc_small(tcache_t *tcache, void *ptr)
{
	arena_t *arena;
	arena_chunk_t *chunk;
	arena_run_t *run;
	arena_bin_t *bin;
	tcache_bin_t *tbin;
	tcache_bin_info_t *tbin_info;
	size_t pageind, binind;
	arena_chunk_map_t *mapelm;

	assert(arena_salloc(ptr) <= SMALL_MAXCLASS);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	arena = chunk->arena;
	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
	mapelm = &chunk->map[pageind-map_bias];
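	/*
	 * For a page backing a small run, the high bits of mapelm->bits hold
	 * the page's offset within its run, so subtracting that offset
	 * recovers the run header; the bin's position within arena->bins then
	 * yields binind.
	 */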
	run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
	    (mapelm->bits >> PAGE_SHIFT)) << PAGE_SHIFT));
	bin = run->bin;
	binind = ((uintptr_t)bin - (uintptr_t)&arena->bins) /
	    sizeof(arena_bin_t);
	assert(binind < NBINS);

	if (config_fill && opt_junk)
		memset(ptr, 0x5a, arena_bin_info[binind].reg_size);

	tbin = &tcache->tbins[binind];
	tbin_info = &tcache_bin_info[binind];
	if (tbin->ncached == tbin_info->ncached_max) {
		tcache_bin_flush_small(tbin, binind, (tbin_info->ncached_max >>
		    1), tcache);
	}
	assert(tbin->ncached < tbin_info->ncached_max);
	tbin->avail[tbin->ncached] = ptr;
	tbin->ncached++;

	tcache_event(tcache);
}

JEMALLOC_INLINE void
tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size)
{
	size_t binind;
	tcache_bin_t *tbin;
	tcache_bin_info_t *tbin_info;

	assert((size & PAGE_MASK) == 0);
	assert(arena_salloc(ptr) > SMALL_MAXCLASS);
	assert(arena_salloc(ptr) <= tcache_maxclass);

	binind = NBINS + (size >> PAGE_SHIFT) - 1;

	if (config_fill && opt_junk)
		memset(ptr, 0x5a, size);

	tbin = &tcache->tbins[binind];
	tbin_info = &tcache_bin_info[binind];
	if (tbin->ncached == tbin_info->ncached_max) {
		tcache_bin_flush_large(tbin, binind, (tbin_info->ncached_max >>
		    1), tcache);
	}
	assert(tbin->ncached < tbin_info->ncached_max);
	tbin->avail[tbin->ncached] = ptr;
	tbin->ncached++;

	tcache_event(tcache);
}
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
