tcache.h revision 7372b15a31c63ac5cb9ed8aeabc2a0a3c005e8bf
#ifdef JEMALLOC_TCACHE
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

typedef struct tcache_bin_info_s tcache_bin_info_t;
typedef struct tcache_bin_s tcache_bin_t;
typedef struct tcache_s tcache_t;

/*
 * Absolute maximum number of cache slots for each small bin in the thread
 * cache.  This is an additional constraint beyond the per-size-class limit of
 * twice the number of regions per run.
 *
 * This constant must be an even number.
 */
#define	TCACHE_NSLOTS_SMALL_MAX		200

/* Number of cache slots for large size classes. */
#define	TCACHE_NSLOTS_LARGE		20

/* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */
#define	LG_TCACHE_MAXCLASS_DEFAULT	15
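
/*
 * For example, the default of 15 corresponds to 1U << 15 == 32 KiB; the
 * final tcache_maxclass value is computed (and bounds-checked against the
 * available size classes) in tcache_boot().
 */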

/*
 * (1U << opt_lg_tcache_gc_sweep) is the approximate number of allocation
 * events between full GC sweeps (-1: disabled).  Integer rounding may cause
 * the actual number to be slightly higher, since GC is performed
 * incrementally.
 */
#define	LG_TCACHE_GC_SWEEP_DEFAULT	13
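
/*
 * Worked example for the default of 13: a full sweep spans roughly
 * 1U << 13 == 8192 allocation events.  Sweeping is incremental (one bin is
 * processed every tcache_gc_incr events, cycling through all nhbins bins;
 * see tcache_event()), which is why the actual interval can be slightly
 * longer.
 */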

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

/*
 * Read-only information associated with each element of tcache_t's tbins array
 * is stored separately, mainly to reduce memory usage.
 */
struct tcache_bin_info_s {
	unsigned	ncached_max;	/* Upper limit on ncached. */
};

struct tcache_bin_s {
	tcache_bin_stats_t tstats;
	int		low_water;	/* Min # cached since last GC. */
	unsigned	lg_fill_div;	/* Fill (ncached_max >> lg_fill_div). */
	unsigned	ncached;	/* # of cached objects. */
	void		**avail;	/* Stack of available objects. */
};

struct tcache_s {
	ql_elm(tcache_t) link;		/* Used for aggregating stats. */
	uint64_t	prof_accumbytes;/* Cleared after arena_prof_accum() */
	arena_t		*arena;		/* This thread's arena. */
	unsigned	ev_cnt;		/* Event count since incremental GC. */
	unsigned	next_gc_bin;	/* Next bin to GC. */
	tcache_bin_t	tbins[1];	/* Dynamically sized. */
	/*
	 * The pointer stacks associated with tbins follow as a contiguous
	 * array.  During tcache initialization, the avail pointer in each
	 * element of tbins is initialized to point to the proper offset within
	 * this array.
	 */
};
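
/*
 * Illustrative layout sketch (a simplified, hypothetical version of the
 * initialization performed in tcache_create(); names such as stack_offset,
 * avail_base and nelms are made up here):
 *
 *	stack_offset = offsetof(tcache_t, tbins) +
 *	    nhbins * sizeof(tcache_bin_t);
 *	avail_base = (void **)((uintptr_t)tcache + stack_offset);
 *	for (i = 0, nelms = 0; i < nhbins; i++) {
 *		tcache->tbins[i].avail = &avail_base[nelms];
 *		nelms += tcache_bin_info[i].ncached_max;
 *	}
 *
 * That is, a single allocation holds the tcache_t header, the nhbins
 * tcache_bin_t elements, and all of the avail stacks back to back.
 */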

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

extern bool	opt_tcache;
extern ssize_t	opt_lg_tcache_max;
extern ssize_t	opt_lg_tcache_gc_sweep;

extern tcache_bin_info_t	*tcache_bin_info;

/* Map of thread-specific caches. */
#ifndef NO_TLS
extern __thread tcache_t	*tcache_tls
    JEMALLOC_ATTR(tls_model("initial-exec"));
#  define TCACHE_GET()	tcache_tls
#  define TCACHE_SET(v)	do {						\
	tcache_tls = (tcache_t *)(v);					\
	pthread_setspecific(tcache_tsd, (void *)(v));			\
} while (0)
#else
#  define TCACHE_GET()	((tcache_t *)pthread_getspecific(tcache_tsd))
#  define TCACHE_SET(v)	do {						\
	pthread_setspecific(tcache_tsd, (void *)(v));			\
} while (0)
#endif
extern pthread_key_t		tcache_tsd;
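
/*
 * Usage sketch (not the authoritative logic, which lives in tcache_get()
 * below and in tcache_thread_cleanup() in tcache.c): the per-thread value
 * doubles as a small state machine.
 *
 *	TCACHE_SET(tcache);		A cache was created for this thread.
 *	TCACHE_SET((uintptr_t)1);	tcache_thread_cleanup() has run.
 *	TCACHE_SET((uintptr_t)2);	An allocation occurred after cleanup.
 *
 * tcache_get() treats any value <= 2 as "no usable cache".
 */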

/*
 * Number of tcache bins.  There are nbins small-object bins, plus 0 or more
 * large-object bins.
 */
extern size_t			nhbins;

/* Maximum cached size class. */
extern size_t			tcache_maxclass;

/* Number of tcache allocation/deallocation events between incremental GCs. */
extern unsigned			tcache_gc_incr;

void	tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
    tcache_t *tcache);
void	tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
    tcache_t *tcache);
tcache_t *tcache_create(arena_t *arena);
void	*tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin,
    size_t binind);
void	tcache_destroy(tcache_t *tcache);
void	tcache_stats_merge(tcache_t *tcache, arena_t *arena);
bool	tcache_boot(void);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
void	tcache_event(tcache_t *tcache);
tcache_t *tcache_get(void);
void	*tcache_alloc_easy(tcache_bin_t *tbin);
void	*tcache_alloc_small(tcache_t *tcache, size_t size, bool zero);
void	*tcache_alloc_large(tcache_t *tcache, size_t size, bool zero);
void	tcache_dalloc_small(tcache_t *tcache, void *ptr);
void	tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TCACHE_C_))
JEMALLOC_INLINE tcache_t *
tcache_get(void)
{
	tcache_t *tcache;

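	/*
	 * The tcache is only used when the process is threaded and the
	 * opt_tcache option is enabled.
	 */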
	if ((isthreaded & opt_tcache) == false)
		return (NULL);

	tcache = TCACHE_GET();
	if ((uintptr_t)tcache <= (uintptr_t)2) {
		if (tcache == NULL) {
			tcache = tcache_create(choose_arena());
			if (tcache == NULL)
				return (NULL);
		} else {
			if (tcache == (void *)(uintptr_t)1) {
				/*
				 * Make a note that an allocator function was
				 * called after tcache_thread_cleanup() was
				 * called.
				 */
				TCACHE_SET((uintptr_t)2);
			}
			return (NULL);
		}
	}

	return (tcache);
}

JEMALLOC_INLINE void
tcache_event(tcache_t *tcache)
{

	if (tcache_gc_incr == 0)
		return;

	tcache->ev_cnt++;
	assert(tcache->ev_cnt <= tcache_gc_incr);
	if (tcache->ev_cnt == tcache_gc_incr) {
		size_t binind = tcache->next_gc_bin;
		tcache_bin_t *tbin = &tcache->tbins[binind];
		tcache_bin_info_t *tbin_info = &tcache_bin_info[binind];

		if (tbin->low_water > 0) {
			/*
			 * Flush (ceiling) 3/4 of the objects below the low
			 * water mark.
			 */
			if (binind < nbins) {
				tcache_bin_flush_small(tbin, binind,
				    tbin->ncached - tbin->low_water +
				    (tbin->low_water >> 2), tcache);
			} else {
				tcache_bin_flush_large(tbin, binind,
				    tbin->ncached - tbin->low_water +
				    (tbin->low_water >> 2), tcache);
			}
			/*
			 * Reduce fill count by 2X.  Limit lg_fill_div such that
			 * the fill count is always at least 1.
			 */
			if ((tbin_info->ncached_max >> (tbin->lg_fill_div+1))
			    >= 1)
				tbin->lg_fill_div++;
		} else if (tbin->low_water < 0) {
			/*
			 * Increase fill count by 2X.  Make sure lg_fill_div
			 * stays greater than 0.
			 */
			if (tbin->lg_fill_div > 1)
				tbin->lg_fill_div--;
		}
		tbin->low_water = tbin->ncached;

		tcache->next_gc_bin++;
		if (tcache->next_gc_bin == nhbins)
			tcache->next_gc_bin = 0;
		tcache->ev_cnt = 0;
	}
}
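
/*
 * Worked example for the heuristic above: if a bin's low water mark is 10
 * with 16 objects currently cached, the flush call asks for
 * 16 - 10 + (10 >> 2) == 8 objects to remain, i.e. 8 objects (3/4 of the
 * low water mark, rounded up) are flushed, and lg_fill_div is bumped so
 * that the next fill brings in half as many objects as before (provided
 * ncached_max >> (lg_fill_div+1) >= 1).
 */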

JEMALLOC_INLINE void *
tcache_alloc_easy(tcache_bin_t *tbin)
{
	void *ret;

	if (tbin->ncached == 0) {
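		/*
		 * A negative low water mark records that the bin ran empty;
		 * tcache_event() responds by increasing the fill count.
		 */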
		tbin->low_water = -1;
		return (NULL);
	}
	tbin->ncached--;
	if ((int)tbin->ncached < tbin->low_water)
		tbin->low_water = tbin->ncached;
	ret = tbin->avail[tbin->ncached];
	return (ret);
}

JEMALLOC_INLINE void *
tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
{
	void *ret;
	size_t binind;
	tcache_bin_t *tbin;

	binind = SMALL_SIZE2BIN(size);
	assert(binind < nbins);
	tbin = &tcache->tbins[binind];
	ret = tcache_alloc_easy(tbin);
	if (ret == NULL) {
		ret = tcache_alloc_small_hard(tcache, tbin, binind);
		if (ret == NULL)
			return (NULL);
	}
	assert(arena_salloc(ret) == arena_bin_info[binind].reg_size);

	if (zero == false) {
		if (config_fill) {
			if (opt_junk)
				memset(ret, 0xa5, size);
			else if (opt_zero)
				memset(ret, 0, size);
		}
	} else
		memset(ret, 0, size);

	if (config_stats)
		tbin->tstats.nrequests++;
	if (config_prof)
		tcache->prof_accumbytes += arena_bin_info[binind].reg_size;
	tcache_event(tcache);
	return (ret);
}

JEMALLOC_INLINE void *
tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
{
	void *ret;
	size_t binind;
	tcache_bin_t *tbin;

	size = PAGE_CEILING(size);
	assert(size <= tcache_maxclass);
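	/*
	 * Large size classes are spaced one page apart, so (assuming 4 KiB
	 * pages) an 8 KiB request maps to binind == nbins + 1.
	 */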
	binind = nbins + (size >> PAGE_SHIFT) - 1;
	assert(binind < nhbins);
	tbin = &tcache->tbins[binind];
	ret = tcache_alloc_easy(tbin);
	if (ret == NULL) {
		/*
		 * Only allocate one large object at a time, because it's quite
		 * expensive to create one and not use it.
		 */
		ret = arena_malloc_large(tcache->arena, size, zero);
		if (ret == NULL)
			return (NULL);
	} else {
		if (config_prof) {
			arena_chunk_t *chunk =
			    (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
			size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >>
			    PAGE_SHIFT);
			chunk->map[pageind-map_bias].bits &=
			    ~CHUNK_MAP_CLASS_MASK;
		}
		if (zero == false) {
			if (config_fill) {
				if (opt_junk)
					memset(ret, 0xa5, size);
				else if (opt_zero)
					memset(ret, 0, size);
			}
		} else
			memset(ret, 0, size);

		if (config_stats)
			tbin->tstats.nrequests++;
		if (config_prof)
			tcache->prof_accumbytes += size;
	}

	tcache_event(tcache);
	return (ret);
}

JEMALLOC_INLINE void
tcache_dalloc_small(tcache_t *tcache, void *ptr)
{
	arena_t *arena;
	arena_chunk_t *chunk;
	arena_run_t *run;
	arena_bin_t *bin;
	tcache_bin_t *tbin;
	tcache_bin_info_t *tbin_info;
	size_t pageind, binind;
	arena_chunk_map_t *mapelm;

	assert(arena_salloc(ptr) <= small_maxclass);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	arena = chunk->arena;
	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
	mapelm = &chunk->map[pageind-map_bias];
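	/*
	 * The map bits above PAGE_SHIFT hold this page's offset within its
	 * run, so subtracting that offset from pageind recovers the run
	 * header; the run identifies the bin, and the bin's position within
	 * arena->bins yields binind.
	 */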
	run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
	    (mapelm->bits >> PAGE_SHIFT)) << PAGE_SHIFT));
	assert(run->magic == ARENA_RUN_MAGIC);
	bin = run->bin;
	binind = ((uintptr_t)bin - (uintptr_t)&arena->bins) /
	    sizeof(arena_bin_t);
	assert(binind < nbins);

	if (config_fill && opt_junk)
		memset(ptr, 0x5a, arena_bin_info[binind].reg_size);

	tbin = &tcache->tbins[binind];
	tbin_info = &tcache_bin_info[binind];
	if (tbin->ncached == tbin_info->ncached_max) {
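		/*
		 * The bin is full; flush so that half of the ncached_max
		 * slots remain cached before inserting this object.
		 */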
		tcache_bin_flush_small(tbin, binind, (tbin_info->ncached_max >>
		    1), tcache);
	}
	assert(tbin->ncached < tbin_info->ncached_max);
	tbin->avail[tbin->ncached] = ptr;
	tbin->ncached++;

	tcache_event(tcache);
}

JEMALLOC_INLINE void
tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size)
{
	arena_t *arena;
	arena_chunk_t *chunk;
	size_t pageind, binind;
	tcache_bin_t *tbin;
	tcache_bin_info_t *tbin_info;

	assert((size & PAGE_MASK) == 0);
	assert(arena_salloc(ptr) > small_maxclass);
	assert(arena_salloc(ptr) <= tcache_maxclass);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	arena = chunk->arena;
	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
	binind = nbins + (size >> PAGE_SHIFT) - 1;

	if (config_fill && opt_junk)
		memset(ptr, 0x5a, size);

	tbin = &tcache->tbins[binind];
	tbin_info = &tcache_bin_info[binind];
	if (tbin->ncached == tbin_info->ncached_max) {
		tcache_bin_flush_large(tbin, binind, (tbin_info->ncached_max >>
		    1), tcache);
	}
	assert(tbin->ncached < tbin_info->ncached_max);
	tbin->avail[tbin->ncached] = ptr;
	tbin->ncached++;

	tcache_event(tcache);
}
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
#endif /* JEMALLOC_TCACHE */