tcache.h revision 962463d9b57bcc65de2fa108a691b4183b9b2faf
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

typedef struct tcache_bin_info_s tcache_bin_info_t;
typedef struct tcache_bin_s tcache_bin_t;
typedef struct tcache_s tcache_t;

/*
 * Absolute maximum number of cache slots for each small bin in the thread
 * cache.  This is an additional constraint beyond the per-size-class limit of
 * twice the number of regions per run.
 *
 * This constant must be an even number.
 */
#define	TCACHE_NSLOTS_SMALL_MAX		200

/* Number of cache slots for large size classes. */
#define	TCACHE_NSLOTS_LARGE		20

/* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */
#define	LG_TCACHE_MAXCLASS_DEFAULT	15
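/* With the default of 15, that value is 1U << 15 == 32768 bytes (32 KiB). */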

/*
 * (1U << opt_lg_tcache_gc_sweep) is the approximate number of allocation
 * events between full GC sweeps (-1: disabled).  Integer rounding may cause
 * the actual number to be slightly higher, since GC is performed
 * incrementally.
 */
#define	LG_TCACHE_GC_SWEEP_DEFAULT	13
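/*
 * Illustrative sketch (an assumption, not taken from this header): the boot
 * code presumably spreads the sweep interval across the bins, so that one bin
 * is swept every tcache_gc_incr events; rounding that quotient up is what
 * makes the actual sweep slightly longer than 1U << opt_lg_tcache_gc_sweep
 * allocation events.
 */
#if 0
/* Hypothetical helper; the GC-disabled (-1) case is not handled here. */
static unsigned
tcache_gc_incr_sketch(unsigned lg_gc_sweep, unsigned nbins)
{
	unsigned sweep = 1U << lg_gc_sweep;	/* 8192 for the default of 13. */

	/* Ceiling division: a full pass over all bins covers >= sweep events. */
	return ((sweep + nbins - 1) / nbins);
}
#endif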

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

/*
 * Read-only information associated with each element of tcache_t's tbins array
 * is stored separately, mainly to reduce memory usage.
 */
struct tcache_bin_info_s {
	unsigned	ncached_max;	/* Upper limit on ncached. */
};

struct tcache_bin_s {
	tcache_bin_stats_t tstats;
	int		low_water;	/* Min # cached since last GC. */
	unsigned	lg_fill_div;	/* Fill (ncached_max >> lg_fill_div). */
	unsigned	ncached;	/* # of cached objects. */
	void		**avail;	/* Stack of available objects. */
};

struct tcache_s {
	ql_elm(tcache_t) link;		/* Used for aggregating stats. */
	uint64_t	prof_accumbytes;/* Cleared after arena_prof_accum() */
	arena_t		*arena;		/* This thread's arena. */
	unsigned	ev_cnt;		/* Event count since incremental GC. */
	unsigned	next_gc_bin;	/* Next bin to GC. */
	tcache_bin_t	tbins[1];	/* Dynamically sized. */
	/*
	 * The pointer stacks associated with tbins follow as a contiguous
	 * array.  During tcache initialization, the avail pointer in each
	 * element of tbins is initialized to point to the proper offset within
	 * this array.
	 */
};
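/*
 * A minimal sketch of the layout described above (illustrative only; the
 * authoritative setup happens during tcache creation, see tcache_create()):
 *
 *   tcache_t header | tbins[0 .. nhbins-1] | avail stack 0 | avail stack 1 | ...
 */
#if 0
static void
tcache_avail_layout_sketch(tcache_t *tcache)
{
	/* The pointer stacks begin immediately after the tbins array. */
	void **stack = (void **)((uintptr_t)tcache +
	    offsetof(tcache_t, tbins) + nhbins * sizeof(tcache_bin_t));
	size_t i;

	/* Bin i gets room for tcache_bin_info[i].ncached_max pointers. */
	for (i = 0; i < nhbins; i++) {
		tcache->tbins[i].avail = stack;
		stack += tcache_bin_info[i].ncached_max;
	}
}
#endif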

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

extern bool	opt_tcache;
extern ssize_t	opt_lg_tcache_max;
extern ssize_t	opt_lg_tcache_gc_sweep;

extern tcache_bin_info_t	*tcache_bin_info;

/* Map of thread-specific caches. */
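/*
 * Note that even when TLS is available, TCACHE_SET() below also stores the
 * value with pthread_setspecific(), so that any destructor registered for
 * tcache_tsd (via pthread_key_create()) still runs at thread exit.
 */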
#ifndef NO_TLS
extern __thread tcache_t	*tcache_tls
    JEMALLOC_ATTR(tls_model("initial-exec"));
#  define TCACHE_GET()	tcache_tls
#  define TCACHE_SET(v)	do {						\
	tcache_tls = (tcache_t *)(v);					\
	pthread_setspecific(tcache_tsd, (void *)(v));			\
} while (0)
#else
#  define TCACHE_GET()	((tcache_t *)pthread_getspecific(tcache_tsd))
#  define TCACHE_SET(v)	do {						\
	pthread_setspecific(tcache_tsd, (void *)(v));			\
} while (0)
#endif
extern pthread_key_t		tcache_tsd;

/*
 * Number of tcache bins.  There are nbins small-object bins, plus 0 or more
 * large-object bins.
 */
extern size_t			nhbins;

/* Maximum cached size class. */
extern size_t			tcache_maxclass;

/* Number of tcache allocation/deallocation events between incremental GCs. */
extern unsigned			tcache_gc_incr;

void	tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
    tcache_t *tcache);
void	tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
    tcache_t *tcache);
tcache_t *tcache_create(arena_t *arena);
void	*tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin,
    size_t binind);
void	tcache_destroy(tcache_t *tcache);
void	tcache_stats_merge(tcache_t *tcache, arena_t *arena);
bool	tcache_boot(void);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
void	tcache_event(tcache_t *tcache);
tcache_t *tcache_get(void);
void	*tcache_alloc_easy(tcache_bin_t *tbin);
void	*tcache_alloc_small(tcache_t *tcache, size_t size, bool zero);
void	*tcache_alloc_large(tcache_t *tcache, size_t size, bool zero);
void	tcache_dalloc_small(tcache_t *tcache, void *ptr);
void	tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TCACHE_C_))
JEMALLOC_INLINE tcache_t *
tcache_get(void)
{
	tcache_t *tcache;

	if (config_tcache == false)
		return (NULL);
	if (config_lazy_lock && (isthreaded & opt_tcache) == false)
		return (NULL);
	else if (opt_tcache == false)
		return (NULL);

	tcache = TCACHE_GET();
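	/*
	 * Sentinel values: NULL means this thread has no tcache yet (one is
	 * created on demand below), (uintptr_t)1 means tcache_thread_cleanup()
	 * already ran for this thread, and (uintptr_t)2 records that an
	 * allocator function was called after cleanup.  Any larger value is a
	 * real tcache pointer.
	 */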
	if ((uintptr_t)tcache <= (uintptr_t)2) {
		if (tcache == NULL) {
			tcache = tcache_create(choose_arena());
			if (tcache == NULL)
				return (NULL);
		} else {
			if (tcache == (void *)(uintptr_t)1) {
				/*
				 * Make a note that an allocator function was
				 * called after the tcache_thread_cleanup() was
				 * called.
				 */
				TCACHE_SET((uintptr_t)2);
			}
			return (NULL);
		}
	}

	return (tcache);
}

JEMALLOC_INLINE void
tcache_event(tcache_t *tcache)
{

	if (tcache_gc_incr == 0)
		return;

	tcache->ev_cnt++;
	assert(tcache->ev_cnt <= tcache_gc_incr);
	if (tcache->ev_cnt == tcache_gc_incr) {
		size_t binind = tcache->next_gc_bin;
		tcache_bin_t *tbin = &tcache->tbins[binind];
		tcache_bin_info_t *tbin_info = &tcache_bin_info[binind];

		if (tbin->low_water > 0) {
			/*
			 * Flush (ceiling) 3/4 of the objects below the low
			 * water mark.
			 */
			if (binind < nbins) {
				tcache_bin_flush_small(tbin, binind,
				    tbin->ncached - tbin->low_water +
				    (tbin->low_water >> 2), tcache);
			} else {
				tcache_bin_flush_large(tbin, binind,
				    tbin->ncached - tbin->low_water +
				    (tbin->low_water >> 2), tcache);
			}
			/*
			 * Reduce fill count by 2X.  Limit lg_fill_div such that
			 * the fill count is always at least 1.
			 */
			if ((tbin_info->ncached_max >> (tbin->lg_fill_div+1))
			    >= 1)
				tbin->lg_fill_div++;
		} else if (tbin->low_water < 0) {
			/*
			 * Increase fill count by 2X.  Make sure lg_fill_div
			 * stays greater than 0.
			 */
			if (tbin->lg_fill_div > 1)
				tbin->lg_fill_div--;
		}
		tbin->low_water = tbin->ncached;

		tcache->next_gc_bin++;
		if (tcache->next_gc_bin == nhbins)
			tcache->next_gc_bin = 0;
		tcache->ev_cnt = 0;
	}
}
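
/*
 * Worked example for the GC flush above (illustrative; the values are
 * arbitrary): the third argument to tcache_bin_flush_*() is the number of
 * objects that remain cached afterward.
 */
#if 0
/* Hypothetical helper mirroring the expression used in tcache_event(). */
static unsigned
tcache_gc_flush_rem(unsigned ncached, int low_water)
{

	/*
	 * With ncached == 20 and low_water == 8:
	 *   rem     = 20 - 8 + (8 >> 2) = 14
	 *   flushed = ncached - rem = 6, i.e. 3/4 (rounded up) of the 8
	 *   objects that sat unused below the low water mark.
	 */
	return (ncached - low_water + (low_water >> 2));
}
#endif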

JEMALLOC_INLINE void *
tcache_alloc_easy(tcache_bin_t *tbin)
{
	void *ret;

	if (tbin->ncached == 0) {
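		/*
		 * A low_water of -1 records that the bin ran completely
		 * empty; tcache_event() reacts by lowering lg_fill_div, i.e.
		 * by doubling the fill count for this bin.
		 */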
		tbin->low_water = -1;
		return (NULL);
	}
	tbin->ncached--;
	if ((int)tbin->ncached < tbin->low_water)
		tbin->low_water = tbin->ncached;
	ret = tbin->avail[tbin->ncached];
	return (ret);
}

JEMALLOC_INLINE void *
tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
{
	void *ret;
	size_t binind;
	tcache_bin_t *tbin;

	binind = SMALL_SIZE2BIN(size);
	assert(binind < nbins);
	tbin = &tcache->tbins[binind];
	ret = tcache_alloc_easy(tbin);
	if (ret == NULL) {
		ret = tcache_alloc_small_hard(tcache, tbin, binind);
		if (ret == NULL)
			return (NULL);
	}
	assert(arena_salloc(ret) == arena_bin_info[binind].reg_size);

	if (zero == false) {
		if (config_fill) {
			if (opt_junk)
				memset(ret, 0xa5, size);
			else if (opt_zero)
				memset(ret, 0, size);
		}
	} else
		memset(ret, 0, size);

	if (config_stats)
		tbin->tstats.nrequests++;
	if (config_prof)
		tcache->prof_accumbytes += arena_bin_info[binind].reg_size;
	tcache_event(tcache);
	return (ret);
}

JEMALLOC_INLINE void *
tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
{
	void *ret;
	size_t binind;
	tcache_bin_t *tbin;

	size = PAGE_CEILING(size);
	assert(size <= tcache_maxclass);
	binind = nbins + (size >> PAGE_SHIFT) - 1;
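	/*
	 * Large bins follow the small bins and are indexed by page count:
	 * e.g. with 4 KiB pages, a one-page request maps to bin nbins, a
	 * two-page request to nbins + 1, and so on up to tcache_maxclass.
	 */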
	assert(binind < nhbins);
	tbin = &tcache->tbins[binind];
	ret = tcache_alloc_easy(tbin);
	if (ret == NULL) {
		/*
		 * Only allocate one large object at a time, because it's quite
		 * expensive to create one and not use it.
		 */
		ret = arena_malloc_large(tcache->arena, size, zero);
		if (ret == NULL)
			return (NULL);
	} else {
		if (config_prof) {
			arena_chunk_t *chunk =
			    (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
			size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >>
			    PAGE_SHIFT);
			chunk->map[pageind-map_bias].bits &=
			    ~CHUNK_MAP_CLASS_MASK;
		}
		if (zero == false) {
			if (config_fill) {
				if (opt_junk)
					memset(ret, 0xa5, size);
				else if (opt_zero)
					memset(ret, 0, size);
			}
		} else
			memset(ret, 0, size);

		if (config_stats)
			tbin->tstats.nrequests++;
		if (config_prof)
			tcache->prof_accumbytes += size;
	}

	tcache_event(tcache);
	return (ret);
}

JEMALLOC_INLINE void
tcache_dalloc_small(tcache_t *tcache, void *ptr)
{
	arena_t *arena;
	arena_chunk_t *chunk;
	arena_run_t *run;
	arena_bin_t *bin;
	tcache_bin_t *tbin;
	tcache_bin_info_t *tbin_info;
	size_t pageind, binind;
	arena_chunk_map_t *mapelm;

	assert(arena_salloc(ptr) <= small_maxclass);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	arena = chunk->arena;
	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
	mapelm = &chunk->map[pageind-map_bias];
	run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
	    (mapelm->bits >> PAGE_SHIFT)) << PAGE_SHIFT));
	bin = run->bin;
	binind = ((uintptr_t)bin - (uintptr_t)&arena->bins) /
	    sizeof(arena_bin_t);
	assert(binind < nbins);

	if (config_fill && opt_junk)
		memset(ptr, 0x5a, arena_bin_info[binind].reg_size);

	tbin = &tcache->tbins[binind];
	tbin_info = &tcache_bin_info[binind];
	if (tbin->ncached == tbin_info->ncached_max) {
		tcache_bin_flush_small(tbin, binind, (tbin_info->ncached_max >>
		    1), tcache);
	}
	assert(tbin->ncached < tbin_info->ncached_max);
	tbin->avail[tbin->ncached] = ptr;
	tbin->ncached++;

	tcache_event(tcache);
}

JEMALLOC_INLINE void
tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size)
{
	arena_t *arena;
	arena_chunk_t *chunk;
	size_t pageind, binind;
	tcache_bin_t *tbin;
	tcache_bin_info_t *tbin_info;

	assert((size & PAGE_MASK) == 0);
	assert(arena_salloc(ptr) > small_maxclass);
	assert(arena_salloc(ptr) <= tcache_maxclass);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	arena = chunk->arena;
	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
	binind = nbins + (size >> PAGE_SHIFT) - 1;

	if (config_fill && opt_junk)
		memset(ptr, 0x5a, size);

	tbin = &tcache->tbins[binind];
	tbin_info = &tcache_bin_info[binind];
	if (tbin->ncached == tbin_info->ncached_max) {
		tcache_bin_flush_large(tbin, binind, (tbin_info->ncached_max >>
		    1), tcache);
	}
	assert(tbin->ncached < tbin_info->ncached_max);
	tbin->avail[tbin->ncached] = ptr;
	tbin->ncached++;

	tcache_event(tcache);
}
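
/*
 * Usage sketch (illustrative): how a caller might route an allocation through
 * the fast paths above.  The wrapper below is hypothetical; the real callers
 * are the arena/malloc fast paths elsewhere in jemalloc.
 */
#if 0
static void *
tcache_malloc_sketch(size_t size)
{
	tcache_t *tcache = tcache_get();

	if (tcache == NULL)
		return (NULL);		/* Fall back to the arena. */
	if (size <= small_maxclass)
		return (tcache_alloc_small(tcache, size, false));
	if (size <= tcache_maxclass)
		return (tcache_alloc_large(tcache, size, false));
	return (NULL);			/* Too large to cache. */
}
#endif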
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/