/* ctl.c — revision e7b8fa18d256e0bc18b61ee03b69af87fa3d7969 */
124943d2ee8bfaa7cf5893e4709143924157a5c1eChris Lattner#define	JEMALLOC_CTL_C_
224943d2ee8bfaa7cf5893e4709143924157a5c1eChris Lattner#include "jemalloc/internal/jemalloc_internal.h"
324943d2ee8bfaa7cf5893e4709143924157a5c1eChris Lattner
424943d2ee8bfaa7cf5893e4709143924157a5c1eChris Lattner/******************************************************************************/
524943d2ee8bfaa7cf5893e4709143924157a5c1eChris Lattner/* Data. */
624943d2ee8bfaa7cf5893e4709143924157a5c1eChris Lattner
724943d2ee8bfaa7cf5893e4709143924157a5c1eChris Lattner/*
824943d2ee8bfaa7cf5893e4709143924157a5c1eChris Lattner * ctl_mtx protects the following:
924943d2ee8bfaa7cf5893e4709143924157a5c1eChris Lattner * - ctl_stats.*
1024943d2ee8bfaa7cf5893e4709143924157a5c1eChris Lattner * - opt_prof_active
1124943d2ee8bfaa7cf5893e4709143924157a5c1eChris Lattner */
1224943d2ee8bfaa7cf5893e4709143924157a5c1eChris Lattnerstatic malloc_mutex_t	ctl_mtx;
135f81547fd786584b10999c087528b323b5945896Eli Friedmanstatic bool		ctl_initialized;
1424943d2ee8bfaa7cf5893e4709143924157a5c1eChris Lattnerstatic uint64_t		ctl_epoch;
1524943d2ee8bfaa7cf5893e4709143924157a5c1eChris Lattnerstatic ctl_stats_t	ctl_stats;
1624943d2ee8bfaa7cf5893e4709143924157a5c1eChris Lattner
1724943d2ee8bfaa7cf5893e4709143924157a5c1eChris Lattner/******************************************************************************/
1824943d2ee8bfaa7cf5893e4709143924157a5c1eChris Lattner/* Function prototypes for non-inline static functions. */
1924943d2ee8bfaa7cf5893e4709143924157a5c1eChris Lattner
2024943d2ee8bfaa7cf5893e4709143924157a5c1eChris Lattner#define	CTL_PROTO(n)							\
2124943d2ee8bfaa7cf5893e4709143924157a5c1eChris Lattnerstatic int	n##_ctl(const size_t *mib, size_t miblen, void *oldp,	\
2224943d2ee8bfaa7cf5893e4709143924157a5c1eChris Lattner    size_t *oldlenp, void *newp, size_t newlen);
2324943d2ee8bfaa7cf5893e4709143924157a5c1eChris Lattner
2424943d2ee8bfaa7cf5893e4709143924157a5c1eChris Lattner#define	INDEX_PROTO(n)							\
2524943d2ee8bfaa7cf5893e4709143924157a5c1eChris Lattnerconst ctl_node_t	*n##_index(const size_t *mib, size_t miblen,	\
2669aa5d9a7620a183cdc4da12cc87ea82e2ffcbf9Greg Clayton    size_t i);
2769aa5d9a7620a183cdc4da12cc87ea82e2ffcbf9Greg Clayton
2869aa5d9a7620a183cdc4da12cc87ea82e2ffcbf9Greg Claytonstatic bool	ctl_arena_init(ctl_arena_stats_t *astats);
2924943d2ee8bfaa7cf5893e4709143924157a5c1eChris Lattnerstatic void	ctl_arena_clear(ctl_arena_stats_t *astats);
3024943d2ee8bfaa7cf5893e4709143924157a5c1eChris Lattnerstatic void	ctl_arena_stats_amerge(ctl_arena_stats_t *cstats,
3169aa5d9a7620a183cdc4da12cc87ea82e2ffcbf9Greg Clayton    arena_t *arena);
3269aa5d9a7620a183cdc4da12cc87ea82e2ffcbf9Greg Claytonstatic void	ctl_arena_stats_smerge(ctl_arena_stats_t *sstats,
3369aa5d9a7620a183cdc4da12cc87ea82e2ffcbf9Greg Clayton    ctl_arena_stats_t *astats);
3469aa5d9a7620a183cdc4da12cc87ea82e2ffcbf9Greg Claytonstatic void	ctl_arena_refresh(arena_t *arena, unsigned i);
3569aa5d9a7620a183cdc4da12cc87ea82e2ffcbf9Greg Claytonstatic void	ctl_refresh(void);
3669aa5d9a7620a183cdc4da12cc87ea82e2ffcbf9Greg Claytonstatic bool	ctl_init(void);
3769aa5d9a7620a183cdc4da12cc87ea82e2ffcbf9Greg Claytonstatic int	ctl_lookup(const char *name, ctl_node_t const **nodesp,
3869aa5d9a7620a183cdc4da12cc87ea82e2ffcbf9Greg Clayton    size_t *mibp, size_t *depthp);
3969aa5d9a7620a183cdc4da12cc87ea82e2ffcbf9Greg Clayton
4069aa5d9a7620a183cdc4da12cc87ea82e2ffcbf9Greg ClaytonCTL_PROTO(version)
4169aa5d9a7620a183cdc4da12cc87ea82e2ffcbf9Greg ClaytonCTL_PROTO(epoch)
4269aa5d9a7620a183cdc4da12cc87ea82e2ffcbf9Greg ClaytonCTL_PROTO(thread_tcache_flush)
4369aa5d9a7620a183cdc4da12cc87ea82e2ffcbf9Greg ClaytonCTL_PROTO(thread_arena)
4469aa5d9a7620a183cdc4da12cc87ea82e2ffcbf9Greg ClaytonCTL_PROTO(thread_allocated)
4569aa5d9a7620a183cdc4da12cc87ea82e2ffcbf9Greg ClaytonCTL_PROTO(thread_allocatedp)
4669aa5d9a7620a183cdc4da12cc87ea82e2ffcbf9Greg ClaytonCTL_PROTO(thread_deallocated)
4769aa5d9a7620a183cdc4da12cc87ea82e2ffcbf9Greg ClaytonCTL_PROTO(thread_deallocatedp)
4869aa5d9a7620a183cdc4da12cc87ea82e2ffcbf9Greg ClaytonCTL_PROTO(config_debug)
4969aa5d9a7620a183cdc4da12cc87ea82e2ffcbf9Greg ClaytonCTL_PROTO(config_dss)
5069aa5d9a7620a183cdc4da12cc87ea82e2ffcbf9Greg ClaytonCTL_PROTO(config_fill)
5124943d2ee8bfaa7cf5893e4709143924157a5c1eChris LattnerCTL_PROTO(config_lazy_lock)
5224943d2ee8bfaa7cf5893e4709143924157a5c1eChris LattnerCTL_PROTO(config_prof)
5324943d2ee8bfaa7cf5893e4709143924157a5c1eChris LattnerCTL_PROTO(config_prof_libgcc)
5424943d2ee8bfaa7cf5893e4709143924157a5c1eChris LattnerCTL_PROTO(config_prof_libunwind)
5524943d2ee8bfaa7cf5893e4709143924157a5c1eChris LattnerCTL_PROTO(config_stats)
5669aa5d9a7620a183cdc4da12cc87ea82e2ffcbf9Greg ClaytonCTL_PROTO(config_tcache)
5769aa5d9a7620a183cdc4da12cc87ea82e2ffcbf9Greg ClaytonCTL_PROTO(config_tls)
5824943d2ee8bfaa7cf5893e4709143924157a5c1eChris LattnerCTL_PROTO(config_xmalloc)
5924943d2ee8bfaa7cf5893e4709143924157a5c1eChris LattnerCTL_PROTO(opt_abort)
6069aa5d9a7620a183cdc4da12cc87ea82e2ffcbf9Greg ClaytonCTL_PROTO(opt_lg_chunk)
6169aa5d9a7620a183cdc4da12cc87ea82e2ffcbf9Greg ClaytonCTL_PROTO(opt_narenas)
6269aa5d9a7620a183cdc4da12cc87ea82e2ffcbf9Greg ClaytonCTL_PROTO(opt_lg_dirty_mult)
6369aa5d9a7620a183cdc4da12cc87ea82e2ffcbf9Greg ClaytonCTL_PROTO(opt_stats_print)
6424943d2ee8bfaa7cf5893e4709143924157a5c1eChris LattnerCTL_PROTO(opt_junk)
6563094e0bb161580564954dee512955c1c79d3476Greg ClaytonCTL_PROTO(opt_zero)
6624943d2ee8bfaa7cf5893e4709143924157a5c1eChris LattnerCTL_PROTO(opt_xmalloc)
6724943d2ee8bfaa7cf5893e4709143924157a5c1eChris LattnerCTL_PROTO(opt_tcache)
6824943d2ee8bfaa7cf5893e4709143924157a5c1eChris LattnerCTL_PROTO(opt_prof)
6924943d2ee8bfaa7cf5893e4709143924157a5c1eChris LattnerCTL_PROTO(opt_prof_prefix)
7024943d2ee8bfaa7cf5893e4709143924157a5c1eChris LattnerCTL_PROTO(opt_prof_active)
7124943d2ee8bfaa7cf5893e4709143924157a5c1eChris LattnerCTL_PROTO(opt_lg_prof_sample)
72CTL_PROTO(opt_lg_prof_interval)
73CTL_PROTO(opt_prof_gdump)
74CTL_PROTO(opt_prof_leak)
75CTL_PROTO(opt_prof_accum)
76CTL_PROTO(arenas_bin_i_size)
77CTL_PROTO(arenas_bin_i_nregs)
78CTL_PROTO(arenas_bin_i_run_size)
79INDEX_PROTO(arenas_bin_i)
80CTL_PROTO(arenas_lrun_i_size)
81INDEX_PROTO(arenas_lrun_i)
82CTL_PROTO(arenas_narenas)
83CTL_PROTO(arenas_initialized)
84CTL_PROTO(arenas_quantum)
85CTL_PROTO(arenas_pagesize)
86CTL_PROTO(arenas_chunksize)
87CTL_PROTO(arenas_tcache_max)
88CTL_PROTO(arenas_nbins)
89CTL_PROTO(arenas_nhbins)
90CTL_PROTO(arenas_nlruns)
91CTL_PROTO(arenas_purge)
92CTL_PROTO(prof_active)
93CTL_PROTO(prof_dump)
94CTL_PROTO(prof_interval)
95CTL_PROTO(stats_chunks_current)
96CTL_PROTO(stats_chunks_total)
97CTL_PROTO(stats_chunks_high)
98CTL_PROTO(stats_huge_allocated)
99CTL_PROTO(stats_huge_nmalloc)
100CTL_PROTO(stats_huge_ndalloc)
101CTL_PROTO(stats_arenas_i_small_allocated)
102CTL_PROTO(stats_arenas_i_small_nmalloc)
103CTL_PROTO(stats_arenas_i_small_ndalloc)
104CTL_PROTO(stats_arenas_i_small_nrequests)
105CTL_PROTO(stats_arenas_i_large_allocated)
106CTL_PROTO(stats_arenas_i_large_nmalloc)
107CTL_PROTO(stats_arenas_i_large_ndalloc)
108CTL_PROTO(stats_arenas_i_large_nrequests)
109CTL_PROTO(stats_arenas_i_bins_j_allocated)
110CTL_PROTO(stats_arenas_i_bins_j_nmalloc)
111CTL_PROTO(stats_arenas_i_bins_j_ndalloc)
112CTL_PROTO(stats_arenas_i_bins_j_nrequests)
113CTL_PROTO(stats_arenas_i_bins_j_nfills)
114CTL_PROTO(stats_arenas_i_bins_j_nflushes)
115CTL_PROTO(stats_arenas_i_bins_j_nruns)
116CTL_PROTO(stats_arenas_i_bins_j_nreruns)
117CTL_PROTO(stats_arenas_i_bins_j_curruns)
118INDEX_PROTO(stats_arenas_i_bins_j)
119CTL_PROTO(stats_arenas_i_lruns_j_nmalloc)
120CTL_PROTO(stats_arenas_i_lruns_j_ndalloc)
121CTL_PROTO(stats_arenas_i_lruns_j_nrequests)
122CTL_PROTO(stats_arenas_i_lruns_j_curruns)
123INDEX_PROTO(stats_arenas_i_lruns_j)
124CTL_PROTO(stats_arenas_i_nthreads)
125CTL_PROTO(stats_arenas_i_pactive)
126CTL_PROTO(stats_arenas_i_pdirty)
127CTL_PROTO(stats_arenas_i_mapped)
128CTL_PROTO(stats_arenas_i_npurge)
129CTL_PROTO(stats_arenas_i_nmadvise)
130CTL_PROTO(stats_arenas_i_purged)
131INDEX_PROTO(stats_arenas_i)
132CTL_PROTO(stats_cactive)
133CTL_PROTO(stats_allocated)
134CTL_PROTO(stats_active)
135CTL_PROTO(stats_mapped)
136
/******************************************************************************/
/* mallctl tree. */

/* Maximum tree depth. */
#define	CTL_MAX_DEPTH	6

/*
 * Node-initializer macros.  NAME() deliberately leaves two braces open
 * (the aggregate and the .named member); CTL() and CHILD() supply the
 * remaining fields and close them.  A node built with NAME()+CHILD() is
 * an interior node (ctl callback NULL); NAME()+CTL() is a leaf.
 */
#define	NAME(n)	true,	{.named = {n
#define	CHILD(c) sizeof(c##_node) / sizeof(ctl_node_t),	c##_node}},	NULL
#define	CTL(c)	0,				NULL}},		c##_ctl

/*
 * Only handles internal indexed nodes, since there are currently no external
 * ones.
 */
#define	INDEX(i)	false,	{.indexed = {i##_index}},		NULL
152
/* thread.tcache.* leaves. */
static const ctl_node_t	tcache_node[] = {
	{NAME("flush"),		CTL(thread_tcache_flush)}
};

/* thread.* leaves and subtrees. */
static const ctl_node_t	thread_node[] = {
	{NAME("arena"),		CTL(thread_arena)},
	{NAME("allocated"),	CTL(thread_allocated)},
	{NAME("allocatedp"),	CTL(thread_allocatedp)},
	{NAME("deallocated"),	CTL(thread_deallocated)},
	{NAME("deallocatedp"),	CTL(thread_deallocatedp)},
	{NAME("tcache"),	CHILD(tcache)}
};

/* config.*: compile-time feature flags (read-only booleans). */
static const ctl_node_t	config_node[] = {
	{NAME("debug"),			CTL(config_debug)},
	{NAME("dss"),			CTL(config_dss)},
	{NAME("fill"),			CTL(config_fill)},
	{NAME("lazy_lock"),		CTL(config_lazy_lock)},
	{NAME("prof"),			CTL(config_prof)},
	{NAME("prof_libgcc"),		CTL(config_prof_libgcc)},
	{NAME("prof_libunwind"),	CTL(config_prof_libunwind)},
	{NAME("stats"),			CTL(config_stats)},
	{NAME("tcache"),		CTL(config_tcache)},
	{NAME("tls"),			CTL(config_tls)},
	{NAME("xmalloc"),		CTL(config_xmalloc)}
};

/* opt.*: run-time option values as chosen at startup. */
static const ctl_node_t opt_node[] = {
	{NAME("abort"),			CTL(opt_abort)},
	{NAME("lg_chunk"),		CTL(opt_lg_chunk)},
	{NAME("narenas"),		CTL(opt_narenas)},
	{NAME("lg_dirty_mult"),		CTL(opt_lg_dirty_mult)},
	{NAME("stats_print"),		CTL(opt_stats_print)},
	{NAME("junk"),			CTL(opt_junk)},
	{NAME("zero"),			CTL(opt_zero)},
	{NAME("xmalloc"),		CTL(opt_xmalloc)},
	{NAME("tcache"),		CTL(opt_tcache)},
	{NAME("prof"),			CTL(opt_prof)},
	{NAME("prof_prefix"),		CTL(opt_prof_prefix)},
	{NAME("prof_active"),		CTL(opt_prof_active)},
	{NAME("lg_prof_sample"),	CTL(opt_lg_prof_sample)},
	{NAME("lg_prof_interval"),	CTL(opt_lg_prof_interval)},
	{NAME("prof_gdump"),		CTL(opt_prof_gdump)},
	{NAME("prof_leak"),		CTL(opt_prof_leak)},
	{NAME("prof_accum"),		CTL(opt_prof_accum)}
};
199
/*
 * arenas.bin.<i>.*: per-bin metadata.  The super_* wrapper gives the
 * indexed subtree a single unnamed root, as required by ctl_lookup().
 */
static const ctl_node_t arenas_bin_i_node[] = {
	{NAME("size"),			CTL(arenas_bin_i_size)},
	{NAME("nregs"),			CTL(arenas_bin_i_nregs)},
	{NAME("run_size"),		CTL(arenas_bin_i_run_size)}
};
static const ctl_node_t super_arenas_bin_i_node[] = {
	{NAME(""),			CHILD(arenas_bin_i)}
};

static const ctl_node_t arenas_bin_node[] = {
	{INDEX(arenas_bin_i)}
};

/* arenas.lrun.<i>.*: per-large-run-class metadata. */
static const ctl_node_t arenas_lrun_i_node[] = {
	{NAME("size"),			CTL(arenas_lrun_i_size)}
};
static const ctl_node_t super_arenas_lrun_i_node[] = {
	{NAME(""),			CHILD(arenas_lrun_i)}
};

static const ctl_node_t arenas_lrun_node[] = {
	{INDEX(arenas_lrun_i)}
};

/* arenas.*: global arena parameters plus the bin/lrun subtrees. */
static const ctl_node_t arenas_node[] = {
	{NAME("narenas"),		CTL(arenas_narenas)},
	{NAME("initialized"),		CTL(arenas_initialized)},
	{NAME("quantum"),		CTL(arenas_quantum)},
	{NAME("pagesize"),		CTL(arenas_pagesize)},
	{NAME("chunksize"),		CTL(arenas_chunksize)},
	{NAME("tcache_max"),		CTL(arenas_tcache_max)},
	{NAME("nbins"),			CTL(arenas_nbins)},
	{NAME("nhbins"),		CTL(arenas_nhbins)},
	{NAME("bin"),			CHILD(arenas_bin)},
	{NAME("nlruns"),		CTL(arenas_nlruns)},
	{NAME("lrun"),			CHILD(arenas_lrun)},
	{NAME("purge"),			CTL(arenas_purge)}
};

/* prof.*: heap-profiling controls. */
static const ctl_node_t	prof_node[] = {
	{NAME("active"),	CTL(prof_active)},
	{NAME("dump"),		CTL(prof_dump)},
	{NAME("interval"),	CTL(prof_interval)}
};

/* stats.chunks.*: chunk-level counters. */
static const ctl_node_t stats_chunks_node[] = {
	{NAME("current"),		CTL(stats_chunks_current)},
	{NAME("total"),			CTL(stats_chunks_total)},
	{NAME("high"),			CTL(stats_chunks_high)}
};

/* stats.huge.*: huge-allocation counters. */
static const ctl_node_t stats_huge_node[] = {
	{NAME("allocated"),		CTL(stats_huge_allocated)},
	{NAME("nmalloc"),		CTL(stats_huge_nmalloc)},
	{NAME("ndalloc"),		CTL(stats_huge_ndalloc)}
};
256
/* stats.arenas.<i>.small.*: aggregated small-class counters. */
static const ctl_node_t stats_arenas_i_small_node[] = {
	{NAME("allocated"),		CTL(stats_arenas_i_small_allocated)},
	{NAME("nmalloc"),		CTL(stats_arenas_i_small_nmalloc)},
	{NAME("ndalloc"),		CTL(stats_arenas_i_small_ndalloc)},
	{NAME("nrequests"),		CTL(stats_arenas_i_small_nrequests)}
};

/* stats.arenas.<i>.large.*: aggregated large-class counters. */
static const ctl_node_t stats_arenas_i_large_node[] = {
	{NAME("allocated"),		CTL(stats_arenas_i_large_allocated)},
	{NAME("nmalloc"),		CTL(stats_arenas_i_large_nmalloc)},
	{NAME("ndalloc"),		CTL(stats_arenas_i_large_ndalloc)},
	{NAME("nrequests"),		CTL(stats_arenas_i_large_nrequests)}
};

/* stats.arenas.<i>.bins.<j>.*: per-bin statistics. */
static const ctl_node_t stats_arenas_i_bins_j_node[] = {
	{NAME("allocated"),		CTL(stats_arenas_i_bins_j_allocated)},
	{NAME("nmalloc"),		CTL(stats_arenas_i_bins_j_nmalloc)},
	{NAME("ndalloc"),		CTL(stats_arenas_i_bins_j_ndalloc)},
	{NAME("nrequests"),		CTL(stats_arenas_i_bins_j_nrequests)},
	{NAME("nfills"),		CTL(stats_arenas_i_bins_j_nfills)},
	{NAME("nflushes"),		CTL(stats_arenas_i_bins_j_nflushes)},
	{NAME("nruns"),			CTL(stats_arenas_i_bins_j_nruns)},
	{NAME("nreruns"),		CTL(stats_arenas_i_bins_j_nreruns)},
	{NAME("curruns"),		CTL(stats_arenas_i_bins_j_curruns)}
};
static const ctl_node_t super_stats_arenas_i_bins_j_node[] = {
	{NAME(""),			CHILD(stats_arenas_i_bins_j)}
};

static const ctl_node_t stats_arenas_i_bins_node[] = {
	{INDEX(stats_arenas_i_bins_j)}
};

/* stats.arenas.<i>.lruns.<j>.*: per-large-run-class statistics. */
static const ctl_node_t stats_arenas_i_lruns_j_node[] = {
	{NAME("nmalloc"),		CTL(stats_arenas_i_lruns_j_nmalloc)},
	{NAME("ndalloc"),		CTL(stats_arenas_i_lruns_j_ndalloc)},
	{NAME("nrequests"),		CTL(stats_arenas_i_lruns_j_nrequests)},
	{NAME("curruns"),		CTL(stats_arenas_i_lruns_j_curruns)}
};
static const ctl_node_t super_stats_arenas_i_lruns_j_node[] = {
	{NAME(""),			CHILD(stats_arenas_i_lruns_j)}
};

static const ctl_node_t stats_arenas_i_lruns_node[] = {
	{INDEX(stats_arenas_i_lruns_j)}
};

/* stats.arenas.<i>.*: per-arena statistics subtree. */
static const ctl_node_t stats_arenas_i_node[] = {
	{NAME("nthreads"),		CTL(stats_arenas_i_nthreads)},
	{NAME("pactive"),		CTL(stats_arenas_i_pactive)},
	{NAME("pdirty"),		CTL(stats_arenas_i_pdirty)},
	{NAME("mapped"),		CTL(stats_arenas_i_mapped)},
	{NAME("npurge"),		CTL(stats_arenas_i_npurge)},
	{NAME("nmadvise"),		CTL(stats_arenas_i_nmadvise)},
	{NAME("purged"),		CTL(stats_arenas_i_purged)},
	{NAME("small"),			CHILD(stats_arenas_i_small)},
	{NAME("large"),			CHILD(stats_arenas_i_large)},
	{NAME("bins"),			CHILD(stats_arenas_i_bins)},
	{NAME("lruns"),		CHILD(stats_arenas_i_lruns)}
};
static const ctl_node_t super_stats_arenas_i_node[] = {
	{NAME(""),			CHILD(stats_arenas_i)}
};

static const ctl_node_t stats_arenas_node[] = {
	{INDEX(stats_arenas_i)}
};

/* stats.*: global statistics plus per-arena/chunk/huge subtrees. */
static const ctl_node_t stats_node[] = {
	{NAME("cactive"),		CTL(stats_cactive)},
	{NAME("allocated"),		CTL(stats_allocated)},
	{NAME("active"),		CTL(stats_active)},
	{NAME("mapped"),		CTL(stats_mapped)},
	{NAME("chunks"),		CHILD(stats_chunks)},
	{NAME("huge"),			CHILD(stats_huge)},
	{NAME("arenas"),		CHILD(stats_arenas)}
};

/* Root of the mallctl tree; super_root_node is the unnamed super-root. */
static const ctl_node_t	root_node[] = {
	{NAME("version"),	CTL(version)},
	{NAME("epoch"),		CTL(epoch)},
	{NAME("thread"),	CHILD(thread)},
	{NAME("config"),	CHILD(config)},
	{NAME("opt"),		CHILD(opt)},
	{NAME("arenas"),	CHILD(arenas)},
	{NAME("prof"),		CHILD(prof)},
	{NAME("stats"),		CHILD(stats)}
};
static const ctl_node_t super_root_node[] = {
	{NAME(""),		CHILD(root)}
};

/* The node-builder macros are only meaningful while defining the tree. */
#undef NAME
#undef CHILD
#undef CTL
#undef INDEX
353
354/******************************************************************************/
355
356static bool
357ctl_arena_init(ctl_arena_stats_t *astats)
358{
359
360	if (astats->lstats == NULL) {
361		astats->lstats = (malloc_large_stats_t *)base_alloc(nlclasses *
362		    sizeof(malloc_large_stats_t));
363		if (astats->lstats == NULL)
364			return (true);
365	}
366
367	return (false);
368}
369
370static void
371ctl_arena_clear(ctl_arena_stats_t *astats)
372{
373
374	astats->pactive = 0;
375	astats->pdirty = 0;
376	if (config_stats) {
377		memset(&astats->astats, 0, sizeof(arena_stats_t));
378		astats->allocated_small = 0;
379		astats->nmalloc_small = 0;
380		astats->ndalloc_small = 0;
381		astats->nrequests_small = 0;
382		memset(astats->bstats, 0, NBINS * sizeof(malloc_bin_stats_t));
383		memset(astats->lstats, 0, nlclasses *
384		    sizeof(malloc_large_stats_t));
385	}
386}
387
388static void
389ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, arena_t *arena)
390{
391	unsigned i;
392
393	arena_stats_merge(arena, &cstats->pactive, &cstats->pdirty,
394	    &cstats->astats, cstats->bstats, cstats->lstats);
395
396	for (i = 0; i < NBINS; i++) {
397		cstats->allocated_small += cstats->bstats[i].allocated;
398		cstats->nmalloc_small += cstats->bstats[i].nmalloc;
399		cstats->ndalloc_small += cstats->bstats[i].ndalloc;
400		cstats->nrequests_small += cstats->bstats[i].nrequests;
401	}
402}
403
/*
 * Accumulate astats into sstats (the extra "summed over all arenas" slot at
 * ctl_stats.arenas[narenas]).  Pure field-by-field addition; the caller is
 * responsible for holding ctl_mtx.
 */
static void
ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats)
{
	unsigned i;

	sstats->pactive += astats->pactive;
	sstats->pdirty += astats->pdirty;

	sstats->astats.mapped += astats->astats.mapped;
	sstats->astats.npurge += astats->astats.npurge;
	sstats->astats.nmadvise += astats->astats.nmadvise;
	sstats->astats.purged += astats->astats.purged;

	sstats->allocated_small += astats->allocated_small;
	sstats->nmalloc_small += astats->nmalloc_small;
	sstats->ndalloc_small += astats->ndalloc_small;
	sstats->nrequests_small += astats->nrequests_small;

	sstats->astats.allocated_large += astats->astats.allocated_large;
	sstats->astats.nmalloc_large += astats->astats.nmalloc_large;
	sstats->astats.ndalloc_large += astats->astats.ndalloc_large;
	sstats->astats.nrequests_large += astats->astats.nrequests_large;

	/* Per-large-run-class counters. */
	for (i = 0; i < nlclasses; i++) {
		sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc;
		sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc;
		sstats->lstats[i].nrequests += astats->lstats[i].nrequests;
		sstats->lstats[i].curruns += astats->lstats[i].curruns;
	}

	/* Per-bin counters; fill/flush stats only exist with tcache support. */
	for (i = 0; i < NBINS; i++) {
		sstats->bstats[i].allocated += astats->bstats[i].allocated;
		sstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
		sstats->bstats[i].ndalloc += astats->bstats[i].ndalloc;
		sstats->bstats[i].nrequests += astats->bstats[i].nrequests;
		if (config_tcache) {
			sstats->bstats[i].nfills += astats->bstats[i].nfills;
			sstats->bstats[i].nflushes +=
			    astats->bstats[i].nflushes;
		}
		sstats->bstats[i].nruns += astats->bstats[i].nruns;
		sstats->bstats[i].reruns += astats->bstats[i].reruns;
		sstats->bstats[i].curruns += astats->bstats[i].curruns;
	}
}
449
/*
 * Refresh the cached stats for arena i, and fold them into the summary slot
 * ctl_stats.arenas[narenas].  Caller holds ctl_mtx.
 */
static void
ctl_arena_refresh(arena_t *arena, unsigned i)
{
	ctl_arena_stats_t *astats = &ctl_stats.arenas[i];
	ctl_arena_stats_t *sstats = &ctl_stats.arenas[narenas];

	ctl_arena_clear(astats);

	/* nthreads was snapshotted by ctl_refresh() before this call. */
	sstats->nthreads += astats->nthreads;
	if (config_stats) {
		ctl_arena_stats_amerge(astats, arena);
		/* Merge into sum stats as well. */
		ctl_arena_stats_smerge(sstats, astats);
	} else {
		/* Without stats support, only page counts are tracked. */
		astats->pactive += arena->nactive;
		astats->pdirty += arena->ndirty;
		/* Merge into sum stats as well. */
		sstats->pactive += arena->nactive;
		sstats->pdirty += arena->ndirty;
	}
}
471
/*
 * Rebuild the entire ctl_stats snapshot and advance ctl_epoch.  Caller holds
 * ctl_mtx; chunks_mtx, huge_mtx, and arenas_lock are each taken briefly and
 * never held simultaneously.
 */
static void
ctl_refresh(void)
{
	unsigned i;
	/* VLA sized by narenas, which is fixed after bootstrapping. */
	arena_t *tarenas[narenas];

	if (config_stats) {
		malloc_mutex_lock(&chunks_mtx);
		ctl_stats.chunks.current = stats_chunks.curchunks;
		ctl_stats.chunks.total = stats_chunks.nchunks;
		ctl_stats.chunks.high = stats_chunks.highchunks;
		malloc_mutex_unlock(&chunks_mtx);

		malloc_mutex_lock(&huge_mtx);
		ctl_stats.huge.allocated = huge_allocated;
		ctl_stats.huge.nmalloc = huge_nmalloc;
		ctl_stats.huge.ndalloc = huge_ndalloc;
		malloc_mutex_unlock(&huge_mtx);
	}

	/*
	 * Clear sum stats, since they will be merged into by
	 * ctl_arena_refresh().
	 */
	ctl_stats.arenas[narenas].nthreads = 0;
	ctl_arena_clear(&ctl_stats.arenas[narenas]);

	/* Snapshot arena pointers so per-arena work runs without arenas_lock. */
	malloc_mutex_lock(&arenas_lock);
	memcpy(tarenas, arenas, sizeof(arena_t *) * narenas);
	for (i = 0; i < narenas; i++) {
		if (arenas[i] != NULL)
			ctl_stats.arenas[i].nthreads = arenas[i]->nthreads;
		else
			ctl_stats.arenas[i].nthreads = 0;
	}
	malloc_mutex_unlock(&arenas_lock);
	for (i = 0; i < narenas; i++) {
		bool initialized = (tarenas[i] != NULL);

		ctl_stats.arenas[i].initialized = initialized;
		if (initialized)
			ctl_arena_refresh(tarenas[i], i);
	}

	if (config_stats) {
		/* Derive totals from the summary slot at index narenas. */
		ctl_stats.allocated = ctl_stats.arenas[narenas].allocated_small
		    + ctl_stats.arenas[narenas].astats.allocated_large
		    + ctl_stats.huge.allocated;
		ctl_stats.active = (ctl_stats.arenas[narenas].pactive <<
		    PAGE_SHIFT) + ctl_stats.huge.allocated;
		ctl_stats.mapped = (ctl_stats.chunks.current << opt_lg_chunk);
	}

	/* Readers compare epochs to detect a completed refresh. */
	ctl_epoch++;
}
527
/*
 * One-time initialization of ctl_stats, performed lazily on first mallctl
 * use.  Returns true on allocation failure (the caller maps this to EAGAIN).
 * Idempotent: subsequent calls are no-ops once ctl_initialized is set.
 */
static bool
ctl_init(void)
{
	bool ret;

	malloc_mutex_lock(&ctl_mtx);
	if (ctl_initialized == false) {
		/*
		 * Allocate space for one extra arena stats element, which
		 * contains summed stats across all arenas.
		 */
		ctl_stats.arenas = (ctl_arena_stats_t *)base_alloc(
		    (narenas + 1) * sizeof(ctl_arena_stats_t));
		if (ctl_stats.arenas == NULL) {
			ret = true;
			goto RETURN;
		}
		memset(ctl_stats.arenas, 0, (narenas + 1) *
		    sizeof(ctl_arena_stats_t));

		/*
		 * Initialize all stats structures, regardless of whether they
		 * ever get used.  Lazy initialization would allow errors to
		 * cause inconsistent state to be viewable by the application.
		 */
		if (config_stats) {
			unsigned i;
			for (i = 0; i <= narenas; i++) {
				if (ctl_arena_init(&ctl_stats.arenas[i])) {
					ret = true;
					goto RETURN;
				}
			}
		}
		/* The summary slot is always considered initialized. */
		ctl_stats.arenas[narenas].initialized = true;

		ctl_epoch = 0;
		ctl_refresh();
		ctl_initialized = true;
	}

	ret = false;
RETURN:
	malloc_mutex_unlock(&ctl_mtx);
	return (ret);
}
574
/*
 * Translate a dotted name (e.g. "stats.arenas.0.mapped") into a MIB.
 *
 * name:   NUL-terminated dotted path.
 * nodesp: if non-NULL, receives the node pointer at each depth.
 * mibp:   receives the numeric component at each depth.
 * depthp: in: capacity of nodesp/mibp; out: actual depth used.
 *
 * Returns 0 on success, ENOENT if the name does not resolve to a complete
 * path through the tree.
 */
static int
ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp,
    size_t *depthp)
{
	int ret;
	const char *elm, *tdot, *dot;
	size_t elen, i, j;
	const ctl_node_t *node;

	elm = name;
	/* Equivalent to strchrnul(). */
	dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot : strchr(elm, '\0');
	elen = (size_t)((uintptr_t)dot - (uintptr_t)elm);
	if (elen == 0) {
		ret = ENOENT;
		goto RETURN;
	}
	node = super_root_node;
	for (i = 0; i < *depthp; i++) {
		assert(node->named);
		assert(node->u.named.nchildren > 0);
		if (node->u.named.children[0].named) {
			const ctl_node_t *pnode = node;

			/* Children are named. */
			for (j = 0; j < node->u.named.nchildren; j++) {
				const ctl_node_t *child =
				    &node->u.named.children[j];
				/* Exact-length match; elm is not NUL-terminated here. */
				if (strlen(child->u.named.name) == elen
				    && strncmp(elm, child->u.named.name,
				    elen) == 0) {
					node = child;
					if (nodesp != NULL)
						nodesp[i] = node;
					mibp[i] = j;
					break;
				}
			}
			/* node unchanged means no child matched. */
			if (node == pnode) {
				ret = ENOENT;
				goto RETURN;
			}
		} else {
			unsigned long index;
			const ctl_node_t *inode;

			/* Children are indexed. */
			/*
			 * NOTE(review): strtoul() returns 0 for non-numeric
			 * input and ULONG_MAX only on overflow, so a
			 * non-numeric element resolves to index 0 here --
			 * confirm this is the intended behavior.
			 */
			index = strtoul(elm, NULL, 10);
			if (index == ULONG_MAX) {
				ret = ENOENT;
				goto RETURN;
			}

			inode = &node->u.named.children[0];
			node = inode->u.indexed.index(mibp, *depthp,
			    index);
			if (node == NULL) {
				ret = ENOENT;
				goto RETURN;
			}

			if (nodesp != NULL)
				nodesp[i] = node;
			mibp[i] = (size_t)index;
		}

		if (node->ctl != NULL) {
			/* Terminal node. */
			if (*dot != '\0') {
				/*
				 * The name contains more elements than are
				 * in this path through the tree.
				 */
				ret = ENOENT;
				goto RETURN;
			}
			/* Complete lookup successful. */
			*depthp = i + 1;
			break;
		}

		/* Update elm. */
		if (*dot == '\0') {
			/* No more elements. */
			ret = ENOENT;
			goto RETURN;
		}
		elm = &dot[1];
		dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot :
		    strchr(elm, '\0');
		elen = (size_t)((uintptr_t)dot - (uintptr_t)elm);
	}

	ret = 0;
RETURN:
	return (ret);
}
672
673int
674ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp,
675    size_t newlen)
676{
677	int ret;
678	size_t depth;
679	ctl_node_t const *nodes[CTL_MAX_DEPTH];
680	size_t mib[CTL_MAX_DEPTH];
681
682	if (ctl_initialized == false && ctl_init()) {
683		ret = EAGAIN;
684		goto RETURN;
685	}
686
687	depth = CTL_MAX_DEPTH;
688	ret = ctl_lookup(name, nodes, mib, &depth);
689	if (ret != 0)
690		goto RETURN;
691
692	if (nodes[depth-1]->ctl == NULL) {
693		/* The name refers to a partial path through the ctl tree. */
694		ret = ENOENT;
695		goto RETURN;
696	}
697
698	ret = nodes[depth-1]->ctl(mib, depth, oldp, oldlenp, newp, newlen);
699RETURN:
700	return(ret);
701}
702
703int
704ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp)
705{
706	int ret;
707
708	if (ctl_initialized == false && ctl_init()) {
709		ret = EAGAIN;
710		goto RETURN;
711	}
712
713	ret = ctl_lookup(name, NULL, mibp, miblenp);
714RETURN:
715	return(ret);
716}
717
/*
 * Public mallctl-by-MIB entry point: walk the tree following the numeric
 * components in mib[0..miblen), then invoke the terminal node's handler.
 * Returns 0 or an errno value (ENOENT for a bad or partial MIB).
 */
int
ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
	int ret;
	const ctl_node_t *node;
	size_t i;

	if (ctl_initialized == false && ctl_init()) {
		ret = EAGAIN;
		goto RETURN;
	}

	/* Iterate down the tree. */
	node = super_root_node;
	for (i = 0; i < miblen; i++) {
		if (node->u.named.children[0].named) {
			/* Children are named. */
			if (node->u.named.nchildren <= mib[i]) {
				ret = ENOENT;
				goto RETURN;
			}
			node = &node->u.named.children[mib[i]];
		} else {
			const ctl_node_t *inode;

			/* Indexed element. */
			inode = &node->u.named.children[0];
			node = inode->u.indexed.index(mib, miblen, mib[i]);
			if (node == NULL) {
				ret = ENOENT;
				goto RETURN;
			}
		}
	}

	/* Call the ctl function. */
	if (node->ctl == NULL) {
		/* Partial MIB. */
		ret = ENOENT;
		goto RETURN;
	}
	ret = node->ctl(mib, miblen, oldp, oldlenp, newp, newlen);

RETURN:
	return(ret);
}
765
766bool
767ctl_boot(void)
768{
769
770	if (malloc_mutex_init(&ctl_mtx))
771		return (true);
772
773	ctl_initialized = false;
774
775	return (false);
776}
777
778/******************************************************************************/
779/* *_ctl() functions. */
780
/* Reject any write attempt (newp/newlen supplied) with EPERM. */
#define	READONLY()	do {						\
	if (newp != NULL || newlen != 0) {				\
		ret = EPERM;						\
		goto RETURN;						\
	}								\
} while (0)

/* Reject any read attempt (oldp/oldlenp supplied) with EPERM. */
#define	WRITEONLY()	do {						\
	if (oldp != NULL || oldlenp != NULL) {				\
		ret = EPERM;						\
		goto RETURN;						\
	}								\
} while (0)

/* Action-only node: no reads, no writes. */
#define	VOID()	do {							\
	READONLY();							\
	WRITEONLY();							\
} while (0)

/*
 * Copy v out to the caller.  On a size mismatch, copy as many bytes as fit
 * and return EINVAL (partial copy is intentional -- it lets callers probe
 * the required size).  The expansions rely on a RETURN label in the caller.
 */
#define	READ(v, t)	do {						\
	if (oldp != NULL && oldlenp != NULL) {				\
		if (*oldlenp != sizeof(t)) {				\
			size_t	copylen = (sizeof(t) <= *oldlenp)	\
			    ? sizeof(t) : *oldlenp;			\
			memcpy(oldp, (void *)&v, copylen);		\
			ret = EINVAL;					\
			goto RETURN;					\
		} else							\
			*(t *)oldp = v;					\
	}								\
} while (0)

/* Copy the caller-supplied new value into v; EINVAL on size mismatch. */
#define	WRITE(v, t)	do {						\
	if (newp != NULL) {						\
		if (newlen != sizeof(t)) {				\
			ret = EINVAL;					\
			goto RETURN;					\
		}							\
		v = *(t *)newp;						\
	}								\
} while (0)
822
/*
 * There's a lot of code duplication in the following macros due to limitations
 * in how nested cpp macros are expanded.
 */
/*
 * Read-only handler, gated on compile-time flag c, optionally (l) taking
 * ctl_mtx around the read.
 */
#define	CTL_RO_CLGEN(c, l, n, v, t)					\
static int								\
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,	\
    void *newp, size_t newlen)						\
{									\
	int ret;							\
	t oldval;							\
									\
	if ((c) == false)						\
		return (ENOENT);					\
	if (l)								\
		malloc_mutex_lock(&ctl_mtx);				\
	READONLY();							\
	oldval = v;							\
	READ(oldval, t);						\
									\
	ret = 0;							\
RETURN:									\
	if (l)								\
		malloc_mutex_unlock(&ctl_mtx);				\
	return (ret);							\
}

/* Read-only handler, gated on c, always holding ctl_mtx. */
#define	CTL_RO_CGEN(c, n, v, t)						\
static int								\
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,	\
    void *newp, size_t newlen)						\
{									\
	int ret;							\
	t oldval;							\
									\
	if ((c) == false)						\
		return (ENOENT);					\
	malloc_mutex_lock(&ctl_mtx);					\
	READONLY();							\
	oldval = v;							\
	READ(oldval, t);						\
									\
	ret = 0;							\
RETURN:									\
	malloc_mutex_unlock(&ctl_mtx);					\
	return (ret);							\
}

/* Unconditional read-only handler, holding ctl_mtx. */
#define	CTL_RO_GEN(n, v, t)						\
static int								\
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,	\
    void *newp, size_t newlen)						\
{									\
	int ret;							\
	t oldval;							\
									\
	malloc_mutex_lock(&ctl_mtx);					\
	READONLY();							\
	oldval = v;							\
	READ(oldval, t);						\
									\
	ret = 0;							\
RETURN:									\
	malloc_mutex_unlock(&ctl_mtx);					\
	return (ret);							\
}

/*
 * ctl_mtx is not acquired, under the assumption that no pertinent data will
 * mutate during the call.
 */
#define	CTL_RO_NL_CGEN(c, n, v, t)					\
static int								\
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,	\
    void *newp, size_t newlen)						\
{									\
	int ret;							\
	t oldval;							\
									\
	if ((c) == false)						\
		return (ENOENT);					\
	READONLY();							\
	oldval = v;							\
	READ(oldval, t);						\
									\
	ret = 0;							\
RETURN:									\
	return (ret);							\
}

/* Unconditional, lock-free read-only handler. */
#define	CTL_RO_NL_GEN(n, v, t)						\
static int								\
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,	\
    void *newp, size_t newlen)						\
{									\
	int ret;							\
	t oldval;							\
									\
	READONLY();							\
	oldval = v;							\
	READ(oldval, t);						\
									\
	ret = 0;							\
RETURN:									\
	return (ret);							\
}

/* Lock-free read-only handler for a config_* boolean named n. */
#define	CTL_RO_BOOL_CONFIG_GEN(n)					\
static int								\
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,	\
    void *newp, size_t newlen)						\
{									\
	int ret;							\
	bool oldval;							\
									\
	READONLY();							\
	oldval = n;							\
	READ(oldval, bool);						\
									\
	ret = 0;							\
RETURN:									\
	return (ret);							\
}
946
/* "version": read-only build version string; no locking needed. */
CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *)
948
/*
 * "epoch": writing any nonzero uint64_t triggers a stats refresh; reading
 * returns the current epoch.  WRITE/READ expand to goto RETURN on error.
 */
static int
epoch_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
	int ret;
	uint64_t newval;

	malloc_mutex_lock(&ctl_mtx);
	newval = 0;
	WRITE(newval, uint64_t);
	if (newval != 0)
		ctl_refresh();
	READ(ctl_epoch, uint64_t);

	ret = 0;
RETURN:
	malloc_mutex_unlock(&ctl_mtx);
	return (ret);
}
968
969static int
970thread_tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp,
971    size_t *oldlenp, void *newp, size_t newlen)
972{
973	int ret;
974	tcache_t *tcache;
975
976	if (config_tcache == false)
977		return (ENOENT);
978
979	VOID();
980
981	tcache = TCACHE_GET();
982	if (tcache == NULL) {
983		ret = 0;
984		goto RETURN;
985	}
986	tcache_destroy(tcache);
987	TCACHE_SET(NULL);
988
989	ret = 0;
990RETURN:
991	return (ret);
992}
993
994static int
995thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
996    void *newp, size_t newlen)
997{
998	int ret;
999	unsigned newind, oldind;
1000
1001	newind = oldind = choose_arena()->ind;
1002	WRITE(newind, unsigned);
1003	READ(oldind, unsigned);
1004	if (newind != oldind) {
1005		arena_t *arena;
1006
1007		if (newind >= narenas) {
1008			/* New arena index is out of range. */
1009			ret = EFAULT;
1010			goto RETURN;
1011		}
1012
1013		/* Initialize arena if necessary. */
1014		malloc_mutex_lock(&arenas_lock);
1015		if ((arena = arenas[newind]) == NULL)
1016			arena = arenas_extend(newind);
1017		arenas[oldind]->nthreads--;
1018		arenas[newind]->nthreads++;
1019		malloc_mutex_unlock(&arenas_lock);
1020		if (arena == NULL) {
1021			ret = EAGAIN;
1022			goto RETURN;
1023		}
1024
1025		/* Set new arena association. */
1026		ARENA_SET(arena);
1027		if (config_tcache) {
1028			tcache_t *tcache = TCACHE_GET();
1029			if (tcache != NULL)
1030				tcache->arena = arena;
1031		}
1032	}
1033
1034	ret = 0;
1035RETURN:
1036	return (ret);
1037}
1038
/*
 * "thread.allocated"/"thread.deallocated" counters and pointers to them;
 * only present in stats-enabled builds.
 */
CTL_RO_NL_CGEN(config_stats, thread_allocated, ALLOCATED_GET(), uint64_t)
CTL_RO_NL_CGEN(config_stats, thread_allocatedp, ALLOCATEDP_GET(), uint64_t *)
CTL_RO_NL_CGEN(config_stats, thread_deallocated, DEALLOCATED_GET(), uint64_t)
CTL_RO_NL_CGEN(config_stats, thread_deallocatedp, DEALLOCATEDP_GET(),
    uint64_t *)
1044
1045/******************************************************************************/
1046
/* "config.*": report which optional features this build was compiled with. */
CTL_RO_BOOL_CONFIG_GEN(config_debug)
CTL_RO_BOOL_CONFIG_GEN(config_dss)
CTL_RO_BOOL_CONFIG_GEN(config_fill)
CTL_RO_BOOL_CONFIG_GEN(config_lazy_lock)
CTL_RO_BOOL_CONFIG_GEN(config_prof)
CTL_RO_BOOL_CONFIG_GEN(config_prof_libgcc)
CTL_RO_BOOL_CONFIG_GEN(config_prof_libunwind)
CTL_RO_BOOL_CONFIG_GEN(config_stats)
CTL_RO_BOOL_CONFIG_GEN(config_tcache)
CTL_RO_BOOL_CONFIG_GEN(config_tls)
CTL_RO_BOOL_CONFIG_GEN(config_xmalloc)
1058
1059/******************************************************************************/
1060
/*
 * "opt.*": read-only views of option values; feature-specific options are
 * gated on the corresponding config flag.  opt_prof_active alone uses the
 * locked generator because it is mutable at runtime (see ctl_mtx comment
 * at the top of this file).
 */
CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t)
CTL_RO_NL_GEN(opt_narenas, opt_narenas, size_t)
CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t)
CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, bool)
CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool)
CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool)
CTL_RO_NL_CGEN(config_tcache, opt_tcache, opt_tcache, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *)
CTL_RO_CGEN(config_prof, opt_prof_active, opt_prof_active, bool) /* Mutable. */
CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t)
CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool)
1078
1079/******************************************************************************/
1080
/* "arenas.bin.<i>.*": per-bin size-class metadata, indexed by mib[2]. */
CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t)
CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t)
CTL_RO_NL_GEN(arenas_bin_i_run_size, arena_bin_info[mib[2]].run_size, size_t)
1084const ctl_node_t *
1085arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i)
1086{
1087
1088	if (i > NBINS)
1089		return (NULL);
1090	return (super_arenas_bin_i_node);
1091}
1092
/* "arenas.lrun.<i>.size": large run size class i spans (i+1) pages. */
CTL_RO_NL_GEN(arenas_lrun_i_size, ((mib[2]+1) << PAGE_SHIFT), size_t)
1094const ctl_node_t *
1095arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i)
1096{
1097
1098	if (i > nlclasses)
1099		return (NULL);
1100	return (super_arenas_lrun_i_node);
1101}
1102
/* "arenas.narenas": number of arena slots. */
CTL_RO_NL_GEN(arenas_narenas, narenas, unsigned)
1104
/*
 * mallctl "arenas.initialized": fill oldp with one bool per arena
 * indicating whether ctl_stats considers it initialized.  When *oldlenp
 * disagrees with narenas * sizeof(bool), as many elements as fit are still
 * copied out and EINVAL is returned.  ctl_mtx protects ctl_stats.
 *
 * NOTE(review): oldp and oldlenp are dereferenced without NULL checks;
 * callers appear to be expected to always supply both — confirm against
 * the mallctl dispatch code before relying on this handler with a NULL
 * oldp.
 */
static int
arenas_initialized_ctl(const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{
	int ret;
	unsigned nread, i;

	malloc_mutex_lock(&ctl_mtx);
	READONLY();
	if (*oldlenp != narenas * sizeof(bool)) {
		ret = EINVAL;
		/* Copy out as many elements as the caller's buffer holds. */
		nread = (*oldlenp < narenas * sizeof(bool))
		    ? (*oldlenp / sizeof(bool)) : narenas;
	} else {
		ret = 0;
		nread = narenas;
	}

	for (i = 0; i < nread; i++)
		((bool *)oldp)[i] = ctl_stats.arenas[i].initialized;

RETURN:
	malloc_mutex_unlock(&ctl_mtx);
	return (ret);
}
1130
/* "arenas.*": global size-class and layout constants. */
CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t)
CTL_RO_NL_GEN(arenas_pagesize, PAGE_SIZE, size_t)
CTL_RO_NL_GEN(arenas_chunksize, chunksize, size_t)
CTL_RO_NL_CGEN(config_tcache, arenas_tcache_max, tcache_maxclass, size_t)
CTL_RO_NL_GEN(arenas_nbins, NBINS, unsigned)
CTL_RO_NL_CGEN(config_tcache, arenas_nhbins, nhbins, unsigned)
CTL_RO_NL_GEN(arenas_nlruns, nlclasses, size_t)
1138
1139static int
1140arenas_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
1141    void *newp, size_t newlen)
1142{
1143	int ret;
1144	unsigned arena;
1145
1146	WRITEONLY();
1147	arena = UINT_MAX;
1148	WRITE(arena, unsigned);
1149	if (newp != NULL && arena >= narenas) {
1150		ret = EFAULT;
1151		goto RETURN;
1152	} else {
1153		arena_t *tarenas[narenas];
1154
1155		malloc_mutex_lock(&arenas_lock);
1156		memcpy(tarenas, arenas, sizeof(arena_t *) * narenas);
1157		malloc_mutex_unlock(&arenas_lock);
1158
1159		if (arena == UINT_MAX) {
1160			unsigned i;
1161			for (i = 0; i < narenas; i++) {
1162				if (tarenas[i] != NULL)
1163					arena_purge_all(tarenas[i]);
1164			}
1165		} else {
1166			assert(arena < narenas);
1167			if (tarenas[arena] != NULL)
1168				arena_purge_all(tarenas[arena]);
1169		}
1170	}
1171
1172	ret = 0;
1173RETURN:
1174	return (ret);
1175}
1176
1177/******************************************************************************/
1178
/*
 * mallctl "prof.active": read, and optionally toggle, opt_prof_active.
 * ctl_mtx serializes writers (see the ctl_mtx comment at the top of this
 * file); the old value is captured before any write so a combined
 * read+write returns the previous setting.
 */
static int
prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
	int ret;
	bool oldval;

	/* The node is absent unless profiling was compiled in. */
	if (config_prof == false)
		return (ENOENT);

	malloc_mutex_lock(&ctl_mtx); /* Protect opt_prof_active. */
	oldval = opt_prof_active;
	if (newp != NULL) {
		/*
		 * The memory barriers will tend to make opt_prof_active
		 * propagate faster on systems with weak memory ordering.
		 */
		mb_write();
		WRITE(opt_prof_active, bool);
		mb_write();
	}
	READ(oldval, bool);

	ret = 0;
RETURN:
	malloc_mutex_unlock(&ctl_mtx);
	return (ret);
}
1207
1208static int
1209prof_dump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
1210    void *newp, size_t newlen)
1211{
1212	int ret;
1213	const char *filename = NULL;
1214
1215	if (config_prof == false)
1216		return (ENOENT);
1217
1218	WRITEONLY();
1219	WRITE(filename, const char *);
1220
1221	if (prof_mdump(filename)) {
1222		ret = EFAULT;
1223		goto RETURN;
1224	}
1225
1226	ret = 0;
1227RETURN:
1228	return (ret);
1229}
1230
/* "prof.interval": current profile dump interval. */
CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t)
1232
1233/******************************************************************************/
1234
/*
 * "stats.*" leaves.  chunks.* and the per-arena values read the ctl_stats
 * snapshot (refreshed via the "epoch" mallctl, under ctl_mtx); huge.*
 * read the huge_* globals directly.  Per-arena nodes index ctl_stats by
 * mib[2] (arena) and, for bins, mib[4] (bin).
 */
CTL_RO_CGEN(config_stats, stats_chunks_current, ctl_stats.chunks.current,
    size_t)
CTL_RO_CGEN(config_stats, stats_chunks_total, ctl_stats.chunks.total, uint64_t)
CTL_RO_CGEN(config_stats, stats_chunks_high, ctl_stats.chunks.high, size_t)
CTL_RO_CGEN(config_stats, stats_huge_allocated, huge_allocated, size_t)
CTL_RO_CGEN(config_stats, stats_huge_nmalloc, huge_nmalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_huge_ndalloc, huge_ndalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated,
    ctl_stats.arenas[mib[2]].allocated_small, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc,
    ctl_stats.arenas[mib[2]].nmalloc_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_ndalloc,
    ctl_stats.arenas[mib[2]].ndalloc_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_nrequests,
    ctl_stats.arenas[mib[2]].nrequests_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated,
    ctl_stats.arenas[mib[2]].astats.allocated_large, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc,
    ctl_stats.arenas[mib[2]].astats.nmalloc_large, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc,
    ctl_stats.arenas[mib[2]].astats.ndalloc_large, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests,
    ctl_stats.arenas[mib[2]].astats.nrequests_large, uint64_t)

/* Per-bin stats; fill/flush counters additionally require tcache support. */
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_allocated,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].allocated, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].nmalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].ndalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].nrequests, uint64_t)
CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nfills,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].nfills, uint64_t)
CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nflushes,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].nflushes, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nruns,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].nruns, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreruns,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].reruns, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curruns,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].curruns, size_t)
1277
1278const ctl_node_t *
1279stats_arenas_i_bins_j_index(const size_t *mib, size_t miblen, size_t j)
1280{
1281
1282	if (j > NBINS)
1283		return (NULL);
1284	return (super_stats_arenas_i_bins_j_node);
1285}
1286
/* Per-arena large-run stats, indexed by arena (mib[2]) and class (mib[4]). */
CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nmalloc,
    ctl_stats.arenas[mib[2]].lstats[mib[4]].nmalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_ndalloc,
    ctl_stats.arenas[mib[2]].lstats[mib[4]].ndalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nrequests,
    ctl_stats.arenas[mib[2]].lstats[mib[4]].nrequests, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_curruns,
    ctl_stats.arenas[mib[2]].lstats[mib[4]].curruns, size_t)
1295
1296const ctl_node_t *
1297stats_arenas_i_lruns_j_index(const size_t *mib, size_t miblen, size_t j)
1298{
1299
1300	if (j > nlclasses)
1301		return (NULL);
1302	return (super_stats_arenas_i_lruns_j_node);
1303}
1304
/*
 * Per-arena summary stats.  nthreads/pactive/pdirty use the locked
 * generator (CTL_RO_GEN) since ctl_stats.* is ctl_mtx-protected.
 */
CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned)
CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t)
CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
    ctl_stats.arenas[mib[2]].astats.mapped, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_npurge,
    ctl_stats.arenas[mib[2]].astats.npurge, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise,
    ctl_stats.arenas[mib[2]].astats.nmadvise, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_purged,
    ctl_stats.arenas[mib[2]].astats.purged, uint64_t)
1316
1317const ctl_node_t *
1318stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i)
1319{
1320	const ctl_node_t * ret;
1321
1322	malloc_mutex_lock(&ctl_mtx);
1323	if (ctl_stats.arenas[i].initialized == false) {
1324		ret = NULL;
1325		goto RETURN;
1326	}
1327
1328	ret = super_stats_arenas_i_node;
1329RETURN:
1330	malloc_mutex_unlock(&ctl_mtx);
1331	return (ret);
1332}
1333
/* Global allocation totals from the ctl_stats snapshot. */
CTL_RO_CGEN(config_stats, stats_cactive, &stats_cactive, size_t *)
CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats.allocated, size_t)
CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t)
CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t)
1338