#define	JEMALLOC_STATS_C_
#include "jemalloc/internal/jemalloc_internal.h"

#define	CTL_GET(n, v, t) do {						\
	size_t sz = sizeof(t);						\
	xmallctl(n, v, &sz, NULL, 0);					\
} while (0)

#define	CTL_I_GET(n, v, t) do {						\
	size_t mib[6];							\
	size_t miblen = sizeof(mib) / sizeof(size_t);			\
	size_t sz = sizeof(t);						\
	xmallctlnametomib(n, mib, &miblen);				\
	mib[2] = i;							\
	xmallctlbymib(mib, miblen, v, &sz, NULL, 0);			\
} while (0)

#define	CTL_J_GET(n, v, t) do {						\
	size_t mib[6];							\
	size_t miblen = sizeof(mib) / sizeof(size_t);			\
	size_t sz = sizeof(t);						\
	xmallctlnametomib(n, mib, &miblen);				\
	mib[2] = j;							\
	xmallctlbymib(mib, miblen, v, &sz, NULL, 0);			\
} while (0)

#define	CTL_IJ_GET(n, v, t) do {					\
	size_t mib[6];							\
	size_t miblen = sizeof(mib) / sizeof(size_t);			\
	size_t sz = sizeof(t);						\
	xmallctlnametomib(n, mib, &miblen);				\
	mib[2] = i;							\
	mib[4] = j;							\
	xmallctlbymib(mib, miblen, v, &sz, NULL, 0);			\
} while (0)
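
/*
 * Convenience wrappers around the mallctl*() machinery.  CTL_GET() reads a
 * control by name.  The I/J/IJ variants first translate the name (written
 * with literal "0" index components) into a MIB, then overwrite MIB
 * component 2 with the arena index "i" and/or component 4 with the bin or
 * large-run index "j"; both indices must be in scope at the call site.  For
 * example, with i and j in scope,
 *
 *	CTL_IJ_GET("stats.arenas.0.bins.0.nruns", &nruns, uint64_t);
 *
 * reads "stats.arenas.<i>.bins.<j>.nruns" (see stats_arena_bins_print()).
 */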

/******************************************************************************/
/* Data. */

bool	opt_stats_print = false;

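/*
 * stats_cactive approximates the number of bytes in active pages; its address
 * is exported through the "stats.cactive" mallctl, and stats_print() below
 * samples it with atomic_read_z().
 */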
size_t	stats_cactive = 0;

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void	stats_arena_bins_print(void (*write_cb)(void *, const char *),
    void *cbopaque, unsigned i);
static void	stats_arena_lruns_print(void (*write_cb)(void *, const char *),
    void *cbopaque, unsigned i);
static void	stats_arena_print(void (*write_cb)(void *, const char *),
    void *cbopaque, unsigned i, bool bins, bool large);

/******************************************************************************/

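/*
 * Print per-size-class statistics for the small bins of arena i.  The column
 * set depends on "config.tcache": the nrequests/nfills/nflushes columns are
 * only emitted when thread caching is compiled in.  Size classes for which no
 * runs were ever created are collapsed into "[first..last]" (or "[first]")
 * gap markers instead of all-zero rows.
 */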
static void
stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
    unsigned i)
{
	size_t page;
	bool config_tcache;
	unsigned nbins, j, gap_start;

	CTL_GET("arenas.page", &page, size_t);

	CTL_GET("config.tcache", &config_tcache, bool);
	if (config_tcache) {
		malloc_cprintf(write_cb, cbopaque,
		    "bins:     bin  size regs pgs    allocated      nmalloc"
		    "      ndalloc    nrequests       nfills     nflushes"
		    "      newruns       reruns      curruns\n");
	} else {
		malloc_cprintf(write_cb, cbopaque,
		    "bins:     bin  size regs pgs    allocated      nmalloc"
		    "      ndalloc      newruns       reruns      curruns\n");
	}
	CTL_GET("arenas.nbins", &nbins, unsigned);
	for (j = 0, gap_start = UINT_MAX; j < nbins; j++) {
		uint64_t nruns;

		CTL_IJ_GET("stats.arenas.0.bins.0.nruns", &nruns, uint64_t);
		if (nruns == 0) {
			if (gap_start == UINT_MAX)
				gap_start = j;
		} else {
			size_t reg_size, run_size, allocated;
			uint32_t nregs;
			uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes;
			uint64_t reruns;
			size_t curruns;

			if (gap_start != UINT_MAX) {
				if (j > gap_start + 1) {
					/* Gap of more than one size class. */
					malloc_cprintf(write_cb, cbopaque,
					    "[%u..%u]\n", gap_start,
					    j - 1);
				} else {
					/* Gap of one size class. */
					malloc_cprintf(write_cb, cbopaque,
					    "[%u]\n", gap_start);
				}
				gap_start = UINT_MAX;
			}
			CTL_J_GET("arenas.bin.0.size", &reg_size, size_t);
			CTL_J_GET("arenas.bin.0.nregs", &nregs, uint32_t);
			CTL_J_GET("arenas.bin.0.run_size", &run_size, size_t);
			CTL_IJ_GET("stats.arenas.0.bins.0.allocated",
			    &allocated, size_t);
			CTL_IJ_GET("stats.arenas.0.bins.0.nmalloc",
			    &nmalloc, uint64_t);
			CTL_IJ_GET("stats.arenas.0.bins.0.ndalloc",
			    &ndalloc, uint64_t);
			if (config_tcache) {
				CTL_IJ_GET("stats.arenas.0.bins.0.nrequests",
				    &nrequests, uint64_t);
				CTL_IJ_GET("stats.arenas.0.bins.0.nfills",
				    &nfills, uint64_t);
				CTL_IJ_GET("stats.arenas.0.bins.0.nflushes",
				    &nflushes, uint64_t);
			}
			CTL_IJ_GET("stats.arenas.0.bins.0.nreruns", &reruns,
			    uint64_t);
			CTL_IJ_GET("stats.arenas.0.bins.0.curruns", &curruns,
			    size_t);
			if (config_tcache) {
				malloc_cprintf(write_cb, cbopaque,
				    "%13u %5zu %4u %3zu %12zu %12"PRIu64
				    " %12"PRIu64" %12"PRIu64" %12"PRIu64
				    " %12"PRIu64" %12"PRIu64" %12"PRIu64
				    " %12zu\n",
				    j, reg_size, nregs, run_size / page,
				    allocated, nmalloc, ndalloc, nrequests,
				    nfills, nflushes, nruns, reruns, curruns);
			} else {
				malloc_cprintf(write_cb, cbopaque,
				    "%13u %5zu %4u %3zu %12zu %12"PRIu64
				    " %12"PRIu64" %12"PRIu64" %12"PRIu64
				    " %12zu\n",
				    j, reg_size, nregs, run_size / page,
				    allocated, nmalloc, ndalloc, nruns, reruns,
				    curruns);
			}
		}
	}
	if (gap_start != UINT_MAX) {
		if (j > gap_start + 1) {
			/* Gap of more than one size class. */
			malloc_cprintf(write_cb, cbopaque, "[%u..%u]\n",
			    gap_start, j - 1);
		} else {
			/* Gap of one size class. */
			malloc_cprintf(write_cb, cbopaque, "[%u]\n", gap_start);
		}
	}
}

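/*
 * Print per-size-class statistics for the large ("lrun") size classes of
 * arena i.  Size classes with no requests are collapsed into a "[n]" marker
 * giving the number of classes skipped.
 */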
static void
stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque,
    unsigned i)
{
	size_t page, nlruns, j;
	ssize_t gap_start;

	CTL_GET("arenas.page", &page, size_t);

	malloc_cprintf(write_cb, cbopaque,
	    "large:   size pages      nmalloc      ndalloc    nrequests"
	    "      curruns\n");
	CTL_GET("arenas.nlruns", &nlruns, size_t);
	for (j = 0, gap_start = -1; j < nlruns; j++) {
		uint64_t nmalloc, ndalloc, nrequests;
		size_t run_size, curruns;

		CTL_IJ_GET("stats.arenas.0.lruns.0.nmalloc", &nmalloc,
		    uint64_t);
		CTL_IJ_GET("stats.arenas.0.lruns.0.ndalloc", &ndalloc,
		    uint64_t);
		CTL_IJ_GET("stats.arenas.0.lruns.0.nrequests", &nrequests,
		    uint64_t);
		if (nrequests == 0) {
			if (gap_start == -1)
				gap_start = j;
		} else {
			CTL_J_GET("arenas.lrun.0.size", &run_size, size_t);
			CTL_IJ_GET("stats.arenas.0.lruns.0.curruns", &curruns,
			    size_t);
			if (gap_start != -1) {
				malloc_cprintf(write_cb, cbopaque, "[%zu]\n",
				    j - gap_start);
				gap_start = -1;
			}
			malloc_cprintf(write_cb, cbopaque,
			    "%13zu %5zu %12"PRIu64" %12"PRIu64" %12"PRIu64
			    " %12zu\n",
			    run_size, run_size / page, nmalloc, ndalloc,
			    nrequests, curruns);
		}
	}
	if (gap_start != -1)
		malloc_cprintf(write_cb, cbopaque, "[%zu]\n", j - gap_start);
}

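/*
 * Print the summary for arena i: assigned threads, dss precedence, dirty-page
 * purging counters, and allocated/nmalloc/ndalloc/nrequests totals broken out
 * into small, large, and huge classes.  When "bins" and/or "large" are true,
 * the corresponding per-size-class tables are appended via
 * stats_arena_bins_print() and stats_arena_lruns_print().
 */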
static void
stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
    unsigned i, bool bins, bool large)
{
	unsigned nthreads;
	const char *dss;
	size_t page, pactive, pdirty, mapped;
	uint64_t npurge, nmadvise, purged;
	size_t small_allocated;
	uint64_t small_nmalloc, small_ndalloc, small_nrequests;
	size_t large_allocated;
	uint64_t large_nmalloc, large_ndalloc, large_nrequests;
	size_t huge_allocated;
	uint64_t huge_nmalloc, huge_ndalloc, huge_nrequests;

	CTL_GET("arenas.page", &page, size_t);

	CTL_I_GET("stats.arenas.0.nthreads", &nthreads, unsigned);
	malloc_cprintf(write_cb, cbopaque,
	    "assigned threads: %u\n", nthreads);
	CTL_I_GET("stats.arenas.0.dss", &dss, const char *);
	malloc_cprintf(write_cb, cbopaque, "dss allocation precedence: %s\n",
	    dss);
	CTL_I_GET("stats.arenas.0.pactive", &pactive, size_t);
	CTL_I_GET("stats.arenas.0.pdirty", &pdirty, size_t);
	CTL_I_GET("stats.arenas.0.npurge", &npurge, uint64_t);
	CTL_I_GET("stats.arenas.0.nmadvise", &nmadvise, uint64_t);
	CTL_I_GET("stats.arenas.0.purged", &purged, uint64_t);
	malloc_cprintf(write_cb, cbopaque,
	    "dirty pages: %zu:%zu active:dirty, %"PRIu64" sweep%s,"
	    " %"PRIu64" madvise%s, %"PRIu64" purged\n",
	    pactive, pdirty, npurge, npurge == 1 ? "" : "s",
	    nmadvise, nmadvise == 1 ? "" : "s", purged);

	malloc_cprintf(write_cb, cbopaque,
	    "            allocated      nmalloc      ndalloc    nrequests\n");
	CTL_I_GET("stats.arenas.0.small.allocated", &small_allocated, size_t);
	CTL_I_GET("stats.arenas.0.small.nmalloc", &small_nmalloc, uint64_t);
	CTL_I_GET("stats.arenas.0.small.ndalloc", &small_ndalloc, uint64_t);
	CTL_I_GET("stats.arenas.0.small.nrequests", &small_nrequests, uint64_t);
	malloc_cprintf(write_cb, cbopaque,
	    "small:   %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n",
	    small_allocated, small_nmalloc, small_ndalloc, small_nrequests);
	CTL_I_GET("stats.arenas.0.large.allocated", &large_allocated, size_t);
	CTL_I_GET("stats.arenas.0.large.nmalloc", &large_nmalloc, uint64_t);
	CTL_I_GET("stats.arenas.0.large.ndalloc", &large_ndalloc, uint64_t);
	CTL_I_GET("stats.arenas.0.large.nrequests", &large_nrequests, uint64_t);
	malloc_cprintf(write_cb, cbopaque,
	    "large:   %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n",
	    large_allocated, large_nmalloc, large_ndalloc, large_nrequests);
	CTL_I_GET("stats.arenas.0.huge.allocated", &huge_allocated, size_t);
	CTL_I_GET("stats.arenas.0.huge.nmalloc", &huge_nmalloc, uint64_t);
	CTL_I_GET("stats.arenas.0.huge.ndalloc", &huge_ndalloc, uint64_t);
	CTL_I_GET("stats.arenas.0.huge.nrequests", &huge_nrequests, uint64_t);
	malloc_cprintf(write_cb, cbopaque,
	    "huge:    %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n",
	    huge_allocated, huge_nmalloc, huge_ndalloc, huge_nrequests);
	malloc_cprintf(write_cb, cbopaque,
	    "total:   %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n",
	    small_allocated + large_allocated + huge_allocated,
	    small_nmalloc + large_nmalloc + huge_nmalloc,
	    small_ndalloc + large_ndalloc + huge_ndalloc,
	    small_nrequests + large_nrequests + huge_nrequests);
	malloc_cprintf(write_cb, cbopaque, "active:  %12zu\n", pactive * page);
	CTL_I_GET("stats.arenas.0.mapped", &mapped, size_t);
	malloc_cprintf(write_cb, cbopaque, "mapped:  %12zu\n", mapped);

	if (bins)
		stats_arena_bins_print(write_cb, cbopaque, i);
	if (large)
		stats_arena_lruns_print(write_cb, cbopaque, i);
}

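/*
 * Top-level statistics printer.  All output goes through write_cb, with
 * cbopaque passed back on every call; malloc_cprintf() supplies a default
 * sink when write_cb is NULL.  Characters in opts suppress sections:
 * 'g' general/configuration info, 'm' merged arena stats, 'a' per-arena
 * stats, 'b' per-bin stats, and 'l' per-large-class stats.
 *
 * A rough usage sketch, assuming the public malloc_stats_print() wrapper
 * (which forwards its arguments here):
 *
 *	malloc_stats_print(NULL, NULL, NULL);	print everything
 *	malloc_stats_print(NULL, NULL, "bl");	omit the per-bin and
 *						per-large-class tables
 */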
void
stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *opts)
{
	int err;
	uint64_t epoch;
	size_t u64sz;
	bool general = true;
	bool merged = true;
	bool unmerged = true;
	bool bins = true;
	bool large = true;

	/*
	 * Refresh stats, in case mallctl() was called by the application.
	 *
	 * Check for OOM here, since refreshing the ctl cache can trigger
	 * allocation.  In practice, none of the subsequent mallctl()-related
	 * calls in this function will cause OOM if this one succeeds.
	 */
	epoch = 1;
	u64sz = sizeof(uint64_t);
	err = je_mallctl("epoch", &epoch, &u64sz, &epoch, sizeof(uint64_t));
	if (err != 0) {
		if (err == EAGAIN) {
			malloc_write("<jemalloc>: Memory allocation failure in "
			    "mallctl(\"epoch\", ...)\n");
			return;
		}
		malloc_write("<jemalloc>: Failure in mallctl(\"epoch\", "
		    "...)\n");
		abort();
	}

	if (opts != NULL) {
		unsigned i;

		for (i = 0; opts[i] != '\0'; i++) {
			switch (opts[i]) {
			case 'g':
				general = false;
				break;
			case 'm':
				merged = false;
				break;
			case 'a':
				unmerged = false;
				break;
			case 'b':
				bins = false;
				break;
			case 'l':
				large = false;
				break;
			default:;
			}
		}
	}

	malloc_cprintf(write_cb, cbopaque,
	    "___ Begin jemalloc statistics ___\n");
	if (general) {
		int err;
		const char *cpv;
		bool bv;
		unsigned uv;
		ssize_t ssv;
		size_t sv, bsz, ssz, sssz, cpsz;

		bsz = sizeof(bool);
		ssz = sizeof(size_t);
		sssz = sizeof(ssize_t);
		cpsz = sizeof(const char *);

		CTL_GET("version", &cpv, const char *);
		malloc_cprintf(write_cb, cbopaque, "Version: %s\n", cpv);
		CTL_GET("config.debug", &bv, bool);
		malloc_cprintf(write_cb, cbopaque, "Assertions %s\n",
		    bv ? "enabled" : "disabled");

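		/*
		 * The OPT_WRITE_*() helpers probe each "opt.*" control and
		 * print it only when je_mallctl() succeeds, so options that
		 * are not compiled into this build are silently skipped.
		 */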
#define	OPT_WRITE_BOOL(n)						\
		if ((err = je_mallctl("opt."#n, &bv, &bsz, NULL, 0))	\
		    == 0) {						\
			malloc_cprintf(write_cb, cbopaque,		\
			    "  opt."#n": %s\n", bv ? "true" : "false");	\
		}
#define	OPT_WRITE_SIZE_T(n)						\
		if ((err = je_mallctl("opt."#n, &sv, &ssz, NULL, 0))	\
		    == 0) {						\
			malloc_cprintf(write_cb, cbopaque,		\
			"  opt."#n": %zu\n", sv);			\
		}
#define	OPT_WRITE_SSIZE_T(n)						\
		if ((err = je_mallctl("opt."#n, &ssv, &sssz, NULL, 0))	\
		    == 0) {						\
			malloc_cprintf(write_cb, cbopaque,		\
			    "  opt."#n": %zd\n", ssv);			\
		}
#define	OPT_WRITE_CHAR_P(n)						\
		if ((err = je_mallctl("opt."#n, &cpv, &cpsz, NULL, 0))	\
		    == 0) {						\
			malloc_cprintf(write_cb, cbopaque,		\
			    "  opt."#n": \"%s\"\n", cpv);		\
		}

		malloc_cprintf(write_cb, cbopaque,
		    "Run-time option settings:\n");
		OPT_WRITE_BOOL(abort)
		OPT_WRITE_SIZE_T(lg_chunk)
		OPT_WRITE_CHAR_P(dss)
		OPT_WRITE_SIZE_T(narenas)
		OPT_WRITE_SSIZE_T(lg_dirty_mult)
		OPT_WRITE_BOOL(stats_print)
		OPT_WRITE_BOOL(junk)
		OPT_WRITE_SIZE_T(quarantine)
		OPT_WRITE_BOOL(redzone)
		OPT_WRITE_BOOL(zero)
		OPT_WRITE_BOOL(utrace)
		OPT_WRITE_BOOL(valgrind)
		OPT_WRITE_BOOL(xmalloc)
		OPT_WRITE_BOOL(tcache)
		OPT_WRITE_SSIZE_T(lg_tcache_max)
		OPT_WRITE_BOOL(prof)
		OPT_WRITE_CHAR_P(prof_prefix)
		OPT_WRITE_BOOL(prof_active)
		OPT_WRITE_SSIZE_T(lg_prof_sample)
		OPT_WRITE_BOOL(prof_accum)
		OPT_WRITE_SSIZE_T(lg_prof_interval)
		OPT_WRITE_BOOL(prof_gdump)
		OPT_WRITE_BOOL(prof_final)
		OPT_WRITE_BOOL(prof_leak)

#undef OPT_WRITE_BOOL
#undef OPT_WRITE_SIZE_T
#undef OPT_WRITE_SSIZE_T
#undef OPT_WRITE_CHAR_P

		malloc_cprintf(write_cb, cbopaque, "CPUs: %u\n", ncpus);

		CTL_GET("arenas.narenas", &uv, unsigned);
		malloc_cprintf(write_cb, cbopaque, "Arenas: %u\n", uv);

		malloc_cprintf(write_cb, cbopaque, "Pointer size: %zu\n",
		    sizeof(void *));

		CTL_GET("arenas.quantum", &sv, size_t);
		malloc_cprintf(write_cb, cbopaque, "Quantum size: %zu\n", sv);

		CTL_GET("arenas.page", &sv, size_t);
		malloc_cprintf(write_cb, cbopaque, "Page size: %zu\n", sv);

		CTL_GET("opt.lg_dirty_mult", &ssv, ssize_t);
		if (ssv >= 0) {
			malloc_cprintf(write_cb, cbopaque,
			    "Min active:dirty page ratio per arena: %u:1\n",
			    (1U << ssv));
		} else {
			malloc_cprintf(write_cb, cbopaque,
			    "Min active:dirty page ratio per arena: N/A\n");
		}
		if ((err = je_mallctl("arenas.tcache_max", &sv, &ssz, NULL, 0))
		    == 0) {
			malloc_cprintf(write_cb, cbopaque,
			    "Maximum thread-cached size class: %zu\n", sv);
		}
		if ((err = je_mallctl("opt.prof", &bv, &bsz, NULL, 0)) == 0 &&
		    bv) {
			CTL_GET("opt.lg_prof_sample", &sv, size_t);
			malloc_cprintf(write_cb, cbopaque,
			    "Average profile sample interval: %"PRIu64
			    " (2^%zu)\n", (((uint64_t)1U) << sv), sv);

			CTL_GET("opt.lg_prof_interval", &ssv, ssize_t);
			if (ssv >= 0) {
				malloc_cprintf(write_cb, cbopaque,
				    "Average profile dump interval: %"PRIu64
				    " (2^%zd)\n",
				    (((uint64_t)1U) << ssv), ssv);
			} else {
				malloc_cprintf(write_cb, cbopaque,
				    "Average profile dump interval: N/A\n");
			}
		}
		CTL_GET("opt.lg_chunk", &sv, size_t);
		malloc_cprintf(write_cb, cbopaque, "Chunk size: %zu (2^%zu)\n",
		    (ZU(1) << sv), sv);
	}
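	/*
	 * When statistics are enabled, print process-wide totals and chunk
	 * counters, then (unless suppressed via opts) the merged arena
	 * summary followed by a section per initialized arena.
	 */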
	if (config_stats) {
		size_t *cactive;
		size_t allocated, active, mapped;
		size_t chunks_current, chunks_high;
		uint64_t chunks_total;

		CTL_GET("stats.cactive", &cactive, size_t *);
		CTL_GET("stats.allocated", &allocated, size_t);
		CTL_GET("stats.active", &active, size_t);
		CTL_GET("stats.mapped", &mapped, size_t);
		malloc_cprintf(write_cb, cbopaque,
		    "Allocated: %zu, active: %zu, mapped: %zu\n",
		    allocated, active, mapped);
		malloc_cprintf(write_cb, cbopaque,
		    "Current active ceiling: %zu\n", atomic_read_z(cactive));

		/* Print chunk stats. */
		CTL_GET("stats.chunks.total", &chunks_total, uint64_t);
		CTL_GET("stats.chunks.high", &chunks_high, size_t);
		CTL_GET("stats.chunks.current", &chunks_current, size_t);
		malloc_cprintf(write_cb, cbopaque, "chunks: nchunks   "
		    "highchunks    curchunks\n");
		malloc_cprintf(write_cb, cbopaque,
		    "  %13"PRIu64" %12zu %12zu\n",
		    chunks_total, chunks_high, chunks_current);

		if (merged) {
			unsigned narenas;

			CTL_GET("arenas.narenas", &narenas, unsigned);
			{
				VARIABLE_ARRAY(bool, initialized, narenas);
				size_t isz;
				unsigned i, ninitialized;

				isz = sizeof(bool) * narenas;
				xmallctl("arenas.initialized", initialized,
				    &isz, NULL, 0);
				for (i = ninitialized = 0; i < narenas; i++) {
					if (initialized[i])
						ninitialized++;
				}

				if (ninitialized > 1 || unmerged == false) {
					/* Print merged arena stats. */
					malloc_cprintf(write_cb, cbopaque,
					    "\nMerged arenas stats:\n");
					stats_arena_print(write_cb, cbopaque,
					    narenas, bins, large);
				}
			}
		}

		if (unmerged) {
			unsigned narenas;

			/* Print stats for each arena. */

			CTL_GET("arenas.narenas", &narenas, unsigned);
			{
				VARIABLE_ARRAY(bool, initialized, narenas);
				size_t isz;
				unsigned i;

				isz = sizeof(bool) * narenas;
				xmallctl("arenas.initialized", initialized,
				    &isz, NULL, 0);

				for (i = 0; i < narenas; i++) {
					if (initialized[i]) {
						malloc_cprintf(write_cb,
						    cbopaque,
						    "\narenas[%u]:\n", i);
						stats_arena_print(write_cb,
						    cbopaque, i, bins, large);
					}
				}
			}
		}
	}
	malloc_cprintf(write_cb, cbopaque, "--- End jemalloc statistics ---\n");
}