#include "prof_accum.h"

#ifdef JEMALLOC_PROF
/*
 * Configure jemalloc at load time (jemalloc reads the application-provided
 * malloc_conf string before first use): enable profiling with backtrace
 * accumulation, leave profiling inactive until the test activates it via
 * mallctl, and sample every allocation (lg_prof_sample:0).
 */
const char *malloc_conf =
    "prof:true,prof_accum:true,prof_active:false,lg_prof_sample:0";
#endif
7
/*
 * Replacement for jemalloc's profile dump open hook: send all dump output to
 * /dev/null.  The test only exercises the dump machinery; the file contents
 * are irrelevant.  Both parameters are unused but required by the hook's
 * signature.
 */
static int
prof_dump_open_intercept(bool propagate_err, const char *filename)
{
	int result = open("/dev/null", O_WRONLY);

	assert_d_ne(result, -1, "Unexpected open() failure");
	return (result);
}
18
19static void *
20alloc_from_permuted_backtrace(unsigned thd_ind, unsigned iteration)
21{
22
23	return (alloc_0(thd_ind*NALLOCS_PER_THREAD + iteration));
24}
25
26static void *
27thd_start(void *varg)
28{
29	unsigned thd_ind = *(unsigned *)varg;
30	size_t bt_count_prev, bt_count;
31	unsigned i_prev, i;
32
33	i_prev = 0;
34	bt_count_prev = 0;
35	for (i = 0; i < NALLOCS_PER_THREAD; i++) {
36		void *p = alloc_from_permuted_backtrace(thd_ind, i);
37		dallocx(p, 0);
38		if (i % DUMP_INTERVAL == 0) {
39			assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0),
40			    0, "Unexpected error while dumping heap profile");
41		}
42
43		if (i % BT_COUNT_CHECK_INTERVAL == 0 ||
44		    i+1 == NALLOCS_PER_THREAD) {
45			bt_count = prof_bt_count();
46			assert_zu_le(bt_count_prev+(i-i_prev), bt_count,
47			    "Expected larger backtrace count increase");
48			i_prev = i;
49			bt_count_prev = bt_count;
50		}
51	}
52
53	return (NULL);
54}
55
56TEST_BEGIN(test_idump)
57{
58	bool active;
59	thd_t thds[NTHREADS];
60	unsigned thd_args[NTHREADS];
61	unsigned i;
62
63	test_skip_if(!config_prof);
64
65	active = true;
66	assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)),
67	    0, "Unexpected mallctl failure while activating profiling");
68
69	prof_dump_open = prof_dump_open_intercept;
70
71	for (i = 0; i < NTHREADS; i++) {
72		thd_args[i] = i;
73		thd_create(&thds[i], thd_start, (void *)&thd_args[i]);
74	}
75	for (i = 0; i < NTHREADS; i++)
76		thd_join(thds[i], NULL);
77}
78TEST_END
79
80int
81main(void)
82{
83
84	return (test(
85	    test_idump));
86}
87