#include "test/jemalloc_test.h"

const char *malloc_conf =
    /* Use smallest possible chunk size. */
    "lg_chunk:0"
    /* Immediately purge to minimize fragmentation. */
    ",lg_dirty_mult:-1"
    ",decay_time:-1"
    ;

/*
 * Size class that is a divisor of the page size, ideally 4+ regions per run.
 */
#if LG_PAGE <= 14
#define	SZ	(ZU(1) << (LG_PAGE - 2))
#else
#define	SZ	4096
#endif

/*
 * Number of chunks to consume at high water mark.  Should be at least 2 so
 * that the case in which mmap()ed memory grows downward is also exercised.
 */
#define	NCHUNKS	8

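/* Return the index of the bin whose size class is exactly SZ. */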
static unsigned
binind_compute(void)
{
	size_t sz;
	unsigned nbins, i;

	sz = sizeof(nbins);
	assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &sz, NULL, 0), 0,
	    "Unexpected mallctl failure");

	for (i = 0; i < nbins; i++) {
		size_t mib[4];
		size_t miblen = sizeof(mib)/sizeof(size_t);
		size_t size;

		assert_d_eq(mallctlnametomib("arenas.bin.0.size", mib,
		    &miblen), 0, "Unexpected mallctlnametomib failure");
		mib[2] = (size_t)i;

		sz = sizeof(size);
		assert_d_eq(mallctlbymib(mib, miblen, (void *)&size, &sz, NULL,
		    0), 0, "Unexpected mallctlbymib failure");
		if (size == SZ)
			return (i);
	}

	test_fail("Unable to find a bin with size class SZ");
	return (0);
}

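/* Number of regions per run for the bin whose size class is SZ. */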
static size_t
nregs_per_run_compute(void)
{
	uint32_t nregs;
	size_t sz;
	unsigned binind = binind_compute();
	size_t mib[4];
	size_t miblen = sizeof(mib)/sizeof(size_t);

	assert_d_eq(mallctlnametomib("arenas.bin.0.nregs", mib, &miblen), 0,
	    "Unexpected mallctlnametomib failure");
	mib[2] = (size_t)binind;
	sz = sizeof(nregs);
	assert_d_eq(mallctlbymib(mib, miblen, (void *)&nregs, &sz, NULL,
	    0), 0, "Unexpected mallctlbymib failure");
	return (nregs);
}

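/* Number of pages per run for the bin whose size class is SZ. */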
static size_t
npages_per_run_compute(void)
{
	size_t sz;
	unsigned binind = binind_compute();
	size_t mib[4];
	size_t miblen = sizeof(mib)/sizeof(size_t);
	size_t run_size;

	assert_d_eq(mallctlnametomib("arenas.bin.0.run_size", mib, &miblen), 0,
	    "Unexpected mallctlnametomib failure");
	mib[2] = (size_t)binind;
	sz = sizeof(run_size);
	assert_d_eq(mallctlbymib(mib, miblen, (void *)&run_size, &sz, NULL,
	    0), 0, "Unexpected mallctlbymib failure");
	return (run_size >> LG_PAGE);
}

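/* Usable pages per chunk, i.e. excluding the map_bias header pages. */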
static size_t
npages_per_chunk_compute(void)
{

	return ((chunksize >> LG_PAGE) - map_bias);
}

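/* Number of complete runs of the SZ bin that fit in one chunk. */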
static size_t
nruns_per_chunk_compute(void)
{

	return (npages_per_chunk_compute() / npages_per_run_compute());
}

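/* Create a new arena via the arenas.extend mallctl and return its index. */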
static unsigned
arenas_extend_mallctl(void)
{
	unsigned arena_ind;
	size_t sz;

	sz = sizeof(arena_ind);
	assert_d_eq(mallctl("arenas.extend", (void *)&arena_ind, &sz, NULL, 0),
	    0, "Error in arenas.extend");

	return (arena_ind);
}

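/* Discard all of an arena's allocations via the arena.<i>.reset mallctl. */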
static void
arena_reset_mallctl(unsigned arena_ind)
{
	size_t mib[3];
	size_t miblen = sizeof(mib)/sizeof(size_t);

	assert_d_eq(mallctlnametomib("arena.0.reset", mib, &miblen), 0,
	    "Unexpected mallctlnametomib() failure");
	mib[1] = (size_t)arena_ind;
	assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
	    "Unexpected mallctlbymib() failure");
}

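/*
 * Fill NCHUNKS chunks' worth of runs with SZ-sized regions, free all but one
 * region per run, then refill and verify that each new allocation lands in
 * the region that was freed, i.e. that the arena packs allocations back into
 * partially full runs.
 */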
TEST_BEGIN(test_pack)
{
	unsigned arena_ind = arenas_extend_mallctl();
	size_t nregs_per_run = nregs_per_run_compute();
	size_t nruns_per_chunk = nruns_per_chunk_compute();
	size_t nruns = nruns_per_chunk * NCHUNKS;
	size_t nregs = nregs_per_run * nruns;
	VARIABLE_ARRAY(void *, ptrs, nregs);
	size_t i, j, offset;

	/* Fill matrix. */
	for (i = offset = 0; i < nruns; i++) {
		for (j = 0; j < nregs_per_run; j++) {
			void *p = mallocx(SZ, MALLOCX_ARENA(arena_ind) |
			    MALLOCX_TCACHE_NONE);
			assert_ptr_not_null(p,
			    "Unexpected mallocx(%zu, MALLOCX_ARENA(%u) |"
			    " MALLOCX_TCACHE_NONE) failure, run=%zu, reg=%zu",
			    SZ, arena_ind, i, j);
			ptrs[(i * nregs_per_run) + j] = p;
		}
	}

	/*
	 * Free all but one region of each run, but rotate which region is
	 * preserved, so that subsequent allocations exercise the within-run
	 * layout policy.
	 */
	for (i = offset = 0;
	    i < nruns;
	    i++, offset = (offset + 1) % nregs_per_run) {
		for (j = 0; j < nregs_per_run; j++) {
			void *p = ptrs[(i * nregs_per_run) + j];
			if (offset == j)
				continue;
			dallocx(p, MALLOCX_ARENA(arena_ind) |
			    MALLOCX_TCACHE_NONE);
		}
	}

	/*
	 * Logically refill matrix, skipping preserved regions and verifying
	 * that the matrix is unmodified.
	 */
	for (i = offset = 0;
	    i < nruns;
	    i++, offset = (offset + 1) % nregs_per_run) {
		for (j = 0; j < nregs_per_run; j++) {
			void *p;

			if (offset == j)
				continue;
			p = mallocx(SZ, MALLOCX_ARENA(arena_ind) |
			    MALLOCX_TCACHE_NONE);
			assert_ptr_eq(p, ptrs[(i * nregs_per_run) + j],
			    "Unexpected refill discrepancy, run=%zu, reg=%zu\n",
			    i, j);
		}
	}

	/* Clean up. */
	arena_reset_mallctl(arena_ind);
}
TEST_END

int
main(void)
{

	return (test(
	    test_pack));
}