#include "test/jemalloc_test.h"

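/*
 * Disable junk filling so that the very large allocations exercised below are
 * not written to in their entirety.
 */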
#ifdef JEMALLOC_FILL
const char *malloc_conf = "junk:false";
#endif

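/* Read an unsigned value via the named mallctl. */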
static unsigned
get_nsizes_impl(const char *cmd)
{
	unsigned ret;
	size_t z;

	z = sizeof(unsigned);
	assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0,
	    "Unexpected mallctl(\"%s\", ...) failure", cmd);

	return (ret);
}

static unsigned
get_nhuge(void)
{

	return (get_nsizes_impl("arenas.nhchunks"));
}

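/*
 * Translate cmd to a mib, overwrite the size class index (element 2), and
 * read the resulting size_t value.
 */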
static size_t
get_size_impl(const char *cmd, size_t ind)
{
	size_t ret;
	size_t z;
	size_t mib[4];
	size_t miblen = 4;

	z = sizeof(size_t);
	assert_d_eq(mallctlnametomib(cmd, mib, &miblen),
	    0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
	mib[2] = ind;
	z = sizeof(size_t);
	assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0),
	    0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);

	return (ret);
}

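/* Size of the ind'th huge size class. */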
static size_t
get_huge_size(size_t ind)
{

	return (get_size_impl("arenas.hchunk.0.size", ind));
}

/*
 * On systems which can't merge extents, tests that call this function generate
 * a lot of dirty memory very quickly.  Purging between cycles mitigates
 * potential OOM on e.g. 32-bit Windows.
 */
static void
purge(void)
{

	assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
	    "Unexpected mallctl error");
}

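/*
 * Requests larger than the maximum huge size class, or so large that internal
 * size computations would overflow, must fail rather than wrap around.
 */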
TEST_BEGIN(test_overflow)
{
	size_t hugemax;

	hugemax = get_huge_size(get_nhuge()-1);

	assert_ptr_null(mallocx(hugemax+1, 0),
	    "Expected OOM for mallocx(size=%#zx, 0)", hugemax+1);

	assert_ptr_null(mallocx(ZU(PTRDIFF_MAX)+1, 0),
	    "Expected OOM for mallocx(size=%#zx, 0)", ZU(PTRDIFF_MAX)+1);

	assert_ptr_null(mallocx(SIZE_T_MAX, 0),
	    "Expected OOM for mallocx(size=%#zx, 0)", SIZE_T_MAX);

	assert_ptr_null(mallocx(1, MALLOCX_ALIGN(ZU(PTRDIFF_MAX)+1)),
	    "Expected OOM for mallocx(size=1, MALLOCX_ALIGN(%#zx))",
	    ZU(PTRDIFF_MAX)+1);
}
TEST_END

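/* Requests that would exhaust the virtual address space must report OOM. */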
TEST_BEGIN(test_oom)
{
	size_t hugemax;
	bool oom;
	void *ptrs[3];
	unsigned i;

	/*
	 * It should be impossible to allocate three objects that each consume
	 * nearly half the virtual address space.
	 */
	hugemax = get_huge_size(get_nhuge()-1);
	oom = false;
	for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) {
		ptrs[i] = mallocx(hugemax, 0);
		if (ptrs[i] == NULL)
			oom = true;
	}
	assert_true(oom,
	    "Expected OOM during series of calls to mallocx(size=%zu, 0)",
	    hugemax);
	for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) {
		if (ptrs[i] != NULL)
			dallocx(ptrs[i], 0);
	}
	purge();

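	/*
	 * Requests whose size and/or alignment is half the address space
	 * cannot be satisfied.
	 */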
#if LG_SIZEOF_PTR == 3
	assert_ptr_null(mallocx(0x8000000000000000ULL,
	    MALLOCX_ALIGN(0x8000000000000000ULL)),
	    "Expected OOM for mallocx()");
	assert_ptr_null(mallocx(0x8000000000000000ULL,
	    MALLOCX_ALIGN(0x80000000)),
	    "Expected OOM for mallocx()");
#else
	assert_ptr_null(mallocx(0x80000000UL, MALLOCX_ALIGN(0x80000000UL)),
	    "Expected OOM for mallocx()");
#endif
}
TEST_END

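/*
 * For each size class, check that nallocx(), mallocx(), and sallocx() agree
 * on the usable size, with and without MALLOCX_ZERO.
 */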
TEST_BEGIN(test_basic)
{
#define	MAXSZ (((size_t)1) << 23)
	size_t sz;

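	/* Advance sz to one byte past each size class's usable size. */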
	for (sz = 1; sz < MAXSZ; sz = nallocx(sz, 0) + 1) {
		size_t nsz, rsz;
		void *p;
		nsz = nallocx(sz, 0);
		assert_zu_ne(nsz, 0, "Unexpected nallocx() error");
		p = mallocx(sz, 0);
		assert_ptr_not_null(p,
		    "Unexpected mallocx(size=%zx, flags=0) error", sz);
		rsz = sallocx(p, 0);
		assert_zu_ge(rsz, sz, "Real size smaller than expected");
		assert_zu_eq(nsz, rsz, "nallocx()/sallocx() size mismatch");
		dallocx(p, 0);

		p = mallocx(sz, 0);
		assert_ptr_not_null(p,
		    "Unexpected mallocx(size=%zx, flags=0) error", sz);
		dallocx(p, 0);

		nsz = nallocx(sz, MALLOCX_ZERO);
		assert_zu_ne(nsz, 0, "Unexpected nallocx() error");
		p = mallocx(sz, MALLOCX_ZERO);
		assert_ptr_not_null(p,
		    "Unexpected mallocx(size=%zx, flags=MALLOCX_ZERO) error",
		    nsz);
		rsz = sallocx(p, 0);
		assert_zu_eq(nsz, rsz, "nallocx()/sallocx() rsize mismatch");
		dallocx(p, 0);
		purge();
	}
#undef MAXSZ
}
TEST_END

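/*
 * Check that MALLOCX_ALIGN produces properly aligned allocations, and that
 * nallocx()/sallocx() agree, across a range of alignments and sizes.
 */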
TEST_BEGIN(test_alignment_and_size)
{
#define	MAXALIGN (((size_t)1) << 23)
#define	NITER 4
	size_t nsz, rsz, sz, alignment, total;
	unsigned i;
	void *ps[NITER];

	for (i = 0; i < NITER; i++)
		ps[i] = NULL;

	for (alignment = 8;
	    alignment <= MAXALIGN;
	    alignment <<= 1) {
		total = 0;
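		/*
		 * Sample sizes below 3*alignment, stepping by a fraction of
		 * the alignment so that unaligned sizes are also hit.
		 */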
		for (sz = 1;
		    sz < 3 * alignment && sz < (1U << 31);
		    sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {
			for (i = 0; i < NITER; i++) {
				nsz = nallocx(sz, MALLOCX_ALIGN(alignment) |
				    MALLOCX_ZERO);
				assert_zu_ne(nsz, 0,
				    "nallocx() error for alignment=%zu, "
				    "size=%zu (%#zx)", alignment, sz, sz);
				ps[i] = mallocx(sz, MALLOCX_ALIGN(alignment) |
				    MALLOCX_ZERO);
				assert_ptr_not_null(ps[i],
				    "mallocx() error for alignment=%zu, "
				    "size=%zu (%#zx)", alignment, sz, sz);
				rsz = sallocx(ps[i], 0);
				assert_zu_ge(rsz, sz,
				    "Real size smaller than expected for "
				    "alignment=%zu, size=%zu", alignment, sz);
				assert_zu_eq(nsz, rsz,
				    "nallocx()/sallocx() size mismatch for "
				    "alignment=%zu, size=%zu", alignment, sz);
				assert_ptr_null(
				    (void *)((uintptr_t)ps[i] & (alignment-1)),
				    "%p inadequately aligned for"
				    " alignment=%zu, size=%zu", ps[i],
				    alignment, sz);
				total += rsz;
				if (total >= (MAXALIGN << 1))
					break;
			}
			for (i = 0; i < NITER; i++) {
				if (ps[i] != NULL) {
					dallocx(ps[i], 0);
					ps[i] = NULL;
				}
			}
		}
		purge();
	}
#undef MAXALIGN
#undef NITER
}
TEST_END

int
main(void)
{

	return (test(
	    test_overflow,
	    test_oom,
	    test_basic,
	    test_alignment_and_size));
}