/* -*- Mode: C; tab-width: 8; c-basic-offset: 8 -*- */
/* vim:set softtabstop=8 shiftwidth=8: */
/*-
 * Copyright (C) 2006-2008 Jason Evans <jasone@FreeBSD.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _JEMALLOC_H_
#define _JEMALLOC_H_

/* grab size_t */
#ifdef _MSC_VER
#include <crtdefs.h>
#else
#include <stddef.h>
#endif

#ifdef __cplusplus
extern "C" {
#endif

typedef unsigned char jemalloc_bool;

extern const char	*_malloc_options;

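/*
 * Illustrative sketch (not part of the declarations above): an application can
 * define _malloc_options to tune run-time behavior before the first
 * allocation.  The set of recognized option letters is determined by
 * jemalloc.c; "A" (abort on error) and "J" (junk fill) are assumed here purely
 * for illustration.
 *
 *	const char *_malloc_options = "AJ";
 */
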
/*
 * jemalloc_stats() is not a stable interface.  When using jemalloc_stats_t, be
 * sure that the compiled results of jemalloc.c are in sync with this header
 * file.
 */
typedef struct {
	/*
	 * Run-time configuration settings.
	 */
	jemalloc_bool	opt_abort;	/* abort(3) on error? */
	jemalloc_bool	opt_junk;	/* Fill allocated/free memory with 0xa5/0x5a? */
	jemalloc_bool	opt_utrace;	/* Trace all allocation events? */
	jemalloc_bool	opt_sysv;	/* SysV semantics? */
	jemalloc_bool	opt_xmalloc;	/* abort(3) on OOM? */
	jemalloc_bool	opt_zero;	/* Fill allocated memory with 0x0? */
	size_t	narenas;	/* Number of arenas. */
	size_t	balance_threshold; /* Arena contention rebalance threshold. */
	size_t	quantum;	/* Allocation quantum. */
	size_t	small_max;	/* Max quantum-spaced allocation size. */
	size_t	large_max;	/* Max sub-chunksize allocation size. */
	size_t	chunksize;	/* Size of each virtual memory mapping. */
	size_t	dirty_max;	/* Max dirty pages per arena. */
	size_t	reserve_min;	/* RESERVE_CND_LOW condition threshold. */
	size_t	reserve_max;	/* Maximum reserve size before unmapping. */

	/*
	 * Current memory usage statistics.
	 */
	size_t	mapped;		/* Bytes mapped (not necessarily committed). */
	size_t	committed;	/* Bytes committed (readable/writable). */
	size_t	allocated;	/* Bytes allocated (in use by application). */
	size_t	dirty;		/* Bytes dirty (committed unused pages). */
	size_t	reserve_cur;	/* Current memory reserve. */
} jemalloc_stats_t;

#ifndef MOZ_MEMORY_DARWIN
void	*malloc(size_t size);
void	*valloc(size_t size);
void	*calloc(size_t num, size_t size);
void	*realloc(void *ptr, size_t size);
void	free(void *ptr);
#endif

int	posix_memalign(void **memptr, size_t alignment, size_t size);
void	*memalign(size_t alignment, size_t size);
size_t	malloc_usable_size(const void *ptr);
void	jemalloc_stats(jemalloc_stats_t *stats);

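/*
 * Example (illustrative sketch, not part of the interface): snapshot the
 * allocator statistics declared above and print a few fields.  The output
 * format is arbitrary.
 *
 *	jemalloc_stats_t stats;
 *
 *	jemalloc_stats(&stats);
 *	printf("mapped %zu, committed %zu, allocated %zu, dirty %zu\n",
 *	    stats.mapped, stats.committed, stats.allocated, stats.dirty);
 */
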
/* The x*() functions never return NULL. */
void	*xmalloc(size_t size);
void	*xcalloc(size_t num, size_t size);
void	*xrealloc(void *ptr, size_t size);
void	*xmemalign(size_t alignment, size_t size);

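/*
 * Example (illustrative sketch): since the x*() functions abort on OOM rather
 * than returning NULL, the usual allocation-failure check can be omitted.
 *
 *	char *buf = (char *)xmalloc(4096);
 *	memset(buf, 0xff, 4096);
 *	free(buf);
 */
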
/*
 * The allocator maintains a memory reserve that is used to satisfy allocation
 * requests when no additional memory can be acquired from the operating
 * system.  Under normal operating conditions, the reserve size is at least
 * reserve_min bytes.  If the reserve is depleted or insufficient to satisfy an
 * allocation request, then condition notifications are sent to one or more of
 * the registered callback functions:
 *
 *   RESERVE_CND_LOW: The reserve had to be used to satisfy an allocation
 *                    request, which dropped the reserve size below the
 *                    minimum.  The callee should try to free memory in order
 *                    to restore the reserve.
 *
 *   RESERVE_CND_CRIT: The reserve was not large enough to satisfy a pending
 *                     allocation request.  Some callee must free adequate
 *                     memory in order to prevent application failure (unless
 *                     the condition spontaneously desists due to concurrent
 *                     deallocation).
 *
 *   RESERVE_CND_FAIL: An allocation request could not be satisfied, despite all
 *                     attempts.  The allocator is about to terminate the
 *                     application.
 *
 * The order in which the callback functions are called is only loosely
 * specified: in the absence of interposing callback
 * registrations/unregistrations, enabled callbacks will be called in an
 * arbitrary round-robin order.
 *
 * Condition notifications are sent to callbacks only while conditions exist.
 * For example, just before the allocator sends a RESERVE_CND_LOW condition
 * notification to a callback, the reserve is in fact depleted.  However, due
 * to allocator concurrency, the reserve may have been restored by the time the
 * callback function executes.  Furthermore, if the reserve is restored at some
 * point during the delivery of condition notifications to callbacks, no
 * further deliveries will occur, since the condition no longer exists.
 *
 * Callback functions can freely call back into the allocator (i.e. the
 * allocator releases all internal resources before calling each callback
 * function), though allocation is discouraged, since recursive callbacks are
 * likely to result, which places extra burden on the application to avoid
 * deadlock.
 *
 * Callback functions must be thread-safe, since it is possible that multiple
 * threads will call into the same callback function concurrently.
 */

/* Memory reserve condition types. */
typedef enum {
	RESERVE_CND_LOW,
	RESERVE_CND_CRIT,
	RESERVE_CND_FAIL
} reserve_cnd_t;

/*
 * Reserve condition notification callback function type definition.
 *
 * Inputs:
 *   ctx: Opaque application data, as passed to reserve_cb_register().
 *   cnd: Condition type being delivered.
 *   size: Allocation request size for the allocation that caused the condition.
 */
typedef void reserve_cb_t(void *ctx, reserve_cnd_t cnd, size_t size);

/*
 * Register a callback function.
 *
 * Inputs:
 *   cb: Callback function pointer.
 *   ctx: Opaque application data, passed to cb().
 *
 * Output:
 *   ret: True if registration failed due to OOM; false on success.
 */
jemalloc_bool	reserve_cb_register(reserve_cb_t *cb, void *ctx);

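/*
 * Example (illustrative sketch): a minimal reserve callback and its
 * registration.  app_trim_caches() and app_ctx are hypothetical application
 * names, not part of this interface.
 *
 *	static void
 *	reserve_cb(void *ctx, reserve_cnd_t cnd, size_t size)
 *	{
 *
 *		switch (cnd) {
 *		case RESERVE_CND_LOW:
 *		case RESERVE_CND_CRIT:
 *			app_trim_caches(ctx, size);
 *			break;
 *		case RESERVE_CND_FAIL:
 *			break;
 *		}
 *	}
 *
 *	if (reserve_cb_register(reserve_cb, app_ctx))
 *		abort();
 */
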
/*
 * Unregister a callback function.
 *
 * Inputs:
 *   cb: Callback function pointer.
 *   ctx: Opaque application data, same as that passed to reserve_cb_register().
 *
 * Output:
 *   ret: False upon success, true if the {cb,ctx} registration could not be
 *        found.
 */
jemalloc_bool	reserve_cb_unregister(reserve_cb_t *cb, void *ctx);

/*
 * Get the current reserve size.
 *
 * ret: Current reserve size.
 */
size_t	reserve_cur_get(void);

/*
 * Get the minimum acceptable reserve size.  If the reserve drops below this
 * value, the RESERVE_CND_LOW condition notification is sent to the callbacks.
 *
 * ret: Minimum acceptable reserve size.
 */
size_t	reserve_min_get(void);

/*
 * Set the minimum acceptable reserve size.
 *
 * min: Reserve threshold.  This value may be internally rounded up.
 * ret: False if the reserve was successfully resized; true otherwise.  Note
 *      that failure to resize the reserve also results in a RESERVE_CND_LOW
 *      condition.
 */
jemalloc_bool	reserve_min_set(size_t min);

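/*
 * Example (illustrative sketch): grow the reserve minimum ahead of a phase
 * that must not fail on OOM, then restore the previous value.  The 8 MiB
 * figure and do_critical_work() are arbitrary, hypothetical choices.
 *
 *	size_t old_min = reserve_min_get();
 *
 *	if (reserve_min_set(8 * 1024 * 1024) == 0) {
 *		do_critical_work();
 *		reserve_min_set(old_min);
 *	}
 */
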
#ifdef __cplusplus
} /* extern "C" */
#endif

#endif /* _JEMALLOC_H_ */