1/*
2  Default header file for malloc-2.8.x, written by Doug Lea
3  and released to the public domain, as explained at
4  http://creativecommons.org/licenses/publicdomain.
5
6  last update: Wed May 27 14:25:17 2009  Doug Lea  (dl at gee)
7
8  This header is for ANSI C/C++ only.  You can set any of
9  the following #defines before including:
10
11  * If USE_DL_PREFIX is defined, it is assumed that malloc.c
12    was also compiled with this option, so all routines
13    have names starting with "dl".
14
15  * If HAVE_USR_INCLUDE_MALLOC_H is defined, it is assumed that this
16    file will be #included AFTER <malloc.h>. This is needed only if
17    your system defines a struct mallinfo that is incompatible with the
18    standard one declared here.  Otherwise, you can include this file
    INSTEAD of your system <malloc.h>.  At least on ANSI, all
20    declarations should be compatible with system versions
21
22  * If MSPACES is defined, declarations for mspace versions are included.
23*/
24
25#ifndef MALLOC_280_H
26#define MALLOC_280_H
27
28#define USE_DL_PREFIX
29
30#ifdef __cplusplus
31extern "C" {
32#endif
33
34#include <stddef.h>   /* for size_t */
35
36#ifndef ONLY_MSPACES
37#define ONLY_MSPACES 0     /* define to a value */
38#endif  /* ONLY_MSPACES */
39#ifndef NO_MALLINFO
40#define NO_MALLINFO 0
41#endif  /* NO_MALLINFO */
42
43
44#if !ONLY_MSPACES
45
46#ifndef USE_DL_PREFIX
47#define dlcalloc               calloc
48#define dlfree                 free
49#define dlmalloc               malloc
50#define dlmemalign             memalign
51#define dlrealloc              realloc
52#define dlvalloc               valloc
53#define dlpvalloc              pvalloc
54#define dlmallinfo             mallinfo
55#define dlmallopt              mallopt
56#define dlmalloc_trim          malloc_trim
57#define dlmalloc_stats         malloc_stats
58#define dlmalloc_usable_size   malloc_usable_size
59#define dlmalloc_footprint     malloc_footprint
60#define dlindependent_calloc   independent_calloc
61#define dlindependent_comalloc independent_comalloc
62#endif /* USE_DL_PREFIX */
63#if !NO_MALLINFO
64#ifndef HAVE_USR_INCLUDE_MALLOC_H
65#ifndef _MALLOC_H
66#ifndef MALLINFO_FIELD_TYPE
67#define MALLINFO_FIELD_TYPE size_t
68#endif /* MALLINFO_FIELD_TYPE */
69#ifndef STRUCT_MALLINFO_DECLARED
70#define STRUCT_MALLINFO_DECLARED 1
/*
  Summary statistics record returned by copy from dlmallinfo().
  Fields use MALLINFO_FIELD_TYPE (size_t by default, see above) rather
  than the int fields of the SVID/XPG struct mallinfo; see the
  dlmallinfo() comment below for full field semantics.
*/
struct mallinfo {
  MALLINFO_FIELD_TYPE arena;    /* non-mmapped space allocated from system */
  MALLINFO_FIELD_TYPE ordblks;  /* number of free chunks */
  MALLINFO_FIELD_TYPE smblks;   /* always 0 */
  MALLINFO_FIELD_TYPE hblks;    /* always 0 */
  MALLINFO_FIELD_TYPE hblkhd;   /* space in mmapped regions */
  MALLINFO_FIELD_TYPE usmblks;  /* maximum total allocated space */
  MALLINFO_FIELD_TYPE fsmblks;  /* always 0 */
  MALLINFO_FIELD_TYPE uordblks; /* total allocated space */
  MALLINFO_FIELD_TYPE fordblks; /* total free space */
  MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */
};
83#endif /* STRUCT_MALLINFO_DECLARED */
84#endif  /* _MALLOC_H */
85#endif  /* HAVE_USR_INCLUDE_MALLOC_H */
86#endif  /* !NO_MALLINFO */
87
/*
  malloc(size_t n)
  Returns a pointer to a newly allocated chunk of at least n bytes, or
  null if no space is available, in which case errno is set to ENOMEM
  on ANSI C systems.

  If n is zero, malloc returns a minimum-sized chunk. (The minimum
  size is 16 bytes on most 32bit systems, and 32 bytes on 64bit
  systems.)  Note that size_t is an unsigned type, so calls with
  arguments that would be negative if signed are interpreted as
  requests for huge amounts of space, which will often fail. The
  maximum supported value of n differs across systems, but is in all
  cases less than the maximum representable value of a size_t.
*/
void* dlmalloc(size_t bytes);
103
/*
  free(void* p)
  Releases the chunk of memory pointed to by p, that had been previously
  allocated using malloc or a related routine such as realloc.
  It has no effect if p is null. If p was not malloced or already
  freed, free(p) will by default cause the current program to abort.
*/
void  dlfree(void* mem);
112
/*
  calloc(size_t n_elements, size_t element_size);
  Returns a pointer to n_elements * element_size bytes, with all locations
  set to zero.
*/
void* dlcalloc(size_t n_elements, size_t elem_size);
119
/*
  realloc(void* p, size_t n)
  Returns a pointer to a chunk of size n that contains the same data
  as does chunk p up to the minimum of (n, p's size) bytes, or null
  if no space is available.

  The returned pointer may or may not be the same as p. The algorithm
  prefers extending p in most cases when possible, otherwise it
  employs the equivalent of a malloc-copy-free sequence.

  If p is null, realloc is equivalent to malloc.

  If space is not available, realloc returns null, errno is set (if on
  ANSI) and p is NOT freed.

  If n is for fewer bytes than already held by p, the newly unused
  space is lopped off and freed if possible.  realloc with a size
  argument of zero (re)allocates a minimum-sized chunk.

  The old unix realloc convention of allowing the last-free'd chunk
  to be used as an argument to realloc is not supported.
*/

void* dlrealloc(void* mem, size_t newsize);
144
/*
  memalign(size_t alignment, size_t n);
  Returns a pointer to a newly allocated chunk of n bytes, aligned
  in accord with the alignment argument.

  The alignment argument should be a power of two. If the argument is
  not a power of two, the nearest greater power is used.
  8-byte alignment is guaranteed by normal malloc calls, so don't
  bother calling memalign with an argument of 8 or less.

  Overreliance on memalign is a sure way to fragment space.
*/
void* dlmemalign(size_t alignment, size_t bytes);
158
/*
  valloc(size_t n);
  Equivalent to memalign(pagesize, n), where pagesize is the page
  size of the system. If the pagesize is unknown, 4096 is used.
*/
void* dlvalloc(size_t bytes);
165
/*
  mallopt(int parameter_number, int parameter_value)
  Sets tunable parameters. The format is to provide a
  (parameter-number, parameter-value) pair.  mallopt then sets the
  corresponding parameter to the argument value if it can (i.e., so
  long as the value is meaningful), and returns 1 if successful else
  0.  SVID/XPG/ANSI defines four standard param numbers for mallopt,
  normally defined in malloc.h.  None of these are used in this malloc,
  so setting them has no effect. But this malloc also supports other
  options in mallopt:

  Symbol            param #  default    allowed param values
  M_TRIM_THRESHOLD     -1   2*1024*1024   any   (-1U disables trimming)
  M_GRANULARITY        -2     page size   any power of 2 >= page size
  M_MMAP_THRESHOLD     -3      256*1024   any   (or 0 if no MMAP support)
*/
int dlmallopt(int parameter_number, int parameter_value);
183
184#define M_TRIM_THRESHOLD     (-1)
185#define M_GRANULARITY        (-2)
186#define M_MMAP_THRESHOLD     (-3)
187
188
/*
  malloc_footprint();
  Returns the number of bytes obtained from the system.  The total
  number of bytes allocated by malloc, realloc etc., is less than this
  value. Unlike mallinfo, this function returns only a precomputed
  result, so can be called frequently to monitor memory consumption.
  Even if locks are otherwise defined, this function does not use them,
  so results might not be up to date.
*/
/* (void) rather than () so this is a true prototype, not an
   old-style unspecified-parameter declaration. */
size_t dlmalloc_footprint(void);
199
200#if !NO_MALLINFO
201/*
202  mallinfo()
203  Returns (by copy) a struct containing various summary statistics:
204
205  arena:     current total non-mmapped bytes allocated from system
206  ordblks:   the number of free chunks
207  smblks:    always zero.
208  hblks:     current number of mmapped regions
209  hblkhd:    total bytes held in mmapped regions
210  usmblks:   the maximum total allocated space. This will be greater
211                than current total if trimming has occurred.
212  fsmblks:   always zero
213  uordblks:  current total allocated space (normal or mmapped)
214  fordblks:  total free space
215  keepcost:  the maximum number of bytes that could ideally be released
216               back to system via malloc_trim. ("ideally" means that
217               it ignores page restrictions etc.)
218
  Because these fields are MALLINFO_FIELD_TYPE (size_t by default
  here, int in the SVID/XPG struct), but internal bookkeeping may
  use wider values, the reported values may wrap around zero and
  thus be inaccurate.
222*/
223
224struct mallinfo dlmallinfo(void);
225#endif  /* NO_MALLINFO */
226
/*
  independent_calloc(size_t n_elements, size_t element_size, void* chunks[]);

  independent_calloc is similar to calloc, but instead of returning a
  single cleared space, it returns an array of pointers to n_elements
  independent elements that can hold contents of size elem_size, each
  of which starts out cleared, and can be independently freed,
  realloc'ed etc. The elements are guaranteed to be adjacently
  allocated (this is not guaranteed to occur with multiple callocs or
  mallocs), which may also improve cache locality in some
  applications.

  The "chunks" argument is optional (i.e., may be null, which is
  probably the most typical usage). If it is null, the returned array
  is itself dynamically allocated and should also be freed when it is
  no longer needed. Otherwise, the chunks array must be of at least
  n_elements in length. It is filled in with the pointers to the
  chunks.

  In either case, independent_calloc returns this pointer array, or
  null if the allocation failed.  If n_elements is zero and "chunks"
  is null, it returns a chunk representing an array with zero elements
  (which should be freed if not wanted).

  Each element must be individually freed when it is no longer
  needed. If you'd like to instead be able to free all at once, you
  should instead use regular calloc and assign pointers into this
  space to represent elements.  (In this case though, you cannot
  independently free elements.)

  independent_calloc simplifies and speeds up implementations of many
  kinds of pools.  It may also be useful when constructing large data
  structures that initially have a fixed number of fixed-sized nodes,
  but the number is not known at compile time, and some of the nodes
  may later need to be freed. For example:

  struct Node { int item; struct Node* next; };

  struct Node* build_list() {
    struct Node** pool;
    int n = read_number_of_nodes_needed();
    if (n <= 0) return 0;
    pool = (struct Node**)independent_calloc(n, sizeof(struct Node), 0);
    if (pool == 0) die();
    // organize into a linked list...
    struct Node* first = pool[0];
    for (int i = 0; i < n-1; ++i)
      pool[i]->next = pool[i+1];
    free(pool);     // Can now free the array (or not, if it is needed later)
    return first;
  }
*/
void** dlindependent_calloc(size_t n_elements, size_t elem_size,
                            void* chunks[]);
280
/*
  independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);

  independent_comalloc allocates, all at once, a set of n_elements
  chunks with sizes indicated in the "sizes" array.    It returns
  an array of pointers to these elements, each of which can be
  independently freed, realloc'ed etc. The elements are guaranteed to
  be adjacently allocated (this is not guaranteed to occur with
  multiple callocs or mallocs), which may also improve cache locality
  in some applications.

  The "chunks" argument is optional (i.e., may be null). If it is null
  the returned array is itself dynamically allocated and should also
  be freed when it is no longer needed. Otherwise, the chunks array
  must be of at least n_elements in length. It is filled in with the
  pointers to the chunks.

  In either case, independent_comalloc returns this pointer array, or
  null if the allocation failed.  If n_elements is zero and chunks is
  null, it returns a chunk representing an array with zero elements
  (which should be freed if not wanted).

  Each element must be individually freed when it is no longer
  needed. If you'd like to instead be able to free all at once, you
  should instead use a single regular malloc, and assign pointers at
  particular offsets in the aggregate space. (In this case though, you
  cannot independently free elements.)

  independent_comalloc differs from independent_calloc in that each
  element may have a different size, and also that it does not
  automatically clear elements.

  independent_comalloc can be used to speed up allocation in cases
  where several structs or objects must always be allocated at the
  same time.  For example:

  struct Head { ... }
  struct Foot { ... }

  void send_message(char* msg) {
    int msglen = strlen(msg);
    size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
    void* chunks[3];
    if (independent_comalloc(3, sizes, chunks) == 0)
      die();
    struct Head* head = (struct Head*)(chunks[0]);
    char*        body = (char*)(chunks[1]);
    struct Foot* foot = (struct Foot*)(chunks[2]);
    // ...
  }

  In general though, independent_comalloc is worth using only for
  larger values of n_elements. For small values, you probably won't
  detect enough difference from series of malloc calls to bother.

  Overuse of independent_comalloc can increase overall memory usage,
  since it cannot reuse existing noncontiguous small chunks that
  might be available for some of the elements.
*/
void** dlindependent_comalloc(size_t n_elements, size_t sizes[],
                              void* chunks[]);
341
342
/*
  pvalloc(size_t n);
  Equivalent to valloc(minimum-page-that-holds(n)), that is,
  round up n to nearest pagesize.
 */
void*  dlpvalloc(size_t bytes);
349
/*
  malloc_trim(size_t pad);

  If possible, gives memory back to the system (via negative arguments
  to sbrk) if there is unused memory at the `high' end of the malloc
  pool or in unused MMAP segments. You can call this after freeing
  large blocks of memory to potentially reduce the system-level memory
  requirements of a program. However, it cannot guarantee to reduce
  memory. Under some allocation patterns, some large free blocks of
  memory will be locked between two used chunks, so they cannot be
  given back to the system.

  The `pad' argument to malloc_trim represents the amount of free
  trailing space to leave untrimmed. If this argument is zero, only
  the minimum amount of memory to maintain internal data structures
  will be left. Non-zero arguments can be supplied to maintain enough
  trailing space to service future expected allocations without having
  to re-obtain memory from the system.

  Malloc_trim returns 1 if it actually released any memory, else 0.
*/
int  dlmalloc_trim(size_t pad);
372
/*
  malloc_stats();
  Prints on stderr the amount of space obtained from the system (both
  via sbrk and mmap), the maximum amount (which may be more than
  current if malloc_trim and/or munmap got called), and the current
  number of bytes allocated via malloc (or realloc, etc) but not yet
  freed. Note that this is the number of bytes allocated, not the
  number requested. It will be larger than the number requested
  because of alignment and bookkeeping overhead. Because it includes
  alignment wastage as being in use, this figure may be greater than
  zero even when no user-level chunks are allocated.

  The reported current and maximum system memory can be inaccurate if
  a program makes other calls to system memory allocation functions
  (normally sbrk) outside of malloc.

  malloc_stats prints only the most commonly interesting statistics.
  More information can be obtained by calling mallinfo.
*/
/* (void) rather than () so this is a true prototype, not an
   old-style unspecified-parameter declaration. */
void  dlmalloc_stats(void);
393
394#endif /* !ONLY_MSPACES */
395
/*
  malloc_usable_size(void* p);

  Returns the number of bytes you can actually use in
  an allocated chunk, which may be more than you requested (although
  often not) due to alignment and minimum size constraints.
  You can use this many bytes without worrying about
  overwriting other allocated objects. This is not a particularly great
  programming practice. malloc_usable_size can be more useful in
  debugging and assertions, for example:

  p = malloc(n);
  assert(malloc_usable_size(p) >= 256);
*/
size_t dlmalloc_usable_size(void* mem);
411
412
413#if MSPACES
414
415/*
416  mspace is an opaque type representing an independent
417  region of space that supports mspace_malloc, etc.
418*/
419typedef void* mspace;
420
421/*
422  create_mspace creates and returns a new independent space with the
423  given initial capacity, or, if 0, the default granularity size.  It
424  returns null if there is no system memory available to create the
425  space.  If argument locked is non-zero, the space uses a separate
426  lock to control access. The capacity of the space will grow
427  dynamically as needed to service mspace_malloc requests.  You can
428  control the sizes of incremental increases of this space by
429  compiling with a different DEFAULT_GRANULARITY or dynamically
430  setting with mallopt(M_GRANULARITY, value).
431*/
432mspace create_mspace(size_t capacity, int locked);
433
434/*
435  destroy_mspace destroys the given space, and attempts to return all
436  of its memory back to the system, returning the total number of
437  bytes freed. After destruction, the results of access to all memory
438  used by the space become undefined.
439*/
440size_t destroy_mspace(mspace msp);
441
442/*
443  create_mspace_with_base uses the memory supplied as the initial base
444  of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this
445  space is used for bookkeeping, so the capacity must be at least this
446  large. (Otherwise 0 is returned.) When this initial space is
447  exhausted, additional memory will be obtained from the system.
448  Destroying this space will deallocate all additionally allocated
449  space (if possible) but not the initial base.
450*/
451mspace create_mspace_with_base(void* base, size_t capacity, int locked);
452
453/*
454  mspace_track_large_chunks controls whether requests for large chunks
455  are allocated in their own untracked mmapped regions, separate from
456  others in this mspace. By default large chunks are not tracked,
457  which reduces fragmentation. However, such chunks are not
458  necessarily released to the system upon destroy_mspace.  Enabling
459  tracking by setting to true may increase fragmentation, but avoids
460  leakage when relying on destroy_mspace to release all memory
461  allocated using this space.  The function returns the previous
462  setting.
463*/
464int mspace_track_large_chunks(mspace msp, int enable);
465
466/*
467  mspace_malloc behaves as malloc, but operates within
468  the given space.
469*/
470void* mspace_malloc(mspace msp, size_t bytes);
471
472/*
473  mspace_free behaves as free, but operates within
474  the given space.
475
476  If compiled with FOOTERS==1, mspace_free is not actually needed.
477  free may be called instead of mspace_free because freed chunks from
478  any space are handled by their originating spaces.
479*/
480void mspace_free(mspace msp, void* mem);
481
482/*
483  mspace_realloc behaves as realloc, but operates within
484  the given space.
485
486  If compiled with FOOTERS==1, mspace_realloc is not actually
487  needed.  realloc may be called instead of mspace_realloc because
488  realloced chunks from any space are handled by their originating
489  spaces.
490*/
491void* mspace_realloc(mspace msp, void* mem, size_t newsize);
492
493/*
494  mspace_calloc behaves as calloc, but operates within
495  the given space.
496*/
497void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);
498
499/*
500  mspace_memalign behaves as memalign, but operates within
501  the given space.
502*/
503void* mspace_memalign(mspace msp, size_t alignment, size_t bytes);
504
505/*
506  mspace_independent_calloc behaves as independent_calloc, but
507  operates within the given space.
508*/
509void** mspace_independent_calloc(mspace msp, size_t n_elements,
510                                 size_t elem_size, void* chunks[]);
511
512/*
513  mspace_independent_comalloc behaves as independent_comalloc, but
514  operates within the given space.
515*/
516void** mspace_independent_comalloc(mspace msp, size_t n_elements,
517                                   size_t sizes[], void* chunks[]);
518
519/*
520  mspace_footprint() returns the number of bytes obtained from the
521  system for this space.
522*/
523size_t mspace_footprint(mspace msp);
524
525
526#if !NO_MALLINFO
527/*
528  mspace_mallinfo behaves as mallinfo, but reports properties of
529  the given space.
530*/
531struct mallinfo mspace_mallinfo(mspace msp);
532#endif /* NO_MALLINFO */
533
534/*
  mspace_usable_size(void* p) behaves the same as malloc_usable_size;
536*/
537 size_t mspace_usable_size(void* mem);
538
539/*
540  mspace_malloc_stats behaves as malloc_stats, but reports
541  properties of the given space.
542*/
543void mspace_malloc_stats(mspace msp);
544
545/*
546  mspace_trim behaves as malloc_trim, but
547  operates within the given space.
548*/
549int mspace_trim(mspace msp, size_t pad);
550
551/*
552  An alias for mallopt.
553*/
554int mspace_mallopt(int, int);
555
556#endif  /* MSPACES */
557
558#ifdef __cplusplus
559};  /* end of extern "C" */
560#endif
561
562#endif /* MALLOC_280_H */
563