/*
     This file is part of libmicrohttpd
     Copyright (C) 2007, 2009, 2010 Daniel Pittman and Christian Grothoff

     This library is free software; you can redistribute it and/or
     modify it under the terms of the GNU Lesser General Public
     License as published by the Free Software Foundation; either
     version 2.1 of the License, or (at your option) any later version.

     This library is distributed in the hope that it will be useful,
     but WITHOUT ANY WARRANTY; without even the implied warranty of
     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
     Lesser General Public License for more details.

     You should have received a copy of the GNU Lesser General Public
     License along with this library; if not, write to the Free Software
     Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
*/

/**
 * @file memorypool.c
 * @brief memory pool
 * @author Christian Grothoff
 */
#include "memorypool.h"

/* define MAP_ANONYMOUS for Mac OS X */
#if defined(MAP_ANON) && !defined(MAP_ANONYMOUS)
#define MAP_ANONYMOUS MAP_ANON
#endif
#ifndef MAP_FAILED
#define MAP_FAILED ((void*)-1)
#endif

/**
 * Align to 2x word size (as GNU libc does).
 */
#define ALIGN_SIZE (2 * sizeof(void*))

/**
 * Round up 'n' to a multiple of ALIGN_SIZE.
 */
#define ROUND_TO_ALIGN(n) (((n) + (ALIGN_SIZE - 1)) & ~(ALIGN_SIZE - 1))


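/*
 * Worked example (illustrative only, assuming a 64-bit system where
 * sizeof(void*) is 8 and ALIGN_SIZE is therefore 16):
 *
 *   ROUND_TO_ALIGN (1)  == 16
 *   ROUND_TO_ALIGN (16) == 16
 *   ROUND_TO_ALIGN (17) == 32
 *
 * For values within ALIGN_SIZE-1 of SIZE_MAX the addition wraps around
 * and the macro yields 0; the callers below treat that as "too large".
 */
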
/**
 * Handle for a memory pool.  Pools are not reentrant and must not be
 * used by multiple threads.
 */
struct MemoryPool
{

  /**
   * Pointer to the pool's memory.
   */
  char *memory;

  /**
   * Size of the pool.
   */
  size_t size;

  /**
   * Offset of the first unallocated byte.
   */
  size_t pos;

  /**
   * Offset of the byte after the last unallocated byte
   * (allocations "from the end" start here and grow downwards).
   */
  size_t end;

  /**
   * #MHD_NO if pool was malloc'ed, #MHD_YES if mmapped (VirtualAlloc'ed for W32).
   */
  int is_mmap;
};


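/*
 * Rough sketch of how the fields above are used (illustrative, not
 * normative): the buffer is consumed from both ends.
 *
 *   0                     pos                end               size
 *   |---------------------|------------------|-----------------|
 *   |  front allocations  |    free space    | end allocations |
 *
 * MHD_pool_allocate() advances 'pos' for normal requests and moves
 * 'end' down for "from the end" requests; the pool is exhausted once
 * the two offsets would cross.
 */
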
/**
 * Create a memory pool.
 *
 * @param max maximum size of the pool
 * @return handle to the new pool, or NULL on error
 */
struct MemoryPool *
MHD_pool_create (size_t max)
{
  struct MemoryPool *pool;

  pool = malloc (sizeof (struct MemoryPool));
  if (NULL == pool)
    return NULL;
#if defined(MAP_ANONYMOUS) || defined(_WIN32)
  /* for small pools, skip mmap/VirtualAlloc and use the malloc fallback below */
  if (max <= 32 * 1024)
    pool->memory = MAP_FAILED;
  else
#if defined(MAP_ANONYMOUS) && !defined(_WIN32)
    pool->memory = mmap (NULL, max, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#elif defined(_WIN32)
    pool->memory = VirtualAlloc(NULL, max, MEM_COMMIT | MEM_RESERVE,
        PAGE_READWRITE);
#endif
#else
  pool->memory = MAP_FAILED;
#endif
  if ((pool->memory == MAP_FAILED) || (pool->memory == NULL))
    {
      pool->memory = malloc (max);
      if (pool->memory == NULL)
        {
          free (pool);
          return NULL;
        }
      pool->is_mmap = MHD_NO;
    }
  else
    {
      pool->is_mmap = MHD_YES;
    }
  pool->pos = 0;
  pool->end = max;
  pool->size = max;
  return pool;
}


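/*
 * Usage sketch (illustrative only; the pool size, buffer size and
 * variable names are hypothetical, error handling is abbreviated):
 *
 *   struct MemoryPool *pool = MHD_pool_create (64 * 1024);
 *   char *buf;
 *
 *   if (NULL == pool)
 *     return;                                   (allocation failed)
 *   buf = MHD_pool_allocate (pool, 1024, MHD_NO);
 *   ...
 *   MHD_pool_destroy (pool);                    (frees all blocks at once)
 */
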
/**
 * Destroy a memory pool.
 *
 * @param pool memory pool to destroy
 */
void
MHD_pool_destroy (struct MemoryPool *pool)
{
  if (pool == NULL)
    return;
  if (pool->is_mmap == MHD_NO)
    free (pool->memory);
  else
#if defined(MAP_ANONYMOUS) && !defined(_WIN32)
    munmap (pool->memory, pool->size);
#elif defined(_WIN32)
    VirtualFree(pool->memory, 0, MEM_RELEASE);
#else
    /* unreachable: without mmap/VirtualAlloc, MHD_pool_create never sets is_mmap */
    abort();
#endif
  free (pool);
}


/**
 * Allocate @a size bytes from the pool.
 *
 * @param pool memory pool to use for the operation
 * @param size number of bytes to allocate
 * @param from_end allocate from end of pool (set to #MHD_YES);
 *        use this for small, persistent allocations that
 *        will never be reallocated
 * @return pointer to the allocated block, or
 *         NULL if the pool cannot support @a size more bytes
 */
void *
MHD_pool_allocate (struct MemoryPool *pool,
		   size_t size, int from_end)
{
  void *ret;
  size_t asize;

  asize = ROUND_TO_ALIGN (size);
  if ( (0 == asize) && (0 != size) )
    return NULL; /* size too close to SIZE_MAX */
  if ((pool->pos + asize > pool->end) || (pool->pos + asize < pool->pos))
    return NULL; /* does not fit (or offset arithmetic would overflow) */
  if (from_end == MHD_YES)
    {
      ret = &pool->memory[pool->end - asize];
      pool->end -= asize;
    }
  else
    {
      ret = &pool->memory[pool->pos];
      pool->pos += asize;
    }
  return ret;
}


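/*
 * Illustration of the 'from_end' flag (hypothetical sizes): persistent
 * data goes to the top of the buffer so the front stays contiguous for
 * blocks that may later be grown via MHD_pool_reallocate().
 *
 *   void *rbuf = MHD_pool_allocate (pool, 256, MHD_NO);    (advances 'pos')
 *   void *meta = MHD_pool_allocate (pool, 64, MHD_YES);    (lowers 'end')
 */
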
/**
 * Reallocate a block of memory obtained from the pool.
 * This is particularly efficient when growing or
 * shrinking the block that was last (re)allocated.
 * If the given block is not the most recently
 * (re)allocated block, the memory of the previous
 * allocation may be leaked until the pool is
 * destroyed (and copying the data may be required).
 *
 * @param pool memory pool to use for the operation
 * @param old the existing block
 * @param old_size the size of the existing block
 * @param new_size the new size of the block
 * @return new address of the block, or
 *         NULL if the pool cannot support @a new_size
 *         bytes (@a old continues to be valid for @a old_size)
 */
void *
MHD_pool_reallocate (struct MemoryPool *pool,
                     void *old,
                     size_t old_size,
                     size_t new_size)
{
  void *ret;
  size_t asize;

  asize = ROUND_TO_ALIGN (new_size);
  if ( (0 == asize) && (0 != new_size) )
    return NULL; /* new_size too close to SIZE_MAX */
  if ((pool->end < old_size) || (pool->end < asize))
    return NULL;                /* unsatisfiable or bogus request */

  if ((pool->pos >= old_size) && (&pool->memory[pool->pos - old_size] == old))
    {
      /* was the previous allocation - optimize! */
      if (pool->pos + asize - old_size <= pool->end)
        {
          /* fits */
          pool->pos += asize - old_size;
          if (asize < old_size)      /* shrinking - zero again! */
            memset (&pool->memory[pool->pos], 0, old_size - asize);
          return old;
        }
      /* does not fit */
      return NULL;
    }
  if (asize <= old_size)
    return old;                 /* cannot shrink, no need to move */
  if ((pool->pos + asize >= pool->pos) &&
      (pool->pos + asize <= pool->end))
    {
      /* fits */
      ret = &pool->memory[pool->pos];
      memcpy (ret, old, old_size);
      pool->pos += asize;
      return ret;
    }
  /* does not fit */
  return NULL;
}


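/*
 * Illustration of the fast path above (sizes are hypothetical and chosen
 * as multiples of ALIGN_SIZE so that the "last allocation" test matches).
 * The first reallocation grows the most recent block in place; the second
 * has to copy, stranding the old bytes until the pool is reset/destroyed.
 *
 *   char *a = MHD_pool_allocate (pool, 128, MHD_NO);
 *   a = MHD_pool_reallocate (pool, a, 128, 256);
 *   char *b = MHD_pool_allocate (pool, 64, MHD_NO);
 *   a = MHD_pool_reallocate (pool, a, 256, 512);
 */
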
/**
 * Clear all entries from the memory pool except
 * for @a keep of the given @a size.
 *
 * @param pool memory pool to use for the operation
 * @param keep pointer to the entry to keep (may be NULL)
 * @param size how many bytes need to be kept at this address
 * @return new address of @a keep (if it had to change)
 */
void *
MHD_pool_reset (struct MemoryPool *pool,
		void *keep,
		size_t size)
{
  if (NULL != keep)
    {
      if (keep != pool->memory)
        {
          /* slide the kept block to the very start of the buffer */
          memmove (pool->memory, keep, size);
          keep = pool->memory;
        }
    }
  pool->end = pool->size;
  /* zero out everything that is no longer in use */
  memset (&pool->memory[size],
	  0,
	  pool->size - size);
  if (NULL != keep)
    pool->pos = ROUND_TO_ALIGN(size);
  return keep;
}


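/*
 * Illustration of a reset (hypothetical caller state): all pool memory is
 * reclaimed except the 'keep' block, which is slid to the very start of
 * the buffer, so the caller must continue with the returned address and
 * treat every other pointer from this pool as invalid.
 *
 *   keep = MHD_pool_reset (pool, keep, keep_size);
 */
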
/* end of memorypool.c */