//
// Copyright (c) 2002-2010 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//

#ifndef _POOLALLOC_INCLUDED_
#define _POOLALLOC_INCLUDED_

#ifdef _DEBUG
#define GUARD_BLOCKS  // define to enable guard block sanity checking
#endif

//
// This header defines an allocator that can be used to efficiently
// allocate a large number of small requests for heap memory, with the
// intention that they are not individually deallocated, but rather
// collectively deallocated at one time.
//
// This simultaneously
//
// * Makes each individual allocation much more efficient; the
//     typical allocation is trivial.
// * Completely avoids the cost of doing individual deallocation.
// * Saves the trouble of tracking down and plugging a large class of leaks.
//
// Individual classes can use this allocator by supplying their own
// new and delete methods (see the sketch below).
//
// STL containers can use this allocator by using the pool_allocator
// class as the allocator (second) template argument (see the example
// after the pool_allocator class at the end of this header).
//
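// For example, a class can route its allocations through the pool like
// this (an illustrative sketch, not part of this header; it assumes the
// global pool returned by GetGlobalPoolAllocator(), declared below, has
// been set up):
//
//     class TExample {
//     public:
//         void* operator new(size_t size) {
//             return GetGlobalPoolAllocator()->allocate(size);
//         }
//         void operator delete(void*) { }  // reclaimed en masse by pop()
//     };
//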
#include <stddef.h>
#include <string.h>
#include <new>  // for placement new in pool_allocator::construct
#include <vector>

// If we are using guard blocks, we must track each individual
// allocation.  If we aren't using guard blocks, these
// never get instantiated, so won't have any impact.
//

class TAllocation {
public:
    TAllocation(size_t size, unsigned char* mem, TAllocation* prev = 0) :
        size(size), mem(mem), prevAlloc(prev) {
        // Allocations are bracketed:
        //    [allocationHeader][initialGuardBlock][userData][finalGuardBlock]
        // This would be cleaner with if (guardBlockSize)..., but that
        // makes the compiler print warnings about 0 length memsets,
        // even with the if() protecting them.
#ifdef GUARD_BLOCKS
        memset(preGuard(),  guardBlockBeginVal, guardBlockSize);
        memset(data(),      userDataFill,       size);
        memset(postGuard(), guardBlockEndVal,   guardBlockSize);
#endif
    }

    void check() const {
        checkGuardBlock(preGuard(),  guardBlockBeginVal, "before");
        checkGuardBlock(postGuard(), guardBlockEndVal,   "after");
    }

    void checkAllocList() const;

    // Return total size needed to accommodate user buffer of 'size',
    // plus our tracking data.
    inline static size_t allocationSize(size_t size) {
        return size + 2 * guardBlockSize + headerSize();
    }
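
    // For illustration only: if guardBlockSize were 16 and headerSize()
    // were 32, a 24-byte user request would occupy
    // 24 + 2*16 + 32 = 88 bytes of the underlying page.  (The real
    // values are defined in the implementation file.)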

    // Offset from surrounding buffer to get to user data buffer.
    inline static unsigned char* offsetAllocation(unsigned char* m) {
        return m + guardBlockSize + headerSize();
    }

private:
    void checkGuardBlock(unsigned char* blockMem, unsigned char val, const char* locText) const;

    // Find offsets to pre and post guard blocks, and user data buffer
    unsigned char* preGuard()  const { return mem + headerSize(); }
    unsigned char* data()      const { return preGuard() + guardBlockSize; }
    unsigned char* postGuard() const { return data() + size; }

    size_t size;                  // size of the user data area
    unsigned char* mem;           // beginning of our allocation (pts to header)
    TAllocation* prevAlloc;       // prior allocation in the chain

    // Support MSVC++ 6.0
    const static unsigned char guardBlockBeginVal;
    const static unsigned char guardBlockEndVal;
    const static unsigned char userDataFill;

    const static size_t guardBlockSize;
#ifdef GUARD_BLOCKS
    inline static size_t headerSize() { return sizeof(TAllocation); }
#else
    inline static size_t headerSize() { return 0; }
#endif
};

//
// There are several stacks.  One is to track the pushing and popping
// of the user, and is not yet implemented.  The others are simply
// repositories of free pages or used pages.
//
// Page stacks are linked together with a simple header at the beginning
// of each allocation obtained from the underlying OS.  Multi-page allocations
// are returned to the OS.  Individual page allocations are kept for future
// re-use.
//
// The "page size" used need not match the underlying OS page size, but
// having it be about that size, or a multiple of it, is likely most
// efficient.
//
class TPoolAllocator {
public:
    TPoolAllocator(int growthIncrement = 8*1024, int allocationAlignment = 16);

    //
    // Don't call the destructor just to free up the memory, call pop()
    //
    ~TPoolAllocator();

    //
    // Call push() to establish a new place to pop memory to.  Does not
    // have to be called to get things started.
    //
    void push();

    //
    // Call pop() to free all memory allocated since the last call to push(),
    // or, if push() was never called, all memory allocated since the first
    // allocation.  (See the usage sketch after this class.)
    //
    void pop();

    //
    // Call popAll() to free all memory allocated.
    //
    void popAll();

    //
    // Call allocate() to actually acquire memory.  Returns 0 if no memory
    // available, otherwise a properly aligned pointer to 'numBytes' of memory.
    //
    void* allocate(size_t numBytes);

    //
    // There is no deallocate.  The point of this class is that
    // deallocation can be skipped by the user of it, as the model
    // of use is to simultaneously deallocate everything at once
    // by calling pop(), and to not have to solve memory leak problems.
    //

protected:
    friend struct tHeader;

    struct tHeader {
        tHeader(tHeader* nextPage, size_t pageCount) :
            nextPage(nextPage),
            pageCount(pageCount)
#ifdef GUARD_BLOCKS
          , lastAllocation(0)
#endif
            { }

        ~tHeader() {
#ifdef GUARD_BLOCKS
            if (lastAllocation)
                lastAllocation->checkAllocList();
#endif
        }

        tHeader* nextPage;
        size_t pageCount;
#ifdef GUARD_BLOCKS
        TAllocation* lastAllocation;
#endif
    };

    struct tAllocState {
        size_t offset;
        tHeader* page;
    };
    typedef std::vector<tAllocState> tAllocStack;

    // Track allocations if and only if we're using guard blocks
    void* initializeAllocation(tHeader* block, unsigned char* memory, size_t numBytes) {
#ifdef GUARD_BLOCKS
        new(memory) TAllocation(numBytes, memory, block->lastAllocation);
        block->lastAllocation = reinterpret_cast<TAllocation*>(memory);
#endif
        // This is optimized entirely away if GUARD_BLOCKS is not defined.
        return TAllocation::offsetAllocation(memory);
    }

    size_t pageSize;        // granularity of allocation from the OS
    size_t alignment;       // all returned allocations will be aligned at
                            // this granularity, which will be a power of 2
    size_t alignmentMask;
    size_t headerSkip;      // amount of memory to skip to make room for the
                            //      header (basically, size of header, rounded
                            //      up to make it aligned)
    size_t currentPageOffset;  // next offset in top of inUseList to allocate from
    tHeader* freeList;      // list of popped memory
    tHeader* inUseList;     // list of all memory currently being used
    tAllocStack stack;      // stack of where to allocate from, to partition pool

    int numCalls;           // just an interesting statistic
    size_t totalBytes;      // just an interesting statistic
private:
    TPoolAllocator& operator=(const TPoolAllocator&);  // don't allow assignment operator
    TPoolAllocator(const TPoolAllocator&);  // don't allow copy constructor
};
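
//
// A typical usage pattern (an illustrative sketch; the pool and the
// request sizes here are hypothetical):
//
//     TPoolAllocator pool;          // default: 8K growth, 16-byte alignment
//     pool.push();                  // open a new allocation scope
//     void* a = pool.allocate(24);  // cheap bump-pointer allocations...
//     void* b = pool.allocate(120);
//     pool.pop();                   // ...all freed at once, including a and b
//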

//
// There could potentially be many pools with pops happening at
// different times.  But a simple use is to have a global pop
// with everyone using the same global allocator.
//
extern TPoolAllocator* GetGlobalPoolAllocator();
extern void SetGlobalPoolAllocator(TPoolAllocator* poolAllocator);
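//
// For example (an illustrative sketch; 'myPool' is a hypothetical pool):
//
//     TPoolAllocator myPool;
//     SetGlobalPoolAllocator(&myPool);
//     // ... allocations made through pool_allocator<> below now draw
//     // from myPool ...
//     GetGlobalPoolAllocator()->popAll();
//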

//
// This STL compatible allocator is intended to be used as the allocator
// parameter to templatized STL containers, like vector and map.
//
// It will use the pools for allocation, and not
// do any deallocation, but will still do destruction.
//
template<class T>
class pool_allocator {
public:
    typedef size_t size_type;
    typedef ptrdiff_t difference_type;
    typedef T* pointer;
    typedef const T* const_pointer;
    typedef T& reference;
    typedef const T& const_reference;
    typedef T value_type;

    template<class Other>
    struct rebind {
        typedef pool_allocator<Other> other;
    };
    pointer address(reference x) const { return &x; }
    const_pointer address(const_reference x) const { return &x; }

    pool_allocator() : allocator(GetGlobalPoolAllocator()) { }
    pool_allocator(TPoolAllocator& a) : allocator(&a) { }
    pool_allocator(const pool_allocator<T>& p) : allocator(p.allocator) { }

    template <class Other>
    pool_allocator<T>& operator=(const pool_allocator<Other>& p) {
      allocator = &p.getAllocator();  // use the public accessor; 'allocator'
                                      // is protected in other instantiations
      return *this;
    }

    template<class Other>
    pool_allocator(const pool_allocator<Other>& p) : allocator(&p.getAllocator()) { }

#if defined(__SUNPRO_CC) && !defined(_RWSTD_ALLOCATOR)
    // libCStd on some platforms has a different allocate/deallocate interface:
    // the caller pre-bakes sizeof(T) into 'n', so 'n' is the number of bytes
    // to be allocated, not the number of elements.
    void* allocate(size_type n) {
        return getAllocator().allocate(n);
    }
    void* allocate(size_type n, const void*) {
        return getAllocator().allocate(n);
    }
    void deallocate(void*, size_type) {}
#else
    pointer allocate(size_type n) {
        return reinterpret_cast<pointer>(getAllocator().allocate(n * sizeof(T)));
    }
    pointer allocate(size_type n, const void*) {
        return reinterpret_cast<pointer>(getAllocator().allocate(n * sizeof(T)));
    }
    void deallocate(pointer, size_type) {}
#endif  // defined(__SUNPRO_CC) && !defined(_RWSTD_ALLOCATOR)

    void construct(pointer p, const T& val) { new ((void *)p) T(val); }
    void destroy(pointer p) { p->T::~T(); }

    bool operator==(const pool_allocator& rhs) const { return &getAllocator() == &rhs.getAllocator(); }
    bool operator!=(const pool_allocator& rhs) const { return &getAllocator() != &rhs.getAllocator(); }

    size_type max_size() const { return static_cast<size_type>(-1) / sizeof(T); }
    size_type max_size(int size) const { return static_cast<size_type>(-1) / size; }

    void setAllocator(TPoolAllocator* a) { allocator = a; }
    TPoolAllocator& getAllocator() const { return *allocator; }

protected:
    TPoolAllocator* allocator;
};
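
//
// For example (an illustrative sketch, assuming a global pool has been
// installed with SetGlobalPoolAllocator() as shown above):
//
//     std::vector<int, pool_allocator<int> > myVector;
//     myVector.push_back(7);  // element storage comes from the pool and is
//                             // reclaimed by pop()/popAll(), not by the
//                             // vector's destructor
//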

#endif // _POOLALLOC_INCLUDED_