//===--- Allocator.h - Simple memory allocation abstraction -----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file defines the MallocAllocator and BumpPtrAllocator interfaces. Both
/// of these conform to an LLVM "Allocator" concept which consists of an
/// Allocate method accepting a size and alignment, and a Deallocate accepting
/// a pointer and size. Further, the LLVM "Allocator" concept has overloads of
/// Allocate and Deallocate for setting size and alignment based on the final
/// type. These overloads are typically provided by a base class template \c
/// AllocatorBase.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_ALLOCATOR_H
#define LLVM_SUPPORT_ALLOCATOR_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/AlignOf.h"
#include "llvm/Support/Compiler.h" // For __msan_allocated_memory.
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Memory.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdlib>
#include <cstring>     // For memset.
#include <iterator>    // For std::next and std::distance.
#include <type_traits> // For std::enable_if, std::is_same, std::remove_cv.
#include <utility>     // For std::forward, std::move, and std::pair.

namespace llvm {

/// \brief CRTP base class providing obvious overloads for the core \c
/// Allocate() methods of LLVM-style allocators.
///
/// This base class both documents the full public interface exposed by all
/// LLVM-style allocators, and redirects all of the overloads to a single core
/// set of methods which the derived class must define.
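///
/// A minimal conforming allocator might look like the following sketch (the
/// \c MyAllocator name is purely illustrative; it simply forwards to malloc
/// and free):
/// \code
///   class MyAllocator : public AllocatorBase<MyAllocator> {
///   public:
///     void *Allocate(size_t Size, size_t Alignment) { return malloc(Size); }
///     using AllocatorBase<MyAllocator>::Allocate;
///     void Deallocate(const void *Ptr, size_t Size) {
///       free(const_cast<void *>(Ptr));
///     }
///     using AllocatorBase<MyAllocator>::Deallocate;
///   };
/// \endcode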
template <typename DerivedT> class AllocatorBase {
public:
  /// \brief Allocate \a Size bytes of \a Alignment aligned memory. This method
  /// must be implemented by \c DerivedT.
  void *Allocate(size_t Size, size_t Alignment) {
#ifdef __clang__
    static_assert(static_cast<void *(AllocatorBase::*)(size_t, size_t)>(
                      &AllocatorBase::Allocate) !=
                      static_cast<void *(DerivedT::*)(size_t, size_t)>(
                          &DerivedT::Allocate),
                  "Class derives from AllocatorBase without implementing the "
                  "core Allocate(size_t, size_t) overload!");
#endif
    return static_cast<DerivedT *>(this)->Allocate(Size, Alignment);
  }

  /// \brief Deallocate \a Ptr to \a Size bytes of memory allocated by this
  /// allocator.
  void Deallocate(const void *Ptr, size_t Size) {
#ifdef __clang__
    static_assert(static_cast<void (AllocatorBase::*)(const void *, size_t)>(
                      &AllocatorBase::Deallocate) !=
                      static_cast<void (DerivedT::*)(const void *, size_t)>(
                          &DerivedT::Deallocate),
                  "Class derives from AllocatorBase without implementing the "
                  "core Deallocate(const void *, size_t) overload!");
#endif
    return static_cast<DerivedT *>(this)->Deallocate(Ptr, Size);
  }

  // The rest of these methods are helpers that redirect to one of the above
  // core methods.

  /// \brief Allocate space for a sequence of objects without constructing them.
  template <typename T> T *Allocate(size_t Num = 1) {
    return static_cast<T *>(Allocate(Num * sizeof(T), AlignOf<T>::Alignment));
  }

  /// \brief Deallocate space for a sequence of objects without destroying them.
  template <typename T>
  typename std::enable_if<
      !std::is_same<typename std::remove_cv<T>::type, void>::value, void>::type
  Deallocate(T *Ptr, size_t Num = 1) {
    Deallocate(static_cast<const void *>(Ptr), Num * sizeof(T));
  }
};

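/// \brief A trivial LLVM-style allocator that forwards directly to malloc and
/// free.
///
/// The Alignment argument to Allocate is ignored; malloc's default alignment
/// is assumed to be sufficient.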
class MallocAllocator : public AllocatorBase<MallocAllocator> {
public:
  void Reset() {}

  void *Allocate(size_t Size, size_t /*Alignment*/) { return malloc(Size); }

  // Pull in base class overloads.
  using AllocatorBase<MallocAllocator>::Allocate;

  void Deallocate(const void *Ptr, size_t /*Size*/) {
    free(const_cast<void *>(Ptr));
  }

  // Pull in base class overloads.
  using AllocatorBase<MallocAllocator>::Deallocate;

  void PrintStats() const {}
};

namespace detail {

// We call out to an external function to actually print the message as the
// printing code uses Allocator.h in its implementation.
void printBumpPtrAllocatorStats(unsigned NumSlabs, size_t BytesAllocated,
                                size_t TotalMemory);
} // End namespace detail.

/// \brief Allocate memory in an ever-growing pool, as if by bump-pointer.
///
/// This isn't strictly a bump-pointer allocator as it uses backing slabs of
/// memory rather than relying on a boundless contiguous heap. However, it has
/// bump-pointer semantics in that it is a monotonically growing pool of memory
/// where every allocation is found by merely allocating the next N bytes in
/// the slab, or the next N bytes in the next slab.
///
/// Note that this also has a threshold for forcing allocations above a certain
/// size into their own slab.
///
/// The BumpPtrAllocatorImpl template defaults to using a MallocAllocator
/// object, which wraps malloc, to allocate memory, but it can be changed to
/// use a custom allocator.
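///
/// Illustrative usage (a sketch, not part of the interface contract):
/// \code
///   BumpPtrAllocator Alloc;
///   int *Ints = Alloc.Allocate<int>(16); // Uninitialized storage for 16 ints.
///   void *Raw = Alloc.Allocate(128, 8);  // 128 bytes, 8-byte aligned.
///   Alloc.Reset();                       // Frees all but the first slab.
/// \endcode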
template <typename AllocatorT = MallocAllocator, size_t SlabSize = 4096,
          size_t SizeThreshold = SlabSize>
class BumpPtrAllocatorImpl
    : public AllocatorBase<
          BumpPtrAllocatorImpl<AllocatorT, SlabSize, SizeThreshold>> {
public:
  static_assert(SizeThreshold <= SlabSize,
                "The SizeThreshold must be at most the SlabSize to ensure "
                "that objects larger than a slab go into their own memory "
                "allocation.");

  BumpPtrAllocatorImpl()
      : CurPtr(nullptr), End(nullptr), BytesAllocated(0), Allocator() {}
  template <typename T>
  BumpPtrAllocatorImpl(T &&Allocator)
      : CurPtr(nullptr), End(nullptr), BytesAllocated(0),
        Allocator(std::forward<T>(Allocator)) {}

  // Manually implement a move constructor as we must clear the old allocator's
  // slabs as a matter of correctness.
  BumpPtrAllocatorImpl(BumpPtrAllocatorImpl &&Old)
      : CurPtr(Old.CurPtr), End(Old.End), Slabs(std::move(Old.Slabs)),
        CustomSizedSlabs(std::move(Old.CustomSizedSlabs)),
        BytesAllocated(Old.BytesAllocated),
        Allocator(std::move(Old.Allocator)) {
    Old.CurPtr = Old.End = nullptr;
    Old.BytesAllocated = 0;
    Old.Slabs.clear();
    Old.CustomSizedSlabs.clear();
  }

  ~BumpPtrAllocatorImpl() {
    DeallocateSlabs(Slabs.begin(), Slabs.end());
    DeallocateCustomSizedSlabs();
  }

  BumpPtrAllocatorImpl &operator=(BumpPtrAllocatorImpl &&RHS) {
    DeallocateSlabs(Slabs.begin(), Slabs.end());
    DeallocateCustomSizedSlabs();

    CurPtr = RHS.CurPtr;
    End = RHS.End;
    BytesAllocated = RHS.BytesAllocated;
    Slabs = std::move(RHS.Slabs);
    CustomSizedSlabs = std::move(RHS.CustomSizedSlabs);
    Allocator = std::move(RHS.Allocator);

    RHS.CurPtr = RHS.End = nullptr;
    RHS.BytesAllocated = 0;
    RHS.Slabs.clear();
    RHS.CustomSizedSlabs.clear();
    return *this;
  }

  /// \brief Deallocate all but the first slab and reset the bump pointer to
  /// the beginning of it, freeing all memory allocated so far.
  void Reset() {
    if (Slabs.empty())
      return;

    // Reset the state.
    BytesAllocated = 0;
    CurPtr = (char *)Slabs.front();
    End = CurPtr + SlabSize;

    // Deallocate all but the first slab, and all custom-sized slabs.
    DeallocateSlabs(std::next(Slabs.begin()), Slabs.end());
    Slabs.erase(std::next(Slabs.begin()), Slabs.end());
    DeallocateCustomSizedSlabs();
    CustomSizedSlabs.clear();
  }

  /// \brief Allocate space at the specified alignment.
  void *Allocate(size_t Size, size_t Alignment) {
    if (!CurPtr) // Start a new slab if we haven't allocated one already.
      StartNewSlab();

    // Keep track of how many bytes we've allocated.
    BytesAllocated += Size;

    // 0-byte alignment means 1-byte alignment.
    if (Alignment == 0)
      Alignment = 1;

    // Allocate the aligned space, going forwards from CurPtr.
    char *Ptr = alignPtr(CurPtr, Alignment);

    // Check if we can hold it.
    if (Ptr + Size <= End) {
      CurPtr = Ptr + Size;
      // Update the allocation point of this memory block in MemorySanitizer.
      // Without this, MemorySanitizer messages for values originating from here
      // will point to the allocation of the entire slab.
      __msan_allocated_memory(Ptr, Size);
      return Ptr;
    }

    // If Size is really big, allocate a separate slab for it.
    size_t PaddedSize = Size + Alignment - 1;
    if (PaddedSize > SizeThreshold) {
      void *NewSlab = Allocator.Allocate(PaddedSize, 0);
      CustomSizedSlabs.push_back(std::make_pair(NewSlab, PaddedSize));

      Ptr = alignPtr((char *)NewSlab, Alignment);
      assert((uintptr_t)Ptr + Size <= (uintptr_t)NewSlab + PaddedSize);
      __msan_allocated_memory(Ptr, Size);
      return Ptr;
    }

    // Otherwise, start a new slab and try again.
    StartNewSlab();
    Ptr = alignPtr(CurPtr, Alignment);
    CurPtr = Ptr + Size;
    assert(CurPtr <= End && "Unable to allocate memory!");
    __msan_allocated_memory(Ptr, Size);
    return Ptr;
  }

  // Pull in base class overloads.
  using AllocatorBase<BumpPtrAllocatorImpl>::Allocate;

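  /// \brief Deallocation is a no-op for this allocator; memory is reclaimed
  /// only in bulk via Reset() or on destruction.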
  void Deallocate(const void * /*Ptr*/, size_t /*Size*/) {}

  // Pull in base class overloads.
  using AllocatorBase<BumpPtrAllocatorImpl>::Deallocate;

  size_t GetNumSlabs() const { return Slabs.size() + CustomSizedSlabs.size(); }

  size_t getTotalMemory() const {
    size_t TotalMemory = 0;
    for (auto I = Slabs.begin(), E = Slabs.end(); I != E; ++I)
      TotalMemory += computeSlabSize(std::distance(Slabs.begin(), I));
    for (auto &PtrAndSize : CustomSizedSlabs)
      TotalMemory += PtrAndSize.second;
    return TotalMemory;
  }

  void PrintStats() const {
    detail::printBumpPtrAllocatorStats(Slabs.size(), BytesAllocated,
                                       getTotalMemory());
  }

private:
  /// \brief The current pointer into the current slab.
  ///
  /// This points to the next free byte in the slab.
  char *CurPtr;

  /// \brief The end of the current slab.
  char *End;

  /// \brief The slabs allocated so far.
  SmallVector<void *, 4> Slabs;

  /// \brief Custom-sized slabs allocated for too-large allocation requests.
  SmallVector<std::pair<void *, size_t>, 0> CustomSizedSlabs;

  /// \brief How many bytes we've allocated.
  ///
  /// Used so that we can compute how much space was wasted.
  size_t BytesAllocated;

  /// \brief The allocator instance we use to get slabs of memory.
  AllocatorT Allocator;

  static size_t computeSlabSize(unsigned SlabIdx) {
    // Scale the actual allocated slab size based on the number of slabs
    // allocated. Every 128 slabs allocated, we double the allocated size to
    // reduce allocation frequency, but saturate at multiplying the slab size by
    // 2^30.
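    // For example, with the default 4096-byte SlabSize, slabs 0-127 are 4096
    // bytes each, slabs 128-255 are 8192 bytes, slabs 256-383 are 16384 bytes,
    // and so on.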
    return SlabSize * ((size_t)1 << std::min<size_t>(30, SlabIdx / 128));
  }

  /// \brief Allocate a new slab and move the bump pointers over into the new
  /// slab, modifying CurPtr and End.
  void StartNewSlab() {
    size_t AllocatedSlabSize = computeSlabSize(Slabs.size());

    void *NewSlab = Allocator.Allocate(AllocatedSlabSize, 0);
    Slabs.push_back(NewSlab);
    CurPtr = (char *)(NewSlab);
    End = ((char *)NewSlab) + AllocatedSlabSize;
  }

  /// \brief Deallocate a sequence of slabs.
  void DeallocateSlabs(SmallVectorImpl<void *>::iterator I,
                       SmallVectorImpl<void *>::iterator E) {
    for (; I != E; ++I) {
      size_t AllocatedSlabSize =
          computeSlabSize(std::distance(Slabs.begin(), I));
#ifndef NDEBUG
      // Poison the memory so stale pointers crash sooner.
      sys::Memory::setRangeWritable(*I, AllocatedSlabSize);
      memset(*I, 0xCD, AllocatedSlabSize);
#endif
      Allocator.Deallocate(*I, AllocatedSlabSize);
    }
  }

  /// \brief Deallocate all memory for custom sized slabs.
  void DeallocateCustomSizedSlabs() {
    for (auto &PtrAndSize : CustomSizedSlabs) {
      void *Ptr = PtrAndSize.first;
      size_t Size = PtrAndSize.second;
#ifndef NDEBUG
      // Poison the memory so stale pointers crash sooner.
      sys::Memory::setRangeWritable(Ptr, Size);
      memset(Ptr, 0xCD, Size);
#endif
      Allocator.Deallocate(Ptr, Size);
    }
  }

  template <typename T> friend class SpecificBumpPtrAllocator;
};

/// \brief The standard BumpPtrAllocator which just uses the default template
/// parameters.
typedef BumpPtrAllocatorImpl<> BumpPtrAllocator;

/// \brief A BumpPtrAllocator that allows only elements of a specific type to be
/// allocated.
///
/// This allows calling the destructor in DestroyAll() and when the allocator is
/// destroyed.
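///
/// Illustrative usage (with a hypothetical \c MyNode element type):
/// \code
///   SpecificBumpPtrAllocator<MyNode> Alloc;
///   MyNode *N = new (Alloc.Allocate()) MyNode();
///   Alloc.DestroyAll(); // Runs ~MyNode() on every object allocated so far.
/// \endcode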
template <typename T> class SpecificBumpPtrAllocator {
  BumpPtrAllocator Allocator;

public:
  SpecificBumpPtrAllocator() : Allocator() {}
  SpecificBumpPtrAllocator(SpecificBumpPtrAllocator &&Old)
      : Allocator(std::move(Old.Allocator)) {}
  ~SpecificBumpPtrAllocator() { DestroyAll(); }

  SpecificBumpPtrAllocator &operator=(SpecificBumpPtrAllocator &&RHS) {
    Allocator = std::move(RHS.Allocator);
    return *this;
  }

  /// Call the destructor of each allocated object, then deallocate all but the
  /// first slab and reset the bump pointer to its beginning, freeing all
  /// memory allocated so far.
  void DestroyAll() {
    auto DestroyElements = [](char *Begin, char *End) {
      assert(Begin == alignPtr(Begin, alignOf<T>()));
      for (char *Ptr = Begin; Ptr + sizeof(T) <= End; Ptr += sizeof(T))
        reinterpret_cast<T *>(Ptr)->~T();
    };

    for (auto I = Allocator.Slabs.begin(), E = Allocator.Slabs.end(); I != E;
         ++I) {
      size_t AllocatedSlabSize = BumpPtrAllocator::computeSlabSize(
          std::distance(Allocator.Slabs.begin(), I));
      char *Begin = alignPtr((char *)*I, alignOf<T>());
      char *End = *I == Allocator.Slabs.back() ? Allocator.CurPtr
                                               : (char *)*I + AllocatedSlabSize;

      DestroyElements(Begin, End);
    }

    for (auto &PtrAndSize : Allocator.CustomSizedSlabs) {
      void *Ptr = PtrAndSize.first;
      size_t Size = PtrAndSize.second;
      DestroyElements(alignPtr((char *)Ptr, alignOf<T>()), (char *)Ptr + Size);
    }

    Allocator.Reset();
  }

  /// \brief Allocate space for an array of objects without constructing them.
  T *Allocate(size_t num = 1) { return Allocator.Allocate<T>(num); }
};

}  // end namespace llvm

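/// \brief A placement new operator that obtains memory from a
/// BumpPtrAllocator, enabling expressions such as
/// \c "new (Allocator) MyClass(...)".
///
/// The alignment passed to Allocate is a conservative guess derived from the
/// allocation size, capped at the largest scalar alignment.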
template <typename AllocatorT, size_t SlabSize, size_t SizeThreshold>
void *operator new(size_t Size,
                   llvm::BumpPtrAllocatorImpl<AllocatorT, SlabSize,
                                              SizeThreshold> &Allocator) {
  struct S {
    char c;
    union {
      double D;
      long double LD;
      long long L;
      void *P;
    } x;
  };
  return Allocator.Allocate(
      Size, std::min((size_t)llvm::NextPowerOf2(Size), offsetof(S, x)));
}

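/// \brief The placement delete companion to the placement new above.
///
/// It is only invoked if a constructor throws during a placement new
/// expression; since the BumpPtrAllocator never reclaims individual
/// allocations, it is intentionally a no-op.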
template <typename AllocatorT, size_t SlabSize, size_t SizeThreshold>
void operator delete(
    void *, llvm::BumpPtrAllocatorImpl<AllocatorT, SlabSize, SizeThreshold> &) {
}

#endif // LLVM_SUPPORT_ALLOCATOR_H
