sanitizer_allocator.h revision cab6133c5d7478e96882cb54467e29b3716c0d89
//===-- sanitizer_allocator.h -----------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Specialized memory allocator for ThreadSanitizer, MemorySanitizer, etc.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_ALLOCATOR_H
#define SANITIZER_ALLOCATOR_H

#include "sanitizer_internal_defs.h"
#include "sanitizer_common.h"
#include "sanitizer_libc.h"
#include "sanitizer_list.h"
#include "sanitizer_mutex.h"

namespace __sanitizer {

// Maps size class id to size and back.
template <uptr l0, uptr l1, uptr l2, uptr l3, uptr l4, uptr l5,
          uptr s0, uptr s1, uptr s2, uptr s3, uptr s4,
          uptr c0, uptr c1, uptr c2, uptr c3, uptr c4>
class SplineSizeClassMap {
 private:
  // Here we use a spline composed of 5 polynomials of order 1.
  // The first size class is l0, then the classes go with step s0
  // until they reach l1, after which they go with step s1 and so on.
  // Steps should be powers of two for cheap division.
  // The size of the last size class should be a power of two.
  // There should be at most 256 size classes.
  static const uptr u0 = 0  + (l1 - l0) / s0;
  static const uptr u1 = u0 + (l2 - l1) / s1;
  static const uptr u2 = u1 + (l3 - l2) / s2;
  static const uptr u3 = u2 + (l4 - l3) / s3;
  static const uptr u4 = u3 + (l5 - l4) / s4;

 public:
  // The number of size classes should be a power of two for fast division.
  static const uptr kNumClasses = u4 + 1;
  static const uptr kMaxSize = l5;
  static const uptr kMinSize = l0;

  COMPILER_CHECK(kNumClasses <= 256);
  COMPILER_CHECK((kNumClasses & (kNumClasses - 1)) == 0);
  COMPILER_CHECK((kMaxSize & (kMaxSize - 1)) == 0);

  static uptr Size(uptr class_id) {
    if (class_id <= u0) return l0 + s0 * (class_id - 0);
    if (class_id <= u1) return l1 + s1 * (class_id - u0);
    if (class_id <= u2) return l2 + s2 * (class_id - u1);
    if (class_id <= u3) return l3 + s3 * (class_id - u2);
    if (class_id <= u4) return l4 + s4 * (class_id - u3);
    return 0;
  }
  static uptr ClassID(uptr size) {
    if (size <= l1) return 0  + (size - l0 + s0 - 1) / s0;
    if (size <= l2) return u0 + (size - l1 + s1 - 1) / s1;
    if (size <= l3) return u1 + (size - l2 + s2 - 1) / s2;
    if (size <= l4) return u2 + (size - l3 + s3 - 1) / s3;
    if (size <= l5) return u3 + (size - l4 + s4 - 1) / s4;
    return 0;
  }

  static uptr MaxCached(uptr class_id) {
    if (class_id <= u0) return c0;
    if (class_id <= u1) return c1;
    if (class_id <= u2) return c2;
    if (class_id <= u3) return c3;
    if (class_id <= u4) return c4;
    return 0;
  }
};

class DefaultSizeClassMap: public SplineSizeClassMap<
  /* l: */1 << 4, 1 << 9,  1 << 12, 1 << 15, 1 << 18, 1 << 21,
  /* s: */1 << 4, 1 << 6,  1 << 9,  1 << 12, 1 << 15,
  /* c: */256,    64,      16,      4,       1> {
 private:
  COMPILER_CHECK(kNumClasses == 256);
};
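
// A worked example for DefaultSizeClassMap (a sketch, not exhaustive; the
// values follow directly from the Size()/ClassID() formulas above with
// l0 = 16, s0 = 16, l1 = 512, s1 = 64):
//   CHECK_EQ(DefaultSizeClassMap::ClassID(16),  0);   // Size(0)  == 16
//   CHECK_EQ(DefaultSizeClassMap::ClassID(17),  1);   // Size(1)  == 32
//   CHECK_EQ(DefaultSizeClassMap::ClassID(512), 31);  // Size(31) == 512
//   CHECK_EQ(DefaultSizeClassMap::ClassID(513), 32);  // Size(32) == 576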

class CompactSizeClassMap: public SplineSizeClassMap<
  /* l: */1 << 3, 1 << 4,  1 << 7, 1 << 8, 1 << 12, 1 << 15,
  /* s: */1 << 3, 1 << 4,  1 << 7, 1 << 8, 1 << 12,
  /* c: */256,    64,      16,      4,       1> {
 private:
  COMPILER_CHECK(kNumClasses <= 32);
};

struct AllocatorListNode {
  AllocatorListNode *next;
};

typedef IntrusiveList<AllocatorListNode> AllocatorFreeList;

// Move at most max_count chunks from allocate_from to allocate_to.
// This function would be better as a method of AllocatorFreeList, but we can't
// inherit it from IntrusiveList because ancient gcc complains about non-PODness.
static inline void BulkMove(uptr max_count,
                            AllocatorFreeList *allocate_from,
                            AllocatorFreeList *allocate_to) {
  CHECK(!allocate_from->empty());
  CHECK(allocate_to->empty());
  if (allocate_from->size() <= max_count) {
    allocate_to->append_front(allocate_from);
    CHECK(allocate_from->empty());
  } else {
    for (uptr i = 0; i < max_count; i++) {
      AllocatorListNode *node = allocate_from->front();
      allocate_from->pop_front();
      allocate_to->push_front(node);
    }
    CHECK(!allocate_from->empty());
  }
  CHECK(!allocate_to->empty());
}
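
// An illustrative sketch of the intended use (the variable names here are
// hypothetical): a per-thread cache refills itself from a region's free list.
// Note the preconditions checked above: the source list must be non-empty and
// the destination list must be empty.
//   AllocatorFreeList cache_list;
//   cache_list.clear();
//   // region_list is a non-empty AllocatorFreeList owned by the allocator.
//   BulkMove(64, &region_list, &cache_list);  // Moves at most 64 chunks.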

// Allocators call these callbacks on mmap/munmap.
struct NoOpMapUnmapCallback {
  void OnMap(uptr p, uptr size) const { }
  void OnUnmap(uptr p, uptr size) const { }
};
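
// A minimal sketch of a custom callback (hypothetical, not part of this
// header): a tool may supply any type with these two methods, e.g. to keep
// statistics on memory mapped and unmapped by the allocator.
//   struct StatsMapUnmapCallback {
//     void OnMap(uptr p, uptr size) const { /* e.g. bump a mapped counter */ }
//     void OnUnmap(uptr p, uptr size) const { /* e.g. bump an unmap counter */ }
//   };
//   // Used as: SizeClassAllocator64<..., StatsMapUnmapCallback>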

// SizeClassAllocator64 -- allocator for 64-bit address space.
//
// Space: a portion of address space of kSpaceSize bytes starting at
// a fixed address (kSpaceBeg). Both constants are powers of two and
// kSpaceBeg is kSpaceSize-aligned.
// At the beginning the entire space is mprotect-ed, then small parts of it
// are mapped on demand.
//
// Region: a part of Space dedicated to a single size class.
// There are kNumClasses Regions of equal size.
//
// UserChunk: a piece of memory returned to user.
// MetaChunk: kMetadataSize bytes of metadata associated with a UserChunk.
//
// A Region looks like this:
// UserChunk1 ... UserChunkN <gap> MetaChunkN ... MetaChunk1
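//
// A minimal usage sketch (illustrative only: the space placement, space size
// and metadata size below are hypothetical, not values used by any particular
// tool):
//   typedef SizeClassAllocator64<0x400000000000ULL /*kSpaceBeg*/,
//                                0x40000000000ULL  /*kSpaceSize*/,
//                                16 /*kMetadataSize*/,
//                                DefaultSizeClassMap> ExamplePrimary;
//   ExamplePrimary a;
//   a.Init();                       // Reserve the space, map the RegionInfo array.
//   void *p = a.Allocate(100, 8);   // Served from the 112-byte size class.
//   void *meta = a.GetMetaData(p);  // 16 bytes of per-chunk metadata.
//   a.Deallocate(p);
// In production the allocator is normally used through
// SizeClassAllocatorLocalCache (below) and BulkAllocate/BulkDeallocate rather
// than through Allocate/Deallocate directly.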
template <const uptr kSpaceBeg, const uptr kSpaceSize,
          const uptr kMetadataSize, class SizeClassMap,
          class MapUnmapCallback = NoOpMapUnmapCallback>
class SizeClassAllocator64 {
 public:
  void Init() {
    CHECK_EQ(kSpaceBeg,
             reinterpret_cast<uptr>(Mprotect(kSpaceBeg, kSpaceSize)));
    MapWithCallback(kSpaceEnd, AdditionalSize());
  }

  void MapWithCallback(uptr beg, uptr size) {
    CHECK_EQ(beg, reinterpret_cast<uptr>(MmapFixedOrDie(beg, size)));
    MapUnmapCallback().OnMap(beg, size);
  }

  void UnmapWithCallback(uptr beg, uptr size) {
    MapUnmapCallback().OnUnmap(beg, size);
    UnmapOrDie(reinterpret_cast<void *>(beg), size);
  }

  bool CanAllocate(uptr size, uptr alignment) {
    return size <= SizeClassMap::kMaxSize &&
      alignment <= SizeClassMap::kMaxSize;
  }

  void *Allocate(uptr size, uptr alignment) {
    if (size < alignment) size = alignment;
    CHECK(CanAllocate(size, alignment));
    return AllocateBySizeClass(ClassID(size));
  }

  void Deallocate(void *p) {
    CHECK(PointerIsMine(p));
    DeallocateBySizeClass(p, GetSizeClass(p));
  }

  // Allocate several chunks of the given class_id.
  void BulkAllocate(uptr class_id, AllocatorFreeList *free_list) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *region = GetRegionInfo(class_id);
    SpinMutexLock l(&region->mutex);
    if (region->free_list.empty()) {
      PopulateFreeList(class_id, region);
    }
    BulkMove(SizeClassMap::MaxCached(class_id), &region->free_list, free_list);
  }

  // Swallow the entire free_list for the given class_id.
  void BulkDeallocate(uptr class_id, AllocatorFreeList *free_list) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *region = GetRegionInfo(class_id);
    SpinMutexLock l(&region->mutex);
    region->free_list.append_front(free_list);
  }

  static bool PointerIsMine(void *p) {
    return reinterpret_cast<uptr>(p) / kSpaceSize == kSpaceBeg / kSpaceSize;
  }

  static uptr GetSizeClass(void *p) {
    return (reinterpret_cast<uptr>(p) / kRegionSize) % kNumClasses;
  }

  void *GetBlockBegin(void *p) {
    uptr class_id = GetSizeClass(p);
    uptr size = SizeClassMap::Size(class_id);
    uptr chunk_idx = GetChunkIdx((uptr)p, size);
    uptr reg_beg = (uptr)p & ~(kRegionSize - 1);
    uptr beg = chunk_idx * size;
    uptr next_beg = beg + size;
    RegionInfo *region = GetRegionInfo(class_id);
    if (region->mapped_user >= next_beg)
      return reinterpret_cast<void*>(reg_beg + beg);
    return 0;
  }

  static uptr GetActuallyAllocatedSize(void *p) {
    CHECK(PointerIsMine(p));
    return SizeClassMap::Size(GetSizeClass(p));
  }

  uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }

  void *GetMetaData(void *p) {
    uptr class_id = GetSizeClass(p);
    uptr size = SizeClassMap::Size(class_id);
    uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), size);
    return reinterpret_cast<void*>(kSpaceBeg + (kRegionSize * (class_id + 1)) -
                                   (1 + chunk_idx) * kMetadataSize);
  }

  uptr TotalMemoryUsed() {
    uptr res = 0;
    for (uptr i = 0; i < kNumClasses; i++)
      res += GetRegionInfo(i)->allocated_user;
    return res;
  }

  // Test-only.
  void TestOnlyUnmap() {
    UnmapWithCallback(kSpaceBeg, kSpaceSize + AdditionalSize());
  }

  typedef SizeClassMap SizeClassMapT;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;  // 2^k <= 256

 private:
  static const uptr kRegionSize = kSpaceSize / kNumClasses;
  static const uptr kSpaceEnd = kSpaceBeg + kSpaceSize;
  COMPILER_CHECK(kSpaceBeg % kSpaceSize == 0);
  // kRegionSize must be >= 2^32.
  COMPILER_CHECK((kRegionSize) >= (1ULL << (SANITIZER_WORDSIZE / 2)));
  // Populate the free list with at most this number of bytes at once
  // or with one element if its size is greater.
  static const uptr kPopulateSize = 1 << 18;
  // Call mmap for user memory with at least this size.
  static const uptr kUserMapSize = 1 << 18;
  // Call mmap for metadata memory with at least this size.
  static const uptr kMetaMapSize = 1 << 16;

  struct RegionInfo {
    SpinMutex mutex;
    AllocatorFreeList free_list;
    uptr allocated_user;  // Bytes allocated for user memory.
    uptr allocated_meta;  // Bytes allocated for metadata.
    uptr mapped_user;  // Bytes mapped for user memory.
    uptr mapped_meta;  // Bytes mapped for metadata.
  };
  COMPILER_CHECK(sizeof(RegionInfo) >= kCacheLineSize);

  static uptr AdditionalSize() {
    uptr PageSize = GetPageSizeCached();
    uptr res = Max(sizeof(RegionInfo) * kNumClasses, PageSize);
    CHECK_EQ(res % PageSize, 0);
    return res;
  }

  RegionInfo *GetRegionInfo(uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *regions = reinterpret_cast<RegionInfo*>(kSpaceBeg + kSpaceSize);
    return &regions[class_id];
  }

  static uptr GetChunkIdx(uptr chunk, uptr size) {
    u32 offset = chunk % kRegionSize;
    // Here we divide by a non-constant. This is costly.
    // We require that kRegionSize is at least 2^32 so that offset is 32-bit.
    // We save 2x by using 32-bit div, but may need to use a 256-way switch.
    return offset / (u32)size;
  }

  void PopulateFreeList(uptr class_id, RegionInfo *region) {
    CHECK(region->free_list.empty());
    uptr size = SizeClassMap::Size(class_id);
    uptr beg_idx = region->allocated_user;
    uptr end_idx = beg_idx + kPopulateSize;
    uptr region_beg = kSpaceBeg + kRegionSize * class_id;
    if (end_idx + size > region->mapped_user) {
      // Do the mmap for the user memory.
      uptr map_size = kUserMapSize;
      while (end_idx + size > region->mapped_user + map_size)
        map_size += kUserMapSize;
      CHECK_GT(region->mapped_user + map_size, end_idx);
      MapWithCallback(region_beg + region->mapped_user, map_size);
      region->mapped_user += map_size;
    }
    uptr idx = beg_idx;
    uptr i = 0;
    do {  // do-while loop because we need to put at least one item.
      uptr p = region_beg + idx;
      region->free_list.push_front(reinterpret_cast<AllocatorListNode*>(p));
      idx += size;
      i++;
    } while (idx < end_idx);
    region->allocated_user += idx - beg_idx;
    CHECK_LE(region->allocated_user, region->mapped_user);
    region->allocated_meta += i * kMetadataSize;
    if (region->allocated_meta > region->mapped_meta) {
      uptr map_size = kMetaMapSize;
      while (region->allocated_meta > region->mapped_meta + map_size)
        map_size += kMetaMapSize;
      // Do the mmap for the metadata.
      CHECK_GE(region->mapped_meta + map_size, region->allocated_meta);
      MapWithCallback(region_beg + kRegionSize -
                      region->mapped_meta - map_size, map_size);
      region->mapped_meta += map_size;
    }
    CHECK_LE(region->allocated_meta, region->mapped_meta);
    if (region->allocated_user + region->allocated_meta > kRegionSize) {
      Printf("Out of memory. Dying.\n");
      Printf("The process has exhausted %zuMB for size class %zu.\n",
          kRegionSize / 1024 / 1024, size);
      Die();
    }
  }

  void *AllocateBySizeClass(uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *region = GetRegionInfo(class_id);
    SpinMutexLock l(&region->mutex);
    if (region->free_list.empty()) {
      PopulateFreeList(class_id, region);
    }
    CHECK(!region->free_list.empty());
    AllocatorListNode *node = region->free_list.front();
    region->free_list.pop_front();
    return reinterpret_cast<void*>(node);
  }

  void DeallocateBySizeClass(void *p, uptr class_id) {
    RegionInfo *region = GetRegionInfo(class_id);
    SpinMutexLock l(&region->mutex);
    region->free_list.push_front(reinterpret_cast<AllocatorListNode*>(p));
  }
};

// SizeClassAllocator32 -- allocator for 32-bit address space.
// This allocator can theoretically be used on 64-bit arch, but there it is less
// efficient than SizeClassAllocator64.
//
// [kSpaceBeg, kSpaceBeg + kSpaceSize) is the range of addresses which can
// be returned by MmapOrDie().
//
// Region:
//   a result of a single call to MmapAlignedOrDie(kRegionSize, kRegionSize).
// Since the regions are aligned by kRegionSize, there are exactly
// kNumPossibleRegions possible regions in the address space and so we keep
// a u8 array possible_regions[kNumPossibleRegions] to store the size classes.
// A size class of 0 means the region is not used by the allocator.
//
// One Region is used to allocate chunks of a single size class.
// A Region looks like this:
// UserChunk1 .. UserChunkN <gap> MetaChunkN .. MetaChunk1
//
// To avoid false sharing, the objects of this class should be
// cache-line aligned.
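//
// A minimal usage sketch (illustrative; the parameters are hypothetical):
//   typedef SizeClassAllocator32<0 /*kSpaceBeg*/, 1ULL << 32 /*kSpaceSize*/,
//                                16 /*kMetadataSize*/,
//                                CompactSizeClassMap> ExamplePrimary32;
//   ExamplePrimary32 a;
//   a.Init();
//   void *p = a.Allocate(100, 8);
//   a.Deallocate(p);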
template <const uptr kSpaceBeg, const u64 kSpaceSize,
          const uptr kMetadataSize, class SizeClassMap,
          class MapUnmapCallback = NoOpMapUnmapCallback>
class SizeClassAllocator32 {
 public:
  void Init() {
    state_ = reinterpret_cast<State *>(MapWithCallback(sizeof(State)));
  }

  void *MapWithCallback(uptr size) {
    size = RoundUpTo(size, GetPageSizeCached());
    void *res = MmapOrDie(size, "SizeClassAllocator32");
    MapUnmapCallback().OnMap((uptr)res, size);
    return res;
  }
  void UnmapWithCallback(uptr beg, uptr size) {
    MapUnmapCallback().OnUnmap(beg, size);
    UnmapOrDie(reinterpret_cast<void *>(beg), size);
  }

  bool CanAllocate(uptr size, uptr alignment) {
    return size <= SizeClassMap::kMaxSize &&
      alignment <= SizeClassMap::kMaxSize;
  }

  void *Allocate(uptr size, uptr alignment) {
    if (size < alignment) size = alignment;
    CHECK(CanAllocate(size, alignment));
    return AllocateBySizeClass(ClassID(size));
  }

  void Deallocate(void *p) {
    CHECK(PointerIsMine(p));
    DeallocateBySizeClass(p, GetSizeClass(p));
  }

  void *GetMetaData(void *p) {
    CHECK(PointerIsMine(p));
    uptr mem = reinterpret_cast<uptr>(p);
    uptr beg = ComputeRegionBeg(mem);
    uptr size = SizeClassMap::Size(GetSizeClass(p));
    u32 offset = mem - beg;
    uptr n = offset / (u32)size;  // 32-bit division
    uptr meta = (beg + kRegionSize) - (n + 1) * kMetadataSize;
    return reinterpret_cast<void*>(meta);
  }

  // Allocate several chunks of the given class_id.
  void BulkAllocate(uptr class_id, AllocatorFreeList *free_list) {
    SizeClassInfo *sci = GetSizeClassInfo(class_id);
    SpinMutexLock l(&sci->mutex);
    EnsureSizeClassHasAvailableChunks(sci, class_id);
    CHECK(!sci->free_list.empty());
    BulkMove(SizeClassMap::MaxCached(class_id), &sci->free_list, free_list);
  }

  // Swallow the entire free_list for the given class_id.
  void BulkDeallocate(uptr class_id, AllocatorFreeList *free_list) {
    SizeClassInfo *sci = GetSizeClassInfo(class_id);
    SpinMutexLock l(&sci->mutex);
    sci->free_list.append_front(free_list);
  }

  bool PointerIsMine(void *p) {
    return
      state_->possible_regions[ComputeRegionId(reinterpret_cast<uptr>(p))] != 0;
  }

  uptr GetSizeClass(void *p) {
    return
      state_->possible_regions[ComputeRegionId(reinterpret_cast<uptr>(p))] - 1;
  }

  void *GetBlockBegin(void *p) {
    CHECK(PointerIsMine(p));
    uptr mem = reinterpret_cast<uptr>(p);
    uptr beg = ComputeRegionBeg(mem);
    uptr size = SizeClassMap::Size(GetSizeClass(p));
    u32 offset = mem - beg;
    u32 n = offset / (u32)size;  // 32-bit division
    uptr res = beg + (n * (u32)size);
    return reinterpret_cast<void*>(res);
  }

  uptr GetActuallyAllocatedSize(void *p) {
    CHECK(PointerIsMine(p));
    return SizeClassMap::Size(GetSizeClass(p));
  }

  uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }

  uptr TotalMemoryUsed() {
    // No need to lock here.
    uptr res = 0;
    for (uptr i = 0; i < kNumPossibleRegions; i++)
      if (state_->possible_regions[i])
        res += kRegionSize;
    return res;
  }

  void TestOnlyUnmap() {
    for (uptr i = 0; i < kNumPossibleRegions; i++)
      if (state_->possible_regions[i])
        UnmapWithCallback((i * kRegionSize), kRegionSize);
    UnmapWithCallback(reinterpret_cast<uptr>(state_), sizeof(State));
  }

  typedef SizeClassMap SizeClassMapT;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;  // 2^k <= 128

 private:
  static const uptr kRegionSizeLog = SANITIZER_WORDSIZE == 64 ? 24 : 20;
  static const uptr kRegionSize = 1 << kRegionSizeLog;
  static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize;
  COMPILER_CHECK(kNumClasses <= 128);

  struct SizeClassInfo {
    SpinMutex mutex;
    AllocatorFreeList free_list;
    char padding[kCacheLineSize - sizeof(uptr) - sizeof(AllocatorFreeList)];
  };
  COMPILER_CHECK(sizeof(SizeClassInfo) == kCacheLineSize);

  uptr ComputeRegionId(uptr mem) {
    uptr res = mem >> kRegionSizeLog;
    CHECK_LT(res, kNumPossibleRegions);
    return res;
  }

  uptr ComputeRegionBeg(uptr mem) {
    return mem & ~(kRegionSize - 1);
  }

  uptr AllocateRegion(uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    uptr res = reinterpret_cast<uptr>(MmapAlignedOrDie(kRegionSize, kRegionSize,
                                      "SizeClassAllocator32"));
    MapUnmapCallback().OnMap(res, kRegionSize);
    CHECK_EQ(0U, (res & (kRegionSize - 1)));
    CHECK_EQ(0U, state_->possible_regions[ComputeRegionId(res)]);
    state_->possible_regions[ComputeRegionId(res)] = class_id + 1;
    return res;
  }

  SizeClassInfo *GetSizeClassInfo(uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    return &state_->size_class_info_array[class_id];
  }

  void EnsureSizeClassHasAvailableChunks(SizeClassInfo *sci, uptr class_id) {
    if (!sci->free_list.empty()) return;
    uptr size = SizeClassMap::Size(class_id);
    uptr reg = AllocateRegion(class_id);
    uptr n_chunks = kRegionSize / (size + kMetadataSize);
    for (uptr i = reg; i < reg + n_chunks * size; i += size)
      sci->free_list.push_back(reinterpret_cast<AllocatorListNode*>(i));
  }

  void *AllocateBySizeClass(uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    SizeClassInfo *sci = GetSizeClassInfo(class_id);
    SpinMutexLock l(&sci->mutex);
    EnsureSizeClassHasAvailableChunks(sci, class_id);
    CHECK(!sci->free_list.empty());
    AllocatorListNode *node = sci->free_list.front();
    sci->free_list.pop_front();
    return reinterpret_cast<void*>(node);
  }

  void DeallocateBySizeClass(void *p, uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    SizeClassInfo *sci = GetSizeClassInfo(class_id);
    SpinMutexLock l(&sci->mutex);
    sci->free_list.push_front(reinterpret_cast<AllocatorListNode*>(p));
  }

  struct State {
    u8 possible_regions[kNumPossibleRegions];
    SizeClassInfo size_class_info_array[kNumClasses];
  };
  State *state_;
};

// Objects of this type should be used as local caches for SizeClassAllocator64.
// Since the typical use of this class is to have one object per thread in TLS,
// it has to be POD.
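//
// A minimal per-thread usage sketch (illustrative; ExamplePrimary is the
// hypothetical SizeClassAllocator64 instantiation sketched above):
//   typedef SizeClassAllocatorLocalCache<ExamplePrimary> ExampleCache;
//   ExamplePrimary primary;  // Shared by all threads.
//   ExampleCache cache;      // In practice one per thread, e.g. in TLS.
//   cache.Init();            // Not needed if the cache is zero-initialized.
//   void *p = cache.Allocate(&primary, primary.ClassID(100));
//   cache.Deallocate(&primary, primary.GetSizeClass(p), p);
//   cache.Drain(&primary);   // E.g. when the thread exits.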
template<class SizeClassAllocator>
struct SizeClassAllocatorLocalCache {
  typedef SizeClassAllocator Allocator;
  static const uptr kNumClasses = SizeClassAllocator::kNumClasses;
  // Don't need to call Init if the object is a global (i.e. zero-initialized).
  void Init() {
    internal_memset(this, 0, sizeof(*this));
  }

  void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    AllocatorFreeList *free_list = &free_lists_[class_id];
    if (free_list->empty())
      allocator->BulkAllocate(class_id, free_list);
    CHECK(!free_list->empty());
    void *res = free_list->front();
    free_list->pop_front();
    return res;
  }

  void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
    CHECK_LT(class_id, kNumClasses);
    AllocatorFreeList *free_list = &free_lists_[class_id];
    free_list->push_front(reinterpret_cast<AllocatorListNode*>(p));
    if (free_list->size() >= 2 * SizeClassMap::MaxCached(class_id))
      DrainHalf(allocator, class_id);
  }

  void Drain(SizeClassAllocator *allocator) {
    for (uptr i = 0; i < kNumClasses; i++) {
      allocator->BulkDeallocate(i, &free_lists_[i]);
      CHECK(free_lists_[i].empty());
    }
  }

  // private:
  typedef typename SizeClassAllocator::SizeClassMapT SizeClassMap;
  AllocatorFreeList free_lists_[kNumClasses];

  void DrainHalf(SizeClassAllocator *allocator, uptr class_id) {
    AllocatorFreeList *free_list = &free_lists_[class_id];
    AllocatorFreeList half;
    half.clear();
    const uptr count = free_list->size() / 2;
    for (uptr i = 0; i < count; i++) {
      AllocatorListNode *node = free_list->front();
      free_list->pop_front();
      half.push_front(node);
    }
    allocator->BulkDeallocate(class_id, &half);
  }
};

// This class can (de)allocate only large chunks of memory using mmap/unmap.
// The main purpose of this allocator is to cover large and rare allocation
// sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
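//
// A minimal usage sketch (illustrative only):
//   LargeMmapAllocator<> a;
//   a.Init();
//   void *p = a.Allocate(1 << 24, 8);  // 16M, rounded up to whole pages.
//   CHECK(a.PointerIsMine(p));
//   a.Deallocate(p);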
template <class MapUnmapCallback = NoOpMapUnmapCallback>
class LargeMmapAllocator {
 public:
  void Init() {
    internal_memset(this, 0, sizeof(*this));
    page_size_ = GetPageSizeCached();
  }
  void *Allocate(uptr size, uptr alignment) {
    CHECK(IsPowerOfTwo(alignment));
    uptr map_size = RoundUpMapSize(size);
    if (alignment > page_size_)
      map_size += alignment;
    if (map_size < size) return 0;  // Overflow.
    uptr map_beg = reinterpret_cast<uptr>(
        MmapOrDie(map_size, "LargeMmapAllocator"));
    MapUnmapCallback().OnMap(map_beg, map_size);
    uptr map_end = map_beg + map_size;
    uptr res = map_beg + page_size_;
    if (res & (alignment - 1))  // Align.
      res += alignment - (res & (alignment - 1));
    CHECK_EQ(0, res & (alignment - 1));
    CHECK_LE(res + size, map_end);
    Header *h = GetHeader(res);
    h->size = size;
    h->map_beg = map_beg;
    h->map_size = map_size;
    {
      SpinMutexLock l(&mutex_);
      h->next = list_;
      h->prev = 0;
      if (list_)
        list_->prev = h;
      list_ = h;
    }
    return reinterpret_cast<void*>(res);
  }

  void Deallocate(void *p) {
    Header *h = GetHeader(p);
    {
      SpinMutexLock l(&mutex_);
      Header *prev = h->prev;
      Header *next = h->next;
      if (prev)
        prev->next = next;
      if (next)
        next->prev = prev;
      if (h == list_)
        list_ = next;
    }
    MapUnmapCallback().OnUnmap(h->map_beg, h->map_size);
    UnmapOrDie(reinterpret_cast<void*>(h->map_beg), h->map_size);
  }

  uptr TotalMemoryUsed() {
    SpinMutexLock l(&mutex_);
    uptr res = 0;
    for (Header *l = list_; l; l = l->next) {
      res += RoundUpMapSize(l->size);
    }
    return res;
  }

  bool PointerIsMine(void *p) {
    return GetBlockBegin(p) != 0;
  }

  uptr GetActuallyAllocatedSize(void *p) {
    return RoundUpTo(GetHeader(p)->size, page_size_);
  }

  // At least page_size_/2 metadata bytes are available.
  void *GetMetaData(void *p) {
    return GetHeader(p) + 1;
  }

  void *GetBlockBegin(void *ptr) {
    uptr p = reinterpret_cast<uptr>(ptr);
    SpinMutexLock l(&mutex_);
    for (Header *l = list_; l; l = l->next) {
      if (p >= l->map_beg && p < l->map_beg + l->map_size)
        return GetUser(l);
    }
    return 0;
  }

 private:
  struct Header {
    uptr map_beg;
    uptr map_size;
    uptr size;
    Header *next;
    Header *prev;
  };

  Header *GetHeader(uptr p) {
    CHECK_EQ(p % page_size_, 0);
    return reinterpret_cast<Header*>(p - page_size_);
  }
  Header *GetHeader(void *p) { return GetHeader(reinterpret_cast<uptr>(p)); }

  void *GetUser(Header *h) {
    CHECK_EQ((uptr)h % page_size_, 0);
    return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + page_size_);
  }

  uptr RoundUpMapSize(uptr size) {
    return RoundUpTo(size, page_size_) + page_size_;
  }

  uptr page_size_;
  Header *list_;
  SpinMutex mutex_;
};

// This class implements a complete memory allocator by using two
// internal allocators:
// PrimaryAllocator is efficient, but may not allocate some sizes (alignments).
//  When allocating 2^x bytes it should return a 2^x-aligned chunk.
// PrimaryAllocator is used via a local AllocatorCache.
// SecondaryAllocator can allocate anything, but is not efficient.
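//
// A minimal sketch of how the pieces fit together (ExamplePrimary and
// ExampleCache are the hypothetical instantiations sketched above; real tools
// choose their own parameters):
//   typedef LargeMmapAllocator<> ExampleSecondary;
//   typedef CombinedAllocator<ExamplePrimary, ExampleCache,
//                             ExampleSecondary> ExampleAllocator;
//   ExampleAllocator allocator;
//   ExampleCache cache;  // Normally one per thread.
//   allocator.Init();
//   cache.Init();
//   void *p = allocator.Allocate(&cache, 100, 8);      // Served by the primary.
//   void *q = allocator.Allocate(&cache, 1 << 25, 8);  // Too large: secondary.
//   allocator.Deallocate(&cache, p);
//   allocator.Deallocate(&cache, q);
//   allocator.SwallowCache(&cache);  // E.g. when the owning thread dies.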
template <class PrimaryAllocator, class AllocatorCache,
          class SecondaryAllocator>  // NOLINT
class CombinedAllocator {
 public:
  void Init() {
    primary_.Init();
    secondary_.Init();
  }

  void *Allocate(AllocatorCache *cache, uptr size, uptr alignment,
                 bool cleared = false) {
    // Returning 0 on malloc(0) may break a lot of code.
    if (size == 0)
      size = 1;
    if (size + alignment < size)
      return 0;
    if (alignment > 8)
      size = RoundUpTo(size, alignment);
    void *res;
    if (primary_.CanAllocate(size, alignment)) {
      if (cache)  // Allocate from cache.
        res = cache->Allocate(&primary_, primary_.ClassID(size));
      else  // No thread-local cache, allocate directly from primary allocator.
        res = primary_.Allocate(size, alignment);
    } else {  // Secondary allocator does not use cache.
      res = secondary_.Allocate(size, alignment);
    }
    if (alignment > 8)
      CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
    if (cleared && res)
      internal_memset(res, 0, size);
    return res;
  }

  void Deallocate(AllocatorCache *cache, void *p) {
    if (!p) return;
    if (primary_.PointerIsMine(p))
      cache->Deallocate(&primary_, primary_.GetSizeClass(p), p);
    else
      secondary_.Deallocate(p);
  }

  void *Reallocate(AllocatorCache *cache, void *p, uptr new_size,
                   uptr alignment) {
    if (!p)
      return Allocate(cache, new_size, alignment);
    if (!new_size) {
      Deallocate(cache, p);
      return 0;
    }
    CHECK(PointerIsMine(p));
    uptr old_size = GetActuallyAllocatedSize(p);
    uptr memcpy_size = Min(new_size, old_size);
    void *new_p = Allocate(cache, new_size, alignment);
    if (new_p)
      internal_memcpy(new_p, p, memcpy_size);
    Deallocate(cache, p);
    return new_p;
  }

  bool PointerIsMine(void *p) {
    if (primary_.PointerIsMine(p))
      return true;
    return secondary_.PointerIsMine(p);
  }

  void *GetMetaData(void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetMetaData(p);
    return secondary_.GetMetaData(p);
  }

  void *GetBlockBegin(void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetBlockBegin(p);
    return secondary_.GetBlockBegin(p);
  }

  uptr GetActuallyAllocatedSize(void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetActuallyAllocatedSize(p);
    return secondary_.GetActuallyAllocatedSize(p);
  }

  uptr TotalMemoryUsed() {
    return primary_.TotalMemoryUsed() + secondary_.TotalMemoryUsed();
  }

  void TestOnlyUnmap() { primary_.TestOnlyUnmap(); }

  void SwallowCache(AllocatorCache *cache) {
    cache->Drain(&primary_);
  }

 private:
  PrimaryAllocator primary_;
  SecondaryAllocator secondary_;
};

}  // namespace __sanitizer

#endif  // SANITIZER_ALLOCATOR_H