sanitizer_allocator.h revision 2592d766401edb3d36676f1f522592f1d5fb2b07
//===-- sanitizer_allocator.h -----------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Specialized memory allocator for ThreadSanitizer, MemorySanitizer, etc.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_ALLOCATOR_H
#define SANITIZER_ALLOCATOR_H

#include "sanitizer_internal_defs.h"
#include "sanitizer_common.h"
#include "sanitizer_libc.h"
#include "sanitizer_list.h"
#include "sanitizer_mutex.h"

namespace __sanitizer {

// Maps size class id to size and back.
template <uptr l0, uptr l1, uptr l2, uptr l3, uptr l4, uptr l5,
          uptr s0, uptr s1, uptr s2, uptr s3, uptr s4,
          uptr c0, uptr c1, uptr c2, uptr c3, uptr c4>
class SplineSizeClassMap {
 private:
  // Here we use a spline composed of 5 polynomials of order 1.
  // The first size class is l0, then the classes go with step s0
  // until they reach l1, after which they go with step s1 and so on.
  // Steps should be powers of two for cheap division.
  // The size of the last size class should be a power of two.
  // There should be at most 256 size classes.
  static const uptr u0 = 0  + (l1 - l0) / s0;
  static const uptr u1 = u0 + (l2 - l1) / s1;
  static const uptr u2 = u1 + (l3 - l2) / s2;
  static const uptr u3 = u2 + (l4 - l3) / s3;
  static const uptr u4 = u3 + (l5 - l4) / s4;

 public:
  // The number of size classes should be a power of two for fast division.
  static const uptr kNumClasses = u4 + 1;
  static const uptr kMaxSize = l5;
  static const uptr kMinSize = l0;

  COMPILER_CHECK(kNumClasses <= 256);
  COMPILER_CHECK((kNumClasses & (kNumClasses - 1)) == 0);
  COMPILER_CHECK((kMaxSize & (kMaxSize - 1)) == 0);

  static uptr Size(uptr class_id) {
    if (class_id <= u0) return l0 + s0 * (class_id - 0);
    if (class_id <= u1) return l1 + s1 * (class_id - u0);
    if (class_id <= u2) return l2 + s2 * (class_id - u1);
    if (class_id <= u3) return l3 + s3 * (class_id - u2);
    if (class_id <= u4) return l4 + s4 * (class_id - u3);
    return 0;
  }
  static uptr ClassID(uptr size) {
    if (size <= l1) return 0  + (size - l0 + s0 - 1) / s0;
    if (size <= l2) return u0 + (size - l1 + s1 - 1) / s1;
    if (size <= l3) return u1 + (size - l2 + s2 - 1) / s2;
    if (size <= l4) return u2 + (size - l3 + s3 - 1) / s3;
    if (size <= l5) return u3 + (size - l4 + s4 - 1) / s4;
    return 0;
  }

  static uptr MaxCached(uptr class_id) {
    if (class_id <= u0) return c0;
    if (class_id <= u1) return c1;
    if (class_id <= u2) return c2;
    if (class_id <= u3) return c3;
    if (class_id <= u4) return c4;
    return 0;
  }
};

class DefaultSizeClassMap: public SplineSizeClassMap<
  /* l: */1 << 4, 1 << 9,  1 << 12, 1 << 15, 1 << 18, 1 << 21,
  /* s: */1 << 4, 1 << 6,  1 << 9,  1 << 12, 1 << 15,
  /* c: */256,    64,      16,      4,       1> {
 private:
  COMPILER_CHECK(kNumClasses == 256);
};
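
// For example, the DefaultSizeClassMap parameters above yield:
//   ClassID(16)  == 0,  Size(0)  == 16
//   ClassID(17)  == 1,  Size(1)  == 32   (a 17-byte request is rounded up to 32)
//   ClassID(512) == 31, Size(31) == 512
//   ClassID(513) == 32, Size(32) == 576
//   MaxCached(1) == 256, MaxCached(255) == 1
// i.e. small classes are cached aggressively, the largest ones hardly at all.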

class CompactSizeClassMap: public SplineSizeClassMap<
  /* l: */1 << 3, 1 << 4,  1 << 7, 1 << 8, 1 << 12, 1 << 15,
  /* s: */1 << 3, 1 << 4,  1 << 7, 1 << 8, 1 << 12,
  /* c: */256,    64,      16,      4,       1> {
 private:
  COMPILER_CHECK(kNumClasses <= 32);
};

struct AllocatorListNode {
  AllocatorListNode *next;
};

typedef IntrusiveList<AllocatorListNode> AllocatorFreeList;

// Move at most max_count chunks from allocate_from to allocate_to.
// This function would be better as a method of AllocatorFreeList, but we can't
// inherit it from IntrusiveList as ancient versions of gcc complain about
// non-PODness.
static inline void BulkMove(uptr max_count,
                            AllocatorFreeList *allocate_from,
                            AllocatorFreeList *allocate_to) {
  CHECK(!allocate_from->empty());
  CHECK(allocate_to->empty());
  if (allocate_from->size() <= max_count) {
    allocate_to->append_front(allocate_from);
    CHECK(allocate_from->empty());
  } else {
    for (uptr i = 0; i < max_count; i++) {
      AllocatorListNode *node = allocate_from->front();
      allocate_from->pop_front();
      allocate_to->push_front(node);
    }
    CHECK(!allocate_from->empty());
  }
  CHECK(!allocate_to->empty());
}

// Allocators call these callbacks on mmap/munmap.
struct NoOpMapUnmapCallback {
  void OnMap(uptr p, uptr size) const { }
  void OnUnmap(uptr p, uptr size) const { }
};
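
// A tool can pass its own callback type with the same two methods, e.g.
// (illustrative sketch only, not part of this header):
//   struct ExampleMapUnmapCallback {
//     void OnMap(uptr p, uptr size) const { /* e.g. update stats or shadow */ }
//     void OnUnmap(uptr p, uptr size) const { /* undo whatever OnMap did */ }
//   };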

// SizeClassAllocator64 -- allocator for 64-bit address space.
//
// Space: a portion of address space of kSpaceSize bytes starting at
// a fixed address (kSpaceBeg). Both constants are powers of two and
// kSpaceBeg is kSpaceSize-aligned.
// At the beginning the entire space is mprotect-ed, then small parts of it
// are mapped on demand.
//
// Region: a part of Space dedicated to a single size class.
// There are kNumClasses Regions of equal size.
//
// UserChunk: a piece of memory returned to user.
// MetaChunk: kMetadataSize bytes of metadata associated with a UserChunk.
//
// A Region looks like this:
// UserChunk1 ... UserChunkN <gap> MetaChunkN ... MetaChunk1
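//
// Illustrative instantiation (a sketch only; the constants below are made-up
// example values, not the ones any particular tool uses):
//   typedef SizeClassAllocator64<0x400000000000ULL /*kSpaceBeg*/,
//                                0x010000000000ULL /*kSpaceSize*/,
//                                16 /*kMetadataSize*/,
//                                DefaultSizeClassMap> ExampleAllocator64;
//   ExampleAllocator64 a;
//   a.Init();
//   void *p = a.Allocate(100, 8);   // served from the size class for 100 bytes
//   void *meta = a.GetMetaData(p);  // kMetadataSize bytes usable
//   a.Deallocate(p);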
template <const uptr kSpaceBeg, const uptr kSpaceSize,
          const uptr kMetadataSize, class SizeClassMap,
          class MapUnmapCallback = NoOpMapUnmapCallback>
class SizeClassAllocator64 {
 public:
  void Init() {
    CHECK_EQ(kSpaceBeg,
             reinterpret_cast<uptr>(Mprotect(kSpaceBeg, kSpaceSize)));
    MapWithCallback(kSpaceEnd, AdditionalSize());
  }

  void MapWithCallback(uptr beg, uptr size) {
    CHECK_EQ(beg, reinterpret_cast<uptr>(MmapFixedOrDie(beg, size)));
    MapUnmapCallback().OnMap(beg, size);
  }

  void UnmapWithCallback(uptr beg, uptr size) {
    MapUnmapCallback().OnUnmap(beg, size);
    UnmapOrDie(reinterpret_cast<void *>(beg), size);
  }

  bool CanAllocate(uptr size, uptr alignment) {
    return size <= SizeClassMap::kMaxSize &&
      alignment <= SizeClassMap::kMaxSize;
  }

  void *Allocate(uptr size, uptr alignment) {
    if (size < alignment) size = alignment;
    CHECK(CanAllocate(size, alignment));
    return AllocateBySizeClass(ClassID(size));
  }

  void Deallocate(void *p) {
    CHECK(PointerIsMine(p));
    DeallocateBySizeClass(p, GetSizeClass(p));
  }

  // Allocate several chunks of the given class_id.
  void BulkAllocate(uptr class_id, AllocatorFreeList *free_list) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *region = GetRegionInfo(class_id);
    SpinMutexLock l(&region->mutex);
    if (region->free_list.empty()) {
      PopulateFreeList(class_id, region);
    }
    BulkMove(SizeClassMap::MaxCached(class_id), &region->free_list, free_list);
  }

  // Swallow the entire free_list for the given class_id.
  void BulkDeallocate(uptr class_id, AllocatorFreeList *free_list) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *region = GetRegionInfo(class_id);
    SpinMutexLock l(&region->mutex);
    region->free_list.append_front(free_list);
  }

  static bool PointerIsMine(void *p) {
    return reinterpret_cast<uptr>(p) / kSpaceSize == kSpaceBeg / kSpaceSize;
  }

  static uptr GetSizeClass(void *p) {
    return (reinterpret_cast<uptr>(p) / kRegionSize) % kNumClasses;
  }

  void *GetBlockBegin(void *p) {
    uptr class_id = GetSizeClass(p);
    uptr size = SizeClassMap::Size(class_id);
    uptr chunk_idx = GetChunkIdx((uptr)p, size);
    uptr reg_beg = (uptr)p & ~(kRegionSize - 1);
    uptr begin = reg_beg + chunk_idx * size;
    RegionInfo *region = GetRegionInfo(class_id);
    if (region->allocated_user >= (chunk_idx + 1) * size)
      return reinterpret_cast<void*>(begin);
    return 0;
  }

  static uptr GetActuallyAllocatedSize(void *p) {
    CHECK(PointerIsMine(p));
    return SizeClassMap::Size(GetSizeClass(p));
  }

  uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }

  void *GetMetaData(void *p) {
    uptr class_id = GetSizeClass(p);
    uptr size = SizeClassMap::Size(class_id);
    uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), size);
    return reinterpret_cast<void*>(kSpaceBeg + (kRegionSize * (class_id + 1)) -
                                   (1 + chunk_idx) * kMetadataSize);
  }

  uptr TotalMemoryUsed() {
    uptr res = 0;
    for (uptr i = 0; i < kNumClasses; i++)
      res += GetRegionInfo(i)->allocated_user;
    return res;
  }

  // Test-only.
  void TestOnlyUnmap() {
    UnmapWithCallback(kSpaceBeg, kSpaceSize + AdditionalSize());
  }

  typedef SizeClassMap SizeClassMapT;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;  // 2^k <= 256

 private:
  static const uptr kRegionSize = kSpaceSize / kNumClasses;
  static const uptr kSpaceEnd = kSpaceBeg + kSpaceSize;
  COMPILER_CHECK(kSpaceBeg % kSpaceSize == 0);
  // kRegionSize must be >= 2^32.
  COMPILER_CHECK((kRegionSize) >= (1ULL << (SANITIZER_WORDSIZE / 2)));
  // Populate the free list with at most this number of bytes at once
  // or with one element if its size is greater.
  static const uptr kPopulateSize = 1 << 18;
  // Call mmap for user memory with this size.
  static const uptr kUserMapSize = 1 << 22;
  // Call mmap for metadata memory with this size.
  static const uptr kMetaMapSize = 1 << 20;

  struct RegionInfo {
    SpinMutex mutex;
    AllocatorFreeList free_list;
    uptr allocated_user;  // Bytes allocated for user memory.
    uptr allocated_meta;  // Bytes allocated for metadata.
    uptr mapped_user;  // Bytes mapped for user memory.
    uptr mapped_meta;  // Bytes mapped for metadata.
  };
  COMPILER_CHECK(sizeof(RegionInfo) >= kCacheLineSize);

  static uptr AdditionalSize() {
    uptr PageSize = GetPageSizeCached();
    uptr res = Max(sizeof(RegionInfo) * kNumClasses, PageSize);
    CHECK_EQ(res % PageSize, 0);
    return res;
  }

  RegionInfo *GetRegionInfo(uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *regions = reinterpret_cast<RegionInfo*>(kSpaceBeg + kSpaceSize);
    return &regions[class_id];
  }

  static uptr GetChunkIdx(uptr chunk, uptr size) {
    u32 offset = chunk % kRegionSize;
    // Here we divide by a non-constant. This is costly.
    // We require that kRegionSize is at least 2^32 so that offset is 32-bit.
    // We save 2x by using 32-bit div, but may need to use a 256-way switch.
    return offset / (u32)size;
  }

  void PopulateFreeList(uptr class_id, RegionInfo *region) {
    CHECK(region->free_list.empty());
    uptr size = SizeClassMap::Size(class_id);
    uptr beg_idx = region->allocated_user;
    uptr end_idx = beg_idx + kPopulateSize;
    uptr region_beg = kSpaceBeg + kRegionSize * class_id;
    if (end_idx + size > region->mapped_user) {
      // Do the mmap for the user memory.
      CHECK_GT(region->mapped_user + kUserMapSize, end_idx);
      MapWithCallback(region_beg + region->mapped_user, kUserMapSize);
      region->mapped_user += kUserMapSize;
    }
    uptr idx = beg_idx;
    uptr i = 0;
    do {  // do-while loop because we need to put at least one item.
      uptr p = region_beg + idx;
      region->free_list.push_front(reinterpret_cast<AllocatorListNode*>(p));
      idx += size;
      i++;
    } while (idx < end_idx);
    region->allocated_user += idx - beg_idx;
    CHECK_LE(region->allocated_user, region->mapped_user);
    region->allocated_meta += i * kMetadataSize;
    if (region->allocated_meta > region->mapped_meta) {
      // Do the mmap for the metadata.
      CHECK_GT(region->mapped_meta + kMetaMapSize, region->allocated_meta);
      MapWithCallback(region_beg + kRegionSize -
                      region->mapped_meta - kMetaMapSize, kMetaMapSize);
      region->mapped_meta += kMetaMapSize;
    }
    if (region->allocated_user + region->allocated_meta > kRegionSize) {
      Printf("Out of memory. Dying.\n");
      Printf("The process has exhausted %zuMB for size class %zu.\n",
          kRegionSize / 1024 / 1024, size);
      Die();
    }
  }

  void *AllocateBySizeClass(uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *region = GetRegionInfo(class_id);
    SpinMutexLock l(&region->mutex);
    if (region->free_list.empty()) {
      PopulateFreeList(class_id, region);
    }
    CHECK(!region->free_list.empty());
    AllocatorListNode *node = region->free_list.front();
    region->free_list.pop_front();
    return reinterpret_cast<void*>(node);
  }

  void DeallocateBySizeClass(void *p, uptr class_id) {
    RegionInfo *region = GetRegionInfo(class_id);
    SpinMutexLock l(&region->mutex);
    region->free_list.push_front(reinterpret_cast<AllocatorListNode*>(p));
  }
};

// SizeClassAllocator32 -- allocator for 32-bit address space.
// This allocator can theoretically be used on 64-bit arch, but there it is less
// efficient than SizeClassAllocator64.
//
// [kSpaceBeg, kSpaceBeg + kSpaceSize) is the range of addresses which can
// be returned by MmapOrDie().
//
// Region:
//   a result of a single call to MmapAlignedOrDie(kRegionSize, kRegionSize).
// Since the regions are aligned by kRegionSize, there are exactly
// kNumPossibleRegions possible regions in the address space and so we keep
// a u8 array possible_regions[kNumPossibleRegions] to store the size classes.
// A size class of 0 means the region is not used by the allocator.
//
// One Region is used to allocate chunks of a single size class.
// A Region looks like this:
// UserChunk1 .. UserChunkN <gap> MetaChunkN .. MetaChunk1
//
// In order to avoid false sharing, the objects of this class should be
// cache-line aligned.
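//
// Illustrative instantiation (a sketch only; the constants below are example
// values, not taken from any particular tool):
//   typedef SizeClassAllocator32<0 /*kSpaceBeg*/, 1ULL << 32 /*kSpaceSize*/,
//                                16 /*kMetadataSize*/,
//                                CompactSizeClassMap> ExampleAllocator32;
//   ExampleAllocator32 a;
//   a.Init();
//   void *p = a.Allocate(100, 8);
//   a.Deallocate(p);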
template <const uptr kSpaceBeg, const u64 kSpaceSize,
          const uptr kMetadataSize, class SizeClassMap,
          class MapUnmapCallback = NoOpMapUnmapCallback>
class SizeClassAllocator32 {
 public:
  void Init() {
    state_ = reinterpret_cast<State *>(MapWithCallback(sizeof(State)));
  }

  void *MapWithCallback(uptr size) {
    size = RoundUpTo(size, GetPageSizeCached());
    void *res = MmapOrDie(size, "SizeClassAllocator32");
    MapUnmapCallback().OnMap((uptr)res, size);
    return res;
  }
  void UnmapWithCallback(uptr beg, uptr size) {
    MapUnmapCallback().OnUnmap(beg, size);
    UnmapOrDie(reinterpret_cast<void *>(beg), size);
  }

  bool CanAllocate(uptr size, uptr alignment) {
    return size <= SizeClassMap::kMaxSize &&
      alignment <= SizeClassMap::kMaxSize;
  }

  void *Allocate(uptr size, uptr alignment) {
    if (size < alignment) size = alignment;
    CHECK(CanAllocate(size, alignment));
    return AllocateBySizeClass(ClassID(size));
  }

  void Deallocate(void *p) {
    CHECK(PointerIsMine(p));
    DeallocateBySizeClass(p, GetSizeClass(p));
  }

  void *GetMetaData(void *p) {
    CHECK(PointerIsMine(p));
    uptr mem = reinterpret_cast<uptr>(p);
    uptr beg = ComputeRegionBeg(mem);
    uptr size = SizeClassMap::Size(GetSizeClass(p));
    u32 offset = mem - beg;
    uptr n = offset / (u32)size;  // 32-bit division
    uptr meta = (beg + kRegionSize) - (n + 1) * kMetadataSize;
    return reinterpret_cast<void*>(meta);
  }

  // Allocate several chunks of the given class_id.
  void BulkAllocate(uptr class_id, AllocatorFreeList *free_list) {
    SizeClassInfo *sci = GetSizeClassInfo(class_id);
    SpinMutexLock l(&sci->mutex);
    EnsureSizeClassHasAvailableChunks(sci, class_id);
    CHECK(!sci->free_list.empty());
    BulkMove(SizeClassMap::MaxCached(class_id), &sci->free_list, free_list);
  }

  // Swallow the entire free_list for the given class_id.
  void BulkDeallocate(uptr class_id, AllocatorFreeList *free_list) {
    SizeClassInfo *sci = GetSizeClassInfo(class_id);
    SpinMutexLock l(&sci->mutex);
    sci->free_list.append_front(free_list);
  }

  bool PointerIsMine(void *p) {
    return
      state_->possible_regions[ComputeRegionId(reinterpret_cast<uptr>(p))] != 0;
  }

  uptr GetSizeClass(void *p) {
    return
      state_->possible_regions[ComputeRegionId(reinterpret_cast<uptr>(p))] - 1;
  }

  void *GetBlockBegin(void *p) {
    CHECK(PointerIsMine(p));
    uptr mem = reinterpret_cast<uptr>(p);
    uptr beg = ComputeRegionBeg(mem);
    uptr size = SizeClassMap::Size(GetSizeClass(p));
    u32 offset = mem - beg;
    u32 n = offset / (u32)size;  // 32-bit division
    uptr res = beg + (n * (u32)size);
    return reinterpret_cast<void*>(res);
  }

  uptr GetActuallyAllocatedSize(void *p) {
    CHECK(PointerIsMine(p));
    return SizeClassMap::Size(GetSizeClass(p));
  }

  uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }

  uptr TotalMemoryUsed() {
    // No need to lock here.
    uptr res = 0;
    for (uptr i = 0; i < kNumPossibleRegions; i++)
      if (state_->possible_regions[i])
        res += kRegionSize;
    return res;
  }

  void TestOnlyUnmap() {
    for (uptr i = 0; i < kNumPossibleRegions; i++)
      if (state_->possible_regions[i])
        UnmapWithCallback((i * kRegionSize), kRegionSize);
    UnmapWithCallback(reinterpret_cast<uptr>(state_), sizeof(State));
  }

  typedef SizeClassMap SizeClassMapT;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;  // 2^k <= 128

 private:
  static const uptr kRegionSizeLog = SANITIZER_WORDSIZE == 64 ? 24 : 20;
  static const uptr kRegionSize = 1 << kRegionSizeLog;
  static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize;
  COMPILER_CHECK(kNumClasses <= 128);

  struct SizeClassInfo {
    SpinMutex mutex;
    AllocatorFreeList free_list;
    char padding[kCacheLineSize - sizeof(uptr) - sizeof(AllocatorFreeList)];
  };
  COMPILER_CHECK(sizeof(SizeClassInfo) == kCacheLineSize);

  uptr ComputeRegionId(uptr mem) {
    uptr res = mem >> kRegionSizeLog;
    CHECK_LT(res, kNumPossibleRegions);
    return res;
  }

  uptr ComputeRegionBeg(uptr mem) {
    return mem & ~(kRegionSize - 1);
  }

  uptr AllocateRegion(uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    uptr res = reinterpret_cast<uptr>(MmapAlignedOrDie(kRegionSize, kRegionSize,
                                      "SizeClassAllocator32"));
    MapUnmapCallback().OnMap(res, kRegionSize);
    CHECK_EQ(0U, (res & (kRegionSize - 1)));
    CHECK_EQ(0U, state_->possible_regions[ComputeRegionId(res)]);
    state_->possible_regions[ComputeRegionId(res)] = class_id + 1;
    return res;
  }

  SizeClassInfo *GetSizeClassInfo(uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    return &state_->size_class_info_array[class_id];
  }

  void EnsureSizeClassHasAvailableChunks(SizeClassInfo *sci, uptr class_id) {
    if (!sci->free_list.empty()) return;
    uptr size = SizeClassMap::Size(class_id);
    uptr reg = AllocateRegion(class_id);
    uptr n_chunks = kRegionSize / (size + kMetadataSize);
    for (uptr i = reg; i < reg + n_chunks * size; i += size)
      sci->free_list.push_back(reinterpret_cast<AllocatorListNode*>(i));
  }

  void *AllocateBySizeClass(uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    SizeClassInfo *sci = GetSizeClassInfo(class_id);
    SpinMutexLock l(&sci->mutex);
    EnsureSizeClassHasAvailableChunks(sci, class_id);
    CHECK(!sci->free_list.empty());
    AllocatorListNode *node = sci->free_list.front();
    sci->free_list.pop_front();
    return reinterpret_cast<void*>(node);
  }

  void DeallocateBySizeClass(void *p, uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    SizeClassInfo *sci = GetSizeClassInfo(class_id);
    SpinMutexLock l(&sci->mutex);
    sci->free_list.push_front(reinterpret_cast<AllocatorListNode*>(p));
  }

  struct State {
    u8 possible_regions[kNumPossibleRegions];
    SizeClassInfo size_class_info_array[kNumClasses];
  };
  State *state_;
};

// Objects of this type should be used as local caches for SizeClassAllocator64.
// Since the typical use of this class is to have one object per thread in TLS,
// it has to be POD.
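//
// Illustrative use (a sketch; ExampleAllocator64 is the hypothetical typedef
// from the SizeClassAllocator64 comment above):
//   typedef SizeClassAllocatorLocalCache<ExampleAllocator64> ExampleCache;
//   ExampleAllocator64 allocator;   // shared; a global in real use
//   ExampleCache cache;             // one per thread (e.g. in TLS) in real use
//   allocator.Init();
//   cache.Init();                   // not needed if zero-initialized
//   uptr class_id = allocator.ClassID(100);
//   void *p = cache.Allocate(&allocator, class_id);
//   cache.Deallocate(&allocator, class_id, p);
//   cache.Drain(&allocator);        // return all cached chunks to the allocator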
template<class SizeClassAllocator>
struct SizeClassAllocatorLocalCache {
  typedef SizeClassAllocator Allocator;
  static const uptr kNumClasses = SizeClassAllocator::kNumClasses;
  // Don't need to call Init if the object is a global (i.e. zero-initialized).
  void Init() {
    internal_memset(this, 0, sizeof(*this));
  }

  void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    AllocatorFreeList *free_list = &free_lists_[class_id];
    if (free_list->empty())
      allocator->BulkAllocate(class_id, free_list);
    CHECK(!free_list->empty());
    void *res = free_list->front();
    free_list->pop_front();
    return res;
  }

  void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
    CHECK_LT(class_id, kNumClasses);
    AllocatorFreeList *free_list = &free_lists_[class_id];
    free_list->push_front(reinterpret_cast<AllocatorListNode*>(p));
    if (free_list->size() >= 2 * SizeClassMap::MaxCached(class_id))
      DrainHalf(allocator, class_id);
  }

  void Drain(SizeClassAllocator *allocator) {
    for (uptr i = 0; i < kNumClasses; i++) {
      allocator->BulkDeallocate(i, &free_lists_[i]);
      CHECK(free_lists_[i].empty());
    }
  }

  // private:
  typedef typename SizeClassAllocator::SizeClassMapT SizeClassMap;
  AllocatorFreeList free_lists_[kNumClasses];

  void DrainHalf(SizeClassAllocator *allocator, uptr class_id) {
    AllocatorFreeList *free_list = &free_lists_[class_id];
    AllocatorFreeList half;
    half.clear();
    const uptr count = free_list->size() / 2;
    for (uptr i = 0; i < count; i++) {
      AllocatorListNode *node = free_list->front();
      free_list->pop_front();
      half.push_front(node);
    }
    allocator->BulkDeallocate(class_id, &half);
  }
};

// This class can (de)allocate only large chunks of memory using mmap/unmap.
// The main purpose of this allocator is to cover large and rare allocation
// sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
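//
// Each chunk lives in its own mmap-ed mapping, one page after a Header that
// records map_beg/map_size/size and the doubly-linked list pointers.
// Illustrative use (a sketch only):
//   LargeMmapAllocator<> a;
//   a.Init();
//   void *p = a.Allocate(1 << 24, 8);   // a 16M chunk in its own mapping
//   CHECK(a.PointerIsMine(p));
//   a.Deallocate(p);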
template <class MapUnmapCallback = NoOpMapUnmapCallback>
class LargeMmapAllocator {
 public:
  void Init() {
    internal_memset(this, 0, sizeof(*this));
    page_size_ = GetPageSizeCached();
  }
  void *Allocate(uptr size, uptr alignment) {
    CHECK(IsPowerOfTwo(alignment));
    uptr map_size = RoundUpMapSize(size);
    if (alignment > page_size_)
      map_size += alignment;
    if (map_size < size) return 0;  // Overflow.
    uptr map_beg = reinterpret_cast<uptr>(
        MmapOrDie(map_size, "LargeMmapAllocator"));
    MapUnmapCallback().OnMap(map_beg, map_size);
    uptr map_end = map_beg + map_size;
    uptr res = map_beg + page_size_;
    if (res & (alignment - 1))  // Align.
      res += alignment - (res & (alignment - 1));
    CHECK_EQ(0, res & (alignment - 1));
    CHECK_LE(res + size, map_end);
    Header *h = GetHeader(res);
    h->size = size;
    h->map_beg = map_beg;
    h->map_size = map_size;
    {
      SpinMutexLock l(&mutex_);
      h->next = list_;
      h->prev = 0;
      if (list_)
        list_->prev = h;
      list_ = h;
    }
    return reinterpret_cast<void*>(res);
  }

  void Deallocate(void *p) {
    Header *h = GetHeader(p);
    {
      SpinMutexLock l(&mutex_);
      Header *prev = h->prev;
      Header *next = h->next;
      if (prev)
        prev->next = next;
      if (next)
        next->prev = prev;
      if (h == list_)
        list_ = next;
    }
    MapUnmapCallback().OnUnmap(h->map_beg, h->map_size);
    UnmapOrDie(reinterpret_cast<void*>(h->map_beg), h->map_size);
  }

  uptr TotalMemoryUsed() {
    SpinMutexLock l(&mutex_);
    uptr res = 0;
    for (Header *l = list_; l; l = l->next) {
      res += RoundUpMapSize(l->size);
    }
    return res;
  }

  bool PointerIsMine(void *p) {
    return GetBlockBegin(p) != 0;
  }

  uptr GetActuallyAllocatedSize(void *p) {
    return RoundUpTo(GetHeader(p)->size, page_size_);
  }

  // At least page_size_/2 metadata bytes are available.
  void *GetMetaData(void *p) {
    return GetHeader(p) + 1;
  }

  void *GetBlockBegin(void *ptr) {
    uptr p = reinterpret_cast<uptr>(ptr);
    SpinMutexLock l(&mutex_);
    for (Header *l = list_; l; l = l->next) {
      if (p >= l->map_beg && p < l->map_beg + l->map_size)
        return GetUser(l);
    }
    return 0;
  }

 private:
  struct Header {
    uptr map_beg;
    uptr map_size;
    uptr size;
    Header *next;
    Header *prev;
  };

  Header *GetHeader(uptr p) {
    CHECK_EQ(p % page_size_, 0);
    return reinterpret_cast<Header*>(p - page_size_);
  }
  Header *GetHeader(void *p) { return GetHeader(reinterpret_cast<uptr>(p)); }

  void *GetUser(Header *h) {
    CHECK_EQ((uptr)h % page_size_, 0);
    return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + page_size_);
  }

  uptr RoundUpMapSize(uptr size) {
    return RoundUpTo(size, page_size_) + page_size_;
  }

  uptr page_size_;
  Header *list_;
  SpinMutex mutex_;
};

// This class implements a complete memory allocator by using two
// internal allocators:
// PrimaryAllocator is efficient, but may not allocate some sizes (alignments).
//  When allocating 2^x bytes it should return a 2^x-aligned chunk.
// PrimaryAllocator is used via a local AllocatorCache.
// SecondaryAllocator can allocate anything, but is not efficient.
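//
// Illustrative wiring (a sketch; ExampleAllocator64 and ExampleCache are the
// hypothetical typedefs from the comments above):
//   typedef CombinedAllocator<ExampleAllocator64, ExampleCache,
//                             LargeMmapAllocator<> > ExampleCombined;
//   ExampleCombined a;
//   ExampleCache cache;
//   a.Init();
//   cache.Init();
//   void *small_p = a.Allocate(&cache, 100, 8);    // served by the primary
//   void *big_p = a.Allocate(&cache, 1 << 22, 8);  // exceeds the primary's
//                                                  // kMaxSize, so it goes to
//                                                  // the secondary
//   a.Deallocate(&cache, small_p);
//   a.Deallocate(&cache, big_p);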
template <class PrimaryAllocator, class AllocatorCache,
          class SecondaryAllocator>  // NOLINT
class CombinedAllocator {
 public:
  void Init() {
    primary_.Init();
    secondary_.Init();
  }

  void *Allocate(AllocatorCache *cache, uptr size, uptr alignment,
                 bool cleared = false) {
    // Returning 0 on malloc(0) may break a lot of code.
    if (size == 0)
      size = 1;
    if (size + alignment < size)
      return 0;
    if (alignment > 8)
      size = RoundUpTo(size, alignment);
    void *res;
    if (primary_.CanAllocate(size, alignment))
      res = cache->Allocate(&primary_, primary_.ClassID(size));
    else
      res = secondary_.Allocate(size, alignment);
    if (alignment > 8)
      CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
    if (cleared && res)
      internal_memset(res, 0, size);
    return res;
  }

  void Deallocate(AllocatorCache *cache, void *p) {
    if (!p) return;
    if (primary_.PointerIsMine(p))
      cache->Deallocate(&primary_, primary_.GetSizeClass(p), p);
    else
      secondary_.Deallocate(p);
  }

  void *Reallocate(AllocatorCache *cache, void *p, uptr new_size,
                   uptr alignment) {
    if (!p)
      return Allocate(cache, new_size, alignment);
    if (!new_size) {
      Deallocate(cache, p);
      return 0;
    }
    CHECK(PointerIsMine(p));
    uptr old_size = GetActuallyAllocatedSize(p);
    uptr memcpy_size = Min(new_size, old_size);
    void *new_p = Allocate(cache, new_size, alignment);
    if (new_p)
      internal_memcpy(new_p, p, memcpy_size);
    Deallocate(cache, p);
    return new_p;
  }

  bool PointerIsMine(void *p) {
    if (primary_.PointerIsMine(p))
      return true;
    return secondary_.PointerIsMine(p);
  }

  void *GetMetaData(void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetMetaData(p);
    return secondary_.GetMetaData(p);
  }

  void *GetBlockBegin(void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetBlockBegin(p);
    return secondary_.GetBlockBegin(p);
  }

  uptr GetActuallyAllocatedSize(void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetActuallyAllocatedSize(p);
    return secondary_.GetActuallyAllocatedSize(p);
  }

  uptr TotalMemoryUsed() {
    return primary_.TotalMemoryUsed() + secondary_.TotalMemoryUsed();
  }

  void TestOnlyUnmap() { primary_.TestOnlyUnmap(); }

  void SwallowCache(AllocatorCache *cache) {
    cache->Drain(&primary_);
  }

 private:
  PrimaryAllocator primary_;
  SecondaryAllocator secondary_;
};

}  // namespace __sanitizer

#endif  // SANITIZER_ALLOCATOR_H