//===-- sanitizer_allocator_test.cc ---------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
// Tests for sanitizer_allocator.h.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"

#include "sanitizer_test_utils.h"
#include "sanitizer_pthread_wrappers.h"

#include "gtest/gtest.h"

#include <stdlib.h>
#include <algorithm>
#include <vector>
#include <set>
// Too slow for debug build
#if TSAN_DEBUG == 0

#if SANITIZER_WORDSIZE == 64
// Fixed virtual address range reserved for the 64-bit primary allocator.
static const uptr kAllocatorSpace = 0x700000000000ULL;
static const uptr kAllocatorSize  = 0x010000000000ULL;  // 1T.
// 1ULL << 47: the usable userspace address range assumed on 64-bit hosts.
static const u64 kAddressSpaceSize = 1ULL << 47;

// 64-bit allocator: 16 bytes of per-chunk metadata, default size class map.
typedef SizeClassAllocator64<
  kAllocatorSpace, kAllocatorSize, 16, DefaultSizeClassMap> Allocator64;

// Same layout as Allocator64 but with the compact size class map.
typedef SizeClassAllocator64<
  kAllocatorSpace, kAllocatorSize, 16, CompactSizeClassMap> Allocator64Compact;
#else
static const u64 kAddressSpaceSize = 1ULL << 32;
#endif

// Region size for the 32-bit-style allocator: 2^20 on 32-bit hosts,
// 2^24 on 64-bit hosts (see FIRST_32_SECOND_64).
static const uptr kRegionSizeLog = FIRST_32_SECOND_64(20, 24);
// One byte of possible-region map per region of the address space.
static const uptr kFlatByteMapSize = kAddressSpaceSize >> kRegionSizeLog;

// 32-bit-style allocator covering the whole address space; instantiated on
// both word sizes in these tests.
typedef SizeClassAllocator32<
  0, kAddressSpaceSize,
  /*kMetadataSize*/16,
  CompactSizeClassMap,
  kRegionSizeLog,
  FlatByteMap<kFlatByteMapSize> >
  Allocator32Compact;
56
// Runs the given size class map's internal consistency checks.
template <class SizeClassMap>
void TestSizeClassMap() {
  // For debugging, SizeClassMap::Print() dumps the class table.
  SizeClassMap::Validate();
}
63
// Validate() each of the size class maps used by the allocators under test.
TEST(SanitizerCommon, DefaultSizeClassMap) {
  TestSizeClassMap<DefaultSizeClassMap>();
}

TEST(SanitizerCommon, CompactSizeClassMap) {
  TestSizeClassMap<CompactSizeClassMap>();
}

TEST(SanitizerCommon, InternalSizeClassMap) {
  TestSizeClassMap<InternalSizeClassMap>();
}
75
// Generic smoke test for a size-class allocator: allocates chunks of many
// sizes, touches the first/middle/last byte of each, and verifies
// GetBlockBegin(), PointerIsMine(), the size class, and a metadata
// round-trip.  Also checks that repeated allocate-all/free-all rounds do
// not grow TotalMemoryUsed() (i.e. freed memory is reused, not leaked).
template <class Allocator>
void TestSizeClassAllocator() {
  Allocator *a = new Allocator;
  a->Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,
    50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000};

  std::vector<void *> allocated;

  uptr last_total_allocated = 0;
  for (int i = 0; i < 3; i++) {
    // Allocate a bunch of chunks.
    for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) {
      uptr size = sizes[s];
      if (!a->CanAllocate(size, 1)) continue;
      // printf("s = %ld\n", size);
      // Roughly 8MB worth of chunks per size, but at least 6 of each.
      uptr n_iter = std::max((uptr)6, 8000000 / size);
      // fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter);
      for (uptr i = 0; i < n_iter; i++) {
        uptr class_id0 = Allocator::SizeClassMapT::ClassID(size);
        char *x = (char*)cache.Allocate(a, class_id0);
        x[0] = 0;
        x[size - 1] = 0;
        x[size / 2] = 0;
        allocated.push_back(x);
        CHECK_EQ(x, a->GetBlockBegin(x));
        CHECK_EQ(x, a->GetBlockBegin(x + size - 1));
        CHECK(a->PointerIsMine(x));
        CHECK(a->PointerIsMine(x + size - 1));
        CHECK(a->PointerIsMine(x + size / 2));
        CHECK_GE(a->GetActuallyAllocatedSize(x), size);
        uptr class_id = a->GetSizeClass(x);
        CHECK_EQ(class_id, Allocator::SizeClassMapT::ClassID(size));
        // Stash recognizable values in the chunk's metadata; verified below
        // at deallocation time.
        uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));
        metadata[0] = reinterpret_cast<uptr>(x) + 1;
        metadata[1] = 0xABCD;
      }
    }
    // Deallocate all.
    for (uptr i = 0; i < allocated.size(); i++) {
      void *x = allocated[i];
      uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_EQ(metadata[0], reinterpret_cast<uptr>(x) + 1);
      CHECK_EQ(metadata[1], 0xABCD);
      cache.Deallocate(a, a->GetSizeClass(x), x);
    }
    allocated.clear();
    // Memory usage must be identical on every round after the first.
    uptr total_allocated = a->TotalMemoryUsed();
    if (last_total_allocated == 0)
      last_total_allocated = total_allocated;
    CHECK_EQ(last_total_allocated, total_allocated);
  }

  // Check that GetBlockBegin never crashes.
  for (uptr x = 0, step = kAddressSpaceSize / 100000;
       x < kAddressSpaceSize - step; x += step)
    if (a->PointerIsMine(reinterpret_cast<void *>(x)))
      Ident(a->GetBlockBegin(reinterpret_cast<void *>(x)));

  a->TestOnlyUnmap();
  delete a;
}
142
// Instantiate the generic smoke test for each allocator flavor.
#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64) {
  TestSizeClassAllocator<Allocator64>();
}

TEST(SanitizerCommon, SizeClassAllocator64Compact) {
  TestSizeClassAllocator<Allocator64Compact>();
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32Compact) {
  TestSizeClassAllocator<Allocator32Compact>();
}
156
157template <class Allocator>
158void SizeClassAllocatorMetadataStress() {
159  Allocator *a = new Allocator;
160  a->Init();
161  SizeClassAllocatorLocalCache<Allocator> cache;
162  memset(&cache, 0, sizeof(cache));
163  cache.Init(0);
164
165  const uptr kNumAllocs = 1 << 13;
166  void *allocated[kNumAllocs];
167  void *meta[kNumAllocs];
168  for (uptr i = 0; i < kNumAllocs; i++) {
169    void *x = cache.Allocate(a, 1 + i % 50);
170    allocated[i] = x;
171    meta[i] = a->GetMetaData(x);
172  }
173  // Get Metadata kNumAllocs^2 times.
174  for (uptr i = 0; i < kNumAllocs * kNumAllocs; i++) {
175    uptr idx = i % kNumAllocs;
176    void *m = a->GetMetaData(allocated[idx]);
177    EXPECT_EQ(m, meta[idx]);
178  }
179  for (uptr i = 0; i < kNumAllocs; i++) {
180    cache.Deallocate(a, 1 + i % 50, allocated[i]);
181  }
182
183  a->TestOnlyUnmap();
184  delete a;
185}
186
// Instantiate the metadata stress test for each allocator flavor.
#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64MetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator64>();
}

TEST(SanitizerCommon, SizeClassAllocator64CompactMetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator64Compact>();
}
#endif  // SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator32CompactMetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator32Compact>();
}
199
// Allocates more than 8GB (1ULL << 33 bytes) worth of the largest size
// class and checks that GetBlockBegin() returns the chunk itself every
// time — guarding against arithmetic overflow in the block-begin
// computation for high addresses.
template <class Allocator>
void SizeClassAllocatorGetBlockBeginStress() {
  Allocator *a = new Allocator;
  a->Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  uptr max_size_class = Allocator::kNumClasses - 1;
  uptr size = Allocator::SizeClassMapT::Size(max_size_class);
  u64 G8 = 1ULL << 33;
  // Make sure we correctly compute GetBlockBegin() w/o overflow.
  for (size_t i = 0; i <= G8 / size; i++) {
    void *x = cache.Allocate(a, max_size_class);
    void *beg = a->GetBlockBegin(x);
    // if ((i & (i - 1)) == 0)
    //   fprintf(stderr, "[%zd] %p %p\n", i, x, beg);
    EXPECT_EQ(x, beg);
  }

  a->TestOnlyUnmap();
  delete a;
}
223
#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64GetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator64>();
}
TEST(SanitizerCommon, SizeClassAllocator64CompactGetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator64Compact>();
}
// NOTE(review): the 32Compact variant is also guarded by WORDSIZE == 64 —
// presumably because the stress allocates >8GB, which only a 64-bit host
// can satisfy; confirm before moving it out of this #if.
TEST(SanitizerCommon, SizeClassAllocator32CompactGetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator32Compact>();
}
#endif  // SANITIZER_WORDSIZE == 64
235
// Map/unmap callback that counts invocations in static counters; plugged
// into allocator templates below to verify their mmap/munmap activity.
struct TestMapUnmapCallback {
  static int map_count, unmap_count;
  void OnMap(uptr p, uptr size) const { map_count++; }
  void OnUnmap(uptr p, uptr size) const { unmap_count++; }
};
// Zero-initialized static counters; each test resets them explicitly.
int TestMapUnmapCallback::map_count;
int TestMapUnmapCallback::unmap_count;
243
#if SANITIZER_WORDSIZE == 64
// Verifies the exact number of map/unmap callbacks the 64-bit allocator
// performs: one map for its own state at Init(), two more for the first
// batch (allocation + metadata), and a single unmap for the whole range.
TEST(SanitizerCommon, SizeClassAllocator64MapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  typedef SizeClassAllocator64<
      kAllocatorSpace, kAllocatorSize, 16, DefaultSizeClassMap,
      TestMapUnmapCallback> Allocator64WithCallBack;
  Allocator64WithCallBack *a = new Allocator64WithCallBack;
  a->Init();
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);  // Allocator state.
  SizeClassAllocatorLocalCache<Allocator64WithCallBack> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();
  a->AllocateBatch(&stats, &cache, 32);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 3);  // State + alloc + metadata.
  a->TestOnlyUnmap();
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);  // The whole thing.
  delete a;
}
#endif
266
// Same as above for the 32-bit-style allocator: Init() maps nothing, the
// first batch maps exactly once, TestOnlyUnmap() unmaps exactly once.
TEST(SanitizerCommon, SizeClassAllocator32MapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  typedef SizeClassAllocator32<
      0, kAddressSpaceSize,
      /*kMetadataSize*/16,
      CompactSizeClassMap,
      kRegionSizeLog,
      FlatByteMap<kFlatByteMapSize>,
      TestMapUnmapCallback>
    Allocator32WithCallBack;
  Allocator32WithCallBack *a = new Allocator32WithCallBack;
  a->Init();
  EXPECT_EQ(TestMapUnmapCallback::map_count, 0);
  SizeClassAllocatorLocalCache<Allocator32WithCallBack>  cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();
  a->AllocateBatch(&stats, &cache, 32);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);
  a->TestOnlyUnmap();
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);
  delete a;
  // fprintf(stderr, "Map: %d Unmap: %d\n",
  //         TestMapUnmapCallback::map_count,
  //         TestMapUnmapCallback::unmap_count);
}
295
// The secondary allocator maps once per Allocate() and unmaps once per
// Deallocate().
TEST(SanitizerCommon, LargeMmapAllocatorMapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  LargeMmapAllocator<TestMapUnmapCallback> a;
  a.Init();
  AllocatorStats stats;
  stats.Init();
  void *x = a.Allocate(&stats, 1 << 20, 1);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);
  a.Deallocate(&stats, x);
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);
}
308
// Keeps allocating batches of class-52 chunks until the allocator's fixed
// address range is exhausted.  Runs inside EXPECT_DEATH (see
// SizeClassAllocator64Overflow below), which expects an "Out of memory"
// CHECK failure.
template<class Allocator>
void FailInAssertionOnOOM() {
  Allocator a;
  a.Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();
  for (int i = 0; i < 1000000; i++) {
    a.AllocateBatch(&stats, &cache, 52);
  }

  a.TestOnlyUnmap();
}
324
#if SANITIZER_WORDSIZE == 64
// Exhausting the 64-bit allocator must die with an "Out of memory" check,
// not corrupt state silently.
TEST(SanitizerCommon, SizeClassAllocator64Overflow) {
  EXPECT_DEATH(FailInAssertionOnOOM<Allocator64>(), "Out of memory");
}
#endif
330
#if !defined(_WIN32)  // FIXME: This currently fails on Windows.
// End-to-end test of the secondary (mmap-based) allocator: plain
// alloc/free rounds, metadata round-trips, aligned allocations up to
// 256MB alignment, and a GetBlockBegin() boundary regression check.
TEST(SanitizerCommon, LargeMmapAllocator) {
  LargeMmapAllocator<> a;
  a.Init();
  AllocatorStats stats;
  stats.Init();

  static const int kNumAllocs = 1000;
  char *allocated[kNumAllocs];
  static const uptr size = 4000;
  // Allocate some.
  for (int i = 0; i < kNumAllocs; i++) {
    allocated[i] = (char *)a.Allocate(&stats, size, 1);
    CHECK(a.PointerIsMine(allocated[i]));
  }
  // Deallocate all.
  CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
  for (int i = 0; i < kNumAllocs; i++) {
    char *p = allocated[i];
    CHECK(a.PointerIsMine(p));
    a.Deallocate(&stats, p);
  }
  // Check that none are left.
  CHECK_EQ(a.TotalMemoryUsed(), 0);

  // Allocate some more, also add metadata.
  for (int i = 0; i < kNumAllocs; i++) {
    char *x = (char *)a.Allocate(&stats, size, 1);
    CHECK_GE(a.GetActuallyAllocatedSize(x), size);
    uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(x));
    *meta = i;
    allocated[i] = x;
  }
  // Hammer PointerIsMine() on live chunks (kNumAllocs^2 queries).
  for (int i = 0; i < kNumAllocs * kNumAllocs; i++) {
    char *p = allocated[i % kNumAllocs];
    CHECK(a.PointerIsMine(p));
    CHECK(a.PointerIsMine(p + 2000));
  }
  CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
  // Deallocate all in reverse order.
  for (int i = 0; i < kNumAllocs; i++) {
    int idx = kNumAllocs - i - 1;
    char *p = allocated[idx];
    uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(p));
    CHECK_EQ(*meta, idx);
    CHECK(a.PointerIsMine(p));
    a.Deallocate(&stats, p);
  }
  CHECK_EQ(a.TotalMemoryUsed(), 0);

  // Test alignments.  Test all alignments up to 256MB (64-bit) / 16MB
  // (32-bit), doubling each time.
  uptr max_alignment = SANITIZER_WORDSIZE == 64 ? (1 << 28) : (1 << 24);
  for (uptr alignment = 8; alignment <= max_alignment; alignment *= 2) {
    const uptr kNumAlignedAllocs = 100;
    for (uptr i = 0; i < kNumAlignedAllocs; i++) {
      uptr size = ((i % 10) + 1) * 4096;
      char *p = allocated[i] = (char *)a.Allocate(&stats, size, alignment);
      CHECK_EQ(p, a.GetBlockBegin(p));
      CHECK_EQ(p, a.GetBlockBegin(p + size - 1));
      CHECK_EQ(p, a.GetBlockBegin(p + size / 2));
      CHECK_EQ(0, (uptr)allocated[i] % alignment);
      p[0] = p[size - 1] = 0;
    }
    for (uptr i = 0; i < kNumAlignedAllocs; i++) {
      a.Deallocate(&stats, allocated[i]);
    }
  }

  // Regression test for boundary condition in GetBlockBegin(): the last
  // byte of a chunk belongs to it, the first byte past it does not.
  uptr page_size = GetPageSizeCached();
  char *p = (char *)a.Allocate(&stats, page_size, 1);
  CHECK_EQ(p, a.GetBlockBegin(p));
  CHECK_EQ(p, (char *)a.GetBlockBegin(p + page_size - 1));
  CHECK_NE(p, (char *)a.GetBlockBegin(p + page_size));
  a.Deallocate(&stats, p);
}
#endif
408
// Exercises the CombinedAllocator (primary + secondary behind one cache):
// absurdly large requests must return null when allocator_may_return_null
// is set and die otherwise; then mixed-size alloc/shuffle/free rounds must
// keep per-chunk metadata intact.
template
<class PrimaryAllocator, class SecondaryAllocator, class AllocatorCache>
void TestCombinedAllocator() {
  typedef
      CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator>
      Allocator;
  Allocator *a = new Allocator;
  a->Init();

  AllocatorCache cache;
  memset(&cache, 0, sizeof(cache));
  a->InitCache(&cache);

  // Save the global flag so it can be restored; this test mutates it.
  bool allocator_may_return_null = common_flags()->allocator_may_return_null;
  common_flags()->allocator_may_return_null = true;
  // Sizes at or near (uptr)-1 must fail cleanly with null.
  EXPECT_EQ(a->Allocate(&cache, -1, 1), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, -1, 1024), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1024), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1023, 1024), (void*)0);

  common_flags()->allocator_may_return_null = false;
  EXPECT_DEATH(a->Allocate(&cache, -1, 1),
               "allocator is terminating the process");
  // Restore the original value.
  common_flags()->allocator_may_return_null = allocator_may_return_null;

  const uptr kNumAllocs = 100000;
  const uptr kNumIter = 10;
  for (uptr iter = 0; iter < kNumIter; iter++) {
    std::vector<void*> allocated;
    for (uptr i = 0; i < kNumAllocs; i++) {
      uptr size = (i % (1 << 14)) + 1;
      // Every 1024th allocation is large (up to 8MB) to hit the secondary.
      if ((i % 1024) == 0)
        size = 1 << (10 + (i % 14));
      void *x = a->Allocate(&cache, size, 1);
      uptr *meta = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_EQ(*meta, 0);
      *meta = size;
      allocated.push_back(x);
    }

    // Free in random order to mix primary/secondary chunks.
    // NOTE(review): std::random_shuffle is deprecated in C++14 and removed
    // in C++17; switch to std::shuffle when the build moves past C++11.
    random_shuffle(allocated.begin(), allocated.end());

    for (uptr i = 0; i < kNumAllocs; i++) {
      void *x = allocated[i];
      uptr *meta = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_NE(*meta, 0);
      CHECK(a->PointerIsMine(x));
      *meta = 0;
      a->Deallocate(&cache, x);
    }
    allocated.clear();
    a->SwallowCache(&cache);
  }
  a->DestroyCache(&cache);
  a->TestOnlyUnmap();
}
467
// Instantiate the combined-allocator test for each primary flavor.
#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, CombinedAllocator64) {
  TestCombinedAllocator<Allocator64,
      LargeMmapAllocator<>,
      SizeClassAllocatorLocalCache<Allocator64> > ();
}

TEST(SanitizerCommon, CombinedAllocator64Compact) {
  TestCombinedAllocator<Allocator64Compact,
      LargeMmapAllocator<>,
      SizeClassAllocatorLocalCache<Allocator64Compact> > ();
}
#endif

#if !defined(_WIN32)  // FIXME: This currently fails on Windows.
TEST(SanitizerCommon, CombinedAllocator32Compact) {
  TestCombinedAllocator<Allocator32Compact,
      LargeMmapAllocator<>,
      SizeClassAllocatorLocalCache<Allocator32Compact> > ();
}
#endif
489
490template <class AllocatorCache>
491void TestSizeClassAllocatorLocalCache() {
492  AllocatorCache cache;
493  typedef typename AllocatorCache::Allocator Allocator;
494  Allocator *a = new Allocator();
495
496  a->Init();
497  memset(&cache, 0, sizeof(cache));
498  cache.Init(0);
499
500  const uptr kNumAllocs = 10000;
501  const int kNumIter = 100;
502  uptr saved_total = 0;
503  for (int class_id = 1; class_id <= 5; class_id++) {
504    for (int it = 0; it < kNumIter; it++) {
505      void *allocated[kNumAllocs];
506      for (uptr i = 0; i < kNumAllocs; i++) {
507        allocated[i] = cache.Allocate(a, class_id);
508      }
509      for (uptr i = 0; i < kNumAllocs; i++) {
510        cache.Deallocate(a, class_id, allocated[i]);
511      }
512      cache.Drain(a);
513      uptr total_allocated = a->TotalMemoryUsed();
514      if (it)
515        CHECK_EQ(saved_total, total_allocated);
516      saved_total = total_allocated;
517    }
518  }
519
520  a->TestOnlyUnmap();
521  delete a;
522}
523
// Instantiate the local-cache test for each allocator flavor.
#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64LocalCache) {
  TestSizeClassAllocatorLocalCache<
      SizeClassAllocatorLocalCache<Allocator64> >();
}

TEST(SanitizerCommon, SizeClassAllocator64CompactLocalCache) {
  TestSizeClassAllocatorLocalCache<
      SizeClassAllocatorLocalCache<Allocator64Compact> >();
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32CompactLocalCache) {
  TestSizeClassAllocatorLocalCache<
      SizeClassAllocatorLocalCache<Allocator32Compact> >();
}
540
#if SANITIZER_WORDSIZE == 64
typedef SizeClassAllocatorLocalCache<Allocator64> AllocatorCache;
// Shared by all worker threads in AllocatorLeakTest below.
static AllocatorCache static_allocator_cache;

// Thread body: allocate one chunk through the shared static cache, then
// drain the cache back into the allocator.
void *AllocatorLeakTestWorker(void *arg) {
  typedef AllocatorCache::Allocator Allocator;
  Allocator *a = (Allocator*)(arg);
  static_allocator_cache.Allocate(a, 10);
  static_allocator_cache.Drain(a);
  return 0;
}
552
// Spawns 100 short-lived threads that each allocate and drain through the
// shared static cache; TotalMemoryUsed() must not grow after the first
// thread, i.e. no memory may leak per thread.
TEST(SanitizerCommon, AllocatorLeakTest) {
  typedef AllocatorCache::Allocator Allocator;
  Allocator a;
  a.Init();
  uptr total_used_memory = 0;
  for (int i = 0; i < 100; i++) {
    pthread_t t;
    PTHREAD_CREATE(&t, 0, AllocatorLeakTestWorker, &a);
    PTHREAD_JOIN(t, 0);
    // Record usage after the first thread; all later threads must match it.
    if (i == 0)
      total_used_memory = a.TotalMemoryUsed();
    EXPECT_EQ(a.TotalMemoryUsed(), total_used_memory);
  }

  a.TestOnlyUnmap();
}
569
// Struct which is allocated to pass info to new threads.  The new thread
// frees it.
struct NewThreadParams {
  AllocatorCache *thread_cache;
  AllocatorCache::Allocator *allocator;
  uptr class_id;
};

// Called in a new thread.  Just frees its argument (the NewThreadParams
// chunk itself) through the child thread's zeroed cache.
static void *DeallocNewThreadWorker(void *arg) {
  NewThreadParams *params = reinterpret_cast<NewThreadParams*>(arg);
  params->thread_cache->Deallocate(params->allocator, params->class_id, params);
  return NULL;
}
584
// The allocator cache is supposed to be POD and zero initialized.  We should be
// able to call Deallocate on a zeroed cache, and it will self-initialize.
TEST(Allocator, AllocatorCacheDeallocNewThread) {
  AllocatorCache::Allocator allocator;
  allocator.Init();
  AllocatorCache main_cache;
  AllocatorCache child_cache;
  memset(&main_cache, 0, sizeof(main_cache));
  memset(&child_cache, 0, sizeof(child_cache));

  // Allocate the params chunk from the main thread's cache; the child
  // thread deallocates it through its own (still zeroed) cache.
  uptr class_id = DefaultSizeClassMap::ClassID(sizeof(NewThreadParams));
  NewThreadParams *params = reinterpret_cast<NewThreadParams*>(
      main_cache.Allocate(&allocator, class_id));
  params->thread_cache = &child_cache;
  params->allocator = &allocator;
  params->class_id = class_id;
  pthread_t t;
  PTHREAD_CREATE(&t, 0, DeallocNewThreadWorker, params);
  PTHREAD_JOIN(t, 0);
}
#endif
606
// Smoke test for the internal allocator: two allocations succeed and are
// distinct.
TEST(Allocator, Basic) {
  char *p = (char*)InternalAlloc(10);
  EXPECT_NE(p, (char*)0);
  char *p2 = (char*)InternalAlloc(20);
  EXPECT_NE(p2, (char*)0);
  EXPECT_NE(p2, p);
  InternalFree(p);
  InternalFree(p2);
}
616
// 1000 random-sized (deterministic seed) internal allocations, then free
// them all.
TEST(Allocator, Stress) {
  const int kCount = 1000;
  char *ptrs[kCount];
  unsigned rnd = 42;
  for (int i = 0; i < kCount; i++) {
    uptr sz = my_rand_r(&rnd) % 1000;
    char *p = (char*)InternalAlloc(sz);
    EXPECT_NE(p, (char*)0);
    ptrs[i] = p;
  }
  for (int i = 0; i < kCount; i++) {
    InternalFree(ptrs[i]);
  }
}
631
// An oversized (10MB) internal allocation must die rather than silently
// mmap.
TEST(Allocator, InternalAllocFailure) {
  EXPECT_DEATH(Ident(InternalAlloc(10 << 20)),
               "Unexpected mmap in InternalAllocator!");
}
636
// InternalScopedBuffer: size() reports bytes (element count * element
// size), and the buffer is writable/readable.
TEST(Allocator, ScopedBuffer) {
  const int kSize = 512;
  {
    InternalScopedBuffer<int> int_buf(kSize);
    EXPECT_EQ(sizeof(int) * kSize, int_buf.size());  // NOLINT
  }
  InternalScopedBuffer<char> char_buf(kSize);
  EXPECT_EQ(sizeof(char) * kSize, char_buf.size());  // NOLINT
  internal_memset(char_buf.data(), 'c', kSize);
  for (int i = 0; i < kSize; i++) {
    EXPECT_EQ('c', char_buf[i]);
  }
}
650
651void IterationTestCallback(uptr chunk, void *arg) {
652  reinterpret_cast<std::set<uptr> *>(arg)->insert(chunk);
653}
654
// Allocates chunks of many sizes, then verifies that ForEachChunk (called
// under ForceLock/ForceUnlock) reports every live chunk at least once.
template <class Allocator>
void TestSizeClassAllocatorIteration() {
  Allocator *a = new Allocator;
  a->Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,
    50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000};

  std::vector<void *> allocated;

  // Allocate a bunch of chunks.
  for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) {
    uptr size = sizes[s];
    if (!a->CanAllocate(size, 1)) continue;
    // printf("s = %ld\n", size);
    // Roughly 80KB worth of chunks per size, but at least 6 of each.
    uptr n_iter = std::max((uptr)6, 80000 / size);
    // fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter);
    for (uptr j = 0; j < n_iter; j++) {
      uptr class_id0 = Allocator::SizeClassMapT::ClassID(size);
      void *x = cache.Allocate(a, class_id0);
      allocated.push_back(x);
    }
  }

  std::set<uptr> reported_chunks;
  a->ForceLock();
  a->ForEachChunk(IterationTestCallback, &reported_chunks);
  a->ForceUnlock();

  for (uptr i = 0; i < allocated.size(); i++) {
    // Don't use EXPECT_NE. Reporting the first mismatch is enough.
    ASSERT_NE(reported_chunks.find(reinterpret_cast<uptr>(allocated[i])),
              reported_chunks.end());
  }

  a->TestOnlyUnmap();
  delete a;
}
696
// Instantiate the iteration test for each allocator flavor.
#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64Iteration) {
  TestSizeClassAllocatorIteration<Allocator64>();
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32Iteration) {
  TestSizeClassAllocatorIteration<Allocator32Compact>();
}
706
// Same iteration contract for the secondary allocator: ForEachChunk under
// ForceLock must report every live chunk.
TEST(SanitizerCommon, LargeMmapAllocatorIteration) {
  LargeMmapAllocator<> a;
  a.Init();
  AllocatorStats stats;
  stats.Init();

  static const uptr kNumAllocs = 1000;
  char *allocated[kNumAllocs];
  static const uptr size = 40;
  // Allocate some.
  for (uptr i = 0; i < kNumAllocs; i++)
    allocated[i] = (char *)a.Allocate(&stats, size, 1);

  std::set<uptr> reported_chunks;
  a.ForceLock();
  a.ForEachChunk(IterationTestCallback, &reported_chunks);
  a.ForceUnlock();

  for (uptr i = 0; i < kNumAllocs; i++) {
    // Don't use EXPECT_NE. Reporting the first mismatch is enough.
    ASSERT_NE(reported_chunks.find(reinterpret_cast<uptr>(allocated[i])),
              reported_chunks.end());
  }
  for (uptr i = 0; i < kNumAllocs; i++)
    a.Deallocate(&stats, allocated[i]);
}
733
// GetBlockBeginFastLocked() (valid only while ForceLock is held): pointers
// anywhere inside a chunk — and, per the expectations below, slightly
// before it — resolve to the chunk start; addresses far outside resolve to
// null.
TEST(SanitizerCommon, LargeMmapAllocatorBlockBegin) {
  LargeMmapAllocator<> a;
  a.Init();
  AllocatorStats stats;
  stats.Init();

  static const uptr kNumAllocs = 1024;
  static const uptr kNumExpectedFalseLookups = 10000000;
  char *allocated[kNumAllocs];
  static const uptr size = 4096;
  // Allocate some.
  for (uptr i = 0; i < kNumAllocs; i++) {
    allocated[i] = (char *)a.Allocate(&stats, size, 1);
  }

  a.ForceLock();
  // Positive lookups: kNumAllocs^2 queries over live chunks.
  for (uptr i = 0; i < kNumAllocs  * kNumAllocs; i++) {
    // if ((i & (i - 1)) == 0) fprintf(stderr, "[%zd]\n", i);
    char *p1 = allocated[i % kNumAllocs];
    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1));
    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 + size / 2));
    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 + size - 1));
    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 - 100));
  }

  // Negative lookups: addresses near the bottom and top of the address
  // space must not be attributed to any chunk.
  for (uptr i = 0; i < kNumExpectedFalseLookups; i++) {
    void *p = reinterpret_cast<void *>(i % 1024);
    EXPECT_EQ((void *)0, a.GetBlockBeginFastLocked(p));
    p = reinterpret_cast<void *>(~0L - (i % 1024));
    EXPECT_EQ((void *)0, a.GetBlockBeginFastLocked(p));
  }
  a.ForceUnlock();

  for (uptr i = 0; i < kNumAllocs; i++)
    a.Deallocate(&stats, allocated[i]);
}
770
771
#if SANITIZER_WORDSIZE == 64
// Regression test for out-of-memory condition in PopulateFreeList().
TEST(SanitizerCommon, SizeClassAllocator64PopulateFreeListOOM) {
  // In a world where regions are small and chunks are huge...
  typedef SizeClassMap<63, 128, 16> SpecialSizeClassMap;
  typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0,
                               SpecialSizeClassMap> SpecialAllocator64;
  const uptr kRegionSize =
      kAllocatorSize / SpecialSizeClassMap::kNumClassesRounded;
  SpecialAllocator64 *a = new SpecialAllocator64;
  a->Init();
  SizeClassAllocatorLocalCache<SpecialAllocator64> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  // ...one man is on a mission to overflow a region with a series of
  // successive allocations.
  const uptr kClassID = 107;
  // Query the chunk size from the same map the allocator is instantiated
  // with.  (Fix: the original asked DefaultSizeClassMap, a different map
  // that need not agree with SpecialSizeClassMap on Size(107); the two
  // ASSERTs relating the chunk size to the region size only hold for the
  // allocator's own map.)
  const uptr kAllocationSize = SpecialSizeClassMap::Size(kClassID);
  ASSERT_LT(2 * kAllocationSize, kRegionSize);
  ASSERT_GT(3 * kAllocationSize, kRegionSize);
  // The first chunk fits; the second or third must exhaust the region.
  cache.Allocate(a, kClassID);
  EXPECT_DEATH(cache.Allocate(a, kClassID) && cache.Allocate(a, kClassID),
               "The process has exhausted");
  a->TestOnlyUnmap();
  delete a;
}
#endif
800
// Sets every 7th cell of a 2^18-cell two-level byte map and verifies that
// set cells read back their value while untouched cells read 0.
TEST(SanitizerCommon, TwoLevelByteMap) {
  const u64 kSize1 = 1 << 6, kSize2 = 1 << 12;
  const u64 n = kSize1 * kSize2;
  TwoLevelByteMap<kSize1, kSize2> m;
  m.TestOnlyInit();
  for (u64 i = 0; i < n; i += 7) {
    m.set(i, (i % 100) + 1);
  }
  for (u64 j = 0; j < n; j++) {
    if (j % 7)
      EXPECT_EQ(m[j], 0);
    else
      EXPECT_EQ(m[j], (j % 100) + 1);
  }

  m.TestOnlyUnmap();
}
818
819
// Byte map type with the counting map/unmap callback, plus the per-thread
// parameter block for the threaded test below.
typedef TwoLevelByteMap<1 << 12, 1 << 13, TestMapUnmapCallback> TestByteMap;

struct TestByteMapParam {
  TestByteMap *m;     // shared map under test
  size_t shard;       // this thread's starting index
  size_t num_shards;  // stride between consecutive indices of this thread
};
827
828void *TwoLevelByteMapUserThread(void *param) {
829  TestByteMapParam *p = (TestByteMapParam*)param;
830  for (size_t i = p->shard; i < p->m->size(); i += p->num_shards) {
831    size_t val = (i % 100) + 1;
832    p->m->set(i, val);
833    EXPECT_EQ((*p->m)[i], val);
834  }
835  return 0;
836}
837
// Four threads fill disjoint shards of the byte map concurrently; the
// map/unmap callback counters must then equal size1() — presumably one
// mapping per second-level array (verify against TwoLevelByteMap) — with
// no unmaps until TestOnlyUnmap().
TEST(SanitizerCommon, ThreadedTwoLevelByteMap) {
  TestByteMap m;
  m.TestOnlyInit();
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  static const int kNumThreads = 4;
  pthread_t t[kNumThreads];
  TestByteMapParam p[kNumThreads];
  for (int i = 0; i < kNumThreads; i++) {
    p[i].m = &m;
    p[i].shard = i;
    p[i].num_shards = kNumThreads;
    PTHREAD_CREATE(&t[i], 0, TwoLevelByteMapUserThread, &p[i]);
  }
  for (int i = 0; i < kNumThreads; i++) {
    PTHREAD_JOIN(t[i], 0);
  }
  EXPECT_EQ((uptr)TestMapUnmapCallback::map_count, m.size1());
  EXPECT_EQ((uptr)TestMapUnmapCallback::unmap_count, 0UL);
  m.TestOnlyUnmap();
  EXPECT_EQ((uptr)TestMapUnmapCallback::map_count, m.size1());
  EXPECT_EQ((uptr)TestMapUnmapCallback::unmap_count, m.size1());
}
861
862#endif  // #if TSAN_DEBUG==0
863