//===-- sanitizer_allocator_test.cc ---------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
// Tests for sanitizer_allocator.h.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_common.h"

#include "sanitizer_test_utils.h"

#include "gtest/gtest.h"

#include <stdlib.h>
#include <pthread.h>
#include <algorithm>
#include <vector>
#include <set>

// Too slow for debug build
#if TSAN_DEBUG == 0

#if SANITIZER_WORDSIZE == 64
static const uptr kAllocatorSpace = 0x700000000000ULL;
static const uptr kAllocatorSize  = 0x010000000000ULL;  // 1T.
static const u64 kAddressSpaceSize = 1ULL << 47;

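// SizeClassAllocator64 template parameters, in the order used below: allocator
// space start, space size, per-chunk metadata size, and the size class map
// (with an optional map/unmap callback as the last parameter where needed).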
typedef SizeClassAllocator64<
  kAllocatorSpace, kAllocatorSize, 16, DefaultSizeClassMap> Allocator64;

typedef SizeClassAllocator64<
  kAllocatorSpace, kAllocatorSize, 16, CompactSizeClassMap> Allocator64Compact;
#else
static const u64 kAddressSpaceSize = 1ULL << 32;
#endif

typedef SizeClassAllocator32<
  0, kAddressSpaceSize, 16, CompactSizeClassMap> Allocator32Compact;

template <class SizeClassMap>
void TestSizeClassMap() {
  typedef SizeClassMap SCMap;
  // SCMap::Print();
  SCMap::Validate();
}

TEST(SanitizerCommon, DefaultSizeClassMap) {
  TestSizeClassMap<DefaultSizeClassMap>();
}

TEST(SanitizerCommon, CompactSizeClassMap) {
  TestSizeClassMap<CompactSizeClassMap>();
}

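// Generic smoke test for a size-class allocator: allocate chunks of assorted
// sizes, verify block boundaries and metadata round-trips, free everything,
// and check that repeated cycles do not grow total memory usage.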
template <class Allocator>
void TestSizeClassAllocator() {
  Allocator *a = new Allocator;
  a->Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,
    50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000};

  std::vector<void *> allocated;

  uptr last_total_allocated = 0;
  for (int i = 0; i < 3; i++) {
    // Allocate a bunch of chunks.
    for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) {
      uptr size = sizes[s];
      if (!a->CanAllocate(size, 1)) continue;
      // printf("s = %ld\n", size);
      uptr n_iter = std::max((uptr)6, 8000000 / size);
      // fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter);
      for (uptr i = 0; i < n_iter; i++) {
        uptr class_id0 = Allocator::SizeClassMapT::ClassID(size);
        char *x = (char*)cache.Allocate(a, class_id0);
        x[0] = 0;
        x[size - 1] = 0;
        x[size / 2] = 0;
        allocated.push_back(x);
        CHECK_EQ(x, a->GetBlockBegin(x));
        CHECK_EQ(x, a->GetBlockBegin(x + size - 1));
        CHECK(a->PointerIsMine(x));
        CHECK(a->PointerIsMine(x + size - 1));
        CHECK(a->PointerIsMine(x + size / 2));
        CHECK_GE(a->GetActuallyAllocatedSize(x), size);
        uptr class_id = a->GetSizeClass(x);
        CHECK_EQ(class_id, Allocator::SizeClassMapT::ClassID(size));
        uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));
        metadata[0] = reinterpret_cast<uptr>(x) + 1;
        metadata[1] = 0xABCD;
      }
    }
    // Deallocate all.
    for (uptr i = 0; i < allocated.size(); i++) {
      void *x = allocated[i];
      uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_EQ(metadata[0], reinterpret_cast<uptr>(x) + 1);
      CHECK_EQ(metadata[1], 0xABCD);
      cache.Deallocate(a, a->GetSizeClass(x), x);
    }
    allocated.clear();
    uptr total_allocated = a->TotalMemoryUsed();
    if (last_total_allocated == 0)
      last_total_allocated = total_allocated;
    CHECK_EQ(last_total_allocated, total_allocated);
  }

  // Check that GetBlockBegin never crashes.
  for (uptr x = 0, step = kAddressSpaceSize / 100000;
       x < kAddressSpaceSize - step; x += step)
    if (a->PointerIsMine(reinterpret_cast<void *>(x)))
      Ident(a->GetBlockBegin(reinterpret_cast<void *>(x)));

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64) {
  TestSizeClassAllocator<Allocator64>();
}

TEST(SanitizerCommon, SizeClassAllocator64Compact) {
  TestSizeClassAllocator<Allocator64Compact>();
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32Compact) {
  TestSizeClassAllocator<Allocator32Compact>();
}

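// Stress test for metadata access: allocate many chunks across size classes,
// then call GetMetaData on them kNumAllocs^2 times.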
template <class Allocator>
void SizeClassAllocatorMetadataStress() {
  Allocator *a = new Allocator;
  a->Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  static volatile void *sink;

  const uptr kNumAllocs = 10000;
  void *allocated[kNumAllocs];
  for (uptr i = 0; i < kNumAllocs; i++) {
    void *x = cache.Allocate(a, 1 + i % 50);
    allocated[i] = x;
  }
  // Get Metadata kNumAllocs^2 times.
  for (uptr i = 0; i < kNumAllocs * kNumAllocs; i++) {
    sink = a->GetMetaData(allocated[i % kNumAllocs]);
  }
  for (uptr i = 0; i < kNumAllocs; i++) {
    cache.Deallocate(a, 1 + i % 50, allocated[i]);
  }

  a->TestOnlyUnmap();
  (void)sink;
  delete a;
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64MetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator64>();
}

TEST(SanitizerCommon, SizeClassAllocator64CompactMetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator64Compact>();
}
#endif  // SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator32CompactMetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator32Compact>();
}

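// Stress test for GetBlockBegin on the largest size class: allocate roughly
// 8GB worth of chunks and check that every chunk start maps back to itself.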
template <class Allocator>
void SizeClassAllocatorGetBlockBeginStress() {
  Allocator *a = new Allocator;
  a->Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  uptr max_size_class = Allocator::kNumClasses - 1;
  uptr size = Allocator::SizeClassMapT::Size(max_size_class);
  u64 G8 = 1ULL << 33;
  for (size_t i = 0; i <= G8 / size; i++) {
    void *x = cache.Allocate(a, max_size_class);
    void *beg = a->GetBlockBegin(x);
    if ((i & (i - 1)) == 0)
      fprintf(stderr, "[%zd] %p %p\n", i, x, beg);
    EXPECT_EQ(x, beg);
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, DISABLED_SizeClassAllocator64GetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator64>();
}
#endif  // SANITIZER_WORDSIZE == 64

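// Counts map/unmap events so the tests below can verify that the allocators
// invoke their MapUnmapCallback the expected number of times.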
struct TestMapUnmapCallback {
  static int map_count, unmap_count;
  void OnMap(uptr p, uptr size) const { map_count++; }
  void OnUnmap(uptr p, uptr size) const { unmap_count++; }
};
int TestMapUnmapCallback::map_count;
int TestMapUnmapCallback::unmap_count;

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64MapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  typedef SizeClassAllocator64<
      kAllocatorSpace, kAllocatorSize, 16, DefaultSizeClassMap,
      TestMapUnmapCallback> Allocator64WithCallBack;
  Allocator64WithCallBack *a = new Allocator64WithCallBack;
  a->Init();
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);  // Allocator state.
  SizeClassAllocatorLocalCache<Allocator64WithCallBack> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();
  a->AllocateBatch(&stats, &cache, 32);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 3);  // State + alloc + metadata.
  a->TestOnlyUnmap();
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);  // The whole thing.
  delete a;
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32MapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  typedef SizeClassAllocator32<
      0, kAddressSpaceSize, 16, CompactSizeClassMap,
      TestMapUnmapCallback> Allocator32WithCallBack;
  Allocator32WithCallBack *a = new Allocator32WithCallBack;
  a->Init();
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);  // Allocator state.
  SizeClassAllocatorLocalCache<Allocator32WithCallBack> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();
  a->AllocateBatch(&stats, &cache, 32);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 2);  // alloc.
  a->TestOnlyUnmap();
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 2);  // The whole thing + alloc.
  delete a;
  // fprintf(stderr, "Map: %d Unmap: %d\n",
  //         TestMapUnmapCallback::map_count,
  //         TestMapUnmapCallback::unmap_count);
}

TEST(SanitizerCommon, LargeMmapAllocatorMapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  LargeMmapAllocator<TestMapUnmapCallback> a;
  a.Init();
  AllocatorStats stats;
  stats.Init();
  void *x = a.Allocate(&stats, 1 << 20, 1);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);
  a.Deallocate(&stats, x);
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);
}

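// Keeps allocating batches from size class 52 until the allocator exhausts its
// fixed address space; the test below expects this to die with "Out of memory".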
template<class Allocator>
void FailInAssertionOnOOM() {
  Allocator a;
  a.Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();
  for (int i = 0; i < 1000000; i++) {
    a.AllocateBatch(&stats, &cache, 52);
  }

  a.TestOnlyUnmap();
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64Overflow) {
  EXPECT_DEATH(FailInAssertionOnOOM<Allocator64>(), "Out of memory");
}
#endif

TEST(SanitizerCommon, LargeMmapAllocator) {
  LargeMmapAllocator<> a;
  a.Init();
  AllocatorStats stats;
  stats.Init();

  static const int kNumAllocs = 1000;
  char *allocated[kNumAllocs];
  static const uptr size = 4000;
  // Allocate some.
  for (int i = 0; i < kNumAllocs; i++) {
    allocated[i] = (char *)a.Allocate(&stats, size, 1);
    CHECK(a.PointerIsMine(allocated[i]));
  }
  // Deallocate all.
  CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
  for (int i = 0; i < kNumAllocs; i++) {
    char *p = allocated[i];
    CHECK(a.PointerIsMine(p));
    a.Deallocate(&stats, p);
  }
  // Check that none are left.
  CHECK_EQ(a.TotalMemoryUsed(), 0);

  // Allocate some more, also add metadata.
  for (int i = 0; i < kNumAllocs; i++) {
    char *x = (char *)a.Allocate(&stats, size, 1);
    CHECK_GE(a.GetActuallyAllocatedSize(x), size);
    uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(x));
    *meta = i;
    allocated[i] = x;
  }
  for (int i = 0; i < kNumAllocs * kNumAllocs; i++) {
    char *p = allocated[i % kNumAllocs];
    CHECK(a.PointerIsMine(p));
    CHECK(a.PointerIsMine(p + 2000));
  }
  CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
  // Deallocate all in reverse order.
  for (int i = 0; i < kNumAllocs; i++) {
    int idx = kNumAllocs - i - 1;
    char *p = allocated[idx];
    uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(p));
    CHECK_EQ(*meta, idx);
    CHECK(a.PointerIsMine(p));
    a.Deallocate(&stats, p);
  }
  CHECK_EQ(a.TotalMemoryUsed(), 0);

  // Test alignments.
  uptr max_alignment = SANITIZER_WORDSIZE == 64 ? (1 << 28) : (1 << 24);
  for (uptr alignment = 8; alignment <= max_alignment; alignment *= 2) {
    const uptr kNumAlignedAllocs = 100;
    for (uptr i = 0; i < kNumAlignedAllocs; i++) {
      uptr size = ((i % 10) + 1) * 4096;
      char *p = allocated[i] = (char *)a.Allocate(&stats, size, alignment);
      CHECK_EQ(p, a.GetBlockBegin(p));
      CHECK_EQ(p, a.GetBlockBegin(p + size - 1));
      CHECK_EQ(p, a.GetBlockBegin(p + size / 2));
      CHECK_EQ(0, (uptr)allocated[i] % alignment);
      p[0] = p[size - 1] = 0;
    }
    for (uptr i = 0; i < kNumAlignedAllocs; i++) {
      a.Deallocate(&stats, allocated[i]);
    }
  }

  // Regression test for boundary condition in GetBlockBegin().
  uptr page_size = GetPageSizeCached();
  char *p = (char *)a.Allocate(&stats, page_size, 1);
  CHECK_EQ(p, a.GetBlockBegin(p));
  CHECK_EQ(p, (char *)a.GetBlockBegin(p + page_size - 1));
  CHECK_NE(p, (char *)a.GetBlockBegin(p + page_size));
  a.Deallocate(&stats, p);
}

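// Exercises the combined (primary + secondary) allocator: rejects huge or
// overflowing requests, then runs repeated allocate/shuffle/deallocate cycles
// while round-tripping per-chunk metadata.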
template
<class PrimaryAllocator, class SecondaryAllocator, class AllocatorCache>
void TestCombinedAllocator() {
  typedef
      CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator>
      Allocator;
  Allocator *a = new Allocator;
  a->Init();

  AllocatorCache cache;
  memset(&cache, 0, sizeof(cache));
  a->InitCache(&cache);

  EXPECT_EQ(a->Allocate(&cache, -1, 1), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, -1, 1024), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1024), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1023, 1024), (void*)0);

  const uptr kNumAllocs = 100000;
  const uptr kNumIter = 10;
  for (uptr iter = 0; iter < kNumIter; iter++) {
    std::vector<void*> allocated;
    for (uptr i = 0; i < kNumAllocs; i++) {
      uptr size = (i % (1 << 14)) + 1;
      if ((i % 1024) == 0)
        size = 1 << (10 + (i % 14));
      void *x = a->Allocate(&cache, size, 1);
      uptr *meta = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_EQ(*meta, 0);
      *meta = size;
      allocated.push_back(x);
    }

    random_shuffle(allocated.begin(), allocated.end());

    for (uptr i = 0; i < kNumAllocs; i++) {
      void *x = allocated[i];
      uptr *meta = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_NE(*meta, 0);
      CHECK(a->PointerIsMine(x));
      *meta = 0;
      a->Deallocate(&cache, x);
    }
    allocated.clear();
    a->SwallowCache(&cache);
  }
  a->DestroyCache(&cache);
  a->TestOnlyUnmap();
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, CombinedAllocator64) {
  TestCombinedAllocator<Allocator64,
      LargeMmapAllocator<>,
      SizeClassAllocatorLocalCache<Allocator64> > ();
}

TEST(SanitizerCommon, CombinedAllocator64Compact) {
  TestCombinedAllocator<Allocator64Compact,
      LargeMmapAllocator<>,
      SizeClassAllocatorLocalCache<Allocator64Compact> > ();
}
#endif

TEST(SanitizerCommon, CombinedAllocator32Compact) {
  TestCombinedAllocator<Allocator32Compact,
      LargeMmapAllocator<>,
      SizeClassAllocatorLocalCache<Allocator32Compact> > ();
}

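// Verifies that a thread-local cache drains cleanly: after Drain(), repeated
// allocate/deallocate rounds must leave the allocator's memory usage unchanged.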
template <class AllocatorCache>
void TestSizeClassAllocatorLocalCache() {
  AllocatorCache cache;
  typedef typename AllocatorCache::Allocator Allocator;
  Allocator *a = new Allocator();

  a->Init();
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  const uptr kNumAllocs = 10000;
  const int kNumIter = 100;
  uptr saved_total = 0;
  for (int class_id = 1; class_id <= 5; class_id++) {
    for (int it = 0; it < kNumIter; it++) {
      void *allocated[kNumAllocs];
      for (uptr i = 0; i < kNumAllocs; i++) {
        allocated[i] = cache.Allocate(a, class_id);
      }
      for (uptr i = 0; i < kNumAllocs; i++) {
        cache.Deallocate(a, class_id, allocated[i]);
      }
      cache.Drain(a);
      uptr total_allocated = a->TotalMemoryUsed();
      if (it)
        CHECK_EQ(saved_total, total_allocated);
      saved_total = total_allocated;
    }
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64LocalCache) {
  TestSizeClassAllocatorLocalCache<
      SizeClassAllocatorLocalCache<Allocator64> >();
}

TEST(SanitizerCommon, SizeClassAllocator64CompactLocalCache) {
  TestSizeClassAllocatorLocalCache<
      SizeClassAllocatorLocalCache<Allocator64Compact> >();
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32CompactLocalCache) {
  TestSizeClassAllocatorLocalCache<
      SizeClassAllocatorLocalCache<Allocator32Compact> >();
}

#if SANITIZER_WORDSIZE == 64
typedef SizeClassAllocatorLocalCache<Allocator64> AllocatorCache;
static AllocatorCache static_allocator_cache;

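// Each worker thread allocates from the shared static cache and drains it back;
// the test below checks that repeatedly spawning such threads does not grow the
// allocator's memory usage (i.e. nothing is leaked per thread).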
void *AllocatorLeakTestWorker(void *arg) {
  typedef AllocatorCache::Allocator Allocator;
  Allocator *a = (Allocator*)(arg);
  static_allocator_cache.Allocate(a, 10);
  static_allocator_cache.Drain(a);
  return 0;
}

TEST(SanitizerCommon, AllocatorLeakTest) {
  typedef AllocatorCache::Allocator Allocator;
  Allocator a;
  a.Init();
  uptr total_used_memory = 0;
  for (int i = 0; i < 100; i++) {
    pthread_t t;
    EXPECT_EQ(0, pthread_create(&t, 0, AllocatorLeakTestWorker, &a));
    EXPECT_EQ(0, pthread_join(t, 0));
    if (i == 0)
      total_used_memory = a.TotalMemoryUsed();
    EXPECT_EQ(a.TotalMemoryUsed(), total_used_memory);
  }

  a.TestOnlyUnmap();
}

// Struct which is allocated to pass info to new threads.  The new thread frees
// it.
struct NewThreadParams {
  AllocatorCache *thread_cache;
  AllocatorCache::Allocator *allocator;
  uptr class_id;
};

// Called in a new thread.  Just frees its argument.
static void *DeallocNewThreadWorker(void *arg) {
  NewThreadParams *params = reinterpret_cast<NewThreadParams*>(arg);
  params->thread_cache->Deallocate(params->allocator, params->class_id, params);
  return NULL;
}

// The allocator cache is supposed to be POD and zero initialized.  We should be
// able to call Deallocate on a zeroed cache, and it will self-initialize.
TEST(Allocator, AllocatorCacheDeallocNewThread) {
  AllocatorCache::Allocator allocator;
  allocator.Init();
  AllocatorCache main_cache;
  AllocatorCache child_cache;
  memset(&main_cache, 0, sizeof(main_cache));
  memset(&child_cache, 0, sizeof(child_cache));

  uptr class_id = DefaultSizeClassMap::ClassID(sizeof(NewThreadParams));
  NewThreadParams *params = reinterpret_cast<NewThreadParams*>(
      main_cache.Allocate(&allocator, class_id));
  params->thread_cache = &child_cache;
  params->allocator = &allocator;
  params->class_id = class_id;
  pthread_t t;
  EXPECT_EQ(0, pthread_create(&t, 0, DeallocNewThreadWorker, params));
  EXPECT_EQ(0, pthread_join(t, 0));
}
#endif

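// Basic sanity checks for InternalAlloc/InternalFree, the allocator the
// sanitizer runtime uses for its own internal data.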
TEST(Allocator, Basic) {
  char *p = (char*)InternalAlloc(10);
  EXPECT_NE(p, (char*)0);
  char *p2 = (char*)InternalAlloc(20);
  EXPECT_NE(p2, (char*)0);
  EXPECT_NE(p2, p);
  InternalFree(p);
  InternalFree(p2);
}

TEST(Allocator, Stress) {
  const int kCount = 1000;
  char *ptrs[kCount];
  unsigned rnd = 42;
  for (int i = 0; i < kCount; i++) {
    uptr sz = my_rand_r(&rnd) % 1000;
    char *p = (char*)InternalAlloc(sz);
    EXPECT_NE(p, (char*)0);
    ptrs[i] = p;
  }
  for (int i = 0; i < kCount; i++) {
    InternalFree(ptrs[i]);
  }
}

TEST(Allocator, ScopedBuffer) {
  const int kSize = 512;
  {
    InternalScopedBuffer<int> int_buf(kSize);
    EXPECT_EQ(sizeof(int) * kSize, int_buf.size());  // NOLINT
  }
  InternalScopedBuffer<char> char_buf(kSize);
  EXPECT_EQ(sizeof(char) * kSize, char_buf.size());  // NOLINT
  internal_memset(char_buf.data(), 'c', kSize);
  for (int i = 0; i < kSize; i++) {
    EXPECT_EQ('c', char_buf[i]);
  }
}

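// Collects every chunk reported by ForEachChunk so the iteration tests below
// can verify that all live allocations are visited.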
class IterationTestCallback {
 public:
  explicit IterationTestCallback(std::set<void *> *chunks)
    : chunks_(chunks) {}
  void operator()(void *chunk) const {
    chunks_->insert(chunk);
  }
 private:
  std::set<void *> *chunks_;
};

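// Allocates chunks of assorted sizes and checks that ForEachChunk (under
// ForceLock/ForceUnlock) reports every one of them.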
template <class Allocator>
void TestSizeClassAllocatorIteration() {
  Allocator *a = new Allocator;
  a->Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,
    50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000};

  std::vector<void *> allocated;

  // Allocate a bunch of chunks.
  for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) {
    uptr size = sizes[s];
    if (!a->CanAllocate(size, 1)) continue;
    // printf("s = %ld\n", size);
    uptr n_iter = std::max((uptr)6, 80000 / size);
    // fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter);
    for (uptr j = 0; j < n_iter; j++) {
      uptr class_id0 = Allocator::SizeClassMapT::ClassID(size);
      void *x = cache.Allocate(a, class_id0);
      allocated.push_back(x);
    }
  }

  std::set<void *> reported_chunks;
  IterationTestCallback callback(&reported_chunks);
  a->ForceLock();
  a->ForEachChunk(callback);
  a->ForceUnlock();

  for (uptr i = 0; i < allocated.size(); i++) {
    // Don't use EXPECT_NE. Reporting the first mismatch is enough.
    ASSERT_NE(reported_chunks.find(allocated[i]), reported_chunks.end());
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64Iteration) {
  TestSizeClassAllocatorIteration<Allocator64>();
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32Iteration) {
  TestSizeClassAllocatorIteration<Allocator32Compact>();
}

TEST(SanitizerCommon, LargeMmapAllocatorIteration) {
  LargeMmapAllocator<> a;
  a.Init();
  AllocatorStats stats;
  stats.Init();

  static const uptr kNumAllocs = 1000;
  char *allocated[kNumAllocs];
  static const uptr size = 40;
  // Allocate some.
  for (uptr i = 0; i < kNumAllocs; i++) {
    allocated[i] = (char *)a.Allocate(&stats, size, 1);
  }

  std::set<void *> reported_chunks;
  IterationTestCallback callback(&reported_chunks);
  a.ForceLock();
  a.ForEachChunk(callback);
  a.ForceUnlock();

  for (uptr i = 0; i < kNumAllocs; i++) {
    // Don't use EXPECT_NE. Reporting the first mismatch is enough.
    ASSERT_NE(reported_chunks.find(allocated[i]), reported_chunks.end());
  }
}

#endif  // #if TSAN_DEBUG==0