sanitizer_allocator_test.cc revision 871b7fd4fdde47cbf36c2b5c19d679e87dd11df6
//===-- sanitizer_allocator_test.cc ---------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
// Tests for sanitizer_allocator.h.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_common.h"

#include "sanitizer_test_utils.h"

#include "gtest/gtest.h"

#include <stdlib.h>
#include <pthread.h>
#include <algorithm>
#include <vector>
#include <set>

// Too slow for debug build
#if TSAN_DEBUG == 0

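// Allocator configurations under test. On 64-bit targets the primary
// allocator manages a fixed 1 TB region starting at 0x700000000000; the
// 32-bit-style allocator covers the whole configured address space. Both
// reserve 16 bytes of per-chunk metadata.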
#if SANITIZER_WORDSIZE == 64
static const uptr kAllocatorSpace = 0x700000000000ULL;
static const uptr kAllocatorSize  = 0x010000000000ULL;  // 1T.
static const u64 kAddressSpaceSize = 1ULL << 47;

typedef SizeClassAllocator64<
  kAllocatorSpace, kAllocatorSize, 16, DefaultSizeClassMap> Allocator64;

typedef SizeClassAllocator64<
  kAllocatorSpace, kAllocatorSize, 16, CompactSizeClassMap> Allocator64Compact;
#else
static const u64 kAddressSpaceSize = 1ULL << 32;
#endif

typedef SizeClassAllocator32<
  0, kAddressSpaceSize, 16, CompactSizeClassMap> Allocator32Compact;

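// Check the internal consistency of a size class map using its own
// Validate() routine.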
template <class SizeClassMap>
void TestSizeClassMap() {
  typedef SizeClassMap SCMap;
  // SCMap::Print();
  SCMap::Validate();
}

TEST(SanitizerCommon, DefaultSizeClassMap) {
  TestSizeClassMap<DefaultSizeClassMap>();
}

TEST(SanitizerCommon, CompactSizeClassMap) {
  TestSizeClassMap<CompactSizeClassMap>();
}

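// Generic stress test for a size class allocator: allocate chunks of many
// different sizes through a local cache, verify block boundaries, size
// classes and metadata, then free everything and check that total memory
// usage stays stable across iterations.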
template <class Allocator>
void TestSizeClassAllocator() {
  Allocator *a = new Allocator;
  a->Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,
    50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000};

  std::vector<void *> allocated;

  uptr last_total_allocated = 0;
  for (int i = 0; i < 3; i++) {
    // Allocate a bunch of chunks.
    for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) {
      uptr size = sizes[s];
      if (!a->CanAllocate(size, 1)) continue;
      // printf("s = %ld\n", size);
      uptr n_iter = std::max((uptr)6, 8000000 / size);
      // fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter);
      for (uptr i = 0; i < n_iter; i++) {
        uptr class_id0 = Allocator::SizeClassMapT::ClassID(size);
        char *x = (char*)cache.Allocate(a, class_id0);
        x[0] = 0;
        x[size - 1] = 0;
        x[size / 2] = 0;
        allocated.push_back(x);
        CHECK_EQ(x, a->GetBlockBegin(x));
        CHECK_EQ(x, a->GetBlockBegin(x + size - 1));
        CHECK(a->PointerIsMine(x));
        CHECK(a->PointerIsMine(x + size - 1));
        CHECK(a->PointerIsMine(x + size / 2));
        CHECK_GE(a->GetActuallyAllocatedSize(x), size);
        uptr class_id = a->GetSizeClass(x);
        CHECK_EQ(class_id, Allocator::SizeClassMapT::ClassID(size));
        uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));
        metadata[0] = reinterpret_cast<uptr>(x) + 1;
        metadata[1] = 0xABCD;
      }
    }
    // Deallocate all.
    for (uptr i = 0; i < allocated.size(); i++) {
      void *x = allocated[i];
      uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_EQ(metadata[0], reinterpret_cast<uptr>(x) + 1);
      CHECK_EQ(metadata[1], 0xABCD);
      cache.Deallocate(a, a->GetSizeClass(x), x);
    }
    allocated.clear();
    uptr total_allocated = a->TotalMemoryUsed();
    if (last_total_allocated == 0)
      last_total_allocated = total_allocated;
    CHECK_EQ(last_total_allocated, total_allocated);
  }

  // Check that GetBlockBegin never crashes.
  for (uptr x = 0, step = kAddressSpaceSize / 100000;
       x < kAddressSpaceSize - step; x += step)
    if (a->PointerIsMine(reinterpret_cast<void *>(x)))
      Ident(a->GetBlockBegin(reinterpret_cast<void *>(x)));

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64) {
  TestSizeClassAllocator<Allocator64>();
}

TEST(SanitizerCommon, SizeClassAllocator64Compact) {
  TestSizeClassAllocator<Allocator64Compact>();
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32Compact) {
  TestSizeClassAllocator<Allocator32Compact>();
}

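// Stress GetMetaData(): query the metadata of a fixed set of live chunks
// many times and check that the returned pointer never changes.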
template <class Allocator>
void SizeClassAllocatorMetadataStress() {
  Allocator *a = new Allocator;
  a->Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  const uptr kNumAllocs = 1 << 13;
  void *allocated[kNumAllocs];
  void *meta[kNumAllocs];
  for (uptr i = 0; i < kNumAllocs; i++) {
    void *x = cache.Allocate(a, 1 + i % 50);
    allocated[i] = x;
    meta[i] = a->GetMetaData(x);
  }
  // Get Metadata kNumAllocs^2 times.
  for (uptr i = 0; i < kNumAllocs * kNumAllocs; i++) {
    uptr idx = i % kNumAllocs;
    void *m = a->GetMetaData(allocated[idx]);
    EXPECT_EQ(m, meta[idx]);
  }
  for (uptr i = 0; i < kNumAllocs; i++) {
    cache.Deallocate(a, 1 + i % 50, allocated[i]);
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64MetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator64>();
}

TEST(SanitizerCommon, SizeClassAllocator64CompactMetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator64Compact>();
}
#endif  // SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator32CompactMetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator32Compact>();
}

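// Allocate roughly 8 GB worth of the largest size class so that chunk
// offsets exceed 32 bits, making sure GetBlockBegin() is computed without
// overflow.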
template <class Allocator>
void SizeClassAllocatorGetBlockBeginStress() {
  Allocator *a = new Allocator;
  a->Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  uptr max_size_class = Allocator::kNumClasses - 1;
  uptr size = Allocator::SizeClassMapT::Size(max_size_class);
  u64 G8 = 1ULL << 33;
  // Make sure we correctly compute GetBlockBegin() w/o overflow.
  for (size_t i = 0; i <= G8 / size; i++) {
    void *x = cache.Allocate(a, max_size_class);
    void *beg = a->GetBlockBegin(x);
    if ((i & (i - 1)) == 0)
      fprintf(stderr, "[%zd] %p %p\n", i, x, beg);
    EXPECT_EQ(x, beg);
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64GetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator64>();
}
TEST(SanitizerCommon, SizeClassAllocator64CompactGetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator64Compact>();
}
TEST(SanitizerCommon, SizeClassAllocator32CompactGetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator32Compact>();
}
#endif  // SANITIZER_WORDSIZE == 64

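// Callback that counts map/unmap events; used to check that the allocators
// invoke their MapUnmapCallback template parameter.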
struct TestMapUnmapCallback {
  static int map_count, unmap_count;
  void OnMap(uptr p, uptr size) const { map_count++; }
  void OnUnmap(uptr p, uptr size) const { unmap_count++; }
};
int TestMapUnmapCallback::map_count;
int TestMapUnmapCallback::unmap_count;

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64MapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  typedef SizeClassAllocator64<
      kAllocatorSpace, kAllocatorSize, 16, DefaultSizeClassMap,
      TestMapUnmapCallback> Allocator64WithCallBack;
  Allocator64WithCallBack *a = new Allocator64WithCallBack;
  a->Init();
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);  // Allocator state.
  SizeClassAllocatorLocalCache<Allocator64WithCallBack> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();
  a->AllocateBatch(&stats, &cache, 32);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 3);  // State + alloc + metadata.
  a->TestOnlyUnmap();
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);  // The whole thing.
  delete a;
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32MapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  typedef SizeClassAllocator32<
      0, kAddressSpaceSize, 16, CompactSizeClassMap,
      TestMapUnmapCallback> Allocator32WithCallBack;
  Allocator32WithCallBack *a = new Allocator32WithCallBack;
  a->Init();
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);  // Allocator state.
  SizeClassAllocatorLocalCache<Allocator32WithCallBack> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();
  a->AllocateBatch(&stats, &cache, 32);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 2);  // alloc.
  a->TestOnlyUnmap();
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 2);  // The whole thing + alloc.
  delete a;
  // fprintf(stderr, "Map: %d Unmap: %d\n",
  //         TestMapUnmapCallback::map_count,
  //         TestMapUnmapCallback::unmap_count);
}

TEST(SanitizerCommon, LargeMmapAllocatorMapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  LargeMmapAllocator<TestMapUnmapCallback> a;
  a.Init();
  AllocatorStats stats;
  stats.Init();
  void *x = a.Allocate(&stats, 1 << 20, 1);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);
  a.Deallocate(&stats, x);
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);
}

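// Keep allocating batches until the allocator's fixed-size region is
// exhausted; EXPECT_DEATH below checks that this dies with "Out of memory".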
template<class Allocator>
void FailInAssertionOnOOM() {
  Allocator a;
  a.Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();
  for (int i = 0; i < 1000000; i++) {
    a.AllocateBatch(&stats, &cache, 52);
  }

  a.TestOnlyUnmap();
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64Overflow) {
  EXPECT_DEATH(FailInAssertionOnOOM<Allocator64>(), "Out of memory");
}
#endif

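// Exercise the secondary (mmap-based) allocator: plain allocations with
// metadata, aligned allocations, and a boundary case for GetBlockBegin().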
TEST(SanitizerCommon, LargeMmapAllocator) {
  LargeMmapAllocator<> a;
  a.Init();
  AllocatorStats stats;
  stats.Init();

  static const int kNumAllocs = 1000;
  char *allocated[kNumAllocs];
  static const uptr size = 4000;
  // Allocate some.
  for (int i = 0; i < kNumAllocs; i++) {
    allocated[i] = (char *)a.Allocate(&stats, size, 1);
    CHECK(a.PointerIsMine(allocated[i]));
  }
  // Deallocate all.
  CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
  for (int i = 0; i < kNumAllocs; i++) {
    char *p = allocated[i];
    CHECK(a.PointerIsMine(p));
    a.Deallocate(&stats, p);
  }
  // Check that none are left.
  CHECK_EQ(a.TotalMemoryUsed(), 0);

  // Allocate some more, also add metadata.
  for (int i = 0; i < kNumAllocs; i++) {
    char *x = (char *)a.Allocate(&stats, size, 1);
    CHECK_GE(a.GetActuallyAllocatedSize(x), size);
    uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(x));
    *meta = i;
    allocated[i] = x;
  }
  for (int i = 0; i < kNumAllocs * kNumAllocs; i++) {
    char *p = allocated[i % kNumAllocs];
    CHECK(a.PointerIsMine(p));
    CHECK(a.PointerIsMine(p + 2000));
  }
  CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
  // Deallocate all in reverse order.
  for (int i = 0; i < kNumAllocs; i++) {
    int idx = kNumAllocs - i - 1;
    char *p = allocated[idx];
    uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(p));
    CHECK_EQ(*meta, idx);
    CHECK(a.PointerIsMine(p));
    a.Deallocate(&stats, p);
  }
  CHECK_EQ(a.TotalMemoryUsed(), 0);

  // Test alignments.
  uptr max_alignment = SANITIZER_WORDSIZE == 64 ? (1 << 28) : (1 << 24);
  for (uptr alignment = 8; alignment <= max_alignment; alignment *= 2) {
    const uptr kNumAlignedAllocs = 100;
    for (uptr i = 0; i < kNumAlignedAllocs; i++) {
      uptr size = ((i % 10) + 1) * 4096;
      char *p = allocated[i] = (char *)a.Allocate(&stats, size, alignment);
      CHECK_EQ(p, a.GetBlockBegin(p));
      CHECK_EQ(p, a.GetBlockBegin(p + size - 1));
      CHECK_EQ(p, a.GetBlockBegin(p + size / 2));
      CHECK_EQ(0, (uptr)allocated[i] % alignment);
      p[0] = p[size - 1] = 0;
    }
    for (uptr i = 0; i < kNumAlignedAllocs; i++) {
      a.Deallocate(&stats, allocated[i]);
    }
  }

  // Regression test for boundary condition in GetBlockBegin().
  uptr page_size = GetPageSizeCached();
  char *p = (char *)a.Allocate(&stats, page_size, 1);
  CHECK_EQ(p, a.GetBlockBegin(p));
  CHECK_EQ(p, (char *)a.GetBlockBegin(p + page_size - 1));
  CHECK_NE(p, (char *)a.GetBlockBegin(p + page_size));
  a.Deallocate(&stats, p);
}

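// Exercise the combined (primary + secondary) allocator: impossible requests
// must return 0, and a randomized allocate/shuffle/deallocate workload must
// keep per-chunk metadata intact.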
template
<class PrimaryAllocator, class SecondaryAllocator, class AllocatorCache>
void TestCombinedAllocator() {
  typedef
      CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator>
      Allocator;
  Allocator *a = new Allocator;
  a->Init();

  AllocatorCache cache;
  memset(&cache, 0, sizeof(cache));
  a->InitCache(&cache);

  EXPECT_EQ(a->Allocate(&cache, -1, 1), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, -1, 1024), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1024), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1023, 1024), (void*)0);

  const uptr kNumAllocs = 100000;
  const uptr kNumIter = 10;
  for (uptr iter = 0; iter < kNumIter; iter++) {
    std::vector<void*> allocated;
    for (uptr i = 0; i < kNumAllocs; i++) {
      uptr size = (i % (1 << 14)) + 1;
      if ((i % 1024) == 0)
        size = 1 << (10 + (i % 14));
      void *x = a->Allocate(&cache, size, 1);
      uptr *meta = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_EQ(*meta, 0);
      *meta = size;
      allocated.push_back(x);
    }

    random_shuffle(allocated.begin(), allocated.end());

    for (uptr i = 0; i < kNumAllocs; i++) {
      void *x = allocated[i];
      uptr *meta = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_NE(*meta, 0);
      CHECK(a->PointerIsMine(x));
      *meta = 0;
      a->Deallocate(&cache, x);
    }
    allocated.clear();
    a->SwallowCache(&cache);
  }
  a->DestroyCache(&cache);
  a->TestOnlyUnmap();
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, CombinedAllocator64) {
  TestCombinedAllocator<Allocator64,
      LargeMmapAllocator<>,
      SizeClassAllocatorLocalCache<Allocator64> > ();
}

TEST(SanitizerCommon, CombinedAllocator64Compact) {
  TestCombinedAllocator<Allocator64Compact,
      LargeMmapAllocator<>,
      SizeClassAllocatorLocalCache<Allocator64Compact> > ();
}
#endif

TEST(SanitizerCommon, CombinedAllocator32Compact) {
  TestCombinedAllocator<Allocator32Compact,
      LargeMmapAllocator<>,
      SizeClassAllocatorLocalCache<Allocator32Compact> > ();
}

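// Repeatedly fill and drain a thread-local cache and verify that the
// allocator's total memory usage stays constant once it has warmed up.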
template <class AllocatorCache>
void TestSizeClassAllocatorLocalCache() {
  AllocatorCache cache;
  typedef typename AllocatorCache::Allocator Allocator;
  Allocator *a = new Allocator();

  a->Init();
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  const uptr kNumAllocs = 10000;
  const int kNumIter = 100;
  uptr saved_total = 0;
  for (int class_id = 1; class_id <= 5; class_id++) {
    for (int it = 0; it < kNumIter; it++) {
      void *allocated[kNumAllocs];
      for (uptr i = 0; i < kNumAllocs; i++) {
        allocated[i] = cache.Allocate(a, class_id);
      }
      for (uptr i = 0; i < kNumAllocs; i++) {
        cache.Deallocate(a, class_id, allocated[i]);
      }
      cache.Drain(a);
      uptr total_allocated = a->TotalMemoryUsed();
      if (it)
        CHECK_EQ(saved_total, total_allocated);
      saved_total = total_allocated;
    }
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64LocalCache) {
  TestSizeClassAllocatorLocalCache<
      SizeClassAllocatorLocalCache<Allocator64> >();
}

TEST(SanitizerCommon, SizeClassAllocator64CompactLocalCache) {
  TestSizeClassAllocatorLocalCache<
      SizeClassAllocatorLocalCache<Allocator64Compact> >();
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32CompactLocalCache) {
  TestSizeClassAllocatorLocalCache<
      SizeClassAllocatorLocalCache<Allocator32Compact> >();
}

#if SANITIZER_WORDSIZE == 64
typedef SizeClassAllocatorLocalCache<Allocator64> AllocatorCache;
static AllocatorCache static_allocator_cache;

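// Worker for the leak test below: each short-lived thread allocates one
// chunk through the shared static cache and drains it, so the allocator's
// footprint must not grow as threads come and go.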
void *AllocatorLeakTestWorker(void *arg) {
  typedef AllocatorCache::Allocator Allocator;
  Allocator *a = (Allocator*)(arg);
  static_allocator_cache.Allocate(a, 10);
  static_allocator_cache.Drain(a);
  return 0;
}

TEST(SanitizerCommon, AllocatorLeakTest) {
  typedef AllocatorCache::Allocator Allocator;
  Allocator a;
  a.Init();
  uptr total_used_memory = 0;
  for (int i = 0; i < 100; i++) {
    pthread_t t;
    EXPECT_EQ(0, pthread_create(&t, 0, AllocatorLeakTestWorker, &a));
    EXPECT_EQ(0, pthread_join(t, 0));
    if (i == 0)
      total_used_memory = a.TotalMemoryUsed();
    EXPECT_EQ(a.TotalMemoryUsed(), total_used_memory);
  }

  a.TestOnlyUnmap();
}

// Struct which is allocated to pass info to new threads.  The new thread frees
// it.
struct NewThreadParams {
  AllocatorCache *thread_cache;
  AllocatorCache::Allocator *allocator;
  uptr class_id;
};

// Called in a new thread.  Just frees its argument.
static void *DeallocNewThreadWorker(void *arg) {
  NewThreadParams *params = reinterpret_cast<NewThreadParams*>(arg);
  params->thread_cache->Deallocate(params->allocator, params->class_id, params);
  return NULL;
}

// The allocator cache is supposed to be POD and zero initialized.  We should be
// able to call Deallocate on a zeroed cache, and it will self-initialize.
TEST(Allocator, AllocatorCacheDeallocNewThread) {
  AllocatorCache::Allocator allocator;
  allocator.Init();
  AllocatorCache main_cache;
  AllocatorCache child_cache;
  memset(&main_cache, 0, sizeof(main_cache));
  memset(&child_cache, 0, sizeof(child_cache));

  uptr class_id = DefaultSizeClassMap::ClassID(sizeof(NewThreadParams));
  NewThreadParams *params = reinterpret_cast<NewThreadParams*>(
      main_cache.Allocate(&allocator, class_id));
  params->thread_cache = &child_cache;
  params->allocator = &allocator;
  params->class_id = class_id;
  pthread_t t;
  EXPECT_EQ(0, pthread_create(&t, 0, DeallocNewThreadWorker, params));
  EXPECT_EQ(0, pthread_join(t, 0));
}
#endif

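// Smoke tests for the internal allocator (InternalAlloc/InternalFree) used
// by the sanitizer runtime itself.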
TEST(Allocator, Basic) {
  char *p = (char*)InternalAlloc(10);
  EXPECT_NE(p, (char*)0);
  char *p2 = (char*)InternalAlloc(20);
  EXPECT_NE(p2, (char*)0);
  EXPECT_NE(p2, p);
  InternalFree(p);
  InternalFree(p2);
}

TEST(Allocator, Stress) {
  const int kCount = 1000;
  char *ptrs[kCount];
  unsigned rnd = 42;
  for (int i = 0; i < kCount; i++) {
    uptr sz = my_rand_r(&rnd) % 1000;
    char *p = (char*)InternalAlloc(sz);
    EXPECT_NE(p, (char*)0);
    ptrs[i] = p;
  }
  for (int i = 0; i < kCount; i++) {
    InternalFree(ptrs[i]);
  }
}

TEST(Allocator, ScopedBuffer) {
  const int kSize = 512;
  {
    InternalScopedBuffer<int> int_buf(kSize);
    EXPECT_EQ(sizeof(int) * kSize, int_buf.size());  // NOLINT
  }
  InternalScopedBuffer<char> char_buf(kSize);
  EXPECT_EQ(sizeof(char) * kSize, char_buf.size());  // NOLINT
  internal_memset(char_buf.data(), 'c', kSize);
  for (int i = 0; i < kSize; i++) {
    EXPECT_EQ('c', char_buf[i]);
  }
}

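// ForEachChunk() callback that records every chunk reported by the
// allocator.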
class IterationTestCallback {
 public:
  explicit IterationTestCallback(std::set<void *> *chunks)
    : chunks_(chunks) {}
  void operator()(void *chunk) const {
    chunks_->insert(chunk);
  }
 private:
  std::set<void *> *chunks_;
};

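// Check that ForEachChunk() (called under ForceLock) reports every
// currently allocated chunk.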
template <class Allocator>
void TestSizeClassAllocatorIteration() {
  Allocator *a = new Allocator;
  a->Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,
    50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000};

  std::vector<void *> allocated;

  // Allocate a bunch of chunks.
  for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) {
    uptr size = sizes[s];
    if (!a->CanAllocate(size, 1)) continue;
    // printf("s = %ld\n", size);
    uptr n_iter = std::max((uptr)6, 80000 / size);
    // fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter);
    for (uptr j = 0; j < n_iter; j++) {
      uptr class_id0 = Allocator::SizeClassMapT::ClassID(size);
      void *x = cache.Allocate(a, class_id0);
      allocated.push_back(x);
    }
  }

  std::set<void *> reported_chunks;
  IterationTestCallback callback(&reported_chunks);
  a->ForceLock();
  a->ForEachChunk(callback);
  a->ForceUnlock();

  for (uptr i = 0; i < allocated.size(); i++) {
    // Don't use EXPECT_NE. Reporting the first mismatch is enough.
    ASSERT_NE(reported_chunks.find(allocated[i]), reported_chunks.end());
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64Iteration) {
  TestSizeClassAllocatorIteration<Allocator64>();
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32Iteration) {
  TestSizeClassAllocatorIteration<Allocator32Compact>();
}

TEST(SanitizerCommon, LargeMmapAllocatorIteration) {
  LargeMmapAllocator<> a;
  a.Init();
  AllocatorStats stats;
  stats.Init();

  static const uptr kNumAllocs = 1000;
  char *allocated[kNumAllocs];
  static const uptr size = 40;
  // Allocate some.
  for (uptr i = 0; i < kNumAllocs; i++) {
    allocated[i] = (char *)a.Allocate(&stats, size, 1);
  }

  std::set<void *> reported_chunks;
  IterationTestCallback callback(&reported_chunks);
  a.ForceLock();
  a.ForEachChunk(callback);
  a.ForceUnlock();

  for (uptr i = 0; i < kNumAllocs; i++) {
    // Don't use EXPECT_NE. Reporting the first mismatch is enough.
    ASSERT_NE(reported_chunks.find(allocated[i]), reported_chunks.end());
  }
}

#endif  // #if TSAN_DEBUG==0