//===-- sanitizer_allocator_test.cc ---------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
// Tests for sanitizer_allocator.h.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_common.h"

#include "sanitizer_test_utils.h"

#include "gtest/gtest.h"

#include <stdlib.h>
#include <pthread.h>
#include <algorithm>
#include <vector>
#include <set>

// Too slow for debug build
#if TSAN_DEBUG == 0

#if SANITIZER_WORDSIZE == 64
static const uptr kAllocatorSpace = 0x700000000000ULL;
static const uptr kAllocatorSize  = 0x010000000000ULL;  // 1T.
static const u64 kAddressSpaceSize = 1ULL << 47;

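// The SizeClassAllocator64 template parameters below are, in order: the fixed
// base address of the allocator region, the region size, the per-chunk
// metadata size (16 bytes here, enough for the two uptr metadata words the
// tests write), and the size class map.  An optional MapUnmapCallback
// parameter is exercised by the *MapUnmapCallback tests further down.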
typedef SizeClassAllocator64<
  kAllocatorSpace, kAllocatorSize, 16, DefaultSizeClassMap> Allocator64;

typedef SizeClassAllocator64<
  kAllocatorSpace, kAllocatorSize, 16, CompactSizeClassMap> Allocator64Compact;
#else
static const u64 kAddressSpaceSize = 1ULL << 32;
#endif

typedef SizeClassAllocator32<
  0, kAddressSpaceSize, 16, CompactSizeClassMap> Allocator32Compact;

template <class SizeClassMap>
void TestSizeClassMap() {
  typedef SizeClassMap SCMap;
  // SCMap::Print();
  SCMap::Validate();
}

TEST(SanitizerCommon, DefaultSizeClassMap) {
  TestSizeClassMap<DefaultSizeClassMap>();
}

TEST(SanitizerCommon, CompactSizeClassMap) {
  TestSizeClassMap<CompactSizeClassMap>();
}

template <class Allocator>
void TestSizeClassAllocator() {
  Allocator *a = new Allocator;
  a->Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,
    50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000};

  std::vector<void *> allocated;

  uptr last_total_allocated = 0;
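  // Run several allocate/deallocate cycles; after the first cycle the
  // allocator's total memory use is expected to stay constant, i.e. freed
  // chunks are reused rather than leaked.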
  for (int i = 0; i < 3; i++) {
    // Allocate a bunch of chunks.
    for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) {
      uptr size = sizes[s];
      if (!a->CanAllocate(size, 1)) continue;
      // printf("s = %ld\n", size);
      uptr n_iter = std::max((uptr)6, 8000000 / size);
      // fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter);
      for (uptr i = 0; i < n_iter; i++) {
        uptr class_id0 = Allocator::SizeClassMapT::ClassID(size);
        char *x = (char*)cache.Allocate(a, class_id0);
        x[0] = 0;
        x[size - 1] = 0;
        x[size / 2] = 0;
        allocated.push_back(x);
        CHECK_EQ(x, a->GetBlockBegin(x));
        CHECK_EQ(x, a->GetBlockBegin(x + size - 1));
        CHECK(a->PointerIsMine(x));
        CHECK(a->PointerIsMine(x + size - 1));
        CHECK(a->PointerIsMine(x + size / 2));
        CHECK_GE(a->GetActuallyAllocatedSize(x), size);
        uptr class_id = a->GetSizeClass(x);
        CHECK_EQ(class_id, Allocator::SizeClassMapT::ClassID(size));
        uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));
        metadata[0] = reinterpret_cast<uptr>(x) + 1;
        metadata[1] = 0xABCD;
      }
    }
    // Deallocate all.
    for (uptr i = 0; i < allocated.size(); i++) {
      void *x = allocated[i];
      uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_EQ(metadata[0], reinterpret_cast<uptr>(x) + 1);
      CHECK_EQ(metadata[1], 0xABCD);
      cache.Deallocate(a, a->GetSizeClass(x), x);
    }
    allocated.clear();
    uptr total_allocated = a->TotalMemoryUsed();
    if (last_total_allocated == 0)
      last_total_allocated = total_allocated;
    CHECK_EQ(last_total_allocated, total_allocated);
  }

  // Check that GetBlockBegin never crashes.
  for (uptr x = 0, step = kAddressSpaceSize / 100000;
       x < kAddressSpaceSize - step; x += step)
    if (a->PointerIsMine(reinterpret_cast<void *>(x)))
      Ident(a->GetBlockBegin(reinterpret_cast<void *>(x)));

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64) {
  TestSizeClassAllocator<Allocator64>();
}

TEST(SanitizerCommon, SizeClassAllocator64Compact) {
  TestSizeClassAllocator<Allocator64Compact>();
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32Compact) {
  TestSizeClassAllocator<Allocator32Compact>();
}

template <class Allocator>
void SizeClassAllocatorMetadataStress() {
  Allocator *a = new Allocator;
  a->Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  static volatile void *sink;

  const uptr kNumAllocs = 10000;
  void *allocated[kNumAllocs];
  for (uptr i = 0; i < kNumAllocs; i++) {
    void *x = cache.Allocate(a, 1 + i % 50);
    allocated[i] = x;
  }
  // Get Metadata kNumAllocs^2 times.
  for (uptr i = 0; i < kNumAllocs * kNumAllocs; i++) {
    sink = a->GetMetaData(allocated[i % kNumAllocs]);
  }
  for (uptr i = 0; i < kNumAllocs; i++) {
    cache.Deallocate(a, 1 + i % 50, allocated[i]);
  }

  a->TestOnlyUnmap();
  (void)sink;
  delete a;
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64MetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator64>();
}

TEST(SanitizerCommon, SizeClassAllocator64CompactMetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator64Compact>();
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32CompactMetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator32Compact>();
}

struct TestMapUnmapCallback {
  static int map_count, unmap_count;
  void OnMap(uptr p, uptr size) const { map_count++; }
  void OnUnmap(uptr p, uptr size) const { unmap_count++; }
};
int TestMapUnmapCallback::map_count;
int TestMapUnmapCallback::unmap_count;

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64MapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  typedef SizeClassAllocator64<
      kAllocatorSpace, kAllocatorSize, 16, DefaultSizeClassMap,
      TestMapUnmapCallback> Allocator64WithCallBack;
  Allocator64WithCallBack *a = new Allocator64WithCallBack;
  a->Init();
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);  // Allocator state.
  SizeClassAllocatorLocalCache<Allocator64WithCallBack> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();
  a->AllocateBatch(&stats, &cache, 32);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 3);  // State + alloc + metadata.
  a->TestOnlyUnmap();
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);  // The whole thing.
  delete a;
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32MapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  typedef SizeClassAllocator32<
      0, kAddressSpaceSize, 16, CompactSizeClassMap,
      TestMapUnmapCallback> Allocator32WithCallBack;
  Allocator32WithCallBack *a = new Allocator32WithCallBack;
  a->Init();
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);  // Allocator state.
  SizeClassAllocatorLocalCache<Allocator32WithCallBack> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();
  a->AllocateBatch(&stats, &cache, 32);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 2);  // alloc.
  a->TestOnlyUnmap();
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 2);  // The whole thing + alloc.
  delete a;
  // fprintf(stderr, "Map: %d Unmap: %d\n",
  //         TestMapUnmapCallback::map_count,
  //         TestMapUnmapCallback::unmap_count);
}

TEST(SanitizerCommon, LargeMmapAllocatorMapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  LargeMmapAllocator<TestMapUnmapCallback> a;
  a.Init();
  AllocatorStats stats;
  stats.Init();
  void *x = a.Allocate(&stats, 1 << 20, 1);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);
  a.Deallocate(&stats, x);
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);
}

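// Keep allocating batches from a single large size class until the allocator
// exhausts its space; it is expected to die with a CHECK failure containing
// "Out of memory" (see the EXPECT_DEATH test below).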
template <class Allocator>
void FailInAssertionOnOOM() {
  Allocator a;
  a.Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();
  for (int i = 0; i < 1000000; i++) {
    a.AllocateBatch(&stats, &cache, 52);
  }

  a.TestOnlyUnmap();
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64Overflow) {
  EXPECT_DEATH(FailInAssertionOnOOM<Allocator64>(), "Out of memory");
}
#endif

TEST(SanitizerCommon, LargeMmapAllocator) {
  LargeMmapAllocator<> a;
  a.Init();
  AllocatorStats stats;
  stats.Init();

  static const int kNumAllocs = 1000;
  char *allocated[kNumAllocs];
  static const uptr size = 4000;
  // Allocate some.
  for (int i = 0; i < kNumAllocs; i++) {
    allocated[i] = (char *)a.Allocate(&stats, size, 1);
    CHECK(a.PointerIsMine(allocated[i]));
  }
  // Deallocate all.
  CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
  for (int i = 0; i < kNumAllocs; i++) {
    char *p = allocated[i];
    CHECK(a.PointerIsMine(p));
    a.Deallocate(&stats, p);
  }
  // Check that none are left.
  CHECK_EQ(a.TotalMemoryUsed(), 0);

  // Allocate some more, also add metadata.
  for (int i = 0; i < kNumAllocs; i++) {
    char *x = (char *)a.Allocate(&stats, size, 1);
    CHECK_GE(a.GetActuallyAllocatedSize(x), size);
    uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(x));
    *meta = i;
    allocated[i] = x;
  }
  for (int i = 0; i < kNumAllocs * kNumAllocs; i++) {
    char *p = allocated[i % kNumAllocs];
    CHECK(a.PointerIsMine(p));
    CHECK(a.PointerIsMine(p + 2000));
  }
  CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
  // Deallocate all in reverse order.
  for (int i = 0; i < kNumAllocs; i++) {
    int idx = kNumAllocs - i - 1;
    char *p = allocated[idx];
    uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(p));
    CHECK_EQ(*meta, idx);
    CHECK(a.PointerIsMine(p));
    a.Deallocate(&stats, p);
  }
  CHECK_EQ(a.TotalMemoryUsed(), 0);

  // Test alignments.
  uptr max_alignment = SANITIZER_WORDSIZE == 64 ? (1 << 28) : (1 << 24);
  for (uptr alignment = 8; alignment <= max_alignment; alignment *= 2) {
    const uptr kNumAlignedAllocs = 100;
    for (uptr i = 0; i < kNumAlignedAllocs; i++) {
      uptr size = ((i % 10) + 1) * 4096;
      char *p = allocated[i] = (char *)a.Allocate(&stats, size, alignment);
      CHECK_EQ(p, a.GetBlockBegin(p));
      CHECK_EQ(p, a.GetBlockBegin(p + size - 1));
      CHECK_EQ(p, a.GetBlockBegin(p + size / 2));
      CHECK_EQ(0, (uptr)allocated[i] % alignment);
      p[0] = p[size - 1] = 0;
    }
    for (uptr i = 0; i < kNumAlignedAllocs; i++) {
      a.Deallocate(&stats, allocated[i]);
    }
  }

  // Regression test for boundary condition in GetBlockBegin().
  uptr page_size = GetPageSizeCached();
  char *p = (char *)a.Allocate(&stats, page_size, 1);
  CHECK_EQ(p, a.GetBlockBegin(p));
  CHECK_EQ(p, (char *)a.GetBlockBegin(p + page_size - 1));
  CHECK_NE(p, (char *)a.GetBlockBegin(p + page_size));
  a.Deallocate(&stats, p);
}

template
<class PrimaryAllocator, class SecondaryAllocator, class AllocatorCache>
void TestCombinedAllocator() {
  typedef
      CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator>
      Allocator;
  Allocator *a = new Allocator;
  a->Init();

  AllocatorCache cache;
  memset(&cache, 0, sizeof(cache));
  a->InitCache(&cache);

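  // Bogus requests (huge sizes, or a size that overflows once rounded up to
  // the alignment) must fail gracefully and return 0.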
  EXPECT_EQ(a->Allocate(&cache, -1, 1), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, -1, 1024), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1024), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1023, 1024), (void*)0);

  const uptr kNumAllocs = 100000;
  const uptr kNumIter = 10;
  for (uptr iter = 0; iter < kNumIter; iter++) {
    std::vector<void*> allocated;
    for (uptr i = 0; i < kNumAllocs; i++) {
      uptr size = (i % (1 << 14)) + 1;
      if ((i % 1024) == 0)
        size = 1 << (10 + (i % 14));
      void *x = a->Allocate(&cache, size, 1);
      uptr *meta = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_EQ(*meta, 0);
      *meta = size;
      allocated.push_back(x);
    }

    random_shuffle(allocated.begin(), allocated.end());

    for (uptr i = 0; i < kNumAllocs; i++) {
      void *x = allocated[i];
      uptr *meta = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_NE(*meta, 0);
      CHECK(a->PointerIsMine(x));
      *meta = 0;
      a->Deallocate(&cache, x);
    }
    allocated.clear();
    a->SwallowCache(&cache);
  }
  a->DestroyCache(&cache);
  a->TestOnlyUnmap();
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, CombinedAllocator64) {
  TestCombinedAllocator<Allocator64,
      LargeMmapAllocator<>,
      SizeClassAllocatorLocalCache<Allocator64> > ();
}

TEST(SanitizerCommon, CombinedAllocator64Compact) {
  TestCombinedAllocator<Allocator64Compact,
      LargeMmapAllocator<>,
      SizeClassAllocatorLocalCache<Allocator64Compact> > ();
}
#endif

TEST(SanitizerCommon, CombinedAllocator32Compact) {
  TestCombinedAllocator<Allocator32Compact,
      LargeMmapAllocator<>,
      SizeClassAllocatorLocalCache<Allocator32Compact> > ();
}

template <class AllocatorCache>
void TestSizeClassAllocatorLocalCache() {
  AllocatorCache cache;
  typedef typename AllocatorCache::Allocator Allocator;
  Allocator *a = new Allocator();

  a->Init();
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  const uptr kNumAllocs = 10000;
  const int kNumIter = 100;
  uptr saved_total = 0;
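  // For each size class, repeatedly allocate and free a batch of chunks and
  // drain the cache; the allocator's memory use must not grow between
  // iterations.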
  for (int class_id = 1; class_id <= 5; class_id++) {
    for (int it = 0; it < kNumIter; it++) {
      void *allocated[kNumAllocs];
      for (uptr i = 0; i < kNumAllocs; i++) {
        allocated[i] = cache.Allocate(a, class_id);
      }
      for (uptr i = 0; i < kNumAllocs; i++) {
        cache.Deallocate(a, class_id, allocated[i]);
      }
      cache.Drain(a);
      uptr total_allocated = a->TotalMemoryUsed();
      if (it)
        CHECK_EQ(saved_total, total_allocated);
      saved_total = total_allocated;
    }
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64LocalCache) {
  TestSizeClassAllocatorLocalCache<
      SizeClassAllocatorLocalCache<Allocator64> >();
}

TEST(SanitizerCommon, SizeClassAllocator64CompactLocalCache) {
  TestSizeClassAllocatorLocalCache<
      SizeClassAllocatorLocalCache<Allocator64Compact> >();
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32CompactLocalCache) {
  TestSizeClassAllocatorLocalCache<
      SizeClassAllocatorLocalCache<Allocator32Compact> >();
}

#if SANITIZER_WORDSIZE == 64
typedef SizeClassAllocatorLocalCache<Allocator64> AllocatorCache;
static AllocatorCache static_allocator_cache;

void *AllocatorLeakTestWorker(void *arg) {
  typedef AllocatorCache::Allocator Allocator;
  Allocator *a = (Allocator*)(arg);
  static_allocator_cache.Allocate(a, 10);
  static_allocator_cache.Drain(a);
  return 0;
}

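// Each iteration spawns a short-lived thread that allocates one chunk through
// the static cache and drains the cache back to the allocator; repeated
// thread creation must not increase the allocator's total memory use.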
TEST(SanitizerCommon, AllocatorLeakTest) {
  typedef AllocatorCache::Allocator Allocator;
  Allocator a;
  a.Init();
  uptr total_used_memory = 0;
  for (int i = 0; i < 100; i++) {
    pthread_t t;
    EXPECT_EQ(0, pthread_create(&t, 0, AllocatorLeakTestWorker, &a));
    EXPECT_EQ(0, pthread_join(t, 0));
    if (i == 0)
      total_used_memory = a.TotalMemoryUsed();
    EXPECT_EQ(a.TotalMemoryUsed(), total_used_memory);
  }

  a.TestOnlyUnmap();
}

// Struct which is allocated to pass info to new threads.  The new thread frees
// it.
struct NewThreadParams {
  AllocatorCache *thread_cache;
  AllocatorCache::Allocator *allocator;
  uptr class_id;
};

// Called in a new thread.  Just frees its argument.
static void *DeallocNewThreadWorker(void *arg) {
  NewThreadParams *params = reinterpret_cast<NewThreadParams*>(arg);
  params->thread_cache->Deallocate(params->allocator, params->class_id, params);
  return NULL;
}

// The allocator cache is supposed to be POD and zero initialized.  We should be
// able to call Deallocate on a zeroed cache, and it will self-initialize.
TEST(Allocator, AllocatorCacheDeallocNewThread) {
  AllocatorCache::Allocator allocator;
  allocator.Init();
  AllocatorCache main_cache;
  AllocatorCache child_cache;
  memset(&main_cache, 0, sizeof(main_cache));
  memset(&child_cache, 0, sizeof(child_cache));

  uptr class_id = DefaultSizeClassMap::ClassID(sizeof(NewThreadParams));
  NewThreadParams *params = reinterpret_cast<NewThreadParams*>(
      main_cache.Allocate(&allocator, class_id));
  params->thread_cache = &child_cache;
  params->allocator = &allocator;
  params->class_id = class_id;
  pthread_t t;
  EXPECT_EQ(0, pthread_create(&t, 0, DeallocNewThreadWorker, params));
  EXPECT_EQ(0, pthread_join(t, 0));
}
#endif

TEST(Allocator, Basic) {
  char *p = (char*)InternalAlloc(10);
  EXPECT_NE(p, (char*)0);
  char *p2 = (char*)InternalAlloc(20);
  EXPECT_NE(p2, (char*)0);
  EXPECT_NE(p2, p);
  InternalFree(p);
  InternalFree(p2);
}

TEST(Allocator, Stress) {
  const int kCount = 1000;
  char *ptrs[kCount];
  unsigned rnd = 42;
  for (int i = 0; i < kCount; i++) {
    uptr sz = my_rand_r(&rnd) % 1000;
    char *p = (char*)InternalAlloc(sz);
    EXPECT_NE(p, (char*)0);
    ptrs[i] = p;
  }
  for (int i = 0; i < kCount; i++) {
    InternalFree(ptrs[i]);
  }
}

TEST(Allocator, ScopedBuffer) {
  const int kSize = 512;
  {
    InternalScopedBuffer<int> int_buf(kSize);
    EXPECT_EQ(sizeof(int) * kSize, int_buf.size());  // NOLINT
  }
  InternalScopedBuffer<char> char_buf(kSize);
  EXPECT_EQ(sizeof(char) * kSize, char_buf.size());  // NOLINT
  internal_memset(char_buf.data(), 'c', kSize);
  for (int i = 0; i < kSize; i++) {
    EXPECT_EQ('c', char_buf[i]);
  }
}

class IterationTestCallback {
 public:
  explicit IterationTestCallback(std::set<void *> *chunks)
    : chunks_(chunks) {}
  void operator()(void *chunk) const {
    chunks_->insert(chunk);
  }
 private:
  std::set<void *> *chunks_;
};

template <class Allocator>
void TestSizeClassAllocatorIteration() {
  Allocator *a = new Allocator;
  a->Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,
    50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000};

  std::vector<void *> allocated;

  // Allocate a bunch of chunks.
  for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) {
    uptr size = sizes[s];
    if (!a->CanAllocate(size, 1)) continue;
    // printf("s = %ld\n", size);
    uptr n_iter = std::max((uptr)6, 80000 / size);
    // fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter);
    for (uptr j = 0; j < n_iter; j++) {
      uptr class_id0 = Allocator::SizeClassMapT::ClassID(size);
      void *x = cache.Allocate(a, class_id0);
      allocated.push_back(x);
    }
  }

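  // Collect all chunks reported by ForEachChunk (under ForceLock/ForceUnlock)
  // and check that every allocated pointer was reported.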
  std::set<void *> reported_chunks;
  IterationTestCallback callback(&reported_chunks);
  a->ForceLock();
  a->ForEachChunk(callback);
  a->ForceUnlock();

  for (uptr i = 0; i < allocated.size(); i++) {
    // Don't use EXPECT_NE. Reporting the first mismatch is enough.
    ASSERT_NE(reported_chunks.find(allocated[i]), reported_chunks.end());
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64Iteration) {
  TestSizeClassAllocatorIteration<Allocator64>();
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32Iteration) {
  TestSizeClassAllocatorIteration<Allocator32Compact>();
}

TEST(SanitizerCommon, LargeMmapAllocatorIteration) {
  LargeMmapAllocator<> a;
  a.Init();
  AllocatorStats stats;
  stats.Init();

  static const uptr kNumAllocs = 1000;
  char *allocated[kNumAllocs];
  static const uptr size = 40;
  // Allocate some.
  for (uptr i = 0; i < kNumAllocs; i++) {
    allocated[i] = (char *)a.Allocate(&stats, size, 1);
  }

  std::set<void *> reported_chunks;
  IterationTestCallback callback(&reported_chunks);
  a.ForceLock();
  a.ForEachChunk(callback);
  a.ForceUnlock();

  for (uptr i = 0; i < kNumAllocs; i++) {
    // Don't use EXPECT_NE. Reporting the first mismatch is enough.
    ASSERT_NE(reported_chunks.find(allocated[i]), reported_chunks.end());
  }
}

#endif  // #if TSAN_DEBUG==0