//===-- sanitizer_allocator_test.cc ---------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
// Tests for sanitizer_allocator.h.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_common.h"

#include "sanitizer_test_utils.h"

#include "gtest/gtest.h"

#include <stdlib.h>
#include <pthread.h>
#include <algorithm>
#include <vector>
#include <set>

// Too slow for debug build
#if TSAN_DEBUG == 0

#if SANITIZER_WORDSIZE == 64
static const uptr kAllocatorSpace = 0x700000000000ULL;
static const uptr kAllocatorSize  = 0x010000000000ULL;  // 1T.
static const u64 kAddressSpaceSize = 1ULL << 47;

typedef SizeClassAllocator64<
  kAllocatorSpace, kAllocatorSize, 16, DefaultSizeClassMap> Allocator64;

typedef SizeClassAllocator64<
  kAllocatorSpace, kAllocatorSize, 16, CompactSizeClassMap> Allocator64Compact;
#else
static const u64 kAddressSpaceSize = 1ULL << 32;
#endif

static const uptr kRegionSizeLog = FIRST_32_SECOND_64(20, 24);
static const uptr kFlatByteMapSize = kAddressSpaceSize >> kRegionSizeLog;

typedef SizeClassAllocator32<
  0, kAddressSpaceSize,
  /*kMetadataSize*/16,
  CompactSizeClassMap,
  kRegionSizeLog,
  FlatByteMap<kFlatByteMapSize> >
  Allocator32Compact;

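// Runs the size class map's own consistency checks; Validate() CHECK-fails
// if the size <-> class ID mapping is broken.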
template <class SizeClassMap>
void TestSizeClassMap() {
  typedef SizeClassMap SCMap;
  // SCMap::Print();
  SCMap::Validate();
}

TEST(SanitizerCommon, DefaultSizeClassMap) {
  TestSizeClassMap<DefaultSizeClassMap>();
}

TEST(SanitizerCommon, CompactSizeClassMap) {
  TestSizeClassMap<CompactSizeClassMap>();
}

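// Generic stress test for a size class allocator: allocates chunks of many
// sizes through a local cache, verifies GetBlockBegin(), PointerIsMine(),
// GetSizeClass() and metadata round-trips, then frees everything and checks
// that total memory usage stays flat across iterations.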
template <class Allocator>
void TestSizeClassAllocator() {
  Allocator *a = new Allocator;
  a->Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,
    50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000};

  std::vector<void *> allocated;

  uptr last_total_allocated = 0;
  for (int i = 0; i < 3; i++) {
    // Allocate a bunch of chunks.
    for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) {
      uptr size = sizes[s];
      if (!a->CanAllocate(size, 1)) continue;
      // printf("s = %ld\n", size);
      uptr n_iter = std::max((uptr)6, 8000000 / size);
      // fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter);
      for (uptr i = 0; i < n_iter; i++) {
        uptr class_id0 = Allocator::SizeClassMapT::ClassID(size);
        char *x = (char*)cache.Allocate(a, class_id0);
        x[0] = 0;
        x[size - 1] = 0;
        x[size / 2] = 0;
        allocated.push_back(x);
        CHECK_EQ(x, a->GetBlockBegin(x));
        CHECK_EQ(x, a->GetBlockBegin(x + size - 1));
        CHECK(a->PointerIsMine(x));
        CHECK(a->PointerIsMine(x + size - 1));
        CHECK(a->PointerIsMine(x + size / 2));
        CHECK_GE(a->GetActuallyAllocatedSize(x), size);
        uptr class_id = a->GetSizeClass(x);
        CHECK_EQ(class_id, Allocator::SizeClassMapT::ClassID(size));
        uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));
        metadata[0] = reinterpret_cast<uptr>(x) + 1;
        metadata[1] = 0xABCD;
      }
    }
    // Deallocate all.
    for (uptr i = 0; i < allocated.size(); i++) {
      void *x = allocated[i];
      uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_EQ(metadata[0], reinterpret_cast<uptr>(x) + 1);
      CHECK_EQ(metadata[1], 0xABCD);
      cache.Deallocate(a, a->GetSizeClass(x), x);
    }
    allocated.clear();
    uptr total_allocated = a->TotalMemoryUsed();
    if (last_total_allocated == 0)
      last_total_allocated = total_allocated;
    CHECK_EQ(last_total_allocated, total_allocated);
  }

  // Check that GetBlockBegin never crashes.
  for (uptr x = 0, step = kAddressSpaceSize / 100000;
       x < kAddressSpaceSize - step; x += step)
    if (a->PointerIsMine(reinterpret_cast<void *>(x)))
      Ident(a->GetBlockBegin(reinterpret_cast<void *>(x)));

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64) {
  TestSizeClassAllocator<Allocator64>();
}

TEST(SanitizerCommon, SizeClassAllocator64Compact) {
  TestSizeClassAllocator<Allocator64Compact>();
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32Compact) {
  TestSizeClassAllocator<Allocator32Compact>();
}

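// Checks that GetMetaData() keeps returning the same pointer for a chunk,
// no matter how many times it is queried.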
template <class Allocator>
void SizeClassAllocatorMetadataStress() {
  Allocator *a = new Allocator;
  a->Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  const uptr kNumAllocs = 1 << 13;
  void *allocated[kNumAllocs];
  void *meta[kNumAllocs];
  for (uptr i = 0; i < kNumAllocs; i++) {
    void *x = cache.Allocate(a, 1 + i % 50);
    allocated[i] = x;
    meta[i] = a->GetMetaData(x);
  }
  // Get Metadata kNumAllocs^2 times.
  for (uptr i = 0; i < kNumAllocs * kNumAllocs; i++) {
    uptr idx = i % kNumAllocs;
    void *m = a->GetMetaData(allocated[idx]);
    EXPECT_EQ(m, meta[idx]);
  }
  for (uptr i = 0; i < kNumAllocs; i++) {
    cache.Deallocate(a, 1 + i % 50, allocated[i]);
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64MetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator64>();
}

TEST(SanitizerCommon, SizeClassAllocator64CompactMetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator64Compact>();
}
#endif  // SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator32CompactMetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator32Compact>();
}

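// Allocates about 8GB worth of chunks from the largest size class to make
// sure GetBlockBegin() is computed without integer overflow.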
template <class Allocator>
void SizeClassAllocatorGetBlockBeginStress() {
  Allocator *a = new Allocator;
  a->Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  uptr max_size_class = Allocator::kNumClasses - 1;
  uptr size = Allocator::SizeClassMapT::Size(max_size_class);
  u64 G8 = 1ULL << 33;
  // Make sure we correctly compute GetBlockBegin() w/o overflow.
  for (size_t i = 0; i <= G8 / size; i++) {
    void *x = cache.Allocate(a, max_size_class);
    void *beg = a->GetBlockBegin(x);
    // if ((i & (i - 1)) == 0)
    //   fprintf(stderr, "[%zd] %p %p\n", i, x, beg);
    EXPECT_EQ(x, beg);
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64GetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator64>();
}
TEST(SanitizerCommon, SizeClassAllocator64CompactGetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator64Compact>();
}
TEST(SanitizerCommon, SizeClassAllocator32CompactGetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator32Compact>();
}
#endif  // SANITIZER_WORDSIZE == 64

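// Map/unmap callback that simply counts how many times it was invoked.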
struct TestMapUnmapCallback {
  static int map_count, unmap_count;
  void OnMap(uptr p, uptr size) const { map_count++; }
  void OnUnmap(uptr p, uptr size) const { unmap_count++; }
};
int TestMapUnmapCallback::map_count;
int TestMapUnmapCallback::unmap_count;

#if SANITIZER_WORDSIZE == 64
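// Verifies the number of map/unmap notifications issued by
// SizeClassAllocator64 during Init(), a batch allocation, and TestOnlyUnmap().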
TEST(SanitizerCommon, SizeClassAllocator64MapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  typedef SizeClassAllocator64<
      kAllocatorSpace, kAllocatorSize, 16, DefaultSizeClassMap,
      TestMapUnmapCallback> Allocator64WithCallBack;
  Allocator64WithCallBack *a = new Allocator64WithCallBack;
  a->Init();
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);  // Allocator state.
  SizeClassAllocatorLocalCache<Allocator64WithCallBack> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();
  a->AllocateBatch(&stats, &cache, 32);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 3);  // State + alloc + metadata.
  a->TestOnlyUnmap();
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);  // The whole thing.
  delete a;
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32MapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  typedef SizeClassAllocator32<
      0, kAddressSpaceSize,
      /*kMetadataSize*/16,
      CompactSizeClassMap,
      kRegionSizeLog,
      FlatByteMap<kFlatByteMapSize>,
      TestMapUnmapCallback>
    Allocator32WithCallBack;
  Allocator32WithCallBack *a = new Allocator32WithCallBack;
  a->Init();
  EXPECT_EQ(TestMapUnmapCallback::map_count, 0);
  SizeClassAllocatorLocalCache<Allocator32WithCallBack> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();
  a->AllocateBatch(&stats, &cache, 32);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);
  a->TestOnlyUnmap();
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);
  delete a;
  // fprintf(stderr, "Map: %d Unmap: %d\n",
  //         TestMapUnmapCallback::map_count,
  //         TestMapUnmapCallback::unmap_count);
}

TEST(SanitizerCommon, LargeMmapAllocatorMapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  LargeMmapAllocator<TestMapUnmapCallback> a;
  a.Init();
  AllocatorStats stats;
  stats.Init();
  void *x = a.Allocate(&stats, 1 << 20, 1);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);
  a.Deallocate(&stats, x);
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);
}

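// Allocates batches from a single size class until the allocator runs out of
// space; callers expect this to die with an out-of-memory check.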
template<class Allocator>
void FailInAssertionOnOOM() {
  Allocator a;
  a.Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();
  for (int i = 0; i < 1000000; i++) {
    a.AllocateBatch(&stats, &cache, 52);
  }

  a.TestOnlyUnmap();
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64Overflow) {
  EXPECT_DEATH(FailInAssertionOnOOM<Allocator64>(), "Out of memory");
}
#endif

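// End-to-end test for the secondary allocator: plain allocations, metadata,
// large alignments, and a GetBlockBegin() boundary case.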
TEST(SanitizerCommon, LargeMmapAllocator) {
  LargeMmapAllocator<> a;
  a.Init();
  AllocatorStats stats;
  stats.Init();

  static const int kNumAllocs = 1000;
  char *allocated[kNumAllocs];
  static const uptr size = 4000;
  // Allocate some.
  for (int i = 0; i < kNumAllocs; i++) {
    allocated[i] = (char *)a.Allocate(&stats, size, 1);
    CHECK(a.PointerIsMine(allocated[i]));
  }
  // Deallocate all.
  CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
  for (int i = 0; i < kNumAllocs; i++) {
    char *p = allocated[i];
    CHECK(a.PointerIsMine(p));
    a.Deallocate(&stats, p);
  }
  // Check that none are left.
  CHECK_EQ(a.TotalMemoryUsed(), 0);

  // Allocate some more, also add metadata.
  for (int i = 0; i < kNumAllocs; i++) {
    char *x = (char *)a.Allocate(&stats, size, 1);
    CHECK_GE(a.GetActuallyAllocatedSize(x), size);
    uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(x));
    *meta = i;
    allocated[i] = x;
  }
  for (int i = 0; i < kNumAllocs * kNumAllocs; i++) {
    char *p = allocated[i % kNumAllocs];
    CHECK(a.PointerIsMine(p));
    CHECK(a.PointerIsMine(p + 2000));
  }
  CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
  // Deallocate all in reverse order.
  for (int i = 0; i < kNumAllocs; i++) {
    int idx = kNumAllocs - i - 1;
    char *p = allocated[idx];
    uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(p));
    CHECK_EQ(*meta, idx);
    CHECK(a.PointerIsMine(p));
    a.Deallocate(&stats, p);
  }
  CHECK_EQ(a.TotalMemoryUsed(), 0);

  // Test alignments.
  uptr max_alignment = SANITIZER_WORDSIZE == 64 ? (1 << 28) : (1 << 24);
  for (uptr alignment = 8; alignment <= max_alignment; alignment *= 2) {
    const uptr kNumAlignedAllocs = 100;
    for (uptr i = 0; i < kNumAlignedAllocs; i++) {
      uptr size = ((i % 10) + 1) * 4096;
      char *p = allocated[i] = (char *)a.Allocate(&stats, size, alignment);
      CHECK_EQ(p, a.GetBlockBegin(p));
      CHECK_EQ(p, a.GetBlockBegin(p + size - 1));
      CHECK_EQ(p, a.GetBlockBegin(p + size / 2));
      CHECK_EQ(0, (uptr)allocated[i] % alignment);
      p[0] = p[size - 1] = 0;
    }
    for (uptr i = 0; i < kNumAlignedAllocs; i++) {
      a.Deallocate(&stats, allocated[i]);
    }
  }

  // Regression test for boundary condition in GetBlockBegin().
  uptr page_size = GetPageSizeCached();
  char *p = (char *)a.Allocate(&stats, page_size, 1);
  CHECK_EQ(p, a.GetBlockBegin(p));
  CHECK_EQ(p, (char *)a.GetBlockBegin(p + page_size - 1));
  CHECK_NE(p, (char *)a.GetBlockBegin(p + page_size));
  a.Deallocate(&stats, p);
}

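// Exercises the combined (primary + secondary) allocator through a local
// cache: absurdly large requests must return 0, and per-chunk metadata must
// survive allocation, shuffling and deallocation over several rounds.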
template
<class PrimaryAllocator, class SecondaryAllocator, class AllocatorCache>
void TestCombinedAllocator() {
  typedef
      CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator>
      Allocator;
  Allocator *a = new Allocator;
  a->Init();

  AllocatorCache cache;
  memset(&cache, 0, sizeof(cache));
  a->InitCache(&cache);

  EXPECT_EQ(a->Allocate(&cache, -1, 1), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, -1, 1024), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1024), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1023, 1024), (void*)0);

  const uptr kNumAllocs = 100000;
  const uptr kNumIter = 10;
  for (uptr iter = 0; iter < kNumIter; iter++) {
    std::vector<void*> allocated;
    for (uptr i = 0; i < kNumAllocs; i++) {
      uptr size = (i % (1 << 14)) + 1;
      if ((i % 1024) == 0)
        size = 1 << (10 + (i % 14));
      void *x = a->Allocate(&cache, size, 1);
      uptr *meta = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_EQ(*meta, 0);
      *meta = size;
      allocated.push_back(x);
    }

    random_shuffle(allocated.begin(), allocated.end());

    for (uptr i = 0; i < kNumAllocs; i++) {
      void *x = allocated[i];
      uptr *meta = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_NE(*meta, 0);
      CHECK(a->PointerIsMine(x));
      *meta = 0;
      a->Deallocate(&cache, x);
    }
    allocated.clear();
    a->SwallowCache(&cache);
  }
  a->DestroyCache(&cache);
  a->TestOnlyUnmap();
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, CombinedAllocator64) {
  TestCombinedAllocator<Allocator64,
      LargeMmapAllocator<>,
      SizeClassAllocatorLocalCache<Allocator64> > ();
}

TEST(SanitizerCommon, CombinedAllocator64Compact) {
  TestCombinedAllocator<Allocator64Compact,
      LargeMmapAllocator<>,
      SizeClassAllocatorLocalCache<Allocator64Compact> > ();
}
#endif

TEST(SanitizerCommon, CombinedAllocator32Compact) {
  TestCombinedAllocator<Allocator32Compact,
      LargeMmapAllocator<>,
      SizeClassAllocatorLocalCache<Allocator32Compact> > ();
}

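// Checks that allocating via a local cache, freeing, and draining the cache
// leaves the allocator's total memory usage unchanged between iterations.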
template <class AllocatorCache>
void TestSizeClassAllocatorLocalCache() {
  AllocatorCache cache;
  typedef typename AllocatorCache::Allocator Allocator;
  Allocator *a = new Allocator();

  a->Init();
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  const uptr kNumAllocs = 10000;
  const int kNumIter = 100;
  uptr saved_total = 0;
  for (int class_id = 1; class_id <= 5; class_id++) {
    for (int it = 0; it < kNumIter; it++) {
      void *allocated[kNumAllocs];
      for (uptr i = 0; i < kNumAllocs; i++) {
        allocated[i] = cache.Allocate(a, class_id);
      }
      for (uptr i = 0; i < kNumAllocs; i++) {
        cache.Deallocate(a, class_id, allocated[i]);
      }
      cache.Drain(a);
      uptr total_allocated = a->TotalMemoryUsed();
      if (it)
        CHECK_EQ(saved_total, total_allocated);
      saved_total = total_allocated;
    }
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64LocalCache) {
  TestSizeClassAllocatorLocalCache<
      SizeClassAllocatorLocalCache<Allocator64> >();
}

TEST(SanitizerCommon, SizeClassAllocator64CompactLocalCache) {
  TestSizeClassAllocatorLocalCache<
      SizeClassAllocatorLocalCache<Allocator64Compact> >();
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32CompactLocalCache) {
  TestSizeClassAllocatorLocalCache<
      SizeClassAllocatorLocalCache<Allocator32Compact> >();
}

#if SANITIZER_WORDSIZE == 64
typedef SizeClassAllocatorLocalCache<Allocator64> AllocatorCache;
static AllocatorCache static_allocator_cache;

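// Thread body for AllocatorLeakTest: does one cached allocation, then drains
// the static thread cache back to the allocator.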
void *AllocatorLeakTestWorker(void *arg) {
  typedef AllocatorCache::Allocator Allocator;
  Allocator *a = (Allocator*)(arg);
  static_allocator_cache.Allocate(a, 10);
  static_allocator_cache.Drain(a);
  return 0;
}

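// Runs many short-lived threads through the worker above and checks that the
// allocator's total memory usage does not grow with the number of threads.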
TEST(SanitizerCommon, AllocatorLeakTest) {
  typedef AllocatorCache::Allocator Allocator;
  Allocator a;
  a.Init();
  uptr total_used_memory = 0;
  for (int i = 0; i < 100; i++) {
    pthread_t t;
    EXPECT_EQ(0, pthread_create(&t, 0, AllocatorLeakTestWorker, &a));
    EXPECT_EQ(0, pthread_join(t, 0));
    if (i == 0)
      total_used_memory = a.TotalMemoryUsed();
    EXPECT_EQ(a.TotalMemoryUsed(), total_used_memory);
  }

  a.TestOnlyUnmap();
}

// Struct which is allocated to pass info to new threads.  The new thread frees
// it.
struct NewThreadParams {
  AllocatorCache *thread_cache;
  AllocatorCache::Allocator *allocator;
  uptr class_id;
};

// Called in a new thread.  Just frees its argument.
static void *DeallocNewThreadWorker(void *arg) {
  NewThreadParams *params = reinterpret_cast<NewThreadParams*>(arg);
  params->thread_cache->Deallocate(params->allocator, params->class_id, params);
  return NULL;
}

// The allocator cache is supposed to be POD and zero initialized.  We should be
// able to call Deallocate on a zeroed cache, and it will self-initialize.
TEST(Allocator, AllocatorCacheDeallocNewThread) {
  AllocatorCache::Allocator allocator;
  allocator.Init();
  AllocatorCache main_cache;
  AllocatorCache child_cache;
  memset(&main_cache, 0, sizeof(main_cache));
  memset(&child_cache, 0, sizeof(child_cache));

  uptr class_id = DefaultSizeClassMap::ClassID(sizeof(NewThreadParams));
  NewThreadParams *params = reinterpret_cast<NewThreadParams*>(
      main_cache.Allocate(&allocator, class_id));
  params->thread_cache = &child_cache;
  params->allocator = &allocator;
  params->class_id = class_id;
  pthread_t t;
  EXPECT_EQ(0, pthread_create(&t, 0, DeallocNewThreadWorker, params));
  EXPECT_EQ(0, pthread_join(t, 0));
}
#endif

TEST(Allocator, Basic) {
  char *p = (char*)InternalAlloc(10);
  EXPECT_NE(p, (char*)0);
  char *p2 = (char*)InternalAlloc(20);
  EXPECT_NE(p2, (char*)0);
  EXPECT_NE(p2, p);
  InternalFree(p);
  InternalFree(p2);
}

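// Allocates and frees a batch of randomly sized blocks via InternalAlloc().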
TEST(Allocator, Stress) {
  const int kCount = 1000;
  char *ptrs[kCount];
  unsigned rnd = 42;
  for (int i = 0; i < kCount; i++) {
    uptr sz = my_rand_r(&rnd) % 1000;
    char *p = (char*)InternalAlloc(sz);
    EXPECT_NE(p, (char*)0);
    ptrs[i] = p;
  }
  for (int i = 0; i < kCount; i++) {
    InternalFree(ptrs[i]);
  }
}

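// Checks that InternalScopedBuffer reports the right size and that its
// memory can be written and read back.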
TEST(Allocator, ScopedBuffer) {
  const int kSize = 512;
  {
    InternalScopedBuffer<int> int_buf(kSize);
    EXPECT_EQ(sizeof(int) * kSize, int_buf.size());  // NOLINT
  }
  InternalScopedBuffer<char> char_buf(kSize);
  EXPECT_EQ(sizeof(char) * kSize, char_buf.size());  // NOLINT
  internal_memset(char_buf.data(), 'c', kSize);
  for (int i = 0; i < kSize; i++) {
    EXPECT_EQ('c', char_buf[i]);
  }
}

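// ForEachChunk() callback that records every chunk it is shown in a set.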
class IterationTestCallback {
 public:
  explicit IterationTestCallback(std::set<void *> *chunks)
    : chunks_(chunks) {}
  void operator()(void *chunk) const {
    chunks_->insert(chunk);
  }
 private:
  std::set<void *> *chunks_;
};

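// Allocates chunks of assorted sizes and verifies that ForEachChunk()
// (under ForceLock/ForceUnlock) reports every one of them.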
template <class Allocator>
void TestSizeClassAllocatorIteration() {
  Allocator *a = new Allocator;
  a->Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,
    50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000};

  std::vector<void *> allocated;

  // Allocate a bunch of chunks.
  for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) {
    uptr size = sizes[s];
    if (!a->CanAllocate(size, 1)) continue;
    // printf("s = %ld\n", size);
    uptr n_iter = std::max((uptr)6, 80000 / size);
    // fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter);
    for (uptr j = 0; j < n_iter; j++) {
      uptr class_id0 = Allocator::SizeClassMapT::ClassID(size);
      void *x = cache.Allocate(a, class_id0);
      allocated.push_back(x);
    }
  }

  std::set<void *> reported_chunks;
  IterationTestCallback callback(&reported_chunks);
  a->ForceLock();
  a->ForEachChunk(callback);
  a->ForceUnlock();

  for (uptr i = 0; i < allocated.size(); i++) {
    // Don't use EXPECT_NE. Reporting the first mismatch is enough.
    ASSERT_NE(reported_chunks.find(allocated[i]), reported_chunks.end());
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64Iteration) {
  TestSizeClassAllocatorIteration<Allocator64>();
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32Iteration) {
  TestSizeClassAllocatorIteration<Allocator32Compact>();
}

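// The same ForEachChunk() coverage check, for the secondary allocator.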
TEST(SanitizerCommon, LargeMmapAllocatorIteration) {
  LargeMmapAllocator<> a;
  a.Init();
  AllocatorStats stats;
  stats.Init();

  static const uptr kNumAllocs = 1000;
  char *allocated[kNumAllocs];
  static const uptr size = 40;
  // Allocate some.
  for (uptr i = 0; i < kNumAllocs; i++) {
    allocated[i] = (char *)a.Allocate(&stats, size, 1);
  }

  std::set<void *> reported_chunks;
  IterationTestCallback callback(&reported_chunks);
  a.ForceLock();
  a.ForEachChunk(callback);
  a.ForceUnlock();

  for (uptr i = 0; i < kNumAllocs; i++) {
    // Don't use EXPECT_NE. Reporting the first mismatch is enough.
    ASSERT_NE(reported_chunks.find(allocated[i]), reported_chunks.end());
  }
}

#if SANITIZER_WORDSIZE == 64
// Regression test for out-of-memory condition in PopulateFreeList().
TEST(SanitizerCommon, SizeClassAllocator64PopulateFreeListOOM) {
  // In a world where regions are small and chunks are huge...
  typedef SizeClassMap<63, 128, 16> SpecialSizeClassMap;
  typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0,
                               SpecialSizeClassMap> SpecialAllocator64;
  const uptr kRegionSize =
      kAllocatorSize / SpecialSizeClassMap::kNumClassesRounded;
  SpecialAllocator64 *a = new SpecialAllocator64;
  a->Init();
  SizeClassAllocatorLocalCache<SpecialAllocator64> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  // ...one man is on a mission to overflow a region with a series of
  // successive allocations.
  const uptr kClassID = 107;
  const uptr kAllocationSize = DefaultSizeClassMap::Size(kClassID);
  ASSERT_LT(2 * kAllocationSize, kRegionSize);
  ASSERT_GT(3 * kAllocationSize, kRegionSize);
  cache.Allocate(a, kClassID);
  EXPECT_DEATH(cache.Allocate(a, kClassID) && cache.Allocate(a, kClassID),
               "The process has exhausted");
  a->TestOnlyUnmap();
  delete a;
}
#endif

#endif  // #if TSAN_DEBUG==0