sanitizer_allocator_test.cc revision 9150f397ba4c5478275d72665ea3e53a84c7076a
//===-- sanitizer_allocator_test.cc ---------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
// Tests for sanitizer_allocator.h.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"

#include "sanitizer_test_utils.h"

#include "gtest/gtest.h"

#include <stdlib.h>
#include <pthread.h>
#include <algorithm>
#include <vector>
#include <set>

// Too slow for debug build
#if TSAN_DEBUG == 0

#if SANITIZER_WORDSIZE == 64
static const uptr kAllocatorSpace = 0x700000000000ULL;
static const uptr kAllocatorSize  = 0x010000000000ULL;  // 1T.
static const u64 kAddressSpaceSize = 1ULL << 47;

typedef SizeClassAllocator64<
  kAllocatorSpace, kAllocatorSize, 16, DefaultSizeClassMap> Allocator64;

typedef SizeClassAllocator64<
  kAllocatorSpace, kAllocatorSize, 16, CompactSizeClassMap> Allocator64Compact;
#else
static const u64 kAddressSpaceSize = 1ULL << 32;
#endif

static const uptr kRegionSizeLog = FIRST_32_SECOND_64(20, 24);
static const uptr kFlatByteMapSize = kAddressSpaceSize >> kRegionSizeLog;

typedef SizeClassAllocator32<
  0, kAddressSpaceSize,
  /*kMetadataSize*/16,
  CompactSizeClassMap,
  kRegionSizeLog,
  FlatByteMap<kFlatByteMapSize> >
  Allocator32Compact;
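
// The typedefs above define the primary allocators exercised below. Most of
// the tests follow the same usage pattern; a minimal sketch (not code from
// this revision, just the calls the tests below make):
//
//   Allocator *a = new Allocator;
//   a->Init();
//   SizeClassAllocatorLocalCache<Allocator> cache;
//   memset(&cache, 0, sizeof(cache));
//   cache.Init(0);
//   void *p = cache.Allocate(a, Allocator::SizeClassMapT::ClassID(size));
//   cache.Deallocate(a, a->GetSizeClass(p), p);
//   a->TestOnlyUnmap();
//   delete a;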

template <class SizeClassMap>
void TestSizeClassMap() {
  typedef SizeClassMap SCMap;
  // SCMap::Print();
  SCMap::Validate();
}

TEST(SanitizerCommon, DefaultSizeClassMap) {
  TestSizeClassMap<DefaultSizeClassMap>();
}

TEST(SanitizerCommon, CompactSizeClassMap) {
  TestSizeClassMap<CompactSizeClassMap>();
}

TEST(SanitizerCommon, InternalSizeClassMap) {
  TestSizeClassMap<InternalSizeClassMap>();
}

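// Allocates chunks of many different sizes, checks GetBlockBegin(),
// PointerIsMine(), GetActuallyAllocatedSize() and the per-chunk metadata,
// then frees everything and verifies that the allocator's total memory use
// stays constant across repeated rounds.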
template <class Allocator>
void TestSizeClassAllocator() {
  Allocator *a = new Allocator;
  a->Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,
    50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000};

  std::vector<void *> allocated;

  uptr last_total_allocated = 0;
  for (int i = 0; i < 3; i++) {
    // Allocate a bunch of chunks.
    for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) {
      uptr size = sizes[s];
      if (!a->CanAllocate(size, 1)) continue;
      // printf("s = %ld\n", size);
      uptr n_iter = std::max((uptr)6, 8000000 / size);
      // fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter);
      for (uptr i = 0; i < n_iter; i++) {
        uptr class_id0 = Allocator::SizeClassMapT::ClassID(size);
        char *x = (char*)cache.Allocate(a, class_id0);
        x[0] = 0;
        x[size - 1] = 0;
        x[size / 2] = 0;
        allocated.push_back(x);
        CHECK_EQ(x, a->GetBlockBegin(x));
        CHECK_EQ(x, a->GetBlockBegin(x + size - 1));
        CHECK(a->PointerIsMine(x));
        CHECK(a->PointerIsMine(x + size - 1));
        CHECK(a->PointerIsMine(x + size / 2));
        CHECK_GE(a->GetActuallyAllocatedSize(x), size);
        uptr class_id = a->GetSizeClass(x);
        CHECK_EQ(class_id, Allocator::SizeClassMapT::ClassID(size));
        uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));
        metadata[0] = reinterpret_cast<uptr>(x) + 1;
        metadata[1] = 0xABCD;
      }
    }
    // Deallocate all.
    for (uptr i = 0; i < allocated.size(); i++) {
      void *x = allocated[i];
      uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_EQ(metadata[0], reinterpret_cast<uptr>(x) + 1);
      CHECK_EQ(metadata[1], 0xABCD);
      cache.Deallocate(a, a->GetSizeClass(x), x);
    }
    allocated.clear();
    uptr total_allocated = a->TotalMemoryUsed();
    if (last_total_allocated == 0)
      last_total_allocated = total_allocated;
    CHECK_EQ(last_total_allocated, total_allocated);
  }

  // Check that GetBlockBegin never crashes.
  for (uptr x = 0, step = kAddressSpaceSize / 100000;
       x < kAddressSpaceSize - step; x += step)
    if (a->PointerIsMine(reinterpret_cast<void *>(x)))
      Ident(a->GetBlockBegin(reinterpret_cast<void *>(x)));

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64) {
  TestSizeClassAllocator<Allocator64>();
}

TEST(SanitizerCommon, SizeClassAllocator64Compact) {
  TestSizeClassAllocator<Allocator64Compact>();
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32Compact) {
  TestSizeClassAllocator<Allocator32Compact>();
}

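// Stress-tests metadata lookups: for a large number of live chunks,
// GetMetaData() must keep returning the same pointer it returned right after
// each chunk was allocated.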
template <class Allocator>
void SizeClassAllocatorMetadataStress() {
  Allocator *a = new Allocator;
  a->Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  const uptr kNumAllocs = 1 << 13;
  void *allocated[kNumAllocs];
  void *meta[kNumAllocs];
  for (uptr i = 0; i < kNumAllocs; i++) {
    void *x = cache.Allocate(a, 1 + i % 50);
    allocated[i] = x;
    meta[i] = a->GetMetaData(x);
  }
  // Get Metadata kNumAllocs^2 times.
  for (uptr i = 0; i < kNumAllocs * kNumAllocs; i++) {
    uptr idx = i % kNumAllocs;
    void *m = a->GetMetaData(allocated[idx]);
    EXPECT_EQ(m, meta[idx]);
  }
  for (uptr i = 0; i < kNumAllocs; i++) {
    cache.Deallocate(a, 1 + i % 50, allocated[i]);
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64MetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator64>();
}

TEST(SanitizerCommon, SizeClassAllocator64CompactMetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator64Compact>();
}
#endif  // SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator32CompactMetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator32Compact>();
}

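// Allocates roughly 8G worth of chunks of the largest size class and checks
// that GetBlockBegin() keeps returning the chunk itself, guarding against
// overflow in its internal arithmetic.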
template <class Allocator>
void SizeClassAllocatorGetBlockBeginStress() {
  Allocator *a = new Allocator;
  a->Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  uptr max_size_class = Allocator::kNumClasses - 1;
  uptr size = Allocator::SizeClassMapT::Size(max_size_class);
  u64 G8 = 1ULL << 33;
  // Make sure we correctly compute GetBlockBegin() w/o overflow.
  for (size_t i = 0; i <= G8 / size; i++) {
    void *x = cache.Allocate(a, max_size_class);
    void *beg = a->GetBlockBegin(x);
    // if ((i & (i - 1)) == 0)
    //   fprintf(stderr, "[%zd] %p %p\n", i, x, beg);
    EXPECT_EQ(x, beg);
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64GetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator64>();
}
TEST(SanitizerCommon, SizeClassAllocator64CompactGetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator64Compact>();
}
TEST(SanitizerCommon, SizeClassAllocator32CompactGetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator32Compact>();
}
#endif  // SANITIZER_WORDSIZE == 64

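// Counts OnMap()/OnUnmap() invocations; the tests below use it to verify that
// the allocators invoke the callbacks when they map and unmap memory, and how
// many times.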
struct TestMapUnmapCallback {
  static int map_count, unmap_count;
  void OnMap(uptr p, uptr size) const { map_count++; }
  void OnUnmap(uptr p, uptr size) const { unmap_count++; }
};
int TestMapUnmapCallback::map_count;
int TestMapUnmapCallback::unmap_count;

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64MapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  typedef SizeClassAllocator64<
      kAllocatorSpace, kAllocatorSize, 16, DefaultSizeClassMap,
      TestMapUnmapCallback> Allocator64WithCallBack;
  Allocator64WithCallBack *a = new Allocator64WithCallBack;
  a->Init();
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);  // Allocator state.
  SizeClassAllocatorLocalCache<Allocator64WithCallBack> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();
  a->AllocateBatch(&stats, &cache, 32);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 3);  // State + alloc + metadata.
  a->TestOnlyUnmap();
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);  // The whole thing.
  delete a;
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32MapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  typedef SizeClassAllocator32<
      0, kAddressSpaceSize,
      /*kMetadataSize*/16,
      CompactSizeClassMap,
      kRegionSizeLog,
      FlatByteMap<kFlatByteMapSize>,
      TestMapUnmapCallback>
    Allocator32WithCallBack;
  Allocator32WithCallBack *a = new Allocator32WithCallBack;
  a->Init();
  EXPECT_EQ(TestMapUnmapCallback::map_count, 0);
  SizeClassAllocatorLocalCache<Allocator32WithCallBack> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();
  a->AllocateBatch(&stats, &cache, 32);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);
  a->TestOnlyUnmap();
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);
  delete a;
  // fprintf(stderr, "Map: %d Unmap: %d\n",
  //         TestMapUnmapCallback::map_count,
  //         TestMapUnmapCallback::unmap_count);
}

TEST(SanitizerCommon, LargeMmapAllocatorMapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  LargeMmapAllocator<TestMapUnmapCallback> a;
  a.Init();
  AllocatorStats stats;
  stats.Init();
  void *x = a.Allocate(&stats, 1 << 20, 1);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);
  a.Deallocate(&stats, x);
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);
}

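// Keeps allocating batches of one size class until the allocator runs out of
// space; the test below wraps this in EXPECT_DEATH and expects the allocator's
// "Out of memory" check to fire.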
template<class Allocator>
void FailInAssertionOnOOM() {
  Allocator a;
  a.Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();
  for (int i = 0; i < 1000000; i++) {
    a.AllocateBatch(&stats, &cache, 52);
  }

  a.TestOnlyUnmap();
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64Overflow) {
  EXPECT_DEATH(FailInAssertionOnOOM<Allocator64>(), "Out of memory");
}
#endif

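// Exercises the secondary (mmap-based) allocator directly: plain allocation
// and deallocation, per-chunk metadata, alignments up to 2^28 (2^24 on
// 32-bit), and a regression check for a boundary condition in GetBlockBegin().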
TEST(SanitizerCommon, LargeMmapAllocator) {
  LargeMmapAllocator<> a;
  a.Init();
  AllocatorStats stats;
  stats.Init();

  static const int kNumAllocs = 1000;
  char *allocated[kNumAllocs];
  static const uptr size = 4000;
  // Allocate some.
  for (int i = 0; i < kNumAllocs; i++) {
    allocated[i] = (char *)a.Allocate(&stats, size, 1);
    CHECK(a.PointerIsMine(allocated[i]));
  }
  // Deallocate all.
  CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
  for (int i = 0; i < kNumAllocs; i++) {
    char *p = allocated[i];
    CHECK(a.PointerIsMine(p));
    a.Deallocate(&stats, p);
  }
  // Check that nothing is left.
  CHECK_EQ(a.TotalMemoryUsed(), 0);

  // Allocate some more, also add metadata.
  for (int i = 0; i < kNumAllocs; i++) {
    char *x = (char *)a.Allocate(&stats, size, 1);
    CHECK_GE(a.GetActuallyAllocatedSize(x), size);
    uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(x));
    *meta = i;
    allocated[i] = x;
  }
  for (int i = 0; i < kNumAllocs * kNumAllocs; i++) {
    char *p = allocated[i % kNumAllocs];
    CHECK(a.PointerIsMine(p));
    CHECK(a.PointerIsMine(p + 2000));
  }
  CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
  // Deallocate all in reverse order.
  for (int i = 0; i < kNumAllocs; i++) {
    int idx = kNumAllocs - i - 1;
    char *p = allocated[idx];
    uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(p));
    CHECK_EQ(*meta, idx);
    CHECK(a.PointerIsMine(p));
    a.Deallocate(&stats, p);
  }
  CHECK_EQ(a.TotalMemoryUsed(), 0);

  // Test alignments.
  uptr max_alignment = SANITIZER_WORDSIZE == 64 ? (1 << 28) : (1 << 24);
  for (uptr alignment = 8; alignment <= max_alignment; alignment *= 2) {
    const uptr kNumAlignedAllocs = 100;
    for (uptr i = 0; i < kNumAlignedAllocs; i++) {
      uptr size = ((i % 10) + 1) * 4096;
      char *p = allocated[i] = (char *)a.Allocate(&stats, size, alignment);
      CHECK_EQ(p, a.GetBlockBegin(p));
      CHECK_EQ(p, a.GetBlockBegin(p + size - 1));
      CHECK_EQ(p, a.GetBlockBegin(p + size / 2));
      CHECK_EQ(0, (uptr)allocated[i] % alignment);
      p[0] = p[size - 1] = 0;
    }
    for (uptr i = 0; i < kNumAlignedAllocs; i++) {
      a.Deallocate(&stats, allocated[i]);
    }
  }

  // Regression test for boundary condition in GetBlockBegin().
  uptr page_size = GetPageSizeCached();
  char *p = (char *)a.Allocate(&stats, page_size, 1);
  CHECK_EQ(p, a.GetBlockBegin(p));
  CHECK_EQ(p, (char *)a.GetBlockBegin(p + page_size - 1));
  CHECK_NE(p, (char *)a.GetBlockBegin(p + page_size));
  a.Deallocate(&stats, p);
}

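// Combines a primary (size class) allocator with the secondary
// LargeMmapAllocator. Overflowing size/alignment requests must return null
// while allocator_may_return_null is set, and terminate the process otherwise;
// the combined allocator is then stress-tested with a mix of small and large
// sizes, using the per-chunk metadata to detect double use of a chunk.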
template
<class PrimaryAllocator, class SecondaryAllocator, class AllocatorCache>
void TestCombinedAllocator() {
  typedef
      CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator>
      Allocator;
  Allocator *a = new Allocator;
  a->Init();

  AllocatorCache cache;
  memset(&cache, 0, sizeof(cache));
  a->InitCache(&cache);

  bool allocator_may_return_null = common_flags()->allocator_may_return_null;
  common_flags()->allocator_may_return_null = true;
  EXPECT_EQ(a->Allocate(&cache, -1, 1), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, -1, 1024), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1024), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1023, 1024), (void*)0);

  common_flags()->allocator_may_return_null = false;
  EXPECT_DEATH(a->Allocate(&cache, -1, 1),
               "allocator is terminating the process");
  // Restore the original value.
  common_flags()->allocator_may_return_null = allocator_may_return_null;

  const uptr kNumAllocs = 100000;
  const uptr kNumIter = 10;
  for (uptr iter = 0; iter < kNumIter; iter++) {
    std::vector<void*> allocated;
    for (uptr i = 0; i < kNumAllocs; i++) {
      uptr size = (i % (1 << 14)) + 1;
      if ((i % 1024) == 0)
        size = 1 << (10 + (i % 14));
      void *x = a->Allocate(&cache, size, 1);
      uptr *meta = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_EQ(*meta, 0);
      *meta = size;
      allocated.push_back(x);
    }

    random_shuffle(allocated.begin(), allocated.end());

    for (uptr i = 0; i < kNumAllocs; i++) {
      void *x = allocated[i];
      uptr *meta = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_NE(*meta, 0);
      CHECK(a->PointerIsMine(x));
      *meta = 0;
      a->Deallocate(&cache, x);
    }
    allocated.clear();
    a->SwallowCache(&cache);
  }
  a->DestroyCache(&cache);
  a->TestOnlyUnmap();
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, CombinedAllocator64) {
  TestCombinedAllocator<Allocator64,
      LargeMmapAllocator<>,
      SizeClassAllocatorLocalCache<Allocator64> > ();
}

TEST(SanitizerCommon, CombinedAllocator64Compact) {
  TestCombinedAllocator<Allocator64Compact,
      LargeMmapAllocator<>,
      SizeClassAllocatorLocalCache<Allocator64Compact> > ();
}
#endif

TEST(SanitizerCommon, CombinedAllocator32Compact) {
  TestCombinedAllocator<Allocator32Compact,
      LargeMmapAllocator<>,
      SizeClassAllocatorLocalCache<Allocator32Compact> > ();
}

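// For several small size classes, allocates and frees kNumAllocs chunks
// through the per-thread cache and then calls Drain(); after the first
// iteration the allocator's total memory use must not grow any further.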
template <class AllocatorCache>
void TestSizeClassAllocatorLocalCache() {
  AllocatorCache cache;
  typedef typename AllocatorCache::Allocator Allocator;
  Allocator *a = new Allocator();

  a->Init();
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  const uptr kNumAllocs = 10000;
  const int kNumIter = 100;
  uptr saved_total = 0;
  for (int class_id = 1; class_id <= 5; class_id++) {
    for (int it = 0; it < kNumIter; it++) {
      void *allocated[kNumAllocs];
      for (uptr i = 0; i < kNumAllocs; i++) {
        allocated[i] = cache.Allocate(a, class_id);
      }
      for (uptr i = 0; i < kNumAllocs; i++) {
        cache.Deallocate(a, class_id, allocated[i]);
      }
      cache.Drain(a);
      uptr total_allocated = a->TotalMemoryUsed();
      if (it)
        CHECK_EQ(saved_total, total_allocated);
      saved_total = total_allocated;
    }
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64LocalCache) {
  TestSizeClassAllocatorLocalCache<
      SizeClassAllocatorLocalCache<Allocator64> >();
}

TEST(SanitizerCommon, SizeClassAllocator64CompactLocalCache) {
  TestSizeClassAllocatorLocalCache<
      SizeClassAllocatorLocalCache<Allocator64Compact> >();
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32CompactLocalCache) {
  TestSizeClassAllocatorLocalCache<
      SizeClassAllocatorLocalCache<Allocator32Compact> >();
}

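// Each short-lived thread allocates one chunk from a static allocator cache
// and drains the cache before exiting; the allocator's total memory use,
// sampled after the first thread, must not grow as more threads come and go.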
#if SANITIZER_WORDSIZE == 64
typedef SizeClassAllocatorLocalCache<Allocator64> AllocatorCache;
static AllocatorCache static_allocator_cache;

void *AllocatorLeakTestWorker(void *arg) {
  typedef AllocatorCache::Allocator Allocator;
  Allocator *a = (Allocator*)(arg);
  static_allocator_cache.Allocate(a, 10);
  static_allocator_cache.Drain(a);
  return 0;
}

TEST(SanitizerCommon, AllocatorLeakTest) {
  typedef AllocatorCache::Allocator Allocator;
  Allocator a;
  a.Init();
  uptr total_used_memory = 0;
  for (int i = 0; i < 100; i++) {
    pthread_t t;
    EXPECT_EQ(0, pthread_create(&t, 0, AllocatorLeakTestWorker, &a));
    EXPECT_EQ(0, pthread_join(t, 0));
    if (i == 0)
      total_used_memory = a.TotalMemoryUsed();
    EXPECT_EQ(a.TotalMemoryUsed(), total_used_memory);
  }

  a.TestOnlyUnmap();
}

// Struct which is allocated to pass info to new threads.  The new thread frees
// it.
struct NewThreadParams {
  AllocatorCache *thread_cache;
  AllocatorCache::Allocator *allocator;
  uptr class_id;
};

// Called in a new thread.  Just frees its argument.
static void *DeallocNewThreadWorker(void *arg) {
  NewThreadParams *params = reinterpret_cast<NewThreadParams*>(arg);
  params->thread_cache->Deallocate(params->allocator, params->class_id, params);
  return NULL;
}

// The allocator cache is supposed to be POD and zero initialized.  We should be
// able to call Deallocate on a zeroed cache, and it will self-initialize.
TEST(Allocator, AllocatorCacheDeallocNewThread) {
  AllocatorCache::Allocator allocator;
  allocator.Init();
  AllocatorCache main_cache;
  AllocatorCache child_cache;
  memset(&main_cache, 0, sizeof(main_cache));
  memset(&child_cache, 0, sizeof(child_cache));

  uptr class_id = DefaultSizeClassMap::ClassID(sizeof(NewThreadParams));
  NewThreadParams *params = reinterpret_cast<NewThreadParams*>(
      main_cache.Allocate(&allocator, class_id));
  params->thread_cache = &child_cache;
  params->allocator = &allocator;
  params->class_id = class_id;
  pthread_t t;
  EXPECT_EQ(0, pthread_create(&t, 0, DeallocNewThreadWorker, params));
  EXPECT_EQ(0, pthread_join(t, 0));
}
#endif

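// Smoke tests for the internal allocator (InternalAlloc/InternalFree) that the
// sanitizer runtimes use for their own bookkeeping.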
TEST(Allocator, Basic) {
  char *p = (char*)InternalAlloc(10);
  EXPECT_NE(p, (char*)0);
  char *p2 = (char*)InternalAlloc(20);
  EXPECT_NE(p2, (char*)0);
  EXPECT_NE(p2, p);
  InternalFree(p);
  InternalFree(p2);
}

TEST(Allocator, Stress) {
  const int kCount = 1000;
  char *ptrs[kCount];
  unsigned rnd = 42;
  for (int i = 0; i < kCount; i++) {
    uptr sz = my_rand_r(&rnd) % 1000;
    char *p = (char*)InternalAlloc(sz);
    EXPECT_NE(p, (char*)0);
    ptrs[i] = p;
  }
  for (int i = 0; i < kCount; i++) {
    InternalFree(ptrs[i]);
  }
}

TEST(Allocator, InternalAllocFailure) {
  EXPECT_DEATH(Ident(InternalAlloc(10 << 20)),
               "Unexpected mmap in InternalAllocator!");
}

TEST(Allocator, ScopedBuffer) {
  const int kSize = 512;
  {
    InternalScopedBuffer<int> int_buf(kSize);
    EXPECT_EQ(sizeof(int) * kSize, int_buf.size());  // NOLINT
  }
  InternalScopedBuffer<char> char_buf(kSize);
  EXPECT_EQ(sizeof(char) * kSize, char_buf.size());  // NOLINT
  internal_memset(char_buf.data(), 'c', kSize);
  for (int i = 0; i < kSize; i++) {
    EXPECT_EQ('c', char_buf[i]);
  }
}

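// Chunk iteration: ForEachChunk(), called with the allocator locked, must
// report every chunk that is currently allocated. The callback below simply
// collects the reported chunk addresses into a set.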
void IterationTestCallback(uptr chunk, void *arg) {
  reinterpret_cast<std::set<uptr> *>(arg)->insert(chunk);
}

template <class Allocator>
void TestSizeClassAllocatorIteration() {
  Allocator *a = new Allocator;
  a->Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,
    50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000};

  std::vector<void *> allocated;

  // Allocate a bunch of chunks.
  for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) {
    uptr size = sizes[s];
    if (!a->CanAllocate(size, 1)) continue;
    // printf("s = %ld\n", size);
    uptr n_iter = std::max((uptr)6, 80000 / size);
    // fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter);
    for (uptr j = 0; j < n_iter; j++) {
      uptr class_id0 = Allocator::SizeClassMapT::ClassID(size);
      void *x = cache.Allocate(a, class_id0);
      allocated.push_back(x);
    }
  }

  std::set<uptr> reported_chunks;
  a->ForceLock();
  a->ForEachChunk(IterationTestCallback, &reported_chunks);
  a->ForceUnlock();

  for (uptr i = 0; i < allocated.size(); i++) {
    // Don't use EXPECT_NE. Reporting the first mismatch is enough.
    ASSERT_NE(reported_chunks.find(reinterpret_cast<uptr>(allocated[i])),
              reported_chunks.end());
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64Iteration) {
  TestSizeClassAllocatorIteration<Allocator64>();
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32Iteration) {
  TestSizeClassAllocatorIteration<Allocator32Compact>();
}

TEST(SanitizerCommon, LargeMmapAllocatorIteration) {
  LargeMmapAllocator<> a;
  a.Init();
  AllocatorStats stats;
  stats.Init();

  static const uptr kNumAllocs = 1000;
  char *allocated[kNumAllocs];
  static const uptr size = 40;
  // Allocate some.
  for (uptr i = 0; i < kNumAllocs; i++)
    allocated[i] = (char *)a.Allocate(&stats, size, 1);

  std::set<uptr> reported_chunks;
  a.ForceLock();
  a.ForEachChunk(IterationTestCallback, &reported_chunks);
  a.ForceUnlock();

  for (uptr i = 0; i < kNumAllocs; i++) {
    // Don't use EXPECT_NE. Reporting the first mismatch is enough.
    ASSERT_NE(reported_chunks.find(reinterpret_cast<uptr>(allocated[i])),
              reported_chunks.end());
  }
  for (uptr i = 0; i < kNumAllocs; i++)
    a.Deallocate(&stats, allocated[i]);
}

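// GetBlockBeginFastLocked() must resolve pointers inside a block, and even a
// few bytes below the returned pointer (which still fall within the mapped
// chunk), back to the block begin, and must return null for addresses the
// allocator does not own.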
TEST(SanitizerCommon, LargeMmapAllocatorBlockBegin) {
  LargeMmapAllocator<> a;
  a.Init();
  AllocatorStats stats;
  stats.Init();

  static const uptr kNumAllocs = 1024;
  static const uptr kNumExpectedFalseLookups = 10000000;
  char *allocated[kNumAllocs];
  static const uptr size = 4096;
  // Allocate some.
  for (uptr i = 0; i < kNumAllocs; i++) {
    allocated[i] = (char *)a.Allocate(&stats, size, 1);
  }

  for (uptr i = 0; i < kNumAllocs * kNumAllocs; i++) {
    // if ((i & (i - 1)) == 0) fprintf(stderr, "[%zd]\n", i);
    char *p1 = allocated[i % kNumAllocs];
    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1));
    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 + size / 2));
    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 + size - 1));
    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 - 100));
  }

  for (uptr i = 0; i < kNumExpectedFalseLookups; i++) {
    void *p = reinterpret_cast<void *>(i % 1024);
    EXPECT_EQ((void *)0, a.GetBlockBeginFastLocked(p));
    p = reinterpret_cast<void *>(~0L - (i % 1024));
    EXPECT_EQ((void *)0, a.GetBlockBeginFastLocked(p));
  }

  for (uptr i = 0; i < kNumAllocs; i++)
    a.Deallocate(&stats, allocated[i]);
}


#if SANITIZER_WORDSIZE == 64
// Regression test for out-of-memory condition in PopulateFreeList().
TEST(SanitizerCommon, SizeClassAllocator64PopulateFreeListOOM) {
  // In a world where regions are small and chunks are huge...
  typedef SizeClassMap<63, 128, 16> SpecialSizeClassMap;
  typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0,
                               SpecialSizeClassMap> SpecialAllocator64;
  const uptr kRegionSize =
      kAllocatorSize / SpecialSizeClassMap::kNumClassesRounded;
  SpecialAllocator64 *a = new SpecialAllocator64;
  a->Init();
  SizeClassAllocatorLocalCache<SpecialAllocator64> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  // ...one man is on a mission to overflow a region with a series of
  // successive allocations.
  const uptr kClassID = 107;
  const uptr kAllocationSize = DefaultSizeClassMap::Size(kClassID);
  ASSERT_LT(2 * kAllocationSize, kRegionSize);
  ASSERT_GT(3 * kAllocationSize, kRegionSize);
  cache.Allocate(a, kClassID);
  EXPECT_DEATH(cache.Allocate(a, kClassID) && cache.Allocate(a, kClassID),
               "The process has exhausted");
  a->TestOnlyUnmap();
  delete a;
}
#endif

#endif  // #if TSAN_DEBUG==0