//===-- sanitizer_allocator_test.cc ---------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
// Tests for sanitizer_allocator.h.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_common.h"

#include "sanitizer_test_utils.h"

#include "gtest/gtest.h"

#include <stdlib.h>
#include <pthread.h>
#include <algorithm>
#include <vector>

// Too slow for debug build
#if TSAN_DEBUG == 0

#if SANITIZER_WORDSIZE == 64
static const uptr kAllocatorSpace = 0x700000000000ULL;
static const uptr kAllocatorSize  = 0x010000000000ULL;  // 1T.
static const u64 kAddressSpaceSize = 1ULL << 47;

typedef SizeClassAllocator64<
  kAllocatorSpace, kAllocatorSize, 16, DefaultSizeClassMap> Allocator64;

typedef SizeClassAllocator64<
  kAllocatorSpace, kAllocatorSize, 16, CompactSizeClassMap> Allocator64Compact;
#else
static const u64 kAddressSpaceSize = 1ULL << 32;
#endif

typedef SizeClassAllocator32<
  0, kAddressSpaceSize, 16, CompactSizeClassMap> Allocator32Compact;

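// Checks that a size class map is internally consistent; Validate() is
// expected to CHECK-fail if any invariant is broken.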
template <class SizeClassMap>
void TestSizeClassMap() {
  typedef SizeClassMap SCMap;
  // SCMap::Print();
  SCMap::Validate();
}

TEST(SanitizerCommon, DefaultSizeClassMap) {
  TestSizeClassMap<DefaultSizeClassMap>();
}

TEST(SanitizerCommon, CompactSizeClassMap) {
  TestSizeClassMap<CompactSizeClassMap>();
}

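// Illustrative sketch (not exercised by the tests above): a size class map
// translates a requested size to a small class id and back to the
// rounded-up chunk size, roughly:
//   uptr cid = DefaultSizeClassMap::ClassID(17);   // class serving 17 bytes
//   CHECK_GE(DefaultSizeClassMap::Size(cid), 17);  // chunk is large enough

// Generic smoke test for a size class allocator: allocates chunks of many
// sizes, checks block/metadata accessors, frees everything, and verifies
// that total memory usage is stable across iterations.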
template <class Allocator>
void TestSizeClassAllocator() {
  Allocator *a = new Allocator;
  a->Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,
    50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000};

  std::vector<void *> allocated;

  uptr last_total_allocated = 0;
  for (int i = 0; i < 3; i++) {
    // Allocate a bunch of chunks.
    for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) {
      uptr size = sizes[s];
      if (!a->CanAllocate(size, 1)) continue;
      // printf("s = %ld\n", size);
      uptr n_iter = std::max((uptr)6, 8000000 / size);
      // fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter);
      for (uptr i = 0; i < n_iter; i++) {
        uptr class_id0 = Allocator::SizeClassMapT::ClassID(size);
        char *x = (char*)cache.Allocate(a, class_id0);
        x[0] = 0;
        x[size - 1] = 0;
        x[size / 2] = 0;
        allocated.push_back(x);
        CHECK_EQ(x, a->GetBlockBegin(x));
        CHECK_EQ(x, a->GetBlockBegin(x + size - 1));
        CHECK(a->PointerIsMine(x));
        CHECK(a->PointerIsMine(x + size - 1));
        CHECK(a->PointerIsMine(x + size / 2));
        CHECK_GE(a->GetActuallyAllocatedSize(x), size);
        uptr class_id = a->GetSizeClass(x);
        CHECK_EQ(class_id, Allocator::SizeClassMapT::ClassID(size));
        uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));
        metadata[0] = reinterpret_cast<uptr>(x) + 1;
        metadata[1] = 0xABCD;
      }
    }
    // Deallocate all.
    for (uptr i = 0; i < allocated.size(); i++) {
      void *x = allocated[i];
      uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_EQ(metadata[0], reinterpret_cast<uptr>(x) + 1);
      CHECK_EQ(metadata[1], 0xABCD);
      cache.Deallocate(a, a->GetSizeClass(x), x);
    }
    allocated.clear();
    uptr total_allocated = a->TotalMemoryUsed();
    if (last_total_allocated == 0)
      last_total_allocated = total_allocated;
    CHECK_EQ(last_total_allocated, total_allocated);
  }

  // Check that GetBlockBegin never crashes.
  for (uptr x = 0, step = kAddressSpaceSize / 100000;
       x < kAddressSpaceSize - step; x += step)
    if (a->PointerIsMine(reinterpret_cast<void *>(x)))
      Ident(a->GetBlockBegin(reinterpret_cast<void *>(x)));

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64) {
  TestSizeClassAllocator<Allocator64>();
}

TEST(SanitizerCommon, SizeClassAllocator64Compact) {
  TestSizeClassAllocator<Allocator64Compact>();
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32Compact) {
  TestSizeClassAllocator<Allocator32Compact>();
}

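// Stress test for metadata access: repeatedly fetches metadata for live
// chunks across many size classes to shake out accessor bugs.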
template <class Allocator>
void SizeClassAllocatorMetadataStress() {
  Allocator *a = new Allocator;
  a->Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  static volatile void *sink;

  const uptr kNumAllocs = 10000;
  void *allocated[kNumAllocs];
  for (uptr i = 0; i < kNumAllocs; i++) {
    void *x = cache.Allocate(a, 1 + i % 50);
    allocated[i] = x;
  }
  // Get metadata kNumAllocs^2 times.
  for (uptr i = 0; i < kNumAllocs * kNumAllocs; i++) {
    sink = a->GetMetaData(allocated[i % kNumAllocs]);
  }
  for (uptr i = 0; i < kNumAllocs; i++) {
    cache.Deallocate(a, 1 + i % 50, allocated[i]);
  }

  a->TestOnlyUnmap();
  (void)sink;
  delete a;
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64MetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator64>();
}

TEST(SanitizerCommon, SizeClassAllocator64CompactMetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator64Compact>();
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32CompactMetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator32Compact>();
}

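// Counts OnMap/OnUnmap callbacks so the tests below can verify exactly when
// the allocators map and unmap memory from the OS.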
struct TestMapUnmapCallback {
  static int map_count, unmap_count;
  void OnMap(uptr p, uptr size) const { map_count++; }
  void OnUnmap(uptr p, uptr size) const { unmap_count++; }
};
int TestMapUnmapCallback::map_count;
int TestMapUnmapCallback::unmap_count;

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64MapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  typedef SizeClassAllocator64<
      kAllocatorSpace, kAllocatorSize, 16, DefaultSizeClassMap,
      TestMapUnmapCallback> Allocator64WithCallBack;
  Allocator64WithCallBack *a = new Allocator64WithCallBack;
  a->Init();
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);  // Allocator state.
  SizeClassAllocatorLocalCache<Allocator64WithCallBack> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();
  a->AllocateBatch(&stats, &cache, 64);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 3);  // State + alloc + metadata.
  a->TestOnlyUnmap();
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);  // The whole thing.
  delete a;
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32MapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  typedef SizeClassAllocator32<
      0, kAddressSpaceSize, 16, CompactSizeClassMap,
      TestMapUnmapCallback> Allocator32WithCallBack;
  Allocator32WithCallBack *a = new Allocator32WithCallBack;
  a->Init();
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);  // Allocator state.
  SizeClassAllocatorLocalCache<Allocator32WithCallBack> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();
  a->AllocateBatch(&stats, &cache, 64);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 2);  // State + alloc.
  a->TestOnlyUnmap();
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 2);  // The whole thing + alloc.
  delete a;
  // fprintf(stderr, "Map: %d Unmap: %d\n",
  //         TestMapUnmapCallback::map_count,
  //         TestMapUnmapCallback::unmap_count);
}

TEST(SanitizerCommon, LargeMmapAllocatorMapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  LargeMmapAllocator<TestMapUnmapCallback> a;
  a.Init();
  AllocatorStats stats;
  stats.Init();
  void *x = a.Allocate(&stats, 1 << 20, 1);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);
  a.Deallocate(&stats, x);
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);
}

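// Keeps allocating batches without ever freeing, so the primary allocator
// must eventually exhaust its region and die with an "Out of memory" CHECK;
// used with EXPECT_DEATH below.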
template<class Allocator>
void FailInAssertionOnOOM() {
  Allocator a;
  a.Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();
  for (int i = 0; i < 1000000; i++) {
    a.AllocateBatch(&stats, &cache, 64);
  }

  a.TestOnlyUnmap();
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64Overflow) {
  EXPECT_DEATH(FailInAssertionOnOOM<Allocator64>(), "Out of memory");
}
#endif

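// End-to-end test of the secondary allocator: bulk alloc/free, metadata
// round-trips, reverse-order deallocation, and large power-of-two alignments.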
TEST(SanitizerCommon, LargeMmapAllocator) {
  LargeMmapAllocator<> a;
  a.Init();
  AllocatorStats stats;
  stats.Init();

  static const int kNumAllocs = 1000;
  char *allocated[kNumAllocs];
  static const uptr size = 4000;
  // Allocate some.
  for (int i = 0; i < kNumAllocs; i++) {
    allocated[i] = (char *)a.Allocate(&stats, size, 1);
    CHECK(a.PointerIsMine(allocated[i]));
  }
  CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
  // Deallocate all.
  for (int i = 0; i < kNumAllocs; i++) {
    char *p = allocated[i];
    CHECK(a.PointerIsMine(p));
    a.Deallocate(&stats, p);
  }
  // Check that none are left.
  CHECK_EQ(a.TotalMemoryUsed(), 0);

  // Allocate some more, also add metadata.
  for (int i = 0; i < kNumAllocs; i++) {
    char *x = (char *)a.Allocate(&stats, size, 1);
    CHECK_GE(a.GetActuallyAllocatedSize(x), size);
    uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(x));
    *meta = i;
    allocated[i] = x;
  }
  for (int i = 0; i < kNumAllocs * kNumAllocs; i++) {
    char *p = allocated[i % kNumAllocs];
    CHECK(a.PointerIsMine(p));
    CHECK(a.PointerIsMine(p + 2000));
  }
  CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
  // Deallocate all in reverse order.
  for (int i = 0; i < kNumAllocs; i++) {
    int idx = kNumAllocs - i - 1;
    char *p = allocated[idx];
    uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(p));
    CHECK_EQ(*meta, idx);
    CHECK(a.PointerIsMine(p));
    a.Deallocate(&stats, p);
  }
  CHECK_EQ(a.TotalMemoryUsed(), 0);

  // Test alignments.
  uptr max_alignment = SANITIZER_WORDSIZE == 64 ? (1 << 28) : (1 << 24);
  for (uptr alignment = 8; alignment <= max_alignment; alignment *= 2) {
    const uptr kNumAlignedAllocs = 100;
    for (uptr i = 0; i < kNumAlignedAllocs; i++) {
      uptr size = ((i % 10) + 1) * 4096;
      char *p = allocated[i] = (char *)a.Allocate(&stats, size, alignment);
      CHECK_EQ(p, a.GetBlockBegin(p));
      CHECK_EQ(p, a.GetBlockBegin(p + size - 1));
      CHECK_EQ(p, a.GetBlockBegin(p + size / 2));
      CHECK_EQ(0, (uptr)allocated[i] % alignment);
      p[0] = p[size - 1] = 0;
    }
    for (uptr i = 0; i < kNumAlignedAllocs; i++) {
      a.Deallocate(&stats, allocated[i]);
    }
  }
}

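// Exercises the combined (primary + secondary) allocator through a local
// cache: rejects impossible sizes, then does randomized alloc/free rounds
// with per-chunk metadata checks.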
template <class PrimaryAllocator, class SecondaryAllocator, class AllocatorCache>
void TestCombinedAllocator() {
  typedef
      CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator>
      Allocator;
  Allocator *a = new Allocator;
  a->Init();

  AllocatorCache cache;
  memset(&cache, 0, sizeof(cache));
  a->InitCache(&cache);

  // Impossible requests must fail cleanly and return null.
  EXPECT_EQ(a->Allocate(&cache, -1, 1), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, -1, 1024), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1024), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1023, 1024), (void*)0);

  const uptr kNumAllocs = 100000;
  const uptr kNumIter = 10;
  for (uptr iter = 0; iter < kNumIter; iter++) {
    std::vector<void*> allocated;
    for (uptr i = 0; i < kNumAllocs; i++) {
      uptr size = (i % (1 << 14)) + 1;
      if ((i % 1024) == 0)
        size = 1 << (10 + (i % 14));
      void *x = a->Allocate(&cache, size, 1);
      uptr *meta = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_EQ(*meta, 0);
      *meta = size;
      allocated.push_back(x);
    }

    std::random_shuffle(allocated.begin(), allocated.end());

    for (uptr i = 0; i < kNumAllocs; i++) {
      void *x = allocated[i];
      uptr *meta = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_NE(*meta, 0);
      CHECK(a->PointerIsMine(x));
      *meta = 0;
      a->Deallocate(&cache, x);
    }
    allocated.clear();
    a->SwallowCache(&cache);
  }
  a->DestroyCache(&cache);
  a->TestOnlyUnmap();
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, CombinedAllocator64) {
  TestCombinedAllocator<Allocator64,
      LargeMmapAllocator<>,
      SizeClassAllocatorLocalCache<Allocator64> > ();
}

TEST(SanitizerCommon, CombinedAllocator64Compact) {
  TestCombinedAllocator<Allocator64Compact,
      LargeMmapAllocator<>,
      SizeClassAllocatorLocalCache<Allocator64Compact> > ();
}
#endif

TEST(SanitizerCommon, CombinedAllocator32Compact) {
  TestCombinedAllocator<Allocator32Compact,
      LargeMmapAllocator<>,
      SizeClassAllocatorLocalCache<Allocator32Compact> > ();
}

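// The pattern above mirrors a sanitizer's malloc path (sketch, assuming the
// same CombinedAllocator API as in the test):
//   void *p = a->Allocate(&thread_local_cache, size, alignment);
//   ...
//   a->Deallocate(&thread_local_cache, p);
// Small sizes are served from the per-thread cache backed by the primary
// size class allocator; oversized requests fall through to the secondary
// LargeMmapAllocator.

// Checks that a local cache returns all memory to the allocator on Drain():
// after each alloc/free/drain round the total memory usage must not grow.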
template <class AllocatorCache>
void TestSizeClassAllocatorLocalCache() {
  AllocatorCache cache;
  typedef typename AllocatorCache::Allocator Allocator;
  Allocator *a = new Allocator();

  a->Init();
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  const uptr kNumAllocs = 10000;
  const int kNumIter = 100;
  uptr saved_total = 0;
  for (int class_id = 1; class_id <= 5; class_id++) {
    for (int it = 0; it < kNumIter; it++) {
      void *allocated[kNumAllocs];
      for (uptr i = 0; i < kNumAllocs; i++) {
        allocated[i] = cache.Allocate(a, class_id);
      }
      for (uptr i = 0; i < kNumAllocs; i++) {
        cache.Deallocate(a, class_id, allocated[i]);
      }
      cache.Drain(a);
      uptr total_allocated = a->TotalMemoryUsed();
      if (it)
        CHECK_EQ(saved_total, total_allocated);
      saved_total = total_allocated;
    }
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64LocalCache) {
  TestSizeClassAllocatorLocalCache<
      SizeClassAllocatorLocalCache<Allocator64> >();
}

TEST(SanitizerCommon, SizeClassAllocator64CompactLocalCache) {
  TestSizeClassAllocatorLocalCache<
      SizeClassAllocatorLocalCache<Allocator64Compact> >();
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32CompactLocalCache) {
  TestSizeClassAllocatorLocalCache<
      SizeClassAllocatorLocalCache<Allocator32Compact> >();
}

#if SANITIZER_WORDSIZE == 64
typedef SizeClassAllocatorLocalCache<Allocator64> AllocatorCache;
static AllocatorCache static_allocator_cache;

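// Worker for AllocatorLeakTest: allocates through the file-scope cache and
// drains it, so a well-behaved allocator ends the thread with no net growth.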
void *AllocatorLeakTestWorker(void *arg) {
  typedef AllocatorCache::Allocator Allocator;
  Allocator *a = (Allocator*)(arg);
  static_allocator_cache.Allocate(a, 10);
  static_allocator_cache.Drain(a);
  return 0;
}

TEST(SanitizerCommon, AllocatorLeakTest) {
  typedef AllocatorCache::Allocator Allocator;
  Allocator a;
  a.Init();
  uptr total_used_memory = 0;
  for (int i = 0; i < 100; i++) {
    pthread_t t;
    EXPECT_EQ(0, pthread_create(&t, 0, AllocatorLeakTestWorker, &a));
    EXPECT_EQ(0, pthread_join(t, 0));
    if (i == 0)
      total_used_memory = a.TotalMemoryUsed();
    EXPECT_EQ(a.TotalMemoryUsed(), total_used_memory);
  }

  a.TestOnlyUnmap();
}

// Struct which is allocated to pass info to new threads.  The new thread frees
// it.
struct NewThreadParams {
  AllocatorCache *thread_cache;
  AllocatorCache::Allocator *allocator;
  uptr class_id;
};

// Called in a new thread.  Just frees its argument.
static void *DeallocNewThreadWorker(void *arg) {
  NewThreadParams *params = reinterpret_cast<NewThreadParams*>(arg);
  params->thread_cache->Deallocate(params->allocator, params->class_id, params);
  return NULL;
}

// The allocator cache is supposed to be POD and zero initialized.  We should be
// able to call Deallocate on a zeroed cache, and it will self-initialize.
TEST(Allocator, AllocatorCacheDeallocNewThread) {
  AllocatorCache::Allocator allocator;
  allocator.Init();
  AllocatorCache main_cache;
  AllocatorCache child_cache;
  memset(&main_cache, 0, sizeof(main_cache));
  memset(&child_cache, 0, sizeof(child_cache));

  uptr class_id = DefaultSizeClassMap::ClassID(sizeof(NewThreadParams));
  NewThreadParams *params = reinterpret_cast<NewThreadParams*>(
      main_cache.Allocate(&allocator, class_id));
  params->thread_cache = &child_cache;
  params->allocator = &allocator;
  params->class_id = class_id;
  pthread_t t;
  EXPECT_EQ(0, pthread_create(&t, 0, DeallocNewThreadWorker, params));
  EXPECT_EQ(0, pthread_join(t, 0));
}
#endif

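// Sanity check for the runtime's internal heap (InternalAlloc/InternalFree).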
TEST(Allocator, Basic) {
  char *p = (char*)InternalAlloc(10);
  EXPECT_NE(p, (char*)0);
  char *p2 = (char*)InternalAlloc(20);
  EXPECT_NE(p2, (char*)0);
  EXPECT_NE(p2, p);
  InternalFree(p);
  InternalFree(p2);
}

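// Hammers InternalAlloc with pseudo-random sizes (including 0); the fixed
// seed for my_rand_r keeps the sequence deterministic across runs.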
TEST(Allocator, Stress) {
  const int kCount = 1000;
  char *ptrs[kCount];
  unsigned rnd = 42;
  for (int i = 0; i < kCount; i++) {
    uptr sz = my_rand_r(&rnd) % 1000;
    char *p = (char*)InternalAlloc(sz);
    EXPECT_NE(p, (char*)0);
    ptrs[i] = p;
  }
  for (int i = 0; i < kCount; i++) {
    InternalFree(ptrs[i]);
  }
}

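// InternalScopedBuffer is an RAII wrapper around internal allocation: the
// buffer is sized in elements and released when the object leaves scope.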
TEST(Allocator, ScopedBuffer) {
  const int kSize = 512;
  {
    InternalScopedBuffer<int> int_buf(kSize);
    EXPECT_EQ(sizeof(int) * kSize, int_buf.size());  // NOLINT
  }
  InternalScopedBuffer<char> char_buf(kSize);
  EXPECT_EQ(sizeof(char) * kSize, char_buf.size());  // NOLINT
  internal_memset(char_buf.data(), 'c', kSize);
  for (int i = 0; i < kSize; i++) {
    EXPECT_EQ('c', char_buf[i]);
  }
}

#endif  // #if TSAN_DEBUG==0