//===-- asan_noinst_test.cc -----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// This test file should be compiled w/o asan instrumentation.
//===----------------------------------------------------------------------===//

#include "asan_allocator.h"
#include "asan_internal.h"
#include "asan_mapping.h"
#include "asan_stack.h"
#include "asan_test_utils.h"
#include "sanitizer/asan_interface.h"

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>  // for memset()
#include <algorithm>
#include <vector>
#include <limits>


TEST(AddressSanitizer, InternalSimpleDeathTest) {
  EXPECT_DEATH(exit(1), "");
}

static void MallocStress(size_t n) {
  u32 seed = my_rand();
  __asan::StackTrace stack1;
  stack1.trace[0] = 0xa123;
  stack1.trace[1] = 0xa456;
  stack1.size = 2;

  __asan::StackTrace stack2;
  stack2.trace[0] = 0xb123;
  stack2.trace[1] = 0xb456;
  stack2.size = 2;

  __asan::StackTrace stack3;
  stack3.trace[0] = 0xc123;
  stack3.trace[1] = 0xc456;
  stack3.size = 2;

  std::vector<void *> vec;
  for (size_t i = 0; i < n; i++) {
    if ((i % 3) == 0) {
      if (vec.empty()) continue;
      size_t idx = my_rand_r(&seed) % vec.size();
      void *ptr = vec[idx];
      vec[idx] = vec.back();
      vec.pop_back();
      __asan::asan_free(ptr, &stack1, __asan::FROM_MALLOC);
    } else {
      size_t size = my_rand_r(&seed) % 1000 + 1;
      switch ((my_rand_r(&seed) % 128)) {
        case 0: size += 1024; break;
        case 1: size += 2048; break;
        case 2: size += 4096; break;
      }
      size_t alignment = 1 << (my_rand_r(&seed) % 10 + 1);
      char *ptr = (char*)__asan::asan_memalign(alignment, size,
                                               &stack2, __asan::FROM_MALLOC);
      vec.push_back(ptr);
      ptr[0] = 0;
      ptr[size-1] = 0;
      ptr[size/2] = 0;
    }
  }
  for (size_t i = 0; i < vec.size(); i++)
    __asan::asan_free(vec[i], &stack3, __asan::FROM_MALLOC);
}


TEST(AddressSanitizer, NoInstMallocTest) {
#ifdef __arm__
  MallocStress(300000);
#else
  MallocStress(1000000);
#endif
}

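// For reference (default ASan configuration, shadow scale 3):
// MemToShadow(addr) computes (addr >> 3) + shadow offset, so one shadow
// byte describes eight application bytes: 0 means fully addressable,
// k in 1..7 means only the first k bytes are addressable, and other
// values mark redzones or freed memory.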
static void PrintShadow(const char *tag, uptr ptr, size_t size) {
  fprintf(stderr, "%s shadow: %lx size % 3ld: ", tag, (long)ptr, (long)size);
  uptr prev_shadow = 0;
  for (sptr i = -32; i < (sptr)size + 32; i++) {
    uptr shadow = __asan::MemToShadow(ptr + i);
    if (i == 0 || i == (sptr)size)
      fprintf(stderr, ".");
    if (shadow != prev_shadow) {
      prev_shadow = shadow;
      fprintf(stderr, "%02x", (int)*(u8*)shadow);
    }
  }
  fprintf(stderr, "\n");
}

TEST(AddressSanitizer, DISABLED_InternalPrintShadow) {
  for (size_t size = 1; size <= 513; size++) {
    char *ptr = new char[size];
    PrintShadow("m", (uptr)ptr, size);
    delete [] ptr;
    PrintShadow("f", (uptr)ptr, size);
  }
}

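// A pool of realistic-looking program counters used as raw material for
// the stack-compression tests below; the 64-bit values are only included
// on 64-bit targets.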
static uptr pc_array[] = {
#if SANITIZER_WORDSIZE == 64
  0x7effbf756068ULL,
  0x7effbf75e5abULL,
  0x7effc0625b7cULL,
  0x7effc05b8997ULL,
  0x7effbf990577ULL,
  0x7effbf990c56ULL,
  0x7effbf992f3cULL,
  0x7effbf950c22ULL,
  0x7effc036dba0ULL,
  0x7effc03638a3ULL,
  0x7effc035be4aULL,
  0x7effc0539c45ULL,
  0x7effc0539a65ULL,
  0x7effc03db9b3ULL,
  0x7effc03db100ULL,
  0x7effc037c7b8ULL,
  0x7effc037bfffULL,
  0x7effc038b777ULL,
  0x7effc038021cULL,
  0x7effc037c7d1ULL,
  0x7effc037bfffULL,
  0x7effc038b777ULL,
  0x7effc038021cULL,
  0x7effc037c7d1ULL,
  0x7effc037bfffULL,
  0x7effc038b777ULL,
  0x7effc038021cULL,
  0x7effc037c7d1ULL,
  0x7effc037bfffULL,
  0x7effc0520d26ULL,
  0x7effc009ddffULL,
  0x7effbf90bb50ULL,
  0x7effbdddfa69ULL,
  0x7effbdde1fe2ULL,
  0x7effbdde2424ULL,
  0x7effbdde27b3ULL,
  0x7effbddee53bULL,
  0x7effbdde1988ULL,
  0x7effbdde0904ULL,
  0x7effc106ce0dULL,
  0x7effbcc3fa04ULL,
  0x7effbcc3f6a4ULL,
  0x7effbcc3e726ULL,
  0x7effbcc40852ULL,
  0x7effb681ec4dULL,
#endif  // SANITIZER_WORDSIZE
  0xB0B5E768,
  0x7B682EC1,
  0x367F9918,
  0xAE34E13,
  0xBA0C6C6,
  0x13250F46,
  0xA0D6A8AB,
  0x2B07C1A8,
  0x6C844F4A,
  0x2321B53,
  0x1F3D4F8F,
  0x3FE2924B,
  0xB7A2F568,
  0xBD23950A,
  0x61020930,
  0x33E7970C,
  0x405998A1,
  0x59F3551D,
  0x350E3028,
  0xBC55A28D,
  0x361F3AED,
  0xBEAD0F73,
  0xAEF28479,
  0x757E971F,
  0xAEBA450,
  0x43AD22F5,
  0x8C2C50C4,
  0x7AD8A2E1,
  0x69EE4EE8,
  0xC08DFF,
  0x4BA6538,
  0x3708AB2,
  0xC24B6475,
  0x7C8890D7,
  0x6662495F,
  0x9B641689,
  0xD3596B,
  0xA1049569,
  0x44CBC16,
  0x4D39C39F
};

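// StackTrace::CompressStack may drop trailing frames when the output
// buffer is too small, which is why the round-trip below only requires
// n_frames <= stack0.size and compares the surviving prefix.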
void CompressStackTraceTest(size_t n_iter) {
  u32 seed = my_rand();
  const size_t kNumPcs = ARRAY_SIZE(pc_array);
  u32 compressed[2 * kNumPcs];

  for (size_t iter = 0; iter < n_iter; iter++) {
    std::random_shuffle(pc_array, pc_array + kNumPcs);
    __asan::StackTrace stack0, stack1;
    stack0.CopyFrom(pc_array, kNumPcs);
    stack0.size = std::max((size_t)1, (size_t)(my_rand_r(&seed) % stack0.size));
    size_t compress_size =
      std::max((size_t)2, (size_t)my_rand_r(&seed) % (2 * kNumPcs));
    size_t n_frames =
      __asan::StackTrace::CompressStack(&stack0, compressed, compress_size);
    Ident(n_frames);
    assert(n_frames <= stack0.size);
    __asan::StackTrace::UncompressStack(&stack1, compressed, compress_size);
    assert(stack1.size == n_frames);
    for (size_t i = 0; i < stack1.size; i++) {
      assert(stack0.trace[i] == stack1.trace[i]);
    }
  }
}

TEST(AddressSanitizer, CompressStackTraceTest) {
  CompressStackTraceTest(10000);
}

void CompressStackTraceBenchmark(size_t n_iter) {
  const size_t kNumPcs = ARRAY_SIZE(pc_array);
  u32 compressed[2 * kNumPcs];
  std::random_shuffle(pc_array, pc_array + kNumPcs);

  __asan::StackTrace stack0;
  stack0.CopyFrom(pc_array, kNumPcs);
  stack0.size = kNumPcs;
  for (size_t iter = 0; iter < n_iter; iter++) {
    size_t compress_size = kNumPcs;
    size_t n_frames =
      __asan::StackTrace::CompressStack(&stack0, compressed, compress_size);
    Ident(n_frames);
  }
}

TEST(AddressSanitizer, CompressStackTraceBenchmark) {
  CompressStackTraceBenchmark(1 << 24);
}

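// Freed chunks are put into a quarantine rather than being reused right
// away; the loop below measures (roughly) how many malloc/free cycles it
// takes before the same address comes back from the allocator.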
TEST(AddressSanitizer, QuarantineTest) {
  __asan::StackTrace stack;
  stack.trace[0] = 0x890;
  stack.size = 1;

  const int size = 32;
  void *p = __asan::asan_malloc(size, &stack);
  __asan::asan_free(p, &stack, __asan::FROM_MALLOC);
  size_t i;
  size_t max_i = 1 << 30;
  for (i = 0; i < max_i; i++) {
    void *p1 = __asan::asan_malloc(size, &stack);
    __asan::asan_free(p1, &stack, __asan::FROM_MALLOC);
    if (p1 == p) break;
  }
  // fprintf(stderr, "i=%ld\n", i);
  EXPECT_GE(i, 100000U);
  EXPECT_LT(i, max_i);
}

void *ThreadedQuarantineTestWorker(void *unused) {
  (void)unused;
  u32 seed = my_rand();
  __asan::StackTrace stack;
  stack.trace[0] = 0x890;
  stack.size = 1;

  for (size_t i = 0; i < 1000; i++) {
    void *p = __asan::asan_malloc(1 + (my_rand_r(&seed) % 4000), &stack);
    __asan::asan_free(p, &stack, __asan::FROM_MALLOC);
  }
  return NULL;
}

// Check that the thread-local allocators are flushed when threads are
// destroyed.
TEST(AddressSanitizer, ThreadedQuarantineTest) {
  const int n_threads = 3000;
  size_t mmaped1 = __asan_get_heap_size();
  for (int i = 0; i < n_threads; i++) {
    pthread_t t;
    PTHREAD_CREATE(&t, NULL, ThreadedQuarantineTestWorker, 0);
    PTHREAD_JOIN(t, 0);
    size_t mmaped2 = __asan_get_heap_size();
    EXPECT_LT(mmaped2 - mmaped1, 320U * (1 << 20));
  }
}

void *ThreadedOneSizeMallocStress(void *unused) {
  (void)unused;
  __asan::StackTrace stack;
  stack.trace[0] = 0x890;
  stack.size = 1;
  const size_t kNumMallocs = 1000;
  for (int iter = 0; iter < 1000; iter++) {
    void *p[kNumMallocs];
    for (size_t i = 0; i < kNumMallocs; i++) {
      p[i] = __asan::asan_malloc(32, &stack);
    }
    for (size_t i = 0; i < kNumMallocs; i++) {
      __asan::asan_free(p[i], &stack, __asan::FROM_MALLOC);
    }
  }
  return NULL;
}

TEST(AddressSanitizer, ThreadedOneSizeMallocStressTest) {
  const int kNumThreads = 4;
  pthread_t t[kNumThreads];
  for (int i = 0; i < kNumThreads; i++) {
    PTHREAD_CREATE(&t[i], 0, ThreadedOneSizeMallocStress, 0);
  }
  for (int i = 0; i < kNumThreads; i++) {
    PTHREAD_JOIN(t[i], 0);
  }
}

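// ASan carves the address space into application memory plus shadow
// regions (low shadow, shadow gap, high shadow); a wild memset landing in
// any of them should abort with the offending region named in the report.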
TEST(AddressSanitizer, MemsetWildAddressTest) {
  using __asan::kHighMemEnd;
  typedef void*(*memset_p)(void*, int, size_t);
  // Prevent inlining of memset().
  volatile memset_p libc_memset = (memset_p)memset;
  EXPECT_DEATH(libc_memset((void*)(kLowShadowBeg + 200), 0, 100),
               (kLowShadowEnd == 0) ? "unknown-crash.*shadow gap"
                                    : "unknown-crash.*low shadow");
  EXPECT_DEATH(libc_memset((void*)(kShadowGapBeg + 200), 0, 100),
               "unknown-crash.*shadow gap");
  EXPECT_DEATH(libc_memset((void*)(kHighShadowBeg + 200), 0, 100),
               "unknown-crash.*high shadow");
}

TEST(AddressSanitizerInterface, GetEstimatedAllocatedSize) {
#if ASAN_ALLOCATOR_VERSION == 1
  EXPECT_EQ(1U, __asan_get_estimated_allocated_size(0));
#elif ASAN_ALLOCATOR_VERSION == 2
  EXPECT_EQ(0U, __asan_get_estimated_allocated_size(0));
#endif
  const size_t sizes[] = { 1, 30, 1<<30 };
  for (size_t i = 0; i < 3; i++) {
    EXPECT_EQ(sizes[i], __asan_get_estimated_allocated_size(sizes[i]));
  }
}

static const char* kGetAllocatedSizeErrorMsg =
  "attempting to call __asan_get_allocated_size()";

TEST(AddressSanitizerInterface, GetAllocatedSizeAndOwnershipTest) {
  const size_t kArraySize = 100;
  char *array = Ident((char*)malloc(kArraySize));
  int *int_ptr = Ident(new int);

  // Allocated memory is owned by the allocator. The allocated size should
  // equal the requested size.
  EXPECT_EQ(true, __asan_get_ownership(array));
  EXPECT_EQ(kArraySize, __asan_get_allocated_size(array));
  EXPECT_EQ(true, __asan_get_ownership(int_ptr));
  EXPECT_EQ(sizeof(int), __asan_get_allocated_size(int_ptr));

  // We cannot call GetAllocatedSize on memory we didn't allocate, or on
  // interior pointers (not returned by a previous malloc).
  void *wild_addr = (void*)0x1;
  EXPECT_FALSE(__asan_get_ownership(wild_addr));
  EXPECT_DEATH(__asan_get_allocated_size(wild_addr), kGetAllocatedSizeErrorMsg);
  EXPECT_FALSE(__asan_get_ownership(array + kArraySize / 2));
  EXPECT_DEATH(__asan_get_allocated_size(array + kArraySize / 2),
               kGetAllocatedSizeErrorMsg);

  // NULL is not owned, but is a valid argument for __asan_get_allocated_size().
  EXPECT_FALSE(__asan_get_ownership(NULL));
  EXPECT_EQ(0U, __asan_get_allocated_size(NULL));

  // Once memory is freed, it is no longer owned, and calling
  // GetAllocatedSize on it is forbidden.
  free(array);
  EXPECT_FALSE(__asan_get_ownership(array));
  EXPECT_DEATH(__asan_get_allocated_size(array), kGetAllocatedSizeErrorMsg);
  delete int_ptr;

  void *zero_alloc = Ident(malloc(0));
  if (zero_alloc != 0) {
    // If malloc(0) is not null, this pointer is owned and should have a
    // valid allocated size.
    EXPECT_TRUE(__asan_get_ownership(zero_alloc));
    // Allocated size is 0 or 1 depending on the allocator used.
    EXPECT_LT(__asan_get_allocated_size(zero_alloc), 2U);
  }
  free(zero_alloc);
}

TEST(AddressSanitizerInterface, GetCurrentAllocatedBytesTest) {
  size_t before_malloc, after_malloc, after_free;
  char *array;
  const size_t kMallocSize = 100;
  before_malloc = __asan_get_current_allocated_bytes();

  array = Ident((char*)malloc(kMallocSize));
  after_malloc = __asan_get_current_allocated_bytes();
  EXPECT_EQ(before_malloc + kMallocSize, after_malloc);

  free(array);
  after_free = __asan_get_current_allocated_bytes();
  EXPECT_EQ(before_malloc, after_free);
}

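// Deliberately double-frees a heap pointer; several death tests below use
// this to terminate the child process with a predictable ASan report.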
static void DoDoubleFree() {
  int *x = Ident(new int);
  delete Ident(x);
  delete Ident(x);
}

#if ASAN_ALLOCATOR_VERSION == 1
// This test is run in a separate process, so that the large malloced
// chunk won't remain in the free lists after the test.
// Note: use ASSERT_* instead of EXPECT_* here.
static void RunGetHeapSizeTestAndDie() {
  size_t old_heap_size, new_heap_size, heap_growth;
  // We are unlikely to have a chunk of this size in the free list.
  static const size_t kLargeMallocSize = 1 << 29;  // 512M
  old_heap_size = __asan_get_heap_size();
  fprintf(stderr, "allocating %zu bytes:\n", kLargeMallocSize);
  free(Ident(malloc(kLargeMallocSize)));
  new_heap_size = __asan_get_heap_size();
  heap_growth = new_heap_size - old_heap_size;
  fprintf(stderr, "heap growth after first malloc: %zu\n", heap_growth);
  ASSERT_GE(heap_growth, kLargeMallocSize);
  ASSERT_LE(heap_growth, 2 * kLargeMallocSize);

  // Now the large chunk should be in the free list, so it can be
  // allocated again without increasing the heap size.
  old_heap_size = new_heap_size;
  free(Ident(malloc(kLargeMallocSize)));
  heap_growth = __asan_get_heap_size() - old_heap_size;
  fprintf(stderr, "heap growth after second malloc: %zu\n", heap_growth);
  ASSERT_LT(heap_growth, kLargeMallocSize);

  // Test passed. Now die with expected double-free.
  DoDoubleFree();
}

TEST(AddressSanitizerInterface, GetHeapSizeTest) {
  EXPECT_DEATH(RunGetHeapSizeTestAndDie(), "double-free");
}
#elif ASAN_ALLOCATOR_VERSION == 2
TEST(AddressSanitizerInterface, GetHeapSizeTest) {
  // asan_allocator2 does not keep huge chunks in the free list; it unmaps
  // them instead. The chunk should be larger than the quarantine size,
  // otherwise it will be stuck in the quarantine instead of being unmapped.
  static const size_t kLargeMallocSize = 1 << 29;  // 512M
  uptr old_heap_size = __asan_get_heap_size();
  for (int i = 0; i < 3; i++) {
    // fprintf(stderr, "allocating %zu bytes:\n", kLargeMallocSize);
    free(Ident(malloc(kLargeMallocSize)));
    EXPECT_EQ(old_heap_size, __asan_get_heap_size());
  }
}
#endif

// Note: use ASSERT_* instead of EXPECT_* here.
static void DoLargeMallocForGetFreeBytesTestAndDie() {
#if ASAN_ALLOCATOR_VERSION == 1
  // asan_allocator2 does not keep large chunks in free_lists, so this test
  // will not work.
  size_t old_free_bytes, new_free_bytes;
  static const size_t kLargeMallocSize = 1 << 29;  // 512M
  // If we malloc and free a large memory chunk, it will not fall
  // into quarantine and will be available for future requests.
  old_free_bytes = __asan_get_free_bytes();
  fprintf(stderr, "allocating %zu bytes:\n", kLargeMallocSize);
  fprintf(stderr, "free bytes before malloc: %zu\n", old_free_bytes);
  free(Ident(malloc(kLargeMallocSize)));
  new_free_bytes = __asan_get_free_bytes();
  fprintf(stderr, "free bytes after malloc and free: %zu\n", new_free_bytes);
  ASSERT_GE(new_free_bytes, old_free_bytes + kLargeMallocSize);
#endif  // ASAN_ALLOCATOR_VERSION
  // Test passed.
  DoDoubleFree();
}

TEST(AddressSanitizerInterface, GetFreeBytesTest) {
#if ASAN_ALLOCATOR_VERSION == 1
  // Allocate a small chunk. The allocator will then probably have a batch
  // of such chunks ready to fulfill future requests, so each subsequent
  // request decreases the number of free bytes. Do this only on systems
  // with enough memory for such assumptions.
  if (SANITIZER_WORDSIZE == 64 && !ASAN_LOW_MEMORY) {
    static const size_t kNumOfChunks = 100;
    static const size_t kChunkSize = 100;
    char *chunks[kNumOfChunks];
    size_t i;
    size_t old_free_bytes, new_free_bytes;
    chunks[0] = Ident((char*)malloc(kChunkSize));
    old_free_bytes = __asan_get_free_bytes();
    for (i = 1; i < kNumOfChunks; i++) {
      chunks[i] = Ident((char*)malloc(kChunkSize));
      new_free_bytes = __asan_get_free_bytes();
      EXPECT_LT(new_free_bytes, old_free_bytes);
      old_free_bytes = new_free_bytes;
    }
    for (i = 0; i < kNumOfChunks; i++)
      free(chunks[i]);
  }
#endif
  EXPECT_DEATH(DoLargeMallocForGetFreeBytesTestAndDie(), "double-free");
}

static const size_t kManyThreadsMallocSizes[] = {5, 1UL<<10, 1UL<<14, 357};
static const size_t kManyThreadsIterations = 250;
static const size_t kManyThreadsNumThreads =
  (SANITIZER_WORDSIZE == 32) ? 40 : 200;

void *ManyThreadsWithStatsWorker(void *arg) {
  (void)arg;
  for (size_t iter = 0; iter < kManyThreadsIterations; iter++) {
    for (size_t size_index = 0; size_index < 4; size_index++) {
      free(Ident(malloc(kManyThreadsMallocSizes[size_index])));
    }
  }
  // Just one large allocation.
  free(Ident(malloc(1 << 20)));
  return 0;
}

TEST(AddressSanitizerInterface, ManyThreadsWithStatsStressTest) {
  size_t before_test, after_test, i;
  pthread_t threads[kManyThreadsNumThreads];
  before_test = __asan_get_current_allocated_bytes();
  for (i = 0; i < kManyThreadsNumThreads; i++) {
    PTHREAD_CREATE(&threads[i], 0,
                   (void* (*)(void *x))ManyThreadsWithStatsWorker, (void*)i);
  }
  for (i = 0; i < kManyThreadsNumThreads; i++) {
    PTHREAD_JOIN(threads[i], 0);
  }
  after_test = __asan_get_current_allocated_bytes();
  // ASan stats also reflect memory usage of internal ASan RTL structs,
  // so we can't check for equality here.
  EXPECT_LT(after_test, before_test + (1UL<<20));
}

TEST(AddressSanitizerInterface, ExitCode) {
  int original_exit_code = __asan_set_error_exit_code(7);
  EXPECT_EXIT(DoDoubleFree(), ::testing::ExitedWithCode(7), "");
  EXPECT_EQ(7, __asan_set_error_exit_code(8));
  EXPECT_EXIT(DoDoubleFree(), ::testing::ExitedWithCode(8), "");
  EXPECT_EQ(8, __asan_set_error_exit_code(original_exit_code));
  EXPECT_EXIT(DoDoubleFree(),
              ::testing::ExitedWithCode(original_exit_code), "");
}

static void MyDeathCallback() {
  fprintf(stderr, "MyDeathCallback\n");
}

TEST(AddressSanitizerInterface, DeathCallbackTest) {
  __asan_set_death_callback(MyDeathCallback);
  EXPECT_DEATH(DoDoubleFree(), "MyDeathCallback");
  __asan_set_death_callback(NULL);
}

static const char* kUseAfterPoisonErrorMessage = "use-after-poison";

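// Check a single byte's addressability by querying the shadow directly,
// without actually touching the byte.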
#define GOOD_ACCESS(ptr, offset)  \
    EXPECT_FALSE(__asan::AddressIsPoisoned((uptr)(ptr + offset)))

#define BAD_ACCESS(ptr, offset) \
    EXPECT_TRUE(__asan::AddressIsPoisoned((uptr)(ptr + offset)))

TEST(AddressSanitizerInterface, SimplePoisonMemoryRegionTest) {
  char *array = Ident((char*)malloc(120));
  // Poison array[40..80).
  __asan_poison_memory_region(array + 40, 40);
  GOOD_ACCESS(array, 39);
  GOOD_ACCESS(array, 80);
  BAD_ACCESS(array, 40);
  BAD_ACCESS(array, 60);
  BAD_ACCESS(array, 79);
  EXPECT_DEATH(__asan_report_error(0, 0, 0, (uptr)(array + 40), true, 1),
               kUseAfterPoisonErrorMessage);
  __asan_unpoison_memory_region(array + 40, 40);
  // Access previously poisoned memory.
  GOOD_ACCESS(array, 40);
  GOOD_ACCESS(array, 79);
  free(array);
}

TEST(AddressSanitizerInterface, OverlappingPoisonMemoryRegionTest) {
  char *array = Ident((char*)malloc(120));
  // Poison [0..40) and [80..120)
  __asan_poison_memory_region(array, 40);
  __asan_poison_memory_region(array + 80, 40);
  BAD_ACCESS(array, 20);
  GOOD_ACCESS(array, 60);
  BAD_ACCESS(array, 100);
  // Poison whole array - [0..120)
  __asan_poison_memory_region(array, 120);
  BAD_ACCESS(array, 60);
  // Unpoison [24..96)
  __asan_unpoison_memory_region(array + 24, 72);
  BAD_ACCESS(array, 23);
  GOOD_ACCESS(array, 24);
  GOOD_ACCESS(array, 60);
  GOOD_ACCESS(array, 95);
  BAD_ACCESS(array, 96);
  free(array);
}

TEST(AddressSanitizerInterface, PushAndPopWithPoisoningTest) {
  // Vector of capacity 20
  char *vec = Ident((char*)malloc(20));
  __asan_poison_memory_region(vec, 20);
  for (size_t i = 0; i < 7; i++) {
    // Simulate push_back.
    __asan_unpoison_memory_region(vec + i, 1);
    GOOD_ACCESS(vec, i);
    BAD_ACCESS(vec, i + 1);
  }
  for (size_t i = 7; i > 0; i--) {
    // Simulate pop_back.
    __asan_poison_memory_region(vec + i - 1, 1);
    BAD_ACCESS(vec, i - 1);
    if (i > 1) GOOD_ACCESS(vec, i - 2);
  }
  free(vec);
}

TEST(AddressSanitizerInterface, GlobalRedzones) {
  GOOD_ACCESS(glob1, 1 - 1);
  GOOD_ACCESS(glob2, 2 - 1);
  GOOD_ACCESS(glob3, 3 - 1);
  GOOD_ACCESS(glob4, 4 - 1);
  GOOD_ACCESS(glob5, 5 - 1);
  GOOD_ACCESS(glob6, 6 - 1);
  GOOD_ACCESS(glob7, 7 - 1);
  GOOD_ACCESS(glob8, 8 - 1);
  GOOD_ACCESS(glob9, 9 - 1);
  GOOD_ACCESS(glob10, 10 - 1);
  GOOD_ACCESS(glob11, 11 - 1);
  GOOD_ACCESS(glob12, 12 - 1);
  GOOD_ACCESS(glob13, 13 - 1);
  GOOD_ACCESS(glob14, 14 - 1);
  GOOD_ACCESS(glob15, 15 - 1);
  GOOD_ACCESS(glob16, 16 - 1);
  GOOD_ACCESS(glob17, 17 - 1);
  GOOD_ACCESS(glob1000, 1000 - 1);
  GOOD_ACCESS(glob10000, 10000 - 1);
  GOOD_ACCESS(glob100000, 100000 - 1);

  BAD_ACCESS(glob1, 1);
  BAD_ACCESS(glob2, 2);
  BAD_ACCESS(glob3, 3);
  BAD_ACCESS(glob4, 4);
  BAD_ACCESS(glob5, 5);
  BAD_ACCESS(glob6, 6);
  BAD_ACCESS(glob7, 7);
  BAD_ACCESS(glob8, 8);
  BAD_ACCESS(glob9, 9);
  BAD_ACCESS(glob10, 10);
  BAD_ACCESS(glob11, 11);
  BAD_ACCESS(glob12, 12);
  BAD_ACCESS(glob13, 13);
  BAD_ACCESS(glob14, 14);
  BAD_ACCESS(glob15, 15);
  BAD_ACCESS(glob16, 16);
  BAD_ACCESS(glob17, 17);
  BAD_ACCESS(glob1000, 1000);
  BAD_ACCESS(glob1000, 1100);  // Redzone is at least 101 bytes.
  BAD_ACCESS(glob10000, 10000);
  BAD_ACCESS(glob10000, 11000);  // Redzone is at least 1001 bytes.
  BAD_ACCESS(glob100000, 100000);
  BAD_ACCESS(glob100000, 110000);  // Redzone is at least 10001 bytes.
}

// Make sure that, within each aligned block of size 2^granularity, no
// "true" (poisoned) value precedes a "false" (addressable) one.
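// E.g. with granularity 3 (8-byte blocks), {F,F,T,F,T,T,T,T} becomes
// {F,F,F,F,T,T,T,T}: within a block, poisoned ("true") bytes may only
// form a contiguous suffix.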
static void MakeShadowValid(bool *shadow, int length, int granularity) {
  bool can_be_poisoned = true;
  for (int i = length - 1; i >= 0; i--) {
    if (!shadow[i])
      can_be_poisoned = false;
    if (!can_be_poisoned)
      shadow[i] = false;
    if (i % (1 << granularity) == 0) {
      can_be_poisoned = true;
    }
  }
}

TEST(AddressSanitizerInterface, PoisoningStressTest) {
  const size_t kSize = 24;
  bool expected[kSize];
  char *arr = Ident((char*)malloc(kSize));
  for (size_t l1 = 0; l1 < kSize; l1++) {
    for (size_t s1 = 1; l1 + s1 <= kSize; s1++) {
      for (size_t l2 = 0; l2 < kSize; l2++) {
        for (size_t s2 = 1; l2 + s2 <= kSize; s2++) {
          // Poison [l1, l1+s1), [l2, l2+s2) and check result.
          __asan_unpoison_memory_region(arr, kSize);
          __asan_poison_memory_region(arr + l1, s1);
          __asan_poison_memory_region(arr + l2, s2);
          memset(expected, false, kSize);
          memset(expected + l1, true, s1);
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          memset(expected + l2, true, s2);
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          for (size_t i = 0; i < kSize; i++) {
            ASSERT_EQ(expected[i], __asan_address_is_poisoned(arr + i));
          }
          // Unpoison [l1, l1+s1) and [l2, l2+s2) and check result.
          __asan_poison_memory_region(arr, kSize);
          __asan_unpoison_memory_region(arr + l1, s1);
          __asan_unpoison_memory_region(arr + l2, s2);
          memset(expected, true, kSize);
          memset(expected + l1, false, s1);
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          memset(expected + l2, false, s2);
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          for (size_t i = 0; i < kSize; i++) {
            ASSERT_EQ(expected[i], __asan_address_is_poisoned(arr + i));
          }
        }
      }
    }
  }
}

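// __asan_region_is_poisoned(beg, size) returns the address of the first
// poisoned byte in [beg, beg+size), or 0 if the whole region is
// addressable; here it is exercised against heap redzones byte by byte.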
TEST(AddressSanitizerInterface, PoisonedRegion) {
  size_t rz = 16;
  for (size_t size = 1; size <= 64; size++) {
    char *p = new char[size];
    uptr x = reinterpret_cast<uptr>(p);
    for (size_t beg = 0; beg < size + rz; beg++) {
      for (size_t end = beg; end < size + rz; end++) {
        uptr first_poisoned = __asan_region_is_poisoned(x + beg, end - beg);
        if (beg == end) {
          EXPECT_FALSE(first_poisoned);
        } else if (beg < size && end <= size) {
          EXPECT_FALSE(first_poisoned);
        } else if (beg >= size) {
          EXPECT_EQ(x + beg, first_poisoned);
        } else {
          EXPECT_GT(end, size);
          EXPECT_EQ(x + size, first_poisoned);
        }
      }
    }
    delete [] p;
  }
}

// This is a performance benchmark for manual runs.
// ASan's memset interceptor calls mem_is_zero for the entire shadow region.
// The profile should look like this:
//     89.10%   [.] __memset_sse2
//     10.50%   [.] __sanitizer::mem_is_zero
// I.e. mem_is_zero should consume roughly SHADOW_GRANULARITY times fewer
// CPU cycles than memset itself.
TEST(AddressSanitizerInterface, DISABLED_Stress_memset) {
  size_t size = 1 << 20;
  char *x = new char[size];
  for (int i = 0; i < 100000; i++)
    Ident(memset)(x, 0, size);
  delete [] x;
}

static const char *kInvalidPoisonMessage = "invalid-poison-memory-range";
static const char *kInvalidUnpoisonMessage = "invalid-unpoison-memory-range";

TEST(AddressSanitizerInterface, DISABLED_InvalidPoisonAndUnpoisonCallsTest) {
  char *array = Ident((char*)malloc(120));
  __asan_unpoison_memory_region(array, 120);
  // Try to unpoison memory we don't own.
  EXPECT_DEATH(__asan_unpoison_memory_region(array, 121),
               kInvalidUnpoisonMessage);
  EXPECT_DEATH(__asan_unpoison_memory_region(array - 1, 120),
               kInvalidUnpoisonMessage);

  __asan_poison_memory_region(array, 120);
  // Try to poison memory we don't own.
  EXPECT_DEATH(__asan_poison_memory_region(array, 121), kInvalidPoisonMessage);
  EXPECT_DEATH(__asan_poison_memory_region(array - 1, 120),
               kInvalidPoisonMessage);
  free(array);
}

static void ErrorReportCallbackOneToZ(const char *report) {
  int report_len = strlen(report);
  ASSERT_EQ(6, write(2, "ABCDEF", 6));
  ASSERT_EQ(report_len, write(2, report, report_len));
  ASSERT_EQ(6, write(2, "ABCDEF", 6));
  _exit(1);
}

TEST(AddressSanitizerInterface, SetErrorReportCallbackTest) {
  __asan_set_error_report_callback(ErrorReportCallbackOneToZ);
  EXPECT_DEATH(__asan_report_error(0, 0, 0, 0, true, 1),
               ASAN_PCRE_DOTALL "ABCDEF.*AddressSanitizer.*WRITE.*ABCDEF");
  __asan_set_error_report_callback(NULL);
}

TEST(AddressSanitizerInterface, GetOwnershipStressTest) {
  std::vector<char *> pointers;
  std::vector<size_t> sizes;
#if ASAN_ALLOCATOR_VERSION == 1
  const size_t kNumMallocs =
      (SANITIZER_WORDSIZE <= 32 || ASAN_LOW_MEMORY) ? 1 << 10 : 1 << 14;
#elif ASAN_ALLOCATOR_VERSION == 2  // too slow with asan_allocator2. :(
  const size_t kNumMallocs = 1 << 9;
#endif
  for (size_t i = 0; i < kNumMallocs; i++) {
    size_t size = i * 100 + 1;
    pointers.push_back((char*)malloc(size));
    sizes.push_back(size);
  }
  for (size_t i = 0; i < 4000000; i++) {
    EXPECT_FALSE(__asan_get_ownership(&pointers));
    EXPECT_FALSE(__asan_get_ownership((void*)0x1234));
    size_t idx = i % kNumMallocs;
    EXPECT_TRUE(__asan_get_ownership(pointers[idx]));
    EXPECT_EQ(sizes[idx], __asan_get_allocated_size(pointers[idx]));
  }
  for (size_t i = 0, n = pointers.size(); i < n; i++)
    free(pointers[i]);
}

TEST(AddressSanitizerInterface, CallocOverflow) {
  size_t kArraySize = 4096;
  volatile size_t kMaxSizeT = std::numeric_limits<size_t>::max();
  volatile size_t kArraySize2 = kMaxSizeT / kArraySize + 10;
  void *p = calloc(kArraySize, kArraySize2);  // Should return 0.
  EXPECT_EQ(0L, Ident(p));
}
843