//===-- asan_noinst_test.cc -----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// This test file should be compiled w/o asan instrumentation.
//===----------------------------------------------------------------------===//

#include "asan_allocator.h"
#include "asan_internal.h"
#include "asan_mapping.h"
#include "asan_stack.h"
#include "asan_test_utils.h"
#include "sanitizer/asan_interface.h"

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>  // for memset()
#include <algorithm>
#include <vector>


TEST(AddressSanitizer, InternalSimpleDeathTest) {
  EXPECT_DEATH(exit(1), "");
}

static void MallocStress(size_t n) {
  u32 seed = my_rand();
  __asan::StackTrace stack1;
  stack1.trace[0] = 0xa123;
  stack1.trace[1] = 0xa456;
  stack1.size = 2;

  __asan::StackTrace stack2;
  stack2.trace[0] = 0xb123;
  stack2.trace[1] = 0xb456;
  stack2.size = 2;

  __asan::StackTrace stack3;
  stack3.trace[0] = 0xc123;
  stack3.trace[1] = 0xc456;
  stack3.size = 2;

  std::vector<void *> vec;
  for (size_t i = 0; i < n; i++) {
    if ((i % 3) == 0) {
      if (vec.empty()) continue;
      size_t idx = my_rand_r(&seed) % vec.size();
      void *ptr = vec[idx];
      vec[idx] = vec.back();
      vec.pop_back();
      __asan::asan_free(ptr, &stack1, __asan::FROM_MALLOC);
    } else {
      size_t size = my_rand_r(&seed) % 1000 + 1;
      switch ((my_rand_r(&seed) % 128)) {
        case 0: size += 1024; break;
        case 1: size += 2048; break;
        case 2: size += 4096; break;
      }
      size_t alignment = 1 << (my_rand_r(&seed) % 10 + 1);
      char *ptr = (char*)__asan::asan_memalign(alignment, size,
                                               &stack2, __asan::FROM_MALLOC);
      vec.push_back(ptr);
      ptr[0] = 0;
      ptr[size-1] = 0;
      ptr[size/2] = 0;
    }
  }
  for (size_t i = 0; i < vec.size(); i++)
    __asan::asan_free(vec[i], &stack3, __asan::FROM_MALLOC);
}


TEST(AddressSanitizer, NoInstMallocTest) {
#ifdef __arm__
  MallocStress(300000);
#else
  MallocStress(1000000);
#endif
}

static void PrintShadow(const char *tag, uptr ptr, size_t size) {
  fprintf(stderr, "%s shadow: %lx size % 3ld: ", tag, (long)ptr, (long)size);
  uptr prev_shadow = 0;
  for (sptr i = -32; i < (sptr)size + 32; i++) {
    uptr shadow = __asan::MemToShadow(ptr + i);
    if (i == 0 || i == (sptr)size)
      fprintf(stderr, ".");
    if (shadow != prev_shadow) {
      prev_shadow = shadow;
      fprintf(stderr, "%02x", (int)*(u8*)shadow);
    }
  }
  fprintf(stderr, "\n");
}
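
// A reminder of the mapping PrintShadow walks (a sketch; the exact scale and
// offset constants are platform-dependent, see asan_mapping.h):
//   Shadow = (Mem >> SHADOW_SCALE) + SHADOW_OFFSET
// Each shadow byte describes one 8-byte granule of application memory:
// 0 means fully addressable, k in [1..7] means only the first k bytes are
// addressable, and negative values mark redzones and freed memory.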

TEST(AddressSanitizer, DISABLED_InternalPrintShadow) {
  for (size_t size = 1; size <= 513; size++) {
    char *ptr = new char[size];
    PrintShadow("m", (uptr)ptr, size);
    delete [] ptr;
    PrintShadow("f", (uptr)ptr, size);
  }
}

static uptr pc_array[] = {
#if SANITIZER_WORDSIZE == 64
  0x7effbf756068ULL,
  0x7effbf75e5abULL,
  0x7effc0625b7cULL,
  0x7effc05b8997ULL,
  0x7effbf990577ULL,
  0x7effbf990c56ULL,
  0x7effbf992f3cULL,
  0x7effbf950c22ULL,
  0x7effc036dba0ULL,
  0x7effc03638a3ULL,
  0x7effc035be4aULL,
  0x7effc0539c45ULL,
  0x7effc0539a65ULL,
  0x7effc03db9b3ULL,
  0x7effc03db100ULL,
  0x7effc037c7b8ULL,
  0x7effc037bfffULL,
  0x7effc038b777ULL,
  0x7effc038021cULL,
  0x7effc037c7d1ULL,
  0x7effc037bfffULL,
  0x7effc038b777ULL,
  0x7effc038021cULL,
  0x7effc037c7d1ULL,
  0x7effc037bfffULL,
  0x7effc038b777ULL,
  0x7effc038021cULL,
  0x7effc037c7d1ULL,
  0x7effc037bfffULL,
  0x7effc0520d26ULL,
  0x7effc009ddffULL,
  0x7effbf90bb50ULL,
  0x7effbdddfa69ULL,
  0x7effbdde1fe2ULL,
  0x7effbdde2424ULL,
  0x7effbdde27b3ULL,
  0x7effbddee53bULL,
  0x7effbdde1988ULL,
  0x7effbdde0904ULL,
  0x7effc106ce0dULL,
  0x7effbcc3fa04ULL,
  0x7effbcc3f6a4ULL,
  0x7effbcc3e726ULL,
  0x7effbcc40852ULL,
  0x7effb681ec4dULL,
#endif  // SANITIZER_WORDSIZE
  0xB0B5E768,
  0x7B682EC1,
  0x367F9918,
  0xAE34E13,
  0xBA0C6C6,
  0x13250F46,
  0xA0D6A8AB,
  0x2B07C1A8,
  0x6C844F4A,
  0x2321B53,
  0x1F3D4F8F,
  0x3FE2924B,
  0xB7A2F568,
  0xBD23950A,
  0x61020930,
  0x33E7970C,
  0x405998A1,
  0x59F3551D,
  0x350E3028,
  0xBC55A28D,
  0x361F3AED,
  0xBEAD0F73,
  0xAEF28479,
  0x757E971F,
  0xAEBA450,
  0x43AD22F5,
  0x8C2C50C4,
  0x7AD8A2E1,
  0x69EE4EE8,
  0xC08DFF,
  0x4BA6538,
  0x3708AB2,
  0xC24B6475,
  0x7C8890D7,
  0x6662495F,
  0x9B641689,
  0xD3596B,
  0xA1049569,
  0x44CBC16,
  0x4D39C39F
};

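// Check the CompressStack/UncompressStack round trip below: uncompressing
// what was compressed must reproduce a prefix of the original trace
// (compression may drop trailing frames when the buffer is too small, but
// must never corrupt the frames it keeps).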
void CompressStackTraceTest(size_t n_iter) {
  u32 seed = my_rand();
  const size_t kNumPcs = ARRAY_SIZE(pc_array);
  u32 compressed[2 * kNumPcs];

  for (size_t iter = 0; iter < n_iter; iter++) {
    std::random_shuffle(pc_array, pc_array + kNumPcs);
    __asan::StackTrace stack0, stack1;
    stack0.CopyFrom(pc_array, kNumPcs);
    stack0.size = std::max((size_t)1, (size_t)(my_rand_r(&seed) % stack0.size));
    size_t compress_size =
      std::max((size_t)2, (size_t)my_rand_r(&seed) % (2 * kNumPcs));
    size_t n_frames =
      __asan::StackTrace::CompressStack(&stack0, compressed, compress_size);
    Ident(n_frames);
    assert(n_frames <= stack0.size);
    __asan::StackTrace::UncompressStack(&stack1, compressed, compress_size);
    assert(stack1.size == n_frames);
    for (size_t i = 0; i < stack1.size; i++) {
      assert(stack0.trace[i] == stack1.trace[i]);
    }
  }
}

TEST(AddressSanitizer, CompressStackTraceTest) {
  CompressStackTraceTest(10000);
}

void CompressStackTraceBenchmark(size_t n_iter) {
  const size_t kNumPcs = ARRAY_SIZE(pc_array);
  u32 compressed[2 * kNumPcs];
  std::random_shuffle(pc_array, pc_array + kNumPcs);

  __asan::StackTrace stack0;
  stack0.CopyFrom(pc_array, kNumPcs);
  stack0.size = kNumPcs;
  for (size_t iter = 0; iter < n_iter; iter++) {
    size_t compress_size = kNumPcs;
    size_t n_frames =
      __asan::StackTrace::CompressStack(&stack0, compressed, compress_size);
    Ident(n_frames);
  }
}

TEST(AddressSanitizer, CompressStackTraceBenchmark) {
  CompressStackTraceBenchmark(1 << 24);
}

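// Check that freed chunks are held in a quarantine: the same address must
// not be handed out by malloc again until many other alloc/free cycles have
// pushed it through the quarantine.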
TEST(AddressSanitizer, QuarantineTest) {
  __asan::StackTrace stack;
  stack.trace[0] = 0x890;
  stack.size = 1;

  const int size = 32;
  void *p = __asan::asan_malloc(size, &stack);
  __asan::asan_free(p, &stack, __asan::FROM_MALLOC);
  size_t i;
  size_t max_i = 1 << 30;
  for (i = 0; i < max_i; i++) {
    void *p1 = __asan::asan_malloc(size, &stack);
    __asan::asan_free(p1, &stack, __asan::FROM_MALLOC);
    if (p1 == p) break;
  }
  // fprintf(stderr, "i=%ld\n", i);
  EXPECT_GE(i, 100000U);
  EXPECT_LT(i, max_i);
}

void *ThreadedQuarantineTestWorker(void *unused) {
  (void)unused;
  u32 seed = my_rand();
  __asan::StackTrace stack;
  stack.trace[0] = 0x890;
  stack.size = 1;

  for (size_t i = 0; i < 1000; i++) {
    void *p = __asan::asan_malloc(1 + (my_rand_r(&seed) % 4000), &stack);
    __asan::asan_free(p, &stack, __asan::FROM_MALLOC);
  }
  return NULL;
}

// Check that the thread-local allocators are flushed when threads are
// destroyed.
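// (Each thread keeps per-thread allocator caches; if they were not drained
// back into the global state on thread exit, heap usage would grow roughly
// linearly with the number of threads created, which the bound checked below
// rules out.)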
TEST(AddressSanitizer, ThreadedQuarantineTest) {
  const int n_threads = 3000;
  size_t mmaped1 = __asan_get_heap_size();
  for (int i = 0; i < n_threads; i++) {
    pthread_t t;
    PTHREAD_CREATE(&t, NULL, ThreadedQuarantineTestWorker, 0);
    PTHREAD_JOIN(t, 0);
    size_t mmaped2 = __asan_get_heap_size();
    EXPECT_LT(mmaped2 - mmaped1, 320U * (1 << 20));
  }
}

void *ThreadedOneSizeMallocStress(void *unused) {
  (void)unused;
  __asan::StackTrace stack;
  stack.trace[0] = 0x890;
  stack.size = 1;
  const size_t kNumMallocs = 1000;
  for (int iter = 0; iter < 1000; iter++) {
    void *p[kNumMallocs];
    for (size_t i = 0; i < kNumMallocs; i++) {
      p[i] = __asan::asan_malloc(32, &stack);
    }
    for (size_t i = 0; i < kNumMallocs; i++) {
      __asan::asan_free(p[i], &stack, __asan::FROM_MALLOC);
    }
  }
  return NULL;
}

TEST(AddressSanitizer, ThreadedOneSizeMallocStressTest) {
  const int kNumThreads = 4;
  pthread_t t[kNumThreads];
  for (int i = 0; i < kNumThreads; i++) {
    PTHREAD_CREATE(&t[i], 0, ThreadedOneSizeMallocStress, 0);
  }
  for (int i = 0; i < kNumThreads; i++) {
    PTHREAD_JOIN(t[i], 0);
  }
}

TEST(AddressSanitizer, MemsetWildAddressTest) {
  typedef void*(*memset_p)(void*, int, size_t);
  // Prevent inlining of memset().
  volatile memset_p libc_memset = (memset_p)memset;
  EXPECT_DEATH(libc_memset((void*)(kLowShadowBeg + 200), 0, 100),
               "unknown-crash.*low shadow");
  EXPECT_DEATH(libc_memset((void*)(kShadowGapBeg + 200), 0, 100),
               "unknown-crash.*shadow gap");
  EXPECT_DEATH(libc_memset((void*)(kHighShadowBeg + 200), 0, 100),
               "unknown-crash.*high shadow");
}

TEST(AddressSanitizerInterface, GetEstimatedAllocatedSize) {
#if ASAN_ALLOCATOR_VERSION == 1
  EXPECT_EQ(1U, __asan_get_estimated_allocated_size(0));
#elif ASAN_ALLOCATOR_VERSION == 2
  EXPECT_EQ(0U, __asan_get_estimated_allocated_size(0));
#endif
  const size_t sizes[] = { 1, 30, 1<<30 };
  for (size_t i = 0; i < 3; i++) {
    EXPECT_EQ(sizes[i], __asan_get_estimated_allocated_size(sizes[i]));
  }
}

static const char* kGetAllocatedSizeErrorMsg =
  "attempting to call __asan_get_allocated_size()";

TEST(AddressSanitizerInterface, GetAllocatedSizeAndOwnershipTest) {
  const size_t kArraySize = 100;
  char *array = Ident((char*)malloc(kArraySize));
  int *int_ptr = Ident(new int);

  // Allocated memory is owned by the allocator. The allocated size should
  // be equal to the requested size.
  EXPECT_EQ(true, __asan_get_ownership(array));
  EXPECT_EQ(kArraySize, __asan_get_allocated_size(array));
  EXPECT_EQ(true, __asan_get_ownership(int_ptr));
  EXPECT_EQ(sizeof(int), __asan_get_allocated_size(int_ptr));

  // We cannot call GetAllocatedSize on memory we didn't allocate,
  // or on interior pointers (not returned by a previous malloc).
  void *wild_addr = (void*)0x1;
  EXPECT_FALSE(__asan_get_ownership(wild_addr));
  EXPECT_DEATH(__asan_get_allocated_size(wild_addr), kGetAllocatedSizeErrorMsg);
  EXPECT_FALSE(__asan_get_ownership(array + kArraySize / 2));
  EXPECT_DEATH(__asan_get_allocated_size(array + kArraySize / 2),
               kGetAllocatedSizeErrorMsg);

  // NULL is not owned, but is a valid argument for
  // __asan_get_allocated_size().
  EXPECT_FALSE(__asan_get_ownership(NULL));
  EXPECT_EQ(0U, __asan_get_allocated_size(NULL));

  // When memory is freed, it's not owned, and calling GetAllocatedSize
  // on it is forbidden.
  free(array);
  EXPECT_FALSE(__asan_get_ownership(array));
  EXPECT_DEATH(__asan_get_allocated_size(array), kGetAllocatedSizeErrorMsg);
  delete int_ptr;

  void *zero_alloc = Ident(malloc(0));
  if (zero_alloc != 0) {
    // If malloc(0) is not null, this pointer is owned and should have a valid
    // allocated size.
    EXPECT_TRUE(__asan_get_ownership(zero_alloc));
    // The allocated size is 0 or 1 depending on the allocator used.
    EXPECT_LT(__asan_get_allocated_size(zero_alloc), 2);
  }
  free(zero_alloc);
}

TEST(AddressSanitizerInterface, GetCurrentAllocatedBytesTest) {
  size_t before_malloc, after_malloc, after_free;
  char *array;
  const size_t kMallocSize = 100;
  before_malloc = __asan_get_current_allocated_bytes();

  array = Ident((char*)malloc(kMallocSize));
  after_malloc = __asan_get_current_allocated_bytes();
  EXPECT_EQ(before_malloc + kMallocSize, after_malloc);

  free(array);
  after_free = __asan_get_current_allocated_bytes();
  EXPECT_EQ(before_malloc, after_free);
}

static void DoDoubleFree() {
  int *x = Ident(new int);
  delete Ident(x);
  delete Ident(x);
}

#if ASAN_ALLOCATOR_VERSION == 1
// This test is run in a separate process, so that the large malloc-ed
// chunk won't remain in the free lists after the test.
// Note: use ASSERT_* instead of EXPECT_* here.
static void RunGetHeapSizeTestAndDie() {
  size_t old_heap_size, new_heap_size, heap_growth;
  // It is unlikely that a chunk of this size is already in the free list.
  static const size_t kLargeMallocSize = 1 << 29;  // 512M
  old_heap_size = __asan_get_heap_size();
  fprintf(stderr, "allocating %zu bytes:\n", kLargeMallocSize);
  free(Ident(malloc(kLargeMallocSize)));
  new_heap_size = __asan_get_heap_size();
  heap_growth = new_heap_size - old_heap_size;
  fprintf(stderr, "heap growth after first malloc: %zu\n", heap_growth);
  ASSERT_GE(heap_growth, kLargeMallocSize);
  ASSERT_LE(heap_growth, 2 * kLargeMallocSize);

  // Now the large chunk should be in the free list, so it can be
  // allocated again without increasing the heap size.
  old_heap_size = new_heap_size;
  free(Ident(malloc(kLargeMallocSize)));
  heap_growth = __asan_get_heap_size() - old_heap_size;
  fprintf(stderr, "heap growth after second malloc: %zu\n", heap_growth);
  ASSERT_LT(heap_growth, kLargeMallocSize);

  // Test passed. Now die with the expected double-free.
  DoDoubleFree();
}

TEST(AddressSanitizerInterface, GetHeapSizeTest) {
  EXPECT_DEATH(RunGetHeapSizeTestAndDie(), "double-free");
}
#elif ASAN_ALLOCATOR_VERSION == 2
TEST(AddressSanitizerInterface, GetHeapSizeTest) {
  // asan_allocator2 does not keep huge chunks in the free list, but unmaps
  // them. The chunk should be larger than the quarantine size; otherwise it
  // gets stuck in the quarantine instead of being unmapped.
  static const size_t kLargeMallocSize = 1 << 29;  // 512M
  uptr old_heap_size = __asan_get_heap_size();
  for (int i = 0; i < 3; i++) {
    // fprintf(stderr, "allocating %zu bytes:\n", kLargeMallocSize);
    free(Ident(malloc(kLargeMallocSize)));
    EXPECT_EQ(old_heap_size, __asan_get_heap_size());
  }
}
#endif

// Note: use ASSERT_* instead of EXPECT_* here.
static void DoLargeMallocForGetFreeBytesTestAndDie() {
#if ASAN_ALLOCATOR_VERSION == 1
  // asan_allocator2 does not keep large chunks in free lists, so this check
  // only makes sense for allocator version 1.
  size_t old_free_bytes, new_free_bytes;
  static const size_t kLargeMallocSize = 1 << 29;  // 512M
  // If we malloc and free a large memory chunk, it will not fall
  // into the quarantine and will be available for future requests.
  old_free_bytes = __asan_get_free_bytes();
  fprintf(stderr, "allocating %zu bytes:\n", kLargeMallocSize);
  fprintf(stderr, "free bytes before malloc: %zu\n", old_free_bytes);
  free(Ident(malloc(kLargeMallocSize)));
  new_free_bytes = __asan_get_free_bytes();
  fprintf(stderr, "free bytes after malloc and free: %zu\n", new_free_bytes);
  ASSERT_GE(new_free_bytes, old_free_bytes + kLargeMallocSize);
#endif  // ASAN_ALLOCATOR_VERSION
  // Test passed.
  DoDoubleFree();
}

TEST(AddressSanitizerInterface, GetFreeBytesTest) {
#if ASAN_ALLOCATOR_VERSION == 1
  // Allocate a small chunk. The allocator now probably has a lot of such
  // chunks ready to fulfill future requests, so subsequent requests will
  // decrease the number of free bytes. Do this only on systems where there
  // is enough memory for such assumptions.
  if (SANITIZER_WORDSIZE == 64 && !ASAN_LOW_MEMORY) {
    static const size_t kNumOfChunks = 100;
    static const size_t kChunkSize = 100;
    char *chunks[kNumOfChunks];
    size_t i;
    size_t old_free_bytes, new_free_bytes;
    chunks[0] = Ident((char*)malloc(kChunkSize));
    old_free_bytes = __asan_get_free_bytes();
    for (i = 1; i < kNumOfChunks; i++) {
      chunks[i] = Ident((char*)malloc(kChunkSize));
      new_free_bytes = __asan_get_free_bytes();
      EXPECT_LT(new_free_bytes, old_free_bytes);
      old_free_bytes = new_free_bytes;
    }
    for (i = 0; i < kNumOfChunks; i++)
      free(chunks[i]);
  }
#endif
  EXPECT_DEATH(DoLargeMallocForGetFreeBytesTestAndDie(), "double-free");
}

static const size_t kManyThreadsMallocSizes[] = {5, 1UL<<10, 1UL<<14, 357};
static const size_t kManyThreadsIterations = 250;
static const size_t kManyThreadsNumThreads =
  (SANITIZER_WORDSIZE == 32) ? 40 : 200;

void *ManyThreadsWithStatsWorker(void *arg) {
  (void)arg;
  for (size_t iter = 0; iter < kManyThreadsIterations; iter++) {
    for (size_t size_index = 0; size_index < 4; size_index++) {
      free(Ident(malloc(kManyThreadsMallocSizes[size_index])));
    }
  }
  // Just one large allocation.
  free(Ident(malloc(1 << 20)));
  return 0;
}

TEST(AddressSanitizerInterface, ManyThreadsWithStatsStressTest) {
  size_t before_test, after_test, i;
  pthread_t threads[kManyThreadsNumThreads];
  before_test = __asan_get_current_allocated_bytes();
  for (i = 0; i < kManyThreadsNumThreads; i++) {
    PTHREAD_CREATE(&threads[i], 0,
                   (void* (*)(void *x))ManyThreadsWithStatsWorker, (void*)i);
  }
  for (i = 0; i < kManyThreadsNumThreads; i++) {
    PTHREAD_JOIN(threads[i], 0);
  }
  after_test = __asan_get_current_allocated_bytes();
  // ASan stats also reflect memory usage of internal ASan RTL structs,
  // so we can't check for equality here.
  EXPECT_LT(after_test, before_test + (1UL<<20));
}

TEST(AddressSanitizerInterface, ExitCode) {
  int original_exit_code = __asan_set_error_exit_code(7);
  EXPECT_EXIT(DoDoubleFree(), ::testing::ExitedWithCode(7), "");
  EXPECT_EQ(7, __asan_set_error_exit_code(8));
  EXPECT_EXIT(DoDoubleFree(), ::testing::ExitedWithCode(8), "");
  EXPECT_EQ(8, __asan_set_error_exit_code(original_exit_code));
  EXPECT_EXIT(DoDoubleFree(),
              ::testing::ExitedWithCode(original_exit_code), "");
}

static void MyDeathCallback() {
  fprintf(stderr, "MyDeathCallback\n");
}

TEST(AddressSanitizerInterface, DeathCallbackTest) {
  __asan_set_death_callback(MyDeathCallback);
  EXPECT_DEATH(DoDoubleFree(), "MyDeathCallback");
  __asan_set_death_callback(NULL);
}

static const char* kUseAfterPoisonErrorMessage = "use-after-poison";

#define GOOD_ACCESS(ptr, offset)  \
    EXPECT_FALSE(__asan::AddressIsPoisoned((uptr)(ptr + offset)))

#define BAD_ACCESS(ptr, offset) \
    EXPECT_TRUE(__asan::AddressIsPoisoned((uptr)(ptr + offset)))

TEST(AddressSanitizerInterface, SimplePoisonMemoryRegionTest) {
  char *array = Ident((char*)malloc(120));
  // poison array[40..80)
  __asan_poison_memory_region(array + 40, 40);
  GOOD_ACCESS(array, 39);
  GOOD_ACCESS(array, 80);
  BAD_ACCESS(array, 40);
  BAD_ACCESS(array, 60);
  BAD_ACCESS(array, 79);
  EXPECT_DEATH(__asan_report_error(0, 0, 0, (uptr)(array + 40), true, 1),
               kUseAfterPoisonErrorMessage);
  __asan_unpoison_memory_region(array + 40, 40);
  // access previously poisoned memory.
  GOOD_ACCESS(array, 40);
  GOOD_ACCESS(array, 79);
  free(array);
}

TEST(AddressSanitizerInterface, OverlappingPoisonMemoryRegionTest) {
  char *array = Ident((char*)malloc(120));
  // Poison [0..40) and [80..120)
  __asan_poison_memory_region(array, 40);
  __asan_poison_memory_region(array + 80, 40);
  BAD_ACCESS(array, 20);
  GOOD_ACCESS(array, 60);
  BAD_ACCESS(array, 100);
  // Poison whole array - [0..120)
  __asan_poison_memory_region(array, 120);
  BAD_ACCESS(array, 60);
  // Unpoison [24..96)
  __asan_unpoison_memory_region(array + 24, 72);
  BAD_ACCESS(array, 23);
  GOOD_ACCESS(array, 24);
  GOOD_ACCESS(array, 60);
  GOOD_ACCESS(array, 95);
  BAD_ACCESS(array, 96);
  free(array);
}

TEST(AddressSanitizerInterface, PushAndPopWithPoisoningTest) {
  // Vector of capacity 20
  char *vec = Ident((char*)malloc(20));
  __asan_poison_memory_region(vec, 20);
  for (size_t i = 0; i < 7; i++) {
    // Simulate push_back.
    __asan_unpoison_memory_region(vec + i, 1);
    GOOD_ACCESS(vec, i);
    BAD_ACCESS(vec, i + 1);
  }
  for (size_t i = 7; i > 0; i--) {
    // Simulate pop_back.
    __asan_poison_memory_region(vec + i - 1, 1);
    BAD_ACCESS(vec, i - 1);
    if (i > 1) GOOD_ACCESS(vec, i - 2);
  }
  free(vec);
}

// Make sure that within each aligned block of size "2^granularity" a "true"
// (poisoned) value never appears before a "false" (unpoisoned) one, since
// ASan's shadow encoding can only represent a poisoned suffix of a granule.
static void MakeShadowValid(bool *shadow, int length, int granularity) {
  bool can_be_poisoned = true;
  for (int i = length - 1; i >= 0; i--) {
    if (!shadow[i])
      can_be_poisoned = false;
    if (!can_be_poisoned)
      shadow[i] = false;
    if (i % (1 << granularity) == 0) {
      can_be_poisoned = true;
    }
  }
}

TEST(AddressSanitizerInterface, PoisoningStressTest) {
  const size_t kSize = 24;
  bool expected[kSize];
  char *arr = Ident((char*)malloc(kSize));
  for (size_t l1 = 0; l1 < kSize; l1++) {
    for (size_t s1 = 1; l1 + s1 <= kSize; s1++) {
      for (size_t l2 = 0; l2 < kSize; l2++) {
        for (size_t s2 = 1; l2 + s2 <= kSize; s2++) {
          // Poison [l1, l1+s1), [l2, l2+s2) and check result.
          __asan_unpoison_memory_region(arr, kSize);
          __asan_poison_memory_region(arr + l1, s1);
          __asan_poison_memory_region(arr + l2, s2);
          memset(expected, false, kSize);
          memset(expected + l1, true, s1);
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          memset(expected + l2, true, s2);
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          for (size_t i = 0; i < kSize; i++) {
            ASSERT_EQ(expected[i], __asan_address_is_poisoned(arr + i));
          }
          // Unpoison [l1, l1+s1) and [l2, l2+s2) and check result.
          __asan_poison_memory_region(arr, kSize);
          __asan_unpoison_memory_region(arr + l1, s1);
          __asan_unpoison_memory_region(arr + l2, s2);
          memset(expected, true, kSize);
          memset(expected + l1, false, s1);
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          memset(expected + l2, false, s2);
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          for (size_t i = 0; i < kSize; i++) {
            ASSERT_EQ(expected[i], __asan_address_is_poisoned(arr + i));
          }
        }
      }
    }
  }
}

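// __asan_region_is_poisoned(beg, size) returns 0 if the whole region is
// addressable, and the address of the first poisoned byte otherwise. The
// loops below check this for every sub-range of a fresh heap chunk and the
// redzone that follows it.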
TEST(AddressSanitizerInterface, PoisonedRegion) {
  size_t rz = 16;
  for (size_t size = 1; size <= 64; size++) {
    char *p = new char[size];
    uptr x = reinterpret_cast<uptr>(p);
    for (size_t beg = 0; beg < size + rz; beg++) {
      for (size_t end = beg; end < size + rz; end++) {
        uptr first_poisoned = __asan_region_is_poisoned(x + beg, end - beg);
        if (beg == end) {
          EXPECT_FALSE(first_poisoned);
        } else if (beg < size && end <= size) {
          EXPECT_FALSE(first_poisoned);
        } else if (beg >= size) {
          EXPECT_EQ(x + beg, first_poisoned);
        } else {
          EXPECT_GT(end, size);
          EXPECT_EQ(x + size, first_poisoned);
        }
      }
    }
    delete [] p;
  }
}

// This is a performance benchmark for manual runs.
// ASan's memset interceptor calls mem_is_zero for the entire shadow region.
// The profile should look like this:
//     89.10%   [.] __memset_sse2
//     10.50%   [.] __sanitizer::mem_is_zero
// I.e. mem_is_zero should consume about SHADOW_GRANULARITY times fewer CPU
// cycles than memset itself.
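// A minimal sketch (for illustration only, not the actual RTL code) of the
// fast path this measures: to validate an N-byte access the interceptor only
// has to scan N/8 shadow bytes and check that they are all zero, e.g.:
//   bool AllAddressable(uptr beg, uptr size) {
//     const char *s = (const char *)__asan::MemToShadow(beg);
//     return __sanitizer::mem_is_zero(s, size >> 3);
//   }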
TEST(AddressSanitizerInterface, DISABLED_Stress_memset) {
  size_t size = 1 << 20;
  char *x = new char[size];
  for (int i = 0; i < 100000; i++)
    Ident(memset)(x, 0, size);
  delete [] x;
}

static const char *kInvalidPoisonMessage = "invalid-poison-memory-range";
static const char *kInvalidUnpoisonMessage = "invalid-unpoison-memory-range";

TEST(AddressSanitizerInterface, DISABLED_InvalidPoisonAndUnpoisonCallsTest) {
  char *array = Ident((char*)malloc(120));
  __asan_unpoison_memory_region(array, 120);
  // Try to unpoison memory we don't own.
  EXPECT_DEATH(__asan_unpoison_memory_region(array, 121),
               kInvalidUnpoisonMessage);
  EXPECT_DEATH(__asan_unpoison_memory_region(array - 1, 120),
               kInvalidUnpoisonMessage);

  __asan_poison_memory_region(array, 120);
  // Try to poison memory we don't own.
  EXPECT_DEATH(__asan_poison_memory_region(array, 121), kInvalidPoisonMessage);
  EXPECT_DEATH(__asan_poison_memory_region(array - 1, 120),
               kInvalidPoisonMessage);
  free(array);
}

static void ErrorReportCallbackOneToZ(const char *report) {
  int report_len = strlen(report);
  ASSERT_EQ(6, write(2, "ABCDEF", 6));
  ASSERT_EQ(report_len, write(2, report, report_len));
  ASSERT_EQ(6, write(2, "ABCDEF", 6));
  _exit(1);
}

TEST(AddressSanitizerInterface, SetErrorReportCallbackTest) {
  __asan_set_error_report_callback(ErrorReportCallbackOneToZ);
  EXPECT_DEATH(__asan_report_error(0, 0, 0, 0, true, 1),
               ASAN_PCRE_DOTALL "ABCDEF.*AddressSanitizer.*WRITE.*ABCDEF");
  __asan_set_error_report_callback(NULL);
}

TEST(AddressSanitizerInterface, GetOwnershipStressTest) {
  std::vector<char *> pointers;
  std::vector<size_t> sizes;
#if ASAN_ALLOCATOR_VERSION == 1
  const size_t kNumMallocs =
      (SANITIZER_WORDSIZE <= 32 || ASAN_LOW_MEMORY) ? 1 << 10 : 1 << 14;
#elif ASAN_ALLOCATOR_VERSION == 2  // too slow with asan_allocator2. :(
  const size_t kNumMallocs = 1 << 9;
#endif
  for (size_t i = 0; i < kNumMallocs; i++) {
    size_t size = i * 100 + 1;
    pointers.push_back((char*)malloc(size));
    sizes.push_back(size);
  }
  for (size_t i = 0; i < 4000000; i++) {
    EXPECT_FALSE(__asan_get_ownership(&pointers));
    EXPECT_FALSE(__asan_get_ownership((void*)0x1234));
    size_t idx = i % kNumMallocs;
    EXPECT_TRUE(__asan_get_ownership(pointers[idx]));
    EXPECT_EQ(sizes[idx], __asan_get_allocated_size(pointers[idx]));
  }
  for (size_t i = 0, n = pointers.size(); i < n; i++)
    free(pointers[i]);
}
785