asan_noinst_test.cc revision 87b52b910037447eccb92546b24b5e06181a1545
//===-- asan_noinst_test.cc -----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// This test file should be compiled w/o asan instrumentation.
//===----------------------------------------------------------------------===//

#include "asan_allocator.h"
#include "asan_internal.h"
#include "asan_mapping.h"
#include "asan_stack.h"
#include "asan_test_utils.h"
#include "sanitizer/asan_interface.h"

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>  // for memset()
#include <algorithm>
#include <vector>


TEST(AddressSanitizer, InternalSimpleDeathTest) {
  EXPECT_DEATH(exit(1), "");
}

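// Stress the allocator: in a random interleaving, either free a randomly
// chosen live block or allocate a new block with a random size (occasionally
// bumped by 1-4K) and a random alignment up to 1 << 10, touching the first,
// middle and last bytes of each allocation.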
static void MallocStress(size_t n) {
  u32 seed = my_rand();
  __asan::StackTrace stack1;
  stack1.trace[0] = 0xa123;
  stack1.trace[1] = 0xa456;
  stack1.size = 2;

  __asan::StackTrace stack2;
  stack2.trace[0] = 0xb123;
  stack2.trace[1] = 0xb456;
  stack2.size = 2;

  __asan::StackTrace stack3;
  stack3.trace[0] = 0xc123;
  stack3.trace[1] = 0xc456;
  stack3.size = 2;

  std::vector<void *> vec;
  for (size_t i = 0; i < n; i++) {
    if ((i % 3) == 0) {
      if (vec.empty()) continue;
      size_t idx = my_rand_r(&seed) % vec.size();
      void *ptr = vec[idx];
      vec[idx] = vec.back();
      vec.pop_back();
      __asan::asan_free(ptr, &stack1, __asan::FROM_MALLOC);
    } else {
      size_t size = my_rand_r(&seed) % 1000 + 1;
      switch ((my_rand_r(&seed) % 128)) {
        case 0: size += 1024; break;
        case 1: size += 2048; break;
        case 2: size += 4096; break;
      }
      size_t alignment = 1 << (my_rand_r(&seed) % 10 + 1);
      char *ptr = (char*)__asan::asan_memalign(alignment, size,
                                               &stack2, __asan::FROM_MALLOC);
      vec.push_back(ptr);
      ptr[0] = 0;
      ptr[size-1] = 0;
      ptr[size/2] = 0;
    }
  }
  for (size_t i = 0; i < vec.size(); i++)
    __asan::asan_free(vec[i], &stack3, __asan::FROM_MALLOC);
}


TEST(AddressSanitizer, NoInstMallocTest) {
#ifdef __arm__
  MallocStress(300000);
#else
  MallocStress(1000000);
#endif
}

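// Dump the shadow bytes around [ptr, ptr + size), printing a dot at the
// region's boundaries and each shadow byte once as its value changes.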
static void PrintShadow(const char *tag, uptr ptr, size_t size) {
  fprintf(stderr, "%s shadow: %lx size % 3ld: ", tag, (long)ptr, (long)size);
  uptr prev_shadow = 0;
  for (sptr i = -32; i < (sptr)size + 32; i++) {
    uptr shadow = __asan::MemToShadow(ptr + i);
    if (i == 0 || i == (sptr)size)
      fprintf(stderr, ".");
    if (shadow != prev_shadow) {
      prev_shadow = shadow;
      fprintf(stderr, "%02x", (int)*(u8*)shadow);
    }
  }
  fprintf(stderr, "\n");
}

TEST(AddressSanitizer, DISABLED_InternalPrintShadow) {
  for (size_t size = 1; size <= 513; size++) {
    char *ptr = new char[size];
    PrintShadow("m", (uptr)ptr, size);
    delete [] ptr;
    PrintShadow("f", (uptr)ptr, size);
  }
}

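// A pool of fake program counters used to exercise stack trace compression;
// the 64-bit values are only included on 64-bit targets.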
static uptr pc_array[] = {
#if SANITIZER_WORDSIZE == 64
  0x7effbf756068ULL,
  0x7effbf75e5abULL,
  0x7effc0625b7cULL,
  0x7effc05b8997ULL,
  0x7effbf990577ULL,
  0x7effbf990c56ULL,
  0x7effbf992f3cULL,
  0x7effbf950c22ULL,
  0x7effc036dba0ULL,
  0x7effc03638a3ULL,
  0x7effc035be4aULL,
  0x7effc0539c45ULL,
  0x7effc0539a65ULL,
  0x7effc03db9b3ULL,
  0x7effc03db100ULL,
  0x7effc037c7b8ULL,
  0x7effc037bfffULL,
  0x7effc038b777ULL,
  0x7effc038021cULL,
  0x7effc037c7d1ULL,
  0x7effc037bfffULL,
  0x7effc038b777ULL,
  0x7effc038021cULL,
  0x7effc037c7d1ULL,
  0x7effc037bfffULL,
  0x7effc038b777ULL,
  0x7effc038021cULL,
  0x7effc037c7d1ULL,
  0x7effc037bfffULL,
  0x7effc0520d26ULL,
  0x7effc009ddffULL,
  0x7effbf90bb50ULL,
  0x7effbdddfa69ULL,
  0x7effbdde1fe2ULL,
  0x7effbdde2424ULL,
  0x7effbdde27b3ULL,
  0x7effbddee53bULL,
  0x7effbdde1988ULL,
  0x7effbdde0904ULL,
  0x7effc106ce0dULL,
  0x7effbcc3fa04ULL,
  0x7effbcc3f6a4ULL,
  0x7effbcc3e726ULL,
  0x7effbcc40852ULL,
  0x7effb681ec4dULL,
#endif  // SANITIZER_WORDSIZE
  0xB0B5E768,
  0x7B682EC1,
  0x367F9918,
  0xAE34E13,
  0xBA0C6C6,
  0x13250F46,
  0xA0D6A8AB,
  0x2B07C1A8,
  0x6C844F4A,
  0x2321B53,
  0x1F3D4F8F,
  0x3FE2924B,
  0xB7A2F568,
  0xBD23950A,
  0x61020930,
  0x33E7970C,
  0x405998A1,
  0x59F3551D,
  0x350E3028,
  0xBC55A28D,
  0x361F3AED,
  0xBEAD0F73,
  0xAEF28479,
  0x757E971F,
  0xAEBA450,
  0x43AD22F5,
  0x8C2C50C4,
  0x7AD8A2E1,
  0x69EE4EE8,
  0xC08DFF,
  0x4BA6538,
  0x3708AB2,
  0xC24B6475,
  0x7C8890D7,
  0x6662495F,
  0x9B641689,
  0xD3596B,
  0xA1049569,
  0x44CBC16,
  0x4D39C39F
};

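// Round-trip check: compressing a random prefix of pc_array into a buffer of
// random capacity and uncompressing it back must reproduce the stored frames.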
void CompressStackTraceTest(size_t n_iter) {
  u32 seed = my_rand();
  const size_t kNumPcs = ARRAY_SIZE(pc_array);
  u32 compressed[2 * kNumPcs];

  for (size_t iter = 0; iter < n_iter; iter++) {
    std::random_shuffle(pc_array, pc_array + kNumPcs);
    __asan::StackTrace stack0, stack1;
    stack0.CopyFrom(pc_array, kNumPcs);
    stack0.size = std::max((size_t)1, (size_t)(my_rand_r(&seed) % stack0.size));
    size_t compress_size =
      std::max((size_t)2, (size_t)my_rand_r(&seed) % (2 * kNumPcs));
    size_t n_frames =
      __asan::StackTrace::CompressStack(&stack0, compressed, compress_size);
    Ident(n_frames);
    assert(n_frames <= stack0.size);
    __asan::StackTrace::UncompressStack(&stack1, compressed, compress_size);
    assert(stack1.size == n_frames);
    for (size_t i = 0; i < stack1.size; i++) {
      assert(stack0.trace[i] == stack1.trace[i]);
    }
  }
}

TEST(AddressSanitizer, CompressStackTraceTest) {
  CompressStackTraceTest(10000);
}

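// Throughput benchmark for CompressStack on a full trace; there is no
// correctness check here beyond not crashing.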
void CompressStackTraceBenchmark(size_t n_iter) {
  const size_t kNumPcs = ARRAY_SIZE(pc_array);
  u32 compressed[2 * kNumPcs];
  std::random_shuffle(pc_array, pc_array + kNumPcs);

  __asan::StackTrace stack0;
  stack0.CopyFrom(pc_array, kNumPcs);
  stack0.size = kNumPcs;
  for (size_t iter = 0; iter < n_iter; iter++) {
    size_t compress_size = kNumPcs;
    size_t n_frames =
      __asan::StackTrace::CompressStack(&stack0, compressed, compress_size);
    Ident(n_frames);
  }
}

TEST(AddressSanitizer, CompressStackTraceBenchmark) {
  CompressStackTraceBenchmark(1 << 24);
}

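// Freed chunks are held in quarantine for a while instead of being reused
// immediately. Keep allocating and freeing same-sized chunks until the
// original address comes back: it must take a large number of iterations
// (the quarantine is big), but not an unbounded one.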
TEST(AddressSanitizer, QuarantineTest) {
  __asan::StackTrace stack;
  stack.trace[0] = 0x890;
  stack.size = 1;

  const int size = 32;
  void *p = __asan::asan_malloc(size, &stack);
  __asan::asan_free(p, &stack, __asan::FROM_MALLOC);
  size_t i;
  size_t max_i = 1 << 30;
  for (i = 0; i < max_i; i++) {
    void *p1 = __asan::asan_malloc(size, &stack);
    __asan::asan_free(p1, &stack, __asan::FROM_MALLOC);
    if (p1 == p) break;
  }
  // fprintf(stderr, "i=%ld\n", i);
  EXPECT_GE(i, 100000U);
  EXPECT_LT(i, max_i);
}

void *ThreadedQuarantineTestWorker(void *unused) {
  (void)unused;
  u32 seed = my_rand();
  __asan::StackTrace stack;
  stack.trace[0] = 0x890;
  stack.size = 1;

  for (size_t i = 0; i < 1000; i++) {
    void *p = __asan::asan_malloc(1 + (my_rand_r(&seed) % 4000), &stack);
    __asan::asan_free(p, &stack, __asan::FROM_MALLOC);
  }
  return NULL;
}

// Check that the thread-local allocators are flushed when threads are
// destroyed.
TEST(AddressSanitizer, ThreadedQuarantineTest) {
  const int n_threads = 3000;
  size_t mmaped1 = __asan_get_heap_size();
  for (int i = 0; i < n_threads; i++) {
    pthread_t t;
    PTHREAD_CREATE(&t, NULL, ThreadedQuarantineTestWorker, 0);
    PTHREAD_JOIN(t, 0);
    size_t mmaped2 = __asan_get_heap_size();
    EXPECT_LT(mmaped2 - mmaped1, 320U * (1 << 20));
  }
}

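// Worker for the test below: repeatedly allocates and frees batches of
// fixed-size blocks to stress concurrent malloc/free of a single size class.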
void *ThreadedOneSizeMallocStress(void *unused) {
  (void)unused;
  __asan::StackTrace stack;
  stack.trace[0] = 0x890;
  stack.size = 1;
  const size_t kNumMallocs = 1000;
  for (int iter = 0; iter < 1000; iter++) {
    void *p[kNumMallocs];
    for (size_t i = 0; i < kNumMallocs; i++) {
      p[i] = __asan::asan_malloc(32, &stack);
    }
    for (size_t i = 0; i < kNumMallocs; i++) {
      __asan::asan_free(p[i], &stack, __asan::FROM_MALLOC);
    }
  }
  return NULL;
}

TEST(AddressSanitizer, ThreadedOneSizeMallocStressTest) {
  const int kNumThreads = 4;
  pthread_t t[kNumThreads];
  for (int i = 0; i < kNumThreads; i++) {
    PTHREAD_CREATE(&t[i], 0, ThreadedOneSizeMallocStress, 0);
  }
  for (int i = 0; i < kNumThreads; i++) {
    PTHREAD_JOIN(t[i], 0);
  }
}

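// A memset into the shadow regions themselves (low shadow, shadow gap, high
// shadow) is a wild write that ASan cannot attribute to a user allocation,
// so it must die with an "unknown-crash" report naming the region.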
TEST(AddressSanitizer, MemsetWildAddressTest) {
  typedef void*(*memset_p)(void*, int, size_t);
  // Prevent inlining of memset().
  volatile memset_p libc_memset = (memset_p)memset;
  EXPECT_DEATH(libc_memset((void*)(kLowShadowBeg + 200), 0, 100),
               (kLowShadowEnd == 0) ? "unknown-crash.*shadow gap"
                                    : "unknown-crash.*low shadow");
  EXPECT_DEATH(libc_memset((void*)(kShadowGapBeg + 200), 0, 100),
               "unknown-crash.*shadow gap");
  EXPECT_DEATH(libc_memset((void*)(kHighShadowBeg + 200), 0, 100),
               "unknown-crash.*high shadow");
}

TEST(AddressSanitizerInterface, GetEstimatedAllocatedSize) {
#if ASAN_ALLOCATOR_VERSION == 1
  EXPECT_EQ(1U, __asan_get_estimated_allocated_size(0));
#elif ASAN_ALLOCATOR_VERSION == 2
  EXPECT_EQ(0U, __asan_get_estimated_allocated_size(0));
#endif
  const size_t sizes[] = { 1, 30, 1<<30 };
  for (size_t i = 0; i < 3; i++) {
    EXPECT_EQ(sizes[i], __asan_get_estimated_allocated_size(sizes[i]));
  }
}

static const char* kGetAllocatedSizeErrorMsg =
  "attempting to call __asan_get_allocated_size()";

TEST(AddressSanitizerInterface, GetAllocatedSizeAndOwnershipTest) {
  const size_t kArraySize = 100;
  char *array = Ident((char*)malloc(kArraySize));
  int *int_ptr = Ident(new int);

  // Allocated memory is owned by the allocator, and the allocated size
  // should equal the requested size.
  EXPECT_EQ(true, __asan_get_ownership(array));
  EXPECT_EQ(kArraySize, __asan_get_allocated_size(array));
  EXPECT_EQ(true, __asan_get_ownership(int_ptr));
  EXPECT_EQ(sizeof(int), __asan_get_allocated_size(int_ptr));

  // We cannot call GetAllocatedSize on memory we didn't allocate, or on
  // interior pointers (addresses not returned by a previous malloc).
  void *wild_addr = (void*)0x1;
  EXPECT_FALSE(__asan_get_ownership(wild_addr));
  EXPECT_DEATH(__asan_get_allocated_size(wild_addr), kGetAllocatedSizeErrorMsg);
  EXPECT_FALSE(__asan_get_ownership(array + kArraySize / 2));
  EXPECT_DEATH(__asan_get_allocated_size(array + kArraySize / 2),
               kGetAllocatedSizeErrorMsg);

  // NULL is not owned, but is a valid argument for __asan_get_allocated_size().
  EXPECT_FALSE(__asan_get_ownership(NULL));
  EXPECT_EQ(0U, __asan_get_allocated_size(NULL));

  // Once memory is freed, it is no longer owned, and calling
  // GetAllocatedSize on it is forbidden.
  free(array);
  EXPECT_FALSE(__asan_get_ownership(array));
  EXPECT_DEATH(__asan_get_allocated_size(array), kGetAllocatedSizeErrorMsg);
  delete int_ptr;

  void *zero_alloc = Ident(malloc(0));
  if (zero_alloc != 0) {
    // If malloc(0) returns a non-null pointer, that pointer is owned and
    // should have a valid allocated size.
    EXPECT_TRUE(__asan_get_ownership(zero_alloc));
    // Allocated size is 0 or 1 depending on the allocator used.
    EXPECT_LT(__asan_get_allocated_size(zero_alloc), 2U);
  }
  free(zero_alloc);
}

TEST(AddressSanitizerInterface, GetCurrentAllocatedBytesTest) {
  size_t before_malloc, after_malloc, after_free;
  char *array;
  const size_t kMallocSize = 100;
  before_malloc = __asan_get_current_allocated_bytes();

  array = Ident((char*)malloc(kMallocSize));
  after_malloc = __asan_get_current_allocated_bytes();
  EXPECT_EQ(before_malloc + kMallocSize, after_malloc);

  free(array);
  after_free = __asan_get_current_allocated_bytes();
  EXPECT_EQ(before_malloc, after_free);
}

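// Helper that deliberately frees the same pointer twice; used by the tests
// below as a reliable way to make ASan report an error and die.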
static void DoDoubleFree() {
  int *x = Ident(new int);
  delete Ident(x);
  delete Ident(x);
}

#if ASAN_ALLOCATOR_VERSION == 1
// This test is run in a separate process, so that the large malloc-ed
// chunk won't remain in the free lists after the test.
// Note: use ASSERT_* instead of EXPECT_* here.
static void RunGetHeapSizeTestAndDie() {
  size_t old_heap_size, new_heap_size, heap_growth;
  // We are unlikely to have a chunk of this size already in the free list.
  static const size_t kLargeMallocSize = 1 << 29;  // 512M
  old_heap_size = __asan_get_heap_size();
  fprintf(stderr, "allocating %zu bytes:\n", kLargeMallocSize);
  free(Ident(malloc(kLargeMallocSize)));
  new_heap_size = __asan_get_heap_size();
  heap_growth = new_heap_size - old_heap_size;
  fprintf(stderr, "heap growth after first malloc: %zu\n", heap_growth);
  ASSERT_GE(heap_growth, kLargeMallocSize);
  ASSERT_LE(heap_growth, 2 * kLargeMallocSize);

  // Now the large chunk should have fallen into the free list, and can be
  // allocated again without increasing the heap size.
  old_heap_size = new_heap_size;
  free(Ident(malloc(kLargeMallocSize)));
  heap_growth = __asan_get_heap_size() - old_heap_size;
  fprintf(stderr, "heap growth after second malloc: %zu\n", heap_growth);
  ASSERT_LT(heap_growth, kLargeMallocSize);

  // Test passed. Now die with expected double-free.
  DoDoubleFree();
}

TEST(AddressSanitizerInterface, GetHeapSizeTest) {
  EXPECT_DEATH(RunGetHeapSizeTestAndDie(), "double-free");
}
#elif ASAN_ALLOCATOR_VERSION == 2
TEST(AddressSanitizerInterface, GetHeapSizeTest) {
  // asan_allocator2 does not keep huge chunks in the free list; it unmaps
  // them instead. The chunk should be larger than the quarantine size,
  // otherwise it will be stuck in quarantine instead of being unmapped.
  static const size_t kLargeMallocSize = 1 << 29;  // 512M
  uptr old_heap_size = __asan_get_heap_size();
  for (int i = 0; i < 3; i++) {
    // fprintf(stderr, "allocating %zu bytes:\n", kLargeMallocSize);
    free(Ident(malloc(kLargeMallocSize)));
    EXPECT_EQ(old_heap_size, __asan_get_heap_size());
  }
}
#endif

// Note: use ASSERT_* instead of EXPECT_* here.
static void DoLargeMallocForGetFreeBytesTestAndDie() {
#if ASAN_ALLOCATOR_VERSION == 1
  // asan_allocator2 does not keep large chunks in its free lists, so this
  // test would not work with it.
  size_t old_free_bytes, new_free_bytes;
  static const size_t kLargeMallocSize = 1 << 29;  // 512M
  // If we malloc and free a large memory chunk, it will not fall
  // into quarantine and will be available for future requests.
  old_free_bytes = __asan_get_free_bytes();
  fprintf(stderr, "allocating %zu bytes:\n", kLargeMallocSize);
  fprintf(stderr, "free bytes before malloc: %zu\n", old_free_bytes);
  free(Ident(malloc(kLargeMallocSize)));
  new_free_bytes = __asan_get_free_bytes();
  fprintf(stderr, "free bytes after malloc and free: %zu\n", new_free_bytes);
  ASSERT_GE(new_free_bytes, old_free_bytes + kLargeMallocSize);
#endif  // ASAN_ALLOCATOR_VERSION
  // Test passed.
  DoDoubleFree();
}

TEST(AddressSanitizerInterface, GetFreeBytesTest) {
#if ASAN_ALLOCATOR_VERSION == 1
  // Allocate a small chunk. The allocator will now likely have many free
  // chunks of this size cached to fulfill future requests, so each further
  // request should decrease the number of free bytes. Do this only on
  // systems where there is enough memory for such assumptions.
  if (SANITIZER_WORDSIZE == 64 && !ASAN_LOW_MEMORY) {
    static const size_t kNumOfChunks = 100;
    static const size_t kChunkSize = 100;
    char *chunks[kNumOfChunks];
    size_t i;
    size_t old_free_bytes, new_free_bytes;
    chunks[0] = Ident((char*)malloc(kChunkSize));
    old_free_bytes = __asan_get_free_bytes();
    for (i = 1; i < kNumOfChunks; i++) {
      chunks[i] = Ident((char*)malloc(kChunkSize));
      new_free_bytes = __asan_get_free_bytes();
      EXPECT_LT(new_free_bytes, old_free_bytes);
      old_free_bytes = new_free_bytes;
    }
    for (i = 0; i < kNumOfChunks; i++)
      free(chunks[i]);
  }
#endif
  EXPECT_DEATH(DoLargeMallocForGetFreeBytesTestAndDie(), "double-free");
}

static const size_t kManyThreadsMallocSizes[] = {5, 1UL<<10, 1UL<<14, 357};
static const size_t kManyThreadsIterations = 250;
static const size_t kManyThreadsNumThreads =
  (SANITIZER_WORDSIZE == 32) ? 40 : 200;

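// Worker for the stats stress test below: churns allocations of several
// sizes and finishes with a single large allocation.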
void *ManyThreadsWithStatsWorker(void *arg) {
  (void)arg;
  for (size_t iter = 0; iter < kManyThreadsIterations; iter++) {
    for (size_t size_index = 0; size_index < 4; size_index++) {
      free(Ident(malloc(kManyThreadsMallocSizes[size_index])));
    }
  }
  // Just one large allocation.
  free(Ident(malloc(1 << 20)));
  return 0;
}

TEST(AddressSanitizerInterface, ManyThreadsWithStatsStressTest) {
  size_t before_test, after_test, i;
  pthread_t threads[kManyThreadsNumThreads];
  before_test = __asan_get_current_allocated_bytes();
  for (i = 0; i < kManyThreadsNumThreads; i++) {
    PTHREAD_CREATE(&threads[i], 0,
                   (void* (*)(void *x))ManyThreadsWithStatsWorker, (void*)i);
  }
  for (i = 0; i < kManyThreadsNumThreads; i++) {
    PTHREAD_JOIN(threads[i], 0);
  }
  after_test = __asan_get_current_allocated_bytes();
  // ASan stats also reflect memory usage of internal ASan RTL structs,
  // so we can't check for equality here.
  EXPECT_LT(after_test, before_test + (1UL<<20));
}

TEST(AddressSanitizerInterface, ExitCode) {
  int original_exit_code = __asan_set_error_exit_code(7);
  EXPECT_EXIT(DoDoubleFree(), ::testing::ExitedWithCode(7), "");
  EXPECT_EQ(7, __asan_set_error_exit_code(8));
  EXPECT_EXIT(DoDoubleFree(), ::testing::ExitedWithCode(8), "");
  EXPECT_EQ(8, __asan_set_error_exit_code(original_exit_code));
  EXPECT_EXIT(DoDoubleFree(),
              ::testing::ExitedWithCode(original_exit_code), "");
}

static void MyDeathCallback() {
  fprintf(stderr, "MyDeathCallback\n");
}

TEST(AddressSanitizerInterface, DeathCallbackTest) {
  __asan_set_death_callback(MyDeathCallback);
  EXPECT_DEATH(DoDoubleFree(), "MyDeathCallback");
  __asan_set_death_callback(NULL);
}

static const char* kUseAfterPoisonErrorMessage = "use-after-poison";

#define GOOD_ACCESS(ptr, offset)  \
    EXPECT_FALSE(__asan::AddressIsPoisoned((uptr)(ptr + offset)))

#define BAD_ACCESS(ptr, offset) \
    EXPECT_TRUE(__asan::AddressIsPoisoned((uptr)(ptr + offset)))

TEST(AddressSanitizerInterface, SimplePoisonMemoryRegionTest) {
  char *array = Ident((char*)malloc(120));
  // poison array[40..80)
  __asan_poison_memory_region(array + 40, 40);
  GOOD_ACCESS(array, 39);
  GOOD_ACCESS(array, 80);
  BAD_ACCESS(array, 40);
  BAD_ACCESS(array, 60);
  BAD_ACCESS(array, 79);
  EXPECT_DEATH(__asan_report_error(0, 0, 0, (uptr)(array + 40), true, 1),
               kUseAfterPoisonErrorMessage);
  __asan_unpoison_memory_region(array + 40, 40);
  // access previously poisoned memory.
  GOOD_ACCESS(array, 40);
  GOOD_ACCESS(array, 79);
  free(array);
}

TEST(AddressSanitizerInterface, OverlappingPoisonMemoryRegionTest) {
  char *array = Ident((char*)malloc(120));
  // Poison [0..40) and [80..120)
  __asan_poison_memory_region(array, 40);
  __asan_poison_memory_region(array + 80, 40);
  BAD_ACCESS(array, 20);
  GOOD_ACCESS(array, 60);
  BAD_ACCESS(array, 100);
  // Poison whole array - [0..120)
  __asan_poison_memory_region(array, 120);
  BAD_ACCESS(array, 60);
  // Unpoison [24..96)
  __asan_unpoison_memory_region(array + 24, 72);
  BAD_ACCESS(array, 23);
  GOOD_ACCESS(array, 24);
  GOOD_ACCESS(array, 60);
  GOOD_ACCESS(array, 95);
  BAD_ACCESS(array, 96);
  free(array);
}

TEST(AddressSanitizerInterface, PushAndPopWithPoisoningTest) {
  // Vector of capacity 20
  char *vec = Ident((char*)malloc(20));
  __asan_poison_memory_region(vec, 20);
  for (size_t i = 0; i < 7; i++) {
    // Simulate push_back.
    __asan_unpoison_memory_region(vec + i, 1);
    GOOD_ACCESS(vec, i);
    BAD_ACCESS(vec, i + 1);
  }
  for (size_t i = 7; i > 0; i--) {
    // Simulate pop_back.
    __asan_poison_memory_region(vec + i - 1, 1);
    BAD_ACCESS(vec, i - 1);
    if (i > 1) GOOD_ACCESS(vec, i - 2);
  }
  free(vec);
}

// Make sure that within each aligned block of size 2^granularity, a poisoned
// ("true") byte never precedes an unpoisoned ("false") byte: poisoned bytes
// may only form a suffix of the block, mirroring what real shadow memory
// can represent.
static void MakeShadowValid(bool *shadow, int length, int granularity) {
  bool can_be_poisoned = true;
  for (int i = length - 1; i >= 0; i--) {
    if (!shadow[i])
      can_be_poisoned = false;
    if (!can_be_poisoned)
      shadow[i] = false;
    if (i % (1 << granularity) == 0) {
      can_be_poisoned = true;
    }
  }
}

TEST(AddressSanitizerInterface, PoisoningStressTest) {
  const size_t kSize = 24;
  bool expected[kSize];
  char *arr = Ident((char*)malloc(kSize));
  for (size_t l1 = 0; l1 < kSize; l1++) {
    for (size_t s1 = 1; l1 + s1 <= kSize; s1++) {
      for (size_t l2 = 0; l2 < kSize; l2++) {
        for (size_t s2 = 1; l2 + s2 <= kSize; s2++) {
          // Poison [l1, l1+s1), [l2, l2+s2) and check result.
          __asan_unpoison_memory_region(arr, kSize);
          __asan_poison_memory_region(arr + l1, s1);
          __asan_poison_memory_region(arr + l2, s2);
          memset(expected, false, kSize);
          memset(expected + l1, true, s1);
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          memset(expected + l2, true, s2);
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          for (size_t i = 0; i < kSize; i++) {
            ASSERT_EQ(expected[i], __asan_address_is_poisoned(arr + i));
          }
          // Unpoison [l1, l1+s1) and [l2, l2+s2) and check result.
          __asan_poison_memory_region(arr, kSize);
          __asan_unpoison_memory_region(arr + l1, s1);
          __asan_unpoison_memory_region(arr + l2, s2);
          memset(expected, true, kSize);
          memset(expected + l1, false, s1);
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          memset(expected + l2, false, s2);
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          for (size_t i = 0; i < kSize; i++) {
            ASSERT_EQ(expected[i], __asan_address_is_poisoned(arr + i));
          }
        }
      }
    }
  }
}

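// __asan_region_is_poisoned(beg, size) returns the address of the first
// poisoned byte in [beg, beg+size), or 0 if the whole region is addressable.
// Here the region past the allocation is the heap redzone, assumed to start
// right at p + size with at least rz = 16 poisoned bytes.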
TEST(AddressSanitizerInterface, PoisonedRegion) {
  size_t rz = 16;
  for (size_t size = 1; size <= 64; size++) {
    char *p = new char[size];
    uptr x = reinterpret_cast<uptr>(p);
    for (size_t beg = 0; beg < size + rz; beg++) {
      for (size_t end = beg; end < size + rz; end++) {
        uptr first_poisoned = __asan_region_is_poisoned(x + beg, end - beg);
        if (beg == end) {
          EXPECT_FALSE(first_poisoned);
        } else if (beg < size && end <= size) {
          EXPECT_FALSE(first_poisoned);
        } else if (beg >= size) {
          EXPECT_EQ(x + beg, first_poisoned);
        } else {
          EXPECT_GT(end, size);
          EXPECT_EQ(x + size, first_poisoned);
        }
      }
    }
    delete [] p;
  }
}

// This is a performance benchmark for manual runs.
// ASan's memset interceptor calls mem_is_zero for the entire shadow region.
// The profile should look like this:
//     89.10%   [.] __memset_sse2
//     10.50%   [.] __sanitizer::mem_is_zero
// I.e. mem_is_zero should consume roughly SHADOW_GRANULARITY times fewer
// CPU cycles than memset itself.
TEST(AddressSanitizerInterface, DISABLED_Stress_memset) {
  size_t size = 1 << 20;
  char *x = new char[size];
  for (int i = 0; i < 100000; i++)
    Ident(memset)(x, 0, size);
  delete [] x;
}

static const char *kInvalidPoisonMessage = "invalid-poison-memory-range";
static const char *kInvalidUnpoisonMessage = "invalid-unpoison-memory-range";

TEST(AddressSanitizerInterface, DISABLED_InvalidPoisonAndUnpoisonCallsTest) {
  char *array = Ident((char*)malloc(120));
  __asan_unpoison_memory_region(array, 120);
  // Try to unpoison memory that we don't own.
  EXPECT_DEATH(__asan_unpoison_memory_region(array, 121),
               kInvalidUnpoisonMessage);
  EXPECT_DEATH(__asan_unpoison_memory_region(array - 1, 120),
               kInvalidUnpoisonMessage);

  __asan_poison_memory_region(array, 120);
  // Try to poison memory that we don't own.
  EXPECT_DEATH(__asan_poison_memory_region(array, 121), kInvalidPoisonMessage);
  EXPECT_DEATH(__asan_poison_memory_region(array - 1, 120),
               kInvalidPoisonMessage);
  free(array);
}

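// Error report callback: bracket the report with "ABCDEF" sentinels so the
// test below can verify that the callback received the full report text.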
static void ErrorReportCallbackOneToZ(const char *report) {
  int report_len = strlen(report);
  ASSERT_EQ(6, write(2, "ABCDEF", 6));
  ASSERT_EQ(report_len, write(2, report, report_len));
  ASSERT_EQ(6, write(2, "ABCDEF", 6));
  _exit(1);
}

TEST(AddressSanitizerInterface, SetErrorReportCallbackTest) {
  __asan_set_error_report_callback(ErrorReportCallbackOneToZ);
  EXPECT_DEATH(__asan_report_error(0, 0, 0, 0, true, 1),
               ASAN_PCRE_DOTALL "ABCDEF.*AddressSanitizer.*WRITE.*ABCDEF");
  __asan_set_error_report_callback(NULL);
}

TEST(AddressSanitizerInterface, GetOwnershipStressTest) {
  std::vector<char *> pointers;
  std::vector<size_t> sizes;
#if ASAN_ALLOCATOR_VERSION == 1
  const size_t kNumMallocs =
      (SANITIZER_WORDSIZE <= 32 || ASAN_LOW_MEMORY) ? 1 << 10 : 1 << 14;
#elif ASAN_ALLOCATOR_VERSION == 2  // too slow with asan_allocator2. :(
  const size_t kNumMallocs = 1 << 9;
#endif
  for (size_t i = 0; i < kNumMallocs; i++) {
    size_t size = i * 100 + 1;
    pointers.push_back((char*)malloc(size));
    sizes.push_back(size);
  }
  for (size_t i = 0; i < 4000000; i++) {
    EXPECT_FALSE(__asan_get_ownership(&pointers));
    EXPECT_FALSE(__asan_get_ownership((void*)0x1234));
    size_t idx = i % kNumMallocs;
    EXPECT_TRUE(__asan_get_ownership(pointers[idx]));
    EXPECT_EQ(sizes[idx], __asan_get_allocated_size(pointers[idx]));
  }
  for (size_t i = 0, n = pointers.size(); i < n; i++)
    free(pointers[i]);
}
786