//===-- asan_noinst_test.cc -----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// This test file should be compiled w/o asan instrumentation.
//===----------------------------------------------------------------------===//

#include "asan_allocator.h"
#include "asan_internal.h"
#include "asan_mapping.h"
#include "asan_stack.h"
#include "asan_test_utils.h"

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>  // for memset()
#include <algorithm>
#include <vector>
#include <limits>


TEST(AddressSanitizer, InternalSimpleDeathTest) {
  EXPECT_DEATH(exit(1), "");
}

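// Stress the allocator with a pseudo-random mix of operations: roughly every
// third iteration frees a random live pointer; the rest allocate 1..1000
// bytes (occasionally a few KB more) at a random power-of-two alignment and
// touch the first, middle and last bytes of the block.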
static void MallocStress(size_t n) {
  u32 seed = my_rand();
  __asan::StackTrace stack1;
  stack1.trace[0] = 0xa123;
  stack1.trace[1] = 0xa456;
  stack1.size = 2;

  __asan::StackTrace stack2;
  stack2.trace[0] = 0xb123;
  stack2.trace[1] = 0xb456;
  stack2.size = 2;

  __asan::StackTrace stack3;
  stack3.trace[0] = 0xc123;
  stack3.trace[1] = 0xc456;
  stack3.size = 2;

  std::vector<void *> vec;
  for (size_t i = 0; i < n; i++) {
    if ((i % 3) == 0) {
      if (vec.empty()) continue;
      size_t idx = my_rand_r(&seed) % vec.size();
      void *ptr = vec[idx];
      vec[idx] = vec.back();
      vec.pop_back();
      __asan::asan_free(ptr, &stack1, __asan::FROM_MALLOC);
    } else {
      size_t size = my_rand_r(&seed) % 1000 + 1;
      switch ((my_rand_r(&seed) % 128)) {
        case 0: size += 1024; break;
        case 1: size += 2048; break;
        case 2: size += 4096; break;
      }
      size_t alignment = 1 << (my_rand_r(&seed) % 10 + 1);
      char *ptr = (char*)__asan::asan_memalign(alignment, size,
                                               &stack2, __asan::FROM_MALLOC);
      vec.push_back(ptr);
      ptr[0] = 0;
      ptr[size-1] = 0;
      ptr[size/2] = 0;
    }
  }
  for (size_t i = 0; i < vec.size(); i++)
    __asan::asan_free(vec[i], &stack3, __asan::FROM_MALLOC);
}


TEST(AddressSanitizer, NoInstMallocTest) {
#ifdef __arm__
  MallocStress(300000);
#else
  MallocStress(1000000);
#endif
}

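// Dump the shadow bytes around [ptr, ptr+size) to stderr, printing a '.' at
// the region's start and end. Used by DISABLED_InternalPrintShadow below for
// manual inspection only.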
static void PrintShadow(const char *tag, uptr ptr, size_t size) {
  fprintf(stderr, "%s shadow: %lx size % 3ld: ", tag, (long)ptr, (long)size);
  uptr prev_shadow = 0;
  for (sptr i = -32; i < (sptr)size + 32; i++) {
    uptr shadow = __asan::MemToShadow(ptr + i);
    if (i == 0 || i == (sptr)size)
      fprintf(stderr, ".");
    if (shadow != prev_shadow) {
      prev_shadow = shadow;
      fprintf(stderr, "%02x", (int)*(u8*)shadow);
    }
  }
  fprintf(stderr, "\n");
}

TEST(AddressSanitizer, DISABLED_InternalPrintShadow) {
  for (size_t size = 1; size <= 513; size++) {
    char *ptr = new char[size];
    PrintShadow("m", (uptr)ptr, size);
    delete [] ptr;
    PrintShadow("f", (uptr)ptr, size);
  }
}

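// A pool of plausible PC values fed to the stack compression tests below.
// The 64-bit entries are included only on 64-bit hosts.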
static uptr pc_array[] = {
#if SANITIZER_WORDSIZE == 64
  0x7effbf756068ULL,
  0x7effbf75e5abULL,
  0x7effc0625b7cULL,
  0x7effc05b8997ULL,
  0x7effbf990577ULL,
  0x7effbf990c56ULL,
  0x7effbf992f3cULL,
  0x7effbf950c22ULL,
  0x7effc036dba0ULL,
  0x7effc03638a3ULL,
  0x7effc035be4aULL,
  0x7effc0539c45ULL,
  0x7effc0539a65ULL,
  0x7effc03db9b3ULL,
  0x7effc03db100ULL,
  0x7effc037c7b8ULL,
  0x7effc037bfffULL,
  0x7effc038b777ULL,
  0x7effc038021cULL,
  0x7effc037c7d1ULL,
  0x7effc037bfffULL,
  0x7effc038b777ULL,
  0x7effc038021cULL,
  0x7effc037c7d1ULL,
  0x7effc037bfffULL,
  0x7effc038b777ULL,
  0x7effc038021cULL,
  0x7effc037c7d1ULL,
  0x7effc037bfffULL,
  0x7effc0520d26ULL,
  0x7effc009ddffULL,
  0x7effbf90bb50ULL,
  0x7effbdddfa69ULL,
  0x7effbdde1fe2ULL,
  0x7effbdde2424ULL,
  0x7effbdde27b3ULL,
  0x7effbddee53bULL,
  0x7effbdde1988ULL,
  0x7effbdde0904ULL,
  0x7effc106ce0dULL,
  0x7effbcc3fa04ULL,
  0x7effbcc3f6a4ULL,
  0x7effbcc3e726ULL,
  0x7effbcc40852ULL,
  0x7effb681ec4dULL,
#endif  // SANITIZER_WORDSIZE
  0xB0B5E768,
  0x7B682EC1,
  0x367F9918,
  0xAE34E13,
  0xBA0C6C6,
  0x13250F46,
  0xA0D6A8AB,
  0x2B07C1A8,
  0x6C844F4A,
  0x2321B53,
  0x1F3D4F8F,
  0x3FE2924B,
  0xB7A2F568,
  0xBD23950A,
  0x61020930,
  0x33E7970C,
  0x405998A1,
  0x59F3551D,
  0x350E3028,
  0xBC55A28D,
  0x361F3AED,
  0xBEAD0F73,
  0xAEF28479,
  0x757E971F,
  0xAEBA450,
  0x43AD22F5,
  0x8C2C50C4,
  0x7AD8A2E1,
  0x69EE4EE8,
  0xC08DFF,
  0x4BA6538,
  0x3708AB2,
  0xC24B6475,
  0x7C8890D7,
  0x6662495F,
  0x9B641689,
  0xD3596B,
  0xA1049569,
  0x44CBC16,
  0x4D39C39F
};

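// Round-trip check: compress a random prefix of the shuffled pc_array into a
// buffer of random size, uncompress it, and verify that every frame that
// survived compression matches the original.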
void CompressStackTraceTest(size_t n_iter) {
  u32 seed = my_rand();
  const size_t kNumPcs = ARRAY_SIZE(pc_array);
  u32 compressed[2 * kNumPcs];

  for (size_t iter = 0; iter < n_iter; iter++) {
    std::random_shuffle(pc_array, pc_array + kNumPcs);
    __asan::StackTrace stack0, stack1;
    stack0.CopyFrom(pc_array, kNumPcs);
    stack0.size = std::max((size_t)1, (size_t)(my_rand_r(&seed) % stack0.size));
    size_t compress_size =
      std::max((size_t)2, (size_t)my_rand_r(&seed) % (2 * kNumPcs));
    size_t n_frames =
      __asan::StackTrace::CompressStack(&stack0, compressed, compress_size);
    Ident(n_frames);
    assert(n_frames <= stack0.size);
    __asan::StackTrace::UncompressStack(&stack1, compressed, compress_size);
    assert(stack1.size == n_frames);
    for (size_t i = 0; i < stack1.size; i++) {
      assert(stack0.trace[i] == stack1.trace[i]);
    }
  }
}

TEST(AddressSanitizer, CompressStackTraceTest) {
  CompressStackTraceTest(10000);
}

void CompressStackTraceBenchmark(size_t n_iter) {
  const size_t kNumPcs = ARRAY_SIZE(pc_array);
  u32 compressed[2 * kNumPcs];
  std::random_shuffle(pc_array, pc_array + kNumPcs);

  __asan::StackTrace stack0;
  stack0.CopyFrom(pc_array, kNumPcs);
  stack0.size = kNumPcs;
  for (size_t iter = 0; iter < n_iter; iter++) {
    size_t compress_size = kNumPcs;
    size_t n_frames =
      __asan::StackTrace::CompressStack(&stack0, compressed, compress_size);
    Ident(n_frames);
  }
}

TEST(AddressSanitizer, CompressStackTraceBenchmark) {
  CompressStackTraceBenchmark(1 << 24);
}

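// Check that freed chunks spend time in quarantine before reuse: the same
// address should come back only after many (here at least ~100000)
// malloc/free cycles of the same size, but it should come back eventually.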
TEST(AddressSanitizer, QuarantineTest) {
  __asan::StackTrace stack;
  stack.trace[0] = 0x890;
  stack.size = 1;

  const int size = 32;
  void *p = __asan::asan_malloc(size, &stack);
  __asan::asan_free(p, &stack, __asan::FROM_MALLOC);
  size_t i;
  size_t max_i = 1 << 30;
  for (i = 0; i < max_i; i++) {
    void *p1 = __asan::asan_malloc(size, &stack);
    __asan::asan_free(p1, &stack, __asan::FROM_MALLOC);
    if (p1 == p) break;
  }
  // fprintf(stderr, "i=%ld\n", i);
  EXPECT_GE(i, 100000U);
  EXPECT_LT(i, max_i);
}

void *ThreadedQuarantineTestWorker(void *unused) {
  (void)unused;
  u32 seed = my_rand();
  __asan::StackTrace stack;
  stack.trace[0] = 0x890;
  stack.size = 1;

  for (size_t i = 0; i < 1000; i++) {
    void *p = __asan::asan_malloc(1 + (my_rand_r(&seed) % 4000), &stack);
    __asan::asan_free(p, &stack, __asan::FROM_MALLOC);
  }
  return NULL;
}

// Check that the thread local allocators are flushed when threads are
// destroyed.
TEST(AddressSanitizer, ThreadedQuarantineTest) {
  const int n_threads = 3000;
  size_t mmaped1 = __asan_get_heap_size();
  for (int i = 0; i < n_threads; i++) {
    pthread_t t;
    PTHREAD_CREATE(&t, NULL, ThreadedQuarantineTestWorker, 0);
    PTHREAD_JOIN(t, 0);
    size_t mmaped2 = __asan_get_heap_size();
    EXPECT_LT(mmaped2 - mmaped1, 320U * (1 << 20));
  }
}

void *ThreadedOneSizeMallocStress(void *unused) {
  (void)unused;
  __asan::StackTrace stack;
  stack.trace[0] = 0x890;
  stack.size = 1;
  const size_t kNumMallocs = 1000;
  for (int iter = 0; iter < 1000; iter++) {
    void *p[kNumMallocs];
    for (size_t i = 0; i < kNumMallocs; i++) {
      p[i] = __asan::asan_malloc(32, &stack);
    }
    for (size_t i = 0; i < kNumMallocs; i++) {
      __asan::asan_free(p[i], &stack, __asan::FROM_MALLOC);
    }
  }
  return NULL;
}

TEST(AddressSanitizer, ThreadedOneSizeMallocStressTest) {
  const int kNumThreads = 4;
  pthread_t t[kNumThreads];
  for (int i = 0; i < kNumThreads; i++) {
    PTHREAD_CREATE(&t[i], 0, ThreadedOneSizeMallocStress, 0);
  }
  for (int i = 0; i < kNumThreads; i++) {
    PTHREAD_JOIN(t[i], 0);
  }
}

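// memset() on an address inside the shadow regions or the shadow gap must be
// intercepted and reported as an unknown-crash naming the offended region.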
TEST(AddressSanitizer, MemsetWildAddressTest) {
  using __asan::kHighMemEnd;
  typedef void*(*memset_p)(void*, int, size_t);
  // Prevent inlining of memset().
  volatile memset_p libc_memset = (memset_p)memset;
  EXPECT_DEATH(libc_memset((void*)(kLowShadowBeg + 200), 0, 100),
               (kLowShadowEnd == 0) ? "unknown-crash.*shadow gap"
                                    : "unknown-crash.*low shadow");
  EXPECT_DEATH(libc_memset((void*)(kShadowGapBeg + 200), 0, 100),
               "unknown-crash.*shadow gap");
  EXPECT_DEATH(libc_memset((void*)(kHighShadowBeg + 200), 0, 100),
               "unknown-crash.*high shadow");
}

TEST(AddressSanitizerInterface, GetEstimatedAllocatedSize) {
#if ASAN_ALLOCATOR_VERSION == 1
  EXPECT_EQ(1U, __asan_get_estimated_allocated_size(0));
#elif ASAN_ALLOCATOR_VERSION == 2
  EXPECT_EQ(0U, __asan_get_estimated_allocated_size(0));
#endif
  const size_t sizes[] = { 1, 30, 1<<30 };
  for (size_t i = 0; i < 3; i++) {
    EXPECT_EQ(sizes[i], __asan_get_estimated_allocated_size(sizes[i]));
  }
}

static const char* kGetAllocatedSizeErrorMsg =
  "attempting to call __asan_get_allocated_size()";

TEST(AddressSanitizerInterface, GetAllocatedSizeAndOwnershipTest) {
  const size_t kArraySize = 100;
  char *array = Ident((char*)malloc(kArraySize));
  int *int_ptr = Ident(new int);

  // Allocated memory is owned by the allocator. The allocated size should
  // equal the requested size.
  EXPECT_EQ(true, __asan_get_ownership(array));
  EXPECT_EQ(kArraySize, __asan_get_allocated_size(array));
  EXPECT_EQ(true, __asan_get_ownership(int_ptr));
  EXPECT_EQ(sizeof(int), __asan_get_allocated_size(int_ptr));

  // We may not call __asan_get_allocated_size() on memory we didn't map,
  // or on interior pointers (not returned by a previous malloc).
  void *wild_addr = (void*)0x1;
  EXPECT_FALSE(__asan_get_ownership(wild_addr));
  EXPECT_DEATH(__asan_get_allocated_size(wild_addr), kGetAllocatedSizeErrorMsg);
  EXPECT_FALSE(__asan_get_ownership(array + kArraySize / 2));
  EXPECT_DEATH(__asan_get_allocated_size(array + kArraySize / 2),
               kGetAllocatedSizeErrorMsg);

  // NULL is not owned, but is a valid argument for
  // __asan_get_allocated_size().
  EXPECT_FALSE(__asan_get_ownership(NULL));
  EXPECT_EQ(0U, __asan_get_allocated_size(NULL));

  // Once memory is freed, it is no longer owned, and calling
  // __asan_get_allocated_size() on it is forbidden.
  free(array);
  EXPECT_FALSE(__asan_get_ownership(array));
  EXPECT_DEATH(__asan_get_allocated_size(array), kGetAllocatedSizeErrorMsg);
  delete int_ptr;

  void *zero_alloc = Ident(malloc(0));
  if (zero_alloc != 0) {
    // If malloc(0) is not null, this pointer is owned and should have a valid
    // allocated size.
    EXPECT_TRUE(__asan_get_ownership(zero_alloc));
    // The allocated size is 0 or 1 depending on the allocator used.
    EXPECT_LT(__asan_get_allocated_size(zero_alloc), 2U);
  }
  free(zero_alloc);
}

TEST(AddressSanitizerInterface, GetCurrentAllocatedBytesTest) {
  size_t before_malloc, after_malloc, after_free;
  char *array;
  const size_t kMallocSize = 100;
  before_malloc = __asan_get_current_allocated_bytes();

  array = Ident((char*)malloc(kMallocSize));
  after_malloc = __asan_get_current_allocated_bytes();
  EXPECT_EQ(before_malloc + kMallocSize, after_malloc);

  free(array);
  after_free = __asan_get_current_allocated_bytes();
  EXPECT_EQ(before_malloc, after_free);
}

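// Deliberately double-free an allocation; tests below use this to die with a
// deterministic, recognizable ASan error.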
static void DoDoubleFree() {
  int *x = Ident(new int);
  delete Ident(x);
  delete Ident(x);
}

#if ASAN_ALLOCATOR_VERSION == 1
// This test is run in a separate process so that the large malloced
// chunk won't remain in the free lists after the test.
// Note: use ASSERT_* instead of EXPECT_* here.
static void RunGetHeapSizeTestAndDie() {
  size_t old_heap_size, new_heap_size, heap_growth;
  // We are unlikely to have a chunk of this size in the free list.
  static const size_t kLargeMallocSize = 1 << 29;  // 512M
  old_heap_size = __asan_get_heap_size();
  fprintf(stderr, "allocating %zu bytes:\n", kLargeMallocSize);
  free(Ident(malloc(kLargeMallocSize)));
  new_heap_size = __asan_get_heap_size();
  heap_growth = new_heap_size - old_heap_size;
  fprintf(stderr, "heap growth after first malloc: %zu\n", heap_growth);
  ASSERT_GE(heap_growth, kLargeMallocSize);
  ASSERT_LE(heap_growth, 2 * kLargeMallocSize);

  // Now the large chunk should be in the free list, so it can be
  // allocated again without increasing the heap size.
  old_heap_size = new_heap_size;
  free(Ident(malloc(kLargeMallocSize)));
  heap_growth = __asan_get_heap_size() - old_heap_size;
  fprintf(stderr, "heap growth after second malloc: %zu\n", heap_growth);
  ASSERT_LT(heap_growth, kLargeMallocSize);

  // Test passed. Now die with the expected double-free.
  DoDoubleFree();
}

TEST(AddressSanitizerInterface, GetHeapSizeTest) {
  EXPECT_DEATH(RunGetHeapSizeTestAndDie(), "double-free");
}
#elif ASAN_ALLOCATOR_VERSION == 2
TEST(AddressSanitizerInterface, GetHeapSizeTest) {
  // asan_allocator2 does not keep huge chunks in the free list, but unmaps
  // them. The chunk should be larger than the quarantine size, otherwise it
  // will be stuck in quarantine instead of being unmapped.
  static const size_t kLargeMallocSize = 1 << 29;  // 512M
  uptr old_heap_size = __asan_get_heap_size();
  for (int i = 0; i < 3; i++) {
    // fprintf(stderr, "allocating %zu bytes:\n", kLargeMallocSize);
    free(Ident(malloc(kLargeMallocSize)));
    EXPECT_EQ(old_heap_size, __asan_get_heap_size());
  }
}
#endif

// Note: use ASSERT_* instead of EXPECT_* here.
static void DoLargeMallocForGetFreeBytesTestAndDie() {
#if ASAN_ALLOCATOR_VERSION == 1
  // asan_allocator2 does not keep large chunks in free lists, so this test
  // will not work with it.
  size_t old_free_bytes, new_free_bytes;
  static const size_t kLargeMallocSize = 1 << 29;  // 512M
  // If we malloc and free a large memory chunk, it will not fall
  // into quarantine and will be available for future requests.
  old_free_bytes = __asan_get_free_bytes();
  fprintf(stderr, "allocating %zu bytes:\n", kLargeMallocSize);
  fprintf(stderr, "free bytes before malloc: %zu\n", old_free_bytes);
  free(Ident(malloc(kLargeMallocSize)));
  new_free_bytes = __asan_get_free_bytes();
  fprintf(stderr, "free bytes after malloc and free: %zu\n", new_free_bytes);
  ASSERT_GE(new_free_bytes, old_free_bytes + kLargeMallocSize);
#endif  // ASAN_ALLOCATOR_VERSION
  // Test passed.
  DoDoubleFree();
}

TEST(AddressSanitizerInterface, GetFreeBytesTest) {
#if ASAN_ALLOCATOR_VERSION == 1
  // Allocate a small chunk. The allocator probably has a lot of these
  // chunks ready to fulfill future requests, so each further request will
  // decrease the number of free bytes. Do this only on systems where there
  // is enough memory for such assumptions.
  if (SANITIZER_WORDSIZE == 64 && !ASAN_LOW_MEMORY) {
    static const size_t kNumOfChunks = 100;
    static const size_t kChunkSize = 100;
    char *chunks[kNumOfChunks];
    size_t i;
    size_t old_free_bytes, new_free_bytes;
    chunks[0] = Ident((char*)malloc(kChunkSize));
    old_free_bytes = __asan_get_free_bytes();
    for (i = 1; i < kNumOfChunks; i++) {
      chunks[i] = Ident((char*)malloc(kChunkSize));
      new_free_bytes = __asan_get_free_bytes();
      EXPECT_LT(new_free_bytes, old_free_bytes);
      old_free_bytes = new_free_bytes;
    }
    for (i = 0; i < kNumOfChunks; i++)
      free(chunks[i]);
  }
#endif
  EXPECT_DEATH(DoLargeMallocForGetFreeBytesTestAndDie(), "double-free");
}

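// Hammer the allocator stats from many short-lived threads, each doing a mix
// of small allocations plus one large one; the test then checks that the
// global allocated-bytes counter returns close to its starting value.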
static const size_t kManyThreadsMallocSizes[] = {5, 1UL<<10, 1UL<<14, 357};
static const size_t kManyThreadsIterations = 250;
static const size_t kManyThreadsNumThreads =
  (SANITIZER_WORDSIZE == 32) ? 40 : 200;

void *ManyThreadsWithStatsWorker(void *arg) {
  (void)arg;
  for (size_t iter = 0; iter < kManyThreadsIterations; iter++) {
    for (size_t size_index = 0; size_index < 4; size_index++) {
      free(Ident(malloc(kManyThreadsMallocSizes[size_index])));
    }
  }
  // Just one large allocation.
  free(Ident(malloc(1 << 20)));
  return 0;
}

TEST(AddressSanitizerInterface, ManyThreadsWithStatsStressTest) {
  size_t before_test, after_test, i;
  pthread_t threads[kManyThreadsNumThreads];
  before_test = __asan_get_current_allocated_bytes();
  for (i = 0; i < kManyThreadsNumThreads; i++) {
    PTHREAD_CREATE(&threads[i], 0,
                   (void* (*)(void *x))ManyThreadsWithStatsWorker, (void*)i);
  }
  for (i = 0; i < kManyThreadsNumThreads; i++) {
    PTHREAD_JOIN(threads[i], 0);
  }
  after_test = __asan_get_current_allocated_bytes();
  // ASan stats also reflect memory usage of internal ASan RTL structs,
  // so we can't check for equality here.
  EXPECT_LT(after_test, before_test + (1UL<<20));
}

TEST(AddressSanitizerInterface, ExitCode) {
  int original_exit_code = __asan_set_error_exit_code(7);
  EXPECT_EXIT(DoDoubleFree(), ::testing::ExitedWithCode(7), "");
  EXPECT_EQ(7, __asan_set_error_exit_code(8));
  EXPECT_EXIT(DoDoubleFree(), ::testing::ExitedWithCode(8), "");
  EXPECT_EQ(8, __asan_set_error_exit_code(original_exit_code));
  EXPECT_EXIT(DoDoubleFree(),
              ::testing::ExitedWithCode(original_exit_code), "");
}

static void MyDeathCallback() {
  fprintf(stderr, "MyDeathCallback\n");
}

TEST(AddressSanitizerInterface, DeathCallbackTest) {
  __asan_set_death_callback(MyDeathCallback);
  EXPECT_DEATH(DoDoubleFree(), "MyDeathCallback");
  __asan_set_death_callback(NULL);
}

static const char* kUseAfterPoisonErrorMessage = "use-after-poison";

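// Shorthand for checking the poisoning state of a single byte at ptr+offset.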
#define GOOD_ACCESS(ptr, offset)  \
    EXPECT_FALSE(__asan::AddressIsPoisoned((uptr)(ptr + offset)))

#define BAD_ACCESS(ptr, offset) \
    EXPECT_TRUE(__asan::AddressIsPoisoned((uptr)(ptr + offset)))

TEST(AddressSanitizerInterface, SimplePoisonMemoryRegionTest) {
  char *array = Ident((char*)malloc(120));
  // Poison array[40..80).
  __asan_poison_memory_region(array + 40, 40);
  GOOD_ACCESS(array, 39);
  GOOD_ACCESS(array, 80);
  BAD_ACCESS(array, 40);
  BAD_ACCESS(array, 60);
  BAD_ACCESS(array, 79);
  EXPECT_DEATH(__asan_report_error(0, 0, 0, (uptr)(array + 40), true, 1),
               kUseAfterPoisonErrorMessage);
  __asan_unpoison_memory_region(array + 40, 40);
  // Access previously poisoned memory.
  GOOD_ACCESS(array, 40);
  GOOD_ACCESS(array, 79);
  free(array);
}

TEST(AddressSanitizerInterface, OverlappingPoisonMemoryRegionTest) {
  char *array = Ident((char*)malloc(120));
  // Poison [0..40) and [80..120).
  __asan_poison_memory_region(array, 40);
  __asan_poison_memory_region(array + 80, 40);
  BAD_ACCESS(array, 20);
  GOOD_ACCESS(array, 60);
  BAD_ACCESS(array, 100);
  // Poison the whole array - [0..120).
  __asan_poison_memory_region(array, 120);
  BAD_ACCESS(array, 60);
  // Unpoison [24..96).
  __asan_unpoison_memory_region(array + 24, 72);
  BAD_ACCESS(array, 23);
  GOOD_ACCESS(array, 24);
  GOOD_ACCESS(array, 60);
  GOOD_ACCESS(array, 95);
  BAD_ACCESS(array, 96);
  free(array);
}

TEST(AddressSanitizerInterface, PushAndPopWithPoisoningTest) {
  // Vector of capacity 20.
  char *vec = Ident((char*)malloc(20));
  __asan_poison_memory_region(vec, 20);
  for (size_t i = 0; i < 7; i++) {
    // Simulate push_back.
    __asan_unpoison_memory_region(vec + i, 1);
    GOOD_ACCESS(vec, i);
    BAD_ACCESS(vec, i + 1);
  }
  for (size_t i = 7; i > 0; i--) {
    // Simulate pop_back.
    __asan_poison_memory_region(vec + i - 1, 1);
    BAD_ACCESS(vec, i - 1);
    if (i > 1) GOOD_ACCESS(vec, i - 2);
  }
  free(vec);
}

TEST(AddressSanitizerInterface, GlobalRedzones) {
  GOOD_ACCESS(glob1, 1 - 1);
  GOOD_ACCESS(glob2, 2 - 1);
  GOOD_ACCESS(glob3, 3 - 1);
  GOOD_ACCESS(glob4, 4 - 1);
  GOOD_ACCESS(glob5, 5 - 1);
  GOOD_ACCESS(glob6, 6 - 1);
  GOOD_ACCESS(glob7, 7 - 1);
  GOOD_ACCESS(glob8, 8 - 1);
  GOOD_ACCESS(glob9, 9 - 1);
  GOOD_ACCESS(glob10, 10 - 1);
  GOOD_ACCESS(glob11, 11 - 1);
  GOOD_ACCESS(glob12, 12 - 1);
  GOOD_ACCESS(glob13, 13 - 1);
  GOOD_ACCESS(glob14, 14 - 1);
  GOOD_ACCESS(glob15, 15 - 1);
  GOOD_ACCESS(glob16, 16 - 1);
  GOOD_ACCESS(glob17, 17 - 1);
  GOOD_ACCESS(glob1000, 1000 - 1);
  GOOD_ACCESS(glob10000, 10000 - 1);
  GOOD_ACCESS(glob100000, 100000 - 1);

  BAD_ACCESS(glob1, 1);
  BAD_ACCESS(glob2, 2);
  BAD_ACCESS(glob3, 3);
  BAD_ACCESS(glob4, 4);
  BAD_ACCESS(glob5, 5);
  BAD_ACCESS(glob6, 6);
  BAD_ACCESS(glob7, 7);
  BAD_ACCESS(glob8, 8);
  BAD_ACCESS(glob9, 9);
  BAD_ACCESS(glob10, 10);
  BAD_ACCESS(glob11, 11);
  BAD_ACCESS(glob12, 12);
  BAD_ACCESS(glob13, 13);
  BAD_ACCESS(glob14, 14);
  BAD_ACCESS(glob15, 15);
  BAD_ACCESS(glob16, 16);
  BAD_ACCESS(glob17, 17);
  BAD_ACCESS(glob1000, 1000);
  BAD_ACCESS(glob1000, 1100);  // Redzone is at least 101 bytes.
  BAD_ACCESS(glob10000, 10000);
  BAD_ACCESS(glob10000, 11000);  // Redzone is at least 1001 bytes.
  BAD_ACCESS(glob100000, 100000);
  BAD_ACCESS(glob100000, 110000);  // Redzone is at least 10001 bytes.
}

// Make sure that within each aligned block of 2^granularity bytes, no "true"
// (poisoned) value appears before a "false" (addressable) one.
static void MakeShadowValid(bool *shadow, int length, int granularity) {
  bool can_be_poisoned = true;
  for (int i = length - 1; i >= 0; i--) {
    if (!shadow[i])
      can_be_poisoned = false;
    if (!can_be_poisoned)
      shadow[i] = false;
    if (i % (1 << granularity) == 0) {
      can_be_poisoned = true;
    }
  }
}

TEST(AddressSanitizerInterface, PoisoningStressTest) {
  const size_t kSize = 24;
  bool expected[kSize];
  char *arr = Ident((char*)malloc(kSize));
  for (size_t l1 = 0; l1 < kSize; l1++) {
    for (size_t s1 = 1; l1 + s1 <= kSize; s1++) {
      for (size_t l2 = 0; l2 < kSize; l2++) {
        for (size_t s2 = 1; l2 + s2 <= kSize; s2++) {
          // Poison [l1, l1+s1) and [l2, l2+s2), then check the result.
          __asan_unpoison_memory_region(arr, kSize);
          __asan_poison_memory_region(arr + l1, s1);
          __asan_poison_memory_region(arr + l2, s2);
          memset(expected, false, kSize);
          memset(expected + l1, true, s1);
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          memset(expected + l2, true, s2);
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          for (size_t i = 0; i < kSize; i++) {
            ASSERT_EQ(expected[i], __asan_address_is_poisoned(arr + i));
          }
          // Unpoison [l1, l1+s1) and [l2, l2+s2), then check the result.
          __asan_poison_memory_region(arr, kSize);
          __asan_unpoison_memory_region(arr + l1, s1);
          __asan_unpoison_memory_region(arr + l2, s2);
          memset(expected, true, kSize);
          memset(expected + l1, false, s1);
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          memset(expected + l2, false, s2);
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          for (size_t i = 0; i < kSize; i++) {
            ASSERT_EQ(expected[i], __asan_address_is_poisoned(arr + i));
          }
        }
      }
    }
  }
}

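// __asan_region_is_poisoned(beg, size) returns the address of the first
// poisoned byte in [beg, beg+size), or 0 if the whole region is addressable;
// here the bytes past the end of each new[] block belong to the redzone.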
TEST(AddressSanitizerInterface, PoisonedRegion) {
  size_t rz = 16;
  for (size_t size = 1; size <= 64; size++) {
    char *p = new char[size];
    uptr x = reinterpret_cast<uptr>(p);
    for (size_t beg = 0; beg < size + rz; beg++) {
      for (size_t end = beg; end < size + rz; end++) {
        uptr first_poisoned = __asan_region_is_poisoned(x + beg, end - beg);
        if (beg == end) {
          EXPECT_FALSE(first_poisoned);
        } else if (beg < size && end <= size) {
          EXPECT_FALSE(first_poisoned);
        } else if (beg >= size) {
          EXPECT_EQ(x + beg, first_poisoned);
        } else {
          EXPECT_GT(end, size);
          EXPECT_EQ(x + size, first_poisoned);
        }
      }
    }
    delete [] p;
  }
}

// This is a performance benchmark for manual runs.
// ASan's memset interceptor calls mem_is_zero for the entire shadow region.
// The profile should look like this:
//     89.10%   [.] __memset_sse2
//     10.50%   [.] __sanitizer::mem_is_zero
// I.e. mem_is_zero should consume roughly SHADOW_GRANULARITY times fewer
// CPU cycles than memset itself.
TEST(AddressSanitizerInterface, DISABLED_Stress_memset) {
  size_t size = 1 << 20;
  char *x = new char[size];
  for (int i = 0; i < 100000; i++)
    Ident(memset)(x, 0, size);
  delete [] x;
}

static const char *kInvalidPoisonMessage = "invalid-poison-memory-range";
static const char *kInvalidUnpoisonMessage = "invalid-unpoison-memory-range";

TEST(AddressSanitizerInterface, DISABLED_InvalidPoisonAndUnpoisonCallsTest) {
  char *array = Ident((char*)malloc(120));
  __asan_unpoison_memory_region(array, 120);
  // Try to unpoison memory we do not own.
  EXPECT_DEATH(__asan_unpoison_memory_region(array, 121),
               kInvalidUnpoisonMessage);
  EXPECT_DEATH(__asan_unpoison_memory_region(array - 1, 120),
               kInvalidUnpoisonMessage);

  __asan_poison_memory_region(array, 120);
  // Try to poison memory we do not own.
  EXPECT_DEATH(__asan_poison_memory_region(array, 121), kInvalidPoisonMessage);
  EXPECT_DEATH(__asan_poison_memory_region(array - 1, 120),
               kInvalidPoisonMessage);
  free(array);
}

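// An error report callback that brackets the report with "ABCDEF" markers on
// stderr and exits; the test below verifies both that the callback fires and
// that the report text passes through it unmodified.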
static void ErrorReportCallbackOneToZ(const char *report) {
  int report_len = strlen(report);
  ASSERT_EQ(6, write(2, "ABCDEF", 6));
  ASSERT_EQ(report_len, write(2, report, report_len));
  ASSERT_EQ(6, write(2, "ABCDEF", 6));
  _exit(1);
}

TEST(AddressSanitizerInterface, SetErrorReportCallbackTest) {
  __asan_set_error_report_callback(ErrorReportCallbackOneToZ);
  EXPECT_DEATH(__asan_report_error(0, 0, 0, 0, true, 1),
               ASAN_PCRE_DOTALL "ABCDEF.*AddressSanitizer.*WRITE.*ABCDEF");
  __asan_set_error_report_callback(NULL);
}

TEST(AddressSanitizerInterface, GetOwnershipStressTest) {
  std::vector<char *> pointers;
  std::vector<size_t> sizes;
#if ASAN_ALLOCATOR_VERSION == 1
  const size_t kNumMallocs =
      (SANITIZER_WORDSIZE <= 32 || ASAN_LOW_MEMORY) ? 1 << 10 : 1 << 14;
#elif ASAN_ALLOCATOR_VERSION == 2  // too slow with asan_allocator2. :(
  const size_t kNumMallocs = 1 << 9;
#endif
  for (size_t i = 0; i < kNumMallocs; i++) {
    size_t size = i * 100 + 1;
    pointers.push_back((char*)malloc(size));
    sizes.push_back(size);
  }
  for (size_t i = 0; i < 4000000; i++) {
    EXPECT_FALSE(__asan_get_ownership(&pointers));
    EXPECT_FALSE(__asan_get_ownership((void*)0x1234));
    size_t idx = i % kNumMallocs;
    EXPECT_TRUE(__asan_get_ownership(pointers[idx]));
    EXPECT_EQ(sizes[idx], __asan_get_allocated_size(pointers[idx]));
  }
  for (size_t i = 0, n = pointers.size(); i < n; i++)
    free(pointers[i]);
}

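// calloc() must detect overflow in the nmemb * size multiplication and fail
// by returning NULL rather than allocating a short buffer.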
TEST(AddressSanitizerInterface, CallocOverflow) {
  size_t kArraySize = 4096;
  volatile size_t kMaxSizeT = std::numeric_limits<size_t>::max();
  volatile size_t kArraySize2 = kMaxSizeT / kArraySize + 10;
  void *p = calloc(kArraySize, kArraySize2);  // Should return 0.
  EXPECT_EQ(0L, Ident(p));
}

TEST(AddressSanitizerInterface, CallocOverflow2) {
#if SANITIZER_WORDSIZE == 32
  size_t kArraySize = 112;
  volatile size_t kArraySize2 = 43878406;
  void *p = calloc(kArraySize, kArraySize2);  // Should return 0.
  EXPECT_EQ(0L, Ident(p));
#endif
}
851