// asan_noinst_test.cc revision e52810d7144ca35bbaeca0b28d138b386ab90243
1//===-- asan_noinst_test.cc -----------------------------------------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file is a part of AddressSanitizer, an address sanity checker.
11//
12// This test file should be compiled w/o asan instrumentation.
13//===----------------------------------------------------------------------===//
14
15#include "asan_allocator.h"
16#include "asan_internal.h"
17#include "asan_mapping.h"
18#include "asan_stack.h"
19#include "asan_test_utils.h"
20#include "sanitizer/asan_interface.h"
21
22#include <assert.h>
23#include <stdio.h>
24#include <stdlib.h>
25#include <string.h>  // for memset()
26#include <algorithm>
27#include <vector>
28
29// Simple stand-alone pseudorandom number generator.
30// Current algorithm is ANSI C linear congruential PRNG.
31static inline u32 my_rand(u32* state) {
32  return (*state = *state * 1103515245 + 12345) >> 16;
33}
34
// Shared seed source; each test draws a private seed from it via my_rand().
static u32 global_seed = 0;
36
37
// Sanity check that the EXPECT_DEATH machinery itself works in this
// uninstrumented binary.
TEST(AddressSanitizer, InternalSimpleDeathTest) {
  EXPECT_DEATH(exit(1), "");
}
41
42static void MallocStress(size_t n) {
43  u32 seed = my_rand(&global_seed);
44  __asan::StackTrace stack1;
45  stack1.trace[0] = 0xa123;
46  stack1.trace[1] = 0xa456;
47  stack1.size = 2;
48
49  __asan::StackTrace stack2;
50  stack2.trace[0] = 0xb123;
51  stack2.trace[1] = 0xb456;
52  stack2.size = 2;
53
54  __asan::StackTrace stack3;
55  stack3.trace[0] = 0xc123;
56  stack3.trace[1] = 0xc456;
57  stack3.size = 2;
58
59  std::vector<void *> vec;
60  for (size_t i = 0; i < n; i++) {
61    if ((i % 3) == 0) {
62      if (vec.empty()) continue;
63      size_t idx = my_rand(&seed) % vec.size();
64      void *ptr = vec[idx];
65      vec[idx] = vec.back();
66      vec.pop_back();
67      __asan::asan_free(ptr, &stack1, __asan::FROM_MALLOC);
68    } else {
69      size_t size = my_rand(&seed) % 1000 + 1;
70      switch ((my_rand(&seed) % 128)) {
71        case 0: size += 1024; break;
72        case 1: size += 2048; break;
73        case 2: size += 4096; break;
74      }
75      size_t alignment = 1 << (my_rand(&seed) % 10 + 1);
76      char *ptr = (char*)__asan::asan_memalign(alignment, size,
77                                               &stack2, __asan::FROM_MALLOC);
78      vec.push_back(ptr);
79      ptr[0] = 0;
80      ptr[size-1] = 0;
81      ptr[size/2] = 0;
82    }
83  }
84  for (size_t i = 0; i < vec.size(); i++)
85    __asan::asan_free(vec[i], &stack3, __asan::FROM_MALLOC);
86}
87
88
89TEST(AddressSanitizer, NoInstMallocTest) {
90#ifdef __arm__
91  MallocStress(300000);
92#else
93  MallocStress(1000000);
94#endif
95}
96
97static void PrintShadow(const char *tag, uptr ptr, size_t size) {
98  fprintf(stderr, "%s shadow: %lx size % 3ld: ", tag, (long)ptr, (long)size);
99  uptr prev_shadow = 0;
100  for (sptr i = -32; i < (sptr)size + 32; i++) {
101    uptr shadow = __asan::MemToShadow(ptr + i);
102    if (i == 0 || i == (sptr)size)
103      fprintf(stderr, ".");
104    if (shadow != prev_shadow) {
105      prev_shadow = shadow;
106      fprintf(stderr, "%02x", (int)*(u8*)shadow);
107    }
108  }
109  fprintf(stderr, "\n");
110}
111
// Manual-inspection helper (disabled by default): prints the shadow of each
// block right after allocation and right after deallocation.
// NOTE(review): the second PrintShadow deliberately uses the value of a
// freed pointer; it only feeds the address into shadow computation and
// never dereferences the freed chunk itself.
TEST(AddressSanitizer, DISABLED_InternalPrintShadow) {
  for (size_t size = 1; size <= 513; size++) {
    char *ptr = new char[size];
    PrintShadow("m", (uptr)ptr, size);
    delete [] ptr;
    PrintShadow("f", (uptr)ptr, size);
  }
}
120
// Pool of plausible program-counter values used as raw material by the
// stack-trace compression tests below. The 64-bit-looking entries are only
// included on 64-bit targets; the 32-bit values are always present.
static uptr pc_array[] = {
#if SANITIZER_WORDSIZE == 64
  0x7effbf756068ULL,
  0x7effbf75e5abULL,
  0x7effc0625b7cULL,
  0x7effc05b8997ULL,
  0x7effbf990577ULL,
  0x7effbf990c56ULL,
  0x7effbf992f3cULL,
  0x7effbf950c22ULL,
  0x7effc036dba0ULL,
  0x7effc03638a3ULL,
  0x7effc035be4aULL,
  0x7effc0539c45ULL,
  0x7effc0539a65ULL,
  0x7effc03db9b3ULL,
  0x7effc03db100ULL,
  0x7effc037c7b8ULL,
  0x7effc037bfffULL,
  0x7effc038b777ULL,
  0x7effc038021cULL,
  0x7effc037c7d1ULL,
  0x7effc037bfffULL,
  0x7effc038b777ULL,
  0x7effc038021cULL,
  0x7effc037c7d1ULL,
  0x7effc037bfffULL,
  0x7effc038b777ULL,
  0x7effc038021cULL,
  0x7effc037c7d1ULL,
  0x7effc037bfffULL,
  0x7effc0520d26ULL,
  0x7effc009ddffULL,
  0x7effbf90bb50ULL,
  0x7effbdddfa69ULL,
  0x7effbdde1fe2ULL,
  0x7effbdde2424ULL,
  0x7effbdde27b3ULL,
  0x7effbddee53bULL,
  0x7effbdde1988ULL,
  0x7effbdde0904ULL,
  0x7effc106ce0dULL,
  0x7effbcc3fa04ULL,
  0x7effbcc3f6a4ULL,
  0x7effbcc3e726ULL,
  0x7effbcc40852ULL,
  0x7effb681ec4dULL,
#endif  // SANITIZER_WORDSIZE
  0xB0B5E768,
  0x7B682EC1,
  0x367F9918,
  0xAE34E13,
  0xBA0C6C6,
  0x13250F46,
  0xA0D6A8AB,
  0x2B07C1A8,
  0x6C844F4A,
  0x2321B53,
  0x1F3D4F8F,
  0x3FE2924B,
  0xB7A2F568,
  0xBD23950A,
  0x61020930,
  0x33E7970C,
  0x405998A1,
  0x59F3551D,
  0x350E3028,
  0xBC55A28D,
  0x361F3AED,
  0xBEAD0F73,
  0xAEF28479,
  0x757E971F,
  0xAEBA450,
  0x43AD22F5,
  0x8C2C50C4,
  0x7AD8A2E1,
  0x69EE4EE8,
  0xC08DFF,
  0x4BA6538,
  0x3708AB2,
  0xC24B6475,
  0x7C8890D7,
  0x6662495F,
  0x9B641689,
  0xD3596B,
  0xA1049569,
  0x44CBC16,
  0x4D39C39F
};
210
211void CompressStackTraceTest(size_t n_iter) {
212  u32 seed = my_rand(&global_seed);
213  const size_t kNumPcs = ARRAY_SIZE(pc_array);
214  u32 compressed[2 * kNumPcs];
215
216  for (size_t iter = 0; iter < n_iter; iter++) {
217    std::random_shuffle(pc_array, pc_array + kNumPcs);
218    __asan::StackTrace stack0, stack1;
219    stack0.CopyFrom(pc_array, kNumPcs);
220    stack0.size = std::max((size_t)1, (size_t)(my_rand(&seed) % stack0.size));
221    size_t compress_size =
222      std::max((size_t)2, (size_t)my_rand(&seed) % (2 * kNumPcs));
223    size_t n_frames =
224      __asan::StackTrace::CompressStack(&stack0, compressed, compress_size);
225    Ident(n_frames);
226    assert(n_frames <= stack0.size);
227    __asan::StackTrace::UncompressStack(&stack1, compressed, compress_size);
228    assert(stack1.size == n_frames);
229    for (size_t i = 0; i < stack1.size; i++) {
230      assert(stack0.trace[i] == stack1.trace[i]);
231    }
232  }
233}
234
// Correctness: compress/uncompress must round-trip over many random inputs.
TEST(AddressSanitizer, CompressStackTraceTest) {
  CompressStackTraceTest(10000);
}
238
239void CompressStackTraceBenchmark(size_t n_iter) {
240  const size_t kNumPcs = ARRAY_SIZE(pc_array);
241  u32 compressed[2 * kNumPcs];
242  std::random_shuffle(pc_array, pc_array + kNumPcs);
243
244  __asan::StackTrace stack0;
245  stack0.CopyFrom(pc_array, kNumPcs);
246  stack0.size = kNumPcs;
247  for (size_t iter = 0; iter < n_iter; iter++) {
248    size_t compress_size = kNumPcs;
249    size_t n_frames =
250      __asan::StackTrace::CompressStack(&stack0, compressed, compress_size);
251    Ident(n_frames);
252  }
253}
254
// Benchmark only: measures compression throughput over 2^24 iterations.
TEST(AddressSanitizer, CompressStackTraceBenchmark) {
  CompressStackTraceBenchmark(1 << 24);
}
258
// Freed chunks must pass through a quarantine before reuse: the same
// address should not be handed out again until many other malloc/free
// pairs have cycled through.
TEST(AddressSanitizer, QuarantineTest) {
  __asan::StackTrace stack;
  stack.trace[0] = 0x890;
  stack.size = 1;

  const int size = 32;
  void *p = __asan::asan_malloc(size, &stack);
  __asan::asan_free(p, &stack, __asan::FROM_MALLOC);
  size_t i;
  size_t max_i = 1 << 30;
  // Count how many malloc/free pairs it takes until the original address
  // is recycled.
  for (i = 0; i < max_i; i++) {
    void *p1 = __asan::asan_malloc(size, &stack);
    __asan::asan_free(p1, &stack, __asan::FROM_MALLOC);
    if (p1 == p) break;
  }
  // fprintf(stderr, "i=%ld\n", i);
  // The address must eventually be reused (i < max_i) but not too soon
  // (i >= 100000), demonstrating the quarantine delay.
  EXPECT_GE(i, 100000U);
  EXPECT_LT(i, max_i);
}
278
279void *ThreadedQuarantineTestWorker(void *unused) {
280  (void)unused;
281  u32 seed = my_rand(&global_seed);
282  __asan::StackTrace stack;
283  stack.trace[0] = 0x890;
284  stack.size = 1;
285
286  for (size_t i = 0; i < 1000; i++) {
287    void *p = __asan::asan_malloc(1 + (my_rand(&seed) % 4000), &stack);
288    __asan::asan_free(p, &stack, __asan::FROM_MALLOC);
289  }
290  return NULL;
291}
292
// Check that the thread local allocators are flushed when threads are
// destroyed.
TEST(AddressSanitizer, ThreadedQuarantineTest) {
  const int n_threads = 3000;
  size_t mmaped1 = __asan_get_heap_size();
  // Spawn and join workers one at a time; if per-thread caches leaked on
  // thread exit, heap size would grow roughly linearly in the thread count.
  for (int i = 0; i < n_threads; i++) {
    pthread_t t;
    PTHREAD_CREATE(&t, NULL, ThreadedQuarantineTestWorker, 0);
    PTHREAD_JOIN(t, 0);
    size_t mmaped2 = __asan_get_heap_size();
    EXPECT_LT(mmaped2 - mmaped1, 320U * (1 << 20));
  }
}
306
307void *ThreadedOneSizeMallocStress(void *unused) {
308  (void)unused;
309  __asan::StackTrace stack;
310  stack.trace[0] = 0x890;
311  stack.size = 1;
312  const size_t kNumMallocs = 1000;
313  for (int iter = 0; iter < 1000; iter++) {
314    void *p[kNumMallocs];
315    for (size_t i = 0; i < kNumMallocs; i++) {
316      p[i] = __asan::asan_malloc(32, &stack);
317    }
318    for (size_t i = 0; i < kNumMallocs; i++) {
319      __asan::asan_free(p[i], &stack, __asan::FROM_MALLOC);
320    }
321  }
322  return NULL;
323}
324
325TEST(AddressSanitizer, ThreadedOneSizeMallocStressTest) {
326  const int kNumThreads = 4;
327  pthread_t t[kNumThreads];
328  for (int i = 0; i < kNumThreads; i++) {
329    PTHREAD_CREATE(&t[i], 0, ThreadedOneSizeMallocStress, 0);
330  }
331  for (int i = 0; i < kNumThreads; i++) {
332    PTHREAD_JOIN(t[i], 0);
333  }
334}
335
// memset() into ASan's own shadow and gap regions must be caught and
// reported with a region-specific diagnostic.
TEST(AddressSanitizer, MemsetWildAddressTest) {
  typedef void*(*memset_p)(void*, int, size_t);
  // Prevent inlining of memset().
  volatile memset_p libc_memset = (memset_p)memset;
  EXPECT_DEATH(libc_memset((void*)(kLowShadowBeg + 200), 0, 100),
               "unknown-crash.*low shadow");
  EXPECT_DEATH(libc_memset((void*)(kShadowGapBeg + 200), 0, 100),
               "unknown-crash.*shadow gap");
  EXPECT_DEATH(libc_memset((void*)(kHighShadowBeg + 200), 0, 100),
               "unknown-crash.*high shadow");
}
347
348TEST(AddressSanitizerInterface, GetEstimatedAllocatedSize) {
349#if ASAN_ALLOCATOR_VERSION == 1
350  EXPECT_EQ(1U, __asan_get_estimated_allocated_size(0));
351#elif ASAN_ALLOCATOR_VERSION == 2
352  EXPECT_EQ(0U, __asan_get_estimated_allocated_size(0));
353#endif
354  const size_t sizes[] = { 1, 30, 1<<30 };
355  for (size_t i = 0; i < 3; i++) {
356    EXPECT_EQ(sizes[i], __asan_get_estimated_allocated_size(sizes[i]));
357  }
358}
359
// Fragment of the report produced when __asan_get_allocated_size() is
// called on a pointer the allocator does not own.
static const char* kGetAllocatedSizeErrorMsg =
  "attempting to call __asan_get_allocated_size()";
362
363TEST(AddressSanitizerInterface, GetAllocatedSizeAndOwnershipTest) {
364  const size_t kArraySize = 100;
365  char *array = Ident((char*)malloc(kArraySize));
366  int *int_ptr = Ident(new int);
367
368  // Allocated memory is owned by allocator. Allocated size should be
369  // equal to requested size.
370  EXPECT_EQ(true, __asan_get_ownership(array));
371  EXPECT_EQ(kArraySize, __asan_get_allocated_size(array));
372  EXPECT_EQ(true, __asan_get_ownership(int_ptr));
373  EXPECT_EQ(sizeof(int), __asan_get_allocated_size(int_ptr));
374
375  // We cannot call GetAllocatedSize from the memory we didn't map,
376  // and from the interior pointers (not returned by previous malloc).
377  void *wild_addr = (void*)0x1;
378  EXPECT_FALSE(__asan_get_ownership(wild_addr));
379  EXPECT_DEATH(__asan_get_allocated_size(wild_addr), kGetAllocatedSizeErrorMsg);
380  EXPECT_FALSE(__asan_get_ownership(array + kArraySize / 2));
381  EXPECT_DEATH(__asan_get_allocated_size(array + kArraySize / 2),
382               kGetAllocatedSizeErrorMsg);
383
384  // NULL is not owned, but is a valid argument for __asan_get_allocated_size().
385  EXPECT_FALSE(__asan_get_ownership(NULL));
386  EXPECT_EQ(0U, __asan_get_allocated_size(NULL));
387
388  // When memory is freed, it's not owned, and call to GetAllocatedSize
389  // is forbidden.
390  free(array);
391  EXPECT_FALSE(__asan_get_ownership(array));
392  EXPECT_DEATH(__asan_get_allocated_size(array), kGetAllocatedSizeErrorMsg);
393
394  delete int_ptr;
395}
396
397TEST(AddressSanitizerInterface, GetCurrentAllocatedBytesTest) {
398  size_t before_malloc, after_malloc, after_free;
399  char *array;
400  const size_t kMallocSize = 100;
401  before_malloc = __asan_get_current_allocated_bytes();
402
403  array = Ident((char*)malloc(kMallocSize));
404  after_malloc = __asan_get_current_allocated_bytes();
405  EXPECT_EQ(before_malloc + kMallocSize, after_malloc);
406
407  free(array);
408  after_free = __asan_get_current_allocated_bytes();
409  EXPECT_EQ(before_malloc, after_free);
410}
411
// Deliberately triggers a double-free report; used by tests that need a
// deterministic ASan error (exit codes, death callbacks, report callbacks).
// Ident() hides the pointer from the optimizer so the second delete is not
// elided.
static void DoDoubleFree() {
  int *x = Ident(new int);
  delete Ident(x);
  delete Ident(x);
}
417
#if ASAN_ALLOCATOR_VERSION == 1
// This test is run in a separate process, so that large malloced
// chunk won't remain in the free lists after the test.
// Note: use ASSERT_* instead of EXPECT_* here.
static void RunGetHeapSizeTestAndDie() {
  size_t old_heap_size, new_heap_size, heap_growth;
  // We are unlikely to have a chunk of this size in the free list already.
  static const size_t kLargeMallocSize = 1 << 29;  // 512M
  old_heap_size = __asan_get_heap_size();
  fprintf(stderr, "allocating %zu bytes:\n", kLargeMallocSize);
  free(Ident(malloc(kLargeMallocSize)));
  new_heap_size = __asan_get_heap_size();
  heap_growth = new_heap_size - old_heap_size;
  fprintf(stderr, "heap growth after first malloc: %zu\n", heap_growth);
  // The first allocation must grow the heap by roughly the chunk size.
  ASSERT_GE(heap_growth, kLargeMallocSize);
  ASSERT_LE(heap_growth, 2 * kLargeMallocSize);

  // Now large chunk should fall into free list, and can be
  // allocated without increasing heap size.
  old_heap_size = new_heap_size;
  free(Ident(malloc(kLargeMallocSize)));
  heap_growth = __asan_get_heap_size() - old_heap_size;
  fprintf(stderr, "heap growth after second malloc: %zu\n", heap_growth);
  ASSERT_LT(heap_growth, kLargeMallocSize);

  // Test passed. Now die with expected double-free.
  DoDoubleFree();
}

TEST(AddressSanitizerInterface, GetHeapSizeTest) {
  EXPECT_DEATH(RunGetHeapSizeTestAndDie(), "double-free");
}
#elif ASAN_ALLOCATOR_VERSION == 2
TEST(AddressSanitizerInterface, GetHeapSizeTest) {
  // asan_allocator2 does not keep huge chunks in free list, but unmaps them.
  // The chunk should be greater than the quarantine size,
  // otherwise it will be stuck in quarantine instead of being unmaped.
  static const size_t kLargeMallocSize = 1 << 28;  // 256M
  uptr old_heap_size = __asan_get_heap_size();
  // Heap size must stay flat across repeated huge malloc/free cycles.
  for (int i = 0; i < 3; i++) {
    // fprintf(stderr, "allocating %zu bytes:\n", kLargeMallocSize);
    free(Ident(malloc(kLargeMallocSize)));
    EXPECT_EQ(old_heap_size, __asan_get_heap_size());
  }
}
#endif
464
465// Note: use ASSERT_* instead of EXPECT_* here.
466static void DoLargeMallocForGetFreeBytesTestAndDie() {
467#if ASAN_ALLOCATOR_VERSION == 1
468  // asan_allocator2 does not keep large chunks in free_lists, so this test
469  // will not work.
470  size_t old_free_bytes, new_free_bytes;
471  static const size_t kLargeMallocSize = 1 << 29;  // 512M
472  // If we malloc and free a large memory chunk, it will not fall
473  // into quarantine and will be available for future requests.
474  old_free_bytes = __asan_get_free_bytes();
475  fprintf(stderr, "allocating %zu bytes:\n", kLargeMallocSize);
476  fprintf(stderr, "free bytes before malloc: %zu\n", old_free_bytes);
477  free(Ident(malloc(kLargeMallocSize)));
478  new_free_bytes = __asan_get_free_bytes();
479  fprintf(stderr, "free bytes after malloc and free: %zu\n", new_free_bytes);
480  ASSERT_GE(new_free_bytes, old_free_bytes + kLargeMallocSize);
481#endif  // ASAN_ALLOCATOR_VERSION
482  // Test passed.
483  DoDoubleFree();
484}
485
486TEST(AddressSanitizerInterface, GetFreeBytesTest) {
487  // Allocate a small chunk. Now allocator probably has a lot of these
488  // chunks to fulfill future requests. So, future requests will decrease
489  // the number of free bytes. Do this only on systems where there
490  // is enough memory for such assumptions.
491  if (SANITIZER_WORDSIZE == 64 && !ASAN_LOW_MEMORY) {
492    static const size_t kNumOfChunks = 100;
493    static const size_t kChunkSize = 100;
494    char *chunks[kNumOfChunks];
495    size_t i;
496    size_t old_free_bytes, new_free_bytes;
497    chunks[0] = Ident((char*)malloc(kChunkSize));
498    old_free_bytes = __asan_get_free_bytes();
499    for (i = 1; i < kNumOfChunks; i++) {
500      chunks[i] = Ident((char*)malloc(kChunkSize));
501      new_free_bytes = __asan_get_free_bytes();
502      EXPECT_LT(new_free_bytes, old_free_bytes);
503      old_free_bytes = new_free_bytes;
504    }
505    for (i = 0; i < kNumOfChunks; i++)
506      free(chunks[i]);
507  }
508  EXPECT_DEATH(DoLargeMallocForGetFreeBytesTestAndDie(), "double-free");
509}
510
// Parameters for ManyThreadsWithStatsStressTest: per-iteration malloc
// sizes, iteration count, and a thread count scaled down on 32-bit targets.
static const size_t kManyThreadsMallocSizes[] = {5, 1UL<<10, 1UL<<14, 357};
static const size_t kManyThreadsIterations = 250;
static const size_t kManyThreadsNumThreads =
  (SANITIZER_WORDSIZE == 32) ? 40 : 200;
515
516void *ManyThreadsWithStatsWorker(void *arg) {
517  (void)arg;
518  for (size_t iter = 0; iter < kManyThreadsIterations; iter++) {
519    for (size_t size_index = 0; size_index < 4; size_index++) {
520      free(Ident(malloc(kManyThreadsMallocSizes[size_index])));
521    }
522  }
523  // Just one large allocation.
524  free(Ident(malloc(1 << 20)));
525  return 0;
526}
527
528TEST(AddressSanitizerInterface, ManyThreadsWithStatsStressTest) {
529  size_t before_test, after_test, i;
530  pthread_t threads[kManyThreadsNumThreads];
531  before_test = __asan_get_current_allocated_bytes();
532  for (i = 0; i < kManyThreadsNumThreads; i++) {
533    PTHREAD_CREATE(&threads[i], 0,
534                   (void* (*)(void *x))ManyThreadsWithStatsWorker, (void*)i);
535  }
536  for (i = 0; i < kManyThreadsNumThreads; i++) {
537    PTHREAD_JOIN(threads[i], 0);
538  }
539  after_test = __asan_get_current_allocated_bytes();
540  // ASan stats also reflect memory usage of internal ASan RTL structs,
541  // so we can't check for equality here.
542  EXPECT_LT(after_test, before_test + (1UL<<20));
543}
544
// __asan_set_error_exit_code() must return the previously set code, and the
// new code must be used by the next error report. The chain of EXPECT_EQ
// values (7, then 8) is order-dependent; the original code is restored at
// the end.
TEST(AddressSanitizerInterface, ExitCode) {
  int original_exit_code = __asan_set_error_exit_code(7);
  EXPECT_EXIT(DoDoubleFree(), ::testing::ExitedWithCode(7), "");
  EXPECT_EQ(7, __asan_set_error_exit_code(8));
  EXPECT_EXIT(DoDoubleFree(), ::testing::ExitedWithCode(8), "");
  EXPECT_EQ(8, __asan_set_error_exit_code(original_exit_code));
  EXPECT_EXIT(DoDoubleFree(),
              ::testing::ExitedWithCode(original_exit_code), "");
}
554
// Death callback for DeathCallbackTest; its stderr output is what the
// EXPECT_DEATH regex matches.
static void MyDeathCallback() {
  fprintf(stderr, "MyDeathCallback\n");
}
558
// The registered death callback must run when ASan reports a fatal error,
// and must be removable afterwards.
TEST(AddressSanitizerInterface, DeathCallbackTest) {
  __asan_set_death_callback(MyDeathCallback);
  EXPECT_DEATH(DoDoubleFree(), "MyDeathCallback");
  __asan_set_death_callback(NULL);
}
564
static const char* kUseAfterPoisonErrorMessage = "use-after-poison";

// Shorthand checks for the poisoning tests below: assert that the byte at
// ptr + offset is addressable (GOOD) or poisoned (BAD) in the shadow.
#define GOOD_ACCESS(ptr, offset)  \
    EXPECT_FALSE(__asan::AddressIsPoisoned((uptr)(ptr + offset)))

#define BAD_ACCESS(ptr, offset) \
    EXPECT_TRUE(__asan::AddressIsPoisoned((uptr)(ptr + offset)))
572
// Poison a sub-range of a heap block, verify the shadow at the edges,
// confirm that a report fires inside the range, then unpoison and re-check.
TEST(AddressSanitizerInterface, SimplePoisonMemoryRegionTest) {
  char *array = Ident((char*)malloc(120));
  // poison array[40..80)
  __asan_poison_memory_region(array + 40, 40);
  GOOD_ACCESS(array, 39);
  GOOD_ACCESS(array, 80);
  BAD_ACCESS(array, 40);
  BAD_ACCESS(array, 60);
  BAD_ACCESS(array, 79);
  EXPECT_DEATH(__asan_report_error(0, 0, 0, (uptr)(array + 40), true, 1),
               kUseAfterPoisonErrorMessage);
  __asan_unpoison_memory_region(array + 40, 40);
  // access previously poisoned memory.
  GOOD_ACCESS(array, 40);
  GOOD_ACCESS(array, 79);
  free(array);
}
590
// Overlapping poison/unpoison calls: the most recent call for a byte wins,
// subject to the shadow's 8-byte granularity at range edges.
TEST(AddressSanitizerInterface, OverlappingPoisonMemoryRegionTest) {
  char *array = Ident((char*)malloc(120));
  // Poison [0..40) and [80..120)
  __asan_poison_memory_region(array, 40);
  __asan_poison_memory_region(array + 80, 40);
  BAD_ACCESS(array, 20);
  GOOD_ACCESS(array, 60);
  BAD_ACCESS(array, 100);
  // Poison whole array - [0..120)
  __asan_poison_memory_region(array, 120);
  BAD_ACCESS(array, 60);
  // Unpoison [24..96)
  __asan_unpoison_memory_region(array + 24, 72);
  BAD_ACCESS(array, 23);
  GOOD_ACCESS(array, 24);
  GOOD_ACCESS(array, 60);
  GOOD_ACCESS(array, 95);
  BAD_ACCESS(array, 96);
  free(array);
}
611
612TEST(AddressSanitizerInterface, PushAndPopWithPoisoningTest) {
613  // Vector of capacity 20
614  char *vec = Ident((char*)malloc(20));
615  __asan_poison_memory_region(vec, 20);
616  for (size_t i = 0; i < 7; i++) {
617    // Simulate push_back.
618    __asan_unpoison_memory_region(vec + i, 1);
619    GOOD_ACCESS(vec, i);
620    BAD_ACCESS(vec, i + 1);
621  }
622  for (size_t i = 7; i > 0; i--) {
623    // Simulate pop_back.
624    __asan_poison_memory_region(vec + i - 1, 1);
625    BAD_ACCESS(vec, i - 1);
626    if (i > 1) GOOD_ACCESS(vec, i - 2);
627  }
628  free(vec);
629}
630
// Within every aligned 2^granularity block, enforce the shadow invariant
// that no addressable ("true") byte may appear before a poisoned ("false")
// one: scanning right-to-left, once a false byte is seen, every lower
// offset in the same block is cleared to false as well.
static void MakeShadowValid(bool *shadow, int length, int granularity) {
  const int block = 1 << granularity;
  bool poisonable = true;
  for (int i = length - 1; i >= 0; i--) {
    if (!shadow[i])
      poisonable = false;
    if (!poisonable)
      shadow[i] = false;
    // Crossing a block boundary resets the state for the next lower block.
    if (i % block == 0)
      poisonable = true;
  }
}
645
// Exhaustively poison/unpoison every pair of sub-ranges of a small buffer
// and compare the real shadow against a bool-array model; MakeShadowValid
// applies the 8-byte-granularity rounding the shadow encoding imposes.
TEST(AddressSanitizerInterface, PoisoningStressTest) {
  const size_t kSize = 24;
  bool expected[kSize];
  char *arr = Ident((char*)malloc(kSize));
  for (size_t l1 = 0; l1 < kSize; l1++) {
    for (size_t s1 = 1; l1 + s1 <= kSize; s1++) {
      for (size_t l2 = 0; l2 < kSize; l2++) {
        for (size_t s2 = 1; l2 + s2 <= kSize; s2++) {
          // Poison [l1, l1+s1), [l2, l2+s2) and check result.
          __asan_unpoison_memory_region(arr, kSize);
          __asan_poison_memory_region(arr + l1, s1);
          __asan_poison_memory_region(arr + l2, s2);
          memset(expected, false, kSize);
          memset(expected + l1, true, s1);
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          memset(expected + l2, true, s2);
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          for (size_t i = 0; i < kSize; i++) {
            ASSERT_EQ(expected[i], __asan_address_is_poisoned(arr + i));
          }
          // Unpoison [l1, l1+s1) and [l2, l2+s2) and check result.
          __asan_poison_memory_region(arr, kSize);
          __asan_unpoison_memory_region(arr + l1, s1);
          __asan_unpoison_memory_region(arr + l2, s2);
          memset(expected, true, kSize);
          memset(expected + l1, false, s1);
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          memset(expected + l2, false, s2);
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          for (size_t i = 0; i < kSize; i++) {
            ASSERT_EQ(expected[i], __asan_address_is_poisoned(arr + i));
          }
        }
      }
    }
  }
}
683
// Report fragments emitted when poison/unpoison is applied to memory the
// program does not own.
static const char *kInvalidPoisonMessage = "invalid-poison-memory-range";
static const char *kInvalidUnpoisonMessage = "invalid-unpoison-memory-range";
686
// Poison/unpoison calls that extend past the owned allocation (one byte too
// long, or starting one byte before it) must be reported as invalid ranges.
TEST(AddressSanitizerInterface, DISABLED_InvalidPoisonAndUnpoisonCallsTest) {
  char *array = Ident((char*)malloc(120));
  __asan_unpoison_memory_region(array, 120);
  // Try to unpoison not owned memory
  EXPECT_DEATH(__asan_unpoison_memory_region(array, 121),
               kInvalidUnpoisonMessage);
  EXPECT_DEATH(__asan_unpoison_memory_region(array - 1, 120),
               kInvalidUnpoisonMessage);

  __asan_poison_memory_region(array, 120);
  // Try to poison not owned memory.
  EXPECT_DEATH(__asan_poison_memory_region(array, 121), kInvalidPoisonMessage);
  EXPECT_DEATH(__asan_poison_memory_region(array - 1, 120),
               kInvalidPoisonMessage);
  free(array);
}
703
// Error-report callback: brackets the report text with "ABCDEF" sentinels
// on stderr and exits, so the caller's death regex can verify the full
// report was delivered. NOTE(review): the "OneToZ" name looks historical --
// the callback no longer transforms the report text; confirm before
// renaming.
static void ErrorReportCallbackOneToZ(const char *report) {
  int report_len = strlen(report);
  ASSERT_EQ(6, write(2, "ABCDEF", 6));
  ASSERT_EQ(report_len, write(2, report, report_len));
  ASSERT_EQ(6, write(2, "ABCDEF", 6));
  _exit(1);
}
711
// The registered report callback must receive the full report text: the
// death regex requires both sentinels around the usual report body.
TEST(AddressSanitizerInterface, SetErrorReportCallbackTest) {
  __asan_set_error_report_callback(ErrorReportCallbackOneToZ);
  EXPECT_DEATH(__asan_report_error(0, 0, 0, 0, true, 1),
               ASAN_PCRE_DOTALL "ABCDEF.*AddressSanitizer.*WRITE.*ABCDEF");
  __asan_set_error_report_callback(NULL);
}
718
719TEST(AddressSanitizerInterface, GetOwnershipStressTest) {
720  std::vector<char *> pointers;
721  std::vector<size_t> sizes;
722  const size_t kNumMallocs =
723      (SANITIZER_WORDSIZE <= 32 || ASAN_LOW_MEMORY) ? 1 << 10 : 1 << 14;
724  for (size_t i = 0; i < kNumMallocs; i++) {
725    size_t size = i * 100 + 1;
726    pointers.push_back((char*)malloc(size));
727    sizes.push_back(size);
728  }
729  for (size_t i = 0; i < 4000000; i++) {
730    EXPECT_FALSE(__asan_get_ownership(&pointers));
731    EXPECT_FALSE(__asan_get_ownership((void*)0x1234));
732    size_t idx = i % kNumMallocs;
733    EXPECT_TRUE(__asan_get_ownership(pointers[idx]));
734    EXPECT_EQ(sizes[idx], __asan_get_allocated_size(pointers[idx]));
735  }
736  for (size_t i = 0, n = pointers.size(); i < n; i++)
737    free(pointers[i]);
738}
739