asan_noinst_test.cc revision 376bab8add6be88f649ef1a9127c7d39e76e5f7b
//===-- asan_noinst_test.cc -----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// This test file should be compiled without ASan instrumentation.
//===----------------------------------------------------------------------===//

#include "asan_allocator.h"
#include "asan_internal.h"
#include "asan_mapping.h"
#include "asan_stack.h"
#include "asan_test_utils.h"
#include "sanitizer/asan_interface.h"

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>  // for memset()
#include <algorithm>
#include <vector>

// Simple stand-alone pseudorandom number generator.
// The current algorithm is the ANSI C linear congruential PRNG.
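// (The multiplier/increment pair 1103515245/12345 is the one from the C
// standard's sample rand() implementation; the >> 16 discards the low-order
// bits, which have short periods in a power-of-two-modulus LCG.)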
static inline u32 my_rand(u32* state) {
  return (*state = *state * 1103515245 + 12345) >> 16;
}

static u32 global_seed = 0;


TEST(AddressSanitizer, InternalSimpleDeathTest) {
  EXPECT_DEATH(exit(1), "");
}

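// Allocator stress: on every third iteration free a randomly chosen live
// chunk; otherwise allocate a chunk of pseudo-random size and alignment
// (recording synthetic stack traces) and touch its first, middle and last
// bytes.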
static void MallocStress(size_t n) {
  u32 seed = my_rand(&global_seed);
  __asan::StackTrace stack1;
  stack1.trace[0] = 0xa123;
  stack1.trace[1] = 0xa456;
  stack1.size = 2;

  __asan::StackTrace stack2;
  stack2.trace[0] = 0xb123;
  stack2.trace[1] = 0xb456;
  stack2.size = 2;

  __asan::StackTrace stack3;
  stack3.trace[0] = 0xc123;
  stack3.trace[1] = 0xc456;
  stack3.size = 2;

  std::vector<void *> vec;
  for (size_t i = 0; i < n; i++) {
    if ((i % 3) == 0) {
      if (vec.empty()) continue;
      size_t idx = my_rand(&seed) % vec.size();
      void *ptr = vec[idx];
      vec[idx] = vec.back();
      vec.pop_back();
      __asan::asan_free(ptr, &stack1);
    } else {
      size_t size = my_rand(&seed) % 1000 + 1;
      switch ((my_rand(&seed) % 128)) {
        case 0: size += 1024; break;
        case 1: size += 2048; break;
        case 2: size += 4096; break;
      }
      size_t alignment = 1 << (my_rand(&seed) % 10 + 1);
      char *ptr = (char*)__asan::asan_memalign(alignment, size, &stack2);
      vec.push_back(ptr);
      ptr[0] = 0;
      ptr[size-1] = 0;
      ptr[size/2] = 0;
    }
  }
  for (size_t i = 0; i < vec.size(); i++)
    __asan::asan_free(vec[i], &stack3);
}


TEST(AddressSanitizer, NoInstMallocTest) {
#ifdef __arm__
  MallocStress(300000);
#else
  MallocStress(1000000);
#endif
}

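// Dump the shadow bytes covering [ptr - 32, ptr + size + 32), printing '.'
// at the boundaries of the user region.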
static void PrintShadow(const char *tag, uptr ptr, size_t size) {
  fprintf(stderr, "%s shadow: %lx size % 3ld: ", tag, (long)ptr, (long)size);
  uptr prev_shadow = 0;
  for (sptr i = -32; i < (sptr)size + 32; i++) {
    uptr shadow = __asan::MemToShadow(ptr + i);
    if (i == 0 || i == (sptr)size)
      fprintf(stderr, ".");
    if (shadow != prev_shadow) {
      prev_shadow = shadow;
      fprintf(stderr, "%02x", (int)*(u8*)shadow);
    }
  }
  fprintf(stderr, "\n");
}

TEST(AddressSanitizer, DISABLED_InternalPrintShadow) {
  for (size_t size = 1; size <= 513; size++) {
    char *ptr = new char[size];
    PrintShadow("m", (uptr)ptr, size);
    delete [] ptr;
    PrintShadow("f", (uptr)ptr, size);
  }
}

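// A pool of sample program counters, used to build synthetic stack traces
// for the compression tests below.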
static uptr pc_array[] = {
#if SANITIZER_WORDSIZE == 64
  0x7effbf756068ULL,
  0x7effbf75e5abULL,
  0x7effc0625b7cULL,
  0x7effc05b8997ULL,
  0x7effbf990577ULL,
  0x7effbf990c56ULL,
  0x7effbf992f3cULL,
  0x7effbf950c22ULL,
  0x7effc036dba0ULL,
  0x7effc03638a3ULL,
  0x7effc035be4aULL,
  0x7effc0539c45ULL,
  0x7effc0539a65ULL,
  0x7effc03db9b3ULL,
  0x7effc03db100ULL,
  0x7effc037c7b8ULL,
  0x7effc037bfffULL,
  0x7effc038b777ULL,
  0x7effc038021cULL,
  0x7effc037c7d1ULL,
  0x7effc037bfffULL,
  0x7effc038b777ULL,
  0x7effc038021cULL,
  0x7effc037c7d1ULL,
  0x7effc037bfffULL,
  0x7effc038b777ULL,
  0x7effc038021cULL,
  0x7effc037c7d1ULL,
  0x7effc037bfffULL,
  0x7effc0520d26ULL,
  0x7effc009ddffULL,
  0x7effbf90bb50ULL,
  0x7effbdddfa69ULL,
  0x7effbdde1fe2ULL,
  0x7effbdde2424ULL,
  0x7effbdde27b3ULL,
  0x7effbddee53bULL,
  0x7effbdde1988ULL,
  0x7effbdde0904ULL,
  0x7effc106ce0dULL,
  0x7effbcc3fa04ULL,
  0x7effbcc3f6a4ULL,
  0x7effbcc3e726ULL,
  0x7effbcc40852ULL,
  0x7effb681ec4dULL,
#endif  // SANITIZER_WORDSIZE
  0xB0B5E768,
  0x7B682EC1,
  0x367F9918,
  0xAE34E13,
  0xBA0C6C6,
  0x13250F46,
  0xA0D6A8AB,
  0x2B07C1A8,
  0x6C844F4A,
  0x2321B53,
  0x1F3D4F8F,
  0x3FE2924B,
  0xB7A2F568,
  0xBD23950A,
  0x61020930,
  0x33E7970C,
  0x405998A1,
  0x59F3551D,
  0x350E3028,
  0xBC55A28D,
  0x361F3AED,
  0xBEAD0F73,
  0xAEF28479,
  0x757E971F,
  0xAEBA450,
  0x43AD22F5,
  0x8C2C50C4,
  0x7AD8A2E1,
  0x69EE4EE8,
  0xC08DFF,
  0x4BA6538,
  0x3708AB2,
  0xC24B6475,
  0x7C8890D7,
  0x6662495F,
  0x9B641689,
  0xD3596B,
  0xA1049569,
  0x44CBC16,
  0x4D39C39F
};

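// Round-trip check: uncompressing a compressed trace must yield exactly the
// first n_frames PCs of the original trace, and CompressStack() may never
// claim more frames than the trace contained.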
void CompressStackTraceTest(size_t n_iter) {
  u32 seed = my_rand(&global_seed);
  const size_t kNumPcs = ARRAY_SIZE(pc_array);
  u32 compressed[2 * kNumPcs];

  for (size_t iter = 0; iter < n_iter; iter++) {
    std::random_shuffle(pc_array, pc_array + kNumPcs);
    __asan::StackTrace stack0, stack1;
    stack0.CopyFrom(pc_array, kNumPcs);
    stack0.size = std::max((size_t)1, (size_t)(my_rand(&seed) % stack0.size));
    size_t compress_size =
      std::max((size_t)2, (size_t)my_rand(&seed) % (2 * kNumPcs));
    size_t n_frames =
      __asan::StackTrace::CompressStack(&stack0, compressed, compress_size);
    Ident(n_frames);
    assert(n_frames <= stack0.size);
    __asan::StackTrace::UncompressStack(&stack1, compressed, compress_size);
    assert(stack1.size == n_frames);
    for (size_t i = 0; i < stack1.size; i++) {
      assert(stack0.trace[i] == stack1.trace[i]);
    }
  }
}

TEST(AddressSanitizer, CompressStackTraceTest) {
  CompressStackTraceTest(10000);
}

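// Benchmark only: repeatedly compress a full-size trace. Ident() keeps the
// result live so the call is not optimized away.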
void CompressStackTraceBenchmark(size_t n_iter) {
  const size_t kNumPcs = ARRAY_SIZE(pc_array);
  u32 compressed[2 * kNumPcs];
  std::random_shuffle(pc_array, pc_array + kNumPcs);

  __asan::StackTrace stack0;
  stack0.CopyFrom(pc_array, kNumPcs);
  stack0.size = kNumPcs;
  for (size_t iter = 0; iter < n_iter; iter++) {
    size_t compress_size = kNumPcs;
    size_t n_frames =
      __asan::StackTrace::CompressStack(&stack0, compressed, compress_size);
    Ident(n_frames);
  }
}

TEST(AddressSanitizer, CompressStackTraceBenchmark) {
  CompressStackTraceBenchmark(1 << 24);
}

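// A freed chunk should spend some time in the quarantine before its address
// can be returned by malloc again: expect at least ~100000 malloc/free
// pairs before reuse, but reuse must happen eventually.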
TEST(AddressSanitizer, QuarantineTest) {
  __asan::StackTrace stack;
  stack.trace[0] = 0x890;
  stack.size = 1;

  const int size = 32;
  void *p = __asan::asan_malloc(size, &stack);
  __asan::asan_free(p, &stack);
  size_t i;
  size_t max_i = 1 << 30;
  for (i = 0; i < max_i; i++) {
    void *p1 = __asan::asan_malloc(size, &stack);
    __asan::asan_free(p1, &stack);
    if (p1 == p) break;
  }
  // fprintf(stderr, "i=%ld\n", i);
  EXPECT_GE(i, 100000U);
  EXPECT_LT(i, max_i);
}

void *ThreadedQuarantineTestWorker(void *unused) {
  (void)unused;
  u32 seed = my_rand(&global_seed);
  __asan::StackTrace stack;
  stack.trace[0] = 0x890;
  stack.size = 1;

  for (size_t i = 0; i < 1000; i++) {
    void *p = __asan::asan_malloc(1 + (my_rand(&seed) % 4000), &stack);
    __asan::asan_free(p, &stack);
  }
  return NULL;
}

// Check that the thread-local allocators are flushed when threads are
// destroyed.
TEST(AddressSanitizer, ThreadedQuarantineTest) {
  const int n_threads = 3000;
  size_t mmaped1 = __asan_get_heap_size();
  for (int i = 0; i < n_threads; i++) {
    pthread_t t;
    PTHREAD_CREATE(&t, NULL, ThreadedQuarantineTestWorker, 0);
    PTHREAD_JOIN(t, 0);
    size_t mmaped2 = __asan_get_heap_size();
    EXPECT_LT(mmaped2 - mmaped1, 320U * (1 << 20));
  }
}

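// Worker for ThreadedOneSizeMallocStressTest: many rounds of batched
// same-size mallocs followed by the matching frees.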
void *ThreadedOneSizeMallocStress(void *unused) {
  (void)unused;
  __asan::StackTrace stack;
  stack.trace[0] = 0x890;
  stack.size = 1;
  const size_t kNumMallocs = 1000;
  for (int iter = 0; iter < 1000; iter++) {
    void *p[kNumMallocs];
    for (size_t i = 0; i < kNumMallocs; i++) {
      p[i] = __asan::asan_malloc(32, &stack);
    }
    for (size_t i = 0; i < kNumMallocs; i++) {
      __asan::asan_free(p[i], &stack);
    }
  }
  return NULL;
}

TEST(AddressSanitizer, ThreadedOneSizeMallocStressTest) {
  const int kNumThreads = 4;
  pthread_t t[kNumThreads];
  for (int i = 0; i < kNumThreads; i++) {
    PTHREAD_CREATE(&t[i], 0, ThreadedOneSizeMallocStress, 0);
  }
  for (int i = 0; i < kNumThreads; i++) {
    PTHREAD_JOIN(t[i], 0);
  }
}

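// memset() on shadow or shadow-gap addresses must be caught by the
// interceptor even though this file is not instrumented.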
TEST(AddressSanitizer, MemsetWildAddressTest) {
  typedef void*(*memset_p)(void*, int, size_t);
  // Prevent inlining of memset().
  volatile memset_p libc_memset = (memset_p)memset;
  EXPECT_DEATH(libc_memset((void*)(kLowShadowBeg + 200), 0, 100),
               "unknown-crash.*low shadow");
  EXPECT_DEATH(libc_memset((void*)(kShadowGapBeg + 200), 0, 100),
               "unknown-crash.*shadow gap");
  EXPECT_DEATH(libc_memset((void*)(kHighShadowBeg + 200), 0, 100),
               "unknown-crash.*high shadow");
}

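// For the sizes checked here __asan_get_estimated_allocated_size() is the
// identity; the two allocator versions only disagree on the degenerate
// size 0.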
TEST(AddressSanitizerInterface, GetEstimatedAllocatedSize) {
#if ASAN_ALLOCATOR_VERSION == 1
  EXPECT_EQ(1U, __asan_get_estimated_allocated_size(0));
#elif ASAN_ALLOCATOR_VERSION == 2
  EXPECT_EQ(0U, __asan_get_estimated_allocated_size(0));
#endif
  const size_t sizes[] = { 1, 30, 1<<30 };
  for (size_t i = 0; i < 3; i++) {
    EXPECT_EQ(sizes[i], __asan_get_estimated_allocated_size(sizes[i]));
  }
}

static const char* kGetAllocatedSizeErrorMsg =
  "attempting to call __asan_get_allocated_size()";

TEST(AddressSanitizerInterface, GetAllocatedSizeAndOwnershipTest) {
  const size_t kArraySize = 100;
  char *array = Ident((char*)malloc(kArraySize));
  int *int_ptr = Ident(new int);

  // Allocated memory is owned by the allocator, and the allocated size
  // should equal the requested size.
  EXPECT_EQ(true, __asan_get_ownership(array));
  EXPECT_EQ(kArraySize, __asan_get_allocated_size(array));
  EXPECT_EQ(true, __asan_get_ownership(int_ptr));
  EXPECT_EQ(sizeof(int), __asan_get_allocated_size(int_ptr));

  // We may not call __asan_get_allocated_size() on memory we didn't map,
  // or on interior pointers (ones not returned by a previous malloc).
  void *wild_addr = (void*)0x1;
  EXPECT_FALSE(__asan_get_ownership(wild_addr));
  EXPECT_DEATH(__asan_get_allocated_size(wild_addr), kGetAllocatedSizeErrorMsg);
  EXPECT_FALSE(__asan_get_ownership(array + kArraySize / 2));
  EXPECT_DEATH(__asan_get_allocated_size(array + kArraySize / 2),
               kGetAllocatedSizeErrorMsg);

  // NULL is not owned, but is a valid argument for
  // __asan_get_allocated_size().
  EXPECT_FALSE(__asan_get_ownership(NULL));
  EXPECT_EQ(0U, __asan_get_allocated_size(NULL));

  // Once memory is freed, it is no longer owned, and calling
  // __asan_get_allocated_size() on it is forbidden.
  free(array);
  EXPECT_FALSE(__asan_get_ownership(array));
  EXPECT_DEATH(__asan_get_allocated_size(array), kGetAllocatedSizeErrorMsg);

  delete int_ptr;
}

TEST(AddressSanitizerInterface, GetCurrentAllocatedBytesTest) {
  size_t before_malloc, after_malloc, after_free;
  char *array;
  const size_t kMallocSize = 100;
  before_malloc = __asan_get_current_allocated_bytes();

  array = Ident((char*)malloc(kMallocSize));
  after_malloc = __asan_get_current_allocated_bytes();
  EXPECT_EQ(before_malloc + kMallocSize, after_malloc);

  free(array);
  after_free = __asan_get_current_allocated_bytes();
  EXPECT_EQ(before_malloc, after_free);
}

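// Die with a deterministic double-free report. Used to terminate death
// tests once their real checks have passed.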
static void DoDoubleFree() {
  int *x = Ident(new int);
  delete Ident(x);
  delete Ident(x);
}

#if ASAN_ALLOCATOR_VERSION == 1
// This test is run in a separate process, so that the large malloced
// chunk won't remain in the free lists after the test.
// Note: use ASSERT_* instead of EXPECT_* here.
static void RunGetHeapSizeTestAndDie() {
  size_t old_heap_size, new_heap_size, heap_growth;
  // We are unlikely to have a chunk of this size in the free list already.
  static const size_t kLargeMallocSize = 1 << 29;  // 512M
  old_heap_size = __asan_get_heap_size();
  fprintf(stderr, "allocating %zu bytes:\n", kLargeMallocSize);
  free(Ident(malloc(kLargeMallocSize)));
  new_heap_size = __asan_get_heap_size();
  heap_growth = new_heap_size - old_heap_size;
  fprintf(stderr, "heap growth after first malloc: %zu\n", heap_growth);
  ASSERT_GE(heap_growth, kLargeMallocSize);
  ASSERT_LE(heap_growth, 2 * kLargeMallocSize);

  // Now the large chunk should be in the free list, and can be
  // allocated again without increasing the heap size.
  old_heap_size = new_heap_size;
  free(Ident(malloc(kLargeMallocSize)));
  heap_growth = __asan_get_heap_size() - old_heap_size;
  fprintf(stderr, "heap growth after second malloc: %zu\n", heap_growth);
  ASSERT_LT(heap_growth, kLargeMallocSize);

  // Test passed. Now die with the expected double-free.
  DoDoubleFree();
}

TEST(AddressSanitizerInterface, GetHeapSizeTest) {
  EXPECT_DEATH(RunGetHeapSizeTestAndDie(), "double-free");
}
#elif ASAN_ALLOCATOR_VERSION == 2
TEST(AddressSanitizerInterface, GetHeapSizeTest) {
  // asan_allocator2 does not keep huge chunks in the free list, but unmaps
  // them. The chunk should be larger than the quarantine size, otherwise it
  // would be stuck in the quarantine instead of being unmapped.
  static const size_t kLargeMallocSize = 1 << 28;  // 256M
  uptr old_heap_size = __asan_get_heap_size();
  for (int i = 0; i < 3; i++) {
    // fprintf(stderr, "allocating %zu bytes:\n", kLargeMallocSize);
    free(Ident(malloc(kLargeMallocSize)));
    EXPECT_EQ(old_heap_size, __asan_get_heap_size());
  }
}
#endif

// Note: use ASSERT_* instead of EXPECT_* here.
static void DoLargeMallocForGetFreeBytesTestAndDie() {
#if ASAN_ALLOCATOR_VERSION == 1
  // asan_allocator2 does not keep large chunks in free lists, so this check
  // only applies to allocator version 1.
  size_t old_free_bytes, new_free_bytes;
  static const size_t kLargeMallocSize = 1 << 29;  // 512M
  // If we malloc and free a large memory chunk, it will not fall
  // into the quarantine and will be available for future requests.
  old_free_bytes = __asan_get_free_bytes();
  fprintf(stderr, "allocating %zu bytes:\n", kLargeMallocSize);
  fprintf(stderr, "free bytes before malloc: %zu\n", old_free_bytes);
  free(Ident(malloc(kLargeMallocSize)));
  new_free_bytes = __asan_get_free_bytes();
  fprintf(stderr, "free bytes after malloc and free: %zu\n", new_free_bytes);
  ASSERT_GE(new_free_bytes, old_free_bytes + kLargeMallocSize);
#endif  // ASAN_ALLOCATOR_VERSION
  // Test passed.
  DoDoubleFree();
}

TEST(AddressSanitizerInterface, GetFreeBytesTest) {
  // Allocate a small chunk; the allocator now probably has a lot of such
  // chunks ready to fulfill future requests, so further requests should
  // decrease the number of free bytes. Do this only on systems where there
  // is enough memory for such an assumption.
  if (SANITIZER_WORDSIZE == 64 && !ASAN_LOW_MEMORY) {
    static const size_t kNumOfChunks = 100;
    static const size_t kChunkSize = 100;
    char *chunks[kNumOfChunks];
    size_t i;
    size_t old_free_bytes, new_free_bytes;
    chunks[0] = Ident((char*)malloc(kChunkSize));
    old_free_bytes = __asan_get_free_bytes();
    for (i = 1; i < kNumOfChunks; i++) {
      chunks[i] = Ident((char*)malloc(kChunkSize));
      new_free_bytes = __asan_get_free_bytes();
      EXPECT_LT(new_free_bytes, old_free_bytes);
      old_free_bytes = new_free_bytes;
    }
    for (i = 0; i < kNumOfChunks; i++)
      free(chunks[i]);
  }
  EXPECT_DEATH(DoLargeMallocForGetFreeBytesTestAndDie(), "double-free");
}

static const size_t kManyThreadsMallocSizes[] = {5, 1UL<<10, 1UL<<20, 357};
static const size_t kManyThreadsIterations = 250;
static const size_t kManyThreadsNumThreads =
  (SANITIZER_WORDSIZE == 32) ? 40 : 200;

void *ManyThreadsWithStatsWorker(void *arg) {
  (void)arg;
  for (size_t iter = 0; iter < kManyThreadsIterations; iter++) {
    for (size_t size_index = 0; size_index < 4; size_index++) {
      free(Ident(malloc(kManyThreadsMallocSizes[size_index])));
    }
  }
  return 0;
}

TEST(AddressSanitizerInterface, ManyThreadsWithStatsStressTest) {
  size_t before_test, after_test, i;
  pthread_t threads[kManyThreadsNumThreads];
  before_test = __asan_get_current_allocated_bytes();
  for (i = 0; i < kManyThreadsNumThreads; i++) {
    PTHREAD_CREATE(&threads[i], 0,
                   (void* (*)(void *x))ManyThreadsWithStatsWorker, (void*)i);
  }
  for (i = 0; i < kManyThreadsNumThreads; i++) {
    PTHREAD_JOIN(threads[i], 0);
  }
  after_test = __asan_get_current_allocated_bytes();
  // ASan stats also reflect memory usage of internal ASan RTL structs,
  // so we can't check for equality here.
  EXPECT_LT(after_test, before_test + (1UL<<20));
}

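// __asan_set_error_exit_code() returns the previously installed code;
// check both the return values and that death tests exit with the most
// recently installed code.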
TEST(AddressSanitizerInterface, ExitCode) {
  int original_exit_code = __asan_set_error_exit_code(7);
  EXPECT_EXIT(DoDoubleFree(), ::testing::ExitedWithCode(7), "");
  EXPECT_EQ(7, __asan_set_error_exit_code(8));
  EXPECT_EXIT(DoDoubleFree(), ::testing::ExitedWithCode(8), "");
  EXPECT_EQ(8, __asan_set_error_exit_code(original_exit_code));
  EXPECT_EXIT(DoDoubleFree(),
              ::testing::ExitedWithCode(original_exit_code), "");
}

static void MyDeathCallback() {
  fprintf(stderr, "MyDeathCallback\n");
}

TEST(AddressSanitizerInterface, DeathCallbackTest) {
  __asan_set_death_callback(MyDeathCallback);
  EXPECT_DEATH(DoDoubleFree(), "MyDeathCallback");
  __asan_set_death_callback(NULL);
}

static const char* kUseAfterPoisonErrorMessage = "use-after-poison";

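// GOOD_ACCESS/BAD_ACCESS check the poisoning state of a single byte
// directly via the internal AddressIsPoisoned().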
#define GOOD_ACCESS(ptr, offset)  \
    EXPECT_FALSE(__asan::AddressIsPoisoned((uptr)(ptr + offset)))

#define BAD_ACCESS(ptr, offset) \
    EXPECT_TRUE(__asan::AddressIsPoisoned((uptr)(ptr + offset)))

TEST(AddressSanitizerInterface, SimplePoisonMemoryRegionTest) {
  char *array = Ident((char*)malloc(120));
  // Poison array[40..80).
  __asan_poison_memory_region(array + 40, 40);
  GOOD_ACCESS(array, 39);
  GOOD_ACCESS(array, 80);
  BAD_ACCESS(array, 40);
  BAD_ACCESS(array, 60);
  BAD_ACCESS(array, 79);
  EXPECT_DEATH(__asan_report_error(0, 0, 0, (uptr)(array + 40), true, 1),
               kUseAfterPoisonErrorMessage);
  __asan_unpoison_memory_region(array + 40, 40);
  // Access previously poisoned memory.
  GOOD_ACCESS(array, 40);
  GOOD_ACCESS(array, 79);
  free(array);
}

TEST(AddressSanitizerInterface, OverlappingPoisonMemoryRegionTest) {
  char *array = Ident((char*)malloc(120));
  // Poison [0..40) and [80..120).
  __asan_poison_memory_region(array, 40);
  __asan_poison_memory_region(array + 80, 40);
  BAD_ACCESS(array, 20);
  GOOD_ACCESS(array, 60);
  BAD_ACCESS(array, 100);
  // Poison the whole array - [0..120).
  __asan_poison_memory_region(array, 120);
  BAD_ACCESS(array, 60);
  // Unpoison [24..96).
  __asan_unpoison_memory_region(array + 24, 72);
  BAD_ACCESS(array, 23);
  GOOD_ACCESS(array, 24);
  GOOD_ACCESS(array, 60);
  GOOD_ACCESS(array, 95);
  BAD_ACCESS(array, 96);
  free(array);
}

TEST(AddressSanitizerInterface, PushAndPopWithPoisoningTest) {
  // Vector of capacity 20.
  char *vec = Ident((char*)malloc(20));
  __asan_poison_memory_region(vec, 20);
  for (size_t i = 0; i < 7; i++) {
    // Simulate push_back.
    __asan_unpoison_memory_region(vec + i, 1);
    GOOD_ACCESS(vec, i);
    BAD_ACCESS(vec, i + 1);
  }
  for (size_t i = 7; i > 0; i--) {
    // Simulate pop_back.
    __asan_poison_memory_region(vec + i - 1, 1);
    BAD_ACCESS(vec, i - 1);
    if (i > 1) GOOD_ACCESS(vec, i - 2);
  }
  free(vec);
}

// Make sure that within each aligned block of size "2^granularity" no
// "true" (poisoned) value precedes a "false" (unpoisoned) one.
static void MakeShadowValid(bool *shadow, int length, int granularity) {
  bool can_be_poisoned = true;
  for (int i = length - 1; i >= 0; i--) {
    if (!shadow[i])
      can_be_poisoned = false;
    if (!can_be_poisoned)
      shadow[i] = false;
    if (i % (1 << granularity) == 0) {
      can_be_poisoned = true;
    }
  }
}

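// Exhaustively poison and unpoison pairs of overlapping sub-ranges of a
// small array, comparing the real shadow state against the model produced
// by MakeShadowValid().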
642
643TEST(AddressSanitizerInterface, PoisoningStressTest) {
644  const size_t kSize = 24;
645  bool expected[kSize];
646  char *arr = Ident((char*)malloc(kSize));
647  for (size_t l1 = 0; l1 < kSize; l1++) {
648    for (size_t s1 = 1; l1 + s1 <= kSize; s1++) {
649      for (size_t l2 = 0; l2 < kSize; l2++) {
650        for (size_t s2 = 1; l2 + s2 <= kSize; s2++) {
651          // Poison [l1, l1+s1), [l2, l2+s2) and check result.
652          __asan_unpoison_memory_region(arr, kSize);
653          __asan_poison_memory_region(arr + l1, s1);
654          __asan_poison_memory_region(arr + l2, s2);
655          memset(expected, false, kSize);
656          memset(expected + l1, true, s1);
657          MakeShadowValid(expected, kSize, /*granularity*/ 3);
658          memset(expected + l2, true, s2);
659          MakeShadowValid(expected, kSize, /*granularity*/ 3);
660          for (size_t i = 0; i < kSize; i++) {
661            ASSERT_EQ(expected[i], __asan_address_is_poisoned(arr + i));
662          }
663          // Unpoison [l1, l1+s1) and [l2, l2+s2) and check result.
664          __asan_poison_memory_region(arr, kSize);
665          __asan_unpoison_memory_region(arr + l1, s1);
666          __asan_unpoison_memory_region(arr + l2, s2);
667          memset(expected, true, kSize);
668          memset(expected + l1, false, s1);
669          MakeShadowValid(expected, kSize, /*granularity*/ 3);
670          memset(expected + l2, false, s2);
671          MakeShadowValid(expected, kSize, /*granularity*/ 3);
672          for (size_t i = 0; i < kSize; i++) {
673            ASSERT_EQ(expected[i], __asan_address_is_poisoned(arr + i));
674          }
675        }
676      }
677    }
678  }
679}
680
static const char *kInvalidPoisonMessage = "invalid-poison-memory-range";
static const char *kInvalidUnpoisonMessage = "invalid-unpoison-memory-range";

TEST(AddressSanitizerInterface, DISABLED_InvalidPoisonAndUnpoisonCallsTest) {
  char *array = Ident((char*)malloc(120));
  __asan_unpoison_memory_region(array, 120);
  // Try to unpoison memory we do not own.
  EXPECT_DEATH(__asan_unpoison_memory_region(array, 121),
               kInvalidUnpoisonMessage);
  EXPECT_DEATH(__asan_unpoison_memory_region(array - 1, 120),
               kInvalidUnpoisonMessage);

  __asan_poison_memory_region(array, 120);
  // Try to poison memory we do not own.
  EXPECT_DEATH(__asan_poison_memory_region(array, 121), kInvalidPoisonMessage);
  EXPECT_DEATH(__asan_poison_memory_region(array - 1, 120),
               kInvalidPoisonMessage);
  free(array);
}

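// Report callback that brackets the report with "ABCDEF" sentinels on
// stderr, so the death test below can verify that the callback ran.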
static void ErrorReportCallbackOneToZ(const char *report) {
  int report_len = strlen(report);
  ASSERT_EQ(6, write(2, "ABCDEF", 6));
  ASSERT_EQ(report_len, write(2, report, report_len));
  ASSERT_EQ(6, write(2, "ABCDEF", 6));
  _exit(1);
}

TEST(AddressSanitizerInterface, SetErrorReportCallbackTest) {
  __asan_set_error_report_callback(ErrorReportCallbackOneToZ);
  EXPECT_DEATH(__asan_report_error(0, 0, 0, 0, true, 1),
               ASAN_PCRE_DOTALL "ABCDEF.*AddressSanitizer.*WRITE.*ABCDEF");
  __asan_set_error_report_callback(NULL);
}

TEST(AddressSanitizerInterface, GetOwnershipStressTest) {
  std::vector<char *> pointers;
  std::vector<size_t> sizes;
  const size_t kNumMallocs =
      (SANITIZER_WORDSIZE <= 32 || ASAN_LOW_MEMORY) ? 1 << 10 : 1 << 14;
  for (size_t i = 0; i < kNumMallocs; i++) {
    size_t size = i * 100 + 1;
    pointers.push_back((char*)malloc(size));
    sizes.push_back(size);
  }
  for (size_t i = 0; i < 4000000; i++) {
    EXPECT_FALSE(__asan_get_ownership(&pointers));
    EXPECT_FALSE(__asan_get_ownership((void*)0x1234));
    size_t idx = i % kNumMallocs;
    EXPECT_TRUE(__asan_get_ownership(pointers[idx]));
    EXPECT_EQ(sizes[idx], __asan_get_allocated_size(pointers[idx]));
  }
  for (size_t i = 0, n = pointers.size(); i < n; i++)
    free(pointers[i]);
}