//===-- asan_noinst_test.cc -----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// This test file should be compiled w/o asan instrumentation.
//===----------------------------------------------------------------------===//

#include "asan_allocator.h"
#include "asan_internal.h"
#include "asan_mapping.h"
#include "asan_test_utils.h"

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>  // for memset()
#include <algorithm>
#include <vector>
#include <limits>


TEST(AddressSanitizer, InternalSimpleDeathTest) {
  EXPECT_DEATH(exit(1), "");
}

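// Exercises the allocator with a randomized mix of asan_memalign()
// allocations (varying size and alignment) and asan_free() calls, touching
// the first, middle, and last byte of each allocation.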
static void MallocStress(size_t n) {
  u32 seed = my_rand();
  StackTrace stack1;
  stack1.trace[0] = 0xa123;
  stack1.trace[1] = 0xa456;
  stack1.size = 2;

  StackTrace stack2;
  stack2.trace[0] = 0xb123;
  stack2.trace[1] = 0xb456;
  stack2.size = 2;

  StackTrace stack3;
  stack3.trace[0] = 0xc123;
  stack3.trace[1] = 0xc456;
  stack3.size = 2;

  std::vector<void *> vec;
  for (size_t i = 0; i < n; i++) {
    if ((i % 3) == 0) {
      if (vec.empty()) continue;
      size_t idx = my_rand_r(&seed) % vec.size();
      void *ptr = vec[idx];
      vec[idx] = vec.back();
      vec.pop_back();
      __asan::asan_free(ptr, &stack1, __asan::FROM_MALLOC);
    } else {
      size_t size = my_rand_r(&seed) % 1000 + 1;
      switch ((my_rand_r(&seed) % 128)) {
        case 0: size += 1024; break;
        case 1: size += 2048; break;
        case 2: size += 4096; break;
      }
      size_t alignment = 1 << (my_rand_r(&seed) % 10 + 1);
      char *ptr = (char*)__asan::asan_memalign(alignment, size,
                                               &stack2, __asan::FROM_MALLOC);
      vec.push_back(ptr);
      ptr[0] = 0;
      ptr[size-1] = 0;
      ptr[size/2] = 0;
    }
  }
  for (size_t i = 0; i < vec.size(); i++)
    __asan::asan_free(vec[i], &stack3, __asan::FROM_MALLOC);
}


TEST(AddressSanitizer, NoInstMallocTest) {
  MallocStress(ASAN_LOW_MEMORY ? 300000 : 1000000);
}

TEST(AddressSanitizer, ThreadedMallocStressTest) {
  const int kNumThreads = 4;
  const int kNumIterations = (ASAN_LOW_MEMORY) ? 10000 : 100000;
  pthread_t t[kNumThreads];
  for (int i = 0; i < kNumThreads; i++) {
    PTHREAD_CREATE(&t[i], 0, (void* (*)(void *x))MallocStress,
        (void*)kNumIterations);
  }
  for (int i = 0; i < kNumThreads; i++) {
    PTHREAD_JOIN(t[i], 0);
  }
}

static void PrintShadow(const char *tag, uptr ptr, size_t size) {
  fprintf(stderr, "%s shadow: %lx size % 3ld: ", tag, (long)ptr, (long)size);
  uptr prev_shadow = 0;
  for (sptr i = -32; i < (sptr)size + 32; i++) {
    uptr shadow = __asan::MemToShadow(ptr + i);
    if (i == 0 || i == (sptr)size)
      fprintf(stderr, ".");
    if (shadow != prev_shadow) {
      prev_shadow = shadow;
      fprintf(stderr, "%02x", (int)*(u8*)shadow);
    }
  }
  fprintf(stderr, "\n");
}
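
// For reference, MemToShadow() used above implements ASan's direct shadow
// mapping: every 8 application bytes share 1 shadow byte. A minimal sketch
// of that arithmetic, assuming the usual scale of 3; the offset below is a
// placeholder, the real platform-specific constant lives in asan_mapping.h.
uptr ExampleMemToShadow(uptr mem) {
  const uptr kAssumedShadowScale = 3;            // 8 bytes -> 1 shadow byte.
  const uptr kAssumedShadowOffset = 0x7fff8000;  // Illustrative value only.
  return (mem >> kAssumedShadowScale) + kAssumedShadowOffset;
}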

TEST(AddressSanitizer, DISABLED_InternalPrintShadow) {
  for (size_t size = 1; size <= 513; size++) {
    char *ptr = new char[size];
    PrintShadow("m", (uptr)ptr, size);
    delete [] ptr;
    PrintShadow("f", (uptr)ptr, size);
  }
}

static uptr pc_array[] = {
#if SANITIZER_WORDSIZE == 64
  0x7effbf756068ULL,
  0x7effbf75e5abULL,
  0x7effc0625b7cULL,
  0x7effc05b8997ULL,
  0x7effbf990577ULL,
  0x7effbf990c56ULL,
  0x7effbf992f3cULL,
  0x7effbf950c22ULL,
  0x7effc036dba0ULL,
  0x7effc03638a3ULL,
  0x7effc035be4aULL,
  0x7effc0539c45ULL,
  0x7effc0539a65ULL,
  0x7effc03db9b3ULL,
  0x7effc03db100ULL,
  0x7effc037c7b8ULL,
  0x7effc037bfffULL,
  0x7effc038b777ULL,
  0x7effc038021cULL,
  0x7effc037c7d1ULL,
  0x7effc037bfffULL,
  0x7effc038b777ULL,
  0x7effc038021cULL,
  0x7effc037c7d1ULL,
  0x7effc037bfffULL,
  0x7effc038b777ULL,
  0x7effc038021cULL,
  0x7effc037c7d1ULL,
  0x7effc037bfffULL,
  0x7effc0520d26ULL,
  0x7effc009ddffULL,
  0x7effbf90bb50ULL,
  0x7effbdddfa69ULL,
  0x7effbdde1fe2ULL,
  0x7effbdde2424ULL,
  0x7effbdde27b3ULL,
  0x7effbddee53bULL,
  0x7effbdde1988ULL,
  0x7effbdde0904ULL,
  0x7effc106ce0dULL,
  0x7effbcc3fa04ULL,
  0x7effbcc3f6a4ULL,
  0x7effbcc3e726ULL,
  0x7effbcc40852ULL,
  0x7effb681ec4dULL,
#endif  // SANITIZER_WORDSIZE
  0xB0B5E768,
  0x7B682EC1,
  0x367F9918,
  0xAE34E13,
  0xBA0C6C6,
  0x13250F46,
  0xA0D6A8AB,
  0x2B07C1A8,
  0x6C844F4A,
  0x2321B53,
  0x1F3D4F8F,
  0x3FE2924B,
  0xB7A2F568,
  0xBD23950A,
  0x61020930,
  0x33E7970C,
  0x405998A1,
  0x59F3551D,
  0x350E3028,
  0xBC55A28D,
  0x361F3AED,
  0xBEAD0F73,
  0xAEF28479,
  0x757E971F,
  0xAEBA450,
  0x43AD22F5,
  0x8C2C50C4,
  0x7AD8A2E1,
  0x69EE4EE8,
  0xC08DFF,
  0x4BA6538,
  0x3708AB2,
  0xC24B6475,
  0x7C8890D7,
  0x6662495F,
  0x9B641689,
  0xD3596B,
  0xA1049569,
  0x44CBC16,
  0x4D39C39F
};

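// Round-trip check: compressing a random prefix of pc_array and then
// uncompressing it must reproduce exactly the frames that fit, and never
// yield more frames than the input had.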
void CompressStackTraceTest(size_t n_iter) {
  u32 seed = my_rand();
  const size_t kNumPcs = ARRAY_SIZE(pc_array);
  u32 compressed[2 * kNumPcs];

  for (size_t iter = 0; iter < n_iter; iter++) {
    std::random_shuffle(pc_array, pc_array + kNumPcs);
    StackTrace stack0, stack1;
    stack0.CopyFrom(pc_array, kNumPcs);
    stack0.size = std::max((size_t)1, (size_t)(my_rand_r(&seed) % stack0.size));
    size_t compress_size =
      std::max((size_t)2, (size_t)my_rand_r(&seed) % (2 * kNumPcs));
    size_t n_frames =
      StackTrace::CompressStack(&stack0, compressed, compress_size);
    Ident(n_frames);
    assert(n_frames <= stack0.size);
    StackTrace::UncompressStack(&stack1, compressed, compress_size);
    assert(stack1.size == n_frames);
    for (size_t i = 0; i < stack1.size; i++) {
      assert(stack0.trace[i] == stack1.trace[i]);
    }
  }
}

TEST(AddressSanitizer, CompressStackTraceTest) {
  CompressStackTraceTest(10000);
}

void CompressStackTraceBenchmark(size_t n_iter) {
  const size_t kNumPcs = ARRAY_SIZE(pc_array);
  u32 compressed[2 * kNumPcs];
  std::random_shuffle(pc_array, pc_array + kNumPcs);

  StackTrace stack0;
  stack0.CopyFrom(pc_array, kNumPcs);
  stack0.size = kNumPcs;
  for (size_t iter = 0; iter < n_iter; iter++) {
    size_t compress_size = kNumPcs;
    size_t n_frames =
      StackTrace::CompressStack(&stack0, compressed, compress_size);
    Ident(n_frames);
  }
}

TEST(AddressSanitizer, CompressStackTraceBenchmark) {
  CompressStackTraceBenchmark(1 << 24);
}

TEST(AddressSanitizer, QuarantineTest) {
  StackTrace stack;
  stack.trace[0] = 0x890;
  stack.size = 1;

  const int size = 1024;
  void *p = __asan::asan_malloc(size, &stack);
  __asan::asan_free(p, &stack, __asan::FROM_MALLOC);
  size_t i;
  size_t max_i = 1 << 30;
  for (i = 0; i < max_i; i++) {
    void *p1 = __asan::asan_malloc(size, &stack);
    __asan::asan_free(p1, &stack, __asan::FROM_MALLOC);
    if (p1 == p) break;
  }
  EXPECT_GE(i, 10000U);
  EXPECT_LT(i, max_i);
}
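
// Why the loop above needs so many iterations: a freed chunk first sits in
// a FIFO quarantine and becomes reusable only after later frees push it out.
// A minimal self-contained sketch of the idea (illustrative only: the real
// quarantine lives inside the ASan allocator and is bounded by total byte
// size, not entry count):
namespace quarantine_sketch {
const size_t kCapacity = 4;
void *ring[kCapacity];
size_t head, count;
// Quarantines p; returns the oldest chunk once it is evicted (now safe to
// actually reuse), or NULL while the quarantine still has room.
void *Quarantine(void *p) {
  if (count < kCapacity) {
    ring[(head + count++) % kCapacity] = p;
    return NULL;
  }
  void *evicted = ring[head];     // Oldest entry leaves the quarantine.
  ring[head] = p;                 // New entry takes its slot...
  head = (head + 1) % kCapacity;  // ...which becomes the back of the FIFO.
  return evicted;
}
}  // namespace quarantine_sketch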

void *ThreadedQuarantineTestWorker(void *unused) {
  (void)unused;
  u32 seed = my_rand();
  StackTrace stack;
  stack.trace[0] = 0x890;
  stack.size = 1;

  for (size_t i = 0; i < 1000; i++) {
    void *p = __asan::asan_malloc(1 + (my_rand_r(&seed) % 4000), &stack);
    __asan::asan_free(p, &stack, __asan::FROM_MALLOC);
  }
  return NULL;
}

// Check that the thread-local allocators are flushed when threads are
// destroyed.
TEST(AddressSanitizer, ThreadedQuarantineTest) {
  const int n_threads = 3000;
  size_t mmaped1 = __asan_get_heap_size();
  for (int i = 0; i < n_threads; i++) {
    pthread_t t;
    PTHREAD_CREATE(&t, NULL, ThreadedQuarantineTestWorker, 0);
    PTHREAD_JOIN(t, 0);
    size_t mmaped2 = __asan_get_heap_size();
    EXPECT_LT(mmaped2 - mmaped1, 320U * (1 << 20));
  }
}

void *ThreadedOneSizeMallocStress(void *unused) {
  (void)unused;
  StackTrace stack;
  stack.trace[0] = 0x890;
  stack.size = 1;
  const size_t kNumMallocs = 1000;
  for (int iter = 0; iter < 1000; iter++) {
    void *p[kNumMallocs];
    for (size_t i = 0; i < kNumMallocs; i++) {
      p[i] = __asan::asan_malloc(32, &stack);
    }
    for (size_t i = 0; i < kNumMallocs; i++) {
      __asan::asan_free(p[i], &stack, __asan::FROM_MALLOC);
    }
  }
  return NULL;
}

TEST(AddressSanitizer, ThreadedOneSizeMallocStressTest) {
  const int kNumThreads = 4;
  pthread_t t[kNumThreads];
  for (int i = 0; i < kNumThreads; i++) {
    PTHREAD_CREATE(&t[i], 0, ThreadedOneSizeMallocStress, 0);
  }
  for (int i = 0; i < kNumThreads; i++) {
    PTHREAD_JOIN(t[i], 0);
  }
}

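// A memset() into ASan's own shadow regions or the shadow gap is a wild
// access: the run-time cannot map such an address back to application
// memory, so it reports an unknown-crash naming the region that was hit.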
TEST(AddressSanitizer, MemsetWildAddressTest) {
  using __asan::kHighMemEnd;
  typedef void*(*memset_p)(void*, int, size_t);
  // Prevent inlining of memset().
  volatile memset_p libc_memset = (memset_p)memset;
  EXPECT_DEATH(libc_memset((void*)(kLowShadowBeg + 200), 0, 100),
               (kLowShadowEnd == 0) ? "unknown-crash.*shadow gap"
                                    : "unknown-crash.*low shadow");
  EXPECT_DEATH(libc_memset((void*)(kShadowGapBeg + 200), 0, 100),
               "unknown-crash.*shadow gap");
  EXPECT_DEATH(libc_memset((void*)(kHighShadowBeg + 200), 0, 100),
               "unknown-crash.*high shadow");
}

TEST(AddressSanitizerInterface, GetEstimatedAllocatedSize) {
  EXPECT_EQ(0U, __asan_get_estimated_allocated_size(0));
  const size_t sizes[] = { 1, 30, 1<<30 };
  for (size_t i = 0; i < 3; i++) {
    EXPECT_EQ(sizes[i], __asan_get_estimated_allocated_size(sizes[i]));
  }
}

static const char* kGetAllocatedSizeErrorMsg =
  "attempting to call __asan_get_allocated_size()";

TEST(AddressSanitizerInterface, GetAllocatedSizeAndOwnershipTest) {
  const size_t kArraySize = 100;
  char *array = Ident((char*)malloc(kArraySize));
  int *int_ptr = Ident(new int);

  // Allocated memory is owned by the allocator, and the allocated size
  // should equal the requested size.
  EXPECT_EQ(true, __asan_get_ownership(array));
  EXPECT_EQ(kArraySize, __asan_get_allocated_size(array));
  EXPECT_EQ(true, __asan_get_ownership(int_ptr));
  EXPECT_EQ(sizeof(int), __asan_get_allocated_size(int_ptr));

  // We cannot call GetAllocatedSize on memory we didn't allocate, or on
  // interior pointers (not returned by a previous malloc).
  void *wild_addr = (void*)0x1;
  EXPECT_FALSE(__asan_get_ownership(wild_addr));
  EXPECT_DEATH(__asan_get_allocated_size(wild_addr), kGetAllocatedSizeErrorMsg);
  EXPECT_FALSE(__asan_get_ownership(array + kArraySize / 2));
  EXPECT_DEATH(__asan_get_allocated_size(array + kArraySize / 2),
               kGetAllocatedSizeErrorMsg);

  // NULL is not owned, but is a valid argument for __asan_get_allocated_size().
  EXPECT_FALSE(__asan_get_ownership(NULL));
  EXPECT_EQ(0U, __asan_get_allocated_size(NULL));

  // Once memory is freed, it is no longer owned, and calling GetAllocatedSize
  // on it is forbidden.
  free(array);
  EXPECT_FALSE(__asan_get_ownership(array));
  EXPECT_DEATH(__asan_get_allocated_size(array), kGetAllocatedSizeErrorMsg);
  delete int_ptr;

  void *zero_alloc = Ident(malloc(0));
  if (zero_alloc != 0) {
    // If malloc(0) is not null, this pointer is owned and should have valid
    // allocated size.
    EXPECT_TRUE(__asan_get_ownership(zero_alloc));
    // Allocated size is 0 or 1 depending on the allocator used.
    EXPECT_LT(__asan_get_allocated_size(zero_alloc), 2U);
  }
  free(zero_alloc);
}

TEST(AddressSanitizerInterface, GetCurrentAllocatedBytesTest) {
  size_t before_malloc, after_malloc, after_free;
  char *array;
  const size_t kMallocSize = 100;
  before_malloc = __asan_get_current_allocated_bytes();

  array = Ident((char*)malloc(kMallocSize));
  after_malloc = __asan_get_current_allocated_bytes();
  EXPECT_EQ(before_malloc + kMallocSize, after_malloc);

  free(array);
  after_free = __asan_get_current_allocated_bytes();
  EXPECT_EQ(before_malloc, after_free);
}

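// Deliberately frees the same pointer twice to trigger a fatal double-free
// report; used below to exercise error exit codes and the death callback.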
static void DoDoubleFree() {
  int *x = Ident(new int);
  delete Ident(x);
  delete Ident(x);
}

TEST(AddressSanitizerInterface, GetHeapSizeTest) {
  // asan_allocator2 does not keep huge chunks in the free list; it unmaps
  // them instead. The chunk must be larger than the quarantine size,
  // otherwise it gets stuck in the quarantine instead of being unmapped.
  static const size_t kLargeMallocSize = (1 << 28) + 1;  // 256M
  free(Ident(malloc(kLargeMallocSize)));  // Drain quarantine.
  uptr old_heap_size = __asan_get_heap_size();
  for (int i = 0; i < 3; i++) {
    // fprintf(stderr, "allocating %zu bytes:\n", kLargeMallocSize);
    free(Ident(malloc(kLargeMallocSize)));
    EXPECT_EQ(old_heap_size, __asan_get_heap_size());
  }
}

static const size_t kManyThreadsMallocSizes[] = {5, 1UL<<10, 1UL<<14, 357};
static const size_t kManyThreadsIterations = 250;
static const size_t kManyThreadsNumThreads =
  (SANITIZER_WORDSIZE == 32) ? 40 : 200;

void *ManyThreadsWithStatsWorker(void *arg) {
  (void)arg;
  for (size_t iter = 0; iter < kManyThreadsIterations; iter++) {
    for (size_t size_index = 0; size_index < 4; size_index++) {
      free(Ident(malloc(kManyThreadsMallocSizes[size_index])));
    }
  }
  // Just one large allocation.
  free(Ident(malloc(1 << 20)));
  return 0;
}

TEST(AddressSanitizerInterface, ManyThreadsWithStatsStressTest) {
  size_t before_test, after_test, i;
  pthread_t threads[kManyThreadsNumThreads];
  before_test = __asan_get_current_allocated_bytes();
  for (i = 0; i < kManyThreadsNumThreads; i++) {
    PTHREAD_CREATE(&threads[i], 0,
                   (void* (*)(void *x))ManyThreadsWithStatsWorker, (void*)i);
  }
  for (i = 0; i < kManyThreadsNumThreads; i++) {
    PTHREAD_JOIN(threads[i], 0);
  }
  after_test = __asan_get_current_allocated_bytes();
  // ASan stats also reflect memory usage of internal ASan RTL structs,
  // so we can't check for equality here.
  EXPECT_LT(after_test, before_test + (1UL<<20));
}

TEST(AddressSanitizerInterface, ExitCode) {
  int original_exit_code = __asan_set_error_exit_code(7);
  EXPECT_EXIT(DoDoubleFree(), ::testing::ExitedWithCode(7), "");
  EXPECT_EQ(7, __asan_set_error_exit_code(8));
  EXPECT_EXIT(DoDoubleFree(), ::testing::ExitedWithCode(8), "");
  EXPECT_EQ(8, __asan_set_error_exit_code(original_exit_code));
  EXPECT_EXIT(DoDoubleFree(),
              ::testing::ExitedWithCode(original_exit_code), "");
}

static void MyDeathCallback() {
  fprintf(stderr, "MyDeathCallback\n");
}

TEST(AddressSanitizerInterface, DeathCallbackTest) {
  __asan_set_death_callback(MyDeathCallback);
  EXPECT_DEATH(DoDoubleFree(), "MyDeathCallback");
  __asan_set_death_callback(NULL);
}

static const char* kUseAfterPoisonErrorMessage = "use-after-poison";

#define GOOD_ACCESS(ptr, offset)  \
    EXPECT_FALSE(__asan::AddressIsPoisoned((uptr)(ptr + offset)))

#define BAD_ACCESS(ptr, offset) \
    EXPECT_TRUE(__asan::AddressIsPoisoned((uptr)(ptr + offset)))

TEST(AddressSanitizerInterface, SimplePoisonMemoryRegionTest) {
  char *array = Ident((char*)malloc(120));
  // Poison array[40..80)
  __asan_poison_memory_region(array + 40, 40);
  GOOD_ACCESS(array, 39);
  GOOD_ACCESS(array, 80);
  BAD_ACCESS(array, 40);
  BAD_ACCESS(array, 60);
  BAD_ACCESS(array, 79);
  EXPECT_DEATH(__asan_report_error(0, 0, 0, (uptr)(array + 40), true, 1),
               kUseAfterPoisonErrorMessage);
  __asan_unpoison_memory_region(array + 40, 40);
  // Access previously poisoned memory.
  GOOD_ACCESS(array, 40);
  GOOD_ACCESS(array, 79);
  free(array);
}

TEST(AddressSanitizerInterface, OverlappingPoisonMemoryRegionTest) {
  char *array = Ident((char*)malloc(120));
  // Poison [0..40) and [80..120)
  __asan_poison_memory_region(array, 40);
  __asan_poison_memory_region(array + 80, 40);
  BAD_ACCESS(array, 20);
  GOOD_ACCESS(array, 60);
  BAD_ACCESS(array, 100);
  // Poison whole array - [0..120)
  __asan_poison_memory_region(array, 120);
  BAD_ACCESS(array, 60);
  // Unpoison [24..96)
  __asan_unpoison_memory_region(array + 24, 72);
  BAD_ACCESS(array, 23);
  GOOD_ACCESS(array, 24);
  GOOD_ACCESS(array, 60);
  GOOD_ACCESS(array, 95);
  BAD_ACCESS(array, 96);
  free(array);
}

TEST(AddressSanitizerInterface, PushAndPopWithPoisoningTest) {
  // Vector of capacity 20
  char *vec = Ident((char*)malloc(20));
  __asan_poison_memory_region(vec, 20);
  for (size_t i = 0; i < 7; i++) {
    // Simulate push_back.
    __asan_unpoison_memory_region(vec + i, 1);
    GOOD_ACCESS(vec, i);
    BAD_ACCESS(vec, i + 1);
  }
  for (size_t i = 7; i > 0; i--) {
    // Simulate pop_back.
    __asan_poison_memory_region(vec + i - 1, 1);
    BAD_ACCESS(vec, i - 1);
    if (i > 1) GOOD_ACCESS(vec, i - 2);
  }
  free(vec);
}
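
// The push_back/pop_back pattern above is what container annotations
// automate; later sanitizer run-times expose a similar interface as
// __sanitizer_annotate_contiguous_container() (an assumption about newer
// releases, not something this file uses).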

TEST(AddressSanitizerInterface, GlobalRedzones) {
  GOOD_ACCESS(glob1, 1 - 1);
  GOOD_ACCESS(glob2, 2 - 1);
  GOOD_ACCESS(glob3, 3 - 1);
  GOOD_ACCESS(glob4, 4 - 1);
  GOOD_ACCESS(glob5, 5 - 1);
  GOOD_ACCESS(glob6, 6 - 1);
  GOOD_ACCESS(glob7, 7 - 1);
  GOOD_ACCESS(glob8, 8 - 1);
  GOOD_ACCESS(glob9, 9 - 1);
  GOOD_ACCESS(glob10, 10 - 1);
  GOOD_ACCESS(glob11, 11 - 1);
  GOOD_ACCESS(glob12, 12 - 1);
  GOOD_ACCESS(glob13, 13 - 1);
  GOOD_ACCESS(glob14, 14 - 1);
  GOOD_ACCESS(glob15, 15 - 1);
  GOOD_ACCESS(glob16, 16 - 1);
  GOOD_ACCESS(glob17, 17 - 1);
  GOOD_ACCESS(glob1000, 1000 - 1);
  GOOD_ACCESS(glob10000, 10000 - 1);
  GOOD_ACCESS(glob100000, 100000 - 1);

  BAD_ACCESS(glob1, 1);
  BAD_ACCESS(glob2, 2);
  BAD_ACCESS(glob3, 3);
  BAD_ACCESS(glob4, 4);
  BAD_ACCESS(glob5, 5);
  BAD_ACCESS(glob6, 6);
  BAD_ACCESS(glob7, 7);
  BAD_ACCESS(glob8, 8);
  BAD_ACCESS(glob9, 9);
  BAD_ACCESS(glob10, 10);
  BAD_ACCESS(glob11, 11);
  BAD_ACCESS(glob12, 12);
  BAD_ACCESS(glob13, 13);
  BAD_ACCESS(glob14, 14);
  BAD_ACCESS(glob15, 15);
  BAD_ACCESS(glob16, 16);
  BAD_ACCESS(glob17, 17);
  BAD_ACCESS(glob1000, 1000);
  BAD_ACCESS(glob1000, 1100);  // Redzone is at least 101 bytes.
  BAD_ACCESS(glob10000, 10000);
  BAD_ACCESS(glob10000, 11000);  // Redzone is at least 1001 bytes.
  BAD_ACCESS(glob100000, 100000);
  BAD_ACCESS(glob100000, 110000);  // Redzone is at least 10001 bytes.
}

// Make sure that within each aligned block of size 2^granularity, no "true"
// (poisoned) byte precedes a "false" (addressable) byte.
static void MakeShadowValid(bool *shadow, int length, int granularity) {
  bool can_be_poisoned = true;
  for (int i = length - 1; i >= 0; i--) {
    if (!shadow[i])
      can_be_poisoned = false;
    if (!can_be_poisoned)
      shadow[i] = false;
    if (i % (1 << granularity) == 0) {
      can_be_poisoned = true;
    }
  }
}
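
// Why this is needed: a shadow byte encodes "the first k bytes of this
// 8-byte granule are addressable, the rest are poisoned", so a pattern like
// {addressable, poisoned, addressable} within one granule is not
// representable. MakeShadowValid() rewrites the naive expectation into the
// nearest pattern the shadow can actually express.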

TEST(AddressSanitizerInterface, PoisoningStressTest) {
  const size_t kSize = 24;
  bool expected[kSize];
  char *arr = Ident((char*)malloc(kSize));
  for (size_t l1 = 0; l1 < kSize; l1++) {
    for (size_t s1 = 1; l1 + s1 <= kSize; s1++) {
      for (size_t l2 = 0; l2 < kSize; l2++) {
        for (size_t s2 = 1; l2 + s2 <= kSize; s2++) {
          // Poison [l1, l1+s1), [l2, l2+s2) and check result.
          __asan_unpoison_memory_region(arr, kSize);
          __asan_poison_memory_region(arr + l1, s1);
          __asan_poison_memory_region(arr + l2, s2);
          memset(expected, false, kSize);
          memset(expected + l1, true, s1);
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          memset(expected + l2, true, s2);
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          for (size_t i = 0; i < kSize; i++) {
            ASSERT_EQ(expected[i], __asan_address_is_poisoned(arr + i));
          }
          // Unpoison [l1, l1+s1) and [l2, l2+s2) and check result.
          __asan_poison_memory_region(arr, kSize);
          __asan_unpoison_memory_region(arr + l1, s1);
          __asan_unpoison_memory_region(arr + l2, s2);
          memset(expected, true, kSize);
          memset(expected + l1, false, s1);
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          memset(expected + l2, false, s2);
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          for (size_t i = 0; i < kSize; i++) {
            ASSERT_EQ(expected[i], __asan_address_is_poisoned(arr + i));
          }
        }
      }
    }
  }
}

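// __asan_region_is_poisoned(beg, size) returns the address of the first
// poisoned byte in [beg, beg+size), or 0 if every byte in the region is
// addressable; the test below checks all three cases exhaustively.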
TEST(AddressSanitizerInterface, PoisonedRegion) {
  size_t rz = 16;
  for (size_t size = 1; size <= 64; size++) {
    char *p = new char[size];
    uptr x = reinterpret_cast<uptr>(p);
    for (size_t beg = 0; beg < size + rz; beg++) {
      for (size_t end = beg; end < size + rz; end++) {
        uptr first_poisoned = __asan_region_is_poisoned(x + beg, end - beg);
        if (beg == end) {
          EXPECT_FALSE(first_poisoned);
        } else if (beg < size && end <= size) {
          EXPECT_FALSE(first_poisoned);
        } else if (beg >= size) {
          EXPECT_EQ(x + beg, first_poisoned);
        } else {
          EXPECT_GT(end, size);
          EXPECT_EQ(x + size, first_poisoned);
        }
      }
    }
    delete [] p;
  }
}

// This is a performance benchmark for manual runs.
// ASan's memset interceptor calls mem_is_zero over the entire shadow region.
// The profile should look like this:
//     89.10%   [.] __memset_sse2
//     10.50%   [.] __sanitizer::mem_is_zero
// I.e., mem_is_zero should consume roughly SHADOW_GRANULARITY times fewer
// CPU cycles than memset itself.
TEST(AddressSanitizerInterface, DISABLED_StressLargeMemset) {
  size_t size = 1 << 20;
  char *x = new char[size];
  for (int i = 0; i < 100000; i++)
    Ident(memset)(x, 0, size);
  delete [] x;
}

// Same here, but we run memset with small sizes.
TEST(AddressSanitizerInterface, DISABLED_StressSmallMemset) {
  size_t size = 32;
  char *x = new char[size];
  for (int i = 0; i < 100000000; i++)
    Ident(memset)(x, 0, size);
  delete [] x;
}

static const char *kInvalidPoisonMessage = "invalid-poison-memory-range";
static const char *kInvalidUnpoisonMessage = "invalid-unpoison-memory-range";

TEST(AddressSanitizerInterface, DISABLED_InvalidPoisonAndUnpoisonCallsTest) {
  char *array = Ident((char*)malloc(120));
  __asan_unpoison_memory_region(array, 120);
  // Try to unpoison memory we don't own.
  EXPECT_DEATH(__asan_unpoison_memory_region(array, 121),
               kInvalidUnpoisonMessage);
  EXPECT_DEATH(__asan_unpoison_memory_region(array - 1, 120),
               kInvalidUnpoisonMessage);

  __asan_poison_memory_region(array, 120);
  // Try to poison memory we don't own.
  EXPECT_DEATH(__asan_poison_memory_region(array, 121), kInvalidPoisonMessage);
  EXPECT_DEATH(__asan_poison_memory_region(array - 1, 120),
               kInvalidPoisonMessage);
  free(array);
}

static void ErrorReportCallbackOneToZ(const char *report) {
  int report_len = strlen(report);
  ASSERT_EQ(6, write(2, "ABCDEF", 6));
  ASSERT_EQ(report_len, write(2, report, report_len));
  ASSERT_EQ(6, write(2, "ABCDEF", 6));
  _exit(1);
}

TEST(AddressSanitizerInterface, SetErrorReportCallbackTest) {
  __asan_set_error_report_callback(ErrorReportCallbackOneToZ);
  EXPECT_DEATH(__asan_report_error(0, 0, 0, 0, true, 1),
               ASAN_PCRE_DOTALL "ABCDEF.*AddressSanitizer.*WRITE.*ABCDEF");
  __asan_set_error_report_callback(NULL);
}

TEST(AddressSanitizerInterface, GetOwnershipStressTest) {
  std::vector<char *> pointers;
  std::vector<size_t> sizes;
  const size_t kNumMallocs = 1 << 9;
  for (size_t i = 0; i < kNumMallocs; i++) {
    size_t size = i * 100 + 1;
    pointers.push_back((char*)malloc(size));
    sizes.push_back(size);
  }
  for (size_t i = 0; i < 4000000; i++) {
    EXPECT_FALSE(__asan_get_ownership(&pointers));
    EXPECT_FALSE(__asan_get_ownership((void*)0x1234));
    size_t idx = i % kNumMallocs;
    EXPECT_TRUE(__asan_get_ownership(pointers[idx]));
    EXPECT_EQ(sizes[idx], __asan_get_allocated_size(pointers[idx]));
  }
  for (size_t i = 0, n = pointers.size(); i < n; i++)
    free(pointers[i]);
}

TEST(AddressSanitizerInterface, CallocOverflow) {
  size_t kArraySize = 4096;
  volatile size_t kMaxSizeT = std::numeric_limits<size_t>::max();
  volatile size_t kArraySize2 = kMaxSizeT / kArraySize + 10;
  void *p = calloc(kArraySize, kArraySize2);  // Should return 0.
  EXPECT_EQ(0L, Ident(p));
}
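
// What calloc must check: reject element-count * size products that
// overflow size_t, without performing the overflowing multiplication
// itself. A minimal sketch of the standard division-based check
// (illustrative only; the real check lives inside the ASan allocator):
bool CallocWouldOverflow(size_t nmemb, size_t size) {
  return size != 0 && nmemb > std::numeric_limits<size_t>::max() / size;
}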

TEST(AddressSanitizerInterface, CallocOverflow2) {
#if SANITIZER_WORDSIZE == 32
  size_t kArraySize = 112;
  volatile size_t kArraySize2 = 43878406;
  void *p = calloc(kArraySize, kArraySize2);  // Should return 0.
  EXPECT_EQ(0L, Ident(p));
#endif
}

TEST(AddressSanitizerInterface, CallocReturnsZeroMem) {
  size_t sizes[] = {16, 1000, 10000, 100000, 2100000};
  for (size_t s = 0; s < ARRAY_SIZE(sizes); s++) {
    size_t size = sizes[s];
    for (size_t iter = 0; iter < 5; iter++) {
      char *x = Ident((char*)calloc(1, size));
      EXPECT_EQ(x[0], 0);
      EXPECT_EQ(x[size - 1], 0);
      EXPECT_EQ(x[size / 2], 0);
      EXPECT_EQ(x[size / 3], 0);
      EXPECT_EQ(x[size / 4], 0);
      memset(x, 0x42, size);
      free(Ident(x));
      free(Ident(malloc(Ident(1 << 27))));  // Try to drain the quarantine.
    }
  }
}
795