//===-- asan_noinst_test.cc -----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// This test file should be compiled w/o asan instrumentation.
//===----------------------------------------------------------------------===//

#include "asan_allocator.h"
#include "asan_internal.h"
#include "asan_mapping.h"
#include "asan_stack.h"
#include "asan_test_utils.h"
#include "sanitizer/asan_interface.h"

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>  // for memset()
#include <algorithm>
#include <vector>

// Simple stand-alone pseudorandom number generator.
// The current algorithm is the ANSI C linear congruential PRNG.
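// Only the high-order bits are returned: the low-order bits of an LCG have
// very short periods.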
static inline u32 my_rand(u32* state) {
  return (*state = *state * 1103515245 + 12345) >> 16;
}

static u32 global_seed = 0;

TEST(AddressSanitizer, InternalSimpleDeathTest) {
  EXPECT_DEATH(exit(1), "");
}

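// Stress the allocator: on every third iteration free a randomly chosen live
// block, otherwise allocate a block of random size (1..1000 bytes,
// occasionally bumped by 1K/2K/4K) with a random alignment of 2..1024 bytes
// and touch its first, middle and last bytes.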
static void MallocStress(size_t n) {
  u32 seed = my_rand(&global_seed);
  __asan::StackTrace stack1;
  stack1.trace[0] = 0xa123;
  stack1.trace[1] = 0xa456;
  stack1.size = 2;

  __asan::StackTrace stack2;
  stack2.trace[0] = 0xb123;
  stack2.trace[1] = 0xb456;
  stack2.size = 2;

  __asan::StackTrace stack3;
  stack3.trace[0] = 0xc123;
  stack3.trace[1] = 0xc456;
  stack3.size = 2;

  std::vector<void *> vec;
  for (size_t i = 0; i < n; i++) {
    if ((i % 3) == 0) {
      if (vec.empty()) continue;
      size_t idx = my_rand(&seed) % vec.size();
      void *ptr = vec[idx];
      vec[idx] = vec.back();
      vec.pop_back();
      __asan::asan_free(ptr, &stack1);
    } else {
      size_t size = my_rand(&seed) % 1000 + 1;
      switch ((my_rand(&seed) % 128)) {
        case 0: size += 1024; break;
        case 1: size += 2048; break;
        case 2: size += 4096; break;
      }
      size_t alignment = 1 << (my_rand(&seed) % 10 + 1);
      char *ptr = (char*)__asan::asan_memalign(alignment, size, &stack2);
      vec.push_back(ptr);
      ptr[0] = 0;
      ptr[size-1] = 0;
      ptr[size/2] = 0;
    }
  }
  for (size_t i = 0; i < vec.size(); i++)
    __asan::asan_free(vec[i], &stack3);
}

TEST(AddressSanitizer, NoInstMallocTest) {
#ifdef __arm__
  MallocStress(300000);
#else
  MallocStress(1000000);
#endif
}

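// Print the shadow bytes around [ptr, ptr + size): each distinct shadow byte
// covering the range [-32, size + 32) is printed once, and the region
// boundaries are marked with '.'.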
static void PrintShadow(const char *tag, uptr ptr, size_t size) {
  fprintf(stderr, "%s shadow: %lx size % 3ld: ", tag, (long)ptr, (long)size);
  uptr prev_shadow = 0;
  for (sptr i = -32; i < (sptr)size + 32; i++) {
    uptr shadow = __asan::MemToShadow(ptr + i);
    if (i == 0 || i == (sptr)size)
      fprintf(stderr, ".");
    if (shadow != prev_shadow) {
      prev_shadow = shadow;
      fprintf(stderr, "%02x", (int)*(u8*)shadow);
    }
  }
  fprintf(stderr, "\n");
}

TEST(AddressSanitizer, DISABLED_InternalPrintShadow) {
  for (size_t size = 1; size <= 513; size++) {
    char *ptr = new char[size];
    PrintShadow("m", (uptr)ptr, size);
    delete [] ptr;
    PrintShadow("f", (uptr)ptr, size);
  }
}

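// A fixed pool of plausible program counter values, used as synthetic stack
// frames by the stack trace compression tests below.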
static uptr pc_array[] = {
#if __WORDSIZE == 64
  0x7effbf756068ULL,
  0x7effbf75e5abULL,
  0x7effc0625b7cULL,
  0x7effc05b8997ULL,
  0x7effbf990577ULL,
  0x7effbf990c56ULL,
  0x7effbf992f3cULL,
  0x7effbf950c22ULL,
  0x7effc036dba0ULL,
  0x7effc03638a3ULL,
  0x7effc035be4aULL,
  0x7effc0539c45ULL,
  0x7effc0539a65ULL,
  0x7effc03db9b3ULL,
  0x7effc03db100ULL,
  0x7effc037c7b8ULL,
  0x7effc037bfffULL,
  0x7effc038b777ULL,
  0x7effc038021cULL,
  0x7effc037c7d1ULL,
  0x7effc037bfffULL,
  0x7effc038b777ULL,
  0x7effc038021cULL,
  0x7effc037c7d1ULL,
  0x7effc037bfffULL,
  0x7effc038b777ULL,
  0x7effc038021cULL,
  0x7effc037c7d1ULL,
  0x7effc037bfffULL,
  0x7effc0520d26ULL,
  0x7effc009ddffULL,
  0x7effbf90bb50ULL,
  0x7effbdddfa69ULL,
  0x7effbdde1fe2ULL,
  0x7effbdde2424ULL,
  0x7effbdde27b3ULL,
  0x7effbddee53bULL,
  0x7effbdde1988ULL,
  0x7effbdde0904ULL,
  0x7effc106ce0dULL,
  0x7effbcc3fa04ULL,
  0x7effbcc3f6a4ULL,
  0x7effbcc3e726ULL,
  0x7effbcc40852ULL,
  0x7effb681ec4dULL,
#endif  // __WORDSIZE
  0xB0B5E768,
  0x7B682EC1,
  0x367F9918,
  0xAE34E13,
  0xBA0C6C6,
  0x13250F46,
  0xA0D6A8AB,
  0x2B07C1A8,
  0x6C844F4A,
  0x2321B53,
  0x1F3D4F8F,
  0x3FE2924B,
  0xB7A2F568,
  0xBD23950A,
  0x61020930,
  0x33E7970C,
  0x405998A1,
  0x59F3551D,
  0x350E3028,
  0xBC55A28D,
  0x361F3AED,
  0xBEAD0F73,
  0xAEF28479,
  0x757E971F,
  0xAEBA450,
  0x43AD22F5,
  0x8C2C50C4,
  0x7AD8A2E1,
  0x69EE4EE8,
  0xC08DFF,
  0x4BA6538,
  0x3708AB2,
  0xC24B6475,
  0x7C8890D7,
  0x6662495F,
  0x9B641689,
  0xD3596B,
  0xA1049569,
  0x44CBC16,
  0x4D39C39F
};

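// Round-trip test for stack trace compression: compress a random prefix of
// the shuffled PC array into a buffer of random size, uncompress it, and
// check that the recovered frames match the originals.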
void CompressStackTraceTest(size_t n_iter) {
  u32 seed = my_rand(&global_seed);
  const size_t kNumPcs = ARRAY_SIZE(pc_array);
  u32 compressed[2 * kNumPcs];

  for (size_t iter = 0; iter < n_iter; iter++) {
    std::random_shuffle(pc_array, pc_array + kNumPcs);
    __asan::StackTrace stack0, stack1;
    stack0.CopyFrom(pc_array, kNumPcs);
    stack0.size = std::max((size_t)1, (size_t)(my_rand(&seed) % stack0.size));
    size_t compress_size =
      std::max((size_t)2, (size_t)my_rand(&seed) % (2 * kNumPcs));
    size_t n_frames =
      __asan::StackTrace::CompressStack(&stack0, compressed, compress_size);
    Ident(n_frames);
    assert(n_frames <= stack0.size);
    __asan::StackTrace::UncompressStack(&stack1, compressed, compress_size);
    assert(stack1.size == n_frames);
    for (size_t i = 0; i < stack1.size; i++) {
      assert(stack0.trace[i] == stack1.trace[i]);
    }
  }
}

TEST(AddressSanitizer, CompressStackTraceTest) {
  CompressStackTraceTest(10000);
}

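// Benchmark: repeatedly compress the same full-size stack trace.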
void CompressStackTraceBenchmark(size_t n_iter) {
  const size_t kNumPcs = ARRAY_SIZE(pc_array);
  u32 compressed[2 * kNumPcs];
  std::random_shuffle(pc_array, pc_array + kNumPcs);

  __asan::StackTrace stack0;
  stack0.CopyFrom(pc_array, kNumPcs);
  stack0.size = kNumPcs;
  for (size_t iter = 0; iter < n_iter; iter++) {
    size_t compress_size = kNumPcs;
    size_t n_frames =
      __asan::StackTrace::CompressStack(&stack0, compressed, compress_size);
    Ident(n_frames);
  }
}

TEST(AddressSanitizer, CompressStackTraceBenchmark) {
  CompressStackTraceBenchmark(1 << 24);
}

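// Freed chunks are kept in a quarantine before being recycled, so the same
// address must not be returned by malloc again for a long while.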
TEST(AddressSanitizer, QuarantineTest) {
  __asan::StackTrace stack;
  stack.trace[0] = 0x890;
  stack.size = 1;

  const int size = 32;
  void *p = __asan::asan_malloc(size, &stack);
  __asan::asan_free(p, &stack);
  size_t i;
  size_t max_i = 1 << 30;
  for (i = 0; i < max_i; i++) {
    void *p1 = __asan::asan_malloc(size, &stack);
    __asan::asan_free(p1, &stack);
    if (p1 == p) break;
  }
  // fprintf(stderr, "i=%ld\n", i);
  EXPECT_GE(i, 100000U);
  EXPECT_LT(i, max_i);
}

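// Worker for ThreadedQuarantineTest: a thousand malloc/free pairs of random
// size, all performed on a freshly created thread.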
void *ThreadedQuarantineTestWorker(void *unused) {
  (void)unused;
  u32 seed = my_rand(&global_seed);
  __asan::StackTrace stack;
  stack.trace[0] = 0x890;
  stack.size = 1;

  for (size_t i = 0; i < 1000; i++) {
    void *p = __asan::asan_malloc(1 + (my_rand(&seed) % 4000), &stack);
    __asan::asan_free(p, &stack);
  }
  return NULL;
}

// Check that the thread-local allocators are flushed when threads are
// destroyed.
TEST(AddressSanitizer, ThreadedQuarantineTest) {
  const int n_threads = 3000;
  size_t mmaped1 = __asan_get_heap_size();
  for (int i = 0; i < n_threads; i++) {
    pthread_t t;
    pthread_create(&t, NULL, ThreadedQuarantineTestWorker, 0);
    pthread_join(t, 0);
    size_t mmaped2 = __asan_get_heap_size();
    EXPECT_LT(mmaped2 - mmaped1, 320U * (1 << 20));
  }
}

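// Repeatedly allocate and free batches of same-sized chunks; run from several
// threads by ThreadedOneSizeMallocStressTest below.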
void *ThreadedOneSizeMallocStress(void *unused) {
  (void)unused;
  __asan::StackTrace stack;
  stack.trace[0] = 0x890;
  stack.size = 1;
  const size_t kNumMallocs = 1000;
  for (int iter = 0; iter < 1000; iter++) {
    void *p[kNumMallocs];
    for (size_t i = 0; i < kNumMallocs; i++) {
      p[i] = __asan::asan_malloc(32, &stack);
    }
    for (size_t i = 0; i < kNumMallocs; i++) {
      __asan::asan_free(p[i], &stack);
    }
  }
  return NULL;
}

TEST(AddressSanitizer, ThreadedOneSizeMallocStressTest) {
  const int kNumThreads = 4;
  pthread_t t[kNumThreads];
  for (int i = 0; i < kNumThreads; i++) {
    pthread_create(&t[i], 0, ThreadedOneSizeMallocStress, 0);
  }
  for (int i = 0; i < kNumThreads; i++) {
    pthread_join(t[i], 0);
  }
}

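// memset() on an address inside the shadow or the shadow gap must die with a
// report naming the corresponding region.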
TEST(AddressSanitizer, MemsetWildAddressTest) {
  typedef void*(*memset_p)(void*, int, size_t);
  // Prevent inlining of memset().
  volatile memset_p libc_memset = (memset_p)memset;
  EXPECT_DEATH(libc_memset((void*)(kLowShadowBeg + kPageSize), 0, 100),
               "unknown-crash.*low shadow");
  EXPECT_DEATH(libc_memset((void*)(kShadowGapBeg + kPageSize), 0, 100),
               "unknown-crash.*shadow gap");
  EXPECT_DEATH(libc_memset((void*)(kHighShadowBeg + kPageSize), 0, 100),
               "unknown-crash.*high shadow");
}

TEST(AddressSanitizerInterface, GetEstimatedAllocatedSize) {
  EXPECT_EQ(1U, __asan_get_estimated_allocated_size(0));
  const size_t sizes[] = { 1, 30, 1<<30 };
  for (size_t i = 0; i < 3; i++) {
    EXPECT_EQ(sizes[i], __asan_get_estimated_allocated_size(sizes[i]));
  }
}

static const char* kGetAllocatedSizeErrorMsg =
  "attempting to call __asan_get_allocated_size()";

TEST(AddressSanitizerInterface, GetAllocatedSizeAndOwnershipTest) {
  const size_t kArraySize = 100;
  char *array = Ident((char*)malloc(kArraySize));
  int *int_ptr = Ident(new int);

  // Allocated memory is owned by the allocator. The allocated size should
  // equal the requested size.
  EXPECT_EQ(true, __asan_get_ownership(array));
  EXPECT_EQ(kArraySize, __asan_get_allocated_size(array));
  EXPECT_EQ(true, __asan_get_ownership(int_ptr));
  EXPECT_EQ(sizeof(int), __asan_get_allocated_size(int_ptr));

  // We cannot call __asan_get_allocated_size() on memory we didn't allocate,
  // or on interior pointers (not returned by a previous malloc).
  void *wild_addr = (void*)0x1;
  EXPECT_FALSE(__asan_get_ownership(wild_addr));
  EXPECT_DEATH(__asan_get_allocated_size(wild_addr), kGetAllocatedSizeErrorMsg);
  EXPECT_FALSE(__asan_get_ownership(array + kArraySize / 2));
  EXPECT_DEATH(__asan_get_allocated_size(array + kArraySize / 2),
               kGetAllocatedSizeErrorMsg);

  // NULL is not owned, but is a valid argument for __asan_get_allocated_size().
  EXPECT_FALSE(__asan_get_ownership(NULL));
  EXPECT_EQ(0U, __asan_get_allocated_size(NULL));

  // Freed memory is no longer owned, and calling __asan_get_allocated_size()
  // on it is forbidden.
  free(array);
  EXPECT_FALSE(__asan_get_ownership(array));
  EXPECT_DEATH(__asan_get_allocated_size(array), kGetAllocatedSizeErrorMsg);

  delete int_ptr;
}

TEST(AddressSanitizerInterface, GetCurrentAllocatedBytesTest) {
  size_t before_malloc, after_malloc, after_free;
  char *array;
  const size_t kMallocSize = 100;
  before_malloc = __asan_get_current_allocated_bytes();

  array = Ident((char*)malloc(kMallocSize));
  after_malloc = __asan_get_current_allocated_bytes();
  EXPECT_EQ(before_malloc + kMallocSize, after_malloc);

  free(array);
  after_free = __asan_get_current_allocated_bytes();
  EXPECT_EQ(before_malloc, after_free);
}

static void DoDoubleFree() {
  int *x = Ident(new int);
  delete Ident(x);
  delete Ident(x);
}

// This test is run in a separate process, so that the large malloc-ed chunk
// won't remain in the free lists after the test.
// Note: use ASSERT_* instead of EXPECT_* here.
static void RunGetHeapSizeTestAndDie() {
  size_t old_heap_size, new_heap_size, heap_growth;
  // We are unlikely to have a chunk of this size in the free list.
  static const size_t kLargeMallocSize = 1 << 29;  // 512M
  old_heap_size = __asan_get_heap_size();
  fprintf(stderr, "allocating %zu bytes:\n", kLargeMallocSize);
  free(Ident(malloc(kLargeMallocSize)));
  new_heap_size = __asan_get_heap_size();
  heap_growth = new_heap_size - old_heap_size;
  fprintf(stderr, "heap growth after first malloc: %zu\n", heap_growth);
  ASSERT_GE(heap_growth, kLargeMallocSize);
  ASSERT_LE(heap_growth, 2 * kLargeMallocSize);

  // Now the large chunk should be in the free list and can be allocated
  // again without increasing the heap size.
  old_heap_size = new_heap_size;
  free(Ident(malloc(kLargeMallocSize)));
  heap_growth = __asan_get_heap_size() - old_heap_size;
  fprintf(stderr, "heap growth after second malloc: %zu\n", heap_growth);
  ASSERT_LT(heap_growth, kLargeMallocSize);

  // Test passed. Now die with the expected double-free.
  DoDoubleFree();
}

TEST(AddressSanitizerInterface, GetHeapSizeTest) {
  EXPECT_DEATH(RunGetHeapSizeTestAndDie(), "double-free");
}

// Note: use ASSERT_* instead of EXPECT_* here.
static void DoLargeMallocForGetFreeBytesTestAndDie() {
  size_t old_free_bytes, new_free_bytes;
  static const size_t kLargeMallocSize = 1 << 29;  // 512M
  // If we malloc and free a large memory chunk, it will not fall
  // into quarantine and will be available for future requests.
  old_free_bytes = __asan_get_free_bytes();
  fprintf(stderr, "allocating %zu bytes:\n", kLargeMallocSize);
  fprintf(stderr, "free bytes before malloc: %zu\n", old_free_bytes);
  free(Ident(malloc(kLargeMallocSize)));
  new_free_bytes = __asan_get_free_bytes();
  fprintf(stderr, "free bytes after malloc and free: %zu\n", new_free_bytes);
  ASSERT_GE(new_free_bytes, old_free_bytes + kLargeMallocSize);
  // Test passed.
  DoDoubleFree();
}

TEST(AddressSanitizerInterface, GetFreeBytesTest) {
  static const size_t kNumOfChunks = 100;
  static const size_t kChunkSize = 100;
  char *chunks[kNumOfChunks];
  size_t i;
  size_t old_free_bytes, new_free_bytes;
  // Allocate a small chunk. The allocator now probably has a lot of free
  // chunks of this size ready to fulfill future requests, so each further
  // request will decrease the number of free bytes.
  chunks[0] = Ident((char*)malloc(kChunkSize));
  old_free_bytes = __asan_get_free_bytes();
  for (i = 1; i < kNumOfChunks; i++) {
    chunks[i] = Ident((char*)malloc(kChunkSize));
    new_free_bytes = __asan_get_free_bytes();
    EXPECT_LT(new_free_bytes, old_free_bytes);
    old_free_bytes = new_free_bytes;
  }
  EXPECT_DEATH(DoLargeMallocForGetFreeBytesTestAndDie(), "double-free");
  for (i = 0; i < kNumOfChunks; i++)
    free(chunks[i]);
}

static const size_t kManyThreadsMallocSizes[] = {5, 1UL<<10, 1UL<<20, 357};
static const size_t kManyThreadsIterations = 250;
static const size_t kManyThreadsNumThreads = (__WORDSIZE == 32) ? 40 : 200;

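// Worker for ManyThreadsWithStatsStressTest: malloc and immediately free each
// of the four fixed sizes, kManyThreadsIterations times. The test below
// checks that the global allocation stats stay near their initial value after
// many such threads exit.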
void *ManyThreadsWithStatsWorker(void *arg) {
  (void)arg;
  for (size_t iter = 0; iter < kManyThreadsIterations; iter++) {
    for (size_t size_index = 0; size_index < 4; size_index++) {
      free(Ident(malloc(kManyThreadsMallocSizes[size_index])));
    }
  }
  return 0;
}

TEST(AddressSanitizerInterface, ManyThreadsWithStatsStressTest) {
  size_t before_test, after_test, i;
  pthread_t threads[kManyThreadsNumThreads];
  before_test = __asan_get_current_allocated_bytes();
  for (i = 0; i < kManyThreadsNumThreads; i++) {
    pthread_create(&threads[i], 0,
                   (void* (*)(void *x))ManyThreadsWithStatsWorker, (void*)i);
  }
  for (i = 0; i < kManyThreadsNumThreads; i++) {
    pthread_join(threads[i], 0);
  }
  after_test = __asan_get_current_allocated_bytes();
  // ASan stats also reflect memory usage of internal ASan RTL structs,
  // so we can't check for equality here.
  EXPECT_LT(after_test, before_test + (1UL<<20));
}

TEST(AddressSanitizerInterface, ExitCode) {
  int original_exit_code = __asan_set_error_exit_code(7);
  EXPECT_EXIT(DoDoubleFree(), ::testing::ExitedWithCode(7), "");
  EXPECT_EQ(7, __asan_set_error_exit_code(8));
  EXPECT_EXIT(DoDoubleFree(), ::testing::ExitedWithCode(8), "");
  EXPECT_EQ(8, __asan_set_error_exit_code(original_exit_code));
  EXPECT_EXIT(DoDoubleFree(),
              ::testing::ExitedWithCode(original_exit_code), "");
}

static void MyDeathCallback() {
  fprintf(stderr, "MyDeathCallback\n");
}

TEST(AddressSanitizerInterface, DeathCallbackTest) {
  __asan_set_death_callback(MyDeathCallback);
  EXPECT_DEATH(DoDoubleFree(), "MyDeathCallback");
  __asan_set_death_callback(NULL);
}

static const char* kUseAfterPoisonErrorMessage = "use-after-poison";

#define GOOD_ACCESS(ptr, offset)  \
    EXPECT_FALSE(__asan::AddressIsPoisoned((uptr)(ptr + offset)))

#define BAD_ACCESS(ptr, offset) \
    EXPECT_TRUE(__asan::AddressIsPoisoned((uptr)(ptr + offset)))

TEST(AddressSanitizerInterface, SimplePoisonMemoryRegionTest) {
  char *array = Ident((char*)malloc(120));
  // Poison array[40..80).
  __asan_poison_memory_region(array + 40, 40);
  GOOD_ACCESS(array, 39);
  GOOD_ACCESS(array, 80);
  BAD_ACCESS(array, 40);
  BAD_ACCESS(array, 60);
  BAD_ACCESS(array, 79);
  EXPECT_DEATH(__asan_report_error(0, 0, 0, (uptr)(array + 40), true, 1),
               kUseAfterPoisonErrorMessage);
  __asan_unpoison_memory_region(array + 40, 40);
  // The previously poisoned memory is accessible again.
  GOOD_ACCESS(array, 40);
  GOOD_ACCESS(array, 79);
  free(array);
}

TEST(AddressSanitizerInterface, OverlappingPoisonMemoryRegionTest) {
  char *array = Ident((char*)malloc(120));
  // Poison [0..40) and [80..120).
  __asan_poison_memory_region(array, 40);
  __asan_poison_memory_region(array + 80, 40);
  BAD_ACCESS(array, 20);
  GOOD_ACCESS(array, 60);
  BAD_ACCESS(array, 100);
  // Poison the whole array: [0..120).
  __asan_poison_memory_region(array, 120);
  BAD_ACCESS(array, 60);
  // Unpoison [24..96).
  __asan_unpoison_memory_region(array + 24, 72);
  BAD_ACCESS(array, 23);
  GOOD_ACCESS(array, 24);
  GOOD_ACCESS(array, 60);
  GOOD_ACCESS(array, 95);
  BAD_ACCESS(array, 96);
  free(array);
}

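// Emulate a container that poisons its unused capacity: each push_back
// unpoisons one byte past the end, each pop_back re-poisons it.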
TEST(AddressSanitizerInterface, PushAndPopWithPoisoningTest) {
  // Vector of capacity 20.
  char *vec = Ident((char*)malloc(20));
  __asan_poison_memory_region(vec, 20);
  for (size_t i = 0; i < 7; i++) {
    // Simulate push_back.
    __asan_unpoison_memory_region(vec + i, 1);
    GOOD_ACCESS(vec, i);
    BAD_ACCESS(vec, i + 1);
  }
  for (size_t i = 7; i > 0; i--) {
    // Simulate pop_back.
    __asan_poison_memory_region(vec + i - 1, 1);
    BAD_ACCESS(vec, i - 1);
    if (i > 1) GOOD_ACCESS(vec, i - 2);
  }
  free(vec);
}

// Make sure that within each aligned block of size 2^granularity, a "true"
// (poisoned) value never precedes a "false" (unpoisoned) value. ASan shadow
// can only represent granules whose addressable bytes form a prefix, so the
// expected pattern must be canonicalized the same way.
static void MakeShadowValid(bool *shadow, int length, int granularity) {
  bool can_be_poisoned = true;
  for (int i = length - 1; i >= 0; i--) {
    if (!shadow[i])
      can_be_poisoned = false;
    if (!can_be_poisoned)
      shadow[i] = false;
    if (i % (1 << granularity) == 0) {
      can_be_poisoned = true;
    }
  }
}

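// Exhaustively poison/unpoison every pair of sub-ranges of a small array and
// compare the actual shadow with a model canonicalized by MakeShadowValid().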
TEST(AddressSanitizerInterface, PoisoningStressTest) {
  const size_t kSize = 24;
  bool expected[kSize];
  char *arr = Ident((char*)malloc(kSize));
  for (size_t l1 = 0; l1 < kSize; l1++) {
    for (size_t s1 = 1; l1 + s1 <= kSize; s1++) {
      for (size_t l2 = 0; l2 < kSize; l2++) {
        for (size_t s2 = 1; l2 + s2 <= kSize; s2++) {
          // Poison [l1, l1+s1), [l2, l2+s2) and check result.
          __asan_unpoison_memory_region(arr, kSize);
          __asan_poison_memory_region(arr + l1, s1);
          __asan_poison_memory_region(arr + l2, s2);
          memset(expected, false, kSize);
          memset(expected + l1, true, s1);
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          memset(expected + l2, true, s2);
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          for (size_t i = 0; i < kSize; i++) {
            ASSERT_EQ(expected[i], __asan_address_is_poisoned(arr + i));
          }
          // Unpoison [l1, l1+s1) and [l2, l2+s2) and check result.
          __asan_poison_memory_region(arr, kSize);
          __asan_unpoison_memory_region(arr + l1, s1);
          __asan_unpoison_memory_region(arr + l2, s2);
          memset(expected, true, kSize);
          memset(expected + l1, false, s1);
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          memset(expected + l2, false, s2);
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          for (size_t i = 0; i < kSize; i++) {
            ASSERT_EQ(expected[i], __asan_address_is_poisoned(arr + i));
          }
        }
      }
    }
  }
}

static const char *kInvalidPoisonMessage = "invalid-poison-memory-range";
static const char *kInvalidUnpoisonMessage = "invalid-unpoison-memory-range";

TEST(AddressSanitizerInterface, DISABLED_InvalidPoisonAndUnpoisonCallsTest) {
  char *array = Ident((char*)malloc(120));
  __asan_unpoison_memory_region(array, 120);
  // Try to unpoison memory we don't own.
  EXPECT_DEATH(__asan_unpoison_memory_region(array, 121),
               kInvalidUnpoisonMessage);
  EXPECT_DEATH(__asan_unpoison_memory_region(array - 1, 120),
               kInvalidUnpoisonMessage);

  __asan_poison_memory_region(array, 120);
  // Try to poison memory we don't own.
  EXPECT_DEATH(__asan_poison_memory_region(array, 121), kInvalidPoisonMessage);
  EXPECT_DEATH(__asan_poison_memory_region(array - 1, 120),
               kInvalidPoisonMessage);
  free(array);
}

static void ErrorReportCallbackOneToZ(const char *report) {
  int report_len = strlen(report);
  ASSERT_EQ(6, write(2, "ABCDEF", 6));
  ASSERT_EQ(report_len, write(2, report, report_len));
  ASSERT_EQ(6, write(2, "ABCDEF", 6));
  _exit(1);
}

TEST(AddressSanitizerInterface, SetErrorReportCallbackTest) {
  __asan_set_error_report_callback(ErrorReportCallbackOneToZ);
  EXPECT_DEATH(__asan_report_error(0, 0, 0, 0, true, 1),
               ASAN_PCRE_DOTALL "ABCDEF.*AddressSanitizer.*WRITE.*ABCDEF");
  __asan_set_error_report_callback(NULL);
}

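// Stress __asan_get_ownership()/__asan_get_allocated_size(): heap pointers
// are owned and report their exact allocation size; stack addresses and wild
// addresses are not owned.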
TEST(AddressSanitizerInterface, GetOwnershipStressTest) {
  std::vector<char *> pointers;
  std::vector<size_t> sizes;
  const size_t kNumMallocs =
      (__WORDSIZE <= 32 || ASAN_LOW_MEMORY) ? 1 << 10 : 1 << 14;
  for (size_t i = 0; i < kNumMallocs; i++) {
    size_t size = i * 100 + 1;
    pointers.push_back((char*)malloc(size));
    sizes.push_back(size);
  }
  for (size_t i = 0; i < 4000000; i++) {
    EXPECT_FALSE(__asan_get_ownership(&pointers));
    EXPECT_FALSE(__asan_get_ownership((void*)0x1234));
    size_t idx = i % kNumMallocs;
    EXPECT_TRUE(__asan_get_ownership(pointers[idx]));
    EXPECT_EQ(sizes[idx], __asan_get_allocated_size(pointers[idx]));
  }
  for (size_t i = 0, n = pointers.size(); i < n; i++)
    free(pointers[i]);
}
709