//===-- asan_noinst_test.cc -----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// This test file should be compiled w/o asan instrumentation.
//===----------------------------------------------------------------------===//

#include "asan_allocator.h"
#include "asan_internal.h"
#include "asan_mapping.h"
#include "asan_stack.h"
#include "asan_test_utils.h"
#include "sanitizer/asan_interface.h"

#include <assert.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>  // for memset()
#include <unistd.h>  // for write(), _exit()
#include <algorithm>
#include <vector>

// Simple stand-alone pseudorandom number generator.
// Current algorithm is ANSI C linear congruential PRNG.
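// The low-order bits of an LCG are the weakest, so my_rand() returns the
// high bits of the updated state.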
static inline u32 my_rand(u32* state) {
  return (*state = *state * 1103515245 + 12345) >> 16;
}

static u32 global_seed = 0;


TEST(AddressSanitizer, InternalSimpleDeathTest) {
  EXPECT_DEATH(exit(1), "");
}

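// Stress the allocator with n randomized operations: every third iteration
// frees a random live pointer; the rest allocate a randomly sized, randomly
// aligned block and touch its first, middle, and last bytes. The synthetic
// stack traces stand in for real unwinds.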
static void MallocStress(size_t n) {
  u32 seed = my_rand(&global_seed);
  __asan::StackTrace stack1;
  stack1.trace[0] = 0xa123;
  stack1.trace[1] = 0xa456;
  stack1.size = 2;

  __asan::StackTrace stack2;
  stack2.trace[0] = 0xb123;
  stack2.trace[1] = 0xb456;
  stack2.size = 2;

  __asan::StackTrace stack3;
  stack3.trace[0] = 0xc123;
  stack3.trace[1] = 0xc456;
  stack3.size = 2;

  std::vector<void *> vec;
  for (size_t i = 0; i < n; i++) {
    if ((i % 3) == 0) {
      if (vec.empty()) continue;
      size_t idx = my_rand(&seed) % vec.size();
      void *ptr = vec[idx];
      vec[idx] = vec.back();
      vec.pop_back();
      __asan::asan_free(ptr, &stack1);
    } else {
      size_t size = my_rand(&seed) % 1000 + 1;
      switch ((my_rand(&seed) % 128)) {
        case 0: size += 1024; break;
        case 1: size += 2048; break;
        case 2: size += 4096; break;
      }
      size_t alignment = 1 << (my_rand(&seed) % 10 + 1);
      char *ptr = (char*)__asan::asan_memalign(alignment, size, &stack2);
      vec.push_back(ptr);
      ptr[0] = 0;
      ptr[size-1] = 0;
      ptr[size/2] = 0;
    }
  }
  for (size_t i = 0; i < vec.size(); i++)
    __asan::asan_free(vec[i], &stack3);
}


TEST(AddressSanitizer, NoInstMallocTest) {
#ifdef __arm__
  MallocStress(300000);
#else
  MallocStress(1000000);
#endif
}

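// Dump to stderr the shadow values around [ptr, ptr+size): each shadow byte
// is printed once as the scan crosses into it, with dots marking the
// region's start and end.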
static void PrintShadow(const char *tag, uptr ptr, size_t size) {
  fprintf(stderr, "%s shadow: %lx size % 3ld: ", tag, (long)ptr, (long)size);
  uptr prev_shadow = 0;
  for (sptr i = -32; i < (sptr)size + 32; i++) {
    uptr shadow = __asan::MemToShadow(ptr + i);
    if (i == 0 || i == (sptr)size)
      fprintf(stderr, ".");
    if (shadow != prev_shadow) {
      prev_shadow = shadow;
      fprintf(stderr, "%02x", (int)*(u8*)shadow);
    }
  }
  fprintf(stderr, "\n");
}

TEST(AddressSanitizer, DISABLED_InternalPrintShadow) {
  for (size_t size = 1; size <= 513; size++) {
    char *ptr = new char[size];
    PrintShadow("m", (uptr)ptr, size);
    delete [] ptr;
    PrintShadow("f", (uptr)ptr, size);
  }
}

static uptr pc_array[] = {
#if __WORDSIZE == 64
  0x7effbf756068ULL,
  0x7effbf75e5abULL,
  0x7effc0625b7cULL,
  0x7effc05b8997ULL,
  0x7effbf990577ULL,
  0x7effbf990c56ULL,
  0x7effbf992f3cULL,
  0x7effbf950c22ULL,
  0x7effc036dba0ULL,
  0x7effc03638a3ULL,
  0x7effc035be4aULL,
  0x7effc0539c45ULL,
  0x7effc0539a65ULL,
  0x7effc03db9b3ULL,
  0x7effc03db100ULL,
  0x7effc037c7b8ULL,
  0x7effc037bfffULL,
  0x7effc038b777ULL,
  0x7effc038021cULL,
  0x7effc037c7d1ULL,
  0x7effc037bfffULL,
  0x7effc038b777ULL,
  0x7effc038021cULL,
  0x7effc037c7d1ULL,
  0x7effc037bfffULL,
  0x7effc038b777ULL,
  0x7effc038021cULL,
  0x7effc037c7d1ULL,
  0x7effc037bfffULL,
  0x7effc0520d26ULL,
  0x7effc009ddffULL,
  0x7effbf90bb50ULL,
  0x7effbdddfa69ULL,
  0x7effbdde1fe2ULL,
  0x7effbdde2424ULL,
  0x7effbdde27b3ULL,
  0x7effbddee53bULL,
  0x7effbdde1988ULL,
  0x7effbdde0904ULL,
  0x7effc106ce0dULL,
  0x7effbcc3fa04ULL,
  0x7effbcc3f6a4ULL,
  0x7effbcc3e726ULL,
  0x7effbcc40852ULL,
  0x7effb681ec4dULL,
#endif  // __WORDSIZE
  0xB0B5E768,
  0x7B682EC1,
  0x367F9918,
  0xAE34E13,
  0xBA0C6C6,
  0x13250F46,
  0xA0D6A8AB,
  0x2B07C1A8,
  0x6C844F4A,
  0x2321B53,
  0x1F3D4F8F,
  0x3FE2924B,
  0xB7A2F568,
  0xBD23950A,
  0x61020930,
  0x33E7970C,
  0x405998A1,
  0x59F3551D,
  0x350E3028,
  0xBC55A28D,
  0x361F3AED,
  0xBEAD0F73,
  0xAEF28479,
  0x757E971F,
  0xAEBA450,
  0x43AD22F5,
  0x8C2C50C4,
  0x7AD8A2E1,
  0x69EE4EE8,
  0xC08DFF,
  0x4BA6538,
  0x3708AB2,
  0xC24B6475,
  0x7C8890D7,
  0x6662495F,
  0x9B641689,
  0xD3596B,
  0xA1049569,
  0x44CBC16,
  0x4D39C39F
};

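// Round-trip test: shuffle the PCs, take a random-length prefix as a stack
// trace, compress it into a buffer of random size, then uncompress and check
// that the recovered frames match the original prefix.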
void CompressStackTraceTest(size_t n_iter) {
  u32 seed = my_rand(&global_seed);
  const size_t kNumPcs = ARRAY_SIZE(pc_array);
  u32 compressed[2 * kNumPcs];

  for (size_t iter = 0; iter < n_iter; iter++) {
    std::random_shuffle(pc_array, pc_array + kNumPcs);
    __asan::StackTrace stack0, stack1;
    stack0.CopyFrom(pc_array, kNumPcs);
    stack0.size = std::max((size_t)1, (size_t)(my_rand(&seed) % stack0.size));
    size_t compress_size =
      std::max((size_t)2, (size_t)my_rand(&seed) % (2 * kNumPcs));
    size_t n_frames =
      __asan::StackTrace::CompressStack(&stack0, compressed, compress_size);
    Ident(n_frames);
    assert(n_frames <= stack0.size);
    __asan::StackTrace::UncompressStack(&stack1, compressed, compress_size);
    assert(stack1.size == n_frames);
    for (size_t i = 0; i < stack1.size; i++) {
      assert(stack0.trace[i] == stack1.trace[i]);
    }
  }
}

TEST(AddressSanitizer, CompressStackTraceTest) {
  CompressStackTraceTest(10000);
}

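// Benchmark only: compress the same full-length trace many times. Ident()
// hides the result from the optimizer so the work isn't optimized away.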
void CompressStackTraceBenchmark(size_t n_iter) {
  const size_t kNumPcs = ARRAY_SIZE(pc_array);
  u32 compressed[2 * kNumPcs];
  std::random_shuffle(pc_array, pc_array + kNumPcs);

  __asan::StackTrace stack0;
  stack0.CopyFrom(pc_array, kNumPcs);
  stack0.size = kNumPcs;
  for (size_t iter = 0; iter < n_iter; iter++) {
    size_t compress_size = kNumPcs;
    size_t n_frames =
      __asan::StackTrace::CompressStack(&stack0, compressed, compress_size);
    Ident(n_frames);
  }
}

TEST(AddressSanitizer, CompressStackTraceBenchmark) {
  CompressStackTraceBenchmark(1 << 24);
}

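// Free a pointer, then malloc/free blocks of the same size until the original
// address comes back. The quarantine delays reuse of freed chunks, so this
// should take a large (but bounded) number of iterations.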
TEST(AddressSanitizer, QuarantineTest) {
  __asan::StackTrace stack;
  stack.trace[0] = 0x890;
  stack.size = 1;

  const int size = 32;
  void *p = __asan::asan_malloc(size, &stack);
  __asan::asan_free(p, &stack);
  size_t i;
  size_t max_i = 1 << 30;
  for (i = 0; i < max_i; i++) {
    void *p1 = __asan::asan_malloc(size, &stack);
    __asan::asan_free(p1, &stack);
    if (p1 == p) break;
  }
  // fprintf(stderr, "i=%ld\n", i);
  EXPECT_GE(i, 100000U);
  EXPECT_LT(i, max_i);
}

void *ThreadedQuarantineTestWorker(void *unused) {
  (void)unused;
  u32 seed = my_rand(&global_seed);
  __asan::StackTrace stack;
  stack.trace[0] = 0x890;
  stack.size = 1;

  for (size_t i = 0; i < 1000; i++) {
    void *p = __asan::asan_malloc(1 + (my_rand(&seed) % 4000), &stack);
    __asan::asan_free(p, &stack);
  }
  return NULL;
}

// Check that the thread local allocators are flushed when threads are
// destroyed.
TEST(AddressSanitizer, ThreadedQuarantineTest) {
  const int n_threads = 3000;
  size_t mmaped1 = __asan_get_heap_size();
  for (int i = 0; i < n_threads; i++) {
    pthread_t t;
    pthread_create(&t, NULL, ThreadedQuarantineTestWorker, 0);
    pthread_join(t, 0);
    size_t mmaped2 = __asan_get_heap_size();
    EXPECT_LT(mmaped2 - mmaped1, 320U * (1 << 20));
  }
}

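// Worker that repeatedly allocates and frees a batch of fixed-size blocks;
// several of these run concurrently in the test below.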
void *ThreadedOneSizeMallocStress(void *unused) {
  (void)unused;
  __asan::StackTrace stack;
  stack.trace[0] = 0x890;
  stack.size = 1;
  const size_t kNumMallocs = 1000;
  for (int iter = 0; iter < 1000; iter++) {
    void *p[kNumMallocs];
    for (size_t i = 0; i < kNumMallocs; i++) {
      p[i] = __asan::asan_malloc(32, &stack);
    }
    for (size_t i = 0; i < kNumMallocs; i++) {
      __asan::asan_free(p[i], &stack);
    }
  }
  return NULL;
}

TEST(AddressSanitizer, ThreadedOneSizeMallocStressTest) {
  const int kNumThreads = 4;
  pthread_t t[kNumThreads];
  for (int i = 0; i < kNumThreads; i++) {
    pthread_create(&t[i], 0, ThreadedOneSizeMallocStress, 0);
  }
  for (int i = 0; i < kNumThreads; i++) {
    pthread_join(t[i], 0);
  }
}

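// Writes into ASan's own shadow memory and into the shadow gap are not valid
// application accesses; each one must abort with a report naming the region.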
TEST(AddressSanitizer, MemsetWildAddressTest) {
  typedef void*(*memset_p)(void*, int, size_t);
  // Prevent inlining of memset().
  volatile memset_p libc_memset = (memset_p)memset;
  EXPECT_DEATH(libc_memset((void*)(kLowShadowBeg + kPageSize), 0, 100),
               "unknown-crash.*low shadow");
  EXPECT_DEATH(libc_memset((void*)(kShadowGapBeg + kPageSize), 0, 100),
               "unknown-crash.*shadow gap");
  EXPECT_DEATH(libc_memset((void*)(kHighShadowBeg + kPageSize), 0, 100),
               "unknown-crash.*high shadow");
}

TEST(AddressSanitizerInterface, GetEstimatedAllocatedSize) {
  EXPECT_EQ(1U, __asan_get_estimated_allocated_size(0));
  const size_t sizes[] = { 1, 30, 1<<30 };
  for (size_t i = 0; i < 3; i++) {
    EXPECT_EQ(sizes[i], __asan_get_estimated_allocated_size(sizes[i]));
  }
}

static const char* kGetAllocatedSizeErrorMsg =
  "attempting to call __asan_get_allocated_size()";

TEST(AddressSanitizerInterface, GetAllocatedSizeAndOwnershipTest) {
  const size_t kArraySize = 100;
  char *array = Ident((char*)malloc(kArraySize));
  int *int_ptr = Ident(new int);

  // Allocated memory is owned by the allocator, and the allocated size
  // should equal the requested size.
  EXPECT_EQ(true, __asan_get_ownership(array));
  EXPECT_EQ(kArraySize, __asan_get_allocated_size(array));
  EXPECT_EQ(true, __asan_get_ownership(int_ptr));
  EXPECT_EQ(sizeof(int), __asan_get_allocated_size(int_ptr));

  // We cannot call __asan_get_allocated_size() on memory we didn't allocate,
  // or on interior pointers (ones not returned by a previous malloc).
  void *wild_addr = (void*)0x1;
  EXPECT_EQ(false, __asan_get_ownership(wild_addr));
  EXPECT_DEATH(__asan_get_allocated_size(wild_addr), kGetAllocatedSizeErrorMsg);
  EXPECT_EQ(false, __asan_get_ownership(array + kArraySize / 2));
  EXPECT_DEATH(__asan_get_allocated_size(array + kArraySize / 2),
               kGetAllocatedSizeErrorMsg);

  // NULL is not owned, but is a valid argument for __asan_get_allocated_size().
  EXPECT_EQ(false, __asan_get_ownership(NULL));
  EXPECT_EQ(0U, __asan_get_allocated_size(NULL));

  // Once memory is freed, it is no longer owned, and calling
  // __asan_get_allocated_size() on it is forbidden.
  free(array);
  EXPECT_EQ(false, __asan_get_ownership(array));
  EXPECT_DEATH(__asan_get_allocated_size(array), kGetAllocatedSizeErrorMsg);

  delete int_ptr;
}

TEST(AddressSanitizerInterface, GetCurrentAllocatedBytesTest) {
  size_t before_malloc, after_malloc, after_free;
  char *array;
  const size_t kMallocSize = 100;
  before_malloc = __asan_get_current_allocated_bytes();

  array = Ident((char*)malloc(kMallocSize));
  after_malloc = __asan_get_current_allocated_bytes();
  EXPECT_EQ(before_malloc + kMallocSize, after_malloc);

  free(array);
  after_free = __asan_get_current_allocated_bytes();
  EXPECT_EQ(before_malloc, after_free);
}

static void DoDoubleFree() {
  int *x = Ident(new int);
  delete Ident(x);
  delete Ident(x);
}

// This test is run in a separate process, so that the large malloc'ed
// chunk won't remain in the free lists after the test.
// Note: use ASSERT_* instead of EXPECT_* here.
static void RunGetHeapSizeTestAndDie() {
  size_t old_heap_size, new_heap_size, heap_growth;
  // We are unlikely to have a chunk of this size in the free list.
  static const size_t kLargeMallocSize = 1 << 29;  // 512M
  old_heap_size = __asan_get_heap_size();
  fprintf(stderr, "allocating %zu bytes:\n", kLargeMallocSize);
  free(Ident(malloc(kLargeMallocSize)));
  new_heap_size = __asan_get_heap_size();
  heap_growth = new_heap_size - old_heap_size;
  fprintf(stderr, "heap growth after first malloc: %zu\n", heap_growth);
  ASSERT_GE(heap_growth, kLargeMallocSize);
  ASSERT_LE(heap_growth, 2 * kLargeMallocSize);

  // Now the large chunk should be in the free list, so it can be
  // allocated again without increasing the heap size.
  old_heap_size = new_heap_size;
  free(Ident(malloc(kLargeMallocSize)));
  heap_growth = __asan_get_heap_size() - old_heap_size;
  fprintf(stderr, "heap growth after second malloc: %zu\n", heap_growth);
  ASSERT_LT(heap_growth, kLargeMallocSize);

  // Test passed. Now die with the expected double-free.
  DoDoubleFree();
}

TEST(AddressSanitizerInterface, GetHeapSizeTest) {
  EXPECT_DEATH(RunGetHeapSizeTestAndDie(), "double-free");
}

// Note: use ASSERT_* instead of EXPECT_* here.
static void DoLargeMallocForGetFreeBytesTestAndDie() {
  size_t old_free_bytes, new_free_bytes;
  static const size_t kLargeMallocSize = 1 << 29;  // 512M
  // If we malloc and free a large memory chunk, it will not fall
  // into quarantine and will be available for future requests.
  old_free_bytes = __asan_get_free_bytes();
  fprintf(stderr, "allocating %zu bytes:\n", kLargeMallocSize);
  fprintf(stderr, "free bytes before malloc: %zu\n", old_free_bytes);
  free(Ident(malloc(kLargeMallocSize)));
  new_free_bytes = __asan_get_free_bytes();
  fprintf(stderr, "free bytes after malloc and free: %zu\n", new_free_bytes);
  ASSERT_GE(new_free_bytes, old_free_bytes + kLargeMallocSize);
  // Test passed.
  DoDoubleFree();
}

TEST(AddressSanitizerInterface, GetFreeBytesTest) {
  static const size_t kNumOfChunks = 100;
  static const size_t kChunkSize = 100;
  char *chunks[kNumOfChunks];
  size_t i;
  size_t old_free_bytes, new_free_bytes;
  // Allocate one small chunk: the allocator now probably holds a batch of
  // free chunks of this size to serve future requests, so each subsequent
  // allocation of the same size should decrease the number of free bytes.
  chunks[0] = Ident((char*)malloc(kChunkSize));
  old_free_bytes = __asan_get_free_bytes();
  for (i = 1; i < kNumOfChunks; i++) {
    chunks[i] = Ident((char*)malloc(kChunkSize));
    new_free_bytes = __asan_get_free_bytes();
    EXPECT_LT(new_free_bytes, old_free_bytes);
    old_free_bytes = new_free_bytes;
  }
  EXPECT_DEATH(DoLargeMallocForGetFreeBytesTestAndDie(), "double-free");
}

static const size_t kManyThreadsMallocSizes[] = {5, 1UL<<10, 1UL<<20, 357};
static const size_t kManyThreadsIterations = 250;
static const size_t kManyThreadsNumThreads = (__WORDSIZE == 32) ? 40 : 200;

void *ManyThreadsWithStatsWorker(void *arg) {
  (void)arg;
  for (size_t iter = 0; iter < kManyThreadsIterations; iter++) {
    for (size_t size_index = 0; size_index < 4; size_index++) {
      free(Ident(malloc(kManyThreadsMallocSizes[size_index])));
    }
  }
  return 0;
}

TEST(AddressSanitizerInterface, ManyThreadsWithStatsStressTest) {
  size_t before_test, after_test, i;
  pthread_t threads[kManyThreadsNumThreads];
  before_test = __asan_get_current_allocated_bytes();
  for (i = 0; i < kManyThreadsNumThreads; i++) {
    pthread_create(&threads[i], 0,
                   (void* (*)(void *x))ManyThreadsWithStatsWorker, (void*)i);
  }
  for (i = 0; i < kManyThreadsNumThreads; i++) {
    pthread_join(threads[i], 0);
  }
  after_test = __asan_get_current_allocated_bytes();
  // ASan stats also reflect memory usage of internal ASan RTL structs,
  // so we can't check for equality here.
  EXPECT_LT(after_test, before_test + (1UL<<20));
}

TEST(AddressSanitizerInterface, ExitCode) {
  int original_exit_code = __asan_set_error_exit_code(7);
  EXPECT_EXIT(DoDoubleFree(), ::testing::ExitedWithCode(7), "");
  EXPECT_EQ(7, __asan_set_error_exit_code(8));
  EXPECT_EXIT(DoDoubleFree(), ::testing::ExitedWithCode(8), "");
  EXPECT_EQ(8, __asan_set_error_exit_code(original_exit_code));
  EXPECT_EXIT(DoDoubleFree(),
              ::testing::ExitedWithCode(original_exit_code), "");
}

static void MyDeathCallback() {
  fprintf(stderr, "MyDeathCallback\n");
}

TEST(AddressSanitizerInterface, DeathCallbackTest) {
  __asan_set_death_callback(MyDeathCallback);
  EXPECT_DEATH(DoDoubleFree(), "MyDeathCallback");
  __asan_set_death_callback(NULL);
}

TEST(AddressSanitizerInterface, OnErrorCallbackTest) {
  __asan_set_on_error_callback(MyDeathCallback);
  EXPECT_DEATH(DoDoubleFree(), "MyDeathCallback.*double-free");
  __asan_set_on_error_callback(NULL);
}

static const char* kUseAfterPoisonErrorMessage = "use-after-poison";

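// Shorthands: expect a single byte at ptr+offset to be addressable
// (GOOD_ACCESS) or poisoned (BAD_ACCESS) according to the shadow.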
#define GOOD_ACCESS(ptr, offset)  \
    EXPECT_FALSE(__asan::AddressIsPoisoned((uptr)(ptr + offset)))

#define BAD_ACCESS(ptr, offset) \
    EXPECT_TRUE(__asan::AddressIsPoisoned((uptr)(ptr + offset)))

TEST(AddressSanitizerInterface, SimplePoisonMemoryRegionTest) {
  char *array = Ident((char*)malloc(120));
  // Poison array[40..80).
  __asan_poison_memory_region(array + 40, 40);
  GOOD_ACCESS(array, 39);
  GOOD_ACCESS(array, 80);
  BAD_ACCESS(array, 40);
  BAD_ACCESS(array, 60);
  BAD_ACCESS(array, 79);
  EXPECT_DEATH(__asan_report_error(0, 0, 0, (uptr)(array + 40), true, 1),
               kUseAfterPoisonErrorMessage);
  __asan_unpoison_memory_region(array + 40, 40);
  // Access previously poisoned memory.
  GOOD_ACCESS(array, 40);
  GOOD_ACCESS(array, 79);
  free(array);
}

TEST(AddressSanitizerInterface, OverlappingPoisonMemoryRegionTest) {
  char *array = Ident((char*)malloc(120));
  // Poison [0..40) and [80..120).
  __asan_poison_memory_region(array, 40);
  __asan_poison_memory_region(array + 80, 40);
  BAD_ACCESS(array, 20);
  GOOD_ACCESS(array, 60);
  BAD_ACCESS(array, 100);
  // Poison the whole array: [0..120).
  __asan_poison_memory_region(array, 120);
  BAD_ACCESS(array, 60);
  // Unpoison [24..96).
  __asan_unpoison_memory_region(array + 24, 72);
  BAD_ACCESS(array, 23);
  GOOD_ACCESS(array, 24);
  GOOD_ACCESS(array, 60);
  GOOD_ACCESS(array, 95);
  BAD_ACCESS(array, 96);
  free(array);
}

TEST(AddressSanitizerInterface, PushAndPopWithPoisoningTest) {
  // Vector of capacity 20.
  char *vec = Ident((char*)malloc(20));
  __asan_poison_memory_region(vec, 20);
  for (size_t i = 0; i < 7; i++) {
    // Simulate push_back.
    __asan_unpoison_memory_region(vec + i, 1);
    GOOD_ACCESS(vec, i);
    BAD_ACCESS(vec, i + 1);
  }
  for (size_t i = 7; i > 0; i--) {
    // Simulate pop_back.
    __asan_poison_memory_region(vec + i - 1, 1);
    BAD_ACCESS(vec, i - 1);
    if (i > 1) GOOD_ACCESS(vec, i - 2);
  }
  free(vec);
}

// Make sure that within each aligned block of size 2^granularity the shadow
// never has a poisoned ("true") byte before an addressable ("false") one:
// only a suffix of each block may be poisoned.
static void MakeShadowValid(bool *shadow, int length, int granularity) {
  bool can_be_poisoned = true;
  for (int i = length - 1; i >= 0; i--) {
    if (!shadow[i])
      can_be_poisoned = false;
    if (!can_be_poisoned)
      shadow[i] = false;
    if (i % (1 << granularity) == 0) {
      can_be_poisoned = true;
    }
  }
}

TEST(AddressSanitizerInterface, PoisoningStressTest) {
  const size_t kSize = 24;
  bool expected[kSize];
  char *arr = Ident((char*)malloc(kSize));
  for (size_t l1 = 0; l1 < kSize; l1++) {
    for (size_t s1 = 1; l1 + s1 <= kSize; s1++) {
      for (size_t l2 = 0; l2 < kSize; l2++) {
        for (size_t s2 = 1; l2 + s2 <= kSize; s2++) {
          // Poison [l1, l1+s1), [l2, l2+s2) and check result.
          __asan_unpoison_memory_region(arr, kSize);
          __asan_poison_memory_region(arr + l1, s1);
          __asan_poison_memory_region(arr + l2, s2);
          memset(expected, false, kSize);
          memset(expected + l1, true, s1);
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          memset(expected + l2, true, s2);
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          for (size_t i = 0; i < kSize; i++) {
            ASSERT_EQ(expected[i], __asan_address_is_poisoned(arr + i));
          }
          // Unpoison [l1, l1+s1) and [l2, l2+s2) and check result.
          __asan_poison_memory_region(arr, kSize);
          __asan_unpoison_memory_region(arr + l1, s1);
          __asan_unpoison_memory_region(arr + l2, s2);
          memset(expected, true, kSize);
          memset(expected + l1, false, s1);
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          memset(expected + l2, false, s2);
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          for (size_t i = 0; i < kSize; i++) {
            ASSERT_EQ(expected[i], __asan_address_is_poisoned(arr + i));
          }
        }
      }
    }
  }
}

static const char *kInvalidPoisonMessage = "invalid-poison-memory-range";
static const char *kInvalidUnpoisonMessage = "invalid-unpoison-memory-range";

TEST(AddressSanitizerInterface, DISABLED_InvalidPoisonAndUnpoisonCallsTest) {
  char *array = Ident((char*)malloc(120));
  __asan_unpoison_memory_region(array, 120);
  // Try to unpoison memory we don't own.
  EXPECT_DEATH(__asan_unpoison_memory_region(array, 121),
               kInvalidUnpoisonMessage);
  EXPECT_DEATH(__asan_unpoison_memory_region(array - 1, 120),
               kInvalidUnpoisonMessage);

  __asan_poison_memory_region(array, 120);
  // Try to poison memory we don't own.
  EXPECT_DEATH(__asan_poison_memory_region(array, 121), kInvalidPoisonMessage);
  EXPECT_DEATH(__asan_poison_memory_region(array - 1, 120),
               kInvalidPoisonMessage);
  free(array);
}

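// Custom report callback: bracket the report with "ABCDEF" markers so the
// death test below can check that the callback actually ran.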
static void ErrorReportCallbackOneToZ(const char *report) {
  int report_len = strlen(report);
  ASSERT_EQ(6, write(2, "ABCDEF", 6));
  ASSERT_EQ(report_len, write(2, report, report_len));
  ASSERT_EQ(6, write(2, "ABCDEF", 6));
  _exit(1);
}

TEST(AddressSanitizerInterface, SetErrorReportCallbackTest) {
  __asan_set_error_report_callback(ErrorReportCallbackOneToZ);
  EXPECT_DEATH(__asan_report_error(0, 0, 0, 0, true, 1),
               ASAN_PCRE_DOTALL "ABCDEF.*AddressSanitizer.*WRITE.*ABCDEF");
  __asan_set_error_report_callback(NULL);
}

TEST(AddressSanitizerInterface, GetOwnershipStressTest) {
  std::vector<char *> pointers;
  std::vector<size_t> sizes;
  const size_t kNumMallocs =
      (__WORDSIZE <= 32 || ASAN_LOW_MEMORY) ? 1 << 10 : 1 << 14;
  for (size_t i = 0; i < kNumMallocs; i++) {
    size_t size = i * 100 + 1;
    pointers.push_back((char*)malloc(size));
    sizes.push_back(size);
  }
  for (size_t i = 0; i < 4000000; i++) {
    EXPECT_FALSE(__asan_get_ownership(&pointers));
    EXPECT_FALSE(__asan_get_ownership((void*)0x1234));
    size_t idx = i % kNumMallocs;
    EXPECT_TRUE(__asan_get_ownership(pointers[idx]));
    EXPECT_EQ(sizes[idx], __asan_get_allocated_size(pointers[idx]));
  }
  for (size_t i = 0, n = pointers.size(); i < n; i++)
    free(pointers[i]);
}