//===-- asan_noinst_test.cc ----------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// This test file should be compiled w/o asan instrumentation.
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"
#include "asan_interface.h"
#include "asan_internal.h"
#include "asan_mapping.h"
#include "asan_stack.h"
#include "asan_test_utils.h"

#include <assert.h>
#include <pthread.h>  // for pthread_create()/pthread_join()
#include <stdio.h>
#include <stdlib.h>
#include <string.h>  // for memset()
#include <unistd.h>  // for write()
#include <algorithm>
#include <vector>
#include "gtest/gtest.h"
// Simple stand-alone pseudorandom number generator.
// The current algorithm is the ANSI C linear congruential PRNG.
static inline u32 my_rand(u32* state) {
  return (*state = *state * 1103515245 + 12345) >> 16;
}

static u32 global_seed = 0;

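// Sanity check that gtest death tests work in this environment.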
TEST(AddressSanitizer, InternalSimpleDeathTest) {
  EXPECT_DEATH(exit(1), "");
}

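// Stress the allocator: randomly interleave asan_memalign calls (with
// random sizes and alignments) and asan_free calls, then free whatever
// is left.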
static void MallocStress(size_t n) {
  u32 seed = my_rand(&global_seed);
  __asan::AsanStackTrace stack1;
  stack1.trace[0] = 0xa123;
  stack1.trace[1] = 0xa456;
  stack1.size = 2;

  __asan::AsanStackTrace stack2;
  stack2.trace[0] = 0xb123;
  stack2.trace[1] = 0xb456;
  stack2.size = 2;

  __asan::AsanStackTrace stack3;
  stack3.trace[0] = 0xc123;
  stack3.trace[1] = 0xc456;
  stack3.size = 2;

  std::vector<void *> vec;
  for (size_t i = 0; i < n; i++) {
    if ((i % 3) == 0) {
      if (vec.empty()) continue;
      // Free a randomly chosen live allocation.
      size_t idx = my_rand(&seed) % vec.size();
      void *ptr = vec[idx];
      vec[idx] = vec.back();
      vec.pop_back();
      __asan::asan_free(ptr, &stack1);
    } else {
      // Allocate with a random size (occasionally large) and alignment.
      size_t size = my_rand(&seed) % 1000 + 1;
      switch ((my_rand(&seed) % 128)) {
        case 0: size += 1024; break;
        case 1: size += 2048; break;
        case 2: size += 4096; break;
      }
      size_t alignment = 1 << (my_rand(&seed) % 10 + 1);
      char *ptr = (char*)__asan::asan_memalign(alignment, size, &stack2);
      vec.push_back(ptr);
      ptr[0] = 0;
      ptr[size-1] = 0;
      ptr[size/2] = 0;
    }
  }
  for (size_t i = 0; i < vec.size(); i++)
    __asan::asan_free(vec[i], &stack3);
}

TEST(AddressSanitizer, NoInstMallocTest) {
#ifdef __arm__
  MallocStress(300000);
#else
  MallocStress(1000000);
#endif
}

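// Print the shadow bytes around [ptr, ptr+size) to stderr; a '.' marks
// the region boundaries.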
static void PrintShadow(const char *tag, uptr ptr, size_t size) {
  fprintf(stderr, "%s shadow: %lx size % 3ld: ", tag, (long)ptr, (long)size);
  uptr prev_shadow = 0;
  for (sptr i = -32; i < (sptr)size + 32; i++) {
    uptr shadow = __asan::MemToShadow(ptr + i);
    if (i == 0 || i == (sptr)size)
      fprintf(stderr, ".");
    if (shadow != prev_shadow) {
      prev_shadow = shadow;
      fprintf(stderr, "%02x", (int)*(u8*)shadow);
    }
  }
  fprintf(stderr, "\n");
}

TEST(AddressSanitizer, DISABLED_InternalPrintShadow) {
  for (size_t size = 1; size <= 513; size++) {
    char *ptr = new char[size];
    PrintShadow("m", (uptr)ptr, size);
    delete [] ptr;
    PrintShadow("f", (uptr)ptr, size);
  }
}

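// Sample PC values used as input for the stack trace compression tests.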
static uptr pc_array[] = {
#if __WORDSIZE == 64
  0x7effbf756068ULL,
  0x7effbf75e5abULL,
  0x7effc0625b7cULL,
  0x7effc05b8997ULL,
  0x7effbf990577ULL,
  0x7effbf990c56ULL,
  0x7effbf992f3cULL,
  0x7effbf950c22ULL,
  0x7effc036dba0ULL,
  0x7effc03638a3ULL,
  0x7effc035be4aULL,
  0x7effc0539c45ULL,
  0x7effc0539a65ULL,
  0x7effc03db9b3ULL,
  0x7effc03db100ULL,
  0x7effc037c7b8ULL,
  0x7effc037bfffULL,
  0x7effc038b777ULL,
  0x7effc038021cULL,
  0x7effc037c7d1ULL,
  0x7effc037bfffULL,
  0x7effc038b777ULL,
  0x7effc038021cULL,
  0x7effc037c7d1ULL,
  0x7effc037bfffULL,
  0x7effc038b777ULL,
  0x7effc038021cULL,
  0x7effc037c7d1ULL,
  0x7effc037bfffULL,
  0x7effc0520d26ULL,
  0x7effc009ddffULL,
  0x7effbf90bb50ULL,
  0x7effbdddfa69ULL,
  0x7effbdde1fe2ULL,
  0x7effbdde2424ULL,
  0x7effbdde27b3ULL,
  0x7effbddee53bULL,
  0x7effbdde1988ULL,
  0x7effbdde0904ULL,
  0x7effc106ce0dULL,
  0x7effbcc3fa04ULL,
  0x7effbcc3f6a4ULL,
  0x7effbcc3e726ULL,
  0x7effbcc40852ULL,
  0x7effb681ec4dULL,
#endif  // __WORDSIZE
  0xB0B5E768,
  0x7B682EC1,
  0x367F9918,
  0xAE34E13,
  0xBA0C6C6,
  0x13250F46,
  0xA0D6A8AB,
  0x2B07C1A8,
  0x6C844F4A,
  0x2321B53,
  0x1F3D4F8F,
  0x3FE2924B,
  0xB7A2F568,
  0xBD23950A,
  0x61020930,
  0x33E7970C,
  0x405998A1,
  0x59F3551D,
  0x350E3028,
  0xBC55A28D,
  0x361F3AED,
  0xBEAD0F73,
  0xAEF28479,
  0x757E971F,
  0xAEBA450,
  0x43AD22F5,
  0x8C2C50C4,
  0x7AD8A2E1,
  0x69EE4EE8,
  0xC08DFF,
  0x4BA6538,
  0x3708AB2,
  0xC24B6475,
  0x7C8890D7,
  0x6662495F,
  0x9B641689,
  0xD3596B,
  0xA1049569,
  0x44CBC16,
  0x4D39C39F
};

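// Round trip: compress a random prefix of pc_array into a buffer of
// random size, uncompress it, and check that the recovered frames match
// the originals.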
void CompressStackTraceTest(size_t n_iter) {
  u32 seed = my_rand(&global_seed);
  const size_t kNumPcs = ASAN_ARRAY_SIZE(pc_array);
  u32 compressed[2 * kNumPcs];

  for (size_t iter = 0; iter < n_iter; iter++) {
    std::random_shuffle(pc_array, pc_array + kNumPcs);
    __asan::AsanStackTrace stack0, stack1;
    stack0.CopyFrom(pc_array, kNumPcs);
    stack0.size = std::max((size_t)1, (size_t)(my_rand(&seed) % stack0.size));
    size_t compress_size =
      std::max((size_t)2, (size_t)my_rand(&seed) % (2 * kNumPcs));
    size_t n_frames =
      __asan::AsanStackTrace::CompressStack(&stack0, compressed, compress_size);
    assert(n_frames <= stack0.size);
    __asan::AsanStackTrace::UncompressStack(&stack1, compressed, compress_size);
    assert(stack1.size == n_frames);
    for (size_t i = 0; i < stack1.size; i++) {
      assert(stack0.trace[i] == stack1.trace[i]);
    }
  }
}

TEST(AddressSanitizer, CompressStackTraceTest) {
  CompressStackTraceTest(10000);
}

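// Benchmark CompressStack on a full-size trace; Ident() keeps the result
// from being optimized away.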
void CompressStackTraceBenchmark(size_t n_iter) {
  const size_t kNumPcs = ASAN_ARRAY_SIZE(pc_array);
  u32 compressed[2 * kNumPcs];
  std::random_shuffle(pc_array, pc_array + kNumPcs);

  __asan::AsanStackTrace stack0;
  stack0.CopyFrom(pc_array, kNumPcs);
  stack0.size = kNumPcs;
  for (size_t iter = 0; iter < n_iter; iter++) {
    size_t compress_size = kNumPcs;
    size_t n_frames =
      __asan::AsanStackTrace::CompressStack(&stack0, compressed, compress_size);
    Ident(n_frames);
  }
}

TEST(AddressSanitizer, CompressStackTraceBenchmark) {
  CompressStackTraceBenchmark(1 << 24);
}

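// malloc/free blocks of one size until the first pointer is reused: the
// quarantine should delay reuse for a long time, but not forever.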
TEST(AddressSanitizer, QuarantineTest) {
  __asan::AsanStackTrace stack;
  stack.trace[0] = 0x890;
  stack.size = 1;

  const int size = 32;
  void *p = __asan::asan_malloc(size, &stack);
  __asan::asan_free(p, &stack);
  size_t i;
  size_t max_i = 1 << 30;
  for (i = 0; i < max_i; i++) {
    void *p1 = __asan::asan_malloc(size, &stack);
    __asan::asan_free(p1, &stack);
    if (p1 == p) break;
  }
  // fprintf(stderr, "i=%ld\n", i);
  EXPECT_GE(i, 100000U);
  EXPECT_LT(i, max_i);
}

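// Worker for ThreadedQuarantineTest: many small malloc/free pairs.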
void *ThreadedQuarantineTestWorker(void *unused) {
  u32 seed = my_rand(&global_seed);
  __asan::AsanStackTrace stack;
  stack.trace[0] = 0x890;
  stack.size = 1;

  for (size_t i = 0; i < 1000; i++) {
    void *p = __asan::asan_malloc(1 + (my_rand(&seed) % 4000), &stack);
    __asan::asan_free(p, &stack);
  }
  return NULL;
}

// Check that the thread-local allocators are flushed when threads are
// destroyed.
TEST(AddressSanitizer, ThreadedQuarantineTest) {
  const int n_threads = 3000;
  size_t mmaped1 = __asan_get_heap_size();
  for (int i = 0; i < n_threads; i++) {
    pthread_t t;
    pthread_create(&t, NULL, ThreadedQuarantineTestWorker, 0);
    pthread_join(t, 0);
    size_t mmaped2 = __asan_get_heap_size();
    EXPECT_LT(mmaped2 - mmaped1, 320U * (1 << 20));
  }
}

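// Allocate and free batches of identically-sized blocks in a loop.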
void *ThreadedOneSizeMallocStress(void *unused) {
  __asan::AsanStackTrace stack;
  stack.trace[0] = 0x890;
  stack.size = 1;
  const size_t kNumMallocs = 1000;
  for (int iter = 0; iter < 1000; iter++) {
    void *p[kNumMallocs];
    for (size_t i = 0; i < kNumMallocs; i++) {
      p[i] = __asan::asan_malloc(32, &stack);
    }
    for (size_t i = 0; i < kNumMallocs; i++) {
      __asan::asan_free(p[i], &stack);
    }
  }
  return NULL;
}

TEST(AddressSanitizer, ThreadedOneSizeMallocStressTest) {
  const int kNumThreads = 4;
  pthread_t t[kNumThreads];
  for (int i = 0; i < kNumThreads; i++) {
    pthread_create(&t[i], 0, ThreadedOneSizeMallocStress, 0);
  }
  for (int i = 0; i < kNumThreads; i++) {
    pthread_join(t[i], 0);
  }
}

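// A wild memset() into shadow or shadow-gap memory should be caught and
// reported against the corresponding shadow region.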
TEST(AddressSanitizer, MemsetWildAddressTest) {
  typedef void*(*memset_p)(void*, int, size_t);
  // Prevent inlining of memset().
  volatile memset_p libc_memset = (memset_p)memset;
  EXPECT_DEATH(libc_memset((void*)(kLowShadowBeg + kPageSize), 0, 100),
               "unknown-crash.*low shadow");
  EXPECT_DEATH(libc_memset((void*)(kShadowGapBeg + kPageSize), 0, 100),
               "unknown-crash.*shadow gap");
  EXPECT_DEATH(libc_memset((void*)(kHighShadowBeg + kPageSize), 0, 100),
               "unknown-crash.*high shadow");
}

TEST(AddressSanitizerInterface, GetEstimatedAllocatedSize) {
  EXPECT_EQ(1U, __asan_get_estimated_allocated_size(0));
  const size_t sizes[] = { 1, 30, 1<<30 };
  for (size_t i = 0; i < 3; i++) {
    EXPECT_EQ(sizes[i], __asan_get_estimated_allocated_size(sizes[i]));
  }
}

static const char* kGetAllocatedSizeErrorMsg =
  "attempting to call __asan_get_allocated_size()";

TEST(AddressSanitizerInterface, GetAllocatedSizeAndOwnershipTest) {
  const size_t kArraySize = 100;
  char *array = Ident((char*)malloc(kArraySize));
  int *int_ptr = Ident(new int);

  // Allocated memory is owned by the allocator, and the allocated size
  // should equal the requested size.
  EXPECT_EQ(true, __asan_get_ownership(array));
  EXPECT_EQ(kArraySize, __asan_get_allocated_size(array));
  EXPECT_EQ(true, __asan_get_ownership(int_ptr));
  EXPECT_EQ(sizeof(int), __asan_get_allocated_size(int_ptr));

  // We cannot call __asan_get_allocated_size() on memory we didn't allocate
  // or on interior pointers (not returned by a previous malloc).
  void *wild_addr = (void*)0x1;
  EXPECT_EQ(false, __asan_get_ownership(wild_addr));
  EXPECT_DEATH(__asan_get_allocated_size(wild_addr), kGetAllocatedSizeErrorMsg);
  EXPECT_EQ(false, __asan_get_ownership(array + kArraySize / 2));
  EXPECT_DEATH(__asan_get_allocated_size(array + kArraySize / 2),
               kGetAllocatedSizeErrorMsg);

  // NULL is not owned, but is a valid argument for
  // __asan_get_allocated_size().
  EXPECT_EQ(false, __asan_get_ownership(NULL));
  EXPECT_EQ(0U, __asan_get_allocated_size(NULL));

  // Once memory is freed, it is no longer owned, and calling
  // __asan_get_allocated_size() on it is forbidden.
  free(array);
  EXPECT_EQ(false, __asan_get_ownership(array));
  EXPECT_DEATH(__asan_get_allocated_size(array), kGetAllocatedSizeErrorMsg);

  delete int_ptr;
}

TEST(AddressSanitizerInterface, GetCurrentAllocatedBytesTest) {
  size_t before_malloc, after_malloc, after_free;
  char *array;
  const size_t kMallocSize = 100;
  before_malloc = __asan_get_current_allocated_bytes();

  array = Ident((char*)malloc(kMallocSize));
  after_malloc = __asan_get_current_allocated_bytes();
  EXPECT_EQ(before_malloc + kMallocSize, after_malloc);

  free(array);
  after_free = __asan_get_current_allocated_bytes();
  EXPECT_EQ(before_malloc, after_free);
}

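// Deliberate double-free, used to finish death tests with a known report.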
static void DoDoubleFree() {
  int *x = Ident(new int);
  delete Ident(x);
  delete Ident(x);
}

// This test is run in a separate process, so that the large malloc'ed
// chunk won't remain in the free lists after the test.
// Note: use ASSERT_* instead of EXPECT_* here.
static void RunGetHeapSizeTestAndDie() {
  size_t old_heap_size, new_heap_size, heap_growth;
  // We are unlikely to have a free chunk of this size lying around.
  static const size_t kLargeMallocSize = 1 << 29;  // 512M
  old_heap_size = __asan_get_heap_size();
  fprintf(stderr, "allocating %zu bytes:\n", kLargeMallocSize);
  free(Ident(malloc(kLargeMallocSize)));
  new_heap_size = __asan_get_heap_size();
  heap_growth = new_heap_size - old_heap_size;
  fprintf(stderr, "heap growth after first malloc: %zu\n", heap_growth);
  ASSERT_GE(heap_growth, kLargeMallocSize);
  ASSERT_LE(heap_growth, 2 * kLargeMallocSize);

  // Now the large chunk should be in the free list, so it can be
  // allocated again without increasing the heap size.
  old_heap_size = new_heap_size;
  free(Ident(malloc(kLargeMallocSize)));
  heap_growth = __asan_get_heap_size() - old_heap_size;
  fprintf(stderr, "heap growth after second malloc: %zu\n", heap_growth);
  ASSERT_LT(heap_growth, kLargeMallocSize);

  // Test passed. Now die with the expected double-free.
  DoDoubleFree();
}

TEST(AddressSanitizerInterface, GetHeapSizeTest) {
  EXPECT_DEATH(RunGetHeapSizeTestAndDie(), "double-free");
}

// Note: use ASSERT_* instead of EXPECT_* here.
static void DoLargeMallocForGetFreeBytesTestAndDie() {
  size_t old_free_bytes, new_free_bytes;
  static const size_t kLargeMallocSize = 1 << 29;  // 512M
  // If we malloc and free a large memory chunk, it will not fall
  // into quarantine and will be available for future requests.
  old_free_bytes = __asan_get_free_bytes();
  fprintf(stderr, "allocating %zu bytes:\n", kLargeMallocSize);
  fprintf(stderr, "free bytes before malloc: %zu\n", old_free_bytes);
  free(Ident(malloc(kLargeMallocSize)));
  new_free_bytes = __asan_get_free_bytes();
  fprintf(stderr, "free bytes after malloc and free: %zu\n", new_free_bytes);
  ASSERT_GE(new_free_bytes, old_free_bytes + kLargeMallocSize);
  // Test passed.
  DoDoubleFree();
}

TEST(AddressSanitizerInterface, GetFreeBytesTest) {
  static const size_t kNumOfChunks = 100;
  static const size_t kChunkSize = 100;
  char *chunks[kNumOfChunks];
  size_t i;
  size_t old_free_bytes, new_free_bytes;
  // Allocate one small chunk: the allocator now probably has a batch of
  // free chunks of this size ready to fulfill future requests, so every
  // subsequent request decreases the number of free bytes.
  chunks[0] = Ident((char*)malloc(kChunkSize));
  old_free_bytes = __asan_get_free_bytes();
  for (i = 1; i < kNumOfChunks; i++) {
    chunks[i] = Ident((char*)malloc(kChunkSize));
    new_free_bytes = __asan_get_free_bytes();
    EXPECT_LT(new_free_bytes, old_free_bytes);
    old_free_bytes = new_free_bytes;
  }
  EXPECT_DEATH(DoLargeMallocForGetFreeBytesTestAndDie(), "double-free");
}

static const size_t kManyThreadsMallocSizes[] = {5, 1UL<<10, 1UL<<20, 357};
static const size_t kManyThreadsIterations = 250;
static const size_t kManyThreadsNumThreads = 200;

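// Worker: repeatedly malloc and free each of the sizes listed above.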
void *ManyThreadsWithStatsWorker(void *arg) {
  for (size_t iter = 0; iter < kManyThreadsIterations; iter++) {
    for (size_t size_index = 0; size_index < 4; size_index++) {
      free(Ident(malloc(kManyThreadsMallocSizes[size_index])));
    }
  }
  return 0;
}

TEST(AddressSanitizerInterface, ManyThreadsWithStatsStressTest) {
  size_t before_test, after_test, i;
  pthread_t threads[kManyThreadsNumThreads];
  before_test = __asan_get_current_allocated_bytes();
  for (i = 0; i < kManyThreadsNumThreads; i++) {
    pthread_create(&threads[i], 0, ManyThreadsWithStatsWorker, (void*)i);
  }
  for (i = 0; i < kManyThreadsNumThreads; i++) {
    pthread_join(threads[i], 0);
  }
  after_test = __asan_get_current_allocated_bytes();
  // ASan stats also reflect memory usage of internal ASan RTL structs,
  // so we can't check for equality here.
  EXPECT_LT(after_test, before_test + (1UL<<20));
}

TEST(AddressSanitizerInterface, ExitCode) {
  int original_exit_code = __asan_set_error_exit_code(7);
  EXPECT_EXIT(DoDoubleFree(), ::testing::ExitedWithCode(7), "");
  EXPECT_EQ(7, __asan_set_error_exit_code(8));
  EXPECT_EXIT(DoDoubleFree(), ::testing::ExitedWithCode(8), "");
  EXPECT_EQ(8, __asan_set_error_exit_code(original_exit_code));
  EXPECT_EXIT(DoDoubleFree(),
              ::testing::ExitedWithCode(original_exit_code), "");
}

static void MyDeathCallback() {
  fprintf(stderr, "MyDeathCallback\n");
}

TEST(AddressSanitizerInterface, DeathCallbackTest) {
  __asan_set_death_callback(MyDeathCallback);
  EXPECT_DEATH(DoDoubleFree(), "MyDeathCallback");
  __asan_set_death_callback(NULL);
}

static const char* kUseAfterPoisonErrorMessage = "use-after-poison";

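// GOOD_ACCESS/BAD_ACCESS check whether the byte at ptr + offset is
// unpoisoned or poisoned, respectively.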
#define GOOD_ACCESS(ptr, offset)  \
    EXPECT_FALSE(__asan::AddressIsPoisoned((uptr)(ptr + offset)))

#define BAD_ACCESS(ptr, offset) \
    EXPECT_TRUE(__asan::AddressIsPoisoned((uptr)(ptr + offset)))

TEST(AddressSanitizerInterface, SimplePoisonMemoryRegionTest) {
  char *array = Ident((char*)malloc(120));
  // Poison array[40..80).
  __asan_poison_memory_region(array + 40, 40);
  GOOD_ACCESS(array, 39);
  GOOD_ACCESS(array, 80);
  BAD_ACCESS(array, 40);
  BAD_ACCESS(array, 60);
  BAD_ACCESS(array, 79);
  EXPECT_DEATH(__asan_report_error(0, 0, 0, (uptr)(array + 40), true, 1),
               kUseAfterPoisonErrorMessage);
  __asan_unpoison_memory_region(array + 40, 40);
  // Access the previously poisoned memory.
  GOOD_ACCESS(array, 40);
  GOOD_ACCESS(array, 79);
  free(array);
}

TEST(AddressSanitizerInterface, OverlappingPoisonMemoryRegionTest) {
  char *array = Ident((char*)malloc(120));
  // Poison [0..40) and [80..120).
  __asan_poison_memory_region(array, 40);
  __asan_poison_memory_region(array + 80, 40);
  BAD_ACCESS(array, 20);
  GOOD_ACCESS(array, 60);
  BAD_ACCESS(array, 100);
  // Poison the whole array: [0..120).
  __asan_poison_memory_region(array, 120);
  BAD_ACCESS(array, 60);
  // Unpoison [24..96).
  __asan_unpoison_memory_region(array + 24, 72);
  BAD_ACCESS(array, 23);
  GOOD_ACCESS(array, 24);
  GOOD_ACCESS(array, 60);
  GOOD_ACCESS(array, 95);
  BAD_ACCESS(array, 96);
  free(array);
}

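// Simulate std::vector-style push_back/pop_back poisoning on a raw buffer.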
TEST(AddressSanitizerInterface, PushAndPopWithPoisoningTest) {
  // Vector of capacity 20.
  char *vec = Ident((char*)malloc(20));
  __asan_poison_memory_region(vec, 20);
  for (size_t i = 0; i < 7; i++) {
    // Simulate push_back.
    __asan_unpoison_memory_region(vec + i, 1);
    GOOD_ACCESS(vec, i);
    BAD_ACCESS(vec, i + 1);
  }
  for (size_t i = 7; i > 0; i--) {
    // Simulate pop_back.
    __asan_poison_memory_region(vec + i - 1, 1);
    BAD_ACCESS(vec, i - 1);
    if (i > 1) GOOD_ACCESS(vec, i - 2);
  }
  free(vec);
}

// Make sure that each aligned block of size (1 << granularity) has no
// "true" (poisoned) value before a "false" value, i.e. only a suffix of
// each block may be poisoned.
static void MakeShadowValid(bool *shadow, int length, int granularity) {
  bool can_be_poisoned = true;
  for (int i = length - 1; i >= 0; i--) {
    if (!shadow[i])
      can_be_poisoned = false;
    if (!can_be_poisoned)
      shadow[i] = false;
    if (i % (1 << granularity) == 0) {
      // Start of a block: the next (lower-addressed) block starts fresh.
      can_be_poisoned = true;
    }
  }
}

TEST(AddressSanitizerInterface, PoisoningStressTest) {
  const size_t kSize = 24;
  bool expected[kSize];
  char *arr = Ident((char*)malloc(kSize));
  for (size_t l1 = 0; l1 < kSize; l1++) {
    for (size_t s1 = 1; l1 + s1 <= kSize; s1++) {
      for (size_t l2 = 0; l2 < kSize; l2++) {
        for (size_t s2 = 1; l2 + s2 <= kSize; s2++) {
          // Poison [l1, l1+s1), [l2, l2+s2) and check result.
          __asan_unpoison_memory_region(arr, kSize);
          __asan_poison_memory_region(arr + l1, s1);
          __asan_poison_memory_region(arr + l2, s2);
          memset(expected, false, kSize);
          memset(expected + l1, true, s1);
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          memset(expected + l2, true, s2);
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          for (size_t i = 0; i < kSize; i++) {
            ASSERT_EQ(expected[i], __asan_address_is_poisoned(arr + i));
          }
          // Unpoison [l1, l1+s1) and [l2, l2+s2) and check result.
          __asan_poison_memory_region(arr, kSize);
          __asan_unpoison_memory_region(arr + l1, s1);
          __asan_unpoison_memory_region(arr + l2, s2);
          memset(expected, true, kSize);
          memset(expected + l1, false, s1);
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          memset(expected + l2, false, s2);
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          for (size_t i = 0; i < kSize; i++) {
            ASSERT_EQ(expected[i], __asan_address_is_poisoned(arr + i));
          }
        }
      }
    }
  }
}

static const char *kInvalidPoisonMessage = "invalid-poison-memory-range";
static const char *kInvalidUnpoisonMessage = "invalid-unpoison-memory-range";

TEST(AddressSanitizerInterface, DISABLED_InvalidPoisonAndUnpoisonCallsTest) {
  char *array = Ident((char*)malloc(120));
  __asan_unpoison_memory_region(array, 120);
  // Try to unpoison memory we don't own.
  EXPECT_DEATH(__asan_unpoison_memory_region(array, 121),
               kInvalidUnpoisonMessage);
  EXPECT_DEATH(__asan_unpoison_memory_region(array - 1, 120),
               kInvalidUnpoisonMessage);

  __asan_poison_memory_region(array, 120);
  // Try to poison memory we don't own.
  EXPECT_DEATH(__asan_poison_memory_region(array, 121), kInvalidPoisonMessage);
  EXPECT_DEATH(__asan_poison_memory_region(array - 1, 120),
               kInvalidPoisonMessage);
  free(array);
}

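// Report callback that replaces every '1' in the report with 'Z' before
// writing it to stderr.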
static void ErrorReportCallbackOneToZ(const char *report) {
  int len = strlen(report);
  char *dup = (char*)malloc(len + 1);  // +1 for the terminating NUL.
  strcpy(dup, report);
  for (int i = 0; i < len; i++) {
    if (dup[i] == '1') dup[i] = 'Z';
  }
  int written = write(2, dup, len);
  ASSERT_EQ(len, written);
  free(dup);
}

TEST(AddressSanitizerInterface, SetErrorReportCallbackTest) {
  __asan_set_error_report_callback(ErrorReportCallbackOneToZ);
  EXPECT_DEATH(__asan_report_error(0, 0, 0, 0, true, 1), "size Z");
  __asan_set_error_report_callback(NULL);
}

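// Repeatedly query ownership and allocated size for live heap pointers
// and for addresses the allocator does not own.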
TEST(AddressSanitizerInterface, GetOwnershipStressTest) {
  std::vector<char *> pointers;
  std::vector<size_t> sizes;
  const size_t kNumMallocs =
      (__WORDSIZE <= 32 || ASAN_LOW_MEMORY) ? 1 << 10 : 1 << 14;
  for (size_t i = 0; i < kNumMallocs; i++) {
    size_t size = i * 100 + 1;
    pointers.push_back((char*)malloc(size));
    sizes.push_back(size);
  }
  for (size_t i = 0; i < 4000000; i++) {
    EXPECT_FALSE(__asan_get_ownership(&pointers));
    EXPECT_FALSE(__asan_get_ownership((void*)0x1234));
    size_t idx = i % kNumMallocs;
    EXPECT_TRUE(__asan_get_ownership(pointers[idx]));
    EXPECT_EQ(sizes[idx], __asan_get_allocated_size(pointers[idx]));
  }
  for (size_t i = 0, n = pointers.size(); i < n; i++)
    free(pointers[i]);
}