// asan_noinst_test.cc, revision 2c29212c42d457ade0bbd1d01de92195dd9ce925
//===-- asan_noinst_test.cc ----------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// This test file should be compiled w/o asan instrumentation.
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"
#include "asan_interface.h"
#include "asan_internal.h"
#include "asan_mapping.h"
#include "asan_stack.h"
#include "asan_test_utils.h"
#include "asan_test_config.h"

#include <assert.h>
#include <pthread.h>  // for pthread_create() and pthread_join()
#include <stdio.h>
#include <stdlib.h>
#include <string.h>  // for memset()
#include <unistd.h>  // for write() and _exit()
#include <algorithm>
#include <vector>
#include "gtest/gtest.h"

// Simple stand-alone pseudorandom number generator.
// Current algorithm is ANSI C linear congruential PRNG.
static inline u32 my_rand(u32* state) {
  return (*state = *state * 1103515245 + 12345) >> 16;
}
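
// A minimal sanity-check sketch for the LCG above: the generator is a pure
// function of its state, so identical seeds must yield identical streams.
TEST(AddressSanitizer, MyRandDeterminismTest) {
  u32 s1 = 42;
  u32 s2 = 42;
  for (int i = 0; i < 100; i++)
    EXPECT_EQ(my_rand(&s1), my_rand(&s2));
}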

static u32 global_seed = 0;

TEST(AddressSanitizer, InternalSimpleDeathTest) {
  EXPECT_DEATH(exit(1), "");
}

static void MallocStress(size_t n) {
  u32 seed = my_rand(&global_seed);
  __asan::AsanStackTrace stack1;
  stack1.trace[0] = 0xa123;
  stack1.trace[1] = 0xa456;
  stack1.size = 2;

  __asan::AsanStackTrace stack2;
  stack2.trace[0] = 0xb123;
  stack2.trace[1] = 0xb456;
  stack2.size = 2;

  __asan::AsanStackTrace stack3;
  stack3.trace[0] = 0xc123;
  stack3.trace[1] = 0xc456;
  stack3.size = 2;

  std::vector<void *> vec;
  for (size_t i = 0; i < n; i++) {
    if ((i % 3) == 0) {
      if (vec.empty()) continue;
      size_t idx = my_rand(&seed) % vec.size();
      void *ptr = vec[idx];
      vec[idx] = vec.back();
      vec.pop_back();
      __asan::asan_free(ptr, &stack1);
    } else {
      size_t size = my_rand(&seed) % 1000 + 1;
      switch ((my_rand(&seed) % 128)) {
        case 0: size += 1024; break;
        case 1: size += 2048; break;
        case 2: size += 4096; break;
      }
      size_t alignment = 1 << (my_rand(&seed) % 10 + 1);
      char *ptr = (char*)__asan::asan_memalign(alignment, size, &stack2);
      vec.push_back(ptr);
      ptr[0] = 0;
      ptr[size-1] = 0;
      ptr[size/2] = 0;
    }
  }
  for (size_t i = 0; i < vec.size(); i++)
    __asan::asan_free(vec[i], &stack3);
}

TEST(AddressSanitizer, NoInstMallocTest) {
#ifdef __arm__
  MallocStress(300000);
#else
  MallocStress(1000000);
#endif
}

static void PrintShadow(const char *tag, uptr ptr, size_t size) {
  fprintf(stderr, "%s shadow: %lx size % 3ld: ", tag, (long)ptr, (long)size);
  uptr prev_shadow = 0;
  for (sptr i = -32; i < (sptr)size + 32; i++) {
    uptr shadow = __asan::MemToShadow(ptr + i);
    if (i == 0 || i == (sptr)size)
      fprintf(stderr, ".");
    if (shadow != prev_shadow) {
      prev_shadow = shadow;
      fprintf(stderr, "%02x", (int)*(u8*)shadow);
    }
  }
  fprintf(stderr, "\n");
}

TEST(AddressSanitizer, DISABLED_InternalPrintShadow) {
  for (size_t size = 1; size <= 513; size++) {
    char *ptr = new char[size];
    PrintShadow("m", (uptr)ptr, size);
    delete [] ptr;
    PrintShadow("f", (uptr)ptr, size);
  }
}

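// Sample program-counter values (64-bit entries first, then 32-bit ones)
// used as synthetic stack frames by the stack-trace compression tests below.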
static uptr pc_array[] = {
#if __WORDSIZE == 64
  0x7effbf756068ULL,
  0x7effbf75e5abULL,
  0x7effc0625b7cULL,
  0x7effc05b8997ULL,
  0x7effbf990577ULL,
  0x7effbf990c56ULL,
  0x7effbf992f3cULL,
  0x7effbf950c22ULL,
  0x7effc036dba0ULL,
  0x7effc03638a3ULL,
  0x7effc035be4aULL,
  0x7effc0539c45ULL,
  0x7effc0539a65ULL,
  0x7effc03db9b3ULL,
  0x7effc03db100ULL,
  0x7effc037c7b8ULL,
  0x7effc037bfffULL,
  0x7effc038b777ULL,
  0x7effc038021cULL,
  0x7effc037c7d1ULL,
  0x7effc037bfffULL,
  0x7effc038b777ULL,
  0x7effc038021cULL,
  0x7effc037c7d1ULL,
  0x7effc037bfffULL,
  0x7effc038b777ULL,
  0x7effc038021cULL,
  0x7effc037c7d1ULL,
  0x7effc037bfffULL,
  0x7effc0520d26ULL,
  0x7effc009ddffULL,
  0x7effbf90bb50ULL,
  0x7effbdddfa69ULL,
  0x7effbdde1fe2ULL,
  0x7effbdde2424ULL,
  0x7effbdde27b3ULL,
  0x7effbddee53bULL,
  0x7effbdde1988ULL,
  0x7effbdde0904ULL,
  0x7effc106ce0dULL,
  0x7effbcc3fa04ULL,
  0x7effbcc3f6a4ULL,
  0x7effbcc3e726ULL,
  0x7effbcc40852ULL,
  0x7effb681ec4dULL,
#endif  // __WORDSIZE
  0xB0B5E768,
  0x7B682EC1,
  0x367F9918,
  0xAE34E13,
  0xBA0C6C6,
  0x13250F46,
  0xA0D6A8AB,
  0x2B07C1A8,
  0x6C844F4A,
  0x2321B53,
  0x1F3D4F8F,
  0x3FE2924B,
  0xB7A2F568,
  0xBD23950A,
  0x61020930,
  0x33E7970C,
  0x405998A1,
  0x59F3551D,
  0x350E3028,
  0xBC55A28D,
  0x361F3AED,
  0xBEAD0F73,
  0xAEF28479,
  0x757E971F,
  0xAEBA450,
  0x43AD22F5,
  0x8C2C50C4,
  0x7AD8A2E1,
  0x69EE4EE8,
  0xC08DFF,
  0x4BA6538,
  0x3708AB2,
  0xC24B6475,
  0x7C8890D7,
  0x6662495F,
  0x9B641689,
  0xD3596B,
  0xA1049569,
  0x44CBC16,
  0x4D39C39F
};

void CompressStackTraceTest(size_t n_iter) {
  u32 seed = my_rand(&global_seed);
  const size_t kNumPcs = ASAN_ARRAY_SIZE(pc_array);
  u32 compressed[2 * kNumPcs];

  for (size_t iter = 0; iter < n_iter; iter++) {
    std::random_shuffle(pc_array, pc_array + kNumPcs);
    __asan::AsanStackTrace stack0, stack1;
    stack0.CopyFrom(pc_array, kNumPcs);
    stack0.size = std::max((size_t)1, (size_t)(my_rand(&seed) % stack0.size));
    size_t compress_size =
      std::max((size_t)2, (size_t)my_rand(&seed) % (2 * kNumPcs));
    size_t n_frames =
      __asan::AsanStackTrace::CompressStack(&stack0, compressed, compress_size);
    Ident(n_frames);
    assert(n_frames <= stack0.size);
    __asan::AsanStackTrace::UncompressStack(&stack1, compressed, compress_size);
    assert(stack1.size == n_frames);
    for (size_t i = 0; i < stack1.size; i++) {
      assert(stack0.trace[i] == stack1.trace[i]);
    }
  }
}

TEST(AddressSanitizer, CompressStackTraceTest) {
  CompressStackTraceTest(10000);
}
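
// A fixed-input round-trip sketch of the same property: compression keeps at
// most stack0.size frames, and decompression must reproduce exactly the
// frames that were kept.
TEST(AddressSanitizer, CompressStackTraceRoundTripSmallTest) {
  __asan::AsanStackTrace stack0, stack1;
  stack0.trace[0] = 0x111;
  stack0.trace[1] = 0x222;
  stack0.trace[2] = 0x333;
  stack0.size = 3;
  u32 compressed[16];
  size_t n_frames =
      __asan::AsanStackTrace::CompressStack(&stack0, compressed, 16);
  EXPECT_LE(n_frames, (size_t)stack0.size);
  __asan::AsanStackTrace::UncompressStack(&stack1, compressed, 16);
  EXPECT_EQ(n_frames, (size_t)stack1.size);
  for (size_t i = 0; i < stack1.size; i++)
    EXPECT_EQ(stack0.trace[i], stack1.trace[i]);
}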

void CompressStackTraceBenchmark(size_t n_iter) {
  const size_t kNumPcs = ASAN_ARRAY_SIZE(pc_array);
  u32 compressed[2 * kNumPcs];
  std::random_shuffle(pc_array, pc_array + kNumPcs);

  __asan::AsanStackTrace stack0;
  stack0.CopyFrom(pc_array, kNumPcs);
  stack0.size = kNumPcs;
  for (size_t iter = 0; iter < n_iter; iter++) {
    size_t compress_size = kNumPcs;
    size_t n_frames =
      __asan::AsanStackTrace::CompressStack(&stack0, compressed, compress_size);
    Ident(n_frames);
  }
}

TEST(AddressSanitizer, CompressStackTraceBenchmark) {
  CompressStackTraceBenchmark(1 << 24);
}

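// Freed chunks are not recycled immediately; they are held in a quarantine
// first, so a just-freed address should be returned by malloc again only
// after many intervening malloc/free pairs have flushed it out.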
TEST(AddressSanitizer, QuarantineTest) {
  __asan::AsanStackTrace stack;
  stack.trace[0] = 0x890;
  stack.size = 1;

  const int size = 32;
  void *p = __asan::asan_malloc(size, &stack);
  __asan::asan_free(p, &stack);
  size_t i;
  size_t max_i = 1 << 30;
  for (i = 0; i < max_i; i++) {
    void *p1 = __asan::asan_malloc(size, &stack);
    __asan::asan_free(p1, &stack);
    if (p1 == p) break;
  }
  // fprintf(stderr, "i=%ld\n", i);
  EXPECT_GE(i, 100000U);
  EXPECT_LT(i, max_i);
}

void *ThreadedQuarantineTestWorker(void *unused) {
  (void)unused;
  u32 seed = my_rand(&global_seed);
  __asan::AsanStackTrace stack;
  stack.trace[0] = 0x890;
  stack.size = 1;

  for (size_t i = 0; i < 1000; i++) {
    void *p = __asan::asan_malloc(1 + (my_rand(&seed) % 4000), &stack);
    __asan::asan_free(p, &stack);
  }
  return NULL;
}

// Check that the thread-local allocators are flushed when threads are
// destroyed.
TEST(AddressSanitizer, ThreadedQuarantineTest) {
  const int n_threads = 3000;
  size_t mmaped1 = __asan_get_heap_size();
  for (int i = 0; i < n_threads; i++) {
    pthread_t t;
    pthread_create(&t, NULL, ThreadedQuarantineTestWorker, 0);
    pthread_join(t, 0);
    size_t mmaped2 = __asan_get_heap_size();
    EXPECT_LT(mmaped2 - mmaped1, 320U * (1 << 20));
  }
}

void *ThreadedOneSizeMallocStress(void *unused) {
  (void)unused;
  __asan::AsanStackTrace stack;
  stack.trace[0] = 0x890;
  stack.size = 1;
  const size_t kNumMallocs = 1000;
  for (int iter = 0; iter < 1000; iter++) {
    void *p[kNumMallocs];
    for (size_t i = 0; i < kNumMallocs; i++) {
      p[i] = __asan::asan_malloc(32, &stack);
    }
    for (size_t i = 0; i < kNumMallocs; i++) {
      __asan::asan_free(p[i], &stack);
    }
  }
  return NULL;
}

TEST(AddressSanitizer, ThreadedOneSizeMallocStressTest) {
  const int kNumThreads = 4;
  pthread_t t[kNumThreads];
  for (int i = 0; i < kNumThreads; i++) {
    pthread_create(&t[i], 0, ThreadedOneSizeMallocStress, 0);
  }
  for (int i = 0; i < kNumThreads; i++) {
    pthread_join(t[i], 0);
  }
}

TEST(AddressSanitizer, MemsetWildAddressTest) {
  typedef void*(*memset_p)(void*, int, size_t);
  // Prevent inlining of memset().
  volatile memset_p libc_memset = (memset_p)memset;
  EXPECT_DEATH(libc_memset((void*)(kLowShadowBeg + kPageSize), 0, 100),
               "unknown-crash.*low shadow");
  EXPECT_DEATH(libc_memset((void*)(kShadowGapBeg + kPageSize), 0, 100),
               "unknown-crash.*shadow gap");
  EXPECT_DEATH(libc_memset((void*)(kHighShadowBeg + kPageSize), 0, 100),
               "unknown-crash.*high shadow");
}

TEST(AddressSanitizerInterface, GetEstimatedAllocatedSize) {
  EXPECT_EQ(1U, __asan_get_estimated_allocated_size(0));
  const size_t sizes[] = { 1, 30, 1<<30 };
  for (size_t i = 0; i < 3; i++) {
    EXPECT_EQ(sizes[i], __asan_get_estimated_allocated_size(sizes[i]));
  }
}

static const char* kGetAllocatedSizeErrorMsg =
  "attempting to call __asan_get_allocated_size()";

TEST(AddressSanitizerInterface, GetAllocatedSizeAndOwnershipTest) {
  const size_t kArraySize = 100;
  char *array = Ident((char*)malloc(kArraySize));
  int *int_ptr = Ident(new int);

  // Allocated memory is owned by the allocator. The allocated size should
  // equal the requested size.
  EXPECT_EQ(true, __asan_get_ownership(array));
  EXPECT_EQ(kArraySize, __asan_get_allocated_size(array));
  EXPECT_EQ(true, __asan_get_ownership(int_ptr));
  EXPECT_EQ(sizeof(int), __asan_get_allocated_size(int_ptr));

  // We cannot call GetAllocatedSize on memory we didn't map, or on interior
  // pointers (pointers not returned by a previous malloc).
  void *wild_addr = (void*)0x1;
  EXPECT_EQ(false, __asan_get_ownership(wild_addr));
  EXPECT_DEATH(__asan_get_allocated_size(wild_addr), kGetAllocatedSizeErrorMsg);
  EXPECT_EQ(false, __asan_get_ownership(array + kArraySize / 2));
  EXPECT_DEATH(__asan_get_allocated_size(array + kArraySize / 2),
               kGetAllocatedSizeErrorMsg);

  // NULL is not owned, but is a valid argument for
  // __asan_get_allocated_size().
  EXPECT_EQ(false, __asan_get_ownership(NULL));
  EXPECT_EQ(0U, __asan_get_allocated_size(NULL));

  // Once memory is freed, it is no longer owned, and calling
  // GetAllocatedSize on it is forbidden.
  free(array);
  EXPECT_EQ(false, __asan_get_ownership(array));
  EXPECT_DEATH(__asan_get_allocated_size(array), kGetAllocatedSizeErrorMsg);

  delete int_ptr;
}

TEST(AddressSanitizerInterface, GetCurrentAllocatedBytesTest) {
  size_t before_malloc, after_malloc, after_free;
  char *array;
  const size_t kMallocSize = 100;
  before_malloc = __asan_get_current_allocated_bytes();

  array = Ident((char*)malloc(kMallocSize));
  after_malloc = __asan_get_current_allocated_bytes();
  EXPECT_EQ(before_malloc + kMallocSize, after_malloc);

  free(array);
  after_free = __asan_get_current_allocated_bytes();
  EXPECT_EQ(before_malloc, after_free);
}

static void DoDoubleFree() {
  int *x = Ident(new int);
  delete Ident(x);
  delete Ident(x);
}

// This test is run in a separate process, so that the large malloc'ed chunk
// won't remain in the free lists after the test.
// Note: use ASSERT_* instead of EXPECT_* here.
static void RunGetHeapSizeTestAndDie() {
  size_t old_heap_size, new_heap_size, heap_growth;
  // We are unlikely to have a chunk of this size in the free list already.
  static const size_t kLargeMallocSize = 1 << 29;  // 512M
  old_heap_size = __asan_get_heap_size();
  fprintf(stderr, "allocating %zu bytes:\n", kLargeMallocSize);
  free(Ident(malloc(kLargeMallocSize)));
  new_heap_size = __asan_get_heap_size();
  heap_growth = new_heap_size - old_heap_size;
  fprintf(stderr, "heap growth after first malloc: %zu\n", heap_growth);
  ASSERT_GE(heap_growth, kLargeMallocSize);
  ASSERT_LE(heap_growth, 2 * kLargeMallocSize);

  // Now the large chunk should be in the free list, and can be
  // allocated again without increasing the heap size.
  old_heap_size = new_heap_size;
  free(Ident(malloc(kLargeMallocSize)));
  heap_growth = __asan_get_heap_size() - old_heap_size;
  fprintf(stderr, "heap growth after second malloc: %zu\n", heap_growth);
  ASSERT_LT(heap_growth, kLargeMallocSize);

  // Test passed. Now die with the expected double-free.
  DoDoubleFree();
}

TEST(AddressSanitizerInterface, GetHeapSizeTest) {
  EXPECT_DEATH(RunGetHeapSizeTestAndDie(), "double-free");
}

// Note: use ASSERT_* instead of EXPECT_* here.
static void DoLargeMallocForGetFreeBytesTestAndDie() {
  size_t old_free_bytes, new_free_bytes;
  static const size_t kLargeMallocSize = 1 << 29;  // 512M
  // If we malloc and free a large memory chunk, it will not fall
  // into the quarantine and will be available for future requests.
  old_free_bytes = __asan_get_free_bytes();
  fprintf(stderr, "allocating %zu bytes:\n", kLargeMallocSize);
  fprintf(stderr, "free bytes before malloc: %zu\n", old_free_bytes);
  free(Ident(malloc(kLargeMallocSize)));
  new_free_bytes = __asan_get_free_bytes();
  fprintf(stderr, "free bytes after malloc and free: %zu\n", new_free_bytes);
  ASSERT_GE(new_free_bytes, old_free_bytes + kLargeMallocSize);
  // Test passed.
  DoDoubleFree();
}

TEST(AddressSanitizerInterface, GetFreeBytesTest) {
  static const size_t kNumOfChunks = 100;
  static const size_t kChunkSize = 100;
  char *chunks[kNumOfChunks];
  size_t i;
  size_t old_free_bytes, new_free_bytes;
  // Allocate one small chunk. The allocator now likely holds a number of
  // free chunks of this size ready for future requests, so each subsequent
  // allocation should decrease the number of free bytes.
  chunks[0] = Ident((char*)malloc(kChunkSize));
  old_free_bytes = __asan_get_free_bytes();
  for (i = 1; i < kNumOfChunks; i++) {
    chunks[i] = Ident((char*)malloc(kChunkSize));
    new_free_bytes = __asan_get_free_bytes();
    EXPECT_LT(new_free_bytes, old_free_bytes);
    old_free_bytes = new_free_bytes;
  }
  EXPECT_DEATH(DoLargeMallocForGetFreeBytesTestAndDie(), "double-free");
}

static const size_t kManyThreadsMallocSizes[] = {5, 1UL<<10, 1UL<<20, 357};
static const size_t kManyThreadsIterations = 250;
static const size_t kManyThreadsNumThreads = (__WORDSIZE == 32) ? 40 : 200;

void *ManyThreadsWithStatsWorker(void *arg) {
  (void)arg;
  for (size_t iter = 0; iter < kManyThreadsIterations; iter++) {
    for (size_t size_index = 0; size_index < 4; size_index++) {
      free(Ident(malloc(kManyThreadsMallocSizes[size_index])));
    }
  }
  return 0;
}

TEST(AddressSanitizerInterface, ManyThreadsWithStatsStressTest) {
  size_t before_test, after_test, i;
  pthread_t threads[kManyThreadsNumThreads];
  before_test = __asan_get_current_allocated_bytes();
  for (i = 0; i < kManyThreadsNumThreads; i++) {
    pthread_create(&threads[i], 0,
                   (void* (*)(void *x))ManyThreadsWithStatsWorker, (void*)i);
  }
  for (i = 0; i < kManyThreadsNumThreads; i++) {
    pthread_join(threads[i], 0);
  }
  after_test = __asan_get_current_allocated_bytes();
  // ASan stats also reflect memory usage of internal ASan RTL structs,
  // so we can't check for equality here.
  EXPECT_LT(after_test, before_test + (1UL<<20));
}

TEST(AddressSanitizerInterface, ExitCode) {
  int original_exit_code = __asan_set_error_exit_code(7);
  EXPECT_EXIT(DoDoubleFree(), ::testing::ExitedWithCode(7), "");
  EXPECT_EQ(7, __asan_set_error_exit_code(8));
  EXPECT_EXIT(DoDoubleFree(), ::testing::ExitedWithCode(8), "");
  EXPECT_EQ(8, __asan_set_error_exit_code(original_exit_code));
  EXPECT_EXIT(DoDoubleFree(),
              ::testing::ExitedWithCode(original_exit_code), "");
}

static void MyDeathCallback() {
  fprintf(stderr, "MyDeathCallback\n");
}

TEST(AddressSanitizerInterface, DeathCallbackTest) {
  __asan_set_death_callback(MyDeathCallback);
  EXPECT_DEATH(DoDoubleFree(), "MyDeathCallback");
  __asan_set_death_callback(NULL);
}

TEST(AddressSanitizerInterface, OnErrorCallbackTest) {
  __asan_set_on_error_callback(MyDeathCallback);
  EXPECT_DEATH(DoDoubleFree(), "MyDeathCallback.*double-free");
  __asan_set_on_error_callback(NULL);
}

static const char* kUseAfterPoisonErrorMessage = "use-after-poison";

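// Helper macros: check that the byte at ptr + offset is addressable
// (GOOD_ACCESS) or poisoned (BAD_ACCESS) according to shadow memory.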
#define GOOD_ACCESS(ptr, offset)  \
    EXPECT_FALSE(__asan::AddressIsPoisoned((uptr)(ptr + offset)))

#define BAD_ACCESS(ptr, offset) \
    EXPECT_TRUE(__asan::AddressIsPoisoned((uptr)(ptr + offset)))

TEST(AddressSanitizerInterface, SimplePoisonMemoryRegionTest) {
  char *array = Ident((char*)malloc(120));
  // poison array[40..80)
  __asan_poison_memory_region(array + 40, 40);
  GOOD_ACCESS(array, 39);
  GOOD_ACCESS(array, 80);
  BAD_ACCESS(array, 40);
  BAD_ACCESS(array, 60);
  BAD_ACCESS(array, 79);
  EXPECT_DEATH(__asan_report_error(0, 0, 0, (uptr)(array + 40), true, 1),
               kUseAfterPoisonErrorMessage);
  __asan_unpoison_memory_region(array + 40, 40);
  // access previously poisoned memory.
  GOOD_ACCESS(array, 40);
  GOOD_ACCESS(array, 79);
  free(array);
}

TEST(AddressSanitizerInterface, OverlappingPoisonMemoryRegionTest) {
  char *array = Ident((char*)malloc(120));
  // Poison [0..40) and [80..120)
  __asan_poison_memory_region(array, 40);
  __asan_poison_memory_region(array + 80, 40);
  BAD_ACCESS(array, 20);
  GOOD_ACCESS(array, 60);
  BAD_ACCESS(array, 100);
  // Poison whole array - [0..120)
  __asan_poison_memory_region(array, 120);
  BAD_ACCESS(array, 60);
  // Unpoison [24..96)
  __asan_unpoison_memory_region(array + 24, 72);
  BAD_ACCESS(array, 23);
  GOOD_ACCESS(array, 24);
  GOOD_ACCESS(array, 60);
  GOOD_ACCESS(array, 95);
  BAD_ACCESS(array, 96);
  free(array);
}

TEST(AddressSanitizerInterface, PushAndPopWithPoisoningTest) {
  // Vector of capacity 20
  char *vec = Ident((char*)malloc(20));
  __asan_poison_memory_region(vec, 20);
  for (size_t i = 0; i < 7; i++) {
    // Simulate push_back.
    __asan_unpoison_memory_region(vec + i, 1);
    GOOD_ACCESS(vec, i);
    BAD_ACCESS(vec, i + 1);
  }
  for (size_t i = 7; i > 0; i--) {
    // Simulate pop_back.
    __asan_poison_memory_region(vec + i - 1, 1);
    BAD_ACCESS(vec, i - 1);
    if (i > 1) GOOD_ACCESS(vec, i - 2);
  }
  free(vec);
}

// Make sure that within each aligned block of size 2^granularity, a "true"
// (poisoned) byte never precedes a "false" (addressable) one.
static void MakeShadowValid(bool *shadow, int length, int granularity) {
  bool can_be_poisoned = true;
  for (int i = length - 1; i >= 0; i--) {
    if (!shadow[i])
      can_be_poisoned = false;
    if (!can_be_poisoned)
      shadow[i] = false;
    if (i % (1 << granularity) == 0) {
      can_be_poisoned = true;
    }
  }
}
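
// A worked example of that invariant (granularity 3, i.e. 8-byte granules):
// the stray poisoned byte at index 2 precedes an addressable byte in the
// same granule, so MakeShadowValid must clear it.
TEST(AddressSanitizerInterface, MakeShadowValidExampleTest) {
  bool shadow[8]   = {false, false, true,  false, true, true, true, true};
  bool expected[8] = {false, false, false, false, true, true, true, true};
  MakeShadowValid(shadow, 8, /*granularity*/ 3);
  for (int i = 0; i < 8; i++)
    EXPECT_EQ(expected[i], shadow[i]);
}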

TEST(AddressSanitizerInterface, PoisoningStressTest) {
  const size_t kSize = 24;
  bool expected[kSize];
  char *arr = Ident((char*)malloc(kSize));
  for (size_t l1 = 0; l1 < kSize; l1++) {
    for (size_t s1 = 1; l1 + s1 <= kSize; s1++) {
      for (size_t l2 = 0; l2 < kSize; l2++) {
        for (size_t s2 = 1; l2 + s2 <= kSize; s2++) {
          // Poison [l1, l1+s1), [l2, l2+s2) and check result.
          __asan_unpoison_memory_region(arr, kSize);
          __asan_poison_memory_region(arr + l1, s1);
          __asan_poison_memory_region(arr + l2, s2);
          memset(expected, false, kSize);
          memset(expected + l1, true, s1);
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          memset(expected + l2, true, s2);
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          for (size_t i = 0; i < kSize; i++) {
            ASSERT_EQ(expected[i], __asan_address_is_poisoned(arr + i));
          }
          // Unpoison [l1, l1+s1) and [l2, l2+s2) and check result.
          __asan_poison_memory_region(arr, kSize);
          __asan_unpoison_memory_region(arr + l1, s1);
          __asan_unpoison_memory_region(arr + l2, s2);
          memset(expected, true, kSize);
          memset(expected + l1, false, s1);
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          memset(expected + l2, false, s2);
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          for (size_t i = 0; i < kSize; i++) {
            ASSERT_EQ(expected[i], __asan_address_is_poisoned(arr + i));
          }
        }
      }
    }
  }
}

static const char *kInvalidPoisonMessage = "invalid-poison-memory-range";
static const char *kInvalidUnpoisonMessage = "invalid-unpoison-memory-range";

TEST(AddressSanitizerInterface, DISABLED_InvalidPoisonAndUnpoisonCallsTest) {
  char *array = Ident((char*)malloc(120));
  __asan_unpoison_memory_region(array, 120);
  // Try to unpoison memory that we do not own.
  EXPECT_DEATH(__asan_unpoison_memory_region(array, 121),
               kInvalidUnpoisonMessage);
  EXPECT_DEATH(__asan_unpoison_memory_region(array - 1, 120),
               kInvalidUnpoisonMessage);

  __asan_poison_memory_region(array, 120);
  // Try to poison memory that we do not own.
  EXPECT_DEATH(__asan_poison_memory_region(array, 121), kInvalidPoisonMessage);
  EXPECT_DEATH(__asan_poison_memory_region(array - 1, 120),
               kInvalidPoisonMessage);
  free(array);
}

static void ErrorReportCallbackOneToZ(const char *report) {
  write(2, "ABCDEF", 6);
  write(2, report, strlen(report));
  write(2, "ABCDEF", 6);
  _exit(1);
}

TEST(AddressSanitizerInterface, SetErrorReportCallbackTest) {
  __asan_set_error_report_callback(ErrorReportCallbackOneToZ);
  EXPECT_DEATH(__asan_report_error(0, 0, 0, 0, true, 1),
               ASAN_PCRE_DOTALL "ABCDEF.*AddressSanitizer.*WRITE.*ABCDEF");
  __asan_set_error_report_callback(NULL);
}

TEST(AddressSanitizerInterface, GetOwnershipStressTest) {
  std::vector<char *> pointers;
  std::vector<size_t> sizes;
  const size_t kNumMallocs =
      (__WORDSIZE <= 32 || ASAN_LOW_MEMORY) ? 1 << 10 : 1 << 14;
  for (size_t i = 0; i < kNumMallocs; i++) {
    size_t size = i * 100 + 1;
    pointers.push_back((char*)malloc(size));
    sizes.push_back(size);
  }
  for (size_t i = 0; i < 4000000; i++) {
    EXPECT_FALSE(__asan_get_ownership(&pointers));
    EXPECT_FALSE(__asan_get_ownership((void*)0x1234));
    size_t idx = i % kNumMallocs;
    EXPECT_TRUE(__asan_get_ownership(pointers[idx]));
    EXPECT_EQ(sizes[idx], __asan_get_allocated_size(pointers[idx]));
  }
  for (size_t i = 0, n = pointers.size(); i < n; i++)
    free(pointers[i]);
}
713