// asan_noinst_test.cc revision d00ecb64892dcb03c5ae93a654da669b96753b01
//===-- asan_noinst_test.cc ----------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// This test file should be compiled w/o asan instrumentation.
//===----------------------------------------------------------------------===//
14#include "asan_allocator.h"
15#include "asan_interface.h"
16#include "asan_internal.h"
17#include "asan_mapping.h"
18#include "asan_stack.h"
19#include "asan_test_utils.h"
20
21#include <assert.h>
22#include <stdio.h>
23#include <stdlib.h>
24#include <vector>
25#include <algorithm>
26#include "gtest/gtest.h"
27
28// Simple stand-alone pseudorandom number generator.
29// Current algorithm is ANSI C linear congruential PRNG.
30static inline u32 my_rand(u32* state) {
31  return (*state = *state * 1103515245 + 12345) >> 16;
32}
33
34static u32 global_seed = 0;
35
36
// Sanity check: death tests must work at all in this (uninstrumented) binary.
TEST(AddressSanitizer, InternalSimpleDeathTest) {
  EXPECT_DEATH(exit(1), "");
}
40
41static void MallocStress(size_t n) {
42  u32 seed = my_rand(&global_seed);
43  __asan::AsanStackTrace stack1;
44  stack1.trace[0] = 0xa123;
45  stack1.trace[1] = 0xa456;
46  stack1.size = 2;
47
48  __asan::AsanStackTrace stack2;
49  stack2.trace[0] = 0xb123;
50  stack2.trace[1] = 0xb456;
51  stack2.size = 2;
52
53  __asan::AsanStackTrace stack3;
54  stack3.trace[0] = 0xc123;
55  stack3.trace[1] = 0xc456;
56  stack3.size = 2;
57
58  std::vector<void *> vec;
59  for (size_t i = 0; i < n; i++) {
60    if ((i % 3) == 0) {
61      if (vec.empty()) continue;
62      size_t idx = my_rand(&seed) % vec.size();
63      void *ptr = vec[idx];
64      vec[idx] = vec.back();
65      vec.pop_back();
66      __asan::asan_free(ptr, &stack1);
67    } else {
68      size_t size = my_rand(&seed) % 1000 + 1;
69      switch ((my_rand(&seed) % 128)) {
70        case 0: size += 1024; break;
71        case 1: size += 2048; break;
72        case 2: size += 4096; break;
73      }
74      size_t alignment = 1 << (my_rand(&seed) % 10 + 1);
75      char *ptr = (char*)__asan::asan_memalign(alignment, size, &stack2);
76      vec.push_back(ptr);
77      ptr[0] = 0;
78      ptr[size-1] = 0;
79      ptr[size/2] = 0;
80    }
81  }
82  for (size_t i = 0; i < vec.size(); i++)
83    __asan::asan_free(vec[i], &stack3);
84}
85
86
// Run the malloc/free stress without instrumentation; use a smaller
// iteration count on ARM to keep the runtime reasonable.
TEST(AddressSanitizer, NoInstMallocTest) {
#ifdef __arm__
  MallocStress(300000);
#else
  MallocStress(1000000);
#endif
}
94
95static void PrintShadow(const char *tag, uptr ptr, size_t size) {
96  fprintf(stderr, "%s shadow: %lx size % 3ld: ", tag, (long)ptr, (long)size);
97  uptr prev_shadow = 0;
98  for (sptr i = -32; i < (sptr)size + 32; i++) {
99    uptr shadow = __asan::MemToShadow(ptr + i);
100    if (i == 0 || i == (sptr)size)
101      fprintf(stderr, ".");
102    if (shadow != prev_shadow) {
103      prev_shadow = shadow;
104      fprintf(stderr, "%02x", (int)*(u8*)shadow);
105    }
106  }
107  fprintf(stderr, "\n");
108}
109
// Prints shadow memory for allocations of every size in 1..513, both while
// the chunk is live ("m") and after it is freed ("f"). Disabled: meant for
// manual inspection only.
TEST(AddressSanitizer, DISABLED_InternalPrintShadow) {
  for (size_t size = 1; size <= 513; size++) {
    char *ptr = new char[size];
    PrintShadow("m", (uptr)ptr, size);
    delete [] ptr;
    // ptr is freed here; only its shadow is inspected, not the memory.
    PrintShadow("f", (uptr)ptr, size);
  }
}
118
// A fixed pool of plausible program-counter values (the 64-bit entries are
// included only on 64-bit targets) used as raw material by the stack-trace
// compression tests and benchmark below.
static uptr pc_array[] = {
#if __WORDSIZE == 64
  0x7effbf756068ULL,
  0x7effbf75e5abULL,
  0x7effc0625b7cULL,
  0x7effc05b8997ULL,
  0x7effbf990577ULL,
  0x7effbf990c56ULL,
  0x7effbf992f3cULL,
  0x7effbf950c22ULL,
  0x7effc036dba0ULL,
  0x7effc03638a3ULL,
  0x7effc035be4aULL,
  0x7effc0539c45ULL,
  0x7effc0539a65ULL,
  0x7effc03db9b3ULL,
  0x7effc03db100ULL,
  0x7effc037c7b8ULL,
  0x7effc037bfffULL,
  0x7effc038b777ULL,
  0x7effc038021cULL,
  0x7effc037c7d1ULL,
  0x7effc037bfffULL,
  0x7effc038b777ULL,
  0x7effc038021cULL,
  0x7effc037c7d1ULL,
  0x7effc037bfffULL,
  0x7effc038b777ULL,
  0x7effc038021cULL,
  0x7effc037c7d1ULL,
  0x7effc037bfffULL,
  0x7effc0520d26ULL,
  0x7effc009ddffULL,
  0x7effbf90bb50ULL,
  0x7effbdddfa69ULL,
  0x7effbdde1fe2ULL,
  0x7effbdde2424ULL,
  0x7effbdde27b3ULL,
  0x7effbddee53bULL,
  0x7effbdde1988ULL,
  0x7effbdde0904ULL,
  0x7effc106ce0dULL,
  0x7effbcc3fa04ULL,
  0x7effbcc3f6a4ULL,
  0x7effbcc3e726ULL,
  0x7effbcc40852ULL,
  0x7effb681ec4dULL,
#endif  // __WORDSIZE
  0xB0B5E768,
  0x7B682EC1,
  0x367F9918,
  0xAE34E13,
  0xBA0C6C6,
  0x13250F46,
  0xA0D6A8AB,
  0x2B07C1A8,
  0x6C844F4A,
  0x2321B53,
  0x1F3D4F8F,
  0x3FE2924B,
  0xB7A2F568,
  0xBD23950A,
  0x61020930,
  0x33E7970C,
  0x405998A1,
  0x59F3551D,
  0x350E3028,
  0xBC55A28D,
  0x361F3AED,
  0xBEAD0F73,
  0xAEF28479,
  0x757E971F,
  0xAEBA450,
  0x43AD22F5,
  0x8C2C50C4,
  0x7AD8A2E1,
  0x69EE4EE8,
  0xC08DFF,
  0x4BA6538,
  0x3708AB2,
  0xC24B6475,
  0x7C8890D7,
  0x6662495F,
  0x9B641689,
  0xD3596B,
  0xA1049569,
  0x44CBC16,
  0x4D39C39F
};
208
209void CompressStackTraceTest(size_t n_iter) {
210  u32 seed = my_rand(&global_seed);
211  const size_t kNumPcs = ASAN_ARRAY_SIZE(pc_array);
212  u32 compressed[2 * kNumPcs];
213
214  for (size_t iter = 0; iter < n_iter; iter++) {
215    std::random_shuffle(pc_array, pc_array + kNumPcs);
216    __asan::AsanStackTrace stack0, stack1;
217    stack0.CopyFrom(pc_array, kNumPcs);
218    stack0.size = std::max((size_t)1, (size_t)(my_rand(&seed) % stack0.size));
219    size_t compress_size =
220      std::max((size_t)2, (size_t)my_rand(&seed) % (2 * kNumPcs));
221    size_t n_frames =
222      __asan::AsanStackTrace::CompressStack(&stack0, compressed, compress_size);
223    assert(n_frames <= stack0.size);
224    __asan::AsanStackTrace::UncompressStack(&stack1, compressed, compress_size);
225    assert(stack1.size == n_frames);
226    for (size_t i = 0; i < stack1.size; i++) {
227      assert(stack0.trace[i] == stack1.trace[i]);
228    }
229  }
230}
231
// Round-trip stress for stack-trace compression with random sizes.
TEST(AddressSanitizer, CompressStackTraceTest) {
  CompressStackTraceTest(10000);
}
235
236void CompressStackTraceBenchmark(size_t n_iter) {
237  const size_t kNumPcs = ASAN_ARRAY_SIZE(pc_array);
238  u32 compressed[2 * kNumPcs];
239  std::random_shuffle(pc_array, pc_array + kNumPcs);
240
241  __asan::AsanStackTrace stack0;
242  stack0.CopyFrom(pc_array, kNumPcs);
243  stack0.size = kNumPcs;
244  for (size_t iter = 0; iter < n_iter; iter++) {
245    size_t compress_size = kNumPcs;
246    size_t n_frames =
247      __asan::AsanStackTrace::CompressStack(&stack0, compressed, compress_size);
248    Ident(n_frames);
249  }
250}
251
// Benchmark wrapper: 1<<24 compression passes over the full PC pool.
TEST(AddressSanitizer, CompressStackTraceBenchmark) {
  CompressStackTraceBenchmark(1 << 24);
}
255
256TEST(AddressSanitizer, QuarantineTest) {
257  __asan::AsanStackTrace stack;
258  stack.trace[0] = 0x890;
259  stack.size = 1;
260
261  const int size = 32;
262  void *p = __asan::asan_malloc(size, &stack);
263  __asan::asan_free(p, &stack);
264  size_t i;
265  size_t max_i = 1 << 30;
266  for (i = 0; i < max_i; i++) {
267    void *p1 = __asan::asan_malloc(size, &stack);
268    __asan::asan_free(p1, &stack);
269    if (p1 == p) break;
270  }
271  // fprintf(stderr, "i=%ld\n", i);
272  EXPECT_GE(i, 100000U);
273  EXPECT_LT(i, max_i);
274}
275
276void *ThreadedQuarantineTestWorker(void *unused) {
277  u32 seed = my_rand(&global_seed);
278  __asan::AsanStackTrace stack;
279  stack.trace[0] = 0x890;
280  stack.size = 1;
281
282  for (size_t i = 0; i < 1000; i++) {
283    void *p = __asan::asan_malloc(1 + (my_rand(&seed) % 4000), &stack);
284    __asan::asan_free(p, &stack);
285  }
286  return NULL;
287}
288
289// Check that the thread local allocators are flushed when threads are
290// destroyed.
291TEST(AddressSanitizer, ThreadedQuarantineTest) {
292  const int n_threads = 3000;
293  size_t mmaped1 = __asan_get_heap_size();
294  for (int i = 0; i < n_threads; i++) {
295    pthread_t t;
296    pthread_create(&t, NULL, ThreadedQuarantineTestWorker, 0);
297    pthread_join(t, 0);
298    size_t mmaped2 = __asan_get_heap_size();
299    EXPECT_LT(mmaped2 - mmaped1, 320U * (1 << 20));
300  }
301}
302
303void *ThreadedOneSizeMallocStress(void *unused) {
304  __asan::AsanStackTrace stack;
305  stack.trace[0] = 0x890;
306  stack.size = 1;
307  const size_t kNumMallocs = 1000;
308  for (int iter = 0; iter < 1000; iter++) {
309    void *p[kNumMallocs];
310    for (size_t i = 0; i < kNumMallocs; i++) {
311      p[i] = __asan::asan_malloc(32, &stack);
312    }
313    for (size_t i = 0; i < kNumMallocs; i++) {
314      __asan::asan_free(p[i], &stack);
315    }
316  }
317  return NULL;
318}
319
320TEST(AddressSanitizer, ThreadedOneSizeMallocStressTest) {
321  const int kNumThreads = 4;
322  pthread_t t[kNumThreads];
323  for (int i = 0; i < kNumThreads; i++) {
324    pthread_create(&t[i], 0, ThreadedOneSizeMallocStress, 0);
325  }
326  for (int i = 0; i < kNumThreads; i++) {
327    pthread_join(t[i], 0);
328  }
329}
330
331TEST(AddressSanitizerInterface, GetEstimatedAllocatedSize) {
332  EXPECT_EQ(1U, __asan_get_estimated_allocated_size(0));
333  const size_t sizes[] = { 1, 30, 1<<30 };
334  for (size_t i = 0; i < 3; i++) {
335    EXPECT_EQ(sizes[i], __asan_get_estimated_allocated_size(sizes[i]));
336  }
337}
338
// Fragment of the report printed when __asan_get_allocated_size() is
// called on memory the allocator does not own.
static const char* kGetAllocatedSizeErrorMsg =
  "attempting to call __asan_get_allocated_size()";
341
// Checks __asan_get_ownership() / __asan_get_allocated_size() for live,
// interior, wild, NULL, and freed pointers.
TEST(AddressSanitizerInterface, GetAllocatedSizeAndOwnershipTest) {
  const size_t kArraySize = 100;
  char *array = Ident((char*)malloc(kArraySize));
  int *int_ptr = Ident(new int);

  // Allocated memory is owned by allocator. Allocated size should be
  // equal to requested size.
  EXPECT_EQ(true, __asan_get_ownership(array));
  EXPECT_EQ(kArraySize, __asan_get_allocated_size(array));
  EXPECT_EQ(true, __asan_get_ownership(int_ptr));
  EXPECT_EQ(sizeof(int), __asan_get_allocated_size(int_ptr));

  // We cannot call GetAllocatedSize from the memory we didn't map,
  // and from the interior pointers (not returned by previous malloc).
  void *wild_addr = (void*)0x1;
  EXPECT_EQ(false, __asan_get_ownership(wild_addr));
  EXPECT_DEATH(__asan_get_allocated_size(wild_addr), kGetAllocatedSizeErrorMsg);
  EXPECT_EQ(false, __asan_get_ownership(array + kArraySize / 2));
  EXPECT_DEATH(__asan_get_allocated_size(array + kArraySize / 2),
               kGetAllocatedSizeErrorMsg);

  // NULL is not owned, but is a valid argument for __asan_get_allocated_size().
  EXPECT_EQ(false, __asan_get_ownership(NULL));
  EXPECT_EQ(0U, __asan_get_allocated_size(NULL));

  // When memory is freed, it's not owned, and call to GetAllocatedSize
  // is forbidden.
  free(array);
  EXPECT_EQ(false, __asan_get_ownership(array));
  EXPECT_DEATH(__asan_get_allocated_size(array), kGetAllocatedSizeErrorMsg);

  delete int_ptr;
}
375
376TEST(AddressSanitizerInterface, GetCurrentAllocatedBytesTest) {
377  size_t before_malloc, after_malloc, after_free;
378  char *array;
379  const size_t kMallocSize = 100;
380  before_malloc = __asan_get_current_allocated_bytes();
381
382  array = Ident((char*)malloc(kMallocSize));
383  after_malloc = __asan_get_current_allocated_bytes();
384  EXPECT_EQ(before_malloc + kMallocSize, after_malloc);
385
386  free(array);
387  after_free = __asan_get_current_allocated_bytes();
388  EXPECT_EQ(before_malloc, after_free);
389}
390
391static void DoDoubleFree() {
392  int *x = Ident(new int);
393  delete Ident(x);
394  delete Ident(x);
395}
396
397// This test is run in a separate process, so that large malloced
398// chunk won't remain in the free lists after the test.
399// Note: use ASSERT_* instead of EXPECT_* here.
400static void RunGetHeapSizeTestAndDie() {
401  size_t old_heap_size, new_heap_size, heap_growth;
402  // We unlikely have have chunk of this size in free list.
403  static const size_t kLargeMallocSize = 1 << 29;  // 512M
404  old_heap_size = __asan_get_heap_size();
405  fprintf(stderr, "allocating %zu bytes:\n", kLargeMallocSize);
406  free(Ident(malloc(kLargeMallocSize)));
407  new_heap_size = __asan_get_heap_size();
408  heap_growth = new_heap_size - old_heap_size;
409  fprintf(stderr, "heap growth after first malloc: %zu\n", heap_growth);
410  ASSERT_GE(heap_growth, kLargeMallocSize);
411  ASSERT_LE(heap_growth, 2 * kLargeMallocSize);
412
413  // Now large chunk should fall into free list, and can be
414  // allocated without increasing heap size.
415  old_heap_size = new_heap_size;
416  free(Ident(malloc(kLargeMallocSize)));
417  heap_growth = __asan_get_heap_size() - old_heap_size;
418  fprintf(stderr, "heap growth after second malloc: %zu\n", heap_growth);
419  ASSERT_LT(heap_growth, kLargeMallocSize);
420
421  // Test passed. Now die with expected double-free.
422  DoDoubleFree();
423}
424
// Runs the heap-size checks in a child process (via the death test) so the
// 512M chunk does not linger in this process's free lists.
TEST(AddressSanitizerInterface, GetHeapSizeTest) {
  EXPECT_DEATH(RunGetHeapSizeTestAndDie(), "double-free");
}
428
429// Note: use ASSERT_* instead of EXPECT_* here.
430static void DoLargeMallocForGetFreeBytesTestAndDie() {
431  size_t old_free_bytes, new_free_bytes;
432  static const size_t kLargeMallocSize = 1 << 29;  // 512M
433  // If we malloc and free a large memory chunk, it will not fall
434  // into quarantine and will be available for future requests.
435  old_free_bytes = __asan_get_free_bytes();
436  fprintf(stderr, "allocating %zu bytes:\n", kLargeMallocSize);
437  fprintf(stderr, "free bytes before malloc: %zu\n", old_free_bytes);
438  free(Ident(malloc(kLargeMallocSize)));
439  new_free_bytes = __asan_get_free_bytes();
440  fprintf(stderr, "free bytes after malloc and free: %zu\n", new_free_bytes);
441  ASSERT_GE(new_free_bytes, old_free_bytes + kLargeMallocSize);
442  // Test passed.
443  DoDoubleFree();
444}
445
446TEST(AddressSanitizerInterface, GetFreeBytesTest) {
447  static const size_t kNumOfChunks = 100;
448  static const size_t kChunkSize = 100;
449  char *chunks[kNumOfChunks];
450  size_t i;
451  size_t old_free_bytes, new_free_bytes;
452  // Allocate a small chunk. Now allocator probably has a lot of these
453  // chunks to fulfill future requests. So, future requests will decrease
454  // the number of free bytes.
455  chunks[0] = Ident((char*)malloc(kChunkSize));
456  old_free_bytes = __asan_get_free_bytes();
457  for (i = 1; i < kNumOfChunks; i++) {
458    chunks[i] = Ident((char*)malloc(kChunkSize));
459    new_free_bytes = __asan_get_free_bytes();
460    EXPECT_LT(new_free_bytes, old_free_bytes);
461    old_free_bytes = new_free_bytes;
462  }
463  EXPECT_DEATH(DoLargeMallocForGetFreeBytesTestAndDie(), "double-free");
464}
465
// Parameters for the multi-threaded allocation-stats stress test below.
static const size_t kManyThreadsMallocSizes[] = {5, 1UL<<10, 1UL<<20, 357};
static const size_t kManyThreadsIterations = 250;
static const size_t kManyThreadsNumThreads = 200;
469
470void *ManyThreadsWithStatsWorker(void *arg) {
471  for (size_t iter = 0; iter < kManyThreadsIterations; iter++) {
472    for (size_t size_index = 0; size_index < 4; size_index++) {
473      free(Ident(malloc(kManyThreadsMallocSizes[size_index])));
474    }
475  }
476  return 0;
477}
478
479TEST(AddressSanitizerInterface, ManyThreadsWithStatsStressTest) {
480  size_t before_test, after_test, i;
481  pthread_t threads[kManyThreadsNumThreads];
482  before_test = __asan_get_current_allocated_bytes();
483  for (i = 0; i < kManyThreadsNumThreads; i++) {
484    pthread_create(&threads[i], 0,
485                   (void* (*)(void *x))ManyThreadsWithStatsWorker, (void*)i);
486  }
487  for (i = 0; i < kManyThreadsNumThreads; i++) {
488    pthread_join(threads[i], 0);
489  }
490  after_test = __asan_get_current_allocated_bytes();
491  // ASan stats also reflect memory usage of internal ASan RTL structs,
492  // so we can't check for equality here.
493  EXPECT_LT(after_test, before_test + (1UL<<20));
494}
495
// __asan_set_error_exit_code() installs a new code and returns the old
// one; verify each installed code is actually used when ASan aborts.
TEST(AddressSanitizerInterface, ExitCode) {
  int original_exit_code = __asan_set_error_exit_code(7);
  EXPECT_EXIT(DoDoubleFree(), ::testing::ExitedWithCode(7), "");
  EXPECT_EQ(7, __asan_set_error_exit_code(8));
  EXPECT_EXIT(DoDoubleFree(), ::testing::ExitedWithCode(8), "");
  EXPECT_EQ(8, __asan_set_error_exit_code(original_exit_code));
  EXPECT_EXIT(DoDoubleFree(),
              ::testing::ExitedWithCode(original_exit_code), "");
}
505
// Death callback that prints a recognizable marker to stderr; tests match
// this string in their EXPECT_DEATH pattern.
static void MyDeathCallback() {
  fprintf(stderr, "MyDeathCallback\n");
}
509
// The installed death callback must run (and print) before ASan dies.
TEST(AddressSanitizerInterface, DeathCallbackTest) {
  __asan_set_death_callback(MyDeathCallback);
  EXPECT_DEATH(DoDoubleFree(), "MyDeathCallback");
  __asan_set_death_callback(NULL);  // restore the default for later tests
}
515
static const char* kUseAfterPoisonErrorMessage = "use-after-poison";

// Assert helpers: check the poison state of ptr[offset] via the shadow.
#define GOOD_ACCESS(ptr, offset)  \
    EXPECT_FALSE(__asan::AddressIsPoisoned((uptr)(ptr + offset)))

#define BAD_ACCESS(ptr, offset) \
    EXPECT_TRUE(__asan::AddressIsPoisoned((uptr)(ptr + offset)))
523
// Poison a sub-range of a heap block, verify the boundary bytes, and check
// that reporting an access inside the range yields "use-after-poison".
TEST(AddressSanitizerInterface, SimplePoisonMemoryRegionTest) {
  char *array = Ident((char*)malloc(120));
  // poison array[40..80)
  __asan_poison_memory_region(array + 40, 40);
  GOOD_ACCESS(array, 39);
  GOOD_ACCESS(array, 80);
  BAD_ACCESS(array, 40);
  BAD_ACCESS(array, 60);
  BAD_ACCESS(array, 79);
  EXPECT_DEATH(__asan_report_error(0, 0, 0, (uptr)(array + 40), true, 1),
               kUseAfterPoisonErrorMessage);
  __asan_unpoison_memory_region(array + 40, 40);
  // access previously poisoned memory.
  GOOD_ACCESS(array, 40);
  GOOD_ACCESS(array, 79);
  free(array);
}
541
// Overlapping poison/unpoison calls: later calls win over earlier ones for
// the bytes they cover.
TEST(AddressSanitizerInterface, OverlappingPoisonMemoryRegionTest) {
  char *array = Ident((char*)malloc(120));
  // Poison [0..40) and [80..120)
  __asan_poison_memory_region(array, 40);
  __asan_poison_memory_region(array + 80, 40);
  BAD_ACCESS(array, 20);
  GOOD_ACCESS(array, 60);
  BAD_ACCESS(array, 100);
  // Poison whole array - [0..120)
  __asan_poison_memory_region(array, 120);
  BAD_ACCESS(array, 60);
  // Unpoison [24..96)
  __asan_unpoison_memory_region(array + 24, 72);
  BAD_ACCESS(array, 23);
  GOOD_ACCESS(array, 24);
  GOOD_ACCESS(array, 60);
  GOOD_ACCESS(array, 95);
  BAD_ACCESS(array, 96);
  free(array);
}
562
563TEST(AddressSanitizerInterface, PushAndPopWithPoisoningTest) {
564  // Vector of capacity 20
565  char *vec = Ident((char*)malloc(20));
566  __asan_poison_memory_region(vec, 20);
567  for (size_t i = 0; i < 7; i++) {
568    // Simulate push_back.
569    __asan_unpoison_memory_region(vec + i, 1);
570    GOOD_ACCESS(vec, i);
571    BAD_ACCESS(vec, i + 1);
572  }
573  for (size_t i = 7; i > 0; i--) {
574    // Simulate pop_back.
575    __asan_poison_memory_region(vec + i - 1, 1);
576    BAD_ACCESS(vec, i - 1);
577    if (i > 1) GOOD_ACCESS(vec, i - 2);
578  }
579  free(vec);
580}
581
// Make sure that each aligned block of size "2^granularity" doesn't have
// "true" value before "false" value.
static void MakeShadowValid(bool *shadow, int length, int granularity) {
  // Scan right-to-left within each 2^granularity-aligned block: once a
  // false is seen, every earlier (lower-index) true in that block is
  // cleared, mirroring how real shadow bytes poison only a suffix.
  bool poison_allowed = true;
  for (int i = length - 1; i >= 0; i--) {
    if (!shadow[i]) poison_allowed = false;
    if (!poison_allowed) shadow[i] = false;
    if (i % (1 << granularity) == 0) poison_allowed = true;  // block start
  }
}
594
// Exhaustively poisons/unpoisons every pair of subranges of a 24-byte heap
// block and compares the real shadow state against a boolean model that
// MakeShadowValid adjusts for the 2^3-byte shadow granularity.
TEST(AddressSanitizerInterface, PoisoningStressTest) {
  const size_t kSize = 24;
  bool expected[kSize];
  char *arr = Ident((char*)malloc(kSize));
  // NOTE(review): arr is never freed — looks intentional for a test binary,
  // but confirm.
  for (size_t l1 = 0; l1 < kSize; l1++) {
    for (size_t s1 = 1; l1 + s1 <= kSize; s1++) {
      for (size_t l2 = 0; l2 < kSize; l2++) {
        for (size_t s2 = 1; l2 + s2 <= kSize; s2++) {
          // Poison [l1, l1+s1), [l2, l2+s2) and check result.
          __asan_unpoison_memory_region(arr, kSize);
          __asan_poison_memory_region(arr + l1, s1);
          __asan_poison_memory_region(arr + l2, s2);
          memset(expected, false, kSize);
          memset(expected + l1, true, s1);
          MakeShadowValid(expected, 24, /*granularity*/ 3);
          memset(expected + l2, true, s2);
          MakeShadowValid(expected, 24, /*granularity*/ 3);
          for (size_t i = 0; i < kSize; i++) {
            ASSERT_EQ(expected[i], __asan_address_is_poisoned(arr + i));
          }
          // Unpoison [l1, l1+s1) and [l2, l2+s2) and check result.
          __asan_poison_memory_region(arr, kSize);
          __asan_unpoison_memory_region(arr + l1, s1);
          __asan_unpoison_memory_region(arr + l2, s2);
          memset(expected, true, kSize);
          memset(expected + l1, false, s1);
          MakeShadowValid(expected, 24, /*granularity*/ 3);
          memset(expected + l2, false, s2);
          MakeShadowValid(expected, 24, /*granularity*/ 3);
          for (size_t i = 0; i < kSize; i++) {
            ASSERT_EQ(expected[i], __asan_address_is_poisoned(arr + i));
          }
        }
      }
    }
  }
}
632
// Report fragments produced by invalid poison/unpoison requests.
static const char *kInvalidPoisonMessage = "invalid-poison-memory-range";
static const char *kInvalidUnpoisonMessage = "invalid-unpoison-memory-range";
635
// Poisoning/unpoisoning past a chunk's bounds should be reported as an
// invalid range. Disabled in this revision.
TEST(AddressSanitizerInterface, DISABLED_InvalidPoisonAndUnpoisonCallsTest) {
  char *array = Ident((char*)malloc(120));
  __asan_unpoison_memory_region(array, 120);
  // Try to unpoison not owned memory
  EXPECT_DEATH(__asan_unpoison_memory_region(array, 121),
               kInvalidUnpoisonMessage);
  EXPECT_DEATH(__asan_unpoison_memory_region(array - 1, 120),
               kInvalidUnpoisonMessage);

  __asan_poison_memory_region(array, 120);
  // Try to poison not owned memory.
  EXPECT_DEATH(__asan_poison_memory_region(array, 121), kInvalidPoisonMessage);
  EXPECT_DEATH(__asan_poison_memory_region(array - 1, 120),
               kInvalidPoisonMessage);
  free(array);
}
652
// Report callback for SetErrorReportCallbackTest: copies the report text,
// replaces every '1' with 'Z', and writes the result to stderr (fd 2).
static void ErrorReportCallbackOneToZ(const char *report) {
  int len = strlen(report);
  // BUG FIX: the buffer must also hold the terminating NUL that strcpy
  // writes; malloc(len) overflowed the heap block by one byte.
  char *dup = (char*)malloc(len + 1);
  if (dup == NULL) return;  // best-effort: drop the report on OOM
  strcpy(dup, report);
  for (int i = 0; i < len; i++) {
    if (dup[i] == '1') dup[i] = 'Z';
  }
  write(2, dup, len);
  free(dup);
}
663
// The installed report callback must transform the report text ("size 1"
// becomes "size Z") before it reaches stderr.
TEST(AddressSanitizerInterface, SetErrorReportCallbackTest) {
  __asan_set_error_report_callback(ErrorReportCallbackOneToZ);
  EXPECT_DEATH(__asan_report_error(0, 0, 0, 0, true, 1), "size Z");
  __asan_set_error_report_callback(NULL);
}
669
// Hammers __asan_get_ownership()/__asan_get_allocated_size() with live heap
// pointers, a stack address, and a wild address.
TEST(AddressSanitizerInterface, GetOwnershipStressTest) {
  std::vector<char *> pointers;
  std::vector<size_t> sizes;
  const size_t kNumMallocs =
      (__WORDSIZE <= 32 || ASAN_LOW_MEMORY) ? 1 << 10 : 1 << 14;
  for (size_t i = 0; i < kNumMallocs; i++) {
    size_t size = i * 100 + 1;
    pointers.push_back((char*)malloc(size));
    sizes.push_back(size);
  }
  for (size_t i = 0; i < 4000000; i++) {
    // &pointers is a stack address; neither it nor the wild address is
    // ever owned by the allocator.
    EXPECT_FALSE(__asan_get_ownership(&pointers));
    EXPECT_FALSE(__asan_get_ownership((void*)0x1234));
    size_t idx = i % kNumMallocs;
    EXPECT_TRUE(__asan_get_ownership(pointers[idx]));
    EXPECT_EQ(sizes[idx], __asan_get_allocated_size(pointers[idx]));
  }
  for (size_t i = 0, n = pointers.size(); i < n; i++)
    free(pointers[i]);
}
690
// Test entry point. The threadsafe death-test style is required because
// several tests here spawn threads.
int main(int argc, char **argv) {
  testing::GTEST_FLAG(death_test_style) = "threadsafe";
  testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
696