// asan_noinst_test.cc revision 05fa3808f6ac96023cdf583a1a1b7220e5b451b8
//===-- asan_noinst_test.cc -----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// This test file should be compiled w/o asan instrumentation.
//===----------------------------------------------------------------------===//

15#include "asan_allocator.h"
16#include "asan_internal.h"
17#include "asan_mapping.h"
18#include "asan_test_utils.h"
19
20#include <assert.h>
21#include <stdio.h>
22#include <stdlib.h>
23#include <string.h>  // for memset()
24#include <algorithm>
25#include <vector>
26#include <limits>
27
#if ASAN_FLEXIBLE_MAPPING_AND_OFFSET == 1
// Manually set correct ASan mapping scale and offset, as they won't be
// exported from instrumented sources (there are none).
# define FLEXIBLE_SHADOW_SCALE kDefaultShadowScale
# if SANITIZER_ANDROID
// Android uses a zero shadow offset.
#  define FLEXIBLE_SHADOW_OFFSET (0)
# else
#  if SANITIZER_WORDSIZE == 32
#   if defined(__mips__)
// MIPS32 has a dedicated 32-bit offset constant.
#     define FLEXIBLE_SHADOW_OFFSET kMIPS32_ShadowOffset32
#   else
#     define FLEXIBLE_SHADOW_OFFSET kDefaultShadowOffset32
#   endif
#  else
#   if defined(__powerpc64__)
#    define FLEXIBLE_SHADOW_OFFSET kPPC64_ShadowOffset64
#   elif SANITIZER_MAC
#    define FLEXIBLE_SHADOW_OFFSET kDefaultShadowOffset64
#   else
// All remaining 64-bit targets use the short 64-bit offset.
#    define FLEXIBLE_SHADOW_OFFSET kDefaultShort64bitShadowOffset
#   endif
#  endif
# endif
// Exported so the ASan runtime picks up the scale/offset chosen above.
SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_mapping_scale = FLEXIBLE_SHADOW_SCALE;
SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_mapping_offset =
    FLEXIBLE_SHADOW_OFFSET;
#endif  // ASAN_FLEXIBLE_MAPPING_AND_OFFSET
55
extern "C" {
// Set specific ASan options for uninstrumented unittest.
// NOTE(review): allow_reexec=0 presumably prevents the runtime from
// re-executing the test binary (a Mac-specific mechanism) — confirm
// against the runtime flag documentation.
const char* __asan_default_options() {
  return "allow_reexec=0";
}
}  // extern "C"
62
// Make sure __asan_init is called before any test case is run.
// A static instance of this struct runs __asan_init() during static
// initialization, ahead of gtest's test execution.
struct AsanInitCaller {
  AsanInitCaller() { __asan_init(); }
};
static AsanInitCaller asan_init_caller;
68
// Sanity check that EXPECT_DEATH works in this uninstrumented binary:
// any exit with a non-zero status counts as "death".
TEST(AddressSanitizer, InternalSimpleDeathTest) {
  EXPECT_DEATH(exit(1), "");
}
72
// Exercises the ASan allocator directly with a randomized mix of aligned
// allocations and frees. Every third iteration frees a random live chunk;
// the rest allocate a chunk of random size (1..1000 bytes, occasionally
// bumped by 1-4 KB) with a random power-of-two alignment (2..1024) and
// touch its first, middle, and last bytes. All survivors are freed at the
// end. `n` is the number of iterations.
static void MallocStress(size_t n) {
  u32 seed = my_rand();
  // Three distinct synthetic stack traces so the allocation, random-free,
  // and cleanup-free sites are distinguishable in allocator metadata.
  StackTrace stack1;
  stack1.trace[0] = 0xa123;
  stack1.trace[1] = 0xa456;
  stack1.size = 2;

  StackTrace stack2;
  stack2.trace[0] = 0xb123;
  stack2.trace[1] = 0xb456;
  stack2.size = 2;

  StackTrace stack3;
  stack3.trace[0] = 0xc123;
  stack3.trace[1] = 0xc456;
  stack3.size = 2;

  std::vector<void *> vec;
  for (size_t i = 0; i < n; i++) {
    if ((i % 3) == 0) {
      // Free a randomly chosen live chunk (swap-with-back keeps the
      // vector compact without preserving order).
      if (vec.empty()) continue;
      size_t idx = my_rand_r(&seed) % vec.size();
      void *ptr = vec[idx];
      vec[idx] = vec.back();
      vec.pop_back();
      __asan::asan_free(ptr, &stack1, __asan::FROM_MALLOC);
    } else {
      size_t size = my_rand_r(&seed) % 1000 + 1;
      // Occasionally (3 in 128 iterations) push the size past the next
      // few power-of-two size classes.
      switch ((my_rand_r(&seed) % 128)) {
        case 0: size += 1024; break;
        case 1: size += 2048; break;
        case 2: size += 4096; break;
      }
      size_t alignment = 1 << (my_rand_r(&seed) % 10 + 1);
      char *ptr = (char*)__asan::asan_memalign(alignment, size,
                                               &stack2, __asan::FROM_MALLOC);
      // ASan reports the exact requested size as the usable size.
      EXPECT_EQ(size, __asan::asan_malloc_usable_size(ptr, &stack2));
      vec.push_back(ptr);
      // Touch both ends and the middle so redzone errors would trigger.
      ptr[0] = 0;
      ptr[size-1] = 0;
      ptr[size/2] = 0;
    }
  }
  for (size_t i = 0; i < vec.size(); i++)
    __asan::asan_free(vec[i], &stack3, __asan::FROM_MALLOC);
}
119
120
// Single-threaded allocator stress; fewer iterations on low-memory configs.
TEST(AddressSanitizer, NoInstMallocTest) {
  MallocStress(ASAN_LOW_MEMORY ? 300000 : 1000000);
}
124
125TEST(AddressSanitizer, ThreadedMallocStressTest) {
126  const int kNumThreads = 4;
127  const int kNumIterations = (ASAN_LOW_MEMORY) ? 10000 : 100000;
128  pthread_t t[kNumThreads];
129  for (int i = 0; i < kNumThreads; i++) {
130    PTHREAD_CREATE(&t[i], 0, (void* (*)(void *x))MallocStress,
131        (void*)kNumIterations);
132  }
133  for (int i = 0; i < kNumThreads; i++) {
134    PTHREAD_JOIN(t[i], 0);
135  }
136}
137
// Dumps to stderr the shadow bytes covering [ptr - 32, ptr + size + 32).
// Each shadow byte is printed once (consecutive addresses mapping to the
// same shadow byte are collapsed via prev_shadow); '.' marks the start
// and the end of the user region. Debug aid for the disabled test below.
static void PrintShadow(const char *tag, uptr ptr, size_t size) {
  fprintf(stderr, "%s shadow: %lx size % 3ld: ", tag, (long)ptr, (long)size);
  uptr prev_shadow = 0;
  for (sptr i = -32; i < (sptr)size + 32; i++) {
    uptr shadow = __asan::MemToShadow(ptr + i);
    if (i == 0 || i == (sptr)size)
      fprintf(stderr, ".");
    if (shadow != prev_shadow) {
      prev_shadow = shadow;
      // Dereferences the shadow address itself, not the user pointer.
      fprintf(stderr, "%02x", (int)*(u8*)shadow);
    }
  }
  fprintf(stderr, "\n");
}
152
// Debug-only (disabled) test: prints shadow state of heap chunks of every
// size 1..513, once while live ("m") and once after delete ("f").
// The second PrintShadow receives ptr after delete[], but only the
// pointer *value* is used — PrintShadow reads shadow memory, not the
// freed chunk itself.
TEST(AddressSanitizer, DISABLED_InternalPrintShadow) {
  for (size_t size = 1; size <= 513; size++) {
    char *ptr = new char[size];
    PrintShadow("m", (uptr)ptr, size);
    delete [] ptr;
    PrintShadow("f", (uptr)ptr, size);
  }
}
161
// Synthetic program-counter values used as stack-frame input for the
// stack-trace compression tests below. The 64-bit-only entries are
// compiled in only on 64-bit targets; the 32-bit-safe values follow.
static uptr pc_array[] = {
#if SANITIZER_WORDSIZE == 64
  0x7effbf756068ULL,
  0x7effbf75e5abULL,
  0x7effc0625b7cULL,
  0x7effc05b8997ULL,
  0x7effbf990577ULL,
  0x7effbf990c56ULL,
  0x7effbf992f3cULL,
  0x7effbf950c22ULL,
  0x7effc036dba0ULL,
  0x7effc03638a3ULL,
  0x7effc035be4aULL,
  0x7effc0539c45ULL,
  0x7effc0539a65ULL,
  0x7effc03db9b3ULL,
  0x7effc03db100ULL,
  0x7effc037c7b8ULL,
  0x7effc037bfffULL,
  0x7effc038b777ULL,
  0x7effc038021cULL,
  0x7effc037c7d1ULL,
  0x7effc037bfffULL,
  0x7effc038b777ULL,
  0x7effc038021cULL,
  0x7effc037c7d1ULL,
  0x7effc037bfffULL,
  0x7effc038b777ULL,
  0x7effc038021cULL,
  0x7effc037c7d1ULL,
  0x7effc037bfffULL,
  0x7effc0520d26ULL,
  0x7effc009ddffULL,
  0x7effbf90bb50ULL,
  0x7effbdddfa69ULL,
  0x7effbdde1fe2ULL,
  0x7effbdde2424ULL,
  0x7effbdde27b3ULL,
  0x7effbddee53bULL,
  0x7effbdde1988ULL,
  0x7effbdde0904ULL,
  0x7effc106ce0dULL,
  0x7effbcc3fa04ULL,
  0x7effbcc3f6a4ULL,
  0x7effbcc3e726ULL,
  0x7effbcc40852ULL,
  0x7effb681ec4dULL,
#endif  // SANITIZER_WORDSIZE
  0xB0B5E768,
  0x7B682EC1,
  0x367F9918,
  0xAE34E13,
  0xBA0C6C6,
  0x13250F46,
  0xA0D6A8AB,
  0x2B07C1A8,
  0x6C844F4A,
  0x2321B53,
  0x1F3D4F8F,
  0x3FE2924B,
  0xB7A2F568,
  0xBD23950A,
  0x61020930,
  0x33E7970C,
  0x405998A1,
  0x59F3551D,
  0x350E3028,
  0xBC55A28D,
  0x361F3AED,
  0xBEAD0F73,
  0xAEF28479,
  0x757E971F,
  0xAEBA450,
  0x43AD22F5,
  0x8C2C50C4,
  0x7AD8A2E1,
  0x69EE4EE8,
  0xC08DFF,
  0x4BA6538,
  0x3708AB2,
  0xC24B6475,
  0x7C8890D7,
  0x6662495F,
  0x9B641689,
  0xD3596B,
  0xA1049569,
  0x44CBC16,
  0x4D39C39F
};
251
252void CompressStackTraceTest(size_t n_iter) {
253  u32 seed = my_rand();
254  const size_t kNumPcs = ARRAY_SIZE(pc_array);
255  u32 compressed[2 * kNumPcs];
256
257  for (size_t iter = 0; iter < n_iter; iter++) {
258    std::random_shuffle(pc_array, pc_array + kNumPcs);
259    StackTrace stack0, stack1;
260    stack0.CopyFrom(pc_array, kNumPcs);
261    stack0.size = std::max((size_t)1, (size_t)(my_rand_r(&seed) % stack0.size));
262    size_t compress_size =
263      std::max((size_t)2, (size_t)my_rand_r(&seed) % (2 * kNumPcs));
264    size_t n_frames =
265      StackTrace::CompressStack(&stack0, compressed, compress_size);
266    Ident(n_frames);
267    assert(n_frames <= stack0.size);
268    StackTrace::UncompressStack(&stack1, compressed, compress_size);
269    assert(stack1.size == n_frames);
270    for (size_t i = 0; i < stack1.size; i++) {
271      assert(stack0.trace[i] == stack1.trace[i]);
272    }
273  }
274}
275
// Correctness round-trip for stack-trace compression (10000 random cases).
TEST(AddressSanitizer, CompressStackTraceTest) {
  CompressStackTraceTest(10000);
}
279
280void CompressStackTraceBenchmark(size_t n_iter) {
281  const size_t kNumPcs = ARRAY_SIZE(pc_array);
282  u32 compressed[2 * kNumPcs];
283  std::random_shuffle(pc_array, pc_array + kNumPcs);
284
285  StackTrace stack0;
286  stack0.CopyFrom(pc_array, kNumPcs);
287  stack0.size = kNumPcs;
288  for (size_t iter = 0; iter < n_iter; iter++) {
289    size_t compress_size = kNumPcs;
290    size_t n_frames =
291      StackTrace::CompressStack(&stack0, compressed, compress_size);
292    Ident(n_frames);
293  }
294}
295
// Throughput benchmark: 2^24 compressions of a full-size trace.
TEST(AddressSanitizer, CompressStackTraceBenchmark) {
  CompressStackTraceBenchmark(1 << 24);
}
299
// Checks that a freed chunk is not reused immediately: free a chunk, then
// malloc/free the same size repeatedly until the original address comes
// back. The quarantine should delay reuse for a long time (at least 10000
// cycles here) but not forever (the address must return within 2^30 tries).
TEST(AddressSanitizer, QuarantineTest) {
  StackTrace stack;
  stack.trace[0] = 0x890;  // synthetic PC for allocator bookkeeping
  stack.size = 1;

  const int size = 1024;
  void *p = __asan::asan_malloc(size, &stack);
  __asan::asan_free(p, &stack, __asan::FROM_MALLOC);
  size_t i;
  size_t max_i = 1 << 30;
  for (i = 0; i < max_i; i++) {
    void *p1 = __asan::asan_malloc(size, &stack);
    __asan::asan_free(p1, &stack, __asan::FROM_MALLOC);
    if (p1 == p) break;  // the quarantined address was finally recycled
  }
  EXPECT_GE(i, 10000U);
  EXPECT_LT(i, max_i);
}
318
319void *ThreadedQuarantineTestWorker(void *unused) {
320  (void)unused;
321  u32 seed = my_rand();
322  StackTrace stack;
323  stack.trace[0] = 0x890;
324  stack.size = 1;
325
326  for (size_t i = 0; i < 1000; i++) {
327    void *p = __asan::asan_malloc(1 + (my_rand_r(&seed) % 4000), &stack);
328    __asan::asan_free(p, &stack, __asan::FROM_MALLOC);
329  }
330  return NULL;
331}
332
// Check that the thread local allocators are flushed when threads are
// destroyed. Spawns and joins 3000 short-lived worker threads one at a
// time; after each join, total heap growth since the start must stay
// bounded, otherwise per-thread caches are leaking.
TEST(AddressSanitizer, ThreadedQuarantineTest) {
  const int n_threads = 3000;
  size_t mmaped1 = __asan_get_heap_size();
  for (int i = 0; i < n_threads; i++) {
    pthread_t t;
    PTHREAD_CREATE(&t, NULL, ThreadedQuarantineTestWorker, 0);
    PTHREAD_JOIN(t, 0);
    size_t mmaped2 = __asan_get_heap_size();
    // NOTE(review): assumes the heap size never drops below the initial
    // reading — otherwise this unsigned subtraction wraps; confirm
    // against __asan_get_heap_size semantics.
    EXPECT_LT(mmaped2 - mmaped1, 320U * (1 << 20));
  }
}
346
347void *ThreadedOneSizeMallocStress(void *unused) {
348  (void)unused;
349  StackTrace stack;
350  stack.trace[0] = 0x890;
351  stack.size = 1;
352  const size_t kNumMallocs = 1000;
353  for (int iter = 0; iter < 1000; iter++) {
354    void *p[kNumMallocs];
355    for (size_t i = 0; i < kNumMallocs; i++) {
356      p[i] = __asan::asan_malloc(32, &stack);
357    }
358    for (size_t i = 0; i < kNumMallocs; i++) {
359      __asan::asan_free(p[i], &stack, __asan::FROM_MALLOC);
360    }
361  }
362  return NULL;
363}
364
365TEST(AddressSanitizer, ThreadedOneSizeMallocStressTest) {
366  const int kNumThreads = 4;
367  pthread_t t[kNumThreads];
368  for (int i = 0; i < kNumThreads; i++) {
369    PTHREAD_CREATE(&t[i], 0, ThreadedOneSizeMallocStress, 0);
370  }
371  for (int i = 0; i < kNumThreads; i++) {
372    PTHREAD_JOIN(t[i], 0);
373  }
374}
375
TEST(AddressSanitizer, ShadowRegionIsPoisonedTest) {
  using __asan::kHighMemEnd;
  // Check that __asan_region_is_poisoned works for shadow regions.
  // For each of the low-shadow, shadow-gap, and high-shadow regions,
  // probing 100 bytes starting at region-begin + 200 must report the
  // very first probed address as poisoned.
  uptr ptr = kLowShadowBeg + 200;
  EXPECT_EQ(ptr, __asan_region_is_poisoned(ptr, 100));
  ptr = kShadowGapBeg + 200;
  EXPECT_EQ(ptr, __asan_region_is_poisoned(ptr, 100));
  ptr = kHighShadowBeg + 200;
  EXPECT_EQ(ptr, __asan_region_is_poisoned(ptr, 100));
}
386