//===-- asan_noinst_test.cc -----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// This test file should be compiled w/o asan instrumentation.
//===----------------------------------------------------------------------===//

#include "asan_allocator.h"
#include "asan_internal.h"
#include "asan_mapping.h"
#include "asan_test_utils.h"
#include <sanitizer/allocator_interface.h>

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>  // for memset()
#include <algorithm>
#include <vector>
#include <limits>

// ATTENTION!
// Please don't call intercepted functions (including malloc() and friends)
// in this test. The static runtime library is linked explicitly (without
// -fsanitize=address), thus the interceptors do not work correctly on OS X.

// Make sure __asan_init is called before any test case is run.
struct AsanInitCaller {
  AsanInitCaller() {
    __asan::DisableReexec();
    __asan_init();
  }
};
static AsanInitCaller asan_init_caller;

TEST(AddressSanitizer, InternalSimpleDeathTest) {
  EXPECT_DEATH(exit(1), "");
}

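// Randomly allocates and frees chunks through the internal allocator API
// (asan_memalign/asan_free with fake stack traces): roughly every third
// iteration frees a random live chunk, the rest allocate a chunk of random
// size and alignment, check its usable size, and touch its first, middle
// and last bytes.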
static void MallocStress(size_t n) {
  u32 seed = my_rand();
  BufferedStackTrace stack1;
  stack1.trace_buffer[0] = 0xa123;
  stack1.trace_buffer[1] = 0xa456;
  stack1.size = 2;

  BufferedStackTrace stack2;
  stack2.trace_buffer[0] = 0xb123;
  stack2.trace_buffer[1] = 0xb456;
  stack2.size = 2;

  BufferedStackTrace stack3;
  stack3.trace_buffer[0] = 0xc123;
  stack3.trace_buffer[1] = 0xc456;
  stack3.size = 2;

  std::vector<void *> vec;
  for (size_t i = 0; i < n; i++) {
    if ((i % 3) == 0) {
      if (vec.empty()) continue;
      size_t idx = my_rand_r(&seed) % vec.size();
      void *ptr = vec[idx];
      vec[idx] = vec.back();
      vec.pop_back();
      __asan::asan_free(ptr, &stack1, __asan::FROM_MALLOC);
    } else {
      size_t size = my_rand_r(&seed) % 1000 + 1;
      switch ((my_rand_r(&seed) % 128)) {
        case 0: size += 1024; break;
        case 1: size += 2048; break;
        case 2: size += 4096; break;
      }
      size_t alignment = 1 << (my_rand_r(&seed) % 10 + 1);
      char *ptr = (char*)__asan::asan_memalign(alignment, size,
                                               &stack2, __asan::FROM_MALLOC);
      EXPECT_EQ(size, __asan::asan_malloc_usable_size(ptr, 0, 0));
      vec.push_back(ptr);
      ptr[0] = 0;
      ptr[size-1] = 0;
      ptr[size/2] = 0;
    }
  }
  for (size_t i = 0; i < vec.size(); i++)
    __asan::asan_free(vec[i], &stack3, __asan::FROM_MALLOC);
}


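// Single-threaded allocator stress; iteration count is reduced in
// low-memory configurations.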
TEST(AddressSanitizer, NoInstMallocTest) {
  MallocStress(ASAN_LOW_MEMORY ? 300000 : 1000000);
}

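// Run MallocStress concurrently from several threads to exercise the
// allocator under contention.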
TEST(AddressSanitizer, ThreadedMallocStressTest) {
  const int kNumThreads = 4;
  const int kNumIterations = (ASAN_LOW_MEMORY) ? 10000 : 100000;
  pthread_t t[kNumThreads];
  for (int i = 0; i < kNumThreads; i++) {
    PTHREAD_CREATE(&t[i], 0, (void* (*)(void *x))MallocStress,
        (void*)kNumIterations);
  }
  for (int i = 0; i < kNumThreads; i++) {
    PTHREAD_JOIN(t[i], 0);
  }
}

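// Dumps the shadow bytes covering [ptr - 32, ptr + size + 32), printing each
// distinct shadow byte once and marking the region boundaries with '.'.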
static void PrintShadow(const char *tag, uptr ptr, size_t size) {
  fprintf(stderr, "%s shadow: %lx size % 3ld: ", tag, (long)ptr, (long)size);
  uptr prev_shadow = 0;
  for (sptr i = -32; i < (sptr)size + 32; i++) {
    uptr shadow = __asan::MemToShadow(ptr + i);
    if (i == 0 || i == (sptr)size)
      fprintf(stderr, ".");
    if (shadow != prev_shadow) {
      prev_shadow = shadow;
      fprintf(stderr, "%02x", (int)*(u8*)shadow);
    }
  }
  fprintf(stderr, "\n");
}

TEST(AddressSanitizer, DISABLED_InternalPrintShadow) {
  for (size_t size = 1; size <= 513; size++) {
    char *ptr = new char[size];
    PrintShadow("m", (uptr)ptr, size);
    delete [] ptr;
    PrintShadow("f", (uptr)ptr, size);
  }
}

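// A freed chunk should not be reused immediately: allocate and free the same
// size repeatedly and expect many iterations (the quarantine) to pass before
// the original address comes back.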
TEST(AddressSanitizer, QuarantineTest) {
  BufferedStackTrace stack;
  stack.trace_buffer[0] = 0x890;
  stack.size = 1;

  const int size = 1024;
  void *p = __asan::asan_malloc(size, &stack);
  __asan::asan_free(p, &stack, __asan::FROM_MALLOC);
  size_t i;
  size_t max_i = 1 << 30;
  for (i = 0; i < max_i; i++) {
    void *p1 = __asan::asan_malloc(size, &stack);
    __asan::asan_free(p1, &stack, __asan::FROM_MALLOC);
    if (p1 == p) break;
  }
  EXPECT_GE(i, 10000U);
  EXPECT_LT(i, max_i);
}

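// Worker for ThreadedQuarantineTest: a short burst of random-sized
// allocations and frees, so each short-lived thread leaves data in its
// thread-local allocator cache and quarantine.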
void *ThreadedQuarantineTestWorker(void *unused) {
  (void)unused;
  u32 seed = my_rand();
  BufferedStackTrace stack;
  stack.trace_buffer[0] = 0x890;
  stack.size = 1;

  for (size_t i = 0; i < 1000; i++) {
    void *p = __asan::asan_malloc(1 + (my_rand_r(&seed) % 4000), &stack);
    __asan::asan_free(p, &stack, __asan::FROM_MALLOC);
  }
  return NULL;
}

// Check that the thread local allocators are flushed when threads are
// destroyed.
TEST(AddressSanitizer, ThreadedQuarantineTest) {
  const int n_threads = 3000;
  size_t mmaped1 = __sanitizer_get_heap_size();
  for (int i = 0; i < n_threads; i++) {
    pthread_t t;
    PTHREAD_CREATE(&t, NULL, ThreadedQuarantineTestWorker, 0);
    PTHREAD_JOIN(t, 0);
    size_t mmaped2 = __sanitizer_get_heap_size();
    EXPECT_LT(mmaped2 - mmaped1, 320U * (1 << 20));
  }
}

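// Repeatedly allocates and frees batches of same-sized (32-byte) chunks;
// used below from several threads at once.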
void *ThreadedOneSizeMallocStress(void *unused) {
  (void)unused;
  BufferedStackTrace stack;
  stack.trace_buffer[0] = 0x890;
  stack.size = 1;
  const size_t kNumMallocs = 1000;
  for (int iter = 0; iter < 1000; iter++) {
    void *p[kNumMallocs];
    for (size_t i = 0; i < kNumMallocs; i++) {
      p[i] = __asan::asan_malloc(32, &stack);
    }
    for (size_t i = 0; i < kNumMallocs; i++) {
      __asan::asan_free(p[i], &stack, __asan::FROM_MALLOC);
    }
  }
  return NULL;
}

TEST(AddressSanitizer, ThreadedOneSizeMallocStressTest) {
  const int kNumThreads = 4;
  pthread_t t[kNumThreads];
  for (int i = 0; i < kNumThreads; i++) {
    PTHREAD_CREATE(&t[i], 0, ThreadedOneSizeMallocStress, 0);
  }
  for (int i = 0; i < kNumThreads; i++) {
    PTHREAD_JOIN(t[i], 0);
  }
}

TEST(AddressSanitizer, ShadowRegionIsPoisonedTest) {
  using __asan::kHighMemEnd;
  // Check that __asan_region_is_poisoned works for shadow regions.
  uptr ptr = kLowShadowBeg + 200;
  EXPECT_EQ(ptr, __asan_region_is_poisoned(ptr, 100));
  ptr = kShadowGapBeg + 200;
  EXPECT_EQ(ptr, __asan_region_is_poisoned(ptr, 100));
  ptr = kHighShadowBeg + 200;
  EXPECT_EQ(ptr, __asan_region_is_poisoned(ptr, 100));
}

// Test __asan_load1 & friends.
TEST(AddressSanitizer, LoadStoreCallbacks) {
  typedef void (*CB)(uptr p);
  CB cb[2][5] = {
      {
        __asan_load1, __asan_load2, __asan_load4, __asan_load8, __asan_load16,
      }, {
        __asan_store1, __asan_store2, __asan_store4, __asan_store8,
        __asan_store16,
      }
  };

  uptr buggy_ptr;

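  // While this test-only hook is set, a bad access detected by the
  // __asan_load*/__asan_store* callbacks records the offending address here
  // instead of producing a fatal report.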
  __asan_test_only_reported_buggy_pointer = &buggy_ptr;
  BufferedStackTrace stack;
  stack.trace_buffer[0] = 0x890;
  stack.size = 1;

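  // Probe every callback at size-aligned offsets: an access that fits inside
  // the chunk must leave buggy_ptr untouched, while one that extends past the
  // end must record the accessed address.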
  for (uptr len = 16; len <= 32; len++) {
    char *ptr = (char*) __asan::asan_malloc(len, &stack);
    uptr p = reinterpret_cast<uptr>(ptr);
    for (uptr is_write = 0; is_write <= 1; is_write++) {
      for (uptr size_log = 0; size_log <= 4; size_log++) {
        uptr size = 1 << size_log;
        CB call = cb[is_write][size_log];
        // Iterate only size-aligned offsets.
        for (uptr offset = 0; offset <= len; offset += size) {
          buggy_ptr = 0;
          call(p + offset);
          if (offset + size <= len)
            EXPECT_EQ(buggy_ptr, 0U);
          else
            EXPECT_EQ(buggy_ptr, p + offset);
        }
      }
    }
    __asan::asan_free(ptr, &stack, __asan::FROM_MALLOC);
  }
  __asan_test_only_reported_buggy_pointer = 0;
}
