asan_noinst_test.cc revision 2d1fdb26e458c4ddc04155c1d421bced3ba90cd0
//===-- asan_noinst_test.cc -----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// This test file should be compiled w/o asan instrumentation.
//===----------------------------------------------------------------------===//

#include "asan_allocator.h"
#include "asan_internal.h"
#include "asan_mapping.h"
#include "asan_test_utils.h"

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>  // for memset()
#include <algorithm>
#include <vector>
#include <limits>

// ATTENTION!
// Please don't call intercepted functions (including malloc() and friends)
// in this test. The static runtime library is linked explicitly (without
// -fsanitize=address), thus the interceptors do not work correctly on OS X.

#if !defined(_WIN32)
extern "C" {
// Set specific ASan options for uninstrumented unittest.
const char* __asan_default_options() {
  return "allow_reexec=0";
}
}  // extern "C"
#endif

// Make sure __asan_init is called before any test case is run.
struct AsanInitCaller {
  AsanInitCaller() { __asan_init(); }
};
static AsanInitCaller asan_init_caller;

TEST(AddressSanitizer, InternalSimpleDeathTest) {
  EXPECT_DEATH(exit(1), "");
}

static void MallocStress(size_t n) {
  u32 seed = my_rand();
  StackTrace stack1;
  stack1.trace[0] = 0xa123;
  stack1.trace[1] = 0xa456;
  stack1.size = 2;

  StackTrace stack2;
  stack2.trace[0] = 0xb123;
  stack2.trace[1] = 0xb456;
  stack2.size = 2;

  StackTrace stack3;
  stack3.trace[0] = 0xc123;
  stack3.trace[1] = 0xc456;
  stack3.size = 2;

  std::vector<void *> vec;
  for (size_t i = 0; i < n; i++) {
    if ((i % 3) == 0) {
      if (vec.empty()) continue;
      size_t idx = my_rand_r(&seed) % vec.size();
      void *ptr = vec[idx];
      vec[idx] = vec.back();
      vec.pop_back();
      __asan::asan_free(ptr, &stack1, __asan::FROM_MALLOC);
    } else {
      size_t size = my_rand_r(&seed) % 1000 + 1;
      switch ((my_rand_r(&seed) % 128)) {
        case 0: size += 1024; break;
        case 1: size += 2048; break;
        case 2: size += 4096; break;
      }
      size_t alignment = 1 << (my_rand_r(&seed) % 10 + 1);
      char *ptr = (char*)__asan::asan_memalign(alignment, size,
                                               &stack2, __asan::FROM_MALLOC);
      EXPECT_EQ(size, __asan::asan_malloc_usable_size(ptr, 0, 0));
      vec.push_back(ptr);
      ptr[0] = 0;
      ptr[size-1] = 0;
      ptr[size/2] = 0;
    }
  }
  for (size_t i = 0; i < vec.size(); i++)
    __asan::asan_free(vec[i], &stack3, __asan::FROM_MALLOC);
}


TEST(AddressSanitizer, NoInstMallocTest) {
  MallocStress(ASAN_LOW_MEMORY ? 300000 : 1000000);
}

TEST(AddressSanitizer, ThreadedMallocStressTest) {
  const int kNumThreads = 4;
  const int kNumIterations = (ASAN_LOW_MEMORY) ? 10000 : 100000;
  pthread_t t[kNumThreads];
  for (int i = 0; i < kNumThreads; i++) {
    PTHREAD_CREATE(&t[i], 0, (void* (*)(void *x))MallocStress,
        (void*)kNumIterations);
  }
  for (int i = 0; i < kNumThreads; i++) {
    PTHREAD_JOIN(t[i], 0);
  }
}

static void PrintShadow(const char *tag, uptr ptr, size_t size) {
  fprintf(stderr, "%s shadow: %lx size % 3ld: ", tag, (long)ptr, (long)size);
  uptr prev_shadow = 0;
  for (sptr i = -32; i < (sptr)size + 32; i++) {
    uptr shadow = __asan::MemToShadow(ptr + i);
    if (i == 0 || i == (sptr)size)
      fprintf(stderr, ".");
    if (shadow != prev_shadow) {
      prev_shadow = shadow;
      fprintf(stderr, "%02x", (int)*(u8*)shadow);
    }
  }
  fprintf(stderr, "\n");
}

TEST(AddressSanitizer, DISABLED_InternalPrintShadow) {
  for (size_t size = 1; size <= 513; size++) {
    char *ptr = new char[size];
    PrintShadow("m", (uptr)ptr, size);
    delete [] ptr;
    PrintShadow("f", (uptr)ptr, size);
  }
}

TEST(AddressSanitizer, QuarantineTest) {
  StackTrace stack;
  stack.trace[0] = 0x890;
  stack.size = 1;

  const int size = 1024;
  void *p = __asan::asan_malloc(size, &stack);
  __asan::asan_free(p, &stack, __asan::FROM_MALLOC);
  size_t i;
  size_t max_i = 1 << 30;
  for (i = 0; i < max_i; i++) {
    void *p1 = __asan::asan_malloc(size, &stack);
    __asan::asan_free(p1, &stack, __asan::FROM_MALLOC);
    if (p1 == p) break;
  }
  EXPECT_GE(i, 10000U);
  EXPECT_LT(i, max_i);
}

void *ThreadedQuarantineTestWorker(void *unused) {
  (void)unused;
  u32 seed = my_rand();
  StackTrace stack;
  stack.trace[0] = 0x890;
  stack.size = 1;

  for (size_t i = 0; i < 1000; i++) {
    void *p = __asan::asan_malloc(1 + (my_rand_r(&seed) % 4000), &stack);
    __asan::asan_free(p, &stack, __asan::FROM_MALLOC);
  }
  return NULL;
}

// Check that the thread local allocators are flushed when threads are
// destroyed.
TEST(AddressSanitizer, ThreadedQuarantineTest) {
  const int n_threads = 3000;
  size_t mmaped1 = __asan_get_heap_size();
  for (int i = 0; i < n_threads; i++) {
    pthread_t t;
    PTHREAD_CREATE(&t, NULL, ThreadedQuarantineTestWorker, 0);
    PTHREAD_JOIN(t, 0);
    size_t mmaped2 = __asan_get_heap_size();
    EXPECT_LT(mmaped2 - mmaped1, 320U * (1 << 20));
  }
}

void *ThreadedOneSizeMallocStress(void *unused) {
  (void)unused;
  StackTrace stack;
  stack.trace[0] = 0x890;
  stack.size = 1;
  const size_t kNumMallocs = 1000;
  for (int iter = 0; iter < 1000; iter++) {
    void *p[kNumMallocs];
    for (size_t i = 0; i < kNumMallocs; i++) {
      p[i] = __asan::asan_malloc(32, &stack);
    }
    for (size_t i = 0; i < kNumMallocs; i++) {
      __asan::asan_free(p[i], &stack, __asan::FROM_MALLOC);
    }
  }
  return NULL;
}

TEST(AddressSanitizer, ThreadedOneSizeMallocStressTest) {
  const int kNumThreads = 4;
  pthread_t t[kNumThreads];
  for (int i = 0; i < kNumThreads; i++) {
    PTHREAD_CREATE(&t[i], 0, ThreadedOneSizeMallocStress, 0);
  }
  for (int i = 0; i < kNumThreads; i++) {
    PTHREAD_JOIN(t[i], 0);
  }
}

TEST(AddressSanitizer, ShadowRegionIsPoisonedTest) {
  using __asan::kHighMemEnd;
  // Check that __asan_region_is_poisoned works for shadow regions.
  uptr ptr = kLowShadowBeg + 200;
  EXPECT_EQ(ptr, __asan_region_is_poisoned(ptr, 100));
  ptr = kShadowGapBeg + 200;
  EXPECT_EQ(ptr, __asan_region_is_poisoned(ptr, 100));
  ptr = kHighShadowBeg + 200;
  EXPECT_EQ(ptr, __asan_region_is_poisoned(ptr, 100));
}

// Test __asan_load1 & friends.
TEST(AddressSanitizer, LoadStoreCallbacks) {
  typedef void (*CB)(uptr p);
  CB cb[2][5] = {
      {
        __asan_load1, __asan_load2, __asan_load4, __asan_load8, __asan_load16,
      }, {
        __asan_store1, __asan_store2, __asan_store4, __asan_store8,
        __asan_store16,
      }
  };

  uptr buggy_ptr;

  __asan_test_only_reported_buggy_pointer = &buggy_ptr;
  StackTrace stack;
  stack.trace[0] = 0x890;
  stack.size = 1;

  for (uptr len = 16; len <= 32; len++) {
    char *ptr = (char*) __asan::asan_malloc(len, &stack);
    uptr p = reinterpret_cast<uptr>(ptr);
    for (uptr is_write = 0; is_write <= 1; is_write++) {
      for (uptr size_log = 0; size_log <= 4; size_log++) {
        uptr size = 1 << size_log;
        CB call = cb[is_write][size_log];
        // Iterate only size-aligned offsets.
        for (uptr offset = 0; offset <= len; offset += size) {
          buggy_ptr = 0;
          call(p + offset);
          if (offset + size <= len)
            EXPECT_EQ(buggy_ptr, 0U);
          else
            EXPECT_EQ(buggy_ptr, p + offset);
        }
      }
    }
    __asan::asan_free(ptr, &stack, __asan::FROM_MALLOC);
  }
  __asan_test_only_reported_buggy_pointer = 0;
}