// asan_noinst_test.cc revision 1b17f5b79d58c5aff291dde05727ad0b215b81c6
//===-- asan_noinst_test.cc -----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// This test file should be compiled w/o asan instrumentation.
//===----------------------------------------------------------------------===//

#include "asan_allocator.h"
#include "asan_internal.h"
#include "asan_mapping.h"
#include "asan_test_utils.h"

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>  // for memset()
#include <algorithm>
#include <vector>
#include <limits>

#if ASAN_FLEXIBLE_MAPPING_AND_OFFSET == 1
// Manually set correct ASan mapping scale and offset, as they won't be
// exported from instrumented sources (there are none).
# define FLEXIBLE_SHADOW_SCALE kDefaultShadowScale
# if SANITIZER_ANDROID
#  define FLEXIBLE_SHADOW_OFFSET (0)
# else
#  if SANITIZER_WORDSIZE == 32
#   if defined(__mips__)
#    define FLEXIBLE_SHADOW_OFFSET kMIPS32_ShadowOffset32
#   else
#    define FLEXIBLE_SHADOW_OFFSET kDefaultShadowOffset32
#   endif
#  else
#   if defined(__powerpc64__)
#    define FLEXIBLE_SHADOW_OFFSET kPPC64_ShadowOffset64
#   elif SANITIZER_MAC
#    define FLEXIBLE_SHADOW_OFFSET kDefaultShadowOffset64
#   else
#    define FLEXIBLE_SHADOW_OFFSET kDefaultShort64bitShadowOffset
#   endif
#  endif
# endif
SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_mapping_scale = FLEXIBLE_SHADOW_SCALE;
SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_mapping_offset =
    FLEXIBLE_SHADOW_OFFSET;
#endif  // ASAN_FLEXIBLE_MAPPING_AND_OFFSET

extern "C" {
// Set specific ASan options for uninstrumented unittest.
const char* __asan_default_options() {
  return "allow_reexec=0";
}
}  // extern "C"

// Make sure __asan_init is called before any test case is run.
struct AsanInitCaller {
  AsanInitCaller() { __asan_init(); }
};
static AsanInitCaller asan_init_caller;

TEST(AddressSanitizer, InternalSimpleDeathTest) {
  EXPECT_DEATH(exit(1), "");
}

// Drives n rounds of randomized traffic through the ASan allocator using
// synthetic (fake-PC) stack traces. Every third round frees a randomly
// chosen live block; the other rounds allocate a block of random size
// (1..1000 bytes, plus an extra 1/2/4 KiB in 3 out of 128 cases) with a
// random power-of-two alignment (2..1024) and touch its first, middle and
// last bytes. All surviving blocks are freed at the end.
static void MallocStress(size_t n) {
  u32 seed = my_rand();
  // Three distinct fake stack traces so that frees from the random-free
  // path, allocations, and the final sweep are distinguishable in reports.
  StackTrace stack1;
  stack1.trace[0] = 0xa123;
  stack1.trace[1] = 0xa456;
  stack1.size = 2;

  StackTrace stack2;
  stack2.trace[0] = 0xb123;
  stack2.trace[1] = 0xb456;
  stack2.size = 2;

  StackTrace stack3;
  stack3.trace[0] = 0xc123;
  stack3.trace[1] = 0xc456;
  stack3.size = 2;

  std::vector<void *> vec;
  for (size_t i = 0; i < n; i++) {
    if ((i % 3) == 0) {
      if (vec.empty()) continue;
      // Free a random live block; swap-with-back keeps the erase O(1).
      size_t idx = my_rand_r(&seed) % vec.size();
      void *ptr = vec[idx];
      vec[idx] = vec.back();
      vec.pop_back();
      __asan::asan_free(ptr, &stack1, __asan::FROM_MALLOC);
    } else {
      size_t size = my_rand_r(&seed) % 1000 + 1;
      // Occasionally request a larger block to exercise bigger size classes.
      switch ((my_rand_r(&seed) % 128)) {
        case 0: size += 1024; break;
        case 1: size += 2048; break;
        case 2: size += 4096; break;
      }
      size_t alignment = 1 << (my_rand_r(&seed) % 10 + 1);
      char *ptr = (char*)__asan::asan_memalign(alignment, size,
                                               &stack2, __asan::FROM_MALLOC);
      // The allocator must report exactly the requested size as usable.
      EXPECT_EQ(size, __asan::asan_malloc_usable_size(ptr, 0, 0));
      vec.push_back(ptr);
      // Touch both ends and the middle to catch mis-sized redzones.
      ptr[0] = 0;
      ptr[size-1] = 0;
      ptr[size/2] = 0;
    }
  }
  for (size_t i = 0; i < vec.size(); i++)
    __asan::asan_free(vec[i], &stack3, __asan::FROM_MALLOC);
}


TEST(AddressSanitizer, NoInstMallocTest) {
  MallocStress(ASAN_LOW_MEMORY ? 300000 : 1000000);
}

// Run MallocStress from several threads concurrently. The cast adapts
// MallocStress(size_t) to the pthread start-routine signature; the iteration
// count is smuggled through the void* argument.
TEST(AddressSanitizer, ThreadedMallocStressTest) {
  const int kNumThreads = 4;
  const int kNumIterations = (ASAN_LOW_MEMORY) ? 10000 : 100000;
  pthread_t t[kNumThreads];
  for (int i = 0; i < kNumThreads; i++) {
    PTHREAD_CREATE(&t[i], 0, (void* (*)(void *x))MallocStress,
                   (void*)kNumIterations);
  }
  for (int i = 0; i < kNumThreads; i++) {
    PTHREAD_JOIN(t[i], 0);
  }
}

// Debugging helper: dumps to stderr the shadow bytes covering
// [ptr - 32, ptr + size + 32). A '.' marks the start and the end of the
// block; each shadow byte is printed once, when the shadow address changes.
static void PrintShadow(const char *tag, uptr ptr, size_t size) {
  fprintf(stderr, "%s shadow: %lx size % 3ld: ", tag, (long)ptr, (long)size);
  uptr prev_shadow = 0;
  for (sptr i = -32; i < (sptr)size + 32; i++) {
    uptr shadow = __asan::MemToShadow(ptr + i);
    if (i == 0 || i == (sptr)size)
      fprintf(stderr, ".");
    if (shadow != prev_shadow) {
      prev_shadow = shadow;
      fprintf(stderr, "%02x", (int)*(u8*)shadow);
    }
  }
  fprintf(stderr, "\n");
}

// Disabled by default; enable manually to eyeball shadow encodings for
// allocated ("m") and freed ("f") blocks of sizes 1..513. The second
// PrintShadow deliberately inspects the block after delete[] — it only
// reads shadow memory, not the block itself.
TEST(AddressSanitizer, DISABLED_InternalPrintShadow) {
  for (size_t size = 1; size <= 513; size++) {
    char *ptr = new char[size];
    PrintShadow("m", (uptr)ptr, size);
    delete [] ptr;
    PrintShadow("f", (uptr)ptr, size);
  }
}

// Freed memory goes into quarantine before being reused: allocate/free in a
// loop until the original address comes back, and require that this takes
// many iterations (>= 10000) but does happen eventually (< 2^30).
TEST(AddressSanitizer, QuarantineTest) {
  StackTrace stack;
  stack.trace[0] = 0x890;
  stack.size = 1;

  const int size = 1024;
  void *p = __asan::asan_malloc(size, &stack);
  __asan::asan_free(p, &stack, __asan::FROM_MALLOC);
  size_t i;
  size_t max_i = 1 << 30;
  for (i = 0; i < max_i; i++) {
    void *p1 = __asan::asan_malloc(size, &stack);
    __asan::asan_free(p1, &stack, __asan::FROM_MALLOC);
    if (p1 == p) break;
  }
  EXPECT_GE(i, 10000U);
  EXPECT_LT(i, max_i);
}

// Per-thread worker: 1000 malloc/free pairs of random sizes (1..4000 bytes)
// through the ASan allocator, with a minimal fake stack trace.
void *ThreadedQuarantineTestWorker(void *unused) {
  (void)unused;
  u32 seed = my_rand();
  StackTrace stack;
  stack.trace[0] = 0x890;
  stack.size = 1;

  for (size_t i = 0; i < 1000; i++) {
    void *p = __asan::asan_malloc(1 + (my_rand_r(&seed) % 4000), &stack);
    __asan::asan_free(p, &stack, __asan::FROM_MALLOC);
  }
  return NULL;
}

// Check that the thread local allocators are flushed when threads are
// destroyed.
197TEST(AddressSanitizer, ThreadedQuarantineTest) { 198 const int n_threads = 3000; 199 size_t mmaped1 = __asan_get_heap_size(); 200 for (int i = 0; i < n_threads; i++) { 201 pthread_t t; 202 PTHREAD_CREATE(&t, NULL, ThreadedQuarantineTestWorker, 0); 203 PTHREAD_JOIN(t, 0); 204 size_t mmaped2 = __asan_get_heap_size(); 205 EXPECT_LT(mmaped2 - mmaped1, 320U * (1 << 20)); 206 } 207} 208 209void *ThreadedOneSizeMallocStress(void *unused) { 210 (void)unused; 211 StackTrace stack; 212 stack.trace[0] = 0x890; 213 stack.size = 1; 214 const size_t kNumMallocs = 1000; 215 for (int iter = 0; iter < 1000; iter++) { 216 void *p[kNumMallocs]; 217 for (size_t i = 0; i < kNumMallocs; i++) { 218 p[i] = __asan::asan_malloc(32, &stack); 219 } 220 for (size_t i = 0; i < kNumMallocs; i++) { 221 __asan::asan_free(p[i], &stack, __asan::FROM_MALLOC); 222 } 223 } 224 return NULL; 225} 226 227TEST(AddressSanitizer, ThreadedOneSizeMallocStressTest) { 228 const int kNumThreads = 4; 229 pthread_t t[kNumThreads]; 230 for (int i = 0; i < kNumThreads; i++) { 231 PTHREAD_CREATE(&t[i], 0, ThreadedOneSizeMallocStress, 0); 232 } 233 for (int i = 0; i < kNumThreads; i++) { 234 PTHREAD_JOIN(t[i], 0); 235 } 236} 237 238TEST(AddressSanitizer, ShadowRegionIsPoisonedTest) { 239 using __asan::kHighMemEnd; 240 // Check that __asan_region_is_poisoned works for shadow regions. 241 uptr ptr = kLowShadowBeg + 200; 242 EXPECT_EQ(ptr, __asan_region_is_poisoned(ptr, 100)); 243 ptr = kShadowGapBeg + 200; 244 EXPECT_EQ(ptr, __asan_region_is_poisoned(ptr, 100)); 245 ptr = kHighShadowBeg + 200; 246 EXPECT_EQ(ptr, __asan_region_is_poisoned(ptr, 100)); 247} 248