// asan_interface_test.cc revision 1e172b4bdec57329bf904f063a29f99cddf2d85f
1//===-- asan_interface_test.cc ------------*- C++ -*-===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file is a part of AddressSanitizer, an address sanity checker. 11// 12//===----------------------------------------------------------------------===// 13#include <pthread.h> 14#include <stdio.h> 15#include <string.h> 16 17#include "asan_test_config.h" 18#include "asan_test_utils.h" 19#include "asan_interface.h" 20 21TEST(AddressSanitizerInterface, GetEstimatedAllocatedSize) { 22 EXPECT_EQ(1, __asan_get_estimated_allocated_size(0)); 23 const size_t sizes[] = { 1, 30, 1<<30 }; 24 for (size_t i = 0; i < 3; i++) { 25 EXPECT_EQ(sizes[i], __asan_get_estimated_allocated_size(sizes[i])); 26 } 27} 28 29static const char* kGetAllocatedSizeErrorMsg = 30 "__asan_get_allocated_size failed"; 31 32TEST(AddressSanitizerInterface, GetAllocatedSizeAndOwnershipTest) { 33 const size_t kArraySize = 100; 34 char *array = Ident((char*)malloc(kArraySize)); 35 int *int_ptr = Ident(new int); 36 37 // Allocated memory is owned by allocator. Allocated size should be 38 // equal to requested size. 39 EXPECT_EQ(true, __asan_get_ownership(array)); 40 EXPECT_EQ(kArraySize, __asan_get_allocated_size(array)); 41 EXPECT_EQ(true, __asan_get_ownership(int_ptr)); 42 EXPECT_EQ(sizeof(int), __asan_get_allocated_size(int_ptr)); 43 44 // We cannot call GetAllocatedSize from the memory we didn't map, 45 // and from the interior pointers (not returned by previous malloc). 
46 void *wild_addr = (void*)0x1; 47 EXPECT_EQ(false, __asan_get_ownership(wild_addr)); 48 EXPECT_DEATH(__asan_get_allocated_size(wild_addr), kGetAllocatedSizeErrorMsg); 49 EXPECT_EQ(false, __asan_get_ownership(array + kArraySize / 2)); 50 EXPECT_DEATH(__asan_get_allocated_size(array + kArraySize / 2), 51 kGetAllocatedSizeErrorMsg); 52 53 // NULL is a valid argument and is owned. 54 EXPECT_EQ(true, __asan_get_ownership(NULL)); 55 EXPECT_EQ(0, __asan_get_allocated_size(NULL)); 56 57 // When memory is freed, it's not owned, and call to GetAllocatedSize 58 // is forbidden. 59 free(array); 60 EXPECT_EQ(false, __asan_get_ownership(array)); 61 EXPECT_DEATH(__asan_get_allocated_size(array), kGetAllocatedSizeErrorMsg); 62 63 delete int_ptr; 64} 65 66TEST(AddressSanitizerInterface, EnableStatisticsTest) { 67 bool old_stats_value = __asan_enable_statistics(true); 68 EXPECT_EQ(true, __asan_enable_statistics(false)); 69 EXPECT_EQ(false, __asan_enable_statistics(old_stats_value)); 70} 71 72TEST(AddressSanitizerInterface, GetCurrentAllocatedBytesTest) { 73 size_t before_malloc, after_malloc, after_free; 74 char *array; 75 const size_t kMallocSize = 100; 76 bool old_stats_value = __asan_enable_statistics(true); 77 before_malloc = __asan_get_current_allocated_bytes(); 78 79 array = Ident((char*)malloc(kMallocSize)); 80 after_malloc = __asan_get_current_allocated_bytes(); 81 EXPECT_EQ(before_malloc + kMallocSize, after_malloc); 82 83 free(array); 84 after_free = __asan_get_current_allocated_bytes(); 85 EXPECT_EQ(before_malloc, after_free); 86 87 __asan_enable_statistics(false); 88 array = Ident((char*)malloc(kMallocSize)); 89 after_malloc = __asan_get_current_allocated_bytes(); 90 EXPECT_EQ(before_malloc, after_malloc); 91 92 free(array); 93 __asan_enable_statistics(old_stats_value); 94} 95 96static void DoDoubleFree() { 97 int *x = Ident(new int); 98 delete Ident(x); 99 delete Ident(x); 100} 101 102// This test is run in a separate process, so that large malloced 103// chunk won't 
remain in the free lists after the test. 104// Note: use ASSERT_* instead of EXPECT_* here. 105static void RunGetHeapSizeTestAndDie() { 106 size_t old_heap_size, new_heap_size, heap_growth; 107 // We unlikely have have chunk of this size in free list. 108 static const size_t kLargeMallocSize = 1 << 29; // 512M 109 __asan_enable_statistics(true); 110 old_heap_size = __asan_get_heap_size(); 111 fprintf(stderr, "allocating %zu bytes:\n", kLargeMallocSize); 112 free(Ident(malloc(kLargeMallocSize))); 113 new_heap_size = __asan_get_heap_size(); 114 heap_growth = new_heap_size - old_heap_size; 115 fprintf(stderr, "heap growth after first malloc: %zu\n", heap_growth); 116 ASSERT_GE(heap_growth, kLargeMallocSize); 117 ASSERT_LE(heap_growth, 2 * kLargeMallocSize); 118 119 // Now large chunk should fall into free list, and can be 120 // allocated without increasing heap size. 121 old_heap_size = new_heap_size; 122 free(Ident(malloc(kLargeMallocSize))); 123 heap_growth = __asan_get_heap_size() - old_heap_size; 124 fprintf(stderr, "heap growth after second malloc: %zu\n", heap_growth); 125 ASSERT_LT(heap_growth, kLargeMallocSize); 126 127 // Test passed. Now die with expected double-free. 128 DoDoubleFree(); 129} 130 131TEST(AddressSanitizerInterface, GetHeapSizeTest) { 132 EXPECT_DEATH(RunGetHeapSizeTestAndDie(), "double-free"); 133} 134 135// Note: use ASSERT_* instead of EXPECT_* here. 136static void DoLargeMallocForGetFreeBytesTestAndDie() { 137 size_t old_free_bytes, new_free_bytes; 138 static const size_t kLargeMallocSize = 1 << 29; // 512M 139 __asan_enable_statistics(true); 140 // If we malloc and free a large memory chunk, it will not fall 141 // into quarantine and will be available for future requests. 
142 old_free_bytes = __asan_get_free_bytes(); 143 fprintf(stderr, "allocating %zu bytes:\n", kLargeMallocSize); 144 fprintf(stderr, "free bytes before malloc: %zu\n", old_free_bytes); 145 free(Ident(malloc(kLargeMallocSize))); 146 new_free_bytes = __asan_get_free_bytes(); 147 fprintf(stderr, "free bytes after malloc and free: %zu\n", new_free_bytes); 148 ASSERT_GE(new_free_bytes, old_free_bytes + kLargeMallocSize); 149 // Test passed. 150 DoDoubleFree(); 151} 152 153TEST(AddressSanitizerInterface, GetFreeBytesTest) { 154 static const size_t kNumOfChunks = 100; 155 static const size_t kChunkSize = 100; 156 char *chunks[kNumOfChunks]; 157 size_t i; 158 size_t old_free_bytes, new_free_bytes; 159 bool old_stats_value = __asan_enable_statistics(true); 160 // Allocate a small chunk. Now allocator probably has a lot of these 161 // chunks to fulfill future requests. So, future requests will decrease 162 // the number of free bytes. 163 chunks[0] = Ident((char*)malloc(kChunkSize)); 164 old_free_bytes = __asan_get_free_bytes(); 165 for (i = 1; i < kNumOfChunks; i++) { 166 chunks[i] = Ident((char*)malloc(kChunkSize)); 167 new_free_bytes = __asan_get_free_bytes(); 168 EXPECT_LT(new_free_bytes, old_free_bytes); 169 old_free_bytes = new_free_bytes; 170 } 171 // Deleting these chunks will move them to quarantine, number of free 172 // bytes won't increase. 
173 for (i = 0; i < kNumOfChunks; i++) { 174 free(chunks[i]); 175 EXPECT_EQ(old_free_bytes, __asan_get_free_bytes()); 176 } 177 EXPECT_DEATH(DoLargeMallocForGetFreeBytesTestAndDie(), "double-free"); 178 __asan_enable_statistics(old_stats_value); 179} 180 181static const size_t kManyThreadsMallocSizes[] = {5, 1UL<<10, 1UL<<20, 357}; 182static const size_t kManyThreadsIterations = 250; 183static const size_t kManyThreadsNumThreads = 200; 184 185void *ManyThreadsWithStatsWorker(void *arg) { 186 for (size_t iter = 0; iter < kManyThreadsIterations; iter++) { 187 for (size_t size_index = 0; size_index < 4; size_index++) { 188 free(Ident(malloc(kManyThreadsMallocSizes[size_index]))); 189 } 190 } 191 return 0; 192} 193 194TEST(AddressSanitizerInterface, ManyThreadsWithStatsStressTest) { 195 size_t before_test, after_test, i; 196 pthread_t threads[kManyThreadsNumThreads]; 197 bool old_stats_value = __asan_enable_statistics(true); 198 before_test = __asan_get_current_allocated_bytes(); 199 for (i = 0; i < kManyThreadsNumThreads; i++) { 200 pthread_create(&threads[i], 0, 201 (void* (*)(void *x))ManyThreadsWithStatsWorker, (void*)i); 202 } 203 for (i = 0; i < kManyThreadsNumThreads; i++) { 204 pthread_join(threads[i], 0); 205 } 206 after_test = __asan_get_current_allocated_bytes(); 207 // ASan stats also reflect memory usage of internal ASan RTL structs, 208 // so we can't check for equality here. 
209 EXPECT_LT(after_test, before_test + (1UL<<20)); 210 __asan_enable_statistics(old_stats_value); 211} 212 213TEST(AddressSanitizerInterface, ExitCode) { 214 int original_exit_code = __asan_set_error_exit_code(7); 215 EXPECT_EXIT(DoDoubleFree(), ::testing::ExitedWithCode(7), ""); 216 EXPECT_EQ(7, __asan_set_error_exit_code(8)); 217 EXPECT_EXIT(DoDoubleFree(), ::testing::ExitedWithCode(8), ""); 218 EXPECT_EQ(8, __asan_set_error_exit_code(original_exit_code)); 219 EXPECT_EXIT(DoDoubleFree(), 220 ::testing::ExitedWithCode(original_exit_code), ""); 221} 222 223static const char* kUseAfterPoisonErrorMessage = "use-after-poison"; 224 225#define ACCESS(ptr, offset) Ident(*(ptr + offset)) 226 227#define DIE_ON_ACCESS(ptr, offset) \ 228 EXPECT_DEATH(Ident(*(ptr + offset)), kUseAfterPoisonErrorMessage) 229 230TEST(AddressSanitizerInterface, SimplePoisonMemoryRegionTest) { 231 char *array = Ident((char*)malloc(120)); 232 // poison array[40..80) 233 ASAN_POISON_MEMORY_REGION(array + 40, 40); 234 ACCESS(array, 39); 235 ACCESS(array, 80); 236 DIE_ON_ACCESS(array, 40); 237 DIE_ON_ACCESS(array, 60); 238 DIE_ON_ACCESS(array, 79); 239 ASAN_UNPOISON_MEMORY_REGION(array + 40, 40); 240 // access previously poisoned memory. 
241 ACCESS(array, 40); 242 ACCESS(array, 79); 243 free(array); 244} 245 246TEST(AddressSanitizerInterface, OverlappingPoisonMemoryRegionTest) { 247 char *array = Ident((char*)malloc(120)); 248 // Poison [0..40) and [80..120) 249 ASAN_POISON_MEMORY_REGION(array, 40); 250 ASAN_POISON_MEMORY_REGION(array + 80, 40); 251 DIE_ON_ACCESS(array, 20); 252 ACCESS(array, 60); 253 DIE_ON_ACCESS(array, 100); 254 // Poison whole array - [0..120) 255 ASAN_POISON_MEMORY_REGION(array, 120); 256 DIE_ON_ACCESS(array, 60); 257 // Unpoison [24..96) 258 ASAN_UNPOISON_MEMORY_REGION(array + 24, 72); 259 DIE_ON_ACCESS(array, 23); 260 ACCESS(array, 24); 261 ACCESS(array, 60); 262 ACCESS(array, 95); 263 DIE_ON_ACCESS(array, 96); 264 free(array); 265} 266 267TEST(AddressSanitizerInterface, PushAndPopWithPoisoningTest) { 268 // Vector of capacity 20 269 char *vec = Ident((char*)malloc(20)); 270 ASAN_POISON_MEMORY_REGION(vec, 20); 271 for (size_t i = 0; i < 7; i++) { 272 // Simulate push_back. 273 ASAN_UNPOISON_MEMORY_REGION(vec + i, 1); 274 ACCESS(vec, i); 275 DIE_ON_ACCESS(vec, i + 1); 276 } 277 for (size_t i = 7; i > 0; i--) { 278 // Simulate pop_back. 279 ASAN_POISON_MEMORY_REGION(vec + i - 1, 1); 280 DIE_ON_ACCESS(vec, i - 1); 281 if (i > 1) ACCESS(vec, i - 2); 282 } 283 free(vec); 284} 285 286// Make sure that each aligned block of size "2^granularity" doesn't have 287// "true" value before "false" value. 
288static void MakeShadowValid(bool *shadow, int length, int granularity) { 289 bool can_be_poisoned = true; 290 for (int i = length - 1; i >= 0; i--) { 291 can_be_poisoned &= shadow[i]; 292 shadow[i] &= can_be_poisoned; 293 if (i % (1 << granularity) == 0) { 294 can_be_poisoned = true; 295 } 296 } 297} 298 299TEST(AddressSanitizerInterface, PoisoningStressTest) { 300 const size_t kSize = 24; 301 bool expected[kSize]; 302 char *arr = Ident((char*)malloc(kSize)); 303 for (size_t l1 = 0; l1 < kSize; l1++) { 304 for (size_t s1 = 1; l1 + s1 <= kSize; s1++) { 305 for (size_t l2 = 0; l2 < kSize; l2++) { 306 for (size_t s2 = 1; l2 + s2 <= kSize; s2++) { 307 // Poison [l1, l1+s1), [l2, l2+s2) and check result. 308 ASAN_UNPOISON_MEMORY_REGION(arr, kSize); 309 ASAN_POISON_MEMORY_REGION(arr + l1, s1); 310 ASAN_POISON_MEMORY_REGION(arr + l2, s2); 311 memset(expected, false, kSize); 312 memset(expected + l1, true, s1); 313 MakeShadowValid(expected, 24, /*granularity*/ 3); 314 memset(expected + l2, true, s2); 315 MakeShadowValid(expected, 24, /*granularity*/ 3); 316 for (size_t i = 0; i < kSize; i++) { 317 ASSERT_EQ(expected[i], __asan_address_is_poisoned(arr + i)); 318 } 319 // Unpoison [l1, l1+s1) and [l2, l2+s2) and check result. 
320 ASAN_POISON_MEMORY_REGION(arr, kSize); 321 ASAN_UNPOISON_MEMORY_REGION(arr + l1, s1); 322 ASAN_UNPOISON_MEMORY_REGION(arr + l2, s2); 323 memset(expected, true, kSize); 324 memset(expected + l1, false, s1); 325 MakeShadowValid(expected, 24, /*granularity*/ 3); 326 memset(expected + l2, false, s2); 327 MakeShadowValid(expected, 24, /*granularity*/ 3); 328 for (size_t i = 0; i < kSize; i++) { 329 ASSERT_EQ(expected[i], __asan_address_is_poisoned(arr + i)); 330 } 331 } 332 } 333 } 334 } 335} 336 337static const char *kInvalidPoisonMessage = "invalid-poison-memory-range"; 338static const char *kInvalidUnpoisonMessage = "invalid-unpoison-memory-range"; 339 340TEST(AddressSanitizerInterface, DISABLED_InvalidPoisonAndUnpoisonCallsTest) { 341 char *array = Ident((char*)malloc(120)); 342 ASAN_UNPOISON_MEMORY_REGION(array, 120); 343 // Try to unpoison not owned memory 344 EXPECT_DEATH(ASAN_UNPOISON_MEMORY_REGION(array, 121), 345 kInvalidUnpoisonMessage); 346 EXPECT_DEATH(ASAN_UNPOISON_MEMORY_REGION(array - 1, 120), 347 kInvalidUnpoisonMessage); 348 349 ASAN_POISON_MEMORY_REGION(array, 120); 350 // Try to poison not owned memory. 351 EXPECT_DEATH(ASAN_POISON_MEMORY_REGION(array, 121), kInvalidPoisonMessage); 352 EXPECT_DEATH(ASAN_POISON_MEMORY_REGION(array - 1, 120), 353 kInvalidPoisonMessage); 354 free(array); 355} 356