asan_noinst_test.cc revision f1877cf0a314f407ac535ab1606fdac4f9b05026
1//===-- asan_noinst_test.cc -----------------------------------------------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file is a part of AddressSanitizer, an address sanity checker. 11// 12// This test file should be compiled w/o asan instrumentation. 13//===----------------------------------------------------------------------===// 14 15#include "asan_allocator.h" 16#include "asan_internal.h" 17#include "asan_mapping.h" 18#include "asan_stack.h" 19#include "asan_test_utils.h" 20#include "sanitizer/asan_interface.h" 21 22#include <assert.h> 23#include <stdio.h> 24#include <stdlib.h> 25#include <string.h> // for memset() 26#include <algorithm> 27#include <vector> 28 29// Simple stand-alone pseudorandom number generator. 30// Current algorithm is ANSI C linear congruential PRNG. 
// Advance the LCG state and return the high 16 bits of the new state.
static inline u32 my_rand(u32* state) {
  return (*state = *state * 1103515245 + 12345) >> 16;
}

static u32 global_seed = 0;


TEST(AddressSanitizer, InternalSimpleDeathTest) {
  EXPECT_DEATH(exit(1), "");
}

// Drive the ASan allocator directly with n pseudo-random operations:
// roughly one free per two allocations.  Allocations go through
// asan_memalign with random size (1..1000, occasionally bumped by
// 1K/2K/4K) and random power-of-two alignment (2..1024); the first,
// middle and last byte of each chunk are written so a mis-sized
// allocation would fault.  stack1..stack3 are canned fake call stacks
// handed to the allocator in place of real unwound traces.
static void MallocStress(size_t n) {
  u32 seed = my_rand(&global_seed);
  __asan::StackTrace stack1;
  stack1.trace[0] = 0xa123;
  stack1.trace[1] = 0xa456;
  stack1.size = 2;

  __asan::StackTrace stack2;
  stack2.trace[0] = 0xb123;
  stack2.trace[1] = 0xb456;
  stack2.size = 2;

  __asan::StackTrace stack3;
  stack3.trace[0] = 0xc123;
  stack3.trace[1] = 0xc456;
  stack3.size = 2;

  std::vector<void *> vec;
  for (size_t i = 0; i < n; i++) {
    if ((i % 3) == 0) {
      // Every third iteration: free a randomly chosen live chunk
      // (swap-with-back keeps the vector compact).
      if (vec.empty()) continue;
      size_t idx = my_rand(&seed) % vec.size();
      void *ptr = vec[idx];
      vec[idx] = vec.back();
      vec.pop_back();
      __asan::asan_free(ptr, &stack1, __asan::FROM_MALLOC);
    } else {
      size_t size = my_rand(&seed) % 1000 + 1;
      switch ((my_rand(&seed) % 128)) {
        case 0: size += 1024; break;
        case 1: size += 2048; break;
        case 2: size += 4096; break;
      }
      size_t alignment = 1 << (my_rand(&seed) % 10 + 1);
      char *ptr = (char*)__asan::asan_memalign(alignment, size,
                                               &stack2, __asan::FROM_MALLOC);
      vec.push_back(ptr);
      ptr[0] = 0;
      ptr[size-1] = 0;
      ptr[size/2] = 0;
    }
  }
  // Release everything still live at the end of the stress loop.
  for (size_t i = 0; i < vec.size(); i++)
    __asan::asan_free(vec[i], &stack3, __asan::FROM_MALLOC);
}


TEST(AddressSanitizer, NoInstMallocTest) {
  // Fewer iterations on ARM to keep run time reasonable.
#ifdef __arm__
  MallocStress(300000);
#else
  MallocStress(1000000);
#endif
}

// Dump to stderr the shadow bytes covering [ptr-32, ptr+size+32); a '.'
// marks each boundary of the user region.  A shadow byte is printed only
// when its address differs from the previous iteration's, so each shadow
// cell appears once.  NOTE(review): reads shadow memory directly via
// *(u8*)shadow — only valid in a process where ASan shadow is mapped.
static void PrintShadow(const char *tag, uptr ptr, size_t size) {
  fprintf(stderr, "%s shadow: %lx size % 3ld: ", tag, (long)ptr, (long)size);
  uptr prev_shadow = 0;
  for (sptr i = -32; i < (sptr)size + 32; i++) {
    uptr shadow = __asan::MemToShadow(ptr + i);
    if (i == 0 || i == (sptr)size)
      fprintf(stderr, ".");
    if (shadow != prev_shadow) {
      prev_shadow = shadow;
      fprintf(stderr, "%02x", (int)*(u8*)shadow);
    }
  }
  fprintf(stderr, "\n");
}

TEST(AddressSanitizer, DISABLED_InternalPrintShadow) {
  // Print shadow after allocation ("m") and after deallocation ("f")
  // for every size 1..513.
  for (size_t size = 1; size <= 513; size++) {
    char *ptr = new char[size];
    PrintShadow("m", (uptr)ptr, size);
    delete [] ptr;
    PrintShadow("f", (uptr)ptr, size);
  }
}

// Canned PC values used as synthetic stack frames by the stack-trace
// compression tests below.  The 64-bit values are compiled in only on
// 64-bit targets.
static uptr pc_array[] = {
#if SANITIZER_WORDSIZE == 64
  0x7effbf756068ULL,
  0x7effbf75e5abULL,
  0x7effc0625b7cULL,
  0x7effc05b8997ULL,
  0x7effbf990577ULL,
  0x7effbf990c56ULL,
  0x7effbf992f3cULL,
  0x7effbf950c22ULL,
  0x7effc036dba0ULL,
  0x7effc03638a3ULL,
  0x7effc035be4aULL,
  0x7effc0539c45ULL,
  0x7effc0539a65ULL,
  0x7effc03db9b3ULL,
  0x7effc03db100ULL,
  0x7effc037c7b8ULL,
  0x7effc037bfffULL,
  0x7effc038b777ULL,
  0x7effc038021cULL,
  0x7effc037c7d1ULL,
  0x7effc037bfffULL,
  0x7effc038b777ULL,
  0x7effc038021cULL,
  0x7effc037c7d1ULL,
  0x7effc037bfffULL,
  0x7effc038b777ULL,
  0x7effc038021cULL,
  0x7effc037c7d1ULL,
  0x7effc037bfffULL,
  0x7effc0520d26ULL,
  0x7effc009ddffULL,
  0x7effbf90bb50ULL,
  0x7effbdddfa69ULL,
  0x7effbdde1fe2ULL,
  0x7effbdde2424ULL,
  0x7effbdde27b3ULL,
  0x7effbddee53bULL,
  0x7effbdde1988ULL,
  0x7effbdde0904ULL,
  0x7effc106ce0dULL,
  0x7effbcc3fa04ULL,
  0x7effbcc3f6a4ULL,
  0x7effbcc3e726ULL,
  0x7effbcc40852ULL,
  0x7effb681ec4dULL,
#endif  // SANITIZER_WORDSIZE
  0xB0B5E768,
  0x7B682EC1,
  0x367F9918,
  0xAE34E13,
  0xBA0C6C6,
  0x13250F46,
  0xA0D6A8AB,
  0x2B07C1A8,
  0x6C844F4A,
  0x2321B53,
  0x1F3D4F8F,
  0x3FE2924B,
  0xB7A2F568,
  0xBD23950A,
  0x61020930,
  0x33E7970C,
  0x405998A1,
  0x59F3551D,
  0x350E3028,
  0xBC55A28D,
  0x361F3AED,
  0xBEAD0F73,
  0xAEF28479,
  0x757E971F,
  0xAEBA450,
  0x43AD22F5,
  0x8C2C50C4,
  0x7AD8A2E1,
  0x69EE4EE8,
  0xC08DFF,
  0x4BA6538,
  0x3708AB2,
  0xC24B6475,
  0x7C8890D7,
  0x6662495F,
  0x9B641689,
  0xD3596B,
  0xA1049569,
  0x44CBC16,
  0x4D39C39F
};

// Round-trip check: shuffle pc_array, truncate the trace to a random
// length, compress it into a buffer of random (possibly insufficient)
// size, then uncompress and verify the surviving prefix of frames
// matches the original exactly.
void CompressStackTraceTest(size_t n_iter) {
  u32 seed = my_rand(&global_seed);
  const size_t kNumPcs = ARRAY_SIZE(pc_array);
  u32 compressed[2 * kNumPcs];

  for (size_t iter = 0; iter < n_iter; iter++) {
    std::random_shuffle(pc_array, pc_array + kNumPcs);
    __asan::StackTrace stack0, stack1;
    stack0.CopyFrom(pc_array, kNumPcs);
    stack0.size = std::max((size_t)1, (size_t)(my_rand(&seed) % stack0.size));
    size_t compress_size =
        std::max((size_t)2, (size_t)my_rand(&seed) % (2 * kNumPcs));
    size_t n_frames =
        __asan::StackTrace::CompressStack(&stack0, compressed, compress_size);
    Ident(n_frames);
    // Compression may drop trailing frames but never invent them.
    assert(n_frames <= stack0.size);
    __asan::StackTrace::UncompressStack(&stack1, compressed, compress_size);
    assert(stack1.size == n_frames);
    for (size_t i = 0; i < stack1.size; i++) {
      assert(stack0.trace[i] == stack1.trace[i]);
    }
  }
}

TEST(AddressSanitizer, CompressStackTraceTest) {
  CompressStackTraceTest(10000);
}

// Throughput only: repeatedly compress one full-size trace; Ident()
// keeps the result from being optimized away.
void CompressStackTraceBenchmark(size_t n_iter) {
  const size_t kNumPcs = ARRAY_SIZE(pc_array);
  u32 compressed[2 * kNumPcs];
  std::random_shuffle(pc_array, pc_array + kNumPcs);

  __asan::StackTrace stack0;
  stack0.CopyFrom(pc_array, kNumPcs);
  stack0.size = kNumPcs;
  for (size_t iter = 0; iter < n_iter; iter++) {
    size_t compress_size = kNumPcs;
    size_t n_frames =
        __asan::StackTrace::CompressStack(&stack0, compressed, compress_size);
    Ident(n_frames);
  }
}

TEST(AddressSanitizer, CompressStackTraceBenchmark) {
  CompressStackTraceBenchmark(1 << 24);
}

// A freed chunk must stay in quarantine for a while: the same address is
// expected not to be handed out again until at least 100000 other
// same-size malloc/free pairs have passed, yet it must eventually be
// reused (well before 2^30 attempts).
TEST(AddressSanitizer, QuarantineTest) {
  __asan::StackTrace stack;
  stack.trace[0] = 0x890;
  stack.size = 1;

  const int size = 32;
  void *p = __asan::asan_malloc(size, &stack);
  __asan::asan_free(p, &stack, __asan::FROM_MALLOC);
  size_t i;
  size_t max_i = 1 << 30;
  for (i = 0; i < max_i; i++) {
    void *p1 = __asan::asan_malloc(size, &stack);
    __asan::asan_free(p1, &stack, __asan::FROM_MALLOC);
    if (p1 == p) break;
  }
  // fprintf(stderr, "i=%ld\n", i);
  EXPECT_GE(i, 100000U);
  EXPECT_LT(i, max_i);
}

// Worker for ThreadedQuarantineTest: 1000 random-sized (1..4000 bytes)
// malloc/free pairs through the ASan allocator.
void *ThreadedQuarantineTestWorker(void *unused) {
  (void)unused;
  u32 seed = my_rand(&global_seed);
  __asan::StackTrace stack;
  stack.trace[0] = 0x890;
  stack.size = 1;

  for (size_t i = 0; i < 1000; i++) {
    void *p = __asan::asan_malloc(1 + (my_rand(&seed) % 4000), &stack);
    __asan::asan_free(p, &stack, __asan::FROM_MALLOC);
  }
  return NULL;
}

// Check that the thread local allocators are flushed when threads are
// destroyed.
TEST(AddressSanitizer, ThreadedQuarantineTest) {
  const int n_threads = 3000;
  size_t mmaped1 = __asan_get_heap_size();
  for (int i = 0; i < n_threads; i++) {
    pthread_t t;
    PTHREAD_CREATE(&t, NULL, ThreadedQuarantineTestWorker, 0);
    PTHREAD_JOIN(t, 0);
    size_t mmaped2 = __asan_get_heap_size();
    // Heap growth must stay bounded (< 320M) even after thousands of
    // short-lived threads; otherwise per-thread caches are leaking.
    EXPECT_LT(mmaped2 - mmaped1, 320U * (1 << 20));
  }
}

// Worker for the one-size stress test below: 1000 rounds of allocating
// and then freeing 1000 32-byte chunks.
void *ThreadedOneSizeMallocStress(void *unused) {
  (void)unused;
  __asan::StackTrace stack;
  stack.trace[0] = 0x890;
  stack.size = 1;
  const size_t kNumMallocs = 1000;
  for (int iter = 0; iter < 1000; iter++) {
    void *p[kNumMallocs];
    for (size_t i = 0; i < kNumMallocs; i++) {
      p[i] = __asan::asan_malloc(32, &stack);
    }
    for (size_t i = 0; i < kNumMallocs; i++) {
      __asan::asan_free(p[i], &stack, __asan::FROM_MALLOC);
    }
  }
  return NULL;
}

TEST(AddressSanitizer, ThreadedOneSizeMallocStressTest) {
  const int kNumThreads = 4;
  pthread_t t[kNumThreads];
  for (int i = 0; i < kNumThreads; i++) {
    PTHREAD_CREATE(&t[i], 0, ThreadedOneSizeMallocStress, 0);
  }
  for (int i = 0; i < kNumThreads; i++) {
    PTHREAD_JOIN(t[i], 0);
  }
}
// memset() aimed into ASan's own shadow regions must die with an
// "unknown-crash" report naming the region that was hit.
TEST(AddressSanitizer, MemsetWildAddressTest) {
  typedef void*(*memset_p)(void*, int, size_t);
  // Prevent inlining of memset().
  volatile memset_p libc_memset = (memset_p)memset;
  EXPECT_DEATH(libc_memset((void*)(kLowShadowBeg + 200), 0, 100),
               "unknown-crash.*low shadow");
  EXPECT_DEATH(libc_memset((void*)(kShadowGapBeg + 200), 0, 100),
               "unknown-crash.*shadow gap");
  EXPECT_DEATH(libc_memset((void*)(kHighShadowBeg + 200), 0, 100),
               "unknown-crash.*high shadow");
}

TEST(AddressSanitizerInterface, GetEstimatedAllocatedSize) {
  // The two allocator versions round a zero-byte request differently.
#if ASAN_ALLOCATOR_VERSION == 1
  EXPECT_EQ(1U, __asan_get_estimated_allocated_size(0));
#elif ASAN_ALLOCATOR_VERSION == 2
  EXPECT_EQ(0U, __asan_get_estimated_allocated_size(0));
#endif
  // For non-zero requests the estimate equals the request here.
  const size_t sizes[] = { 1, 30, 1<<30 };
  for (size_t i = 0; i < 3; i++) {
    EXPECT_EQ(sizes[i], __asan_get_estimated_allocated_size(sizes[i]));
  }
}

// Fragment of the report printed when __asan_get_allocated_size() is
// called on a pointer the allocator does not own.
static const char* kGetAllocatedSizeErrorMsg =
  "attempting to call __asan_get_allocated_size()";

TEST(AddressSanitizerInterface, GetAllocatedSizeAndOwnershipTest) {
  const size_t kArraySize = 100;
  char *array = Ident((char*)malloc(kArraySize));
  int *int_ptr = Ident(new int);

  // Allocated memory is owned by allocator. Allocated size should be
  // equal to requested size.
  EXPECT_EQ(true, __asan_get_ownership(array));
  EXPECT_EQ(kArraySize, __asan_get_allocated_size(array));
  EXPECT_EQ(true, __asan_get_ownership(int_ptr));
  EXPECT_EQ(sizeof(int), __asan_get_allocated_size(int_ptr));

  // We cannot call GetAllocatedSize from the memory we didn't map,
  // and from the interior pointers (not returned by previous malloc).
  void *wild_addr = (void*)0x1;
  EXPECT_FALSE(__asan_get_ownership(wild_addr));
  EXPECT_DEATH(__asan_get_allocated_size(wild_addr), kGetAllocatedSizeErrorMsg);
  EXPECT_FALSE(__asan_get_ownership(array + kArraySize / 2));
  EXPECT_DEATH(__asan_get_allocated_size(array + kArraySize / 2),
               kGetAllocatedSizeErrorMsg);

  // NULL is not owned, but is a valid argument for __asan_get_allocated_size().
  EXPECT_FALSE(__asan_get_ownership(NULL));
  EXPECT_EQ(0U, __asan_get_allocated_size(NULL));

  // When memory is freed, it's not owned, and call to GetAllocatedSize
  // is forbidden.
  free(array);
  EXPECT_FALSE(__asan_get_ownership(array));
  EXPECT_DEATH(__asan_get_allocated_size(array), kGetAllocatedSizeErrorMsg);

  delete int_ptr;
}

// malloc must raise the current-allocated-bytes counter by exactly the
// requested size, and free must restore it.
TEST(AddressSanitizerInterface, GetCurrentAllocatedBytesTest) {
  size_t before_malloc, after_malloc, after_free;
  char *array;
  const size_t kMallocSize = 100;
  before_malloc = __asan_get_current_allocated_bytes();

  array = Ident((char*)malloc(kMallocSize));
  after_malloc = __asan_get_current_allocated_bytes();
  EXPECT_EQ(before_malloc + kMallocSize, after_malloc);

  free(array);
  after_free = __asan_get_current_allocated_bytes();
  EXPECT_EQ(before_malloc, after_free);
}

// Triggers an ASan "double-free" report; used by the death tests below
// as a controlled way to make the process die with a known message.
static void DoDoubleFree() {
  int *x = Ident(new int);
  delete Ident(x);
  delete Ident(x);
}

#if ASAN_ALLOCATOR_VERSION == 1
// This test is run in a separate process, so that large malloced
// chunk won't remain in the free lists after the test.
// Note: use ASSERT_* instead of EXPECT_* here.
static void RunGetHeapSizeTestAndDie() {
  size_t old_heap_size, new_heap_size, heap_growth;
  // We unlikely have have chunk of this size in free list.
  static const size_t kLargeMallocSize = 1 << 29;  // 512M
  old_heap_size = __asan_get_heap_size();
  fprintf(stderr, "allocating %zu bytes:\n", kLargeMallocSize);
  free(Ident(malloc(kLargeMallocSize)));
  new_heap_size = __asan_get_heap_size();
  heap_growth = new_heap_size - old_heap_size;
  fprintf(stderr, "heap growth after first malloc: %zu\n", heap_growth);
  // First allocation must grow the heap by at least the chunk size but
  // no more than twice that (redzones/metadata overhead).
  ASSERT_GE(heap_growth, kLargeMallocSize);
  ASSERT_LE(heap_growth, 2 * kLargeMallocSize);

  // Now large chunk should fall into free list, and can be
  // allocated without increasing heap size.
  old_heap_size = new_heap_size;
  free(Ident(malloc(kLargeMallocSize)));
  heap_growth = __asan_get_heap_size() - old_heap_size;
  fprintf(stderr, "heap growth after second malloc: %zu\n", heap_growth);
  ASSERT_LT(heap_growth, kLargeMallocSize);

  // Test passed. Now die with expected double-free.
  DoDoubleFree();
}

TEST(AddressSanitizerInterface, GetHeapSizeTest) {
  EXPECT_DEATH(RunGetHeapSizeTestAndDie(), "double-free");
}
#elif ASAN_ALLOCATOR_VERSION == 2
TEST(AddressSanitizerInterface, GetHeapSizeTest) {
  // asan_allocator2 does not keep huge chunks in free list, but unmaps them.
  // The chunk should be greater than the quarantine size,
  // otherwise it will be stuck in quarantine instead of being unmaped.
  static const size_t kLargeMallocSize = 1 << 29;  // 512M
  uptr old_heap_size = __asan_get_heap_size();
  for (int i = 0; i < 3; i++) {
    // fprintf(stderr, "allocating %zu bytes:\n", kLargeMallocSize);
    free(Ident(malloc(kLargeMallocSize)));
    EXPECT_EQ(old_heap_size, __asan_get_heap_size());
  }
}
#endif

// Note: use ASSERT_* instead of EXPECT_* here.
static void DoLargeMallocForGetFreeBytesTestAndDie() {
#if ASAN_ALLOCATOR_VERSION == 1
  // asan_allocator2 does not keep large chunks in free_lists, so this test
  // will not work.
  size_t old_free_bytes, new_free_bytes;
  static const size_t kLargeMallocSize = 1 << 29;  // 512M
  // If we malloc and free a large memory chunk, it will not fall
  // into quarantine and will be available for future requests.
  old_free_bytes = __asan_get_free_bytes();
  fprintf(stderr, "allocating %zu bytes:\n", kLargeMallocSize);
  fprintf(stderr, "free bytes before malloc: %zu\n", old_free_bytes);
  free(Ident(malloc(kLargeMallocSize)));
  new_free_bytes = __asan_get_free_bytes();
  fprintf(stderr, "free bytes after malloc and free: %zu\n", new_free_bytes);
  ASSERT_GE(new_free_bytes, old_free_bytes + kLargeMallocSize);
#endif  // ASAN_ALLOCATOR_VERSION
  // Test passed.
  DoDoubleFree();
}

TEST(AddressSanitizerInterface, GetFreeBytesTest) {
#if ASAN_ALLOCATOR_VERSION == 1
  // Allocate a small chunk. Now allocator probably has a lot of these
  // chunks to fulfill future requests. So, future requests will decrease
  // the number of free bytes. Do this only on systems where there
  // is enough memory for such assumptions.
  if (SANITIZER_WORDSIZE == 64 && !ASAN_LOW_MEMORY) {
    static const size_t kNumOfChunks = 100;
    static const size_t kChunkSize = 100;
    char *chunks[kNumOfChunks];
    size_t i;
    size_t old_free_bytes, new_free_bytes;
    chunks[0] = Ident((char*)malloc(kChunkSize));
    old_free_bytes = __asan_get_free_bytes();
    for (i = 1; i < kNumOfChunks; i++) {
      chunks[i] = Ident((char*)malloc(kChunkSize));
      new_free_bytes = __asan_get_free_bytes();
      EXPECT_LT(new_free_bytes, old_free_bytes);
      old_free_bytes = new_free_bytes;
    }
    for (i = 0; i < kNumOfChunks; i++)
      free(chunks[i]);
  }
#endif
  EXPECT_DEATH(DoLargeMallocForGetFreeBytesTestAndDie(), "double-free");
}

// Parameters for ManyThreadsWithStatsStressTest: four malloc sizes per
// iteration, 250 iterations per thread, 40/200 threads (32/64-bit).
static const size_t kManyThreadsMallocSizes[] = {5, 1UL<<10, 1UL<<14, 357};
static const size_t kManyThreadsIterations = 250;
static const size_t kManyThreadsNumThreads =
  (SANITIZER_WORDSIZE == 32) ? 40 : 200;

void *ManyThreadsWithStatsWorker(void *arg) {
  (void)arg;
  for (size_t iter = 0; iter < kManyThreadsIterations; iter++) {
    for (size_t size_index = 0; size_index < 4; size_index++) {
      free(Ident(malloc(kManyThreadsMallocSizes[size_index])));
    }
  }
  // Just one large allocation.
  free(Ident(malloc(1 << 20)));
  return 0;
}

// Hammer the allocator stats from many threads and check the
// current-allocated-bytes counter roughly returns to its starting value.
TEST(AddressSanitizerInterface, ManyThreadsWithStatsStressTest) {
  size_t before_test, after_test, i;
  pthread_t threads[kManyThreadsNumThreads];
  before_test = __asan_get_current_allocated_bytes();
  for (i = 0; i < kManyThreadsNumThreads; i++) {
    PTHREAD_CREATE(&threads[i], 0,
                   (void* (*)(void *x))ManyThreadsWithStatsWorker, (void*)i);
  }
  for (i = 0; i < kManyThreadsNumThreads; i++) {
    PTHREAD_JOIN(threads[i], 0);
  }
  after_test = __asan_get_current_allocated_bytes();
  // ASan stats also reflect memory usage of internal ASan RTL structs,
  // so we can't check for equality here.
  EXPECT_LT(after_test, before_test + (1UL<<20));
}

// __asan_set_error_exit_code() returns the previously installed code;
// a fatal report then exits with the currently installed one.
TEST(AddressSanitizerInterface, ExitCode) {
  int original_exit_code = __asan_set_error_exit_code(7);
  EXPECT_EXIT(DoDoubleFree(), ::testing::ExitedWithCode(7), "");
  EXPECT_EQ(7, __asan_set_error_exit_code(8));
  EXPECT_EXIT(DoDoubleFree(), ::testing::ExitedWithCode(8), "");
  EXPECT_EQ(8, __asan_set_error_exit_code(original_exit_code));
  EXPECT_EXIT(DoDoubleFree(),
              ::testing::ExitedWithCode(original_exit_code), "");
}

// Death callback for DeathCallbackTest; its output is what the death
// test's regex matches on.
static void MyDeathCallback() {
  fprintf(stderr, "MyDeathCallback\n");
}

TEST(AddressSanitizerInterface, DeathCallbackTest) {
  __asan_set_death_callback(MyDeathCallback);
  EXPECT_DEATH(DoDoubleFree(), "MyDeathCallback");
  __asan_set_death_callback(NULL);
}

static const char* kUseAfterPoisonErrorMessage = "use-after-poison";

// Shorthands: assert that the byte at ptr+offset is (not) poisoned.
#define GOOD_ACCESS(ptr, offset)  \
    EXPECT_FALSE(__asan::AddressIsPoisoned((uptr)(ptr + offset)))

#define BAD_ACCESS(ptr, offset) \
    EXPECT_TRUE(__asan::AddressIsPoisoned((uptr)(ptr + offset)))

TEST(AddressSanitizerInterface, SimplePoisonMemoryRegionTest) {
  char *array = Ident((char*)malloc(120));
  // poison array[40..80)
  __asan_poison_memory_region(array + 40, 40);
  GOOD_ACCESS(array, 39);
  GOOD_ACCESS(array, 80);
  BAD_ACCESS(array, 40);
  BAD_ACCESS(array, 60);
  BAD_ACCESS(array, 79);
  EXPECT_DEATH(__asan_report_error(0, 0, 0, (uptr)(array + 40), true, 1),
               kUseAfterPoisonErrorMessage);
  __asan_unpoison_memory_region(array + 40, 40);
  // access previously poisoned memory.
  GOOD_ACCESS(array, 40);
  GOOD_ACCESS(array, 79);
  free(array);
}

TEST(AddressSanitizerInterface, OverlappingPoisonMemoryRegionTest) {
  char *array = Ident((char*)malloc(120));
  // Poison [0..40) and [80..120)
  __asan_poison_memory_region(array, 40);
  __asan_poison_memory_region(array + 80, 40);
  BAD_ACCESS(array, 20);
  GOOD_ACCESS(array, 60);
  BAD_ACCESS(array, 100);
  // Poison whole array - [0..120)
  __asan_poison_memory_region(array, 120);
  BAD_ACCESS(array, 60);
  // Unpoison [24..96)
  __asan_unpoison_memory_region(array + 24, 72);
  BAD_ACCESS(array, 23);
  GOOD_ACCESS(array, 24);
  GOOD_ACCESS(array, 60);
  GOOD_ACCESS(array, 95);
  BAD_ACCESS(array, 96);
  free(array);
}

TEST(AddressSanitizerInterface, PushAndPopWithPoisoningTest) {
  // Vector of capacity 20
  char *vec = Ident((char*)malloc(20));
  __asan_poison_memory_region(vec, 20);
  for (size_t i = 0; i < 7; i++) {
    // Simulate push_back.
    __asan_unpoison_memory_region(vec + i, 1);
    GOOD_ACCESS(vec, i);
    BAD_ACCESS(vec, i + 1);
  }
  for (size_t i = 7; i > 0; i--) {
    // Simulate pop_back.
    __asan_poison_memory_region(vec + i - 1, 1);
    BAD_ACCESS(vec, i - 1);
    if (i > 1) GOOD_ACCESS(vec, i - 2);
  }
  free(vec);
}

// Make sure that each aligned block of size "2^granularity" doesn't have
// "true" value before "false" value.
// Normalize an expected-poison bitmap so it is representable by real shadow
// memory: scanning from the end, once an unpoisoned ("false") byte is seen
// inside a 2^granularity-aligned block, every earlier byte of that block is
// forced unpoisoned as well.
static void MakeShadowValid(bool *shadow, int length, int granularity) {
  bool can_be_poisoned = true;
  for (int i = length - 1; i >= 0; i--) {
    if (!shadow[i])
      can_be_poisoned = false;
    if (!can_be_poisoned)
      shadow[i] = false;
    if (i % (1 << granularity) == 0) {
      // Crossing into the previous aligned block resets the constraint.
      can_be_poisoned = true;
    }
  }
}

// Exhaustively poison/unpoison every pair of sub-ranges of a 24-byte chunk
// and compare the real shadow state against the `expected` model
// (granularity 3, i.e. 8-byte shadow cells).
TEST(AddressSanitizerInterface, PoisoningStressTest) {
  const size_t kSize = 24;
  bool expected[kSize];
  char *arr = Ident((char*)malloc(kSize));
  for (size_t l1 = 0; l1 < kSize; l1++) {
    for (size_t s1 = 1; l1 + s1 <= kSize; s1++) {
      for (size_t l2 = 0; l2 < kSize; l2++) {
        for (size_t s2 = 1; l2 + s2 <= kSize; s2++) {
          // Poison [l1, l1+s1), [l2, l2+s2) and check result.
          __asan_unpoison_memory_region(arr, kSize);
          __asan_poison_memory_region(arr + l1, s1);
          __asan_poison_memory_region(arr + l2, s2);
          memset(expected, false, kSize);
          memset(expected + l1, true, s1);
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          memset(expected + l2, true, s2);
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          for (size_t i = 0; i < kSize; i++) {
            ASSERT_EQ(expected[i], __asan_address_is_poisoned(arr + i));
          }
          // Unpoison [l1, l1+s1) and [l2, l2+s2) and check result.
          __asan_poison_memory_region(arr, kSize);
          __asan_unpoison_memory_region(arr + l1, s1);
          __asan_unpoison_memory_region(arr + l2, s2);
          memset(expected, true, kSize);
          memset(expected + l1, false, s1);
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          memset(expected + l2, false, s2);
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          for (size_t i = 0; i < kSize; i++) {
            ASSERT_EQ(expected[i], __asan_address_is_poisoned(arr + i));
          }
        }
      }
    }
  }
}

// Report fragments emitted when (un)poisoning a range the allocator
// does not own.
static const char *kInvalidPoisonMessage = "invalid-poison-memory-range";
static const char *kInvalidUnpoisonMessage = "invalid-unpoison-memory-range";

TEST(AddressSanitizerInterface, DISABLED_InvalidPoisonAndUnpoisonCallsTest) {
  char *array = Ident((char*)malloc(120));
  __asan_unpoison_memory_region(array, 120);
  // Try to unpoison not owned memory
  EXPECT_DEATH(__asan_unpoison_memory_region(array, 121),
               kInvalidUnpoisonMessage);
  EXPECT_DEATH(__asan_unpoison_memory_region(array - 1, 120),
               kInvalidUnpoisonMessage);

  __asan_poison_memory_region(array, 120);
  // Try to poison not owned memory.
  EXPECT_DEATH(__asan_poison_memory_region(array, 121), kInvalidPoisonMessage);
  EXPECT_DEATH(__asan_poison_memory_region(array - 1, 120),
               kInvalidPoisonMessage);
  free(array);
}

// Error-report callback: writes "ABCDEF" markers around the full report
// on stderr (fd 2) and exits.  NOTE(review): despite the name, it does
// not transform the report text, it only brackets it.
static void ErrorReportCallbackOneToZ(const char *report) {
  int report_len = strlen(report);
  ASSERT_EQ(6, write(2, "ABCDEF", 6));
  ASSERT_EQ(report_len, write(2, report, report_len));
  ASSERT_EQ(6, write(2, "ABCDEF", 6));
  _exit(1);
}

// The callback must receive the complete report: the death-test regex
// requires both markers with the report text in between.
TEST(AddressSanitizerInterface, SetErrorReportCallbackTest) {
  __asan_set_error_report_callback(ErrorReportCallbackOneToZ);
  EXPECT_DEATH(__asan_report_error(0, 0, 0, 0, true, 1),
               ASAN_PCRE_DOTALL "ABCDEF.*AddressSanitizer.*WRITE.*ABCDEF");
  __asan_set_error_report_callback(NULL);
}

TEST(AddressSanitizerInterface, GetOwnershipStressTest) {
  std::vector<char *> pointers;
  std::vector<size_t> sizes;
#if ASAN_ALLOCATOR_VERSION == 1
  const size_t kNumMallocs =
      (SANITIZER_WORDSIZE <= 32 || ASAN_LOW_MEMORY) ? 1 << 10 : 1 << 14;
#elif ASAN_ALLOCATOR_VERSION == 2  // too slow with asan_allocator2. :(
  const size_t kNumMallocs = 1 << 9;
#endif
  for (size_t i = 0; i < kNumMallocs; i++) {
    size_t size = i * 100 + 1;
    pointers.push_back((char*)malloc(size));
    sizes.push_back(size);
  }
  for (size_t i = 0; i < 4000000; i++) {
    // Addresses not returned by malloc (a stack address, a wild pointer)
    // must never be reported as owned.
    EXPECT_FALSE(__asan_get_ownership(&pointers));
    EXPECT_FALSE(__asan_get_ownership((void*)0x1234));
    size_t idx = i % kNumMallocs;
    EXPECT_TRUE(__asan_get_ownership(pointers[idx]));
    EXPECT_EQ(sizes[idx], __asan_get_allocated_size(pointers[idx]));
  }
  for (size_t i = 0, n = pointers.size(); i < n; i++)
    free(pointers[i]);
}