//===-- asan_allocator.cc ---------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Implementation of ASan's memory allocator.
// Every piece of memory (AsanChunk) allocated by the allocator
// has a left redzone of REDZONE bytes and
// a right redzone such that the end of the chunk is aligned to REDZONE
// (i.e. the right redzone is between 0 and REDZONE-1 bytes).
// The left redzone is always poisoned.
// The right redzone is poisoned on malloc, the body is poisoned on free.
// Once freed, a chunk is moved to a quarantine (fifo list).
// After quarantine, a chunk is returned to the free lists.
//
// The left redzone contains ASan's internal data and the stack trace of
// the malloc call.
// Once freed, the body of the chunk contains the stack trace of the free call.
//
//===----------------------------------------------------------------------===//

#include "asan_allocator.h"
#include "asan_interceptors.h"
#include "asan_interface.h"
#include "asan_internal.h"
#include "asan_lock.h"
#include "asan_mapping.h"
#include "asan_stats.h"
#include "asan_thread.h"
#include "asan_thread_registry.h"

#if defined(_WIN32) && !defined(__clang__)
#include <intrin.h>
#endif

namespace __asan {

#define REDZONE FLAG_redzone
static const size_t kMinAllocSize = REDZONE * 2;
static const uint64_t kMaxAvailableRam = 128ULL << 30;  // 128G
static const size_t kMaxThreadLocalQuarantine = 1 << 20;  // 1M

static const size_t kMinMmapSize = (ASAN_LOW_MEMORY) ? 4UL << 17 : 4UL << 20;
static const size_t kMaxSizeForThreadLocalFreeList =
    (ASAN_LOW_MEMORY) ? 1 << 15 : 1 << 17;

// Size classes less than kMallocSizeClassStep are powers of two.
// All other size classes are multiples of kMallocSizeClassStep.
static const size_t kMallocSizeClassStepLog = 26;
static const size_t kMallocSizeClassStep = 1UL << kMallocSizeClassStepLog;

static const size_t kMaxAllowedMallocSize =
    (__WORDSIZE == 32) ? 3UL << 30 : 8UL << 30;

static inline bool IsAligned(uintptr_t a, uintptr_t alignment) {
  return (a & (alignment - 1)) == 0;
}

static inline size_t Log2(size_t x) {
  CHECK(IsPowerOfTwo(x));
#if !defined(_WIN32) || defined(__clang__)
  return __builtin_ctzl(x);
#elif defined(_WIN64)
  unsigned long ret;  // NOLINT
  _BitScanForward64(&ret, x);
  return ret;
#else
  unsigned long ret;  // NOLINT
  _BitScanForward(&ret, x);
  return ret;
#endif
}

static inline size_t RoundUpToPowerOfTwo(size_t size) {
  CHECK(size);
  if (IsPowerOfTwo(size)) return size;

  unsigned long up;  // NOLINT
#if !defined(_WIN32) || defined(__clang__)
  up = __WORDSIZE - 1 - __builtin_clzl(size);
#elif defined(_WIN64)
  _BitScanReverse64(&up, size);
#else
  _BitScanReverse(&up, size);
#endif
  CHECK(size < (1ULL << (up + 1)));
  CHECK(size > (1ULL << up));
  return 1UL << (up + 1);
}

static inline size_t SizeClassToSize(uint8_t size_class) {
  CHECK(size_class < kNumberOfSizeClasses);
  if (size_class <= kMallocSizeClassStepLog) {
    return 1UL << size_class;
  } else {
    return (size_class - kMallocSizeClassStepLog) * kMallocSizeClassStep;
  }
}

static inline uint8_t SizeToSizeClass(size_t size) {
  uint8_t res = 0;
  if (size <= kMallocSizeClassStep) {
    size_t rounded = RoundUpToPowerOfTwo(size);
    res = Log2(rounded);
  } else {
    res = ((size + kMallocSizeClassStep - 1) / kMallocSizeClassStep)
        + kMallocSizeClassStepLog;
  }
  CHECK(res < kNumberOfSizeClasses);
  CHECK(size <= SizeClassToSize(res));
  return res;
}
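
// Worked example of the size-class mapping above (illustrative, using the
// constants defined earlier: kMallocSizeClassStepLog == 26, i.e. a 64M step):
//   size 100      -> rounded up to 128      -> class 7  (128-byte chunks)
//   size 1 << 20  -> already a power of two -> class 20 (1M chunks)
//   size 100M     -> ceil(100M / 64M) + 26  -> class 28,
//                    whose chunks are (28 - 26) * 64M == 128M.
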
// Given REDZONE bytes, we need to mark the first 'size' bytes
// as addressable and the remaining REDZONE-size bytes as unaddressable.
static void PoisonHeapPartialRightRedzone(uintptr_t mem, size_t size) {
  CHECK(size <= REDZONE);
  CHECK(IsAligned(mem, REDZONE));
  CHECK(IsPowerOfTwo(SHADOW_GRANULARITY));
  CHECK(IsPowerOfTwo(REDZONE));
  CHECK(REDZONE >= SHADOW_GRANULARITY);
  PoisonShadowPartialRightRedzone(mem, size, REDZONE,
                                  kAsanHeapRightRedzoneMagic);
}

static uint8_t *MmapNewPagesAndPoisonShadow(size_t size) {
  CHECK(IsAligned(size, kPageSize));
  uint8_t *res = (uint8_t*)AsanMmapSomewhereOrDie(size, __FUNCTION__);
  PoisonShadow((uintptr_t)res, size, kAsanHeapLeftRedzoneMagic);
  if (FLAG_debug) {
    Printf("ASAN_MMAP: [%p, %p)\n", res, res + size);
  }
  return res;
}

// Every chunk of memory allocated by this allocator can be in one of 3 states:
// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
//
// The pseudo state CHUNK_MEMALIGN is used to mark that the address is not
// the beginning of an AsanChunk (in which case 'next' contains the address
// of the AsanChunk).
//
// The magic numbers for the enum values are taken randomly.
enum {
  CHUNK_AVAILABLE  = 0x573B,
  CHUNK_ALLOCATED  = 0x3204,
  CHUNK_QUARANTINE = 0x1978,
  CHUNK_MEMALIGN   = 0xDC68,
};
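
// Illustrative chunk layout (a sketch based on the accessors below; 'offset'
// is REDZONE for ordinary allocations and larger for memalign):
//
//   | ChunkBase | compressed alloc stack | user memory ... | right redzone |
//   ^--- this                            ^--- beg() == this + offset
//   |<------------- REDZONE ------------>|
//
// The compressed free stack is written at this + REDZONE, i.e. over the
// start of the (poisoned) user memory, once the chunk is freed.
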
struct ChunkBase {
  uint16_t   chunk_state;
  uint8_t    size_class;
  uint32_t   offset;  // User-visible memory starts at this+offset (beg()).
  int32_t    alloc_tid;
  int32_t    free_tid;
  size_t     used_size;  // Size requested by the user.
  AsanChunk *next;

  uintptr_t beg() { return (uintptr_t)this + offset; }
  size_t Size() { return SizeClassToSize(size_class); }
  uint8_t SizeClass() { return size_class; }
};

struct AsanChunk: public ChunkBase {
  uint32_t *compressed_alloc_stack() {
    CHECK(REDZONE >= sizeof(ChunkBase));
    return (uint32_t*)((uintptr_t)this + sizeof(ChunkBase));
  }
  uint32_t *compressed_free_stack() {
    CHECK(REDZONE >= sizeof(ChunkBase));
    return (uint32_t*)((uintptr_t)this + REDZONE);
  }

  // The left redzone after the ChunkBase is given to the alloc stack trace.
  size_t compressed_alloc_stack_size() {
    return (REDZONE - sizeof(ChunkBase)) / sizeof(uint32_t);
  }
  size_t compressed_free_stack_size() {
    return (REDZONE) / sizeof(uint32_t);
  }

  bool AddrIsInside(uintptr_t addr, size_t access_size, size_t *offset) {
    if (addr >= beg() && (addr + access_size) <= (beg() + used_size)) {
      *offset = addr - beg();
      return true;
    }
    return false;
  }

  bool AddrIsAtLeft(uintptr_t addr, size_t access_size, size_t *offset) {
    if (addr < beg()) {
      *offset = beg() - addr;
      return true;
    }
    return false;
  }

  bool AddrIsAtRight(uintptr_t addr, size_t access_size, size_t *offset) {
    if (addr + access_size >= beg() + used_size) {
      if (addr <= beg() + used_size)
        *offset = 0;
      else
        *offset = addr - (beg() + used_size);
      return true;
    }
    return false;
  }

  void DescribeAddress(uintptr_t addr, size_t access_size) {
    size_t offset;
    Printf("%p is located ", addr);
    if (AddrIsInside(addr, access_size, &offset)) {
      Printf("%zu bytes inside of", offset);
    } else if (AddrIsAtLeft(addr, access_size, &offset)) {
      Printf("%zu bytes to the left of", offset);
    } else if (AddrIsAtRight(addr, access_size, &offset)) {
      Printf("%zu bytes to the right of", offset);
    } else {
      Printf(" somewhere around (this is an AddressSanitizer bug!)");
    }
    Printf(" %zu-byte region [%p,%p)\n",
           used_size, beg(), beg() + used_size);
  }
};

static AsanChunk *PtrToChunk(uintptr_t ptr) {
  AsanChunk *m = (AsanChunk*)(ptr - REDZONE);
  if (m->chunk_state == CHUNK_MEMALIGN) {
    m = m->next;
  }
  return m;
}

void AsanChunkFifoList::PushList(AsanChunkFifoList *q) {
  CHECK(q->size() > 0);
  if (last_) {
    CHECK(first_);
    CHECK(!last_->next);
    last_->next = q->first_;
    last_ = q->last_;
  } else {
    CHECK(!first_);
    last_ = q->last_;
    first_ = q->first_;
    CHECK(first_);
  }
  CHECK(last_);
  CHECK(!last_->next);
  size_ += q->size();
  q->clear();
}

void AsanChunkFifoList::Push(AsanChunk *n) {
  CHECK(n->next == NULL);
  if (last_) {
    CHECK(first_);
    CHECK(!last_->next);
    last_->next = n;
    last_ = n;
  } else {
    CHECK(!first_);
    last_ = first_ = n;
  }
  size_ += n->Size();
}

// Interesting performance observation: this function takes up to 15% of
// overall allocator time. That's because *first_ was evicted from cache
// long ago. Not sure if we can or want to do anything about this.
AsanChunk *AsanChunkFifoList::Pop() {
  CHECK(first_);
  AsanChunk *res = first_;
  first_ = first_->next;
  if (first_ == NULL)
    last_ = NULL;
  CHECK(size_ >= res->Size());
  size_ -= res->Size();
  if (last_) {
    CHECK(!last_->next);
  }
  return res;
}
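
// Chunk lifecycle, as managed by MallocInfo below (sketch):
//
//   GetNewChunks()          Allocate()          Deallocate()
//   mmap -> CHUNK_AVAILABLE -> CHUNK_ALLOCATED -> CHUNK_QUARANTINE
//              ^                                       |
//              +----------- QuarantinePop() -----------+
//
// A chunk leaves the quarantine (a FIFO) only when the total quarantine size
// exceeds FLAG_quarantine_size; it then goes back on a free list.
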
// All pages we ever allocated.
struct PageGroup {
  uintptr_t beg;
  uintptr_t end;
  size_t size_of_chunk;
  uintptr_t last_chunk;
  bool InRange(uintptr_t addr) {
    return addr >= beg && addr < end;
  }
};

class MallocInfo {
 public:
  explicit MallocInfo(LinkerInitialized x) : mu_(x) { }

  AsanChunk *AllocateChunks(uint8_t size_class, size_t n_chunks) {
    AsanChunk *m = NULL;
    AsanChunk **fl = &free_lists_[size_class];
    {
      ScopedLock lock(&mu_);
      for (size_t i = 0; i < n_chunks; i++) {
        if (!(*fl)) {
          *fl = GetNewChunks(size_class);
        }
        AsanChunk *t = *fl;
        *fl = t->next;
        t->next = m;
        CHECK(t->chunk_state == CHUNK_AVAILABLE);
        m = t;
      }
    }
    return m;
  }

  void SwallowThreadLocalMallocStorage(AsanThreadLocalMallocStorage *x,
                                       bool eat_free_lists) {
    CHECK(FLAG_quarantine_size > 0);
    ScopedLock lock(&mu_);
    AsanChunkFifoList *q = &x->quarantine_;
    if (q->size() > 0) {
      quarantine_.PushList(q);
      while (quarantine_.size() > FLAG_quarantine_size) {
        QuarantinePop();
      }
    }
    if (eat_free_lists) {
      for (size_t size_class = 0; size_class < kNumberOfSizeClasses;
           size_class++) {
        AsanChunk *m = x->free_lists_[size_class];
        while (m) {
          AsanChunk *t = m->next;
          m->next = free_lists_[size_class];
          free_lists_[size_class] = m;
          m = t;
        }
        x->free_lists_[size_class] = 0;
      }
    }
  }

  void BypassThreadLocalQuarantine(AsanChunk *chunk) {
    ScopedLock lock(&mu_);
    quarantine_.Push(chunk);
  }

  AsanChunk *FindMallocedOrFreed(uintptr_t addr, size_t access_size) {
    ScopedLock lock(&mu_);
    return FindChunkByAddr(addr);
  }

  size_t AllocationSize(uintptr_t ptr) {
    if (!ptr) return 0;
    ScopedLock lock(&mu_);

    // First, check if this is our memory.
    PageGroup *g = FindPageGroupUnlocked(ptr);
    if (!g) return 0;
    AsanChunk *m = PtrToChunk(ptr);
    if (m->chunk_state == CHUNK_ALLOCATED) {
      return m->used_size;
    } else {
      return 0;
    }
  }

  void ForceLock() {
    mu_.Lock();
  }

  void ForceUnlock() {
    mu_.Unlock();
  }

  void PrintStatus() {
    ScopedLock lock(&mu_);
    size_t malloced = 0;

    Printf(" MallocInfo: in quarantine: %zu malloced: %zu; ",
           quarantine_.size() >> 20, malloced >> 20);
    for (size_t j = 1; j < kNumberOfSizeClasses; j++) {
      AsanChunk *i = free_lists_[j];
      if (!i) continue;
      size_t t = 0;
      for (; i; i = i->next) {
        t += i->Size();
      }
      Printf("%zu:%zu ", j, t >> 20);
    }
    Printf("\n");
  }

  PageGroup *FindPageGroup(uintptr_t addr) {
    ScopedLock lock(&mu_);
    return FindPageGroupUnlocked(addr);
  }

 private:
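
  // Note on the search below: each PageGroup object is placed at the tail of
  // the mmaped region it describes (see GetNewChunks()), so the raw pointer
  // values in page_groups_ increase with the address ranges they cover, and
  // a pointer compares as (roughly) the end address of its group. Sorting the
  // pointers therefore sorts the groups, which is what the binary search
  // relies on.
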
  PageGroup *FindPageGroupUnlocked(uintptr_t addr) {
    int n = n_page_groups_;
    // If the page groups are not sorted yet, sort them.
    if (n_sorted_page_groups_ < n) {
      SortArray((uintptr_t*)page_groups_, n);
      n_sorted_page_groups_ = n;
    }
    // Binary search over the page groups.
    int beg = 0, end = n;
    while (beg < end) {
      int med = (beg + end) / 2;
      uintptr_t g = (uintptr_t)page_groups_[med];
      if (addr > g) {
        // 'g' points to the end of the group, so 'addr'
        // may not belong to page_groups_[med] or any previous group.
        beg = med + 1;
      } else {
        // 'addr' may belong to page_groups_[med] or a previous group.
        end = med;
      }
    }
    if (beg >= n)
      return NULL;
    PageGroup *g = page_groups_[beg];
    CHECK(g);
    if (g->InRange(addr))
      return g;
    return NULL;
  }

  // We have an address between two chunks, and we want to report just one.
  AsanChunk *ChooseChunk(uintptr_t addr,
                         AsanChunk *left_chunk, AsanChunk *right_chunk) {
    // Prefer an allocated chunk or a chunk from quarantine.
    if (left_chunk->chunk_state == CHUNK_AVAILABLE &&
        right_chunk->chunk_state != CHUNK_AVAILABLE)
      return right_chunk;
    if (right_chunk->chunk_state == CHUNK_AVAILABLE &&
        left_chunk->chunk_state != CHUNK_AVAILABLE)
      return left_chunk;
    // Choose based on offset.
    size_t l_offset = 0, r_offset = 0;
    CHECK(left_chunk->AddrIsAtRight(addr, 1, &l_offset));
    CHECK(right_chunk->AddrIsAtLeft(addr, 1, &r_offset));
    if (l_offset < r_offset)
      return left_chunk;
    return right_chunk;
  }

  AsanChunk *FindChunkByAddr(uintptr_t addr) {
    PageGroup *g = FindPageGroupUnlocked(addr);
    if (!g) return 0;
    CHECK(g->size_of_chunk);
    uintptr_t offset_from_beg = addr - g->beg;
    uintptr_t this_chunk_addr = g->beg +
        (offset_from_beg / g->size_of_chunk) * g->size_of_chunk;
    CHECK(g->InRange(this_chunk_addr));
    AsanChunk *m = (AsanChunk*)this_chunk_addr;
    CHECK(m->chunk_state == CHUNK_ALLOCATED ||
          m->chunk_state == CHUNK_AVAILABLE ||
          m->chunk_state == CHUNK_QUARANTINE);
    size_t offset = 0;
    if (m->AddrIsInside(addr, 1, &offset))
      return m;

    if (m->AddrIsAtRight(addr, 1, &offset)) {
      if (this_chunk_addr == g->last_chunk)  // rightmost chunk
        return m;
      uintptr_t right_chunk_addr = this_chunk_addr + g->size_of_chunk;
      CHECK(g->InRange(right_chunk_addr));
      return ChooseChunk(addr, m, (AsanChunk*)right_chunk_addr);
    } else {
      CHECK(m->AddrIsAtLeft(addr, 1, &offset));
      if (this_chunk_addr == g->beg)  // leftmost chunk
        return m;
      uintptr_t left_chunk_addr = this_chunk_addr - g->size_of_chunk;
      CHECK(g->InRange(left_chunk_addr));
      return ChooseChunk(addr, (AsanChunk*)left_chunk_addr, m);
    }
  }

  void QuarantinePop() {
    CHECK(quarantine_.size() > 0);
    AsanChunk *m = quarantine_.Pop();
    CHECK(m);
    // if (F_v >= 2) Printf("MallocInfo::pop %p\n", m);

    CHECK(m->chunk_state == CHUNK_QUARANTINE);
    m->chunk_state = CHUNK_AVAILABLE;
    PoisonShadow((uintptr_t)m, m->Size(), kAsanHeapLeftRedzoneMagic);
    CHECK(m->alloc_tid >= 0);
    CHECK(m->free_tid >= 0);

    size_t size_class = m->SizeClass();
    m->next = free_lists_[size_class];
    free_lists_[size_class] = m;

    // Statistics.
    AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
    thread_stats.real_frees++;
    thread_stats.really_freed += m->used_size;
    thread_stats.really_freed_redzones += m->Size() - m->used_size;
    thread_stats.really_freed_by_size[m->SizeClass()]++;
  }
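
  // Worked example for GetNewChunks() below (illustrative, with the
  // non-low-memory kMinMmapSize of 4M): for size class 7 (128-byte chunks)
  // we mmap 4M and carve it into 32768 chunks, giving the last chunk's slot
  // to the PageGroup descriptor (n_chunks--). For chunk sizes of at least
  // kPageSize, an extra page is appended to hold the PageGroup instead.
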
  // Get a list of newly allocated chunks.
  AsanChunk *GetNewChunks(uint8_t size_class) {
    size_t size = SizeClassToSize(size_class);
    CHECK(IsPowerOfTwo(kMinMmapSize));
    CHECK(size < kMinMmapSize || (size % kMinMmapSize) == 0);
    size_t mmap_size = Max(size, kMinMmapSize);
    size_t n_chunks = mmap_size / size;
    CHECK(n_chunks * size == mmap_size);
    if (size < kPageSize) {
      // Size is small, just poison the last chunk.
      n_chunks--;
    } else {
      // Size is large, allocate an extra page at the right and poison it.
      mmap_size += kPageSize;
    }
    CHECK(n_chunks > 0);
    uint8_t *mem = MmapNewPagesAndPoisonShadow(mmap_size);

    // Statistics.
    AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
    thread_stats.mmaps++;
    thread_stats.mmaped += mmap_size;
    thread_stats.mmaped_by_size[size_class] += n_chunks;

    AsanChunk *res = NULL;
    for (size_t i = 0; i < n_chunks; i++) {
      AsanChunk *m = (AsanChunk*)(mem + i * size);
      m->chunk_state = CHUNK_AVAILABLE;
      m->size_class = size_class;
      m->next = res;
      res = m;
    }
    PageGroup *pg = (PageGroup*)(mem + n_chunks * size);
    // This memory is already poisoned, no need to poison it again.
    pg->beg = (uintptr_t)mem;
    pg->end = pg->beg + mmap_size;
    pg->size_of_chunk = size;
    pg->last_chunk = (uintptr_t)(mem + size * (n_chunks - 1));
    int page_group_idx = AtomicInc(&n_page_groups_) - 1;
    CHECK(page_group_idx < (int)ASAN_ARRAY_SIZE(page_groups_));
    page_groups_[page_group_idx] = pg;
    return res;
  }

  AsanChunk *free_lists_[kNumberOfSizeClasses];
  AsanChunkFifoList quarantine_;
  AsanLock mu_;

  PageGroup *page_groups_[kMaxAvailableRam / kMinMmapSize];
  int n_page_groups_;  // atomic
  int n_sorted_page_groups_;
};

static MallocInfo malloc_info(LINKER_INITIALIZED);

void AsanThreadLocalMallocStorage::CommitBack() {
  malloc_info.SwallowThreadLocalMallocStorage(this, true);
}

static void Describe(uintptr_t addr, size_t access_size) {
  AsanChunk *m = malloc_info.FindMallocedOrFreed(addr, access_size);
  if (!m) return;
  m->DescribeAddress(addr, access_size);
  CHECK(m->alloc_tid >= 0);
  AsanThreadSummary *alloc_thread =
      asanThreadRegistry().FindByTid(m->alloc_tid);
  AsanStackTrace alloc_stack;
  AsanStackTrace::UncompressStack(&alloc_stack, m->compressed_alloc_stack(),
                                  m->compressed_alloc_stack_size());
  AsanThread *t = asanThreadRegistry().GetCurrent();
  CHECK(t);
  if (m->free_tid >= 0) {
    AsanThreadSummary *free_thread =
        asanThreadRegistry().FindByTid(m->free_tid);
    Printf("freed by thread T%d here:\n", free_thread->tid());
    AsanStackTrace free_stack;
    AsanStackTrace::UncompressStack(&free_stack, m->compressed_free_stack(),
                                    m->compressed_free_stack_size());
    free_stack.PrintStack();
    Printf("previously allocated by thread T%d here:\n",
           alloc_thread->tid());
    alloc_stack.PrintStack();
    t->summary()->Announce();
    free_thread->Announce();
    alloc_thread->Announce();
  } else {
    Printf("allocated by thread T%d here:\n", alloc_thread->tid());
    alloc_stack.PrintStack();
    t->summary()->Announce();
    alloc_thread->Announce();
  }
}
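
// Worked example for Allocate() below (illustrative, assuming the default
// REDZONE of 128): malloc(100) yields rounded_size == 128 and
// needed_size == 256 (one extra REDZONE for the left redzone), which maps to
// size class 8, i.e. a 256-byte chunk. The user pointer is chunk + REDZONE,
// and the trailing 28 bytes of the user's 128-byte granule are poisoned as a
// partial right redzone.
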
static uint8_t *Allocate(size_t alignment, size_t size, AsanStackTrace *stack) {
  __asan_init();
  CHECK(stack);
  if (size == 0) {
    size = 1;  // TODO(kcc): do something smarter
  }
  CHECK(IsPowerOfTwo(alignment));
  size_t rounded_size = RoundUpTo(size, REDZONE);
  size_t needed_size = rounded_size + REDZONE;
  if (alignment > REDZONE) {
    needed_size += alignment;
  }
  CHECK(IsAligned(needed_size, REDZONE));
  if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
    Report("WARNING: AddressSanitizer failed to allocate %p bytes\n", size);
    return 0;
  }

  uint8_t size_class = SizeToSizeClass(needed_size);
  size_t size_to_allocate = SizeClassToSize(size_class);
  CHECK(size_to_allocate >= kMinAllocSize);
  CHECK(size_to_allocate >= needed_size);
  CHECK(IsAligned(size_to_allocate, REDZONE));

  if (FLAG_v >= 3) {
    Printf("Allocate align: %zu size: %zu class: %u real: %zu\n",
           alignment, size, size_class, size_to_allocate);
  }

  AsanThread *t = asanThreadRegistry().GetCurrent();
  AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
  // Statistics
  thread_stats.mallocs++;
  thread_stats.malloced += size;
  thread_stats.malloced_redzones += size_to_allocate - size;
  thread_stats.malloced_by_size[size_class]++;

  AsanChunk *m = NULL;
  if (!t || size_to_allocate >= kMaxSizeForThreadLocalFreeList) {
    // get directly from global storage.
    m = malloc_info.AllocateChunks(size_class, 1);
    thread_stats.malloc_large++;
  } else {
    // get from the thread-local storage.
    AsanChunk **fl = &t->malloc_storage().free_lists_[size_class];
    if (!*fl) {
      size_t n_new_chunks = kMaxSizeForThreadLocalFreeList / size_to_allocate;
      *fl = malloc_info.AllocateChunks(size_class, n_new_chunks);
      thread_stats.malloc_small_slow++;
    }
    m = *fl;
    *fl = (*fl)->next;
  }
  CHECK(m);
  CHECK(m->chunk_state == CHUNK_AVAILABLE);
  m->chunk_state = CHUNK_ALLOCATED;
  m->next = NULL;
  CHECK(m->Size() == size_to_allocate);
  uintptr_t addr = (uintptr_t)m + REDZONE;
  CHECK(addr == (uintptr_t)m->compressed_free_stack());

  if (alignment > REDZONE && (addr & (alignment - 1))) {
    addr = RoundUpTo(addr, alignment);
    CHECK((addr & (alignment - 1)) == 0);
    AsanChunk *p = (AsanChunk*)(addr - REDZONE);
    p->chunk_state = CHUNK_MEMALIGN;
    p->next = m;
  }
  CHECK(m == PtrToChunk(addr));
  m->used_size = size;
  m->offset = addr - (uintptr_t)m;
  CHECK(m->beg() == addr);
  m->alloc_tid = t ? t->tid() : 0;
  m->free_tid = AsanThread::kInvalidTid;
  AsanStackTrace::CompressStack(stack, m->compressed_alloc_stack(),
                                m->compressed_alloc_stack_size());
  PoisonShadow(addr, rounded_size, 0);
  if (size < rounded_size) {
    PoisonHeapPartialRightRedzone(addr + rounded_size - REDZONE,
                                  size & (REDZONE - 1));
  }
  if (size <= FLAG_max_malloc_fill_size) {
    REAL(memset)((void*)addr, 0, rounded_size);
  }
  return (uint8_t*)addr;
}
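
// The free path below: the chunk state is flipped with a single atomic
// exchange, so of two racing free() calls exactly one sees CHUNK_ALLOCATED
// and the other reports a double-free. The freed chunk then sits in the
// thread-local quarantine until that exceeds kMaxThreadLocalQuarantine, at
// which point the whole list is merged into the global quarantine under one
// lock acquisition.
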
static void Deallocate(uint8_t *ptr, AsanStackTrace *stack) {
  if (!ptr) return;
  CHECK(stack);

  if (FLAG_debug) {
    CHECK(malloc_info.FindPageGroup((uintptr_t)ptr));
  }

  // Printf("Deallocate %p\n", ptr);
  AsanChunk *m = PtrToChunk((uintptr_t)ptr);

  // Flip the state atomically to avoid a race on double-free.
  uint16_t old_chunk_state = AtomicExchange(&m->chunk_state, CHUNK_QUARANTINE);

  if (old_chunk_state == CHUNK_QUARANTINE) {
    Report("ERROR: AddressSanitizer attempting double-free on %p:\n", ptr);
    stack->PrintStack();
    Describe((uintptr_t)ptr, 1);
    ShowStatsAndAbort();
  } else if (old_chunk_state != CHUNK_ALLOCATED) {
    Report("ERROR: AddressSanitizer attempting free on address which was not"
           " malloc()-ed: %p\n", ptr);
    stack->PrintStack();
    ShowStatsAndAbort();
  }
  CHECK(old_chunk_state == CHUNK_ALLOCATED);
  CHECK(m->free_tid == AsanThread::kInvalidTid);
  CHECK(m->alloc_tid >= 0);
  AsanThread *t = asanThreadRegistry().GetCurrent();
  m->free_tid = t ? t->tid() : 0;
  AsanStackTrace::CompressStack(stack, m->compressed_free_stack(),
                                m->compressed_free_stack_size());
  size_t rounded_size = RoundUpTo(m->used_size, REDZONE);
  PoisonShadow((uintptr_t)ptr, rounded_size, kAsanHeapFreeMagic);

  // Statistics.
  AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
  thread_stats.frees++;
  thread_stats.freed += m->used_size;
  thread_stats.freed_by_size[m->SizeClass()]++;

  CHECK(m->chunk_state == CHUNK_QUARANTINE);
  if (t) {
    AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
    CHECK(!m->next);
    ms->quarantine_.Push(m);

    if (ms->quarantine_.size() > kMaxThreadLocalQuarantine) {
      malloc_info.SwallowThreadLocalMallocStorage(ms, false);
    }
  } else {
    CHECK(!m->next);
    malloc_info.BypassThreadLocalQuarantine(m);
  }
}

static uint8_t *Reallocate(uint8_t *old_ptr, size_t new_size,
                           AsanStackTrace *stack) {
  CHECK(old_ptr && new_size);

  // Statistics.
  AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
  thread_stats.reallocs++;
  thread_stats.realloced += new_size;

  AsanChunk *m = PtrToChunk((uintptr_t)old_ptr);
  CHECK(m->chunk_state == CHUNK_ALLOCATED);
  size_t old_size = m->used_size;
  size_t memcpy_size = Min(new_size, old_size);
  uint8_t *new_ptr = Allocate(0, new_size, stack);
  if (new_ptr) {
    CHECK(REAL(memcpy) != NULL);
    REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
    Deallocate(old_ptr, stack);
  }
  return new_ptr;
}

}  // namespace __asan

// Malloc hooks declaration.
// ASAN_NEW_HOOK(ptr, size) is called immediately after
// allocation of "size" bytes, which returned "ptr".
// ASAN_DELETE_HOOK(ptr) is called immediately before
// deallocation of "ptr".
// If ASAN_NEW_HOOK or ASAN_DELETE_HOOK is defined, the user
// program must provide an implementation of that hook.
// If the macro is undefined, the hook is a no-op.
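// For example, a program built with -DASAN_NEW_HOOK=MyNewHook (a
// hypothetical hook name) would have to provide:
//   extern "C" void MyNewHook(void *ptr, size_t size) {
//     // record the allocation somewhere
//   }
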
#ifdef ASAN_NEW_HOOK
extern "C" void ASAN_NEW_HOOK(void *ptr, size_t size);
#else
static inline void ASAN_NEW_HOOK(void *ptr, size_t size) { }
#endif

#ifdef ASAN_DELETE_HOOK
extern "C" void ASAN_DELETE_HOOK(void *ptr);
#else
static inline void ASAN_DELETE_HOOK(void *ptr) { }
#endif

namespace __asan {

void *asan_memalign(size_t alignment, size_t size, AsanStackTrace *stack) {
  void *ptr = (void*)Allocate(alignment, size, stack);
  ASAN_NEW_HOOK(ptr, size);
  return ptr;
}

void asan_free(void *ptr, AsanStackTrace *stack) {
  ASAN_DELETE_HOOK(ptr);
  Deallocate((uint8_t*)ptr, stack);
}

void *asan_malloc(size_t size, AsanStackTrace *stack) {
  void *ptr = (void*)Allocate(0, size, stack);
  ASAN_NEW_HOOK(ptr, size);
  return ptr;
}

void *asan_calloc(size_t nmemb, size_t size, AsanStackTrace *stack) {
  void *ptr = (void*)Allocate(0, nmemb * size, stack);
  if (ptr)
    REAL(memset)(ptr, 0, nmemb * size);
  ASAN_NEW_HOOK(ptr, nmemb * size);
  return ptr;
}

void *asan_realloc(void *p, size_t size, AsanStackTrace *stack) {
  if (p == NULL) {
    void *ptr = (void*)Allocate(0, size, stack);
    ASAN_NEW_HOOK(ptr, size);
    return ptr;
  } else if (size == 0) {
    ASAN_DELETE_HOOK(p);
    Deallocate((uint8_t*)p, stack);
    return NULL;
  }
  return Reallocate((uint8_t*)p, size, stack);
}

void *asan_valloc(size_t size, AsanStackTrace *stack) {
  void *ptr = (void*)Allocate(kPageSize, size, stack);
  ASAN_NEW_HOOK(ptr, size);
  return ptr;
}

void *asan_pvalloc(size_t size, AsanStackTrace *stack) {
  size = RoundUpTo(size, kPageSize);
  if (size == 0) {
    // pvalloc(0) should allocate one page.
    size = kPageSize;
  }
  void *ptr = (void*)Allocate(kPageSize, size, stack);
  ASAN_NEW_HOOK(ptr, size);
  return ptr;
}

int asan_posix_memalign(void **memptr, size_t alignment, size_t size,
                        AsanStackTrace *stack) {
  void *ptr = Allocate(alignment, size, stack);
  CHECK(IsAligned((uintptr_t)ptr, alignment));
  ASAN_NEW_HOOK(ptr, size);
  *memptr = ptr;
  return 0;
}

size_t asan_malloc_usable_size(void *ptr, AsanStackTrace *stack) {
  CHECK(stack);
  if (ptr == NULL) return 0;
  size_t usable_size = malloc_info.AllocationSize((uintptr_t)ptr);
  if (FLAG_check_malloc_usable_size && (usable_size == 0)) {
    Report("ERROR: AddressSanitizer attempting to call malloc_usable_size() "
           "for pointer which is not owned: %p\n", ptr);
    stack->PrintStack();
    Describe((uintptr_t)ptr, 1);
    ShowStatsAndAbort();
  }
  return usable_size;
}

size_t asan_mz_size(const void *ptr) {
  return malloc_info.AllocationSize((uintptr_t)ptr);
}

void DescribeHeapAddress(uintptr_t addr, uintptr_t access_size) {
  Describe(addr, access_size);
}

void asan_mz_force_lock() {
  malloc_info.ForceLock();
}

void asan_mz_force_unlock() {
  malloc_info.ForceUnlock();
}
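
// A sketch of how the fake stack below is used (based on the interface
// functions at the end of this file): instrumented function prologues call
// __asan_stack_malloc() to replace the real frame with a FakeFrame carved
// from a per-size-class mmaped region, and epilogues call
// __asan_stack_free(), which poisons the retired frame. A later access
// through a dangling pointer into that frame is then reported as a
// use-after-return instead of silently reading the reused real stack.
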
// ---------------------- Fake stack -------------------- {{{1
FakeStack::FakeStack() {
  CHECK(REAL(memset) != NULL);
  REAL(memset)(this, 0, sizeof(*this));
}

bool FakeStack::AddrIsInSizeClass(uintptr_t addr, size_t size_class) {
  uintptr_t mem = allocated_size_classes_[size_class];
  uintptr_t size = ClassMmapSize(size_class);
  bool res = mem && addr >= mem && addr < mem + size;
  return res;
}

uintptr_t FakeStack::AddrIsInFakeStack(uintptr_t addr) {
  for (size_t i = 0; i < kNumberOfSizeClasses; i++) {
    if (AddrIsInSizeClass(addr, i)) return allocated_size_classes_[i];
  }
  return 0;
}

// We may want to compute this during compilation.
inline size_t FakeStack::ComputeSizeClass(size_t alloc_size) {
  size_t rounded_size = RoundUpToPowerOfTwo(alloc_size);
  size_t log = Log2(rounded_size);
  CHECK(alloc_size <= (1UL << log));
  if (!(alloc_size > (1UL << (log - 1)))) {
    Printf("alloc_size %zu log %zu\n", alloc_size, log);
  }
  CHECK(alloc_size > (1UL << (log - 1)));
  size_t res = log < kMinStackFrameSizeLog ? 0 : log - kMinStackFrameSizeLog;
  CHECK(res < kNumberOfSizeClasses);
  CHECK(ClassSize(res) >= rounded_size);
  return res;
}

void FakeFrameFifo::FifoPush(FakeFrame *node) {
  CHECK(node);
  node->next = 0;
  if (first_ == 0 && last_ == 0) {
    first_ = last_ = node;
  } else {
    CHECK(first_);
    CHECK(last_);
    last_->next = node;
    last_ = node;
  }
}

FakeFrame *FakeFrameFifo::FifoPop() {
  CHECK(first_ && last_ && "Exhausted fake stack");
  FakeFrame *res = 0;
  if (first_ == last_) {
    res = first_;
    first_ = last_ = 0;
  } else {
    res = first_;
    first_ = first_->next;
  }
  return res;
}

void FakeStack::Init(size_t stack_size) {
  stack_size_ = stack_size;
  alive_ = true;
}

void FakeStack::Cleanup() {
  alive_ = false;
  for (size_t i = 0; i < kNumberOfSizeClasses; i++) {
    uintptr_t mem = allocated_size_classes_[i];
    if (mem) {
      PoisonShadow(mem, ClassMmapSize(i), 0);
      allocated_size_classes_[i] = 0;
      AsanUnmapOrDie((void*)mem, ClassMmapSize(i));
    }
  }
}

size_t FakeStack::ClassMmapSize(size_t size_class) {
  return RoundUpToPowerOfTwo(stack_size_);
}

void FakeStack::AllocateOneSizeClass(size_t size_class) {
  CHECK(ClassMmapSize(size_class) >= kPageSize);
  uintptr_t new_mem = (uintptr_t)AsanMmapSomewhereOrDie(
      ClassMmapSize(size_class), __FUNCTION__);
  // Printf("T%d new_mem[%zu]: %p-%p mmap %zu\n",
  //        asanThreadRegistry().GetCurrent()->tid(),
  //        size_class, new_mem, new_mem + ClassMmapSize(size_class),
  //        ClassMmapSize(size_class));
  size_t i;
  for (i = 0; i < ClassMmapSize(size_class);
       i += ClassSize(size_class)) {
    size_classes_[size_class].FifoPush((FakeFrame*)(new_mem + i));
  }
  CHECK(i == ClassMmapSize(size_class));
  allocated_size_classes_[size_class] = new_mem;
}
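
// Note on AllocateStack() below: __asan_stack_free() only poisons a retired
// frame (see OnFree()); frames are actually recycled lazily here. The loop
// over call_stack_ retires every fake frame whose real_stack is at or below
// the current one, i.e. frames of calls that have already returned (the
// real stack grows down, so live caller frames have higher addresses).
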
uintptr_t FakeStack::AllocateStack(size_t size, size_t real_stack) {
  if (!alive_) return real_stack;
  CHECK(size <= kMaxStackMallocSize && size > 1);
  size_t size_class = ComputeSizeClass(size);
  if (!allocated_size_classes_[size_class]) {
    AllocateOneSizeClass(size_class);
  }
  FakeFrame *fake_frame = size_classes_[size_class].FifoPop();
  CHECK(fake_frame);
  fake_frame->size_minus_one = size - 1;
  fake_frame->real_stack = real_stack;
  while (FakeFrame *top = call_stack_.top()) {
    if (top->real_stack > real_stack) break;
    call_stack_.LifoPop();
    DeallocateFrame(top);
  }
  call_stack_.LifoPush(fake_frame);
  uintptr_t ptr = (uintptr_t)fake_frame;
  PoisonShadow(ptr, size, 0);
  return ptr;
}

void FakeStack::DeallocateFrame(FakeFrame *fake_frame) {
  CHECK(alive_);
  size_t size = fake_frame->size_minus_one + 1;
  size_t size_class = ComputeSizeClass(size);
  CHECK(allocated_size_classes_[size_class]);
  uintptr_t ptr = (uintptr_t)fake_frame;
  CHECK(AddrIsInSizeClass(ptr, size_class));
  CHECK(AddrIsInSizeClass(ptr + size - 1, size_class));
  size_classes_[size_class].FifoPush(fake_frame);
}

void FakeStack::OnFree(size_t ptr, size_t size, size_t real_stack) {
  FakeFrame *fake_frame = (FakeFrame*)ptr;
  CHECK(fake_frame->magic == kRetiredStackFrameMagic);
  CHECK(fake_frame->descr != 0);
  CHECK(fake_frame->size_minus_one == size - 1);
  PoisonShadow(ptr, size, kAsanStackAfterReturnMagic);
}

}  // namespace __asan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;  // NOLINT

size_t __asan_stack_malloc(size_t size, size_t real_stack) {
  if (!FLAG_use_fake_stack) return real_stack;
  AsanThread *t = asanThreadRegistry().GetCurrent();
  if (!t) {
    // TSD is gone, use the real stack.
    return real_stack;
  }
  size_t ptr = t->fake_stack().AllocateStack(size, real_stack);
  // Printf("__asan_stack_malloc %p %zu %p\n", ptr, size, real_stack);
  return ptr;
}

void __asan_stack_free(size_t ptr, size_t size, size_t real_stack) {
  if (!FLAG_use_fake_stack) return;
  if (ptr != real_stack) {
    FakeStack::OnFree(ptr, size, real_stack);
  }
}

// The ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size".
size_t __asan_get_estimated_allocated_size(size_t size) {
  if (size == 0) return 1;
  return Min(size, kMaxAllowedMallocSize);
}

bool __asan_get_ownership(const void *p) {
  return malloc_info.AllocationSize((uintptr_t)p) > 0;
}

size_t __asan_get_allocated_size(const void *p) {
  if (p == NULL) return 0;
  size_t allocated_size = malloc_info.AllocationSize((uintptr_t)p);
  // Die if p is not malloced or if it is already freed.
  if (allocated_size == 0) {
    Report("ERROR: AddressSanitizer attempting to call "
           "__asan_get_allocated_size() for pointer which is "
           "not owned: %p\n", p);
    PRINT_CURRENT_STACK();
    Describe((uintptr_t)p, 1);
    ShowStatsAndAbort();
  }
  return allocated_size;
}