asan_allocator2.cc revision 65199f1b253c4bfb225805629217acb8f0b1e185
//===-- asan_allocator2.cc ------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Implementation of ASan's memory allocator, second version.
// This variant uses the allocator from sanitizer_common, i.e. the one shared
// with ThreadSanitizer and MemorySanitizer.
//
// Status: under development, not enabled by default yet.
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"
#if ASAN_ALLOCATOR_VERSION == 2

#include "asan_mapping.h"
#include "asan_report.h"
#include "asan_thread.h"
#include "asan_thread_registry.h"
#include "sanitizer/asan_interface.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_quarantine.h"

namespace __asan {

struct AsanMapUnmapCallback {
  void OnMap(uptr p, uptr size) const {
    PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
    // Statistics.
    AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
    thread_stats.mmaps++;
    thread_stats.mmaped += size;
  }
  void OnUnmap(uptr p, uptr size) const {
    PoisonShadow(p, size, 0);
    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    // Since ASan's mapping compacts several application bytes into one
    // shadow byte, the shadow chunk may not be page-aligned, so we only
    // flush the page-aligned portion.
    uptr page_size = GetPageSizeCached();
    uptr shadow_beg = RoundUpTo(MemToShadow(p), page_size);
    uptr shadow_end = RoundDownTo(MemToShadow(p + size), page_size);
    FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
    // Statistics.
    AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
    thread_stats.munmaps++;
    thread_stats.munmaped += size;
  }
};

#if SANITIZER_WORDSIZE == 64
#if defined(__powerpc64__)
const uptr kAllocatorSpace = 0xa0000000000ULL;
#else
const uptr kAllocatorSpace = 0x600000000000ULL;
#endif
const uptr kAllocatorSize = 0x10000000000ULL;  // 1T.
typedef DefaultSizeClassMap SizeClassMap;
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0 /*metadata*/,
    SizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
#elif SANITIZER_WORDSIZE == 32
static const u64 kAddressSpaceSize = 1ULL << 32;
typedef CompactSizeClassMap SizeClassMap;
typedef SizeClassAllocator32<0, kAddressSpaceSize, 16,
    SizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
#endif

typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<AsanMapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
    SecondaryAllocator> Allocator;
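
// Roughly, the composition above works as follows (a sketch):
//   PrimaryAllocator   -- serves small sizes from a fixed set of size
//                         classes inside a pre-reserved address range;
//   SecondaryAllocator -- serves large sizes with individual mmap calls;
//   AllocatorCache     -- a per-thread cache of free chunks in front of the
//                         primary, so the common path allocates without
//                         taking a global lock.
// CombinedAllocator routes a request to the primary when
// PrimaryAllocator::CanAllocate(size, alignment) holds, and to the
// secondary otherwise (see the using_primary_allocator logic in Allocate()).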

// We cannot use THREADLOCAL because it is not supported on some of the
// platforms we care about (OSX 10.6, Android).
// static THREADLOCAL AllocatorCache cache;
AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator2_cache));
  return reinterpret_cast<AllocatorCache *>(ms->allocator2_cache);
}

static Allocator allocator;

static const uptr kMaxAllowedMallocSize =
    FIRST_32_SECOND_64(3UL << 30, 8UL << 30);

static const uptr kMaxThreadLocalQuarantine =
    FIRST_32_SECOND_64(1 << 18, 1 << 20);

static const uptr kReturnOnZeroMalloc = 2048;  // Zero page is protected.

// Every chunk of memory allocated by this allocator can be in one of 3 states:
// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
enum {
  CHUNK_AVAILABLE  = 0,  // 0 is the default value even if we didn't set it.
  CHUNK_ALLOCATED  = 2,
  CHUNK_QUARANTINE = 3
};

// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
// We use adaptive redzones: for larger allocations, larger redzones are used.
static u32 RZLog2Size(u32 rz_log) {
  CHECK_LT(rz_log, 8);
  return 16 << rz_log;
}

static u32 RZSize2Log(u32 rz_size) {
  CHECK_GE(rz_size, 16);
  CHECK_LE(rz_size, 2048);
  CHECK(IsPowerOfTwo(rz_size));
  u32 res = __builtin_ctz(rz_size) - 4;
  CHECK_EQ(rz_size, RZLog2Size(res));
  return res;
}

static uptr ComputeRZLog(uptr user_requested_size) {
  u32 rz_log =
    user_requested_size <= 64        - 16   ? 0 :
    user_requested_size <= 128       - 32   ? 1 :
    user_requested_size <= 512       - 64   ? 2 :
    user_requested_size <= 4096      - 128  ? 3 :
    user_requested_size <= (1 << 14) - 256  ? 4 :
    user_requested_size <= (1 << 15) - 512  ? 5 :
    user_requested_size <= (1 << 16) - 1024 ? 6 : 7;
  return Max(rz_log, RZSize2Log(flags()->redzone));
}
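
// For example (a sketch, assuming the default flags()->redzone value of 16,
// so that RZSize2Log(flags()->redzone) == 0):
//   ComputeRZLog(30)      == 0  ->   16-byte redzone  (30 <= 64 - 16)
//   ComputeRZLog(100)     == 2  ->   64-byte redzone  (96 < 100 <= 448)
//   ComputeRZLog(1 << 20) == 7  -> 2048-byte redzone  (the maximum)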

// The memory chunk allocated from the underlying allocator looks like this:
// L L L L L L H H U U U U U U R R
//   L -- left redzone bytes (0 or more)
//   H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
//   U -- user memory.
//   R -- right redzone (0 or more bytes)
// ChunkBase consists of ChunkHeader and other bytes that overlap with user
// memory.

// If a memory chunk is allocated by memalign and we had to increase the
// allocation size to achieve the proper alignment, then we store this magic
// value in the first uptr word of the memory block and store the address of
// ChunkBase in the next uptr.
// M B ? ? ? L L L L L L H H U U U U U U
//   M -- magic value kMemalignMagic
//   B -- address of ChunkHeader pointing to the first 'H'
static const uptr kMemalignMagic = 0xCC6E96B9;

struct ChunkHeader {
  // First 8 bytes.
  u32 chunk_state       : 8;  // Must be first.
  u32 alloc_tid         : 24;

  u32 free_tid          : 24;
  u32 from_memalign     : 1;
  u32 alloc_type        : 2;
  u32 rz_log            : 3;
  // Second 8 bytes.
  // This field is used for small sizes. For large sizes it is equal to
  // SizeClassMap::kMaxSize and the actual size is stored in the
  // SecondaryAllocator's metadata.
  u32 user_requested_size;
  u32 alloc_context_id;
};

struct ChunkBase : ChunkHeader {
  // Header2, intersects with user memory.
  AsanChunk *next;
  u32 free_context_id;
};

static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
COMPILER_CHECK(kChunkHeaderSize == 16);
COMPILER_CHECK(kChunkHeader2Size <= 16);
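
// A sketch of how the 16 header bytes break down: chunk_state (8 bits) and
// alloc_tid (24) fill the first 32-bit word; free_tid (24), from_memalign
// (1), alloc_type (2) and rz_log (3) occupy 30 bits of the second word,
// leaving 2 bits spare; user_requested_size and alloc_context_id fill the
// remaining two 32-bit words. COMPILER_CHECK(kChunkHeaderSize == 16) above
// pins this layout down.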

struct AsanChunk: ChunkBase {
  uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
  uptr UsedSize() {
    if (user_requested_size != SizeClassMap::kMaxSize)
      return user_requested_size;
    return *reinterpret_cast<uptr *>(allocator.GetMetaData(AllocBeg()));
  }
  void *AllocBeg() {
    if (from_memalign)
      return allocator.GetBlockBegin(reinterpret_cast<void *>(this));
    return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log));
  }
  // We store the alloc/free stack traces in the chunk itself.
  u32 *AllocStackBeg() {
    return (u32*)(Beg() - RZLog2Size(rz_log));
  }
  uptr AllocStackSize() {
    // The redzone must cover at least the header, otherwise the subtraction
    // below underflows (the original check had the operands reversed).
    CHECK_GE(RZLog2Size(rz_log), kChunkHeaderSize);
    return (RZLog2Size(rz_log) - kChunkHeaderSize) / sizeof(u32);
  }
  u32 *FreeStackBeg() {
    return (u32*)(Beg() + kChunkHeader2Size);
  }
  uptr FreeStackSize() {
    if (user_requested_size < kChunkHeader2Size) return 0;
    uptr available = RoundUpTo(user_requested_size, SHADOW_GRANULARITY);
    return (available - kChunkHeader2Size) / sizeof(u32);
  }
};

uptr AsanChunkView::Beg() { return chunk_->Beg(); }
uptr AsanChunkView::End() { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }

static void GetStackTraceFromId(u32 id, StackTrace *stack) {
  CHECK(id);
  uptr size = 0;
  const uptr *trace = StackDepotGet(id, &size);
  CHECK_LT(size, kStackTraceMax);
  internal_memcpy(stack->trace, trace, sizeof(uptr) * size);
  stack->size = size;
}

void AsanChunkView::GetAllocStack(StackTrace *stack) {
  if (flags()->use_stack_depot)
    GetStackTraceFromId(chunk_->alloc_context_id, stack);
  else
    StackTrace::UncompressStack(stack, chunk_->AllocStackBeg(),
                                chunk_->AllocStackSize());
}

void AsanChunkView::GetFreeStack(StackTrace *stack) {
  if (flags()->use_stack_depot)
    GetStackTraceFromId(chunk_->free_context_id, stack);
  else
    StackTrace::UncompressStack(stack, chunk_->FreeStackBeg(),
                                chunk_->FreeStackSize());
}

struct QuarantineCallback;
typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
typedef AsanQuarantine::Cache QuarantineCache;
static AsanQuarantine quarantine(LINKER_INITIALIZED);
static QuarantineCache fallback_quarantine_cache(LINKER_INITIALIZED);
static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex;

QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
  return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
}

struct QuarantineCallback {
  explicit QuarantineCallback(AllocatorCache *cache)
      : cache_(cache) {
  }

  void Recycle(AsanChunk *m) {
    CHECK(m->chunk_state == CHUNK_QUARANTINE);
    m->chunk_state = CHUNK_AVAILABLE;
    CHECK_NE(m->alloc_tid, kInvalidTid);
    CHECK_NE(m->free_tid, kInvalidTid);
    PoisonShadow(m->Beg(),
                 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
                 kAsanHeapLeftRedzoneMagic);
    void *p = reinterpret_cast<void *>(m->AllocBeg());
    if (m->from_memalign) {
      uptr *memalign_magic = reinterpret_cast<uptr *>(p);
      CHECK_EQ(memalign_magic[0], kMemalignMagic);
      CHECK_EQ(memalign_magic[1], reinterpret_cast<uptr>(m));
    }

    // Statistics.
    AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
    thread_stats.real_frees++;
    thread_stats.really_freed += m->UsedSize();

    allocator.Deallocate(cache_, p);
  }

  void *Allocate(uptr size) {
    return allocator.Allocate(cache_, size, 1, false);
  }

  void Deallocate(void *p) {
    allocator.Deallocate(cache_, p);
  }

  AllocatorCache *cache_;
};

static void Init() {
  static int inited = 0;
  if (inited) return;
  __asan_init();
  inited = true;  // This must happen before any threads are created.
  allocator.Init();
  quarantine.Init((uptr)flags()->quarantine_size, kMaxThreadLocalQuarantine);
}
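
// A chunk's life cycle, tying the pieces together (a sketch): Allocate()
// below stamps the header with CHUNK_ALLOCATED; Deallocate() atomically
// flips CHUNK_ALLOCATED to CHUNK_QUARANTINE (which is how double-frees are
// caught) and puts the chunk into the quarantine; once the quarantine
// evicts the chunk, Recycle() above marks it CHUNK_AVAILABLE and returns
// the memory to the underlying allocator.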

static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
                      AllocType alloc_type) {
  Init();
  CHECK(stack);
  const uptr min_alignment = SHADOW_GRANULARITY;
  if (alignment < min_alignment)
    alignment = min_alignment;
  if (size == 0) {
    if (alignment <= kReturnOnZeroMalloc)
      return reinterpret_cast<void *>(kReturnOnZeroMalloc);
    else
      return 0;  // 0 bytes with large alignment requested. Just return 0.
  }
  CHECK(IsPowerOfTwo(alignment));
  uptr rz_log = ComputeRZLog(size);
  uptr rz_size = RZLog2Size(rz_log);
  uptr rounded_size = RoundUpTo(size, alignment);
  if (rounded_size < kChunkHeader2Size)
    rounded_size = kChunkHeader2Size;
  uptr needed_size = rounded_size + rz_size;
  if (alignment > min_alignment)
    needed_size += alignment;
  bool using_primary_allocator = true;
  // If we are allocating from the secondary allocator, there will be no
  // automatic right redzone, so add the right redzone manually.
  if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) {
    needed_size += rz_size;
    using_primary_allocator = false;
  }
  CHECK(IsAligned(needed_size, min_alignment));
  if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
    Report("WARNING: AddressSanitizer failed to allocate %p bytes\n",
           (void*)size);
    return 0;
  }

  AsanThread *t = asanThreadRegistry().GetCurrent();
  void *allocated;
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocated = allocator.Allocate(cache, needed_size, 8, false);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, needed_size, 8, false);
  }
  uptr alloc_beg = reinterpret_cast<uptr>(allocated);
  // Clear the first allocated word (an old kMemalignMagic may still be there).
  reinterpret_cast<uptr *>(alloc_beg)[0] = 0;
  uptr alloc_end = alloc_beg + needed_size;
  uptr beg_plus_redzone = alloc_beg + rz_size;
  uptr user_beg = beg_plus_redzone;
  if (!IsAligned(user_beg, alignment))
    user_beg = RoundUpTo(user_beg, alignment);
  uptr user_end = user_beg + size;
  CHECK_LE(user_end, alloc_end);
  uptr chunk_beg = user_beg - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
  m->chunk_state = CHUNK_ALLOCATED;
  m->alloc_type = alloc_type;
  m->rz_log = rz_log;
  u32 alloc_tid = t ? t->tid() : 0;
  m->alloc_tid = alloc_tid;
  CHECK_EQ(alloc_tid, m->alloc_tid);  // Does alloc_tid fit into the bitfield?
  m->free_tid = kInvalidTid;
  m->from_memalign = user_beg != beg_plus_redzone;
  if (m->from_memalign) {
    CHECK_LE(beg_plus_redzone + 2 * sizeof(uptr), user_beg);
    uptr *memalign_magic = reinterpret_cast<uptr *>(alloc_beg);
    memalign_magic[0] = kMemalignMagic;
    memalign_magic[1] = chunk_beg;
  }
  if (using_primary_allocator) {
    CHECK(size);
    m->user_requested_size = size;
    CHECK(allocator.FromPrimary(allocated));
  } else {
    CHECK(!allocator.FromPrimary(allocated));
    m->user_requested_size = SizeClassMap::kMaxSize;
    uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
    meta[0] = size;
    meta[1] = chunk_beg;
  }

  if (flags()->use_stack_depot) {
    m->alloc_context_id = StackDepotPut(stack->trace, stack->size);
  } else {
    m->alloc_context_id = 0;
    StackTrace::CompressStack(stack, m->AllocStackBeg(), m->AllocStackSize());
  }

  uptr size_rounded_down_to_granularity = RoundDownTo(size, SHADOW_GRANULARITY);
  // Unpoison the bulk of the memory region.
  if (size_rounded_down_to_granularity)
    PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
  // Deal with the end of the region if size is not aligned to granularity.
  if (size != size_rounded_down_to_granularity && flags()->poison_heap) {
    u8 *shadow = (u8*)MemToShadow(user_beg + size_rounded_down_to_granularity);
    *shadow = size & (SHADOW_GRANULARITY - 1);
  }

  AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
  thread_stats.mallocs++;
  thread_stats.malloced += size;
  thread_stats.malloced_redzones += needed_size - size;
  uptr class_id = Min(kNumberOfSizeClasses, SizeClassMap::ClassID(needed_size));
  thread_stats.malloced_by_size[class_id]++;
  if (needed_size > SizeClassMap::kMaxSize)
    thread_stats.malloc_large++;

  void *res = reinterpret_cast<void *>(user_beg);
  ASAN_MALLOC_HOOK(res, size);
  return res;
}
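
// For instance (a sketch, assuming the default flags()->redzone of 16 and
// SHADOW_GRANULARITY == 8), malloc(100) proceeds as:
//   rz_log = 2 -> rz_size = 64; rounded_size = 104; needed_size = 168;
//   user_beg = alloc_beg + 64, with the 16-byte ChunkHeader occupying the
//   bytes just below user_beg; shadow for [user_beg, user_beg + 96) is
//   cleared, the next shadow byte is set to 100 & 7 == 4, and everything
//   beyond stays poisoned as the right redzone.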

static void Deallocate(void *ptr, StackTrace *stack, AllocType alloc_type) {
  uptr p = reinterpret_cast<uptr>(ptr);
  if (p == 0 || p == kReturnOnZeroMalloc) return;
  uptr chunk_beg = p - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

  // Flip the chunk_state atomically to avoid race on double-free.
  u8 old_chunk_state = atomic_exchange((atomic_uint8_t*)m, CHUNK_QUARANTINE,
                                       memory_order_relaxed);

  if (old_chunk_state == CHUNK_QUARANTINE)
    ReportDoubleFree((uptr)ptr, stack);
  else if (old_chunk_state != CHUNK_ALLOCATED)
    ReportFreeNotMalloced((uptr)ptr, stack);
  CHECK(old_chunk_state == CHUNK_ALLOCATED);
  if (m->alloc_type != alloc_type && flags()->alloc_dealloc_mismatch)
    ReportAllocTypeMismatch((uptr)ptr, stack,
                            (AllocType)m->alloc_type, (AllocType)alloc_type);

  CHECK_GE(m->alloc_tid, 0);
  if (SANITIZER_WORDSIZE == 64)  // On 32-bits this resides in user area.
    CHECK_EQ(m->free_tid, kInvalidTid);
  AsanThread *t = asanThreadRegistry().GetCurrent();
  m->free_tid = t ? t->tid() : 0;
  if (flags()->use_stack_depot) {
    m->free_context_id = StackDepotPut(stack->trace, stack->size);
  } else {
    m->free_context_id = 0;
    StackTrace::CompressStack(stack, m->FreeStackBeg(), m->FreeStackSize());
  }
  CHECK(m->chunk_state == CHUNK_QUARANTINE);
  // Poison the region.
  PoisonShadow(m->Beg(),
               RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
               kAsanHeapFreeMagic);

  AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
  thread_stats.frees++;
  thread_stats.freed += m->UsedSize();

  // Push into quarantine.
  if (t) {
    AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
    AllocatorCache *ac = GetAllocatorCache(ms);
    quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac),
                   m, m->UsedSize());
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *ac = &fallback_allocator_cache;
    quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac),
                   m, m->UsedSize());
  }

  ASAN_FREE_HOOK(ptr);
}

static void *Reallocate(void *old_ptr, uptr new_size, StackTrace *stack) {
  CHECK(old_ptr && new_size);
  uptr p = reinterpret_cast<uptr>(old_ptr);
  uptr chunk_beg = p - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

  AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
  thread_stats.reallocs++;
  thread_stats.realloced += new_size;

  CHECK(m->chunk_state == CHUNK_ALLOCATED);
  uptr old_size = m->UsedSize();
  uptr memcpy_size = Min(new_size, old_size);
  void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC);
  if (new_ptr) {
    CHECK(REAL(memcpy) != 0);
    REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
    Deallocate(old_ptr, stack, FROM_MALLOC);
  }
  return new_ptr;
}

static AsanChunk *GetAsanChunkByAddr(uptr p) {
  void *ptr = reinterpret_cast<void *>(p);
  uptr alloc_beg = reinterpret_cast<uptr>(allocator.GetBlockBegin(ptr));
  if (!alloc_beg) return 0;
  uptr *memalign_magic = reinterpret_cast<uptr *>(alloc_beg);
  if (memalign_magic[0] == kMemalignMagic) {
    AsanChunk *m = reinterpret_cast<AsanChunk *>(memalign_magic[1]);
    CHECK(m->from_memalign);
    return m;
  }
  if (!allocator.FromPrimary(ptr)) {
    uptr *meta = reinterpret_cast<uptr *>(
        allocator.GetMetaData(reinterpret_cast<void *>(alloc_beg)));
    AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]);
    return m;
  }
  uptr actual_size = allocator.GetActuallyAllocatedSize(ptr);
  CHECK_LE(actual_size, SizeClassMap::kMaxSize);
  // We know the actually allocated size, but we don't know the redzone size.
  // Just try all possible redzone sizes.
  for (u32 rz_log = 0; rz_log < 8; rz_log++) {
    u32 rz_size = RZLog2Size(rz_log);
    uptr max_possible_size = actual_size - rz_size;
    if (ComputeRZLog(max_possible_size) != rz_log)
      continue;
    return reinterpret_cast<AsanChunk *>(
        alloc_beg + rz_size - kChunkHeaderSize);
  }
  return 0;
}
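
// A note on the loop above: ComputeRZLog() is monotone (non-decreasing) in
// its argument, so for a fixed actual_size at most one rz_log can satisfy
// ComputeRZLog(actual_size - RZLog2Size(rz_log)) == rz_log, and the scan
// over the 8 candidates is unambiguous.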

static uptr AllocationSize(uptr p) {
  AsanChunk *m = GetAsanChunkByAddr(p);
  if (!m) return 0;
  if (m->chunk_state != CHUNK_ALLOCATED) return 0;
  if (m->Beg() != p) return 0;
  return m->UsedSize();
}

// We have an address between two chunks, and we want to report just one.
AsanChunk *ChooseChunk(uptr addr,
                       AsanChunk *left_chunk, AsanChunk *right_chunk) {
  // Prefer an allocated chunk over a freed chunk, and a freed chunk over
  // an available chunk.
  if (left_chunk->chunk_state != right_chunk->chunk_state) {
    if (left_chunk->chunk_state == CHUNK_ALLOCATED)
      return left_chunk;
    if (right_chunk->chunk_state == CHUNK_ALLOCATED)
      return right_chunk;
    if (left_chunk->chunk_state == CHUNK_QUARANTINE)
      return left_chunk;
    if (right_chunk->chunk_state == CHUNK_QUARANTINE)
      return right_chunk;
  }
  // Same chunk_state: choose based on offset.
  uptr l_offset = 0, r_offset = 0;
  CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
  CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
  if (l_offset < r_offset)
    return left_chunk;
  return right_chunk;
}

AsanChunkView FindHeapChunkByAddress(uptr addr) {
  AsanChunk *m1 = GetAsanChunkByAddr(addr);
  if (!m1) return AsanChunkView(m1);
  uptr offset = 0;
  if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
    // The address is in the chunk's left redzone, so maybe it is actually
    // a right buffer overflow from the other chunk to the left.
    // Search a bit to the left to see if there is another chunk.
    AsanChunk *m2 = 0;
    for (uptr l = 1; l < GetPageSizeCached(); l++) {
      m2 = GetAsanChunkByAddr(addr - l);
      if (m2 == m1) continue;  // Still the same chunk.
      break;
    }
    if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
      m1 = ChooseChunk(addr, m2, m1);
  }
  return AsanChunkView(m1);
}
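
// For example, if |addr| lands in the poisoned gap between two neighboring
// chunks A (to the left) and B (to the right), GetAsanChunkByAddr(addr)
// finds B, AddrIsAtLeft() notices that addr sits in B's left redzone, the
// backward scan locates A, and ChooseChunk() decides whether to describe
// the access as an overflow past A or an underflow before B.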

void AsanThreadLocalMallocStorage::CommitBack() {
  AllocatorCache *ac = GetAllocatorCache(this);
  quarantine.Drain(GetQuarantineCache(this), QuarantineCallback(ac));
  allocator.SwallowCache(GetAllocatorCache(this));
}

void PrintInternalAllocatorStats() {
  allocator.PrintStats();
}

SANITIZER_INTERFACE_ATTRIBUTE
void *asan_memalign(uptr alignment, uptr size, StackTrace *stack,
                    AllocType alloc_type) {
  return Allocate(size, alignment, stack, alloc_type);
}

SANITIZER_INTERFACE_ATTRIBUTE
void asan_free(void *ptr, StackTrace *stack, AllocType alloc_type) {
  Deallocate(ptr, stack, alloc_type);
}

SANITIZER_INTERFACE_ATTRIBUTE
void *asan_malloc(uptr size, StackTrace *stack) {
  return Allocate(size, 8, stack, FROM_MALLOC);
}

void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
  if (CallocShouldReturnNullDueToOverflow(size, nmemb)) return 0;
  void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC);
  if (ptr)
    REAL(memset)(ptr, 0, nmemb * size);
  return ptr;
}

void *asan_realloc(void *p, uptr size, StackTrace *stack) {
  if (p == 0)
    return Allocate(size, 8, stack, FROM_MALLOC);
  if (size == 0) {
    Deallocate(p, stack, FROM_MALLOC);
    return 0;
  }
  return Reallocate(p, size, stack);
}

void *asan_valloc(uptr size, StackTrace *stack) {
  return Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC);
}

void *asan_pvalloc(uptr size, StackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  size = RoundUpTo(size, PageSize);
  if (size == 0) {
    // pvalloc(0) should allocate one page.
    size = PageSize;
  }
  return Allocate(size, PageSize, stack, FROM_MALLOC);
}

int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        StackTrace *stack) {
  void *ptr = Allocate(size, alignment, stack, FROM_MALLOC);
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

uptr asan_malloc_usable_size(void *ptr, StackTrace *stack) {
  CHECK(stack);
  if (ptr == 0) return 0;
  uptr usable_size = AllocationSize(reinterpret_cast<uptr>(ptr));
  if (flags()->check_malloc_usable_size && (usable_size == 0))
    ReportMallocUsableSizeNotOwned((uptr)ptr, stack);
  return usable_size;
}

uptr asan_mz_size(const void *ptr) {
  UNIMPLEMENTED();
  return 0;
}

void asan_mz_force_lock() {
  UNIMPLEMENTED();
}

void asan_mz_force_unlock() {
  UNIMPLEMENTED();
}

}  // namespace __asan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;  // NOLINT

// The ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size". We don't want to expose our redzone sizes, etc. here.
uptr __asan_get_estimated_allocated_size(uptr size) {
  return size;
}

bool __asan_get_ownership(const void *p) {
  uptr ptr = reinterpret_cast<uptr>(p);
  return (ptr == kReturnOnZeroMalloc) || (AllocationSize(ptr) > 0);
}

uptr __asan_get_allocated_size(const void *p) {
  if (p == 0) return 0;
  uptr ptr = reinterpret_cast<uptr>(p);
  uptr allocated_size = AllocationSize(ptr);
  // Die if p is not malloced or if it is already freed.
  if (allocated_size == 0 && ptr != kReturnOnZeroMalloc) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportAsanGetAllocatedSizeNotOwned(ptr, &stack);
  }
  return allocated_size;
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
extern "C" {
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
void __asan_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
void __asan_free_hook(void *ptr) {
  (void)ptr;
}
}  // extern "C"
#endif

#endif  // ASAN_ALLOCATOR_VERSION