asan_allocator2.cc revision 3f8ce73999c1ef82a8a835e9e87b0d4ce8c5810b
//===-- asan_allocator2.cc ------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Implementation of ASan's memory allocator, 2nd version.
// This variant uses the allocator from sanitizer_common, i.e. the one shared
// with ThreadSanitizer and MemorySanitizer.
//
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"

#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_quarantine.h"
#include "lsan/lsan_common.h"

namespace __asan {

struct AsanMapUnmapCallback {
  void OnMap(uptr p, uptr size) const {
    PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
    // Statistics.
    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.mmaps++;
    thread_stats.mmaped += size;
  }
  void OnUnmap(uptr p, uptr size) const {
    PoisonShadow(p, size, 0);
    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    // Since ASan's mapping is compacting, the shadow chunk may not be
    // page-aligned, so we only flush the page-aligned portion.
    uptr page_size = GetPageSizeCached();
    uptr shadow_beg = RoundUpTo(MemToShadow(p), page_size);
    uptr shadow_end = RoundDownTo(MemToShadow(p + size), page_size);
    FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
    // Statistics.
    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.munmaps++;
    thread_stats.munmaped += size;
  }
};
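
// An illustrative sketch (not part of the build): how the flush above plays
// out under the default compact mapping, where MemToShadow(a) is roughly
// (a >> SHADOW_SCALE) + SHADOW_OFFSET with SHADOW_SCALE == 3, i.e. one
// shadow byte per 8 application bytes. Unmapping a 1 MiB user region
// corresponds to a 128 KiB shadow span; with 4 KiB pages, RoundUpTo and
// RoundDownTo trim at most one partial page at each end, and the remaining
// whole pages are returned to the OS:
//
//   user:    [p, p + (1 << 20))                            // 1 MiB
//   shadow:  [MemToShadow(p), MemToShadow(p) + (1 << 17))  // 128 KiB
//   flushed: the page-aligned interior of that shadow span.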

#if SANITIZER_WORDSIZE == 64
#if defined(__powerpc64__)
const uptr kAllocatorSpace = 0xa0000000000ULL;
const uptr kAllocatorSize  = 0x20000000000ULL;  // 2T.
#else
const uptr kAllocatorSpace = 0x600000000000ULL;
const uptr kAllocatorSize  =  0x40000000000ULL;  // 4T.
#endif
typedef DefaultSizeClassMap SizeClassMap;
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0 /*metadata*/,
    SizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
#elif SANITIZER_WORDSIZE == 32
static const u64 kAddressSpaceSize = 1ULL << 32;
typedef CompactSizeClassMap SizeClassMap;
static const uptr kRegionSizeLog = 20;
static const uptr kFlatByteMapSize = kAddressSpaceSize >> kRegionSizeLog;
typedef SizeClassAllocator32<0, kAddressSpaceSize, 16,
    SizeClassMap, kRegionSizeLog,
    FlatByteMap<kFlatByteMapSize>,
    AsanMapUnmapCallback> PrimaryAllocator;
#endif

typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<AsanMapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
    SecondaryAllocator> Allocator;

// We cannot use THREADLOCAL because it is not supported on some of the
// platforms we care about (OSX 10.6, Android).
// static THREADLOCAL AllocatorCache cache;
AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator2_cache));
  return reinterpret_cast<AllocatorCache *>(ms->allocator2_cache);
}

static Allocator allocator;

static const uptr kMaxAllowedMallocSize =
    FIRST_32_SECOND_64(3UL << 30, 64UL << 30);

static const uptr kMaxThreadLocalQuarantine =
    FIRST_32_SECOND_64(1 << 18, 1 << 20);

// Every chunk of memory allocated by this allocator can be in one of 3 states:
// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
enum {
  CHUNK_AVAILABLE  = 0,  // 0 is the default value even if we didn't set it.
  CHUNK_ALLOCATED  = 2,
  CHUNK_QUARANTINE = 3
};

// Valid redzone sizes are 16, 32, 64, ..., 2048, so we encode them in 3 bits.
// We use adaptive redzones: for larger allocations, larger redzones are used.
static u32 RZLog2Size(u32 rz_log) {
  CHECK_LT(rz_log, 8);
  return 16 << rz_log;
}

static u32 RZSize2Log(u32 rz_size) {
  CHECK_GE(rz_size, 16);
  CHECK_LE(rz_size, 2048);
  CHECK(IsPowerOfTwo(rz_size));
  u32 res = Log2(rz_size) - 4;
  CHECK_EQ(rz_size, RZLog2Size(res));
  return res;
}

static uptr ComputeRZLog(uptr user_requested_size) {
  u32 rz_log =
      user_requested_size <= 64        - 16   ? 0 :
      user_requested_size <= 128       - 32   ? 1 :
      user_requested_size <= 512       - 64   ? 2 :
      user_requested_size <= 4096      - 128  ? 3 :
      user_requested_size <= (1 << 14) - 256  ? 4 :
      user_requested_size <= (1 << 15) - 512  ? 5 :
      user_requested_size <= (1 << 16) - 1024 ? 6 : 7;
  return Max(rz_log, RZSize2Log(flags()->redzone));
}
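
// Worked example (illustrative): for a 100-byte request, 100 > 64 - 16 and
// 100 > 128 - 32, but 100 <= 512 - 64, so rz_log == 2 and the redzone is
// RZLog2Size(2) == 64 bytes. Assuming the default flags()->redzone of 16,
// RZSize2Log(16) == 0 and Max(2, 0) keeps rz_log == 2. The "class size
// minus redzone" thresholds are chosen so that the user size plus its
// redzone still fits the corresponding primary size class.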

// The memory chunk allocated from the underlying allocator looks like this:
// L L L L L L H H U U U U U U R R
//   L -- left redzone words (0 or more bytes)
//   H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
//   U -- user memory.
//   R -- right redzone (0 or more bytes)
// ChunkBase consists of ChunkHeader and other bytes that overlap with user
// memory.

// If the left redzone is greater than the ChunkHeader size, we store a magic
// value in the first uptr word of the memory block and store the address of
// ChunkBase in the next uptr.
// M B L L L L L L L L L H H U U U U U U
//   |                   ^
//   --------------------|
//   M -- magic value kAllocBegMagic
//   B -- address of ChunkHeader pointing to the first 'H'
static const uptr kAllocBegMagic = 0xCC6E96B9;

struct ChunkHeader {
  // 1st 8 bytes.
  u32 chunk_state       : 8;  // Must be first.
  u32 alloc_tid         : 24;

  u32 free_tid          : 24;
  u32 from_memalign     : 1;
  u32 alloc_type        : 2;
  u32 rz_log            : 3;
  u32 lsan_tag          : 2;
  // 2nd 8 bytes
  // This field is used for small sizes. For large sizes it is equal to
  // SizeClassMap::kMaxSize and the actual size is stored in the
  // SecondaryAllocator's metadata.
  u32 user_requested_size;
  u32 alloc_context_id;
};

struct ChunkBase : ChunkHeader {
  // Header2, intersects with user memory.
  u32 free_context_id;
};

static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
COMPILER_CHECK(kChunkHeaderSize == 16);
COMPILER_CHECK(kChunkHeader2Size <= 16);
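
// A sketch of why the COMPILER_CHECKs above hold: the first 8 bytes pack
// chunk_state (8 bits) + alloc_tid (24 bits) into one u32, and free_tid (24)
// + from_memalign (1) + alloc_type (2) + rz_log (3) + lsan_tag (2) == 32
// bits into the next; user_requested_size and alloc_context_id add 4 bytes
// each, for sizeof(ChunkHeader) == 16 -- exactly the minimum redzone size,
// so the header always fits inside the left redzone. Header2 is a single
// u32, so kChunkHeader2Size == 4 here.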

struct AsanChunk: ChunkBase {
  uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
  uptr UsedSize(bool locked_version = false) {
    if (user_requested_size != SizeClassMap::kMaxSize)
      return user_requested_size;
    return *reinterpret_cast<uptr *>(
               allocator.GetMetaData(AllocBeg(locked_version)));
  }
  void *AllocBeg(bool locked_version = false) {
    if (from_memalign) {
      if (locked_version)
        return allocator.GetBlockBeginFastLocked(
            reinterpret_cast<void *>(this));
      return allocator.GetBlockBegin(reinterpret_cast<void *>(this));
    }
    return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log));
  }
  // If we don't use stack depot, we store the alloc/free stack traces
  // in the chunk itself.
  u32 *AllocStackBeg() {
    return (u32*)(Beg() - RZLog2Size(rz_log));
  }
  uptr AllocStackSize() {
    // The redzone is at least as large as the header, so the subtraction
    // below cannot underflow.
    CHECK_GE(RZLog2Size(rz_log), kChunkHeaderSize);
    return (RZLog2Size(rz_log) - kChunkHeaderSize) / sizeof(u32);
  }
  u32 *FreeStackBeg() {
    return (u32*)(Beg() + kChunkHeader2Size);
  }
  uptr FreeStackSize() {
    if (user_requested_size < kChunkHeader2Size) return 0;
    uptr available = RoundUpTo(user_requested_size, SHADOW_GRANULARITY);
    return (available - kChunkHeader2Size) / sizeof(u32);
  }
  bool AddrIsInside(uptr addr, bool locked_version = false) {
    return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version));
  }
};

bool AsanChunkView::IsValid() {
  return chunk_ != 0 && chunk_->chunk_state != CHUNK_AVAILABLE;
}
uptr AsanChunkView::Beg() { return chunk_->Beg(); }
uptr AsanChunkView::End() { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }

static void GetStackTraceFromId(u32 id, StackTrace *stack) {
  CHECK(id);
  uptr size = 0;
  const uptr *trace = StackDepotGet(id, &size);
  CHECK(trace);
  stack->CopyFrom(trace, size);
}

void AsanChunkView::GetAllocStack(StackTrace *stack) {
  if (flags()->use_stack_depot)
    GetStackTraceFromId(chunk_->alloc_context_id, stack);
  else
    StackTrace::UncompressStack(stack, chunk_->AllocStackBeg(),
                                chunk_->AllocStackSize());
}

void AsanChunkView::GetFreeStack(StackTrace *stack) {
  if (flags()->use_stack_depot)
    GetStackTraceFromId(chunk_->free_context_id, stack);
  else
    StackTrace::UncompressStack(stack, chunk_->FreeStackBeg(),
                                chunk_->FreeStackSize());
}
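
// Worked example of the in-chunk trace storage (illustrative; assumes
// SHADOW_GRANULARITY == 8): with rz_log == 2, the compressed alloc stack
// lives in the 64-byte left redzone in front of the header, giving
// (64 - 16) / sizeof(u32) == 12 slots starting at Beg() - 64. The free
// stack instead reuses the dead user memory past Header2, so a chunk with
// user_requested_size == 100 offers (RoundUpTo(100, 8) - 4) / 4 == 25
// slots. Both are unused when flags()->use_stack_depot is on.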

struct QuarantineCallback;
typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
typedef AsanQuarantine::Cache QuarantineCache;
static AsanQuarantine quarantine(LINKER_INITIALIZED);
static QuarantineCache fallback_quarantine_cache(LINKER_INITIALIZED);
static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex;

QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
  return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
}

struct QuarantineCallback {
  explicit QuarantineCallback(AllocatorCache *cache)
      : cache_(cache) {
  }

  void Recycle(AsanChunk *m) {
    CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
    atomic_store((atomic_uint8_t*)m, CHUNK_AVAILABLE, memory_order_relaxed);
    CHECK_NE(m->alloc_tid, kInvalidTid);
    CHECK_NE(m->free_tid, kInvalidTid);
    PoisonShadow(m->Beg(),
                 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
                 kAsanHeapLeftRedzoneMagic);
    void *p = reinterpret_cast<void *>(m->AllocBeg());
    if (p != m) {
      uptr *alloc_magic = reinterpret_cast<uptr *>(p);
      CHECK_EQ(alloc_magic[0], kAllocBegMagic);
      // Clear the magic value, as allocator internals may overwrite the
      // contents of deallocated chunk, confusing GetAsanChunk lookup.
      alloc_magic[0] = 0;
      CHECK_EQ(alloc_magic[1], reinterpret_cast<uptr>(m));
    }

    // Statistics.
    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.real_frees++;
    thread_stats.really_freed += m->UsedSize();

    allocator.Deallocate(cache_, p);
  }

  void *Allocate(uptr size) {
    return allocator.Allocate(cache_, size, 1, false);
  }

  void Deallocate(void *p) {
    allocator.Deallocate(cache_, p);
  }

  AllocatorCache *cache_;
};

void InitializeAllocator() {
  allocator.Init();
  quarantine.Init((uptr)flags()->quarantine_size, kMaxThreadLocalQuarantine);
}
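
// A sketch of the chunk state machine driven by the code above and by
// Allocate()/Deallocate() below, for a hypothetical client:
//
//   char *p = (char *)malloc(16);  // chunk_state: CHUNK_ALLOCATED
//   free(p);                       // -> CHUNK_QUARANTINE; user memory is
//                                  //    poisoned with kAsanHeapFreeMagic
//   p[0] = 1;                      // reported as heap-use-after-free while
//                                  //    the chunk sits in quarantine
//
// Once roughly flags()->quarantine_size bytes accumulate, Recycle() flips
// the oldest chunks back to CHUNK_AVAILABLE and returns them to the
// underlying allocator for reuse.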

static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
                      AllocType alloc_type, bool can_fill) {
  if (!asan_inited)
    __asan_init();
  Flags &fl = *flags();
  CHECK(stack);
  const uptr min_alignment = SHADOW_GRANULARITY;
  if (alignment < min_alignment)
    alignment = min_alignment;
  if (size == 0) {
    // We'd be happy to avoid allocating memory for zero-size requests, but
    // some programs/tests depend on this behavior and assume that malloc would
    // not return NULL even for zero-size allocations. Moreover, it looks like
    // operator new should never return NULL, and results of consecutive "new"
    // calls must be different even if the allocated size is zero.
    size = 1;
  }
  CHECK(IsPowerOfTwo(alignment));
  uptr rz_log = ComputeRZLog(size);
  uptr rz_size = RZLog2Size(rz_log);
  uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment);
  uptr needed_size = rounded_size + rz_size;
  if (alignment > min_alignment)
    needed_size += alignment;
  bool using_primary_allocator = true;
  // If we are allocating from the secondary allocator, there will be no
  // automatic right redzone, so add the right redzone manually.
  if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) {
    needed_size += rz_size;
    using_primary_allocator = false;
  }
  CHECK(IsAligned(needed_size, min_alignment));
  if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
    Report("WARNING: AddressSanitizer failed to allocate %p bytes\n",
           (void*)size);
    return AllocatorReturnNull();
  }

  AsanThread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocated = allocator.Allocate(cache, needed_size, 8, false);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, needed_size, 8, false);
  }
  uptr alloc_beg = reinterpret_cast<uptr>(allocated);
  uptr alloc_end = alloc_beg + needed_size;
  uptr beg_plus_redzone = alloc_beg + rz_size;
  uptr user_beg = beg_plus_redzone;
  if (!IsAligned(user_beg, alignment))
    user_beg = RoundUpTo(user_beg, alignment);
  uptr user_end = user_beg + size;
  CHECK_LE(user_end, alloc_end);
  uptr chunk_beg = user_beg - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
  m->alloc_type = alloc_type;
  m->rz_log = rz_log;
  u32 alloc_tid = t ? t->tid() : 0;
  m->alloc_tid = alloc_tid;
  CHECK_EQ(alloc_tid, m->alloc_tid);  // Does alloc_tid fit into the bitfield?
  m->free_tid = kInvalidTid;
  m->from_memalign = user_beg != beg_plus_redzone;
  if (alloc_beg != chunk_beg) {
    CHECK_LE(alloc_beg + 2 * sizeof(uptr), chunk_beg);
    reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic;
    reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg;
  }
  if (using_primary_allocator) {
    CHECK(size);
    m->user_requested_size = size;
    CHECK(allocator.FromPrimary(allocated));
  } else {
    CHECK(!allocator.FromPrimary(allocated));
    m->user_requested_size = SizeClassMap::kMaxSize;
    uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
    meta[0] = size;
    meta[1] = chunk_beg;
  }

  if (fl.use_stack_depot) {
    m->alloc_context_id = StackDepotPut(stack->trace, stack->size);
  } else {
    m->alloc_context_id = 0;
    StackTrace::CompressStack(stack, m->AllocStackBeg(), m->AllocStackSize());
  }

  uptr size_rounded_down_to_granularity = RoundDownTo(size, SHADOW_GRANULARITY);
  // Unpoison the bulk of the memory region.
  if (size_rounded_down_to_granularity)
    PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
  // Deal with the end of the region if size is not aligned to granularity.
  if (size != size_rounded_down_to_granularity && fl.poison_heap) {
    u8 *shadow = (u8*)MemToShadow(user_beg + size_rounded_down_to_granularity);
    *shadow = fl.poison_partial ? (size & (SHADOW_GRANULARITY - 1)) : 0;
  }

  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.mallocs++;
  thread_stats.malloced += size;
  thread_stats.malloced_redzones += needed_size - size;
  uptr class_id = Min(kNumberOfSizeClasses, SizeClassMap::ClassID(needed_size));
  thread_stats.malloced_by_size[class_id]++;
  if (needed_size > SizeClassMap::kMaxSize)
    thread_stats.malloc_large++;

  void *res = reinterpret_cast<void *>(user_beg);
  if (can_fill && fl.max_malloc_fill_size) {
    uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size);
    REAL(memset)(res, fl.malloc_fill_byte, fill_size);
  }
#if CAN_SANITIZE_LEAKS
  m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored
                                               : __lsan::kDirectlyLeaked;
#endif
  // Must be the last mutation of metadata in this function.
  atomic_store((atomic_uint8_t *)m, CHUNK_ALLOCATED, memory_order_release);
  ASAN_MALLOC_HOOK(res, size);
  return res;
}
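
// Worked example for Allocate() (illustrative; assumes SHADOW_GRANULARITY
// == 8 and the default redzone flag): malloc(100) yields rz_log == 2, so
// rz_size == 64, rounded_size == RoundUpTo(100, 8) == 104, and
// needed_size == 168 -- small enough for the primary allocator. Then:
//   user_beg  == alloc_beg + 64 (already 8-aligned, so from_memalign == 0);
//   chunk_beg == user_beg - 16 == alloc_beg + 48 != alloc_beg, so
//   kAllocBegMagic and chunk_beg are written to the two uptrs at alloc_beg,
//   letting GetAsanChunk() recover the header later.
// Finally, 96 bytes of shadow are unpoisoned, and the shadow byte for the
// last granule is set to 100 % 8 == 4, i.e. 4 addressable bytes.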

static void ReportInvalidFree(void *ptr, u8 chunk_state, StackTrace *stack) {
  if (chunk_state == CHUNK_QUARANTINE)
    ReportDoubleFree((uptr)ptr, stack);
  else
    ReportFreeNotMalloced((uptr)ptr, stack);
}

static void AtomicallySetQuarantineFlag(AsanChunk *m,
                                        void *ptr, StackTrace *stack) {
  u8 old_chunk_state = CHUNK_ALLOCATED;
  // Flip the chunk_state atomically to avoid race on double-free.
  if (!atomic_compare_exchange_strong((atomic_uint8_t*)m, &old_chunk_state,
                                      CHUNK_QUARANTINE, memory_order_acquire))
    ReportInvalidFree(ptr, old_chunk_state, stack);
  CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
}

// Expects the chunk to already be marked as quarantined by using
// AtomicallySetQuarantineFlag.
static void QuarantineChunk(AsanChunk *m, void *ptr,
                            StackTrace *stack, AllocType alloc_type) {
  CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);

  if (m->alloc_type != alloc_type && flags()->alloc_dealloc_mismatch)
    ReportAllocTypeMismatch((uptr)ptr, stack,
                            (AllocType)m->alloc_type, (AllocType)alloc_type);

  CHECK_GE(m->alloc_tid, 0);
  if (SANITIZER_WORDSIZE == 64)  // On 32-bits this resides in user area.
    CHECK_EQ(m->free_tid, kInvalidTid);
  AsanThread *t = GetCurrentThread();
  m->free_tid = t ? t->tid() : 0;
  if (flags()->use_stack_depot) {
    m->free_context_id = StackDepotPut(stack->trace, stack->size);
  } else {
    m->free_context_id = 0;
    StackTrace::CompressStack(stack, m->FreeStackBeg(), m->FreeStackSize());
  }
  // Poison the region.
  PoisonShadow(m->Beg(),
               RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
               kAsanHeapFreeMagic);

  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.frees++;
  thread_stats.freed += m->UsedSize();

  // Push into quarantine.
  if (t) {
    AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
    AllocatorCache *ac = GetAllocatorCache(ms);
    quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac),
                   m, m->UsedSize());
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *ac = &fallback_allocator_cache;
    quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac),
                   m, m->UsedSize());
  }
}

static void Deallocate(void *ptr, StackTrace *stack, AllocType alloc_type) {
  uptr p = reinterpret_cast<uptr>(ptr);
  if (p == 0) return;

  uptr chunk_beg = p - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
  ASAN_FREE_HOOK(ptr);
  // Must mark the chunk as quarantined before any changes to its metadata.
  AtomicallySetQuarantineFlag(m, ptr, stack);
  QuarantineChunk(m, ptr, stack, alloc_type);
}
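
// Why the compare-exchange above is sufficient (a sketch): if two threads
// race to free the same pointer, exactly one CAS observes CHUNK_ALLOCATED
// and wins; the loser receives CHUNK_QUARANTINE in old_chunk_state and ends
// up in ReportDoubleFree(). Freeing a pointer that this allocator never
// returned reads some unrelated byte as chunk_state and is reported via
// ReportFreeNotMalloced() instead.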

static void *Reallocate(void *old_ptr, uptr new_size, StackTrace *stack) {
  CHECK(old_ptr && new_size);
  uptr p = reinterpret_cast<uptr>(old_ptr);
  uptr chunk_beg = p - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.reallocs++;
  thread_stats.realloced += new_size;

  void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
  if (new_ptr) {
    u8 chunk_state = m->chunk_state;
    if (chunk_state != CHUNK_ALLOCATED)
      ReportInvalidFree(old_ptr, chunk_state, stack);
    CHECK_NE(REAL(memcpy), (void*)0);
    uptr memcpy_size = Min(new_size, m->UsedSize());
    // If realloc() races with free(), we may start copying freed memory.
    // However, we will report racy double-free later anyway.
    REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
    Deallocate(old_ptr, stack, FROM_MALLOC);
  }
  return new_ptr;
}

// Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
static AsanChunk *GetAsanChunk(void *alloc_beg) {
  if (!alloc_beg) return 0;
  if (!allocator.FromPrimary(alloc_beg)) {
    uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg));
    AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]);
    return m;
  }
  uptr *alloc_magic = reinterpret_cast<uptr *>(alloc_beg);
  if (alloc_magic[0] == kAllocBegMagic)
    return reinterpret_cast<AsanChunk *>(alloc_magic[1]);
  return reinterpret_cast<AsanChunk *>(alloc_beg);
}

static AsanChunk *GetAsanChunkByAddr(uptr p) {
  void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
  return GetAsanChunk(alloc_beg);
}

// Allocator must be locked when this function is called.
static AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) {
  void *alloc_beg =
      allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p));
  return GetAsanChunk(alloc_beg);
}

static uptr AllocationSize(uptr p) {
  AsanChunk *m = GetAsanChunkByAddr(p);
  if (!m) return 0;
  if (m->chunk_state != CHUNK_ALLOCATED) return 0;
  if (m->Beg() != p) return 0;
  return m->UsedSize();
}
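
// How GetAsanChunk() resolves each case (a sketch): for secondary-allocator
// blocks, the chunk address was stored in meta[1] by Allocate(); for primary
// blocks whose left redzone is larger than the 16-byte header,
// alloc_magic[0] == kAllocBegMagic and alloc_magic[1] holds the chunk
// address; otherwise the header starts right at alloc_beg. Recycle() clears
// the magic before a block is reused, so a recycled block safely falls
// through to the last case.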

// We have an address between two chunks, and we want to report just one.
AsanChunk *ChooseChunk(uptr addr,
                       AsanChunk *left_chunk, AsanChunk *right_chunk) {
  // Prefer an allocated chunk over a freed chunk, and a freed chunk over
  // an available chunk.
  if (left_chunk->chunk_state != right_chunk->chunk_state) {
    if (left_chunk->chunk_state == CHUNK_ALLOCATED)
      return left_chunk;
    if (right_chunk->chunk_state == CHUNK_ALLOCATED)
      return right_chunk;
    if (left_chunk->chunk_state == CHUNK_QUARANTINE)
      return left_chunk;
    if (right_chunk->chunk_state == CHUNK_QUARANTINE)
      return right_chunk;
  }
  // Same chunk_state: choose based on offset.
  sptr l_offset = 0, r_offset = 0;
  CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
  CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
  if (l_offset < r_offset)
    return left_chunk;
  return right_chunk;
}

AsanChunkView FindHeapChunkByAddress(uptr addr) {
  AsanChunk *m1 = GetAsanChunkByAddr(addr);
  if (!m1) return AsanChunkView(m1);
  sptr offset = 0;
  if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
    // The address is in the chunk's left redzone, so maybe it is actually
    // a right buffer overflow from the other chunk to the left.
    // Search a bit to the left to see if there is another chunk.
    AsanChunk *m2 = 0;
    for (uptr l = 1; l < GetPageSizeCached(); l++) {
      m2 = GetAsanChunkByAddr(addr - l);
      if (m2 == m1) continue;  // Still the same chunk.
      break;
    }
    if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
      m1 = ChooseChunk(addr, m2, m1);
  }
  return AsanChunkView(m1);
}

void AsanThreadLocalMallocStorage::CommitBack() {
  AllocatorCache *ac = GetAllocatorCache(this);
  quarantine.Drain(GetQuarantineCache(this), QuarantineCallback(ac));
  allocator.SwallowCache(GetAllocatorCache(this));
}

void PrintInternalAllocatorStats() {
  allocator.PrintStats();
}
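
// Example of the heuristic above (illustrative addresses): suppose chunk A's
// user memory ends at 0x1000 and chunk B's begins at 0x1040, and a report
// fires at addr == 0x1010, inside B's left redzone. If both chunks are in
// the same state, l_offset == 0x10 (past A's end) and r_offset == 0x30
// (before B's start); since 0x10 < 0x30, the access is attributed to an
// overflow to the right of A rather than an underflow to the left of B.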

void *asan_memalign(uptr alignment, uptr size, StackTrace *stack,
                    AllocType alloc_type) {
  return Allocate(size, alignment, stack, alloc_type, true);
}

void asan_free(void *ptr, StackTrace *stack, AllocType alloc_type) {
  Deallocate(ptr, stack, alloc_type);
}

void *asan_malloc(uptr size, StackTrace *stack) {
  return Allocate(size, 8, stack, FROM_MALLOC, true);
}

void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
  if (CallocShouldReturnNullDueToOverflow(size, nmemb))
    return AllocatorReturnNull();
  void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
  // If the memory comes from the secondary allocator no need to clear it
  // as it comes directly from mmap.
  if (ptr && allocator.FromPrimary(ptr))
    REAL(memset)(ptr, 0, nmemb * size);
  return ptr;
}

void *asan_realloc(void *p, uptr size, StackTrace *stack) {
  if (p == 0)
    return Allocate(size, 8, stack, FROM_MALLOC, true);
  if (size == 0) {
    Deallocate(p, stack, FROM_MALLOC);
    return 0;
  }
  return Reallocate(p, size, stack);
}

void *asan_valloc(uptr size, StackTrace *stack) {
  return Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true);
}

void *asan_pvalloc(uptr size, StackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  size = RoundUpTo(size, PageSize);
  if (size == 0) {
    // pvalloc(0) should allocate one page.
    size = PageSize;
  }
  return Allocate(size, PageSize, stack, FROM_MALLOC, true);
}

int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        StackTrace *stack) {
  void *ptr = Allocate(size, alignment, stack, FROM_MALLOC, true);
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

uptr asan_malloc_usable_size(void *ptr, StackTrace *stack) {
  CHECK(stack);
  if (ptr == 0) return 0;
  uptr usable_size = AllocationSize(reinterpret_cast<uptr>(ptr));
  if (flags()->check_malloc_usable_size && (usable_size == 0))
    ReportMallocUsableSizeNotOwned((uptr)ptr, stack);
  return usable_size;
}

uptr asan_mz_size(const void *ptr) {
  return AllocationSize(reinterpret_cast<uptr>(ptr));
}

void asan_mz_force_lock() {
  allocator.ForceLock();
  fallback_mutex.Lock();
}

void asan_mz_force_unlock() {
  fallback_mutex.Unlock();
  allocator.ForceUnlock();
}

}  // namespace __asan

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
void LockAllocator() {
  __asan::allocator.ForceLock();
}

void UnlockAllocator() {
  __asan::allocator.ForceUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&__asan::allocator;
  *end = *begin + sizeof(__asan::allocator);
}

uptr PointsIntoChunk(void* p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::GetAsanChunkByAddrFastLocked(addr);
  if (!m) return 0;
  uptr chunk = m->Beg();
  if ((m->chunk_state == __asan::CHUNK_ALLOCATED) &&
      m->AddrIsInside(addr, /*locked_version=*/true))
    return chunk;
  return 0;
}

uptr GetUserBegin(uptr chunk) {
  __asan::AsanChunk *m =
      __asan::GetAsanChunkByAddrFastLocked(chunk);
  CHECK(m);
  return m->Beg();
}

LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize);
}

bool LsanMetadata::allocated() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->chunk_state == __asan::CHUNK_ALLOCATED;
}

ChunkTag LsanMetadata::tag() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return static_cast<ChunkTag>(m->lsan_tag);
}

void LsanMetadata::set_tag(ChunkTag value) {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  m->lsan_tag = value;
}

uptr LsanMetadata::requested_size() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->UsedSize(/*locked_version=*/true);
}

u32 LsanMetadata::stack_trace_id() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->alloc_context_id;
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  __asan::allocator.ForEachChunk(callback, arg);
}

IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::GetAsanChunkByAddr(addr);
  if (!m) return kIgnoreObjectInvalid;
  if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr)) {
    if (m->lsan_tag == kIgnored)
      return kIgnoreObjectAlreadyIgnored;
    m->lsan_tag = __lsan::kIgnored;
    return kIgnoreObjectSuccess;
  } else {
    return kIgnoreObjectInvalid;
  }
}
}  // namespace __lsan
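
// Note on lock ordering (a sketch, not enforced by assertions here):
// asan_mz_force_lock() takes the allocator locks first and fallback_mutex
// second, and asan_mz_force_unlock() releases them in reverse order.
// LockAllocator()/UnlockAllocator() above give LSan the same allocator
// locks during stop-the-world, which is why its chunk lookups go through
// the GetAsanChunkByAddrFastLocked() variant.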

// ---------------------- Interface ---------------- {{{1
using namespace __asan;  // NOLINT

// ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size". We don't want to expose our redzone sizes, etc. here.
uptr __asan_get_estimated_allocated_size(uptr size) {
  return size;
}

bool __asan_get_ownership(const void *p) {
  uptr ptr = reinterpret_cast<uptr>(p);
  return (AllocationSize(ptr) > 0);
}

uptr __asan_get_allocated_size(const void *p) {
  if (p == 0) return 0;
  uptr ptr = reinterpret_cast<uptr>(p);
  uptr allocated_size = AllocationSize(ptr);
  // Die if p is not malloced or if it is already freed.
  if (allocated_size == 0) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportAsanGetAllocatedSizeNotOwned(ptr, &stack);
  }
  return allocated_size;
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __asan_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __asan_free_hook(void *ptr) {
  (void)ptr;
}
}  // extern "C"
#endif
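
// Hypothetical client code (illustrative, not part of this file): because
// the hooks are declared weak, a program built with ASan can observe every
// allocation and deallocation by defining its own versions:
//
//   #include <stdio.h>
//   extern "C" void __asan_malloc_hook(void *ptr, size_t size) {
//     fprintf(stderr, "allocated %p (%zu bytes)\n", ptr, size);
//   }
//   extern "C" void __asan_free_hook(void *ptr) {
//     fprintf(stderr, "freed %p\n", ptr);
//   }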