asan_allocator2.cc revision 2d1fdb26e458c4ddc04155c1d421bced3ba90cd0
//===-- asan_allocator2.cc ------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Implementation of ASan's memory allocator, 2-nd version.
// This variant uses the allocator from sanitizer_common, i.e. the one shared
// with ThreadSanitizer and MemorySanitizer.
//
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"

#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_quarantine.h"
#include "lsan/lsan_common.h"

namespace __asan {

void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const {
  PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
  // Statistics.
  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.mmaps++;
  thread_stats.mmaped += size;
}
void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
  PoisonShadow(p, size, 0);
  // We are about to unmap a chunk of user memory.
  // Mark the corresponding shadow memory as not needed.
  FlushUnneededASanShadowMemory(p, size);
  // Statistics.
  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.munmaps++;
  thread_stats.munmaped += size;
}

// We can not use THREADLOCAL because it is not supported on some of the
// platforms we care about (OSX 10.6, Android).
// static THREADLOCAL AllocatorCache cache;
AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  return &ms->allocator2_cache;
}

static Allocator allocator;

static const uptr kMaxAllowedMallocSize =
    FIRST_32_SECOND_64(3UL << 30, 64UL << 30);

static const uptr kMaxThreadLocalQuarantine =
    FIRST_32_SECOND_64(1 << 18, 1 << 20);

// Every chunk of memory allocated by this allocator can be in one of 3 states:
// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
enum {
  CHUNK_AVAILABLE  = 0,  // 0 is the default value even if we didn't set it.
  CHUNK_ALLOCATED  = 2,
  CHUNK_QUARANTINE = 3
};

// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
// We use adaptive redzones: for larger allocation larger redzones are used.
static u32 RZLog2Size(u32 rz_log) {
  CHECK_LT(rz_log, 8);
  return 16 << rz_log;
}

static u32 RZSize2Log(u32 rz_size) {
  CHECK_GE(rz_size, 16);
  CHECK_LE(rz_size, 2048);
  CHECK(IsPowerOfTwo(rz_size));
  u32 res = Log2(rz_size) - 4;
  CHECK_EQ(rz_size, RZLog2Size(res));
  return res;
}

static uptr ComputeRZLog(uptr user_requested_size) {
  u32 rz_log =
    user_requested_size <= 64        - 16   ? 0 :
    user_requested_size <= 128       - 32   ? 1 :
    user_requested_size <= 512       - 64   ? 2 :
    user_requested_size <= 4096      - 128  ? 3 :
    user_requested_size <= (1 << 14) - 256  ? 4 :
    user_requested_size <= (1 << 15) - 512  ? 5 :
    user_requested_size <= (1 << 16) - 1024 ? 6 : 7;
  return Min(Max(rz_log, RZSize2Log(flags()->redzone)),
             RZSize2Log(flags()->max_redzone));
}
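
// Example: a 100-byte request does not fit the first two buckets (48 and 96
// bytes) but fits "<= 512 - 64", so the table above gives rz_log = 2, i.e. a
// 64-byte redzone; the result is then clamped between the redzone and
// max_redzone flags.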

// The memory chunk allocated from the underlying allocator looks like this:
// L L L L L L H H U U U U U U R R
//   L -- left redzone words (0 or more bytes)
//   H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
//   U -- user memory.
//   R -- right redzone (0 or more bytes)
// ChunkBase consists of ChunkHeader and other bytes that overlap with user
// memory.

// If the left redzone is greater than the ChunkHeader size we store a magic
// value in the first uptr word of the memory block and store the address of
// ChunkBase in the next uptr.
// M B L L L L L L L L L  H H U U U U U U
//   |                    ^
//   ---------------------|
//   M -- magic value kAllocBegMagic
//   B -- address of ChunkHeader pointing to the first 'H'
static const uptr kAllocBegMagic = 0xCC6E96B9;

struct ChunkHeader {
  // 1-st 8 bytes.
  u32 chunk_state       : 8;  // Must be first.
  u32 alloc_tid         : 24;

  u32 free_tid          : 24;
  u32 from_memalign     : 1;
  u32 alloc_type        : 2;
  u32 rz_log            : 3;
  u32 lsan_tag          : 2;
  // 2-nd 8 bytes
  // This field is used for small sizes. For large sizes it is equal to
  // SizeClassMap::kMaxSize and the actual size is stored in the
  // SecondaryAllocator's metadata.
  u32 user_requested_size;
  u32 alloc_context_id;
};

struct ChunkBase : ChunkHeader {
  // Header2, intersects with user memory.
  u32 free_context_id;
};

static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
COMPILER_CHECK(kChunkHeaderSize == 16);
COMPILER_CHECK(kChunkHeader2Size <= 16);

struct AsanChunk: ChunkBase {
  uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
  uptr UsedSize(bool locked_version = false) {
    if (user_requested_size != SizeClassMap::kMaxSize)
      return user_requested_size;
    return *reinterpret_cast<uptr *>(
                allocator.GetMetaData(AllocBeg(locked_version)));
  }
  void *AllocBeg(bool locked_version = false) {
    if (from_memalign) {
      if (locked_version)
        return allocator.GetBlockBeginFastLocked(
            reinterpret_cast<void *>(this));
      return allocator.GetBlockBegin(reinterpret_cast<void *>(this));
    }
    return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log));
  }
  // If we don't use stack depot, we store the alloc/free stack traces
  // in the chunk itself.
  u32 *AllocStackBeg() {
    return (u32*)(Beg() - RZLog2Size(rz_log));
  }
  uptr AllocStackSize() {
    CHECK_LE(RZLog2Size(rz_log), kChunkHeaderSize);
    return (RZLog2Size(rz_log) - kChunkHeaderSize) / sizeof(u32);
  }
  u32 *FreeStackBeg() {
    return (u32*)(Beg() + kChunkHeader2Size);
  }
  uptr FreeStackSize() {
    if (user_requested_size < kChunkHeader2Size) return 0;
    uptr available = RoundUpTo(user_requested_size, SHADOW_GRANULARITY);
    return (available - kChunkHeader2Size) / sizeof(u32);
  }
  bool AddrIsInside(uptr addr, bool locked_version = false) {
    return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version));
  }
};
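
// AsanChunkView is the read-only accessor used when describing heap addresses
// in error reports. A view is valid for allocated and quarantined chunks, but
// not for chunks that have been returned to the allocator's free list.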
bool AsanChunkView::IsValid() {
  return chunk_ != 0 && chunk_->chunk_state != CHUNK_AVAILABLE;
}
uptr AsanChunkView::Beg() { return chunk_->Beg(); }
uptr AsanChunkView::End() { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }

static void GetStackTraceFromId(u32 id, StackTrace *stack) {
  CHECK(id);
  uptr size = 0;
  const uptr *trace = StackDepotGet(id, &size);
  CHECK(trace);
  stack->CopyFrom(trace, size);
}

void AsanChunkView::GetAllocStack(StackTrace *stack) {
  GetStackTraceFromId(chunk_->alloc_context_id, stack);
}

void AsanChunkView::GetFreeStack(StackTrace *stack) {
  GetStackTraceFromId(chunk_->free_context_id, stack);
}

struct QuarantineCallback;
typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
typedef AsanQuarantine::Cache QuarantineCache;
static AsanQuarantine quarantine(LINKER_INITIALIZED);
static QuarantineCache fallback_quarantine_cache(LINKER_INITIALIZED);
static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex;

QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
  return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
}

struct QuarantineCallback {
  explicit QuarantineCallback(AllocatorCache *cache)
      : cache_(cache) {
  }

  void Recycle(AsanChunk *m) {
    CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
    atomic_store((atomic_uint8_t*)m, CHUNK_AVAILABLE, memory_order_relaxed);
    CHECK_NE(m->alloc_tid, kInvalidTid);
    CHECK_NE(m->free_tid, kInvalidTid);
    PoisonShadow(m->Beg(),
                 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
                 kAsanHeapLeftRedzoneMagic);
    void *p = reinterpret_cast<void *>(m->AllocBeg());
    if (p != m) {
      uptr *alloc_magic = reinterpret_cast<uptr *>(p);
      CHECK_EQ(alloc_magic[0], kAllocBegMagic);
      // Clear the magic value, as allocator internals may overwrite the
      // contents of deallocated chunk, confusing GetAsanChunk lookup.
      alloc_magic[0] = 0;
      CHECK_EQ(alloc_magic[1], reinterpret_cast<uptr>(m));
    }

    // Statistics.
    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.real_frees++;
    thread_stats.really_freed += m->UsedSize();

    allocator.Deallocate(cache_, p);
  }

  void *Allocate(uptr size) {
    return allocator.Allocate(cache_, size, 1, false);
  }

  void Deallocate(void *p) {
    allocator.Deallocate(cache_, p);
  }

  AllocatorCache *cache_;
};
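
// Freed chunks are not handed back to the underlying allocator immediately:
// Deallocate() below flips them to CHUNK_QUARANTINE and pushes them into the
// quarantine, and Recycle() above finally releases them (re-poisoning the
// region as a left redzone) once the quarantine evicts them.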

void InitializeAllocator() {
  allocator.Init();
  quarantine.Init((uptr)flags()->quarantine_size, kMaxThreadLocalQuarantine);
}

void ReInitializeAllocator() {
  quarantine.Init((uptr)flags()->quarantine_size, kMaxThreadLocalQuarantine);
}

static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
                      AllocType alloc_type, bool can_fill) {
  if (UNLIKELY(!asan_inited))
    AsanInitFromRtl();
  Flags &fl = *flags();
  CHECK(stack);
  const uptr min_alignment = SHADOW_GRANULARITY;
  if (alignment < min_alignment)
    alignment = min_alignment;
  if (size == 0) {
    // We'd be happy to avoid allocating memory for zero-size requests, but
    // some programs/tests depend on this behavior and assume that malloc would
    // not return NULL even for zero-size allocations. Moreover, it looks like
    // operator new should never return NULL, and results of consecutive "new"
    // calls must be different even if the allocated size is zero.
    size = 1;
  }
  CHECK(IsPowerOfTwo(alignment));
  uptr rz_log = ComputeRZLog(size);
  uptr rz_size = RZLog2Size(rz_log);
  uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment);
  uptr needed_size = rounded_size + rz_size;
  if (alignment > min_alignment)
    needed_size += alignment;
  bool using_primary_allocator = true;
  // If we are allocating from the secondary allocator, there will be no
  // automatic right redzone, so add the right redzone manually.
  if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) {
    needed_size += rz_size;
    using_primary_allocator = false;
  }
  CHECK(IsAligned(needed_size, min_alignment));
  if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
    Report("WARNING: AddressSanitizer failed to allocate %p bytes\n",
           (void*)size);
    return AllocatorReturnNull();
  }

  AsanThread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocated = allocator.Allocate(cache, needed_size, 8, false);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, needed_size, 8, false);
  }

  if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && flags()->poison_heap) {
    // Heap poisoning is enabled, but the allocator provides an unpoisoned
    // chunk. This is possible if flags()->poison_heap was disabled for some
    // time, for example, due to flags()->start_disabled.
    // Anyway, poison the block before using it for anything else.
    uptr allocated_size = allocator.GetActuallyAllocatedSize(allocated);
    PoisonShadow((uptr)allocated, allocated_size, kAsanHeapLeftRedzoneMagic);
  }
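
  // Carve the chunk out of the allocated block: skip the left redzone, round
  // the user region up to the requested alignment, and place the ChunkHeader
  // in the 16 bytes immediately preceding user_beg.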
  uptr alloc_beg = reinterpret_cast<uptr>(allocated);
  uptr alloc_end = alloc_beg + needed_size;
  uptr beg_plus_redzone = alloc_beg + rz_size;
  uptr user_beg = beg_plus_redzone;
  if (!IsAligned(user_beg, alignment))
    user_beg = RoundUpTo(user_beg, alignment);
  uptr user_end = user_beg + size;
  CHECK_LE(user_end, alloc_end);
  uptr chunk_beg = user_beg - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
  m->alloc_type = alloc_type;
  m->rz_log = rz_log;
  u32 alloc_tid = t ? t->tid() : 0;
  m->alloc_tid = alloc_tid;
  CHECK_EQ(alloc_tid, m->alloc_tid);  // Does alloc_tid fit into the bitfield?
  m->free_tid = kInvalidTid;
  m->from_memalign = user_beg != beg_plus_redzone;
  if (alloc_beg != chunk_beg) {
    CHECK_LE(alloc_beg + 2 * sizeof(uptr), chunk_beg);
    reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic;
    reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg;
  }
  if (using_primary_allocator) {
    CHECK(size);
    m->user_requested_size = size;
    CHECK(allocator.FromPrimary(allocated));
  } else {
    CHECK(!allocator.FromPrimary(allocated));
    m->user_requested_size = SizeClassMap::kMaxSize;
    uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
    meta[0] = size;
    meta[1] = chunk_beg;
  }

  m->alloc_context_id = StackDepotPut(stack->trace, stack->size);

  uptr size_rounded_down_to_granularity = RoundDownTo(size, SHADOW_GRANULARITY);
  // Unpoison the bulk of the memory region.
  if (size_rounded_down_to_granularity)
    PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
  // Deal with the end of the region if size is not aligned to granularity.
  if (size != size_rounded_down_to_granularity && fl.poison_heap) {
    u8 *shadow = (u8*)MemToShadow(user_beg + size_rounded_down_to_granularity);
    *shadow = fl.poison_partial ? (size & (SHADOW_GRANULARITY - 1)) : 0;
  }

  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.mallocs++;
  thread_stats.malloced += size;
  thread_stats.malloced_redzones += needed_size - size;
  uptr class_id =
      Min(kNumberOfSizeClasses, SizeClassMap::ClassID(needed_size));
  thread_stats.malloced_by_size[class_id]++;
  if (needed_size > SizeClassMap::kMaxSize)
    thread_stats.malloc_large++;

  void *res = reinterpret_cast<void *>(user_beg);
  if (can_fill && fl.max_malloc_fill_size) {
    uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size);
    REAL(memset)(res, fl.malloc_fill_byte, fill_size);
  }
#if CAN_SANITIZE_LEAKS
  m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored
                                               : __lsan::kDirectlyLeaked;
#endif
  // Must be the last mutation of metadata in this function.
  atomic_store((atomic_uint8_t *)m, CHUNK_ALLOCATED, memory_order_release);
  ASAN_MALLOC_HOOK(res, size);
  return res;
}

static void ReportInvalidFree(void *ptr, u8 chunk_state, StackTrace *stack) {
  if (chunk_state == CHUNK_QUARANTINE)
    ReportDoubleFree((uptr)ptr, stack);
  else
    ReportFreeNotMalloced((uptr)ptr, stack);
}
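
// The free path: the chunk is first atomically flipped from CHUNK_ALLOCATED
// to CHUNK_QUARANTINE (which catches double-frees), and only then is its
// metadata updated and the chunk pushed into the quarantine.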
static void AtomicallySetQuarantineFlag(AsanChunk *m,
                                        void *ptr, StackTrace *stack) {
  u8 old_chunk_state = CHUNK_ALLOCATED;
  // Flip the chunk_state atomically to avoid race on double-free.
  if (!atomic_compare_exchange_strong((atomic_uint8_t*)m, &old_chunk_state,
                                      CHUNK_QUARANTINE, memory_order_acquire))
    ReportInvalidFree(ptr, old_chunk_state, stack);
  CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
}

// Expects the chunk to already be marked as quarantined by using
// AtomicallySetQuarantineFlag.
static void QuarantineChunk(AsanChunk *m, void *ptr,
                            StackTrace *stack, AllocType alloc_type) {
  CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);

  if (m->alloc_type != alloc_type && flags()->alloc_dealloc_mismatch)
    ReportAllocTypeMismatch((uptr)ptr, stack,
                            (AllocType)m->alloc_type, (AllocType)alloc_type);

  CHECK_GE(m->alloc_tid, 0);
  if (SANITIZER_WORDSIZE == 64)  // On 32-bits this resides in user area.
    CHECK_EQ(m->free_tid, kInvalidTid);
  AsanThread *t = GetCurrentThread();
  m->free_tid = t ? t->tid() : 0;
  m->free_context_id = StackDepotPut(stack->trace, stack->size);
  // Poison the region.
  PoisonShadow(m->Beg(),
               RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
               kAsanHeapFreeMagic);

  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.frees++;
  thread_stats.freed += m->UsedSize();

  // Push into quarantine.
  if (t) {
    AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
    AllocatorCache *ac = GetAllocatorCache(ms);
    quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac),
                   m, m->UsedSize());
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *ac = &fallback_allocator_cache;
    quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac),
                   m, m->UsedSize());
  }
}

static void Deallocate(void *ptr, StackTrace *stack, AllocType alloc_type) {
  uptr p = reinterpret_cast<uptr>(ptr);
  if (p == 0) return;

  uptr chunk_beg = p - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
  ASAN_FREE_HOOK(ptr);
  // Must mark the chunk as quarantined before any changes to its metadata.
  AtomicallySetQuarantineFlag(m, ptr, stack);
  QuarantineChunk(m, ptr, stack, alloc_type);
}

static void *Reallocate(void *old_ptr, uptr new_size, StackTrace *stack) {
  CHECK(old_ptr && new_size);
  uptr p = reinterpret_cast<uptr>(old_ptr);
  uptr chunk_beg = p - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.reallocs++;
  thread_stats.realloced += new_size;

  void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
  if (new_ptr) {
    u8 chunk_state = m->chunk_state;
    if (chunk_state != CHUNK_ALLOCATED)
      ReportInvalidFree(old_ptr, chunk_state, stack);
    CHECK_NE(REAL(memcpy), (void*)0);
    uptr memcpy_size = Min(new_size, m->UsedSize());
    // If realloc() races with free(), we may start copying freed memory.
    // However, we will report racy double-free later anyway.
    REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
    Deallocate(old_ptr, stack, FROM_MALLOC);
  }
  return new_ptr;
}
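
// GetAsanChunk recovers the AsanChunk header from the start of an allocator
// block: for secondary-allocator blocks the header address is read from the
// block metadata; for primary blocks it is either located via the
// kAllocBegMagic word stored at alloc_beg or, when the left redzone is exactly
// one header in size, is alloc_beg itself.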
// Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
static AsanChunk *GetAsanChunk(void *alloc_beg) {
  if (!alloc_beg) return 0;
  if (!allocator.FromPrimary(alloc_beg)) {
    uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg));
    AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]);
    return m;
  }
  uptr *alloc_magic = reinterpret_cast<uptr *>(alloc_beg);
  if (alloc_magic[0] == kAllocBegMagic)
    return reinterpret_cast<AsanChunk *>(alloc_magic[1]);
  return reinterpret_cast<AsanChunk *>(alloc_beg);
}

static AsanChunk *GetAsanChunkByAddr(uptr p) {
  void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
  return GetAsanChunk(alloc_beg);
}

// Allocator must be locked when this function is called.
static AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) {
  void *alloc_beg =
      allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p));
  return GetAsanChunk(alloc_beg);
}

static uptr AllocationSize(uptr p) {
  AsanChunk *m = GetAsanChunkByAddr(p);
  if (!m) return 0;
  if (m->chunk_state != CHUNK_ALLOCATED) return 0;
  if (m->Beg() != p) return 0;
  return m->UsedSize();
}

// We have an address between two chunks, and we want to report just one.
AsanChunk *ChooseChunk(uptr addr,
                       AsanChunk *left_chunk, AsanChunk *right_chunk) {
  // Prefer an allocated chunk over freed chunk and freed chunk
  // over available chunk.
  if (left_chunk->chunk_state != right_chunk->chunk_state) {
    if (left_chunk->chunk_state == CHUNK_ALLOCATED)
      return left_chunk;
    if (right_chunk->chunk_state == CHUNK_ALLOCATED)
      return right_chunk;
    if (left_chunk->chunk_state == CHUNK_QUARANTINE)
      return left_chunk;
    if (right_chunk->chunk_state == CHUNK_QUARANTINE)
      return right_chunk;
  }
  // Same chunk_state: choose based on offset.
  sptr l_offset = 0, r_offset = 0;
  CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
  CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
  if (l_offset < r_offset)
    return left_chunk;
  return right_chunk;
}
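
// Used by error reporting to attribute an address to the most plausible heap
// chunk, even when the address falls into a redzone between two chunks.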
AsanChunkView FindHeapChunkByAddress(uptr addr) {
  AsanChunk *m1 = GetAsanChunkByAddr(addr);
  if (!m1) return AsanChunkView(m1);
  sptr offset = 0;
  if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
    // The address is in the chunk's left redzone, so maybe it is actually
    // a right buffer overflow from the other chunk to the left.
    // Search a bit to the left to see if there is another chunk.
    AsanChunk *m2 = 0;
    for (uptr l = 1; l < GetPageSizeCached(); l++) {
      m2 = GetAsanChunkByAddr(addr - l);
      if (m2 == m1) continue;  // Still the same chunk.
      break;
    }
    if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
      m1 = ChooseChunk(addr, m2, m1);
  }
  return AsanChunkView(m1);
}

void AsanThreadLocalMallocStorage::CommitBack() {
  AllocatorCache *ac = GetAllocatorCache(this);
  quarantine.Drain(GetQuarantineCache(this), QuarantineCallback(ac));
  allocator.SwallowCache(GetAllocatorCache(this));
}

void PrintInternalAllocatorStats() {
  allocator.PrintStats();
}
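
// Entry points called from the interceptors (malloc, free, calloc, realloc,
// memalign, etc.). They forward to Allocate()/Deallocate() above with the
// appropriate alignment and AllocType.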
void *asan_memalign(uptr alignment, uptr size, StackTrace *stack,
                    AllocType alloc_type) {
  return Allocate(size, alignment, stack, alloc_type, true);
}

void asan_free(void *ptr, StackTrace *stack, AllocType alloc_type) {
  Deallocate(ptr, stack, alloc_type);
}

void *asan_malloc(uptr size, StackTrace *stack) {
  return Allocate(size, 8, stack, FROM_MALLOC, true);
}

void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
  if (CallocShouldReturnNullDueToOverflow(size, nmemb))
    return AllocatorReturnNull();
  void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
  // If the memory comes from the secondary allocator no need to clear it
  // as it comes directly from mmap.
  if (ptr && allocator.FromPrimary(ptr))
    REAL(memset)(ptr, 0, nmemb * size);
  return ptr;
}

void *asan_realloc(void *p, uptr size, StackTrace *stack) {
  if (p == 0)
    return Allocate(size, 8, stack, FROM_MALLOC, true);
  if (size == 0) {
    Deallocate(p, stack, FROM_MALLOC);
    return 0;
  }
  return Reallocate(p, size, stack);
}

void *asan_valloc(uptr size, StackTrace *stack) {
  return Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true);
}

void *asan_pvalloc(uptr size, StackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  size = RoundUpTo(size, PageSize);
  if (size == 0) {
    // pvalloc(0) should allocate one page.
    size = PageSize;
  }
  return Allocate(size, PageSize, stack, FROM_MALLOC, true);
}

int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        StackTrace *stack) {
  void *ptr = Allocate(size, alignment, stack, FROM_MALLOC, true);
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

uptr asan_malloc_usable_size(void *ptr, uptr pc, uptr bp) {
  if (ptr == 0) return 0;
  uptr usable_size = AllocationSize(reinterpret_cast<uptr>(ptr));
  if (flags()->check_malloc_usable_size && (usable_size == 0)) {
    GET_STACK_TRACE_FATAL(pc, bp);
    ReportMallocUsableSizeNotOwned((uptr)ptr, &stack);
  }
  return usable_size;
}

uptr asan_mz_size(const void *ptr) {
  return AllocationSize(reinterpret_cast<uptr>(ptr));
}

void asan_mz_force_lock() {
  allocator.ForceLock();
  fallback_mutex.Lock();
}

void asan_mz_force_unlock() {
  fallback_mutex.Unlock();
  allocator.ForceUnlock();
}

}  // namespace __asan

// --- Implementation of LSan-specific functions --- {{{1
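// LSan walks the ASan heap via the functions below while the allocator is
// locked (see LockAllocator), which is why the locked_version / FastLocked
// accessors are used here.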
namespace __lsan {
void LockAllocator() {
  __asan::allocator.ForceLock();
}

void UnlockAllocator() {
  __asan::allocator.ForceUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&__asan::allocator;
  *end = *begin + sizeof(__asan::allocator);
}

uptr PointsIntoChunk(void* p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::GetAsanChunkByAddrFastLocked(addr);
  if (!m) return 0;
  uptr chunk = m->Beg();
  if (m->chunk_state != __asan::CHUNK_ALLOCATED)
    return 0;
  if (m->AddrIsInside(addr, /*locked_version=*/true))
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(/*locked_version*/ true),
                                  addr))
    return chunk;
  return 0;
}

uptr GetUserBegin(uptr chunk) {
  __asan::AsanChunk *m =
      __asan::GetAsanChunkByAddrFastLocked(chunk);
  CHECK(m);
  return m->Beg();
}

LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize);
}

bool LsanMetadata::allocated() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->chunk_state == __asan::CHUNK_ALLOCATED;
}

ChunkTag LsanMetadata::tag() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return static_cast<ChunkTag>(m->lsan_tag);
}

void LsanMetadata::set_tag(ChunkTag value) {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  m->lsan_tag = value;
}

uptr LsanMetadata::requested_size() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->UsedSize(/*locked_version=*/true);
}

u32 LsanMetadata::stack_trace_id() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->alloc_context_id;
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  __asan::allocator.ForEachChunk(callback, arg);
}

IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::GetAsanChunkByAddr(addr);
  if (!m) return kIgnoreObjectInvalid;
  if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr)) {
    if (m->lsan_tag == kIgnored)
      return kIgnoreObjectAlreadyIgnored;
    m->lsan_tag = __lsan::kIgnored;
    return kIgnoreObjectSuccess;
  } else {
    return kIgnoreObjectInvalid;
  }
}
}  // namespace __lsan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;  // NOLINT

// ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size". We don't want to expose our redzone sizes, etc here.
uptr __asan_get_estimated_allocated_size(uptr size) {
  return size;
}

int __asan_get_ownership(const void *p) {
  uptr ptr = reinterpret_cast<uptr>(p);
  return (AllocationSize(ptr) > 0);
}

uptr __asan_get_allocated_size(const void *p) {
  if (p == 0) return 0;
  uptr ptr = reinterpret_cast<uptr>(p);
  uptr allocated_size = AllocationSize(ptr);
  // Die if p is not malloced or if it is already freed.
  if (allocated_size == 0) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportAsanGetAllocatedSizeNotOwned(ptr, &stack);
  }
  return allocated_size;
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __asan_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __asan_free_hook(void *ptr) {
  (void)ptr;
}
}  // extern "C"
#endif