asan_allocator2.cc revision b1971ca4a3057916ca90a733c672a08127d5fe67
//===-- asan_allocator2.cc ------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Implementation of ASan's memory allocator, 2nd version.
// This variant uses the allocator from sanitizer_common, i.e. the one shared
// with ThreadSanitizer and MemorySanitizer.
//
// Status: under development, not enabled by default yet.
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"

#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_quarantine.h"

namespace __asan {

struct AsanMapUnmapCallback {
  void OnMap(uptr p, uptr size) const {
    PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
    // Statistics.
    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.mmaps++;
    thread_stats.mmaped += size;
  }
  void OnUnmap(uptr p, uptr size) const {
    PoisonShadow(p, size, 0);
    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    // Since asan's mapping is compacting, the shadow chunk may not be
    // page-aligned, so we only flush the page-aligned portion.
    uptr page_size = GetPageSizeCached();
    uptr shadow_beg = RoundUpTo(MemToShadow(p), page_size);
    uptr shadow_end = RoundDownTo(MemToShadow(p + size), page_size);
    FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
    // Statistics.
    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.munmaps++;
    thread_stats.munmaped += size;
  }
};

#if SANITIZER_WORDSIZE == 64
#if defined(__powerpc64__)
const uptr kAllocatorSpace = 0xa0000000000ULL;
#else
const uptr kAllocatorSpace = 0x600000000000ULL;
#endif
const uptr kAllocatorSize = 0x40000000000ULL;  // 4T.
typedef DefaultSizeClassMap SizeClassMap;
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0 /*metadata*/,
    SizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
#elif SANITIZER_WORDSIZE == 32
static const u64 kAddressSpaceSize = 1ULL << 32;
typedef CompactSizeClassMap SizeClassMap;
typedef SizeClassAllocator32<0, kAddressSpaceSize, 16,
    SizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
#endif

typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<AsanMapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
    SecondaryAllocator> Allocator;

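// A minimal sketch of how this combined allocator is driven later in this
// file: small requests are served by the size-class-based primary allocator
// through a per-thread AllocatorCache, and requests the primary cannot
// handle fall back to the mmap-based secondary allocator behind the same
// interface.
//
//   AllocatorCache *cache = ...;  // normally lives in thread-local storage
//   void *p = allocator.Allocate(cache, needed_size, 8, false);
//   ...
//   allocator.Deallocate(cache, p);
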
// We cannot use THREADLOCAL because it is not supported on some of the
// platforms we care about (OSX 10.6, Android).
// static THREADLOCAL AllocatorCache cache;
AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator2_cache));
  return reinterpret_cast<AllocatorCache *>(ms->allocator2_cache);
}

static Allocator allocator;

static const uptr kMaxAllowedMallocSize =
    FIRST_32_SECOND_64(3UL << 30, 8UL << 30);

static const uptr kMaxThreadLocalQuarantine =
    FIRST_32_SECOND_64(1 << 18, 1 << 20);

// Every chunk of memory allocated by this allocator can be in one of 3 states:
// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
enum {
  CHUNK_AVAILABLE  = 0,  // 0 is the default value even if we didn't set it.
  CHUNK_ALLOCATED  = 2,
  CHUNK_QUARANTINE = 3
};

// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
// We use adaptive redzones: for larger allocations, larger redzones are used.
static u32 RZLog2Size(u32 rz_log) {
  CHECK_LT(rz_log, 8);
  return 16 << rz_log;
}

static u32 RZSize2Log(u32 rz_size) {
  CHECK_GE(rz_size, 16);
  CHECK_LE(rz_size, 2048);
  CHECK(IsPowerOfTwo(rz_size));
  u32 res = Log2(rz_size) - 4;
  CHECK_EQ(rz_size, RZLog2Size(res));
  return res;
}

static uptr ComputeRZLog(uptr user_requested_size) {
  u32 rz_log =
    user_requested_size <= 64        - 16   ? 0 :
    user_requested_size <= 128       - 32   ? 1 :
    user_requested_size <= 512       - 64   ? 2 :
    user_requested_size <= 4096      - 128  ? 3 :
    user_requested_size <= (1 << 14) - 256  ? 4 :
    user_requested_size <= (1 << 15) - 512  ? 5 :
    user_requested_size <= (1 << 16) - 1024 ? 6 : 7;
  return Max(rz_log, RZSize2Log(flags()->redzone));
}

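// Worked example, assuming the default minimum redzone of 16 bytes (so that
// RZSize2Log(flags()->redzone) == 0): a 100-byte request falls into the
// "user_requested_size <= 512 - 64" bucket, so ComputeRZLog(100) returns 2
// and RZLog2Size(2) == 16 << 2 == 64, i.e. a 64-byte redzone.
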
// The memory chunk allocated from the underlying allocator looks like this:
// L L L L L L H H U U U U U U R R
//   L -- left redzone words (0 or more bytes)
//   H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
//   U -- user memory.
//   R -- right redzone (0 or more bytes)
// ChunkBase consists of ChunkHeader and other bytes that overlap with user
// memory.

// If a memory chunk is allocated by memalign and we had to increase the
// allocation size to achieve the proper alignment, then we store this magic
// value in the first uptr word of the memory block and store the address of
// ChunkBase in the next uptr.
// M B ? ? ? L L L L L L H H U U U U U U
//   M -- magic value kMemalignMagic
//   B -- address of ChunkHeader pointing to the first 'H'
static const uptr kMemalignMagic = 0xCC6E96B9;

struct ChunkHeader {
  // First 8 bytes.
  u32 chunk_state       : 8;  // Must be first.
  u32 alloc_tid         : 24;

  u32 free_tid          : 24;
  u32 from_memalign     : 1;
  u32 alloc_type        : 2;
  u32 rz_log            : 3;
  // Second 8 bytes.
  // This field is used for small sizes. For large sizes it is equal to
  // SizeClassMap::kMaxSize and the actual size is stored in the
  // SecondaryAllocator's metadata.
  u32 user_requested_size;
  u32 alloc_context_id;
};

struct ChunkBase : ChunkHeader {
  // Header2, intersects with user memory.
  AsanChunk *next;
  u32 free_context_id;
};

static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
COMPILER_CHECK(kChunkHeaderSize == 16);
COMPILER_CHECK(kChunkHeader2Size <= 16);

struct AsanChunk: ChunkBase {
  uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
  uptr UsedSize() {
    if (user_requested_size != SizeClassMap::kMaxSize)
      return user_requested_size;
    return *reinterpret_cast<uptr *>(allocator.GetMetaData(AllocBeg()));
  }
  void *AllocBeg() {
    if (from_memalign)
      return allocator.GetBlockBegin(reinterpret_cast<void *>(this));
    return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log));
  }
  // We store the alloc/free stack traces in the chunk itself.
  u32 *AllocStackBeg() {
    return (u32*)(Beg() - RZLog2Size(rz_log));
  }
  uptr AllocStackSize() {
    CHECK_GE(RZLog2Size(rz_log), kChunkHeaderSize);
    return (RZLog2Size(rz_log) - kChunkHeaderSize) / sizeof(u32);
  }
  u32 *FreeStackBeg() {
    return (u32*)(Beg() + kChunkHeader2Size);
  }
  uptr FreeStackSize() {
    if (user_requested_size < kChunkHeader2Size) return 0;
    uptr available = RoundUpTo(user_requested_size, SHADOW_GRANULARITY);
    return (available - kChunkHeader2Size) / sizeof(u32);
  }
};

uptr AsanChunkView::Beg() { return chunk_->Beg(); }
uptr AsanChunkView::End() { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }

static void GetStackTraceFromId(u32 id, StackTrace *stack) {
  CHECK(id);
  uptr size = 0;
  const uptr *trace = StackDepotGet(id, &size);
  CHECK_LT(size, kStackTraceMax);
  internal_memcpy(stack->trace, trace, sizeof(uptr) * size);
  stack->size = size;
}

void AsanChunkView::GetAllocStack(StackTrace *stack) {
  if (flags()->use_stack_depot)
    GetStackTraceFromId(chunk_->alloc_context_id, stack);
  else
    StackTrace::UncompressStack(stack, chunk_->AllocStackBeg(),
                                chunk_->AllocStackSize());
}

void AsanChunkView::GetFreeStack(StackTrace *stack) {
  if (flags()->use_stack_depot)
    GetStackTraceFromId(chunk_->free_context_id, stack);
  else
    StackTrace::UncompressStack(stack, chunk_->FreeStackBeg(),
                                chunk_->FreeStackSize());
}

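// A sketch of how the context ids above are used when flags()->use_stack_depot
// is on: the 32-bit ids stored in the chunk header round-trip through the
// common stack depot.
//
//   // At allocation/deallocation time (see Allocate/Deallocate below):
//   m->alloc_context_id = StackDepotPut(stack->trace, stack->size);
//   // At report time (GetStackTraceFromId above):
//   uptr size;
//   const uptr *trace = StackDepotGet(m->alloc_context_id, &size);
//
// Otherwise the compressed trace is stored directly in the chunk's redzone
// via CompressStack/UncompressStack.
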
struct QuarantineCallback;
typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
typedef AsanQuarantine::Cache QuarantineCache;
static AsanQuarantine quarantine(LINKER_INITIALIZED);
static QuarantineCache fallback_quarantine_cache(LINKER_INITIALIZED);
static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex;

QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
  return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
}

struct QuarantineCallback {
  explicit QuarantineCallback(AllocatorCache *cache)
      : cache_(cache) {
  }

  void Recycle(AsanChunk *m) {
    CHECK(m->chunk_state == CHUNK_QUARANTINE);
    m->chunk_state = CHUNK_AVAILABLE;
    CHECK_NE(m->alloc_tid, kInvalidTid);
    CHECK_NE(m->free_tid, kInvalidTid);
    PoisonShadow(m->Beg(),
                 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
                 kAsanHeapLeftRedzoneMagic);
    void *p = reinterpret_cast<void *>(m->AllocBeg());
    if (m->from_memalign) {
      uptr *memalign_magic = reinterpret_cast<uptr *>(p);
      CHECK_EQ(memalign_magic[0], kMemalignMagic);
      CHECK_EQ(memalign_magic[1], reinterpret_cast<uptr>(m));
    }

    // Statistics.
    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.real_frees++;
    thread_stats.really_freed += m->UsedSize();

    allocator.Deallocate(cache_, p);
  }

  void *Allocate(uptr size) {
    return allocator.Allocate(cache_, size, 1, false);
  }

  void Deallocate(void *p) {
    allocator.Deallocate(cache_, p);
  }

  AllocatorCache *cache_;
};

void InitializeAllocator() {
  allocator.Init();
  quarantine.Init((uptr)flags()->quarantine_size, kMaxThreadLocalQuarantine);
}

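// A rough sketch of the quarantine life cycle as used below: Deallocate()
// does not return a freed chunk to the allocator immediately; it poisons the
// chunk and calls quarantine.Put(). Once the quarantine grows past
// flags()->quarantine_size bytes, old chunks are evicted through
// QuarantineCallback::Recycle(), which marks them CHUNK_AVAILABLE, re-poisons
// their shadow as heap redzone, and only then calls allocator.Deallocate().
// This delay is what keeps use-after-free bugs detectable for a while after
// free().
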
static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
                      AllocType alloc_type, bool can_fill) {
  if (!asan_inited)
    __asan_init();
  Flags &fl = *flags();
  CHECK(stack);
  const uptr min_alignment = SHADOW_GRANULARITY;
  if (alignment < min_alignment)
    alignment = min_alignment;
  if (size == 0) {
    // We'd be happy to avoid allocating memory for zero-size requests, but
    // some programs/tests depend on this behavior and assume that malloc
    // would not return NULL even for zero-size allocations. Moreover, it
    // looks like operator new should never return NULL, and results of
    // consecutive "new" calls must be different even if the allocated size
    // is zero.
    size = 1;
  }
  CHECK(IsPowerOfTwo(alignment));
  uptr rz_log = ComputeRZLog(size);
  uptr rz_size = RZLog2Size(rz_log);
  uptr rounded_size = RoundUpTo(size, alignment);
  if (rounded_size < kChunkHeader2Size)
    rounded_size = kChunkHeader2Size;
  uptr needed_size = rounded_size + rz_size;
  if (alignment > min_alignment)
    needed_size += alignment;
  bool using_primary_allocator = true;
  // If we are allocating from the secondary allocator, there will be no
  // automatic right redzone, so add the right redzone manually.
  if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) {
    needed_size += rz_size;
    using_primary_allocator = false;
  }
  CHECK(IsAligned(needed_size, min_alignment));
  if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
    Report("WARNING: AddressSanitizer failed to allocate %p bytes\n",
           (void*)size);
    return 0;
  }

  AsanThread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocated = allocator.Allocate(cache, needed_size, 8, false);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, needed_size, 8, false);
  }
  uptr alloc_beg = reinterpret_cast<uptr>(allocated);
  // Clear the first allocated word (an old kMemalignMagic may still be there).
  reinterpret_cast<uptr *>(alloc_beg)[0] = 0;
  uptr alloc_end = alloc_beg + needed_size;
  uptr beg_plus_redzone = alloc_beg + rz_size;
  uptr user_beg = beg_plus_redzone;
  if (!IsAligned(user_beg, alignment))
    user_beg = RoundUpTo(user_beg, alignment);
  uptr user_end = user_beg + size;
  CHECK_LE(user_end, alloc_end);
  uptr chunk_beg = user_beg - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
  m->chunk_state = CHUNK_ALLOCATED;
  m->alloc_type = alloc_type;
  m->rz_log = rz_log;
  u32 alloc_tid = t ? t->tid() : 0;
  m->alloc_tid = alloc_tid;
  CHECK_EQ(alloc_tid, m->alloc_tid);  // Does alloc_tid fit into the bitfield?
  m->free_tid = kInvalidTid;
  m->from_memalign = user_beg != beg_plus_redzone;
  if (m->from_memalign) {
    CHECK_LE(beg_plus_redzone + 2 * sizeof(uptr), user_beg);
    uptr *memalign_magic = reinterpret_cast<uptr *>(alloc_beg);
    memalign_magic[0] = kMemalignMagic;
    memalign_magic[1] = chunk_beg;
  }
  if (using_primary_allocator) {
    CHECK(size);
    m->user_requested_size = size;
    CHECK(allocator.FromPrimary(allocated));
  } else {
    CHECK(!allocator.FromPrimary(allocated));
    m->user_requested_size = SizeClassMap::kMaxSize;
    uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
    meta[0] = size;
    meta[1] = chunk_beg;
  }

  if (fl.use_stack_depot) {
    m->alloc_context_id = StackDepotPut(stack->trace, stack->size);
  } else {
    m->alloc_context_id = 0;
    StackTrace::CompressStack(stack, m->AllocStackBeg(), m->AllocStackSize());
  }

  uptr size_rounded_down_to_granularity = RoundDownTo(size, SHADOW_GRANULARITY);
  // Unpoison the bulk of the memory region.
  if (size_rounded_down_to_granularity)
    PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
  // Deal with the end of the region if size is not aligned to granularity.
  if (size != size_rounded_down_to_granularity && fl.poison_heap) {
    u8 *shadow = (u8*)MemToShadow(user_beg + size_rounded_down_to_granularity);
    *shadow = size & (SHADOW_GRANULARITY - 1);
  }

  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.mallocs++;
  thread_stats.malloced += size;
  thread_stats.malloced_redzones += needed_size - size;
  uptr class_id = Min(kNumberOfSizeClasses, SizeClassMap::ClassID(needed_size));
  thread_stats.malloced_by_size[class_id]++;
  if (needed_size > SizeClassMap::kMaxSize)
    thread_stats.malloc_large++;

  void *res = reinterpret_cast<void *>(user_beg);
  if (can_fill && fl.max_malloc_fill_size) {
    uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size);
    REAL(memset)(res, fl.malloc_fill_byte, fill_size);
  }
  ASAN_MALLOC_HOOK(res, size);
  return res;
}

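// Worked example of the layout produced by Allocate() for malloc(100)
// (alignment 8, primary allocator, assuming the default 16-byte minimum
// redzone): rz_log = 2, rz_size = 64, rounded_size = 104, needed_size = 168.
// user_beg = alloc_beg + 64 is already 8-aligned, the ChunkHeader occupies
// [user_beg - 16, user_beg), bytes [user_beg, user_beg + 96) are unpoisoned,
// and the shadow byte covering the last partial granule is set to
// 100 % 8 == 4.
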
static void Deallocate(void *ptr, StackTrace *stack, AllocType alloc_type) {
  uptr p = reinterpret_cast<uptr>(ptr);
  if (p == 0) return;
  ASAN_FREE_HOOK(ptr);
  uptr chunk_beg = p - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

  u8 old_chunk_state = CHUNK_ALLOCATED;
  // Flip the chunk_state atomically to avoid a race on double-free.
  if (!atomic_compare_exchange_strong((atomic_uint8_t*)m, &old_chunk_state,
                                      CHUNK_QUARANTINE, memory_order_relaxed)) {
    if (old_chunk_state == CHUNK_QUARANTINE)
      ReportDoubleFree((uptr)ptr, stack);
    else
      ReportFreeNotMalloced((uptr)ptr, stack);
  }
  CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);

  if (m->alloc_type != alloc_type && flags()->alloc_dealloc_mismatch)
    ReportAllocTypeMismatch((uptr)ptr, stack,
                            (AllocType)m->alloc_type, (AllocType)alloc_type);

  CHECK_GE(m->alloc_tid, 0);
  if (SANITIZER_WORDSIZE == 64)  // On 32-bits this resides in user area.
    CHECK_EQ(m->free_tid, kInvalidTid);
  AsanThread *t = GetCurrentThread();
  m->free_tid = t ? t->tid() : 0;
  if (flags()->use_stack_depot) {
    m->free_context_id = StackDepotPut(stack->trace, stack->size);
  } else {
    m->free_context_id = 0;
    StackTrace::CompressStack(stack, m->FreeStackBeg(), m->FreeStackSize());
  }
  CHECK(m->chunk_state == CHUNK_QUARANTINE);
  // Poison the region.
  PoisonShadow(m->Beg(),
               RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
               kAsanHeapFreeMagic);

  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.frees++;
  thread_stats.freed += m->UsedSize();

  // Push into quarantine.
  if (t) {
    AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
    AllocatorCache *ac = GetAllocatorCache(ms);
    quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac),
                   m, m->UsedSize());
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *ac = &fallback_allocator_cache;
    quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac),
                   m, m->UsedSize());
  }
}

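// The atomic state flip at the top of Deallocate() is what turns a double
// free into a report rather than a race; a minimal sketch:
//
//   void *p = malloc(8);
//   free(p);  // CAS: CHUNK_ALLOCATED -> CHUNK_QUARANTINE succeeds.
//   free(p);  // CAS fails, old state is CHUNK_QUARANTINE -> ReportDoubleFree.
//
// A pointer that never came from this allocator fails the same CAS with some
// other state byte and is reported via ReportFreeNotMalloced().
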
static void *Reallocate(void *old_ptr, uptr new_size, StackTrace *stack) {
  CHECK(old_ptr && new_size);
  uptr p = reinterpret_cast<uptr>(old_ptr);
  uptr chunk_beg = p - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.reallocs++;
  thread_stats.realloced += new_size;

  CHECK(m->chunk_state == CHUNK_ALLOCATED);
  uptr old_size = m->UsedSize();
  uptr memcpy_size = Min(new_size, old_size);
  void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
  if (new_ptr) {
    CHECK_NE(REAL(memcpy), (void*)0);
    REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
    Deallocate(old_ptr, stack, FROM_MALLOC);
  }
  return new_ptr;
}

static AsanChunk *GetAsanChunkByAddr(uptr p) {
  void *ptr = reinterpret_cast<void *>(p);
  uptr alloc_beg = reinterpret_cast<uptr>(allocator.GetBlockBegin(ptr));
  if (!alloc_beg) return 0;
  uptr *memalign_magic = reinterpret_cast<uptr *>(alloc_beg);
  if (memalign_magic[0] == kMemalignMagic) {
    AsanChunk *m = reinterpret_cast<AsanChunk *>(memalign_magic[1]);
    CHECK(m->from_memalign);
    return m;
  }
  if (!allocator.FromPrimary(ptr)) {
    uptr *meta = reinterpret_cast<uptr *>(
        allocator.GetMetaData(reinterpret_cast<void *>(alloc_beg)));
    AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]);
    return m;
  }
  uptr actual_size = allocator.GetActuallyAllocatedSize(ptr);
  CHECK_LE(actual_size, SizeClassMap::kMaxSize);
  // We know the actually allocated size, but we don't know the redzone size.
  // Just try all possible redzone sizes.
  for (u32 rz_log = 0; rz_log < 8; rz_log++) {
    u32 rz_size = RZLog2Size(rz_log);
    uptr max_possible_size = actual_size - rz_size;
    if (ComputeRZLog(max_possible_size) != rz_log)
      continue;
    return reinterpret_cast<AsanChunk *>(
        alloc_beg + rz_size - kChunkHeaderSize);
  }
  return 0;
}

static uptr AllocationSize(uptr p) {
  AsanChunk *m = GetAsanChunkByAddr(p);
  if (!m) return 0;
  if (m->chunk_state != CHUNK_ALLOCATED) return 0;
  if (m->Beg() != p) return 0;
  return m->UsedSize();
}

// We have an address between two chunks, and we want to report just one.
AsanChunk *ChooseChunk(uptr addr,
                       AsanChunk *left_chunk, AsanChunk *right_chunk) {
  // Prefer an allocated chunk over a freed chunk, and a freed chunk
  // over an available chunk.
  if (left_chunk->chunk_state != right_chunk->chunk_state) {
    if (left_chunk->chunk_state == CHUNK_ALLOCATED)
      return left_chunk;
    if (right_chunk->chunk_state == CHUNK_ALLOCATED)
      return right_chunk;
    if (left_chunk->chunk_state == CHUNK_QUARANTINE)
      return left_chunk;
    if (right_chunk->chunk_state == CHUNK_QUARANTINE)
      return right_chunk;
  }
  // Same chunk_state: choose based on offset.
  sptr l_offset = 0, r_offset = 0;
  CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
  CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
  if (l_offset < r_offset)
    return left_chunk;
  return right_chunk;
}

AsanChunkView FindHeapChunkByAddress(uptr addr) {
  AsanChunk *m1 = GetAsanChunkByAddr(addr);
  if (!m1) return AsanChunkView(m1);
  sptr offset = 0;
  if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
    // The address is in the chunk's left redzone, so maybe it is actually
    // a right buffer overflow from the other chunk to the left.
    // Search a bit to the left to see if there is another chunk.
    AsanChunk *m2 = 0;
    for (uptr l = 1; l < GetPageSizeCached(); l++) {
      m2 = GetAsanChunkByAddr(addr - l);
      if (m2 == m1) continue;  // Still the same chunk.
      break;
    }
    if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
      m1 = ChooseChunk(addr, m2, m1);
  }
  return AsanChunkView(m1);
}

void AsanThreadLocalMallocStorage::CommitBack() {
  AllocatorCache *ac = GetAllocatorCache(this);
  quarantine.Drain(GetQuarantineCache(this), QuarantineCallback(ac));
  allocator.SwallowCache(GetAllocatorCache(this));
}

void PrintInternalAllocatorStats() {
  allocator.PrintStats();
}

SANITIZER_INTERFACE_ATTRIBUTE
void *asan_memalign(uptr alignment, uptr size, StackTrace *stack,
                    AllocType alloc_type) {
  return Allocate(size, alignment, stack, alloc_type, true);
}

SANITIZER_INTERFACE_ATTRIBUTE
void asan_free(void *ptr, StackTrace *stack, AllocType alloc_type) {
  Deallocate(ptr, stack, alloc_type);
}

SANITIZER_INTERFACE_ATTRIBUTE
void *asan_malloc(uptr size, StackTrace *stack) {
  return Allocate(size, 8, stack, FROM_MALLOC, true);
}

void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
  if (CallocShouldReturnNullDueToOverflow(size, nmemb)) return 0;
  void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
  // If the memory comes from the secondary allocator there is no need to
  // clear it, as it comes directly from mmap.
  if (ptr && allocator.FromPrimary(ptr))
    REAL(memset)(ptr, 0, nmemb * size);
  return ptr;
}

void *asan_realloc(void *p, uptr size, StackTrace *stack) {
  if (p == 0)
    return Allocate(size, 8, stack, FROM_MALLOC, true);
  if (size == 0) {
    Deallocate(p, stack, FROM_MALLOC);
    return 0;
  }
  return Reallocate(p, size, stack);
}

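// The realloc wrapper above follows the usual C semantics; a minimal sketch:
//
//   void *p = asan_realloc(0, 100, stack);  // behaves like malloc(100)
//   p = asan_realloc(p, 200, stack);        // copies Min(old, new) bytes, frees old p
//   p = asan_realloc(p, 0, stack);          // frees p and returns 0
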
void *asan_valloc(uptr size, StackTrace *stack) {
  return Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true);
}

void *asan_pvalloc(uptr size, StackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  size = RoundUpTo(size, PageSize);
  if (size == 0) {
    // pvalloc(0) should allocate one page.
    size = PageSize;
  }
  return Allocate(size, PageSize, stack, FROM_MALLOC, true);
}

int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        StackTrace *stack) {
  void *ptr = Allocate(size, alignment, stack, FROM_MALLOC, true);
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

uptr asan_malloc_usable_size(void *ptr, StackTrace *stack) {
  CHECK(stack);
  if (ptr == 0) return 0;
  uptr usable_size = AllocationSize(reinterpret_cast<uptr>(ptr));
  if (flags()->check_malloc_usable_size && (usable_size == 0))
    ReportMallocUsableSizeNotOwned((uptr)ptr, stack);
  return usable_size;
}

uptr asan_mz_size(const void *ptr) {
  return AllocationSize(reinterpret_cast<uptr>(ptr));
}

void asan_mz_force_lock() {
  allocator.ForceLock();
  fallback_mutex.Lock();
}

void asan_mz_force_unlock() {
  fallback_mutex.Unlock();
  allocator.ForceUnlock();
}

}  // namespace __asan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;  // NOLINT

// ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size". We don't want to expose our redzone sizes, etc. here.
uptr __asan_get_estimated_allocated_size(uptr size) {
  return size;
}

bool __asan_get_ownership(const void *p) {
  uptr ptr = reinterpret_cast<uptr>(p);
  return (AllocationSize(ptr) > 0);
}

uptr __asan_get_allocated_size(const void *p) {
  if (p == 0) return 0;
  uptr ptr = reinterpret_cast<uptr>(p);
  uptr allocated_size = AllocationSize(ptr);
  // Die if p is not malloced or if it is already freed.
  if (allocated_size == 0) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportAsanGetAllocatedSizeNotOwned(ptr, &stack);
  }
  return allocated_size;
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
extern "C" {
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
void __asan_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
void __asan_free_hook(void *ptr) {
  (void)ptr;
}
}  // extern "C"
#endif
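
// Because the hooks above are weak symbols, a client can observe allocations
// by providing its own definitions; a minimal sketch (not part of this file):
//
//   extern "C" void __asan_malloc_hook(void *ptr, uptr size) {
//     // Called via ASAN_MALLOC_HOOK just before Allocate() returns ptr.
//   }
//   extern "C" void __asan_free_hook(void *ptr) {
//     // Called via ASAN_FREE_HOOK at the start of Deallocate().
//   }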