asan_allocator2.cc revision e7ca05603222f61bab630f619bf59c0c927ff7de
//===-- asan_allocator2.cc ------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Implementation of ASan's memory allocator, 2-nd version.
// This variant uses the allocator from sanitizer_common, i.e. the one shared
// with ThreadSanitizer and MemorySanitizer.
//
// Status: under development, not enabled by default yet.
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"

#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_quarantine.h"

namespace __asan {

struct AsanMapUnmapCallback {
  void OnMap(uptr p, uptr size) const {
    PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
    // Statistics.
    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.mmaps++;
    thread_stats.mmaped += size;
  }
  void OnUnmap(uptr p, uptr size) const {
    PoisonShadow(p, size, 0);
    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    // Since asan's mapping is compacting, the shadow chunk may be
    // not page-aligned, so we only flush the page-aligned portion.
    uptr page_size = GetPageSizeCached();
    uptr shadow_beg = RoundUpTo(MemToShadow(p), page_size);
    uptr shadow_end = RoundDownTo(MemToShadow(p + size), page_size);
    FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
    // Statistics.
    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.munmaps++;
    thread_stats.munmaped += size;
  }
};

#if SANITIZER_WORDSIZE == 64
#if defined(__powerpc64__)
const uptr kAllocatorSpace = 0xa0000000000ULL;
const uptr kAllocatorSize  = 0x20000000000ULL;  // 2T.
#else
const uptr kAllocatorSpace = 0x600000000000ULL;
const uptr kAllocatorSize  = 0x40000000000ULL;  // 4T.
#endif
typedef DefaultSizeClassMap SizeClassMap;
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0 /*metadata*/,
    SizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
#elif SANITIZER_WORDSIZE == 32
static const u64 kAddressSpaceSize = 1ULL << 32;
typedef CompactSizeClassMap SizeClassMap;
static const uptr kRegionSizeLog = 20;
static const uptr kFlatByteMapSize = kAddressSpaceSize >> kRegionSizeLog;
typedef SizeClassAllocator32<0, kAddressSpaceSize, 16,
    SizeClassMap, kRegionSizeLog,
    FlatByteMap<kFlatByteMapSize>,
    AsanMapUnmapCallback> PrimaryAllocator;
#endif

typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<AsanMapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
    SecondaryAllocator> Allocator;
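// A sketch of how these pieces fit together: the CombinedAllocator routes
// small and medium requests to the size-class-based primary allocator
// (fronted by a per-thread AllocatorCache, see GetAllocatorCache() below),
// and large requests to the mmap-based secondary allocator.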

// We can not use THREADLOCAL because it is not supported on some of the
// platforms we care about (OSX 10.6, Android).
// static THREADLOCAL AllocatorCache cache;
AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator2_cache));
  return reinterpret_cast<AllocatorCache *>(ms->allocator2_cache);
}

static Allocator allocator;

static const uptr kMaxAllowedMallocSize =
    FIRST_32_SECOND_64(3UL << 30, 8UL << 30);

static const uptr kMaxThreadLocalQuarantine =
    FIRST_32_SECOND_64(1 << 18, 1 << 20);

// Every chunk of memory allocated by this allocator can be in one of 3 states:
// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
enum {
  CHUNK_AVAILABLE  = 0,  // 0 is the default value even if we didn't set it.
  CHUNK_ALLOCATED  = 2,
  CHUNK_QUARANTINE = 3
};

// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
// We use adaptive redzones: for larger allocation larger redzones are used.
static u32 RZLog2Size(u32 rz_log) {
  CHECK_LT(rz_log, 8);
  return 16 << rz_log;
}

static u32 RZSize2Log(u32 rz_size) {
  CHECK_GE(rz_size, 16);
  CHECK_LE(rz_size, 2048);
  CHECK(IsPowerOfTwo(rz_size));
  u32 res = Log2(rz_size) - 4;
  CHECK_EQ(rz_size, RZLog2Size(res));
  return res;
}

static uptr ComputeRZLog(uptr user_requested_size) {
  u32 rz_log =
      user_requested_size <= 64        - 16   ? 0 :
      user_requested_size <= 128       - 32   ? 1 :
      user_requested_size <= 512       - 64   ? 2 :
      user_requested_size <= 4096      - 128  ? 3 :
      user_requested_size <= (1 << 14) - 256  ? 4 :
      user_requested_size <= (1 << 15) - 512  ? 5 :
      user_requested_size <= (1 << 16) - 1024 ? 6 : 7;
  return Max(rz_log, RZSize2Log(flags()->redzone));
}

// The memory chunk allocated from the underlying allocator looks like this:
// L L L L L L H H U U U U U U R R
//   L -- left redzone words (0 or more bytes)
//   H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
//   U -- user memory.
//   R -- right redzone (0 or more bytes)
// ChunkBase consists of ChunkHeader and other bytes that overlap with user
// memory.

// If a memory chunk is allocated by memalign and we had to increase the
// allocation size to achieve the proper alignment, then we store this magic
// value in the first uptr word of the memory block and store the address of
// ChunkBase in the next uptr.
// M B ? ? ? L L L L L L H H U U U U U U
//   M -- magic value kMemalignMagic
//   B -- address of ChunkHeader pointing to the first 'H'
static const uptr kMemalignMagic = 0xCC6E96B9;

struct ChunkHeader {
  // 1-st 8 bytes.
  u32 chunk_state       : 8;  // Must be first.
  u32 alloc_tid         : 24;

  u32 free_tid          : 24;
  u32 from_memalign     : 1;
  u32 alloc_type        : 2;
  u32 rz_log            : 3;
  // 2-nd 8 bytes
  // This field is used for small sizes. For large sizes it is equal to
  // SizeClassMap::kMaxSize and the actual size is stored in the
  // SecondaryAllocator's metadata.
  u32 user_requested_size;
  u32 alloc_context_id;
};

struct ChunkBase : ChunkHeader {
  // Header2, intersects with user memory.
  u32 free_context_id;
};

static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
COMPILER_CHECK(kChunkHeaderSize == 16);
COMPILER_CHECK(kChunkHeader2Size <= 16);
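
// Worked example (illustrative, assuming the default redzone flag of 16 and
// 8-byte shadow granularity): for malloc(100), ComputeRZLog(100) == 2, so the
// redzones are RZLog2Size(2) == 64 bytes each, and the 16-byte ChunkHeader
// occupies the last 16 bytes of the left redzone, right before user memory.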

struct AsanChunk: ChunkBase {
  uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
  uptr UsedSize() {
    if (user_requested_size != SizeClassMap::kMaxSize)
      return user_requested_size;
    return *reinterpret_cast<uptr *>(allocator.GetMetaData(AllocBeg()));
  }
  void *AllocBeg() {
    if (from_memalign)
      return allocator.GetBlockBegin(reinterpret_cast<void *>(this));
    return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log));
  }
  // If we don't use stack depot, we store the alloc/free stack traces
  // in the chunk itself.
  u32 *AllocStackBeg() {
    return (u32*)(Beg() - RZLog2Size(rz_log));
  }
  uptr AllocStackSize() {
    CHECK_LE(RZLog2Size(rz_log), kChunkHeaderSize);
    return (RZLog2Size(rz_log) - kChunkHeaderSize) / sizeof(u32);
  }
  u32 *FreeStackBeg() {
    return (u32*)(Beg() + kChunkHeader2Size);
  }
  uptr FreeStackSize() {
    if (user_requested_size < kChunkHeader2Size) return 0;
    uptr available = RoundUpTo(user_requested_size, SHADOW_GRANULARITY);
    return (available - kChunkHeader2Size) / sizeof(u32);
  }
};

uptr AsanChunkView::Beg() { return chunk_->Beg(); }
uptr AsanChunkView::End() { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }

static void GetStackTraceFromId(u32 id, StackTrace *stack) {
  CHECK(id);
  uptr size = 0;
  const uptr *trace = StackDepotGet(id, &size);
  CHECK_LT(size, kStackTraceMax);
  internal_memcpy(stack->trace, trace, sizeof(uptr) * size);
  stack->size = size;
}

void AsanChunkView::GetAllocStack(StackTrace *stack) {
  if (flags()->use_stack_depot)
    GetStackTraceFromId(chunk_->alloc_context_id, stack);
  else
    StackTrace::UncompressStack(stack, chunk_->AllocStackBeg(),
                                chunk_->AllocStackSize());
}

void AsanChunkView::GetFreeStack(StackTrace *stack) {
  if (flags()->use_stack_depot)
    GetStackTraceFromId(chunk_->free_context_id, stack);
  else
    StackTrace::UncompressStack(stack, chunk_->FreeStackBeg(),
                                chunk_->FreeStackSize());
}

struct QuarantineCallback;
typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
typedef AsanQuarantine::Cache QuarantineCache;
static AsanQuarantine quarantine(LINKER_INITIALIZED);
static QuarantineCache fallback_quarantine_cache(LINKER_INITIALIZED);
static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex;

QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
  return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
}

struct QuarantineCallback {
  explicit QuarantineCallback(AllocatorCache *cache)
      : cache_(cache) {
  }

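  // Recycle() is what finally hands a quarantined chunk back to the
  // underlying allocator: freed chunks stay poisoned in the quarantine until
  // its size limit (flags()->quarantine_size) is exceeded, at which point the
  // oldest chunks are recycled.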
  void Recycle(AsanChunk *m) {
    CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
    atomic_store((atomic_uint8_t*)m, CHUNK_AVAILABLE, memory_order_relaxed);
    CHECK_NE(m->alloc_tid, kInvalidTid);
    CHECK_NE(m->free_tid, kInvalidTid);
    PoisonShadow(m->Beg(),
                 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
                 kAsanHeapLeftRedzoneMagic);
    void *p = reinterpret_cast<void *>(m->AllocBeg());
    if (m->from_memalign) {
      uptr *memalign_magic = reinterpret_cast<uptr *>(p);
      CHECK_EQ(memalign_magic[0], kMemalignMagic);
      CHECK_EQ(memalign_magic[1], reinterpret_cast<uptr>(m));
    }

    // Statistics.
    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.real_frees++;
    thread_stats.really_freed += m->UsedSize();

    allocator.Deallocate(cache_, p);
  }

  void *Allocate(uptr size) {
    return allocator.Allocate(cache_, size, 1, false);
  }

  void Deallocate(void *p) {
    allocator.Deallocate(cache_, p);
  }

  AllocatorCache *cache_;
};

void InitializeAllocator() {
  allocator.Init();
  quarantine.Init((uptr)flags()->quarantine_size, kMaxThreadLocalQuarantine);
}

static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
                      AllocType alloc_type, bool can_fill) {
  if (!asan_inited)
    __asan_init();
  Flags &fl = *flags();
  CHECK(stack);
  const uptr min_alignment = SHADOW_GRANULARITY;
  if (alignment < min_alignment)
    alignment = min_alignment;
  if (size == 0) {
    // We'd be happy to avoid allocating memory for zero-size requests, but
    // some programs/tests depend on this behavior and assume that malloc would
    // not return NULL even for zero-size allocations. Moreover, it looks like
    // operator new should never return NULL, and results of consecutive "new"
    // calls must be different even if the allocated size is zero.
    size = 1;
  }
  CHECK(IsPowerOfTwo(alignment));
  uptr rz_log = ComputeRZLog(size);
  uptr rz_size = RZLog2Size(rz_log);
  uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment);
  uptr needed_size = rounded_size + rz_size;
  if (alignment > min_alignment)
    needed_size += alignment;
  bool using_primary_allocator = true;
  // If we are allocating from the secondary allocator, there will be no
  // automatic right redzone, so add the right redzone manually.
  if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) {
    needed_size += rz_size;
    using_primary_allocator = false;
  }
  CHECK(IsAligned(needed_size, min_alignment));
  if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
    Report("WARNING: AddressSanitizer failed to allocate %p bytes\n",
           (void*)size);
    return 0;
  }

  AsanThread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocated = allocator.Allocate(cache, needed_size, 8, false);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, needed_size, 8, false);
  }
  uptr alloc_beg = reinterpret_cast<uptr>(allocated);
  // Clear the first allocated word (an old kMemalignMagic may still be there).
  reinterpret_cast<uptr *>(alloc_beg)[0] = 0;
  uptr alloc_end = alloc_beg + needed_size;
  uptr beg_plus_redzone = alloc_beg + rz_size;
  uptr user_beg = beg_plus_redzone;
  if (!IsAligned(user_beg, alignment))
    user_beg = RoundUpTo(user_beg, alignment);
  uptr user_end = user_beg + size;
  CHECK_LE(user_end, alloc_end);
  uptr chunk_beg = user_beg - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
  m->alloc_type = alloc_type;
  m->rz_log = rz_log;
  u32 alloc_tid = t ? t->tid() : 0;
  m->alloc_tid = alloc_tid;
  CHECK_EQ(alloc_tid, m->alloc_tid);  // Does alloc_tid fit into the bitfield?
  m->free_tid = kInvalidTid;
  m->from_memalign = user_beg != beg_plus_redzone;
  if (m->from_memalign) {
    CHECK_LE(beg_plus_redzone + 2 * sizeof(uptr), user_beg);
    uptr *memalign_magic = reinterpret_cast<uptr *>(alloc_beg);
    memalign_magic[0] = kMemalignMagic;
    memalign_magic[1] = chunk_beg;
  }
  if (using_primary_allocator) {
    CHECK(size);
    m->user_requested_size = size;
    CHECK(allocator.FromPrimary(allocated));
  } else {
    CHECK(!allocator.FromPrimary(allocated));
    m->user_requested_size = SizeClassMap::kMaxSize;
    uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
    meta[0] = size;
    meta[1] = chunk_beg;
  }

  if (fl.use_stack_depot) {
    m->alloc_context_id = StackDepotPut(stack->trace, stack->size);
  } else {
    m->alloc_context_id = 0;
    StackTrace::CompressStack(stack, m->AllocStackBeg(), m->AllocStackSize());
  }

  uptr size_rounded_down_to_granularity = RoundDownTo(size, SHADOW_GRANULARITY);
  // Unpoison the bulk of the memory region.
  if (size_rounded_down_to_granularity)
    PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
  // Deal with the end of the region if size is not aligned to granularity.
  if (size != size_rounded_down_to_granularity && fl.poison_heap) {
    u8 *shadow = (u8*)MemToShadow(user_beg + size_rounded_down_to_granularity);
    *shadow = size & (SHADOW_GRANULARITY - 1);
  }

  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.mallocs++;
  thread_stats.malloced += size;
  thread_stats.malloced_redzones += needed_size - size;
  uptr class_id = Min(kNumberOfSizeClasses, SizeClassMap::ClassID(needed_size));
  thread_stats.malloced_by_size[class_id]++;
  if (needed_size > SizeClassMap::kMaxSize)
    thread_stats.malloc_large++;

  void *res = reinterpret_cast<void *>(user_beg);
  if (can_fill && fl.max_malloc_fill_size) {
    uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size);
    REAL(memset)(res, fl.malloc_fill_byte, fill_size);
  }
  // Must be the last mutation of metadata in this function.
  atomic_store((atomic_uint8_t *)m, CHUNK_ALLOCATED, memory_order_release);
  ASAN_MALLOC_HOOK(res, size);
  return res;
}

static void Deallocate(void *ptr, StackTrace *stack, AllocType alloc_type) {
  uptr p = reinterpret_cast<uptr>(ptr);
  if (p == 0) return;
  ASAN_FREE_HOOK(ptr);
  uptr chunk_beg = p - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

  u8 old_chunk_state = CHUNK_ALLOCATED;
  // Flip the chunk_state atomically to avoid race on double-free.
  // Must be the first mutation of metadata in this function.
  if (!atomic_compare_exchange_strong((atomic_uint8_t*)m, &old_chunk_state,
                                      CHUNK_QUARANTINE, memory_order_acquire)) {
    if (old_chunk_state == CHUNK_QUARANTINE)
      ReportDoubleFree((uptr)ptr, stack);
    else
      ReportFreeNotMalloced((uptr)ptr, stack);
  }
  CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
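  // At this point exactly one thread has performed the ALLOCATED ->
  // QUARANTINE transition for this chunk (a failed exchange above means the
  // chunk was not in the ALLOCATED state and an error was reported), so the
  // metadata updates below cannot race with another free of the same pointer.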

  if (m->alloc_type != alloc_type && flags()->alloc_dealloc_mismatch)
    ReportAllocTypeMismatch((uptr)ptr, stack,
                            (AllocType)m->alloc_type, (AllocType)alloc_type);

  CHECK_GE(m->alloc_tid, 0);
  if (SANITIZER_WORDSIZE == 64)  // On 32-bits this resides in user area.
    CHECK_EQ(m->free_tid, kInvalidTid);
  AsanThread *t = GetCurrentThread();
  m->free_tid = t ? t->tid() : 0;
  if (flags()->use_stack_depot) {
    m->free_context_id = StackDepotPut(stack->trace, stack->size);
  } else {
    m->free_context_id = 0;
    StackTrace::CompressStack(stack, m->FreeStackBeg(), m->FreeStackSize());
  }
  CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
  // Poison the region.
  PoisonShadow(m->Beg(),
               RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
               kAsanHeapFreeMagic);

  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.frees++;
  thread_stats.freed += m->UsedSize();

  // Push into quarantine.
  if (t) {
    AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
    AllocatorCache *ac = GetAllocatorCache(ms);
    quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac),
                   m, m->UsedSize());
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *ac = &fallback_allocator_cache;
    quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac),
                   m, m->UsedSize());
  }
}

static void *Reallocate(void *old_ptr, uptr new_size, StackTrace *stack) {
  CHECK(old_ptr && new_size);
  uptr p = reinterpret_cast<uptr>(old_ptr);
  uptr chunk_beg = p - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.reallocs++;
  thread_stats.realloced += new_size;

  CHECK_EQ(m->chunk_state, CHUNK_ALLOCATED);
  uptr old_size = m->UsedSize();
  uptr memcpy_size = Min(new_size, old_size);
  void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
  if (new_ptr) {
    CHECK_NE(REAL(memcpy), (void*)0);
    REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
    Deallocate(old_ptr, stack, FROM_MALLOC);
  }
  return new_ptr;
}

static AsanChunk *GetAsanChunkByAddr(uptr p) {
  void *ptr = reinterpret_cast<void *>(p);
  uptr alloc_beg = reinterpret_cast<uptr>(allocator.GetBlockBegin(ptr));
  if (!alloc_beg) return 0;
  uptr *memalign_magic = reinterpret_cast<uptr *>(alloc_beg);
  if (memalign_magic[0] == kMemalignMagic) {
    AsanChunk *m = reinterpret_cast<AsanChunk *>(memalign_magic[1]);
    CHECK(m->from_memalign);
    return m;
  }
  if (!allocator.FromPrimary(ptr)) {
    uptr *meta = reinterpret_cast<uptr *>(
        allocator.GetMetaData(reinterpret_cast<void *>(alloc_beg)));
    AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]);
    return m;
  }
  uptr actual_size = allocator.GetActuallyAllocatedSize(ptr);
  CHECK_LE(actual_size, SizeClassMap::kMaxSize);
  // We know the actually allocated size, but we don't know the redzone size.
  // Just try all possible redzone sizes.
  for (u32 rz_log = 0; rz_log < 8; rz_log++) {
    u32 rz_size = RZLog2Size(rz_log);
    uptr max_possible_size = actual_size - rz_size;
    if (ComputeRZLog(max_possible_size) != rz_log)
      continue;
    return reinterpret_cast<AsanChunk *>(
        alloc_beg + rz_size - kChunkHeaderSize);
  }
  return 0;
}

static uptr AllocationSize(uptr p) {
  AsanChunk *m = GetAsanChunkByAddr(p);
  if (!m) return 0;
  if (m->chunk_state != CHUNK_ALLOCATED) return 0;
  if (m->Beg() != p) return 0;
  return m->UsedSize();
}
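
// Note that AllocationSize() returns 0 unless |p| is the exact beginning of a
// live (CHUNK_ALLOCATED) chunk; this is what makes __asan_get_ownership() and
// malloc_usable_size() reject interior and already-freed pointers.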

// We have an address between two chunks, and we want to report just one.
AsanChunk *ChooseChunk(uptr addr,
                       AsanChunk *left_chunk, AsanChunk *right_chunk) {
  // Prefer an allocated chunk over freed chunk and freed chunk
  // over available chunk.
  if (left_chunk->chunk_state != right_chunk->chunk_state) {
    if (left_chunk->chunk_state == CHUNK_ALLOCATED)
      return left_chunk;
    if (right_chunk->chunk_state == CHUNK_ALLOCATED)
      return right_chunk;
    if (left_chunk->chunk_state == CHUNK_QUARANTINE)
      return left_chunk;
    if (right_chunk->chunk_state == CHUNK_QUARANTINE)
      return right_chunk;
  }
  // Same chunk_state: choose based on offset.
  sptr l_offset = 0, r_offset = 0;
  CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
  CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
  if (l_offset < r_offset)
    return left_chunk;
  return right_chunk;
}

AsanChunkView FindHeapChunkByAddress(uptr addr) {
  AsanChunk *m1 = GetAsanChunkByAddr(addr);
  if (!m1) return AsanChunkView(m1);
  sptr offset = 0;
  if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
    // The address is in the chunk's left redzone, so maybe it is actually
    // a right buffer overflow from the other chunk to the left.
    // Search a bit to the left to see if there is another chunk.
    AsanChunk *m2 = 0;
    for (uptr l = 1; l < GetPageSizeCached(); l++) {
      m2 = GetAsanChunkByAddr(addr - l);
      if (m2 == m1) continue;  // Still the same chunk.
      break;
    }
    if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
      m1 = ChooseChunk(addr, m2, m1);
  }
  return AsanChunkView(m1);
}

void AsanThreadLocalMallocStorage::CommitBack() {
  AllocatorCache *ac = GetAllocatorCache(this);
  quarantine.Drain(GetQuarantineCache(this), QuarantineCallback(ac));
  allocator.SwallowCache(GetAllocatorCache(this));
}

void PrintInternalAllocatorStats() {
  allocator.PrintStats();
}

SANITIZER_INTERFACE_ATTRIBUTE
void *asan_memalign(uptr alignment, uptr size, StackTrace *stack,
                    AllocType alloc_type) {
  return Allocate(size, alignment, stack, alloc_type, true);
}

SANITIZER_INTERFACE_ATTRIBUTE
void asan_free(void *ptr, StackTrace *stack, AllocType alloc_type) {
  Deallocate(ptr, stack, alloc_type);
}

SANITIZER_INTERFACE_ATTRIBUTE
void *asan_malloc(uptr size, StackTrace *stack) {
  return Allocate(size, 8, stack, FROM_MALLOC, true);
}

void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
  if (CallocShouldReturnNullDueToOverflow(size, nmemb)) return 0;
  void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
  // If the memory comes from the secondary allocator no need to clear it
  // as it comes directly from mmap.
  if (ptr && allocator.FromPrimary(ptr))
    REAL(memset)(ptr, 0, nmemb * size);
  return ptr;
}

void *asan_realloc(void *p, uptr size, StackTrace *stack) {
  if (p == 0)
    return Allocate(size, 8, stack, FROM_MALLOC, true);
  if (size == 0) {
    Deallocate(p, stack, FROM_MALLOC);
    return 0;
  }
  return Reallocate(p, size, stack);
}

void *asan_valloc(uptr size, StackTrace *stack) {
  return Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true);
}

void *asan_pvalloc(uptr size, StackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  size = RoundUpTo(size, PageSize);
  if (size == 0) {
    // pvalloc(0) should allocate one page.
    size = PageSize;
  }
  return Allocate(size, PageSize, stack, FROM_MALLOC, true);
}

int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        StackTrace *stack) {
  void *ptr = Allocate(size, alignment, stack, FROM_MALLOC, true);
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

uptr asan_malloc_usable_size(void *ptr, StackTrace *stack) {
  CHECK(stack);
  if (ptr == 0) return 0;
  uptr usable_size = AllocationSize(reinterpret_cast<uptr>(ptr));
  if (flags()->check_malloc_usable_size && (usable_size == 0))
    ReportMallocUsableSizeNotOwned((uptr)ptr, stack);
  return usable_size;
}

uptr asan_mz_size(const void *ptr) {
  return AllocationSize(reinterpret_cast<uptr>(ptr));
}

void asan_mz_force_lock() {
  allocator.ForceLock();
  fallback_mutex.Lock();
}

void asan_mz_force_unlock() {
  fallback_mutex.Unlock();
  allocator.ForceUnlock();
}

}  // namespace __asan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;  // NOLINT

// ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size". We don't want to expose our redzone sizes, etc here.
uptr __asan_get_estimated_allocated_size(uptr size) {
  return size;
}

bool __asan_get_ownership(const void *p) {
  uptr ptr = reinterpret_cast<uptr>(p);
  return (AllocationSize(ptr) > 0);
}

uptr __asan_get_allocated_size(const void *p) {
  if (p == 0) return 0;
  uptr ptr = reinterpret_cast<uptr>(p);
  uptr allocated_size = AllocationSize(ptr);
  // Die if p is not malloced or if it is already freed.
  if (allocated_size == 0) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportAsanGetAllocatedSizeNotOwned(ptr, &stack);
  }
  return allocated_size;
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
extern "C" {
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
void __asan_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
void __asan_free_hook(void *ptr) {
  (void)ptr;
}
}  // extern "C"
#endif