asan_allocator2.cc revision 73bad81febb2a872627c03e579beea1da4b49294

//===-- asan_allocator2.cc ------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Implementation of ASan's memory allocator, 2-nd version.
// This variant uses the allocator from sanitizer_common, i.e. the one shared
// with ThreadSanitizer and MemorySanitizer.
//
// Status: under development, not enabled by default yet.
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"
#if ASAN_ALLOCATOR_VERSION == 2

#include "asan_mapping.h"
#include "asan_report.h"
#include "asan_thread.h"
#include "asan_thread_registry.h"
#include "sanitizer/asan_interface.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"

namespace __asan {

struct AsanMapUnmapCallback {
  void OnMap(uptr p, uptr size) const {
    PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
    // Statistics.
    AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
    thread_stats.mmaps++;
    thread_stats.mmaped += size;
    // thread_stats.mmaped_by_size[size_class] += n_chunks;
  }
  void OnUnmap(uptr p, uptr size) const {
    PoisonShadow(p, size, 0);
    // Statistics.
    AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
    thread_stats.munmaps++;
    thread_stats.munmaped += size;
  }
};

#if SANITIZER_WORDSIZE == 64
const uptr kAllocatorSpace = 0x600000000000ULL;
const uptr kAllocatorSize = 0x10000000000ULL;  // 1T.
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0 /*metadata*/,
    DefaultSizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
#elif SANITIZER_WORDSIZE == 32
static const u64 kAddressSpaceSize = 1ULL << 32;
typedef SizeClassAllocator32<0, kAddressSpaceSize, 16,
    CompactSizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
#endif

typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<AsanMapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
    SecondaryAllocator> Allocator;
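
// Allocator composition: small allocations are served by the size-class
// based PrimaryAllocator (with one AllocatorCache of per-size-class free
// lists per thread, see GetAllocatorCache() below), while large allocations
// fall through to the mmap-based SecondaryAllocator. CombinedAllocator
// dispatches between the two and invokes AsanMapUnmapCallback, so memory
// newly mapped by the allocator stays poisoned as heap-left-redzone until
// it is handed out to the user in Allocate().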

// We can not use THREADLOCAL because it is not supported on some of the
// platforms we care about (OSX 10.6, Android).
// static THREADLOCAL AllocatorCache cache;
AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator2_cache));
  return reinterpret_cast<AllocatorCache *>(ms->allocator2_cache);
}

static Allocator allocator;

static const uptr kMaxAllowedMallocSize =
    FIRST_32_SECOND_64(3UL << 30, 8UL << 30);

static const uptr kMaxThreadLocalQuarantine =
    FIRST_32_SECOND_64(1 << 18, 1 << 20);

static const uptr kReturnOnZeroMalloc = 0x0123;  // Zero page is protected.

static int inited = 0;

static void Init() {
  if (inited) return;
  __asan_init();
  inited = true;  // this must happen before any threads are created.
  allocator.Init();
}

// Every chunk of memory allocated by this allocator can be in one of 3 states:
// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
enum {
  CHUNK_AVAILABLE = 1,
  CHUNK_ALLOCATED = 2,
  CHUNK_QUARANTINE = 3
};

// The memory chunk allocated from the underlying allocator looks like this:
// L L L L L L H H U U U U U U R R
//   L -- left redzone words (0 or more bytes)
//   H -- ChunkHeader (16 bytes on 64-bit arch, 8 bytes on 32-bit arch).
//        ChunkHeader is also a part of the left redzone.
//   U -- user memory.
//   R -- right redzone (0 or more bytes)
// ChunkBase consists of ChunkHeader and other bytes that overlap with user
// memory.
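
// For example, assuming flags()->redzone == 128 on a 64-bit target, an
// 8-byte-aligned malloc(100) is carved out of a 256-byte block roughly as
// follows (offsets from the start of the block):
//   [  0, 112) -- left redzone; reused to store the compressed allocation
//                 stack trace (see AllocStackBeg() below).
//   [112, 128) -- ChunkHeader (chunk_state, tids, user_requested_size).
//   [128, 228) -- the 100 user bytes; the first 8 overlap Header2 ("next").
//   [228, 256) -- right redzone / rounding slack.
// After free() the compressed free stack trace is written over the user
// bytes (see FreeStackBeg() below).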

#if SANITIZER_WORDSIZE == 64
struct ChunkBase {
  // 1-st 8 bytes.
  uptr chunk_state : 8;  // Must be first.
  uptr alloc_tid : 24;
  uptr free_tid : 24;
  uptr from_memalign : 1;
  // 2-nd 8 bytes
  uptr user_requested_size;
  // Header2 (intersects with user memory).
  // 3-rd 8 bytes. These overlap with the user memory.
  AsanChunk *next;
};

static const uptr kChunkHeaderSize = 16;
static const uptr kChunkHeader2Size = 8;

#elif SANITIZER_WORDSIZE == 32
struct ChunkBase {
  // 1-st 8 bytes.
  uptr chunk_state : 8;  // Must be first.
  uptr alloc_tid : 24;
  uptr from_memalign : 1;
  uptr free_tid : 24;
  // 2-nd 8 bytes
  uptr user_requested_size;
  AsanChunk *next;
  // Header2 empty.
};

static const uptr kChunkHeaderSize = 16;
static const uptr kChunkHeader2Size = 0;
#endif
COMPILER_CHECK(sizeof(ChunkBase) == kChunkHeaderSize + kChunkHeader2Size);

static uptr ComputeRZSize(uptr user_requested_size) {
  // FIXME: implement adaptive redzones.
  return flags()->redzone;
}

struct AsanChunk: ChunkBase {
  uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
  uptr UsedSize() { return user_requested_size; }
  // We store the alloc/free stack traces in the chunk itself.
  u32 *AllocStackBeg() {
    return (u32*)(Beg() - ComputeRZSize(UsedSize()));
  }
  uptr AllocStackSize() {
    return (ComputeRZSize(UsedSize()) - kChunkHeaderSize) / sizeof(u32);
  }
  u32 *FreeStackBeg() {
    return (u32*)(Beg() + kChunkHeader2Size);
  }
  uptr FreeStackSize() {
    uptr available = Max(RoundUpTo(UsedSize(), SHADOW_GRANULARITY),
                         ComputeRZSize(UsedSize()));
    return (available - kChunkHeader2Size) / sizeof(u32);
  }
};

uptr AsanChunkView::Beg() { return chunk_->Beg(); }
uptr AsanChunkView::End() { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }

void AsanChunkView::GetAllocStack(StackTrace *stack) {
  StackTrace::UncompressStack(stack, chunk_->AllocStackBeg(),
                              chunk_->AllocStackSize());
}

void AsanChunkView::GetFreeStack(StackTrace *stack) {
  StackTrace::UncompressStack(stack, chunk_->FreeStackBeg(),
                              chunk_->FreeStackSize());
}
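
// Quarantine flow: Deallocate() (below) does not return memory to the
// underlying allocator right away. Freed chunks are first pushed onto the
// current thread's AsanChunkFifoList; once that list grows past
// kMaxThreadLocalQuarantine bytes of user memory it is spliced into the
// global Quarantine, which pops and really deallocates chunks until its
// total size drops to flags()->quarantine_size. While queued, a chunk keeps
// the CHUNK_QUARANTINE state and poisoned shadow, so accesses to recently
// freed memory are reported as use-after-free instead of the memory being
// silently recycled.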

class Quarantine: public AsanChunkFifoList {
 public:
  void SwallowThreadLocalQuarantine(AsanThreadLocalMallocStorage *ms) {
    AsanChunkFifoList *q = &ms->quarantine_;
    if (!q->size()) return;
    SpinMutexLock l(&mutex_);
    PushList(q);
    PopAndDeallocateLoop(ms);
  }

  void BypassThreadLocalQuarantine(AsanChunk *m) {
    SpinMutexLock l(&mutex_);
    Push(m);
  }

 private:
  void PopAndDeallocateLoop(AsanThreadLocalMallocStorage *ms) {
    while (size() > (uptr)flags()->quarantine_size) {
      PopAndDeallocate(ms);
    }
  }
  void PopAndDeallocate(AsanThreadLocalMallocStorage *ms) {
    CHECK_GT(size(), 0);
    AsanChunk *m = Pop();
    CHECK(m);
    CHECK(m->chunk_state == CHUNK_QUARANTINE);
    m->chunk_state = CHUNK_AVAILABLE;
    CHECK_NE(m->alloc_tid, kInvalidTid);
    CHECK_NE(m->free_tid, kInvalidTid);
    PoisonShadow(m->Beg(),
                 RoundUpTo(m->user_requested_size, SHADOW_GRANULARITY),
                 kAsanHeapLeftRedzoneMagic);
    uptr alloc_beg = m->Beg() - ComputeRZSize(m->user_requested_size);
    void *p = reinterpret_cast<void *>(alloc_beg);
    if (m->from_memalign)
      p = allocator.GetBlockBegin(p);

    // Statistics.
    AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
    thread_stats.real_frees++;
    thread_stats.really_freed += m->UsedSize();

    allocator.Deallocate(GetAllocatorCache(ms), p);
  }
  SpinMutex mutex_;
};

static Quarantine quarantine;

void AsanChunkFifoList::PushList(AsanChunkFifoList *q) {
  CHECK(q->size() > 0);
  size_ += q->size();
  append_back(q);
  q->clear();
}

void AsanChunkFifoList::Push(AsanChunk *n) {
  push_back(n);
  size_ += n->UsedSize();
}

// Interesting performance observation: this function takes up to 15% of
// overall allocator time. That's because *first_ has been evicted from cache
// long time ago. Not sure if we can or want to do anything with this.
AsanChunk *AsanChunkFifoList::Pop() {
  CHECK(first_);
  AsanChunk *res = front();
  size_ -= res->UsedSize();
  pop_front();
  return res;
}

static void *Allocate(uptr size, uptr alignment, StackTrace *stack) {
  Init();
  CHECK(stack);
  if (alignment < 8) alignment = 8;
  if (size == 0)
    return reinterpret_cast<void *>(kReturnOnZeroMalloc);
  CHECK(IsPowerOfTwo(alignment));
  uptr rz_size = ComputeRZSize(size);
  uptr rounded_size = RoundUpTo(size, rz_size);
  uptr needed_size = rounded_size + rz_size;
  if (alignment > rz_size)
    needed_size += alignment;
  CHECK(IsAligned(needed_size, rz_size));
  if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
    Report("WARNING: AddressSanitizer failed to allocate %p bytes\n",
           (void*)size);
    return 0;
  }

  AsanThread *t = asanThreadRegistry().GetCurrent();
  // Printf("t = %p\n", t);
  CHECK(t);  // FIXME
  void *allocated = allocator.Allocate(
      GetAllocatorCache(&t->malloc_storage()), needed_size, 8, false);
  uptr alloc_beg = reinterpret_cast<uptr>(allocated);
  uptr alloc_end = alloc_beg + needed_size;
  uptr beg_plus_redzone = alloc_beg + rz_size;
  uptr user_beg = beg_plus_redzone;
  if (!IsAligned(user_beg, alignment))
    user_beg = RoundUpTo(user_beg, alignment);
  uptr user_end = user_beg + size;
  CHECK_LE(user_end, alloc_end);
  uptr chunk_beg = user_beg - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
  m->chunk_state = CHUNK_ALLOCATED;
  u32 alloc_tid = t ? t->tid() : 0;
  m->alloc_tid = alloc_tid;
  CHECK_EQ(alloc_tid, m->alloc_tid);  // Does alloc_tid fit into the bitfield?
  m->free_tid = kInvalidTid;
  m->from_memalign = user_beg != beg_plus_redzone;
  m->user_requested_size = size;
  StackTrace::CompressStack(stack, m->AllocStackBeg(), m->AllocStackSize());

  uptr size_rounded_down_to_granularity = RoundDownTo(size, SHADOW_GRANULARITY);
  // Unpoison the bulk of the memory region.
  if (size_rounded_down_to_granularity)
    PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
  // Deal with the end of the region if size is not aligned to granularity.
  if (size != size_rounded_down_to_granularity && flags()->poison_heap) {
    u8 *shadow = (u8*)MemToShadow(user_beg + size_rounded_down_to_granularity);
    *shadow = size & (SHADOW_GRANULARITY - 1);
  }

  AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
  thread_stats.mallocs++;
  thread_stats.malloced += size;

  void *res = reinterpret_cast<void *>(user_beg);
  ASAN_MALLOC_HOOK(res, size);
  return res;
}
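
// Deallocation path: Deallocate() (below) flips chunk_state with an atomic
// exchange on the first byte of the header, so of two racing (or repeated)
// frees of the same pointer exactly one observes CHUNK_ALLOCATED; the other
// sees CHUNK_QUARANTINE and is reported as a double-free. Freeing a pointer
// that was never returned by this allocator trips the "free not malloced"
// report instead.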

static void Deallocate(void *ptr, StackTrace *stack) {
  uptr p = reinterpret_cast<uptr>(ptr);
  if (p == 0 || p == kReturnOnZeroMalloc) return;
  uptr chunk_beg = p - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

  // Flip the chunk_state atomically to avoid race on double-free.
  u8 old_chunk_state = atomic_exchange((atomic_uint8_t*)m, CHUNK_QUARANTINE,
                                       memory_order_acq_rel);

  if (old_chunk_state == CHUNK_QUARANTINE)
    ReportDoubleFree((uptr)ptr, stack);
  else if (old_chunk_state != CHUNK_ALLOCATED)
    ReportFreeNotMalloced((uptr)ptr, stack);
  CHECK(old_chunk_state == CHUNK_ALLOCATED);

  CHECK_GE(m->alloc_tid, 0);
  if (SANITIZER_WORDSIZE == 64)  // On 32-bits this resides in user area.
    CHECK_EQ(m->free_tid, kInvalidTid);
  AsanThread *t = asanThreadRegistry().GetCurrent();
  m->free_tid = t ? t->tid() : 0;
  StackTrace::CompressStack(stack, m->FreeStackBeg(), m->FreeStackSize());
  CHECK(m->chunk_state == CHUNK_QUARANTINE);
  // Poison the region.
  PoisonShadow(m->Beg(),
               RoundUpTo(m->user_requested_size, SHADOW_GRANULARITY),
               kAsanHeapFreeMagic);

  AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
  thread_stats.frees++;
  thread_stats.freed += m->UsedSize();

  // Push into quarantine.
  if (t) {
    AsanChunkFifoList &q = t->malloc_storage().quarantine_;
    q.Push(m);

    if (q.size() > kMaxThreadLocalQuarantine)
      quarantine.SwallowThreadLocalQuarantine(&t->malloc_storage());
  } else {
    quarantine.BypassThreadLocalQuarantine(m);
  }

  ASAN_FREE_HOOK(ptr);
}

static void *Reallocate(void *old_ptr, uptr new_size, StackTrace *stack) {
  CHECK(old_ptr && new_size);
  uptr p = reinterpret_cast<uptr>(old_ptr);
  uptr chunk_beg = p - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

  CHECK(m->chunk_state == CHUNK_ALLOCATED);
  uptr old_size = m->UsedSize();
  uptr memcpy_size = Min(new_size, old_size);
  void *new_ptr = Allocate(new_size, 8, stack);
  if (new_ptr) {
    CHECK(REAL(memcpy) != 0);
    REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
    Deallocate(old_ptr, stack);
  }
  return new_ptr;
}

static AsanChunk *GetAsanChunkByAddr(uptr p) {
  uptr alloc_beg = reinterpret_cast<uptr>(
      allocator.GetBlockBegin(reinterpret_cast<void *>(p)));
  if (!alloc_beg) return 0;
  // FIXME: this does not take into account memalign.
  uptr chunk_beg = alloc_beg + ComputeRZSize(0) - kChunkHeaderSize;
  return reinterpret_cast<AsanChunk *>(chunk_beg);
}

static uptr AllocationSize(uptr p) {
  AsanChunk *m = GetAsanChunkByAddr(p);
  if (!m) return 0;
  if (m->chunk_state != CHUNK_ALLOCATED) return 0;
  if (m->Beg() != p) return 0;
  return m->UsedSize();
}
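
// Address-to-chunk lookup for error reporting: GetAsanChunkByAddr() asks the
// underlying allocator for the start of the block containing an address and
// derives the chunk header from it. When the faulting address lies in a
// redzone between two chunks, FindHeapChunkByAddress() (below) also probes a
// little to the left and lets ChooseChunk() pick the more plausible chunk:
// it prefers an allocated or quarantined chunk over an available one, and
// otherwise picks the chunk whose boundary is closer to the address.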
436 Printf("m1 %p m2 %p l %zd\n", m1, m2, l); 437 break; 438 } 439 if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset)) 440 m1 = ChooseChunk(addr, m2, m1); 441 } 442 return AsanChunkView(m1); 443} 444 445void AsanThreadLocalMallocStorage::CommitBack() { 446 quarantine.SwallowThreadLocalQuarantine(this); 447 allocator.SwallowCache(GetAllocatorCache(this)); 448} 449 450SANITIZER_INTERFACE_ATTRIBUTE 451void *asan_memalign(uptr alignment, uptr size, StackTrace *stack) { 452 return Allocate(size, alignment, stack); 453} 454 455SANITIZER_INTERFACE_ATTRIBUTE 456void asan_free(void *ptr, StackTrace *stack) { 457 Deallocate(ptr, stack); 458} 459 460SANITIZER_INTERFACE_ATTRIBUTE 461void *asan_malloc(uptr size, StackTrace *stack) { 462 return Allocate(size, 8, stack); 463} 464 465void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack) { 466 void *ptr = Allocate(nmemb * size, 8, stack); 467 if (ptr) 468 REAL(memset)(ptr, 0, nmemb * size); 469 return ptr; 470} 471 472void *asan_realloc(void *p, uptr size, StackTrace *stack) { 473 if (p == 0) 474 return Allocate(size, 8, stack); 475 if (size == 0) { 476 Deallocate(p, stack); 477 return 0; 478 } 479 return Reallocate(p, size, stack); 480} 481 482void *asan_valloc(uptr size, StackTrace *stack) { 483 return Allocate(size, GetPageSizeCached(), stack); 484} 485 486void *asan_pvalloc(uptr size, StackTrace *stack) { 487 uptr PageSize = GetPageSizeCached(); 488 size = RoundUpTo(size, PageSize); 489 if (size == 0) { 490 // pvalloc(0) should allocate one page. 491 size = PageSize; 492 } 493 return Allocate(size, PageSize, stack); 494} 495 496int asan_posix_memalign(void **memptr, uptr alignment, uptr size, 497 StackTrace *stack) { 498 void *ptr = Allocate(size, alignment, stack); 499 CHECK(IsAligned((uptr)ptr, alignment)); 500 *memptr = ptr; 501 return 0; 502} 503 504uptr asan_malloc_usable_size(void *ptr, StackTrace *stack) { 505 CHECK(stack); 506 if (ptr == 0) return 0; 507 uptr usable_size = AllocationSize(reinterpret_cast<uptr>(ptr)); 508 if (flags()->check_malloc_usable_size && (usable_size == 0)) 509 ReportMallocUsableSizeNotOwned((uptr)ptr, stack); 510 return usable_size; 511} 512 513uptr asan_mz_size(const void *ptr) { 514 UNIMPLEMENTED(); 515 return 0; 516} 517 518void asan_mz_force_lock() { 519 UNIMPLEMENTED(); 520} 521 522void asan_mz_force_unlock() { 523 UNIMPLEMENTED(); 524} 525 526} // namespace __asan 527 528// ---------------------- Interface ---------------- {{{1 529using namespace __asan; // NOLINT 530 531// ASan allocator doesn't reserve extra bytes, so normally we would 532// just return "size". We don't want to expose our redzone sizes, etc here. 533uptr __asan_get_estimated_allocated_size(uptr size) { 534 return size; 535} 536 537bool __asan_get_ownership(const void *p) { 538 return AllocationSize(reinterpret_cast<uptr>(p)) > 0; 539} 540 541uptr __asan_get_allocated_size(const void *p) { 542 if (p == 0) return 0; 543 uptr allocated_size = AllocationSize(reinterpret_cast<uptr>(p)); 544 // Die if p is not malloced or if it is already freed. 545 if (allocated_size == 0) { 546 GET_STACK_TRACE_FATAL_HERE; 547 ReportAsanGetAllocatedSizeNotOwned(reinterpret_cast<uptr>(p), &stack); 548 } 549 return allocated_size; 550} 551 552#if !SANITIZER_SUPPORTS_WEAK_HOOKS 553// Provide default (no-op) implementation of malloc hooks. 
554extern "C" { 555SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE 556void __asan_malloc_hook(void *ptr, uptr size) { 557 (void)ptr; 558 (void)size; 559} 560SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE 561void __asan_free_hook(void *ptr) { 562 (void)ptr; 563} 564} // extern "C" 565#endif 566 567 568#endif // ASAN_ALLOCATOR_VERSION 569