asan_allocator2.cc revision e11c5c5a8cd6e448ddf3c69f783eb655cf4aab01
//===-- asan_allocator2.cc ------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Implementation of ASan's memory allocator, second version.
// This variant uses the allocator from sanitizer_common, i.e. the one shared
// with ThreadSanitizer and MemorySanitizer.
//
// Status: under development, not enabled by default yet.
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"
#if ASAN_ALLOCATOR_VERSION == 2

#include "asan_mapping.h"
#include "asan_report.h"
#include "asan_thread.h"
#include "asan_thread_registry.h"
#include "sanitizer/asan_interface.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"

namespace __asan {

struct AsanMapUnmapCallback {
  void OnMap(uptr p, uptr size) const {
    PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
    // Statistics.
    AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
    thread_stats.mmaps++;
    thread_stats.mmaped += size;
    // thread_stats.mmaped_by_size[size_class] += n_chunks;
  }
  void OnUnmap(uptr p, uptr size) const {
    PoisonShadow(p, size, 0);
    // Statistics.
    AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
    thread_stats.munmaps++;
    thread_stats.munmaped += size;
  }
};

#if SANITIZER_WORDSIZE == 64
const uptr kAllocatorSpace = 0x600000000000ULL;
const uptr kAllocatorSize  =  0x10000000000ULL;  // 1T.
typedef DefaultSizeClassMap SizeClassMap;
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0 /*metadata*/,
    SizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
#elif SANITIZER_WORDSIZE == 32
static const u64 kAddressSpaceSize = 1ULL << 32;
typedef CompactSizeClassMap SizeClassMap;
typedef SizeClassAllocator32<0, kAddressSpaceSize, 16,
    SizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
#endif

typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<AsanMapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
    SecondaryAllocator> Allocator;

// We cannot use THREADLOCAL because it is not supported on some of the
// platforms we care about (OSX 10.6, Android).
// static THREADLOCAL AllocatorCache cache;
AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator2_cache));
  return reinterpret_cast<AllocatorCache *>(ms->allocator2_cache);
}

static Allocator allocator;

static const uptr kMaxAllowedMallocSize =
    FIRST_32_SECOND_64(3UL << 30, 8UL << 30);

static const uptr kMaxThreadLocalQuarantine =
    FIRST_32_SECOND_64(1 << 18, 1 << 20);

static const uptr kReturnOnZeroMalloc = 0x0123;  // Zero page is protected.

static int inited = 0;

static void Init() {
  if (inited) return;
  __asan_init();
  inited = true;  // This must happen before any threads are created.
  allocator.Init();
}
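
// Note (added for illustration; summarizes the typedefs above): allocation
// requests flow through the CombinedAllocator. Sizes that the size-class map
// can handle (up to SizeClassMap::kMaxSize) are served by the PrimaryAllocator
// through the per-thread AllocatorCache returned by GetAllocatorCache();
// larger requests fall through to the mmap-based SecondaryAllocator. This is
// roughly the call that Allocate() makes further down:
//   AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
//   void *p = allocator.Allocate(cache, needed_size, /*alignment*/ 8, false);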

// Every chunk of memory allocated by this allocator can be in one of 3 states:
// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
enum {
  CHUNK_AVAILABLE  = 1,
  CHUNK_ALLOCATED  = 2,
  CHUNK_QUARANTINE = 3
};

// The memory chunk allocated from the underlying allocator looks like this:
// L L L L L L H H U U U U U U R R
// L -- left redzone words (0 or more bytes)
// H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
// U -- user memory.
// R -- right redzone (0 or more bytes)
// ChunkBase consists of ChunkHeader and other bytes that overlap with user
// memory.

// If a memory chunk is allocated by memalign and we had to increase the
// allocation size to achieve the proper alignment, then we store this magic
// value in the first uptr word of the memory block and store the address of
// ChunkBase in the next uptr.
// M B ? ? ? L L L L L L H H U U U U U U
// M -- magic value kMemalignMagic
// B -- address of ChunkHeader pointing to the first 'H'
static const uptr kMemalignMagic = 0xCC6E96B9;

#if SANITIZER_WORDSIZE == 64
struct ChunkBase {
  // First 8 bytes.
  uptr chunk_state   : 8;  // Must be first.
  uptr alloc_tid     : 24;

  uptr free_tid      : 24;
  uptr from_memalign : 1;
  uptr alloc_type    : 2;
  // Second 8 bytes.
  uptr user_requested_size;
  // Header2 (intersects with user memory).
  // Third 8 bytes. These overlap with the user memory.
  AsanChunk *next;
};

static const uptr kChunkHeaderSize = 16;
static const uptr kChunkHeader2Size = 8;

#elif SANITIZER_WORDSIZE == 32
struct ChunkBase {
  // First 8 bytes.
  uptr chunk_state   : 8;  // Must be first.
  uptr alloc_tid     : 24;

  uptr from_memalign : 1;
  uptr alloc_type    : 2;
  uptr free_tid      : 24;
  // Second 8 bytes.
  uptr user_requested_size;
  AsanChunk *next;
  // Header2 is empty.
};

static const uptr kChunkHeaderSize = 16;
static const uptr kChunkHeader2Size = 0;
#endif
COMPILER_CHECK(sizeof(ChunkBase) == kChunkHeaderSize + kChunkHeader2Size);

static uptr ComputeRZSize(uptr user_requested_size) {
  // FIXME: implement adaptive redzones.
  return flags()->redzone;
}
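
// Worked example (added for illustration; the numbers assume
// flags()->redzone == 128, which is configurable): for malloc(40) the
// Allocate() path below computes
//   rz_size      = ComputeRZSize(40)           = 128
//   rounded_size = RoundUpTo(40, rz_size)      = 128
//   needed_size  = rounded_size + rz_size      = 256
//   user_beg     = alloc_beg + rz_size
//   chunk_beg    = user_beg - kChunkHeaderSize
// i.e. the 16-byte ChunkHeader occupies the tail of the left redzone, and the
// compressed allocation stack (see AsanChunk::AllocStackBeg() below) lives in
// the redzone bytes before it.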

struct AsanChunk: ChunkBase {
  uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
  uptr UsedSize() { return user_requested_size; }
  // We store the alloc/free stack traces in the chunk itself.
  u32 *AllocStackBeg() {
    return (u32*)(Beg() - ComputeRZSize(UsedSize()));
  }
  uptr AllocStackSize() {
    return (ComputeRZSize(UsedSize()) - kChunkHeaderSize) / sizeof(u32);
  }
  u32 *FreeStackBeg() {
    return (u32*)(Beg() + kChunkHeader2Size);
  }
  uptr FreeStackSize() {
    uptr available = Max(RoundUpTo(UsedSize(), SHADOW_GRANULARITY),
                         ComputeRZSize(UsedSize()));
    return (available - kChunkHeader2Size) / sizeof(u32);
  }
};

uptr AsanChunkView::Beg() { return chunk_->Beg(); }
uptr AsanChunkView::End() { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }

void AsanChunkView::GetAllocStack(StackTrace *stack) {
  StackTrace::UncompressStack(stack, chunk_->AllocStackBeg(),
                              chunk_->AllocStackSize());
}

void AsanChunkView::GetFreeStack(StackTrace *stack) {
  StackTrace::UncompressStack(stack, chunk_->FreeStackBeg(),
                              chunk_->FreeStackSize());
}

class Quarantine: public AsanChunkFifoList {
 public:
  void SwallowThreadLocalQuarantine(AsanThreadLocalMallocStorage *ms) {
    AsanChunkFifoList *q = &ms->quarantine_;
    if (!q->size()) return;
    SpinMutexLock l(&mutex_);
    PushList(q);
    PopAndDeallocateLoop(ms);
  }

  void BypassThreadLocalQuarantine(AsanChunk *m) {
    SpinMutexLock l(&mutex_);
    Push(m);
  }

 private:
  void PopAndDeallocateLoop(AsanThreadLocalMallocStorage *ms) {
    while (size() > (uptr)flags()->quarantine_size) {
      PopAndDeallocate(ms);
    }
  }
  void PopAndDeallocate(AsanThreadLocalMallocStorage *ms) {
    CHECK_GT(size(), 0);
    AsanChunk *m = Pop();
    CHECK(m);
    CHECK(m->chunk_state == CHUNK_QUARANTINE);
    m->chunk_state = CHUNK_AVAILABLE;
    CHECK_NE(m->alloc_tid, kInvalidTid);
    CHECK_NE(m->free_tid, kInvalidTid);
    PoisonShadow(m->Beg(),
                 RoundUpTo(m->user_requested_size, SHADOW_GRANULARITY),
                 kAsanHeapLeftRedzoneMagic);
    uptr alloc_beg = m->Beg() - ComputeRZSize(m->user_requested_size);
    void *p = reinterpret_cast<void *>(alloc_beg);
    if (m->from_memalign) {
      p = allocator.GetBlockBegin(p);
      uptr *memalign_magic = reinterpret_cast<uptr *>(p);
      CHECK_EQ(memalign_magic[0], kMemalignMagic);
      CHECK_EQ(memalign_magic[1], reinterpret_cast<uptr>(m));
    }

    // Statistics.
    AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
    thread_stats.real_frees++;
    thread_stats.really_freed += m->UsedSize();

    allocator.Deallocate(GetAllocatorCache(ms), p);
  }
  SpinMutex mutex_;
};

static Quarantine quarantine;

void AsanChunkFifoList::PushList(AsanChunkFifoList *q) {
  CHECK(q->size() > 0);
  size_ += q->size();
  append_back(q);
  q->clear();
}

void AsanChunkFifoList::Push(AsanChunk *n) {
  push_back(n);
  size_ += n->UsedSize();
}

// Interesting performance observation: this function takes up to 15% of
// overall allocator time. That's because *first_ has been evicted from cache
// a long time ago. Not sure if we can or want to do anything with this.
AsanChunk *AsanChunkFifoList::Pop() {
  CHECK(first_);
  AsanChunk *res = front();
  size_ -= res->UsedSize();
  pop_front();
  return res;
}
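
// Summary (added for illustration) of how the quarantine above is used by
// Deallocate() below: freed chunks are first pushed onto the owning thread's
// malloc_storage().quarantine_ list; once that list exceeds
// kMaxThreadLocalQuarantine bytes it is spliced into the global `quarantine`
// under its spin lock, which then recycles chunks until the global list drops
// below flags()->quarantine_size bytes. Frees that happen without an
// AsanThread (t == 0) skip the thread-local stage via
// BypassThreadLocalQuarantine().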

static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
                      AllocType alloc_type) {
  Init();
  CHECK(stack);
  if (alignment < 8) alignment = 8;
  if (size == 0)
    return reinterpret_cast<void *>(kReturnOnZeroMalloc);
  CHECK(IsPowerOfTwo(alignment));
  uptr rz_size = ComputeRZSize(size);
  uptr rounded_size = RoundUpTo(size, rz_size);
  uptr needed_size = rounded_size + rz_size;
  if (alignment > rz_size)
    needed_size += alignment;
  CHECK(IsAligned(needed_size, rz_size));
  if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
    Report("WARNING: AddressSanitizer failed to allocate %p bytes\n",
           (void*)size);
    return 0;
  }

  AsanThread *t = asanThreadRegistry().GetCurrent();
  // Printf("t = %p\n", t);
  CHECK(t);  // FIXME
  void *allocated = allocator.Allocate(
      GetAllocatorCache(&t->malloc_storage()), needed_size, 8, false);
  uptr alloc_beg = reinterpret_cast<uptr>(allocated);
  uptr alloc_end = alloc_beg + needed_size;
  uptr beg_plus_redzone = alloc_beg + rz_size;
  uptr user_beg = beg_plus_redzone;
  if (!IsAligned(user_beg, alignment))
    user_beg = RoundUpTo(user_beg, alignment);
  uptr user_end = user_beg + size;
  CHECK_LE(user_end, alloc_end);
  uptr chunk_beg = user_beg - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
  m->chunk_state = CHUNK_ALLOCATED;
  m->alloc_type = alloc_type;
  u32 alloc_tid = t ? t->tid() : 0;
  m->alloc_tid = alloc_tid;
  CHECK_EQ(alloc_tid, m->alloc_tid);  // Does alloc_tid fit into the bitfield?
  m->free_tid = kInvalidTid;
  m->from_memalign = user_beg != beg_plus_redzone;
  if (m->from_memalign) {
    CHECK_LE(beg_plus_redzone + 2 * sizeof(uptr), user_beg);
    uptr *memalign_magic = reinterpret_cast<uptr *>(alloc_beg);
    memalign_magic[0] = kMemalignMagic;
    memalign_magic[1] = chunk_beg;
  }
  m->user_requested_size = size;
  StackTrace::CompressStack(stack, m->AllocStackBeg(), m->AllocStackSize());

  uptr size_rounded_down_to_granularity = RoundDownTo(size, SHADOW_GRANULARITY);
  // Unpoison the bulk of the memory region.
  if (size_rounded_down_to_granularity)
    PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
  // Deal with the end of the region if size is not aligned to granularity.
  if (size != size_rounded_down_to_granularity && flags()->poison_heap) {
    u8 *shadow = (u8*)MemToShadow(user_beg + size_rounded_down_to_granularity);
    *shadow = size & (SHADOW_GRANULARITY - 1);
  }

  AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
  thread_stats.mallocs++;
  thread_stats.malloced += size;
  thread_stats.malloced_redzones += needed_size - size;
  uptr class_id = Min(kNumberOfSizeClasses, SizeClassMap::ClassID(needed_size));
  thread_stats.malloced_by_size[class_id]++;
  if (needed_size > SizeClassMap::kMaxSize)
    thread_stats.malloc_large++;

  void *res = reinterpret_cast<void *>(user_beg);
  ASAN_MALLOC_HOOK(res, size);
  return res;
}
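
// Shadow-poisoning example (added for illustration; assumes
// SHADOW_GRANULARITY == 8): for malloc(13) the code above unpoisons the first
// 8 bytes in bulk (size_rounded_down_to_granularity == 8) and then, with
// flags()->poison_heap on, writes 13 & 7 == 5 into the shadow byte covering
// bytes [8, 16), meaning "only the first 5 bytes of this granule are
// addressable". The right redzone past user_end is left poisoned with
// kAsanHeapLeftRedzoneMagic (set in OnMap() and restored when quarantined
// chunks are recycled).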

static void Deallocate(void *ptr, StackTrace *stack, AllocType alloc_type) {
  uptr p = reinterpret_cast<uptr>(ptr);
  if (p == 0 || p == kReturnOnZeroMalloc) return;
  uptr chunk_beg = p - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

  // Flip the chunk_state atomically to avoid race on double-free.
  u8 old_chunk_state = atomic_exchange((atomic_uint8_t*)m, CHUNK_QUARANTINE,
                                       memory_order_acq_rel);

  if (old_chunk_state == CHUNK_QUARANTINE)
    ReportDoubleFree((uptr)ptr, stack);
  else if (old_chunk_state != CHUNK_ALLOCATED)
    ReportFreeNotMalloced((uptr)ptr, stack);
  CHECK(old_chunk_state == CHUNK_ALLOCATED);
  if (m->alloc_type != alloc_type && flags()->alloc_dealloc_mismatch)
    ReportAllocTypeMismatch((uptr)ptr, stack,
                            (AllocType)m->alloc_type, (AllocType)alloc_type);

  CHECK_GE(m->alloc_tid, 0);
  if (SANITIZER_WORDSIZE == 64)  // On 32-bits this resides in user area.
    CHECK_EQ(m->free_tid, kInvalidTid);
  AsanThread *t = asanThreadRegistry().GetCurrent();
  m->free_tid = t ? t->tid() : 0;
  StackTrace::CompressStack(stack, m->FreeStackBeg(), m->FreeStackSize());
  CHECK(m->chunk_state == CHUNK_QUARANTINE);
  // Poison the region.
  PoisonShadow(m->Beg(),
               RoundUpTo(m->user_requested_size, SHADOW_GRANULARITY),
               kAsanHeapFreeMagic);

  AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
  thread_stats.frees++;
  thread_stats.freed += m->UsedSize();

  // Push into quarantine.
  if (t) {
    AsanChunkFifoList &q = t->malloc_storage().quarantine_;
    q.Push(m);

    if (q.size() > kMaxThreadLocalQuarantine)
      quarantine.SwallowThreadLocalQuarantine(&t->malloc_storage());
  } else {
    quarantine.BypassThreadLocalQuarantine(m);
  }

  ASAN_FREE_HOOK(ptr);
}
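
// Note (added for illustration) on the atomic_exchange above: chunk_state is
// the first byte of the header ("Must be first"), so exchanging it for
// CHUNK_QUARANTINE both marks the chunk as freed and captures the previous
// state in one atomic step. If two threads race to free the same pointer,
// exactly one observes CHUNK_ALLOCATED; the other sees CHUNK_QUARANTINE and
// gets ReportDoubleFree(). Any other previous value means the pointer did not
// come from an allocated chunk and triggers ReportFreeNotMalloced().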

static void *Reallocate(void *old_ptr, uptr new_size, StackTrace *stack) {
  CHECK(old_ptr && new_size);
  uptr p = reinterpret_cast<uptr>(old_ptr);
  uptr chunk_beg = p - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

  AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
  thread_stats.reallocs++;
  thread_stats.realloced += new_size;

  CHECK(m->chunk_state == CHUNK_ALLOCATED);
  uptr old_size = m->UsedSize();
  uptr memcpy_size = Min(new_size, old_size);
  void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC);
  if (new_ptr) {
    CHECK(REAL(memcpy) != 0);
    REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
    Deallocate(old_ptr, stack, FROM_MALLOC);
  }
  return new_ptr;
}

static AsanChunk *GetAsanChunkByAddr(uptr p) {
  uptr alloc_beg = reinterpret_cast<uptr>(
      allocator.GetBlockBegin(reinterpret_cast<void *>(p)));
  if (!alloc_beg) return 0;
  uptr *memalign_magic = reinterpret_cast<uptr *>(alloc_beg);
  if (memalign_magic[0] == kMemalignMagic) {
    AsanChunk *m = reinterpret_cast<AsanChunk *>(memalign_magic[1]);
    CHECK(m->from_memalign);
    return m;
  }
  uptr chunk_beg = alloc_beg + ComputeRZSize(0) - kChunkHeaderSize;
  return reinterpret_cast<AsanChunk *>(chunk_beg);
}

static uptr AllocationSize(uptr p) {
  AsanChunk *m = GetAsanChunkByAddr(p);
  if (!m) return 0;
  if (m->chunk_state != CHUNK_ALLOCATED) return 0;
  if (m->Beg() != p) return 0;
  return m->UsedSize();
}

// We have an address between two chunks, and we want to report just one.
AsanChunk *ChooseChunk(uptr addr,
                       AsanChunk *left_chunk, AsanChunk *right_chunk) {
  // Prefer an allocated chunk or a chunk from quarantine.
  if (left_chunk->chunk_state == CHUNK_AVAILABLE &&
      right_chunk->chunk_state != CHUNK_AVAILABLE)
    return right_chunk;
  if (right_chunk->chunk_state == CHUNK_AVAILABLE &&
      left_chunk->chunk_state != CHUNK_AVAILABLE)
    return left_chunk;
  // Choose based on offset.
  uptr l_offset = 0, r_offset = 0;
  CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
  CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
  if (l_offset < r_offset)
    return left_chunk;
  return right_chunk;
}
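
// Note (added for illustration): GetAsanChunkByAddr() above relies on
// GetBlockBegin() returning the start of the underlying allocator block. If
// that block starts with kMemalignMagic, the second word points directly at
// the AsanChunk; otherwise the header is assumed to sit at
// alloc_beg + ComputeRZSize(0) - kChunkHeaderSize. ChooseChunk() is used by
// FindHeapChunkByAddress() below when an address falls between two chunks:
// an allocated or quarantined chunk is preferred over an available one, and
// otherwise the chunk whose edge is nearer to the address wins.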

AsanChunkView FindHeapChunkByAddress(uptr addr) {
  AsanChunk *m1 = GetAsanChunkByAddr(addr);
  if (!m1) return AsanChunkView(m1);
  uptr offset = 0;
  if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
    // The address is in the chunk's left redzone, so maybe it is actually
    // a right buffer overflow from the other chunk to the left.
    // Search a bit to the left to see if there is another chunk.
    AsanChunk *m2 = 0;
    for (uptr l = 1; l < GetPageSizeCached(); l++) {
      m2 = GetAsanChunkByAddr(addr - l);
      if (m2 == m1) continue;  // Still the same chunk.
      Printf("m1 %p m2 %p l %zd\n", m1, m2, l);
      break;
    }
    if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
      m1 = ChooseChunk(addr, m2, m1);
  }
  return AsanChunkView(m1);
}

void AsanThreadLocalMallocStorage::CommitBack() {
  quarantine.SwallowThreadLocalQuarantine(this);
  allocator.SwallowCache(GetAllocatorCache(this));
}

SANITIZER_INTERFACE_ATTRIBUTE
void *asan_memalign(uptr alignment, uptr size, StackTrace *stack,
                    AllocType alloc_type) {
  return Allocate(size, alignment, stack, alloc_type);
}

SANITIZER_INTERFACE_ATTRIBUTE
void asan_free(void *ptr, StackTrace *stack, AllocType alloc_type) {
  Deallocate(ptr, stack, alloc_type);
}

SANITIZER_INTERFACE_ATTRIBUTE
void *asan_malloc(uptr size, StackTrace *stack) {
  return Allocate(size, 8, stack, FROM_MALLOC);
}

void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
  void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC);
  if (ptr)
    REAL(memset)(ptr, 0, nmemb * size);
  return ptr;
}

void *asan_realloc(void *p, uptr size, StackTrace *stack) {
  if (p == 0)
    return Allocate(size, 8, stack, FROM_MALLOC);
  if (size == 0) {
    Deallocate(p, stack, FROM_MALLOC);
    return 0;
  }
  return Reallocate(p, size, stack);
}

void *asan_valloc(uptr size, StackTrace *stack) {
  return Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC);
}

void *asan_pvalloc(uptr size, StackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  size = RoundUpTo(size, PageSize);
  if (size == 0) {
    // pvalloc(0) should allocate one page.
    size = PageSize;
  }
  return Allocate(size, PageSize, stack, FROM_MALLOC);
}

int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        StackTrace *stack) {
  void *ptr = Allocate(size, alignment, stack, FROM_MALLOC);
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

uptr asan_malloc_usable_size(void *ptr, StackTrace *stack) {
  CHECK(stack);
  if (ptr == 0) return 0;
  uptr usable_size = AllocationSize(reinterpret_cast<uptr>(ptr));
  if (flags()->check_malloc_usable_size && (usable_size == 0))
    ReportMallocUsableSizeNotOwned((uptr)ptr, stack);
  return usable_size;
}

uptr asan_mz_size(const void *ptr) {
  UNIMPLEMENTED();
  return 0;
}

void asan_mz_force_lock() {
  UNIMPLEMENTED();
}

void asan_mz_force_unlock() {
  UNIMPLEMENTED();
}

}  // namespace __asan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;  // NOLINT

// The ASan allocator doesn't reserve extra bytes, so normally we would just
// return "size". We don't want to expose our redzone sizes, etc., here.
uptr __asan_get_estimated_allocated_size(uptr size) {
  return size;
}

bool __asan_get_ownership(const void *p) {
  return AllocationSize(reinterpret_cast<uptr>(p)) > 0;
}

uptr __asan_get_allocated_size(const void *p) {
  if (p == 0) return 0;
  uptr allocated_size = AllocationSize(reinterpret_cast<uptr>(p));
  // Die if p is not malloced or if it is already freed.
  if (allocated_size == 0) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportAsanGetAllocatedSizeNotOwned(reinterpret_cast<uptr>(p), &stack);
  }
  return allocated_size;
}
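
// Usage sketch (added for illustration; hypothetical caller, not part of this
// file): a tool linked against ASan can query heap pointers through the
// interface above, e.g.
//   if (__asan_get_ownership(p)) {
//     uptr n = __asan_get_allocated_size(p);  // safe: p is owned, so n > 0
//     ...
//   }
// Calling __asan_get_allocated_size() on a pointer the allocator does not own
// (e.g. one that was already freed) produces a fatal report rather than
// returning 0, and __asan_get_estimated_allocated_size() deliberately just
// echoes the request size so that the redzone layout stays an internal detail.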

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
extern "C" {
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
void __asan_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
void __asan_free_hook(void *ptr) {
  (void)ptr;
}
}  // extern "C"
#endif

#endif  // ASAN_ALLOCATOR_VERSION