//===-- asan_allocator2.cc ------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Implementation of ASan's memory allocator, 2-nd version.
// This variant uses the allocator from sanitizer_common, i.e. the one shared
// with ThreadSanitizer and MemorySanitizer.
//
// Status: under development, not enabled by default yet.
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"

#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_quarantine.h"

namespace __asan {

struct AsanMapUnmapCallback {
  void OnMap(uptr p, uptr size) const {
    PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
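    // The freshly mapped region is poisoned right away: it stays inaccessible
    // until Allocate() carves a chunk out of it and unpoisons the user portion.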
    // Statistics.
    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.mmaps++;
    thread_stats.mmaped += size;
  }
  void OnUnmap(uptr p, uptr size) const {
    PoisonShadow(p, size, 0);
    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    // Since asan's mapping is compacting, the shadow chunk may be
    // not page-aligned, so we only flush the page-aligned portion.
    uptr page_size = GetPageSizeCached();
    uptr shadow_beg = RoundUpTo(MemToShadow(p), page_size);
    uptr shadow_end = RoundDownTo(MemToShadow(p + size), page_size);
    FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
    // Statistics.
    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.munmaps++;
    thread_stats.munmaped += size;
  }
};

#if SANITIZER_WORDSIZE == 64
#if defined(__powerpc64__)
const uptr kAllocatorSpace = 0xa0000000000ULL;
#else
const uptr kAllocatorSpace = 0x600000000000ULL;
#endif
const uptr kAllocatorSize  = 0x40000000000ULL;  // 4T.
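// The 64-bit primary allocator serves all requests from this single fixed
// virtual address range; anything it cannot place there is handled by the
// secondary (LargeMmapAllocator) defined below.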
typedef DefaultSizeClassMap SizeClassMap;
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0 /*metadata*/,
    SizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
#elif SANITIZER_WORDSIZE == 32
static const u64 kAddressSpaceSize = 1ULL << 32;
typedef CompactSizeClassMap SizeClassMap;
typedef SizeClassAllocator32<0, kAddressSpaceSize, 16,
  SizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
#endif

typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<AsanMapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
    SecondaryAllocator> Allocator;

// We can not use THREADLOCAL because it is not supported on some of the
// platforms we care about (OSX 10.6, Android).
// static THREADLOCAL AllocatorCache cache;
AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator2_cache));
  return reinterpret_cast<AllocatorCache *>(ms->allocator2_cache);
}

static Allocator allocator;

static const uptr kMaxAllowedMallocSize =
  FIRST_32_SECOND_64(3UL << 30, 8UL << 30);

static const uptr kMaxThreadLocalQuarantine =
  FIRST_32_SECOND_64(1 << 18, 1 << 20);

// Every chunk of memory allocated by this allocator can be in one of 3 states:
// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
enum {
  CHUNK_AVAILABLE  = 0,  // 0 is the default value even if we didn't set it.
  CHUNK_ALLOCATED  = 2,
  CHUNK_QUARANTINE = 3
};

// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
// We use adaptive redzones: for larger allocation larger redzones are used.
static u32 RZLog2Size(u32 rz_log) {
  CHECK_LT(rz_log, 8);
  return 16 << rz_log;
}

static u32 RZSize2Log(u32 rz_size) {
  CHECK_GE(rz_size, 16);
  CHECK_LE(rz_size, 2048);
  CHECK(IsPowerOfTwo(rz_size));
  u32 res = Log2(rz_size) - 4;
  CHECK_EQ(rz_size, RZLog2Size(res));
  return res;
}

static uptr ComputeRZLog(uptr user_requested_size) {
  u32 rz_log =
    user_requested_size <= 64        - 16   ? 0 :
    user_requested_size <= 128       - 32   ? 1 :
    user_requested_size <= 512       - 64   ? 2 :
    user_requested_size <= 4096      - 128  ? 3 :
    user_requested_size <= (1 << 14) - 256  ? 4 :
    user_requested_size <= (1 << 15) - 512  ? 5 :
    user_requested_size <= (1 << 16) - 1024 ? 6 : 7;
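  // For example, a 100-byte request falls into the "<= 512 - 64" bucket, so
  // rz_log is 2, i.e. 64-byte redzones, unless flags()->redzone asks for more.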
  return Max(rz_log, RZSize2Log(flags()->redzone));
}

// The memory chunk allocated from the underlying allocator looks like this:
// L L L L L L H H U U U U U U R R
//   L -- left redzone words (0 or more bytes)
//   H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
//   U -- user memory.
//   R -- right redzone (0 or more bytes)
// ChunkBase consists of ChunkHeader and other bytes that overlap with user
// memory.

// If a memory chunk is allocated by memalign and we had to increase the
// allocation size to achieve the proper alignment, then we store this magic
// value in the first uptr word of the memory block and store the address of
// ChunkBase in the next uptr.
// M B ? ? ? L L L L L L  H H U U U U U U
//   M -- magic value kMemalignMagic
//   B -- address of ChunkHeader pointing to the first 'H'
static const uptr kMemalignMagic = 0xCC6E96B9;

struct ChunkHeader {
  // 1-st 8 bytes.
  u32 chunk_state       : 8;  // Must be first.
  u32 alloc_tid         : 24;

  u32 free_tid          : 24;
  u32 from_memalign     : 1;
  u32 alloc_type        : 2;
  u32 rz_log            : 3;
  // 2-nd 8 bytes
  // This field is used for small sizes. For large sizes it is equal to
  // SizeClassMap::kMaxSize and the actual size is stored in the
  // SecondaryAllocator's metadata.
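  // (AsanChunk::UsedSize() below reads that metadata when needed.)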
  u32 user_requested_size;
  u32 alloc_context_id;
};

struct ChunkBase : ChunkHeader {
  // Header2, intersects with user memory.
  AsanChunk *next;
  u32 free_context_id;
};

static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
COMPILER_CHECK(kChunkHeaderSize == 16);
COMPILER_CHECK(kChunkHeader2Size <= 16);

struct AsanChunk: ChunkBase {
  uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
  uptr UsedSize() {
    if (user_requested_size != SizeClassMap::kMaxSize)
      return user_requested_size;
    return *reinterpret_cast<uptr *>(allocator.GetMetaData(AllocBeg()));
  }
  void *AllocBeg() {
    if (from_memalign)
      return allocator.GetBlockBegin(reinterpret_cast<void *>(this));
    return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log));
  }
  // We store the alloc/free stack traces in the chunk itself.
  u32 *AllocStackBeg() {
    return (u32*)(Beg() - RZLog2Size(rz_log));
  }
  uptr AllocStackSize() {
    CHECK_LE(RZLog2Size(rz_log), kChunkHeaderSize);
    return (RZLog2Size(rz_log) - kChunkHeaderSize) / sizeof(u32);
  }
  u32 *FreeStackBeg() {
    return (u32*)(Beg() + kChunkHeader2Size);
  }
  uptr FreeStackSize() {
    if (user_requested_size < kChunkHeader2Size) return 0;
    uptr available = RoundUpTo(user_requested_size, SHADOW_GRANULARITY);
    return (available - kChunkHeader2Size) / sizeof(u32);
  }
};

uptr AsanChunkView::Beg() { return chunk_->Beg(); }
uptr AsanChunkView::End() { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }

static void GetStackTraceFromId(u32 id, StackTrace *stack) {
  CHECK(id);
  uptr size = 0;
  const uptr *trace = StackDepotGet(id, &size);
  CHECK_LT(size, kStackTraceMax);
  internal_memcpy(stack->trace, trace, sizeof(uptr) * size);
  stack->size = size;
}

void AsanChunkView::GetAllocStack(StackTrace *stack) {
  if (flags()->use_stack_depot)
    GetStackTraceFromId(chunk_->alloc_context_id, stack);
  else
    StackTrace::UncompressStack(stack, chunk_->AllocStackBeg(),
                                chunk_->AllocStackSize());
}

void AsanChunkView::GetFreeStack(StackTrace *stack) {
  if (flags()->use_stack_depot)
    GetStackTraceFromId(chunk_->free_context_id, stack);
  else
    StackTrace::UncompressStack(stack, chunk_->FreeStackBeg(),
                                chunk_->FreeStackSize());
}

struct QuarantineCallback;
typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
typedef AsanQuarantine::Cache QuarantineCache;
static AsanQuarantine quarantine(LINKER_INITIALIZED);
static QuarantineCache
    fallback_quarantine_cache(LINKER_INITIALIZED);
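// The fallback cache and quarantine, protected by fallback_mutex, are used
// by Allocate()/Deallocate() whenever GetCurrentThread() returns no thread.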
static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex;

QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
  return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
}

struct QuarantineCallback {
  explicit QuarantineCallback(AllocatorCache *cache)
      : cache_(cache) {
  }

  void Recycle(AsanChunk *m) {
    CHECK(m->chunk_state == CHUNK_QUARANTINE);
    m->chunk_state = CHUNK_AVAILABLE;
    CHECK_NE(m->alloc_tid, kInvalidTid);
    CHECK_NE(m->free_tid, kInvalidTid);
    PoisonShadow(m->Beg(),
                 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
                 kAsanHeapLeftRedzoneMagic);
    void *p = reinterpret_cast<void *>(m->AllocBeg());
    if (m->from_memalign) {
      uptr *memalign_magic = reinterpret_cast<uptr *>(p);
      CHECK_EQ(memalign_magic[0], kMemalignMagic);
      CHECK_EQ(memalign_magic[1], reinterpret_cast<uptr>(m));
    }

    // Statistics.
    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.real_frees++;
    thread_stats.really_freed += m->UsedSize();

    allocator.Deallocate(cache_, p);
  }

  void *Allocate(uptr size) {
    return allocator.Allocate(cache_, size, 1, false);
  }

  void Deallocate(void *p) {
    allocator.Deallocate(cache_, p);
  }

  AllocatorCache *cache_;
};

void InitializeAllocator() {
  allocator.Init();
  quarantine.Init((uptr)flags()->quarantine_size, kMaxThreadLocalQuarantine);
}

static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
                      AllocType alloc_type, bool can_fill) {
  if (!asan_inited)
    __asan_init();
  Flags &fl = *flags();
  CHECK(stack);
  const uptr min_alignment = SHADOW_GRANULARITY;
  if (alignment < min_alignment)
    alignment = min_alignment;
  if (size == 0) {
    // We'd be happy to avoid allocating memory for zero-size requests, but
    // some programs/tests depend on this behavior and assume that malloc would
    // not return NULL even for zero-size allocations. Moreover, it looks like
    // operator new should never return NULL, and results of consecutive "new"
    // calls must be different even if the allocated size is zero.
    size = 1;
  }
  CHECK(IsPowerOfTwo(alignment));
  uptr rz_log = ComputeRZLog(size);
  uptr rz_size = RZLog2Size(rz_log);
  uptr rounded_size = RoundUpTo(size, alignment);
  if (rounded_size < kChunkHeader2Size)
    rounded_size = kChunkHeader2Size;
  uptr needed_size = rounded_size + rz_size;
  if (alignment > min_alignment)
    needed_size += alignment;
  bool using_primary_allocator = true;
  // If we are allocating from the secondary allocator, there will be no
  // automatic right redzone, so add the right redzone manually.
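  // (For primary-allocator chunks the next chunk's left redzone also acts as
  // this chunk's right redzone, so no extra bytes are needed there.)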
  if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) {
    needed_size += rz_size;
    using_primary_allocator = false;
  }
  CHECK(IsAligned(needed_size, min_alignment));
  if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
    Report("WARNING: AddressSanitizer failed to allocate %p bytes\n",
           (void*)size);
    return 0;
  }

  AsanThread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocated = allocator.Allocate(cache, needed_size, 8, false);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, needed_size, 8, false);
  }
  uptr alloc_beg = reinterpret_cast<uptr>(allocated);
  // Clear the first allocated word (an old kMemalignMagic may still be there).
  reinterpret_cast<uptr *>(alloc_beg)[0] = 0;
  uptr alloc_end = alloc_beg + needed_size;
  uptr beg_plus_redzone = alloc_beg + rz_size;
  uptr user_beg = beg_plus_redzone;
  if (!IsAligned(user_beg, alignment))
    user_beg = RoundUpTo(user_beg, alignment);
  uptr user_end = user_beg + size;
  CHECK_LE(user_end, alloc_end);
  uptr chunk_beg = user_beg - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
  m->chunk_state = CHUNK_ALLOCATED;
  m->alloc_type = alloc_type;
  m->rz_log = rz_log;
  u32 alloc_tid = t ? t->tid() : 0;
  m->alloc_tid = alloc_tid;
  CHECK_EQ(alloc_tid, m->alloc_tid);  // Does alloc_tid fit into the bitfield?
  m->free_tid = kInvalidTid;
  m->from_memalign = user_beg != beg_plus_redzone;
  if (m->from_memalign) {
    CHECK_LE(beg_plus_redzone + 2 * sizeof(uptr), user_beg);
    uptr *memalign_magic = reinterpret_cast<uptr *>(alloc_beg);
    memalign_magic[0] = kMemalignMagic;
    memalign_magic[1] = chunk_beg;
  }
  if (using_primary_allocator) {
    CHECK(size);
    m->user_requested_size = size;
    CHECK(allocator.FromPrimary(allocated));
  } else {
    CHECK(!allocator.FromPrimary(allocated));
    m->user_requested_size = SizeClassMap::kMaxSize;
    uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
    meta[0] = size;
    meta[1] = chunk_beg;
  }

  if (fl.use_stack_depot) {
    m->alloc_context_id = StackDepotPut(stack->trace, stack->size);
  } else {
    m->alloc_context_id = 0;
    StackTrace::CompressStack(stack, m->AllocStackBeg(), m->AllocStackSize());
  }

  uptr size_rounded_down_to_granularity = RoundDownTo(size, SHADOW_GRANULARITY);
  // Unpoison the bulk of the memory region.
  if (size_rounded_down_to_granularity)
    PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
  // Deal with the end of the region if size is not aligned to granularity.
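  // The shadow byte of the last (partial) granule records how many bytes of
  // that granule are addressable.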
  if (size != size_rounded_down_to_granularity && fl.poison_heap) {
    u8 *shadow = (u8*)MemToShadow(user_beg + size_rounded_down_to_granularity);
    *shadow = size & (SHADOW_GRANULARITY - 1);
  }

  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.mallocs++;
  thread_stats.malloced += size;
  thread_stats.malloced_redzones += needed_size - size;
  uptr class_id = Min(kNumberOfSizeClasses, SizeClassMap::ClassID(needed_size));
  thread_stats.malloced_by_size[class_id]++;
  if (needed_size > SizeClassMap::kMaxSize)
    thread_stats.malloc_large++;

  void *res = reinterpret_cast<void *>(user_beg);
  if (can_fill && fl.max_malloc_fill_size) {
    uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size);
    REAL(memset)(res, fl.malloc_fill_byte, fill_size);
  }
  ASAN_MALLOC_HOOK(res, size);
  return res;
}

static void Deallocate(void *ptr, StackTrace *stack, AllocType alloc_type) {
  uptr p = reinterpret_cast<uptr>(ptr);
  if (p == 0) return;
  ASAN_FREE_HOOK(ptr);
  uptr chunk_beg = p - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

  u8 old_chunk_state = CHUNK_ALLOCATED;
  // Flip the chunk_state atomically to avoid race on double-free.
  if (!atomic_compare_exchange_strong((atomic_uint8_t*)m, &old_chunk_state,
                                      CHUNK_QUARANTINE, memory_order_relaxed)) {
    if (old_chunk_state == CHUNK_QUARANTINE)
      ReportDoubleFree((uptr)ptr, stack);
    else
      ReportFreeNotMalloced((uptr)ptr, stack);
  }
  CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);

  if (m->alloc_type != alloc_type && flags()->alloc_dealloc_mismatch)
    ReportAllocTypeMismatch((uptr)ptr, stack,
                            (AllocType)m->alloc_type, (AllocType)alloc_type);

  CHECK_GE(m->alloc_tid, 0);
  if (SANITIZER_WORDSIZE == 64)  // On 32-bits this resides in user area.
    CHECK_EQ(m->free_tid, kInvalidTid);
  AsanThread *t = GetCurrentThread();
  m->free_tid = t ? t->tid() : 0;
  if (flags()->use_stack_depot) {
    m->free_context_id = StackDepotPut(stack->trace, stack->size);
  } else {
    m->free_context_id = 0;
    StackTrace::CompressStack(stack, m->FreeStackBeg(), m->FreeStackSize());
  }
  CHECK(m->chunk_state == CHUNK_QUARANTINE);
  // Poison the region.
  PoisonShadow(m->Beg(),
               RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
               kAsanHeapFreeMagic);

  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.frees++;
  thread_stats.freed += m->UsedSize();

  // Push into quarantine.
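  // The chunk stays poisoned while in the quarantine; its memory is returned
  // to the underlying allocator only later, in QuarantineCallback::Recycle().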
  if (t) {
    AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
    AllocatorCache *ac = GetAllocatorCache(ms);
    quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac),
                   m, m->UsedSize());
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *ac = &fallback_allocator_cache;
    quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac),
                   m, m->UsedSize());
  }
}

static void *Reallocate(void *old_ptr, uptr new_size, StackTrace *stack) {
  CHECK(old_ptr && new_size);
  uptr p = reinterpret_cast<uptr>(old_ptr);
  uptr chunk_beg = p - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.reallocs++;
  thread_stats.realloced += new_size;

  CHECK(m->chunk_state == CHUNK_ALLOCATED);
  uptr old_size = m->UsedSize();
  uptr memcpy_size = Min(new_size, old_size);
  void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
  if (new_ptr) {
    CHECK_NE(REAL(memcpy), (void*)0);
    REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
    Deallocate(old_ptr, stack, FROM_MALLOC);
  }
  return new_ptr;
}

static AsanChunk *GetAsanChunkByAddr(uptr p) {
  void *ptr = reinterpret_cast<void *>(p);
  uptr alloc_beg = reinterpret_cast<uptr>(allocator.GetBlockBegin(ptr));
  if (!alloc_beg) return 0;
  uptr *memalign_magic = reinterpret_cast<uptr *>(alloc_beg);
  if (memalign_magic[0] == kMemalignMagic) {
    AsanChunk *m = reinterpret_cast<AsanChunk *>(memalign_magic[1]);
    CHECK(m->from_memalign);
    return m;
  }
  if (!allocator.FromPrimary(ptr)) {
    uptr *meta = reinterpret_cast<uptr *>(
        allocator.GetMetaData(reinterpret_cast<void *>(alloc_beg)));
    AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]);
    return m;
  }
  uptr actual_size = allocator.GetActuallyAllocatedSize(ptr);
  CHECK_LE(actual_size, SizeClassMap::kMaxSize);
  // We know the actually allocated size, but we don't know the redzone size.
  // Just try all possible redzone sizes.
  for (u32 rz_log = 0; rz_log < 8; rz_log++) {
    u32 rz_size = RZLog2Size(rz_log);
    uptr max_possible_size = actual_size - rz_size;
    if (ComputeRZLog(max_possible_size) != rz_log)
      continue;
    return reinterpret_cast<AsanChunk *>(
        alloc_beg + rz_size - kChunkHeaderSize);
  }
  return 0;
}

static uptr AllocationSize(uptr p) {
  AsanChunk *m = GetAsanChunkByAddr(p);
  if (!m) return 0;
  if (m->chunk_state != CHUNK_ALLOCATED) return 0;
  if (m->Beg() != p) return 0;
  return m->UsedSize();
}

// We have an address between two chunks, and we want to report just one.
AsanChunk *ChooseChunk(uptr addr,
                       AsanChunk *left_chunk, AsanChunk *right_chunk) {
  // Prefer an allocated chunk over freed chunk and freed chunk
  // over available chunk.
  if (left_chunk->chunk_state != right_chunk->chunk_state) {
    if (left_chunk->chunk_state == CHUNK_ALLOCATED)
      return left_chunk;
    if (right_chunk->chunk_state == CHUNK_ALLOCATED)
      return right_chunk;
    if (left_chunk->chunk_state == CHUNK_QUARANTINE)
      return left_chunk;
    if (right_chunk->chunk_state == CHUNK_QUARANTINE)
      return right_chunk;
  }
  // Same chunk_state: choose based on offset.
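  // Both offsets are measured to the same address, so report the chunk whose
  // edge is closer to it.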
  sptr l_offset = 0, r_offset = 0;
  CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
  CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
  if (l_offset < r_offset)
    return left_chunk;
  return right_chunk;
}

AsanChunkView FindHeapChunkByAddress(uptr addr) {
  AsanChunk *m1 = GetAsanChunkByAddr(addr);
  if (!m1) return AsanChunkView(m1);
  sptr offset = 0;
  if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
    // The address is in the chunk's left redzone, so maybe it is actually
    // a right buffer overflow from the other chunk to the left.
    // Search a bit to the left to see if there is another chunk.
    AsanChunk *m2 = 0;
    for (uptr l = 1; l < GetPageSizeCached(); l++) {
      m2 = GetAsanChunkByAddr(addr - l);
      if (m2 == m1) continue;  // Still the same chunk.
      break;
    }
    if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
      m1 = ChooseChunk(addr, m2, m1);
  }
  return AsanChunkView(m1);
}

void AsanThreadLocalMallocStorage::CommitBack() {
  AllocatorCache *ac = GetAllocatorCache(this);
  quarantine.Drain(GetQuarantineCache(this), QuarantineCallback(ac));
  allocator.SwallowCache(GetAllocatorCache(this));
}

void PrintInternalAllocatorStats() {
  allocator.PrintStats();
}

SANITIZER_INTERFACE_ATTRIBUTE
void *asan_memalign(uptr alignment, uptr size, StackTrace *stack,
                    AllocType alloc_type) {
  return Allocate(size, alignment, stack, alloc_type, true);
}

SANITIZER_INTERFACE_ATTRIBUTE
void asan_free(void *ptr, StackTrace *stack, AllocType alloc_type) {
  Deallocate(ptr, stack, alloc_type);
}

SANITIZER_INTERFACE_ATTRIBUTE
void *asan_malloc(uptr size, StackTrace *stack) {
  return Allocate(size, 8, stack, FROM_MALLOC, true);
}

void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
  if (CallocShouldReturnNullDueToOverflow(size, nmemb)) return 0;
  void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
  // If the memory comes from the secondary allocator no need to clear it
  // as it comes directly from mmap.
  if (ptr && allocator.FromPrimary(ptr))
    REAL(memset)(ptr, 0, nmemb * size);
  return ptr;
}

void *asan_realloc(void *p, uptr size, StackTrace *stack) {
  if (p == 0)
    return Allocate(size, 8, stack, FROM_MALLOC, true);
  if (size == 0) {
    Deallocate(p, stack, FROM_MALLOC);
    return 0;
  }
  return Reallocate(p, size, stack);
}

void *asan_valloc(uptr size, StackTrace *stack) {
  return Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true);
}

void *asan_pvalloc(uptr size, StackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  size = RoundUpTo(size, PageSize);
  if (size == 0) {
    // pvalloc(0) should allocate one page.
    size = PageSize;
  }
  return Allocate(size, PageSize, stack, FROM_MALLOC, true);
}

int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        StackTrace *stack) {
  void *ptr = Allocate(size, alignment, stack, FROM_MALLOC, true);
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

uptr asan_malloc_usable_size(void *ptr, StackTrace *stack) {
  CHECK(stack);
  if (ptr == 0) return 0;
  uptr usable_size = AllocationSize(reinterpret_cast<uptr>(ptr));
  if (flags()->check_malloc_usable_size && (usable_size == 0))
    ReportMallocUsableSizeNotOwned((uptr)ptr, stack);
  return usable_size;
}

uptr asan_mz_size(const void *ptr) {
  return AllocationSize(reinterpret_cast<uptr>(ptr));
}

void asan_mz_force_lock() {
  allocator.ForceLock();
  fallback_mutex.Lock();
}

void asan_mz_force_unlock() {
  fallback_mutex.Unlock();
  allocator.ForceUnlock();
}

}  // namespace __asan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;  // NOLINT

// ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size". We don't want to expose our redzone sizes, etc. here.
uptr __asan_get_estimated_allocated_size(uptr size) {
  return size;
}

bool __asan_get_ownership(const void *p) {
  uptr ptr = reinterpret_cast<uptr>(p);
  return (AllocationSize(ptr) > 0);
}

uptr __asan_get_allocated_size(const void *p) {
  if (p == 0) return 0;
  uptr ptr = reinterpret_cast<uptr>(p);
  uptr allocated_size = AllocationSize(ptr);
  // Die if p is not malloced or if it is already freed.
  if (allocated_size == 0) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportAsanGetAllocatedSizeNotOwned(ptr, &stack);
  }
  return allocated_size;
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
extern "C" {
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
void __asan_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
void __asan_free_hook(void *ptr) {
  (void)ptr;
}
}  // extern "C"
#endif