//=-- lsan_allocator.cc ---------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// See lsan_allocator.h for details.
//
//===----------------------------------------------------------------------===//

#include "lsan_allocator.h"

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "lsan_common.h"

extern "C" void *memset(void *ptr, int value, uptr num);

namespace __lsan {

struct ChunkMetadata {
  bool allocated : 8;  // Must be first.
  ChunkTag tag : 2;
  uptr requested_size : 54;
  u32 stack_trace_id;
};

#if defined(__mips64)
static const uptr kMaxAllowedMallocSize = 4UL << 30;
static const uptr kRegionSizeLog = 20;
static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;
typedef CompactSizeClassMap SizeClassMap;
typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE,
    sizeof(ChunkMetadata), SizeClassMap, kRegionSizeLog, ByteMap>
    PrimaryAllocator;
#else
static const uptr kMaxAllowedMallocSize = 8UL << 30;
static const uptr kAllocatorSpace = 0x600000000000ULL;
static const uptr kAllocatorSize = 0x40000000000ULL;  // 4T.
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize,
        sizeof(ChunkMetadata), DefaultSizeClassMap> PrimaryAllocator;
#endif
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
          SecondaryAllocator> Allocator;

static Allocator allocator;
static THREADLOCAL AllocatorCache cache;

void InitializeAllocator() {
  allocator.InitLinkerInitialized(common_flags()->allocator_may_return_null);
}

void AllocatorThreadFinish() {
  allocator.SwallowCache(&cache);
}

static ChunkMetadata *Metadata(const void *p) {
  return reinterpret_cast<ChunkMetadata *>(allocator.GetMetaData(p));
}

static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  m->tag = DisabledInThisThread() ? kIgnored : kDirectlyLeaked;
  m->stack_trace_id = StackDepotPut(stack);
  m->requested_size = size;
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 1, memory_order_relaxed);
}

static void RegisterDeallocation(void *p) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 0, memory_order_relaxed);
}

void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
               bool cleared) {
  if (size == 0)
    size = 1;
  if (size > kMaxAllowedMallocSize) {
    Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", size);
    return 0;
  }
  void *p = allocator.Allocate(&cache, size, alignment, false);
  // Do not rely on the allocator to clear the memory (it's slow).
  if (cleared && allocator.FromPrimary(p))
    memset(p, 0, size);
  RegisterAllocation(stack, p, size);
  if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(p, size);
  return p;
}
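
// Illustrative sketch, not part of this file: Allocate() is driven by the
// malloc-family interceptors in lsan_interceptors.cc. Assuming the
// ENSURE_LSAN_INITED and GET_STACK_TRACE_MALLOC macros from that file (the
// latter defines a local StackTrace named 'stack'), the malloc interceptor
// reduces to roughly:
//
//   INTERCEPTOR(void*, malloc, uptr size) {
//     ENSURE_LSAN_INITED;
//     GET_STACK_TRACE_MALLOC;
//     return Allocate(stack, size, 1, kAlwaysClearMemory);
//   }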

void Deallocate(void *p) {
  if (&__sanitizer_free_hook) __sanitizer_free_hook(p);
  RegisterDeallocation(p);
  allocator.Deallocate(&cache, p);
}

void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
                 uptr alignment) {
  RegisterDeallocation(p);
  if (new_size > kMaxAllowedMallocSize) {
    Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", new_size);
    allocator.Deallocate(&cache, p);
    return 0;
  }
  p = allocator.Reallocate(&cache, p, new_size, alignment);
  RegisterAllocation(stack, p, new_size);
  return p;
}

void GetAllocatorCacheRange(uptr *begin, uptr *end) {
  *begin = (uptr)&cache;
  *end = *begin + sizeof(cache);
}

uptr GetMallocUsableSize(const void *p) {
  ChunkMetadata *m = Metadata(p);
  if (!m) return 0;
  return m->requested_size;
}

///// Interface to the common LSan module. /////

void LockAllocator() {
  allocator.ForceLock();
}

void UnlockAllocator() {
  allocator.ForceUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&allocator;
  *end = *begin + sizeof(allocator);
}

uptr PointsIntoChunk(void* p) {
  uptr addr = reinterpret_cast<uptr>(p);
  uptr chunk = reinterpret_cast<uptr>(allocator.GetBlockBeginFastLocked(p));
  if (!chunk) return 0;
  // LargeMmapAllocator considers pointers to the meta-region of a chunk to be
  // valid, but we don't want that.
  if (addr < chunk) return 0;
  ChunkMetadata *m = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(m);
  if (!m->allocated)
    return 0;
  if (addr < chunk + m->requested_size)
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, m->requested_size, addr))
    return chunk;
  return 0;
}

uptr GetUserBegin(uptr chunk) {
  return chunk;
}

LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(metadata_);
}

bool LsanMetadata::allocated() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->allocated;
}

ChunkTag LsanMetadata::tag() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->tag;
}

void LsanMetadata::set_tag(ChunkTag value) {
  reinterpret_cast<ChunkMetadata *>(metadata_)->tag = value;
}

uptr LsanMetadata::requested_size() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->requested_size;
}

u32 LsanMetadata::stack_trace_id() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->stack_trace_id;
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  allocator.ForEachChunk(callback, arg);
}

IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  void *chunk = allocator.GetBlockBegin(p);
  if (!chunk || p < chunk) return kIgnoreObjectInvalid;
  ChunkMetadata *m = Metadata(chunk);
  CHECK(m);
  if (m->allocated && (uptr)p < (uptr)chunk + m->requested_size) {
    if (m->tag == kIgnored)
      return kIgnoreObjectAlreadyIgnored;
    m->tag = kIgnored;
    return kIgnoreObjectSuccess;
  } else {
    return kIgnoreObjectInvalid;
  }
}
}  // namespace __lsan

using namespace __lsan;

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}
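
// Illustrative sketch, an assumption rather than code from this file: callers
// typically sample this counter around a live allocation. Because the primary
// allocator rounds requests up to a size class, only a lower bound holds:
//
//   uptr before = __sanitizer_get_current_allocated_bytes();
//   void *p = malloc(100);
//   // Expect at least 100 more bytes accounted while p is live.
//   CHECK_GE(__sanitizer_get_current_allocated_bytes(), before + 100);
//   free(p);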

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_free_bytes() { return 0; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_unmapped_bytes() { return 0; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

SANITIZER_INTERFACE_ATTRIBUTE
int __sanitizer_get_ownership(const void *p) { return Metadata(p) != 0; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_allocated_size(const void *p) {
  return GetMallocUsableSize(p);
}
}  // extern "C"
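
// Illustrative sketch (hypothetical user code, not part of this file): the
// interface above is exposed to instrumented programs through the public
// header <sanitizer/allocator_interface.h>, so heap bookkeeping can be
// sanity-checked like so:
//
//   #include <sanitizer/allocator_interface.h>
//   void *p = malloc(32);
//   assert(__sanitizer_get_ownership(p));
//   assert(__sanitizer_get_allocated_size(p) == 32);  // Requested size.
//   free(p);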