//=-- lsan_allocator.cc ---------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// See lsan_allocator.h for details.
//
//===----------------------------------------------------------------------===//

#include "lsan_allocator.h"

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "lsan_common.h"

// Declared manually: the sanitizer runtime avoids including libc headers.
extern "C" void *memset(void *ptr, int value, uptr num);

namespace __lsan {

static const uptr kMaxAllowedMallocSize = 8UL << 30;
static const uptr kAllocatorSpace = 0x600000000000ULL;
static const uptr kAllocatorSize = 0x40000000000ULL;  // 4T.

// Per-chunk metadata. The first three fields pack into a single 64-bit word;
// `allocated` must come first so it can be addressed as an atomic_uint8_t in
// RegisterAllocation()/RegisterDeallocation().
struct ChunkMetadata {
  bool allocated : 8;  // Must be first.
  ChunkTag tag : 2;
  uptr requested_size : 54;
  u32 stack_trace_id;
};

typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize,
        sizeof(ChunkMetadata), DefaultSizeClassMap> PrimaryAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
          SecondaryAllocator> Allocator;

static Allocator allocator;
static THREADLOCAL AllocatorCache cache;

void InitializeAllocator() {
  allocator.Init();
}

void AllocatorThreadFinish() {
  allocator.SwallowCache(&cache);
}

static ChunkMetadata *Metadata(void *p) {
  return reinterpret_cast<ChunkMetadata *>(allocator.GetMetaData(p));
}

static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  m->tag = DisabledInThisThread() ? kIgnored : kDirectlyLeaked;
  m->stack_trace_id = StackDepotPut(stack.trace, stack.size);
  m->requested_size = size;
  // Fill in the rest of the metadata first, then set the `allocated` flag by
  // atomically storing to the first byte of the chunk metadata.
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 1, memory_order_relaxed);
}

static void RegisterDeallocation(void *p) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  // Atomically clear the `allocated` flag before the chunk is freed.
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 0, memory_order_relaxed);
}

void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
               bool cleared) {
  if (size == 0)
    size = 1;
  if (size > kMaxAllowedMallocSize) {
    Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", size);
    return 0;
  }
  void *p = allocator.Allocate(&cache, size, alignment, false);
  // Do not rely on the allocator to clear the memory (it's slow). Chunks from
  // the secondary allocator come straight from mmap() and are already zeroed.
  if (cleared && allocator.FromPrimary(p))
    memset(p, 0, size);
  RegisterAllocation(stack, p, size);
  return p;
}

void Deallocate(void *p) {
  RegisterDeallocation(p);
  allocator.Deallocate(&cache, p);
}

void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
                 uptr alignment) {
  RegisterDeallocation(p);
  if (new_size > kMaxAllowedMallocSize) {
    Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", new_size);
    allocator.Deallocate(&cache, p);
    return 0;
  }
  p = allocator.Reallocate(&cache, p, new_size, alignment);
  RegisterAllocation(stack, p, new_size);
  return p;
}

void GetAllocatorCacheRange(uptr *begin, uptr *end) {
  *begin = (uptr)&cache;
  *end = *begin + sizeof(cache);
}

uptr GetMallocUsableSize(void *p) {
  ChunkMetadata *m = Metadata(p);
  if (!m) return 0;
  return m->requested_size;
}

///// Interface to the common LSan module. /////

void LockAllocator() {
  allocator.ForceLock();
}

void UnlockAllocator() {
  allocator.ForceUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&allocator;
  *end = *begin + sizeof(allocator);
}

uptr PointsIntoChunk(void* p) {
  uptr addr = reinterpret_cast<uptr>(p);
  uptr chunk = reinterpret_cast<uptr>(allocator.GetBlockBeginFastLocked(p));
  if (!chunk) return 0;
  // LargeMmapAllocator considers pointers to the meta-region of a chunk to be
  // valid, but we don't want that.
  if (addr < chunk) return 0;
  ChunkMetadata *m = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(m);
  if (!m->allocated)
    return 0;
  if (addr < chunk + m->requested_size)
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, m->requested_size, addr))
    return chunk;
  return 0;
}

uptr GetUserBegin(uptr chunk) {
  return chunk;
}

LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(metadata_);
}

bool LsanMetadata::allocated() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->allocated;
}

ChunkTag LsanMetadata::tag() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->tag;
}

void LsanMetadata::set_tag(ChunkTag value) {
  reinterpret_cast<ChunkMetadata *>(metadata_)->tag = value;
}

uptr LsanMetadata::requested_size() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->requested_size;
}

u32 LsanMetadata::stack_trace_id() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->stack_trace_id;
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  allocator.ForEachChunk(callback, arg);
}

IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  void *chunk = allocator.GetBlockBegin(p);
  if (!chunk || p < chunk) return kIgnoreObjectInvalid;
  ChunkMetadata *m = Metadata(chunk);
  CHECK(m);
  if (m->allocated && (uptr)p < (uptr)chunk + m->requested_size) {
    if (m->tag == kIgnored)
      return kIgnoreObjectAlreadyIgnored;
    m->tag = kIgnored;
    return kIgnoreObjectSuccess;
  } else {
    return kIgnoreObjectInvalid;
  }
}
}  // namespace __lsan