lsan_allocator.cc revision 200afbd8ba4904241c1ebcef4fa79d739ca01f73
//=-- lsan_allocator.cc ---------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// See lsan_allocator.h for details.
//
//===----------------------------------------------------------------------===//

#include "lsan_allocator.h"

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "lsan_common.h"

namespace __lsan {

// Requests larger than this are rejected (with a warning) rather than
// forwarded to the allocator: 3GB on 32-bit hosts, 8GB on 64-bit hosts.
static const uptr kMaxAllowedMallocSize =
    FIRST_32_SECOND_64(3UL << 30, 8UL << 30);

// Fixed virtual address range reserved for the primary allocator.
static const uptr kAllocatorSpace = 0x600000000000ULL;
static const uptr kAllocatorSize = 0x10000000000ULL;  // 1T.

// Per-chunk bookkeeping, stored in the allocator's out-of-line metadata
// region (see Metadata() below), not in the user-visible chunk itself.
// The first three fields are packed into a single 64-bit word.
struct ChunkMetadata {
  // Liveness flag. Must be first: RegisterAllocation/RegisterDeallocation
  // flip it by storing to the first byte of the struct through an
  // atomic_uint8_t pointer, so it must occupy exactly the leading byte.
  bool allocated : 8;  // Must be first.
  ChunkTag tag : 2;            // Leak classification (see lsan_common.h).
  uptr requested_size : 54;    // Size the user asked for (not rounded up).
  u32 stack_trace_id;          // Allocation stack, interned in the stack depot.
};

typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize,
        sizeof(ChunkMetadata), CompactSizeClassMap> PrimaryAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
          SecondaryAllocator> Allocator;

// Process-wide allocator instance and its per-thread free-list cache.
static Allocator allocator;
static THREADLOCAL AllocatorCache cache;

void InitializeAllocator() {
  allocator.Init();
}

// Called on thread exit: returns the thread's cached chunks to the
// global allocator so they are not lost.
void AllocatorThreadFinish() {
  allocator.SwallowCache(&cache);
}

// Returns the ChunkMetadata for a chunk, given a pointer to the chunk start.
static ChunkMetadata *Metadata(void *p) {
  return (ChunkMetadata *)allocator.GetMetaData(p);
}

// Fills in metadata for a freshly allocated chunk. The tag/size/stack fields
// are written first; setting 'allocated' is deliberately the last step, done
// as a relaxed single-byte atomic store to the leading byte of the struct,
// so the flag is never observed set while the other fields are stale.
static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) {
  if (!p) return;  // Allocation failed upstream; nothing to record.
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  // Allocations made while leak checking is disabled in this thread are
  // pre-tagged as ignored so they are never reported.
  m->tag = DisabledInThisThread() ? kIgnored : kDirectlyLeaked;
  m->stack_trace_id = StackDepotPut(stack.trace, stack.size);
  m->requested_size = size;
  atomic_store((atomic_uint8_t*)m, 1, memory_order_relaxed);
}

// Marks a chunk dead by clearing the 'allocated' byte (see comment above for
// why this is a single-byte atomic store).
static void RegisterDeallocation(void *p) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  atomic_store((atomic_uint8_t*)m, 0, memory_order_relaxed);
}

// Main allocation entry point. Returns 0 (after a warning) for requests
// exceeding kMaxAllowedMallocSize; zero-sized requests are bumped to 1 byte
// so every successful allocation has a distinct, dereferenceable address.
void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
               bool cleared) {
  if (size == 0)
    size = 1;
  if (size > kMaxAllowedMallocSize) {
    Report("WARNING: LeakSanitizer failed to allocate %p bytes\n",
           (void*)size);
    return 0;
  }
  void *p = allocator.Allocate(&cache, size, alignment, cleared);
  RegisterAllocation(stack, p, size);
  return p;
}

void Deallocate(void *p) {
  RegisterDeallocation(p);
  allocator.Deallocate(&cache, p);
}

// Realloc entry point. Note: if new_size is over the limit, the old block is
// freed and 0 is returned (unlike C realloc, which leaves the input intact).
void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
                 uptr alignment) {
  RegisterDeallocation(p);
  if (new_size > kMaxAllowedMallocSize) {
    Report("WARNING: LeakSanitizer failed to allocate %p bytes\n",
           (void*)new_size);
    allocator.Deallocate(&cache, p);
    return 0;
  }
  p = allocator.Reallocate(&cache, p, new_size, alignment);
  RegisterAllocation(stack, p, new_size);
  return p;
}

// Reports the address range of this thread's allocator cache, so the leak
// scanner can exclude it from the root set.
void GetAllocatorCacheRange(uptr *begin, uptr *end) {
  *begin = (uptr)&cache;
  *end = *begin + sizeof(cache);
}

// malloc_usable_size(): reports the size originally requested, not the
// (possibly larger) size class the chunk was rounded up to.
uptr GetMallocUsableSize(void *p) {
  ChunkMetadata *m = Metadata(p);
  if (!m) return 0;
  return m->requested_size;
}

///// Interface to the common LSan module. /////

void LockAllocator() {
  allocator.ForceLock();
}

void UnlockAllocator() {
  allocator.ForceUnlock();
}

// Address range of the allocator object itself, excluded from leak scanning.
void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&allocator;
  *end = *begin + sizeof(allocator);
}

// If p points into the user area of a live chunk, returns the chunk's start
// address; otherwise returns 0. Must be called with the allocator locked
// (uses the "FastLocked" block lookup).
void *PointsIntoChunk(void* p) {
  void *chunk = allocator.GetBlockBeginFastLocked(p);
  if (!chunk) return 0;
  // LargeMmapAllocator considers pointers to the meta-region of a chunk to be
  // valid, but we don't want that.
  if (p < chunk) return 0;
  ChunkMetadata *m = Metadata(chunk);
  CHECK(m);
  // Only count pointers within the first requested_size bytes; slack from
  // size-class rounding does not keep a chunk alive.
  if (m->allocated && (uptr)p < (uptr)chunk + m->requested_size)
    return chunk;
  return 0;
}

// The user pointer coincides with the chunk start: metadata is stored
// out-of-line (see Metadata()), so there is no in-chunk header to skip.
void *GetUserBegin(void *p) {
  return p;
}

LsanMetadata::LsanMetadata(void *chunk) {
  metadata_ = Metadata(chunk);
  CHECK(metadata_);
}

bool LsanMetadata::allocated() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->allocated;
}

ChunkTag LsanMetadata::tag() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->tag;
}

void LsanMetadata::set_tag(ChunkTag value) {
  reinterpret_cast<ChunkMetadata *>(metadata_)->tag = value;
}

uptr LsanMetadata::requested_size() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->requested_size;
}

u32 LsanMetadata::stack_trace_id() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->stack_trace_id;
}

// Applies 'callback' to every chunk known to the allocator. Explicitly
// instantiated below for each callback type used by lsan_common, since the
// template definition lives in this translation unit.
template<typename Callable>
void ForEachChunk(Callable const &callback) {
  allocator.ForEachChunk(callback);
}

template void ForEachChunk<ProcessPlatformSpecificAllocationsCb>(
    ProcessPlatformSpecificAllocationsCb const &callback);
template void ForEachChunk<PrintLeakedCb>(PrintLeakedCb const &callback);
template void ForEachChunk<CollectLeaksCb>(CollectLeaksCb const &callback);
template void ForEachChunk<MarkIndirectlyLeakedCb>(
    MarkIndirectlyLeakedCb const &callback);
template void ForEachChunk<CollectIgnoredCb>(
    CollectIgnoredCb const &callback);

// Tags the chunk containing p as ignored (implements __lsan_ignore_object).
// Uses the slower, lock-free GetBlockBegin lookup; caller holds the
// allocator lock per the "Locked" suffix convention.
IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  void *chunk = allocator.GetBlockBegin(p);
  if (!chunk || p < chunk) return kIgnoreObjectInvalid;
  ChunkMetadata *m = Metadata(chunk);
  CHECK(m);
  if (m->allocated && (uptr)p < (uptr)chunk + m->requested_size) {
    if (m->tag == kIgnored)
      return kIgnoreObjectAlreadyIgnored;
    m->tag = kIgnored;
    return kIgnoreObjectSuccess;
  } else {
    return kIgnoreObjectInvalid;
  }
}
}  // namespace __lsan