lsan_allocator.cc revision 46ed75f54f1c3b7863ae74aec8c2c015d572f027
//=-- lsan_allocator.cc ---------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// See lsan_allocator.h for details.
//
//===----------------------------------------------------------------------===//

#include "lsan_allocator.h"

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "lsan_common.h"

namespace __lsan {

static const uptr kMaxAllowedMallocSize =
    FIRST_32_SECOND_64(3UL << 30, 8UL << 30);

static const uptr kAllocatorSpace = 0x600000000000ULL;
static const uptr kAllocatorSize  =  0x10000000000ULL;  // 1T.

struct ChunkMetadata {
  bool allocated : 8;  // Must be first.
  ChunkTag tag : 2;
  uptr requested_size : 54;
  u32 stack_trace_id;
};

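// LSan builds on the generic sanitizer_common allocator: a 64-bit size-class
// primary that stores a ChunkMetadata record out of line for every chunk, a
// per-thread cache in front of it, and a LargeMmapAllocator secondary for
// requests the primary cannot serve.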
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize,
        sizeof(ChunkMetadata), CompactSizeClassMap> PrimaryAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
          SecondaryAllocator> Allocator;

static Allocator allocator;
static THREADLOCAL AllocatorCache cache;
// All allocations made while this is > 0 will be treated as non-leaks.
static THREADLOCAL uptr lsan_disabled;

void InitializeAllocator() {
  allocator.Init();
}

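// Called when a thread dies: returns the chunks held in its thread-local
// cache to the global allocator so they are not lost.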
void AllocatorThreadFinish() {
  allocator.SwallowCache(&cache);
}

static ChunkMetadata *Metadata(void *p) {
  return (ChunkMetadata *)allocator.GetMetaData(p);
}

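// Fills in the chunk's metadata, then sets the 'allocated' flag (the first
// byte of ChunkMetadata, hence the "Must be first" comment above) with an
// atomic store last, so a chunk is never considered live while its metadata
// is still being written.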
static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  m->tag = lsan_disabled ? kSuppressed : kDirectlyLeaked;
  m->stack_trace_id = StackDepotPut(stack.trace, stack.size);
  m->requested_size = size;
  atomic_store((atomic_uint8_t*)m, 1, memory_order_relaxed);
}

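// Clears the 'allocated' flag atomically, so the chunk stops being treated
// as live before it is actually returned to the allocator.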
static void RegisterDeallocation(void *p) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  atomic_store((atomic_uint8_t*)m, 0, memory_order_relaxed);
}

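// malloc() semantics: zero-sized requests are bumped to one byte so that
// every allocation gets a distinct address, and requests above
// kMaxAllowedMallocSize fail with a warning instead of aborting.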
void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
               bool cleared) {
  if (size == 0)
    size = 1;
  if (size > kMaxAllowedMallocSize) {
    Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", size);
    return 0;
  }
  void *p = allocator.Allocate(&cache, size, alignment, cleared);
  RegisterAllocation(stack, p, size);
  return p;
}

void Deallocate(void *p) {
  RegisterDeallocation(p);
  allocator.Deallocate(&cache, p);
}

void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
                 uptr alignment) {
  RegisterDeallocation(p);
  if (new_size > kMaxAllowedMallocSize) {
    Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", new_size);
    allocator.Deallocate(&cache, p);
    return 0;
  }
  p = allocator.Reallocate(&cache, p, new_size, alignment);
  RegisterAllocation(stack, p, new_size);
  return p;
}

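// Exposes the address range occupied by this thread's allocator cache, so
// the common module can skip it when scanning thread-local storage: the
// cache holds pointers to freed chunks that would otherwise look like live
// references.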
void GetAllocatorCacheRange(uptr *begin, uptr *end) {
  *begin = (uptr)&cache;
  *end = *begin + sizeof(cache);
}

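// Reports the originally requested size rather than the rounded-up size
// class; returns 0 for pointers without metadata.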
uptr GetMallocUsableSize(void *p) {
  ChunkMetadata *m = Metadata(p);
  if (!m) return 0;
  return m->requested_size;
}

///// Interface to the common LSan module. /////

void LockAllocator() {
  allocator.ForceLock();
}

void UnlockAllocator() {
  allocator.ForceUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&allocator;
  *end = *begin + sizeof(allocator);
}

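// If p points anywhere into a live user chunk, returns the chunk's
// beginning; otherwise returns null.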
void *PointsIntoChunk(void* p) {
  void *chunk = allocator.GetBlockBeginFastLocked(p);
  if (!chunk) return 0;
  // LargeMmapAllocator considers pointers to the meta-region of a chunk to be
  // valid, but we don't want that.
  if (p < chunk) return 0;
  ChunkMetadata *m = Metadata(chunk);
  CHECK(m);
  if (m->allocated && (uptr)p < (uptr)chunk + m->requested_size)
    return chunk;
  return 0;
}

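// The user pointer and the chunk beginning coincide, because LSan keeps its
// metadata out of line rather than in a header in front of the chunk.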
void *GetUserBegin(void *p) {
  return p;
}

LsanMetadata::LsanMetadata(void *chunk) {
  metadata_ = Metadata(chunk);
  CHECK(metadata_);
}

bool LsanMetadata::allocated() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->allocated;
}

ChunkTag LsanMetadata::tag() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->tag;
}

void LsanMetadata::set_tag(ChunkTag value) {
  reinterpret_cast<ChunkMetadata *>(metadata_)->tag = value;
}

uptr LsanMetadata::requested_size() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->requested_size;
}

u32 LsanMetadata::stack_trace_id() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->stack_trace_id;
}

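// Applies callback to every chunk the allocator manages. The explicit
// instantiations below cover exactly the callback types used by the common
// LSan module, so the template definition can stay in this file.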
template<typename Callable>
void ForEachChunk(Callable const &callback) {
  allocator.ForEachChunk(callback);
}

template void ForEachChunk<ProcessPlatformSpecificAllocationsCb>(
    ProcessPlatformSpecificAllocationsCb const &callback);
template void ForEachChunk<PrintLeakedCb>(PrintLeakedCb const &callback);
template void ForEachChunk<CollectLeaksCb>(CollectLeaksCb const &callback);
template void ForEachChunk<MarkIndirectlyLeakedCb>(
    MarkIndirectlyLeakedCb const &callback);
template void ForEachChunk<CollectSuppressedCb>(
    CollectSuppressedCb const &callback);

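// Tags the chunk containing p as kSuppressed so it is never reported as a
// leak; p may point anywhere inside the chunk, not just at its beginning.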
IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  void *chunk = allocator.GetBlockBegin(p);
  if (!chunk || p < chunk) return kIgnoreObjectInvalid;
  ChunkMetadata *m = Metadata(chunk);
  CHECK(m);
  if (m->allocated && (uptr)p < (uptr)chunk + m->requested_size) {
    if (m->tag == kSuppressed)
      return kIgnoreObjectAlreadyIgnored;
    m->tag = kSuppressed;
    return kIgnoreObjectSuccess;
  } else {
    return kIgnoreObjectInvalid;
  }
}

}  // namespace __lsan

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_disable() {
  __lsan::lsan_disabled++;
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_enable() {
  if (!__lsan::lsan_disabled) {
    Report("Unmatched call to __lsan_enable().\n");
    Die();
  }
  __lsan::lsan_disabled--;
}
}  // extern "C"