// lsan_allocator.cc revision 29b756851c04df07f1584dfd77021dd2f37a7391
//=-- lsan_allocator.cc ---------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// See lsan_allocator.h for details.
//
//===----------------------------------------------------------------------===//

15#include "lsan_allocator.h"
16
17#include "sanitizer_common/sanitizer_allocator.h"
18#include "sanitizer_common/sanitizer_internal_defs.h"
19#include "sanitizer_common/sanitizer_stackdepot.h"
20#include "sanitizer_common/sanitizer_stacktrace.h"
21#include "lsan_common.h"
22
23namespace __lsan {
24
// Requests above this size are refused (3GB on 32-bit hosts, 8GB on 64-bit);
// see Allocate() / Reallocate() below.
static const uptr kMaxAllowedMallocSize =
    FIRST_32_SECOND_64(3UL << 30, 8UL << 30);

// Fixed virtual address range reserved for the primary allocator.
static const uptr kAllocatorSpace = 0x600000000000ULL;
static const uptr kAllocatorSize  =  0x10000000000ULL;  // 1T.
30
// Per-chunk bookkeeping, stored out-of-line by the allocator (its size is
// passed as the metadata size to SizeClassAllocator64 below).
struct ChunkMetadata {
  // Register{Allocation,Deallocation} write this field by casting the whole
  // struct to atomic_uint8_t*, so it must occupy the first byte.
  bool allocated : 8;  // Must be first.
  ChunkTag tag : 2;
  // 8 + 2 + 54 bits pack the first three fields into a single 64-bit word.
  uptr requested_size : 54;
  u32 stack_trace_id;  // Handle into the stack depot (see RegisterAllocation).
};
37
// Primary allocator: size-class based, serving from the fixed region above
// and reserving sizeof(ChunkMetadata) metadata bytes per chunk.
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize,
        sizeof(ChunkMetadata), CompactSizeClassMap> PrimaryAllocator;
// Per-thread front-end cache for the primary allocator.
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
// Fallback for chunks too large for the size classes.
typedef LargeMmapAllocator<> SecondaryAllocator;
// The combined allocator used throughout this file.
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
          SecondaryAllocator> Allocator;
44
// Single global allocator instance.
static Allocator allocator;
// One cache per thread; handed back to the allocator in
// AllocatorThreadFinish().
static THREADLOCAL AllocatorCache cache;
47
// One-time initialization of the global allocator.
void InitializeAllocator() {
  allocator.Init();
}
51
// Called on thread exit: hands this thread's allocator cache back to the
// global allocator.
void AllocatorThreadFinish() {
  allocator.SwallowCache(&cache);
}
55
56static ChunkMetadata *Metadata(void *p) {
57  return (ChunkMetadata *)allocator.GetMetaData(p);
58}
59
// Fills in the metadata for a freshly allocated chunk: stack trace id and
// requested size first, then the 'allocated' flag. The flag is written with
// an atomic 8-bit store through a cast of the whole struct, which relies on
// 'allocated' being the first field of ChunkMetadata.
static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  m->stack_trace_id = StackDepotPut(stack.trace, stack.size);
  m->requested_size = size;
  atomic_store((atomic_uint8_t*)m, 1, memory_order_relaxed);
}
68
// Clears the chunk's 'allocated' flag via the same atomic 8-bit store trick
// as RegisterAllocation (flag must be the first field of ChunkMetadata).
static void RegisterDeallocation(void *p) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  atomic_store((atomic_uint8_t*)m, 0, memory_order_relaxed);
}
75
76void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
77               bool cleared) {
78  if (size == 0)
79    size = 1;
80  if (size > kMaxAllowedMallocSize) {
81      Report("WARNING: LeakSanitizer failed to allocate %p bytes\n",
82             (void*)size);
83      return 0;
84  }
85  void *p = allocator.Allocate(&cache, size, alignment, cleared);
86  RegisterAllocation(stack, p, size);
87  return p;
88}
89
// Frees a chunk: marks its metadata dead first, then returns the memory to
// the allocator.
void Deallocate(void *p) {
  RegisterDeallocation(p);
  allocator.Deallocate(&cache, p);
}
94
// Resizes chunk |p| to |new_size|, re-registering it under the new stack and
// size. On oversize requests the old chunk is freed and 0 is returned.
void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
                 uptr alignment) {
  // Deregister first: the allocator may move or free the chunk below, after
  // which its old metadata must no longer read as live.
  RegisterDeallocation(p);
  if (new_size > kMaxAllowedMallocSize) {
      Report("WARNING: LeakSanitizer failed to allocate %p bytes\n",
             (void*)new_size);
      allocator.Deallocate(&cache, p);
      return 0;
  }
  p = allocator.Reallocate(&cache, p, new_size, alignment);
  RegisterAllocation(stack, p, new_size);
  return p;
}
108
109void GetAllocatorCacheRange(uptr *begin, uptr *end) {
110  *begin = (uptr)&cache;
111  *end = *begin + sizeof(cache);
112}
113
114uptr GetMallocUsableSize(void *p) {
115  ChunkMetadata *m = Metadata(p);
116  if (!m) return 0;
117  return m->requested_size;
118}
119
120///// Interface to the common LSan module. /////
121
// Acquires the allocator's internal locks. Paired with UnlockAllocator().
void LockAllocator() {
  allocator.ForceLock();
}
125
// Releases the locks taken by LockAllocator().
void UnlockAllocator() {
  allocator.ForceUnlock();
}
129
130void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
131  *begin = (uptr)&allocator;
132  *end = *begin + sizeof(allocator);
133}
134
135void *PointsIntoChunk(void* p) {
136  if (!allocator.PointerIsMine(p)) return 0;
137  void *chunk = allocator.GetBlockBegin(p);
138  if (!chunk) return 0;
139  // LargeMmapAllocator considers pointers to the meta-region of a chunk to be
140  // valid, but we don't want that.
141  if (p < chunk) return 0;
142  ChunkMetadata *m = Metadata(chunk);
143  CHECK(m);
144  if (m->allocated && (uptr)p < (uptr)chunk + m->requested_size)
145    return chunk;
146  return 0;
147}
148
// In this allocator the user pointer coincides with the chunk start, so this
// is the identity function.
void *GetUserBegin(void *p) {
  return p;
}
152
// Binds this wrapper to the metadata of |chunk|, for use by the common LSan
// module; the chunk must have metadata.
LsanMetadata::LsanMetadata(void *chunk) {
  metadata_ = Metadata(chunk);
  CHECK(metadata_);
}
157
158bool LsanMetadata::allocated() const {
159  return reinterpret_cast<ChunkMetadata *>(metadata_)->allocated;
160}
161
162ChunkTag LsanMetadata::tag() const {
163  return reinterpret_cast<ChunkMetadata *>(metadata_)->tag;
164}
165
166void LsanMetadata::set_tag(ChunkTag value) {
167  reinterpret_cast<ChunkMetadata *>(metadata_)->tag = value;
168}
169
170uptr LsanMetadata::requested_size() const {
171  return reinterpret_cast<ChunkMetadata *>(metadata_)->requested_size;
172}
173
174u32 LsanMetadata::stack_trace_id() const {
175  return reinterpret_cast<ChunkMetadata *>(metadata_)->stack_trace_id;
176}
177
// Applies |callback| to every chunk known to the allocator. Only the
// callback types explicitly instantiated below may be used.
template<typename Callable>
void ForEachChunk(Callable const &callback) {
  allocator.ForEachChunk(callback);
}
182
// Explicit instantiations for every callback type used by the common LSan
// module; keeps the template definition local to this file.
template void ForEachChunk<ProcessPlatformSpecificAllocationsCb>(
    ProcessPlatformSpecificAllocationsCb const &callback);
template void ForEachChunk<PrintLeakedCb>(PrintLeakedCb const &callback);
template void ForEachChunk<CollectLeaksCb>(CollectLeaksCb const &callback);
template void ForEachChunk<MarkIndirectlyLeakedCb>(
    MarkIndirectlyLeakedCb const &callback);
template void ForEachChunk<ReportLeakedCb>(ReportLeakedCb const &callback);
template void ForEachChunk<ClearTagCb>(ClearTagCb const &callback);
191
192}  // namespace __lsan
193