asan_allocator2.cc revision 84a996fc1057ffea9213608c47a54c3d3d3aed02
//===-- asan_allocator2.cc ------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Implementation of ASan's memory allocator, second version.
// This variant uses the allocator from sanitizer_common, i.e. the one shared
// with ThreadSanitizer and MemorySanitizer.
//
// Status: under development, not enabled by default yet.
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"
#if ASAN_ALLOCATOR_VERSION == 2

#include "asan_thread.h"
#include "asan_thread_registry.h"
#include "sanitizer/asan_interface.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_internal_defs.h"

namespace __asan {

#if SANITIZER_WORDSIZE == 64
const uptr kAllocatorSpace = 0x600000000000ULL;
const uptr kAllocatorSize  =  0x10000000000ULL;  // 1T.
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0 /*metadata*/,
    DefaultSizeClassMap> PrimaryAllocator;
#elif SANITIZER_WORDSIZE == 32
static const u64 kAddressSpaceSize = 1ULL << 32;
typedef SizeClassAllocator32<
  0, kAddressSpaceSize, 16, CompactSizeClassMap> PrimaryAllocator;
#endif

typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
    SecondaryAllocator> Allocator;
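
// The CombinedAllocator above routes small requests to the size-classed
// primary allocator (through the per-thread AllocatorCache) and falls back
// to the mmap-based secondary allocator for large requests.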

static THREADLOCAL AllocatorCache cache;
static Allocator allocator;

static const uptr kMaxAllowedMallocSize =
    (SANITIZER_WORDSIZE == 32) ? 3UL << 30 : 8UL << 30;

static bool inited = false;

static void Init() {
  if (inited) return;
  __asan_init();
  inited = true;  // This must happen before any threads are created.
  allocator.Init();
}

// Every chunk of memory allocated by this allocator can be in one of three
// states:
// CHUNK_AVAILABLE: the chunk is on the free list and ready to be allocated.
// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
// CHUNK_QUARANTINE: the chunk was freed and put into the quarantine zone.
enum {
  CHUNK_AVAILABLE  = 1,
  CHUNK_ALLOCATED  = 2,
  CHUNK_QUARANTINE = 3
};
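
// Lifecycle sketch (illustrative; the quarantine path is not implemented in
// this revision, so Deallocate() below returns memory straight to the
// underlying allocator without updating chunk_state):
//   CHUNK_AVAILABLE  -> CHUNK_ALLOCATED   in Allocate()
//   CHUNK_ALLOCATED  -> CHUNK_QUARANTINE  in Deallocate(), once implemented
//   CHUNK_QUARANTINE -> CHUNK_AVAILABLE   when the quarantine is drained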

// The memory chunk allocated from the underlying allocator looks like this:
// L L L L L L H H U U U U U U R R
//   L -- left redzone (0 or more bytes)
//   H -- ChunkHeader (16 bytes on 64-bit arch, 8 bytes on 32-bit arch).
//     ChunkHeader is also a part of the left redzone.
//   U -- user memory.
//   R -- right redzone (0 or more bytes)
// ChunkBase consists of ChunkHeader and other bytes that overlap with user
// memory.
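// For instance, with a 128-byte left redzone on 64-bit (an illustrative
// figure, not a fixed default), the 16-byte ChunkHeader occupies bytes
// [112, 128) of the redzone, immediately preceding the user memory.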

#if SANITIZER_WORDSIZE == 64
struct ChunkBase {
  // First 8 bytes.
  uptr chunk_state       : 8;  // Must be first.
  uptr alloc_tid         : 24;
  uptr free_tid          : 24;
  uptr from_memalign     : 1;
  // Second 8 bytes.
  uptr user_requested_size;
  // End of ChunkHeader.
  // Third 8 bytes. These overlap with the user memory.
  AsanChunk *next;
};

static const uptr kChunkHeaderSize = 16;
COMPILER_CHECK(sizeof(ChunkBase) == 24);

#elif SANITIZER_WORDSIZE == 32
struct ChunkBase {
  // First 8 bytes.
  uptr chunk_state       : 8;  // Must be first.
  uptr from_memalign     : 1;
  uptr alloc_tid         : 23;
  uptr user_requested_size;
  // End of ChunkHeader.
  // Second 8 bytes. These overlap with the user memory.
  AsanChunk *next;
  uptr free_tid;
};

static const uptr kChunkHeaderSize = 8;
COMPILER_CHECK(sizeof(ChunkBase) == 16);
#endif

struct AsanChunk: ChunkBase {
  uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
  uptr UsedSize() { return user_requested_size; }
};

uptr AsanChunkView::Beg() { return chunk_->Beg(); }
uptr AsanChunkView::End() { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }

void AsanChunkView::GetAllocStack(StackTrace *stack) {
  stack->size = 0;
}

void AsanChunkView::GetFreeStack(StackTrace *stack) {
  stack->size = 0;
}
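
// Alloc/free stack traces are not stored in this version yet, so the two
// accessors above deliberately return an empty trace.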

static const uptr kReturnOnZeroMalloc = 0x0123;  // Zero page is protected.

static uptr ComputeRZSize(uptr user_requested_size) {
  // FIXME: implement adaptive redzones.
  return flags()->redzone;
}
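
// One possible shape for the adaptive redzones mentioned in the FIXME above.
// This is only a sketch: the thresholds are hypothetical, not ASan policy,
// and any real implementation must keep the result a power of two no smaller
// than kChunkHeaderSize so the header still fits in the left redzone.
#if 0
static uptr ComputeRZSizeAdaptive(uptr user_requested_size) {
  if (user_requested_size <= 64)         return 16;
  if (user_requested_size <= 128)        return 32;
  if (user_requested_size <= (1 << 20))  return 256;
  return 1 << 12;  // Cap the redzone for very large allocations.
}
#endif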

static void *Allocate(uptr size, uptr alignment, StackTrace *stack) {
  Init();
  CHECK(stack);
  if (alignment < 8) alignment = 8;
  if (size == 0)
    return reinterpret_cast<void *>(kReturnOnZeroMalloc);
  CHECK(IsPowerOfTwo(alignment));
  uptr rz_size = ComputeRZSize(size);
  uptr rounded_size = RoundUpTo(size, rz_size);
  uptr needed_size = rounded_size + rz_size;
  if (alignment > rz_size)
    needed_size += alignment;
  CHECK(IsAligned(needed_size, rz_size));
  if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
    Report("WARNING: AddressSanitizer failed to allocate %p bytes\n",
           (void*)size);
    return 0;
  }

  AsanThread *t = asanThreadRegistry().GetCurrent();
  void *allocated = allocator.Allocate(&cache, needed_size, 8, false);
  uptr alloc_beg = reinterpret_cast<uptr>(allocated);
  uptr alloc_end = alloc_beg + needed_size;
  uptr beg_plus_redzone = alloc_beg + rz_size;
  uptr user_beg = beg_plus_redzone;
  if (!IsAligned(user_beg, alignment))
    user_beg = RoundUpTo(user_beg, alignment);
  uptr user_end = user_beg + size;
  CHECK_LE(user_end, alloc_end);
  uptr chunk_beg = user_beg - kChunkHeaderSize;
//  Printf("allocated: %p beg_plus_redzone %p chunk_beg %p\n",
//         allocated, beg_plus_redzone, chunk_beg);
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
  m->chunk_state = CHUNK_ALLOCATED;
  u32 alloc_tid = t ? t->tid() : 0;
  m->alloc_tid = alloc_tid;
  CHECK_EQ(alloc_tid, m->alloc_tid);  // Does alloc_tid fit into the bitfield?
  m->from_memalign = user_beg != beg_plus_redzone;
  m->user_requested_size = size;

  void *res = reinterpret_cast<void *>(user_beg);
  ASAN_MALLOC_HOOK(res, size);
  return res;
}
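
// Worked example of the arithmetic above (illustrative numbers, assuming
// 64-bit and flags()->redzone == 128): malloc(100) yields
//   rz_size      = 128
//   rounded_size = RoundUpTo(100, 128)  = 128
//   needed_size  = 128 + 128            = 256
//   user_beg     = alloc_beg + 128
//   chunk_beg    = user_beg - 16   (header at the end of the left redzone)
//   user_end     = user_beg + 100 <= alloc_end = alloc_beg + 256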

static void Deallocate(void *ptr, StackTrace *stack) {
  uptr p = reinterpret_cast<uptr>(ptr);
  if (p == 0 || p == kReturnOnZeroMalloc) return;
  uptr chunk_beg = p - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
  uptr alloc_beg = p - ComputeRZSize(m->user_requested_size);
  if (m->from_memalign)
    alloc_beg = reinterpret_cast<uptr>(allocator.GetBlockBegin(ptr));
  ASAN_FREE_HOOK(ptr);
  allocator.Deallocate(&cache, reinterpret_cast<void *>(alloc_beg));
}
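
// Note on the from_memalign path: when Allocate() had to round user_beg up
// for a large alignment, "p - rz_size" no longer points at the block start,
// so the true beginning is recovered from the underlying allocator via
// GetBlockBegin(). Quarantine and chunk_state updates are not performed here
// yet (see the status note at the top of this file).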

AsanChunkView FindHeapChunkByAddress(uptr address) {
  UNIMPLEMENTED();
  return AsanChunkView(0);
}

void AsanThreadLocalMallocStorage::CommitBack() {
  UNIMPLEMENTED();
}

SANITIZER_INTERFACE_ATTRIBUTE
void *asan_memalign(uptr alignment, uptr size, StackTrace *stack) {
  return Allocate(size, alignment, stack);
}

SANITIZER_INTERFACE_ATTRIBUTE
void asan_free(void *ptr, StackTrace *stack) {
  Deallocate(ptr, stack);
}

SANITIZER_INTERFACE_ATTRIBUTE
void *asan_malloc(uptr size, StackTrace *stack) {
  return Allocate(size, 8, stack);
}

void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
  UNIMPLEMENTED();
  return 0;
}

void *asan_realloc(void *p, uptr size, StackTrace *stack) {
  UNIMPLEMENTED();
  return 0;
}

void *asan_valloc(uptr size, StackTrace *stack) {
  UNIMPLEMENTED();
  return 0;
}

void *asan_pvalloc(uptr size, StackTrace *stack) {
  UNIMPLEMENTED();
  return 0;
}

int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        StackTrace *stack) {
  UNIMPLEMENTED();
  return 0;
}

uptr asan_malloc_usable_size(void *ptr, StackTrace *stack) {
  UNIMPLEMENTED();
  return 0;
}

uptr asan_mz_size(const void *ptr) {
  UNIMPLEMENTED();
  return 0;
}

void asan_mz_force_lock() {
  UNIMPLEMENTED();
}

void asan_mz_force_unlock() {
  UNIMPLEMENTED();
}

}  // namespace __asan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;  // NOLINT

// ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size".
uptr __asan_get_estimated_allocated_size(uptr size) {
  UNIMPLEMENTED();
  return 0;
}

bool __asan_get_ownership(const void *p) {
  UNIMPLEMENTED();
  return false;
}

uptr __asan_get_allocated_size(const void *p) {
  UNIMPLEMENTED();
  return 0;
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementations of the malloc hooks.
extern "C" {
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
void __asan_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
void __asan_free_hook(void *ptr) {
  (void)ptr;
}
}  // extern "C"
#endif

#endif  // ASAN_ALLOCATOR_VERSION