asan_allocator2.cc revision bc9940eedb51dd43d844a4c46e17bc89f872781f
//===-- asan_allocator2.cc ------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Implementation of ASan's memory allocator, second version.
// This variant uses the allocator from sanitizer_common, i.e. the one shared
// with ThreadSanitizer and MemorySanitizer.
//
// Status: under development, not enabled by default yet.
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"
#if ASAN_ALLOCATOR_VERSION == 2

#include "asan_mapping.h"
#include "asan_thread.h"
#include "asan_thread_registry.h"
#include "sanitizer/asan_interface.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_internal_defs.h"

namespace __asan {

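// The allocator calls back into ASan whenever it maps or unmaps memory for
// its own use: freshly mapped regions are poisoned as heap left redzone, so
// accesses to memory the allocator holds but has not yet handed out are
// reported, and unmapped regions get their shadow cleared.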
struct AsanMapUnmapCallback {
  void OnMap(uptr p, uptr size) const {
    PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
  }
  void OnUnmap(uptr p, uptr size) const {
    PoisonShadow(p, size, 0);
  }
};

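// On 64-bit platforms the primary allocator manages a fixed 1T region
// starting at kAllocatorSpace; on 32-bit platforms it covers the whole 4G
// address space and reserves 16 bytes of per-chunk metadata (the third
// template argument).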
#if SANITIZER_WORDSIZE == 64
const uptr kAllocatorSpace = 0x600000000000ULL;
const uptr kAllocatorSize  =  0x10000000000ULL;  // 1T.
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0 /*metadata*/,
    DefaultSizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
#elif SANITIZER_WORDSIZE == 32
static const u64 kAddressSpaceSize = 1ULL << 32;
typedef SizeClassAllocator32<0, kAddressSpaceSize, 16,
  CompactSizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
#endif

typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<AsanMapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
    SecondaryAllocator> Allocator;

static THREADLOCAL AllocatorCache cache;
static Allocator allocator;

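// That is 3G on 32-bit platforms and 8G on 64-bit platforms.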
static const uptr kMaxAllowedMallocSize =
    (SANITIZER_WORDSIZE == 32) ? 3UL << 30 : 8UL << 30;

static bool inited = false;

static void Init() {
  if (inited) return;
  __asan_init();
  inited = true;  // This must happen before any threads are created.
  allocator.Init();
}

// Every chunk of memory allocated by this allocator can be in one of 3 states:
// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
enum {
  CHUNK_AVAILABLE  = 1,
  CHUNK_ALLOCATED  = 2,
  CHUNK_QUARANTINE = 3
};
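// Note: in this version only the CHUNK_ALLOCATED transition is wired up.
// Allocate() tags a chunk as allocated, while Deallocate() currently
// releases chunks straight back to the underlying allocator instead of
// putting them into quarantine.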

// The memory chunk allocated from the underlying allocator looks like this:
// L L L L L L H H U U U U U U R R
//   L -- left redzone words (0 or more bytes)
//   H -- ChunkHeader (16 bytes on 64-bit arch, 8 bytes on 32-bit arch).
//     ChunkHeader is also a part of the left redzone.
//   U -- user memory.
//   R -- right redzone (0 or more bytes)
// ChunkBase consists of ChunkHeader and other bytes that overlap with user
// memory.
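//
// A worked example (assuming flags()->redzone == 16, on a 64-bit target):
// for malloc(10), Allocate() below computes rounded_size = 16 and
// needed_size = 32. The 16-byte left redzone is occupied entirely by the
// ChunkHeader, the next 10 bytes are user memory, and the remaining 6 bytes
// up to needed_size act as the right redzone.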

#if SANITIZER_WORDSIZE == 64
struct ChunkBase {
  // First 8 bytes (8 + 24 + 24 + 1 == 57 bits, packed into one word).
  uptr chunk_state       : 8;  // Must be first.
  uptr alloc_tid         : 24;
  uptr free_tid          : 24;
  uptr from_memalign     : 1;
  // Second 8 bytes.
  uptr user_requested_size;
  // End of ChunkHeader.
  // Third 8 bytes. These overlap with the user memory.
  AsanChunk *next;
};

static const uptr kChunkHeaderSize = 16;
COMPILER_CHECK(sizeof(ChunkBase) == 24);

#elif SANITIZER_WORDSIZE == 32
struct ChunkBase {
  // First 8 bytes (32 bits of bitfields plus user_requested_size).
  uptr chunk_state       : 8;  // Must be first.
  uptr from_memalign     : 1;
  uptr alloc_tid         : 23;
  uptr user_requested_size;
  // End of ChunkHeader.
  // Second 8 bytes. These overlap with the user memory.
  AsanChunk *next;
  uptr  free_tid;
};

COMPILER_CHECK(sizeof(ChunkBase) == 16);
static const uptr kChunkHeaderSize = 8;
#endif

struct AsanChunk: ChunkBase {
  uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
  uptr UsedSize() { return user_requested_size; }
};

uptr AsanChunkView::Beg() { return chunk_->Beg(); }
uptr AsanChunkView::End() { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }

// Alloc/free stack traces are not recorded in this version yet, so the
// accessors report empty traces.
void AsanChunkView::GetAllocStack(StackTrace *stack) {
  stack->size = 0;
}

void AsanChunkView::GetFreeStack(StackTrace *stack) {
  stack->size = 0;
}

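// malloc(0) returns this non-NULL marker, which lives in the protected zero
// page and therefore traps on any access. Deallocate() recognizes it and
// treats freeing it as a no-op.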
static const uptr kReturnOnZeroMalloc = 0x0123;  // Zero page is protected.

static uptr ComputeRZSize(uptr user_requested_size) {
  // FIXME: implement adaptive redzones.
  return flags()->redzone;
}

static void *Allocate(uptr size, uptr alignment, StackTrace *stack) {
  Init();
  CHECK(stack);
  if (alignment < 8) alignment = 8;
  if (size == 0)
    return reinterpret_cast<void *>(kReturnOnZeroMalloc);
  CHECK(IsPowerOfTwo(alignment));
  uptr rz_size = ComputeRZSize(size);
  uptr rounded_size = RoundUpTo(size, rz_size);
  uptr needed_size = rounded_size + rz_size;
  if (alignment > rz_size)
    needed_size += alignment;
  CHECK(IsAligned(needed_size, rz_size));
  if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
    Report("WARNING: AddressSanitizer failed to allocate %p bytes\n",
           (void*)size);
    return 0;
  }

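  // Only 8-byte alignment is requested from the underlying allocator; larger
  // requested alignments are satisfied by the extra alignment bytes added to
  // needed_size above and by rounding user_beg up below.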
  AsanThread *t = asanThreadRegistry().GetCurrent();
  void *allocated = allocator.Allocate(&cache, needed_size, 8, false);
  uptr alloc_beg = reinterpret_cast<uptr>(allocated);
  uptr alloc_end = alloc_beg + needed_size;
  uptr beg_plus_redzone = alloc_beg + rz_size;
  uptr user_beg = beg_plus_redzone;
  if (!IsAligned(user_beg, alignment))
    user_beg = RoundUpTo(user_beg, alignment);
  uptr user_end = user_beg + size;
  CHECK_LE(user_end, alloc_end);
  uptr chunk_beg = user_beg - kChunkHeaderSize;
//  Printf("allocated: %p beg_plus_redzone %p chunk_beg %p\n",
//         allocated, beg_plus_redzone, chunk_beg);
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
  m->chunk_state = CHUNK_ALLOCATED;
  u32 alloc_tid = t ? t->tid() : 0;
  m->alloc_tid = alloc_tid;
  CHECK_EQ(alloc_tid, m->alloc_tid);  // Does alloc_tid fit into the bitfield?
  m->from_memalign = user_beg != beg_plus_redzone;
  m->user_requested_size = size;

  uptr size_rounded_down_to_granularity = RoundDownTo(size, SHADOW_GRANULARITY);
  // Unpoison the bulk of the memory region.
  if (size_rounded_down_to_granularity)
    PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
  // Deal with the end of the region if size is not aligned to granularity.
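  // A shadow byte value k in [1, SHADOW_GRANULARITY) means that only the
  // first k bytes of the corresponding 8-byte granule are addressable.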
  if (size != size_rounded_down_to_granularity) {
    u8 *shadow = (u8*)MemToShadow(user_beg + size_rounded_down_to_granularity);
    *shadow = size & (SHADOW_GRANULARITY - 1);
  }

  void *res = reinterpret_cast<void *>(user_beg);
  ASAN_MALLOC_HOOK(res, size);
  return res;
}

static void Deallocate(void *ptr, StackTrace *stack) {
  uptr p = reinterpret_cast<uptr>(ptr);
  if (p == 0 || p == kReturnOnZeroMalloc) return;
  uptr chunk_beg = p - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
  // Recover the start of the underlying allocation. For over-aligned chunks
  // the distance to it is not simply the redzone size, so ask the allocator.
  uptr alloc_beg = p - ComputeRZSize(m->user_requested_size);
  if (m->from_memalign)
    alloc_beg = reinterpret_cast<uptr>(allocator.GetBlockBegin(ptr));
  // Poison the region.
  PoisonShadow(m->Beg(), RoundUpTo(m->user_requested_size, SHADOW_GRANULARITY),
               kAsanHeapFreeMagic);
  ASAN_FREE_HOOK(ptr);
  allocator.Deallocate(&cache, reinterpret_cast<void *>(alloc_beg));
}

AsanChunkView FindHeapChunkByAddress(uptr address) {
  UNIMPLEMENTED();
  return AsanChunkView(0);
}

void AsanThreadLocalMallocStorage::CommitBack() {
  UNIMPLEMENTED();
}

SANITIZER_INTERFACE_ATTRIBUTE
void *asan_memalign(uptr alignment, uptr size, StackTrace *stack) {
  return Allocate(size, alignment, stack);
}

SANITIZER_INTERFACE_ATTRIBUTE
void asan_free(void *ptr, StackTrace *stack) {
  Deallocate(ptr, stack);
}

SANITIZER_INTERFACE_ATTRIBUTE
void *asan_malloc(uptr size, StackTrace *stack) {
  return Allocate(size, 8, stack);
}

void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
  UNIMPLEMENTED();
  return 0;
}

void *asan_realloc(void *p, uptr size, StackTrace *stack) {
  UNIMPLEMENTED();
  return 0;
}

void *asan_valloc(uptr size, StackTrace *stack) {
  UNIMPLEMENTED();
  return 0;
}

void *asan_pvalloc(uptr size, StackTrace *stack) {
  UNIMPLEMENTED();
  return 0;
}

int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        StackTrace *stack) {
  UNIMPLEMENTED();
  return 0;
}

uptr asan_malloc_usable_size(void *ptr, StackTrace *stack) {
  UNIMPLEMENTED();
  return 0;
}

uptr asan_mz_size(const void *ptr) {
  UNIMPLEMENTED();
  return 0;
}

void asan_mz_force_lock() {
  UNIMPLEMENTED();
}

void asan_mz_force_unlock() {
  UNIMPLEMENTED();
}

}  // namespace __asan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;  // NOLINT

// The ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size".
uptr __asan_get_estimated_allocated_size(uptr size) {
  UNIMPLEMENTED();
  return 0;
}

bool __asan_get_ownership(const void *p) {
  UNIMPLEMENTED();
  return false;
}

uptr __asan_get_allocated_size(const void *p) {
  UNIMPLEMENTED();
  return 0;
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementations of the malloc hooks.
extern "C" {
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
void __asan_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
void __asan_free_hook(void *ptr) {
  (void)ptr;
}
}  // extern "C"
#endif

#endif  // ASAN_ALLOCATOR_VERSION