asan_allocator2.cc revision d4d2594c02c9fdf40072fa36446eef200abdf8e5
//===-- asan_allocator2.cc ------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Implementation of ASan's memory allocator, second version.
// This variant uses the allocator from sanitizer_common, i.e. the one shared
// with ThreadSanitizer and MemorySanitizer.
//
// Status: under development, not enabled by default yet.
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"
#if ASAN_ALLOCATOR_VERSION == 2

#include "asan_mapping.h"
#include "asan_report.h"
#include "asan_thread.h"
#include "asan_thread_registry.h"
#include "sanitizer/asan_interface.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"

namespace __asan {

struct AsanMapUnmapCallback {
  void OnMap(uptr p, uptr size) const {
    PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
  }
  void OnUnmap(uptr p, uptr size) const {
    PoisonShadow(p, size, 0);
  }
};
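// Poisoning freshly mapped allocator regions as left redzone means that any
// access to heap memory that has not yet been handed out by Allocate() is
// reported as a redzone hit; OnUnmap clears the shadow again so stale
// poisoning does not outlive the mapping.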

#if SANITIZER_WORDSIZE == 64
const uptr kAllocatorSpace = 0x600000000000ULL;
const uptr kAllocatorSize  =  0x10000000000ULL;  // 1T.
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0 /*metadata*/,
    DefaultSizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
#elif SANITIZER_WORDSIZE == 32
static const u64 kAddressSpaceSize = 1ULL << 32;
typedef SizeClassAllocator32<0, kAddressSpaceSize, 16,
  CompactSizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
#endif

typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<AsanMapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
    SecondaryAllocator> Allocator;
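// The allocator is assembled from three sanitizer_common pieces:
//   * PrimaryAllocator serves size-classed small allocations from a fixed
//     region (a dedicated 1T address range on 64-bit, the whole 4G address
//     space on 32-bit).
//   * SecondaryAllocator handles allocations too large for the primary by
//     mmap-ing them individually.
//   * AllocatorCache is a per-thread cache of size-class free lists that
//     lets most allocations avoid the shared allocator's locks.
// CombinedAllocator dispatches between the primary and the secondary.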

static THREADLOCAL AllocatorCache cache;
static Allocator allocator;

static const uptr kMaxAllowedMallocSize =
  FIRST_32_SECOND_64(3UL << 30, 8UL << 30);

static const uptr kMaxThreadLocalQuarantine =
  FIRST_32_SECOND_64(1 << 18, 1 << 21);

static const uptr kReturnOnZeroMalloc = 0x0123;  // Zero page is protected.

static int inited = 0;

static void Init() {
  if (inited) return;
  __asan_init();
  inited = true;  // this must happen before any threads are created.
  allocator.Init();
}
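// Note: Init() is not thread-safe by itself; `inited` is a plain int, so the
// scheme relies on the first allocation happening before any other threads
// are created (see the comment above). Later calls are cheap early returns.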

// Every chunk of memory allocated by this allocator can be in one of 3 states:
// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
enum {
  CHUNK_AVAILABLE  = 1,
  CHUNK_ALLOCATED  = 2,
  CHUNK_QUARANTINE = 3
};
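// The lifecycle is: Allocate() stamps a chunk CHUNK_ALLOCATED, Deallocate()
// atomically flips it to CHUNK_QUARANTINE (which is how double-free is
// detected), and Quarantine::PopAndDeallocate() marks it CHUNK_AVAILABLE
// just before returning the block to the underlying allocator.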

// The memory chunk allocated from the underlying allocator looks like this:
// L L L L L L H H U U U U U U R R
//   L -- left redzone bytes (0 or more)
//   H -- ChunkHeader (16 bytes on 64-bit arch, 8 bytes on 32-bit arch).
//     ChunkHeader is also a part of the left redzone.
//   U -- user memory.
//   R -- right redzone (0 or more bytes)
// ChunkBase consists of ChunkHeader and other bytes that overlap with user
// memory.

#if SANITIZER_WORDSIZE == 64
struct ChunkBase {
  // First 8 bytes.
  uptr chunk_state       : 8;  // Must be first.
  uptr alloc_tid         : 24;
  uptr free_tid          : 24;
  uptr from_memalign     : 1;
  // Second 8 bytes.
  uptr user_requested_size;
  // Header2 (intersects with user memory).
  // Third 8 bytes. These overlap with the user memory.
  AsanChunk *next;
};

static const uptr kChunkHeaderSize = 16;
static const uptr kChunkHeader2Size = 8;

#elif SANITIZER_WORDSIZE == 32
struct ChunkBase {
  // First 8 bytes.
  uptr chunk_state       : 8;  // Must be first.
  uptr from_memalign     : 1;
  uptr alloc_tid         : 23;
  uptr user_requested_size;
  // Header2 (intersects with user memory).
  // Second 8 bytes. These overlap with the user memory.
  AsanChunk *next;
  uptr  free_tid;
};

static const uptr kChunkHeaderSize = 8;
static const uptr kChunkHeader2Size = 8;
#endif
COMPILER_CHECK(sizeof(ChunkBase) == kChunkHeaderSize + kChunkHeader2Size);
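// A minimal sketch of the resulting 64-bit layout (assuming the usual LP64
// little-endian bitfield packing; exact bit placement is
// implementation-defined):
//   bytes 0..7  : chunk_state (8 bits), alloc_tid (24), free_tid (24),
//                 from_memalign (1), remaining bits unused;
//   bytes 8..15 : user_requested_size;
//   bytes 16..23: next -- this is Header2 and already overlaps user memory,
//                 which is why kChunkHeaderSize is 16, not 24.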

static uptr ComputeRZSize(uptr user_requested_size) {
  // FIXME: implement adaptive redzones.
  return flags()->redzone;
}

struct AsanChunk: ChunkBase {
  uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
  uptr UsedSize() { return user_requested_size; }
  // We store the alloc/free stack traces in the chunk itself.
  u32 *AllocStackBeg() {
    return (u32*)(Beg() - ComputeRZSize(UsedSize()));
  }
  uptr AllocStackSize() {
    return (ComputeRZSize(UsedSize()) - kChunkHeaderSize) / sizeof(u32);
  }
  u32 *FreeStackBeg() {
    return (u32*)(Beg() + kChunkHeader2Size);
  }
  uptr FreeStackSize() {
    uptr available = Max(RoundUpTo(UsedSize(), SHADOW_GRANULARITY),
                         ComputeRZSize(UsedSize()));
    return (available - kChunkHeader2Size) / sizeof(u32);
  }
};
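// Both stack traces are stored compressed (as arrays of u32) inside the chunk
// itself: the allocation stack lives in the left redzone, in the bytes between
// the start of the redzone and the ChunkHeader, while the free stack is
// written over the user memory right after Header2. Reusing the user bytes
// for the free stack is safe because by then the chunk has already been freed
// (its state is CHUNK_QUARANTINE).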

uptr AsanChunkView::Beg() { return chunk_->Beg(); }
uptr AsanChunkView::End() { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }

void AsanChunkView::GetAllocStack(StackTrace *stack) {
  StackTrace::UncompressStack(stack, chunk_->AllocStackBeg(),
                              chunk_->AllocStackSize());
}

void AsanChunkView::GetFreeStack(StackTrace *stack) {
  StackTrace::UncompressStack(stack, chunk_->FreeStackBeg(),
                              chunk_->FreeStackSize());
}

class Quarantine: public AsanChunkFifoList {
 public:
  void SwallowThreadLocalQuarantine(AsanChunkFifoList *q) {
    if (!q->size()) return;
    // Printf("SwallowThreadLocalQuarantine %zd\n", q->size());
    SpinMutexLock l(&mutex_);
    PushList(q);
    PopAndDeallocateLoop();
  }
  void BypassThreadLocalQuarantine(AsanChunk *m) {
    SpinMutexLock l(&mutex_);
    Push(m);
    PopAndDeallocateLoop();
  }

 private:
  void PopAndDeallocateLoop() {
    while (size() > (uptr)flags()->quarantine_size) {
      PopAndDeallocate();
    }
  }
  void PopAndDeallocate() {
    CHECK_GT(size(), 0);
    AsanChunk *m = Pop();
    CHECK(m);
    CHECK(m->chunk_state == CHUNK_QUARANTINE);
    m->chunk_state = CHUNK_AVAILABLE;
    CHECK_NE(m->alloc_tid, kInvalidTid);
    CHECK_NE(m->free_tid, kInvalidTid);
    PoisonShadow(m->Beg(),
                 RoundUpTo(m->user_requested_size, SHADOW_GRANULARITY),
                 kAsanHeapLeftRedzoneMagic);
    uptr alloc_beg = m->Beg() - ComputeRZSize(m->user_requested_size);
    void *p = reinterpret_cast<void *>(alloc_beg);
    if (m->from_memalign)
      p = allocator.GetBlockBegin(p);
    allocator.Deallocate(&cache, p);
  }
  SpinMutex mutex_;
};
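// The quarantine keeps freed chunks out of circulation until the total of
// their user sizes exceeds flags()->quarantine_size; only then are the oldest
// chunks (FIFO order) handed back to the underlying allocator. Delaying reuse
// this way is what lets a use-after-free hit poisoned memory instead of a
// live reallocation. Note that size() counts user bytes, not chunks, because
// Push() below adds UsedSize().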

static Quarantine quarantine;

void AsanChunkFifoList::PushList(AsanChunkFifoList *q) {
  CHECK(q->size() > 0);
  size_ += q->size();
  append_back(q);
  q->clear();
}

void AsanChunkFifoList::Push(AsanChunk *n) {
  push_back(n);
  size_ += n->UsedSize();
}

// Interesting performance observation: this function takes up to 15% of overall
// allocator time. That's because *first_ has been evicted from the cache a long
// time ago. Not sure if we can or want to do anything about this.
AsanChunk *AsanChunkFifoList::Pop() {
  CHECK(first_);
  AsanChunk *res = front();
  size_ -= res->UsedSize();
  pop_front();
  return res;
}

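// A hedged worked example of the size computation below, assuming
// flags()->redzone is 128 bytes (the value is configurable): for malloc(100)
// with alignment 8, rz_size = 128, rounded_size = RoundUpTo(100, 128) = 128,
// and needed_size = 128 + 128 = 256, i.e. one 128-byte left redzone (which
// also holds the chunk header and the compressed allocation stack) followed
// by the 100 user bytes, with the remainder of the block acting as the right
// redzone. If alignment were larger than rz_size, an extra `alignment` bytes
// would be requested so that user_beg can be rounded up inside the block.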
static void *Allocate(uptr size, uptr alignment, StackTrace *stack) {
  Init();
  CHECK(stack);
  if (alignment < 8) alignment = 8;
  if (size == 0)
    return reinterpret_cast<void *>(kReturnOnZeroMalloc);
  CHECK(IsPowerOfTwo(alignment));
  uptr rz_size = ComputeRZSize(size);
  uptr rounded_size = RoundUpTo(size, rz_size);
  uptr needed_size = rounded_size + rz_size;
  if (alignment > rz_size)
    needed_size += alignment;
  CHECK(IsAligned(needed_size, rz_size));
  if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
    Report("WARNING: AddressSanitizer failed to allocate %p bytes\n",
           (void*)size);
    return 0;
  }

  AsanThread *t = asanThreadRegistry().GetCurrent();
  void *allocated = allocator.Allocate(&cache, needed_size, 8, false);
  uptr alloc_beg = reinterpret_cast<uptr>(allocated);
  uptr alloc_end = alloc_beg + needed_size;
  uptr beg_plus_redzone = alloc_beg + rz_size;
  uptr user_beg = beg_plus_redzone;
  if (!IsAligned(user_beg, alignment))
    user_beg = RoundUpTo(user_beg, alignment);
  uptr user_end = user_beg + size;
  CHECK_LE(user_end, alloc_end);
  uptr chunk_beg = user_beg - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
  m->chunk_state = CHUNK_ALLOCATED;
  u32 alloc_tid = t ? t->tid() : 0;
  m->alloc_tid = alloc_tid;
  CHECK_EQ(alloc_tid, m->alloc_tid);  // Does alloc_tid fit into the bitfield?
  m->free_tid = kInvalidTid;
  m->from_memalign = user_beg != beg_plus_redzone;
  m->user_requested_size = size;
  StackTrace::CompressStack(stack, m->AllocStackBeg(), m->AllocStackSize());

  uptr size_rounded_down_to_granularity = RoundDownTo(size, SHADOW_GRANULARITY);
  // Unpoison the bulk of the memory region.
  if (size_rounded_down_to_granularity)
    PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
  // Deal with the end of the region if size is not aligned to granularity.
  if (size != size_rounded_down_to_granularity) {
    u8 *shadow = (u8*)MemToShadow(user_beg + size_rounded_down_to_granularity);
    *shadow = size & (SHADOW_GRANULARITY - 1);
  }
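  // A concrete example (illustration only): with SHADOW_GRANULARITY == 8 and
  // size == 13, size_rounded_down_to_granularity == 8, so the first 8 user
  // bytes get shadow value 0 (fully addressable) and the shadow byte for the
  // last, partially used granule is set to 13 & 7 == 5: only its first 5
  // bytes are addressable, and touching the remaining 3 bytes is reported.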

  void *res = reinterpret_cast<void *>(user_beg);
  ASAN_MALLOC_HOOK(res, size);
  return res;
}

static void Deallocate(void *ptr, StackTrace *stack) {
  uptr p = reinterpret_cast<uptr>(ptr);
  if (p == 0 || p == kReturnOnZeroMalloc) return;
  uptr chunk_beg = p - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

  // Flip the chunk_state atomically to avoid race on double-free.
  u8 old_chunk_state = atomic_exchange((atomic_uint8_t*)m, CHUNK_QUARANTINE,
                                       memory_order_acq_rel);
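  // Exactly one of two threads racing to free the same pointer observes
  // CHUNK_ALLOCATED here; the other sees CHUNK_QUARANTINE and is reported as
  // a double-free below. This works because chunk_state is the first byte of
  // the chunk header (see the "Must be first" comment in ChunkBase).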

  if (old_chunk_state == CHUNK_QUARANTINE)
    ReportDoubleFree((uptr)ptr, stack);
  else if (old_chunk_state != CHUNK_ALLOCATED)
    ReportFreeNotMalloced((uptr)ptr, stack);
  CHECK(old_chunk_state == CHUNK_ALLOCATED);

  CHECK_GE(m->alloc_tid, 0);
  CHECK_EQ(m->free_tid, kInvalidTid);
  AsanThread *t = asanThreadRegistry().GetCurrent();
  m->free_tid = t ? t->tid() : 0;
  StackTrace::CompressStack(stack, m->FreeStackBeg(), m->FreeStackSize());
  CHECK(m->chunk_state == CHUNK_QUARANTINE);
  // Poison the region.
  PoisonShadow(m->Beg(),
               RoundUpTo(m->user_requested_size, SHADOW_GRANULARITY),
               kAsanHeapFreeMagic);

  // Push into quarantine.
  if (t) {
    AsanChunkFifoList &q = t->malloc_storage().quarantine_;
    q.Push(m);

    if (q.size() > kMaxThreadLocalQuarantine)
      quarantine.SwallowThreadLocalQuarantine(&q);
  } else {
    quarantine.BypassThreadLocalQuarantine(m);
  }

  ASAN_FREE_HOOK(ptr);
}
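// Freed chunks are first batched in a per-thread quarantine list and only
// merged into the global quarantine (which requires taking its spin lock)
// once the batch exceeds kMaxThreadLocalQuarantine bytes; threads unknown to
// the registry skip the batching and go straight to the global list.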

static void *Reallocate(void *old_ptr, uptr new_size, StackTrace *stack) {
  CHECK(old_ptr && new_size);
  uptr p = reinterpret_cast<uptr>(old_ptr);
  uptr chunk_beg = p - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

  CHECK(m->chunk_state == CHUNK_ALLOCATED);
  uptr old_size = m->UsedSize();
  uptr memcpy_size = Min(new_size, old_size);
  void *new_ptr = Allocate(new_size, 8, stack);
  if (new_ptr) {
    CHECK(REAL(memcpy) != 0);
    REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
    Deallocate(old_ptr, stack);
  }
  return new_ptr;
}
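// Reallocate never grows a chunk in place: it always allocates a fresh chunk,
// copies min(old, new) bytes, and sends the old chunk through the normal
// Deallocate path, so the old memory is poisoned and quarantined and stale
// pointers to it keep getting caught.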

AsanChunkView FindHeapChunkByAddress(uptr address) {
  uptr alloc_beg = (uptr)allocator.GetBlockBegin((void*)address);
  // FIXME: this does not take into account memalign.
  return AsanChunkView((AsanChunk *)(alloc_beg + ComputeRZSize(0)
                                     - kChunkHeaderSize));
}
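// The FIXME above matters because when the requested alignment exceeds the
// redzone size, Allocate() rounds user_beg up inside the block (and sets
// from_memalign), so the chunk header is not at
// alloc_beg + redzone - kChunkHeaderSize and the view computed here would
// point at the wrong place.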

void AsanThreadLocalMallocStorage::CommitBack() {
  UNIMPLEMENTED();
}

SANITIZER_INTERFACE_ATTRIBUTE
void *asan_memalign(uptr alignment, uptr size, StackTrace *stack) {
  return Allocate(size, alignment, stack);
}

SANITIZER_INTERFACE_ATTRIBUTE
void asan_free(void *ptr, StackTrace *stack) {
  Deallocate(ptr, stack);
}

SANITIZER_INTERFACE_ATTRIBUTE
void *asan_malloc(uptr size, StackTrace *stack) {
  return Allocate(size, 8, stack);
}

void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
  void *ptr = Allocate(nmemb * size, 8, stack);
  if (ptr)
    REAL(memset)(ptr, 0, nmemb * size);
  return ptr;
}

void *asan_realloc(void *p, uptr size, StackTrace *stack) {
  if (p == 0)
    return Allocate(size, 8, stack);
  if (size == 0) {
    Deallocate(p, stack);
    return 0;
  }
  return Reallocate(p, size, stack);
}

void *asan_valloc(uptr size, StackTrace *stack) {
  return Allocate(size, GetPageSizeCached(), stack);
}

void *asan_pvalloc(uptr size, StackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  size = RoundUpTo(size, PageSize);
  if (size == 0) {
    // pvalloc(0) should allocate one page.
    size = PageSize;
  }
  return Allocate(size, PageSize, stack);
}

int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        StackTrace *stack) {
  void *ptr = Allocate(size, alignment, stack);
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

uptr asan_malloc_usable_size(void *ptr, StackTrace *stack) {
  UNIMPLEMENTED();
  return 0;
}

uptr asan_mz_size(const void *ptr) {
  UNIMPLEMENTED();
  return 0;
}

void asan_mz_force_lock() {
  UNIMPLEMENTED();
}

void asan_mz_force_unlock() {
  UNIMPLEMENTED();
}

}  // namespace __asan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;  // NOLINT

// ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size".
uptr __asan_get_estimated_allocated_size(uptr size) {
  UNIMPLEMENTED();
  return 0;
}

bool __asan_get_ownership(const void *p) {
  UNIMPLEMENTED();
  return false;
}

uptr __asan_get_allocated_size(const void *p) {
  UNIMPLEMENTED();
  return 0;
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
extern "C" {
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
void __asan_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
void __asan_free_hook(void *ptr) {
  (void)ptr;
}
}  // extern "C"
#endif

#endif  // ASAN_ALLOCATOR_VERSION