asan_allocator2.cc revision a93c02c74f6350b4ca6358978d8175ebb6ac3604
//===-- asan_allocator2.cc ------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Implementation of ASan's memory allocator, second version.
// This variant uses the allocator from sanitizer_common, i.e. the one shared
// with ThreadSanitizer and MemorySanitizer.
//
// Status: under development, not enabled by default yet.
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"
#if ASAN_ALLOCATOR_VERSION == 2

#include "asan_mapping.h"
#include "asan_report.h"
#include "asan_thread.h"
#include "asan_thread_registry.h"
#include "sanitizer/asan_interface.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"

namespace __asan {

struct AsanMapUnmapCallback {
  void OnMap(uptr p, uptr size) const {
    PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
  }
  void OnUnmap(uptr p, uptr size) const {
    PoisonShadow(p, size, 0);
  }
};

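// Primary allocator: on 64-bit platforms the size-class allocator manages a
// fixed 1T region starting at kAllocatorSpace; on 32-bit platforms the
// 32-bit size-class allocator covers the whole 4G address space.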
#if SANITIZER_WORDSIZE == 64
const uptr kAllocatorSpace = 0x600000000000ULL;
const uptr kAllocatorSize  =  0x10000000000ULL;  // 1T.
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0 /*metadata*/,
    DefaultSizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
#elif SANITIZER_WORDSIZE == 32
static const u64 kAddressSpaceSize = 1ULL << 32;
typedef SizeClassAllocator32<0, kAddressSpaceSize, 16,
  CompactSizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
#endif

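// The per-thread AllocatorCache caches free blocks of the primary allocator;
// LargeMmapAllocator serves allocations that are too large for the size-class
// allocator; CombinedAllocator dispatches between the two.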
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<AsanMapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
    SecondaryAllocator> Allocator;

// We cannot use THREADLOCAL because it is not supported on some of the
// platforms we care about (OSX 10.6, Android).
// static THREADLOCAL AllocatorCache cache;
AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator2_cache));
  return reinterpret_cast<AllocatorCache *>(ms->allocator2_cache);
}

static Allocator allocator;

static const uptr kMaxAllowedMallocSize =
  FIRST_32_SECOND_64(3UL << 30, 8UL << 30);

static const uptr kMaxThreadLocalQuarantine =
  FIRST_32_SECOND_64(1 << 18, 1 << 20);

static const uptr kReturnOnZeroMalloc = 0x0123;  // Zero page is protected.

static int inited = 0;

static void Init() {
  if (inited) return;
  __asan_init();
  inited = true;  // this must happen before any threads are created.
  allocator.Init();
}

// Every chunk of memory allocated by this allocator can be in one of 3 states:
// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
// CHUNK_QUARANTINE: the chunk was freed and put into the quarantine zone.
enum {
  CHUNK_AVAILABLE  = 1,
  CHUNK_ALLOCATED  = 2,
  CHUNK_QUARANTINE = 3
};
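// The usual lifetime of a chunk is therefore
// CHUNK_ALLOCATED -> CHUNK_QUARANTINE (on free) -> CHUNK_AVAILABLE (once the
// quarantine evicts it and returns the memory to the underlying allocator).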
94
95// The memory chunk allocated from the underlying allocator looks like this:
96// L L L L L L H H U U U U U U R R
97//   L -- left redzone words (0 or more bytes)
98//   H -- ChunkHeader (16 bytes on 64-bit arch, 8 bytes on 32-bit arch).
99//     ChunkHeader is also a part of the left redzone.
100//   U -- user memory.
101//   R -- right redzone (0 or more bytes)
102// ChunkBase consists of ChunkHeader and other bytes that overlap with user
103// memory.

#if SANITIZER_WORDSIZE == 64
struct ChunkBase {
  // First 8 bytes.
  uptr chunk_state       : 8;  // Must be first.
  uptr alloc_tid         : 24;
  uptr free_tid          : 24;
  uptr from_memalign     : 1;
  // Second 8 bytes.
  uptr user_requested_size;
  // Header2 (intersects with user memory).
  // Third 8 bytes. These overlap with the user memory.
  AsanChunk *next;
};

static const uptr kChunkHeaderSize = 16;
static const uptr kChunkHeader2Size = 8;

#elif SANITIZER_WORDSIZE == 32
struct ChunkBase {
  // First 8 bytes.
  uptr chunk_state       : 8;  // Must be first.
  uptr alloc_tid         : 24;
  uptr from_memalign     : 1;
  uptr free_tid          : 24;
  // Second 8 bytes.
  uptr user_requested_size;
  AsanChunk *next;
  // Header2 empty.
};

static const uptr kChunkHeaderSize = 16;
static const uptr kChunkHeader2Size = 0;
#endif
COMPILER_CHECK(sizeof(ChunkBase) == kChunkHeaderSize + kChunkHeader2Size);


static uptr ComputeRZSize(uptr user_requested_size) {
  // FIXME: implement adaptive redzones.
  return flags()->redzone;
}

struct AsanChunk: ChunkBase {
  uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
  uptr UsedSize() { return user_requested_size; }
  // We store the alloc/free stack traces in the chunk itself.
  u32 *AllocStackBeg() {
    return (u32*)(Beg() - ComputeRZSize(UsedSize()));
  }
  uptr AllocStackSize() {
    return (ComputeRZSize(UsedSize()) - kChunkHeaderSize) / sizeof(u32);
  }
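  // The free stack is stored at the beginning of the (now unused) user
  // memory, right after Header2; by the time it is written the chunk has
  // already been moved out of the CHUNK_ALLOCATED state.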
  u32 *FreeStackBeg() {
    return (u32*)(Beg() + kChunkHeader2Size);
  }
  uptr FreeStackSize() {
    uptr available = Max(RoundUpTo(UsedSize(), SHADOW_GRANULARITY),
                         ComputeRZSize(UsedSize()));
    return (available - kChunkHeader2Size) / sizeof(u32);
  }
};

uptr AsanChunkView::Beg() { return chunk_->Beg(); }
uptr AsanChunkView::End() { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }

void AsanChunkView::GetAllocStack(StackTrace *stack) {
  StackTrace::UncompressStack(stack, chunk_->AllocStackBeg(),
                              chunk_->AllocStackSize());
}

void AsanChunkView::GetFreeStack(StackTrace *stack) {
  StackTrace::UncompressStack(stack, chunk_->FreeStackBeg(),
                              chunk_->FreeStackSize());
}

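// Freed chunks are not returned to the underlying allocator right away. They
// are first kept in a FIFO quarantine (capped at flags()->quarantine_size
// bytes of user memory) so that use-after-free accesses keep hitting poisoned
// shadow for as long as possible.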
class Quarantine: public AsanChunkFifoList {
 public:
  void SwallowThreadLocalQuarantine(AsanThreadLocalMallocStorage *ms) {
    AsanChunkFifoList *q = &ms->quarantine_;
    if (!q->size()) return;
    SpinMutexLock l(&mutex_);
    PushList(q);
    PopAndDeallocateLoop(ms);
  }
  void BypassThreadLocalQuarantine(AsanChunk *m) {
    SpinMutexLock l(&mutex_);
    Push(m);
  }

 private:
  void PopAndDeallocateLoop(AsanThreadLocalMallocStorage *ms) {
    while (size() > (uptr)flags()->quarantine_size) {
      PopAndDeallocate(ms);
    }
  }
  void PopAndDeallocate(AsanThreadLocalMallocStorage *ms) {
    CHECK_GT(size(), 0);
    AsanChunk *m = Pop();
    CHECK(m);
    CHECK(m->chunk_state == CHUNK_QUARANTINE);
    m->chunk_state = CHUNK_AVAILABLE;
    CHECK_NE(m->alloc_tid, kInvalidTid);
    CHECK_NE(m->free_tid, kInvalidTid);
    PoisonShadow(m->Beg(),
                 RoundUpTo(m->user_requested_size, SHADOW_GRANULARITY),
                 kAsanHeapLeftRedzoneMagic);
    uptr alloc_beg = m->Beg() - ComputeRZSize(m->user_requested_size);
    void *p = reinterpret_cast<void *>(alloc_beg);
    if (m->from_memalign)
      p = allocator.GetBlockBegin(p);
    allocator.Deallocate(GetAllocatorCache(ms), p);
  }
  SpinMutex mutex_;
};

static Quarantine quarantine;

void AsanChunkFifoList::PushList(AsanChunkFifoList *q) {
  CHECK(q->size() > 0);
  size_ += q->size();
  append_back(q);
  q->clear();
}

void AsanChunkFifoList::Push(AsanChunk *n) {
  push_back(n);
  size_ += n->UsedSize();
}

// Interesting performance observation: this function takes up to 15% of
// overall allocator time. That's because *first_ was evicted from the cache a
// long time ago. It is not clear whether we can or want to do anything about
// this.
AsanChunk *AsanChunkFifoList::Pop() {
  CHECK(first_);
  AsanChunk *res = front();
  size_ -= res->UsedSize();
  pop_front();
  return res;
}

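// Rough sketch of the layout produced below, assuming flags()->redzone == 128
// (an illustrative value, not necessarily the default) and malloc(40):
//   rz_size      = 128
//   rounded_size = RoundUpTo(40, 128) = 128
//   needed_size  = 128 + 128          = 256
//   user_beg     = alloc_beg + 128 (the ChunkHeader occupies the last
//                  kChunkHeaderSize bytes of the left redzone);
//   bytes [user_beg + 40, alloc_beg + 256) act as the right redzone.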
static void *Allocate(uptr size, uptr alignment, StackTrace *stack) {
  Init();
  CHECK(stack);
  if (alignment < 8) alignment = 8;
  if (size == 0)
    return reinterpret_cast<void *>(kReturnOnZeroMalloc);
  CHECK(IsPowerOfTwo(alignment));
  uptr rz_size = ComputeRZSize(size);
  uptr rounded_size = RoundUpTo(size, rz_size);
  uptr needed_size = rounded_size + rz_size;
  if (alignment > rz_size)
    needed_size += alignment;
  CHECK(IsAligned(needed_size, rz_size));
  if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
    Report("WARNING: AddressSanitizer failed to allocate %p bytes\n",
           (void*)size);
    return 0;
  }

  AsanThread *t = asanThreadRegistry().GetCurrent();
  // Printf("t = %p\n", t);
  CHECK(t);  // FIXME
  void *allocated = allocator.Allocate(
      GetAllocatorCache(&t->malloc_storage()), needed_size, 8, false);
  uptr alloc_beg = reinterpret_cast<uptr>(allocated);
  uptr alloc_end = alloc_beg + needed_size;
  uptr beg_plus_redzone = alloc_beg + rz_size;
  uptr user_beg = beg_plus_redzone;
  if (!IsAligned(user_beg, alignment))
    user_beg = RoundUpTo(user_beg, alignment);
  uptr user_end = user_beg + size;
  CHECK_LE(user_end, alloc_end);
  uptr chunk_beg = user_beg - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
  m->chunk_state = CHUNK_ALLOCATED;
  u32 alloc_tid = t ? t->tid() : 0;
  m->alloc_tid = alloc_tid;
  CHECK_EQ(alloc_tid, m->alloc_tid);  // Does alloc_tid fit into the bitfield?
  m->free_tid = kInvalidTid;
  m->from_memalign = user_beg != beg_plus_redzone;
  m->user_requested_size = size;
  StackTrace::CompressStack(stack, m->AllocStackBeg(), m->AllocStackSize());

  uptr size_rounded_down_to_granularity = RoundDownTo(size, SHADOW_GRANULARITY);
  // Unpoison the bulk of the memory region.
  if (size_rounded_down_to_granularity)
    PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
  // Deal with the end of the region if size is not aligned to granularity.
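  // The shadow byte for a partially addressable granule stores the number of
  // addressable bytes in it; e.g. for malloc(13) with an 8-byte
  // SHADOW_GRANULARITY the last shadow byte becomes 13 & 7 == 5.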
  if (size != size_rounded_down_to_granularity) {
    u8 *shadow = (u8*)MemToShadow(user_beg + size_rounded_down_to_granularity);
    *shadow = size & (SHADOW_GRANULARITY - 1);
  }

  void *res = reinterpret_cast<void *>(user_beg);
  ASAN_MALLOC_HOOK(res, size);
  return res;
}

static void Deallocate(void *ptr, StackTrace *stack) {
  uptr p = reinterpret_cast<uptr>(ptr);
  if (p == 0 || p == kReturnOnZeroMalloc) return;
  uptr chunk_beg = p - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

  // Flip the chunk_state atomically to avoid race on double-free.
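  // chunk_state is the first byte of the header ("Must be first" above), so
  // the chunk pointer can be reinterpreted as atomic_uint8_t* for this swap.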
  u8 old_chunk_state = atomic_exchange((atomic_uint8_t*)m, CHUNK_QUARANTINE,
                                       memory_order_acq_rel);

  if (old_chunk_state == CHUNK_QUARANTINE)
    ReportDoubleFree((uptr)ptr, stack);
  else if (old_chunk_state != CHUNK_ALLOCATED)
    ReportFreeNotMalloced((uptr)ptr, stack);
  CHECK(old_chunk_state == CHUNK_ALLOCATED);

  CHECK_GE(m->alloc_tid, 0);
  if (SANITIZER_WORDSIZE == 64)  // On 32 bits this resides in the user area.
    CHECK_EQ(m->free_tid, kInvalidTid);
  AsanThread *t = asanThreadRegistry().GetCurrent();
  m->free_tid = t ? t->tid() : 0;
  StackTrace::CompressStack(stack, m->FreeStackBeg(), m->FreeStackSize());
  CHECK(m->chunk_state == CHUNK_QUARANTINE);
  // Poison the region.
  PoisonShadow(m->Beg(),
               RoundUpTo(m->user_requested_size, SHADOW_GRANULARITY),
               kAsanHeapFreeMagic);

  // Push into quarantine.
  if (t) {
    AsanChunkFifoList &q = t->malloc_storage().quarantine_;
    q.Push(m);

    if (q.size() > kMaxThreadLocalQuarantine)
      quarantine.SwallowThreadLocalQuarantine(&t->malloc_storage());
  } else {
    quarantine.BypassThreadLocalQuarantine(m);
  }

  ASAN_FREE_HOOK(ptr);
}

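// Reallocate always moves the data: a fresh chunk is allocated, the first
// Min(new_size, old_size) bytes are copied over, and the old chunk is freed
// (and thus quarantined).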
static void *Reallocate(void *old_ptr, uptr new_size, StackTrace *stack) {
  CHECK(old_ptr && new_size);
  uptr p = reinterpret_cast<uptr>(old_ptr);
  uptr chunk_beg = p - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

  CHECK(m->chunk_state == CHUNK_ALLOCATED);
  uptr old_size = m->UsedSize();
  uptr memcpy_size = Min(new_size, old_size);
  void *new_ptr = Allocate(new_size, 8, stack);
  if (new_ptr) {
    CHECK(REAL(memcpy) != 0);
    REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
    Deallocate(old_ptr, stack);
  }
  return new_ptr;
}

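// Maps an address inside an allocation back to its AsanChunk by asking the
// allocator for the block start and then assuming the standard left-redzone
// offset (see the FIXME below about memalign'ed chunks).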
static AsanChunk *GetAsanChunkByAddr(uptr p) {
  uptr alloc_beg = reinterpret_cast<uptr>(
      allocator.GetBlockBegin(reinterpret_cast<void *>(p)));
  if (!alloc_beg) return 0;
  // FIXME: this does not take into account memalign.
  uptr chunk_beg = alloc_beg + ComputeRZSize(0) - kChunkHeaderSize;
  return reinterpret_cast<AsanChunk *>(chunk_beg);
}

static uptr AllocationSize(uptr p) {
  AsanChunk *m = GetAsanChunkByAddr(p);
  if (!m) return 0;
  if (m->chunk_state != CHUNK_ALLOCATED) return 0;
  if (m->Beg() != p) return 0;
  return m->UsedSize();
}

AsanChunkView FindHeapChunkByAddress(uptr address) {
  return AsanChunkView(GetAsanChunkByAddr(address));
}

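// Called when this thread-local malloc storage is retired (e.g. at thread
// exit): drain the thread's local quarantine into the global one.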
void AsanThreadLocalMallocStorage::CommitBack() {
  quarantine.SwallowThreadLocalQuarantine(this);
}

SANITIZER_INTERFACE_ATTRIBUTE
void *asan_memalign(uptr alignment, uptr size, StackTrace *stack) {
  return Allocate(size, alignment, stack);
}

SANITIZER_INTERFACE_ATTRIBUTE
void asan_free(void *ptr, StackTrace *stack) {
  Deallocate(ptr, stack);
}

SANITIZER_INTERFACE_ATTRIBUTE
void *asan_malloc(uptr size, StackTrace *stack) {
  return Allocate(size, 8, stack);
}

void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
  void *ptr = Allocate(nmemb * size, 8, stack);
  if (ptr)
    REAL(memset)(ptr, 0, nmemb * size);
  return ptr;
}

void *asan_realloc(void *p, uptr size, StackTrace *stack) {
  if (p == 0)
    return Allocate(size, 8, stack);
  if (size == 0) {
    Deallocate(p, stack);
    return 0;
  }
  return Reallocate(p, size, stack);
}

void *asan_valloc(uptr size, StackTrace *stack) {
  return Allocate(size, GetPageSizeCached(), stack);
}

void *asan_pvalloc(uptr size, StackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  size = RoundUpTo(size, PageSize);
  if (size == 0) {
    // pvalloc(0) should allocate one page.
    size = PageSize;
  }
  return Allocate(size, PageSize, stack);
}

int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        StackTrace *stack) {
  void *ptr = Allocate(size, alignment, stack);
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

uptr asan_malloc_usable_size(void *ptr, StackTrace *stack) {
  CHECK(stack);
  if (ptr == 0) return 0;
  uptr usable_size = AllocationSize(reinterpret_cast<uptr>(ptr));
  if (flags()->check_malloc_usable_size && (usable_size == 0))
    ReportMallocUsableSizeNotOwned((uptr)ptr, stack);
  return usable_size;
}

uptr asan_mz_size(const void *ptr) {
  UNIMPLEMENTED();
  return 0;
}

void asan_mz_force_lock() {
  UNIMPLEMENTED();
}

void asan_mz_force_unlock() {
  UNIMPLEMENTED();
}

}  // namespace __asan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;  // NOLINT

// The ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size".
uptr __asan_get_estimated_allocated_size(uptr size) {
  UNIMPLEMENTED();
  return 0;
}

bool __asan_get_ownership(const void *p) {
  UNIMPLEMENTED();
  return false;
}

uptr __asan_get_allocated_size(const void *p) {
  UNIMPLEMENTED();
  return 0;
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementations of the malloc hooks.
extern "C" {
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
void __asan_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
void __asan_free_hook(void *ptr) {
  (void)ptr;
}
}  // extern "C"
#endif

#endif  // ASAN_ALLOCATOR_VERSION