asan_allocator2.cc revision e3091193af47f4932b42ba1773416dfeb3aa2e87
//===-- asan_allocator2.cc ------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Implementation of ASan's memory allocator, second version.
// This variant uses the allocator from sanitizer_common, i.e. the one shared
// with ThreadSanitizer and MemorySanitizer.
//
// Status: under development, not enabled by default yet.
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"
#if ASAN_ALLOCATOR_VERSION == 2

#include "asan_mapping.h"
#include "asan_report.h"
#include "asan_thread.h"
#include "asan_thread_registry.h"
#include "sanitizer/asan_interface.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"

namespace __asan {

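// Callbacks invoked by the underlying allocator whenever it maps or unmaps
// memory from the OS: freshly mapped memory is poisoned as heap-left-redzone
// so that stray accesses to it are reported until it is handed out, unmapped
// memory has its shadow cleared, and the per-thread mmap/munmap statistics
// are updated.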
struct AsanMapUnmapCallback {
  void OnMap(uptr p, uptr size) const {
    PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
    // Statistics.
    AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
    thread_stats.mmaps++;
    thread_stats.mmaped += size;
    // thread_stats.mmaped_by_size[size_class] += n_chunks;
  }
  void OnUnmap(uptr p, uptr size) const {
    PoisonShadow(p, size, 0);
    // Statistics.
    AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
    thread_stats.munmaps++;
    thread_stats.munmaped += size;
  }
};

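// Primary allocator selection: on 64-bit targets a size-class allocator that
// owns a fixed 1 TiB region starting at kAllocatorSpace, on 32-bit targets a
// compact size-class allocator spanning the whole 4 GB address space.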
#if SANITIZER_WORDSIZE == 64
const uptr kAllocatorSpace = 0x600000000000ULL;
const uptr kAllocatorSize  =  0x10000000000ULL;  // 1T.
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0 /*metadata*/,
    DefaultSizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
#elif SANITIZER_WORDSIZE == 32
static const u64 kAddressSpaceSize = 1ULL << 32;
typedef SizeClassAllocator32<0, kAddressSpaceSize, 16,
  CompactSizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
#endif

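// The pieces are combined in the usual sanitizer_common way: small requests
// are served by the PrimaryAllocator through a per-thread AllocatorCache,
// and anything the primary cannot handle falls back to the mmap-based
// SecondaryAllocator.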
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<AsanMapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
    SecondaryAllocator> Allocator;

// We cannot use THREADLOCAL because it is not supported on some of the
// platforms we care about (OSX 10.6, Android).
// static THREADLOCAL AllocatorCache cache;
AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator2_cache));
  return reinterpret_cast<AllocatorCache *>(ms->allocator2_cache);
}

static Allocator allocator;

static const uptr kMaxAllowedMallocSize =
  FIRST_32_SECOND_64(3UL << 30, 8UL << 30);

static const uptr kMaxThreadLocalQuarantine =
  FIRST_32_SECOND_64(1 << 18, 1 << 20);

static const uptr kReturnOnZeroMalloc = 0x0123;  // Zero page is protected.

static int inited = 0;

static void Init() {
  if (inited) return;
  __asan_init();
  inited = true;  // This must happen before any threads are created.
  allocator.Init();
}

// Every chunk of memory allocated by this allocator can be in one of 3 states:
// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
// CHUNK_QUARANTINE: the chunk was freed and put into the quarantine.
enum {
  CHUNK_AVAILABLE  = 1,
  CHUNK_ALLOCATED  = 2,
  CHUNK_QUARANTINE = 3
};

// The memory chunk allocated from the underlying allocator looks like this:
// L L L L L L H H U U U U U U R R
//   L -- left redzone (0 or more bytes)
//   H -- ChunkHeader (16 bytes; see kChunkHeaderSize below).
//     ChunkHeader is also a part of the left redzone.
//   U -- user memory.
//   R -- right redzone (0 or more bytes)
// ChunkBase consists of ChunkHeader and other bytes that overlap with user
// memory.

#if SANITIZER_WORDSIZE == 64
struct ChunkBase {
  // First 8 bytes.
  uptr chunk_state       : 8;  // Must be first.
  uptr alloc_tid         : 24;
  uptr free_tid          : 24;
  uptr from_memalign     : 1;
  // Second 8 bytes.
  uptr user_requested_size;
  // Header2 (intersects with user memory).
  // Third 8 bytes. These overlap with the user memory.
  AsanChunk *next;
};

static const uptr kChunkHeaderSize = 16;
static const uptr kChunkHeader2Size = 8;

#elif SANITIZER_WORDSIZE == 32
struct ChunkBase {
  // First 8 bytes.
  uptr chunk_state       : 8;  // Must be first.
  uptr alloc_tid         : 24;
  uptr from_memalign     : 1;
  uptr free_tid          : 24;
  // Second 8 bytes.
  uptr user_requested_size;
  AsanChunk *next;
  // Header2 empty.
};

static const uptr kChunkHeaderSize = 16;
static const uptr kChunkHeader2Size = 0;
#endif
COMPILER_CHECK(sizeof(ChunkBase) == kChunkHeaderSize + kChunkHeader2Size);

static uptr ComputeRZSize(uptr user_requested_size) {
  // FIXME: implement adaptive redzones.
  return flags()->redzone;
}

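// AsanChunk adds accessors on top of ChunkBase. The compressed allocation
// stack lives in the left redzone, between the block start and the chunk
// header; the compressed free stack is written into the user area (right
// after Header2) once the chunk is freed.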
struct AsanChunk: ChunkBase {
  uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
  uptr UsedSize() { return user_requested_size; }
  // We store the alloc/free stack traces in the chunk itself.
  u32 *AllocStackBeg() {
    return (u32*)(Beg() - ComputeRZSize(UsedSize()));
  }
  uptr AllocStackSize() {
    return (ComputeRZSize(UsedSize()) - kChunkHeaderSize) / sizeof(u32);
  }
  u32 *FreeStackBeg() {
    return (u32*)(Beg() + kChunkHeader2Size);
  }
  uptr FreeStackSize() {
    uptr available = Max(RoundUpTo(UsedSize(), SHADOW_GRANULARITY),
                         ComputeRZSize(UsedSize()));
    return (available - kChunkHeader2Size) / sizeof(u32);
  }
};

uptr AsanChunkView::Beg() { return chunk_->Beg(); }
uptr AsanChunkView::End() { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }

void AsanChunkView::GetAllocStack(StackTrace *stack) {
  StackTrace::UncompressStack(stack, chunk_->AllocStackBeg(),
                              chunk_->AllocStackSize());
}

void AsanChunkView::GetFreeStack(StackTrace *stack) {
  StackTrace::UncompressStack(stack, chunk_->FreeStackBeg(),
                              chunk_->FreeStackSize());
}

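// Freed chunks are not returned to the underlying allocator right away. They
// sit, poisoned, in a FIFO quarantine until its total size exceeds
// flags()->quarantine_size; only then are the oldest chunks recycled. This
// delay is what gives use-after-free bugs a window in which they can still
// be detected. Thread-local lists are merged into this global list under
// mutex_.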
class Quarantine: public AsanChunkFifoList {
 public:
  void SwallowThreadLocalQuarantine(AsanThreadLocalMallocStorage *ms) {
    AsanChunkFifoList *q = &ms->quarantine_;
    if (!q->size()) return;
    SpinMutexLock l(&mutex_);
    PushList(q);
    PopAndDeallocateLoop(ms);
  }
  void SwallowThreadLocalCache(AllocatorCache *cache) {
    // FIXME.
  }
  void BypassThreadLocalQuarantine(AsanChunk *m) {
    SpinMutexLock l(&mutex_);
    Push(m);
  }

 private:
  void PopAndDeallocateLoop(AsanThreadLocalMallocStorage *ms) {
    while (size() > (uptr)flags()->quarantine_size) {
      PopAndDeallocate(ms);
    }
  }
  void PopAndDeallocate(AsanThreadLocalMallocStorage *ms) {
    CHECK_GT(size(), 0);
    AsanChunk *m = Pop();
    CHECK(m);
    CHECK(m->chunk_state == CHUNK_QUARANTINE);
    m->chunk_state = CHUNK_AVAILABLE;
    CHECK_NE(m->alloc_tid, kInvalidTid);
    CHECK_NE(m->free_tid, kInvalidTid);
    PoisonShadow(m->Beg(),
                 RoundUpTo(m->user_requested_size, SHADOW_GRANULARITY),
                 kAsanHeapLeftRedzoneMagic);
    uptr alloc_beg = m->Beg() - ComputeRZSize(m->user_requested_size);
    void *p = reinterpret_cast<void *>(alloc_beg);
    if (m->from_memalign)
      p = allocator.GetBlockBegin(p);
    allocator.Deallocate(GetAllocatorCache(ms), p);
  }
  SpinMutex mutex_;
};

static Quarantine quarantine;

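// AsanChunkFifoList bookkeeping: size_ tracks the total user-requested bytes
// held by the list, which is the value compared against the quarantine
// limits above.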
void AsanChunkFifoList::PushList(AsanChunkFifoList *q) {
  CHECK(q->size() > 0);
  size_ += q->size();
  append_back(q);
  q->clear();
}

void AsanChunkFifoList::Push(AsanChunk *n) {
  push_back(n);
  size_ += n->UsedSize();
}

// Interesting performance observation: this function takes up to 15% of
// overall allocator time. That's because *first_ has been evicted from cache
// a long time ago. Not sure if we can or want to do anything with this.
AsanChunk *AsanChunkFifoList::Pop() {
  CHECK(first_);
  AsanChunk *res = front();
  size_ -= res->UsedSize();
  pop_front();
  return res;
}

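// Allocate() carves the block as described in the layout comment above:
// needed_size = user size rounded up to the redzone size, plus one redzone,
// plus extra slack when the requested alignment exceeds the redzone size.
// The chunk header is written just below user_beg, the compressed allocation
// stack goes into the left redzone, and the shadow for the user region is
// unpoisoned (with a partial byte when the size is not a multiple of
// SHADOW_GRANULARITY).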
static void *Allocate(uptr size, uptr alignment, StackTrace *stack) {
  Init();
  CHECK(stack);
  if (alignment < 8) alignment = 8;
  if (size == 0)
    return reinterpret_cast<void *>(kReturnOnZeroMalloc);
  CHECK(IsPowerOfTwo(alignment));
  uptr rz_size = ComputeRZSize(size);
  uptr rounded_size = RoundUpTo(size, rz_size);
  uptr needed_size = rounded_size + rz_size;
  if (alignment > rz_size)
    needed_size += alignment;
  CHECK(IsAligned(needed_size, rz_size));
  if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
    Report("WARNING: AddressSanitizer failed to allocate %p bytes\n",
           (void*)size);
    return 0;
  }

  AsanThread *t = asanThreadRegistry().GetCurrent();
  // Printf("t = %p\n", t);
  CHECK(t);  // FIXME
  void *allocated = allocator.Allocate(
      GetAllocatorCache(&t->malloc_storage()), needed_size, 8, false);
  uptr alloc_beg = reinterpret_cast<uptr>(allocated);
  uptr alloc_end = alloc_beg + needed_size;
  uptr beg_plus_redzone = alloc_beg + rz_size;
  uptr user_beg = beg_plus_redzone;
  if (!IsAligned(user_beg, alignment))
    user_beg = RoundUpTo(user_beg, alignment);
  uptr user_end = user_beg + size;
  CHECK_LE(user_end, alloc_end);
  uptr chunk_beg = user_beg - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
  m->chunk_state = CHUNK_ALLOCATED;
  u32 alloc_tid = t ? t->tid() : 0;
  m->alloc_tid = alloc_tid;
  CHECK_EQ(alloc_tid, m->alloc_tid);  // Does alloc_tid fit into the bitfield?
  m->free_tid = kInvalidTid;
  m->from_memalign = user_beg != beg_plus_redzone;
  m->user_requested_size = size;
  StackTrace::CompressStack(stack, m->AllocStackBeg(), m->AllocStackSize());

  uptr size_rounded_down_to_granularity = RoundDownTo(size, SHADOW_GRANULARITY);
  // Unpoison the bulk of the memory region.
  if (size_rounded_down_to_granularity)
    PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
  // Deal with the end of the region if size is not aligned to granularity.
  if (size != size_rounded_down_to_granularity) {
    u8 *shadow = (u8*)MemToShadow(user_beg + size_rounded_down_to_granularity);
    *shadow = size & (SHADOW_GRANULARITY - 1);
  }

  void *res = reinterpret_cast<void *>(user_beg);
  ASAN_MALLOC_HOOK(res, size);
  return res;
}

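// Deallocate() flips chunk_state from CHUNK_ALLOCATED to CHUNK_QUARANTINE
// with a single atomic exchange, so exactly one of two racing frees wins and
// the loser is reported as a double-free (a free of a never-allocated
// address is reported as well). The winner records the free stack, poisons
// the user region with kAsanHeapFreeMagic and pushes the chunk onto the
// current thread's quarantine list, flushing that list into the global
// quarantine once it grows past kMaxThreadLocalQuarantine.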
static void Deallocate(void *ptr, StackTrace *stack) {
  uptr p = reinterpret_cast<uptr>(ptr);
  if (p == 0 || p == kReturnOnZeroMalloc) return;
  uptr chunk_beg = p - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

  // Flip the chunk_state atomically to avoid race on double-free.
  u8 old_chunk_state = atomic_exchange((atomic_uint8_t*)m, CHUNK_QUARANTINE,
                                       memory_order_acq_rel);

  if (old_chunk_state == CHUNK_QUARANTINE)
    ReportDoubleFree((uptr)ptr, stack);
  else if (old_chunk_state != CHUNK_ALLOCATED)
    ReportFreeNotMalloced((uptr)ptr, stack);
  CHECK(old_chunk_state == CHUNK_ALLOCATED);

  CHECK_GE(m->alloc_tid, 0);
  if (SANITIZER_WORDSIZE == 64)  // On 32-bits this resides in user area.
    CHECK_EQ(m->free_tid, kInvalidTid);
  AsanThread *t = asanThreadRegistry().GetCurrent();
  m->free_tid = t ? t->tid() : 0;
  StackTrace::CompressStack(stack, m->FreeStackBeg(), m->FreeStackSize());
  CHECK(m->chunk_state == CHUNK_QUARANTINE);
  // Poison the region.
  PoisonShadow(m->Beg(),
               RoundUpTo(m->user_requested_size, SHADOW_GRANULARITY),
               kAsanHeapFreeMagic);

  // Push into quarantine.
  if (t) {
    AsanChunkFifoList &q = t->malloc_storage().quarantine_;
    q.Push(m);

    if (q.size() > kMaxThreadLocalQuarantine)
      quarantine.SwallowThreadLocalQuarantine(&t->malloc_storage());
  } else {
    quarantine.BypassThreadLocalQuarantine(m);
  }

  ASAN_FREE_HOOK(ptr);
}

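// Reallocate() is implemented as allocate-copy-free: a new chunk is
// obtained, Min(old, new) bytes are copied over, and only then is the old
// chunk sent to the quarantine. The old block is left untouched if the new
// allocation fails.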
static void *Reallocate(void *old_ptr, uptr new_size, StackTrace *stack) {
  CHECK(old_ptr && new_size);
  uptr p = reinterpret_cast<uptr>(old_ptr);
  uptr chunk_beg = p - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

  CHECK(m->chunk_state == CHUNK_ALLOCATED);
  uptr old_size = m->UsedSize();
  uptr memcpy_size = Min(new_size, old_size);
  void *new_ptr = Allocate(new_size, 8, stack);
  if (new_ptr) {
    CHECK(REAL(memcpy) != 0);
    REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
    Deallocate(old_ptr, stack);
  }
  return new_ptr;
}

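// Maps an arbitrary address inside a heap block back to its AsanChunk header
// by asking the allocator for the beginning of the containing block. As the
// FIXME below notes, this does not yet account for memalign'ed chunks, whose
// header is not at a fixed offset from the block start.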
static AsanChunk *GetAsanChunkByAddr(uptr p) {
  uptr alloc_beg = reinterpret_cast<uptr>(
      allocator.GetBlockBegin(reinterpret_cast<void *>(p)));
  if (!alloc_beg) return 0;
  // FIXME: this does not take into account memalign.
  uptr chunk_beg = alloc_beg + ComputeRZSize(0) - kChunkHeaderSize;
  return reinterpret_cast<AsanChunk *>(chunk_beg);
}

static uptr AllocationSize(uptr p) {
  AsanChunk *m = GetAsanChunkByAddr(p);
  if (!m) return 0;
  if (m->chunk_state != CHUNK_ALLOCATED) return 0;
  if (m->Beg() != p) return 0;
  return m->UsedSize();
}

// We have an address between two chunks, and we want to report just one.
AsanChunk *ChooseChunk(uptr addr,
                       AsanChunk *left_chunk, AsanChunk *right_chunk) {
  // Prefer an allocated chunk or a chunk from quarantine.
  if (left_chunk->chunk_state == CHUNK_AVAILABLE &&
      right_chunk->chunk_state != CHUNK_AVAILABLE)
    return right_chunk;
  if (right_chunk->chunk_state == CHUNK_AVAILABLE &&
      left_chunk->chunk_state != CHUNK_AVAILABLE)
    return left_chunk;
  // Choose based on offset.
  uptr l_offset = 0, r_offset = 0;
  CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
  CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
  if (l_offset < r_offset)
    return left_chunk;
  return right_chunk;
}

AsanChunkView FindHeapChunkByAddress(uptr addr) {
  AsanChunk *m1 = GetAsanChunkByAddr(addr);
  if (!m1) return AsanChunkView(m1);
  uptr offset = 0;
  if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
    // The address is in the chunk's left redzone, so maybe it is actually
    // a right buffer overflow from the other chunk to the left.
    // Search a bit to the left to see if there is another chunk.
    AsanChunk *m2 = 0;
    for (uptr l = 1; l < GetPageSizeCached(); l++) {
      m2 = GetAsanChunkByAddr(addr - l);
      if (m2 == m1) continue;  // Still the same chunk.
      Printf("m1 %p m2 %p l %zd\n", m1, m2, l);
      break;
    }
    if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
      m1 = ChooseChunk(addr, m2, m1);
  }
  return AsanChunkView(m1);
}

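// Flushes a thread's malloc storage back to the global state (typically when
// the thread is being destroyed): its pending quarantine is merged into the
// global quarantine, and (once SwallowThreadLocalCache is implemented) its
// allocator cache will be drained as well.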
void AsanThreadLocalMallocStorage::CommitBack() {
  quarantine.SwallowThreadLocalQuarantine(this);
  quarantine.SwallowThreadLocalCache(GetAllocatorCache(this));
}

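// Entry points used by the malloc/free interceptors and the rest of the
// run-time: each simply forwards to Allocate/Deallocate, passing along the
// stack trace captured at the call site.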
SANITIZER_INTERFACE_ATTRIBUTE
void *asan_memalign(uptr alignment, uptr size, StackTrace *stack) {
  return Allocate(size, alignment, stack);
}

SANITIZER_INTERFACE_ATTRIBUTE
void asan_free(void *ptr, StackTrace *stack) {
  Deallocate(ptr, stack);
}

SANITIZER_INTERFACE_ATTRIBUTE
void *asan_malloc(uptr size, StackTrace *stack) {
  return Allocate(size, 8, stack);
}

void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
  void *ptr = Allocate(nmemb * size, 8, stack);
  if (ptr)
    REAL(memset)(ptr, 0, nmemb * size);
  return ptr;
}

void *asan_realloc(void *p, uptr size, StackTrace *stack) {
  if (p == 0)
    return Allocate(size, 8, stack);
  if (size == 0) {
    Deallocate(p, stack);
    return 0;
  }
  return Reallocate(p, size, stack);
}

void *asan_valloc(uptr size, StackTrace *stack) {
  return Allocate(size, GetPageSizeCached(), stack);
}

void *asan_pvalloc(uptr size, StackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  size = RoundUpTo(size, PageSize);
  if (size == 0) {
    // pvalloc(0) should allocate one page.
    size = PageSize;
  }
  return Allocate(size, PageSize, stack);
}

int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        StackTrace *stack) {
  void *ptr = Allocate(size, alignment, stack);
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

uptr asan_malloc_usable_size(void *ptr, StackTrace *stack) {
  CHECK(stack);
  if (ptr == 0) return 0;
  uptr usable_size = AllocationSize(reinterpret_cast<uptr>(ptr));
  if (flags()->check_malloc_usable_size && (usable_size == 0))
    ReportMallocUsableSizeNotOwned((uptr)ptr, stack);
  return usable_size;
}

uptr asan_mz_size(const void *ptr) {
  UNIMPLEMENTED();
  return 0;
}

void asan_mz_force_lock() {
  UNIMPLEMENTED();
}

void asan_mz_force_unlock() {
  UNIMPLEMENTED();
}

}  // namespace __asan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;  // NOLINT

// ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size".
uptr __asan_get_estimated_allocated_size(uptr size) {
  UNIMPLEMENTED();
  return 0;
}

bool __asan_get_ownership(const void *p) {
  UNIMPLEMENTED();
  return false;
}

uptr __asan_get_allocated_size(const void *p) {
  UNIMPLEMENTED();
  return 0;
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
extern "C" {
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
void __asan_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
void __asan_free_hook(void *ptr) {
  (void)ptr;
}
}  // extern "C"
#endif


#endif  // ASAN_ALLOCATOR_VERSION