asan_allocator.cc revision f7c1d18183d2dfbd02864cf47b3239d6a5d717c0
//===-- asan_allocator.cc -------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Implementation of ASan's memory allocator.
// Every piece of memory (AsanChunk) allocated by the allocator
// has a left redzone of REDZONE bytes and
// a right redzone such that the end of the chunk is aligned by REDZONE
// (i.e. the right redzone is between 0 and REDZONE-1 bytes).
// The left redzone is always poisoned.
// The right redzone is poisoned on malloc, the body is poisoned on free.
// Once freed, a chunk is moved to the quarantine (a FIFO list).
// After quarantine, a chunk is returned to the freelists.
//
// The left redzone contains ASan's internal data and the stack trace of
// the malloc call.
// Once freed, the body of the chunk contains the stack trace of the free call.
//
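// A rough sketch of the chunk layout (sizes not to scale; see ChunkBase and
// Allocate() below for the exact field placement):
//
//   [ ChunkBase + compressed alloc stack ][  user memory  ][ right redzone ]
//   |<----------- REDZONE bytes --------->|<- used_size ->|
//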
//===----------------------------------------------------------------------===//

#include "asan_allocator.h"
#include "asan_interceptors.h"
#include "asan_interface.h"
#include "asan_internal.h"
#include "asan_lock.h"
#include "asan_mapping.h"
#include "asan_stats.h"
#include "asan_report.h"
#include "asan_thread.h"
#include "asan_thread_registry.h"
#include "sanitizer_common/sanitizer_atomic.h"

#if defined(_WIN32) && !defined(__clang__)
#include <intrin.h>
#endif

namespace __asan {

#define REDZONE ((uptr)(flags()->redzone))
static const uptr kMinAllocSize = REDZONE * 2;
static const u64 kMaxAvailableRam = 128ULL << 30;  // 128G
static const uptr kMaxThreadLocalQuarantine = 1 << 20;  // 1M

static const uptr kMinMmapSize = (ASAN_LOW_MEMORY) ? 4UL << 17 : 4UL << 20;
static const uptr kMaxSizeForThreadLocalFreeList =
    (ASAN_LOW_MEMORY) ? 1 << 15 : 1 << 17;

// Size classes less than kMallocSizeClassStep are powers of two.
// All other size classes are multiples of kMallocSizeClassStep.
static const uptr kMallocSizeClassStepLog = 26;
static const uptr kMallocSizeClassStep = 1UL << kMallocSizeClassStepLog;

static const uptr kMaxAllowedMallocSize =
    (__WORDSIZE == 32) ? 3UL << 30 : 8UL << 30;

static inline bool IsAligned(uptr a, uptr alignment) {
  return (a & (alignment - 1)) == 0;
}

static inline uptr Log2(uptr x) {
  CHECK(IsPowerOfTwo(x));
#if !defined(_WIN32) || defined(__clang__)
  return __builtin_ctzl(x);
#elif defined(_WIN64)
  unsigned long ret;  // NOLINT
  _BitScanForward64(&ret, x);
  return ret;
#else
  unsigned long ret;  // NOLINT
  _BitScanForward(&ret, x);
  return ret;
#endif
}

static inline uptr RoundUpToPowerOfTwo(uptr size) {
  CHECK(size);
  if (IsPowerOfTwo(size)) return size;

  unsigned long up;  // NOLINT
#if !defined(_WIN32) || defined(__clang__)
  up = __WORDSIZE - 1 - __builtin_clzl(size);
#elif defined(_WIN64)
  _BitScanReverse64(&up, size);
#else
  _BitScanReverse(&up, size);
#endif
  CHECK(size < (1ULL << (up + 1)));
  CHECK(size > (1ULL << up));
  return 1UL << (up + 1);
}

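// Maps a size class index back to the chunk size it represents: classes up to
// kMallocSizeClassStepLog are powers of two (class k is 1 << k bytes), larger
// classes are consecutive multiples of kMallocSizeClassStep.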
static inline uptr SizeClassToSize(u8 size_class) {
  CHECK(size_class < kNumberOfSizeClasses);
  if (size_class <= kMallocSizeClassStepLog) {
    return 1UL << size_class;
  } else {
    return (size_class - kMallocSizeClassStepLog) * kMallocSizeClassStep;
  }
}

static inline u8 SizeToSizeClass(uptr size) {
  u8 res = 0;
  if (size <= kMallocSizeClassStep) {
    uptr rounded = RoundUpToPowerOfTwo(size);
    res = Log2(rounded);
  } else {
    res = ((size + kMallocSizeClassStep - 1) / kMallocSizeClassStep)
        + kMallocSizeClassStepLog;
  }
  CHECK(res < kNumberOfSizeClasses);
  CHECK(size <= SizeClassToSize(res));
  return res;
}

// Given REDZONE bytes, we need to mark the first 'size' bytes
// as addressable and the remaining REDZONE-size bytes as unaddressable.
static void PoisonHeapPartialRightRedzone(uptr mem, uptr size) {
  CHECK(size <= REDZONE);
  CHECK(IsAligned(mem, REDZONE));
  CHECK(IsPowerOfTwo(SHADOW_GRANULARITY));
  CHECK(IsPowerOfTwo(REDZONE));
  CHECK(REDZONE >= SHADOW_GRANULARITY);
  PoisonShadowPartialRightRedzone(mem, size, REDZONE,
                                  kAsanHeapRightRedzoneMagic);
}

static u8 *MmapNewPagesAndPoisonShadow(uptr size) {
  CHECK(IsAligned(size, kPageSize));
  u8 *res = (u8*)MmapOrDie(size, __FUNCTION__);
  PoisonShadow((uptr)res, size, kAsanHeapLeftRedzoneMagic);
  if (flags()->debug) {
    Printf("ASAN_MMAP: [%p, %p)\n", res, res + size);
  }
  return res;
}

// Every chunk of memory allocated by this allocator can be in one of 3 states:
// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
// CHUNK_QUARANTINE: the chunk was freed and put into the quarantine zone.
//
// The pseudo state CHUNK_MEMALIGN is used to mark that the address is not
// the beginning of an AsanChunk (in which case the actual chunk resides at
// this - this->used_size).
//
// The magic numbers for the enum values are taken randomly.
enum {
  CHUNK_AVAILABLE  = 0x57,
  CHUNK_ALLOCATED  = 0x32,
  CHUNK_QUARANTINE = 0x19,
  CHUNK_MEMALIGN   = 0xDC
};

struct ChunkBase {
  // First 8 bytes.
  uptr  chunk_state : 8;
  uptr  alloc_tid   : 24;
  uptr  size_class  : 8;
  uptr  free_tid    : 24;

  // Second 8 bytes.
  uptr alignment_log : 8;
  uptr used_size : FIRST_32_SECOND_64(32, 56);  // Size requested by the user.

  // This field may overlap with the user area and thus should not
  // be used while the chunk is in CHUNK_ALLOCATED state.
  AsanChunk *next;

  // Typically the beginning of the user-accessible memory is 'this'+REDZONE
  // and is also aligned by REDZONE. However, if the memory is allocated
  // by memalign, the alignment might be higher and the user-accessible memory
  // starts at the first properly aligned address after 'this'.
  uptr Beg() { return RoundUpTo((uptr)this + 1, 1 << alignment_log); }
  uptr Size() { return SizeClassToSize(size_class); }
  u8 SizeClass() { return size_class; }
};

struct AsanChunk: public ChunkBase {
  u32 *compressed_alloc_stack() {
    return (u32*)((uptr)this + sizeof(ChunkBase));
  }
  u32 *compressed_free_stack() {
    return (u32*)((uptr)this + Max((uptr)REDZONE, (uptr)sizeof(ChunkBase)));
  }

  // The left redzone after the ChunkBase is given to the alloc stack trace.
  uptr compressed_alloc_stack_size() {
    if (REDZONE < sizeof(ChunkBase)) return 0;
    return (REDZONE - sizeof(ChunkBase)) / sizeof(u32);
  }
  uptr compressed_free_stack_size() {
    if (REDZONE < sizeof(ChunkBase)) return 0;
    return (REDZONE) / sizeof(u32);
  }

  bool AddrIsInside(uptr addr, uptr access_size, uptr *offset) {
    if (addr >= Beg() && (addr + access_size) <= (Beg() + used_size)) {
      *offset = addr - Beg();
      return true;
    }
    return false;
  }

  bool AddrIsAtLeft(uptr addr, uptr access_size, uptr *offset) {
    if (addr < Beg()) {
      *offset = Beg() - addr;
      return true;
    }
    return false;
  }

  bool AddrIsAtRight(uptr addr, uptr access_size, uptr *offset) {
    if (addr + access_size >= Beg() + used_size) {
      if (addr <= Beg() + used_size)
        *offset = 0;
      else
        *offset = addr - (Beg() + used_size);
      return true;
    }
    return false;
  }

  void DescribeAddress(uptr addr, uptr access_size) {
    uptr offset;
    AsanPrintf("%p is located ", (void*)addr);
    if (AddrIsInside(addr, access_size, &offset)) {
      AsanPrintf("%zu bytes inside of", offset);
    } else if (AddrIsAtLeft(addr, access_size, &offset)) {
      AsanPrintf("%zu bytes to the left of", offset);
    } else if (AddrIsAtRight(addr, access_size, &offset)) {
      AsanPrintf("%zu bytes to the right of", offset);
    } else {
      AsanPrintf(" somewhere around (this is an AddressSanitizer bug!)");
    }
    AsanPrintf(" %zu-byte region [%p,%p)\n",
               used_size, (void*)Beg(), (void*)(Beg() + used_size));
  }
};

static AsanChunk *PtrToChunk(uptr ptr) {
  AsanChunk *m = (AsanChunk*)(ptr - REDZONE);
  if (m->chunk_state == CHUNK_MEMALIGN) {
    m = (AsanChunk*)((uptr)m - m->used_size);
  }
  return m;
}

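// A simple singly-linked FIFO of chunks. size() is the total number of bytes
// in the queued chunks (chunk sizes, not user sizes), which is what the
// quarantine limits are checked against.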
void AsanChunkFifoList::PushList(AsanChunkFifoList *q) {
  CHECK(q->size() > 0);
  if (last_) {
    CHECK(first_);
    CHECK(!last_->next);
    last_->next = q->first_;
    last_ = q->last_;
  } else {
    CHECK(!first_);
    last_ = q->last_;
    first_ = q->first_;
    CHECK(first_);
  }
  CHECK(last_);
  CHECK(!last_->next);
  size_ += q->size();
  q->clear();
}

void AsanChunkFifoList::Push(AsanChunk *n) {
  CHECK(n->next == 0);
  if (last_) {
    CHECK(first_);
    CHECK(!last_->next);
    last_->next = n;
    last_ = n;
  } else {
    CHECK(!first_);
    last_ = first_ = n;
  }
  size_ += n->Size();
}

// Interesting performance observation: this function takes up to 15% of
// overall allocator time. That's because *first_ has been evicted from cache
// a long time ago. Not sure if we can or want to do anything about this.
AsanChunk *AsanChunkFifoList::Pop() {
  CHECK(first_);
  AsanChunk *res = first_;
  first_ = first_->next;
  if (first_ == 0)
    last_ = 0;
  CHECK(size_ >= res->Size());
  size_ -= res->Size();
  if (last_) {
    CHECK(!last_->next);
  }
  return res;
}

// Describes one contiguous region of pages obtained from a single mmap call;
// all pages we ever allocated are covered by such page groups.
struct PageGroup {
  uptr beg;
  uptr end;
  uptr size_of_chunk;
  uptr last_chunk;
  bool InRange(uptr addr) {
    return addr >= beg && addr < end;
  }
};

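// MallocInfo is the process-global allocator state: per-size-class free
// lists, the global quarantine and the registry of all mmaped page groups.
// All of it is protected by mu_.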
class MallocInfo {
 public:

  explicit MallocInfo(LinkerInitialized x) : mu_(x) { }

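  // Returns a singly-linked list of n_chunks chunks of the given size class,
  // taking them from the global free list and refilling it from mmap when it
  // runs dry.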
  AsanChunk *AllocateChunks(u8 size_class, uptr n_chunks) {
    AsanChunk *m = 0;
    AsanChunk **fl = &free_lists_[size_class];
    {
      ScopedLock lock(&mu_);
      for (uptr i = 0; i < n_chunks; i++) {
        if (!(*fl)) {
          *fl = GetNewChunks(size_class);
        }
        AsanChunk *t = *fl;
        *fl = t->next;
        t->next = m;
        CHECK(t->chunk_state == CHUNK_AVAILABLE);
        m = t;
      }
    }
    return m;
  }

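  // Merges a thread's local quarantine into the global one, trimming the
  // global quarantine down to flags()->quarantine_size, and (when
  // eat_free_lists is set, e.g. on thread exit) moves the thread-local free
  // lists into the global free lists.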
  void SwallowThreadLocalMallocStorage(AsanThreadLocalMallocStorage *x,
                                       bool eat_free_lists) {
    CHECK(flags()->quarantine_size > 0);
    ScopedLock lock(&mu_);
    AsanChunkFifoList *q = &x->quarantine_;
    if (q->size() > 0) {
      quarantine_.PushList(q);
      while (quarantine_.size() > (uptr)flags()->quarantine_size) {
        QuarantinePop();
      }
    }
    if (eat_free_lists) {
      for (uptr size_class = 0; size_class < kNumberOfSizeClasses;
           size_class++) {
        AsanChunk *m = x->free_lists_[size_class];
        while (m) {
          AsanChunk *t = m->next;
          m->next = free_lists_[size_class];
          free_lists_[size_class] = m;
          m = t;
        }
        x->free_lists_[size_class] = 0;
      }
    }
  }

  void BypassThreadLocalQuarantine(AsanChunk *chunk) {
    ScopedLock lock(&mu_);
    quarantine_.Push(chunk);
  }

  AsanChunk *FindMallocedOrFreed(uptr addr, uptr access_size) {
    ScopedLock lock(&mu_);
    return FindChunkByAddr(addr);
  }

  uptr AllocationSize(uptr ptr) {
    if (!ptr) return 0;
    ScopedLock lock(&mu_);

    // Make sure this is our chunk and |ptr| actually points to the beginning
    // of the allocated memory.
    AsanChunk *m = FindChunkByAddr(ptr);
    if (!m || m->Beg() != ptr) return 0;

    if (m->chunk_state == CHUNK_ALLOCATED) {
      return m->used_size;
    } else {
      return 0;
    }
  }

  void ForceLock() {
    mu_.Lock();
  }

  void ForceUnlock() {
    mu_.Unlock();
  }

  void PrintStatus() {
    ScopedLock lock(&mu_);
    uptr malloced = 0;

    Printf(" MallocInfo: in quarantine: %zu malloced: %zu; ",
           quarantine_.size() >> 20, malloced >> 20);
    for (uptr j = 1; j < kNumberOfSizeClasses; j++) {
      AsanChunk *i = free_lists_[j];
      if (!i) continue;
      uptr t = 0;
      for (; i; i = i->next) {
        t += i->Size();
      }
      Printf("%zu:%zu ", j, t >> 20);
    }
    Printf("\n");
  }

  PageGroup *FindPageGroup(uptr addr) {
    ScopedLock lock(&mu_);
    return FindPageGroupUnlocked(addr);
  }

 private:
  PageGroup *FindPageGroupUnlocked(uptr addr) {
    int n = atomic_load(&n_page_groups_, memory_order_relaxed);
    // If the page groups are not sorted yet, sort them.
    if (n_sorted_page_groups_ < n) {
      SortArray((uptr*)page_groups_, n);
      n_sorted_page_groups_ = n;
    }
    // Binary search over the page groups.
    int beg = 0, end = n;
    while (beg < end) {
      int med = (beg + end) / 2;
      uptr g = (uptr)page_groups_[med];
      if (addr > g) {
        // 'g' points to the end of the group, so 'addr'
        // may not belong to page_groups_[med] or any previous group.
        beg = med + 1;
      } else {
        // 'addr' may belong to page_groups_[med] or a previous group.
        end = med;
      }
    }
    if (beg >= n)
      return 0;
    PageGroup *g = page_groups_[beg];
    CHECK(g);
    if (g->InRange(addr))
      return g;
    return 0;
  }

  // We have an address between two chunks, and we want to report just one.
  AsanChunk *ChooseChunk(uptr addr,
                         AsanChunk *left_chunk, AsanChunk *right_chunk) {
    // Prefer an allocated chunk or a chunk from quarantine.
    if (left_chunk->chunk_state == CHUNK_AVAILABLE &&
        right_chunk->chunk_state != CHUNK_AVAILABLE)
      return right_chunk;
    if (right_chunk->chunk_state == CHUNK_AVAILABLE &&
        left_chunk->chunk_state != CHUNK_AVAILABLE)
      return left_chunk;
    // Choose based on offset.
    uptr l_offset = 0, r_offset = 0;
    CHECK(left_chunk->AddrIsAtRight(addr, 1, &l_offset));
    CHECK(right_chunk->AddrIsAtLeft(addr, 1, &r_offset));
    if (l_offset < r_offset)
      return left_chunk;
    return right_chunk;
  }

  AsanChunk *FindChunkByAddr(uptr addr) {
    PageGroup *g = FindPageGroupUnlocked(addr);
    if (!g) return 0;
    CHECK(g->size_of_chunk);
    uptr offset_from_beg = addr - g->beg;
    uptr this_chunk_addr = g->beg +
        (offset_from_beg / g->size_of_chunk) * g->size_of_chunk;
    CHECK(g->InRange(this_chunk_addr));
    AsanChunk *m = (AsanChunk*)this_chunk_addr;
    CHECK(m->chunk_state == CHUNK_ALLOCATED ||
          m->chunk_state == CHUNK_AVAILABLE ||
          m->chunk_state == CHUNK_QUARANTINE);
    uptr offset = 0;
    if (m->AddrIsInside(addr, 1, &offset))
      return m;

    if (m->AddrIsAtRight(addr, 1, &offset)) {
      if (this_chunk_addr == g->last_chunk)  // rightmost chunk
        return m;
      uptr right_chunk_addr = this_chunk_addr + g->size_of_chunk;
      CHECK(g->InRange(right_chunk_addr));
      return ChooseChunk(addr, m, (AsanChunk*)right_chunk_addr);
    } else {
      CHECK(m->AddrIsAtLeft(addr, 1, &offset));
      if (this_chunk_addr == g->beg)  // leftmost chunk
        return m;
      uptr left_chunk_addr = this_chunk_addr - g->size_of_chunk;
      CHECK(g->InRange(left_chunk_addr));
      return ChooseChunk(addr, (AsanChunk*)left_chunk_addr, m);
    }
  }

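  // Takes the oldest chunk out of the quarantine, re-poisons it as heap
  // left redzone and returns it to the free list of its size class.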
  void QuarantinePop() {
    CHECK(quarantine_.size() > 0);
    AsanChunk *m = quarantine_.Pop();
    CHECK(m);
    // if (F_v >= 2) Printf("MallocInfo::pop %p\n", m);

    CHECK(m->chunk_state == CHUNK_QUARANTINE);
    m->chunk_state = CHUNK_AVAILABLE;
    PoisonShadow((uptr)m, m->Size(), kAsanHeapLeftRedzoneMagic);
    CHECK(m->alloc_tid >= 0);
    CHECK(m->free_tid >= 0);

    uptr size_class = m->SizeClass();
    m->next = free_lists_[size_class];
    free_lists_[size_class] = m;

    // Statistics.
    AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
    thread_stats.real_frees++;
    thread_stats.really_freed += m->used_size;
    thread_stats.really_freed_redzones += m->Size() - m->used_size;
    thread_stats.really_freed_by_size[m->SizeClass()]++;
  }

  // Get a list of newly allocated chunks.
  AsanChunk *GetNewChunks(u8 size_class) {
    uptr size = SizeClassToSize(size_class);
    CHECK(IsPowerOfTwo(kMinMmapSize));
    CHECK(size < kMinMmapSize || (size % kMinMmapSize) == 0);
    uptr mmap_size = Max(size, kMinMmapSize);
    uptr n_chunks = mmap_size / size;
    CHECK(n_chunks * size == mmap_size);
    if (size < kPageSize) {
      // Size is small, just poison the last chunk.
      n_chunks--;
    } else {
      // Size is large, allocate an extra page at the right and poison it.
      mmap_size += kPageSize;
    }
    CHECK(n_chunks > 0);
    u8 *mem = MmapNewPagesAndPoisonShadow(mmap_size);

    // Statistics.
    AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
    thread_stats.mmaps++;
    thread_stats.mmaped += mmap_size;
    thread_stats.mmaped_by_size[size_class] += n_chunks;

    AsanChunk *res = 0;
    for (uptr i = 0; i < n_chunks; i++) {
      AsanChunk *m = (AsanChunk*)(mem + i * size);
      m->chunk_state = CHUNK_AVAILABLE;
      m->size_class = size_class;
      m->next = res;
      res = m;
    }
    PageGroup *pg = (PageGroup*)(mem + n_chunks * size);
    // This memory is already poisoned, no need to poison it again.
    pg->beg = (uptr)mem;
    pg->end = pg->beg + mmap_size;
    pg->size_of_chunk = size;
    pg->last_chunk = (uptr)(mem + size * (n_chunks - 1));
    int idx = atomic_fetch_add(&n_page_groups_, 1, memory_order_relaxed);
    CHECK(idx < (int)ASAN_ARRAY_SIZE(page_groups_));
    page_groups_[idx] = pg;
    return res;
  }

  AsanChunk *free_lists_[kNumberOfSizeClasses];
  AsanChunkFifoList quarantine_;
  AsanLock mu_;

  PageGroup *page_groups_[kMaxAvailableRam / kMinMmapSize];
  atomic_uint32_t n_page_groups_;
  int n_sorted_page_groups_;
};

static MallocInfo malloc_info(LINKER_INITIALIZED);

void AsanThreadLocalMallocStorage::CommitBack() {
  malloc_info.SwallowThreadLocalMallocStorage(this, true);
}

void DescribeHeapAddress(uptr addr, uptr access_size) {
  AsanChunk *m = malloc_info.FindMallocedOrFreed(addr, access_size);
  if (!m) return;
  m->DescribeAddress(addr, access_size);
  CHECK(m->alloc_tid >= 0);
  AsanThreadSummary *alloc_thread =
      asanThreadRegistry().FindByTid(m->alloc_tid);
  AsanStackTrace alloc_stack;
  AsanStackTrace::UncompressStack(&alloc_stack, m->compressed_alloc_stack(),
                                  m->compressed_alloc_stack_size());
  AsanThread *t = asanThreadRegistry().GetCurrent();
  CHECK(t);
  if (m->free_tid != kInvalidTid) {
    AsanThreadSummary *free_thread =
        asanThreadRegistry().FindByTid(m->free_tid);
    AsanPrintf("freed by thread T%d here:\n", free_thread->tid());
    AsanStackTrace free_stack;
    AsanStackTrace::UncompressStack(&free_stack, m->compressed_free_stack(),
                                    m->compressed_free_stack_size());
    free_stack.PrintStack();
    AsanPrintf("previously allocated by thread T%d here:\n",
               alloc_thread->tid());

    alloc_stack.PrintStack();
    t->summary()->Announce();
    free_thread->Announce();
    alloc_thread->Announce();
  } else {
    AsanPrintf("allocated by thread T%d here:\n", alloc_thread->tid());
    alloc_stack.PrintStack();
    t->summary()->Announce();
    alloc_thread->Announce();
  }
}

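// Computes the chunk layout for the requested size and alignment, takes a
// chunk from the current thread's free list (or from the global allocator for
// large sizes), fills in the chunk header and the compressed alloc stack,
// then unpoisons the user memory and poisons the partial right redzone.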
static u8 *Allocate(uptr alignment, uptr size, AsanStackTrace *stack) {
  __asan_init();
  CHECK(stack);
  if (size == 0) {
    size = 1;  // TODO(kcc): do something smarter
  }
  CHECK(IsPowerOfTwo(alignment));
  uptr rounded_size = RoundUpTo(size, REDZONE);
  uptr needed_size = rounded_size + REDZONE;
  if (alignment > REDZONE) {
    needed_size += alignment;
  }
  CHECK(IsAligned(needed_size, REDZONE));
  if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
    Report("WARNING: AddressSanitizer failed to allocate %p bytes\n",
           (void*)size);
    return 0;
  }

  u8 size_class = SizeToSizeClass(needed_size);
  uptr size_to_allocate = SizeClassToSize(size_class);
  CHECK(size_to_allocate >= kMinAllocSize);
  CHECK(size_to_allocate >= needed_size);
  CHECK(IsAligned(size_to_allocate, REDZONE));

  if (flags()->verbosity >= 3) {
    Printf("Allocate align: %zu size: %zu class: %u real: %zu\n",
         alignment, size, size_class, size_to_allocate);
  }

  AsanThread *t = asanThreadRegistry().GetCurrent();
  AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
  // Statistics
  thread_stats.mallocs++;
  thread_stats.malloced += size;
  thread_stats.malloced_redzones += size_to_allocate - size;
  thread_stats.malloced_by_size[size_class]++;

  AsanChunk *m = 0;
  if (!t || size_to_allocate >= kMaxSizeForThreadLocalFreeList) {
    // get directly from global storage.
    m = malloc_info.AllocateChunks(size_class, 1);
    thread_stats.malloc_large++;
  } else {
    // get from the thread-local storage.
    AsanChunk **fl = &t->malloc_storage().free_lists_[size_class];
    if (!*fl) {
      uptr n_new_chunks = kMaxSizeForThreadLocalFreeList / size_to_allocate;
      *fl = malloc_info.AllocateChunks(size_class, n_new_chunks);
      thread_stats.malloc_small_slow++;
    }
    m = *fl;
    *fl = (*fl)->next;
  }
  CHECK(m);
  CHECK(m->chunk_state == CHUNK_AVAILABLE);
  m->chunk_state = CHUNK_ALLOCATED;
  m->next = 0;
  CHECK(m->Size() == size_to_allocate);
  uptr addr = (uptr)m + REDZONE;
  CHECK(addr <= (uptr)m->compressed_free_stack());

  if (alignment > REDZONE && (addr & (alignment - 1))) {
    addr = RoundUpTo(addr, alignment);
    CHECK((addr & (alignment - 1)) == 0);
    AsanChunk *p = (AsanChunk*)(addr - REDZONE);
    p->chunk_state = CHUNK_MEMALIGN;
    p->used_size = (uptr)p - (uptr)m;
    m->alignment_log = Log2(alignment);
    CHECK(m->Beg() == addr);
  } else {
    m->alignment_log = Log2(REDZONE);
  }
  CHECK(m == PtrToChunk(addr));
  m->used_size = size;
  CHECK(m->Beg() == addr);
  m->alloc_tid = t ? t->tid() : 0;
  m->free_tid   = kInvalidTid;
  AsanStackTrace::CompressStack(stack, m->compressed_alloc_stack(),
                                m->compressed_alloc_stack_size());
  PoisonShadow(addr, rounded_size, 0);
  if (size < rounded_size) {
    PoisonHeapPartialRightRedzone(addr + rounded_size - REDZONE,
                                  size & (REDZONE - 1));
  }
  if (size <= (uptr)(flags()->max_malloc_fill_size)) {
    REAL(memset)((void*)addr, 0, rounded_size);
  }
  return (u8*)addr;
}

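// Flips the chunk state to CHUNK_QUARANTINE with an atomic exchange (which
// also detects double-free and free-of-non-malloced memory), records the free
// stack, poisons the user memory and pushes the chunk to the thread-local
// quarantine (or straight to the global one if there is no current thread).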
static void Deallocate(u8 *ptr, AsanStackTrace *stack) {
  if (!ptr) return;
  CHECK(stack);

  if (flags()->debug) {
    CHECK(malloc_info.FindPageGroup((uptr)ptr));
  }

  // Printf("Deallocate %p\n", ptr);
  AsanChunk *m = PtrToChunk((uptr)ptr);

  // Flip the chunk_state atomically to avoid a race on double-free.
  u8 old_chunk_state = atomic_exchange((atomic_uint8_t*)m, CHUNK_QUARANTINE,
                                       memory_order_acq_rel);

  if (old_chunk_state == CHUNK_QUARANTINE) {
    ReportDoubleFree((uptr)ptr, stack);
  } else if (old_chunk_state != CHUNK_ALLOCATED) {
    ReportFreeNotMalloced((uptr)ptr, stack);
  }
  CHECK(old_chunk_state == CHUNK_ALLOCATED);
  // With REDZONE==16 m->next is in the user area, otherwise it should be 0.
  CHECK(REDZONE <= 16 || !m->next);
  CHECK(m->free_tid == kInvalidTid);
  CHECK(m->alloc_tid >= 0);
  AsanThread *t = asanThreadRegistry().GetCurrent();
  m->free_tid = t ? t->tid() : 0;
  AsanStackTrace::CompressStack(stack, m->compressed_free_stack(),
                                m->compressed_free_stack_size());
  uptr rounded_size = RoundUpTo(m->used_size, REDZONE);
  PoisonShadow((uptr)ptr, rounded_size, kAsanHeapFreeMagic);

  // Statistics.
  AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
  thread_stats.frees++;
  thread_stats.freed += m->used_size;
  thread_stats.freed_by_size[m->SizeClass()]++;

  CHECK(m->chunk_state == CHUNK_QUARANTINE);

  if (t) {
    AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
    ms->quarantine_.Push(m);

    if (ms->quarantine_.size() > kMaxThreadLocalQuarantine) {
      malloc_info.SwallowThreadLocalMallocStorage(ms, false);
    }
  } else {
    malloc_info.BypassThreadLocalQuarantine(m);
  }
}

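// realloc() is implemented as allocate-copy-free: there is no in-place
// growing, so the old contents are copied into a fresh chunk and the old
// chunk goes through the usual quarantine path.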
static u8 *Reallocate(u8 *old_ptr, uptr new_size,
                           AsanStackTrace *stack) {
  CHECK(old_ptr && new_size);

  // Statistics.
  AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
  thread_stats.reallocs++;
  thread_stats.realloced += new_size;

  AsanChunk *m = PtrToChunk((uptr)old_ptr);
  CHECK(m->chunk_state == CHUNK_ALLOCATED);
  uptr old_size = m->used_size;
  uptr memcpy_size = Min(new_size, old_size);
  u8 *new_ptr = Allocate(0, new_size, stack);
  if (new_ptr) {
    CHECK(REAL(memcpy) != 0);
    REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
    Deallocate(old_ptr, stack);
  }
  return new_ptr;
}

}  // namespace __asan

// Malloc hooks declaration.
// ASAN_NEW_HOOK(ptr, size) is called immediately after
//   allocation of "size" bytes, which returned "ptr".
// ASAN_DELETE_HOOK(ptr) is called immediately before
//   deallocation of "ptr".
// If ASAN_NEW_HOOK or ASAN_DELETE_HOOK is defined, the user
// program must provide an implementation of that hook.
// If the macro is undefined, the hook is a no-op.
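// For example (illustrative only; the hook name and build flag below are not
// part of ASan), a user could build the runtime with -DASAN_NEW_HOOK=MyNewHook
// and link in:
//   extern "C" void MyNewHook(void *ptr, uptr size) { /* record ptr, size */ }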
#ifdef ASAN_NEW_HOOK
extern "C" void ASAN_NEW_HOOK(void *ptr, uptr size);
#else
static inline void ASAN_NEW_HOOK(void *ptr, uptr size) { }
#endif

#ifdef ASAN_DELETE_HOOK
extern "C" void ASAN_DELETE_HOOK(void *ptr);
#else
static inline void ASAN_DELETE_HOOK(void *ptr) { }
#endif

namespace __asan {

void *asan_memalign(uptr alignment, uptr size, AsanStackTrace *stack) {
  void *ptr = (void*)Allocate(alignment, size, stack);
  ASAN_NEW_HOOK(ptr, size);
  return ptr;
}

void asan_free(void *ptr, AsanStackTrace *stack) {
  ASAN_DELETE_HOOK(ptr);
  Deallocate((u8*)ptr, stack);
}

void *asan_malloc(uptr size, AsanStackTrace *stack) {
  void *ptr = (void*)Allocate(0, size, stack);
  ASAN_NEW_HOOK(ptr, size);
  return ptr;
}

void *asan_calloc(uptr nmemb, uptr size, AsanStackTrace *stack) {
  void *ptr = (void*)Allocate(0, nmemb * size, stack);
  if (ptr)
    REAL(memset)(ptr, 0, nmemb * size);
  ASAN_NEW_HOOK(ptr, nmemb * size);
  return ptr;
}

void *asan_realloc(void *p, uptr size, AsanStackTrace *stack) {
  if (p == 0) {
    void *ptr = (void*)Allocate(0, size, stack);
    ASAN_NEW_HOOK(ptr, size);
    return ptr;
  } else if (size == 0) {
    ASAN_DELETE_HOOK(p);
    Deallocate((u8*)p, stack);
    return 0;
  }
  return Reallocate((u8*)p, size, stack);
}

void *asan_valloc(uptr size, AsanStackTrace *stack) {
  void *ptr = (void*)Allocate(kPageSize, size, stack);
  ASAN_NEW_HOOK(ptr, size);
  return ptr;
}

void *asan_pvalloc(uptr size, AsanStackTrace *stack) {
  size = RoundUpTo(size, kPageSize);
  if (size == 0) {
    // pvalloc(0) should allocate one page.
    size = kPageSize;
  }
  void *ptr = (void*)Allocate(kPageSize, size, stack);
  ASAN_NEW_HOOK(ptr, size);
  return ptr;
}

int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                          AsanStackTrace *stack) {
  void *ptr = Allocate(alignment, size, stack);
  CHECK(IsAligned((uptr)ptr, alignment));
  ASAN_NEW_HOOK(ptr, size);
  *memptr = ptr;
  return 0;
}

uptr asan_malloc_usable_size(void *ptr, AsanStackTrace *stack) {
  CHECK(stack);
  if (ptr == 0) return 0;
  uptr usable_size = malloc_info.AllocationSize((uptr)ptr);
  if (flags()->check_malloc_usable_size && (usable_size == 0)) {
    ReportMallocUsableSizeNotOwned((uptr)ptr, stack);
  }
  return usable_size;
}

uptr asan_mz_size(const void *ptr) {
  return malloc_info.AllocationSize((uptr)ptr);
}

void asan_mz_force_lock() {
  malloc_info.ForceLock();
}

void asan_mz_force_unlock() {
  malloc_info.ForceUnlock();
}

// ---------------------- Fake stack-------------------- {{{1
FakeStack::FakeStack() {
  CHECK(REAL(memset) != 0);
  REAL(memset)(this, 0, sizeof(*this));
}

bool FakeStack::AddrIsInSizeClass(uptr addr, uptr size_class) {
  uptr mem = allocated_size_classes_[size_class];
  uptr size = ClassMmapSize(size_class);
  bool res = mem && addr >= mem && addr < mem + size;
  return res;
}

uptr FakeStack::AddrIsInFakeStack(uptr addr) {
  for (uptr i = 0; i < kNumberOfSizeClasses; i++) {
    if (AddrIsInSizeClass(addr, i)) return allocated_size_classes_[i];
  }
  return 0;
}

// We may want to compute this during compilation.
inline uptr FakeStack::ComputeSizeClass(uptr alloc_size) {
  uptr rounded_size = RoundUpToPowerOfTwo(alloc_size);
  uptr log = Log2(rounded_size);
  CHECK(alloc_size <= (1UL << log));
  if (!(alloc_size > (1UL << (log-1)))) {
    Printf("alloc_size %zu log %zu\n", alloc_size, log);
  }
  CHECK(alloc_size > (1UL << (log-1)));
  uptr res = log < kMinStackFrameSizeLog ? 0 : log - kMinStackFrameSizeLog;
  CHECK(res < kNumberOfSizeClasses);
  CHECK(ClassSize(res) >= rounded_size);
  return res;
}

void FakeFrameFifo::FifoPush(FakeFrame *node) {
  CHECK(node);
  node->next = 0;
  if (first_ == 0 && last_ == 0) {
    first_ = last_ = node;
  } else {
    CHECK(first_);
    CHECK(last_);
    last_->next = node;
    last_ = node;
  }
}

FakeFrame *FakeFrameFifo::FifoPop() {
  CHECK(first_ && last_ && "Exhausted fake stack");
  FakeFrame *res = 0;
  if (first_ == last_) {
    res = first_;
    first_ = last_ = 0;
  } else {
    res = first_;
    first_ = first_->next;
  }
  return res;
}

void FakeStack::Init(uptr stack_size) {
  stack_size_ = stack_size;
  alive_ = true;
}

void FakeStack::Cleanup() {
  alive_ = false;
  for (uptr i = 0; i < kNumberOfSizeClasses; i++) {
    uptr mem = allocated_size_classes_[i];
    if (mem) {
      PoisonShadow(mem, ClassMmapSize(i), 0);
      allocated_size_classes_[i] = 0;
      UnmapOrDie((void*)mem, ClassMmapSize(i));
    }
  }
}

uptr FakeStack::ClassMmapSize(uptr size_class) {
  return RoundUpToPowerOfTwo(stack_size_);
}

void FakeStack::AllocateOneSizeClass(uptr size_class) {
  CHECK(ClassMmapSize(size_class) >= kPageSize);
  uptr new_mem = (uptr)MmapOrDie(
      ClassMmapSize(size_class), __FUNCTION__);
  // Printf("T%d new_mem[%zu]: %p-%p mmap %zu\n",
  //       asanThreadRegistry().GetCurrent()->tid(),
  //       size_class, new_mem, new_mem + ClassMmapSize(size_class),
  //       ClassMmapSize(size_class));
  uptr i;
  for (i = 0; i < ClassMmapSize(size_class);
       i += ClassSize(size_class)) {
    size_classes_[size_class].FifoPush((FakeFrame*)(new_mem + i));
  }
  CHECK(i == ClassMmapSize(size_class));
  allocated_size_classes_[size_class] = new_mem;
}

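// Returns a fake frame for a function frame of 'size' bytes, mmaping the
// size class lazily on first use. Fake frames whose real stack pointer is at
// or below the current one belong to calls that have already returned, so
// they are popped from the call stack and recycled here.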
uptr FakeStack::AllocateStack(uptr size, uptr real_stack) {
  if (!alive_) return real_stack;
  CHECK(size <= kMaxStackMallocSize && size > 1);
  uptr size_class = ComputeSizeClass(size);
  if (!allocated_size_classes_[size_class]) {
    AllocateOneSizeClass(size_class);
  }
  FakeFrame *fake_frame = size_classes_[size_class].FifoPop();
  CHECK(fake_frame);
  fake_frame->size_minus_one = size - 1;
  fake_frame->real_stack = real_stack;
  while (FakeFrame *top = call_stack_.top()) {
    if (top->real_stack > real_stack) break;
    call_stack_.LifoPop();
    DeallocateFrame(top);
  }
  call_stack_.LifoPush(fake_frame);
  uptr ptr = (uptr)fake_frame;
  PoisonShadow(ptr, size, 0);
  return ptr;
}

void FakeStack::DeallocateFrame(FakeFrame *fake_frame) {
  CHECK(alive_);
  uptr size = fake_frame->size_minus_one + 1;
  uptr size_class = ComputeSizeClass(size);
  CHECK(allocated_size_classes_[size_class]);
  uptr ptr = (uptr)fake_frame;
  CHECK(AddrIsInSizeClass(ptr, size_class));
  CHECK(AddrIsInSizeClass(ptr + size - 1, size_class));
  size_classes_[size_class].FifoPush(fake_frame);
}

void FakeStack::OnFree(uptr ptr, uptr size, uptr real_stack) {
  FakeFrame *fake_frame = (FakeFrame*)ptr;
  CHECK(fake_frame->magic == kRetiredStackFrameMagic);
  CHECK(fake_frame->descr != 0);
  CHECK(fake_frame->size_minus_one == size - 1);
  PoisonShadow(ptr, size, kAsanStackAfterReturnMagic);
}

}  // namespace __asan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;  // NOLINT

uptr __asan_stack_malloc(uptr size, uptr real_stack) {
  if (!flags()->use_fake_stack) return real_stack;
  AsanThread *t = asanThreadRegistry().GetCurrent();
  if (!t) {
    // TSD is gone, use the real stack.
    return real_stack;
  }
  uptr ptr = t->fake_stack().AllocateStack(size, real_stack);
  // Printf("__asan_stack_malloc %p %zu %p\n", ptr, size, real_stack);
  return ptr;
}

void __asan_stack_free(uptr ptr, uptr size, uptr real_stack) {
  if (!flags()->use_fake_stack) return;
  if (ptr != real_stack) {
    FakeStack::OnFree(ptr, size, real_stack);
  }
}

// The ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size".
uptr __asan_get_estimated_allocated_size(uptr size) {
  if (size == 0) return 1;
  return Min(size, kMaxAllowedMallocSize);
}

bool __asan_get_ownership(const void *p) {
  return malloc_info.AllocationSize((uptr)p) > 0;
}

uptr __asan_get_allocated_size(const void *p) {
  if (p == 0) return 0;
  uptr allocated_size = malloc_info.AllocationSize((uptr)p);
  // Die if p is not malloced or if it is already freed.
  if (allocated_size == 0) {
    GET_STACK_TRACE_HERE(kStackTraceMax);
    ReportAsanGetAllocatedSizeNotOwned((uptr)p, &stack);
  }
  return allocated_size;
}
1084