asan_allocator.cc revision a874fe5b5d67152e4e737498d532eec80940bdcd
1//===-- asan_allocator.cc ---------------------------------------*- C++ -*-===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file is a part of AddressSanitizer, an address sanity checker.
11//
12// Implementation of ASan's memory allocator.
13// Every piece of memory (AsanChunk) allocated by the allocator
14// has a left redzone of REDZONE bytes and
15// a right redzone such that the end of the chunk is aligned by REDZONE
16// (i.e. the right redzone is between 0 and REDZONE-1).
17// The left redzone is always poisoned.
18// The right redzone is poisoned on malloc, the body is poisoned on free.
19// Once freed, a chunk is moved to a quarantine (fifo list).
20// After quarantine, a chunk is returned to freelists.
21//
22// The left redzone contains ASan's internal data and the stack trace of
23// the malloc call.
24// Once freed, the body of the chunk contains the stack trace of the free call.
25//
26//===----------------------------------------------------------------------===//
27
28#include "asan_allocator.h"
29#include "asan_interceptors.h"
30#include "asan_interface.h"
31#include "asan_internal.h"
32#include "asan_lock.h"
33#include "asan_mapping.h"
34#include "asan_stats.h"
35#include "asan_thread.h"
36#include "asan_thread_registry.h"
37
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
41
42namespace __asan {
43
#define  REDZONE FLAG_redzone
// Smallest chunk: one left redzone plus at least REDZONE bytes of body.
static const size_t kMinAllocSize = REDZONE * 2;
// Chunks are carved out of mmaped regions of at least this size.
static const size_t kMinMmapSize  = 4UL << 20;  // 4M
// Upper bound used only to size the page_groups_ array below.
static const uint64_t kMaxAvailableRam = 128ULL << 30;  // 128G
// Per-thread quarantine is flushed into the global one past this size.
static const size_t kMaxThreadLocalQuarantine = 1 << 20;  // 1M
// Chunks at least this large bypass the thread-local free lists.
static const size_t kMaxSizeForThreadLocalFreeList = 1 << 17;

// Size classes less than kMallocSizeClassStep are powers of two.
// All other size classes are multiples of kMallocSizeClassStep.
static const size_t kMallocSizeClassStepLog = 26;
static const size_t kMallocSizeClassStep = 1UL << kMallocSizeClassStepLog;

#if __WORDSIZE == 32
static const size_t kMaxAllowedMallocSize = 3UL << 30;  // 3G
#else
static const size_t kMaxAllowedMallocSize = 8UL << 30;  // 8G
#endif
61
// True iff |a| is a multiple of |alignment| (which must be a power of two).
static inline bool IsAligned(uintptr_t a, uintptr_t alignment) {
  uintptr_t low_bits = alignment - 1;
  return (a & low_bits) == 0;
}
65
66static inline size_t Log2(size_t x) {
67  CHECK(IsPowerOfTwo(x));
68  return __builtin_ctzl(x);
69}
70
71static inline size_t RoundUpToPowerOfTwo(size_t size) {
72  CHECK(size);
73  if (IsPowerOfTwo(size)) return size;
74  size_t up = __WORDSIZE - __builtin_clzl(size);
75  CHECK(size < (1ULL << up));
76  CHECK(size > (1ULL << (up - 1)));
77  return 1UL << up;
78}
79
80static inline size_t SizeClassToSize(uint8_t size_class) {
81  CHECK(size_class < kNumberOfSizeClasses);
82  if (size_class <= kMallocSizeClassStepLog) {
83    return 1UL << size_class;
84  } else {
85    return (size_class - kMallocSizeClassStepLog) * kMallocSizeClassStep;
86  }
87}
88
89static inline uint8_t SizeToSizeClass(size_t size) {
90  uint8_t res = 0;
91  if (size <= kMallocSizeClassStep) {
92    size_t rounded = RoundUpToPowerOfTwo(size);
93    res = Log2(rounded);
94  } else {
95    res = ((size + kMallocSizeClassStep - 1) / kMallocSizeClassStep)
96        + kMallocSizeClassStepLog;
97  }
98  CHECK(res < kNumberOfSizeClasses);
99  CHECK(size <= SizeClassToSize(res));
100  return res;
101}
102
// Given REDZONE bytes, we need to mark first size bytes
// as addressable and the rest REDZONE-size bytes as unaddressable.
// |mem| must be REDZONE-aligned; used for the partially-used last
// granule of a chunk's body.
static void PoisonHeapPartialRightRedzone(uintptr_t mem, size_t size) {
  CHECK(size <= REDZONE);
  CHECK(IsAligned(mem, REDZONE));
  CHECK(IsPowerOfTwo(SHADOW_GRANULARITY));
  CHECK(IsPowerOfTwo(REDZONE));
  CHECK(REDZONE >= SHADOW_GRANULARITY);
  PoisonShadowPartialRightRedzone(mem, size, REDZONE,
                                  kAsanHeapRightRedzoneMagic);
}
114
// Mmaps |size| bytes (page-aligned) and marks the whole region as a heap
// left redzone in shadow memory; chunks carved from it are unpoisoned
// individually on allocation.
static uint8_t *MmapNewPagesAndPoisonShadow(size_t size) {
  CHECK(IsAligned(size, kPageSize));
  uint8_t *res = (uint8_t*)AsanMmapSomewhereOrDie(size, __FUNCTION__);
  PoisonShadow((uintptr_t)res, size, kAsanHeapLeftRedzoneMagic);
  if (FLAG_debug) {
    Printf("ASAN_MMAP: [%p, %p)\n", res, res + size);
  }
  return res;
}
124
125// Every chunk of memory allocated by this allocator can be in one of 3 states:
126// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
127// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
128// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
129//
130// The pseudo state CHUNK_MEMALIGN is used to mark that the address is not
131// the beginning of a AsanChunk (in which case 'next' contains the address
132// of the AsanChunk).
133//
134// The magic numbers for the enum values are taken randomly.
enum {
  CHUNK_AVAILABLE  = 0x573B,  // On a free list, ready to be handed out.
  CHUNK_ALLOCATED  = 0x3204,  // Live, owned by the user.
  CHUNK_QUARANTINE = 0x1978,  // Freed, waiting in the quarantine FIFO.
  CHUNK_MEMALIGN   = 0xDC68,  // Stub header; 'next' points to the real chunk.
};
141
// Header stored at the start of every chunk's left redzone.
struct ChunkBase {
  uint16_t   chunk_state;  // One of the CHUNK_* magic values above.
  uint8_t    size_class;   // Index into the size-class table (see SizeClassToSize).
  uint32_t   offset;  // User-visible memory starts at this+offset (beg()).
  int32_t    alloc_tid;    // Thread that allocated the chunk.
  int32_t    free_tid;     // Thread that freed it; kInvalidTid while live.
  size_t     used_size;  // Size requested by the user.
  // Link used by free lists, the quarantine FIFO, and (for CHUNK_MEMALIGN
  // stubs) as the pointer to the real chunk header.
  AsanChunk *next;

  uintptr_t   beg() { return (uintptr_t)this + offset; }
  // Full chunk size (redzones included), derived from the size class.
  size_t Size() { return SizeClassToSize(size_class); }
  uint8_t SizeClass() { return size_class; }
};
155
struct AsanChunk: public ChunkBase {
  // Compressed malloc stack trace lives in the left redzone, right after
  // the ChunkBase header.
  uint32_t *compressed_alloc_stack() {
    CHECK(REDZONE >= sizeof(ChunkBase));
    return (uint32_t*)((uintptr_t)this + sizeof(ChunkBase));
  }
  // Compressed free stack trace overwrites the (already freed) user body,
  // which starts exactly REDZONE bytes into the chunk.
  uint32_t *compressed_free_stack() {
    CHECK(REDZONE >= sizeof(ChunkBase));
    return (uint32_t*)((uintptr_t)this + REDZONE);
  }

  // The left redzone after the ChunkBase is given to the alloc stack trace.
  size_t compressed_alloc_stack_size() {
    return (REDZONE - sizeof(ChunkBase)) / sizeof(uint32_t);
  }
  size_t compressed_free_stack_size() {
    return (REDZONE) / sizeof(uint32_t);
  }

  // The three classifiers below are not mutually exclusive at the
  // boundaries; callers probe them in inside/left/right order.
  bool AddrIsInside(uintptr_t addr, size_t access_size, size_t *offset) {
    if (addr >= beg() && (addr + access_size) <= (beg() + used_size)) {
      *offset = addr - beg();
      return true;
    }
    return false;
  }

  bool AddrIsAtLeft(uintptr_t addr, size_t access_size, size_t *offset) {
    if (addr < beg()) {
      *offset = beg() - addr;
      return true;
    }
    return false;
  }

  bool AddrIsAtRight(uintptr_t addr, size_t access_size, size_t *offset) {
    if (addr + access_size >= beg() + used_size) {
      // An access starting inside but running past the end reports offset 0.
      if (addr <= beg() + used_size)
        *offset = 0;
      else
        *offset = addr - (beg() + used_size);
      return true;
    }
    return false;
  }

  // Prints where |addr| lies relative to this chunk's user region
  // (inside / left of / right of), as part of an error report.
  void DescribeAddress(uintptr_t addr, size_t access_size) {
    size_t offset;
    Printf("%p is located ", addr);
    if (AddrIsInside(addr, access_size, &offset)) {
      Printf("%ld bytes inside of", offset);
    } else if (AddrIsAtLeft(addr, access_size, &offset)) {
      Printf("%ld bytes to the left of", offset);
    } else if (AddrIsAtRight(addr, access_size, &offset)) {
      Printf("%ld bytes to the right of", offset);
    } else {
      Printf(" somewhere around (this is AddressSanitizer bug!)");
    }
    Printf(" %lu-byte region [%p,%p)\n",
           used_size, beg(), beg() + used_size);
  }
};
217
218static AsanChunk *PtrToChunk(uintptr_t ptr) {
219  AsanChunk *m = (AsanChunk*)(ptr - REDZONE);
220  if (m->chunk_state == CHUNK_MEMALIGN) {
221    m = m->next;
222  }
223  return m;
224}
225
226
227void AsanChunkFifoList::PushList(AsanChunkFifoList *q) {
228  if (last_) {
229    CHECK(first_);
230    CHECK(!last_->next);
231    last_->next = q->first_;
232    last_ = q->last_;
233  } else {
234    CHECK(!first_);
235    last_ = q->last_;
236    first_ = q->first_;
237  }
238  size_ += q->size();
239  q->clear();
240}
241
242void AsanChunkFifoList::Push(AsanChunk *n) {
243  CHECK(n->next == NULL);
244  if (last_) {
245    CHECK(first_);
246    CHECK(!last_->next);
247    last_->next = n;
248    last_ = n;
249  } else {
250    CHECK(!first_);
251    last_ = first_ = n;
252  }
253  size_ += n->Size();
254}
255
256// Interesting performance observation: this function takes up to 15% of overal
257// allocator time. That's because *first_ has been evicted from cache long time
258// ago. Not sure if we can or want to do anything with this.
259AsanChunk *AsanChunkFifoList::Pop() {
260  CHECK(first_);
261  AsanChunk *res = first_;
262  first_ = first_->next;
263  if (first_ == NULL)
264    last_ = NULL;
265  CHECK(size_ >= res->Size());
266  size_ -= res->Size();
267  if (last_) {
268    CHECK(!last_->next);
269  }
270  return res;
271}
272
273// All pages we ever allocated.
// All pages we ever allocated.
// Describes one mmaped region of equally-sized chunks; the PageGroup
// record itself lives in the poisoned tail of that region (see
// MallocInfo::GetNewChunks).
struct PageGroup {
  uintptr_t beg;            // First byte of the region (also first chunk).
  uintptr_t end;            // One past the last byte of the region.
  size_t size_of_chunk;     // Uniform chunk size within this group.
  uintptr_t last_chunk;     // Address of the rightmost chunk.
  bool InRange(uintptr_t addr) {
    return addr >= beg && addr < end;
  }
};
283
// Global allocator state: per-size-class free lists, the shared
// quarantine, and the registry of every mmaped page group. All public
// methods take mu_; private helpers assume it is already held.
class MallocInfo {
 public:

  explicit MallocInfo(LinkerInitialized x) : mu_(x) { }

  // Detaches up to |n_chunks| CHUNK_AVAILABLE chunks of |size_class| from
  // the global free list (refilling it via mmap when empty) and returns
  // them as a singly-linked list.
  AsanChunk *AllocateChunks(uint8_t size_class, size_t n_chunks) {
    AsanChunk *m = NULL;
    AsanChunk **fl = &free_lists_[size_class];
    {
      ScopedLock lock(&mu_);
      for (size_t i = 0; i < n_chunks; i++) {
        if (!(*fl)) {
          *fl = GetNewChunks(size_class);
        }
        AsanChunk *t = *fl;
        *fl = t->next;
        t->next = m;
        CHECK(t->chunk_state == CHUNK_AVAILABLE);
        m = t;
      }
    }
    return m;
  }

  // Merges a thread's local quarantine (and, on thread death, its free
  // lists) into the global state, trimming the global quarantine back
  // under FLAG_quarantine_size.
  void SwallowThreadLocalMallocStorage(AsanThreadLocalMallocStorage *x,
                                       bool eat_free_lists) {
    CHECK(FLAG_quarantine_size > 0);
    ScopedLock lock(&mu_);
    AsanChunkFifoList *q = &x->quarantine_;
    if (q->size() > 0) {
      quarantine_.PushList(q);
      while (quarantine_.size() > FLAG_quarantine_size) {
        QuarantinePop();
      }
    }
    if (eat_free_lists) {
      for (size_t size_class = 0; size_class < kNumberOfSizeClasses;
           size_class++) {
        AsanChunk *m = x->free_lists_[size_class];
        while (m) {
          AsanChunk *t = m->next;
          m->next = free_lists_[size_class];
          free_lists_[size_class] = m;
          m = t;
        }
        x->free_lists_[size_class] = 0;
      }
    }
  }

  // Used when there is no current thread (hence no thread-local
  // quarantine): put the chunk straight into the global quarantine.
  void BypassThreadLocalQuarantine(AsanChunk *chunk) {
    ScopedLock lock(&mu_);
    quarantine_.Push(chunk);
  }

  // NOTE(review): |access_size| is currently ignored by the lookup.
  AsanChunk *FindMallocedOrFreed(uintptr_t addr, size_t access_size) {
    ScopedLock lock(&mu_);
    return FindChunkByAddr(addr);
  }

  // TODO(glider): AllocationSize() may become very slow if the size of
  // page_groups_ grows. This can be fixed by increasing kMinMmapSize,
  // but a better solution is to speed up the search somehow.
  // Returns the user-requested size of a live allocation, or 0 if |ptr|
  // is not ours or not currently allocated.
  size_t AllocationSize(uintptr_t ptr) {
    ScopedLock lock(&mu_);

    // first, check if this is our memory
    PageGroup *g = FindPageGroupUnlocked(ptr);
    if (!g) return 0;
    AsanChunk *m = PtrToChunk(ptr);
    if (m->chunk_state == CHUNK_ALLOCATED) {
      return m->used_size;
    } else {
      return 0;
    }
  }

  // Fork-safety hooks: hold/release the allocator lock across fork().
  void ForceLock() {
    mu_.Lock();
  }

  void ForceUnlock() {
    mu_.Unlock();
  }

  // Dumps quarantine and per-class free-list sizes (in MB).
  // NOTE(review): |malloced| is never updated and always prints 0.
  void PrintStatus() {
    ScopedLock lock(&mu_);
    size_t malloced = 0;

    Printf(" MallocInfo: in quarantine: %ld malloced: %ld; ",
           quarantine_.size() >> 20, malloced >> 20);
    for (size_t j = 1; j < kNumberOfSizeClasses; j++) {
      AsanChunk *i = free_lists_[j];
      if (!i) continue;
      size_t t = 0;
      for (; i; i = i->next) {
        t += i->Size();
      }
      Printf("%ld:%ld ", j, t >> 20);
    }
    Printf("\n");
  }

  PageGroup *FindPageGroup(uintptr_t addr) {
    ScopedLock lock(&mu_);
    return FindPageGroupUnlocked(addr);
  }

 private:
  // Linear scan over all page groups; see the TODO on AllocationSize().
  PageGroup *FindPageGroupUnlocked(uintptr_t addr) {
    for (int i = 0; i < n_page_groups_; i++) {
      PageGroup *g = page_groups_[i];
      if (g->InRange(addr)) {
        return g;
      }
    }
    return NULL;
  }

  // We have an address between two chunks, and we want to report just one.
  AsanChunk *ChooseChunk(uintptr_t addr,
                         AsanChunk *left_chunk, AsanChunk *right_chunk) {
    // Prefer an allocated chunk or a chunk from quarantine.
    if (left_chunk->chunk_state == CHUNK_AVAILABLE &&
        right_chunk->chunk_state != CHUNK_AVAILABLE)
      return right_chunk;
    if (right_chunk->chunk_state == CHUNK_AVAILABLE &&
        left_chunk->chunk_state != CHUNK_AVAILABLE)
      return left_chunk;
    // Choose based on offset.
    size_t l_offset = 0, r_offset = 0;
    CHECK(left_chunk->AddrIsAtRight(addr, 1, &l_offset));
    CHECK(right_chunk->AddrIsAtLeft(addr, 1, &r_offset));
    if (l_offset < r_offset)
      return left_chunk;
    return right_chunk;
  }

  // Maps |addr| to the most relevant chunk: the one containing it, or the
  // better of the two neighbors when it falls in a redzone gap. Relies on
  // all chunks in a page group having the same size.
  AsanChunk *FindChunkByAddr(uintptr_t addr) {
    PageGroup *g = FindPageGroupUnlocked(addr);
    if (!g) return 0;
    CHECK(g->size_of_chunk);
    uintptr_t offset_from_beg = addr - g->beg;
    uintptr_t this_chunk_addr = g->beg +
        (offset_from_beg / g->size_of_chunk) * g->size_of_chunk;
    CHECK(g->InRange(this_chunk_addr));
    AsanChunk *m = (AsanChunk*)this_chunk_addr;
    CHECK(m->chunk_state == CHUNK_ALLOCATED ||
          m->chunk_state == CHUNK_AVAILABLE ||
          m->chunk_state == CHUNK_QUARANTINE);
    size_t offset = 0;
    if (m->AddrIsInside(addr, 1, &offset))
      return m;

    if (m->AddrIsAtRight(addr, 1, &offset)) {
      if (this_chunk_addr == g->last_chunk)  // rightmost chunk
        return m;
      uintptr_t right_chunk_addr = this_chunk_addr + g->size_of_chunk;
      CHECK(g->InRange(right_chunk_addr));
      return ChooseChunk(addr, m, (AsanChunk*)right_chunk_addr);
    } else {
      CHECK(m->AddrIsAtLeft(addr, 1, &offset));
      if (this_chunk_addr == g->beg)  // leftmost chunk
        return m;
      uintptr_t left_chunk_addr = this_chunk_addr - g->size_of_chunk;
      CHECK(g->InRange(left_chunk_addr));
      return ChooseChunk(addr, (AsanChunk*)left_chunk_addr, m);
    }
  }

  // Retires the oldest quarantined chunk: flips it back to
  // CHUNK_AVAILABLE and returns it to its size-class free list.
  void QuarantinePop() {
    CHECK(quarantine_.size() > 0);
    AsanChunk *m = quarantine_.Pop();
    CHECK(m);
    // if (F_v >= 2) Printf("MallocInfo::pop %p\n", m);

    CHECK(m->chunk_state == CHUNK_QUARANTINE);
    m->chunk_state = CHUNK_AVAILABLE;
    CHECK(m->alloc_tid >= 0);
    CHECK(m->free_tid >= 0);

    size_t size_class = m->SizeClass();
    m->next = free_lists_[size_class];
    free_lists_[size_class] = m;

    // Statistics.
    AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
    thread_stats.real_frees++;
    thread_stats.really_freed += m->used_size;
    thread_stats.really_freed_redzones += m->Size() - m->used_size;
    thread_stats.really_freed_by_size[m->SizeClass()]++;
  }

  // Get a list of newly allocated chunks.
  // Mmaps a fresh region, carves it into chunks of |size_class|, records
  // a PageGroup in the region's poisoned tail, and returns the chunks as
  // a linked list. Caller must hold mu_ (called from AllocateChunks).
  AsanChunk *GetNewChunks(uint8_t size_class) {
    size_t size = SizeClassToSize(size_class);
    CHECK(IsPowerOfTwo(kMinMmapSize));
    CHECK(size < kMinMmapSize || (size % kMinMmapSize) == 0);
    size_t mmap_size = Max(size, kMinMmapSize);
    size_t n_chunks = mmap_size / size;
    CHECK(n_chunks * size == mmap_size);
    if (size < kPageSize) {
      // Size is small, just poison the last chunk.
      n_chunks--;
    } else {
      // Size is large, allocate an extra page at right and poison it.
      mmap_size += kPageSize;
    }
    CHECK(n_chunks > 0);
    uint8_t *mem = MmapNewPagesAndPoisonShadow(mmap_size);

    // Statistics.
    AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
    thread_stats.mmaps++;
    thread_stats.mmaped += mmap_size;
    thread_stats.mmaped_by_size[size_class] += n_chunks;

    AsanChunk *res = NULL;
    for (size_t i = 0; i < n_chunks; i++) {
      AsanChunk *m = (AsanChunk*)(mem + i * size);
      m->chunk_state = CHUNK_AVAILABLE;
      m->size_class = size_class;
      m->next = res;
      res = m;
    }
    PageGroup *pg = (PageGroup*)(mem + n_chunks * size);
    // This memory is already poisoned, no need to poison it again.
    pg->beg = (uintptr_t)mem;
    pg->end = pg->beg + mmap_size;
    pg->size_of_chunk = size;
    pg->last_chunk = (uintptr_t)(mem + size * (n_chunks - 1));
    int page_group_idx = AtomicInc(&n_page_groups_) - 1;
    CHECK(page_group_idx < (int)ASAN_ARRAY_SIZE(page_groups_));
    page_groups_[page_group_idx] = pg;
    return res;
  }

  AsanChunk *free_lists_[kNumberOfSizeClasses];
  AsanChunkFifoList quarantine_;
  AsanLock mu_;

  PageGroup *page_groups_[kMaxAvailableRam / kMinMmapSize];
  int n_page_groups_;  // atomic
};
528
// The single global allocator instance; linker-initialized so it is
// usable before constructors run.
static MallocInfo malloc_info(LINKER_INITIALIZED);
530
// Called on thread destruction: hand this thread's quarantine AND free
// lists back to the global allocator.
void AsanThreadLocalMallocStorage::CommitBack() {
  malloc_info.SwallowThreadLocalMallocStorage(this, true);
}
534
// Error-report helper: prints where |addr| sits relative to its heap
// chunk plus the (decompressed) allocation and, if freed, free stacks.
static void Describe(uintptr_t addr, size_t access_size) {
  AsanChunk *m = malloc_info.FindMallocedOrFreed(addr, access_size);
  if (!m) return;  // Not heap memory we know about.
  m->DescribeAddress(addr, access_size);
  CHECK(m->alloc_tid >= 0);
  AsanThreadSummary *alloc_thread =
      asanThreadRegistry().FindByTid(m->alloc_tid);
  AsanStackTrace alloc_stack;
  AsanStackTrace::UncompressStack(&alloc_stack, m->compressed_alloc_stack(),
                                  m->compressed_alloc_stack_size());
  AsanThread *t = asanThreadRegistry().GetCurrent();
  CHECK(t);
  // free_tid >= 0 means the chunk has been freed (use-after-free report);
  // otherwise it is still live (buffer-overflow report).
  if (m->free_tid >= 0) {
    AsanThreadSummary *free_thread =
        asanThreadRegistry().FindByTid(m->free_tid);
    Printf("freed by thread T%d here:\n", free_thread->tid());
    AsanStackTrace free_stack;
    AsanStackTrace::UncompressStack(&free_stack, m->compressed_free_stack(),
                                    m->compressed_free_stack_size());
    free_stack.PrintStack();
    Printf("previously allocated by thread T%d here:\n",
           alloc_thread->tid());

    alloc_stack.PrintStack();
    t->summary()->Announce();
    free_thread->Announce();
    alloc_thread->Announce();
  } else {
    Printf("allocated by thread T%d here:\n", alloc_thread->tid());
    alloc_stack.PrintStack();
    t->summary()->Announce();
    alloc_thread->Announce();
  }
}
569
// Core allocation routine shared by all malloc-family entry points.
// Returns a pointer aligned to max(alignment, REDZONE), preceded by a
// poisoned left redzone holding the chunk header and malloc stack trace.
// Returns 0 if the request exceeds kMaxAllowedMallocSize.
static uint8_t *Allocate(size_t alignment, size_t size, AsanStackTrace *stack) {
  __asan_init();
  CHECK(stack);
  if (size == 0) {
    size = 1;  // TODO(kcc): do something smarter
  }
  CHECK(IsPowerOfTwo(alignment));
  // Body rounded up to REDZONE, plus the left redzone, plus slack so we
  // can slide forward to any requested alignment.
  size_t rounded_size = RoundUpTo(size, REDZONE);
  size_t needed_size = rounded_size + REDZONE;
  if (alignment > REDZONE) {
    needed_size += alignment;
  }
  CHECK(IsAligned(needed_size, REDZONE));
  if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
    Report("WARNING: AddressSanitizer failed to allocate %p bytes\n", size);
    return 0;
  }

  uint8_t size_class = SizeToSizeClass(needed_size);
  size_t size_to_allocate = SizeClassToSize(size_class);
  CHECK(size_to_allocate >= kMinAllocSize);
  CHECK(size_to_allocate >= needed_size);
  CHECK(IsAligned(size_to_allocate, REDZONE));

  if (FLAG_v >= 2) {
    Printf("Allocate align: %ld size: %ld class: %d real: %ld\n",
         alignment, size, size_class, size_to_allocate);
  }

  AsanThread *t = asanThreadRegistry().GetCurrent();
  AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
  // Statistics
  thread_stats.mallocs++;
  thread_stats.malloced += size;
  thread_stats.malloced_redzones += size_to_allocate - size;
  thread_stats.malloced_by_size[size_class]++;

  AsanChunk *m = NULL;
  if (!t || size_to_allocate >= kMaxSizeForThreadLocalFreeList) {
    // get directly from global storage.
    m = malloc_info.AllocateChunks(size_class, 1);
    thread_stats.malloc_large++;
  } else {
    // get from the thread-local storage; batch-refill it when empty.
    AsanChunk **fl = &t->malloc_storage().free_lists_[size_class];
    if (!*fl) {
      size_t n_new_chunks = kMaxSizeForThreadLocalFreeList / size_to_allocate;
      *fl = malloc_info.AllocateChunks(size_class, n_new_chunks);
      thread_stats.malloc_small_slow++;
    }
    m = *fl;
    *fl = (*fl)->next;
  }
  CHECK(m);
  CHECK(m->chunk_state == CHUNK_AVAILABLE);
  m->chunk_state = CHUNK_ALLOCATED;
  m->next = NULL;
  CHECK(m->Size() == size_to_allocate);
  uintptr_t addr = (uintptr_t)m + REDZONE;
  CHECK(addr == (uintptr_t)m->compressed_free_stack());

  // Over-aligned request: slide forward and leave a CHUNK_MEMALIGN stub
  // just before the returned address so PtrToChunk can find the header.
  if (alignment > REDZONE && (addr & (alignment - 1))) {
    addr = RoundUpTo(addr, alignment);
    CHECK((addr & (alignment - 1)) == 0);
    AsanChunk *p = (AsanChunk*)(addr - REDZONE);
    p->chunk_state = CHUNK_MEMALIGN;
    p->next = m;
  }
  CHECK(m == PtrToChunk(addr));
  m->used_size = size;
  m->offset = addr - (uintptr_t)m;
  CHECK(m->beg() == addr);
  m->alloc_tid = t ? t->tid() : 0;
  m->free_tid   = AsanThread::kInvalidTid;
  AsanStackTrace::CompressStack(stack, m->compressed_alloc_stack(),
                                m->compressed_alloc_stack_size());
  // Unpoison the body; partially poison the last granule if the user
  // size is not a multiple of REDZONE.
  PoisonShadow(addr, rounded_size, 0);
  if (size < rounded_size) {
    PoisonHeapPartialRightRedzone(addr + rounded_size - REDZONE,
                                  size & (REDZONE - 1));
  }
  if (size <= FLAG_max_malloc_fill_size) {
    real_memset((void*)addr, 0, rounded_size);
  }
  return (uint8_t*)addr;
}
656
// Core free routine: validates the chunk state (reporting double-free /
// invalid-free), poisons the body, records the free stack, and moves the
// chunk into quarantine rather than reusing it immediately.
static void Deallocate(uint8_t *ptr, AsanStackTrace *stack) {
  if (!ptr) return;
  CHECK(stack);

  if (FLAG_debug) {
    CHECK(malloc_info.FindPageGroup((uintptr_t)ptr));
  }

  // Printf("Deallocate %p\n", ptr);
  AsanChunk *m = PtrToChunk((uintptr_t)ptr);
  if (m->chunk_state == CHUNK_QUARANTINE) {
    // Already freed once: double-free.
    Report("ERROR: AddressSanitizer attempting double-free on %p:\n", ptr);
    stack->PrintStack();
    m->DescribeAddress((uintptr_t)ptr, 1);
    ShowStatsAndAbort();
  } else if (m->chunk_state != CHUNK_ALLOCATED) {
    Report("ERROR: AddressSanitizer attempting free on address which was not"
           " malloc()-ed: %p\n", ptr);
    stack->PrintStack();
    ShowStatsAndAbort();
  }
  CHECK(m->chunk_state == CHUNK_ALLOCATED);
  CHECK(m->free_tid == AsanThread::kInvalidTid);
  CHECK(m->alloc_tid >= 0);
  AsanThread *t = asanThreadRegistry().GetCurrent();
  m->free_tid = t ? t->tid() : 0;
  // The free stack overwrites the user body (which is being poisoned).
  AsanStackTrace::CompressStack(stack, m->compressed_free_stack(),
                                m->compressed_free_stack_size());
  size_t rounded_size = RoundUpTo(m->used_size, REDZONE);
  PoisonShadow((uintptr_t)ptr, rounded_size, kAsanHeapFreeMagic);

  // Statistics.
  AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
  thread_stats.frees++;
  thread_stats.freed += m->used_size;
  thread_stats.freed_by_size[m->SizeClass()]++;

  m->chunk_state = CHUNK_QUARANTINE;
  if (t) {
    // Queue locally; flush to the global quarantine when the local one
    // grows past kMaxThreadLocalQuarantine.
    AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
    CHECK(!m->next);
    ms->quarantine_.Push(m);

    if (ms->quarantine_.size() > kMaxThreadLocalQuarantine) {
      malloc_info.SwallowThreadLocalMallocStorage(ms, false);
    }
  } else {
    // No current thread (e.g. during shutdown): go straight to the
    // global quarantine.
    CHECK(!m->next);
    malloc_info.BypassThreadLocalQuarantine(m);
  }
}
708
709static uint8_t *Reallocate(uint8_t *old_ptr, size_t new_size,
710                           AsanStackTrace *stack) {
711  CHECK(old_ptr && new_size);
712
713  // Statistics.
714  AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
715  thread_stats.reallocs++;
716  thread_stats.realloced += new_size;
717
718  AsanChunk *m = PtrToChunk((uintptr_t)old_ptr);
719  CHECK(m->chunk_state == CHUNK_ALLOCATED);
720  size_t old_size = m->used_size;
721  size_t memcpy_size = Min(new_size, old_size);
722  uint8_t *new_ptr = Allocate(0, new_size, stack);
723  if (new_ptr) {
724    real_memcpy(new_ptr, old_ptr, memcpy_size);
725    Deallocate(old_ptr, stack);
726  }
727  return new_ptr;
728}
729
730}  // namespace __asan
731
732// Malloc hooks declaration.
733// ASAN_NEW_HOOK(ptr, size) is called immediately after
734//   allocation of "size" bytes, which returned "ptr".
735// ASAN_DELETE_HOOK(ptr) is called immediately before
736//   deallocation of "ptr".
737// If ASAN_NEW_HOOK or ASAN_DELETE_HOOK is defined, user
738// program must provide implementation of this hook.
739// If macro is undefined, the hook is no-op.
// When the user does not define the hooks, fall back to empty inline
// stubs so call sites need no #ifdefs.
#ifdef ASAN_NEW_HOOK
extern "C" void ASAN_NEW_HOOK(void *ptr, size_t size);
#else
static inline void ASAN_NEW_HOOK(void *ptr, size_t size) { }
#endif

#ifdef ASAN_DELETE_HOOK
extern "C" void ASAN_DELETE_HOOK(void *ptr);
#else
static inline void ASAN_DELETE_HOOK(void *ptr) { }
#endif
751
752namespace __asan {
753
754void *asan_memalign(size_t alignment, size_t size, AsanStackTrace *stack) {
755  void *ptr = (void*)Allocate(alignment, size, stack);
756  ASAN_NEW_HOOK(ptr, size);
757  return ptr;
758}
759
// free() entry point. The delete-hook fires before the memory is
// poisoned/quarantined so the user callback may still inspect it.
void asan_free(void *ptr, AsanStackTrace *stack) {
  ASAN_DELETE_HOOK(ptr);
  Deallocate((uint8_t*)ptr, stack);
}
764
765void *asan_malloc(size_t size, AsanStackTrace *stack) {
766  void *ptr = (void*)Allocate(0, size, stack);
767  ASAN_NEW_HOOK(ptr, size);
768  return ptr;
769}
770
771void *asan_calloc(size_t nmemb, size_t size, AsanStackTrace *stack) {
772  void *ptr = (void*)Allocate(0, nmemb * size, stack);
773  if (ptr)
774    real_memset(ptr, 0, nmemb * size);
775  ASAN_NEW_HOOK(ptr, nmemb * size);
776  return ptr;
777}
778
779void *asan_realloc(void *p, size_t size, AsanStackTrace *stack) {
780  if (p == NULL) {
781    void *ptr = (void*)Allocate(0, size, stack);
782    ASAN_NEW_HOOK(ptr, size);
783    return ptr;
784  } else if (size == 0) {
785    ASAN_DELETE_HOOK(p);
786    Deallocate((uint8_t*)p, stack);
787    return NULL;
788  }
789  return Reallocate((uint8_t*)p, size, stack);
790}
791
792void *asan_valloc(size_t size, AsanStackTrace *stack) {
793  void *ptr = (void*)Allocate(kPageSize, size, stack);
794  ASAN_NEW_HOOK(ptr, size);
795  return ptr;
796}
797
798void *asan_pvalloc(size_t size, AsanStackTrace *stack) {
799  size = RoundUpTo(size, kPageSize);
800  if (size == 0) {
801    // pvalloc(0) should allocate one page.
802    size = kPageSize;
803  }
804  void *ptr = (void*)Allocate(kPageSize, size, stack);
805  ASAN_NEW_HOOK(ptr, size);
806  return ptr;
807}
808
809int asan_posix_memalign(void **memptr, size_t alignment, size_t size,
810                          AsanStackTrace *stack) {
811  void *ptr = Allocate(alignment, size, stack);
812  CHECK(IsAligned((uintptr_t)ptr, alignment));
813  ASAN_NEW_HOOK(ptr, size);
814  *memptr = ptr;
815  return 0;
816}
817
// malloc_zone size callback: user-requested size of a live allocation,
// or 0 if |ptr| is not a live chunk of ours.
size_t __asan_mz_size(const void *ptr) {
  return malloc_info.AllocationSize((uintptr_t)ptr);
}
821
// Public wrapper used by the error reporter for heap addresses.
void DescribeHeapAddress(uintptr_t addr, uintptr_t access_size) {
  Describe(addr, access_size);
}
825
// Grabs the allocator lock (e.g. around fork) so no thread holds it
// mid-operation.
void __asan_mz_force_lock() {
  malloc_info.ForceLock();
}
829
// Releases the lock taken by __asan_mz_force_lock().
void __asan_mz_force_unlock() {
  malloc_info.ForceUnlock();
}
833
834// ---------------------- Fake stack-------------------- {{{1
// Zero-fills the object; real_memset must already be intercepted since
// this can run very early.
FakeStack::FakeStack() {
  CHECK(real_memset);
  real_memset(this, 0, sizeof(*this));
}
839
840bool FakeStack::AddrIsInSizeClass(uintptr_t addr, size_t size_class) {
841  uintptr_t mem = allocated_size_classes_[size_class];
842  uintptr_t size = ClassMmapSize(size_class);
843  bool res = mem && addr >= mem && addr < mem + size;
844  return res;
845}
846
847uintptr_t FakeStack::AddrIsInFakeStack(uintptr_t addr) {
848  for (size_t i = 0; i < kNumberOfSizeClasses; i++) {
849    if (AddrIsInSizeClass(addr, i)) return allocated_size_classes_[i];
850  }
851  return 0;
852}
853
// We may want to compute this during compilation.
// Maps a frame size to its fake-stack size class (0-based above
// kMinStackFrameSizeLog).
inline size_t FakeStack::ComputeSizeClass(size_t alloc_size) {
  size_t rounded_size = RoundUpToPowerOfTwo(alloc_size);
  size_t log = Log2(rounded_size);
  CHECK(alloc_size <= (1UL << log));
  // Debug aid: print the offending values before the CHECK below fires.
  if (!(alloc_size > (1UL << (log-1)))) {
    Printf("alloc_size %ld log %ld\n", alloc_size, log);
  }
  CHECK(alloc_size > (1UL << (log-1)));
  size_t res = log < kMinStackFrameSizeLog ? 0 : log - kMinStackFrameSizeLog;
  CHECK(res < kNumberOfSizeClasses);
  CHECK(ClassSize(res) >= rounded_size);
  return res;
}
868
// Appends |node| to the tail of the frame FIFO.
void FakeFrameFifo::FifoPush(FakeFrame *node) {
  CHECK(node);
  node->next = 0;
  if (first_ == 0 && last_ == 0) {
    // Empty list.
    first_ = last_ = node;
  } else {
    CHECK(first_);
    CHECK(last_);
    last_->next = node;
    last_ = node;
  }
}
881
882FakeFrame *FakeFrameFifo::FifoPop() {
883  CHECK(first_ && last_ && "Exhausted fake stack");
884  FakeFrame *res = 0;
885  if (first_ == last_) {
886    res = first_;
887    first_ = last_ = 0;
888  } else {
889    res = first_;
890    first_ = first_->next;
891  }
892  return res;
893}
894
// Records the owning thread's stack size and enables the fake stack.
void FakeStack::Init(size_t stack_size) {
  stack_size_ = stack_size;
  alive_ = true;
}
899
// Disables the fake stack, unpoisons and unmaps every size-class region.
void FakeStack::Cleanup() {
  alive_ = false;
  for (size_t i = 0; i < kNumberOfSizeClasses; i++) {
    uintptr_t mem = allocated_size_classes_[i];
    if (mem) {
      // Clear shadow before returning the pages to the OS.
      PoisonShadow(mem, ClassMmapSize(i), 0);
      allocated_size_classes_[i] = 0;
      AsanUnmapOrDie((void*)mem, ClassMmapSize(i));
    }
  }
}
911
// Bytes mmaped per size class. Note: |size_class| is currently unused —
// every class gets a region sized from the thread's real stack size.
size_t FakeStack::ClassMmapSize(size_t size_class) {
  return RoundUpToPowerOfTwo(stack_size_);
}
915
// Lazily mmaps the region for |size_class| and seeds its FIFO with every
// frame slot the region can hold.
void FakeStack::AllocateOneSizeClass(size_t size_class) {
  CHECK(ClassMmapSize(size_class) >= kPageSize);
  uintptr_t new_mem = (uintptr_t)AsanMmapSomewhereOrDie(
      ClassMmapSize(size_class), __FUNCTION__);
  // Printf("T%d new_mem[%ld]: %p-%p mmap %ld\n",
  //       asanThreadRegistry().GetCurrent()->tid(),
  //       size_class, new_mem, new_mem + ClassMmapSize(size_class),
  //       ClassMmapSize(size_class));
  size_t i;
  for (i = 0; i < ClassMmapSize(size_class);
       i += ClassSize(size_class)) {
    size_classes_[size_class].FifoPush((FakeFrame*)(new_mem + i));
  }
  CHECK(i == ClassMmapSize(size_class));
  allocated_size_classes_[size_class] = new_mem;
}
932
// Hands out a fake frame of at least |size| bytes for a function whose
// real frame is at |real_stack|. Falls back to the real stack when the
// fake stack is not alive. Also garbage-collects frames belonging to
// functions that have already returned (their real_stack is below ours).
uintptr_t FakeStack::AllocateStack(size_t size, size_t real_stack) {
  if (!alive_) return real_stack;
  CHECK(size <= kMaxStackMallocSize && size > 1);
  size_t size_class = ComputeSizeClass(size);
  if (!allocated_size_classes_[size_class]) {
    AllocateOneSizeClass(size_class);
  }
  FakeFrame *fake_frame = size_classes_[size_class].FifoPop();
  CHECK(fake_frame);
  fake_frame->size_minus_one = size - 1;
  fake_frame->real_stack = real_stack;
  // Pop (and recycle) call-stack entries for frames that have returned:
  // anything whose real stack pointer is not above the current one.
  while (FakeFrame *top = call_stack_.top()) {
    if (top->real_stack > real_stack) break;
    call_stack_.LifoPop();
    DeallocateFrame(top);
  }
  call_stack_.LifoPush(fake_frame);
  uintptr_t ptr = (uintptr_t)fake_frame;
  PoisonShadow(ptr, size, 0);
  return ptr;
}
954
// Returns a retired fake frame to its size class's FIFO for reuse.
void FakeStack::DeallocateFrame(FakeFrame *fake_frame) {
  CHECK(alive_);
  size_t size = fake_frame->size_minus_one + 1;
  size_t size_class = ComputeSizeClass(size);
  CHECK(allocated_size_classes_[size_class]);
  uintptr_t ptr = (uintptr_t)fake_frame;
  // The whole frame must lie inside the region of its size class.
  CHECK(AddrIsInSizeClass(ptr, size_class));
  CHECK(AddrIsInSizeClass(ptr + size - 1, size_class));
  size_classes_[size_class].FifoPush(fake_frame);
}
965
966void FakeStack::OnFree(size_t ptr, size_t size, size_t real_stack) {
967  FakeFrame *fake_frame = (FakeFrame*)ptr;
968  CHECK(fake_frame->magic = kRetiredStackFrameMagic);
969  CHECK(fake_frame->descr != 0);
970  CHECK(fake_frame->size_minus_one == size - 1);
971  PoisonShadow(ptr, size, kAsanStackAfterReturnMagic);
972}
973
974}  // namespace __asan
975
976// ---------------------- Interface ---------------- {{{1
977using namespace __asan;  // NOLINT
978
// Compiler-inserted prologue callback: returns a fake frame for the
// function, or |real_stack| itself when the fake stack is disabled or
// the thread's TSD is already gone.
size_t __asan_stack_malloc(size_t size, size_t real_stack) {
  if (!FLAG_use_fake_stack) return real_stack;
  AsanThread *t = asanThreadRegistry().GetCurrent();
  if (!t) {
    // TSD is gone, use the real stack.
    return real_stack;
  }
  size_t ptr = t->fake_stack().AllocateStack(size, real_stack);
  // Printf("__asan_stack_malloc %p %ld %p\n", ptr, size, real_stack);
  return ptr;
}
990
991void __asan_stack_free(size_t ptr, size_t size, size_t real_stack) {
992  if (!FLAG_use_fake_stack) return;
993  if (ptr != real_stack) {
994    FakeStack::OnFree(ptr, size, real_stack);
995  }
996}
997
998// ASan allocator doesn't reserve extra bytes, so normally we would
999// just return "size".
1000size_t __asan_get_estimated_allocated_size(size_t size) {
1001  if (size == 0) return 1;
1002  return Min(size, kMaxAllowedMallocSize);
1003}
1004
1005bool __asan_get_ownership(const void *p) {
1006  return (p == NULL) ||
1007      (malloc_info.AllocationSize((uintptr_t)p) > 0);
1008}
1009
// Returns the user-requested size of the live allocation |p|; aborts
// with a report if |p| is not owned or was already freed.
size_t __asan_get_allocated_size(const void *p) {
  if (p == NULL) return 0;
  size_t allocated_size = malloc_info.AllocationSize((uintptr_t)p);
  // Die if p is not malloced or if it is already freed.
  if (allocated_size == 0) {
    Printf("__asan_get_allocated_size failed, ptr=%p is not owned\n", p);
    PRINT_CURRENT_STACK();
    ShowStatsAndAbort();
  }
  return allocated_size;
}
1021