// asan_allocator.cc, revision ef14ff6512d7b2e20aa3206dff820b5f90285420
//===-- asan_allocator.cc ---------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Implementation of ASan's memory allocator.
// Every piece of memory (AsanChunk) allocated by the allocator
// has a left redzone of REDZONE bytes and
// a right redzone such that the end of the chunk is aligned to REDZONE
// (i.e. the right redzone is between 0 and REDZONE-1 bytes).
// The left redzone is always poisoned.
// The right redzone is poisoned on malloc, the body is poisoned on free.
// Once freed, a chunk is moved to the quarantine (a FIFO list).
// After the quarantine, a chunk is returned to the free lists.
//
// The left redzone contains ASan's internal data and the stack trace of
// the malloc call.
// Once freed, the body of the chunk contains the stack trace of the free call.
//
//===----------------------------------------------------------------------===//
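//
// Illustrative layout of one heap chunk (a sketch, not normative; exact
// offsets depend on REDZONE and on any extra alignment the caller requests):
//
//   [ ChunkBase + compressed alloc stack | user memory ..... | partial redzone ]
//   |<---- left redzone, REDZONE bytes -->|<-- used_size --->|<- 0..REDZONE-1 ->|
//
// beg() points at the first byte of user memory; the whole chunk is rounded
// up to a size class, and everything outside [beg(), beg()+used_size) stays
// poisoned while the chunk is allocated.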

#include "asan_allocator.h"
#include "asan_interceptors.h"
#include "asan_interface.h"
#include "asan_internal.h"
#include "asan_lock.h"
#include "asan_mapping.h"
#include "asan_stats.h"
#include "asan_thread.h"
#include "asan_thread_registry.h"

namespace __asan {

#define  REDZONE FLAG_redzone
static const size_t kMinAllocSize = REDZONE * 2;
static const size_t kMinMmapSize  = 4UL << 20;  // 4M
static const uint64_t kMaxAvailableRam = 128ULL << 30;  // 128G
static const size_t kMaxThreadLocalQuarantine = 1 << 20;  // 1M
static const size_t kMaxSizeForThreadLocalFreeList = 1 << 17;

// Size classes less than kMallocSizeClassStep are powers of two.
// All other size classes are multiples of kMallocSizeClassStep.
static const size_t kMallocSizeClassStepLog = 26;
static const size_t kMallocSizeClassStep = 1UL << kMallocSizeClassStepLog;

#if __WORDSIZE == 32
static const size_t kMaxAllowedMallocSize = 3UL << 30;  // 3G
#else
static const size_t kMaxAllowedMallocSize = 8UL << 30;  // 8G
#endif

static inline bool IsAligned(uintptr_t a, uintptr_t alignment) {
  return (a & (alignment - 1)) == 0;
}

static inline size_t Log2(size_t x) {
  CHECK(IsPowerOfTwo(x));
  return __builtin_ctzl(x);
}

static inline size_t RoundUpToPowerOfTwo(size_t size) {
  CHECK(size);
  if (IsPowerOfTwo(size)) return size;
  size_t up = __WORDSIZE - __builtin_clzl(size);
  CHECK(size < (1ULL << up));
  CHECK(size > (1ULL << (up - 1)));
  return 1UL << up;
}

static inline size_t SizeClassToSize(uint8_t size_class) {
  CHECK(size_class < kNumberOfSizeClasses);
  if (size_class <= kMallocSizeClassStepLog) {
    return 1UL << size_class;
  } else {
    return (size_class - kMallocSizeClassStepLog) * kMallocSizeClassStep;
  }
}

static inline uint8_t SizeToSizeClass(size_t size) {
  uint8_t res = 0;
  if (size <= kMallocSizeClassStep) {
    size_t rounded = RoundUpToPowerOfTwo(size);
    res = Log2(rounded);
  } else {
    res = ((size + kMallocSizeClassStep - 1) / kMallocSizeClassStep)
        + kMallocSizeClassStepLog;
  }
  CHECK(res < kNumberOfSizeClasses);
  CHECK(size <= SizeClassToSize(res));
  return res;
}
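
// Worked example of the size-class mapping above (illustrative only, using
// kMallocSizeClassStepLog == 26, i.e. kMallocSizeClassStep == 64M):
//   SizeToSizeClass(100)       -> RoundUpToPowerOfTwo(100) = 128, class 7,
//                                 SizeClassToSize(7)  = 128 bytes.
//   SizeToSizeClass(100 << 20) -> ceil(100M / 64M) + 26 = 28,
//                                 SizeClassToSize(28) = 2 * 64M = 128M.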

// Given REDZONE bytes, we need to mark the first 'size' bytes
// as addressable and the remaining REDZONE-size bytes as unaddressable.
static void PoisonHeapPartialRightRedzone(uintptr_t mem, size_t size) {
  CHECK(size <= REDZONE);
  CHECK(IsAligned(mem, REDZONE));
  CHECK(IsPowerOfTwo(SHADOW_GRANULARITY));
  CHECK(IsPowerOfTwo(REDZONE));
  CHECK(REDZONE >= SHADOW_GRANULARITY);
  PoisonShadowPartialRightRedzone(mem, size, REDZONE,
                                  kAsanHeapRightRedzoneMagic);
}

static uint8_t *MmapNewPagesAndPoisonShadow(size_t size) {
  CHECK(IsAligned(size, kPageSize));
  uint8_t *res = (uint8_t*)AsanMmapSomewhereOrDie(size, __FUNCTION__);
  PoisonShadow((uintptr_t)res, size, kAsanHeapLeftRedzoneMagic);
  if (FLAG_debug) {
    Printf("ASAN_MMAP: [%p, %p)\n", res, res + size);
  }
  return res;
}

// Every chunk of memory allocated by this allocator can be in one of 3 states:
// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
// CHUNK_QUARANTINE: the chunk was freed and put into the quarantine zone.
//
// The pseudo state CHUNK_MEMALIGN is used to mark that the address is not
// the beginning of an AsanChunk (in which case 'next' contains the address
// of the AsanChunk).
//
// The magic numbers for the enum values are taken randomly.
enum {
  CHUNK_AVAILABLE  = 0x573B,
  CHUNK_ALLOCATED  = 0x3204,
  CHUNK_QUARANTINE = 0x1978,
  CHUNK_MEMALIGN   = 0xDC68,
};

struct ChunkBase {
  uint16_t   chunk_state;
  uint8_t    size_class;
  uint32_t   offset;  // User-visible memory starts at this+offset (beg()).
  int32_t    alloc_tid;
  int32_t    free_tid;
  size_t     used_size;  // Size requested by the user.
  AsanChunk *next;

  uintptr_t   beg() { return (uintptr_t)this + offset; }
  size_t Size() { return SizeClassToSize(size_class); }
  uint8_t SizeClass() { return size_class; }
};

struct AsanChunk: public ChunkBase {
  uint32_t *compressed_alloc_stack() {
    CHECK(REDZONE >= sizeof(ChunkBase));
    return (uint32_t*)((uintptr_t)this + sizeof(ChunkBase));
  }
  uint32_t *compressed_free_stack() {
    CHECK(REDZONE >= sizeof(ChunkBase));
    return (uint32_t*)((uintptr_t)this + REDZONE);
  }

  // The left redzone after the ChunkBase is given to the alloc stack trace.
  size_t compressed_alloc_stack_size() {
    return (REDZONE - sizeof(ChunkBase)) / sizeof(uint32_t);
  }
  size_t compressed_free_stack_size() {
    return (REDZONE) / sizeof(uint32_t);
  }

  bool AddrIsInside(uintptr_t addr, size_t access_size, size_t *offset) {
    if (addr >= beg() && (addr + access_size) <= (beg() + used_size)) {
      *offset = addr - beg();
      return true;
    }
    return false;
  }

  bool AddrIsAtLeft(uintptr_t addr, size_t access_size, size_t *offset) {
    if (addr < beg()) {
      *offset = beg() - addr;
      return true;
    }
    return false;
  }

  bool AddrIsAtRight(uintptr_t addr, size_t access_size, size_t *offset) {
    if (addr + access_size >= beg() + used_size) {
      if (addr <= beg() + used_size)
        *offset = 0;
      else
        *offset = addr - (beg() + used_size);
      return true;
    }
    return false;
  }

  void DescribeAddress(uintptr_t addr, size_t access_size) {
    size_t offset;
    Printf("%p is located ", addr);
    if (AddrIsInside(addr, access_size, &offset)) {
      Printf("%ld bytes inside of", offset);
    } else if (AddrIsAtLeft(addr, access_size, &offset)) {
      Printf("%ld bytes to the left of", offset);
    } else if (AddrIsAtRight(addr, access_size, &offset)) {
      Printf("%ld bytes to the right of", offset);
    } else {
      Printf(" somewhere around (this is an AddressSanitizer bug!)");
    }
    Printf(" %lu-byte region [%p,%p)\n",
           used_size, beg(), beg() + used_size);
  }
};

static AsanChunk *PtrToChunk(uintptr_t ptr) {
  AsanChunk *m = (AsanChunk*)(ptr - REDZONE);
  if (m->chunk_state == CHUNK_MEMALIGN) {
    m = m->next;
  }
  return m;
}
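
// Illustration of the CHUNK_MEMALIGN indirection handled above (a sketch; the
// concrete numbers depend on REDZONE and on the requested alignment). For
// memalign(4096, ...) the user pointer may land several redzones to the right
// of the real AsanChunk header 'm'. In that case Allocate() writes a fake
// header 'p' at (user_ptr - REDZONE) with p->chunk_state == CHUNK_MEMALIGN and
// p->next == m, so PtrToChunk() follows 'next' back to the real chunk.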


void AsanChunkFifoList::PushList(AsanChunkFifoList *q) {
  if (last_) {
    CHECK(first_);
    CHECK(!last_->next);
    last_->next = q->first_;
    last_ = q->last_;
  } else {
    CHECK(!first_);
    last_ = q->last_;
    first_ = q->first_;
  }
  size_ += q->size();
  q->clear();
}

void AsanChunkFifoList::Push(AsanChunk *n) {
  CHECK(n->next == NULL);
  if (last_) {
    CHECK(first_);
    CHECK(!last_->next);
    last_->next = n;
    last_ = n;
  } else {
    CHECK(!first_);
    last_ = first_ = n;
  }
  size_ += n->Size();
}

// Interesting performance observation: this function takes up to 15% of overall
// allocator time. That's because *first_ has been evicted from the cache a long
// time ago. Not sure if we can or want to do anything about this.
AsanChunk *AsanChunkFifoList::Pop() {
  CHECK(first_);
  AsanChunk *res = first_;
  first_ = first_->next;
  if (first_ == NULL)
    last_ = NULL;
  CHECK(size_ >= res->Size());
  size_ -= res->Size();
  if (last_) {
    CHECK(!last_->next);
  }
  return res;
}

// Describes one mmap-ed region of same-sized chunks; all pages we ever
// allocated are covered by such PageGroups (see page_groups_ below).
struct PageGroup {
  uintptr_t beg;
  uintptr_t end;
  size_t size_of_chunk;
  uintptr_t last_chunk;
  bool InRange(uintptr_t addr) {
    return addr >= beg && addr < end;
  }
};

class MallocInfo {
 public:

  explicit MallocInfo(LinkerInitialized x) : mu_(x) { }

  AsanChunk *AllocateChunks(uint8_t size_class, size_t n_chunks) {
    AsanChunk *m = NULL;
    AsanChunk **fl = &free_lists_[size_class];
    {
      ScopedLock lock(&mu_);
      for (size_t i = 0; i < n_chunks; i++) {
        if (!(*fl)) {
          *fl = GetNewChunks(size_class);
        }
        AsanChunk *t = *fl;
        *fl = t->next;
        t->next = m;
        CHECK(t->chunk_state == CHUNK_AVAILABLE);
        m = t;
      }
    }
    return m;
  }

  void SwallowThreadLocalMallocStorage(AsanThreadLocalMallocStorage *x,
                                       bool eat_free_lists) {
    CHECK(FLAG_quarantine_size > 0);
    ScopedLock lock(&mu_);
    AsanChunkFifoList *q = &x->quarantine_;
    if (q->size() > 0) {
      quarantine_.PushList(q);
      while (quarantine_.size() > FLAG_quarantine_size) {
        QuarantinePop();
      }
    }
    if (eat_free_lists) {
      for (size_t size_class = 0; size_class < kNumberOfSizeClasses;
           size_class++) {
        AsanChunk *m = x->free_lists_[size_class];
        while (m) {
          AsanChunk *t = m->next;
          m->next = free_lists_[size_class];
          free_lists_[size_class] = m;
          m = t;
        }
        x->free_lists_[size_class] = 0;
      }
    }
  }

  void BypassThreadLocalQuarantine(AsanChunk *chunk) {
    ScopedLock lock(&mu_);
    quarantine_.Push(chunk);
  }

  AsanChunk *FindMallocedOrFreed(uintptr_t addr, size_t access_size) {
    ScopedLock lock(&mu_);
    return FindChunkByAddr(addr);
  }

  // TODO(glider): AllocationSize() may become very slow if the size of
  // page_groups_ grows. This can be fixed by increasing kMinMmapSize,
  // but a better solution is to speed up the search somehow.
  size_t AllocationSize(uintptr_t ptr) {
    ScopedLock lock(&mu_);

    // first, check if this is our memory
    PageGroup *g = FindPageGroupUnlocked(ptr);
    if (!g) return 0;
    AsanChunk *m = PtrToChunk(ptr);
    if (m->chunk_state == CHUNK_ALLOCATED) {
      return m->used_size;
    } else {
      return 0;
    }
  }

  void ForceLock() {
    mu_.Lock();
  }

  void ForceUnlock() {
    mu_.Unlock();
  }

  void PrintStatus() {
    ScopedLock lock(&mu_);
    size_t malloced = 0;

    Printf(" MallocInfo: in quarantine: %ld malloced: %ld; ",
           quarantine_.size() >> 20, malloced >> 20);
    for (size_t j = 1; j < kNumberOfSizeClasses; j++) {
      AsanChunk *i = free_lists_[j];
      if (!i) continue;
      size_t t = 0;
      for (; i; i = i->next) {
        t += i->Size();
      }
      Printf("%ld:%ld ", j, t >> 20);
    }
    Printf("\n");
  }

  PageGroup *FindPageGroup(uintptr_t addr) {
    ScopedLock lock(&mu_);
    return FindPageGroupUnlocked(addr);
  }

 private:
  PageGroup *FindPageGroupUnlocked(uintptr_t addr) {
    for (int i = 0; i < n_page_groups_; i++) {
      PageGroup *g = page_groups_[i];
      if (g->InRange(addr)) {
        return g;
      }
    }
    return NULL;
  }

  // We have an address between two chunks, and we want to report just one.
  AsanChunk *ChooseChunk(uintptr_t addr,
                         AsanChunk *left_chunk, AsanChunk *right_chunk) {
    // Prefer an allocated chunk or a chunk from quarantine.
    if (left_chunk->chunk_state == CHUNK_AVAILABLE &&
        right_chunk->chunk_state != CHUNK_AVAILABLE)
      return right_chunk;
    if (right_chunk->chunk_state == CHUNK_AVAILABLE &&
        left_chunk->chunk_state != CHUNK_AVAILABLE)
      return left_chunk;
    // Choose based on offset.
    size_t l_offset = 0, r_offset = 0;
    CHECK(left_chunk->AddrIsAtRight(addr, 1, &l_offset));
    CHECK(right_chunk->AddrIsAtLeft(addr, 1, &r_offset));
    if (l_offset < r_offset)
      return left_chunk;
    return right_chunk;
  }

  AsanChunk *FindChunkByAddr(uintptr_t addr) {
    PageGroup *g = FindPageGroupUnlocked(addr);
    if (!g) return 0;
    CHECK(g->size_of_chunk);
    uintptr_t offset_from_beg = addr - g->beg;
    uintptr_t this_chunk_addr = g->beg +
        (offset_from_beg / g->size_of_chunk) * g->size_of_chunk;
    CHECK(g->InRange(this_chunk_addr));
    AsanChunk *m = (AsanChunk*)this_chunk_addr;
    CHECK(m->chunk_state == CHUNK_ALLOCATED ||
          m->chunk_state == CHUNK_AVAILABLE ||
          m->chunk_state == CHUNK_QUARANTINE);
    size_t offset = 0;
    if (m->AddrIsInside(addr, 1, &offset))
      return m;

    if (m->AddrIsAtRight(addr, 1, &offset)) {
      if (this_chunk_addr == g->last_chunk)  // rightmost chunk
        return m;
      uintptr_t right_chunk_addr = this_chunk_addr + g->size_of_chunk;
      CHECK(g->InRange(right_chunk_addr));
      return ChooseChunk(addr, m, (AsanChunk*)right_chunk_addr);
    } else {
      CHECK(m->AddrIsAtLeft(addr, 1, &offset));
      if (this_chunk_addr == g->beg)  // leftmost chunk
        return m;
      uintptr_t left_chunk_addr = this_chunk_addr - g->size_of_chunk;
      CHECK(g->InRange(left_chunk_addr));
      return ChooseChunk(addr, (AsanChunk*)left_chunk_addr, m);
    }
  }
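
  // Worked example of the lookup above (illustrative only): suppose a
  // PageGroup starts at g->beg == 0x10000000 with size_of_chunk == 4096 and
  // addr == 0x10002a00. Then offset_from_beg == 0x2a00, this_chunk_addr ==
  // 0x10002000, and the address is attributed either to that chunk or, via
  // ChooseChunk(), to one of its immediate neighbors.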

  void QuarantinePop() {
    CHECK(quarantine_.size() > 0);
    AsanChunk *m = quarantine_.Pop();
    CHECK(m);
    // if (F_v >= 2) Printf("MallocInfo::pop %p\n", m);

    CHECK(m->chunk_state == CHUNK_QUARANTINE);
    m->chunk_state = CHUNK_AVAILABLE;
    CHECK(m->alloc_tid >= 0);
    CHECK(m->free_tid >= 0);

    size_t size_class = m->SizeClass();
    m->next = free_lists_[size_class];
    free_lists_[size_class] = m;

    // Statistics.
    AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
    thread_stats.real_frees++;
    thread_stats.really_freed += m->used_size;
    thread_stats.really_freed_redzones += m->Size() - m->used_size;
    thread_stats.really_freed_by_size[m->SizeClass()]++;
  }

  // Get a list of newly allocated chunks.
  AsanChunk *GetNewChunks(uint8_t size_class) {
    size_t size = SizeClassToSize(size_class);
    CHECK(IsPowerOfTwo(kMinMmapSize));
    CHECK(size < kMinMmapSize || (size % kMinMmapSize) == 0);
    size_t mmap_size = Max(size, kMinMmapSize);
    size_t n_chunks = mmap_size / size;
    CHECK(n_chunks * size == mmap_size);
    if (size < kPageSize) {
      // Size is small: keep the last chunk for the PageGroup (stays poisoned).
      n_chunks--;
    } else {
      // Size is large: map an extra page on the right for the PageGroup.
      mmap_size += kPageSize;
    }
    CHECK(n_chunks > 0);
    uint8_t *mem = MmapNewPagesAndPoisonShadow(mmap_size);

    // Statistics.
    AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
    thread_stats.mmaps++;
    thread_stats.mmaped += mmap_size;
    thread_stats.mmaped_by_size[size_class] += n_chunks;

    AsanChunk *res = NULL;
    for (size_t i = 0; i < n_chunks; i++) {
      AsanChunk *m = (AsanChunk*)(mem + i * size);
      m->chunk_state = CHUNK_AVAILABLE;
      m->size_class = size_class;
      m->next = res;
      res = m;
    }
    PageGroup *pg = (PageGroup*)(mem + n_chunks * size);
    // This memory is already poisoned, no need to poison it again.
    pg->beg = (uintptr_t)mem;
    pg->end = pg->beg + mmap_size;
    pg->size_of_chunk = size;
    pg->last_chunk = (uintptr_t)(mem + size * (n_chunks - 1));
    int page_group_idx = AtomicInc(&n_page_groups_) - 1;
    CHECK(page_group_idx < (int)ASAN_ARRAY_SIZE(page_groups_));
    page_groups_[page_group_idx] = pg;
    return res;
  }

  AsanChunk *free_lists_[kNumberOfSizeClasses];
  AsanChunkFifoList quarantine_;
  AsanLock mu_;

  PageGroup *page_groups_[kMaxAvailableRam / kMinMmapSize];
  int n_page_groups_;  // atomic
};

static MallocInfo malloc_info(LINKER_INITIALIZED);

void AsanThreadLocalMallocStorage::CommitBack() {
  malloc_info.SwallowThreadLocalMallocStorage(this, true);
}

static void Describe(uintptr_t addr, size_t access_size) {
  AsanChunk *m = malloc_info.FindMallocedOrFreed(addr, access_size);
  if (!m) return;
  m->DescribeAddress(addr, access_size);
  CHECK(m->alloc_tid >= 0);
  AsanThreadSummary *alloc_thread =
      asanThreadRegistry().FindByTid(m->alloc_tid);
  AsanStackTrace alloc_stack;
  AsanStackTrace::UncompressStack(&alloc_stack, m->compressed_alloc_stack(),
                                  m->compressed_alloc_stack_size());
  AsanThread *t = asanThreadRegistry().GetCurrent();
  CHECK(t);
  if (m->free_tid >= 0) {
    AsanThreadSummary *free_thread =
        asanThreadRegistry().FindByTid(m->free_tid);
    Printf("freed by thread T%d here:\n", free_thread->tid());
    AsanStackTrace free_stack;
    AsanStackTrace::UncompressStack(&free_stack, m->compressed_free_stack(),
                                    m->compressed_free_stack_size());
    free_stack.PrintStack();
    Printf("previously allocated by thread T%d here:\n",
           alloc_thread->tid());

    alloc_stack.PrintStack();
    t->summary()->Announce();
    free_thread->Announce();
    alloc_thread->Announce();
  } else {
    Printf("allocated by thread T%d here:\n", alloc_thread->tid());
    alloc_stack.PrintStack();
    t->summary()->Announce();
    alloc_thread->Announce();
  }
}

static uint8_t *Allocate(size_t alignment, size_t size, AsanStackTrace *stack) {
  __asan_init();
  CHECK(stack);
  if (size == 0) {
    size = 1;  // TODO(kcc): do something smarter
  }
  CHECK(IsPowerOfTwo(alignment));
  size_t rounded_size = RoundUpTo(size, REDZONE);
  size_t needed_size = rounded_size + REDZONE;
  if (alignment > REDZONE) {
    needed_size += alignment;
  }
  CHECK(IsAligned(needed_size, REDZONE));
  if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
    Report("WARNING: AddressSanitizer failed to allocate %p bytes\n", size);
    return 0;
  }

  uint8_t size_class = SizeToSizeClass(needed_size);
  size_t size_to_allocate = SizeClassToSize(size_class);
  CHECK(size_to_allocate >= kMinAllocSize);
  CHECK(size_to_allocate >= needed_size);
  CHECK(IsAligned(size_to_allocate, REDZONE));

  if (FLAG_v >= 2) {
    Printf("Allocate align: %ld size: %ld class: %d real: %ld\n",
         alignment, size, size_class, size_to_allocate);
  }

  AsanThread *t = asanThreadRegistry().GetCurrent();
  AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
  // Statistics
  thread_stats.mallocs++;
  thread_stats.malloced += size;
  thread_stats.malloced_redzones += size_to_allocate - size;
  thread_stats.malloced_by_size[size_class]++;

  AsanChunk *m = NULL;
  if (!t || size_to_allocate >= kMaxSizeForThreadLocalFreeList) {
    // get directly from global storage.
    m = malloc_info.AllocateChunks(size_class, 1);
    thread_stats.malloc_large++;
  } else {
    // get from the thread-local storage.
    AsanChunk **fl = &t->malloc_storage().free_lists_[size_class];
    if (!*fl) {
      size_t n_new_chunks = kMaxSizeForThreadLocalFreeList / size_to_allocate;
      *fl = malloc_info.AllocateChunks(size_class, n_new_chunks);
      thread_stats.malloc_small_slow++;
    }
    m = *fl;
    *fl = (*fl)->next;
  }
  CHECK(m);
  CHECK(m->chunk_state == CHUNK_AVAILABLE);
  m->chunk_state = CHUNK_ALLOCATED;
  m->next = NULL;
  CHECK(m->Size() == size_to_allocate);
  uintptr_t addr = (uintptr_t)m + REDZONE;
  CHECK(addr == (uintptr_t)m->compressed_free_stack());

  if (alignment > REDZONE && (addr & (alignment - 1))) {
    addr = RoundUpTo(addr, alignment);
    CHECK((addr & (alignment - 1)) == 0);
    AsanChunk *p = (AsanChunk*)(addr - REDZONE);
    p->chunk_state = CHUNK_MEMALIGN;
    p->next = m;
  }
  CHECK(m == PtrToChunk(addr));
  m->used_size = size;
  m->offset = addr - (uintptr_t)m;
  CHECK(m->beg() == addr);
  m->alloc_tid = t ? t->tid() : 0;
  m->free_tid   = AsanThread::kInvalidTid;
  AsanStackTrace::CompressStack(stack, m->compressed_alloc_stack(),
                                m->compressed_alloc_stack_size());
  PoisonShadow(addr, rounded_size, 0);
  if (size < rounded_size) {
    PoisonHeapPartialRightRedzone(addr + rounded_size - REDZONE,
                                  size & (REDZONE - 1));
  }
  if (size <= FLAG_max_malloc_fill_size) {
    real_memset((void*)addr, 0, rounded_size);
  }
  return (uint8_t*)addr;
}
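
// Worked example for Allocate() (a sketch; assumes the default REDZONE of 128
// bytes and no extra alignment): malloc(1000) gives rounded_size == 1024,
// needed_size == 1152, size_class == 11, size_to_allocate == 2048. The user
// pointer is m + 128, bytes [1000, 1024) of the body are poisoned as a partial
// right redzone, and the rest of the 2048-byte chunk stays poisoned.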

static void Deallocate(uint8_t *ptr, AsanStackTrace *stack) {
  if (!ptr) return;
  CHECK(stack);

  if (FLAG_debug) {
    CHECK(malloc_info.FindPageGroup((uintptr_t)ptr));
  }

  // Printf("Deallocate %p\n", ptr);
  AsanChunk *m = PtrToChunk((uintptr_t)ptr);
  if (m->chunk_state == CHUNK_QUARANTINE) {
    Report("ERROR: AddressSanitizer attempting double-free on %p:\n", ptr);
    stack->PrintStack();
    m->DescribeAddress((uintptr_t)ptr, 1);
    ShowStatsAndAbort();
  } else if (m->chunk_state != CHUNK_ALLOCATED) {
    Report("ERROR: AddressSanitizer attempting free on address which was not"
           " malloc()-ed: %p\n", ptr);
    stack->PrintStack();
    ShowStatsAndAbort();
  }
  CHECK(m->chunk_state == CHUNK_ALLOCATED);
  CHECK(m->free_tid == AsanThread::kInvalidTid);
  CHECK(m->alloc_tid >= 0);
  AsanThread *t = asanThreadRegistry().GetCurrent();
  m->free_tid = t ? t->tid() : 0;
  AsanStackTrace::CompressStack(stack, m->compressed_free_stack(),
                                m->compressed_free_stack_size());
  size_t rounded_size = RoundUpTo(m->used_size, REDZONE);
  PoisonShadow((uintptr_t)ptr, rounded_size, kAsanHeapFreeMagic);

  // Statistics.
  AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
  thread_stats.frees++;
  thread_stats.freed += m->used_size;
  thread_stats.freed_by_size[m->SizeClass()]++;

  m->chunk_state = CHUNK_QUARANTINE;
  if (t) {
    AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
    CHECK(!m->next);
    ms->quarantine_.Push(m);

    if (ms->quarantine_.size() > kMaxThreadLocalQuarantine) {
      malloc_info.SwallowThreadLocalMallocStorage(ms, false);
    }
  } else {
    CHECK(!m->next);
    malloc_info.BypassThreadLocalQuarantine(m);
  }
}

static uint8_t *Reallocate(uint8_t *old_ptr, size_t new_size,
                           AsanStackTrace *stack) {
  CHECK(old_ptr && new_size);

  // Statistics.
  AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
  thread_stats.reallocs++;
  thread_stats.realloced += new_size;

  AsanChunk *m = PtrToChunk((uintptr_t)old_ptr);
  CHECK(m->chunk_state == CHUNK_ALLOCATED);
  size_t old_size = m->used_size;
  size_t memcpy_size = Min(new_size, old_size);
  uint8_t *new_ptr = Allocate(0, new_size, stack);
  if (new_ptr) {
    real_memcpy(new_ptr, old_ptr, memcpy_size);
    Deallocate(old_ptr, stack);
  }
  return new_ptr;
}

}  // namespace __asan

// Malloc hooks declaration.
// ASAN_NEW_HOOK(ptr, size) is called immediately after
//   allocation of "size" bytes, which returned "ptr".
// ASAN_DELETE_HOOK(ptr) is called immediately before
//   deallocation of "ptr".
// If ASAN_NEW_HOOK or ASAN_DELETE_HOOK is defined, the user
// program must provide an implementation of that hook.
// If the macro is undefined, the hook is a no-op.
#ifdef ASAN_NEW_HOOK
extern "C" void ASAN_NEW_HOOK(void *ptr, size_t size);
#else
static inline void ASAN_NEW_HOOK(void *ptr, size_t size) { }
#endif

#ifdef ASAN_DELETE_HOOK
extern "C" void ASAN_DELETE_HOOK(void *ptr);
#else
static inline void ASAN_DELETE_HOOK(void *ptr) { }
#endif
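
//
// Example of how a user program could hook allocations (illustrative only;
// the hook name "MyNewHook" is hypothetical). Compile this runtime with
// -DASAN_NEW_HOOK=MyNewHook and provide in the user program:
//   extern "C" void MyNewHook(void *ptr, size_t size) {
//     // e.g. record (ptr, size) in the application's own profiler.
//   }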

namespace __asan {

void *asan_memalign(size_t alignment, size_t size, AsanStackTrace *stack) {
  void *ptr = (void*)Allocate(alignment, size, stack);
  ASAN_NEW_HOOK(ptr, size);
  return ptr;
}

void asan_free(void *ptr, AsanStackTrace *stack) {
  ASAN_DELETE_HOOK(ptr);
  Deallocate((uint8_t*)ptr, stack);
}

void *asan_malloc(size_t size, AsanStackTrace *stack) {
  void *ptr = (void*)Allocate(0, size, stack);
  ASAN_NEW_HOOK(ptr, size);
  return ptr;
}

void *asan_calloc(size_t nmemb, size_t size, AsanStackTrace *stack) {
  void *ptr = (void*)Allocate(0, nmemb * size, stack);
  if (ptr)
    real_memset(ptr, 0, nmemb * size);
  ASAN_NEW_HOOK(ptr, nmemb * size);
  return ptr;
}

void *asan_realloc(void *p, size_t size, AsanStackTrace *stack) {
  if (p == NULL) {
    void *ptr = (void*)Allocate(0, size, stack);
    ASAN_NEW_HOOK(ptr, size);
    return ptr;
  } else if (size == 0) {
    ASAN_DELETE_HOOK(p);
    Deallocate((uint8_t*)p, stack);
    return NULL;
  }
  return Reallocate((uint8_t*)p, size, stack);
}

void *asan_valloc(size_t size, AsanStackTrace *stack) {
  void *ptr = (void*)Allocate(kPageSize, size, stack);
  ASAN_NEW_HOOK(ptr, size);
  return ptr;
}

void *asan_pvalloc(size_t size, AsanStackTrace *stack) {
  size = RoundUpTo(size, kPageSize);
  if (size == 0) {
    // pvalloc(0) should allocate one page.
    size = kPageSize;
  }
  void *ptr = (void*)Allocate(kPageSize, size, stack);
  ASAN_NEW_HOOK(ptr, size);
  return ptr;
}

int asan_posix_memalign(void **memptr, size_t alignment, size_t size,
                          AsanStackTrace *stack) {
  void *ptr = Allocate(alignment, size, stack);
  CHECK(IsAligned((uintptr_t)ptr, alignment));
  ASAN_NEW_HOOK(ptr, size);
  *memptr = ptr;
  return 0;
}

size_t __asan_mz_size(const void *ptr) {
  return malloc_info.AllocationSize((uintptr_t)ptr);
}

void DescribeHeapAddress(uintptr_t addr, uintptr_t access_size) {
  Describe(addr, access_size);
}

void __asan_mz_force_lock() {
  malloc_info.ForceLock();
}

void __asan_mz_force_unlock() {
  malloc_info.ForceUnlock();
}

// ---------------------- Fake stack-------------------- {{{1
FakeStack::FakeStack() {
  CHECK(real_memset);
  real_memset(this, 0, sizeof(*this));
}

bool FakeStack::AddrIsInSizeClass(uintptr_t addr, size_t size_class) {
  uintptr_t mem = allocated_size_classes_[size_class];
  uintptr_t size = ClassMmapSize(size_class);
  bool res = mem && addr >= mem && addr < mem + size;
  return res;
}

uintptr_t FakeStack::AddrIsInFakeStack(uintptr_t addr) {
  for (size_t i = 0; i < kNumberOfSizeClasses; i++) {
    if (AddrIsInSizeClass(addr, i)) return allocated_size_classes_[i];
  }
  return 0;
}

// We may want to compute this during compilation.
inline size_t FakeStack::ComputeSizeClass(size_t alloc_size) {
  size_t rounded_size = RoundUpToPowerOfTwo(alloc_size);
  size_t log = Log2(rounded_size);
  CHECK(alloc_size <= (1UL << log));
  if (!(alloc_size > (1UL << (log-1)))) {
    Printf("alloc_size %ld log %ld\n", alloc_size, log);
  }
  CHECK(alloc_size > (1UL << (log-1)));
  size_t res = log < kMinStackFrameSizeLog ? 0 : log - kMinStackFrameSizeLog;
  CHECK(res < kNumberOfSizeClasses);
  CHECK(ClassSize(res) >= rounded_size);
  return res;
}

void FakeFrameFifo::FifoPush(FakeFrame *node) {
  CHECK(node);
  node->next = 0;
  if (first_ == 0 && last_ == 0) {
    first_ = last_ = node;
  } else {
    CHECK(first_);
    CHECK(last_);
    last_->next = node;
    last_ = node;
  }
}

FakeFrame *FakeFrameFifo::FifoPop() {
  CHECK(first_ && last_ && "Exhausted fake stack");
  FakeFrame *res = 0;
  if (first_ == last_) {
    res = first_;
    first_ = last_ = 0;
  } else {
    res = first_;
    first_ = first_->next;
  }
  return res;
}

void FakeStack::Init(size_t stack_size) {
  stack_size_ = stack_size;
  alive_ = true;
}

void FakeStack::Cleanup() {
  alive_ = false;
  for (size_t i = 0; i < kNumberOfSizeClasses; i++) {
    uintptr_t mem = allocated_size_classes_[i];
    if (mem) {
      PoisonShadow(mem, ClassMmapSize(i), 0);
      allocated_size_classes_[i] = 0;
      AsanUnmapOrDie((void*)mem, ClassMmapSize(i));
    }
  }
}

size_t FakeStack::ClassMmapSize(size_t size_class) {
  return RoundUpToPowerOfTwo(stack_size_);
}

void FakeStack::AllocateOneSizeClass(size_t size_class) {
  CHECK(ClassMmapSize(size_class) >= kPageSize);
  uintptr_t new_mem = (uintptr_t)AsanMmapSomewhereOrDie(
      ClassMmapSize(size_class), __FUNCTION__);
  // Printf("T%d new_mem[%ld]: %p-%p mmap %ld\n",
  //       asanThreadRegistry().GetCurrent()->tid(),
  //       size_class, new_mem, new_mem + ClassMmapSize(size_class),
  //       ClassMmapSize(size_class));
  size_t i;
  for (i = 0; i < ClassMmapSize(size_class);
       i += ClassSize(size_class)) {
    size_classes_[size_class].FifoPush((FakeFrame*)(new_mem + i));
  }
  CHECK(i == ClassMmapSize(size_class));
  allocated_size_classes_[size_class] = new_mem;
}

uintptr_t FakeStack::AllocateStack(size_t size, size_t real_stack) {
  if (!alive_) return real_stack;
  CHECK(size <= kMaxStackMallocSize && size > 1);
  size_t size_class = ComputeSizeClass(size);
  if (!allocated_size_classes_[size_class]) {
    AllocateOneSizeClass(size_class);
  }
  FakeFrame *fake_frame = size_classes_[size_class].FifoPop();
  CHECK(fake_frame);
  fake_frame->size_minus_one = size - 1;
  fake_frame->real_stack = real_stack;
  while (FakeFrame *top = call_stack_.top()) {
    if (top->real_stack > real_stack) break;
    call_stack_.LifoPop();
    DeallocateFrame(top);
  }
  call_stack_.LifoPush(fake_frame);
  uintptr_t ptr = (uintptr_t)fake_frame;
  PoisonShadow(ptr, size, 0);
  return ptr;
}

void FakeStack::DeallocateFrame(FakeFrame *fake_frame) {
  CHECK(alive_);
  size_t size = fake_frame->size_minus_one + 1;
  size_t size_class = ComputeSizeClass(size);
  CHECK(allocated_size_classes_[size_class]);
  uintptr_t ptr = (uintptr_t)fake_frame;
  CHECK(AddrIsInSizeClass(ptr, size_class));
  CHECK(AddrIsInSizeClass(ptr + size - 1, size_class));
  size_classes_[size_class].FifoPush(fake_frame);
}

void FakeStack::OnFree(size_t ptr, size_t size, size_t real_stack) {
  FakeFrame *fake_frame = (FakeFrame*)ptr;
  CHECK(fake_frame->magic == kRetiredStackFrameMagic);
  CHECK(fake_frame->descr != 0);
  CHECK(fake_frame->size_minus_one == size - 1);
  PoisonShadow(ptr, size, kAsanStackAfterReturnMagic);
}

}  // namespace __asan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;  // NOLINT

size_t __asan_stack_malloc(size_t size, size_t real_stack) {
  if (!FLAG_use_fake_stack) return real_stack;
  AsanThread *t = asanThreadRegistry().GetCurrent();
  if (!t) {
    // TSD is gone, use the real stack.
    return real_stack;
  }
  size_t ptr = t->fake_stack().AllocateStack(size, real_stack);
  // Printf("__asan_stack_malloc %p %ld %p\n", ptr, size, real_stack);
  return ptr;
}

void __asan_stack_free(size_t ptr, size_t size, size_t real_stack) {
  if (!FLAG_use_fake_stack) return;
  if (ptr != real_stack) {
    FakeStack::OnFree(ptr, size, real_stack);
  }
}
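
// Sketch of how compiler-instrumented code is expected to use the two
// functions above (illustrative pseudo-code, not emitted by this file;
// kFrameSize is a hypothetical name):
//   void foo() {
//     char frame[kFrameSize];                                  // real stack
//     size_t base = __asan_stack_malloc(kFrameSize, (size_t)frame);
//     ... use the fake frame at 'base', mark it retired on return ...
//     __asan_stack_free(base, kFrameSize, (size_t)frame);
//   }
// If the fake stack is unavailable, base == (size_t)frame and
// __asan_stack_free() does nothing for that frame.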

// ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size".
size_t __asan_get_estimated_allocated_size(size_t size) {
  if (size == 0) return 1;
  return Min(size, kMaxAllowedMallocSize);
}

bool __asan_get_ownership(const void *p) {
  return (p == NULL) ||
      (malloc_info.AllocationSize((uintptr_t)p) > 0);
}

size_t __asan_get_allocated_size(const void *p) {
  if (p == NULL) return 0;
  size_t allocated_size = malloc_info.AllocationSize((uintptr_t)p);
  // Die if p is not malloced or if it is already freed.
  if (allocated_size == 0) {
    Printf("__asan_get_allocated_size failed, ptr=%p is not owned\n", p);
    PRINT_CURRENT_STACK();
    ShowStatsAndAbort();
  }
  return allocated_size;
}
1017