asan_allocator2.cc revision fb98d3dc1e3a5eb9130fb7f16b8a50b616498139
//===-- asan_allocator2.cc ------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Implementation of ASan's memory allocator, second version.
// This variant uses the allocator from sanitizer_common, i.e. the one shared
// with ThreadSanitizer and MemorySanitizer.
//
// Status: under development, not enabled by default yet.
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"

#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_quarantine.h"

namespace __asan {

struct AsanMapUnmapCallback {
  void OnMap(uptr p, uptr size) const {
    PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
    // Statistics.
    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.mmaps++;
    thread_stats.mmaped += size;
  }
  void OnUnmap(uptr p, uptr size) const {
    PoisonShadow(p, size, 0);
    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    // Since asan's mapping is compacting, the shadow chunk may not be
    // page-aligned, so we only flush the page-aligned portion.
    uptr page_size = GetPageSizeCached();
    uptr shadow_beg = RoundUpTo(MemToShadow(p), page_size);
    uptr shadow_end = RoundDownTo(MemToShadow(p + size), page_size);
    FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
    // Statistics.
    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.munmaps++;
    thread_stats.munmaped += size;
  }
};

#if SANITIZER_WORDSIZE == 64
#if defined(__powerpc64__)
const uptr kAllocatorSpace =  0xa0000000000ULL;
const uptr kAllocatorSize  =  0x20000000000ULL;  // 2T.
#else
const uptr kAllocatorSpace = 0x600000000000ULL;
const uptr kAllocatorSize  =  0x40000000000ULL;  // 4T.
#endif
typedef DefaultSizeClassMap SizeClassMap;
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0 /*metadata*/,
    SizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
#elif SANITIZER_WORDSIZE == 32
static const u64 kAddressSpaceSize = 1ULL << 32;
typedef CompactSizeClassMap SizeClassMap;
typedef SizeClassAllocator32<0, kAddressSpaceSize, 16,
  SizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
#endif

typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<AsanMapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
    SecondaryAllocator> Allocator;

// We cannot use THREADLOCAL because it is not supported on some of the
// platforms we care about (OSX 10.6, Android).
// static THREADLOCAL AllocatorCache cache;
AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator2_cache));
  return reinterpret_cast<AllocatorCache *>(ms->allocator2_cache);
}

static Allocator allocator;

static const uptr kMaxAllowedMallocSize =
  FIRST_32_SECOND_64(3UL << 30, 8UL << 30);

static const uptr kMaxThreadLocalQuarantine =
  FIRST_32_SECOND_64(1 << 18, 1 << 20);

// Every chunk of memory allocated by this allocator can be in one of 3 states:
// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
enum {
  CHUNK_AVAILABLE  = 0,  // 0 is the default value even if we didn't set it.
  CHUNK_ALLOCATED  = 2,
  CHUNK_QUARANTINE = 3
};

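// The transitions, as implemented below, are:
//   CHUNK_AVAILABLE  -> CHUNK_ALLOCATED   in Allocate();
//   CHUNK_ALLOCATED  -> CHUNK_QUARANTINE  in Deallocate() (atomic CAS);
//   CHUNK_QUARANTINE -> CHUNK_AVAILABLE   in QuarantineCallback::Recycle().
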
// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
// We use adaptive redzones: for larger allocations, larger redzones are used.
static u32 RZLog2Size(u32 rz_log) {
  CHECK_LT(rz_log, 8);
  return 16 << rz_log;
}

static u32 RZSize2Log(u32 rz_size) {
  CHECK_GE(rz_size, 16);
  CHECK_LE(rz_size, 2048);
  CHECK(IsPowerOfTwo(rz_size));
  u32 res = Log2(rz_size) - 4;
  CHECK_EQ(rz_size, RZLog2Size(res));
  return res;
}

static uptr ComputeRZLog(uptr user_requested_size) {
  u32 rz_log =
    user_requested_size <= 64        - 16   ? 0 :
    user_requested_size <= 128       - 32   ? 1 :
    user_requested_size <= 512       - 64   ? 2 :
    user_requested_size <= 4096      - 128  ? 3 :
    user_requested_size <= (1 << 14) - 256  ? 4 :
    user_requested_size <= (1 << 15) - 512  ? 5 :
    user_requested_size <= (1 << 16) - 1024 ? 6 : 7;
  return Max(rz_log, RZSize2Log(flags()->redzone));
}
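
// Worked example (assuming the default redzone flag of 16, i.e. rz_log 0):
// a 100-byte request falls into the "<= 512 - 64" bucket above, so
// ComputeRZLog returns 2, i.e. a 64-byte redzone (RZLog2Size(2) == 64).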

// The memory chunk allocated from the underlying allocator looks like this:
// L L L L L L H H U U U U U U R R
//   L -- left redzone words (0 or more bytes)
//   H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
//   U -- user memory.
//   R -- right redzone (0 or more bytes)
// ChunkBase consists of ChunkHeader and other bytes that overlap with user
// memory.

// If a memory chunk is allocated by memalign and we had to increase the
// allocation size to achieve the proper alignment, then we store this magic
// value in the first uptr word of the memory block and store the address of
// ChunkBase in the next uptr.
// M B ? ? ? L L L L L L  H H U U U U U U
//   M -- magic value kMemalignMagic
//   B -- address of ChunkHeader pointing to the first 'H'
static const uptr kMemalignMagic = 0xCC6E96B9;

struct ChunkHeader {
  // First 8 bytes.
  u32 chunk_state       : 8;  // Must be first.
  u32 alloc_tid         : 24;

  u32 free_tid          : 24;
  u32 from_memalign     : 1;
  u32 alloc_type        : 2;
  u32 rz_log            : 3;
  // Second 8 bytes.
  // This field is used for small sizes. For large sizes it is equal to
  // SizeClassMap::kMaxSize and the actual size is stored in the
  // SecondaryAllocator's metadata.
  u32 user_requested_size;
  u32 alloc_context_id;
};

struct ChunkBase : ChunkHeader {
  // Header2, intersects with user memory.
  AsanChunk *next;
  u32 free_context_id;
};

static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
COMPILER_CHECK(kChunkHeaderSize == 16);
COMPILER_CHECK(kChunkHeader2Size <= 16);

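// These checks guarantee that ChunkHeader fits into the smallest possible
// redzone (16 bytes) and that Header2 fits into the smallest possible user
// region (Allocate() never makes it shorter than kChunkHeader2Size).
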
struct AsanChunk: ChunkBase {
  uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
  uptr UsedSize() {
    if (user_requested_size != SizeClassMap::kMaxSize)
      return user_requested_size;
    return *reinterpret_cast<uptr *>(allocator.GetMetaData(AllocBeg()));
  }
  void *AllocBeg() {
    if (from_memalign)
      return allocator.GetBlockBegin(reinterpret_cast<void *>(this));
    return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log));
  }
  // We store the alloc/free stack traces in the chunk itself.
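  // The alloc stack occupies the left redzone between the redzone start and
  // the ChunkHeader; the free stack reuses the (now dead) user memory right
  // past Header2.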
  u32 *AllocStackBeg() {
    return (u32*)(Beg() - RZLog2Size(rz_log));
  }
  uptr AllocStackSize() {
    CHECK_GE(RZLog2Size(rz_log), kChunkHeaderSize);
    return (RZLog2Size(rz_log) - kChunkHeaderSize) / sizeof(u32);
  }
  u32 *FreeStackBeg() {
    return (u32*)(Beg() + kChunkHeader2Size);
  }
  uptr FreeStackSize() {
    if (user_requested_size < kChunkHeader2Size) return 0;
    uptr available = RoundUpTo(user_requested_size, SHADOW_GRANULARITY);
    return (available - kChunkHeader2Size) / sizeof(u32);
  }
};

uptr AsanChunkView::Beg() { return chunk_->Beg(); }
uptr AsanChunkView::End() { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }

static void GetStackTraceFromId(u32 id, StackTrace *stack) {
  CHECK(id);
  uptr size = 0;
  const uptr *trace = StackDepotGet(id, &size);
  CHECK_LT(size, kStackTraceMax);
  internal_memcpy(stack->trace, trace, sizeof(uptr) * size);
  stack->size = size;
}

void AsanChunkView::GetAllocStack(StackTrace *stack) {
  if (flags()->use_stack_depot)
    GetStackTraceFromId(chunk_->alloc_context_id, stack);
  else
    StackTrace::UncompressStack(stack, chunk_->AllocStackBeg(),
                                chunk_->AllocStackSize());
}

void AsanChunkView::GetFreeStack(StackTrace *stack) {
  if (flags()->use_stack_depot)
    GetStackTraceFromId(chunk_->free_context_id, stack);
  else
    StackTrace::UncompressStack(stack, chunk_->FreeStackBeg(),
                                chunk_->FreeStackSize());
}

struct QuarantineCallback;
typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
typedef AsanQuarantine::Cache QuarantineCache;
static AsanQuarantine quarantine(LINKER_INITIALIZED);
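// The fallback cache/quarantine pair below is used, under fallback_mutex,
// on the rare paths where GetCurrentThread() returns 0 (e.g. very early or
// very late in a thread's lifetime).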
static QuarantineCache fallback_quarantine_cache(LINKER_INITIALIZED);
static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex;

QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
  return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
}

struct QuarantineCallback {
  explicit QuarantineCallback(AllocatorCache *cache)
      : cache_(cache) {
  }

  void Recycle(AsanChunk *m) {
    CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
    m->chunk_state = CHUNK_AVAILABLE;
    CHECK_NE(m->alloc_tid, kInvalidTid);
    CHECK_NE(m->free_tid, kInvalidTid);
    PoisonShadow(m->Beg(),
                 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
                 kAsanHeapLeftRedzoneMagic);
    void *p = reinterpret_cast<void *>(m->AllocBeg());
    if (m->from_memalign) {
      uptr *memalign_magic = reinterpret_cast<uptr *>(p);
      CHECK_EQ(memalign_magic[0], kMemalignMagic);
      CHECK_EQ(memalign_magic[1], reinterpret_cast<uptr>(m));
    }

    // Statistics.
    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.real_frees++;
    thread_stats.really_freed += m->UsedSize();

    allocator.Deallocate(cache_, p);
  }

  void *Allocate(uptr size) {
    return allocator.Allocate(cache_, size, 1, false);
  }

  void Deallocate(void *p) {
    allocator.Deallocate(cache_, p);
  }

  AllocatorCache *cache_;
};

void InitializeAllocator() {
  allocator.Init();
  quarantine.Init((uptr)flags()->quarantine_size, kMaxThreadLocalQuarantine);
}

static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
                      AllocType alloc_type, bool can_fill) {
  if (!asan_inited)
    __asan_init();
  Flags &fl = *flags();
  CHECK(stack);
  const uptr min_alignment = SHADOW_GRANULARITY;
  if (alignment < min_alignment)
    alignment = min_alignment;
  if (size == 0) {
    // We'd be happy to avoid allocating memory for zero-size requests, but
    // some programs/tests depend on this behavior and assume that malloc
    // would not return NULL even for zero-size allocations. Moreover, it
    // looks like operator new should never return NULL, and results of
    // consecutive "new" calls must be different even if the allocated size
    // is zero.
    size = 1;
  }
  CHECK(IsPowerOfTwo(alignment));
  uptr rz_log = ComputeRZLog(size);
  uptr rz_size = RZLog2Size(rz_log);
  uptr rounded_size = RoundUpTo(size, alignment);
  if (rounded_size < kChunkHeader2Size)
    rounded_size = kChunkHeader2Size;
  uptr needed_size = rounded_size + rz_size;
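  // Over-allocate by one alignment unit so that an aligned user_beg can
  // always be carved out of the region past the left redzone.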
  if (alignment > min_alignment)
    needed_size += alignment;
  bool using_primary_allocator = true;
  // If we are allocating from the secondary allocator, there will be no
  // automatic right redzone, so add the right redzone manually.
  if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) {
    needed_size += rz_size;
    using_primary_allocator = false;
  }
  CHECK(IsAligned(needed_size, min_alignment));
  if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
    Report("WARNING: AddressSanitizer failed to allocate %p bytes\n",
           (void*)size);
    return 0;
  }

  AsanThread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocated = allocator.Allocate(cache, needed_size, 8, false);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, needed_size, 8, false);
  }
  uptr alloc_beg = reinterpret_cast<uptr>(allocated);
  // Clear the first allocated word (an old kMemalignMagic may still be there).
  reinterpret_cast<uptr *>(alloc_beg)[0] = 0;
  uptr alloc_end = alloc_beg + needed_size;
  uptr beg_plus_redzone = alloc_beg + rz_size;
  uptr user_beg = beg_plus_redzone;
  if (!IsAligned(user_beg, alignment))
    user_beg = RoundUpTo(user_beg, alignment);
  uptr user_end = user_beg + size;
  CHECK_LE(user_end, alloc_end);
  uptr chunk_beg = user_beg - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
  m->chunk_state = CHUNK_ALLOCATED;
  m->alloc_type = alloc_type;
  m->rz_log = rz_log;
  u32 alloc_tid = t ? t->tid() : 0;
  m->alloc_tid = alloc_tid;
  CHECK_EQ(alloc_tid, m->alloc_tid);  // Does alloc_tid fit into the bitfield?
  m->free_tid = kInvalidTid;
  m->from_memalign = user_beg != beg_plus_redzone;
  if (m->from_memalign) {
    CHECK_LE(beg_plus_redzone + 2 * sizeof(uptr), user_beg);
    uptr *memalign_magic = reinterpret_cast<uptr *>(alloc_beg);
    memalign_magic[0] = kMemalignMagic;
    memalign_magic[1] = chunk_beg;
  }
  if (using_primary_allocator) {
    CHECK(size);
    m->user_requested_size = size;
    CHECK(allocator.FromPrimary(allocated));
  } else {
    CHECK(!allocator.FromPrimary(allocated));
    m->user_requested_size = SizeClassMap::kMaxSize;
    uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
    meta[0] = size;
    meta[1] = chunk_beg;
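    // GetAsanChunkByAddr() and AsanChunk::UsedSize() read this metadata to
    // recover the chunk header and the requested size for secondary blocks.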
  }

  if (fl.use_stack_depot) {
    m->alloc_context_id = StackDepotPut(stack->trace, stack->size);
  } else {
    m->alloc_context_id = 0;
    StackTrace::CompressStack(stack, m->AllocStackBeg(), m->AllocStackSize());
  }

  uptr size_rounded_down_to_granularity = RoundDownTo(size, SHADOW_GRANULARITY);
  // Unpoison the bulk of the memory region.
  if (size_rounded_down_to_granularity)
    PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
  // Deal with the end of the region if size is not aligned to granularity.
  if (size != size_rounded_down_to_granularity && fl.poison_heap) {
    u8 *shadow = (u8*)MemToShadow(user_beg + size_rounded_down_to_granularity);
    *shadow = size & (SHADOW_GRANULARITY - 1);
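    // The shadow byte holds the number of addressable bytes in the last,
    // partial granule; e.g. for size == 13 with 8-byte granules it is 5.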
  }

  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.mallocs++;
  thread_stats.malloced += size;
  thread_stats.malloced_redzones += needed_size - size;
  uptr class_id = Min(kNumberOfSizeClasses, SizeClassMap::ClassID(needed_size));
  thread_stats.malloced_by_size[class_id]++;
  if (needed_size > SizeClassMap::kMaxSize)
    thread_stats.malloc_large++;

  void *res = reinterpret_cast<void *>(user_beg);
  if (can_fill && fl.max_malloc_fill_size) {
    uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size);
    REAL(memset)(res, fl.malloc_fill_byte, fill_size);
  }
  ASAN_MALLOC_HOOK(res, size);
  return res;
}

static void Deallocate(void *ptr, StackTrace *stack, AllocType alloc_type) {
  uptr p = reinterpret_cast<uptr>(ptr);
  if (p == 0) return;
  ASAN_FREE_HOOK(ptr);
  uptr chunk_beg = p - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

  u8 old_chunk_state = CHUNK_ALLOCATED;
  // Flip the chunk_state atomically to avoid a race on double-free.
  if (!atomic_compare_exchange_strong((atomic_uint8_t*)m, &old_chunk_state,
                                      CHUNK_QUARANTINE, memory_order_relaxed)) {
    if (old_chunk_state == CHUNK_QUARANTINE)
      ReportDoubleFree((uptr)ptr, stack);
    else
      ReportFreeNotMalloced((uptr)ptr, stack);
  }
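  // Both report calls above are fatal, so past this point the CAS is known
  // to have succeeded; the CHECK below documents that.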
  CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);

  if (m->alloc_type != alloc_type && flags()->alloc_dealloc_mismatch)
    ReportAllocTypeMismatch((uptr)ptr, stack,
                            (AllocType)m->alloc_type, (AllocType)alloc_type);

  CHECK_GE(m->alloc_tid, 0);
  if (SANITIZER_WORDSIZE == 64)  // On 32-bit this resides in the user area.
    CHECK_EQ(m->free_tid, kInvalidTid);
  AsanThread *t = GetCurrentThread();
  m->free_tid = t ? t->tid() : 0;
  if (flags()->use_stack_depot) {
    m->free_context_id = StackDepotPut(stack->trace, stack->size);
  } else {
    m->free_context_id = 0;
    StackTrace::CompressStack(stack, m->FreeStackBeg(), m->FreeStackSize());
  }
  CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
  // Poison the region.
  PoisonShadow(m->Beg(),
               RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
               kAsanHeapFreeMagic);

  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.frees++;
  thread_stats.freed += m->UsedSize();

  // Push into quarantine.
  if (t) {
    AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
    AllocatorCache *ac = GetAllocatorCache(ms);
    quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac),
                   m, m->UsedSize());
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *ac = &fallback_allocator_cache;
    quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac),
                   m, m->UsedSize());
  }
}

static void *Reallocate(void *old_ptr, uptr new_size, StackTrace *stack) {
  CHECK(old_ptr && new_size);
  uptr p = reinterpret_cast<uptr>(old_ptr);
  uptr chunk_beg = p - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.reallocs++;
  thread_stats.realloced += new_size;

  CHECK_EQ(m->chunk_state, CHUNK_ALLOCATED);
  uptr old_size = m->UsedSize();
  uptr memcpy_size = Min(new_size, old_size);
  void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
  if (new_ptr) {
    CHECK_NE(REAL(memcpy), (void*)0);
    REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
    Deallocate(old_ptr, stack, FROM_MALLOC);
  }
  return new_ptr;
}

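// Given an arbitrary address p, find the AsanChunk that contains it.
// Three cases: a memalign'ed chunk (recognized via the magic words at the
// block beginning), a chunk from the secondary allocator (recovered from
// its metadata), or a chunk from the primary allocator (recovered by
// trying redzone sizes until one is consistent with the block size).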
static AsanChunk *GetAsanChunkByAddr(uptr p) {
  void *ptr = reinterpret_cast<void *>(p);
  uptr alloc_beg = reinterpret_cast<uptr>(allocator.GetBlockBegin(ptr));
  if (!alloc_beg) return 0;
  uptr *memalign_magic = reinterpret_cast<uptr *>(alloc_beg);
  if (memalign_magic[0] == kMemalignMagic) {
    AsanChunk *m = reinterpret_cast<AsanChunk *>(memalign_magic[1]);
    CHECK(m->from_memalign);
    return m;
  }
  if (!allocator.FromPrimary(ptr)) {
    uptr *meta = reinterpret_cast<uptr *>(
        allocator.GetMetaData(reinterpret_cast<void *>(alloc_beg)));
    AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]);
    return m;
  }
  uptr actual_size = allocator.GetActuallyAllocatedSize(ptr);
  CHECK_LE(actual_size, SizeClassMap::kMaxSize);
  // We know the actually allocated size, but we don't know the redzone size.
  // Just try all possible redzone sizes.
  for (u32 rz_log = 0; rz_log < 8; rz_log++) {
    u32 rz_size = RZLog2Size(rz_log);
    uptr max_possible_size = actual_size - rz_size;
    if (ComputeRZLog(max_possible_size) != rz_log)
      continue;
    return reinterpret_cast<AsanChunk *>(
        alloc_beg + rz_size - kChunkHeaderSize);
  }
  return 0;
}

static uptr AllocationSize(uptr p) {
  AsanChunk *m = GetAsanChunkByAddr(p);
  if (!m) return 0;
  if (m->chunk_state != CHUNK_ALLOCATED) return 0;
  if (m->Beg() != p) return 0;
  return m->UsedSize();
}
// We have an address between two chunks, and we want to report just one.
AsanChunk *ChooseChunk(uptr addr,
                       AsanChunk *left_chunk, AsanChunk *right_chunk) {
  // Prefer an allocated chunk over a freed chunk, and a freed chunk over
  // an available chunk.
  if (left_chunk->chunk_state != right_chunk->chunk_state) {
    if (left_chunk->chunk_state == CHUNK_ALLOCATED)
      return left_chunk;
    if (right_chunk->chunk_state == CHUNK_ALLOCATED)
      return right_chunk;
    if (left_chunk->chunk_state == CHUNK_QUARANTINE)
      return left_chunk;
    if (right_chunk->chunk_state == CHUNK_QUARANTINE)
      return right_chunk;
  }
  // Same chunk_state: choose based on offset.
  sptr l_offset = 0, r_offset = 0;
  CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
  CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
  if (l_offset < r_offset)
    return left_chunk;
  return right_chunk;
}

AsanChunkView FindHeapChunkByAddress(uptr addr) {
  AsanChunk *m1 = GetAsanChunkByAddr(addr);
  if (!m1) return AsanChunkView(m1);
  sptr offset = 0;
  if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
    // The address is in the chunk's left redzone, so maybe it is actually
    // a right buffer overflow from the chunk to the left.
    // Search a bit to the left to see if there is another chunk.
    AsanChunk *m2 = 0;
    for (uptr l = 1; l < GetPageSizeCached(); l++) {
      m2 = GetAsanChunkByAddr(addr - l);
      if (m2 == m1) continue;  // Still the same chunk.
      break;
    }
    if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
      m1 = ChooseChunk(addr, m2, m1);
  }
  return AsanChunkView(m1);
}

void AsanThreadLocalMallocStorage::CommitBack() {
  AllocatorCache *ac = GetAllocatorCache(this);
  quarantine.Drain(GetQuarantineCache(this), QuarantineCallback(ac));
  allocator.SwallowCache(GetAllocatorCache(this));
}

void PrintInternalAllocatorStats() {
  allocator.PrintStats();
}

SANITIZER_INTERFACE_ATTRIBUTE
void *asan_memalign(uptr alignment, uptr size, StackTrace *stack,
                    AllocType alloc_type) {
  return Allocate(size, alignment, stack, alloc_type, true);
}

SANITIZER_INTERFACE_ATTRIBUTE
void asan_free(void *ptr, StackTrace *stack, AllocType alloc_type) {
  Deallocate(ptr, stack, alloc_type);
}

SANITIZER_INTERFACE_ATTRIBUTE
void *asan_malloc(uptr size, StackTrace *stack) {
  return Allocate(size, 8, stack, FROM_MALLOC, true);
}

void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
  if (CallocShouldReturnNullDueToOverflow(size, nmemb)) return 0;
  void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
  // If the memory comes from the secondary allocator, there is no need to
  // clear it, as it comes directly from mmap.
  if (ptr && allocator.FromPrimary(ptr))
    REAL(memset)(ptr, 0, nmemb * size);
  return ptr;
}

void *asan_realloc(void *p, uptr size, StackTrace *stack) {
  if (p == 0)
    return Allocate(size, 8, stack, FROM_MALLOC, true);
  if (size == 0) {
    Deallocate(p, stack, FROM_MALLOC);
    return 0;
  }
  return Reallocate(p, size, stack);
}

void *asan_valloc(uptr size, StackTrace *stack) {
  return Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true);
}

void *asan_pvalloc(uptr size, StackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  size = RoundUpTo(size, PageSize);
  if (size == 0) {
    // pvalloc(0) should allocate one page.
    size = PageSize;
  }
  return Allocate(size, PageSize, stack, FROM_MALLOC, true);
}

int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        StackTrace *stack) {
  void *ptr = Allocate(size, alignment, stack, FROM_MALLOC, true);
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

uptr asan_malloc_usable_size(void *ptr, StackTrace *stack) {
  CHECK(stack);
  if (ptr == 0) return 0;
  uptr usable_size = AllocationSize(reinterpret_cast<uptr>(ptr));
  if (flags()->check_malloc_usable_size && (usable_size == 0))
    ReportMallocUsableSizeNotOwned((uptr)ptr, stack);
  return usable_size;
}

uptr asan_mz_size(const void *ptr) {
  return AllocationSize(reinterpret_cast<uptr>(ptr));
}

void asan_mz_force_lock() {
  allocator.ForceLock();
  fallback_mutex.Lock();
}

void asan_mz_force_unlock() {
  fallback_mutex.Unlock();
  allocator.ForceUnlock();
}

}  // namespace __asan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;  // NOLINT

// The ASan allocator doesn't reserve extra bytes, so normally we would just
// return "size". We don't want to expose our redzone sizes, etc. here.
uptr __asan_get_estimated_allocated_size(uptr size) {
  return size;
}

bool __asan_get_ownership(const void *p) {
  uptr ptr = reinterpret_cast<uptr>(p);
  return (AllocationSize(ptr) > 0);
}

uptr __asan_get_allocated_size(const void *p) {
  if (p == 0) return 0;
  uptr ptr = reinterpret_cast<uptr>(p);
  uptr allocated_size = AllocationSize(ptr);
  // Die if p is not malloced or if it is already freed.
  if (allocated_size == 0) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportAsanGetAllocatedSizeNotOwned(ptr, &stack);
  }
  return allocated_size;
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementations of the malloc hooks.
extern "C" {
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
void __asan_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
void __asan_free_hook(void *ptr) {
  (void)ptr;
}
}  // extern "C"
#endif