//===-- asan_allocator2.cc ------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Implementation of the second version of ASan's memory allocator.
// This variant uses the allocator from sanitizer_common, i.e. the one shared
// with ThreadSanitizer and MemorySanitizer.
//
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"

#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_quarantine.h"
#include "lsan/lsan_common.h"

namespace __asan {

void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const {
  PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
  // Statistics.
  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.mmaps++;
  thread_stats.mmaped += size;
}
void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
  PoisonShadow(p, size, 0);
  // We are about to unmap a chunk of user memory.
  // Mark the corresponding shadow memory as not needed.
  FlushUnneededASanShadowMemory(p, size);
  // Statistics.
  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.munmaps++;
  thread_stats.munmaped += size;
}

// We cannot use THREADLOCAL because it is not supported on some of the
// platforms we care about (OSX 10.6, Android).
// static THREADLOCAL AllocatorCache cache;
AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  return &ms->allocator2_cache;
}

static Allocator allocator;

static const uptr kMaxAllowedMallocSize =
  FIRST_32_SECOND_64(3UL << 30, 64UL << 30);

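// Rough per-thread bound on the quarantine cache: once a thread accumulates
// more than this many bytes in its local quarantine cache, the cache is
// drained into the global quarantine (see sanitizer_quarantine.h for the
// exact policy).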
static const uptr kMaxThreadLocalQuarantine =
  FIRST_32_SECOND_64(1 << 18, 1 << 20);

// Every chunk of memory allocated by this allocator can be in one of 3 states:
// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
enum {
  CHUNK_AVAILABLE  = 0,  // 0 is the default value even if we didn't set it.
  CHUNK_ALLOCATED  = 2,
  CHUNK_QUARANTINE = 3
};

// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
// We use adaptive redzones: for larger allocations we use larger redzones.
static u32 RZLog2Size(u32 rz_log) {
  CHECK_LT(rz_log, 8);
  return 16 << rz_log;
}

static u32 RZSize2Log(u32 rz_size) {
  CHECK_GE(rz_size, 16);
  CHECK_LE(rz_size, 2048);
  CHECK(IsPowerOfTwo(rz_size));
  u32 res = Log2(rz_size) - 4;
  CHECK_EQ(rz_size, RZLog2Size(res));
  return res;
}

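// For example, assuming the default redzone flags (redzone=16,
// max_redzone=2048), a 100-byte request gets rz_log == 2, i.e. a 64-byte
// redzone, since 100 > 128 - 32 but 100 <= 512 - 64; the computed value is
// then clamped to the [redzone, max_redzone] range given by the flags.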
static uptr ComputeRZLog(uptr user_requested_size) {
  u32 rz_log =
    user_requested_size <= 64        - 16   ? 0 :
    user_requested_size <= 128       - 32   ? 1 :
    user_requested_size <= 512       - 64   ? 2 :
    user_requested_size <= 4096      - 128  ? 3 :
    user_requested_size <= (1 << 14) - 256  ? 4 :
    user_requested_size <= (1 << 15) - 512  ? 5 :
    user_requested_size <= (1 << 16) - 1024 ? 6 : 7;
  return Min(Max(rz_log, RZSize2Log(flags()->redzone)),
             RZSize2Log(flags()->max_redzone));
}

// The memory chunk allocated from the underlying allocator looks like this:
// L L L L L L H H U U U U U U R R
//   L -- left redzone words (0 or more bytes)
//   H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
//   U -- user memory.
//   R -- right redzone (0 or more bytes)
// ChunkBase consists of ChunkHeader and other bytes that overlap with user
// memory.

// If the left redzone is greater than the ChunkHeader size, we store a magic
// value in the first uptr word of the memory block and store the address of
// ChunkBase in the next uptr.
// M B L L L L L L L L L  H H U U U U U U
//   |                    ^
//   ---------------------|
//   M -- magic value kAllocBegMagic
//   B -- address of ChunkHeader pointing to the first 'H'
static const uptr kAllocBegMagic = 0xCC6E96B9;

struct ChunkHeader {
  // First 8 bytes.
  u32 chunk_state       : 8;  // Must be first.
  u32 alloc_tid         : 24;

  u32 free_tid          : 24;
  u32 from_memalign     : 1;
  u32 alloc_type        : 2;
  u32 rz_log            : 3;
  u32 lsan_tag          : 2;
  // Second 8 bytes.
  // This field is used for small sizes. For large sizes it is equal to
  // SizeClassMap::kMaxSize and the actual size is stored in the
  // SecondaryAllocator's metadata.
  u32 user_requested_size;
  u32 alloc_context_id;
};

struct ChunkBase : ChunkHeader {
  // Header2, intersects with user memory.
  u32 free_context_id;
};

static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
COMPILER_CHECK(kChunkHeaderSize == 16);
COMPILER_CHECK(kChunkHeader2Size <= 16);

struct AsanChunk: ChunkBase {
  uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
  uptr UsedSize(bool locked_version = false) {
    if (user_requested_size != SizeClassMap::kMaxSize)
      return user_requested_size;
    return *reinterpret_cast<uptr *>(
                allocator.GetMetaData(AllocBeg(locked_version)));
  }
  void *AllocBeg(bool locked_version = false) {
    if (from_memalign) {
      if (locked_version)
        return allocator.GetBlockBeginFastLocked(
            reinterpret_cast<void *>(this));
      return allocator.GetBlockBegin(reinterpret_cast<void *>(this));
    }
    return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log));
  }
  // If we don't use the stack depot, we store the alloc/free stack traces
  // in the chunk itself.
  u32 *AllocStackBeg() {
    return (u32*)(Beg() - RZLog2Size(rz_log));
  }
  uptr AllocStackSize() {
    CHECK_LE(RZLog2Size(rz_log), kChunkHeaderSize);
    return (RZLog2Size(rz_log) - kChunkHeaderSize) / sizeof(u32);
  }
  u32 *FreeStackBeg() {
    return (u32*)(Beg() + kChunkHeader2Size);
  }
  uptr FreeStackSize() {
    if (user_requested_size < kChunkHeader2Size) return 0;
    uptr available = RoundUpTo(user_requested_size, SHADOW_GRANULARITY);
    return (available - kChunkHeader2Size) / sizeof(u32);
  }
  bool AddrIsInside(uptr addr, bool locked_version = false) {
    return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version));
  }
};

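// AsanChunkView (declared in asan_allocator.h) is a thin read-only wrapper
// around an AsanChunk used by the error-reporting code.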
bool AsanChunkView::IsValid() {
  return chunk_ != 0 && chunk_->chunk_state != CHUNK_AVAILABLE;
}
uptr AsanChunkView::Beg() { return chunk_->Beg(); }
uptr AsanChunkView::End() { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }

static void GetStackTraceFromId(u32 id, StackTrace *stack) {
  CHECK(id);
  uptr size = 0;
  const uptr *trace = StackDepotGet(id, &size);
  CHECK(trace);
  stack->CopyFrom(trace, size);
}

void AsanChunkView::GetAllocStack(StackTrace *stack) {
  GetStackTraceFromId(chunk_->alloc_context_id, stack);
}

void AsanChunkView::GetFreeStack(StackTrace *stack) {
  GetStackTraceFromId(chunk_->free_context_id, stack);
}

struct QuarantineCallback;
typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
typedef AsanQuarantine::Cache QuarantineCache;
static AsanQuarantine quarantine(LINKER_INITIALIZED);
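// The fallback cache/quarantine/mutex are used on the rare path where the
// current thread has no AsanThread (and hence no thread-local storage);
// they are shared between such calls and protected by fallback_mutex.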
static QuarantineCache fallback_quarantine_cache(LINKER_INITIALIZED);
static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex;

QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
  return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
}

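// Interface between the quarantine and the underlying allocator: Recycle()
// returns a quarantined chunk to the allocator, while Allocate()/Deallocate()
// serve the quarantine's own bookkeeping needs.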
struct QuarantineCallback {
  explicit QuarantineCallback(AllocatorCache *cache)
      : cache_(cache) {
  }

  void Recycle(AsanChunk *m) {
    CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
    atomic_store((atomic_uint8_t*)m, CHUNK_AVAILABLE, memory_order_relaxed);
    CHECK_NE(m->alloc_tid, kInvalidTid);
    CHECK_NE(m->free_tid, kInvalidTid);
    PoisonShadow(m->Beg(),
                 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
                 kAsanHeapLeftRedzoneMagic);
    void *p = reinterpret_cast<void *>(m->AllocBeg());
    if (p != m) {
      uptr *alloc_magic = reinterpret_cast<uptr *>(p);
      CHECK_EQ(alloc_magic[0], kAllocBegMagic);
      // Clear the magic value, as allocator internals may overwrite the
      // contents of a deallocated chunk, confusing the GetAsanChunk lookup.
      alloc_magic[0] = 0;
      CHECK_EQ(alloc_magic[1], reinterpret_cast<uptr>(m));
    }

    // Statistics.
    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.real_frees++;
    thread_stats.really_freed += m->UsedSize();

    allocator.Deallocate(cache_, p);
  }

  void *Allocate(uptr size) {
    return allocator.Allocate(cache_, size, 1, false);
  }

  void Deallocate(void *p) {
    allocator.Deallocate(cache_, p);
  }

  AllocatorCache *cache_;
};

void InitializeAllocator() {
  allocator.Init();
  quarantine.Init((uptr)flags()->quarantine_size, kMaxThreadLocalQuarantine);
}

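// Re-reads the quarantine size from the current flags; the underlying
// allocator itself does not need to be re-initialized.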
void ReInitializeAllocator() {
  quarantine.Init((uptr)flags()->quarantine_size, kMaxThreadLocalQuarantine);
}

static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
                      AllocType alloc_type, bool can_fill) {
  if (UNLIKELY(!asan_inited))
    AsanInitFromRtl();
  Flags &fl = *flags();
  CHECK(stack);
  const uptr min_alignment = SHADOW_GRANULARITY;
  if (alignment < min_alignment)
    alignment = min_alignment;
  if (size == 0) {
    // We'd be happy to avoid allocating memory for zero-size requests, but
    // some programs/tests depend on this behavior and assume that malloc would
    // not return NULL even for zero-size allocations. Moreover, it looks like
    // operator new should never return NULL, and results of consecutive "new"
    // calls must be different even if the allocated size is zero.
    size = 1;
  }
  CHECK(IsPowerOfTwo(alignment));
  uptr rz_log = ComputeRZLog(size);
  uptr rz_size = RZLog2Size(rz_log);
  uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment);
  uptr needed_size = rounded_size + rz_size;
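  // For alignments larger than the shadow granularity, reserve extra space so
  // that the user region can be shifted to an aligned address inside the
  // block; any unused gap becomes part of the left redzone (see from_memalign
  // below).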
  if (alignment > min_alignment)
    needed_size += alignment;
  bool using_primary_allocator = true;
  // If we are allocating from the secondary allocator, there will be no
  // automatic right redzone, so add the right redzone manually.
  if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) {
    needed_size += rz_size;
    using_primary_allocator = false;
  }
  CHECK(IsAligned(needed_size, min_alignment));
  if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
    Report("WARNING: AddressSanitizer failed to allocate %p bytes\n",
           (void*)size);
    return AllocatorReturnNull();
  }

  AsanThread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocated = allocator.Allocate(cache, needed_size, 8, false);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, needed_size, 8, false);
  }

  if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && flags()->poison_heap) {
    // Heap poisoning is enabled, but the allocator provides an unpoisoned
    // chunk. This is possible if flags()->poison_heap was disabled for some
    // time, for example, due to flags()->start_disabled.
    // Anyway, poison the block before using it for anything else.
    uptr allocated_size = allocator.GetActuallyAllocatedSize(allocated);
    PoisonShadow((uptr)allocated, allocated_size, kAsanHeapLeftRedzoneMagic);
  }

  uptr alloc_beg = reinterpret_cast<uptr>(allocated);
  uptr alloc_end = alloc_beg + needed_size;
  uptr beg_plus_redzone = alloc_beg + rz_size;
  uptr user_beg = beg_plus_redzone;
  if (!IsAligned(user_beg, alignment))
    user_beg = RoundUpTo(user_beg, alignment);
  uptr user_end = user_beg + size;
  CHECK_LE(user_end, alloc_end);
  uptr chunk_beg = user_beg - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
  m->alloc_type = alloc_type;
  m->rz_log = rz_log;
  u32 alloc_tid = t ? t->tid() : 0;
  m->alloc_tid = alloc_tid;
  CHECK_EQ(alloc_tid, m->alloc_tid);  // Does alloc_tid fit into the bitfield?
  m->free_tid = kInvalidTid;
  m->from_memalign = user_beg != beg_plus_redzone;
  if (alloc_beg != chunk_beg) {
    CHECK_LE(alloc_beg + 2 * sizeof(uptr), chunk_beg);
    reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic;
    reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg;
  }
  if (using_primary_allocator) {
    CHECK(size);
    m->user_requested_size = size;
    CHECK(allocator.FromPrimary(allocated));
  } else {
    CHECK(!allocator.FromPrimary(allocated));
    m->user_requested_size = SizeClassMap::kMaxSize;
    uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
    meta[0] = size;
    meta[1] = chunk_beg;
  }

  m->alloc_context_id = StackDepotPut(stack->trace, stack->size);

  uptr size_rounded_down_to_granularity = RoundDownTo(size, SHADOW_GRANULARITY);
  // Unpoison the bulk of the memory region.
  if (size_rounded_down_to_granularity)
    PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
  // Deal with the end of the region if size is not aligned to granularity.
  if (size != size_rounded_down_to_granularity && fl.poison_heap) {
    u8 *shadow = (u8*)MemToShadow(user_beg + size_rounded_down_to_granularity);
    *shadow = fl.poison_partial ? (size & (SHADOW_GRANULARITY - 1)) : 0;
  }

  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.mallocs++;
  thread_stats.malloced += size;
  thread_stats.malloced_redzones += needed_size - size;
  uptr class_id = Min(kNumberOfSizeClasses, SizeClassMap::ClassID(needed_size));
  thread_stats.malloced_by_size[class_id]++;
  if (needed_size > SizeClassMap::kMaxSize)
    thread_stats.malloc_large++;

  void *res = reinterpret_cast<void *>(user_beg);
  if (can_fill && fl.max_malloc_fill_size) {
    uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size);
    REAL(memset)(res, fl.malloc_fill_byte, fill_size);
  }
#if CAN_SANITIZE_LEAKS
  m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored
                                               : __lsan::kDirectlyLeaked;
#endif
  // Must be the last mutation of metadata in this function.
  atomic_store((atomic_uint8_t *)m, CHUNK_ALLOCATED, memory_order_release);
  ASAN_MALLOC_HOOK(res, size);
  return res;
}

static void ReportInvalidFree(void *ptr, u8 chunk_state, StackTrace *stack) {
  if (chunk_state == CHUNK_QUARANTINE)
    ReportDoubleFree((uptr)ptr, stack);
  else
    ReportFreeNotMalloced((uptr)ptr, stack);
}

static void AtomicallySetQuarantineFlag(AsanChunk *m,
                                        void *ptr, StackTrace *stack) {
  u8 old_chunk_state = CHUNK_ALLOCATED;
  // Flip the chunk_state atomically to avoid race on double-free.
  if (!atomic_compare_exchange_strong((atomic_uint8_t*)m, &old_chunk_state,
                                      CHUNK_QUARANTINE, memory_order_acquire))
    ReportInvalidFree(ptr, old_chunk_state, stack);
  CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
}

// Expects the chunk to already be marked as quarantined by using
// AtomicallySetQuarantineFlag.
static void QuarantineChunk(AsanChunk *m, void *ptr,
                            StackTrace *stack, AllocType alloc_type) {
  CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);

  if (m->alloc_type != alloc_type && flags()->alloc_dealloc_mismatch)
    ReportAllocTypeMismatch((uptr)ptr, stack,
                            (AllocType)m->alloc_type, (AllocType)alloc_type);

  CHECK_GE(m->alloc_tid, 0);
  if (SANITIZER_WORDSIZE == 64)  // On 32-bits this resides in user area.
    CHECK_EQ(m->free_tid, kInvalidTid);
  AsanThread *t = GetCurrentThread();
  m->free_tid = t ? t->tid() : 0;
  m->free_context_id = StackDepotPut(stack->trace, stack->size);
  // Poison the region.
  PoisonShadow(m->Beg(),
               RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
               kAsanHeapFreeMagic);

  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.frees++;
  thread_stats.freed += m->UsedSize();

  // Push into quarantine.
  if (t) {
    AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
    AllocatorCache *ac = GetAllocatorCache(ms);
    quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac),
                   m, m->UsedSize());
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *ac = &fallback_allocator_cache;
    quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac),
                   m, m->UsedSize());
  }
}

static void Deallocate(void *ptr, StackTrace *stack, AllocType alloc_type) {
  uptr p = reinterpret_cast<uptr>(ptr);
  if (p == 0) return;

  uptr chunk_beg = p - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
  ASAN_FREE_HOOK(ptr);
  // Must mark the chunk as quarantined before any changes to its metadata.
  AtomicallySetQuarantineFlag(m, ptr, stack);
  QuarantineChunk(m, ptr, stack, alloc_type);
}

static void *Reallocate(void *old_ptr, uptr new_size, StackTrace *stack) {
  CHECK(old_ptr && new_size);
  uptr p = reinterpret_cast<uptr>(old_ptr);
  uptr chunk_beg = p - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.reallocs++;
  thread_stats.realloced += new_size;

  void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
  if (new_ptr) {
    u8 chunk_state = m->chunk_state;
    if (chunk_state != CHUNK_ALLOCATED)
      ReportInvalidFree(old_ptr, chunk_state, stack);
    CHECK_NE(REAL(memcpy), (void*)0);
    uptr memcpy_size = Min(new_size, m->UsedSize());
    // If realloc() races with free(), we may start copying freed memory.
    // However, we will report racy double-free later anyway.
    REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
    Deallocate(old_ptr, stack, FROM_MALLOC);
  }
  return new_ptr;
}

// Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
static AsanChunk *GetAsanChunk(void *alloc_beg) {
  if (!alloc_beg) return 0;
  if (!allocator.FromPrimary(alloc_beg)) {
    uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg));
    AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]);
    return m;
  }
  uptr *alloc_magic = reinterpret_cast<uptr *>(alloc_beg);
  if (alloc_magic[0] == kAllocBegMagic)
    return reinterpret_cast<AsanChunk *>(alloc_magic[1]);
  return reinterpret_cast<AsanChunk *>(alloc_beg);
}

static AsanChunk *GetAsanChunkByAddr(uptr p) {
  void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
  return GetAsanChunk(alloc_beg);
}

// Allocator must be locked when this function is called.
static AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) {
  void *alloc_beg =
      allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p));
  return GetAsanChunk(alloc_beg);
}

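// Returns the user-requested size if p is the beginning of a live
// (CHUNK_ALLOCATED) chunk, and 0 otherwise.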
static uptr AllocationSize(uptr p) {
  AsanChunk *m = GetAsanChunkByAddr(p);
  if (!m) return 0;
  if (m->chunk_state != CHUNK_ALLOCATED) return 0;
  if (m->Beg() != p) return 0;
  return m->UsedSize();
}

// We have an address between two chunks, and we want to report just one.
AsanChunk *ChooseChunk(uptr addr,
                       AsanChunk *left_chunk, AsanChunk *right_chunk) {
  // Prefer an allocated chunk over a freed chunk, and a freed chunk over an
  // available chunk.
  if (left_chunk->chunk_state != right_chunk->chunk_state) {
    if (left_chunk->chunk_state == CHUNK_ALLOCATED)
      return left_chunk;
    if (right_chunk->chunk_state == CHUNK_ALLOCATED)
      return right_chunk;
    if (left_chunk->chunk_state == CHUNK_QUARANTINE)
      return left_chunk;
    if (right_chunk->chunk_state == CHUNK_QUARANTINE)
      return right_chunk;
  }
  // Same chunk_state: choose based on offset.
  sptr l_offset = 0, r_offset = 0;
  CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
  CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
  if (l_offset < r_offset)
    return left_chunk;
  return right_chunk;
}

AsanChunkView FindHeapChunkByAddress(uptr addr) {
  AsanChunk *m1 = GetAsanChunkByAddr(addr);
  if (!m1) return AsanChunkView(m1);
  sptr offset = 0;
  if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
    // The address is in the chunk's left redzone, so maybe it is actually
    // a right buffer overflow from another chunk to the left.
    // Search a bit to the left to see if there is another chunk.
    AsanChunk *m2 = 0;
    for (uptr l = 1; l < GetPageSizeCached(); l++) {
      m2 = GetAsanChunkByAddr(addr - l);
      if (m2 == m1) continue;  // Still the same chunk.
      break;
    }
    if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
      m1 = ChooseChunk(addr, m2, m1);
  }
  return AsanChunkView(m1);
}

void AsanThreadLocalMallocStorage::CommitBack() {
  AllocatorCache *ac = GetAllocatorCache(this);
  quarantine.Drain(GetQuarantineCache(this), QuarantineCallback(ac));
  allocator.SwallowCache(GetAllocatorCache(this));
}

void PrintInternalAllocatorStats() {
  allocator.PrintStats();
}

void *asan_memalign(uptr alignment, uptr size, StackTrace *stack,
                    AllocType alloc_type) {
  return Allocate(size, alignment, stack, alloc_type, true);
}

void asan_free(void *ptr, StackTrace *stack, AllocType alloc_type) {
  Deallocate(ptr, stack, alloc_type);
}

void *asan_malloc(uptr size, StackTrace *stack) {
  return Allocate(size, 8, stack, FROM_MALLOC, true);
}

void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
  if (CallocShouldReturnNullDueToOverflow(size, nmemb))
    return AllocatorReturnNull();
  void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
  // If the memory comes from the secondary allocator, there is no need to
  // clear it, as it comes directly from mmap.
  if (ptr && allocator.FromPrimary(ptr))
    REAL(memset)(ptr, 0, nmemb * size);
  return ptr;
}

void *asan_realloc(void *p, uptr size, StackTrace *stack) {
  if (p == 0)
    return Allocate(size, 8, stack, FROM_MALLOC, true);
  if (size == 0) {
    Deallocate(p, stack, FROM_MALLOC);
    return 0;
  }
  return Reallocate(p, size, stack);
}

void *asan_valloc(uptr size, StackTrace *stack) {
  return Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true);
}

void *asan_pvalloc(uptr size, StackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  size = RoundUpTo(size, PageSize);
  if (size == 0) {
    // pvalloc(0) should allocate one page.
    size = PageSize;
  }
  return Allocate(size, PageSize, stack, FROM_MALLOC, true);
}

int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        StackTrace *stack) {
  void *ptr = Allocate(size, alignment, stack, FROM_MALLOC, true);
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

uptr asan_malloc_usable_size(void *ptr, uptr pc, uptr bp) {
  if (ptr == 0) return 0;
  uptr usable_size = AllocationSize(reinterpret_cast<uptr>(ptr));
  if (flags()->check_malloc_usable_size && (usable_size == 0)) {
    GET_STACK_TRACE_FATAL(pc, bp);
    ReportMallocUsableSizeNotOwned((uptr)ptr, &stack);
  }
  return usable_size;
}

uptr asan_mz_size(const void *ptr) {
  return AllocationSize(reinterpret_cast<uptr>(ptr));
}

void asan_mz_force_lock() {
  allocator.ForceLock();
  fallback_mutex.Lock();
}

void asan_mz_force_unlock() {
  fallback_mutex.Unlock();
  allocator.ForceUnlock();
}

}  // namespace __asan

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
void LockAllocator() {
  __asan::allocator.ForceLock();
}

void UnlockAllocator() {
  __asan::allocator.ForceUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&__asan::allocator;
  *end = *begin + sizeof(__asan::allocator);
}

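// Returns the user-visible beginning of the live chunk that addr points into,
// or 0 if there is no such chunk. IsSpecialCaseOfOperatorNew0 additionally
// accepts certain pointers just past a chunk (the operator-new-with-size-0
// corner case). The allocator is expected to be locked by the caller.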
uptr PointsIntoChunk(void* p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::GetAsanChunkByAddrFastLocked(addr);
  if (!m) return 0;
  uptr chunk = m->Beg();
  if (m->chunk_state != __asan::CHUNK_ALLOCATED)
    return 0;
  if (m->AddrIsInside(addr, /*locked_version=*/true))
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(/*locked_version*/ true),
                                  addr))
    return chunk;
  return 0;
}

uptr GetUserBegin(uptr chunk) {
  __asan::AsanChunk *m =
      __asan::GetAsanChunkByAddrFastLocked(chunk);
  CHECK(m);
  return m->Beg();
}

LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize);
}

bool LsanMetadata::allocated() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->chunk_state == __asan::CHUNK_ALLOCATED;
}

ChunkTag LsanMetadata::tag() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return static_cast<ChunkTag>(m->lsan_tag);
}

void LsanMetadata::set_tag(ChunkTag value) {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  m->lsan_tag = value;
}

uptr LsanMetadata::requested_size() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->UsedSize(/*locked_version=*/true);
}

u32 LsanMetadata::stack_trace_id() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->alloc_context_id;
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  __asan::allocator.ForEachChunk(callback, arg);
}

IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::GetAsanChunkByAddr(addr);
  if (!m) return kIgnoreObjectInvalid;
  if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr)) {
    if (m->lsan_tag == kIgnored)
      return kIgnoreObjectAlreadyIgnored;
    m->lsan_tag = __lsan::kIgnored;
    return kIgnoreObjectSuccess;
  } else {
    return kIgnoreObjectInvalid;
  }
}
}  // namespace __lsan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;  // NOLINT

// The ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size". We don't want to expose our redzone sizes, etc. here.
uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}
uptr __asan_get_estimated_allocated_size(uptr size) {
  return __sanitizer_get_estimated_allocated_size(size);
}

int __sanitizer_get_ownership(const void *p) {
  uptr ptr = reinterpret_cast<uptr>(p);
  return (AllocationSize(ptr) > 0);
}
int __asan_get_ownership(const void *p) {
  return __sanitizer_get_ownership(p);
}

uptr __sanitizer_get_allocated_size(const void *p) {
  if (p == 0) return 0;
  uptr ptr = reinterpret_cast<uptr>(p);
  uptr allocated_size = AllocationSize(ptr);
  // Die if p is not malloced or if it is already freed.
  if (allocated_size == 0) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportSanitizerGetAllocatedSizeNotOwned(ptr, &stack);
  }
  return allocated_size;
}
uptr __asan_get_allocated_size(const void *p) {
  return __sanitizer_get_allocated_size(p);
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __asan_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __asan_free_hook(void *ptr) {
  (void)ptr;
}
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_free_hook(void *ptr) {
  (void)ptr;
}
}  // extern "C"
#endif
