asan_allocator2.cc revision cab6133c5d7478e96882cb54467e29b3716c0d89
//===-- asan_allocator2.cc ------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Implementation of ASan's memory allocator, second version.
// This variant uses the allocator from sanitizer_common, i.e. the one shared
// with ThreadSanitizer and MemorySanitizer.
//
// Status: under development, not enabled by default yet.
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"
#if ASAN_ALLOCATOR_VERSION == 2

#include "asan_mapping.h"
#include "asan_report.h"
#include "asan_thread.h"
#include "asan_thread_registry.h"
#include "sanitizer/asan_interface.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"

namespace __asan {

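// Callbacks invoked by the underlying allocator whenever it maps or unmaps
// memory from the OS. Freshly mapped memory is poisoned as heap left redzone
// so that any access before a chunk is handed out is reported; unmapped
// memory is unpoisoned. Both paths update the current thread's statistics.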
struct AsanMapUnmapCallback {
  void OnMap(uptr p, uptr size) const {
    PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
    // Statistics.
    AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
    thread_stats.mmaps++;
    thread_stats.mmaped += size;
    // thread_stats.mmaped_by_size[size_class] += n_chunks;
  }
  void OnUnmap(uptr p, uptr size) const {
    PoisonShadow(p, size, 0);
    // Statistics.
    AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
    thread_stats.munmaps++;
    thread_stats.munmaped += size;
  }
};

#if SANITIZER_WORDSIZE == 64
const uptr kAllocatorSpace = 0x600000000000ULL;
const uptr kAllocatorSize  =  0x10000000000ULL;  // 1T.
typedef DefaultSizeClassMap SizeClassMap;
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0 /*metadata*/,
    SizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
#elif SANITIZER_WORDSIZE == 32
static const u64 kAddressSpaceSize = 1ULL << 32;
typedef CompactSizeClassMap SizeClassMap;
typedef SizeClassAllocator32<0, kAddressSpaceSize, 16,
  SizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
#endif

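// The actual allocator: small requests are served by the size-class-based
// primary allocator (through a per-thread AllocatorCache), large ones fall
// back to the mmap-based secondary allocator.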
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<AsanMapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
    SecondaryAllocator> Allocator;

// We cannot use THREADLOCAL because it is not supported on some of the
// platforms we care about (OSX 10.6, Android).
// static THREADLOCAL AllocatorCache cache;
AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator2_cache));
  return reinterpret_cast<AllocatorCache *>(ms->allocator2_cache);
}

static Allocator allocator;

static const uptr kMaxAllowedMallocSize =
  FIRST_32_SECOND_64(3UL << 30, 8UL << 30);

static const uptr kMaxThreadLocalQuarantine =
  FIRST_32_SECOND_64(1 << 18, 1 << 20);

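// Zero-sized allocations get this address instead of 0 (unless an alignment
// larger than this is requested). It points into the protected zero page, so
// any access to it faults, and Deallocate treats it as a no-op.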
static const uptr kReturnOnZeroMalloc = 2048;  // Zero page is protected.

static int inited = 0;

static void Init() {
  if (inited) return;
  __asan_init();
  inited = true;  // This must happen before any threads are created.
  allocator.Init();
}

// Every chunk of memory allocated by this allocator can be in one of 3 states:
// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
// CHUNK_QUARANTINE: the chunk was freed and put into the quarantine.
enum {
  CHUNK_AVAILABLE  = 1,
  CHUNK_ALLOCATED  = 2,
  CHUNK_QUARANTINE = 3
};

// The memory chunk allocated from the underlying allocator looks like this:
// L L L L L L H H U U U U U U R R
//   L -- left redzone (0 or more bytes)
//   H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
//   U -- user memory.
//   R -- right redzone (0 or more bytes)
// ChunkBase consists of ChunkHeader and other bytes that overlap with user
// memory.
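// Worked example, assuming flags()->redzone is 128: for malloc(100),
// ComputeRZSize returns 128, rounded_size = RoundUpTo(100, 128) = 128 and
// needed_size = 128 + 128 = 256 bytes. The user pointer is alloc_beg + 128,
// and the 16-byte ChunkHeader sits immediately before it.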

// If a memory chunk is allocated by memalign and we had to increase the
// allocation size to achieve the proper alignment, then we store this magic
// value in the first uptr word of the memory block and store the address of
// ChunkBase in the next uptr.
// M B ? ? ? L L L L L L  H H U U U U U U
//   M -- magic value kMemalignMagic
//   B -- address of ChunkHeader pointing to the first 'H'
static const uptr kMemalignMagic = 0xCC6E96B9;

#if SANITIZER_WORDSIZE == 64
struct ChunkBase {
  // First 8 bytes.
  uptr chunk_state       : 8;  // Must be first.
  uptr alloc_tid         : 24;

  uptr free_tid          : 24;
  uptr from_memalign     : 1;
  uptr alloc_type        : 2;
  // Second 8 bytes.
  uptr user_requested_size;
  // Header2 (intersects with user memory).
  // Third 8 bytes. These overlap with the user memory.
  AsanChunk *next;
};

static const uptr kChunkHeaderSize = 16;
static const uptr kChunkHeader2Size = 8;

#elif SANITIZER_WORDSIZE == 32
struct ChunkBase {
  // First 8 bytes.
  uptr chunk_state       : 8;  // Must be first.
  uptr alloc_tid         : 24;

  uptr from_memalign     : 1;
  uptr alloc_type        : 2;
  uptr free_tid          : 24;
  // Second 8 bytes.
  uptr user_requested_size;
  AsanChunk *next;
  // Header2 empty.
};

static const uptr kChunkHeaderSize = 16;
static const uptr kChunkHeader2Size = 0;
#endif
COMPILER_CHECK(sizeof(ChunkBase) == kChunkHeaderSize + kChunkHeader2Size);

static uptr ComputeRZSize(uptr user_requested_size) {
  // FIXME: implement adaptive redzones.
  return flags()->redzone;
}

struct AsanChunk: ChunkBase {
  uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
  uptr UsedSize() { return user_requested_size; }
  // We store the alloc/free stack traces in the chunk itself.
  u32 *AllocStackBeg() {
    return (u32*)(Beg() - ComputeRZSize(UsedSize()));
  }
  uptr AllocStackSize() {
    return (ComputeRZSize(UsedSize()) - kChunkHeaderSize) / sizeof(u32);
  }
  u32 *FreeStackBeg() {
    return (u32*)(Beg() + kChunkHeader2Size);
  }
  uptr FreeStackSize() {
    uptr available = Max(RoundUpTo(UsedSize(), SHADOW_GRANULARITY),
                         ComputeRZSize(UsedSize()));
    return (available - kChunkHeader2Size) / sizeof(u32);
  }
};

uptr AsanChunkView::Beg() { return chunk_->Beg(); }
uptr AsanChunkView::End() { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }

void AsanChunkView::GetAllocStack(StackTrace *stack) {
  StackTrace::UncompressStack(stack, chunk_->AllocStackBeg(),
                              chunk_->AllocStackSize());
}

void AsanChunkView::GetFreeStack(StackTrace *stack) {
  StackTrace::UncompressStack(stack, chunk_->FreeStackBeg(),
                              chunk_->FreeStackSize());
}

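// Freed chunks are not returned to the underlying allocator right away.
// They first accumulate in a thread-local FIFO and are then dumped into this
// global quarantine; only when the total quarantined size exceeds
// flags()->quarantine_size are the oldest chunks actually deallocated. This
// delay is what lets use-after-free on recently freed memory be detected.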
class Quarantine: public AsanChunkFifoList {
 public:
  void SwallowThreadLocalQuarantine(AsanThreadLocalMallocStorage *ms) {
    AsanChunkFifoList *q = &ms->quarantine_;
    if (!q->size()) return;
    SpinMutexLock l(&mutex_);
    PushList(q);
    PopAndDeallocateLoop(ms);
  }

  void BypassThreadLocalQuarantine(AsanChunk *m) {
    SpinMutexLock l(&mutex_);
    Push(m);
  }

 private:
  void PopAndDeallocateLoop(AsanThreadLocalMallocStorage *ms) {
    while (size() > (uptr)flags()->quarantine_size) {
      PopAndDeallocate(ms);
    }
  }
  void PopAndDeallocate(AsanThreadLocalMallocStorage *ms) {
    CHECK_GT(size(), 0);
    AsanChunk *m = Pop();
    CHECK(m);
    CHECK(m->chunk_state == CHUNK_QUARANTINE);
    m->chunk_state = CHUNK_AVAILABLE;
    CHECK_NE(m->alloc_tid, kInvalidTid);
    CHECK_NE(m->free_tid, kInvalidTid);
    PoisonShadow(m->Beg(),
                 RoundUpTo(m->user_requested_size, SHADOW_GRANULARITY),
                 kAsanHeapLeftRedzoneMagic);
    uptr alloc_beg = m->Beg() - ComputeRZSize(m->user_requested_size);
    void *p = reinterpret_cast<void *>(alloc_beg);
    if (m->from_memalign) {
      p = allocator.GetBlockBegin(p);
      uptr *memalign_magic = reinterpret_cast<uptr *>(p);
      CHECK_EQ(memalign_magic[0], kMemalignMagic);
      CHECK_EQ(memalign_magic[1], reinterpret_cast<uptr>(m));
    }

    // Statistics.
    AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
    thread_stats.real_frees++;
    thread_stats.really_freed += m->UsedSize();

    allocator.Deallocate(GetAllocatorCache(ms), p);
  }
  SpinMutex mutex_;
};

static Quarantine quarantine;

void AsanChunkFifoList::PushList(AsanChunkFifoList *q) {
  CHECK(q->size() > 0);
  size_ += q->size();
  append_back(q);
  q->clear();
}

void AsanChunkFifoList::Push(AsanChunk *n) {
  push_back(n);
  size_ += n->UsedSize();
}

// Interesting performance observation: this function takes up to 15% of the
// overall allocator time. That's because *first_ was evicted from the cache
// a long time ago. Not sure if we can or want to do anything about this.
AsanChunk *AsanChunkFifoList::Pop() {
  CHECK(first_);
  AsanChunk *res = front();
  size_ -= res->UsedSize();
  pop_front();
  return res;
}

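// Allocation path: compute the redzone size, grab needed_size bytes from the
// underlying allocator (through the per-thread cache when a thread object is
// available), place the chunk header kChunkHeaderSize bytes before the user
// region, record the compressed allocation stack in the left redzone, and
// unpoison exactly the user-requested bytes (the trailing partial shadow
// granule is encoded with its number of valid bytes).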
static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
                      AllocType alloc_type) {
  Init();
  CHECK(stack);
  if (alignment < 8) alignment = 8;
  if (size == 0) {
    if (alignment <= kReturnOnZeroMalloc)
      return reinterpret_cast<void *>(kReturnOnZeroMalloc);
    else
      return 0;  // 0 bytes with large alignment requested. Just return 0.
  }
  CHECK(IsPowerOfTwo(alignment));
  uptr rz_size = ComputeRZSize(size);
  uptr rounded_size = RoundUpTo(size, rz_size);
  uptr needed_size = rounded_size + rz_size;
  if (alignment > rz_size)
    needed_size += alignment;
  CHECK(IsAligned(needed_size, rz_size));
  if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
    Report("WARNING: AddressSanitizer failed to allocate %p bytes\n",
           (void*)size);
    return 0;
  }

  AsanThread *t = asanThreadRegistry().GetCurrent();
  AllocatorCache *cache = t ? GetAllocatorCache(&t->malloc_storage()) : 0;
  void *allocated = allocator.Allocate(cache, needed_size, 8, false);
  uptr alloc_beg = reinterpret_cast<uptr>(allocated);
  uptr alloc_end = alloc_beg + needed_size;
  uptr beg_plus_redzone = alloc_beg + rz_size;
  uptr user_beg = beg_plus_redzone;
  if (!IsAligned(user_beg, alignment))
    user_beg = RoundUpTo(user_beg, alignment);
  uptr user_end = user_beg + size;
  CHECK_LE(user_end, alloc_end);
  uptr chunk_beg = user_beg - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
  m->chunk_state = CHUNK_ALLOCATED;
  m->alloc_type = alloc_type;
  u32 alloc_tid = t ? t->tid() : 0;
  m->alloc_tid = alloc_tid;
  CHECK_EQ(alloc_tid, m->alloc_tid);  // Does alloc_tid fit into the bitfield?
  m->free_tid = kInvalidTid;
  m->from_memalign = user_beg != beg_plus_redzone;
  if (m->from_memalign) {
    CHECK_LE(beg_plus_redzone + 2 * sizeof(uptr), user_beg);
    uptr *memalign_magic = reinterpret_cast<uptr *>(alloc_beg);
    memalign_magic[0] = kMemalignMagic;
    memalign_magic[1] = chunk_beg;
  }
  m->user_requested_size = size;
  StackTrace::CompressStack(stack, m->AllocStackBeg(), m->AllocStackSize());

  uptr size_rounded_down_to_granularity = RoundDownTo(size, SHADOW_GRANULARITY);
  // Unpoison the bulk of the memory region.
  if (size_rounded_down_to_granularity)
    PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
  // Deal with the end of the region if size is not aligned to granularity.
  if (size != size_rounded_down_to_granularity && flags()->poison_heap) {
    u8 *shadow = (u8*)MemToShadow(user_beg + size_rounded_down_to_granularity);
    *shadow = size & (SHADOW_GRANULARITY - 1);
  }

  AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
  thread_stats.mallocs++;
  thread_stats.malloced += size;
  thread_stats.malloced_redzones += needed_size - size;
  uptr class_id = Min(kNumberOfSizeClasses, SizeClassMap::ClassID(needed_size));
  thread_stats.malloced_by_size[class_id]++;
  if (needed_size > SizeClassMap::kMaxSize)
    thread_stats.malloc_large++;

  void *res = reinterpret_cast<void *>(user_beg);
  ASAN_MALLOC_HOOK(res, size);
  return res;
}

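// Free path: atomically flip the chunk state to CHUNK_QUARANTINE (reporting a
// double-free or a free of a non-heap pointer if the previous state is wrong),
// record the compressed free stack, poison the user region, and push the chunk
// into the thread-local quarantine, flushing it into the global one when it
// grows past kMaxThreadLocalQuarantine.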
static void Deallocate(void *ptr, StackTrace *stack, AllocType alloc_type) {
  uptr p = reinterpret_cast<uptr>(ptr);
  if (p == 0 || p == kReturnOnZeroMalloc) return;
  uptr chunk_beg = p - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

  // Flip the chunk_state atomically to avoid race on double-free.
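  // chunk_state is the first byte of ChunkBase, so casting the chunk pointer
  // to atomic_uint8_t* addresses exactly that field. Only the thread that
  // observes CHUNK_ALLOCATED here proceeds; a concurrent or repeated free
  // observes CHUNK_QUARANTINE and is reported as a double-free below.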
  u8 old_chunk_state = atomic_exchange((atomic_uint8_t*)m, CHUNK_QUARANTINE,
                                       memory_order_acq_rel);

  if (old_chunk_state == CHUNK_QUARANTINE)
    ReportDoubleFree((uptr)ptr, stack);
  else if (old_chunk_state != CHUNK_ALLOCATED)
    ReportFreeNotMalloced((uptr)ptr, stack);
  CHECK(old_chunk_state == CHUNK_ALLOCATED);
  if (m->alloc_type != alloc_type && flags()->alloc_dealloc_mismatch)
    ReportAllocTypeMismatch((uptr)ptr, stack,
                            (AllocType)m->alloc_type, (AllocType)alloc_type);

  CHECK_GE(m->alloc_tid, 0);
  if (SANITIZER_WORDSIZE == 64)  // On 32-bit this resides in the user area.
    CHECK_EQ(m->free_tid, kInvalidTid);
  AsanThread *t = asanThreadRegistry().GetCurrent();
  m->free_tid = t ? t->tid() : 0;
  StackTrace::CompressStack(stack, m->FreeStackBeg(), m->FreeStackSize());
  CHECK(m->chunk_state == CHUNK_QUARANTINE);
  // Poison the region.
  PoisonShadow(m->Beg(),
               RoundUpTo(m->user_requested_size, SHADOW_GRANULARITY),
               kAsanHeapFreeMagic);

  AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
  thread_stats.frees++;
  thread_stats.freed += m->UsedSize();

  // Push into quarantine.
  if (t) {
    AsanChunkFifoList &q = t->malloc_storage().quarantine_;
    q.Push(m);

    if (q.size() > kMaxThreadLocalQuarantine)
      quarantine.SwallowThreadLocalQuarantine(&t->malloc_storage());
  } else {
    quarantine.BypassThreadLocalQuarantine(m);
  }

  ASAN_FREE_HOOK(ptr);
}

static void *Reallocate(void *old_ptr, uptr new_size, StackTrace *stack) {
  CHECK(old_ptr && new_size);
  uptr p = reinterpret_cast<uptr>(old_ptr);
  uptr chunk_beg = p - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

  AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
  thread_stats.reallocs++;
  thread_stats.realloced += new_size;

  CHECK(m->chunk_state == CHUNK_ALLOCATED);
  uptr old_size = m->UsedSize();
  uptr memcpy_size = Min(new_size, old_size);
  void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC);
  if (new_ptr) {
    CHECK(REAL(memcpy) != 0);
    REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
    Deallocate(old_ptr, stack, FROM_MALLOC);
  }
  return new_ptr;
}

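// Map an arbitrary address inside a heap block back to its AsanChunk header.
// GetBlockBegin returns the start of the underlying allocation; if the
// memalign magic is stored there, the header address is read from the second
// word, otherwise the header sits at the end of the (currently fixed-size)
// left redzone. Returns 0 for addresses the allocator does not own.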
static AsanChunk *GetAsanChunkByAddr(uptr p) {
  uptr alloc_beg = reinterpret_cast<uptr>(
      allocator.GetBlockBegin(reinterpret_cast<void *>(p)));
  if (!alloc_beg) return 0;
  uptr *memalign_magic = reinterpret_cast<uptr *>(alloc_beg);
  if (memalign_magic[0] == kMemalignMagic) {
      AsanChunk *m = reinterpret_cast<AsanChunk *>(memalign_magic[1]);
      CHECK(m->from_memalign);
      return m;
  }
  uptr chunk_beg = alloc_beg + ComputeRZSize(0) - kChunkHeaderSize;
  return reinterpret_cast<AsanChunk *>(chunk_beg);
}

static uptr AllocationSize(uptr p) {
  AsanChunk *m = GetAsanChunkByAddr(p);
  if (!m) return 0;
  if (m->chunk_state != CHUNK_ALLOCATED) return 0;
  if (m->Beg() != p) return 0;
  return m->UsedSize();
}

// We have an address between two chunks, and we want to report just one.
AsanChunk *ChooseChunk(uptr addr,
                       AsanChunk *left_chunk, AsanChunk *right_chunk) {
  // Prefer an allocated chunk or a chunk from quarantine.
  if (left_chunk->chunk_state == CHUNK_AVAILABLE &&
      right_chunk->chunk_state != CHUNK_AVAILABLE)
    return right_chunk;
  if (right_chunk->chunk_state == CHUNK_AVAILABLE &&
      left_chunk->chunk_state != CHUNK_AVAILABLE)
    return left_chunk;
  // Choose based on offset.
  uptr l_offset = 0, r_offset = 0;
  CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
  CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
  if (l_offset < r_offset)
    return left_chunk;
  return right_chunk;
}

AsanChunkView FindHeapChunkByAddress(uptr addr) {
  AsanChunk *m1 = GetAsanChunkByAddr(addr);
  if (!m1) return AsanChunkView(m1);
  uptr offset = 0;
  if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
    // The address is in the chunk's left redzone, so maybe it is actually
    // a right buffer overflow from the other chunk to the left.
    // Search a bit to the left to see if there is another chunk.
    AsanChunk *m2 = 0;
    for (uptr l = 1; l < GetPageSizeCached(); l++) {
      m2 = GetAsanChunkByAddr(addr - l);
      if (m2 == m1) continue;  // Still the same chunk.
      Printf("m1 %p m2 %p l %zd\n", m1, m2, l);
      break;
    }
    if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
      m1 = ChooseChunk(addr, m2, m1);
  }
  return AsanChunkView(m1);
}

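// Flushes this thread's local state: its quarantined chunks are handed to the
// global quarantine and its allocator cache is drained back into the
// underlying allocator.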
void AsanThreadLocalMallocStorage::CommitBack() {
  quarantine.SwallowThreadLocalQuarantine(this);
  allocator.SwallowCache(GetAllocatorCache(this));
}

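// The asan_* entry points below are called from the libc interceptors
// (malloc, free, calloc, etc.) and from the operator new/delete replacements;
// alloc_type is what lets ReportAllocTypeMismatch flag malloc/delete mismatches.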
SANITIZER_INTERFACE_ATTRIBUTE
void *asan_memalign(uptr alignment, uptr size, StackTrace *stack,
                    AllocType alloc_type) {
  return Allocate(size, alignment, stack, alloc_type);
}

SANITIZER_INTERFACE_ATTRIBUTE
void asan_free(void *ptr, StackTrace *stack, AllocType alloc_type) {
  Deallocate(ptr, stack, alloc_type);
}

SANITIZER_INTERFACE_ATTRIBUTE
void *asan_malloc(uptr size, StackTrace *stack) {
  return Allocate(size, 8, stack, FROM_MALLOC);
}

void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
  void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC);
  if (ptr)
    REAL(memset)(ptr, 0, nmemb * size);
  return ptr;
}

void *asan_realloc(void *p, uptr size, StackTrace *stack) {
  if (p == 0)
    return Allocate(size, 8, stack, FROM_MALLOC);
  if (size == 0) {
    Deallocate(p, stack, FROM_MALLOC);
    return 0;
  }
  return Reallocate(p, size, stack);
}

void *asan_valloc(uptr size, StackTrace *stack) {
  return Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC);
}

void *asan_pvalloc(uptr size, StackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  size = RoundUpTo(size, PageSize);
  if (size == 0) {
    // pvalloc(0) should allocate one page.
    size = PageSize;
  }
  return Allocate(size, PageSize, stack, FROM_MALLOC);
}

int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        StackTrace *stack) {
  void *ptr = Allocate(size, alignment, stack, FROM_MALLOC);
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

uptr asan_malloc_usable_size(void *ptr, StackTrace *stack) {
  CHECK(stack);
  if (ptr == 0) return 0;
  uptr usable_size = AllocationSize(reinterpret_cast<uptr>(ptr));
  if (flags()->check_malloc_usable_size && (usable_size == 0))
    ReportMallocUsableSizeNotOwned((uptr)ptr, stack);
  return usable_size;
}

uptr asan_mz_size(const void *ptr) {
  UNIMPLEMENTED();
  return 0;
}

void asan_mz_force_lock() {
  UNIMPLEMENTED();
}

void asan_mz_force_unlock() {
  UNIMPLEMENTED();
}

571
572}  // namespace __asan
573
574// ---------------------- Interface ---------------- {{{1
575using namespace __asan;  // NOLINT
576
577// ASan allocator doesn't reserve extra bytes, so normally we would
578// just return "size". We don't want to expose our redzone sizes, etc here.
579uptr __asan_get_estimated_allocated_size(uptr size) {
580  return size;
581}
582
583bool __asan_get_ownership(const void *p) {
584  return AllocationSize(reinterpret_cast<uptr>(p)) > 0;
585}
586
587uptr __asan_get_allocated_size(const void *p) {
588  if (p == 0) return 0;
589  uptr allocated_size = AllocationSize(reinterpret_cast<uptr>(p));
590  // Die if p is not malloced or if it is already freed.
591  if (allocated_size == 0) {
592    GET_STACK_TRACE_FATAL_HERE;
593    ReportAsanGetAllocatedSizeNotOwned(reinterpret_cast<uptr>(p), &stack);
594  }
595  return allocated_size;
596}
597
598#if !SANITIZER_SUPPORTS_WEAK_HOOKS
599// Provide default (no-op) implementation of malloc hooks.
600extern "C" {
601SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
602void __asan_malloc_hook(void *ptr, uptr size) {
603  (void)ptr;
604  (void)size;
605}
606SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
607void __asan_free_hook(void *ptr) {
608  (void)ptr;
609}
610}  // extern "C"
611#endif
612
613
614#endif  // ASAN_ALLOCATOR_VERSION
615