asan_allocator2.cc revision fe6d91684bcda766593800f6307233f1a33d31f6
//===-- asan_allocator2.cc ------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Implementation of ASan's memory allocator, 2-nd version.
// This variant uses the allocator from sanitizer_common, i.e. the one shared
// with ThreadSanitizer and MemorySanitizer.
//
// Status: under development, not enabled by default yet.
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"
#if ASAN_ALLOCATOR_VERSION == 2

#include "asan_mapping.h"
#include "asan_report.h"
#include "asan_thread.h"
#include "asan_thread_registry.h"
#include "sanitizer/asan_interface.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"

namespace __asan {

struct AsanMapUnmapCallback {
  void OnMap(uptr p, uptr size) const {
    PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
    // Statistics.
    AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
    thread_stats.mmaps++;
    thread_stats.mmaped += size;
    // thread_stats.mmaped_by_size[size_class] += n_chunks;
  }
  void OnUnmap(uptr p, uptr size) const {
    PoisonShadow(p, size, 0);
    // Statistics.
    AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
    thread_stats.munmaps++;
    thread_stats.munmaped += size;
  }
};
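// The underlying allocator invokes OnMap/OnUnmap whenever it obtains memory
// from the OS or gives it back: freshly mapped heap is poisoned as left
// redzone so that accesses to not-yet-allocated heap are reported, and
// regions returned to the OS are unpoisoned.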

#if SANITIZER_WORDSIZE == 64
const uptr kAllocatorSpace = 0x600000000000ULL;
const uptr kAllocatorSize  =  0x10000000000ULL;  // 1T.
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0 /*metadata*/,
    DefaultSizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
#elif SANITIZER_WORDSIZE == 32
static const u64 kAddressSpaceSize = 1ULL << 32;
typedef SizeClassAllocator32<0, kAddressSpaceSize, 16,
  CompactSizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
#endif

typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<AsanMapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
    SecondaryAllocator> Allocator;
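// The combined Allocator dispatches between the two levels: small requests
// are served from the size-class-based primary allocator through a per-thread
// AllocatorCache, while requests that do not fit a size class fall back to
// the mmap-based secondary allocator.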

// We can not use THREADLOCAL because it is not supported on some of the
// platforms we care about (OSX 10.6, Android).
// static THREADLOCAL AllocatorCache cache;
AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator2_cache));
  return reinterpret_cast<AllocatorCache *>(ms->allocator2_cache);
}

static Allocator allocator;

static const uptr kMaxAllowedMallocSize =
  FIRST_32_SECOND_64(3UL << 30, 8UL << 30);

static const uptr kMaxThreadLocalQuarantine =
  FIRST_32_SECOND_64(1 << 18, 1 << 20);
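// Once a thread's local quarantine holds more than this many bytes, it is
// flushed into the global quarantine (see Deallocate below).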

static const uptr kReturnOnZeroMalloc = 0x0123;  // Zero page is protected.
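// malloc(0) returns this fixed non-NULL address inside the protected zero
// page: dereferencing it faults, and Deallocate recognizes the value and
// ignores the free.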

static int inited = 0;

static void Init() {
  if (inited) return;
  __asan_init();
  inited = true;  // this must happen before any threads are created.
  allocator.Init();
}

// Every chunk of memory allocated by this allocator can be in one of 3 states:
// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
enum {
  CHUNK_AVAILABLE  = 1,
  CHUNK_ALLOCATED  = 2,
  CHUNK_QUARANTINE = 3
};

// The memory chunk allocated from the underlying allocator looks like this:
// L L L L L L H H U U U U U U R R
//   L -- left redzone words (0 or more bytes)
//   H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
//   U -- user memory.
//   R -- right redzone (0 or more bytes)
// ChunkBase consists of ChunkHeader and other bytes that overlap with user
// memory.

// If a memory chunk is allocated by memalign and we had to increase the
// allocation size to achieve the proper alignment, then we store this magic
// value in the first uptr word of the memory block and store the address of
// ChunkBase in the next uptr.
// M B ? ? ? L L L L L L  H H U U U U U U
//   M -- magic value kMemalignMagic
//   B -- address of ChunkHeader pointing to the first 'H'
static const uptr kMemalignMagic = 0xCC6E96B9;
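// GetAsanChunkByAddr uses this magic to recover the real chunk header of a
// memalign'ed block, and quarantine recycling verifies it before handing the
// block back to the underlying allocator.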

#if SANITIZER_WORDSIZE == 64
struct ChunkBase {
  // 1-st 8 bytes.
  uptr chunk_state       : 8;  // Must be first.
  uptr alloc_tid         : 24;

  uptr free_tid          : 24;
  uptr from_memalign     : 1;
  uptr alloc_type        : 2;
  // 2-nd 8 bytes
  uptr user_requested_size;
  // Header2 (intersects with user memory).
  // 3-rd 8 bytes. These overlap with the user memory.
  AsanChunk *next;
};

static const uptr kChunkHeaderSize = 16;
static const uptr kChunkHeader2Size = 8;

#elif SANITIZER_WORDSIZE == 32
struct ChunkBase {
  // 1-st 8 bytes.
  uptr chunk_state       : 8;  // Must be first.
  uptr alloc_tid         : 24;

  uptr from_memalign     : 1;
  uptr alloc_type        : 2;
  uptr free_tid          : 24;
  // 2-nd 8 bytes
  uptr user_requested_size;
  AsanChunk *next;
  // Header2 empty.
};

static const uptr kChunkHeaderSize = 16;
static const uptr kChunkHeader2Size = 0;
#endif
COMPILER_CHECK(sizeof(ChunkBase) == kChunkHeaderSize + kChunkHeader2Size);

static uptr ComputeRZSize(uptr user_requested_size) {
  // FIXME: implement adaptive redzones.
  return flags()->redzone;
}

struct AsanChunk: ChunkBase {
  uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
  uptr UsedSize() { return user_requested_size; }
  // We store the alloc/free stack traces in the chunk itself.
  u32 *AllocStackBeg() {
    return (u32*)(Beg() - ComputeRZSize(UsedSize()));
  }
  uptr AllocStackSize() {
    return (ComputeRZSize(UsedSize()) - kChunkHeaderSize) / sizeof(u32);
  }
  u32 *FreeStackBeg() {
    return (u32*)(Beg() + kChunkHeader2Size);
  }
  uptr FreeStackSize() {
    uptr available = Max(RoundUpTo(UsedSize(), SHADOW_GRANULARITY),
                         ComputeRZSize(UsedSize()));
    return (available - kChunkHeader2Size) / sizeof(u32);
  }
};
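// The compressed allocation stack lives in the left redzone, occupying the
// bytes up to the chunk header; the free stack is written over the (now dead)
// user memory right after Header2, so neither trace needs extra storage.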

uptr AsanChunkView::Beg() { return chunk_->Beg(); }
uptr AsanChunkView::End() { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }

void AsanChunkView::GetAllocStack(StackTrace *stack) {
  StackTrace::UncompressStack(stack, chunk_->AllocStackBeg(),
                              chunk_->AllocStackSize());
}

void AsanChunkView::GetFreeStack(StackTrace *stack) {
  StackTrace::UncompressStack(stack, chunk_->FreeStackBeg(),
                              chunk_->FreeStackSize());
}

class Quarantine: public AsanChunkFifoList {
 public:
  void SwallowThreadLocalQuarantine(AsanThreadLocalMallocStorage *ms) {
    AsanChunkFifoList *q = &ms->quarantine_;
    if (!q->size()) return;
    SpinMutexLock l(&mutex_);
    PushList(q);
    PopAndDeallocateLoop(ms);
  }

  void BypassThreadLocalQuarantine(AsanChunk *m) {
    SpinMutexLock l(&mutex_);
    Push(m);
  }

 private:
  void PopAndDeallocateLoop(AsanThreadLocalMallocStorage *ms) {
    while (size() > (uptr)flags()->quarantine_size) {
      PopAndDeallocate(ms);
    }
  }
  void PopAndDeallocate(AsanThreadLocalMallocStorage *ms) {
    CHECK_GT(size(), 0);
    AsanChunk *m = Pop();
    CHECK(m);
    CHECK(m->chunk_state == CHUNK_QUARANTINE);
    m->chunk_state = CHUNK_AVAILABLE;
    CHECK_NE(m->alloc_tid, kInvalidTid);
    CHECK_NE(m->free_tid, kInvalidTid);
    PoisonShadow(m->Beg(),
                 RoundUpTo(m->user_requested_size, SHADOW_GRANULARITY),
                 kAsanHeapLeftRedzoneMagic);
    uptr alloc_beg = m->Beg() - ComputeRZSize(m->user_requested_size);
    void *p = reinterpret_cast<void *>(alloc_beg);
    if (m->from_memalign) {
      p = allocator.GetBlockBegin(p);
      uptr *memalign_magic = reinterpret_cast<uptr *>(p);
      CHECK_EQ(memalign_magic[0], kMemalignMagic);
      CHECK_EQ(memalign_magic[1], reinterpret_cast<uptr>(m));
    }

    // Statistics.
    AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
    thread_stats.real_frees++;
    thread_stats.really_freed += m->UsedSize();

    allocator.Deallocate(GetAllocatorCache(ms), p);
  }
  SpinMutex mutex_;
};
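// The quarantine delays reuse of freed memory: chunks sit here, poisoned, so
// that use-after-free accesses keep hitting bad shadow. Memory is returned to
// the underlying allocator only once the global quarantine exceeds
// flags()->quarantine_size bytes, oldest chunks first.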

static Quarantine quarantine;

void AsanChunkFifoList::PushList(AsanChunkFifoList *q) {
  CHECK(q->size() > 0);
  size_ += q->size();
  append_back(q);
  q->clear();
}

void AsanChunkFifoList::Push(AsanChunk *n) {
  push_back(n);
  size_ += n->UsedSize();
}

// Interesting performance observation: this function takes up to 15% of
// overall allocator time. That's because *first_ has been evicted from the
// cache a long time ago. Not sure if we can or want to do anything about this.
AsanChunk *AsanChunkFifoList::Pop() {
  CHECK(first_);
  AsanChunk *res = front();
  size_ -= res->UsedSize();
  pop_front();
  return res;
}

static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
                      AllocType alloc_type) {
  Init();
  CHECK(stack);
  if (alignment < 8) alignment = 8;
  if (size == 0)
    return reinterpret_cast<void *>(kReturnOnZeroMalloc);
  CHECK(IsPowerOfTwo(alignment));
  uptr rz_size = ComputeRZSize(size);
  uptr rounded_size = RoundUpTo(size, rz_size);
  uptr needed_size = rounded_size + rz_size;
  if (alignment > rz_size)
    needed_size += alignment;
  CHECK(IsAligned(needed_size, rz_size));
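  // Example (assuming flags()->redzone == 128): a 100-byte, 8-byte-aligned
  // request gives rz_size = 128, rounded_size = 128 and needed_size = 256: a
  // 128-byte left redzone whose last 16 bytes are the chunk header, then 100
  // user bytes, with the remaining 28 bytes serving as the right redzone.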
  if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
    Report("WARNING: AddressSanitizer failed to allocate %p bytes\n",
           (void*)size);
    return 0;
  }

  AsanThread *t = asanThreadRegistry().GetCurrent();
  // Printf("t = %p\n", t);
  CHECK(t);  // FIXME
  void *allocated = allocator.Allocate(
      GetAllocatorCache(&t->malloc_storage()), needed_size, 8, false);
  uptr alloc_beg = reinterpret_cast<uptr>(allocated);
  uptr alloc_end = alloc_beg + needed_size;
  uptr beg_plus_redzone = alloc_beg + rz_size;
  uptr user_beg = beg_plus_redzone;
  if (!IsAligned(user_beg, alignment))
    user_beg = RoundUpTo(user_beg, alignment);
  uptr user_end = user_beg + size;
  CHECK_LE(user_end, alloc_end);
  uptr chunk_beg = user_beg - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
  m->chunk_state = CHUNK_ALLOCATED;
  m->alloc_type = alloc_type;
  u32 alloc_tid = t ? t->tid() : 0;
  m->alloc_tid = alloc_tid;
  CHECK_EQ(alloc_tid, m->alloc_tid);  // Does alloc_tid fit into the bitfield?
  m->free_tid = kInvalidTid;
  m->from_memalign = user_beg != beg_plus_redzone;
  if (m->from_memalign) {
    CHECK_LE(beg_plus_redzone + 2 * sizeof(uptr), user_beg);
    uptr *memalign_magic = reinterpret_cast<uptr *>(alloc_beg);
    memalign_magic[0] = kMemalignMagic;
    memalign_magic[1] = chunk_beg;
  }
  m->user_requested_size = size;
  StackTrace::CompressStack(stack, m->AllocStackBeg(), m->AllocStackSize());

  uptr size_rounded_down_to_granularity = RoundDownTo(size, SHADOW_GRANULARITY);
  // Unpoison the bulk of the memory region.
  if (size_rounded_down_to_granularity)
    PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
  // Deal with the end of the region if size is not aligned to granularity.
  if (size != size_rounded_down_to_granularity && flags()->poison_heap) {
    u8 *shadow = (u8*)MemToShadow(user_beg + size_rounded_down_to_granularity);
    *shadow = size & (SHADOW_GRANULARITY - 1);
  }
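  // A shadow value k in 1..7 means that only the first k bytes of the
  // corresponding 8-byte granule are addressable, so the partially used last
  // granule is encoded with size % SHADOW_GRANULARITY.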

  AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
  thread_stats.mallocs++;
  thread_stats.malloced += size;

  void *res = reinterpret_cast<void *>(user_beg);
  ASAN_MALLOC_HOOK(res, size);
  return res;
}

static void Deallocate(void *ptr, StackTrace *stack, AllocType alloc_type) {
  uptr p = reinterpret_cast<uptr>(ptr);
  if (p == 0 || p == kReturnOnZeroMalloc) return;
  uptr chunk_beg = p - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

  // Flip the chunk_state atomically to avoid race on double-free.
  u8 old_chunk_state = atomic_exchange((atomic_uint8_t*)m, CHUNK_QUARANTINE,
                                       memory_order_acq_rel);
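  // chunk_state is declared first in ChunkBase, so the cast above addresses
  // exactly that byte. Whichever thread wins the exchange proceeds with the
  // free; a second free of the same pointer observes CHUNK_QUARANTINE and is
  // reported as a double-free below.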

  if (old_chunk_state == CHUNK_QUARANTINE)
    ReportDoubleFree((uptr)ptr, stack);
  else if (old_chunk_state != CHUNK_ALLOCATED)
    ReportFreeNotMalloced((uptr)ptr, stack);
  CHECK(old_chunk_state == CHUNK_ALLOCATED);
  if (m->alloc_type != alloc_type && flags()->alloc_dealloc_mismatch)
    ReportAllocTypeMismatch((uptr)ptr, stack,
                            (AllocType)m->alloc_type, (AllocType)alloc_type);

  CHECK_GE(m->alloc_tid, 0);
  if (SANITIZER_WORDSIZE == 64)  // On 32-bits this resides in user area.
    CHECK_EQ(m->free_tid, kInvalidTid);
  AsanThread *t = asanThreadRegistry().GetCurrent();
  m->free_tid = t ? t->tid() : 0;
  StackTrace::CompressStack(stack, m->FreeStackBeg(), m->FreeStackSize());
  CHECK(m->chunk_state == CHUNK_QUARANTINE);
  // Poison the region.
  PoisonShadow(m->Beg(),
               RoundUpTo(m->user_requested_size, SHADOW_GRANULARITY),
               kAsanHeapFreeMagic);

  AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
  thread_stats.frees++;
  thread_stats.freed += m->UsedSize();

  // Push into quarantine.
  if (t) {
    AsanChunkFifoList &q = t->malloc_storage().quarantine_;
    q.Push(m);

    if (q.size() > kMaxThreadLocalQuarantine)
      quarantine.SwallowThreadLocalQuarantine(&t->malloc_storage());
  } else {
    quarantine.BypassThreadLocalQuarantine(m);
  }

  ASAN_FREE_HOOK(ptr);
}

static void *Reallocate(void *old_ptr, uptr new_size, StackTrace *stack) {
  CHECK(old_ptr && new_size);
  uptr p = reinterpret_cast<uptr>(old_ptr);
  uptr chunk_beg = p - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

  CHECK(m->chunk_state == CHUNK_ALLOCATED);
  uptr old_size = m->UsedSize();
  uptr memcpy_size = Min(new_size, old_size);
  void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC);
  if (new_ptr) {
    CHECK(REAL(memcpy) != 0);
    REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
    Deallocate(old_ptr, stack, FROM_MALLOC);
  }
  return new_ptr;
}

static AsanChunk *GetAsanChunkByAddr(uptr p) {
  uptr alloc_beg = reinterpret_cast<uptr>(
      allocator.GetBlockBegin(reinterpret_cast<void *>(p)));
  if (!alloc_beg) return 0;
  uptr *memalign_magic = reinterpret_cast<uptr *>(alloc_beg);
  if (memalign_magic[0] == kMemalignMagic) {
      AsanChunk *m = reinterpret_cast<AsanChunk *>(memalign_magic[1]);
      CHECK(m->from_memalign);
      return m;
  }
  uptr chunk_beg = alloc_beg + ComputeRZSize(0) - kChunkHeaderSize;
  return reinterpret_cast<AsanChunk *>(chunk_beg);
}
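// For non-memalign'ed chunks the header sits at a fixed offset from the block
// start: ComputeRZSize currently ignores its argument, so
// alloc_beg + ComputeRZSize(0) - kChunkHeaderSize mirrors the layout that
// Allocate set up (left redzone, then the 16-byte header, then user memory).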

static uptr AllocationSize(uptr p) {
  AsanChunk *m = GetAsanChunkByAddr(p);
  if (!m) return 0;
  if (m->chunk_state != CHUNK_ALLOCATED) return 0;
  if (m->Beg() != p) return 0;
  return m->UsedSize();
}

// We have an address between two chunks, and we want to report just one.
AsanChunk *ChooseChunk(uptr addr,
                       AsanChunk *left_chunk, AsanChunk *right_chunk) {
  // Prefer an allocated chunk or a chunk from quarantine.
  if (left_chunk->chunk_state == CHUNK_AVAILABLE &&
      right_chunk->chunk_state != CHUNK_AVAILABLE)
    return right_chunk;
  if (right_chunk->chunk_state == CHUNK_AVAILABLE &&
      left_chunk->chunk_state != CHUNK_AVAILABLE)
    return left_chunk;
  // Choose based on offset.
  uptr l_offset = 0, r_offset = 0;
  CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
  CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
  if (l_offset < r_offset)
    return left_chunk;
  return right_chunk;
}

AsanChunkView FindHeapChunkByAddress(uptr addr) {
  AsanChunk *m1 = GetAsanChunkByAddr(addr);
  if (!m1) return AsanChunkView(m1);
  uptr offset = 0;
  if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
    // The address is in the chunk's left redzone, so maybe it is actually
    // a right buffer overflow from the other chunk to the left.
    // Search a bit to the left to see if there is another chunk.
    AsanChunk *m2 = 0;
    for (uptr l = 1; l < GetPageSizeCached(); l++) {
      m2 = GetAsanChunkByAddr(addr - l);
      if (m2 == m1) continue;  // Still the same chunk.
      Printf("m1 %p m2 %p l %zd\n", m1, m2, l);
      break;
    }
    if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
      m1 = ChooseChunk(addr, m2, m1);
  }
  return AsanChunkView(m1);
}

void AsanThreadLocalMallocStorage::CommitBack() {
  quarantine.SwallowThreadLocalQuarantine(this);
  allocator.SwallowCache(GetAllocatorCache(this));
}

SANITIZER_INTERFACE_ATTRIBUTE
void *asan_memalign(uptr alignment, uptr size, StackTrace *stack,
                    AllocType alloc_type) {
  return Allocate(size, alignment, stack, alloc_type);
}

SANITIZER_INTERFACE_ATTRIBUTE
void asan_free(void *ptr, StackTrace *stack, AllocType alloc_type) {
  Deallocate(ptr, stack, alloc_type);
}

SANITIZER_INTERFACE_ATTRIBUTE
void *asan_malloc(uptr size, StackTrace *stack) {
  return Allocate(size, 8, stack, FROM_MALLOC);
}

void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
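  // Note: nmemb * size is not checked for overflow here; a product that wraps
  // around would result in a smaller-than-expected allocation.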
  void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC);
  if (ptr)
    REAL(memset)(ptr, 0, nmemb * size);
  return ptr;
}

void *asan_realloc(void *p, uptr size, StackTrace *stack) {
  if (p == 0)
    return Allocate(size, 8, stack, FROM_MALLOC);
  if (size == 0) {
    Deallocate(p, stack, FROM_MALLOC);
    return 0;
  }
  return Reallocate(p, size, stack);
}

void *asan_valloc(uptr size, StackTrace *stack) {
  return Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC);
}

void *asan_pvalloc(uptr size, StackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  size = RoundUpTo(size, PageSize);
  if (size == 0) {
    // pvalloc(0) should allocate one page.
    size = PageSize;
  }
  return Allocate(size, PageSize, stack, FROM_MALLOC);
}

int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        StackTrace *stack) {
  void *ptr = Allocate(size, alignment, stack, FROM_MALLOC);
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

uptr asan_malloc_usable_size(void *ptr, StackTrace *stack) {
  CHECK(stack);
  if (ptr == 0) return 0;
  uptr usable_size = AllocationSize(reinterpret_cast<uptr>(ptr));
  if (flags()->check_malloc_usable_size && (usable_size == 0))
    ReportMallocUsableSizeNotOwned((uptr)ptr, stack);
  return usable_size;
}

uptr asan_mz_size(const void *ptr) {
  UNIMPLEMENTED();
  return 0;
}

void asan_mz_force_lock() {
  UNIMPLEMENTED();
}

void asan_mz_force_unlock() {
  UNIMPLEMENTED();
}

}  // namespace __asan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;  // NOLINT

// The ASan allocator doesn't reserve extra bytes, so normally we would just
// return "size". We don't want to expose our redzone sizes, etc., here.
uptr __asan_get_estimated_allocated_size(uptr size) {
  return size;
}

bool __asan_get_ownership(const void *p) {
  return AllocationSize(reinterpret_cast<uptr>(p)) > 0;
}

uptr __asan_get_allocated_size(const void *p) {
  if (p == 0) return 0;
  uptr allocated_size = AllocationSize(reinterpret_cast<uptr>(p));
  // Die if p is not malloced or if it is already freed.
  if (allocated_size == 0) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportAsanGetAllocatedSizeNotOwned(reinterpret_cast<uptr>(p), &stack);
  }
  return allocated_size;
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
extern "C" {
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
void __asan_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
void __asan_free_hook(void *ptr) {
  (void)ptr;
}
}  // extern "C"
#endif


#endif  // ASAN_ALLOCATOR_VERSION