//===-- asan_allocator2.cc ------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Implementation of ASan's memory allocator, 2-nd version.
// This variant uses the allocator from sanitizer_common, i.e. the one shared
// with ThreadSanitizer and MemorySanitizer.
//
// Status: under development, not enabled by default yet.
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"
#if ASAN_ALLOCATOR_VERSION == 2

#include "asan_mapping.h"
#include "asan_report.h"
#include "asan_thread.h"
#include "asan_thread_registry.h"
#include "sanitizer/asan_interface.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"

namespace __asan {

struct AsanMapUnmapCallback {
  void OnMap(uptr p, uptr size) const {
    PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
    // Statistics.
    AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
    thread_stats.mmaps++;
    thread_stats.mmaped += size;
    // thread_stats.mmaped_by_size[size_class] += n_chunks;
  }
  void OnUnmap(uptr p, uptr size) const {
    PoisonShadow(p, size, 0);
    // Statistics.
    AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
    thread_stats.munmaps++;
    thread_stats.munmaped += size;
  }
};

#if SANITIZER_WORDSIZE == 64
const uptr kAllocatorSpace = 0x600000000000ULL;
const uptr kAllocatorSize  =  0x10000000000ULL;  // 1T.
typedef DefaultSizeClassMap SizeClassMap;
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0 /*metadata*/,
    SizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
#elif SANITIZER_WORDSIZE == 32
static const u64 kAddressSpaceSize = 1ULL << 32;
typedef CompactSizeClassMap SizeClassMap;
typedef SizeClassAllocator32<0, kAddressSpaceSize, 16,
  SizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
#endif

typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<AsanMapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
    SecondaryAllocator> Allocator;
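// The combined allocator dispatches between the two: size-classed requests go
// to the primary allocator (through a per-thread AllocatorCache), and anything
// the primary cannot handle falls back to the mmap-based secondary allocator.
// See the CanAllocate()/FromPrimary() checks in Allocate() below.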

// We can not use THREADLOCAL because it is not supported on some of the
// platforms we care about (OSX 10.6, Android).
// static THREADLOCAL AllocatorCache cache;
AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator2_cache));
  return reinterpret_cast<AllocatorCache *>(ms->allocator2_cache);
}

static Allocator allocator;

static const uptr kMaxAllowedMallocSize =
  FIRST_32_SECOND_64(3UL << 30, 8UL << 30);

static const uptr kMaxThreadLocalQuarantine =
  FIRST_32_SECOND_64(1 << 18, 1 << 20);

static const uptr kReturnOnZeroMalloc = 2048;  // Zero page is protected.

static int inited = 0;

static void Init() {
  if (inited) return;
  __asan_init();
  inited = true;  // this must happen before any threads are created.
  allocator.Init();
}

// Every chunk of memory allocated by this allocator can be in one of 3 states:
// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
enum {
  CHUNK_AVAILABLE  = 1,
  CHUNK_ALLOCATED  = 2,
  CHUNK_QUARANTINE = 3
};
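// State transitions: Allocate() marks a chunk CHUNK_ALLOCATED, Deallocate()
// atomically flips it to CHUNK_QUARANTINE (which is how double-frees are
// caught), and Quarantine::PopAndDeallocate() sets CHUNK_AVAILABLE right
// before returning the memory to the underlying allocator.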

// The memory chunk allocated from the underlying allocator looks like this:
// L L L L L L H H U U U U U U R R
//   L -- left redzone words (0 or more bytes)
//   H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
//   U -- user memory.
//   R -- right redzone (0 or more bytes)
// ChunkBase consists of ChunkHeader and other bytes that overlap with user
// memory.

// If a memory chunk is allocated by memalign and we had to increase the
// allocation size to achieve the proper alignment, then we store this magic
// value in the first uptr word of the memory block and store the address of
// ChunkBase in the next uptr.
// M B ? ? ? L L L L L L  H H U U U U U U
//   M -- magic value kMemalignMagic
//   B -- address of ChunkHeader pointing to the first 'H'
static const uptr kMemalignMagic = 0xCC6E96B9;
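// The two magic words are written by Allocate() at the very beginning of the
// memory block whenever alignment padding pushes the user region past the
// regular left redzone. GetAsanChunkByAddr() uses them to recover the chunk
// header from the block start, and the quarantine verifies them before
// handing the block back to the allocator.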

struct ChunkHeader {
  // 1-st 8 bytes.
  u32 chunk_state       : 8;  // Must be first.
  u32 alloc_tid         : 24;

  u32 free_tid          : 24;
  u32 from_memalign     : 1;
  u32 alloc_type        : 2;
  // 2-nd 8 bytes
  // This field is used for small sizes. For large sizes it is equal to
  // SizeClassMap::kMaxSize and the actual size is stored in the
  // SecondaryAllocator's metadata.
  u32 user_requested_size;
  u32 alloc_context_id;
};

struct ChunkBase : ChunkHeader {
  // Header2, intersects with user memory.
  AsanChunk *next;
  u32 free_context_id;
};

static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
COMPILER_CHECK(kChunkHeaderSize == 16);
COMPILER_CHECK(kChunkHeader2Size <= 16);

static uptr ComputeRZSize(uptr user_requested_size) {
  // FIXME: implement adaptive redzones.
  return flags()->redzone;
}

struct AsanChunk: ChunkBase {
  uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
  uptr UsedSize() {
    if (user_requested_size != SizeClassMap::kMaxSize)
      return user_requested_size;
    return *reinterpret_cast<uptr *>(allocator.GetMetaData(AllocBeg()));
  }
  void *AllocBeg() {
    if (from_memalign)
      return allocator.GetBlockBegin(reinterpret_cast<void *>(this));
    return reinterpret_cast<void *>(Beg() - ComputeRZSize(0));
  }
  // We store the alloc/free stack traces in the chunk itself.
  u32 *AllocStackBeg() {
    return (u32*)(Beg() - ComputeRZSize(UsedSize()));
  }
  uptr AllocStackSize() {
    return (ComputeRZSize(UsedSize()) - kChunkHeaderSize) / sizeof(u32);
  }
  u32 *FreeStackBeg() {
    return (u32*)(Beg() + kChunkHeader2Size);
  }
  uptr FreeStackSize() {
    uptr available = Max(RoundUpTo(UsedSize(), SHADOW_GRANULARITY),
                         ComputeRZSize(UsedSize()));
    return (available - kChunkHeader2Size) / sizeof(u32);
  }
};

uptr AsanChunkView::Beg() { return chunk_->Beg(); }
uptr AsanChunkView::End() { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }

void AsanChunkView::GetAllocStack(StackTrace *stack) {
  StackTrace::UncompressStack(stack, chunk_->AllocStackBeg(),
                              chunk_->AllocStackSize());
}

void AsanChunkView::GetFreeStack(StackTrace *stack) {
  StackTrace::UncompressStack(stack, chunk_->FreeStackBeg(),
                              chunk_->FreeStackSize());
}

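// Freed chunks are not returned to the allocator immediately: they sit in this
// FIFO quarantine (capped at flags()->quarantine_size bytes) so that
// use-after-free accesses hit poisoned, still-reserved memory and get reported
// instead of landing in a reused allocation. Each thread batches frees in a
// local list (up to kMaxThreadLocalQuarantine bytes) and then flushes it here.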
class Quarantine: public AsanChunkFifoList {
 public:
  void SwallowThreadLocalQuarantine(AsanThreadLocalMallocStorage *ms) {
    AsanChunkFifoList *q = &ms->quarantine_;
    if (!q->size()) return;
    SpinMutexLock l(&mutex_);
    PushList(q);
    PopAndDeallocateLoop(ms);
  }

  void BypassThreadLocalQuarantine(AsanChunk *m) {
    SpinMutexLock l(&mutex_);
    Push(m);
  }

 private:
  void PopAndDeallocateLoop(AsanThreadLocalMallocStorage *ms) {
    while (size() > (uptr)flags()->quarantine_size) {
      PopAndDeallocate(ms);
    }
  }
  void PopAndDeallocate(AsanThreadLocalMallocStorage *ms) {
    CHECK_GT(size(), 0);
    AsanChunk *m = Pop();
    CHECK(m);
    CHECK(m->chunk_state == CHUNK_QUARANTINE);
    m->chunk_state = CHUNK_AVAILABLE;
    CHECK_NE(m->alloc_tid, kInvalidTid);
    CHECK_NE(m->free_tid, kInvalidTid);
    PoisonShadow(m->Beg(),
                 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
                 kAsanHeapLeftRedzoneMagic);
    uptr alloc_beg = m->Beg() - ComputeRZSize(m->UsedSize());
    void *p = reinterpret_cast<void *>(alloc_beg);
    if (m->from_memalign) {
      p = allocator.GetBlockBegin(p);
      uptr *memalign_magic = reinterpret_cast<uptr *>(p);
      CHECK_EQ(memalign_magic[0], kMemalignMagic);
      CHECK_EQ(memalign_magic[1], reinterpret_cast<uptr>(m));
    }

    // Statistics.
    AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
    thread_stats.real_frees++;
    thread_stats.really_freed += m->UsedSize();

    allocator.Deallocate(GetAllocatorCache(ms), p);
  }
  SpinMutex mutex_;
};

static Quarantine quarantine;

void AsanChunkFifoList::PushList(AsanChunkFifoList *q) {
  CHECK(q->size() > 0);
  size_ += q->size();
  append_back(q);
  q->clear();
}

void AsanChunkFifoList::Push(AsanChunk *n) {
  push_back(n);
  size_ += n->UsedSize();
}

// Interesting performance observation: this function takes up to 15% of overall
// allocator time. That's because *first_ was evicted from the cache a long time
// ago. Not sure if we can or want to do anything with this.
AsanChunk *AsanChunkFifoList::Pop() {
  CHECK(first_);
  AsanChunk *res = front();
  size_ -= res->UsedSize();
  pop_front();
  return res;
}

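// Allocation path: compute the chunk layout (left redzone + ChunkHeader +
// user memory + optional right redzone), grab a block from the combined
// allocator, fill in the ChunkHeader, record the compressed allocation stack
// in the left redzone (just before the ChunkHeader), and unpoison exactly the
// user-visible bytes.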
static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
                      AllocType alloc_type) {
  Init();
  CHECK(stack);
  if (alignment < 8) alignment = 8;
  if (size == 0) {
    if (alignment <= kReturnOnZeroMalloc)
      return reinterpret_cast<void *>(kReturnOnZeroMalloc);
    else
      return 0;  // 0 bytes with large alignment requested. Just return 0.
  }
  CHECK(IsPowerOfTwo(alignment));
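  // Layout math: round the user size up to the redzone size, prepend one
  // redzone (which also holds the ChunkHeader and the compressed alloc stack),
  // and if the requested alignment exceeds the redzone size, over-allocate by
  // 'alignment' bytes so that user_beg can be aligned inside the block (the
  // memalign case below).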
  uptr rz_size = ComputeRZSize(size);
  uptr rounded_size = RoundUpTo(size, rz_size);
  uptr needed_size = rounded_size + rz_size;
  if (alignment > rz_size)
    needed_size += alignment;
  bool using_primary_allocator = true;
  // If we are allocating from the secondary allocator, there will be no
  // automatic right redzone, so add the right redzone manually.
  if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) {
    needed_size += rz_size;
    using_primary_allocator = false;
  }
  CHECK(IsAligned(needed_size, rz_size));
  if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
    Report("WARNING: AddressSanitizer failed to allocate %p bytes\n",
           (void*)size);
    return 0;
  }

  AsanThread *t = asanThreadRegistry().GetCurrent();
  AllocatorCache *cache = t ? GetAllocatorCache(&t->malloc_storage()) : 0;
  void *allocated = allocator.Allocate(cache, needed_size, 8, false);
  uptr alloc_beg = reinterpret_cast<uptr>(allocated);
  uptr alloc_end = alloc_beg + needed_size;
  uptr beg_plus_redzone = alloc_beg + rz_size;
  uptr user_beg = beg_plus_redzone;
  if (!IsAligned(user_beg, alignment))
    user_beg = RoundUpTo(user_beg, alignment);
  uptr user_end = user_beg + size;
  CHECK_LE(user_end, alloc_end);
  uptr chunk_beg = user_beg - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
  m->chunk_state = CHUNK_ALLOCATED;
  m->alloc_type = alloc_type;
  u32 alloc_tid = t ? t->tid() : 0;
  m->alloc_tid = alloc_tid;
  CHECK_EQ(alloc_tid, m->alloc_tid);  // Does alloc_tid fit into the bitfield?
  m->free_tid = kInvalidTid;
  m->from_memalign = user_beg != beg_plus_redzone;
  if (m->from_memalign) {
    CHECK_LE(beg_plus_redzone + 2 * sizeof(uptr), user_beg);
    uptr *memalign_magic = reinterpret_cast<uptr *>(alloc_beg);
    memalign_magic[0] = kMemalignMagic;
    memalign_magic[1] = chunk_beg;
  }
  if (using_primary_allocator) {
    CHECK(size);
    m->user_requested_size = size;
    CHECK(allocator.FromPrimary(allocated));
  } else {
    CHECK(!allocator.FromPrimary(allocated));
    m->user_requested_size = SizeClassMap::kMaxSize;
    *reinterpret_cast<uptr *>(allocator.GetMetaData(allocated)) = size;
  }
  StackTrace::CompressStack(stack, m->AllocStackBeg(), m->AllocStackSize());

  uptr size_rounded_down_to_granularity = RoundDownTo(size, SHADOW_GRANULARITY);
  // Unpoison the bulk of the memory region.
  if (size_rounded_down_to_granularity)
    PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
  // Deal with the end of the region if size is not aligned to granularity.
  if (size != size_rounded_down_to_granularity && flags()->poison_heap) {
    u8 *shadow = (u8*)MemToShadow(user_beg + size_rounded_down_to_granularity);
    *shadow = size & (SHADOW_GRANULARITY - 1);
  }

  AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
  thread_stats.mallocs++;
  thread_stats.malloced += size;
  thread_stats.malloced_redzones += needed_size - size;
  uptr class_id = Min(kNumberOfSizeClasses, SizeClassMap::ClassID(needed_size));
  thread_stats.malloced_by_size[class_id]++;
  if (needed_size > SizeClassMap::kMaxSize)
    thread_stats.malloc_large++;

  void *res = reinterpret_cast<void *>(user_beg);
  ASAN_MALLOC_HOOK(res, size);
  return res;
}

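// Deallocation path: atomically move the chunk to CHUNK_QUARANTINE (reporting
// double-free or free-of-non-malloced memory based on the previous state),
// record the free stack, poison the user region with kAsanHeapFreeMagic, and
// hand the chunk to the quarantine rather than freeing it right away.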
static void Deallocate(void *ptr, StackTrace *stack, AllocType alloc_type) {
  uptr p = reinterpret_cast<uptr>(ptr);
  if (p == 0 || p == kReturnOnZeroMalloc) return;
  uptr chunk_beg = p - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

  // Flip the chunk_state atomically to avoid race on double-free.
  u8 old_chunk_state = atomic_exchange((atomic_uint8_t*)m, CHUNK_QUARANTINE,
                                       memory_order_acq_rel);

  if (old_chunk_state == CHUNK_QUARANTINE)
    ReportDoubleFree((uptr)ptr, stack);
  else if (old_chunk_state != CHUNK_ALLOCATED)
    ReportFreeNotMalloced((uptr)ptr, stack);
  CHECK(old_chunk_state == CHUNK_ALLOCATED);
  if (m->alloc_type != alloc_type && flags()->alloc_dealloc_mismatch)
    ReportAllocTypeMismatch((uptr)ptr, stack,
                            (AllocType)m->alloc_type, (AllocType)alloc_type);

  CHECK_GE(m->alloc_tid, 0);
  if (SANITIZER_WORDSIZE == 64)  // On 32-bits this resides in user area.
    CHECK_EQ(m->free_tid, kInvalidTid);
  AsanThread *t = asanThreadRegistry().GetCurrent();
  m->free_tid = t ? t->tid() : 0;
  StackTrace::CompressStack(stack, m->FreeStackBeg(), m->FreeStackSize());
  CHECK(m->chunk_state == CHUNK_QUARANTINE);
  // Poison the region.
  PoisonShadow(m->Beg(),
               RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
               kAsanHeapFreeMagic);

  AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
  thread_stats.frees++;
  thread_stats.freed += m->UsedSize();

  // Push into quarantine.
  if (t) {
    AsanChunkFifoList &q = t->malloc_storage().quarantine_;
    q.Push(m);

    if (q.size() > kMaxThreadLocalQuarantine)
      quarantine.SwallowThreadLocalQuarantine(&t->malloc_storage());
  } else {
    quarantine.BypassThreadLocalQuarantine(m);
  }

  ASAN_FREE_HOOK(ptr);
}

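// realloc is implemented as allocate-copy-free: a new chunk is always created,
// Min(old, new) bytes are copied, and the old chunk goes through the normal
// Deallocate() path (so it is quarantined like any other free).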
static void *Reallocate(void *old_ptr, uptr new_size, StackTrace *stack) {
  CHECK(old_ptr && new_size);
  uptr p = reinterpret_cast<uptr>(old_ptr);
  uptr chunk_beg = p - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

  AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
  thread_stats.reallocs++;
  thread_stats.realloced += new_size;

  CHECK(m->chunk_state == CHUNK_ALLOCATED);
  uptr old_size = m->UsedSize();
  uptr memcpy_size = Min(new_size, old_size);
  void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC);
  if (new_ptr) {
    CHECK(REAL(memcpy) != 0);
    REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
    Deallocate(old_ptr, stack, FROM_MALLOC);
  }
  return new_ptr;
}

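// Map an arbitrary address inside a heap block to its AsanChunk header: ask
// the allocator for the beginning of the block, then either follow the
// memalign magic words or assume the header sits at the fixed offset
// ComputeRZSize(0) - kChunkHeaderSize from the block start.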
static AsanChunk *GetAsanChunkByAddr(uptr p) {
  uptr alloc_beg = reinterpret_cast<uptr>(
      allocator.GetBlockBegin(reinterpret_cast<void *>(p)));
  if (!alloc_beg) return 0;
  uptr *memalign_magic = reinterpret_cast<uptr *>(alloc_beg);
  if (memalign_magic[0] == kMemalignMagic) {
    AsanChunk *m = reinterpret_cast<AsanChunk *>(memalign_magic[1]);
    CHECK(m->from_memalign);
    return m;
  }
  uptr chunk_beg = alloc_beg + ComputeRZSize(0) - kChunkHeaderSize;
  return reinterpret_cast<AsanChunk *>(chunk_beg);
}

static uptr AllocationSize(uptr p) {
  AsanChunk *m = GetAsanChunkByAddr(p);
  if (!m) return 0;
  if (m->chunk_state != CHUNK_ALLOCATED) return 0;
  if (m->Beg() != p) return 0;
  return m->UsedSize();
}

// We have an address between two chunks, and we want to report just one.
AsanChunk *ChooseChunk(uptr addr,
                       AsanChunk *left_chunk, AsanChunk *right_chunk) {
  // Prefer an allocated chunk or a chunk from quarantine.
  if (left_chunk->chunk_state == CHUNK_AVAILABLE &&
      right_chunk->chunk_state != CHUNK_AVAILABLE)
    return right_chunk;
  if (right_chunk->chunk_state == CHUNK_AVAILABLE &&
      left_chunk->chunk_state != CHUNK_AVAILABLE)
    return left_chunk;
  // Choose based on offset.
  uptr l_offset = 0, r_offset = 0;
  CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
  CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
  if (l_offset < r_offset)
    return left_chunk;
  return right_chunk;
}

AsanChunkView FindHeapChunkByAddress(uptr addr) {
  AsanChunk *m1 = GetAsanChunkByAddr(addr);
  if (!m1) return AsanChunkView(m1);
  uptr offset = 0;
  if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
    // The address is in the chunk's left redzone, so maybe it is actually
    // a right buffer overflow from the other chunk to the left.
    // Search a bit to the left to see if there is another chunk.
    AsanChunk *m2 = 0;
    for (uptr l = 1; l < GetPageSizeCached(); l++) {
      m2 = GetAsanChunkByAddr(addr - l);
      if (m2 == m1) continue;  // Still the same chunk.
      break;
    }
    if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
      m1 = ChooseChunk(addr, m2, m1);
  }
  return AsanChunkView(m1);
}

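// Flush this thread's local quarantine into the global one and return the
// thread-local allocator cache to the central allocator (presumably invoked
// when the thread's malloc storage is being retired).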
void AsanThreadLocalMallocStorage::CommitBack() {
  quarantine.SwallowThreadLocalQuarantine(this);
  allocator.SwallowCache(GetAllocatorCache(this));
}

SANITIZER_INTERFACE_ATTRIBUTE
void *asan_memalign(uptr alignment, uptr size, StackTrace *stack,
                    AllocType alloc_type) {
  return Allocate(size, alignment, stack, alloc_type);
}

SANITIZER_INTERFACE_ATTRIBUTE
void asan_free(void *ptr, StackTrace *stack, AllocType alloc_type) {
  Deallocate(ptr, stack, alloc_type);
}

SANITIZER_INTERFACE_ATTRIBUTE
void *asan_malloc(uptr size, StackTrace *stack) {
  return Allocate(size, 8, stack, FROM_MALLOC);
}

void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
  void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC);
  if (ptr)
    REAL(memset)(ptr, 0, nmemb * size);
  return ptr;
}

void *asan_realloc(void *p, uptr size, StackTrace *stack) {
  if (p == 0)
    return Allocate(size, 8, stack, FROM_MALLOC);
  if (size == 0) {
    Deallocate(p, stack, FROM_MALLOC);
    return 0;
  }
  return Reallocate(p, size, stack);
}

void *asan_valloc(uptr size, StackTrace *stack) {
  return Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC);
}

void *asan_pvalloc(uptr size, StackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  size = RoundUpTo(size, PageSize);
  if (size == 0) {
    // pvalloc(0) should allocate one page.
    size = PageSize;
  }
  return Allocate(size, PageSize, stack, FROM_MALLOC);
}

int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        StackTrace *stack) {
  void *ptr = Allocate(size, alignment, stack, FROM_MALLOC);
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

uptr asan_malloc_usable_size(void *ptr, StackTrace *stack) {
  CHECK(stack);
  if (ptr == 0) return 0;
  uptr usable_size = AllocationSize(reinterpret_cast<uptr>(ptr));
  if (flags()->check_malloc_usable_size && (usable_size == 0))
    ReportMallocUsableSizeNotOwned((uptr)ptr, stack);
  return usable_size;
}

uptr asan_mz_size(const void *ptr) {
  UNIMPLEMENTED();
  return 0;
}

void asan_mz_force_lock() {
  UNIMPLEMENTED();
}

void asan_mz_force_unlock() {
  UNIMPLEMENTED();
}

}  // namespace __asan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;  // NOLINT

// ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size". We don't want to expose our redzone sizes, etc., here.
uptr __asan_get_estimated_allocated_size(uptr size) {
  return size;
}

bool __asan_get_ownership(const void *p) {
  return AllocationSize(reinterpret_cast<uptr>(p)) > 0;
}

uptr __asan_get_allocated_size(const void *p) {
  if (p == 0) return 0;
  uptr allocated_size = AllocationSize(reinterpret_cast<uptr>(p));
  // Die if p is not malloced or if it is already freed.
  if (allocated_size == 0) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportAsanGetAllocatedSizeNotOwned(reinterpret_cast<uptr>(p), &stack);
  }
  return allocated_size;
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
extern "C" {
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
void __asan_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
void __asan_free_hook(void *ptr) {
  (void)ptr;
}
}  // extern "C"
#endif


#endif  // ASAN_ALLOCATOR_VERSION