//===-- scudo_allocator.cpp -------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// Scudo Hardened Allocator implementation.
/// It uses the sanitizer_common allocator as a base and aims at mitigating
/// heap corruption vulnerabilities. It provides a checksum-guarded chunk
/// header, a delayed free list, and additional sanity checks.
///
//===----------------------------------------------------------------------===//

#include "scudo_allocator.h"
#include "scudo_utils.h"

#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_quarantine.h"

#include <limits.h>
#include <pthread.h>
#include <smmintrin.h>

#include <atomic>
#include <cstring>

namespace __scudo {

const uptr AllocatorSpace = ~0ULL;
const uptr AllocatorSize  =  0x10000000000ULL;
const uptr MinAlignmentLog = 4; // 16 bytes for x64
const uptr MaxAlignmentLog = 24;

typedef DefaultSizeClassMap SizeClassMap;
typedef SizeClassAllocator64<AllocatorSpace, AllocatorSize, 0, SizeClassMap>
  PrimaryAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator>
  ScudoAllocator;

static ScudoAllocator &getAllocator();

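// Per-thread PRNG: it seeds the global Cookie during initialization, and
// provides a Salt for the header of each allocated chunk.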
static thread_local Xorshift128Plus Prng;
// Global static cookie, initialized at start-up.
static u64 Cookie;

enum ChunkState : u8 {
  ChunkAvailable  = 0,
  ChunkAllocated  = 1,
  ChunkQuarantine = 2
};

typedef unsigned __int128 PackedHeader;
typedef std::atomic<PackedHeader> AtomicPackedHeader;

// Our header requires 128 bits of storage on x64 (the only platform supported
// as of now), which fits nicely with the alignment requirements.
// Having the offset saves us from using functions such as GetBlockBegin, which
// is fairly costly. Our first implementation used the MetaData as well, which
// offers the advantage of being stored away from the chunk itself, but
// accessing it was costly as well.
// The header will be atomically loaded and stored using the 16-byte primitives
// offered by the platform (likely requires cmpxchg16b support).
struct UnpackedHeader {
  // 1st 8 bytes
  u16 Checksum      : 16;
  u64 RequestedSize : 40; // Needed for reallocation purposes.
  u8  State         : 2;  // available, allocated, or quarantined
  u8  AllocType     : 2;  // malloc, new, new[], or memalign
  u8  Unused_0_     : 4;
  // 2nd 8 bytes
  u64 Offset        : 20; // Offset from the beginning of the backend
                          // allocation to the beginning of the chunk itself,
                          // in multiples of MinAlignment. See comment about
                          // its maximum value and the check in init().
  u64 Unused_1_     : 28;
  u16 Salt          : 16;
};

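// Each 64-bit half of UnpackedHeader packs exactly 64 bits of bit-fields
// (16 + 40 + 2 + 2 + 4 and 20 + 28 + 16), so the whole struct occupies the
// same 128 bits as PackedHeader, which the check below enforces.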
COMPILER_CHECK(sizeof(UnpackedHeader) == sizeof(PackedHeader));

const uptr ChunkHeaderSize = sizeof(PackedHeader);

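// A ScudoChunk is the in-memory view of the header sitting right in front of
// the user chunk. It is never modified in place: callers load a local copy
// with loadHeader, work on that copy, then write it back with storeHeader or
// compareExchangeHeader, so that tampering with the in-memory header is
// detected on the next access.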
struct ScudoChunk : UnpackedHeader {
  // We can't use the offset member of the chunk itself, as we would double
  // fetch it without any guarantee that it wouldn't have been tampered with.
  // To prevent this, we work with a local copy of the header.
  void *AllocBeg(UnpackedHeader *Header) {
    return reinterpret_cast<void *>(
        reinterpret_cast<uptr>(this) - (Header->Offset << MinAlignmentLog));
  }

  // CRC32 checksum of the Chunk pointer and its ChunkHeader.
  // It currently uses the Intel Nehalem SSE4.2 crc32 64-bit instruction.
  u16 Checksum(UnpackedHeader *Header) const {
    u64 HeaderHolder[2];
    memcpy(HeaderHolder, Header, sizeof(HeaderHolder));
    u64 Crc = _mm_crc32_u64(Cookie, reinterpret_cast<uptr>(this));
    // This is somewhat of a shortcut. The checksum is stored in the 16 least
    // significant bits of the first 8 bytes of the header, hence we zero those
    // bits out. It would be cleaner to zero the checksum field of the
    // UnpackedHeader, but that would require holding an additional copy of it.
    Crc = _mm_crc32_u64(Crc, HeaderHolder[0] & 0xffffffffffff0000ULL);
    Crc = _mm_crc32_u64(Crc, HeaderHolder[1]);
    return static_cast<u16>(Crc);
  }

  // Loads and unpacks the header, verifying the checksum in the process.
  void loadHeader(UnpackedHeader *NewUnpackedHeader) const {
    const AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<const AtomicPackedHeader *>(this);
    PackedHeader NewPackedHeader =
        AtomicHeader->load(std::memory_order_relaxed);
    *NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
    if ((NewUnpackedHeader->Unused_0_ != 0) ||
        (NewUnpackedHeader->Unused_1_ != 0) ||
        (NewUnpackedHeader->Checksum != Checksum(NewUnpackedHeader))) {
      dieWithMessage("ERROR: corrupted chunk header at address %p\n", this);
    }
  }

  // Packs and stores the header, computing the checksum in the process.
  void storeHeader(UnpackedHeader *NewUnpackedHeader) {
    NewUnpackedHeader->Checksum = Checksum(NewUnpackedHeader);
    PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
    AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<AtomicPackedHeader *>(this);
    AtomicHeader->store(NewPackedHeader, std::memory_order_relaxed);
  }

  // Packs and stores the header, computing the checksum in the process. We
  // compare the current header with the expected one provided, to ensure that
  // we are not being raced by a corruption occurring in another thread.
  void compareExchangeHeader(UnpackedHeader *NewUnpackedHeader,
                             UnpackedHeader *OldUnpackedHeader) {
    NewUnpackedHeader->Checksum = Checksum(NewUnpackedHeader);
    PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
    PackedHeader OldPackedHeader = bit_cast<PackedHeader>(*OldUnpackedHeader);
    AtomicPackedHeader *AtomicHeader =
        reinterpret_cast<AtomicPackedHeader *>(this);
    if (!AtomicHeader->compare_exchange_strong(OldPackedHeader,
                                               NewPackedHeader,
                                               std::memory_order_relaxed,
                                               std::memory_order_relaxed)) {
      dieWithMessage("ERROR: race on chunk header at address %p\n", this);
    }
  }
};

static bool ScudoInitIsRunning = false;

static pthread_once_t GlobalInited = PTHREAD_ONCE_INIT;
static pthread_key_t pkey;

static thread_local bool ThreadInited = false;
static thread_local bool ThreadTornDown = false;
static thread_local AllocatorCache Cache;

static void teardownThread(void *p) {
  uptr v = reinterpret_cast<uptr>(p);
  // The glibc POSIX thread-local-storage deallocation routine calls user
  // provided destructors in a loop of PTHREAD_DESTRUCTOR_ITERATIONS.
  // We want to be called last since other destructors might call free and the
  // like, so we wait until PTHREAD_DESTRUCTOR_ITERATIONS before draining the
  // quarantine and destroying the thread cache.
  if (v < PTHREAD_DESTRUCTOR_ITERATIONS) {
    pthread_setspecific(pkey, reinterpret_cast<void *>(v + 1));
    return;
  }
  drainQuarantine();
  getAllocator().DestroyCache(&Cache);
  ThreadTornDown = true;
}

static void initInternal() {
  SanitizerToolName = "Scudo";
  CHECK(!ScudoInitIsRunning && "Scudo init calls itself!");
  ScudoInitIsRunning = true;

  initFlags();

  AllocatorOptions Options;
  Options.setFrom(getFlags(), common_flags());
  initAllocator(Options);

  ScudoInitIsRunning = false;
}

static void initGlobal() {
  pthread_key_create(&pkey, teardownThread);
  initInternal();
}

static void NOINLINE initThread() {
  pthread_once(&GlobalInited, initGlobal);
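  // The thread-specific value starts at 1: teardownThread increments it on
  // each destructor iteration, and only tears the thread data down once it
  // reaches PTHREAD_DESTRUCTOR_ITERATIONS.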
  pthread_setspecific(pkey, reinterpret_cast<void *>(1));
  getAllocator().InitCache(&Cache);
  ThreadInited = true;
}

struct QuarantineCallback {
  explicit QuarantineCallback(AllocatorCache *Cache)
    : Cache_(Cache) {}

  // Chunk recycling function, returns a quarantined chunk to the backend.
  void Recycle(ScudoChunk *Chunk) {
    UnpackedHeader Header;
    Chunk->loadHeader(&Header);
    if (Header.State != ChunkQuarantine) {
      dieWithMessage("ERROR: invalid chunk state when recycling address %p\n",
                     Chunk);
    }
    void *Ptr = Chunk->AllocBeg(&Header);
    getAllocator().Deallocate(Cache_, Ptr);
  }

  // Internal quarantine allocation and deallocation functions.
  void *Allocate(uptr Size) {
    // The internal quarantine memory cannot be protected by us. But the only
    // structures allocated are QuarantineBatches, which are 8KB on x64. So we
    // will use mmap for those, and given that Deallocate doesn't pass a size
    // in, we enforce the size of the allocation to be sizeof(QuarantineBatch).
    // TODO(kostyak): switching to mmap greatly impacts performance, we have
    //                to find another solution.
    // CHECK_EQ(Size, sizeof(QuarantineBatch));
    // return MmapOrDie(Size, "QuarantineBatch");
    return getAllocator().Allocate(Cache_, Size, 1, false);
  }

  void Deallocate(void *Ptr) {
    // UnmapOrDie(Ptr, sizeof(QuarantineBatch));
    getAllocator().Deallocate(Cache_, Ptr);
  }

  AllocatorCache *Cache_;
};

typedef Quarantine<QuarantineCallback, ScudoChunk> ScudoQuarantine;
typedef ScudoQuarantine::Cache QuarantineCache;
static thread_local QuarantineCache ThreadQuarantineCache;

void AllocatorOptions::setFrom(const Flags *f, const CommonFlags *cf) {
  MayReturnNull = cf->allocator_may_return_null;
  QuarantineSizeMb = f->QuarantineSizeMb;
  ThreadLocalQuarantineSizeKb = f->ThreadLocalQuarantineSizeKb;
  DeallocationTypeMismatch = f->DeallocationTypeMismatch;
  DeleteSizeMismatch = f->DeleteSizeMismatch;
  ZeroContents = f->ZeroContents;
}

void AllocatorOptions::copyTo(Flags *f, CommonFlags *cf) const {
  cf->allocator_may_return_null = MayReturnNull;
  f->QuarantineSizeMb = QuarantineSizeMb;
  f->ThreadLocalQuarantineSizeKb = ThreadLocalQuarantineSizeKb;
  f->DeallocationTypeMismatch = DeallocationTypeMismatch;
  f->DeleteSizeMismatch = DeleteSizeMismatch;
  f->ZeroContents = ZeroContents;
}

struct Allocator {
  static const uptr MaxAllowedMallocSize = 1ULL << 40;
  static const uptr MinAlignment = 1 << MinAlignmentLog;
  static const uptr MaxAlignment = 1 << MaxAlignmentLog; // 16 MB

  ScudoAllocator BackendAllocator;
  ScudoQuarantine AllocatorQuarantine;

  // The fallback caches are used when the thread-local caches have been
  // 'destroyed' on thread tear-down. They are protected by a Mutex as they
  // can be accessed by different threads.
  StaticSpinMutex FallbackMutex;
  AllocatorCache FallbackAllocatorCache;
  QuarantineCache FallbackQuarantineCache;

  bool DeallocationTypeMismatch;
  bool ZeroContents;
  bool DeleteSizeMismatch;

  explicit Allocator(LinkerInitialized)
    : AllocatorQuarantine(LINKER_INITIALIZED),
      FallbackQuarantineCache(LINKER_INITIALIZED) {}

  void init(const AllocatorOptions &Options) {
    // Currently SSE 4.2 support is required. This might change later.
    CHECK(testCPUFeature(SSE4_2)); // for crc32

    // Verify that the header offset field can hold the maximum offset. In the
    // worst case scenario, the backend allocation is already aligned on
    // MaxAlignment, so in order to store the header and still be aligned, we
    // add an extra MaxAlignment. As a result, the offset from the beginning of
    // the backend allocation to the chunk will be MaxAlignment -
    // ChunkHeaderSize.
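    // With the current values, MaximumOffset is (2^24 - 16) >> 4 = 2^20 - 1,
    // which is exactly the largest value the 20-bit Offset field can hold.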
    UnpackedHeader Header = {};
    uptr MaximumOffset = (MaxAlignment - ChunkHeaderSize) >> MinAlignmentLog;
    Header.Offset = MaximumOffset;
    if (Header.Offset != MaximumOffset) {
      dieWithMessage("ERROR: the maximum possible offset doesn't fit in the "
                     "header\n");
    }

    DeallocationTypeMismatch = Options.DeallocationTypeMismatch;
    DeleteSizeMismatch = Options.DeleteSizeMismatch;
    ZeroContents = Options.ZeroContents;
    BackendAllocator.Init(Options.MayReturnNull);
    AllocatorQuarantine.Init(static_cast<uptr>(Options.QuarantineSizeMb) << 20,
                             static_cast<uptr>(
                                 Options.ThreadLocalQuarantineSizeKb) << 10);
    BackendAllocator.InitCache(&FallbackAllocatorCache);
    Cookie = Prng.Next();
  }

  // Allocates a chunk.
  void *allocate(uptr Size, uptr Alignment, AllocType Type) {
    if (UNLIKELY(!ThreadInited))
      initThread();
    if (!IsPowerOfTwo(Alignment)) {
      dieWithMessage("ERROR: malloc alignment is not a power of 2\n");
    }
    if (Alignment > MaxAlignment)
      return BackendAllocator.ReturnNullOrDie();
    if (Alignment < MinAlignment)
      Alignment = MinAlignment;
    if (Size == 0)
      Size = 1;
    if (Size >= MaxAllowedMallocSize)
      return BackendAllocator.ReturnNullOrDie();
    uptr RoundedSize = RoundUpTo(Size, MinAlignment);
    uptr ExtraBytes = ChunkHeaderSize;
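    // The backend allocation is only guaranteed to be MinAlignment aligned, so
    // for larger alignments we reserve an extra Alignment bytes, which leaves
    // room to slide the user chunk (preceded by its header) up to a properly
    // aligned address.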
    if (Alignment > MinAlignment)
      ExtraBytes += Alignment;
    uptr NeededSize = RoundedSize + ExtraBytes;
    if (NeededSize >= MaxAllowedMallocSize)
      return BackendAllocator.ReturnNullOrDie();

    void *Ptr;
    if (LIKELY(!ThreadTornDown)) {
      Ptr = BackendAllocator.Allocate(&Cache, NeededSize, MinAlignment);
    } else {
      SpinMutexLock l(&FallbackMutex);
      Ptr = BackendAllocator.Allocate(&FallbackAllocatorCache, NeededSize,
                                      MinAlignment);
    }
    if (!Ptr)
      return BackendAllocator.ReturnNullOrDie();

    // If requested, we will zero out the entire contents of the returned chunk.
    if (ZeroContents && BackendAllocator.FromPrimary(Ptr))
      memset(Ptr, 0, BackendAllocator.GetActuallyAllocatedSize(Ptr));

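    // Carve out the chunk: the header sits right in front of the user chunk,
    // which is rounded up to the requested alignment; the distance back to the
    // start of the backend allocation is recorded in the header's Offset.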
    uptr AllocBeg = reinterpret_cast<uptr>(Ptr);
    uptr ChunkBeg = AllocBeg + ChunkHeaderSize;
    if (!IsAligned(ChunkBeg, Alignment))
      ChunkBeg = RoundUpTo(ChunkBeg, Alignment);
    CHECK_LE(ChunkBeg + Size, AllocBeg + NeededSize);
    ScudoChunk *Chunk =
        reinterpret_cast<ScudoChunk *>(ChunkBeg - ChunkHeaderSize);
    UnpackedHeader Header = {};
    Header.State = ChunkAllocated;
    Header.Offset = (ChunkBeg - ChunkHeaderSize - AllocBeg) >> MinAlignmentLog;
    Header.AllocType = Type;
    Header.RequestedSize = Size;
    Header.Salt = static_cast<u16>(Prng.Next());
    Chunk->storeHeader(&Header);
    void *UserPtr = reinterpret_cast<void *>(ChunkBeg);
    // TODO(kostyak): hooks sound like a terrible idea security wise but might
    //                be needed for things to work properly?
    // if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(UserPtr, Size);
    return UserPtr;
  }

  // Deallocates a Chunk, which means adding it to the delayed free list (or
  // Quarantine).
  void deallocate(void *UserPtr, uptr DeleteSize, AllocType Type) {
    if (UNLIKELY(!ThreadInited))
      initThread();
    // TODO(kostyak): see hook comment above
    // if (&__sanitizer_free_hook) __sanitizer_free_hook(UserPtr);
    if (!UserPtr)
      return;
    uptr ChunkBeg = reinterpret_cast<uptr>(UserPtr);
    if (!IsAligned(ChunkBeg, MinAlignment)) {
      dieWithMessage("ERROR: attempted to deallocate a chunk not properly "
                     "aligned at address %p\n", UserPtr);
    }
    ScudoChunk *Chunk =
        reinterpret_cast<ScudoChunk *>(ChunkBeg - ChunkHeaderSize);
    UnpackedHeader OldHeader;
    Chunk->loadHeader(&OldHeader);
    if (OldHeader.State != ChunkAllocated) {
      dieWithMessage("ERROR: invalid chunk state when deallocating address "
                     "%p\n", Chunk);
    }
    UnpackedHeader NewHeader = OldHeader;
    NewHeader.State = ChunkQuarantine;
    Chunk->compareExchangeHeader(&NewHeader, &OldHeader);
    if (DeallocationTypeMismatch) {
      // The deallocation type has to match the allocation one.
      if (NewHeader.AllocType != Type) {
        // With the exception of memalign'd Chunks, which can still be free'd.
        if (NewHeader.AllocType != FromMemalign || Type != FromMalloc) {
          dieWithMessage("ERROR: allocation type mismatch on address %p\n",
                         Chunk);
        }
      }
    }
    uptr Size = NewHeader.RequestedSize;
    if (DeleteSizeMismatch) {
      if (DeleteSize && DeleteSize != Size) {
        dieWithMessage("ERROR: invalid sized delete on chunk at address %p\n",
                       Chunk);
      }
    }
    if (LIKELY(!ThreadTornDown)) {
      AllocatorQuarantine.Put(&ThreadQuarantineCache,
                              QuarantineCallback(&Cache), Chunk, Size);
    } else {
      SpinMutexLock l(&FallbackMutex);
      AllocatorQuarantine.Put(&FallbackQuarantineCache,
                              QuarantineCallback(&FallbackAllocatorCache),
                              Chunk, Size);
    }
  }

  // Returns the actual usable size of a chunk. Since this requires loading the
  // header, we return the header via the second parameter, as the caller might
  // need it for additional processing.
  uptr getUsableSize(const void *Ptr, UnpackedHeader *Header) {
    if (UNLIKELY(!ThreadInited))
      initThread();
    if (!Ptr)
      return 0;
    uptr ChunkBeg = reinterpret_cast<uptr>(Ptr);
    ScudoChunk *Chunk =
        reinterpret_cast<ScudoChunk *>(ChunkBeg - ChunkHeaderSize);
    Chunk->loadHeader(Header);
    // Getting the usable size of a chunk only makes sense if it's allocated.
    if (Header->State != ChunkAllocated) {
      dieWithMessage("ERROR: attempted to size a non-allocated chunk at "
                     "address %p\n", Chunk);
    }
    uptr Size =
        BackendAllocator.GetActuallyAllocatedSize(Chunk->AllocBeg(Header));
    // UsableSize works as malloc_usable_size, which is also what (AFAIU)
    // tcmalloc's MallocExtension::GetAllocatedSize aims at providing. This
    // means we return the size of the chunk from the beginning of the 'user'
    // allocation to its end, hence we subtract the header size and the offset
    // from the backend allocation size.
    if (Size == 0)
      return Size;
    return Size - ChunkHeaderSize - (Header->Offset << MinAlignmentLog);
  }

  // Helper function that doesn't care about the header.
  uptr getUsableSize(const void *Ptr) {
    UnpackedHeader Header;
    return getUsableSize(Ptr, &Header);
  }

  // Reallocates a chunk. We can save on a new allocation if the new requested
  // size still fits in the chunk.
  void *reallocate(void *OldPtr, uptr NewSize) {
    if (UNLIKELY(!ThreadInited))
      initThread();
    UnpackedHeader OldHeader;
    uptr Size = getUsableSize(OldPtr, &OldHeader);
    uptr ChunkBeg = reinterpret_cast<uptr>(OldPtr);
    ScudoChunk *Chunk =
        reinterpret_cast<ScudoChunk *>(ChunkBeg - ChunkHeaderSize);
    if (OldHeader.AllocType != FromMalloc) {
      dieWithMessage("ERROR: invalid chunk type when reallocating address %p\n",
                     Chunk);
    }
    UnpackedHeader NewHeader = OldHeader;
    // The new size still fits in the current chunk.
    if (NewSize <= Size) {
      NewHeader.RequestedSize = NewSize;
      Chunk->compareExchangeHeader(&NewHeader, &OldHeader);
      return OldPtr;
    }
    // Otherwise, we have to allocate a new chunk and copy the contents of the
    // old one.
    void *NewPtr = allocate(NewSize, MinAlignment, FromMalloc);
    if (NewPtr) {
      uptr OldSize = OldHeader.RequestedSize;
      memcpy(NewPtr, OldPtr, Min(NewSize, OldSize));
      NewHeader.State = ChunkQuarantine;
      Chunk->compareExchangeHeader(&NewHeader, &OldHeader);
      if (LIKELY(!ThreadTornDown)) {
        AllocatorQuarantine.Put(&ThreadQuarantineCache,
                                QuarantineCallback(&Cache), Chunk, OldSize);
      } else {
        SpinMutexLock l(&FallbackMutex);
        AllocatorQuarantine.Put(&FallbackQuarantineCache,
                                QuarantineCallback(&FallbackAllocatorCache),
                                Chunk, OldSize);
      }
    }
    return NewPtr;
  }

  void *calloc(uptr NMemB, uptr Size) {
    if (UNLIKELY(!ThreadInited))
      initThread();
    uptr Total = NMemB * Size;
    if (Size != 0 && Total / Size != NMemB) // Overflow check
      return BackendAllocator.ReturnNullOrDie();
    void *Ptr = allocate(Total, MinAlignment, FromMalloc);
    // If ZeroContents, the content of the chunk has already been zero'd out.
    if (!ZeroContents && Ptr && BackendAllocator.FromPrimary(Ptr))
      memset(Ptr, 0, getUsableSize(Ptr));
    return Ptr;
  }

  void drainQuarantine() {
    AllocatorQuarantine.Drain(&ThreadQuarantineCache,
                              QuarantineCallback(&Cache));
  }
};

static Allocator Instance(LINKER_INITIALIZED);

static ScudoAllocator &getAllocator() {
  return Instance.BackendAllocator;
}

void initAllocator(const AllocatorOptions &Options) {
  Instance.init(Options);
}

void drainQuarantine() {
  Instance.drainQuarantine();
}

void *scudoMalloc(uptr Size, AllocType Type) {
  return Instance.allocate(Size, Allocator::MinAlignment, Type);
}

void scudoFree(void *Ptr, AllocType Type) {
  Instance.deallocate(Ptr, 0, Type);
}

void scudoSizedFree(void *Ptr, uptr Size, AllocType Type) {
  Instance.deallocate(Ptr, Size, Type);
}

void *scudoRealloc(void *Ptr, uptr Size) {
  if (!Ptr)
    return Instance.allocate(Size, Allocator::MinAlignment, FromMalloc);
  if (Size == 0) {
    Instance.deallocate(Ptr, 0, FromMalloc);
    return nullptr;
  }
  return Instance.reallocate(Ptr, Size);
}

void *scudoCalloc(uptr NMemB, uptr Size) {
  return Instance.calloc(NMemB, Size);
}

void *scudoValloc(uptr Size) {
  return Instance.allocate(Size, GetPageSizeCached(), FromMemalign);
}

void *scudoMemalign(uptr Alignment, uptr Size) {
  return Instance.allocate(Size, Alignment, FromMemalign);
}

void *scudoPvalloc(uptr Size) {
  uptr PageSize = GetPageSizeCached();
  Size = RoundUpTo(Size, PageSize);
  if (Size == 0) {
    // pvalloc(0) should allocate one page.
    Size = PageSize;
  }
  return Instance.allocate(Size, PageSize, FromMemalign);
}

int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size) {
  *MemPtr = Instance.allocate(Size, Alignment, FromMemalign);
  return 0;
}

void *scudoAlignedAlloc(uptr Alignment, uptr Size) {
  // Size must be a multiple of Alignment. To avoid a division, we first make
  // sure that Alignment is a power of 2, which lets us use a mask instead.
  CHECK(IsPowerOfTwo(Alignment));
  CHECK_EQ((Size & (Alignment - 1)), 0);
  return Instance.allocate(Size, Alignment, FromMalloc);
}

uptr scudoMallocUsableSize(void *Ptr) {
  return Instance.getUsableSize(Ptr);
}

} // namespace __scudo

using namespace __scudo;

// MallocExtension helper functions

uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  getAllocator().GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  getAllocator().GetStats(stats);
  return stats[AllocatorStatMapped];
}

uptr __sanitizer_get_free_bytes() {
  return 1;
}

uptr __sanitizer_get_unmapped_bytes() {
  return 1;
}

uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *p) {
  return Instance.getUsableSize(p) != 0;
}

uptr __sanitizer_get_allocated_size(const void *p) {
  return Instance.getUsableSize(p);
}