//===-- sanitizer_allocator.cc --------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
// This allocator is used inside run-times.
//===----------------------------------------------------------------------===//
#include "sanitizer_allocator.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_common.h"

namespace __sanitizer {

// ThreadSanitizer for Go uses libc malloc/free.
#if defined(SANITIZER_GO) || defined(SANITIZER_USE_MALLOC)
# if SANITIZER_LINUX && !SANITIZER_ANDROID
extern "C" void *__libc_malloc(uptr size);
extern "C" void __libc_free(void *ptr);
#  define LIBC_MALLOC __libc_malloc
#  define LIBC_FREE __libc_free
# else
#  include <stdlib.h>
#  define LIBC_MALLOC malloc
#  define LIBC_FREE free
# endif

static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache) {
  (void)cache;
  return LIBC_MALLOC(size);
}

static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
  (void)cache;
  LIBC_FREE(ptr);
}

InternalAllocator *internal_allocator() {
  return 0;
}

#else  // SANITIZER_GO

static ALIGNED(64) char internal_alloc_placeholder[sizeof(InternalAllocator)];
static atomic_uint8_t internal_allocator_initialized;
static StaticSpinMutex internal_alloc_init_mu;

static InternalAllocatorCache internal_allocator_cache;
static StaticSpinMutex internal_allocator_cache_mu;

// Returns the lazily initialized allocator instance. Double-checked locking
// ensures Init() runs exactly once.
InternalAllocator *internal_allocator() {
  InternalAllocator *internal_allocator_instance =
      reinterpret_cast<InternalAllocator *>(&internal_alloc_placeholder);
  if (atomic_load(&internal_allocator_initialized, memory_order_acquire) == 0) {
    SpinMutexLock l(&internal_alloc_init_mu);
    if (atomic_load(&internal_allocator_initialized, memory_order_relaxed) ==
        0) {
      internal_allocator_instance->Init(/* may_return_null */ false);
      atomic_store(&internal_allocator_initialized, 1, memory_order_release);
    }
  }
  return internal_allocator_instance;
}

// Callers without a per-thread cache share internal_allocator_cache, which
// is protected by internal_allocator_cache_mu.
static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache) {
  if (cache == 0) {
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Allocate(&internal_allocator_cache, size, 8,
                                          false);
  }
  return internal_allocator()->Allocate(cache, size, 8, false);
}

static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
  if (cache == 0) {
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Deallocate(&internal_allocator_cache, ptr);
  }
  internal_allocator()->Deallocate(cache, ptr);
}

#endif  // SANITIZER_GO

// Every block returned by InternalAlloc is preceded by a u64 holding this
// magic value; InternalFree checks it to catch mismatched or double frees.
const u64 kBlockMagic = 0x6A6CB03ABCEBC041ull;

void *InternalAlloc(uptr size, InternalAllocatorCache *cache) {
  // Refuse sizes that would overflow once the magic header is added.
  if (size + sizeof(u64) < size)
    return 0;
  void *p = RawInternalAlloc(size + sizeof(u64), cache);
  if (p == 0)
    return 0;
  ((u64*)p)[0] = kBlockMagic;
  return (char*)p + sizeof(u64);
}

void InternalFree(void *addr, InternalAllocatorCache *cache) {
  if (addr == 0)
    return;
  addr = (char*)addr - sizeof(u64);
  CHECK_EQ(kBlockMagic, ((u64*)addr)[0]);
  ((u64*)addr)[0] = 0;
  RawInternalFree(addr, cache);
}
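// A minimal usage sketch (illustrative only; kept as a comment so it is not
// compiled into the run-time, and the function name is hypothetical). It
// assumes the default cache argument declared in
// sanitizer_allocator_internal.h, so cache == 0 and the shared
// internal_allocator_cache is used:
//
//   static void ExampleInternalAllocUsage() {
//     void *buf = InternalAlloc(128);  // Reserves 128 + sizeof(u64) bytes.
//     if (buf) {
//       // The magic header sits just below the returned pointer.
//       CHECK_EQ(kBlockMagic, ((u64 *)buf)[-1]);
//       InternalFree(buf);  // Verifies and clears the magic, then frees.
//     }
//   }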
// LowLevelAllocator
static LowLevelAllocateCallback low_level_alloc_callback;

void *LowLevelAllocator::Allocate(uptr size) {
  // Align allocation size.
  size = RoundUpTo(size, 8);
  if (allocated_end_ - allocated_current_ < (sptr)size) {
    // Out of space in the current chunk: map a fresh one. Any remainder of
    // the old chunk is abandoned; this allocator never unmaps.
    uptr size_to_allocate = Max(size, GetPageSizeCached());
    allocated_current_ =
        (char*)MmapOrDie(size_to_allocate, __func__);
    allocated_end_ = allocated_current_ + size_to_allocate;
    if (low_level_alloc_callback) {
      low_level_alloc_callback((uptr)allocated_current_,
                               size_to_allocate);
    }
  }
  CHECK(allocated_end_ - allocated_current_ >= (sptr)size);
  void *res = allocated_current_;
  allocated_current_ += size;
  return res;
}

void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback) {
  low_level_alloc_callback = callback;
}

// Returns true if size * n would overflow a uptr. Dividing the maximum
// value by size avoids performing the overflowing multiplication itself.
bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n) {
  if (!size) return false;
  uptr max = (uptr)-1L;
  return (max / size) < n;
}

void NORETURN ReportAllocatorCannotReturnNull() {
  Report("%s's allocator is terminating the process instead of returning 0\n",
         SanitizerToolName);
  Report("If you don't like this behavior set allocator_may_return_null=1\n");
  CHECK(0);
  Die();
}

}  // namespace __sanitizer
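// Worked example for CallocShouldReturnNullDueToOverflow (a sketch; the
// numbers assume a 32-bit uptr): calloc(0x10000, 0x10000) needs
// 0x10000 * 0x10000 == 2^32 bytes, which wraps to 0. The check computes
//   max / size == 0xffffffff / 0x10000 == 0xffff,
// and 0xffff < n == 0x10000, so the function returns true and the caller
// returns null instead of an undersized block.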