//===-- msan_allocator.cc -------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemorySanitizer.
//
// MemorySanitizer allocator.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "msan.h"
#include "msan_allocator.h"
#include "msan_origin.h"
#include "msan_thread.h"
#include "msan_poisoning.h"

namespace __msan {

struct Metadata {
  uptr requested_size;
};

struct MsanMapUnmapCallback {
  void OnMap(uptr p, uptr size) const {}
  void OnUnmap(uptr p, uptr size) const {
    __msan_unpoison((void *)p, size);

    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    FlushUnneededShadowMemory(MEM_TO_SHADOW(p), size);
    if (__msan_get_track_origins())
      FlushUnneededShadowMemory(MEM_TO_ORIGIN(p), size);
  }
};
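
// Per-platform primary allocator configuration (see the #if blocks below).
// 64-bit targets with a large fixed address space (x86_64, powerpc64) place a
// SizeClassAllocator64 at a hard-coded base address; mips64 and aarch64 have
// a smaller usable VA range and use the region-based SizeClassAllocator32
// instead.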
#if defined(__mips64)
  static const uptr kMaxAllowedMallocSize = 2UL << 30;
  static const uptr kRegionSizeLog = 20;
  static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
  typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;
  typedef CompactSizeClassMap SizeClassMap;

  typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE, sizeof(Metadata),
                               SizeClassMap, kRegionSizeLog, ByteMap,
                               MsanMapUnmapCallback> PrimaryAllocator;

#elif defined(__x86_64__)
#if SANITIZER_LINUX && !defined(MSAN_LINUX_X86_64_OLD_MAPPING)
  static const uptr kAllocatorSpace = 0x700000000000ULL;
#else
  static const uptr kAllocatorSpace = 0x600000000000ULL;
#endif
  static const uptr kAllocatorSize = 0x80000000000;  // 8T.
  static const uptr kMetadataSize = sizeof(Metadata);
  static const uptr kMaxAllowedMallocSize = 8UL << 30;

  typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, kMetadataSize,
                               DefaultSizeClassMap,
                               MsanMapUnmapCallback> PrimaryAllocator;

#elif defined(__powerpc64__)
  static const uptr kAllocatorSpace = 0x300000000000;
  static const uptr kAllocatorSize = 0x020000000000;  // 2T
  static const uptr kMetadataSize = sizeof(Metadata);
  static const uptr kMaxAllowedMallocSize = 2UL << 30;  // 2G

  typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, kMetadataSize,
                               DefaultSizeClassMap,
                               MsanMapUnmapCallback> PrimaryAllocator;
#elif defined(__aarch64__)
  static const uptr kMaxAllowedMallocSize = 2UL << 30;  // 2G
  static const uptr kRegionSizeLog = 20;
  static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
  typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;
  typedef CompactSizeClassMap SizeClassMap;

  typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE, sizeof(Metadata),
                               SizeClassMap, kRegionSizeLog, ByteMap,
                               MsanMapUnmapCallback> PrimaryAllocator;
#endif
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<MsanMapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
                          SecondaryAllocator> Allocator;

static Allocator allocator;
static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex;

void MsanAllocatorInit() {
  allocator.Init(common_flags()->allocator_may_return_null);
}

AllocatorCache *GetAllocatorCache(MsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator_cache));
  return reinterpret_cast<AllocatorCache *>(ms->allocator_cache);
}

void MsanThreadLocalMallocStorage::CommitBack() {
  allocator.SwallowCache(GetAllocatorCache(this));
}
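
// All heap allocation funnels through MsanAllocate. It draws from the current
// thread's allocator cache when a thread object exists and otherwise falls
// back to a global cache guarded by fallback_mutex (e.g. very early in
// process startup). Unless the caller asked for zeroed memory, the fresh
// chunk is poisoned, i.e. marked uninitialized; with origin tracking enabled,
// the allocation stack is recorded so that later use-of-uninitialized-value
// reports can point back to the allocation site.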
static void *MsanAllocate(StackTrace *stack, uptr size, uptr alignment,
                          bool zeroise) {
  if (size > kMaxAllowedMallocSize) {
    Report("WARNING: MemorySanitizer failed to allocate %p bytes\n",
           (void *)size);
    return allocator.ReturnNullOrDie();
  }
  MsanThread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocated = allocator.Allocate(cache, size, alignment, false);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, size, alignment, false);
  }
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
  meta->requested_size = size;
  if (zeroise) {
    __msan_clear_and_unpoison(allocated, size);
  } else if (flags()->poison_in_malloc) {
    __msan_poison(allocated, size);
    if (__msan_get_track_origins()) {
      stack->tag = StackTrace::TAG_ALLOC;
      Origin o = Origin::CreateHeapOrigin(stack);
      __msan_set_origin(allocated, size, o.raw_id());
    }
  }
  MSAN_MALLOC_HOOK(allocated, size);
  return allocated;
}

void MsanDeallocate(StackTrace *stack, void *p) {
  CHECK(p);
  MSAN_FREE_HOOK(p);
  Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(p));
  uptr size = meta->requested_size;
  meta->requested_size = 0;
  // This memory will not be reused by anyone else, so we are free to keep it
  // poisoned.
  if (flags()->poison_in_free) {
    __msan_poison(p, size);
    if (__msan_get_track_origins()) {
      stack->tag = StackTrace::TAG_DEALLOC;
      Origin o = Origin::CreateHeapOrigin(stack);
      __msan_set_origin(p, size, o.raw_id());
    }
  }
  MsanThread *t = GetCurrentThread();
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocator.Deallocate(cache, p);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocator.Deallocate(cache, p);
  }
}

void *MsanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
  if (CallocShouldReturnNullDueToOverflow(size, nmemb))
    return allocator.ReturnNullOrDie();
  return MsanReallocate(stack, nullptr, nmemb * size, sizeof(u64), true);
}

void *MsanReallocate(StackTrace *stack, void *old_p, uptr new_size,
                     uptr alignment, bool zeroise) {
  if (!old_p)
    return MsanAllocate(stack, new_size, alignment, zeroise);
  if (!new_size) {
    MsanDeallocate(stack, old_p);
    return nullptr;
  }
  Metadata *meta = reinterpret_cast<Metadata*>(allocator.GetMetaData(old_p));
  uptr old_size = meta->requested_size;
  uptr actually_allocated_size = allocator.GetActuallyAllocatedSize(old_p);
  if (new_size <= actually_allocated_size) {
    // We are not reallocating here.
    meta->requested_size = new_size;
    if (new_size > old_size) {
      if (zeroise) {
        __msan_clear_and_unpoison((char *)old_p + old_size,
                                  new_size - old_size);
      } else if (flags()->poison_in_malloc) {
        stack->tag = StackTrace::TAG_ALLOC;
        PoisonMemory((char *)old_p + old_size, new_size - old_size, stack);
      }
    }
    return old_p;
  }
  uptr memcpy_size = Min(new_size, old_size);
  void *new_p = MsanAllocate(stack, new_size, alignment, zeroise);
  // Printf("realloc: old_size %zd new_size %zd\n", old_size, new_size);
  if (new_p) {
    CopyMemory(new_p, old_p, memcpy_size, stack);
    MsanDeallocate(stack, old_p);
  }
  return new_p;
}

static uptr AllocationSize(const void *p) {
  if (!p) return 0;
  const void *beg = allocator.GetBlockBegin(p);
  if (beg != p) return 0;
  Metadata *b = (Metadata *)allocator.GetMetaData(p);
  return b->requested_size;
}

}  // namespace __msan

using namespace __msan;

uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

uptr __sanitizer_get_free_bytes() { return 1; }

uptr __sanitizer_get_unmapped_bytes() { return 1; }

uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }

uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }
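
// Usage sketch (hypothetical; not compiled as part of this file). With the
// malloc/free interceptors routing into MsanAllocate/MsanDeallocate, the
// introspection entry points above report the requested size, not the
// size-class-rounded one, because AllocationSize reads requested_size from
// the chunk metadata:
//
//   char *p = (char *)malloc(100);
//   assert(__sanitizer_get_ownership(p));              // p starts a live chunk
//   assert(__sanitizer_get_allocated_size(p) == 100);  // requested_size
//   free(p);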