//===-- msan_allocator.cc ------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemorySanitizer.
//
// MemorySanitizer allocator.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "msan.h"
#include "msan_allocator.h"
#include "msan_origin.h"
#include "msan_poisoning.h"
#include "msan_thread.h"

namespace __msan {

// Per-chunk metadata kept by the allocator next to each user chunk.
struct Metadata {
  uptr requested_size;
};

struct MsanMapUnmapCallback {
  void OnMap(uptr p, uptr size) const {}
  void OnUnmap(uptr p, uptr size) const {
    __msan_unpoison((void *)p, size);

    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    FlushUnneededShadowMemory(MEM_TO_SHADOW(p), size);
    if (__msan_get_track_origins())
      FlushUnneededShadowMemory(MEM_TO_ORIGIN(p), size);
  }
};

// Allocator composition: a size-class primary allocator with per-thread
// caches, backed by a LargeMmapAllocator for oversized requests; the
// CombinedAllocator dispatches between the two.
#if defined(__mips64)
  static const uptr kMaxAllowedMallocSize = 2UL << 30;
  static const uptr kRegionSizeLog = 20;
  static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
  typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;
  typedef CompactSizeClassMap SizeClassMap;

  typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE, sizeof(Metadata),
                               SizeClassMap, kRegionSizeLog, ByteMap,
                               MsanMapUnmapCallback> PrimaryAllocator;
#elif defined(__x86_64__)
  static const uptr kAllocatorSpace = 0x600000000000ULL;
  static const uptr kAllocatorSize = 0x80000000000;  // 8T.
  static const uptr kMetadataSize = sizeof(Metadata);
  static const uptr kMaxAllowedMallocSize = 8UL << 30;

  typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, kMetadataSize,
                               DefaultSizeClassMap,
                               MsanMapUnmapCallback> PrimaryAllocator;
#endif
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<MsanMapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
                          SecondaryAllocator> Allocator;

static Allocator allocator;
static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex;

static bool inited = false;

static inline void Init() {
  if (inited) return;
  __msan_init();
  inited = true;  // This must happen before any threads are created.
  allocator.Init(common_flags()->allocator_may_return_null);
}
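
// Returns the thread-local allocator cache embedded in the opaque
// MsanThreadLocalMallocStorage buffer; the CHECK_LE below verifies that the
// buffer is large enough to hold an AllocatorCache.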
AllocatorCache *GetAllocatorCache(MsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator_cache));
  return reinterpret_cast<AllocatorCache *>(ms->allocator_cache);
}

void MsanThreadLocalMallocStorage::CommitBack() {
  allocator.SwallowCache(GetAllocatorCache(this));
}

static void *MsanAllocate(StackTrace *stack, uptr size, uptr alignment,
                          bool zeroise) {
  Init();
  if (size > kMaxAllowedMallocSize) {
    Report("WARNING: MemorySanitizer failed to allocate %zd bytes\n", size);
    return allocator.ReturnNullOrDie();
  }
  MsanThread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocated = allocator.Allocate(cache, size, alignment, false);
  } else {
    // Allocation from a thread without MSan TLS: fall back to a shared,
    // mutex-protected cache.
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, size, alignment, false);
  }
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
  meta->requested_size = size;
  if (zeroise) {
    __msan_clear_and_unpoison(allocated, size);
  } else if (flags()->poison_in_malloc) {
    __msan_poison(allocated, size);
    if (__msan_get_track_origins()) {
      stack->tag = StackTrace::TAG_ALLOC;
      Origin o = Origin::CreateHeapOrigin(stack);
      __msan_set_origin(allocated, size, o.raw_id());
    }
  }
  MSAN_MALLOC_HOOK(allocated, size);
  return allocated;
}

void MsanDeallocate(StackTrace *stack, void *p) {
  CHECK(p);
  Init();
  MSAN_FREE_HOOK(p);
  Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(p));
  uptr size = meta->requested_size;
  meta->requested_size = 0;
  // This memory will not be reused by anyone else, so we are free to keep it
  // poisoned.
  if (flags()->poison_in_free) {
    __msan_poison(p, size);
    if (__msan_get_track_origins()) {
      stack->tag = StackTrace::TAG_DEALLOC;
      Origin o = Origin::CreateHeapOrigin(stack);
      __msan_set_origin(p, size, o.raw_id());
    }
  }
  MsanThread *t = GetCurrentThread();
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocator.Deallocate(cache, p);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocator.Deallocate(cache, p);
  }
}

void *MsanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
  Init();
  if (CallocShouldReturnNullDueToOverflow(size, nmemb))
    return allocator.ReturnNullOrDie();
  return MsanReallocate(stack, 0, nmemb * size, sizeof(u64), true);
}

void *MsanReallocate(StackTrace *stack, void *old_p, uptr new_size,
                     uptr alignment, bool zeroise) {
  if (!old_p)
    return MsanAllocate(stack, new_size, alignment, zeroise);
  if (!new_size) {
    MsanDeallocate(stack, old_p);
    return 0;
  }
  Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(old_p));
  uptr old_size = meta->requested_size;
  uptr actually_allocated_size = allocator.GetActuallyAllocatedSize(old_p);
  if (new_size <= actually_allocated_size) {
    // The existing chunk is big enough; resize in place.
    meta->requested_size = new_size;
    if (new_size > old_size) {
      if (zeroise) {
        __msan_clear_and_unpoison((char *)old_p + old_size,
                                  new_size - old_size);
      } else if (flags()->poison_in_malloc) {
        stack->tag = StackTrace::TAG_ALLOC;
        PoisonMemory((char *)old_p + old_size, new_size - old_size, stack);
      }
    }
    return old_p;
  }
  uptr memcpy_size = Min(new_size, old_size);
  void *new_p = MsanAllocate(stack, new_size, alignment, zeroise);
  if (new_p) {
    // Copy the old contents (and their shadow/origin) into the new chunk,
    // then release the old one.
    CopyMemory(new_p, old_p, memcpy_size, stack);
    MsanDeallocate(stack, old_p);
  }
  return new_p;
}
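
// Returns the user-requested size of a live chunk, or 0 when p is null or
// does not point to the beginning of a chunk owned by this allocator.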
static uptr AllocationSize(const void *p) {
  if (!p) return 0;
  const void *beg = allocator.GetBlockBegin(p);
  if (beg != p) return 0;
  Metadata *b = (Metadata *)allocator.GetMetaData(p);
  return b->requested_size;
}

}  // namespace __msan

using namespace __msan;

uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

// These two metrics are not tracked by this allocator; return placeholders.
uptr __sanitizer_get_free_bytes() { return 1; }

uptr __sanitizer_get_unmapped_bytes() { return 1; }

uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }

uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }
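
// A minimal usage sketch (illustrative only, not part of this file),
// assuming the public declarations from <sanitizer/allocator_interface.h>:
//
//   void *p = malloc(100);
//   if (__sanitizer_get_ownership(p))
//     assert(__sanitizer_get_allocated_size(p) == 100);
//   free(p);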