msan_allocator.cc revision 2d1fdb26e458c4ddc04155c1d421bced3ba90cd0
//===-- msan_allocator.cc -------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemorySanitizer.
//
// MemorySanitizer allocator.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "msan.h"
#include "msan_allocator.h"
#include "msan_chained_origin_depot.h"
#include "msan_origin.h"
#include "msan_thread.h"

namespace __msan {

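// Per-chunk metadata the allocator keeps next to each heap block. Only the
// size the user asked for is recorded; it is consulted by MsanReallocate and
// by the allocation-size queries below.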
struct Metadata {
  uptr requested_size;
};

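// Callbacks invoked by the allocator when it maps or unmaps memory from the
// OS. On unmap, unpoison the region and release the corresponding shadow
// (and, when origin tracking is on, origin) ranges back to the OS.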
struct MsanMapUnmapCallback {
  void OnMap(uptr p, uptr size) const {}
  void OnUnmap(uptr p, uptr size) const {
    __msan_unpoison((void *)p, size);

    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    FlushUnneededShadowMemory(MEM_TO_SHADOW(p), size);
    if (__msan_get_track_origins())
      FlushUnneededShadowMemory(MEM_TO_ORIGIN(p), size);
  }
};

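// The primary allocator manages a fixed 8 TB region starting at
// kAllocatorSpace. A single allocation is capped at kMaxAllowedMallocSize
// (8 GB); larger requests fail with a warning.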
static const uptr kAllocatorSpace = 0x600000000000ULL;
static const uptr kAllocatorSize   = 0x80000000000;  // 8T.
static const uptr kMetadataSize  = sizeof(Metadata);
static const uptr kMaxAllowedMallocSize = 8UL << 30;

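// Small and medium requests are served from size-classed chunks in the fixed
// primary region; requests that do not fit a size class fall through to the
// secondary allocator, which mmaps each block individually. CombinedAllocator
// dispatches between the two, and AllocatorCache is its per-thread free-list
// cache.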
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, kMetadataSize,
                             DefaultSizeClassMap,
                             MsanMapUnmapCallback> PrimaryAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<MsanMapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
                          SecondaryAllocator> Allocator;

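// A single global allocator instance. The fallback cache, guarded by
// fallback_mutex, serves calls made from threads that have no MsanThread
// (e.g. very early in process start-up).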
static Allocator allocator;
static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex;

static int inited = 0;

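// Lazily initializes the MSan runtime and the allocator on first use.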
static inline void Init() {
  if (inited) return;
  __msan_init();
  inited = true;  // this must happen before any threads are created.
  allocator.Init();
}

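// Per-thread malloc storage reserves an opaque buffer that is at least as
// large as an AllocatorCache (checked below); view it as one.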
AllocatorCache *GetAllocatorCache(MsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator_cache));
  return reinterpret_cast<AllocatorCache *>(ms->allocator_cache);
}

void MsanThreadLocalMallocStorage::CommitBack() {
  allocator.SwallowCache(GetAllocatorCache(this));
}

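// Allocates a heap block from the current thread's cache (or the locked
// fallback cache), records the requested size, then either zeroes and
// unpoisons the memory (zeroise) or poisons it as uninitialized. With origin
// tracking enabled, the poisoned bytes get an origin chain rooted at the
// allocation stack.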
static void *MsanAllocate(StackTrace *stack, uptr size, uptr alignment,
                          bool zeroise) {
  Init();
  if (size > kMaxAllowedMallocSize) {
    Report("WARNING: MemorySanitizer failed to allocate %p bytes\n",
           (void *)size);
    return AllocatorReturnNull();
  }
  MsanThread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocated = allocator.Allocate(cache, size, alignment, false);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, size, alignment, false);
  }
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
  meta->requested_size = size;
  if (zeroise) {
    __msan_clear_and_unpoison(allocated, size);
  } else if (flags()->poison_in_malloc) {
    __msan_poison(allocated, size);
    if (__msan_get_track_origins()) {
      u32 stack_id = StackDepotPut(stack->trace, stack->size);
      CHECK(stack_id);
      u32 id;
      ChainedOriginDepotPut(stack_id, Origin::kHeapRoot, &id);
      __msan_set_origin(allocated, size, Origin(id, 1).raw_id());
    }
  }
  MSAN_MALLOC_HOOK(allocated, size);
  return allocated;
}

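// Frees a heap block. The chunk is optionally re-poisoned and, with origin
// tracking, tagged with the deallocation stack before being returned to the
// appropriate cache.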
void MsanDeallocate(StackTrace *stack, void *p) {
  CHECK(p);
  Init();
  MSAN_FREE_HOOK(p);
  Metadata *meta = reinterpret_cast<Metadata*>(allocator.GetMetaData(p));
  uptr size = meta->requested_size;
  meta->requested_size = 0;
  // This memory will not be reused by anyone else, so we are free to keep it
  // poisoned.
  if (flags()->poison_in_free) {
    __msan_poison(p, size);
    if (__msan_get_track_origins()) {
      u32 stack_id = StackDepotPut(stack->trace, stack->size);
      CHECK(stack_id);
      u32 id;
      ChainedOriginDepotPut(stack_id, Origin::kHeapRoot, &id);
      __msan_set_origin(p, size, Origin(id, 1).raw_id());
    }
  }
  MsanThread *t = GetCurrentThread();
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocator.Deallocate(cache, p);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocator.Deallocate(cache, p);
  }
}

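// realloc: grows or shrinks in place when the new size still fits into the
// block that was actually allocated, poisoning any newly exposed tail.
// Otherwise allocates a new block, copies min(old, new) bytes (together with
// their shadow) via __msan_memcpy, and frees the old block.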
void *MsanReallocate(StackTrace *stack, void *old_p, uptr new_size,
                     uptr alignment, bool zeroise) {
  if (!old_p)
    return MsanAllocate(stack, new_size, alignment, zeroise);
  if (!new_size) {
    MsanDeallocate(stack, old_p);
    return 0;
  }
  Metadata *meta = reinterpret_cast<Metadata*>(allocator.GetMetaData(old_p));
  uptr old_size = meta->requested_size;
  uptr actually_allocated_size = allocator.GetActuallyAllocatedSize(old_p);
  if (new_size <= actually_allocated_size) {
    // We are not reallocating here.
    meta->requested_size = new_size;
    if (new_size > old_size)
      __msan_poison((char*)old_p + old_size, new_size - old_size);
    return old_p;
  }
  uptr memcpy_size = Min(new_size, old_size);
  void *new_p = MsanAllocate(stack, new_size, alignment, zeroise);
  // Printf("realloc: old_size %zd new_size %zd\n", old_size, new_size);
  if (new_p) {
    __msan_memcpy(new_p, old_p, memcpy_size);
    MsanDeallocate(stack, old_p);
  }
  return new_p;
}

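// Returns the user-requested size of a live heap block, or 0 if the pointer
// is null, does not point at the beginning of a block, or is not owned by
// this allocator.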
static uptr AllocationSize(const void *p) {
  if (p == 0)
    return 0;
  const void *beg = allocator.GetBlockBegin(p);
  if (beg != p)
    return 0;
  Metadata *b = (Metadata*)allocator.GetMetaData(p);
  return b->requested_size;
}

}  // namespace __msan

using namespace __msan;

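// Allocator introspection entry points exposed to users. Byte counts are
// derived from the allocator's global statistics; queries that are not
// tracked return a simple placeholder.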
uptr __msan_get_current_allocated_bytes() {
  u64 stats[AllocatorStatCount];
  allocator.GetStats(stats);
  u64 m = stats[AllocatorStatMalloced];
  u64 f = stats[AllocatorStatFreed];
  return m >= f ? m - f : 1;
}

uptr __msan_get_heap_size() {
  u64 stats[AllocatorStatCount];
  allocator.GetStats(stats);
  u64 m = stats[AllocatorStatMmapped];
  u64 f = stats[AllocatorStatUnmapped];
  return m >= f ? m - f : 1;
}

uptr __msan_get_free_bytes() {
  return 1;
}

uptr __msan_get_unmapped_bytes() {
  return 1;
}

uptr __msan_get_estimated_allocated_size(uptr size) {
  return size;
}

int __msan_get_ownership(const void *p) {
  return AllocationSize(p) != 0;
}

uptr __msan_get_allocated_size(const void *p) {
  return AllocationSize(p);
}