tsan_mman.cc revision 2d1fdb26e458c4ddc04155c1d421bced3ba90cd0
//===-- tsan_mman.cc ------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_mman.h"
#include "tsan_rtl.h"
#include "tsan_report.h"
#include "tsan_flags.h"

// May be overridden by the front-end.
21extern "C" void WEAK __tsan_malloc_hook(void *ptr, uptr size) {
22  (void)ptr;
23  (void)size;
24}
25
26extern "C" void WEAK __tsan_free_hook(void *ptr) {
27  (void)ptr;
28}
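//
// Illustrative sketch (not part of the runtime): because the hooks above are
// weak symbols, a front-end or test can observe allocations by providing
// strong definitions in its own translation unit, e.g.:
//
//   #include <stddef.h>
//   #include <stdio.h>
//   // Assumes size_t matches uptr on the target platform.
//   extern "C" void __tsan_malloc_hook(void *ptr, size_t size) {
//     fprintf(stderr, "tsan malloc hook: %p (%zu bytes)\n", ptr, size);
//   }
//   extern "C" void __tsan_free_hook(void *ptr) {
//     fprintf(stderr, "tsan free hook: %p\n", ptr);
//   }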

namespace __tsan {

COMPILER_CHECK(sizeof(MBlock) == 16);

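// Lock()/Unlock() implement a simple spin lock kept in bit 0 of the first
// word of the MBlock itself (hence the reinterpret_cast of `this`): Lock()
// spins with proc_yield(), falling back to internal_sched_yield() after a few
// iterations, until it sets the bit with a compare-exchange; Unlock() simply
// clears the bit.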
void MBlock::Lock() {
  atomic_uintptr_t *a = reinterpret_cast<atomic_uintptr_t*>(this);
  uptr v = atomic_load(a, memory_order_relaxed);
  for (int iter = 0;; iter++) {
    if (v & 1) {
      if (iter < 10)
        proc_yield(20);
      else
        internal_sched_yield();
      v = atomic_load(a, memory_order_relaxed);
      continue;
    }
    if (atomic_compare_exchange_weak(a, &v, v | 1, memory_order_acquire))
      break;
  }
}

void MBlock::Unlock() {
  atomic_uintptr_t *a = reinterpret_cast<atomic_uintptr_t*>(this);
  uptr v = atomic_load(a, memory_order_relaxed);
  DCHECK(v & 1);
  atomic_store(a, v & ~1, memory_order_relaxed);
}

struct MapUnmapCallback {
  void OnMap(uptr p, uptr size) const { }
  void OnUnmap(uptr p, uptr size) const {
    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    DontNeedShadowFor(p, size);
  }
};

static char allocator_placeholder[sizeof(Allocator)] ALIGNED(64);
Allocator *allocator() {
  return reinterpret_cast<Allocator*>(&allocator_placeholder);
}

void InitializeAllocator() {
  allocator()->Init();
}

void AllocatorThreadStart(ThreadState *thr) {
  allocator()->InitCache(&thr->alloc_cache);
  internal_allocator()->InitCache(&thr->internal_alloc_cache);
}

void AllocatorThreadFinish(ThreadState *thr) {
  allocator()->DestroyCache(&thr->alloc_cache);
  internal_allocator()->DestroyCache(&thr->internal_alloc_cache);
}

void AllocatorPrintStats() {
  allocator()->PrintStats();
}

static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
  if (!thr->in_signal_handler || !flags()->report_signal_unsafe)
    return;
  StackTrace stack;
  stack.ObtainCurrent(thr, pc);
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(ReportTypeSignalUnsafe);
  if (!IsFiredSuppression(ctx, rep, stack)) {
    rep.AddStack(&stack);
    OutputReport(ctx, rep, rep.GetReport()->stacks[0]);
  }
}

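// user_alloc() allocates from the TSan allocator, constructs the MBlock
// header in the chunk's metadata area via placement new, and imitates a write
// over the new range so that subsequent accesses are checked against the
// allocation point (unless the thread is currently ignoring reads and
// writes).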
void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align) {
  if ((sz >= (1ull << 40)) || (align >= (1ull << 40)))
    return AllocatorReturnNull();
  void *p = allocator()->Allocate(&thr->alloc_cache, sz, align);
  if (p == 0)
    return 0;
  MBlock *b = new(allocator()->GetMetaData(p)) MBlock;
  b->Init(sz, thr->tid, CurrentStackId(thr, pc));
  if (ctx && ctx->initialized) {
    if (thr->ignore_reads_and_writes == 0)
      MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
    else
      MemoryResetRange(thr, pc, (uptr)p, sz);
  }
  DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
  SignalUnsafeCall(thr, pc);
  return p;
}

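// user_free() destroys any SyncVar objects (per-address synchronization
// metadata) still attached to the block, marks the range as freed in shadow
// so that later accesses are reported as races with the free, and returns the
// chunk to the allocator.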
void user_free(ThreadState *thr, uptr pc, void *p) {
  CHECK_NE(p, (void*)0);
  DPrintf("#%d: free(%p)\n", thr->tid, p);
  MBlock *b = (MBlock*)allocator()->GetMetaData(p);
  if (b->ListHead()) {
    MBlock::ScopedLock l(b);
    for (SyncVar *s = b->ListHead(); s;) {
      SyncVar *res = s;
      s = s->next;
      StatInc(thr, StatSyncDestroyed);
      res->mtx.Lock();
      res->mtx.Unlock();
      DestroyAndFree(res);
    }
    b->ListReset();
  }
  if (ctx && ctx->initialized) {
    if (thr->ignore_reads_and_writes == 0)
      MemoryRangeFreed(thr, pc, (uptr)p, b->Size());
  }
  allocator()->Deallocate(&thr->alloc_cache, p);
  SignalUnsafeCall(thr, pc);
}

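// user_realloc() follows a simple allocate-copy-free scheme: allocate a new
// block, copy min(old size, new size) bytes, then free the old block.
// Passing sz == 0 just frees p.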
void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
  void *p2 = 0;
  // FIXME: Handle "shrinking" more efficiently,
  // it seems that some software actually does this.
  if (sz) {
    p2 = user_alloc(thr, pc, sz);
    if (p2 == 0)
      return 0;
    if (p) {
      MBlock *b = user_mblock(thr, p);
      CHECK_NE(b, 0);
      internal_memcpy(p2, p, min(b->Size(), sz));
    }
  }
  if (p)
    user_free(thr, pc, p);
  return p2;
}

uptr user_alloc_usable_size(ThreadState *thr, uptr pc, void *p) {
  if (p == 0)
    return 0;
  MBlock *b = (MBlock*)allocator()->GetMetaData(p);
  return b ? b->Size() : 0;
}

MBlock *user_mblock(ThreadState *thr, void *p) {
  CHECK_NE(p, 0);
  Allocator *a = allocator();
  void *b = a->GetBlockBegin(p);
  if (b == 0)
    return 0;
  return (MBlock*)a->GetMetaData(b);
}

void invoke_malloc_hook(void *ptr, uptr size) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  __tsan_malloc_hook(ptr, size);
}

void invoke_free_hook(void *ptr) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  __tsan_free_hook(ptr);
}

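// internal_alloc()/internal_free() serve the runtime's own data structures
// (the MBlockType argument categorizes the allocation but is not consulted
// here).  A thread that has set nomalloc must not allocate; the flag is
// cleared before CHECK(0) only because CHECK itself allocates when it fires.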
void *internal_alloc(MBlockType typ, uptr sz) {
  ThreadState *thr = cur_thread();
  CHECK_LE(sz, InternalSizeClassMap::kMaxSize);
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  return InternalAlloc(sz, &thr->internal_alloc_cache);
}

void internal_free(void *p) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  InternalFree(p, &thr->internal_alloc_cache);
}

}  // namespace __tsan

using namespace __tsan;

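// The entry points below expose simple heap introspection counters.
// Illustrative usage sketch (not part of the runtime): a program built with
// -fsanitize=thread could declare and call them directly, e.g.:
//
//   #include <stdio.h>
//   // Return type assumed to match uptr (unsigned long on LP64 targets).
//   extern "C" unsigned long __tsan_get_current_allocated_bytes();
//   extern "C" unsigned long __tsan_get_heap_size();
//
//   void ReportHeapUsage() {
//     fprintf(stderr, "allocated: %lu bytes, heap: %lu bytes\n",
//             __tsan_get_current_allocated_bytes(), __tsan_get_heap_size());
//   }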
extern "C" {
uptr __tsan_get_current_allocated_bytes() {
  u64 stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  u64 m = stats[AllocatorStatMalloced];
  u64 f = stats[AllocatorStatFreed];
  return m >= f ? m - f : 1;
}

uptr __tsan_get_heap_size() {
  u64 stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  u64 m = stats[AllocatorStatMmapped];
  u64 f = stats[AllocatorStatUnmapped];
  return m >= f ? m - f : 1;
}

uptr __tsan_get_free_bytes() {
  return 1;
}

uptr __tsan_get_unmapped_bytes() {
  return 1;
}

uptr __tsan_get_estimated_allocated_size(uptr size) {
  return size;
}

bool __tsan_get_ownership(void *p) {
  return allocator()->GetBlockBegin(p) != 0;
}

uptr __tsan_get_allocated_size(void *p) {
  if (p == 0)
    return 0;
  p = allocator()->GetBlockBegin(p);
  if (p == 0)
    return 0;
  MBlock *b = (MBlock*)allocator()->GetMetaData(p);
  return b->Size();
}

void __tsan_on_thread_idle() {
  ThreadState *thr = cur_thread();
  allocator()->SwallowCache(&thr->alloc_cache);
  internal_allocator()->SwallowCache(&thr->internal_alloc_cache);
}
}  // extern "C"