tsan_mman.cc revision ff90a95c26198e9e794e186365a62511439e0ca0
//===-- tsan_mman.cc ------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_mman.h"
#include "tsan_rtl.h"
#include "tsan_report.h"
#include "tsan_flags.h"

// May be overridden by front-end.
extern "C" void WEAK __tsan_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}

extern "C" void WEAK __tsan_free_hook(void *ptr) {
  (void)ptr;
}
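
// Illustrative only (not part of this file): a front-end can supply strong
// definitions of the hooks above to observe heap activity, e.g.
//   extern "C" void __tsan_malloc_hook(void *ptr, uptr size) {
//     record_allocation(ptr, size);  // hypothetical user callback
//   }
// The hooks run inside the allocator interceptors, so they should stay cheap
// and avoid allocating.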

namespace __tsan {

COMPILER_CHECK(sizeof(MBlock) == 16);

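// MBlock::Lock/Unlock implement a simple spin lock stored in bit 0 of the
// first word of the MBlock itself. Lock() spins with proc_yield() for the
// first few iterations and then falls back to sched_yield(); the successful
// CAS uses acquire ordering so the protected sync list is visible.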
void MBlock::Lock() {
  atomic_uintptr_t *a = reinterpret_cast<atomic_uintptr_t*>(this);
  uptr v = atomic_load(a, memory_order_relaxed);
  for (int iter = 0;; iter++) {
    if (v & 1) {
      if (iter < 10)
        proc_yield(20);
      else
        internal_sched_yield();
      v = atomic_load(a, memory_order_relaxed);
      continue;
    }
    if (atomic_compare_exchange_weak(a, &v, v | 1, memory_order_acquire))
      break;
  }
}

void MBlock::Unlock() {
  atomic_uintptr_t *a = reinterpret_cast<atomic_uintptr_t*>(this);
  uptr v = atomic_load(a, memory_order_relaxed);
  DCHECK(v & 1);
  atomic_store(a, v & ~1, memory_order_relaxed);
}

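// Plugged into the allocator so that shadow memory backing unmapped user
// chunks can be released back to the OS.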
struct MapUnmapCallback {
  void OnMap(uptr p, uptr size) const { }
  void OnUnmap(uptr p, uptr size) const {
    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    DontNeedShadowFor(p, size);
  }
};

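// The allocator lives in a static, cache-line-aligned buffer and is
// initialized explicitly via InitializeAllocator(), avoiding a global
// constructor.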
static char allocator_placeholder[sizeof(Allocator)] ALIGNED(64);
Allocator *allocator() {
  return reinterpret_cast<Allocator*>(&allocator_placeholder);
}

void InitializeAllocator() {
  allocator()->Init();
}

void AllocatorThreadStart(ThreadState *thr) {
  allocator()->InitCache(&thr->alloc_cache);
  internal_allocator()->InitCache(&thr->internal_alloc_cache);
}

void AllocatorThreadFinish(ThreadState *thr) {
  allocator()->DestroyCache(&thr->alloc_cache);
  internal_allocator()->DestroyCache(&thr->internal_alloc_cache);
}

void AllocatorPrintStats() {
  allocator()->PrintStats();
}

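// If the current thread is inside a signal handler and report_signal_unsafe
// is enabled, report the allocation/deallocation as signal-unsafe.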
static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
  if (!thr->in_signal_handler || !flags()->report_signal_unsafe)
    return;
  Context *ctx = CTX();
  StackTrace stack;
  stack.ObtainCurrent(thr, pc);
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(ReportTypeSignalUnsafe);
  if (!IsFiredSuppression(ctx, rep, stack)) {
    rep.AddStack(&stack);
    OutputReport(ctx, rep, rep.GetReport()->stacks[0]);
  }
}

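// Allocates a user heap block: rejects absurd sizes/alignments (>= 2^40),
// obtains memory from the thread-local allocator cache, and initializes the
// MBlock metadata (size, owning thread, allocation stack). Unless memory
// accesses are being ignored, the fresh range is marked as written by this
// thread so that unsynchronized use by other threads is reported.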
void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align) {
  CHECK_GT(thr->in_rtl, 0);
  if ((sz >= (1ull << 40)) || (align >= (1ull << 40)))
    return AllocatorReturnNull();
  void *p = allocator()->Allocate(&thr->alloc_cache, sz, align);
  if (p == 0)
    return 0;
  MBlock *b = new(allocator()->GetMetaData(p)) MBlock;
  b->Init(sz, thr->tid, CurrentStackId(thr, pc));
  if (CTX() && CTX()->initialized) {
    if (thr->ignore_reads_and_writes == 0)
      MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
    else
      MemoryResetRange(thr, pc, (uptr)p, sz);
  }
  DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
  SignalUnsafeCall(thr, pc);
  return p;
}

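// Frees a user heap block: destroys any synchronization objects attached to
// the block's sync list, marks the range as freed in shadow memory (so that
// later accesses are reported as races with the free), and returns the block
// to the thread-local allocator cache.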
void user_free(ThreadState *thr, uptr pc, void *p) {
  CHECK_GT(thr->in_rtl, 0);
  CHECK_NE(p, (void*)0);
  DPrintf("#%d: free(%p)\n", thr->tid, p);
  MBlock *b = (MBlock*)allocator()->GetMetaData(p);
  if (b->ListHead()) {
    MBlock::ScopedLock l(b);
    for (SyncVar *s = b->ListHead(); s;) {
      SyncVar *res = s;
      s = s->next;
      StatInc(thr, StatSyncDestroyed);
      res->mtx.Lock();
      res->mtx.Unlock();
      DestroyAndFree(res);
    }
    b->ListReset();
  }
  if (CTX() && CTX()->initialized && thr->in_rtl == 1) {
    if (thr->ignore_reads_and_writes == 0)
      MemoryRangeFreed(thr, pc, (uptr)p, b->Size());
  }
  allocator()->Deallocate(&thr->alloc_cache, p);
  SignalUnsafeCall(thr, pc);
}

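// realloc() is implemented as allocate-copy-free: a new block of the
// requested size is allocated, min(old size, new size) bytes are copied over,
// and the old block is freed. realloc(p, 0) degenerates to free(p).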
void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
  CHECK_GT(thr->in_rtl, 0);
  void *p2 = 0;
  // FIXME: Handle "shrinking" more efficiently,
  // it seems that some software actually does this.
  if (sz) {
    p2 = user_alloc(thr, pc, sz);
    if (p2 == 0)
      return 0;
    if (p) {
      MBlock *b = user_mblock(thr, p);
      CHECK_NE(b, 0);
      internal_memcpy(p2, p, min(b->Size(), sz));
    }
  }
  if (p)
    user_free(thr, pc, p);
  return p2;
}

uptr user_alloc_usable_size(ThreadState *thr, uptr pc, void *p) {
  CHECK_GT(thr->in_rtl, 0);
  if (p == 0)
    return 0;
  MBlock *b = (MBlock*)allocator()->GetMetaData(p);
  return b ? b->Size() : 0;
}

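// Returns the MBlock metadata for a heap pointer; accepts pointers into the
// interior of a block by first resolving the block's beginning.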
MBlock *user_mblock(ThreadState *thr, void *p) {
  CHECK_NE(p, 0);
  Allocator *a = allocator();
  void *b = a->GetBlockBegin(p);
  if (b == 0)
    return 0;
  return (MBlock*)a->GetMetaData(b);
}

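// The user-supplied hooks are invoked only for user-level allocations: not
// before the runtime is fully initialized and not while executing inside the
// runtime itself (thr->in_rtl).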
void invoke_malloc_hook(void *ptr, uptr size) {
  Context *ctx = CTX();
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->in_rtl)
    return;
  __tsan_malloc_hook(ptr, size);
}

void invoke_free_hook(void *ptr) {
  Context *ctx = CTX();
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->in_rtl)
    return;
  __tsan_free_hook(ptr);
}

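// internal_alloc/internal_free serve the runtime's own data structures via a
// separate internal allocator. The nomalloc flag marks code regions where
// allocation is forbidden; violating it aborts with a CHECK failure.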
void *internal_alloc(MBlockType typ, uptr sz) {
  ThreadState *thr = cur_thread();
  CHECK_GT(thr->in_rtl, 0);
  CHECK_LE(sz, InternalSizeClassMap::kMaxSize);
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  return InternalAlloc(sz, &thr->internal_alloc_cache);
}

void internal_free(void *p) {
  ThreadState *thr = cur_thread();
  CHECK_GT(thr->in_rtl, 0);
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  InternalFree(p, &thr->internal_alloc_cache);
}

}  // namespace __tsan

using namespace __tsan;

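// The extern "C" functions below expose allocator introspection (__tsan_get_*)
// to users. Values are derived from the allocator's global statistics; the
// quantities that are not tracked return placeholder values.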
extern "C" {
uptr __tsan_get_current_allocated_bytes() {
  u64 stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  u64 m = stats[AllocatorStatMalloced];
  u64 f = stats[AllocatorStatFreed];
  return m >= f ? m - f : 1;
}

uptr __tsan_get_heap_size() {
  u64 stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  u64 m = stats[AllocatorStatMmapped];
  u64 f = stats[AllocatorStatUnmapped];
  return m >= f ? m - f : 1;
}

uptr __tsan_get_free_bytes() {
  return 1;
}

uptr __tsan_get_unmapped_bytes() {
  return 1;
}

uptr __tsan_get_estimated_allocated_size(uptr size) {
  return size;
}

bool __tsan_get_ownership(void *p) {
  return allocator()->GetBlockBegin(p) != 0;
}

uptr __tsan_get_allocated_size(void *p) {
  if (p == 0)
    return 0;
  p = allocator()->GetBlockBegin(p);
  if (p == 0)
    return 0;
  MBlock *b = (MBlock*)allocator()->GetMetaData(p);
  return b->Size();
}

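// Called when a thread has been idle: returns the thread-local allocator
// caches to the central allocator so the memory can be reused.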
void __tsan_on_thread_idle() {
  ThreadState *thr = cur_thread();
  allocator()->SwallowCache(&thr->alloc_cache);
  internal_allocator()->SwallowCache(&thr->internal_alloc_cache);
}
}  // extern "C"