//===-- tsan_mman.cc ------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_mman.h"
#include "tsan_rtl.h"
#include "tsan_report.h"
#include "tsan_flags.h"

// May be overridden by the front-end.
21extern "C" void WEAK __tsan_malloc_hook(void *ptr, uptr size) {
22  (void)ptr;
23  (void)size;
24}
25
26extern "C" void WEAK __tsan_free_hook(void *ptr) {
27  (void)ptr;
28}
29
30namespace __tsan {
31
32COMPILER_CHECK(sizeof(MBlock) == 16);
33
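// The first word of MBlock doubles as a spin lock: bit 0 is the lock flag.
// Lock() spins (proc_yield, then sched_yield) until it sets the bit with an
// acquiring compare-exchange; Unlock() clears the bit again.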
void MBlock::Lock() {
  atomic_uintptr_t *a = reinterpret_cast<atomic_uintptr_t*>(this);
  uptr v = atomic_load(a, memory_order_relaxed);
  for (int iter = 0;; iter++) {
    if (v & 1) {
      if (iter < 10)
        proc_yield(20);
      else
        internal_sched_yield();
      v = atomic_load(a, memory_order_relaxed);
      continue;
    }
    if (atomic_compare_exchange_weak(a, &v, v | 1, memory_order_acquire))
      break;
  }
}

void MBlock::Unlock() {
  atomic_uintptr_t *a = reinterpret_cast<atomic_uintptr_t*>(this);
  uptr v = atomic_load(a, memory_order_relaxed);
  DCHECK(v & 1);
  // Release ordering so that writes made under the lock are visible to the
  // next thread that acquires it.
  atomic_store(a, v & ~1, memory_order_release);
}

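// Invoked by the allocator when it maps or unmaps memory for user heap blocks.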
struct MapUnmapCallback {
  void OnMap(uptr p, uptr size) const { }
  void OnUnmap(uptr p, uptr size) const {
    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    DontNeedShadowFor(p, size);
  }
};

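// The allocator lives in statically allocated, cache-line-aligned storage and
// is initialized explicitly in InitializeAllocator(), so no static constructor
// is required.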
static char allocator_placeholder[sizeof(Allocator)] ALIGNED(64);
Allocator *allocator() {
  return reinterpret_cast<Allocator*>(&allocator_placeholder);
}

void InitializeAllocator() {
  allocator()->Init();
}

void AllocatorThreadStart(ThreadState *thr) {
  allocator()->InitCache(&thr->alloc_cache);
}

void AllocatorThreadFinish(ThreadState *thr) {
  allocator()->DestroyCache(&thr->alloc_cache);
}

void AllocatorPrintStats() {
  allocator()->PrintStats();
}

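// Reports a ReportTypeSignalUnsafe error if an allocation routine is called
// from within a signal handler (controlled by the report_signal_unsafe flag).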
static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
  if (!thr->in_signal_handler || !flags()->report_signal_unsafe)
    return;
  Context *ctx = CTX();
  StackTrace stack;
  stack.ObtainCurrent(thr, pc);
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(ReportTypeSignalUnsafe);
  if (!IsFiredSuppression(ctx, rep, stack)) {
    rep.AddStack(&stack);
    OutputReport(ctx, rep, rep.GetReport()->stacks[0]);
  }
}

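// Allocates user memory, initializes the per-block MBlock metadata and marks
// the range as written by the current thread (MemoryRangeImitateWrite).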
void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align) {
  CHECK_GT(thr->in_rtl, 0);
  void *p = allocator()->Allocate(&thr->alloc_cache, sz, align);
  if (p == 0)
    return 0;
  MBlock *b = new(allocator()->GetMetaData(p)) MBlock;
  b->Init(sz, thr->tid, CurrentStackId(thr, pc));
  if (CTX() && CTX()->initialized)
    MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
  DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
  SignalUnsafeCall(thr, pc);
  return p;
}

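// Frees user memory: destroys any synchronization objects attached to the
// block, marks the range as freed in shadow memory and returns the chunk to
// the allocator.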
void user_free(ThreadState *thr, uptr pc, void *p) {
  CHECK_GT(thr->in_rtl, 0);
  CHECK_NE(p, (void*)0);
  DPrintf("#%d: free(%p)\n", thr->tid, p);
  MBlock *b = (MBlock*)allocator()->GetMetaData(p);
  if (b->ListHead()) {
    MBlock::ScopedLock l(b);
    for (SyncVar *s = b->ListHead(); s;) {
      SyncVar *res = s;
      s = s->next;
      StatInc(thr, StatSyncDestroyed);
      res->mtx.Lock();
      res->mtx.Unlock();
      DestroyAndFree(res);
    }
    b->ListReset();
  }
  if (CTX() && CTX()->initialized && thr->in_rtl == 1)
    MemoryRangeFreed(thr, pc, (uptr)p, b->Size());
  allocator()->Deallocate(&thr->alloc_cache, p);
  SignalUnsafeCall(thr, pc);
}

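// realloc() is implemented as allocate-copy-free; the copied size is the
// minimum of the old and new block sizes.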
void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
  CHECK_GT(thr->in_rtl, 0);
  void *p2 = 0;
  // FIXME: Handle "shrinking" more efficiently,
  // it seems that some software actually does this.
  if (sz) {
    p2 = user_alloc(thr, pc, sz);
    if (p2 == 0)
      return 0;
    if (p) {
      MBlock *b = user_mblock(thr, p);
      internal_memcpy(p2, p, min(b->Size(), sz));
    }
  }
  if (p)
    user_free(thr, pc, p);
  return p2;
}

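// Returns the usable size of the block pointed to by p (0 for null).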
uptr user_alloc_usable_size(ThreadState *thr, uptr pc, void *p) {
  CHECK_GT(thr->in_rtl, 0);
  if (p == 0)
    return 0;
  MBlock *b = (MBlock*)allocator()->GetMetaData(p);
  return b ? b->Size() : 0;
}

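// Returns the MBlock metadata for a pointer that may point anywhere inside an
// allocated block.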
MBlock *user_mblock(ThreadState *thr, void *p) {
  CHECK_NE(p, (void*)0);
  Allocator *a = allocator();
  void *b = a->GetBlockBegin(p);
  CHECK_NE(b, 0);
  return (MBlock*)a->GetMetaData(b);
}

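// The user hooks are invoked only for user-level allocations, i.e. not while
// the runtime itself is allocating (thr->in_rtl) and not before the runtime
// is initialized.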
void invoke_malloc_hook(void *ptr, uptr size) {
  Context *ctx = CTX();
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->in_rtl)
    return;
  __tsan_malloc_hook(ptr, size);
}

void invoke_free_hook(void *ptr) {
  Context *ctx = CTX();
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->in_rtl)
    return;
  __tsan_free_hook(ptr);
}

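// internal_alloc()/internal_free() serve the runtime's own data structures and
// must not be called while allocation is temporarily forbidden (thr->nomalloc).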
void *internal_alloc(MBlockType typ, uptr sz) {
  ThreadState *thr = cur_thread();
  CHECK_GT(thr->in_rtl, 0);
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  return InternalAlloc(sz);
}

void internal_free(void *p) {
  ThreadState *thr = cur_thread();
  CHECK_GT(thr->in_rtl, 0);
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  InternalFree(p);
}

}  // namespace __tsan

using namespace __tsan;

extern "C" {
uptr __tsan_get_current_allocated_bytes() {
  u64 stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  u64 m = stats[AllocatorStatMalloced];
  u64 f = stats[AllocatorStatFreed];
  return m >= f ? m - f : 1;
}

uptr __tsan_get_heap_size() {
  u64 stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  u64 m = stats[AllocatorStatMmapped];
  u64 f = stats[AllocatorStatUnmapped];
  return m >= f ? m - f : 1;
}

uptr __tsan_get_free_bytes() {
  return 1;
}

uptr __tsan_get_unmapped_bytes() {
  return 1;
}

uptr __tsan_get_estimated_allocated_size(uptr size) {
  return size;
}

bool __tsan_get_ownership(void *p) {
  return allocator()->GetBlockBegin(p) != 0;
}

uptr __tsan_get_allocated_size(void *p) {
  if (p == 0)
    return 0;
  p = allocator()->GetBlockBegin(p);
  if (p == 0)
    return 0;
  MBlock *b = (MBlock*)allocator()->GetMetaData(p);
  return b->Size();
}

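// Called when a thread becomes idle; releases the thread-local allocator cache
// back to the shared allocator state.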
void __tsan_on_thread_idle() {
  ThreadState *thr = cur_thread();
  allocator()->SwallowCache(&thr->alloc_cache);
}
}  // extern "C"