//===-- tsan_mman.cc ------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_mman.h"
#include "tsan_rtl.h"
#include "tsan_report.h"
#include "tsan_flags.h"

namespace __tsan {

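// The allocator lives in a static buffer and is initialized explicitly via
// InitializeAllocator(), so no global constructor has to run before the
// runtime is ready.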
static char allocator_placeholder[sizeof(Allocator)] ALIGNED(64);
Allocator *allocator() {
  return reinterpret_cast<Allocator*>(&allocator_placeholder);
}

void InitializeAllocator() {
  allocator()->Init();
}

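// Called on thread termination to release the thread-local allocator cache
// back to the global allocator.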
void AllocatorThreadFinish(ThreadState *thr) {
  allocator()->SwallowCache(&thr->alloc_cache);
}

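// Reports a malloc/free call made from inside a signal handler, where such
// calls are async-signal-unsafe (controlled by the report_signal_unsafe flag).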
static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
  if (!thr->in_signal_handler || !flags()->report_signal_unsafe)
    return;
  StackTrace stack;
  stack.ObtainCurrent(thr, pc);
  ScopedReport rep(ReportTypeSignalUnsafe);
  rep.AddStack(&stack);
  OutputReport(rep, rep.GetReport()->stacks[0]);
}

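// Services user malloc/new calls: allocates the block, fills in its metadata,
// and publishes the range to the race detector.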
void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align) {
  CHECK_GT(thr->in_rtl, 0);
  void *p = allocator()->Allocate(&thr->alloc_cache, sz, align);
  if (p == 0)
    return 0;
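  // Record size, owning thread, and allocation stack in the block metadata.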
  MBlock *b = (MBlock*)allocator()->GetMetaData(p);
  b->size = sz;
  b->alloc_tid = thr->unique_id;
  b->alloc_stack_id = CurrentStackId(thr, pc);
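  // Imitate a write to the whole range, so that a later unsynchronized access
  // from another thread races with the allocation itself.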
  if (CTX() && CTX()->initialized) {
    MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
  }
  DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
  SignalUnsafeCall(thr, pc);
  return p;
}

void user_free(ThreadState *thr, uptr pc, void *p) {
  CHECK_GT(thr->in_rtl, 0);
  CHECK_NE(p, (void*)0);
  DPrintf("#%d: free(%p)\n", thr->tid, p);
  MBlock *b = (MBlock*)allocator()->GetMetaData(p);
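  // Destroy any SyncVar objects (e.g. for mutexes located in this block) that
  // are chained off the block's metadata before the memory is reused.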
  if (b->head) {
    Lock l(&b->mtx);
    for (SyncVar *s = b->head; s;) {
      SyncVar *res = s;
      s = s->next;
      StatInc(thr, StatSyncDestroyed);
      res->mtx.Lock();
      res->mtx.Unlock();
      DestroyAndFree(res);
    }
    b->head = 0;
  }
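  // Mark the range as freed so that subsequent accesses are reported as
  // races with the free; done only for top-level (in_rtl == 1) calls.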
  if (CTX() && CTX()->initialized && thr->in_rtl == 1) {
    MemoryRangeFreed(thr, pc, (uptr)p, b->size);
  }
  allocator()->Deallocate(&thr->alloc_cache, p);
  SignalUnsafeCall(thr, pc);
}

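// Realloc is implemented as allocate-copy-free; realloc(p, 0) frees p and
// returns 0.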
void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
  CHECK_GT(thr->in_rtl, 0);
  void *p2 = 0;
  // FIXME: Handle "shrinking" more efficiently;
  // it seems that some software actually does this.
  if (sz) {
    p2 = user_alloc(thr, pc, sz);
    if (p2 == 0)
      return 0;
    if (p) {
      MBlock *b = user_mblock(thr, p);
      internal_memcpy(p2, p, min(b->size, sz));
    }
  }
  if (p) {
    user_free(thr, pc, p);
  }
  return p2;
}

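// Returns the metadata block associated with a user heap pointer.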
MBlock *user_mblock(ThreadState *thr, void *p) {
  // CHECK_GT(thr->in_rtl, 0);
  CHECK_NE(p, (void*)0);
  return (MBlock*)allocator()->GetMetaData(p);
}

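// internal_alloc/internal_free serve the runtime's own data structures.
// thr->nomalloc marks regions where allocation is forbidden; the flag is
// cleared before CHECK fires because CHECK itself allocates.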
void *internal_alloc(MBlockType typ, uptr sz) {
  ThreadState *thr = cur_thread();
  CHECK_GT(thr->in_rtl, 0);
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  return InternalAlloc(sz);
}

void internal_free(void *p) {
  ThreadState *thr = cur_thread();
  CHECK_GT(thr->in_rtl, 0);
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  InternalFree(p);
}

}  // namespace __tsan