tsan_sync.cc revision 21cc85db95b8fa85a9ff7a403c8a24e345d73baf
//===-- tsan_sync.cc ------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_sync.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"

namespace __tsan {

SyncVar::SyncVar(uptr addr, u64 uid)
  : mtx(MutexTypeSyncVar, StatMtxSyncVar)
  , addr(addr)
  , uid(uid)
  , owner_tid(kInvalidTid)
  , last_lock()
  , recursion()
  , is_rw()
  , is_recursive()
  , is_broken()
  , is_linker_init() {
}

SyncTab::Part::Part()
  : mtx(MutexTypeSyncTab, StatMtxSyncTab)
  , val() {
}

SyncTab::SyncTab() {
}

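// Frees all sync objects still linked into the hash table parts.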
SyncTab::~SyncTab() {
  for (int i = 0; i < kPartCount; i++) {
    while (tab_[i].val) {
      SyncVar *tmp = tab_[i].val;
      tab_[i].val = tmp->next;
      DestroyAndFree(tmp);
    }
  }
}

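// Returns the SyncVar for addr with its mutex acquired for writing
// (write_lock) or reading. GetOrCreateAndLock creates the object if it does
// not exist yet; GetIfExistsAndLock returns 0 in that case. Typical usage
// (a sketch only; the SyncTab instance name is illustrative):
//   SyncVar *s = synctab.GetOrCreateAndLock(thr, pc, addr, /*write_lock=*/true);
//   ... inspect or update the sync object ...
//   s->mtx.Unlock();  // or s->mtx.ReadUnlock() if write_lock was false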
SyncVar* SyncTab::GetOrCreateAndLock(ThreadState *thr, uptr pc,
                                     uptr addr, bool write_lock) {
  return GetAndLock(thr, pc, addr, write_lock, true);
}

SyncVar* SyncTab::GetIfExistsAndLock(uptr addr, bool write_lock) {
  return GetAndLock(0, 0, addr, write_lock, false);
}

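// Allocates and constructs a fresh SyncVar with a new uid; the caller is
// responsible for linking it into the appropriate list.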
SyncVar* SyncTab::Create(ThreadState *thr, uptr pc, uptr addr) {
  StatInc(thr, StatSyncCreated);
  void *mem = internal_alloc(MBlockSync, sizeof(SyncVar));
  const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
  SyncVar *res = new(mem) SyncVar(addr, uid);
#ifndef TSAN_GO
  res->creation_stack.ObtainCurrent(thr, pc);
#endif
  return res;
}

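// Looks up (and optionally creates) the SyncVar for addr and returns it with
// its mutex acquired. Lookup order: Java heap objects, then the per-block
// list for addresses owned by the primary allocator (both compiled out for
// the Go runtime), and finally the address-hashed table parts.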
SyncVar* SyncTab::GetAndLock(ThreadState *thr, uptr pc,
                             uptr addr, bool write_lock, bool create) {
#ifndef TSAN_GO
  {  // NOLINT
    SyncVar *res = GetJavaSync(thr, pc, addr, write_lock, create);
    if (res)
      return res;
  }

  // We only ask the PrimaryAllocator here, because
  // SecondaryAllocator::PointerIsMine() is slow and we fall back to
  // the hashmap below anyway.
  if (PrimaryAllocator::PointerIsMine((void*)addr)) {
    MBlock *b = user_mblock(thr, (void*)addr);
    Lock l(&b->mtx);
    SyncVar *res = 0;
    for (res = b->head; res; res = res->next) {
      if (res->addr == addr)
        break;
    }
    if (res == 0) {
      if (!create)
        return 0;
      res = Create(thr, pc, addr);
      res->next = b->head;
      b->head = res;
    }
    if (write_lock)
      res->mtx.Lock();
    else
      res->mtx.ReadLock();
    return res;
  }
#endif

  Part *p = &tab_[PartIdx(addr)];
  {
    ReadLock l(&p->mtx);
    for (SyncVar *res = p->val; res; res = res->next) {
      if (res->addr == addr) {
        if (write_lock)
          res->mtx.Lock();
        else
          res->mtx.ReadLock();
        return res;
      }
    }
  }
  if (!create)
    return 0;
  {
    Lock l(&p->mtx);
    SyncVar *res = p->val;
    for (; res; res = res->next) {
      if (res->addr == addr)
        break;
    }
    if (res == 0) {
      res = Create(thr, pc, addr);
      res->next = p->val;
      p->val = res;
    }
    if (write_lock)
      res->mtx.Lock();
    else
      res->mtx.ReadLock();
    return res;
  }
}

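// Unlinks the SyncVar for addr from its list and returns it (unlocked),
// or returns 0 if there is no such object. Linker-initialized sync objects
// are never removed.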
SyncVar* SyncTab::GetAndRemove(ThreadState *thr, uptr pc, uptr addr) {
#ifndef TSAN_GO
  {  // NOLINT
    SyncVar *res = GetAndRemoveJavaSync(thr, pc, addr);
    if (res)
      return res;
  }
  if (PrimaryAllocator::PointerIsMine((void*)addr)) {
    MBlock *b = user_mblock(thr, (void*)addr);
    SyncVar *res = 0;
    {
      Lock l(&b->mtx);
      SyncVar **prev = &b->head;
      res = *prev;
      while (res) {
        if (res->addr == addr) {
          if (res->is_linker_init)
            return 0;
          *prev = res->next;
          break;
        }
        prev = &res->next;
        res = *prev;
      }
    }
    if (res) {
      StatInc(thr, StatSyncDestroyed);
      res->mtx.Lock();
      res->mtx.Unlock();
    }
    return res;
  }
#endif

  Part *p = &tab_[PartIdx(addr)];
  SyncVar *res = 0;
  {
    Lock l(&p->mtx);
    SyncVar **prev = &p->val;
    res = *prev;
    while (res) {
      if (res->addr == addr) {
        if (res->is_linker_init)
          return 0;
        *prev = res->next;
        break;
      }
      prev = &res->next;
      res = *prev;
    }
  }
  if (res) {
    StatInc(thr, StatSyncDestroyed);
    res->mtx.Lock();
    res->mtx.Unlock();
  }
  return res;
}

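// Approximate memory usage of this sync object, including its vector clocks
// and creation stack.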
uptr SyncVar::GetMemoryConsumption() {
  return sizeof(*this)
      + clock.size() * sizeof(u64)
      + read_clock.size() * sizeof(u64)
      + creation_stack.Size() * sizeof(uptr);
}

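// Total memory consumed by sync objects in the table; also increments *nsync
// once per object (the caller is expected to initialize the counter).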
uptr SyncTab::GetMemoryConsumption(uptr *nsync) {
  uptr mem = 0;
  for (int i = 0; i < kPartCount; i++) {
    Part *p = &tab_[i];
    Lock l(&p->mtx);
    for (SyncVar *s = p->val; s; s = s->next) {
      *nsync += 1;
      mem += s->GetMemoryConsumption();
    }
  }
  return mem;
}

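// Maps an address to one of the kPartCount table parts (the low 3 bits are
// ignored).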
int SyncTab::PartIdx(uptr addr) {
  return (addr >> 3) % kPartCount;
}

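// A StackTrace either owns a heap-allocated buffer (default constructor) or
// wraps a caller-provided buffer of fixed capacity cnt; Reset() frees only
// the former.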
StackTrace::StackTrace()
    : n_()
    , s_()
    , c_() {
}

StackTrace::StackTrace(uptr *buf, uptr cnt)
    : n_()
    , s_(buf)
    , c_(cnt) {
  CHECK_NE(buf, 0);
  CHECK_NE(cnt, 0);
}

StackTrace::~StackTrace() {
  Reset();
}

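// Releases the heap-allocated buffer (if any) and clears the trace.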
void StackTrace::Reset() {
  if (s_ && !c_) {
    CHECK_NE(n_, 0);
    internal_free(s_);
    s_ = 0;
  }
  n_ = 0;
}

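// Copies cnt program counters from pcs into the trace, allocating storage
// unless the trace wraps a caller-provided buffer.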
void StackTrace::Init(const uptr *pcs, uptr cnt) {
  Reset();
  if (cnt == 0)
    return;
  if (c_) {
    CHECK_NE(s_, 0);
    CHECK_LE(cnt, c_);
  } else {
    s_ = (uptr*)internal_alloc(MBlockStackTrace, cnt * sizeof(s_[0]));
  }
  n_ = cnt;
  internal_memcpy(s_, pcs, cnt * sizeof(s_[0]));
}

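// Captures the current shadow stack of thr, optionally appending toppc as the
// topmost frame. If the trace wraps a fixed-capacity buffer and the stack does
// not fit, the oldest (bottom) frames are dropped.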
void StackTrace::ObtainCurrent(ThreadState *thr, uptr toppc) {
  Reset();
  n_ = thr->shadow_stack_pos - thr->shadow_stack;
  if (n_ + !!toppc == 0)
    return;
  uptr start = 0;
  if (c_) {
    CHECK_NE(s_, 0);
    if (n_ + !!toppc > c_) {
      start = n_ - c_ + !!toppc;
      n_ = c_ - !!toppc;
    }
  } else {
    s_ = (uptr*)internal_alloc(MBlockStackTrace,
                               (n_ + !!toppc) * sizeof(s_[0]));
  }
  for (uptr i = 0; i < n_; i++)
    s_[i] = thr->shadow_stack[start + i];
  if (toppc) {
    s_[n_] = toppc;
    n_++;
  }
}

void StackTrace::CopyFrom(const StackTrace& other) {
  Reset();
  Init(other.Begin(), other.Size());
}

bool StackTrace::IsEmpty() const {
  return n_ == 0;
}

uptr StackTrace::Size() const {
  return n_;
}

uptr StackTrace::Get(uptr i) const {
  CHECK_LT(i, n_);
  return s_[i];
}

const uptr *StackTrace::Begin() const {
  return s_;
}

}  // namespace __tsan