tsan_sync.cc revision 01a7ce809bf7cc627d73c045c70bcca9891f632c
//===-- tsan_sync.cc ------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_sync.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"

namespace __tsan {

SyncVar::SyncVar(uptr addr, u64 uid)
  : mtx(MutexTypeSyncVar, StatMtxSyncVar)
  , addr(addr)
  , uid(uid)
  , owner_tid(kInvalidTid)
  , last_lock()
  , recursion()
  , is_rw()
  , is_recursive()
  , is_broken()
  , is_linker_init() {
}

SyncTab::Part::Part()
  : mtx(MutexTypeSyncTab, StatMtxSyncTab)
  , val() {
}

SyncTab::SyncTab() {
}

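// Frees any SyncVar objects still linked into the hash table parts.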
SyncTab::~SyncTab() {
  for (int i = 0; i < kPartCount; i++) {
    while (tab_[i].val) {
      SyncVar *tmp = tab_[i].val;
      tab_[i].val = tmp->next;
      DestroyAndFree(tmp);
    }
  }
}

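// Thin wrappers over GetAndLock: the first creates the SyncVar if it is
// missing, the second only looks up an existing one.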
SyncVar* SyncTab::GetOrCreateAndLock(ThreadState *thr, uptr pc,
                                     uptr addr, bool write_lock) {
  return GetAndLock(thr, pc, addr, write_lock, true);
}

SyncVar* SyncTab::GetIfExistsAndLock(uptr addr, bool write_lock) {
  return GetAndLock(0, 0, addr, write_lock, false);
}

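// Allocates a fresh SyncVar for addr with a unique id and, in C++ (non-Go)
// builds, records the stack at which it was created.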
SyncVar* SyncTab::Create(ThreadState *thr, uptr pc, uptr addr) {
  StatInc(thr, StatSyncCreated);
  void *mem = internal_alloc(MBlockSync, sizeof(SyncVar));
  const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
  SyncVar *res = new(mem) SyncVar(addr, uid);
#ifndef TSAN_GO
  res->creation_stack_id = CurrentStackId(thr, pc);
#endif
  return res;
}

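// Looks up (and optionally creates) the SyncVar for addr and returns it with
// its mutex held for writing or reading. In C++ (non-Go) builds Java sync
// objects are checked first, then the per-block list for addresses owned by
// the primary allocator; everything else goes through the address-hashed
// table below.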
SyncVar* SyncTab::GetAndLock(ThreadState *thr, uptr pc,
                             uptr addr, bool write_lock, bool create) {
#ifndef TSAN_GO
  {  // NOLINT
    SyncVar *res = GetJavaSync(thr, pc, addr, write_lock, create);
    if (res)
      return res;
  }

  // Here we ask only PrimaryAllocator, because
  // SecondaryAllocator::PointerIsMine() is slow and we have a fallback on
  // the hashmap anyway.
  if (PrimaryAllocator::PointerIsMine((void*)addr)) {
    MBlock *b = user_mblock(thr, (void*)addr);
    CHECK_NE(b, 0);
    MBlock::ScopedLock l(b);
    SyncVar *res = 0;
    for (res = b->ListHead(); res; res = res->next) {
      if (res->addr == addr)
        break;
    }
    if (res == 0) {
      if (!create)
        return 0;
      res = Create(thr, pc, addr);
      b->ListPush(res);
    }
    if (write_lock)
      res->mtx.Lock();
    else
      res->mtx.ReadLock();
    return res;
  }
#endif

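  // Generic path: hash the address into a table part, first probing under a
  // read lock, and taking the part's write lock only if the SyncVar has to
  // be inserted.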
  Part *p = &tab_[PartIdx(addr)];
  {
    ReadLock l(&p->mtx);
    for (SyncVar *res = p->val; res; res = res->next) {
      if (res->addr == addr) {
        if (write_lock)
          res->mtx.Lock();
        else
          res->mtx.ReadLock();
        return res;
      }
    }
  }
  if (!create)
    return 0;
  {
    Lock l(&p->mtx);
    SyncVar *res = p->val;
    for (; res; res = res->next) {
      if (res->addr == addr)
        break;
    }
    if (res == 0) {
      res = Create(thr, pc, addr);
      res->next = p->val;
      p->val = res;
    }
    if (write_lock)
      res->mtx.Lock();
    else
      res->mtx.ReadLock();
    return res;
  }
}

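// Unlinks and returns the SyncVar for addr (0 if there is none, or if it is
// linker-initialized and therefore never removed). The Lock/Unlock pair
// before returning waits for any concurrent holders of the SyncVar mutex to
// drain, presumably so the caller can destroy the object safely.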
SyncVar* SyncTab::GetAndRemove(ThreadState *thr, uptr pc, uptr addr) {
#ifndef TSAN_GO
  {  // NOLINT
    SyncVar *res = GetAndRemoveJavaSync(thr, pc, addr);
    if (res)
      return res;
  }
  if (PrimaryAllocator::PointerIsMine((void*)addr)) {
    MBlock *b = user_mblock(thr, (void*)addr);
    CHECK_NE(b, 0);
    SyncVar *res = 0;
    {
      MBlock::ScopedLock l(b);
      res = b->ListHead();
      if (res) {
        if (res->addr == addr) {
          if (res->is_linker_init)
            return 0;
          b->ListPop();
        } else {
          SyncVar **prev = &res->next;
          res = *prev;
          while (res) {
            if (res->addr == addr) {
              if (res->is_linker_init)
                return 0;
              *prev = res->next;
              break;
            }
            prev = &res->next;
            res = *prev;
          }
        }
        if (res) {
          StatInc(thr, StatSyncDestroyed);
          res->mtx.Lock();
          res->mtx.Unlock();
        }
      }
    }
    return res;
  }
#endif

  Part *p = &tab_[PartIdx(addr)];
  SyncVar *res = 0;
  {
    Lock l(&p->mtx);
    SyncVar **prev = &p->val;
    res = *prev;
    while (res) {
      if (res->addr == addr) {
        if (res->is_linker_init)
          return 0;
        *prev = res->next;
        break;
      }
      prev = &res->next;
      res = *prev;
    }
  }
  if (res) {
    StatInc(thr, StatSyncDestroyed);
    res->mtx.Lock();
    res->mtx.Unlock();
  }
  return res;
}

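// Maps an address to a hash table part; the low 3 bits are dropped,
// presumably because sync addresses are typically at least 8-byte aligned.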
int SyncTab::PartIdx(uptr addr) {
  return (addr >> 3) % kPartCount;
}

StackTrace::StackTrace()
    : n_()
    , s_()
    , c_() {
}

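// Constructs a trace over a caller-provided buffer of cnt entries; the trace
// never grows past it and never frees it.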
StackTrace::StackTrace(uptr *buf, uptr cnt)
    : n_()
    , s_(buf)
    , c_(cnt) {
  CHECK_NE(buf, 0);
  CHECK_NE(cnt, 0);
}

StackTrace::~StackTrace() {
  Reset();
}

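// Drops the current contents; the storage is freed only if it was allocated
// here (c_ == 0 means the buffer is not caller-owned).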
void StackTrace::Reset() {
  if (s_ && !c_) {
    CHECK_NE(n_, 0);
    internal_free(s_);
    s_ = 0;
  }
  n_ = 0;
}

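// Copies cnt PCs into the trace, reusing the fixed buffer if one was
// supplied or allocating a new one otherwise.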
void StackTrace::Init(const uptr *pcs, uptr cnt) {
  Reset();
  if (cnt == 0)
    return;
  if (c_) {
    CHECK_NE(s_, 0);
    CHECK_LE(cnt, c_);
  } else {
    s_ = (uptr*)internal_alloc(MBlockStackTrace, cnt * sizeof(s_[0]));
  }
  n_ = cnt;
  internal_memcpy(s_, pcs, cnt * sizeof(s_[0]));
}

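// Snapshots the thread's shadow stack, optionally appending toppc as the
// innermost frame. Stacks that do not fit (into the fixed buffer, or into
// kTraceStackSize when allocating) are truncated by dropping the outermost
// frames.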
void StackTrace::ObtainCurrent(ThreadState *thr, uptr toppc) {
  Reset();
  n_ = thr->shadow_stack_pos - thr->shadow_stack;
  if (n_ + !!toppc == 0)
    return;
  uptr start = 0;
  if (c_) {
    CHECK_NE(s_, 0);
    if (n_ + !!toppc > c_) {
      start = n_ - c_ + !!toppc;
      n_ = c_ - !!toppc;
    }
  } else {
    // Cap potentially huge stacks.
    if (n_ + !!toppc > kTraceStackSize) {
      start = n_ - kTraceStackSize + !!toppc;
      n_ = kTraceStackSize - !!toppc;
    }
    s_ = (uptr*)internal_alloc(MBlockStackTrace,
                               (n_ + !!toppc) * sizeof(s_[0]));
  }
  for (uptr i = 0; i < n_; i++)
    s_[i] = thr->shadow_stack[start + i];
  if (toppc) {
    s_[n_] = toppc;
    n_++;
  }
}

void StackTrace::CopyFrom(const StackTrace& other) {
  Reset();
  Init(other.Begin(), other.Size());
}

bool StackTrace::IsEmpty() const {
  return n_ == 0;
}

uptr StackTrace::Size() const {
  return n_;
}

uptr StackTrace::Get(uptr i) const {
  CHECK_LT(i, n_);
  return s_[i];
}

const uptr *StackTrace::Begin() const {
  return s_;
}

}  // namespace __tsan