tsan_rtl_mutex.cc revision fcb6c9c80f06c241801d22d2258f59e7bb828e7b
1//===-- tsan_rtl_mutex.cc -------------------------------------------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file is a part of ThreadSanitizer (TSan), a race detector.
11//
12//===----------------------------------------------------------------------===//
13
14#include "tsan_rtl.h"
15#include "tsan_flags.h"
16#include "tsan_sync.h"
17#include "tsan_report.h"
18#include "tsan_symbolize.h"
19#include "tsan_platform.h"
20
21namespace __tsan {
22
// Registers creation of a user mutex at |addr|.
// |rw|, |recursive|, |linker_init| record the mutex flavor reported by the
// interceptor onto the SyncVar for later checks/reports.
void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
                 bool rw, bool recursive, bool linker_init) {
  Context *ctx = CTX();
  CHECK_GT(thr->in_rtl, 0);
  DPrintf("#%d: MutexCreate %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexCreate);
  if (!linker_init && IsAppMem(addr)) {
    // Issue a shadow write on the mutex memory so that racy creation is
    // detected. The write is bracketed with is_freeing — NOTE(review):
    // presumably so a race on this access is reported with the
    // destruction/creation context, mirroring MutexDestroy; confirm.
    // Linker-initialized mutexes are skipped: they may be used before
    // MutexCreate is called.
    CHECK(!thr->is_freeing);
    thr->is_freeing = true;
    MemoryWrite(thr, pc, addr, kSizeLog1);
    thr->is_freeing = false;
  }
  // GetOrCreateAndLock(..., true) returns s write-locked.
  SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
  s->is_rw = rw;
  s->is_recursive = recursive;
  s->is_linker_init = linker_init;
  s->mtx.Unlock();
}
41
// Registers destruction of the mutex at |addr|: removes its SyncVar from the
// sync table and, if enabled, reports destruction of a still-locked mutex.
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
  Context *ctx = CTX();
  CHECK_GT(thr->in_rtl, 0);
  DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexDestroy);
#ifndef TSAN_GO
  // Global mutexes not marked as LINKER_INITIALIZED
  // cause tons of not interesting reports, so just ignore it.
  if (IsGlobalVar(addr))
    return;
#endif
  SyncVar *s = ctx->synctab.GetAndRemove(thr, pc, addr);
  if (s == 0)  // Never used as a sync object -- nothing to do.
    return;
  if (IsAppMem(addr)) {
    // Shadow write flagged as "freeing": a racing access to the mutex
    // memory is reported with the destruction context.
    CHECK(!thr->is_freeing);
    thr->is_freeing = true;
    MemoryWrite(thr, pc, addr, kSizeLog1);
    thr->is_freeing = false;
  }
  if (flags()->report_destroy_locked
      && s->owner_tid != SyncVar::kInvalidTid
      && !s->is_broken) {
    s->is_broken = true;  // Report each broken mutex at most once.
    ThreadRegistryLock l(ctx->thread_registry);
    ScopedReport rep(ReportTypeMutexDestroyLocked);
    rep.AddMutex(s);
    // First stack: where the mutex is being destroyed (current stack).
    StackTrace trace;
    trace.ObtainCurrent(thr, pc);
    rep.AddStack(&trace);
    // Second stack: where the mutex was last write-locked, restored from
    // the trace position recorded in s->last_lock (trace is reused).
    FastState last(s->last_lock);
    RestoreStack(last.tid(), last.epoch(), &trace, 0);
    rep.AddStack(&trace);
    rep.AddLocation(s->addr, 1);
    OutputReport(ctx, rep);
  }
  thr->mset.Remove(s->GetId());
  DestroyAndFree(s);
}
81
// Handles write-lock of the mutex at |addr|.
// |rec| is the number of recursion levels acquired in one shot (must be > 0);
// it is simply added to s->recursion.
void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec) {
  CHECK_GT(thr->in_rtl, 0);
  DPrintf("#%d: MutexLock %zx rec=%d\n", thr->tid, addr, rec);
  CHECK_GT(rec, 0);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
  // Each synchronization event gets its own epoch and trace entry, so the
  // mutex set can be restored at report time.
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId());
  if (s->owner_tid == SyncVar::kInvalidTid) {
    // Mutex is free: take ownership and remember the lock position
    // (last_lock) for MutexDestroy reports.
    CHECK_EQ(s->recursion, 0);
    s->owner_tid = thr->tid;
    s->last_lock = thr->fast_state.raw();
  } else if (s->owner_tid == thr->tid) {
    // Recursive lock by the current owner.
    CHECK_GT(s->recursion, 0);
  } else {
    // Write lock while the mutex is held by a different thread.
    Printf("ThreadSanitizer WARNING: double lock of mutex %p\n", addr);
    PrintCurrentStack(thr, pc);
  }
  if (s->recursion == 0) {
    // Outermost lock: acquire happens-before from both prior writers
    // (s->clock) and prior readers (s->read_clock).
    StatInc(thr, StatMutexLock);
    thr->clock.set(thr->tid, thr->fast_state.epoch());
    thr->clock.acquire(&s->clock);
    StatInc(thr, StatSyncAcquire);
    thr->clock.acquire(&s->read_clock);
    StatInc(thr, StatSyncAcquire);
  } else if (!s->is_recursive) {
    // NOTE(review): the recursive-lock stat is bumped only when the mutex
    // is NOT flagged recursive -- condition looks inverted; confirm intent.
    StatInc(thr, StatMutexRecLock);
  }
  s->recursion += rec;
  thr->mset.Add(s->GetId(), true, thr->fast_state.epoch());
  s->mtx.Unlock();
}
115
// Handles write-unlock of the mutex at |addr|.
// If |all| is true, drops all recursion levels at once.
// Returns the number of recursion levels actually released
// (0 if the unlock was detected as erroneous).
int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all) {
  CHECK_GT(thr->in_rtl, 0);
  DPrintf("#%d: MutexUnlock %zx all=%d\n", thr->tid, addr, all);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
  int rec = 0;
  if (s->recursion == 0) {
    if (!s->is_broken) {
      s->is_broken = true;  // Warn only once per mutex.
      Printf("ThreadSanitizer WARNING: unlock of unlocked mutex %p\n", addr);
      PrintCurrentStack(thr, pc);
    }
  } else if (s->owner_tid != thr->tid) {
    if (!s->is_broken) {
      s->is_broken = true;
      Printf("ThreadSanitizer WARNING: mutex %p is unlocked by wrong thread\n",
             addr);
      PrintCurrentStack(thr, pc);
    }
  } else {
    rec = all ? s->recursion : 1;
    s->recursion -= rec;
    if (s->recursion == 0) {
      // Outermost unlock: release-store this thread's vector clock into
      // the sync object so the next locker acquires it.
      StatInc(thr, StatMutexUnlock);
      s->owner_tid = SyncVar::kInvalidTid;
      thr->clock.set(thr->tid, thr->fast_state.epoch());
      thr->fast_synch_epoch = thr->fast_state.epoch();
      thr->clock.ReleaseStore(&s->clock);
      StatInc(thr, StatSyncRelease);
    } else {
      StatInc(thr, StatMutexRecUnlock);
    }
  }
  thr->mset.Del(s->GetId(), true);
  s->mtx.Unlock();
  return rec;
}
156
// Handles read-lock (shared lock) of the mutex at |addr|.
void MutexReadLock(ThreadState *thr, uptr pc, uptr addr) {
  CHECK_GT(thr->in_rtl, 0);
  DPrintf("#%d: MutexReadLock %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexReadLock);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  // Only a read lock on the SyncVar: multiple readers may proceed
  // concurrently.
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, false);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId());
  if (s->owner_tid != SyncVar::kInvalidTid) {
    // A write owner is recorded -- read lock of a write-locked mutex.
    Printf("ThreadSanitizer WARNING: read lock of a write locked mutex %p\n",
           addr);
    PrintCurrentStack(thr, pc);
  }
  // Readers acquire only from prior writers (s->clock), not from each other.
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  thr->clock.acquire(&s->clock);
  s->last_lock = thr->fast_state.raw();
  StatInc(thr, StatSyncAcquire);
  thr->mset.Add(s->GetId(), false, thr->fast_state.epoch());
  s->mtx.ReadUnlock();
}
178
// Handles read-unlock (shared unlock) of the mutex at |addr|.
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
  CHECK_GT(thr->in_rtl, 0);
  DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexReadUnlock);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  // Write lock on the SyncVar: read_clock is mutated below.
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
  if (s->owner_tid != SyncVar::kInvalidTid) {
    Printf("ThreadSanitizer WARNING: read unlock of a write locked mutex %p\n",
           addr);
    PrintCurrentStack(thr, pc);
  }
  // Release into read_clock so the next writer acquires from all readers.
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.release(&s->read_clock);
  StatInc(thr, StatSyncRelease);
  s->mtx.Unlock();
  // NOTE(review): mset.Del happens after s->mtx.Unlock() here, unlike the
  // sibling functions; mset is per-thread so this looks benign -- confirm.
  thr->mset.Del(s->GetId(), false);
}
200
201void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
202  CHECK_GT(thr->in_rtl, 0);
203  DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
204  if (IsAppMem(addr))
205    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
206  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
207  bool write = true;
208  if (s->owner_tid == SyncVar::kInvalidTid) {
209    // Seems to be read unlock.
210    write = false;
211    StatInc(thr, StatMutexReadUnlock);
212    thr->fast_state.IncrementEpoch();
213    TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
214    thr->clock.set(thr->tid, thr->fast_state.epoch());
215    thr->fast_synch_epoch = thr->fast_state.epoch();
216    thr->clock.release(&s->read_clock);
217    StatInc(thr, StatSyncRelease);
218  } else if (s->owner_tid == thr->tid) {
219    // Seems to be write unlock.
220    thr->fast_state.IncrementEpoch();
221    TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
222    CHECK_GT(s->recursion, 0);
223    s->recursion--;
224    if (s->recursion == 0) {
225      StatInc(thr, StatMutexUnlock);
226      s->owner_tid = SyncVar::kInvalidTid;
227      // FIXME: Refactor me, plz.
228      // The sequence of events is quite tricky and doubled in several places.
229      // First, it's a bug to increment the epoch w/o writing to the trace.
230      // Then, the acquire/release logic can be factored out as well.
231      thr->clock.set(thr->tid, thr->fast_state.epoch());
232      thr->fast_synch_epoch = thr->fast_state.epoch();
233      thr->clock.ReleaseStore(&s->clock);
234      StatInc(thr, StatSyncRelease);
235    } else {
236      StatInc(thr, StatMutexRecUnlock);
237    }
238  } else if (!s->is_broken) {
239    s->is_broken = true;
240    Printf("ThreadSanitizer WARNING: mutex %p is unlock by wrong thread\n",
241           addr);
242    PrintCurrentStack(thr, pc);
243  }
244  thr->mset.Del(s->GetId(), write);
245  s->mtx.Unlock();
246}
247
248void Acquire(ThreadState *thr, uptr pc, uptr addr) {
249  CHECK_GT(thr->in_rtl, 0);
250  DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
251  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, false);
252  thr->clock.set(thr->tid, thr->fast_state.epoch());
253  thr->clock.acquire(&s->clock);
254  StatInc(thr, StatSyncAcquire);
255  s->mtx.ReadUnlock();
256}
257
258static void UpdateClockCallback(ThreadContextBase *tctx_base, void *arg) {
259  ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
260  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
261  if (tctx->status == ThreadStatusRunning)
262    thr->clock.set(tctx->tid, tctx->thr->fast_state.epoch());
263  else
264    thr->clock.set(tctx->tid, tctx->epoch1);
265}
266
267void AcquireGlobal(ThreadState *thr, uptr pc) {
268  ThreadRegistryLock l(CTX()->thread_registry);
269  CTX()->thread_registry->RunCallbackForEachThreadLocked(
270      UpdateClockCallback, thr);
271}
272
273void Release(ThreadState *thr, uptr pc, uptr addr) {
274  CHECK_GT(thr->in_rtl, 0);
275  DPrintf("#%d: Release %zx\n", thr->tid, addr);
276  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
277  thr->clock.set(thr->tid, thr->fast_state.epoch());
278  thr->clock.release(&s->clock);
279  StatInc(thr, StatSyncRelease);
280  s->mtx.Unlock();
281}
282
283void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
284  CHECK_GT(thr->in_rtl, 0);
285  DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
286  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
287  thr->clock.set(thr->tid, thr->fast_state.epoch());
288  thr->clock.ReleaseStore(&s->clock);
289  StatInc(thr, StatSyncRelease);
290  s->mtx.Unlock();
291}
292
293#ifndef TSAN_GO
294static void UpdateSleepClockCallback(ThreadContextBase *tctx_base, void *arg) {
295  ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
296  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
297  if (tctx->status == ThreadStatusRunning)
298    thr->last_sleep_clock.set(tctx->tid, tctx->thr->fast_state.epoch());
299  else
300    thr->last_sleep_clock.set(tctx->tid, tctx->epoch1);
301}
302
303void AfterSleep(ThreadState *thr, uptr pc) {
304  thr->last_sleep_stack_id = CurrentStackId(thr, pc);
305  ThreadRegistryLock l(CTX()->thread_registry);
306  CTX()->thread_registry->RunCallbackForEachThreadLocked(
307      UpdateSleepClockCallback, thr);
308}
309#endif
310
311}  // namespace __tsan
312