//===-- tsan_rtl_mutex.cc -------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include <sanitizer_common/sanitizer_deadlock_detector_interface.h>
#include <sanitizer_common/sanitizer_stackdepot.h>

#include "tsan_rtl.h"
#include "tsan_flags.h"
#include "tsan_sync.h"
#include "tsan_report.h"
#include "tsan_symbolize.h"
#include "tsan_platform.h"

namespace __tsan {

void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r);

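// Callback adapts the current thread and pc to the DDCallback interface
// expected by the common deadlock detector: stack traces are captured
// lazily via CurrentStackId() and threads are identified by unique_id.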
struct Callback : DDCallback {
  ThreadState *thr;
  uptr pc;

  Callback(ThreadState *thr, uptr pc)
      : thr(thr)
      , pc(pc) {
    DDCallback::pt = thr->dd_pt;
    DDCallback::lt = thr->dd_lt;
  }

  virtual u32 Unwind() {
    return CurrentStackId(thr, pc);
  }
  virtual int UniqueTid() {
    return thr->unique_id;
  }
};

void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s) {
  Callback cb(thr, pc);
  ctx->dd->MutexInit(&cb, &s->dd);
  s->dd.ctx = s->GetId();
}

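// Emits a mutex-misuse report (double lock, bad unlock, ...) for the mutex
// identified by mid. Callers invoke this only after releasing the SyncVar's
// own lock (see the "Can't touch s after this point" comments below).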
static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
    uptr addr, u64 mid) {
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(typ);
  rep.AddMutex(mid);
  StackTrace trace;
  trace.ObtainCurrent(thr, pc);
  rep.AddStack(&trace);
  rep.AddLocation(addr, 1);
  OutputReport(ctx, rep, rep.GetReport()->stacks[0]);
}

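// Called when the program creates/initializes a mutex. Unless the mutex is
// linker-initialized, a synthetic 1-byte write to the mutex memory is issued
// so that races between initialization and other accesses are detected; the
// rw/recursive/linker_init attributes are then recorded on the SyncVar.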
void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
                 bool rw, bool recursive, bool linker_init) {
  DPrintf("#%d: MutexCreate %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexCreate);
  if (!linker_init && IsAppMem(addr)) {
    CHECK(!thr->is_freeing);
    thr->is_freeing = true;
    MemoryWrite(thr, pc, addr, kSizeLog1);
    thr->is_freeing = false;
  }
  SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
  s->is_rw = rw;
  s->is_recursive = recursive;
  s->is_linker_init = linker_init;
  s->mtx.Unlock();
}

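// Called when the program destroys a mutex. Removes the SyncVar from the
// sync table, notifies the deadlock detector, issues a synthetic write to
// catch races with destruction, and (under report_destroy_locked) reports
// destruction of a still-locked mutex.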
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexDestroy);
#ifndef TSAN_GO
  // Global mutexes not marked as LINKER_INITIALIZED
  // cause tons of uninteresting reports, so just ignore them.
  if (IsGlobalVar(addr))
    return;
#endif
  SyncVar *s = ctx->synctab.GetAndRemove(thr, pc, addr);
  if (s == 0)
    return;
  if (flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ctx->dd->MutexDestroy(&cb, &s->dd);
  }
  if (IsAppMem(addr)) {
    CHECK(!thr->is_freeing);
    thr->is_freeing = true;
    MemoryWrite(thr, pc, addr, kSizeLog1);
    thr->is_freeing = false;
  }
  if (flags()->report_destroy_locked
      && s->owner_tid != SyncVar::kInvalidTid
      && !s->is_broken) {
    s->is_broken = true;
    ThreadRegistryLock l(ctx->thread_registry);
    ScopedReport rep(ReportTypeMutexDestroyLocked);
    rep.AddMutex(s);
    StackTrace trace;
    trace.ObtainCurrent(thr, pc);
    rep.AddStack(&trace);
    FastState last(s->last_lock);
    RestoreStack(last.tid(), last.epoch(), &trace, 0);
    rep.AddStack(&trace);
    rep.AddLocation(s->addr, 1);
    OutputReport(ctx, rep, rep.GetReport()->stacks[0]);
  }
  thr->mset.Remove(s->GetId());
  DestroyAndFree(s);
}

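// Write (exclusive) lock. rec is the number of recursion levels acquired at
// once (must be positive). For try-locks the MutexBeforeLock hook is
// skipped: a trylock never blocks, so it cannot by itself complete a
// lock-order cycle.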
void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec, bool try_lock) {
  DPrintf("#%d: MutexLock %zx rec=%d\n", thr->tid, addr, rec);
  CHECK_GT(rec, 0);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId());
  bool report_double_lock = false;
  if (s->owner_tid == SyncVar::kInvalidTid) {
    CHECK_EQ(s->recursion, 0);
    s->owner_tid = thr->tid;
    s->last_lock = thr->fast_state.raw();
  } else if (s->owner_tid == thr->tid) {
    CHECK_GT(s->recursion, 0);
  } else if (flags()->report_mutex_bugs && !s->is_broken) {
    s->is_broken = true;
    report_double_lock = true;
  }
  if (s->recursion == 0) {
    StatInc(thr, StatMutexLock);
    AcquireImpl(thr, pc, &s->clock);
    AcquireImpl(thr, pc, &s->read_clock);
  } else if (!s->is_recursive) {
    StatInc(thr, StatMutexRecLock);
  }
  s->recursion += rec;
  thr->mset.Add(s->GetId(), true, thr->fast_state.epoch());
  if (flags()->detect_deadlocks && s->recursion == 1) {
    Callback cb(thr, pc);
    if (!try_lock)
      ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
    ctx->dd->MutexAfterLock(&cb, &s->dd, true, try_lock);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  if (report_double_lock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexDoubleLock, addr, mid);
  if (flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

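// Write unlock. With all set, drops every recursion level at once; otherwise
// drops one. Returns the number of levels released so a caller can restore
// the same recursion depth later, e.g. (illustrative sketch only, not code
// from this file):
//   int rec = MutexUnlock(thr, pc, addr, /*all=*/true);
//   // ... wait ...
//   MutexLock(thr, pc, addr, rec, /*try_lock=*/false);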
int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all) {
  DPrintf("#%d: MutexUnlock %zx all=%d\n", thr->tid, addr, all);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
  int rec = 0;
  bool report_bad_unlock = false;
  if (s->recursion == 0 || s->owner_tid != thr->tid) {
    if (flags()->report_mutex_bugs && !s->is_broken) {
      s->is_broken = true;
      report_bad_unlock = true;
    }
  } else {
    rec = all ? s->recursion : 1;
    s->recursion -= rec;
    if (s->recursion == 0) {
      StatInc(thr, StatMutexUnlock);
      s->owner_tid = SyncVar::kInvalidTid;
      ReleaseStoreImpl(thr, pc, &s->clock);
    } else {
      StatInc(thr, StatMutexRecUnlock);
    }
  }
  thr->mset.Del(s->GetId(), true);
  if (flags()->detect_deadlocks && s->recursion == 0) {
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, true);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
  if (flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
  return rec;
}

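// Read (shared) lock. Acquires only the write clock: readers synchronize
// with the preceding writer but not with each other. Taking a read lock
// while the mutex is write-held is reported as a bad read lock.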
void MutexReadLock(ThreadState *thr, uptr pc, uptr addr, bool trylock) {
  DPrintf("#%d: MutexReadLock %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexReadLock);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, false);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId());
  bool report_bad_lock = false;
  if (s->owner_tid != SyncVar::kInvalidTid) {
    if (flags()->report_mutex_bugs && !s->is_broken) {
      s->is_broken = true;
      report_bad_lock = true;
    }
  }
  AcquireImpl(thr, pc, &s->clock);
  s->last_lock = thr->fast_state.raw();
  thr->mset.Add(s->GetId(), false, thr->fast_state.epoch());
  if (flags()->detect_deadlocks && s->recursion == 0) {
    Callback cb(thr, pc);
    if (!trylock)
      ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
    ctx->dd->MutexAfterLock(&cb, &s->dd, false, trylock);
  }
  u64 mid = s->GetId();
  s->mtx.ReadUnlock();
  // Can't touch s after this point.
  if (report_bad_lock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadLock, addr, mid);
  if (flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

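// Read unlock. Releases into the separate read_clock; the next write locker
// acquires both clocks, so readers are ordered before subsequent writers
// without being ordered among themselves.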
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexReadUnlock);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
  bool report_bad_unlock = false;
  if (s->owner_tid != SyncVar::kInvalidTid) {
    if (flags()->report_mutex_bugs && !s->is_broken) {
      s->is_broken = true;
      report_bad_unlock = true;
    }
  }
  ReleaseImpl(thr, pc, &s->read_clock);
  if (flags()->detect_deadlocks && s->recursion == 0) {
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, false);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  thr->mset.Del(mid, false);
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadUnlock, addr, mid);
  if (flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

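// Unlock for callers that do not know whether they hold the mutex for
// reading or writing (e.g. rwlock-style unlock interfaces); owner_tid
// disambiguates the two cases.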
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
  bool write = true;
  bool report_bad_unlock = false;
  if (s->owner_tid == SyncVar::kInvalidTid) {
    // Seems to be a read unlock.
    write = false;
    StatInc(thr, StatMutexReadUnlock);
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
    ReleaseImpl(thr, pc, &s->read_clock);
  } else if (s->owner_tid == thr->tid) {
    // Seems to be a write unlock.
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
    CHECK_GT(s->recursion, 0);
    s->recursion--;
    if (s->recursion == 0) {
      StatInc(thr, StatMutexUnlock);
      s->owner_tid = SyncVar::kInvalidTid;
      ReleaseImpl(thr, pc, &s->clock);
    } else {
      StatInc(thr, StatMutexRecUnlock);
    }
  } else if (!s->is_broken) {
    s->is_broken = true;
    report_bad_unlock = true;
  }
  thr->mset.Del(s->GetId(), write);
  if (flags()->detect_deadlocks && s->recursion == 0) {
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, write);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
  if (flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

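// Forcibly resets ownership and recursion when the runtime's model of the
// mutex is known to be out of sync with reality (for example, presumably,
// after a robust mutex reports EOWNERDEAD).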
void MutexRepair(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexRepair %zx\n", thr->tid, addr);
  SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
  s->owner_tid = SyncVar::kInvalidTid;
  s->recursion = 0;
  s->mtx.Unlock();
}

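// Acquire/Release below implement generic happens-before edges keyed by an
// arbitrary address; they back non-mutex synchronization such as the
// happens-before annotations.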
void Acquire(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, false);
  AcquireImpl(thr, pc, &s->clock);
  s->mtx.ReadUnlock();
}

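// AcquireGlobal synchronizes with everything that has happened so far in
// every thread: for running threads it takes the current epoch, and for
// finished threads the epoch at termination (epoch1).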
static void UpdateClockCallback(ThreadContextBase *tctx_base, void *arg) {
  ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  if (tctx->status == ThreadStatusRunning)
    thr->clock.set(tctx->tid, tctx->thr->fast_state.epoch());
  else
    thr->clock.set(tctx->tid, tctx->epoch1);
}

void AcquireGlobal(ThreadState *thr, uptr pc) {
  DPrintf("#%d: AcquireGlobal\n", thr->tid);
  if (thr->ignore_sync)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ctx->thread_registry->RunCallbackForEachThreadLocked(
      UpdateClockCallback, thr);
}

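// Release merges the current thread's clock into the sync object's clock;
// ReleaseStore overwrites it instead (store rather than merge semantics).
// Both must log a trace event because they increment the epoch.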
void Release(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: Release %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseImpl(thr, pc, &s->clock);
  s->mtx.Unlock();
}

void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseStoreImpl(thr, pc, &s->clock);
  s->mtx.Unlock();
}

#ifndef TSAN_GO
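// AfterSleep snapshots every thread's epoch into last_sleep_clock and
// records the sleep stack; race reports use this to flag races that were
// likely hidden by timing ("as if synchronized via sleep").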
static void UpdateSleepClockCallback(ThreadContextBase *tctx_base, void *arg) {
  ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  if (tctx->status == ThreadStatusRunning)
    thr->last_sleep_clock.set(tctx->tid, tctx->thr->fast_state.epoch());
  else
    thr->last_sleep_clock.set(tctx->tid, tctx->epoch1);
}

void AfterSleep(ThreadState *thr, uptr pc) {
  DPrintf("#%d: AfterSleep\n", thr->tid);
  if (thr->ignore_sync)
    return;
  thr->last_sleep_stack_id = CurrentStackId(thr, pc);
  ThreadRegistryLock l(ctx->thread_registry);
  ctx->thread_registry->RunCallbackForEachThreadLocked(
      UpdateSleepClockCallback, thr);
}
#endif

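// The *Impl helpers below operate on a SyncClock the caller has already
// located and locked; they do not touch the sync table. Each first refreshes
// the thread's own slot in its vector clock from fast_state.epoch().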
void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->clock.acquire(c);
  StatInc(thr, StatSyncAcquire);
}

void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.release(c);
  StatInc(thr, StatSyncRelease);
}

void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.ReleaseStore(c);
  StatInc(thr, StatSyncRelease);
}

void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.acq_rel(c);
  StatInc(thr, StatSyncAcquire);
  StatInc(thr, StatSyncRelease);
}

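// Converts a cycle found by the deadlock detector into a TSan report: one
// mutex and one thread per edge, plus one stack per edge (two when
// second_deadlock_stack is enabled).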
void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
  if (r == 0)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(ReportTypeDeadlock);
  for (int i = 0; i < r->n; i++) {
    rep.AddMutex(r->loop[i].mtx_ctx0);
    rep.AddUniqueTid((int)r->loop[i].thr_ctx);
    rep.AddThread((int)r->loop[i].thr_ctx);
  }
  StackTrace stacks[2 * DDReport::kMaxLoopSize];
  uptr dummy_pc = 0x42;
  for (int i = 0; i < r->n; i++) {
    uptr size;
    for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) {
      u32 stk = r->loop[i].stk[j];
      if (stk) {
        const uptr *trace = StackDepotGet(stk, &size);
        stacks[i].Init(const_cast<uptr *>(trace), size);
      } else {
        // Sometimes we fail to extract the stack trace (FIXME: investigate),
        // but we should still produce some stack trace in the report.
        stacks[i].Init(&dummy_pc, 1);
      }
      rep.AddStack(&stacks[i]);
    }
  }
  // FIXME: use all stacks for suppressions, not just the second stack of the
  // first edge.
  OutputReport(ctx, rep, rep.GetReport()->stacks[0]);
}

}  // namespace __tsan