//===-- tsan_rtl_mutex.cc -------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include <sanitizer_common/sanitizer_deadlock_detector_interface.h>
#include <sanitizer_common/sanitizer_stackdepot.h>

#include "tsan_rtl.h"
#include "tsan_flags.h"
#include "tsan_sync.h"
#include "tsan_report.h"
#include "tsan_symbolize.h"
#include "tsan_platform.h"

namespace __tsan {

void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r);

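// Callback adapts TSan state to the common deadlock detector interface
// (DDCallback): Unwind() captures the current stack into the stack depot
// and UniqueTid() identifies the thread in deadlock reports.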
struct Callback : DDCallback {
  ThreadState *thr;
  uptr pc;

  Callback(ThreadState *thr, uptr pc)
      : thr(thr)
      , pc(pc) {
    DDCallback::pt = thr->dd_pt;
    DDCallback::lt = thr->dd_lt;
  }

  u32 Unwind() override { return CurrentStackId(thr, pc); }
  int UniqueTid() override { return thr->unique_id; }
};

void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s) {
  Callback cb(thr, pc);
  ctx->dd->MutexInit(&cb, &s->dd);
  s->dd.ctx = s->GetId();
}

static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
    uptr addr, u64 mid) {
  // In Go, these misuses are either impossible, or detected by the standard
  // library, or would be false positives (e.g. unlock in a different thread).
  if (kGoMode)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(typ);
  rep.AddMutex(mid);
  VarSizeStackTrace trace;
  ObtainCurrentStack(thr, pc, &trace);
  rep.AddStack(trace, true);
  rep.AddLocation(addr, 1);
  OutputReport(thr, rep);
}

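// Called on mutex initialization. The synthetic 1-byte write below lets the
// race detector catch races between mutex (re)initialization and other
// accesses to the mutex memory. It is skipped for linker-initialized mutexes,
// whose storage may be touched before the runtime is fully initialized.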
void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
                 bool rw, bool recursive, bool linker_init) {
  DPrintf("#%d: MutexCreate %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexCreate);
  if (!linker_init && IsAppMem(addr)) {
    CHECK(!thr->is_freeing);
    thr->is_freeing = true;
    MemoryWrite(thr, pc, addr, kSizeLog1);
    thr->is_freeing = false;
  }
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  s->is_rw = rw;
  s->is_recursive = recursive;
  s->is_linker_init = linker_init;
  if (kCppMode && s->creation_stack_id == 0)
    s->creation_stack_id = CurrentStackId(thr, pc);
  s->mtx.Unlock();
}

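// Called on mutex destruction. If the mutex is still locked and
// report_destroy_locked is set, the ReportTypeMutexDestroyLocked report is
// produced first and the sync object is reset only afterwards (via a second
// metamap lookup), because the report needs the not-yet-reset state.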
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexDestroy);
#ifndef SANITIZER_GO
  // Global mutexes not marked as LINKER_INITIALIZED
  // cause tons of uninteresting reports, so just ignore them.
  if (IsGlobalVar(addr))
    return;
#endif
  if (IsAppMem(addr)) {
    CHECK(!thr->is_freeing);
    thr->is_freeing = true;
    MemoryWrite(thr, pc, addr, kSizeLog1);
    thr->is_freeing = false;
  }
  SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr);
  if (s == 0)
    return;
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    // Destroy and re-initialize the deadlock detector state:
    // the same address can later be reused for another mutex.
    ctx->dd->MutexDestroy(&cb, &s->dd);
    ctx->dd->MutexInit(&cb, &s->dd);
  }
  bool unlock_locked = false;
  if (flags()->report_destroy_locked
      && s->owner_tid != SyncVar::kInvalidTid
      && !s->is_broken) {
    s->is_broken = true;
    unlock_locked = true;
  }
  u64 mid = s->GetId();
  u32 last_lock = s->last_lock;
  if (!unlock_locked)
    s->Reset(thr);  // must not reset it before the report is printed
  s->mtx.Unlock();
  if (unlock_locked) {
    ThreadRegistryLock l(ctx->thread_registry);
    ScopedReport rep(ReportTypeMutexDestroyLocked);
    rep.AddMutex(mid);
    VarSizeStackTrace trace;
    ObtainCurrentStack(thr, pc, &trace);
    rep.AddStack(trace);
    FastState last(last_lock);
    RestoreStack(last.tid(), last.epoch(), &trace, 0);
    rep.AddStack(trace, true);
    rep.AddLocation(addr, 1);
    OutputReport(thr, rep);
  }
  if (unlock_locked) {
    // The report has been printed, now it is safe to reset the sync object.
    SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr);
    if (s != 0) {
      s->Reset(thr);
      s->mtx.Unlock();
    }
  }
  thr->mset.Remove(mid);
  // s will be destroyed and freed in MetaMap::FreeBlock.
}

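// Write-lock acquisition. rec is the number of recursion levels to acquire
// at once; values greater than 1 are used, e.g., to restore the recursion
// depth returned by MutexUnlock(..., all=true). A sketch of a hypothetical
// interceptor call (real interceptors live elsewhere in the runtime):
//   MutexLock(thr, pc, (uptr)m, /*rec=*/1, /*try_lock=*/false);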
void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec, bool try_lock) {
  DPrintf("#%d: MutexLock %zx rec=%d\n", thr->tid, addr, rec);
  CHECK_GT(rec, 0);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId());
  bool report_double_lock = false;
  if (s->owner_tid == SyncVar::kInvalidTid) {
    CHECK_EQ(s->recursion, 0);
    s->owner_tid = thr->tid;
    s->last_lock = thr->fast_state.raw();
  } else if (s->owner_tid == thr->tid) {
    CHECK_GT(s->recursion, 0);
  } else if (flags()->report_mutex_bugs && !s->is_broken) {
    s->is_broken = true;
    report_double_lock = true;
  }
  if (s->recursion == 0) {
    StatInc(thr, StatMutexLock);
    AcquireImpl(thr, pc, &s->clock);
    AcquireImpl(thr, pc, &s->read_clock);
  } else if (!s->is_recursive) {
    StatInc(thr, StatMutexRecLock);
  }
  s->recursion += rec;
  thr->mset.Add(s->GetId(), true, thr->fast_state.epoch());
  if (common_flags()->detect_deadlocks && (s->recursion - rec) == 0) {
    Callback cb(thr, pc);
    // A trylock cannot block, so it cannot close a deadlock cycle;
    // notify the detector before the lock only for blocking acquisitions.
    if (!try_lock)
      ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
    ctx->dd->MutexAfterLock(&cb, &s->dd, true, try_lock);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  if (report_double_lock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexDoubleLock, addr, mid);
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

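// Write-lock release. Returns the number of recursion levels released, so a
// caller that must fully release the mutex (all=true, e.g. around a condvar
// wait) can later re-acquire the same depth with MutexLock.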
int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all) {
  DPrintf("#%d: MutexUnlock %zx all=%d\n", thr->tid, addr, all);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
  int rec = 0;
  bool report_bad_unlock = false;
  if (kCppMode && (s->recursion == 0 || s->owner_tid != thr->tid)) {
    if (flags()->report_mutex_bugs && !s->is_broken) {
      s->is_broken = true;
      report_bad_unlock = true;
    }
  } else {
    rec = all ? s->recursion : 1;
    s->recursion -= rec;
    if (s->recursion == 0) {
      StatInc(thr, StatMutexUnlock);
      s->owner_tid = SyncVar::kInvalidTid;
      ReleaseStoreImpl(thr, pc, &s->clock);
    } else {
      StatInc(thr, StatMutexRecUnlock);
    }
  }
  thr->mset.Del(s->GetId(), true);
  if (common_flags()->detect_deadlocks && s->recursion == 0 &&
      !report_bad_unlock) {
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, true);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
  if (common_flags()->detect_deadlocks && !report_bad_unlock) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
  return rec;
}

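// Read-lock acquisition. Read locks never set owner_tid, so acquiring a read
// lock while a writer holds the mutex is flagged as a bad read lock. Note
// that the metamap entry is only read-locked here (write_lock=false).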
void MutexReadLock(ThreadState *thr, uptr pc, uptr addr, bool trylock) {
  DPrintf("#%d: MutexReadLock %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexReadLock);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId());
  bool report_bad_lock = false;
  if (s->owner_tid != SyncVar::kInvalidTid) {
    if (flags()->report_mutex_bugs && !s->is_broken) {
      s->is_broken = true;
      report_bad_lock = true;
    }
  }
  AcquireImpl(thr, pc, &s->clock);
  s->last_lock = thr->fast_state.raw();
  thr->mset.Add(s->GetId(), false, thr->fast_state.epoch());
  if (common_flags()->detect_deadlocks && s->recursion == 0) {
    Callback cb(thr, pc);
    if (!trylock)
      ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
    ctx->dd->MutexAfterLock(&cb, &s->dd, false, trylock);
  }
  u64 mid = s->GetId();
  s->mtx.ReadUnlock();
  // Can't touch s after this point.
  if (report_bad_lock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadLock, addr, mid);
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

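// Read-lock release. Releases into the separate read_clock; the next write
// lock acquires both clock and read_clock (see MutexLock), which creates the
// happens-before edge from all readers to the next writer.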
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexReadUnlock);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
  bool report_bad_unlock = false;
  if (s->owner_tid != SyncVar::kInvalidTid) {
    if (flags()->report_mutex_bugs && !s->is_broken) {
      s->is_broken = true;
      report_bad_unlock = true;
    }
  }
  ReleaseImpl(thr, pc, &s->read_clock);
  if (common_flags()->detect_deadlocks && s->recursion == 0) {
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, false);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  thr->mset.Del(mid, false);
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadUnlock, addr, mid);
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

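// Used when the caller does not know whether it releases a read or a write
// lock (e.g. for rwlock unlock interceptors): the kind is inferred from
// owner_tid, which is set only by write locks.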
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  bool write = true;
  bool report_bad_unlock = false;
  if (s->owner_tid == SyncVar::kInvalidTid) {
    // Seems to be read unlock.
    write = false;
    StatInc(thr, StatMutexReadUnlock);
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
    ReleaseImpl(thr, pc, &s->read_clock);
  } else if (s->owner_tid == thr->tid) {
    // Seems to be write unlock.
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
    CHECK_GT(s->recursion, 0);
    s->recursion--;
    if (s->recursion == 0) {
      StatInc(thr, StatMutexUnlock);
      s->owner_tid = SyncVar::kInvalidTid;
      ReleaseImpl(thr, pc, &s->clock);
    } else {
      StatInc(thr, StatMutexRecUnlock);
    }
  } else if (!s->is_broken) {
    s->is_broken = true;
    report_bad_unlock = true;
  }
  thr->mset.Del(s->GetId(), write);
  if (common_flags()->detect_deadlocks && s->recursion == 0) {
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, write);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

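// Forcibly puts the mutex into a consistent unlocked state; used, e.g., when
// a lock operation on a robust mutex reports that the previous owner died.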
void MutexRepair(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexRepair %zx\n", thr->tid, addr);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  s->owner_tid = SyncVar::kInvalidTid;
  s->recursion = 0;
  s->mtx.Unlock();
}

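// The functions below implement plain happens-before edges on arbitrary
// addresses, backing, among others, the __tsan_acquire/__tsan_release
// annotations.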
void Acquire(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
  AcquireImpl(thr, pc, &s->clock);
  s->mtx.ReadUnlock();
}

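// AcquireGlobal synchronizes the current thread with all other threads: for
// running threads the live epoch is taken from their fast_state, for
// finished threads the final epoch (epoch1) recorded at termination.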
static void UpdateClockCallback(ThreadContextBase *tctx_base, void *arg) {
  ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  if (tctx->status == ThreadStatusRunning)
    thr->clock.set(tctx->tid, tctx->thr->fast_state.epoch());
  else
    thr->clock.set(tctx->tid, tctx->epoch1);
}

void AcquireGlobal(ThreadState *thr, uptr pc) {
  DPrintf("#%d: AcquireGlobal\n", thr->tid);
  if (thr->ignore_sync)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ctx->thread_registry->RunCallbackForEachThreadLocked(
      UpdateClockCallback, thr);
}

void Release(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: Release %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseImpl(thr, pc, &s->clock);
  s->mtx.Unlock();
}

void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseStoreImpl(thr, pc, &s->clock);
  s->mtx.Unlock();
}

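// AfterSleep snapshots all threads' clocks into last_sleep_clock. Race
// reports later use this snapshot for the "as if synchronized via sleep"
// hint, pointing at synchronization that sleeping merely approximates.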
#ifndef SANITIZER_GO
static void UpdateSleepClockCallback(ThreadContextBase *tctx_base, void *arg) {
  ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  if (tctx->status == ThreadStatusRunning)
    thr->last_sleep_clock.set(tctx->tid, tctx->thr->fast_state.epoch());
  else
    thr->last_sleep_clock.set(tctx->tid, tctx->epoch1);
}

void AfterSleep(ThreadState *thr, uptr pc) {
  DPrintf("#%d: AfterSleep\n", thr->tid);
  if (thr->ignore_sync)
    return;
  thr->last_sleep_stack_id = CurrentStackId(thr, pc);
  ThreadRegistryLock l(ctx->thread_registry);
  ctx->thread_registry->RunCallbackForEachThreadLocked(
      UpdateSleepClockCallback, thr);
}
#endif

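// Vector clock primitives. Each thread first stores its own epoch into its
// clock and then either joins the sync clock into its own (acquire) or
// publishes its clock into the sync object (release). A minimal sketch of
// the resulting ordering, for two threads T1, T2 and one sync clock S:
//   T1: ReleaseImpl(S);  // S := join(S, clock(T1))
//   T2: AcquireImpl(S);  // clock(T2) := join(clock(T2), S)
// Everything T1 did before the release now happens-before T2's next actions.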
void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->clock.acquire(&thr->clock_cache, c);
  StatInc(thr, StatSyncAcquire);
}

void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.release(&thr->clock_cache, c);
  StatInc(thr, StatSyncRelease);
}

void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.ReleaseStore(&thr->clock_cache, c);
  StatInc(thr, StatSyncRelease);
}

void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.acq_rel(&thr->clock_cache, c);
  StatInc(thr, StatSyncAcquire);
  StatInc(thr, StatSyncRelease);
}

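// Converts a DDReport from the deadlock detector into a TSan report: one
// mutex and one thread per edge of the cycle, plus one stack per edge (two
// if second_deadlock_stack is set).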
void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
  if (r == 0)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(ReportTypeDeadlock);
  for (int i = 0; i < r->n; i++) {
    rep.AddMutex(r->loop[i].mtx_ctx0);
    rep.AddUniqueTid((int)r->loop[i].thr_ctx);
    rep.AddThread((int)r->loop[i].thr_ctx);
  }
  uptr dummy_pc = 0x42;
  for (int i = 0; i < r->n; i++) {
    for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) {
      u32 stk = r->loop[i].stk[j];
      if (stk) {
        rep.AddStack(StackDepotGet(stk), true);
      } else {
        // Sometimes we fail to extract the stack trace (FIXME: investigate),
        // but we should still produce some stack trace in the report.
        rep.AddStack(StackTrace(&dummy_pc, 1), true);
      }
    }
  }
  OutputReport(thr, rep);
}

}  // namespace __tsan
