tsan_rtl_mutex.cc revision e1ddbf9a458e81125a03fea721997565124294ae
//===-- tsan_rtl_mutex.cc -------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "tsan_rtl.h"
#include "tsan_flags.h"
#include "tsan_sync.h"
#include "tsan_report.h"
#include "tsan_symbolize.h"
#include "tsan_platform.h"

namespace __tsan {

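// Called when the program creates a mutex (e.g. pthread_mutex_init).
// The dummy MemoryWrite makes creation itself participate in race
// detection; the rw/recursive/linker_init flags are recorded on the
// SyncVar for later sanity checks.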
void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
                 bool rw, bool recursive, bool linker_init) {
  Context *ctx = CTX();
  CHECK_GT(thr->in_rtl, 0);
  DPrintf("#%d: MutexCreate %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexCreate);
  if (!linker_init && IsAppMem(addr)) {
    CHECK(!thr->is_freeing);
    thr->is_freeing = true;
    MemoryWrite(thr, pc, addr, kSizeLog1);
    thr->is_freeing = false;
  }
  SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
  s->is_rw = rw;
  s->is_recursive = recursive;
  s->is_linker_init = linker_init;
  s->mtx.Unlock();
}

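// Called when the program destroys a mutex. Removes the SyncVar and,
// if the mutex is still locked and report_destroy_locked is set,
// produces a ReportTypeMutexDestroyLocked report that includes the
// stack of the last lock operation.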
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
  Context *ctx = CTX();
  CHECK_GT(thr->in_rtl, 0);
  DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexDestroy);
#ifndef TSAN_GO
  // Global mutexes not marked as LINKER_INITIALIZED
  // cause tons of uninteresting reports, so just ignore them.
  if (IsGlobalVar(addr))
    return;
#endif
  SyncVar *s = ctx->synctab.GetAndRemove(thr, pc, addr);
  if (s == 0)
    return;
  if (IsAppMem(addr)) {
    CHECK(!thr->is_freeing);
    thr->is_freeing = true;
    MemoryWrite(thr, pc, addr, kSizeLog1);
    thr->is_freeing = false;
  }
  if (flags()->report_destroy_locked
      && s->owner_tid != SyncVar::kInvalidTid
      && !s->is_broken) {
    s->is_broken = true;
    ThreadRegistryLock l(ctx->thread_registry);
    ScopedReport rep(ReportTypeMutexDestroyLocked);
    rep.AddMutex(s);
    StackTrace trace;
    trace.ObtainCurrent(thr, pc);
    rep.AddStack(&trace);
    FastState last(s->last_lock);
    RestoreStack(last.tid(), last.epoch(), &trace, 0);
    rep.AddStack(&trace);
    rep.AddLocation(s->addr, 1);
    OutputReport(ctx, rep);
  }
  thr->mset.Remove(s->GetId());
  DestroyAndFree(s);
}

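// Write lock. On the first (non-recursive) acquisition the thread
// acquires both the write clock and the read clock of the mutex, thus
// synchronizing with all preceding write and read unlocks. rec > 1
// lets a caller re-establish a recursion count previously returned by
// MutexUnlock.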
void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec) {
  CHECK_GT(thr->in_rtl, 0);
  DPrintf("#%d: MutexLock %zx rec=%d\n", thr->tid, addr, rec);
  CHECK_GT(rec, 0);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId());
  if (s->owner_tid == SyncVar::kInvalidTid) {
    CHECK_EQ(s->recursion, 0);
    s->owner_tid = thr->tid;
    s->last_lock = thr->fast_state.raw();
  } else if (s->owner_tid == thr->tid) {
    CHECK_GT(s->recursion, 0);
  } else {
    Printf("ThreadSanitizer WARNING: double lock of mutex %p\n", addr);
    PrintCurrentStack(thr, pc);
  }
  if (s->recursion == 0) {
    StatInc(thr, StatMutexLock);
    AcquireImpl(thr, pc, &s->clock);
    AcquireImpl(thr, pc, &s->read_clock);
  } else if (!s->is_recursive) {
    StatInc(thr, StatMutexRecLock);
  }
  s->recursion += rec;
  thr->mset.Add(s->GetId(), true, thr->fast_state.epoch());
  s->mtx.Unlock();
}

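// Write unlock. With all=true the whole recursion count is dropped at
// once; the number of releases performed is returned so that the
// caller can later re-lock with the same recursion via
// MutexLock(..., rec).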
int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all) {
  CHECK_GT(thr->in_rtl, 0);
  DPrintf("#%d: MutexUnlock %zx all=%d\n", thr->tid, addr, all);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
  int rec = 0;
  if (s->recursion == 0) {
    if (!s->is_broken) {
      s->is_broken = true;
      Printf("ThreadSanitizer WARNING: unlock of unlocked mutex %p\n", addr);
      PrintCurrentStack(thr, pc);
    }
  } else if (s->owner_tid != thr->tid) {
    if (!s->is_broken) {
      s->is_broken = true;
      Printf("ThreadSanitizer WARNING: mutex %p is unlocked by wrong thread\n",
             addr);
      PrintCurrentStack(thr, pc);
    }
  } else {
    rec = all ? s->recursion : 1;
    s->recursion -= rec;
    if (s->recursion == 0) {
      StatInc(thr, StatMutexUnlock);
      s->owner_tid = SyncVar::kInvalidTid;
      if (thr->ignore_sync == 0) {
        thr->clock.set(thr->tid, thr->fast_state.epoch());
        thr->fast_synch_epoch = thr->fast_state.epoch();
        thr->clock.ReleaseStore(&s->clock);
        StatInc(thr, StatSyncRelease);
      }
    } else {
      StatInc(thr, StatMutexRecUnlock);
    }
  }
  thr->mset.Del(s->GetId(), true);
  s->mtx.Unlock();
  return rec;
}

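// Read lock. Only the write clock is acquired: readers synchronize
// with preceding write unlocks but not with each other.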
void MutexReadLock(ThreadState *thr, uptr pc, uptr addr) {
  CHECK_GT(thr->in_rtl, 0);
  DPrintf("#%d: MutexReadLock %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexReadLock);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, false);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId());
  if (s->owner_tid != SyncVar::kInvalidTid) {
    Printf("ThreadSanitizer WARNING: read lock of a write locked mutex %p\n",
           addr);
    PrintCurrentStack(thr, pc);
  }
  AcquireImpl(thr, pc, &s->clock);
  s->last_lock = thr->fast_state.raw();
  thr->mset.Add(s->GetId(), false, thr->fast_state.epoch());
  s->mtx.ReadUnlock();
}

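// Read unlock. Releases into the dedicated read_clock, which the next
// write lock acquires in MutexLock.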
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
  CHECK_GT(thr->in_rtl, 0);
  DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexReadUnlock);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
  if (s->owner_tid != SyncVar::kInvalidTid) {
    Printf("ThreadSanitizer WARNING: read unlock of a write locked mutex %p\n",
           addr);
    PrintCurrentStack(thr, pc);
  }
  ReleaseImpl(thr, pc, &s->read_clock);
  s->mtx.Unlock();
  thr->mset.Del(s->GetId(), false);
}

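// Unlock when the caller does not know whether the mutex is held for
// reading or writing (e.g. pthread_rwlock_unlock); the mode is
// inferred from owner_tid.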
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
  CHECK_GT(thr->in_rtl, 0);
  DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
  bool write = true;
  if (s->owner_tid == SyncVar::kInvalidTid) {
    // Seems to be read unlock.
    write = false;
    StatInc(thr, StatMutexReadUnlock);
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
    ReleaseImpl(thr, pc, &s->read_clock);
  } else if (s->owner_tid == thr->tid) {
    // Seems to be write unlock.
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
    CHECK_GT(s->recursion, 0);
    s->recursion--;
    if (s->recursion == 0) {
      StatInc(thr, StatMutexUnlock);
      s->owner_tid = SyncVar::kInvalidTid;
      ReleaseImpl(thr, pc, &s->clock);
    } else {
      StatInc(thr, StatMutexRecUnlock);
    }
  } else if (!s->is_broken) {
    s->is_broken = true;
    Printf("ThreadSanitizer WARNING: mutex %p is unlocked by wrong thread\n",
           addr);
    PrintCurrentStack(thr, pc);
  }
  thr->mset.Del(s->GetId(), write);
  s->mtx.Unlock();
}

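// Stand-alone acquire on an arbitrary address; used by interceptors
// and happens-before annotations to consume a previous Release().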
void Acquire(ThreadState *thr, uptr pc, uptr addr) {
  CHECK_GT(thr->in_rtl, 0);
  DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, false);
  AcquireImpl(thr, pc, &s->clock);
  s->mtx.ReadUnlock();
}

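// AcquireGlobal synchronizes with everything that has happened so far:
// the callback pulls in the current epoch of every running thread and
// the final epoch of every finished one.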
static void UpdateClockCallback(ThreadContextBase *tctx_base, void *arg) {
  ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  if (tctx->status == ThreadStatusRunning)
    thr->clock.set(tctx->tid, tctx->thr->fast_state.epoch());
  else
    thr->clock.set(tctx->tid, tctx->epoch1);
}

void AcquireGlobal(ThreadState *thr, uptr pc) {
  DPrintf("#%d: AcquireGlobal\n", thr->tid);
  if (thr->ignore_sync)
    return;
  ThreadRegistryLock l(CTX()->thread_registry);
  CTX()->thread_registry->RunCallbackForEachThreadLocked(
      UpdateClockCallback, thr);
}

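// Stand-alone release on an arbitrary address, the counterpart of
// Acquire().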
void Release(ThreadState *thr, uptr pc, uptr addr) {
  CHECK_GT(thr->in_rtl, 0);
  DPrintf("#%d: Release %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseImpl(thr, pc, &s->clock);
  s->mtx.Unlock();
}

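// Like Release(), but overwrites the target clock instead of merging
// into it (store semantics).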
void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
  CHECK_GT(thr->in_rtl, 0);
  DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseStoreImpl(thr, pc, &s->clock);
  s->mtx.Unlock();
}

#ifndef TSAN_GO
static void UpdateSleepClockCallback(ThreadContextBase *tctx_base, void *arg) {
  ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  if (tctx->status == ThreadStatusRunning)
    thr->last_sleep_clock.set(tctx->tid, tctx->thr->fast_state.epoch());
  else
    thr->last_sleep_clock.set(tctx->tid, tctx->epoch1);
}

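// Called after a thread returns from a sleep-like function. Snapshots
// the state of all threads into last_sleep_clock; reports can then
// mark memory accesses that happened before the sleep as
// "as if synchronized via sleep".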
void AfterSleep(ThreadState *thr, uptr pc) {
  DPrintf("#%d: AfterSleep\n", thr->tid);
  if (thr->ignore_sync)
    return;
  thr->last_sleep_stack_id = CurrentStackId(thr, pc);
  ThreadRegistryLock l(CTX()->thread_registry);
  CTX()->thread_registry->RunCallbackForEachThreadLocked(
      UpdateSleepClockCallback, thr);
}
#endif

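// The *Impl functions below perform the actual vector clock
// operations; callers are expected to hold the lock of the SyncVar
// whose clock is passed in.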
void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  thr->clock.acquire(c);
  StatInc(thr, StatSyncAcquire);
}

void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.release(c);
  StatInc(thr, StatSyncRelease);
}

void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.ReleaseStore(c);
  StatInc(thr, StatSyncRelease);
}

void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.acq_rel(c);
  StatInc(thr, StatSyncAcquire);
  StatInc(thr, StatSyncRelease);
}

}  // namespace __tsan