//===-- tsan_mutex.cc -----------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_libc.h"
#include "tsan_mutex.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"

namespace __tsan {

// Simple reader-writer spin-mutex. Optimized for the not-so-contended case.
// Readers have preference and can potentially starve writers.

// The table describes which mutexes can be locked while holding which other
// mutexes. E.g. if the row for MutexTypeThreads contains MutexTypeReport,
// then the Report mutex can be locked while under the Threads mutex.
// Leaf mutexes can be locked under any other mutex.
// Recursive locking is not supported.
const MutexType MutexTypeLeaf = (MutexType)-1;
static MutexType CanLockTab[MutexTypeCount][MutexTypeCount] = {
  /*0 MutexTypeInvalid*/     {},
  /*1 MutexTypeTrace*/       {MutexTypeLeaf},
  /*2 MutexTypeThreads*/     {MutexTypeReport},
  /*3 MutexTypeReport*/      {},
  /*4 MutexTypeSyncVar*/     {},
  /*5 MutexTypeSyncTab*/     {MutexTypeSyncVar},
  /*6 MutexTypeSlab*/        {MutexTypeLeaf},
  /*7 MutexTypeAnnotations*/ {},
  /*8 MutexTypeAtExit*/      {MutexTypeSyncTab},
};
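
// A hedged illustration of how the table is read (hypothetical snippet, not
// part of the build; the StatMtx* stat types are assumed for illustration):
#if 0
static Mutex threads_mtx(MutexTypeThreads, StatMtxThreads);
static Mutex report_mtx(MutexTypeReport, StatMtxReport);

void LockOrderSketch() {
  // OK: MutexTypeReport appears in the MutexTypeThreads row above.
  threads_mtx.Lock();
  report_mtx.Lock();
  report_mtx.Unlock();
  threads_mtx.Unlock();

  // The reversed order would CHECK-fail in debug builds: the row for
  // MutexTypeReport is empty, so Threads is not lockable under Report.
  // report_mtx.Lock();
  // threads_mtx.Lock();
}
#endif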

static bool CanLockAdj[MutexTypeCount][MutexTypeCount];

void InitializeMutex() {
  // Build the "can lock" adjacency matrix.
  // If [i][j] == true, then one can lock mutex j while under mutex i.
  const int N = MutexTypeCount;
  int cnt[N] = {};
  bool leaf[N] = {};
  for (int i = 1; i < N; i++) {
    for (int j = 0; j < N; j++) {
      int z = CanLockTab[i][j];
      if (z == MutexTypeInvalid)
        continue;
      if (z == MutexTypeLeaf) {
        CHECK(!leaf[i]);
        leaf[i] = true;
        continue;
      }
      CHECK(!CanLockAdj[i][z]);
      CanLockAdj[i][z] = true;
      cnt[i]++;
    }
  }
  for (int i = 0; i < N; i++) {
    CHECK(!leaf[i] || cnt[i] == 0);
  }
  // Add leaf mutexes.
  for (int i = 0; i < N; i++) {
    if (!leaf[i])
      continue;
    for (int j = 0; j < N; j++) {
      if (i == j || leaf[j] || j == MutexTypeInvalid)
        continue;
      CHECK(!CanLockAdj[j][i]);
      CanLockAdj[j][i] = true;
    }
  }
  // Build the transitive closure.
  bool CanLockAdj2[MutexTypeCount][MutexTypeCount];
  for (int i = 0; i < N; i++) {
    for (int j = 0; j < N; j++) {
      CanLockAdj2[i][j] = CanLockAdj[i][j];
    }
  }
  for (int k = 0; k < N; k++) {
    for (int i = 0; i < N; i++) {
      for (int j = 0; j < N; j++) {
        if (CanLockAdj2[i][k] && CanLockAdj2[k][j]) {
          CanLockAdj2[i][j] = true;
        }
      }
    }
  }
#if 0
  TsanPrintf("Can lock graph:\n");
  for (int i = 0; i < N; i++) {
    for (int j = 0; j < N; j++) {
      TsanPrintf("%d ", CanLockAdj[i][j]);
    }
    TsanPrintf("\n");
  }
  TsanPrintf("Can lock graph closure:\n");
  for (int i = 0; i < N; i++) {
    for (int j = 0; j < N; j++) {
      TsanPrintf("%d ", CanLockAdj2[i][j]);
    }
    TsanPrintf("\n");
  }
#endif
  // Verify that the graph is acyclic.
  for (int i = 0; i < N; i++) {
    if (CanLockAdj2[i][i]) {
      TsanPrintf("Mutex %d participates in a cycle\n", i);
      Die();
    }
  }
}
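
// A worked example of the closure check above (derived from the table, not
// new behavior): the direct edges include AtExit -> SyncTab and
// SyncTab -> SyncVar, so the Floyd-Warshall-style triple loop adds the
// derived edge AtExit -> SyncVar to CanLockAdj2. Note that the closure is
// used only for this acyclicity check; DeadlockDetector::Lock() below
// consults the direct-edge matrix CanLockAdj.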

DeadlockDetector::DeadlockDetector() {
  // Rely on zero initialization because some mutexes can be locked before ctor.
}

void DeadlockDetector::Lock(MutexType t) {
  // TsanPrintf("LOCK %d @%zu\n", t, seq_ + 1);
  u64 max_seq = 0;
  u64 max_idx = MutexTypeInvalid;
  for (int i = 0; i != MutexTypeCount; i++) {
    if (locked_[i] == 0)
      continue;
    CHECK_NE(locked_[i], max_seq);
    if (max_seq < locked_[i]) {
      max_seq = locked_[i];
      max_idx = i;
    }
  }
  locked_[t] = ++seq_;
  if (max_idx == MutexTypeInvalid)
    return;
  // TsanPrintf("  last %d @%zu\n", max_idx, max_seq);
  if (!CanLockAdj[max_idx][t]) {
    TsanPrintf("ThreadSanitizer: internal deadlock detected\n");
    TsanPrintf("ThreadSanitizer: can't lock %d while under %zu\n",
               t, (uptr)max_idx);
    CHECK(0);
  }
}

void DeadlockDetector::Unlock(MutexType t) {
  // TsanPrintf("UNLO %d @%zu #%zu\n", t, seq_, locked_[t]);
  CHECK(locked_[t]);
  locked_[t] = 0;
}
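
// A hedged usage sketch of the detector (hypothetical call sequence): each
// Lock() checks the new mutex only against the most recently acquired mutex
// still held; earlier pairs were already validated when they were locked.
#if 0
DeadlockDetector d;
d.Lock(MutexTypeThreads);  // locked_[Threads] = 1
d.Lock(MutexTypeReport);   // OK: CanLockAdj[Threads][Report] holds
d.Unlock(MutexTypeReport);
d.Unlock(MutexTypeThreads);

d.Lock(MutexTypeReport);
d.Lock(MutexTypeThreads);  // CHECK-fails: Threads not lockable under Report
#endif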

const uptr kUnlocked = 0;
const uptr kWriteLock = 1;
const uptr kReadLock = 2;
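
// The state word encodes both modes at once: bit 0 is the writer flag and
// the reader count lives in the bits above it. E.g. state == kWriteLock
// means one writer, state == 3 * kReadLock means three readers, and a
// reader that arrives while a writer holds the lock leaves the state at
// kWriteLock + kReadLock until the writer releases.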

class Backoff {
 public:
  Backoff()
    : iter_() {
  }

  bool Do() {
    // Spin actively (processor-level yield) for the first few iterations,
    // then fall back to yielding the thread to the OS scheduler.
    if (iter_++ < kActiveSpinIters)
      proc_yield(kActiveSpinCnt);
    else
      internal_sched_yield();
    return true;
  }

  u64 Contention() const {
    // Rough contention estimate for stats: iterations that went to the
    // scheduler are weighted 10x heavier than active spin iterations.
    u64 active = iter_ % kActiveSpinIters;
    u64 passive = iter_ - active;
    return active + 10 * passive;
  }

 private:
  int iter_;
  static const int kActiveSpinIters = 10;
  static const int kActiveSpinCnt = 20;
};
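
// A hedged usage sketch: Do() always returns true, so callers drive the
// loop and exit via their own break/return once the acquisition attempt
// succeeds, as Mutex::Lock() does below.
//
//   for (Backoff backoff; backoff.Do();) {
//     if (try_acquire())  // hypothetical helper, for illustration only
//       break;
//   }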

Mutex::Mutex(MutexType type, StatType stat_type) {
  CHECK_GT(type, MutexTypeInvalid);
  CHECK_LT(type, MutexTypeCount);
#if TSAN_DEBUG
  type_ = type;
#endif
#if TSAN_COLLECT_STATS
  stat_type_ = stat_type;
#endif
  atomic_store(&state_, kUnlocked, memory_order_relaxed);
}

Mutex::~Mutex() {
  CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
}

void Mutex::Lock() {
#if TSAN_DEBUG && !TSAN_GO
  cur_thread()->deadlock_detector.Lock(type_);
#endif
  uptr cmp = kUnlocked;
  if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
                                     memory_order_acquire))
    return;
  for (Backoff backoff; backoff.Do();) {
    if (atomic_load(&state_, memory_order_relaxed) == kUnlocked) {
      cmp = kUnlocked;
      if (atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
                                       memory_order_acquire)) {
#if TSAN_COLLECT_STATS
        StatInc(cur_thread(), stat_type_, backoff.Contention());
#endif
        return;
      }
    }
  }
}

void Mutex::Unlock() {
  uptr prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
  (void)prev;
  DCHECK_NE(prev & kWriteLock, 0);
#if TSAN_DEBUG && !TSAN_GO
  cur_thread()->deadlock_detector.Unlock(type_);
#endif
}

void Mutex::ReadLock() {
#if TSAN_DEBUG && !TSAN_GO
  cur_thread()->deadlock_detector.Lock(type_);
#endif
  uptr prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
  if ((prev & kWriteLock) == 0)
    return;
  for (Backoff backoff; backoff.Do();) {
    prev = atomic_load(&state_, memory_order_acquire);
    if ((prev & kWriteLock) == 0) {
#if TSAN_COLLECT_STATS
      StatInc(cur_thread(), stat_type_, backoff.Contention());
#endif
      return;
    }
  }
}

void Mutex::ReadUnlock() {
  uptr prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
  (void)prev;
  DCHECK_EQ(prev & kWriteLock, 0);
  DCHECK_GT(prev & ~kWriteLock, 0);
#if TSAN_DEBUG && !TSAN_GO
  cur_thread()->deadlock_detector.Unlock(type_);
#endif
}

void Mutex::CheckLocked() {
  CHECK_NE(atomic_load(&state_, memory_order_relaxed), 0);
}
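
// A hedged end-to-end sketch (hypothetical mutex; the stat type is assumed
// for illustration):
#if 0
static Mutex example_mtx(MutexTypeReport, StatMtxReport);

void WriterSide() {
  example_mtx.Lock();        // exclusive: state 0 -> kWriteLock
  example_mtx.CheckLocked();
  example_mtx.Unlock();      // state kWriteLock -> 0
}

void ReaderSide() {
  example_mtx.ReadLock();    // shared: state += kReadLock, spins on a writer
  example_mtx.ReadUnlock();  // state -= kReadLock
}
#endif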

}  // namespace __tsan