1//===-- tsan_mutex.cc -----------------------------------------------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file is a part of ThreadSanitizer (TSan), a race detector.
11//
12//===----------------------------------------------------------------------===//
13#include "sanitizer_common/sanitizer_libc.h"
14#include "tsan_mutex.h"
15#include "tsan_platform.h"
16#include "tsan_rtl.h"
17
18namespace __tsan {
19
20// Simple reader-writer spin-mutex. Optimized for not-so-contended case.
// Readers have preference, can possibly starve writers.
22
23// The table fixes what mutexes can be locked under what mutexes.
24// E.g. if the row for MutexTypeThreads contains MutexTypeReport,
25// then Report mutex can be locked while under Threads mutex.
26// The leaf mutexes can be locked under any other mutexes.
27// Recursive locking is not supported.
#if TSAN_DEBUG && !TSAN_GO
// Sentinel used in CanLockTab rows: marks the row's mutex as a "leaf",
// i.e. a mutex that can be locked while holding any other mutex.
const MutexType MutexTypeLeaf = (MutexType)-1;
// Row i enumerates the mutex types that may be acquired while holding
// mutex type i; unused tail entries are zero (MutexTypeInvalid).
static MutexType CanLockTab[MutexTypeCount][MutexTypeCount] = {
  /*0  MutexTypeInvalid*/     {},
  /*1  MutexTypeTrace*/       {MutexTypeLeaf},
  /*2  MutexTypeThreads*/     {MutexTypeReport},
  /*3  MutexTypeReport*/      {MutexTypeSyncTab, MutexTypeSyncVar,
                               MutexTypeMBlock, MutexTypeJavaMBlock},
  /*4  MutexTypeSyncVar*/     {},
  /*5  MutexTypeSyncTab*/     {MutexTypeSyncVar},
  /*6  MutexTypeSlab*/        {MutexTypeLeaf},
  /*7  MutexTypeAnnotations*/ {},
  /*8  MutexTypeAtExit*/      {MutexTypeSyncTab},
  /*9  MutexTypeMBlock*/      {MutexTypeSyncVar},
  /*10 MutexTypeJavaMBlock*/  {MutexTypeSyncVar},
};

// Adjacency matrix derived from CanLockTab by InitializeMutex():
// CanLockAdj[i][j] == true means mutex j may be locked while holding mutex i.
static bool CanLockAdj[MutexTypeCount][MutexTypeCount];
#endif
47
// Builds the lock-order adjacency matrix (CanLockAdj) from CanLockTab and
// verifies that the resulting lock-order graph is acyclic; a cycle would
// mean the runtime itself can deadlock. No-op in non-debug/Go builds.
void InitializeMutex() {
#if TSAN_DEBUG && !TSAN_GO
  // If CanLockAdj[i][j] == true, mutex j may be locked while holding mutex i.
  const int kTypes = MutexTypeCount;
  int out_degree[kTypes] = {};
  bool is_leaf[kTypes] = {};
  for (int row = 1; row < kTypes; row++) {
    for (int col = 0; col < kTypes; col++) {
      const MutexType succ = CanLockTab[row][col];
      if (succ == MutexTypeInvalid)
        continue;  // zero-initialized tail of the row
      if (succ == MutexTypeLeaf) {
        CHECK(!is_leaf[row]);
        is_leaf[row] = true;
        continue;
      }
      CHECK(!CanLockAdj[row][(int)succ]);
      CanLockAdj[row][(int)succ] = true;
      out_degree[row]++;
    }
  }
  // A leaf mutex must not list any successors of its own.
  for (int row = 0; row < kTypes; row++) {
    CHECK(!is_leaf[row] || out_degree[row] == 0);
  }
  // Every non-leaf mutex may acquire every leaf mutex.
  for (int leaf = 0; leaf < kTypes; leaf++) {
    if (!is_leaf[leaf])
      continue;
    for (int pred = 0; pred < kTypes; pred++) {
      if (pred == leaf || is_leaf[pred] || pred == MutexTypeInvalid)
        continue;
      CHECK(!CanLockAdj[pred][leaf]);
      CanLockAdj[pred][leaf] = true;
    }
  }
  // Compute the transitive closure (Floyd-Warshall) into a scratch matrix.
  bool closure[MutexTypeCount][MutexTypeCount];
  for (int row = 0; row < kTypes; row++) {
    for (int col = 0; col < kTypes; col++) {
      closure[row][col] = CanLockAdj[row][col];
    }
  }
  for (int via = 0; via < kTypes; via++) {
    for (int row = 0; row < kTypes; row++) {
      for (int col = 0; col < kTypes; col++) {
        closure[row][col] = closure[row][col] ||
            (closure[row][via] && closure[via][col]);
      }
    }
  }
#if 0
  Printf("Can lock graph:\n");
  for (int row = 0; row < kTypes; row++) {
    for (int col = 0; col < kTypes; col++) {
      Printf("%d ", CanLockAdj[row][col]);
    }
    Printf("\n");
  }
  Printf("Can lock graph closure:\n");
  for (int row = 0; row < kTypes; row++) {
    for (int col = 0; col < kTypes; col++) {
      Printf("%d ", closure[row][col]);
    }
    Printf("\n");
  }
#endif
  // A self-edge in the closure means the mutex is part of a cycle.
  for (int row = 0; row < kTypes; row++) {
    if (closure[row][row]) {
      Printf("Mutex %d participates in a cycle\n", row);
      Die();
    }
  }
#endif
}
125
// Intentionally empty: the detector must not reset its state here because
// some mutexes can already be locked before this constructor runs.
DeadlockDetector::DeadlockDetector() {
  // Rely on zero initialization because some mutexes can be locked before ctor.
}
129
130#if TSAN_DEBUG && !TSAN_GO
131void DeadlockDetector::Lock(MutexType t) {
132  // Printf("LOCK %d @%zu\n", t, seq_ + 1);
133  CHECK_GT(t, MutexTypeInvalid);
134  CHECK_LT(t, MutexTypeCount);
135  u64 max_seq = 0;
136  u64 max_idx = MutexTypeInvalid;
137  for (int i = 0; i != MutexTypeCount; i++) {
138    if (locked_[i] == 0)
139      continue;
140    CHECK_NE(locked_[i], max_seq);
141    if (max_seq < locked_[i]) {
142      max_seq = locked_[i];
143      max_idx = i;
144    }
145  }
146  locked_[t] = ++seq_;
147  if (max_idx == MutexTypeInvalid)
148    return;
149  // Printf("  last %d @%zu\n", max_idx, max_seq);
150  if (!CanLockAdj[max_idx][t]) {
151    Printf("ThreadSanitizer: internal deadlock detected\n");
152    Printf("ThreadSanitizer: can't lock %d while under %zu\n",
153               t, (uptr)max_idx);
154    CHECK(0);
155  }
156}
157
// Records release of mutex type t; it must currently be marked as held.
void DeadlockDetector::Unlock(MutexType t) {
  // Printf("UNLO %d @%zu #%zu\n", t, seq_, locked_[t]);
  CHECK(locked_[t]);
  locked_[t] = 0;
}
163#endif
164
// Mutex state encoding: bit 0 is the writer lock; each reader adds
// kReadLock (2) to the state, so any non-zero state means "locked".
const uptr kUnlocked = 0;
const uptr kWriteLock = 1;
const uptr kReadLock = 2;
168
169class Backoff {
170 public:
171  Backoff()
172    : iter_() {
173  }
174
175  bool Do() {
176    if (iter_++ < kActiveSpinIters)
177      proc_yield(kActiveSpinCnt);
178    else
179      internal_sched_yield();
180    return true;
181  }
182
183  u64 Contention() const {
184    u64 active = iter_ % kActiveSpinIters;
185    u64 passive = iter_ - active;
186    return active + 10 * passive;
187  }
188
189 private:
190  int iter_;
191  static const int kActiveSpinIters = 10;
192  static const int kActiveSpinCnt = 20;
193};
194
// Constructs an unlocked mutex. type identifies this mutex to the deadlock
// detector (debug builds only); stat_type selects the contention stat
// counter (stats builds only).
Mutex::Mutex(MutexType type, StatType stat_type) {
  CHECK_GT(type, MutexTypeInvalid);
  CHECK_LT(type, MutexTypeCount);
#if TSAN_DEBUG
  type_ = type;
#endif
#if TSAN_COLLECT_STATS
  stat_type_ = stat_type;
#endif
  atomic_store(&state_, kUnlocked, memory_order_relaxed);
}
206
Mutex::~Mutex() {
  // The mutex must not be held (by a writer or any readers) at destruction.
  CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
}
210
211void Mutex::Lock() {
212#if TSAN_DEBUG && !TSAN_GO
213  cur_thread()->deadlock_detector.Lock(type_);
214#endif
215  uptr cmp = kUnlocked;
216  if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
217                                     memory_order_acquire))
218    return;
219  for (Backoff backoff; backoff.Do();) {
220    if (atomic_load(&state_, memory_order_relaxed) == kUnlocked) {
221      cmp = kUnlocked;
222      if (atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
223                                       memory_order_acquire)) {
224#if TSAN_COLLECT_STATS
225        StatInc(cur_thread(), stat_type_, backoff.Contention());
226#endif
227        return;
228      }
229    }
230  }
231}
232
// Releases exclusive (write) ownership of the mutex.
void Mutex::Unlock() {
  // Subtracting kWriteLock clears the writer bit with release semantics.
  uptr prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
  (void)prev;
  DCHECK_NE(prev & kWriteLock, 0);  // must have been write-locked
#if TSAN_DEBUG && !TSAN_GO
  cur_thread()->deadlock_detector.Unlock(type_);
#endif
}
241
242void Mutex::ReadLock() {
243#if TSAN_DEBUG && !TSAN_GO
244  cur_thread()->deadlock_detector.Lock(type_);
245#endif
246  uptr prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
247  if ((prev & kWriteLock) == 0)
248    return;
249  for (Backoff backoff; backoff.Do();) {
250    prev = atomic_load(&state_, memory_order_acquire);
251    if ((prev & kWriteLock) == 0) {
252#if TSAN_COLLECT_STATS
253      StatInc(cur_thread(), stat_type_, backoff.Contention());
254#endif
255      return;
256    }
257  }
258}
259
// Releases shared (read) ownership of the mutex.
void Mutex::ReadUnlock() {
  uptr prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
  (void)prev;
  DCHECK_EQ(prev & kWriteLock, 0);   // a writer must not hold the lock
  DCHECK_GT(prev & ~kWriteLock, 0);  // at least one reader was registered
#if TSAN_DEBUG && !TSAN_GO
  cur_thread()->deadlock_detector.Unlock(type_);
#endif
}
269
// Debug assertion: the mutex must currently be held (by a writer or by
// at least one reader, i.e. state is non-zero).
void Mutex::CheckLocked() {
  CHECK_NE(atomic_load(&state_, memory_order_relaxed), 0);
}
273
274}  // namespace __tsan
275