//===-- sanitizer_deadlock_detector1.cc -----------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Deadlock detector implementation based on NxN adjacency bit matrix.
//
//===----------------------------------------------------------------------===//
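//
// Overview: each mutex is a node in a directed graph whose edges record the
// observed lock-order relation "A was held while B was acquired". The graph
// is stored as an NxN adjacency bit matrix (backed by the TwoLevelBitVector
// typedef below), and an acquisition that would close a cycle in this graph
// is reported as a potential deadlock. For example, if thread 1 locks A and
// then B, and thread 2 later attempts to lock A while holding B, the edge
// B=>A completes the cycle A=>B=>A and a report is generated.
//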

#include "sanitizer_deadlock_detector_interface.h"
#include "sanitizer_deadlock_detector.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_mutex.h"

#if SANITIZER_DEADLOCK_DETECTOR_VERSION == 1

namespace __sanitizer {

typedef TwoLevelBitVector<> DDBV;  // DeadlockDetector's bit vector.

struct DDPhysicalThread {
};

struct DDLogicalThread {
  u64 ctx;
  DeadlockDetectorTLS<DDBV> dd;
  DDReport rep;
  bool report_pending;
};

struct DD : public DDetector {
  SpinMutex mtx;
  DeadlockDetector<DDBV> dd;
  DDFlags flags;

  explicit DD(const DDFlags *flags);

  DDPhysicalThread *CreatePhysicalThread() override;
  void DestroyPhysicalThread(DDPhysicalThread *pt) override;

  DDLogicalThread *CreateLogicalThread(u64 ctx) override;
  void DestroyLogicalThread(DDLogicalThread *lt) override;

  void MutexInit(DDCallback *cb, DDMutex *m) override;
  void MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock) override;
  void MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock,
                      bool trylock) override;
  void MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) override;
  void MutexDestroy(DDCallback *cb, DDMutex *m) override;

  DDReport *GetReport(DDCallback *cb) override;

  void MutexEnsureID(DDLogicalThread *lt, DDMutex *m);
  void ReportDeadlock(DDCallback *cb, DDMutex *m);
};

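// The detector is allocated with MmapOrDie rather than InternalAlloc: it is
// a process-wide singleton that lives for the duration of the program.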
DDetector *DDetector::Create(const DDFlags *flags) {
  void *mem = MmapOrDie(sizeof(DD), "deadlock detector");
  return new(mem) DD(flags);
}

DD::DD(const DDFlags *flags)
    : flags(*flags) {
  dd.clear();
}

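// This detector keeps no per-physical-thread state (DDPhysicalThread is
// empty), so nothing needs to be allocated here.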
DDPhysicalThread* DD::CreatePhysicalThread() {
  return 0;
}

void DD::DestroyPhysicalThread(DDPhysicalThread *pt) {
}

DDLogicalThread* DD::CreateLogicalThread(u64 ctx) {
  DDLogicalThread *lt = (DDLogicalThread*)InternalAlloc(sizeof(*lt));
  lt->ctx = ctx;
  lt->dd.clear();
  lt->report_pending = false;
  return lt;
}

void DD::DestroyLogicalThread(DDLogicalThread *lt) {
  lt->~DDLogicalThread();
  InternalFree(lt);
}

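// Records the creation stack of the mutex; a graph node id is assigned
// lazily in MutexEnsureID on first use.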
void DD::MutexInit(DDCallback *cb, DDMutex *m) {
  m->id = 0;
  m->stk = cb->Unwind();
}

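// Assigns the mutex a node in the graph if it does not already have a valid
// one (e.g. after the detector recycled its epoch), and brings the thread's
// local state up to the current epoch. Must be called under mtx.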
void DD::MutexEnsureID(DDLogicalThread *lt, DDMutex *m) {
  if (!dd.nodeBelongsToCurrentEpoch(m->id))
    m->id = dd.newNode(reinterpret_cast<uptr>(m));
  dd.ensureCurrentEpoch(&lt->dd);
}

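// Called before the thread blocks on the mutex. If acquiring m would close a
// cycle in the lock-order graph, the offending edges are added first (so
// that all stack traces are available) and the deadlock is reported.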
void DD::MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock) {
  DDLogicalThread *lt = cb->lt;
  if (lt->dd.empty()) return;  // This will be the first lock held by lt.
  if (dd.hasAllEdges(&lt->dd, m->id)) return;  // We already have all edges.
  SpinMutexLock lk(&mtx);
  MutexEnsureID(lt, m);
  if (dd.isHeld(&lt->dd, m->id))
    return;  // FIXME: allow this only for recursive locks.
  if (dd.onLockBefore(&lt->dd, m->id)) {
    // Actually add this edge now so that we have all the stack traces.
    dd.addEdges(&lt->dd, m->id, cb->Unwind(), cb->UniqueTid());
    ReportDeadlock(cb, m);
  }
}

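// Fills in the pending report for lt: walks the cycle found by
// findPathToLock and records, for each edge, the two mutexes involved, the
// thread that created the edge, and the stacks captured when it was added.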
void DD::ReportDeadlock(DDCallback *cb, DDMutex *m) {
  DDLogicalThread *lt = cb->lt;
  uptr path[10];
  uptr len = dd.findPathToLock(&lt->dd, m->id, path, ARRAY_SIZE(path));
  CHECK_GT(len, 0U);  // Hm.. cycle of 10 locks? I'd like to see that.
  CHECK_EQ(m->id, path[0]);
  lt->report_pending = true;
  DDReport *rep = &lt->rep;
  rep->n = len;
  for (uptr i = 0; i < len; i++) {
    uptr from = path[i];
    uptr to = path[(i + 1) % len];
    DDMutex *m0 = (DDMutex*)dd.getData(from);
    DDMutex *m1 = (DDMutex*)dd.getData(to);

    u32 stk_from = -1U, stk_to = -1U;
    int unique_tid = 0;
    dd.findEdge(from, to, &stk_from, &stk_to, &unique_tid);
    // Printf("Edge: %zd=>%zd: %u/%u T%d\n", from, to, stk_from, stk_to,
    //    unique_tid);
    rep->loop[i].thr_ctx = unique_tid;
    rep->loop[i].mtx_ctx0 = m0->ctx;
    rep->loop[i].mtx_ctx1 = m1->ctx;
    rep->loop[i].stk[0] = stk_to;
    rep->loop[i].stk[1] = stk_from;
  }
}

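// Called once the lock has been acquired. The common cases (first lock held
// by the thread, or a mutex whose edges are already known) are handled on a
// fast path without taking mtx; otherwise new lock-order edges are added
// under the global lock. Trylocks do not add edges since a trylock never
// blocks and thus cannot contribute to a deadlock.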
void DD::MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock, bool trylock) {
  DDLogicalThread *lt = cb->lt;
  u32 stk = 0;
  if (flags.second_deadlock_stack)
    stk = cb->Unwind();
  // Printf("T%p MutexLock:   %zx stk %u\n", lt, m->id, stk);
  if (dd.onFirstLock(&lt->dd, m->id, stk))
    return;
  if (dd.onLockFast(&lt->dd, m->id, stk))
    return;

  SpinMutexLock lk(&mtx);
  MutexEnsureID(lt, m);
  if (wlock)  // Only a recursive rlock may be held.
    CHECK(!dd.isHeld(&lt->dd, m->id));
  if (!trylock)
    dd.addEdges(&lt->dd, m->id, stk ? stk : cb->Unwind(), cb->UniqueTid());
  dd.onLockAfter(&lt->dd, m->id, stk);
}

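// Unlocking only updates the thread-local set of held locks; the global
// graph is left untouched, so no locking is required here.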
void DD::MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) {
  // Printf("T%p MutexUnLock: %zx\n", cb->lt, m->id);
  dd.onUnlock(&cb->lt->dd, m->id);
}

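// Removes the graph node associated with the mutex, if any, so that the id
// can be recycled for a different mutex later.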
void DD::MutexDestroy(DDCallback *cb, DDMutex *m) {
  if (!m->id) return;
  SpinMutexLock lk(&mtx);
  if (dd.nodeBelongsToCurrentEpoch(m->id))
    dd.removeNode(m->id);
  m->id = 0;
}

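// Returns the report prepared by ReportDeadlock, or null if no deadlock has
// been detected since the last call.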
DDReport *DD::GetReport(DDCallback *cb) {
  if (!cb->lt->report_pending)
    return 0;
  cb->lt->report_pending = false;
  return &cb->lt->rep;
}

}  // namespace __sanitizer
#endif  // #if SANITIZER_DEADLOCK_DETECTOR_VERSION == 1