sanitizer_deadlock_detector2.cc revision 2d1fdb26e458c4ddc04155c1d421bced3ba90cd0
1//===-- sanitizer_deadlock_detector2.cc -----------------------------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// Deadlock detector implementation based on adjacency lists.
11//
12//===----------------------------------------------------------------------===//
13
14#include "sanitizer_deadlock_detector_interface.h"
15#include "sanitizer_common.h"
16#include "sanitizer_allocator_internal.h"
17#include "sanitizer_placement_new.h"
18#include "sanitizer_mutex.h"
19
20#if SANITIZER_DEADLOCK_DETECTOR_VERSION == 2
21
22namespace __sanitizer {
23
// Maximum number of mutexes a single thread may hold simultaneously
// (size of DDLogicalThread::locked).
24const int kMaxNesting = 64;
// Sentinel ids (wrap to large u32 values): kNoId marks a mutex with no
// assigned slot yet; kEndId is the backtrack marker used by CycleCheck.
25const u32 kNoId = -1;
26const u32 kEndId = -2;
// Maximum outgoing edges stored per mutex in the lock-order graph.
27const int kMaxLink = 8;
// Mutex descriptors live in a two-level table: kL1Size lazily-allocated
// chunks of kL2Size descriptors each.
28const int kL1Size = 1024;
29const int kL2Size = 1024;
30const int kMaxMutex = kL1Size * kL2Size;
31
32struct Id {
33  u32 id;
34  u32 seq;
35
36  explicit Id(u32 id = 0, u32 seq = 0)
37      : id(id)
38      , seq(seq) {
39  }
40};
41
42struct Link {
43  u32 id;
44  u32 seq;
45  u32 tid;
46  u32 stk0;
47  u32 stk1;
48
49  explicit Link(u32 id = 0, u32 seq = 0, u32 tid = 0, u32 s0 = 0, u32 s1 = 0)
50      : id(id)
51      , seq(seq)
52      , tid(tid)
53      , stk0(s0)
54      , stk1(s1) {
55  }
56};
57
// Per-OS-thread scratch state. The arrays are sized for the worst case
// of kMaxMutex mutexes, which is why instances are mmap-ed (see
// DD::CreatePhysicalThread) rather than heap- or stack-allocated.
58struct DDPhysicalThread {
59  DDReport rep;  // filled by Report(), handed out by GetReport()
60  bool report_pending;  // true between Report() and GetReport()
61  bool visited[kMaxMutex];  // DFS visited marks (CycleCheck)
62  Link pending[kMaxMutex];  // DFS work list (CycleCheck)
63  Link path[kMaxMutex];  // current DFS path; holds the cycle on success
64};
65
// One entry of a thread's currently-held-mutex list.
66struct ThreadMutex {
67  u32 id;  // mutex id assigned by DD::allocateId()
68  u32 stk;  // acquisition stack; set only if flags.second_deadlock_stack
69};
70
// Per logical (user) thread state: which mutexes the thread holds now.
71struct DDLogicalThread {
72  u64         ctx;  // user-supplied context, printed as "#%llu" in logs
73  ThreadMutex locked[kMaxNesting];  // held mutexes, unordered
74  int         nlocked;  // number of valid entries in locked[]
75};
76
// Global descriptor of one user mutex: a node of the lock-order graph.
77struct Mutex {
78  StaticSpinMutex mtx;  // protects seq, nlink and link[]
79  u32 seq;  // bumped in MutexDestroy so stale Links can be detected
80  int nlink;
81  Link link[kMaxLink];  // outgoing edges: this mutex held while locking link[i].id
82};
83
// The detector implementation. A single instance is created by
// DDetector::Create and shared by all threads.
84struct DD : public DDetector {
85  explicit DD(const DDFlags *flags);
86
87  DDPhysicalThread* CreatePhysicalThread();
88  void DestroyPhysicalThread(DDPhysicalThread *pt);
89
90  DDLogicalThread* CreateLogicalThread(u64 ctx);
91  void DestroyLogicalThread(DDLogicalThread *lt);
92
93  void MutexInit(DDCallback *cb, DDMutex *m);
94  void MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock);
95  void MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock,
96      bool trylock);
97  void MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock);
98  void MutexDestroy(DDCallback *cb, DDMutex *m);
99
100  DDReport *GetReport(DDCallback *cb);
101
  // Internal helpers.
102  void CycleCheck(DDPhysicalThread *pt, DDLogicalThread *lt, DDMutex *mtx);
103  void Report(DDPhysicalThread *pt, DDLogicalThread *lt, int npath);
104  u32 allocateId(DDCallback *cb);
105  Mutex *getMutex(u32 id);
106  u32 getMutexId(Mutex *m);
107
108  DDFlags flags;
109
  // Two-level mutex descriptor table; L2 chunks are allocated lazily by
  // allocateId(). A null entry means "chunk not allocated yet".
110  Mutex* mutex[kL1Size];
111
  // mtx protects free_id and id_gen (see allocateId / MutexDestroy).
112  SpinMutex mtx;
113  InternalMmapVector<u32> free_id;  // recycled mutex ids
114  int id_gen;  // next never-used id
115};
116
117DDetector *DDetector::Create(const DDFlags *flags) {
118  (void)flags;
119  void *mem = MmapOrDie(sizeof(DD), "deadlock detector");
120  return new(mem) DD(flags);
121}
122
123DD::DD(const DDFlags *flags)
124    : flags(*flags)
125    , free_id(1024) {
126  id_gen = 0;
127}
128
129DDPhysicalThread* DD::CreatePhysicalThread() {
130  DDPhysicalThread *pt = (DDPhysicalThread*)MmapOrDie(sizeof(DDPhysicalThread),
131      "deadlock detector (physical thread)");
132  return pt;
133}
134
// Releases per-OS-thread state. The destructor is invoked explicitly
// because the storage came from MmapOrDie, not operator new.
135void DD::DestroyPhysicalThread(DDPhysicalThread *pt) {
136  pt->~DDPhysicalThread();
137  UnmapOrDie(pt, sizeof(DDPhysicalThread));
138}
139
140DDLogicalThread* DD::CreateLogicalThread(u64 ctx) {
141  DDLogicalThread *lt = (DDLogicalThread*)InternalAlloc(
142      sizeof(DDLogicalThread));
143  lt->ctx = ctx;
144  lt->nlocked = 0;
145  return lt;
146}
147
// Releases per-logical-thread state; explicit destructor call mirrors
// the raw InternalAlloc in CreateLogicalThread.
148void DD::DestroyLogicalThread(DDLogicalThread *lt) {
149  lt->~DDLogicalThread();
150  InternalFree(lt);
151}
152
153void DD::MutexInit(DDCallback *cb, DDMutex *m) {
154  VPrintf(2, "#%llu: DD::MutexInit(%p)\n", cb->lt->ctx, m);
155  m->id = kNoId;
156  m->recursion = 0;
157  atomic_store(&m->owner, 0, memory_order_relaxed);
158}
159
160Mutex *DD::getMutex(u32 id) {
161  return &mutex[id / kL2Size][id % kL2Size];
162}
163
164u32 DD::getMutexId(Mutex *m) {
165  for (int i = 0; i < kL1Size; i++) {
166    Mutex *tab = mutex[i];
167    if (tab == 0)
168      break;
169    if (m >= tab && m < tab + kL2Size)
170      return i * kL2Size + (m - tab);
171  }
172  return -1;
173}
174
175u32 DD::allocateId(DDCallback *cb) {
176  u32 id = -1;
177  SpinMutexLock l(&mtx);
178  if (free_id.size() > 0) {
179    id = free_id.back();
180    free_id.pop_back();
181  } else {
182    CHECK_LT(id_gen, kMaxMutex);
183    if ((id_gen % kL2Size) == 0) {
184      mutex[id_gen / kL2Size] = (Mutex*)MmapOrDie(kL2Size * sizeof(Mutex),
185          "deadlock detector (mutex table)");
186    }
187    id = id_gen++;
188  }
189  CHECK_LE(id, kMaxMutex);
190  VPrintf(3, "#%llu: DD::allocateId assign id %d\n",
191      cb->lt->ctx, id);
192  return id;
193}
194
195void DD::MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock) {
196  VPrintf(2, "#%llu: DD::MutexBeforeLock(%p, wlock=%d) nlocked=%d\n",
197      cb->lt->ctx, m, wlock, cb->lt->nlocked);
198  DDPhysicalThread *pt = cb->pt;
199  DDLogicalThread *lt = cb->lt;
200
201  uptr owner = atomic_load(&m->owner, memory_order_relaxed);
202  if (owner == (uptr)cb->lt) {
203    VPrintf(3, "#%llu: DD::MutexBeforeLock recursive\n",
204        cb->lt->ctx);
205    return;
206  }
207
208  CHECK_LE(lt->nlocked, kMaxNesting);
209
210  // FIXME(dvyukov): don't allocate id if lt->nlocked == 0?
211  if (m->id == kNoId)
212    m->id = allocateId(cb);
213
214  ThreadMutex *tm = &lt->locked[lt->nlocked++];
215  tm->id = m->id;
216  if (flags.second_deadlock_stack)
217    tm->stk = cb->Unwind();
218  if (lt->nlocked == 1) {
219    VPrintf(3, "#%llu: DD::MutexBeforeLock first mutex\n",
220        cb->lt->ctx);
221    return;
222  }
223
224  bool added = false;
225  Mutex *mtx = getMutex(m->id);
226  for (int i = 0; i < lt->nlocked - 1; i++) {
227    u32 id1 = lt->locked[i].id;
228    u32 stk1 = lt->locked[i].stk;
229    Mutex *mtx1 = getMutex(id1);
230    SpinMutexLock l(&mtx1->mtx);
231    if (mtx1->nlink == kMaxLink) {
232      // FIXME(dvyukov): check stale links
233      continue;
234    }
235    int li = 0;
236    for (; li < mtx1->nlink; li++) {
237      Link *link = &mtx1->link[li];
238      if (link->id == m->id) {
239        if (link->seq != mtx->seq) {
240          link->seq = mtx->seq;
241          link->tid = lt->ctx;
242          link->stk0 = stk1;
243          link->stk1 = cb->Unwind();
244          added = true;
245          VPrintf(3, "#%llu: DD::MutexBeforeLock added %d->%d link\n",
246              cb->lt->ctx, getMutexId(mtx1), m->id);
247        }
248        break;
249      }
250    }
251    if (li == mtx1->nlink) {
252      // FIXME(dvyukov): check stale links
253      Link *link = &mtx1->link[mtx1->nlink++];
254      link->id = m->id;
255      link->seq = mtx->seq;
256      link->tid = lt->ctx;
257      link->stk0 = stk1;
258      link->stk1 = cb->Unwind();
259      added = true;
260      VPrintf(3, "#%llu: DD::MutexBeforeLock added %d->%d link\n",
261          cb->lt->ctx, getMutexId(mtx1), m->id);
262    }
263  }
264
265  if (!added || mtx->nlink == 0) {
266    VPrintf(3, "#%llu: DD::MutexBeforeLock don't check\n",
267        cb->lt->ctx);
268    return;
269  }
270
271  CycleCheck(pt, lt, m);
272}
273
274void DD::MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock,
275    bool trylock) {
276  VPrintf(2, "#%llu: DD::MutexAfterLock(%p, wlock=%d, try=%d) nlocked=%d\n",
277      cb->lt->ctx, m, wlock, trylock, cb->lt->nlocked);
278  DDLogicalThread *lt = cb->lt;
279
280  uptr owner = atomic_load(&m->owner, memory_order_relaxed);
281  if (owner == (uptr)cb->lt) {
282    VPrintf(3, "#%llu: DD::MutexAfterLock recursive\n", cb->lt->ctx);
283    CHECK(wlock);
284    m->recursion++;
285    return;
286  }
287  CHECK_EQ(owner, 0);
288  if (wlock) {
289    VPrintf(3, "#%llu: DD::MutexAfterLock set owner\n", cb->lt->ctx);
290    CHECK_EQ(m->recursion, 0);
291    m->recursion = 1;
292    atomic_store(&m->owner, (uptr)cb->lt, memory_order_relaxed);
293  }
294
295  if (!trylock)
296    return;
297
298  CHECK_LE(lt->nlocked, kMaxNesting);
299  if (m->id == kNoId)
300    m->id = allocateId(cb);
301  ThreadMutex *tm = &lt->locked[lt->nlocked++];
302  tm->id = m->id;
303  if (flags.second_deadlock_stack)
304    tm->stk = cb->Unwind();
305}
306
307void DD::MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) {
308  VPrintf(2, "#%llu: DD::MutexBeforeUnlock(%p, wlock=%d) nlocked=%d\n",
309      cb->lt->ctx, m, wlock, cb->lt->nlocked);
310  DDLogicalThread *lt = cb->lt;
311
312  uptr owner = atomic_load(&m->owner, memory_order_relaxed);
313  if (owner == (uptr)cb->lt) {
314    VPrintf(3, "#%llu: DD::MutexBeforeUnlock recursive\n", cb->lt->ctx);
315    if (--m->recursion > 0)
316      return;
317    VPrintf(3, "#%llu: DD::MutexBeforeUnlock reset owner\n", cb->lt->ctx);
318    atomic_store(&m->owner, 0, memory_order_relaxed);
319  }
320  CHECK_NE(m->id, kNoId);
321  int last = lt->nlocked - 1;
322  for (int i = last; i >= 0; i--) {
323    if (cb->lt->locked[i].id == m->id) {
324      lt->locked[i] = lt->locked[last];
325      lt->nlocked--;
326      break;
327    }
328  }
329}
330
// Called on mutex destruction: drops m from this thread's held list,
// invalidates the descriptor so stale graph edges are ignored, and
// recycles the id. Note the two separate critical sections below --
// the per-descriptor spin lock and the global DD::mtx are never held
// together here.
331void DD::MutexDestroy(DDCallback *cb, DDMutex *m) {
332  VPrintf(2, "#%llu: DD::MutexDestroy(%p)\n",
333      cb->lt->ctx, m);
334  DDLogicalThread *lt = cb->lt;
335
336  if (m->id == kNoId)
337    return;
338
339  // Remove the mutex from lt->locked if there.
340  int last = lt->nlocked - 1;
341  for (int i = last; i >= 0; i--) {
342    if (lt->locked[i].id == m->id) {
343      lt->locked[i] = lt->locked[last];
344      lt->nlocked--;
345      break;
346    }
347  }
348
349  // Clear and invalidate the mutex descriptor.
350  {
351    Mutex *mtx = getMutex(m->id);
352    SpinMutexLock l(&mtx->mtx);
    // Bumping seq invalidates every Link that still references this id:
    // readers compare link->seq against mtx->seq (see CycleCheck).
353    mtx->seq++;
354    mtx->nlink = 0;
355  }
356
357  // Return id to cache.
358  {
    // This is the global DD::mtx (the local `mtx` above is out of scope).
359    SpinMutexLock l(&mtx);
360    free_id.push_back(m->id);
361  }
362}
363
// Iterative DFS over the lock-order graph starting from m's successors.
// pending[] is the explicit work stack, path[] the current DFS path;
// reaching m again means the newly added edges closed a cycle, i.e. a
// potential deadlock, which is handed to Report().
364void DD::CycleCheck(DDPhysicalThread *pt, DDLogicalThread *lt,
365    DDMutex *m) {
366  internal_memset(pt->visited, 0, sizeof(pt->visited));
367  int npath = 0;
368  int npending = 0;
369  {
370    Mutex *mtx = getMutex(m->id);
371    SpinMutexLock l(&mtx->mtx);
    // Seed the work stack with m's outgoing edges.
372    for (int li = 0; li < mtx->nlink; li++)
373      pt->pending[npending++] = mtx->link[li];
374  }
375  while (npending > 0) {
376    Link link = pt->pending[--npending];
    // kEndId is a backtrack marker pushed below: popping it pops one
    // level off the current path.
377    if (link.id == kEndId) {
378      npath--;
379      continue;
380    }
381    if (pt->visited[link.id])
382      continue;
383    Mutex *mtx1 = getMutex(link.id);
384    SpinMutexLock l(&mtx1->mtx);
    // Stale edge: the target was destroyed (seq bumped) since the link
    // was recorded -- ignore it.
385    if (mtx1->seq != link.seq)
386      continue;
387    pt->visited[link.id] = true;
388    if (mtx1->nlink == 0)
389      continue;
390    pt->path[npath++] = link;
391    pt->pending[npending++] = Link(kEndId);
392    if (link.id == m->id)
393      return Report(pt, lt, npath);  // Bingo!
394    for (int li = 0; li < mtx1->nlink; li++) {
395      Link *link1 = &mtx1->link[li];
396      // Mutex *mtx2 = getMutex(link->id);
397      // FIXME(dvyukov): fast seq check
398      // FIXME(dvyukov): fast nlink != 0 check
399      // FIXME(dvyukov): fast pending check?
400      // FIXME(dvyukov): npending can be larger than kMaxMutex
401      pt->pending[npending++] = *link1;
402    }
403  }
404}
405
406void DD::Report(DDPhysicalThread *pt, DDLogicalThread *lt, int npath) {
407  DDReport *rep = &pt->rep;
408  rep->n = npath;
409  for (int i = 0; i < npath; i++) {
410    Link *link = &pt->path[i];
411    Link *link0 = &pt->path[i ? i - 1 : npath - 1];
412    rep->loop[i].thr_ctx = link->tid;
413    rep->loop[i].mtx_ctx0 = link0->id;
414    rep->loop[i].mtx_ctx1 = link->id;
415    rep->loop[i].stk[0] = flags.second_deadlock_stack ? link->stk0 : 0;
416    rep->loop[i].stk[1] = link->stk1;
417  }
418  pt->report_pending = true;
419}
420
421DDReport *DD::GetReport(DDCallback *cb) {
422  if (!cb->pt->report_pending)
423    return 0;
424  cb->pt->report_pending = false;
425  return &cb->pt->rep;
426}
427
428}  // namespace __sanitizer
429#endif  // #if SANITIZER_DEADLOCK_DETECTOR_VERSION == 2
430