//===-- tsan_rtl_report.cc ------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
#include "tsan_report.h"
#include "tsan_sync.h"
#include "tsan_mman.h"
#include "tsan_flags.h"
#include "tsan_fd.h"

namespace __tsan {

using namespace __sanitizer;  // NOLINT

static ReportStack *SymbolizeStack(StackTrace trace);

void TsanCheckFailed(const char *file, int line, const char *cond,
                     u64 v1, u64 v2) {
  // There is a high probability that interceptors will check-fail as well,
  // and there is no point in processing interceptors anyway,
  // since we are going to die soon.
  ScopedIgnoreInterceptors ignore;
  Printf("FATAL: ThreadSanitizer CHECK failed: "
         "%s:%d \"%s\" (0x%zx, 0x%zx)\n",
         file, line, cond, (uptr)v1, (uptr)v2);
  PrintCurrentStackSlow(StackTrace::GetCurrentPc());
  Die();
}

// Can be overridden by an application/test to intercept reports.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnReport(const ReportDesc *rep, bool suppressed);
#else
SANITIZER_INTERFACE_ATTRIBUTE
bool WEAK OnReport(const ReportDesc *rep, bool suppressed) {
  (void)rep;
  return suppressed;
}
#endif
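// A minimal sketch of how an application or test could override the weak
// hook above (hypothetical code, not part of the runtime): return true to
// suppress the report, or `suppressed` to keep the default decision.
//
//   namespace __tsan {
//   bool OnReport(const ReportDesc *rep, bool suppressed) {
//     // Inspect rep->typ, rep->mops, etc. here if desired.
//     return suppressed;  // keep the runtime's suppression decision
//   }
//   }  // namespace __tsan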
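// Strips the bottom frame of the stack: the frame above main(), our
// internal thread start routine, or global ctors init, since it carries
// no useful information for the user.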
static void StackStripMain(SymbolizedStack *frames) {
  SymbolizedStack *last_frame = nullptr;
  SymbolizedStack *last_frame2 = nullptr;
  for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
    last_frame2 = last_frame;
    last_frame = cur;
  }

  if (last_frame2 == nullptr)
    return;
#ifndef SANITIZER_GO
  const char *last = last_frame->info.function;
  const char *last2 = last_frame2->info.function;
  // Strip the frame above 'main'.
  if (last2 && 0 == internal_strcmp(last2, "main")) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
  // Strip our internal thread start routine.
  } else if (last && 0 == internal_strcmp(last, "__tsan_thread_start_func")) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
  // Strip global ctors init.
  } else if (last && 0 == internal_strcmp(last, "__do_global_ctors_aux")) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
  // If both are null, we probably just failed to symbolize.
  } else if (last || last2) {
    // Ensure that we recovered the stack completely. A trimmed stack can
    // legitimately happen if some code is not instrumented, so this is only
    // a debug print. However, we must try hard not to miss it due to our
    // own fault.
    DPrintf("Bottom stack frame is missing\n");
  }
#else
  // The last frame always points into the runtime (gosched0, goexit0,
  // runtime.main).
  last_frame->ClearAll();
  last_frame2->next = nullptr;
#endif
}

ReportStack *SymbolizeStackId(u32 stack_id) {
  if (stack_id == 0)
    return nullptr;
  StackTrace stack = StackDepotGet(stack_id);
  if (stack.trace == nullptr)
    return nullptr;
  return SymbolizeStack(stack);
}

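// Symbolizes each PC of the trace. PCs are return addresses, so each one
// is shifted back to the previous instruction (the call site) before
// symbolization; PCs with the external bit set are left as-is.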
static ReportStack *SymbolizeStack(StackTrace trace) {
  if (trace.size == 0)
    return nullptr;
  SymbolizedStack *top = nullptr;
  for (uptr si = 0; si < trace.size; si++) {
    const uptr pc = trace.trace[si];
    uptr pc1 = pc;
    // We obtain the return address, but we're interested in the previous
    // instruction.
    if ((pc & kExternalPCBit) == 0)
      pc1 = StackTrace::GetPreviousInstructionPc(pc);
    SymbolizedStack *ent = SymbolizeCode(pc1);
    CHECK_NE(ent, 0);
    SymbolizedStack *last = ent;
    while (last->next) {
      last->info.address = pc;  // restore original pc for report
      last = last->next;
    }
    last->info.address = pc;  // restore original pc for report
    last->next = top;
    top = ent;
  }
  StackStripMain(top);

  ReportStack *stack = ReportStack::New();
  stack->frames = top;
  return stack;
}

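// ScopedReport is an RAII helper around report construction: it allocates
// the report descriptor and holds the report mutexes for its whole
// lifetime, so only one report is built and printed at a time.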
ScopedReport::ScopedReport(ReportType typ) {
  ctx->thread_registry->CheckLocked();
  void *mem = internal_alloc(MBlockReport, sizeof(ReportDesc));
  rep_ = new(mem) ReportDesc;
  rep_->typ = typ;
  ctx->report_mtx.Lock();
  CommonSanitizerReportMutex.Lock();
}

ScopedReport::~ScopedReport() {
  CommonSanitizerReportMutex.Unlock();
  ctx->report_mtx.Unlock();
  DestroyAndFree(rep_);
}

void ScopedReport::AddStack(StackTrace stack, bool suppressable) {
  ReportStack **rs = rep_->stacks.PushBack();
  *rs = SymbolizeStack(stack);
  (*rs)->suppressable = suppressable;
}

void ScopedReport::AddMemoryAccess(uptr addr, Shadow s, StackTrace stack,
                                   const MutexSet *mset) {
  void *mem = internal_alloc(MBlockReportMop, sizeof(ReportMop));
  ReportMop *mop = new(mem) ReportMop;
  rep_->mops.PushBack(mop);
  mop->tid = s.tid();
  mop->addr = addr + s.addr0();
  mop->size = s.size();
  mop->write = s.IsWrite();
  mop->atomic = s.IsAtomic();
  mop->stack = SymbolizeStack(stack);
  if (mop->stack)
    mop->stack->suppressable = true;
  for (uptr i = 0; i < mset->Size(); i++) {
    MutexSet::Desc d = mset->Get(i);
    u64 mid = this->AddMutex(d.id);
    ReportMopMutex mtx = {mid, d.write};
    mop->mset.PushBack(mtx);
  }
}

void ScopedReport::AddUniqueTid(int unique_tid) {
  rep_->unique_tids.PushBack(unique_tid);
}

void ScopedReport::AddThread(const ThreadContext *tctx, bool suppressable) {
  for (uptr i = 0; i < rep_->threads.Size(); i++) {
    if ((u32)rep_->threads[i]->id == tctx->tid)
      return;
  }
  void *mem = internal_alloc(MBlockReportThread, sizeof(ReportThread));
  ReportThread *rt = new(mem) ReportThread();
  rep_->threads.PushBack(rt);
  rt->id = tctx->tid;
  rt->pid = tctx->os_id;
  rt->running = (tctx->status == ThreadStatusRunning);
  rt->name = internal_strdup(tctx->name);
  rt->parent_tid = tctx->parent_tid;
  rt->stack = SymbolizeStackId(tctx->creation_stack_id);
  if (rt->stack)
    rt->stack->suppressable = suppressable;
}

#ifndef SANITIZER_GO
static ThreadContext *FindThreadByUidLocked(int unique_id) {
  ctx->thread_registry->CheckLocked();
  for (unsigned i = 0; i < kMaxTid; i++) {
    ThreadContext *tctx = static_cast<ThreadContext*>(
        ctx->thread_registry->GetThreadLocked(i));
    if (tctx && tctx->unique_id == (u32)unique_id) {
      return tctx;
    }
  }
  return nullptr;
}

static ThreadContext *FindThreadByTidLocked(int tid) {
  ctx->thread_registry->CheckLocked();
  return static_cast<ThreadContext*>(
      ctx->thread_registry->GetThreadLocked(tid));
}

static bool IsInStackOrTls(ThreadContextBase *tctx_base, void *arg) {
  uptr addr = (uptr)arg;
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  if (tctx->status != ThreadStatusRunning)
    return false;
  ThreadState *thr = tctx->thr;
  CHECK(thr);
  return ((addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size) ||
          (addr >= thr->tls_addr && addr < thr->tls_addr + thr->tls_size));
}

ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
  ctx->thread_registry->CheckLocked();
  ThreadContext *tctx = static_cast<ThreadContext*>(
      ctx->thread_registry->FindThreadContextLocked(IsInStackOrTls,
                                                    (void*)addr));
  if (!tctx)
    return nullptr;
  ThreadState *thr = tctx->thr;
  CHECK(thr);
  *is_stack = (addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size);
  return tctx;
}
#endif

void ScopedReport::AddThread(int unique_tid, bool suppressable) {
#ifndef SANITIZER_GO
  if (const ThreadContext *tctx = FindThreadByUidLocked(unique_tid))
    AddThread(tctx, suppressable);
#endif
}

void ScopedReport::AddMutex(const SyncVar *s) {
  for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
    if (rep_->mutexes[i]->id == s->uid)
      return;
  }
  void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
  ReportMutex *rm = new(mem) ReportMutex();
  rep_->mutexes.PushBack(rm);
  rm->id = s->uid;
  rm->addr = s->addr;
  rm->destroyed = false;
  rm->stack = SymbolizeStackId(s->creation_stack_id);
}

u64 ScopedReport::AddMutex(u64 id) {
  u64 uid = 0;
  u64 mid = id;
  uptr addr = SyncVar::SplitId(id, &uid);
  SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr);
  // Check that the mutex is still alive.
  // Another mutex can be created at the same address,
  // so check uid as well.
  if (s && s->CheckId(uid)) {
    mid = s->uid;
    AddMutex(s);
  } else {
    AddDeadMutex(id);
  }
  if (s)
    s->mtx.Unlock();
  return mid;
}

void ScopedReport::AddDeadMutex(u64 id) {
  for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
    if (rep_->mutexes[i]->id == id)
      return;
  }
  void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
  ReportMutex *rm = new(mem) ReportMutex();
  rep_->mutexes.PushBack(rm);
  rm->id = id;
  rm->addr = 0;
  rm->destroyed = true;
  rm->stack = nullptr;
}

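// Classifies the racy address and attaches the matching description to the
// report: a file descriptor, a heap chunk, a thread stack/TLS range, or a
// global (plus the owning thread, when it is known).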
void ScopedReport::AddLocation(uptr addr, uptr size) {
  if (addr == 0)
    return;
#ifndef SANITIZER_GO
  int fd = -1;
  int creat_tid = -1;
  u32 creat_stack = 0;
  if (FdLocation(addr, &fd, &creat_tid, &creat_stack)) {
    ReportLocation *loc = ReportLocation::New(ReportLocationFD);
    loc->fd = fd;
    loc->tid = creat_tid;
    loc->stack = SymbolizeStackId(creat_stack);
    rep_->locs.PushBack(loc);
    ThreadContext *tctx = FindThreadByUidLocked(creat_tid);
    if (tctx)
      AddThread(tctx);
    return;
  }
  MBlock *b = nullptr;
  Allocator *a = allocator();
  if (a->PointerIsMine((void*)addr)) {
    void *block_begin = a->GetBlockBegin((void*)addr);
    if (block_begin)
      b = ctx->metamap.GetBlock((uptr)block_begin);
  }
  if (b != nullptr) {
    ThreadContext *tctx = FindThreadByTidLocked(b->tid);
    ReportLocation *loc = ReportLocation::New(ReportLocationHeap);
    loc->heap_chunk_start = (uptr)allocator()->GetBlockBegin((void *)addr);
    loc->heap_chunk_size = b->siz;
    loc->tid = tctx ? tctx->tid : b->tid;
    loc->stack = SymbolizeStackId(b->stk);
    rep_->locs.PushBack(loc);
    if (tctx)
      AddThread(tctx);
    return;
  }
  bool is_stack = false;
  if (ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack)) {
    ReportLocation *loc =
        ReportLocation::New(is_stack ? ReportLocationStack : ReportLocationTLS);
    loc->tid = tctx->tid;
    rep_->locs.PushBack(loc);
    AddThread(tctx);
  }
  if (ReportLocation *loc = SymbolizeData(addr)) {
    loc->suppressable = true;
    rep_->locs.PushBack(loc);
    return;
  }
#endif
}

#ifndef SANITIZER_GO
void ScopedReport::AddSleep(u32 stack_id) {
  rep_->sleep = SymbolizeStackId(stack_id);
}
#endif

void ScopedReport::SetCount(int count) {
  rep_->count = count;
}

const ReportDesc *ScopedReport::GetReport() const {
  return rep_;
}

void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
                  MutexSet *mset) {
  // This function restores the stack trace and mutex set for the
  // thread/epoch. It does so by getting the stack trace and mutex set
  // at the beginning of the trace part, and then replaying the trace
  // until the given epoch.
  ctx->thread_registry->CheckLocked();
  ThreadContext *tctx = static_cast<ThreadContext*>(
      ctx->thread_registry->GetThreadLocked(tid));
  if (tctx == nullptr)
    return;
  if (tctx->status != ThreadStatusRunning
      && tctx->status != ThreadStatusFinished
      && tctx->status != ThreadStatusDead)
    return;
  Trace* trace = ThreadTrace(tctx->tid);
  Lock l(&trace->mtx);
  const int partidx = (epoch / kTracePartSize) % TraceParts();
  TraceHeader* hdr = &trace->headers[partidx];
  if (epoch < hdr->epoch0)
    return;
  const u64 epoch0 = RoundDown(epoch, TraceSize());
  const u64 eend = epoch % TraceSize();
  const u64 ebegin = RoundDown(eend, kTracePartSize);
  DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n",
          tid, (uptr)epoch, (uptr)ebegin, (uptr)eend, partidx);
  InternalScopedBuffer<uptr> stack(kShadowStackSize);
  for (uptr i = 0; i < hdr->stack0.size; i++) {
    stack[i] = hdr->stack0.trace[i];
    DPrintf2("  #%02zu: pc=%zx\n", i, stack[i]);
  }
  if (mset)
    *mset = hdr->mset0;
  uptr pos = hdr->stack0.size;
  Event *events = (Event*)GetThreadTrace(tid);
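  // Each event is a u64: the top 3 bits encode the event type, and the
  // low 61 bits carry the payload: a pc for memory access and function
  // entry/exit events, or a mutex id for lock/unlock events.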
  for (uptr i = ebegin; i <= eend; i++) {
    Event ev = events[i];
    EventType typ = (EventType)(ev >> 61);
    uptr pc = (uptr)(ev & ((1ull << 61) - 1));
    DPrintf2("  %zu typ=%d pc=%zx\n", i, typ, pc);
    if (typ == EventTypeMop) {
      stack[pos] = pc;
    } else if (typ == EventTypeFuncEnter) {
      stack[pos++] = pc;
    } else if (typ == EventTypeFuncExit) {
      if (pos > 0)
        pos--;
    }
    if (mset) {
      if (typ == EventTypeLock) {
        mset->Add(pc, true, epoch0 + i);
      } else if (typ == EventTypeUnlock) {
        mset->Del(pc, true);
      } else if (typ == EventTypeRLock) {
        mset->Add(pc, false, epoch0 + i);
      } else if (typ == EventTypeRUnlock) {
        mset->Del(pc, false);
      }
    }
    for (uptr j = 0; j <= pos; j++)
      DPrintf2("      #%zu: %zx\n", j, stack[j]);
  }
  if (pos == 0 && stack[0] == 0)
    return;
  pos++;
  stk->Init(stack.data(), pos);
}

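// Deduplicates reports: a report is dropped if a pair of stacks with the
// same MD5 hashes, or an overlapping address range, has already been
// reported (controlled by the suppress_equal_stacks and
// suppress_equal_addresses flags).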
static bool HandleRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2],
                             uptr addr_min, uptr addr_max) {
  bool equal_stack = false;
  RacyStacks hash;
  if (flags()->suppress_equal_stacks) {
    hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
    hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
    for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) {
      if (hash == ctx->racy_stacks[i]) {
        DPrintf("ThreadSanitizer: suppressing report as doubled (stack)\n");
        equal_stack = true;
        break;
      }
    }
  }
  bool equal_address = false;
  RacyAddress ra0 = {addr_min, addr_max};
  if (flags()->suppress_equal_addresses) {
    for (uptr i = 0; i < ctx->racy_addresses.Size(); i++) {
      RacyAddress ra2 = ctx->racy_addresses[i];
      uptr maxbeg = max(ra0.addr_min, ra2.addr_min);
      uptr minend = min(ra0.addr_max, ra2.addr_max);
      if (maxbeg < minend) {
        DPrintf("ThreadSanitizer: suppressing report as doubled (addr)\n");
        equal_address = true;
        break;
      }
    }
  }
  if (equal_stack || equal_address) {
    if (!equal_stack)
      ctx->racy_stacks.PushBack(hash);
    if (!equal_address)
      ctx->racy_addresses.PushBack(ra0);
    return true;
  }
  return false;
}

static void AddRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2],
                          uptr addr_min, uptr addr_max) {
  if (flags()->suppress_equal_stacks) {
    RacyStacks hash;
    hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
    hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
    ctx->racy_stacks.PushBack(hash);
  }
  if (flags()->suppress_equal_addresses) {
    RacyAddress ra0 = {addr_min, addr_max};
    ctx->racy_addresses.PushBack(ra0);
  }
}

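// Runs the report through user suppressions and the OnReport hook, then
// prints it. Returns true if the report was actually emitted.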
bool OutputReport(ThreadState *thr, const ScopedReport &srep) {
  atomic_store(&ctx->last_symbolize_time_ns, NanoTime(), memory_order_relaxed);
  const ReportDesc *rep = srep.GetReport();
  Suppression *supp = nullptr;
  uptr suppress_pc = 0;
  for (uptr i = 0; suppress_pc == 0 && i < rep->mops.Size(); i++)
    suppress_pc = IsSuppressed(rep->typ, rep->mops[i]->stack, &supp);
  for (uptr i = 0; suppress_pc == 0 && i < rep->stacks.Size(); i++)
    suppress_pc = IsSuppressed(rep->typ, rep->stacks[i], &supp);
  for (uptr i = 0; suppress_pc == 0 && i < rep->threads.Size(); i++)
    suppress_pc = IsSuppressed(rep->typ, rep->threads[i]->stack, &supp);
  for (uptr i = 0; suppress_pc == 0 && i < rep->locs.Size(); i++)
    suppress_pc = IsSuppressed(rep->typ, rep->locs[i], &supp);
  if (suppress_pc != 0) {
    FiredSuppression s = {srep.GetReport()->typ, suppress_pc, supp};
    ctx->fired_suppressions.push_back(s);
  }
  {
    bool old_is_freeing = thr->is_freeing;
    thr->is_freeing = false;
    bool suppressed = OnReport(rep, suppress_pc != 0);
    thr->is_freeing = old_is_freeing;
    if (suppressed)
      return false;
  }
  PrintReport(rep);
  ctx->nreported++;
  if (flags()->halt_on_error)
    internal__exit(flags()->exitcode);
  return true;
}

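// Returns true if some pc of the trace has already fired a suppression
// of the same report type (and bumps that suppression's hit count).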
bool IsFiredSuppression(Context *ctx, const ScopedReport &srep,
                        StackTrace trace) {
  for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
    if (ctx->fired_suppressions[k].type != srep.GetReport()->typ)
      continue;
    for (uptr j = 0; j < trace.size; j++) {
      FiredSuppression *s = &ctx->fired_suppressions[k];
      if (trace.trace[j] == s->pc) {
        if (s->supp)
          s->supp->hit_count++;
        return true;
      }
    }
  }
  return false;
}

static bool IsFiredSuppression(Context *ctx,
                               const ScopedReport &srep,
                               uptr addr) {
  for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
    if (ctx->fired_suppressions[k].type != srep.GetReport()->typ)
      continue;
    FiredSuppression *s = &ctx->fired_suppressions[k];
    if (addr == s->pc) {
      if (s->supp)
        s->supp->hit_count++;
      return true;
    }
  }
  return false;
}

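// Returns true if the race must be reported even when report_atomic_races
// is disabled: either neither access is atomic, or an atomic access races
// with a free.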
static bool RaceBetweenAtomicAndFree(ThreadState *thr) {
  Shadow s0(thr->racy_state[0]);
  Shadow s1(thr->racy_state[1]);
  CHECK(!(s0.IsAtomic() && s1.IsAtomic()));
  if (!s0.IsAtomic() && !s1.IsAtomic())
    return true;
  if (s0.IsAtomic() && s1.IsFreed())
    return true;
  if (s1.IsAtomic() && thr->is_freeing)
    return true;
  return false;
}

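// The main race reporting routine: classifies the report, restores the
// second stack and mutex set from the trace, applies suppressions and
// deduplication, and collects the involved threads, mutexes and memory
// location before emitting the report.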
void ReportRace(ThreadState *thr) {
  CheckNoLocks(thr);

  // Symbolizer makes lots of intercepted calls. Processing them would at
  // best cause deadlocks on internal mutexes.
  ScopedIgnoreInterceptors ignore;

  if (!flags()->report_bugs)
    return;
  if (!flags()->report_atomic_races && !RaceBetweenAtomicAndFree(thr))
    return;

  bool freed = false;
  {
    Shadow s(thr->racy_state[1]);
    freed = s.GetFreedAndReset();
    thr->racy_state[1] = s.raw();
  }

  uptr addr = ShadowToMem((uptr)thr->racy_shadow_addr);
  uptr addr_min = 0;
  uptr addr_max = 0;
  {
    uptr a0 = addr + Shadow(thr->racy_state[0]).addr0();
    uptr a1 = addr + Shadow(thr->racy_state[1]).addr0();
    uptr e0 = a0 + Shadow(thr->racy_state[0]).size();
    uptr e1 = a1 + Shadow(thr->racy_state[1]).size();
    addr_min = min(a0, a1);
    addr_max = max(e0, e1);
    if (IsExpectedReport(addr_min, addr_max - addr_min))
      return;
  }

  ThreadRegistryLock l0(ctx->thread_registry);

  ReportType typ = ReportTypeRace;
  if (thr->is_vptr_access && freed)
    typ = ReportTypeVptrUseAfterFree;
  else if (thr->is_vptr_access)
    typ = ReportTypeVptrRace;
  else if (freed)
    typ = ReportTypeUseAfterFree;
  ScopedReport rep(typ);
  if (IsFiredSuppression(ctx, rep, addr))
    return;
  const uptr kMop = 2;
  VarSizeStackTrace traces[kMop];
  const uptr toppc = TraceTopPC(thr);
  ObtainCurrentStack(thr, toppc, &traces[0]);
  if (IsFiredSuppression(ctx, rep, traces[0]))
    return;
  InternalScopedBuffer<MutexSet> mset2(1);
  new(mset2.data()) MutexSet();
  Shadow s2(thr->racy_state[1]);
  RestoreStack(s2.tid(), s2.epoch(), &traces[1], mset2.data());
  if (IsFiredSuppression(ctx, rep, traces[1]))
    return;

  if (HandleRacyStacks(thr, traces, addr_min, addr_max))
    return;

  for (uptr i = 0; i < kMop; i++) {
    Shadow s(thr->racy_state[i]);
    rep.AddMemoryAccess(addr, s, traces[i],
                        i == 0 ? &thr->mset : mset2.data());
  }

  for (uptr i = 0; i < kMop; i++) {
    FastState s(thr->racy_state[i]);
    ThreadContext *tctx = static_cast<ThreadContext*>(
        ctx->thread_registry->GetThreadLocked(s.tid()));
    if (s.epoch() < tctx->epoch0 || s.epoch() > tctx->epoch1)
      continue;
    rep.AddThread(tctx);
  }

  rep.AddLocation(addr_min, addr_max - addr_min);

#ifndef SANITIZER_GO
  {  // NOLINT
    Shadow s(thr->racy_state[1]);
    if (s.epoch() <= thr->last_sleep_clock.get(s.tid()))
      rep.AddSleep(thr->last_sleep_stack_id);
  }
#endif

  if (!OutputReport(thr, rep))
    return;

  AddRacyStacks(thr, traces, addr_min, addr_max);
}

void PrintCurrentStack(ThreadState *thr, uptr pc) {
  VarSizeStackTrace trace;
  ObtainCurrentStack(thr, pc, &trace);
  PrintStack(SymbolizeStack(trace));
}

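// "Slow" because it unwinds the actual call stack instead of using the
// shadow stack; used on failure paths where the shadow stack may be
// unreliable. The unwinder returns frames innermost-first, while
// SymbolizeStack expects shadow-stack order (innermost last), hence the
// reversal below.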
void PrintCurrentStackSlow(uptr pc) {
#ifndef SANITIZER_GO
  BufferedStackTrace *ptrace =
      new(internal_alloc(MBlockStackTrace, sizeof(BufferedStackTrace)))
          BufferedStackTrace();
  ptrace->Unwind(kStackTraceMax, pc, 0, 0, 0, 0, false);
  for (uptr i = 0; i < ptrace->size / 2; i++) {
    uptr tmp = ptrace->trace_buffer[i];
    ptrace->trace_buffer[i] = ptrace->trace_buffer[ptrace->size - i - 1];
    ptrace->trace_buffer[ptrace->size - i - 1] = tmp;
  }
  PrintStack(SymbolizeStack(*ptrace));
#endif
}

}  // namespace __tsan

using namespace __tsan;

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_print_stack_trace() {
  PrintCurrentStackSlow(StackTrace::GetCurrentPc());
}
}  // extern "C"