tsan_rtl_report.cc revision 39968339a07d790aadcf27534f92a0de8c0c90fb
//===-- tsan_rtl_report.cc ------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
#include "tsan_report.h"
#include "tsan_sync.h"
#include "tsan_mman.h"
#include "tsan_flags.h"
#include "tsan_fd.h"

namespace __tsan {

using namespace __sanitizer;  // NOLINT

static ReportStack *SymbolizeStack(const StackTrace& trace);

void TsanCheckFailed(const char *file, int line, const char *cond,
                     u64 v1, u64 v2) {
  ScopedInRtl in_rtl;
  Printf("FATAL: ThreadSanitizer CHECK failed: "
         "%s:%d \"%s\" (0x%zx, 0x%zx)\n",
         file, line, cond, (uptr)v1, (uptr)v2);
  PrintCurrentStackSlow();
  Die();
}

// Can be overridden by an application/test to intercept reports.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnReport(const ReportDesc *rep, bool suppressed);
#else
SANITIZER_INTERFACE_ATTRIBUTE
bool WEAK OnReport(const ReportDesc *rep, bool suppressed) {
  (void)rep;
  return suppressed;
}
#endif

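// Post-processes a symbolized stack for reporting: drops the "__interceptor_"
// prefix from function names, strips flags()->strip_path_prefix (and a
// leading "./") from file paths, and cuts off the outermost frames below
// main(), the internal thread start routine, or the global ctors init, so
// that reports end at user code.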
static void StackStripMain(ReportStack *stack) {
  ReportStack *last_frame = 0;
  ReportStack *last_frame2 = 0;
  const char *prefix = "__interceptor_";
  uptr prefix_len = internal_strlen(prefix);
  const char *path_prefix = flags()->strip_path_prefix;
  uptr path_prefix_len = internal_strlen(path_prefix);
  char *pos;
  for (ReportStack *ent = stack; ent; ent = ent->next) {
    if (ent->func && 0 == internal_strncmp(ent->func, prefix, prefix_len))
      ent->func += prefix_len;
    if (ent->file && (pos = internal_strstr(ent->file, path_prefix)))
      ent->file = pos + path_prefix_len;
    if (ent->file && ent->file[0] == '.' && ent->file[1] == '/')
      ent->file += 2;
    last_frame2 = last_frame;
    last_frame = ent;
  }

  if (last_frame2 == 0)
    return;
  const char *last = last_frame->func;
#ifndef TSAN_GO
  const char *last2 = last_frame2->func;
  // Strip the frame above 'main'.
  if (last2 && 0 == internal_strcmp(last2, "main")) {
    last_frame2->next = 0;
  // Strip our internal thread start routine.
  } else if (last && 0 == internal_strcmp(last, "__tsan_thread_start_func")) {
    last_frame2->next = 0;
  // Strip global ctors init.
  } else if (last && 0 == internal_strcmp(last, "__do_global_ctors_aux")) {
    last_frame2->next = 0;
  // If both are 0, then we probably just failed to symbolize.
  } else if (last || last2) {
    // Ensure that we recovered the stack completely. A trimmed stack
    // can actually happen if we do not instrument some code,
    // so it's only a debug print. However, we must try hard not to miss it
    // due to our own fault.
    DPrintf("Bottom stack frame of stack %zx is missed\n", stack->pc);
  }
#else
  // The last frame always points into the runtime
  // (gosched0, goexit0, runtime.main).
  last_frame2->next = 0;
  (void)last;
#endif
}

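// Symbolizes every pc in the trace and links the resulting frames into a
// single list with the innermost frame first. All frames except the topmost
// one are return addresses, so they are symbolized at pc - 1 and the original
// pc is restored afterwards for printing.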
static ReportStack *SymbolizeStack(const StackTrace& trace) {
  if (trace.IsEmpty())
    return 0;
  ReportStack *stack = 0;
  for (uptr si = 0; si < trace.Size(); si++) {
    // We obtain the return address, that is, the address of the next
    // instruction, so offset it by 1 byte.
    bool is_last = (si == trace.Size() - 1);
    ReportStack *ent = SymbolizeCode(trace.Get(si) - !is_last);
    CHECK_NE(ent, 0);
    ReportStack *last = ent;
    while (last->next) {
      last->pc += !is_last;
      last = last->next;
    }
    last->pc += !is_last;
    last->next = stack;
    stack = ent;
  }
  StackStripMain(stack);
  return stack;
}

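// ScopedReport accumulates everything that goes into a single report
// (memory accesses, threads, mutexes, locations). Its constructor takes both
// the tsan report mutex and the common sanitizer report mutex, so report
// generation and printing are serialized for the lifetime of the object.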
ScopedReport::ScopedReport(ReportType typ) {
  ctx_ = CTX();
  ctx_->thread_registry->CheckLocked();
  void *mem = internal_alloc(MBlockReport, sizeof(ReportDesc));
  rep_ = new(mem) ReportDesc;
  rep_->typ = typ;
  ctx_->report_mtx.Lock();
  CommonSanitizerReportMutex.Lock();
}

ScopedReport::~ScopedReport() {
  CommonSanitizerReportMutex.Unlock();
  ctx_->report_mtx.Unlock();
  DestroyAndFree(rep_);
}

void ScopedReport::AddStack(const StackTrace *stack) {
  ReportStack **rs = rep_->stacks.PushBack();
  *rs = SymbolizeStack(*stack);
}

void ScopedReport::AddMemoryAccess(uptr addr, Shadow s,
    const StackTrace *stack, const MutexSet *mset) {
  void *mem = internal_alloc(MBlockReportMop, sizeof(ReportMop));
  ReportMop *mop = new(mem) ReportMop;
  rep_->mops.PushBack(mop);
  mop->tid = s.tid();
  mop->addr = addr + s.addr0();
  mop->size = s.size();
  mop->write = s.IsWrite();
  mop->atomic = s.IsAtomic();
  mop->stack = SymbolizeStack(*stack);
  for (uptr i = 0; i < mset->Size(); i++) {
    MutexSet::Desc d = mset->Get(i);
    u64 uid = 0;
    uptr addr = SyncVar::SplitId(d.id, &uid);
    SyncVar *s = ctx_->synctab.GetIfExistsAndLock(addr, false);
    // Check that the mutex is still alive.
    // Another mutex can be created at the same address,
    // so check uid as well.
    if (s && s->CheckId(uid)) {
      ReportMopMutex mtx = {s->uid, d.write};
      mop->mset.PushBack(mtx);
      AddMutex(s);
    } else {
      ReportMopMutex mtx = {d.id, d.write};
      mop->mset.PushBack(mtx);
      AddMutex(d.id);
    }
    if (s)
      s->mtx.ReadUnlock();
  }
}

void ScopedReport::AddThread(const ThreadContext *tctx) {
  for (uptr i = 0; i < rep_->threads.Size(); i++) {
    if ((u32)rep_->threads[i]->id == tctx->tid)
      return;
  }
  void *mem = internal_alloc(MBlockReportThread, sizeof(ReportThread));
  ReportThread *rt = new(mem) ReportThread();
  rep_->threads.PushBack(rt);
  rt->id = tctx->tid;
  rt->pid = tctx->os_id;
  rt->running = (tctx->status == ThreadStatusRunning);
  rt->name = tctx->name ? internal_strdup(tctx->name) : 0;
  rt->parent_tid = tctx->parent_tid;
  rt->stack = 0;
#ifdef TSAN_GO
  rt->stack = SymbolizeStack(tctx->creation_stack);
#else
  uptr ssz = 0;
  const uptr *stack = StackDepotGet(tctx->creation_stack_id, &ssz);
  if (stack) {
    StackTrace trace;
    trace.Init(stack, ssz);
    rt->stack = SymbolizeStack(trace);
  }
#endif
}

#ifndef TSAN_GO
static ThreadContext *FindThreadByUidLocked(int unique_id) {
  Context *ctx = CTX();
  ctx->thread_registry->CheckLocked();
  for (unsigned i = 0; i < kMaxTid; i++) {
    ThreadContext *tctx = static_cast<ThreadContext*>(
        ctx->thread_registry->GetThreadLocked(i));
    if (tctx && tctx->unique_id == (u32)unique_id) {
      return tctx;
    }
  }
  return 0;
}

static ThreadContext *FindThreadByTidLocked(int tid) {
  Context *ctx = CTX();
  ctx->thread_registry->CheckLocked();
  return static_cast<ThreadContext*>(
      ctx->thread_registry->GetThreadLocked(tid));
}

static bool IsInStackOrTls(ThreadContextBase *tctx_base, void *arg) {
  uptr addr = (uptr)arg;
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  if (tctx->status != ThreadStatusRunning)
    return false;
  ThreadState *thr = tctx->thr;
  CHECK(thr);
  return ((addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size) ||
          (addr >= thr->tls_addr && addr < thr->tls_addr + thr->tls_size));
}

ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
  Context *ctx = CTX();
  ctx->thread_registry->CheckLocked();
  ThreadContext *tctx = static_cast<ThreadContext*>(
      ctx->thread_registry->FindThreadContextLocked(IsInStackOrTls,
                                                    (void*)addr));
  if (!tctx)
    return 0;
  ThreadState *thr = tctx->thr;
  CHECK(thr);
  *is_stack = (addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size);
  return tctx;
}
#endif

void ScopedReport::AddMutex(const SyncVar *s) {
  for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
    if (rep_->mutexes[i]->id == s->uid)
      return;
  }
  void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
  ReportMutex *rm = new(mem) ReportMutex();
  rep_->mutexes.PushBack(rm);
  rm->id = s->uid;
  rm->destroyed = false;
  rm->stack = 0;
#ifndef TSAN_GO
  uptr ssz = 0;
  const uptr *stack = StackDepotGet(s->creation_stack_id, &ssz);
  if (stack) {
    StackTrace trace;
    trace.Init(stack, ssz);
    rm->stack = SymbolizeStack(trace);
  }
#endif
}

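// Fallback used when the mutex is no longer alive (or was never tracked):
// only its id is known, so record it as destroyed with no creation stack.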
void ScopedReport::AddMutex(u64 id) {
  for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
    if (rep_->mutexes[i]->id == id)
      return;
  }
  void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
  ReportMutex *rm = new(mem) ReportMutex();
  rep_->mutexes.PushBack(rm);
  rm->id = id;
  rm->destroyed = true;
  rm->stack = 0;
}

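// Describes what the racy address points to: a file descriptor, a heap block,
// a thread's stack or TLS, or a global, and appends the matching location
// (plus the owning thread, if known) to the report.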
void ScopedReport::AddLocation(uptr addr, uptr size) {
  if (addr == 0)
    return;
#ifndef TSAN_GO
  int fd = -1;
  int creat_tid = -1;
  u32 creat_stack = 0;
  if (FdLocation(addr, &fd, &creat_tid, &creat_stack)
      || FdLocation(AlternativeAddress(addr), &fd, &creat_tid, &creat_stack)) {
    void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
    ReportLocation *loc = new(mem) ReportLocation();
    rep_->locs.PushBack(loc);
    loc->type = ReportLocationFD;
    loc->fd = fd;
    loc->tid = creat_tid;
    uptr ssz = 0;
    const uptr *stack = StackDepotGet(creat_stack, &ssz);
    if (stack) {
      StackTrace trace;
      trace.Init(stack, ssz);
      loc->stack = SymbolizeStack(trace);
    }
    ThreadContext *tctx = FindThreadByUidLocked(creat_tid);
    if (tctx)
      AddThread(tctx);
    return;
  }
  MBlock *b = 0;
  if (allocator()->PointerIsMine((void*)addr)
      && (b = user_mblock(0, (void*)addr))) {
    ThreadContext *tctx = FindThreadByTidLocked(b->Tid());
    void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
    ReportLocation *loc = new(mem) ReportLocation();
    rep_->locs.PushBack(loc);
    loc->type = ReportLocationHeap;
    loc->addr = (uptr)allocator()->GetBlockBegin((void*)addr);
    loc->size = b->Size();
    loc->tid = tctx ? tctx->tid : b->Tid();
    loc->name = 0;
    loc->file = 0;
    loc->line = 0;
    loc->stack = 0;
    uptr ssz = 0;
    const uptr *stack = StackDepotGet(b->StackId(), &ssz);
    if (stack) {
      StackTrace trace;
      trace.Init(stack, ssz);
      loc->stack = SymbolizeStack(trace);
    }
    if (tctx)
      AddThread(tctx);
    return;
  }
  bool is_stack = false;
  if (ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack)) {
    void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
    ReportLocation *loc = new(mem) ReportLocation();
    rep_->locs.PushBack(loc);
    loc->type = is_stack ? ReportLocationStack : ReportLocationTLS;
    loc->tid = tctx->tid;
    AddThread(tctx);
  }
  ReportLocation *loc = SymbolizeData(addr);
  if (loc) {
    rep_->locs.PushBack(loc);
    return;
  }
#endif
}

#ifndef TSAN_GO
void ScopedReport::AddSleep(u32 stack_id) {
  uptr ssz = 0;
  const uptr *stack = StackDepotGet(stack_id, &ssz);
  if (stack) {
    StackTrace trace;
    trace.Init(stack, ssz);
    rep_->sleep = SymbolizeStack(trace);
  }
}
#endif

void ScopedReport::SetCount(int count) {
  rep_->count = count;
}

const ReportDesc *ScopedReport::GetReport() const {
  return rep_;
}

void RestoreStack(int tid, const u64 epoch, StackTrace *stk, MutexSet *mset) {
  // This function restores the stack trace and mutex set for the thread/epoch.
  // It does so by getting the stack trace and mutex set at the beginning of
  // the trace part, and then replaying the trace till the given epoch.
  Context *ctx = CTX();
  ctx->thread_registry->CheckLocked();
  ThreadContext *tctx = static_cast<ThreadContext*>(
      ctx->thread_registry->GetThreadLocked(tid));
  if (tctx == 0)
    return;
  if (tctx->status != ThreadStatusRunning
      && tctx->status != ThreadStatusFinished
      && tctx->status != ThreadStatusDead)
    return;
  Trace* trace = ThreadTrace(tctx->tid);
  Lock l(&trace->mtx);
  const int partidx = (epoch / kTracePartSize) % TraceParts();
  TraceHeader* hdr = &trace->headers[partidx];
  if (epoch < hdr->epoch0)
    return;
  const u64 epoch0 = RoundDown(epoch, TraceSize());
  const u64 eend = epoch % TraceSize();
  const u64 ebegin = RoundDown(eend, kTracePartSize);
  DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n",
          tid, (uptr)epoch, (uptr)ebegin, (uptr)eend, partidx);
  InternalScopedBuffer<uptr> stack(1024);  // FIXME: de-hardcode 1024
  for (uptr i = 0; i < hdr->stack0.Size(); i++) {
    stack[i] = hdr->stack0.Get(i);
    DPrintf2("  #%02lu: pc=%zx\n", i, stack[i]);
  }
  if (mset)
    *mset = hdr->mset0;
  uptr pos = hdr->stack0.Size();
  Event *events = (Event*)GetThreadTrace(tid);
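  // Replay trace events up to the target epoch. Each event is a u64 with the
  // event type in the top 3 bits and the pc (for lock/unlock events, the
  // mutex address) in the low 61 bits: function entry/exit events push and
  // pop call frames, memory access events update the current pc, and
  // lock/unlock events update the mutex set.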
  for (uptr i = ebegin; i <= eend; i++) {
    Event ev = events[i];
    EventType typ = (EventType)(ev >> 61);
    uptr pc = (uptr)(ev & ((1ull << 61) - 1));
    DPrintf2("  %zu typ=%d pc=%zx\n", i, typ, pc);
    if (typ == EventTypeMop) {
      stack[pos] = pc;
    } else if (typ == EventTypeFuncEnter) {
      stack[pos++] = pc;
    } else if (typ == EventTypeFuncExit) {
      if (pos > 0)
        pos--;
    }
    if (mset) {
      if (typ == EventTypeLock) {
        mset->Add(pc, true, epoch0 + i);
      } else if (typ == EventTypeUnlock) {
        mset->Del(pc, true);
      } else if (typ == EventTypeRLock) {
        mset->Add(pc, false, epoch0 + i);
      } else if (typ == EventTypeRUnlock) {
        mset->Del(pc, false);
      }
    }
    for (uptr j = 0; j <= pos; j++)
      DPrintf2("      #%zu: %zx\n", j, stack[j]);
  }
  if (pos == 0 && stack[0] == 0)
    return;
  pos++;
  stk->Init(stack.data(), pos);
}

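// Report deduplication: if the pair of racing stacks (compared by hash) or
// the racy address range was already reported, suppress the new report.
// Controlled by the suppress_equal_stacks and suppress_equal_addresses flags.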
static bool HandleRacyStacks(ThreadState *thr, const StackTrace (&traces)[2],
    uptr addr_min, uptr addr_max) {
  Context *ctx = CTX();
  bool equal_stack = false;
  RacyStacks hash = {};
  if (flags()->suppress_equal_stacks) {
    hash.hash[0] = md5_hash(traces[0].Begin(), traces[0].Size() * sizeof(uptr));
    hash.hash[1] = md5_hash(traces[1].Begin(), traces[1].Size() * sizeof(uptr));
    for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) {
      if (hash == ctx->racy_stacks[i]) {
        DPrintf("ThreadSanitizer: suppressing report as doubled (stack)\n");
        equal_stack = true;
        break;
      }
    }
  }
  bool equal_address = false;
  RacyAddress ra0 = {addr_min, addr_max};
  if (flags()->suppress_equal_addresses) {
    for (uptr i = 0; i < ctx->racy_addresses.Size(); i++) {
      RacyAddress ra2 = ctx->racy_addresses[i];
      uptr maxbeg = max(ra0.addr_min, ra2.addr_min);
      uptr minend = min(ra0.addr_max, ra2.addr_max);
      if (maxbeg < minend) {
        DPrintf("ThreadSanitizer: suppressing report as doubled (addr)\n");
        equal_address = true;
        break;
      }
    }
  }
  if (equal_stack || equal_address) {
    if (!equal_stack)
      ctx->racy_stacks.PushBack(hash);
    if (!equal_address)
      ctx->racy_addresses.PushBack(ra0);
    return true;
  }
  return false;
}

static void AddRacyStacks(ThreadState *thr, const StackTrace (&traces)[2],
    uptr addr_min, uptr addr_max) {
  Context *ctx = CTX();
  if (flags()->suppress_equal_stacks) {
    RacyStacks hash = {};
    hash.hash[0] = md5_hash(traces[0].Begin(), traces[0].Size() * sizeof(uptr));
    hash.hash[1] = md5_hash(traces[1].Begin(), traces[1].Size() * sizeof(uptr));
    ctx->racy_stacks.PushBack(hash);
  }
  if (flags()->suppress_equal_addresses) {
    RacyAddress ra0 = {addr_min, addr_max};
    ctx->racy_addresses.PushBack(ra0);
  }
}

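// Final gate for a prepared report: matches it against user suppressions
// (by either of the two stacks or the racy location), records any fired
// suppression, lets the OnReport hook veto the report, and only then prints
// it and bumps the reported-bug counter. Returns true if the report was
// actually emitted.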
bool OutputReport(Context *ctx,
                  const ScopedReport &srep,
                  const ReportStack *suppress_stack1,
                  const ReportStack *suppress_stack2,
                  const ReportLocation *suppress_loc) {
  atomic_store(&ctx->last_symbolize_time_ns, NanoTime(), memory_order_relaxed);
  const ReportDesc *rep = srep.GetReport();
  Suppression *supp = 0;
  uptr suppress_pc = IsSuppressed(rep->typ, suppress_stack1, &supp);
  if (suppress_pc == 0)
    suppress_pc = IsSuppressed(rep->typ, suppress_stack2, &supp);
  if (suppress_pc == 0)
    suppress_pc = IsSuppressed(rep->typ, suppress_loc, &supp);
  if (suppress_pc != 0) {
    FiredSuppression s = {srep.GetReport()->typ, suppress_pc, supp};
    ctx->fired_suppressions.PushBack(s);
  }
  if (OnReport(rep, suppress_pc != 0))
    return false;
  PrintReport(rep);
  CTX()->nreported++;
  return true;
}

bool IsFiredSuppression(Context *ctx,
                        const ScopedReport &srep,
                        const StackTrace &trace) {
  for (uptr k = 0; k < ctx->fired_suppressions.Size(); k++) {
    if (ctx->fired_suppressions[k].type != srep.GetReport()->typ)
      continue;
    for (uptr j = 0; j < trace.Size(); j++) {
      FiredSuppression *s = &ctx->fired_suppressions[k];
      if (trace.Get(j) == s->pc) {
        if (s->supp)
          s->supp->hit_count++;
        return true;
      }
    }
  }
  return false;
}

static bool IsFiredSuppression(Context *ctx,
                               const ScopedReport &srep,
                               uptr addr) {
  for (uptr k = 0; k < ctx->fired_suppressions.Size(); k++) {
    if (ctx->fired_suppressions[k].type != srep.GetReport()->typ)
      continue;
    FiredSuppression *s = &ctx->fired_suppressions[k];
    if (addr == s->pc) {
      if (s->supp)
        s->supp->hit_count++;
      return true;
    }
  }
  return false;
}

bool FrameIsInternal(const ReportStack *frame) {
  return frame != 0 && frame->file != 0
      && (internal_strstr(frame->file, "tsan_interceptors.cc") ||
          internal_strstr(frame->file, "sanitizer_common_interceptors.inc") ||
          internal_strstr(frame->file, "tsan_interface_"));
}

// In programs that use Java we see weird reports like:
// WARNING: ThreadSanitizer: data race (pid=22512)
//   Read of size 8 at 0x7d2b00084318 by thread 100:
//     #0 memcpy tsan_interceptors.cc:406 (foo+0x00000d8dfae3)
//     #1 <null> <null>:0 (0x7f7ad9b40193)
//   Previous write of size 8 at 0x7d2b00084318 by thread 105:
//     #0 strncpy tsan_interceptors.cc:501 (foo+0x00000d8e0919)
//     #1 <null> <null>:0 (0x7f7ad9b42707)
static bool IsJavaNonsense(const ReportDesc *rep) {
#ifndef TSAN_GO
  for (uptr i = 0; i < rep->mops.Size(); i++) {
    ReportMop *mop = rep->mops[i];
    ReportStack *frame = mop->stack;
    if (frame == 0
        || (frame->func == 0 && frame->file == 0 && frame->line == 0
          && frame->module == 0)) {
      return true;
    }
    if (FrameIsInternal(frame)) {
      frame = frame->next;
      if (frame == 0
          || (frame->func == 0 && frame->file == 0 && frame->line == 0
          && frame->module == 0)) {
        if (frame) {
          FiredSuppression supp = {rep->typ, frame->pc, 0};
          CTX()->fired_suppressions.PushBack(supp);
        }
        return true;
      }
    }
  }
#endif
  return false;
}

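// Returns true if this race must be reported even when report_atomic_races
// is disabled: either both accesses are plain (non-atomic), or an atomic
// access races with the memory being freed.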
static bool RaceBetweenAtomicAndFree(ThreadState *thr) {
  Shadow s0(thr->racy_state[0]);
  Shadow s1(thr->racy_state[1]);
  CHECK(!(s0.IsAtomic() && s1.IsAtomic()));
  if (!s0.IsAtomic() && !s1.IsAtomic())
    return true;
  if (s0.IsAtomic() && s1.IsFreed())
    return true;
  if (s1.IsAtomic() && thr->is_freeing)
    return true;
  return false;
}

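// Main entry point for reporting a data race. thr->racy_state[0] holds the
// current access and thr->racy_state[1] the previous conflicting access found
// in shadow memory. The current stack is taken directly from the thread; the
// other thread's stack and mutex set are reconstructed from its trace with
// RestoreStack().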
void ReportRace(ThreadState *thr) {
  if (!flags()->report_bugs)
    return;
  ScopedInRtl in_rtl;

  if (!flags()->report_atomic_races && !RaceBetweenAtomicAndFree(thr))
    return;

  bool freed = false;
  {
    Shadow s(thr->racy_state[1]);
    freed = s.GetFreedAndReset();
    thr->racy_state[1] = s.raw();
  }

  uptr addr = ShadowToMem((uptr)thr->racy_shadow_addr);
  uptr addr_min = 0;
  uptr addr_max = 0;
  {
    uptr a0 = addr + Shadow(thr->racy_state[0]).addr0();
    uptr a1 = addr + Shadow(thr->racy_state[1]).addr0();
    uptr e0 = a0 + Shadow(thr->racy_state[0]).size();
    uptr e1 = a1 + Shadow(thr->racy_state[1]).size();
    addr_min = min(a0, a1);
    addr_max = max(e0, e1);
    if (IsExpectedReport(addr_min, addr_max - addr_min))
      return;
  }

  Context *ctx = CTX();
  ThreadRegistryLock l0(ctx->thread_registry);

  ReportType typ = ReportTypeRace;
  if (thr->is_vptr_access)
    typ = ReportTypeVptrRace;
  else if (freed)
    typ = ReportTypeUseAfterFree;
  ScopedReport rep(typ);
  if (IsFiredSuppression(ctx, rep, addr))
    return;
  const uptr kMop = 2;
  StackTrace traces[kMop];
  const uptr toppc = TraceTopPC(thr);
  traces[0].ObtainCurrent(thr, toppc);
  if (IsFiredSuppression(ctx, rep, traces[0]))
    return;
  InternalScopedBuffer<MutexSet> mset2(1);
  new(mset2.data()) MutexSet();
  Shadow s2(thr->racy_state[1]);
  RestoreStack(s2.tid(), s2.epoch(), &traces[1], mset2.data());
  if (IsFiredSuppression(ctx, rep, traces[1]))
    return;

  if (HandleRacyStacks(thr, traces, addr_min, addr_max))
    return;

  for (uptr i = 0; i < kMop; i++) {
    Shadow s(thr->racy_state[i]);
    rep.AddMemoryAccess(addr, s, &traces[i],
                        i == 0 ? &thr->mset : mset2.data());
  }

  if (flags()->suppress_java && IsJavaNonsense(rep.GetReport()))
    return;

  for (uptr i = 0; i < kMop; i++) {
    FastState s(thr->racy_state[i]);
    ThreadContext *tctx = static_cast<ThreadContext*>(
        ctx->thread_registry->GetThreadLocked(s.tid()));
    if (s.epoch() < tctx->epoch0 || s.epoch() > tctx->epoch1)
      continue;
    rep.AddThread(tctx);
  }

  rep.AddLocation(addr_min, addr_max - addr_min);

#ifndef TSAN_GO
  {  // NOLINT
    Shadow s(thr->racy_state[1]);
    if (s.epoch() <= thr->last_sleep_clock.get(s.tid()))
      rep.AddSleep(thr->last_sleep_stack_id);
  }
#endif

  ReportLocation *suppress_loc = rep.GetReport()->locs.Size() ?
                                 rep.GetReport()->locs[0] : 0;
  if (!OutputReport(ctx, rep, rep.GetReport()->mops[0]->stack,
                              rep.GetReport()->mops[1]->stack,
                              suppress_loc))
    return;

  AddRacyStacks(thr, traces, addr_min, addr_max);
}

void PrintCurrentStack(ThreadState *thr, uptr pc) {
  StackTrace trace;
  trace.ObtainCurrent(thr, pc);
  PrintStack(SymbolizeStack(trace));
}

void PrintCurrentStackSlow() {
#ifndef TSAN_GO
  __sanitizer::StackTrace *ptrace = new(internal_alloc(MBlockStackTrace,
      sizeof(__sanitizer::StackTrace))) __sanitizer::StackTrace;
  ptrace->SlowUnwindStack(__sanitizer::StackTrace::GetCurrentPc(),
      kStackTraceMax);
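  // The slow unwinder produces frames with the innermost pc first, while
  // tsan's StackTrace (and SymbolizeStack) expect the innermost pc last,
  // so reverse the array in place before handing it over.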
  for (uptr i = 0; i < ptrace->size / 2; i++) {
    uptr tmp = ptrace->trace[i];
    ptrace->trace[i] = ptrace->trace[ptrace->size - i - 1];
    ptrace->trace[ptrace->size - i - 1] = tmp;
  }
  StackTrace trace;
  trace.Init(ptrace->trace, ptrace->size);
  PrintStack(SymbolizeStack(trace));
#endif
}

}  // namespace __tsan