tsan_rtl_report.cc revision 591616d323d73b7ea7cd8fea4eec46cedccda27e
1//===-- tsan_rtl_report.cc ------------------------------------------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file is a part of ThreadSanitizer (TSan), a race detector.
11//
12//===----------------------------------------------------------------------===//
13
14#include "sanitizer_common/sanitizer_libc.h"
15#include "sanitizer_common/sanitizer_placement_new.h"
16#include "sanitizer_common/sanitizer_stackdepot.h"
17#include "tsan_platform.h"
18#include "tsan_rtl.h"
19#include "tsan_suppressions.h"
20#include "tsan_symbolize.h"
21#include "tsan_report.h"
22#include "tsan_sync.h"
23#include "tsan_mman.h"
24#include "tsan_flags.h"
25
26namespace __tsan {
27
// Handler for CHECK() failures inside the TSan runtime itself:
// prints the failing location/condition with both operand values
// and terminates the process via Die().
void TsanCheckFailed(const char *file, int line, const char *cond,
                     u64 v1, u64 v2) {
  ScopedInRtl in_rtl;  // presumably marks "inside runtime" to avoid re-entering interceptors -- confirm
  TsanPrintf("FATAL: ThreadSanitizer CHECK failed: "
             "%s:%d \"%s\" (0x%zx, 0x%zx)\n",
             file, line, cond, (uptr)v1, (uptr)v2);
  Die();
}
36
// Can be overriden by an application/test to intercept reports.
// Receives the finished report and the suppression decision made so far;
// the returned value becomes the final suppression decision.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnReport(const ReportDesc *rep, bool suppressed);
#else
// Default (weak) implementation: keep the incoming decision unchanged.
bool WEAK OnReport(const ReportDesc *rep, bool suppressed) {
  (void)rep;
  return suppressed;
}
#endif
46
// Cleans up a symbolized stack for presentation:
//  - drops the "__interceptor_" prefix from function names;
//  - strips flags()->strip_path_prefix (and a leading "./") from file names;
//  - truncates the bottom of the stack at well-known startup routines
//    (below main(), the tsan thread start thunk, global ctors init).
static void StackStripMain(ReportStack *stack) {
  ReportStack *last_frame = 0;   // bottom-most frame after the walk
  ReportStack *last_frame2 = 0;  // frame just above the bottom-most one
  const char *prefix = "__interceptor_";
  uptr prefix_len = internal_strlen(prefix);
  const char *path_prefix = flags()->strip_path_prefix;
  uptr path_prefix_len = internal_strlen(path_prefix);
  char *pos;
  for (ReportStack *ent = stack; ent; ent = ent->next) {
    // Strip the interceptor prefix by advancing past it.
    if (ent->func && 0 == internal_strncmp(ent->func, prefix, prefix_len))
      ent->func += prefix_len;
    // Strip the user-configured path prefix from the file name.
    if (ent->file && (pos = internal_strstr(ent->file, path_prefix)))
      ent->file = pos + path_prefix_len;
    if (ent->file && ent->file[0] == '.' && ent->file[1] == '/')
      ent->file += 2;
    last_frame2 = last_frame;
    last_frame = ent;
  }

  if (last_frame2 == 0)  // fewer than two frames -- nothing to truncate
    return;
  const char *last = last_frame->func;
#ifndef TSAN_GO
  const char *last2 = last_frame2->func;
  // Strip frame above 'main'
  if (last2 && 0 == internal_strcmp(last2, "main")) {
    last_frame2->next = 0;
  // Strip our internal thread start routine.
  } else if (last && 0 == internal_strcmp(last, "__tsan_thread_start_func")) {
    last_frame2->next = 0;
  // Strip global ctors init.
  } else if (last && 0 == internal_strcmp(last, "__do_global_ctors_aux")) {
    last_frame2->next = 0;
  // If both are 0, then we probably just failed to symbolize.
  } else if (last || last2) {
    // Ensure that we recovered stack completely. Trimmed stack
    // can actually happen if we do not instrument some code,
    // so it's only a debug print. However we must try hard to not miss it
    // due to our fault.
    DPrintf("Bottom stack frame of stack %zx is missed\n", stack->pc);
  }
#else
  // Go runtime: the bottom frame is the scheduler's unlock routine.
  if (last && 0 == internal_strcmp(last, "schedunlock"))
    last_frame2->next = 0;
#endif
}
93
// Symbolizes all pcs of the trace into a linked list of ReportStack
// frames and strips well-known bottom frames (see StackStripMain).
// Returns 0 for an empty trace.
static ReportStack *SymbolizeStack(const StackTrace& trace) {
  if (trace.IsEmpty())
    return 0;
  ReportStack *stack = 0;
  for (uptr si = 0; si < trace.Size(); si++) {
    // We obtain the return address, that is, address of the next instruction,
    // so offset it by 1 byte.
    // The last trace entry is not a return address and is symbolized as-is.
    bool is_last = (si == trace.Size() - 1);
    ReportStack *ent = SymbolizeCode(trace.Get(si) - !is_last);
    CHECK_NE(ent, 0);
    // SymbolizeCode may return a chain of frames (e.g. inlining);
    // undo the -1 adjustment on each frame's pc so the report shows
    // the original addresses.
    ReportStack *last = ent;
    while (last->next) {
      last->pc += !is_last;
      last = last->next;
    }
    last->pc += !is_last;
    // Prepend the new frames, so frames are linked in reverse order of
    // the trace entries.
    last->next = stack;
    stack = ent;
  }
  StackStripMain(stack);
  return stack;
}
116
// Allocates a fresh report descriptor of the given type and acquires the
// global report mutex; the mutex stays held until destruction, so report
// construction and output are serialized across threads.
ScopedReport::ScopedReport(ReportType typ) {
  ctx_ = CTX();
  void *mem = internal_alloc(MBlockReport, sizeof(ReportDesc));
  rep_ = new(mem) ReportDesc;
  rep_->typ = typ;
  ctx_->report_mtx.Lock();
}
124
// Releases the global report mutex taken in the constructor, then
// destroys and frees the report descriptor.
ScopedReport::~ScopedReport() {
  ctx_->report_mtx.Unlock();
  rep_->~ReportDesc();  // placement-new'ed, so destroy explicitly
  internal_free(rep_);
}
130
131void ScopedReport::AddStack(const StackTrace *stack) {
132  ReportStack **rs = rep_->stacks.PushBack();
133  *rs = SymbolizeStack(*stack);
134}
135
136void ScopedReport::AddMemoryAccess(uptr addr, Shadow s,
137                                   const StackTrace *stack) {
138  void *mem = internal_alloc(MBlockReportMop, sizeof(ReportMop));
139  ReportMop *mop = new(mem) ReportMop;
140  rep_->mops.PushBack(mop);
141  mop->tid = s.tid();
142  mop->addr = addr + s.addr0();
143  mop->size = s.size();
144  mop->write = s.is_write();
145  mop->nmutex = 0;
146  mop->stack = SymbolizeStack(*stack);
147}
148
149void ScopedReport::AddThread(const ThreadContext *tctx) {
150  for (uptr i = 0; i < rep_->threads.Size(); i++) {
151    if (rep_->threads[i]->id == tctx->tid)
152      return;
153  }
154  void *mem = internal_alloc(MBlockReportThread, sizeof(ReportThread));
155  ReportThread *rt = new(mem) ReportThread();
156  rep_->threads.PushBack(rt);
157  rt->id = tctx->tid;
158  rt->running = (tctx->status == ThreadStatusRunning);
159  rt->stack = SymbolizeStack(tctx->creation_stack);
160}
161
162#ifndef TSAN_GO
163static ThreadContext *FindThread(int unique_id) {
164  CTX()->thread_mtx.CheckLocked();
165  for (unsigned i = 0; i < kMaxTid; i++) {
166    ThreadContext *tctx = CTX()->threads[i];
167    if (tctx && tctx->unique_id == unique_id) {
168      return tctx;
169    }
170  }
171  return 0;
172}
173#endif
174
// Appends a mutex description (creation stack) to the report.
void ScopedReport::AddMutex(const SyncVar *s) {
  void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
  ReportMutex *rm = new(mem) ReportMutex();
  rep_->mutexes.PushBack(rm);
  rm->id = 42;  // NOTE(review): placeholder value -- the real mutex id is not propagated here yet
  rm->stack = SymbolizeStack(s->creation_stack);
}
182
// Describes the memory range [addr, addr+size) involved in the report:
// for a heap pointer, attaches block bounds, allocating thread and the
// allocation stack from the stack depot; otherwise tries to resolve a
// global/static via the data symbolizer. Adds nothing if neither applies.
void ScopedReport::AddLocation(uptr addr, uptr size) {
  if (addr == 0)
    return;
#ifndef TSAN_GO
  if (allocator()->PointerIsMine((void*)addr)) {
    // Heap location: recover the owning block and its allocation metadata.
    MBlock *b = user_mblock(0, (void*)addr);
    ThreadContext *tctx = FindThread(b->alloc_tid);
    void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
    ReportLocation *loc = new(mem) ReportLocation();
    rep_->locs.PushBack(loc);
    loc->type = ReportLocationHeap;
    loc->addr = (uptr)allocator()->GetBlockBegin((void*)addr);
    loc->size = b->size;
    // Fall back to the raw allocator tid if the context is already gone.
    loc->tid = tctx ? tctx->tid : b->alloc_tid;
    loc->name = 0;
    loc->file = 0;
    loc->line = 0;
    loc->stack = 0;
    uptr ssz = 0;
    // Symbolize the allocation stack stored in the depot (may be absent).
    const uptr *stack = StackDepotGet(b->alloc_stack_id, &ssz);
    if (stack) {
      StackTrace trace;
      trace.Init(stack, ssz);
      loc->stack = SymbolizeStack(trace);
    }
    if (tctx)
      AddThread(tctx);
    return;
  }
#endif
  // Not a heap pointer: try to describe it as a global variable.
  ReportStack *symb = SymbolizeData(addr);
  if (symb) {
    void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
    ReportLocation *loc = new(mem) ReportLocation();
    rep_->locs.PushBack(loc);
    loc->type = ReportLocationGlobal;
    loc->addr = addr;
    loc->size = size;
    loc->tid = 0;
    // Borrow the symbolizer's strings; the frame struct itself is freed.
    loc->name = symb->func;
    loc->file = symb->file;
    loc->line = symb->line;
    loc->stack = 0;
    internal_free(symb);  // NOTE(review): frees only the head frame; assumes SymbolizeData returns a single frame -- confirm
    return;
  }
}
230
231#ifndef TSAN_GO
232void ScopedReport::AddSleep(u32 stack_id) {
233  uptr ssz = 0;
234  const uptr *stack = StackDepotGet(stack_id, &ssz);
235  if (stack) {
236    StackTrace trace;
237    trace.Init(stack, ssz);
238    rep_->sleep = SymbolizeStack(trace);
239  }
240}
241#endif
242
// Read-only access to the report being built.
const ReportDesc *ScopedReport::GetReport() const {
  return rep_;
}
246
// Reconstructs the call stack of thread `tid` as it was at trace position
// `epoch` by replaying the thread's event trace (function enter/exit and
// memory-access events) from the start of the containing trace part.
// Leaves `stk` untouched (empty) if the stack cannot be restored.
void RestoreStack(int tid, const u64 epoch, StackTrace *stk) {
  ThreadContext *tctx = CTX()->threads[tid];
  if (tctx == 0)
    return;
  // Locate the event trace: running threads keep it in ThreadState,
  // finished/dead threads in dead_info.
  Trace* trace = 0;
  if (tctx->status == ThreadStatusRunning) {
    CHECK(tctx->thr);
    trace = &tctx->thr->trace;
  } else if (tctx->status == ThreadStatusFinished
      || tctx->status == ThreadStatusDead) {
    if (tctx->dead_info == 0)
      return;
    trace = &tctx->dead_info->trace;
  } else {
    return;
  }
  Lock l(&trace->mtx);
  // The circular trace is split into kTraceParts parts; find the part
  // containing `epoch` and bail out if it was already overwritten.
  const int partidx = (epoch / (kTraceSize / kTraceParts)) % kTraceParts;
  TraceHeader* hdr = &trace->headers[partidx];
  if (epoch < hdr->epoch0)
    return;
  const u64 eend = epoch % kTraceSize;
  const u64 ebegin = eend / kTracePartSize * kTracePartSize;
  DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n",
          tid, (uptr)epoch, (uptr)ebegin, (uptr)eend, partidx);
  InternalScopedBuffer<uptr> stack(1024);  // FIXME: de-hardcode 1024
  // Seed with the stack snapshot taken at the beginning of the part.
  for (uptr i = 0; i < hdr->stack0.Size(); i++) {
    stack[i] = hdr->stack0.Get(i);
    DPrintf2("  #%02lu: pc=%zx\n", i, stack[i]);
  }
  uptr pos = hdr->stack0.Size();
  // Replay events from the part start up to (and including) `epoch`.
  for (uptr i = ebegin; i <= eend; i++) {
    Event ev = trace->events[i];
    EventType typ = (EventType)(ev >> 61);        // event type in top 3 bits
    uptr pc = (uptr)(ev & 0xffffffffffffull);     // pc in low 48 bits
    DPrintf2("  %zu typ=%d pc=%zx\n", i, typ, pc);
    if (typ == EventTypeMop) {
      stack[pos] = pc;       // current pc; overwritten by the next event
    } else if (typ == EventTypeFuncEnter) {
      stack[pos++] = pc;     // push the call site
    } else if (typ == EventTypeFuncExit) {
      if (pos > 0)
        pos--;               // pop
    }
    for (uptr j = 0; j <= pos; j++)
      DPrintf2("      #%zu: %zx\n", j, stack[j]);
  }
  if (pos == 0 && stack[0] == 0)
    return;  // nothing was restored
  pos++;     // include the slot holding the final pc
  stk->Init(stack.data(), pos);
}
299
300static bool HandleRacyStacks(ThreadState *thr, const StackTrace (&traces)[2],
301    uptr addr_min, uptr addr_max) {
302  Context *ctx = CTX();
303  bool equal_stack = false;
304  RacyStacks hash = {};
305  if (flags()->suppress_equal_stacks) {
306    hash.hash[0] = md5_hash(traces[0].Begin(), traces[0].Size() * sizeof(uptr));
307    hash.hash[1] = md5_hash(traces[1].Begin(), traces[1].Size() * sizeof(uptr));
308    for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) {
309      if (hash == ctx->racy_stacks[i]) {
310        DPrintf("ThreadSanitizer: suppressing report as doubled (stack)\n");
311        equal_stack = true;
312        break;
313      }
314    }
315  }
316  bool equal_address = false;
317  RacyAddress ra0 = {addr_min, addr_max};
318  if (flags()->suppress_equal_addresses) {
319    for (uptr i = 0; i < ctx->racy_addresses.Size(); i++) {
320      RacyAddress ra2 = ctx->racy_addresses[i];
321      uptr maxbeg = max(ra0.addr_min, ra2.addr_min);
322      uptr minend = min(ra0.addr_max, ra2.addr_max);
323      if (maxbeg < minend) {
324        DPrintf("ThreadSanitizer: suppressing report as doubled (addr)\n");
325        equal_address = true;
326        break;
327      }
328    }
329  }
330  if (equal_stack || equal_address) {
331    if (!equal_stack)
332      ctx->racy_stacks.PushBack(hash);
333    if (!equal_address)
334      ctx->racy_addresses.PushBack(ra0);
335    return true;
336  }
337  return false;
338}
339
340static void AddRacyStacks(ThreadState *thr, const StackTrace (&traces)[2],
341    uptr addr_min, uptr addr_max) {
342  Context *ctx = CTX();
343  if (flags()->suppress_equal_stacks) {
344    RacyStacks hash;
345    hash.hash[0] = md5_hash(traces[0].Begin(), traces[0].Size() * sizeof(uptr));
346    hash.hash[1] = md5_hash(traces[1].Begin(), traces[1].Size() * sizeof(uptr));
347    ctx->racy_stacks.PushBack(hash);
348  }
349  if (flags()->suppress_equal_addresses) {
350    RacyAddress ra0 = {addr_min, addr_max};
351    ctx->racy_addresses.PushBack(ra0);
352  }
353}
354
355bool OutputReport(const ScopedReport &srep, const ReportStack *suppress_stack) {
356  const ReportDesc *rep = srep.GetReport();
357  bool suppressed = IsSuppressed(rep->typ, suppress_stack);
358  suppressed = OnReport(rep, suppressed);
359  if (suppressed)
360    return false;
361  PrintReport(rep);
362  CTX()->nreported++;
363  return true;
364}
365
// Entry point for race reporting: invoked when two conflicting shadow
// accesses (thr->racy_state[0..1]) on thr->racy_shadow_addr were found.
// Restores both stacks, builds the report (accesses, threads, location,
// optional sleep stack) and prints it unless suppressed.
void ReportRace(ThreadState *thr) {
  ScopedInRtl in_rtl;

  // The "freed" marker is encoded in the second shadow value; extract it
  // (deciding race vs use-after-free) and store back the cleaned value.
  bool freed = false;
  {
    Shadow s(thr->racy_state[1]);
    freed = s.GetFreedAndReset();
    thr->racy_state[1] = s.raw();
  }

  uptr addr = ShadowToMem((uptr)thr->racy_shadow_addr);
  uptr addr_min = 0;
  uptr addr_max = 0;
  {
    // Compute the union of the two accessed byte ranges.
    uptr a0 = addr + Shadow(thr->racy_state[0]).addr0();
    uptr a1 = addr + Shadow(thr->racy_state[1]).addr0();
    uptr e0 = a0 + Shadow(thr->racy_state[0]).size();
    uptr e1 = a1 + Shadow(thr->racy_state[1]).size();
    addr_min = min(a0, a1);
    addr_max = max(e0, e1);
    // Races annotated as expected (e.g. in tests) are not reported.
    if (IsExpectedReport(addr_min, addr_max - addr_min))
      return;
  }

  Context *ctx = CTX();
  Lock l0(&ctx->thread_mtx);

  ScopedReport rep(freed ? ReportTypeUseAfterFree : ReportTypeRace);
  const uptr kMop = 2;  // a race always involves exactly two accesses
  StackTrace traces[kMop];
  for (uptr i = 0; i < kMop; i++) {
    Shadow s(thr->racy_state[i]);
    RestoreStack(s.tid(), s.epoch(), &traces[i]);
  }
  // Failure to restore stack of the current thread
  // was observed on free() interceptor called from pthread.
  // Just get the current shadow stack instead.
  if (traces[0].IsEmpty())
    traces[0].ObtainCurrent(thr, 0);

  // Drop duplicates of previously reported races (same stacks/addresses).
  if (HandleRacyStacks(thr, traces, addr_min, addr_max))
    return;

  for (uptr i = 0; i < kMop; i++) {
    Shadow s(thr->racy_state[i]);
    rep.AddMemoryAccess(addr, s, &traces[i]);
  }

  // Describe the involved threads, but only while the racy epoch falls
  // into the thread's lifetime [epoch0, epoch1] -- otherwise the tid slot
  // may already belong to a different thread.
  for (uptr i = 0; i < kMop; i++) {
    FastState s(thr->racy_state[i]);
    ThreadContext *tctx = ctx->threads[s.tid()];
    if (s.epoch() < tctx->epoch0 || s.epoch() > tctx->epoch1)
      continue;
    rep.AddThread(tctx);
  }

  rep.AddLocation(addr_min, addr_max - addr_min);

#ifndef TSAN_GO
  {  // NOLINT
    // Attach the last-sleep stack when the other access predates the
    // current thread's last recorded sleep for that tid.
    // NOTE(review): inferred from the epoch/sleep-clock comparison -- confirm.
    Shadow s(thr->racy_state[1]);
    if (s.epoch() <= thr->last_sleep_clock.get(s.tid()))
      rep.AddSleep(thr->last_sleep_stack_id);
  }
#endif

  if (!OutputReport(rep, rep.GetReport()->mops[0]->stack))
    return;  // suppressed -- do not remember it for dedup

  AddRacyStacks(thr, traces, addr_min, addr_max);
}
437
438void PrintCurrentStack(ThreadState *thr, uptr pc) {
439  StackTrace trace;
440  trace.ObtainCurrent(thr, pc);
441  PrintStack(SymbolizeStack(trace));
442}
443
444}  // namespace __tsan
445