tsan_rtl_report.cc revision 2f588f9d3417aa107ebbbd8830f97501023d3f40
//===-- tsan_rtl_report.cc ------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
#include "tsan_report.h"
#include "tsan_sync.h"
#include "tsan_mman.h"
#include "tsan_flags.h"
#include "tsan_fd.h"

namespace __tsan {

using namespace __sanitizer;  // NOLINT

// Forward declaration: defined below, used by TsanCheckFailed's helpers.
static ReportStack *SymbolizeStack(const StackTrace& trace);

// Handler for a failed runtime CHECK: prints the failing condition and the
// two operand values, dumps the current stack via the slow unwinder, and
// terminates the process.  Never returns.
void TsanCheckFailed(const char *file, int line, const char *cond,
                     u64 v1, u64 v2) {
  ScopedInRtl in_rtl;
  Printf("FATAL: ThreadSanitizer CHECK failed: "
         "%s:%d \"%s\" (0x%zx, 0x%zx)\n",
         file, line, cond, (uptr)v1, (uptr)v2);
  PrintCurrentStackSlow();
  Die();
}

// Can be overriden by an application/test to intercept reports.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnReport(const ReportDesc *rep, bool suppressed);
#else
// Default (weak) implementation simply echoes the suppression decision;
// a non-zero return from OnReport makes OutputReport skip printing.
SANITIZER_INTERFACE_ATTRIBUTE
bool WEAK OnReport(const ReportDesc *rep, bool suppressed) {
  (void)rep;
  return suppressed;
}
#endif

// Cleans up a symbolized stack in place:
//  - drops the "__interceptor_" prefix from interceptor frames,
//  - strips flags()->strip_path_prefix (and a leading "./") from file paths,
//  - cuts everything above main()/the internal thread start routine so the
//    user does not see runtime/libc bootstrap frames.
static void StackStripMain(ReportStack *stack) {
  ReportStack *last_frame = 0;   // deepest (bottom-most) frame seen so far
  ReportStack *last_frame2 = 0;  // frame just above last_frame
  const char *prefix = "__interceptor_";
  uptr prefix_len = internal_strlen(prefix);
  const char *path_prefix = flags()->strip_path_prefix;
  uptr path_prefix_len = internal_strlen(path_prefix);
  char *pos;
  for (ReportStack *ent = stack; ent; ent = ent->next) {
    if (ent->func && 0 == internal_strncmp(ent->func, prefix, prefix_len))
      ent->func += prefix_len;
    if (ent->file && (pos = internal_strstr(ent->file, path_prefix)))
      ent->file = pos + path_prefix_len;
    if (ent->file && ent->file[0] == '.' && ent->file[1] == '/')
      ent->file += 2;
    last_frame2 = last_frame;
    last_frame = ent;
  }

  // Fewer than two frames: nothing to strip.
  if (last_frame2 == 0)
    return;
  const char *last = last_frame->func;
#ifndef TSAN_GO
  const char *last2 = last_frame2->func;
  // Strip frame above 'main'
  if (last2 && 0 == internal_strcmp(last2, "main")) {
    last_frame2->next = 0;
  // Strip our internal thread start routine.
  } else if (last && 0 == internal_strcmp(last, "__tsan_thread_start_func")) {
    last_frame2->next = 0;
  // Strip global ctors init.
  } else if (last && 0 == internal_strcmp(last, "__do_global_ctors_aux")) {
    last_frame2->next = 0;
  // If both are 0, then we probably just failed to symbolize.
  } else if (last || last2) {
    // Ensure that we recovered stack completely. Trimmed stack
    // can actually happen if we do not instrument some code,
    // so it's only a debug print. However we must try hard to not miss it
    // due to our fault.
    DPrintf("Bottom stack frame of stack %zx is missed\n", stack->pc);
  }
#else
  // Go runtime: the bottom-most frame of interest is schedunlock.
  if (last && 0 == internal_strcmp(last, "schedunlock"))
    last_frame2->next = 0;
#endif
}

// Symbolizes a raw StackTrace into a linked list of ReportStack frames
// (innermost frame first) and strips runtime bootstrap frames.
// Returns 0 for an empty trace.
static ReportStack *SymbolizeStack(const StackTrace& trace) {
  if (trace.IsEmpty())
    return 0;
  ReportStack *stack = 0;
  for (uptr si = 0; si < trace.Size(); si++) {
    // We obtain the return address, that is, address of the next instruction,
    // so offset it by 1 byte.
    bool is_last = (si == trace.Size() - 1);
    ReportStack *ent = SymbolizeCode(trace.Get(si) - !is_last);
    CHECK_NE(ent, 0);
    // Restore the original return address on every (possibly inlined)
    // frame that SymbolizeCode produced for this pc.
    ReportStack *last = ent;
    while (last->next) {
      last->pc += !is_last;
      last = last->next;
    }
    last->pc += !is_last;
    // Prepend, so the final list is innermost-first.
    last->next = stack;
    stack = ent;
  }
  StackStripMain(stack);
  return stack;
}

// Allocates the report descriptor and takes the global report mutex,
// which is held until the ScopedReport is destroyed.
// Precondition (checked): ctx->thread_mtx is held by the caller.
ScopedReport::ScopedReport(ReportType typ) {
  ctx_ = CTX();
  ctx_->thread_mtx.CheckLocked();
  void *mem = internal_alloc(MBlockReport, sizeof(ReportDesc));
  rep_ = new(mem) ReportDesc;
  rep_->typ = typ;
  ctx_->report_mtx.Lock();
}

// Releases the report mutex and frees the report descriptor.
ScopedReport::~ScopedReport() {
  ctx_->report_mtx.Unlock();
  DestroyAndFree(rep_);
}

// Symbolizes the given trace and appends it to the report's stack list.
void ScopedReport::AddStack(const StackTrace *stack) {
  ReportStack **rs = rep_->stacks.PushBack();
  *rs = SymbolizeStack(*stack);
}

// Records one racy memory access: decodes the shadow word into
// tid/address/size/write/atomic, symbolizes the access stack and captures
// the set of mutexes held at the time of the access.
void ScopedReport::AddMemoryAccess(uptr addr, Shadow s,
    const StackTrace *stack, const MutexSet *mset) {
  void *mem = internal_alloc(MBlockReportMop, sizeof(ReportMop));
  ReportMop *mop = new(mem) ReportMop;
  rep_->mops.PushBack(mop);
  mop->tid = s.tid();
  mop->addr = addr + s.addr0();
  mop->size = s.size();
  mop->write = s.IsWrite();
  mop->atomic = s.IsAtomic();
  mop->stack = SymbolizeStack(*stack);
  for (uptr i = 0; i < mset->Size(); i++) {
    MutexSet::Desc d = mset->Get(i);
    u64 uid = 0;
    // NOTE: inner 'addr' and 's' intentionally shadow the parameters here —
    // they refer to the mutex, not to the racy access.
    uptr addr = SyncVar::SplitId(d.id, &uid);
    SyncVar *s = ctx_->synctab.GetIfExistsAndLock(addr, false);
    // Check that the mutex is still alive.
    // Another mutex can be created at the same address,
    // so check uid as well.
    if (s && s->CheckId(uid)) {
      ReportMopMutex mtx = {s->uid, d.write};
      mop->mset.PushBack(mtx);
      AddMutex(s);
    } else {
      // Mutex is gone (or replaced): report it by id as destroyed.
      ReportMopMutex mtx = {d.id, d.write};
      mop->mset.PushBack(mtx);
      AddMutex(d.id);
    }
    if (s)
      s->mtx.ReadUnlock();  // pairs with GetIfExistsAndLock(write_lock=false)
  }
}

// Adds a thread description to the report (deduplicated by tid),
// including its creation stack and parent thread.
void ScopedReport::AddThread(const ThreadContext *tctx) {
  for (uptr i = 0; i < rep_->threads.Size(); i++) {
    if (rep_->threads[i]->id == tctx->tid)
      return;
  }
  void *mem = internal_alloc(MBlockReportThread, sizeof(ReportThread));
  ReportThread *rt = new(mem) ReportThread();
  rep_->threads.PushBack(rt);
  rt->id = tctx->tid;
  rt->pid = tctx->os_id;
  rt->running = (tctx->status == ThreadStatusRunning);
  rt->name = tctx->name ? internal_strdup(tctx->name) : 0;
  rt->parent_tid = tctx->creation_tid;
  rt->stack = SymbolizeStack(tctx->creation_stack);
}

#ifndef TSAN_GO
// Linear scan over the thread table for the context with the given
// unique_id (unique ids, unlike tids, are never reused).
// Precondition (checked): ctx->thread_mtx is held.
static ThreadContext *FindThread(int unique_id) {
  Context *ctx = CTX();
  ctx->thread_mtx.CheckLocked();
  for (unsigned i = 0; i < kMaxTid; i++) {
    ThreadContext *tctx = ctx->threads[i];
    if (tctx && tctx->unique_id == unique_id) {
      return tctx;
    }
  }
  return 0;
}

// If addr lies within some running thread's stack or TLS range, returns
// that thread's context and sets *is_stack accordingly; otherwise returns 0.
// Precondition (checked): ctx->thread_mtx is held.
ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
  Context *ctx = CTX();
  ctx->thread_mtx.CheckLocked();
  for (unsigned i = 0; i < kMaxTid; i++) {
    ThreadContext *tctx = ctx->threads[i];
    if (tctx == 0 || tctx->status != ThreadStatusRunning)
      continue;
    ThreadState *thr = tctx->thr;
    CHECK(thr);
    if (addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size) {
      *is_stack = true;
      return tctx;
    }
    if (addr >= thr->tls_addr && addr < thr->tls_addr + thr->tls_size) {
      *is_stack = false;
      return tctx;
    }
  }
  return 0;
}
#endif

// Adds a live mutex to the report (deduplicated by uid),
// with its creation stack.
void ScopedReport::AddMutex(const SyncVar *s) {
  for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
    if (rep_->mutexes[i]->id == s->uid)
      return;
  }
  void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
  ReportMutex *rm = new(mem) ReportMutex();
  rep_->mutexes.PushBack(rm);
  rm->id = s->uid;
  rm->destroyed = false;
  rm->stack = SymbolizeStack(s->creation_stack);
}

// Adds an already-destroyed mutex to the report (deduplicated by id);
// no creation stack is available in this case.
void ScopedReport::AddMutex(u64 id) {
  for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
    if (rep_->mutexes[i]->id == id)
      return;
  }
  void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
  ReportMutex *rm = new(mem) ReportMutex();
  rep_->mutexes.PushBack(rm);
  rm->id = id;
  rm->destroyed = true;
  rm->stack = 0;
}

// Classifies the racy address and attaches a location description:
// file descriptor, heap block, thread stack/TLS, or a global (in that
// priority order).  No-op for a null address and for the Go runtime.
void ScopedReport::AddLocation(uptr addr, uptr size) {
  if (addr == 0)
    return;
#ifndef TSAN_GO
  int fd = -1;
  int creat_tid = -1;
  u32 creat_stack = 0;
  if (FdLocation(addr, &fd, &creat_tid, &creat_stack)
      || FdLocation(AlternativeAddress(addr), &fd, &creat_tid, &creat_stack)) {
    void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
    ReportLocation *loc = new(mem) ReportLocation();
    rep_->locs.PushBack(loc);
    loc->type = ReportLocationFD;
    loc->fd = fd;
    loc->tid = creat_tid;
    uptr ssz = 0;
    const uptr *stack = StackDepotGet(creat_stack, &ssz);
    if (stack) {
      StackTrace trace;
      trace.Init(stack, ssz);
      loc->stack = SymbolizeStack(trace);
    }
    ThreadContext *tctx = FindThread(creat_tid);
    if (tctx)
      AddThread(tctx);
    return;
  }
  if (allocator()->PointerIsMine((void*)addr)) {
    MBlock *b = user_mblock(0, (void*)addr);
    ThreadContext *tctx = FindThread(b->alloc_tid);
    void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
    ReportLocation *loc = new(mem) ReportLocation();
    rep_->locs.PushBack(loc);
    loc->type = ReportLocationHeap;
    loc->addr = (uptr)allocator()->GetBlockBegin((void*)addr);
    loc->size = b->size;
    loc->tid = tctx ? tctx->tid : b->alloc_tid;
    loc->name = 0;
    loc->file = 0;
    loc->line = 0;
    loc->stack = 0;
    uptr ssz = 0;
    const uptr *stack = StackDepotGet(b->alloc_stack_id, &ssz);
    if (stack) {
      StackTrace trace;
      trace.Init(stack, ssz);
      loc->stack = SymbolizeStack(trace);
    }
    if (tctx)
      AddThread(tctx);
    return;
  }
  bool is_stack = false;
  if (ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack)) {
    void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
    ReportLocation *loc = new(mem) ReportLocation();
    rep_->locs.PushBack(loc);
    loc->type = is_stack ? ReportLocationStack : ReportLocationTLS;
    loc->tid = tctx->tid;
    AddThread(tctx);
  }
  // NOTE(review): no return after the stack/TLS branch above — control
  // deliberately falls through and also tries to describe addr as a global.
  ReportLocation *loc = SymbolizeData(addr);
  if (loc) {
    rep_->locs.PushBack(loc);
    return;
  }
#endif
}

#ifndef TSAN_GO
// Attaches the stack (from the stack depot) at which the reporting thread
// last slept; used for "as if synchronized via sleep" notes in races.
void ScopedReport::AddSleep(u32 stack_id) {
  uptr ssz = 0;
  const uptr *stack = StackDepotGet(stack_id, &ssz);
  if (stack) {
    StackTrace trace;
    trace.Init(stack, ssz);
    rep_->sleep = SymbolizeStack(trace);
  }
}
#endif

const ReportDesc *ScopedReport::GetReport() const {
  return rep_;
}

void RestoreStack(int tid, const u64 epoch, StackTrace *stk, MutexSet *mset) {
  // This function restores stack trace and mutex set for the thread/epoch.
  // It does so by getting stack trace and mutex set at the beginning of
  // trace part, and then replaying the trace till the given epoch.
  ThreadContext *tctx = CTX()->threads[tid];
  if (tctx == 0)
    return;
  // Pick the trace: live threads keep it in ThreadState, finished/dead
  // threads in dead_info (if preserved).
  Trace* trace = 0;
  if (tctx->status == ThreadStatusRunning) {
    CHECK(tctx->thr);
    trace = &tctx->thr->trace;
  } else if (tctx->status == ThreadStatusFinished
      || tctx->status == ThreadStatusDead) {
    if (tctx->dead_info == 0)
      return;
    trace = &tctx->dead_info->trace;
  } else {
    return;
  }
  Lock l(&trace->mtx);
  const int partidx = (epoch / kTracePartSize) % TraceParts();
  TraceHeader* hdr = &trace->headers[partidx];
  // The part has been reused for a newer epoch range — trace is lost.
  if (epoch < hdr->epoch0)
    return;
  const u64 epoch0 = RoundDown(epoch, TraceSize());
  const u64 eend = epoch % TraceSize();
  const u64 ebegin = RoundDown(eend, kTracePartSize);
  DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n",
          tid, (uptr)epoch, (uptr)ebegin, (uptr)eend, partidx);
  InternalScopedBuffer<uptr> stack(1024);  // FIXME: de-hardcode 1024
  // Start from the stack/mutex-set snapshot taken at the part beginning.
  for (uptr i = 0; i < hdr->stack0.Size(); i++) {
    stack[i] = hdr->stack0.Get(i);
    DPrintf2(" #%02lu: pc=%zx\n", i, stack[i]);
  }
  if (mset)
    *mset = hdr->mset0;
  uptr pos = hdr->stack0.Size();
  Event *events = (Event*)GetThreadTrace(tid);
  // Replay events up to and including the target epoch.  An event packs
  // the type in the top 3 bits and the pc in the low 61 bits.
  for (uptr i = ebegin; i <= eend; i++) {
    Event ev = events[i];
    EventType typ = (EventType)(ev >> 61);
    uptr pc = (uptr)(ev & ((1ull << 61) - 1));
    DPrintf2(" %zu typ=%d pc=%zx\n", i, typ, pc);
    if (typ == EventTypeMop) {
      stack[pos] = pc;       // memory access: overwrite the current top pc
    } else if (typ == EventTypeFuncEnter) {
      stack[pos++] = pc;     // push a frame
    } else if (typ == EventTypeFuncExit) {
      if (pos > 0)
        pos--;               // pop a frame
    }
    if (mset) {
      if (typ == EventTypeLock) {
        mset->Add(pc, true, epoch0 + i);
      } else if (typ == EventTypeUnlock) {
        mset->Del(pc, true);
      } else if (typ == EventTypeRLock) {
        mset->Add(pc, false, epoch0 + i);
      } else if (typ == EventTypeRUnlock) {
        mset->Del(pc, false);
      }
    }
    for (uptr j = 0; j <= pos; j++)
      DPrintf2(" #%zu: %zx\n", j, stack[j]);
  }
  // Nothing was recovered — leave *stk untouched.
  if (pos == 0 && stack[0] == 0)
    return;
  pos++;  // include the trailing Mop pc at stack[pos]
  stk->Init(stack.data(), pos);
}

// Returns true (and records the pair) if an equivalent race — same pair of
// stack hashes or an overlapping address range — was already reported and
// the corresponding suppress_equal_* flag is on.
static bool HandleRacyStacks(ThreadState *thr, const StackTrace (&traces)[2],
    uptr addr_min, uptr addr_max) {
  Context *ctx = CTX();
  bool equal_stack = false;
  RacyStacks hash;
  if (flags()->suppress_equal_stacks) {
    hash.hash[0] = md5_hash(traces[0].Begin(), traces[0].Size() * sizeof(uptr));
    hash.hash[1] = md5_hash(traces[1].Begin(), traces[1].Size() * sizeof(uptr));
    for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) {
      if (hash == ctx->racy_stacks[i]) {
        DPrintf("ThreadSanitizer: suppressing report as doubled (stack)\n");
        equal_stack = true;
        break;
      }
    }
  }
  bool equal_address = false;
  RacyAddress ra0 = {addr_min, addr_max};
  if (flags()->suppress_equal_addresses) {
    for (uptr i = 0; i < ctx->racy_addresses.Size(); i++) {
      RacyAddress ra2 = ctx->racy_addresses[i];
      uptr maxbeg = max(ra0.addr_min, ra2.addr_min);
      uptr minend = min(ra0.addr_max, ra2.addr_max);
      if (maxbeg < minend) {  // the two address ranges overlap
        DPrintf("ThreadSanitizer: suppressing report as doubled (addr)\n");
        equal_address = true;
        break;
      }
    }
  }
  if (equal_stack || equal_address) {
    // Suppressing; still remember whichever key did not match yet, so the
    // dual key also suppresses future duplicates.
    if (!equal_stack)
      ctx->racy_stacks.PushBack(hash);
    if (!equal_address)
      ctx->racy_addresses.PushBack(ra0);
    return true;
  }
  return false;
}

// Remembers a just-reported race (stack hashes and address range) so that
// HandleRacyStacks can suppress duplicates of it later.
static void AddRacyStacks(ThreadState *thr, const StackTrace (&traces)[2],
    uptr addr_min, uptr addr_max) {
  Context *ctx = CTX();
  if (flags()->suppress_equal_stacks) {
    RacyStacks hash;
    hash.hash[0] = md5_hash(traces[0].Begin(), traces[0].Size() * sizeof(uptr));
    hash.hash[1] = md5_hash(traces[1].Begin(), traces[1].Size() * sizeof(uptr));
    ctx->racy_stacks.PushBack(hash);
  }
  if (flags()->suppress_equal_addresses) {
    RacyAddress ra0 = {addr_min, addr_max};
    ctx->racy_addresses.PushBack(ra0);
  }
}

// Final gate for a report: consults user suppressions (on up to two stacks)
// and the OnReport hook, then prints.  Returns true iff the report was
// actually printed.
bool OutputReport(Context *ctx,
                  const ScopedReport &srep,
                  const ReportStack *suppress_stack1,
                  const ReportStack *suppress_stack2) {
  const ReportDesc *rep = srep.GetReport();
  uptr suppress_pc = IsSuppressed(rep->typ, suppress_stack1);
  if (suppress_pc == 0)
    suppress_pc = IsSuppressed(rep->typ, suppress_stack2);
  if (suppress_pc != 0) {
    // Remember the fired suppression so future reports with the same pc
    // can be dropped early (see IsFiredSuppression).
    FiredSuppression supp = {srep.GetReport()->typ, suppress_pc};
    ctx->fired_suppressions.PushBack(supp);
  }
  if (OnReport(rep, suppress_pc != 0))
    return false;
  PrintReport(rep);
  CTX()->nreported++;
  return true;
}

// Returns true if some previously fired suppression of the same report type
// matches a pc in the given trace — cheap early-out before symbolization.
bool IsFiredSuppression(Context *ctx,
                        const ScopedReport &srep,
                        const StackTrace &trace) {
  for (uptr k = 0; k < ctx->fired_suppressions.Size(); k++) {
    if (ctx->fired_suppressions[k].type != srep.GetReport()->typ)
      continue;
    for (uptr j = 0; j < trace.Size(); j++) {
      if (trace.Get(j) == ctx->fired_suppressions[k].pc)
        return true;
    }
  }
  return false;
}

// True if the frame belongs to the sanitizer interceptor code itself.
bool FrameIsInternal(const ReportStack *frame) {
  return frame != 0 && frame->file != 0
      && (internal_strstr(frame->file, "tsan_interceptors.cc") ||
          internal_strstr(frame->file, "sanitizer_common_interceptors.inc"));
}

// On programs that use Java we see weird reports like:
// WARNING: ThreadSanitizer: data race (pid=22512)
//   Read of size 8 at 0x7d2b00084318 by thread 100:
//     #0 memcpy tsan_interceptors.cc:406 (foo+0x00000d8dfae3)
//     #1 <null> <null>:0 (0x7f7ad9b40193)
//   Previous write of size 8 at 0x7d2b00084318 by thread 105:
//     #0 strncpy tsan_interceptors.cc:501 (foo+0x00000d8e0919)
//     #1 <null> <null>:0 (0x7f7ad9b42707)
static bool IsJavaNonsense(const ReportDesc *rep) {
  for (uptr i = 0; i < rep->mops.Size(); i++) {
    ReportMop *mop = rep->mops[i];
    ReportStack *frame = mop->stack;
    // Top frame missing or completely unsymbolized.
    if (frame == 0
        || (frame->func == 0 && frame->file == 0 && frame->line == 0
          && frame->module == 0)) {
      return true;
    }
    // Top frame is an interceptor: look one frame up.
    if (FrameIsInternal(frame)) {
      frame = frame->next;
      if (frame == 0
          || (frame->func == 0 && frame->file == 0 && frame->line == 0
            && frame->module == 0)) {
        if (frame) {
          // Remember the unsymbolized caller pc so that identical reports
          // are dropped early next time.
          FiredSuppression supp = {rep->typ, frame->pc};
          CTX()->fired_suppressions.PushBack(supp);
        }
        return true;
      }
    }
  }
  return false;
}

// Decides whether the pending race should be reported when atomic races are
// filtered out: plain/plain races always qualify; atomic accesses qualify
// only when racing with a free.
static bool RaceBetweenAtomicAndFree(ThreadState *thr) {
  Shadow s0(thr->racy_state[0]);
  Shadow s1(thr->racy_state[1]);
  CHECK(!(s0.IsAtomic() && s1.IsAtomic()));
  if (!s0.IsAtomic() && !s1.IsAtomic())
    return true;
  if (s0.IsAtomic() && s1.IsFreed())
    return true;
  if (s1.IsAtomic() && thr->is_freeing)
    return true;
  return false;
}

// Main race-reporting entry point: reconstructs both accesses from the
// thread's racy_state, applies the various suppression layers, fills a
// ScopedReport (accesses, threads, mutexes, location, sleep stack) and
// emits it.
void ReportRace(ThreadState *thr) {
  if (!flags()->report_bugs)
    return;
  ScopedInRtl in_rtl;

  if (!flags()->report_atomic_races && !RaceBetweenAtomicAndFree(thr))
    return;

  if (thr->in_signal_handler)
    Printf("ThreadSanitizer: printing report from signal handler."
           " Can crash or hang.\n");

  // Detect use-after-free (and clear the freed bit so the rest of the
  // pipeline treats the shadow uniformly).
  bool freed = false;
  {
    Shadow s(thr->racy_state[1]);
    freed = s.GetFreedAndReset();
    thr->racy_state[1] = s.raw();
  }

  // Compute the united [addr_min, addr_max) range of the two accesses.
  uptr addr = ShadowToMem((uptr)thr->racy_shadow_addr);
  uptr addr_min = 0;
  uptr addr_max = 0;
  {
    uptr a0 = addr + Shadow(thr->racy_state[0]).addr0();
    uptr a1 = addr + Shadow(thr->racy_state[1]).addr0();
    uptr e0 = a0 + Shadow(thr->racy_state[0]).size();
    uptr e1 = a1 + Shadow(thr->racy_state[1]).size();
    addr_min = min(a0, a1);
    addr_max = max(e0, e1);
    if (IsExpectedReport(addr_min, addr_max - addr_min))
      return;
  }

  Context *ctx = CTX();
  Lock l0(&ctx->thread_mtx);  // ScopedReport requires thread_mtx to be held

  ScopedReport rep(freed ? ReportTypeUseAfterFree : ReportTypeRace);
  const uptr kMop = 2;
  StackTrace traces[kMop];
  const uptr toppc = TraceTopPC(thr);
  traces[0].ObtainCurrent(thr, toppc);
  if (IsFiredSuppression(ctx, rep, traces[0]))
    return;
  // Restore the stack and mutex set of the previous (racing) access by
  // replaying that thread's trace.
  InternalScopedBuffer<MutexSet> mset2(1);
  new(mset2.data()) MutexSet();
  Shadow s2(thr->racy_state[1]);
  RestoreStack(s2.tid(), s2.epoch(), &traces[1], mset2.data());

  if (HandleRacyStacks(thr, traces, addr_min, addr_max))
    return;

  for (uptr i = 0; i < kMop; i++) {
    Shadow s(thr->racy_state[i]);
    rep.AddMemoryAccess(addr, s, &traces[i],
                        i == 0 ? &thr->mset : mset2.data());
  }

  if (flags()->suppress_java && IsJavaNonsense(rep.GetReport()))
    return;

  // Add descriptions of the involved threads (skip if the tid slot has
  // since been reused for a different epoch range).
  for (uptr i = 0; i < kMop; i++) {
    FastState s(thr->racy_state[i]);
    ThreadContext *tctx = ctx->threads[s.tid()];
    if (s.epoch() < tctx->epoch0 || s.epoch() > tctx->epoch1)
      continue;
    rep.AddThread(tctx);
  }

  rep.AddLocation(addr_min, addr_max - addr_min);

#ifndef TSAN_GO
  { // NOLINT
    // Note a preceding sleep if the other access happened before the
    // current thread last slept ("as if synchronized via sleep").
    Shadow s(thr->racy_state[1]);
    if (s.epoch() <= thr->last_sleep_clock.get(s.tid()))
      rep.AddSleep(thr->last_sleep_stack_id);
  }
#endif

  if (!OutputReport(ctx, rep, rep.GetReport()->mops[0]->stack,
                    rep.GetReport()->mops[1]->stack))
    return;

  // Only remember the race for dedup purposes if it was actually printed.
  AddRacyStacks(thr, traces, addr_min, addr_max);
}

// Prints the current (instrumentation-tracked) stack of the given thread.
void PrintCurrentStack(ThreadState *thr, uptr pc) {
  StackTrace trace;
  trace.ObtainCurrent(thr, pc);
  PrintStack(SymbolizeStack(trace));
}

// Prints the current stack using the slow frame-pointer unwinder; used on
// fatal errors where the fast trace may be unavailable.  The StackTrace
// object is heap-allocated (and deliberately leaked) — presumably to avoid
// a large object on a possibly-exhausted stack; TODO confirm.
void PrintCurrentStackSlow() {
#ifndef TSAN_GO
  __sanitizer::StackTrace *ptrace = new(internal_alloc(MBlockStackTrace,
      sizeof(__sanitizer::StackTrace))) __sanitizer::StackTrace;
  ptrace->SlowUnwindStack(__sanitizer::StackTrace::GetCurrentPc(),
      kStackTraceMax);
  StackTrace trace;
  trace.Init(ptrace->trace, ptrace->size);
  PrintStack(SymbolizeStack(trace));
#endif
}

}  // namespace __tsan