tsan_rtl_report.cc revision 2c5284e0f87e101e177a151fae5f557bcf6f664c
1//===-- tsan_rtl_report.cc ------------------------------------------------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file is a part of ThreadSanitizer (TSan), a race detector. 11// 12//===----------------------------------------------------------------------===// 13 14#include "sanitizer_common/sanitizer_libc.h" 15#include "sanitizer_common/sanitizer_placement_new.h" 16#include "sanitizer_common/sanitizer_stackdepot.h" 17#include "sanitizer_common/sanitizer_common.h" 18#include "sanitizer_common/sanitizer_stacktrace.h" 19#include "tsan_platform.h" 20#include "tsan_rtl.h" 21#include "tsan_suppressions.h" 22#include "tsan_symbolize.h" 23#include "tsan_report.h" 24#include "tsan_sync.h" 25#include "tsan_mman.h" 26#include "tsan_flags.h" 27#include "tsan_fd.h" 28 29namespace __tsan { 30 31using namespace __sanitizer; // NOLINT 32 33static ReportStack *SymbolizeStack(const StackTrace& trace); 34 35void TsanCheckFailed(const char *file, int line, const char *cond, 36 u64 v1, u64 v2) { 37 ScopedInRtl in_rtl; 38 Printf("FATAL: ThreadSanitizer CHECK failed: " 39 "%s:%d \"%s\" (0x%zx, 0x%zx)\n", 40 file, line, cond, (uptr)v1, (uptr)v2); 41 PrintCurrentStackSlow(); 42 Die(); 43} 44 45// Can be overriden by an application/test to intercept reports. 46#ifdef TSAN_EXTERNAL_HOOKS 47bool OnReport(const ReportDesc *rep, bool suppressed); 48#else 49SANITIZER_INTERFACE_ATTRIBUTE 50bool WEAK OnReport(const ReportDesc *rep, bool suppressed) { 51 (void)rep; 52 return suppressed; 53} 54#endif 55 56static void StackStripMain(ReportStack *stack) { 57 ReportStack *last_frame = 0; 58 ReportStack *last_frame2 = 0; 59 const char *prefix = "__interceptor_"; 60 uptr prefix_len = internal_strlen(prefix); 61 const char *path_prefix = flags()->strip_path_prefix; 62 uptr path_prefix_len = internal_strlen(path_prefix); 63 char *pos; 64 for (ReportStack *ent = stack; ent; ent = ent->next) { 65 if (ent->func && 0 == internal_strncmp(ent->func, prefix, prefix_len)) 66 ent->func += prefix_len; 67 if (ent->file && (pos = internal_strstr(ent->file, path_prefix))) 68 ent->file = pos + path_prefix_len; 69 if (ent->file && ent->file[0] == '.' && ent->file[1] == '/') 70 ent->file += 2; 71 last_frame2 = last_frame; 72 last_frame = ent; 73 } 74 75 if (last_frame2 == 0) 76 return; 77 const char *last = last_frame->func; 78#ifndef TSAN_GO 79 const char *last2 = last_frame2->func; 80 // Strip frame above 'main' 81 if (last2 && 0 == internal_strcmp(last2, "main")) { 82 last_frame2->next = 0; 83 // Strip our internal thread start routine. 84 } else if (last && 0 == internal_strcmp(last, "__tsan_thread_start_func")) { 85 last_frame2->next = 0; 86 // Strip global ctors init. 87 } else if (last && 0 == internal_strcmp(last, "__do_global_ctors_aux")) { 88 last_frame2->next = 0; 89 // If both are 0, then we probably just failed to symbolize. 90 } else if (last || last2) { 91 // Ensure that we recovered stack completely. Trimmed stack 92 // can actually happen if we do not instrument some code, 93 // so it's only a debug print. However we must try hard to not miss it 94 // due to our fault. 95 DPrintf("Bottom stack frame of stack %zx is missed\n", stack->pc); 96 } 97#else 98 if (last && 0 == internal_strcmp(last, "schedunlock")) 99 last_frame2->next = 0; 100#endif 101} 102 103static ReportStack *SymbolizeStack(const StackTrace& trace) { 104 if (trace.IsEmpty()) 105 return 0; 106 ReportStack *stack = 0; 107 for (uptr si = 0; si < trace.Size(); si++) { 108 // We obtain the return address, that is, address of the next instruction, 109 // so offset it by 1 byte. 110 bool is_last = (si == trace.Size() - 1); 111 ReportStack *ent = SymbolizeCode(trace.Get(si) - !is_last); 112 CHECK_NE(ent, 0); 113 ReportStack *last = ent; 114 while (last->next) { 115 last->pc += !is_last; 116 last = last->next; 117 } 118 last->pc += !is_last; 119 last->next = stack; 120 stack = ent; 121 } 122 StackStripMain(stack); 123 return stack; 124} 125 126ScopedReport::ScopedReport(ReportType typ) { 127 ctx_ = CTX(); 128 ctx_->thread_registry->CheckLocked(); 129 void *mem = internal_alloc(MBlockReport, sizeof(ReportDesc)); 130 rep_ = new(mem) ReportDesc; 131 rep_->typ = typ; 132 ctx_->report_mtx.Lock(); 133} 134 135ScopedReport::~ScopedReport() { 136 ctx_->report_mtx.Unlock(); 137 DestroyAndFree(rep_); 138} 139 140void ScopedReport::AddStack(const StackTrace *stack) { 141 ReportStack **rs = rep_->stacks.PushBack(); 142 *rs = SymbolizeStack(*stack); 143} 144 145void ScopedReport::AddMemoryAccess(uptr addr, Shadow s, 146 const StackTrace *stack, const MutexSet *mset) { 147 void *mem = internal_alloc(MBlockReportMop, sizeof(ReportMop)); 148 ReportMop *mop = new(mem) ReportMop; 149 rep_->mops.PushBack(mop); 150 mop->tid = s.tid(); 151 mop->addr = addr + s.addr0(); 152 mop->size = s.size(); 153 mop->write = s.IsWrite(); 154 mop->atomic = s.IsAtomic(); 155 mop->stack = SymbolizeStack(*stack); 156 for (uptr i = 0; i < mset->Size(); i++) { 157 MutexSet::Desc d = mset->Get(i); 158 u64 uid = 0; 159 uptr addr = SyncVar::SplitId(d.id, &uid); 160 SyncVar *s = ctx_->synctab.GetIfExistsAndLock(addr, false); 161 // Check that the mutex is still alive. 162 // Another mutex can be created at the same address, 163 // so check uid as well. 164 if (s && s->CheckId(uid)) { 165 ReportMopMutex mtx = {s->uid, d.write}; 166 mop->mset.PushBack(mtx); 167 AddMutex(s); 168 } else { 169 ReportMopMutex mtx = {d.id, d.write}; 170 mop->mset.PushBack(mtx); 171 AddMutex(d.id); 172 } 173 if (s) 174 s->mtx.ReadUnlock(); 175 } 176} 177 178void ScopedReport::AddThread(const ThreadContext *tctx) { 179 for (uptr i = 0; i < rep_->threads.Size(); i++) { 180 if ((u32)rep_->threads[i]->id == tctx->tid) 181 return; 182 } 183 void *mem = internal_alloc(MBlockReportThread, sizeof(ReportThread)); 184 ReportThread *rt = new(mem) ReportThread(); 185 rep_->threads.PushBack(rt); 186 rt->id = tctx->tid; 187 rt->pid = tctx->os_id; 188 rt->running = (tctx->status == ThreadStatusRunning); 189 rt->name = tctx->name ? internal_strdup(tctx->name) : 0; 190 rt->parent_tid = tctx->parent_tid; 191 rt->stack = 0; 192#ifdef TSAN_GO 193 rt->stack = SymbolizeStack(tctx->creation_stack); 194#else 195 uptr ssz = 0; 196 const uptr *stack = StackDepotGet(tctx->creation_stack_id, &ssz); 197 if (stack) { 198 StackTrace trace; 199 trace.Init(stack, ssz); 200 rt->stack = SymbolizeStack(trace); 201 } 202#endif 203} 204 205#ifndef TSAN_GO 206static ThreadContext *FindThreadByUidLocked(int unique_id) { 207 Context *ctx = CTX(); 208 ctx->thread_registry->CheckLocked(); 209 for (unsigned i = 0; i < kMaxTid; i++) { 210 ThreadContext *tctx = static_cast<ThreadContext*>( 211 ctx->thread_registry->GetThreadLocked(i)); 212 if (tctx && tctx->unique_id == (u32)unique_id) { 213 return tctx; 214 } 215 } 216 return 0; 217} 218 219static bool IsInStackOrTls(ThreadContextBase *tctx_base, void *arg) { 220 uptr addr = (uptr)arg; 221 ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base); 222 if (tctx->status != ThreadStatusRunning) 223 return false; 224 ThreadState *thr = tctx->thr; 225 CHECK(thr); 226 return ((addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size) || 227 (addr >= thr->tls_addr && addr < thr->tls_addr + thr->tls_size)); 228} 229 230ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) { 231 Context *ctx = CTX(); 232 ctx->thread_registry->CheckLocked(); 233 ThreadContext *tctx = static_cast<ThreadContext*>( 234 ctx->thread_registry->FindThreadContextLocked(IsInStackOrTls, 235 (void*)addr)); 236 if (!tctx) 237 return 0; 238 ThreadState *thr = tctx->thr; 239 CHECK(thr); 240 *is_stack = (addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size); 241 return tctx; 242} 243#endif 244 245void ScopedReport::AddMutex(const SyncVar *s) { 246 for (uptr i = 0; i < rep_->mutexes.Size(); i++) { 247 if (rep_->mutexes[i]->id == s->uid) 248 return; 249 } 250 void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex)); 251 ReportMutex *rm = new(mem) ReportMutex(); 252 rep_->mutexes.PushBack(rm); 253 rm->id = s->uid; 254 rm->destroyed = false; 255 rm->stack = 0; 256#ifndef TSAN_GO 257 uptr ssz = 0; 258 const uptr *stack = StackDepotGet(s->creation_stack_id, &ssz); 259 if (stack) { 260 StackTrace trace; 261 trace.Init(stack, ssz); 262 rm->stack = SymbolizeStack(trace); 263 } 264#endif 265} 266 267void ScopedReport::AddMutex(u64 id) { 268 for (uptr i = 0; i < rep_->mutexes.Size(); i++) { 269 if (rep_->mutexes[i]->id == id) 270 return; 271 } 272 void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex)); 273 ReportMutex *rm = new(mem) ReportMutex(); 274 rep_->mutexes.PushBack(rm); 275 rm->id = id; 276 rm->destroyed = true; 277 rm->stack = 0; 278} 279 280void ScopedReport::AddLocation(uptr addr, uptr size) { 281 if (addr == 0) 282 return; 283#ifndef TSAN_GO 284 int fd = -1; 285 int creat_tid = -1; 286 u32 creat_stack = 0; 287 if (FdLocation(addr, &fd, &creat_tid, &creat_stack) 288 || FdLocation(AlternativeAddress(addr), &fd, &creat_tid, &creat_stack)) { 289 void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation)); 290 ReportLocation *loc = new(mem) ReportLocation(); 291 rep_->locs.PushBack(loc); 292 loc->type = ReportLocationFD; 293 loc->fd = fd; 294 loc->tid = creat_tid; 295 uptr ssz = 0; 296 const uptr *stack = StackDepotGet(creat_stack, &ssz); 297 if (stack) { 298 StackTrace trace; 299 trace.Init(stack, ssz); 300 loc->stack = SymbolizeStack(trace); 301 } 302 ThreadContext *tctx = FindThreadByUidLocked(creat_tid); 303 if (tctx) 304 AddThread(tctx); 305 return; 306 } 307 if (allocator()->PointerIsMine((void*)addr)) { 308 MBlock *b = user_mblock(0, (void*)addr); 309 ThreadContext *tctx = FindThreadByUidLocked(b->alloc_tid); 310 void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation)); 311 ReportLocation *loc = new(mem) ReportLocation(); 312 rep_->locs.PushBack(loc); 313 loc->type = ReportLocationHeap; 314 loc->addr = (uptr)allocator()->GetBlockBegin((void*)addr); 315 loc->size = b->size; 316 loc->tid = tctx ? tctx->tid : b->alloc_tid; 317 loc->name = 0; 318 loc->file = 0; 319 loc->line = 0; 320 loc->stack = 0; 321 uptr ssz = 0; 322 const uptr *stack = StackDepotGet(b->alloc_stack_id, &ssz); 323 if (stack) { 324 StackTrace trace; 325 trace.Init(stack, ssz); 326 loc->stack = SymbolizeStack(trace); 327 } 328 if (tctx) 329 AddThread(tctx); 330 return; 331 } 332 bool is_stack = false; 333 if (ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack)) { 334 void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation)); 335 ReportLocation *loc = new(mem) ReportLocation(); 336 rep_->locs.PushBack(loc); 337 loc->type = is_stack ? ReportLocationStack : ReportLocationTLS; 338 loc->tid = tctx->tid; 339 AddThread(tctx); 340 } 341 ReportLocation *loc = SymbolizeData(addr); 342 if (loc) { 343 rep_->locs.PushBack(loc); 344 return; 345 } 346#endif 347} 348 349#ifndef TSAN_GO 350void ScopedReport::AddSleep(u32 stack_id) { 351 uptr ssz = 0; 352 const uptr *stack = StackDepotGet(stack_id, &ssz); 353 if (stack) { 354 StackTrace trace; 355 trace.Init(stack, ssz); 356 rep_->sleep = SymbolizeStack(trace); 357 } 358} 359#endif 360 361const ReportDesc *ScopedReport::GetReport() const { 362 return rep_; 363} 364 365void RestoreStack(int tid, const u64 epoch, StackTrace *stk, MutexSet *mset) { 366 // This function restores stack trace and mutex set for the thread/epoch. 367 // It does so by getting stack trace and mutex set at the beginning of 368 // trace part, and then replaying the trace till the given epoch. 369 Context *ctx = CTX(); 370 ctx->thread_registry->CheckLocked(); 371 ThreadContext *tctx = static_cast<ThreadContext*>( 372 ctx->thread_registry->GetThreadLocked(tid)); 373 if (tctx == 0) 374 return; 375 Trace* trace = 0; 376 if (tctx->status == ThreadStatusRunning) { 377 CHECK(tctx->thr); 378 trace = &tctx->thr->trace; 379 } else if (tctx->status == ThreadStatusFinished 380 || tctx->status == ThreadStatusDead) { 381 if (tctx->dead_info == 0) 382 return; 383 trace = &tctx->dead_info->trace; 384 } else { 385 return; 386 } 387 Lock l(&trace->mtx); 388 const int partidx = (epoch / kTracePartSize) % TraceParts(); 389 TraceHeader* hdr = &trace->headers[partidx]; 390 if (epoch < hdr->epoch0) 391 return; 392 const u64 epoch0 = RoundDown(epoch, TraceSize()); 393 const u64 eend = epoch % TraceSize(); 394 const u64 ebegin = RoundDown(eend, kTracePartSize); 395 DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n", 396 tid, (uptr)epoch, (uptr)ebegin, (uptr)eend, partidx); 397 InternalScopedBuffer<uptr> stack(1024); // FIXME: de-hardcode 1024 398 for (uptr i = 0; i < hdr->stack0.Size(); i++) { 399 stack[i] = hdr->stack0.Get(i); 400 DPrintf2(" #%02lu: pc=%zx\n", i, stack[i]); 401 } 402 if (mset) 403 *mset = hdr->mset0; 404 uptr pos = hdr->stack0.Size(); 405 Event *events = (Event*)GetThreadTrace(tid); 406 for (uptr i = ebegin; i <= eend; i++) { 407 Event ev = events[i]; 408 EventType typ = (EventType)(ev >> 61); 409 uptr pc = (uptr)(ev & ((1ull << 61) - 1)); 410 DPrintf2(" %zu typ=%d pc=%zx\n", i, typ, pc); 411 if (typ == EventTypeMop) { 412 stack[pos] = pc; 413 } else if (typ == EventTypeFuncEnter) { 414 stack[pos++] = pc; 415 } else if (typ == EventTypeFuncExit) { 416 if (pos > 0) 417 pos--; 418 } 419 if (mset) { 420 if (typ == EventTypeLock) { 421 mset->Add(pc, true, epoch0 + i); 422 } else if (typ == EventTypeUnlock) { 423 mset->Del(pc, true); 424 } else if (typ == EventTypeRLock) { 425 mset->Add(pc, false, epoch0 + i); 426 } else if (typ == EventTypeRUnlock) { 427 mset->Del(pc, false); 428 } 429 } 430 for (uptr j = 0; j <= pos; j++) 431 DPrintf2(" #%zu: %zx\n", j, stack[j]); 432 } 433 if (pos == 0 && stack[0] == 0) 434 return; 435 pos++; 436 stk->Init(stack.data(), pos); 437} 438 439static bool HandleRacyStacks(ThreadState *thr, const StackTrace (&traces)[2], 440 uptr addr_min, uptr addr_max) { 441 Context *ctx = CTX(); 442 bool equal_stack = false; 443 RacyStacks hash; 444 if (flags()->suppress_equal_stacks) { 445 hash.hash[0] = md5_hash(traces[0].Begin(), traces[0].Size() * sizeof(uptr)); 446 hash.hash[1] = md5_hash(traces[1].Begin(), traces[1].Size() * sizeof(uptr)); 447 for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) { 448 if (hash == ctx->racy_stacks[i]) { 449 DPrintf("ThreadSanitizer: suppressing report as doubled (stack)\n"); 450 equal_stack = true; 451 break; 452 } 453 } 454 } 455 bool equal_address = false; 456 RacyAddress ra0 = {addr_min, addr_max}; 457 if (flags()->suppress_equal_addresses) { 458 for (uptr i = 0; i < ctx->racy_addresses.Size(); i++) { 459 RacyAddress ra2 = ctx->racy_addresses[i]; 460 uptr maxbeg = max(ra0.addr_min, ra2.addr_min); 461 uptr minend = min(ra0.addr_max, ra2.addr_max); 462 if (maxbeg < minend) { 463 DPrintf("ThreadSanitizer: suppressing report as doubled (addr)\n"); 464 equal_address = true; 465 break; 466 } 467 } 468 } 469 if (equal_stack || equal_address) { 470 if (!equal_stack) 471 ctx->racy_stacks.PushBack(hash); 472 if (!equal_address) 473 ctx->racy_addresses.PushBack(ra0); 474 return true; 475 } 476 return false; 477} 478 479static void AddRacyStacks(ThreadState *thr, const StackTrace (&traces)[2], 480 uptr addr_min, uptr addr_max) { 481 Context *ctx = CTX(); 482 if (flags()->suppress_equal_stacks) { 483 RacyStacks hash; 484 hash.hash[0] = md5_hash(traces[0].Begin(), traces[0].Size() * sizeof(uptr)); 485 hash.hash[1] = md5_hash(traces[1].Begin(), traces[1].Size() * sizeof(uptr)); 486 ctx->racy_stacks.PushBack(hash); 487 } 488 if (flags()->suppress_equal_addresses) { 489 RacyAddress ra0 = {addr_min, addr_max}; 490 ctx->racy_addresses.PushBack(ra0); 491 } 492} 493 494bool OutputReport(Context *ctx, 495 const ScopedReport &srep, 496 const ReportStack *suppress_stack1, 497 const ReportStack *suppress_stack2) { 498 const ReportDesc *rep = srep.GetReport(); 499 uptr suppress_pc = IsSuppressed(rep->typ, suppress_stack1); 500 if (suppress_pc == 0) 501 suppress_pc = IsSuppressed(rep->typ, suppress_stack2); 502 if (suppress_pc != 0) { 503 FiredSuppression supp = {srep.GetReport()->typ, suppress_pc}; 504 ctx->fired_suppressions.PushBack(supp); 505 } 506 if (OnReport(rep, suppress_pc != 0)) 507 return false; 508 PrintReport(rep); 509 CTX()->nreported++; 510 return true; 511} 512 513bool IsFiredSuppression(Context *ctx, 514 const ScopedReport &srep, 515 const StackTrace &trace) { 516 for (uptr k = 0; k < ctx->fired_suppressions.Size(); k++) { 517 if (ctx->fired_suppressions[k].type != srep.GetReport()->typ) 518 continue; 519 for (uptr j = 0; j < trace.Size(); j++) { 520 if (trace.Get(j) == ctx->fired_suppressions[k].pc) 521 return true; 522 } 523 } 524 return false; 525} 526 527bool FrameIsInternal(const ReportStack *frame) { 528 return frame != 0 && frame->file != 0 529 && (internal_strstr(frame->file, "tsan_interceptors.cc") || 530 internal_strstr(frame->file, "sanitizer_common_interceptors.inc") || 531 internal_strstr(frame->file, "tsan_interface_")); 532} 533 534// On programs that use Java we see weird reports like: 535// WARNING: ThreadSanitizer: data race (pid=22512) 536// Read of size 8 at 0x7d2b00084318 by thread 100: 537// #0 memcpy tsan_interceptors.cc:406 (foo+0x00000d8dfae3) 538// #1 <null> <null>:0 (0x7f7ad9b40193) 539// Previous write of size 8 at 0x7d2b00084318 by thread 105: 540// #0 strncpy tsan_interceptors.cc:501 (foo+0x00000d8e0919) 541// #1 <null> <null>:0 (0x7f7ad9b42707) 542static bool IsJavaNonsense(const ReportDesc *rep) { 543#ifndef TSAN_GO 544 for (uptr i = 0; i < rep->mops.Size(); i++) { 545 ReportMop *mop = rep->mops[i]; 546 ReportStack *frame = mop->stack; 547 if (frame == 0 548 || (frame->func == 0 && frame->file == 0 && frame->line == 0 549 && frame->module == 0)) { 550 return true; 551 } 552 if (FrameIsInternal(frame)) { 553 frame = frame->next; 554 if (frame == 0 555 || (frame->func == 0 && frame->file == 0 && frame->line == 0 556 && frame->module == 0)) { 557 if (frame) { 558 FiredSuppression supp = {rep->typ, frame->pc}; 559 CTX()->fired_suppressions.PushBack(supp); 560 } 561 return true; 562 } 563 } 564 } 565#endif 566 return false; 567} 568 569static bool RaceBetweenAtomicAndFree(ThreadState *thr) { 570 Shadow s0(thr->racy_state[0]); 571 Shadow s1(thr->racy_state[1]); 572 CHECK(!(s0.IsAtomic() && s1.IsAtomic())); 573 if (!s0.IsAtomic() && !s1.IsAtomic()) 574 return true; 575 if (s0.IsAtomic() && s1.IsFreed()) 576 return true; 577 if (s1.IsAtomic() && thr->is_freeing) 578 return true; 579 return false; 580} 581 582void ReportRace(ThreadState *thr) { 583 if (!flags()->report_bugs) 584 return; 585 ScopedInRtl in_rtl; 586 587 if (!flags()->report_atomic_races && !RaceBetweenAtomicAndFree(thr)) 588 return; 589 590 if (thr->in_signal_handler) 591 Printf("ThreadSanitizer: printing report from signal handler." 592 " Can crash or hang.\n"); 593 594 bool freed = false; 595 { 596 Shadow s(thr->racy_state[1]); 597 freed = s.GetFreedAndReset(); 598 thr->racy_state[1] = s.raw(); 599 } 600 601 uptr addr = ShadowToMem((uptr)thr->racy_shadow_addr); 602 uptr addr_min = 0; 603 uptr addr_max = 0; 604 { 605 uptr a0 = addr + Shadow(thr->racy_state[0]).addr0(); 606 uptr a1 = addr + Shadow(thr->racy_state[1]).addr0(); 607 uptr e0 = a0 + Shadow(thr->racy_state[0]).size(); 608 uptr e1 = a1 + Shadow(thr->racy_state[1]).size(); 609 addr_min = min(a0, a1); 610 addr_max = max(e0, e1); 611 if (IsExpectedReport(addr_min, addr_max - addr_min)) 612 return; 613 } 614 615 Context *ctx = CTX(); 616 ThreadRegistryLock l0(ctx->thread_registry); 617 618 ScopedReport rep(freed ? ReportTypeUseAfterFree : ReportTypeRace); 619 const uptr kMop = 2; 620 StackTrace traces[kMop]; 621 const uptr toppc = TraceTopPC(thr); 622 traces[0].ObtainCurrent(thr, toppc); 623 if (IsFiredSuppression(ctx, rep, traces[0])) 624 return; 625 InternalScopedBuffer<MutexSet> mset2(1); 626 new(mset2.data()) MutexSet(); 627 Shadow s2(thr->racy_state[1]); 628 RestoreStack(s2.tid(), s2.epoch(), &traces[1], mset2.data()); 629 630 if (HandleRacyStacks(thr, traces, addr_min, addr_max)) 631 return; 632 633 for (uptr i = 0; i < kMop; i++) { 634 Shadow s(thr->racy_state[i]); 635 rep.AddMemoryAccess(addr, s, &traces[i], 636 i == 0 ? &thr->mset : mset2.data()); 637 } 638 639 if (flags()->suppress_java && IsJavaNonsense(rep.GetReport())) 640 return; 641 642 for (uptr i = 0; i < kMop; i++) { 643 FastState s(thr->racy_state[i]); 644 ThreadContext *tctx = static_cast<ThreadContext*>( 645 ctx->thread_registry->GetThreadLocked(s.tid())); 646 if (s.epoch() < tctx->epoch0 || s.epoch() > tctx->epoch1) 647 continue; 648 rep.AddThread(tctx); 649 } 650 651 rep.AddLocation(addr_min, addr_max - addr_min); 652 653#ifndef TSAN_GO 654 { // NOLINT 655 Shadow s(thr->racy_state[1]); 656 if (s.epoch() <= thr->last_sleep_clock.get(s.tid())) 657 rep.AddSleep(thr->last_sleep_stack_id); 658 } 659#endif 660 661 if (!OutputReport(ctx, rep, rep.GetReport()->mops[0]->stack, 662 rep.GetReport()->mops[1]->stack)) 663 return; 664 665 AddRacyStacks(thr, traces, addr_min, addr_max); 666} 667 668void PrintCurrentStack(ThreadState *thr, uptr pc) { 669 StackTrace trace; 670 trace.ObtainCurrent(thr, pc); 671 PrintStack(SymbolizeStack(trace)); 672} 673 674void PrintCurrentStackSlow() { 675#ifndef TSAN_GO 676 __sanitizer::StackTrace *ptrace = new(internal_alloc(MBlockStackTrace, 677 sizeof(__sanitizer::StackTrace))) __sanitizer::StackTrace; 678 ptrace->SlowUnwindStack(__sanitizer::StackTrace::GetCurrentPc(), 679 kStackTraceMax); 680 StackTrace trace; 681 trace.Init(ptrace->trace, ptrace->size); 682 PrintStack(SymbolizeStack(trace)); 683#endif 684} 685 686} // namespace __tsan 687