tsan_rtl_report.cc revision 1dc5f39fbcb09734809b73eb16dd64b50d647038
//===-- tsan_rtl_report.cc ------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
#include "tsan_report.h"
#include "tsan_sync.h"
#include "tsan_mman.h"
#include "tsan_flags.h"
#include "tsan_fd.h"

namespace __tsan {

using namespace __sanitizer;  // NOLINT

static ReportStack *SymbolizeStack(const StackTrace& trace);

void TsanCheckFailed(const char *file, int line, const char *cond,
                     u64 v1, u64 v2) {
  ScopedInRtl in_rtl;
  Printf("FATAL: ThreadSanitizer CHECK failed: "
         "%s:%d \"%s\" (0x%zx, 0x%zx)\n",
         file, line, cond, (uptr)v1, (uptr)v2);
  PrintCurrentStackSlow();
  Die();
}

// Can be overridden by an application/test to intercept reports.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnReport(const ReportDesc *rep, bool suppressed);
#else
SANITIZER_INTERFACE_ATTRIBUTE
bool WEAK OnReport(const ReportDesc *rep, bool suppressed) {
  (void)rep;
  return suppressed;
}
#endif

static void StackStripMain(ReportStack *stack) {
  ReportStack *last_frame = 0;
  ReportStack *last_frame2 = 0;
  const char *prefix = "__interceptor_";
  uptr prefix_len = internal_strlen(prefix);
  const char *path_prefix = flags()->strip_path_prefix;
  uptr path_prefix_len = internal_strlen(path_prefix);
  char *pos;
  for (ReportStack *ent = stack; ent; ent = ent->next) {
    if (ent->func && 0 == internal_strncmp(ent->func, prefix, prefix_len))
      ent->func += prefix_len;
    if (ent->file && (pos = internal_strstr(ent->file, path_prefix)))
      ent->file = pos + path_prefix_len;
    if (ent->file && ent->file[0] == '.' && ent->file[1] == '/')
      ent->file += 2;
    last_frame2 = last_frame;
    last_frame = ent;
  }

  if (last_frame2 == 0)
    return;
  const char *last = last_frame->func;
#ifndef TSAN_GO
  const char *last2 = last_frame2->func;
  // Strip frame above 'main'.
  if (last2 && 0 == internal_strcmp(last2, "main")) {
    last_frame2->next = 0;
  // Strip our internal thread start routine.
  } else if (last && 0 == internal_strcmp(last, "__tsan_thread_start_func")) {
    last_frame2->next = 0;
  // Strip global ctors init.
  } else if (last && 0 == internal_strcmp(last, "__do_global_ctors_aux")) {
    last_frame2->next = 0;
  // If both are 0, then we probably just failed to symbolize.
  } else if (last || last2) {
    // Ensure that we recovered the stack completely. A trimmed stack
    // can actually happen if we do not instrument some code,
    // so it's only a debug print. However, we must try hard not to miss it
    // due to our fault.
    DPrintf("Bottom stack frame of stack %zx is missed\n", stack->pc);
  }
#else
  // The last frame always points into the runtime (gosched0, goexit0,
  // runtime.main).
  last_frame2->next = 0;
  (void)last;
#endif
}
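
// Builds a symbolized report stack from a raw trace. Note: the unwinder
// records return addresses (the instruction after the call), so every
// non-topmost pc is shifted back by 1 byte for symbolization and the offset
// is added back afterwards, so the report still shows the original pc.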
static ReportStack *SymbolizeStack(const StackTrace& trace) {
  if (trace.IsEmpty())
    return 0;
  ReportStack *stack = 0;
  for (uptr si = 0; si < trace.Size(); si++) {
    // We obtain the return address, that is, the address of the next
    // instruction, so offset it by 1 byte.
    bool is_last = (si == trace.Size() - 1);
    ReportStack *ent = SymbolizeCode(trace.Get(si) - !is_last);
    CHECK_NE(ent, 0);
    ReportStack *last = ent;
    while (last->next) {
      last->pc += !is_last;
      last = last->next;
    }
    last->pc += !is_last;
    last->next = stack;
    stack = ent;
  }
  StackStripMain(stack);
  return stack;
}

ScopedReport::ScopedReport(ReportType typ) {
  ctx_ = CTX();
  ctx_->thread_registry->CheckLocked();
  void *mem = internal_alloc(MBlockReport, sizeof(ReportDesc));
  rep_ = new(mem) ReportDesc;
  rep_->typ = typ;
  ctx_->report_mtx.Lock();
  CommonSanitizerReportMutex.Lock();
}

ScopedReport::~ScopedReport() {
  CommonSanitizerReportMutex.Unlock();
  ctx_->report_mtx.Unlock();
  DestroyAndFree(rep_);
}

void ScopedReport::AddStack(const StackTrace *stack) {
  ReportStack **rs = rep_->stacks.PushBack();
  *rs = SymbolizeStack(*stack);
}

void ScopedReport::AddMemoryAccess(uptr addr, Shadow s,
    const StackTrace *stack, const MutexSet *mset) {
  void *mem = internal_alloc(MBlockReportMop, sizeof(ReportMop));
  ReportMop *mop = new(mem) ReportMop;
  rep_->mops.PushBack(mop);
  mop->tid = s.tid();
  mop->addr = addr + s.addr0();
  mop->size = s.size();
  mop->write = s.IsWrite();
  mop->atomic = s.IsAtomic();
  mop->stack = SymbolizeStack(*stack);
  for (uptr i = 0; i < mset->Size(); i++) {
    MutexSet::Desc d = mset->Get(i);
    u64 uid = 0;
    uptr addr = SyncVar::SplitId(d.id, &uid);
    SyncVar *s = ctx_->synctab.GetIfExistsAndLock(addr, false);
    // Check that the mutex is still alive.
    // Another mutex can be created at the same address,
    // so check uid as well.
    if (s && s->CheckId(uid)) {
      ReportMopMutex mtx = {s->uid, d.write};
      mop->mset.PushBack(mtx);
      AddMutex(s);
    } else {
      ReportMopMutex mtx = {d.id, d.write};
      mop->mset.PushBack(mtx);
      AddMutex(d.id);
    }
    if (s)
      s->mtx.ReadUnlock();
  }
}

void ScopedReport::AddThread(const ThreadContext *tctx) {
  for (uptr i = 0; i < rep_->threads.Size(); i++) {
    if ((u32)rep_->threads[i]->id == tctx->tid)
      return;
  }
  void *mem = internal_alloc(MBlockReportThread, sizeof(ReportThread));
  ReportThread *rt = new(mem) ReportThread();
  rep_->threads.PushBack(rt);
  rt->id = tctx->tid;
  rt->pid = tctx->os_id;
  rt->running = (tctx->status == ThreadStatusRunning);
  rt->name = tctx->name ? internal_strdup(tctx->name) : 0;
  rt->parent_tid = tctx->parent_tid;
  rt->stack = 0;
#ifdef TSAN_GO
  rt->stack = SymbolizeStack(tctx->creation_stack);
#else
  uptr ssz = 0;
  const uptr *stack = StackDepotGet(tctx->creation_stack_id, &ssz);
  if (stack) {
    StackTrace trace;
    trace.Init(stack, ssz);
    rt->stack = SymbolizeStack(trace);
  }
#endif
}
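
// The lookup helpers below walk the thread registry and require the caller
// to hold the registry lock; this is asserted via CheckLocked().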
#ifndef TSAN_GO
static ThreadContext *FindThreadByUidLocked(int unique_id) {
  Context *ctx = CTX();
  ctx->thread_registry->CheckLocked();
  for (unsigned i = 0; i < kMaxTid; i++) {
    ThreadContext *tctx = static_cast<ThreadContext*>(
        ctx->thread_registry->GetThreadLocked(i));
    if (tctx && tctx->unique_id == (u32)unique_id) {
      return tctx;
    }
  }
  return 0;
}

static ThreadContext *FindThreadByTidLocked(int tid) {
  Context *ctx = CTX();
  ctx->thread_registry->CheckLocked();
  return static_cast<ThreadContext*>(
      ctx->thread_registry->GetThreadLocked(tid));
}

static bool IsInStackOrTls(ThreadContextBase *tctx_base, void *arg) {
  uptr addr = (uptr)arg;
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  if (tctx->status != ThreadStatusRunning)
    return false;
  ThreadState *thr = tctx->thr;
  CHECK(thr);
  return ((addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size) ||
          (addr >= thr->tls_addr && addr < thr->tls_addr + thr->tls_size));
}

ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
  Context *ctx = CTX();
  ctx->thread_registry->CheckLocked();
  ThreadContext *tctx = static_cast<ThreadContext*>(
      ctx->thread_registry->FindThreadContextLocked(IsInStackOrTls,
                                                    (void*)addr));
  if (!tctx)
    return 0;
  ThreadState *thr = tctx->thr;
  CHECK(thr);
  *is_stack = (addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size);
  return tctx;
}
#endif

void ScopedReport::AddMutex(const SyncVar *s) {
  for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
    if (rep_->mutexes[i]->id == s->uid)
      return;
  }
  void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
  ReportMutex *rm = new(mem) ReportMutex();
  rep_->mutexes.PushBack(rm);
  rm->id = s->uid;
  rm->destroyed = false;
  rm->stack = 0;
#ifndef TSAN_GO
  uptr ssz = 0;
  const uptr *stack = StackDepotGet(s->creation_stack_id, &ssz);
  if (stack) {
    StackTrace trace;
    trace.Init(stack, ssz);
    rm->stack = SymbolizeStack(trace);
  }
#endif
}

void ScopedReport::AddMutex(u64 id) {
  for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
    if (rep_->mutexes[i]->id == id)
      return;
  }
  void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
  ReportMutex *rm = new(mem) ReportMutex();
  rep_->mutexes.PushBack(rm);
  rm->id = id;
  rm->destroyed = true;
  rm->stack = 0;
}
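
// Classifies the racy address and attaches location info to the report.
// The checks run in order: file descriptor, heap block, thread stack or TLS,
// and finally a global (symbolized via SymbolizeData()).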
void ScopedReport::AddLocation(uptr addr, uptr size) {
  if (addr == 0)
    return;
#ifndef TSAN_GO
  int fd = -1;
  int creat_tid = -1;
  u32 creat_stack = 0;
  if (FdLocation(addr, &fd, &creat_tid, &creat_stack)
      || FdLocation(AlternativeAddress(addr), &fd, &creat_tid, &creat_stack)) {
    void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
    ReportLocation *loc = new(mem) ReportLocation();
    rep_->locs.PushBack(loc);
    loc->type = ReportLocationFD;
    loc->fd = fd;
    loc->tid = creat_tid;
    uptr ssz = 0;
    const uptr *stack = StackDepotGet(creat_stack, &ssz);
    if (stack) {
      StackTrace trace;
      trace.Init(stack, ssz);
      loc->stack = SymbolizeStack(trace);
    }
    ThreadContext *tctx = FindThreadByUidLocked(creat_tid);
    if (tctx)
      AddThread(tctx);
    return;
  }
  MBlock *b = 0;
  if (allocator()->PointerIsMine((void*)addr)
      && (b = user_mblock(0, (void*)addr))) {
    ThreadContext *tctx = FindThreadByTidLocked(b->Tid());
    void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
    ReportLocation *loc = new(mem) ReportLocation();
    rep_->locs.PushBack(loc);
    loc->type = ReportLocationHeap;
    loc->addr = (uptr)allocator()->GetBlockBegin((void*)addr);
    loc->size = b->Size();
    loc->tid = tctx ? tctx->tid : b->Tid();
    loc->name = 0;
    loc->file = 0;
    loc->line = 0;
    loc->stack = 0;
    uptr ssz = 0;
    const uptr *stack = StackDepotGet(b->StackId(), &ssz);
    if (stack) {
      StackTrace trace;
      trace.Init(stack, ssz);
      loc->stack = SymbolizeStack(trace);
    }
    if (tctx)
      AddThread(tctx);
    return;
  }
  bool is_stack = false;
  if (ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack)) {
    void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
    ReportLocation *loc = new(mem) ReportLocation();
    rep_->locs.PushBack(loc);
    loc->type = is_stack ? ReportLocationStack : ReportLocationTLS;
    loc->tid = tctx->tid;
    AddThread(tctx);
  }
  ReportLocation *loc = SymbolizeData(addr);
  if (loc) {
    rep_->locs.PushBack(loc);
    return;
  }
#endif
}

#ifndef TSAN_GO
void ScopedReport::AddSleep(u32 stack_id) {
  uptr ssz = 0;
  const uptr *stack = StackDepotGet(stack_id, &ssz);
  if (stack) {
    StackTrace trace;
    trace.Init(stack, ssz);
    rep_->sleep = SymbolizeStack(trace);
  }
}
#endif

void ScopedReport::SetCount(int count) {
  rep_->count = count;
}

const ReportDesc *ScopedReport::GetReport() const {
  return rep_;
}

void RestoreStack(int tid, const u64 epoch, StackTrace *stk, MutexSet *mset) {
  // This function restores the stack trace and mutex set for the
  // thread/epoch. It does so by getting the stack trace and mutex set
  // at the beginning of the trace part, and then replaying the trace
  // till the given epoch.
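  // Event encoding, as decoded in the replay loop below: each 64-bit event
  // packs a 3-bit EventType in the top bits and the pc in the low 61 bits,
  // i.e. ev = ((u64)typ << 61) | (pc & ((1ull << 61) - 1)).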
  Context *ctx = CTX();
  ctx->thread_registry->CheckLocked();
  ThreadContext *tctx = static_cast<ThreadContext*>(
      ctx->thread_registry->GetThreadLocked(tid));
  if (tctx == 0)
    return;
  if (tctx->status != ThreadStatusRunning
      && tctx->status != ThreadStatusFinished
      && tctx->status != ThreadStatusDead)
    return;
  Trace* trace = ThreadTrace(tctx->tid);
  Lock l(&trace->mtx);
  const int partidx = (epoch / kTracePartSize) % TraceParts();
  TraceHeader* hdr = &trace->headers[partidx];
  if (epoch < hdr->epoch0)
    return;
  const u64 epoch0 = RoundDown(epoch, TraceSize());
  const u64 eend = epoch % TraceSize();
  const u64 ebegin = RoundDown(eend, kTracePartSize);
  DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n",
          tid, (uptr)epoch, (uptr)ebegin, (uptr)eend, partidx);
  InternalScopedBuffer<uptr> stack(1024);  // FIXME: de-hardcode 1024
  for (uptr i = 0; i < hdr->stack0.Size(); i++) {
    stack[i] = hdr->stack0.Get(i);
    DPrintf2(" #%02lu: pc=%zx\n", i, stack[i]);
  }
  if (mset)
    *mset = hdr->mset0;
  uptr pos = hdr->stack0.Size();
  Event *events = (Event*)GetThreadTrace(tid);
  for (uptr i = ebegin; i <= eend; i++) {
    Event ev = events[i];
    EventType typ = (EventType)(ev >> 61);
    uptr pc = (uptr)(ev & ((1ull << 61) - 1));
    DPrintf2(" %zu typ=%d pc=%zx\n", i, typ, pc);
    if (typ == EventTypeMop) {
      stack[pos] = pc;
    } else if (typ == EventTypeFuncEnter) {
      stack[pos++] = pc;
    } else if (typ == EventTypeFuncExit) {
      if (pos > 0)
        pos--;
    }
    if (mset) {
      if (typ == EventTypeLock) {
        mset->Add(pc, true, epoch0 + i);
      } else if (typ == EventTypeUnlock) {
        mset->Del(pc, true);
      } else if (typ == EventTypeRLock) {
        mset->Add(pc, false, epoch0 + i);
      } else if (typ == EventTypeRUnlock) {
        mset->Del(pc, false);
      }
    }
    for (uptr j = 0; j <= pos; j++)
      DPrintf2(" #%zu: %zx\n", j, stack[j]);
  }
  if (pos == 0 && stack[0] == 0)
    return;
  pos++;
  stk->Init(stack.data(), pos);
}

static bool HandleRacyStacks(ThreadState *thr, const StackTrace (&traces)[2],
    uptr addr_min, uptr addr_max) {
  Context *ctx = CTX();
  bool equal_stack = false;
  RacyStacks hash;
  if (flags()->suppress_equal_stacks) {
    hash.hash[0] = md5_hash(traces[0].Begin(), traces[0].Size() * sizeof(uptr));
    hash.hash[1] = md5_hash(traces[1].Begin(), traces[1].Size() * sizeof(uptr));
    for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) {
      if (hash == ctx->racy_stacks[i]) {
        DPrintf("ThreadSanitizer: suppressing report as doubled (stack)\n");
        equal_stack = true;
        break;
      }
    }
  }
  bool equal_address = false;
  RacyAddress ra0 = {addr_min, addr_max};
  if (flags()->suppress_equal_addresses) {
    for (uptr i = 0; i < ctx->racy_addresses.Size(); i++) {
      RacyAddress ra2 = ctx->racy_addresses[i];
      uptr maxbeg = max(ra0.addr_min, ra2.addr_min);
      uptr minend = min(ra0.addr_max, ra2.addr_max);
      if (maxbeg < minend) {
        DPrintf("ThreadSanitizer: suppressing report as doubled (addr)\n");
        equal_address = true;
        break;
      }
    }
  }
  if (equal_stack || equal_address) {
    if (!equal_stack)
      ctx->racy_stacks.PushBack(hash);
    if (!equal_address)
      ctx->racy_addresses.PushBack(ra0);
    return true;
  }
  return false;
}
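
// Records the stacks/address range of a reported race so that later
// duplicates can be suppressed. HandleRacyStacks() above does the matching
// at report time; this is called only after the report is actually printed.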
static void AddRacyStacks(ThreadState *thr, const StackTrace (&traces)[2],
    uptr addr_min, uptr addr_max) {
  Context *ctx = CTX();
  if (flags()->suppress_equal_stacks) {
    RacyStacks hash;
    hash.hash[0] = md5_hash(traces[0].Begin(), traces[0].Size() * sizeof(uptr));
    hash.hash[1] = md5_hash(traces[1].Begin(), traces[1].Size() * sizeof(uptr));
    ctx->racy_stacks.PushBack(hash);
  }
  if (flags()->suppress_equal_addresses) {
    RacyAddress ra0 = {addr_min, addr_max};
    ctx->racy_addresses.PushBack(ra0);
  }
}

bool OutputReport(Context *ctx,
                  const ScopedReport &srep,
                  const ReportStack *suppress_stack1,
                  const ReportStack *suppress_stack2) {
  atomic_store(&ctx->last_symbolize_time_ns, NanoTime(), memory_order_relaxed);
  const ReportDesc *rep = srep.GetReport();
  Suppression *supp = 0;
  uptr suppress_pc = IsSuppressed(rep->typ, suppress_stack1, &supp);
  if (suppress_pc == 0)
    suppress_pc = IsSuppressed(rep->typ, suppress_stack2, &supp);
  if (suppress_pc != 0) {
    FiredSuppression s = {srep.GetReport()->typ, suppress_pc, supp};
    ctx->fired_suppressions.PushBack(s);
  }
  if (OnReport(rep, suppress_pc != 0))
    return false;
  PrintReport(rep);
  CTX()->nreported++;
  return true;
}

bool IsFiredSuppression(Context *ctx,
                        const ScopedReport &srep,
                        const StackTrace &trace) {
  for (uptr k = 0; k < ctx->fired_suppressions.Size(); k++) {
    if (ctx->fired_suppressions[k].type != srep.GetReport()->typ)
      continue;
    for (uptr j = 0; j < trace.Size(); j++) {
      FiredSuppression *s = &ctx->fired_suppressions[k];
      if (trace.Get(j) == s->pc) {
        if (s->supp)
          s->supp->hit_count++;
        return true;
      }
    }
  }
  return false;
}

bool FrameIsInternal(const ReportStack *frame) {
  return frame != 0 && frame->file != 0
      && (internal_strstr(frame->file, "tsan_interceptors.cc") ||
          internal_strstr(frame->file, "sanitizer_common_interceptors.inc") ||
          internal_strstr(frame->file, "tsan_interface_"));
}

// On programs that use Java we see weird reports like:
// WARNING: ThreadSanitizer: data race (pid=22512)
//   Read of size 8 at 0x7d2b00084318 by thread 100:
//     #0 memcpy tsan_interceptors.cc:406 (foo+0x00000d8dfae3)
//     #1 <null> <null>:0 (0x7f7ad9b40193)
//   Previous write of size 8 at 0x7d2b00084318 by thread 105:
//     #0 strncpy tsan_interceptors.cc:501 (foo+0x00000d8e0919)
//     #1 <null> <null>:0 (0x7f7ad9b42707)
static bool IsJavaNonsense(const ReportDesc *rep) {
#ifndef TSAN_GO
  for (uptr i = 0; i < rep->mops.Size(); i++) {
    ReportMop *mop = rep->mops[i];
    ReportStack *frame = mop->stack;
    if (frame == 0
        || (frame->func == 0 && frame->file == 0 && frame->line == 0
            && frame->module == 0)) {
      return true;
    }
    if (FrameIsInternal(frame)) {
      frame = frame->next;
      if (frame == 0
          || (frame->func == 0 && frame->file == 0 && frame->line == 0
              && frame->module == 0)) {
        if (frame) {
          FiredSuppression supp = {rep->typ, frame->pc, 0};
          CTX()->fired_suppressions.PushBack(supp);
        }
        return true;
      }
    }
  }
#endif
  return false;
}

static bool RaceBetweenAtomicAndFree(ThreadState *thr) {
  Shadow s0(thr->racy_state[0]);
  Shadow s1(thr->racy_state[1]);
  CHECK(!(s0.IsAtomic() && s1.IsAtomic()));
  if (!s0.IsAtomic() && !s1.IsAtomic())
    return true;
  if (s0.IsAtomic() && s1.IsFreed())
    return true;
  if (s1.IsAtomic() && thr->is_freeing)
    return true;
  return false;
}

void ReportRace(ThreadState *thr) {
  if (!flags()->report_bugs)
    return;
  ScopedInRtl in_rtl;

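  // racy_state[0] describes the current access, racy_state[1] the previous
  // access it races with; the current stack is unwound directly, while the
  // previous one is recovered from the trace via RestoreStack() below.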
  if (!flags()->report_atomic_races && !RaceBetweenAtomicAndFree(thr))
    return;

  bool freed = false;
  {
    Shadow s(thr->racy_state[1]);
    freed = s.GetFreedAndReset();
    thr->racy_state[1] = s.raw();
  }

  uptr addr = ShadowToMem((uptr)thr->racy_shadow_addr);
  uptr addr_min = 0;
  uptr addr_max = 0;
  {
    uptr a0 = addr + Shadow(thr->racy_state[0]).addr0();
    uptr a1 = addr + Shadow(thr->racy_state[1]).addr0();
    uptr e0 = a0 + Shadow(thr->racy_state[0]).size();
    uptr e1 = a1 + Shadow(thr->racy_state[1]).size();
    addr_min = min(a0, a1);
    addr_max = max(e0, e1);
    if (IsExpectedReport(addr_min, addr_max - addr_min))
      return;
  }

  Context *ctx = CTX();
  ThreadRegistryLock l0(ctx->thread_registry);

  ReportType typ = ReportTypeRace;
  if (thr->is_vptr_access)
    typ = ReportTypeVptrRace;
  else if (freed)
    typ = ReportTypeUseAfterFree;
  ScopedReport rep(typ);
  const uptr kMop = 2;
  StackTrace traces[kMop];
  const uptr toppc = TraceTopPC(thr);
  traces[0].ObtainCurrent(thr, toppc);
  if (IsFiredSuppression(ctx, rep, traces[0]))
    return;
  InternalScopedBuffer<MutexSet> mset2(1);
  new(mset2.data()) MutexSet();
  Shadow s2(thr->racy_state[1]);
  RestoreStack(s2.tid(), s2.epoch(), &traces[1], mset2.data());

  if (HandleRacyStacks(thr, traces, addr_min, addr_max))
    return;

  for (uptr i = 0; i < kMop; i++) {
    Shadow s(thr->racy_state[i]);
    rep.AddMemoryAccess(addr, s, &traces[i],
                        i == 0 ? &thr->mset : mset2.data());
  }

  if (flags()->suppress_java && IsJavaNonsense(rep.GetReport()))
    return;

  for (uptr i = 0; i < kMop; i++) {
    FastState s(thr->racy_state[i]);
    ThreadContext *tctx = static_cast<ThreadContext*>(
        ctx->thread_registry->GetThreadLocked(s.tid()));
    if (s.epoch() < tctx->epoch0 || s.epoch() > tctx->epoch1)
      continue;
    rep.AddThread(tctx);
  }

  rep.AddLocation(addr_min, addr_max - addr_min);

#ifndef TSAN_GO
  {  // NOLINT
    Shadow s(thr->racy_state[1]);
    if (s.epoch() <= thr->last_sleep_clock.get(s.tid()))
      rep.AddSleep(thr->last_sleep_stack_id);
  }
#endif

  if (!OutputReport(ctx, rep, rep.GetReport()->mops[0]->stack,
                    rep.GetReport()->mops[1]->stack))
    return;

  AddRacyStacks(thr, traces, addr_min, addr_max);
}

void PrintCurrentStack(ThreadState *thr, uptr pc) {
  StackTrace trace;
  trace.ObtainCurrent(thr, pc);
  PrintStack(SymbolizeStack(trace));
}

void PrintCurrentStackSlow() {
#ifndef TSAN_GO
  __sanitizer::StackTrace *ptrace = new(internal_alloc(MBlockStackTrace,
      sizeof(__sanitizer::StackTrace))) __sanitizer::StackTrace;
  ptrace->SlowUnwindStack(__sanitizer::StackTrace::GetCurrentPc(),
      kStackTraceMax);
  // The slow unwinder records the innermost frame first, while
  // SymbolizeStack() expects it last, so reverse the array in place.
  for (uptr i = 0; i < ptrace->size / 2; i++) {
    uptr tmp = ptrace->trace[i];
    ptrace->trace[i] = ptrace->trace[ptrace->size - i - 1];
    ptrace->trace[ptrace->size - i - 1] = tmp;
  }
  StackTrace trace;
  trace.Init(ptrace->trace, ptrace->size);
  PrintStack(SymbolizeStack(trace));
#endif
}

}  // namespace __tsan