tsan_rtl_report.cc revision f16dc4234098a22a9d0d56f0198d87905481e7fd
//===-- tsan_rtl_report.cc ------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
#include "tsan_report.h"
#include "tsan_sync.h"
#include "tsan_mman.h"
#include "tsan_flags.h"
#include "tsan_fd.h"

namespace __tsan {

using namespace __sanitizer;  // NOLINT

static ReportStack *SymbolizeStack(const StackTrace& trace);

void TsanCheckFailed(const char *file, int line, const char *cond,
                     u64 v1, u64 v2) {
  ScopedInRtl in_rtl;
  Printf("FATAL: ThreadSanitizer CHECK failed: "
         "%s:%d \"%s\" (0x%zx, 0x%zx)\n",
         file, line, cond, (uptr)v1, (uptr)v2);
  PrintCurrentStackSlow();
  Die();
}

// Can be overridden by an application/test to intercept reports.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnReport(const ReportDesc *rep, bool suppressed);
#else
SANITIZER_INTERFACE_ATTRIBUTE
bool WEAK OnReport(const ReportDesc *rep, bool suppressed) {
  (void)rep;
  return suppressed;
}
#endif

static void StackStripMain(ReportStack *stack) {
  ReportStack *last_frame = 0;
  ReportStack *last_frame2 = 0;
  const char *prefix = "__interceptor_";
  uptr prefix_len = internal_strlen(prefix);
  const char *path_prefix = flags()->strip_path_prefix;
  uptr path_prefix_len = internal_strlen(path_prefix);
  char *pos;
  for (ReportStack *ent = stack; ent; ent = ent->next) {
    if (ent->func && 0 == internal_strncmp(ent->func, prefix, prefix_len))
      ent->func += prefix_len;
    if (ent->file && (pos = internal_strstr(ent->file, path_prefix)))
      ent->file = pos + path_prefix_len;
    if (ent->file && ent->file[0] == '.' && ent->file[1] == '/')
      ent->file += 2;
    last_frame2 = last_frame;
    last_frame = ent;
  }

  if (last_frame2 == 0)
    return;
  const char *last = last_frame->func;
#ifndef TSAN_GO
  const char *last2 = last_frame2->func;
  // Strip frame above 'main'
  if (last2 && 0 == internal_strcmp(last2, "main")) {
    last_frame2->next = 0;
  // Strip our internal thread start routine.
  } else if (last && 0 == internal_strcmp(last, "__tsan_thread_start_func")) {
    last_frame2->next = 0;
  // Strip global ctors init.
  } else if (last && 0 == internal_strcmp(last, "__do_global_ctors_aux")) {
    last_frame2->next = 0;
  // If both are 0, then we probably just failed to symbolize.
  } else if (last || last2) {
    // Ensure that we recovered the stack completely. A trimmed stack
    // can actually happen if we do not instrument some code,
    // so it's only a debug print. However, we must try hard not to miss it
    // due to our own fault.
    DPrintf("Bottom stack frame of stack %zx is missed\n", stack->pc);
  }
#else
  // The last frame always points into the runtime (gosched0, goexit0,
  // runtime.main).
  last_frame2->next = 0;
  (void)last;
#endif
}
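
// Symbolizes each PC of the trace and links the resulting frames into a
// single list. One PC may symbolize into several frames (e.g. when the
// symbolizer expands inlined calls); the original PC is restored on every
// frame of the chain so that the report shows the real addresses.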
static ReportStack *SymbolizeStack(const StackTrace& trace) {
  if (trace.IsEmpty())
    return 0;
  ReportStack *stack = 0;
  for (uptr si = 0; si < trace.Size(); si++) {
    const uptr pc = trace.Get(si);
#ifndef TSAN_GO
    // We obtain the return address, that is, address of the next instruction,
    // so offset it by 1 byte.
    const uptr pc1 = __sanitizer::StackTrace::GetPreviousInstructionPc(pc);
#else
    // FIXME(dvyukov): Go sometimes uses address of a function as top pc.
    uptr pc1 = pc;
    if (si != trace.Size() - 1)
      pc1 -= 1;
#endif
    ReportStack *ent = SymbolizeCode(pc1);
    CHECK_NE(ent, 0);
    ReportStack *last = ent;
    while (last->next) {
      last->pc = pc;  // restore original pc for report
      last = last->next;
    }
    last->pc = pc;  // restore original pc for report
    last->next = stack;
    stack = ent;
  }
  StackStripMain(stack);
  return stack;
}

ScopedReport::ScopedReport(ReportType typ) {
  ctx_ = CTX();
  ctx_->thread_registry->CheckLocked();
  void *mem = internal_alloc(MBlockReport, sizeof(ReportDesc));
  rep_ = new(mem) ReportDesc;
  rep_->typ = typ;
  ctx_->report_mtx.Lock();
  CommonSanitizerReportMutex.Lock();
}

ScopedReport::~ScopedReport() {
  CommonSanitizerReportMutex.Unlock();
  ctx_->report_mtx.Unlock();
  DestroyAndFree(rep_);
}

void ScopedReport::AddStack(const StackTrace *stack) {
  ReportStack **rs = rep_->stacks.PushBack();
  *rs = SymbolizeStack(*stack);
}

void ScopedReport::AddMemoryAccess(uptr addr, Shadow s,
    const StackTrace *stack, const MutexSet *mset) {
  void *mem = internal_alloc(MBlockReportMop, sizeof(ReportMop));
  ReportMop *mop = new(mem) ReportMop;
  rep_->mops.PushBack(mop);
  mop->tid = s.tid();
  mop->addr = addr + s.addr0();
  mop->size = s.size();
  mop->write = s.IsWrite();
  mop->atomic = s.IsAtomic();
  mop->stack = SymbolizeStack(*stack);
  for (uptr i = 0; i < mset->Size(); i++) {
    MutexSet::Desc d = mset->Get(i);
    u64 uid = 0;
    uptr addr = SyncVar::SplitId(d.id, &uid);
    SyncVar *s = ctx_->synctab.GetIfExistsAndLock(addr, false);
    // Check that the mutex is still alive.
    // Another mutex can be created at the same address,
    // so check uid as well.
    if (s && s->CheckId(uid)) {
      ReportMopMutex mtx = {s->uid, d.write};
      mop->mset.PushBack(mtx);
      AddMutex(s);
    } else {
      ReportMopMutex mtx = {d.id, d.write};
      mop->mset.PushBack(mtx);
      AddMutex(d.id);
    }
    if (s)
      s->mtx.ReadUnlock();
  }
}
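
// Adds the thread to the report (deduplicated by tid) together with its
// creation stack, if one is available in the stack depot.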
void ScopedReport::AddThread(const ThreadContext *tctx) {
  for (uptr i = 0; i < rep_->threads.Size(); i++) {
    if ((u32)rep_->threads[i]->id == tctx->tid)
      return;
  }
  void *mem = internal_alloc(MBlockReportThread, sizeof(ReportThread));
  ReportThread *rt = new(mem) ReportThread();
  rep_->threads.PushBack(rt);
  rt->id = tctx->tid;
  rt->pid = tctx->os_id;
  rt->running = (tctx->status == ThreadStatusRunning);
  rt->name = tctx->name ? internal_strdup(tctx->name) : 0;
  rt->parent_tid = tctx->parent_tid;
  rt->stack = 0;
#ifdef TSAN_GO
  rt->stack = SymbolizeStack(tctx->creation_stack);
#else
  uptr ssz = 0;
  const uptr *stack = StackDepotGet(tctx->creation_stack_id, &ssz);
  if (stack) {
    StackTrace trace;
    trace.Init(stack, ssz);
    rt->stack = SymbolizeStack(trace);
  }
#endif
}

#ifndef TSAN_GO
static ThreadContext *FindThreadByUidLocked(int unique_id) {
  Context *ctx = CTX();
  ctx->thread_registry->CheckLocked();
  for (unsigned i = 0; i < kMaxTid; i++) {
    ThreadContext *tctx = static_cast<ThreadContext*>(
        ctx->thread_registry->GetThreadLocked(i));
    if (tctx && tctx->unique_id == (u32)unique_id) {
      return tctx;
    }
  }
  return 0;
}

static ThreadContext *FindThreadByTidLocked(int tid) {
  Context *ctx = CTX();
  ctx->thread_registry->CheckLocked();
  return static_cast<ThreadContext*>(
      ctx->thread_registry->GetThreadLocked(tid));
}

static bool IsInStackOrTls(ThreadContextBase *tctx_base, void *arg) {
  uptr addr = (uptr)arg;
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  if (tctx->status != ThreadStatusRunning)
    return false;
  ThreadState *thr = tctx->thr;
  CHECK(thr);
  return ((addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size) ||
          (addr >= thr->tls_addr && addr < thr->tls_addr + thr->tls_size));
}

ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
  Context *ctx = CTX();
  ctx->thread_registry->CheckLocked();
  ThreadContext *tctx = static_cast<ThreadContext*>(
      ctx->thread_registry->FindThreadContextLocked(IsInStackOrTls,
                                                    (void*)addr));
  if (!tctx)
    return 0;
  ThreadState *thr = tctx->thr;
  CHECK(thr);
  *is_stack = (addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size);
  return tctx;
}
#endif

void ScopedReport::AddMutex(const SyncVar *s) {
  for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
    if (rep_->mutexes[i]->id == s->uid)
      return;
  }
  void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
  ReportMutex *rm = new(mem) ReportMutex();
  rep_->mutexes.PushBack(rm);
  rm->id = s->uid;
  rm->destroyed = false;
  rm->stack = 0;
#ifndef TSAN_GO
  uptr ssz = 0;
  const uptr *stack = StackDepotGet(s->creation_stack_id, &ssz);
  if (stack) {
    StackTrace trace;
    trace.Init(stack, ssz);
    rm->stack = SymbolizeStack(trace);
  }
#endif
}

void ScopedReport::AddMutex(u64 id) {
  for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
    if (rep_->mutexes[i]->id == id)
      return;
  }
  void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
  ReportMutex *rm = new(mem) ReportMutex();
  rep_->mutexes.PushBack(rm);
  rm->id = id;
  rm->destroyed = true;
  rm->stack = 0;
}
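
// Describes the racy address: file descriptor, heap block, thread
// stack/TLS, or global, tried in that order. Also pulls the owning
// thread into the report for locations that have one.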
void ScopedReport::AddLocation(uptr addr, uptr size) {
  if (addr == 0)
    return;
#ifndef TSAN_GO
  int fd = -1;
  int creat_tid = -1;
  u32 creat_stack = 0;
  if (FdLocation(addr, &fd, &creat_tid, &creat_stack)
      || FdLocation(AlternativeAddress(addr), &fd, &creat_tid, &creat_stack)) {
    void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
    ReportLocation *loc = new(mem) ReportLocation();
    rep_->locs.PushBack(loc);
    loc->type = ReportLocationFD;
    loc->fd = fd;
    loc->tid = creat_tid;
    uptr ssz = 0;
    const uptr *stack = StackDepotGet(creat_stack, &ssz);
    if (stack) {
      StackTrace trace;
      trace.Init(stack, ssz);
      loc->stack = SymbolizeStack(trace);
    }
    ThreadContext *tctx = FindThreadByUidLocked(creat_tid);
    if (tctx)
      AddThread(tctx);
    return;
  }
  MBlock *b = 0;
  if (allocator()->PointerIsMine((void*)addr)
      && (b = user_mblock(0, (void*)addr))) {
    ThreadContext *tctx = FindThreadByTidLocked(b->Tid());
    void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
    ReportLocation *loc = new(mem) ReportLocation();
    rep_->locs.PushBack(loc);
    loc->type = ReportLocationHeap;
    loc->addr = (uptr)allocator()->GetBlockBegin((void*)addr);
    loc->size = b->Size();
    loc->tid = tctx ? tctx->tid : b->Tid();
    loc->name = 0;
    loc->file = 0;
    loc->line = 0;
    loc->stack = 0;
    uptr ssz = 0;
    const uptr *stack = StackDepotGet(b->StackId(), &ssz);
    if (stack) {
      StackTrace trace;
      trace.Init(stack, ssz);
      loc->stack = SymbolizeStack(trace);
    }
    if (tctx)
      AddThread(tctx);
    return;
  }
  bool is_stack = false;
  if (ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack)) {
    void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
    ReportLocation *loc = new(mem) ReportLocation();
    rep_->locs.PushBack(loc);
    loc->type = is_stack ? ReportLocationStack : ReportLocationTLS;
    loc->tid = tctx->tid;
    AddThread(tctx);
  }
  ReportLocation *loc = SymbolizeData(addr);
  if (loc) {
    rep_->locs.PushBack(loc);
    return;
  }
#endif
}

#ifndef TSAN_GO
void ScopedReport::AddSleep(u32 stack_id) {
  uptr ssz = 0;
  const uptr *stack = StackDepotGet(stack_id, &ssz);
  if (stack) {
    StackTrace trace;
    trace.Init(stack, ssz);
    rep_->sleep = SymbolizeStack(trace);
  }
}
#endif

void ScopedReport::SetCount(int count) {
  rep_->count = count;
}

const ReportDesc *ScopedReport::GetReport() const {
  return rep_;
}

void RestoreStack(int tid, const u64 epoch, StackTrace *stk, MutexSet *mset) {
  // This function restores stack trace and mutex set for the thread/epoch.
  // It does so by getting stack trace and mutex set at the beginning of
  // trace part, and then replaying the trace till the given epoch.
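  // Each trace event is a 64-bit word: the top 3 bits encode the EventType
  // and the low 61 bits hold a PC (for memory accesses and function
  // entry/exit) or a mutex identifier (for lock/unlock events):
  //   typ = (EventType)(ev >> 61);
  //   pc  = ev & ((1ull << 61) - 1);
  // Replaying FuncEnter/FuncExit rebuilds the call stack and Lock/Unlock
  // rebuilds the mutex set, up to the requested epoch.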
  Context *ctx = CTX();
  ctx->thread_registry->CheckLocked();
  ThreadContext *tctx = static_cast<ThreadContext*>(
      ctx->thread_registry->GetThreadLocked(tid));
  if (tctx == 0)
    return;
  if (tctx->status != ThreadStatusRunning
      && tctx->status != ThreadStatusFinished
      && tctx->status != ThreadStatusDead)
    return;
  Trace* trace = ThreadTrace(tctx->tid);
  Lock l(&trace->mtx);
  const int partidx = (epoch / kTracePartSize) % TraceParts();
  TraceHeader* hdr = &trace->headers[partidx];
  if (epoch < hdr->epoch0)
    return;
  const u64 epoch0 = RoundDown(epoch, TraceSize());
  const u64 eend = epoch % TraceSize();
  const u64 ebegin = RoundDown(eend, kTracePartSize);
  DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n",
          tid, (uptr)epoch, (uptr)ebegin, (uptr)eend, partidx);
  InternalScopedBuffer<uptr> stack(kShadowStackSize);
  for (uptr i = 0; i < hdr->stack0.Size(); i++) {
    stack[i] = hdr->stack0.Get(i);
    DPrintf2("  #%02lu: pc=%zx\n", i, stack[i]);
  }
  if (mset)
    *mset = hdr->mset0;
  uptr pos = hdr->stack0.Size();
  Event *events = (Event*)GetThreadTrace(tid);
  for (uptr i = ebegin; i <= eend; i++) {
    Event ev = events[i];
    EventType typ = (EventType)(ev >> 61);
    uptr pc = (uptr)(ev & ((1ull << 61) - 1));
    DPrintf2("  %zu typ=%d pc=%zx\n", i, typ, pc);
    if (typ == EventTypeMop) {
      stack[pos] = pc;
    } else if (typ == EventTypeFuncEnter) {
      stack[pos++] = pc;
    } else if (typ == EventTypeFuncExit) {
      if (pos > 0)
        pos--;
    }
    if (mset) {
      if (typ == EventTypeLock) {
        mset->Add(pc, true, epoch0 + i);
      } else if (typ == EventTypeUnlock) {
        mset->Del(pc, true);
      } else if (typ == EventTypeRLock) {
        mset->Add(pc, false, epoch0 + i);
      } else if (typ == EventTypeRUnlock) {
        mset->Del(pc, false);
      }
    }
    for (uptr j = 0; j <= pos; j++)
      DPrintf2("      #%zu: %zx\n", j, stack[j]);
  }
  if (pos == 0 && stack[0] == 0)
    return;
  pos++;
  stk->Init(stack.data(), pos);
}

static bool HandleRacyStacks(ThreadState *thr, const StackTrace (&traces)[2],
                             uptr addr_min, uptr addr_max) {
  Context *ctx = CTX();
  bool equal_stack = false;
  RacyStacks hash;
  if (flags()->suppress_equal_stacks) {
    hash.hash[0] = md5_hash(traces[0].Begin(), traces[0].Size() * sizeof(uptr));
    hash.hash[1] = md5_hash(traces[1].Begin(), traces[1].Size() * sizeof(uptr));
    for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) {
      if (hash == ctx->racy_stacks[i]) {
        DPrintf("ThreadSanitizer: suppressing report as doubled (stack)\n");
        equal_stack = true;
        break;
      }
    }
  }
  bool equal_address = false;
  RacyAddress ra0 = {addr_min, addr_max};
  if (flags()->suppress_equal_addresses) {
    for (uptr i = 0; i < ctx->racy_addresses.Size(); i++) {
      RacyAddress ra2 = ctx->racy_addresses[i];
      uptr maxbeg = max(ra0.addr_min, ra2.addr_min);
      uptr minend = min(ra0.addr_max, ra2.addr_max);
      if (maxbeg < minend) {
        DPrintf("ThreadSanitizer: suppressing report as doubled (addr)\n");
        equal_address = true;
        break;
      }
    }
  }
  if (equal_stack || equal_address) {
    if (!equal_stack)
      ctx->racy_stacks.PushBack(hash);
    if (!equal_address)
      ctx->racy_addresses.PushBack(ra0);
    return true;
  }
  return false;
}
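
// Remembers the stack hashes and the address range of the race just
// reported, so that HandleRacyStacks() above can suppress later duplicates.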
static void AddRacyStacks(ThreadState *thr, const StackTrace (&traces)[2],
                          uptr addr_min, uptr addr_max) {
  Context *ctx = CTX();
  if (flags()->suppress_equal_stacks) {
    RacyStacks hash;
    hash.hash[0] = md5_hash(traces[0].Begin(), traces[0].Size() * sizeof(uptr));
    hash.hash[1] = md5_hash(traces[1].Begin(), traces[1].Size() * sizeof(uptr));
    ctx->racy_stacks.PushBack(hash);
  }
  if (flags()->suppress_equal_addresses) {
    RacyAddress ra0 = {addr_min, addr_max};
    ctx->racy_addresses.PushBack(ra0);
  }
}

bool OutputReport(Context *ctx,
                  const ScopedReport &srep,
                  const ReportStack *suppress_stack1,
                  const ReportStack *suppress_stack2,
                  const ReportLocation *suppress_loc) {
  atomic_store(&ctx->last_symbolize_time_ns, NanoTime(), memory_order_relaxed);
  const ReportDesc *rep = srep.GetReport();
  Suppression *supp = 0;
  uptr suppress_pc = IsSuppressed(rep->typ, suppress_stack1, &supp);
  if (suppress_pc == 0)
    suppress_pc = IsSuppressed(rep->typ, suppress_stack2, &supp);
  if (suppress_pc == 0)
    suppress_pc = IsSuppressed(rep->typ, suppress_loc, &supp);
  if (suppress_pc != 0) {
    FiredSuppression s = {srep.GetReport()->typ, suppress_pc, supp};
    ctx->fired_suppressions.push_back(s);
  }
  if (OnReport(rep, suppress_pc != 0))
    return false;
  PrintReport(rep);
  ctx->nreported++;
  if (flags()->halt_on_error)
    internal__exit(flags()->exitcode);
  return true;
}

bool IsFiredSuppression(Context *ctx,
                        const ScopedReport &srep,
                        const StackTrace &trace) {
  for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
    if (ctx->fired_suppressions[k].type != srep.GetReport()->typ)
      continue;
    for (uptr j = 0; j < trace.Size(); j++) {
      FiredSuppression *s = &ctx->fired_suppressions[k];
      if (trace.Get(j) == s->pc) {
        if (s->supp)
          s->supp->hit_count++;
        return true;
      }
    }
  }
  return false;
}

static bool IsFiredSuppression(Context *ctx,
                               const ScopedReport &srep,
                               uptr addr) {
  for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
    if (ctx->fired_suppressions[k].type != srep.GetReport()->typ)
      continue;
    FiredSuppression *s = &ctx->fired_suppressions[k];
    if (addr == s->pc) {
      if (s->supp)
        s->supp->hit_count++;
      return true;
    }
  }
  return false;
}

bool FrameIsInternal(const ReportStack *frame) {
  return frame != 0 && frame->file != 0
      && (internal_strstr(frame->file, "tsan_interceptors.cc") ||
          internal_strstr(frame->file, "sanitizer_common_interceptors.inc") ||
          internal_strstr(frame->file, "tsan_interface_"));
}

// On programs that use Java we see weird reports like:
// WARNING: ThreadSanitizer: data race (pid=22512)
//   Read of size 8 at 0x7d2b00084318 by thread 100:
//     #0 memcpy tsan_interceptors.cc:406 (foo+0x00000d8dfae3)
//     #1 <null> <null>:0 (0x7f7ad9b40193)
//   Previous write of size 8 at 0x7d2b00084318 by thread 105:
//     #0 strncpy tsan_interceptors.cc:501 (foo+0x00000d8e0919)
//     #1 <null> <null>:0 (0x7f7ad9b42707)
static bool IsJavaNonsense(const ReportDesc *rep) {
#ifndef TSAN_GO
  for (uptr i = 0; i < rep->mops.Size(); i++) {
    ReportMop *mop = rep->mops[i];
    ReportStack *frame = mop->stack;
    if (frame == 0
        || (frame->func == 0 && frame->file == 0 && frame->line == 0
            && frame->module == 0)) {
      return true;
    }
    if (FrameIsInternal(frame)) {
      frame = frame->next;
      if (frame == 0
          || (frame->func == 0 && frame->file == 0 && frame->line == 0
              && frame->module == 0)) {
        if (frame) {
          FiredSuppression supp = {rep->typ, frame->pc, 0};
          CTX()->fired_suppressions.push_back(supp);
        }
        return true;
      }
    }
  }
#endif
  return false;
}
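
// Returns true if the race should be reported even when
// report_atomic_races is disabled: either neither access is atomic,
// or an atomic access races with a memory free.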
static bool RaceBetweenAtomicAndFree(ThreadState *thr) {
  Shadow s0(thr->racy_state[0]);
  Shadow s1(thr->racy_state[1]);
  CHECK(!(s0.IsAtomic() && s1.IsAtomic()));
  if (!s0.IsAtomic() && !s1.IsAtomic())
    return true;
  if (s0.IsAtomic() && s1.IsFreed())
    return true;
  if (s1.IsAtomic() && thr->is_freeing)
    return true;
  return false;
}
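
// Main entry point for race reporting: restores the second stack and
// mutex set from the trace, applies suppressions and duplicate filtering,
// and emits the report.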
void ReportRace(ThreadState *thr) {
  if (!flags()->report_bugs)
    return;
  ScopedInRtl in_rtl;

  if (!flags()->report_atomic_races && !RaceBetweenAtomicAndFree(thr))
    return;

  bool freed = false;
  {
    Shadow s(thr->racy_state[1]);
    freed = s.GetFreedAndReset();
    thr->racy_state[1] = s.raw();
  }

  uptr addr = ShadowToMem((uptr)thr->racy_shadow_addr);
  uptr addr_min = 0;
  uptr addr_max = 0;
  {
    uptr a0 = addr + Shadow(thr->racy_state[0]).addr0();
    uptr a1 = addr + Shadow(thr->racy_state[1]).addr0();
    uptr e0 = a0 + Shadow(thr->racy_state[0]).size();
    uptr e1 = a1 + Shadow(thr->racy_state[1]).size();
    addr_min = min(a0, a1);
    addr_max = max(e0, e1);
    if (IsExpectedReport(addr_min, addr_max - addr_min))
      return;
  }

  Context *ctx = CTX();
  ThreadRegistryLock l0(ctx->thread_registry);

  ReportType typ = ReportTypeRace;
  if (thr->is_vptr_access)
    typ = ReportTypeVptrRace;
  else if (freed)
    typ = ReportTypeUseAfterFree;
  ScopedReport rep(typ);
  if (IsFiredSuppression(ctx, rep, addr))
    return;
  const uptr kMop = 2;
  StackTrace traces[kMop];
  const uptr toppc = TraceTopPC(thr);
  traces[0].ObtainCurrent(thr, toppc);
  if (IsFiredSuppression(ctx, rep, traces[0]))
    return;
  InternalScopedBuffer<MutexSet> mset2(1);
  new(mset2.data()) MutexSet();
  Shadow s2(thr->racy_state[1]);
  RestoreStack(s2.tid(), s2.epoch(), &traces[1], mset2.data());
  if (IsFiredSuppression(ctx, rep, traces[1]))
    return;

  if (HandleRacyStacks(thr, traces, addr_min, addr_max))
    return;

  for (uptr i = 0; i < kMop; i++) {
    Shadow s(thr->racy_state[i]);
    rep.AddMemoryAccess(addr, s, &traces[i],
                        i == 0 ? &thr->mset : mset2.data());
  }

  if (flags()->suppress_java && IsJavaNonsense(rep.GetReport()))
    return;

  for (uptr i = 0; i < kMop; i++) {
    FastState s(thr->racy_state[i]);
    ThreadContext *tctx = static_cast<ThreadContext*>(
        ctx->thread_registry->GetThreadLocked(s.tid()));
    if (s.epoch() < tctx->epoch0 || s.epoch() > tctx->epoch1)
      continue;
    rep.AddThread(tctx);
  }

  rep.AddLocation(addr_min, addr_max - addr_min);

#ifndef TSAN_GO
  {  // NOLINT
    Shadow s(thr->racy_state[1]);
    if (s.epoch() <= thr->last_sleep_clock.get(s.tid()))
      rep.AddSleep(thr->last_sleep_stack_id);
  }
#endif

  ReportLocation *suppress_loc = rep.GetReport()->locs.Size() ?
                                 rep.GetReport()->locs[0] : 0;
  if (!OutputReport(ctx, rep, rep.GetReport()->mops[0]->stack,
                    rep.GetReport()->mops[1]->stack,
                    suppress_loc))
    return;

  AddRacyStacks(thr, traces, addr_min, addr_max);
}

void PrintCurrentStack(ThreadState *thr, uptr pc) {
  StackTrace trace;
  trace.ObtainCurrent(thr, pc);
  PrintStack(SymbolizeStack(trace));
}

void PrintCurrentStackSlow() {
#ifndef TSAN_GO
  __sanitizer::StackTrace *ptrace = new(internal_alloc(MBlockStackTrace,
      sizeof(__sanitizer::StackTrace))) __sanitizer::StackTrace;
  ptrace->Unwind(kStackTraceMax, __sanitizer::StackTrace::GetCurrentPc(),
                 0, 0, 0, false);
  for (uptr i = 0; i < ptrace->size / 2; i++) {
    uptr tmp = ptrace->trace[i];
    ptrace->trace[i] = ptrace->trace[ptrace->size - i - 1];
    ptrace->trace[ptrace->size - i - 1] = tmp;
  }
  StackTrace trace;
  trace.Init(ptrace->trace, ptrace->size);
  PrintStack(SymbolizeStack(trace));
#endif
}

}  // namespace __tsan