//===-- tsan_rtl_thread.cc ------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "tsan_platform.h"
#include "tsan_report.h"
#include "tsan_sync.h"

namespace __tsan {

// ThreadContext implementation.

ThreadContext::ThreadContext(int tid)
  : ThreadContextBase(tid)
  , thr()
  , sync()
  , epoch0()
  , epoch1() {
}

#ifndef TSAN_GO
ThreadContext::~ThreadContext() {
}
#endif

void ThreadContext::OnDead() {
  sync.Reset();
}

void ThreadContext::OnJoined(void *arg) {
  ThreadState *caller_thr = static_cast<ThreadState *>(arg);
  AcquireImpl(caller_thr, 0, &sync);
  sync.Reset();
}

struct OnCreatedArgs {
  ThreadState *thr;
  uptr pc;
};

void ThreadContext::OnCreated(void *arg) {
  thr = 0;
  if (tid == 0)
    return;
  OnCreatedArgs *args = static_cast<OnCreatedArgs *>(arg);
  args->thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(args->thr, args->thr->fast_state, EventTypeMop, 0);
  ReleaseImpl(args->thr, 0, &sync);
  creation_stack_id = CurrentStackId(args->thr, args->pc);
  if (reuse_count == 0)
    StatInc(args->thr, StatThreadMaxTid);
}

void ThreadContext::OnReset() {
  sync.Reset();
  FlushUnneededShadowMemory(GetThreadTrace(tid), TraceSize() * sizeof(Event));
  //!!! FlushUnneededShadowMemory(GetThreadTraceHeader(tid), sizeof(Trace));
}

struct OnStartedArgs {
  ThreadState *thr;
  uptr stk_addr;
  uptr stk_size;
  uptr tls_addr;
  uptr tls_size;
};

void ThreadContext::OnStarted(void *arg) {
  OnStartedArgs *args = static_cast<OnStartedArgs*>(arg);
  thr = args->thr;
  // RoundUp so that one trace part does not contain events
  // from different threads.
  epoch0 = RoundUp(epoch1 + 1, kTracePartSize);
  epoch1 = (u64)-1;
  new(thr) ThreadState(ctx, tid, unique_id, epoch0, reuse_count,
      args->stk_addr, args->stk_size, args->tls_addr, args->tls_size);
#ifndef TSAN_GO
  thr->shadow_stack = &ThreadTrace(thr->tid)->shadow_stack[0];
  thr->shadow_stack_pos = thr->shadow_stack;
  thr->shadow_stack_end = thr->shadow_stack + kShadowStackSize;
#else
  // Setup dynamic shadow stack.
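  // Goroutines are numerous and usually shallow, so the Go build starts with
  // a small heap-allocated shadow stack instead of the fixed-size one above;
  // it is grown on demand once shadow_stack_pos reaches shadow_stack_end
  // (presumably via GrowShadowStack on function entry).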
  const int kInitStackSize = 8;
  thr->shadow_stack = (uptr*)internal_alloc(MBlockShadowStack,
      kInitStackSize * sizeof(uptr));
  thr->shadow_stack_pos = thr->shadow_stack;
  thr->shadow_stack_end = thr->shadow_stack + kInitStackSize;
#endif
#ifndef TSAN_GO
  AllocatorThreadStart(thr);
#endif
  if (flags()->detect_deadlocks) {
    thr->dd_pt = ctx->dd->CreatePhysicalThread();
    thr->dd_lt = ctx->dd->CreateLogicalThread(unique_id);
  }
  thr->fast_synch_epoch = epoch0;
  AcquireImpl(thr, 0, &sync);
  thr->fast_state.SetHistorySize(flags()->history_size);
  const uptr trace = (epoch0 / kTracePartSize) % TraceParts();
  Trace *thr_trace = ThreadTrace(thr->tid);
  thr_trace->headers[trace].epoch0 = epoch0;
  StatInc(thr, StatSyncAcquire);
  sync.Reset();
  DPrintf("#%d: ThreadStart epoch=%zu stk_addr=%zx stk_size=%zx "
          "tls_addr=%zx tls_size=%zx\n",
          tid, (uptr)epoch0, args->stk_addr, args->stk_size,
          args->tls_addr, args->tls_size);
  thr->is_alive = true;
}

void ThreadContext::OnFinished() {
  if (!detached) {
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    ReleaseImpl(thr, 0, &sync);
  }
  epoch1 = thr->fast_state.epoch();

  if (flags()->detect_deadlocks) {
    ctx->dd->DestroyPhysicalThread(thr->dd_pt);
    ctx->dd->DestroyLogicalThread(thr->dd_lt);
  }
  ctx->metamap.OnThreadIdle(thr);
#ifndef TSAN_GO
  AllocatorThreadFinish(thr);
#endif
  thr->~ThreadState();
  StatAggregate(ctx->stat, thr->stat);
  thr = 0;
}

#ifndef TSAN_GO
struct ThreadLeak {
  ThreadContext *tctx;
  int count;
};

static void MaybeReportThreadLeak(ThreadContextBase *tctx_base, void *arg) {
  Vector<ThreadLeak> &leaks = *(Vector<ThreadLeak>*)arg;
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  if (tctx->detached || tctx->status != ThreadStatusFinished)
    return;
  for (uptr i = 0; i < leaks.Size(); i++) {
    if (leaks[i].tctx->creation_stack_id == tctx->creation_stack_id) {
      leaks[i].count++;
      return;
    }
  }
  ThreadLeak leak = {tctx, 1};
  leaks.PushBack(leak);
}
#endif

#ifndef TSAN_GO
static void ReportIgnoresEnabled(ThreadContext *tctx, IgnoreSet *set) {
  if (tctx->tid == 0) {
    Printf("ThreadSanitizer: main thread finished with ignores enabled\n");
  } else {
    Printf("ThreadSanitizer: thread T%d %s finished with ignores enabled,"
           " created at:\n", tctx->tid, tctx->name);
    PrintStack(SymbolizeStackId(tctx->creation_stack_id));
  }
  Printf("  One of the following ignores was not ended"
         " (in order of probability)\n");
  for (uptr i = 0; i < set->Size(); i++) {
    Printf("  Ignore was enabled at:\n");
    PrintStack(SymbolizeStackId(set->At(i)));
  }
  Die();
}

static void ThreadCheckIgnore(ThreadState *thr) {
  if (ctx->after_multithreaded_fork)
    return;
  if (thr->ignore_reads_and_writes)
    ReportIgnoresEnabled(thr->tctx, &thr->mop_ignore_set);
  if (thr->ignore_sync)
    ReportIgnoresEnabled(thr->tctx, &thr->sync_ignore_set);
}
#else
static void ThreadCheckIgnore(ThreadState *thr) {}
#endif

void ThreadFinalize(ThreadState *thr) {
  ThreadCheckIgnore(thr);
#ifndef TSAN_GO
  if (!flags()->report_thread_leaks)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  Vector<ThreadLeak> leaks(MBlockScopedBuf);
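  // Collect finished-but-unjoined threads; MaybeReportThreadLeak above
  // deduplicates them by creation_stack_id, so N threads leaked from the
  // same spawn site produce a single report with count == N.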
  ctx->thread_registry->RunCallbackForEachThreadLocked(
      MaybeReportThreadLeak, &leaks);
  for (uptr i = 0; i < leaks.Size(); i++) {
    ScopedReport rep(ReportTypeThreadLeak);
    rep.AddThread(leaks[i].tctx, true);
    rep.SetCount(leaks[i].count);
    OutputReport(thr, rep);
  }
#endif
}

int ThreadCount(ThreadState *thr) {
  uptr result;
  ctx->thread_registry->GetNumberOfThreads(0, 0, &result);
  return (int)result;
}

int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
  StatInc(thr, StatThreadCreate);
  OnCreatedArgs args = { thr, pc };
  int tid = ctx->thread_registry->CreateThread(uid, detached, thr->tid, &args);
  DPrintf("#%d: ThreadCreate tid=%d uid=%zu\n", thr->tid, tid, uid);
  StatSet(thr, StatThreadMaxAlive, ctx->thread_registry->GetMaxAliveThreads());
  return tid;
}

void ThreadStart(ThreadState *thr, int tid, uptr os_id) {
  uptr stk_addr = 0;
  uptr stk_size = 0;
  uptr tls_addr = 0;
  uptr tls_size = 0;
  GetThreadStackAndTls(tid == 0, &stk_addr, &stk_size, &tls_addr, &tls_size);

  if (tid) {
    if (stk_addr && stk_size)
      MemoryRangeImitateWrite(thr, /*pc=*/ 1, stk_addr, stk_size);

    if (tls_addr && tls_size) {
      // Check that the thr object is in tls.
      const uptr thr_beg = (uptr)thr;
      const uptr thr_end = (uptr)thr + sizeof(*thr);
      CHECK_GE(thr_beg, tls_addr);
      CHECK_LE(thr_beg, tls_addr + tls_size);
      CHECK_GE(thr_end, tls_addr);
      CHECK_LE(thr_end, tls_addr + tls_size);
      // Since the thr object is huge, skip it.
      MemoryRangeImitateWrite(thr, /*pc=*/ 2, tls_addr, thr_beg - tls_addr);
      MemoryRangeImitateWrite(thr, /*pc=*/ 2,
          thr_end, tls_addr + tls_size - thr_end);
    }
  }

  ThreadRegistry *tr = ctx->thread_registry;
  OnStartedArgs args = { thr, stk_addr, stk_size, tls_addr, tls_size };
  tr->StartThread(tid, os_id, &args);

  tr->Lock();
  thr->tctx = (ThreadContext*)tr->GetThreadLocked(tid);
  tr->Unlock();

#ifndef TSAN_GO
  if (ctx->after_multithreaded_fork) {
    thr->ignore_interceptors++;
    ThreadIgnoreBegin(thr, 0);
    ThreadIgnoreSyncBegin(thr, 0);
  }
#endif
}

void ThreadFinish(ThreadState *thr) {
  ThreadCheckIgnore(thr);
  StatInc(thr, StatThreadFinish);
  if (thr->stk_addr && thr->stk_size)
    DontNeedShadowFor(thr->stk_addr, thr->stk_size);
  if (thr->tls_addr && thr->tls_size)
    DontNeedShadowFor(thr->tls_addr, thr->tls_size);
  thr->is_alive = false;
  ctx->thread_registry->FinishThread(thr->tid);
}

static bool FindThreadByUid(ThreadContextBase *tctx, void *arg) {
  uptr uid = (uptr)arg;
  if (tctx->user_id == uid && tctx->status != ThreadStatusInvalid) {
    tctx->user_id = 0;
    return true;
  }
  return false;
}

int ThreadTid(ThreadState *thr, uptr pc, uptr uid) {
  int res = ctx->thread_registry->FindThread(FindThreadByUid, (void*)uid);
  DPrintf("#%d: ThreadTid uid=%zu tid=%d\n", thr->tid, uid, res);
  return res;
}

void ThreadJoin(ThreadState *thr, uptr pc, int tid) {
  CHECK_GT(tid, 0);
  CHECK_LT(tid, kMaxTid);
  DPrintf("#%d: ThreadJoin tid=%d\n", thr->tid, tid);
  ctx->thread_registry->JoinThread(tid, thr);
}

void ThreadDetach(ThreadState *thr, uptr pc, int tid) {
  CHECK_GT(tid, 0);
  CHECK_LT(tid, kMaxTid);
  ctx->thread_registry->DetachThread(tid);
}

void ThreadSetName(ThreadState *thr, const char *name) {
  ctx->thread_registry->SetThreadName(thr->tid, name);
}

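// A sketch of the decomposition performed below: shadow state is tracked per
// kShadowCell-byte cell (8 bytes), so the range is split into an unaligned
// head handled byte by byte, whole cells handled as single 8-byte accesses
// (kAccessSizeLog = 3), and a byte-by-byte tail. E.g. addr % 8 == 6 with
// size == 13 is recorded as 2 head bytes, one full cell, and 3 tail bytes.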
void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
                       uptr size, bool is_write) {
  if (size == 0)
    return;

  u64 *shadow_mem = (u64*)MemToShadow(addr);
  DPrintf2("#%d: MemoryAccessRange: @%p %p size=%d is_write=%d\n",
      thr->tid, (void*)pc, (void*)addr,
      (int)size, is_write);

#if TSAN_DEBUG
  if (!IsAppMem(addr)) {
    Printf("Access to non app mem %zx\n", addr);
    DCHECK(IsAppMem(addr));
  }
  if (!IsAppMem(addr + size - 1)) {
    Printf("Access to non app mem %zx\n", addr + size - 1);
    DCHECK(IsAppMem(addr + size - 1));
  }
  if (!IsShadowMem((uptr)shadow_mem)) {
    Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
    DCHECK(IsShadowMem((uptr)shadow_mem));
  }
  if (!IsShadowMem((uptr)(shadow_mem + size * kShadowCnt / 8 - 1))) {
    Printf("Bad shadow addr %p (%zx)\n",
           shadow_mem + size * kShadowCnt / 8 - 1, addr + size - 1);
    DCHECK(IsShadowMem((uptr)(shadow_mem + size * kShadowCnt / 8 - 1)));
  }
#endif

  StatInc(thr, StatMopRange);

  if (*shadow_mem == kShadowRodata) {
    // Access to .rodata section, no races here.
    // Measurements show that it can be 10-20% of all memory accesses.
    StatInc(thr, StatMopRangeRodata);
    return;
  }

  FastState fast_state = thr->fast_state;
  if (fast_state.GetIgnoreBit())
    return;

  fast_state.IncrementEpoch();
  thr->fast_state = fast_state;
  TraceAddEvent(thr, fast_state, EventTypeMop, pc);

  bool unaligned = (addr % kShadowCell) != 0;

  // Handle unaligned beginning, if any.
  for (; addr % kShadowCell && size; addr++, size--) {
    int const kAccessSizeLog = 0;
    Shadow cur(fast_state);
    cur.SetWrite(is_write);
    cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog);
    MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
        shadow_mem, cur);
  }
  if (unaligned)
    shadow_mem += kShadowCnt;
  // Handle middle part, if any.
  for (; size >= kShadowCell; addr += kShadowCell, size -= kShadowCell) {
    int const kAccessSizeLog = 3;
    Shadow cur(fast_state);
    cur.SetWrite(is_write);
    cur.SetAddr0AndSizeLog(0, kAccessSizeLog);
    MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
        shadow_mem, cur);
    shadow_mem += kShadowCnt;
  }
  // Handle ending, if any.
  for (; size; addr++, size--) {
    int const kAccessSizeLog = 0;
    Shadow cur(fast_state);
    cur.SetWrite(is_write);
    cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog);
    MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
        shadow_mem, cur);
  }
}

}  // namespace __tsan