//===-- tsan_rtl_thread.cc ------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "tsan_platform.h"
#include "tsan_report.h"
#include "tsan_sync.h"

namespace __tsan {

// ThreadContext implementation.

ThreadContext::ThreadContext(int tid)
  : ThreadContextBase(tid)
  , thr()
  , sync()
  , epoch0()
  , epoch1()
  , dead_info() {
}

#ifndef TSAN_GO
ThreadContext::~ThreadContext() {
}
#endif

void ThreadContext::OnDead() {
  sync.Reset();
}

void ThreadContext::OnJoined(void *arg) {
  ThreadState *caller_thr = static_cast<ThreadState *>(arg);
  caller_thr->clock.acquire(&sync);
  StatInc(caller_thr, StatSyncAcquire);
}

struct OnCreatedArgs {
  ThreadState *thr;
  uptr pc;
};

void ThreadContext::OnCreated(void *arg) {
  thr = 0;
  if (tid == 0)
    return;
  OnCreatedArgs *args = static_cast<OnCreatedArgs *>(arg);
  args->thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(args->thr, args->thr->fast_state, EventTypeMop, 0);
  args->thr->clock.set(args->thr->tid, args->thr->fast_state.epoch());
  args->thr->fast_synch_epoch = args->thr->fast_state.epoch();
  args->thr->clock.release(&sync);
  StatInc(args->thr, StatSyncRelease);
#ifdef TSAN_GO
  creation_stack.ObtainCurrent(args->thr, args->pc);
#else
  creation_stack_id = CurrentStackId(args->thr, args->pc);
#endif
  if (reuse_count == 0)
    StatInc(args->thr, StatThreadMaxTid);
}

void ThreadContext::OnReset(void *arg) {
  OnCreatedArgs *args = static_cast<OnCreatedArgs *>(arg);
  StatInc(args->thr, StatThreadReuse);
  sync.Reset();
  DestroyAndFree(dead_info);
}

struct OnStartedArgs {
  ThreadState *thr;
  uptr stk_addr;
  uptr stk_size;
  uptr tls_addr;
  uptr tls_size;
};

void ThreadContext::OnStarted(void *arg) {
  OnStartedArgs *args = static_cast<OnStartedArgs*>(arg);
  thr = args->thr;
  // RoundUp so that one trace part does not contain events
  // from different threads.
  epoch0 = RoundUp(epoch1 + 1, kTracePartSize);
  epoch1 = (u64)-1;
  new(thr) ThreadState(CTX(), tid, unique_id,
      epoch0, args->stk_addr, args->stk_size, args->tls_addr, args->tls_size);
#ifdef TSAN_GO
  // Setup dynamic shadow stack.
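  // In Go mode the shadow stack starts small; it is presumably grown on
  // demand by the function-entry path once shadow_stack_pos reaches
  // shadow_stack_end. In C++ mode the shadow stack is a fixed-size array
  // embedded in ThreadState, so no allocation is needed there.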
  const int kInitStackSize = 8;
  args->thr->shadow_stack = (uptr*)internal_alloc(MBlockShadowStack,
      kInitStackSize * sizeof(uptr));
  args->thr->shadow_stack_pos = thr->shadow_stack;
  args->thr->shadow_stack_end = thr->shadow_stack + kInitStackSize;
#endif
#ifndef TSAN_GO
  AllocatorThreadStart(args->thr);
#endif
  thr = args->thr;
  thr->fast_synch_epoch = epoch0;
  thr->clock.set(tid, epoch0);
  thr->clock.acquire(&sync);
  thr->fast_state.SetHistorySize(flags()->history_size);
  const uptr trace = (epoch0 / kTracePartSize) % TraceParts();
  thr->trace.headers[trace].epoch0 = epoch0;
  StatInc(thr, StatSyncAcquire);
  DPrintf("#%d: ThreadStart epoch=%zu stk_addr=%zx stk_size=%zx "
      "tls_addr=%zx tls_size=%zx\n",
      tid, (uptr)epoch0, args->stk_addr, args->stk_size,
      args->tls_addr, args->tls_size);
  thr->is_alive = true;
}

void ThreadContext::OnFinished() {
  if (!detached) {
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    thr->clock.set(thr->tid, thr->fast_state.epoch());
    thr->fast_synch_epoch = thr->fast_state.epoch();
    thr->clock.release(&sync);
    StatInc(thr, StatSyncRelease);
  }
  // Save info about the thread.
  dead_info = new(internal_alloc(MBlockDeadInfo, sizeof(ThreadDeadInfo)))
      ThreadDeadInfo();
  for (uptr i = 0; i < TraceParts(); i++) {
    dead_info->trace.headers[i].epoch0 = thr->trace.headers[i].epoch0;
    dead_info->trace.headers[i].stack0.CopyFrom(
        thr->trace.headers[i].stack0);
  }
  epoch1 = thr->fast_state.epoch();

#ifndef TSAN_GO
  AllocatorThreadFinish(thr);
#endif
  // Aggregate stats before the ThreadState object is destroyed.
  StatAggregate(CTX()->stat, thr->stat);
  thr->~ThreadState();
  thr = 0;
}

static void MaybeReportThreadLeak(ThreadContextBase *tctx_base, void *unused) {
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  if (tctx->detached)
    return;
  if (tctx->status != ThreadStatusCreated
      && tctx->status != ThreadStatusRunning
      && tctx->status != ThreadStatusFinished)
    return;
  ScopedReport rep(ReportTypeThreadLeak);
  rep.AddThread(tctx);
  OutputReport(CTX(), rep);
}

void ThreadFinalize(ThreadState *thr) {
  CHECK_GT(thr->in_rtl, 0);
  if (!flags()->report_thread_leaks)
    return;
  ThreadRegistryLock l(CTX()->thread_registry);
  CTX()->thread_registry->RunCallbackForEachThreadLocked(
      MaybeReportThreadLeak, 0);
}

int ThreadCount(ThreadState *thr) {
  CHECK_GT(thr->in_rtl, 0);
  Context *ctx = CTX();
  uptr result;
  ctx->thread_registry->GetNumberOfThreads(0, 0, &result);
  return (int)result;
}

int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
  CHECK_GT(thr->in_rtl, 0);
  StatInc(thr, StatThreadCreate);
  Context *ctx = CTX();
  OnCreatedArgs args = { thr, pc };
  int tid = ctx->thread_registry->CreateThread(uid, detached, thr->tid, &args);
  DPrintf("#%d: ThreadCreate tid=%d uid=%zu\n", thr->tid, tid, uid);
  StatSet(thr, StatThreadMaxAlive, ctx->thread_registry->GetMaxAliveThreads());
  return tid;
}

void ThreadStart(ThreadState *thr, int tid, uptr os_id) {
  CHECK_GT(thr->in_rtl, 0);
  uptr stk_addr = 0;
  uptr stk_size = 0;
  uptr tls_addr = 0;
  uptr tls_size = 0;
  GetThreadStackAndTls(tid == 0, &stk_addr, &stk_size, &tls_addr, &tls_size);

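  // Imitate writes to the new thread's stack and TLS so that stale shadow
  // values left at these addresses by a previous thread do not produce
  // false reports; the pc values 1 and 2 below are synthetic markers, not
  // real program counters. The main thread (tid == 0) is skipped,
  // presumably because its shadow is still clean at process startup.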
  if (tid) {
    if (stk_addr && stk_size)
      MemoryRangeImitateWrite(thr, /*pc=*/ 1, stk_addr, stk_size);

    if (tls_addr && tls_size) {
      // Check that the thr object is in tls.
      const uptr thr_beg = (uptr)thr;
      const uptr thr_end = (uptr)thr + sizeof(*thr);
      CHECK_GE(thr_beg, tls_addr);
      CHECK_LE(thr_beg, tls_addr + tls_size);
      CHECK_GE(thr_end, tls_addr);
      CHECK_LE(thr_end, tls_addr + tls_size);
      // Since the thr object is huge, skip it.
      MemoryRangeImitateWrite(thr, /*pc=*/ 2, tls_addr, thr_beg - tls_addr);
      MemoryRangeImitateWrite(thr, /*pc=*/ 2,
          thr_end, tls_addr + tls_size - thr_end);
    }
  }

  OnStartedArgs args = { thr, stk_addr, stk_size, tls_addr, tls_size };
  CTX()->thread_registry->StartThread(tid, os_id, &args);
}

void ThreadFinish(ThreadState *thr) {
  CHECK_GT(thr->in_rtl, 0);
  StatInc(thr, StatThreadFinish);
  if (thr->stk_addr && thr->stk_size)
    DontNeedShadowFor(thr->stk_addr, thr->stk_size);
  if (thr->tls_addr && thr->tls_size)
    DontNeedShadowFor(thr->tls_addr, thr->tls_size);
  thr->is_alive = false;
  Context *ctx = CTX();
  ctx->thread_registry->FinishThread(thr->tid);
}

static bool FindThreadByUid(ThreadContextBase *tctx, void *arg) {
  uptr uid = (uptr)arg;
  if (tctx->user_id == uid && tctx->status != ThreadStatusInvalid) {
    tctx->user_id = 0;
    return true;
  }
  return false;
}

int ThreadTid(ThreadState *thr, uptr pc, uptr uid) {
  CHECK_GT(thr->in_rtl, 0);
  Context *ctx = CTX();
  int res = ctx->thread_registry->FindThread(FindThreadByUid, (void*)uid);
  DPrintf("#%d: ThreadTid uid=%zu tid=%d\n", thr->tid, uid, res);
  return res;
}

void ThreadJoin(ThreadState *thr, uptr pc, int tid) {
  CHECK_GT(thr->in_rtl, 0);
  CHECK_GT(tid, 0);
  CHECK_LT(tid, kMaxTid);
  DPrintf("#%d: ThreadJoin tid=%d\n", thr->tid, tid);
  Context *ctx = CTX();
  ctx->thread_registry->JoinThread(tid, thr);
}

void ThreadDetach(ThreadState *thr, uptr pc, int tid) {
  CHECK_GT(thr->in_rtl, 0);
  CHECK_GT(tid, 0);
  CHECK_LT(tid, kMaxTid);
  Context *ctx = CTX();
  ctx->thread_registry->DetachThread(tid);
}

void ThreadSetName(ThreadState *thr, const char *name) {
  CHECK_GT(thr->in_rtl, 0);
  CTX()->thread_registry->SetThreadName(thr->tid, name);
}

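// MemoryAccessRange processes [addr, addr + size) in three phases: a
// byte-by-byte head until addr is kShadowCell-aligned, whole shadow cells
// through the middle (kAccessSizeLog = 3, i.e. 8-byte accesses), and a
// byte-by-byte tail. The epoch is incremented and traced only once, so all
// shadow values written for the range share a single trace event.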
void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
                       uptr size, bool is_write) {
  if (size == 0)
    return;

  u64 *shadow_mem = (u64*)MemToShadow(addr);
  DPrintf2("#%d: MemoryAccessRange: @%p %p size=%d is_write=%d\n",
      thr->tid, (void*)pc, (void*)addr, (int)size, is_write);

#if TSAN_DEBUG
  if (!IsAppMem(addr)) {
    Printf("Access to non app mem %zx\n", addr);
    DCHECK(IsAppMem(addr));
  }
  if (!IsAppMem(addr + size - 1)) {
    Printf("Access to non app mem %zx\n", addr + size - 1);
    DCHECK(IsAppMem(addr + size - 1));
  }
  if (!IsShadowMem((uptr)shadow_mem)) {
    Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
    DCHECK(IsShadowMem((uptr)shadow_mem));
  }
  if (!IsShadowMem((uptr)(shadow_mem + size * kShadowCnt / 8 - 1))) {
    Printf("Bad shadow addr %p (%zx)\n",
        shadow_mem + size * kShadowCnt / 8 - 1, addr + size - 1);
    DCHECK(IsShadowMem((uptr)(shadow_mem + size * kShadowCnt / 8 - 1)));
  }
#endif

  StatInc(thr, StatMopRange);

  FastState fast_state = thr->fast_state;
  if (fast_state.GetIgnoreBit())
    return;

  fast_state.IncrementEpoch();
  thr->fast_state = fast_state;
  TraceAddEvent(thr, fast_state, EventTypeMop, pc);

  bool unaligned = (addr % kShadowCell) != 0;

  // Handle unaligned beginning, if any.
  for (; addr % kShadowCell && size; addr++, size--) {
    int const kAccessSizeLog = 0;
    Shadow cur(fast_state);
    cur.SetWrite(is_write);
    cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog);
    MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
        shadow_mem, cur);
  }
  if (unaligned)
    shadow_mem += kShadowCnt;
  // Handle middle part, if any.
  for (; size >= kShadowCell; addr += kShadowCell, size -= kShadowCell) {
    int const kAccessSizeLog = 3;
    Shadow cur(fast_state);
    cur.SetWrite(is_write);
    cur.SetAddr0AndSizeLog(0, kAccessSizeLog);
    MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
        shadow_mem, cur);
    shadow_mem += kShadowCnt;
  }
  // Handle ending, if any.
  for (; size; addr++, size--) {
    int const kAccessSizeLog = 0;
    Shadow cur(fast_state);
    cur.SetWrite(is_write);
    cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog);
    MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
        shadow_mem, cur);
  }
}

void MemoryAccessRangeStep(ThreadState *thr, uptr pc, uptr addr,
                           uptr size, uptr step, bool is_write) {
  if (size == 0)
    return;
  FastState fast_state = thr->fast_state;
  if (fast_state.GetIgnoreBit())
    return;
  StatInc(thr, StatMopRange);
  fast_state.IncrementEpoch();
  thr->fast_state = fast_state;
  TraceAddEvent(thr, fast_state, EventTypeMop, pc);

  for (uptr addr_end = addr + size; addr < addr_end; addr += step) {
    u64 *shadow_mem = (u64*)MemToShadow(addr);
    Shadow cur(fast_state);
    cur.SetWrite(is_write);
    cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kSizeLog1);
    MemoryAccessImpl(thr, addr, kSizeLog1, is_write, false,
        shadow_mem, cur);
  }
}
}  // namespace __tsan