tsan_rtl_thread.cc revision e0023f74ea88efee329f68391b70f8adc6b21617
1//===-- tsan_rtl_thread.cc ------------------------------------------------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file is a part of ThreadSanitizer (TSan), a race detector. 11// 12//===----------------------------------------------------------------------===// 13 14#include "sanitizer_common/sanitizer_placement_new.h" 15#include "tsan_rtl.h" 16#include "tsan_mman.h" 17#include "tsan_platform.h" 18#include "tsan_report.h" 19#include "tsan_sync.h" 20 21namespace __tsan { 22 23#ifndef TSAN_GO 24const int kThreadQuarantineSize = 16; 25#else 26const int kThreadQuarantineSize = 64; 27#endif 28 29static void MaybeReportThreadLeak(ThreadContext *tctx) { 30 if (tctx->detached) 31 return; 32 if (tctx->status != ThreadStatusCreated 33 && tctx->status != ThreadStatusRunning 34 && tctx->status != ThreadStatusFinished) 35 return; 36 ScopedReport rep(ReportTypeThreadLeak); 37 rep.AddThread(tctx); 38 OutputReport(rep); 39} 40 41void ThreadFinalize(ThreadState *thr) { 42 CHECK_GT(thr->in_rtl, 0); 43 if (!flags()->report_thread_leaks) 44 return; 45 Context *ctx = CTX(); 46 Lock l(&ctx->thread_mtx); 47 for (unsigned i = 0; i < kMaxTid; i++) { 48 ThreadContext *tctx = ctx->threads[i]; 49 if (tctx == 0) 50 continue; 51 MaybeReportThreadLeak(tctx); 52 } 53} 54 55static void ThreadDead(ThreadState *thr, ThreadContext *tctx) { 56 Context *ctx = CTX(); 57 CHECK_GT(thr->in_rtl, 0); 58 CHECK(tctx->status == ThreadStatusRunning 59 || tctx->status == ThreadStatusFinished); 60 DPrintf("#%d: ThreadDead uid=%zu\n", thr->tid, tctx->user_id); 61 tctx->status = ThreadStatusDead; 62 tctx->user_id = 0; 63 tctx->sync.Reset(); 64 65 // Put to dead list. 66 tctx->dead_next = 0; 67 if (ctx->dead_list_size == 0) 68 ctx->dead_list_head = tctx; 69 else 70 ctx->dead_list_tail->dead_next = tctx; 71 ctx->dead_list_tail = tctx; 72 ctx->dead_list_size++; 73} 74 75int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) { 76 CHECK_GT(thr->in_rtl, 0); 77 Context *ctx = CTX(); 78 Lock l(&ctx->thread_mtx); 79 StatInc(thr, StatThreadCreate); 80 int tid = -1; 81 ThreadContext *tctx = 0; 82 if (ctx->dead_list_size > kThreadQuarantineSize 83 || ctx->thread_seq >= kMaxTid) { 84 if (ctx->dead_list_size == 0) { 85 TsanPrintf("ThreadSanitizer: %d thread limit exceeded. Dying.\n", 86 kMaxTid); 87 Die(); 88 } 89 StatInc(thr, StatThreadReuse); 90 tctx = ctx->dead_list_head; 91 ctx->dead_list_head = tctx->dead_next; 92 ctx->dead_list_size--; 93 if (ctx->dead_list_size == 0) { 94 CHECK_EQ(tctx->dead_next, 0); 95 ctx->dead_list_head = 0; 96 } 97 CHECK_EQ(tctx->status, ThreadStatusDead); 98 tctx->status = ThreadStatusInvalid; 99 tctx->reuse_count++; 100 tctx->sync.Reset(); 101 tid = tctx->tid; 102 DestroyAndFree(tctx->dead_info); 103 } else { 104 StatInc(thr, StatThreadMaxTid); 105 tid = ctx->thread_seq++; 106 void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadContext)); 107 tctx = new(mem) ThreadContext(tid); 108 ctx->threads[tid] = tctx; 109 } 110 CHECK_NE(tctx, 0); 111 CHECK_GE(tid, 0); 112 CHECK_LT(tid, kMaxTid); 113 DPrintf("#%d: ThreadCreate tid=%d uid=%zu\n", thr->tid, tid, uid); 114 CHECK_EQ(tctx->status, ThreadStatusInvalid); 115 ctx->alive_threads++; 116 if (ctx->max_alive_threads < ctx->alive_threads) { 117 ctx->max_alive_threads++; 118 CHECK_EQ(ctx->max_alive_threads, ctx->alive_threads); 119 StatInc(thr, StatThreadMaxAlive); 120 } 121 tctx->status = ThreadStatusCreated; 122 tctx->thr = 0; 123 tctx->user_id = uid; 124 tctx->unique_id = ctx->unique_thread_seq++; 125 tctx->detached = detached; 126 if (tid) { 127 thr->fast_state.IncrementEpoch(); 128 // Can't increment epoch w/o writing to the trace as well. 129 TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeMop, 0); 130 thr->clock.set(thr->tid, thr->fast_state.epoch()); 131 thr->fast_synch_epoch = thr->fast_state.epoch(); 132 thr->clock.release(&tctx->sync); 133 StatInc(thr, StatSyncRelease); 134 135 tctx->creation_stack.ObtainCurrent(thr, pc); 136 } 137 return tid; 138} 139 140void ThreadStart(ThreadState *thr, int tid, uptr os_id) { 141 CHECK_GT(thr->in_rtl, 0); 142 uptr stk_addr = 0; 143 uptr stk_size = 0; 144 uptr tls_addr = 0; 145 uptr tls_size = 0; 146 GetThreadStackAndTls(tid == 0, &stk_addr, &stk_size, &tls_addr, &tls_size); 147 148 if (tid) { 149 if (stk_addr && stk_size) { 150 MemoryResetRange(thr, /*pc=*/ 1, stk_addr, stk_size); 151 } 152 153 if (tls_addr && tls_size) { 154 // Check that the thr object is in tls; 155 const uptr thr_beg = (uptr)thr; 156 const uptr thr_end = (uptr)thr + sizeof(*thr); 157 CHECK_GE(thr_beg, tls_addr); 158 CHECK_LE(thr_beg, tls_addr + tls_size); 159 CHECK_GE(thr_end, tls_addr); 160 CHECK_LE(thr_end, tls_addr + tls_size); 161 // Since the thr object is huge, skip it. 162 MemoryResetRange(thr, /*pc=*/ 2, tls_addr, thr_beg - tls_addr); 163 MemoryResetRange(thr, /*pc=*/ 2, thr_end, tls_addr + tls_size - thr_end); 164 } 165 } 166 167 Lock l(&CTX()->thread_mtx); 168 ThreadContext *tctx = CTX()->threads[tid]; 169 CHECK_NE(tctx, 0); 170 CHECK_EQ(tctx->status, ThreadStatusCreated); 171 tctx->status = ThreadStatusRunning; 172 tctx->os_id = os_id; 173 tctx->epoch0 = tctx->epoch1 + 1; 174 tctx->epoch1 = (u64)-1; 175 new(thr) ThreadState(CTX(), tid, tctx->unique_id, 176 tctx->epoch0, stk_addr, stk_size, 177 tls_addr, tls_size); 178#ifdef TSAN_GO 179 // Setup dynamic shadow stack. 180 const int kInitStackSize = 8; 181 thr->shadow_stack = (uptr*)internal_alloc(MBlockShadowStack, 182 kInitStackSize * sizeof(uptr)); 183 thr->shadow_stack_pos = thr->shadow_stack; 184 thr->shadow_stack_end = thr->shadow_stack + kInitStackSize; 185#endif 186 tctx->thr = thr; 187 thr->fast_synch_epoch = tctx->epoch0; 188 thr->clock.set(tid, tctx->epoch0); 189 thr->clock.acquire(&tctx->sync); 190 StatInc(thr, StatSyncAcquire); 191 DPrintf("#%d: ThreadStart epoch=%zu stk_addr=%zx stk_size=%zx " 192 "tls_addr=%zx tls_size=%zx\n", 193 tid, (uptr)tctx->epoch0, stk_addr, stk_size, tls_addr, tls_size); 194 thr->is_alive = true; 195} 196 197void ThreadFinish(ThreadState *thr) { 198 CHECK_GT(thr->in_rtl, 0); 199 StatInc(thr, StatThreadFinish); 200 // FIXME: Treat it as write. 201 if (thr->stk_addr && thr->stk_size) 202 MemoryResetRange(thr, /*pc=*/ 3, thr->stk_addr, thr->stk_size); 203 if (thr->tls_addr && thr->tls_size) { 204 const uptr thr_beg = (uptr)thr; 205 const uptr thr_end = (uptr)thr + sizeof(*thr); 206 // Since the thr object is huge, skip it. 207 MemoryResetRange(thr, /*pc=*/ 4, thr->tls_addr, thr_beg - thr->tls_addr); 208 MemoryResetRange(thr, /*pc=*/ 5, 209 thr_end, thr->tls_addr + thr->tls_size - thr_end); 210 } 211 thr->is_alive = false; 212 Context *ctx = CTX(); 213 Lock l(&ctx->thread_mtx); 214 ThreadContext *tctx = ctx->threads[thr->tid]; 215 CHECK_NE(tctx, 0); 216 CHECK_EQ(tctx->status, ThreadStatusRunning); 217 CHECK_GT(ctx->alive_threads, 0); 218 ctx->alive_threads--; 219 if (tctx->detached) { 220 ThreadDead(thr, tctx); 221 } else { 222 thr->fast_state.IncrementEpoch(); 223 // Can't increment epoch w/o writing to the trace as well. 224 TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeMop, 0); 225 thr->clock.set(thr->tid, thr->fast_state.epoch()); 226 thr->fast_synch_epoch = thr->fast_state.epoch(); 227 thr->clock.release(&tctx->sync); 228 StatInc(thr, StatSyncRelease); 229 tctx->status = ThreadStatusFinished; 230 } 231 232 // Save from info about the thread. 233 tctx->dead_info = new(internal_alloc(MBlockDeadInfo, sizeof(ThreadDeadInfo))) 234 ThreadDeadInfo(); 235 internal_memcpy(&tctx->dead_info->trace.events[0], 236 &thr->trace.events[0], sizeof(thr->trace.events)); 237 for (int i = 0; i < kTraceParts; i++) { 238 tctx->dead_info->trace.headers[i].stack0.CopyFrom( 239 thr->trace.headers[i].stack0); 240 } 241 tctx->epoch1 = thr->fast_state.epoch(); 242 243#ifndef TSAN_GO 244 AlloctorThreadFinish(thr); 245#endif 246 thr->~ThreadState(); 247 StatAggregate(ctx->stat, thr->stat); 248 tctx->thr = 0; 249} 250 251int ThreadTid(ThreadState *thr, uptr pc, uptr uid) { 252 CHECK_GT(thr->in_rtl, 0); 253 Context *ctx = CTX(); 254 Lock l(&ctx->thread_mtx); 255 int res = -1; 256 for (unsigned tid = 0; tid < kMaxTid; tid++) { 257 ThreadContext *tctx = ctx->threads[tid]; 258 if (tctx != 0 && tctx->user_id == uid 259 && tctx->status != ThreadStatusInvalid) { 260 tctx->user_id = 0; 261 res = tid; 262 break; 263 } 264 } 265 DPrintf("#%d: ThreadTid uid=%zu tid=%d\n", thr->tid, uid, res); 266 return res; 267} 268 269void ThreadJoin(ThreadState *thr, uptr pc, int tid) { 270 CHECK_GT(thr->in_rtl, 0); 271 CHECK_GT(tid, 0); 272 CHECK_LT(tid, kMaxTid); 273 DPrintf("#%d: ThreadJoin tid=%d\n", thr->tid, tid); 274 Context *ctx = CTX(); 275 Lock l(&ctx->thread_mtx); 276 ThreadContext *tctx = ctx->threads[tid]; 277 if (tctx->status == ThreadStatusInvalid) { 278 TsanPrintf("ThreadSanitizer: join of non-existent thread\n"); 279 return; 280 } 281 CHECK_EQ(tctx->detached, false); 282 CHECK_EQ(tctx->status, ThreadStatusFinished); 283 thr->clock.acquire(&tctx->sync); 284 StatInc(thr, StatSyncAcquire); 285 ThreadDead(thr, tctx); 286} 287 288void ThreadDetach(ThreadState *thr, uptr pc, int tid) { 289 CHECK_GT(thr->in_rtl, 0); 290 CHECK_GT(tid, 0); 291 CHECK_LT(tid, kMaxTid); 292 Context *ctx = CTX(); 293 Lock l(&ctx->thread_mtx); 294 ThreadContext *tctx = ctx->threads[tid]; 295 if (tctx->status == ThreadStatusInvalid) { 296 TsanPrintf("ThreadSanitizer: detach of non-existent thread\n"); 297 return; 298 } 299 if (tctx->status == ThreadStatusFinished) { 300 ThreadDead(thr, tctx); 301 } else { 302 tctx->detached = true; 303 } 304} 305 306void ThreadFinalizerGoroutine(ThreadState *thr) { 307 thr->clock.Disable(thr->tid); 308} 309 310void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, 311 uptr size, bool is_write) { 312 if (size == 0) 313 return; 314 315 u64 *shadow_mem = (u64*)MemToShadow(addr); 316 DPrintf2("#%d: MemoryAccessRange: @%p %p size=%d is_write=%d\n", 317 thr->tid, (void*)pc, (void*)addr, 318 (int)size, is_write); 319 320#if TSAN_DEBUG 321 if (!IsAppMem(addr)) { 322 TsanPrintf("Access to non app mem %zx\n", addr); 323 DCHECK(IsAppMem(addr)); 324 } 325 if (!IsAppMem(addr + size - 1)) { 326 TsanPrintf("Access to non app mem %zx\n", addr + size - 1); 327 DCHECK(IsAppMem(addr + size - 1)); 328 } 329 if (!IsShadowMem((uptr)shadow_mem)) { 330 TsanPrintf("Bad shadow addr %p (%zx)\n", shadow_mem, addr); 331 DCHECK(IsShadowMem((uptr)shadow_mem)); 332 } 333 if (!IsShadowMem((uptr)(shadow_mem + size * kShadowCnt / 8 - 1))) { 334 TsanPrintf("Bad shadow addr %p (%zx)\n", 335 shadow_mem + size * kShadowCnt / 8 - 1, addr + size - 1); 336 DCHECK(IsShadowMem((uptr)(shadow_mem + size * kShadowCnt / 8 - 1))); 337 } 338#endif 339 340 StatInc(thr, StatMopRange); 341 342 FastState fast_state = thr->fast_state; 343 if (fast_state.GetIgnoreBit()) 344 return; 345 346 fast_state.IncrementEpoch(); 347 thr->fast_state = fast_state; 348 TraceAddEvent(thr, fast_state.epoch(), EventTypeMop, pc); 349 350 bool unaligned = (addr % kShadowCell) != 0; 351 352 // Handle unaligned beginning, if any. 353 for (; addr % kShadowCell && size; addr++, size--) { 354 int const kAccessSizeLog = 0; 355 Shadow cur(fast_state); 356 cur.SetWrite(is_write); 357 cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog); 358 MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, fast_state, 359 shadow_mem, cur); 360 } 361 if (unaligned) 362 shadow_mem += kShadowCnt; 363 // Handle middle part, if any. 364 for (; size >= kShadowCell; addr += kShadowCell, size -= kShadowCell) { 365 int const kAccessSizeLog = 3; 366 Shadow cur(fast_state); 367 cur.SetWrite(is_write); 368 cur.SetAddr0AndSizeLog(0, kAccessSizeLog); 369 MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, fast_state, 370 shadow_mem, cur); 371 shadow_mem += kShadowCnt; 372 } 373 // Handle ending, if any. 374 for (; size; addr++, size--) { 375 int const kAccessSizeLog = 0; 376 Shadow cur(fast_state); 377 cur.SetWrite(is_write); 378 cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog); 379 MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, fast_state, 380 shadow_mem, cur); 381 } 382} 383 384void MemoryRead1Byte(ThreadState *thr, uptr pc, uptr addr) { 385 MemoryAccess(thr, pc, addr, 0, 0); 386} 387 388void MemoryWrite1Byte(ThreadState *thr, uptr pc, uptr addr) { 389 MemoryAccess(thr, pc, addr, 0, 1); 390} 391 392void MemoryRead8Byte(ThreadState *thr, uptr pc, uptr addr) { 393 MemoryAccess(thr, pc, addr, 3, 0); 394} 395 396void MemoryWrite8Byte(ThreadState *thr, uptr pc, uptr addr) { 397 MemoryAccess(thr, pc, addr, 3, 1); 398} 399} // namespace __tsan 400