tsan_rtl_thread.cc revision e1ddbf9a458e81125a03fea721997565124294ae
//===-- tsan_rtl_thread.cc ------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "tsan_platform.h"
#include "tsan_report.h"
#include "tsan_sync.h"

namespace __tsan {

// ThreadContext implementation.

ThreadContext::ThreadContext(int tid)
  : ThreadContextBase(tid)
  , thr()
  , sync()
  , epoch0()
  , epoch1() {
}

#ifndef TSAN_GO
ThreadContext::~ThreadContext() {
}
#endif

void ThreadContext::OnDead() {
  sync.Reset();
}

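// The joining thread acquires this context's sync clock, establishing a
// happens-before edge from everything the joined thread did to the joiner.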
void ThreadContext::OnJoined(void *arg) {
  ThreadState *caller_thr = static_cast<ThreadState *>(arg);
  AcquireImpl(caller_thr, 0, &sync);
  sync.Reset();
}

struct OnCreatedArgs {
  ThreadState *thr;
  uptr pc;
};

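// Runs in the parent thread. Releases the parent's vector clock into this
// context's sync clock (the child acquires it in OnStarted) and records the
// creation stack for reports.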
void ThreadContext::OnCreated(void *arg) {
  thr = 0;
  if (tid == 0)
    return;
  OnCreatedArgs *args = static_cast<OnCreatedArgs *>(arg);
  args->thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(args->thr, args->thr->fast_state, EventTypeMop, 0);
  ReleaseImpl(args->thr, 0, &sync);
#ifdef TSAN_GO
  creation_stack.ObtainCurrent(args->thr, args->pc);
#else
  creation_stack_id = CurrentStackId(args->thr, args->pc);
#endif
  if (reuse_count == 0)
    StatInc(args->thr, StatThreadMaxTid);
}

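// Runs when the thread slot is recycled for a new thread: drops the stale
// sync clock and flushes the (large) per-thread trace memory.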
void ThreadContext::OnReset() {
  sync.Reset();
  FlushUnneededShadowMemory(GetThreadTrace(tid), TraceSize() * sizeof(Event));
  //!!! FlushUnneededShadowMemory(GetThreadTraceHeader(tid), sizeof(Trace));
}

struct OnStartedArgs {
  ThreadState *thr;
  uptr stk_addr;
  uptr stk_size;
  uptr tls_addr;
  uptr tls_size;
};

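// Runs in the new thread. Constructs its ThreadState in place, starts the
// epoch at a fresh trace-part boundary, and acquires the sync clock that the
// parent released in OnCreated.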
void ThreadContext::OnStarted(void *arg) {
  OnStartedArgs *args = static_cast<OnStartedArgs*>(arg);
  thr = args->thr;
  // RoundUp so that one trace part does not contain events
  // from different threads.
  epoch0 = RoundUp(epoch1 + 1, kTracePartSize);
  epoch1 = (u64)-1;
  new(thr) ThreadState(CTX(), tid, unique_id,
      epoch0, args->stk_addr, args->stk_size, args->tls_addr, args->tls_size);
#ifdef TSAN_GO
  // Setup dynamic shadow stack.
  const int kInitStackSize = 8;
  args->thr->shadow_stack = (uptr*)internal_alloc(MBlockShadowStack,
      kInitStackSize * sizeof(uptr));
  args->thr->shadow_stack_pos = thr->shadow_stack;
  args->thr->shadow_stack_end = thr->shadow_stack + kInitStackSize;
#endif
#ifndef TSAN_GO
  AllocatorThreadStart(args->thr);
#endif
  thr = args->thr;
  thr->fast_synch_epoch = epoch0;
  AcquireImpl(thr, 0, &sync);
  thr->fast_state.SetHistorySize(flags()->history_size);
  const uptr trace = (epoch0 / kTracePartSize) % TraceParts();
  Trace *thr_trace = ThreadTrace(thr->tid);
  thr_trace->headers[trace].epoch0 = epoch0;
  StatInc(thr, StatSyncAcquire);
  sync.Reset();
  DPrintf("#%d: ThreadStart epoch=%zu stk_addr=%zx stk_size=%zx "
          "tls_addr=%zx tls_size=%zx\n",
          tid, (uptr)epoch0, args->stk_addr, args->stk_size,
          args->tls_addr, args->tls_size);
  thr->is_alive = true;
}

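// Runs in the finishing thread. For joinable threads, releases into the sync
// clock so that a later OnJoined acquires everything done here; then records
// the final epoch and destroys the ThreadState.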
void ThreadContext::OnFinished() {
  if (!detached) {
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    ReleaseImpl(thr, 0, &sync);
  }
  epoch1 = thr->fast_state.epoch();

#ifndef TSAN_GO
  AllocatorThreadFinish(thr);
#endif
  // Aggregate stats before the ThreadState object is destroyed.
  StatAggregate(CTX()->stat, thr->stat);
  thr->~ThreadState();
  thr = 0;
}

#ifndef TSAN_GO
struct ThreadLeak {
  ThreadContext *tctx;
  int count;
};

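// Registry callback: collects non-detached threads that finished but were
// never joined, grouped by creation stack so duplicates form a single report.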
static void MaybeReportThreadLeak(ThreadContextBase *tctx_base, void *arg) {
  Vector<ThreadLeak> &leaks = *(Vector<ThreadLeak>*)arg;
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  if (tctx->detached || tctx->status != ThreadStatusFinished)
    return;
  for (uptr i = 0; i < leaks.Size(); i++) {
    if (leaks[i].tctx->creation_stack_id == tctx->creation_stack_id) {
      leaks[i].count++;
      return;
    }
  }
  ThreadLeak leak = {tctx, 1};
  leaks.PushBack(leak);
}
#endif

static void ThreadCheckIgnore(ThreadState *thr) {
  if (thr->ignore_reads_and_writes) {
    Printf("ThreadSanitizer: thread T%d finished with ignores enabled.\n",
           thr->tid);
  }
  if (thr->ignore_sync) {
    Printf("ThreadSanitizer: thread T%d finished with sync ignores enabled.\n",
           thr->tid);
  }
}

void ThreadFinalize(ThreadState *thr) {
  CHECK_GT(thr->in_rtl, 0);
  ThreadCheckIgnore(thr);
#ifndef TSAN_GO
  if (!flags()->report_thread_leaks)
    return;
  ThreadRegistryLock l(CTX()->thread_registry);
  Vector<ThreadLeak> leaks(MBlockScopedBuf);
  CTX()->thread_registry->RunCallbackForEachThreadLocked(
      MaybeReportThreadLeak, &leaks);
  for (uptr i = 0; i < leaks.Size(); i++) {
    ScopedReport rep(ReportTypeThreadLeak);
    rep.AddThread(leaks[i].tctx);
    rep.SetCount(leaks[i].count);
    OutputReport(CTX(), rep);
  }
#endif
}

int ThreadCount(ThreadState *thr) {
  CHECK_GT(thr->in_rtl, 0);
  Context *ctx = CTX();
  uptr result;
  ctx->thread_registry->GetNumberOfThreads(0, 0, &result);
  return (int)result;
}

int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
  CHECK_GT(thr->in_rtl, 0);
  StatInc(thr, StatThreadCreate);
  Context *ctx = CTX();
  OnCreatedArgs args = { thr, pc };
  int tid = ctx->thread_registry->CreateThread(uid, detached, thr->tid, &args);
  DPrintf("#%d: ThreadCreate tid=%d uid=%zu\n", thr->tid, tid, uid);
  StatSet(thr, StatThreadMaxAlive, ctx->thread_registry->GetMaxAliveThreads());
  return tid;
}

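// Imitates writes over the new thread's stack and TLS (with fake pcs 1 and 2)
// so that whatever data is already there is attributed to this thread, then
// notifies the registry, which invokes OnStarted above. The main thread
// (tid == 0) skips the imitation step.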
void ThreadStart(ThreadState *thr, int tid, uptr os_id) {
  CHECK_GT(thr->in_rtl, 0);
  uptr stk_addr = 0;
  uptr stk_size = 0;
  uptr tls_addr = 0;
  uptr tls_size = 0;
  GetThreadStackAndTls(tid == 0, &stk_addr, &stk_size, &tls_addr, &tls_size);

  if (tid) {
    if (stk_addr && stk_size)
      MemoryRangeImitateWrite(thr, /*pc=*/ 1, stk_addr, stk_size);

    if (tls_addr && tls_size) {
      // Check that the thr object is in TLS.
      const uptr thr_beg = (uptr)thr;
      const uptr thr_end = (uptr)thr + sizeof(*thr);
      CHECK_GE(thr_beg, tls_addr);
      CHECK_LE(thr_beg, tls_addr + tls_size);
      CHECK_GE(thr_end, tls_addr);
      CHECK_LE(thr_end, tls_addr + tls_size);
      // Since the thr object is huge, skip it.
      MemoryRangeImitateWrite(thr, /*pc=*/ 2, tls_addr, thr_beg - tls_addr);
      MemoryRangeImitateWrite(thr, /*pc=*/ 2,
          thr_end, tls_addr + tls_size - thr_end);
    }
  }

  OnStartedArgs args = { thr, stk_addr, stk_size, tls_addr, tls_size };
  CTX()->thread_registry->StartThread(tid, os_id, &args);
}

void ThreadFinish(ThreadState *thr) {
  CHECK_GT(thr->in_rtl, 0);
  ThreadCheckIgnore(thr);
  StatInc(thr, StatThreadFinish);
  if (thr->stk_addr && thr->stk_size)
    DontNeedShadowFor(thr->stk_addr, thr->stk_size);
  if (thr->tls_addr && thr->tls_size)
    DontNeedShadowFor(thr->tls_addr, thr->tls_size);
  thr->is_alive = false;
  Context *ctx = CTX();
  ctx->thread_registry->FinishThread(thr->tid);
}

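// Registry callback for ThreadTid below. Note the side effect: a matched
// user_id is cleared, so the same uid cannot be resolved twice.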
static bool FindThreadByUid(ThreadContextBase *tctx, void *arg) {
  uptr uid = (uptr)arg;
  if (tctx->user_id == uid && tctx->status != ThreadStatusInvalid) {
    tctx->user_id = 0;
    return true;
  }
  return false;
}

int ThreadTid(ThreadState *thr, uptr pc, uptr uid) {
  CHECK_GT(thr->in_rtl, 0);
  Context *ctx = CTX();
  int res = ctx->thread_registry->FindThread(FindThreadByUid, (void*)uid);
  DPrintf("#%d: ThreadTid uid=%zu tid=%d\n", thr->tid, uid, res);
  return res;
}

void ThreadJoin(ThreadState *thr, uptr pc, int tid) {
  CHECK_GT(thr->in_rtl, 0);
  CHECK_GT(tid, 0);
  CHECK_LT(tid, kMaxTid);
  DPrintf("#%d: ThreadJoin tid=%d\n", thr->tid, tid);
  Context *ctx = CTX();
  ctx->thread_registry->JoinThread(tid, thr);
}

void ThreadDetach(ThreadState *thr, uptr pc, int tid) {
  CHECK_GT(thr->in_rtl, 0);
  CHECK_GT(tid, 0);
  CHECK_LT(tid, kMaxTid);
  Context *ctx = CTX();
  ctx->thread_registry->DetachThread(tid);
}

void ThreadSetName(ThreadState *thr, const char *name) {
  CHECK_GT(thr->in_rtl, 0);
  CTX()->thread_registry->SetThreadName(thr->tid, name);
}

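// Processes a range access as a sequence of shadow-cell accesses: a
// byte-granular unaligned head, aligned 8-byte cells in the middle, and a
// byte-granular tail.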
void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
                       uptr size, bool is_write) {
  if (size == 0)
    return;

  u64 *shadow_mem = (u64*)MemToShadow(addr);
  DPrintf2("#%d: MemoryAccessRange: @%p %p size=%d is_write=%d\n",
      thr->tid, (void*)pc, (void*)addr,
      (int)size, is_write);

#if TSAN_DEBUG
  if (!IsAppMem(addr)) {
    Printf("Access to non app mem %zx\n", addr);
    DCHECK(IsAppMem(addr));
  }
  if (!IsAppMem(addr + size - 1)) {
    Printf("Access to non app mem %zx\n", addr + size - 1);
    DCHECK(IsAppMem(addr + size - 1));
  }
  if (!IsShadowMem((uptr)shadow_mem)) {
    Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
    DCHECK(IsShadowMem((uptr)shadow_mem));
  }
  if (!IsShadowMem((uptr)(shadow_mem + size * kShadowCnt / 8 - 1))) {
    Printf("Bad shadow addr %p (%zx)\n",
               shadow_mem + size * kShadowCnt / 8 - 1, addr + size - 1);
    DCHECK(IsShadowMem((uptr)(shadow_mem + size * kShadowCnt / 8 - 1)));
  }
#endif

  StatInc(thr, StatMopRange);

  if (*shadow_mem == kShadowRodata) {
    // Access to .rodata section, no races here.
    // Measurements show that it can be 10-20% of all memory accesses.
    StatInc(thr, StatMopRangeRodata);
    return;
  }

  FastState fast_state = thr->fast_state;
  if (fast_state.GetIgnoreBit())
    return;

  fast_state.IncrementEpoch();
  thr->fast_state = fast_state;
  TraceAddEvent(thr, fast_state, EventTypeMop, pc);

  bool unaligned = (addr % kShadowCell) != 0;

  // Handle unaligned beginning, if any.
  for (; addr % kShadowCell && size; addr++, size--) {
    int const kAccessSizeLog = 0;
    Shadow cur(fast_state);
    cur.SetWrite(is_write);
    cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog);
    MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
        shadow_mem, cur);
  }
  if (unaligned)
    shadow_mem += kShadowCnt;
  // Handle middle part, if any.
  for (; size >= kShadowCell; addr += kShadowCell, size -= kShadowCell) {
    int const kAccessSizeLog = 3;
    Shadow cur(fast_state);
    cur.SetWrite(is_write);
    cur.SetAddr0AndSizeLog(0, kAccessSizeLog);
    MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
        shadow_mem, cur);
    shadow_mem += kShadowCnt;
  }
  // Handle ending, if any.
  for (; size; addr++, size--) {
    int const kAccessSizeLog = 0;
    Shadow cur(fast_state);
    cur.SetWrite(is_write);
    cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog);
    MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
        shadow_mem, cur);
  }
}

}  // namespace __tsan