tsan_rtl.cc revision 1dc4cf7e253aefa3ce3bd4a1d349a13647e8b2ea
//===-- tsan_rtl.cc -------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main file (entry points) for the TSan run-time.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_defs.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "tsan_suppressions.h"

volatile int __tsan_resumed = 0;

extern "C" void __tsan_resume() {
  __tsan_resumed = 1;
}

namespace __tsan {

#ifndef TSAN_GO
THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64);
#endif
static char ctx_placeholder[sizeof(Context)] ALIGNED(64);

static Context *ctx;
Context *CTX() {
  return ctx;
}

Context::Context()
  : initialized()
  , report_mtx(MutexTypeReport, StatMtxReport)
  , nreported()
  , nmissed_expected()
  , thread_mtx(MutexTypeThreads, StatMtxThreads)
  , racy_stacks(MBlockRacyStacks)
  , racy_addresses(MBlockRacyAddresses) {
}

// The objects are allocated in TLS, so one may rely on zero-initialization.
ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
                         uptr stk_addr, uptr stk_size,
                         uptr tls_addr, uptr tls_size)
  : fast_state(tid, epoch)
  // Do not touch these, rely on zero initialization,
  // they may be accessed before the ctor.
  // , fast_ignore_reads()
  // , fast_ignore_writes()
  // , in_rtl()
  , shadow_stack_pos(&shadow_stack[0])
  , tid(tid)
  , unique_id(unique_id)
  , stk_addr(stk_addr)
  , stk_size(stk_size)
  , tls_addr(tls_addr)
  , tls_size(tls_size) {
}

ThreadContext::ThreadContext(int tid)
  : tid(tid)
  , unique_id()
  , user_id()
  , thr()
  , status(ThreadStatusInvalid)
  , detached()
  , reuse_count()
  , epoch0()
  , epoch1()
  , dead_info()
  , dead_next() {
}

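// Formats one line describing current shadow, per-thread and sync-object
// memory consumption; emitted periodically by MemoryProfileThread below.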
static void WriteMemoryProfile(char *buf, uptr buf_size, int num) {
  uptr shadow = GetShadowMemoryConsumption();

  int nthread = 0;
  int nlivethread = 0;
  uptr threadmem = 0;
  {
    Lock l(&ctx->thread_mtx);
    for (unsigned i = 0; i < kMaxTid; i++) {
      ThreadContext *tctx = ctx->threads[i];
      if (tctx == 0)
        continue;
      nthread += 1;
      threadmem += sizeof(ThreadContext);
      if (tctx->status != ThreadStatusRunning)
        continue;
      nlivethread += 1;
      threadmem += sizeof(ThreadState);
    }
  }

  uptr nsync = 0;
  uptr syncmem = CTX()->synctab.GetMemoryConsumption(&nsync);

  internal_snprintf(buf, buf_size, "%d: shadow=%zuMB"
                                   " thread=%zuMB(total=%d/live=%d)"
                                   " sync=%zuMB(cnt=%zu)\n",
    num,
    shadow >> 20,
    threadmem >> 20, nthread, nlivethread,
    syncmem >> 20, nsync);
}

static void MemoryProfileThread(void *arg) {
  ScopedInRtl in_rtl;
  fd_t fd = (fd_t)(uptr)arg;
  for (int i = 0; ; i++) {
    InternalScopedBuffer<char> buf(4096);
    WriteMemoryProfile(buf.data(), buf.size(), i);
    internal_write(fd, buf.data(), internal_strlen(buf.data()));
    SleepForSeconds(1);
  }
}

static void InitializeMemoryProfile() {
  if (flags()->profile_memory == 0 || flags()->profile_memory[0] == 0)
    return;
  InternalScopedBuffer<char> filename(4096);
  internal_snprintf(filename.data(), filename.size(), "%s.%d",
      flags()->profile_memory, GetPid());
  fd_t fd = internal_open(filename.data(), true);
  if (fd == kInvalidFd) {
    TsanPrintf("Failed to open memory profile file '%s'\n", &filename[0]);
    Die();
  }
  internal_start_thread(&MemoryProfileThread, (void*)(uptr)fd);
}

static void MemoryFlushThread(void *arg) {
  ScopedInRtl in_rtl;
  for (int i = 0; ; i++) {
    SleepForMillis(flags()->flush_memory_ms);
    FlushShadowMemory();
  }
}

static void InitializeMemoryFlush() {
  if (flags()->flush_memory_ms == 0)
    return;
  if (flags()->flush_memory_ms < 100)
    flags()->flush_memory_ms = 100;
  internal_start_thread(&MemoryFlushThread, 0);
}

void Initialize(ThreadState *thr) {
  // Thread safe because done before all threads exist.
  static bool is_initialized = false;
  if (is_initialized)
    return;
  is_initialized = true;
  ScopedInRtl in_rtl;
#ifndef TSAN_GO
  InitializeAllocator();
#endif
  InitializeInterceptors();
  const char *env = InitializePlatform();
  InitializeMutex();
  InitializeDynamicAnnotations();
  ctx = new(ctx_placeholder) Context;
  InitializeShadowMemory();
  ctx->dead_list_size = 0;
  ctx->dead_list_head = 0;
  ctx->dead_list_tail = 0;
  InitializeFlags(&ctx->flags, env);
  InitializeSuppressions();
  InitializeMemoryProfile();
  InitializeMemoryFlush();

  if (ctx->flags.verbosity)
    TsanPrintf("***** Running under ThreadSanitizer v2 (pid %d) *****\n",
               GetPid());

  // Initialize thread 0.
  ctx->thread_seq = 0;
  int tid = ThreadCreate(thr, 0, 0, true);
  CHECK_EQ(tid, 0);
  ThreadStart(thr, tid);
  CHECK_EQ(thr->in_rtl, 1);
  ctx->initialized = true;

  if (flags()->stop_on_start) {
    TsanPrintf("ThreadSanitizer is suspended at startup (pid %d)."
           " Call __tsan_resume().\n",
           GetPid());
    while (__tsan_resumed == 0) {}
  }
}

int Finalize(ThreadState *thr) {
  ScopedInRtl in_rtl;
  Context *ctx = __tsan::ctx;
  bool failed = false;

  ThreadFinalize(thr);

  if (ctx->nreported) {
    failed = true;
    TsanPrintf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
  }

  if (ctx->nmissed_expected) {
    failed = true;
    TsanPrintf("ThreadSanitizer: missed %d expected races\n",
        ctx->nmissed_expected);
  }

  StatOutput(ctx->stat);
  return failed ? flags()->exitcode : 0;
}

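// Returns a stack depot id for the current shadow (call) stack.
// If pc is non-zero, it is pushed temporarily so that the resulting id
// also includes the current instruction as the top frame.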
u32 CurrentStackId(ThreadState *thr, uptr pc) {
  if (thr->shadow_stack_pos == 0)  // May happen during bootstrap.
    return 0;
  if (pc) {
    thr->shadow_stack_pos[0] = pc;
    thr->shadow_stack_pos++;
  }
  u32 id = StackDepotPut(thr->shadow_stack,
                         thr->shadow_stack_pos - thr->shadow_stack);
  if (pc)
    thr->shadow_stack_pos--;
  return id;
}

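// Called when the thread's trace position wraps into the next trace part:
// records the part's starting epoch and the current stack in its header,
// so that events in this part can later be restored for reports.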
void TraceSwitch(ThreadState *thr) {
  thr->nomalloc++;
  ScopedInRtl in_rtl;
  Lock l(&thr->trace.mtx);
  unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % kTraceParts;
  TraceHeader *hdr = &thr->trace.headers[trace];
  hdr->epoch0 = thr->fast_state.epoch();
  hdr->stack0.ObtainCurrent(thr, 0);
  thr->nomalloc--;
}

#ifndef TSAN_GO
extern "C" void __tsan_trace_switch() {
  TraceSwitch(cur_thread());
}

extern "C" void __tsan_report_race() {
  ReportRace(cur_thread());
}
#endif

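// Shadow words are accessed with relaxed atomics only. A race on shadow
// state itself can presumably lead at worst to a missed or duplicate
// report, so no stronger ordering is paid for on this hot path.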
ALWAYS_INLINE
static Shadow LoadShadow(u64 *p) {
  u64 raw = atomic_load((atomic_uint64_t*)p, memory_order_relaxed);
  return Shadow(raw);
}

ALWAYS_INLINE
static void StoreShadow(u64 *sp, u64 s) {
  atomic_store((atomic_uint64_t*)sp, s, memory_order_relaxed);
}

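// Stores the current shadow value into the slot and zeroes the local copy;
// store_word == 0 then acts as the "already stored" flag checked at the
// end of MemoryAccessImpl.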
ALWAYS_INLINE
static void StoreIfNotYetStored(u64 *sp, u64 *s) {
  StoreShadow(sp, *s);
  *s = 0;
}

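// Stashes the two conflicting shadow values for the reporter and raises
// a race report (via the __tsan_report_race thunk in the C++ runtime,
// directly in the Go runtime).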
static inline void HandleRace(ThreadState *thr, u64 *shadow_mem,
                              Shadow cur, Shadow old) {
  thr->racy_state[0] = cur.raw();
  thr->racy_state[1] = old.raw();
  thr->racy_shadow_addr = shadow_mem;
#ifndef TSAN_GO
  HACKY_CALL(__tsan_report_race);
#else
  ReportRace(thr);
#endif
}

static inline bool BothReads(Shadow s, int kAccessIsWrite) {
  return !kAccessIsWrite && !s.is_write();
}

static inline bool OldIsRWStronger(Shadow old, int kAccessIsWrite) {
  return old.is_write() || !kAccessIsWrite;
}

static inline bool OldIsRWWeaker(Shadow old, int kAccessIsWrite) {
  return !old.is_write() || kAccessIsWrite;
}

static inline bool OldIsInSameSynchEpoch(Shadow old, ThreadState *thr) {
  return old.epoch() >= thr->fast_synch_epoch;
}

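// The old access happens-before the current one iff the current thread's
// vector clock entry for the old access's thread has reached the old
// access's epoch.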
static inline bool HappensBefore(Shadow old, ThreadState *thr) {
  return thr->clock.get(old.tid()) >= old.epoch();
}

ALWAYS_INLINE
void MemoryAccessImpl(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, FastState fast_state,
    u64 *shadow_mem, Shadow cur) {
  StatInc(thr, StatMop);
  StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
  StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));

  // This potentially can live in an MMX/SSE scratch register.
  // The required intrinsics are:
  // __m128i _mm_move_epi64(__m128i);
  // void _mm_storel_epi64(__m128i*, __m128i);
  u64 store_word = cur.raw();

  // Scan all the shadow values and dispatch to 4 categories:
  // same, replace, candidate and race (see comments below).
  // We consider only 3 cases regarding access sizes:
  // equal, intersect and not intersect. Initially I considered
  // larger and smaller as well; that allowed replacing some
  // 'candidates' with 'same' or 'replace', but I think
  // it's just not worth it (performance- and complexity-wise).

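  // The per-slot update logic lives in tsan_update_shadow_word_inl.h and is
  // unrolled manually by textually including it once per shadow slot; idx
  // selects the slot and the included code jumps to the RACE label below
  // when it detects a conflict.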
  Shadow old(0);
  if (kShadowCnt == 1) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 2) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 4) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
    idx = 2;
#include "tsan_update_shadow_word_inl.h"
    idx = 3;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 8) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
    idx = 2;
#include "tsan_update_shadow_word_inl.h"
    idx = 3;
#include "tsan_update_shadow_word_inl.h"
    idx = 4;
#include "tsan_update_shadow_word_inl.h"
    idx = 5;
#include "tsan_update_shadow_word_inl.h"
    idx = 6;
#include "tsan_update_shadow_word_inl.h"
    idx = 7;
#include "tsan_update_shadow_word_inl.h"
  } else {
    CHECK(false);
  }

  // We did not find any races and have already stored
  // the current access info, so we are done.
  if (LIKELY(store_word == 0))
    return;
  // Choose a (pseudo-)random candidate slot and replace it.
  StoreShadow(shadow_mem + (cur.epoch() % kShadowCnt), store_word);
  StatInc(thr, StatShadowReplace);
  return;
 RACE:
  HandleRace(thr, shadow_mem, cur, old);
  return;
}

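// The main entry point for instrumented memory accesses: bumps the thread's
// epoch, builds the shadow value describing this access, appends the event
// to the trace and hands off to MemoryAccessImpl.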
ALWAYS_INLINE
void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite) {
  u64 *shadow_mem = (u64*)MemToShadow(addr);
  DPrintf2("#%d: tsan::OnMemoryAccess: @%p %p size=%d"
      " is_write=%d shadow_mem=%p {%zx, %zx, %zx, %zx}\n",
      (int)thr->fast_state.tid(), (void*)pc, (void*)addr,
      (int)(1 << kAccessSizeLog), kAccessIsWrite, shadow_mem,
      (uptr)shadow_mem[0], (uptr)shadow_mem[1],
      (uptr)shadow_mem[2], (uptr)shadow_mem[3]);
#if TSAN_DEBUG
  if (!IsAppMem(addr)) {
    TsanPrintf("Access to non app mem %zx\n", addr);
    DCHECK(IsAppMem(addr));
  }
  if (!IsShadowMem((uptr)shadow_mem)) {
    TsanPrintf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
    DCHECK(IsShadowMem((uptr)shadow_mem));
  }
#endif

  FastState fast_state = thr->fast_state;
  if (fast_state.GetIgnoreBit())
    return;
  fast_state.IncrementEpoch();
  thr->fast_state = fast_state;
  Shadow cur(fast_state);
  cur.SetAddr0AndSizeLog(addr & 7, kAccessSizeLog);
  cur.SetWrite(kAccessIsWrite);

  // We must not store to the trace if we do not store to the shadow.
  // That is, this call must be moved somewhere below.
  TraceAddEvent(thr, fast_state.epoch(), EventTypeMop, pc);

  MemoryAccessImpl(thr, addr, kAccessSizeLog, kAccessIsWrite, fast_state,
      shadow_mem, cur);
}

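// Sets the first shadow slot of every cell in [addr, addr+size) to val and
// clears the remaining slots; val == 0 resets the range. The unaligned head
// of the range is left untouched (hence the FIXME below).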
static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
                           u64 val) {
  if (size == 0)
    return;
  // FIXME: fix me.
  uptr offset = addr % kShadowCell;
  if (offset) {
    offset = kShadowCell - offset;
    if (size <= offset)
      return;
    addr += offset;
    size -= offset;
  }
  DCHECK_EQ(addr % 8, 0);
  // If a user passes some insane arguments (memset(0)),
  // let it just crash as usual.
  if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
    return;
  (void)thr;
  (void)pc;
  // Some programs mmap hundreds of GBs but actually use only a small part,
  // so it's better to report a false positive on such memory
  // than to hang here senselessly.
  const uptr kMaxResetSize = 1024*1024*1024;
  if (size > kMaxResetSize)
    size = kMaxResetSize;
  size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1);
  u64 *p = (u64*)MemToShadow(addr);
  CHECK(IsShadowMem((uptr)p));
  CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1)));
  // FIXME: may overwrite a part outside the region.
  for (uptr i = 0; i < size * kShadowCnt / kShadowCell;) {
    p[i++] = val;
    for (uptr j = 1; j < kShadowCnt; j++)
      p[i++] = 0;
  }
}

void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  MemoryRangeSet(thr, pc, addr, size, 0);
}

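// First reports races with any concurrent accesses to the range, then
// rewrites its shadow with values marked as freed, so that subsequent
// accesses are reported as races with the free.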
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  MemoryAccessRange(thr, pc, addr, size, true);
  Shadow s(thr->fast_state);
  s.MarkAsFreed();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}

void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  Shadow s(thr->fast_state);
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}

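// FuncEntry/FuncExit maintain the per-thread shadow call stack and mirror
// it into the trace; reports use both to reconstruct the stacks of
// previous accesses.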
void FuncEntry(ThreadState *thr, uptr pc) {
  DCHECK_EQ(thr->in_rtl, 0);
  StatInc(thr, StatFuncEnter);
  DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.tid(), (void*)pc);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeFuncEnter, pc);

  // Shadow stack maintenance can be replaced with
  // stack unwinding during trace switch (which would presumably be faster).
  DCHECK_GE(thr->shadow_stack_pos, &thr->shadow_stack[0]);
#ifndef TSAN_GO
  DCHECK_LT(thr->shadow_stack_pos, &thr->shadow_stack[kShadowStackSize]);
#else
  if (thr->shadow_stack_pos == thr->shadow_stack_end) {
    const int sz = thr->shadow_stack_end - thr->shadow_stack;
    const int newsz = 2 * sz;
    uptr *newstack = (uptr*)internal_alloc(MBlockShadowStack,
        newsz * sizeof(uptr));
    internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
    internal_free(thr->shadow_stack);
    thr->shadow_stack = newstack;
    thr->shadow_stack_pos = newstack + sz;
    thr->shadow_stack_end = newstack + newsz;
  }
#endif
  thr->shadow_stack_pos[0] = pc;
  thr->shadow_stack_pos++;
}

void FuncExit(ThreadState *thr) {
  DCHECK_EQ(thr->in_rtl, 0);
  StatInc(thr, StatFuncExit);
  DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.tid());
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeFuncExit, 0);

  DCHECK_GT(thr->shadow_stack_pos, &thr->shadow_stack[0]);
#ifndef TSAN_GO
  DCHECK_LT(thr->shadow_stack_pos, &thr->shadow_stack[kShadowStackSize]);
#endif
  thr->shadow_stack_pos--;
}

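// Adjusts the nesting counter behind the ignore annotations and mirrors it
// into the fast-path ignore bit; the write flag is currently used only for
// logging, reads and writes are ignored together.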
void IgnoreCtl(ThreadState *thr, bool write, bool begin) {
  DPrintf("#%d: IgnoreCtl(%d, %d)\n", thr->tid, write, begin);
  thr->ignore_reads_and_writes += begin ? 1 : -1;
  CHECK_GE(thr->ignore_reads_and_writes, 0);
  if (thr->ignore_reads_and_writes)
    thr->fast_state.SetIgnoreBit();
  else
    thr->fast_state.ClearIgnoreBit();
}

bool MD5Hash::operator==(const MD5Hash &other) const {
  return hash[0] == other.hash[0] && hash[1] == other.hash[1];
}

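// Build-consistency markers: exactly one function from each group below is
// defined, so an object compiled with mismatching TSAN_DEBUG,
// TSAN_COLLECT_STATS or TSAN_SHADOW_COUNT settings fails to link against
// this runtime instead of silently misbehaving.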
#if TSAN_DEBUG
void build_consistency_debug() {}
#else
void build_consistency_release() {}
#endif

#if TSAN_COLLECT_STATS
void build_consistency_stats() {}
#else
void build_consistency_nostats() {}
#endif

#if TSAN_SHADOW_COUNT == 1
void build_consistency_shadow1() {}
#elif TSAN_SHADOW_COUNT == 2
void build_consistency_shadow2() {}
#elif TSAN_SHADOW_COUNT == 4
void build_consistency_shadow4() {}
#else
void build_consistency_shadow8() {}
#endif

}  // namespace __tsan

#ifndef TSAN_GO
// Must be included in this file to make sure everything is inlined.
#include "tsan_interface_inl.h"
#endif