tsan_rtl.cc revision 68230a12bbd22c9402dd8f9af027fcb2e119f978
//===-- tsan_rtl.cc -------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main file (entry points) for the TSan run-time.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#include "tsan_defs.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "tsan_suppressions.h"

volatile int __tsan_resumed = 0;

extern "C" void __tsan_resume() {
  __tsan_resumed = 1;
}

namespace __tsan {

#ifndef TSAN_GO
THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64);
#endif
static char ctx_placeholder[sizeof(Context)] ALIGNED(64);

static Context *ctx;
Context *CTX() {
  return ctx;
}

Context::Context()
  : initialized()
  , report_mtx(MutexTypeReport, StatMtxReport)
  , nreported()
  , nmissed_expected()
  , thread_mtx(MutexTypeThreads, StatMtxThreads)
  , racy_stacks(MBlockRacyStacks)
  , racy_addresses(MBlockRacyAddresses)
  , fired_suppressions(MBlockRacyAddresses) {
}

// The objects are allocated in TLS, so one may rely on zero-initialization.
ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
                         uptr stk_addr, uptr stk_size,
                         uptr tls_addr, uptr tls_size)
  : fast_state(tid, epoch)
  // Do not touch these, rely on zero initialization,
  // they may be accessed before the ctor.
  // , fast_ignore_reads()
  // , fast_ignore_writes()
  // , in_rtl()
  , shadow_stack_pos(&shadow_stack[0])
  , tid(tid)
  , unique_id(unique_id)
  , stk_addr(stk_addr)
  , stk_size(stk_size)
  , tls_addr(tls_addr)
  , tls_size(tls_size) {
}

ThreadContext::ThreadContext(int tid)
  : tid(tid)
  , unique_id()
  , os_id()
  , user_id()
  , thr()
  , status(ThreadStatusInvalid)
  , detached()
  , reuse_count()
  , epoch0()
  , epoch1()
  , dead_info()
  , dead_next()
  , name() {
}

static void WriteMemoryProfile(char *buf, uptr buf_size, int num) {
  uptr shadow = GetShadowMemoryConsumption();

  int nthread = 0;
  int nlivethread = 0;
  uptr threadmem = 0;
  {
    Lock l(&ctx->thread_mtx);
    for (unsigned i = 0; i < kMaxTid; i++) {
      ThreadContext *tctx = ctx->threads[i];
      if (tctx == 0)
        continue;
      nthread += 1;
      threadmem += sizeof(ThreadContext);
      if (tctx->status != ThreadStatusRunning)
        continue;
      nlivethread += 1;
      threadmem += sizeof(ThreadState);
    }
  }

  uptr nsync = 0;
  uptr syncmem = CTX()->synctab.GetMemoryConsumption(&nsync);

  internal_snprintf(buf, buf_size, "%d: shadow=%zuMB"
                                   " thread=%zuMB(total=%d/live=%d)"
                                   " sync=%zuMB(cnt=%zu)\n",
    num,
    shadow >> 20,
    threadmem >> 20, nthread, nlivethread,
    syncmem >> 20, nsync);
}

static void MemoryProfileThread(void *arg) {
  ScopedInRtl in_rtl;
  fd_t fd = (fd_t)(uptr)arg;
  for (int i = 0; ; i++) {
    InternalScopedBuffer<char> buf(4096);
    WriteMemoryProfile(buf.data(), buf.size(), i);
    internal_write(fd, buf.data(), internal_strlen(buf.data()));
    SleepForSeconds(1);
  }
}

static void InitializeMemoryProfile() {
  if (flags()->profile_memory == 0 || flags()->profile_memory[0] == 0)
    return;
  InternalScopedBuffer<char> filename(4096);
  internal_snprintf(filename.data(), filename.size(), "%s.%d",
      flags()->profile_memory, GetPid());
  fd_t fd = internal_open(filename.data(), true);
  if (fd == kInvalidFd) {
    Printf("Failed to open memory profile file '%s'\n", &filename[0]);
    Die();
  }
  internal_start_thread(&MemoryProfileThread, (void*)(uptr)fd);
}

static void MemoryFlushThread(void *arg) {
  ScopedInRtl in_rtl;
  for (int i = 0; ; i++) {
    SleepForMillis(flags()->flush_memory_ms);
    FlushShadowMemory();
  }
}

static void InitializeMemoryFlush() {
  if (flags()->flush_memory_ms == 0)
    return;
  if (flags()->flush_memory_ms < 100)
    flags()->flush_memory_ms = 100;
  internal_start_thread(&MemoryFlushThread, 0);
}

void MapShadow(uptr addr, uptr size) {
  MmapFixedNoReserve(MemToShadow(addr), size * kShadowMultiplier);
}

void Initialize(ThreadState *thr) {
  // Thread safe because done before all threads exist.
  static bool is_initialized = false;
  if (is_initialized)
    return;
  is_initialized = true;
  // Install tool-specific callbacks in sanitizer_common.
  SetCheckFailedCallback(TsanCheckFailed);

  ScopedInRtl in_rtl;
#ifndef TSAN_GO
  InitializeAllocator();
#endif
  InitializeInterceptors();
  const char *env = InitializePlatform();
  InitializeMutex();
  InitializeDynamicAnnotations();
  ctx = new(ctx_placeholder) Context;
#ifndef TSAN_GO
  InitializeShadowMemory();
#endif
  ctx->dead_list_size = 0;
  ctx->dead_list_head = 0;
  ctx->dead_list_tail = 0;
  InitializeFlags(&ctx->flags, env);
  // Setup correct file descriptor for error reports.
  if (internal_strcmp(flags()->log_path, "stdout") == 0)
    __sanitizer_set_report_fd(kStdoutFd);
  else if (internal_strcmp(flags()->log_path, "stderr") == 0)
    __sanitizer_set_report_fd(kStderrFd);
  else
    __sanitizer_set_report_path(flags()->log_path);
  InitializeSuppressions();
#ifndef TSAN_GO
  // Initialize external symbolizer before internal threads are started.
  const char *external_symbolizer = flags()->external_symbolizer_path;
  if (external_symbolizer != 0 && external_symbolizer[0] != '\0') {
    if (!InitializeExternalSymbolizer(external_symbolizer)) {
      Printf("Failed to start external symbolizer: '%s'\n",
             external_symbolizer);
      Die();
    }
  }
#endif
  InitializeMemoryProfile();
  InitializeMemoryFlush();

  if (ctx->flags.verbosity)
    Printf("***** Running under ThreadSanitizer v2 (pid %d) *****\n",
               GetPid());

  // Initialize thread 0.
  ctx->thread_seq = 0;
  int tid = ThreadCreate(thr, 0, 0, true);
  CHECK_EQ(tid, 0);
  ThreadStart(thr, tid, GetPid());
  CHECK_EQ(thr->in_rtl, 1);
  ctx->initialized = true;

  if (flags()->stop_on_start) {
    Printf("ThreadSanitizer is suspended at startup (pid %d)."
           " Call __tsan_resume().\n",
           GetPid());
    while (__tsan_resumed == 0);
  }
}

int Finalize(ThreadState *thr) {
  ScopedInRtl in_rtl;
  Context *ctx = __tsan::ctx;
  bool failed = false;

  if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
    SleepForMillis(flags()->atexit_sleep_ms);

  // Wait for pending reports.
  ctx->report_mtx.Lock();
  ctx->report_mtx.Unlock();

  ThreadFinalize(thr);

  if (ctx->nreported) {
    failed = true;
#ifndef TSAN_GO
    Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
#else
    Printf("Found %d data race(s)\n", ctx->nreported);
#endif
  }

  if (ctx->nmissed_expected) {
    failed = true;
    Printf("ThreadSanitizer: missed %d expected races\n",
        ctx->nmissed_expected);
  }

  StatAggregate(ctx->stat, thr->stat);
  StatOutput(ctx->stat);
  return failed ? flags()->exitcode : 0;
}

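// Returns a compact id for the current call stack via the stack depot.
// If pc is non-zero it is temporarily pushed onto the shadow stack so that
// the saved stack also contains the program counter of the current operation.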
#ifndef TSAN_GO
u32 CurrentStackId(ThreadState *thr, uptr pc) {
  if (thr->shadow_stack_pos == 0)  // May happen during bootstrap.
    return 0;
  if (pc) {
    thr->shadow_stack_pos[0] = pc;
    thr->shadow_stack_pos++;
  }
  u32 id = StackDepotPut(thr->shadow_stack,
                         thr->shadow_stack_pos - thr->shadow_stack);
  if (pc)
    thr->shadow_stack_pos--;
  return id;
}
#endif

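// Invoked when the trace position crosses into the next trace part.
// The part's header captures the starting epoch, the current call stack and
// the current mutex set, which reports later use to reconstruct the state
// at any event stored in that part.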
void TraceSwitch(ThreadState *thr) {
  thr->nomalloc++;
  ScopedInRtl in_rtl;
  Lock l(&thr->trace.mtx);
  unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts();
  TraceHeader *hdr = &thr->trace.headers[trace];
  hdr->epoch0 = thr->fast_state.epoch();
  hdr->stack0.ObtainCurrent(thr, 0);
  hdr->mset0 = thr->mset;
  thr->nomalloc--;
}

uptr TraceTopPC(ThreadState *thr) {
  Event *events = (Event*)GetThreadTrace(thr->tid);
  uptr pc = events[thr->fast_state.GetTracePos()];
  return pc;
}

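// Total number of events in a thread's trace, a power of two derived from
// the history_size flag.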
uptr TraceSize() {
  return (uptr)(1ull << (kTracePartSizeBits + flags()->history_size + 1));
}

uptr TraceParts() {
  return TraceSize() / kTracePartSize;
}

#ifndef TSAN_GO
extern "C" void __tsan_trace_switch() {
  TraceSwitch(cur_thread());
}

extern "C" void __tsan_report_race() {
  ReportRace(cur_thread());
}
#endif

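// Shadow cells are read and updated concurrently by all threads without
// locks, so they are accessed with relaxed atomic loads and stores.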
ALWAYS_INLINE
static Shadow LoadShadow(u64 *p) {
  u64 raw = atomic_load((atomic_uint64_t*)p, memory_order_relaxed);
  return Shadow(raw);
}

ALWAYS_INLINE
static void StoreShadow(u64 *sp, u64 s) {
  atomic_store((atomic_uint64_t*)sp, s, memory_order_relaxed);
}

ALWAYS_INLINE
static void StoreIfNotYetStored(u64 *sp, u64 *s) {
  StoreShadow(sp, *s);
  *s = 0;
}

static inline void HandleRace(ThreadState *thr, u64 *shadow_mem,
                              Shadow cur, Shadow old) {
  thr->racy_state[0] = cur.raw();
  thr->racy_state[1] = old.raw();
  thr->racy_shadow_addr = shadow_mem;
#ifndef TSAN_GO
  HACKY_CALL(__tsan_report_race);
#else
  ReportRace(thr);
#endif
}

static inline bool BothReads(Shadow s, int kAccessIsWrite) {
  return !kAccessIsWrite && !s.is_write();
}

static inline bool OldIsRWNotWeaker(Shadow old, int kAccessIsWrite) {
  return old.is_write() || !kAccessIsWrite;
}

static inline bool OldIsRWWeakerOrEqual(Shadow old, int kAccessIsWrite) {
  return !old.is_write() || kAccessIsWrite;
}

static inline bool OldIsInSameSynchEpoch(Shadow old, ThreadState *thr) {
  return old.epoch() >= thr->fast_synch_epoch;
}

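// The old access happens-before the current one if the current thread's
// vector clock entry for the old access's thread is at least the old epoch.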
static inline bool HappensBefore(Shadow old, ThreadState *thr) {
  return thr->clock.get(old.TidWithIgnore()) >= old.epoch();
}

ALWAYS_INLINE
void MemoryAccessImpl(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite,
    u64 *shadow_mem, Shadow cur) {
  StatInc(thr, StatMop);
  StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
  StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));

  // This potentially can live in an MMX/SSE scratch register.
  // The required intrinsics are:
  // __m128i _mm_move_epi64(__m128i*);
  // _mm_storel_epi64(u64*, __m128i);
  u64 store_word = cur.raw();

  // Scan all the shadow values and dispatch to 4 categories:
  // same, replace, candidate and race (see comments below).
  // We consider only 3 cases regarding access sizes:
  // equal, intersect and not intersect. Initially I also considered
  // larger and smaller accesses; that allowed replacing some
  // 'candidates' with 'same' or 'replace', but I think
  // it's just not worth it (performance- and complexity-wise).

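  // tsan_update_shadow_word_inl.h is expanded once per shadow slot (idx).
  // For each slot it compares 'cur' with the stored shadow value and either
  // leaves it alone, overwrites it (zeroing store_word to signal that the
  // current access has been stored), or jumps to RACE: below.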
  Shadow old(0);
  if (kShadowCnt == 1) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 2) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 4) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
    idx = 2;
#include "tsan_update_shadow_word_inl.h"
    idx = 3;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 8) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
    idx = 2;
#include "tsan_update_shadow_word_inl.h"
    idx = 3;
#include "tsan_update_shadow_word_inl.h"
    idx = 4;
#include "tsan_update_shadow_word_inl.h"
    idx = 5;
#include "tsan_update_shadow_word_inl.h"
    idx = 6;
#include "tsan_update_shadow_word_inl.h"
    idx = 7;
#include "tsan_update_shadow_word_inl.h"
  } else {
    CHECK(false);
  }

  // We did not find any races and have already stored
  // the current access info, so we are done.
  if (LIKELY(store_word == 0))
    return;
  // Choose a random candidate slot and replace it.
  StoreShadow(shadow_mem + (cur.epoch() % kShadowCnt), store_word);
  StatInc(thr, StatShadowReplace);
  return;
 RACE:
  HandleRace(thr, shadow_mem, cur, old);
  return;
}

ALWAYS_INLINE
void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite) {
  u64 *shadow_mem = (u64*)MemToShadow(addr);
  DPrintf2("#%d: MemoryAccess: @%p %p size=%d"
      " is_write=%d shadow_mem=%p {%zx, %zx, %zx, %zx}\n",
      (int)thr->fast_state.tid(), (void*)pc, (void*)addr,
      (int)(1 << kAccessSizeLog), kAccessIsWrite, shadow_mem,
      (uptr)shadow_mem[0], (uptr)shadow_mem[1],
      (uptr)shadow_mem[2], (uptr)shadow_mem[3]);
#if TSAN_DEBUG
  if (!IsAppMem(addr)) {
    Printf("Access to non app mem %zx\n", addr);
    DCHECK(IsAppMem(addr));
  }
  if (!IsShadowMem((uptr)shadow_mem)) {
    Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
    DCHECK(IsShadowMem((uptr)shadow_mem));
  }
#endif

  FastState fast_state = thr->fast_state;
  if (fast_state.GetIgnoreBit())
    return;
  fast_state.IncrementEpoch();
  thr->fast_state = fast_state;
  Shadow cur(fast_state);
  cur.SetAddr0AndSizeLog(addr & 7, kAccessSizeLog);
  cur.SetWrite(kAccessIsWrite);

  // We must not store to the trace if we do not store to the shadow.
  // That is, this call must be moved somewhere below.
  TraceAddEvent(thr, fast_state, EventTypeMop, pc);

  MemoryAccessImpl(thr, addr, kAccessSizeLog, kAccessIsWrite,
      shadow_mem, cur);
}

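// Fills the shadow of [addr, addr + size) with 'val': the range is trimmed
// to whole shadow cells and capped at kMaxResetSize, then the first slot of
// every cell is set to 'val' and the remaining slots are zeroed.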
static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
                           u64 val) {
  if (size == 0)
    return;
  // FIXME: fix me.
  uptr offset = addr % kShadowCell;
  if (offset) {
    offset = kShadowCell - offset;
    if (size <= offset)
      return;
    addr += offset;
    size -= offset;
  }
  DCHECK_EQ(addr % 8, 0);
  // If a user passes some insane arguments (memset(0)),
  // let it just crash as usual.
  if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
    return;
  (void)thr;
  (void)pc;
  // Some programs mmap hundreds of GBs but actually use only a small part,
  // so it's better to report a false positive on the memory
  // than to hang here senselessly.
  const uptr kMaxResetSize = 4ull*1024*1024*1024;
  if (size > kMaxResetSize)
    size = kMaxResetSize;
  size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1);
  u64 *p = (u64*)MemToShadow(addr);
  CHECK(IsShadowMem((uptr)p));
  CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1)));
  // FIXME: may overwrite a part outside the region
  for (uptr i = 0; i < size * kShadowCnt / kShadowCell;) {
    p[i++] = val;
    for (uptr j = 1; j < kShadowCnt; j++)
      p[i++] = 0;
  }
}

void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  MemoryRangeSet(thr, pc, addr, size, 0);
}

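// Marks a freed memory range: the range is first reported as written by the
// current thread, then each shadow cell is overwritten with a synthetic
// write shadow value that has the freed bit set.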
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  MemoryAccessRange(thr, pc, addr, size, true);
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.MarkAsFreed();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}

void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}

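// Instrumented function entry: adds a FuncEnter event to the trace and
// pushes pc onto the per-thread shadow call stack. In the Go build the
// shadow stack grows on demand; in the C++ build its capacity is fixed.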
ALWAYS_INLINE
void FuncEntry(ThreadState *thr, uptr pc) {
  DCHECK_EQ(thr->in_rtl, 0);
  StatInc(thr, StatFuncEnter);
  DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.tid(), (void*)pc);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeFuncEnter, pc);

  // Shadow stack maintenance can be replaced with
  // stack unwinding during trace switch (which presumably must be faster).
  DCHECK_GE(thr->shadow_stack_pos, &thr->shadow_stack[0]);
#ifndef TSAN_GO
  DCHECK_LT(thr->shadow_stack_pos, &thr->shadow_stack[kShadowStackSize]);
#else
  if (thr->shadow_stack_pos == thr->shadow_stack_end) {
    const int sz = thr->shadow_stack_end - thr->shadow_stack;
    const int newsz = 2 * sz;
    uptr *newstack = (uptr*)internal_alloc(MBlockShadowStack,
        newsz * sizeof(uptr));
    internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
    internal_free(thr->shadow_stack);
    thr->shadow_stack = newstack;
    thr->shadow_stack_pos = newstack + sz;
    thr->shadow_stack_end = newstack + newsz;
  }
#endif
  thr->shadow_stack_pos[0] = pc;
  thr->shadow_stack_pos++;
}

ALWAYS_INLINE
void FuncExit(ThreadState *thr) {
  DCHECK_EQ(thr->in_rtl, 0);
  StatInc(thr, StatFuncExit);
  DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.tid());
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeFuncExit, 0);

  DCHECK_GT(thr->shadow_stack_pos, &thr->shadow_stack[0]);
#ifndef TSAN_GO
  DCHECK_LT(thr->shadow_stack_pos, &thr->shadow_stack[kShadowStackSize]);
#endif
  thr->shadow_stack_pos--;
}

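// Adjusts the nesting counter of ignore regions. While the counter is
// non-zero the ignore bit is set in fast_state, which makes MemoryAccess()
// return before updating the shadow.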
void IgnoreCtl(ThreadState *thr, bool write, bool begin) {
  DPrintf("#%d: IgnoreCtl(%d, %d)\n", thr->tid, write, begin);
  thr->ignore_reads_and_writes += begin ? 1 : -1;
  CHECK_GE(thr->ignore_reads_and_writes, 0);
  if (thr->ignore_reads_and_writes)
    thr->fast_state.SetIgnoreBit();
  else
    thr->fast_state.ClearIgnoreBit();
}

bool MD5Hash::operator==(const MD5Hash &other) const {
  return hash[0] == other.hash[0] && hash[1] == other.hash[1];
}

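// The build_consistency_* functions below are presumably referenced elsewhere
// so that mixing object files compiled with mismatching configurations
// (debug/release, stats, shadow count) fails at link time.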
#if TSAN_DEBUG
void build_consistency_debug() {}
#else
void build_consistency_release() {}
#endif

#if TSAN_COLLECT_STATS
void build_consistency_stats() {}
#else
void build_consistency_nostats() {}
#endif

#if TSAN_SHADOW_COUNT == 1
void build_consistency_shadow1() {}
#elif TSAN_SHADOW_COUNT == 2
void build_consistency_shadow2() {}
#elif TSAN_SHADOW_COUNT == 4
void build_consistency_shadow4() {}
#else
void build_consistency_shadow8() {}
#endif

}  // namespace __tsan

#ifndef TSAN_GO
// Must be included in this file to make sure everything is inlined.
#include "tsan_interface_inl.h"
#endif