tsan_rtl.cc revision 43c36e4b055f348d6076e6da44f9cd3e4399568f
//===-- tsan_rtl.cc -------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main file (entry points) for the TSan run-time.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#include "tsan_defs.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"

volatile int __tsan_resumed = 0;

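// With TSAN_OPTIONS=stop_on_start=1 the process spins in Initialize() until
// __tsan_resumed becomes non-zero; a debugger attached to the suspended
// process can then resume it, e.g. "(gdb) call __tsan_resume()" (illustrative
// invocation).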
extern "C" void __tsan_resume() {
  __tsan_resumed = 1;
}

namespace __tsan {

#ifndef TSAN_GO
THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64);
#endif
static char ctx_placeholder[sizeof(Context)] ALIGNED(64);

// Can be overridden by a front-end.
bool CPP_WEAK OnFinalize(bool failed) {
  return failed;
}

static Context *ctx;
Context *CTX() {
  return ctx;
}

static char thread_registry_placeholder[sizeof(ThreadRegistry)];

static ThreadContextBase *CreateThreadContext(u32 tid) {
  // Map thread trace when context is created.
  MapThreadTrace(GetThreadTrace(tid), TraceSize() * sizeof(Event));
  MapThreadTrace(GetThreadTraceHeader(tid), sizeof(Trace));
  new(ThreadTrace(tid)) Trace();
  void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadContext));
  return new(mem) ThreadContext(tid);
}

#ifndef TSAN_GO
static const u32 kThreadQuarantineSize = 16;
#else
static const u32 kThreadQuarantineSize = 64;
#endif

Context::Context()
  : initialized()
  , report_mtx(MutexTypeReport, StatMtxReport)
  , nreported()
  , nmissed_expected()
  , thread_registry(new(thread_registry_placeholder) ThreadRegistry(
      CreateThreadContext, kMaxTid, kThreadQuarantineSize))
  , racy_stacks(MBlockRacyStacks)
  , racy_addresses(MBlockRacyAddresses)
  , fired_suppressions(MBlockRacyAddresses) {
}

// The objects are allocated in TLS, so one may rely on zero-initialization.
ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
                         uptr stk_addr, uptr stk_size,
                         uptr tls_addr, uptr tls_size)
  : fast_state(tid, epoch)
  // Do not touch these, rely on zero initialization,
  // they may be accessed before the ctor.
  // , fast_ignore_reads()
  // , fast_ignore_writes()
  // , in_rtl()
  , shadow_stack_pos(&shadow_stack[0])
#ifndef TSAN_GO
  , jmp_bufs(MBlockJmpBuf)
#endif
  , tid(tid)
  , unique_id(unique_id)
  , stk_addr(stk_addr)
  , stk_size(stk_size)
  , tls_addr(tls_addr)
  , tls_size(tls_size) {
}

static void MemoryProfiler(Context *ctx, fd_t fd, int i) {
  uptr n_threads;
  uptr n_running_threads;
  ctx->thread_registry->GetNumberOfThreads(&n_threads, &n_running_threads);
  InternalScopedBuffer<char> buf(4096);
  internal_snprintf(buf.data(), buf.size(), "%d: nthr=%d nlive=%d\n",
      i, n_threads, n_running_threads);
  internal_write(fd, buf.data(), internal_strlen(buf.data()));
  WriteMemoryProfile(buf.data(), buf.size());
  internal_write(fd, buf.data(), internal_strlen(buf.data()));
}

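// Runs in a separate internal thread: once a second it flushes shadow memory,
// dumps the memory profile and flushes the symbolizer cache, as requested by
// the corresponding flags.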
static void BackgroundThread(void *arg) {
  ScopedInRtl in_rtl;
  Context *ctx = CTX();
  const u64 kMs2Ns = 1000 * 1000;

  fd_t mprof_fd = kInvalidFd;
  if (flags()->profile_memory && flags()->profile_memory[0]) {
    InternalScopedBuffer<char> filename(4096);
    internal_snprintf(filename.data(), filename.size(), "%s.%d",
        flags()->profile_memory, GetPid());
    mprof_fd = OpenFile(filename.data(), true);
    if (mprof_fd == kInvalidFd) {
      Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
          &filename[0]);
    }
  }

  u64 last_flush = NanoTime();
  for (int i = 0; ; i++) {
    SleepForSeconds(1);
    u64 now = NanoTime();

    // Flush memory if requested.
    if (flags()->flush_memory_ms) {
      if (last_flush + flags()->flush_memory_ms * kMs2Ns < now) {
        FlushShadowMemory();
        last_flush = NanoTime();
      }
    }

    // Write memory profile if requested.
    if (mprof_fd != kInvalidFd)
      MemoryProfiler(ctx, mprof_fd, i);

#ifndef TSAN_GO
    // Flush symbolizer cache if requested.
    if (flags()->flush_symbolizer_ms > 0) {
      u64 last = atomic_load(&ctx->last_symbolize_time_ns,
                             memory_order_relaxed);
      if (last != 0 && last + flags()->flush_symbolizer_ms * kMs2Ns < now) {
        Lock l(&ctx->report_mtx);
        SymbolizeFlush();
        atomic_store(&ctx->last_symbolize_time_ns, 0, memory_order_relaxed);
      }
    }
#endif
  }
}

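// Releases the shadow for the range [addr, addr + size) back to the OS.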
void DontNeedShadowFor(uptr addr, uptr size) {
  uptr shadow_beg = MemToShadow(addr);
  uptr shadow_end = MemToShadow(addr + size);
  FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
}

void MapShadow(uptr addr, uptr size) {
  MmapFixedNoReserve(MemToShadow(addr), size * kShadowMultiplier);
}

void MapThreadTrace(uptr addr, uptr size) {
  DPrintf("#0: Mapping trace at %p-%p(0x%zx)\n", addr, addr + size, size);
  CHECK_GE(addr, kTraceMemBegin);
  CHECK_LE(addr + size, kTraceMemBegin + kTraceMemSize);
  if (addr != (uptr)MmapFixedNoReserve(addr, size)) {
    Printf("FATAL: ThreadSanitizer can not mmap thread trace\n");
    Die();
  }
}

void Initialize(ThreadState *thr) {
  // Thread safe because done before all threads exist.
  static bool is_initialized = false;
  if (is_initialized)
    return;
  is_initialized = true;
  SanitizerToolName = "ThreadSanitizer";
  // Install tool-specific callbacks in sanitizer_common.
  SetCheckFailedCallback(TsanCheckFailed);

  ScopedInRtl in_rtl;
#ifndef TSAN_GO
  InitializeAllocator();
#endif
  InitializeInterceptors();
  const char *env = InitializePlatform();
  InitializeMutex();
  InitializeDynamicAnnotations();
  ctx = new(ctx_placeholder) Context;
#ifndef TSAN_GO
  InitializeShadowMemory();
#endif
  InitializeFlags(&ctx->flags, env);
  // Set up the correct file descriptor for error reports.
  if (internal_strcmp(flags()->log_path, "stdout") == 0)
    __sanitizer_set_report_fd(kStdoutFd);
  else if (internal_strcmp(flags()->log_path, "stderr") == 0)
    __sanitizer_set_report_fd(kStderrFd);
  else
    __sanitizer_set_report_path(flags()->log_path);
  InitializeSuppressions();
#ifndef TSAN_GO
  // Initialize external symbolizer before internal threads are started.
  const char *external_symbolizer = flags()->external_symbolizer_path;
  if (external_symbolizer != 0 && external_symbolizer[0] != '\0') {
    if (!InitializeExternalSymbolizer(external_symbolizer)) {
      Printf("Failed to start external symbolizer: '%s'\n",
             external_symbolizer);
      Die();
    }
  }
#endif
  internal_start_thread(&BackgroundThread, 0);

  if (ctx->flags.verbosity)
    Printf("***** Running under ThreadSanitizer v2 (pid %d) *****\n",
               GetPid());

  // Initialize thread 0.
  int tid = ThreadCreate(thr, 0, 0, true);
  CHECK_EQ(tid, 0);
  ThreadStart(thr, tid, GetPid());
  CHECK_EQ(thr->in_rtl, 1);
  ctx->initialized = true;

  if (flags()->stop_on_start) {
    Printf("ThreadSanitizer is suspended at startup (pid %d)."
           " Call __tsan_resume().\n",
           GetPid());
    while (__tsan_resumed == 0) {}
  }
}

int Finalize(ThreadState *thr) {
  ScopedInRtl in_rtl;
  Context *ctx = __tsan::ctx;
  bool failed = false;

  if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
    SleepForMillis(flags()->atexit_sleep_ms);

  // Wait for pending reports.
  ctx->report_mtx.Lock();
  ctx->report_mtx.Unlock();

#ifndef TSAN_GO
  if (ctx->flags.verbosity)
    AllocatorPrintStats();
#endif

  ThreadFinalize(thr);

  if (ctx->nreported) {
    failed = true;
#ifndef TSAN_GO
    Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
#else
    Printf("Found %d data race(s)\n", ctx->nreported);
#endif
  }

  if (ctx->nmissed_expected) {
    failed = true;
    Printf("ThreadSanitizer: missed %d expected races\n",
        ctx->nmissed_expected);
  }

  if (flags()->print_suppressions)
    PrintMatchedSuppressions();
#ifndef TSAN_GO
  if (flags()->print_benign)
    PrintMatchedBenignRaces();
#endif

  failed = OnFinalize(failed);

  StatAggregate(ctx->stat, thr->stat);
  StatOutput(ctx->stat);
  return failed ? flags()->exitcode : 0;
}

#ifndef TSAN_GO
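// Returns a stack depot id of the current call stack; if pc is non-zero,
// it is temporarily pushed on top of the shadow stack for the duration of
// the call.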
u32 CurrentStackId(ThreadState *thr, uptr pc) {
  if (thr->shadow_stack_pos == 0)  // May happen during bootstrap.
    return 0;
  if (pc) {
    thr->shadow_stack_pos[0] = pc;
    thr->shadow_stack_pos++;
  }
  u32 id = StackDepotPut(thr->shadow_stack,
                         thr->shadow_stack_pos - thr->shadow_stack);
  if (pc)
    thr->shadow_stack_pos--;
  return id;
}
#endif

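// Fills the header of the trace part that the thread is switching to:
// the starting epoch, call stack and mutex set, so that a report can later
// restore the state at the beginning of the part.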
void TraceSwitch(ThreadState *thr) {
  thr->nomalloc++;
  ScopedInRtl in_rtl;
  Trace *thr_trace = ThreadTrace(thr->tid);
  Lock l(&thr_trace->mtx);
  unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts();
  TraceHeader *hdr = &thr_trace->headers[trace];
  hdr->epoch0 = thr->fast_state.epoch();
  hdr->stack0.ObtainCurrent(thr, 0);
  hdr->mset0 = thr->mset;
  thr->nomalloc--;
}

Trace *ThreadTrace(int tid) {
  return (Trace*)GetThreadTraceHeader(tid);
}

uptr TraceTopPC(ThreadState *thr) {
  Event *events = (Event*)GetThreadTrace(thr->tid);
  uptr pc = events[thr->fast_state.GetTracePos()];
  return pc;
}

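// Number of events in a per-thread trace:
// 2^(kTracePartSizeBits + history_size + 1).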
uptr TraceSize() {
  return (uptr)(1ull << (kTracePartSizeBits + flags()->history_size + 1));
}

uptr TraceParts() {
  return TraceSize() / kTracePartSize;
}

#ifndef TSAN_GO
extern "C" void __tsan_trace_switch() {
  TraceSwitch(cur_thread());
}

extern "C" void __tsan_report_race() {
  ReportRace(cur_thread());
}
#endif

ALWAYS_INLINE
Shadow LoadShadow(u64 *p) {
  u64 raw = atomic_load((atomic_uint64_t*)p, memory_order_relaxed);
  return Shadow(raw);
}

ALWAYS_INLINE
void StoreShadow(u64 *sp, u64 s) {
  atomic_store((atomic_uint64_t*)sp, s, memory_order_relaxed);
}

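// Stores the current access (*s) into the shadow slot and zeroes *s,
// so that the value is written into at most one slot.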
ALWAYS_INLINE
void StoreIfNotYetStored(u64 *sp, u64 *s) {
  StoreShadow(sp, *s);
  *s = 0;
}

static inline void HandleRace(ThreadState *thr, u64 *shadow_mem,
                              Shadow cur, Shadow old) {
  thr->racy_state[0] = cur.raw();
  thr->racy_state[1] = old.raw();
  thr->racy_shadow_addr = shadow_mem;
#ifndef TSAN_GO
  HACKY_CALL(__tsan_report_race);
#else
  ReportRace(thr);
#endif
}

static inline bool OldIsInSameSynchEpoch(Shadow old, ThreadState *thr) {
  return old.epoch() >= thr->fast_synch_epoch;
}

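// The old access happens-before the current one if the current thread's
// vector clock entry for the old access's thread has reached the old
// access's epoch.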
static inline bool HappensBefore(Shadow old, ThreadState *thr) {
  return thr->clock.get(old.TidWithIgnore()) >= old.epoch();
}

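// Updates the kShadowCnt shadow slots of the cell with the current access
// and reports a race if a conflicting earlier access is found.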
// FIXME: should be ALWAYS_INLINE for performance reasons?
void MemoryAccessImpl(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    u64 *shadow_mem, Shadow cur) {
  StatInc(thr, StatMop);
  StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
  StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));

  // This potentially can live in an MMX/SSE scratch register.
  // The required intrinsics are:
  // __m128i _mm_move_epi64(__m128i*);
  // _mm_storel_epi64(u64*, __m128i);
  u64 store_word = cur.raw();

  // Scan all the shadow values and dispatch to 4 categories:
  // same, replace, candidate and race (see comments below).
  // We consider only 3 cases regarding access sizes:
  // equal, intersect and not intersect. Initially, larger and smaller
  // sizes were considered as well; that allowed replacing some
  // 'candidates' with 'same' or 'replace', but it is just not worth it
  // (performance- and complexity-wise).

  Shadow old(0);
  if (kShadowCnt == 1) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 2) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 4) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
    idx = 2;
#include "tsan_update_shadow_word_inl.h"
    idx = 3;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 8) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
    idx = 2;
#include "tsan_update_shadow_word_inl.h"
    idx = 3;
#include "tsan_update_shadow_word_inl.h"
    idx = 4;
#include "tsan_update_shadow_word_inl.h"
    idx = 5;
#include "tsan_update_shadow_word_inl.h"
    idx = 6;
#include "tsan_update_shadow_word_inl.h"
    idx = 7;
#include "tsan_update_shadow_word_inl.h"
  } else {
    CHECK(false);
  }

  // We did not find any races and have already stored
  // the current access info, so we are done.
  if (LIKELY(store_word == 0))
    return;
  // Choose a random candidate slot and replace it.
  StoreShadow(shadow_mem + (cur.epoch() % kShadowCnt), store_word);
  StatInc(thr, StatShadowReplace);
  return;
 RACE:
  HandleRace(thr, shadow_mem, cur, old);
  return;
}

// FIXME: should be ALWAYS_INLINE for performance reasons?
void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic) {
  u64 *shadow_mem = (u64*)MemToShadow(addr);
  DPrintf2("#%d: MemoryAccess: @%p %p size=%d"
      " is_write=%d shadow_mem=%p {%zx, %zx, %zx, %zx}\n",
      (int)thr->fast_state.tid(), (void*)pc, (void*)addr,
      (int)(1 << kAccessSizeLog), kAccessIsWrite, shadow_mem,
      (uptr)shadow_mem[0], (uptr)shadow_mem[1],
      (uptr)shadow_mem[2], (uptr)shadow_mem[3]);
#if TSAN_DEBUG
  if (!IsAppMem(addr)) {
    Printf("Access to non app mem %zx\n", addr);
    DCHECK(IsAppMem(addr));
  }
  if (!IsShadowMem((uptr)shadow_mem)) {
    Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
    DCHECK(IsShadowMem((uptr)shadow_mem));
  }
#endif

  if (*shadow_mem == kShadowRodata) {
    // Access to .rodata section, no races here.
    // Measurements show that it can be 10-20% of all memory accesses.
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopRodata);
    return;
  }

  FastState fast_state = thr->fast_state;
  if (fast_state.GetIgnoreBit())
    return;
  fast_state.IncrementEpoch();
  thr->fast_state = fast_state;
  Shadow cur(fast_state);
  cur.SetAddr0AndSizeLog(addr & 7, kAccessSizeLog);
  cur.SetWrite(kAccessIsWrite);
  cur.SetAtomic(kIsAtomic);

  // We must not store to the trace if we do not store to the shadow.
  // That is, this call must be moved somewhere below.
  TraceAddEvent(thr, fast_state, EventTypeMop, pc);

  MemoryAccessImpl(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
      shadow_mem, cur);
}

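// Writes val into the first shadow slot of each cell in [addr, addr + size)
// and zeroes the remaining slots; for large regions only the edges are
// written and the middle is remapped (and thus reset to zero on demand).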
static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
                           u64 val) {
  (void)thr;
  (void)pc;
  if (size == 0)
    return;
  // FIXME: fix me.
  uptr offset = addr % kShadowCell;
  if (offset) {
    offset = kShadowCell - offset;
    if (size <= offset)
      return;
    addr += offset;
    size -= offset;
  }
  DCHECK_EQ(addr % 8, 0);
  // If a user passes some insane arguments (memset(0)),
  // let it just crash as usual.
  if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
    return;
  // Don't want to touch lots of shadow memory.
  // If a program maps 10MB stack, there is no need to reset the whole range.
  size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1);
  if (size < 64*1024) {
    u64 *p = (u64*)MemToShadow(addr);
    CHECK(IsShadowMem((uptr)p));
    CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1)));
    // FIXME: may overwrite a part outside the region
    for (uptr i = 0; i < size / kShadowCell * kShadowCnt;) {
      p[i++] = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        p[i++] = 0;
    }
  } else {
    // The region is big, reset only beginning and end.
    const uptr kPageSize = 4096;
    u64 *begin = (u64*)MemToShadow(addr);
    u64 *end = begin + size / kShadowCell * kShadowCnt;
    u64 *p = begin;
    // Set at least the first kPageSize/2, then continue to a page boundary.
    while ((p < begin + kPageSize / kShadowSize / 2) || ((uptr)p % kPageSize)) {
      *p++ = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        *p++ = 0;
    }
    // Reset middle part.
    u64 *p1 = p;
    p = RoundDown(end, kPageSize);
    UnmapOrDie((void*)p1, (uptr)p - (uptr)p1);
    MmapFixedNoReserve((uptr)p1, (uptr)p - (uptr)p1);
    // Set the ending.
    while (p < end) {
      *p++ = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        *p++ = 0;
    }
  }
}

void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  MemoryRangeSet(thr, pc, addr, size, 0);
}

void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  // Processing more than 1k (4k of shadow) is expensive,
  // can cause excessive memory consumption (the user does not necessarily
  // touch the whole range) and is most likely unnecessary.
  if (size > 1024)
    size = 1024;
  CHECK_EQ(thr->is_freeing, false);
  thr->is_freeing = true;
  MemoryAccessRange(thr, pc, addr, size, true);
  thr->is_freeing = false;
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.MarkAsFreed();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}

void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}

// FIXME: should be ALWAYS_INLINE for performance reasons?
void FuncEntry(ThreadState *thr, uptr pc) {
  DCHECK_EQ(thr->in_rtl, 0);
  StatInc(thr, StatFuncEnter);
  DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.tid(), (void*)pc);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeFuncEnter, pc);

  // Shadow stack maintenance can be replaced with
  // stack unwinding during trace switch (which should presumably be faster).
  DCHECK_GE(thr->shadow_stack_pos, &thr->shadow_stack[0]);
#ifndef TSAN_GO
  DCHECK_LT(thr->shadow_stack_pos, &thr->shadow_stack[kShadowStackSize]);
#else
  if (thr->shadow_stack_pos == thr->shadow_stack_end) {
    const int sz = thr->shadow_stack_end - thr->shadow_stack;
    const int newsz = 2 * sz;
    uptr *newstack = (uptr*)internal_alloc(MBlockShadowStack,
        newsz * sizeof(uptr));
    internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
    internal_free(thr->shadow_stack);
    thr->shadow_stack = newstack;
    thr->shadow_stack_pos = newstack + sz;
    thr->shadow_stack_end = newstack + newsz;
  }
#endif
  thr->shadow_stack_pos[0] = pc;
  thr->shadow_stack_pos++;
}

// FIXME: should be ALWAYS_INLINE for performance reasons?
void FuncExit(ThreadState *thr) {
  DCHECK_EQ(thr->in_rtl, 0);
  StatInc(thr, StatFuncExit);
  DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.tid());
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeFuncExit, 0);

  DCHECK_GT(thr->shadow_stack_pos, &thr->shadow_stack[0]);
#ifndef TSAN_GO
  DCHECK_LT(thr->shadow_stack_pos, &thr->shadow_stack[kShadowStackSize]);
#endif
  thr->shadow_stack_pos--;
}

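// Maintains the nesting counter of ignore-begin/ignore-end calls and mirrors
// it into the ignore bit of the thread's fast state.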
void IgnoreCtl(ThreadState *thr, bool write, bool begin) {
  DPrintf("#%d: IgnoreCtl(%d, %d)\n", thr->tid, write, begin);
  thr->ignore_reads_and_writes += begin ? 1 : -1;
  CHECK_GE(thr->ignore_reads_and_writes, 0);
  if (thr->ignore_reads_and_writes)
    thr->fast_state.SetIgnoreBit();
  else
    thr->fast_state.ClearIgnoreBit();
}

bool MD5Hash::operator==(const MD5Hash &other) const {
  return hash[0] == other.hash[0] && hash[1] == other.hash[1];
}

#if TSAN_DEBUG
void build_consistency_debug() {}
#else
void build_consistency_release() {}
#endif

#if TSAN_COLLECT_STATS
void build_consistency_stats() {}
#else
void build_consistency_nostats() {}
#endif

#if TSAN_SHADOW_COUNT == 1
void build_consistency_shadow1() {}
#elif TSAN_SHADOW_COUNT == 2
void build_consistency_shadow2() {}
#elif TSAN_SHADOW_COUNT == 4
void build_consistency_shadow4() {}
#else
void build_consistency_shadow8() {}
#endif

}  // namespace __tsan

#ifndef TSAN_GO
// Must be included in this file to make sure everything is inlined.
#include "tsan_interface_inl.h"
#endif