tsan_rtl.cc revision b48c2b2072c8cc17dc1579a6b20ce6c2a575821d
//===-- tsan_rtl.cc -------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main file (entry points) for the TSan run-time.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#include "tsan_defs.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"

volatile int __tsan_resumed = 0;

extern "C" void __tsan_resume() {
  __tsan_resumed = 1;
}

namespace __tsan {

#ifndef TSAN_GO
THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64);
#endif
static char ctx_placeholder[sizeof(Context)] ALIGNED(64);

// Can be overridden by a front-end.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnFinalize(bool failed);
#else
bool WEAK OnFinalize(bool failed) {
  return failed;
}
#endif

static Context *ctx;
Context *CTX() {
  return ctx;
}

static char thread_registry_placeholder[sizeof(ThreadRegistry)];

static ThreadContextBase *CreateThreadContext(u32 tid) {
  // Map thread trace when context is created.
  MapThreadTrace(GetThreadTrace(tid), TraceSize() * sizeof(Event));
  MapThreadTrace(GetThreadTraceHeader(tid), sizeof(Trace));
  new(ThreadTrace(tid)) Trace();
  void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadContext));
  return new(mem) ThreadContext(tid);
}

#ifndef TSAN_GO
static const u32 kThreadQuarantineSize = 16;
#else
static const u32 kThreadQuarantineSize = 64;
#endif

Context::Context()
  : initialized()
  , report_mtx(MutexTypeReport, StatMtxReport)
  , nreported()
  , nmissed_expected()
  , thread_registry(new(thread_registry_placeholder) ThreadRegistry(
      CreateThreadContext, kMaxTid, kThreadQuarantineSize))
  , racy_stacks(MBlockRacyStacks)
  , racy_addresses(MBlockRacyAddresses)
  , fired_suppressions(8) {
}

// The objects are allocated in TLS, so one may rely on zero-initialization.
ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
                         uptr stk_addr, uptr stk_size,
                         uptr tls_addr, uptr tls_size)
  : fast_state(tid, epoch)
  // Do not touch these, rely on zero initialization,
  // they may be accessed before the ctor.
  // , ignore_reads_and_writes()
  // , in_rtl()
  , shadow_stack_pos(&shadow_stack[0])
#ifndef TSAN_GO
  , jmp_bufs(MBlockJmpBuf)
#endif
  , tid(tid)
  , unique_id(unique_id)
  , stk_addr(stk_addr)
  , stk_size(stk_size)
  , tls_addr(tls_addr)
  , tls_size(tls_size) {
}

static void MemoryProfiler(Context *ctx, fd_t fd, int i) {
  uptr n_threads;
  uptr n_running_threads;
  ctx->thread_registry->GetNumberOfThreads(&n_threads, &n_running_threads);
  InternalScopedBuffer<char> buf(4096);
  internal_snprintf(buf.data(), buf.size(), "%d: nthr=%d nlive=%d\n",
      i, n_threads, n_running_threads);
  internal_write(fd, buf.data(), internal_strlen(buf.data()));
  WriteMemoryProfile(buf.data(), buf.size());
  internal_write(fd, buf.data(), internal_strlen(buf.data()));
}

static void BackgroundThread(void *arg) {
  ScopedInRtl in_rtl;
  Context *ctx = CTX();
  const u64 kMs2Ns = 1000 * 1000;

  fd_t mprof_fd = kInvalidFd;
  if (flags()->profile_memory && flags()->profile_memory[0]) {
    InternalScopedBuffer<char> filename(4096);
    internal_snprintf(filename.data(), filename.size(), "%s.%d",
        flags()->profile_memory, (int)internal_getpid());
    uptr openrv = OpenFile(filename.data(), true);
    if (internal_iserror(openrv)) {
      Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
          &filename[0]);
    } else {
      mprof_fd = openrv;
    }
  }

  u64 last_flush = NanoTime();
  uptr last_rss = 0;
  for (int i = 0; ; i++) {
    SleepForSeconds(1);
    u64 now = NanoTime();

    // Flush memory if requested.
    if (flags()->flush_memory_ms > 0) {
      if (last_flush + flags()->flush_memory_ms * kMs2Ns < now) {
        if (flags()->verbosity > 0)
          Printf("ThreadSanitizer: periodic memory flush\n");
        FlushShadowMemory();
        last_flush = NanoTime();
      }
    }
    if (flags()->memory_limit_mb > 0) {
      uptr rss = GetRSS();
      uptr limit = uptr(flags()->memory_limit_mb) << 20;
      if (flags()->verbosity > 0) {
        Printf("ThreadSanitizer: memory flush check"
               " RSS=%llu LAST=%llu LIMIT=%llu\n",
               (u64)rss>>20, (u64)last_rss>>20, (u64)limit>>20);
      }
      if (2 * rss > limit + last_rss) {
        if (flags()->verbosity > 0)
          Printf("ThreadSanitizer: flushing memory due to RSS\n");
        FlushShadowMemory();
        rss = GetRSS();
        if (flags()->verbosity > 0)
          Printf("ThreadSanitizer: memory flushed RSS=%llu\n", (u64)rss>>20);
      }
      last_rss = rss;
    }

    // Write memory profile if requested.
    if (mprof_fd != kInvalidFd)
      MemoryProfiler(ctx, mprof_fd, i);

#ifndef TSAN_GO
    // Flush symbolizer cache if requested.
    if (flags()->flush_symbolizer_ms > 0) {
      u64 last = atomic_load(&ctx->last_symbolize_time_ns,
                             memory_order_relaxed);
      if (last != 0 && last + flags()->flush_symbolizer_ms * kMs2Ns < now) {
        Lock l(&ctx->report_mtx);
        SpinMutexLock l2(&CommonSanitizerReportMutex);
        SymbolizeFlush();
        atomic_store(&ctx->last_symbolize_time_ns, 0, memory_order_relaxed);
      }
    }
#endif
  }
}
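// Illustrative note (not part of the runtime): the RSS-based flush above
// triggers when 2 * rss > limit + last_rss, i.e. when the growth since the
// previous check exceeds the remaining headroom (rss - last_rss > limit - rss).
// For example, with a 1000 MB limit, last_rss = 700 MB and rss = 900 MB, the
// growth (200 MB) exceeds the headroom (100 MB), so the shadow memory is
// flushed before the next one-second check could overshoot the limit.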

void DontNeedShadowFor(uptr addr, uptr size) {
  uptr shadow_beg = MemToShadow(addr);
  uptr shadow_end = MemToShadow(addr + size);
  FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
}

void MapShadow(uptr addr, uptr size) {
  MmapFixedNoReserve(MemToShadow(addr), size * kShadowMultiplier);
}

void MapThreadTrace(uptr addr, uptr size) {
  DPrintf("#0: Mapping trace at %p-%p(0x%zx)\n", addr, addr + size, size);
  CHECK_GE(addr, kTraceMemBegin);
  CHECK_LE(addr + size, kTraceMemBegin + kTraceMemSize);
  if (addr != (uptr)MmapFixedNoReserve(addr, size)) {
    Printf("FATAL: ThreadSanitizer can not mmap thread trace\n");
    Die();
  }
}

void Initialize(ThreadState *thr) {
  // Thread safe because done before all threads exist.
  static bool is_initialized = false;
  if (is_initialized)
    return;
  is_initialized = true;
  SanitizerToolName = "ThreadSanitizer";
  // Install tool-specific callbacks in sanitizer_common.
  SetCheckFailedCallback(TsanCheckFailed);

  ScopedInRtl in_rtl;
#ifndef TSAN_GO
  InitializeAllocator();
#endif
  InitializeInterceptors();
  const char *env = InitializePlatform();
  InitializeMutex();
  InitializeDynamicAnnotations();
  ctx = new(ctx_placeholder) Context;
#ifndef TSAN_GO
  InitializeShadowMemory();
#endif
  InitializeFlags(&ctx->flags, env);
  // Set up the correct file descriptor for error reports.
  __sanitizer_set_report_path(flags()->log_path);
  InitializeSuppressions();
#ifndef TSAN_GO
  InitializeLibIgnore();
  // Initialize external symbolizer before internal threads are started.
  const char *external_symbolizer = flags()->external_symbolizer_path;
  bool symbolizer_started =
      getSymbolizer()->InitializeExternal(external_symbolizer);
  if (external_symbolizer != 0 && external_symbolizer[0] != '\0' &&
      !symbolizer_started) {
    Printf("Failed to start external symbolizer: '%s'\n",
           external_symbolizer);
    Die();
  }
#endif
  internal_start_thread(&BackgroundThread, 0);

  if (ctx->flags.verbosity)
    Printf("***** Running under ThreadSanitizer v2 (pid %d) *****\n",
           (int)internal_getpid());

  // Initialize thread 0.
  int tid = ThreadCreate(thr, 0, 0, true);
  CHECK_EQ(tid, 0);
  ThreadStart(thr, tid, internal_getpid());
  CHECK_EQ(thr->in_rtl, 1);
  ctx->initialized = true;

  if (flags()->stop_on_start) {
    Printf("ThreadSanitizer is suspended at startup (pid %d)."
           " Call __tsan_resume().\n",
           (int)internal_getpid());
    while (__tsan_resumed == 0) {}
  }
}

int Finalize(ThreadState *thr) {
  ScopedInRtl in_rtl;
  Context *ctx = __tsan::ctx;
  bool failed = false;

  if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
    SleepForMillis(flags()->atexit_sleep_ms);

  // Wait for pending reports.
  ctx->report_mtx.Lock();
  CommonSanitizerReportMutex.Lock();
  CommonSanitizerReportMutex.Unlock();
  ctx->report_mtx.Unlock();

#ifndef TSAN_GO
  if (ctx->flags.verbosity)
    AllocatorPrintStats();
#endif

  ThreadFinalize(thr);

  if (ctx->nreported) {
    failed = true;
#ifndef TSAN_GO
    Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
#else
    Printf("Found %d data race(s)\n", ctx->nreported);
#endif
  }

  if (ctx->nmissed_expected) {
    failed = true;
    Printf("ThreadSanitizer: missed %d expected races\n",
        ctx->nmissed_expected);
  }

  if (flags()->print_suppressions)
    PrintMatchedSuppressions();
#ifndef TSAN_GO
  if (flags()->print_benign)
    PrintMatchedBenignRaces();
#endif

  failed = OnFinalize(failed);

  StatAggregate(ctx->stat, thr->stat);
  StatOutput(ctx->stat);
  return failed ? flags()->exitcode : 0;
}

#ifndef TSAN_GO
u32 CurrentStackId(ThreadState *thr, uptr pc) {
  if (thr->shadow_stack_pos == 0)  // May happen during bootstrap.
    return 0;
  if (pc) {
    thr->shadow_stack_pos[0] = pc;
    thr->shadow_stack_pos++;
  }
  u32 id = StackDepotPut(thr->shadow_stack,
                         thr->shadow_stack_pos - thr->shadow_stack);
  if (pc)
    thr->shadow_stack_pos--;
  return id;
}
#endif
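// Illustrative note (not part of the runtime): CurrentStackId temporarily
// pushes the caller-supplied pc onto the shadow stack so that the stack trace
// stored in the stack depot ends with the instruction that triggered the
// event, then pops it so the shadow stack is left unchanged. The returned id
// is a compact handle that can later be expanded back into a full stack trace
// when a report is produced.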

void TraceSwitch(ThreadState *thr) {
  thr->nomalloc++;
  ScopedInRtl in_rtl;
  Trace *thr_trace = ThreadTrace(thr->tid);
  Lock l(&thr_trace->mtx);
  unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts();
  TraceHeader *hdr = &thr_trace->headers[trace];
  hdr->epoch0 = thr->fast_state.epoch();
  hdr->stack0.ObtainCurrent(thr, 0);
  hdr->mset0 = thr->mset;
  thr->nomalloc--;
}

Trace *ThreadTrace(int tid) {
  return (Trace*)GetThreadTraceHeader(tid);
}

uptr TraceTopPC(ThreadState *thr) {
  Event *events = (Event*)GetThreadTrace(thr->tid);
  uptr pc = events[thr->fast_state.GetTracePos()];
  return pc;
}

uptr TraceSize() {
  return (uptr)(1ull << (kTracePartSizeBits + flags()->history_size + 1));
}

uptr TraceParts() {
  return TraceSize() / kTracePartSize;
}
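// Illustrative sketch (not part of the runtime): the per-thread trace is a
// ring buffer of TraceParts() parts holding kTracePartSize events each.
// Assuming kTracePartSizeBits == 14 (16K events per part), the history_size
// flag selects the total capacity:
//   history_size == 0  ->  TraceSize() == 1 << 15 ==  32K events,   2 parts
//   history_size == 2  ->  TraceSize() == 1 << 17 == 128K events,   8 parts
//   history_size == 7  ->  TraceSize() == 1 << 22 ==   4M events, 256 parts
// TraceSwitch() above runs when the trace position enters a new part and
// records the starting epoch, stack and mutex set for that part.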

#ifndef TSAN_GO
extern "C" void __tsan_trace_switch() {
  TraceSwitch(cur_thread());
}

extern "C" void __tsan_report_race() {
  ReportRace(cur_thread());
}
#endif

ALWAYS_INLINE
Shadow LoadShadow(u64 *p) {
  u64 raw = atomic_load((atomic_uint64_t*)p, memory_order_relaxed);
  return Shadow(raw);
}

ALWAYS_INLINE
void StoreShadow(u64 *sp, u64 s) {
  atomic_store((atomic_uint64_t*)sp, s, memory_order_relaxed);
}

ALWAYS_INLINE
void StoreIfNotYetStored(u64 *sp, u64 *s) {
  StoreShadow(sp, *s);
  *s = 0;
}
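// Illustrative note (not part of the runtime): shadow words are read and
// written with relaxed atomics because several threads may update the same
// shadow cell concurrently; the algorithm tolerates stale values and only
// needs each 64-bit load/store to be indivisible. StoreIfNotYetStored() also
// clears *s (the pending store_word used in MemoryAccessImpl below), so the
// current access is written into at most one of the kShadowCnt slots and the
// caller can tell from store_word == 0 that it has already been stored.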

static inline void HandleRace(ThreadState *thr, u64 *shadow_mem,
                              Shadow cur, Shadow old) {
  thr->racy_state[0] = cur.raw();
  thr->racy_state[1] = old.raw();
  thr->racy_shadow_addr = shadow_mem;
#ifndef TSAN_GO
  HACKY_CALL(__tsan_report_race);
#else
  ReportRace(thr);
#endif
}

static inline bool OldIsInSameSynchEpoch(Shadow old, ThreadState *thr) {
  return old.epoch() >= thr->fast_synch_epoch;
}

static inline bool HappensBefore(Shadow old, ThreadState *thr) {
  return thr->clock.get(old.TidWithIgnore()) >= old.epoch();
}
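// Illustrative example (not part of the runtime): HappensBefore() asks whether
// the old access is ordered before the current one by synchronization. The
// current thread's vector clock keeps, per thread id, the latest epoch of that
// thread this thread has synchronized with. If the old shadow value was
// written by thread 3 at epoch 100 and thr->clock.get(3) >= 100 (e.g. because
// a mutex release/acquire propagated that epoch), the two accesses cannot
// race.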

ALWAYS_INLINE USED
void MemoryAccessImpl(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    u64 *shadow_mem, Shadow cur) {
  StatInc(thr, StatMop);
  StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
  StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));

  // This potentially can live in an MMX/SSE scratch register.
  // The required intrinsics are:
  // __m128i _mm_move_epi64(__m128i*);
  // _mm_storel_epi64(u64*, __m128i);
  u64 store_word = cur.raw();

  // Scan all the shadow values and dispatch to 4 categories:
  // same, replace, candidate and race (see comments below).
  // We consider only 3 cases regarding access sizes:
  // equal, intersect and not intersect. Initially I considered
  // larger and smaller sizes as well, which allowed replacing some
  // 'candidates' with 'same' or 'replace', but I think
  // it's just not worth it (performance- and complexity-wise).

  Shadow old(0);
  if (kShadowCnt == 1) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 2) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 4) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
    idx = 2;
#include "tsan_update_shadow_word_inl.h"
    idx = 3;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 8) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
    idx = 2;
#include "tsan_update_shadow_word_inl.h"
    idx = 3;
#include "tsan_update_shadow_word_inl.h"
    idx = 4;
#include "tsan_update_shadow_word_inl.h"
    idx = 5;
#include "tsan_update_shadow_word_inl.h"
    idx = 6;
#include "tsan_update_shadow_word_inl.h"
    idx = 7;
#include "tsan_update_shadow_word_inl.h"
  } else {
    CHECK(false);
  }

  // We did not find any races and have already stored
  // the current access info, so we are done.
  if (LIKELY(store_word == 0))
    return;
  // Choose a random candidate slot and replace it.
  StoreShadow(shadow_mem + (cur.epoch() % kShadowCnt), store_word);
  StatInc(thr, StatShadowReplace);
  return;
 RACE:
  HandleRace(thr, shadow_mem, cur, old);
  return;
}
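// Illustrative note (not part of the runtime): if, after scanning, the current
// access was not stored into any slot (store_word != 0), it evicts the slot at
// index cur.epoch() % kShadowCnt. The low bits of the ever-incrementing epoch
// act as a cheap pseudo-random choice, so the evicted slot varies from access
// to access rather than always being slot 0, and no single old access is kept
// forever.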

void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int size, bool kAccessIsWrite, bool kIsAtomic) {
  while (size) {
    int size1 = 1;
    int kAccessSizeLog = kSizeLog1;
    if (size >= 8 && (addr & ~7) == ((addr + 8) & ~7)) {
      size1 = 8;
      kAccessSizeLog = kSizeLog8;
    } else if (size >= 4 && (addr & ~7) == ((addr + 4) & ~7)) {
      size1 = 4;
      kAccessSizeLog = kSizeLog4;
    } else if (size >= 2 && (addr & ~7) == ((addr + 2) & ~7)) {
      size1 = 2;
      kAccessSizeLog = kSizeLog2;
    }
    MemoryAccess(thr, pc, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic);
    addr += size1;
    size -= size1;
  }
}
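// Illustrative example (not part of the runtime): an unaligned access is
// decomposed into sub-accesses that each stay within a single 8-byte shadow
// cell. A larger sub-access is used only when addr and addr + size1 fall in
// the same cell, which is conservative: an 8-byte access starting at cell
// offset 6 is reported as sub-accesses of 1, 1, 4 and 2 bytes. Note that, as
// written, the size-8 branch can never fire (addr + 8 always lands in the next
// cell), so even an aligned 8-byte access is split into 4 + 2 + 1 + 1 bytes.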

ALWAYS_INLINE USED
void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic) {
  u64 *shadow_mem = (u64*)MemToShadow(addr);
  DPrintf2("#%d: MemoryAccess: @%p %p size=%d"
      " is_write=%d shadow_mem=%p {%zx, %zx, %zx, %zx}\n",
      (int)thr->fast_state.tid(), (void*)pc, (void*)addr,
      (int)(1 << kAccessSizeLog), kAccessIsWrite, shadow_mem,
      (uptr)shadow_mem[0], (uptr)shadow_mem[1],
      (uptr)shadow_mem[2], (uptr)shadow_mem[3]);
#if TSAN_DEBUG
  if (!IsAppMem(addr)) {
    Printf("Access to non app mem %zx\n", addr);
    DCHECK(IsAppMem(addr));
  }
  if (!IsShadowMem((uptr)shadow_mem)) {
    Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
    DCHECK(IsShadowMem((uptr)shadow_mem));
  }
#endif

  if (*shadow_mem == kShadowRodata) {
    // Access to .rodata section, no races here.
    // Measurements show that it can be 10-20% of all memory accesses.
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopRodata);
    return;
  }

  FastState fast_state = thr->fast_state;
  if (fast_state.GetIgnoreBit())
    return;
  fast_state.IncrementEpoch();
  thr->fast_state = fast_state;
  Shadow cur(fast_state);
  cur.SetAddr0AndSizeLog(addr & 7, kAccessSizeLog);
  cur.SetWrite(kAccessIsWrite);
  cur.SetAtomic(kIsAtomic);

  // We must not store to the trace if we do not store to the shadow.
  // That is, this call must be moved somewhere below.
  TraceAddEvent(thr, fast_state, EventTypeMop, pc);

  MemoryAccessImpl(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
      shadow_mem, cur);
}
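// Illustrative example (not part of the runtime): each group of 8 application
// bytes (one shadow cell) is described by kShadowCnt shadow words, and every
// access is packed into a single 64-bit Shadow value: the thread id and epoch
// come from fast_state, addr & 7 records the offset of the access within its
// cell, and kAccessSizeLog records the size. For instance, a 2-byte write at
// an address ending in ...3 is stored with addr0 == 3 and size log == 1, which
// lets later accesses to the same cell decide whether the byte ranges overlap.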

static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
                           u64 val) {
  (void)thr;
  (void)pc;
  if (size == 0)
    return;
  // FIXME: fix me.
  uptr offset = addr % kShadowCell;
  if (offset) {
    offset = kShadowCell - offset;
    if (size <= offset)
      return;
    addr += offset;
    size -= offset;
  }
  DCHECK_EQ(addr % 8, 0);
  // If a user passes some insane arguments (memset(0)),
  // let it just crash as usual.
  if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
    return;
  // Don't want to touch lots of shadow memory.
  // If a program maps a 10MB stack, there is no need to reset the whole range.
  size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1);
  // UnmapOrDie/MmapFixedNoReserve does not work on Windows,
  // so we do it only for C/C++.
  if (kGoMode || size < 64*1024) {
    u64 *p = (u64*)MemToShadow(addr);
    CHECK(IsShadowMem((uptr)p));
    CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1)));
    // FIXME: may overwrite a part outside the region
    for (uptr i = 0; i < size / kShadowCell * kShadowCnt;) {
      p[i++] = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        p[i++] = 0;
    }
  } else {
    // The region is big, reset only beginning and end.
    const uptr kPageSize = 4096;
    u64 *begin = (u64*)MemToShadow(addr);
    u64 *end = begin + size / kShadowCell * kShadowCnt;
    u64 *p = begin;
    // Set at least the first kPageSize/2, continuing to a page boundary.
    while ((p < begin + kPageSize / kShadowSize / 2) || ((uptr)p % kPageSize)) {
      *p++ = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        *p++ = 0;
    }
    // Reset middle part.
    u64 *p1 = p;
    p = RoundDown(end, kPageSize);
    UnmapOrDie((void*)p1, (uptr)p - (uptr)p1);
    MmapFixedNoReserve((uptr)p1, (uptr)p - (uptr)p1);
    // Set the ending.
    while (p < end) {
      *p++ = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        *p++ = 0;
    }
  }
}
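// Illustrative note (not part of the runtime): assuming the usual kShadowCell
// of 8 bytes, each cell of application memory is backed by kShadowCnt 64-bit
// shadow words, so the shadow for a range is size / kShadowCell * kShadowCnt
// words. Small ranges are written directly; for large ranges only the first
// and last partial pages are written, while the whole-page middle part is
// unmapped and remapped, handing those pages back to the OS so they read back
// as zero shadow ("no prior accesses") on demand.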

void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  MemoryRangeSet(thr, pc, addr, size, 0);
}

void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  // Processing more than 1k (4k of shadow) is expensive,
  // can cause excessive memory consumption (the user does not necessarily
  // touch the whole range) and is most likely unnecessary.
  if (size > 1024)
    size = 1024;
  CHECK_EQ(thr->is_freeing, false);
  thr->is_freeing = true;
  MemoryAccessRange(thr, pc, addr, size, true);
  thr->is_freeing = false;
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.MarkAsFreed();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}

void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}

ALWAYS_INLINE USED
void FuncEntry(ThreadState *thr, uptr pc) {
  DCHECK_EQ(thr->in_rtl, 0);
  StatInc(thr, StatFuncEnter);
  DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.tid(), (void*)pc);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeFuncEnter, pc);

  // Shadow stack maintenance can be replaced with
  // stack unwinding during trace switch (which would presumably be faster).
  DCHECK_GE(thr->shadow_stack_pos, &thr->shadow_stack[0]);
#ifndef TSAN_GO
  DCHECK_LT(thr->shadow_stack_pos, &thr->shadow_stack[kShadowStackSize]);
#else
  if (thr->shadow_stack_pos == thr->shadow_stack_end) {
    const int sz = thr->shadow_stack_end - thr->shadow_stack;
    const int newsz = 2 * sz;
    uptr *newstack = (uptr*)internal_alloc(MBlockShadowStack,
        newsz * sizeof(uptr));
    internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
    internal_free(thr->shadow_stack);
    thr->shadow_stack = newstack;
    thr->shadow_stack_pos = newstack + sz;
    thr->shadow_stack_end = newstack + newsz;
  }
#endif
  thr->shadow_stack_pos[0] = pc;
  thr->shadow_stack_pos++;
}

ALWAYS_INLINE USED
void FuncExit(ThreadState *thr) {
  DCHECK_EQ(thr->in_rtl, 0);
  StatInc(thr, StatFuncExit);
  DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.tid());
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeFuncExit, 0);

  DCHECK_GT(thr->shadow_stack_pos, &thr->shadow_stack[0]);
#ifndef TSAN_GO
  DCHECK_LT(thr->shadow_stack_pos, &thr->shadow_stack[kShadowStackSize]);
#endif
  thr->shadow_stack_pos--;
}

void ThreadIgnoreBegin(ThreadState *thr) {
  DPrintf("#%d: ThreadIgnoreBegin\n", thr->tid);
  thr->ignore_reads_and_writes++;
  CHECK_GT(thr->ignore_reads_and_writes, 0);
  thr->fast_state.SetIgnoreBit();
}

void ThreadIgnoreEnd(ThreadState *thr) {
  DPrintf("#%d: ThreadIgnoreEnd\n", thr->tid);
  thr->ignore_reads_and_writes--;
  CHECK_GE(thr->ignore_reads_and_writes, 0);
  if (thr->ignore_reads_and_writes == 0)
    thr->fast_state.ClearIgnoreBit();
}
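// Illustrative usage sketch (hypothetical caller, not part of the runtime):
// ignore_reads_and_writes is a nesting counter, so begin/end pairs may nest
// and the ignore bit is cleared only when the outermost pair ends:
//   ThreadIgnoreBegin(thr);   // counter 0 -> 1, ignore bit set
//   ThreadIgnoreBegin(thr);   // counter 1 -> 2
//   ThreadIgnoreEnd(thr);     // counter 2 -> 1, accesses still ignored
//   ThreadIgnoreEnd(thr);     // counter 1 -> 0, ignore bit cleared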

void ThreadIgnoreSyncBegin(ThreadState *thr) {
  DPrintf("#%d: ThreadIgnoreSyncBegin\n", thr->tid);
  thr->ignore_sync++;
  CHECK_GT(thr->ignore_sync, 0);
}

void ThreadIgnoreSyncEnd(ThreadState *thr) {
  DPrintf("#%d: ThreadIgnoreSyncEnd\n", thr->tid);
  thr->ignore_sync--;
  CHECK_GE(thr->ignore_sync, 0);
}

bool MD5Hash::operator==(const MD5Hash &other) const {
  return hash[0] == other.hash[0] && hash[1] == other.hash[1];
}

#if TSAN_DEBUG
void build_consistency_debug() {}
#else
void build_consistency_release() {}
#endif

#if TSAN_COLLECT_STATS
void build_consistency_stats() {}
#else
void build_consistency_nostats() {}
#endif

#if TSAN_SHADOW_COUNT == 1
void build_consistency_shadow1() {}
#elif TSAN_SHADOW_COUNT == 2
void build_consistency_shadow2() {}
#elif TSAN_SHADOW_COUNT == 4
void build_consistency_shadow4() {}
#else
void build_consistency_shadow8() {}
#endif

}  // namespace __tsan

#ifndef TSAN_GO
// Must be included in this file to make sure everything is inlined.
#include "tsan_interface_inl.h"
#endif