tsan_rtl.cc revision e1ddbf9a458e81125a03fea721997565124294ae
//===-- tsan_rtl.cc -------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main file (entry points) for the TSan run-time.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#include "tsan_defs.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"

volatile int __tsan_resumed = 0;

extern "C" void __tsan_resume() {
  __tsan_resumed = 1;
}

namespace __tsan {
#ifndef TSAN_GO
THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64);
#endif
static char ctx_placeholder[sizeof(Context)] ALIGNED(64);

// Can be overridden by a front-end.
bool CPP_WEAK OnFinalize(bool failed) {
  return failed;
}

static Context *ctx;
Context *CTX() {
  return ctx;
}

static char thread_registry_placeholder[sizeof(ThreadRegistry)];

static ThreadContextBase *CreateThreadContext(u32 tid) {
  // Map thread trace when context is created.
  MapThreadTrace(GetThreadTrace(tid), TraceSize() * sizeof(Event));
  MapThreadTrace(GetThreadTraceHeader(tid), sizeof(Trace));
  new(ThreadTrace(tid)) Trace();
  void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadContext));
  return new(mem) ThreadContext(tid);
}

#ifndef TSAN_GO
static const u32 kThreadQuarantineSize = 16;
#else
static const u32 kThreadQuarantineSize = 64;
#endif

Context::Context()
  : initialized()
  , report_mtx(MutexTypeReport, StatMtxReport)
  , nreported()
  , nmissed_expected()
  , thread_registry(new(thread_registry_placeholder) ThreadRegistry(
      CreateThreadContext, kMaxTid, kThreadQuarantineSize))
  , racy_stacks(MBlockRacyStacks)
  , racy_addresses(MBlockRacyAddresses)
  , fired_suppressions(8) {
}

// The objects are allocated in TLS, so one may rely on zero-initialization.
ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
                         uptr stk_addr, uptr stk_size,
                         uptr tls_addr, uptr tls_size)
  : fast_state(tid, epoch)
  // Do not touch these, rely on zero initialization,
  // they may be accessed before the ctor.
  // , ignore_reads_and_writes()
  // , in_rtl()
  , shadow_stack_pos(&shadow_stack[0])
#ifndef TSAN_GO
  , jmp_bufs(MBlockJmpBuf)
#endif
  , tid(tid)
  , unique_id(unique_id)
  , stk_addr(stk_addr)
  , stk_size(stk_size)
  , tls_addr(tls_addr)
  , tls_size(tls_size) {
}

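// Writes one line with the current thread counts followed by the tool's
// memory profile to the open profile file.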
static void MemoryProfiler(Context *ctx, fd_t fd, int i) {
  uptr n_threads;
  uptr n_running_threads;
  ctx->thread_registry->GetNumberOfThreads(&n_threads, &n_running_threads);
  InternalScopedBuffer<char> buf(4096);
  internal_snprintf(buf.data(), buf.size(), "%d: nthr=%d nlive=%d\n",
      i, n_threads, n_running_threads);
  internal_write(fd, buf.data(), internal_strlen(buf.data()));
  WriteMemoryProfile(buf.data(), buf.size());
  internal_write(fd, buf.data(), internal_strlen(buf.data()));
}

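// Background housekeeping thread: once a second it flushes shadow memory and
// enforces the RSS limit, dumps the memory profile and flushes the symbolizer
// cache, each only when the corresponding flag requests it.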
static void BackgroundThread(void *arg) {
  ScopedInRtl in_rtl;
  Context *ctx = CTX();
  const u64 kMs2Ns = 1000 * 1000;

  fd_t mprof_fd = kInvalidFd;
  if (flags()->profile_memory && flags()->profile_memory[0]) {
    InternalScopedBuffer<char> filename(4096);
    internal_snprintf(filename.data(), filename.size(), "%s.%d",
        flags()->profile_memory, (int)internal_getpid());
    uptr openrv = OpenFile(filename.data(), true);
    if (internal_iserror(openrv)) {
      Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
          &filename[0]);
    } else {
      mprof_fd = openrv;
    }
  }

  u64 last_flush = NanoTime();
  uptr last_rss = 0;
  for (int i = 0; ; i++) {
    SleepForSeconds(1);
    u64 now = NanoTime();

    // Flush memory if requested.
    if (flags()->flush_memory_ms > 0) {
      if (last_flush + flags()->flush_memory_ms * kMs2Ns < now) {
        if (flags()->verbosity > 0)
          Printf("ThreadSanitizer: periodic memory flush\n");
        FlushShadowMemory();
        last_flush = NanoTime();
      }
    }
    if (flags()->memory_limit_mb > 0) {
      uptr rss = GetRSS();
      uptr limit = uptr(flags()->memory_limit_mb) << 20;
      if (flags()->verbosity > 0) {
        Printf("ThreadSanitizer: memory flush check"
               " RSS=%llu LAST=%llu LIMIT=%llu\n",
               (u64)rss>>20, (u64)last_rss>>20, (u64)limit>>20);
      }
      if (2 * rss > limit + last_rss) {
        if (flags()->verbosity > 0)
          Printf("ThreadSanitizer: flushing memory due to RSS\n");
        FlushShadowMemory();
        rss = GetRSS();
        if (flags()->verbosity > 0)
          Printf("ThreadSanitizer: memory flushed RSS=%llu\n", (u64)rss>>20);
      }
      last_rss = rss;
    }

    // Write memory profile if requested.
    if (mprof_fd != kInvalidFd)
      MemoryProfiler(ctx, mprof_fd, i);

#ifndef TSAN_GO
    // Flush symbolizer cache if requested.
    if (flags()->flush_symbolizer_ms > 0) {
      u64 last = atomic_load(&ctx->last_symbolize_time_ns,
                             memory_order_relaxed);
      if (last != 0 && last + flags()->flush_symbolizer_ms * kMs2Ns < now) {
        Lock l(&ctx->report_mtx);
        SpinMutexLock l2(&CommonSanitizerReportMutex);
        SymbolizeFlush();
        atomic_store(&ctx->last_symbolize_time_ns, 0, memory_order_relaxed);
      }
    }
#endif
  }
}

void DontNeedShadowFor(uptr addr, uptr size) {
  uptr shadow_beg = MemToShadow(addr);
  uptr shadow_end = MemToShadow(addr + size);
  FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
}

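// Maps shadow for the application range [addr, addr+size); every application
// byte is backed by kShadowMultiplier bytes of shadow memory.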
void MapShadow(uptr addr, uptr size) {
  MmapFixedNoReserve(MemToShadow(addr), size * kShadowMultiplier);
}

void MapThreadTrace(uptr addr, uptr size) {
  DPrintf("#0: Mapping trace at %p-%p(0x%zx)\n", addr, addr + size, size);
  CHECK_GE(addr, kTraceMemBegin);
  CHECK_LE(addr + size, kTraceMemBegin + kTraceMemSize);
  if (addr != (uptr)MmapFixedNoReserve(addr, size)) {
    Printf("FATAL: ThreadSanitizer can not mmap thread trace\n");
    Die();
  }
}

void Initialize(ThreadState *thr) {
  // Thread safe because done before all threads exist.
  static bool is_initialized = false;
  if (is_initialized)
    return;
  is_initialized = true;
  SanitizerToolName = "ThreadSanitizer";
  // Install tool-specific callbacks in sanitizer_common.
  SetCheckFailedCallback(TsanCheckFailed);

  ScopedInRtl in_rtl;
#ifndef TSAN_GO
  InitializeAllocator();
#endif
  InitializeInterceptors();
  const char *env = InitializePlatform();
  InitializeMutex();
  InitializeDynamicAnnotations();
  ctx = new(ctx_placeholder) Context;
#ifndef TSAN_GO
  InitializeShadowMemory();
#endif
  InitializeFlags(&ctx->flags, env);
  // Set up the correct file descriptor for error reports.
  if (internal_strcmp(flags()->log_path, "stdout") == 0)
    __sanitizer_set_report_fd(kStdoutFd);
  else if (internal_strcmp(flags()->log_path, "stderr") == 0)
    __sanitizer_set_report_fd(kStderrFd);
  else
    __sanitizer_set_report_path(flags()->log_path);
  InitializeSuppressions();
#ifndef TSAN_GO
  InitializeLibIgnore();
  // Initialize external symbolizer before internal threads are started.
  const char *external_symbolizer = flags()->external_symbolizer_path;
  bool symbolizer_started =
      getSymbolizer()->InitializeExternal(external_symbolizer);
  if (external_symbolizer != 0 && external_symbolizer[0] != '\0' &&
      !symbolizer_started) {
    Printf("Failed to start external symbolizer: '%s'\n",
           external_symbolizer);
    Die();
  }
#endif
  internal_start_thread(&BackgroundThread, 0);

  if (ctx->flags.verbosity)
    Printf("***** Running under ThreadSanitizer v2 (pid %d) *****\n",
           (int)internal_getpid());

  // Initialize thread 0.
  int tid = ThreadCreate(thr, 0, 0, true);
  CHECK_EQ(tid, 0);
  ThreadStart(thr, tid, internal_getpid());
  CHECK_EQ(thr->in_rtl, 1);
  ctx->initialized = true;

  if (flags()->stop_on_start) {
    Printf("ThreadSanitizer is suspended at startup (pid %d)."
           " Call __tsan_resume().\n",
           (int)internal_getpid());
    while (__tsan_resumed == 0) {}
  }
}

int Finalize(ThreadState *thr) {
  ScopedInRtl in_rtl;
  Context *ctx = __tsan::ctx;
  bool failed = false;

  if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
    SleepForMillis(flags()->atexit_sleep_ms);

  // Wait for pending reports.
  ctx->report_mtx.Lock();
  CommonSanitizerReportMutex.Lock();
  CommonSanitizerReportMutex.Unlock();
  ctx->report_mtx.Unlock();

#ifndef TSAN_GO
  if (ctx->flags.verbosity)
    AllocatorPrintStats();
#endif

  ThreadFinalize(thr);

  if (ctx->nreported) {
    failed = true;
#ifndef TSAN_GO
    Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
#else
    Printf("Found %d data race(s)\n", ctx->nreported);
#endif
  }

  if (ctx->nmissed_expected) {
    failed = true;
    Printf("ThreadSanitizer: missed %d expected races\n",
        ctx->nmissed_expected);
  }

  if (flags()->print_suppressions)
    PrintMatchedSuppressions();
#ifndef TSAN_GO
  if (flags()->print_benign)
    PrintMatchedBenignRaces();
#endif

  failed = OnFinalize(failed);

  StatAggregate(ctx->stat, thr->stat);
  StatOutput(ctx->stat);
  return failed ? flags()->exitcode : 0;
}

#ifndef TSAN_GO
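// Packs the current call (shadow) stack, optionally topped with pc, into the
// stack depot and returns its id.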
u32 CurrentStackId(ThreadState *thr, uptr pc) {
  if (thr->shadow_stack_pos == 0)  // May happen during bootstrap.
    return 0;
  if (pc) {
    thr->shadow_stack_pos[0] = pc;
    thr->shadow_stack_pos++;
  }
  u32 id = StackDepotPut(thr->shadow_stack,
                         thr->shadow_stack_pos - thr->shadow_stack);
  if (pc)
    thr->shadow_stack_pos--;
  return id;
}
#endif

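// Called (via __tsan_trace_switch) when a thread's trace position wraps into
// the next trace part; records the part's starting epoch, call stack and
// mutex set so that the state at any event in the part can be restored later.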
void TraceSwitch(ThreadState *thr) {
  thr->nomalloc++;
  ScopedInRtl in_rtl;
  Trace *thr_trace = ThreadTrace(thr->tid);
  Lock l(&thr_trace->mtx);
  unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts();
  TraceHeader *hdr = &thr_trace->headers[trace];
  hdr->epoch0 = thr->fast_state.epoch();
  hdr->stack0.ObtainCurrent(thr, 0);
  hdr->mset0 = thr->mset;
  thr->nomalloc--;
}

Trace *ThreadTrace(int tid) {
  return (Trace*)GetThreadTraceHeader(tid);
}

uptr TraceTopPC(ThreadState *thr) {
  Event *events = (Event*)GetThreadTrace(thr->tid);
  uptr pc = events[thr->fast_state.GetTracePos()];
  return pc;
}

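// Total number of events in a thread's trace; each increment of the
// history_size flag doubles the trace.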
uptr TraceSize() {
  return (uptr)(1ull << (kTracePartSizeBits + flags()->history_size + 1));
}

uptr TraceParts() {
  return TraceSize() / kTracePartSize;
}

#ifndef TSAN_GO
extern "C" void __tsan_trace_switch() {
  TraceSwitch(cur_thread());
}

extern "C" void __tsan_report_race() {
  ReportRace(cur_thread());
}
#endif

ALWAYS_INLINE
Shadow LoadShadow(u64 *p) {
  u64 raw = atomic_load((atomic_uint64_t*)p, memory_order_relaxed);
  return Shadow(raw);
}

ALWAYS_INLINE
void StoreShadow(u64 *sp, u64 s) {
  atomic_store((atomic_uint64_t*)sp, s, memory_order_relaxed);
}

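// Stores the pending shadow value *s into the slot and clears *s; a zero
// pending value later tells the caller that the access has already been
// written into some slot.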
ALWAYS_INLINE
void StoreIfNotYetStored(u64 *sp, u64 *s) {
  StoreShadow(sp, *s);
  *s = 0;
}

static inline void HandleRace(ThreadState *thr, u64 *shadow_mem,
                              Shadow cur, Shadow old) {
  thr->racy_state[0] = cur.raw();
  thr->racy_state[1] = old.raw();
  thr->racy_shadow_addr = shadow_mem;
#ifndef TSAN_GO
  HACKY_CALL(__tsan_report_race);
#else
  ReportRace(thr);
#endif
}

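// Helpers for the shadow-word scan below: OldIsInSameSynchEpoch() tests
// whether the previous access by this thread happened after the thread's last
// synchronization point, and HappensBefore() tests whether the old access is
// ordered before the current one by the current thread's vector clock.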
static inline bool OldIsInSameSynchEpoch(Shadow old, ThreadState *thr) {
  return old.epoch() >= thr->fast_synch_epoch;
}

static inline bool HappensBefore(Shadow old, ThreadState *thr) {
  return thr->clock.get(old.TidWithIgnore()) >= old.epoch();
}

ALWAYS_INLINE USED
void MemoryAccessImpl(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    u64 *shadow_mem, Shadow cur) {
  StatInc(thr, StatMop);
  StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
  StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));

  // This potentially can live in an MMX/SSE scratch register.
  // The required intrinsics are:
  // __m128i _mm_move_epi64(__m128i*);
  // _mm_storel_epi64(u64*, __m128i);
  u64 store_word = cur.raw();

  // Scan all the shadow values and dispatch to 4 categories:
  // same, replace, candidate and race (see comments below).
  // We consider only 3 cases regarding access sizes:
  // equal, intersect and not intersect. Initially I considered
  // larger and smaller as well, which allowed replacing some
  // 'candidates' with 'same' or 'replace', but I think
  // it's just not worth it (performance- and complexity-wise).

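  // tsan_update_shadow_word_inl.h is included once per shadow slot below;
  // this textually unrolls the scan over the kShadowCnt slots (the included
  // code jumps to the RACE label when it finds a conflicting access).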
  Shadow old(0);
  if (kShadowCnt == 1) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 2) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 4) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
    idx = 2;
#include "tsan_update_shadow_word_inl.h"
    idx = 3;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 8) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
    idx = 2;
#include "tsan_update_shadow_word_inl.h"
    idx = 3;
#include "tsan_update_shadow_word_inl.h"
    idx = 4;
#include "tsan_update_shadow_word_inl.h"
    idx = 5;
#include "tsan_update_shadow_word_inl.h"
    idx = 6;
#include "tsan_update_shadow_word_inl.h"
    idx = 7;
#include "tsan_update_shadow_word_inl.h"
  } else {
    CHECK(false);
  }

  // We did not find any races and have already stored
  // the current access info, so we are done.
  if (LIKELY(store_word == 0))
    return;
  // Choose a random candidate slot and replace it.
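  // (cur.epoch() % kShadowCnt below serves as a cheap pseudo-random index.)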
  StoreShadow(shadow_mem + (cur.epoch() % kShadowCnt), store_word);
  StatInc(thr, StatShadowReplace);
  return;
 RACE:
  HandleRace(thr, shadow_mem, cur, old);
  return;
}

void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int size, bool kAccessIsWrite, bool kIsAtomic) {
  while (size) {
    // Use the largest access whose last byte still lies in the same
    // 8-byte shadow cell as addr.
    int size1 = 1;
    int kAccessSizeLog = kSizeLog1;
    if (size >= 8 && (addr & ~7) == ((addr + 7) & ~7)) {
      size1 = 8;
      kAccessSizeLog = kSizeLog8;
    } else if (size >= 4 && (addr & ~7) == ((addr + 3) & ~7)) {
      size1 = 4;
      kAccessSizeLog = kSizeLog4;
    } else if (size >= 2 && (addr & ~7) == ((addr + 1) & ~7)) {
      size1 = 2;
      kAccessSizeLog = kSizeLog2;
    }
    MemoryAccess(thr, pc, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic);
    addr += size1;
    size -= size1;
  }
}

ALWAYS_INLINE USED
void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic) {
  u64 *shadow_mem = (u64*)MemToShadow(addr);
  DPrintf2("#%d: MemoryAccess: @%p %p size=%d"
      " is_write=%d shadow_mem=%p {%zx, %zx, %zx, %zx}\n",
      (int)thr->fast_state.tid(), (void*)pc, (void*)addr,
      (int)(1 << kAccessSizeLog), kAccessIsWrite, shadow_mem,
      (uptr)shadow_mem[0], (uptr)shadow_mem[1],
      (uptr)shadow_mem[2], (uptr)shadow_mem[3]);
#if TSAN_DEBUG
  if (!IsAppMem(addr)) {
    Printf("Access to non app mem %zx\n", addr);
    DCHECK(IsAppMem(addr));
  }
  if (!IsShadowMem((uptr)shadow_mem)) {
    Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
    DCHECK(IsShadowMem((uptr)shadow_mem));
  }
#endif

  if (*shadow_mem == kShadowRodata) {
    // Access to .rodata section, no races here.
    // Measurements show that it can be 10-20% of all memory accesses.
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopRodata);
    return;
  }

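  // Each access advances the thread's epoch; the epoch, thread id, in-cell
  // offset, size and access type together form the shadow value that
  // describes this access.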
  FastState fast_state = thr->fast_state;
  if (fast_state.GetIgnoreBit())
    return;
  fast_state.IncrementEpoch();
  thr->fast_state = fast_state;
  Shadow cur(fast_state);
  cur.SetAddr0AndSizeLog(addr & 7, kAccessSizeLog);
  cur.SetWrite(kAccessIsWrite);
  cur.SetAtomic(kIsAtomic);

  // We must not store to the trace if we do not store to the shadow.
  // That is, this call must be moved somewhere below.
  TraceAddEvent(thr, fast_state, EventTypeMop, pc);

  MemoryAccessImpl(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
      shadow_mem, cur);
}

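// Writes val into the first shadow slot of every cell in [addr, addr+size)
// and zeroes the remaining slots. For large C/C++ ranges the middle of the
// shadow is remapped instead of being written, which returns the pages to
// the kernel.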
static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
                           u64 val) {
  (void)thr;
  (void)pc;
  if (size == 0)
    return;
  // FIXME: fix me.
  uptr offset = addr % kShadowCell;
  if (offset) {
    offset = kShadowCell - offset;
    if (size <= offset)
      return;
    addr += offset;
    size -= offset;
  }
  DCHECK_EQ(addr % 8, 0);
  // If a user passes some insane arguments (memset(0)),
  // let it just crash as usual.
  if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
    return;
  // Don't want to touch lots of shadow memory.
  // If a program maps a 10MB stack, there is no need to reset the whole range.
  size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1);
  // UnmapOrDie/MmapFixedNoReserve does not work on Windows,
  // so we do it only for C/C++.
  if (kGoMode || size < 64*1024) {
    u64 *p = (u64*)MemToShadow(addr);
    CHECK(IsShadowMem((uptr)p));
    CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1)));
    // FIXME: may overwrite a part outside the region
    for (uptr i = 0; i < size / kShadowCell * kShadowCnt;) {
      p[i++] = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        p[i++] = 0;
    }
  } else {
    // The region is big, reset only beginning and end.
    const uptr kPageSize = 4096;
    u64 *begin = (u64*)MemToShadow(addr);
    u64 *end = begin + size / kShadowCell * kShadowCnt;
    u64 *p = begin;
    // Set at least first kPageSize/2 to page boundary.
    while ((p < begin + kPageSize / kShadowSize / 2) || ((uptr)p % kPageSize)) {
      *p++ = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        *p++ = 0;
    }
    // Reset middle part.
    u64 *p1 = p;
    p = RoundDown(end, kPageSize);
    UnmapOrDie((void*)p1, (uptr)p - (uptr)p1);
    MmapFixedNoReserve((uptr)p1, (uptr)p - (uptr)p1);
    // Set the ending.
    while (p < end) {
      *p++ = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        *p++ = 0;
    }
  }
}

void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  MemoryRangeSet(thr, pc, addr, size, 0);
}

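// Handles a free(): the range is first touched as a write so that racy
// concurrent accesses are detected, then the shadow is filled with values
// marked as freed so that later accesses to the memory are reported as well.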
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  // Processing more than 1k (4k of shadow) is expensive,
  // can cause excessive memory consumption (the user does not necessarily
  // touch the whole range) and is most likely unnecessary.
  if (size > 1024)
    size = 1024;
  CHECK_EQ(thr->is_freeing, false);
  thr->is_freeing = true;
  MemoryAccessRange(thr, pc, addr, size, true);
  thr->is_freeing = false;
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.MarkAsFreed();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}

void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}

ALWAYS_INLINE USED
void FuncEntry(ThreadState *thr, uptr pc) {
  DCHECK_EQ(thr->in_rtl, 0);
  StatInc(thr, StatFuncEnter);
  DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.tid(), (void*)pc);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeFuncEnter, pc);

  // Shadow stack maintenance can be replaced with
  // stack unwinding during trace switch (which presumably must be faster).
  DCHECK_GE(thr->shadow_stack_pos, &thr->shadow_stack[0]);
#ifndef TSAN_GO
  DCHECK_LT(thr->shadow_stack_pos, &thr->shadow_stack[kShadowStackSize]);
#else
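  // In Go mode the shadow stack is heap-allocated; grow it by doubling when
  // it is full.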
  if (thr->shadow_stack_pos == thr->shadow_stack_end) {
    const int sz = thr->shadow_stack_end - thr->shadow_stack;
    const int newsz = 2 * sz;
    uptr *newstack = (uptr*)internal_alloc(MBlockShadowStack,
        newsz * sizeof(uptr));
    internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
    internal_free(thr->shadow_stack);
    thr->shadow_stack = newstack;
    thr->shadow_stack_pos = newstack + sz;
    thr->shadow_stack_end = newstack + newsz;
  }
#endif
  thr->shadow_stack_pos[0] = pc;
  thr->shadow_stack_pos++;
}

ALWAYS_INLINE USED
void FuncExit(ThreadState *thr) {
  DCHECK_EQ(thr->in_rtl, 0);
  StatInc(thr, StatFuncExit);
  DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.tid());
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeFuncExit, 0);

  DCHECK_GT(thr->shadow_stack_pos, &thr->shadow_stack[0]);
#ifndef TSAN_GO
  DCHECK_LT(thr->shadow_stack_pos, &thr->shadow_stack[kShadowStackSize]);
#endif
  thr->shadow_stack_pos--;
}

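// Nestable "ignore" scopes: memory accesses are ignored while the counter is
// non-zero; the ignore bit cached in fast_state lets MemoryAccess() bail out
// early.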
void ThreadIgnoreBegin(ThreadState *thr) {
  DPrintf("#%d: ThreadIgnoreBegin\n", thr->tid);
  thr->ignore_reads_and_writes++;
  CHECK_GT(thr->ignore_reads_and_writes, 0);
  thr->fast_state.SetIgnoreBit();
}

void ThreadIgnoreEnd(ThreadState *thr) {
  DPrintf("#%d: ThreadIgnoreEnd\n", thr->tid);
  thr->ignore_reads_and_writes--;
  CHECK_GE(thr->ignore_reads_and_writes, 0);
  if (thr->ignore_reads_and_writes == 0)
    thr->fast_state.ClearIgnoreBit();
}

void ThreadIgnoreSyncBegin(ThreadState *thr) {
  DPrintf("#%d: ThreadIgnoreSyncBegin\n", thr->tid);
  thr->ignore_sync++;
  CHECK_GT(thr->ignore_sync, 0);
}

void ThreadIgnoreSyncEnd(ThreadState *thr) {
  DPrintf("#%d: ThreadIgnoreSyncEnd\n", thr->tid);
  thr->ignore_sync--;
  CHECK_GE(thr->ignore_sync, 0);
}

bool MD5Hash::operator==(const MD5Hash &other) const {
  return hash[0] == other.hash[0] && hash[1] == other.hash[1];
}

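// Exactly one function from each of the following groups is defined,
// depending on the build configuration; this turns a mismatch between the
// TSAN_DEBUG / TSAN_COLLECT_STATS / TSAN_SHADOW_COUNT settings of the runtime
// and of its users into a link-time error.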
#if TSAN_DEBUG
void build_consistency_debug() {}
#else
void build_consistency_release() {}
#endif

#if TSAN_COLLECT_STATS
void build_consistency_stats() {}
#else
void build_consistency_nostats() {}
#endif

#if TSAN_SHADOW_COUNT == 1
void build_consistency_shadow1() {}
#elif TSAN_SHADOW_COUNT == 2
void build_consistency_shadow2() {}
#elif TSAN_SHADOW_COUNT == 4
void build_consistency_shadow4() {}
#else
void build_consistency_shadow8() {}
#endif

}  // namespace __tsan

#ifndef TSAN_GO
// Must be included in this file to make sure everything is inlined.
#include "tsan_interface_inl.h"
#endif