1//===-- sanitizer_common.h --------------------------------------*- C++ -*-===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file is shared between run-time libraries of sanitizers.
11//
12// It declares common functions and classes that are used in both runtimes.
// Implementations of some functions are provided in sanitizer_common, while
14// others must be defined by run-time library itself.
15//===----------------------------------------------------------------------===//
16#ifndef SANITIZER_COMMON_H
17#define SANITIZER_COMMON_H
18
19#include "sanitizer_flags.h"
20#include "sanitizer_interface_internal.h"
21#include "sanitizer_internal_defs.h"
22#include "sanitizer_libc.h"
23#include "sanitizer_list.h"
24#include "sanitizer_mutex.h"
25
// MSVC (without clang-cl) has no GCC-style inline assembly, so declare the
// _ReadWriteBarrier compiler intrinsic used by SanitizerBreakOptimization().
#if defined(_MSC_VER) && !defined(__clang__)
extern "C" void _ReadWriteBarrier();
#pragma intrinsic(_ReadWriteBarrier)
#endif
30
31namespace __sanitizer {
// Forward declarations; full definitions live in other sanitizer headers.
struct StackTrace;
struct AddressInfo;

// Constants.
const uptr kWordSize = SANITIZER_WORDSIZE / 8;
const uptr kWordSizeInBits = 8 * kWordSize;

// PowerPC uses 128-byte cache lines; everything else we target uses 64.
#if defined(__powerpc__) || defined(__powerpc64__)
  const uptr kCacheLineSize = 128;
#else
  const uptr kCacheLineSize = 64;
#endif

const uptr kMaxPathLength = 4096;

const uptr kMaxThreadStackSize = 1 << 30;  // 1Gb

// Size of the buffer used when formatting fatal error messages (64K).
static const uptr kErrorMessageBufferSize = 1 << 16;

// Denotes fake PC values that come from JIT/JAVA/etc.
// For such PC values __tsan_symbolize_external() will be called.
const u64 kExternalPCBit = 1ULL << 60;

extern const char *SanitizerToolName;  // Can be changed by the tool.

// Current verbosity level; read/written through Verbosity()/SetVerbosity().
extern atomic_uint32_t current_verbosity;
// Sets the global verbosity level (relaxed atomic; no ordering implied).
INLINE void SetVerbosity(int verbosity) {
  atomic_store(&current_verbosity, verbosity, memory_order_relaxed);
}
// Returns the current global verbosity level.
INLINE int Verbosity() {
  return atomic_load(&current_verbosity, memory_order_relaxed);
}

// Queries the OS for the page size; see GetPageSizeCached() for cached access.
uptr GetPageSize();
extern uptr PageSizeCached;
67INLINE uptr GetPageSizeCached() {
68  if (!PageSizeCached)
69    PageSizeCached = GetPageSize();
70  return PageSizeCached;
71}
// Smallest unit of address space that mmap can reserve on this OS.
uptr GetMmapGranularity();
uptr GetMaxVirtualAddress();
// Threads
uptr GetTid();
uptr GetThreadSelf();
// Retrieves the stack bounds of the current thread via out-parameters.
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
                                uptr *stack_bottom);
// Retrieves both stack and TLS ranges of the current thread.
void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
                          uptr *tls_addr, uptr *tls_size);
// Memory management
// Maps 'size' bytes or dies. 'mem_type' is a tag used in error reports.
void *MmapOrDie(uptr size, const char *mem_type, bool raw_report = false);
// Same as MmapOrDie, but on failure reports via the raw (unformatted) path.
INLINE void *MmapOrDieQuietly(uptr size, const char *mem_type) {
  return MmapOrDie(size, mem_type, /*raw_report*/ true);
}
void UnmapOrDie(void *addr, uptr size);
void *MmapFixedNoReserve(uptr fixed_addr, uptr size,
                         const char *name = nullptr);
void *MmapNoReserveOrDie(uptr size, const char *mem_type);
void *MmapFixedOrDie(uptr fixed_addr, uptr size);
void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name = nullptr);
void *MmapNoAccess(uptr size);
// Map aligned chunk of address space; size and alignment are powers of two.
void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type);
// Disallow access to a memory range.  Use MmapFixedNoAccess to allocate an
// inaccessible memory.
bool MprotectNoAccess(uptr addr, uptr size);
bool MprotectReadOnly(uptr addr, uptr size);

// Used to check if we can map shadow memory to a fixed location.
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end);
void FlushUnneededShadowMemory(uptr addr, uptr size);
void IncreaseTotalMmap(uptr size);
void DecreaseTotalMmap(uptr size);
uptr GetRSS();
void NoHugePagesInRegion(uptr addr, uptr length);
void DontDumpShadowMemory(uptr addr, uptr length);
// Check if the built VMA size matches the runtime one.
void CheckVMASize();
void RunMallocHooks(const void *ptr, uptr size);
void RunFreeHooks(const void *ptr);
113
114// InternalScopedBuffer can be used instead of large stack arrays to
115// keep frame size low.
116// FIXME: use InternalAlloc instead of MmapOrDie once
117// InternalAlloc is made libc-free.
118template<typename T>
119class InternalScopedBuffer {
120 public:
121  explicit InternalScopedBuffer(uptr cnt) {
122    cnt_ = cnt;
123    ptr_ = (T*)MmapOrDie(cnt * sizeof(T), "InternalScopedBuffer");
124  }
125  ~InternalScopedBuffer() {
126    UnmapOrDie(ptr_, cnt_ * sizeof(T));
127  }
128  T &operator[](uptr i) { return ptr_[i]; }
129  T *data() { return ptr_; }
130  uptr size() { return cnt_ * sizeof(T); }
131
132 private:
133  T *ptr_;
134  uptr cnt_;
135  // Disallow evil constructors.
136  InternalScopedBuffer(const InternalScopedBuffer&);
137  void operator=(const InternalScopedBuffer&);
138};
139
// A fixed-capacity, NUL-terminated string builder backed by a mmaped buffer.
class InternalScopedString : public InternalScopedBuffer<char> {
 public:
  explicit InternalScopedString(uptr max_length)
      : InternalScopedBuffer<char>(max_length), length_(0) {
    (*this)[0] = '\0';
  }
  // Current string length, not counting the terminating NUL.
  uptr length() { return length_; }
  // Resets to the empty string; the buffer is kept.
  void clear() {
    (*this)[0] = '\0';
    length_ = 0;
  }
  // printf-style append (defined out of line).
  void append(const char *format, ...);

 private:
  uptr length_;  // Characters currently stored, excluding the NUL.
};
156
// Simple low-level (mmap-based) allocator for internal use. Doesn't have
// constructor, so all instances of LowLevelAllocator should be
// linker initialized.
class LowLevelAllocator {
 public:
  // Requires an external lock.
  void *Allocate(uptr size);
 private:
  // Bump-pointer state, managed by Allocate() (defined out of line).
  char *allocated_end_;
  char *allocated_current_;
};
typedef void (*LowLevelAllocateCallback)(uptr ptr, uptr size);
// Allows to register tool-specific callbacks for LowLevelAllocator.
// Passing NULL removes the callback.
void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback);
172
// IO
void RawWrite(const char *buffer);
bool ColorizeReports();
void RemoveANSIEscapeSequencesFromString(char *buffer);
void Printf(const char *format, ...);
void Report(const char *format, ...);
void SetPrintfAndReportCallback(void (*callback)(const char *));
// Calls Report(...) only when verbosity is at least 'level'.
#define VReport(level, ...)                                              \
  do {                                                                   \
    if ((uptr)Verbosity() >= (level)) Report(__VA_ARGS__); \
  } while (0)
// Calls Printf(...) only when verbosity is at least 'level'.
#define VPrintf(level, ...)                                              \
  do {                                                                   \
    if ((uptr)Verbosity() >= (level)) Printf(__VA_ARGS__); \
  } while (0)

// Can be used to prevent mixing error reports from different sanitizers.
extern StaticSpinMutex CommonSanitizerReportMutex;
191
// Destination of sanitizer reports: stderr by default, or a file whose path
// is derived from a user-set prefix.
struct ReportFile {
  void Write(const char *buffer, uptr length);
  bool SupportsColors();
  void SetReportPath(const char *path);

  // Don't use fields directly. They are only declared public to allow
  // aggregate initialization.

  // Protects fields below.
  StaticSpinMutex *mu;
  // Opened file descriptor. Defaults to stderr. It may be equal to
  // kInvalidFd, in which case new file will be opened when necessary.
  fd_t fd;
  // Path prefix of report file, set via __sanitizer_set_report_path.
  char path_prefix[kMaxPathLength];
  // Full path to report, obtained as <path_prefix>.PID
  char full_path[kMaxPathLength];
  // PID of the process that opened fd. If a fork() occurs,
  // the PID of child will be different from fd_pid.
  uptr fd_pid;

 private:
  void ReopenIfNecessary();
};
extern ReportFile report_file;
217
// PIDs of the stop-the-world tracer process and its parent.
extern uptr stoptheworld_tracer_pid;
extern uptr stoptheworld_tracer_ppid;

// Access mode for OpenFile().
enum FileAccessMode {
  RdOnly,
  WrOnly,
  RdWr
};

// Returns kInvalidFd on error.
fd_t OpenFile(const char *filename, FileAccessMode mode,
              error_t *errno_p = nullptr);
void CloseFile(fd_t);

// Return true on success, false on error.
bool ReadFromFile(fd_t fd, void *buff, uptr buff_size,
                  uptr *bytes_read = nullptr, error_t *error_p = nullptr);
bool WriteToFile(fd_t fd, const void *buff, uptr buff_size,
                 uptr *bytes_written = nullptr, error_t *error_p = nullptr);

bool RenameFile(const char *oldpath, const char *newpath,
                error_t *error_p = nullptr);
240
// Scoped file handle closer: closes 'fd' when leaving scope (RAII).
struct FileCloser {
  explicit FileCloser(fd_t fd) : fd(fd) {}
  ~FileCloser() { CloseFile(fd); }
  fd_t fd;
};
247
bool SupportsColoredOutput(fd_t fd);

// Opens the file 'file_name' and reads up to 'max_len' bytes.
// The resulting buffer is mmaped and stored in '*buff'.
// The size of the mmaped region is stored in '*buff_size'.
// The total number of read bytes is stored in '*read_len'.
// Returns true if file was successfully opened and read.
bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
                      uptr *read_len, uptr max_len = 1 << 26,
                      error_t *errno_p = nullptr);
// Maps given file to virtual memory, and returns pointer to it
// (or NULL if mapping fails). Stores the size of mmaped region
// in '*buff_size'.
void *MapFileToMemory(const char *file_name, uptr *buff_size);
void *MapWritableFileToMemory(void *addr, uptr size, fd_t fd, OFF_T offset);

bool IsAccessibleMemoryRange(uptr beg, uptr size);
265
// Error report formatting.
const char *StripPathPrefix(const char *filepath,
                            const char *strip_file_prefix);
// Strip the directories from the module name.
const char *StripModuleName(const char *module);

// OS
uptr ReadBinaryName(/*out*/char *buf, uptr buf_len);
uptr ReadBinaryNameCached(/*out*/char *buf, uptr buf_len);
uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len);
const char *GetProcessName();
void UpdateProcessName();
void CacheBinaryName();
void DisableCoreDumperIfNecessary();
void DumpProcessMap();
bool FileExists(const char *filename);
const char *GetEnv(const char *name);
bool SetEnv(const char *name, const char *value);
const char *GetPwd();
char *FindPathToBinary(const char *name);
bool IsPathSeparator(const char c);
bool IsAbsolutePath(const char *path);
// Starts a subprocess and returns its pid.
// If *_fd parameters are not kInvalidFd their corresponding input/output
// streams will be redirected to the file. The files will always be closed
// in parent process even in case of an error.
// The child process will close all fds after STDERR_FILENO
// before passing control to a program.
pid_t StartSubprocess(const char *filename, const char *const argv[],
                      fd_t stdin_fd = kInvalidFd, fd_t stdout_fd = kInvalidFd,
                      fd_t stderr_fd = kInvalidFd);
// Checks if specified process is still running
bool IsProcessRunning(pid_t pid);
// Waits for the process to finish and returns its exit code.
// Returns -1 in case of an error.
int WaitForProcess(pid_t pid);

u32 GetUid();
void ReExec();
char **GetArgv();
void PrintCmdline();
bool StackSizeIsUnlimited();
uptr GetStackSizeLimitInBytes();
void SetStackSizeLimitInBytes(uptr limit);
bool AddressSpaceIsUnlimited();
void SetAddressSpaceUnlimited();
void AdjustStackSize(void *attr);
void PrepareForSandboxing(__sanitizer_sandbox_arguments *args);
void CovPrepareForSandboxing(__sanitizer_sandbox_arguments *args);
void SetSandboxingCallback(void (*f)());

// Coverage hooks around mapping changes and fork().
void CoverageUpdateMapping();
void CovBeforeFork();
void CovAfterFork(int child_pid);

void InitializeCoverage(bool enabled, const char *coverage_dir);
void ReInitializeCoverage(bool enabled, const char *coverage_dir);

void InitTlsSize();
uptr GetTlsSize();
326
// Other
void SleepForSeconds(int seconds);
void SleepForMillis(int millis);
u64 NanoTime();
int Atexit(void (*function)(void));
void SortArray(uptr *array, uptr size);
bool TemplateMatch(const char *templ, const char *str);

// Exit
void NORETURN Abort();
void NORETURN Die();
void NORETURN
CheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2);
void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
                                      const char *mmap_type, error_t err,
                                      bool raw_report = false);

// Set the name of the current thread to 'name', return true on success.
// The name may be truncated to a system-dependent limit.
bool SanitizerSetThreadName(const char *name);
// Get the name of the current thread (no more than max_len bytes),
// return true on success. name should have space for at least max_len+1 bytes.
bool SanitizerGetThreadName(char *name, int max_len);

// Specific tools may override behavior of "Die" and "CheckFailed" functions
// to do tool-specific job.
typedef void (*DieCallbackType)(void);

// It's possible to add several callbacks that would be run when "Die" is
// called. The callbacks will be run in the opposite order. The tools are
// strongly recommended to setup all callbacks during initialization, when there
// is only a single thread.
bool AddDieCallback(DieCallbackType callback);
bool RemoveDieCallback(DieCallbackType callback);

void SetUserDieCallback(DieCallbackType callback);

typedef void (*CheckFailedCallbackType)(const char *, int, const char *,
                                       u64, u64);
void SetCheckFailedCallback(CheckFailedCallbackType callback);

// Callback will be called if soft_rss_limit_mb is given and the limit is
// exceeded (exceeded==true) or if rss went down below the limit
// (exceeded==false).
// The callback should be registered once at the tool init time.
void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded));
373
// Functions related to signal handling.
typedef void (*SignalHandlerType)(int, void *, void *);
bool IsHandledDeadlySignal(int signum);
void InstallDeadlySignalHandlers(SignalHandlerType handler);
// Alternative signal stack (POSIX-only).
void SetAlternateSignalStack();
void UnsetAlternateSignalStack();

// We don't want a summary too long.
const int kMaxSummaryLength = 1024;
// Construct a one-line string:
//   SUMMARY: SanitizerToolName: error_message
// and pass it to __sanitizer_report_error_summary.
void ReportErrorSummary(const char *error_message);
// Same as above, but construct error_message as:
//   error_type file:line[:column][ function]
void ReportErrorSummary(const char *error_type, const AddressInfo &info);
// Same as above, but obtains AddressInfo by symbolizing top stack trace frame.
void ReportErrorSummary(const char *error_type, StackTrace *trace);

// Math
// Declare the MSVC bit-scan intrinsics used by the bit-index helpers below.
#if SANITIZER_WINDOWS && !defined(__clang__) && !defined(__GNUC__)
extern "C" {
unsigned char _BitScanForward(unsigned long *index, unsigned long mask);  // NOLINT
unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);  // NOLINT
#if defined(_WIN64)
unsigned char _BitScanForward64(unsigned long *index, unsigned __int64 mask);  // NOLINT
unsigned char _BitScanReverse64(unsigned long *index, unsigned __int64 mask);  // NOLINT
#endif
}
#endif
405
// Returns the 0-based index of the highest set bit of x.
// REQUIRES: x != 0 (checked).
INLINE uptr MostSignificantSetBitIndex(uptr x) {
  CHECK_NE(x, 0U);
  unsigned long up;  // NOLINT
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
  // GCC-compatible compilers: derive the index from the leading-zero count.
# ifdef _WIN64
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzll(x);
# else
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzl(x);
# endif
#elif defined(_WIN64)
  _BitScanReverse64(&up, x);
#else
  _BitScanReverse(&up, x);
#endif
  return up;
}
422
// Returns the 0-based index of the lowest set bit of x.
// REQUIRES: x != 0 (checked).
INLINE uptr LeastSignificantSetBitIndex(uptr x) {
  CHECK_NE(x, 0U);
  unsigned long up;  // NOLINT
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
  // GCC-compatible compilers: the trailing-zero count is the index itself.
# ifdef _WIN64
  up = __builtin_ctzll(x);
# else
  up = __builtin_ctzl(x);
# endif
#elif defined(_WIN64)
  _BitScanForward64(&up, x);
#else
  _BitScanForward(&up, x);
#endif
  return up;
}
439
440INLINE bool IsPowerOfTwo(uptr x) {
441  return (x & (x - 1)) == 0;
442}
443
444INLINE uptr RoundUpToPowerOfTwo(uptr size) {
445  CHECK(size);
446  if (IsPowerOfTwo(size)) return size;
447
448  uptr up = MostSignificantSetBitIndex(size);
449  CHECK(size < (1ULL << (up + 1)));
450  CHECK(size > (1ULL << up));
451  return 1ULL << (up + 1);
452}
453
454INLINE uptr RoundUpTo(uptr size, uptr boundary) {
455  RAW_CHECK(IsPowerOfTwo(boundary));
456  return (size + boundary - 1) & ~(boundary - 1);
457}
458
459INLINE uptr RoundDownTo(uptr x, uptr boundary) {
460  return x & ~(boundary - 1);
461}
462
463INLINE bool IsAligned(uptr a, uptr alignment) {
464  return (a & (alignment - 1)) == 0;
465}
466
// Returns log2(x) for an exact power of two; dies otherwise.
INLINE uptr Log2(uptr x) {
  CHECK(IsPowerOfTwo(x));
  return LeastSignificantSetBitIndex(x);
}
471
// Don't use std::min, std::max or std::swap, to minimize dependency
// on libstdc++.
// Return the smaller/larger of two values (by value; copies its arguments).
template<class T> T Min(T a, T b) { return b < a ? b : a; }
template<class T> T Max(T a, T b) { return b > a ? b : a; }
// Exchange the contents of a and b through a temporary copy.
template<class T> void Swap(T& a, T& b) {
  T old_a = a;
  a = b;
  b = old_a;
}
481
482// Char handling
483INLINE bool IsSpace(int c) {
484  return (c == ' ') || (c == '\n') || (c == '\t') ||
485         (c == '\f') || (c == '\r') || (c == '\v');
486}
487INLINE bool IsDigit(int c) {
488  return (c >= '0') && (c <= '9');
489}
490INLINE int ToLower(int c) {
491  return (c >= 'A' && c <= 'Z') ? (c + 'a' - 'A') : c;
492}
493
494// A low-level vector based on mmap. May incur a significant memory overhead for
495// small vectors.
496// WARNING: The current implementation supports only POD types.
497template<typename T>
498class InternalMmapVectorNoCtor {
499 public:
500  void Initialize(uptr initial_capacity) {
501    capacity_ = Max(initial_capacity, (uptr)1);
502    size_ = 0;
503    data_ = (T *)MmapOrDie(capacity_ * sizeof(T), "InternalMmapVectorNoCtor");
504  }
505  void Destroy() {
506    UnmapOrDie(data_, capacity_ * sizeof(T));
507  }
508  T &operator[](uptr i) {
509    CHECK_LT(i, size_);
510    return data_[i];
511  }
512  const T &operator[](uptr i) const {
513    CHECK_LT(i, size_);
514    return data_[i];
515  }
516  void push_back(const T &element) {
517    CHECK_LE(size_, capacity_);
518    if (size_ == capacity_) {
519      uptr new_capacity = RoundUpToPowerOfTwo(size_ + 1);
520      Resize(new_capacity);
521    }
522    internal_memcpy(&data_[size_++], &element, sizeof(T));
523  }
524  T &back() {
525    CHECK_GT(size_, 0);
526    return data_[size_ - 1];
527  }
528  void pop_back() {
529    CHECK_GT(size_, 0);
530    size_--;
531  }
532  uptr size() const {
533    return size_;
534  }
535  const T *data() const {
536    return data_;
537  }
538  T *data() {
539    return data_;
540  }
541  uptr capacity() const {
542    return capacity_;
543  }
544
545  void clear() { size_ = 0; }
546  bool empty() const { return size() == 0; }
547
548  const T *begin() const {
549    return data();
550  }
551  T *begin() {
552    return data();
553  }
554  const T *end() const {
555    return data() + size();
556  }
557  T *end() {
558    return data() + size();
559  }
560
561 private:
562  void Resize(uptr new_capacity) {
563    CHECK_GT(new_capacity, 0);
564    CHECK_LE(size_, new_capacity);
565    T *new_data = (T *)MmapOrDie(new_capacity * sizeof(T),
566                                 "InternalMmapVector");
567    internal_memcpy(new_data, data_, size_ * sizeof(T));
568    T *old_data = data_;
569    data_ = new_data;
570    UnmapOrDie(old_data, capacity_ * sizeof(T));
571    capacity_ = new_capacity;
572  }
573
574  T *data_;
575  uptr capacity_;
576  uptr size_;
577};
578
// RAII wrapper around InternalMmapVectorNoCtor: allocates in the constructor
// and releases in the destructor.
template<typename T>
class InternalMmapVector : public InternalMmapVectorNoCtor<T> {
 public:
  explicit InternalMmapVector(uptr initial_capacity) {
    InternalMmapVectorNoCtor<T>::Initialize(initial_capacity);
  }
  ~InternalMmapVector() { InternalMmapVectorNoCtor<T>::Destroy(); }
  // Disallow evil constructors (declared, never defined).
  InternalMmapVector(const InternalMmapVector&);
  void operator=(const InternalMmapVector&);
};
590
// HeapSort for arrays and InternalMmapVector.
// 'comp' is a strict "less-than" predicate; the result is ascending by it.
template<class Container, class Compare>
void InternalSort(Container *v, uptr size, Compare comp) {
  if (size < 2)
    return;
  // Stage 1: insert elements to the heap.
  for (uptr i = 1; i < size; i++) {
    uptr j, p;
    // Sift element i up while it is greater than its parent.
    for (j = i; j > 0; j = p) {
      p = (j - 1) / 2;  // Parent index in the implicit binary heap.
      if (comp((*v)[p], (*v)[j]))
        Swap((*v)[j], (*v)[p]);
      else
        break;
    }
  }
  // Stage 2: swap largest element with the last one,
  // and sink the new top.
  for (uptr i = size - 1; i > 0; i--) {
    Swap((*v)[0], (*v)[i]);
    uptr j, max_ind;
    // Sift the new root down within the shrunken heap [0, i).
    for (j = 0; j < i; j = max_ind) {
      uptr left = 2 * j + 1;
      uptr right = 2 * j + 2;
      max_ind = j;
      if (left < i && comp((*v)[max_ind], (*v)[left]))
        max_ind = left;
      if (right < i && comp((*v)[max_ind], (*v)[right]))
        max_ind = right;
      if (max_ind != j)
        Swap((*v)[j], (*v)[max_ind]);
      else
        break;
    }
  }
}
627
628template<class Container, class Value, class Compare>
629uptr InternalBinarySearch(const Container &v, uptr first, uptr last,
630                          const Value &val, Compare comp) {
631  uptr not_found = last + 1;
632  while (last >= first) {
633    uptr mid = (first + last) / 2;
634    if (comp(v[mid], val))
635      first = mid + 1;
636    else if (comp(val, v[mid]))
637      last = mid - 1;
638    else
639      return mid;
640  }
641  return not_found;
642}
643
// Represents a binary loaded into virtual memory (e.g. this can be an
// executable or a shared object).
class LoadedModule {
 public:
  LoadedModule() : full_name_(nullptr), base_address_(0) { ranges_.clear(); }
  // (Re)initializes the module name and load address (defined out of line).
  void set(const char *module_name, uptr base_address);
  // Releases owned state (defined out of line).
  void clear();
  void addAddressRange(uptr beg, uptr end, bool executable);
  // Returns true if 'address' falls into one of the registered ranges.
  bool containsAddress(uptr address) const;

  const char *full_name() const { return full_name_; }
  uptr base_address() const { return base_address_; }

  // A single address range of the module; node of the intrusive list below.
  struct AddressRange {
    AddressRange *next;
    uptr beg;
    uptr end;
    bool executable;

    AddressRange(uptr beg, uptr end, bool executable)
        : next(nullptr), beg(beg), end(end), executable(executable) {}
  };

  const IntrusiveList<AddressRange> &ranges() const { return ranges_; }

 private:
  char *full_name_;  // Owned.
  uptr base_address_;
  IntrusiveList<AddressRange> ranges_;
};
674
// List of LoadedModules. OS-dependent implementation is responsible for
// filling this information.
class ListOfModules {
 public:
  ListOfModules() : modules_(kInitialCapacity) {}
  ~ListOfModules() { clear(); }
  // Populates the list; OS-specific, defined out of line.
  void init();
  const LoadedModule *begin() const { return modules_.begin(); }
  LoadedModule *begin() { return modules_.begin(); }
  const LoadedModule *end() const { return modules_.end(); }
  LoadedModule *end() { return modules_.end(); }
  uptr size() const { return modules_.size(); }
  // Bounds-checked element access.
  const LoadedModule &operator[](uptr i) const {
    CHECK_LT(i, modules_.size());
    return modules_[i];
  }

 private:
  // Clears each module (releasing its owned state) before dropping them.
  void clear() {
    for (auto &module : modules_) module.clear();
    modules_.clear();
  }

  InternalMmapVector<LoadedModule> modules_;
  // We rarely have more than 16K loaded modules.
  static const uptr kInitialCapacity = 1 << 14;
};
702
// Callback type for iterating over a set of memory ranges.
typedef void (*RangeIteratorCallback)(uptr begin, uptr end, void *arg);

// Android API levels this runtime distinguishes between; values match the
// numeric Android API levels.
enum AndroidApiLevel {
  ANDROID_NOT_ANDROID = 0,
  ANDROID_KITKAT = 19,
  ANDROID_LOLLIPOP_MR1 = 22,
  ANDROID_POST_LOLLIPOP = 23
};
712
void WriteToSyslog(const char *buffer);

// Full-report logging is only implemented on Mac; a no-op stub elsewhere.
#if SANITIZER_MAC
void LogFullErrorReport(const char *buffer);
#else
INLINE void LogFullErrorReport(const char *buffer) {}
#endif

// Syslog/printf logging hooks; no-op stubs outside Linux and Mac.
#if SANITIZER_LINUX || SANITIZER_MAC
void WriteOneLineToSyslog(const char *s);
void LogMessageOnPrintf(const char *str);
#else
INLINE void WriteOneLineToSyslog(const char *s) {}
INLINE void LogMessageOnPrintf(const char *str) {}
#endif

#if SANITIZER_LINUX
// Initialize Android logging. Any writes before this are silently lost.
void AndroidLogInit();
#else
INLINE void AndroidLogInit() {}
#endif

// Android-specific hooks; stubbed out (and AndroidGetApiLevel() reports
// ANDROID_NOT_ANDROID) on all other platforms.
#if SANITIZER_ANDROID
void SanitizerInitializeUnwinder();
AndroidApiLevel AndroidGetApiLevel();
#else
INLINE void AndroidLogWrite(const char *buffer_unused) {}
INLINE void SanitizerInitializeUnwinder() {}
INLINE AndroidApiLevel AndroidGetApiLevel() { return ANDROID_NOT_ANDROID; }
#endif

// Number of passes over pthread TLS destructors the platform performs.
INLINE uptr GetPthreadDestructorIterations() {
#if SANITIZER_ANDROID
  return (AndroidGetApiLevel() == ANDROID_LOLLIPOP_MR1) ? 8 : 4;
#elif SANITIZER_POSIX
  return 4;
#else
// Unused on Windows.
  return 0;
#endif
}
755
// Minimal internal thread API (avoids pthread dependencies in the interface).
void *internal_start_thread(void(*func)(void*), void *arg);
void internal_join_thread(void *th);
void MaybeStartBackgroudThread();

// Make the compiler think that something is going on there.
// Use this inside a loop that looks like memset/memcpy/etc to prevent the
// compiler from recognising it and turning it into an actual call to
// memset/memcpy/etc.
static inline void SanitizerBreakOptimization(void *arg) {
#if defined(_MSC_VER) && !defined(__clang__)
  // MSVC: compiler-only barrier (declared at the top of this header).
  _ReadWriteBarrier();
#else
  // GCC/Clang: empty asm with a "memory" clobber acts as a compiler barrier.
  __asm__ __volatile__("" : : "r" (arg) : "memory");
#endif
}
771
// Platform-independent snapshot of the machine state at the point where a
// signal was raised; built via Create() in a platform-specific way.
struct SignalContext {
  void *context;  // Opaque platform-specific context, as passed to Create().
  uptr addr;
  uptr pc;
  uptr sp;
  uptr bp;
  bool is_memory_access;

  enum WriteFlag { UNKNOWN, READ, WRITE } write_flag;

  SignalContext(void *context, uptr addr, uptr pc, uptr sp, uptr bp,
                bool is_memory_access, WriteFlag write_flag)
      : context(context),
        addr(addr),
        pc(pc),
        sp(sp),
        bp(bp),
        is_memory_access(is_memory_access),
        write_flag(write_flag) {}

  // Creates signal context in a platform-specific manner.
  static SignalContext Create(void *siginfo, void *context);

  // Returns the access kind (READ/WRITE/UNKNOWN) inferred from "context".
  static WriteFlag GetWriteFlag(void *context);
};

// Extracts pc/sp/bp registers from a platform-specific signal context.
void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp);

void MaybeReexec();
802
// RAII helper: invokes the stored callable exactly once, when the guard
// object is destroyed. See at_scope_exit() below for the usual entry point.
template <typename Callback>
class RunOnDestruction {
 public:
  explicit RunOnDestruction(Callback fn) : callback_(fn) {}
  ~RunOnDestruction() { callback_(); }

 private:
  Callback callback_;
};
812
// A simple scope guard. Usage:
// auto cleanup = at_scope_exit([]{ do_cleanup; });
// The callable runs when the returned guard is destroyed.
template <typename Fn>
RunOnDestruction<Fn> at_scope_exit(Fn fn) {
  return RunOnDestruction<Fn>(fn);
}
819
// Linux on 64-bit s390 had a nasty bug that crashes the whole machine
// if a process uses virtual memory over 4TB (as many sanitizers like
// to do).  This function will abort the process if running on a kernel
// that looks vulnerable.  No-op stub on all other platforms.
#if SANITIZER_LINUX && SANITIZER_S390_64
void AvoidCVE_2016_2143();
#else
INLINE void AvoidCVE_2016_2143() {}
#endif
829
830}  // namespace __sanitizer
831
// Placement-style operator new that allocates from a LowLevelAllocator,
// e.g.: new (alloc) Foo(...). The returned memory is never freed.
inline void *operator new(__sanitizer::operator_new_size_type size,
                          __sanitizer::LowLevelAllocator &alloc) {
  return alloc.Allocate(size);
}

// Statistics reported by the stack depot.
struct StackDepotStats {
  uptr n_uniq_ids;  // Number of unique stack traces stored.
  uptr allocated;   // Bytes allocated by the depot.
};
841
842#endif  // SANITIZER_COMMON_H
843