//===-- sanitizer_coverage.cc ---------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Sanitizer Coverage.
// This file implements run-time support for a poor man's coverage tool.
//
// Compiler instrumentation:
// For every interesting basic block the compiler injects the following code:
// if (Guard < 0) {
//    __sanitizer_cov(&Guard);
// }
// At the module start up time __sanitizer_cov_module_init sets the guards
// to consecutive negative numbers (-1, -2, -3, ...).
// It's fine to call __sanitizer_cov more than once for a given block.
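//
// For example (illustrative only; the array names and the file name are
// placeholders synthesized by the instrumentation pass), the per-module
// start-up call looks like:
//   __sanitizer_cov_module_init(guards, npcs, counters, "unit.cc");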
//
// Run-time:
//  - __sanitizer_cov(): record that we've executed the PC (GET_CALLER_PC)
//    and atomically set Guard to -Guard.
//  - __sanitizer_cov_dump: dump the coverage data to disk.
//  For every module of the current process that has coverage data
//  this will create a file module_name.PID.sancov.
//
// The file format is simple: the first 8 bytes are the magic,
// one of 0xC0BFFFFFFFFFFF64 and 0xC0BFFFFFFFFFFF32. The last byte of the
// magic defines the size of the following offsets.
// The rest of the data is the offsets in the module.
//
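// A minimal sketch of an offline reader for this format (illustrative only,
// not part of the run-time; assumes a little-endian host, omits error
// handling):
//
//   #include <stdint.h>
//   #include <stdio.h>
//   void PrintSancov(FILE *f) {  // Prints the offsets stored in a .sancov.
//     uint64_t magic = 0;
//     fread(&magic, sizeof(magic), 1, f);
//     // The last magic byte (0x64 or 0x32) encodes the offset width.
//     size_t width = (magic & 0xff) == 0x64 ? 8 : 4;
//     uint64_t off = 0;
//     while (fread(&off, width, 1, f) == 1)
//       printf("0x%llx\n", (unsigned long long)off);
//   }
//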
// Eventually, this coverage implementation should be obsoleted by a more
// powerful general purpose Clang/LLVM coverage instrumentation.
// Consider this implementation a prototype.
//
// FIXME: support (or at least test with) dlclose.
//===----------------------------------------------------------------------===//

#include "sanitizer_allocator_internal.h"
#include "sanitizer_common.h"
#include "sanitizer_libc.h"
#include "sanitizer_mutex.h"
#include "sanitizer_procmaps.h"
#include "sanitizer_stacktrace.h"
#include "sanitizer_symbolizer.h"
#include "sanitizer_flags.h"

static const u64 kMagic64 = 0xC0BFFFFFFFFFFF64ULL;
static const u64 kMagic32 = 0xC0BFFFFFFFFFFF32ULL;

static atomic_uint32_t dump_once_guard;  // Ensure that CovDump runs only once.

static atomic_uintptr_t coverage_counter;

// pc_array is the array containing the covered PCs.
// To make the pc_array thread- and async-signal-safe it has to be large enough.
// 128M counters "ought to be enough for anybody" (64M on 32-bit, 16M on
// Android).

// With coverage_direct=1 in ASAN_OPTIONS, pc_array memory is mapped to a file.
// In this mode, __sanitizer_cov_dump does nothing, and CovUpdateMapping()
// dumps the current memory layout to another file.

static bool cov_sandboxed = false;
static fd_t cov_fd = kInvalidFd;
static unsigned int cov_max_block_size = 0;
static bool coverage_enabled = false;
static const char *coverage_dir;

namespace __sanitizer {

class CoverageData {
 public:
  void Init();
  void Enable();
  void Disable();
  void ReInit();
  void BeforeFork();
  void AfterFork(int child_pid);
  void Extend(uptr npcs);
  void Add(uptr pc, u32 *guard);
  void IndirCall(uptr caller, uptr callee, uptr callee_cache[],
                 uptr cache_size);
  void DumpCallerCalleePairs();
  void DumpTrace();
  void DumpAsBitSet();
  void DumpCounters();
  void DumpOffsets();
  void DumpAll();

  ALWAYS_INLINE
  void TraceBasicBlock(s32 *id);

  void InitializeGuardArray(s32 *guards);
  void InitializeGuards(s32 *guards, uptr n, const char *module_name,
                        uptr caller_pc);
  void InitializeCounters(u8 *counters, uptr n);
  void ReinitializeGuards();
  uptr GetNumberOf8bitCounters();
  uptr Update8bitCounterBitsetAndClearCounters(u8 *bitset);

  uptr *data();
  uptr size();

 private:
  void DirectOpen();
  void UpdateModuleNameVec(uptr caller_pc, uptr range_beg, uptr range_end);

  // Maximal size to which the pc array may ever grow.
  // We MmapNoReserve this space to ensure that the array is contiguous.
  static const uptr kPcArrayMaxSize =
      FIRST_32_SECOND_64(1 << (SANITIZER_ANDROID ? 24 : 26), 1 << 27);
  // The amount by which the file mapping for the pc array is grown.
  static const uptr kPcArrayMmapSize = 64 * 1024;

  // pc_array is allocated with MmapNoReserveOrDie and so it uses only as
  // much RAM as it really needs.
  uptr *pc_array;
  // Index of the first available pc_array slot.
  atomic_uintptr_t pc_array_index;
  // Array size.
  atomic_uintptr_t pc_array_size;
  // Current file mapped size of the pc array.
  uptr pc_array_mapped_size;
  // Descriptor of the file mapped pc array.
  fd_t pc_fd;

  // Vector of coverage guard arrays, protected by mu.
  InternalMmapVectorNoCtor<s32*> guard_array_vec;

  struct NamedPcRange {
    const char *copied_module_name;
    uptr beg, end; // elements [beg,end) in pc_array.
  };

  // Vector of module and compilation unit pc ranges.
  InternalMmapVectorNoCtor<NamedPcRange> comp_unit_name_vec;
  InternalMmapVectorNoCtor<NamedPcRange> module_name_vec;

  struct CounterAndSize {
    u8 *counters;
    uptr n;
  };

  InternalMmapVectorNoCtor<CounterAndSize> counters_vec;
  uptr num_8bit_counters;

  // Caller-Callee (cc) array, size and current index.
  static const uptr kCcArrayMaxSize = FIRST_32_SECOND_64(1 << 18, 1 << 24);
  uptr **cc_array;
  atomic_uintptr_t cc_array_index;
  atomic_uintptr_t cc_array_size;

  // Tracing event array, size and current pointer.
  // We record all events (basic block entries) in a global buffer of u32
  // values. Each such value is the index in pc_array.
  // So far the tracing is highly experimental:
  //   - not thread-safe;
  //   - does not support long traces;
  //   - not tuned for performance.
  static const uptr kTrEventArrayMaxSize = FIRST_32_SECOND_64(1 << 22, 1 << 30);
  u32 *tr_event_array;
  uptr tr_event_array_size;
  u32 *tr_event_pointer;
  static const uptr kTrPcArrayMaxSize    = FIRST_32_SECOND_64(1 << 22, 1 << 27);

  StaticSpinMutex mu;
};

static CoverageData coverage_data;

void CovUpdateMapping(const char *path, uptr caller_pc = 0);

void CoverageData::DirectOpen() {
  InternalScopedString path(kMaxPathLength);
  internal_snprintf((char *)path.data(), path.size(), "%s/%zd.sancov.raw",
                    coverage_dir, internal_getpid());
  pc_fd = OpenFile(path.data(), RdWr);
  if (pc_fd == kInvalidFd) {
    Report("Coverage: failed to open %s for reading/writing\n", path.data());
    Die();
  }

  pc_array_mapped_size = 0;
  CovUpdateMapping(coverage_dir);
}

void CoverageData::Init() {
  pc_fd = kInvalidFd;
}

void CoverageData::Enable() {
  if (pc_array)
    return;
  pc_array = reinterpret_cast<uptr *>(
      MmapNoReserveOrDie(sizeof(uptr) * kPcArrayMaxSize, "CovInit"));
  atomic_store(&pc_array_index, 0, memory_order_relaxed);
  if (common_flags()->coverage_direct) {
    atomic_store(&pc_array_size, 0, memory_order_relaxed);
  } else {
    atomic_store(&pc_array_size, kPcArrayMaxSize, memory_order_relaxed);
  }

  cc_array = reinterpret_cast<uptr **>(MmapNoReserveOrDie(
      sizeof(uptr *) * kCcArrayMaxSize, "CovInit::cc_array"));
  atomic_store(&cc_array_size, kCcArrayMaxSize, memory_order_relaxed);
  atomic_store(&cc_array_index, 0, memory_order_relaxed);

  // Allocate tr_event_array with a guard page at the end.
  tr_event_array = reinterpret_cast<u32 *>(MmapNoReserveOrDie(
      sizeof(tr_event_array[0]) * kTrEventArrayMaxSize + GetMmapGranularity(),
      "CovInit::tr_event_array"));
  MprotectNoAccess(
      reinterpret_cast<uptr>(&tr_event_array[kTrEventArrayMaxSize]),
      GetMmapGranularity());
  tr_event_array_size = kTrEventArrayMaxSize;
  tr_event_pointer = tr_event_array;

  num_8bit_counters = 0;
}

void CoverageData::InitializeGuardArray(s32 *guards) {
  Enable();  // Make sure coverage is enabled at this point.
  s32 n = guards[0];
  for (s32 j = 1; j <= n; j++) {
    uptr idx = atomic_fetch_add(&pc_array_index, 1, memory_order_relaxed);
    guards[j] = -static_cast<s32>(idx + 1);
  }
}
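
// For example (illustrative): for a fresh process and a unit with n == 3,
// guards arrives as {3, 0, 0, 0} and leaves as {3, -1, -2, -3}; the
// instrumented blocks then pass &guards[1]..&guards[3] to
// __sanitizer_cov_with_check, which flips each guard to its positive value
// on first execution.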

void CoverageData::Disable() {
  if (pc_array) {
    UnmapOrDie(pc_array, sizeof(uptr) * kPcArrayMaxSize);
    pc_array = nullptr;
  }
  if (cc_array) {
    UnmapOrDie(cc_array, sizeof(uptr *) * kCcArrayMaxSize);
    cc_array = nullptr;
  }
  if (tr_event_array) {
    UnmapOrDie(tr_event_array,
               sizeof(tr_event_array[0]) * kTrEventArrayMaxSize +
                   GetMmapGranularity());
    tr_event_array = nullptr;
    tr_event_pointer = nullptr;
  }
  if (pc_fd != kInvalidFd) {
    CloseFile(pc_fd);
    pc_fd = kInvalidFd;
  }
}

void CoverageData::ReinitializeGuards() {
  // Assuming single thread.
  atomic_store(&pc_array_index, 0, memory_order_relaxed);
  for (uptr i = 0; i < guard_array_vec.size(); i++)
    InitializeGuardArray(guard_array_vec[i]);
}

void CoverageData::ReInit() {
  Disable();
  if (coverage_enabled) {
    if (common_flags()->coverage_direct) {
      // In memory-mapped mode we must extend the new file to the known array
      // size.
      uptr size = atomic_load(&pc_array_size, memory_order_relaxed);
      Enable();
      if (size) Extend(size);
      if (coverage_enabled) CovUpdateMapping(coverage_dir);
    } else {
      Enable();
    }
  }
  // Re-initialize the guards.
  // We are single-threaded now, no need to grab any lock.
  CHECK_EQ(atomic_load(&pc_array_index, memory_order_relaxed), 0);
  ReinitializeGuards();
}

void CoverageData::BeforeFork() {
  mu.Lock();
}

void CoverageData::AfterFork(int child_pid) {
  // We are single-threaded so it's OK to release the lock early.
  mu.Unlock();
  if (child_pid == 0) ReInit();
}

// Extend the coverage PC array to fit npcs additional elements.
void CoverageData::Extend(uptr npcs) {
  if (!common_flags()->coverage_direct) return;
  SpinMutexLock l(&mu);

  uptr size = atomic_load(&pc_array_size, memory_order_relaxed);
  size += npcs * sizeof(uptr);

  if (coverage_enabled && size > pc_array_mapped_size) {
    if (pc_fd == kInvalidFd) DirectOpen();
    CHECK_NE(pc_fd, kInvalidFd);

    uptr new_mapped_size = pc_array_mapped_size;
    while (size > new_mapped_size) new_mapped_size += kPcArrayMmapSize;
    CHECK_LE(new_mapped_size, sizeof(uptr) * kPcArrayMaxSize);

    // Extend the file and map the new space at the end of pc_array.
    uptr res = internal_ftruncate(pc_fd, new_mapped_size);
    int err;
    if (internal_iserror(res, &err)) {
      Printf("failed to extend raw coverage file: %d\n", err);
      Die();
    }

    uptr next_map_base = ((uptr)pc_array) + pc_array_mapped_size;
    void *p = MapWritableFileToMemory((void *)next_map_base,
                                      new_mapped_size - pc_array_mapped_size,
                                      pc_fd, pc_array_mapped_size);
    CHECK_EQ((uptr)p, next_map_base);
    pc_array_mapped_size = new_mapped_size;
  }

  atomic_store(&pc_array_size, size, memory_order_release);
}

void CoverageData::InitializeCounters(u8 *counters, uptr n) {
  if (!counters) return;
  CHECK_EQ(reinterpret_cast<uptr>(counters) % 16, 0);
  n = RoundUpTo(n, 16); // The compiler must ensure that counters is 16-aligned.
  SpinMutexLock l(&mu);
  counters_vec.push_back({counters, n});
  num_8bit_counters += n;
}

void CoverageData::UpdateModuleNameVec(uptr caller_pc, uptr range_beg,
                                       uptr range_end) {
  auto sym = Symbolizer::GetOrInit();
  if (!sym)
    return;
  const char *module_name = sym->GetModuleNameForPc(caller_pc);
  if (!module_name) return;
  if (module_name_vec.empty() ||
      module_name_vec.back().copied_module_name != module_name)
    module_name_vec.push_back({module_name, range_beg, range_end});
  else
    module_name_vec.back().end = range_end;
}

void CoverageData::InitializeGuards(s32 *guards, uptr n,
                                    const char *comp_unit_name,
                                    uptr caller_pc) {
  // The array 'guards' has n+1 elements; we use element zero
  // to store 'n'.
  CHECK_LT(n, 1 << 30);
  guards[0] = static_cast<s32>(n);
  InitializeGuardArray(guards);
  SpinMutexLock l(&mu);
  uptr range_end = atomic_load(&pc_array_index, memory_order_relaxed);
  uptr range_beg = range_end - n;
  comp_unit_name_vec.push_back({comp_unit_name, range_beg, range_end});
  guard_array_vec.push_back(guards);
  UpdateModuleNameVec(caller_pc, range_beg, range_end);
}

static const uptr kBundleCounterBits = 16;

// When coverage_order_pcs==true and SANITIZER_WORDSIZE==64
// we insert the global counter into the most significant 16 bits of the PC.
uptr BundlePcAndCounter(uptr pc, uptr counter) {
  if (SANITIZER_WORDSIZE != 64 || !common_flags()->coverage_order_pcs)
    return pc;
  static const uptr kMaxCounter = (1 << kBundleCounterBits) - 1;
  if (counter > kMaxCounter)
    counter = kMaxCounter;
  CHECK_EQ(0, pc >> (SANITIZER_WORDSIZE - kBundleCounterBits));
  return pc | (counter << (SANITIZER_WORDSIZE - kBundleCounterBits));
}

uptr UnbundlePc(uptr bundle) {
  if (SANITIZER_WORDSIZE != 64 || !common_flags()->coverage_order_pcs)
    return bundle;
  return (bundle << kBundleCounterBits) >> kBundleCounterBits;
}

uptr UnbundleCounter(uptr bundle) {
  if (SANITIZER_WORDSIZE != 64 || !common_flags()->coverage_order_pcs)
    return 0;
  return bundle >> (SANITIZER_WORDSIZE - kBundleCounterBits);
}
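
// Worked example (illustrative, 64-bit, coverage_order_pcs=1): with
// pc = 0x4000001234 and counter = 5, BundlePcAndCounter() yields
// 0x0005004000001234; UnbundlePc() then recovers 0x4000001234 and
// UnbundleCounter() recovers 5. Counters above 0xffff saturate to 0xffff.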

// If guard is negative, atomically set it to -guard and store the PC in
// pc_array.
void CoverageData::Add(uptr pc, u32 *guard) {
  atomic_uint32_t *atomic_guard = reinterpret_cast<atomic_uint32_t*>(guard);
  s32 guard_value = atomic_load(atomic_guard, memory_order_relaxed);
  if (guard_value >= 0) return;

  atomic_store(atomic_guard, -guard_value, memory_order_relaxed);
  if (!pc_array) return;

  uptr idx = -guard_value - 1;
  if (idx >= atomic_load(&pc_array_index, memory_order_acquire))
    return;  // May happen after fork when pc_array_index becomes 0.
  CHECK_LT(idx * sizeof(uptr),
           atomic_load(&pc_array_size, memory_order_acquire));
  uptr counter = atomic_fetch_add(&coverage_counter, 1, memory_order_relaxed);
  pc_array[idx] = BundlePcAndCounter(pc, counter);
}

// Registers a pair caller=>callee.
// When a given caller is seen for the first time, the callee_cache is added
// to the global array cc_array, callee_cache[0] is set to caller and
// callee_cache[1] is set to cache_size.
// Then we try to add the callee to callee_cache[2, cache_size) if it is
// not already there.
// If the cache is full we drop the callee (may want to fix this later).
void CoverageData::IndirCall(uptr caller, uptr callee, uptr callee_cache[],
                             uptr cache_size) {
  if (!cc_array) return;
  atomic_uintptr_t *atomic_callee_cache =
      reinterpret_cast<atomic_uintptr_t *>(callee_cache);
  uptr zero = 0;
  if (atomic_compare_exchange_strong(&atomic_callee_cache[0], &zero, caller,
                                     memory_order_seq_cst)) {
    uptr idx = atomic_fetch_add(&cc_array_index, 1, memory_order_relaxed);
    CHECK_LT(idx * sizeof(uptr),
             atomic_load(&cc_array_size, memory_order_acquire));
    callee_cache[1] = cache_size;
    cc_array[idx] = callee_cache;
  }
  CHECK_EQ(atomic_load(&atomic_callee_cache[0], memory_order_relaxed), caller);
  for (uptr i = 2; i < cache_size; i++) {
    uptr was = 0;
    if (atomic_compare_exchange_strong(&atomic_callee_cache[i], &was, callee,
                                       memory_order_seq_cst)) {
      atomic_fetch_add(&coverage_counter, 1, memory_order_relaxed);
      return;
    }
    if (was == callee)  // Already have this callee.
      return;
  }
}

uptr CoverageData::GetNumberOf8bitCounters() {
  return num_8bit_counters;
}

// Map every 8-bit counter to an 8-bit bitset and clear the counter.
uptr CoverageData::Update8bitCounterBitsetAndClearCounters(u8 *bitset) {
  uptr num_new_bits = 0;
  uptr cur = 0;
  // For better speed we map 8 counters to 8 bytes of bitset at once.
  static const uptr kBatchSize = 8;
  CHECK_EQ(reinterpret_cast<uptr>(bitset) % kBatchSize, 0);
  for (uptr i = 0, len = counters_vec.size(); i < len; i++) {
    u8 *c = counters_vec[i].counters;
    uptr n = counters_vec[i].n;
    CHECK_EQ(n % 16, 0);
    CHECK_EQ(cur % kBatchSize, 0);
    CHECK_EQ(reinterpret_cast<uptr>(c) % kBatchSize, 0);
    if (!bitset) {
      internal_bzero_aligned16(c, n);
      cur += n;
      continue;
    }
    for (uptr j = 0; j < n; j += kBatchSize, cur += kBatchSize) {
      CHECK_LT(cur, num_8bit_counters);
      u64 *pc64 = reinterpret_cast<u64*>(c + j);
      u64 *pb64 = reinterpret_cast<u64*>(bitset + cur);
      u64 c64 = *pc64;
      u64 old_bits_64 = *pb64;
      u64 new_bits_64 = old_bits_64;
      if (c64) {
        *pc64 = 0;
        for (uptr k = 0; k < kBatchSize; k++) {
          u64 x = (c64 >> (8 * k)) & 0xff;
          if (x) {
            u64 bit = 0;
            /**/ if (x >= 128) bit = 128;
            else if (x >= 32) bit = 64;
            else if (x >= 16) bit = 32;
            else if (x >= 8) bit = 16;
            else if (x >= 4) bit = 8;
            else if (x >= 3) bit = 4;
            else if (x >= 2) bit = 2;
            else if (x >= 1) bit = 1;
            u64 mask = bit << (8 * k);
            if (!(new_bits_64 & mask)) {
              num_new_bits++;
              new_bits_64 |= mask;
            }
          }
        }
        *pb64 = new_bits_64;
      }
    }
  }
  CHECK_EQ(cur, num_8bit_counters);
  return num_new_bits;
}
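
// The mapping above is logarithmic: each counter byte is collapsed to a
// single bit chosen by bucket ([1], [2], [3], [4,7], [8,15], [16,31],
// [32,127], [128,255]). E.g. (illustrative) a counter value of 5 sets bit 8
// in the corresponding bitset byte; a later value of 100 additionally sets
// bit 64, and num_new_bits counts only bits that were not set before.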

uptr *CoverageData::data() {
  return pc_array;
}

uptr CoverageData::size() {
  return atomic_load(&pc_array_index, memory_order_relaxed);
}

// Block layout for packed file format: header, followed by the module name
// (no trailing zero), followed by the data blob.
struct CovHeader {
  int pid;
  unsigned int module_name_length;
  unsigned int data_length;
};
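
// E.g. (illustrative) a packed block for pid 1234, module "libfoo.so"
// (9 name bytes) and a 24-byte blob is laid out as:
//   {1234, 9, 24} "libfoo.so" <24 bytes of data>
// When writing to a socket, CovWritePacked below splits a large blob into
// several such blocks, each repeating the header and module name.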

static void CovWritePacked(int pid, const char *module, const void *blob,
                           unsigned int blob_size) {
  if (cov_fd == kInvalidFd) return;
  unsigned module_name_length = internal_strlen(module);
  CovHeader header = {pid, module_name_length, blob_size};

  if (cov_max_block_size == 0) {
    // Writing to a file. Just go ahead.
    WriteToFile(cov_fd, &header, sizeof(header));
    WriteToFile(cov_fd, module, module_name_length);
    WriteToFile(cov_fd, blob, blob_size);
  } else {
    // Writing to a socket. We want to split the data into appropriately sized
    // blocks.
    InternalScopedBuffer<char> block(cov_max_block_size);
    CHECK_EQ((uptr)block.data(), (uptr)(CovHeader *)block.data());
    uptr header_size_with_module = sizeof(header) + module_name_length;
    CHECK_LT(header_size_with_module, cov_max_block_size);
    unsigned int max_payload_size =
        cov_max_block_size - header_size_with_module;
    char *block_pos = block.data();
    internal_memcpy(block_pos, &header, sizeof(header));
    block_pos += sizeof(header);
    internal_memcpy(block_pos, module, module_name_length);
    block_pos += module_name_length;
    char *block_data_begin = block_pos;
    const char *blob_pos = (const char *)blob;
    while (blob_size > 0) {
      unsigned int payload_size = Min(blob_size, max_payload_size);
      blob_size -= payload_size;
      internal_memcpy(block_data_begin, blob_pos, payload_size);
      blob_pos += payload_size;
      ((CovHeader *)block.data())->data_length = payload_size;
      WriteToFile(cov_fd, block.data(), header_size_with_module + payload_size);
    }
  }
}

// If packed = false: <name>.<pid>.<sancov> (name = module name).
// If packed = true and name == 0: <pid>.<sancov>.<packed>.
// If packed = true and name != 0: <name>.<sancov>.<packed> (name is
// user-supplied).
static fd_t CovOpenFile(InternalScopedString *path, bool packed,
                       const char *name, const char *extension = "sancov") {
  path->clear();
  if (!packed) {
    CHECK(name);
    path->append("%s/%s.%zd.%s", coverage_dir, name, internal_getpid(),
                extension);
  } else {
    if (!name)
      path->append("%s/%zd.%s.packed", coverage_dir, internal_getpid(),
                  extension);
    else
      path->append("%s/%s.%s.packed", coverage_dir, name, extension);
  }
  fd_t fd = OpenFile(path->data(), WrOnly);
  if (fd == kInvalidFd)
    Report("SanitizerCoverage: failed to open %s for writing\n", path->data());
  return fd;
}

// Dump trace PCs and trace events into two separate files.
void CoverageData::DumpTrace() {
  uptr max_idx = tr_event_pointer - tr_event_array;
  if (!max_idx) return;
  auto sym = Symbolizer::GetOrInit();
  if (!sym)
    return;
  InternalScopedString out(32 << 20);
  for (uptr i = 0, n = size(); i < n; i++) {
    const char *module_name = "<unknown>";
    uptr module_address = 0;
    sym->GetModuleNameAndOffsetForPC(UnbundlePc(pc_array[i]), &module_name,
                                     &module_address);
    out.append("%s 0x%zx\n", module_name, module_address);
  }
  InternalScopedString path(kMaxPathLength);
  fd_t fd = CovOpenFile(&path, false, "trace-points");
  if (fd == kInvalidFd) return;
  WriteToFile(fd, out.data(), out.length());
  CloseFile(fd);

  fd = CovOpenFile(&path, false, "trace-compunits");
  if (fd == kInvalidFd) return;
  out.clear();
  for (uptr i = 0; i < comp_unit_name_vec.size(); i++)
    out.append("%s\n", comp_unit_name_vec[i].copied_module_name);
  WriteToFile(fd, out.data(), out.length());
  CloseFile(fd);

  fd = CovOpenFile(&path, false, "trace-events");
  if (fd == kInvalidFd) return;
  uptr bytes_to_write = max_idx * sizeof(tr_event_array[0]);
  u8 *event_bytes = reinterpret_cast<u8*>(tr_event_array);
  // The trace file could be huge, and may not be written with a single syscall.
  while (bytes_to_write) {
    uptr actually_written;
    if (WriteToFile(fd, event_bytes, bytes_to_write, &actually_written) &&
        actually_written <= bytes_to_write) {
      bytes_to_write -= actually_written;
      event_bytes += actually_written;
    } else {
      break;
    }
  }
  CloseFile(fd);
  VReport(1, " CovDump: Trace: %zd PCs written\n", size());
  VReport(1, " CovDump: Trace: %zd Events written\n", max_idx);
}
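
// A minimal sketch of reading the two trace files offline (illustrative
// only, not part of the run-time; assumes a little-endian host):
//
//   #include <stdint.h>
//   #include <stdio.h>
//   // Each u32 in trace-events.<pid>.sancov is a zero-based index into the
//   // lines of trace-points.<pid>.sancov ("module_name offset" per line).
//   void PrintEventIndices(FILE *events) {
//     uint32_t idx;
//     while (fread(&idx, sizeof(idx), 1, events) == 1)
//       printf("%u\n", idx);
//   }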

// This function dumps the caller=>callee pairs into a file as a sequence of
// lines like "module_name offset".
void CoverageData::DumpCallerCalleePairs() {
  uptr max_idx = atomic_load(&cc_array_index, memory_order_relaxed);
  if (!max_idx) return;
  auto sym = Symbolizer::GetOrInit();
  if (!sym)
    return;
  InternalScopedString out(32 << 20);
  uptr total = 0;
  for (uptr i = 0; i < max_idx; i++) {
    uptr *cc_cache = cc_array[i];
    CHECK(cc_cache);
    uptr caller = cc_cache[0];
    uptr n_callees = cc_cache[1];
    const char *caller_module_name = "<unknown>";
    uptr caller_module_address = 0;
    sym->GetModuleNameAndOffsetForPC(caller, &caller_module_name,
                                     &caller_module_address);
    for (uptr j = 2; j < n_callees; j++) {
      uptr callee = cc_cache[j];
      if (!callee) break;
      total++;
      const char *callee_module_name = "<unknown>";
      uptr callee_module_address = 0;
      sym->GetModuleNameAndOffsetForPC(callee, &callee_module_name,
                                       &callee_module_address);
      out.append("%s 0x%zx\n%s 0x%zx\n", caller_module_name,
                 caller_module_address, callee_module_name,
                 callee_module_address);
    }
  }
  InternalScopedString path(kMaxPathLength);
  fd_t fd = CovOpenFile(&path, false, "caller-callee");
  if (fd == kInvalidFd) return;
  WriteToFile(fd, out.data(), out.length());
  CloseFile(fd);
  VReport(1, " CovDump: %zd caller-callee pairs written\n", total);
}

// Record the current PC into the event buffer.
// Every event is a u32 value (the index in pc_array) so we compute it once
// and then cache it in the provided 'cache' storage.
//
// This function will eventually be inlined by the compiler.
void CoverageData::TraceBasicBlock(s32 *id) {
  // We will trap here if
  //  1. coverage is not enabled at run-time.
  //  2. The array tr_event_array is full.
  *tr_event_pointer = static_cast<u32>(*id - 1);
  tr_event_pointer++;
}

void CoverageData::DumpCounters() {
  if (!common_flags()->coverage_counters) return;
  uptr n = coverage_data.GetNumberOf8bitCounters();
  if (!n) return;
  InternalScopedBuffer<u8> bitset(n);
  coverage_data.Update8bitCounterBitsetAndClearCounters(bitset.data());
  InternalScopedString path(kMaxPathLength);

  for (uptr m = 0; m < module_name_vec.size(); m++) {
    auto r = module_name_vec[m];
    CHECK(r.copied_module_name);
    CHECK_LE(r.beg, r.end);
    CHECK_LE(r.end, size());
    const char *base_name = StripModuleName(r.copied_module_name);
    fd_t fd =
        CovOpenFile(&path, /* packed */ false, base_name, "counters-sancov");
    if (fd == kInvalidFd) return;
    WriteToFile(fd, bitset.data() + r.beg, r.end - r.beg);
    CloseFile(fd);
    VReport(1, " CovDump: %zd counters written for '%s'\n", r.end - r.beg,
            base_name);
  }
}

void CoverageData::DumpAsBitSet() {
  if (!common_flags()->coverage_bitset) return;
  if (!size()) return;
  InternalScopedBuffer<char> out(size());
  InternalScopedString path(kMaxPathLength);
  for (uptr m = 0; m < module_name_vec.size(); m++) {
    uptr n_set_bits = 0;
    auto r = module_name_vec[m];
    CHECK(r.copied_module_name);
    CHECK_LE(r.beg, r.end);
    CHECK_LE(r.end, size());
    for (uptr i = r.beg; i < r.end; i++) {
      uptr pc = UnbundlePc(pc_array[i]);
      out[i] = pc ? '1' : '0';
      if (pc)
        n_set_bits++;
    }
    const char *base_name = StripModuleName(r.copied_module_name);
    fd_t fd = CovOpenFile(&path, /* packed */false, base_name, "bitset-sancov");
    if (fd == kInvalidFd) return;
    WriteToFile(fd, out.data() + r.beg, r.end - r.beg);
    CloseFile(fd);
    VReport(1,
            " CovDump: bitset of %zd bits written for '%s', %zd bits are set\n",
            r.end - r.beg, base_name, n_set_bits);
  }
}

void CoverageData::DumpOffsets() {
  auto sym = Symbolizer::GetOrInit();
  if (!common_flags()->coverage_pcs) return;
  CHECK_NE(sym, nullptr);
  InternalMmapVector<uptr> offsets(0);
  InternalScopedString path(kMaxPathLength);
  for (uptr m = 0; m < module_name_vec.size(); m++) {
    offsets.clear();
    uptr num_words_for_magic = SANITIZER_WORDSIZE == 64 ? 1 : 2;
    for (uptr i = 0; i < num_words_for_magic; i++)
      offsets.push_back(0);
    auto r = module_name_vec[m];
    CHECK(r.copied_module_name);
    CHECK_LE(r.beg, r.end);
    CHECK_LE(r.end, size());
    for (uptr i = r.beg; i < r.end; i++) {
      uptr pc = UnbundlePc(pc_array[i]);
      uptr counter = UnbundleCounter(pc_array[i]);
      if (!pc) continue; // Not visited.
      uptr offset = 0;
      sym->GetModuleNameAndOffsetForPC(pc, nullptr, &offset);
      offsets.push_back(BundlePcAndCounter(offset, counter));
    }

    CHECK_GE(offsets.size(), num_words_for_magic);
    SortArray(offsets.data(), offsets.size());
    for (uptr i = 0; i < offsets.size(); i++)
      offsets[i] = UnbundlePc(offsets[i]);

    uptr num_offsets = offsets.size() - num_words_for_magic;
    u64 *magic_p = reinterpret_cast<u64*>(offsets.data());
    CHECK_EQ(*magic_p, 0ULL);
    // FIXME: we may want to write 32-bit offsets even in 64-bit mode
    // if all the offsets are small enough.
    *magic_p = SANITIZER_WORDSIZE == 64 ? kMagic64 : kMagic32;

    const char *module_name = StripModuleName(r.copied_module_name);
    if (cov_sandboxed) {
      if (cov_fd != kInvalidFd) {
        CovWritePacked(internal_getpid(), module_name, offsets.data(),
                       offsets.size() * sizeof(offsets[0]));
        VReport(1, " CovDump: %zd PCs written to packed file\n", num_offsets);
      }
    } else {
      // One file per module per process.
      fd_t fd = CovOpenFile(&path, false /* packed */, module_name);
      if (fd == kInvalidFd) continue;
      WriteToFile(fd, offsets.data(), offsets.size() * sizeof(offsets[0]));
      CloseFile(fd);
      VReport(1, " CovDump: %s: %zd PCs written\n", path.data(), num_offsets);
    }
  }
  if (cov_fd != kInvalidFd)
    CloseFile(cov_fd);
}

void CoverageData::DumpAll() {
  if (!coverage_enabled || common_flags()->coverage_direct) return;
  if (atomic_fetch_add(&dump_once_guard, 1, memory_order_relaxed))
    return;
  DumpAsBitSet();
  DumpCounters();
  DumpTrace();
  DumpOffsets();
  DumpCallerCalleePairs();
}

void CovPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {
  if (!args) return;
  if (!coverage_enabled) return;
  cov_sandboxed = args->coverage_sandboxed;
  if (!cov_sandboxed) return;
  cov_max_block_size = args->coverage_max_block_size;
  if (args->coverage_fd >= 0) {
    cov_fd = (fd_t)args->coverage_fd;
  } else {
    InternalScopedString path(kMaxPathLength);
    // Pre-open the file now. The sandbox won't allow us to do it later.
    cov_fd = CovOpenFile(&path, true /* packed */, 0);
  }
}

fd_t MaybeOpenCovFile(const char *name) {
  CHECK(name);
  if (!coverage_enabled) return kInvalidFd;
  InternalScopedString path(kMaxPathLength);
  return CovOpenFile(&path, true /* packed */, name);
}

void CovBeforeFork() {
  coverage_data.BeforeFork();
}

void CovAfterFork(int child_pid) {
  coverage_data.AfterFork(child_pid);
}

void InitializeCoverage(bool enabled, const char *dir) {
  if (coverage_enabled)
    return;  // May happen if two sanitizers enable coverage in the same process.
  coverage_enabled = enabled;
  coverage_dir = dir;
  coverage_data.Init();
  if (enabled) coverage_data.Enable();
  if (!common_flags()->coverage_direct) Atexit(__sanitizer_cov_dump);
}

void ReInitializeCoverage(bool enabled, const char *dir) {
  coverage_enabled = enabled;
  coverage_dir = dir;
  coverage_data.ReInit();
}

void CoverageUpdateMapping() {
  if (coverage_enabled)
    CovUpdateMapping(coverage_dir);
}

}  // namespace __sanitizer

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov(u32 *guard) {
  coverage_data.Add(StackTrace::GetPreviousInstructionPc(GET_CALLER_PC()),
                    guard);
}
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_with_check(u32 *guard) {
  atomic_uint32_t *atomic_guard = reinterpret_cast<atomic_uint32_t*>(guard);
  if (static_cast<s32>(
          __sanitizer::atomic_load(atomic_guard, memory_order_relaxed)) < 0)
    __sanitizer_cov(guard);
}
SANITIZER_INTERFACE_ATTRIBUTE void
__sanitizer_cov_indir_call16(uptr callee, uptr callee_cache16[]) {
  coverage_data.IndirCall(StackTrace::GetPreviousInstructionPc(GET_CALLER_PC()),
                          callee, callee_cache16, 16);
}
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_init() {
  coverage_enabled = true;
  coverage_dir = common_flags()->coverage_dir;
  coverage_data.Init();
}
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump() {
  coverage_data.DumpAll();
}
SANITIZER_INTERFACE_ATTRIBUTE void
__sanitizer_cov_module_init(s32 *guards, uptr npcs, u8 *counters,
                            const char *comp_unit_name) {
  coverage_data.InitializeGuards(guards, npcs, comp_unit_name, GET_CALLER_PC());
  coverage_data.InitializeCounters(counters, npcs);
  if (!common_flags()->coverage_direct) return;
  if (SANITIZER_ANDROID && coverage_enabled) {
    // dlopen/dlclose interceptors do not work on Android, so we rely on
    // Extend() calls to update .sancov.map.
    CovUpdateMapping(coverage_dir, GET_CALLER_PC());
  }
  coverage_data.Extend(npcs);
}
SANITIZER_INTERFACE_ATTRIBUTE
sptr __sanitizer_maybe_open_cov_file(const char *name) {
  return (sptr)MaybeOpenCovFile(name);
}
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_total_unique_coverage() {
  return atomic_load(&coverage_counter, memory_order_relaxed);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_cov_trace_func_enter(s32 *id) {
  coverage_data.TraceBasicBlock(id);
}
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_cov_trace_basic_block(s32 *id) {
  coverage_data.TraceBasicBlock(id);
}
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_reset_coverage() {
  coverage_data.ReinitializeGuards();
  internal_bzero_aligned16(
      coverage_data.data(),
      RoundUpTo(coverage_data.size() * sizeof(coverage_data.data()[0]), 16));
}
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_coverage_guards(uptr **data) {
  *data = coverage_data.data();
  return coverage_data.size();
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_number_of_counters() {
  return coverage_data.GetNumberOf8bitCounters();
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_update_counter_bitset_and_clear_counters(u8 *bitset) {
  return coverage_data.Update8bitCounterBitsetAndClearCounters(bitset);
}
}  // extern "C"
