// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/debug/activity_tracker.h"

#include <algorithm>
#include <limits>
#include <utility>

#include "base/atomic_sequence_num.h"
#include "base/debug/stack_trace.h"
#include "base/files/file.h"
#include "base/files/file_path.h"
#include "base/files/memory_mapped_file.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/metrics/field_trial.h"
#include "base/metrics/histogram_macros.h"
#include "base/pending_task.h"
#include "base/pickle.h"
#include "base/process/process.h"
#include "base/process/process_handle.h"
#include "base/stl_util.h"
#include "base/strings/string_util.h"
#include "base/strings/utf_string_conversions.h"
#include "base/threading/platform_thread.h"

namespace base {
namespace debug {

namespace {

// The minimum depth a stack should support.
const int kMinStackDepth = 2;

// The amount of memory set aside for holding arbitrary user data (key/value
// pairs) globally or associated with ActivityData entries.
const size_t kUserDataSize = 1 << 10;     // 1 KiB
const size_t kProcessDataSize = 4 << 10;  // 4 KiB
const size_t kGlobalDataSize = 16 << 10;  // 16 KiB
const size_t kMaxUserDataNameLength =
    static_cast<size_t>(std::numeric_limits<uint8_t>::max());

// A constant used to indicate that module information is changing.
const uint32_t kModuleInformationChanging = 0x80000000;

// The key used to record the process phase in the process data.
const char kProcessPhaseDataKey[] = "process-phase";

// An atomically incrementing number, used to check for recreations of objects
// in the same memory space.
StaticAtomicSequenceNumber g_next_id;

union ThreadRef {
  int64_t as_id;
#if defined(OS_WIN)
  // On Windows, the handle itself is often a pseudo-handle with a common
  // value meaning "this thread" and so the thread-id is used. The former
  // can be converted to a thread-id with a system call.
  PlatformThreadId as_tid;
#elif defined(OS_POSIX)
  // On Posix, the handle is always a unique identifier so no conversion
  // needs to be done. However, its value is officially opaque so there
  // is no one correct way to convert it to a numerical identifier.
  PlatformThreadHandle::Handle as_handle;
#endif
};

// Gets the next non-zero identifier. It is only unique within a process.
uint32_t GetNextDataId() {
  uint32_t id;
  while ((id = g_next_id.GetNext()) == 0)
    ;
  return id;
}

// Gets the current process-id, either from the GlobalActivityTracker if it
// exists (where the PID can be defined for testing) or from the system if
// there isn't such.
int64_t GetProcessId() {
  GlobalActivityTracker* global = GlobalActivityTracker::Get();
  if (global)
    return global->process_id();
  return GetCurrentProcId();
}

// Finds and reuses a specific allocation or creates a new one.
PersistentMemoryAllocator::Reference AllocateFrom(
    PersistentMemoryAllocator* allocator,
    uint32_t from_type,
    size_t size,
    uint32_t to_type) {
  PersistentMemoryAllocator::Iterator iter(allocator);
  PersistentMemoryAllocator::Reference ref;
  while ((ref = iter.GetNextOfType(from_type)) != 0) {
    DCHECK_LE(size, allocator->GetAllocSize(ref));
    // This can fail if another thread has just taken it. It is assumed that
    // the memory is cleared during the "free" operation.
    if (allocator->ChangeType(ref, to_type, from_type, /*clear=*/false))
      return ref;
  }

  return allocator->Allocate(size, to_type);
}

// Determines the previous aligned index.
size_t RoundDownToAlignment(size_t index, size_t alignment) {
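  // Assuming |alignment| is a power of two, (0 - alignment) wraps around to
  // the bit-mask ~(alignment - 1), which clears the low-order bits.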
  return index & (0 - alignment);
}

// Determines the next aligned index.
size_t RoundUpToAlignment(size_t index, size_t alignment) {
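  // Adding (alignment - 1) before masking bumps any unaligned index up to the
  // next multiple of |alignment| (again assumed to be a power of two).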
  return (index + (alignment - 1)) & (0 - alignment);
}

// Converts "tick" timing into wall time.
Time WallTimeFromTickTime(int64_t ticks_start, int64_t ticks, Time time_start) {
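  // Time and TimeTicks both use microsecond internal values so the tick delta
  // can be added directly to the wall-clock start time.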
  return time_start + TimeDelta::FromInternalValue(ticks - ticks_start);
}

}  // namespace

OwningProcess::OwningProcess() {}
OwningProcess::~OwningProcess() {}

void OwningProcess::Release_Initialize(int64_t pid) {
  uint32_t old_id = data_id.load(std::memory_order_acquire);
  DCHECK_EQ(0U, old_id);
  process_id = pid != 0 ? pid : GetProcessId();
  create_stamp = Time::Now().ToInternalValue();
  data_id.store(GetNextDataId(), std::memory_order_release);
}

void OwningProcess::SetOwningProcessIdForTesting(int64_t pid, int64_t stamp) {
  DCHECK_NE(0U, data_id);
  process_id = pid;
  create_stamp = stamp;
}

// static
bool OwningProcess::GetOwningProcessId(const void* memory,
                                       int64_t* out_id,
                                       int64_t* out_stamp) {
  const OwningProcess* info = reinterpret_cast<const OwningProcess*>(memory);
  uint32_t id = info->data_id.load(std::memory_order_acquire);
  if (id == 0)
    return false;

  *out_id = info->process_id;
  *out_stamp = info->create_stamp;
  return id == info->data_id.load(std::memory_order_seq_cst);
}

// It doesn't matter what this contains (though it will be all zeros) as only
// its address is important.
const ActivityData kNullActivityData = {};

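// Packs the platform-specific thread handle or thread-id into the single
// 64-bit |as_id| value (via the ThreadRef union) so it can be stored as
// ActivityData.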
ActivityData ActivityData::ForThread(const PlatformThreadHandle& handle) {
  ThreadRef thread_ref;
  thread_ref.as_id = 0;  // Zero the union in case other is smaller.
#if defined(OS_WIN)
  thread_ref.as_tid = ::GetThreadId(handle.platform_handle());
#elif defined(OS_POSIX)
  thread_ref.as_handle = handle.platform_handle();
#endif
  return ForThread(thread_ref.as_id);
}

ActivityTrackerMemoryAllocator::ActivityTrackerMemoryAllocator(
    PersistentMemoryAllocator* allocator,
    uint32_t object_type,
    uint32_t object_free_type,
    size_t object_size,
    size_t cache_size,
    bool make_iterable)
    : allocator_(allocator),
      object_type_(object_type),
      object_free_type_(object_free_type),
      object_size_(object_size),
      cache_size_(cache_size),
      make_iterable_(make_iterable),
      iterator_(allocator),
      cache_values_(new Reference[cache_size]),
      cache_used_(0) {
  DCHECK(allocator);
}

ActivityTrackerMemoryAllocator::~ActivityTrackerMemoryAllocator() {}

ActivityTrackerMemoryAllocator::Reference
ActivityTrackerMemoryAllocator::GetObjectReference() {
  // First see if there is a cached value that can be returned. This is much
  // faster than searching the memory system for free blocks.
  while (cache_used_ > 0) {
    Reference cached = cache_values_[--cache_used_];
    // Change the type of the cached object to the proper type and return it.
    // If the type-change fails that means another thread has taken this from
    // under us (via the search below) so ignore it and keep trying. Don't
    // clear the memory because that was done when the type was made "free".
    if (allocator_->ChangeType(cached, object_type_, object_free_type_, false))
      return cached;
  }

  // Fetch the next "free" object from persistent memory. Rather than restart
  // the iterator at the head each time and likely waste time going again
  // through objects that aren't relevant, the iterator continues from where
  // it last left off and is only reset when the end is reached. If the
  // returned reference matches |last|, then it has wrapped without finding
  // anything.
  const Reference last = iterator_.GetLast();
  while (true) {
    uint32_t type;
    Reference found = iterator_.GetNext(&type);
    if (found && type == object_free_type_) {
      // Found a free object. Change it to the proper type and return it. If
      // the type-change fails that means another thread has taken this from
      // under us so ignore it and keep trying.
      if (allocator_->ChangeType(found, object_type_, object_free_type_, false))
        return found;
    }
    if (found == last) {
      // Wrapped. No desired object was found.
      break;
    }
    if (!found) {
      // Reached end; start over at the beginning.
      iterator_.Reset();
    }
  }

  // No free block was found so instead allocate a new one.
  Reference allocated = allocator_->Allocate(object_size_, object_type_);
  if (allocated && make_iterable_)
    allocator_->MakeIterable(allocated);
  return allocated;
}

void ActivityTrackerMemoryAllocator::ReleaseObjectReference(Reference ref) {
  // Mark object as free.
  bool success = allocator_->ChangeType(ref, object_free_type_, object_type_,
                                        /*clear=*/true);
  DCHECK(success);

  // Add this reference to our "free" cache if there is space. If not, the type
  // has still been changed to indicate that it is free so this (or another)
  // thread can find it, albeit more slowly, using the iteration method above.
  if (cache_used_ < cache_size_)
    cache_values_[cache_used_++] = ref;
}

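// Fills |activity| with the current tick-time, the calling and origin
// addresses, the type and the data. On SyzyASan builds a snapshot of the
// current call stack is also captured.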
// static
void Activity::FillFrom(Activity* activity,
                        const void* program_counter,
                        const void* origin,
                        Type type,
                        const ActivityData& data) {
  activity->time_internal = base::TimeTicks::Now().ToInternalValue();
  activity->calling_address = reinterpret_cast<uintptr_t>(program_counter);
  activity->origin_address = reinterpret_cast<uintptr_t>(origin);
  activity->activity_type = type;
  activity->data = data;

#if defined(SYZYASAN)
  // Create a stacktrace from the current location and get the addresses.
  StackTrace stack_trace;
  size_t stack_depth;
  const void* const* stack_addrs = stack_trace.Addresses(&stack_depth);
  // Copy the stack addresses, ignoring the first one (here).
  size_t i;
  for (i = 1; i < stack_depth && i < kActivityCallStackSize; ++i) {
    activity->call_stack[i - 1] = reinterpret_cast<uintptr_t>(stack_addrs[i]);
  }
  activity->call_stack[i - 1] = 0;
#else
  activity->call_stack[0] = 0;
#endif
}

ActivityUserData::TypedValue::TypedValue() {}
ActivityUserData::TypedValue::TypedValue(const TypedValue& other) = default;
ActivityUserData::TypedValue::~TypedValue() {}

StringPiece ActivityUserData::TypedValue::Get() const {
  DCHECK_EQ(RAW_VALUE, type_);
  return long_value_;
}

StringPiece ActivityUserData::TypedValue::GetString() const {
  DCHECK_EQ(STRING_VALUE, type_);
  return long_value_;
}

bool ActivityUserData::TypedValue::GetBool() const {
  DCHECK_EQ(BOOL_VALUE, type_);
  return short_value_ != 0;
}

char ActivityUserData::TypedValue::GetChar() const {
  DCHECK_EQ(CHAR_VALUE, type_);
  return static_cast<char>(short_value_);
}

int64_t ActivityUserData::TypedValue::GetInt() const {
  DCHECK_EQ(SIGNED_VALUE, type_);
  return static_cast<int64_t>(short_value_);
}

uint64_t ActivityUserData::TypedValue::GetUint() const {
  DCHECK_EQ(UNSIGNED_VALUE, type_);
  return static_cast<uint64_t>(short_value_);
}

StringPiece ActivityUserData::TypedValue::GetReference() const {
  DCHECK_EQ(RAW_VALUE_REFERENCE, type_);
  return ref_value_;
}

StringPiece ActivityUserData::TypedValue::GetStringReference() const {
  DCHECK_EQ(STRING_VALUE_REFERENCE, type_);
  return ref_value_;
}

// These are required because std::atomic is (currently) not a POD type and
// thus clang requires explicit out-of-line constructors and destructors even
// when they do nothing.
ActivityUserData::ValueInfo::ValueInfo() {}
ActivityUserData::ValueInfo::ValueInfo(ValueInfo&&) = default;
ActivityUserData::ValueInfo::~ValueInfo() {}
ActivityUserData::MemoryHeader::MemoryHeader() {}
ActivityUserData::MemoryHeader::~MemoryHeader() {}
ActivityUserData::FieldHeader::FieldHeader() {}
ActivityUserData::FieldHeader::~FieldHeader() {}

ActivityUserData::ActivityUserData() : ActivityUserData(nullptr, 0, -1) {}

ActivityUserData::ActivityUserData(void* memory, size_t size, int64_t pid)
    : memory_(reinterpret_cast<char*>(memory)),
      available_(RoundDownToAlignment(size, kMemoryAlignment)),
      header_(reinterpret_cast<MemoryHeader*>(memory)),
      orig_data_id(0),
      orig_process_id(0),
      orig_create_stamp(0) {
  // It's possible that no user data is being stored.
  if (!memory_)
    return;

  static_assert(0 == sizeof(MemoryHeader) % kMemoryAlignment, "invalid header");
  DCHECK_LT(sizeof(MemoryHeader), available_);
  if (header_->owner.data_id.load(std::memory_order_acquire) == 0)
    header_->owner.Release_Initialize(pid);
  memory_ += sizeof(MemoryHeader);
  available_ -= sizeof(MemoryHeader);

  // Make a copy of identifying information for later comparison.
  *const_cast<uint32_t*>(&orig_data_id) =
      header_->owner.data_id.load(std::memory_order_acquire);
  *const_cast<int64_t*>(&orig_process_id) = header_->owner.process_id;
  *const_cast<int64_t*>(&orig_create_stamp) = header_->owner.create_stamp;

  // If there is already data present, load that. This allows the same class
  // to be used for analysis through snapshots.
  ImportExistingData();
}

ActivityUserData::~ActivityUserData() {}

bool ActivityUserData::CreateSnapshot(Snapshot* output_snapshot) const {
  DCHECK(output_snapshot);
  DCHECK(output_snapshot->empty());

  // Find any new data that may have been added by an active instance of this
  // class that is adding records.
  ImportExistingData();

  // Add all the values to the snapshot.
  for (const auto& entry : values_) {
    TypedValue value;
    const size_t size = entry.second.size_ptr->load(std::memory_order_acquire);
    value.type_ = entry.second.type;
    DCHECK_GE(entry.second.extent, size);

    switch (entry.second.type) {
      case RAW_VALUE:
      case STRING_VALUE:
        value.long_value_ =
            std::string(reinterpret_cast<char*>(entry.second.memory), size);
        break;
      case RAW_VALUE_REFERENCE:
      case STRING_VALUE_REFERENCE: {
        ReferenceRecord* ref =
            reinterpret_cast<ReferenceRecord*>(entry.second.memory);
        value.ref_value_ = StringPiece(
            reinterpret_cast<char*>(static_cast<uintptr_t>(ref->address)),
            static_cast<size_t>(ref->size));
      } break;
      case BOOL_VALUE:
      case CHAR_VALUE:
        value.short_value_ = *reinterpret_cast<char*>(entry.second.memory);
        break;
      case SIGNED_VALUE:
      case UNSIGNED_VALUE:
        value.short_value_ = *reinterpret_cast<uint64_t*>(entry.second.memory);
        break;
      case END_OF_VALUES:  // Included for completeness purposes.
        NOTREACHED();
    }
    auto inserted = output_snapshot->insert(
        std::make_pair(entry.second.name.as_string(), std::move(value)));
    DCHECK(inserted.second);  // True if inserted, false if existed.
  }

  // Another import attempt will validate that the underlying memory has not
  // been reused for another purpose. Entries added since the first import
  // will be ignored here but will be returned if another snapshot is created.
  ImportExistingData();
  if (!memory_) {
    output_snapshot->clear();
    return false;
  }

  // Successful snapshot.
  return true;
}

const void* ActivityUserData::GetBaseAddress() const {
  // The |memory_| pointer advances as elements are written but the |header_|
  // value is always at the start of the block so just return that.
  return header_;
}

void ActivityUserData::SetOwningProcessIdForTesting(int64_t pid,
                                                    int64_t stamp) {
  if (!header_)
    return;
  header_->owner.SetOwningProcessIdForTesting(pid, stamp);
}

// static
bool ActivityUserData::GetOwningProcessId(const void* memory,
                                          int64_t* out_id,
                                          int64_t* out_stamp) {
  const MemoryHeader* header = reinterpret_cast<const MemoryHeader*>(memory);
  return OwningProcess::GetOwningProcessId(&header->owner, out_id, out_stamp);
}

void ActivityUserData::Set(StringPiece name,
                           ValueType type,
                           const void* memory,
                           size_t size) {
  DCHECK_GE(std::numeric_limits<uint8_t>::max(), name.length());
  size = std::min(std::numeric_limits<uint16_t>::max() - (kMemoryAlignment - 1),
                  size);

  // It's possible that no user data is being stored.
  if (!memory_)
    return;

  // The storage of a name is limited so use that limit during lookup.
  if (name.length() > kMaxUserDataNameLength)
    name.set(name.data(), kMaxUserDataNameLength);

  ValueInfo* info;
  auto existing = values_.find(name);
  if (existing != values_.end()) {
    info = &existing->second;
  } else {
    // The name size is limited to what can be held in a single byte but
    // because there are no alignment constraints on strings, it's set tight
    // against the header. Its extent (the reserved space, even if it's not
    // all used) is calculated so that, when pressed against the header, the
    // following field will be aligned properly.
    size_t name_size = name.length();
    size_t name_extent =
        RoundUpToAlignment(sizeof(FieldHeader) + name_size, kMemoryAlignment) -
        sizeof(FieldHeader);
    size_t value_extent = RoundUpToAlignment(size, kMemoryAlignment);

    // The "base size" is the size of the header and (padded) string key. Stop
    // now if there's not room enough for even this.
    size_t base_size = sizeof(FieldHeader) + name_extent;
    if (base_size > available_)
      return;

    // The "full size" is the size for storing the entire value.
    size_t full_size = std::min(base_size + value_extent, available_);

    // If the value is actually a single byte, see if it can be stuffed at the
    // end of the name extent rather than wasting kMemoryAlignment bytes.
    if (size == 1 && name_extent > name_size) {
      full_size = base_size;
      --name_extent;
      --base_size;
    }

    // Truncate the stored size to the amount of available memory. Stop now if
    // there's not any room for even part of the value.
    if (size != 0) {
      size = std::min(full_size - base_size, size);
      if (size == 0)
        return;
    }

    // Allocate a chunk of memory.
    FieldHeader* header = reinterpret_cast<FieldHeader*>(memory_);
    memory_ += full_size;
    available_ -= full_size;

    // Datafill the header and name records. Memory must be zeroed. The |type|
    // is written last, atomically, to release all the other values.
    DCHECK_EQ(END_OF_VALUES, header->type.load(std::memory_order_relaxed));
    DCHECK_EQ(0, header->value_size.load(std::memory_order_relaxed));
    header->name_size = static_cast<uint8_t>(name_size);
    header->record_size = full_size;
    char* name_memory = reinterpret_cast<char*>(header) + sizeof(FieldHeader);
    void* value_memory =
        reinterpret_cast<char*>(header) + sizeof(FieldHeader) + name_extent;
    memcpy(name_memory, name.data(), name_size);
    header->type.store(type, std::memory_order_release);

    // Create an entry in |values_| so that this field can be found and changed
    // later on without having to allocate new entries.
    StringPiece persistent_name(name_memory, name_size);
    auto inserted =
        values_.insert(std::make_pair(persistent_name, ValueInfo()));
    DCHECK(inserted.second);  // True if inserted, false if existed.
    info = &inserted.first->second;
    info->name = persistent_name;
    info->memory = value_memory;
    info->size_ptr = &header->value_size;
    info->extent = full_size - sizeof(FieldHeader) - name_extent;
    info->type = type;
  }

  // Copy the value data to storage. The |size| is written last, atomically, to
  // release the copied data. Until then, a parallel reader will just ignore
  // records with a zero size.
  DCHECK_EQ(type, info->type);
  size = std::min(size, info->extent);
  info->size_ptr->store(0, std::memory_order_seq_cst);
  memcpy(info->memory, memory, size);
  info->size_ptr->store(size, std::memory_order_release);
}

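// Records only the address and size of |memory| (as a ReferenceRecord); the
// referenced bytes are not copied, so they are expected to remain valid for
// as long as the record might be read.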
void ActivityUserData::SetReference(StringPiece name,
                                    ValueType type,
                                    const void* memory,
                                    size_t size) {
  ReferenceRecord rec;
  rec.address = reinterpret_cast<uintptr_t>(memory);
  rec.size = size;
  Set(name, type, &rec, sizeof(rec));
}

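// Walks the records already present in the memory block, adding each one found
// to |values_| until an END_OF_VALUES marker or the end of the block is
// reached. If the block's owner information no longer matches what was
// captured at construction, the memory is treated as reused and dropped.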
void ActivityUserData::ImportExistingData() const {
  // It's possible that no user data is being stored.
  if (!memory_)
    return;

  while (available_ > sizeof(FieldHeader)) {
    FieldHeader* header = reinterpret_cast<FieldHeader*>(memory_);
    ValueType type =
        static_cast<ValueType>(header->type.load(std::memory_order_acquire));
    if (type == END_OF_VALUES)
      return;
    if (header->record_size > available_)
      return;

    size_t value_offset = RoundUpToAlignment(
        sizeof(FieldHeader) + header->name_size, kMemoryAlignment);
    if (header->record_size == value_offset &&
        header->value_size.load(std::memory_order_relaxed) == 1) {
      value_offset -= 1;
    }
    if (value_offset + header->value_size > header->record_size)
      return;

    ValueInfo info;
    info.name = StringPiece(memory_ + sizeof(FieldHeader), header->name_size);
    info.type = type;
    info.memory = memory_ + value_offset;
    info.size_ptr = &header->value_size;
    info.extent = header->record_size - value_offset;

    StringPiece key(info.name);
    values_.insert(std::make_pair(key, std::move(info)));

    memory_ += header->record_size;
    available_ -= header->record_size;
  }

  // Check if memory has been completely reused.
  if (header_->owner.data_id.load(std::memory_order_acquire) != orig_data_id ||
      header_->owner.process_id != orig_process_id ||
      header_->owner.create_stamp != orig_create_stamp) {
    memory_ = nullptr;
    values_.clear();
  }
}

// This information is kept for every thread that is tracked. It is filled
// the very first time the thread is seen. All fields must be of exact sizes
// so there is no issue moving between 32 and 64-bit builds.
struct ThreadActivityTracker::Header {
  // Defined in .h for analyzer access. Increment this if structure changes!
  static constexpr uint32_t kPersistentTypeId =
      GlobalActivityTracker::kTypeIdActivityTracker;

  // Expected size for 32/64-bit check.
  static constexpr size_t kExpectedInstanceSize =
      OwningProcess::kExpectedInstanceSize + Activity::kExpectedInstanceSize +
      72;

  // This information uniquely identifies a process.
  OwningProcess owner;

  // The thread-id (thread_ref.as_id) to which this data belongs. This number
  // is not guaranteed to mean anything but, combined with the process-id from
  // OwningProcess, it is unique among all active trackers.
  ThreadRef thread_ref;

  // The start-time and start-ticks when the data was created. Each activity
  // record has a |time_internal| value that can be converted to a "wall time"
  // with these two values.
  int64_t start_time;
  int64_t start_ticks;

  // The number of Activity slots (spaces that can hold an Activity) that
  // immediately follow this structure in memory.
  uint32_t stack_slots;

  // Some padding to keep everything 64-bit aligned.
  uint32_t padding;

  // The current depth of the stack. This may be greater than the number of
  // slots. If the depth exceeds the number of slots, the newest entries
  // won't be recorded.
  std::atomic<uint32_t> current_depth;

  // A memory location used to indicate if changes have been made to the data
  // that would invalidate an in-progress read of its contents. The active
  // tracker will zero the value whenever something gets popped from the
  // stack. A monitoring tracker can write a non-zero value here, copy the
  // stack contents, and read the value to know, if it is still non-zero, that
  // the contents didn't change while being copied. This can handle concurrent
  // snapshot operations only if each snapshot writes a different bit (which
  // is not the current implementation so no parallel snapshots allowed).
  std::atomic<uint32_t> data_unchanged;

  // The last "exception" activity. This can't be stored on the stack because
  // that could get popped as things unwind.
  Activity last_exception;

  // The name of the thread (up to a maximum length). Dynamic-length names
  // are not practical since the memory has to come from the same persistent
  // allocator that holds this structure and to which this object has no
  // reference.
  char thread_name[32];
};

ThreadActivityTracker::Snapshot::Snapshot() {}
ThreadActivityTracker::Snapshot::~Snapshot() {}

ThreadActivityTracker::ScopedActivity::ScopedActivity(
    ThreadActivityTracker* tracker,
    const void* program_counter,
    const void* origin,
    Activity::Type type,
    const ActivityData& data)
    : tracker_(tracker) {
  if (tracker_)
    activity_id_ = tracker_->PushActivity(program_counter, origin, type, data);
}

ThreadActivityTracker::ScopedActivity::~ScopedActivity() {
  if (tracker_)
    tracker_->PopActivity(activity_id_);
}

void ThreadActivityTracker::ScopedActivity::ChangeTypeAndData(
    Activity::Type type,
    const ActivityData& data) {
  if (tracker_)
    tracker_->ChangeActivity(activity_id_, type, data);
}

ThreadActivityTracker::ThreadActivityTracker(void* base, size_t size)
    : header_(static_cast<Header*>(base)),
      stack_(reinterpret_cast<Activity*>(reinterpret_cast<char*>(base) +
                                         sizeof(Header))),
      stack_slots_(
          static_cast<uint32_t>((size - sizeof(Header)) / sizeof(Activity))) {
  DCHECK(thread_checker_.CalledOnValidThread());

  // Verify the parameters but fail gracefully if they're not valid so that
  // production code based on external inputs will not crash.  IsValid() will
  // return false in this case.
  if (!base ||
      // Ensure there is enough space for the header and at least a few records.
      size < sizeof(Header) + kMinStackDepth * sizeof(Activity) ||
      // Ensure that the |stack_slots_| calculation didn't overflow.
      (size - sizeof(Header)) / sizeof(Activity) >
          std::numeric_limits<uint32_t>::max()) {
    NOTREACHED();
    return;
  }

  // Ensure that the thread reference doesn't exceed the size of the ID number.
  // This won't compile at the global scope because Header is a private struct.
  static_assert(
      sizeof(header_->thread_ref) == sizeof(header_->thread_ref.as_id),
      "PlatformThreadHandle::Handle is too big to hold in 64-bit ID");

  // Ensure that Activity.data is aligned to a 64-bit boundary so there are
  // no interoperability issues across CPU architectures.
  static_assert(offsetof(Activity, data) % sizeof(uint64_t) == 0,
                "ActivityData.data is not 64-bit aligned");

  // Provided memory should either be completely initialized or all zeros.
  if (header_->owner.data_id.load(std::memory_order_relaxed) == 0) {
    // This is a new file. Double-check other fields and then initialize.
    DCHECK_EQ(0, header_->owner.process_id);
    DCHECK_EQ(0, header_->owner.create_stamp);
    DCHECK_EQ(0, header_->thread_ref.as_id);
    DCHECK_EQ(0, header_->start_time);
    DCHECK_EQ(0, header_->start_ticks);
    DCHECK_EQ(0U, header_->stack_slots);
    DCHECK_EQ(0U, header_->current_depth.load(std::memory_order_relaxed));
    DCHECK_EQ(0U, header_->data_unchanged.load(std::memory_order_relaxed));
    DCHECK_EQ(0, stack_[0].time_internal);
    DCHECK_EQ(0U, stack_[0].origin_address);
    DCHECK_EQ(0U, stack_[0].call_stack[0]);
    DCHECK_EQ(0U, stack_[0].data.task.sequence_id);

#if defined(OS_WIN)
    header_->thread_ref.as_tid = PlatformThread::CurrentId();
#elif defined(OS_POSIX)
    header_->thread_ref.as_handle =
        PlatformThread::CurrentHandle().platform_handle();
#endif

    header_->start_time = base::Time::Now().ToInternalValue();
    header_->start_ticks = base::TimeTicks::Now().ToInternalValue();
    header_->stack_slots = stack_slots_;
    strlcpy(header_->thread_name, PlatformThread::GetName(),
            sizeof(header_->thread_name));

    // This is done last so as to guarantee that everything above is "released"
    // by the time this value gets written.
    header_->owner.Release_Initialize();

    valid_ = true;
    DCHECK(IsValid());
  } else {
    // This is a file with existing data. Perform basic consistency checks.
    valid_ = true;
    valid_ = IsValid();
  }
}

ThreadActivityTracker::~ThreadActivityTracker() {}

ThreadActivityTracker::ActivityId ThreadActivityTracker::PushActivity(
    const void* program_counter,
    const void* origin,
    Activity::Type type,
    const ActivityData& data) {
  // A thread-checker creates a lock to check the thread-id which means
  // re-entry into this code if lock acquisitions are being tracked.
  DCHECK(type == Activity::ACT_LOCK_ACQUIRE ||
         thread_checker_.CalledOnValidThread());

  // Get the current depth of the stack. No access to other memory guarded
  // by this variable is done here so a "relaxed" load is acceptable.
  uint32_t depth = header_->current_depth.load(std::memory_order_relaxed);

  // Handle the case where the stack depth has exceeded the storage capacity.
  // Extra entries will be lost leaving only the base of the stack.
  if (depth >= stack_slots_) {
    // Since no other threads modify the data, no compare/exchange is needed.
    // Since no other memory is being modified, a "relaxed" store is acceptable.
    header_->current_depth.store(depth + 1, std::memory_order_relaxed);
    return depth;
  }

  // Get a pointer to the next activity and load it. No atomicity is required
  // here because the memory is known only to this thread. It will be made
  // known to other threads once the depth is incremented.
  Activity::FillFrom(&stack_[depth], program_counter, origin, type, data);

  // Save the incremented depth. Because this guards |activity| memory filled
  // above that may be read by another thread once the recorded depth changes,
  // a "release" store is required.
  header_->current_depth.store(depth + 1, std::memory_order_release);

  // The current depth is used as the activity ID because it simply identifies
  // an entry. Once an entry is popped, it's okay to reuse the ID.
  return depth;
}

void ThreadActivityTracker::ChangeActivity(ActivityId id,
                                           Activity::Type type,
                                           const ActivityData& data) {
  DCHECK(thread_checker_.CalledOnValidThread());
  DCHECK(type != Activity::ACT_NULL || &data != &kNullActivityData);
  DCHECK_LT(id, header_->current_depth.load(std::memory_order_acquire));

  // Update the information if it is being recorded (i.e. within slot limit).
  if (id < stack_slots_) {
    Activity* activity = &stack_[id];

    if (type != Activity::ACT_NULL) {
      DCHECK_EQ(activity->activity_type & Activity::ACT_CATEGORY_MASK,
                type & Activity::ACT_CATEGORY_MASK);
      activity->activity_type = type;
    }

    if (&data != &kNullActivityData)
      activity->data = data;
  }
}

void ThreadActivityTracker::PopActivity(ActivityId id) {
  // Do an atomic decrement of the depth. No changes to stack entries guarded
  // by this variable are done here so a "relaxed" operation is acceptable.
  // |depth| will receive the value BEFORE it was modified which means the
  // return value must also be decremented. The slot will be "free" after
  // this call but since only a single thread can access this object, the
  // data will remain valid until this method returns or calls outside.
  uint32_t depth =
      header_->current_depth.fetch_sub(1, std::memory_order_relaxed) - 1;

  // Validate that everything is running correctly.
  DCHECK_EQ(id, depth);

  // A thread-checker creates a lock to check the thread-id which means
  // re-entry into this code if lock acquisitions are being tracked.
  DCHECK(stack_[depth].activity_type == Activity::ACT_LOCK_ACQUIRE ||
         thread_checker_.CalledOnValidThread());

  // The stack has shrunk meaning that some other thread trying to copy the
  // contents for reporting purposes could get bad data. That thread would
  // have written a non-zero value into |data_unchanged|; clearing it here
  // will let that thread detect that something did change. This needs to
  // happen after the atomic |depth| operation above so a "release" store
  // is required.
  header_->data_unchanged.store(0, std::memory_order_release);
}

std::unique_ptr<ActivityUserData> ThreadActivityTracker::GetUserData(
    ActivityId id,
    ActivityTrackerMemoryAllocator* allocator) {
  // Don't allow user data for lock acquisition as recursion may occur.
  if (stack_[id].activity_type == Activity::ACT_LOCK_ACQUIRE) {
    NOTREACHED();
    return MakeUnique<ActivityUserData>();
  }

  // User-data is only stored for activities actually held in the stack.
  if (id >= stack_slots_)
    return MakeUnique<ActivityUserData>();

  // Create and return a real UserData object.
  return CreateUserDataForActivity(&stack_[id], allocator);
}

bool ThreadActivityTracker::HasUserData(ActivityId id) {
  // User-data is only stored for activities actually held in the stack.
  return (id < stack_slots_ && stack_[id].user_data_ref);
}

void ThreadActivityTracker::ReleaseUserData(
    ActivityId id,
    ActivityTrackerMemoryAllocator* allocator) {
  // User-data is only stored for activities actually held in the stack.
  if (id < stack_slots_ && stack_[id].user_data_ref) {
    allocator->ReleaseObjectReference(stack_[id].user_data_ref);
    stack_[id].user_data_ref = 0;
  }
}

void ThreadActivityTracker::RecordExceptionActivity(const void* program_counter,
                                                    const void* origin,
                                                    Activity::Type type,
                                                    const ActivityData& data) {
  // A thread-checker creates a lock to check the thread-id which means
  // re-entry into this code if lock acquisitions are being tracked.
  DCHECK(thread_checker_.CalledOnValidThread());

  // Fill the reusable exception activity.
  Activity::FillFrom(&header_->last_exception, program_counter, origin, type,
                     data);

  // The data has changed meaning that some other thread trying to copy the
  // contents for reporting purposes could get bad data.
  header_->data_unchanged.store(0, std::memory_order_relaxed);
}

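// A tracker is valid only once its header has been fully initialized (all of
// the identifying fields are non-zero), the recorded slot count matches what
// this instance computed from the memory size, and the thread-name buffer
// ends with a NUL.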
bool ThreadActivityTracker::IsValid() const {
  if (header_->owner.data_id.load(std::memory_order_acquire) == 0 ||
      header_->owner.process_id == 0 || header_->thread_ref.as_id == 0 ||
      header_->start_time == 0 || header_->start_ticks == 0 ||
      header_->stack_slots != stack_slots_ ||
      header_->thread_name[sizeof(header_->thread_name) - 1] != '\0') {
    return false;
  }

  return valid_;
}

bool ThreadActivityTracker::CreateSnapshot(Snapshot* output_snapshot) const {
  DCHECK(output_snapshot);

  // There is no "called on valid thread" check for this method as it can be
  // called from other threads or even other processes. It is also the reason
  // why atomic operations must be used in certain places above.

  // It's possible for the data to change while reading it in such a way that it
  // invalidates the read. Make several attempts but don't try forever.
  const int kMaxAttempts = 10;
  uint32_t depth;

  // Stop here if the data isn't valid.
  if (!IsValid())
    return false;

  // Allocate the maximum size for the stack so it doesn't have to be done
  // during the time-sensitive snapshot operation. It is shrunk once the
  // actual size is known.
  output_snapshot->activity_stack.reserve(stack_slots_);

  for (int attempt = 0; attempt < kMaxAttempts; ++attempt) {
    // Remember the data IDs to ensure nothing is replaced during the snapshot
    // operation. Use "acquire" so that all the non-atomic fields of the
    // structure are valid (at least at the current moment in time).
    const uint32_t starting_id =
        header_->owner.data_id.load(std::memory_order_acquire);
    const int64_t starting_create_stamp = header_->owner.create_stamp;
    const int64_t starting_process_id = header_->owner.process_id;
    const int64_t starting_thread_id = header_->thread_ref.as_id;

    // Write a non-zero value to |data_unchanged| so it's possible to detect
    // at the end that nothing has changed since copying the data began. A
    // "cst" operation is required to ensure it occurs before everything else.
    // Using "cst" memory ordering is relatively expensive but this is only
    // done during analysis so doesn't directly affect the worker threads.
    header_->data_unchanged.store(1, std::memory_order_seq_cst);

    // Fetching the current depth also "acquires" the contents of the stack.
    depth = header_->current_depth.load(std::memory_order_acquire);
    uint32_t count = std::min(depth, stack_slots_);
    output_snapshot->activity_stack.resize(count);
    if (count > 0) {
      // Copy the existing contents. Memcpy is used for speed.
      memcpy(&output_snapshot->activity_stack[0], stack_,
             count * sizeof(Activity));
    }

    // Capture the last exception.
    memcpy(&output_snapshot->last_exception, &header_->last_exception,
           sizeof(Activity));

    // TODO(bcwhite): Snapshot other things here.

    // Retry if something changed during the copy. A "cst" operation ensures
    // it must happen after all the above operations.
    if (!header_->data_unchanged.load(std::memory_order_seq_cst))
      continue;

    // Stack copied. Record its full depth.
    output_snapshot->activity_stack_depth = depth;

    // Get the general thread information.
    output_snapshot->thread_name =
        std::string(header_->thread_name, sizeof(header_->thread_name) - 1);
    output_snapshot->create_stamp = header_->owner.create_stamp;
    output_snapshot->thread_id = header_->thread_ref.as_id;
    output_snapshot->process_id = header_->owner.process_id;

    // All characters of the thread-name buffer were copied so as not to break
    // if the trailing NUL is missing. Now limit the length if the actual name
    // is shorter.
    output_snapshot->thread_name.resize(
        strlen(output_snapshot->thread_name.c_str()));

    // If the data ID has changed then the tracker has exited and the memory
    // has been reused by a new one. Try again.
    if (header_->owner.data_id.load(std::memory_order_seq_cst) != starting_id ||
        output_snapshot->create_stamp != starting_create_stamp ||
        output_snapshot->process_id != starting_process_id ||
        output_snapshot->thread_id != starting_thread_id) {
      continue;
    }

    // Only successful if the data is still valid once everything is done since
    // it's possible for the thread to end somewhere in the middle and all its
    // values become garbage.
    if (!IsValid())
      return false;

    // Change all the timestamps in the activities from "ticks" to "wall" time.
    const Time start_time = Time::FromInternalValue(header_->start_time);
    const int64_t start_ticks = header_->start_ticks;
    for (Activity& activity : output_snapshot->activity_stack) {
      activity.time_internal =
          WallTimeFromTickTime(start_ticks, activity.time_internal, start_time)
              .ToInternalValue();
    }
    output_snapshot->last_exception.time_internal =
        WallTimeFromTickTime(start_ticks,
                             output_snapshot->last_exception.time_internal,
                             start_time)
            .ToInternalValue();

    // Success!
    return true;
  }

  // Too many attempts.
  return false;
}

const void* ThreadActivityTracker::GetBaseAddress() {
  return header_;
}

void ThreadActivityTracker::SetOwningProcessIdForTesting(int64_t pid,
                                                         int64_t stamp) {
  header_->owner.SetOwningProcessIdForTesting(pid, stamp);
}

// static
bool ThreadActivityTracker::GetOwningProcessId(const void* memory,
                                               int64_t* out_id,
                                               int64_t* out_stamp) {
  const Header* header = reinterpret_cast<const Header*>(memory);
  return OwningProcess::GetOwningProcessId(&header->owner, out_id, out_stamp);
}

// static
size_t ThreadActivityTracker::SizeForStackDepth(int stack_depth) {
  return static_cast<size_t>(stack_depth) * sizeof(Activity) + sizeof(Header);
}

std::unique_ptr<ActivityUserData>
ThreadActivityTracker::CreateUserDataForActivity(
    Activity* activity,
    ActivityTrackerMemoryAllocator* allocator) {
  DCHECK_EQ(0U, activity->user_data_ref);

  PersistentMemoryAllocator::Reference ref = allocator->GetObjectReference();
  void* memory = allocator->GetAsArray<char>(ref, kUserDataSize);
  if (memory) {
    std::unique_ptr<ActivityUserData> user_data =
        MakeUnique<ActivityUserData>(memory, kUserDataSize);
    activity->user_data_ref = ref;
    activity->user_data_id = user_data->id();
    return user_data;
  }

  // Return a dummy object that will still accept (but ignore) Set() calls.
  return MakeUnique<ActivityUserData>();
}

// The instantiation of the GlobalActivityTracker object.
// The object held here will obviously not be destructed at process exit
// but that's best since PersistentMemoryAllocator objects (that underlie
// GlobalActivityTracker objects) are explicitly forbidden from doing anything
// essential at exit anyway because they depend on data managed elsewhere
// that could be destructed first. An AtomicWord is used instead of
// std::atomic because the latter can create global ctors and dtors.
subtle::AtomicWord GlobalActivityTracker::g_tracker_ = 0;

GlobalActivityTracker::ModuleInfo::ModuleInfo() {}
GlobalActivityTracker::ModuleInfo::ModuleInfo(ModuleInfo&& rhs) = default;
GlobalActivityTracker::ModuleInfo::ModuleInfo(const ModuleInfo& rhs) = default;
GlobalActivityTracker::ModuleInfo::~ModuleInfo() {}

GlobalActivityTracker::ModuleInfo& GlobalActivityTracker::ModuleInfo::operator=(
    ModuleInfo&& rhs) = default;
GlobalActivityTracker::ModuleInfo& GlobalActivityTracker::ModuleInfo::operator=(
    const ModuleInfo& rhs) = default;

GlobalActivityTracker::ModuleInfoRecord::ModuleInfoRecord() {}
GlobalActivityTracker::ModuleInfoRecord::~ModuleInfoRecord() {}

bool GlobalActivityTracker::ModuleInfoRecord::DecodeTo(
    GlobalActivityTracker::ModuleInfo* info,
    size_t record_size) const {
  // Get the current "changes" indicator, acquiring all the other values.
  uint32_t current_changes = changes.load(std::memory_order_acquire);

  // Copy out the dynamic information.
  info->is_loaded = loaded != 0;
  info->address = static_cast<uintptr_t>(address);
  info->load_time = load_time;

  // Check to make sure no information changed while being read. A "seq-cst"
  // operation is expensive but is only done during analysis and it's the only
  // way to ensure this occurs after all the accesses above. If changes did
  // occur then return a "not loaded" result so that |size| and |address|
  // aren't expected to be accurate.
  if ((current_changes & kModuleInformationChanging) != 0 ||
      changes.load(std::memory_order_seq_cst) != current_changes) {
    info->is_loaded = false;
  }

  // Copy out the static information. These never change so don't have to be
  // protected by the atomic |current_changes| operations.
  info->size = static_cast<size_t>(size);
  info->timestamp = timestamp;
  info->age = age;
  memcpy(info->identifier, identifier, sizeof(info->identifier));

  if (offsetof(ModuleInfoRecord, pickle) + pickle_size > record_size)
    return false;
  Pickle pickler(pickle, pickle_size);
  PickleIterator iter(pickler);
  return iter.ReadString(&info->file) && iter.ReadString(&info->debug_file);
}

bool GlobalActivityTracker::ModuleInfoRecord::EncodeFrom(
    const GlobalActivityTracker::ModuleInfo& info,
    size_t record_size) {
  Pickle pickler;
  bool okay =
      pickler.WriteString(info.file) && pickler.WriteString(info.debug_file);
  if (!okay) {
    NOTREACHED();
    return false;
  }
  if (offsetof(ModuleInfoRecord, pickle) + pickler.size() > record_size) {
    NOTREACHED();
    return false;
  }

  // These fields never change and are set before the record is made iterable
  // so no thread protection is necessary.
  size = info.size;
  timestamp = info.timestamp;
  age = info.age;
  memcpy(identifier, info.identifier, sizeof(identifier));
  memcpy(pickle, pickler.data(), pickler.size());
  pickle_size = pickler.size();
  changes.store(0, std::memory_order_relaxed);

  // Initialize the owner info.
  owner.Release_Initialize();

  // Now set those fields that can change.
  return UpdateFrom(info);
}

bool GlobalActivityTracker::ModuleInfoRecord::UpdateFrom(
    const GlobalActivityTracker::ModuleInfo& info) {
  // Updates can occur after the record is made visible so make changes atomic.
  // A "strong" exchange ensures no false failures.
  uint32_t old_changes = changes.load(std::memory_order_relaxed);
  uint32_t new_changes = old_changes | kModuleInformationChanging;
  if ((old_changes & kModuleInformationChanging) != 0 ||
      !changes.compare_exchange_strong(old_changes, new_changes,
                                       std::memory_order_acquire,
                                       std::memory_order_acquire)) {
    NOTREACHED() << "Multiple sources are updating module information.";
    return false;
  }

  loaded = info.is_loaded ? 1 : 0;
  address = info.address;
  load_time = Time::Now().ToInternalValue();

  bool success = changes.compare_exchange_strong(new_changes, old_changes + 1,
                                                 std::memory_order_release,
                                                 std::memory_order_relaxed);
  DCHECK(success);
  return true;
}

// static
size_t GlobalActivityTracker::ModuleInfoRecord::EncodedSize(
    const GlobalActivityTracker::ModuleInfo& info) {
  PickleSizer sizer;
  sizer.AddString(info.file);
  sizer.AddString(info.debug_file);

  return offsetof(ModuleInfoRecord, pickle) + sizeof(Pickle::Header) +
         sizer.payload_size();
}

GlobalActivityTracker::ScopedThreadActivity::ScopedThreadActivity(
    const void* program_counter,
    const void* origin,
    Activity::Type type,
    const ActivityData& data,
    bool lock_allowed)
    : ThreadActivityTracker::ScopedActivity(GetOrCreateTracker(lock_allowed),
                                            program_counter,
                                            origin,
                                            type,
                                            data) {}

GlobalActivityTracker::ScopedThreadActivity::~ScopedThreadActivity() {
  if (tracker_ && tracker_->HasUserData(activity_id_)) {
    GlobalActivityTracker* global = GlobalActivityTracker::Get();
    AutoLock lock(global->user_data_allocator_lock_);
    tracker_->ReleaseUserData(activity_id_, &global->user_data_allocator_);
  }
}

ActivityUserData& GlobalActivityTracker::ScopedThreadActivity::user_data() {
  if (!user_data_) {
    if (tracker_) {
      GlobalActivityTracker* global = GlobalActivityTracker::Get();
      AutoLock lock(global->user_data_allocator_lock_);
      user_data_ =
          tracker_->GetUserData(activity_id_, &global->user_data_allocator_);
    } else {
      user_data_ = MakeUnique<ActivityUserData>();
    }
  }
  return *user_data_;
}

GlobalActivityTracker::ThreadSafeUserData::ThreadSafeUserData(void* memory,
                                                              size_t size,
                                                              int64_t pid)
    : ActivityUserData(memory, size, pid) {}

GlobalActivityTracker::ThreadSafeUserData::~ThreadSafeUserData() {}

void GlobalActivityTracker::ThreadSafeUserData::Set(StringPiece name,
                                                    ValueType type,
                                                    const void* memory,
                                                    size_t size) {
  AutoLock lock(data_lock_);
  ActivityUserData::Set(name, type, memory, size);
}

GlobalActivityTracker::ManagedActivityTracker::ManagedActivityTracker(
    PersistentMemoryAllocator::Reference mem_reference,
    void* base,
    size_t size)
    : ThreadActivityTracker(base, size),
      mem_reference_(mem_reference),
      mem_base_(base) {}

GlobalActivityTracker::ManagedActivityTracker::~ManagedActivityTracker() {
  // The global |g_tracker_| must point to the owner of this class since all
  // objects of this type must be destructed before |g_tracker_| can be changed
  // (something that only occurs in tests).
  DCHECK(g_tracker_);
  GlobalActivityTracker::Get()->ReturnTrackerMemory(this);
}

void GlobalActivityTracker::CreateWithAllocator(
    std::unique_ptr<PersistentMemoryAllocator> allocator,
    int stack_depth,
    int64_t process_id) {
  // There's no need to do anything with the result. It is self-managing.
  GlobalActivityTracker* global_tracker =
      new GlobalActivityTracker(std::move(allocator), stack_depth, process_id);
  // Create a tracker for this thread since it is known.
  global_tracker->CreateTrackerForCurrentThread();
}

#if !defined(OS_NACL)
// static
void GlobalActivityTracker::CreateWithFile(const FilePath& file_path,
                                           size_t size,
                                           uint64_t id,
                                           StringPiece name,
                                           int stack_depth) {
  DCHECK(!file_path.empty());
  DCHECK_GE(static_cast<uint64_t>(std::numeric_limits<int64_t>::max()), size);

  // Create and map the file into memory and make it globally available.
  std::unique_ptr<MemoryMappedFile> mapped_file(new MemoryMappedFile());
  bool success =
      mapped_file->Initialize(File(file_path,
                                   File::FLAG_CREATE_ALWAYS | File::FLAG_READ |
                                   File::FLAG_WRITE | File::FLAG_SHARE_DELETE),
                              {0, static_cast<int64_t>(size)},
                              MemoryMappedFile::READ_WRITE_EXTEND);
  DCHECK(success);
  CreateWithAllocator(MakeUnique<FilePersistentMemoryAllocator>(
                          std::move(mapped_file), size, id, name, false),
                      stack_depth, 0);
}
#endif  // !defined(OS_NACL)

// static
void GlobalActivityTracker::CreateWithLocalMemory(size_t size,
                                                  uint64_t id,
                                                  StringPiece name,
                                                  int stack_depth,
                                                  int64_t process_id) {
  CreateWithAllocator(
      MakeUnique<LocalPersistentMemoryAllocator>(size, id, name), stack_depth,
      process_id);
}

// static
void GlobalActivityTracker::SetForTesting(
    std::unique_ptr<GlobalActivityTracker> tracker) {
  CHECK(!subtle::NoBarrier_Load(&g_tracker_));
  subtle::Release_Store(&g_tracker_,
                        reinterpret_cast<uintptr_t>(tracker.release()));
}

// static
std::unique_ptr<GlobalActivityTracker>
GlobalActivityTracker::ReleaseForTesting() {
  GlobalActivityTracker* tracker = Get();
  if (!tracker)
    return nullptr;

  // Thread trackers assume that the global tracker is present for some
  // operations so ensure that there aren't any.
  tracker->ReleaseTrackerForCurrentThreadForTesting();
  DCHECK_EQ(0, tracker->thread_tracker_count_.load(std::memory_order_relaxed));

  subtle::Release_Store(&g_tracker_, 0);
  return WrapUnique(tracker);
}

ThreadActivityTracker* GlobalActivityTracker::CreateTrackerForCurrentThread() {
  DCHECK(!this_thread_tracker_.Get());

  PersistentMemoryAllocator::Reference mem_reference;

  {
    base::AutoLock autolock(thread_tracker_allocator_lock_);
    mem_reference = thread_tracker_allocator_.GetObjectReference();
  }

  if (!mem_reference) {
    // Failure. This shouldn't happen. But be graceful if it does, probably
    // because the underlying allocator wasn't given enough memory to satisfy
    // all possible requests.
1341    NOTREACHED();
1342    // Report the thread-count at which the allocator was full so that the
1343    // failure can be seen and underlying memory resized appropriately.
1344    UMA_HISTOGRAM_COUNTS_1000(
1345        "ActivityTracker.ThreadTrackers.MemLimitTrackerCount",
1346        thread_tracker_count_.load(std::memory_order_relaxed));
1347    // Return null, just as if tracking wasn't enabled.
1348    return nullptr;
1349  }
1350
1351  // Convert the memory block found above into an actual memory address.
1352  // Doing the conversion as a Header object enacts the 32/64-bit size
1353  // consistency checks which would not otherwise be done. Unfortunately,
1354  // some older compilers and MSVC don't have standard-conforming definitions
1355  // of std::atomic which cause it not to be plain-old-data. Don't check on
1356  // those platforms assuming that the checks on other platforms will be
1357  // sufficient.
1358  // TODO(bcwhite): Review this after major compiler releases.
1359  DCHECK(mem_reference);
1360  void* mem_base;
1361  mem_base =
1362      allocator_->GetAsObject<ThreadActivityTracker::Header>(mem_reference);
1363
1364  DCHECK(mem_base);
1365  DCHECK_LE(stack_memory_size_, allocator_->GetAllocSize(mem_reference));
1366
1367  // Create a tracker with the acquired memory and set it as the tracker
1368  // for this particular thread in thread-local-storage.
1369  ManagedActivityTracker* tracker =
1370      new ManagedActivityTracker(mem_reference, mem_base, stack_memory_size_);
1371  DCHECK(tracker->IsValid());
1372  this_thread_tracker_.Set(tracker);
1373  int old_count = thread_tracker_count_.fetch_add(1, std::memory_order_relaxed);
1374
1375  UMA_HISTOGRAM_ENUMERATION("ActivityTracker.ThreadTrackers.Count",
1376                            old_count + 1, kMaxThreadCount);
1377  return tracker;
1378}
1379
1380void GlobalActivityTracker::ReleaseTrackerForCurrentThreadForTesting() {
1381  ThreadActivityTracker* tracker =
1382      reinterpret_cast<ThreadActivityTracker*>(this_thread_tracker_.Get());
1383  if (tracker) {
1384    this_thread_tracker_.Set(nullptr);
1385    delete tracker;
1386  }
1387}
1388
1389void GlobalActivityTracker::SetBackgroundTaskRunner(
1390    const scoped_refptr<TaskRunner>& runner) {
1391  AutoLock lock(global_tracker_lock_);
1392  background_task_runner_ = runner;
1393}
1394
1395void GlobalActivityTracker::SetProcessExitCallback(
1396    ProcessExitCallback callback) {
1397  AutoLock lock(global_tracker_lock_);
1398  process_exit_callback_ = callback;
1399}
1400
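// Records that some other process (never the current one) was launched with
// the given command line. The entry is remembered in |known_processes_| until
// a matching RecordProcessExit() removes it.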
void GlobalActivityTracker::RecordProcessLaunch(
    ProcessId process_id,
    const FilePath::StringType& cmd) {
  const int64_t pid = process_id;
  DCHECK_NE(GetProcessId(), pid);
  DCHECK_NE(0, pid);

  base::AutoLock lock(global_tracker_lock_);
  if (base::ContainsKey(known_processes_, pid)) {
    // TODO(bcwhite): Measure this in UMA.
    NOTREACHED() << "Process #" << process_id
                 << " was previously recorded as \"launched\""
                 << " with no corresponding exit.";
    known_processes_.erase(pid);
  }

#if defined(OS_WIN)
  known_processes_.insert(std::make_pair(pid, UTF16ToUTF8(cmd)));
#else
  known_processes_.insert(std::make_pair(pid, cmd));
#endif
}

void GlobalActivityTracker::RecordProcessLaunch(
    ProcessId process_id,
    const FilePath::StringType& exe,
    const FilePath::StringType& args) {
  const int64_t pid = process_id;
  // Quote the executable path if it contains a space so the recorded command
  // line remains unambiguous.
  if (exe.find(FILE_PATH_LITERAL(" ")) != FilePath::StringType::npos) {
    RecordProcessLaunch(pid, FilePath::StringType(FILE_PATH_LITERAL("\"")) +
                                 exe + FILE_PATH_LITERAL("\" ") + args);
  } else {
    RecordProcessLaunch(pid, exe + FILE_PATH_LITERAL(' ') + args);
  }
}

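// Records the exit of a previously recorded process. Cleanup of that
// process's persistent-memory allocations runs on the background task runner
// if one was provided, otherwise synchronously on the calling thread.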
void GlobalActivityTracker::RecordProcessExit(ProcessId process_id,
                                              int exit_code) {
  const int64_t pid = process_id;
  DCHECK_NE(GetProcessId(), pid);
  DCHECK_NE(0, pid);

  scoped_refptr<TaskRunner> task_runner;
  std::string command_line;
  {
    base::AutoLock lock(global_tracker_lock_);
    task_runner = background_task_runner_;
    auto found = known_processes_.find(pid);
    if (found != known_processes_.end()) {
      command_line = std::move(found->second);
      known_processes_.erase(found);
    } else {
      DLOG(ERROR) << "Recording exit of unknown process #" << process_id;
    }
  }

  // Use the current time to differentiate the process that just exited
  // from any that might be created in the future with the same ID.
  int64_t now_stamp = Time::Now().ToInternalValue();

  // The persistent allocator is thread-safe so run the iteration and
  // adjustments on a worker thread if one was provided.
  if (task_runner && !task_runner->RunsTasksOnCurrentThread()) {
    task_runner->PostTask(
        FROM_HERE,
        Bind(&GlobalActivityTracker::CleanupAfterProcess, Unretained(this), pid,
             now_stamp, exit_code, Passed(&command_line)));
    return;
  }

  CleanupAfterProcess(pid, now_stamp, exit_code, std::move(command_line));
}

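// Stores the current phase of this process in its process-data record under
// kProcessPhaseDataKey so CleanupAfterProcess() can report it through the
// process-exit callback.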
void GlobalActivityTracker::SetProcessPhase(ProcessPhase phase) {
  process_data().SetInt(kProcessPhaseDataKey, phase);
}

void GlobalActivityTracker::CleanupAfterProcess(int64_t process_id,
                                                int64_t exit_stamp,
                                                int exit_code,
                                                std::string&& command_line) {
  // The process may not have exited cleanly so it's necessary to go through
  // all the data structures it may have allocated in the persistent memory
  // segment and mark them as "released". This will allow them to be reused
  // later on.

  PersistentMemoryAllocator::Iterator iter(allocator_.get());
  PersistentMemoryAllocator::Reference ref;

  ProcessExitCallback process_exit_callback;
  {
    AutoLock lock(global_tracker_lock_);
    process_exit_callback = process_exit_callback_;
  }
  if (process_exit_callback) {
    // Find the process's user-data record so the process phase can be passed
    // to the callback.
    ActivityUserData::Snapshot process_data_snapshot;
    while ((ref = iter.GetNextOfType(kTypeIdProcessDataRecord)) != 0) {
      const void* memory = allocator_->GetAsArray<char>(
          ref, kTypeIdProcessDataRecord, PersistentMemoryAllocator::kSizeAny);
      int64_t found_id;
      int64_t create_stamp;
      if (ActivityUserData::GetOwningProcessId(memory, &found_id,
                                               &create_stamp)) {
        if (found_id == process_id && create_stamp < exit_stamp) {
          const ActivityUserData process_data(const_cast<void*>(memory),
                                              allocator_->GetAllocSize(ref));
          process_data.CreateSnapshot(&process_data_snapshot);
          break;  // No need to look for any others.
        }
      }
    }
    iter.Reset();  // So it starts anew when used below.

    // Record the process's phase at exit so the callback doesn't need to go
    // searching based on a private key value.
    ProcessPhase exit_phase = PROCESS_PHASE_UNKNOWN;
    auto phase = process_data_snapshot.find(kProcessPhaseDataKey);
    if (phase != process_data_snapshot.end())
      exit_phase = static_cast<ProcessPhase>(phase->second.GetInt());

    // Perform the callback.
    process_exit_callback.Run(process_id, exit_stamp, exit_code, exit_phase,
                              std::move(command_line),
                              std::move(process_data_snapshot));
  }

  // Find all allocations associated with the exited process and free them.
  uint32_t type;
  while ((ref = iter.GetNext(&type)) != 0) {
    switch (type) {
      case kTypeIdActivityTracker:
      case kTypeIdUserDataRecord:
      case kTypeIdProcessDataRecord:
      case ModuleInfoRecord::kPersistentTypeId: {
        const void* memory = allocator_->GetAsArray<char>(
            ref, type, PersistentMemoryAllocator::kSizeAny);
        int64_t found_id;
        int64_t create_stamp;

        // By convention, the OwningProcess structure is always the first
        // field of the structure so there's no need to handle all the
        // cases separately.
        if (OwningProcess::GetOwningProcessId(memory, &found_id,
                                              &create_stamp)) {
          // Only change the type to be "free" if the process ID matches and
          // the creation time is before the exit time (so PID re-use doesn't
          // cause the erasure of something that is in-use). Memory is cleared
          // here, rather than when it's needed, so as to limit the impact at
          // that critical time.
          if (found_id == process_id && create_stamp < exit_stamp)
            allocator_->ChangeType(ref, ~type, type, /*clear=*/true);
        }
      } break;
    }
  }
}

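// Illustrative use (hypothetical message): copying an interesting log line
// into the persistent segment so it survives a crash, e.g.
//   if (GlobalActivityTracker* tracker = GlobalActivityTracker::Get())
//     tracker->RecordLogMessage("network init failed");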
void GlobalActivityTracker::RecordLogMessage(StringPiece message) {
  // Allocate at least one extra byte so the string is NUL terminated. All
  // memory returned by the allocator is guaranteed to be zeroed.
  PersistentMemoryAllocator::Reference ref =
      allocator_->Allocate(message.size() + 1, kTypeIdGlobalLogMessage);
  char* memory = allocator_->GetAsArray<char>(ref, kTypeIdGlobalLogMessage,
                                              message.size() + 1);
  if (memory) {
    memcpy(memory, message.data(), message.size());
    allocator_->MakeIterable(ref);
  }
}

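// Records information about a loaded module. If the module's file is already
// known, only its mutable state is refreshed in place; otherwise a new
// ModuleInfoRecord is allocated, encoded and made iterable so that analyzers
// can find it.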
void GlobalActivityTracker::RecordModuleInfo(const ModuleInfo& info) {
  AutoLock lock(modules_lock_);
  auto found = modules_.find(info.file);
  if (found != modules_.end()) {
    ModuleInfoRecord* record = found->second;
    DCHECK(record);

    // Update the basic state of module information that has already been
    // recorded. It is assumed that the string information (identifier,
    // version, etc.) remains unchanged, which means there's no need to
    // create a new record to accommodate a possibly longer length.
    record->UpdateFrom(info);
    return;
  }

  size_t required_size = ModuleInfoRecord::EncodedSize(info);
  ModuleInfoRecord* record = allocator_->New<ModuleInfoRecord>(required_size);
  if (!record)
    return;

  bool success = record->EncodeFrom(info, required_size);
  DCHECK(success);
  allocator_->MakeIterable(record);
  modules_.insert(std::make_pair(info.file, record));
}

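// Stores an active field-trial group in the global data record under a
// "FieldTrial.<trial_name>" key. For example (hypothetical trial), calling
//   RecordFieldTrial("MyExperiment", "Enabled");
// records the string "Enabled" under the key "FieldTrial.MyExperiment".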
void GlobalActivityTracker::RecordFieldTrial(const std::string& trial_name,
                                             StringPiece group_name) {
  const std::string key = std::string("FieldTrial.") + trial_name;
  global_data_.SetString(key, group_name);
}

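// The constructor carves the process-wide and global user-data records out of
// the supplied allocator, publishes itself through |g_tracker_|, notes the
// PROCESS_LAUNCHED phase and snapshots any already-active field trials.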
GlobalActivityTracker::GlobalActivityTracker(
    std::unique_ptr<PersistentMemoryAllocator> allocator,
    int stack_depth,
    int64_t process_id)
    : allocator_(std::move(allocator)),
      stack_memory_size_(ThreadActivityTracker::SizeForStackDepth(stack_depth)),
      process_id_(process_id == 0 ? GetCurrentProcId() : process_id),
      this_thread_tracker_(&OnTLSDestroy),
      thread_tracker_count_(0),
      thread_tracker_allocator_(allocator_.get(),
                                kTypeIdActivityTracker,
                                kTypeIdActivityTrackerFree,
                                stack_memory_size_,
                                kCachedThreadMemories,
                                /*make_iterable=*/true),
      user_data_allocator_(allocator_.get(),
                           kTypeIdUserDataRecord,
                           kTypeIdUserDataRecordFree,
                           kUserDataSize,
                           kCachedUserDataMemories,
                           /*make_iterable=*/true),
      process_data_(allocator_->GetAsArray<char>(
                        AllocateFrom(allocator_.get(),
                                     kTypeIdProcessDataRecordFree,
                                     kProcessDataSize,
                                     kTypeIdProcessDataRecord),
                        kTypeIdProcessDataRecord,
                        kProcessDataSize),
                    kProcessDataSize,
                    process_id_),
      global_data_(
          allocator_->GetAsArray<char>(
              allocator_->Allocate(kGlobalDataSize, kTypeIdGlobalDataRecord),
              kTypeIdGlobalDataRecord,
              kGlobalDataSize),
          kGlobalDataSize,
          process_id_) {
  DCHECK_NE(0, process_id_);

  // Ensure that there is no other global object and then make this one such.
  DCHECK(!g_tracker_);
  subtle::Release_Store(&g_tracker_, reinterpret_cast<uintptr_t>(this));

  // The data records must be iterable in order to be found by an analyzer.
  allocator_->MakeIterable(allocator_->GetAsReference(
      process_data_.GetBaseAddress(), kTypeIdProcessDataRecord));
  allocator_->MakeIterable(allocator_->GetAsReference(
      global_data_.GetBaseAddress(), kTypeIdGlobalDataRecord));

  // Note that this process has launched.
  SetProcessPhase(PROCESS_LAUNCHED);

  // Fetch and record all activated field trials.
  FieldTrial::ActiveGroups active_groups;
  FieldTrialList::GetActiveFieldTrialGroups(&active_groups);
  for (auto& group : active_groups)
    RecordFieldTrial(group.trial_name, group.group_name);
}

GlobalActivityTracker::~GlobalActivityTracker() {
  DCHECK(Get() == nullptr || Get() == this);
  DCHECK_EQ(0, thread_tracker_count_.load(std::memory_order_relaxed));
  subtle::Release_Store(&g_tracker_, 0);
}

void GlobalActivityTracker::ReturnTrackerMemory(
    ManagedActivityTracker* tracker) {
  PersistentMemoryAllocator::Reference mem_reference = tracker->mem_reference_;
  void* mem_base = tracker->mem_base_;
  DCHECK(mem_reference);
  DCHECK(mem_base);

  // Remove the destructed tracker from the set of known ones.
  DCHECK_LE(1, thread_tracker_count_.load(std::memory_order_relaxed));
  thread_tracker_count_.fetch_sub(1, std::memory_order_relaxed);

  // Release this memory for re-use at a later time.
  base::AutoLock autolock(thread_tracker_allocator_lock_);
  thread_tracker_allocator_.ReleaseObjectReference(mem_reference);
}

void GlobalActivityTracker::RecordExceptionImpl(const void* pc,
                                                const void* origin,
                                                uint32_t code) {
  // Get an existing tracker for this thread. It's not possible to create
  // one at this point because doing so would involve memory allocations and
  // other potentially complex operations that can cause failures if done
  // within an exception handler. In most cases, earlier operations will
  // already have created the tracker, so this generally isn't a problem.
  ThreadActivityTracker* tracker = GetTrackerForCurrentThread();
  if (!tracker)
    return;

  tracker->RecordExceptionActivity(pc, origin, Activity::ACT_EXCEPTION,
                                   ActivityData::ForException(code));
}

// static
void GlobalActivityTracker::OnTLSDestroy(void* value) {
  delete reinterpret_cast<ManagedActivityTracker*>(value);
}

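// ScopedActivity is the general-purpose scoper. A minimal sketch of how it
// might be used (the call site and values are hypothetical):
//   void DoJob(uint32_t job_id) {
//     ScopedActivity activity(/*program_counter=*/nullptr, /*action=*/0,
//                             /*id=*/job_id, /*info=*/0);
//     // ... work tracked for the lifetime of |activity| ...
//   }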
ScopedActivity::ScopedActivity(const void* program_counter,
                               uint8_t action,
                               uint32_t id,
                               int32_t info)
    : GlobalActivityTracker::ScopedThreadActivity(
          program_counter,
          nullptr,
          static_cast<Activity::Type>(Activity::ACT_GENERIC | action),
          ActivityData::ForGeneric(id, info),
          /*lock_allowed=*/true),
      id_(id) {
  // The action must not affect the category bits of the activity type.
  DCHECK_EQ(0, action & Activity::ACT_CATEGORY_MASK);
}

void ScopedActivity::ChangeAction(uint8_t action) {
  DCHECK_EQ(0, action & Activity::ACT_CATEGORY_MASK);
  ChangeTypeAndData(static_cast<Activity::Type>(Activity::ACT_GENERIC | action),
                    kNullActivityData);
}

void ScopedActivity::ChangeInfo(int32_t info) {
  ChangeTypeAndData(Activity::ACT_NULL, ActivityData::ForGeneric(id_, info));
}

void ScopedActivity::ChangeActionAndInfo(uint8_t action, int32_t info) {
  DCHECK_EQ(0, action & Activity::ACT_CATEGORY_MASK);
  ChangeTypeAndData(static_cast<Activity::Type>(Activity::ACT_GENERIC | action),
                    ActivityData::ForGeneric(id_, info));
}

ScopedTaskRunActivity::ScopedTaskRunActivity(
    const void* program_counter,
    const base::PendingTask& task)
    : GlobalActivityTracker::ScopedThreadActivity(
          program_counter,
          task.posted_from.program_counter(),
          Activity::ACT_TASK_RUN,
          ActivityData::ForTask(task.sequence_num),
          /*lock_allowed=*/true) {}

ScopedLockAcquireActivity::ScopedLockAcquireActivity(
    const void* program_counter,
    const base::internal::LockImpl* lock)
    : GlobalActivityTracker::ScopedThreadActivity(
          program_counter,
          nullptr,
          Activity::ACT_LOCK_ACQUIRE,
          ActivityData::ForLock(lock),
          /*lock_allowed=*/false) {}

ScopedEventWaitActivity::ScopedEventWaitActivity(
    const void* program_counter,
    const base::WaitableEvent* event)
    : GlobalActivityTracker::ScopedThreadActivity(
          program_counter,
          nullptr,
          Activity::ACT_EVENT_WAIT,
          ActivityData::ForEvent(event),
          /*lock_allowed=*/true) {}

ScopedThreadJoinActivity::ScopedThreadJoinActivity(
    const void* program_counter,
    const base::PlatformThreadHandle* thread)
    : GlobalActivityTracker::ScopedThreadActivity(
          program_counter,
          nullptr,
          Activity::ACT_THREAD_JOIN,
          ActivityData::ForThread(*thread),
          /*lock_allowed=*/true) {}

#if !defined(OS_NACL) && !defined(OS_IOS)
ScopedProcessWaitActivity::ScopedProcessWaitActivity(
    const void* program_counter,
    const base::Process* process)
    : GlobalActivityTracker::ScopedThreadActivity(
          program_counter,
          nullptr,
          Activity::ACT_PROCESS_WAIT,
          ActivityData::ForProcess(process->Pid()),
          /*lock_allowed=*/true) {}
#endif

}  // namespace debug
}  // namespace base