// Copyright (c) 2014 Google Inc.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This header file defines implementation details of how the trace macros in
// SkTraceEventCommon.h collect and store trace events. Anything not
// implementation-specific should go in SkTraceEventCommon.h instead of here.

#ifndef SkTraceEvent_DEFINED
#define SkTraceEvent_DEFINED

#include "SkAtomics.h"
#include "SkEventTracer.h"
#include "SkTraceEventCommon.h"

////////////////////////////////////////////////////////////////////////////////
// Implementation specific tracing API definitions.

// By default, const char* argument values are assumed to have long-lived scope
// and will not be copied. Use this macro to force a const char* to be copied.
#define TRACE_STR_COPY(str) \
    skia::tracing_internals::TraceStringWithCopy(str)
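
// For example (illustrative only; TRACE_EVENT1 is one of the macros in
// SkTraceEventCommon.h, and the names here are made up):
//
//   void drawLabel(const SkString& label) {
//     // label.c_str() is only valid while |label| is alive, so ask the
//     // tracer to take its own copy of the string.
//     TRACE_EVENT1("skia", "drawLabel",
//                  "label", TRACE_STR_COPY(label.c_str()));
//   }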

// By default, uint64 ID argument values are not mangled with the Process ID in
// TRACE_EVENT_ASYNC macros. Use this macro to force Process ID mangling.
#define TRACE_ID_MANGLE(id) \
    skia::tracing_internals::TraceID::ForceMangle(id)

// By default, pointers are mangled with the Process ID in TRACE_EVENT_ASYNC
// macros. Use this macro to prevent Process ID mangling.
#define TRACE_ID_DONT_MANGLE(id) \
    skia::tracing_internals::TraceID::DontMangle(id)
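
// For example (illustrative only; the TRACE_EVENT_ASYNC_* macros are the ones
// referred to above, in SkTraceEventCommon.h, and the ids are hypothetical):
//
//   // An integer id that could collide across processes: force mangling.
//   TRACE_EVENT_ASYNC_BEGIN0("skia", "Upload", TRACE_ID_MANGLE(upload_id));
//   ...
//   TRACE_EVENT_ASYNC_END0("skia", "Upload", TRACE_ID_MANGLE(upload_id));
//
//   // A pointer that is already unique enough: suppress mangling.
//   TRACE_EVENT_ASYNC_BEGIN0("skia", "Flush", TRACE_ID_DONT_MANGLE(this));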

// Sets the current sample state to the given category and name (both must be
// constant strings). These states are intended for a sampling profiler.
// Implementation note: we store category and name together because we don't
// want the inconsistency/expense of storing two pointers.
// |thread_bucket| is [0..2] and is used to statically isolate samples in one
// thread from others.
#define TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET( \
    bucket_number, category, name)                 \
        skia::tracing_internals::                     \
        TraceEventSamplingStateScope<bucket_number>::Set(category "\0" name)

// Returns the current sampling state of the given bucket.
#define TRACE_EVENT_GET_SAMPLING_STATE_FOR_BUCKET(bucket_number) \
    skia::tracing_internals::TraceEventSamplingStateScope<bucket_number>::Current()
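
// For example (illustrative; the category and name are made up):
//
//   // Mark the current thread (bucket 0) as doing rasterization work.
//   TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET(0, "skia", "Rasterize");
//   // Current() now returns a pointer to the packed "skia\0Rasterize" string.
//   const char* state = TRACE_EVENT_GET_SAMPLING_STATE_FOR_BUCKET(0);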

// Creates a scoped sampling state for the given bucket.
//
// {  // The sampling state is set within this scope.
//    TRACE_EVENT_SCOPED_SAMPLING_STATE_FOR_BUCKET(0, "category", "name");
//    ...;
// }
#define TRACE_EVENT_SCOPED_SAMPLING_STATE_FOR_BUCKET(                   \
    bucket_number, category, name)                                      \
    skia::tracing_internals::TraceEventSamplingStateScope<bucket_number>   \
        traceEventSamplingScope(category "\0" name);


#define INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE() \
    *INTERNAL_TRACE_EVENT_UID(category_group_enabled) & \
        (SkEventTracer::kEnabledForRecording_CategoryGroupEnabledFlags | \
         SkEventTracer::kEnabledForEventCallback_CategoryGroupEnabledFlags)

// Get a pointer to the enabled state of the given trace category. Only
// long-lived literal strings should be given as the category group. The
// returned pointer can be held permanently in a local static, for example. If
// the unsigned char is non-zero, tracing is enabled. If tracing is enabled,
// TRACE_EVENT_API_ADD_TRACE_EVENT can be called. It's OK if tracing is disabled
// between the load of the tracing state and the call to
// TRACE_EVENT_API_ADD_TRACE_EVENT, because this flag only provides an early out
// for best performance when tracing is disabled.
// const uint8_t*
//     TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(const char* category_group)
#define TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED \
    SkEventTracer::GetInstance()->getCategoryGroupEnabled
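
// For example (an illustrative sketch of the pattern the internal macros below
// generate; the variable name is hypothetical):
//
//   static const uint8_t* s_skia_enabled =
//       TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED("skia");
//   if (*s_skia_enabled) {
//     // Tracing is on; TRACE_EVENT_API_ADD_TRACE_EVENT may be called.
//   }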

// Get the number of times traces have been recorded. This is used to implement
// the TRACE_EVENT_IS_NEW_TRACE facility.
// unsigned int TRACE_EVENT_API_GET_NUM_TRACES_RECORDED()
#define TRACE_EVENT_API_GET_NUM_TRACES_RECORDED \
    SkEventTracer::GetInstance()->getNumTracesRecorded

// Add a trace event to the platform tracing system.
// SkEventTracer::Handle TRACE_EVENT_API_ADD_TRACE_EVENT(
//                    char phase,
//                    const uint8_t* category_group_enabled,
//                    const char* name,
//                    uint64_t id,
//                    int num_args,
//                    const char** arg_names,
//                    const uint8_t* arg_types,
//                    const uint64_t* arg_values,
//                    unsigned char flags)
#define TRACE_EVENT_API_ADD_TRACE_EVENT \
    SkEventTracer::GetInstance()->addTraceEvent

// Set the duration field of a COMPLETE trace event.
// void TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(
//     const uint8_t* category_group_enabled,
//     const char* name,
//     SkEventTracer::Handle id)
#define TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION \
    SkEventTracer::GetInstance()->updateTraceEventDuration

#define TRACE_EVENT_API_ATOMIC_WORD intptr_t
#define TRACE_EVENT_API_ATOMIC_LOAD(var) sk_atomic_load(&var, sk_memory_order_relaxed)
#define TRACE_EVENT_API_ATOMIC_STORE(var, value) \
    sk_atomic_store(&var, value, sk_memory_order_relaxed)

// Defines visibility for classes in this header.
#define TRACE_EVENT_API_CLASS_EXPORT SK_API

// The thread buckets for the sampling profiler.
TRACE_EVENT_API_CLASS_EXPORT extern \
    TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3];

#define TRACE_EVENT_API_THREAD_BUCKET(thread_bucket)                           \
    g_trace_state[thread_bucket]

////////////////////////////////////////////////////////////////////////////////

// Implementation detail: trace event macros create temporary variables
// to keep instrumentation overhead low. These macros give each temporary
// variable a unique name based on the line number to prevent name collisions.
#define INTERNAL_TRACE_EVENT_UID3(a,b) \
    trace_event_unique_##a##b
#define INTERNAL_TRACE_EVENT_UID2(a,b) \
    INTERNAL_TRACE_EVENT_UID3(a,b)
#define INTERNAL_TRACE_EVENT_UID(name_prefix) \
    INTERNAL_TRACE_EVENT_UID2(name_prefix, __LINE__)
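
// For example, at line 42 of a file,
//   INTERNAL_TRACE_EVENT_UID(atomic)
// expands to the identifier
//   trace_event_unique_atomic42
// (UID2 exists so that __LINE__ is expanded to its value before ## pastes the
// tokens together).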

// Implementation detail: internal macro to create static category.
// No barriers are needed, because this code is designed to operate safely
// even when the unsigned char* points to garbage data (which may be the case
// on processors without cache coherency).
#define INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES( \
    category_group, atomic, category_group_enabled) \
    category_group_enabled = \
        reinterpret_cast<const uint8_t*>(TRACE_EVENT_API_ATOMIC_LOAD( \
            atomic)); \
    if (!category_group_enabled) { \
      category_group_enabled = \
          TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(category_group); \
      TRACE_EVENT_API_ATOMIC_STORE(atomic, \
          reinterpret_cast<TRACE_EVENT_API_ATOMIC_WORD>( \
              category_group_enabled)); \
    }

#define INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group) \
    static TRACE_EVENT_API_ATOMIC_WORD INTERNAL_TRACE_EVENT_UID(atomic) = 0; \
    const uint8_t* INTERNAL_TRACE_EVENT_UID(category_group_enabled); \
    INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES(category_group, \
        INTERNAL_TRACE_EVENT_UID(atomic), \
        INTERNAL_TRACE_EVENT_UID(category_group_enabled));

// Implementation detail: internal macro to create static category and add
// event if the category is enabled.
#define INTERNAL_TRACE_EVENT_ADD(phase, category_group, name, flags, ...) \
    do { \
      INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
      if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
        skia::tracing_internals::AddTraceEvent( \
            phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
            skia::tracing_internals::kNoEventId, flags, ##__VA_ARGS__); \
      } \
    } while (0)
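
// For example (an illustrative sketch; TRACE_EVENT_PHASE_INSTANT and
// TRACE_EVENT_FLAG_NONE are defined for the macros in SkTraceEventCommon.h,
// and the category and name are made up), the call
//
//   INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, "skia", "Draw",
//                            TRACE_EVENT_FLAG_NONE);
//
// caches the "skia" category-enabled pointer in a function-local static on
// first use and calls skia::tracing_internals::AddTraceEvent only when the
// category is enabled for recording or for an event callback.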

// Implementation detail: internal macro to create static category and add begin
// event if the category is enabled. Also adds the end event when the scope
// ends.
#define INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name, ...) \
    INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
    skia::tracing_internals::ScopedTracer INTERNAL_TRACE_EVENT_UID(tracer); \
    if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
      SkEventTracer::Handle h = skia::tracing_internals::AddTraceEvent( \
          TRACE_EVENT_PHASE_COMPLETE, \
          INTERNAL_TRACE_EVENT_UID(category_group_enabled), \
          name, skia::tracing_internals::kNoEventId, \
          TRACE_EVENT_FLAG_NONE, ##__VA_ARGS__); \
      INTERNAL_TRACE_EVENT_UID(tracer).Initialize( \
          INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, h); \
    }
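
// For example (illustrative; TRACE_EVENT0 in SkTraceEventCommon.h is expected
// to forward here, and the function shown is hypothetical):
//
//   void Renderer::flush() {
//     TRACE_EVENT0("skia", "Renderer::flush");
//     // If "skia" is enabled, a COMPLETE event handle is opened here and the
//     // ScopedTracer's destructor fills in its duration when flush() returns.
//     ...
//   }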

// Implementation detail: internal macro to create static category and add
// event if the category is enabled.
#define INTERNAL_TRACE_EVENT_ADD_WITH_ID(phase, category_group, name, id, \
                                         flags, ...) \
    do { \
      INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
      if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
        unsigned char trace_event_flags = flags | TRACE_EVENT_FLAG_HAS_ID; \
        skia::tracing_internals::TraceID trace_event_trace_id( \
            id, &trace_event_flags); \
        skia::tracing_internals::AddTraceEvent( \
            phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), \
            name, trace_event_trace_id.data(), trace_event_flags, \
            ##__VA_ARGS__); \
      } \
    } while (0)

// Implementation detail: internal macro to create static category and add
// event, with an explicit thread id and timestamp, if the category is enabled.
#define INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(phase, \
        category_group, name, id, thread_id, timestamp, flags, ...) \
    do { \
      INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
      if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
        unsigned char trace_event_flags = flags | TRACE_EVENT_FLAG_HAS_ID; \
        skia::tracing_internals::TraceID trace_event_trace_id( \
            id, &trace_event_flags); \
        skia::tracing_internals::AddTraceEventWithThreadIdAndTimestamp( \
            phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), \
            name, trace_event_trace_id.data(), \
            thread_id, base::TimeTicks::FromInternalValue(timestamp), \
            trace_event_flags, ##__VA_ARGS__); \
      } \
    } while (0)

#define INTERNAL_TRACE_MEMORY(category, name)

namespace skia {
namespace tracing_internals {

// Specify these values when the corresponding argument of AddTraceEvent is not
// used.
const int kZeroNumArgs = 0;
const uint64_t kNoEventId = 0;

// TraceID encapsulates an ID that can either be an integer or pointer. Pointers
// are by default mangled with the Process ID so that they are unlikely to
// collide when the same pointer is used in different processes.
class TraceID {
 public:
  class DontMangle {
   public:
    explicit DontMangle(const void* id)
        : data_(static_cast<uint64_t>(
              reinterpret_cast<uintptr_t>(id))) {}
    explicit DontMangle(uint64_t id) : data_(id) {}
    explicit DontMangle(unsigned int id) : data_(id) {}
    explicit DontMangle(unsigned short id) : data_(id) {}
    explicit DontMangle(unsigned char id) : data_(id) {}
    explicit DontMangle(long long id)
        : data_(static_cast<uint64_t>(id)) {}
    explicit DontMangle(long id)
        : data_(static_cast<uint64_t>(id)) {}
    explicit DontMangle(int id)
        : data_(static_cast<uint64_t>(id)) {}
    explicit DontMangle(short id)
        : data_(static_cast<uint64_t>(id)) {}
    explicit DontMangle(signed char id)
        : data_(static_cast<uint64_t>(id)) {}
    uint64_t data() const { return data_; }
   private:
    uint64_t data_;
  };

  class ForceMangle {
   public:
    explicit ForceMangle(uint64_t id) : data_(id) {}
    explicit ForceMangle(unsigned int id) : data_(id) {}
    explicit ForceMangle(unsigned short id) : data_(id) {}
    explicit ForceMangle(unsigned char id) : data_(id) {}
    explicit ForceMangle(long long id)
        : data_(static_cast<uint64_t>(id)) {}
    explicit ForceMangle(long id)
        : data_(static_cast<uint64_t>(id)) {}
    explicit ForceMangle(int id)
        : data_(static_cast<uint64_t>(id)) {}
    explicit ForceMangle(short id)
        : data_(static_cast<uint64_t>(id)) {}
    explicit ForceMangle(signed char id)
        : data_(static_cast<uint64_t>(id)) {}
    uint64_t data() const { return data_; }
   private:
    uint64_t data_;
  };

  TraceID(const void* id, unsigned char* flags)
      : data_(static_cast<uint64_t>(
              reinterpret_cast<uintptr_t>(id))) {
    *flags |= TRACE_EVENT_FLAG_MANGLE_ID;
  }
  TraceID(ForceMangle id, unsigned char* flags) : data_(id.data()) {
    *flags |= TRACE_EVENT_FLAG_MANGLE_ID;
  }
  TraceID(DontMangle id, unsigned char* flags) : data_(id.data()) {
  }
  TraceID(uint64_t id, unsigned char* flags)
      : data_(id) { (void)flags; }
  TraceID(unsigned int id, unsigned char* flags)
      : data_(id) { (void)flags; }
  TraceID(unsigned short id, unsigned char* flags)
      : data_(id) { (void)flags; }
  TraceID(unsigned char id, unsigned char* flags)
      : data_(id) { (void)flags; }
  TraceID(long long id, unsigned char* flags)
      : data_(static_cast<uint64_t>(id)) { (void)flags; }
  TraceID(long id, unsigned char* flags)
      : data_(static_cast<uint64_t>(id)) { (void)flags; }
  TraceID(int id, unsigned char* flags)
      : data_(static_cast<uint64_t>(id)) { (void)flags; }
  TraceID(short id, unsigned char* flags)
      : data_(static_cast<uint64_t>(id)) { (void)flags; }
  TraceID(signed char id, unsigned char* flags)
      : data_(static_cast<uint64_t>(id)) { (void)flags; }

  uint64_t data() const { return data_; }

 private:
  uint64_t data_;
};
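
// For example (an illustrative sketch of how the macros above construct a
// TraceID; the variables are hypothetical):
//
//   SkPicture* pic = ...;
//   unsigned char flags = TRACE_EVENT_FLAG_HAS_ID;
//   TraceID pointer_id(pic, &flags);
//   // The pointer constructor also sets TRACE_EVENT_FLAG_MANGLE_ID, asking
//   // the backend to mix the Process ID into pointer_id.data().
//
//   unsigned char int_flags = TRACE_EVENT_FLAG_HAS_ID;
//   TraceID integer_id(42u, &int_flags);
//   // Integer ids leave the flags untouched and are passed through unmangled
//   // unless wrapped in TRACE_ID_MANGLE().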

// Simple union to store various types as uint64_t.
union TraceValueUnion {
  bool as_bool;
  uint64_t as_uint;
  long long as_int;
  double as_double;
  const void* as_pointer;
  const char* as_string;
};

// Simple container for const char* that should be copied instead of retained.
class TraceStringWithCopy {
 public:
  explicit TraceStringWithCopy(const char* str) : str_(str) {}
  operator const char* () const { return str_; }
 private:
  const char* str_;
};

// Define SetTraceValue for each allowed type. It stores the type and
// value in the return arguments. This allows this API to avoid declaring any
// structures so that it is portable to third_party libraries.
#define INTERNAL_DECLARE_SET_TRACE_VALUE(actual_type, \
                                         union_member, \
                                         value_type_id) \
    static inline void SetTraceValue( \
        actual_type arg, \
        unsigned char* type, \
        uint64_t* value) { \
      TraceValueUnion type_value; \
      type_value.union_member = arg; \
      *type = value_type_id; \
      *value = type_value.as_uint; \
    }
// Simpler form for int types that can be safely cast.
#define INTERNAL_DECLARE_SET_TRACE_VALUE_INT(actual_type, \
                                             value_type_id) \
    static inline void SetTraceValue( \
        actual_type arg, \
        unsigned char* type, \
        uint64_t* value) { \
      *type = value_type_id; \
      *value = static_cast<uint64_t>(arg); \
    }

INTERNAL_DECLARE_SET_TRACE_VALUE_INT(uint64_t, TRACE_VALUE_TYPE_UINT)
INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned int, TRACE_VALUE_TYPE_UINT)
INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned short, TRACE_VALUE_TYPE_UINT)
INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned char, TRACE_VALUE_TYPE_UINT)
INTERNAL_DECLARE_SET_TRACE_VALUE_INT(long long, TRACE_VALUE_TYPE_INT)
INTERNAL_DECLARE_SET_TRACE_VALUE_INT(long, TRACE_VALUE_TYPE_INT)
INTERNAL_DECLARE_SET_TRACE_VALUE_INT(int, TRACE_VALUE_TYPE_INT)
INTERNAL_DECLARE_SET_TRACE_VALUE_INT(short, TRACE_VALUE_TYPE_INT)
INTERNAL_DECLARE_SET_TRACE_VALUE_INT(signed char, TRACE_VALUE_TYPE_INT)
INTERNAL_DECLARE_SET_TRACE_VALUE(bool, as_bool, TRACE_VALUE_TYPE_BOOL)
INTERNAL_DECLARE_SET_TRACE_VALUE(double, as_double, TRACE_VALUE_TYPE_DOUBLE)
INTERNAL_DECLARE_SET_TRACE_VALUE(const void*, as_pointer,
                                 TRACE_VALUE_TYPE_POINTER)
INTERNAL_DECLARE_SET_TRACE_VALUE(const char*, as_string,
                                 TRACE_VALUE_TYPE_STRING)
INTERNAL_DECLARE_SET_TRACE_VALUE(const TraceStringWithCopy&, as_string,
                                 TRACE_VALUE_TYPE_COPY_STRING)

#undef INTERNAL_DECLARE_SET_TRACE_VALUE
#undef INTERNAL_DECLARE_SET_TRACE_VALUE_INT
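
// For example (illustrative; the values are hypothetical):
//
//   unsigned char type;
//   uint64_t value;
//   SetTraceValue(0.5, &type, &value);
//   // type == TRACE_VALUE_TYPE_DOUBLE; value holds the bit pattern of 0.5.
//
//   SetTraceValue("cache miss", &type, &value);
//   // type == TRACE_VALUE_TYPE_STRING; value holds the const char* itself,
//   // so the string must outlive the event unless TRACE_STR_COPY is used.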

// These AddTraceEvent template functions are defined here instead of in the
// macros, because the arg_values could be temporary objects, such as
// std::string. In order to store pointers to the internal c_str and pass
// through to the tracing API, the arg_values must live throughout these
// procedures.

static inline SkEventTracer::Handle
AddTraceEvent(
    char phase,
    const uint8_t* category_group_enabled,
    const char* name,
    uint64_t id,
    unsigned char flags) {
  return TRACE_EVENT_API_ADD_TRACE_EVENT(
      phase, category_group_enabled, name, id,
      kZeroNumArgs, nullptr, nullptr, nullptr, flags);
}

template<class ARG1_TYPE>
static inline SkEventTracer::Handle
AddTraceEvent(
    char phase,
    const uint8_t* category_group_enabled,
    const char* name,
    uint64_t id,
    unsigned char flags,
    const char* arg1_name,
    const ARG1_TYPE& arg1_val) {
  const int num_args = 1;
  uint8_t arg_types[1];
  uint64_t arg_values[1];
  SetTraceValue(arg1_val, &arg_types[0], &arg_values[0]);
  return TRACE_EVENT_API_ADD_TRACE_EVENT(
      phase, category_group_enabled, name, id,
      num_args, &arg1_name, arg_types, arg_values, flags);
}

template<class ARG1_TYPE, class ARG2_TYPE>
static inline SkEventTracer::Handle
AddTraceEvent(
    char phase,
    const uint8_t* category_group_enabled,
    const char* name,
    uint64_t id,
    unsigned char flags,
    const char* arg1_name,
    const ARG1_TYPE& arg1_val,
    const char* arg2_name,
    const ARG2_TYPE& arg2_val) {
  const int num_args = 2;
  const char* arg_names[2] = { arg1_name, arg2_name };
  unsigned char arg_types[2];
  uint64_t arg_values[2];
  SetTraceValue(arg1_val, &arg_types[0], &arg_values[0]);
  SetTraceValue(arg2_val, &arg_types[1], &arg_values[1]);
  return TRACE_EVENT_API_ADD_TRACE_EVENT(
      phase, category_group_enabled, name, id,
      num_args, arg_names, arg_types, arg_values, flags);
}
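
// For example (an illustrative direct call; the macros above normally generate
// this, |enabled| stands for a pointer previously obtained from
// TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED, and ComputeCacheKey() is a
// hypothetical helper):
//
//   std::string key = ComputeCacheKey();
//   skia::tracing_internals::AddTraceEvent(
//       TRACE_EVENT_PHASE_INSTANT, enabled, "CacheLookup",
//       skia::tracing_internals::kNoEventId, TRACE_EVENT_FLAG_NONE,
//       "key", TRACE_STR_COPY(key.c_str()));
//   // SetTraceValue runs while |key| is still alive, and TRACE_STR_COPY marks
//   // the argument as TRACE_VALUE_TYPE_COPY_STRING so the backend takes its
//   // own copy; |key| may be destroyed as soon as the call returns.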

// Used by TRACE_EVENTx macros. Do not use directly.
class TRACE_EVENT_API_CLASS_EXPORT ScopedTracer {
 public:
  // Note: members of data_ intentionally left uninitialized. See Initialize.
  ScopedTracer() : p_data_(nullptr) {}

  ~ScopedTracer() {
    if (p_data_ && *data_.category_group_enabled)
      TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(
          data_.category_group_enabled, data_.name, data_.event_handle);
  }

  void Initialize(const uint8_t* category_group_enabled,
                  const char* name,
                  SkEventTracer::Handle event_handle) {
    data_.category_group_enabled = category_group_enabled;
    data_.name = name;
    data_.event_handle = event_handle;
    p_data_ = &data_;
  }

 private:
  // This Data struct workaround is to avoid initializing all the members
  // in Data during construction of this object, since this object is always
  // constructed, even when tracing is disabled. If the members of Data were
  // members of this class instead, compiler warnings would occur about
  // potential uninitialized accesses.
  struct Data {
    const uint8_t* category_group_enabled;
    const char* name;
    SkEventTracer::Handle event_handle;
  };
  Data* p_data_;
  Data data_;
};

// Used by TRACE_EVENT_BINARY_EFFICIENTx macro. Do not use directly.
class TRACE_EVENT_API_CLASS_EXPORT ScopedTraceBinaryEfficient {
 public:
  ScopedTraceBinaryEfficient(const char* category_group, const char* name);
  ~ScopedTraceBinaryEfficient();

 private:
  const uint8_t* category_group_enabled_;
  const char* name_;
  SkEventTracer::Handle event_handle_;
};

// This macro generates less code than TRACE_EVENT0 but is also
// slower to execute when tracing is off. It should generally only be
// used with code that is seldom executed or conditionally executed
// when debugging.
// For now the category_group must be "gpu".
#define TRACE_EVENT_BINARY_EFFICIENT0(category_group, name) \
    skia::tracing_internals::ScopedTraceBinaryEfficient \
        INTERNAL_TRACE_EVENT_UID(scoped_trace)(category_group, name);
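
// For example (illustrative; note the "gpu" restriction above, and the call
// site is hypothetical):
//
//   void GrGpu::submit() {
//     TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GrGpu::submit");
//     ...
//   }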

// TraceEventSamplingStateScope records the current sampling state
// and sets a new sampling state. When the scope exits, it restores
// the previously recorded sampling state.
template<size_t BucketNumber>
class TraceEventSamplingStateScope {
 public:
  TraceEventSamplingStateScope(const char* category_and_name) {
    previous_state_ = TraceEventSamplingStateScope<BucketNumber>::Current();
    TraceEventSamplingStateScope<BucketNumber>::Set(category_and_name);
  }

  ~TraceEventSamplingStateScope() {
    TraceEventSamplingStateScope<BucketNumber>::Set(previous_state_);
  }

  static inline const char* Current() {
    return reinterpret_cast<const char*>(TRACE_EVENT_API_ATOMIC_LOAD(
      g_trace_state[BucketNumber]));
  }

  static inline void Set(const char* category_and_name) {
    TRACE_EVENT_API_ATOMIC_STORE(
      g_trace_state[BucketNumber],
      reinterpret_cast<TRACE_EVENT_API_ATOMIC_WORD>(
        const_cast<char*>(category_and_name)));
  }

 private:
  const char* previous_state_;
};

}  // namespace tracing_internals
}  // namespace skia

#endif
