/*
 * Copyright 2013 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkOnce_DEFINED
#define SkOnce_DEFINED

// SkOnce.h defines SK_DECLARE_STATIC_ONCE and SkOnce(), which you can use
// together to create a threadsafe way to call a function just once.  This
// is particularly useful for lazy singleton initialization. E.g.
//
// static void set_up_my_singleton(Singleton** singleton) {
//     *singleton = new Singleton(...);
// }
// ...
// const Singleton& GetSingleton() {
//     static Singleton* singleton = NULL;
//     SK_DECLARE_STATIC_ONCE(once);
//     SkOnce(&once, set_up_my_singleton, &singleton);
//     SkASSERT(NULL != singleton);
//     return *singleton;
// }
//
// OnceTest.cpp also provides a few other simple examples.

#include "SkThread.h"
#include "SkTypes.h"

#ifdef SK_USE_POSIX_THREADS
#  define SK_ONCE_INIT { false, { PTHREAD_MUTEX_INITIALIZER } }
#else
#  define SK_ONCE_INIT { false, SkBaseMutex() }
#endif

#define SK_DECLARE_STATIC_ONCE(name) static SkOnceFlag name = SK_ONCE_INIT
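// For example, on a pthreads build SK_DECLARE_STATIC_ONCE(once) expands to
//     static SkOnceFlag once = { false, { PTHREAD_MUTEX_INITIALIZER } };
// and on other builds the mutex member is default-constructed instead.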

struct SkOnceFlag;  // If manually created, initialize with SkOnceFlag once = SK_ONCE_INIT

template <typename Func, typename Arg>
inline void SkOnce(SkOnceFlag* once, Func f, Arg arg);
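
// For example, a manually created (non-static) flag might be used like this.
// (Illustrative sketch only; prime_cache and gCache are hypothetical.)
//
//     SkOnceFlag flag = SK_ONCE_INIT;
//     ...
//     SkOnce(&flag, prime_cache, &gCache);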

//  ----------------------  Implementation details below here. -----------------------------

struct SkOnceFlag {
    bool done;
    SkBaseMutex mutex;
};

// TODO(bungeman, mtklein): move all these *barrier* functions to SkThread when refactoring lands.

#ifdef SK_BUILD_FOR_WIN
#include <intrin.h>
inline static void compiler_barrier() {
    _ReadWriteBarrier();
}
#else
inline static void compiler_barrier() {
    asm volatile("" : : : "memory");
}
#endif

inline static void full_barrier_on_arm() {
#ifdef SK_CPU_ARM
#if SK_ARM_ARCH >= 7
    asm volatile("dmb" : : : "memory");
#else
    asm volatile("mcr p15, 0, %0, c7, c10, 5" : : "r" (0) : "memory");
#endif
#endif
}

// On every platform, we issue a compiler barrier to prevent it from reordering
// code.  That's enough for platforms like x86 where release and acquire
// barriers are no-ops.  On other platforms we may need to be more careful;
// ARM, in particular, needs real code for both acquire and release.  We use a
// full barrier, which acts as both, because that's the finest precision ARM
// provides.

inline static void release_barrier() {
    compiler_barrier();
    full_barrier_on_arm();
}

inline static void acquire_barrier() {
    compiler_barrier();
    full_barrier_on_arm();
}
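
// (For orientation only: in C++11 <atomic> terms, release_barrier() and
// acquire_barrier() above correspond roughly to
//     std::atomic_thread_fence(std::memory_order_release);
//     std::atomic_thread_fence(std::memory_order_acquire);
// respectively.  We don't assume <atomic> is available here, so we roll our own.)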

// We've pulled a pretty standard double-checked locking implementation apart
// into its main fast path and a slow path that's called when we suspect the
// one-time code hasn't run yet.
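//
// For reference, the classic double-checked locking shape we're splitting up
// looks roughly like this (an illustrative sketch; flag, mutex, and
// do_the_work_once are hypothetical):
//
//     if (!flag) {                         // fast path: unsynchronized read
//         SkAutoMutexAcquire lock(mutex);  // slow path: take the lock ...
//         if (!flag) {                     // ... and re-check under it
//             do_the_work_once();
//             flag = true;
//         }
//     }
//
// SkOnce below is the unlocked fast path; sk_once_slow is the locked half.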

// This is the guts of the code, called when we suspect the one-time code hasn't been run yet.
// This should rarely be called, so we separate it from SkOnce and don't mark it as inline.
// (We don't mind if this is an actual function call, but odds are it'll be inlined anyway.)
template <typename Func, typename Arg>
static void sk_once_slow(SkOnceFlag* once, Func f, Arg arg) {
    const SkAutoMutexAcquire lock(once->mutex);
    if (!once->done) {
        f(arg);
        // Also known as a store-store/load-store barrier, this makes sure that the writes
        // done before here---in particular, those done by calling f(arg)---are observable
        // before the write after this line, once->done = true.
        //
        // In version control terms this is like saying, "check in the work up
        // to and including f(arg), then check in once->done = true as a subsequent change".
        //
        // We'll use this in the fast path to make sure f(arg)'s effects are
        // observable whenever we observe once->done == true.
        release_barrier();
        once->done = true;
    }
}

// We nabbed this code from the dynamic_annotations library, and in their honor
// we check the same define.  If you find yourself wanting more than just
// ANNOTATE_BENIGN_RACE, it might make sense to pull that in as a dependency
// rather than continuing to reproduce it here.

#if DYNAMIC_ANNOTATIONS_ENABLED
// TSAN provides this hook to suppress a known-safe apparent race.
extern "C" {
void AnnotateBenignRace(const char* file, int line, const volatile void* mem, const char* desc);
}
#define ANNOTATE_BENIGN_RACE(mem, desc) AnnotateBenignRace(__FILE__, __LINE__, mem, desc)
#else
#define ANNOTATE_BENIGN_RACE(mem, desc)
#endif

// This is our fast path, called all the time.  We really do want it to be inlined.
template <typename Func, typename Arg>
inline void SkOnce(SkOnceFlag* once, Func f, Arg arg) {
    ANNOTATE_BENIGN_RACE(&(once->done), "Don't worry TSAN, we're sure this is safe.");
    if (!once->done) {
        sk_once_slow(once, f, arg);
    }
    // Also known as a load-load/load-store barrier, this acquire barrier makes
    // sure that anything we read from memory---in particular, memory written by
    // calling f(arg)---is at least as current as the value we read from once->done.
    //
    // In version control terms, this is a lot like saying "sync up to the
    // commit where we wrote once->done = true".
    //
    // The release barrier in sk_once_slow guaranteed that once->done = true
    // happens after f(arg), so by syncing to once->done = true here we're
    // forcing ourselves to also wait until the effects of f(arg) are readable.
    acquire_barrier();
}

#undef ANNOTATE_BENIGN_RACE

#endif  // SkOnce_DEFINED