SkOnce.h revision f672cead70404080a991ebfb86c38316a4589b23
1/*
2 * Copyright 2013 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8#ifndef SkOnce_DEFINED
9#define SkOnce_DEFINED
10
11// SkOnce.h defines SK_DECLARE_STATIC_ONCE and SkOnce(), which you can use
12// together to create a threadsafe way to call a function just once.  This
13// is particularly useful for lazy singleton initialization. E.g.
14//
15// static void set_up_my_singleton(Singleton** singleton) {
16//     *singleton = new Singleton(...);
17// }
18// ...
19// const Singleton& GetSingleton() {
20//     static Singleton* singleton = NULL;
21//     SK_DECLARE_STATIC_ONCE(once);
22//     SkOnce(&once, set_up_my_singleton, &singleton);
23//     SkASSERT(NULL != singleton);
24//     return *singleton;
25// }
26//
27// OnceTest.cpp also should serve as a few other simple examples.
28//
29// You may optionally pass SkOnce a second function to be called at exit for cleanup.
30
31#include "SkDynamicAnnotations.h"
32#include "SkThread.h"
33#include "SkTypes.h"
34
// Aggregate initializer for SkOnceFlag: { done = false, lock = { 0, debug-canary 0 } }.
// Must match the field order of SkOnceFlag/SkSpinlock below.
#define SK_ONCE_INIT { false, { 0, SkDEBUGCODE(0) } }
// Declares a function-local (or file-local) zero-initialized SkOnceFlag named `name`.
#define SK_DECLARE_STATIC_ONCE(name) static SkOnceFlag name = SK_ONCE_INIT

struct SkOnceFlag;  // If manually created, initialize with SkOnceFlag once = SK_ONCE_INIT

// Calls f(arg) exactly once across all threads, guarded by *once.
// If atExit is non-NULL it is registered with atexit() after f runs (also only once).
template <typename Func, typename Arg>
inline void SkOnce(SkOnceFlag* once, Func f, Arg arg, void(*atExit)() = NULL);

// If you've already got a lock and a flag to use, this variant lets you avoid an extra SkOnceFlag.
template <typename Lock, typename Func, typename Arg>
inline void SkOnce(bool* done, Lock* lock, Func f, Arg arg, void(*atExit)() = NULL);
46
47//  ----------------------  Implementation details below here. -----------------------------
48
49// This is POD and must be zero-initialized.
// A minimal test-and-set spinlock built on sk_atomic_cas.
// This is POD and must be zero-initialized (an unlocked lock is all-zero bytes).
struct SkSpinlock {
    // Busy-waits until the lock is taken: loops trying to CAS thisIsPrivate from 0 to 1.
    void acquire() {
        SkASSERT(shouldBeZero == 0);
        // No memory barrier needed, but sk_atomic_cas gives us at least release anyway.
        while (!sk_atomic_cas(&thisIsPrivate, 0, 1)) {
            // spin
        }
    }

    // Unlocks by CAS-ing thisIsPrivate from 1 back to 0.
    // SkAssertResult catches a release of a lock we don't hold (debug builds).
    void release() {
        SkASSERT(shouldBeZero == 0);
        // This requires a release memory barrier before storing, which sk_atomic_cas guarantees.
        SkAssertResult(sk_atomic_cas(&thisIsPrivate, 1, 0));
    }

    int32_t thisIsPrivate;            // 0 = unlocked, 1 = locked.  Touch only via acquire/release.
    SkDEBUGCODE(int32_t shouldBeZero;)  // Debug canary: trips if the struct wasn't zero-initialized.
};
68
// The state for one SkOnce() call site.  POD; initialize with SK_ONCE_INIT,
// whose initializer list must stay in sync with this field order.
struct SkOnceFlag {
    bool done;        // Set to true (after a release barrier) once f(arg) has run.
    SkSpinlock lock;  // Serializes the slow path when done is still false.
};
73
74// TODO(bungeman, mtklein): move all these *barrier* functions to SkThread when refactoring lands.
75
#ifdef SK_BUILD_FOR_WIN
#  include <intrin.h>
// Prevents the compiler from reordering memory accesses across this point.
// _ReadWriteBarrier is MSVC's compiler-only fence; it emits no instructions.
inline static void compiler_barrier() {
    _ReadWriteBarrier();
}
#else
// GCC/Clang equivalent: an empty asm with a "memory" clobber pins all memory
// accesses on either side.  Compiler-only; the CPU may still reorder.
inline static void compiler_barrier() {
    asm volatile("" : : : "memory");
}
#endif
86
// Issues a full hardware memory barrier on ARM; a no-op everywhere else
// (x86's memory model makes acquire/release fences free at the CPU level).
inline static void full_barrier_on_arm() {
#if (defined(SK_CPU_ARM) && SK_ARM_ARCH >= 7) || defined(SK_CPU_ARM64)
    // ARMv7+/ARM64: DMB ISH — data memory barrier, inner-shareable domain.
    asm volatile("dmb ish" : : : "memory");
#elif defined(SK_CPU_ARM)
    // Older ARM: the CP15 c7,c10,5 operation — presumably the pre-v7 spelling
    // of a data memory barrier; TODO confirm against the ARM ARM.
    asm volatile("mcr p15, 0, %0, c7, c10, 5" : : "r" (0) : "memory");
#endif
}
94
95// On every platform, we issue a compiler barrier to prevent it from reordering
96// code.  That's enough for platforms like x86 where release and acquire
97// barriers are no-ops.  On other platforms we may need to be more careful;
98// ARM, in particular, needs real code for both acquire and release.  We use a
// full barrier, which acts as both, because that's the finest precision ARM
100// provides.
101
// Release fence: writes before this call become visible before writes after it.
// Compiler fence everywhere, plus a real CPU barrier where ARM needs one.
inline static void release_barrier() {
    compiler_barrier();
    full_barrier_on_arm();
}
106
// Acquire fence: reads after this call see memory at least as fresh as reads
// before it.  Same implementation as release_barrier because ARM's full
// barrier serves both roles (see the comment above).
inline static void acquire_barrier() {
    compiler_barrier();
    full_barrier_on_arm();
}
111
112// Works with SkSpinlock or SkMutex.
113template <typename Lock>
114class SkAutoLockAcquire {
115public:
116    explicit SkAutoLockAcquire(Lock* lock) : fLock(lock) { fLock->acquire(); }
117    ~SkAutoLockAcquire() { fLock->release(); }
118private:
119    Lock* fLock;
120};
121
122// We've pulled a pretty standard double-checked locking implementation apart
123// into its main fast path and a slow path that's called when we suspect the
124// one-time code hasn't run yet.
125
126// This is the guts of the code, called when we suspect the one-time code hasn't been run yet.
127// This should be rarely called, so we separate it from SkOnce and don't mark it as inline.
128// (We don't mind if this is an actual function call, but odds are it'll be inlined anyway.)
// This is the guts of the code, called when we suspect the one-time code hasn't been run yet.
// This should be rarely called, so we separate it from SkOnce and don't mark it as inline.
// (We don't mind if this is an actual function call, but odds are it'll be inlined anyway.)
//
// done:   flag that records whether f has already run; written exactly once, under lock.
// lock:   serializes racing callers (SkSpinlock or SkMutex).
// f, arg: the one-time work, invoked as f(arg).
// atExit: optional cleanup; registered via atexit() right after f runs, or NULL.
template <typename Lock, typename Func, typename Arg>
static void sk_once_slow(bool* done, Lock* lock, Func f, Arg arg, void (*atExit)()) {
    const SkAutoLockAcquire<Lock> locked(lock);
    // Re-check under the lock: another thread may have won the race and
    // already run f between our unlocked read and acquiring the lock.
    if (!*done) {
        f(arg);
        if (atExit != NULL) {
            atexit(atExit);
        }
        // Also known as a store-store/load-store barrier, this makes sure that the writes
        // done before here---in particular, those done by calling f(arg)---are observable
        // before the writes after the line, *done = true.
        //
        // In version control terms this is like saying, "check in the work up
        // to and including f(arg), then check in *done=true as a subsequent change".
        //
        // We'll use this in the fast path to make sure f(arg)'s effects are
        // observable whenever we observe *done == true.
        release_barrier();
        *done = true;
    }
}
150
151// This is our fast path, called all the time.  We do really want it to be inlined.
152template <typename Lock, typename Func, typename Arg>
153inline void SkOnce(bool* done, Lock* lock, Func f, Arg arg, void(*atExit)()) {
154    if (!SK_ANNOTATE_UNPROTECTED_READ(*done)) {
155        sk_once_slow(done, lock, f, arg, atExit);
156    }
157    // Also known as a load-load/load-store barrier, this acquire barrier makes
158    // sure that anything we read from memory---in particular, memory written by
159    // calling f(arg)---is at least as current as the value we read from once->done.
160    //
161    // In version control terms, this is a lot like saying "sync up to the
162    // commit where we wrote once->done = true".
163    //
164    // The release barrier in sk_once_slow guaranteed that once->done = true
165    // happens after f(arg), so by syncing to once->done = true here we're
166    // forcing ourselves to also wait until the effects of f(arg) are readble.
167    acquire_barrier();
168}
169
170template <typename Func, typename Arg>
171inline void SkOnce(SkOnceFlag* once, Func f, Arg arg, void(*atExit)()) {
172    return SkOnce(&once->done, &once->lock, f, arg, atExit);
173}
174
175#undef SK_ANNOTATE_BENIGN_RACE
176
177#endif  // SkOnce_DEFINED
178