1/*
2 * Copyright 2015 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8#include "SkSharedMutex.h"
9
10#include "SkAtomics.h"
11#include "SkTypes.h"
12#include "SkSemaphore.h"
13
#if defined(THREAD_SANITIZER)

/* Dynamic annotations that teach ThreadSanitizer about this custom
   reader/writer lock so it can track acquisitions/releases and report
   misuse or races on the data it guards. */

/* Report that a lock has been created at address "lock". */
#define ANNOTATE_RWLOCK_CREATE(lock) \
    AnnotateRWLockCreate(__FILE__, __LINE__, lock)

/* Report that the lock at address "lock" is about to be destroyed. */
#define ANNOTATE_RWLOCK_DESTROY(lock) \
    AnnotateRWLockDestroy(__FILE__, __LINE__, lock)

/* Report that the lock at address "lock" has been acquired.
   is_w=1 for writer lock, is_w=0 for reader lock. */
#define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \
    AnnotateRWLockAcquired(__FILE__, __LINE__, lock, is_w)

/* Report that the lock at address "lock" is about to be released. */
#define ANNOTATE_RWLOCK_RELEASED(lock, is_w) \
  AnnotateRWLockReleased(__FILE__, __LINE__, lock, is_w)

#ifdef DYNAMIC_ANNOTATIONS_WANT_ATTRIBUTE_WEAK
# ifdef __GNUC__
#  define DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK __attribute__((weak))
# else
/* TODO(glider): for Windows support we may want to change this macro in order
   to prepend __declspec(selectany) to the annotations' declarations. */
#  error weak annotations are not supported for your compiler
# endif
#else
# define DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK
#endif

/* Declarations of the annotation entry points supplied by the TSAN runtime.
   DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK expands to a weak attribute only when
   DYNAMIC_ANNOTATIONS_WANT_ATTRIBUTE_WEAK is defined above. */
extern "C" {
void AnnotateRWLockCreate(
    const char *file, int line,
    const volatile void *lock) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
void AnnotateRWLockDestroy(
    const char *file, int line,
    const volatile void *lock) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
void AnnotateRWLockAcquired(
    const char *file, int line,
    const volatile void *lock, long is_w) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
void AnnotateRWLockReleased(
    const char *file, int line,
    const volatile void *lock, long is_w) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
}

#else

/* No-op versions when not building with ThreadSanitizer. */
#define ANNOTATE_RWLOCK_CREATE(lock)
#define ANNOTATE_RWLOCK_DESTROY(lock)
#define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w)
#define ANNOTATE_RWLOCK_RELEASED(lock, is_w)

#endif
68
69#ifdef SK_DEBUG
70
71    #include "SkThreadID.h"
72    #include "SkTDArray.h"
73
74    class SkSharedMutex::ThreadIDSet {
75    public:
76        // Returns true if threadID is in the set.
77        bool find(SkThreadID threadID) const {
78            for (auto& t : fThreadIDs) {
79                if (t == threadID) return true;
80            }
81            return false;
82        }
83
84        // Returns true if did not already exist.
85        bool tryAdd(SkThreadID threadID) {
86            for (auto& t : fThreadIDs) {
87                if (t == threadID) return false;
88            }
89            fThreadIDs.append(1, &threadID);
90            return true;
91        }
92        // Returns true if already exists in Set.
93        bool tryRemove(SkThreadID threadID) {
94            for (int i = 0; i < fThreadIDs.count(); ++i) {
95                if (fThreadIDs[i] == threadID) {
96                    fThreadIDs.remove(i);
97                    return true;
98                }
99            }
100            return false;
101        }
102
103        void swap(ThreadIDSet& other) {
104            fThreadIDs.swap(other.fThreadIDs);
105        }
106
107        int count() const {
108            return fThreadIDs.count();
109        }
110
111    private:
112        SkTDArray<SkThreadID> fThreadIDs;
113    };
114
    // Create the three debug-tracking sets and register this lock with TSAN.
    SkSharedMutex::SkSharedMutex()
        : fCurrentShared(new ThreadIDSet)
        , fWaitingExclusive(new ThreadIDSet)
        , fWaitingShared(new ThreadIDSet){
        ANNOTATE_RWLOCK_CREATE(this);
    }
121
    // Tell TSAN the lock is going away; the sets clean themselves up.
    SkSharedMutex::~SkSharedMutex() {  ANNOTATE_RWLOCK_DESTROY(this); }
123
    // Acquire the lock exclusively. Blocks until all currently-running shared
    // holders and any writer ahead of this thread have released.
    void SkSharedMutex::acquire() {
        SkThreadID threadID(SkGetThreadID());
        int currentSharedCount;
        int waitingExclusiveCount;
        {
            SkAutoMutexAcquire l(&fMu);

            // Recursive exclusive locking is not supported.
            if (!fWaitingExclusive->tryAdd(threadID)) {
                SkDEBUGFAILF("Thread %lx already has an exclusive lock\n", threadID);
            }

            currentSharedCount = fCurrentShared->count();
            waitingExclusiveCount = fWaitingExclusive->count();
        }

        // Wait if readers are running, or if another writer is ahead of us
        // (count > 1 because this thread just added itself to the set).
        if (currentSharedCount > 0 || waitingExclusiveCount > 1) {
            fExclusiveQueue.wait();
        }

        ANNOTATE_RWLOCK_ACQUIRED(this, 1);
    }
145
    // Implementation Detail:
    // The shared threads need two separate queues to keep the threads that were added after the
    // exclusive lock separate from the threads added before.
    void SkSharedMutex::release() {
        ANNOTATE_RWLOCK_RELEASED(this, 1);
        SkThreadID threadID(SkGetThreadID());
        int sharedWaitingCount;
        int exclusiveWaitingCount;
        int sharedQueueSelect;
        {
            SkAutoMutexAcquire l(&fMu);
            // Holding the exclusive lock implies there are no running shared holders.
            SkASSERT(0 == fCurrentShared->count());
            if (!fWaitingExclusive->tryRemove(threadID)) {
                SkDEBUGFAILF("Thread %lx did not have the lock held.\n", threadID);
            }
            exclusiveWaitingCount = fWaitingExclusive->count();
            sharedWaitingCount = fWaitingShared->count();
            // Promote the waiting-shared set to the running-shared set; they run next.
            fWaitingShared.swap(fCurrentShared);
            sharedQueueSelect = fSharedQueueSelect;
            // Flip the queue so readers that arrive after this release wait on the
            // other semaphore, keeping them separate from the batch woken below.
            if (sharedWaitingCount > 0) {
                fSharedQueueSelect = 1 - fSharedQueueSelect;
            }
        }

        // Wake all waiting readers if there are any; otherwise wake one writer.
        if (sharedWaitingCount > 0) {
            fSharedQueue[sharedQueueSelect].signal(sharedWaitingCount);
        } else if (exclusiveWaitingCount > 0) {
            fExclusiveQueue.signal();
        }
    }
176
177    void SkSharedMutex::assertHeld() const {
178        SkThreadID threadID(SkGetThreadID());
179        SkAutoMutexAcquire l(&fMu);
180        SkASSERT(0 == fCurrentShared->count());
181        SkASSERT(fWaitingExclusive->find(threadID));
182    }
183
    // Acquire the lock in shared (reader) mode. If a writer is already waiting,
    // queue behind it so writers are not starved.
    void SkSharedMutex::acquireShared() {
        SkThreadID threadID(SkGetThreadID());
        int exclusiveWaitingCount;
        int sharedQueueSelect;
        {
            SkAutoMutexAcquire l(&fMu);
            exclusiveWaitingCount = fWaitingExclusive->count();
            if (exclusiveWaitingCount > 0) {
                // A writer is waiting; this reader must wait for the writer's release.
                if (!fWaitingShared->tryAdd(threadID)) {
                    SkDEBUGFAILF("Thread %lx was already waiting!\n", threadID);
                }
            } else {
                // No writers pending; run immediately as a shared holder.
                if (!fCurrentShared->tryAdd(threadID)) {
                    SkDEBUGFAILF("Thread %lx already holds a shared lock!\n", threadID);
                }
            }
            sharedQueueSelect = fSharedQueueSelect;
        }

        // Block on the queue snapshotted above; release() signals it.
        if (exclusiveWaitingCount > 0) {
            fSharedQueue[sharedQueueSelect].wait();
        }

        ANNOTATE_RWLOCK_ACQUIRED(this, 0);
    }
209
    // Release a shared (reader) hold. The last reader out wakes one waiting writer.
    void SkSharedMutex::releaseShared() {
        ANNOTATE_RWLOCK_RELEASED(this, 0);
        SkThreadID threadID(SkGetThreadID());

        int currentSharedCount;
        int waitingExclusiveCount;
        {
            SkAutoMutexAcquire l(&fMu);
            if (!fCurrentShared->tryRemove(threadID)) {
                SkDEBUGFAILF("Thread %lx does not hold a shared lock.\n", threadID);
            }
            currentSharedCount = fCurrentShared->count();
            waitingExclusiveCount = fWaitingExclusive->count();
        }

        // If this was the last reader and a writer is waiting, let one writer run.
        if (0 == currentSharedCount && waitingExclusiveCount > 0) {
            fExclusiveQueue.signal();
        }
    }
229
230    void SkSharedMutex::assertHeldShared() const {
231        SkThreadID threadID(SkGetThreadID());
232        SkAutoMutexAcquire l(&fMu);
233        SkASSERT(fCurrentShared->find(threadID));
234    }
235
236#else
237
    // The fQueueCounts field packs three counts into a single int32_t so that all of
    // them can be read and updated with one atomic operation. The three counts must be
    // the same size; each gets kLogThreadCount = 10 bits, which limits the mutex to
    // 2^10 = 1024 threads per category.
    //
    // The three counts held in fQueueCounts are:
    // * Shared - the number of shared lock holders currently running.
    // * WaitingExclusive - the number of threads waiting for an exclusive lock.
    // * WaitingShared - the number of threads waiting to run while waiting for an exclusive thread
    //   to finish.
    static const int kLogThreadCount = 10;

    // Bit offsets and masks for the three packed counts.
    // NOTE(review): "Exlusive" is a typo for "Exclusive", but the identifier is used
    // throughout this file, so renaming it requires a coordinated change.
    enum {
        kSharedOffset          = (0 * kLogThreadCount),
        kWaitingExlusiveOffset = (1 * kLogThreadCount),
        kWaitingSharedOffset   = (2 * kLogThreadCount),
        kSharedMask            = ((1 << kLogThreadCount) - 1) << kSharedOffset,
        kWaitingExclusiveMask  = ((1 << kLogThreadCount) - 1) << kWaitingExlusiveOffset,
        kWaitingSharedMask     = ((1 << kLogThreadCount) - 1) << kWaitingSharedOffset,
    };
257
    // All counts start at zero; register the lock with TSAN.
    SkSharedMutex::SkSharedMutex() : fQueueCounts(0) { ANNOTATE_RWLOCK_CREATE(this); }
    // Tell TSAN the lock is going away.
    SkSharedMutex::~SkSharedMutex() {  ANNOTATE_RWLOCK_DESTROY(this); }
    // Acquire the lock exclusively. Blocks while readers are running or another
    // writer is ahead of this thread.
    void SkSharedMutex::acquire() {
        // Increment the count of exclusive queue waiters. Acquire ordering so the
        // protected data published by the previous releaser is visible here.
        int32_t oldQueueCounts = fQueueCounts.fetch_add(1 << kWaitingExlusiveOffset,
                                                        sk_memory_order_acquire);

        // If there are no other exclusive waiters and no shared threads are running then run
        // else wait.
        if ((oldQueueCounts & kWaitingExclusiveMask) > 0 || (oldQueueCounts & kSharedMask) > 0) {
            fExclusiveQueue.wait();
        }
        ANNOTATE_RWLOCK_ACQUIRED(this, 1);
    }
272
    // Release the exclusive lock, handing the mutex to the whole batch of waiting
    // readers if there are any, otherwise to a single waiting writer.
    void SkSharedMutex::release() {
        ANNOTATE_RWLOCK_RELEASED(this, 1);

        int32_t oldQueueCounts = fQueueCounts.load(sk_memory_order_relaxed);
        int32_t waitingShared;
        int32_t newQueueCounts;
        do {
            newQueueCounts = oldQueueCounts;

            // Decrement exclusive waiters.
            newQueueCounts -= 1 << kWaitingExlusiveOffset;

            // The number of threads waiting to acquire a shared lock.
            waitingShared = (oldQueueCounts & kWaitingSharedMask) >> kWaitingSharedOffset;

            // If there are any move the counts of all the shared waiters to actual shared. They are
            // going to run next.
            if (waitingShared > 0) {

                // Set waiting shared to zero.
                newQueueCounts &= ~kWaitingSharedMask;

                // Because this is the exclusive release, then there are zero readers. So, the bits
                // for shared locks should be zero. Since those bits are zero, we can just |= in the
                // waitingShared count instead of clearing with an &= and then |= the count.
                newQueueCounts |= waitingShared << kSharedOffset;
            }

        // Release ordering on success publishes the protected data to the next acquirer(s).
        } while (!fQueueCounts.compare_exchange(&oldQueueCounts, newQueueCounts,
                                                sk_memory_order_release, sk_memory_order_relaxed));

        if (waitingShared > 0) {
            // Run all the shared.
            fSharedQueue.signal(waitingShared);
        } else if ((newQueueCounts & kWaitingExclusiveMask) > 0) {
            // Run a single exclusive waiter.
            fExclusiveQueue.signal();
        }
    }
312
    // Acquire the lock in shared (reader) mode; queues behind any waiting writer
    // so writers are not starved.
    void SkSharedMutex::acquireShared() {
        int32_t oldQueueCounts = fQueueCounts.load(sk_memory_order_relaxed);
        int32_t newQueueCounts;
        do {
            newQueueCounts = oldQueueCounts;
            // If there are waiting exclusives then this shared lock waits else it runs.
            if ((newQueueCounts & kWaitingExclusiveMask) > 0) {
                newQueueCounts += 1 << kWaitingSharedOffset;
            } else {
                newQueueCounts += 1 << kSharedOffset;
            }
        } while (!fQueueCounts.compare_exchange(&oldQueueCounts, newQueueCounts,
                                                sk_memory_order_acquire, sk_memory_order_relaxed));

        // If there are waiting exclusives, then this shared waits until after it runs.
        if ((newQueueCounts & kWaitingExclusiveMask) > 0) {
            fSharedQueue.wait();
        }
        ANNOTATE_RWLOCK_ACQUIRED(this, 0);

    }
334
    // Release a shared hold; the last reader out wakes one waiting writer.
    void SkSharedMutex::releaseShared() {
        ANNOTATE_RWLOCK_RELEASED(this, 0);

        // Decrement the shared count. Release ordering publishes this reader's
        // reads/writes before a waiting writer can run.
        int32_t oldQueueCounts = fQueueCounts.fetch_sub(1 << kSharedOffset,
                                                        sk_memory_order_release);

        // If shared count is going to zero (because the old count == 1) and there are exclusive
        // waiters, then run a single exclusive waiter.
        if (((oldQueueCounts & kSharedMask) >> kSharedOffset) == 1
            && (oldQueueCounts & kWaitingExclusiveMask) > 0) {
            fExclusiveQueue.signal();
        }
    }
349
350#endif
351