// SkPixelRef.cpp revision c50acf2321d7a934c80d754e9cbe936dfb8eb4cc
1/*
2 * Copyright 2011 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8#include "SkBitmapCache.h"
9#include "SkMutex.h"
10#include "SkPixelRef.h"
11#include "SkTraceEvent.h"
12
13//#define SK_SUPPORT_LEGACY_UNBALANCED_PIXELREF_LOCKCOUNT
14//#define SK_TRACE_PIXELREF_LIFETIME
15
#ifdef SK_BUILD_FOR_WIN32
    // We don't have SK_BASE_MUTEX_INIT on Windows.

    // must be a power-of-2. undef to just use 1 mutex
    #define PIXELREF_MUTEX_RING_COUNT       32
    // No static initializer available on Windows (see note above), so rely on
    // default construction of the array elements.
    static SkBaseMutex gPixelRefMutexRing[PIXELREF_MUTEX_RING_COUNT];

#else
    // Ring of statically-initialized mutexes. Pixel refs constructed without
    // an explicit mutex are handed one of these round-robin by
    // get_default_mutex() below, spreading lock contention across the ring.
    static SkBaseMutex gPixelRefMutexRing[] = {
        SK_BASE_MUTEX_INIT, SK_BASE_MUTEX_INIT,
        SK_BASE_MUTEX_INIT, SK_BASE_MUTEX_INIT,
        SK_BASE_MUTEX_INIT, SK_BASE_MUTEX_INIT,
        SK_BASE_MUTEX_INIT, SK_BASE_MUTEX_INIT,

        SK_BASE_MUTEX_INIT, SK_BASE_MUTEX_INIT,
        SK_BASE_MUTEX_INIT, SK_BASE_MUTEX_INIT,
        SK_BASE_MUTEX_INIT, SK_BASE_MUTEX_INIT,
        SK_BASE_MUTEX_INIT, SK_BASE_MUTEX_INIT,

        SK_BASE_MUTEX_INIT, SK_BASE_MUTEX_INIT,
        SK_BASE_MUTEX_INIT, SK_BASE_MUTEX_INIT,
        SK_BASE_MUTEX_INIT, SK_BASE_MUTEX_INIT,
        SK_BASE_MUTEX_INIT, SK_BASE_MUTEX_INIT,

        SK_BASE_MUTEX_INIT, SK_BASE_MUTEX_INIT,
        SK_BASE_MUTEX_INIT, SK_BASE_MUTEX_INIT,
        SK_BASE_MUTEX_INIT, SK_BASE_MUTEX_INIT,
        SK_BASE_MUTEX_INIT, SK_BASE_MUTEX_INIT,
    };
    // must be a power-of-2. undef to just use 1 mutex
    #define PIXELREF_MUTEX_RING_COUNT SK_ARRAY_COUNT(gPixelRefMutexRing)

#endif
49
50static SkBaseMutex* get_default_mutex() {
51    static int32_t gPixelRefMutexRingIndex;
52
53    SkASSERT(SkIsPow2(PIXELREF_MUTEX_RING_COUNT));
54
55    // atomic_inc might be overkill here. It may be fine if once in a while
56    // we hit a race-condition and two subsequent calls get the same index...
57    int index = sk_atomic_inc(&gPixelRefMutexRingIndex);
58    return &gPixelRefMutexRing[index & (PIXELREF_MUTEX_RING_COUNT - 1)];
59}
60
61///////////////////////////////////////////////////////////////////////////////
62
63static uint32_t next_gen_id() {
64    static uint32_t gNextGenID = 0;
65    uint32_t genID;
66    // Loop in case our global wraps around, as we never want to return a 0.
67    do {
68        genID = sk_atomic_fetch_add(&gNextGenID, 2u) + 2;  // Never set the low bit.
69    } while (0 == genID);
70    return genID;
71}
72
73///////////////////////////////////////////////////////////////////////////////
74
75void SkPixelRef::setMutex(SkBaseMutex* mutex) {
76    if (NULL == mutex) {
77        mutex = get_default_mutex();
78    }
79    fMutex = mutex;
80}
81
82// just need a > 0 value, so pick a funny one to aid in debugging
83#define SKPIXELREF_PRELOCKED_LOCKCOUNT     123456789
84
85static SkImageInfo validate_info(const SkImageInfo& info) {
86    SkAlphaType newAlphaType = info.alphaType();
87    SkAssertResult(SkColorTypeValidateAlphaType(info.colorType(), info.alphaType(), &newAlphaType));
88    return info.makeAlphaType(newAlphaType);
89}
90
91#ifdef SK_TRACE_PIXELREF_LIFETIME
92    static int32_t gInstCounter;
93#endif
94
// Constructs a pixel ref with a mutex taken from the default ring.
SkPixelRef::SkPixelRef(const SkImageInfo& info)
    : fInfo(validate_info(info))   // canonicalize the alpha type up front
#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
    , fStableID(next_gen_id())     // assigned once here (Android framework only)
#endif

{
#ifdef SK_TRACE_PIXELREF_LIFETIME
    SkDebugf(" pixelref %d\n", sk_atomic_inc(&gInstCounter));
#endif
    this->setMutex(NULL);          // NULL => pick a mutex from the default ring
    fRec.zero();
    fLockCount = 0;
    this->needsNewGenID();         // genID is assigned lazily by getGenerationID()
    fIsImmutable = false;
    fPreLocked = false;
    fAddedToCache.store(false);
}
113
114
// Constructs a pixel ref with a caller-supplied mutex; mutex may be NULL, in
// which case one is taken from the default ring (see setMutex()).
SkPixelRef::SkPixelRef(const SkImageInfo& info, SkBaseMutex* mutex)
    : fInfo(validate_info(info))   // canonicalize the alpha type up front
#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
    , fStableID(next_gen_id())     // assigned once here (Android framework only)
#endif
{
#ifdef SK_TRACE_PIXELREF_LIFETIME
    SkDebugf(" pixelref %d\n", sk_atomic_inc(&gInstCounter));
#endif
    this->setMutex(mutex);
    fRec.zero();
    fLockCount = 0;
    this->needsNewGenID();         // genID is assigned lazily by getGenerationID()
    fIsImmutable = false;
    fPreLocked = false;
    fAddedToCache.store(false);
}
132
SkPixelRef::~SkPixelRef() {
#ifndef SK_SUPPORT_LEGACY_UNBALANCED_PIXELREF_LOCKCOUNT
    // At destruction the ref must be either fully unlocked or pre-locked.
    SkASSERT(SKPIXELREF_PRELOCKED_LOCKCOUNT == fLockCount || 0 == fLockCount);
#endif

#ifdef SK_TRACE_PIXELREF_LIFETIME
    SkDebugf("~pixelref %d\n", sk_atomic_dec(&gInstCounter) - 1);
#endif
    // Give genID-change listeners one last shot before this genID disappears.
    this->callGenIDChangeListeners();
}
143
// Marks the current genID as stale; getGenerationID() will lazily assign a
// fresh one on its next call.
void SkPixelRef::needsNewGenID() {
    fTaggedGenID.store(0);
    SkASSERT(!this->genIDIsUnique()); // This method isn't threadsafe, so the assert should be fine.
}
148
149void SkPixelRef::cloneGenID(const SkPixelRef& that) {
150    // This is subtle.  We must call that.getGenerationID() to make sure its genID isn't 0.
151    uint32_t genID = that.getGenerationID();
152
153    // Neither ID is unique any more.
154    // (These & ~1u are actually redundant.  that.getGenerationID() just did it for us.)
155    this->fTaggedGenID.store(genID & ~1u);
156    that. fTaggedGenID.store(genID & ~1u);
157
158    // This method isn't threadsafe, so these asserts should be fine.
159    SkASSERT(!this->genIDIsUnique());
160    SkASSERT(!that. genIDIsUnique());
161}
162
163static void validate_pixels_ctable(const SkImageInfo& info, const void* pixels,
164                                   const SkColorTable* ctable) {
165    if (info.isEmpty()) {
166        return; // can't require pixels if the dimensions are empty
167    }
168    SkASSERT(pixels);
169    if (kIndex_8_SkColorType == info.colorType()) {
170        SkASSERT(ctable);
171    } else {
172        SkASSERT(NULL == ctable);
173    }
174}
175
176void SkPixelRef::setPreLocked(void* pixels, size_t rowBytes, SkColorTable* ctable) {
177#ifndef SK_IGNORE_PIXELREF_SETPRELOCKED
178    validate_pixels_ctable(fInfo, pixels, ctable);
179    // only call me in your constructor, otherwise fLockCount tracking can get
180    // out of sync.
181    fRec.fPixels = pixels;
182    fRec.fColorTable = ctable;
183    fRec.fRowBytes = rowBytes;
184    fLockCount = SKPIXELREF_PRELOCKED_LOCKCOUNT;
185    fPreLocked = true;
186#endif
187}
188
189// Increments fLockCount only on success
190bool SkPixelRef::lockPixelsInsideMutex() {
191    fMutex->assertHeld();
192
193    if (1 == ++fLockCount) {
194        SkASSERT(fRec.isZero());
195        if (!this->onNewLockPixels(&fRec)) {
196            fRec.zero();
197            fLockCount -= 1;    // we return fLockCount unchanged if we fail.
198            return false;
199        }
200    }
201    validate_pixels_ctable(fInfo, fRec.fPixels, fRec.fColorTable);
202    return fRec.fPixels != NULL;
203}
204
205// For historical reasons, we always inc fLockCount, even if we return false.
206// It would be nice to change this (it seems), and only inc if we actually succeed...
207bool SkPixelRef::lockPixels() {
208    SkASSERT(!fPreLocked || SKPIXELREF_PRELOCKED_LOCKCOUNT == fLockCount);
209
210    if (!fPreLocked) {
211        TRACE_EVENT_BEGIN0("skia", "SkPixelRef::lockPixelsMutex");
212        SkAutoMutexAcquire  ac(*fMutex);
213        TRACE_EVENT_END0("skia", "SkPixelRef::lockPixelsMutex");
214        SkDEBUGCODE(int oldCount = fLockCount;)
215        bool success = this->lockPixelsInsideMutex();
216        // lockPixelsInsideMutex only increments the count if it succeeds.
217        SkASSERT(oldCount + (int)success == fLockCount);
218
219        if (!success) {
220            // For compatibility with SkBitmap calling lockPixels, we still want to increment
221            // fLockCount even if we failed. If we updated SkBitmap we could remove this oddity.
222            fLockCount += 1;
223            return false;
224        }
225    }
226    validate_pixels_ctable(fInfo, fRec.fPixels, fRec.fColorTable);
227    return fRec.fPixels != NULL;
228}
229
230bool SkPixelRef::lockPixels(LockRec* rec) {
231    if (this->lockPixels()) {
232        *rec = fRec;
233        return true;
234    }
235    return false;
236}
237
238void SkPixelRef::unlockPixels() {
239    SkASSERT(!fPreLocked || SKPIXELREF_PRELOCKED_LOCKCOUNT == fLockCount);
240
241    if (!fPreLocked) {
242        SkAutoMutexAcquire  ac(*fMutex);
243
244        SkASSERT(fLockCount > 0);
245        if (0 == --fLockCount) {
246            // don't call onUnlockPixels unless onLockPixels succeeded
247            if (fRec.fPixels) {
248                this->onUnlockPixels();
249                fRec.zero();
250            } else {
251                SkASSERT(fRec.isZero());
252            }
253        }
254    }
255}
256
257bool SkPixelRef::requestLock(const LockRequest& request, LockResult* result) {
258    SkASSERT(result);
259    if (request.fSize.isEmpty()) {
260        return false;
261    }
262    // until we support subsets, we have to check this...
263    if (request.fSize.width() != fInfo.width() || request.fSize.height() != fInfo.height()) {
264        return false;
265    }
266
267    if (fPreLocked) {
268        result->fUnlockProc = NULL;
269        result->fUnlockContext = NULL;
270        result->fCTable = fRec.fColorTable;
271        result->fPixels = fRec.fPixels;
272        result->fRowBytes = fRec.fRowBytes;
273        result->fSize.set(fInfo.width(), fInfo.height());
274    } else {
275        SkAutoMutexAcquire  ac(*fMutex);
276        if (!this->onRequestLock(request, result)) {
277            return false;
278        }
279    }
280    validate_pixels_ctable(fInfo, result->fPixels, result->fCTable);
281    return result->fPixels != NULL;
282}
283
// Public entry: defers to the subclass's onLockPixelsAreWritable().
bool SkPixelRef::lockPixelsAreWritable() const {
    return this->onLockPixelsAreWritable();
}
287
// Base-class default: locked pixels are writable unless a subclass overrides.
bool SkPixelRef::onLockPixelsAreWritable() const {
    return true;
}
291
// Returns this ref's generation ID, lazily assigning one on first call (or
// after needsNewGenID()). Internally the low bit tags the ID as "unique to
// this ref"; callers never see that bit.
uint32_t SkPixelRef::getGenerationID() const {
    uint32_t id = fTaggedGenID.load();
    if (0 == id) {
        uint32_t next = next_gen_id() | 1u;
        if (fTaggedGenID.compare_exchange(&id, next)) {
            id = next;  // There was no race or we won the race.  fTaggedGenID is next now.
        } else {
            // We lost a race to set fTaggedGenID. compare_exchange() filled id with the winner.
        }
        // We can't quite SkASSERT(this->genIDIsUnique()). It could be non-unique
        // if we got here via the else path (pretty unlikely, but possible).
    }
    return id & ~1u;  // Mask off bottom unique bit.
}
306
307void SkPixelRef::addGenIDChangeListener(GenIDChangeListener* listener) {
308    if (NULL == listener || !this->genIDIsUnique()) {
309        // No point in tracking this if we're not going to call it.
310        SkDELETE(listener);
311        return;
312    }
313    *fGenIDChangeListeners.append() = listener;
314}
315
316// we need to be called *before* the genID gets changed or zerod
317void SkPixelRef::callGenIDChangeListeners() {
318    // We don't invalidate ourselves if we think another SkPixelRef is sharing our genID.
319    if (this->genIDIsUnique()) {
320        for (int i = 0; i < fGenIDChangeListeners.count(); i++) {
321            fGenIDChangeListeners[i]->onChange();
322        }
323
324        // TODO: SkAtomic could add "old_value = atomic.xchg(new_value)" to make this clearer.
325        if (fAddedToCache.load()) {
326            SkNotifyBitmapGenIDIsStale(this->getGenerationID());
327            fAddedToCache.store(false);
328        }
329    }
330    // Listeners get at most one shot, so whether these triggered or not, blow them away.
331    fGenIDChangeListeners.deleteAll();
332}
333
// Invalidates our genID because the pixel contents changed. Order matters:
// listeners must be called before the genID is zeroed (they use the old ID).
void SkPixelRef::notifyPixelsChanged() {
#ifdef SK_DEBUG
    if (fIsImmutable) {
        SkDebugf("========== notifyPixelsChanged called on immutable pixelref");
    }
#endif
    this->callGenIDChangeListeners();
    this->needsNewGenID();
    this->onNotifyPixelsChanged();
}
344
345void SkPixelRef::changeAlphaType(SkAlphaType at) {
346    *const_cast<SkImageInfo*>(&fInfo) = fInfo.makeAlphaType(at);
347}
348
// One-way switch: nothing in this file ever clears fIsImmutable again.
void SkPixelRef::setImmutable() {
    fIsImmutable = true;
}
352
// Public entry: defers to the subclass's onReadPixels().
bool SkPixelRef::readPixels(SkBitmap* dst, const SkIRect* subset) {
    return this->onReadPixels(dst, subset);
}
356
357///////////////////////////////////////////////////////////////////////////////////////////////////
358
// Base-class default: reading pixels back is unsupported.
bool SkPixelRef::onReadPixels(SkBitmap* dst, const SkIRect* subset) {
    return false;
}
362
// Base-class default: no-op hook; subclasses may react to pixel changes.
void SkPixelRef::onNotifyPixelsChanged() { }
364
// Base-class default: no encoded form of the pixels is available.
SkData* SkPixelRef::onRefEncodedData() {
    return NULL;
}
368
// Base-class default: YUV plane access is unsupported.
bool SkPixelRef::onGetYUV8Planes(SkISize sizes[3], void* planes[3], size_t rowBytes[3],
                                 SkYUVColorSpace* colorSpace) {
    return false;
}
373
// Base-class default: 0 here signals no size information; subclasses that
// track their storage override this.
size_t SkPixelRef::getAllocatedSizeInBytes() const {
    return 0;
}
377
// fUnlockProc installed by SkPixelRef::onRequestLock(): undoes both the lock
// and the ref taken there.
static void unlock_legacy_result(void* ctx) {
    SkPixelRef* pr = (SkPixelRef*)ctx;
    pr->unlockPixels();
    pr->unref();    // balancing the SkRef in onRequestLock
}
383
384bool SkPixelRef::onRequestLock(const LockRequest& request, LockResult* result) {
385    if (!this->lockPixelsInsideMutex()) {
386        return false;
387    }
388
389    result->fUnlockProc = unlock_legacy_result;
390    result->fUnlockContext = SkRef(this);   // this is balanced in our fUnlockProc
391    result->fCTable = fRec.fColorTable;
392    result->fPixels = fRec.fPixels;
393    result->fRowBytes = fRec.fRowBytes;
394    result->fSize.set(fInfo.width(), fInfo.height());
395    return true;
396}
397