SkPixelRef.cpp revision 19663e54c017499406036746e7689193aa6417e6
/*
 * Copyright 2011 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "SkBitmapCache.h"
#include "SkPixelRef.h"
#include "SkThread.h"
#include "SkTraceEvent.h"

//#define SK_SUPPORT_LEGACY_UNBALANCED_PIXELREF_LOCKCOUNT
//#define SK_TRACE_PIXELREF_LIFETIME

// A small ring of mutexes is shared among all SkPixelRefs that don't supply
// their own mutex; each ref is assigned one round-robin (see get_default_mutex).
// This spreads lock contention across PIXELREF_MUTEX_RING_COUNT locks instead
// of serializing every pixel-lock in the process on a single global mutex.
#ifdef SK_BUILD_FOR_WIN32
    // We don't have SK_BASE_MUTEX_INIT on Windows.

    // must be a power-of-2. undef to just use 1 mutex
    #define PIXELREF_MUTEX_RING_COUNT 32
    static SkBaseMutex gPixelRefMutexRing[PIXELREF_MUTEX_RING_COUNT];

#else
    static SkBaseMutex gPixelRefMutexRing[] = {
        SK_BASE_MUTEX_INIT, SK_BASE_MUTEX_INIT,
        SK_BASE_MUTEX_INIT, SK_BASE_MUTEX_INIT,
        SK_BASE_MUTEX_INIT, SK_BASE_MUTEX_INIT,
        SK_BASE_MUTEX_INIT, SK_BASE_MUTEX_INIT,

        SK_BASE_MUTEX_INIT, SK_BASE_MUTEX_INIT,
        SK_BASE_MUTEX_INIT, SK_BASE_MUTEX_INIT,
        SK_BASE_MUTEX_INIT, SK_BASE_MUTEX_INIT,
        SK_BASE_MUTEX_INIT, SK_BASE_MUTEX_INIT,

        SK_BASE_MUTEX_INIT, SK_BASE_MUTEX_INIT,
        SK_BASE_MUTEX_INIT, SK_BASE_MUTEX_INIT,
        SK_BASE_MUTEX_INIT, SK_BASE_MUTEX_INIT,
        SK_BASE_MUTEX_INIT, SK_BASE_MUTEX_INIT,

        SK_BASE_MUTEX_INIT, SK_BASE_MUTEX_INIT,
        SK_BASE_MUTEX_INIT, SK_BASE_MUTEX_INIT,
        SK_BASE_MUTEX_INIT, SK_BASE_MUTEX_INIT,
        SK_BASE_MUTEX_INIT, SK_BASE_MUTEX_INIT,
    };
    // must be a power-of-2. undef to just use 1 mutex
    #define PIXELREF_MUTEX_RING_COUNT SK_ARRAY_COUNT(gPixelRefMutexRing)

#endif

// Returns the next mutex from the ring, round-robin. The power-of-2 count lets
// us reduce the counter with a cheap mask instead of a modulo.
static SkBaseMutex* get_default_mutex() {
    static int32_t gPixelRefMutexRingIndex;

    SkASSERT(SkIsPow2(PIXELREF_MUTEX_RING_COUNT));

    // atomic_inc might be overkill here. It may be fine if once in a while
    // we hit a race-condition and two subsequent calls get the same index...
    int index = sk_atomic_inc(&gPixelRefMutexRingIndex);
    return &gPixelRefMutexRing[index & (PIXELREF_MUTEX_RING_COUNT - 1)];
}

///////////////////////////////////////////////////////////////////////////////

// Returns a process-unique, never-zero generation ID with the low bit clear.
// The low bit is reserved as a "unique/unshared" tag: getGenerationID() sets it
// when it first assigns an ID, and cloneGenID() clears it on both refs.
static uint32_t next_gen_id() {
    static uint32_t gNextGenID = 0;
    uint32_t genID;
    // Loop in case our global wraps around, as we never want to return a 0.
    do {
        genID = sk_atomic_fetch_add(&gNextGenID, 2u) + 2;  // Never set the low bit.
    } while (0 == genID);
    return genID;
}

///////////////////////////////////////////////////////////////////////////////

// Installs the mutex guarding this ref's lock/unlock bookkeeping.
// NULL means "pick one for me" from the shared ring above.
void SkPixelRef::setMutex(SkBaseMutex* mutex) {
    if (NULL == mutex) {
        mutex = get_default_mutex();
    }
    fMutex = mutex;
}

// just need a > 0 value, so pick a funny one to aid in debugging
#define SKPIXELREF_PRELOCKED_LOCKCOUNT 123456789

// Coerces the alpha type in `info` to one that is legal for its color type.
// SkAssertResult means an invalid combination only trips in debug builds;
// release builds silently use the corrected alpha type.
static SkImageInfo validate_info(const SkImageInfo& info) {
    SkAlphaType newAlphaType = info.alphaType();
    SkAssertResult(SkColorTypeValidateAlphaType(info.colorType(), info.alphaType(), &newAlphaType));
    return info.makeAlphaType(newAlphaType);
}

#ifdef SK_TRACE_PIXELREF_LIFETIME
    static int32_t gInstCounter;  // live-instance count, debug tracing only
#endif

SkPixelRef::SkPixelRef(const SkImageInfo& info)
    : fInfo(validate_info(info))
#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
    , fStableID(next_gen_id())
#endif

{
#ifdef SK_TRACE_PIXELREF_LIFETIME
    SkDebugf(" pixelref %d\n", sk_atomic_inc(&gInstCounter));
#endif
    this->setMutex(NULL);  // grab a shared mutex from the ring
    fRec.zero();
    fLockCount = 0;
    this->needsNewGenID();
    fIsImmutable = false;
    fPreLocked = false;
    fAddedToCache.store(false);
}


// NOTE(review): body duplicates the other constructor except for the mutex
// passed to setMutex(); kept separate here to preserve the original structure.
SkPixelRef::SkPixelRef(const SkImageInfo& info, SkBaseMutex* mutex)
    : fInfo(validate_info(info))
#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
    , fStableID(next_gen_id())
#endif
{
#ifdef SK_TRACE_PIXELREF_LIFETIME
    SkDebugf(" pixelref %d\n", sk_atomic_inc(&gInstCounter));
#endif
    this->setMutex(mutex);
    fRec.zero();
    fLockCount = 0;
    this->needsNewGenID();
    fIsImmutable = false;
    fPreLocked = false;
    fAddedToCache.store(false);
}

SkPixelRef::~SkPixelRef() {
#ifndef SK_SUPPORT_LEGACY_UNBALANCED_PIXELREF_LOCKCOUNT
    // Every lockPixels() must have been balanced by unlockPixels(), unless the
    // ref was pre-locked (in which case the sentinel count is expected).
    SkASSERT(SKPIXELREF_PRELOCKED_LOCKCOUNT == fLockCount || 0 == fLockCount);
#endif

#ifdef SK_TRACE_PIXELREF_LIFETIME
    SkDebugf("~pixelref %d\n", sk_atomic_dec(&gInstCounter) - 1);
#endif
    // Fire listeners one last time so caches keyed on our genID can purge.
    this->callGenIDChangeListeners();
}

// Marks the current generation ID stale; a fresh one is lazily assigned on the
// next getGenerationID() call.
void SkPixelRef::needsNewGenID() {
    fTaggedGenID.store(0);
    SkASSERT(!this->genIDIsUnique()); // This method isn't threadsafe, so the assert should be fine.
}

// Shares `that`'s generation ID with this ref. Once shared, neither ID is
// "unique" (low tag bit cleared), which disables invalidation notifications.
void SkPixelRef::cloneGenID(const SkPixelRef& that) {
    // This is subtle. We must call that.getGenerationID() to make sure its genID isn't 0.
    uint32_t genID = that.getGenerationID();

    // Neither ID is unique any more.
    // (These & ~1u are actually redundant. that.getGenerationID() just did it for us.)
    this->fTaggedGenID.store(genID & ~1u);
    that.fTaggedGenID.store(genID & ~1u);

    // This method isn't threadsafe, so these asserts should be fine.
    SkASSERT(!this->genIDIsUnique());
    SkASSERT(!that.genIDIsUnique());
}

// Permanently pins this ref's pixels: lock/unlock become no-ops that just hand
// back the values recorded here. The sentinel lock count flags the state.
void SkPixelRef::setPreLocked(void* pixels, size_t rowBytes, SkColorTable* ctable) {
#ifndef SK_IGNORE_PIXELREF_SETPRELOCKED
    // only call me in your constructor, otherwise fLockCount tracking can get
    // out of sync.
    fRec.fPixels = pixels;
    fRec.fColorTable = ctable;
    fRec.fRowBytes = rowBytes;
    fLockCount = SKPIXELREF_PRELOCKED_LOCKCOUNT;
    fPreLocked = true;
#endif
}

// Core locking step; caller must already hold fMutex. On the 0 -> 1 transition
// it asks the subclass (onNewLockPixels) to materialize the pixels; deeper
// nested locks just return the cached fRec. Returns false (with fLockCount
// unchanged) if the subclass could not provide pixels.
bool SkPixelRef::lockPixelsInsideMutex(LockRec* rec) {
    fMutex->assertHeld();

    // For historical reasons, we always inc fLockCount, even if we return false.
    // It would be nice to change this (it seems), and only inc if we actually succeed...
    if (1 == ++fLockCount) {
        SkASSERT(fRec.isZero());

        // NOTE(review): this local deliberately shadows the `rec` parameter;
        // `*rec = fRec` below (outside this scope) refers to the parameter.
        LockRec rec;
        if (!this->onNewLockPixels(&rec)) {
            fLockCount -= 1;    // we return fLockCount unchanged if we fail.
            return false;
        }
        SkASSERT(!rec.isZero());    // else why did onNewLock return true?
        fRec = rec;
    }
    *rec = fRec;
    return true;
}

// Public lock entry point. Pre-locked refs short-circuit with the recorded
// pixels; everyone else serializes on fMutex.
bool SkPixelRef::lockPixels(LockRec* rec) {
    SkASSERT(!fPreLocked || SKPIXELREF_PRELOCKED_LOCKCOUNT == fLockCount);

    if (fPreLocked) {
        *rec = fRec;
        return true;
    } else {
        // The trace event brackets only the mutex acquisition (contention),
        // not the work done while holding it.
        TRACE_EVENT_BEGIN0("skia", "SkPixelRef::lockPixelsMutex");
        SkAutoMutexAcquire ac(*fMutex);
        TRACE_EVENT_END0("skia", "SkPixelRef::lockPixelsMutex");
        SkDEBUGCODE(int oldCount = fLockCount;)
        bool success = this->lockPixelsInsideMutex(rec);
        // lockPixelsInsideMutex only increments the count if it succeeds.
        SkASSERT(oldCount + (int)success == fLockCount);

        if (!success) {
            // For compatibility with SkBitmap calling lockPixels, we still want to increment
            // fLockCount even if we failed. If we updated SkBitmap we could remove this oddity.
            fLockCount += 1;
        }
        return success;
    }
}

// Convenience overload for callers that don't need the pixel address/rowBytes.
bool SkPixelRef::lockPixels() {
    LockRec rec;
    return this->lockPixels(&rec);
}

// Balances lockPixels(). On the 1 -> 0 transition, releases the subclass's
// pixels via onUnlockPixels -- but only if the lock actually succeeded (i.e.
// fRec.fPixels is non-NULL; see the failure path in lockPixels).
void SkPixelRef::unlockPixels() {
    SkASSERT(!fPreLocked || SKPIXELREF_PRELOCKED_LOCKCOUNT == fLockCount);

    if (!fPreLocked) {
        SkAutoMutexAcquire ac(*fMutex);

        SkASSERT(fLockCount > 0);
        if (0 == --fLockCount) {
            // don't call onUnlockPixels unless onLockPixels succeeded
            if (fRec.fPixels) {
                this->onUnlockPixels();
                fRec.zero();
            } else {
                SkASSERT(fRec.isZero());
            }
        }
    }
}

// Newer, request-based lock API. Pre-locked refs answer directly (no unlock
// proc needed); otherwise the subclass's onRequestLock runs under fMutex.
// Rejects empty requests up front.
bool SkPixelRef::requestLock(const LockRequest& request, LockResult* result) {
    SkASSERT(result);
    if (request.fSize.isEmpty()) {
        return false;
    }

    if (fPreLocked) {
        result->fUnlockProc = NULL;
        result->fUnlockContext = NULL;
        result->fCTable = fRec.fColorTable;
        result->fPixels = fRec.fPixels;
        result->fRowBytes = fRec.fRowBytes;
        result->fSize.set(fInfo.width(), fInfo.height());
        return true;
    } else {
        SkAutoMutexAcquire ac(*fMutex);
        return this->onRequestLock(request, result);
    }
}

bool SkPixelRef::lockPixelsAreWritable() const {
    return this->onLockPixelsAreWritable();
}

// Default: locked pixels may be written. Subclasses override to refuse.
bool SkPixelRef::onLockPixelsAreWritable() const {
    return true;
}

// Lazily assigns a generation ID on first use. The stored value keeps the low
// bit set to mean "this ID has never been shared" (see cloneGenID); callers
// always see the ID with that tag bit masked off.
uint32_t SkPixelRef::getGenerationID() const {
    uint32_t id = fTaggedGenID.load();
    if (0 == id) {
        uint32_t next = next_gen_id() | 1u;
        if (fTaggedGenID.compare_exchange(&id, next)) {
            id = next;  // There was no race or we won the race.  fTaggedGenID is next now.
        } else {
            // We lost a race to set fTaggedGenID. compare_exchange() filled id with the winner.
        }
        // We can't quite SkASSERT(this->genIDIsUnique()). It could be non-unique
        // if we got here via the else path (pretty unlikely, but possible).
    }
    return id & ~1u;  // Mask off bottom unique bit.
}

// Registers a listener to be fired when our genID is invalidated. Takes
// ownership of `listener`; it is deleted immediately if it can never fire
// (NULL, or our genID is already shared with another ref).
void SkPixelRef::addGenIDChangeListener(GenIDChangeListener* listener) {
    if (NULL == listener || !this->genIDIsUnique()) {
        // No point in tracking this if we're not going to call it.
        SkDELETE(listener);
        return;
    }
    *fGenIDChangeListeners.append() = listener;
}

// we need to be called *before* the genID gets changed or zeroed
void SkPixelRef::callGenIDChangeListeners() {
    // We don't invalidate ourselves if we think another SkPixelRef is sharing our genID.
    if (this->genIDIsUnique()) {
        for (int i = 0; i < fGenIDChangeListeners.count(); i++) {
            fGenIDChangeListeners[i]->onChange();
        }

        // TODO: SkAtomic could add "old_value = atomic.xchg(new_value)" to make this clearer.
        if (fAddedToCache.load()) {
            SkNotifyBitmapGenIDIsStale(this->getGenerationID());
            fAddedToCache.store(false);
        }
    }
    // Listeners get at most one shot, so whether these triggered or not, blow them away.
    fGenIDChangeListeners.deleteAll();
}

// Public notification that the pixel contents changed: fires listeners (while
// the old genID is still current) and then invalidates the genID.
void SkPixelRef::notifyPixelsChanged() {
#ifdef SK_DEBUG
    if (fIsImmutable) {
        SkDebugf("========== notifyPixelsChanged called on immutable pixelref");
    }
#endif
    this->callGenIDChangeListeners();
    this->needsNewGenID();
}

// fInfo is declared const to discourage mutation; the alpha type is the one
// field that may legitimately change, so it's rewritten via const_cast here.
void SkPixelRef::changeAlphaType(SkAlphaType at) {
    *const_cast<SkImageInfo*>(&fInfo) = fInfo.makeAlphaType(at);
}

// One-way switch: there is deliberately no way to clear immutability.
void SkPixelRef::setImmutable() {
    fIsImmutable = true;
}

bool SkPixelRef::readPixels(SkBitmap* dst, const SkIRect* subset) {
    return this->onReadPixels(dst, subset);
}

///////////////////////////////////////////////////////////////////////////////////////////////////
// Default (no-op) implementations of the subclass hooks.

bool SkPixelRef::onReadPixels(SkBitmap* dst, const SkIRect* subset) {
    return false;
}

SkData* SkPixelRef::onRefEncodedData() {
    return NULL;
}

bool SkPixelRef::onGetYUV8Planes(SkISize sizes[3], void* planes[3], size_t rowBytes[3],
                                 SkYUVColorSpace* colorSpace) {
    return false;
}

size_t SkPixelRef::getAllocatedSizeInBytes() const {
    return 0;
}

// Unlock callback installed by the default onRequestLock: undoes both the
// pixel lock and the ref taken when the result was handed out.
static void unlock_legacy_result(void* ctx) {
    SkPixelRef* pr = (SkPixelRef*)ctx;
    pr->unlockPixels();
    pr->unref();  // balancing the Ref in onRequestLock
}

// Default onRequestLock adapts the legacy lockPixels path to the request API.
// Runs with fMutex already held (see requestLock). Note: unlock_legacy_result
// calls unlockPixels(), which re-acquires fMutex, so the caller must have
// released it by the time fUnlockProc fires.
bool SkPixelRef::onRequestLock(const LockRequest& request, LockResult* result) {
    LockRec rec;
    if (!this->lockPixelsInsideMutex(&rec)) {
        return false;
    }

    result->fUnlockProc = unlock_legacy_result;
    result->fUnlockContext = SkRef(this);  // this is balanced in our fUnlockProc
    result->fCTable = rec.fColorTable;
    result->fPixels = rec.fPixels;
    result->fRowBytes = rec.fRowBytes;
    result->fSize.set(fInfo.width(), fInfo.height());
    return true;
}