/*
 * Copyright 2014 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */


#include "GrResourceCache.h"

#include "GrCaps.h"
#include "GrGpuResourceCacheAccess.h"
#include "GrProxyProvider.h"
#include "GrTexture.h"
#include "GrTextureProxyCacheAccess.h"
#include "GrTracing.h"
#include "SkGr.h"
#include "SkMessageBus.h"
#include "SkOpts.h"
#include "SkTSort.h"

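// Set up the message bus plumbing for the two notification types this cache consumes:
// unique-key invalidation messages (polled from fInvalidUniqueKeyInbox in purgeAsNeeded) and
// freed cross-context resource messages (polled from fFreedGpuResourceInbox in
// processFreedGpuResources).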
DECLARE_SKMESSAGEBUS_MESSAGE(GrUniqueKeyInvalidatedMessage);

DECLARE_SKMESSAGEBUS_MESSAGE(GrGpuResourceFreedMessage);

//////////////////////////////////////////////////////////////////////////////

GrScratchKey::ResourceType GrScratchKey::GenerateResourceType() {
    static int32_t gType = INHERITED::kInvalidDomain + 1;

    int32_t type = sk_atomic_inc(&gType);
    if (type > SK_MaxU16) {
        SK_ABORT("Too many Resource Types");
    }

    return static_cast<ResourceType>(type);
}

GrUniqueKey::Domain GrUniqueKey::GenerateDomain() {
    static int32_t gDomain = INHERITED::kInvalidDomain + 1;

    int32_t domain = sk_atomic_inc(&gDomain);
    if (domain > SK_MaxU16) {
        SK_ABORT("Too many GrUniqueKey Domains");
    }

    return static_cast<Domain>(domain);
}

uint32_t GrResourceKeyHash(const uint32_t* data, size_t size) {
    return SkOpts::hash(data, size);
}

//////////////////////////////////////////////////////////////////////////////

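// RAII debug helper: runs the cache's consistency checks on both entry to and exit from a scope.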
class GrResourceCache::AutoValidate : ::SkNoncopyable {
public:
    AutoValidate(GrResourceCache* cache) : fCache(cache) { cache->validate(); }
    ~AutoValidate() { fCache->validate(); }
private:
    GrResourceCache* fCache;
};

//////////////////////////////////////////////////////////////////////////////


GrResourceCache::GrResourceCache(const GrCaps* caps, uint32_t contextUniqueID)
    : fProxyProvider(nullptr)
    , fTimestamp(0)
    , fMaxCount(kDefaultMaxCount)
    , fMaxBytes(kDefaultMaxSize)
    , fMaxUnusedFlushes(kDefaultMaxUnusedFlushes)
#if GR_CACHE_STATS
    , fHighWaterCount(0)
    , fHighWaterBytes(0)
    , fBudgetedHighWaterCount(0)
    , fBudgetedHighWaterBytes(0)
#endif
    , fBytes(0)
    , fBudgetedCount(0)
    , fBudgetedBytes(0)
    , fPurgeableBytes(0)
    , fRequestFlush(false)
    , fExternalFlushCnt(0)
    , fContextUniqueID(contextUniqueID)
    , fPreferVRAMUseOverFlushes(caps->preferVRAMUseOverFlushes()) {
    SkDEBUGCODE(fCount = 0;)
    SkDEBUGCODE(fNewlyPurgeableResourceForValidation = nullptr;)
}

GrResourceCache::~GrResourceCache() {
    this->releaseAll();
}

void GrResourceCache::setLimits(int count, size_t bytes, int maxUnusedFlushes) {
    fMaxCount = count;
    fMaxBytes = bytes;
    fMaxUnusedFlushes = maxUnusedFlushes;
    this->purgeAsNeeded();
}

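// Adds a resource to the cache: it receives an LRU timestamp, is tracked in the nonpurgeable
// array, is counted against the byte/budget totals, and is registered in the scratch map when it
// has a scratch key but no unique key. Insertion can itself trigger a purge to stay within budget.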
void GrResourceCache::insertResource(GrGpuResource* resource) {
    SkASSERT(resource);
    SkASSERT(!this->isInCache(resource));
    SkASSERT(!resource->wasDestroyed());
    SkASSERT(!resource->isPurgeable());

    // We must set the timestamp before adding to the array in case the timestamp wraps and we wind
    // up iterating over all the resources that already have timestamps.
    resource->cacheAccess().setTimestamp(this->getNextTimestamp());

    this->addToNonpurgeableArray(resource);

    size_t size = resource->gpuMemorySize();
    SkDEBUGCODE(++fCount;)
    fBytes += size;
#if GR_CACHE_STATS
    fHighWaterCount = SkTMax(this->getResourceCount(), fHighWaterCount);
    fHighWaterBytes = SkTMax(fBytes, fHighWaterBytes);
#endif
    if (SkBudgeted::kYes == resource->resourcePriv().isBudgeted()) {
        ++fBudgetedCount;
        fBudgetedBytes += size;
        TRACE_COUNTER2("skia.gpu.cache", "skia budget", "used",
                       fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);
#if GR_CACHE_STATS
        fBudgetedHighWaterCount = SkTMax(fBudgetedCount, fBudgetedHighWaterCount);
        fBudgetedHighWaterBytes = SkTMax(fBudgetedBytes, fBudgetedHighWaterBytes);
#endif
    }
    if (resource->resourcePriv().getScratchKey().isValid() &&
        !resource->getUniqueKey().isValid()) {
        SkASSERT(!resource->resourcePriv().refsWrappedObjects());
        fScratchMap.insert(resource->resourcePriv().getScratchKey(), resource);
    }

    this->purgeAsNeeded();
}

void GrResourceCache::removeResource(GrGpuResource* resource) {
    this->validate();
    SkASSERT(this->isInCache(resource));

    size_t size = resource->gpuMemorySize();
    if (resource->isPurgeable()) {
        fPurgeableQueue.remove(resource);
        fPurgeableBytes -= size;
    } else {
        this->removeFromNonpurgeableArray(resource);
    }

    SkDEBUGCODE(--fCount;)
    fBytes -= size;
    if (SkBudgeted::kYes == resource->resourcePriv().isBudgeted()) {
        --fBudgetedCount;
        fBudgetedBytes -= size;
        TRACE_COUNTER2("skia.gpu.cache", "skia budget", "used",
                       fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);
    }

    if (resource->resourcePriv().getScratchKey().isValid() &&
        !resource->getUniqueKey().isValid()) {
        fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
    }
    if (resource->getUniqueKey().isValid()) {
        fUniqueHash.remove(resource->getUniqueKey());
    }
    this->validate();
}

void GrResourceCache::abandonAll() {
    AutoValidate av(this);

    while (fNonpurgeableResources.count()) {
        GrGpuResource* back = *(fNonpurgeableResources.end() - 1);
        SkASSERT(!back->wasDestroyed());
        back->cacheAccess().abandon();
    }

    while (fPurgeableQueue.count()) {
        GrGpuResource* top = fPurgeableQueue.peek();
        SkASSERT(!top->wasDestroyed());
        top->cacheAccess().abandon();
    }

    SkASSERT(!fScratchMap.count());
    SkASSERT(!fUniqueHash.count());
    SkASSERT(!fCount);
    SkASSERT(!this->getResourceCount());
    SkASSERT(!fBytes);
    SkASSERT(!fBudgetedCount);
    SkASSERT(!fBudgetedBytes);
    SkASSERT(!fPurgeableBytes);
}

void GrResourceCache::releaseAll() {
    AutoValidate av(this);

    this->processFreedGpuResources();

    SkASSERT(fProxyProvider); // better have called setProxyProvider
    // We must remove the uniqueKeys from the proxies here. While they possess a uniqueKey
    // they also have a raw pointer back to this class (which is presumably going away)!
    fProxyProvider->removeAllUniqueKeys();

    while (fNonpurgeableResources.count()) {
        GrGpuResource* back = *(fNonpurgeableResources.end() - 1);
        SkASSERT(!back->wasDestroyed());
        back->cacheAccess().release();
    }

    while (fPurgeableQueue.count()) {
        GrGpuResource* top = fPurgeableQueue.peek();
        SkASSERT(!top->wasDestroyed());
        top->cacheAccess().release();
    }

    SkASSERT(!fScratchMap.count());
    SkASSERT(!fUniqueHash.count());
    SkASSERT(!fCount);
    SkASSERT(!this->getResourceCount());
    SkASSERT(!fBytes);
    SkASSERT(!fBudgetedCount);
    SkASSERT(!fBudgetedBytes);
    SkASSERT(!fPurgeableBytes);
}

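// Predicate handed to fScratchMap.find() below. It only accepts resources that are truly
// available for scratch reuse (no outstanding refs) and, when constructed with
// rejectPendingIO = true, additionally skips resources that still have pending IO.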
class GrResourceCache::AvailableForScratchUse {
public:
    AvailableForScratchUse(bool rejectPendingIO) : fRejectPendingIO(rejectPendingIO) { }

    bool operator()(const GrGpuResource* resource) const {
        SkASSERT(!resource->getUniqueKey().isValid() &&
                 resource->resourcePriv().getScratchKey().isValid());
        if (resource->internalHasRef() || !resource->cacheAccess().isScratch()) {
            return false;
        }
        return !fRejectPendingIO || !resource->internalHasPendingIO();
    }

private:
    bool fRejectPendingIO;
};

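// Scratch lookup honors two flags: kRequireNoPendingIO_ScratchFlag fails the lookup outright if
// the only matches have pending IO, while kPreferNoPendingIO_ScratchFlag falls back to such a
// match unless there is still budget headroom (and the caps prefer VRAM use over flushes), in
// which case the caller is forced to allocate a new resource instead.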
GrGpuResource* GrResourceCache::findAndRefScratchResource(const GrScratchKey& scratchKey,
                                                          size_t resourceSize,
                                                          uint32_t flags) {
    SkASSERT(scratchKey.isValid());

    GrGpuResource* resource;
    if (flags & (kPreferNoPendingIO_ScratchFlag | kRequireNoPendingIO_ScratchFlag)) {
        resource = fScratchMap.find(scratchKey, AvailableForScratchUse(true));
        if (resource) {
            this->refAndMakeResourceMRU(resource);
            this->validate();
            return resource;
        } else if (flags & kRequireNoPendingIO_ScratchFlag) {
            return nullptr;
        }
        // We would prefer to consume more available VRAM rather than flushing
        // immediately, but on ANGLE this can lead to starving of the GPU.
        if (fPreferVRAMUseOverFlushes && this->wouldFit(resourceSize)) {
            // kPrefer is specified, we didn't find a resource without pending IO,
            // but there is still space in our budget for the resource, so force
            // the caller to allocate a new one.
            return nullptr;
        }
    }
    resource = fScratchMap.find(scratchKey, AvailableForScratchUse(false));
    if (resource) {
        this->refAndMakeResourceMRU(resource);
        this->validate();
    }
    return resource;
}

void GrResourceCache::willRemoveScratchKey(const GrGpuResource* resource) {
    SkASSERT(resource->resourcePriv().getScratchKey().isValid());
    if (!resource->getUniqueKey().isValid()) {
        fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
    }
}

void GrResourceCache::removeUniqueKey(GrGpuResource* resource) {
    // Someone must have a ref to this resource in order to have removed the key. When the ref
    // count reaches zero we will get a notification and figure out what to do with the resource.
    if (resource->getUniqueKey().isValid()) {
        SkASSERT(resource == fUniqueHash.find(resource->getUniqueKey()));
        fUniqueHash.remove(resource->getUniqueKey());
    }
    resource->cacheAccess().removeUniqueKey();

    if (resource->resourcePriv().getScratchKey().isValid()) {
        fScratchMap.insert(resource->resourcePriv().getScratchKey(), resource);
    }

    this->validate();
}

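// Installs newKey on 'resource', first evicting or stripping any other resource that currently
// owns that key. A resource gaining its first unique key is also pulled out of the scratch map;
// passing an invalid newKey simply removes the resource's existing unique key.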
void GrResourceCache::changeUniqueKey(GrGpuResource* resource, const GrUniqueKey& newKey) {
    SkASSERT(resource);
    SkASSERT(this->isInCache(resource));

    // If another resource has the new key, remove its key then install the key on this resource.
    if (newKey.isValid()) {
        if (GrGpuResource* old = fUniqueHash.find(newKey)) {
            // If the old resource using the key is purgeable and is unreachable, then remove it.
            if (!old->resourcePriv().getScratchKey().isValid() && old->isPurgeable()) {
                old->cacheAccess().release();
            } else {
                this->removeUniqueKey(old);
            }
        }
        SkASSERT(nullptr == fUniqueHash.find(newKey));

        // Remove the entry for this resource if it already has a unique key.
        if (resource->getUniqueKey().isValid()) {
            SkASSERT(resource == fUniqueHash.find(resource->getUniqueKey()));
            fUniqueHash.remove(resource->getUniqueKey());
            SkASSERT(nullptr == fUniqueHash.find(resource->getUniqueKey()));
        } else {
            // 'resource' didn't have a valid unique key before so it is switching sides. Remove it
            // from the ScratchMap
            if (resource->resourcePriv().getScratchKey().isValid()) {
                fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
            }
        }

        resource->cacheAccess().setUniqueKey(newKey);
        fUniqueHash.add(resource);
    } else {
        this->removeUniqueKey(resource);
    }

    this->validate();
}

void GrResourceCache::refAndMakeResourceMRU(GrGpuResource* resource) {
    SkASSERT(resource);
    SkASSERT(this->isInCache(resource));

    if (resource->isPurgeable()) {
        // It's about to become unpurgeable.
        fPurgeableBytes -= resource->gpuMemorySize();
        fPurgeableQueue.remove(resource);
        this->addToNonpurgeableArray(resource);
    }
    resource->ref();

    resource->cacheAccess().setTimestamp(this->getNextTimestamp());
    this->validate();
}

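// Called by GrGpuResource when a ref or pending-IO count reaches zero. A last-ref notification
// refreshes the LRU timestamp; once all counts hit zero the resource moves to the purgeable
// queue, where it is either retained for reuse or released right away, depending on budget
// pressure and whether any key could still reach it.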
void GrResourceCache::notifyCntReachedZero(GrGpuResource* resource, uint32_t flags) {
    SkASSERT(resource);
    SkASSERT(!resource->wasDestroyed());
    SkASSERT(flags);
    SkASSERT(this->isInCache(resource));
    // This resource should always be in the nonpurgeable array when this function is called. It
    // will be moved to the queue if it is newly purgeable.
    SkASSERT(fNonpurgeableResources[*resource->cacheAccess().accessCacheIndex()] == resource);

    if (SkToBool(ResourceAccess::kRefCntReachedZero_RefNotificationFlag & flags)) {
#ifdef SK_DEBUG
        // When the timestamp overflows validate() is called. validate() checks that resources in
        // the nonpurgeable array are indeed not purgeable. However, the movement from the array to
        // the purgeable queue happens just below in this function. So we mark it as an exception.
        if (resource->isPurgeable()) {
            fNewlyPurgeableResourceForValidation = resource;
        }
#endif
        resource->cacheAccess().setTimestamp(this->getNextTimestamp());
        SkDEBUGCODE(fNewlyPurgeableResourceForValidation = nullptr);
    }

    if (!SkToBool(ResourceAccess::kAllCntsReachedZero_RefNotificationFlag & flags)) {
        SkASSERT(!resource->isPurgeable());
        return;
    }

    SkASSERT(resource->isPurgeable());
    this->removeFromNonpurgeableArray(resource);
    fPurgeableQueue.insert(resource);
    resource->cacheAccess().setFlushCntWhenResourceBecamePurgeable(fExternalFlushCnt);
    resource->cacheAccess().setTimeWhenResourceBecomePurgeable();
    fPurgeableBytes += resource->gpuMemorySize();

    if (SkBudgeted::kNo == resource->resourcePriv().isBudgeted()) {
        // Check whether this resource could still be used as a scratch resource.
        if (!resource->resourcePriv().refsWrappedObjects() &&
            resource->resourcePriv().getScratchKey().isValid()) {
            // We won't purge an existing resource to make room for this one.
            if (fBudgetedCount < fMaxCount &&
                fBudgetedBytes + resource->gpuMemorySize() <= fMaxBytes) {
                resource->resourcePriv().makeBudgeted();
                return;
            }
        }
    } else {
        // Purge the resource immediately if we're over budget.
        // Also purge if the resource has neither a valid scratch key nor a unique key.
        bool noKey = !resource->resourcePriv().getScratchKey().isValid() &&
                     !resource->getUniqueKey().isValid();
        if (!this->overBudget() && !noKey) {
            return;
        }
    }

    SkDEBUGCODE(int beforeCount = this->getResourceCount();)
    resource->cacheAccess().release();
    // We should at least free this resource, perhaps dependent resources as well.
    SkASSERT(this->getResourceCount() < beforeCount);
    this->validate();
}

void GrResourceCache::didChangeGpuMemorySize(const GrGpuResource* resource, size_t oldSize) {
    // SkASSERT(!fPurging); GrPathRange increases size during flush. :(
    SkASSERT(resource);
    SkASSERT(this->isInCache(resource));

    ptrdiff_t delta = resource->gpuMemorySize() - oldSize;

    fBytes += delta;
#if GR_CACHE_STATS
    fHighWaterBytes = SkTMax(fBytes, fHighWaterBytes);
#endif
    if (SkBudgeted::kYes == resource->resourcePriv().isBudgeted()) {
        fBudgetedBytes += delta;
        TRACE_COUNTER2("skia.gpu.cache", "skia budget", "used",
                       fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);
#if GR_CACHE_STATS
        fBudgetedHighWaterBytes = SkTMax(fBudgetedBytes, fBudgetedHighWaterBytes);
#endif
    }

    this->purgeAsNeeded();
    this->validate();
}

void GrResourceCache::didChangeBudgetStatus(GrGpuResource* resource) {
    SkASSERT(resource);
    SkASSERT(this->isInCache(resource));

    size_t size = resource->gpuMemorySize();

    if (SkBudgeted::kYes == resource->resourcePriv().isBudgeted()) {
        ++fBudgetedCount;
        fBudgetedBytes += size;
#if GR_CACHE_STATS
        fBudgetedHighWaterBytes = SkTMax(fBudgetedBytes, fBudgetedHighWaterBytes);
        fBudgetedHighWaterCount = SkTMax(fBudgetedCount, fBudgetedHighWaterCount);
#endif
        this->purgeAsNeeded();
    } else {
        --fBudgetedCount;
        fBudgetedBytes -= size;
    }
    TRACE_COUNTER2("skia.gpu.cache", "skia budget", "used",
                   fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);

    this->validate();
}

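// Purge pipeline: drain the invalidated-key and freed-resource inboxes, drop purgeable resources
// that have gone unused for more than fMaxUnusedFlushes external flushes, then release resources
// in LRU order until the cache is back within budget. If it is still over budget afterwards
// (e.g. everything left has pending IO), fRequestFlush asks GrDrawingManager for a flush.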
void GrResourceCache::purgeAsNeeded() {
    SkTArray<GrUniqueKeyInvalidatedMessage> invalidKeyMsgs;
    fInvalidUniqueKeyInbox.poll(&invalidKeyMsgs);
    if (invalidKeyMsgs.count()) {
        this->processInvalidUniqueKeys(invalidKeyMsgs);
    }

    this->processFreedGpuResources();

    if (fMaxUnusedFlushes > 0) {
        // We want to know how many complete flushes have occurred without the resource being used.
        // If the resource was tagged when fExternalFlushCnt was N then this means it became
        // purgeable during activity that became the N+1th flush. So when the flush count is N+2
        // it has sat in the purgeable queue for one entire flush.
        uint32_t oldestAllowedFlushCnt = fExternalFlushCnt - fMaxUnusedFlushes - 1;
        // check for underflow
        if (oldestAllowedFlushCnt < fExternalFlushCnt) {
            while (fPurgeableQueue.count()) {
                uint32_t flushWhenResourceBecamePurgeable =
                        fPurgeableQueue.peek()->cacheAccess().flushCntWhenResourceBecamePurgeable();
                if (oldestAllowedFlushCnt < flushWhenResourceBecamePurgeable) {
                    // Resources were given both LRU timestamps and tagged with a flush cnt when
                    // they first became purgeable. The LRU timestamp won't change again until the
                    // resource is made non-purgeable again. So, at this point all the remaining
                    // resources in the timestamp-sorted queue will have a flush count >= to this
                    // one.
                    break;
                }
                GrGpuResource* resource = fPurgeableQueue.peek();
                SkASSERT(resource->isPurgeable());
                resource->cacheAccess().release();
            }
        }
    }

    bool stillOverbudget = this->overBudget();
    while (stillOverbudget && fPurgeableQueue.count()) {
        GrGpuResource* resource = fPurgeableQueue.peek();
        SkASSERT(resource->isPurgeable());
        resource->cacheAccess().release();
        stillOverbudget = this->overBudget();
    }

    this->validate();

    if (stillOverbudget) {
        // Set this so that GrDrawingManager will issue a flush to free up resources with pending
        // IO that we were unable to purge in this pass.
        fRequestFlush = true;
    }
}

void GrResourceCache::purgeUnlockedResources(bool scratchResourcesOnly) {
    if (!scratchResourcesOnly) {
        // We could disable maintaining the heap property here, but it would add a lot of
        // complexity. Moreover, this is rarely called.
        while (fPurgeableQueue.count()) {
            GrGpuResource* resource = fPurgeableQueue.peek();
            SkASSERT(resource->isPurgeable());
            resource->cacheAccess().release();
        }
    } else {
        // Sort the queue
        fPurgeableQueue.sort();

        // Make a list of the scratch resources to delete
        SkTDArray<GrGpuResource*> scratchResources;
        for (int i = 0; i < fPurgeableQueue.count(); i++) {
            GrGpuResource* resource = fPurgeableQueue.at(i);
            SkASSERT(resource->isPurgeable());
            if (!resource->getUniqueKey().isValid()) {
                *scratchResources.append() = resource;
            }
        }

        // Delete the scratch resources. This must be done as a separate pass
        // to avoid messing up the sorted order of the queue
        for (int i = 0; i < scratchResources.count(); i++) {
            scratchResources.getAt(i)->cacheAccess().release();
        }
    }

    this->validate();
}

void GrResourceCache::purgeResourcesNotUsedSince(GrStdSteadyClock::time_point purgeTime) {
    while (fPurgeableQueue.count()) {
        const GrStdSteadyClock::time_point resourceTime =
                fPurgeableQueue.peek()->cacheAccess().timeWhenResourceBecamePurgeable();
        if (resourceTime >= purgeTime) {
            // Resources were given both LRU timestamps and tagged with a frame number when
            // they first became purgeable. The LRU timestamp won't change again until the
            // resource is made non-purgeable again. So, at this point all the remaining
            // resources in the timestamp-sorted queue will have a frame number >= to this
            // one.
            break;
        }
        GrGpuResource* resource = fPurgeableQueue.peek();
        SkASSERT(resource->isPurgeable());
        resource->cacheAccess().release();
    }
}

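// Purges roughly 'bytesToPurge' worth of purgeable resources. When preferScratchResources is
// true, scratch resources (those without a unique key) are released first, in LRU order; any
// remaining shortfall is then made up by temporarily lowering fMaxBytes and running the normal
// purgeAsNeeded() pass.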
void GrResourceCache::purgeUnlockedResources(size_t bytesToPurge, bool preferScratchResources) {

    const size_t tmpByteBudget = SkTMax((size_t)0, fBytes - bytesToPurge);
    bool stillOverbudget = tmpByteBudget < fBytes;

    if (preferScratchResources && bytesToPurge < fPurgeableBytes) {
        // Sort the queue
        fPurgeableQueue.sort();

        // Make a list of the scratch resources to delete
        SkTDArray<GrGpuResource*> scratchResources;
        size_t scratchByteCount = 0;
        for (int i = 0; i < fPurgeableQueue.count() && stillOverbudget; i++) {
            GrGpuResource* resource = fPurgeableQueue.at(i);
            SkASSERT(resource->isPurgeable());
            if (!resource->getUniqueKey().isValid()) {
                *scratchResources.append() = resource;
                scratchByteCount += resource->gpuMemorySize();
                stillOverbudget = tmpByteBudget < fBytes - scratchByteCount;
            }
        }

        // Delete the scratch resources. This must be done as a separate pass
        // to avoid messing up the sorted order of the queue
        for (int i = 0; i < scratchResources.count(); i++) {
            scratchResources.getAt(i)->cacheAccess().release();
        }
        stillOverbudget = tmpByteBudget < fBytes;

        this->validate();
    }

    // Purge any remaining resources in LRU order
    if (stillOverbudget) {
        const size_t cachedByteCount = fMaxBytes;
        fMaxBytes = tmpByteBudget;
        this->purgeAsNeeded();
        fMaxBytes = cachedByteCount;
    }
}

void GrResourceCache::processInvalidUniqueKeys(
                                            const SkTArray<GrUniqueKeyInvalidatedMessage>& msgs) {
    SkASSERT(fProxyProvider); // better have called setProxyProvider

    for (int i = 0; i < msgs.count(); ++i) {
        fProxyProvider->processInvalidProxyUniqueKey(msgs[i].key());

        GrGpuResource* resource = this->findAndRefUniqueResource(msgs[i].key());
        if (resource) {
            resource->resourcePriv().removeUniqueKey();
            resource->unref(); // If this resource is now purgeable, the cache will be notified.
        }
    }
}

void GrResourceCache::insertCrossContextGpuResource(GrGpuResource* resource) {
    resource->ref();
}

void GrResourceCache::processFreedGpuResources() {
    SkTArray<GrGpuResourceFreedMessage> msgs;
    fFreedGpuResourceInbox.poll(&msgs);
    for (int i = 0; i < msgs.count(); ++i) {
        if (msgs[i].fOwningUniqueID == fContextUniqueID) {
            msgs[i].fResource->unref();
        }
    }
}

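// The nonpurgeable array is unordered; each resource caches its own position via
// accessCacheIndex(), which lets removal swap the tail element into the vacated slot and pop
// in O(1).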
void GrResourceCache::addToNonpurgeableArray(GrGpuResource* resource) {
    int index = fNonpurgeableResources.count();
    *fNonpurgeableResources.append() = resource;
    *resource->cacheAccess().accessCacheIndex() = index;
}

void GrResourceCache::removeFromNonpurgeableArray(GrGpuResource* resource) {
    int* index = resource->cacheAccess().accessCacheIndex();
    // Fill the hole we will create in the array with the tail object, adjust its index, and
    // then pop the array.
    GrGpuResource* tail = *(fNonpurgeableResources.end() - 1);
    SkASSERT(fNonpurgeableResources[*index] == resource);
    fNonpurgeableResources[*index] = tail;
    *tail->cacheAccess().accessCacheIndex() = *index;
    fNonpurgeableResources.pop();
    SkDEBUGCODE(*index = -1);
}

uint32_t GrResourceCache::getNextTimestamp() {
    // If we wrap then all the existing resources will appear older than any resources that get
    // a timestamp after the wrap.
    if (0 == fTimestamp) {
        int count = this->getResourceCount();
        if (count) {
            // Reset all the timestamps. We sort the resources by timestamp and then assign
            // sequential timestamps beginning with 0. This is O(n*lg(n)) but it should be extremely
            // rare.
            SkTDArray<GrGpuResource*> sortedPurgeableResources;
            sortedPurgeableResources.setReserve(fPurgeableQueue.count());

            while (fPurgeableQueue.count()) {
                *sortedPurgeableResources.append() = fPurgeableQueue.peek();
                fPurgeableQueue.pop();
            }

            SkTQSort(fNonpurgeableResources.begin(), fNonpurgeableResources.end() - 1,
                     CompareTimestamp);

            // Pick resources out of the purgeable and non-purgeable arrays based on lowest
            // timestamp and assign new timestamps.
            int currP = 0;
            int currNP = 0;
            while (currP < sortedPurgeableResources.count() &&
                   currNP < fNonpurgeableResources.count()) {
                uint32_t tsP = sortedPurgeableResources[currP]->cacheAccess().timestamp();
                uint32_t tsNP = fNonpurgeableResources[currNP]->cacheAccess().timestamp();
                SkASSERT(tsP != tsNP);
                if (tsP < tsNP) {
                    sortedPurgeableResources[currP++]->cacheAccess().setTimestamp(fTimestamp++);
                } else {
                    // Correct the index in the nonpurgeable array stored on the resource post-sort.
                    *fNonpurgeableResources[currNP]->cacheAccess().accessCacheIndex() = currNP;
                    fNonpurgeableResources[currNP++]->cacheAccess().setTimestamp(fTimestamp++);
                }
            }

            // The above loop ended when we hit the end of one array. Finish the other one.
            while (currP < sortedPurgeableResources.count()) {
                sortedPurgeableResources[currP++]->cacheAccess().setTimestamp(fTimestamp++);
            }
            while (currNP < fNonpurgeableResources.count()) {
                *fNonpurgeableResources[currNP]->cacheAccess().accessCacheIndex() = currNP;
                fNonpurgeableResources[currNP++]->cacheAccess().setTimestamp(fTimestamp++);
            }

            // Rebuild the queue.
            for (int i = 0; i < sortedPurgeableResources.count(); ++i) {
                fPurgeableQueue.insert(sortedPurgeableResources[i]);
            }

            this->validate();
            SkASSERT(count == this->getResourceCount());

            // count should be the next timestamp we return.
            SkASSERT(fTimestamp == SkToU32(count));
        }
    }
    return fTimestamp++;
}

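// A cache-requested flush simply satisfies the earlier fRequestFlush, while an external flush
// advances fExternalFlushCnt (resetting the purgeable resources' flush tags if the counter
// wraps). Either way, purgeAsNeeded() then gets a chance to reclaim newly purgeable resources.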
void GrResourceCache::notifyFlushOccurred(FlushType type) {
    switch (type) {
        case FlushType::kCacheRequested:
            SkASSERT(fRequestFlush);
            fRequestFlush = false;
            break;
        case FlushType::kExternal:
            ++fExternalFlushCnt;
            if (0 == fExternalFlushCnt) {
                // When this wraps just reset all the purgeable resources' last used flush state.
                for (int i = 0; i < fPurgeableQueue.count(); ++i) {
                    fPurgeableQueue.at(i)->cacheAccess().setFlushCntWhenResourceBecamePurgeable(0);
                }
            }
            break;
    }
    this->purgeAsNeeded();
}

void GrResourceCache::dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const {
    for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
        fNonpurgeableResources[i]->dumpMemoryStatistics(traceMemoryDump);
    }
    for (int i = 0; i < fPurgeableQueue.count(); ++i) {
        fPurgeableQueue.at(i)->dumpMemoryStatistics(traceMemoryDump);
    }
}

#ifdef SK_DEBUG
void GrResourceCache::validate() const {
    // Reduce the frequency of validations for large resource counts.
    static SkRandom gRandom;
    int mask = (SkNextPow2(fCount + 1) >> 5) - 1;
    if (~mask && (gRandom.nextU() & mask)) {
        return;
    }

    struct Stats {
        size_t fBytes;
        int fBudgetedCount;
        size_t fBudgetedBytes;
        int fLocked;
        int fScratch;
        int fCouldBeScratch;
        int fContent;
        const ScratchMap* fScratchMap;
        const UniqueHash* fUniqueHash;

        Stats(const GrResourceCache* cache) {
            memset(this, 0, sizeof(*this));
            fScratchMap = &cache->fScratchMap;
            fUniqueHash = &cache->fUniqueHash;
        }

        void update(GrGpuResource* resource) {
            fBytes += resource->gpuMemorySize();

            if (!resource->isPurgeable()) {
                ++fLocked;
            }

            const GrScratchKey& scratchKey = resource->resourcePriv().getScratchKey();
            const GrUniqueKey& uniqueKey = resource->getUniqueKey();

            if (resource->cacheAccess().isScratch()) {
                SkASSERT(!uniqueKey.isValid());
                ++fScratch;
                SkASSERT(fScratchMap->countForKey(scratchKey));
                SkASSERT(!resource->resourcePriv().refsWrappedObjects());
            } else if (scratchKey.isValid()) {
                SkASSERT(SkBudgeted::kNo == resource->resourcePriv().isBudgeted() ||
                         uniqueKey.isValid());
                if (!uniqueKey.isValid()) {
                    ++fCouldBeScratch;
                    SkASSERT(fScratchMap->countForKey(scratchKey));
                }
                SkASSERT(!resource->resourcePriv().refsWrappedObjects());
            }
            if (uniqueKey.isValid()) {
                ++fContent;
                SkASSERT(fUniqueHash->find(uniqueKey) == resource);
                SkASSERT(SkBudgeted::kYes == resource->resourcePriv().isBudgeted() ||
                         resource->resourcePriv().refsWrappedObjects());

                if (scratchKey.isValid()) {
                    SkASSERT(!fScratchMap->has(resource, scratchKey));
                }
            }

            if (SkBudgeted::kYes == resource->resourcePriv().isBudgeted()) {
                ++fBudgetedCount;
                fBudgetedBytes += resource->gpuMemorySize();
            }
        }
    };

    {
        ScratchMap::ConstIter iter(&fScratchMap);

        int count = 0;
        for ( ; !iter.done(); ++iter) {
            const GrGpuResource* resource = *iter;
            SkASSERT(resource->resourcePriv().getScratchKey().isValid());
            SkASSERT(!resource->getUniqueKey().isValid());
            count++;
        }
        SkASSERT(count == fScratchMap.count()); // ensure the iterator is working correctly
    }

    Stats stats(this);
    size_t purgeableBytes = 0;

    for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
        SkASSERT(!fNonpurgeableResources[i]->isPurgeable() ||
                 fNewlyPurgeableResourceForValidation == fNonpurgeableResources[i]);
        SkASSERT(*fNonpurgeableResources[i]->cacheAccess().accessCacheIndex() == i);
        SkASSERT(!fNonpurgeableResources[i]->wasDestroyed());
        stats.update(fNonpurgeableResources[i]);
    }
    for (int i = 0; i < fPurgeableQueue.count(); ++i) {
        SkASSERT(fPurgeableQueue.at(i)->isPurgeable());
        SkASSERT(*fPurgeableQueue.at(i)->cacheAccess().accessCacheIndex() == i);
        SkASSERT(!fPurgeableQueue.at(i)->wasDestroyed());
        stats.update(fPurgeableQueue.at(i));
        purgeableBytes += fPurgeableQueue.at(i)->gpuMemorySize();
    }

    SkASSERT(fCount == this->getResourceCount());
    SkASSERT(fBudgetedCount <= fCount);
    SkASSERT(fBudgetedBytes <= fBytes);
    SkASSERT(stats.fBytes == fBytes);
    SkASSERT(stats.fBudgetedBytes == fBudgetedBytes);
    SkASSERT(stats.fBudgetedCount == fBudgetedCount);
    SkASSERT(purgeableBytes == fPurgeableBytes);
#if GR_CACHE_STATS
    SkASSERT(fBudgetedHighWaterCount <= fHighWaterCount);
    SkASSERT(fBudgetedHighWaterBytes <= fHighWaterBytes);
    SkASSERT(fBytes <= fHighWaterBytes);
    SkASSERT(fCount <= fHighWaterCount);
    SkASSERT(fBudgetedBytes <= fBudgetedHighWaterBytes);
    SkASSERT(fBudgetedCount <= fBudgetedHighWaterCount);
#endif
    SkASSERT(stats.fContent == fUniqueHash.count());
    SkASSERT(stats.fScratch + stats.fCouldBeScratch == fScratchMap.count());

    // This assertion is not currently valid because we can be in recursive notifyCntReachedZero()
    // calls. This will be fixed when subresource registration is explicit.
    // bool overBudget = budgetedBytes > fMaxBytes || budgetedCount > fMaxCount;
    // SkASSERT(!overBudget || locked == count || fPurging);
}

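// A resource's cached index may refer to a slot in either the purgeable queue or the
// nonpurgeable array, so both containers are checked; an index of -1 means the resource is not
// in this cache at all.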
bool GrResourceCache::isInCache(const GrGpuResource* resource) const {
    int index = *resource->cacheAccess().accessCacheIndex();
    if (index < 0) {
        return false;
    }
    if (index < fPurgeableQueue.count() && fPurgeableQueue.at(index) == resource) {
        return true;
    }
    if (index < fNonpurgeableResources.count() && fNonpurgeableResources[index] == resource) {
        return true;
    }
    SkDEBUGFAIL("Resource index should be -1 or the resource should be in the cache.");
    return false;
}

#endif