/*
 * Copyright 2011 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrContext.h"
#include "GrContextOptions.h"
#include "GrDrawingManager.h"
#include "GrDrawContext.h"
#include "GrLayerCache.h"
#include "GrResourceCache.h"
#include "GrResourceProvider.h"
#include "GrSoftwarePathRenderer.h"
#include "GrSurfacePriv.h"

#include "SkConfig8888.h"
#include "SkGrPriv.h"

#include "batches/GrCopySurfaceBatch.h"
#include "effects/GrConfigConversionEffect.h"
#include "text/GrTextBlobCache.h"

#define ASSERT_OWNED_RESOURCE(R) SkASSERT(!(R) || (R)->getContext() == this)
#define ASSERT_SINGLE_OWNER \
    SkDEBUGCODE(GrSingleOwner::AutoEnforce debug_SingleOwner(&fSingleOwner);)
#define RETURN_IF_ABANDONED if (fDrawingManager->abandoned()) { return; }
#define RETURN_FALSE_IF_ABANDONED if (fDrawingManager->abandoned()) { return false; }
#define RETURN_NULL_IF_ABANDONED if (fDrawingManager->abandoned()) { return nullptr; }

////////////////////////////////////////////////////////////////////////////////

GrContext* GrContext::Create(GrBackend backend, GrBackendContext backendContext) {
    GrContextOptions defaultOptions;
    return Create(backend, backendContext, defaultOptions);
}

GrContext* GrContext::Create(GrBackend backend, GrBackendContext backendContext,
                             const GrContextOptions& options) {
    GrContext* context = new GrContext;

    if (context->init(backend, backendContext, options)) {
        return context;
    } else {
        context->unref();
        return nullptr;
    }
}

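// Generates a unique, non-zero ID for each context; the loop skips
// SK_InvalidGenID if the counter ever wraps around.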
static int32_t gNextID = 1;
static int32_t next_id() {
    int32_t id;
    do {
        id = sk_atomic_inc(&gNextID);
    } while (id == SK_InvalidGenID);
    return id;
}

GrContext::GrContext() : fUniqueID(next_id()) {
    fGpu = nullptr;
    fCaps = nullptr;
    fResourceCache = nullptr;
    fResourceProvider = nullptr;
    fBatchFontCache = nullptr;
    fFlushToReduceCacheSize = false;
}

bool GrContext::init(GrBackend backend, GrBackendContext backendContext,
                     const GrContextOptions& options) {
    ASSERT_SINGLE_OWNER
    SkASSERT(!fGpu);

    fGpu = GrGpu::Create(backend, backendContext, options, this);
    if (!fGpu) {
        return false;
    }
    this->initCommon(options);
    return true;
}

void GrContext::initCommon(const GrContextOptions& options) {
    ASSERT_SINGLE_OWNER

    fCaps = SkRef(fGpu->caps());
    fResourceCache = new GrResourceCache(fCaps);
    fResourceCache->setOverBudgetCallback(OverBudgetCB, this);
    fResourceProvider = new GrResourceProvider(fGpu, fResourceCache, &fSingleOwner);

    fLayerCache.reset(new GrLayerCache(this));

    fDidTestPMConversions = false;

    GrDrawTarget::Options dtOptions;
    dtOptions.fClipBatchToBounds = options.fClipBatchToBounds;
    dtOptions.fDrawBatchBounds = options.fDrawBatchBounds;
    dtOptions.fMaxBatchLookback = options.fMaxBatchLookback;
    fDrawingManager.reset(new GrDrawingManager(this, dtOptions, &fSingleOwner));

    // GrBatchFontCache will eventually replace GrFontCache
    fBatchFontCache = new GrBatchFontCache(this);

    fTextBlobCache.reset(new GrTextBlobCache(TextBlobCacheOverBudgetCB, this));
}

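// Tear-down order: flush pending work, shut down the drawing manager, run any
// registered cleanup callbacks, then destroy the caches before dropping the
// GrGpu and GrCaps references.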
GrContext::~GrContext() {
    ASSERT_SINGLE_OWNER

    if (!fGpu) {
        SkASSERT(!fCaps);
        return;
    }

    this->flush();

    fDrawingManager->cleanup();

    for (int i = 0; i < fCleanUpData.count(); ++i) {
        (*fCleanUpData[i].fFunc)(this, fCleanUpData[i].fInfo);
    }

    delete fResourceProvider;
    delete fResourceCache;
    delete fBatchFontCache;

    fGpu->unref();
    fCaps->unref();
}

void GrContext::abandonContext() {
    ASSERT_SINGLE_OWNER

    fResourceProvider->abandon();

    // Need to abandon the drawing manager first so all the render targets
    // will be released/forgotten before they too are abandoned.
    fDrawingManager->abandon();

    // Abandon the cached resources first so their destructors don't try to
    // free the underlying objects in the 3D API.
    fResourceCache->abandonAll();

    fGpu->contextAbandoned();

    fBatchFontCache->freeAll();
    fLayerCache->freeAll();
    fTextBlobCache->freeAll();
}

void GrContext::resetContext(uint32_t state) {
    ASSERT_SINGLE_OWNER
    fGpu->markContextDirty(state);
}

void GrContext::freeGpuResources() {
    ASSERT_SINGLE_OWNER

    this->flush();

    fBatchFontCache->freeAll();
    fLayerCache->freeAll();

    fDrawingManager->freeGpuResources();

    fResourceCache->purgeAllUnlocked();
}

void GrContext::getResourceCacheUsage(int* resourceCount, size_t* resourceBytes) const {
    ASSERT_SINGLE_OWNER

    if (resourceCount) {
        *resourceCount = fResourceCache->getBudgetedResourceCount();
    }
    if (resourceBytes) {
        *resourceBytes = fResourceCache->getBudgetedResourceBytes();
    }
}

////////////////////////////////////////////////////////////////////////////////

void GrContext::OverBudgetCB(void* data) {
    SkASSERT(data);

    GrContext* context = reinterpret_cast<GrContext*>(data);

    // Flush the GrBufferedDrawTarget to possibly free up some textures
    context->fFlushToReduceCacheSize = true;
}

void GrContext::TextBlobCacheOverBudgetCB(void* data) {
    SkASSERT(data);

    // Unlike the GrResourceCache, TextBlobs are drawn at the SkGpuDevice level; therefore they
    // cannot use fFlushToReduceCacheSize because it uses AutoCheckFlush. The solution is to move
    // the drawText calls to below the GrContext level, but this is not trivial because they call
    // drawPath on SkGpuDevice.
    GrContext* context = reinterpret_cast<GrContext*>(data);
    context->flush();
}

////////////////////////////////////////////////////////////////////////////////

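// kDiscard_FlushBit throws away any queued drawing work instead of executing
// it; otherwise the drawing manager flushes it to the GPU. In both cases the
// resource cache is notified that a flush occurred.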
void GrContext::flush(int flagsBitfield) {
    ASSERT_SINGLE_OWNER
    RETURN_IF_ABANDONED

    if (kDiscard_FlushBit & flagsBitfield) {
        fDrawingManager->reset();
    } else {
        fDrawingManager->flush();
    }
    fResourceCache->notifyFlushOccurred();
    fFlushToReduceCacheSize = false;
}

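// CPU fallback for the unpremul -> premul conversion: copies 'inPixels' to
// 'outPixels', premultiplying along the way. Used when the conversion could
// not be expressed as a GPU draw.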
bool sw_convert_to_premul(GrPixelConfig srcConfig, int width, int height, size_t inRowBytes,
                          const void* inPixels, size_t outRowBytes, void* outPixels) {
    SkSrcPixelInfo srcPI;
    if (!GrPixelConfig2ColorAndProfileType(srcConfig, &srcPI.fColorType, nullptr)) {
        return false;
    }
    srcPI.fAlphaType = kUnpremul_SkAlphaType;
    srcPI.fPixels = inPixels;
    srcPI.fRowBytes = inRowBytes;

    SkDstPixelInfo dstPI;
    dstPI.fColorType = srcPI.fColorType;
    dstPI.fAlphaType = kPremul_SkAlphaType;
    dstPI.fPixels = outPixels;
    dstPI.fRowBytes = outRowBytes;

    return srcPI.convertPixelsTo(&dstPI, width, height);
}

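// Writes 'buffer' into 'surface'. Unpremultiplied source data (or a backend
// preference) routes the pixels through a temporary texture that is then drawn
// into the destination with a config-conversion effect; otherwise any required
// premul step is done on the CPU and the pixels are written directly.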
bool GrContext::writeSurfacePixels(GrSurface* surface,
                                   int left, int top, int width, int height,
                                   GrPixelConfig srcConfig, const void* buffer, size_t rowBytes,
                                   uint32_t pixelOpsFlags) {
    ASSERT_SINGLE_OWNER
    RETURN_FALSE_IF_ABANDONED
    ASSERT_OWNED_RESOURCE(surface);
    SkASSERT(surface);
    GR_AUDIT_TRAIL_AUTO_FRAME(&fAuditTrail, "GrContext::writeSurfacePixels");

    this->testPMConversionsIfNecessary(pixelOpsFlags);

    // Trim the params here so that if we wind up making a temporary surface it can be as small as
    // necessary and because GrGpu::getWritePixelsInfo requires it.
    if (!GrSurfacePriv::AdjustWritePixelParams(surface->width(), surface->height(),
                                               GrBytesPerPixel(srcConfig), &left, &top, &width,
                                               &height, &buffer, &rowBytes)) {
        return false;
    }

    bool applyPremulToSrc = false;
    if (kUnpremul_PixelOpsFlag & pixelOpsFlags) {
        if (!GrPixelConfigIs8888(srcConfig)) {
            return false;
        }
        applyPremulToSrc = true;
    }

    GrGpu::DrawPreference drawPreference = GrGpu::kNoDraw_DrawPreference;
    // Don't prefer to draw for the conversion (and thereby access a texture from the cache) when
    // we've already determined that there isn't a roundtrip preserving conversion processor pair.
    if (applyPremulToSrc && !this->didFailPMUPMConversionTest()) {
        drawPreference = GrGpu::kCallerPrefersDraw_DrawPreference;
    }

    GrGpu::WritePixelTempDrawInfo tempDrawInfo;
    if (!fGpu->getWritePixelsInfo(surface, width, height, srcConfig, &drawPreference,
                                  &tempDrawInfo)) {
        return false;
    }

    if (!(kDontFlush_PixelOpsFlag & pixelOpsFlags) && surface->surfacePriv().hasPendingIO()) {
        this->flush();
    }

    SkAutoTUnref<GrTexture> tempTexture;
    if (GrGpu::kNoDraw_DrawPreference != drawPreference) {
        tempTexture.reset(
            this->textureProvider()->createApproxTexture(tempDrawInfo.fTempSurfaceDesc));
        if (!tempTexture && GrGpu::kRequireDraw_DrawPreference == drawPreference) {
            return false;
        }
    }

    // temp buffer for doing sw premul conversion, if needed.
    SkAutoSTMalloc<128 * 128, uint32_t> tmpPixels(0);
    if (tempTexture) {
        SkAutoTUnref<const GrFragmentProcessor> fp;
        SkMatrix textureMatrix;
        textureMatrix.setIDiv(tempTexture->width(), tempTexture->height());
        if (applyPremulToSrc) {
            fp.reset(this->createUPMToPMEffect(tempTexture, tempDrawInfo.fSwizzle,
                                               textureMatrix));
            // If premultiplying was the only reason for the draw, fall back to a straight write.
            if (!fp) {
                if (GrGpu::kCallerPrefersDraw_DrawPreference == drawPreference) {
                    tempTexture.reset(nullptr);
                }
            } else {
                applyPremulToSrc = false;
            }
        }
        if (tempTexture) {
            if (!fp) {
                fp.reset(GrConfigConversionEffect::Create(tempTexture, tempDrawInfo.fSwizzle,
                    GrConfigConversionEffect::kNone_PMConversion, textureMatrix));
                if (!fp) {
                    return false;
                }
            }
            GrRenderTarget* renderTarget = surface->asRenderTarget();
            SkASSERT(renderTarget);
            if (tempTexture->surfacePriv().hasPendingIO()) {
                this->flush();
            }
            if (applyPremulToSrc) {
                size_t tmpRowBytes = 4 * width;
                tmpPixels.reset(width * height);
                if (!sw_convert_to_premul(srcConfig, width, height, rowBytes, buffer, tmpRowBytes,
                                          tmpPixels.get())) {
                    return false;
                }
                rowBytes = tmpRowBytes;
                buffer = tmpPixels.get();
                applyPremulToSrc = false;
            }
            if (!fGpu->writePixels(tempTexture, 0, 0, width, height,
                                   tempDrawInfo.fWriteConfig, buffer,
                                   rowBytes)) {
                return false;
            }
            SkMatrix matrix;
            matrix.setTranslate(SkIntToScalar(left), SkIntToScalar(top));
            SkAutoTUnref<GrDrawContext> drawContext(this->drawContext(renderTarget));
            if (!drawContext) {
                return false;
            }
            GrPaint paint;
            paint.addColorFragmentProcessor(fp);
            paint.setPorterDuffXPFactory(SkXfermode::kSrc_Mode);
            SkRect rect = SkRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height));
            drawContext->drawRect(GrClip::WideOpen(), paint, matrix, rect, nullptr);

            if (kFlushWrites_PixelOp & pixelOpsFlags) {
                this->flushSurfaceWrites(surface);
            }
        }
    }
    if (!tempTexture) {
        if (applyPremulToSrc) {
            size_t tmpRowBytes = 4 * width;
            tmpPixels.reset(width * height);
            if (!sw_convert_to_premul(srcConfig, width, height, rowBytes, buffer, tmpRowBytes,
                                      tmpPixels.get())) {
                return false;
            }
            rowBytes = tmpRowBytes;
            buffer = tmpPixels.get();
            applyPremulToSrc = false;
        }
        return fGpu->writePixels(surface, left, top, width, height, srcConfig, buffer, rowBytes);
    }
    return true;
}

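// Reads pixels back from 'src'. If the backend prefers (or requires) a draw,
// 'src' is first rendered into a temporary texture, optionally applying a
// premul -> unpremul conversion effect, and the readback happens from that
// temporary. Any unpremul conversion that did not happen on the GPU is then
// applied on the CPU.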
bool GrContext::readSurfacePixels(GrSurface* src,
                                  int left, int top, int width, int height,
                                  GrPixelConfig dstConfig, void* buffer, size_t rowBytes,
                                  uint32_t flags) {
    ASSERT_SINGLE_OWNER
    RETURN_FALSE_IF_ABANDONED
    ASSERT_OWNED_RESOURCE(src);
    SkASSERT(src);
    GR_AUDIT_TRAIL_AUTO_FRAME(&fAuditTrail, "GrContext::readSurfacePixels");

    this->testPMConversionsIfNecessary(flags);
    SkAutoMutexAcquire ama(fReadPixelsMutex);

    // Adjust the params so that if we wind up using an intermediate surface we've already done
    // all the trimming and the temporary can be the min size required.
    if (!GrSurfacePriv::AdjustReadPixelParams(src->width(), src->height(),
                                              GrBytesPerPixel(dstConfig), &left,
                                              &top, &width, &height, &buffer, &rowBytes)) {
        return false;
    }

    if (!(kDontFlush_PixelOpsFlag & flags) && src->surfacePriv().hasPendingWrite()) {
        this->flush();
    }

    bool unpremul = SkToBool(kUnpremul_PixelOpsFlag & flags);
    if (unpremul && !GrPixelConfigIs8888(dstConfig)) {
        // The unpremul flag is only allowed for 8888 configs.
        return false;
    }

    GrGpu::DrawPreference drawPreference = GrGpu::kNoDraw_DrawPreference;
    // Don't prefer to draw for the conversion (and thereby access a texture from the cache) when
    // we've already determined that there isn't a roundtrip preserving conversion processor pair.
    if (unpremul && !this->didFailPMUPMConversionTest()) {
        drawPreference = GrGpu::kCallerPrefersDraw_DrawPreference;
    }

    GrGpu::ReadPixelTempDrawInfo tempDrawInfo;
    if (!fGpu->getReadPixelsInfo(src, width, height, rowBytes, dstConfig, &drawPreference,
                                 &tempDrawInfo)) {
        return false;
    }

    SkAutoTUnref<GrSurface> surfaceToRead(SkRef(src));
    bool didTempDraw = false;
    if (GrGpu::kNoDraw_DrawPreference != drawPreference) {
        if (tempDrawInfo.fUseExactScratch) {
            // We only respect this when the entire src is being read. Otherwise we can trigger too
            // many odd ball texture sizes and trash the cache.
            if (width != src->width() || height != src->height()) {
                tempDrawInfo.fUseExactScratch = false;
            }
        }
        SkAutoTUnref<GrTexture> temp;
        if (tempDrawInfo.fUseExactScratch) {
            temp.reset(this->textureProvider()->createTexture(tempDrawInfo.fTempSurfaceDesc,
                                                              SkBudgeted::kYes));
        } else {
            temp.reset(this->textureProvider()->createApproxTexture(tempDrawInfo.fTempSurfaceDesc));
        }
        if (temp) {
            SkMatrix textureMatrix;
            textureMatrix.setTranslate(SkIntToScalar(left), SkIntToScalar(top));
            textureMatrix.postIDiv(src->width(), src->height());
            SkAutoTUnref<const GrFragmentProcessor> fp;
            if (unpremul) {
                fp.reset(this->createPMToUPMEffect(src->asTexture(), tempDrawInfo.fSwizzle,
                    textureMatrix));
                if (fp) {
                    unpremul = false; // we no longer need to do this on CPU after the read back.
                } else if (GrGpu::kCallerPrefersDraw_DrawPreference == drawPreference) {
                    // We only wanted to do the draw in order to perform the unpremul so don't
                    // bother.
                    temp.reset(nullptr);
                }
            }
            if (!fp && temp) {
                fp.reset(GrConfigConversionEffect::Create(src->asTexture(), tempDrawInfo.fSwizzle,
                    GrConfigConversionEffect::kNone_PMConversion, textureMatrix));
            }
            if (fp) {
                GrPaint paint;
                paint.addColorFragmentProcessor(fp);
                paint.setPorterDuffXPFactory(SkXfermode::kSrc_Mode);
                SkRect rect = SkRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height));
                SkAutoTUnref<GrDrawContext> drawContext(this->drawContext(temp->asRenderTarget()));
                drawContext->drawRect(GrClip::WideOpen(), paint, SkMatrix::I(), rect, nullptr);
                surfaceToRead.reset(SkRef(temp.get()));
                left = 0;
                top = 0;
                didTempDraw = true;
            }
        }
    }

    if (GrGpu::kRequireDraw_DrawPreference == drawPreference && !didTempDraw) {
        return false;
    }
    GrPixelConfig configToRead = dstConfig;
    if (didTempDraw) {
        this->flushSurfaceWrites(surfaceToRead);
        configToRead = tempDrawInfo.fReadConfig;
    }
    if (!fGpu->readPixels(surfaceToRead, left, top, width, height, configToRead, buffer,
                          rowBytes)) {
        return false;
    }

    // Perform the unpremul conversion on the CPU if we weren't able to perform it as a draw.
    if (unpremul) {
        SkDstPixelInfo dstPI;
        if (!GrPixelConfig2ColorAndProfileType(dstConfig, &dstPI.fColorType, nullptr)) {
            return false;
        }
        dstPI.fAlphaType = kUnpremul_SkAlphaType;
        dstPI.fPixels = buffer;
        dstPI.fRowBytes = rowBytes;

        SkSrcPixelInfo srcPI;
        srcPI.fColorType = dstPI.fColorType;
        srcPI.fAlphaType = kPremul_SkAlphaType;
        srcPI.fPixels = buffer;
        srcPI.fRowBytes = rowBytes;

        return srcPI.convertPixelsTo(&dstPI, width, height);
    }
    return true;
}

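// Flushes pending reads/writes on 'surface' and resolves its render target (if
// it has one) so the contents are up to date for an external client.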
void GrContext::prepareSurfaceForExternalIO(GrSurface* surface) {
    ASSERT_SINGLE_OWNER
    RETURN_IF_ABANDONED
    SkASSERT(surface);
    ASSERT_OWNED_RESOURCE(surface);
    if (surface->surfacePriv().hasPendingIO()) {
        this->flush();
    }
    GrRenderTarget* rt = surface->asRenderTarget();
    if (fGpu && rt) {
        fGpu->resolveRenderTarget(rt);
    }
}

bool GrContext::copySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
                            const SkIPoint& dstPoint) {
    ASSERT_SINGLE_OWNER
    RETURN_FALSE_IF_ABANDONED
    GR_AUDIT_TRAIL_AUTO_FRAME(&fAuditTrail, "GrContext::copySurface");

    if (!src || !dst) {
        return false;
    }
    ASSERT_OWNED_RESOURCE(src);
    ASSERT_OWNED_RESOURCE(dst);

    if (!dst->asRenderTarget()) {
        SkIRect clippedSrcRect;
        SkIPoint clippedDstPoint;
        if (!GrCopySurfaceBatch::ClipSrcRectAndDstPoint(dst, src, srcRect, dstPoint,
                                                        &clippedSrcRect, &clippedDstPoint)) {
            return false;
        }
        // If we don't have an RT for the dst then we won't have a GrDrawContext to insert the
        // copy surface into. In the future we plan to have a more limited Context type
        // (GrCopyContext?) that has the subset of GrDrawContext operations that should be
        // allowed on textures that aren't render targets.
        // For now we just flush any writes to the src and issue an immediate copy to the dst.
        src->flushWrites();
        return fGpu->copySurface(dst, src, clippedSrcRect, clippedDstPoint);
    }
    SkAutoTUnref<GrDrawContext> drawContext(this->drawContext(dst->asRenderTarget()));
    if (!drawContext) {
        return false;
    }

    if (!drawContext->copySurface(src, srcRect, dstPoint)) {
        return false;
    }
    return true;
}

void GrContext::flushSurfaceWrites(GrSurface* surface) {
    ASSERT_SINGLE_OWNER
    RETURN_IF_ABANDONED
    if (surface->surfacePriv().hasPendingWrite()) {
        this->flush();
    }
}

////////////////////////////////////////////////////////////////////////////////
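// Returns 0 (no MSAA) unless 'config' is renderable with multisampling. A
// sample count is only suggested when the shader caps report path rendering
// support, and it is dropped to 0 if it exceeds the device's maximum.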
int GrContext::getRecommendedSampleCount(GrPixelConfig config,
                                         SkScalar dpi) const {
    ASSERT_SINGLE_OWNER

    if (!this->caps()->isConfigRenderable(config, true)) {
        return 0;
    }
    int chosenSampleCount = 0;
    if (fGpu->caps()->shaderCaps()->pathRenderingSupport()) {
        if (dpi >= 250.0f) {
            chosenSampleCount = 4;
        } else {
            chosenSampleCount = 16;
        }
    }
    return chosenSampleCount <= fGpu->caps()->maxSampleCount() ? chosenSampleCount : 0;
}

GrDrawContext* GrContext::drawContext(GrRenderTarget* rt, const SkSurfaceProps* surfaceProps) {
    ASSERT_SINGLE_OWNER
    return fDrawingManager->drawContext(rt, surfaceProps);
}

bool GrContext::abandoned() const {
    ASSERT_SINGLE_OWNER
    return fDrawingManager->abandoned();
}

namespace {
void test_pm_conversions(GrContext* ctx, int* pmToUPMValue, int* upmToPMValue) {
    GrConfigConversionEffect::PMConversion pmToUPM;
    GrConfigConversionEffect::PMConversion upmToPM;
    GrConfigConversionEffect::TestForPreservingPMConversions(ctx, &pmToUPM, &upmToPM);
    *pmToUPMValue = pmToUPM;
    *upmToPMValue = upmToPM;
}
}

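// Lazily runs the round-trip PM <-> UPM conversion test the first time an
// unpremul pixel op is requested; the cached result is guarded by a mutex so
// the test only runs once per context.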
void GrContext::testPMConversionsIfNecessary(uint32_t flags) {
    ASSERT_SINGLE_OWNER
    if (SkToBool(kUnpremul_PixelOpsFlag & flags)) {
        SkAutoMutexAcquire ama(fTestPMConversionsMutex);
        if (!fDidTestPMConversions) {
            test_pm_conversions(this, &fPMToUPMConversion, &fUPMToPMConversion);
            fDidTestPMConversions = true;
        }
    }
}

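// The two factories below return a fragment processor that applies the
// conversion determined by the test above, or nullptr if no round-trip
// preserving conversion was found.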
const GrFragmentProcessor* GrContext::createPMToUPMEffect(GrTexture* texture,
                                                          const GrSwizzle& swizzle,
                                                          const SkMatrix& matrix) const {
    ASSERT_SINGLE_OWNER
    // We should have already called this->testPMConversionsIfNecessary().
    SkASSERT(fDidTestPMConversions);
    GrConfigConversionEffect::PMConversion pmToUPM =
        static_cast<GrConfigConversionEffect::PMConversion>(fPMToUPMConversion);
    if (GrConfigConversionEffect::kNone_PMConversion != pmToUPM) {
        return GrConfigConversionEffect::Create(texture, swizzle, pmToUPM, matrix);
    } else {
        return nullptr;
    }
}

const GrFragmentProcessor* GrContext::createUPMToPMEffect(GrTexture* texture,
                                                          const GrSwizzle& swizzle,
                                                          const SkMatrix& matrix) const {
    ASSERT_SINGLE_OWNER
    // We should have already called this->testPMConversionsIfNecessary().
    SkASSERT(fDidTestPMConversions);
    GrConfigConversionEffect::PMConversion upmToPM =
        static_cast<GrConfigConversionEffect::PMConversion>(fUPMToPMConversion);
    if (GrConfigConversionEffect::kNone_PMConversion != upmToPM) {
        return GrConfigConversionEffect::Create(texture, swizzle, upmToPM, matrix);
    } else {
        return nullptr;
    }
}

bool GrContext::didFailPMUPMConversionTest() const {
    ASSERT_SINGLE_OWNER
    // We should have already called this->testPMConversionsIfNecessary().
    SkASSERT(fDidTestPMConversions);
    // The PM<->UPM tests fail or succeed together so we only need to check one.
    return GrConfigConversionEffect::kNone_PMConversion == fPMToUPMConversion;
}

//////////////////////////////////////////////////////////////////////////////

void GrContext::getResourceCacheLimits(int* maxTextures, size_t* maxTextureBytes) const {
    ASSERT_SINGLE_OWNER
    if (maxTextures) {
        *maxTextures = fResourceCache->getMaxResourceCount();
    }
    if (maxTextureBytes) {
        *maxTextureBytes = fResourceCache->getMaxResourceBytes();
    }
}

void GrContext::setResourceCacheLimits(int maxTextures, size_t maxTextureBytes) {
    ASSERT_SINGLE_OWNER
    fResourceCache->setLimits(maxTextures, maxTextureBytes);
}

//////////////////////////////////////////////////////////////////////////////

void GrContext::dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const {
    ASSERT_SINGLE_OWNER
    fResourceCache->dumpMemoryStatistics(traceMemoryDump);
}