GrContext.cpp revision b77f0f4ae560e97cc4cd2758752d955549017c3c

/*
 * Copyright 2011 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */


#include "GrContext.h"

#include "effects/GrSingleTextureEffect.h"
#include "effects/GrConfigConversionEffect.h"

#include "GrAARectRenderer.h"
#include "GrBufferAllocPool.h"
#include "GrGpu.h"
#include "GrDrawTargetCaps.h"
#include "GrIndexBuffer.h"
#include "GrInOrderDrawBuffer.h"
#include "GrOvalRenderer.h"
#include "GrPathRenderer.h"
#include "GrPathUtils.h"
#include "GrResourceCache.h"
#include "GrSoftwarePathRenderer.h"
#include "GrStencilBuffer.h"
#include "GrTextStrike.h"
#include "SkRTConf.h"
#include "SkRRect.h"
#include "SkStrokeRec.h"
#include "SkTLazy.h"
#include "SkTLS.h"
#include "SkTrace.h"

SK_DEFINE_INST_COUNT(GrContext)
SK_DEFINE_INST_COUNT(GrDrawState)

// It can be useful to set this to false to test whether a bug is caused by using the
// InOrderDrawBuffer, to compare performance of using/not using InOrderDrawBuffer, or to make
// debugging simpler.
SK_CONF_DECLARE(bool, c_Defer, "gpu.deferContext", true,
                "Defers rendering in GrContext via GrInOrderDrawBuffer.");

#define BUFFERED_DRAW (c_Defer ? kYes_BufferedDraw : kNo_BufferedDraw)

// When we're using coverage AA but the blend is incompatible (given gpu
// limitations) should we disable AA or draw wrong?
#define DISABLE_COVERAGE_AA_FOR_BLEND 1

#ifdef SK_DEBUG
    // change this to a 1 to see notifications when partial coverage fails
    #define GR_DEBUG_PARTIAL_COVERAGE_CHECK 0
#else
    #define GR_DEBUG_PARTIAL_COVERAGE_CHECK 0
#endif

static const size_t MAX_TEXTURE_CACHE_COUNT = 2048;
static const size_t MAX_TEXTURE_CACHE_BYTES = GR_DEFAULT_TEXTURE_CACHE_MB_LIMIT * 1024 * 1024;

static const size_t DRAW_BUFFER_VBPOOL_BUFFER_SIZE = 1 << 15;
static const int DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS = 4;

static const size_t DRAW_BUFFER_IBPOOL_BUFFER_SIZE = 1 << 11;
static const int DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS = 4;

#define ASSERT_OWNED_RESOURCE(R) SkASSERT(!(R) || (R)->getContext() == this)

// Glorified typedef to avoid including GrDrawState.h in GrContext.h
class GrContext::AutoRestoreEffects : public GrDrawState::AutoRestoreEffects {};

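// Ensures a flush happens when leaving a drawing scope if the resource cache went over budget
// (see OverbudgetCB below) while the scope was active.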
class GrContext::AutoCheckFlush {
public:
    AutoCheckFlush(GrContext* context) : fContext(context) { SkASSERT(NULL != context); }

    ~AutoCheckFlush() {
        if (fContext->fFlushToReduceCacheSize) {
            fContext->flush();
        }
    }

private:
    GrContext* fContext;
};

GrContext* GrContext::Create(GrBackend backend, GrBackendContext backendContext) {
    GrContext* context = SkNEW(GrContext);
    if (context->init(backend, backendContext)) {
        return context;
    } else {
        context->unref();
        return NULL;
    }
}

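// Helpers for the per-thread GrContext instance count, stored in thread-local storage.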
namespace {
void* CreateThreadInstanceCount() {
    return SkNEW_ARGS(int, (0));
}
void DeleteThreadInstanceCount(void* v) {
    delete reinterpret_cast<int*>(v);
}
#define THREAD_INSTANCE_COUNT \
    (*reinterpret_cast<int*>(SkTLS::Get(CreateThreadInstanceCount, DeleteThreadInstanceCount)))
}

GrContext::GrContext() {
    ++THREAD_INSTANCE_COUNT;
    fDrawState = NULL;
    fGpu = NULL;
    fClip = NULL;
    fPathRendererChain = NULL;
    fSoftwarePathRenderer = NULL;
    fTextureCache = NULL;
    fFontCache = NULL;
    fDrawBuffer = NULL;
    fDrawBufferVBAllocPool = NULL;
    fDrawBufferIBAllocPool = NULL;
    fFlushToReduceCacheSize = false;
    fAARectRenderer = NULL;
    fOvalRenderer = NULL;
    fViewMatrix.reset();
    fMaxTextureSizeOverride = 1 << 20;
}

bool GrContext::init(GrBackend backend, GrBackendContext backendContext) {
    SkASSERT(NULL == fGpu);

    fGpu = GrGpu::Create(backend, backendContext, this);
    if (NULL == fGpu) {
        return false;
    }

    fDrawState = SkNEW(GrDrawState);
    fGpu->setDrawState(fDrawState);

    fTextureCache = SkNEW_ARGS(GrResourceCache,
                               (MAX_TEXTURE_CACHE_COUNT,
                                MAX_TEXTURE_CACHE_BYTES));
    fTextureCache->setOverbudgetCallback(OverbudgetCB, this);

    fFontCache = SkNEW_ARGS(GrFontCache, (fGpu));

    fLastDrawWasBuffered = kNo_BufferedDraw;

    fAARectRenderer = SkNEW(GrAARectRenderer);
    fOvalRenderer = SkNEW(GrOvalRenderer);

    fDidTestPMConversions = false;

    this->setupDrawBuffer();

    return true;
}

int GrContext::GetThreadInstanceCount() {
    return THREAD_INSTANCE_COUNT;
}

GrContext::~GrContext() {
    if (NULL == fGpu) {
        return;
    }

    this->flush();

    for (int i = 0; i < fCleanUpData.count(); ++i) {
        (*fCleanUpData[i].fFunc)(this, fCleanUpData[i].fInfo);
    }

    // Since the gpu can hold scratch textures, give it a chance to let go
    // of them before freeing the texture cache
    fGpu->purgeResources();

    delete fTextureCache;
    fTextureCache = NULL;
    delete fFontCache;
    delete fDrawBuffer;
    delete fDrawBufferVBAllocPool;
    delete fDrawBufferIBAllocPool;

    fAARectRenderer->unref();
    fOvalRenderer->unref();

    fGpu->unref();
    SkSafeUnref(fPathRendererChain);
    SkSafeUnref(fSoftwarePathRenderer);
    fDrawState->unref();

    --THREAD_INSTANCE_COUNT;
}

void GrContext::contextLost() {
    this->contextDestroyed();
    this->setupDrawBuffer();
}

void GrContext::contextDestroyed() {
    // abandon first so that destructors
    // don't try to free the resources in the API.
    fGpu->abandonResources();

    // a path renderer may be holding onto resources that
    // are now unusable
    SkSafeSetNull(fPathRendererChain);
    SkSafeSetNull(fSoftwarePathRenderer);

    delete fDrawBuffer;
    fDrawBuffer = NULL;

    delete fDrawBufferVBAllocPool;
    fDrawBufferVBAllocPool = NULL;

    delete fDrawBufferIBAllocPool;
    fDrawBufferIBAllocPool = NULL;

    fAARectRenderer->reset();
    fOvalRenderer->reset();

    fTextureCache->purgeAllUnlocked();
    fFontCache->freeAll();
    fGpu->markContextDirty();
}

void GrContext::resetContext(uint32_t state) {
    fGpu->markContextDirty(state);
}

void GrContext::freeGpuResources() {
    this->flush();

    fGpu->purgeResources();

    fAARectRenderer->reset();
    fOvalRenderer->reset();

    fTextureCache->purgeAllUnlocked();
    fFontCache->freeAll();
    // a path renderer may be holding onto resources
    SkSafeSetNull(fPathRendererChain);
    SkSafeSetNull(fSoftwarePathRenderer);
}

size_t GrContext::getGpuTextureCacheBytes() const {
    return fTextureCache->getCachedResourceBytes();
}

////////////////////////////////////////////////////////////////////////////////

GrTexture* GrContext::findAndRefTexture(const GrTextureDesc& desc,
                                        const GrCacheID& cacheID,
                                        const GrTextureParams* params) {
    GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheID);
    GrResource* resource = fTextureCache->find(resourceKey);
    SkSafeRef(resource);
    return static_cast<GrTexture*>(resource);
}

bool GrContext::isTextureInCache(const GrTextureDesc& desc,
                                 const GrCacheID& cacheID,
                                 const GrTextureParams* params) const {
    GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheID);
    return fTextureCache->hasKey(resourceKey);
}

void GrContext::addStencilBuffer(GrStencilBuffer* sb) {
    ASSERT_OWNED_RESOURCE(sb);

    GrResourceKey resourceKey = GrStencilBuffer::ComputeKey(sb->width(),
                                                            sb->height(),
                                                            sb->numSamples());
    fTextureCache->addResource(resourceKey, sb);
}

GrStencilBuffer* GrContext::findStencilBuffer(int width, int height,
                                              int sampleCnt) {
    GrResourceKey resourceKey = GrStencilBuffer::ComputeKey(width,
                                                            height,
                                                            sampleCnt);
    GrResource* resource = fTextureCache->find(resourceKey);
    return static_cast<GrStencilBuffer*>(resource);
}

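// Nearest-neighbor resample of src into dst using 16.16 fixed-point stepping; used as the CPU
// fallback when an NPOT texture has to be stretched to POT dimensions (see createResizedTexture).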
static void stretchImage(void* dst,
                         int dstW,
                         int dstH,
                         void* src,
                         int srcW,
                         int srcH,
                         size_t bpp) {
    GrFixed dx = (srcW << 16) / dstW;
    GrFixed dy = (srcH << 16) / dstH;

    GrFixed y = dy >> 1;

    size_t dstXLimit = dstW*bpp;
    for (int j = 0; j < dstH; ++j) {
        GrFixed x = dx >> 1;
        void* srcRow = (uint8_t*)src + (y>>16)*srcW*bpp;
        void* dstRow = (uint8_t*)dst + j*dstW*bpp;
        for (size_t i = 0; i < dstXLimit; i += bpp) {
            memcpy((uint8_t*) dstRow + i,
                   (uint8_t*) srcRow + (x>>16)*bpp,
                   bpp);
            x += dx;
        }
        y += dy;
    }
}

namespace {

// position + local coordinate
extern const GrVertexAttrib gVertexAttribs[] = {
    {kVec2f_GrVertexAttribType, 0,               kPosition_GrVertexAttribBinding},
    {kVec2f_GrVertexAttribType, sizeof(GrPoint), kLocalCoord_GrVertexAttribBinding}
};

}

// The desired texture is NPOT and tiled but that isn't supported by
// the current hardware. Resize the texture to be a POT
GrTexture* GrContext::createResizedTexture(const GrTextureDesc& desc,
                                           const GrCacheID& cacheID,
                                           void* srcData,
                                           size_t rowBytes,
                                           bool filter) {
    SkAutoTUnref<GrTexture> clampedTexture(this->findAndRefTexture(desc, cacheID, NULL));
    if (NULL == clampedTexture) {
        clampedTexture.reset(this->createTexture(NULL, desc, cacheID, srcData, rowBytes));

        if (NULL == clampedTexture) {
            return NULL;
        }
    }

    GrTextureDesc rtDesc = desc;
    rtDesc.fFlags = rtDesc.fFlags |
                    kRenderTarget_GrTextureFlagBit |
                    kNoStencil_GrTextureFlagBit;
    rtDesc.fWidth  = GrNextPow2(desc.fWidth);
    rtDesc.fHeight = GrNextPow2(desc.fHeight);

    GrTexture* texture = fGpu->createTexture(rtDesc, NULL, 0);

    if (NULL != texture) {
        GrDrawTarget::AutoStateRestore asr(fGpu, GrDrawTarget::kReset_ASRInit);
        GrDrawState* drawState = fGpu->drawState();
        drawState->setRenderTarget(texture->asRenderTarget());

        // if filtering is not desired then we want to ensure all
        // texels in the resampled image are copies of texels from
        // the original.
        GrTextureParams params(SkShader::kClamp_TileMode, filter ? GrTextureParams::kBilerp_FilterMode :
                                                                   GrTextureParams::kNone_FilterMode);
        drawState->addColorTextureEffect(clampedTexture, SkMatrix::I(), params);

        drawState->setVertexAttribs<gVertexAttribs>(SK_ARRAY_COUNT(gVertexAttribs));

        GrDrawTarget::AutoReleaseGeometry arg(fGpu, 4, 0);

        if (arg.succeeded()) {
            GrPoint* verts = (GrPoint*) arg.vertices();
            verts[0].setIRectFan(0, 0, texture->width(), texture->height(), 2 * sizeof(GrPoint));
            verts[1].setIRectFan(0, 0, 1, 1, 2 * sizeof(GrPoint));
            fGpu->drawNonIndexed(kTriangleFan_GrPrimitiveType, 0, 4);
        }
    } else {
        // TODO: Our CPU stretch doesn't filter. But we create separate
        // stretched textures whether or not the texture params are filtered.
        // Either implement filtered stretch blit on CPU or just create
        // one when the FBO case fails.

        rtDesc.fFlags = kNone_GrTextureFlags;
        // no longer need to clamp at min RT size.
        rtDesc.fWidth  = GrNextPow2(desc.fWidth);
        rtDesc.fHeight = GrNextPow2(desc.fHeight);
        size_t bpp = GrBytesPerPixel(desc.fConfig);
        SkAutoSMalloc<128*128*4> stretchedPixels(bpp * rtDesc.fWidth * rtDesc.fHeight);
        stretchImage(stretchedPixels.get(), rtDesc.fWidth, rtDesc.fHeight,
                     srcData, desc.fWidth, desc.fHeight, bpp);

        size_t stretchedRowBytes = rtDesc.fWidth * bpp;

        SkDEBUGCODE(GrTexture* texture = )fGpu->createTexture(rtDesc, stretchedPixels.get(),
                                                              stretchedRowBytes);
        SkASSERT(NULL != texture);
    }

    return texture;
}

GrTexture* GrContext::createTexture(const GrTextureParams* params,
                                    const GrTextureDesc& desc,
                                    const GrCacheID& cacheID,
                                    void* srcData,
                                    size_t rowBytes,
                                    GrResourceKey* cacheKey) {
    SK_TRACE_EVENT0("GrContext::createTexture");

    GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheID);

    GrTexture* texture;
    if (GrTexture::NeedsResizing(resourceKey)) {
        texture = this->createResizedTexture(desc, cacheID,
                                             srcData, rowBytes,
                                             GrTexture::NeedsBilerp(resourceKey));
    } else {
        texture = fGpu->createTexture(desc, srcData, rowBytes);
    }

    if (NULL != texture) {
        // Adding a resource could put us overbudget. Try to free up the
        // necessary space before adding it.
        fTextureCache->purgeAsNeeded(1, texture->sizeInBytes());
        fTextureCache->addResource(resourceKey, texture);

        if (NULL != cacheKey) {
            *cacheKey = resourceKey;
        }
    }

    return texture;
}

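// Creates a new scratch texture and adds it to the cache tagged as exclusive (hidden) so that
// subsequent 'find' calls won't return it while the caller still owns it.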
static GrTexture* create_scratch_texture(GrGpu* gpu,
                                         GrResourceCache* textureCache,
                                         const GrTextureDesc& desc) {
    GrTexture* texture = gpu->createTexture(desc, NULL, 0);
    if (NULL != texture) {
        GrResourceKey key = GrTexture::ComputeScratchKey(texture->desc());
        // Adding a resource could put us overbudget. Try to free up the
        // necessary space before adding it.
        textureCache->purgeAsNeeded(1, texture->sizeInBytes());
        // Make the resource exclusive so future 'find' calls don't return it
        textureCache->addResource(key, texture, GrResourceCache::kHide_OwnershipFlag);
    }
    return texture;
}

GrTexture* GrContext::lockAndRefScratchTexture(const GrTextureDesc& inDesc, ScratchTexMatch match) {

    SkASSERT((inDesc.fFlags & kRenderTarget_GrTextureFlagBit) ||
             !(inDesc.fFlags & kNoStencil_GrTextureFlagBit));

    // Renderable A8 targets are not universally supported (e.g., not on ANGLE)
    SkASSERT(this->isConfigRenderable(kAlpha_8_GrPixelConfig, inDesc.fSampleCnt > 0) ||
             !(inDesc.fFlags & kRenderTarget_GrTextureFlagBit) ||
             (inDesc.fConfig != kAlpha_8_GrPixelConfig));

    if (!fGpu->caps()->reuseScratchTextures()) {
        // If we're never recycling scratch textures we can
        // always make them the right size
        return create_scratch_texture(fGpu, fTextureCache, inDesc);
    }

    GrTextureDesc desc = inDesc;

    if (kApprox_ScratchTexMatch == match) {
        // bin by pow2 with a reasonable min
        static const int MIN_SIZE = 16;
        desc.fWidth  = GrMax(MIN_SIZE, GrNextPow2(desc.fWidth));
        desc.fHeight = GrMax(MIN_SIZE, GrNextPow2(desc.fHeight));
    }

    GrResource* resource = NULL;
    int origWidth = desc.fWidth;
    int origHeight = desc.fHeight;

    do {
        GrResourceKey key = GrTexture::ComputeScratchKey(desc);
        // Ensure we have exclusive access to the texture so future 'find' calls don't return it
        resource = fTextureCache->find(key, GrResourceCache::kHide_OwnershipFlag);
        if (NULL != resource) {
            resource->ref();
            break;
        }
        if (kExact_ScratchTexMatch == match) {
            break;
        }
        // We had a cache miss and we are in approx mode, relax the fit of the flags.

        // We no longer try to reuse textures that were previously used as render targets in
        // situations where no RT is needed; doing otherwise can confuse the video driver and
        // cause significant performance problems in some cases.
        if (desc.fFlags & kNoStencil_GrTextureFlagBit) {
            desc.fFlags = desc.fFlags & ~kNoStencil_GrTextureFlagBit;
        } else {
            break;
        }

    } while (true);

    if (NULL == resource) {
        desc.fFlags = inDesc.fFlags;
        desc.fWidth = origWidth;
        desc.fHeight = origHeight;
        resource = create_scratch_texture(fGpu, fTextureCache, desc);
    }

    return static_cast<GrTexture*>(resource);
}

void GrContext::addExistingTextureToCache(GrTexture* texture) {

    if (NULL == texture) {
        return;
    }

    // This texture should already have a cache entry since it was once
    // attached
    SkASSERT(NULL != texture->getCacheEntry());

    // Conceptually, the cache entry is going to assume responsibility
    // for the creation ref. Assert refcnt == 1.
    SkASSERT(texture->unique());

    if (fGpu->caps()->reuseScratchTextures()) {
        // Since this texture came from an AutoScratchTexture it should
        // still be in the exclusive pile. Recycle it.
        fTextureCache->makeNonExclusive(texture->getCacheEntry());
        this->purgeCache();
    } else if (texture->getDeferredRefCount() <= 0) {
        // When we aren't reusing textures we know this scratch texture
        // will never be reused and would be just wasting time in the cache
        fTextureCache->makeNonExclusive(texture->getCacheEntry());
        fTextureCache->deleteResource(texture->getCacheEntry());
    } else {
        // In this case fDeferredRefCount > 0 but the cache is the only
        // one holding a real ref. Mark the object so that when the deferred
        // ref count goes to 0 the texture will be deleted (remember that
        // in this code path scratch textures aren't getting reused).
        texture->setNeedsDeferredUnref();
    }
}


void GrContext::unlockScratchTexture(GrTexture* texture) {
    ASSERT_OWNED_RESOURCE(texture);
    SkASSERT(NULL != texture->getCacheEntry());

    // If this is a scratch texture we detached it from the cache
    // while it was locked (to avoid two callers simultaneously getting
    // the same texture).
    if (texture->getCacheEntry()->key().isScratch()) {
        if (fGpu->caps()->reuseScratchTextures()) {
            fTextureCache->makeNonExclusive(texture->getCacheEntry());
            this->purgeCache();
        } else if (texture->unique() && texture->getDeferredRefCount() <= 0) {
            // Only the cache now knows about this texture. Since we're never
            // reusing scratch textures (in this code path) it would just be
            // wasting time sitting in the cache.
            fTextureCache->makeNonExclusive(texture->getCacheEntry());
            fTextureCache->deleteResource(texture->getCacheEntry());
        } else {
            // In this case (fRefCnt > 1 || defRefCnt > 0) we don't really
            // want to re-add it to the cache (since it will never be reused).
            // Instead, give up the cache's ref and leave the decision up to
            // addExistingTextureToCache once its ref count reaches 0. For
            // this to work we need to leave it in the exclusive list.
            texture->setFlag((GrTextureFlags) GrTexture::kReturnToCache_FlagBit);
            // Give up the cache's ref to the texture
            texture->unref();
        }
    }
}

void GrContext::purgeCache() {
    if (NULL != fTextureCache) {
        fTextureCache->purgeAsNeeded();
    }
}

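// Callback the resource cache invokes when it goes over budget: request that the buffered draws
// be flushed so the textures they reference can be released.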
bool GrContext::OverbudgetCB(void* data) {
    SkASSERT(NULL != data);

    GrContext* context = reinterpret_cast<GrContext*>(data);

    // Flush the InOrderDrawBuffer to possibly free up some textures
    context->fFlushToReduceCacheSize = true;

    return true;
}


GrTexture* GrContext::createUncachedTexture(const GrTextureDesc& descIn,
                                            void* srcData,
                                            size_t rowBytes) {
    GrTextureDesc descCopy = descIn;
    return fGpu->createTexture(descCopy, srcData, rowBytes);
}

void GrContext::getTextureCacheLimits(int* maxTextures,
                                      size_t* maxTextureBytes) const {
    fTextureCache->getLimits(maxTextures, maxTextureBytes);
}

void GrContext::setTextureCacheLimits(int maxTextures, size_t maxTextureBytes) {
    fTextureCache->setLimits(maxTextures, maxTextureBytes);
}

int GrContext::getMaxTextureSize() const {
    return GrMin(fGpu->caps()->maxTextureSize(), fMaxTextureSizeOverride);
}

int GrContext::getMaxRenderTargetSize() const {
    return fGpu->caps()->maxRenderTargetSize();
}

int GrContext::getMaxSampleCount() const {
    return fGpu->caps()->maxSampleCount();
}

///////////////////////////////////////////////////////////////////////////////

GrTexture* GrContext::wrapBackendTexture(const GrBackendTextureDesc& desc) {
    return fGpu->wrapBackendTexture(desc);
}

GrRenderTarget* GrContext::wrapBackendRenderTarget(const GrBackendRenderTargetDesc& desc) {
    return fGpu->wrapBackendRenderTarget(desc);
}

///////////////////////////////////////////////////////////////////////////////

bool GrContext::supportsIndex8PixelConfig(const GrTextureParams* params,
                                          int width, int height) const {
    const GrDrawTargetCaps* caps = fGpu->caps();
    if (!caps->eightBitPaletteSupport()) {
        return false;
    }

    bool isPow2 = GrIsPow2(width) && GrIsPow2(height);

    if (!isPow2) {
        bool tiled = NULL != params && params->isTiled();
        if (tiled && !caps->npotTextureTileSupport()) {
            return false;
        }
    }
    return true;
}


////////////////////////////////////////////////////////////////////////////////

void GrContext::clear(const SkIRect* rect,
                      const GrColor color,
                      GrRenderTarget* target) {
    AutoRestoreEffects are;
    AutoCheckFlush acf(this);
    this->prepareToDraw(NULL, BUFFERED_DRAW, &are, &acf)->clear(rect, color, target);
}

void GrContext::drawPaint(const GrPaint& origPaint) {
    // set rect to be big enough to fill the space, but not super-huge, so we
    // don't overflow fixed-point implementations
    SkRect r;
    r.setLTRB(0, 0,
              SkIntToScalar(getRenderTarget()->width()),
              SkIntToScalar(getRenderTarget()->height()));
    SkMatrix inverse;
    SkTCopyOnFirstWrite<GrPaint> paint(origPaint);
    AutoMatrix am;

    // We attempt to map r by the inverse matrix and draw that. mapRect will
    // map the four corners and bound them with a new rect. This will not
    // produce a correct result for some perspective matrices.
    if (!this->getMatrix().hasPerspective()) {
        if (!fViewMatrix.invert(&inverse)) {
            GrPrintf("Could not invert matrix\n");
            return;
        }
        inverse.mapRect(&r);
    } else {
        if (!am.setIdentity(this, paint.writable())) {
            GrPrintf("Could not invert matrix\n");
            return;
        }
    }
    // by definition this fills the entire clip, no need for AA
    if (paint->isAntiAlias()) {
        paint.writable()->setAntiAlias(false);
    }
    this->drawRect(*paint, r);
}

#ifdef SK_DEVELOPER
void GrContext::dumpFontCache() const {
    fFontCache->dump();
}
#endif

////////////////////////////////////////////////////////////////////////////////

namespace {
inline bool disable_coverage_aa_for_blend(GrDrawTarget* target) {
    return DISABLE_COVERAGE_AA_FOR_BLEND && !target->canApplyCoverage();
}
}

////////////////////////////////////////////////////////////////////////////////

/*  create a triangle strip that strokes the specified rect. There are 8
 unique vertices, but we repeat the last 2 to close up. Alternatively we
 could use an indices array, and then only send 8 verts, but not sure that
 would be faster.
 */
static void setStrokeRectStrip(GrPoint verts[10], SkRect rect,
                               SkScalar width) {
    const SkScalar rad = SkScalarHalf(width);
    rect.sort();

    verts[0].set(rect.fLeft + rad, rect.fTop + rad);
    verts[1].set(rect.fLeft - rad, rect.fTop - rad);
    verts[2].set(rect.fRight - rad, rect.fTop + rad);
    verts[3].set(rect.fRight + rad, rect.fTop - rad);
    verts[4].set(rect.fRight - rad, rect.fBottom - rad);
    verts[5].set(rect.fRight + rad, rect.fBottom + rad);
    verts[6].set(rect.fLeft + rad, rect.fBottom - rad);
    verts[7].set(rect.fLeft - rad, rect.fBottom + rad);
    verts[8] = verts[0];
    verts[9] = verts[1];
}

static bool isIRect(const SkRect& r) {
    return SkScalarIsInt(r.fLeft)  && SkScalarIsInt(r.fTop) &&
           SkScalarIsInt(r.fRight) && SkScalarIsInt(r.fBottom);
}

static bool apply_aa_to_rect(GrDrawTarget* target,
                             const SkRect& rect,
                             SkScalar strokeWidth,
                             const SkMatrix& combinedMatrix,
                             SkRect* devBoundRect,
                             bool* useVertexCoverage) {
    // we use a simple coverage ramp to do aa on axis-aligned rects
    // we check if the rect will be axis-aligned, and the rect won't land on
    // integer coords.

    // we are keeping around the "tweak the alpha" trick because
    // it is our only hope for the fixed-pipe implementation.
    // In a shader implementation we can give a separate coverage input
    // TODO: remove this ugliness when we drop the fixed-pipe impl
    *useVertexCoverage = false;
    if (!target->getDrawState().canTweakAlphaForCoverage()) {
        if (disable_coverage_aa_for_blend(target)) {
#ifdef SK_DEBUG
            //GrPrintf("Turning off AA to correctly apply blend.\n");
#endif
            return false;
        } else {
            *useVertexCoverage = true;
        }
    }
    const GrDrawState& drawState = target->getDrawState();
    if (drawState.getRenderTarget()->isMultisampled()) {
        return false;
    }

    if (0 == strokeWidth && target->willUseHWAALines()) {
        return false;
    }

#if defined(SHADER_AA_FILL_RECT) || !defined(IGNORE_ROT_AA_RECT_OPT)
    if (strokeWidth >= 0) {
#endif
        if (!combinedMatrix.preservesAxisAlignment()) {
            return false;
        }

#if defined(SHADER_AA_FILL_RECT) || !defined(IGNORE_ROT_AA_RECT_OPT)
    } else {
        if (!combinedMatrix.preservesRightAngles()) {
            return false;
        }
    }
#endif

    combinedMatrix.mapRect(devBoundRect, rect);

    if (strokeWidth < 0) {
        return !isIRect(*devBoundRect);
    } else {
        return true;
    }
}

static inline bool rect_contains_inclusive(const SkRect& rect, const SkPoint& point) {
    return point.fX >= rect.fLeft && point.fX <= rect.fRight &&
           point.fY >= rect.fTop && point.fY <= rect.fBottom;
}

void GrContext::drawRect(const GrPaint& paint,
                         const SkRect& rect,
                         SkScalar width,
                         const SkMatrix* matrix) {
    SK_TRACE_EVENT0("GrContext::drawRect");

    AutoRestoreEffects are;
    AutoCheckFlush acf(this);
    GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);

    SkMatrix combinedMatrix = target->drawState()->getViewMatrix();
    if (NULL != matrix) {
        combinedMatrix.preConcat(*matrix);
    }

    // Check if this is a full RT draw and can be replaced with a clear. We don't bother checking
    // cases where the RT is fully inside a stroke.
    if (width < 0) {
        SkRect rtRect;
        target->getDrawState().getRenderTarget()->getBoundsRect(&rtRect);
        SkRect clipSpaceRTRect = rtRect;
        bool checkClip = false;
        if (NULL != this->getClip()) {
            checkClip = true;
            clipSpaceRTRect.offset(SkIntToScalar(this->getClip()->fOrigin.fX),
                                   SkIntToScalar(this->getClip()->fOrigin.fY));
        }
        // Does the clip contain the entire RT?
        if (!checkClip || target->getClip()->fClipStack->quickContains(clipSpaceRTRect)) {
            SkMatrix invM;
            if (!combinedMatrix.invert(&invM)) {
                return;
            }
            // Does the rect bound the RT?
            SkPoint srcSpaceRTQuad[4];
            invM.mapRectToQuad(srcSpaceRTQuad, rtRect);
            if (rect_contains_inclusive(rect, srcSpaceRTQuad[0]) &&
                rect_contains_inclusive(rect, srcSpaceRTQuad[1]) &&
                rect_contains_inclusive(rect, srcSpaceRTQuad[2]) &&
                rect_contains_inclusive(rect, srcSpaceRTQuad[3])) {
                // Will it blend?
                GrColor clearColor;
                if (paint.isOpaqueAndConstantColor(&clearColor)) {
                    target->clear(NULL, clearColor);
                    return;
                }
            }
        }
    }

    SkRect devBoundRect;
    bool useVertexCoverage;
    bool needAA = paint.isAntiAlias() &&
                  !target->getDrawState().getRenderTarget()->isMultisampled();
    bool doAA = needAA && apply_aa_to_rect(target, rect, width, combinedMatrix, &devBoundRect,
                                           &useVertexCoverage);
    if (doAA) {
        GrDrawState::AutoViewMatrixRestore avmr;
        if (!avmr.setIdentity(target->drawState())) {
            return;
        }
        if (width >= 0) {
            fAARectRenderer->strokeAARect(this->getGpu(), target,
                                          rect, combinedMatrix, devBoundRect,
                                          width, useVertexCoverage);
        } else {
            // filled AA rect
            fAARectRenderer->fillAARect(this->getGpu(), target,
                                        rect, combinedMatrix, devBoundRect,
                                        useVertexCoverage);
        }
        return;
    }

    if (width >= 0) {
        // TODO: consider making static vertex buffers for these cases.
        // Hairline could be done by just adding closing vertex to
        // unitSquareVertexBuffer()

        static const int worstCaseVertCount = 10;
        target->drawState()->setDefaultVertexAttribs();
        GrDrawTarget::AutoReleaseGeometry geo(target, worstCaseVertCount, 0);

        if (!geo.succeeded()) {
            GrPrintf("Failed to get space for vertices!\n");
            return;
        }

        GrPrimitiveType primType;
        int vertCount;
        GrPoint* vertex = geo.positions();

        if (width > 0) {
            vertCount = 10;
            primType = kTriangleStrip_GrPrimitiveType;
            setStrokeRectStrip(vertex, rect, width);
        } else {
            // hairline
            vertCount = 5;
            primType = kLineStrip_GrPrimitiveType;
            vertex[0].set(rect.fLeft, rect.fTop);
            vertex[1].set(rect.fRight, rect.fTop);
            vertex[2].set(rect.fRight, rect.fBottom);
            vertex[3].set(rect.fLeft, rect.fBottom);
            vertex[4].set(rect.fLeft, rect.fTop);
        }

        GrDrawState::AutoViewMatrixRestore avmr;
        if (NULL != matrix) {
            GrDrawState* drawState = target->drawState();
            avmr.set(drawState, *matrix);
        }

        target->drawNonIndexed(primType, 0, vertCount);
    } else {
        // filled BW rect
        target->drawSimpleRect(rect, matrix);
    }
}

void GrContext::drawRectToRect(const GrPaint& paint,
                               const SkRect& dstRect,
                               const SkRect& localRect,
                               const SkMatrix* dstMatrix,
                               const SkMatrix* localMatrix) {
    SK_TRACE_EVENT0("GrContext::drawRectToRect");
    AutoRestoreEffects are;
    AutoCheckFlush acf(this);
    GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);

    target->drawRect(dstRect, dstMatrix, &localRect, localMatrix);
}

namespace {

extern const GrVertexAttrib gPosUVColorAttribs[] = {
    {kVec2f_GrVertexAttribType,  0, kPosition_GrVertexAttribBinding },
    {kVec2f_GrVertexAttribType,  sizeof(GrPoint), kLocalCoord_GrVertexAttribBinding },
    {kVec4ub_GrVertexAttribType, 2*sizeof(GrPoint), kColor_GrVertexAttribBinding}
};

extern const GrVertexAttrib gPosColorAttribs[] = {
    {kVec2f_GrVertexAttribType,  0, kPosition_GrVertexAttribBinding},
    {kVec4ub_GrVertexAttribType, sizeof(GrPoint), kColor_GrVertexAttribBinding},
};

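// Picks a vertex attribute layout based on which of texCoords/colors were supplied and reports
// the byte offsets of the optional attributes (-1 when absent).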
static void set_vertex_attributes(GrDrawState* drawState,
                                  const GrPoint* texCoords,
                                  const GrColor* colors,
                                  int* colorOffset,
                                  int* texOffset) {
    *texOffset = -1;
    *colorOffset = -1;

    if (NULL != texCoords && NULL != colors) {
        *texOffset = sizeof(GrPoint);
        *colorOffset = 2*sizeof(GrPoint);
        drawState->setVertexAttribs<gPosUVColorAttribs>(3);
    } else if (NULL != texCoords) {
        *texOffset = sizeof(GrPoint);
        drawState->setVertexAttribs<gPosUVColorAttribs>(2);
    } else if (NULL != colors) {
        *colorOffset = sizeof(GrPoint);
        drawState->setVertexAttribs<gPosColorAttribs>(2);
    } else {
        drawState->setVertexAttribs<gPosColorAttribs>(1);
    }
}

}

void GrContext::drawVertices(const GrPaint& paint,
                             GrPrimitiveType primitiveType,
                             int vertexCount,
                             const GrPoint positions[],
                             const GrPoint texCoords[],
                             const GrColor colors[],
                             const uint16_t indices[],
                             int indexCount) {
    SK_TRACE_EVENT0("GrContext::drawVertices");

    GrDrawTarget::AutoReleaseGeometry geo;

    AutoRestoreEffects are;
    AutoCheckFlush acf(this);
    GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);

    GrDrawState* drawState = target->drawState();

    int colorOffset = -1, texOffset = -1;
    set_vertex_attributes(drawState, texCoords, colors, &colorOffset, &texOffset);

    size_t vertexSize = drawState->getVertexSize();
    if (sizeof(GrPoint) != vertexSize) {
        if (!geo.set(target, vertexCount, 0)) {
            GrPrintf("Failed to get space for vertices!\n");
            return;
        }
        void* curVertex = geo.vertices();

        for (int i = 0; i < vertexCount; ++i) {
            *((GrPoint*)curVertex) = positions[i];

            if (texOffset >= 0) {
                *(GrPoint*)((intptr_t)curVertex + texOffset) = texCoords[i];
            }
            if (colorOffset >= 0) {
                *(GrColor*)((intptr_t)curVertex + colorOffset) = colors[i];
            }
            curVertex = (void*)((intptr_t)curVertex + vertexSize);
        }
    } else {
        target->setVertexSourceToArray(positions, vertexCount);
    }

    // we don't currently apply offscreen AA to this path. Need improved
    // management of GrDrawTarget's geometry to avoid copying points per-tile.

    if (NULL != indices) {
        target->setIndexSourceToArray(indices, indexCount);
        target->drawIndexed(primitiveType, 0, 0, vertexCount, indexCount);
        target->resetIndexSource();
    } else {
        target->drawNonIndexed(primitiveType, 0, vertexCount);
    }
}

///////////////////////////////////////////////////////////////////////////////

void GrContext::drawRRect(const GrPaint& paint,
                          const SkRRect& rect,
                          const SkStrokeRec& stroke) {
    if (rect.isEmpty()) {
        return;
    }

    AutoRestoreEffects are;
    AutoCheckFlush acf(this);
    GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);

    bool useAA = paint.isAntiAlias() &&
                 !target->getDrawState().getRenderTarget()->isMultisampled() &&
                 !disable_coverage_aa_for_blend(target);

    if (!fOvalRenderer->drawSimpleRRect(target, this, useAA, rect, stroke)) {
        SkPath path;
        path.addRRect(rect);
        this->internalDrawPath(target, useAA, path, stroke);
    }
}

///////////////////////////////////////////////////////////////////////////////

void GrContext::drawOval(const GrPaint& paint,
                         const SkRect& oval,
                         const SkStrokeRec& stroke) {
    if (oval.isEmpty()) {
        return;
    }

    AutoRestoreEffects are;
    AutoCheckFlush acf(this);
    GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);

    bool useAA = paint.isAntiAlias() &&
                 !target->getDrawState().getRenderTarget()->isMultisampled() &&
                 !disable_coverage_aa_for_blend(target);

    if (!fOvalRenderer->drawOval(target, this, useAA, oval, stroke)) {
        SkPath path;
        path.addOval(oval);
        this->internalDrawPath(target, useAA, path, stroke);
    }
}

// Can 'path' be drawn as a pair of filled nested rectangles?
static bool is_nested_rects(GrDrawTarget* target,
                            const SkPath& path,
                            const SkStrokeRec& stroke,
                            SkRect rects[2],
                            bool* useVertexCoverage) {
    SkASSERT(stroke.isFillStyle());

    if (path.isInverseFillType()) {
        return false;
    }

    const GrDrawState& drawState = target->getDrawState();

    // TODO: this restriction could be lifted if we were willing to apply
    // the matrix to all the points individually rather than just to the rect
    if (!drawState.getViewMatrix().preservesAxisAlignment()) {
        return false;
    }

    *useVertexCoverage = false;
    if (!target->getDrawState().canTweakAlphaForCoverage()) {
        if (disable_coverage_aa_for_blend(target)) {
            return false;
        } else {
            *useVertexCoverage = true;
        }
    }

    SkPath::Direction dirs[2];
    if (!path.isNestedRects(rects, dirs)) {
        return false;
    }

    if (SkPath::kWinding_FillType == path.getFillType() && dirs[0] == dirs[1]) {
        // The two rects need to be wound opposite to each other
        return false;
    }

    // Right now, nested rects where the margin is not the same width
    // all around do not render correctly
    const SkScalar* outer = rects[0].asScalars();
    const SkScalar* inner = rects[1].asScalars();

    SkScalar margin = SkScalarAbs(outer[0] - inner[0]);
    for (int i = 1; i < 4; ++i) {
        SkScalar temp = SkScalarAbs(outer[i] - inner[i]);
        if (!SkScalarNearlyEqual(margin, temp)) {
            return false;
        }
    }

    return true;
}

void GrContext::drawPath(const GrPaint& paint, const SkPath& path, const SkStrokeRec& stroke) {

    if (path.isEmpty()) {
        if (path.isInverseFillType()) {
            this->drawPaint(paint);
        }
        return;
    }

    // Note that internalDrawPath may sw-rasterize the path into a scratch texture.
    // Scratch textures can be recycled after they are returned to the texture
    // cache. This presents a potential hazard for buffered drawing. However,
    // the writePixels that uploads to the scratch will perform a flush so we're
    // OK.
    AutoRestoreEffects are;
    AutoCheckFlush acf(this);
    GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);

    bool useAA = paint.isAntiAlias() && !target->getDrawState().getRenderTarget()->isMultisampled();
    if (useAA && stroke.getWidth() < 0 && !path.isConvex()) {
        // Concave AA paths are expensive - try to avoid them for special cases
        bool useVertexCoverage;
        SkRect rects[2];

        if (is_nested_rects(target, path, stroke, rects, &useVertexCoverage)) {
            SkMatrix origViewMatrix = target->getDrawState().getViewMatrix();
            GrDrawState::AutoViewMatrixRestore avmr;
            if (!avmr.setIdentity(target->drawState())) {
                return;
            }

            fAARectRenderer->fillAANestedRects(this->getGpu(), target,
                                               rects,
                                               origViewMatrix,
                                               useVertexCoverage);
            return;
        }
    }

    SkRect ovalRect;
    bool isOval = path.isOval(&ovalRect);

    if (!isOval || path.isInverseFillType()
        || !fOvalRenderer->drawOval(target, this, useAA, ovalRect, stroke)) {
        this->internalDrawPath(target, useAA, path, stroke);
    }
}

void GrContext::internalDrawPath(GrDrawTarget* target, bool useAA, const SkPath& path,
                                 const SkStrokeRec& stroke) {
    SkASSERT(!path.isEmpty());

    // An assumption here is that the path renderer will use some form of tweaking
    // the src color (either the input alpha or in the frag shader) to implement
    // AA. If we have some future driver-mojo path AA that can do the right
    // thing WRT the blend then we'll need some query on the PR.
    if (disable_coverage_aa_for_blend(target)) {
#ifdef SK_DEBUG
        //GrPrintf("Turning off AA to correctly apply blend.\n");
#endif
        useAA = false;
    }

    GrPathRendererChain::DrawType type = useAA ? GrPathRendererChain::kColorAntiAlias_DrawType :
                                                 GrPathRendererChain::kColor_DrawType;

    const SkPath* pathPtr = &path;
    SkPath tmpPath;
    SkStrokeRec strokeRec(stroke);

    // Try a 1st time without stroking the path and without allowing the SW renderer
    GrPathRenderer* pr = this->getPathRenderer(*pathPtr, strokeRec, target, false, type);

    if (NULL == pr) {
        if (!strokeRec.isHairlineStyle()) {
            // It didn't work the 1st time, so try again with the stroked path
            if (strokeRec.applyToPath(&tmpPath, *pathPtr)) {
                pathPtr = &tmpPath;
                strokeRec.setFillStyle();
            }
        }
        if (pathPtr->isEmpty()) {
            return;
        }

        // This time, allow SW renderer
        pr = this->getPathRenderer(*pathPtr, strokeRec, target, true, type);
    }

    if (NULL == pr) {
#ifdef SK_DEBUG
        GrPrintf("Unable to find path renderer compatible with path.\n");
#endif
        return;
    }

    pr->drawPath(*pathPtr, strokeRec, target, useAA);
}

////////////////////////////////////////////////////////////////////////////////

void GrContext::flush(int flagsBitfield) {
    if (NULL == fDrawBuffer) {
        return;
    }

    if (kDiscard_FlushBit & flagsBitfield) {
        fDrawBuffer->reset();
    } else {
        fDrawBuffer->flush();
    }
    fFlushToReduceCacheSize = false;
}

bool GrContext::writeTexturePixels(GrTexture* texture,
                                   int left, int top, int width, int height,
                                   GrPixelConfig config, const void* buffer, size_t rowBytes,
                                   uint32_t flags) {
    SK_TRACE_EVENT0("GrContext::writeTexturePixels");
    ASSERT_OWNED_RESOURCE(texture);

    if ((kUnpremul_PixelOpsFlag & flags) || !fGpu->canWriteTexturePixels(texture, config)) {
        if (NULL != texture->asRenderTarget()) {
            return this->writeRenderTargetPixels(texture->asRenderTarget(),
                                                 left, top, width, height,
                                                 config, buffer, rowBytes, flags);
        } else {
            return false;
        }
    }

    if (!(kDontFlush_PixelOpsFlag & flags)) {
        this->flush();
    }

    return fGpu->writeTexturePixels(texture, left, top, width, height,
                                    config, buffer, rowBytes);
}

bool GrContext::readTexturePixels(GrTexture* texture,
                                  int left, int top, int width, int height,
                                  GrPixelConfig config, void* buffer, size_t rowBytes,
                                  uint32_t flags) {
    SK_TRACE_EVENT0("GrContext::readTexturePixels");
    ASSERT_OWNED_RESOURCE(texture);

    GrRenderTarget* target = texture->asRenderTarget();
    if (NULL != target) {
        return this->readRenderTargetPixels(target,
                                            left, top, width, height,
                                            config, buffer, rowBytes,
                                            flags);
    } else {
        // TODO: make this more efficient for cases where we're reading the entire
        //       texture, i.e., use GetTexImage() instead

        // create scratch rendertarget and read from that
        GrAutoScratchTexture ast;
        GrTextureDesc desc;
        desc.fFlags = kRenderTarget_GrTextureFlagBit;
        desc.fWidth = width;
        desc.fHeight = height;
        desc.fConfig = config;
        desc.fOrigin = kTopLeft_GrSurfaceOrigin;
        ast.set(this, desc, kExact_ScratchTexMatch);
        GrTexture* dst = ast.texture();
        if (NULL != dst && NULL != (target = dst->asRenderTarget())) {
            this->copyTexture(texture, target, NULL);
            return this->readRenderTargetPixels(target,
                                                left, top, width, height,
                                                config, buffer, rowBytes,
                                                flags);
        }

        return false;
    }
}

#include "SkConfig8888.h"

namespace {
/**
 * Converts a GrPixelConfig to a SkCanvas::Config8888. Only byte-per-channel
 * formats are representable as Config8888 and so the function returns false
 * if the GrPixelConfig has no equivalent Config8888.
 */
bool grconfig_to_config8888(GrPixelConfig config,
                            bool unpremul,
                            SkCanvas::Config8888* config8888) {
    switch (config) {
        case kRGBA_8888_GrPixelConfig:
            if (unpremul) {
                *config8888 = SkCanvas::kRGBA_Unpremul_Config8888;
            } else {
                *config8888 = SkCanvas::kRGBA_Premul_Config8888;
            }
            return true;
        case kBGRA_8888_GrPixelConfig:
            if (unpremul) {
                *config8888 = SkCanvas::kBGRA_Unpremul_Config8888;
            } else {
                *config8888 = SkCanvas::kBGRA_Premul_Config8888;
            }
            return true;
        default:
            return false;
    }
}

// Returns a configuration where the byte positions of the R & B components are swapped relative
// to the input config. This should only be called with the result of grconfig_to_config8888 as
// it will fail for other configs.
SkCanvas::Config8888 swap_config8888_red_and_blue(SkCanvas::Config8888 config8888) {
    switch (config8888) {
        case SkCanvas::kBGRA_Premul_Config8888:
            return SkCanvas::kRGBA_Premul_Config8888;
        case SkCanvas::kBGRA_Unpremul_Config8888:
            return SkCanvas::kRGBA_Unpremul_Config8888;
        case SkCanvas::kRGBA_Premul_Config8888:
            return SkCanvas::kBGRA_Premul_Config8888;
        case SkCanvas::kRGBA_Unpremul_Config8888:
            return SkCanvas::kBGRA_Unpremul_Config8888;
        default:
            GrCrash("Unexpected input");
            return SkCanvas::kBGRA_Unpremul_Config8888;
    }
}
}
1351
1352bool GrContext::readRenderTargetPixels(GrRenderTarget* target,
1353                                       int left, int top, int width, int height,
1354                                       GrPixelConfig dstConfig, void* buffer, size_t rowBytes,
1355                                       uint32_t flags) {
1356    SK_TRACE_EVENT0("GrContext::readRenderTargetPixels");
1357    ASSERT_OWNED_RESOURCE(target);
1358
1359    if (NULL == target) {
1360        target = fRenderTarget.get();
1361        if (NULL == target) {
1362            return false;
1363        }
1364    }
1365
1366    if (!(kDontFlush_PixelOpsFlag & flags)) {
1367        this->flush();
1368    }
1369
1370    // Determine which conversions have to be applied: flipY, swapRAnd, and/or unpremul.
1371
1372    // If fGpu->readPixels would incur a y-flip cost then we will read the pixels upside down. We'll
1373    // either do the flipY by drawing into a scratch with a matrix or on the cpu after the read.
1374    bool flipY = fGpu->readPixelsWillPayForYFlip(target, left, top,
1375                                                 width, height, dstConfig,
1376                                                 rowBytes);
1377    // We ignore the preferred config if it is different than our config unless it is an R/B swap.
1378    // In that case we'll perform an R and B swap while drawing to a scratch texture of the swapped
1379    // config. Then we will call readPixels on the scratch with the swapped config. The swaps during
1380    // the draw cancels out the fact that we call readPixels with a config that is R/B swapped from
1381    // dstConfig.
1382    GrPixelConfig readConfig = dstConfig;
1383    bool swapRAndB = false;
1384    if (GrPixelConfigSwapRAndB(dstConfig) ==
1385        fGpu->preferredReadPixelsConfig(dstConfig, target->config())) {
1386        readConfig = GrPixelConfigSwapRAndB(readConfig);
1387        swapRAndB = true;
1388    }
1389
1390    bool unpremul = SkToBool(kUnpremul_PixelOpsFlag & flags);
1391
1392    if (unpremul && !GrPixelConfigIs8888(dstConfig)) {
1393        // The unpremul flag is only allowed for these two configs.
1394        return false;
1395    }
1396
1397    // If the src is a texture and we would have to do conversions after read pixels, we instead
1398    // do the conversions by drawing the src to a scratch texture. If we handle any of the
1399    // conversions in the draw we set the corresponding bool to false so that we don't reapply it
1400    // on the read back pixels.
1401    GrTexture* src = target->asTexture();
1402    GrAutoScratchTexture ast;
1403    if (NULL != src && (swapRAndB || unpremul || flipY)) {
1404        // Make the scratch a render target because we don't have a robust readTexturePixels as of
1405        // yet. It calls this function.
1406        GrTextureDesc desc;
1407        desc.fFlags = kRenderTarget_GrTextureFlagBit;
1408        desc.fWidth = width;
1409        desc.fHeight = height;
1410        desc.fConfig = readConfig;
1411        desc.fOrigin = kTopLeft_GrSurfaceOrigin;
1412
1413        // When a full read back is faster than a partial we could always make the scratch exactly
1414        // match the passed rect. However, if we see many different size rectangles we will trash
1415        // our texture cache and pay the cost of creating and destroying many textures. So, we only
1416        // request an exact match when the caller is reading an entire RT.
1417        ScratchTexMatch match = kApprox_ScratchTexMatch;
1418        if (0 == left &&
1419            0 == top &&
1420            target->width() == width &&
1421            target->height() == height &&
1422            fGpu->fullReadPixelsIsFasterThanPartial()) {
1423            match = kExact_ScratchTexMatch;
1424        }
1425        ast.set(this, desc, match);
1426        GrTexture* texture = ast.texture();
1427        if (texture) {
1428            // compute a matrix to perform the draw
1429            SkMatrix textureMatrix;
1430            textureMatrix.setTranslate(SK_Scalar1 *left, SK_Scalar1 *top);
1431            textureMatrix.postIDiv(src->width(), src->height());
1432
1433            SkAutoTUnref<const GrEffectRef> effect;
1434            if (unpremul) {
1435                effect.reset(this->createPMToUPMEffect(src, swapRAndB, textureMatrix));
1436                if (NULL != effect) {
1437                    unpremul = false; // we no longer need to do this on CPU after the read back.
1438                }
1439            }
1440            // If we failed to create a PM->UPM effect and have no other conversions to perform then
1441            // there is no longer any point to using the scratch.
1442            if (NULL != effect || flipY || swapRAndB) {
1443                if (!effect) {
1444                    effect.reset(GrConfigConversionEffect::Create(
1445                                                    src,
1446                                                    swapRAndB,
1447                                                    GrConfigConversionEffect::kNone_PMConversion,
1448                                                    textureMatrix));
1449                }
1450                swapRAndB = false; // we will handle the swap in the draw.
1451
1452                // We protect the existing geometry here since it may not be
1453                // clear to the caller that a draw operation (i.e., drawSimpleRect)
1454                // can be invoked in this method
1455                GrDrawTarget::AutoGeometryAndStatePush agasp(fGpu, GrDrawTarget::kReset_ASRInit);
1456                GrDrawState* drawState = fGpu->drawState();
1457                SkASSERT(effect);
1458                drawState->addColorEffect(effect);
1459
1460                drawState->setRenderTarget(texture->asRenderTarget());
1461                SkRect rect = SkRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height));
1462                fGpu->drawSimpleRect(rect, NULL);
1463                // we want to read back from the scratch's origin
1464                left = 0;
1465                top = 0;
1466                target = texture->asRenderTarget();
1467            }
1468        }
1469    }
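    // Perform the actual read back. If we redirected to a scratch texture above, 'target', 'left',
    // and 'top' now refer to the scratch render target and its origin.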
1470    if (!fGpu->readPixels(target,
1471                          left, top, width, height,
1472                          readConfig, buffer, rowBytes)) {
1473        return false;
1474    }
1475    // Perform any conversions we weren't able to perform using a scratch texture.
1476    if (unpremul || swapRAndB) {
1477        // These are initialized to suppress a warning
1478        SkCanvas::Config8888 srcC8888 = SkCanvas::kNative_Premul_Config8888;
1479        SkCanvas::Config8888 dstC8888 = SkCanvas::kNative_Premul_Config8888;
1480
1481        SkDEBUGCODE(bool c8888IsValid =) grconfig_to_config8888(dstConfig, false, &srcC8888);
1482        grconfig_to_config8888(dstConfig, unpremul, &dstC8888);
1483
1484        if (swapRAndB) {
1485            SkASSERT(c8888IsValid); // we should only do r/b swap on 8888 configs
1486            srcC8888 = swap_config8888_red_and_blue(srcC8888);
1487        }
1488        SkASSERT(c8888IsValid);
1489        uint32_t* b32 = reinterpret_cast<uint32_t*>(buffer);
1490        SkConvertConfig8888Pixels(b32, rowBytes, dstC8888,
1491                                  b32, rowBytes, srcC8888,
1492                                  width, height);
1493    }
1494    return true;
1495}
1496
1497void GrContext::resolveRenderTarget(GrRenderTarget* target) {
1498    SkASSERT(target);
1499    ASSERT_OWNED_RESOURCE(target);
1500    // In the future we may track whether there are any pending draws to this
1501    // target. We don't today so we always perform a flush. We don't promise
1502    // this to our clients, though.
1503    this->flush();
1504    fGpu->resolveRenderTarget(target);
1505}
1506
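// Copies 'src' into 'dst' by drawing a textured rect. The optional 'topLeft' selects the offset
// within 'src' to copy from; the copied area is clipped to the bounds of 'src'.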
1507void GrContext::copyTexture(GrTexture* src, GrRenderTarget* dst, const SkIPoint* topLeft) {
1508    if (NULL == src || NULL == dst) {
1509        return;
1510    }
1511    ASSERT_OWNED_RESOURCE(src);
1512
1513    // Writes pending to the source texture are not tracked, so a flush
1514    // is required to ensure that the copy captures the most recent contents
1515    // of the source texture. See similar behavior in
1516    // GrContext::resolveRenderTarget.
1517    this->flush();
1518
1519    GrDrawTarget::AutoStateRestore asr(fGpu, GrDrawTarget::kReset_ASRInit);
1520    GrDrawState* drawState = fGpu->drawState();
1521    drawState->setRenderTarget(dst);
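    // Build a matrix that maps the drawn rect into normalized texture coordinates within 'src'.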
1522    SkMatrix sampleM;
1523    sampleM.setIDiv(src->width(), src->height());
1524    SkIRect srcRect = SkIRect::MakeWH(dst->width(), dst->height());
1525    if (NULL != topLeft) {
1526        srcRect.offset(*topLeft);
1527    }
1528    SkIRect srcBounds = SkIRect::MakeWH(src->width(), src->height());
1529    if (!srcRect.intersect(srcBounds)) {
1530        return;
1531    }
1532    sampleM.preTranslate(SkIntToScalar(srcRect.fLeft), SkIntToScalar(srcRect.fTop));
1533    drawState->addColorTextureEffect(src, sampleM);
1534    SkRect dstR = SkRect::MakeWH(SkIntToScalar(srcRect.width()), SkIntToScalar(srcRect.height()));
1535    fGpu->drawSimpleRect(dstR, NULL);
1536}
1537
1538bool GrContext::writeRenderTargetPixels(GrRenderTarget* target,
1539                                        int left, int top, int width, int height,
1540                                        GrPixelConfig srcConfig,
1541                                        const void* buffer,
1542                                        size_t rowBytes,
1543                                        uint32_t flags) {
1544    SK_TRACE_EVENT0("GrContext::writeRenderTargetPixels");
1545    ASSERT_OWNED_RESOURCE(target);
1546
1547    if (NULL == target) {
1548        target = fRenderTarget.get();
1549        if (NULL == target) {
1550            return false;
1551        }
1552    }
1553
1554    // TODO: when underlying api has a direct way to do this we should use it (e.g. glDrawPixels on
1555    // desktop GL).
1556
1557    // We will always call some form of writeTexturePixels and we will pass our flags on to it.
1558    // Thus, we don't perform a flush here since that call will do it (if the kNoFlush flag isn't
1559    // set).
1560
1561    // If the RT is also a texture and we don't have to premultiply then take the texture path.
1562    // We expect to be at least as fast or faster since it doesn't use an intermediate texture as
1563    // we do below.
1564
1565#if !defined(SK_BUILD_FOR_MAC)
1566    // At least some drivers on the Mac get confused when glTexImage2D is called on a texture
1567    // attached to an FBO. The FBO still sees the old image. TODO: determine which OS versions
1568    // and/or HW are affected.
1569    if (NULL != target->asTexture() && !(kUnpremul_PixelOpsFlag & flags) &&
1570        fGpu->canWriteTexturePixels(target->asTexture(), srcConfig)) {
1571        return this->writeTexturePixels(target->asTexture(),
1572                                        left, top, width, height,
1573                                        srcConfig, buffer, rowBytes, flags);
1574    }
1575#endif
1576
1577    // We ignore the preferred config unless it is an R/B swap of the src config. In that case
1578    // we will upload the original src data to a scratch texture but we will spoof it as the swapped
1579    // config. This scratch will then have R and B swapped. We correct for this by swapping again
1580    // when drawing the scratch to the dst using a conversion effect.
1581    bool swapRAndB = false;
1582    GrPixelConfig writeConfig = srcConfig;
1583    if (GrPixelConfigSwapRAndB(srcConfig) ==
1584        fGpu->preferredWritePixelsConfig(srcConfig, target->config())) {
1585        writeConfig = GrPixelConfigSwapRAndB(srcConfig);
1586        swapRAndB = true;
1587    }
1588
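    // Upload the caller's pixels to a scratch texture and then draw that texture into the target,
    // handling any premul and R/B-swap conversions during the draw when possible.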
1589    GrTextureDesc desc;
1590    desc.fWidth = width;
1591    desc.fHeight = height;
1592    desc.fConfig = writeConfig;
1593    GrAutoScratchTexture ast(this, desc);
1594    GrTexture* texture = ast.texture();
1595    if (NULL == texture) {
1596        return false;
1597    }
1598
1599    SkAutoTUnref<const GrEffectRef> effect;
1600    SkMatrix textureMatrix;
1601    textureMatrix.setIDiv(texture->width(), texture->height());
1602
1603    // tmp buffer for sw conversion of the pixels to premul; allocated below only if needed
1604    SkAutoSTMalloc<128 * 128, uint32_t> tmpPixels(0);
1605
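    // If the caller's data is unpremultiplied, prefer converting it with a GPU effect; fall back
    // to a CPU conversion into tmpPixels when no such effect is available.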
1606    if (kUnpremul_PixelOpsFlag & flags) {
1607        if (!GrPixelConfigIs8888(srcConfig)) {
1608            return false;
1609        }
1610        effect.reset(this->createUPMToPMEffect(texture, swapRAndB, textureMatrix));
1611        // handle the unpremul step on the CPU if we couldn't create an effect to do it.
1612        if (NULL == effect) {
1613            SkCanvas::Config8888 srcConfig8888, dstConfig8888;
1614            SkDEBUGCODE(bool success = )
1615            grconfig_to_config8888(srcConfig, true, &srcConfig8888);
1616            SkASSERT(success);
1617            SkDEBUGCODE(success = )
1618            grconfig_to_config8888(srcConfig, false, &dstConfig8888);
1619            SkASSERT(success);
1620            const uint32_t* src = reinterpret_cast<const uint32_t*>(buffer);
1621            tmpPixels.reset(width * height);
1622            SkConvertConfig8888Pixels(tmpPixels.get(), 4 * width, dstConfig8888,
1623                                      src, rowBytes, srcConfig8888,
1624                                      width, height);
1625            buffer = tmpPixels.get();
1626            rowBytes = 4 * width;
1627        }
1628    }
1629    if (NULL == effect) {
1630        effect.reset(GrConfigConversionEffect::Create(texture,
1631                                                      swapRAndB,
1632                                                      GrConfigConversionEffect::kNone_PMConversion,
1633                                                      textureMatrix));
1634    }
1635
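    // Write the (possibly CPU-converted) pixels into the scratch texture. Any unpremul handling
    // has already been accounted for above, so strip that flag before forwarding.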
1636    if (!this->writeTexturePixels(texture,
1637                                  0, 0, width, height,
1638                                  writeConfig, buffer, rowBytes,
1639                                  flags & ~kUnpremul_PixelOpsFlag)) {
1640        return false;
1641    }
1642
1643    // writeRenderTargetPixels can be called in the midst of drawing another
1644    // object (e.g., when uploading a SW path rendering to the gpu while
1645    // drawing a rect) so preserve the current geometry.
1646    SkMatrix matrix;
1647    matrix.setTranslate(SkIntToScalar(left), SkIntToScalar(top));
1648    GrDrawTarget::AutoGeometryAndStatePush agasp(fGpu, GrDrawTarget::kReset_ASRInit, &matrix);
1649    GrDrawState* drawState = fGpu->drawState();
1650    SkASSERT(effect);
1651    drawState->addColorEffect(effect);
1652
1653    drawState->setRenderTarget(target);
1654
1655    fGpu->drawSimpleRect(SkRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height)), NULL);
1656    return true;
1657}
1658////////////////////////////////////////////////////////////////////////////////
1659
1660GrDrawTarget* GrContext::prepareToDraw(const GrPaint* paint,
1661                                       BufferedDraw buffered,
1662                                       AutoRestoreEffects* are,
1663                                       AutoCheckFlush* acf) {
1664    // All users of this draw state should be freeing up all effects when they're done.
1665    // Otherwise effects that own resources may keep those resources alive indefinitely.
1666    SkASSERT(0 == fDrawState->numColorStages() && 0 == fDrawState->numCoverageStages());
1667
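    // If this draw bypasses the draw buffer but the previous draw was buffered, flush the buffer
    // first so draws reach the GPU in order.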
1668    if (kNo_BufferedDraw == buffered && kYes_BufferedDraw == fLastDrawWasBuffered) {
1669        fDrawBuffer->flush();
1670        fLastDrawWasBuffered = kNo_BufferedDraw;
1671    }
1672    ASSERT_OWNED_RESOURCE(fRenderTarget.get());
1673    if (NULL != paint) {
1674        SkASSERT(NULL != are);
1675        SkASSERT(NULL != acf);
1676        are->set(fDrawState);
1677        fDrawState->setFromPaint(*paint, fViewMatrix, fRenderTarget.get());
1678#if GR_DEBUG_PARTIAL_COVERAGE_CHECK
1679        if ((paint->hasMask() || 0xff != paint->fCoverage) &&
1680            !fGpu->canApplyCoverage()) {
1681            GrPrintf("Partial pixel coverage will be incorrectly blended.\n");
1682        }
1683#endif
1684    } else {
1685        fDrawState->reset(fViewMatrix);
1686        fDrawState->setRenderTarget(fRenderTarget.get());
1687    }
1688    GrDrawTarget* target;
1689    if (kYes_BufferedDraw == buffered) {
1690        fLastDrawWasBuffered = kYes_BufferedDraw;
1691        target = fDrawBuffer;
1692    } else {
1693        SkASSERT(kNo_BufferedDraw == buffered);
1694        fLastDrawWasBuffered = kNo_BufferedDraw;
1695        target = fGpu;
1696    }
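    // Only enable the clip state bit when a clip is installed and it isn't wide open.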
1697    fDrawState->setState(GrDrawState::kClip_StateBit, NULL != fClip &&
1698                                                     !fClip->fClipStack->isWideOpen());
1699    target->setClip(fClip);
1700    SkASSERT(fDrawState == target->drawState());
1701    return target;
1702}
1703
1704/*
1705 * This method finds a path renderer that can draw the specified path on
1706 * the provided target.
1707 * Due to its expense, the software path renderer has been split out so that it
1708 * can be individually allowed/disallowed via the "allowSW" boolean.
1709 */
1710GrPathRenderer* GrContext::getPathRenderer(const SkPath& path,
1711                                           const SkStrokeRec& stroke,
1712                                           const GrDrawTarget* target,
1713                                           bool allowSW,
1714                                           GrPathRendererChain::DrawType drawType,
1715                                           GrPathRendererChain::StencilSupport* stencilSupport) {
1716
1717    if (NULL == fPathRendererChain) {
1718        fPathRendererChain = SkNEW_ARGS(GrPathRendererChain, (this));
1719    }
1720
1721    GrPathRenderer* pr = fPathRendererChain->getPathRenderer(path,
1722                                                             stroke,
1723                                                             target,
1724                                                             drawType,
1725                                                             stencilSupport);
1726
1727    if (NULL == pr && allowSW) {
1728        if (NULL == fSoftwarePathRenderer) {
1729            fSoftwarePathRenderer = SkNEW_ARGS(GrSoftwarePathRenderer, (this));
1730        }
1731        pr = fSoftwarePathRenderer;
1732    }
1733
1734    return pr;
1735}
1736
1737////////////////////////////////////////////////////////////////////////////////
1738bool GrContext::isConfigRenderable(GrPixelConfig config, bool withMSAA) const {
1739    return fGpu->caps()->isConfigRenderable(config, withMSAA);
1740}
1741
1742void GrContext::setupDrawBuffer() {
1743    SkASSERT(NULL == fDrawBuffer);
1744    SkASSERT(NULL == fDrawBufferVBAllocPool);
1745    SkASSERT(NULL == fDrawBufferIBAllocPool);
1746
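    // Create the vertex and index buffer pools that back the in-order draw buffer.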
1747    fDrawBufferVBAllocPool =
1748        SkNEW_ARGS(GrVertexBufferAllocPool, (fGpu, false,
1749                                    DRAW_BUFFER_VBPOOL_BUFFER_SIZE,
1750                                    DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS));
1751    fDrawBufferIBAllocPool =
1752        SkNEW_ARGS(GrIndexBufferAllocPool, (fGpu, false,
1753                                   DRAW_BUFFER_IBPOOL_BUFFER_SIZE,
1754                                   DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS));
1755
1756    fDrawBuffer = SkNEW_ARGS(GrInOrderDrawBuffer, (fGpu,
1757                                                   fDrawBufferVBAllocPool,
1758                                                   fDrawBufferIBAllocPool));
1759
1760    fDrawBuffer->setDrawState(fDrawState);
1761}
1762
1763GrDrawTarget* GrContext::getTextTarget() {
1764    return this->prepareToDraw(NULL, BUFFERED_DRAW, NULL, NULL);
1765}
1766
1767const GrIndexBuffer* GrContext::getQuadIndexBuffer() const {
1768    return fGpu->getQuadIndexBuffer();
1769}
1770
1771namespace {
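// Queries GrConfigConversionEffect for which PM<->UPM conversions (if any) are value-preserving
// on this GPU and returns them through the out params.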
1772void test_pm_conversions(GrContext* ctx, int* pmToUPMValue, int* upmToPMValue) {
1773    GrConfigConversionEffect::PMConversion pmToUPM;
1774    GrConfigConversionEffect::PMConversion upmToPM;
1775    GrConfigConversionEffect::TestForPreservingPMConversions(ctx, &pmToUPM, &upmToPM);
1776    *pmToUPMValue = pmToUPM;
1777    *upmToPMValue = upmToPM;
1778}
1779}
1780
1781const GrEffectRef* GrContext::createPMToUPMEffect(GrTexture* texture,
1782                                                  bool swapRAndB,
1783                                                  const SkMatrix& matrix) {
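    // Lazily run the PM conversion test once per context and cache the results.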
1784    if (!fDidTestPMConversions) {
1785        test_pm_conversions(this, &fPMToUPMConversion, &fUPMToPMConversion);
1786        fDidTestPMConversions = true;
1787    }
1788    GrConfigConversionEffect::PMConversion pmToUPM =
1789        static_cast<GrConfigConversionEffect::PMConversion>(fPMToUPMConversion);
1790    if (GrConfigConversionEffect::kNone_PMConversion != pmToUPM) {
1791        return GrConfigConversionEffect::Create(texture, swapRAndB, pmToUPM, matrix);
1792    } else {
1793        return NULL;
1794    }
1795}
1796
1797const GrEffectRef* GrContext::createUPMToPMEffect(GrTexture* texture,
1798                                                  bool swapRAndB,
1799                                                  const SkMatrix& matrix) {
1800    if (!fDidTestPMConversions) {
1801        test_pm_conversions(this, &fPMToUPMConversion, &fUPMToPMConversion);
1802        fDidTestPMConversions = true;
1803    }
1804    GrConfigConversionEffect::PMConversion upmToPM =
1805        static_cast<GrConfigConversionEffect::PMConversion>(fUPMToPMConversion);
1806    if (GrConfigConversionEffect::kNone_PMConversion != upmToPM) {
1807        return GrConfigConversionEffect::Create(texture, swapRAndB, upmToPM, matrix);
1808    } else {
1809        return NULL;
1810    }
1811}
1812
1813///////////////////////////////////////////////////////////////////////////////
1814#if GR_CACHE_STATS
1815void GrContext::printCacheStats() const {
1816    fTextureCache->printStats();
1817}
1818#endif
1819