GrContext.cpp revision 94ce9ac8624dbb45656b8f5c992fad9c9ff3ee5f

/*
 * Copyright 2011 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */


#include "GrContext.h"

#include "effects/GrConfigConversionEffect.h"
#include "effects/GrDashingEffect.h"
#include "effects/GrSingleTextureEffect.h"

#include "GrAARectRenderer.h"
#include "GrBufferAllocPool.h"
#include "GrGpu.h"
#include "GrDistanceFieldTextContext.h"
#include "GrDrawTargetCaps.h"
#include "GrIndexBuffer.h"
#include "GrInOrderDrawBuffer.h"
#include "GrLayerCache.h"
#include "GrOvalRenderer.h"
#include "GrPathRenderer.h"
#include "GrPathUtils.h"
#include "GrResourceCache.h"
#include "GrResourceCache2.h"
#include "GrSoftwarePathRenderer.h"
#include "GrStencilBuffer.h"
#include "GrStencilAndCoverTextContext.h"
#include "GrStrokeInfo.h"
#include "GrTextStrike.h"
#include "GrTraceMarker.h"
#include "GrTracing.h"
#include "SkDashPathPriv.h"
#include "SkGr.h"
#include "SkRTConf.h"
#include "SkRRect.h"
#include "SkStrokeRec.h"
#include "SkTLazy.h"
#include "SkTLS.h"
#include "SkTraceEvent.h"

// It can be useful to set this to false to test whether a bug is caused by using the
// InOrderDrawBuffer, to compare performance of using/not using InOrderDrawBuffer, or to make
// debugging simpler.
SK_CONF_DECLARE(bool, c_Defer, "gpu.deferContext", true,
                "Defers rendering in GrContext via GrInOrderDrawBuffer.");

#define BUFFERED_DRAW (c_Defer ? kYes_BufferedDraw : kNo_BufferedDraw)
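
// Note: c_Defer is an SkRTConf runtime config, so it can be toggled without recompiling.
// A minimal sketch (assuming the SK_CONF_SET macro from SkRTConf.h is available), e.g. in a
// test harness before the context is created:
//
//   SK_CONF_SET("gpu.deferContext", false);  // render through GrGpu immediately, no draw buffer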

#ifdef SK_DEBUG
    // change this to a 1 to see notifications when partial coverage fails
    #define GR_DEBUG_PARTIAL_COVERAGE_CHECK 0
#else
    #define GR_DEBUG_PARTIAL_COVERAGE_CHECK 0
#endif

static const size_t MAX_RESOURCE_CACHE_COUNT = GR_DEFAULT_RESOURCE_CACHE_COUNT_LIMIT;
static const size_t MAX_RESOURCE_CACHE_BYTES = GR_DEFAULT_RESOURCE_CACHE_MB_LIMIT * 1024 * 1024;

static const size_t DRAW_BUFFER_VBPOOL_BUFFER_SIZE = 1 << 15;
static const int DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS = 4;

static const size_t DRAW_BUFFER_IBPOOL_BUFFER_SIZE = 1 << 11;
static const int DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS = 4;

#define ASSERT_OWNED_RESOURCE(R) SkASSERT(!(R) || (R)->getContext() == this)

// Glorified typedef to avoid including GrDrawState.h in GrContext.h
class GrContext::AutoRestoreEffects : public GrDrawState::AutoRestoreEffects {};

class GrContext::AutoCheckFlush {
public:
    AutoCheckFlush(GrContext* context) : fContext(context) { SkASSERT(NULL != context); }

    ~AutoCheckFlush() {
        if (fContext->fFlushToReduceCacheSize) {
            fContext->flush();
        }
    }

private:
    GrContext* fContext;
};
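
// The draw entry points below share a common preamble: an AutoRestoreEffects plus an
// AutoCheckFlush are created first, then prepareToDraw() returns the target to record into.
// AutoCheckFlush is what ties the resource cache's overbudget signal (see OverbudgetCB) back
// to an actual flush once the current draw call has finished recording.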
87
88GrContext* GrContext::Create(GrBackend backend, GrBackendContext backendContext,
89                             const Options* opts) {
90    GrContext* context;
91    if (NULL == opts) {
92        context = SkNEW_ARGS(GrContext, (Options()));
93    } else {
94        context = SkNEW_ARGS(GrContext, (*opts));
95    }
96
97    if (context->init(backend, backendContext)) {
98        return context;
99    } else {
100        context->unref();
101        return NULL;
102    }
103}
104
105GrContext::GrContext(const Options& opts) : fOptions(opts) {
106    fDrawState = NULL;
107    fGpu = NULL;
108    fClip = NULL;
109    fPathRendererChain = NULL;
110    fSoftwarePathRenderer = NULL;
111    fResourceCache = NULL;
112    fResourceCache2 = NULL;
113    fFontCache = NULL;
114    fDrawBuffer = NULL;
115    fDrawBufferVBAllocPool = NULL;
116    fDrawBufferIBAllocPool = NULL;
117    fFlushToReduceCacheSize = false;
118    fAARectRenderer = NULL;
119    fOvalRenderer = NULL;
120    fViewMatrix.reset();
121    fMaxTextureSizeOverride = 1 << 20;
122}
123
124bool GrContext::init(GrBackend backend, GrBackendContext backendContext) {
125    SkASSERT(NULL == fGpu);
126
127    fGpu = GrGpu::Create(backend, backendContext, this);
128    if (NULL == fGpu) {
129        return false;
130    }
131
132    fDrawState = SkNEW(GrDrawState);
133    fGpu->setDrawState(fDrawState);
134
135    fResourceCache = SkNEW_ARGS(GrResourceCache, (MAX_RESOURCE_CACHE_COUNT,
136                                                  MAX_RESOURCE_CACHE_BYTES));
137    fResourceCache->setOverbudgetCallback(OverbudgetCB, this);
138    fResourceCache2 = SkNEW(GrResourceCache2);
139
140    fFontCache = SkNEW_ARGS(GrFontCache, (fGpu));
141
142    fLayerCache.reset(SkNEW_ARGS(GrLayerCache, (this)));
143
144    fLastDrawWasBuffered = kNo_BufferedDraw;
145
146    fAARectRenderer = SkNEW(GrAARectRenderer);
147    fOvalRenderer = SkNEW(GrOvalRenderer);
148
149    fDidTestPMConversions = false;
150
151    this->setupDrawBuffer();
152
153    return true;
154}
155
156GrContext::~GrContext() {
157    if (NULL == fGpu) {
158        return;
159    }
160
161    this->flush();
162
163    for (int i = 0; i < fCleanUpData.count(); ++i) {
164        (*fCleanUpData[i].fFunc)(this, fCleanUpData[i].fInfo);
165    }
166
167    delete fResourceCache2;
168    fResourceCache2 = NULL;
169    delete fResourceCache;
170    fResourceCache = NULL;
171    delete fFontCache;
172    delete fDrawBuffer;
173    delete fDrawBufferVBAllocPool;
174    delete fDrawBufferIBAllocPool;
175
176    fAARectRenderer->unref();
177    fOvalRenderer->unref();
178
179    fGpu->unref();
180    SkSafeUnref(fPathRendererChain);
181    SkSafeUnref(fSoftwarePathRenderer);
182    fDrawState->unref();
183}
184
185void GrContext::abandonContext() {
186    // abandon first to so destructors
187    // don't try to free the resources in the API.
188    fResourceCache2->abandonAll();
189
190    fGpu->contextAbandonded();
191
192    // a path renderer may be holding onto resources that
193    // are now unusable
194    SkSafeSetNull(fPathRendererChain);
195    SkSafeSetNull(fSoftwarePathRenderer);
196
197    delete fDrawBuffer;
198    fDrawBuffer = NULL;
199
200    delete fDrawBufferVBAllocPool;
201    fDrawBufferVBAllocPool = NULL;
202
203    delete fDrawBufferIBAllocPool;
204    fDrawBufferIBAllocPool = NULL;
205
206    fAARectRenderer->reset();
207    fOvalRenderer->reset();
208
209    fResourceCache->purgeAllUnlocked();
210
211    fFontCache->freeAll();
212    fLayerCache->freeAll();
213}
214
215void GrContext::resetContext(uint32_t state) {
216    fGpu->markContextDirty(state);
217}
218
219void GrContext::freeGpuResources() {
220    this->flush();
221
222    fGpu->purgeResources();
223    if (NULL != fDrawBuffer) {
224        fDrawBuffer->purgeResources();
225    }
226
227    fAARectRenderer->reset();
228    fOvalRenderer->reset();
229
230    fResourceCache->purgeAllUnlocked();
231    fFontCache->freeAll();
232    fLayerCache->freeAll();
233    // a path renderer may be holding onto resources
234    SkSafeSetNull(fPathRendererChain);
235    SkSafeSetNull(fSoftwarePathRenderer);
236}
237
238void GrContext::getResourceCacheUsage(int* resourceCount, size_t* resourceBytes) const {
239  if (NULL != resourceCount) {
240    *resourceCount = fResourceCache->getCachedResourceCount();
241  }
242  if (NULL != resourceBytes) {
243    *resourceBytes = fResourceCache->getCachedResourceBytes();
244  }
245}
246
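// Pick the text context implementation: stencil-and-cover text needs path rendering support
// plus a multisampled render target that already has a stencil buffer; everything else falls
// back to the distance-field text context.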
GrTextContext* GrContext::createTextContext(GrRenderTarget* renderTarget,
                                            const SkDeviceProperties& leakyProperties,
                                            bool enableDistanceFieldFonts) {
    if (fGpu->caps()->pathRenderingSupport()) {
        if (renderTarget->getStencilBuffer() && renderTarget->isMultisampled()) {
            return SkNEW_ARGS(GrStencilAndCoverTextContext, (this, leakyProperties));
        }
    }
    return SkNEW_ARGS(GrDistanceFieldTextContext, (this, leakyProperties,
                                                   enableDistanceFieldFonts));
}

////////////////////////////////////////////////////////////////////////////////

GrTexture* GrContext::findAndRefTexture(const GrTextureDesc& desc,
                                        const GrCacheID& cacheID,
                                        const GrTextureParams* params) {
    GrResourceKey resourceKey = GrTextureImpl::ComputeKey(fGpu, params, desc, cacheID);
    GrGpuResource* resource = fResourceCache->find(resourceKey);
    SkSafeRef(resource);
    return static_cast<GrTexture*>(resource);
}

bool GrContext::isTextureInCache(const GrTextureDesc& desc,
                                 const GrCacheID& cacheID,
                                 const GrTextureParams* params) const {
    GrResourceKey resourceKey = GrTextureImpl::ComputeKey(fGpu, params, desc, cacheID);
    return fResourceCache->hasKey(resourceKey);
}

void GrContext::addStencilBuffer(GrStencilBuffer* sb) {
    ASSERT_OWNED_RESOURCE(sb);

    GrResourceKey resourceKey = GrStencilBuffer::ComputeKey(sb->width(),
                                                            sb->height(),
                                                            sb->numSamples());
    fResourceCache->addResource(resourceKey, sb);
}

GrStencilBuffer* GrContext::findStencilBuffer(int width, int height,
                                              int sampleCnt) {
    GrResourceKey resourceKey = GrStencilBuffer::ComputeKey(width,
                                                            height,
                                                            sampleCnt);
    GrGpuResource* resource = fResourceCache->find(resourceKey);
    return static_cast<GrStencilBuffer*>(resource);
}

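// Nearest-neighbor stretch in 16.16 fixed point: dx/dy are the source steps per destination
// pixel, and starting at dx/2 (resp. dy/2) samples each destination pixel at the center of its
// source footprint. For example, stretching a 2-pixel-wide row to 4 pixels gives dx = 0x8000,
// so the sampled source columns are 0, 0, 1, 1.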
static void stretch_image(void* dst,
                          int dstW,
                          int dstH,
                          const void* src,
                          int srcW,
                          int srcH,
                          size_t bpp) {
    SkFixed dx = (srcW << 16) / dstW;
    SkFixed dy = (srcH << 16) / dstH;

    SkFixed y = dy >> 1;

    size_t dstXLimit = dstW*bpp;
    for (int j = 0; j < dstH; ++j) {
        SkFixed x = dx >> 1;
        const uint8_t* srcRow = reinterpret_cast<const uint8_t *>(src) + (y>>16)*srcW*bpp;
        uint8_t* dstRow = reinterpret_cast<uint8_t *>(dst) + j*dstW*bpp;
        for (size_t i = 0; i < dstXLimit; i += bpp) {
            memcpy(dstRow + i, srcRow + (x>>16)*bpp, bpp);
            x += dx;
        }
        y += dy;
    }
}

namespace {

// position + local coordinate
extern const GrVertexAttrib gVertexAttribs[] = {
    {kVec2f_GrVertexAttribType, 0,               kPosition_GrVertexAttribBinding},
    {kVec2f_GrVertexAttribType, sizeof(SkPoint), kLocalCoord_GrVertexAttribBinding}
};

};

// The desired texture is NPOT and tiled but that isn't supported by
// the current hardware. Resize the texture to be a POT.
GrTexture* GrContext::createResizedTexture(const GrTextureDesc& desc,
                                           const GrCacheID& cacheID,
                                           const void* srcData,
                                           size_t rowBytes,
                                           bool filter) {
    SkAutoTUnref<GrTexture> clampedTexture(this->findAndRefTexture(desc, cacheID, NULL));
    if (NULL == clampedTexture) {
        clampedTexture.reset(this->createTexture(NULL, desc, cacheID, srcData, rowBytes));

        if (NULL == clampedTexture) {
            return NULL;
        }
    }

    GrTextureDesc rtDesc = desc;
    rtDesc.fFlags = rtDesc.fFlags |
                    kRenderTarget_GrTextureFlagBit |
                    kNoStencil_GrTextureFlagBit;
    rtDesc.fWidth  = GrNextPow2(desc.fWidth);
    rtDesc.fHeight = GrNextPow2(desc.fHeight);

    GrTexture* texture = fGpu->createTexture(rtDesc, NULL, 0);

    if (NULL != texture) {
        GrDrawTarget::AutoStateRestore asr(fGpu, GrDrawTarget::kReset_ASRInit);
        GrDrawState* drawState = fGpu->drawState();
        drawState->setRenderTarget(texture->asRenderTarget());

        // If filtering is not desired then we want to ensure all
        // texels in the resampled image are copies of texels from
        // the original.
        GrTextureParams params(SkShader::kClamp_TileMode,
                               filter ? GrTextureParams::kBilerp_FilterMode
                                      : GrTextureParams::kNone_FilterMode);
        drawState->addColorTextureEffect(clampedTexture, SkMatrix::I(), params);

        drawState->setVertexAttribs<gVertexAttribs>(SK_ARRAY_COUNT(gVertexAttribs));

        GrDrawTarget::AutoReleaseGeometry arg(fGpu, 4, 0);

        if (arg.succeeded()) {
            SkPoint* verts = (SkPoint*) arg.vertices();
            verts[0].setIRectFan(0, 0, texture->width(), texture->height(), 2 * sizeof(SkPoint));
            verts[1].setIRectFan(0, 0, 1, 1, 2 * sizeof(SkPoint));
            fGpu->drawNonIndexed(kTriangleFan_GrPrimitiveType, 0, 4);
        }
    } else {
        // TODO: Our CPU stretch doesn't filter. But we create separate
        // stretched textures whether the texture params are filtered or
        // not. Either implement a filtered stretch blit on the CPU or just
        // create one when the FBO case fails.

        rtDesc.fFlags = kNone_GrTextureFlags;
        // no longer need to clamp at min RT size.
        rtDesc.fWidth  = GrNextPow2(desc.fWidth);
        rtDesc.fHeight = GrNextPow2(desc.fHeight);

        // We shouldn't be resizing a compressed texture.
        SkASSERT(!GrPixelConfigIsCompressed(desc.fConfig));

        size_t bpp = GrBytesPerPixel(desc.fConfig);
        GrAutoMalloc<128*128*4> stretchedPixels(bpp * rtDesc.fWidth * rtDesc.fHeight);
        stretch_image(stretchedPixels.get(), rtDesc.fWidth, rtDesc.fHeight,
                      srcData, desc.fWidth, desc.fHeight, bpp);

        size_t stretchedRowBytes = rtDesc.fWidth * bpp;

        texture = fGpu->createTexture(rtDesc, stretchedPixels.get(), stretchedRowBytes);
        SkASSERT(NULL != texture);
    }

    return texture;
}

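// Keyed texture creation. The cache key records whether the request needs a resize (NPOT data
// that will be tiled on hardware without NPOT-tile support) and whether that resize must be
// bilerped; createResizedTexture() above handles those cases, everything else goes straight to
// the GPU.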
GrTexture* GrContext::createTexture(const GrTextureParams* params,
                                    const GrTextureDesc& desc,
                                    const GrCacheID& cacheID,
                                    const void* srcData,
                                    size_t rowBytes,
                                    GrResourceKey* cacheKey) {
    GrResourceKey resourceKey = GrTextureImpl::ComputeKey(fGpu, params, desc, cacheID);

    GrTexture* texture;
    if (GrTextureImpl::NeedsResizing(resourceKey)) {
        // We do not know how to resize compressed textures.
        SkASSERT(!GrPixelConfigIsCompressed(desc.fConfig));

        texture = this->createResizedTexture(desc, cacheID,
                                             srcData, rowBytes,
                                             GrTextureImpl::NeedsBilerp(resourceKey));
    } else {
        texture = fGpu->createTexture(desc, srcData, rowBytes);
    }

    if (NULL != texture) {
        // Adding a resource could put us overbudget. Try to free up the
        // necessary space before adding it.
        fResourceCache->purgeAsNeeded(1, texture->gpuMemorySize());
        fResourceCache->addResource(resourceKey, texture);

        if (NULL != cacheKey) {
            *cacheKey = resourceKey;
        }
    }

    return texture;
}

static GrTexture* create_scratch_texture(GrGpu* gpu,
                                         GrResourceCache* resourceCache,
                                         const GrTextureDesc& desc) {
    GrTexture* texture = gpu->createTexture(desc, NULL, 0);
    if (NULL != texture) {
        GrResourceKey key = GrTextureImpl::ComputeScratchKey(texture->desc());
        // Adding a resource could put us overbudget. Try to free up the
        // necessary space before adding it.
        resourceCache->purgeAsNeeded(1, texture->gpuMemorySize());
        // Make the resource exclusive so future 'find' calls don't return it
        resourceCache->addResource(key, texture, GrResourceCache::kHide_OwnershipFlag);
    }
    return texture;
}

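// Scratch texture lookup. In kApprox_ScratchTexMatch mode the requested dimensions are rounded
// up to the next power of two (with a 16-pixel floor), so e.g. a 24x100 approx request is
// satisfied by a 32x128 scratch texture; kExact_ScratchTexMatch only accepts the exact size.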
GrTexture* GrContext::lockAndRefScratchTexture(const GrTextureDesc& inDesc, ScratchTexMatch match) {

    SkASSERT((inDesc.fFlags & kRenderTarget_GrTextureFlagBit) ||
             !(inDesc.fFlags & kNoStencil_GrTextureFlagBit));

    // Renderable A8 targets are not universally supported (e.g., not on ANGLE)
    SkASSERT(this->isConfigRenderable(kAlpha_8_GrPixelConfig, inDesc.fSampleCnt > 0) ||
             !(inDesc.fFlags & kRenderTarget_GrTextureFlagBit) ||
             (inDesc.fConfig != kAlpha_8_GrPixelConfig));

    if (!fGpu->caps()->reuseScratchTextures() &&
        !(inDesc.fFlags & kRenderTarget_GrTextureFlagBit)) {
        // If we're never recycling this texture we can always make it the right size
        return create_scratch_texture(fGpu, fResourceCache, inDesc);
    }

    GrTextureDesc desc = inDesc;

    if (kApprox_ScratchTexMatch == match) {
        // bin by pow2 with a reasonable min
        static const int MIN_SIZE = 16;
        desc.fWidth  = SkTMax(MIN_SIZE, GrNextPow2(desc.fWidth));
        desc.fHeight = SkTMax(MIN_SIZE, GrNextPow2(desc.fHeight));
    }

    GrGpuResource* resource = NULL;
    int origWidth = desc.fWidth;
    int origHeight = desc.fHeight;

    do {
        GrResourceKey key = GrTextureImpl::ComputeScratchKey(desc);
        // Ensure we have exclusive access to the texture so future 'find' calls don't return it
        resource = fResourceCache->find(key, GrResourceCache::kHide_OwnershipFlag);
        if (NULL != resource) {
            resource->ref();
            break;
        }
        if (kExact_ScratchTexMatch == match) {
            break;
        }
        // We had a cache miss and we are in approx mode, so relax the fit of the flags.

        // We no longer try to reuse textures that were previously used as render targets in
        // situations where no RT is needed; doing otherwise can confuse the video driver and
        // cause significant performance problems in some cases.
        if (desc.fFlags & kNoStencil_GrTextureFlagBit) {
            desc.fFlags = desc.fFlags & ~kNoStencil_GrTextureFlagBit;
        } else {
            break;
        }

    } while (true);

    if (NULL == resource) {
        desc.fFlags = inDesc.fFlags;
        desc.fWidth = origWidth;
        desc.fHeight = origHeight;
        resource = create_scratch_texture(fGpu, fResourceCache, desc);
    }

    return static_cast<GrTexture*>(resource);
}

void GrContext::addExistingTextureToCache(GrTexture* texture) {

    if (NULL == texture) {
        return;
    }

    // This texture should already have a cache entry since it was once
    // attached
    SkASSERT(NULL != texture->getCacheEntry());

    // Conceptually, the cache entry is going to assume responsibility
    // for the creation ref. Assert refcnt == 1.
    // Except that this also gets called when the texture is prematurely
    // abandoned. In that case the ref count may be > 1.
    // SkASSERT(texture->unique());

    if (fGpu->caps()->reuseScratchTextures() || NULL != texture->asRenderTarget()) {
        // Since this texture came from an AutoScratchTexture it should
        // still be in the exclusive pile. Recycle it.
        fResourceCache->makeNonExclusive(texture->getCacheEntry());
        this->purgeCache();
    } else {
        // When we aren't reusing textures we know this scratch texture
        // will never be reused and would just be wasting time in the cache
        fResourceCache->makeNonExclusive(texture->getCacheEntry());
        fResourceCache->deleteResource(texture->getCacheEntry());
    }
}

void GrContext::unlockScratchTexture(GrTexture* texture) {
    if (texture->wasDestroyed()) {
        return;
    }

    ASSERT_OWNED_RESOURCE(texture);
    SkASSERT(NULL != texture->getCacheEntry());

    // If this is a scratch texture we detached it from the cache
    // while it was locked (to avoid two callers simultaneously getting
    // the same texture).
    if (texture->getCacheEntry()->key().isScratch()) {
        if (fGpu->caps()->reuseScratchTextures() || NULL != texture->asRenderTarget()) {
            fResourceCache->makeNonExclusive(texture->getCacheEntry());
            this->purgeCache();
        } else if (texture->unique()) {
            // Only the cache now knows about this texture. Since we're never
            // reusing scratch textures (in this code path) it would just be
            // wasting time sitting in the cache.
            fResourceCache->makeNonExclusive(texture->getCacheEntry());
            fResourceCache->deleteResource(texture->getCacheEntry());
        } else {
            // There is still a non-cache ref, but we don't really want to
            // re-add the texture to the cache (since it will never be reused).
            // Instead, give up the cache's ref and leave the decision up to
            // addExistingTextureToCache once its ref count reaches 0. For
            // this to work we need to leave it in the exclusive list.
            texture->impl()->setFlag((GrTextureFlags) GrTextureImpl::kReturnToCache_FlagBit);
            // Give up the cache's ref to the texture
            texture->unref();
        }
    }
}

void GrContext::purgeCache() {
    if (NULL != fResourceCache) {
        fResourceCache->purgeAsNeeded();
    }
}

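// Registered with the resource cache in init(). When the cache goes over budget while a frame is
// still being recorded, resources referenced by the deferred draw buffer can't be freed yet, so
// the callback just schedules a flush; AutoCheckFlush performs it once the current draw call has
// finished recording (flush() then clears fFlushToReduceCacheSize).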
bool GrContext::OverbudgetCB(void* data) {
    SkASSERT(NULL != data);

    GrContext* context = reinterpret_cast<GrContext*>(data);

    // Flush the InOrderDrawBuffer to possibly free up some textures
    context->fFlushToReduceCacheSize = true;

    return true;
}


GrTexture* GrContext::createUncachedTexture(const GrTextureDesc& descIn,
                                            void* srcData,
                                            size_t rowBytes) {
    GrTextureDesc descCopy = descIn;
    return fGpu->createTexture(descCopy, srcData, rowBytes);
}

void GrContext::getResourceCacheLimits(int* maxTextures, size_t* maxTextureBytes) const {
    fResourceCache->getLimits(maxTextures, maxTextureBytes);
}

void GrContext::setResourceCacheLimits(int maxTextures, size_t maxTextureBytes) {
    fResourceCache->setLimits(maxTextures, maxTextureBytes);
}

int GrContext::getMaxTextureSize() const {
    return SkTMin(fGpu->caps()->maxTextureSize(), fMaxTextureSizeOverride);
}

int GrContext::getMaxRenderTargetSize() const {
    return fGpu->caps()->maxRenderTargetSize();
}

int GrContext::getMaxSampleCount() const {
    return fGpu->caps()->maxSampleCount();
}

///////////////////////////////////////////////////////////////////////////////

GrTexture* GrContext::wrapBackendTexture(const GrBackendTextureDesc& desc) {
    return fGpu->wrapBackendTexture(desc);
}

GrRenderTarget* GrContext::wrapBackendRenderTarget(const GrBackendRenderTargetDesc& desc) {
    return fGpu->wrapBackendRenderTarget(desc);
}

///////////////////////////////////////////////////////////////////////////////

bool GrContext::supportsIndex8PixelConfig(const GrTextureParams* params,
                                          int width, int height) const {
    const GrDrawTargetCaps* caps = fGpu->caps();
    if (!caps->isConfigTexturable(kIndex_8_GrPixelConfig)) {
        return false;
    }

    bool isPow2 = SkIsPow2(width) && SkIsPow2(height);

    if (!isPow2) {
        bool tiled = NULL != params && params->isTiled();
        if (tiled && !caps->npotTextureTileSupport()) {
            return false;
        }
    }
    return true;
}


////////////////////////////////////////////////////////////////////////////////

void GrContext::clear(const SkIRect* rect,
                      const GrColor color,
                      bool canIgnoreRect,
                      GrRenderTarget* renderTarget) {
    ASSERT_OWNED_RESOURCE(renderTarget);
    AutoRestoreEffects are;
    AutoCheckFlush acf(this);
    GR_CREATE_TRACE_MARKER_CONTEXT("GrContext::clear", this);
    GrDrawTarget* target = this->prepareToDraw(NULL, BUFFERED_DRAW, &are, &acf);
    if (NULL == target) {
        return;
    }
    target->clear(rect, color, canIgnoreRect, renderTarget);
}

void GrContext::drawPaint(const GrPaint& origPaint) {
    // set rect to be big enough to fill the space, but not super-huge, so we
    // don't overflow fixed-point implementations
    SkRect r;
    r.setLTRB(0, 0,
              SkIntToScalar(getRenderTarget()->width()),
              SkIntToScalar(getRenderTarget()->height()));
    SkMatrix inverse;
    SkTCopyOnFirstWrite<GrPaint> paint(origPaint);
    AutoMatrix am;
    GR_CREATE_TRACE_MARKER_CONTEXT("GrContext::drawPaint", this);

    // We attempt to map r by the inverse matrix and draw that. mapRect will
    // map the four corners and bound them with a new rect. This will not
    // produce a correct result for some perspective matrices.
    if (!this->getMatrix().hasPerspective()) {
        if (!fViewMatrix.invert(&inverse)) {
            GrPrintf("Could not invert matrix\n");
            return;
        }
        inverse.mapRect(&r);
    } else {
        if (!am.setIdentity(this, paint.writable())) {
            GrPrintf("Could not invert matrix\n");
            return;
        }
    }
    // by definition this fills the entire clip, no need for AA
    if (paint->isAntiAlias()) {
        paint.writable()->setAntiAlias(false);
    }
    this->drawRect(*paint, r);
}

#ifdef SK_DEVELOPER
void GrContext::dumpFontCache() const {
    fFontCache->dump();
}
#endif

////////////////////////////////////////////////////////////////////////////////

/*  Create a triangle strip that strokes the specified rect. There are 8
    unique vertices, but we repeat the last 2 to close up. Alternatively we
    could use an index array and then only send 8 verts, but it's not clear
    that would be faster.
 */
static void setStrokeRectStrip(SkPoint verts[10], SkRect rect,
                               SkScalar width) {
    const SkScalar rad = SkScalarHalf(width);
    rect.sort();

    verts[0].set(rect.fLeft + rad, rect.fTop + rad);
    verts[1].set(rect.fLeft - rad, rect.fTop - rad);
    verts[2].set(rect.fRight - rad, rect.fTop + rad);
    verts[3].set(rect.fRight + rad, rect.fTop - rad);
    verts[4].set(rect.fRight - rad, rect.fBottom - rad);
    verts[5].set(rect.fRight + rad, rect.fBottom + rad);
    verts[6].set(rect.fLeft + rad, rect.fBottom - rad);
    verts[7].set(rect.fLeft - rad, rect.fBottom + rad);
    verts[8] = verts[0];
    verts[9] = verts[1];
}

static inline bool is_irect(const SkRect& r) {
    return SkScalarIsInt(r.fLeft)  && SkScalarIsInt(r.fTop) &&
           SkScalarIsInt(r.fRight) && SkScalarIsInt(r.fBottom);
}

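// Decides whether a rect draw can take the analytic AA rect path. The blend must either allow
// coverage to be folded into alpha or not require coverage AA to be disabled, and the target
// must not be multisampled. Strokes additionally need the combined matrix to preserve axis
// alignment, while fills only need right angles preserved (when the rotated-AA-rect path is
// compiled in) and skip AA when the mapped device rect lands exactly on integer coordinates.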
static bool apply_aa_to_rect(GrDrawTarget* target,
                             const SkRect& rect,
                             SkScalar strokeWidth,
                             const SkMatrix& combinedMatrix,
                             SkRect* devBoundRect) {
    if (!target->getDrawState().canTweakAlphaForCoverage() &&
        target->shouldDisableCoverageAAForBlend()) {
#ifdef SK_DEBUG
        //GrPrintf("Turning off AA to correctly apply blend.\n");
#endif
        return false;
    }
    const GrDrawState& drawState = target->getDrawState();
    if (drawState.getRenderTarget()->isMultisampled()) {
        return false;
    }

#if defined(SHADER_AA_FILL_RECT) || !defined(IGNORE_ROT_AA_RECT_OPT)
    if (strokeWidth >= 0) {
#endif
        if (!combinedMatrix.preservesAxisAlignment()) {
            return false;
        }

#if defined(SHADER_AA_FILL_RECT) || !defined(IGNORE_ROT_AA_RECT_OPT)
    } else {
        if (!combinedMatrix.preservesRightAngles()) {
            return false;
        }
    }
#endif

    combinedMatrix.mapRect(devBoundRect, rect);
    if (strokeWidth < 0) {
        return !is_irect(*devBoundRect);
    }

    return true;
}

static inline bool rect_contains_inclusive(const SkRect& rect, const SkPoint& point) {
    return point.fX >= rect.fLeft && point.fX <= rect.fRight &&
           point.fY >= rect.fTop && point.fY <= rect.fBottom;
}

void GrContext::drawRect(const GrPaint& paint,
                         const SkRect& rect,
                         const GrStrokeInfo* strokeInfo) {
    if (NULL != strokeInfo && strokeInfo->isDashed()) {
        SkPath path;
        path.addRect(rect);
        this->drawPath(paint, path, *strokeInfo);
        return;
    }

    AutoRestoreEffects are;
    AutoCheckFlush acf(this);
    GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);
    if (NULL == target) {
        return;
    }

    GR_CREATE_TRACE_MARKER("GrContext::drawRect", target);
    SkScalar width = NULL == strokeInfo ? -1 : strokeInfo->getStrokeRec().getWidth();
    SkMatrix matrix = target->drawState()->getViewMatrix();

    // Check if this is a full RT draw and can be replaced with a clear. We don't bother checking
    // cases where the RT is fully inside a stroke.
    if (width < 0) {
        SkRect rtRect;
        target->getDrawState().getRenderTarget()->getBoundsRect(&rtRect);
        SkRect clipSpaceRTRect = rtRect;
        bool checkClip = false;
        if (NULL != this->getClip()) {
            checkClip = true;
            clipSpaceRTRect.offset(SkIntToScalar(this->getClip()->fOrigin.fX),
                                   SkIntToScalar(this->getClip()->fOrigin.fY));
        }
        // Does the clip contain the entire RT?
        if (!checkClip || target->getClip()->fClipStack->quickContains(clipSpaceRTRect)) {
            SkMatrix invM;
            if (!matrix.invert(&invM)) {
                return;
            }
            // Does the rect bound the RT?
            SkPoint srcSpaceRTQuad[4];
            invM.mapRectToQuad(srcSpaceRTQuad, rtRect);
            if (rect_contains_inclusive(rect, srcSpaceRTQuad[0]) &&
                rect_contains_inclusive(rect, srcSpaceRTQuad[1]) &&
                rect_contains_inclusive(rect, srcSpaceRTQuad[2]) &&
                rect_contains_inclusive(rect, srcSpaceRTQuad[3])) {
                // Will it blend?
                GrColor clearColor;
                if (paint.isOpaqueAndConstantColor(&clearColor)) {
                    target->clear(NULL, clearColor, true);
                    return;
                }
            }
        }
    }

    SkRect devBoundRect;
    bool needAA = paint.isAntiAlias() &&
                  !target->getDrawState().getRenderTarget()->isMultisampled();
    bool doAA = needAA && apply_aa_to_rect(target, rect, width, matrix, &devBoundRect);

    if (doAA) {
        GrDrawState::AutoViewMatrixRestore avmr;
        if (!avmr.setIdentity(target->drawState())) {
            return;
        }
        if (width >= 0) {
            // width >= 0 implies strokeInfo is non-NULL (width defaults to -1 above).
            const SkStrokeRec& strokeRec = strokeInfo->getStrokeRec();
            fAARectRenderer->strokeAARect(this->getGpu(), target, rect,
                                          matrix, devBoundRect,
                                          strokeRec);
        } else {
            // filled AA rect
            fAARectRenderer->fillAARect(this->getGpu(), target,
                                        rect, matrix, devBoundRect);
        }
        return;
    }

    if (width >= 0) {
        // TODO: consider making static vertex buffers for these cases.
        // Hairline could be done by just adding closing vertex to
        // unitSquareVertexBuffer()

        static const int worstCaseVertCount = 10;
        target->drawState()->setDefaultVertexAttribs();
        GrDrawTarget::AutoReleaseGeometry geo(target, worstCaseVertCount, 0);

        if (!geo.succeeded()) {
            GrPrintf("Failed to get space for vertices!\n");
            return;
        }

        GrPrimitiveType primType;
        int vertCount;
        SkPoint* vertex = geo.positions();

        if (width > 0) {
            vertCount = 10;
            primType = kTriangleStrip_GrPrimitiveType;
            setStrokeRectStrip(vertex, rect, width);
        } else {
            // hairline
            vertCount = 5;
            primType = kLineStrip_GrPrimitiveType;
            vertex[0].set(rect.fLeft, rect.fTop);
            vertex[1].set(rect.fRight, rect.fTop);
            vertex[2].set(rect.fRight, rect.fBottom);
            vertex[3].set(rect.fLeft, rect.fBottom);
            vertex[4].set(rect.fLeft, rect.fTop);
        }

        target->drawNonIndexed(primType, 0, vertCount);
    } else {
        // filled BW rect
        target->drawSimpleRect(rect);
    }
}

void GrContext::drawRectToRect(const GrPaint& paint,
                               const SkRect& dstRect,
                               const SkRect& localRect,
                               const SkMatrix* localMatrix) {
    AutoRestoreEffects are;
    AutoCheckFlush acf(this);
    GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);
    if (NULL == target) {
        return;
    }

    GR_CREATE_TRACE_MARKER("GrContext::drawRectToRect", target);

    target->drawRect(dstRect, &localRect, localMatrix);
}

namespace {

extern const GrVertexAttrib gPosUVColorAttribs[] = {
    {kVec2f_GrVertexAttribType,  0, kPosition_GrVertexAttribBinding },
    {kVec2f_GrVertexAttribType,  sizeof(SkPoint), kLocalCoord_GrVertexAttribBinding },
    {kVec4ub_GrVertexAttribType, 2*sizeof(SkPoint), kColor_GrVertexAttribBinding}
};

extern const GrVertexAttrib gPosColorAttribs[] = {
    {kVec2f_GrVertexAttribType,  0, kPosition_GrVertexAttribBinding},
    {kVec4ub_GrVertexAttribType, sizeof(SkPoint), kColor_GrVertexAttribBinding},
};

static void set_vertex_attributes(GrDrawState* drawState,
                                  const SkPoint* texCoords,
                                  const GrColor* colors,
                                  int* colorOffset,
                                  int* texOffset) {
    *texOffset = -1;
    *colorOffset = -1;

    if (NULL != texCoords && NULL != colors) {
        *texOffset = sizeof(SkPoint);
        *colorOffset = 2*sizeof(SkPoint);
        drawState->setVertexAttribs<gPosUVColorAttribs>(3);
    } else if (NULL != texCoords) {
        *texOffset = sizeof(SkPoint);
        drawState->setVertexAttribs<gPosUVColorAttribs>(2);
    } else if (NULL != colors) {
        *colorOffset = sizeof(SkPoint);
        drawState->setVertexAttribs<gPosColorAttribs>(2);
    } else {
        drawState->setVertexAttribs<gPosColorAttribs>(1);
    }
}

};

void GrContext::drawVertices(const GrPaint& paint,
                             GrPrimitiveType primitiveType,
                             int vertexCount,
                             const SkPoint positions[],
                             const SkPoint texCoords[],
                             const GrColor colors[],
                             const uint16_t indices[],
                             int indexCount) {
    AutoRestoreEffects are;
    AutoCheckFlush acf(this);
    GrDrawTarget::AutoReleaseGeometry geo; // must be inside AutoCheckFlush scope

    GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);
    if (NULL == target) {
        return;
    }
    GrDrawState* drawState = target->drawState();

    GR_CREATE_TRACE_MARKER("GrContext::drawVertices", target);

    int colorOffset = -1, texOffset = -1;
    set_vertex_attributes(drawState, texCoords, colors, &colorOffset, &texOffset);

    size_t vertexSize = drawState->getVertexSize();
    if (sizeof(SkPoint) != vertexSize) {
        if (!geo.set(target, vertexCount, 0)) {
            GrPrintf("Failed to get space for vertices!\n");
            return;
        }
        void* curVertex = geo.vertices();

        for (int i = 0; i < vertexCount; ++i) {
            *((SkPoint*)curVertex) = positions[i];

            if (texOffset >= 0) {
                *(SkPoint*)((intptr_t)curVertex + texOffset) = texCoords[i];
            }
            if (colorOffset >= 0) {
                *(GrColor*)((intptr_t)curVertex + colorOffset) = colors[i];
            }
            curVertex = (void*)((intptr_t)curVertex + vertexSize);
        }
    } else {
        target->setVertexSourceToArray(positions, vertexCount);
    }

    // we don't currently apply offscreen AA to this path. Need improved
    // management of GrDrawTarget's geometry to avoid copying points per-tile.

    if (NULL != indices) {
        target->setIndexSourceToArray(indices, indexCount);
        target->drawIndexed(primitiveType, 0, 0, vertexCount, indexCount);
        target->resetIndexSource();
    } else {
        target->drawNonIndexed(primitiveType, 0, vertexCount);
    }
}

///////////////////////////////////////////////////////////////////////////////

void GrContext::drawRRect(const GrPaint& paint,
                          const SkRRect& rrect,
                          const GrStrokeInfo& strokeInfo) {
    if (rrect.isEmpty()) {
        return;
    }

    if (strokeInfo.isDashed()) {
        SkPath path;
        path.addRRect(rrect);
        this->drawPath(paint, path, strokeInfo);
        return;
    }

    AutoRestoreEffects are;
    AutoCheckFlush acf(this);
    GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);
    if (NULL == target) {
        return;
    }

    GR_CREATE_TRACE_MARKER("GrContext::drawRRect", target);

    const SkStrokeRec& strokeRec = strokeInfo.getStrokeRec();

    if (!fOvalRenderer->drawRRect(target, this, paint.isAntiAlias(), rrect, strokeRec)) {
        SkPath path;
        path.addRRect(rrect);
        this->internalDrawPath(target, paint.isAntiAlias(), path, strokeInfo);
    }
}

///////////////////////////////////////////////////////////////////////////////

void GrContext::drawDRRect(const GrPaint& paint,
                           const SkRRect& outer,
                           const SkRRect& inner) {
    if (outer.isEmpty()) {
        return;
    }

    AutoRestoreEffects are;
    AutoCheckFlush acf(this);
    GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);
    if (NULL == target) {
        return;
    }

    GR_CREATE_TRACE_MARKER("GrContext::drawDRRect", target);

    if (!fOvalRenderer->drawDRRect(target, this, paint.isAntiAlias(), outer, inner)) {
        SkPath path;
        path.addRRect(inner);
        path.addRRect(outer);
        path.setFillType(SkPath::kEvenOdd_FillType);

        GrStrokeInfo fillRec(SkStrokeRec::kFill_InitStyle);
        this->internalDrawPath(target, paint.isAntiAlias(), path, fillRec);
    }
}

///////////////////////////////////////////////////////////////////////////////

void GrContext::drawOval(const GrPaint& paint,
                         const SkRect& oval,
                         const GrStrokeInfo& strokeInfo) {
    if (oval.isEmpty()) {
        return;
    }

    if (strokeInfo.isDashed()) {
        SkPath path;
        path.addOval(oval);
        this->drawPath(paint, path, strokeInfo);
        return;
    }

    AutoRestoreEffects are;
    AutoCheckFlush acf(this);
    GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);
    if (NULL == target) {
        return;
    }

    GR_CREATE_TRACE_MARKER("GrContext::drawOval", target);

    const SkStrokeRec& strokeRec = strokeInfo.getStrokeRec();

    if (!fOvalRenderer->drawOval(target, this, paint.isAntiAlias(), oval, strokeRec)) {
        SkPath path;
        path.addOval(oval);
        this->internalDrawPath(target, paint.isAntiAlias(), path, strokeInfo);
    }
}

// Can 'path' be drawn as a pair of filled nested rectangles?
static bool is_nested_rects(GrDrawTarget* target,
                            const SkPath& path,
                            const SkStrokeRec& stroke,
                            SkRect rects[2]) {
    SkASSERT(stroke.isFillStyle());

    if (path.isInverseFillType()) {
        return false;
    }

    const GrDrawState& drawState = target->getDrawState();

    // TODO: this restriction could be lifted if we were willing to apply
    // the matrix to all the points individually rather than just to the rect
    if (!drawState.getViewMatrix().preservesAxisAlignment()) {
        return false;
    }

    if (!target->getDrawState().canTweakAlphaForCoverage() &&
        target->shouldDisableCoverageAAForBlend()) {
        return false;
    }

    SkPath::Direction dirs[2];
    if (!path.isNestedRects(rects, dirs)) {
        return false;
    }

    if (SkPath::kWinding_FillType == path.getFillType() && dirs[0] == dirs[1]) {
        // The two rects need to be wound opposite to each other
        return false;
    }

    // Right now, nested rects where the margin is not the same width
    // all around do not render correctly
    const SkScalar* outer = rects[0].asScalars();
    const SkScalar* inner = rects[1].asScalars();

    SkScalar margin = SkScalarAbs(outer[0] - inner[0]);
    for (int i = 1; i < 4; ++i) {
        SkScalar temp = SkScalarAbs(outer[i] - inner[i]);
        if (!SkScalarNearlyEqual(margin, temp)) {
            return false;
        }
    }

    return true;
}

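// drawPath dispatch: dashed strokes are handled first (a dashed line can sometimes be drawn
// directly via GrDashingEffect, otherwise the dash is baked into a new path and we recurse with
// a non-dashed stroke). Concave AA fills that are really two nested rects use the AA rect
// renderer, ovals go to the oval renderer, and everything else falls through to
// internalDrawPath().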
void GrContext::drawPath(const GrPaint& paint, const SkPath& path, const GrStrokeInfo& strokeInfo) {

    if (path.isEmpty()) {
       if (path.isInverseFillType()) {
           this->drawPaint(paint);
       }
       return;
    }

    if (strokeInfo.isDashed()) {
        SkPoint pts[2];
        if (path.isLine(pts)) {
            AutoRestoreEffects are;
            AutoCheckFlush acf(this);
            GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);
            if (NULL == target) {
                return;
            }
            GrDrawState* drawState = target->drawState();

            SkMatrix origViewMatrix = drawState->getViewMatrix();
            GrDrawState::AutoViewMatrixRestore avmr;
            if (avmr.setIdentity(target->drawState())) {
                if (GrDashingEffect::DrawDashLine(pts, paint, strokeInfo, fGpu, target,
                                                  origViewMatrix)) {
                    return;
                }
            }
        }

        // Filter dashed path into new path with the dashing applied
        const SkPathEffect::DashInfo& info = strokeInfo.getDashInfo();
        SkTLazy<SkPath> effectPath;
        GrStrokeInfo newStrokeInfo(strokeInfo, false);
        SkStrokeRec* stroke = newStrokeInfo.getStrokeRecPtr();
        if (SkDashPath::FilterDashPath(effectPath.init(), path, stroke, NULL, info)) {
            this->drawPath(paint, *effectPath.get(), newStrokeInfo);
            return;
        }

        this->drawPath(paint, path, newStrokeInfo);
        return;
    }

    // Note that internalDrawPath may sw-rasterize the path into a scratch texture.
    // Scratch textures can be recycled after they are returned to the texture
    // cache. This presents a potential hazard for buffered drawing. However,
    // the writePixels that uploads to the scratch will perform a flush so we're
    // OK.
    AutoRestoreEffects are;
    AutoCheckFlush acf(this);
    GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);
    if (NULL == target) {
        return;
    }
    GrDrawState* drawState = target->drawState();

    GR_CREATE_TRACE_MARKER1("GrContext::drawPath", target, "Is Convex", path.isConvex());

    const SkStrokeRec& strokeRec = strokeInfo.getStrokeRec();

    bool useCoverageAA = paint.isAntiAlias() && !drawState->getRenderTarget()->isMultisampled();

    if (useCoverageAA && strokeRec.getWidth() < 0 && !path.isConvex()) {
        // Concave AA paths are expensive - try to avoid them for special cases
        SkRect rects[2];

        if (is_nested_rects(target, path, strokeRec, rects)) {
            SkMatrix origViewMatrix = drawState->getViewMatrix();
            GrDrawState::AutoViewMatrixRestore avmr;
            if (!avmr.setIdentity(target->drawState())) {
                return;
            }

            fAARectRenderer->fillAANestedRects(this->getGpu(), target, rects, origViewMatrix);
            return;
        }
    }

    SkRect ovalRect;
    bool isOval = path.isOval(&ovalRect);

    if (!isOval || path.isInverseFillType()
        || !fOvalRenderer->drawOval(target, this, paint.isAntiAlias(), ovalRect, strokeRec)) {
        this->internalDrawPath(target, paint.isAntiAlias(), path, strokeInfo);
    }
}

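// Path renderer selection happens in two passes: first try to find a renderer for the path with
// its original stroke and without allowing software rasterization; if that fails (and the stroke
// isn't hairline-equivalent), apply the stroke to the geometry, retry as a fill, and finally
// allow the software path renderer as a last resort.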
void GrContext::internalDrawPath(GrDrawTarget* target, bool useAA, const SkPath& path,
                                 const GrStrokeInfo& strokeInfo) {
    SkASSERT(!path.isEmpty());

    GR_CREATE_TRACE_MARKER("GrContext::internalDrawPath", target);

    // An assumption here is that the path renderer will use some form of tweaking
    // the src color (either the input alpha or in the frag shader) to implement
    // AA. If we have some future driver-mojo path AA that can do the right
    // thing WRT the blend then we'll need some query on the PR.
    bool useCoverageAA = useAA &&
        !target->getDrawState().getRenderTarget()->isMultisampled() &&
        !target->shouldDisableCoverageAAForBlend();

    GrPathRendererChain::DrawType type =
        useCoverageAA ? GrPathRendererChain::kColorAntiAlias_DrawType :
                        GrPathRendererChain::kColor_DrawType;

    const SkPath* pathPtr = &path;
    SkTLazy<SkPath> tmpPath;
    SkTCopyOnFirstWrite<SkStrokeRec> stroke(strokeInfo.getStrokeRec());

    // Try a 1st time without stroking the path and without allowing the SW renderer
    GrPathRenderer* pr = this->getPathRenderer(*pathPtr, *stroke, target, false, type);

    if (NULL == pr) {
        if (!GrPathRenderer::IsStrokeHairlineOrEquivalent(*stroke, this->getMatrix(), NULL)) {
            // It didn't work the 1st time, so try again with the stroked path
            if (stroke->applyToPath(tmpPath.init(), *pathPtr)) {
                pathPtr = tmpPath.get();
                stroke.writable()->setFillStyle();
                if (pathPtr->isEmpty()) {
                    return;
                }
            }
        }

        // This time, allow SW renderer
        pr = this->getPathRenderer(*pathPtr, *stroke, target, true, type);
    }

    if (NULL == pr) {
#ifdef SK_DEBUG
        GrPrintf("Unable to find path renderer compatible with path.\n");
#endif
        return;
    }

    pr->drawPath(*pathPtr, *stroke, target, useCoverageAA);
}

////////////////////////////////////////////////////////////////////////////////

void GrContext::flush(int flagsBitfield) {
    if (NULL == fDrawBuffer) {
        return;
    }

    if (kDiscard_FlushBit & flagsBitfield) {
        fDrawBuffer->reset();
    } else {
        fDrawBuffer->flush();
    }
    fFlushToReduceCacheSize = false;
}

bool GrContext::writeTexturePixels(GrTexture* texture,
                                   int left, int top, int width, int height,
                                   GrPixelConfig config, const void* buffer, size_t rowBytes,
                                   uint32_t flags) {
    ASSERT_OWNED_RESOURCE(texture);

    if ((kUnpremul_PixelOpsFlag & flags) || !fGpu->canWriteTexturePixels(texture, config)) {
        if (NULL != texture->asRenderTarget()) {
            return this->writeRenderTargetPixels(texture->asRenderTarget(),
                                                 left, top, width, height,
                                                 config, buffer, rowBytes, flags);
        } else {
            return false;
        }
    }

    if (!(kDontFlush_PixelOpsFlag & flags)) {
        this->flush();
    }

    return fGpu->writeTexturePixels(texture, left, top, width, height,
                                    config, buffer, rowBytes);
}

bool GrContext::readTexturePixels(GrTexture* texture,
                                  int left, int top, int width, int height,
                                  GrPixelConfig config, void* buffer, size_t rowBytes,
                                  uint32_t flags) {
    ASSERT_OWNED_RESOURCE(texture);

    GrRenderTarget* target = texture->asRenderTarget();
    if (NULL != target) {
        return this->readRenderTargetPixels(target,
                                            left, top, width, height,
                                            config, buffer, rowBytes,
                                            flags);
    } else {
        // TODO: make this more efficient for cases where we're reading the entire
        //       texture, i.e., use GetTexImage() instead

        // create scratch rendertarget and read from that
        GrAutoScratchTexture ast;
        GrTextureDesc desc;
        desc.fFlags = kRenderTarget_GrTextureFlagBit;
        desc.fWidth = width;
        desc.fHeight = height;
        desc.fConfig = config;
        desc.fOrigin = kTopLeft_GrSurfaceOrigin;
        ast.set(this, desc, kExact_ScratchTexMatch);
        GrTexture* dst = ast.texture();
        if (NULL != dst && NULL != (target = dst->asRenderTarget())) {
            this->copyTexture(texture, target, NULL);
            return this->readRenderTargetPixels(target,
                                                left, top, width, height,
                                                config, buffer, rowBytes,
                                                flags);
        }

        return false;
    }
}

#include "SkConfig8888.h"

// toggles between RGBA and BGRA
static SkColorType toggle_colortype32(SkColorType ct) {
    if (kRGBA_8888_SkColorType == ct) {
        return kBGRA_8888_SkColorType;
    } else {
        SkASSERT(kBGRA_8888_SkColorType == ct);
        return kRGBA_8888_SkColorType;
    }
}

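// Read-back pipeline: flush pending draws, work out which conversions are needed (y-flip,
// R/B swap, unpremul), try to fold as many of them as possible into a draw to a scratch render
// target (using a config-conversion effect), read the pixels, and finally apply whatever
// conversions could not be handled on the GPU with a CPU pixel conversion.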
1395bool GrContext::readRenderTargetPixels(GrRenderTarget* target,
1396                                       int left, int top, int width, int height,
1397                                       GrPixelConfig dstConfig, void* buffer, size_t rowBytes,
1398                                       uint32_t flags) {
1399    ASSERT_OWNED_RESOURCE(target);
1400
1401    if (NULL == target) {
1402        target = fRenderTarget.get();
1403        if (NULL == target) {
1404            return false;
1405        }
1406    }
1407
1408    if (!(kDontFlush_PixelOpsFlag & flags)) {
1409        this->flush();
1410    }
1411
1412    // Determine which conversions have to be applied: flipY, swapRAnd, and/or unpremul.
1413
1414    // If fGpu->readPixels would incur a y-flip cost then we will read the pixels upside down. We'll
1415    // either do the flipY by drawing into a scratch with a matrix or on the cpu after the read.
1416    bool flipY = fGpu->readPixelsWillPayForYFlip(target, left, top,
1417                                                 width, height, dstConfig,
1418                                                 rowBytes);
1419    // We ignore the preferred config if it is different than our config unless it is an R/B swap.
1420    // In that case we'll perform an R and B swap while drawing to a scratch texture of the swapped
1421    // config. Then we will call readPixels on the scratch with the swapped config. The swaps during
1422    // the draw cancels out the fact that we call readPixels with a config that is R/B swapped from
1423    // dstConfig.
1424    GrPixelConfig readConfig = dstConfig;
1425    bool swapRAndB = false;
1426    if (GrPixelConfigSwapRAndB(dstConfig) ==
1427        fGpu->preferredReadPixelsConfig(dstConfig, target->config())) {
1428        readConfig = GrPixelConfigSwapRAndB(readConfig);
1429        swapRAndB = true;
1430    }
1431
1432    bool unpremul = SkToBool(kUnpremul_PixelOpsFlag & flags);
1433
1434    if (unpremul && !GrPixelConfigIs8888(dstConfig)) {
1435        // The unpremul flag is only allowed for these two configs.
1436        return false;
1437    }
1438
1439    // If the src is a texture and we would have to do conversions after read pixels, we instead
1440    // do the conversions by drawing the src to a scratch texture. If we handle any of the
1441    // conversions in the draw we set the corresponding bool to false so that we don't reapply it
1442    // on the read back pixels.
1443    GrTexture* src = target->asTexture();
1444    GrAutoScratchTexture ast;
1445    if (NULL != src && (swapRAndB || unpremul || flipY)) {
1446        // Make the scratch a render target because we don't have a robust readTexturePixels as of
1447        // yet. It calls this function.
1448        GrTextureDesc desc;
1449        desc.fFlags = kRenderTarget_GrTextureFlagBit;
1450        desc.fWidth = width;
1451        desc.fHeight = height;
1452        desc.fConfig = readConfig;
1453        desc.fOrigin = kTopLeft_GrSurfaceOrigin;
1454
1455        // When a full read back is faster than a partial we could always make the scratch exactly
1456        // match the passed rect. However, if we see many different size rectangles we will trash
1457        // our texture cache and pay the cost of creating and destroying many textures. So, we only
1458        // request an exact match when the caller is reading an entire RT.
1459        ScratchTexMatch match = kApprox_ScratchTexMatch;
1460        if (0 == left &&
1461            0 == top &&
1462            target->width() == width &&
1463            target->height() == height &&
1464            fGpu->fullReadPixelsIsFasterThanPartial()) {
1465            match = kExact_ScratchTexMatch;
1466        }
1467        ast.set(this, desc, match);
1468        GrTexture* texture = ast.texture();
1469        if (texture) {
1470            // compute a matrix to perform the draw
1471            SkMatrix textureMatrix;
1472            textureMatrix.setTranslate(SK_Scalar1 * left, SK_Scalar1 * top);
1473            textureMatrix.postIDiv(src->width(), src->height());
1474
1475            SkAutoTUnref<const GrEffect> effect;
1476            if (unpremul) {
1477                effect.reset(this->createPMToUPMEffect(src, swapRAndB, textureMatrix));
1478                if (NULL != effect) {
1479                    unpremul = false; // we no longer need to do this on CPU after the read back.
1480                }
1481            }
1482            // If we failed to create a PM->UPM effect and have no other conversions to perform then
1483            // there is no longer any point in using the scratch.
1484            if (NULL != effect || flipY || swapRAndB) {
1485                if (!effect) {
1486                    effect.reset(GrConfigConversionEffect::Create(
1487                                                    src,
1488                                                    swapRAndB,
1489                                                    GrConfigConversionEffect::kNone_PMConversion,
1490                                                    textureMatrix));
1491                }
1492                swapRAndB = false; // we will handle the swap in the draw.
1493
1494                // We protect the existing geometry here since it may not be
1495                // clear to the caller that a draw operation (i.e., drawSimpleRect)
1496                // can be invoked in this method
1497                GrDrawTarget::AutoGeometryAndStatePush agasp(fGpu, GrDrawTarget::kReset_ASRInit);
1498                GrDrawState* drawState = fGpu->drawState();
1499                SkASSERT(effect);
1500                drawState->addColorEffect(effect);
1501
1502                drawState->setRenderTarget(texture->asRenderTarget());
1503                SkRect rect = SkRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height));
1504                fGpu->drawSimpleRect(rect);
1505                // we want to read back from the scratch's origin
1506                left = 0;
1507                top = 0;
1508                target = texture->asRenderTarget();
1509            }
1510        }
1511    }
1512    if (!fGpu->readPixels(target,
1513                          left, top, width, height,
1514                          readConfig, buffer, rowBytes)) {
1515        return false;
1516    }
1517    // Perform any conversions we weren't able to perform using a scratch texture.
1518    if (unpremul || swapRAndB) {
1519        SkDstPixelInfo dstPI;
1520        if (!GrPixelConfig2ColorType(dstConfig, &dstPI.fColorType)) {
1521            return false;
1522        }
1523        dstPI.fAlphaType = kUnpremul_SkAlphaType;
1524        dstPI.fPixels = buffer;
1525        dstPI.fRowBytes = rowBytes;
1526
1527        SkSrcPixelInfo srcPI;
1528        srcPI.fColorType = swapRAndB ? toggle_colortype32(dstPI.fColorType) : dstPI.fColorType;
1529        srcPI.fAlphaType = kPremul_SkAlphaType;
1530        srcPI.fPixels = buffer;
1531        srcPI.fRowBytes = rowBytes;
1532
1533        return srcPI.convertPixelsTo(&dstPI, width, height);
1534    }
1535    return true;
1536}
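// Usage sketch (illustrative only; 'context' and 'rt' stand in for a caller's GrContext and
// GrRenderTarget and are not defined in this file):
//
//     SkAutoTMalloc<uint32_t> pixels(rt->width() * rt->height());
//     bool ok = context->readRenderTargetPixels(rt,
//                                               0, 0, rt->width(), rt->height(),
//                                               kRGBA_8888_GrPixelConfig,
//                                               pixels.get(),
//                                               rt->width() * sizeof(uint32_t),
//                                               GrContext::kUnpremul_PixelOpsFlag);
//
// Passing kUnpremul_PixelOpsFlag is only legal for 8888 configs, as enforced above.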
1537
1538void GrContext::resolveRenderTarget(GrRenderTarget* target) {
1539    SkASSERT(target);
1540    ASSERT_OWNED_RESOURCE(target);
1541    // In the future we may track whether there are any pending draws to this
1542    // target. We don't today so we always perform a flush. We don't promise
1543    // this to our clients, though.
1544    this->flush();
1545    if (NULL != fGpu) {
1546        fGpu->resolveRenderTarget(target);
1547    }
1548}
1549
1550void GrContext::discardRenderTarget(GrRenderTarget* renderTarget) {
1551    SkASSERT(renderTarget);
1552    ASSERT_OWNED_RESOURCE(renderTarget);
1553    AutoRestoreEffects are;
1554    AutoCheckFlush acf(this);
1555    GrDrawTarget* target = this->prepareToDraw(NULL, BUFFERED_DRAW, &are, &acf);
1556    if (NULL == target) {
1557        return;
1558    }
1559    target->discard(renderTarget);
1560}
1561
1562void GrContext::copyTexture(GrTexture* src, GrRenderTarget* dst, const SkIPoint* topLeft) {
1563    if (NULL == src || NULL == dst) {
1564        return;
1565    }
1566    ASSERT_OWNED_RESOURCE(src);
1567
1568    // Writes pending to the source texture are not tracked, so a flush
1569    // is required to ensure that the copy captures the most recent contents
1570    // of the source texture. See similar behavior in
1571    // GrContext::resolveRenderTarget.
1572    this->flush();
1573
1574    GrDrawTarget::AutoStateRestore asr(fGpu, GrDrawTarget::kReset_ASRInit);
1575    GrDrawState* drawState = fGpu->drawState();
1576    drawState->setRenderTarget(dst);
1577    SkMatrix sampleM;
1578    sampleM.setIDiv(src->width(), src->height());
1579    SkIRect srcRect = SkIRect::MakeWH(dst->width(), dst->height());
1580    if (NULL != topLeft) {
1581        srcRect.offset(*topLeft);
1582    }
1583    SkIRect srcBounds = SkIRect::MakeWH(src->width(), src->height());
1584    if (!srcRect.intersect(srcBounds)) {
1585        return;
1586    }
1587    sampleM.preTranslate(SkIntToScalar(srcRect.fLeft), SkIntToScalar(srcRect.fTop));
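    // Note (illustrative): sampleM now maps a dst-space position (x, y) to normalized src
    // coords ((x + srcRect.fLeft) / src->width(), (y + srcRect.fTop) / src->height()). E.g.
    // with a 256x256 src and topLeft = (16, 32), dst pixel (0, 0) samples src texel (16, 32).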
1588    drawState->addColorTextureEffect(src, sampleM);
1589    SkRect dstR = SkRect::MakeWH(SkIntToScalar(srcRect.width()), SkIntToScalar(srcRect.height()));
1590    fGpu->drawSimpleRect(dstR);
1591}
1592
1593bool GrContext::writeRenderTargetPixels(GrRenderTarget* target,
1594                                        int left, int top, int width, int height,
1595                                        GrPixelConfig srcConfig,
1596                                        const void* buffer,
1597                                        size_t rowBytes,
1598                                        uint32_t flags) {
1599    ASSERT_OWNED_RESOURCE(target);
1600
1601    if (NULL == target) {
1602        target = fRenderTarget.get();
1603        if (NULL == target) {
1604            return false;
1605        }
1606    }
1607
1608    // TODO: when the underlying API has a direct way to do this we should use it
1609    // (e.g. glDrawPixels on desktop GL).
1610
1611    // We will always call some form of writeTexturePixels and we will pass our flags on to it.
1612    // Thus, we don't perform a flush here since that call will do it (if the
1613    // kDontFlush_PixelOpsFlag isn't set).
1614
1615    // If the RT is also a texture and we don't have to premultiply then take the texture path.
1616    // We expect this to be at least as fast, since it doesn't use an intermediate texture as we
1617    // do below.
1618
1619#if !defined(SK_BUILD_FOR_MAC)
1620    // At least some drivers on the Mac get confused when glTexImage2D is called on a texture
1621    // attached to an FBO. The FBO still sees the old image. TODO: determine what OS versions and/or
1622    // HW is affected.
1623    if (NULL != target->asTexture() && !(kUnpremul_PixelOpsFlag & flags) &&
1624        fGpu->canWriteTexturePixels(target->asTexture(), srcConfig)) {
1625        return this->writeTexturePixels(target->asTexture(),
1626                                        left, top, width, height,
1627                                        srcConfig, buffer, rowBytes, flags);
1628    }
1629#endif
1630
1631    // We ignore the preferred config unless it is a R/B swap of the src config. In that case
1632    // we will upload the original src data to a scratch texture but we will spoof it as the swapped
1633    // config. This scratch will then have R and B swapped. We correct for this by swapping again
1634    // when drawing the scratch to the dst using a conversion effect.
1635    bool swapRAndB = false;
1636    GrPixelConfig writeConfig = srcConfig;
1637    if (GrPixelConfigSwapRAndB(srcConfig) ==
1638        fGpu->preferredWritePixelsConfig(srcConfig, target->config())) {
1639        writeConfig = GrPixelConfigSwapRAndB(srcConfig);
1640        swapRAndB = true;
1641    }
1642
1643    GrTextureDesc desc;
1644    desc.fWidth = width;
1645    desc.fHeight = height;
1646    desc.fConfig = writeConfig;
1647    GrAutoScratchTexture ast(this, desc);
1648    GrTexture* texture = ast.texture();
1649    if (NULL == texture) {
1650        return false;
1651    }
1652
1653    SkAutoTUnref<const GrEffect> effect;
1654    SkMatrix textureMatrix;
1655    textureMatrix.setIDiv(texture->width(), texture->height());
1656
1657    // tmp buffer for the case where we must SW-convert the pixels to premul (allocated below)
1658    SkAutoSTMalloc<128 * 128, uint32_t> tmpPixels(0);
1659
1660    if (kUnpremul_PixelOpsFlag & flags) {
1661        if (!GrPixelConfigIs8888(srcConfig)) {
1662            return false;
1663        }
1664        effect.reset(this->createUPMToPMEffect(texture, swapRAndB, textureMatrix));
1665        // handle the unpremul step on the CPU if we couldn't create an effect to do it.
1666        if (NULL == effect) {
1667            SkSrcPixelInfo srcPI;
1668            if (!GrPixelConfig2ColorType(srcConfig, &srcPI.fColorType)) {
1669                return false;
1670            }
1671            srcPI.fAlphaType = kUnpremul_SkAlphaType;
1672            srcPI.fPixels = buffer;
1673            srcPI.fRowBytes = rowBytes;
1674
1675            tmpPixels.reset(width * height);
1676
1677            SkDstPixelInfo dstPI;
1678            dstPI.fColorType = srcPI.fColorType;
1679            dstPI.fAlphaType = kPremul_SkAlphaType;
1680            dstPI.fPixels = tmpPixels.get();
1681            dstPI.fRowBytes = 4 * width;
1682
1683            if (!srcPI.convertPixelsTo(&dstPI, width, height)) {
1684                return false;
1685            }
1686
1687            buffer = tmpPixels.get();
1688            rowBytes = 4 * width;
1689        }
1690    }
1691    if (NULL == effect) {
1692        effect.reset(GrConfigConversionEffect::Create(texture,
1693                                                      swapRAndB,
1694                                                      GrConfigConversionEffect::kNone_PMConversion,
1695                                                      textureMatrix));
1696    }
1697
1698    if (!this->writeTexturePixels(texture,
1699                                  0, 0, width, height,
1700                                  writeConfig, buffer, rowBytes,
1701                                  flags & ~kUnpremul_PixelOpsFlag)) {
1702        return false;
1703    }
1704
1705    // writeRenderTargetPixels can be called in the midst of drawing another
1706    // object (e.g., when uploading a SW path rendering to the gpu while
1707    // drawing a rect) so preserve the current geometry.
1708    SkMatrix matrix;
1709    matrix.setTranslate(SkIntToScalar(left), SkIntToScalar(top));
1710    GrDrawTarget::AutoGeometryAndStatePush agasp(fGpu, GrDrawTarget::kReset_ASRInit, &matrix);
1711    GrDrawState* drawState = fGpu->drawState();
1712    SkASSERT(effect);
1713    drawState->addColorEffect(effect);
1714
1715    drawState->setRenderTarget(target);
1716
1717    fGpu->drawSimpleRect(SkRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height)));
1718    return true;
1719}
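// Usage sketch (illustrative only; 'context', 'rt', 'srcPixels', 'w', and 'h' are hypothetical
// caller-side values):
//
//     // Upload a w x h block of unpremultiplied RGBA pixels to the top-left corner of 'rt'.
//     bool ok = context->writeRenderTargetPixels(rt, 0, 0, w, h,
//                                                kRGBA_8888_GrPixelConfig,
//                                                srcPixels, w * sizeof(uint32_t),
//                                                GrContext::kUnpremul_PixelOpsFlag);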
1720////////////////////////////////////////////////////////////////////////////////
1721
1722GrDrawTarget* GrContext::prepareToDraw(const GrPaint* paint,
1723                                       BufferedDraw buffered,
1724                                       AutoRestoreEffects* are,
1725                                       AutoCheckFlush* acf) {
1726    // All users of this draw state should be freeing up all effects when they're done.
1727    // Otherwise effects that own resources may keep those resources alive indefinitely.
1728    SkASSERT(0 == fDrawState->numColorStages() && 0 == fDrawState->numCoverageStages());
1729
1730    if (NULL == fGpu) {
1731        return NULL;
1732    }
1733
1734    if (kNo_BufferedDraw == buffered && kYes_BufferedDraw == fLastDrawWasBuffered) {
1735        fDrawBuffer->flush();
1736        fLastDrawWasBuffered = kNo_BufferedDraw;
1737    }
1738    ASSERT_OWNED_RESOURCE(fRenderTarget.get());
1739    if (NULL != paint) {
1740        SkASSERT(NULL != are);
1741        SkASSERT(NULL != acf);
1742        are->set(fDrawState);
1743        fDrawState->setFromPaint(*paint, fViewMatrix, fRenderTarget.get());
1744#if GR_DEBUG_PARTIAL_COVERAGE_CHECK
1745        if ((paint->hasMask() || 0xff != paint->fCoverage) &&
1746            !fDrawState->couldApplyCoverage(fGpu->caps())) {
1747            GrPrintf("Partial pixel coverage will be incorrectly blended.\n");
1748        }
1749#endif
1750        if (fDrawState->getBlendOpts() & GrDrawState::kSkipDraw_BlendOptFlag) {
1751            are->set(NULL);
1752            return NULL;
1753        }
1754        // Clear any vertex attributes configured for the previous use of the
1755        // GrDrawState which can effect which blend optimizations are in effect.
1756        fDrawState->setDefaultVertexAttribs();
1757    } else {
1758        fDrawState->reset(fViewMatrix);
1759        fDrawState->setRenderTarget(fRenderTarget.get());
1760    }
1761    GrDrawTarget* target;
1762    if (kYes_BufferedDraw == buffered) {
1763        fLastDrawWasBuffered = kYes_BufferedDraw;
1764        target = fDrawBuffer;
1765    } else {
1766        SkASSERT(kNo_BufferedDraw == buffered);
1767        fLastDrawWasBuffered = kNo_BufferedDraw;
1768        target = fGpu;
1769    }
1770    fDrawState->setState(GrDrawState::kClip_StateBit, NULL != fClip &&
1771                                                     !fClip->fClipStack->isWideOpen());
1772    target->setClip(fClip);
1773    SkASSERT(fDrawState == target->drawState());
1774    return target;
1775}
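// Typical internal call pattern (a sketch of how the drawing entry points in this file use
// prepareToDraw; 'paint' is a caller-provided GrPaint):
//
//     AutoRestoreEffects are;
//     AutoCheckFlush acf(this);
//     GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);
//     if (NULL == target) {
//         return;
//     }
//     // ... issue draws on 'target'; 'are' and 'acf' clean up when they go out of scope ...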
1776
1777/*
1778 * This method finds a path renderer that can draw the specified path on
1779 * the provided target.
1780 * Due to its expense, the software path renderer has been split out so it
1781 * can be individually allowed/disallowed via the "allowSW" boolean.
1782 */
1783GrPathRenderer* GrContext::getPathRenderer(const SkPath& path,
1784                                           const SkStrokeRec& stroke,
1785                                           const GrDrawTarget* target,
1786                                           bool allowSW,
1787                                           GrPathRendererChain::DrawType drawType,
1788                                           GrPathRendererChain::StencilSupport* stencilSupport) {
1789
1790    if (NULL == fPathRendererChain) {
1791        fPathRendererChain = SkNEW_ARGS(GrPathRendererChain, (this));
1792    }
1793
1794    GrPathRenderer* pr = fPathRendererChain->getPathRenderer(path,
1795                                                             stroke,
1796                                                             target,
1797                                                             drawType,
1798                                                             stencilSupport);
1799
1800    if (NULL == pr && allowSW) {
1801        if (NULL == fSoftwarePathRenderer) {
1802            fSoftwarePathRenderer = SkNEW_ARGS(GrSoftwarePathRenderer, (this));
1803        }
1804        pr = fSoftwarePathRenderer;
1805    }
1806
1807    return pr;
1808}
1809
1810////////////////////////////////////////////////////////////////////////////////
1811bool GrContext::isConfigRenderable(GrPixelConfig config, bool withMSAA) const {
1812    return fGpu->caps()->isConfigRenderable(config, withMSAA);
1813}
1814
1815int GrContext::getRecommendedSampleCount(GrPixelConfig config,
1816                                         SkScalar dpi) const {
1817    if (!this->isConfigRenderable(config, true)) {
1818        return 0;
1819    }
1820    int chosenSampleCount = 0;
1821    if (fGpu->caps()->pathRenderingSupport()) {
1822        if (dpi >= 250.0f) {
1823            chosenSampleCount = 4;
1824        } else {
1825            chosenSampleCount = 16;
1826        }
1827    }
1828    return chosenSampleCount <= fGpu->caps()->maxSampleCount() ?
1829        chosenSampleCount : 0;
1830}
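// Worked example: with path rendering support and dpi = 96, chosenSampleCount is 16; if the
// GPU's maxSampleCount() is only 8, the final check returns 0 (no MSAA recommended). Without
// path rendering support chosenSampleCount stays 0, so 0 is always returned.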
1831
1832void GrContext::setupDrawBuffer() {
1833    SkASSERT(NULL == fDrawBuffer);
1834    SkASSERT(NULL == fDrawBufferVBAllocPool);
1835    SkASSERT(NULL == fDrawBufferIBAllocPool);
1836
1837    fDrawBufferVBAllocPool =
1838        SkNEW_ARGS(GrVertexBufferAllocPool, (fGpu, false,
1839                                    DRAW_BUFFER_VBPOOL_BUFFER_SIZE,
1840                                    DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS));
1841    fDrawBufferIBAllocPool =
1842        SkNEW_ARGS(GrIndexBufferAllocPool, (fGpu, false,
1843                                   DRAW_BUFFER_IBPOOL_BUFFER_SIZE,
1844                                   DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS));
1845
1846    fDrawBuffer = SkNEW_ARGS(GrInOrderDrawBuffer, (fGpu,
1847                                                   fDrawBufferVBAllocPool,
1848                                                   fDrawBufferIBAllocPool));
1849
1850    fDrawBuffer->setDrawState(fDrawState);
1851}
1852
1853GrDrawTarget* GrContext::getTextTarget() {
1854    return this->prepareToDraw(NULL, BUFFERED_DRAW, NULL, NULL);
1855}
1856
1857const GrIndexBuffer* GrContext::getQuadIndexBuffer() const {
1858    return fGpu->getQuadIndexBuffer();
1859}
1860
1861namespace {
1862void test_pm_conversions(GrContext* ctx, int* pmToUPMValue, int* upmToPMValue) {
1863    GrConfigConversionEffect::PMConversion pmToUPM;
1864    GrConfigConversionEffect::PMConversion upmToPM;
1865    GrConfigConversionEffect::TestForPreservingPMConversions(ctx, &pmToUPM, &upmToPM);
1866    *pmToUPMValue = pmToUPM;
1867    *upmToPMValue = upmToPM;
1868}
1869}
1870
1871const GrEffect* GrContext::createPMToUPMEffect(GrTexture* texture,
1872                                               bool swapRAndB,
1873                                               const SkMatrix& matrix) {
1874    if (!fDidTestPMConversions) {
1875        test_pm_conversions(this, &fPMToUPMConversion, &fUPMToPMConversion);
1876        fDidTestPMConversions = true;
1877    }
1878    GrConfigConversionEffect::PMConversion pmToUPM =
1879        static_cast<GrConfigConversionEffect::PMConversion>(fPMToUPMConversion);
1880    if (GrConfigConversionEffect::kNone_PMConversion != pmToUPM) {
1881        return GrConfigConversionEffect::Create(texture, swapRAndB, pmToUPM, matrix);
1882    } else {
1883        return NULL;
1884    }
1885}
1886
1887const GrEffect* GrContext::createUPMToPMEffect(GrTexture* texture,
1888                                               bool swapRAndB,
1889                                               const SkMatrix& matrix) {
1890    if (!fDidTestPMConversions) {
1891        test_pm_conversions(this, &fPMToUPMConversion, &fUPMToPMConversion);
1892        fDidTestPMConversions = true;
1893    }
1894    GrConfigConversionEffect::PMConversion upmToPM =
1895        static_cast<GrConfigConversionEffect::PMConversion>(fUPMToPMConversion);
1896    if (GrConfigConversionEffect::kNone_PMConversion != upmToPM) {
1897        return GrConfigConversionEffect::Create(texture, swapRAndB, upmToPM, matrix);
1898    } else {
1899        return NULL;
1900    }
1901}
1902
1903GrPath* GrContext::createPath(const SkPath& inPath, const SkStrokeRec& stroke) {
1904    SkASSERT(fGpu->caps()->pathRenderingSupport());
1905
1906    // TODO: now we add to fResourceCache. This should change to fResourceCache2.
1907    GrResourceKey resourceKey = GrPath::ComputeKey(inPath, stroke);
1908    GrPath* path = static_cast<GrPath*>(fResourceCache->find(resourceKey));
1909    if (NULL != path && path->isEqualTo(inPath, stroke)) {
1910        path->ref();
1911    } else {
1912        path = fGpu->createPath(inPath, stroke);
1913        fResourceCache->purgeAsNeeded(1, path->gpuMemorySize());
1914        fResourceCache->addResource(resourceKey, path);
1915    }
1916    return path;
1917}
1918
1919void GrContext::addResourceToCache(const GrResourceKey& resourceKey, GrGpuResource* resource) {
1920    fResourceCache->purgeAsNeeded(1, resource->gpuMemorySize());
1921    fResourceCache->addResource(resourceKey, resource);
1922}
1923
1924GrGpuResource* GrContext::findAndRefCachedResource(const GrResourceKey& resourceKey) {
1925    GrGpuResource* resource = fResourceCache->find(resourceKey);
1926    SkSafeRef(resource);
1927    return resource;
1928}
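// Sketch of the intended find-or-create pattern for cached resources (mirrors createPath above;
// 'key' and the creation step are caller-specific and hypothetical):
//
//     GrGpuResource* resource = context->findAndRefCachedResource(key);
//     if (NULL == resource) {
//         resource = /* create the resource, which comes with a ref */;
//         context->addResourceToCache(key, resource);
//     }
//     // ... use 'resource' and unref it when done ...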
1929
1930void GrContext::addGpuTraceMarker(const GrGpuTraceMarker* marker) {
1931    fGpu->addGpuTraceMarker(marker);
1932    if (NULL != fDrawBuffer) {
1933        fDrawBuffer->addGpuTraceMarker(marker);
1934    }
1935}
1936
1937void GrContext::removeGpuTraceMarker(const GrGpuTraceMarker* marker) {
1938    fGpu->removeGpuTraceMarker(marker);
1939    if (NULL != fDrawBuffer) {
1940        fDrawBuffer->removeGpuTraceMarker(marker);
1941    }
1942}
1943
1944///////////////////////////////////////////////////////////////////////////////
1945#if GR_CACHE_STATS
1946void GrContext::printCacheStats() const {
1947    fResourceCache->printStats();
1948}
1949#endif
1950