// GrContext.cpp revision 1f3c73825b8a1752abc6b74fbce978a430de6473
1
2/*
3 * Copyright 2011 Google Inc.
4 *
5 * Use of this source code is governed by a BSD-style license that can be
6 * found in the LICENSE file.
7 */
8
9
#include "GrContext.h"

#include "effects/GrSingleTextureEffect.h"
#include "effects/GrConfigConversionEffect.h"

#include "GrBufferAllocPool.h"
#include "GrGpu.h"
#include "GrDrawTargetCaps.h"
#include "GrIndexBuffer.h"
#include "GrInOrderDrawBuffer.h"
#include "GrOvalRenderer.h"
#include "GrPathRenderer.h"
#include "GrPathUtils.h"
#include "GrResourceCache.h"
#include "GrSoftwarePathRenderer.h"
#include "GrStencilBuffer.h"
#include "GrTextStrike.h"
#include "SkRTConf.h"
#include "SkStrokeRec.h"
#include "SkTLazy.h"
#include "SkTLS.h"
#include "SkTrace.h"

#include <stdint.h>
32
SK_DEFINE_INST_COUNT(GrContext)
SK_DEFINE_INST_COUNT(GrDrawState)

// It can be useful to set this to false to test whether a bug is caused by using the
// InOrderDrawBuffer, to compare performance of using/not using InOrderDrawBuffer, or to make
// debugging simpler.
SK_CONF_DECLARE(bool, c_Defer, "gpu.deferContext", true,
                "Defers rendering in GrContext via GrInOrderDrawBuffer.");

// Selects deferred vs. immediate rendering based on the runtime config above.
#define BUFFERED_DRAW (c_Defer ? kYes_BufferedDraw : kNo_BufferedDraw)

// When we're using coverage AA but the blend is incompatible (given gpu
// limitations) should we disable AA or draw wrong?
#define DISABLE_COVERAGE_AA_FOR_BLEND 1

#if GR_DEBUG
    // change this to a 1 to see notifications when partial coverage fails
    #define GR_DEBUG_PARTIAL_COVERAGE_CHECK 0
#else
    #define GR_DEBUG_PARTIAL_COVERAGE_CHECK 0
#endif

// Texture cache budget: maximum entry count and maximum total bytes.
static const size_t MAX_TEXTURE_CACHE_COUNT = 2048;
static const size_t MAX_TEXTURE_CACHE_BYTES = GR_DEFAULT_TEXTURE_CACHE_MB_LIMIT * 1024 * 1024;

// Sizing of the vertex/index buffer pools that back the deferred draw buffer.
static const size_t DRAW_BUFFER_VBPOOL_BUFFER_SIZE = 1 << 15;
static const int DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS = 4;

static const size_t DRAW_BUFFER_IBPOOL_BUFFER_SIZE = 1 << 11;
static const int DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS = 4;

// Sanity check that a resource handed to this context was created by it.
#define ASSERT_OWNED_RESOURCE(R) GrAssert(!(R) || (R)->getContext() == this)
65
// Glorified typedef to avoid including GrDrawState.h in GrContext.h.
// All behavior comes from GrDrawState::AutoRestoreEffects.
class GrContext::AutoRestoreEffects : public GrDrawState::AutoRestoreEffects {};
68
69GrContext* GrContext::Create(GrBackend backend, GrBackendContext backendContext) {
70    GrContext* context = SkNEW(GrContext);
71    if (context->init(backend, backendContext)) {
72        return context;
73    } else {
74        context->unref();
75        return NULL;
76    }
77}
78
namespace {
// TLS-slot helpers for the per-thread GrContext instance counter.
void* CreateThreadInstanceCount() {
    return SkNEW_ARGS(int, (0));
}
void DeleteThreadInstanceCount(void* v) {
    delete reinterpret_cast<int*>(v);
}
// Lazily allocates (and on thread exit frees) one int counter per thread.
#define THREAD_INSTANCE_COUNT \
    (*reinterpret_cast<int*>(SkTLS::Get(CreateThreadInstanceCount, DeleteThreadInstanceCount)))
}
89
// Members are nulled here; real initialization happens in init() so that
// Create() can report failure. The TLS counter tracks live contexts per thread.
GrContext::GrContext() {
    ++THREAD_INSTANCE_COUNT;
    fDrawState = NULL;
    fGpu = NULL;
    fClip = NULL;
    fPathRendererChain = NULL;
    fSoftwarePathRenderer = NULL;
    fTextureCache = NULL;
    fFontCache = NULL;
    fDrawBuffer = NULL;
    fDrawBufferVBAllocPool = NULL;
    fDrawBufferIBAllocPool = NULL;
    fAARectRenderer = NULL;
    fOvalRenderer = NULL;
    fViewMatrix.reset();
}
106
// Second-stage construction: creates the GPU backend and the context-owned
// caches and renderers. Returns false (leaving the context safely
// destructible) when the backend can't be created.
bool GrContext::init(GrBackend backend, GrBackendContext backendContext) {
    GrAssert(NULL == fGpu);

    fGpu = GrGpu::Create(backend, backendContext, this);
    if (NULL == fGpu) {
        return false;
    }

    // The context owns the draw state; the gpu references it.
    fDrawState = SkNEW(GrDrawState);
    fGpu->setDrawState(fDrawState);

    fTextureCache = SkNEW_ARGS(GrResourceCache,
                               (MAX_TEXTURE_CACHE_COUNT,
                                MAX_TEXTURE_CACHE_BYTES));
    // Get notified (via OverbudgetCB) when the cache exceeds its budget.
    fTextureCache->setOverbudgetCallback(OverbudgetCB, this);

    fFontCache = SkNEW_ARGS(GrFontCache, (fGpu));

    fLastDrawWasBuffered = kNo_BufferedDraw;

    fAARectRenderer = SkNEW(GrAARectRenderer);
    fOvalRenderer = SkNEW(GrOvalRenderer);

    fDidTestPMConversions = false;

    this->setupDrawBuffer();

    return true;
}
136
// Number of GrContexts currently alive on the calling thread.
int GrContext::GetThreadInstanceCount() {
    return THREAD_INSTANCE_COUNT;
}
140
GrContext::~GrContext() {
    // Run client-registered cleanup callbacks first, while everything is
    // still alive.
    for (int i = 0; i < fCleanUpData.count(); ++i) {
        (*fCleanUpData[i].fFunc)(this, fCleanUpData[i].fInfo);
    }

    // init() failed (or never ran): none of the members below were created.
    if (NULL == fGpu) {
        return;
    }

    this->flush();

    // Since the gpu can hold scratch textures, give it a chance to let go
    // of them before freeing the texture cache
    fGpu->purgeResources();

    delete fTextureCache;
    fTextureCache = NULL;
    delete fFontCache;
    delete fDrawBuffer;
    delete fDrawBufferVBAllocPool;
    delete fDrawBufferIBAllocPool;

    fAARectRenderer->unref();
    fOvalRenderer->unref();

    // The gpu is released last among the GPU-facing objects it backs.
    fGpu->unref();
    GrSafeUnref(fPathRendererChain);
    GrSafeUnref(fSoftwarePathRenderer);
    fDrawState->unref();

    --THREAD_INSTANCE_COUNT;
}
173
// Handles loss of the backend context: abandons everything tied to it, then
// rebuilds the draw buffer so the context can keep being used.
void GrContext::contextLost() {
    this->contextDestroyed();
    this->setupDrawBuffer();
}
178
// Tears down all backend-tied state. Unlike the destructor, resources are
// abandoned (not deleted through the now-invalid 3D API).
void GrContext::contextDestroyed() {
    // abandon first to so destructors
    // don't try to free the resources in the API.
    fGpu->abandonResources();

    // a path renderer may be holding onto resources that
    // are now unusable
    GrSafeSetNull(fPathRendererChain);
    GrSafeSetNull(fSoftwarePathRenderer);

    delete fDrawBuffer;
    fDrawBuffer = NULL;

    delete fDrawBufferVBAllocPool;
    fDrawBufferVBAllocPool = NULL;

    delete fDrawBufferIBAllocPool;
    fDrawBufferIBAllocPool = NULL;

    fAARectRenderer->reset();
    fOvalRenderer->reset();

    fTextureCache->purgeAllUnlocked();
    fFontCache->freeAll();
    fGpu->markContextDirty();
}
205
// Marks cached backend state dirty (per the 'state' bits) so it is re-sent
// before the next draw; used after 3D API calls made outside of Ganesh.
void GrContext::resetContext(uint32_t state) {
    fGpu->markContextDirty(state);
}
209
// Releases as much GPU memory as possible while keeping the context usable:
// flushes pending work, then purges caches and cached renderer geometry.
void GrContext::freeGpuResources() {
    this->flush();

    fGpu->purgeResources();

    fAARectRenderer->reset();
    fOvalRenderer->reset();

    fTextureCache->purgeAllUnlocked();
    fFontCache->freeAll();
    // a path renderer may be holding onto resources
    GrSafeSetNull(fPathRendererChain);
    GrSafeSetNull(fSoftwarePathRenderer);
}
224
225size_t GrContext::getGpuTextureCacheBytes() const {
226  return fTextureCache->getCachedResourceBytes();
227}
228
229////////////////////////////////////////////////////////////////////////////////
230
231GrTexture* GrContext::findAndRefTexture(const GrTextureDesc& desc,
232                                        const GrCacheID& cacheID,
233                                        const GrTextureParams* params) {
234    GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheID);
235    GrResource* resource = fTextureCache->find(resourceKey);
236    SkSafeRef(resource);
237    return static_cast<GrTexture*>(resource);
238}
239
240bool GrContext::isTextureInCache(const GrTextureDesc& desc,
241                                 const GrCacheID& cacheID,
242                                 const GrTextureParams* params) const {
243    GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheID);
244    return fTextureCache->hasKey(resourceKey);
245}
246
247void GrContext::addStencilBuffer(GrStencilBuffer* sb) {
248    ASSERT_OWNED_RESOURCE(sb);
249
250    GrResourceKey resourceKey = GrStencilBuffer::ComputeKey(sb->width(),
251                                                            sb->height(),
252                                                            sb->numSamples());
253    fTextureCache->addResource(resourceKey, sb);
254}
255
256GrStencilBuffer* GrContext::findStencilBuffer(int width, int height,
257                                              int sampleCnt) {
258    GrResourceKey resourceKey = GrStencilBuffer::ComputeKey(width,
259                                                            height,
260                                                            sampleCnt);
261    GrResource* resource = fTextureCache->find(resourceKey);
262    return static_cast<GrStencilBuffer*>(resource);
263}
264
// Nearest-neighbor stretch of an srcW x srcH pixel block into dstW x dstH.
// Sample positions are computed in 16.16 fixed point, starting half a step in
// so each destination pixel samples the center of its source footprint.
// 'bpp' is the byte size of one pixel. Assumes dstW/dstH > 0 and
// srcW/srcH < 2^15 (fixed-point headroom for the << 16).
static void stretchImage(void* dst,
                         int dstW,
                         int dstH,
                         const void* src,
                         int srcW,
                         int srcH,
                         int bpp) {
    int32_t dx = (srcW << 16) / dstW;  // source step per dst column, 16.16
    int32_t dy = (srcH << 16) / dstH;  // source step per dst row, 16.16

    int32_t y = dy >> 1;  // center of the first source sample

    int dstXLimit = dstW * bpp;
    for (int j = 0; j < dstH; ++j) {
        int32_t x = dx >> 1;
        const uint8_t* srcRow = (const uint8_t*)src + (y >> 16) * srcW * bpp;
        uint8_t* dstRow = (uint8_t*)dst + j * dstW * bpp;
        for (int i = 0; i < dstXLimit; i += bpp) {
            memcpy(dstRow + i, srcRow + (x >> 16) * bpp, bpp);
            x += dx;
        }
        y += dy;
    }
}
291
292namespace {
293
294// position + local coordinate
295extern const GrVertexAttrib gVertexAttribs[] = {
296    {kVec2f_GrVertexAttribType, 0,               kPosition_GrVertexAttribBinding},
297    {kVec2f_GrVertexAttribType, sizeof(GrPoint), kLocalCoord_GrVertexAttribBinding}
298};
299
300};
301
302// The desired texture is NPOT and tiled but that isn't supported by
303// the current hardware. Resize the texture to be a POT
304GrTexture* GrContext::createResizedTexture(const GrTextureDesc& desc,
305                                           const GrCacheID& cacheID,
306                                           void* srcData,
307                                           size_t rowBytes,
308                                           bool needsFiltering) {
309    SkAutoTUnref<GrTexture> clampedTexture(this->findAndRefTexture(desc, cacheID, NULL));
310    if (NULL == clampedTexture) {
311        clampedTexture.reset(this->createTexture(NULL, desc, cacheID, srcData, rowBytes));
312
313        if (NULL == clampedTexture) {
314            return NULL;
315        }
316    }
317
318    GrTextureDesc rtDesc = desc;
319    rtDesc.fFlags =  rtDesc.fFlags |
320                     kRenderTarget_GrTextureFlagBit |
321                     kNoStencil_GrTextureFlagBit;
322    rtDesc.fWidth  = GrNextPow2(GrMax(desc.fWidth, 64));
323    rtDesc.fHeight = GrNextPow2(GrMax(desc.fHeight, 64));
324
325    GrTexture* texture = fGpu->createTexture(rtDesc, NULL, 0);
326
327    if (NULL != texture) {
328        GrDrawTarget::AutoStateRestore asr(fGpu, GrDrawTarget::kReset_ASRInit);
329        GrDrawState* drawState = fGpu->drawState();
330        drawState->setRenderTarget(texture->asRenderTarget());
331
332        // if filtering is not desired then we want to ensure all
333        // texels in the resampled image are copies of texels from
334        // the original.
335        GrTextureParams params(SkShader::kClamp_TileMode, needsFiltering);
336        drawState->addColorTextureEffect(clampedTexture, SkMatrix::I(), params);
337
338        drawState->setVertexAttribs<gVertexAttribs>(SK_ARRAY_COUNT(gVertexAttribs));
339
340        GrDrawTarget::AutoReleaseGeometry arg(fGpu, 4, 0);
341
342        if (arg.succeeded()) {
343            GrPoint* verts = (GrPoint*) arg.vertices();
344            verts[0].setIRectFan(0, 0, texture->width(), texture->height(), 2 * sizeof(GrPoint));
345            verts[1].setIRectFan(0, 0, 1, 1, 2 * sizeof(GrPoint));
346            fGpu->drawNonIndexed(kTriangleFan_GrPrimitiveType, 0, 4);
347        }
348    } else {
349        // TODO: Our CPU stretch doesn't filter. But we create separate
350        // stretched textures when the texture params is either filtered or
351        // not. Either implement filtered stretch blit on CPU or just create
352        // one when FBO case fails.
353
354        rtDesc.fFlags = kNone_GrTextureFlags;
355        // no longer need to clamp at min RT size.
356        rtDesc.fWidth  = GrNextPow2(desc.fWidth);
357        rtDesc.fHeight = GrNextPow2(desc.fHeight);
358        int bpp = GrBytesPerPixel(desc.fConfig);
359        SkAutoSMalloc<128*128*4> stretchedPixels(bpp * rtDesc.fWidth * rtDesc.fHeight);
360        stretchImage(stretchedPixels.get(), rtDesc.fWidth, rtDesc.fHeight,
361                     srcData, desc.fWidth, desc.fHeight, bpp);
362
363        size_t stretchedRowBytes = rtDesc.fWidth * bpp;
364
365        SkDEBUGCODE(GrTexture* texture = )fGpu->createTexture(rtDesc, stretchedPixels.get(),
366                                                              stretchedRowBytes);
367        GrAssert(NULL != texture);
368    }
369
370    return texture;
371}
372
373GrTexture* GrContext::createTexture(const GrTextureParams* params,
374                                    const GrTextureDesc& desc,
375                                    const GrCacheID& cacheID,
376                                    void* srcData,
377                                    size_t rowBytes) {
378    SK_TRACE_EVENT0("GrContext::createTexture");
379
380    GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheID);
381
382    GrTexture* texture;
383    if (GrTexture::NeedsResizing(resourceKey)) {
384        texture = this->createResizedTexture(desc, cacheID,
385                                             srcData, rowBytes,
386                                             GrTexture::NeedsFiltering(resourceKey));
387    } else {
388        texture= fGpu->createTexture(desc, srcData, rowBytes);
389    }
390
391    if (NULL != texture) {
392        // Adding a resource could put us overbudget. Try to free up the
393        // necessary space before adding it.
394        fTextureCache->purgeAsNeeded(1, texture->sizeInBytes());
395        fTextureCache->addResource(resourceKey, texture);
396    }
397
398    return texture;
399}
400
401static GrTexture* create_scratch_texture(GrGpu* gpu,
402                                         GrResourceCache* textureCache,
403                                         const GrTextureDesc& desc) {
404    GrTexture* texture = gpu->createTexture(desc, NULL, 0);
405    if (NULL != texture) {
406        GrResourceKey key = GrTexture::ComputeScratchKey(texture->desc());
407        // Adding a resource could put us overbudget. Try to free up the
408        // necessary space before adding it.
409        textureCache->purgeAsNeeded(1, texture->sizeInBytes());
410        // Make the resource exclusive so future 'find' calls don't return it
411        textureCache->addResource(key, texture, GrResourceCache::kHide_OwnershipFlag);
412    }
413    return texture;
414}
415
// Returns a ref'ed scratch texture whose dimensions are at least those of
// inDesc (exactly those when 'match' is kExact). The texture is held
// exclusively (hidden) in the cache until it is unlocked or adopted.
GrTexture* GrContext::lockAndRefScratchTexture(const GrTextureDesc& inDesc, ScratchTexMatch match) {

    GrAssert((inDesc.fFlags & kRenderTarget_GrTextureFlagBit) ||
             !(inDesc.fFlags & kNoStencil_GrTextureFlagBit));

    // Renderable A8 targets are not universally supported (e.g., not on ANGLE)
    GrAssert(this->isConfigRenderable(kAlpha_8_GrPixelConfig) ||
             !(inDesc.fFlags & kRenderTarget_GrTextureFlagBit) ||
             (inDesc.fConfig != kAlpha_8_GrPixelConfig));

    if (!fGpu->caps()->reuseScratchTextures()) {
        // If we're never recycling scratch textures we can
        // always make them the right size
        return create_scratch_texture(fGpu, fTextureCache, inDesc);
    }

    GrTextureDesc desc = inDesc;

    if (kApprox_ScratchTexMatch == match) {
        // bin by pow2 with a reasonable min
        static const int MIN_SIZE = 16;
        desc.fWidth  = GrMax(MIN_SIZE, GrNextPow2(desc.fWidth));
        desc.fHeight = GrMax(MIN_SIZE, GrNextPow2(desc.fHeight));
    }

    GrResource* resource = NULL;
    int origWidth = desc.fWidth;
    int origHeight = desc.fHeight;

    // Search the cache, progressively relaxing the flags on a miss (approx
    // mode only) until a hit or nothing is left to relax.
    do {
        GrResourceKey key = GrTexture::ComputeScratchKey(desc);
        // Ensure we have exclusive access to the texture so future 'find' calls don't return it
        resource = fTextureCache->find(key, GrResourceCache::kHide_OwnershipFlag);
        if (NULL != resource) {
            resource->ref();
            break;
        }
        if (kExact_ScratchTexMatch == match) {
            break;
        }
        // We had a cache miss and we are in approx mode, relax the fit of the flags.

        // We no longer try to reuse textures that were previously used as render targets in
        // situations where no RT is needed; doing otherwise can confuse the video driver and
        // cause significant performance problems in some cases.
        if (desc.fFlags & kNoStencil_GrTextureFlagBit) {
            desc.fFlags = desc.fFlags & ~kNoStencil_GrTextureFlagBit;
        } else {
            break;
        }

    } while (true);

    if (NULL == resource) {
        // Total miss: create a new scratch texture with the originally
        // requested (possibly pow2-binned) dimensions and flags.
        desc.fFlags = inDesc.fFlags;
        desc.fWidth = origWidth;
        desc.fHeight = origHeight;
        resource = create_scratch_texture(fGpu, fTextureCache, desc);
    }

    return static_cast<GrTexture*>(resource);
}
478
// Returns a texture that was handed out exclusively (via AutoScratchTexture)
// to the cache's shared pool, or deletes it outright when scratch textures
// aren't being recycled on this device.
void GrContext::addExistingTextureToCache(GrTexture* texture) {

    if (NULL == texture) {
        return;
    }

    // This texture should already have a cache entry since it was once
    // attached
    GrAssert(NULL != texture->getCacheEntry());

    // Conceptually, the cache entry is going to assume responsibility
    // for the creation ref.
    GrAssert(texture->unique());

    // Since this texture came from an AutoScratchTexture it should
    // still be in the exclusive pile
    fTextureCache->makeNonExclusive(texture->getCacheEntry());

    if (fGpu->caps()->reuseScratchTextures()) {
        this->purgeCache();
    } else {
        // When we aren't reusing textures we know this scratch texture
        // will never be reused and would be just wasting time in the cache
        fTextureCache->deleteResource(texture->getCacheEntry());
    }
}
505
506
507void GrContext::unlockScratchTexture(GrTexture* texture) {
508    ASSERT_OWNED_RESOURCE(texture);
509    GrAssert(NULL != texture->getCacheEntry());
510
511    // If this is a scratch texture we detached it from the cache
512    // while it was locked (to avoid two callers simultaneously getting
513    // the same texture).
514    if (texture->getCacheEntry()->key().isScratch()) {
515        fTextureCache->makeNonExclusive(texture->getCacheEntry());
516        this->purgeCache();
517    }
518}
519
520void GrContext::purgeCache() {
521    if (NULL != fTextureCache) {
522        fTextureCache->purgeAsNeeded();
523    }
524}
525
526bool GrContext::OverbudgetCB(void* data) {
527    GrAssert(NULL != data);
528
529    GrContext* context = reinterpret_cast<GrContext*>(data);
530
531    // Flush the InOrderDrawBuffer to possibly free up some textures
532    context->flush();
533
534    // TODO: actually track flush's behavior rather than always just
535    // returning true.
536    return true;
537}
538
539
540GrTexture* GrContext::createUncachedTexture(const GrTextureDesc& descIn,
541                                            void* srcData,
542                                            size_t rowBytes) {
543    GrTextureDesc descCopy = descIn;
544    return fGpu->createTexture(descCopy, srcData, rowBytes);
545}
546
// Reports the texture cache's current budget via the out-params:
// maximum resource count and maximum total bytes.
void GrContext::getTextureCacheLimits(int* maxTextures,
                                      size_t* maxTextureBytes) const {
    fTextureCache->getLimits(maxTextures, maxTextureBytes);
}
551
// Updates the texture cache's budget (max resource count and max bytes).
void GrContext::setTextureCacheLimits(int maxTextures, size_t maxTextureBytes) {
    fTextureCache->setLimits(maxTextures, maxTextureBytes);
}
555
// Largest texture dimension supported by the GPU backend.
int GrContext::getMaxTextureSize() const {
    return fGpu->caps()->maxTextureSize();
}
559
// Largest render target dimension supported by the GPU backend.
int GrContext::getMaxRenderTargetSize() const {
    return fGpu->caps()->maxRenderTargetSize();
}
563
// Maximum MSAA sample count supported by the GPU backend.
int GrContext::getMaxSampleCount() const {
    return fGpu->caps()->maxSampleCount();
}
567
568///////////////////////////////////////////////////////////////////////////////
569
// Wraps a texture created directly with the 3D API in a GrTexture.
GrTexture* GrContext::wrapBackendTexture(const GrBackendTextureDesc& desc) {
    return fGpu->wrapBackendTexture(desc);
}
573
// Wraps a render target created directly with the 3D API in a GrRenderTarget.
GrRenderTarget* GrContext::wrapBackendRenderTarget(const GrBackendRenderTargetDesc& desc) {
    return fGpu->wrapBackendRenderTarget(desc);
}
577
578///////////////////////////////////////////////////////////////////////////////
579
580bool GrContext::supportsIndex8PixelConfig(const GrTextureParams* params,
581                                          int width, int height) const {
582    const GrDrawTargetCaps* caps = fGpu->caps();
583    if (!caps->eightBitPaletteSupport()) {
584        return false;
585    }
586
587    bool isPow2 = GrIsPow2(width) && GrIsPow2(height);
588
589    if (!isPow2) {
590        bool tiled = NULL != params && params->isTiled();
591        if (tiled && !caps->npotTextureTileSupport()) {
592            return false;
593        }
594    }
595    return true;
596}
597
598
599////////////////////////////////////////////////////////////////////////////////
600
// Clears 'rect' (the whole target when NULL) of 'target' (the current render
// target when NULL) to 'color', going through the draw target so the clear
// is ordered with buffered draws.
void GrContext::clear(const SkIRect* rect,
                      const GrColor color,
                      GrRenderTarget* target) {
    AutoRestoreEffects are;
    this->prepareToDraw(NULL, BUFFERED_DRAW, &are)->clear(rect, color, target);
}
607
// Fills the entire current render target (clip permitting) with 'origPaint'.
void GrContext::drawPaint(const GrPaint& origPaint) {
    // set rect to be big enough to fill the space, but not super-huge, so we
    // don't overflow fixed-point implementations
    SkRect r;
    r.setLTRB(0, 0,
              SkIntToScalar(getRenderTarget()->width()),
              SkIntToScalar(getRenderTarget()->height()));
    SkMatrix inverse;
    // Copy-on-first-write: origPaint is only cloned if it must be modified.
    SkTCopyOnFirstWrite<GrPaint> paint(origPaint);
    AutoMatrix am;

    // We attempt to map r by the inverse matrix and draw that. mapRect will
    // map the four corners and bound them with a new rect. This will not
    // produce a correct result for some perspective matrices.
    if (!this->getMatrix().hasPerspective()) {
        if (!fViewMatrix.invert(&inverse)) {
            GrPrintf("Could not invert matrix\n");
            return;
        }
        inverse.mapRect(&r);
    } else {
        // Perspective case: draw the device-space rect under an identity view
        // matrix instead (am adjusts the paint to compensate).
        if (!am.setIdentity(this, paint.writable())) {
            GrPrintf("Could not invert matrix\n");
            return;
        }
    }
    // by definition this fills the entire clip, no need for AA
    if (paint->isAntiAlias()) {
        paint.writable()->setAntiAlias(false);
    }
    this->drawRect(*paint, r);
}
640
641////////////////////////////////////////////////////////////////////////////////
642
namespace {
// True when coverage-based AA must be skipped because the current blend can't
// incorporate a separate coverage value (see DISABLE_COVERAGE_AA_FOR_BLEND).
inline bool disable_coverage_aa_for_blend(GrDrawTarget* target) {
    return DISABLE_COVERAGE_AA_FOR_BLEND && !target->canApplyCoverage();
}
}
648
649////////////////////////////////////////////////////////////////////////////////
650
651/*  create a triangle strip that strokes the specified triangle. There are 8
652 unique vertices, but we repreat the last 2 to close up. Alternatively we
653 could use an indices array, and then only send 8 verts, but not sure that
654 would be faster.
655 */
656static void setStrokeRectStrip(GrPoint verts[10], SkRect rect,
657                               SkScalar width) {
658    const SkScalar rad = SkScalarHalf(width);
659    rect.sort();
660
661    verts[0].set(rect.fLeft + rad, rect.fTop + rad);
662    verts[1].set(rect.fLeft - rad, rect.fTop - rad);
663    verts[2].set(rect.fRight - rad, rect.fTop + rad);
664    verts[3].set(rect.fRight + rad, rect.fTop - rad);
665    verts[4].set(rect.fRight - rad, rect.fBottom - rad);
666    verts[5].set(rect.fRight + rad, rect.fBottom + rad);
667    verts[6].set(rect.fLeft + rad, rect.fBottom - rad);
668    verts[7].set(rect.fLeft - rad, rect.fBottom + rad);
669    verts[8] = verts[0];
670    verts[9] = verts[1];
671}
672
673static bool isIRect(const SkRect& r) {
674    return SkScalarIsInt(r.fLeft)  && SkScalarIsInt(r.fTop) &&
675           SkScalarIsInt(r.fRight) && SkScalarIsInt(r.fBottom);
676}
677
// Decides whether a rect draw can use the coverage-AA rect path. On success,
// *combinedMatrix holds the view matrix pre-concatenated with the optional
// 'matrix', *devRect is the rect mapped to device space, and
// *useVertexCoverage reports that the AA ramp must ride a per-vertex coverage
// attribute (because alpha can't simply be tweaked for this blend).
// strokeWidth < 0 means a fill.
static bool apply_aa_to_rect(GrDrawTarget* target,
                             const SkRect& rect,
                             SkScalar strokeWidth,
                             const SkMatrix* matrix,
                             SkMatrix* combinedMatrix,
                             SkRect* devRect,
                             bool* useVertexCoverage) {
    // we use a simple coverage ramp to do aa on axis-aligned rects
    // we check if the rect will be axis-aligned, and the rect won't land on
    // integer coords.

    // we are keeping around the "tweak the alpha" trick because
    // it is our only hope for the fixed-pipe implementation.
    // In a shader implementation we can give a separate coverage input
    // TODO: remove this ugliness when we drop the fixed-pipe impl
    *useVertexCoverage = false;
    if (!target->getDrawState().canTweakAlphaForCoverage()) {
        if (disable_coverage_aa_for_blend(target)) {
#if GR_DEBUG
            //GrPrintf("Turning off AA to correctly apply blend.\n");
#endif
            return false;
        } else {
            *useVertexCoverage = true;
        }
    }
    const GrDrawState& drawState = target->getDrawState();
    if (drawState.getRenderTarget()->isMultisampled()) {
        // MSAA targets already get AA from the hardware.
        return false;
    }

    if (0 == strokeWidth && target->willUseHWAALines()) {
        // Hairlines will be antialiased by hardware line AA instead.
        return false;
    }

#if defined(SHADER_AA_FILL_RECT) || !defined(IGNORE_ROT_AA_RECT_OPT)
    if (strokeWidth >= 0) {
#endif
        if (!drawState.getViewMatrix().preservesAxisAlignment()) {
            return false;
        }

        if (NULL != matrix && !matrix->preservesAxisAlignment()) {
            return false;
        }
#if defined(SHADER_AA_FILL_RECT) || !defined(IGNORE_ROT_AA_RECT_OPT)
    } else {
        // Fills tolerate rotation as long as right angles are preserved.
        if (!drawState.getViewMatrix().preservesAxisAlignment() &&
            !drawState.getViewMatrix().preservesRightAngles()) {
            return false;
        }

        if (NULL != matrix && !matrix->preservesRightAngles()) {
            return false;
        }
    }
#endif

    *combinedMatrix = drawState.getViewMatrix();
    if (NULL != matrix) {
        combinedMatrix->preConcat(*matrix);

#if GR_DEBUG
#if defined(SHADER_AA_FILL_RECT) || !defined(IGNORE_ROT_AA_RECT_OPT)
        if (strokeWidth >= 0) {
#endif
            GrAssert(combinedMatrix->preservesAxisAlignment());
#if defined(SHADER_AA_FILL_RECT) || !defined(IGNORE_ROT_AA_RECT_OPT)
        } else {
            GrAssert(combinedMatrix->preservesRightAngles());
        }
#endif
#endif
    }

    combinedMatrix->mapRect(devRect, rect);

    if (strokeWidth < 0) {
        // A fill that lands exactly on integer device coords needs no AA.
        return !isIRect(*devRect);
    } else {
        return true;
    }
}
761
// Draws 'rect' transformed by the optional 'matrix': filled when width < 0,
// hairline-stroked when width == 0, stroked with the given width otherwise.
// Uses the AA rect renderer when the paint/target allow it, otherwise plain
// geometry.
void GrContext::drawRect(const GrPaint& paint,
                         const SkRect& rect,
                         SkScalar width,
                         const SkMatrix* matrix) {
    SK_TRACE_EVENT0("GrContext::drawRect");

    AutoRestoreEffects are;
    GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are);

    SkRect devRect;
    SkMatrix combinedMatrix;
    bool useVertexCoverage;
    bool needAA = paint.isAntiAlias() &&
                  !target->getDrawState().getRenderTarget()->isMultisampled();
    bool doAA = needAA && apply_aa_to_rect(target, rect, width, matrix,
                                           &combinedMatrix, &devRect,
                                           &useVertexCoverage);
    if (doAA) {
        // AA rects are generated in device space under an identity view matrix.
        GrDrawState::AutoViewMatrixRestore avmr;
        if (!avmr.setIdentity(target->drawState())) {
            return;
        }
        if (width >= 0) {
            fAARectRenderer->strokeAARect(this->getGpu(), target,
                                          rect, combinedMatrix, devRect,
                                          width, useVertexCoverage);
        } else {
            // filled AA rect
            fAARectRenderer->fillAARect(this->getGpu(), target,
                                        rect, combinedMatrix, devRect,
                                        useVertexCoverage);
        }
        return;
    }

    if (width >= 0) {
        // TODO: consider making static vertex buffers for these cases.
        // Hairline could be done by just adding closing vertex to
        // unitSquareVertexBuffer()

        static const int worstCaseVertCount = 10;
        target->drawState()->setDefaultVertexAttribs();
        GrDrawTarget::AutoReleaseGeometry geo(target, worstCaseVertCount, 0);

        if (!geo.succeeded()) {
            GrPrintf("Failed to get space for vertices!\n");
            return;
        }

        GrPrimitiveType primType;
        int vertCount;
        GrPoint* vertex = geo.positions();

        if (width > 0) {
            // Stroke: 10-vertex triangle strip outlining the rect.
            vertCount = 10;
            primType = kTriangleStrip_GrPrimitiveType;
            setStrokeRectStrip(vertex, rect, width);
        } else {
            // hairline
            vertCount = 5;
            primType = kLineStrip_GrPrimitiveType;
            vertex[0].set(rect.fLeft, rect.fTop);
            vertex[1].set(rect.fRight, rect.fTop);
            vertex[2].set(rect.fRight, rect.fBottom);
            vertex[3].set(rect.fLeft, rect.fBottom);
            vertex[4].set(rect.fLeft, rect.fTop);
        }

        GrDrawState::AutoViewMatrixRestore avmr;
        if (NULL != matrix) {
            GrDrawState* drawState = target->drawState();
            avmr.set(drawState, *matrix);
        }

        target->drawNonIndexed(primType, 0, vertCount);
    } else {
        // filled BW rect
        target->drawSimpleRect(rect, matrix);
    }
}
842
// Draws dstRect with localRect supplying the local (e.g. texture)
// coordinates. dstMatrix/localMatrix optionally transform the respective
// spaces and may be NULL.
void GrContext::drawRectToRect(const GrPaint& paint,
                               const SkRect& dstRect,
                               const SkRect& localRect,
                               const SkMatrix* dstMatrix,
                               const SkMatrix* localMatrix) {
    SK_TRACE_EVENT0("GrContext::drawRectToRect");
    AutoRestoreEffects are;
    GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are);

    target->drawRect(dstRect, dstMatrix, &localRect, localMatrix);
}
854
855namespace {
856
857extern const GrVertexAttrib gPosUVColorAttribs[] = {
858    {kVec2f_GrVertexAttribType,  0, kPosition_GrVertexAttribBinding },
859    {kVec2f_GrVertexAttribType,  sizeof(GrPoint), kLocalCoord_GrVertexAttribBinding },
860    {kVec4ub_GrVertexAttribType, 2*sizeof(GrPoint), kColor_GrVertexAttribBinding}
861};
862
863extern const GrVertexAttrib gPosColorAttribs[] = {
864    {kVec2f_GrVertexAttribType,  0, kPosition_GrVertexAttribBinding},
865    {kVec4ub_GrVertexAttribType, sizeof(GrPoint), kColor_GrVertexAttribBinding},
866};
867
868static void set_vertex_attributes(GrDrawState* drawState,
869                                  const GrPoint* texCoords,
870                                  const GrColor* colors,
871                                  int* colorOffset,
872                                  int* texOffset) {
873    *texOffset = -1;
874    *colorOffset = -1;
875
876    if (NULL != texCoords && NULL != colors) {
877        *texOffset = sizeof(GrPoint);
878        *colorOffset = 2*sizeof(GrPoint);
879        drawState->setVertexAttribs<gPosUVColorAttribs>(3);
880    } else if (NULL != texCoords) {
881        *texOffset = sizeof(GrPoint);
882        drawState->setVertexAttribs<gPosUVColorAttribs>(2);
883    } else if (NULL != colors) {
884        *colorOffset = sizeof(GrPoint);
885        drawState->setVertexAttribs<gPosColorAttribs>(2);
886    } else {
887        drawState->setVertexAttribs<gPosColorAttribs>(1);
888    }
889}
890
891};
892
893void GrContext::drawVertices(const GrPaint& paint,
894                             GrPrimitiveType primitiveType,
895                             int vertexCount,
896                             const GrPoint positions[],
897                             const GrPoint texCoords[],
898                             const GrColor colors[],
899                             const uint16_t indices[],
900                             int indexCount) {
901    SK_TRACE_EVENT0("GrContext::drawVertices");
902
903    GrDrawTarget::AutoReleaseGeometry geo;
904
905    AutoRestoreEffects are;
906    GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are);
907
908    GrDrawState* drawState = target->drawState();
909
910    int colorOffset = -1, texOffset = -1;
911    set_vertex_attributes(drawState, texCoords, colors, &colorOffset, &texOffset);
912
913    size_t vertexSize = drawState->getVertexSize();
914    if (sizeof(GrPoint) != vertexSize) {
915        if (!geo.set(target, vertexCount, 0)) {
916            GrPrintf("Failed to get space for vertices!\n");
917            return;
918        }
919        void* curVertex = geo.vertices();
920
921        for (int i = 0; i < vertexCount; ++i) {
922            *((GrPoint*)curVertex) = positions[i];
923
924            if (texOffset >= 0) {
925                *(GrPoint*)((intptr_t)curVertex + texOffset) = texCoords[i];
926            }
927            if (colorOffset >= 0) {
928                *(GrColor*)((intptr_t)curVertex + colorOffset) = colors[i];
929            }
930            curVertex = (void*)((intptr_t)curVertex + vertexSize);
931        }
932    } else {
933        target->setVertexSourceToArray(positions, vertexCount);
934    }
935
936    // we don't currently apply offscreen AA to this path. Need improved
937    // management of GrDrawTarget's geometry to avoid copying points per-tile.
938
939    if (NULL != indices) {
940        target->setIndexSourceToArray(indices, indexCount);
941        target->drawIndexed(primitiveType, 0, 0, vertexCount, indexCount);
942        target->resetIndexSource();
943    } else {
944        target->drawNonIndexed(primitiveType, 0, vertexCount);
945    }
946}
947
948///////////////////////////////////////////////////////////////////////////////
949
950void GrContext::drawRRect(const GrPaint& paint,
951                          const SkRRect& rect,
952                          const SkStrokeRec& stroke) {
953
954    AutoRestoreEffects are;
955    GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are);
956
957    bool useAA = paint.isAntiAlias() &&
958                 !target->getDrawState().getRenderTarget()->isMultisampled() &&
959                 !disable_coverage_aa_for_blend(target);
960
961    if (!fOvalRenderer->drawSimpleRRect(target, this, useAA, rect, stroke)) {
962        SkPath path;
963        path.addRRect(rect);
964        this->internalDrawPath(target, useAA, path, stroke);
965    }
966}
967
968///////////////////////////////////////////////////////////////////////////////
969
970void GrContext::drawOval(const GrPaint& paint,
971                         const SkRect& oval,
972                         const SkStrokeRec& stroke) {
973
974    AutoRestoreEffects are;
975    GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are);
976
977    bool useAA = paint.isAntiAlias() &&
978                 !target->getDrawState().getRenderTarget()->isMultisampled() &&
979                 !disable_coverage_aa_for_blend(target);
980
981    if (!fOvalRenderer->drawOval(target, this, useAA, oval, stroke)) {
982        SkPath path;
983        path.addOval(oval);
984        this->internalDrawPath(target, useAA, path, stroke);
985    }
986}
987
988namespace {
989
990// Can 'path' be drawn as a pair of filled nested rectangles?
991static bool is_nested_rects(GrDrawTarget* target,
992                            const SkPath& path,
993                            const SkStrokeRec& stroke,
994                            SkRect rects[2],
995                            bool* useVertexCoverage) {
996    SkASSERT(stroke.isFillStyle());
997
998    if (path.isInverseFillType()) {
999        return false;
1000    }
1001
1002    const GrDrawState& drawState = target->getDrawState();
1003
1004    // TODO: this restriction could be lifted if we were willing to apply
1005    // the matrix to all the points individually rather than just to the rect
1006    if (!drawState.getViewMatrix().preservesAxisAlignment()) {
1007        return false;
1008    }
1009
1010    *useVertexCoverage = false;
1011    if (!target->getDrawState().canTweakAlphaForCoverage()) {
1012        if (disable_coverage_aa_for_blend(target)) {
1013            return false;
1014        } else {
1015            *useVertexCoverage = true;
1016        }
1017    }
1018
1019    SkPath::Direction dirs[2];
1020    if (!path.isNestedRects(rects, dirs)) {
1021        return false;
1022    }
1023
1024    if (SkPath::kWinding_FillType == path.getFillType()) {
1025        // The two rects need to be wound opposite to each other
1026        return dirs[0] != dirs[1];
1027    } else {
1028        return true;
1029    }
1030}
1031
1032};
1033
// Draws a path, first trying cheaper special cases (empty path, nested AA
// rects, ovals) before handing off to the general path pipeline.
void GrContext::drawPath(const GrPaint& paint, const SkPath& path, const SkStrokeRec& stroke) {

    if (path.isEmpty()) {
       if (path.isInverseFillType()) {
           // An empty inverse-filled path covers everything.
           this->drawPaint(paint);
       }
       return;
    }

    // Note that internalDrawPath may sw-rasterize the path into a scratch texture.
    // Scratch textures can be recycled after they are returned to the texture
    // cache. This presents a potential hazard for buffered drawing. However,
    // the writePixels that uploads to the scratch will perform a flush so we're
    // OK.
    AutoRestoreEffects are;
    GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are);

    bool useAA = paint.isAntiAlias() && !target->getDrawState().getRenderTarget()->isMultisampled();
    if (useAA && stroke.getWidth() < 0 && !path.isConvex()) {
        // Concave AA paths are expensive - try to avoid them for special cases
        bool useVertexCoverage;
        SkRect rects[2];

        if (is_nested_rects(target, path, stroke, rects, &useVertexCoverage)) {
            // Draw with an identity view matrix; the original matrix is
            // passed along so the rect geometry can be transformed instead.
            SkMatrix origViewMatrix = target->getDrawState().getViewMatrix();
            GrDrawState::AutoViewMatrixRestore avmr;
            if (!avmr.setIdentity(target->drawState())) {
                return;
            }

            fAARectRenderer->fillAANestedRects(this->getGpu(), target,
                                               rects,
                                               origViewMatrix,
                                               useVertexCoverage);
            return;
        }
    }

    SkRect ovalRect;
    bool isOval = path.isOval(&ovalRect);

    // Try the oval renderer; if it declines (or the path isn't a plain oval)
    // fall through to the general pipeline.
    if (!isOval || path.isInverseFillType()
        || !fOvalRenderer->drawOval(target, this, useAA, ovalRect, stroke)) {
        this->internalDrawPath(target, useAA, path, stroke);
    }
}
1080
// Renders 'path' via the path renderer chain. 'useAA' requests coverage-based
// antialiasing; it is dropped when the current blend cannot correctly compose
// partial coverage. May fall back to stroking the path into a fill and/or to
// the software path renderer.
void GrContext::internalDrawPath(GrDrawTarget* target, bool useAA, const SkPath& path,
                                 const SkStrokeRec& stroke) {

    // An Assumption here is that path renderer would use some form of tweaking
    // the src color (either the input alpha or in the frag shader) to implement
    // aa. If we have some future driver-mojo path AA that can do the right
    // thing WRT to the blend then we'll need some query on the PR.
    if (disable_coverage_aa_for_blend(target)) {
#if GR_DEBUG
        //GrPrintf("Turning off AA to correctly apply blend.\n");
#endif
        useAA = false;
    }

    GrPathRendererChain::DrawType type = useAA ? GrPathRendererChain::kColorAntiAlias_DrawType :
                                                 GrPathRendererChain::kColor_DrawType;

    const SkPath* pathPtr = &path;
    SkPath tmpPath;
    SkStrokeRec strokeRec(stroke);

    // Try a 1st time without stroking the path and without allowing the SW renderer
    GrPathRenderer* pr = this->getPathRenderer(*pathPtr, strokeRec, target, false, type);

    if (NULL == pr) {
        if (!strokeRec.isHairlineStyle()) {
            // It didn't work the 1st time, so try again with the stroked path
            // (converted to a fill) before retrying the renderer chain.
            if (strokeRec.applyToPath(&tmpPath, *pathPtr)) {
                pathPtr = &tmpPath;
                strokeRec.setFillStyle();
            }
        }
        // This time, allow SW renderer
        pr = this->getPathRenderer(*pathPtr, strokeRec, target, true, type);
    }

    if (NULL == pr) {
#if GR_DEBUG
        GrPrintf("Unable to find path renderer compatible with path.\n");
#endif
        return;
    }

    pr->drawPath(*pathPtr, strokeRec, target, useAA);
}
1126
1127////////////////////////////////////////////////////////////////////////////////
1128
1129void GrContext::flush(int flagsBitfield) {
1130    if (NULL == fDrawBuffer) {
1131        return;
1132    }
1133
1134    if (kDiscard_FlushBit & flagsBitfield) {
1135        fDrawBuffer->reset();
1136    } else {
1137        fDrawBuffer->flush();
1138    }
1139}
1140
1141bool GrContext::writeTexturePixels(GrTexture* texture,
1142                                   int left, int top, int width, int height,
1143                                   GrPixelConfig config, const void* buffer, size_t rowBytes,
1144                                   uint32_t flags) {
1145    SK_TRACE_EVENT0("GrContext::writeTexturePixels");
1146    ASSERT_OWNED_RESOURCE(texture);
1147
1148    if ((kUnpremul_PixelOpsFlag & flags) || !fGpu->canWriteTexturePixels(texture, config)) {
1149        if (NULL != texture->asRenderTarget()) {
1150            return this->writeRenderTargetPixels(texture->asRenderTarget(),
1151                                                 left, top, width, height,
1152                                                 config, buffer, rowBytes, flags);
1153        } else {
1154            return false;
1155        }
1156    }
1157
1158    if (!(kDontFlush_PixelOpsFlag & flags)) {
1159        this->flush();
1160    }
1161
1162    return fGpu->writeTexturePixels(texture, left, top, width, height,
1163                                    config, buffer, rowBytes);
1164}
1165
1166bool GrContext::readTexturePixels(GrTexture* texture,
1167                                  int left, int top, int width, int height,
1168                                  GrPixelConfig config, void* buffer, size_t rowBytes,
1169                                  uint32_t flags) {
1170    SK_TRACE_EVENT0("GrContext::readTexturePixels");
1171    ASSERT_OWNED_RESOURCE(texture);
1172
1173    // TODO: code read pixels for textures that aren't also rendertargets
1174    GrRenderTarget* target = texture->asRenderTarget();
1175    if (NULL != target) {
1176        return this->readRenderTargetPixels(target,
1177                                            left, top, width, height,
1178                                            config, buffer, rowBytes,
1179                                            flags);
1180    } else {
1181        return false;
1182    }
1183}
1184
1185#include "SkConfig8888.h"
1186
1187namespace {
1188/**
1189 * Converts a GrPixelConfig to a SkCanvas::Config8888. Only byte-per-channel
1190 * formats are representable as Config8888 and so the function returns false
1191 * if the GrPixelConfig has no equivalent Config8888.
1192 */
1193bool grconfig_to_config8888(GrPixelConfig config,
1194                            bool unpremul,
1195                            SkCanvas::Config8888* config8888) {
1196    switch (config) {
1197        case kRGBA_8888_GrPixelConfig:
1198            if (unpremul) {
1199                *config8888 = SkCanvas::kRGBA_Unpremul_Config8888;
1200            } else {
1201                *config8888 = SkCanvas::kRGBA_Premul_Config8888;
1202            }
1203            return true;
1204        case kBGRA_8888_GrPixelConfig:
1205            if (unpremul) {
1206                *config8888 = SkCanvas::kBGRA_Unpremul_Config8888;
1207            } else {
1208                *config8888 = SkCanvas::kBGRA_Premul_Config8888;
1209            }
1210            return true;
1211        default:
1212            return false;
1213    }
1214}
1215
1216// It returns a configuration with where the byte position of the R & B components are swapped in
1217// relation to the input config. This should only be called with the result of
1218// grconfig_to_config8888 as it will fail for other configs.
1219SkCanvas::Config8888 swap_config8888_red_and_blue(SkCanvas::Config8888 config8888) {
1220    switch (config8888) {
1221        case SkCanvas::kBGRA_Premul_Config8888:
1222            return SkCanvas::kRGBA_Premul_Config8888;
1223        case SkCanvas::kBGRA_Unpremul_Config8888:
1224            return SkCanvas::kRGBA_Unpremul_Config8888;
1225        case SkCanvas::kRGBA_Premul_Config8888:
1226            return SkCanvas::kBGRA_Premul_Config8888;
1227        case SkCanvas::kRGBA_Unpremul_Config8888:
1228            return SkCanvas::kBGRA_Unpremul_Config8888;
1229        default:
1230            GrCrash("Unexpected input");
1231            return SkCanvas::kBGRA_Unpremul_Config8888;;
1232    }
1233}
1234}
1235
// Reads back a rect of pixels from 'target' (or the context's current render
// target when NULL) into 'buffer' as 'dstConfig'. Conversions that may be
// needed (y-flip, R/B swap, premul->unpremul) are performed on the GPU by
// drawing into a scratch texture when possible, and on the CPU otherwise.
// Returns false if the read (or a required conversion) is not possible.
bool GrContext::readRenderTargetPixels(GrRenderTarget* target,
                                       int left, int top, int width, int height,
                                       GrPixelConfig dstConfig, void* buffer, size_t rowBytes,
                                       uint32_t flags) {
    SK_TRACE_EVENT0("GrContext::readRenderTargetPixels");
    ASSERT_OWNED_RESOURCE(target);

    if (NULL == target) {
        // Fall back to the context's current render target.
        target = fRenderTarget.get();
        if (NULL == target) {
            return false;
        }
    }

    if (!(kDontFlush_PixelOpsFlag & flags)) {
        this->flush();
    }

    // Determine which conversions have to be applied: flipY, swapRAnd, and/or unpremul.

    // If fGpu->readPixels would incur a y-flip cost then we will read the pixels upside down. We'll
    // either do the flipY by drawing into a scratch with a matrix or on the cpu after the read.
    bool flipY = fGpu->readPixelsWillPayForYFlip(target, left, top,
                                                 width, height, dstConfig,
                                                 rowBytes);
    // We ignore the preferred config if it is different than our config unless it is an R/B swap.
    // In that case we'll perform an R and B swap while drawing to a scratch texture of the swapped
    // config. Then we will call readPixels on the scratch with the swapped config. The swaps during
    // the draw cancels out the fact that we call readPixels with a config that is R/B swapped from
    // dstConfig.
    GrPixelConfig readConfig = dstConfig;
    bool swapRAndB = false;
    if (GrPixelConfigSwapRAndB(dstConfig) ==
        fGpu->preferredReadPixelsConfig(dstConfig, target->config())) {
        readConfig = GrPixelConfigSwapRAndB(readConfig);
        swapRAndB = true;
    }

    bool unpremul = SkToBool(kUnpremul_PixelOpsFlag & flags);

    if (unpremul && !GrPixelConfigIs8888(dstConfig)) {
        // The unpremul flag is only allowed for these two configs.
        return false;
    }

    // If the src is a texture and we would have to do conversions after read pixels, we instead
    // do the conversions by drawing the src to a scratch texture. If we handle any of the
    // conversions in the draw we set the corresponding bool to false so that we don't reapply it
    // on the read back pixels.
    GrTexture* src = target->asTexture();
    GrAutoScratchTexture ast;
    if (NULL != src && (swapRAndB || unpremul || flipY)) {
        // Make the scratch a render target because we don't have a robust readTexturePixels as of
        // yet. It calls this function.
        GrTextureDesc desc;
        desc.fFlags = kRenderTarget_GrTextureFlagBit;
        desc.fWidth = width;
        desc.fHeight = height;
        desc.fConfig = readConfig;
        desc.fOrigin = kTopLeft_GrSurfaceOrigin;

        // When a full read back is faster than a partial we could always make the scratch exactly
        // match the passed rect. However, if we see many different size rectangles we will trash
        // our texture cache and pay the cost of creating and destroying many textures. So, we only
        // request an exact match when the caller is reading an entire RT.
        ScratchTexMatch match = kApprox_ScratchTexMatch;
        if (0 == left &&
            0 == top &&
            target->width() == width &&
            target->height() == height &&
            fGpu->fullReadPixelsIsFasterThanPartial()) {
            match = kExact_ScratchTexMatch;
        }
        ast.set(this, desc, match);
        GrTexture* texture = ast.texture();
        if (texture) {
            // compute a matrix to perform the draw
            SkMatrix textureMatrix;
            textureMatrix.setTranslate(SK_Scalar1 *left, SK_Scalar1 *top);
            textureMatrix.postIDiv(src->width(), src->height());

            SkAutoTUnref<const GrEffectRef> effect;
            if (unpremul) {
                effect.reset(this->createPMToUPMEffect(src, swapRAndB, textureMatrix));
                if (NULL != effect) {
                    unpremul = false; // we no longer need to do this on CPU after the read back.
                }
            }
            // If we failed to create a PM->UPM effect and have no other conversions to perform then
            // there is no longer any point to using the scratch.
            if (NULL != effect || flipY || swapRAndB) {
                if (!effect) {
                    effect.reset(GrConfigConversionEffect::Create(
                                                    src,
                                                    swapRAndB,
                                                    GrConfigConversionEffect::kNone_PMConversion,
                                                    textureMatrix));
                }
                swapRAndB = false; // we will handle the swap in the draw.

                // We protect the existing geometry here since it may not be
                // clear to the caller that a draw operation (i.e., drawSimpleRect)
                // can be invoked in this method
                GrDrawTarget::AutoGeometryAndStatePush agasp(fGpu, GrDrawTarget::kReset_ASRInit);
                GrDrawState* drawState = fGpu->drawState();
                GrAssert(effect);
                drawState->addColorEffect(effect);

                drawState->setRenderTarget(texture->asRenderTarget());
                SkRect rect = SkRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height));
                fGpu->drawSimpleRect(rect, NULL);
                // we want to read back from the scratch's origin
                left = 0;
                top = 0;
                target = texture->asRenderTarget();
            }
        }
    }
    if (!fGpu->readPixels(target,
                          left, top, width, height,
                          readConfig, buffer, rowBytes)) {
        return false;
    }
    // Perform any conversions we weren't able to perform using a scratch texture.
    if (unpremul || swapRAndB) {
        // These are initialized to suppress a warning
        SkCanvas::Config8888 srcC8888 = SkCanvas::kNative_Premul_Config8888;
        SkCanvas::Config8888 dstC8888 = SkCanvas::kNative_Premul_Config8888;

        SkDEBUGCODE(bool c8888IsValid =) grconfig_to_config8888(dstConfig, false, &srcC8888);
        grconfig_to_config8888(dstConfig, unpremul, &dstC8888);

        if (swapRAndB) {
            GrAssert(c8888IsValid); // we should only do r/b swap on 8888 configs
            srcC8888 = swap_config8888_red_and_blue(srcC8888);
        }
        GrAssert(c8888IsValid);
        // In-place CPU conversion of the read-back pixels.
        uint32_t* b32 = reinterpret_cast<uint32_t*>(buffer);
        SkConvertConfig8888Pixels(b32, rowBytes, dstC8888,
                                  b32, rowBytes, srcC8888,
                                  width, height);
    }
    return true;
}
1380
// Resolves 'target' via the GPU after flushing any buffered drawing.
void GrContext::resolveRenderTarget(GrRenderTarget* target) {
    GrAssert(target);
    ASSERT_OWNED_RESOURCE(target);
    // In the future we may track whether there are any pending draws to this
    // target. We don't today so we always perform a flush. We don't promise
    // this to our clients, though.
    this->flush();
    fGpu->resolveRenderTarget(target);
}
1390
1391void GrContext::copyTexture(GrTexture* src, GrRenderTarget* dst, const SkIPoint* topLeft) {
1392    if (NULL == src || NULL == dst) {
1393        return;
1394    }
1395    ASSERT_OWNED_RESOURCE(src);
1396
1397    // Writes pending to the source texture are not tracked, so a flush
1398    // is required to ensure that the copy captures the most recent contents
1399    // of the source texture. See similar behavior in
1400    // GrContext::resolveRenderTarget.
1401    this->flush();
1402
1403    GrDrawTarget::AutoStateRestore asr(fGpu, GrDrawTarget::kReset_ASRInit);
1404    GrDrawState* drawState = fGpu->drawState();
1405    drawState->setRenderTarget(dst);
1406    SkMatrix sampleM;
1407    sampleM.setIDiv(src->width(), src->height());
1408    SkIRect srcRect = SkIRect::MakeWH(dst->width(), dst->height());
1409    if (NULL != topLeft) {
1410        srcRect.offset(*topLeft);
1411    }
1412    SkIRect srcBounds = SkIRect::MakeWH(src->width(), src->height());
1413    if (!srcRect.intersect(srcBounds)) {
1414        return;
1415    }
1416    sampleM.preTranslate(SkIntToScalar(srcRect.fLeft), SkIntToScalar(srcRect.fTop));
1417    drawState->addColorTextureEffect(src, sampleM);
1418    SkRect dstR = SkRect::MakeWH(SkIntToScalar(srcRect.width()), SkIntToScalar(srcRect.height()));
1419    fGpu->drawSimpleRect(dstR, NULL);
1420}
1421
// Writes a rect of pixels (given as 'srcConfig') to 'target' (or the
// context's current render target when NULL). The data is uploaded to a
// scratch texture and drawn into the target, performing any needed R/B swap
// or unpremul->premul conversion on the GPU (or on the CPU as a fallback).
// Returns false on failure.
bool GrContext::writeRenderTargetPixels(GrRenderTarget* target,
                                        int left, int top, int width, int height,
                                        GrPixelConfig srcConfig,
                                        const void* buffer,
                                        size_t rowBytes,
                                        uint32_t flags) {
    SK_TRACE_EVENT0("GrContext::writeRenderTargetPixels");
    ASSERT_OWNED_RESOURCE(target);

    if (NULL == target) {
        // Fall back to the context's current render target.
        target = fRenderTarget.get();
        if (NULL == target) {
            return false;
        }
    }

    // TODO: when underlying api has a direct way to do this we should use it (e.g. glDrawPixels on
    // desktop GL).

    // We will always call some form of writeTexturePixels and we will pass our flags on to it.
    // Thus, we don't perform a flush here since that call will do it (if the kNoFlush flag isn't
    // set.)

    // If the RT is also a texture and we don't have to premultiply then take the texture path.
    // We expect to be at least as fast or faster since it doesn't use an intermediate texture as
    // we do below.

#if !GR_MAC_BUILD
    // At least some drivers on the Mac get confused when glTexImage2D is called on a texture
    // attached to an FBO. The FBO still sees the old image. TODO: determine what OS versions and/or
    // HW is affected.
    if (NULL != target->asTexture() && !(kUnpremul_PixelOpsFlag & flags) &&
        fGpu->canWriteTexturePixels(target->asTexture(), srcConfig)) {
        return this->writeTexturePixels(target->asTexture(),
                                        left, top, width, height,
                                        srcConfig, buffer, rowBytes, flags);
    }
#endif

    // We ignore the preferred config unless it is a R/B swap of the src config. In that case
    // we will upload the original src data to a scratch texture but we will spoof it as the swapped
    // config. This scratch will then have R and B swapped. We correct for this by swapping again
    // when drawing the scratch to the dst using a conversion effect.
    bool swapRAndB = false;
    GrPixelConfig writeConfig = srcConfig;
    if (GrPixelConfigSwapRAndB(srcConfig) ==
        fGpu->preferredWritePixelsConfig(srcConfig, target->config())) {
        writeConfig = GrPixelConfigSwapRAndB(srcConfig);
        swapRAndB = true;
    }

    GrTextureDesc desc;
    desc.fWidth = width;
    desc.fHeight = height;
    desc.fConfig = writeConfig;
    GrAutoScratchTexture ast(this, desc);
    GrTexture* texture = ast.texture();
    if (NULL == texture) {
        return false;
    }

    SkAutoTUnref<const GrEffectRef> effect;
    // Normalize texture coords by the scratch texture's dimensions.
    SkMatrix textureMatrix;
    textureMatrix.setIDiv(texture->width(), texture->height());

    // allocate a tmp buffer and sw convert the pixels to premul
    SkAutoSTMalloc<128 * 128, uint32_t> tmpPixels(0);

    if (kUnpremul_PixelOpsFlag & flags) {
        if (!GrPixelConfigIs8888(srcConfig)) {
            // Unpremul is only supported for 8888 configs.
            return false;
        }
        effect.reset(this->createUPMToPMEffect(texture, swapRAndB, textureMatrix));
        // handle the unpremul step on the CPU if we couldn't create an effect to do it.
        if (NULL == effect) {
            SkCanvas::Config8888 srcConfig8888, dstConfig8888;
            GR_DEBUGCODE(bool success = )
            grconfig_to_config8888(srcConfig, true, &srcConfig8888);
            GrAssert(success);
            GR_DEBUGCODE(success = )
            grconfig_to_config8888(srcConfig, false, &dstConfig8888);
            GrAssert(success);
            const uint32_t* src = reinterpret_cast<const uint32_t*>(buffer);
            tmpPixels.reset(width * height);
            SkConvertConfig8888Pixels(tmpPixels.get(), 4 * width, dstConfig8888,
                                      src, rowBytes, srcConfig8888,
                                      width, height);
            // Upload the converted copy instead of the caller's buffer.
            buffer = tmpPixels.get();
            rowBytes = 4 * width;
        }
    }
    if (NULL == effect) {
        effect.reset(GrConfigConversionEffect::Create(texture,
                                                      swapRAndB,
                                                      GrConfigConversionEffect::kNone_PMConversion,
                                                      textureMatrix));
    }

    if (!this->writeTexturePixels(texture,
                                  0, 0, width, height,
                                  writeConfig, buffer, rowBytes,
                                  flags & ~kUnpremul_PixelOpsFlag)) {
        return false;
    }

    // writeRenderTargetPixels can be called in the midst of drawing another
    // object (e.g., when uploading a SW path rendering to the gpu while
    // drawing a rect) so preserve the current geometry.
    SkMatrix matrix;
    matrix.setTranslate(SkIntToScalar(left), SkIntToScalar(top));
    GrDrawTarget::AutoGeometryAndStatePush agasp(fGpu, GrDrawTarget::kReset_ASRInit, &matrix);
    GrDrawState* drawState = fGpu->drawState();
    GrAssert(effect);
    drawState->addColorEffect(effect);

    drawState->setRenderTarget(target);

    fGpu->drawSimpleRect(SkRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height)), NULL);
    return true;
}
1542////////////////////////////////////////////////////////////////////////////////
1543
// Prepares the shared draw state for a draw and returns the target to draw
// into (the deferred draw buffer or the GPU directly, per 'buffered'). When
// 'paint' is non-NULL its effects are installed on the draw state and 'are'
// is armed to remove them; when NULL the draw state is reset.
GrDrawTarget* GrContext::prepareToDraw(const GrPaint* paint,
                                       BufferedDraw buffered,
                                       AutoRestoreEffects* are) {
    // All users of this draw state should be freeing up all effects when they're done.
    // Otherwise effects that own resources may keep those resources alive indefinitely.
    GrAssert(0 == fDrawState->numColorStages() && 0 == fDrawState->numCoverageStages());

    if (kNo_BufferedDraw == buffered && kYes_BufferedDraw == fLastDrawWasBuffered) {
        // Switching from buffered to immediate drawing: execute what's queued
        // so ordering is preserved.
        fDrawBuffer->flush();
        fLastDrawWasBuffered = kNo_BufferedDraw;
    }
    ASSERT_OWNED_RESOURCE(fRenderTarget.get());
    if (NULL != paint) {
        GrAssert(NULL != are);
        are->set(fDrawState);
        fDrawState->setFromPaint(*paint, fViewMatrix, fRenderTarget.get());
#if GR_DEBUG_PARTIAL_COVERAGE_CHECK
        if ((paint->hasMask() || 0xff != paint->fCoverage) &&
            !fGpu->canApplyCoverage()) {
            GrPrintf("Partial pixel coverage will be incorrectly blended.\n");
        }
#endif
    } else {
        fDrawState->reset(fViewMatrix);
        fDrawState->setRenderTarget(fRenderTarget.get());
    }
    GrDrawTarget* target;
    if (kYes_BufferedDraw == buffered) {
        fLastDrawWasBuffered = kYes_BufferedDraw;
        target = fDrawBuffer;
    } else {
        GrAssert(kNo_BufferedDraw == buffered);
        fLastDrawWasBuffered = kNo_BufferedDraw;
        target = fGpu;
    }
    // Only enable clipping when there's a clip stack that actually restricts
    // drawing.
    fDrawState->setState(GrDrawState::kClip_StateBit, NULL != fClip &&
                                                     !fClip->fClipStack->isWideOpen());
    target->setClip(fClip);
    GrAssert(fDrawState == target->drawState());
    return target;
}
1585
1586/*
1587 * This method finds a path renderer that can draw the specified path on
1588 * the provided target.
1589 * Due to its expense, the software path renderer has split out so it can
1590 * can be individually allowed/disallowed via the "allowSW" boolean.
1591 */
1592GrPathRenderer* GrContext::getPathRenderer(const SkPath& path,
1593                                           const SkStrokeRec& stroke,
1594                                           const GrDrawTarget* target,
1595                                           bool allowSW,
1596                                           GrPathRendererChain::DrawType drawType,
1597                                           GrPathRendererChain::StencilSupport* stencilSupport) {
1598
1599    if (NULL == fPathRendererChain) {
1600        fPathRendererChain = SkNEW_ARGS(GrPathRendererChain, (this));
1601    }
1602
1603    GrPathRenderer* pr = fPathRendererChain->getPathRenderer(path,
1604                                                             stroke,
1605                                                             target,
1606                                                             drawType,
1607                                                             stencilSupport);
1608
1609    if (NULL == pr && allowSW) {
1610        if (NULL == fSoftwarePathRenderer) {
1611            fSoftwarePathRenderer = SkNEW_ARGS(GrSoftwarePathRenderer, (this));
1612        }
1613        pr = fSoftwarePathRenderer;
1614    }
1615
1616    return pr;
1617}
1618
1619////////////////////////////////////////////////////////////////////////////////
1620
// Returns true if the GPU backend can create a render target with the given
// pixel config; pure delegation to the capability query on fGpu.
bool GrContext::isConfigRenderable(GrPixelConfig config) const {
    return fGpu->isConfigRenderable(config);
}
1624
// Sets (when 'pred' is nonzero) or clears (when zero) the bit at 'shift' in
// 'bits' and returns the result. The shifted literal must be intptr_t-wide:
// the old "1 << shift" shifted a plain int, which is undefined behavior (and
// a truncated mask in practice) for shift >= 31 on 64-bit intptr_t.
static inline intptr_t setOrClear(intptr_t bits, int shift, intptr_t pred) {
    intptr_t mask = static_cast<intptr_t>(1) << shift;
    if (pred) {
        bits |= mask;
    } else {
        bits &= ~mask;
    }
    return bits;
}
1634
1635void GrContext::setupDrawBuffer() {
1636
1637    GrAssert(NULL == fDrawBuffer);
1638    GrAssert(NULL == fDrawBufferVBAllocPool);
1639    GrAssert(NULL == fDrawBufferIBAllocPool);
1640
1641    fDrawBufferVBAllocPool =
1642        SkNEW_ARGS(GrVertexBufferAllocPool, (fGpu, false,
1643                                    DRAW_BUFFER_VBPOOL_BUFFER_SIZE,
1644                                    DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS));
1645    fDrawBufferIBAllocPool =
1646        SkNEW_ARGS(GrIndexBufferAllocPool, (fGpu, false,
1647                                   DRAW_BUFFER_IBPOOL_BUFFER_SIZE,
1648                                   DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS));
1649
1650    fDrawBuffer = SkNEW_ARGS(GrInOrderDrawBuffer, (fGpu,
1651                                                   fDrawBufferVBAllocPool,
1652                                                   fDrawBufferIBAllocPool));
1653
1654    fDrawBuffer->setDrawState(fDrawState);
1655}
1656
// Returns the draw target text rendering should use: prepareToDraw with no
// paint or auto-restore, buffered per the c_Defer config (BUFFERED_DRAW).
GrDrawTarget* GrContext::getTextTarget() {
    return this->prepareToDraw(NULL, BUFFERED_DRAW, NULL);
}
1660
// Exposes the GPU's shared index buffer for drawing quads; pure delegation.
const GrIndexBuffer* GrContext::getQuadIndexBuffer() const {
    return fGpu->getQuadIndexBuffer();
}
1664
1665namespace {
1666void test_pm_conversions(GrContext* ctx, int* pmToUPMValue, int* upmToPMValue) {
1667    GrConfigConversionEffect::PMConversion pmToUPM;
1668    GrConfigConversionEffect::PMConversion upmToPM;
1669    GrConfigConversionEffect::TestForPreservingPMConversions(ctx, &pmToUPM, &upmToPM);
1670    *pmToUPMValue = pmToUPM;
1671    *upmToPMValue = upmToPM;
1672}
1673}
1674
1675const GrEffectRef* GrContext::createPMToUPMEffect(GrTexture* texture,
1676                                                  bool swapRAndB,
1677                                                  const SkMatrix& matrix) {
1678    if (!fDidTestPMConversions) {
1679        test_pm_conversions(this, &fPMToUPMConversion, &fUPMToPMConversion);
1680        fDidTestPMConversions = true;
1681    }
1682    GrConfigConversionEffect::PMConversion pmToUPM =
1683        static_cast<GrConfigConversionEffect::PMConversion>(fPMToUPMConversion);
1684    if (GrConfigConversionEffect::kNone_PMConversion != pmToUPM) {
1685        return GrConfigConversionEffect::Create(texture, swapRAndB, pmToUPM, matrix);
1686    } else {
1687        return NULL;
1688    }
1689}
1690
1691const GrEffectRef* GrContext::createUPMToPMEffect(GrTexture* texture,
1692                                                  bool swapRAndB,
1693                                                  const SkMatrix& matrix) {
1694    if (!fDidTestPMConversions) {
1695        test_pm_conversions(this, &fPMToUPMConversion, &fUPMToPMConversion);
1696        fDidTestPMConversions = true;
1697    }
1698    GrConfigConversionEffect::PMConversion upmToPM =
1699        static_cast<GrConfigConversionEffect::PMConversion>(fUPMToPMConversion);
1700    if (GrConfigConversionEffect::kNone_PMConversion != upmToPM) {
1701        return GrConfigConversionEffect::Create(texture, swapRAndB, upmToPM, matrix);
1702    } else {
1703        return NULL;
1704    }
1705}
1706
1707///////////////////////////////////////////////////////////////////////////////
1708#if GR_CACHE_STATS
// Debug-only (GR_CACHE_STATS builds): dumps texture-cache statistics via
// the cache's own printStats.
void GrContext::printCacheStats() const {
    fTextureCache->printStats();
}
1712#endif
1713