GrContext.cpp revision e30597375c19dfb5197fd065a3d1768401eb00fa
1
2/*
3 * Copyright 2011 Google Inc.
4 *
5 * Use of this source code is governed by a BSD-style license that can be
6 * found in the LICENSE file.
7 */
8
9#include "GrContext.h"
10
11#include "effects/GrConfigConversionEffect.h"
12#include "effects/GrDashingEffect.h"
13#include "effects/GrSingleTextureEffect.h"
14
15#include "GrAARectRenderer.h"
16#include "GrBufferAllocPool.h"
17#include "GrGpu.h"
18#include "GrDistanceFieldTextContext.h"
19#include "GrDrawTargetCaps.h"
20#include "GrIndexBuffer.h"
21#include "GrInOrderDrawBuffer.h"
22#include "GrLayerCache.h"
23#include "GrOvalRenderer.h"
24#include "GrPathRenderer.h"
25#include "GrPathUtils.h"
26#include "GrResourceCache.h"
27#include "GrResourceCache2.h"
28#include "GrSoftwarePathRenderer.h"
29#include "GrStencilBuffer.h"
30#include "GrStencilAndCoverTextContext.h"
31#include "GrStrokeInfo.h"
32#include "GrSurfacePriv.h"
33#include "GrTextStrike.h"
34#include "GrTexturePriv.h"
35#include "GrTraceMarker.h"
36#include "GrTracing.h"
37#include "SkDashPathPriv.h"
38#include "SkConfig8888.h"
39#include "SkGr.h"
40#include "SkRTConf.h"
41#include "SkRRect.h"
42#include "SkStrokeRec.h"
43#include "SkTLazy.h"
44#include "SkTLS.h"
45#include "SkTraceEvent.h"
46
47// It can be useful to set this to false to test whether a bug is caused by using the
48// InOrderDrawBuffer, to compare performance of using/not using InOrderDrawBuffer, or to make
49// debugging simpler.
50SK_CONF_DECLARE(bool, c_Defer, "gpu.deferContext", true,
51                "Defers rendering in GrContext via GrInOrderDrawBuffer.");
52
53#define BUFFERED_DRAW (c_Defer ? kYes_BufferedDraw : kNo_BufferedDraw)
54
55#ifdef SK_DEBUG
56    // change this to a 1 to see notifications when partial coverage fails
57    #define GR_DEBUG_PARTIAL_COVERAGE_CHECK 0
58#else
59    #define GR_DEBUG_PARTIAL_COVERAGE_CHECK 0
60#endif
61
62static const size_t MAX_RESOURCE_CACHE_COUNT = GR_DEFAULT_RESOURCE_CACHE_COUNT_LIMIT;
63static const size_t MAX_RESOURCE_CACHE_BYTES = GR_DEFAULT_RESOURCE_CACHE_MB_LIMIT * 1024 * 1024;
64
65static const size_t DRAW_BUFFER_VBPOOL_BUFFER_SIZE = 1 << 15;
66static const int DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS = 4;
67
68static const size_t DRAW_BUFFER_IBPOOL_BUFFER_SIZE = 1 << 11;
69static const int DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS = 4;
70
71#define ASSERT_OWNED_RESOURCE(R) SkASSERT(!(R) || (R)->getContext() == this)
72
73// Glorified typedef to avoid including GrDrawState.h in GrContext.h
74class GrContext::AutoRestoreEffects : public GrDrawState::AutoRestoreEffects {};
75
76class GrContext::AutoCheckFlush {
77public:
78    AutoCheckFlush(GrContext* context) : fContext(context) { SkASSERT(context); }
79
80    ~AutoCheckFlush() {
81        if (fContext->fFlushToReduceCacheSize) {
82            fContext->flush();
83        }
84    }
85
86private:
87    GrContext* fContext;
88};
89
90GrContext* GrContext::Create(GrBackend backend, GrBackendContext backendContext,
91                             const Options* opts) {
92    GrContext* context;
93    if (NULL == opts) {
94        context = SkNEW_ARGS(GrContext, (Options()));
95    } else {
96        context = SkNEW_ARGS(GrContext, (*opts));
97    }
98
99    if (context->init(backend, backendContext)) {
100        return context;
101    } else {
102        context->unref();
103        return NULL;
104    }
105}
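// Illustrative sketch (not part of the original file): a typical client-side
// creation sequence for a GL-backed context. The GrGLCreateNativeInterface()
// setup is an assumption about the embedder's environment.
//
//   SkAutoTUnref<const GrGLInterface> glInterface(GrGLCreateNativeInterface());
//   GrContext* context = GrContext::Create(kOpenGL_GrBackend,
//                                          (GrBackendContext) glInterface.get(),
//                                          NULL /* default Options */);
//   if (context) {
//       // ... record draws via the context ...
//       context->flush();
//       context->unref();  // balances the ref returned by Create()
//   }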
106
107GrContext::GrContext(const Options& opts) : fOptions(opts) {
108    fDrawState = NULL;
109    fGpu = NULL;
110    fClip = NULL;
111    fPathRendererChain = NULL;
112    fSoftwarePathRenderer = NULL;
113    fResourceCache = NULL;
114    fResourceCache2 = NULL;
115    fFontCache = NULL;
116    fDrawBuffer = NULL;
117    fDrawBufferVBAllocPool = NULL;
118    fDrawBufferIBAllocPool = NULL;
119    fFlushToReduceCacheSize = false;
120    fAARectRenderer = NULL;
121    fOvalRenderer = NULL;
122    fViewMatrix.reset();
123    fMaxTextureSizeOverride = 1 << 20;
124}
125
126bool GrContext::init(GrBackend backend, GrBackendContext backendContext) {
127    SkASSERT(NULL == fGpu);
128
129    fGpu = GrGpu::Create(backend, backendContext, this);
130    if (NULL == fGpu) {
131        return false;
132    }
133
134    fDrawState = SkNEW(GrDrawState);
135    fGpu->setDrawState(fDrawState);
136
137    fResourceCache = SkNEW_ARGS(GrResourceCache, (fGpu->caps(),
138                                                  MAX_RESOURCE_CACHE_COUNT,
139                                                  MAX_RESOURCE_CACHE_BYTES));
140    fResourceCache->setOverbudgetCallback(OverbudgetCB, this);
141    fResourceCache2 = SkNEW(GrResourceCache2);
142
143    fFontCache = SkNEW_ARGS(GrFontCache, (fGpu));
144
145    fLayerCache.reset(SkNEW_ARGS(GrLayerCache, (this)));
146
147    fLastDrawWasBuffered = kNo_BufferedDraw;
148
149    fAARectRenderer = SkNEW(GrAARectRenderer);
150    fOvalRenderer = SkNEW(GrOvalRenderer);
151
152    fDidTestPMConversions = false;
153
154    this->setupDrawBuffer();
155
156    return true;
157}
158
159GrContext::~GrContext() {
160    if (NULL == fGpu) {
161        return;
162    }
163
164    this->flush();
165
166    for (int i = 0; i < fCleanUpData.count(); ++i) {
167        (*fCleanUpData[i].fFunc)(this, fCleanUpData[i].fInfo);
168    }
169
170    delete fResourceCache2;
171    fResourceCache2 = NULL;
172    delete fResourceCache;
173    fResourceCache = NULL;
174    delete fFontCache;
175    delete fDrawBuffer;
176    delete fDrawBufferVBAllocPool;
177    delete fDrawBufferIBAllocPool;
178
179    fAARectRenderer->unref();
180    fOvalRenderer->unref();
181
182    fGpu->unref();
183    SkSafeUnref(fPathRendererChain);
184    SkSafeUnref(fSoftwarePathRenderer);
185    fDrawState->unref();
186}
187
188void GrContext::abandonContext() {
189    // abandon first so that destructors
190    // don't try to free the resources in the API.
191    fResourceCache2->abandonAll();
192
193    fGpu->contextAbandoned();
194
195    // a path renderer may be holding onto resources that
196    // are now unusable
197    SkSafeSetNull(fPathRendererChain);
198    SkSafeSetNull(fSoftwarePathRenderer);
199
200    delete fDrawBuffer;
201    fDrawBuffer = NULL;
202
203    delete fDrawBufferVBAllocPool;
204    fDrawBufferVBAllocPool = NULL;
205
206    delete fDrawBufferIBAllocPool;
207    fDrawBufferIBAllocPool = NULL;
208
209    fAARectRenderer->reset();
210    fOvalRenderer->reset();
211
212    fResourceCache->purgeAllUnlocked();
213
214    fFontCache->freeAll();
215    fLayerCache->freeAll();
216}
217
218void GrContext::resetContext(uint32_t state) {
219    fGpu->markContextDirty(state);
220}
221
222void GrContext::freeGpuResources() {
223    this->flush();
224
225    fGpu->purgeResources();
226    if (fDrawBuffer) {
227        fDrawBuffer->purgeResources();
228    }
229
230    fAARectRenderer->reset();
231    fOvalRenderer->reset();
232
233    fResourceCache->purgeAllUnlocked();
234    fFontCache->freeAll();
235    fLayerCache->freeAll();
236    // a path renderer may be holding onto resources
237    SkSafeSetNull(fPathRendererChain);
238    SkSafeSetNull(fSoftwarePathRenderer);
239}
240
241void GrContext::getResourceCacheUsage(int* resourceCount, size_t* resourceBytes) const {
242  if (resourceCount) {
243    *resourceCount = fResourceCache->getCachedResourceCount();
244  }
245  if (resourceBytes) {
246    *resourceBytes = fResourceCache->getCachedResourceBytes();
247  }
248}
249
250GrTextContext* GrContext::createTextContext(GrRenderTarget* renderTarget,
251                                            const SkDeviceProperties&
252                                            leakyProperties,
253                                            bool enableDistanceFieldFonts) {
254    if (fGpu->caps()->pathRenderingSupport() && renderTarget->getStencilBuffer() &&
255                                                renderTarget->isMultisampled()) {
256        return GrStencilAndCoverTextContext::Create(this, leakyProperties);
257    }
258
259    return GrDistanceFieldTextContext::Create(this, leakyProperties, enableDistanceFieldFonts);
260}
261
262////////////////////////////////////////////////////////////////////////////////
263
264GrTexture* GrContext::findAndRefTexture(const GrTextureDesc& desc,
265                                        const GrCacheID& cacheID,
266                                        const GrTextureParams* params) {
267    GrResourceKey resourceKey = GrTexturePriv::ComputeKey(fGpu, params, desc, cacheID);
268    GrGpuResource* resource = fResourceCache->find(resourceKey);
269    SkSafeRef(resource);
270    return static_cast<GrTexture*>(resource);
271}
272
273bool GrContext::isTextureInCache(const GrTextureDesc& desc,
274                                 const GrCacheID& cacheID,
275                                 const GrTextureParams* params) const {
276    GrResourceKey resourceKey = GrTexturePriv::ComputeKey(fGpu, params, desc, cacheID);
277    return fResourceCache->hasKey(resourceKey);
278}
279
280void GrContext::addStencilBuffer(GrStencilBuffer* sb) {
281    ASSERT_OWNED_RESOURCE(sb);
282
283    GrResourceKey resourceKey = GrStencilBuffer::ComputeKey(sb->width(),
284                                                            sb->height(),
285                                                            sb->numSamples());
286    fResourceCache->addResource(resourceKey, sb);
287}
288
289GrStencilBuffer* GrContext::findStencilBuffer(int width, int height,
290                                              int sampleCnt) {
291    GrResourceKey resourceKey = GrStencilBuffer::ComputeKey(width,
292                                                            height,
293                                                            sampleCnt);
294    GrGpuResource* resource = fResourceCache->find(resourceKey);
295    return static_cast<GrStencilBuffer*>(resource);
296}
297
298static void stretch_image(void* dst,
299                          int dstW,
300                          int dstH,
301                          const void* src,
302                          int srcW,
303                          int srcH,
304                          size_t bpp) {
305    SkFixed dx = (srcW << 16) / dstW;
306    SkFixed dy = (srcH << 16) / dstH;
307
308    SkFixed y = dy >> 1;
309
310    size_t dstXLimit = dstW*bpp;
311    for (int j = 0; j < dstH; ++j) {
312        SkFixed x = dx >> 1;
313        const uint8_t* srcRow = reinterpret_cast<const uint8_t *>(src) + (y>>16)*srcW*bpp;
314        uint8_t* dstRow = reinterpret_cast<uint8_t *>(dst) + j*dstW*bpp;
315        for (size_t i = 0; i < dstXLimit; i += bpp) {
316            memcpy(dstRow + i, srcRow + (x>>16)*bpp, bpp);
317            x += dx;
318        }
319        y += dy;
320    }
321}
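// Worked example of the 16.16 fixed-point stepping above (illustrative, not
// from the original source): stretching a 3-texel-wide row to 4 texels gives
// dx = (3 << 16) / 4 = 0.75 in 16.16. Starting at x = dx/2 = 0.375 and adding
// dx per destination texel samples source columns floor(0.375) = 0,
// floor(1.125) = 1, floor(1.875) = 1, floor(2.625) = 2, i.e. a nearest-neighbor
// (unfiltered) stretch, which is why the TODO in createResizedTexture notes
// that the CPU fallback does not filter.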
322
323namespace {
324
325// position + local coordinate
326extern const GrVertexAttrib gVertexAttribs[] = {
327    {kVec2f_GrVertexAttribType, 0,               kPosition_GrVertexAttribBinding},
328    {kVec2f_GrVertexAttribType, sizeof(SkPoint), kLocalCoord_GrVertexAttribBinding}
329};
330
331};
332
333// The desired texture is NPOT and tiled but that isn't supported by
334// the current hardware. Resize the texture to be a POT
335GrTexture* GrContext::createResizedTexture(const GrTextureDesc& desc,
336                                           const GrCacheID& cacheID,
337                                           const void* srcData,
338                                           size_t rowBytes,
339                                           bool filter) {
340    SkAutoTUnref<GrTexture> clampedTexture(this->findAndRefTexture(desc, cacheID, NULL));
341    if (NULL == clampedTexture) {
342        clampedTexture.reset(this->createTexture(NULL, desc, cacheID, srcData, rowBytes));
343
344        if (NULL == clampedTexture) {
345            return NULL;
346        }
347    }
348
349    GrTextureDesc rtDesc = desc;
350    rtDesc.fFlags =  rtDesc.fFlags |
351                     kRenderTarget_GrTextureFlagBit |
352                     kNoStencil_GrTextureFlagBit;
353    rtDesc.fWidth  = GrNextPow2(desc.fWidth);
354    rtDesc.fHeight = GrNextPow2(desc.fHeight);
355
356    GrTexture* texture = fGpu->createTexture(rtDesc, NULL, 0);
357
358    if (texture) {
359        GrDrawTarget::AutoStateRestore asr(fGpu, GrDrawTarget::kReset_ASRInit);
360        GrDrawState* drawState = fGpu->drawState();
361        drawState->setRenderTarget(texture->asRenderTarget());
362
363        // if filtering is not desired then we want to ensure all
364        // texels in the resampled image are copies of texels from
365        // the original.
366        GrTextureParams params(SkShader::kClamp_TileMode, filter ? GrTextureParams::kBilerp_FilterMode :
367                                                                   GrTextureParams::kNone_FilterMode);
368        drawState->addColorTextureProcessor(clampedTexture, SkMatrix::I(), params);
369
370        drawState->setVertexAttribs<gVertexAttribs>(SK_ARRAY_COUNT(gVertexAttribs),
371                                                    2 * sizeof(SkPoint));
372
373        GrDrawTarget::AutoReleaseGeometry arg(fGpu, 4, 0);
374
375        if (arg.succeeded()) {
376            SkPoint* verts = (SkPoint*) arg.vertices();
377            verts[0].setIRectFan(0, 0, texture->width(), texture->height(), 2 * sizeof(SkPoint));
378            verts[1].setIRectFan(0, 0, 1, 1, 2 * sizeof(SkPoint));
379            fGpu->drawNonIndexed(kTriangleFan_GrPrimitiveType, 0, 4);
380        }
381    } else {
382        // TODO: Our CPU stretch doesn't filter. But we create separate
383        // stretched textures depending on whether the texture params are
384        // filtered or not. Either implement a filtered stretch blit on the
385        // CPU or just create one stretched texture when the FBO case fails.
386
387        rtDesc.fFlags = kNone_GrTextureFlags;
388        // no longer need to clamp at min RT size.
389        rtDesc.fWidth  = GrNextPow2(desc.fWidth);
390        rtDesc.fHeight = GrNextPow2(desc.fHeight);
391
392        // We shouldn't be resizing a compressed texture.
393        SkASSERT(!GrPixelConfigIsCompressed(desc.fConfig));
394
395        size_t bpp = GrBytesPerPixel(desc.fConfig);
396        GrAutoMalloc<128*128*4> stretchedPixels(bpp * rtDesc.fWidth * rtDesc.fHeight);
397        stretch_image(stretchedPixels.get(), rtDesc.fWidth, rtDesc.fHeight,
398                      srcData, desc.fWidth, desc.fHeight, bpp);
399
400        size_t stretchedRowBytes = rtDesc.fWidth * bpp;
401
402        texture = fGpu->createTexture(rtDesc, stretchedPixels.get(), stretchedRowBytes);
403        SkASSERT(texture);
404    }
405
406    return texture;
407}
408
409GrTexture* GrContext::createTexture(const GrTextureParams* params,
410                                    const GrTextureDesc& desc,
411                                    const GrCacheID& cacheID,
412                                    const void* srcData,
413                                    size_t rowBytes,
414                                    GrResourceKey* cacheKey) {
415    GrResourceKey resourceKey = GrTexturePriv::ComputeKey(fGpu, params, desc, cacheID);
416
417    GrTexture* texture;
418    if (GrTexturePriv::NeedsResizing(resourceKey)) {
419        // We do not know how to resize compressed textures.
420        SkASSERT(!GrPixelConfigIsCompressed(desc.fConfig));
421
422        texture = this->createResizedTexture(desc, cacheID,
423                                             srcData, rowBytes,
424                                             GrTexturePriv::NeedsBilerp(resourceKey));
425    } else {
426        texture = fGpu->createTexture(desc, srcData, rowBytes);
427    }
428
429    if (texture) {
430        fResourceCache->addResource(resourceKey, texture);
431
432        if (cacheKey) {
433            *cacheKey = resourceKey;
434        }
435    }
436
437    return texture;
438}
439
440GrTexture* GrContext::createNewScratchTexture(const GrTextureDesc& desc) {
441    GrTexture* texture = fGpu->createTexture(desc, NULL, 0);
442    if (!texture) {
443        return NULL;
444    }
445    fResourceCache->addResource(texture->getScratchKey(), texture);
446    return texture;
447}
448
449GrTexture* GrContext::refScratchTexture(const GrTextureDesc& inDesc, ScratchTexMatch match,
450                                        bool calledDuringFlush) {
451    // kNoStencil has no meaning if kRT isn't set.
452    SkASSERT((inDesc.fFlags & kRenderTarget_GrTextureFlagBit) ||
453             !(inDesc.fFlags & kNoStencil_GrTextureFlagBit));
454
455    // Make sure caller has checked for renderability if kRT is set.
456    SkASSERT(!(inDesc.fFlags & kRenderTarget_GrTextureFlagBit) ||
457             this->isConfigRenderable(inDesc.fConfig, inDesc.fSampleCnt > 0));
458
459    SkTCopyOnFirstWrite<GrTextureDesc> desc(inDesc);
460
461    if (fGpu->caps()->reuseScratchTextures() || (desc->fFlags & kRenderTarget_GrTextureFlagBit)) {
462        GrTextureFlags origFlags = desc->fFlags;
463        if (kApprox_ScratchTexMatch == match) {
464            // bin by pow2 with a reasonable min
465            static const int MIN_SIZE = 16;
466            GrTextureDesc* wdesc = desc.writable();
467            wdesc->fWidth  = SkTMax(MIN_SIZE, GrNextPow2(desc->fWidth));
468            wdesc->fHeight = SkTMax(MIN_SIZE, GrNextPow2(desc->fHeight));
469        }
470
471        do {
472            GrResourceKey key = GrTexturePriv::ComputeScratchKey(*desc);
473            GrGpuResource* resource = fResourceCache2->findAndRefScratchResource(key,
474                                                                                 calledDuringFlush);
475            if (resource) {
476                fResourceCache->makeResourceMRU(resource);
477                return static_cast<GrTexture*>(resource);
478            }
479
480            if (kExact_ScratchTexMatch == match) {
481                break;
482            }
483            // We had a cache miss and we are in approx mode; relax the fit of the flags.
484
485            // We no longer try to reuse textures that were previously used as render targets in
486            // situations where no RT is needed; doing otherwise can confuse the video driver and
487            // cause significant performance problems in some cases.
488            if (desc->fFlags & kNoStencil_GrTextureFlagBit) {
489                desc.writable()->fFlags = desc->fFlags & ~kNoStencil_GrTextureFlagBit;
490            } else {
491                break;
492            }
493
494        } while (true);
495
496        desc.writable()->fFlags = origFlags;
497    }
498
499    GrTexture* texture = this->createNewScratchTexture(*desc);
500    SkASSERT(NULL == texture ||
501             texture->getScratchKey() == GrTexturePriv::ComputeScratchKey(*desc));
502    return texture;
503}
504
505bool GrContext::OverbudgetCB(void* data) {
506    SkASSERT(data);
507
508    GrContext* context = reinterpret_cast<GrContext*>(data);
509
510    // Flush the InOrderDrawBuffer to possibly free up some textures
511    context->fFlushToReduceCacheSize = true;
512
513    return true;
514}
515
516
517GrTexture* GrContext::createUncachedTexture(const GrTextureDesc& descIn,
518                                            void* srcData,
519                                            size_t rowBytes) {
520    GrTextureDesc descCopy = descIn;
521    return fGpu->createTexture(descCopy, srcData, rowBytes);
522}
523
524void GrContext::getResourceCacheLimits(int* maxTextures, size_t* maxTextureBytes) const {
525    fResourceCache->getLimits(maxTextures, maxTextureBytes);
526}
527
528void GrContext::setResourceCacheLimits(int maxTextures, size_t maxTextureBytes) {
529    fResourceCache->setLimits(maxTextures, maxTextureBytes);
530}
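// Illustrative sketch (not from the original source): how an embedder might
// grow the resource cache budget when it is close to full. The 90% threshold
// and the doubling policy are arbitrary assumptions, not Skia policy.
//
//   int maxCount;
//   size_t maxBytes;
//   context->getResourceCacheLimits(&maxCount, &maxBytes);
//
//   int usedCount;
//   size_t usedBytes;
//   context->getResourceCacheUsage(&usedCount, &usedBytes);
//
//   if (usedBytes > maxBytes * 9 / 10) {
//       context->setResourceCacheLimits(maxCount, 2 * maxBytes);
//   }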
531
532int GrContext::getMaxTextureSize() const {
533    return SkTMin(fGpu->caps()->maxTextureSize(), fMaxTextureSizeOverride);
534}
535
536int GrContext::getMaxRenderTargetSize() const {
537    return fGpu->caps()->maxRenderTargetSize();
538}
539
540int GrContext::getMaxSampleCount() const {
541    return fGpu->caps()->maxSampleCount();
542}
543
544///////////////////////////////////////////////////////////////////////////////
545
546GrTexture* GrContext::wrapBackendTexture(const GrBackendTextureDesc& desc) {
547    return fGpu->wrapBackendTexture(desc);
548}
549
550GrRenderTarget* GrContext::wrapBackendRenderTarget(const GrBackendRenderTargetDesc& desc) {
551    return fGpu->wrapBackendRenderTarget(desc);
552}
553
554///////////////////////////////////////////////////////////////////////////////
555
556bool GrContext::supportsIndex8PixelConfig(const GrTextureParams* params,
557                                          int width, int height) const {
558    const GrDrawTargetCaps* caps = fGpu->caps();
559    if (!caps->isConfigTexturable(kIndex_8_GrPixelConfig)) {
560        return false;
561    }
562
563    bool isPow2 = SkIsPow2(width) && SkIsPow2(height);
564
565    if (!isPow2) {
566        bool tiled = params && params->isTiled();
567        if (tiled && !caps->npotTextureTileSupport()) {
568            return false;
569        }
570    }
571    return true;
572}
573
574
575////////////////////////////////////////////////////////////////////////////////
576
577void GrContext::clear(const SkIRect* rect,
578                      const GrColor color,
579                      bool canIgnoreRect,
580                      GrRenderTarget* renderTarget) {
581    ASSERT_OWNED_RESOURCE(renderTarget);
582    AutoRestoreEffects are;
583    AutoCheckFlush acf(this);
584    GR_CREATE_TRACE_MARKER_CONTEXT("GrContext::clear", this);
585    GrDrawTarget* target = this->prepareToDraw(NULL, BUFFERED_DRAW, &are, &acf);
586    if (NULL == target) {
587        return;
588    }
589    target->clear(rect, color, canIgnoreRect, renderTarget);
590}
591
592void GrContext::drawPaint(const GrPaint& origPaint) {
593    // set rect to be big enough to fill the space, but not super-huge, so we
594    // don't overflow fixed-point implementations
595    SkRect r;
596    r.setLTRB(0, 0,
597              SkIntToScalar(getRenderTarget()->width()),
598              SkIntToScalar(getRenderTarget()->height()));
599    SkMatrix inverse;
600    SkTCopyOnFirstWrite<GrPaint> paint(origPaint);
601    AutoMatrix am;
602    GR_CREATE_TRACE_MARKER_CONTEXT("GrContext::drawPaint", this);
603
604    // We attempt to map r by the inverse matrix and draw that. mapRect will
605    // map the four corners and bound them with a new rect. This will not
606    // produce a correct result for some perspective matrices.
607    if (!this->getMatrix().hasPerspective()) {
608        if (!fViewMatrix.invert(&inverse)) {
609            GrPrintf("Could not invert matrix\n");
610            return;
611        }
612        inverse.mapRect(&r);
613    } else {
614        if (!am.setIdentity(this, paint.writable())) {
615            GrPrintf("Could not invert matrix\n");
616            return;
617        }
618    }
619    // by definition this fills the entire clip, no need for AA
620    if (paint->isAntiAlias()) {
621        paint.writable()->setAntiAlias(false);
622    }
623    this->drawRect(*paint, r);
624}
625
626#ifdef SK_DEVELOPER
627void GrContext::dumpFontCache() const {
628    fFontCache->dump();
629}
630#endif
631
632////////////////////////////////////////////////////////////////////////////////
633
634/*  create a triangle strip that strokes the specified rect. There are 8
635 unique vertices, but we repeat the last 2 to close up. Alternatively we
636 could use an index array and then send only 8 verts, but it's not clear that
637 would be faster.
638 */
639static void setStrokeRectStrip(SkPoint verts[10], SkRect rect,
640                               SkScalar width) {
641    const SkScalar rad = SkScalarHalf(width);
642    rect.sort();
643
644    verts[0].set(rect.fLeft + rad, rect.fTop + rad);
645    verts[1].set(rect.fLeft - rad, rect.fTop - rad);
646    verts[2].set(rect.fRight - rad, rect.fTop + rad);
647    verts[3].set(rect.fRight + rad, rect.fTop - rad);
648    verts[4].set(rect.fRight - rad, rect.fBottom - rad);
649    verts[5].set(rect.fRight + rad, rect.fBottom + rad);
650    verts[6].set(rect.fLeft + rad, rect.fBottom - rad);
651    verts[7].set(rect.fLeft - rad, rect.fBottom + rad);
652    verts[8] = verts[0];
653    verts[9] = verts[1];
654}
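// Illustrative note (not from the original source): the strip alternates
// inner (inset by rad) and outer (outset by rad) corners while walking
// top-left -> top-right -> bottom-right -> bottom-left, so consecutive
// triangles tile the frame between the inset and outset rectangles:
//
//      1-------------3       odd indices  = outer corners (outset by rad)
//      |  0-------2  |       even indices = inner corners (inset by rad)
//      |  |       |  |
//      |  6-------4  |       verts[8] and verts[9] repeat verts[0] and
//      7-------------5       verts[1] so the strip closes on itself.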
655
656static inline bool is_irect(const SkRect& r) {
657  return SkScalarIsInt(r.fLeft)  && SkScalarIsInt(r.fTop) &&
658         SkScalarIsInt(r.fRight) && SkScalarIsInt(r.fBottom);
659}
660
661static bool apply_aa_to_rect(GrDrawTarget* target,
662                             const SkRect& rect,
663                             SkScalar strokeWidth,
664                             const SkMatrix& combinedMatrix,
665                             SkRect* devBoundRect) {
666    if (!target->getDrawState().canTweakAlphaForCoverage() &&
667        target->shouldDisableCoverageAAForBlend()) {
668#ifdef SK_DEBUG
669        //GrPrintf("Turning off AA to correctly apply blend.\n");
670#endif
671        return false;
672    }
673    const GrDrawState& drawState = target->getDrawState();
674    if (drawState.getRenderTarget()->isMultisampled()) {
675        return false;
676    }
677
678#if defined(SHADER_AA_FILL_RECT) || !defined(IGNORE_ROT_AA_RECT_OPT)
679    if (strokeWidth >= 0) {
680#endif
681        if (!combinedMatrix.preservesAxisAlignment()) {
682            return false;
683        }
684
685#if defined(SHADER_AA_FILL_RECT) || !defined(IGNORE_ROT_AA_RECT_OPT)
686    } else {
687        if (!combinedMatrix.preservesRightAngles()) {
688            return false;
689        }
690    }
691#endif
692
693    combinedMatrix.mapRect(devBoundRect, rect);
694    if (strokeWidth < 0) {
695        return !is_irect(*devBoundRect);
696    }
697
698    return true;
699}
700
701static inline bool rect_contains_inclusive(const SkRect& rect, const SkPoint& point) {
702    return point.fX >= rect.fLeft && point.fX <= rect.fRight &&
703           point.fY >= rect.fTop && point.fY <= rect.fBottom;
704}
705
706void GrContext::drawRect(const GrPaint& paint,
707                         const SkRect& rect,
708                         const GrStrokeInfo* strokeInfo) {
709    if (strokeInfo && strokeInfo->isDashed()) {
710        SkPath path;
711        path.addRect(rect);
712        this->drawPath(paint, path, *strokeInfo);
713        return;
714    }
715
716    AutoRestoreEffects are;
717    AutoCheckFlush acf(this);
718    GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);
719    if (NULL == target) {
720        return;
721    }
722
723    GR_CREATE_TRACE_MARKER("GrContext::drawRect", target);
724    SkScalar width = NULL == strokeInfo ? -1 : strokeInfo->getStrokeRec().getWidth();
725    SkMatrix matrix = target->drawState()->getViewMatrix();
726
727    // Check if this is a full RT draw and can be replaced with a clear. We don't bother checking
728    // cases where the RT is fully inside a stroke.
729    if (width < 0) {
730        SkRect rtRect;
731        target->getDrawState().getRenderTarget()->getBoundsRect(&rtRect);
732        SkRect clipSpaceRTRect = rtRect;
733        bool checkClip = false;
734        if (this->getClip()) {
735            checkClip = true;
736            clipSpaceRTRect.offset(SkIntToScalar(this->getClip()->fOrigin.fX),
737                                   SkIntToScalar(this->getClip()->fOrigin.fY));
738        }
739        // Does the clip contain the entire RT?
740        if (!checkClip || target->getClip()->fClipStack->quickContains(clipSpaceRTRect)) {
741            SkMatrix invM;
742            if (!matrix.invert(&invM)) {
743                return;
744            }
745            // Does the rect bound the RT?
746            SkPoint srcSpaceRTQuad[4];
747            invM.mapRectToQuad(srcSpaceRTQuad, rtRect);
748            if (rect_contains_inclusive(rect, srcSpaceRTQuad[0]) &&
749                rect_contains_inclusive(rect, srcSpaceRTQuad[1]) &&
750                rect_contains_inclusive(rect, srcSpaceRTQuad[2]) &&
751                rect_contains_inclusive(rect, srcSpaceRTQuad[3])) {
752                // Will it blend?
753                GrColor clearColor;
754                if (paint.isOpaqueAndConstantColor(&clearColor)) {
755                    target->clear(NULL, clearColor, true);
756                    return;
757                }
758            }
759        }
760    }
761
762    SkRect devBoundRect;
763    bool needAA = paint.isAntiAlias() &&
764                  !target->getDrawState().getRenderTarget()->isMultisampled();
765    bool doAA = needAA && apply_aa_to_rect(target, rect, width, matrix, &devBoundRect);
766
767    if (doAA) {
768        GrDrawState::AutoViewMatrixRestore avmr;
769        if (!avmr.setIdentity(target->drawState())) {
770            return;
771        }
772        if (width >= 0) {
773            // Only dereference strokeInfo here, where width >= 0 guarantees it is non-NULL.
774            const SkStrokeRec& strokeRec = strokeInfo->getStrokeRec();
775            fAARectRenderer->strokeAARect(this->getGpu(), target, rect,
776                                          matrix, devBoundRect,
777                                          strokeRec);
778        } else {
779            // filled AA rect
780            fAARectRenderer->fillAARect(this->getGpu(), target,
781                                        rect, matrix, devBoundRect);
782        }
783        return;
784    }
785
786    if (width >= 0) {
787        // TODO: consider making static vertex buffers for these cases.
788        // Hairline could be done by just adding a closing vertex to
789        // unitSquareVertexBuffer()
790
791        static const int worstCaseVertCount = 10;
792        target->drawState()->setDefaultVertexAttribs();
793        GrDrawTarget::AutoReleaseGeometry geo(target, worstCaseVertCount, 0);
794
795        if (!geo.succeeded()) {
796            GrPrintf("Failed to get space for vertices!\n");
797            return;
798        }
799
800        GrPrimitiveType primType;
801        int vertCount;
802        SkPoint* vertex = geo.positions();
803
804        if (width > 0) {
805            vertCount = 10;
806            primType = kTriangleStrip_GrPrimitiveType;
807            setStrokeRectStrip(vertex, rect, width);
808        } else {
809            // hairline
810            vertCount = 5;
811            primType = kLineStrip_GrPrimitiveType;
812            vertex[0].set(rect.fLeft, rect.fTop);
813            vertex[1].set(rect.fRight, rect.fTop);
814            vertex[2].set(rect.fRight, rect.fBottom);
815            vertex[3].set(rect.fLeft, rect.fBottom);
816            vertex[4].set(rect.fLeft, rect.fTop);
817        }
818
819        target->drawNonIndexed(primType, 0, vertCount);
820    } else {
821        // filled BW rect
822        target->drawSimpleRect(rect);
823    }
824}
825
826void GrContext::drawRectToRect(const GrPaint& paint,
827                               const SkRect& dstRect,
828                               const SkRect& localRect,
829                               const SkMatrix* localMatrix) {
830    AutoRestoreEffects are;
831    AutoCheckFlush acf(this);
832    GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);
833    if (NULL == target) {
834        return;
835    }
836
837    GR_CREATE_TRACE_MARKER("GrContext::drawRectToRect", target);
838
839    target->drawRect(dstRect, &localRect, localMatrix);
840}
841
842namespace {
843
844extern const GrVertexAttrib gPosUVColorAttribs[] = {
845    {kVec2f_GrVertexAttribType,  0, kPosition_GrVertexAttribBinding },
846    {kVec2f_GrVertexAttribType,  sizeof(SkPoint), kLocalCoord_GrVertexAttribBinding },
847    {kVec4ub_GrVertexAttribType, 2*sizeof(SkPoint), kColor_GrVertexAttribBinding}
848};
849
850static const size_t kPosUVAttribsSize = 2 * sizeof(SkPoint);
851static const size_t kPosUVColorAttribsSize = 2 * sizeof(SkPoint) + sizeof(GrColor);
852
853extern const GrVertexAttrib gPosColorAttribs[] = {
854    {kVec2f_GrVertexAttribType,  0, kPosition_GrVertexAttribBinding},
855    {kVec4ub_GrVertexAttribType, sizeof(SkPoint), kColor_GrVertexAttribBinding},
856};
857
858static const size_t kPosAttribsSize = sizeof(SkPoint);
859static const size_t kPosColorAttribsSize = sizeof(SkPoint) + sizeof(GrColor);
860
861static void set_vertex_attributes(GrDrawState* drawState,
862                                  const SkPoint* texCoords,
863                                  const GrColor* colors,
864                                  int* colorOffset,
865                                  int* texOffset) {
866    *texOffset = -1;
867    *colorOffset = -1;
868
869    if (texCoords && colors) {
870        *texOffset = sizeof(SkPoint);
871        *colorOffset = 2*sizeof(SkPoint);
872        drawState->setVertexAttribs<gPosUVColorAttribs>(3, kPosUVColorAttribsSize);
873    } else if (texCoords) {
874        *texOffset = sizeof(SkPoint);
875        drawState->setVertexAttribs<gPosUVColorAttribs>(2, kPosUVAttribsSize);
876    } else if (colors) {
877        *colorOffset = sizeof(SkPoint);
878        drawState->setVertexAttribs<gPosColorAttribs>(2, kPosColorAttribsSize);
879    } else {
880        drawState->setVertexAttribs<gPosColorAttribs>(1, kPosAttribsSize);
881    }
882}
883
884};
885
886void GrContext::drawVertices(const GrPaint& paint,
887                             GrPrimitiveType primitiveType,
888                             int vertexCount,
889                             const SkPoint positions[],
890                             const SkPoint texCoords[],
891                             const GrColor colors[],
892                             const uint16_t indices[],
893                             int indexCount) {
894    AutoRestoreEffects are;
895    AutoCheckFlush acf(this);
896    GrDrawTarget::AutoReleaseGeometry geo; // must be inside AutoCheckFlush scope
897
898    GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);
899    if (NULL == target) {
900        return;
901    }
902    GrDrawState* drawState = target->drawState();
903
904    GR_CREATE_TRACE_MARKER("GrContext::drawVertices", target);
905
906    int colorOffset = -1, texOffset = -1;
907    set_vertex_attributes(drawState, texCoords, colors, &colorOffset, &texOffset);
908
909    size_t vertexStride = drawState->getVertexStride();
910    if (sizeof(SkPoint) != vertexStride) {
911        if (!geo.set(target, vertexCount, 0)) {
912            GrPrintf("Failed to get space for vertices!\n");
913            return;
914        }
915        void* curVertex = geo.vertices();
916
917        for (int i = 0; i < vertexCount; ++i) {
918            *((SkPoint*)curVertex) = positions[i];
919
920            if (texOffset >= 0) {
921                *(SkPoint*)((intptr_t)curVertex + texOffset) = texCoords[i];
922            }
923            if (colorOffset >= 0) {
924                *(GrColor*)((intptr_t)curVertex + colorOffset) = colors[i];
925            }
926            curVertex = (void*)((intptr_t)curVertex + vertexStride);
927        }
928    } else {
929        target->setVertexSourceToArray(positions, vertexCount);
930    }
931
932    // we don't currently apply offscreen AA to this path. Need improved
933    // management of GrDrawTarget's geometry to avoid copying points per-tile.
934
935    if (indices) {
936        target->setIndexSourceToArray(indices, indexCount);
937        target->drawIndexed(primitiveType, 0, 0, vertexCount, indexCount);
938        target->resetIndexSource();
939    } else {
940        target->drawNonIndexed(primitiveType, 0, vertexCount);
941    }
942}
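// Illustrative sketch (not from the original source): drawing a single
// flat-shaded triangle with per-vertex colors. 'grPaint' is assumed to be a
// GrPaint that the caller has already configured.
//
//   SkPoint pos[3] = { { 0, 0 }, { 100, 0 }, { 50, 100 } };
//   GrColor col[3] = { GrColorPackRGBA(255, 0, 0, 255),
//                      GrColorPackRGBA(0, 255, 0, 255),
//                      GrColorPackRGBA(0, 0, 255, 255) };
//   context->drawVertices(grPaint, kTriangles_GrPrimitiveType, 3,
//                         pos, NULL, col, NULL, 0);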
943
944///////////////////////////////////////////////////////////////////////////////
945
946void GrContext::drawRRect(const GrPaint& paint,
947                          const SkRRect& rrect,
948                          const GrStrokeInfo& strokeInfo) {
949    if (rrect.isEmpty()) {
950       return;
951    }
952
953    if (strokeInfo.isDashed()) {
954        SkPath path;
955        path.addRRect(rrect);
956        this->drawPath(paint, path, strokeInfo);
957        return;
958    }
959
960    AutoRestoreEffects are;
961    AutoCheckFlush acf(this);
962    GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);
963    if (NULL == target) {
964        return;
965    }
966
967    GR_CREATE_TRACE_MARKER("GrContext::drawRRect", target);
968
969    const SkStrokeRec& strokeRec = strokeInfo.getStrokeRec();
970
971    if (!fOvalRenderer->drawRRect(target, this, paint.isAntiAlias(), rrect, strokeRec)) {
972        SkPath path;
973        path.addRRect(rrect);
974        this->internalDrawPath(target, paint.isAntiAlias(), path, strokeInfo);
975    }
976}
977
978///////////////////////////////////////////////////////////////////////////////
979
980void GrContext::drawDRRect(const GrPaint& paint,
981                           const SkRRect& outer,
982                           const SkRRect& inner) {
983    if (outer.isEmpty()) {
984       return;
985    }
986
987    AutoRestoreEffects are;
988    AutoCheckFlush acf(this);
989    GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);
990    if (NULL == target) { return; }
991    GR_CREATE_TRACE_MARKER("GrContext::drawDRRect", target);
992
993    if (!fOvalRenderer->drawDRRect(target, this, paint.isAntiAlias(), outer, inner)) {
994        SkPath path;
995        path.addRRect(inner);
996        path.addRRect(outer);
997        path.setFillType(SkPath::kEvenOdd_FillType);
998
999        GrStrokeInfo fillRec(SkStrokeRec::kFill_InitStyle);
1000        this->internalDrawPath(target, paint.isAntiAlias(), path, fillRec);
1001    }
1002}
1003
1004///////////////////////////////////////////////////////////////////////////////
1005
1006void GrContext::drawOval(const GrPaint& paint,
1007                         const SkRect& oval,
1008                         const GrStrokeInfo& strokeInfo) {
1009    if (oval.isEmpty()) {
1010       return;
1011    }
1012
1013    if (strokeInfo.isDashed()) {
1014        SkPath path;
1015        path.addOval(oval);
1016        this->drawPath(paint, path, strokeInfo);
1017        return;
1018    }
1019
1020    AutoRestoreEffects are;
1021    AutoCheckFlush acf(this);
1022    GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);
1023    if (NULL == target) {
1024        return;
1025    }
1026
1027    GR_CREATE_TRACE_MARKER("GrContext::drawOval", target);
1028
1029    const SkStrokeRec& strokeRec = strokeInfo.getStrokeRec();
1030
1031
1032    if (!fOvalRenderer->drawOval(target, this, paint.isAntiAlias(), oval, strokeRec)) {
1033        SkPath path;
1034        path.addOval(oval);
1035        this->internalDrawPath(target, paint.isAntiAlias(), path, strokeInfo);
1036    }
1037}
1038
1039// Can 'path' be drawn as a pair of filled nested rectangles?
1040static bool is_nested_rects(GrDrawTarget* target,
1041                            const SkPath& path,
1042                            const SkStrokeRec& stroke,
1043                            SkRect rects[2]) {
1044    SkASSERT(stroke.isFillStyle());
1045
1046    if (path.isInverseFillType()) {
1047        return false;
1048    }
1049
1050    const GrDrawState& drawState = target->getDrawState();
1051
1052    // TODO: this restriction could be lifted if we were willing to apply
1053    // the matrix to all the points individually rather than just to the rect
1054    if (!drawState.getViewMatrix().preservesAxisAlignment()) {
1055        return false;
1056    }
1057
1058    if (!target->getDrawState().canTweakAlphaForCoverage() &&
1059        target->shouldDisableCoverageAAForBlend()) {
1060        return false;
1061    }
1062
1063    SkPath::Direction dirs[2];
1064    if (!path.isNestedRects(rects, dirs)) {
1065        return false;
1066    }
1067
1068    if (SkPath::kWinding_FillType == path.getFillType() && dirs[0] == dirs[1]) {
1069        // The two rects need to be wound opposite to each other
1070        return false;
1071    }
1072
1073    // Right now, nested rects where the margin is not the same width
1074    // all around do not render correctly
1075    const SkScalar* outer = rects[0].asScalars();
1076    const SkScalar* inner = rects[1].asScalars();
1077
1078    SkScalar margin = SkScalarAbs(outer[0] - inner[0]);
1079    for (int i = 1; i < 4; ++i) {
1080        SkScalar temp = SkScalarAbs(outer[i] - inner[i]);
1081        if (!SkScalarNearlyEqual(margin, temp)) {
1082            return false;
1083        }
1084    }
1085
1086    return true;
1087}
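// Illustrative example (not from the original source) of a path that satisfies
// is_nested_rects and thus takes the fillAANestedRects fast path in drawPath
// when drawn with an anti-aliased, fill-style paint on a non-MSAA target:
//
//   SkPath frame;
//   frame.addRect(SkRect::MakeWH(100, 100), SkPath::kCW_Direction);
//   frame.addRect(SkRect::MakeLTRB(10, 10, 90, 90), SkPath::kCCW_Direction);
//   // The margin is 10 on all four sides and the windings differ, so the
//   // nested-rects criteria above are met.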
1088
1089void GrContext::drawPath(const GrPaint& paint, const SkPath& path, const GrStrokeInfo& strokeInfo) {
1090
1091    if (path.isEmpty()) {
1092       if (path.isInverseFillType()) {
1093           this->drawPaint(paint);
1094       }
1095       return;
1096    }
1097
1098    if (strokeInfo.isDashed()) {
1099        SkPoint pts[2];
1100        if (path.isLine(pts)) {
1101            AutoRestoreEffects are;
1102            AutoCheckFlush acf(this);
1103            GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);
1104            if (NULL == target) {
1105                return;
1106            }
1107            GrDrawState* drawState = target->drawState();
1108
1109            SkMatrix origViewMatrix = drawState->getViewMatrix();
1110            GrDrawState::AutoViewMatrixRestore avmr;
1111            if (avmr.setIdentity(target->drawState())) {
1112                if (GrDashingEffect::DrawDashLine(pts, paint, strokeInfo, fGpu, target,
1113                                                  origViewMatrix)) {
1114                    return;
1115                }
1116            }
1117        }
1118
1119        // Filter dashed path into new path with the dashing applied
1120        const SkPathEffect::DashInfo& info = strokeInfo.getDashInfo();
1121        SkTLazy<SkPath> effectPath;
1122        GrStrokeInfo newStrokeInfo(strokeInfo, false);
1123        SkStrokeRec* stroke = newStrokeInfo.getStrokeRecPtr();
1124        if (SkDashPath::FilterDashPath(effectPath.init(), path, stroke, NULL, info)) {
1125            this->drawPath(paint, *effectPath.get(), newStrokeInfo);
1126            return;
1127        }
1128
1129        this->drawPath(paint, path, newStrokeInfo);
1130        return;
1131    }
1132
1133    // Note that internalDrawPath may sw-rasterize the path into a scratch texture.
1134    // Scratch textures can be recycled after they are returned to the texture
1135    // cache. This presents a potential hazard for buffered drawing. However,
1136    // the writePixels that uploads to the scratch will perform a flush so we're
1137    // OK.
1138    AutoRestoreEffects are;
1139    AutoCheckFlush acf(this);
1140    GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);
1141    if (NULL == target) {
1142        return;
1143    }
1144    GrDrawState* drawState = target->drawState();
1145
1146    GR_CREATE_TRACE_MARKER1("GrContext::drawPath", target, "Is Convex", path.isConvex());
1147
1148    const SkStrokeRec& strokeRec = strokeInfo.getStrokeRec();
1149
1150    bool useCoverageAA = paint.isAntiAlias() && !drawState->getRenderTarget()->isMultisampled();
1151
1152    if (useCoverageAA && strokeRec.getWidth() < 0 && !path.isConvex()) {
1153        // Concave AA paths are expensive - try to avoid them for special cases
1154        SkRect rects[2];
1155
1156        if (is_nested_rects(target, path, strokeRec, rects)) {
1157            SkMatrix origViewMatrix = drawState->getViewMatrix();
1158            GrDrawState::AutoViewMatrixRestore avmr;
1159            if (!avmr.setIdentity(target->drawState())) {
1160                return;
1161            }
1162
1163            fAARectRenderer->fillAANestedRects(this->getGpu(), target, rects, origViewMatrix);
1164            return;
1165        }
1166    }
1167
1168    SkRect ovalRect;
1169    bool isOval = path.isOval(&ovalRect);
1170
1171    if (!isOval || path.isInverseFillType()
1172        || !fOvalRenderer->drawOval(target, this, paint.isAntiAlias(), ovalRect, strokeRec)) {
1173        this->internalDrawPath(target, paint.isAntiAlias(), path, strokeInfo);
1174    }
1175}
1176
1177void GrContext::internalDrawPath(GrDrawTarget* target, bool useAA, const SkPath& path,
1178                                 const GrStrokeInfo& strokeInfo) {
1179    SkASSERT(!path.isEmpty());
1180
1181    GR_CREATE_TRACE_MARKER("GrContext::internalDrawPath", target);
1182
1183
1184    // An assumption here is that the path renderer will tweak the src color (either
1185    // the input alpha or in the frag shader) in some form to implement AA. If we
1186    // have some future driver-mojo path AA that can do the right thing with respect
1187    // to the blend then we'll need some query on the PR.
1188    bool useCoverageAA = useAA &&
1189        !target->getDrawState().getRenderTarget()->isMultisampled() &&
1190        !target->shouldDisableCoverageAAForBlend();
1191
1192
1193    GrPathRendererChain::DrawType type =
1194        useCoverageAA ? GrPathRendererChain::kColorAntiAlias_DrawType :
1195                           GrPathRendererChain::kColor_DrawType;
1196
1197    const SkPath* pathPtr = &path;
1198    SkTLazy<SkPath> tmpPath;
1199    SkTCopyOnFirstWrite<SkStrokeRec> stroke(strokeInfo.getStrokeRec());
1200
1201    // Try a 1st time without stroking the path and without allowing the SW renderer
1202    GrPathRenderer* pr = this->getPathRenderer(*pathPtr, *stroke, target, false, type);
1203
1204    if (NULL == pr) {
1205        if (!GrPathRenderer::IsStrokeHairlineOrEquivalent(*stroke, this->getMatrix(), NULL)) {
1206            // It didn't work the 1st time, so try again with the stroked path
1207            if (stroke->applyToPath(tmpPath.init(), *pathPtr)) {
1208                pathPtr = tmpPath.get();
1209                stroke.writable()->setFillStyle();
1210                if (pathPtr->isEmpty()) {
1211                    return;
1212                }
1213            }
1214        }
1215
1216        // This time, allow SW renderer
1217        pr = this->getPathRenderer(*pathPtr, *stroke, target, true, type);
1218    }
1219
1220    if (NULL == pr) {
1221#ifdef SK_DEBUG
1222        GrPrintf("Unable to find path renderer compatible with path.\n");
1223#endif
1224        return;
1225    }
1226
1227    pr->drawPath(*pathPtr, *stroke, target, useCoverageAA);
1228}
1229
1230////////////////////////////////////////////////////////////////////////////////
1231
1232void GrContext::flush(int flagsBitfield) {
1233    if (NULL == fDrawBuffer) {
1234        return;
1235    }
1236
1237    if (kDiscard_FlushBit & flagsBitfield) {
1238        fDrawBuffer->reset();
1239    } else {
1240        fDrawBuffer->flush();
1241    }
1242    fResourceCache->purgeAsNeeded();
1243    fFlushToReduceCacheSize = false;
1244}
1245
1246bool sw_convert_to_premul(GrPixelConfig srcConfig, int width, int height, size_t inRowBytes,
1247                          const void* inPixels, size_t outRowBytes, void* outPixels) {
1248    SkSrcPixelInfo srcPI;
1249    if (!GrPixelConfig2ColorType(srcConfig, &srcPI.fColorType)) {
1250        return false;
1251    }
1252    srcPI.fAlphaType = kUnpremul_SkAlphaType;
1253    srcPI.fPixels = inPixels;
1254    srcPI.fRowBytes = inRowBytes;
1255
1256    SkDstPixelInfo dstPI;
1257    dstPI.fColorType = srcPI.fColorType;
1258    dstPI.fAlphaType = kPremul_SkAlphaType;
1259    dstPI.fPixels = outPixels;
1260    dstPI.fRowBytes = outRowBytes;
1261
1262    return srcPI.convertPixelsTo(&dstPI, width, height);
1263}
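// Illustrative sketch (not from the original source): converting a small
// unpremultiplied 8888 buffer before upload, mirroring what writeSurfacePixels
// falls back to when no UPM->PM effect can be created. The pixel values are
// made-up example data.
//
//   const int w = 2, h = 1;
//   uint32_t upm[2] = { 0x80402010, 0x40102030 };   // unpremul example data
//   uint32_t pm[2];
//   if (sw_convert_to_premul(kRGBA_8888_GrPixelConfig, w, h,
//                            w * sizeof(uint32_t), upm,
//                            w * sizeof(uint32_t), pm)) {
//       // pm[] now holds premultiplied pixels ready for writeTexturePixels().
//   }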
1264
1265bool GrContext::writeSurfacePixels(GrSurface* surface,
1266                                   int left, int top, int width, int height,
1267                                   GrPixelConfig srcConfig, const void* buffer, size_t rowBytes,
1268                                   uint32_t pixelOpsFlags) {
1269
1270    {
1271        GrTexture* texture = NULL;
1272        if (!(kUnpremul_PixelOpsFlag & pixelOpsFlags) && (texture = surface->asTexture()) &&
1273            fGpu->canWriteTexturePixels(texture, srcConfig)) {
1274
1275            if (!(kDontFlush_PixelOpsFlag & pixelOpsFlags) &&
1276                surface->surfacePriv().hasPendingIO()) {
1277                this->flush();
1278            }
1279            return fGpu->writeTexturePixels(texture, left, top, width, height,
1280                                            srcConfig, buffer, rowBytes);
1281            // Don't need to check kFlushWrites_PixelOp here, we just did a direct write so the
1282            // upload is already flushed.
1283        }
1284    }
1285
1286    // If we didn't do a direct texture write then we upload the pixels to a texture and draw.
1287    GrRenderTarget* renderTarget = surface->asRenderTarget();
1288    if (NULL == renderTarget) {
1289        return false;
1290    }
1291
1292    // We ignore the preferred config unless it is an R/B swap of the src config. In that case
1293    // we will upload the original src data to a scratch texture but we will spoof it as the swapped
1294    // config. This scratch will then have R and B swapped. We correct for this by swapping again
1295    // when drawing the scratch to the dst using a conversion effect.
1296    bool swapRAndB = false;
1297    GrPixelConfig writeConfig = srcConfig;
1298    if (GrPixelConfigSwapRAndB(srcConfig) ==
1299        fGpu->preferredWritePixelsConfig(srcConfig, renderTarget->config())) {
1300        writeConfig = GrPixelConfigSwapRAndB(srcConfig);
1301        swapRAndB = true;
1302    }
1303
1304    GrTextureDesc desc;
1305    desc.fWidth = width;
1306    desc.fHeight = height;
1307    desc.fConfig = writeConfig;
1308    SkAutoTUnref<GrTexture> texture(this->refScratchTexture(desc, kApprox_ScratchTexMatch));
1309    if (!texture) {
1310        return false;
1311    }
1312
1313    SkAutoTUnref<const GrFragmentProcessor> fp;
1314    SkMatrix textureMatrix;
1315    textureMatrix.setIDiv(texture->width(), texture->height());
1316
1317    // allocate a tmp buffer and sw convert the pixels to premul
1318    SkAutoSTMalloc<128 * 128, uint32_t> tmpPixels(0);
1319
1320    if (kUnpremul_PixelOpsFlag & pixelOpsFlags) {
1321        if (!GrPixelConfigIs8888(srcConfig)) {
1322            return false;
1323        }
1324        fp.reset(this->createUPMToPMEffect(texture, swapRAndB, textureMatrix));
1325        // handle the unpremul step on the CPU if we couldn't create an effect to do it.
1326        if (NULL == fp) {
1327            size_t tmpRowBytes = 4 * width;
1328            tmpPixels.reset(width * height);
1329            if (!sw_convert_to_premul(srcConfig, width, height, rowBytes, buffer, tmpRowBytes,
1330                                      tmpPixels.get())) {
1331                return false;
1332            }
1333            rowBytes = tmpRowBytes;
1334            buffer = tmpPixels.get();
1335        }
1336    }
1337    if (NULL == fp) {
1338        fp.reset(GrConfigConversionEffect::Create(texture,
1339                                                  swapRAndB,
1340                                                  GrConfigConversionEffect::kNone_PMConversion,
1341                                                  textureMatrix));
1342    }
1343
1344    // Even if the client told us not to flush, we still flush here. The client may have known that
1345    // writes to the original surface caused no data hazards, but they can't know that the scratch
1346    // we just got is safe.
1347    if (texture->surfacePriv().hasPendingIO()) {
1348        this->flush();
1349    }
1350    if (!fGpu->writeTexturePixels(texture, 0, 0, width, height,
1351                                  writeConfig, buffer, rowBytes)) {
1352        return false;
1353    }
1354
1355    SkMatrix matrix;
1356    matrix.setTranslate(SkIntToScalar(left), SkIntToScalar(top));
1357
1358    // This function can be called in the midst of drawing another object (e.g., when uploading a
1359    // SW-rasterized clip while issuing a draw). So we push the current geometry state before
1360    // drawing a rect to the render target.
1361    // The bracket ensures we pop the stack if we wind up flushing below.
1362    {
1363        GrDrawTarget* drawTarget = this->prepareToDraw(NULL, kYes_BufferedDraw, NULL, NULL);
1364        GrDrawTarget::AutoGeometryAndStatePush agasp(drawTarget, GrDrawTarget::kReset_ASRInit,
1365                                                     &matrix);
1366        GrDrawState* drawState = drawTarget->drawState();
1367        drawState->addColorProcessor(fp);
1368        drawState->setRenderTarget(renderTarget);
1369        drawState->disableState(GrDrawState::kClip_StateBit);
1370        drawTarget->drawSimpleRect(SkRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height)));
1371    }
1372
1373    if (kFlushWrites_PixelOp & pixelOpsFlags) {
1374        this->flushSurfaceWrites(surface);
1375    }
1376
1377    return true;
1378}
1379
1380// toggles between RGBA and BGRA
1381static SkColorType toggle_colortype32(SkColorType ct) {
1382    if (kRGBA_8888_SkColorType == ct) {
1383        return kBGRA_8888_SkColorType;
1384    } else {
1385        SkASSERT(kBGRA_8888_SkColorType == ct);
1386        return kRGBA_8888_SkColorType;
1387    }
1388}
1389
1390bool GrContext::readRenderTargetPixels(GrRenderTarget* target,
1391                                       int left, int top, int width, int height,
1392                                       GrPixelConfig dstConfig, void* buffer, size_t rowBytes,
1393                                       uint32_t flags) {
1394    ASSERT_OWNED_RESOURCE(target);
1395
1396    if (NULL == target) {
1397        target = fRenderTarget.get();
1398        if (NULL == target) {
1399            return false;
1400        }
1401    }
1402
1403    if (!(kDontFlush_PixelOpsFlag & flags) && target->surfacePriv().hasPendingWrite()) {
1404        this->flush();
1405    }
1406
1407    // Determine which conversions have to be applied: flipY, swapRAndB, and/or unpremul.
1408
1409    // If fGpu->readPixels would incur a y-flip cost then we will read the pixels upside down. We'll
1410    // either do the flipY by drawing into a scratch with a matrix or on the cpu after the read.
1411    bool flipY = fGpu->readPixelsWillPayForYFlip(target, left, top,
1412                                                 width, height, dstConfig,
1413                                                 rowBytes);
1414    // We ignore the preferred config if it is different than our config unless it is an R/B swap.
1415    // In that case we'll perform an R and B swap while drawing to a scratch texture of the swapped
1416    // config. Then we will call readPixels on the scratch with the swapped config. The swaps during
1417    // the draw cancels out the fact that we call readPixels with a config that is R/B swapped from
1418    // dstConfig.
1419    GrPixelConfig readConfig = dstConfig;
1420    bool swapRAndB = false;
1421    if (GrPixelConfigSwapRAndB(dstConfig) ==
1422        fGpu->preferredReadPixelsConfig(dstConfig, target->config())) {
1423        readConfig = GrPixelConfigSwapRAndB(readConfig);
1424        swapRAndB = true;
1425    }
1426
1427    bool unpremul = SkToBool(kUnpremul_PixelOpsFlag & flags);
1428
1429    if (unpremul && !GrPixelConfigIs8888(dstConfig)) {
1430        // The unpremul flag is only allowed for these two configs.
1431        return false;
1432    }
1433
1434    // If the src is a texture and we would have to do conversions after read pixels, we instead
1435    // do the conversions by drawing the src to a scratch texture. If we handle any of the
1436    // conversions in the draw we set the corresponding bool to false so that we don't reapply it
1437    // on the read back pixels.
1438    GrTexture* src = target->asTexture();
1439    if (src && (swapRAndB || unpremul || flipY)) {
1440        // Make the scratch a render target so we can read its pixels.
1441        GrTextureDesc desc;
1442        desc.fFlags = kRenderTarget_GrTextureFlagBit;
1443        desc.fWidth = width;
1444        desc.fHeight = height;
1445        desc.fConfig = readConfig;
1446        desc.fOrigin = kTopLeft_GrSurfaceOrigin;
1447
1448        // When a full read back is faster than a partial one we could always make the scratch
1449        // exactly match the passed rect. However, if we see many differently sized rectangles we
1450        // will trash our texture cache and pay the cost of creating and destroying many textures.
1451        // So, we only request an exact match when the caller is reading an entire RT.
1452        ScratchTexMatch match = kApprox_ScratchTexMatch;
1453        if (0 == left &&
1454            0 == top &&
1455            target->width() == width &&
1456            target->height() == height &&
1457            fGpu->fullReadPixelsIsFasterThanPartial()) {
1458            match = kExact_ScratchTexMatch;
1459        }
1460        SkAutoTUnref<GrTexture> texture(this->refScratchTexture(desc, match));
1461        if (texture) {
1462            // compute a matrix to perform the draw
1463            SkMatrix textureMatrix;
1464            textureMatrix.setTranslate(SK_Scalar1 * left, SK_Scalar1 * top);
1465            textureMatrix.postIDiv(src->width(), src->height());
1466
1467            SkAutoTUnref<const GrFragmentProcessor> fp;
1468            if (unpremul) {
1469                fp.reset(this->createPMToUPMEffect(src, swapRAndB, textureMatrix));
1470                if (fp) {
1471                    unpremul = false; // we no longer need to do this on CPU after the read back.
1472                }
1473            }
1474            // If we failed to create a PM->UPM effect and have no other conversions to perform then
1475            // there is no longer any point in using the scratch.
1476            if (fp || flipY || swapRAndB) {
1477                if (!fp) {
1478                    fp.reset(GrConfigConversionEffect::Create(
1479                            src, swapRAndB, GrConfigConversionEffect::kNone_PMConversion,
1480                            textureMatrix));
1481                }
1482                swapRAndB = false; // we will handle the swap in the draw.
1483
1484                // We protect the existing geometry here since it may not be
1485                // clear to the caller that a draw operation (i.e., drawSimpleRect)
1486                // can be invoked in this method
1487                GrDrawTarget::AutoGeometryAndStatePush agasp(fGpu, GrDrawTarget::kReset_ASRInit);
1488                GrDrawState* drawState = fGpu->drawState();
1489                SkASSERT(fp);
1490                drawState->addColorProcessor(fp);
1491
1492                drawState->setRenderTarget(texture->asRenderTarget());
1493                SkRect rect = SkRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height));
1494                fGpu->drawSimpleRect(rect);
1495                // we want to read back from the scratch's origin
1496                left = 0;
1497                top = 0;
1498                target = texture->asRenderTarget();
1499            }
1500        }
1501    }
1502    if (!fGpu->readPixels(target,
1503                          left, top, width, height,
1504                          readConfig, buffer, rowBytes)) {
1505        return false;
1506    }
1507    // Perform any conversions we weren't able to perform using a scratch texture.
1508    if (unpremul || swapRAndB) {
1509        SkDstPixelInfo dstPI;
1510        if (!GrPixelConfig2ColorType(dstConfig, &dstPI.fColorType)) {
1511            return false;
1512        }
1513        dstPI.fAlphaType = kUnpremul_SkAlphaType;
1514        dstPI.fPixels = buffer;
1515        dstPI.fRowBytes = rowBytes;
1516
1517        SkSrcPixelInfo srcPI;
1518        srcPI.fColorType = swapRAndB ? toggle_colortype32(dstPI.fColorType) : dstPI.fColorType;
1519        srcPI.fAlphaType = kPremul_SkAlphaType;
1520        srcPI.fPixels = buffer;
1521        srcPI.fRowBytes = rowBytes;
1522
1523        return srcPI.convertPixelsTo(&dstPI, width, height);
1524    }
1525    return true;
1526}
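
// A minimal usage sketch for the read-back path above. The readRenderTargetPixels name, 'context',
// 'target', and the pixel buffer are assumptions for illustration; the parameters mirror the ones
// handled in the function body, and kUnpremul_PixelOpsFlag requests unpremultiplied output.
//
//     SkAutoTMalloc<uint32_t> pixels(width * height);
//     bool ok = context->readRenderTargetPixels(target, left, top, width, height,
//                                               kRGBA_8888_GrPixelConfig, pixels.get(),
//                                               width * sizeof(uint32_t),
//                                               GrContext::kUnpremul_PixelOpsFlag);
//     // 'ok' is false if the read or a required conversion could not be performed.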
1527
1528void GrContext::resolveRenderTarget(GrRenderTarget* target) {
1529    SkASSERT(target);
1530    ASSERT_OWNED_RESOURCE(target);
1531    // In the future we may track whether there are any pending draws to this
1532    // target. We don't today, so we always perform a flush. We don't promise
1533    // this to our clients, though.
1534    this->flush();
1535    if (fGpu) {
1536        fGpu->resolveRenderTarget(target);
1537    }
1538}
1539
1540void GrContext::discardRenderTarget(GrRenderTarget* renderTarget) {
1541    SkASSERT(renderTarget);
1542    ASSERT_OWNED_RESOURCE(renderTarget);
1543    AutoRestoreEffects are;
1544    AutoCheckFlush acf(this);
1545    GrDrawTarget* target = this->prepareToDraw(NULL, BUFFERED_DRAW, &are, &acf);
1546    if (NULL == target) {
1547        return;
1548    }
1549    target->discard(renderTarget);
1550}
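
// Illustrative call pattern for discardRenderTarget (sketch; 'context' and 'rt' are hypothetical):
// a client that redraws the entire target each frame can discard the old contents first so the
// backend is free to skip preserving them.
//
//     context->discardRenderTarget(rt);   // previous contents become undefined
//     // ... draws that cover the whole of 'rt' ...
//     context->flush();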
1551
1552void GrContext::copySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
1553                            const SkIPoint& dstPoint, uint32_t pixelOpsFlags) {
1554    if (NULL == src || NULL == dst) {
1555        return;
1556    }
1557    ASSERT_OWNED_RESOURCE(src);
1558    ASSERT_OWNED_RESOURCE(dst);
1559
1560    // Since we're going through the draw target and not directly to the GPU, there is
1561    // no need to check kDontFlush_PixelOpsFlag here.
1562
1563    GrDrawTarget* target = this->prepareToDraw(NULL, BUFFERED_DRAW, NULL, NULL);
1564    if (NULL == target) {
1565        return;
1566    }
1567    target->copySurface(dst, src, srcRect, dstPoint);
1568
1569    if (kFlushWrites_PixelOp & pixelOpsFlags) {
1570        this->flush();
1571    }
1572}
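
// A hedged usage sketch for copySurface: copy the top-left w x h block of 'src' into 'dst' at its
// origin and request that the copy be flushed. The variable names are hypothetical;
// kFlushWrites_PixelOp is the flag tested just above.
//
//     context->copySurface(dst, src,
//                          SkIRect::MakeWH(w, h),
//                          SkIPoint::Make(0, 0),
//                          GrContext::kFlushWrites_PixelOp);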
1573
1574void GrContext::flushSurfaceWrites(GrSurface* surface) {
1575    if (surface->surfacePriv().hasPendingWrite()) {
1576        this->flush();
1577    }
1578}
1579
1580////////////////////////////////////////////////////////////////////////////////
1581
1582GrDrawTarget* GrContext::prepareToDraw(const GrPaint* paint,
1583                                       BufferedDraw buffered,
1584                                       AutoRestoreEffects* are,
1585                                       AutoCheckFlush* acf) {
1586    // All users of this draw state should be freeing up all effects when they're done.
1587    // Otherwise effects that own resources may keep those resources alive indefinitely.
1588    SkASSERT(0 == fDrawState->numColorStages() && 0 == fDrawState->numCoverageStages() &&
1589             !fDrawState->hasGeometryProcessor());
1590
1591    if (NULL == fGpu) {
1592        return NULL;
1593    }
1594
1595    if (kNo_BufferedDraw == buffered && kYes_BufferedDraw == fLastDrawWasBuffered) {
1596        fDrawBuffer->flush();
1597        fLastDrawWasBuffered = kNo_BufferedDraw;
1598    }
1599    ASSERT_OWNED_RESOURCE(fRenderTarget.get());
1600    if (paint) {
1601        SkASSERT(are);
1602        SkASSERT(acf);
1603        are->set(fDrawState);
1604        fDrawState->setFromPaint(*paint, fViewMatrix, fRenderTarget.get());
1605#if GR_DEBUG_PARTIAL_COVERAGE_CHECK
1606        if ((paint->hasMask() || 0xff != paint->fCoverage) &&
1607            !fDrawState->couldApplyCoverage(fGpu->caps())) {
1608            GrPrintf("Partial pixel coverage will be incorrectly blended.\n");
1609        }
1610#endif
1611        // Clear any vertex attributes configured for the previous use of the
1612        // GrDrawState, which can affect which blend optimizations are in effect.
1613        fDrawState->setDefaultVertexAttribs();
1614    } else {
1615        fDrawState->reset(fViewMatrix);
1616        fDrawState->setRenderTarget(fRenderTarget.get());
1617    }
1618    GrDrawTarget* target;
1619    if (kYes_BufferedDraw == buffered) {
1620        fLastDrawWasBuffered = kYes_BufferedDraw;
1621        target = fDrawBuffer;
1622    } else {
1623        SkASSERT(kNo_BufferedDraw == buffered);
1624        fLastDrawWasBuffered = kNo_BufferedDraw;
1625        target = fGpu;
1626    }
1627    fDrawState->setState(GrDrawState::kClip_StateBit, fClip &&
1628                                                     !fClip->fClipStack->isWideOpen());
1629    target->setClip(fClip);
1630    SkASSERT(fDrawState == target->drawState());
1631    return target;
1632}
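
// The calling convention prepareToDraw expects from paint-driven draws (discardRenderTarget above
// shows the NULL-paint variant). Sketch only; 'paint' and the follow-on geometry are placeholders.
//
//     AutoRestoreEffects are;
//     AutoCheckFlush acf(this);
//     GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);
//     if (NULL == target) {
//         return;
//     }
//     // ... configure geometry and issue draws on 'target' (e.g. drawSimpleRect, as above) ...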
1633
1634/*
1635 * This method finds a path renderer that can draw the specified path on
1636 * the provided target.
1637 * Due to its expense, the software path renderer has been split out so it
1638 * can be individually allowed/disallowed via the "allowSW" boolean.
1639 */
1640GrPathRenderer* GrContext::getPathRenderer(const SkPath& path,
1641                                           const SkStrokeRec& stroke,
1642                                           const GrDrawTarget* target,
1643                                           bool allowSW,
1644                                           GrPathRendererChain::DrawType drawType,
1645                                           GrPathRendererChain::StencilSupport* stencilSupport) {
1646
1647    if (NULL == fPathRendererChain) {
1648        fPathRendererChain = SkNEW_ARGS(GrPathRendererChain, (this));
1649    }
1650
1651    GrPathRenderer* pr = fPathRendererChain->getPathRenderer(path,
1652                                                             stroke,
1653                                                             target,
1654                                                             drawType,
1655                                                             stencilSupport);
1656
1657    if (NULL == pr && allowSW) {
1658        if (NULL == fSoftwarePathRenderer) {
1659            fSoftwarePathRenderer = SkNEW_ARGS(GrSoftwarePathRenderer, (this));
1660        }
1661        pr = fSoftwarePathRenderer;
1662    }
1663
1664    return pr;
1665}
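
// A sketch of how a caller might query the chain. The DrawType/StencilSupport value names follow
// GrPathRendererChain's enums and are assumptions here rather than quotes from this file.
//
//     GrPathRendererChain::StencilSupport support;
//     GrPathRenderer* pr = this->getPathRenderer(path, strokeRec, target, true /* allowSW */,
//                                                GrPathRendererChain::kColor_DrawType, &support);
//     if (NULL == pr) {
//         return false;   // not even the software fallback could handle this path
//     }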
1666
1667////////////////////////////////////////////////////////////////////////////////
1668bool GrContext::isConfigRenderable(GrPixelConfig config, bool withMSAA) const {
1669    return fGpu->caps()->isConfigRenderable(config, withMSAA);
1670}
1671
1672int GrContext::getRecommendedSampleCount(GrPixelConfig config,
1673                                         SkScalar dpi) const {
1674    if (!this->isConfigRenderable(config, true)) {
1675        return 0;
1676    }
1677    int chosenSampleCount = 0;
1678    if (fGpu->caps()->pathRenderingSupport()) {
1679        if (dpi >= 250.0f) {
1680            chosenSampleCount = 4;
1681        } else {
1682            chosenSampleCount = 16;
1683        }
1684    }
1685    return chosenSampleCount <= fGpu->caps()->maxSampleCount() ?
1686        chosenSampleCount : 0;
1687}
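
// Worked example of the logic above: with path rendering support and dpi = 300 the chosen count
// is 4, and with dpi = 96 it is 16. The chosen value is returned only if it does not exceed
// caps()->maxSampleCount(); otherwise, and whenever the config is not MSAA-renderable at all,
// the function returns 0.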
1688
1689void GrContext::setupDrawBuffer() {
1690    SkASSERT(NULL == fDrawBuffer);
1691    SkASSERT(NULL == fDrawBufferVBAllocPool);
1692    SkASSERT(NULL == fDrawBufferIBAllocPool);
1693
1694    fDrawBufferVBAllocPool =
1695        SkNEW_ARGS(GrVertexBufferAllocPool, (fGpu, false,
1696                                    DRAW_BUFFER_VBPOOL_BUFFER_SIZE,
1697                                    DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS));
1698    fDrawBufferIBAllocPool =
1699        SkNEW_ARGS(GrIndexBufferAllocPool, (fGpu, false,
1700                                   DRAW_BUFFER_IBPOOL_BUFFER_SIZE,
1701                                   DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS));
1702
1703    fDrawBuffer = SkNEW_ARGS(GrInOrderDrawBuffer, (fGpu,
1704                                                   fDrawBufferVBAllocPool,
1705                                                   fDrawBufferIBAllocPool));
1706
1707    fDrawBuffer->setDrawState(fDrawState);
1708}
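
// Rough data flow once the draw buffer exists (descriptive note): client draws are recorded into
// fDrawBuffer, with vertex and index data parceled out of the two alloc pools created above.
// Flushing the context (or prepareToDraw switching to an unbuffered draw) calls
// fDrawBuffer->flush(), which replays the recorded commands on fGpu.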
1709
1710GrDrawTarget* GrContext::getTextTarget() {
1711    return this->prepareToDraw(NULL, BUFFERED_DRAW, NULL, NULL);
1712}
1713
1714const GrIndexBuffer* GrContext::getQuadIndexBuffer() const {
1715    return fGpu->getQuadIndexBuffer();
1716}
1717
1718namespace {
1719void test_pm_conversions(GrContext* ctx, int* pmToUPMValue, int* upmToPMValue) {
1720    GrConfigConversionEffect::PMConversion pmToUPM;
1721    GrConfigConversionEffect::PMConversion upmToPM;
1722    GrConfigConversionEffect::TestForPreservingPMConversions(ctx, &pmToUPM, &upmToPM);
1723    *pmToUPMValue = pmToUPM;
1724    *upmToPMValue = upmToPM;
1725}
1726}
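
// Background for the two factories below (worked example, not taken from this file): a
// premultiplied (PM) pixel stores each color channel already scaled by alpha, so unpremultiplied
// RGBA (255, 128, 0, 128) corresponds to roughly (128, 64, 0, 128) in PM form. test_pm_conversions
// probes whether the GPU can round-trip PM <-> UPM without losing values; if it cannot, the
// factories return NULL and callers (e.g. the read-pixels path above) fall back to a CPU
// conversion.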
1727
1728const GrFragmentProcessor* GrContext::createPMToUPMEffect(GrTexture* texture,
1729                                                          bool swapRAndB,
1730                                                          const SkMatrix& matrix) {
1731    if (!fDidTestPMConversions) {
1732        test_pm_conversions(this, &fPMToUPMConversion, &fUPMToPMConversion);
1733        fDidTestPMConversions = true;
1734    }
1735    GrConfigConversionEffect::PMConversion pmToUPM =
1736        static_cast<GrConfigConversionEffect::PMConversion>(fPMToUPMConversion);
1737    if (GrConfigConversionEffect::kNone_PMConversion != pmToUPM) {
1738        return GrConfigConversionEffect::Create(texture, swapRAndB, pmToUPM, matrix);
1739    } else {
1740        return NULL;
1741    }
1742}
1743
1744const GrFragmentProcessor* GrContext::createUPMToPMEffect(GrTexture* texture,
1745                                                          bool swapRAndB,
1746                                                          const SkMatrix& matrix) {
1747    if (!fDidTestPMConversions) {
1748        test_pm_conversions(this, &fPMToUPMConversion, &fUPMToPMConversion);
1749        fDidTestPMConversions = true;
1750    }
1751    GrConfigConversionEffect::PMConversion upmToPM =
1752        static_cast<GrConfigConversionEffect::PMConversion>(fUPMToPMConversion);
1753    if (GrConfigConversionEffect::kNone_PMConversion != upmToPM) {
1754        return GrConfigConversionEffect::Create(texture, swapRAndB, upmToPM, matrix);
1755    } else {
1756        return NULL;
1757    }
1758}
1759
1760void GrContext::addResourceToCache(const GrResourceKey& resourceKey, GrGpuResource* resource) {
1761    fResourceCache->addResource(resourceKey, resource);
1762}
1763
1764GrGpuResource* GrContext::findAndRefCachedResource(const GrResourceKey& resourceKey) {
1765    GrGpuResource* resource = fResourceCache->find(resourceKey);
1766    SkSafeRef(resource);
1767    return resource;
1768}
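
// Usage sketch for the cache helpers above ('key' construction is omitted and 'context' is a
// hypothetical caller): findAndRefCachedResource takes a reference on the caller's behalf via
// SkSafeRef, so the caller must balance it.
//
//     GrGpuResource* res = context->findAndRefCachedResource(key);
//     if (res) {
//         // ... use the cached resource ...
//         res->unref();
//     }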
1769
1770void GrContext::addGpuTraceMarker(const GrGpuTraceMarker* marker) {
1771    fGpu->addGpuTraceMarker(marker);
1772    if (fDrawBuffer) {
1773        fDrawBuffer->addGpuTraceMarker(marker);
1774    }
1775}
1776
1777void GrContext::removeGpuTraceMarker(const GrGpuTraceMarker* marker) {
1778    fGpu->removeGpuTraceMarker(marker);
1779    if (fDrawBuffer) {
1780        fDrawBuffer->removeGpuTraceMarker(marker);
1781    }
1782}
1783
1784///////////////////////////////////////////////////////////////////////////////
1785#if GR_CACHE_STATS
1786void GrContext::printCacheStats() const {
1787    fResourceCache->printStats();
1788}
1789#endif
1790
1791#if GR_GPU_STATS
1792const GrContext::GPUStats* GrContext::gpuStats() const {
1793    return fGpu->gpuStats();
1794}
1795#endif
1796
1797