GrContext.cpp revision dace19ec17e85872df3fb35212e1b8bce72018b6
1
2/*
3 * Copyright 2011 Google Inc.
4 *
5 * Use of this source code is governed by a BSD-style license that can be
6 * found in the LICENSE file.
7 */
8
#include "GrContext.h"

#include "GrAARectRenderer.h"
#include "GrBufferAllocPool.h"
#include "GrDefaultGeoProcFactory.h"
#include "GrGpuResource.h"
#include "GrGpuResourceCacheAccess.h"
#include "GrDistanceFieldTextContext.h"
#include "GrDrawTargetCaps.h"
#include "GrGpu.h"
#include "GrIndexBuffer.h"
#include "GrInOrderDrawBuffer.h"
#include "GrLayerCache.h"
#include "GrOvalRenderer.h"
#include "GrPathRenderer.h"
#include "GrPathUtils.h"
#include "GrResourceCache2.h"
#include "GrSoftwarePathRenderer.h"
#include "GrStencilBuffer.h"
#include "GrStencilAndCoverTextContext.h"
#include "GrStrokeInfo.h"
#include "GrSurfacePriv.h"
#include "GrTextStrike.h"
#include "GrTexturePriv.h"
#include "GrTraceMarker.h"
#include "GrTracing.h"
#include "SkDashPathPriv.h"
#include "SkConfig8888.h"
#include "SkGr.h"
#include "SkRRect.h"
#include "SkStrokeRec.h"
#include "SkTLazy.h"
#include "SkTLS.h"
#include "SkTraceEvent.h"

#include "effects/GrConfigConversionEffect.h"
#include "effects/GrDashingEffect.h"
#include "effects/GrSingleTextureEffect.h"

#include <cstdint>
#include <cstring>
47
#ifdef SK_DEBUG
    // change this to a 1 to see notifications when partial coverage fails
    #define GR_DEBUG_PARTIAL_COVERAGE_CHECK 0
#else
    #define GR_DEBUG_PARTIAL_COVERAGE_CHECK 0
#endif

// Sizing for the vertex/index pools backing the in-order draw buffer;
// presumably consumed by setupDrawBuffer(), defined elsewhere in this file.
static const size_t DRAW_BUFFER_VBPOOL_BUFFER_SIZE = 1 << 15;
static const int DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS = 4;

static const size_t DRAW_BUFFER_IBPOOL_BUFFER_SIZE = 1 << 11;
static const int DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS = 4;

// Asserts that a resource passed in was created by this context (or is NULL).
#define ASSERT_OWNED_RESOURCE(R) SkASSERT(!(R) || (R)->getContext() == this)

// Glorified typedef to avoid including GrDrawState.h in GrContext.h
class GrContext::AutoRestoreEffects : public GrDrawState::AutoRestoreEffects {};
65
// RAII helper: on destruction, flushes the context if the resource cache has
// requested a flush to reduce its footprint (see GrContext::OverBudgetCB).
class GrContext::AutoCheckFlush {
public:
    AutoCheckFlush(GrContext* context) : fContext(context) { SkASSERT(context); }

    ~AutoCheckFlush() {
        if (fContext->fFlushToReduceCacheSize) {
            fContext->flush();
        }
    }

private:
    GrContext* fContext;   // not owned; must outlive this helper
};
79
80GrContext* GrContext::Create(GrBackend backend, GrBackendContext backendContext,
81                             const Options* opts) {
82    GrContext* context;
83    if (NULL == opts) {
84        context = SkNEW_ARGS(GrContext, (Options()));
85    } else {
86        context = SkNEW_ARGS(GrContext, (*opts));
87    }
88
89    if (context->init(backend, backendContext)) {
90        return context;
91    } else {
92        context->unref();
93        return NULL;
94    }
95}
96
// All raw-pointer members start out NULL so that teardown paths
// (~GrContext, abandonContext) behave safely even if init() is never
// called or fails partway through.
GrContext::GrContext(const Options& opts) : fOptions(opts) {
    fDrawState = NULL;
    fGpu = NULL;
    fClip = NULL;
    fPathRendererChain = NULL;
    fSoftwarePathRenderer = NULL;
    fResourceCache2 = NULL;
    fFontCache = NULL;
    fDrawBuffer = NULL;
    fDrawBufferVBAllocPool = NULL;
    fDrawBufferIBAllocPool = NULL;
    fFlushToReduceCacheSize = false;
    fAARectRenderer = NULL;
    fOvalRenderer = NULL;
    fViewMatrix.reset();
    fMaxTextureSizeOverride = 1 << 20;   // large enough to be "no override"
}
114
115bool GrContext::init(GrBackend backend, GrBackendContext backendContext) {
116    SkASSERT(NULL == fGpu);
117
118    fGpu = GrGpu::Create(backend, backendContext, this);
119    if (NULL == fGpu) {
120        return false;
121    }
122    this->initCommon();
123    return true;
124}
125
// Creates the backend-independent subsystems. Called exactly once from
// init(), after fGpu exists (several members below take fGpu).
void GrContext::initCommon() {
    fDrawState = SkNEW(GrDrawState);

    fResourceCache2 = SkNEW(GrResourceCache2);
    // Ask the cache to call OverBudgetCB(this) when it exceeds its budget so
    // a flush can be scheduled to free resources.
    fResourceCache2->setOverBudgetCallback(OverBudgetCB, this);

    fFontCache = SkNEW_ARGS(GrFontCache, (fGpu));

    fLayerCache.reset(SkNEW_ARGS(GrLayerCache, (this)));

    fAARectRenderer = SkNEW_ARGS(GrAARectRenderer, (fGpu));
    fOvalRenderer = SkNEW(GrOvalRenderer);

    // PM<->UPM conversion probing flag; presumably tested lazily by the
    // config-conversion path (see GrConfigConversionEffect) — confirm there.
    fDidTestPMConversions = false;

    this->setupDrawBuffer();
}
143
GrContext::~GrContext() {
    // init() never succeeded; nothing below was created.
    if (NULL == fGpu) {
        return;
    }

    // Execute pending drawing before destroying the objects it relies on.
    this->flush();

    // Run client-registered cleanup callbacks while the context is intact.
    for (int i = 0; i < fCleanUpData.count(); ++i) {
        (*fCleanUpData[i].fFunc)(this, fCleanUpData[i].fInfo);
    }

    SkDELETE(fResourceCache2);
    SkDELETE(fFontCache);
    SkDELETE(fDrawBuffer);
    SkDELETE(fDrawBufferVBAllocPool);
    SkDELETE(fDrawBufferIBAllocPool);

    fAARectRenderer->unref();
    fOvalRenderer->unref();

    // fGpu is released after the caches/buffers that may reference it.
    fGpu->unref();
    SkSafeUnref(fPathRendererChain);
    SkSafeUnref(fSoftwarePathRenderer);
    fDrawState->unref();
}
169
170void GrContext::abandonContext() {
171    // abandon first to so destructors
172    // don't try to free the resources in the API.
173    fResourceCache2->abandonAll();
174
175    fGpu->contextAbandoned();
176
177    // a path renderer may be holding onto resources that
178    // are now unusable
179    SkSafeSetNull(fPathRendererChain);
180    SkSafeSetNull(fSoftwarePathRenderer);
181
182    delete fDrawBuffer;
183    fDrawBuffer = NULL;
184
185    delete fDrawBufferVBAllocPool;
186    fDrawBufferVBAllocPool = NULL;
187
188    delete fDrawBufferIBAllocPool;
189    fDrawBufferIBAllocPool = NULL;
190
191    fAARectRenderer->reset();
192    fOvalRenderer->reset();
193
194    fFontCache->freeAll();
195    fLayerCache->freeAll();
196}
197
// Notifies the GrGpu that externally-shared backend state may have changed;
// the meaning of the 'state' bits is defined by GrGpu::markContextDirty.
void GrContext::resetContext(uint32_t state) {
    fGpu->markContextDirty(state);
}
201
// Frees GPU-side resources that can be recreated on demand, without
// destroying or abandoning the context itself.
void GrContext::freeGpuResources() {
    // Flush first so pending draws don't reference freed resources.
    this->flush();

    if (fDrawBuffer) {
        fDrawBuffer->purgeResources();
    }

    fAARectRenderer->reset();
    fOvalRenderer->reset();

    fFontCache->freeAll();
    fLayerCache->freeAll();
    // a path renderer may be holding onto resources
    SkSafeSetNull(fPathRendererChain);
    SkSafeSetNull(fSoftwarePathRenderer);
}
218
219void GrContext::getResourceCacheUsage(int* resourceCount, size_t* resourceBytes) const {
220    if (resourceCount) {
221        *resourceCount = fResourceCache2->getBudgetedResourceCount();
222    }
223    if (resourceBytes) {
224        *resourceBytes = fResourceCache2->getBudgetedResourceBytes();
225    }
226}
227
228GrTextContext* GrContext::createTextContext(GrRenderTarget* renderTarget,
229                                            const SkDeviceProperties&
230                                            leakyProperties,
231                                            bool enableDistanceFieldFonts) {
232    if (fGpu->caps()->pathRenderingSupport() && renderTarget->getStencilBuffer() &&
233                                                renderTarget->isMultisampled()) {
234        return GrStencilAndCoverTextContext::Create(this, leakyProperties);
235    }
236
237    return GrDistanceFieldTextContext::Create(this, leakyProperties, enableDistanceFieldFonts);
238}
239
240////////////////////////////////////////////////////////////////////////////////
241
242GrTexture* GrContext::findAndRefTexture(const GrSurfaceDesc& desc,
243                                        const GrCacheID& cacheID,
244                                        const GrTextureParams* params) {
245    GrResourceKey resourceKey = GrTexturePriv::ComputeKey(fGpu, params, desc, cacheID);
246
247    GrGpuResource* resource = this->findAndRefCachedResource(resourceKey);
248    if (resource) {
249        SkASSERT(static_cast<GrSurface*>(resource)->asTexture());
250        return static_cast<GrSurface*>(resource)->asTexture();
251    }
252    return NULL;
253}
254
// Returns true if a texture with the content key derived from
// (params, desc, cacheID) is currently resident in the resource cache.
bool GrContext::isTextureInCache(const GrSurfaceDesc& desc,
                                 const GrCacheID& cacheID,
                                 const GrTextureParams* params) const {
    GrResourceKey resourceKey = GrTexturePriv::ComputeKey(fGpu, params, desc, cacheID);
    return fResourceCache2->hasContentKey(resourceKey);
}
261
// Registers a stencil buffer in the cache under a key derived from its
// dimensions and sample count, so matching render targets can share it.
void GrContext::addStencilBuffer(GrStencilBuffer* sb) {
    // TODO: Make GrStencilBuffers use the scratch mechanism rather than content keys.
    ASSERT_OWNED_RESOURCE(sb);

    GrResourceKey resourceKey = GrStencilBuffer::ComputeKey(sb->width(),
                                                            sb->height(),
                                                            sb->numSamples());
    SkAssertResult(sb->cacheAccess().setContentKey(resourceKey));
}
271
272GrStencilBuffer* GrContext::findAndRefStencilBuffer(int width, int height, int sampleCnt) {
273    GrResourceKey resourceKey = GrStencilBuffer::ComputeKey(width, height, sampleCnt);
274    GrGpuResource* resource = this->findAndRefCachedResource(resourceKey);
275    return static_cast<GrStencilBuffer*>(resource);
276}
277
// Nearest-neighbor stretch of a tightly-packed image with 'bpp' bytes per
// pixel. Walks the source in 16.16 fixed point, sampling at texel centers,
// so every destination texel is an exact copy of some source texel (no
// filtering). A no-op for degenerate (non-positive) dimensions.
static void stretch_image(void* dst,
                          int dstW,
                          int dstH,
                          const void* src,
                          int srcW,
                          int srcH,
                          size_t bpp) {
    if (dstW <= 0 || dstH <= 0 || srcW <= 0 || srcH <= 0) {
        return;  // guard the divisions below against zero
    }

    // Compute the 16.16 source step per destination texel in 64-bit so the
    // shift cannot overflow for sources >= 32768 texels wide/tall.
    const int32_t dx = static_cast<int32_t>((static_cast<int64_t>(srcW) << 16) / dstW);
    const int32_t dy = static_cast<int32_t>((static_cast<int64_t>(srcH) << 16) / dstH);

    int32_t y = dy >> 1;  // start half a step in: sample at texel centers

    const size_t dstRowBytes = dstW * bpp;
    for (int j = 0; j < dstH; ++j) {
        int32_t x = dx >> 1;
        const uint8_t* srcRow =
                static_cast<const uint8_t*>(src) + (y >> 16) * srcW * bpp;
        uint8_t* dstRow = static_cast<uint8_t*>(dst) + j * dstRowBytes;
        for (size_t i = 0; i < dstRowBytes; i += bpp) {
            memcpy(dstRow + i, srcRow + (x >> 16) * bpp, bpp);
            x += dx;
        }
        y += dy;
    }
}
302
303// The desired texture is NPOT and tiled but that isn't supported by
304// the current hardware. Resize the texture to be a POT
// The desired texture is NPOT and tiled but that isn't supported by
// the current hardware. Resize the texture to be a POT
GrTexture* GrContext::createResizedTexture(const GrSurfaceDesc& desc,
                                           const GrCacheID& cacheID,
                                           const void* srcData,
                                           size_t rowBytes,
                                           bool filter) {
    // Get (or create) the unstretched, clamp-tiled original first.
    SkAutoTUnref<GrTexture> clampedTexture(this->findAndRefTexture(desc, cacheID, NULL));
    if (NULL == clampedTexture) {
        clampedTexture.reset(this->createTexture(NULL, desc, cacheID, srcData, rowBytes));

        if (NULL == clampedTexture) {
            return NULL;
        }
    }

    // Preferred path: stretch on the GPU by rendering the clamped texture
    // into a POT render target.
    GrSurfaceDesc rtDesc = desc;
    rtDesc.fFlags =  rtDesc.fFlags |
                     kRenderTarget_GrSurfaceFlag |
                     kNoStencil_GrSurfaceFlag;
    rtDesc.fWidth  = GrNextPow2(desc.fWidth);
    rtDesc.fHeight = GrNextPow2(desc.fHeight);

    GrTexture* texture = fGpu->createTexture(rtDesc, NULL, 0);

    if (texture) {
        GrDrawTarget::AutoStateRestore asr(fDrawBuffer, GrDrawTarget::kReset_ASRInit);
        GrDrawState* drawState = fDrawBuffer->drawState();
        drawState->setRenderTarget(texture->asRenderTarget());

        // if filtering is not desired then we want to ensure all
        // texels in the resampled image are copies of texels from
        // the original.
        GrTextureParams params(SkShader::kClamp_TileMode,
                               filter ? GrTextureParams::kBilerp_FilterMode :
                                        GrTextureParams::kNone_FilterMode);
        drawState->addColorTextureProcessor(clampedTexture, SkMatrix::I(), params);

        drawState->setGeometryProcessor(
                GrDefaultGeoProcFactory::CreateAndSetAttribs(
                        drawState,
                        GrDefaultGeoProcFactory::kPosition_GPType |
                        GrDefaultGeoProcFactory::kLocalCoord_GPType))->unref();

        // Fan covering the whole POT target with unit-square local coords;
        // position and local coord are interleaved, hence the
        // 2*sizeof(SkPoint) stride.
        GrDrawTarget::AutoReleaseGeometry arg(fDrawBuffer, 4, 0);

        if (arg.succeeded()) {
            SkPoint* verts = (SkPoint*) arg.vertices();
            verts[0].setIRectFan(0, 0, texture->width(), texture->height(), 2 * sizeof(SkPoint));
            verts[1].setIRectFan(0, 0, 1, 1, 2 * sizeof(SkPoint));
            fDrawBuffer->drawNonIndexed(kTriangleFan_GrPrimitiveType, 0, 4);
        }
    } else {
        // TODO: Our CPU stretch doesn't filter. But we create separate
        // stretched textures when the texture params is either filtered or
        // not. Either implement filtered stretch blit on CPU or just create
        // one when FBO case fails.

        rtDesc.fFlags = kNone_GrSurfaceFlags;
        // no longer need to clamp at min RT size.
        rtDesc.fWidth  = GrNextPow2(desc.fWidth);
        rtDesc.fHeight = GrNextPow2(desc.fHeight);

        // We shouldn't be resizing a compressed texture.
        SkASSERT(!GrPixelConfigIsCompressed(desc.fConfig));

        // CPU fallback: nearest-neighbor stretch into a temporary buffer,
        // then upload the stretched pixels.
        size_t bpp = GrBytesPerPixel(desc.fConfig);
        GrAutoMalloc<128*128*4> stretchedPixels(bpp * rtDesc.fWidth * rtDesc.fHeight);
        stretch_image(stretchedPixels.get(), rtDesc.fWidth, rtDesc.fHeight,
                      srcData, desc.fWidth, desc.fHeight, bpp);

        size_t stretchedRowBytes = rtDesc.fWidth * bpp;

        texture = fGpu->createTexture(rtDesc, stretchedPixels.get(), stretchedRowBytes);
        SkASSERT(texture);
    }

    return texture;
}
382
// Creates a texture for the given descriptor/pixel data and registers it in
// the resource cache under a content key derived from (params, desc, cacheID).
// If the key indicates a resized (POT) version is required, the pixels are
// stretched first. Returns a ref'ed texture (NULL on failure); on success,
// *cacheKey (if non-NULL) receives the content key.
GrTexture* GrContext::createTexture(const GrTextureParams* params,
                                    const GrSurfaceDesc& desc,
                                    const GrCacheID& cacheID,
                                    const void* srcData,
                                    size_t rowBytes,
                                    GrResourceKey* cacheKey) {
    GrResourceKey resourceKey = GrTexturePriv::ComputeKey(fGpu, params, desc, cacheID);

    GrTexture* texture;
    if (GrTexturePriv::NeedsResizing(resourceKey)) {
        // We do not know how to resize compressed textures.
        SkASSERT(!GrPixelConfigIsCompressed(desc.fConfig));

        texture = this->createResizedTexture(desc, cacheID,
                                             srcData, rowBytes,
                                             GrTexturePriv::NeedsBilerp(resourceKey));
    } else {
        texture = fGpu->createTexture(desc, srcData, rowBytes);
    }

    if (texture) {
        if (texture->cacheAccess().setContentKey(resourceKey)) {
            if (cacheKey) {
                *cacheKey = resourceKey;
            }
        } else {
            // setContentKey failed (presumably the key is already taken) —
            // drop our texture rather than returning an uncached one.
            texture->unref();
            texture = NULL;
        }
    }

    return texture;
}
416
// Returns a ref'ed scratch texture compatible with 'inDesc'. With
// kApprox_ScratchTexMatch the returned texture may be larger (dimensions are
// binned to pow2 with a minimum); kExact requires the exact size.
// 'calledDuringFlush' restricts reuse to resources without pending IO.
// Falls through to creating a brand-new texture on a cache miss.
GrTexture* GrContext::refScratchTexture(const GrSurfaceDesc& inDesc, ScratchTexMatch match,
                                        bool calledDuringFlush) {
    // kNoStencil has no meaning if kRT isn't set.
    SkASSERT((inDesc.fFlags & kRenderTarget_GrSurfaceFlag) ||
             !(inDesc.fFlags & kNoStencil_GrSurfaceFlag));

    // Make sure caller has checked for renderability if kRT is set.
    SkASSERT(!(inDesc.fFlags & kRenderTarget_GrSurfaceFlag) ||
             this->isConfigRenderable(inDesc.fConfig, inDesc.fSampleCnt > 0));

    // Copy-on-write wrapper: the desc is only duplicated if we modify it.
    SkTCopyOnFirstWrite<GrSurfaceDesc> desc(inDesc);

    if (fGpu->caps()->reuseScratchTextures() || (desc->fFlags & kRenderTarget_GrSurfaceFlag)) {
        GrSurfaceFlags origFlags = desc->fFlags;
        if (kApprox_ScratchTexMatch == match) {
            // bin by pow2 with a reasonable min
            static const int MIN_SIZE = 16;
            GrSurfaceDesc* wdesc = desc.writable();
            wdesc->fWidth  = SkTMax(MIN_SIZE, GrNextPow2(desc->fWidth));
            wdesc->fHeight = SkTMax(MIN_SIZE, GrNextPow2(desc->fHeight));
        }

        do {
            GrResourceKey key = GrTexturePriv::ComputeScratchKey(*desc);
            uint32_t scratchFlags = 0;
            if (calledDuringFlush) {
                scratchFlags = GrResourceCache2::kRequireNoPendingIO_ScratchFlag;
            } else  if (!(desc->fFlags & kRenderTarget_GrSurfaceFlag)) {
                // If it is not a render target then it will most likely be populated by
                // writePixels() which will trigger a flush if the texture has pending IO.
                scratchFlags = GrResourceCache2::kPreferNoPendingIO_ScratchFlag;
            }
            GrGpuResource* resource = fResourceCache2->findAndRefScratchResource(key, scratchFlags);
            if (resource) {
                return static_cast<GrSurface*>(resource)->asTexture();
            }

            if (kExact_ScratchTexMatch == match) {
                break;
            }
            // We had a cache miss and we are in approx mode, relax the fit of the flags.

            // We no longer try to reuse textures that were previously used as render targets in
            // situations where no RT is needed; doing otherwise can confuse the video driver and
            // cause significant performance problems in some cases.
            if (desc->fFlags & kNoStencil_GrSurfaceFlag) {
                desc.writable()->fFlags = desc->fFlags & ~kNoStencil_GrSurfaceFlag;
            } else {
                break;
            }

        } while (true);

        // Restore the caller's flags before creating a new texture below.
        desc.writable()->fFlags = origFlags;
    }

    GrTexture* texture = fGpu->createTexture(*desc, NULL, 0);
    SkASSERT(NULL == texture ||
             texture->cacheAccess().getScratchKey() == GrTexturePriv::ComputeScratchKey(*desc));
    return texture;
}
478
479void GrContext::OverBudgetCB(void* data) {
480    SkASSERT(data);
481
482    GrContext* context = reinterpret_cast<GrContext*>(data);
483
484    // Flush the InOrderDrawBuffer to possibly free up some textures
485    context->fFlushToReduceCacheSize = true;
486}
487
488
// Creates a texture that bypasses the content-key cache entirely.
GrTexture* GrContext::createUncachedTexture(const GrSurfaceDesc& descIn,
                                            void* srcData,
                                            size_t rowBytes) {
    // NOTE(review): the local copy appears unnecessary — it isn't modified
    // before the call; presumably a leftover from an older createTexture
    // signature. Confirm before removing.
    GrSurfaceDesc descCopy = descIn;
    return fGpu->createTexture(descCopy, srcData, rowBytes);
}
495
496void GrContext::getResourceCacheLimits(int* maxTextures, size_t* maxTextureBytes) const {
497    if (maxTextures) {
498        *maxTextures = fResourceCache2->getMaxResourceCount();
499    }
500    if (maxTextureBytes) {
501        *maxTextureBytes = fResourceCache2->getMaxResourceBytes();
502    }
503}
504
// Sets the resource cache's budget: maximum resource count and bytes.
void GrContext::setResourceCacheLimits(int maxTextures, size_t maxTextureBytes) {
    fResourceCache2->setLimits(maxTextures, maxTextureBytes);
}
508
// Max texture dimension: the hardware cap, clamped by any override set in
// fMaxTextureSizeOverride (presumably a testing hook — see the ctor default).
int GrContext::getMaxTextureSize() const {
    return SkTMin(fGpu->caps()->maxTextureSize(), fMaxTextureSizeOverride);
}
512
// Max render target dimension supported by the hardware.
int GrContext::getMaxRenderTargetSize() const {
    return fGpu->caps()->maxRenderTargetSize();
}
516
// Max MSAA sample count supported by the hardware.
int GrContext::getMaxSampleCount() const {
    return fGpu->caps()->maxSampleCount();
}
520
521///////////////////////////////////////////////////////////////////////////////
522
// Wraps a client-created backend texture object in a GrTexture.
GrTexture* GrContext::wrapBackendTexture(const GrBackendTextureDesc& desc) {
    return fGpu->wrapBackendTexture(desc);
}
526
// Wraps a client-created backend render target object in a GrRenderTarget.
GrRenderTarget* GrContext::wrapBackendRenderTarget(const GrBackendRenderTargetDesc& desc) {
    return fGpu->wrapBackendRenderTarget(desc);
}
530
531///////////////////////////////////////////////////////////////////////////////
532
533bool GrContext::supportsIndex8PixelConfig(const GrTextureParams* params,
534                                          int width, int height) const {
535    const GrDrawTargetCaps* caps = fGpu->caps();
536    if (!caps->isConfigTexturable(kIndex_8_GrPixelConfig)) {
537        return false;
538    }
539
540    bool isPow2 = SkIsPow2(width) && SkIsPow2(height);
541
542    if (!isPow2) {
543        bool tiled = params && params->isTiled();
544        if (tiled && !caps->npotTextureTileSupport()) {
545            return false;
546        }
547    }
548    return true;
549}
550
551
552////////////////////////////////////////////////////////////////////////////////
553
// Clears 'renderTarget' to 'color'; 'rect' may restrict the cleared area
// (the exact semantics of rect/canIgnoreRect are those of
// GrDrawTarget::clear).
void GrContext::clear(const SkIRect* rect,
                      const GrColor color,
                      bool canIgnoreRect,
                      GrRenderTarget* renderTarget) {
    ASSERT_OWNED_RESOURCE(renderTarget);
    SkASSERT(renderTarget);

    AutoRestoreEffects are;
    AutoCheckFlush acf(this);
    GR_CREATE_TRACE_MARKER_CONTEXT("GrContext::clear", this);
    // NULL paint: set up the target without paint-derived state.
    GrDrawTarget* target = this->prepareToDraw(NULL, &are, &acf);
    if (NULL == target) {
        return;
    }
    target->clear(rect, color, canIgnoreRect, renderTarget);
}
570
// Fills the entire clip with 'origPaint' by drawing a rect that covers the
// whole render target, mapped back into source space.
void GrContext::drawPaint(const GrPaint& origPaint) {
    // set rect to be big enough to fill the space, but not super-huge, so we
    // don't overflow fixed-point implementations
    SkRect r;
    r.setLTRB(0, 0,
              SkIntToScalar(getRenderTarget()->width()),
              SkIntToScalar(getRenderTarget()->height()));
    SkMatrix inverse;
    SkTCopyOnFirstWrite<GrPaint> paint(origPaint);
    AutoMatrix am;
    GR_CREATE_TRACE_MARKER_CONTEXT("GrContext::drawPaint", this);

    // We attempt to map r by the inverse matrix and draw that. mapRect will
    // map the four corners and bound them with a new rect. This will not
    // produce a correct result for some perspective matrices.
    if (!this->getMatrix().hasPerspective()) {
        if (!fViewMatrix.invert(&inverse)) {
            SkDebugf("Could not invert matrix\n");
            return;
        }
        inverse.mapRect(&r);
    } else {
        // Perspective: instead draw the device-space rect under an identity
        // view matrix (AutoMatrix restores the original on scope exit).
        if (!am.setIdentity(this, paint.writable())) {
            SkDebugf("Could not invert matrix\n");
            return;
        }
    }
    // by definition this fills the entire clip, no need for AA
    if (paint->isAntiAlias()) {
        paint.writable()->setAntiAlias(false);
    }
    this->drawRect(*paint, r);
}
604
#ifdef SK_DEVELOPER
// Developer-build helper: dumps debugging information for the font cache.
void GrContext::dumpFontCache() const {
    fFontCache->dump();
}
#endif
610
611////////////////////////////////////////////////////////////////////////////////
612
613/*  create a triangle strip that strokes the specified triangle. There are 8
614 unique vertices, but we repreat the last 2 to close up. Alternatively we
615 could use an indices array, and then only send 8 verts, but not sure that
616 would be faster.
617 */
618static void setStrokeRectStrip(SkPoint verts[10], SkRect rect,
619                               SkScalar width) {
620    const SkScalar rad = SkScalarHalf(width);
621    rect.sort();
622
623    verts[0].set(rect.fLeft + rad, rect.fTop + rad);
624    verts[1].set(rect.fLeft - rad, rect.fTop - rad);
625    verts[2].set(rect.fRight - rad, rect.fTop + rad);
626    verts[3].set(rect.fRight + rad, rect.fTop - rad);
627    verts[4].set(rect.fRight - rad, rect.fBottom - rad);
628    verts[5].set(rect.fRight + rad, rect.fBottom + rad);
629    verts[6].set(rect.fLeft + rad, rect.fBottom - rad);
630    verts[7].set(rect.fLeft - rad, rect.fBottom + rad);
631    verts[8] = verts[0];
632    verts[9] = verts[1];
633}
634
// Returns true if all four edges of r lie exactly on integer coordinates.
static inline bool is_irect(const SkRect& r) {
  return SkScalarIsInt(r.fLeft)  && SkScalarIsInt(r.fTop) &&
         SkScalarIsInt(r.fRight) && SkScalarIsInt(r.fBottom);
}
639
// Decides whether a rect draw can use the coverage-based AA rect renderer
// and, if so, writes the device-space bounds to *devBoundRect. Returns false
// when AA should be skipped: the blend can't accept coverage, the target is
// multisampled, the matrix is unsupported, or (for fills) the device rect
// lands exactly on integer coords so non-AA rendering is already exact.
// strokeWidth >= 0 means stroke/hairline; < 0 means fill.
static bool apply_aa_to_rect(GrDrawTarget* target,
                             const SkRect& rect,
                             SkScalar strokeWidth,
                             const SkMatrix& combinedMatrix,
                             SkRect* devBoundRect) {
    if (!target->getDrawState().canTweakAlphaForCoverage() &&
        target->shouldDisableCoverageAAForBlend()) {
#ifdef SK_DEBUG
        //SkDebugf("Turning off AA to correctly apply blend.\n");
#endif
        return false;
    }
    const GrDrawState& drawState = target->getDrawState();
    if (drawState.getRenderTarget()->isMultisampled()) {
        return false;
    }

#if defined(SHADER_AA_FILL_RECT) || !defined(IGNORE_ROT_AA_RECT_OPT)
    if (strokeWidth >= 0) {
#endif
        // Strokes must stay axis-aligned under the matrix.
        if (!combinedMatrix.preservesAxisAlignment()) {
            return false;
        }

#if defined(SHADER_AA_FILL_RECT) || !defined(IGNORE_ROT_AA_RECT_OPT)
    } else {
        // Fills may additionally be rotated, as long as right angles survive.
        if (!combinedMatrix.preservesRightAngles()) {
            return false;
        }
    }
#endif

    combinedMatrix.mapRect(devBoundRect, rect);
    if (strokeWidth < 0) {
        // An integer-aligned fill needs no AA.
        return !is_irect(*devBoundRect);
    }

    return true;
}
679
// Point-in-rect test that, unlike an exclusive test, counts points on the
// right/bottom edges as inside. Any NaN coordinate makes a comparison false,
// so the result is false for NaN input.
static inline bool rect_contains_inclusive(const SkRect& rect, const SkPoint& point) {
    return point.fX >= rect.fLeft && point.fX <= rect.fRight &&
           point.fY >= rect.fTop && point.fY <= rect.fBottom;
}
684
// Draws 'rect'. strokeInfo selects the style: NULL = fill, dashed = via the
// path renderer, width 0 = hairline, width > 0 = stroke. Full-target opaque
// fills are converted to clears.
void GrContext::drawRect(const GrPaint& paint,
                         const SkRect& rect,
                         const GrStrokeInfo* strokeInfo) {
    if (strokeInfo && strokeInfo->isDashed()) {
        // Dashed strokes are handled by the path drawing code.
        SkPath path;
        path.addRect(rect);
        this->drawPath(paint, path, *strokeInfo);
        return;
    }

    AutoRestoreEffects are;
    AutoCheckFlush acf(this);
    GrDrawTarget* target = this->prepareToDraw(&paint, &are, &acf);
    if (NULL == target) {
        return;
    }

    GR_CREATE_TRACE_MARKER("GrContext::drawRect", target);
    // Negative width encodes "fill" for the logic below.
    SkScalar width = NULL == strokeInfo ? -1 : strokeInfo->getStrokeRec().getWidth();
    SkMatrix matrix = target->drawState()->getViewMatrix();

    // Check if this is a full RT draw and can be replaced with a clear. We don't bother checking
    // cases where the RT is fully inside a stroke.
    if (width < 0) {
        SkRect rtRect;
        target->getDrawState().getRenderTarget()->getBoundsRect(&rtRect);
        SkRect clipSpaceRTRect = rtRect;
        bool checkClip = false;
        if (this->getClip()) {
            checkClip = true;
            clipSpaceRTRect.offset(SkIntToScalar(this->getClip()->fOrigin.fX),
                                   SkIntToScalar(this->getClip()->fOrigin.fY));
        }
        // Does the clip contain the entire RT?
        if (!checkClip || target->getClip()->fClipStack->quickContains(clipSpaceRTRect)) {
            SkMatrix invM;
            if (!matrix.invert(&invM)) {
                return;
            }
            // Does the rect bound the RT?
            SkPoint srcSpaceRTQuad[4];
            invM.mapRectToQuad(srcSpaceRTQuad, rtRect);
            if (rect_contains_inclusive(rect, srcSpaceRTQuad[0]) &&
                rect_contains_inclusive(rect, srcSpaceRTQuad[1]) &&
                rect_contains_inclusive(rect, srcSpaceRTQuad[2]) &&
                rect_contains_inclusive(rect, srcSpaceRTQuad[3])) {
                // Will it blend?
                GrColor clearColor;
                if (paint.isOpaqueAndConstantColor(&clearColor)) {
                    target->clear(NULL, clearColor, true, fRenderTarget);
                    return;
                }
            }
        }
    }

    SkRect devBoundRect;
    bool needAA = paint.isAntiAlias() &&
                  !target->getDrawState().getRenderTarget()->isMultisampled();
    bool doAA = needAA && apply_aa_to_rect(target, rect, width, matrix, &devBoundRect);

    if (doAA) {
        // The AA rect renderer works in device space; draw with identity.
        GrDrawState::AutoViewMatrixRestore avmr;
        if (!avmr.setIdentity(target->drawState())) {
            return;
        }
        if (width >= 0) {
            const SkStrokeRec& strokeRec = strokeInfo->getStrokeRec();
            fAARectRenderer->strokeAARect(target, rect, matrix, devBoundRect, strokeRec);
        } else {
            // filled AA rect
            fAARectRenderer->fillAARect(target,
                                        rect, matrix, devBoundRect);
        }
        return;
    }

    if (width >= 0) {
        // TODO: consider making static vertex buffers for these cases.
        // Hairline could be done by just adding closing vertex to
        // unitSquareVertexBuffer()

        static const int worstCaseVertCount = 10;
        target->drawState()->setDefaultVertexAttribs();
        target->drawState()->setGeometryProcessor(GrDefaultGeoProcFactory::Create(false))->unref();
        GrDrawTarget::AutoReleaseGeometry geo(target, worstCaseVertCount, 0);

        if (!geo.succeeded()) {
            SkDebugf("Failed to get space for vertices!\n");
            return;
        }

        GrPrimitiveType primType;
        int vertCount;
        SkPoint* vertex = geo.positions();

        if (width > 0) {
            // Non-hairline stroke: 10-vertex strip (see setStrokeRectStrip).
            vertCount = 10;
            primType = kTriangleStrip_GrPrimitiveType;
            setStrokeRectStrip(vertex, rect, width);
        } else {
            // hairline
            vertCount = 5;
            primType = kLineStrip_GrPrimitiveType;
            vertex[0].set(rect.fLeft, rect.fTop);
            vertex[1].set(rect.fRight, rect.fTop);
            vertex[2].set(rect.fRight, rect.fBottom);
            vertex[3].set(rect.fLeft, rect.fBottom);
            vertex[4].set(rect.fLeft, rect.fTop);
        }

        target->drawNonIndexed(primType, 0, vertCount);
    } else {
        // filled BW rect
        target->drawSimpleRect(rect);
    }
}
802
// Draws dstRect with local (e.g. texture) coordinates taken from localRect,
// optionally transformed by localMatrix.
void GrContext::drawRectToRect(const GrPaint& paint,
                               const SkRect& dstRect,
                               const SkRect& localRect,
                               const SkMatrix* localMatrix) {
    AutoRestoreEffects are;
    AutoCheckFlush acf(this);
    GrDrawTarget* target = this->prepareToDraw(&paint, &are, &acf);
    if (NULL == target) {
        return;
    }

    GR_CREATE_TRACE_MARKER("GrContext::drawRectToRect", target);

    target->drawRect(dstRect, &localRect, localMatrix);
}
818
819static void set_vertex_attributes(GrDrawState* drawState,
820                                  const SkPoint* texCoords,
821                                  const GrColor* colors,
822                                  int* colorOffset,
823                                  int* texOffset) {
824    *texOffset = -1;
825    *colorOffset = -1;
826
827    uint32_t flags = GrDefaultGeoProcFactory::kPosition_GPType;
828    if (texCoords && colors) {
829        *colorOffset = sizeof(SkPoint);
830        *texOffset = sizeof(SkPoint) + sizeof(GrColor);
831        flags |= GrDefaultGeoProcFactory::kColor_GPType |
832                 GrDefaultGeoProcFactory::kLocalCoord_GPType;
833    } else if (texCoords) {
834        *texOffset = sizeof(SkPoint);
835        flags |= GrDefaultGeoProcFactory::kLocalCoord_GPType;
836    } else if (colors) {
837        *colorOffset = sizeof(SkPoint);
838        flags |= GrDefaultGeoProcFactory::kColor_GPType;
839    }
840    drawState->setGeometryProcessor(GrDefaultGeoProcFactory::CreateAndSetAttribs(drawState,
841                                                                                 flags))->unref();
842}
843
// Draws a mesh of 'vertexCount' positions with optional per-vertex texture
// coords and colors, optionally indexed. Vertex data is copied into an
// interleaved layout whose offsets come from set_vertex_attributes().
void GrContext::drawVertices(const GrPaint& paint,
                             GrPrimitiveType primitiveType,
                             int vertexCount,
                             const SkPoint positions[],
                             const SkPoint texCoords[],
                             const GrColor colors[],
                             const uint16_t indices[],
                             int indexCount) {
    AutoRestoreEffects are;
    AutoCheckFlush acf(this);
    GrDrawTarget::AutoReleaseGeometry geo; // must be inside AutoCheckFlush scope

    GrDrawTarget* target = this->prepareToDraw(&paint, &are, &acf);
    if (NULL == target) {
        return;
    }
    GrDrawState* drawState = target->drawState();

    GR_CREATE_TRACE_MARKER("GrContext::drawVertices", target);

    int colorOffset = -1, texOffset = -1;
    set_vertex_attributes(drawState, texCoords, colors, &colorOffset, &texOffset);

    size_t VertexStride = drawState->getVertexStride();
    if (!geo.set(target, vertexCount, indexCount)) {
        SkDebugf("Failed to get space for vertices!\n");
        return;
    }
    void* curVertex = geo.vertices();

    // Interleave position (+ optional tex coord / color) for each vertex.
    for (int i = 0; i < vertexCount; ++i) {
        *((SkPoint*)curVertex) = positions[i];

        if (texOffset >= 0) {
            *(SkPoint*)((intptr_t)curVertex + texOffset) = texCoords[i];
        }
        if (colorOffset >= 0) {
            *(GrColor*)((intptr_t)curVertex + colorOffset) = colors[i];
        }
        curVertex = (void*)((intptr_t)curVertex + VertexStride);
    }

    // we don't currently apply offscreen AA to this path. Need improved
    // management of GrDrawTarget's geometry to avoid copying points per-tile.
    if (indices) {
        uint16_t* curIndex = (uint16_t*)geo.indices();
        for (int i = 0; i < indexCount; ++i) {
            curIndex[i] = indices[i];
        }
        target->drawIndexed(primitiveType, 0, 0, vertexCount, indexCount);
    } else {
        target->drawNonIndexed(primitiveType, 0, vertexCount);
    }
}
898
899///////////////////////////////////////////////////////////////////////////////
900
901void GrContext::drawRRect(const GrPaint& paint,
902                          const SkRRect& rrect,
903                          const GrStrokeInfo& strokeInfo) {
904    if (rrect.isEmpty()) {
905       return;
906    }
907
908    if (strokeInfo.isDashed()) {
909        SkPath path;
910        path.addRRect(rrect);
911        this->drawPath(paint, path, strokeInfo);
912        return;
913    }
914
915    AutoRestoreEffects are;
916    AutoCheckFlush acf(this);
917    GrDrawTarget* target = this->prepareToDraw(&paint, &are, &acf);
918    if (NULL == target) {
919        return;
920    }
921
922    GR_CREATE_TRACE_MARKER("GrContext::drawRRect", target);
923
924    const SkStrokeRec& strokeRec = strokeInfo.getStrokeRec();
925
926    if (!fOvalRenderer->drawRRect(target, this, paint.isAntiAlias(), rrect, strokeRec)) {
927        SkPath path;
928        path.addRRect(rrect);
929        this->internalDrawPath(target, paint.isAntiAlias(), path, strokeInfo);
930    }
931}
932
933///////////////////////////////////////////////////////////////////////////////
934
935void GrContext::drawDRRect(const GrPaint& paint,
936                           const SkRRect& outer,
937                           const SkRRect& inner) {
938    if (outer.isEmpty()) {
939       return;
940    }
941
942    AutoRestoreEffects are;
943    AutoCheckFlush acf(this);
944    GrDrawTarget* target = this->prepareToDraw(&paint, &are, &acf);
945
946    GR_CREATE_TRACE_MARKER("GrContext::drawDRRect", target);
947
948    if (!fOvalRenderer->drawDRRect(target, this, paint.isAntiAlias(), outer, inner)) {
949        SkPath path;
950        path.addRRect(inner);
951        path.addRRect(outer);
952        path.setFillType(SkPath::kEvenOdd_FillType);
953
954        GrStrokeInfo fillRec(SkStrokeRec::kFill_InitStyle);
955        this->internalDrawPath(target, paint.isAntiAlias(), path, fillRec);
956    }
957}
958
959///////////////////////////////////////////////////////////////////////////////
960
961void GrContext::drawOval(const GrPaint& paint,
962                         const SkRect& oval,
963                         const GrStrokeInfo& strokeInfo) {
964    if (oval.isEmpty()) {
965       return;
966    }
967
968    if (strokeInfo.isDashed()) {
969        SkPath path;
970        path.addOval(oval);
971        this->drawPath(paint, path, strokeInfo);
972        return;
973    }
974
975    AutoRestoreEffects are;
976    AutoCheckFlush acf(this);
977    GrDrawTarget* target = this->prepareToDraw(&paint, &are, &acf);
978    if (NULL == target) {
979        return;
980    }
981
982    GR_CREATE_TRACE_MARKER("GrContext::drawOval", target);
983
984    const SkStrokeRec& strokeRec = strokeInfo.getStrokeRec();
985
986
987    if (!fOvalRenderer->drawOval(target, this, paint.isAntiAlias(), oval, strokeRec)) {
988        SkPath path;
989        path.addOval(oval);
990        this->internalDrawPath(target, paint.isAntiAlias(), path, strokeInfo);
991    }
992}
993
994// Can 'path' be drawn as a pair of filled nested rectangles?
995static bool is_nested_rects(GrDrawTarget* target,
996                            const SkPath& path,
997                            const SkStrokeRec& stroke,
998                            SkRect rects[2]) {
999    SkASSERT(stroke.isFillStyle());
1000
1001    if (path.isInverseFillType()) {
1002        return false;
1003    }
1004
1005    const GrDrawState& drawState = target->getDrawState();
1006
1007    // TODO: this restriction could be lifted if we were willing to apply
1008    // the matrix to all the points individually rather than just to the rect
1009    if (!drawState.getViewMatrix().preservesAxisAlignment()) {
1010        return false;
1011    }
1012
1013    if (!target->getDrawState().canTweakAlphaForCoverage() &&
1014        target->shouldDisableCoverageAAForBlend()) {
1015        return false;
1016    }
1017
1018    SkPath::Direction dirs[2];
1019    if (!path.isNestedRects(rects, dirs)) {
1020        return false;
1021    }
1022
1023    if (SkPath::kWinding_FillType == path.getFillType() && dirs[0] == dirs[1]) {
1024        // The two rects need to be wound opposite to each other
1025        return false;
1026    }
1027
1028    // Right now, nested rects where the margin is not the same width
1029    // all around do not render correctly
1030    const SkScalar* outer = rects[0].asScalars();
1031    const SkScalar* inner = rects[1].asScalars();
1032
1033    bool allEq = true;
1034
1035    SkScalar margin = SkScalarAbs(outer[0] - inner[0]);
1036    bool allGoE1 = margin >= SK_Scalar1;
1037
1038    for (int i = 1; i < 4; ++i) {
1039        SkScalar temp = SkScalarAbs(outer[i] - inner[i]);
1040        if (temp < SK_Scalar1) {
1041            allGoE1 = false;
1042        }
1043        if (!SkScalarNearlyEqual(margin, temp)) {
1044            allEq = false;
1045        }
1046    }
1047
1048    return allEq || allGoE1;
1049}
1050
// Draws 'path' with 'paint', routing to the cheapest capable renderer:
// dashed strokes are drawn directly (line segments) or pre-filtered into a
// new path; concave AA fills that are really a pair of nested rects use the
// AA rect renderer; ovals use the oval renderer; everything else falls
// through to internalDrawPath().
void GrContext::drawPath(const GrPaint& paint, const SkPath& path, const GrStrokeInfo& strokeInfo) {

    if (path.isEmpty()) {
       // An empty inverse-filled path covers the whole clip; an empty
       // regular path draws nothing.
       if (path.isInverseFillType()) {
           this->drawPaint(paint);
       }
       return;
    }

    if (strokeInfo.isDashed()) {
        // Fast case: a dashed line segment may be drawable directly by the
        // dashing effect, with the view matrix folded into the geometry.
        SkPoint pts[2];
        if (path.isLine(pts)) {
            AutoRestoreEffects are;
            AutoCheckFlush acf(this);
            GrDrawTarget* target = this->prepareToDraw(&paint, &are, &acf);
            if (NULL == target) {
                return;
            }
            GrDrawState* drawState = target->drawState();

            SkMatrix origViewMatrix = drawState->getViewMatrix();
            GrDrawState::AutoViewMatrixRestore avmr;
            if (avmr.setIdentity(target->drawState())) {
                if (GrDashingEffect::DrawDashLine(pts, paint, strokeInfo, fGpu, target,
                                                  origViewMatrix)) {
                    return;
                }
            }
        }

        // Filter dashed path into new path with the dashing applied
        const SkPathEffect::DashInfo& info = strokeInfo.getDashInfo();
        SkTLazy<SkPath> effectPath;
        GrStrokeInfo newStrokeInfo(strokeInfo, false);
        SkStrokeRec* stroke = newStrokeInfo.getStrokeRecPtr();
        if (SkDashPath::FilterDashPath(effectPath.init(), path, stroke, NULL, info)) {
            this->drawPath(paint, *effectPath.get(), newStrokeInfo);
            return;
        }

        // Filtering failed; recurse with the dashing stripped from the
        // stroke info so we don't loop back into this branch.
        this->drawPath(paint, path, newStrokeInfo);
        return;
    }

    // Note that internalDrawPath may sw-rasterize the path into a scratch texture.
    // Scratch textures can be recycled after they are returned to the texture
    // cache. This presents a potential hazard for buffered drawing. However,
    // the writePixels that uploads to the scratch will perform a flush so we're
    // OK.
    AutoRestoreEffects are;
    AutoCheckFlush acf(this);
    GrDrawTarget* target = this->prepareToDraw(&paint, &are, &acf);
    if (NULL == target) {
        return;
    }
    GrDrawState* drawState = target->drawState();

    GR_CREATE_TRACE_MARKER1("GrContext::drawPath", target, "Is Convex", path.isConvex());

    const SkStrokeRec& strokeRec = strokeInfo.getStrokeRec();

    // Coverage AA only applies when the paint wants AA and the target has
    // no MSAA of its own.
    bool useCoverageAA = paint.isAntiAlias() && !drawState->getRenderTarget()->isMultisampled();

    if (useCoverageAA && strokeRec.getWidth() < 0 && !path.isConvex()) {
        // Concave AA paths are expensive - try to avoid them for special cases
        SkRect rects[2];

        if (is_nested_rects(target, path, strokeRec, rects)) {
            SkMatrix origViewMatrix = drawState->getViewMatrix();
            GrDrawState::AutoViewMatrixRestore avmr;
            if (!avmr.setIdentity(target->drawState())) {
                return;
            }

            fAARectRenderer->fillAANestedRects(target, rects, origViewMatrix);
            return;
        }
    }

    SkRect ovalRect;
    bool isOval = path.isOval(&ovalRect);

    // Hand non-inverse ovals to the oval renderer; if it declines (or the
    // path isn't an oval) use the general path rendering chain.
    if (!isOval || path.isInverseFillType()
        || !fOvalRenderer->drawOval(target, this, paint.isAntiAlias(), ovalRect, strokeRec)) {
        this->internalDrawPath(target, paint.isAntiAlias(), path, strokeInfo);
    }
}
1138
// Draws a non-empty 'path' via the path renderer chain. Dashing has already
// been filtered out by the callers (see drawPath). First tries to find a
// renderer for the path as-is without the SW rasterizer; failing that, the
// stroke is baked into the path geometry and the SW renderer is allowed.
void GrContext::internalDrawPath(GrDrawTarget* target, bool useAA, const SkPath& path,
                                 const GrStrokeInfo& strokeInfo) {
    SkASSERT(!path.isEmpty());

    GR_CREATE_TRACE_MARKER("GrContext::internalDrawPath", target);


    // An Assumption here is that path renderer would use some form of tweaking
    // the src color (either the input alpha or in the frag shader) to implement
    // aa. If we have some future driver-mojo path AA that can do the right
    // thing WRT to the blend then we'll need some query on the PR.
    bool useCoverageAA = useAA &&
        !target->getDrawState().getRenderTarget()->isMultisampled() &&
        !target->shouldDisableCoverageAAForBlend();


    GrPathRendererChain::DrawType type =
        useCoverageAA ? GrPathRendererChain::kColorAntiAlias_DrawType :
                           GrPathRendererChain::kColor_DrawType;

    const SkPath* pathPtr = &path;
    SkTLazy<SkPath> tmpPath;
    // Copy-on-write: the stroke is only cloned if we need to modify it below.
    SkTCopyOnFirstWrite<SkStrokeRec> stroke(strokeInfo.getStrokeRec());

    // Try a 1st time without stroking the path and without allowing the SW renderer
    GrPathRenderer* pr = this->getPathRenderer(*pathPtr, *stroke, target, false, type);

    if (NULL == pr) {
        if (!GrPathRenderer::IsStrokeHairlineOrEquivalent(*stroke, this->getMatrix(), NULL)) {
            // It didn't work the 1st time, so try again with the stroked path
            if (stroke->applyToPath(tmpPath.init(), *pathPtr)) {
                pathPtr = tmpPath.get();
                stroke.writable()->setFillStyle();
                if (pathPtr->isEmpty()) {
                    // Stroking produced an empty path; nothing to draw.
                    return;
                }
            }
        }

        // This time, allow SW renderer
        pr = this->getPathRenderer(*pathPtr, *stroke, target, true, type);
    }

    if (NULL == pr) {
#ifdef SK_DEBUG
        SkDebugf("Unable to find path renderer compatible with path.\n");
#endif
        return;
    }

    pr->drawPath(*pathPtr, *stroke, target, useCoverageAA);
}
1191
1192////////////////////////////////////////////////////////////////////////////////
1193
1194void GrContext::flush(int flagsBitfield) {
1195    if (NULL == fDrawBuffer) {
1196        return;
1197    }
1198
1199    if (kDiscard_FlushBit & flagsBitfield) {
1200        fDrawBuffer->reset();
1201    } else {
1202        fDrawBuffer->flush();
1203    }
1204    fFlushToReduceCacheSize = false;
1205}
1206
1207bool sw_convert_to_premul(GrPixelConfig srcConfig, int width, int height, size_t inRowBytes,
1208                          const void* inPixels, size_t outRowBytes, void* outPixels) {
1209    SkSrcPixelInfo srcPI;
1210    if (!GrPixelConfig2ColorType(srcConfig, &srcPI.fColorType)) {
1211        return false;
1212    }
1213    srcPI.fAlphaType = kUnpremul_SkAlphaType;
1214    srcPI.fPixels = inPixels;
1215    srcPI.fRowBytes = inRowBytes;
1216
1217    SkDstPixelInfo dstPI;
1218    dstPI.fColorType = srcPI.fColorType;
1219    dstPI.fAlphaType = kPremul_SkAlphaType;
1220    dstPI.fPixels = outPixels;
1221    dstPI.fRowBytes = outRowBytes;
1222
1223    return srcPI.convertPixelsTo(&dstPI, width, height);
1224}
1225
// Writes 'buffer' into the (left, top, width, height) window of 'surface'.
// A direct texture upload is attempted first; otherwise the pixels are
// staged in a scratch texture and drawn into the surface's render target,
// with any unpremul->premul and R/B-swap conversions applied in the draw
// (or on the CPU when no conversion effect is available).
// Returns false if the write cannot be performed.
bool GrContext::writeSurfacePixels(GrSurface* surface,
                                   int left, int top, int width, int height,
                                   GrPixelConfig srcConfig, const void* buffer, size_t rowBytes,
                                   uint32_t pixelOpsFlags) {

    {
        // Fast path: upload straight into the surface's texture when no
        // premul conversion is requested and the GPU supports the write.
        GrTexture* texture = NULL;
        if (!(kUnpremul_PixelOpsFlag & pixelOpsFlags) && (texture = surface->asTexture()) &&
            fGpu->canWriteTexturePixels(texture, srcConfig)) {

            if (!(kDontFlush_PixelOpsFlag & pixelOpsFlags) &&
                surface->surfacePriv().hasPendingIO()) {
                this->flush();
            }
            return fGpu->writeTexturePixels(texture, left, top, width, height,
                                            srcConfig, buffer, rowBytes);
            // Don't need to check kFlushWrites_PixelOp here, we just did a direct write so the
            // upload is already flushed.
        }
    }

    // If we didn't do a direct texture write then we upload the pixels to a texture and draw.
    GrRenderTarget* renderTarget = surface->asRenderTarget();
    if (NULL == renderTarget) {
        return false;
    }

    // We ignore the preferred config unless it is a R/B swap of the src config. In that case
    // we will upload the original src data to a scratch texture but we will spoof it as the swapped
    // config. This scratch will then have R and B swapped. We correct for this by swapping again
    // when drawing the scratch to the dst using a conversion effect.
    bool swapRAndB = false;
    GrPixelConfig writeConfig = srcConfig;
    if (GrPixelConfigSwapRAndB(srcConfig) ==
        fGpu->preferredWritePixelsConfig(srcConfig, renderTarget->config())) {
        writeConfig = GrPixelConfigSwapRAndB(srcConfig);
        swapRAndB = true;
    }

    // Stage the pixels in a scratch texture sized to the dirty window.
    GrSurfaceDesc desc;
    desc.fWidth = width;
    desc.fHeight = height;
    desc.fConfig = writeConfig;
    SkAutoTUnref<GrTexture> texture(this->refScratchTexture(desc, kApprox_ScratchTexMatch));
    if (!texture) {
        return false;
    }

    SkAutoTUnref<const GrFragmentProcessor> fp;
    SkMatrix textureMatrix;
    textureMatrix.setIDiv(texture->width(), texture->height());

    // allocate a tmp buffer and sw convert the pixels to premul
    SkAutoSTMalloc<128 * 128, uint32_t> tmpPixels(0);

    if (kUnpremul_PixelOpsFlag & pixelOpsFlags) {
        // Unpremul writes are only supported for 8888 configs.
        if (!GrPixelConfigIs8888(srcConfig)) {
            return false;
        }
        fp.reset(this->createUPMToPMEffect(texture, swapRAndB, textureMatrix));
        // handle the unpremul step on the CPU if we couldn't create an effect to do it.
        if (NULL == fp) {
            size_t tmpRowBytes = 4 * width;
            tmpPixels.reset(width * height);
            if (!sw_convert_to_premul(srcConfig, width, height, rowBytes, buffer, tmpRowBytes,
                                      tmpPixels.get())) {
                return false;
            }
            rowBytes = tmpRowBytes;
            buffer = tmpPixels.get();
        }
    }
    if (NULL == fp) {
        // No premul step needed (or it was done on the CPU). The draw may
        // still need to undo the R/B swap spoofed into the scratch.
        fp.reset(GrConfigConversionEffect::Create(texture,
                                                  swapRAndB,
                                                  GrConfigConversionEffect::kNone_PMConversion,
                                                  textureMatrix));
    }

    // Even if the client told us not to flush, we still flush here. The client may have known that
    // writes to the original surface caused no data hazards, but they can't know that the scratch
    // we just got is safe.
    if (texture->surfacePriv().hasPendingIO()) {
        this->flush();
    }
    if (!fGpu->writeTexturePixels(texture, 0, 0, width, height,
                                  writeConfig, buffer, rowBytes)) {
        return false;
    }

    // Position the scratch's contents at (left, top) in the destination.
    SkMatrix matrix;
    matrix.setTranslate(SkIntToScalar(left), SkIntToScalar(top));

    // This function can be called in the midst of drawing another object (e.g., when uploading a
    // SW-rasterized clip while issuing a draw). So we push the current geometry state before
    // drawing a rect to the render target.
    // The bracket ensures we pop the stack if we wind up flushing below.
    {
        GrDrawTarget* drawTarget = this->prepareToDraw(NULL, NULL, NULL);
        GrDrawTarget::AutoGeometryAndStatePush agasp(drawTarget, GrDrawTarget::kReset_ASRInit,
                                                     &matrix);
        GrDrawState* drawState = drawTarget->drawState();
        drawState->addColorProcessor(fp);
        drawState->setRenderTarget(renderTarget);
        // Clipping would wrongly trim the upload rect, so disable it.
        drawState->disableState(GrDrawState::kClip_StateBit);
        drawTarget->drawSimpleRect(SkRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height)));
    }

    if (kFlushWrites_PixelOp & pixelOpsFlags) {
        this->flushSurfaceWrites(surface);
    }

    return true;
}
1340
1341// toggles between RGBA and BGRA
1342static SkColorType toggle_colortype32(SkColorType ct) {
1343    if (kRGBA_8888_SkColorType == ct) {
1344        return kBGRA_8888_SkColorType;
1345    } else {
1346        SkASSERT(kBGRA_8888_SkColorType == ct);
1347        return kRGBA_8888_SkColorType;
1348    }
1349}
1350
// Reads the (left, top, width, height) window of 'target' into 'buffer'.
// When the GPU read would require a y-flip, R/B swap, or premul->unpremul
// conversion and the source is a texture, the pixels are first drawn into a
// scratch render target with those conversions applied; any premul or R/B
// conversion not handled by the draw is done on the CPU after the readback.
// Returns false if the read cannot be performed.
bool GrContext::readRenderTargetPixels(GrRenderTarget* target,
                                       int left, int top, int width, int height,
                                       GrPixelConfig dstConfig, void* buffer, size_t rowBytes,
                                       uint32_t flags) {
    ASSERT_OWNED_RESOURCE(target);
    SkASSERT(target);

    if (!(kDontFlush_PixelOpsFlag & flags) && target->surfacePriv().hasPendingWrite()) {
        this->flush();
    }

    // Determine which conversions have to be applied: flipY, swapRAnd, and/or unpremul.

    // If fGpu->readPixels would incur a y-flip cost then we will read the pixels upside down. We'll
    // either do the flipY by drawing into a scratch with a matrix or on the cpu after the read.
    bool flipY = fGpu->readPixelsWillPayForYFlip(target, left, top,
                                                 width, height, dstConfig,
                                                 rowBytes);
    // We ignore the preferred config if it is different than our config unless it is an R/B swap.
    // In that case we'll perform an R and B swap while drawing to a scratch texture of the swapped
    // config. Then we will call readPixels on the scratch with the swapped config. The swaps during
    // the draw cancels out the fact that we call readPixels with a config that is R/B swapped from
    // dstConfig.
    GrPixelConfig readConfig = dstConfig;
    bool swapRAndB = false;
    if (GrPixelConfigSwapRAndB(dstConfig) ==
        fGpu->preferredReadPixelsConfig(dstConfig, target->config())) {
        readConfig = GrPixelConfigSwapRAndB(readConfig);
        swapRAndB = true;
    }

    bool unpremul = SkToBool(kUnpremul_PixelOpsFlag & flags);

    if (unpremul && !GrPixelConfigIs8888(dstConfig)) {
        // The unpremul flag is only allowed for these two configs.
        return false;
    }

    SkAutoTUnref<GrTexture> tempTexture;

    // If the src is a texture and we would have to do conversions after read pixels, we instead
    // do the conversions by drawing the src to a scratch texture. If we handle any of the
    // conversions in the draw we set the corresponding bool to false so that we don't reapply it
    // on the read back pixels.
    GrTexture* src = target->asTexture();
    if (src && (swapRAndB || unpremul || flipY)) {
        // Make the scratch a render so we can read its pixels.
        GrSurfaceDesc desc;
        desc.fFlags = kRenderTarget_GrSurfaceFlag;
        desc.fWidth = width;
        desc.fHeight = height;
        desc.fConfig = readConfig;
        desc.fOrigin = kTopLeft_GrSurfaceOrigin;

        // When a full read back is faster than a partial we could always make the scratch exactly
        // match the passed rect. However, if we see many different size rectangles we will trash
        // our texture cache and pay the cost of creating and destroying many textures. So, we only
        // request an exact match when the caller is reading an entire RT.
        ScratchTexMatch match = kApprox_ScratchTexMatch;
        if (0 == left &&
            0 == top &&
            target->width() == width &&
            target->height() == height &&
            fGpu->fullReadPixelsIsFasterThanPartial()) {
            match = kExact_ScratchTexMatch;
        }
        tempTexture.reset(this->refScratchTexture(desc, match));
        if (tempTexture) {
            // compute a matrix to perform the draw
            SkMatrix textureMatrix;
            textureMatrix.setTranslate(SK_Scalar1 *left, SK_Scalar1 *top);
            textureMatrix.postIDiv(src->width(), src->height());

            SkAutoTUnref<const GrFragmentProcessor> fp;
            if (unpremul) {
                fp.reset(this->createPMToUPMEffect(src, swapRAndB, textureMatrix));
                if (fp) {
                    unpremul = false; // we no longer need to do this on CPU after the read back.
                }
            }
            // If we failed to create a PM->UPM effect and have no other conversions to perform then
            // there is no longer any point to using the scratch.
            if (fp || flipY || swapRAndB) {
                if (!fp) {
                    fp.reset(GrConfigConversionEffect::Create(
                            src, swapRAndB, GrConfigConversionEffect::kNone_PMConversion,
                            textureMatrix));
                }
                swapRAndB = false; // we will handle the swap in the draw.

                // We protect the existing geometry here since it may not be
                // clear to the caller that a draw operation (i.e., drawSimpleRect)
                // can be invoked in this method
                {
                    GrDrawTarget::AutoGeometryAndStatePush agasp(fDrawBuffer,
                                                                 GrDrawTarget::kReset_ASRInit);
                    GrDrawState* drawState = fDrawBuffer->drawState();
                    SkASSERT(fp);
                    drawState->addColorProcessor(fp);

                    drawState->setRenderTarget(tempTexture->asRenderTarget());
                    SkRect rect = SkRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height));
                    fDrawBuffer->drawSimpleRect(rect);
                    // we want to read back from the scratch's origin
                    left = 0;
                    top = 0;
                    target = tempTexture->asRenderTarget();
                }
                this->flushSurfaceWrites(target);
            }
        }
    }

    if (!fGpu->readPixels(target,
                          left, top, width, height,
                          readConfig, buffer, rowBytes)) {
        return false;
    }
    // Perform any conversions we weren't able to perform using a scratch texture.
    if (unpremul || swapRAndB) {
        // In-place CPU conversion of the just-read pixels.
        // NOTE(review): the alpha types are premul->unpremul even when only
        // swapRAndB is set (unpremul == false) — verify that this branch is
        // intended to unpremultiply in the swap-only case.
        SkDstPixelInfo dstPI;
        if (!GrPixelConfig2ColorType(dstConfig, &dstPI.fColorType)) {
            return false;
        }
        dstPI.fAlphaType = kUnpremul_SkAlphaType;
        dstPI.fPixels = buffer;
        dstPI.fRowBytes = rowBytes;

        SkSrcPixelInfo srcPI;
        srcPI.fColorType = swapRAndB ? toggle_colortype32(dstPI.fColorType) : dstPI.fColorType;
        srcPI.fAlphaType = kPremul_SkAlphaType;
        srcPI.fPixels = buffer;
        srcPI.fRowBytes = rowBytes;

        return srcPI.convertPixelsTo(&dstPI, width, height);
    }
    return true;
}
1489
1490void GrContext::prepareSurfaceForExternalRead(GrSurface* surface) {
1491    SkASSERT(surface);
1492    ASSERT_OWNED_RESOURCE(surface);
1493    if (surface->surfacePriv().hasPendingIO()) {
1494        this->flush();
1495    }
1496    GrRenderTarget* rt = surface->asRenderTarget();
1497    if (fGpu && rt) {
1498        fGpu->resolveRenderTarget(rt);
1499    }
1500}
1501
1502void GrContext::discardRenderTarget(GrRenderTarget* renderTarget) {
1503    SkASSERT(renderTarget);
1504    ASSERT_OWNED_RESOURCE(renderTarget);
1505    AutoRestoreEffects are;
1506    AutoCheckFlush acf(this);
1507    GrDrawTarget* target = this->prepareToDraw(NULL, &are, &acf);
1508    if (NULL == target) {
1509        return;
1510    }
1511    target->discard(renderTarget);
1512}
1513
1514void GrContext::copySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
1515                            const SkIPoint& dstPoint, uint32_t pixelOpsFlags) {
1516    if (NULL == src || NULL == dst) {
1517        return;
1518    }
1519    ASSERT_OWNED_RESOURCE(src);
1520    ASSERT_OWNED_RESOURCE(dst);
1521
1522    // Since we're going to the draw target and not GPU, no need to check kNoFlush
1523    // here.
1524
1525    GrDrawTarget* target = this->prepareToDraw(NULL, NULL, NULL);
1526    if (NULL == target) {
1527        return;
1528    }
1529    target->copySurface(dst, src, srcRect, dstPoint);
1530
1531    if (kFlushWrites_PixelOp & pixelOpsFlags) {
1532        this->flush();
1533    }
1534}
1535
1536void GrContext::flushSurfaceWrites(GrSurface* surface) {
1537    if (surface->surfacePriv().hasPendingWrite()) {
1538        this->flush();
1539    }
1540}
1541
1542////////////////////////////////////////////////////////////////////////////////
1543
// Readies the buffered draw target for a draw. When 'paint' is non-NULL the
// shared draw state is configured from it ('are' and 'acf' must then also be
// non-NULL so effects are restored and a flush check runs when the caller is
// done); when 'paint' is NULL the draw state is reset to defaults. Returns
// NULL when there is no GrGpu (e.g. the context was abandoned).
GrDrawTarget* GrContext::prepareToDraw(const GrPaint* paint,
                                       AutoRestoreEffects* are,
                                       AutoCheckFlush* acf) {
    // All users of this draw state should be freeing up all effects when they're done.
    // Otherwise effects that own resources may keep those resources alive indefinitely.
    SkASSERT(0 == fDrawState->numColorStages() && 0 == fDrawState->numCoverageStages() &&
             !fDrawState->hasGeometryProcessor());

    if (NULL == fGpu) {
        return NULL;
    }

    ASSERT_OWNED_RESOURCE(fRenderTarget.get());
    if (paint) {
        SkASSERT(are);
        SkASSERT(acf);
        are->set(fDrawState);
        fDrawState->setFromPaint(*paint, fViewMatrix, fRenderTarget.get());
#if GR_DEBUG_PARTIAL_COVERAGE_CHECK
        if ((paint->hasMask() || 0xff != paint->fCoverage) &&
            !fDrawState->couldApplyCoverage(fGpu->caps())) {
            SkDebugf("Partial pixel coverage will be incorrectly blended.\n");
        }
#endif
        // Clear any vertex attributes configured for the previous use of the
        // GrDrawState which can effect which blend optimizations are in effect.
        fDrawState->setDefaultVertexAttribs();
    } else {
        fDrawState->reset(fViewMatrix);
        fDrawState->setRenderTarget(fRenderTarget.get());
    }
    // Clipping is enabled only when a clip exists and isn't wide open.
    fDrawState->setState(GrDrawState::kClip_StateBit, fClip &&
                                                     !fClip->fClipStack->isWideOpen());
    fDrawBuffer->setClip(fClip);
    SkASSERT(fDrawState == fDrawBuffer->drawState());
    return fDrawBuffer;
}
1581
1582/*
1583 * This method finds a path renderer that can draw the specified path on
1584 * the provided target.
1585 * Due to its expense, the software path renderer has split out so it can
1586 * can be individually allowed/disallowed via the "allowSW" boolean.
1587 */
1588GrPathRenderer* GrContext::getPathRenderer(const SkPath& path,
1589                                           const SkStrokeRec& stroke,
1590                                           const GrDrawTarget* target,
1591                                           bool allowSW,
1592                                           GrPathRendererChain::DrawType drawType,
1593                                           GrPathRendererChain::StencilSupport* stencilSupport) {
1594
1595    if (NULL == fPathRendererChain) {
1596        fPathRendererChain = SkNEW_ARGS(GrPathRendererChain, (this));
1597    }
1598
1599    GrPathRenderer* pr = fPathRendererChain->getPathRenderer(path,
1600                                                             stroke,
1601                                                             target,
1602                                                             drawType,
1603                                                             stencilSupport);
1604
1605    if (NULL == pr && allowSW) {
1606        if (NULL == fSoftwarePathRenderer) {
1607            fSoftwarePathRenderer = SkNEW_ARGS(GrSoftwarePathRenderer, (this));
1608        }
1609        pr = fSoftwarePathRenderer;
1610    }
1611
1612    return pr;
1613}
1614
1615////////////////////////////////////////////////////////////////////////////////
1616bool GrContext::isConfigRenderable(GrPixelConfig config, bool withMSAA) const {
1617    return fGpu->caps()->isConfigRenderable(config, withMSAA);
1618}
1619
1620int GrContext::getRecommendedSampleCount(GrPixelConfig config,
1621                                         SkScalar dpi) const {
1622    if (!this->isConfigRenderable(config, true)) {
1623        return 0;
1624    }
1625    int chosenSampleCount = 0;
1626    if (fGpu->caps()->pathRenderingSupport()) {
1627        if (dpi >= 250.0f) {
1628            chosenSampleCount = 4;
1629        } else {
1630            chosenSampleCount = 16;
1631        }
1632    }
1633    return chosenSampleCount <= fGpu->caps()->maxSampleCount() ?
1634        chosenSampleCount : 0;
1635}
1636
1637void GrContext::setupDrawBuffer() {
1638    SkASSERT(NULL == fDrawBuffer);
1639    SkASSERT(NULL == fDrawBufferVBAllocPool);
1640    SkASSERT(NULL == fDrawBufferIBAllocPool);
1641
1642    fDrawBufferVBAllocPool =
1643        SkNEW_ARGS(GrVertexBufferAllocPool, (fGpu, false,
1644                                    DRAW_BUFFER_VBPOOL_BUFFER_SIZE,
1645                                    DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS));
1646    fDrawBufferIBAllocPool =
1647        SkNEW_ARGS(GrIndexBufferAllocPool, (fGpu, false,
1648                                   DRAW_BUFFER_IBPOOL_BUFFER_SIZE,
1649                                   DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS));
1650
1651    fDrawBuffer = SkNEW_ARGS(GrInOrderDrawBuffer, (fGpu,
1652                                                   fDrawBufferVBAllocPool,
1653                                                   fDrawBufferIBAllocPool));
1654
1655    fDrawBuffer->setDrawState(fDrawState);
1656}
1657
1658GrDrawTarget* GrContext::getTextTarget() {
1659    return this->prepareToDraw(NULL, NULL, NULL);
1660}
1661
1662const GrIndexBuffer* GrContext::getQuadIndexBuffer() const {
1663    return fGpu->getQuadIndexBuffer();
1664}
1665
1666namespace {
1667void test_pm_conversions(GrContext* ctx, int* pmToUPMValue, int* upmToPMValue) {
1668    GrConfigConversionEffect::PMConversion pmToUPM;
1669    GrConfigConversionEffect::PMConversion upmToPM;
1670    GrConfigConversionEffect::TestForPreservingPMConversions(ctx, &pmToUPM, &upmToPM);
1671    *pmToUPMValue = pmToUPM;
1672    *upmToPMValue = upmToPM;
1673}
1674}
1675
1676const GrFragmentProcessor* GrContext::createPMToUPMEffect(GrTexture* texture,
1677                                                          bool swapRAndB,
1678                                                          const SkMatrix& matrix) {
1679    if (!fDidTestPMConversions) {
1680        test_pm_conversions(this, &fPMToUPMConversion, &fUPMToPMConversion);
1681        fDidTestPMConversions = true;
1682    }
1683    GrConfigConversionEffect::PMConversion pmToUPM =
1684        static_cast<GrConfigConversionEffect::PMConversion>(fPMToUPMConversion);
1685    if (GrConfigConversionEffect::kNone_PMConversion != pmToUPM) {
1686        return GrConfigConversionEffect::Create(texture, swapRAndB, pmToUPM, matrix);
1687    } else {
1688        return NULL;
1689    }
1690}
1691
1692const GrFragmentProcessor* GrContext::createUPMToPMEffect(GrTexture* texture,
1693                                                          bool swapRAndB,
1694                                                          const SkMatrix& matrix) {
1695    if (!fDidTestPMConversions) {
1696        test_pm_conversions(this, &fPMToUPMConversion, &fUPMToPMConversion);
1697        fDidTestPMConversions = true;
1698    }
1699    GrConfigConversionEffect::PMConversion upmToPM =
1700        static_cast<GrConfigConversionEffect::PMConversion>(fUPMToPMConversion);
1701    if (GrConfigConversionEffect::kNone_PMConversion != upmToPM) {
1702        return GrConfigConversionEffect::Create(texture, swapRAndB, upmToPM, matrix);
1703    } else {
1704        return NULL;
1705    }
1706}
1707
// Registers 'resource' in the cache under 'resourceKey'. The actual
// bookkeeping is delegated to the resource's cache-access facade, which tags
// the resource with the content key.
void GrContext::addResourceToCache(const GrResourceKey& resourceKey, GrGpuResource* resource) {
    resource->cacheAccess().setContentKey(resourceKey);
}
1711
1712GrGpuResource* GrContext::findAndRefCachedResource(const GrResourceKey& resourceKey) {
1713    return fResourceCache2->findAndRefContentResource(resourceKey);
1714}
1715
1716void GrContext::addGpuTraceMarker(const GrGpuTraceMarker* marker) {
1717    fGpu->addGpuTraceMarker(marker);
1718    if (fDrawBuffer) {
1719        fDrawBuffer->addGpuTraceMarker(marker);
1720    }
1721}
1722
1723void GrContext::removeGpuTraceMarker(const GrGpuTraceMarker* marker) {
1724    fGpu->removeGpuTraceMarker(marker);
1725    if (fDrawBuffer) {
1726        fDrawBuffer->removeGpuTraceMarker(marker);
1727    }
1728}
1729
///////////////////////////////////////////////////////////////////////////////
#if GR_CACHE_STATS
// Dumps resource cache statistics. Only compiled when GR_CACHE_STATS is set.
void GrContext::printCacheStats() const {
    fResourceCache2->printStats();
}
#endif
1736
#if GR_GPU_STATS
// Exposes the GPU's stat counters. Only compiled when GR_GPU_STATS is set.
const GrContext::GPUStats* GrContext::gpuStats() const {
    const GrContext::GPUStats* stats = fGpu->gpuStats();
    return stats;
}
#endif
1742
1743