GrContext.cpp revision a04e8e842450e606dd938ddae17857849bd504d4
1
2/*
3 * Copyright 2011 Google Inc.
4 *
5 * Use of this source code is governed by a BSD-style license that can be
6 * found in the LICENSE file.
7 */
8
9
10#include "GrContext.h"
11
12#include "effects/GrConvolutionEffect.h"
13#include "effects/GrSingleTextureEffect.h"
14#include "effects/GrConfigConversionEffect.h"
15
16#include "GrBufferAllocPool.h"
17#include "GrGpu.h"
18#include "GrIndexBuffer.h"
19#include "GrInOrderDrawBuffer.h"
20#include "GrPathRenderer.h"
21#include "GrPathUtils.h"
22#include "GrResourceCache.h"
23#include "GrSoftwarePathRenderer.h"
24#include "GrStencilBuffer.h"
25#include "GrTextStrike.h"
26#include "SkTLazy.h"
27#include "SkTLS.h"
28#include "SkTrace.h"
29
SK_DEFINE_INST_COUNT(GrContext)
SK_DEFINE_INST_COUNT(GrDrawState)

// It can be useful to set this to kNo_BufferedDraw to test whether a bug is caused by using the
// InOrderDrawBuffer, to compare performance of using/not using InOrderDrawBuffer, or to make
// debugging easier.
#define DEFAULT_BUFFERING (GR_DISABLE_DRAW_BUFFERING ? kNo_BufferedDraw : kYes_BufferedDraw)

// Largest Gaussian sigma handled directly by convolution; larger sigmas are
// reduced by downscaling first (see adjust_sigma below).
#define MAX_BLUR_SIGMA 4.0f

// When we're using coverage AA but the blend is incompatible (given gpu
// limitations) should we disable AA or draw wrong?
#define DISABLE_COVERAGE_AA_FOR_BLEND 1

#if GR_DEBUG
    // change this to a 1 to see notifications when partial coverage fails
    #define GR_DEBUG_PARTIAL_COVERAGE_CHECK 0
#else
    #define GR_DEBUG_PARTIAL_COVERAGE_CHECK 0
#endif

// Default texture cache limits (entry count and total byte budget).
static const size_t MAX_TEXTURE_CACHE_COUNT = 256;
static const size_t MAX_TEXTURE_CACHE_BYTES = 16 * 1024 * 1024;

// Vertex-buffer pool sizing for the in-order draw buffer.
static const size_t DRAW_BUFFER_VBPOOL_BUFFER_SIZE = 1 << 15;
static const int DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS = 4;

// Index-buffer pool sizing for the in-order draw buffer.
static const size_t DRAW_BUFFER_IBPOOL_BUFFER_SIZE = 1 << 11;
static const int DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS = 4;

// Asserts that a resource (if non-NULL) belongs to this GrContext.
#define ASSERT_OWNED_RESOURCE(R) GrAssert(!(R) || (R)->getContext() == this)
61
62GrContext* GrContext::Create(GrEngine engine,
63                             GrPlatform3DContext context3D) {
64    GrContext* ctx = NULL;
65    GrGpu* fGpu = GrGpu::Create(engine, context3D);
66    if (NULL != fGpu) {
67        ctx = SkNEW_ARGS(GrContext, (fGpu));
68        fGpu->unref();
69    }
70    return ctx;
71}
72
73namespace {
74void* CreateThreadInstanceCount() {
75    return SkNEW_ARGS(int, (0));
76}
77void DeleteThreadInstanceCount(void* v) {
78    delete reinterpret_cast<int*>(v);
79}
80#define THREAD_INSTANCE_COUNT                                               \
81    (*reinterpret_cast<int*>(SkTLS::Get(CreateThreadInstanceCount,          \
82                                        DeleteThreadInstanceCount)))
83
84}
85
// Returns the number of live GrContext instances on the calling thread,
// as tracked by the TLS counter defined above.
int GrContext::GetThreadInstanceCount() {
    return THREAD_INSTANCE_COUNT;
}
89
GrContext::~GrContext() {
    // Flush any buffered drawing before tearing anything down.
    this->flush();

    // Since the gpu can hold scratch textures, give it a chance to let go
    // of them before freeing the texture cache
    fGpu->purgeResources();

    delete fTextureCache;
    delete fFontCache;
    delete fDrawBuffer;
    delete fDrawBufferVBAllocPool;
    delete fDrawBufferIBAllocPool;

    fAARectRenderer->unref();

    fGpu->unref();
    GrSafeUnref(fPathRendererChain);
    GrSafeUnref(fSoftwarePathRenderer);
    fDrawState->unref();

    // Keep the per-thread instance count in sync (presumably incremented in
    // the constructor, which is not visible in this chunk).
    --THREAD_INSTANCE_COUNT;
}
112
// Handles a lost 3D API context: abandon all GPU-backed state, then rebuild
// the draw buffer so the context can continue to accept draws.
void GrContext::contextLost() {
    contextDestroyed();
    this->setupDrawBuffer();
}
117
// Called when the underlying 3D API context is gone: resources must be
// abandoned (not freed through the API) and all caches emptied.
void GrContext::contextDestroyed() {
    // abandon first so destructors
    // don't try to free the resources in the API.
    fGpu->abandonResources();

    // a path renderer may be holding onto resources that
    // are now unusable
    GrSafeSetNull(fPathRendererChain);
    GrSafeSetNull(fSoftwarePathRenderer);

    delete fDrawBuffer;
    fDrawBuffer = NULL;

    delete fDrawBufferVBAllocPool;
    fDrawBufferVBAllocPool = NULL;

    delete fDrawBufferIBAllocPool;
    fDrawBufferIBAllocPool = NULL;

    fAARectRenderer->reset();

    fTextureCache->removeAll();
    fFontCache->freeAll();
    fGpu->markContextDirty();
}
143
// Marks the GPU's cached 3D API state dirty so it will be re-sent
// (presumably for use after the API was touched outside Ganesh -- confirm
// against GrGpu::markContextDirty).
void GrContext::resetContext() {
    fGpu->markContextDirty();
}
147
// Releases every GPU resource that can be freed without destroying the
// context: flush pending work, then empty all caches.
void GrContext::freeGpuResources() {
    this->flush();

    fGpu->purgeResources();

    fAARectRenderer->reset();

    fTextureCache->removeAll();
    fFontCache->freeAll();
    // a path renderer may be holding onto resources
    GrSafeSetNull(fPathRendererChain);
    GrSafeSetNull(fSoftwarePathRenderer);
}
161
162size_t GrContext::getGpuTextureCacheBytes() const {
163  return fTextureCache->getCachedResourceBytes();
164}
165
166////////////////////////////////////////////////////////////////////////////////
167
namespace {

// Scales each edge of rect by the matching axis factor, in place.
void scale_rect(SkRect* rect, float xScale, float yScale) {
    rect->fLeft = SkScalarMul(rect->fLeft, SkFloatToScalar(xScale));
    rect->fTop = SkScalarMul(rect->fTop, SkFloatToScalar(yScale));
    rect->fRight = SkScalarMul(rect->fRight, SkFloatToScalar(xScale));
    rect->fBottom = SkScalarMul(rect->fBottom, SkFloatToScalar(yScale));
}

// Halves sigma (doubling *scaleFactor each time, so the caller can downscale
// the image instead) until it is <= MAX_BLUR_SIGMA. Derives the convolution
// kernel *radius as ceil(3 * sigma) for the adjusted sigma, and returns it.
float adjust_sigma(float sigma, int *scaleFactor, int *radius) {
    *scaleFactor = 1;
    while (sigma > MAX_BLUR_SIGMA) {
        *scaleFactor *= 2;
        sigma *= 0.5f;
    }
    *radius = static_cast<int>(ceilf(sigma * 3.0f));
    GrAssert(*radius <= GrConvolutionEffect::kMaxKernelRadius);
    return sigma;
}

// Draws 'rect' applying a 1D Gaussian convolution of 'texture' along
// 'direction' into the target's current render target.
void convolve_gaussian(GrDrawTarget* target,
                       GrTexture* texture,
                       const SkRect& rect,
                       float sigma,
                       int radius,
                       Gr1DKernelEffect::Direction direction) {
    // Capture the render target before the state reset wipes it, then
    // reinstall it on the fresh state.
    GrRenderTarget* rt = target->drawState()->getRenderTarget();
    GrDrawTarget::AutoStateRestore asr(target, GrDrawTarget::kReset_ASRInit);
    GrDrawState* drawState = target->drawState();
    drawState->setRenderTarget(rt);
    // Sampler matrix maps texel coordinates into [0,1] texture space.
    GrMatrix sampleM;
    sampleM.setIDiv(texture->width(), texture->height());
    drawState->sampler(0)->reset(sampleM);
    SkAutoTUnref<GrConvolutionEffect> conv(SkNEW_ARGS(GrConvolutionEffect,
                                                      (texture, direction, radius,
                                                       sigma)));
    drawState->sampler(0)->setCustomStage(conv);
    target->drawSimpleRect(rect, NULL);
}

}
209
210GrTexture* GrContext::findAndLockTexture(const GrTextureDesc& desc,
211                                         const GrCacheData& cacheData,
212                                         const GrTextureParams* params) {
213    GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheData, false);
214    GrResource* resource = fTextureCache->findAndLock(resourceKey,
215                                                      GrResourceCache::kNested_LockType);
216    return static_cast<GrTexture*>(resource);
217}
218
219bool GrContext::isTextureInCache(const GrTextureDesc& desc,
220                                 const GrCacheData& cacheData,
221                                 const GrTextureParams* params) const {
222    GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheData, false);
223    return fTextureCache->hasKey(resourceKey);
224}
225
// Enters a stencil buffer into the resource cache (keyed by its dimensions
// and sample count) and locks it.
void GrContext::addAndLockStencilBuffer(GrStencilBuffer* sb) {
    ASSERT_OWNED_RESOURCE(sb);

    GrResourceKey resourceKey = GrStencilBuffer::ComputeKey(sb->width(),
                                                            sb->height(),
                                                            sb->numSamples());
    fTextureCache->createAndLock(resourceKey, sb);
}
234
235GrStencilBuffer* GrContext::findStencilBuffer(int width, int height,
236                                              int sampleCnt) {
237    GrResourceKey resourceKey = GrStencilBuffer::ComputeKey(width,
238                                                            height,
239                                                            sampleCnt);
240    GrResource* resource = fTextureCache->findAndLock(resourceKey,
241                                            GrResourceCache::kSingle_LockType);
242    return static_cast<GrStencilBuffer*>(resource);
243}
244
// Releases the cache lock taken by addAndLockStencilBuffer/findStencilBuffer.
void GrContext::unlockStencilBuffer(GrStencilBuffer* sb) {
    ASSERT_OWNED_RESOURCE(sb);
    GrAssert(NULL != sb->getCacheEntry());

    fTextureCache->unlock(sb->getCacheEntry());
}
251
/**
 * Nearest-neighbor stretch of a tightly packed pixel block into a
 * destination block of different dimensions. No filtering is performed.
 *
 * @param dst   destination pixels, dstW*dstH*bpp bytes, tightly packed
 * @param dstW  destination width in pixels
 * @param dstH  destination height in pixels
 * @param src   source pixels, srcW*srcH*bpp bytes, tightly packed
 *              (now const: the source is never written)
 * @param srcW  source width in pixels
 * @param srcH  source height in pixels
 * @param bpp   bytes per pixel, identical for src and dst
 */
static void stretchImage(void* dst,
                         int dstW,
                         int dstH,
                         const void* src,
                         int srcW,
                         int srcH,
                         int bpp) {
    if (dstW <= 0 || dstH <= 0) {
        return;  // guard: the step computations below divide by dstW/dstH
    }
    // 16.16 fixed-point source step per destination pixel / row.
    // (GrFixed is typedef'd to int32_t; spelled directly here.)
    int32_t dx = (srcW << 16) / dstW;
    int32_t dy = (srcH << 16) / dstH;

    // Start half a step in, sampling at destination pixel centers.
    int32_t y = dy >> 1;

    int dstXLimit = dstW * bpp;
    for (int j = 0; j < dstH; ++j) {
        int32_t x = dx >> 1;
        const uint8_t* srcRow = (const uint8_t*)src + (y >> 16) * srcW * bpp;
        uint8_t* dstRow = (uint8_t*)dst + j * dstW * bpp;
        for (int i = 0; i < dstXLimit; i += bpp) {
            memcpy(dstRow + i, srcRow + (x >> 16) * bpp, bpp);
            x += dx;
        }
        y += dy;
    }
}
278
279// The desired texture is NPOT and tiled but that isn't supported by
280// the current hardware. Resize the texture to be a POT
281GrTexture* GrContext::createResizedTexture(const GrTextureDesc& desc,
282                                           const GrCacheData& cacheData,
283                                           void* srcData,
284                                           size_t rowBytes,
285                                           bool needsFiltering) {
286    GrTexture* clampedTexture = this->findAndLockTexture(desc, cacheData, NULL);
287
288    if (NULL == clampedTexture) {
289        clampedTexture = this->createAndLockTexture(NULL, desc, cacheData, srcData, rowBytes);
290        GrAssert(NULL != clampedTexture);
291        if (NULL == clampedTexture) {
292            return NULL;
293        }
294    }
295    GrTextureDesc rtDesc = desc;
296    rtDesc.fFlags =  rtDesc.fFlags |
297                     kRenderTarget_GrTextureFlagBit |
298                     kNoStencil_GrTextureFlagBit;
299    rtDesc.fWidth  = GrNextPow2(GrMax(desc.fWidth, 64));
300    rtDesc.fHeight = GrNextPow2(GrMax(desc.fHeight, 64));
301
302    GrTexture* texture = fGpu->createTexture(rtDesc, NULL, 0);
303
304    if (NULL != texture) {
305        GrDrawTarget::AutoStateRestore asr(fGpu, GrDrawTarget::kReset_ASRInit);
306        GrDrawState* drawState = fGpu->drawState();
307        drawState->setRenderTarget(texture->asRenderTarget());
308
309        // if filtering is not desired then we want to ensure all
310        // texels in the resampled image are copies of texels from
311        // the original.
312        drawState->sampler(0)->reset(SkShader::kClamp_TileMode,
313                                     needsFiltering);
314        drawState->createTextureEffect(0, clampedTexture);
315
316        static const GrVertexLayout layout =
317                            GrDrawTarget::StageTexCoordVertexLayoutBit(0,0);
318        GrDrawTarget::AutoReleaseGeometry arg(fGpu, layout, 4, 0);
319
320        if (arg.succeeded()) {
321            GrPoint* verts = (GrPoint*) arg.vertices();
322            verts[0].setIRectFan(0, 0,
323                                    texture->width(),
324                                    texture->height(),
325                                    2*sizeof(GrPoint));
326            verts[1].setIRectFan(0, 0, 1, 1, 2*sizeof(GrPoint));
327            fGpu->drawNonIndexed(kTriangleFan_GrPrimitiveType,
328                                    0, 4);
329        }
330        texture->releaseRenderTarget();
331    } else {
332        // TODO: Our CPU stretch doesn't filter. But we create separate
333        // stretched textures when the sampler state is either filtered or
334        // not. Either implement filtered stretch blit on CPU or just create
335        // one when FBO case fails.
336
337        rtDesc.fFlags = kNone_GrTextureFlags;
338        // no longer need to clamp at min RT size.
339        rtDesc.fWidth  = GrNextPow2(desc.fWidth);
340        rtDesc.fHeight = GrNextPow2(desc.fHeight);
341        int bpp = GrBytesPerPixel(desc.fConfig);
342        SkAutoSMalloc<128*128*4> stretchedPixels(bpp *
343                                                    rtDesc.fWidth *
344                                                    rtDesc.fHeight);
345        stretchImage(stretchedPixels.get(), rtDesc.fWidth, rtDesc.fHeight,
346                        srcData, desc.fWidth, desc.fHeight, bpp);
347
348        size_t stretchedRowBytes = rtDesc.fWidth * bpp;
349
350        GrTexture* texture = fGpu->createTexture(rtDesc,
351                                                    stretchedPixels.get(),
352                                                    stretchedRowBytes);
353        GrAssert(NULL != texture);
354    }
355    this->unlockTexture(clampedTexture);
356
357    return texture;
358}
359
// Creates a texture (resizing NPOT content to POT when the key says the
// hardware requires it), enters it into the cache, and returns it locked.
// Returns NULL if the texture could not be created.
GrTexture* GrContext::createAndLockTexture(
        const GrTextureParams* params,
        const GrTextureDesc& desc,
        const GrCacheData& cacheData,
        void* srcData,
        size_t rowBytes) {
    SK_TRACE_EVENT0("GrContext::createAndLockTexture");

#if GR_DUMP_TEXTURE_UPLOAD
    GrPrintf("GrContext::createAndLockTexture [%d %d]\n", desc.fWidth, desc.fHeight);
#endif

    GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheData, false);

    GrTexture* texture = NULL;
    if (GrTexture::NeedsResizing(resourceKey)) {
        // The key encodes that this desc/params combo needs a POT resize.
        texture = this->createResizedTexture(desc, cacheData,
                                             srcData, rowBytes,
                                             GrTexture::NeedsFiltering(resourceKey));
    } else {
        texture = fGpu->createTexture(desc, srcData, rowBytes);
    }

    if (NULL != texture) {
        fTextureCache->createAndLock(resourceKey, texture);
    }

    return texture;
}
389
// Finds (or creates) a scratch texture at least as large as inDesc asks for.
// With kApprox match the dimensions are binned to powers of two (min 256)
// and the search progressively relaxes flags / doubles dimensions before
// giving up and creating a new texture. The returned texture is locked and
// made exclusive so a second identical request can't return the same one.
GrTexture* GrContext::lockScratchTexture(const GrTextureDesc& inDesc,
                                         ScratchTexMatch match) {
    GrTextureDesc desc = inDesc;
    GrCacheData cacheData(GrCacheData::kScratch_CacheID);

    // kNoStencil only makes sense together with kRenderTarget.
    GrAssert((desc.fFlags & kRenderTarget_GrTextureFlagBit) ||
             !(desc.fFlags & kNoStencil_GrTextureFlagBit));

    if (kExact_ScratchTexMatch != match) {
        // bin by pow2 with a reasonable min
        static const int MIN_SIZE = 256;
        desc.fWidth  = GrMax(MIN_SIZE, GrNextPow2(desc.fWidth));
        desc.fHeight = GrMax(MIN_SIZE, GrNextPow2(desc.fHeight));
    }

    GrResource* resource = NULL;
    int origWidth = desc.fWidth;
    int origHeight = desc.fHeight;
    bool doubledW = false;
    bool doubledH = false;

    do {
        GrResourceKey key = GrTexture::ComputeKey(fGpu, NULL, desc, cacheData, true);
        resource = fTextureCache->findAndLock(key,
                                              GrResourceCache::kNested_LockType);
        // if we miss, relax the fit of the flags...
        // then try doubling width... then height.
        if (NULL != resource || kExact_ScratchTexMatch == match) {
            break;
        }
        // We no longer try to reuse textures that were previously used as render targets in
        // situations where no RT is needed; doing otherwise can confuse the video driver and
        // cause significant performance problems in some cases.
        if (desc.fFlags & kNoStencil_GrTextureFlagBit) {
            desc.fFlags = desc.fFlags & ~kNoStencil_GrTextureFlagBit;
        } else if (!doubledW) {
            desc.fFlags = inDesc.fFlags;
            desc.fWidth *= 2;
            doubledW = true;
        } else if (!doubledH) {
            desc.fFlags = inDesc.fFlags;
            desc.fWidth = origWidth;
            desc.fHeight *= 2;
            doubledH = true;
        } else {
            break;
        }

    } while (true);

    if (NULL == resource) {
        // Cache miss even after relaxing: create a fresh texture at the
        // original (binned) size and flags, and enter it into the cache.
        desc.fFlags = inDesc.fFlags;
        desc.fWidth = origWidth;
        desc.fHeight = origHeight;
        GrTexture* texture = fGpu->createTexture(desc, NULL, 0);
        if (NULL != texture) {
            GrResourceKey key = GrTexture::ComputeKey(fGpu, NULL,
                                                      texture->desc(),
                                                      cacheData,
                                                      true);
            fTextureCache->createAndLock(key, texture);
            resource = texture;
        }
    }

    // If the caller gives us the same desc/sampler twice we don't want
    // to return the same texture the second time (unless it was previously
    // released). So make it exclusive to hide it from future searches.
    if (NULL != resource) {
        fTextureCache->makeExclusive(resource->getCacheEntry());
    }

    return static_cast<GrTexture*>(resource);
}
464
// Returns a texture detached by an AutoScratchTexture back to the cache:
// clears its exclusive status and releases its lock.
void GrContext::addExistingTextureToCache(GrTexture* texture) {

    if (NULL == texture) {
        return;
    }

    // This texture should already have a cache entry since it was once
    // attached
    GrAssert(NULL != texture->getCacheEntry());

    // Conceptually, the cache entry is going to assume responsibility
    // for the creation ref.
    GrAssert(1 == texture->getRefCnt());

    // Since this texture came from an AutoScratchTexture it should
    // still be in the exclusive pile
    fTextureCache->makeNonExclusive(texture->getCacheEntry());

    // and it should still be locked
    fTextureCache->unlock(texture->getCacheEntry());
}
486
// Releases the cache lock on a texture; scratch textures are additionally
// made non-exclusive so future searches can find them again.
void GrContext::unlockTexture(GrTexture* texture) {
    ASSERT_OWNED_RESOURCE(texture);
    GrAssert(NULL != texture->getCacheEntry());

    // If this is a scratch texture we detached it from the cache
    // while it was locked (to avoid two callers simultaneously getting
    // the same texture).
    if (GrTexture::IsScratchTexture(texture->getCacheEntry()->key())) {
        fTextureCache->makeNonExclusive(texture->getCacheEntry());
    }

    fTextureCache->unlock(texture->getCacheEntry());
}
500
// Creates a texture that bypasses the texture cache entirely.
GrTexture* GrContext::createUncachedTexture(const GrTextureDesc& descIn,
                                            void* srcData,
                                            size_t rowBytes) {
    // NOTE(review): a local copy of the desc is made before the call --
    // presumably fGpu->createTexture may modify it; confirm against
    // GrGpu::createTexture before simplifying.
    GrTextureDesc descCopy = descIn;
    return fGpu->createTexture(descCopy, srcData, rowBytes);
}
507
// Reads the texture cache's current limits (entry count and byte budget).
void GrContext::getTextureCacheLimits(int* maxTextures,
                                      size_t* maxTextureBytes) const {
    fTextureCache->getLimits(maxTextures, maxTextureBytes);
}

// Sets the texture cache's limits (entry count and byte budget).
void GrContext::setTextureCacheLimits(int maxTextures, size_t maxTextureBytes) {
    fTextureCache->setLimits(maxTextures, maxTextureBytes);
}

// Largest texture dimension supported by the underlying GPU.
int GrContext::getMaxTextureSize() const {
    return fGpu->getCaps().fMaxTextureSize;
}

// Largest render target dimension supported by the underlying GPU.
int GrContext::getMaxRenderTargetSize() const {
    return fGpu->getCaps().fMaxRenderTargetSize;
}
524
525///////////////////////////////////////////////////////////////////////////////
526
// Wraps a platform (client-created) texture object in a GrTexture.
GrTexture* GrContext::createPlatformTexture(const GrPlatformTextureDesc& desc) {
    return fGpu->createPlatformTexture(desc);
}

// Wraps a platform (client-created) render target in a GrRenderTarget.
GrRenderTarget* GrContext::createPlatformRenderTarget(const GrPlatformRenderTargetDesc& desc) {
    return fGpu->createPlatformRenderTarget(desc);
}
534
535///////////////////////////////////////////////////////////////////////////////
536
537bool GrContext::supportsIndex8PixelConfig(const GrTextureParams* params,
538                                          int width, int height) const {
539    const GrDrawTarget::Caps& caps = fGpu->getCaps();
540    if (!caps.f8BitPaletteSupport) {
541        return false;
542    }
543
544    bool isPow2 = GrIsPow2(width) && GrIsPow2(height);
545
546    if (!isPow2) {
547        bool tiled = NULL != params && params->isTiled();
548        if (tiled && !caps.fNPOTTextureTileSupport) {
549            return false;
550        }
551    }
552    return true;
553}
554
555////////////////////////////////////////////////////////////////////////////////
556
// Returns the clip currently installed on the GPU.
const GrClipData* GrContext::getClip() const {
    return fGpu->getClip();
}

// Installs clipData on the GPU and enables clipping in the draw state.
void GrContext::setClip(const GrClipData* clipData) {
    fGpu->setClip(clipData);
    fDrawState->enableState(GrDrawState::kClip_StateBit);
}
565
566////////////////////////////////////////////////////////////////////////////////
567
// Issues a clear of 'rect' to 'color' on 'target' through the (possibly
// buffered) draw target. NULL-argument semantics are handled by the draw
// target's clear() -- presumably NULL rect means whole target; confirm there.
void GrContext::clear(const GrIRect* rect,
                      const GrColor color,
                      GrRenderTarget* target) {
    this->prepareToDraw(NULL, DEFAULT_BUFFERING)->clear(rect, color, target);
}
573
// Fills the entire current render target (within the clip) with the paint.
// Achieved by mapping the target bounds through the inverse view matrix and
// drawing that rect; perspective matrices instead draw in device space with
// adjusted sampler matrices.
void GrContext::drawPaint(const GrPaint& paint) {
    // set rect to be big enough to fill the space, but not super-huge, so we
    // don't overflow fixed-point implementations
    GrRect r;
    r.setLTRB(0, 0,
              GrIntToScalar(getRenderTarget()->width()),
              GrIntToScalar(getRenderTarget()->height()));
    GrMatrix inverse;
    SkTLazy<GrPaint> tmpPaint;
    const GrPaint* p = &paint;
    AutoMatrix am;

    // We attempt to map r by the inverse matrix and draw that. mapRect will
    // map the four corners and bound them with a new rect. This will not
    // produce a correct result for some perspective matrices.
    if (!this->getMatrix().hasPerspective()) {
        if (!fDrawState->getViewInverse(&inverse)) {
            GrPrintf("Could not invert matrix\n");
            return;
        }
        inverse.mapRect(&r);
    } else {
        // Perspective: draw in device space (identity view) and fold the
        // inverse view into the sampler matrices instead.
        if (paint.hasTextureOrMask()) {
            tmpPaint.set(paint);
            p = tmpPaint.get();
            if (!tmpPaint.get()->preConcatSamplerMatricesWithInverse(fDrawState->getViewMatrix())) {
                GrPrintf("Could not invert matrix\n");
            }
        }
        am.set(this, GrMatrix::I());
    }
    // by definition this fills the entire clip, no need for AA
    if (paint.fAntiAlias) {
        if (!tmpPaint.isValid()) {
            tmpPaint.set(paint);
            p = tmpPaint.get();
        }
        GrAssert(p == tmpPaint.get());
        tmpPaint.get()->fAntiAlias = false;
    }
    this->drawRect(*p, r);
}
616
617////////////////////////////////////////////////////////////////////////////////
618
namespace {
// True when coverage-based AA should be skipped because the current blend
// can't correctly apply partial coverage, and the compile-time policy
// (DISABLE_COVERAGE_AA_FOR_BLEND) prefers disabling AA over drawing wrong.
inline bool disable_coverage_aa_for_blend(GrDrawTarget* target) {
    return DISABLE_COVERAGE_AA_FOR_BLEND && !target->canApplyCoverage();
}
}
624
625////////////////////////////////////////////////////////////////////////////////
626
/*  create a triangle strip that strokes the specified rect (the geometry
 below clearly outlines a rect, though the original comment said
 "triangle"). There are 8 unique vertices, but we repeat the last 2 to
 close up. Alternatively we could use an indices array, and then only send
 8 verts, but not sure that would be faster.
 */
static void setStrokeRectStrip(GrPoint verts[10], GrRect rect,
                               GrScalar width) {
    const GrScalar rad = GrScalarHalf(width);
    rect.sort();

    // Alternating inner/outer vertices around the rect; verts[8]/verts[9]
    // repeat the first pair to close the strip.
    verts[0].set(rect.fLeft + rad, rect.fTop + rad);
    verts[1].set(rect.fLeft - rad, rect.fTop - rad);
    verts[2].set(rect.fRight - rad, rect.fTop + rad);
    verts[3].set(rect.fRight + rad, rect.fTop - rad);
    verts[4].set(rect.fRight - rad, rect.fBottom - rad);
    verts[5].set(rect.fRight + rad, rect.fBottom + rad);
    verts[6].set(rect.fLeft + rad, rect.fBottom - rad);
    verts[7].set(rect.fLeft - rad, rect.fBottom + rad);
    verts[8] = verts[0];
    verts[9] = verts[1];
}
648
649/**
650 * Returns true if the rects edges are integer-aligned.
651 */
652static bool isIRect(const GrRect& r) {
653    return GrScalarIsInt(r.fLeft) && GrScalarIsInt(r.fTop) &&
654           GrScalarIsInt(r.fRight) && GrScalarIsInt(r.fBottom);
655}
656
// Decides whether a rect draw can use the coverage-ramp AA path. On success
// fills in *combinedMatrix (view * matrix), the device-space *devRect, and
// *useVertexCoverage (true when alpha can't simply be tweaked for coverage).
// Returns false when AA is unnecessary or can't be applied correctly.
static bool apply_aa_to_rect(GrDrawTarget* target,
                             const GrRect& rect,
                             GrScalar width,
                             const GrMatrix* matrix,
                             GrMatrix* combinedMatrix,
                             GrRect* devRect,
                             bool* useVertexCoverage) {
    // we use a simple coverage ramp to do aa on axis-aligned rects
    // we check if the rect will be axis-aligned, and the rect won't land on
    // integer coords.

    // we are keeping around the "tweak the alpha" trick because
    // it is our only hope for the fixed-pipe implementation.
    // In a shader implementation we can give a separate coverage input
    // TODO: remove this ugliness when we drop the fixed-pipe impl
    *useVertexCoverage = false;
    if (!target->canTweakAlphaForCoverage()) {
        if (disable_coverage_aa_for_blend(target)) {
#if GR_DEBUG
            //GrPrintf("Turning off AA to correctly apply blend.\n");
#endif
            return false;
        } else {
            *useVertexCoverage = true;
        }
    }
    const GrDrawState& drawState = target->getDrawState();
    // MSAA targets get AA from the hardware; skip the coverage path.
    if (drawState.getRenderTarget()->isMultisampled()) {
        return false;
    }

    // Hairlines are handled by HW AA lines when available.
    if (0 == width && target->willUseHWAALines()) {
        return false;
    }

    // The coverage ramp only works for axis-aligned device-space rects.
    if (!drawState.getViewMatrix().preservesAxisAlignment()) {
        return false;
    }

    if (NULL != matrix &&
        !matrix->preservesAxisAlignment()) {
        return false;
    }

    *combinedMatrix = drawState.getViewMatrix();
    if (NULL != matrix) {
        combinedMatrix->preConcat(*matrix);
        GrAssert(combinedMatrix->preservesAxisAlignment());
    }

    combinedMatrix->mapRect(devRect, rect);
    devRect->sort();

    // A fill that lands exactly on integer edges needs no AA at all.
    if (width < 0) {
        return !isIRect(*devRect);
    } else {
        return true;
    }
}
716
717void GrContext::drawRect(const GrPaint& paint,
718                         const GrRect& rect,
719                         GrScalar width,
720                         const GrMatrix* matrix) {
721    SK_TRACE_EVENT0("GrContext::drawRect");
722
723    GrDrawTarget* target = this->prepareToDraw(&paint, DEFAULT_BUFFERING);
724    GrDrawState::AutoStageDisable atr(fDrawState);
725
726    GrRect devRect = rect;
727    GrMatrix combinedMatrix;
728    bool useVertexCoverage;
729    bool needAA = paint.fAntiAlias &&
730                  !this->getRenderTarget()->isMultisampled();
731    bool doAA = needAA && apply_aa_to_rect(target, rect, width, matrix,
732                                           &combinedMatrix, &devRect,
733                                           &useVertexCoverage);
734
735    if (doAA) {
736        GrDrawTarget::AutoDeviceCoordDraw adcd(target);
737        if (!adcd.succeeded()) {
738            return;
739        }
740        if (width >= 0) {
741            GrVec strokeSize;;
742            if (width > 0) {
743                strokeSize.set(width, width);
744                combinedMatrix.mapVectors(&strokeSize, 1);
745                strokeSize.setAbs(strokeSize);
746            } else {
747                strokeSize.set(GR_Scalar1, GR_Scalar1);
748            }
749            fAARectRenderer->strokeAARect(this->getGpu(), target, devRect,
750                                         strokeSize, useVertexCoverage);
751        } else {
752            fAARectRenderer->fillAARect(this->getGpu(), target,
753                                       devRect, useVertexCoverage);
754        }
755        return;
756    }
757
758    if (width >= 0) {
759        // TODO: consider making static vertex buffers for these cases.
760        // Hairline could be done by just adding closing vertex to
761        // unitSquareVertexBuffer()
762
763        static const int worstCaseVertCount = 10;
764        GrDrawTarget::AutoReleaseGeometry geo(target, 0, worstCaseVertCount, 0);
765
766        if (!geo.succeeded()) {
767            GrPrintf("Failed to get space for vertices!\n");
768            return;
769        }
770
771        GrPrimitiveType primType;
772        int vertCount;
773        GrPoint* vertex = geo.positions();
774
775        if (width > 0) {
776            vertCount = 10;
777            primType = kTriangleStrip_GrPrimitiveType;
778            setStrokeRectStrip(vertex, rect, width);
779        } else {
780            // hairline
781            vertCount = 5;
782            primType = kLineStrip_GrPrimitiveType;
783            vertex[0].set(rect.fLeft, rect.fTop);
784            vertex[1].set(rect.fRight, rect.fTop);
785            vertex[2].set(rect.fRight, rect.fBottom);
786            vertex[3].set(rect.fLeft, rect.fBottom);
787            vertex[4].set(rect.fLeft, rect.fTop);
788        }
789
790        GrDrawState::AutoViewMatrixRestore avmr;
791        if (NULL != matrix) {
792            GrDrawState* drawState = target->drawState();
793            avmr.set(drawState);
794            drawState->preConcatViewMatrix(*matrix);
795            drawState->preConcatSamplerMatrices(*matrix);
796        }
797
798        target->drawNonIndexed(primType, 0, vertCount);
799    } else {
800#if GR_STATIC_RECT_VB
801            const GrVertexBuffer* sqVB = fGpu->getUnitSquareVertexBuffer();
802            if (NULL == sqVB) {
803                GrPrintf("Failed to create static rect vb.\n");
804                return;
805            }
806            target->setVertexSourceToBuffer(0, sqVB);
807            GrDrawState* drawState = target->drawState();
808            GrDrawState::AutoViewMatrixRestore avmr(drawState);
809            GrMatrix m;
810            m.setAll(rect.width(),    0,             rect.fLeft,
811                        0,            rect.height(), rect.fTop,
812                        0,            0,             GrMatrix::I()[8]);
813
814            if (NULL != matrix) {
815                m.postConcat(*matrix);
816            }
817            drawState->preConcatViewMatrix(m);
818            drawState->preConcatSamplerMatrices(m);
819
820            target->drawNonIndexed(kTriangleFan_GrPrimitiveType, 0, 4);
821#else
822            target->drawSimpleRect(rect, matrix);
823#endif
824    }
825}
826
// Draws dstRect textured such that the paint's first texture stage maps
// srcRect onto it. dstMatrix/srcMatrix optionally transform the respective
// rects. Falls back to a plain fill when stage 0 has no texture.
void GrContext::drawRectToRect(const GrPaint& paint,
                               const GrRect& dstRect,
                               const GrRect& srcRect,
                               const GrMatrix* dstMatrix,
                               const GrMatrix* srcMatrix) {
    SK_TRACE_EVENT0("GrContext::drawRectToRect");

    // srcRect refers to paint's first texture
    if (!paint.isTextureStageEnabled(0)) {
        drawRect(paint, dstRect, -1, dstMatrix);
        return;
    }

    GrDrawTarget* target = this->prepareToDraw(&paint, DEFAULT_BUFFERING);

#if GR_STATIC_RECT_VB
    GrDrawState::AutoStageDisable atr(fDrawState);
    GrDrawState* drawState = target->drawState();
    GrDrawState::AutoViewMatrixRestore avmr(drawState);

    GrMatrix m;

    // Map the unit square onto dstRect (then through dstMatrix).
    m.setAll(dstRect.width(), 0,                dstRect.fLeft,
             0,               dstRect.height(), dstRect.fTop,
             0,               0,                GrMatrix::I()[8]);
    if (NULL != dstMatrix) {
        m.postConcat(*dstMatrix);
    }
    drawState->preConcatViewMatrix(m);

    // we explicitly setup the correct coords for the first stage. The others
    // must know about the view matrix change.
    for (int s = 1; s < GrPaint::kTotalStages; ++s) {
        if (drawState->isStageEnabled(s)) {
            drawState->sampler(s)->preConcatMatrix(m);
        }
    }

    // Map the unit square onto srcRect (then through srcMatrix) for stage 0.
    m.setAll(srcRect.width(), 0,                srcRect.fLeft,
             0,               srcRect.height(), srcRect.fTop,
             0,               0,                GrMatrix::I()[8]);
    if (NULL != srcMatrix) {
        m.postConcat(*srcMatrix);
    }
    drawState->sampler(GrPaint::kFirstTextureStage)->preConcatMatrix(m);

    const GrVertexBuffer* sqVB = fGpu->getUnitSquareVertexBuffer();
    if (NULL == sqVB) {
        GrPrintf("Failed to create static rect vb.\n");
        return;
    }
    target->setVertexSourceToBuffer(0, sqVB);
    target->drawNonIndexed(kTriangleFan_GrPrimitiveType, 0, 4);
#else
    GrDrawState::AutoStageDisable atr(fDrawState);

    // Non-static-VB path: let the draw target generate the geometry.
    const GrRect* srcRects[GrDrawState::kNumStages] = {NULL};
    const GrMatrix* srcMatrices[GrDrawState::kNumStages] = {NULL};
    srcRects[0] = &srcRect;
    srcMatrices[0] = srcMatrix;

    target->drawRect(dstRect, dstMatrix, srcRects, srcMatrices);
#endif
}
891
// Draws client-supplied geometry. positions is required; texCoords, colors,
// and indices are each optional (NULL to omit). When indices is NULL a
// non-indexed draw of vertexCount vertices is issued.
void GrContext::drawVertices(const GrPaint& paint,
                             GrPrimitiveType primitiveType,
                             int vertexCount,
                             const GrPoint positions[],
                             const GrPoint texCoords[],
                             const GrColor colors[],
                             const uint16_t indices[],
                             int indexCount) {
    SK_TRACE_EVENT0("GrContext::drawVertices");

    // Declared up front; only set() below when attributes must be interleaved
    // into target-owned storage.
    GrDrawTarget::AutoReleaseGeometry geo;

    GrDrawTarget* target = this->prepareToDraw(&paint, DEFAULT_BUFFERING);
    GrDrawState::AutoStageDisable atr(fDrawState);

    // Build the vertex layout from whichever optional attribute arrays were
    // provided.
    GrVertexLayout layout = 0;
    if (NULL != texCoords) {
        layout |= GrDrawTarget::StageTexCoordVertexLayoutBit(0, 0);
    }
    if (NULL != colors) {
        layout |= GrDrawTarget::kColor_VertexLayoutBit;
    }
    int vertexSize = GrDrawTarget::VertexSize(layout);

    if (sizeof(GrPoint) != vertexSize) {
        // The layout holds more than positions: reserve target-owned vertex
        // space and interleave the separate client arrays into it.
        if (!geo.set(target, layout, vertexCount, 0)) {
            GrPrintf("Failed to get space for vertices!\n");
            return;
        }
        int texOffsets[GrDrawState::kMaxTexCoords];
        int colorOffset;
        GrDrawTarget::VertexSizeAndOffsetsByIdx(layout,
                                                texOffsets,
                                                &colorOffset,
                                                NULL,
                                                NULL);
        void* curVertex = geo.vertices();

        for (int i = 0; i < vertexCount; ++i) {
            // Position always lives at offset 0 of each vertex.
            *((GrPoint*)curVertex) = positions[i];

            // Optional attributes are only written when the layout actually
            // contains them (offset > 0).
            if (texOffsets[0] > 0) {
                *(GrPoint*)((intptr_t)curVertex + texOffsets[0]) = texCoords[i];
            }
            if (colorOffset > 0) {
                *(GrColor*)((intptr_t)curVertex + colorOffset) = colors[i];
            }
            curVertex = (void*)((intptr_t)curVertex + vertexSize);
        }
    } else {
        // Position-only layout: the caller's array can be drawn directly.
        target->setVertexSourceToArray(layout, positions, vertexCount);
    }

    // we don't currently apply offscreen AA to this path. Need improved
    // management of GrDrawTarget's geometry to avoid copying points per-tile.

    if (NULL != indices) {
        target->setIndexSourceToArray(indices, indexCount);
        target->drawIndexed(primitiveType, 0, 0, vertexCount, indexCount);
    } else {
        target->drawNonIndexed(primitiveType, 0, vertexCount);
    }
}
955
956///////////////////////////////////////////////////////////////////////////////
957namespace {
958
// Per-vertex data for the screen-space circle fast path in drawOval().
// NOTE(review): the field order/size must stay in sync with
// GrDrawTarget::kEdge_VertexLayoutBit -- drawOval() asserts
// sizeof(CircleVertex) == VertexSize(layout). Do not reorder fields.
struct CircleVertex {
    GrPoint fPos;           // device-space position of the quad corner
    GrPoint fCenter;        // circle center; drawOval() stores it in y-points-up space
    GrScalar fOuterRadius;  // radius expanded by a half pixel for AA
    GrScalar fInnerRadius;  // inner radius for hairlines; 0 for fills
};
965
966/* Returns true if will map a circle to another circle. This can be true
967 * if the matrix only includes square-scale, rotation, translation.
968 */
969inline bool isSimilarityTransformation(const SkMatrix& matrix,
970                                       SkScalar tol = SK_ScalarNearlyZero) {
971    if (matrix.isIdentity() || matrix.getType() == SkMatrix::kTranslate_Mask) {
972        return true;
973    }
974    if (matrix.hasPerspective()) {
975        return false;
976    }
977
978    SkScalar mx = matrix.get(SkMatrix::kMScaleX);
979    SkScalar sx = matrix.get(SkMatrix::kMSkewX);
980    SkScalar my = matrix.get(SkMatrix::kMScaleY);
981    SkScalar sy = matrix.get(SkMatrix::kMSkewY);
982
983    if (mx == 0 && sx == 0 && my == 0 && sy == 0) {
984        return false;
985    }
986
987    // it has scales or skews, but it could also be rotation, check it out.
988    SkVector vec[2];
989    vec[0].set(mx, sx);
990    vec[1].set(sy, my);
991
992    return SkScalarNearlyZero(vec[0].dot(vec[1]), SkScalarSquare(tol)) &&
993           SkScalarNearlyEqual(vec[0].lengthSqd(), vec[1].lengthSqd(),
994                SkScalarSquare(tol));
995}
996
997}
998
// TODO: strokeWidth can't be larger than zero right now.
// It will be fixed when drawPath() can handle strokes.
//
// Draws an oval bounded by rect. strokeWidth == 0 means hairline, < 0 means
// filled (see drawPath()). Uses a shader-based device-space circle when
// possible, otherwise falls back to the path code.
void GrContext::drawOval(const GrPaint& paint,
                         const GrRect& rect,
                         SkScalar strokeWidth) {
    GrAssert(strokeWidth <= 0);
    // The fast path below only handles circles in device space: the current
    // matrix must be a similarity transform, AA must be requested, and the
    // bounding rect must be square.
    if (!isSimilarityTransformation(this->getMatrix()) ||
        !paint.fAntiAlias ||
        rect.height() != rect.width()) {
        SkPath path;
        path.addOval(rect);
        GrPathFill fill = (strokeWidth == 0) ?
                           kHairLine_GrPathFill : kWinding_GrPathFill;
        this->internalDrawPath(paint, path, fill, NULL);
        return;
    }

    GrDrawTarget* target = this->prepareToDraw(&paint, DEFAULT_BUFFERING);

    GrDrawState* drawState = target->drawState();
    GrDrawState::AutoStageDisable atr(fDrawState);
    const GrMatrix vm = drawState->getViewMatrix();

    // The render target's height is needed below to express the center in
    // y-points-up space.
    const GrRenderTarget* rt = drawState->getRenderTarget();
    if (NULL == rt) {
        return;
    }

    // The quad is emitted directly in device coordinates.
    GrDrawTarget::AutoDeviceCoordDraw adcd(target);
    if (!adcd.succeeded()) {
        return;
    }

    GrVertexLayout layout = GrDrawTarget::kEdge_VertexLayoutBit;
    GrAssert(sizeof(CircleVertex) == GrDrawTarget::VertexSize(layout));

    GrPoint center = GrPoint::Make(rect.centerX(), rect.centerY());
    GrScalar radius = SkScalarHalf(rect.width());

    // Map center and radius through the (similarity) view matrix since we
    // draw in device space.
    vm.mapPoints(&center, 1);
    radius = vm.mapRadius(radius);

    GrScalar outerRadius = radius;
    GrScalar innerRadius = 0;
    SkScalar halfWidth = 0;
    if (strokeWidth == 0) {
        // Hairline: expand half a pixel outward and inward around the radius.
        halfWidth = SkScalarHalf(SK_Scalar1);

        outerRadius += halfWidth;
        innerRadius = SkMaxScalar(0, radius - halfWidth);
    }

    GrDrawTarget::AutoReleaseGeometry geo(target, layout, 4, 0);
    if (!geo.succeeded()) {
        GrPrintf("Failed to get space for vertices!\n");
        return;
    }

    CircleVertex* verts = reinterpret_cast<CircleVertex*>(geo.vertices());

    // The fragment shader will extend the radius out half a pixel
    // to antialias. Expand the drawn rect here so all the pixels
    // will be captured.
    SkScalar L = center.fX - outerRadius - SkFloatToScalar(0.5f);
    SkScalar R = center.fX + outerRadius + SkFloatToScalar(0.5f);
    SkScalar T = center.fY - outerRadius - SkFloatToScalar(0.5f);
    SkScalar B = center.fY + outerRadius + SkFloatToScalar(0.5f);

    verts[0].fPos = SkPoint::Make(L, T);
    verts[1].fPos = SkPoint::Make(R, T);
    verts[2].fPos = SkPoint::Make(L, B);
    verts[3].fPos = SkPoint::Make(R, B);

    for (int i = 0; i < 4; ++i) {
        // this goes to fragment shader, it should be in y-points-up space.
        verts[i].fCenter = SkPoint::Make(center.fX, rt->height() - center.fY);

        verts[i].fOuterRadius = outerRadius;
        verts[i].fInnerRadius = innerRadius;
    }

    drawState->setVertexEdgeType(GrDrawState::kCircle_EdgeType);
    target->drawNonIndexed(kTriangleStrip_GrPrimitiveType, 0, 4);
}
1083
1084void GrContext::drawPath(const GrPaint& paint, const SkPath& path,
1085                         GrPathFill fill, const GrPoint* translate) {
1086
1087    if (path.isEmpty()) {
1088       if (GrIsFillInverted(fill)) {
1089           this->drawPaint(paint);
1090       }
1091       return;
1092    }
1093
1094    SkRect ovalRect;
1095    if (!GrIsFillInverted(fill) && path.isOval(&ovalRect)) {
1096        if (translate) {
1097            ovalRect.offset(*translate);
1098        }
1099        SkScalar width = (fill == kHairLine_GrPathFill) ? 0 : -SK_Scalar1;
1100        this->drawOval(paint, ovalRect, width);
1101        return;
1102    }
1103
1104    internalDrawPath(paint, path, fill, translate);
1105}
1106
1107void GrContext::internalDrawPath(const GrPaint& paint, const SkPath& path,
1108                                 GrPathFill fill, const GrPoint* translate) {
1109
1110    // Note that below we may sw-rasterize the path into a scratch texture.
1111    // Scratch textures can be recycled after they are returned to the texture
1112    // cache. This presents a potential hazard for buffered drawing. However,
1113    // the writePixels that uploads to the scratch will perform a flush so we're
1114    // OK.
1115    GrDrawTarget* target = this->prepareToDraw(&paint, DEFAULT_BUFFERING);
1116    GrDrawState::AutoStageDisable atr(fDrawState);
1117
1118    bool prAA = paint.fAntiAlias && !this->getRenderTarget()->isMultisampled();
1119
1120    // An Assumption here is that path renderer would use some form of tweaking
1121    // the src color (either the input alpha or in the frag shader) to implement
1122    // aa. If we have some future driver-mojo path AA that can do the right
1123    // thing WRT to the blend then we'll need some query on the PR.
1124    if (disable_coverage_aa_for_blend(target)) {
1125#if GR_DEBUG
1126        //GrPrintf("Turning off AA to correctly apply blend.\n");
1127#endif
1128        prAA = false;
1129    }
1130
1131    GrPathRenderer* pr = this->getPathRenderer(path, fill, target, prAA, true);
1132    if (NULL == pr) {
1133#if GR_DEBUG
1134        GrPrintf("Unable to find path renderer compatible with path.\n");
1135#endif
1136        return;
1137    }
1138
1139    pr->drawPath(path, fill, translate, target, prAA);
1140}
1141
1142////////////////////////////////////////////////////////////////////////////////
1143
1144void GrContext::flush(int flagsBitfield) {
1145    if (kDiscard_FlushBit & flagsBitfield) {
1146        fDrawBuffer->reset();
1147    } else {
1148        this->flushDrawBuffer();
1149    }
1150    if (kForceCurrentRenderTarget_FlushBit & flagsBitfield) {
1151        fGpu->forceRenderTargetFlush();
1152    }
1153}
1154
1155void GrContext::flushDrawBuffer() {
1156    if (fDrawBuffer) {
1157        // With addition of the AA clip path, flushing the draw buffer can
1158        // result in the generation of an AA clip mask. During this
1159        // process the SW path renderer may be invoked which recusively
1160        // calls this method (via internalWriteTexturePixels) creating
1161        // infinite recursion
1162        GrInOrderDrawBuffer* temp = fDrawBuffer;
1163        fDrawBuffer = NULL;
1164
1165        temp->flushTo(fGpu);
1166
1167        fDrawBuffer = temp;
1168    }
1169}
1170
1171void GrContext::writeTexturePixels(GrTexture* texture,
1172                                   int left, int top, int width, int height,
1173                                   GrPixelConfig config, const void* buffer, size_t rowBytes,
1174                                   uint32_t flags) {
1175    SK_TRACE_EVENT0("GrContext::writeTexturePixels");
1176    ASSERT_OWNED_RESOURCE(texture);
1177
1178    // TODO: use scratch texture to perform conversion
1179    if (kUnpremul_PixelOpsFlag & flags) {
1180        return;
1181    }
1182    if (!(kDontFlush_PixelOpsFlag & flags)) {
1183        this->flush();
1184    }
1185
1186    fGpu->writeTexturePixels(texture, left, top, width, height,
1187                             config, buffer, rowBytes);
1188}
1189
1190bool GrContext::readTexturePixels(GrTexture* texture,
1191                                  int left, int top, int width, int height,
1192                                  GrPixelConfig config, void* buffer, size_t rowBytes,
1193                                  uint32_t flags) {
1194    SK_TRACE_EVENT0("GrContext::readTexturePixels");
1195    ASSERT_OWNED_RESOURCE(texture);
1196
1197    // TODO: code read pixels for textures that aren't also rendertargets
1198    GrRenderTarget* target = texture->asRenderTarget();
1199    if (NULL != target) {
1200        return this->readRenderTargetPixels(target,
1201                                            left, top, width, height,
1202                                            config, buffer, rowBytes,
1203                                            flags);
1204    } else {
1205        return false;
1206    }
1207}
1208
1209#include "SkConfig8888.h"
1210
1211namespace {
1212/**
1213 * Converts a GrPixelConfig to a SkCanvas::Config8888. Only byte-per-channel
1214 * formats are representable as Config8888 and so the function returns false
1215 * if the GrPixelConfig has no equivalent Config8888.
1216 */
1217bool grconfig_to_config8888(GrPixelConfig config,
1218                            bool unpremul,
1219                            SkCanvas::Config8888* config8888) {
1220    switch (config) {
1221        case kRGBA_8888_GrPixelConfig:
1222            if (unpremul) {
1223                *config8888 = SkCanvas::kRGBA_Unpremul_Config8888;
1224            } else {
1225                *config8888 = SkCanvas::kRGBA_Premul_Config8888;
1226            }
1227            return true;
1228        case kBGRA_8888_GrPixelConfig:
1229            if (unpremul) {
1230                *config8888 = SkCanvas::kBGRA_Unpremul_Config8888;
1231            } else {
1232                *config8888 = SkCanvas::kBGRA_Premul_Config8888;
1233            }
1234            return true;
1235        default:
1236            return false;
1237    }
1238}
1239
1240// It returns a configuration with where the byte position of the R & B components are swapped in
1241// relation to the input config. This should only be called with the result of
1242// grconfig_to_config8888 as it will fail for other configs.
1243SkCanvas::Config8888 swap_config8888_red_and_blue(SkCanvas::Config8888 config8888) {
1244    switch (config8888) {
1245        case SkCanvas::kBGRA_Premul_Config8888:
1246            return SkCanvas::kRGBA_Premul_Config8888;
1247        case SkCanvas::kBGRA_Unpremul_Config8888:
1248            return SkCanvas::kRGBA_Unpremul_Config8888;
1249        case SkCanvas::kRGBA_Premul_Config8888:
1250            return SkCanvas::kBGRA_Premul_Config8888;
1251        case SkCanvas::kRGBA_Unpremul_Config8888:
1252            return SkCanvas::kBGRA_Unpremul_Config8888;
1253        default:
1254            GrCrash("Unexpected input");
1255            return SkCanvas::kBGRA_Unpremul_Config8888;;
1256    }
1257}
1258}
1259
// Reads a rectangle of pixels from target (or from the currently bound render
// target when target is NULL) into buffer, applying any required y-flip,
// R/B swap, and premul->unpremul conversion. Conversions are performed on the
// GPU via a scratch texture when possible, otherwise on the CPU after the
// read. Returns false on failure (no target, unsupported unpremul config, or
// GPU read failure).
bool GrContext::readRenderTargetPixels(GrRenderTarget* target,
                                       int left, int top, int width, int height,
                                       GrPixelConfig config, void* buffer, size_t rowBytes,
                                       uint32_t flags) {
    SK_TRACE_EVENT0("GrContext::readRenderTargetPixels");
    ASSERT_OWNED_RESOURCE(target);

    // NULL means "read from the render target currently set on the draw
    // state".
    if (NULL == target) {
        target = fDrawState->getRenderTarget();
        if (NULL == target) {
            return false;
        }
    }

    if (!(kDontFlush_PixelOpsFlag & flags)) {
        this->flush();
    }

    // Determine which conversions have to be applied: flipY, swapRAnd, and/or unpremul.

    // If fGpu->readPixels would incur a y-flip cost then we will read the pixels upside down. We'll
    // either do the flipY by drawing into a scratch with a matrix or on the cpu after the read.
    bool flipY = fGpu->readPixelsWillPayForYFlip(target, left, top,
                                                 width, height, config,
                                                 rowBytes);
    bool swapRAndB = fGpu->preferredReadPixelsConfig(config) == GrPixelConfigSwapRAndB(config);

    bool unpremul = SkToBool(kUnpremul_PixelOpsFlag & flags);

    // flipY will get set to false when it is handled below using a scratch. However, in that case
    // we still want to do the read upside down.
    bool readUpsideDown = flipY;

    if (unpremul && kRGBA_8888_GrPixelConfig != config && kBGRA_8888_GrPixelConfig != config) {
        // The unpremul flag is only allowed for these two configs.
        return false;
    }

    // When swapping we ask the GPU for the swapped config and undo the swap
    // during conversion.
    GrPixelConfig readConfig;
    if (swapRAndB) {
        readConfig = GrPixelConfigSwapRAndB(config);
        GrAssert(kUnknown_GrPixelConfig != config);
    } else {
        readConfig = config;
    }

    // If the src is a texture and we would have to do conversions after read pixels, we instead
    // do the conversions by drawing the src to a scratch texture. If we handle any of the
    // conversions in the draw we set the corresponding bool to false so that we don't reapply it
    // on the read back pixels.
    GrTexture* src = target->asTexture();
    GrAutoScratchTexture ast;
    if (NULL != src && (swapRAndB || unpremul || flipY)) {
        // Make the scratch a render target because we don't have a robust readTexturePixels as of
        // yet. It calls this function.
        GrTextureDesc desc;
        desc.fFlags = kRenderTarget_GrTextureFlagBit;
        desc.fWidth = width;
        desc.fHeight = height;
        desc.fConfig = readConfig;

        // When a full readback is faster than a partial we could always make the scratch exactly
        // match the passed rect. However, if we see many different size rectangles we will trash
        // our texture cache and pay the cost of creating and destroying many textures. So, we only
        // request an exact match when the caller is reading an entire RT.
        ScratchTexMatch match = kApprox_ScratchTexMatch;
        if (0 == left &&
            0 == top &&
            target->width() == width &&
            target->height() == height &&
            fGpu->fullReadPixelsIsFasterThanPartial()) {
            match = kExact_ScratchTexMatch;
        }
        ast.set(this, desc, match);
        GrTexture* texture = ast.texture();
        if (texture) {
            SkAutoTUnref<GrCustomStage> stage;
            if (unpremul) {
                stage.reset(this->createPMToUPMEffect(src, swapRAndB));
            }
            // If we failed to create a PM->UPM effect and have no other conversions to perform then
            // there is no longer any point to using the scratch.
            if (NULL != stage || flipY || swapRAndB) {
                if (NULL == stage) {
                    stage.reset(GrConfigConversionEffect::Create(src, swapRAndB));
                    GrAssert(NULL != stage);
                } else {
                    unpremul = false; // we will handle the UPM conversion in the draw
                }
                swapRAndB = false; // we will handle the swap in the draw.

                // Draw src into the scratch, baking the conversions into the
                // sampler matrix / custom stage.
                GrDrawTarget::AutoStateRestore asr(fGpu, GrDrawTarget::kReset_ASRInit);
                GrDrawState* drawState = fGpu->drawState();
                drawState->setRenderTarget(texture->asRenderTarget());
                GrMatrix matrix;
                if (flipY) {
                    matrix.setTranslate(SK_Scalar1 * left,
                                        SK_Scalar1 * (top + height));
                    matrix.set(GrMatrix::kMScaleY, -GR_Scalar1);
                    flipY = false; // the y flip will be handled in the draw
                } else {
                    matrix.setTranslate(SK_Scalar1 *left, SK_Scalar1 *top);
                }
                matrix.postIDiv(src->width(), src->height());
                drawState->sampler(0)->reset(matrix);
                drawState->sampler(0)->setCustomStage(stage);
                GrRect rect = GrRect::MakeWH(GrIntToScalar(width), GrIntToScalar(height));
                fGpu->drawSimpleRect(rect, NULL);
                // we want to read back from the scratch's origin
                left = 0;
                top = 0;
                target = texture->asRenderTarget();
            }
        }
    }
    if (!fGpu->readPixels(target,
                          left, top, width, height,
                          readConfig, buffer, rowBytes, readUpsideDown)) {
        return false;
    }
    // Perform any conversions we weren't able to perfom using a scratch texture.
    if (unpremul || swapRAndB || flipY) {
        SkCanvas::Config8888 srcC8888;
        SkCanvas::Config8888 dstC8888;
        bool c8888IsValid = grconfig_to_config8888(config, false, &srcC8888);
        grconfig_to_config8888(config, unpremul, &dstC8888);
        if (swapRAndB) {
            GrAssert(c8888IsValid); // we should only do r/b swap on 8888 configs
            srcC8888 = swap_config8888_red_and_blue(srcC8888);
        }
        if (flipY) {
            // CPU y-flip: swap rows top/bottom-in, converting each row in the
            // same pass when the config is 8888.
            size_t tightRB = width * GrBytesPerPixel(config);
            if (0 == rowBytes) {
                rowBytes = tightRB;
            }
            SkAutoSTMalloc<256, uint8_t> tempRow(tightRB);
            intptr_t top = reinterpret_cast<intptr_t>(buffer);
            intptr_t bot = top + (height - 1) * rowBytes;
            while (top < bot) {
                uint32_t* t = reinterpret_cast<uint32_t*>(top);
                uint32_t* b = reinterpret_cast<uint32_t*>(bot);
                uint32_t* temp = reinterpret_cast<uint32_t*>(tempRow.get());
                memcpy(temp, t, tightRB);
                if (c8888IsValid) {
                    SkConvertConfig8888Pixels(t, tightRB, dstC8888,
                                              b, tightRB, srcC8888,
                                              width, 1);
                    SkConvertConfig8888Pixels(b, tightRB, dstC8888,
                                              temp, tightRB, srcC8888,
                                              width, 1);
                } else {
                    memcpy(t, b, tightRB);
                    memcpy(b, temp, tightRB);
                }
                top += rowBytes;
                bot -= rowBytes;
            }
            // The above loop does nothing on the middle row when height is odd.
            if (top == bot && c8888IsValid && dstC8888 != srcC8888) {
                uint32_t* mid = reinterpret_cast<uint32_t*>(top);
                SkConvertConfig8888Pixels(mid, tightRB, dstC8888, mid, tightRB, srcC8888, width, 1);
            }
        } else {
            // if we aren't flipping Y then we have no reason to be here other than doing
            // conversions for 8888 (r/b swap or upm).
            GrAssert(c8888IsValid);
            uint32_t* b32 = reinterpret_cast<uint32_t*>(buffer);
            SkConvertConfig8888Pixels(b32, rowBytes, dstC8888,
                                      b32, rowBytes, srcC8888,
                                      width, height);
        }
    }
    return true;
}
1434
// Asks the GPU to resolve target. Always flushes first because pending
// buffered draws to the target are not tracked.
void GrContext::resolveRenderTarget(GrRenderTarget* target) {
    GrAssert(target);
    ASSERT_OWNED_RESOURCE(target);
    // In the future we may track whether there are any pending draws to this
    // target. We don't today so we always perform a flush. We don't promise
    // this to our clients, though.
    this->flush();
    fGpu->resolveRenderTarget(target);
}
1444
1445void GrContext::copyTexture(GrTexture* src, GrRenderTarget* dst) {
1446    if (NULL == src || NULL == dst) {
1447        return;
1448    }
1449    ASSERT_OWNED_RESOURCE(src);
1450
1451    // Writes pending to the source texture are not tracked, so a flush
1452    // is required to ensure that the copy captures the most recent contents
1453    // of the source texture. See similar behaviour in
1454    // GrContext::resolveRenderTarget.
1455    this->flush();
1456
1457    GrDrawTarget::AutoStateRestore asr(fGpu, GrDrawTarget::kReset_ASRInit);
1458    GrDrawState* drawState = fGpu->drawState();
1459    drawState->setRenderTarget(dst);
1460    GrMatrix sampleM;
1461    sampleM.setIDiv(src->width(), src->height());
1462    drawState->sampler(0)->reset(sampleM);
1463    drawState->createTextureEffect(0, src);
1464    SkRect rect = SkRect::MakeXYWH(0, 0,
1465                                   SK_Scalar1 * src->width(),
1466                                   SK_Scalar1 * src->height());
1467    fGpu->drawSimpleRect(rect, NULL);
1468}
1469
// Writes a rectangle of pixels into target (or the currently bound render
// target when target is NULL). When possible it writes directly to the
// target's texture; otherwise it uploads into a scratch texture and draws
// that into the target, handling unpremul and R/B-swap conversions either via
// a custom stage or a CPU conversion.
void GrContext::writeRenderTargetPixels(GrRenderTarget* target,
                                        int left, int top, int width, int height,
                                        GrPixelConfig config,
                                        const void* buffer,
                                        size_t rowBytes,
                                        uint32_t flags) {
    SK_TRACE_EVENT0("GrContext::writeRenderTargetPixels");
    ASSERT_OWNED_RESOURCE(target);

    // NULL means "write to the render target currently set on the draw
    // state".
    if (NULL == target) {
        target = fDrawState->getRenderTarget();
        if (NULL == target) {
            return;
        }
    }

    // TODO: when underlying api has a direct way to do this we should use it (e.g. glDrawPixels on
    // desktop GL).

    // We will always call some form of writeTexturePixels and we will pass our flags on to it.
    // Thus, we don't perform a flush here since that call will do it (if the kNoFlush flag isn't
    // set.)

    // If the RT is also a texture and we don't have to premultiply then take the texture path.
    // We expect to be at least as fast or faster since it doesn't use an intermediate texture as
    // we do below.

#if !GR_MAC_BUILD
    // At least some drivers on the Mac get confused when glTexImage2D is called on a texture
    // attached to an FBO. The FBO still sees the old image. TODO: determine what OS versions and/or
    // HW is affected.
    if (NULL != target->asTexture() && !(kUnpremul_PixelOpsFlag & flags)) {
        this->writeTexturePixels(target->asTexture(),
                                 left, top, width, height,
                                 config, buffer, rowBytes, flags);
        return;
    }
#endif
    SkAutoTUnref<GrCustomStage> stage;
    bool swapRAndB = (fGpu->preferredReadPixelsConfig(config) == GrPixelConfigSwapRAndB(config));

    // Upload in the GPU-preferred config; a custom stage swaps back if needed.
    GrPixelConfig textureConfig;
    if (swapRAndB) {
        textureConfig = GrPixelConfigSwapRAndB(config);
    } else {
        textureConfig = config;
    }

    GrTextureDesc desc;
    desc.fWidth = width;
    desc.fHeight = height;
    desc.fConfig = textureConfig;
    GrAutoScratchTexture ast(this, desc);
    GrTexture* texture = ast.texture();
    if (NULL == texture) {
        return;
    }
    // allocate a tmp buffer and sw convert the pixels to premul
    SkAutoSTMalloc<128 * 128, uint32_t> tmpPixels(0);

    if (kUnpremul_PixelOpsFlag & flags) {
        // Unpremul is only supported for the two 8888 configs.
        if (kRGBA_8888_GrPixelConfig != config && kBGRA_8888_GrPixelConfig != config) {
            return;
        }
        stage.reset(this->createUPMToPMEffect(texture, swapRAndB));
        if (NULL == stage) {
            // No GPU effect available: premultiply on the CPU instead.
            SkCanvas::Config8888 srcConfig8888, dstConfig8888;
            GR_DEBUGCODE(bool success = )
            grconfig_to_config8888(config, true, &srcConfig8888);
            GrAssert(success);
            GR_DEBUGCODE(success = )
            grconfig_to_config8888(config, false, &dstConfig8888);
            GrAssert(success);
            const uint32_t* src = reinterpret_cast<const uint32_t*>(buffer);
            tmpPixels.reset(width * height);
            SkConvertConfig8888Pixels(tmpPixels.get(), 4 * width, dstConfig8888,
                                      src, rowBytes, srcConfig8888,
                                      width, height);
            buffer = tmpPixels.get();
            rowBytes = 4 * width;
        }
    }
    if (NULL == stage) {
        stage.reset(GrConfigConversionEffect::Create(texture, swapRAndB));
        GrAssert(NULL != stage);
    }

    // Upload into the scratch (kUnpremul is stripped: it has been handled
    // above either by the stage or the CPU conversion).
    this->writeTexturePixels(texture,
                             0, 0, width, height,
                             textureConfig, buffer, rowBytes,
                             flags & ~kUnpremul_PixelOpsFlag);

    // Draw the scratch texture into the destination at (left, top).
    GrDrawTarget::AutoStateRestore  asr(fGpu, GrDrawTarget::kReset_ASRInit);
    GrDrawState* drawState = fGpu->drawState();

    GrMatrix matrix;
    matrix.setTranslate(GrIntToScalar(left), GrIntToScalar(top));
    drawState->setViewMatrix(matrix);
    drawState->setRenderTarget(target);

    matrix.setIDiv(texture->width(), texture->height());
    drawState->sampler(0)->reset(matrix);
    drawState->sampler(0)->setCustomStage(stage);

    fGpu->drawSimpleRect(GrRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height)), NULL);
}
1576////////////////////////////////////////////////////////////////////////////////
1577
1578void GrContext::setPaint(const GrPaint& paint) {
1579    GrAssert(fDrawState->stagesDisabled());
1580
1581    for (int i = 0; i < GrPaint::kMaxTextures; ++i) {
1582        int s = i + GrPaint::kFirstTextureStage;
1583        if (paint.isTextureStageEnabled(i)) {
1584            *fDrawState->sampler(s) = paint.getTextureSampler(i);
1585        }
1586    }
1587
1588    fDrawState->setFirstCoverageStage(GrPaint::kFirstMaskStage);
1589
1590    for (int i = 0; i < GrPaint::kMaxMasks; ++i) {
1591        int s = i + GrPaint::kFirstMaskStage;
1592        if (paint.isMaskStageEnabled(i)) {
1593            *fDrawState->sampler(s) = paint.getMaskSampler(i);
1594        }
1595    }
1596
1597    // disable all stages not accessible via the paint
1598    for (int s = GrPaint::kTotalStages; s < GrDrawState::kNumStages; ++s) {
1599        fDrawState->disableStage(s);
1600    }
1601
1602    fDrawState->setColor(paint.fColor);
1603
1604    if (paint.fDither) {
1605        fDrawState->enableState(GrDrawState::kDither_StateBit);
1606    } else {
1607        fDrawState->disableState(GrDrawState::kDither_StateBit);
1608    }
1609    if (paint.fAntiAlias) {
1610        fDrawState->enableState(GrDrawState::kHWAntialias_StateBit);
1611    } else {
1612        fDrawState->disableState(GrDrawState::kHWAntialias_StateBit);
1613    }
1614    if (paint.fColorMatrixEnabled) {
1615        fDrawState->enableState(GrDrawState::kColorMatrix_StateBit);
1616        fDrawState->setColorMatrix(paint.fColorMatrix);
1617    } else {
1618        fDrawState->disableState(GrDrawState::kColorMatrix_StateBit);
1619    }
1620    fDrawState->setBlendFunc(paint.fSrcBlendCoeff, paint.fDstBlendCoeff);
1621    fDrawState->setColorFilter(paint.fColorFilterColor, paint.fColorFilterXfermode);
1622    fDrawState->setCoverage(paint.fCoverage);
1623#if GR_DEBUG_PARTIAL_COVERAGE_CHECK
1624    if ((paint.hasMask() || 0xff != paint.fCoverage) &&
1625        !fGpu->canApplyCoverage()) {
1626        GrPrintf("Partial pixel coverage will be incorrectly blended.\n");
1627    }
1628#endif
1629}
1630
1631GrDrawTarget* GrContext::prepareToDraw(const GrPaint* paint, BufferedDraw buffered) {
1632    if (kNo_BufferedDraw == buffered && kYes_BufferedDraw == fLastDrawWasBuffered) {
1633        this->flushDrawBuffer();
1634        fLastDrawWasBuffered = kNo_BufferedDraw;
1635    }
1636    if (NULL != paint) {
1637        this->setPaint(*paint);
1638    }
1639    if (kYes_BufferedDraw == buffered) {
1640        fDrawBuffer->setClip(fGpu->getClip());
1641        fLastDrawWasBuffered = kYes_BufferedDraw;
1642        return fDrawBuffer;
1643    } else {
1644        GrAssert(kNo_BufferedDraw == buffered);
1645        return fGpu;
1646    }
1647}
1648
1649/*
1650 * This method finds a path renderer that can draw the specified path on
1651 * the provided target.
 * Due to its expense, the software path renderer has been split out so it
 * can be individually allowed/disallowed via the "allowSW" boolean.
1654 */
1655GrPathRenderer* GrContext::getPathRenderer(const SkPath& path,
1656                                           GrPathFill fill,
1657                                           const GrDrawTarget* target,
1658                                           bool antiAlias,
1659                                           bool allowSW) {
1660    if (NULL == fPathRendererChain) {
1661        fPathRendererChain =
1662            SkNEW_ARGS(GrPathRendererChain,
1663                       (this, GrPathRendererChain::kNone_UsageFlag));
1664    }
1665
1666    GrPathRenderer* pr = fPathRendererChain->getPathRenderer(path, fill,
1667                                                             target,
1668                                                             antiAlias);
1669
1670    if (NULL == pr && allowSW) {
1671        if (NULL == fSoftwarePathRenderer) {
1672            fSoftwarePathRenderer = SkNEW_ARGS(GrSoftwarePathRenderer, (this));
1673        }
1674
1675        pr = fSoftwarePathRenderer;
1676    }
1677
1678    return pr;
1679}
1680
1681////////////////////////////////////////////////////////////////////////////////
1682
// Sets the render target subsequent draws will render into. The target must
// belong to this context (or be NULL) per ASSERT_OWNED_RESOURCE.
void GrContext::setRenderTarget(GrRenderTarget* target) {
    ASSERT_OWNED_RESOURCE(target);
    fDrawState->setRenderTarget(target);
}
1687
// Returns the render target currently installed on the draw state.
GrRenderTarget* GrContext::getRenderTarget() {
    return fDrawState->getRenderTarget();
}
1691
// Const overload of getRenderTarget().
const GrRenderTarget* GrContext::getRenderTarget() const {
    return fDrawState->getRenderTarget();
}
1695
// Asks the GPU backend whether 'config' is usable as a render target config.
bool GrContext::isConfigRenderable(GrPixelConfig config) const {
    return fGpu->isConfigRenderable(config);
}
1699
// Returns the current view matrix from the draw state.
const GrMatrix& GrContext::getMatrix() const {
    return fDrawState->getViewMatrix();
}
1703
// Replaces the current view matrix with 'm'.
void GrContext::setMatrix(const GrMatrix& m) {
    fDrawState->setViewMatrix(m);
}
1707
// Pre-concatenates 'm' onto the current view matrix.
// NOTE(review): declared const yet mutates the shared draw state through the
// fDrawState pointer -- confirm this constness is intentional in the header.
void GrContext::concatMatrix(const GrMatrix& m) const {
    fDrawState->preConcatViewMatrix(m);
}
1711
// Returns 'bits' with bit 'shift' set when 'pred' is non-zero, cleared
// otherwise.
static inline intptr_t setOrClear(intptr_t bits, int shift, intptr_t pred) {
    // BUGFIX: shift an intptr_t-wide one rather than the int literal 1, so
    // shifts of 31 or more don't overflow a 32-bit int on 64-bit platforms.
    intptr_t mask = static_cast<intptr_t>(1) << shift;
    if (pred) {
        bits |= mask;
    } else {
        bits &= ~mask;
    }
    return bits;
}
1721
// Constructs a context that issues work through 'gpu'. The context refs the
// gpu, shares one GrDrawState with it, builds the resource and font caches,
// and sets up the (optionally used) in-order draw buffer.
GrContext::GrContext(GrGpu* gpu) {
    // NOTE(review): presumably a per-thread instance counter used by the
    // TLS-based context bookkeeping -- confirm against the header.
    ++THREAD_INSTANCE_COUNT;

    fGpu = gpu;
    fGpu->ref();
    fGpu->setContext(this);

    // The context owns the draw state; the gpu draws with it directly.
    fDrawState = SkNEW(GrDrawState);
    fGpu->setDrawState(fDrawState);

    // Path renderers are created lazily (see getPathRenderer()).
    fPathRendererChain = NULL;
    fSoftwarePathRenderer = NULL;

    fTextureCache = SkNEW_ARGS(GrResourceCache,
                               (MAX_TEXTURE_CACHE_COUNT,
                                MAX_TEXTURE_CACHE_BYTES));
    fFontCache = SkNEW_ARGS(GrFontCache, (fGpu));

    fLastDrawWasBuffered = kNo_BufferedDraw;

    // Cleared here; setupDrawBuffer() asserts they are NULL before creating.
    fDrawBuffer = NULL;
    fDrawBufferVBAllocPool = NULL;
    fDrawBufferIBAllocPool = NULL;

    fAARectRenderer = SkNEW(GrAARectRenderer);

    // The premul<->unpremul conversion test is deferred until first needed
    // (see createPMToUPMEffect / createUPMToPMEffect).
    fDidTestPMConversions = false;

    this->setupDrawBuffer();
}
1752
1753void GrContext::setupDrawBuffer() {
1754
1755    GrAssert(NULL == fDrawBuffer);
1756    GrAssert(NULL == fDrawBufferVBAllocPool);
1757    GrAssert(NULL == fDrawBufferIBAllocPool);
1758
1759    fDrawBufferVBAllocPool =
1760        SkNEW_ARGS(GrVertexBufferAllocPool, (fGpu, false,
1761                                    DRAW_BUFFER_VBPOOL_BUFFER_SIZE,
1762                                    DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS));
1763    fDrawBufferIBAllocPool =
1764        SkNEW_ARGS(GrIndexBufferAllocPool, (fGpu, false,
1765                                   DRAW_BUFFER_IBPOOL_BUFFER_SIZE,
1766                                   DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS));
1767
1768    fDrawBuffer = SkNEW_ARGS(GrInOrderDrawBuffer, (fGpu,
1769                                          fDrawBufferVBAllocPool,
1770                                          fDrawBufferIBAllocPool));
1771
1772    fDrawBuffer->setQuadIndexBuffer(this->getQuadIndexBuffer());
1773    if (fDrawBuffer) {
1774        fDrawBuffer->setAutoFlushTarget(fGpu);
1775        fDrawBuffer->setDrawState(fDrawState);
1776    }
1777}
1778
// Returns the draw target text rendering should go through, with 'paint'
// applied, using the default buffering mode.
GrDrawTarget* GrContext::getTextTarget(const GrPaint& paint) {
    return prepareToDraw(&paint, DEFAULT_BUFFERING);
}
1782
// Returns the gpu's shared index buffer for drawing quads.
const GrIndexBuffer* GrContext::getQuadIndexBuffer() const {
    return fGpu->getQuadIndexBuffer();
}
1786
1787namespace {
1788void test_pm_conversions(GrContext* ctx, int* pmToUPMValue, int* upmToPMValue) {
1789    GrConfigConversionEffect::PMConversion pmToUPM;
1790    GrConfigConversionEffect::PMConversion upmToPM;
1791    GrConfigConversionEffect::TestForPreservingPMConversions(ctx, &pmToUPM, &upmToPM);
1792    *pmToUPMValue = pmToUPM;
1793    *upmToPMValue = upmToPM;
1794}
1795}
1796
1797GrCustomStage* GrContext::createPMToUPMEffect(GrTexture* texture, bool swapRAndB) {
1798    if (!fDidTestPMConversions) {
1799        test_pm_conversions(this, &fPMToUPMConversion, &fUPMToPMConversion);
1800    }
1801    GrConfigConversionEffect::PMConversion pmToUPM =
1802        static_cast<GrConfigConversionEffect::PMConversion>(fPMToUPMConversion);
1803    if (GrConfigConversionEffect::kNone_PMConversion != pmToUPM) {
1804        return GrConfigConversionEffect::Create(texture, swapRAndB, pmToUPM);
1805    } else {
1806        return NULL;
1807    }
1808}
1809
1810GrCustomStage* GrContext::createUPMToPMEffect(GrTexture* texture, bool swapRAndB) {
1811    if (!fDidTestPMConversions) {
1812        test_pm_conversions(this, &fPMToUPMConversion, &fUPMToPMConversion);
1813    }
1814    GrConfigConversionEffect::PMConversion upmToPM =
1815        static_cast<GrConfigConversionEffect::PMConversion>(fUPMToPMConversion);
1816    if (GrConfigConversionEffect::kNone_PMConversion != upmToPM) {
1817        return GrConfigConversionEffect::Create(texture, swapRAndB, upmToPM);
1818    } else {
1819        return NULL;
1820    }
1821}
1822
// Performs a separable (two-pass) Gaussian blur of srcTexture over 'rect'.
// Large sigmas are handled by downscaling first (adjust_sigma reports a
// scale factor and per-axis kernel radius), blurring at reduced size, and
// bilinearly upscaling at the end. Two scratch textures are ping-ponged as
// source/destination; when canClobberSrc is true, srcTexture itself serves
// as one of them. Returns a ref'ed texture holding the result (caller owns
// the ref).
GrTexture* GrContext::gaussianBlur(GrTexture* srcTexture,
                                   bool canClobberSrc,
                                   const SkRect& rect,
                                   float sigmaX, float sigmaY) {
    ASSERT_OWNED_RESOURCE(srcTexture);
    // Remember the caller's target so it can be restored before returning.
    GrRenderTarget* oldRenderTarget = this->getRenderTarget();
    AutoMatrix avm(this, GrMatrix::I());
    SkIRect clearRect;
    int scaleFactorX, radiusX;
    int scaleFactorY, radiusY;
    sigmaX = adjust_sigma(sigmaX, &scaleFactorX, &radiusX);
    sigmaY = adjust_sigma(sigmaY, &scaleFactorY, &radiusY);

    // Snap the working rect to integer bounds at the downscaled resolution,
    // expressed back in full-resolution coordinates.
    SkRect srcRect(rect);
    scale_rect(&srcRect, 1.0f / scaleFactorX, 1.0f / scaleFactorY);
    srcRect.roundOut();
    scale_rect(&srcRect, static_cast<float>(scaleFactorX),
                         static_cast<float>(scaleFactorY));

    AutoClip acs(this, srcRect);

    GrAssert(kBGRA_8888_GrPixelConfig == srcTexture->config() ||
             kRGBA_8888_GrPixelConfig == srcTexture->config() ||
             kAlpha_8_GrPixelConfig == srcTexture->config());

    GrTextureDesc desc;
    desc.fFlags = kRenderTarget_GrTextureFlagBit | kNoStencil_GrTextureFlagBit;
    desc.fWidth = SkScalarFloorToInt(srcRect.width());
    desc.fHeight = SkScalarFloorToInt(srcRect.height());
    desc.fConfig = srcTexture->config();

    // Two render targets to ping-pong between; the source may be reused as
    // one of them when the caller allows clobbering it.
    GrAutoScratchTexture temp1, temp2;
    GrTexture* dstTexture = temp1.set(this, desc);
    GrTexture* tempTexture = canClobberSrc ? srcTexture : temp2.set(this, desc);

    GrPaint paint;
    paint.reset();
    paint.textureSampler(0)->textureParams()->setBilerp(true);

    // Repeatedly halve along each axis until both scale factors are reached.
    for (int i = 1; i < scaleFactorX || i < scaleFactorY; i *= 2) {
        paint.textureSampler(0)->matrix()->setIDiv(srcTexture->width(),
                                                   srcTexture->height());
        this->setRenderTarget(dstTexture->asRenderTarget());
        SkRect dstRect(srcRect);
        scale_rect(&dstRect, i < scaleFactorX ? 0.5f : 1.0f,
                            i < scaleFactorY ? 0.5f : 1.0f);
        paint.textureSampler(0)->setCustomStage(SkNEW_ARGS(GrSingleTextureEffect,
                                                           (srcTexture)))->unref();
        this->drawRectToRect(paint, dstRect, srcRect);
        srcRect = dstRect;
        srcTexture = dstTexture;
        SkTSwap(dstTexture, tempTexture);
    }

    SkIRect srcIRect;
    srcRect.roundOut(&srcIRect);

    // Horizontal convolution pass.
    if (sigmaX > 0.0f) {
        if (scaleFactorX > 1) {
            // Clear out a radius to the right of the srcRect to prevent the
            // X convolution from reading garbage.
            clearRect = SkIRect::MakeXYWH(srcIRect.fRight, srcIRect.fTop,
                                          radiusX, srcIRect.height());
            this->clear(&clearRect, 0x0);
        }

        this->setRenderTarget(dstTexture->asRenderTarget());
        GrDrawTarget* target = this->prepareToDraw(NULL, DEFAULT_BUFFERING);
        convolve_gaussian(target, srcTexture, srcRect, sigmaX, radiusX,
                          Gr1DKernelEffect::kX_Direction);
        srcTexture = dstTexture;
        SkTSwap(dstTexture, tempTexture);
    }

    // Vertical convolution pass.
    if (sigmaY > 0.0f) {
        if (scaleFactorY > 1 || sigmaX > 0.0f) {
            // Clear out a radius below the srcRect to prevent the Y
            // convolution from reading garbage.
            clearRect = SkIRect::MakeXYWH(srcIRect.fLeft, srcIRect.fBottom,
                                          srcIRect.width(), radiusY);
            this->clear(&clearRect, 0x0);
        }

        this->setRenderTarget(dstTexture->asRenderTarget());
        GrDrawTarget* target = this->prepareToDraw(NULL, DEFAULT_BUFFERING);
        convolve_gaussian(target, srcTexture, srcRect, sigmaY, radiusY,
                          Gr1DKernelEffect::kY_Direction);
        srcTexture = dstTexture;
        SkTSwap(dstTexture, tempTexture);
    }

    // Upscale back to the requested size if we blurred at reduced size.
    if (scaleFactorX > 1 || scaleFactorY > 1) {
        // Clear one pixel to the right and below, to accommodate bilinear
        // upsampling.
        clearRect = SkIRect::MakeXYWH(srcIRect.fLeft, srcIRect.fBottom,
                                      srcIRect.width() + 1, 1);
        this->clear(&clearRect, 0x0);
        clearRect = SkIRect::MakeXYWH(srcIRect.fRight, srcIRect.fTop,
                                      1, srcIRect.height());
        this->clear(&clearRect, 0x0);
        // FIXME:  This should be mitchell, not bilinear.
        paint.textureSampler(0)->textureParams()->setBilerp(true);
        paint.textureSampler(0)->matrix()->setIDiv(srcTexture->width(),
                                                   srcTexture->height());
        this->setRenderTarget(dstTexture->asRenderTarget());
        paint.textureSampler(0)->setCustomStage(SkNEW_ARGS(GrSingleTextureEffect,
                                                           (srcTexture)))->unref();
        SkRect dstRect(srcRect);
        scale_rect(&dstRect, (float) scaleFactorX, (float) scaleFactorY);
        this->drawRectToRect(paint, dstRect, srcRect);
        srcRect = dstRect;
        srcTexture = dstTexture;
        SkTSwap(dstTexture, tempTexture);
    }
    this->setRenderTarget(oldRenderTarget);
    // Hand ownership of whichever texture holds the final result to the
    // caller; a scratch texture is detached, the caller's own source is
    // ref'ed instead.
    if (srcTexture == temp1.texture()) {
        return temp1.detach();
    } else if (srcTexture == temp2.texture()) {
        return temp2.detach();
    } else {
        srcTexture->ref();
        return srcTexture;
    }
}
1947
1948///////////////////////////////////////////////////////////////////////////////
1949#if GR_DEBUG
// Debug-only: dumps the texture resource cache's statistics.
void GrContext::printCacheStats() const {
    fTextureCache->printStats();
}
1953#endif
1954