GrContext.cpp revision ccaa002dd81a6a8bd5acb7a2fa69a2437873c1fd
1
2/*
3 * Copyright 2011 Google Inc.
4 *
5 * Use of this source code is governed by a BSD-style license that can be
6 * found in the LICENSE file.
7 */
8
9
10#include "GrContext.h"
11
12#include "effects/GrConvolutionEffect.h"
13#include "effects/GrSingleTextureEffect.h"
14#include "effects/GrConfigConversionEffect.h"
15
16#include "GrBufferAllocPool.h"
17#include "GrGpu.h"
18#include "GrIndexBuffer.h"
19#include "GrInOrderDrawBuffer.h"
20#include "GrPathRenderer.h"
21#include "GrPathUtils.h"
22#include "GrResourceCache.h"
23#include "GrSoftwarePathRenderer.h"
24#include "GrStencilBuffer.h"
25#include "GrTextStrike.h"
26#include "SkTLazy.h"
27#include "SkTLS.h"
28#include "SkTrace.h"
29
30SK_DEFINE_INST_COUNT(GrContext)
31SK_DEFINE_INST_COUNT(GrDrawState)
32
33// It can be useful to set this to kNo_BufferedDraw to test whether a bug is caused by using the
34// InOrderDrawBuffer, to compare performance of using/not using InOrderDrawBuffer, or to make
35// debugging easier.
36#define DEFAULT_BUFFERING (GR_DISABLE_DRAW_BUFFERING ? kNo_BufferedDraw : kYes_BufferedDraw)
37
38#define MAX_BLUR_SIGMA 4.0f
39
40// When we're using coverage AA but the blend is incompatible (given gpu
41// limitations) should we disable AA or draw wrong?
42#define DISABLE_COVERAGE_AA_FOR_BLEND 1
43
44#if GR_DEBUG
45    // change this to a 1 to see notifications when partial coverage fails
46    #define GR_DEBUG_PARTIAL_COVERAGE_CHECK 0
47#else
48    #define GR_DEBUG_PARTIAL_COVERAGE_CHECK 0
49#endif
50
51static const size_t MAX_TEXTURE_CACHE_COUNT = 256;
52static const size_t MAX_TEXTURE_CACHE_BYTES = 16 * 1024 * 1024;
53
54static const size_t DRAW_BUFFER_VBPOOL_BUFFER_SIZE = 1 << 15;
55static const int DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS = 4;
56
57static const size_t DRAW_BUFFER_IBPOOL_BUFFER_SIZE = 1 << 11;
58static const int DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS = 4;
59
60#define ASSERT_OWNED_RESOURCE(R) GrAssert(!(R) || (R)->getContext() == this)
61
62GrContext* GrContext::Create(GrEngine engine,
63                             GrPlatform3DContext context3D) {
64    GrContext* ctx = NULL;
65    GrGpu* fGpu = GrGpu::Create(engine, context3D);
66    if (NULL != fGpu) {
67        ctx = SkNEW_ARGS(GrContext, (fGpu));
68        fGpu->unref();
69    }
70    return ctx;
71}
72
73namespace {
74void* CreateThreadInstanceCount() {
75    return SkNEW_ARGS(int, (0));
76}
77void DeleteThreadInstanceCount(void* v) {
78    delete reinterpret_cast<int*>(v);
79}
80#define THREAD_INSTANCE_COUNT                                               \
81    (*reinterpret_cast<int*>(SkTLS::Get(CreateThreadInstanceCount,          \
82                                        DeleteThreadInstanceCount)))
83
84}
85
86int GrContext::GetThreadInstanceCount() {
87    return THREAD_INSTANCE_COUNT;
88}
89
90GrContext::~GrContext() {
91    for (int i = 0; i < fCleanUpData.count(); ++i) {
92        (*fCleanUpData[i].fFunc)(this, fCleanUpData[i].fInfo);
93    }
94
95    this->flush();
96
97    // Since the gpu can hold scratch textures, give it a chance to let go
98    // of them before freeing the texture cache
99    fGpu->purgeResources();
100
101    delete fTextureCache;
102    fTextureCache = NULL;
103    delete fFontCache;
104    delete fDrawBuffer;
105    delete fDrawBufferVBAllocPool;
106    delete fDrawBufferIBAllocPool;
107
108    fAARectRenderer->unref();
109
110    fGpu->unref();
111    GrSafeUnref(fPathRendererChain);
112    GrSafeUnref(fSoftwarePathRenderer);
113    fDrawState->unref();
114
115    --THREAD_INSTANCE_COUNT;
116}
117
118void GrContext::contextLost() {
119    contextDestroyed();
120    this->setupDrawBuffer();
121}
122
123void GrContext::contextDestroyed() {
124    // abandon first to so destructors
125    // don't try to free the resources in the API.
126    fGpu->abandonResources();
127
128    // a path renderer may be holding onto resources that
129    // are now unusable
130    GrSafeSetNull(fPathRendererChain);
131    GrSafeSetNull(fSoftwarePathRenderer);
132
133    delete fDrawBuffer;
134    fDrawBuffer = NULL;
135
136    delete fDrawBufferVBAllocPool;
137    fDrawBufferVBAllocPool = NULL;
138
139    delete fDrawBufferIBAllocPool;
140    fDrawBufferIBAllocPool = NULL;
141
142    fAARectRenderer->reset();
143
144    fTextureCache->purgeAllUnlocked();
145    fFontCache->freeAll();
146    fGpu->markContextDirty();
147}
148
149void GrContext::resetContext() {
150    fGpu->markContextDirty();
151}
152
153void GrContext::freeGpuResources() {
154    this->flush();
155
156    fGpu->purgeResources();
157
158    fAARectRenderer->reset();
159
160    fTextureCache->purgeAllUnlocked();
161    fFontCache->freeAll();
162    // a path renderer may be holding onto resources
163    GrSafeSetNull(fPathRendererChain);
164    GrSafeSetNull(fSoftwarePathRenderer);
165}
166
167size_t GrContext::getGpuTextureCacheBytes() const {
168  return fTextureCache->getCachedResourceBytes();
169}
170
171////////////////////////////////////////////////////////////////////////////////
172
173namespace {
174
175void scale_rect(SkRect* rect, float xScale, float yScale) {
176    rect->fLeft = SkScalarMul(rect->fLeft, SkFloatToScalar(xScale));
177    rect->fTop = SkScalarMul(rect->fTop, SkFloatToScalar(yScale));
178    rect->fRight = SkScalarMul(rect->fRight, SkFloatToScalar(xScale));
179    rect->fBottom = SkScalarMul(rect->fBottom, SkFloatToScalar(yScale));
180}
181
182float adjust_sigma(float sigma, int *scaleFactor, int *radius) {
183    *scaleFactor = 1;
184    while (sigma > MAX_BLUR_SIGMA) {
185        *scaleFactor *= 2;
186        sigma *= 0.5f;
187    }
188    *radius = static_cast<int>(ceilf(sigma * 3.0f));
189    GrAssert(*radius <= GrConvolutionEffect::kMaxKernelRadius);
190    return sigma;
191}
192
193void convolve_gaussian(GrDrawTarget* target,
194                       GrTexture* texture,
195                       const SkRect& rect,
196                       float sigma,
197                       int radius,
198                       Gr1DKernelEffect::Direction direction) {
199    GrRenderTarget* rt = target->drawState()->getRenderTarget();
200    GrDrawTarget::AutoStateRestore asr(target, GrDrawTarget::kReset_ASRInit);
201    GrDrawState* drawState = target->drawState();
202    drawState->setRenderTarget(rt);
203    GrMatrix sampleM;
204    sampleM.setIDiv(texture->width(), texture->height());
205    drawState->sampler(0)->reset(sampleM);
206    SkAutoTUnref<GrConvolutionEffect> conv(SkNEW_ARGS(GrConvolutionEffect,
207                                                      (texture, direction, radius,
208                                                       sigma)));
209    drawState->sampler(0)->setCustomStage(conv);
210    target->drawSimpleRect(rect, NULL);
211}
212
213}
214
215
216GrTexture* GrContext::findTexture(const GrCacheKey& key) {
217    return static_cast<GrTexture*>(fTextureCache->find(key.key()));
218}
219
220GrTexture* GrContext::findTexture(const GrTextureDesc& desc,
221                                  const GrCacheData& cacheData,
222                                  const GrTextureParams* params) {
223    GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheData, false);
224    GrResource* resource = fTextureCache->find(resourceKey);
225    return static_cast<GrTexture*>(resource);
226}
227
228bool GrContext::isTextureInCache(const GrTextureDesc& desc,
229                                 const GrCacheData& cacheData,
230                                 const GrTextureParams* params) const {
231    GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheData, false);
232    return fTextureCache->hasKey(resourceKey);
233}
234
235void GrContext::addStencilBuffer(GrStencilBuffer* sb) {
236    ASSERT_OWNED_RESOURCE(sb);
237
238    GrResourceKey resourceKey = GrStencilBuffer::ComputeKey(sb->width(),
239                                                            sb->height(),
240                                                            sb->numSamples());
241    fTextureCache->create(resourceKey, sb);
242}
243
244GrStencilBuffer* GrContext::findStencilBuffer(int width, int height,
245                                              int sampleCnt) {
246    GrResourceKey resourceKey = GrStencilBuffer::ComputeKey(width,
247                                                            height,
248                                                            sampleCnt);
249    GrResource* resource = fTextureCache->find(resourceKey);
250    return static_cast<GrStencilBuffer*>(resource);
251}
252
253static void stretchImage(void* dst,
254                         int dstW,
255                         int dstH,
256                         void* src,
257                         int srcW,
258                         int srcH,
259                         int bpp) {
260    GrFixed dx = (srcW << 16) / dstW;
261    GrFixed dy = (srcH << 16) / dstH;
262
263    GrFixed y = dy >> 1;
264
265    int dstXLimit = dstW*bpp;
266    for (int j = 0; j < dstH; ++j) {
267        GrFixed x = dx >> 1;
268        void* srcRow = (uint8_t*)src + (y>>16)*srcW*bpp;
269        void* dstRow = (uint8_t*)dst + j*dstW*bpp;
270        for (int i = 0; i < dstXLimit; i += bpp) {
271            memcpy((uint8_t*) dstRow + i,
272                   (uint8_t*) srcRow + (x>>16)*bpp,
273                   bpp);
274            x += dx;
275        }
276        y += dy;
277    }
278}
279
280// The desired texture is NPOT and tiled but that isn't supported by
281// the current hardware. Resize the texture to be a POT
282GrTexture* GrContext::createResizedTexture(const GrTextureDesc& desc,
283                                           const GrCacheData& cacheData,
284                                           void* srcData,
285                                           size_t rowBytes,
286                                           bool needsFiltering) {
287    GrTexture* clampedTexture = this->findTexture(desc, cacheData, NULL);
288    if (NULL == clampedTexture) {
289        clampedTexture = this->createTexture(NULL, desc, cacheData, srcData, rowBytes);
290
291        GrAssert(NULL != clampedTexture);
292        if (NULL == clampedTexture) {
293            return NULL;
294        }
295    }
296
297    clampedTexture->ref();
298
299    GrTextureDesc rtDesc = desc;
300    rtDesc.fFlags =  rtDesc.fFlags |
301                     kRenderTarget_GrTextureFlagBit |
302                     kNoStencil_GrTextureFlagBit;
303    rtDesc.fWidth  = GrNextPow2(GrMax(desc.fWidth, 64));
304    rtDesc.fHeight = GrNextPow2(GrMax(desc.fHeight, 64));
305
306    GrTexture* texture = fGpu->createTexture(rtDesc, NULL, 0);
307
308    if (NULL != texture) {
309        GrDrawTarget::AutoStateRestore asr(fGpu, GrDrawTarget::kReset_ASRInit);
310        GrDrawState* drawState = fGpu->drawState();
311        drawState->setRenderTarget(texture->asRenderTarget());
312
313        // if filtering is not desired then we want to ensure all
314        // texels in the resampled image are copies of texels from
315        // the original.
316        drawState->sampler(0)->reset();
317        GrTextureParams params(SkShader::kClamp_TileMode, needsFiltering);
318        drawState->createTextureEffect(0, clampedTexture, params);
319
320        static const GrVertexLayout layout =
321                            GrDrawTarget::StageTexCoordVertexLayoutBit(0,0);
322        GrDrawTarget::AutoReleaseGeometry arg(fGpu, layout, 4, 0);
323
324        if (arg.succeeded()) {
325            GrPoint* verts = (GrPoint*) arg.vertices();
326            verts[0].setIRectFan(0, 0,
327                                    texture->width(),
328                                    texture->height(),
329                                    2*sizeof(GrPoint));
330            verts[1].setIRectFan(0, 0, 1, 1, 2*sizeof(GrPoint));
331            fGpu->drawNonIndexed(kTriangleFan_GrPrimitiveType,
332                                    0, 4);
333        }
334        texture->releaseRenderTarget();
335    } else {
336        // TODO: Our CPU stretch doesn't filter. But we create separate
337        // stretched textures when the sampler state is either filtered or
338        // not. Either implement filtered stretch blit on CPU or just create
339        // one when FBO case fails.
340
341        rtDesc.fFlags = kNone_GrTextureFlags;
342        // no longer need to clamp at min RT size.
343        rtDesc.fWidth  = GrNextPow2(desc.fWidth);
344        rtDesc.fHeight = GrNextPow2(desc.fHeight);
345        int bpp = GrBytesPerPixel(desc.fConfig);
346        SkAutoSMalloc<128*128*4> stretchedPixels(bpp *
347                                                    rtDesc.fWidth *
348                                                    rtDesc.fHeight);
349        stretchImage(stretchedPixels.get(), rtDesc.fWidth, rtDesc.fHeight,
350                        srcData, desc.fWidth, desc.fHeight, bpp);
351
352        size_t stretchedRowBytes = rtDesc.fWidth * bpp;
353
354        GrTexture* texture = fGpu->createTexture(rtDesc,
355                                                    stretchedPixels.get(),
356                                                    stretchedRowBytes);
357        GrAssert(NULL != texture);
358    }
359
360    clampedTexture->unref();
361    return texture;
362}
363
364GrTexture* GrContext::createTexture(
365        const GrTextureParams* params,
366        const GrTextureDesc& desc,
367        const GrCacheData& cacheData,
368        void* srcData,
369        size_t rowBytes) {
370    SK_TRACE_EVENT0("GrContext::createAndLockTexture");
371
372#if GR_DUMP_TEXTURE_UPLOAD
373    GrPrintf("GrContext::createAndLockTexture [%d %d]\n", desc.fWidth, desc.fHeight);
374#endif
375
376    GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheData, false);
377
378    SkAutoTUnref<GrTexture> texture;
379    if (GrTexture::NeedsResizing(resourceKey)) {
380        texture.reset(this->createResizedTexture(desc, cacheData,
381                                             srcData, rowBytes,
382                                             GrTexture::NeedsFiltering(resourceKey)));
383    } else {
384        texture.reset(fGpu->createTexture(desc, srcData, rowBytes));
385    }
386
387    if (NULL != texture) {
388        fTextureCache->create(resourceKey, texture);
389    }
390
391    return texture;
392}
393
394GrTexture* GrContext::lockScratchTexture(const GrTextureDesc& inDesc,
395                                         ScratchTexMatch match) {
396    GrTextureDesc desc = inDesc;
397    GrCacheData cacheData(GrCacheData::kScratch_CacheID);
398
399    GrAssert((desc.fFlags & kRenderTarget_GrTextureFlagBit) ||
400             !(desc.fFlags & kNoStencil_GrTextureFlagBit));
401
402    if (kExact_ScratchTexMatch != match) {
403        // bin by pow2 with a reasonable min
404        static const int MIN_SIZE = 256;
405        desc.fWidth  = GrMax(MIN_SIZE, GrNextPow2(desc.fWidth));
406        desc.fHeight = GrMax(MIN_SIZE, GrNextPow2(desc.fHeight));
407    }
408
409    GrResource* resource = NULL;
410    int origWidth = desc.fWidth;
411    int origHeight = desc.fHeight;
412    bool doubledW = false;
413    bool doubledH = false;
414
415    do {
416        GrResourceKey key = GrTexture::ComputeKey(fGpu, NULL, desc, cacheData, true);
417        resource = fTextureCache->find(key);
418        // if we miss, relax the fit of the flags...
419        // then try doubling width... then height.
420        if (NULL != resource || kExact_ScratchTexMatch == match) {
421            break;
422        }
423        // We no longer try to reuse textures that were previously used as render targets in
424        // situations where no RT is needed; doing otherwise can confuse the video driver and
425        // cause significant performance problems in some cases.
426        if (desc.fFlags & kNoStencil_GrTextureFlagBit) {
427            desc.fFlags = desc.fFlags & ~kNoStencil_GrTextureFlagBit;
428        } else if (!doubledW) {
429            desc.fFlags = inDesc.fFlags;
430            desc.fWidth *= 2;
431            doubledW = true;
432        } else if (!doubledH) {
433            desc.fFlags = inDesc.fFlags;
434            desc.fWidth = origWidth;
435            desc.fHeight *= 2;
436            doubledH = true;
437        } else {
438            break;
439        }
440
441    } while (true);
442
443    if (NULL == resource) {
444        desc.fFlags = inDesc.fFlags;
445        desc.fWidth = origWidth;
446        desc.fHeight = origHeight;
447        SkAutoTUnref<GrTexture> texture(fGpu->createTexture(desc, NULL, 0));
448        if (NULL != texture) {
449            GrResourceKey key = GrTexture::ComputeKey(fGpu, NULL,
450                                                      texture->desc(),
451                                                      cacheData,
452                                                      true);
453            fTextureCache->create(key, texture);
454            resource = texture;
455        }
456    }
457
458    // If the caller gives us the same desc/sampler twice we don't want
459    // to return the same texture the second time (unless it was previously
460    // released). So make it exclusive to hide it from future searches.
461    if (NULL != resource) {
462        fTextureCache->makeExclusive(resource->getCacheEntry());
463    }
464
465    return static_cast<GrTexture*>(resource);
466}
467
468void GrContext::addExistingTextureToCache(GrTexture* texture) {
469
470    if (NULL == texture) {
471        return;
472    }
473
474    // This texture should already have a cache entry since it was once
475    // attached
476    GrAssert(NULL != texture->getCacheEntry());
477
478    // Conceptually, the cache entry is going to assume responsibility
479    // for the creation ref.
480    GrAssert(1 == texture->getRefCnt());
481
482    // Since this texture came from an AutoScratchTexture it should
483    // still be in the exclusive pile
484    fTextureCache->makeNonExclusive(texture->getCacheEntry());
485
486    this->purgeCache();
487}
488
489
490void GrContext::unlockScratchTexture(GrTexture* texture) {
491    ASSERT_OWNED_RESOURCE(texture);
492    GrAssert(NULL != texture->getCacheEntry());
493
494    // If this is a scratch texture we detached it from the cache
495    // while it was locked (to avoid two callers simultaneously getting
496    // the same texture).
497    if (GrTexture::IsScratchTexture(texture->getCacheEntry()->key())) {
498        fTextureCache->makeNonExclusive(texture->getCacheEntry());
499    }
500
501    this->purgeCache();
502}
503
504void GrContext::purgeCache() {
505    if (NULL != fTextureCache) {
506        fTextureCache->purgeAsNeeded();
507    }
508}
509
510GrTexture* GrContext::createUncachedTexture(const GrTextureDesc& descIn,
511                                            void* srcData,
512                                            size_t rowBytes) {
513    GrTextureDesc descCopy = descIn;
514    return fGpu->createTexture(descCopy, srcData, rowBytes);
515}
516
517void GrContext::getTextureCacheLimits(int* maxTextures,
518                                      size_t* maxTextureBytes) const {
519    fTextureCache->getLimits(maxTextures, maxTextureBytes);
520}
521
522void GrContext::setTextureCacheLimits(int maxTextures, size_t maxTextureBytes) {
523    fTextureCache->setLimits(maxTextures, maxTextureBytes);
524}
525
526int GrContext::getMaxTextureSize() const {
527    return fGpu->getCaps().maxTextureSize();
528}
529
530int GrContext::getMaxRenderTargetSize() const {
531    return fGpu->getCaps().maxRenderTargetSize();
532}
533
534///////////////////////////////////////////////////////////////////////////////
535
536GrTexture* GrContext::createPlatformTexture(const GrPlatformTextureDesc& desc) {
537    return fGpu->createPlatformTexture(desc);
538}
539
540GrRenderTarget* GrContext::createPlatformRenderTarget(const GrPlatformRenderTargetDesc& desc) {
541    return fGpu->createPlatformRenderTarget(desc);
542}
543
544///////////////////////////////////////////////////////////////////////////////
545
546bool GrContext::supportsIndex8PixelConfig(const GrTextureParams* params,
547                                          int width, int height) const {
548    const GrDrawTarget::Caps& caps = fGpu->getCaps();
549    if (!caps.eightBitPaletteSupport()) {
550        return false;
551    }
552
553    bool isPow2 = GrIsPow2(width) && GrIsPow2(height);
554
555    if (!isPow2) {
556        bool tiled = NULL != params && params->isTiled();
557        if (tiled && !caps.npotTextureTileSupport()) {
558            return false;
559        }
560    }
561    return true;
562}
563
564////////////////////////////////////////////////////////////////////////////////
565
566const GrClipData* GrContext::getClip() const {
567    return fGpu->getClip();
568}
569
570void GrContext::setClip(const GrClipData* clipData) {
571    fGpu->setClip(clipData);
572    fDrawState->enableState(GrDrawState::kClip_StateBit);
573}
574
575////////////////////////////////////////////////////////////////////////////////
576
577void GrContext::clear(const GrIRect* rect,
578                      const GrColor color,
579                      GrRenderTarget* target) {
580    this->prepareToDraw(NULL, DEFAULT_BUFFERING)->clear(rect, color, target);
581}
582
583void GrContext::drawPaint(const GrPaint& paint) {
584    // set rect to be big enough to fill the space, but not super-huge, so we
585    // don't overflow fixed-point implementations
586    GrRect r;
587    r.setLTRB(0, 0,
588              GrIntToScalar(getRenderTarget()->width()),
589              GrIntToScalar(getRenderTarget()->height()));
590    GrMatrix inverse;
591    SkTLazy<GrPaint> tmpPaint;
592    const GrPaint* p = &paint;
593    AutoMatrix am;
594
595    // We attempt to map r by the inverse matrix and draw that. mapRect will
596    // map the four corners and bound them with a new rect. This will not
597    // produce a correct result for some perspective matrices.
598    if (!this->getMatrix().hasPerspective()) {
599        if (!fDrawState->getViewInverse(&inverse)) {
600            GrPrintf("Could not invert matrix\n");
601            return;
602        }
603        inverse.mapRect(&r);
604    } else {
605        if (paint.hasTextureOrMask()) {
606            tmpPaint.set(paint);
607            p = tmpPaint.get();
608            if (!tmpPaint.get()->preConcatSamplerMatricesWithInverse(fDrawState->getViewMatrix())) {
609                GrPrintf("Could not invert matrix\n");
610            }
611        }
612        am.set(this, GrMatrix::I());
613    }
614    // by definition this fills the entire clip, no need for AA
615    if (paint.fAntiAlias) {
616        if (!tmpPaint.isValid()) {
617            tmpPaint.set(paint);
618            p = tmpPaint.get();
619        }
620        GrAssert(p == tmpPaint.get());
621        tmpPaint.get()->fAntiAlias = false;
622    }
623    this->drawRect(*p, r);
624}
625
626////////////////////////////////////////////////////////////////////////////////
627
628namespace {
629inline bool disable_coverage_aa_for_blend(GrDrawTarget* target) {
630    return DISABLE_COVERAGE_AA_FOR_BLEND && !target->canApplyCoverage();
631}
632}
633
634////////////////////////////////////////////////////////////////////////////////
635
636/*  create a triangle strip that strokes the specified triangle. There are 8
637 unique vertices, but we repreat the last 2 to close up. Alternatively we
638 could use an indices array, and then only send 8 verts, but not sure that
639 would be faster.
640 */
641static void setStrokeRectStrip(GrPoint verts[10], GrRect rect,
642                               GrScalar width) {
643    const GrScalar rad = GrScalarHalf(width);
644    rect.sort();
645
646    verts[0].set(rect.fLeft + rad, rect.fTop + rad);
647    verts[1].set(rect.fLeft - rad, rect.fTop - rad);
648    verts[2].set(rect.fRight - rad, rect.fTop + rad);
649    verts[3].set(rect.fRight + rad, rect.fTop - rad);
650    verts[4].set(rect.fRight - rad, rect.fBottom - rad);
651    verts[5].set(rect.fRight + rad, rect.fBottom + rad);
652    verts[6].set(rect.fLeft + rad, rect.fBottom - rad);
653    verts[7].set(rect.fLeft - rad, rect.fBottom + rad);
654    verts[8] = verts[0];
655    verts[9] = verts[1];
656}
657
658/**
659 * Returns true if the rects edges are integer-aligned.
660 */
661static bool isIRect(const GrRect& r) {
662    return GrScalarIsInt(r.fLeft) && GrScalarIsInt(r.fTop) &&
663           GrScalarIsInt(r.fRight) && GrScalarIsInt(r.fBottom);
664}
665
666static bool apply_aa_to_rect(GrDrawTarget* target,
667                             const GrRect& rect,
668                             GrScalar width,
669                             const GrMatrix* matrix,
670                             GrMatrix* combinedMatrix,
671                             GrRect* devRect,
672                             bool* useVertexCoverage) {
673    // we use a simple coverage ramp to do aa on axis-aligned rects
674    // we check if the rect will be axis-aligned, and the rect won't land on
675    // integer coords.
676
677    // we are keeping around the "tweak the alpha" trick because
678    // it is our only hope for the fixed-pipe implementation.
679    // In a shader implementation we can give a separate coverage input
680    // TODO: remove this ugliness when we drop the fixed-pipe impl
681    *useVertexCoverage = false;
682    if (!target->canTweakAlphaForCoverage()) {
683        if (disable_coverage_aa_for_blend(target)) {
684#if GR_DEBUG
685            //GrPrintf("Turning off AA to correctly apply blend.\n");
686#endif
687            return false;
688        } else {
689            *useVertexCoverage = true;
690        }
691    }
692    const GrDrawState& drawState = target->getDrawState();
693    if (drawState.getRenderTarget()->isMultisampled()) {
694        return false;
695    }
696
697    if (0 == width && target->willUseHWAALines()) {
698        return false;
699    }
700
701    if (!drawState.getViewMatrix().preservesAxisAlignment()) {
702        return false;
703    }
704
705    if (NULL != matrix &&
706        !matrix->preservesAxisAlignment()) {
707        return false;
708    }
709
710    *combinedMatrix = drawState.getViewMatrix();
711    if (NULL != matrix) {
712        combinedMatrix->preConcat(*matrix);
713        GrAssert(combinedMatrix->preservesAxisAlignment());
714    }
715
716    combinedMatrix->mapRect(devRect, rect);
717    devRect->sort();
718
719    if (width < 0) {
720        return !isIRect(*devRect);
721    } else {
722        return true;
723    }
724}
725
726void GrContext::drawRect(const GrPaint& paint,
727                         const GrRect& rect,
728                         GrScalar width,
729                         const GrMatrix* matrix) {
730    SK_TRACE_EVENT0("GrContext::drawRect");
731
732    GrDrawTarget* target = this->prepareToDraw(&paint, DEFAULT_BUFFERING);
733    GrDrawState::AutoStageDisable atr(fDrawState);
734
735    GrRect devRect = rect;
736    GrMatrix combinedMatrix;
737    bool useVertexCoverage;
738    bool needAA = paint.fAntiAlias &&
739                  !this->getRenderTarget()->isMultisampled();
740    bool doAA = needAA && apply_aa_to_rect(target, rect, width, matrix,
741                                           &combinedMatrix, &devRect,
742                                           &useVertexCoverage);
743
744    if (doAA) {
745        GrDrawTarget::AutoDeviceCoordDraw adcd(target);
746        if (!adcd.succeeded()) {
747            return;
748        }
749        if (width >= 0) {
750            GrVec strokeSize;;
751            if (width > 0) {
752                strokeSize.set(width, width);
753                combinedMatrix.mapVectors(&strokeSize, 1);
754                strokeSize.setAbs(strokeSize);
755            } else {
756                strokeSize.set(GR_Scalar1, GR_Scalar1);
757            }
758            fAARectRenderer->strokeAARect(this->getGpu(), target, devRect,
759                                         strokeSize, useVertexCoverage);
760        } else {
761            fAARectRenderer->fillAARect(this->getGpu(), target,
762                                       devRect, useVertexCoverage);
763        }
764        return;
765    }
766
767    if (width >= 0) {
768        // TODO: consider making static vertex buffers for these cases.
769        // Hairline could be done by just adding closing vertex to
770        // unitSquareVertexBuffer()
771
772        static const int worstCaseVertCount = 10;
773        GrDrawTarget::AutoReleaseGeometry geo(target, 0, worstCaseVertCount, 0);
774
775        if (!geo.succeeded()) {
776            GrPrintf("Failed to get space for vertices!\n");
777            return;
778        }
779
780        GrPrimitiveType primType;
781        int vertCount;
782        GrPoint* vertex = geo.positions();
783
784        if (width > 0) {
785            vertCount = 10;
786            primType = kTriangleStrip_GrPrimitiveType;
787            setStrokeRectStrip(vertex, rect, width);
788        } else {
789            // hairline
790            vertCount = 5;
791            primType = kLineStrip_GrPrimitiveType;
792            vertex[0].set(rect.fLeft, rect.fTop);
793            vertex[1].set(rect.fRight, rect.fTop);
794            vertex[2].set(rect.fRight, rect.fBottom);
795            vertex[3].set(rect.fLeft, rect.fBottom);
796            vertex[4].set(rect.fLeft, rect.fTop);
797        }
798
799        GrDrawState::AutoViewMatrixRestore avmr;
800        if (NULL != matrix) {
801            GrDrawState* drawState = target->drawState();
802            avmr.set(drawState);
803            drawState->preConcatViewMatrix(*matrix);
804            drawState->preConcatSamplerMatrices(*matrix);
805        }
806
807        target->drawNonIndexed(primType, 0, vertCount);
808    } else {
809#if GR_STATIC_RECT_VB
810            const GrVertexBuffer* sqVB = fGpu->getUnitSquareVertexBuffer();
811            if (NULL == sqVB) {
812                GrPrintf("Failed to create static rect vb.\n");
813                return;
814            }
815            target->setVertexSourceToBuffer(0, sqVB);
816            GrDrawState* drawState = target->drawState();
817            GrDrawState::AutoViewMatrixRestore avmr(drawState);
818            GrMatrix m;
819            m.setAll(rect.width(),    0,             rect.fLeft,
820                        0,            rect.height(), rect.fTop,
821                        0,            0,             GrMatrix::I()[8]);
822
823            if (NULL != matrix) {
824                m.postConcat(*matrix);
825            }
826            drawState->preConcatViewMatrix(m);
827            drawState->preConcatSamplerMatrices(m);
828
829            target->drawNonIndexed(kTriangleFan_GrPrimitiveType, 0, 4);
830#else
831            target->drawSimpleRect(rect, matrix);
832#endif
833    }
834}
835
// Draws dstRect with the first texture stage's coordinates mapped from
// srcRect. dstMatrix/srcMatrix (either may be NULL) are applied on top of
// the rect-derived transforms.
void GrContext::drawRectToRect(const GrPaint& paint,
                               const GrRect& dstRect,
                               const GrRect& srcRect,
                               const GrMatrix* dstMatrix,
                               const GrMatrix* srcMatrix) {
    SK_TRACE_EVENT0("GrContext::drawRectToRect");

    // srcRect refers to paint's first texture
    if (!paint.isTextureStageEnabled(0)) {
        // No texture on stage 0: srcRect is meaningless, fall back to drawRect.
        drawRect(paint, dstRect, -1, dstMatrix);
        return;
    }

    GrDrawTarget* target = this->prepareToDraw(&paint, DEFAULT_BUFFERING);

#if GR_STATIC_RECT_VB
    // Static unit-square VB path: build matrices that map the unit square
    // onto dstRect (via the view matrix) and onto srcRect (via the first
    // stage's texture matrix).
    GrDrawState::AutoStageDisable atr(fDrawState);
    GrDrawState* drawState = target->drawState();
    GrDrawState::AutoViewMatrixRestore avmr(drawState);

    GrMatrix m;

    // Unit square -> dstRect. GrMatrix::I()[8] is the identity matrix's
    // perspective entry (i.e. "one" in the scalar representation in use).
    m.setAll(dstRect.width(), 0,                dstRect.fLeft,
             0,               dstRect.height(), dstRect.fTop,
             0,               0,                GrMatrix::I()[8]);
    if (NULL != dstMatrix) {
        m.postConcat(*dstMatrix);
    }
    drawState->preConcatViewMatrix(m);

    // we explicitly setup the correct coords for the first stage. The others
    // must know about the view matrix change.
    for (int s = 1; s < GrPaint::kTotalStages; ++s) {
        if (drawState->isStageEnabled(s)) {
            drawState->sampler(s)->preConcatMatrix(m);
        }
    }

    // Unit square -> srcRect for the first texture stage.
    m.setAll(srcRect.width(), 0,                srcRect.fLeft,
             0,               srcRect.height(), srcRect.fTop,
             0,               0,                GrMatrix::I()[8]);
    if (NULL != srcMatrix) {
        m.postConcat(*srcMatrix);
    }
    drawState->sampler(GrPaint::kFirstTextureStage)->preConcatMatrix(m);

    const GrVertexBuffer* sqVB = fGpu->getUnitSquareVertexBuffer();
    if (NULL == sqVB) {
        GrPrintf("Failed to create static rect vb.\n");
        return;
    }
    target->setVertexSourceToBuffer(0, sqVB);
    target->drawNonIndexed(kTriangleFan_GrPrimitiveType, 0, 4);
#else
    // Non-static-VB path: let the draw target generate the rect geometry,
    // supplying the source rect/matrix for stage 0 only.
    GrDrawState::AutoStageDisable atr(fDrawState);

    const GrRect* srcRects[GrDrawState::kNumStages] = {NULL};
    const GrMatrix* srcMatrices[GrDrawState::kNumStages] = {NULL};
    srcRects[0] = &srcRect;
    srcMatrices[0] = srcMatrix;

    target->drawRect(dstRect, dstMatrix, srcRects, srcMatrices);
#endif
}
900
// Draws caller-supplied geometry. positions is required; texCoords and
// colors are optional per-vertex attributes, and a non-NULL indices array
// (with indexCount) switches the draw to indexed mode.
void GrContext::drawVertices(const GrPaint& paint,
                             GrPrimitiveType primitiveType,
                             int vertexCount,
                             const GrPoint positions[],
                             const GrPoint texCoords[],
                             const GrColor colors[],
                             const uint16_t indices[],
                             int indexCount) {
    SK_TRACE_EVENT0("GrContext::drawVertices");

    GrDrawTarget::AutoReleaseGeometry geo;

    GrDrawTarget* target = this->prepareToDraw(&paint, DEFAULT_BUFFERING);
    GrDrawState::AutoStageDisable atr(fDrawState);

    // Build the vertex layout from whichever attributes were provided.
    GrVertexLayout layout = 0;
    if (NULL != texCoords) {
        layout |= GrDrawTarget::StageTexCoordVertexLayoutBit(0, 0);
    }
    if (NULL != colors) {
        layout |= GrDrawTarget::kColor_VertexLayoutBit;
    }
    int vertexSize = GrDrawTarget::VertexSize(layout);

    if (sizeof(GrPoint) != vertexSize) {
        // Vertices carry more than a position: interleave the separate
        // attribute arrays into a scratch vertex buffer.
        if (!geo.set(target, layout, vertexCount, 0)) {
            GrPrintf("Failed to get space for vertices!\n");
            return;
        }
        int texOffsets[GrDrawState::kMaxTexCoords];
        int colorOffset;
        GrDrawTarget::VertexSizeAndOffsetsByIdx(layout,
                                                texOffsets,
                                                &colorOffset,
                                                NULL,
                                                NULL);
        void* curVertex = geo.vertices();

        for (int i = 0; i < vertexCount; ++i) {
            // Position always lives at offset 0 within a vertex.
            *((GrPoint*)curVertex) = positions[i];

            // An offset is > 0 only when the layout includes that attribute.
            if (texOffsets[0] > 0) {
                *(GrPoint*)((intptr_t)curVertex + texOffsets[0]) = texCoords[i];
            }
            if (colorOffset > 0) {
                *(GrColor*)((intptr_t)curVertex + colorOffset) = colors[i];
            }
            curVertex = (void*)((intptr_t)curVertex + vertexSize);
        }
    } else {
        // Position-only layout: the caller's array can be used in place.
        target->setVertexSourceToArray(layout, positions, vertexCount);
    }

    // we don't currently apply offscreen AA to this path. Need improved
    // management of GrDrawTarget's geometry to avoid copying points per-tile.

    if (NULL != indices) {
        target->setIndexSourceToArray(indices, indexCount);
        target->drawIndexed(primitiveType, 0, 0, vertexCount, indexCount);
    } else {
        target->drawNonIndexed(primitiveType, 0, vertexCount);
    }
}
964
965///////////////////////////////////////////////////////////////////////////////
966namespace {
967
968struct CircleVertex {
969    GrPoint fPos;
970    GrPoint fCenter;
971    GrScalar fOuterRadius;
972    GrScalar fInnerRadius;
973};
974
975/* Returns true if will map a circle to another circle. This can be true
976 * if the matrix only includes square-scale, rotation, translation.
977 */
978inline bool isSimilarityTransformation(const SkMatrix& matrix,
979                                       SkScalar tol = SK_ScalarNearlyZero) {
980    if (matrix.isIdentity() || matrix.getType() == SkMatrix::kTranslate_Mask) {
981        return true;
982    }
983    if (matrix.hasPerspective()) {
984        return false;
985    }
986
987    SkScalar mx = matrix.get(SkMatrix::kMScaleX);
988    SkScalar sx = matrix.get(SkMatrix::kMSkewX);
989    SkScalar my = matrix.get(SkMatrix::kMScaleY);
990    SkScalar sy = matrix.get(SkMatrix::kMSkewY);
991
992    if (mx == 0 && sx == 0 && my == 0 && sy == 0) {
993        return false;
994    }
995
996    // it has scales or skews, but it could also be rotation, check it out.
997    SkVector vec[2];
998    vec[0].set(mx, sx);
999    vec[1].set(sy, my);
1000
1001    return SkScalarNearlyZero(vec[0].dot(vec[1]), SkScalarSquare(tol)) &&
1002           SkScalarNearlyEqual(vec[0].lengthSqd(), vec[1].lengthSqd(),
1003                SkScalarSquare(tol));
1004}
1005
1006}
1007
// TODO: strokeWidth can't be larger than zero right now.
// It will be fixed when drawPath() can handle strokes.
void GrContext::drawOval(const GrPaint& paint,
                         const GrRect& rect,
                         SkScalar strokeWidth) {
    GrAssert(strokeWidth <= 0);
    // The shader-based circle path below only handles AA circles under a
    // similarity transform; everything else goes through the path renderer.
    if (!isSimilarityTransformation(this->getMatrix()) ||
        !paint.fAntiAlias ||
        rect.height() != rect.width()) {
        SkPath path;
        path.addOval(rect);
        GrPathFill fill = (strokeWidth == 0) ?
                           kHairLine_GrPathFill : kWinding_GrPathFill;
        this->internalDrawPath(paint, path, fill, NULL);
        return;
    }

    GrDrawTarget* target = this->prepareToDraw(&paint, DEFAULT_BUFFERING);

    GrDrawState* drawState = target->drawState();
    GrDrawState::AutoStageDisable atr(fDrawState);
    const GrMatrix vm = drawState->getViewMatrix();

    const GrRenderTarget* rt = drawState->getRenderTarget();
    if (NULL == rt) {
        return;
    }

    // Vertex positions below are computed in device space, so draw with an
    // identity view matrix for the duration of this call.
    GrDrawTarget::AutoDeviceCoordDraw adcd(target);
    if (!adcd.succeeded()) {
        return;
    }

    GrVertexLayout layout = GrDrawTarget::kEdge_VertexLayoutBit;
    GrAssert(sizeof(CircleVertex) == GrDrawTarget::VertexSize(layout));

    GrPoint center = GrPoint::Make(rect.centerX(), rect.centerY());
    GrScalar radius = SkScalarHalf(rect.width());

    // Map the circle into device space. This preserves circularity because
    // the view matrix was verified to be a similarity transform above.
    vm.mapPoints(&center, 1);
    radius = vm.mapRadius(radius);

    GrScalar outerRadius = radius;
    GrScalar innerRadius = 0;
    SkScalar halfWidth = 0;
    if (strokeWidth == 0) {
        // Hairline: draw a one-pixel-wide ring centered on the radius.
        halfWidth = SkScalarHalf(SK_Scalar1);

        outerRadius += halfWidth;
        innerRadius = SkMaxScalar(0, radius - halfWidth);
    }

    GrDrawTarget::AutoReleaseGeometry geo(target, layout, 4, 0);
    if (!geo.succeeded()) {
        GrPrintf("Failed to get space for vertices!\n");
        return;
    }

    CircleVertex* verts = reinterpret_cast<CircleVertex*>(geo.vertices());

    // The fragment shader will extend the radius out half a pixel
    // to antialias. Expand the drawn rect here so all the pixels
    // will be captured.
    SkScalar L = center.fX - outerRadius - SkFloatToScalar(0.5f);
    SkScalar R = center.fX + outerRadius + SkFloatToScalar(0.5f);
    SkScalar T = center.fY - outerRadius - SkFloatToScalar(0.5f);
    SkScalar B = center.fY + outerRadius + SkFloatToScalar(0.5f);

    verts[0].fPos = SkPoint::Make(L, T);
    verts[1].fPos = SkPoint::Make(R, T);
    verts[2].fPos = SkPoint::Make(L, B);
    verts[3].fPos = SkPoint::Make(R, B);

    for (int i = 0; i < 4; ++i) {
        // this goes to fragment shader, it should be in y-points-up space.
        verts[i].fCenter = SkPoint::Make(center.fX, rt->height() - center.fY);

        verts[i].fOuterRadius = outerRadius;
        verts[i].fInnerRadius = innerRadius;
    }

    drawState->setVertexEdgeType(GrDrawState::kCircle_EdgeType);
    target->drawNonIndexed(kTriangleStrip_GrPrimitiveType, 0, 4);
}
1092
1093void GrContext::drawPath(const GrPaint& paint, const SkPath& path,
1094                         GrPathFill fill, const GrPoint* translate) {
1095
1096    if (path.isEmpty()) {
1097       if (GrIsFillInverted(fill)) {
1098           this->drawPaint(paint);
1099       }
1100       return;
1101    }
1102
1103    SkRect ovalRect;
1104    if (!GrIsFillInverted(fill) && path.isOval(&ovalRect)) {
1105        if (translate) {
1106            ovalRect.offset(*translate);
1107        }
1108        SkScalar width = (fill == kHairLine_GrPathFill) ? 0 : -SK_Scalar1;
1109        this->drawOval(paint, ovalRect, width);
1110        return;
1111    }
1112
1113    internalDrawPath(paint, path, fill, translate);
1114}
1115
1116void GrContext::internalDrawPath(const GrPaint& paint, const SkPath& path,
1117                                 GrPathFill fill, const GrPoint* translate) {
1118
1119    // Note that below we may sw-rasterize the path into a scratch texture.
1120    // Scratch textures can be recycled after they are returned to the texture
1121    // cache. This presents a potential hazard for buffered drawing. However,
1122    // the writePixels that uploads to the scratch will perform a flush so we're
1123    // OK.
1124    GrDrawTarget* target = this->prepareToDraw(&paint, DEFAULT_BUFFERING);
1125    GrDrawState::AutoStageDisable atr(fDrawState);
1126
1127    bool prAA = paint.fAntiAlias && !this->getRenderTarget()->isMultisampled();
1128
1129    // An Assumption here is that path renderer would use some form of tweaking
1130    // the src color (either the input alpha or in the frag shader) to implement
1131    // aa. If we have some future driver-mojo path AA that can do the right
1132    // thing WRT to the blend then we'll need some query on the PR.
1133    if (disable_coverage_aa_for_blend(target)) {
1134#if GR_DEBUG
1135        //GrPrintf("Turning off AA to correctly apply blend.\n");
1136#endif
1137        prAA = false;
1138    }
1139
1140    GrPathRenderer* pr = this->getPathRenderer(path, fill, target, prAA, true);
1141    if (NULL == pr) {
1142#if GR_DEBUG
1143        GrPrintf("Unable to find path renderer compatible with path.\n");
1144#endif
1145        return;
1146    }
1147
1148    pr->drawPath(path, fill, translate, target, prAA);
1149}
1150
1151////////////////////////////////////////////////////////////////////////////////
1152
1153void GrContext::flush(int flagsBitfield) {
1154    if (kDiscard_FlushBit & flagsBitfield) {
1155        fDrawBuffer->reset();
1156    } else {
1157        this->flushDrawBuffer();
1158    }
1159    if (kForceCurrentRenderTarget_FlushBit & flagsBitfield) {
1160        fGpu->forceRenderTargetFlush();
1161    }
1162}
1163
1164void GrContext::flushDrawBuffer() {
1165    if (fDrawBuffer) {
1166        // With addition of the AA clip path, flushing the draw buffer can
1167        // result in the generation of an AA clip mask. During this
1168        // process the SW path renderer may be invoked which recusively
1169        // calls this method (via internalWriteTexturePixels) creating
1170        // infinite recursion
1171        GrInOrderDrawBuffer* temp = fDrawBuffer;
1172        fDrawBuffer = NULL;
1173
1174        temp->flushTo(fGpu);
1175
1176        fDrawBuffer = temp;
1177    }
1178}
1179
1180void GrContext::writeTexturePixels(GrTexture* texture,
1181                                   int left, int top, int width, int height,
1182                                   GrPixelConfig config, const void* buffer, size_t rowBytes,
1183                                   uint32_t flags) {
1184    SK_TRACE_EVENT0("GrContext::writeTexturePixels");
1185    ASSERT_OWNED_RESOURCE(texture);
1186
1187    // TODO: use scratch texture to perform conversion
1188    if (kUnpremul_PixelOpsFlag & flags) {
1189        return;
1190    }
1191    if (!(kDontFlush_PixelOpsFlag & flags)) {
1192        this->flush();
1193    }
1194
1195    fGpu->writeTexturePixels(texture, left, top, width, height,
1196                             config, buffer, rowBytes);
1197}
1198
1199bool GrContext::readTexturePixels(GrTexture* texture,
1200                                  int left, int top, int width, int height,
1201                                  GrPixelConfig config, void* buffer, size_t rowBytes,
1202                                  uint32_t flags) {
1203    SK_TRACE_EVENT0("GrContext::readTexturePixels");
1204    ASSERT_OWNED_RESOURCE(texture);
1205
1206    // TODO: code read pixels for textures that aren't also rendertargets
1207    GrRenderTarget* target = texture->asRenderTarget();
1208    if (NULL != target) {
1209        return this->readRenderTargetPixels(target,
1210                                            left, top, width, height,
1211                                            config, buffer, rowBytes,
1212                                            flags);
1213    } else {
1214        return false;
1215    }
1216}
1217
1218#include "SkConfig8888.h"
1219
1220namespace {
1221/**
1222 * Converts a GrPixelConfig to a SkCanvas::Config8888. Only byte-per-channel
1223 * formats are representable as Config8888 and so the function returns false
1224 * if the GrPixelConfig has no equivalent Config8888.
1225 */
1226bool grconfig_to_config8888(GrPixelConfig config,
1227                            bool unpremul,
1228                            SkCanvas::Config8888* config8888) {
1229    switch (config) {
1230        case kRGBA_8888_GrPixelConfig:
1231            if (unpremul) {
1232                *config8888 = SkCanvas::kRGBA_Unpremul_Config8888;
1233            } else {
1234                *config8888 = SkCanvas::kRGBA_Premul_Config8888;
1235            }
1236            return true;
1237        case kBGRA_8888_GrPixelConfig:
1238            if (unpremul) {
1239                *config8888 = SkCanvas::kBGRA_Unpremul_Config8888;
1240            } else {
1241                *config8888 = SkCanvas::kBGRA_Premul_Config8888;
1242            }
1243            return true;
1244        default:
1245            return false;
1246    }
1247}
1248
1249// It returns a configuration with where the byte position of the R & B components are swapped in
1250// relation to the input config. This should only be called with the result of
1251// grconfig_to_config8888 as it will fail for other configs.
1252SkCanvas::Config8888 swap_config8888_red_and_blue(SkCanvas::Config8888 config8888) {
1253    switch (config8888) {
1254        case SkCanvas::kBGRA_Premul_Config8888:
1255            return SkCanvas::kRGBA_Premul_Config8888;
1256        case SkCanvas::kBGRA_Unpremul_Config8888:
1257            return SkCanvas::kRGBA_Unpremul_Config8888;
1258        case SkCanvas::kRGBA_Premul_Config8888:
1259            return SkCanvas::kBGRA_Premul_Config8888;
1260        case SkCanvas::kRGBA_Unpremul_Config8888:
1261            return SkCanvas::kBGRA_Unpremul_Config8888;
1262        default:
1263            GrCrash("Unexpected input");
1264            return SkCanvas::kBGRA_Unpremul_Config8888;;
1265    }
1266}
1267}
1268
// Reads back a rect of pixels from a render target into buffer, converting
// to the requested config (with optional unpremultiply via the
// kUnpremul_PixelOpsFlag). Conversions are done on the GPU via a scratch
// texture when possible, otherwise on the CPU after the read. Returns false
// if the read could not be performed.
bool GrContext::readRenderTargetPixels(GrRenderTarget* target,
                                       int left, int top, int width, int height,
                                       GrPixelConfig config, void* buffer, size_t rowBytes,
                                       uint32_t flags) {
    SK_TRACE_EVENT0("GrContext::readRenderTargetPixels");
    ASSERT_OWNED_RESOURCE(target);

    if (NULL == target) {
        // Default to the currently bound render target.
        target = fDrawState->getRenderTarget();
        if (NULL == target) {
            return false;
        }
    }

    if (!(kDontFlush_PixelOpsFlag & flags)) {
        this->flush();
    }

    // Determine which conversions have to be applied: flipY, swapRAndB, and/or unpremul.

    // If fGpu->readPixels would incur a y-flip cost then we will read the pixels upside down. We'll
    // either do the flipY by drawing into a scratch with a matrix or on the cpu after the read.
    bool flipY = fGpu->readPixelsWillPayForYFlip(target, left, top,
                                                 width, height, config,
                                                 rowBytes);
    bool swapRAndB = fGpu->preferredReadPixelsConfig(config) == GrPixelConfigSwapRAndB(config);

    bool unpremul = SkToBool(kUnpremul_PixelOpsFlag & flags);

    // flipY will get set to false when it is handled below using a scratch. However, in that case
    // we still want to do the read upside down.
    bool readUpsideDown = flipY;

    if (unpremul && kRGBA_8888_GrPixelConfig != config && kBGRA_8888_GrPixelConfig != config) {
        // The unpremul flag is only allowed for these two configs.
        return false;
    }

    GrPixelConfig readConfig;
    if (swapRAndB) {
        readConfig = GrPixelConfigSwapRAndB(config);
        GrAssert(kUnknown_GrPixelConfig != config);
    } else {
        readConfig = config;
    }

    // If the src is a texture and we would have to do conversions after read pixels, we instead
    // do the conversions by drawing the src to a scratch texture. If we handle any of the
    // conversions in the draw we set the corresponding bool to false so that we don't reapply it
    // on the read back pixels.
    GrTexture* src = target->asTexture();
    GrAutoScratchTexture ast;
    if (NULL != src && (swapRAndB || unpremul || flipY)) {
        // Make the scratch a render target because we don't have a robust readTexturePixels as of
        // yet. It calls this function.
        GrTextureDesc desc;
        desc.fFlags = kRenderTarget_GrTextureFlagBit;
        desc.fWidth = width;
        desc.fHeight = height;
        desc.fConfig = readConfig;

        // When a full readback is faster than a partial we could always make the scratch exactly
        // match the passed rect. However, if we see many different size rectangles we will trash
        // our texture cache and pay the cost of creating and destroying many textures. So, we only
        // request an exact match when the caller is reading an entire RT.
        ScratchTexMatch match = kApprox_ScratchTexMatch;
        if (0 == left &&
            0 == top &&
            target->width() == width &&
            target->height() == height &&
            fGpu->fullReadPixelsIsFasterThanPartial()) {
            match = kExact_ScratchTexMatch;
        }
        ast.set(this, desc, match);
        GrTexture* texture = ast.texture();
        if (texture) {
            SkAutoTUnref<GrCustomStage> stage;
            if (unpremul) {
                stage.reset(this->createPMToUPMEffect(src, swapRAndB));
            }
            // If we failed to create a PM->UPM effect and have no other conversions to perform then
            // there is no longer any point to using the scratch.
            if (NULL != stage || flipY || swapRAndB) {
                if (NULL == stage) {
                    // Fall back to a plain config-conversion effect (handles
                    // the r/b swap but not unpremultiplication).
                    stage.reset(GrConfigConversionEffect::Create(src, swapRAndB));
                    GrAssert(NULL != stage);
                } else {
                    unpremul = false; // we will handle the UPM conversion in the draw
                }
                swapRAndB = false; // we will handle the swap in the draw.

                GrDrawTarget::AutoStateRestore asr(fGpu, GrDrawTarget::kReset_ASRInit);
                GrDrawState* drawState = fGpu->drawState();
                drawState->setRenderTarget(texture->asRenderTarget());
                GrMatrix matrix;
                if (flipY) {
                    // Draw the src upside down into the scratch.
                    matrix.setTranslate(SK_Scalar1 * left,
                                        SK_Scalar1 * (top + height));
                    matrix.set(GrMatrix::kMScaleY, -GR_Scalar1);
                    flipY = false; // the y flip will be handled in the draw
                } else {
                    matrix.setTranslate(SK_Scalar1 *left, SK_Scalar1 *top);
                }
                // Normalize to texture coordinates.
                matrix.postIDiv(src->width(), src->height());
                drawState->sampler(0)->reset(matrix);
                drawState->sampler(0)->setCustomStage(stage);
                GrRect rect = GrRect::MakeWH(GrIntToScalar(width), GrIntToScalar(height));
                fGpu->drawSimpleRect(rect, NULL);
                // we want to read back from the scratch's origin
                left = 0;
                top = 0;
                target = texture->asRenderTarget();
            }
        }
    }
    if (!fGpu->readPixels(target,
                          left, top, width, height,
                          readConfig, buffer, rowBytes, readUpsideDown)) {
        return false;
    }
    // Perform any conversions we weren't able to perform using a scratch texture.
    if (unpremul || swapRAndB || flipY) {
        // These are initialized to suppress a warning
        SkCanvas::Config8888 srcC8888 = SkCanvas::kNative_Premul_Config8888;
        SkCanvas::Config8888 dstC8888 = SkCanvas::kNative_Premul_Config8888;

        bool c8888IsValid = grconfig_to_config8888(config, false, &srcC8888);
        grconfig_to_config8888(config, unpremul, &dstC8888);

        if (swapRAndB) {
            GrAssert(c8888IsValid); // we should only do r/b swap on 8888 configs
            srcC8888 = swap_config8888_red_and_blue(srcC8888);
        }
        if (flipY) {
            // Flip in place by swapping rows from the outside in, converting
            // each row's config as it moves (when an 8888 conversion applies).
            size_t tightRB = width * GrBytesPerPixel(config);
            if (0 == rowBytes) {
                rowBytes = tightRB;
            }
            SkAutoSTMalloc<256, uint8_t> tempRow(tightRB);
            intptr_t top = reinterpret_cast<intptr_t>(buffer);
            intptr_t bot = top + (height - 1) * rowBytes;
            while (top < bot) {
                uint32_t* t = reinterpret_cast<uint32_t*>(top);
                uint32_t* b = reinterpret_cast<uint32_t*>(bot);
                uint32_t* temp = reinterpret_cast<uint32_t*>(tempRow.get());
                memcpy(temp, t, tightRB);
                if (c8888IsValid) {
                    SkConvertConfig8888Pixels(t, tightRB, dstC8888,
                                              b, tightRB, srcC8888,
                                              width, 1);
                    SkConvertConfig8888Pixels(b, tightRB, dstC8888,
                                              temp, tightRB, srcC8888,
                                              width, 1);
                } else {
                    memcpy(t, b, tightRB);
                    memcpy(b, temp, tightRB);
                }
                top += rowBytes;
                bot -= rowBytes;
            }
            // The above loop does nothing on the middle row when height is odd.
            if (top == bot && c8888IsValid && dstC8888 != srcC8888) {
                uint32_t* mid = reinterpret_cast<uint32_t*>(top);
                SkConvertConfig8888Pixels(mid, tightRB, dstC8888, mid, tightRB, srcC8888, width, 1);
            }
        } else {
            // if we aren't flipping Y then we have no reason to be here other than doing
            // conversions for 8888 (r/b swap or upm).
            GrAssert(c8888IsValid);
            uint32_t* b32 = reinterpret_cast<uint32_t*>(buffer);
            SkConvertConfig8888Pixels(b32, rowBytes, dstC8888,
                                      b32, rowBytes, srcC8888,
                                      width, height);
        }
    }
    return true;
}
1446
1447void GrContext::resolveRenderTarget(GrRenderTarget* target) {
1448    GrAssert(target);
1449    ASSERT_OWNED_RESOURCE(target);
1450    // In the future we may track whether there are any pending draws to this
1451    // target. We don't today so we always perform a flush. We don't promise
1452    // this to our clients, though.
1453    this->flush();
1454    fGpu->resolveRenderTarget(target);
1455}
1456
1457void GrContext::copyTexture(GrTexture* src, GrRenderTarget* dst) {
1458    if (NULL == src || NULL == dst) {
1459        return;
1460    }
1461    ASSERT_OWNED_RESOURCE(src);
1462
1463    // Writes pending to the source texture are not tracked, so a flush
1464    // is required to ensure that the copy captures the most recent contents
1465    // of the source texture. See similar behaviour in
1466    // GrContext::resolveRenderTarget.
1467    this->flush();
1468
1469    GrDrawTarget::AutoStateRestore asr(fGpu, GrDrawTarget::kReset_ASRInit);
1470    GrDrawState* drawState = fGpu->drawState();
1471    drawState->setRenderTarget(dst);
1472    GrMatrix sampleM;
1473    sampleM.setIDiv(src->width(), src->height());
1474    drawState->sampler(0)->reset(sampleM);
1475    drawState->createTextureEffect(0, src);
1476    SkRect rect = SkRect::MakeXYWH(0, 0,
1477                                   SK_Scalar1 * src->width(),
1478                                   SK_Scalar1 * src->height());
1479    fGpu->drawSimpleRect(rect, NULL);
1480}
1481
// Writes a rect of pixels into a render target. Uploads the data to a
// scratch texture (converting config / premultiplying as needed) and then
// draws that texture into the target.
void GrContext::writeRenderTargetPixels(GrRenderTarget* target,
                                        int left, int top, int width, int height,
                                        GrPixelConfig config,
                                        const void* buffer,
                                        size_t rowBytes,
                                        uint32_t flags) {
    SK_TRACE_EVENT0("GrContext::writeRenderTargetPixels");
    ASSERT_OWNED_RESOURCE(target);

    if (NULL == target) {
        // Default to the currently bound render target.
        target = fDrawState->getRenderTarget();
        if (NULL == target) {
            return;
        }
    }

    // TODO: when underlying api has a direct way to do this we should use it (e.g. glDrawPixels on
    // desktop GL).

    // We will always call some form of writeTexturePixels and we will pass our flags on to it.
    // Thus, we don't perform a flush here since that call will do it (if the kNoFlush flag isn't
    // set.)

    // If the RT is also a texture and we don't have to premultiply then take the texture path.
    // We expect to be at least as fast or faster since it doesn't use an intermediate texture as
    // we do below.

#if !GR_MAC_BUILD
    // At least some drivers on the Mac get confused when glTexImage2D is called on a texture
    // attached to an FBO. The FBO still sees the old image. TODO: determine what OS versions and/or
    // HW is affected.
    if (NULL != target->asTexture() && !(kUnpremul_PixelOpsFlag & flags)) {
        this->writeTexturePixels(target->asTexture(),
                                 left, top, width, height,
                                 config, buffer, rowBytes, flags);
        return;
    }
#endif
    SkAutoTUnref<GrCustomStage> stage;
    bool swapRAndB = (fGpu->preferredReadPixelsConfig(config) == GrPixelConfigSwapRAndB(config));

    // Upload in the GPU's preferred config; a conversion effect will swap
    // R and B back during the draw if needed.
    GrPixelConfig textureConfig;
    if (swapRAndB) {
        textureConfig = GrPixelConfigSwapRAndB(config);
    } else {
        textureConfig = config;
    }

    GrTextureDesc desc;
    desc.fWidth = width;
    desc.fHeight = height;
    desc.fConfig = textureConfig;
    GrAutoScratchTexture ast(this, desc);
    GrTexture* texture = ast.texture();
    if (NULL == texture) {
        return;
    }
    // allocate a tmp buffer and sw convert the pixels to premul
    SkAutoSTMalloc<128 * 128, uint32_t> tmpPixels(0);

    if (kUnpremul_PixelOpsFlag & flags) {
        if (kRGBA_8888_GrPixelConfig != config && kBGRA_8888_GrPixelConfig != config) {
            // Unpremul is only supported for these two configs.
            return;
        }
        // Prefer doing the premultiply on the GPU via a custom effect.
        stage.reset(this->createUPMToPMEffect(texture, swapRAndB));
        if (NULL == stage) {
            // No GPU effect available: premultiply on the CPU into tmpPixels.
            SkCanvas::Config8888 srcConfig8888, dstConfig8888;
            GR_DEBUGCODE(bool success = )
            grconfig_to_config8888(config, true, &srcConfig8888);
            GrAssert(success);
            GR_DEBUGCODE(success = )
            grconfig_to_config8888(config, false, &dstConfig8888);
            GrAssert(success);
            const uint32_t* src = reinterpret_cast<const uint32_t*>(buffer);
            tmpPixels.reset(width * height);
            SkConvertConfig8888Pixels(tmpPixels.get(), 4 * width, dstConfig8888,
                                      src, rowBytes, srcConfig8888,
                                      width, height);
            buffer = tmpPixels.get();
            rowBytes = 4 * width;
        }
    }
    if (NULL == stage) {
        // Plain config-conversion effect (handles the r/b swap, if any).
        stage.reset(GrConfigConversionEffect::Create(texture, swapRAndB));
        GrAssert(NULL != stage);
    }

    // Upload to the scratch; the unpremul flag is stripped because any
    // needed premultiply was handled above (CPU or GPU path).
    this->writeTexturePixels(texture,
                             0, 0, width, height,
                             textureConfig, buffer, rowBytes,
                             flags & ~kUnpremul_PixelOpsFlag);

    // Draw the scratch texture into the target at (left, top).
    GrDrawTarget::AutoStateRestore  asr(fGpu, GrDrawTarget::kReset_ASRInit);
    GrDrawState* drawState = fGpu->drawState();

    GrMatrix matrix;
    matrix.setTranslate(GrIntToScalar(left), GrIntToScalar(top));
    drawState->setViewMatrix(matrix);
    drawState->setRenderTarget(target);

    matrix.setIDiv(texture->width(), texture->height());
    drawState->sampler(0)->reset(matrix);
    drawState->sampler(0)->setCustomStage(stage);

    fGpu->drawSimpleRect(GrRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height)), NULL);
}
1588////////////////////////////////////////////////////////////////////////////////
1589
// Mirrors all of the paint's state into fDrawState so that subsequent draws
// use it. Requires that no stages are currently enabled (asserted below).
void GrContext::setPaint(const GrPaint& paint) {
    GrAssert(fDrawState->stagesDisabled());

    // Copy the paint's texture samplers into their corresponding stages.
    for (int i = 0; i < GrPaint::kMaxTextures; ++i) {
        int s = i + GrPaint::kFirstTextureStage;
        if (paint.isTextureStageEnabled(i)) {
            *fDrawState->sampler(s) = paint.getTextureSampler(i);
        }
    }

    // Stages at and after kFirstMaskStage contribute coverage, not color.
    fDrawState->setFirstCoverageStage(GrPaint::kFirstMaskStage);

    for (int i = 0; i < GrPaint::kMaxMasks; ++i) {
        int s = i + GrPaint::kFirstMaskStage;
        if (paint.isMaskStageEnabled(i)) {
            *fDrawState->sampler(s) = paint.getMaskSampler(i);
        }
    }

    // disable all stages not accessible via the paint
    for (int s = GrPaint::kTotalStages; s < GrDrawState::kNumStages; ++s) {
        fDrawState->disableStage(s);
    }

    fDrawState->setColor(paint.fColor);

    // Translate the paint's boolean flags into draw state bits.
    if (paint.fDither) {
        fDrawState->enableState(GrDrawState::kDither_StateBit);
    } else {
        fDrawState->disableState(GrDrawState::kDither_StateBit);
    }
    if (paint.fAntiAlias) {
        fDrawState->enableState(GrDrawState::kHWAntialias_StateBit);
    } else {
        fDrawState->disableState(GrDrawState::kHWAntialias_StateBit);
    }
    if (paint.fColorMatrixEnabled) {
        fDrawState->enableState(GrDrawState::kColorMatrix_StateBit);
        fDrawState->setColorMatrix(paint.fColorMatrix);
    } else {
        fDrawState->disableState(GrDrawState::kColorMatrix_StateBit);
    }
    fDrawState->setBlendFunc(paint.fSrcBlendCoeff, paint.fDstBlendCoeff);
    fDrawState->setColorFilter(paint.fColorFilterColor, paint.fColorFilterXfermode);
    fDrawState->setCoverage(paint.fCoverage);
#if GR_DEBUG_PARTIAL_COVERAGE_CHECK
    // Debug aid: warn when partial coverage will blend incorrectly on this GPU.
    if ((paint.hasMask() || 0xff != paint.fCoverage) &&
        !fGpu->canApplyCoverage()) {
        GrPrintf("Partial pixel coverage will be incorrectly blended.\n");
    }
#endif
}
1642
1643GrDrawTarget* GrContext::prepareToDraw(const GrPaint* paint, BufferedDraw buffered) {
1644    if (kNo_BufferedDraw == buffered && kYes_BufferedDraw == fLastDrawWasBuffered) {
1645        this->flushDrawBuffer();
1646        fLastDrawWasBuffered = kNo_BufferedDraw;
1647    }
1648    if (NULL != paint) {
1649        this->setPaint(*paint);
1650    }
1651    if (kYes_BufferedDraw == buffered) {
1652        fDrawBuffer->setClip(fGpu->getClip());
1653        fLastDrawWasBuffered = kYes_BufferedDraw;
1654        return fDrawBuffer;
1655    } else {
1656        GrAssert(kNo_BufferedDraw == buffered);
1657        return fGpu;
1658    }
1659}
1660
1661/*
1662 * This method finds a path renderer that can draw the specified path on
1663 * the provided target.
 * Due to its expense, the software path renderer has been split out so it
 * can be individually allowed/disallowed via the "allowSW" boolean.
1666 */
1667GrPathRenderer* GrContext::getPathRenderer(const SkPath& path,
1668                                           GrPathFill fill,
1669                                           const GrDrawTarget* target,
1670                                           bool antiAlias,
1671                                           bool allowSW) {
1672    if (NULL == fPathRendererChain) {
1673        fPathRendererChain =
1674            SkNEW_ARGS(GrPathRendererChain,
1675                       (this, GrPathRendererChain::kNone_UsageFlag));
1676    }
1677
1678    GrPathRenderer* pr = fPathRendererChain->getPathRenderer(path, fill,
1679                                                             target,
1680                                                             antiAlias);
1681
1682    if (NULL == pr && allowSW) {
1683        if (NULL == fSoftwarePathRenderer) {
1684            fSoftwarePathRenderer = SkNEW_ARGS(GrSoftwarePathRenderer, (this));
1685        }
1686
1687        pr = fSoftwarePathRenderer;
1688    }
1689
1690    return pr;
1691}
1692
1693////////////////////////////////////////////////////////////////////////////////
1694
1695void GrContext::setRenderTarget(GrRenderTarget* target) {
1696    ASSERT_OWNED_RESOURCE(target);
1697    fDrawState->setRenderTarget(target);
1698}
1699
1700GrRenderTarget* GrContext::getRenderTarget() {
1701    return fDrawState->getRenderTarget();
1702}
1703
1704const GrRenderTarget* GrContext::getRenderTarget() const {
1705    return fDrawState->getRenderTarget();
1706}
1707
1708bool GrContext::isConfigRenderable(GrPixelConfig config) const {
1709    return fGpu->isConfigRenderable(config);
1710}
1711
1712const GrMatrix& GrContext::getMatrix() const {
1713    return fDrawState->getViewMatrix();
1714}
1715
1716void GrContext::setMatrix(const GrMatrix& m) {
1717    fDrawState->setViewMatrix(m);
1718}
1719
// Pre-concatenates 'm' onto the current view matrix.
// NOTE(review): declared const yet mutates state through the fDrawState
// pointer (legal, since only the pointer is const) -- confirm the const
// qualifier matches the intended API contract.
void GrContext::concatMatrix(const GrMatrix& m) const {
    fDrawState->preConcatViewMatrix(m);
}
1723
// Returns 'bits' with the bit at position 'shift' set when 'pred' is
// non-zero and cleared otherwise.
// The mask is built in intptr_t width: the previous '1 << shift' shifted
// an int, which is undefined for shift >= 31 and could never reach the
// high bits of a 64-bit intptr_t.
static inline intptr_t setOrClear(intptr_t bits, int shift, intptr_t pred) {
    const intptr_t mask = static_cast<intptr_t>(1) << shift;
    if (pred) {
        bits |= mask;
    } else {
        bits &= ~mask;
    }
    return bits;
}
1733
// Constructs a context bound to 'gpu': refs the gpu, installs this
// context on it, and creates the context-owned caches and draw buffer.
GrContext::GrContext(GrGpu* gpu) {
    ++THREAD_INSTANCE_COUNT;

    // Wire up the gpu first; everything created below is built on it.
    fGpu = gpu;
    fGpu->ref();
    fGpu->setContext(this);

    // The context owns the draw state and shares it with the gpu.
    fDrawState = SkNEW(GrDrawState);
    fGpu->setDrawState(fDrawState);

    // Path renderers are created lazily (see getPathRenderer()).
    fPathRendererChain = NULL;
    fSoftwarePathRenderer = NULL;

    fTextureCache = SkNEW_ARGS(GrResourceCache,
                               (MAX_TEXTURE_CACHE_COUNT,
                                MAX_TEXTURE_CACHE_BYTES));
    fFontCache = SkNEW_ARGS(GrFontCache, (fGpu));

    fLastDrawWasBuffered = kNo_BufferedDraw;

    // setupDrawBuffer() asserts these are NULL before allocating them.
    fDrawBuffer = NULL;
    fDrawBufferVBAllocPool = NULL;
    fDrawBufferIBAllocPool = NULL;

    fAARectRenderer = SkNEW(GrAARectRenderer);

    // PM<->UPM conversion probing is deferred until first requested by
    // createPMToUPMEffect()/createUPMToPMEffect().
    fDidTestPMConversions = false;

    this->setupDrawBuffer();
}
1764
1765void GrContext::setupDrawBuffer() {
1766
1767    GrAssert(NULL == fDrawBuffer);
1768    GrAssert(NULL == fDrawBufferVBAllocPool);
1769    GrAssert(NULL == fDrawBufferIBAllocPool);
1770
1771    fDrawBufferVBAllocPool =
1772        SkNEW_ARGS(GrVertexBufferAllocPool, (fGpu, false,
1773                                    DRAW_BUFFER_VBPOOL_BUFFER_SIZE,
1774                                    DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS));
1775    fDrawBufferIBAllocPool =
1776        SkNEW_ARGS(GrIndexBufferAllocPool, (fGpu, false,
1777                                   DRAW_BUFFER_IBPOOL_BUFFER_SIZE,
1778                                   DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS));
1779
1780    fDrawBuffer = SkNEW_ARGS(GrInOrderDrawBuffer, (fGpu,
1781                                          fDrawBufferVBAllocPool,
1782                                          fDrawBufferIBAllocPool));
1783
1784    fDrawBuffer->setQuadIndexBuffer(this->getQuadIndexBuffer());
1785    if (fDrawBuffer) {
1786        fDrawBuffer->setAutoFlushTarget(fGpu);
1787        fDrawBuffer->setDrawState(fDrawState);
1788    }
1789}
1790
1791GrDrawTarget* GrContext::getTextTarget(const GrPaint& paint) {
1792    return prepareToDraw(&paint, DEFAULT_BUFFERING);
1793}
1794
1795const GrIndexBuffer* GrContext::getQuadIndexBuffer() const {
1796    return fGpu->getQuadIndexBuffer();
1797}
1798
1799namespace {
1800void test_pm_conversions(GrContext* ctx, int* pmToUPMValue, int* upmToPMValue) {
1801    GrConfigConversionEffect::PMConversion pmToUPM;
1802    GrConfigConversionEffect::PMConversion upmToPM;
1803    GrConfigConversionEffect::TestForPreservingPMConversions(ctx, &pmToUPM, &upmToPM);
1804    *pmToUPMValue = pmToUPM;
1805    *upmToPMValue = upmToPM;
1806}
1807}
1808
1809GrCustomStage* GrContext::createPMToUPMEffect(GrTexture* texture, bool swapRAndB) {
1810    if (!fDidTestPMConversions) {
1811        test_pm_conversions(this, &fPMToUPMConversion, &fUPMToPMConversion);
1812        fDidTestPMConversions = true;
1813    }
1814    GrConfigConversionEffect::PMConversion pmToUPM =
1815        static_cast<GrConfigConversionEffect::PMConversion>(fPMToUPMConversion);
1816    if (GrConfigConversionEffect::kNone_PMConversion != pmToUPM) {
1817        return GrConfigConversionEffect::Create(texture, swapRAndB, pmToUPM);
1818    } else {
1819        return NULL;
1820    }
1821}
1822
1823GrCustomStage* GrContext::createUPMToPMEffect(GrTexture* texture, bool swapRAndB) {
1824    if (!fDidTestPMConversions) {
1825        test_pm_conversions(this, &fPMToUPMConversion, &fUPMToPMConversion);
1826        fDidTestPMConversions = true;
1827    }
1828    GrConfigConversionEffect::PMConversion upmToPM =
1829        static_cast<GrConfigConversionEffect::PMConversion>(fUPMToPMConversion);
1830    if (GrConfigConversionEffect::kNone_PMConversion != upmToPM) {
1831        return GrConfigConversionEffect::Create(texture, swapRAndB, upmToPM);
1832    } else {
1833        return NULL;
1834    }
1835}
1836
// Gaussian-blurs the 'rect' region of 'srcTexture' with a separable
// two-pass convolution (X then Y), optionally downscaling first so large
// sigmas stay within the shader's kernel radius. Returns a ref'ed
// texture holding the result (caller must unref). If 'canClobberSrc' is
// true the source may be reused as a ping-pong scratch target.
GrTexture* GrContext::gaussianBlur(GrTexture* srcTexture,
                                   bool canClobberSrc,
                                   const SkRect& rect,
                                   float sigmaX, float sigmaY) {
    ASSERT_OWNED_RESOURCE(srcTexture);
    GrRenderTarget* oldRenderTarget = this->getRenderTarget();
    // All intermediate draws use an identity view matrix; AutoMatrix
    // restores the caller's matrix on exit.
    AutoMatrix avm(this, GrMatrix::I());
    SkIRect clearRect;
    int scaleFactorX, radiusX;
    int scaleFactorY, radiusY;
    // adjust_sigma() (defined earlier in this file, not visible here)
    // presumably clamps sigma per MAX_BLUR_SIGMA and yields the
    // downscale factor plus kernel radius -- confirm against the helper.
    sigmaX = adjust_sigma(sigmaX, &scaleFactorX, &radiusX);
    sigmaY = adjust_sigma(sigmaY, &scaleFactorY, &radiusY);

    // Snap the region of interest outward to integer bounds at the
    // downscaled resolution, then express it at full resolution again.
    SkRect srcRect(rect);
    scale_rect(&srcRect, 1.0f / scaleFactorX, 1.0f / scaleFactorY);
    srcRect.roundOut();
    scale_rect(&srcRect, static_cast<float>(scaleFactorX),
                         static_cast<float>(scaleFactorY));

    AutoClip acs(this, srcRect);

    GrAssert(kBGRA_8888_GrPixelConfig == srcTexture->config() ||
             kRGBA_8888_GrPixelConfig == srcTexture->config() ||
             kAlpha_8_GrPixelConfig == srcTexture->config());

    // Scratch targets match the (rounded) source region and config.
    GrTextureDesc desc;
    desc.fFlags = kRenderTarget_GrTextureFlagBit | kNoStencil_GrTextureFlagBit;
    desc.fWidth = SkScalarFloorToInt(srcRect.width());
    desc.fHeight = SkScalarFloorToInt(srcRect.height());
    desc.fConfig = srcTexture->config();

    // The passes below ping-pong between dstTexture and tempTexture via
    // SkTSwap; when the caller allows it, the source itself serves as
    // the second scratch texture.
    GrAutoScratchTexture temp1, temp2;
    GrTexture* dstTexture = temp1.set(this, desc);
    GrTexture* tempTexture = canClobberSrc ? srcTexture : temp2.set(this, desc);

    GrPaint paint;
    paint.reset();

    // Downsample by halves until both scale factors are reached.
    for (int i = 1; i < scaleFactorX || i < scaleFactorY; i *= 2) {
        paint.textureSampler(0)->matrix()->setIDiv(srcTexture->width(),
                                                   srcTexture->height());
        this->setRenderTarget(dstTexture->asRenderTarget());
        SkRect dstRect(srcRect);
        scale_rect(&dstRect, i < scaleFactorX ? 0.5f : 1.0f,
                            i < scaleFactorY ? 0.5f : 1.0f);
        paint.textureSampler(0)->setCustomStage(SkNEW_ARGS(GrSingleTextureEffect,
                                                           (srcTexture, true)))->unref();
        this->drawRectToRect(paint, dstRect, srcRect);
        srcRect = dstRect;
        srcTexture = dstTexture;
        SkTSwap(dstTexture, tempTexture);
    }

    SkIRect srcIRect;
    srcRect.roundOut(&srcIRect);

    // Horizontal convolution pass.
    if (sigmaX > 0.0f) {
        if (scaleFactorX > 1) {
            // Clear out a radius to the right of the srcRect to prevent the
            // X convolution from reading garbage.
            clearRect = SkIRect::MakeXYWH(srcIRect.fRight, srcIRect.fTop,
                                          radiusX, srcIRect.height());
            this->clear(&clearRect, 0x0);
        }

        this->setRenderTarget(dstTexture->asRenderTarget());
        GrDrawTarget* target = this->prepareToDraw(NULL, DEFAULT_BUFFERING);
        convolve_gaussian(target, srcTexture, srcRect, sigmaX, radiusX,
                          Gr1DKernelEffect::kX_Direction);
        srcTexture = dstTexture;
        SkTSwap(dstTexture, tempTexture);
    }

    // Vertical convolution pass.
    if (sigmaY > 0.0f) {
        if (scaleFactorY > 1 || sigmaX > 0.0f) {
            // Clear out a radius below the srcRect to prevent the Y
            // convolution from reading garbage.
            clearRect = SkIRect::MakeXYWH(srcIRect.fLeft, srcIRect.fBottom,
                                          srcIRect.width(), radiusY);
            this->clear(&clearRect, 0x0);
        }

        this->setRenderTarget(dstTexture->asRenderTarget());
        GrDrawTarget* target = this->prepareToDraw(NULL, DEFAULT_BUFFERING);
        convolve_gaussian(target, srcTexture, srcRect, sigmaY, radiusY,
                          Gr1DKernelEffect::kY_Direction);
        srcTexture = dstTexture;
        SkTSwap(dstTexture, tempTexture);
    }

    // Upsample back to the original resolution if we downscaled above.
    if (scaleFactorX > 1 || scaleFactorY > 1) {
        // Clear one pixel to the right and below, to accommodate bilinear
        // upsampling.
        clearRect = SkIRect::MakeXYWH(srcIRect.fLeft, srcIRect.fBottom,
                                      srcIRect.width() + 1, 1);
        this->clear(&clearRect, 0x0);
        clearRect = SkIRect::MakeXYWH(srcIRect.fRight, srcIRect.fTop,
                                      1, srcIRect.height());
        this->clear(&clearRect, 0x0);
        // FIXME:  This should be mitchell, not bilinear.
        paint.textureSampler(0)->matrix()->setIDiv(srcTexture->width(),
                                                   srcTexture->height());
        this->setRenderTarget(dstTexture->asRenderTarget());
        paint.textureSampler(0)->setCustomStage(SkNEW_ARGS(GrSingleTextureEffect,
                                                           (srcTexture, true)))->unref();
        SkRect dstRect(srcRect);
        scale_rect(&dstRect, (float) scaleFactorX, (float) scaleFactorY);
        this->drawRectToRect(paint, dstRect, srcRect);
        srcRect = dstRect;
        srcTexture = dstTexture;
        SkTSwap(dstTexture, tempTexture);
    }
    this->setRenderTarget(oldRenderTarget);
    // Hand ownership of whichever texture holds the final result to the
    // caller: detach it from its scratch wrapper, or ref the source if
    // the blur degenerated to a no-op / wrote back into it.
    if (srcTexture == temp1.texture()) {
        return temp1.detach();
    } else if (srcTexture == temp2.texture()) {
        return temp2.detach();
    } else {
        srcTexture->ref();
        return srcTexture;
    }
}
1959
1960///////////////////////////////////////////////////////////////////////////////
1961#if GR_CACHE_STATS
// Dumps the texture cache's statistics (GR_CACHE_STATS builds only).
void GrContext::printCacheStats() const {
    fTextureCache->printStats();
}
1965#endif
1966