// GrContext.cpp revision 137f1347abaf0bb6a945e91c2f6cb49f0ee69bc3
1 2/* 3 * Copyright 2011 Google Inc. 4 * 5 * Use of this source code is governed by a BSD-style license that can be 6 * found in the LICENSE file. 7 */ 8 9 10#include "GrContext.h" 11 12#include "effects/GrConvolutionEffect.h" 13#include "effects/GrSingleTextureEffect.h" 14#include "effects/GrConfigConversionEffect.h" 15 16#include "GrBufferAllocPool.h" 17#include "GrGpu.h" 18#include "GrDrawTargetCaps.h" 19#include "GrIndexBuffer.h" 20#include "GrInOrderDrawBuffer.h" 21#include "GrOvalRenderer.h" 22#include "GrPathRenderer.h" 23#include "GrPathUtils.h" 24#include "GrResourceCache.h" 25#include "GrSoftwarePathRenderer.h" 26#include "GrStencilBuffer.h" 27#include "GrTextStrike.h" 28#include "SkRTConf.h" 29#include "SkStrokeRec.h" 30#include "SkTLazy.h" 31#include "SkTLS.h" 32#include "SkTrace.h" 33 34SK_DEFINE_INST_COUNT(GrContext) 35SK_DEFINE_INST_COUNT(GrDrawState) 36 37// It can be useful to set this to false to test whether a bug is caused by using the 38// InOrderDrawBuffer, to compare performance of using/not using InOrderDrawBuffer, or to make 39// debugging simpler. 40SK_CONF_DECLARE(bool, c_Defer, "gpu.deferContext", true, 41 "Defers rendering in GrContext via GrInOrderDrawBuffer."); 42 43#define BUFFERED_DRAW (c_Defer ? kYes_BufferedDraw : kNo_BufferedDraw) 44 45#define MAX_BLUR_SIGMA 4.0f 46 47// When we're using coverage AA but the blend is incompatible (given gpu 48// limitations) should we disable AA or draw wrong? 
49#define DISABLE_COVERAGE_AA_FOR_BLEND 1 50 51#if GR_DEBUG 52 // change this to a 1 to see notifications when partial coverage fails 53 #define GR_DEBUG_PARTIAL_COVERAGE_CHECK 0 54#else 55 #define GR_DEBUG_PARTIAL_COVERAGE_CHECK 0 56#endif 57 58static const size_t MAX_TEXTURE_CACHE_COUNT = 2048; 59static const size_t MAX_TEXTURE_CACHE_BYTES = GR_DEFAULT_TEXTURE_CACHE_MB_LIMIT * 1024 * 1024; 60 61static const size_t DRAW_BUFFER_VBPOOL_BUFFER_SIZE = 1 << 15; 62static const int DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS = 4; 63 64static const size_t DRAW_BUFFER_IBPOOL_BUFFER_SIZE = 1 << 11; 65static const int DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS = 4; 66 67#define ASSERT_OWNED_RESOURCE(R) GrAssert(!(R) || (R)->getContext() == this) 68 69GrContext* GrContext::Create(GrBackend backend, GrBackendContext backendContext) { 70 GrContext* context = SkNEW(GrContext); 71 if (context->init(backend, backendContext)) { 72 return context; 73 } else { 74 context->unref(); 75 return NULL; 76 } 77} 78 79namespace { 80void* CreateThreadInstanceCount() { 81 return SkNEW_ARGS(int, (0)); 82} 83void DeleteThreadInstanceCount(void* v) { 84 delete reinterpret_cast<int*>(v); 85} 86#define THREAD_INSTANCE_COUNT \ 87 (*reinterpret_cast<int*>(SkTLS::Get(CreateThreadInstanceCount, DeleteThreadInstanceCount))) 88} 89 90GrContext::GrContext() { 91 ++THREAD_INSTANCE_COUNT; 92 fDrawState = NULL; 93 fGpu = NULL; 94 fClip = NULL; 95 fPathRendererChain = NULL; 96 fSoftwarePathRenderer = NULL; 97 fTextureCache = NULL; 98 fFontCache = NULL; 99 fDrawBuffer = NULL; 100 fDrawBufferVBAllocPool = NULL; 101 fDrawBufferIBAllocPool = NULL; 102 fAARectRenderer = NULL; 103 fOvalRenderer = NULL; 104 fViewMatrix.reset(); 105} 106 107bool GrContext::init(GrBackend backend, GrBackendContext backendContext) { 108 GrAssert(NULL == fGpu); 109 110 fGpu = GrGpu::Create(backend, backendContext, this); 111 if (NULL == fGpu) { 112 return false; 113 } 114 115 fDrawState = SkNEW(GrDrawState); 116 fGpu->setDrawState(fDrawState); 117 118 
119 fTextureCache = SkNEW_ARGS(GrResourceCache, 120 (MAX_TEXTURE_CACHE_COUNT, 121 MAX_TEXTURE_CACHE_BYTES)); 122 fFontCache = SkNEW_ARGS(GrFontCache, (fGpu)); 123 124 fLastDrawWasBuffered = kNo_BufferedDraw; 125 126 fAARectRenderer = SkNEW(GrAARectRenderer); 127 fOvalRenderer = SkNEW(GrOvalRenderer); 128 129 fDidTestPMConversions = false; 130 131 this->setupDrawBuffer(); 132 133 return true; 134} 135 136int GrContext::GetThreadInstanceCount() { 137 return THREAD_INSTANCE_COUNT; 138} 139 140GrContext::~GrContext() { 141 for (int i = 0; i < fCleanUpData.count(); ++i) { 142 (*fCleanUpData[i].fFunc)(this, fCleanUpData[i].fInfo); 143 } 144 145 if (NULL == fGpu) { 146 return; 147 } 148 149 this->flush(); 150 151 // Since the gpu can hold scratch textures, give it a chance to let go 152 // of them before freeing the texture cache 153 fGpu->purgeResources(); 154 155 delete fTextureCache; 156 fTextureCache = NULL; 157 delete fFontCache; 158 delete fDrawBuffer; 159 delete fDrawBufferVBAllocPool; 160 delete fDrawBufferIBAllocPool; 161 162 fAARectRenderer->unref(); 163 fOvalRenderer->unref(); 164 165 fGpu->unref(); 166 GrSafeUnref(fPathRendererChain); 167 GrSafeUnref(fSoftwarePathRenderer); 168 fDrawState->unref(); 169 170 --THREAD_INSTANCE_COUNT; 171} 172 173void GrContext::contextLost() { 174 this->contextDestroyed(); 175 this->setupDrawBuffer(); 176} 177 178void GrContext::contextDestroyed() { 179 // abandon first to so destructors 180 // don't try to free the resources in the API. 
181 fGpu->abandonResources(); 182 183 // a path renderer may be holding onto resources that 184 // are now unusable 185 GrSafeSetNull(fPathRendererChain); 186 GrSafeSetNull(fSoftwarePathRenderer); 187 188 delete fDrawBuffer; 189 fDrawBuffer = NULL; 190 191 delete fDrawBufferVBAllocPool; 192 fDrawBufferVBAllocPool = NULL; 193 194 delete fDrawBufferIBAllocPool; 195 fDrawBufferIBAllocPool = NULL; 196 197 fAARectRenderer->reset(); 198 199 fTextureCache->purgeAllUnlocked(); 200 fFontCache->freeAll(); 201 fGpu->markContextDirty(); 202} 203 204void GrContext::resetContext() { 205 fGpu->markContextDirty(); 206} 207 208void GrContext::freeGpuResources() { 209 this->flush(); 210 211 fGpu->purgeResources(); 212 213 fAARectRenderer->reset(); 214 215 fTextureCache->purgeAllUnlocked(); 216 fFontCache->freeAll(); 217 // a path renderer may be holding onto resources 218 GrSafeSetNull(fPathRendererChain); 219 GrSafeSetNull(fSoftwarePathRenderer); 220} 221 222size_t GrContext::getGpuTextureCacheBytes() const { 223 return fTextureCache->getCachedResourceBytes(); 224} 225 226//////////////////////////////////////////////////////////////////////////////// 227 228namespace { 229 230void scale_rect(SkRect* rect, float xScale, float yScale) { 231 rect->fLeft = SkScalarMul(rect->fLeft, SkFloatToScalar(xScale)); 232 rect->fTop = SkScalarMul(rect->fTop, SkFloatToScalar(yScale)); 233 rect->fRight = SkScalarMul(rect->fRight, SkFloatToScalar(xScale)); 234 rect->fBottom = SkScalarMul(rect->fBottom, SkFloatToScalar(yScale)); 235} 236 237float adjust_sigma(float sigma, int *scaleFactor, int *radius) { 238 *scaleFactor = 1; 239 while (sigma > MAX_BLUR_SIGMA) { 240 *scaleFactor *= 2; 241 sigma *= 0.5f; 242 } 243 *radius = static_cast<int>(ceilf(sigma * 3.0f)); 244 GrAssert(*radius <= GrConvolutionEffect::kMaxKernelRadius); 245 return sigma; 246} 247 248void convolve_gaussian(GrDrawTarget* target, 249 GrTexture* texture, 250 const SkRect& rect, 251 float sigma, 252 int radius, 253 
Gr1DKernelEffect::Direction direction) { 254 GrRenderTarget* rt = target->drawState()->getRenderTarget(); 255 GrDrawTarget::AutoStateRestore asr(target, GrDrawTarget::kReset_ASRInit); 256 GrDrawState* drawState = target->drawState(); 257 drawState->setRenderTarget(rt); 258 SkAutoTUnref<GrEffectRef> conv(GrConvolutionEffect::CreateGaussian(texture, 259 direction, 260 radius, 261 sigma)); 262 drawState->setEffect(0, conv); 263 target->drawSimpleRect(rect, NULL); 264} 265 266} 267 268//////////////////////////////////////////////////////////////////////////////// 269 270GrTexture* GrContext::findAndRefTexture(const GrTextureDesc& desc, 271 const GrCacheID& cacheID, 272 const GrTextureParams* params) { 273 GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheID); 274 GrResource* resource = fTextureCache->find(resourceKey); 275 SkSafeRef(resource); 276 return static_cast<GrTexture*>(resource); 277} 278 279bool GrContext::isTextureInCache(const GrTextureDesc& desc, 280 const GrCacheID& cacheID, 281 const GrTextureParams* params) const { 282 GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheID); 283 return fTextureCache->hasKey(resourceKey); 284} 285 286void GrContext::addStencilBuffer(GrStencilBuffer* sb) { 287 ASSERT_OWNED_RESOURCE(sb); 288 289 GrResourceKey resourceKey = GrStencilBuffer::ComputeKey(sb->width(), 290 sb->height(), 291 sb->numSamples()); 292 fTextureCache->addResource(resourceKey, sb); 293} 294 295GrStencilBuffer* GrContext::findStencilBuffer(int width, int height, 296 int sampleCnt) { 297 GrResourceKey resourceKey = GrStencilBuffer::ComputeKey(width, 298 height, 299 sampleCnt); 300 GrResource* resource = fTextureCache->find(resourceKey); 301 return static_cast<GrStencilBuffer*>(resource); 302} 303 304static void stretchImage(void* dst, 305 int dstW, 306 int dstH, 307 void* src, 308 int srcW, 309 int srcH, 310 int bpp) { 311 GrFixed dx = (srcW << 16) / dstW; 312 GrFixed dy = (srcH << 16) / dstH; 313 314 
GrFixed y = dy >> 1; 315 316 int dstXLimit = dstW*bpp; 317 for (int j = 0; j < dstH; ++j) { 318 GrFixed x = dx >> 1; 319 void* srcRow = (uint8_t*)src + (y>>16)*srcW*bpp; 320 void* dstRow = (uint8_t*)dst + j*dstW*bpp; 321 for (int i = 0; i < dstXLimit; i += bpp) { 322 memcpy((uint8_t*) dstRow + i, 323 (uint8_t*) srcRow + (x>>16)*bpp, 324 bpp); 325 x += dx; 326 } 327 y += dy; 328 } 329} 330 331namespace { 332 333// position + local coordinate 334extern const GrVertexAttrib gVertexAttribs[] = { 335 {kVec2f_GrVertexAttribType, 0, kPosition_GrVertexAttribBinding}, 336 {kVec2f_GrVertexAttribType, sizeof(GrPoint), kLocalCoord_GrVertexAttribBinding} 337}; 338 339}; 340 341// The desired texture is NPOT and tiled but that isn't supported by 342// the current hardware. Resize the texture to be a POT 343GrTexture* GrContext::createResizedTexture(const GrTextureDesc& desc, 344 const GrCacheID& cacheID, 345 void* srcData, 346 size_t rowBytes, 347 bool needsFiltering) { 348 SkAutoTUnref<GrTexture> clampedTexture(this->findAndRefTexture(desc, cacheID, NULL)); 349 if (NULL == clampedTexture) { 350 clampedTexture.reset(this->createTexture(NULL, desc, cacheID, srcData, rowBytes)); 351 352 if (NULL == clampedTexture) { 353 return NULL; 354 } 355 } 356 357 GrTextureDesc rtDesc = desc; 358 rtDesc.fFlags = rtDesc.fFlags | 359 kRenderTarget_GrTextureFlagBit | 360 kNoStencil_GrTextureFlagBit; 361 rtDesc.fWidth = GrNextPow2(GrMax(desc.fWidth, 64)); 362 rtDesc.fHeight = GrNextPow2(GrMax(desc.fHeight, 64)); 363 364 GrTexture* texture = fGpu->createTexture(rtDesc, NULL, 0); 365 366 if (NULL != texture) { 367 GrDrawTarget::AutoStateRestore asr(fGpu, GrDrawTarget::kReset_ASRInit); 368 GrDrawState* drawState = fGpu->drawState(); 369 drawState->setRenderTarget(texture->asRenderTarget()); 370 371 // if filtering is not desired then we want to ensure all 372 // texels in the resampled image are copies of texels from 373 // the original. 
374 GrTextureParams params(SkShader::kClamp_TileMode, needsFiltering); 375 drawState->createTextureEffect(0, clampedTexture, SkMatrix::I(), params); 376 377 drawState->setVertexAttribs<gVertexAttribs>(SK_ARRAY_COUNT(gVertexAttribs)); 378 379 GrDrawTarget::AutoReleaseGeometry arg(fGpu, 4, 0); 380 381 if (arg.succeeded()) { 382 GrPoint* verts = (GrPoint*) arg.vertices(); 383 verts[0].setIRectFan(0, 0, texture->width(), texture->height(), 2 * sizeof(GrPoint)); 384 verts[1].setIRectFan(0, 0, 1, 1, 2 * sizeof(GrPoint)); 385 fGpu->drawNonIndexed(kTriangleFan_GrPrimitiveType, 0, 4); 386 } 387 } else { 388 // TODO: Our CPU stretch doesn't filter. But we create separate 389 // stretched textures when the texture params is either filtered or 390 // not. Either implement filtered stretch blit on CPU or just create 391 // one when FBO case fails. 392 393 rtDesc.fFlags = kNone_GrTextureFlags; 394 // no longer need to clamp at min RT size. 395 rtDesc.fWidth = GrNextPow2(desc.fWidth); 396 rtDesc.fHeight = GrNextPow2(desc.fHeight); 397 int bpp = GrBytesPerPixel(desc.fConfig); 398 SkAutoSMalloc<128*128*4> stretchedPixels(bpp * rtDesc.fWidth * rtDesc.fHeight); 399 stretchImage(stretchedPixels.get(), rtDesc.fWidth, rtDesc.fHeight, 400 srcData, desc.fWidth, desc.fHeight, bpp); 401 402 size_t stretchedRowBytes = rtDesc.fWidth * bpp; 403 404 SkDEBUGCODE(GrTexture* texture = )fGpu->createTexture(rtDesc, stretchedPixels.get(), 405 stretchedRowBytes); 406 GrAssert(NULL != texture); 407 } 408 409 return texture; 410} 411 412GrTexture* GrContext::createTexture(const GrTextureParams* params, 413 const GrTextureDesc& desc, 414 const GrCacheID& cacheID, 415 void* srcData, 416 size_t rowBytes) { 417 SK_TRACE_EVENT0("GrContext::createTexture"); 418 419 GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheID); 420 421 GrTexture* texture; 422 if (GrTexture::NeedsResizing(resourceKey)) { 423 texture = this->createResizedTexture(desc, cacheID, 424 srcData, rowBytes, 425 
GrTexture::NeedsFiltering(resourceKey)); 426 } else { 427 texture= fGpu->createTexture(desc, srcData, rowBytes); 428 } 429 430 if (NULL != texture) { 431 fTextureCache->addResource(resourceKey, texture); 432 } 433 434 return texture; 435} 436 437GrTexture* GrContext::lockAndRefScratchTexture(const GrTextureDesc& inDesc, ScratchTexMatch match) { 438 GrTextureDesc desc = inDesc; 439 440 GrAssert((desc.fFlags & kRenderTarget_GrTextureFlagBit) || 441 !(desc.fFlags & kNoStencil_GrTextureFlagBit)); 442 443 if (kApprox_ScratchTexMatch == match) { 444 // bin by pow2 with a reasonable min 445 static const int MIN_SIZE = 16; 446 desc.fWidth = GrMax(MIN_SIZE, GrNextPow2(desc.fWidth)); 447 desc.fHeight = GrMax(MIN_SIZE, GrNextPow2(desc.fHeight)); 448 } 449 450 // Renderable A8 targets are not universally supported (e.g., not on ANGLE) 451 GrAssert(this->isConfigRenderable(kAlpha_8_GrPixelConfig) || 452 !(desc.fFlags & kRenderTarget_GrTextureFlagBit) || 453 (desc.fConfig != kAlpha_8_GrPixelConfig)); 454 455 GrResource* resource = NULL; 456 int origWidth = desc.fWidth; 457 int origHeight = desc.fHeight; 458 459 do { 460 GrResourceKey key = GrTexture::ComputeScratchKey(desc); 461 // Ensure we have exclusive access to the texture so future 'find' calls don't return it 462 resource = fTextureCache->find(key, GrResourceCache::kHide_OwnershipFlag); 463 if (NULL != resource) { 464 resource->ref(); 465 break; 466 } 467 if (kExact_ScratchTexMatch == match) { 468 break; 469 } 470 // We had a cache miss and we are in approx mode, relax the fit of the flags. 471 472 // We no longer try to reuse textures that were previously used as render targets in 473 // situations where no RT is needed; doing otherwise can confuse the video driver and 474 // cause significant performance problems in some cases. 
475 if (desc.fFlags & kNoStencil_GrTextureFlagBit) { 476 desc.fFlags = desc.fFlags & ~kNoStencil_GrTextureFlagBit; 477 } else { 478 break; 479 } 480 481 } while (true); 482 483 if (NULL == resource) { 484 desc.fFlags = inDesc.fFlags; 485 desc.fWidth = origWidth; 486 desc.fHeight = origHeight; 487 GrTexture* texture = fGpu->createTexture(desc, NULL, 0); 488 if (NULL != texture) { 489 GrResourceKey key = GrTexture::ComputeScratchKey(texture->desc()); 490 // Make the resource exclusive so future 'find' calls don't return it 491 fTextureCache->addResource(key, texture, GrResourceCache::kHide_OwnershipFlag); 492 resource = texture; 493 } 494 } 495 496 return static_cast<GrTexture*>(resource); 497} 498 499void GrContext::addExistingTextureToCache(GrTexture* texture) { 500 501 if (NULL == texture) { 502 return; 503 } 504 505 // This texture should already have a cache entry since it was once 506 // attached 507 GrAssert(NULL != texture->getCacheEntry()); 508 509 // Conceptually, the cache entry is going to assume responsibility 510 // for the creation ref. 511 GrAssert(1 == texture->getRefCnt()); 512 513 // Since this texture came from an AutoScratchTexture it should 514 // still be in the exclusive pile 515 fTextureCache->makeNonExclusive(texture->getCacheEntry()); 516 517 this->purgeCache(); 518} 519 520 521void GrContext::unlockScratchTexture(GrTexture* texture) { 522 ASSERT_OWNED_RESOURCE(texture); 523 GrAssert(NULL != texture->getCacheEntry()); 524 525 // If this is a scratch texture we detached it from the cache 526 // while it was locked (to avoid two callers simultaneously getting 527 // the same texture). 
528 if (texture->getCacheEntry()->key().isScratch()) { 529 fTextureCache->makeNonExclusive(texture->getCacheEntry()); 530 } 531 532 this->purgeCache(); 533} 534 535void GrContext::purgeCache() { 536 if (NULL != fTextureCache) { 537 fTextureCache->purgeAsNeeded(); 538 } 539} 540 541GrTexture* GrContext::createUncachedTexture(const GrTextureDesc& descIn, 542 void* srcData, 543 size_t rowBytes) { 544 GrTextureDesc descCopy = descIn; 545 return fGpu->createTexture(descCopy, srcData, rowBytes); 546} 547 548void GrContext::getTextureCacheLimits(int* maxTextures, 549 size_t* maxTextureBytes) const { 550 fTextureCache->getLimits(maxTextures, maxTextureBytes); 551} 552 553void GrContext::setTextureCacheLimits(int maxTextures, size_t maxTextureBytes) { 554 fTextureCache->setLimits(maxTextures, maxTextureBytes); 555} 556 557int GrContext::getMaxTextureSize() const { 558 return fGpu->caps()->maxTextureSize(); 559} 560 561int GrContext::getMaxRenderTargetSize() const { 562 return fGpu->caps()->maxRenderTargetSize(); 563} 564 565int GrContext::getMaxSampleCount() const { 566 return fGpu->caps()->maxSampleCount(); 567} 568 569/////////////////////////////////////////////////////////////////////////////// 570 571GrTexture* GrContext::wrapBackendTexture(const GrBackendTextureDesc& desc) { 572 return fGpu->wrapBackendTexture(desc); 573} 574 575GrRenderTarget* GrContext::wrapBackendRenderTarget(const GrBackendRenderTargetDesc& desc) { 576 return fGpu->wrapBackendRenderTarget(desc); 577} 578 579/////////////////////////////////////////////////////////////////////////////// 580 581bool GrContext::supportsIndex8PixelConfig(const GrTextureParams* params, 582 int width, int height) const { 583 const GrDrawTargetCaps* caps = fGpu->caps(); 584 if (!caps->eightBitPaletteSupport()) { 585 return false; 586 } 587 588 bool isPow2 = GrIsPow2(width) && GrIsPow2(height); 589 590 if (!isPow2) { 591 bool tiled = NULL != params && params->isTiled(); 592 if (tiled && !caps->npotTextureTileSupport()) { 
593 return false; 594 } 595 } 596 return true; 597} 598 599 600//////////////////////////////////////////////////////////////////////////////// 601 602void GrContext::clear(const GrIRect* rect, 603 const GrColor color, 604 GrRenderTarget* target) { 605 this->prepareToDraw(NULL, BUFFERED_DRAW)->clear(rect, color, target); 606} 607 608void GrContext::drawPaint(const GrPaint& origPaint) { 609 // set rect to be big enough to fill the space, but not super-huge, so we 610 // don't overflow fixed-point implementations 611 GrRect r; 612 r.setLTRB(0, 0, 613 SkIntToScalar(getRenderTarget()->width()), 614 SkIntToScalar(getRenderTarget()->height())); 615 SkMatrix inverse; 616 SkTCopyOnFirstWrite<GrPaint> paint(origPaint); 617 AutoMatrix am; 618 619 // We attempt to map r by the inverse matrix and draw that. mapRect will 620 // map the four corners and bound them with a new rect. This will not 621 // produce a correct result for some perspective matrices. 622 if (!this->getMatrix().hasPerspective()) { 623 if (!fViewMatrix.invert(&inverse)) { 624 GrPrintf("Could not invert matrix\n"); 625 return; 626 } 627 inverse.mapRect(&r); 628 } else { 629 if (!am.setIdentity(this, paint.writable())) { 630 GrPrintf("Could not invert matrix\n"); 631 return; 632 } 633 } 634 // by definition this fills the entire clip, no need for AA 635 if (paint->isAntiAlias()) { 636 paint.writable()->setAntiAlias(false); 637 } 638 this->drawRect(*paint, r); 639} 640 641//////////////////////////////////////////////////////////////////////////////// 642 643namespace { 644inline bool disable_coverage_aa_for_blend(GrDrawTarget* target) { 645 return DISABLE_COVERAGE_AA_FOR_BLEND && !target->canApplyCoverage(); 646} 647} 648 649//////////////////////////////////////////////////////////////////////////////// 650 651/* create a triangle strip that strokes the specified triangle. There are 8 652 unique vertices, but we repreat the last 2 to close up. 
Alternatively we 653 could use an indices array, and then only send 8 verts, but not sure that 654 would be faster. 655 */ 656static void setStrokeRectStrip(GrPoint verts[10], GrRect rect, 657 SkScalar width) { 658 const SkScalar rad = SkScalarHalf(width); 659 rect.sort(); 660 661 verts[0].set(rect.fLeft + rad, rect.fTop + rad); 662 verts[1].set(rect.fLeft - rad, rect.fTop - rad); 663 verts[2].set(rect.fRight - rad, rect.fTop + rad); 664 verts[3].set(rect.fRight + rad, rect.fTop - rad); 665 verts[4].set(rect.fRight - rad, rect.fBottom - rad); 666 verts[5].set(rect.fRight + rad, rect.fBottom + rad); 667 verts[6].set(rect.fLeft + rad, rect.fBottom - rad); 668 verts[7].set(rect.fLeft - rad, rect.fBottom + rad); 669 verts[8] = verts[0]; 670 verts[9] = verts[1]; 671} 672 673static bool isIRect(const GrRect& r) { 674 return SkScalarIsInt(r.fLeft) && SkScalarIsInt(r.fTop) && 675 SkScalarIsInt(r.fRight) && SkScalarIsInt(r.fBottom); 676} 677 678static bool apply_aa_to_rect(GrDrawTarget* target, 679 const GrRect& rect, 680 SkScalar strokeWidth, 681 const SkMatrix* matrix, 682 SkMatrix* combinedMatrix, 683 GrRect* devRect, 684 bool* useVertexCoverage) { 685 // we use a simple coverage ramp to do aa on axis-aligned rects 686 // we check if the rect will be axis-aligned, and the rect won't land on 687 // integer coords. 688 689 // we are keeping around the "tweak the alpha" trick because 690 // it is our only hope for the fixed-pipe implementation. 
691 // In a shader implementation we can give a separate coverage input 692 // TODO: remove this ugliness when we drop the fixed-pipe impl 693 *useVertexCoverage = false; 694 if (!target->getDrawState().canTweakAlphaForCoverage()) { 695 if (disable_coverage_aa_for_blend(target)) { 696#if GR_DEBUG 697 //GrPrintf("Turning off AA to correctly apply blend.\n"); 698#endif 699 return false; 700 } else { 701 *useVertexCoverage = true; 702 } 703 } 704 const GrDrawState& drawState = target->getDrawState(); 705 if (drawState.getRenderTarget()->isMultisampled()) { 706 return false; 707 } 708 709 if (0 == strokeWidth && target->willUseHWAALines()) { 710 return false; 711 } 712 713#if defined(SHADER_AA_FILL_RECT) || !defined(IGNORE_ROT_AA_RECT_OPT) 714 if (strokeWidth >= 0) { 715#endif 716 if (!drawState.getViewMatrix().preservesAxisAlignment()) { 717 return false; 718 } 719 720 if (NULL != matrix && !matrix->preservesAxisAlignment()) { 721 return false; 722 } 723#if defined(SHADER_AA_FILL_RECT) || !defined(IGNORE_ROT_AA_RECT_OPT) 724 } else { 725 if (!drawState.getViewMatrix().preservesAxisAlignment() && 726 !drawState.getViewMatrix().preservesRightAngles()) { 727 return false; 728 } 729 730 if (NULL != matrix && !matrix->preservesRightAngles()) { 731 return false; 732 } 733 } 734#endif 735 736 *combinedMatrix = drawState.getViewMatrix(); 737 if (NULL != matrix) { 738 combinedMatrix->preConcat(*matrix); 739 740#if GR_DEBUG 741#if defined(SHADER_AA_FILL_RECT) || !defined(IGNORE_ROT_AA_RECT_OPT) 742 if (strokeWidth >= 0) { 743#endif 744 GrAssert(combinedMatrix->preservesAxisAlignment()); 745#if defined(SHADER_AA_FILL_RECT) || !defined(IGNORE_ROT_AA_RECT_OPT) 746 } else { 747 GrAssert(combinedMatrix->preservesRightAngles()); 748 } 749#endif 750#endif 751 } 752 753 combinedMatrix->mapRect(devRect, rect); 754 755 if (strokeWidth < 0) { 756 return !isIRect(*devRect); 757 } else { 758 return true; 759 } 760} 761 762void GrContext::drawRect(const GrPaint& paint, 763 const GrRect& 
rect, 764 SkScalar width, 765 const SkMatrix* matrix) { 766 SK_TRACE_EVENT0("GrContext::drawRect"); 767 768 GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW); 769 GrDrawState::AutoStageDisable atr(fDrawState); 770 771 GrRect devRect; 772 SkMatrix combinedMatrix; 773 bool useVertexCoverage; 774 bool needAA = paint.isAntiAlias() && 775 !target->getDrawState().getRenderTarget()->isMultisampled(); 776 bool doAA = needAA && apply_aa_to_rect(target, rect, width, matrix, 777 &combinedMatrix, &devRect, 778 &useVertexCoverage); 779 if (doAA) { 780 GrDrawState::AutoViewMatrixRestore avmr; 781 if (!avmr.setIdentity(target->drawState())) { 782 return; 783 } 784 if (width >= 0) { 785 fAARectRenderer->strokeAARect(this->getGpu(), target, 786 rect, combinedMatrix, devRect, 787 width, useVertexCoverage); 788 } else { 789 // filled AA rect 790 fAARectRenderer->fillAARect(this->getGpu(), target, 791 rect, combinedMatrix, devRect, 792 useVertexCoverage); 793 } 794 return; 795 } 796 797 if (width >= 0) { 798 // TODO: consider making static vertex buffers for these cases. 
799 // Hairline could be done by just adding closing vertex to 800 // unitSquareVertexBuffer() 801 802 static const int worstCaseVertCount = 10; 803 target->drawState()->setDefaultVertexAttribs(); 804 GrDrawTarget::AutoReleaseGeometry geo(target, worstCaseVertCount, 0); 805 806 if (!geo.succeeded()) { 807 GrPrintf("Failed to get space for vertices!\n"); 808 return; 809 } 810 811 GrPrimitiveType primType; 812 int vertCount; 813 GrPoint* vertex = geo.positions(); 814 815 if (width > 0) { 816 vertCount = 10; 817 primType = kTriangleStrip_GrPrimitiveType; 818 setStrokeRectStrip(vertex, rect, width); 819 } else { 820 // hairline 821 vertCount = 5; 822 primType = kLineStrip_GrPrimitiveType; 823 vertex[0].set(rect.fLeft, rect.fTop); 824 vertex[1].set(rect.fRight, rect.fTop); 825 vertex[2].set(rect.fRight, rect.fBottom); 826 vertex[3].set(rect.fLeft, rect.fBottom); 827 vertex[4].set(rect.fLeft, rect.fTop); 828 } 829 830 GrDrawState::AutoViewMatrixRestore avmr; 831 if (NULL != matrix) { 832 GrDrawState* drawState = target->drawState(); 833 avmr.set(drawState, *matrix); 834 } 835 836 target->drawNonIndexed(primType, 0, vertCount); 837 } else { 838 // filled BW rect 839 target->drawSimpleRect(rect, matrix); 840 } 841} 842 843void GrContext::drawRectToRect(const GrPaint& paint, 844 const GrRect& dstRect, 845 const GrRect& localRect, 846 const SkMatrix* dstMatrix, 847 const SkMatrix* localMatrix) { 848 SK_TRACE_EVENT0("GrContext::drawRectToRect"); 849 GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW); 850 GrDrawState::AutoStageDisable atr(fDrawState); 851 852 target->drawRect(dstRect, dstMatrix, &localRect, localMatrix); 853} 854 855namespace { 856 857extern const GrVertexAttrib gPosUVColorAttribs[] = { 858 {kVec2f_GrVertexAttribType, 0, kPosition_GrVertexAttribBinding }, 859 {kVec2f_GrVertexAttribType, sizeof(GrPoint), kLocalCoord_GrVertexAttribBinding }, 860 {kVec4ub_GrVertexAttribType, 2*sizeof(GrPoint), kColor_GrVertexAttribBinding} 861}; 862 863extern const 
GrVertexAttrib gPosColorAttribs[] = { 864 {kVec2f_GrVertexAttribType, 0, kPosition_GrVertexAttribBinding}, 865 {kVec4ub_GrVertexAttribType, sizeof(GrPoint), kColor_GrVertexAttribBinding}, 866}; 867 868static void set_vertex_attributes(GrDrawState* drawState, 869 const GrPoint* texCoords, 870 const GrColor* colors, 871 int* colorOffset, 872 int* texOffset) { 873 *texOffset = -1; 874 *colorOffset = -1; 875 876 if (NULL != texCoords && NULL != colors) { 877 *texOffset = sizeof(GrPoint); 878 *colorOffset = 2*sizeof(GrPoint); 879 drawState->setVertexAttribs<gPosUVColorAttribs>(3); 880 } else if (NULL != texCoords) { 881 *texOffset = sizeof(GrPoint); 882 drawState->setVertexAttribs<gPosUVColorAttribs>(2); 883 } else if (NULL != colors) { 884 *colorOffset = sizeof(GrPoint); 885 drawState->setVertexAttribs<gPosColorAttribs>(2); 886 } else { 887 drawState->setVertexAttribs<gPosColorAttribs>(1); 888 } 889} 890 891}; 892 893void GrContext::drawVertices(const GrPaint& paint, 894 GrPrimitiveType primitiveType, 895 int vertexCount, 896 const GrPoint positions[], 897 const GrPoint texCoords[], 898 const GrColor colors[], 899 const uint16_t indices[], 900 int indexCount) { 901 SK_TRACE_EVENT0("GrContext::drawVertices"); 902 903 GrDrawTarget::AutoReleaseGeometry geo; 904 905 GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW); 906 GrDrawState::AutoStageDisable atr(fDrawState); 907 908 GrDrawState* drawState = target->drawState(); 909 910 int colorOffset = -1, texOffset = -1; 911 set_vertex_attributes(drawState, texCoords, colors, &colorOffset, &texOffset); 912 913 size_t vertexSize = drawState->getVertexSize(); 914 if (sizeof(GrPoint) != vertexSize) { 915 if (!geo.set(target, vertexCount, 0)) { 916 GrPrintf("Failed to get space for vertices!\n"); 917 return; 918 } 919 void* curVertex = geo.vertices(); 920 921 for (int i = 0; i < vertexCount; ++i) { 922 *((GrPoint*)curVertex) = positions[i]; 923 924 if (texOffset >= 0) { 925 *(GrPoint*)((intptr_t)curVertex + texOffset) 
= texCoords[i]; 926 } 927 if (colorOffset >= 0) { 928 *(GrColor*)((intptr_t)curVertex + colorOffset) = colors[i]; 929 } 930 curVertex = (void*)((intptr_t)curVertex + vertexSize); 931 } 932 } else { 933 target->setVertexSourceToArray(positions, vertexCount); 934 } 935 936 // we don't currently apply offscreen AA to this path. Need improved 937 // management of GrDrawTarget's geometry to avoid copying points per-tile. 938 939 if (NULL != indices) { 940 target->setIndexSourceToArray(indices, indexCount); 941 target->drawIndexed(primitiveType, 0, 0, vertexCount, indexCount); 942 target->resetIndexSource(); 943 } else { 944 target->drawNonIndexed(primitiveType, 0, vertexCount); 945 } 946} 947 948/////////////////////////////////////////////////////////////////////////////// 949 950void GrContext::drawRRect(const GrPaint& paint, 951 const SkRRect& rect, 952 const SkStrokeRec& stroke) { 953 954 GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW); 955 GrDrawState::AutoStageDisable atr(fDrawState); 956 957 bool useAA = paint.isAntiAlias() && 958 !target->getDrawState().getRenderTarget()->isMultisampled() && 959 !disable_coverage_aa_for_blend(target); 960 961 if (!fOvalRenderer->drawSimpleRRect(target, this, useAA, rect, stroke)) { 962 SkPath path; 963 path.addRRect(rect); 964 this->internalDrawPath(target, useAA, path, stroke); 965 } 966} 967 968/////////////////////////////////////////////////////////////////////////////// 969 970void GrContext::drawOval(const GrPaint& paint, 971 const GrRect& oval, 972 const SkStrokeRec& stroke) { 973 974 GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW); 975 GrDrawState::AutoStageDisable atr(fDrawState); 976 977 bool useAA = paint.isAntiAlias() && 978 !target->getDrawState().getRenderTarget()->isMultisampled() && 979 !disable_coverage_aa_for_blend(target); 980 981 if (!fOvalRenderer->drawOval(target, this, useAA, oval, stroke)) { 982 SkPath path; 983 path.addOval(oval); 984 this->internalDrawPath(target, 
useAA, path, stroke); 985 } 986} 987 988namespace { 989 990// Can 'path' be drawn as a pair of filled nested rectangles? 991static bool is_nested_rects(GrDrawTarget* target, 992 const SkPath& path, 993 const SkStrokeRec& stroke, 994 SkRect rects[2], 995 bool* useVertexCoverage) { 996 SkASSERT(stroke.isFillStyle()); 997 998 if (path.isInverseFillType()) { 999 return false; 1000 } 1001 1002 const GrDrawState& drawState = target->getDrawState(); 1003 1004 // TODO: this restriction could be lifted if we were willing to apply 1005 // the matrix to all the points individually rather than just to the rect 1006 if (!drawState.getViewMatrix().preservesAxisAlignment()) { 1007 return false; 1008 } 1009 1010 *useVertexCoverage = false; 1011 if (!target->getDrawState().canTweakAlphaForCoverage()) { 1012 if (disable_coverage_aa_for_blend(target)) { 1013 return false; 1014 } else { 1015 *useVertexCoverage = true; 1016 } 1017 } 1018 1019 SkPath::Direction dirs[2]; 1020 if (!path.isNestedRects(rects, dirs)) { 1021 return false; 1022 } 1023 1024 if (SkPath::kWinding_FillType == path.getFillType()) { 1025 // The two rects need to be wound opposite to each other 1026 return dirs[0] != dirs[1]; 1027 } else { 1028 return true; 1029 } 1030} 1031 1032}; 1033 1034void GrContext::drawPath(const GrPaint& paint, const SkPath& path, const SkStrokeRec& stroke) { 1035 1036 if (path.isEmpty()) { 1037 if (path.isInverseFillType()) { 1038 this->drawPaint(paint); 1039 } 1040 return; 1041 } 1042 1043 // Note that internalDrawPath may sw-rasterize the path into a scratch texture. 1044 // Scratch textures can be recycled after they are returned to the texture 1045 // cache. This presents a potential hazard for buffered drawing. However, 1046 // the writePixels that uploads to the scratch will perform a flush so we're 1047 // OK. 
    GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW);
    GrDrawState::AutoStageDisable atr(fDrawState);

    bool useAA = paint.isAntiAlias() && !target->getDrawState().getRenderTarget()->isMultisampled();
    // NOTE(review): negative stroke width appears to denote fill style here —
    // confirm against SkStrokeRec's conventions.
    if (useAA && stroke.getWidth() < 0 && !path.isConvex()) {
        // Concave AA paths are expensive - try to avoid them for special cases
        bool useVertexCoverage;
        SkRect rects[2];

        if (is_nested_rects(target, path, stroke, rects, &useVertexCoverage)) {
            // Draw in device space with an identity view matrix; the original
            // matrix is applied to the rects by the AA rect renderer instead.
            SkMatrix origViewMatrix = target->getDrawState().getViewMatrix();
            GrDrawState::AutoViewMatrixRestore avmr;
            if (!avmr.setIdentity(target->drawState())) {
                return;
            }

            fAARectRenderer->fillAANestedRects(this->getGpu(), target,
                                               rects,
                                               origViewMatrix,
                                               useVertexCoverage);
            return;
        }
    }

    SkRect ovalRect;
    bool isOval = path.isOval(&ovalRect);

    // Try the oval fast path; otherwise fall through to generic path drawing.
    if (!isOval || path.isInverseFillType()
        || !fOvalRenderer->drawOval(target, this, useAA, ovalRect, stroke)) {
        this->internalDrawPath(target, useAA, path, stroke);
    }
}

// Selects a path renderer (possibly stroking the path first, possibly the SW
// fallback) and draws 'path' with it. 'target' must have been prepared by
// prepareToDraw.
void GrContext::internalDrawPath(GrDrawTarget* target, bool useAA, const SkPath& path,
                                 const SkStrokeRec& stroke) {

    // An Assumption here is that path renderer would use some form of tweaking
    // the src color (either the input alpha or in the frag shader) to implement
    // aa. If we have some future driver-mojo path AA that can do the right
    // thing WRT to the blend then we'll need some query on the PR.
    if (disable_coverage_aa_for_blend(target)) {
#if GR_DEBUG
        //GrPrintf("Turning off AA to correctly apply blend.\n");
#endif
        useAA = false;
    }

    GrPathRendererChain::DrawType type = useAA ? GrPathRendererChain::kColorAntiAlias_DrawType :
                                                 GrPathRendererChain::kColor_DrawType;

    const SkPath* pathPtr = &path;
    SkPath tmpPath;
    SkStrokeRec strokeRec(stroke);

    // Try a 1st time without stroking the path and without allowing the SW renderer
    GrPathRenderer* pr = this->getPathRenderer(*pathPtr, strokeRec, target, false, type);

    if (NULL == pr) {
        if (!strokeRec.isHairlineStyle()) {
            // It didn't work the 1st time, so try again with the stroked path
            if (strokeRec.applyToPath(&tmpPath, *pathPtr)) {
                pathPtr = &tmpPath;
                strokeRec.setFillStyle();
            }
        }
        // This time, allow SW renderer
        pr = this->getPathRenderer(*pathPtr, strokeRec, target, true, type);
    }

    if (NULL == pr) {
#if GR_DEBUG
        GrPrintf("Unable to find path renderer compatible with path.\n");
#endif
        return;
    }

    pr->drawPath(*pathPtr, strokeRec, target, useAA);
}

////////////////////////////////////////////////////////////////////////////////

// Flushes (or discards) any buffered drawing. kDiscard_FlushBit drops queued
// draws instead of executing them.
void GrContext::flush(int flagsBitfield) {
    if (kDiscard_FlushBit & flagsBitfield) {
        fDrawBuffer->reset();
    } else {
        this->flushDrawBuffer();
    }
    // TODO: Remove this flag
    if (kForceCurrentRenderTarget_FlushBit & flagsBitfield) {
        fGpu->drawState()->setRenderTarget(this->getRenderTarget());
        fGpu->forceRenderTargetFlush();
    }
}

// Plays back the in-order draw buffer; the isFlushing() check guards against
// re-entrant flushes triggered during playback.
void GrContext::flushDrawBuffer() {
    if (NULL != fDrawBuffer && !fDrawBuffer->isFlushing()) {
        fDrawBuffer->flush();
    }
}

// Uploads pixels to 'texture'. When the GPU can't take the pixels directly
// (or an unpremul step is requested) this is rerouted through
// writeRenderTargetPixels, which requires the texture to be a render target.
bool GrContext::writeTexturePixels(GrTexture* texture,
                                   int left, int top, int width, int height,
                                   GrPixelConfig config, const void* buffer, size_t rowBytes,
                                   uint32_t flags) {
    SK_TRACE_EVENT0("GrContext::writeTexturePixels");
    ASSERT_OWNED_RESOURCE(texture);

    if ((kUnpremul_PixelOpsFlag & flags) || !fGpu->canWriteTexturePixels(texture, config)) {
        if (NULL !=
texture->asRenderTarget()) { 1157 return this->writeRenderTargetPixels(texture->asRenderTarget(), 1158 left, top, width, height, 1159 config, buffer, rowBytes, flags); 1160 } else { 1161 return false; 1162 } 1163 } 1164 1165 if (!(kDontFlush_PixelOpsFlag & flags)) { 1166 this->flush(); 1167 } 1168 1169 return fGpu->writeTexturePixels(texture, left, top, width, height, 1170 config, buffer, rowBytes); 1171} 1172 1173bool GrContext::readTexturePixels(GrTexture* texture, 1174 int left, int top, int width, int height, 1175 GrPixelConfig config, void* buffer, size_t rowBytes, 1176 uint32_t flags) { 1177 SK_TRACE_EVENT0("GrContext::readTexturePixels"); 1178 ASSERT_OWNED_RESOURCE(texture); 1179 1180 // TODO: code read pixels for textures that aren't also rendertargets 1181 GrRenderTarget* target = texture->asRenderTarget(); 1182 if (NULL != target) { 1183 return this->readRenderTargetPixels(target, 1184 left, top, width, height, 1185 config, buffer, rowBytes, 1186 flags); 1187 } else { 1188 return false; 1189 } 1190} 1191 1192#include "SkConfig8888.h" 1193 1194namespace { 1195/** 1196 * Converts a GrPixelConfig to a SkCanvas::Config8888. Only byte-per-channel 1197 * formats are representable as Config8888 and so the function returns false 1198 * if the GrPixelConfig has no equivalent Config8888. 
1199 */ 1200bool grconfig_to_config8888(GrPixelConfig config, 1201 bool unpremul, 1202 SkCanvas::Config8888* config8888) { 1203 switch (config) { 1204 case kRGBA_8888_GrPixelConfig: 1205 if (unpremul) { 1206 *config8888 = SkCanvas::kRGBA_Unpremul_Config8888; 1207 } else { 1208 *config8888 = SkCanvas::kRGBA_Premul_Config8888; 1209 } 1210 return true; 1211 case kBGRA_8888_GrPixelConfig: 1212 if (unpremul) { 1213 *config8888 = SkCanvas::kBGRA_Unpremul_Config8888; 1214 } else { 1215 *config8888 = SkCanvas::kBGRA_Premul_Config8888; 1216 } 1217 return true; 1218 default: 1219 return false; 1220 } 1221} 1222 1223// It returns a configuration with where the byte position of the R & B components are swapped in 1224// relation to the input config. This should only be called with the result of 1225// grconfig_to_config8888 as it will fail for other configs. 1226SkCanvas::Config8888 swap_config8888_red_and_blue(SkCanvas::Config8888 config8888) { 1227 switch (config8888) { 1228 case SkCanvas::kBGRA_Premul_Config8888: 1229 return SkCanvas::kRGBA_Premul_Config8888; 1230 case SkCanvas::kBGRA_Unpremul_Config8888: 1231 return SkCanvas::kRGBA_Unpremul_Config8888; 1232 case SkCanvas::kRGBA_Premul_Config8888: 1233 return SkCanvas::kBGRA_Premul_Config8888; 1234 case SkCanvas::kRGBA_Unpremul_Config8888: 1235 return SkCanvas::kBGRA_Unpremul_Config8888; 1236 default: 1237 GrCrash("Unexpected input"); 1238 return SkCanvas::kBGRA_Unpremul_Config8888;; 1239 } 1240} 1241} 1242 1243bool GrContext::readRenderTargetPixels(GrRenderTarget* target, 1244 int left, int top, int width, int height, 1245 GrPixelConfig dstConfig, void* buffer, size_t rowBytes, 1246 uint32_t flags) { 1247 SK_TRACE_EVENT0("GrContext::readRenderTargetPixels"); 1248 ASSERT_OWNED_RESOURCE(target); 1249 1250 if (NULL == target) { 1251 target = fRenderTarget.get(); 1252 if (NULL == target) { 1253 return false; 1254 } 1255 } 1256 1257 if (!(kDontFlush_PixelOpsFlag & flags)) { 1258 this->flush(); 1259 } 1260 1261 // Determine 
// which conversions have to be applied: flipY, swapRAnd, and/or unpremul.

    // If fGpu->readPixels would incur a y-flip cost then we will read the pixels upside down. We'll
    // either do the flipY by drawing into a scratch with a matrix or on the cpu after the read.
    bool flipY = fGpu->readPixelsWillPayForYFlip(target, left, top,
                                                 width, height, dstConfig,
                                                 rowBytes);
    // We ignore the preferred config if it is different than our config unless it is an R/B swap.
    // In that case we'll perform an R and B swap while drawing to a scratch texture of the swapped
    // config. Then we will call readPixels on the scratch with the swapped config. The swaps during
    // the draw cancels out the fact that we call readPixels with a config that is R/B swapped from
    // dstConfig.
    GrPixelConfig readConfig = dstConfig;
    bool swapRAndB = false;
    if (GrPixelConfigSwapRAndB(dstConfig) ==
        fGpu->preferredReadPixelsConfig(dstConfig, target->config())) {
        readConfig = GrPixelConfigSwapRAndB(readConfig);
        swapRAndB = true;
    }

    bool unpremul = SkToBool(kUnpremul_PixelOpsFlag & flags);

    if (unpremul && !GrPixelConfigIs8888(dstConfig)) {
        // The unpremul flag is only allowed for these two configs.
        return false;
    }

    // If the src is a texture and we would have to do conversions after read pixels, we instead
    // do the conversions by drawing the src to a scratch texture. If we handle any of the
    // conversions in the draw we set the corresponding bool to false so that we don't reapply it
    // on the read back pixels.
    GrTexture* src = target->asTexture();
    GrAutoScratchTexture ast;
    if (NULL != src && (swapRAndB || unpremul || flipY)) {
        // Make the scratch a render target because we don't have a robust readTexturePixels as of
        // yet. It calls this function.
        GrTextureDesc desc;
        desc.fFlags = kRenderTarget_GrTextureFlagBit;
        desc.fWidth = width;
        desc.fHeight = height;
        desc.fConfig = readConfig;
        desc.fOrigin = kTopLeft_GrSurfaceOrigin;

        // When a full read back is faster than a partial we could always make the scratch exactly
        // match the passed rect. However, if we see many different size rectangles we will trash
        // our texture cache and pay the cost of creating and destroying many textures. So, we only
        // request an exact match when the caller is reading an entire RT.
        ScratchTexMatch match = kApprox_ScratchTexMatch;
        if (0 == left &&
            0 == top &&
            target->width() == width &&
            target->height() == height &&
            fGpu->fullReadPixelsIsFasterThanPartial()) {
            match = kExact_ScratchTexMatch;
        }
        ast.set(this, desc, match);
        GrTexture* texture = ast.texture();
        if (texture) {
            // compute a matrix to perform the draw
            SkMatrix textureMatrix;
            textureMatrix.setTranslate(SK_Scalar1 *left, SK_Scalar1 *top);
            textureMatrix.postIDiv(src->width(), src->height());

            SkAutoTUnref<const GrEffectRef> effect;
            if (unpremul) {
                effect.reset(this->createPMToUPMEffect(src, swapRAndB, textureMatrix));
                if (NULL != effect) {
                    unpremul = false; // we no longer need to do this on CPU after the read back.
                }
            }
            // If we failed to create a PM->UPM effect and have no other conversions to perform then
            // there is no longer any point to using the scratch.
            if (NULL != effect || flipY || swapRAndB) {
                if (!effect) {
                    // No unpremul needed on the GPU; use a plain (possibly
                    // R/B-swapping) config conversion for the draw.
                    effect.reset(GrConfigConversionEffect::Create(
                                                    src,
                                                    swapRAndB,
                                                    GrConfigConversionEffect::kNone_PMConversion,
                                                    textureMatrix));
                }
                swapRAndB = false; // we will handle the swap in the draw.

                // We protect the existing geometry here since it may not be
                // clear to the caller that a draw operation (i.e., drawSimpleRect)
                // can be invoked in this method
                GrDrawTarget::AutoGeometryAndStatePush agasp(fGpu, GrDrawTarget::kReset_ASRInit);
                GrDrawState* drawState = fGpu->drawState();
                GrAssert(effect);
                drawState->setEffect(0, effect);

                drawState->setRenderTarget(texture->asRenderTarget());
                GrRect rect = GrRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height));
                fGpu->drawSimpleRect(rect, NULL);
                // we want to read back from the scratch's origin
                left = 0;
                top = 0;
                target = texture->asRenderTarget();
            }
        }
    }
    if (!fGpu->readPixels(target,
                          left, top, width, height,
                          readConfig, buffer, rowBytes)) {
        return false;
    }
    // Perform any conversions we weren't able to perform using a scratch texture.
    if (unpremul || swapRAndB) {
        // These are initialized to suppress a warning
        SkCanvas::Config8888 srcC8888 = SkCanvas::kNative_Premul_Config8888;
        SkCanvas::Config8888 dstC8888 = SkCanvas::kNative_Premul_Config8888;

        SkDEBUGCODE(bool c8888IsValid =) grconfig_to_config8888(dstConfig, false, &srcC8888);
        grconfig_to_config8888(dstConfig, unpremul, &dstC8888);

        if (swapRAndB) {
            GrAssert(c8888IsValid); // we should only do r/b swap on 8888 configs
            srcC8888 = swap_config8888_red_and_blue(srcC8888);
        }
        GrAssert(c8888IsValid);
        // In-place conversion: src and dst both point at 'buffer'.
        uint32_t* b32 = reinterpret_cast<uint32_t*>(buffer);
        SkConvertConfig8888Pixels(b32, rowBytes, dstC8888,
                                  b32, rowBytes, srcC8888,
                                  width, height);
    }
    return true;
}

// Resolves an MSAA render target so its contents can be sampled/read.
void GrContext::resolveRenderTarget(GrRenderTarget* target) {
    GrAssert(target);
    ASSERT_OWNED_RESOURCE(target);
    // In the future we may track whether there are any pending draws to this
    // target. We don't today so we always perform a flush.
// We don't promise
    // this to our clients, though.
    this->flush();
    fGpu->resolveRenderTarget(target);
}

// Copies 'src' into 'dst' by drawing a textured rect, optionally offset by
// 'topLeft' within the source.
void GrContext::copyTexture(GrTexture* src, GrRenderTarget* dst, const SkIPoint* topLeft) {
    if (NULL == src || NULL == dst) {
        return;
    }
    ASSERT_OWNED_RESOURCE(src);

    // Writes pending to the source texture are not tracked, so a flush
    // is required to ensure that the copy captures the most recent contents
    // of the source texture. See similar behavior in
    // GrContext::resolveRenderTarget.
    this->flush();

    GrDrawTarget::AutoStateRestore asr(fGpu, GrDrawTarget::kReset_ASRInit);
    GrDrawState* drawState = fGpu->drawState();
    drawState->setRenderTarget(dst);
    SkMatrix sampleM;
    sampleM.setIDiv(src->width(), src->height());
    SkIRect srcRect = SkIRect::MakeWH(dst->width(), dst->height());
    if (NULL != topLeft) {
        srcRect.offset(*topLeft);
    }
    SkIRect srcBounds = SkIRect::MakeWH(src->width(), src->height());
    if (!srcRect.intersect(srcBounds)) {
        // Nothing of the source lies in bounds; nothing to copy.
        return;
    }
    sampleM.preTranslate(SkIntToScalar(srcRect.fLeft), SkIntToScalar(srcRect.fTop));
    drawState->createTextureEffect(0, src, sampleM);
    SkRect dstR = SkRect::MakeWH(SkIntToScalar(srcRect.width()), SkIntToScalar(srcRect.height()));
    fGpu->drawSimpleRect(dstR, NULL);
}

// Uploads pixels to a render target, handling preferred-config R/B swaps and
// unpremul->premul conversion by staging through a scratch texture that is
// then drawn into the target.
bool GrContext::writeRenderTargetPixels(GrRenderTarget* target,
                                        int left, int top, int width, int height,
                                        GrPixelConfig srcConfig,
                                        const void* buffer,
                                        size_t rowBytes,
                                        uint32_t flags) {
    SK_TRACE_EVENT0("GrContext::writeRenderTargetPixels");
    ASSERT_OWNED_RESOURCE(target);

    if (NULL == target) {
        // NULL means "the context's current render target".
        target = fRenderTarget.get();
        if (NULL == target) {
            return false;
        }
    }

    // TODO: when underlying api has a direct way to do this we should use it (e.g. glDrawPixels on
    // desktop GL).

    // We will always call some form of writeTexturePixels and we will pass our flags on to it.
    // Thus, we don't perform a flush here since that call will do it (if the kNoFlush flag isn't
    // set.)

    // If the RT is also a texture and we don't have to premultiply then take the texture path.
    // We expect to be at least as fast or faster since it doesn't use an intermediate texture as
    // we do below.

#if !GR_MAC_BUILD
    // At least some drivers on the Mac get confused when glTexImage2D is called on a texture
    // attached to an FBO. The FBO still sees the old image. TODO: determine what OS versions and/or
    // HW is affected.
    if (NULL != target->asTexture() && !(kUnpremul_PixelOpsFlag & flags) &&
        fGpu->canWriteTexturePixels(target->asTexture(), srcConfig)) {
        return this->writeTexturePixels(target->asTexture(),
                                        left, top, width, height,
                                        srcConfig, buffer, rowBytes, flags);
    }
#endif

    // We ignore the preferred config unless it is a R/B swap of the src config. In that case
    // we will upload the original src data to a scratch texture but we will spoof it as the swapped
    // config. This scratch will then have R and B swapped. We correct for this by swapping again
    // when drawing the scratch to the dst using a conversion effect.
    bool swapRAndB = false;
    GrPixelConfig writeConfig = srcConfig;
    if (GrPixelConfigSwapRAndB(srcConfig) ==
        fGpu->preferredWritePixelsConfig(srcConfig, target->config())) {
        writeConfig = GrPixelConfigSwapRAndB(srcConfig);
        swapRAndB = true;
    }

    GrTextureDesc desc;
    desc.fWidth = width;
    desc.fHeight = height;
    desc.fConfig = writeConfig;
    GrAutoScratchTexture ast(this, desc);
    GrTexture* texture = ast.texture();
    if (NULL == texture) {
        return false;
    }

    SkAutoTUnref<const GrEffectRef> effect;
    SkMatrix textureMatrix;
    textureMatrix.setIDiv(texture->width(), texture->height());

    // allocate a tmp buffer and sw convert the pixels to premul
    SkAutoSTMalloc<128 * 128, uint32_t> tmpPixels(0);

    if (kUnpremul_PixelOpsFlag & flags) {
        if (!GrPixelConfigIs8888(srcConfig)) {
            // Unpremul is only supported for 8888 configs.
            return false;
        }
        effect.reset(this->createUPMToPMEffect(texture, swapRAndB, textureMatrix));
        // handle the unpremul step on the CPU if we couldn't create an effect to do it.
        if (NULL == effect) {
            SkCanvas::Config8888 srcConfig8888, dstConfig8888;
            GR_DEBUGCODE(bool success = )
            grconfig_to_config8888(srcConfig, true, &srcConfig8888);
            GrAssert(success);
            GR_DEBUGCODE(success = )
            grconfig_to_config8888(srcConfig, false, &dstConfig8888);
            GrAssert(success);
            const uint32_t* src = reinterpret_cast<const uint32_t*>(buffer);
            tmpPixels.reset(width * height);
            SkConvertConfig8888Pixels(tmpPixels.get(), 4 * width, dstConfig8888,
                                      src, rowBytes, srcConfig8888,
                                      width, height);
            // Upload the converted copy instead of the caller's buffer.
            buffer = tmpPixels.get();
            rowBytes = 4 * width;
        }
    }
    if (NULL == effect) {
        // Plain (possibly R/B-swapping) conversion for the scratch->target draw.
        effect.reset(GrConfigConversionEffect::Create(texture,
                                                      swapRAndB,
                                                      GrConfigConversionEffect::kNone_PMConversion,
                                                      textureMatrix));
    }

    if (!this->writeTexturePixels(texture,
                                  0, 0, width, height,
                                  writeConfig, buffer, rowBytes,
                                  flags & ~kUnpremul_PixelOpsFlag)) {
        return false;
    }

    // writeRenderTargetPixels can be called in the midst of drawing another
    // object (e.g., when uploading a SW path rendering to the gpu while
    // drawing a rect) so preserve the current geometry.
    SkMatrix matrix;
    matrix.setTranslate(SkIntToScalar(left), SkIntToScalar(top));
    GrDrawTarget::AutoGeometryAndStatePush agasp(fGpu, GrDrawTarget::kReset_ASRInit, &matrix);
    GrDrawState* drawState = fGpu->drawState();
    GrAssert(effect);
    drawState->setEffect(0, effect);

    drawState->setRenderTarget(target);

    fGpu->drawSimpleRect(GrRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height)), NULL);
    return true;
}
////////////////////////////////////////////////////////////////////////////////

// Configures fDrawState from 'paint' (or resets it when paint is NULL) and
// returns the draw target to use: the buffered in-order draw buffer or the
// GPU directly. Flushes the buffer when switching from buffered to unbuffered.
GrDrawTarget* GrContext::prepareToDraw(const GrPaint* paint, BufferedDraw buffered) {
    if (kNo_BufferedDraw == buffered && kYes_BufferedDraw == fLastDrawWasBuffered) {
        this->flushDrawBuffer();
        fLastDrawWasBuffered = kNo_BufferedDraw;
    }
    ASSERT_OWNED_RESOURCE(fRenderTarget.get());
    if (NULL != paint) {
        GrAssert(fDrawState->stagesDisabled());
        fDrawState->setFromPaint(*paint, fViewMatrix, fRenderTarget.get());
#if GR_DEBUG_PARTIAL_COVERAGE_CHECK
        if ((paint->hasMask() || 0xff != paint->fCoverage) &&
            !fGpu->canApplyCoverage()) {
            GrPrintf("Partial pixel coverage will be incorrectly blended.\n");
        }
#endif
    } else {
        fDrawState->reset(fViewMatrix);
        fDrawState->setRenderTarget(fRenderTarget.get());
    }
    GrDrawTarget* target;
    if (kYes_BufferedDraw == buffered) {
        fLastDrawWasBuffered = kYes_BufferedDraw;
        target = fDrawBuffer;
    } else {
        GrAssert(kNo_BufferedDraw == buffered);
        fLastDrawWasBuffered = kNo_BufferedDraw;
        target = fGpu;
    }
    // Clipping is enabled only when there is a clip that isn't wide open.
    fDrawState->setState(GrDrawState::kClip_StateBit, NULL != fClip &&
                                                      !fClip->fClipStack->isWideOpen());
    target->setClip(fClip);
    GrAssert(fDrawState == target->drawState());
    return target;
}

/*
 * This method finds a path renderer that can draw the specified path on
 * the provided target.
* Due to its expense, the software path renderer has been split out so it
 * can be individually allowed/disallowed via the "allowSW" boolean.
 */
GrPathRenderer* GrContext::getPathRenderer(const SkPath& path,
                                           const SkStrokeRec& stroke,
                                           const GrDrawTarget* target,
                                           bool allowSW,
                                           GrPathRendererChain::DrawType drawType,
                                           GrPathRendererChain::StencilSupport* stencilSupport) {

    if (NULL == fPathRendererChain) {
        // Lazily build the chain of GPU path renderers on first use.
        fPathRendererChain = SkNEW_ARGS(GrPathRendererChain, (this));
    }

    GrPathRenderer* pr = fPathRendererChain->getPathRenderer(path,
                                                             stroke,
                                                             target,
                                                             drawType,
                                                             stencilSupport);

    if (NULL == pr && allowSW) {
        // Lazily create the SW fallback renderer as well.
        if (NULL == fSoftwarePathRenderer) {
            fSoftwarePathRenderer = SkNEW_ARGS(GrSoftwarePathRenderer, (this));
        }
        pr = fSoftwarePathRenderer;
    }

    return pr;
}

////////////////////////////////////////////////////////////////////////////////

bool GrContext::isConfigRenderable(GrPixelConfig config) const {
    return fGpu->isConfigRenderable(config);
}

// Sets (pred != 0) or clears the bit at 'shift' in 'bits'.
// NOTE(review): '1 << shift' is an int shift, so shifts >= 31 overflow even
// when intptr_t is 64-bit — appears to assume small shift values; confirm at
// call sites.
static inline intptr_t setOrClear(intptr_t bits, int shift, intptr_t pred) {
    intptr_t mask = 1 << shift;
    if (pred) {
        bits |= mask;
    } else {
        bits &= ~mask;
    }
    return bits;
}

// One-time construction of the deferred draw buffer and its vertex/index
// allocation pools. The buffer shares fDrawState with the context.
void GrContext::setupDrawBuffer() {

    GrAssert(NULL == fDrawBuffer);
    GrAssert(NULL == fDrawBufferVBAllocPool);
    GrAssert(NULL == fDrawBufferIBAllocPool);

    fDrawBufferVBAllocPool =
        SkNEW_ARGS(GrVertexBufferAllocPool, (fGpu, false,
                                    DRAW_BUFFER_VBPOOL_BUFFER_SIZE,
                                    DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS));
    fDrawBufferIBAllocPool =
        SkNEW_ARGS(GrIndexBufferAllocPool, (fGpu, false,
                                   DRAW_BUFFER_IBPOOL_BUFFER_SIZE,
                                   DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS));

    fDrawBuffer = SkNEW_ARGS(GrInOrderDrawBuffer, (fGpu,
                                                   fDrawBufferVBAllocPool,
                                                   fDrawBufferIBAllocPool));

    fDrawBuffer->setDrawState(fDrawState);
}

GrDrawTarget* GrContext::getTextTarget(const GrPaint& paint) {
    return this->prepareToDraw(&paint, BUFFERED_DRAW);
}

const GrIndexBuffer* GrContext::getQuadIndexBuffer() const {
    return fGpu->getQuadIndexBuffer();
}

namespace {
// Probes which premul<->unpremul conversions round-trip losslessly on this
// GPU and stores the results as ints (for GrContext's cached fields).
void test_pm_conversions(GrContext* ctx, int* pmToUPMValue, int* upmToPMValue) {
    GrConfigConversionEffect::PMConversion pmToUPM;
    GrConfigConversionEffect::PMConversion upmToPM;
    GrConfigConversionEffect::TestForPreservingPMConversions(ctx, &pmToUPM, &upmToPM);
    *pmToUPMValue = pmToUPM;
    *upmToPMValue = upmToPM;
}
}

// Returns an effect performing premul->unpremul (plus optional R/B swap), or
// NULL if this GPU has no lossless conversion. Conversion support is probed
// once and cached.
const GrEffectRef* GrContext::createPMToUPMEffect(GrTexture* texture,
                                                  bool swapRAndB,
                                                  const SkMatrix& matrix) {
    if (!fDidTestPMConversions) {
        test_pm_conversions(this, &fPMToUPMConversion, &fUPMToPMConversion);
        fDidTestPMConversions = true;
    }
    GrConfigConversionEffect::PMConversion pmToUPM =
        static_cast<GrConfigConversionEffect::PMConversion>(fPMToUPMConversion);
    if (GrConfigConversionEffect::kNone_PMConversion != pmToUPM) {
        return GrConfigConversionEffect::Create(texture, swapRAndB, pmToUPM, matrix);
    } else {
        return NULL;
    }
}

// Mirror of createPMToUPMEffect for the unpremul->premul direction.
const GrEffectRef* GrContext::createUPMToPMEffect(GrTexture* texture,
                                                  bool swapRAndB,
                                                  const SkMatrix& matrix) {
    if (!fDidTestPMConversions) {
        test_pm_conversions(this, &fPMToUPMConversion, &fUPMToPMConversion);
        fDidTestPMConversions = true;
    }
    GrConfigConversionEffect::PMConversion upmToPM =
        static_cast<GrConfigConversionEffect::PMConversion>(fUPMToPMConversion);
    if (GrConfigConversionEffect::kNone_PMConversion != upmToPM) {
        return GrConfigConversionEffect::Create(texture, swapRAndB, upmToPM, matrix);
    } else {
        return NULL;
    }
}

// Two-pass separable Gaussian blur of 'rect' within 'srcTexture', with
// optional power-of-two downscale/upscale for large sigmas. Returns a ref'ed
// texture holding the result (may be srcTexture itself when canClobberSrc).
GrTexture* GrContext::gaussianBlur(GrTexture* srcTexture,
                                   bool canClobberSrc,
                                   const SkRect&
rect,
                                   float sigmaX, float sigmaY) {
    ASSERT_OWNED_RESOURCE(srcTexture);

    // Save/restore the context's render target and use an identity view
    // matrix for the intermediate draws.
    AutoRenderTarget art(this);

    AutoMatrix am;
    am.setIdentity(this);

    SkIRect clearRect;
    int scaleFactorX, radiusX;
    int scaleFactorY, radiusY;
    // Large sigmas are reduced by downscaling first (MAX_BLUR_SIGMA cap).
    sigmaX = adjust_sigma(sigmaX, &scaleFactorX, &radiusX);
    sigmaY = adjust_sigma(sigmaY, &scaleFactorY, &radiusY);

    // Snap the working rect outward to scale-factor-aligned integer bounds.
    SkRect srcRect(rect);
    scale_rect(&srcRect, 1.0f / scaleFactorX, 1.0f / scaleFactorY);
    srcRect.roundOut();
    scale_rect(&srcRect, static_cast<float>(scaleFactorX),
                         static_cast<float>(scaleFactorY));

    AutoClip acs(this, srcRect);

    GrAssert(kBGRA_8888_GrPixelConfig == srcTexture->config() ||
             kRGBA_8888_GrPixelConfig == srcTexture->config() ||
             kAlpha_8_GrPixelConfig == srcTexture->config());

    GrTextureDesc desc;
    desc.fFlags = kRenderTarget_GrTextureFlagBit | kNoStencil_GrTextureFlagBit;
    desc.fWidth = SkScalarFloorToInt(srcRect.width());
    desc.fHeight = SkScalarFloorToInt(srcRect.height());
    desc.fConfig = srcTexture->config();

    // Ping-pong between two surfaces; when the caller allows clobbering,
    // srcTexture itself serves as the second surface.
    GrAutoScratchTexture temp1, temp2;
    GrTexture* dstTexture = temp1.set(this, desc);
    GrTexture* tempTexture = canClobberSrc ? srcTexture : temp2.set(this, desc);
    if (NULL == dstTexture || NULL == tempTexture) {
        return NULL;
    }

    GrPaint paint;
    paint.reset();

    // Repeatedly halve along each axis until the requested scale factors are
    // reached (bilinear downsample).
    for (int i = 1; i < scaleFactorX || i < scaleFactorY; i *= 2) {
        SkMatrix matrix;
        matrix.setIDiv(srcTexture->width(), srcTexture->height());
        this->setRenderTarget(dstTexture->asRenderTarget());
        SkRect dstRect(srcRect);
        scale_rect(&dstRect, i < scaleFactorX ? 0.5f : 1.0f,
                             i < scaleFactorY ? 0.5f : 1.0f);

        paint.colorStage(0)->setEffect(GrSimpleTextureEffect::Create(srcTexture,
                                                                     matrix,
                                                                     true))->unref();
        this->drawRectToRect(paint, dstRect, srcRect);
        srcRect = dstRect;
        srcTexture = dstTexture;
        SkTSwap(dstTexture, tempTexture);
    }

    SkIRect srcIRect;
    srcRect.roundOut(&srcIRect);

    if (sigmaX > 0.0f) {
        if (scaleFactorX > 1) {
            // Clear out a radius to the right of the srcRect to prevent the
            // X convolution from reading garbage.
            clearRect = SkIRect::MakeXYWH(srcIRect.fRight, srcIRect.fTop,
                                          radiusX, srcIRect.height());
            this->clear(&clearRect, 0x0);
        }

        this->setRenderTarget(dstTexture->asRenderTarget());
        GrDrawTarget* target = this->prepareToDraw(NULL, BUFFERED_DRAW);
        convolve_gaussian(target, srcTexture, srcRect, sigmaX, radiusX,
                          Gr1DKernelEffect::kX_Direction);
        srcTexture = dstTexture;
        SkTSwap(dstTexture, tempTexture);
    }

    if (sigmaY > 0.0f) {
        if (scaleFactorY > 1 || sigmaX > 0.0f) {
            // Clear out a radius below the srcRect to prevent the Y
            // convolution from reading garbage.
            clearRect = SkIRect::MakeXYWH(srcIRect.fLeft, srcIRect.fBottom,
                                          srcIRect.width(), radiusY);
            this->clear(&clearRect, 0x0);
        }

        this->setRenderTarget(dstTexture->asRenderTarget());
        GrDrawTarget* target = this->prepareToDraw(NULL, BUFFERED_DRAW);
        convolve_gaussian(target, srcTexture, srcRect, sigmaY, radiusY,
                          Gr1DKernelEffect::kY_Direction);
        srcTexture = dstTexture;
        SkTSwap(dstTexture, tempTexture);
    }

    if (scaleFactorX > 1 || scaleFactorY > 1) {
        // Clear one pixel to the right and below, to accommodate bilinear
        // upsampling.
        clearRect = SkIRect::MakeXYWH(srcIRect.fLeft, srcIRect.fBottom,
                                      srcIRect.width() + 1, 1);
        this->clear(&clearRect, 0x0);
        clearRect = SkIRect::MakeXYWH(srcIRect.fRight, srcIRect.fTop,
                                      1, srcIRect.height());
        this->clear(&clearRect, 0x0);
        SkMatrix matrix;
        // FIXME: This should be mitchell, not bilinear.
        matrix.setIDiv(srcTexture->width(), srcTexture->height());
        this->setRenderTarget(dstTexture->asRenderTarget());
        paint.colorStage(0)->setEffect(GrSimpleTextureEffect::Create(srcTexture,
                                                                     matrix,
                                                                     true))->unref();
        SkRect dstRect(srcRect);
        scale_rect(&dstRect, (float) scaleFactorX, (float) scaleFactorY);
        this->drawRectToRect(paint, dstRect, srcRect);
        srcRect = dstRect;
        srcTexture = dstTexture;
        SkTSwap(dstTexture, tempTexture);
    }
    // Hand back ownership of whichever surface holds the final result.
    if (srcTexture == temp1.texture()) {
        return temp1.detach();
    } else if (srcTexture == temp2.texture()) {
        return temp2.detach();
    } else {
        // Result ended up in the caller's srcTexture (canClobberSrc case).
        srcTexture->ref();
        return srcTexture;
    }
}

///////////////////////////////////////////////////////////////////////////////
#if GR_CACHE_STATS
void GrContext::printCacheStats() const {
    fTextureCache->printStats();
}
#endif