GrContext.cpp revision 0982d35187da7e1ed6c0eba5951bbdadca8b33e7
/*
 * Copyright 2011 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrContext.h"

#include "effects/GrMorphologyEffect.h"
#include "effects/GrConvolutionEffect.h"
#include "effects/GrSingleTextureEffect.h"

#include "GrBufferAllocPool.h"
#include "GrClipIterator.h"
#include "GrGpu.h"
#include "GrIndexBuffer.h"
#include "GrInOrderDrawBuffer.h"
#include "GrPathRenderer.h"
#include "GrPathUtils.h"
#include "GrResourceCache.h"
#include "GrSoftwarePathRenderer.h"
#include "GrStencilBuffer.h"
#include "GrTextStrike.h"
#include "SkTLazy.h"
#include "SkTLS.h"
#include "SkTrace.h"

SK_DEFINE_INST_COUNT(GrContext)
SK_DEFINE_INST_COUNT(GrDrawState)

// Compile-time switches controlling which draw operations go through the
// in-order draw buffer (deferred) versus being issued to the GPU immediately.
#define DEFER_TEXT_RENDERING 1

#define DEFER_PATHS 1

// Rect-to-rect draws can only be batched when the static unit-square VB
// path is disabled (the two code paths are mutually exclusive; see the
// GR_STATIC_ASSERT in drawRectToRect below).
#define BATCH_RECT_TO_RECT (1 && !GR_STATIC_RECT_VB)

// Largest gaussian sigma handled directly by the convolution kernel;
// larger sigmas are reduced by downscaling first (see adjust_sigma).
#define MAX_BLUR_SIGMA 4.0f

// When we're using coverage AA but the blend is incompatible (given gpu
// limitations) should we disable AA or draw wrong?
#define DISABLE_COVERAGE_AA_FOR_BLEND 1

#if GR_DEBUG
    // change this to a 1 to see notifications when partial coverage fails
    #define GR_DEBUG_PARTIAL_COVERAGE_CHECK 0
#else
    #define GR_DEBUG_PARTIAL_COVERAGE_CHECK 0
#endif

// Default texture-cache budget. NOTE(review): these appear intended as the
// initial limits for fTextureCache — confirm where they are consumed (the
// constructor is not visible in this chunk).
static const size_t MAX_TEXTURE_CACHE_COUNT = 256;
static const size_t MAX_TEXTURE_CACHE_BYTES = 16 * 1024 * 1024;

// Sizing for the vertex pool backing the deferred draw buffer.
static const size_t DRAW_BUFFER_VBPOOL_BUFFER_SIZE = 1 << 15;
static const int DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS = 4;

// path rendering is the only thing we defer today that uses non-static indices
static const size_t DRAW_BUFFER_IBPOOL_BUFFER_SIZE = DEFER_PATHS ? 1 << 11 : 0;
static const int DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS = DEFER_PATHS ? 4 : 0;

// Sanity check that a resource being handed to this context was created by it.
#define ASSERT_OWNED_RESOURCE(R) GrAssert(!(R) || (R)->getContext() == this)

// Factory: creates the GrGpu backend for the requested engine / platform 3D
// context, then wraps it in a GrContext. The context takes its own ref on the
// gpu inside the constructor, so the local ref is dropped before returning.
// Returns NULL if the gpu could not be created.
GrContext* GrContext::Create(GrEngine engine,
                             GrPlatform3DContext context3D) {
    GrContext* ctx = NULL;
    GrGpu* fGpu = GrGpu::Create(engine, context3D);
    if (NULL != fGpu) {
        ctx = SkNEW_ARGS(GrContext, (fGpu));
        fGpu->unref();
    }
    return ctx;
}

namespace {
// TLS slot factory/destructor for the per-thread GrContext instance counter.
void* CreateThreadInstanceCount() {
    return SkNEW_ARGS(int, (0));
}
void DeleteThreadInstanceCount(void* v) {
    delete reinterpret_cast<int*>(v);
}
// Expands to an lvalue int: this thread's live-GrContext count, lazily
// created in thread-local storage on first use.
#define THREAD_INSTANCE_COUNT \
    (*reinterpret_cast<int*>(SkTLS::Get(CreateThreadInstanceCount, \
                                        DeleteThreadInstanceCount)))

}

// Returns the number of GrContexts alive on the calling thread.
int GrContext::GetThreadInstanceCount() {
    return THREAD_INSTANCE_COUNT;
}

// Tears down in dependency order: flush pending work, let the gpu release its
// scratch textures, then free caches and pools before dropping the gpu itself.
GrContext::~GrContext() {
    this->flush();

    // Since the gpu can hold scratch textures, give it a chance to let go
    // of them before freeing the texture cache
    fGpu->purgeResources();

    delete fTextureCache;
    delete fFontCache;
    delete fDrawBuffer;
    delete fDrawBufferVBAllocPool;
    delete fDrawBufferIBAllocPool;

    fAARectRenderer->unref();

    fGpu->unref();
    GrSafeUnref(fPathRendererChain);
    GrSafeUnref(fSoftwarePathRenderer);
    fDrawState->unref();

    // Balances the per-thread count (presumably incremented in the
    // constructor, which is not visible in this chunk — confirm).
    --THREAD_INSTANCE_COUNT;
}

// The underlying 3D API context was lost: discard everything as in
// contextDestroyed(), then rebuild the deferred draw buffer so the context
// remains usable.
void GrContext::contextLost() {
    contextDestroyed();
    this->setupDrawBuffer();
}

// The underlying 3D API context is gone for good. Resources must be
// abandoned (not deleted through the API) and all caches dropped.
void GrContext::contextDestroyed() {
    // abandon first so destructors
    // don't try to free the resources in the API.
    fGpu->abandonResources();

    // a path renderer may be holding onto resources that
    // are now unusable
    GrSafeSetNull(fPathRendererChain);
    GrSafeSetNull(fSoftwarePathRenderer);

    delete fDrawBuffer;
    fDrawBuffer = NULL;

    delete fDrawBufferVBAllocPool;
    fDrawBufferVBAllocPool = NULL;

    delete fDrawBufferIBAllocPool;
    fDrawBufferIBAllocPool = NULL;

    fAARectRenderer->reset();

    fTextureCache->removeAll();
    fFontCache->freeAll();
    fGpu->markContextDirty();
}

// Marks the gpu's cached 3D API state as stale (e.g. after external calls
// into the same API context).
void GrContext::resetContext() {
    fGpu->markContextDirty();
}

// Releases all cached GPU resources that can be recreated on demand, without
// destroying the context. Flushes first so nothing pending references them.
void GrContext::freeGpuResources() {
    this->flush();

    fGpu->purgeResources();

    fAARectRenderer->reset();

    fTextureCache->removeAll();
    fFontCache->freeAll();
    // a path renderer may be holding onto resources
    GrSafeSetNull(fPathRendererChain);
    GrSafeSetNull(fSoftwarePathRenderer);
}

// Current byte footprint of the texture cache.
size_t GrContext::getGpuTextureCacheBytes() const {
    return fTextureCache->getCachedResourceBytes();
}

////////////////////////////////////////////////////////////////////////////////

// Unwraps the cached resource as a texture; NULL for an empty entry.
GrTexture* GrContext::TextureCacheEntry::texture() const {
    if (NULL == fEntry) {
        return NULL;
    } else {
        return (GrTexture*) fEntry->resource();
    }
}

namespace {

// we should never have more than one stencil buffer with same combo of
// (width,height,samplecount)
void gen_stencil_key_values(int width, int height,
                            int sampleCnt, uint32_t v[4]) {
    v[0] = width;
    v[1] = height;
    v[2] = sampleCnt;
    v[3] = GrResourceKey::kStencilBuffer_TypeBit;
}

// Convenience overload: derive the key from an existing stencil buffer.
void gen_stencil_key_values(const GrStencilBuffer* sb,
                            uint32_t v[4]) {
    gen_stencil_key_values(sb->width(), sb->height(),
                           sb->numSamples(), v);
}

// Scales each edge of rect in place; x and y scales are independent.
void scale_rect(SkRect* rect, float xScale, float yScale) {
    rect->fLeft = SkScalarMul(rect->fLeft, SkFloatToScalar(xScale));
    rect->fTop = SkScalarMul(rect->fTop, SkFloatToScalar(yScale));
    rect->fRight = SkScalarMul(rect->fRight, SkFloatToScalar(xScale));
    rect->fBottom = SkScalarMul(rect->fBottom, SkFloatToScalar(yScale));
}

// Reduces sigma to <= MAX_BLUR_SIGMA by halving it (and doubling the
// downscale factor) as needed; outputs the kernel radius as ceil(3*sigma).
// Returns the adjusted sigma to use at the reduced resolution.
float adjust_sigma(float sigma, int *scaleFactor, int *radius) {
    *scaleFactor = 1;
    while (sigma > MAX_BLUR_SIGMA) {
        *scaleFactor *= 2;
        sigma *= 0.5f;
    }
    *radius = static_cast<int>(ceilf(sigma * 3.0f));
    GrAssert(*radius <= GrConvolutionEffect::kMaxKernelRadius);
    return sigma;
}

// Draws `rect` with a 1-D morphology (erode/dilate) effect sampling
// `texture` on stage 0. Draw state is reset for the call and restored on
// exit; the current render target is preserved across the reset.
void apply_morphology(GrGpu* gpu,
                      GrTexture* texture,
                      const SkRect& rect,
                      int radius,
                      GrContext::MorphologyType morphType,
                      Gr1DKernelEffect::Direction direction) {

    GrRenderTarget* target = gpu->drawState()->getRenderTarget();
    GrDrawTarget::AutoStateRestore asr(gpu, GrDrawTarget::kReset_ASRInit);
    GrDrawState* drawState = gpu->drawState();
    drawState->setRenderTarget(target);
    GrMatrix sampleM;
    // Normalize texel coords: matrix maps to 1/width, 1/height scale.
    sampleM.setIDiv(texture->width(), texture->height());
    drawState->sampler(0)->reset(sampleM);
    SkAutoTUnref<GrCustomStage> morph(
        SkNEW_ARGS(GrMorphologyEffect, (texture, direction, radius, morphType)));
    drawState->sampler(0)->setCustomStage(morph);
    gpu->drawSimpleRect(rect, NULL);
}

// Draws `rect` with a 1-D gaussian convolution effect sampling `texture`
// on stage 0. Same state reset/restore pattern as apply_morphology above.
void convolve_gaussian(GrGpu* gpu,
                       GrTexture* texture,
                       const SkRect& rect,
                       float sigma,
                       int radius,
                       Gr1DKernelEffect::Direction direction) {
    GrRenderTarget* target = gpu->drawState()->getRenderTarget();
    GrDrawTarget::AutoStateRestore asr(gpu, GrDrawTarget::kReset_ASRInit);
    GrDrawState* drawState = gpu->drawState();
    drawState->setRenderTarget(target);
    GrMatrix sampleM;
    sampleM.setIDiv(texture->width(), texture->height());
    drawState->sampler(0)->reset(sampleM);
    SkAutoTUnref<GrConvolutionEffect> conv(SkNEW_ARGS(GrConvolutionEffect,
                                                      (texture, direction, radius,
                                                       sigma)));
    drawState->sampler(0)->setCustomStage(conv);
    gpu->drawSimpleRect(rect, NULL);
}

}
// Looks up a texture matching (desc, params) in the cache and takes a nested
// lock on its entry. The returned entry's texture() is NULL on a miss.
GrContext::TextureCacheEntry GrContext::findAndLockTexture(const GrTextureDesc& desc,
                                                           const GrTextureParams* params) {
    GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, false);
    return TextureCacheEntry(fTextureCache->findAndLock(resourceKey,
                                            GrResourceCache::kNested_LockType));
}

// True if a texture with this key is resident in the cache (no lock taken).
bool GrContext::isTextureInCache(const GrTextureDesc& desc,
                                 const GrTextureParams* params) const {
    GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, false);
    return fTextureCache->hasKey(resourceKey);
}

// Adds a stencil buffer to the (shared) resource cache under its
// (width, height, sampleCnt) key and returns the locked entry.
GrResourceEntry* GrContext::addAndLockStencilBuffer(GrStencilBuffer* sb) {
    ASSERT_OWNED_RESOURCE(sb);
    uint32_t v[4];
    gen_stencil_key_values(sb, v);
    GrResourceKey resourceKey(v);
    return fTextureCache->createAndLock(resourceKey, sb);
}

// Finds and single-locks a cached stencil buffer of the given dimensions and
// sample count; NULL if none is cached.
GrStencilBuffer* GrContext::findStencilBuffer(int width, int height,
                                              int sampleCnt) {
    uint32_t v[4];
    gen_stencil_key_values(width, height, sampleCnt, v);
    GrResourceKey resourceKey(v);
    GrResourceEntry* entry = fTextureCache->findAndLock(resourceKey,
                                            GrResourceCache::kSingle_LockType);
    if (NULL != entry) {
        GrStencilBuffer* sb = (GrStencilBuffer*) entry->resource();
        return sb;
    } else {
        return NULL;
    }
}

// Releases a lock taken by addAndLockStencilBuffer/findStencilBuffer.
void GrContext::unlockStencilBuffer(GrResourceEntry* sbEntry) {
    ASSERT_OWNED_RESOURCE(sbEntry->resource());
    fTextureCache->unlock(sbEntry);
}

// CPU nearest-neighbor stretch of src (srcW x srcH) into dst (dstW x dstH),
// bpp bytes per pixel, using 16.16 fixed-point stepping. No filtering is
// performed (see the TODO at the caller in createAndLockTexture).
static void stretchImage(void* dst,
                         int dstW,
                         int dstH,
                         void* src,
                         int srcW,
                         int srcH,
                         int bpp) {
    GrFixed dx = (srcW << 16) / dstW;
    GrFixed dy = (srcH << 16) / dstH;

    // Start half a step in so samples are taken at texel centers.
    GrFixed y = dy >> 1;

    int dstXLimit = dstW*bpp;
    for (int j = 0; j < dstH; ++j) {
        GrFixed x = dx >> 1;
        void* srcRow = (uint8_t*)src + (y>>16)*srcW*bpp;
        void* dstRow = (uint8_t*)dst + j*dstW*bpp;
        for (int i = 0; i < dstXLimit; i += bpp) {
            memcpy((uint8_t*) dstRow + i,
                   (uint8_t*) srcRow + (x>>16)*bpp,
                   bpp);
            x += dx;
        }
        y += dy;
    }
}

// Creates a texture from srcData, caches it under its computed key, and
// returns the locked entry. If the key says the texture needs resizing
// (NPOT + tiling on hardware without NPOT-tile support), the data is first
// uploaded as a clamped texture and then stretched to a POT size — on the
// GPU when a render-target texture can be created, otherwise via the CPU
// stretchImage fallback above.
GrContext::TextureCacheEntry GrContext::createAndLockTexture(
        const GrTextureParams* params,
        const GrTextureDesc& desc,
        void* srcData,
        size_t rowBytes) {
    SK_TRACE_EVENT0("GrContext::createAndLockTexture");

#if GR_DUMP_TEXTURE_UPLOAD
    GrPrintf("GrContext::createAndLockTexture [%d %d]\n", desc.fWidth, desc.fHeight);
#endif

    TextureCacheEntry entry;

    GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, false);

    if (GrTexture::NeedsResizing(resourceKey)) {
        // The desired texture is NPOT and tiled but that isn't supported by
        // the current hardware. Resize the texture to be a POT
        GrAssert(NULL != params);
        // First get (or create) the original-size clamped texture to
        // stretch from. Note the recursive call uses NULL params, so it
        // takes the non-resizing branch below.
        TextureCacheEntry clampEntry = this->findAndLockTexture(desc, NULL);

        if (NULL == clampEntry.texture()) {
            clampEntry = this->createAndLockTexture(NULL, desc, srcData, rowBytes);
            GrAssert(NULL != clampEntry.texture());
            if (NULL == clampEntry.texture()) {
                return entry;
            }
        }
        GrTextureDesc rtDesc = desc;
        rtDesc.fFlags =  rtDesc.fFlags |
                         kRenderTarget_GrTextureFlagBit |
                         kNoStencil_GrTextureFlagBit;
        // POT dimensions, clamped to a minimum render-target size of 64.
        rtDesc.fWidth  = GrNextPow2(GrMax(desc.fWidth, 64));
        rtDesc.fHeight = GrNextPow2(GrMax(desc.fHeight, 64));

        GrTexture* texture = fGpu->createTexture(rtDesc, NULL, 0);

        if (NULL != texture) {
            // GPU path: draw the clamped texture into the POT render target.
            GrDrawTarget::AutoStateRestore asr(fGpu, GrDrawTarget::kReset_ASRInit);
            GrDrawState* drawState = fGpu->drawState();
            drawState->setRenderTarget(texture->asRenderTarget());

            // if filtering is not desired then we want to ensure all
            // texels in the resampled image are copies of texels from
            // the original.
            drawState->sampler(0)->reset(SkShader::kClamp_TileMode,
                                         GrTexture::NeedsFiltering(resourceKey));
            drawState->createTextureEffect(0, clampEntry.texture());

            static const GrVertexLayout layout =
                                GrDrawTarget::StageTexCoordVertexLayoutBit(0,0);
            GrDrawTarget::AutoReleaseGeometry arg(fGpu, layout, 4, 0);

            if (arg.succeeded()) {
                GrPoint* verts = (GrPoint*) arg.vertices();
                // Interleaved position (full target rect) and texcoord
                // (unit rect) fans; stride is 2 GrPoints.
                verts[0].setIRectFan(0, 0,
                                     texture->width(),
                                     texture->height(),
                                     2*sizeof(GrPoint));
                verts[1].setIRectFan(0, 0, 1, 1, 2*sizeof(GrPoint));
                fGpu->drawNonIndexed(kTriangleFan_GrPrimitiveType,
                                     0, 4);
                entry.set(fTextureCache->createAndLock(resourceKey, texture));
            }
            texture->releaseRenderTarget();
        } else {
            // TODO: Our CPU stretch doesn't filter. But we create separate
            // stretched textures when the sampler state is either filtered or
            // not. Either implement filtered stretch blit on CPU or just create
            // one when FBO case fails.

            rtDesc.fFlags = kNone_GrTextureFlags;
            // no longer need to clamp at min RT size.
            rtDesc.fWidth  = GrNextPow2(desc.fWidth);
            rtDesc.fHeight = GrNextPow2(desc.fHeight);
            int bpp = GrBytesPerPixel(desc.fConfig);
            SkAutoSMalloc<128*128*4> stretchedPixels(bpp *
                                                     rtDesc.fWidth *
                                                     rtDesc.fHeight);
            stretchImage(stretchedPixels.get(), rtDesc.fWidth, rtDesc.fHeight,
                         srcData, desc.fWidth, desc.fHeight, bpp);

            size_t stretchedRowBytes = rtDesc.fWidth * bpp;

            GrTexture* texture = fGpu->createTexture(rtDesc,
                                                     stretchedPixels.get(),
                                                     stretchedRowBytes);
            GrAssert(NULL != texture);
            entry.set(fTextureCache->createAndLock(resourceKey, texture));
        }
        fTextureCache->unlock(clampEntry.cacheEntry());

    } else {
        // Common path: upload directly and cache under the key.
        GrTexture* texture = fGpu->createTexture(desc, srcData, rowBytes);
        if (NULL != texture) {
            entry.set(fTextureCache->createAndLock(resourceKey, texture));
        }
    }
    return entry;
}

// Finds or creates a scratch texture compatible with inDesc. For approximate
// matches the dimensions are binned to pow2 (min 256) and the search
// progressively relaxes flags, then doubles width, then height, to maximize
// reuse. The returned entry is detached from the cache so concurrent callers
// with identical descs cannot receive the same texture; it is reattached in
// unlockTexture().
GrContext::TextureCacheEntry GrContext::lockScratchTexture(
                                                const GrTextureDesc& inDesc,
                                                ScratchTexMatch match) {
    GrTextureDesc desc = inDesc;
    desc.fClientCacheID = kScratch_CacheID;

    if (kExact_ScratchTexMatch != match) {
        // bin by pow2 with a reasonable min
        static const int MIN_SIZE = 256;
        desc.fWidth  = GrMax(MIN_SIZE, GrNextPow2(desc.fWidth));
        desc.fHeight = GrMax(MIN_SIZE, GrNextPow2(desc.fHeight));
    }

    GrResourceEntry* entry;
    int origWidth = desc.fWidth;
    int origHeight = desc.fHeight;
    bool doubledW = false;
    bool doubledH = false;

    do {
        GrResourceKey key = GrTexture::ComputeKey(fGpu, NULL, desc, true);
        entry = fTextureCache->findAndLock(key,
                                           GrResourceCache::kNested_LockType);
        // if we miss, relax the fit of the flags...
        // then try doubling width... then height.
        if (NULL != entry || kExact_ScratchTexMatch == match) {
            break;
        }
        if (!(desc.fFlags & kRenderTarget_GrTextureFlagBit)) {
            desc.fFlags = desc.fFlags | kRenderTarget_GrTextureFlagBit;
        } else if (desc.fFlags & kNoStencil_GrTextureFlagBit) {
            desc.fFlags = desc.fFlags & ~kNoStencil_GrTextureFlagBit;
        } else if (!doubledW) {
            desc.fFlags = inDesc.fFlags;
            desc.fWidth *= 2;
            doubledW = true;
        } else if (!doubledH) {
            desc.fFlags = inDesc.fFlags;
            desc.fWidth = origWidth;
            desc.fHeight *= 2;
            doubledH = true;
        } else {
            break;
        }

    } while (true);

    if (NULL == entry) {
        // No cached match: create one at the (binned) requested size with
        // the original flags.
        desc.fFlags = inDesc.fFlags;
        desc.fWidth = origWidth;
        desc.fHeight = origHeight;
        GrTexture* texture = fGpu->createTexture(desc, NULL, 0);
        if (NULL != texture) {
            GrResourceKey key = GrTexture::ComputeKey(fGpu, NULL,
                                                      texture->desc(),
                                                      true);
            entry = fTextureCache->createAndLock(key, texture);
        }
    }

    // If the caller gives us the same desc/sampler twice we don't want
    // to return the same texture the second time (unless it was previously
    // released). So we detach the entry from the cache and reattach at release.
    if (NULL != entry) {
        fTextureCache->detach(entry);
    }
    return TextureCacheEntry(entry);
}

// Inserts an externally created texture into the cache under its scratch key.
void GrContext::addExistingTextureToCache(GrTexture* texture) {

    if (NULL == texture) {
        return;
    }

    GrResourceKey key = GrTexture::ComputeKey(fGpu, NULL,
                                              texture->desc(),
                                              true);
    fTextureCache->attach(key, texture);
}

// Releases a texture lock; scratch entries were detached in
// lockScratchTexture and are reattached here.
void GrContext::unlockTexture(TextureCacheEntry entry) {
    ASSERT_OWNED_RESOURCE(entry.texture());
    // If this is a scratch texture we detached it from the cache
    // while it was locked (to avoid two callers simultaneously getting
    // the same texture).
    if (GrTexture::IsScratchTexture(entry.cacheEntry()->key())) {
        fTextureCache->reattachAndUnlock(entry.cacheEntry());
    } else {
        fTextureCache->unlock(entry.cacheEntry());
    }
}

// Evicts the entry from the cache entirely (as opposed to just unlocking).
void GrContext::freeEntry(TextureCacheEntry entry) {
    ASSERT_OWNED_RESOURCE(entry.texture());

    fTextureCache->freeEntry(entry.cacheEntry());
}

// Creates a texture that bypasses the cache (tagged kUncached_CacheID).
// The caller owns the returned texture's ref.
GrTexture* GrContext::createUncachedTexture(const GrTextureDesc& descIn,
                                            void* srcData,
                                            size_t rowBytes) {
    GrTextureDesc descCopy = descIn;
    descCopy.fClientCacheID = kUncached_CacheID;
    return fGpu->createTexture(descCopy, srcData, rowBytes);
}

// Texture-cache budget accessors (count and byte limits).
void GrContext::getTextureCacheLimits(int* maxTextures,
                                      size_t* maxTextureBytes) const {
    fTextureCache->getLimits(maxTextures, maxTextureBytes);
}

void GrContext::setTextureCacheLimits(int maxTextures, size_t maxTextureBytes) {
    fTextureCache->setLimits(maxTextures, maxTextureBytes);
}

// Hardware capability queries, forwarded from the gpu's caps.
int GrContext::getMaxTextureSize() const {
    return fGpu->getCaps().fMaxTextureSize;
}

int GrContext::getMaxRenderTargetSize() const {
    return fGpu->getCaps().fMaxRenderTargetSize;
}

///////////////////////////////////////////////////////////////////////////////

// Wraps a platform (client-supplied) texture/render target object.
GrTexture* GrContext::createPlatformTexture(const GrPlatformTextureDesc& desc) {
    return fGpu->createPlatformTexture(desc);
}

GrRenderTarget* GrContext::createPlatformRenderTarget(const GrPlatformRenderTargetDesc& desc) {
    return fGpu->createPlatformRenderTarget(desc);
}

///////////////////////////////////////////////////////////////////////////////

// True if an 8-bit paletted texture of the given size can be used: requires
// palette support, and — when NPOT and tiled — NPOT tiling support.
bool GrContext::supportsIndex8PixelConfig(const GrTextureParams* params,
                                          int width, int height) const {
    const GrDrawTarget::Caps& caps = fGpu->getCaps();
    if (!caps.f8BitPaletteSupport) {
        return false;
    }

    bool isPow2 = GrIsPow2(width) && GrIsPow2(height);

    if (!isPow2) {
        bool tiled = NULL != params && params->isTiled();
        if (tiled && !caps.fNPOTTextureTileSupport) {
            return false;
        }
    }
    return true;
}

////////////////////////////////////////////////////////////////////////////////

// Clip accessors; setting a clip also enables clipping in the draw state.
const GrClipData* GrContext::getClip() const {
    return fGpu->getClip();
}

void GrContext::setClip(const GrClipData* clipData) {
    fGpu->setClip(clipData);
    fDrawState->enableState(GrDrawState::kClip_StateBit);
}

////////////////////////////////////////////////////////////////////////////////

// Clears rect (or the whole target when rect is NULL) to color. Flushes any
// deferred drawing first so ordering is preserved.
void GrContext::clear(const GrIRect* rect,
                      const GrColor color,
                      GrRenderTarget* target) {
    this->flush();
    fGpu->clear(rect, color, target);
}

// Fills the entire clip with the paint by drawing a render-target-sized rect
// mapped through the inverse view matrix (or, for perspective matrices,
// by inverting the sampler matrices and drawing with an identity view).
void GrContext::drawPaint(const GrPaint& paint) {
    // set rect to be big enough to fill the space, but not super-huge, so we
    // don't overflow fixed-point implementations
    GrRect r;
    r.setLTRB(0, 0,
              GrIntToScalar(getRenderTarget()->width()),
              GrIntToScalar(getRenderTarget()->height()));
    GrMatrix inverse;
    SkTLazy<GrPaint> tmpPaint;
    const GrPaint* p = &paint;
    AutoMatrix am;

    // We attempt to map r by the inverse matrix and draw that. mapRect will
    // map the four corners and bound them with a new rect. This will not
    // produce a correct result for some perspective matrices.
    if (!this->getMatrix().hasPerspective()) {
        if (!fDrawState->getViewInverse(&inverse)) {
            GrPrintf("Could not invert matrix\n");
            return;
        }
        inverse.mapRect(&r);
    } else {
        if (paint.hasTextureOrMask()) {
            tmpPaint.set(paint);
            p = tmpPaint.get();
            if (!tmpPaint.get()->preConcatSamplerMatricesWithInverse(fDrawState->getViewMatrix())) {
                GrPrintf("Could not invert matrix\n");
            }
        }
        am.set(this, GrMatrix::I());
    }
    // by definition this fills the entire clip, no need for AA
    if (paint.fAntiAlias) {
        if (!tmpPaint.isValid()) {
            tmpPaint.set(paint);
            p = tmpPaint.get();
        }
        GrAssert(p == tmpPaint.get());
        tmpPaint.get()->fAntiAlias = false;
    }
    this->drawRect(*p, r);
}

////////////////////////////////////////////////////////////////////////////////

namespace {
// True when coverage-based AA must be skipped because the target's blend
// can't incorporate a coverage value (and the build opts to draw non-AA
// rather than draw incorrectly).
inline bool disable_coverage_aa_for_blend(GrDrawTarget* target) {
    return DISABLE_COVERAGE_AA_FOR_BLEND && !target->canApplyCoverage();
}
}

////////////////////////////////////////////////////////////////////////////////

/* create a triangle strip that strokes the specified rect. There are 8
 unique vertices, but we repeat the last 2 to close up. Alternatively we
 could use an indices array, and then only send 8 verts, but not sure that
 would be faster.
 */
static void setStrokeRectStrip(GrPoint verts[10], GrRect rect,
                               GrScalar width) {
    const GrScalar rad = GrScalarHalf(width);
    rect.sort();

    // Alternate inner/outer vertices around the rect, half a stroke width
    // to each side of each edge.
    verts[0].set(rect.fLeft + rad, rect.fTop + rad);
    verts[1].set(rect.fLeft - rad, rect.fTop - rad);
    verts[2].set(rect.fRight - rad, rect.fTop + rad);
    verts[3].set(rect.fRight + rad, rect.fTop - rad);
    verts[4].set(rect.fRight - rad, rect.fBottom - rad);
    verts[5].set(rect.fRight + rad, rect.fBottom + rad);
    verts[6].set(rect.fLeft + rad, rect.fBottom - rad);
    verts[7].set(rect.fLeft - rad, rect.fBottom + rad);
    verts[8] = verts[0];
    verts[9] = verts[1];
}

/**
 * Returns true if the rects edges are integer-aligned.
 */
static bool isIRect(const GrRect& r) {
    return GrScalarIsInt(r.fLeft) && GrScalarIsInt(r.fTop) &&
           GrScalarIsInt(r.fRight) && GrScalarIsInt(r.fBottom);
}

// Decides whether a rect draw should take the coverage-AA path. On success
// outputs the device-space rect, the combined (view * matrix) transform, and
// whether coverage must ride in per-vertex colors rather than tweaked alpha.
// Rejects: incompatible blends, MSAA targets (AA is free there), hairlines
// the HW already antialiases, and non-axis-aligned transforms.
static bool apply_aa_to_rect(GrDrawTarget* target,
                             const GrRect& rect,
                             GrScalar width,
                             const GrMatrix* matrix,
                             GrMatrix* combinedMatrix,
                             GrRect* devRect,
                             bool* useVertexCoverage) {
    // we use a simple coverage ramp to do aa on axis-aligned rects
    // we check if the rect will be axis-aligned, and the rect won't land on
    // integer coords.

    // we are keeping around the "tweak the alpha" trick because
    // it is our only hope for the fixed-pipe implementation.
    // In a shader implementation we can give a separate coverage input
    // TODO: remove this ugliness when we drop the fixed-pipe impl
    *useVertexCoverage = false;
    if (!target->canTweakAlphaForCoverage()) {
        if (disable_coverage_aa_for_blend(target)) {
#if GR_DEBUG
            //GrPrintf("Turning off AA to correctly apply blend.\n");
#endif
            return false;
        } else {
            *useVertexCoverage = true;
        }
    }
    const GrDrawState& drawState = target->getDrawState();
    if (drawState.getRenderTarget()->isMultisampled()) {
        return false;
    }

    if (0 == width && target->willUseHWAALines()) {
        return false;
    }

    if (!drawState.getViewMatrix().preservesAxisAlignment()) {
        return false;
    }

    if (NULL != matrix &&
        !matrix->preservesAxisAlignment()) {
        return false;
    }

    *combinedMatrix = drawState.getViewMatrix();
    if (NULL != matrix) {
        combinedMatrix->preConcat(*matrix);
        GrAssert(combinedMatrix->preservesAxisAlignment());
    }

    combinedMatrix->mapRect(devRect, rect);
    devRect->sort();

    if (width < 0) {
        // Filled rect: pixel-aligned edges need no AA.
        return !isIRect(*devRect);
    } else {
        return true;
    }
}

// Draws a rect: width < 0 fills, width == 0 draws a hairline, width > 0
// strokes. Dispatches between the coverage-AA renderer, generated
// stroke/hairline geometry, and (for fills) either a static unit-square VB
// or a simple rect draw, depending on AA applicability and build flags.
void GrContext::drawRect(const GrPaint& paint,
                         const GrRect& rect,
                         GrScalar width,
                         const GrMatrix* matrix) {
    SK_TRACE_EVENT0("GrContext::drawRect");

    GrDrawTarget* target = this->prepareToDraw(paint, kUnbuffered_DrawCategory);
    GrDrawState::AutoStageDisable atr(fDrawState);

    GrRect devRect = rect;
    GrMatrix combinedMatrix;
    bool useVertexCoverage;
    bool needAA = paint.fAntiAlias &&
                  !this->getRenderTarget()->isMultisampled();
    bool doAA = needAA && apply_aa_to_rect(target, rect, width, matrix,
                                           &combinedMatrix, &devRect,
                                           &useVertexCoverage);

    if (doAA) {
        // AA path draws in device coords (view matrix already folded into
        // devRect by apply_aa_to_rect).
        GrDrawTarget::AutoDeviceCoordDraw adcd(target);
        if (!adcd.succeeded()) {
            return;
        }
        if (width >= 0) {
            GrVec strokeSize;;
            if (width > 0) {
                // Map the stroke width into device space.
                strokeSize.set(width, width);
                combinedMatrix.mapVectors(&strokeSize, 1);
                strokeSize.setAbs(strokeSize);
            } else {
                strokeSize.set(GR_Scalar1, GR_Scalar1);
            }
            fAARectRenderer->strokeAARect(this->getGpu(), target, devRect,
                                          strokeSize, useVertexCoverage);
        } else {
            fAARectRenderer->fillAARect(this->getGpu(), target,
                                        devRect, useVertexCoverage);
        }
        return;
    }

    if (width >= 0) {
        // TODO: consider making static vertex buffers for these cases.
        // Hairline could be done by just adding closing vertex to
        // unitSquareVertexBuffer()

        static const int worstCaseVertCount = 10;
        GrDrawTarget::AutoReleaseGeometry geo(target, 0, worstCaseVertCount, 0);

        if (!geo.succeeded()) {
            GrPrintf("Failed to get space for vertices!\n");
            return;
        }

        GrPrimitiveType primType;
        int vertCount;
        GrPoint* vertex = geo.positions();

        if (width > 0) {
            vertCount = 10;
            primType = kTriangleStrip_GrPrimitiveType;
            setStrokeRectStrip(vertex, rect, width);
        } else {
            // hairline
            vertCount = 5;
            primType = kLineStrip_GrPrimitiveType;
            vertex[0].set(rect.fLeft, rect.fTop);
            vertex[1].set(rect.fRight, rect.fTop);
            vertex[2].set(rect.fRight, rect.fBottom);
            vertex[3].set(rect.fLeft, rect.fBottom);
            vertex[4].set(rect.fLeft, rect.fTop);
        }

        GrDrawState::AutoViewMatrixRestore avmr;
        if (NULL != matrix) {
            GrDrawState* drawState = target->drawState();
            avmr.set(drawState);
            drawState->preConcatViewMatrix(*matrix);
            drawState->preConcatSamplerMatrices(*matrix);
        }

        target->drawNonIndexed(primType, 0, vertCount);
    } else {
#if GR_STATIC_RECT_VB
        // Fill via the shared unit-square VB, transformed to the rect.
        const GrVertexBuffer* sqVB = fGpu->getUnitSquareVertexBuffer();
        if (NULL == sqVB) {
            GrPrintf("Failed to create static rect vb.\n");
            return;
        }
        target->setVertexSourceToBuffer(0, sqVB);
        GrDrawState* drawState = target->drawState();
        GrDrawState::AutoViewMatrixRestore avmr(drawState);
        GrMatrix m;
        m.setAll(rect.width(), 0, rect.fLeft,
                 0, rect.height(), rect.fTop,
                 0, 0, GrMatrix::I()[8]);

        if (NULL != matrix) {
            m.postConcat(*matrix);
        }
        drawState->preConcatViewMatrix(m);
        drawState->preConcatSamplerMatrices(m);

        target->drawNonIndexed(kTriangleFan_GrPrimitiveType, 0, 4);
#else
        target->drawSimpleRect(rect, matrix);
#endif
    }
}

// Draws dstRect with the paint's first texture mapped from srcRect.
// Without a texture on stage 0 this degenerates to a plain filled drawRect.
void GrContext::drawRectToRect(const GrPaint& paint,
                               const GrRect& dstRect,
                               const GrRect& srcRect,
                               const GrMatrix* dstMatrix,
                               const GrMatrix* srcMatrix) {
    SK_TRACE_EVENT0("GrContext::drawRectToRect");

    // srcRect refers to paint's first texture
    if (!paint.isTextureStageEnabled(0)) {
        drawRect(paint, dstRect, -1, dstMatrix);
        return;
    }

    GR_STATIC_ASSERT(!BATCH_RECT_TO_RECT || !GR_STATIC_RECT_VB);

#if GR_STATIC_RECT_VB
    GrDrawTarget* target = this->prepareToDraw(paint, kUnbuffered_DrawCategory);
    GrDrawState::AutoStageDisable atr(fDrawState);
    GrDrawState* drawState = target->drawState();
    GrDrawState::AutoViewMatrixRestore avmr(drawState);

    GrMatrix m;

    // Map the unit square onto dstRect (then through dstMatrix).
    m.setAll(dstRect.width(), 0, dstRect.fLeft,
             0, dstRect.height(), dstRect.fTop,
             0, 0, GrMatrix::I()[8]);
    if (NULL != dstMatrix) {
        m.postConcat(*dstMatrix);
    }
    drawState->preConcatViewMatrix(m);

    // we explicitly setup the correct coords for the first stage. The others
    // must know about the view matrix change.
    for (int s = 1; s < GrPaint::kTotalStages; ++s) {
        if (drawState->isStageEnabled(s)) {
            drawState->sampler(s)->preConcatMatrix(m);
        }
    }

    // Stage 0 samples the unit square mapped onto srcRect instead.
    m.setAll(srcRect.width(), 0, srcRect.fLeft,
             0, srcRect.height(), srcRect.fTop,
             0, 0, GrMatrix::I()[8]);
    if (NULL != srcMatrix) {
        m.postConcat(*srcMatrix);
    }
    drawState->sampler(GrPaint::kFirstTextureStage)->preConcatMatrix(m);

    const GrVertexBuffer* sqVB = fGpu->getUnitSquareVertexBuffer();
    if (NULL == sqVB) {
        GrPrintf("Failed to create static rect vb.\n");
        return;
    }
    target->setVertexSourceToBuffer(0, sqVB);
    target->drawNonIndexed(kTriangleFan_GrPrimitiveType, 0, 4);
#else

    GrDrawTarget* target;
#if BATCH_RECT_TO_RECT
    target = this->prepareToDraw(paint, kBuffered_DrawCategory);
#else
    target = this->prepareToDraw(paint, kUnbuffered_DrawCategory);
#endif
    GrDrawState::AutoStageDisable atr(fDrawState);

    const GrRect* srcRects[GrDrawState::kNumStages] = {NULL};
    const GrMatrix* srcMatrices[GrDrawState::kNumStages] = {NULL};
    srcRects[0] = &srcRect;
    srcMatrices[0] = srcMatrix;

    target->drawRect(dstRect, dstMatrix, srcRects, srcMatrices);
#endif
}

// Draws user-supplied geometry. When texcoords/colors are present the
// vertices are interleaved into a scratch buffer matching the computed
// layout; positions-only input is drawn directly from the caller's array.
void GrContext::drawVertices(const GrPaint& paint,
                             GrPrimitiveType primitiveType,
                             int vertexCount,
                             const GrPoint positions[],
                             const GrPoint texCoords[],
                             const GrColor colors[],
                             const uint16_t indices[],
                             int indexCount) {
    SK_TRACE_EVENT0("GrContext::drawVertices");

    GrDrawTarget::AutoReleaseGeometry geo;

    GrDrawTarget* target = this->prepareToDraw(paint, kUnbuffered_DrawCategory);
    GrDrawState::AutoStageDisable atr(fDrawState);

    GrVertexLayout layout = 0;
    if (NULL != texCoords) {
        layout |= GrDrawTarget::StageTexCoordVertexLayoutBit(0, 0);
    }
    if (NULL != colors) {
        layout |= GrDrawTarget::kColor_VertexLayoutBit;
    }
    int vertexSize = GrDrawTarget::VertexSize(layout);

    if (sizeof(GrPoint) != vertexSize) {
        // Interleave position + optional texcoord/color per vertex.
        if (!geo.set(target, layout, vertexCount, 0)) {
            GrPrintf("Failed to get space for vertices!\n");
            return;
        }
        int texOffsets[GrDrawState::kMaxTexCoords];
        int colorOffset;
        GrDrawTarget::VertexSizeAndOffsetsByIdx(layout,
                                                texOffsets,
                                                &colorOffset,
                                                NULL,
                                                NULL);
        void* curVertex = geo.vertices();

        for (int i = 0; i < vertexCount; ++i) {
            *((GrPoint*)curVertex) = positions[i];

            if (texOffsets[0] > 0) {
                *(GrPoint*)((intptr_t)curVertex + texOffsets[0]) = texCoords[i];
            }
            if (colorOffset > 0) {
                *(GrColor*)((intptr_t)curVertex + colorOffset) = colors[i];
            }
            curVertex = (void*)((intptr_t)curVertex + vertexSize);
        }
    } else {
        target->setVertexSourceToArray(layout, positions, vertexCount);
    }

    // we don't currently apply offscreen AA to this path. Need improved
    // management of GrDrawTarget's geometry to avoid copying points per-tile.

    if (NULL != indices) {
        target->setIndexSourceToArray(indices, indexCount);
        target->drawIndexed(primitiveType, 0, 0, vertexCount, indexCount);
    } else {
        target->drawNonIndexed(primitiveType, 0, vertexCount);
    }
}

///////////////////////////////////////////////////////////////////////////////
namespace {

// Per-vertex data for the shader-based circle renderer (kEdge layout).
struct CircleVertex {
    GrPoint fPos;
    GrPoint fCenter;
    GrScalar fOuterRadius;
    GrScalar fInnerRadius;
};

/* Returns true if will map a circle to another circle. This can be true
 * if the matrix only includes square-scale, rotation, translation.
 */
inline bool isSimilarityTransformation(const SkMatrix& matrix,
                                       SkScalar tol = SK_ScalarNearlyZero) {
    if (matrix.isIdentity() || matrix.getType() == SkMatrix::kTranslate_Mask) {
        return true;
    }
    if (matrix.hasPerspective()) {
        return false;
    }

    SkScalar mx = matrix.get(SkMatrix::kMScaleX);
    SkScalar sx = matrix.get(SkMatrix::kMSkewX);
    SkScalar my = matrix.get(SkMatrix::kMScaleY);
    SkScalar sy = matrix.get(SkMatrix::kMSkewY);

    if (mx == 0 && sx == 0 && my == 0 && sy == 0) {
        return false;
    }

    // it has scales or skews, but it could also be rotation, check it out.
    // Similarity holds iff the two basis vectors are orthogonal and of
    // equal length.
    SkVector vec[2];
    vec[0].set(mx, sx);
    vec[1].set(sy, my);

    return SkScalarNearlyZero(vec[0].dot(vec[1]), SkScalarSquare(tol)) &&
           SkScalarNearlyEqual(vec[0].lengthSqd(), vec[1].lengthSqd(),
                SkScalarSquare(tol));
}

}

// TODO: strokeWidth can't be larger than zero right now.
// It will be fixed when drawPath() can handle strokes.
//
// Draws an oval. True circles under a similarity transform with AA take a
// fast shader path (a quad with per-pixel circle-edge coverage); everything
// else falls back to internalDrawPath on the oval's path.
void GrContext::drawOval(const GrPaint& paint,
                         const GrRect& rect,
                         SkScalar strokeWidth) {
    GrAssert(strokeWidth <= 0);
    if (!isSimilarityTransformation(this->getMatrix()) ||
        !paint.fAntiAlias ||
        rect.height() != rect.width()) {
        SkPath path;
        path.addOval(rect);
        GrPathFill fill = (strokeWidth == 0) ?
                           kHairLine_GrPathFill : kWinding_GrPathFill;
        this->internalDrawPath(paint, path, fill, NULL);
        return;
    }

    DrawCategory category = (DEFER_PATHS) ? kBuffered_DrawCategory :
                                            kUnbuffered_DrawCategory;
    GrDrawTarget* target = this->prepareToDraw(paint, category);
    GrDrawState* drawState = target->drawState();
    GrDrawState::AutoStageDisable atr(fDrawState);
    const GrMatrix vm = drawState->getViewMatrix();

    const GrRenderTarget* rt = drawState->getRenderTarget();
    if (NULL == rt) {
        return;
    }

    // Geometry is emitted in device space.
    GrDrawTarget::AutoDeviceCoordDraw adcd(target);
    if (!adcd.succeeded()) {
        return;
    }

    GrVertexLayout layout = GrDrawTarget::kEdge_VertexLayoutBit;
    GrAssert(sizeof(CircleVertex) == GrDrawTarget::VertexSize(layout));

    GrPoint center = GrPoint::Make(rect.centerX(), rect.centerY());
    GrScalar radius = SkScalarHalf(rect.width());

    // Transform center and radius into device space (valid because the
    // matrix was verified to be a similarity transform above).
    vm.mapPoints(&center, 1);
    radius = vm.mapRadius(radius);

    GrScalar outerRadius = radius;
    GrScalar innerRadius = 0;
    SkScalar halfWidth = 0;
    if (strokeWidth == 0) {
        // Hairline: one-pixel-wide ring centered on the radius.
        halfWidth = SkScalarHalf(SK_Scalar1);

        outerRadius += halfWidth;
        innerRadius = SkMaxScalar(0, radius - halfWidth);
    }

    GrDrawTarget::AutoReleaseGeometry geo(target, layout, 4, 0);
    if (!geo.succeeded()) {
        GrPrintf("Failed to get space for vertices!\n");
        return;
    }

    CircleVertex* verts = reinterpret_cast<CircleVertex*>(geo.vertices());

    // The fragment shader will extend the radius out half a pixel
    // to antialias. Expand the drawn rect here so all the pixels
    // will be captured.
    SkScalar L = center.fX - outerRadius - SkFloatToScalar(0.5f);
    SkScalar R = center.fX + outerRadius + SkFloatToScalar(0.5f);
    SkScalar T = center.fY - outerRadius - SkFloatToScalar(0.5f);
    SkScalar B = center.fY + outerRadius + SkFloatToScalar(0.5f);

    verts[0].fPos = SkPoint::Make(L, T);
    verts[1].fPos = SkPoint::Make(R, T);
    verts[2].fPos = SkPoint::Make(L, B);
    verts[3].fPos = SkPoint::Make(R, B);

    for (int i = 0; i < 4; ++i) {
        // this goes to fragment shader, it should be in y-points-up space.
        verts[i].fCenter = SkPoint::Make(center.fX, rt->height() - center.fY);

        verts[i].fOuterRadius = outerRadius;
        verts[i].fInnerRadius = innerRadius;
    }

    drawState->setVertexEdgeType(GrDrawState::kCircle_EdgeType);
    target->drawNonIndexed(kTriangleStrip_GrPrimitiveType, 0, 4);
}

// Public path entry point. Handles the trivial cases (empty path: fill the
// clip for inverse fills; oval-shaped paths: route to drawOval) and
// delegates everything else to internalDrawPath.
void GrContext::drawPath(const GrPaint& paint, const SkPath& path,
                         GrPathFill fill, const GrPoint* translate) {

    if (path.isEmpty()) {
       if (GrIsFillInverted(fill)) {
           this->drawPaint(paint);
       }
       return;
    }

    SkRect ovalRect;
    if (!GrIsFillInverted(fill) && path.isOval(&ovalRect)) {
        if (translate) {
            ovalRect.offset(*translate);
        }
        SkScalar width = (fill == kHairLine_GrPathFill) ? 0 : -SK_Scalar1;
        this->drawOval(paint, ovalRect, width);
        return;
    }

    internalDrawPath(paint, path, fill, translate);
}

void GrContext::internalDrawPath(const GrPaint& paint, const SkPath& path,
                                 GrPathFill fill, const GrPoint* translate) {

    // Note that below we may sw-rasterize the path into a scratch texture.
    // Scratch textures can be recycled after they are returned to the texture
    // cache. This presents a potential hazard for buffered drawing. However,
    // the writePixels that uploads to the scratch will perform a flush so we're
    // OK.
    DrawCategory category = (DEFER_PATHS) ?
kBuffered_DrawCategory : 1151 kUnbuffered_DrawCategory; 1152 GrDrawTarget* target = this->prepareToDraw(paint, category); 1153 GrDrawState::AutoStageDisable atr(fDrawState); 1154 1155 bool prAA = paint.fAntiAlias && !this->getRenderTarget()->isMultisampled(); 1156 1157 // An Assumption here is that path renderer would use some form of tweaking 1158 // the src color (either the input alpha or in the frag shader) to implement 1159 // aa. If we have some future driver-mojo path AA that can do the right 1160 // thing WRT to the blend then we'll need some query on the PR. 1161 if (disable_coverage_aa_for_blend(target)) { 1162#if GR_DEBUG 1163 //GrPrintf("Turning off AA to correctly apply blend.\n"); 1164#endif 1165 prAA = false; 1166 } 1167 1168 GrPathRenderer* pr = this->getPathRenderer(path, fill, target, prAA, true); 1169 if (NULL == pr) { 1170#if GR_DEBUG 1171 GrPrintf("Unable to find path renderer compatible with path.\n"); 1172#endif 1173 return; 1174 } 1175 1176 pr->drawPath(path, fill, translate, target, prAA); 1177} 1178 1179//////////////////////////////////////////////////////////////////////////////// 1180 1181void GrContext::flush(int flagsBitfield) { 1182 if (kDiscard_FlushBit & flagsBitfield) { 1183 fDrawBuffer->reset(); 1184 } else { 1185 this->flushDrawBuffer(); 1186 } 1187 if (kForceCurrentRenderTarget_FlushBit & flagsBitfield) { 1188 fGpu->forceRenderTargetFlush(); 1189 } 1190} 1191 1192void GrContext::flushDrawBuffer() { 1193 if (fDrawBuffer) { 1194 // With addition of the AA clip path, flushing the draw buffer can 1195 // result in the generation of an AA clip mask. 
During this 1196 // process the SW path renderer may be invoked which recusively 1197 // calls this method (via internalWriteTexturePixels) creating 1198 // infinite recursion 1199 GrInOrderDrawBuffer* temp = fDrawBuffer; 1200 fDrawBuffer = NULL; 1201 1202 temp->flushTo(fGpu); 1203 1204 fDrawBuffer = temp; 1205 } 1206} 1207 1208void GrContext::internalWriteTexturePixels(GrTexture* texture, 1209 int left, int top, 1210 int width, int height, 1211 GrPixelConfig config, 1212 const void* buffer, 1213 size_t rowBytes, 1214 uint32_t flags) { 1215 SK_TRACE_EVENT0("GrContext::writeTexturePixels"); 1216 ASSERT_OWNED_RESOURCE(texture); 1217 1218 if (!(kDontFlush_PixelOpsFlag & flags)) { 1219 this->flush(); 1220 } 1221 // TODO: use scratch texture to perform conversion 1222 if (GrPixelConfigIsUnpremultiplied(texture->config()) != 1223 GrPixelConfigIsUnpremultiplied(config)) { 1224 return; 1225 } 1226 1227 fGpu->writeTexturePixels(texture, left, top, width, height, 1228 config, buffer, rowBytes); 1229} 1230 1231bool GrContext::internalReadTexturePixels(GrTexture* texture, 1232 int left, int top, 1233 int width, int height, 1234 GrPixelConfig config, 1235 void* buffer, 1236 size_t rowBytes, 1237 uint32_t flags) { 1238 SK_TRACE_EVENT0("GrContext::readTexturePixels"); 1239 ASSERT_OWNED_RESOURCE(texture); 1240 1241 // TODO: code read pixels for textures that aren't also rendertargets 1242 GrRenderTarget* target = texture->asRenderTarget(); 1243 if (NULL != target) { 1244 return this->internalReadRenderTargetPixels(target, 1245 left, top, width, height, 1246 config, buffer, rowBytes, 1247 flags); 1248 } else { 1249 return false; 1250 } 1251} 1252 1253#include "SkConfig8888.h" 1254 1255namespace { 1256/** 1257 * Converts a GrPixelConfig to a SkCanvas::Config8888. Only byte-per-channel 1258 * formats are representable as Config8888 and so the function returns false 1259 * if the GrPixelConfig has no equivalent Config8888. 
1260 */ 1261bool grconfig_to_config8888(GrPixelConfig config, 1262 SkCanvas::Config8888* config8888) { 1263 switch (config) { 1264 case kRGBA_8888_PM_GrPixelConfig: 1265 *config8888 = SkCanvas::kRGBA_Premul_Config8888; 1266 return true; 1267 case kRGBA_8888_UPM_GrPixelConfig: 1268 *config8888 = SkCanvas::kRGBA_Unpremul_Config8888; 1269 return true; 1270 case kBGRA_8888_PM_GrPixelConfig: 1271 *config8888 = SkCanvas::kBGRA_Premul_Config8888; 1272 return true; 1273 case kBGRA_8888_UPM_GrPixelConfig: 1274 *config8888 = SkCanvas::kBGRA_Unpremul_Config8888; 1275 return true; 1276 default: 1277 return false; 1278 } 1279} 1280} 1281 1282bool GrContext::internalReadRenderTargetPixels(GrRenderTarget* target, 1283 int left, int top, 1284 int width, int height, 1285 GrPixelConfig config, 1286 void* buffer, 1287 size_t rowBytes, 1288 uint32_t flags) { 1289 SK_TRACE_EVENT0("GrContext::readRenderTargetPixels"); 1290 ASSERT_OWNED_RESOURCE(target); 1291 1292 if (NULL == target) { 1293 target = fDrawState->getRenderTarget(); 1294 if (NULL == target) { 1295 return false; 1296 } 1297 } 1298 1299 if (!(kDontFlush_PixelOpsFlag & flags)) { 1300 this->flush(); 1301 } 1302 1303 if (!GrPixelConfigIsUnpremultiplied(target->config()) && 1304 GrPixelConfigIsUnpremultiplied(config) && 1305 !fGpu->canPreserveReadWriteUnpremulPixels()) { 1306 SkCanvas::Config8888 srcConfig8888, dstConfig8888; 1307 if (!grconfig_to_config8888(target->config(), &srcConfig8888) || 1308 !grconfig_to_config8888(config, &dstConfig8888)) { 1309 return false; 1310 } 1311 // do read back using target's own config 1312 this->internalReadRenderTargetPixels(target, 1313 left, top, 1314 width, height, 1315 target->config(), 1316 buffer, rowBytes, 1317 kDontFlush_PixelOpsFlag); 1318 // sw convert the pixels to unpremul config 1319 uint32_t* pixels = reinterpret_cast<uint32_t*>(buffer); 1320 SkConvertConfig8888Pixels(pixels, rowBytes, dstConfig8888, 1321 pixels, rowBytes, srcConfig8888, 1322 width, height); 1323 return true; 
1324 } 1325 1326 GrTexture* src = target->asTexture(); 1327 bool swapRAndB = NULL != src && 1328 fGpu->preferredReadPixelsConfig(config) == 1329 GrPixelConfigSwapRAndB(config); 1330 1331 bool flipY = NULL != src && 1332 fGpu->readPixelsWillPayForYFlip(target, left, top, 1333 width, height, config, 1334 rowBytes); 1335 bool alphaConversion = (!GrPixelConfigIsUnpremultiplied(target->config()) && 1336 GrPixelConfigIsUnpremultiplied(config)); 1337 1338 if (NULL == src && alphaConversion) { 1339 // we should fallback to cpu conversion here. This could happen when 1340 // we were given an external render target by the client that is not 1341 // also a texture (e.g. FBO 0 in GL) 1342 return false; 1343 } 1344 // we draw to a scratch texture if any of these conversion are applied 1345 GrAutoScratchTexture ast; 1346 if (flipY || swapRAndB || alphaConversion) { 1347 GrAssert(NULL != src); 1348 if (swapRAndB) { 1349 config = GrPixelConfigSwapRAndB(config); 1350 GrAssert(kUnknown_GrPixelConfig != config); 1351 } 1352 // Make the scratch a render target because we don't have a robust 1353 // readTexturePixels as of yet (it calls this function). 1354 GrTextureDesc desc; 1355 desc.fFlags = kRenderTarget_GrTextureFlagBit; 1356 desc.fWidth = width; 1357 desc.fHeight = height; 1358 desc.fConfig = config; 1359 1360 // When a full readback is faster than a partial we could always make 1361 // the scratch exactly match the passed rect. However, if we see many 1362 // different size rectangles we will trash our texture cache and pay the 1363 // cost of creating and destroying many textures. So, we only request 1364 // an exact match when the caller is reading an entire RT. 
1365 ScratchTexMatch match = kApprox_ScratchTexMatch; 1366 if (0 == left && 1367 0 == top && 1368 target->width() == width && 1369 target->height() == height && 1370 fGpu->fullReadPixelsIsFasterThanPartial()) { 1371 match = kExact_ScratchTexMatch; 1372 } 1373 ast.set(this, desc, match); 1374 GrTexture* texture = ast.texture(); 1375 if (!texture) { 1376 return false; 1377 } 1378 target = texture->asRenderTarget(); 1379 GrAssert(NULL != target); 1380 1381 GrDrawTarget::AutoStateRestore asr(fGpu, 1382 GrDrawTarget::kReset_ASRInit); 1383 GrDrawState* drawState = fGpu->drawState(); 1384 drawState->setRenderTarget(target); 1385 1386 GrMatrix matrix; 1387 if (flipY) { 1388 matrix.setTranslate(SK_Scalar1 * left, 1389 SK_Scalar1 * (top + height)); 1390 matrix.set(GrMatrix::kMScaleY, -GR_Scalar1); 1391 } else { 1392 matrix.setTranslate(SK_Scalar1 *left, SK_Scalar1 *top); 1393 } 1394 matrix.postIDiv(src->width(), src->height()); 1395 drawState->sampler(0)->reset(matrix); 1396 drawState->sampler(0)->setRAndBSwap(swapRAndB); 1397 drawState->createTextureEffect(0, src); 1398 GrRect rect; 1399 rect.setXYWH(0, 0, SK_Scalar1 * width, SK_Scalar1 * height); 1400 fGpu->drawSimpleRect(rect, NULL); 1401 left = 0; 1402 top = 0; 1403 } 1404 return fGpu->readPixels(target, 1405 left, top, width, height, 1406 config, buffer, rowBytes, flipY); 1407} 1408 1409void GrContext::resolveRenderTarget(GrRenderTarget* target) { 1410 GrAssert(target); 1411 ASSERT_OWNED_RESOURCE(target); 1412 // In the future we may track whether there are any pending draws to this 1413 // target. We don't today so we always perform a flush. We don't promise 1414 // this to our clients, though. 
1415 this->flush(); 1416 fGpu->resolveRenderTarget(target); 1417} 1418 1419void GrContext::copyTexture(GrTexture* src, GrRenderTarget* dst) { 1420 if (NULL == src || NULL == dst) { 1421 return; 1422 } 1423 ASSERT_OWNED_RESOURCE(src); 1424 1425 // Writes pending to the source texture are not tracked, so a flush 1426 // is required to ensure that the copy captures the most recent contents 1427 // of the source texture. See similar behaviour in 1428 // GrContext::resolveRenderTarget. 1429 this->flush(); 1430 1431 GrDrawTarget::AutoStateRestore asr(fGpu, GrDrawTarget::kReset_ASRInit); 1432 GrDrawState* drawState = fGpu->drawState(); 1433 drawState->setRenderTarget(dst); 1434 GrMatrix sampleM; 1435 sampleM.setIDiv(src->width(), src->height()); 1436 drawState->sampler(0)->reset(sampleM); 1437 drawState->createTextureEffect(0, src); 1438 SkRect rect = SkRect::MakeXYWH(0, 0, 1439 SK_Scalar1 * src->width(), 1440 SK_Scalar1 * src->height()); 1441 fGpu->drawSimpleRect(rect, NULL); 1442} 1443 1444void GrContext::internalWriteRenderTargetPixels(GrRenderTarget* target, 1445 int left, int top, 1446 int width, int height, 1447 GrPixelConfig config, 1448 const void* buffer, 1449 size_t rowBytes, 1450 uint32_t flags) { 1451 SK_TRACE_EVENT0("GrContext::writeRenderTargetPixels"); 1452 ASSERT_OWNED_RESOURCE(target); 1453 1454 if (NULL == target) { 1455 target = fDrawState->getRenderTarget(); 1456 if (NULL == target) { 1457 return; 1458 } 1459 } 1460 1461 // TODO: when underlying api has a direct way to do this we should use it 1462 // (e.g. glDrawPixels on desktop GL). 1463 1464 // If the RT is also a texture and we don't have to do PM/UPM conversion 1465 // then take the texture path, which we expect to be at least as fast or 1466 // faster since it doesn't use an intermediate texture as we do below. 1467 1468#if !GR_MAC_BUILD 1469 // At least some drivers on the Mac get confused when glTexImage2D is called 1470 // on a texture attached to an FBO. The FBO still sees the old image. 
TODO: 1471 // determine what OS versions and/or HW is affected. 1472 if (NULL != target->asTexture() && 1473 GrPixelConfigIsUnpremultiplied(target->config()) == 1474 GrPixelConfigIsUnpremultiplied(config)) { 1475 1476 this->internalWriteTexturePixels(target->asTexture(), 1477 left, top, width, height, 1478 config, buffer, rowBytes, flags); 1479 return; 1480 } 1481#endif 1482 if (!GrPixelConfigIsUnpremultiplied(target->config()) && 1483 GrPixelConfigIsUnpremultiplied(config) && 1484 !fGpu->canPreserveReadWriteUnpremulPixels()) { 1485 SkCanvas::Config8888 srcConfig8888, dstConfig8888; 1486 if (!grconfig_to_config8888(config, &srcConfig8888) || 1487 !grconfig_to_config8888(target->config(), &dstConfig8888)) { 1488 return; 1489 } 1490 // allocate a tmp buffer and sw convert the pixels to premul 1491 SkAutoSTMalloc<128 * 128, uint32_t> tmpPixels(width * height); 1492 const uint32_t* src = reinterpret_cast<const uint32_t*>(buffer); 1493 SkConvertConfig8888Pixels(tmpPixels.get(), 4 * width, dstConfig8888, 1494 src, rowBytes, srcConfig8888, 1495 width, height); 1496 // upload the already premul pixels 1497 this->internalWriteRenderTargetPixels(target, 1498 left, top, 1499 width, height, 1500 target->config(), 1501 tmpPixels, 4 * width, flags); 1502 return; 1503 } 1504 1505 bool swapRAndB = fGpu->preferredReadPixelsConfig(config) == 1506 GrPixelConfigSwapRAndB(config); 1507 if (swapRAndB) { 1508 config = GrPixelConfigSwapRAndB(config); 1509 } 1510 1511 GrTextureDesc desc; 1512 desc.fWidth = width; 1513 desc.fHeight = height; 1514 desc.fConfig = config; 1515 1516 GrAutoScratchTexture ast(this, desc); 1517 GrTexture* texture = ast.texture(); 1518 if (NULL == texture) { 1519 return; 1520 } 1521 this->internalWriteTexturePixels(texture, 0, 0, width, height, 1522 config, buffer, rowBytes, flags); 1523 1524 GrDrawTarget::AutoStateRestore asr(fGpu, GrDrawTarget::kReset_ASRInit); 1525 GrDrawState* drawState = fGpu->drawState(); 1526 1527 GrMatrix matrix; 1528 
matrix.setTranslate(GrIntToScalar(left), GrIntToScalar(top)); 1529 drawState->setViewMatrix(matrix); 1530 drawState->setRenderTarget(target); 1531 1532 matrix.setIDiv(texture->width(), texture->height()); 1533 drawState->sampler(0)->reset(matrix); 1534 drawState->createTextureEffect(0, texture); 1535 drawState->sampler(0)->setRAndBSwap(swapRAndB); 1536 1537 static const GrVertexLayout layout = 0; 1538 static const int VCOUNT = 4; 1539 // TODO: Use GrGpu::drawRect here 1540 GrDrawTarget::AutoReleaseGeometry geo(fGpu, layout, VCOUNT, 0); 1541 if (!geo.succeeded()) { 1542 GrPrintf("Failed to get space for vertices!\n"); 1543 return; 1544 } 1545 ((GrPoint*)geo.vertices())->setIRectFan(0, 0, width, height); 1546 fGpu->drawNonIndexed(kTriangleFan_GrPrimitiveType, 0, VCOUNT); 1547} 1548//////////////////////////////////////////////////////////////////////////////// 1549 1550void GrContext::setPaint(const GrPaint& paint) { 1551 GrAssert(fDrawState->stagesDisabled()); 1552 1553 for (int i = 0; i < GrPaint::kMaxTextures; ++i) { 1554 int s = i + GrPaint::kFirstTextureStage; 1555 if (paint.isTextureStageEnabled(i)) { 1556 *fDrawState->sampler(s) = paint.getTextureSampler(i); 1557 } 1558 } 1559 1560 fDrawState->setFirstCoverageStage(GrPaint::kFirstMaskStage); 1561 1562 for (int i = 0; i < GrPaint::kMaxMasks; ++i) { 1563 int s = i + GrPaint::kFirstMaskStage; 1564 if (paint.isMaskStageEnabled(i)) { 1565 *fDrawState->sampler(s) = paint.getMaskSampler(i); 1566 } 1567 } 1568 1569 // disable all stages not accessible via the paint 1570 for (int s = GrPaint::kTotalStages; s < GrDrawState::kNumStages; ++s) { 1571 fDrawState->disableStage(s); 1572 } 1573 1574 fDrawState->setColor(paint.fColor); 1575 1576 if (paint.fDither) { 1577 fDrawState->enableState(GrDrawState::kDither_StateBit); 1578 } else { 1579 fDrawState->disableState(GrDrawState::kDither_StateBit); 1580 } 1581 if (paint.fAntiAlias) { 1582 fDrawState->enableState(GrDrawState::kHWAntialias_StateBit); 1583 } else { 1584 
fDrawState->disableState(GrDrawState::kHWAntialias_StateBit); 1585 } 1586 if (paint.fColorMatrixEnabled) { 1587 fDrawState->enableState(GrDrawState::kColorMatrix_StateBit); 1588 fDrawState->setColorMatrix(paint.fColorMatrix); 1589 } else { 1590 fDrawState->disableState(GrDrawState::kColorMatrix_StateBit); 1591 } 1592 fDrawState->setBlendFunc(paint.fSrcBlendCoeff, paint.fDstBlendCoeff); 1593 fDrawState->setColorFilter(paint.fColorFilterColor, paint.fColorFilterXfermode); 1594 fDrawState->setCoverage(paint.fCoverage); 1595#if GR_DEBUG_PARTIAL_COVERAGE_CHECK 1596 if ((paint.hasMask() || 0xff != paint.fCoverage) && 1597 !fGpu->canApplyCoverage()) { 1598 GrPrintf("Partial pixel coverage will be incorrectly blended.\n"); 1599 } 1600#endif 1601} 1602 1603GrDrawTarget* GrContext::prepareToDraw(const GrPaint& paint, 1604 DrawCategory category) { 1605 if (category != fLastDrawCategory) { 1606 this->flushDrawBuffer(); 1607 fLastDrawCategory = category; 1608 } 1609 this->setPaint(paint); 1610 GrDrawTarget* target = fGpu; 1611 switch (category) { 1612 case kUnbuffered_DrawCategory: 1613 target = fGpu; 1614 break; 1615 case kBuffered_DrawCategory: 1616 target = fDrawBuffer; 1617 fDrawBuffer->setClip(fGpu->getClip()); 1618 break; 1619 default: 1620 GrCrash("Unexpected DrawCategory."); 1621 break; 1622 } 1623 return target; 1624} 1625 1626/* 1627 * This method finds a path renderer that can draw the specified path on 1628 * the provided target. 1629 * Due to its expense, the software path renderer has split out so it can 1630 * can be individually allowed/disallowed via the "allowSW" boolean. 
1631 */ 1632GrPathRenderer* GrContext::getPathRenderer(const SkPath& path, 1633 GrPathFill fill, 1634 const GrDrawTarget* target, 1635 bool antiAlias, 1636 bool allowSW) { 1637 if (NULL == fPathRendererChain) { 1638 fPathRendererChain = 1639 SkNEW_ARGS(GrPathRendererChain, 1640 (this, GrPathRendererChain::kNone_UsageFlag)); 1641 } 1642 1643 GrPathRenderer* pr = fPathRendererChain->getPathRenderer(path, fill, 1644 target, 1645 antiAlias); 1646 1647 if (NULL == pr && allowSW) { 1648 if (NULL == fSoftwarePathRenderer) { 1649 fSoftwarePathRenderer = SkNEW_ARGS(GrSoftwarePathRenderer, (this)); 1650 } 1651 1652 pr = fSoftwarePathRenderer; 1653 } 1654 1655 return pr; 1656} 1657 1658//////////////////////////////////////////////////////////////////////////////// 1659 1660void GrContext::setRenderTarget(GrRenderTarget* target) { 1661 ASSERT_OWNED_RESOURCE(target); 1662 if (fDrawState->getRenderTarget() != target) { 1663 this->flush(false); 1664 fDrawState->setRenderTarget(target); 1665 } 1666} 1667 1668GrRenderTarget* GrContext::getRenderTarget() { 1669 return fDrawState->getRenderTarget(); 1670} 1671 1672const GrRenderTarget* GrContext::getRenderTarget() const { 1673 return fDrawState->getRenderTarget(); 1674} 1675 1676bool GrContext::isConfigRenderable(GrPixelConfig config) const { 1677 return fGpu->isConfigRenderable(config); 1678} 1679 1680const GrMatrix& GrContext::getMatrix() const { 1681 return fDrawState->getViewMatrix(); 1682} 1683 1684void GrContext::setMatrix(const GrMatrix& m) { 1685 fDrawState->setViewMatrix(m); 1686} 1687 1688void GrContext::concatMatrix(const GrMatrix& m) const { 1689 fDrawState->preConcatViewMatrix(m); 1690} 1691 1692static inline intptr_t setOrClear(intptr_t bits, int shift, intptr_t pred) { 1693 intptr_t mask = 1 << shift; 1694 if (pred) { 1695 bits |= mask; 1696 } else { 1697 bits &= ~mask; 1698 } 1699 return bits; 1700} 1701 1702GrContext::GrContext(GrGpu* gpu) { 1703 ++THREAD_INSTANCE_COUNT; 1704 1705 fGpu = gpu; 1706 fGpu->ref(); 1707 
fGpu->setContext(this); 1708 1709 fDrawState = SkNEW(GrDrawState); 1710 fGpu->setDrawState(fDrawState); 1711 1712 fPathRendererChain = NULL; 1713 fSoftwarePathRenderer = NULL; 1714 1715 fTextureCache = SkNEW_ARGS(GrResourceCache, 1716 (MAX_TEXTURE_CACHE_COUNT, 1717 MAX_TEXTURE_CACHE_BYTES)); 1718 fFontCache = SkNEW_ARGS(GrFontCache, (fGpu)); 1719 1720 fLastDrawCategory = kUnbuffered_DrawCategory; 1721 1722 fDrawBuffer = NULL; 1723 fDrawBufferVBAllocPool = NULL; 1724 fDrawBufferIBAllocPool = NULL; 1725 1726 fAARectRenderer = SkNEW(GrAARectRenderer); 1727 1728 this->setupDrawBuffer(); 1729} 1730 1731void GrContext::setupDrawBuffer() { 1732 1733 GrAssert(NULL == fDrawBuffer); 1734 GrAssert(NULL == fDrawBufferVBAllocPool); 1735 GrAssert(NULL == fDrawBufferIBAllocPool); 1736 1737#if DEFER_TEXT_RENDERING || BATCH_RECT_TO_RECT || DEFER_PATHS 1738 fDrawBufferVBAllocPool = 1739 SkNEW_ARGS(GrVertexBufferAllocPool, (fGpu, false, 1740 DRAW_BUFFER_VBPOOL_BUFFER_SIZE, 1741 DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS)); 1742 fDrawBufferIBAllocPool = 1743 SkNEW_ARGS(GrIndexBufferAllocPool, (fGpu, false, 1744 DRAW_BUFFER_IBPOOL_BUFFER_SIZE, 1745 DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS)); 1746 1747 fDrawBuffer = SkNEW_ARGS(GrInOrderDrawBuffer, (fGpu, 1748 fDrawBufferVBAllocPool, 1749 fDrawBufferIBAllocPool)); 1750#endif 1751 1752#if BATCH_RECT_TO_RECT 1753 fDrawBuffer->setQuadIndexBuffer(this->getQuadIndexBuffer()); 1754#endif 1755 if (fDrawBuffer) { 1756 fDrawBuffer->setAutoFlushTarget(fGpu); 1757 fDrawBuffer->setDrawState(fDrawState); 1758 } 1759} 1760 1761GrDrawTarget* GrContext::getTextTarget(const GrPaint& paint) { 1762#if DEFER_TEXT_RENDERING 1763 return prepareToDraw(paint, kBuffered_DrawCategory); 1764#else 1765 return prepareToDraw(paint, kUnbuffered_DrawCategory); 1766#endif 1767} 1768 1769const GrIndexBuffer* GrContext::getQuadIndexBuffer() const { 1770 return fGpu->getQuadIndexBuffer(); 1771} 1772 1773GrTexture* GrContext::gaussianBlur(GrTexture* srcTexture, 1774 bool canClobberSrc, 
1775 const SkRect& rect, 1776 float sigmaX, float sigmaY) { 1777 ASSERT_OWNED_RESOURCE(srcTexture); 1778 GrRenderTarget* oldRenderTarget = this->getRenderTarget(); 1779 AutoMatrix avm(this, GrMatrix::I()); 1780 SkIRect clearRect; 1781 int scaleFactorX, radiusX; 1782 int scaleFactorY, radiusY; 1783 sigmaX = adjust_sigma(sigmaX, &scaleFactorX, &radiusX); 1784 sigmaY = adjust_sigma(sigmaY, &scaleFactorY, &radiusY); 1785 1786 SkRect srcRect(rect); 1787 scale_rect(&srcRect, 1.0f / scaleFactorX, 1.0f / scaleFactorY); 1788 srcRect.roundOut(); 1789 scale_rect(&srcRect, static_cast<float>(scaleFactorX), 1790 static_cast<float>(scaleFactorY)); 1791 1792 AutoClip acs(this, srcRect); 1793 1794 GrAssert(kBGRA_8888_PM_GrPixelConfig == srcTexture->config() || 1795 kRGBA_8888_PM_GrPixelConfig == srcTexture->config() || 1796 kAlpha_8_GrPixelConfig == srcTexture->config()); 1797 1798 GrTextureDesc desc; 1799 desc.fFlags = kRenderTarget_GrTextureFlagBit | kNoStencil_GrTextureFlagBit; 1800 desc.fWidth = SkScalarFloorToInt(srcRect.width()); 1801 desc.fHeight = SkScalarFloorToInt(srcRect.height()); 1802 desc.fConfig = srcTexture->config(); 1803 1804 GrAutoScratchTexture temp1, temp2; 1805 GrTexture* dstTexture = temp1.set(this, desc); 1806 GrTexture* tempTexture = canClobberSrc ? srcTexture : temp2.set(this, desc); 1807 1808 GrPaint paint; 1809 paint.reset(); 1810 paint.textureSampler(0)->textureParams()->setBilerp(true); 1811 1812 for (int i = 1; i < scaleFactorX || i < scaleFactorY; i *= 2) { 1813 paint.textureSampler(0)->matrix()->setIDiv(srcTexture->width(), 1814 srcTexture->height()); 1815 this->setRenderTarget(dstTexture->asRenderTarget()); 1816 SkRect dstRect(srcRect); 1817 scale_rect(&dstRect, i < scaleFactorX ? 0.5f : 1.0f, 1818 i < scaleFactorY ? 
0.5f : 1.0f); 1819 paint.textureSampler(0)->setCustomStage(SkNEW_ARGS(GrSingleTextureEffect, 1820 (srcTexture)))->unref(); 1821 this->drawRectToRect(paint, dstRect, srcRect); 1822 srcRect = dstRect; 1823 srcTexture = dstTexture; 1824 SkTSwap(dstTexture, tempTexture); 1825 } 1826 1827 SkIRect srcIRect; 1828 srcRect.roundOut(&srcIRect); 1829 1830 if (sigmaX > 0.0f) { 1831 if (scaleFactorX > 1) { 1832 // Clear out a radius to the right of the srcRect to prevent the 1833 // X convolution from reading garbage. 1834 clearRect = SkIRect::MakeXYWH(srcIRect.fRight, srcIRect.fTop, 1835 radiusX, srcIRect.height()); 1836 this->clear(&clearRect, 0x0); 1837 } 1838 1839 this->setRenderTarget(dstTexture->asRenderTarget()); 1840 convolve_gaussian(fGpu, srcTexture, srcRect, sigmaX, radiusX, 1841 Gr1DKernelEffect::kX_Direction); 1842 srcTexture = dstTexture; 1843 SkTSwap(dstTexture, tempTexture); 1844 } 1845 1846 if (sigmaY > 0.0f) { 1847 if (scaleFactorY > 1 || sigmaX > 0.0f) { 1848 // Clear out a radius below the srcRect to prevent the Y 1849 // convolution from reading garbage. 1850 clearRect = SkIRect::MakeXYWH(srcIRect.fLeft, srcIRect.fBottom, 1851 srcIRect.width(), radiusY); 1852 this->clear(&clearRect, 0x0); 1853 } 1854 1855 this->setRenderTarget(dstTexture->asRenderTarget()); 1856 convolve_gaussian(fGpu, srcTexture, srcRect, sigmaY, radiusY, 1857 Gr1DKernelEffect::kY_Direction); 1858 srcTexture = dstTexture; 1859 SkTSwap(dstTexture, tempTexture); 1860 } 1861 1862 if (scaleFactorX > 1 || scaleFactorY > 1) { 1863 // Clear one pixel to the right and below, to accommodate bilinear 1864 // upsampling. 1865 clearRect = SkIRect::MakeXYWH(srcIRect.fLeft, srcIRect.fBottom, 1866 srcIRect.width() + 1, 1); 1867 this->clear(&clearRect, 0x0); 1868 clearRect = SkIRect::MakeXYWH(srcIRect.fRight, srcIRect.fTop, 1869 1, srcIRect.height()); 1870 this->clear(&clearRect, 0x0); 1871 // FIXME: This should be mitchell, not bilinear. 
1872 paint.textureSampler(0)->textureParams()->setBilerp(true); 1873 paint.textureSampler(0)->matrix()->setIDiv(srcTexture->width(), 1874 srcTexture->height()); 1875 this->setRenderTarget(dstTexture->asRenderTarget()); 1876 paint.textureSampler(0)->setCustomStage(SkNEW_ARGS(GrSingleTextureEffect, 1877 (srcTexture)))->unref(); 1878 SkRect dstRect(srcRect); 1879 scale_rect(&dstRect, (float) scaleFactorX, (float) scaleFactorY); 1880 this->drawRectToRect(paint, dstRect, srcRect); 1881 srcRect = dstRect; 1882 srcTexture = dstTexture; 1883 SkTSwap(dstTexture, tempTexture); 1884 } 1885 this->setRenderTarget(oldRenderTarget); 1886 if (srcTexture == temp1.texture()) { 1887 return temp1.detach(); 1888 } else if (srcTexture == temp2.texture()) { 1889 return temp2.detach(); 1890 } else { 1891 srcTexture->ref(); 1892 return srcTexture; 1893 } 1894} 1895 1896GrTexture* GrContext::applyMorphology(GrTexture* srcTexture, 1897 const GrRect& rect, 1898 MorphologyType morphType, 1899 SkISize radius) { 1900 ASSERT_OWNED_RESOURCE(srcTexture); 1901 srcTexture->ref(); 1902 GrRenderTarget* oldRenderTarget = this->getRenderTarget(); 1903 1904 AutoMatrix avm(this, GrMatrix::I()); 1905 1906 AutoClip acs(this, GrRect::MakeWH(SkIntToScalar(srcTexture->width()), 1907 SkIntToScalar(srcTexture->height()))); 1908 GrTextureDesc desc; 1909 desc.fFlags = kRenderTarget_GrTextureFlagBit | kNoStencil_GrTextureFlagBit; 1910 desc.fWidth = SkScalarCeilToInt(rect.width()); 1911 desc.fHeight = SkScalarCeilToInt(rect.height()); 1912 desc.fConfig = kRGBA_8888_PM_GrPixelConfig; 1913 if (radius.fWidth > 0) { 1914 GrAutoScratchTexture ast(this, desc); 1915 this->setRenderTarget(ast.texture()->asRenderTarget()); 1916 apply_morphology(fGpu, srcTexture, rect, radius.fWidth, morphType, 1917 Gr1DKernelEffect::kX_Direction); 1918 SkIRect clearRect = SkIRect::MakeXYWH( 1919 SkScalarFloorToInt(rect.fLeft), 1920 SkScalarFloorToInt(rect.fBottom), 1921 SkScalarFloorToInt(rect.width()), 1922 radius.fHeight); 1923 
this->clear(&clearRect, 0x0); 1924 srcTexture->unref(); 1925 srcTexture = ast.detach(); 1926 } 1927 if (radius.fHeight > 0) { 1928 GrAutoScratchTexture ast(this, desc); 1929 this->setRenderTarget(ast.texture()->asRenderTarget()); 1930 apply_morphology(fGpu, srcTexture, rect, radius.fHeight, morphType, 1931 Gr1DKernelEffect::kY_Direction); 1932 srcTexture->unref(); 1933 srcTexture = ast.detach(); 1934 } 1935 this->setRenderTarget(oldRenderTarget); 1936 return srcTexture; 1937} 1938 1939/////////////////////////////////////////////////////////////////////////////// 1940