GrContext.cpp revision 1e95d715d06c5125ef6e5439e953fd0353be92b2
1 2/* 3 * Copyright 2011 Google Inc. 4 * 5 * Use of this source code is governed by a BSD-style license that can be 6 * found in the LICENSE file. 7 */ 8 9 10#include "GrContext.h" 11 12#include "effects/GrMorphologyEffect.h" 13#include "effects/GrConvolutionEffect.h" 14 15#include "GrBufferAllocPool.h" 16#include "GrClipIterator.h" 17#include "GrGpu.h" 18#include "GrIndexBuffer.h" 19#include "GrInOrderDrawBuffer.h" 20#include "GrPathRenderer.h" 21#include "GrPathUtils.h" 22#include "GrResourceCache.h" 23#include "GrSoftwarePathRenderer.h" 24#include "GrStencilBuffer.h" 25#include "GrTextStrike.h" 26#include "SkTLazy.h" 27#include "SkTLS.h" 28#include "SkTrace.h" 29 30SK_DEFINE_INST_COUNT(GrContext) 31SK_DEFINE_INST_COUNT(GrDrawState) 32 33#define DEFER_TEXT_RENDERING 1 34 35#define DEFER_PATHS 1 36 37#define BATCH_RECT_TO_RECT (1 && !GR_STATIC_RECT_VB) 38 39#define MAX_BLUR_SIGMA 4.0f 40 41// When we're using coverage AA but the blend is incompatible (given gpu 42// limitations) should we disable AA or draw wrong? 43#define DISABLE_COVERAGE_AA_FOR_BLEND 1 44 45#if GR_DEBUG 46 // change this to a 1 to see notifications when partial coverage fails 47 #define GR_DEBUG_PARTIAL_COVERAGE_CHECK 0 48#else 49 #define GR_DEBUG_PARTIAL_COVERAGE_CHECK 0 50#endif 51 52static const size_t MAX_TEXTURE_CACHE_COUNT = 256; 53static const size_t MAX_TEXTURE_CACHE_BYTES = 16 * 1024 * 1024; 54 55static const size_t DRAW_BUFFER_VBPOOL_BUFFER_SIZE = 1 << 15; 56static const int DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS = 4; 57 58// path rendering is the only thing we defer today that uses non-static indices 59static const size_t DRAW_BUFFER_IBPOOL_BUFFER_SIZE = DEFER_PATHS ? 1 << 11 : 0; 60static const int DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS = DEFER_PATHS ? 
4 : 0; 61 62#define ASSERT_OWNED_RESOURCE(R) GrAssert(!(R) || (R)->getContext() == this) 63 64GrContext* GrContext::Create(GrEngine engine, 65 GrPlatform3DContext context3D) { 66 GrContext* ctx = NULL; 67 GrGpu* fGpu = GrGpu::Create(engine, context3D); 68 if (NULL != fGpu) { 69 ctx = SkNEW_ARGS(GrContext, (fGpu)); 70 fGpu->unref(); 71 } 72 return ctx; 73} 74 75namespace { 76void* CreateThreadInstanceCount() { 77 return SkNEW_ARGS(int, (0)); 78} 79void DeleteThreadInstanceCount(void* v) { 80 delete reinterpret_cast<int*>(v); 81} 82#define THREAD_INSTANCE_COUNT \ 83 (*reinterpret_cast<int*>(SkTLS::Get(CreateThreadInstanceCount, \ 84 DeleteThreadInstanceCount))) 85 86} 87 88int GrContext::GetThreadInstanceCount() { 89 return THREAD_INSTANCE_COUNT; 90} 91 92GrContext::~GrContext() { 93 this->flush(); 94 95 // Since the gpu can hold scratch textures, give it a chance to let go 96 // of them before freeing the texture cache 97 fGpu->purgeResources(); 98 99 delete fTextureCache; 100 delete fFontCache; 101 delete fDrawBuffer; 102 delete fDrawBufferVBAllocPool; 103 delete fDrawBufferIBAllocPool; 104 105 fAARectRenderer->unref(); 106 107 fGpu->unref(); 108 GrSafeUnref(fPathRendererChain); 109 GrSafeUnref(fSoftwarePathRenderer); 110 fDrawState->unref(); 111 112 --THREAD_INSTANCE_COUNT; 113} 114 115void GrContext::contextLost() { 116 contextDestroyed(); 117 this->setupDrawBuffer(); 118} 119 120void GrContext::contextDestroyed() { 121 // abandon first to so destructors 122 // don't try to free the resources in the API. 
123 fGpu->abandonResources(); 124 125 // a path renderer may be holding onto resources that 126 // are now unusable 127 GrSafeSetNull(fPathRendererChain); 128 GrSafeSetNull(fSoftwarePathRenderer); 129 130 delete fDrawBuffer; 131 fDrawBuffer = NULL; 132 133 delete fDrawBufferVBAllocPool; 134 fDrawBufferVBAllocPool = NULL; 135 136 delete fDrawBufferIBAllocPool; 137 fDrawBufferIBAllocPool = NULL; 138 139 fAARectRenderer->reset(); 140 141 fTextureCache->removeAll(); 142 fFontCache->freeAll(); 143 fGpu->markContextDirty(); 144} 145 146void GrContext::resetContext() { 147 fGpu->markContextDirty(); 148} 149 150void GrContext::freeGpuResources() { 151 this->flush(); 152 153 fGpu->purgeResources(); 154 155 fAARectRenderer->reset(); 156 157 fTextureCache->removeAll(); 158 fFontCache->freeAll(); 159 // a path renderer may be holding onto resources 160 GrSafeSetNull(fPathRendererChain); 161 GrSafeSetNull(fSoftwarePathRenderer); 162} 163 164size_t GrContext::getGpuTextureCacheBytes() const { 165 return fTextureCache->getCachedResourceBytes(); 166} 167 168//////////////////////////////////////////////////////////////////////////////// 169 170int GrContext::PaintStageVertexLayoutBits( 171 const GrPaint& paint, 172 const bool hasTexCoords[GrPaint::kTotalStages]) { 173 int stageMask = paint.getActiveStageMask(); 174 int layout = 0; 175 for (int i = 0; i < GrPaint::kTotalStages; ++i) { 176 if ((1 << i) & stageMask) { 177 if (NULL != hasTexCoords && hasTexCoords[i]) { 178 layout |= GrDrawTarget::StageTexCoordVertexLayoutBit(i, i); 179 } 180 } 181 } 182 return layout; 183} 184 185 186//////////////////////////////////////////////////////////////////////////////// 187 188GrTexture* GrContext::TextureCacheEntry::texture() const { 189 if (NULL == fEntry) { 190 return NULL; 191 } else { 192 return (GrTexture*) fEntry->resource(); 193 } 194} 195 196namespace { 197 198// we should never have more than one stencil buffer with same combo of 199// (width,height,samplecount) 200void 
namespace {

// we should never have more than one stencil buffer with same combo of
// (width,height,samplecount)
void gen_stencil_key_values(int width, int height,
                            int sampleCnt, uint32_t v[4]) {
    v[0] = width;
    v[1] = height;
    v[2] = sampleCnt;
    v[3] = GrResourceKey::kStencilBuffer_TypeBit;
}

// Overload: derive the key values from an existing stencil buffer.
void gen_stencil_key_values(const GrStencilBuffer* sb,
                            uint32_t v[4]) {
    gen_stencil_key_values(sb->width(), sb->height(),
                           sb->numSamples(), v);
}

// Scales each edge of rect independently. Note this scales about the
// origin, not about the rect's center.
void scale_rect(SkRect* rect, float xScale, float yScale) {
    rect->fLeft = SkScalarMul(rect->fLeft, SkFloatToScalar(xScale));
    rect->fTop = SkScalarMul(rect->fTop, SkFloatToScalar(yScale));
    rect->fRight = SkScalarMul(rect->fRight, SkFloatToScalar(xScale));
    rect->fBottom = SkScalarMul(rect->fBottom, SkFloatToScalar(yScale));
}

// Halves sigma (doubling *scaleFactor each time) until it is no greater
// than MAX_BLUR_SIGMA, so the blur can be done as a downscale followed by
// a small-kernel convolution. Writes the kernel radius (3*sigma, rounded
// up) to *radius and returns the adjusted sigma.
float adjust_sigma(float sigma, int *scaleFactor, int *radius) {
    *scaleFactor = 1;
    while (sigma > MAX_BLUR_SIGMA) {
        *scaleFactor *= 2;
        sigma *= 0.5f;
    }
    // +/- 3 sigma captures essentially all of the gaussian's weight
    *radius = static_cast<int>(ceilf(sigma * 3.0f));
    GrAssert(*radius <= GrConvolutionEffect::kMaxKernelRadius);
    return sigma;
}

// Draws 'texture' over 'rect' into the gpu's current render target with a
// single 1D morphology (dilate/erode) pass of the given radius along
// 'direction'.
void apply_morphology(GrGpu* gpu,
                      GrTexture* texture,
                      const SkRect& rect,
                      int radius,
                      GrContext::MorphologyType morphType,
                      Gr1DKernelEffect::Direction direction) {

    // grab the render target before the state reset below clears it, then
    // reinstall it on the fresh state
    GrRenderTarget* target = gpu->drawState()->getRenderTarget();
    GrDrawTarget::AutoStateRestore asr(gpu, GrDrawTarget::kReset_ASRInit);
    GrDrawState* drawState = gpu->drawState();
    drawState->setRenderTarget(target);
    GrMatrix sampleM;
    // maps device-space coords to normalized texel coords
    sampleM.setIDiv(texture->width(), texture->height());
    drawState->sampler(0)->reset(sampleM);
    SkAutoTUnref<GrCustomStage> morph(
        SkNEW_ARGS(GrMorphologyEffect, (texture, direction, radius, morphType)));
    drawState->sampler(0)->setCustomStage(morph);
    gpu->drawSimpleRect(rect, NULL, 1 << 0);
}

// Draws 'texture' over 'rect' into the gpu's current render target with a
// single 1D gaussian convolution pass along 'direction'. Same state-setup
// pattern as apply_morphology() above.
void convolve_gaussian(GrGpu* gpu,
                       GrTexture* texture,
                       const SkRect& rect,
                       float sigma,
                       int radius,
                       Gr1DKernelEffect::Direction direction) {
    GrRenderTarget* target = gpu->drawState()->getRenderTarget();
    GrDrawTarget::AutoStateRestore asr(gpu, GrDrawTarget::kReset_ASRInit);
    GrDrawState* drawState = gpu->drawState();
    drawState->setRenderTarget(target);
    GrMatrix sampleM;
    sampleM.setIDiv(texture->width(), texture->height());
    drawState->sampler(0)->reset(sampleM);
    SkAutoTUnref<GrConvolutionEffect> conv(SkNEW_ARGS(GrConvolutionEffect,
                                                      (texture, direction, radius,
                                                       sigma)));
    drawState->sampler(0)->setCustomStage(conv);
    gpu->drawSimpleRect(rect, NULL, 1 << 0);
}

}

// Looks up a cached texture matching desc/sampler. On a miss the returned
// entry's texture() is NULL. On a hit the entry is nested-locked and must
// be balanced by unlockTexture().
GrContext::TextureCacheEntry GrContext::findAndLockTexture(
        const GrTextureDesc& desc,
        const GrSamplerState* sampler) {
    GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, sampler, desc, false);
    return TextureCacheEntry(fTextureCache->findAndLock(resourceKey,
                                            GrResourceCache::kNested_LockType));
}

// Returns true if a texture matching desc/sampler is resident in the cache
// (no lock is taken).
bool GrContext::isTextureInCache(const GrTextureDesc& desc,
                                 const GrSamplerState* sampler) const {
    GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, sampler, desc, false);
    return fTextureCache->hasKey(resourceKey);
}

// Adds sb to the resource cache keyed on (width, height, sample count) and
// returns the locked entry. Stencil buffers share fTextureCache.
GrResourceEntry* GrContext::addAndLockStencilBuffer(GrStencilBuffer* sb) {
    ASSERT_OWNED_RESOURCE(sb);
    uint32_t v[4];
    gen_stencil_key_values(sb, v);
    GrResourceKey resourceKey(v);
    return fTextureCache->createAndLock(resourceKey, sb);
}

// Finds and single-locks a cached stencil buffer with the given dimensions
// and sample count; returns NULL on a miss.
GrStencilBuffer* GrContext::findStencilBuffer(int width, int height,
                                              int sampleCnt) {
    uint32_t v[4];
    gen_stencil_key_values(width, height, sampleCnt, v);
    GrResourceKey resourceKey(v);
    GrResourceEntry* entry = fTextureCache->findAndLock(resourceKey,
                                            GrResourceCache::kSingle_LockType);
    if (NULL != entry) {
        GrStencilBuffer* sb = (GrStencilBuffer*) entry->resource();
        return sb;
    } else {
        return NULL;
    }
}

// Balances addAndLockStencilBuffer()/findStencilBuffer().
void GrContext::unlockStencilBuffer(GrResourceEntry* sbEntry) {
    ASSERT_OWNED_RESOURCE(sbEntry->resource());
    fTextureCache->unlock(sbEntry);
}
fTextureCache->unlock(sbEntry); 314} 315 316static void stretchImage(void* dst, 317 int dstW, 318 int dstH, 319 void* src, 320 int srcW, 321 int srcH, 322 int bpp) { 323 GrFixed dx = (srcW << 16) / dstW; 324 GrFixed dy = (srcH << 16) / dstH; 325 326 GrFixed y = dy >> 1; 327 328 int dstXLimit = dstW*bpp; 329 for (int j = 0; j < dstH; ++j) { 330 GrFixed x = dx >> 1; 331 void* srcRow = (uint8_t*)src + (y>>16)*srcW*bpp; 332 void* dstRow = (uint8_t*)dst + j*dstW*bpp; 333 for (int i = 0; i < dstXLimit; i += bpp) { 334 memcpy((uint8_t*) dstRow + i, 335 (uint8_t*) srcRow + (x>>16)*bpp, 336 bpp); 337 x += dx; 338 } 339 y += dy; 340 } 341} 342 343GrContext::TextureCacheEntry GrContext::createAndLockTexture( 344 const GrSamplerState* sampler, 345 const GrTextureDesc& desc, 346 void* srcData, 347 size_t rowBytes) { 348 SK_TRACE_EVENT0("GrContext::createAndLockTexture"); 349 350#if GR_DUMP_TEXTURE_UPLOAD 351 GrPrintf("GrContext::createAndLockTexture [%d %d]\n", desc.fWidth, desc.fHeight); 352#endif 353 354 TextureCacheEntry entry; 355 356 GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, sampler, 357 desc, false); 358 359 if (GrTexture::NeedsResizing(resourceKey)) { 360 // The desired texture is NPOT and tiled but that isn't supported by 361 // the current hardware. 
Resize the texture to be a POT 362 GrAssert(NULL != sampler); 363 TextureCacheEntry clampEntry = this->findAndLockTexture(desc, 364 NULL); 365 366 if (NULL == clampEntry.texture()) { 367 clampEntry = this->createAndLockTexture(NULL, desc, 368 srcData, rowBytes); 369 GrAssert(NULL != clampEntry.texture()); 370 if (NULL == clampEntry.texture()) { 371 return entry; 372 } 373 } 374 GrTextureDesc rtDesc = desc; 375 rtDesc.fFlags = rtDesc.fFlags | 376 kRenderTarget_GrTextureFlagBit | 377 kNoStencil_GrTextureFlagBit; 378 rtDesc.fWidth = GrNextPow2(GrMax(desc.fWidth, 64)); 379 rtDesc.fHeight = GrNextPow2(GrMax(desc.fHeight, 64)); 380 381 GrTexture* texture = fGpu->createTexture(rtDesc, NULL, 0); 382 383 if (NULL != texture) { 384 GrDrawTarget::AutoStateRestore asr(fGpu, 385 GrDrawTarget::kReset_ASRInit); 386 GrDrawState* drawState = fGpu->drawState(); 387 drawState->setRenderTarget(texture->asRenderTarget()); 388 drawState->setTexture(0, clampEntry.texture()); 389 390 GrSamplerState::Filter filter; 391 // if filtering is not desired then we want to ensure all 392 // texels in the resampled image are copies of texels from 393 // the original. 
394 if (GrTexture::NeedsFiltering(resourceKey)) { 395 filter = GrSamplerState::kBilinear_Filter; 396 } else { 397 filter = GrSamplerState::kNearest_Filter; 398 } 399 drawState->sampler(0)->reset(GrSamplerState::kClamp_WrapMode, 400 filter); 401 402 static const GrVertexLayout layout = 403 GrDrawTarget::StageTexCoordVertexLayoutBit(0,0); 404 GrDrawTarget::AutoReleaseGeometry arg(fGpu, layout, 4, 0); 405 406 if (arg.succeeded()) { 407 GrPoint* verts = (GrPoint*) arg.vertices(); 408 verts[0].setIRectFan(0, 0, 409 texture->width(), 410 texture->height(), 411 2*sizeof(GrPoint)); 412 verts[1].setIRectFan(0, 0, 1, 1, 2*sizeof(GrPoint)); 413 fGpu->drawNonIndexed(kTriangleFan_GrPrimitiveType, 414 0, 4); 415 entry.set(fTextureCache->createAndLock(resourceKey, texture)); 416 } 417 texture->releaseRenderTarget(); 418 } else { 419 // TODO: Our CPU stretch doesn't filter. But we create separate 420 // stretched textures when the sampler state is either filtered or 421 // not. Either implement filtered stretch blit on CPU or just create 422 // one when FBO case fails. 423 424 rtDesc.fFlags = kNone_GrTextureFlags; 425 // no longer need to clamp at min RT size. 
426 rtDesc.fWidth = GrNextPow2(desc.fWidth); 427 rtDesc.fHeight = GrNextPow2(desc.fHeight); 428 int bpp = GrBytesPerPixel(desc.fConfig); 429 SkAutoSMalloc<128*128*4> stretchedPixels(bpp * 430 rtDesc.fWidth * 431 rtDesc.fHeight); 432 stretchImage(stretchedPixels.get(), rtDesc.fWidth, rtDesc.fHeight, 433 srcData, desc.fWidth, desc.fHeight, bpp); 434 435 size_t stretchedRowBytes = rtDesc.fWidth * bpp; 436 437 GrTexture* texture = fGpu->createTexture(rtDesc, 438 stretchedPixels.get(), 439 stretchedRowBytes); 440 GrAssert(NULL != texture); 441 entry.set(fTextureCache->createAndLock(resourceKey, texture)); 442 } 443 fTextureCache->unlock(clampEntry.cacheEntry()); 444 445 } else { 446 GrTexture* texture = fGpu->createTexture(desc, srcData, rowBytes); 447 if (NULL != texture) { 448 entry.set(fTextureCache->createAndLock(resourceKey, texture)); 449 } 450 } 451 return entry; 452} 453 454GrContext::TextureCacheEntry GrContext::lockScratchTexture( 455 const GrTextureDesc& inDesc, 456 ScratchTexMatch match) { 457 GrTextureDesc desc = inDesc; 458 desc.fClientCacheID = kScratch_CacheID; 459 460 if (kExact_ScratchTexMatch != match) { 461 // bin by pow2 with a reasonable min 462 static const int MIN_SIZE = 256; 463 desc.fWidth = GrMax(MIN_SIZE, GrNextPow2(desc.fWidth)); 464 desc.fHeight = GrMax(MIN_SIZE, GrNextPow2(desc.fHeight)); 465 } 466 467 GrResourceEntry* entry; 468 int origWidth = desc.fWidth; 469 int origHeight = desc.fHeight; 470 bool doubledW = false; 471 bool doubledH = false; 472 473 do { 474 GrResourceKey key = GrTexture::ComputeKey(fGpu, NULL, desc, true); 475 entry = fTextureCache->findAndLock(key, 476 GrResourceCache::kNested_LockType); 477 // if we miss, relax the fit of the flags... 478 // then try doubling width... then height. 
/**
 * Returns a locked scratch texture at least as large as inDesc (exactly
 * matching when match is kExact_ScratchTexMatch). On a cache miss the
 * search progressively relaxes flags and doubles dimensions before
 * finally creating a new texture. The returned entry is detached from the
 * cache until unlockTexture() so concurrent callers never share it.
 */
GrContext::TextureCacheEntry GrContext::lockScratchTexture(
                                                const GrTextureDesc& inDesc,
                                                ScratchTexMatch match) {
    GrTextureDesc desc = inDesc;
    desc.fClientCacheID = kScratch_CacheID;

    if (kExact_ScratchTexMatch != match) {
        // bin by pow2 with a reasonable min
        static const int MIN_SIZE = 256;
        desc.fWidth  = GrMax(MIN_SIZE, GrNextPow2(desc.fWidth));
        desc.fHeight = GrMax(MIN_SIZE, GrNextPow2(desc.fHeight));
    }

    GrResourceEntry* entry;
    int origWidth = desc.fWidth;
    int origHeight = desc.fHeight;
    bool doubledW = false;
    bool doubledH = false;

    do {
        GrResourceKey key = GrTexture::ComputeKey(fGpu, NULL, desc, true);
        entry = fTextureCache->findAndLock(key,
                                           GrResourceCache::kNested_LockType);
        // if we miss, relax the fit of the flags...
        // then try doubling width... then height.
        if (NULL != entry || kExact_ScratchTexMatch == match) {
            break;
        }
        // relaxation order: add RT flag -> drop no-stencil -> 2x width
        // (with original flags) -> 2x height (width restored)
        if (!(desc.fFlags & kRenderTarget_GrTextureFlagBit)) {
            desc.fFlags = desc.fFlags | kRenderTarget_GrTextureFlagBit;
        } else if (desc.fFlags & kNoStencil_GrTextureFlagBit) {
            desc.fFlags = desc.fFlags & ~kNoStencil_GrTextureFlagBit;
        } else if (!doubledW) {
            desc.fFlags = inDesc.fFlags;
            desc.fWidth *= 2;
            doubledW = true;
        } else if (!doubledH) {
            desc.fFlags = inDesc.fFlags;
            desc.fWidth = origWidth;
            desc.fHeight *= 2;
            doubledH = true;
        } else {
            break;
        }

    } while (true);

    if (NULL == entry) {
        // nothing cached fit: create a fresh texture with the original
        // (binned) dimensions and flags
        desc.fFlags = inDesc.fFlags;
        desc.fWidth = origWidth;
        desc.fHeight = origHeight;
        GrTexture* texture = fGpu->createTexture(desc, NULL, 0);
        if (NULL != texture) {
            GrResourceKey key = GrTexture::ComputeKey(fGpu, NULL,
                                                      texture->desc(),
                                                      true);
            entry = fTextureCache->createAndLock(key, texture);
        }
    }

    // If the caller gives us the same desc/sampler twice we don't want
    // to return the same texture the second time (unless it was previously
    // released). So we detach the entry from the cache and reattach at release.
    if (NULL != entry) {
        fTextureCache->detach(entry);
    }
    return TextureCacheEntry(entry);
}

// Inserts an externally created texture into the cache (keyed as a
// scratch texture). NULL is tolerated as a no-op.
void GrContext::addExistingTextureToCache(GrTexture* texture) {

    if (NULL == texture) {
        return;
    }

    GrResourceKey key = GrTexture::ComputeKey(fGpu, NULL,
                                              texture->desc(),
                                              true);
    fTextureCache->attach(key, texture);
}

void GrContext::unlockTexture(TextureCacheEntry entry) {
    ASSERT_OWNED_RESOURCE(entry.texture());
    // If this is a scratch texture we detached it from the cache
    // while it was locked (to avoid two callers simultaneously getting
    // the same texture).
    if (GrTexture::IsScratchTexture(entry.cacheEntry()->key())) {
        fTextureCache->reattachAndUnlock(entry.cacheEntry());
    } else {
        fTextureCache->unlock(entry.cacheEntry());
    }
}

// Removes the entry from the cache and deletes its resource.
void GrContext::freeEntry(TextureCacheEntry entry) {
    ASSERT_OWNED_RESOURCE(entry.texture());

    fTextureCache->freeEntry(entry.cacheEntry());
}

// Creates a texture owned solely by the caller; it never enters the cache.
GrTexture* GrContext::createUncachedTexture(const GrTextureDesc& descIn,
                                            void* srcData,
                                            size_t rowBytes) {
    GrTextureDesc descCopy = descIn;
    descCopy.fClientCacheID = kUncached_CacheID;
    return fGpu->createTexture(descCopy, srcData, rowBytes);
}

void GrContext::getTextureCacheLimits(int* maxTextures,
                                      size_t* maxTextureBytes) const {
    fTextureCache->getLimits(maxTextures, maxTextureBytes);
}

void GrContext::setTextureCacheLimits(int maxTextures, size_t maxTextureBytes) {
    fTextureCache->setLimits(maxTextures, maxTextureBytes);
}

int GrContext::getMaxTextureSize() const {
    return fGpu->getCaps().fMaxTextureSize;
}

int GrContext::getMaxRenderTargetSize() const {
    return fGpu->getCaps().fMaxRenderTargetSize;
}

///////////////////////////////////////////////////////////////////////////////

// Wraps a texture created by the client's 3D API.
GrTexture* GrContext::createPlatformTexture(const GrPlatformTextureDesc& desc) {
    return fGpu->createPlatformTexture(desc);
}

// Wraps a render target created by the client's 3D API.
GrRenderTarget* GrContext::createPlatformRenderTarget(const GrPlatformRenderTargetDesc& desc) {
    return fGpu->createPlatformRenderTarget(desc);
}
(sampler->getWrapX() != GrSamplerState::kClamp_WrapMode || 602 sampler->getWrapY() != GrSamplerState::kClamp_WrapMode); 603 if (tiled && !caps.fNPOTTextureTileSupport) { 604 return false; 605 } 606 } 607 return true; 608} 609 610//////////////////////////////////////////////////////////////////////////////// 611 612const GrClip& GrContext::getClip() const { return fGpu->getClip(); } 613 614void GrContext::setClip(const GrClip& clip) { 615 fGpu->setClip(clip); 616 fDrawState->enableState(GrDrawState::kClip_StateBit); 617} 618 619//////////////////////////////////////////////////////////////////////////////// 620 621void GrContext::clear(const GrIRect* rect, 622 const GrColor color, 623 GrRenderTarget* target) { 624 this->flush(); 625 fGpu->clear(rect, color, target); 626} 627 628void GrContext::drawPaint(const GrPaint& paint) { 629 // set rect to be big enough to fill the space, but not super-huge, so we 630 // don't overflow fixed-point implementations 631 GrRect r; 632 r.setLTRB(0, 0, 633 GrIntToScalar(getRenderTarget()->width()), 634 GrIntToScalar(getRenderTarget()->height())); 635 GrMatrix inverse; 636 SkTLazy<GrPaint> tmpPaint; 637 const GrPaint* p = &paint; 638 AutoMatrix am; 639 640 // We attempt to map r by the inverse matrix and draw that. mapRect will 641 // map the four corners and bound them with a new rect. This will not 642 // produce a correct result for some perspective matrices. 
643 if (!this->getMatrix().hasPerspective()) { 644 if (!fDrawState->getViewInverse(&inverse)) { 645 GrPrintf("Could not invert matrix"); 646 return; 647 } 648 inverse.mapRect(&r); 649 } else { 650 if (paint.getActiveMaskStageMask() || paint.getActiveStageMask()) { 651 if (!fDrawState->getViewInverse(&inverse)) { 652 GrPrintf("Could not invert matrix"); 653 return; 654 } 655 tmpPaint.set(paint); 656 tmpPaint.get()->preConcatActiveSamplerMatrices(inverse); 657 p = tmpPaint.get(); 658 } 659 am.set(this, GrMatrix::I()); 660 } 661 // by definition this fills the entire clip, no need for AA 662 if (paint.fAntiAlias) { 663 if (!tmpPaint.isValid()) { 664 tmpPaint.set(paint); 665 p = tmpPaint.get(); 666 } 667 GrAssert(p == tmpPaint.get()); 668 tmpPaint.get()->fAntiAlias = false; 669 } 670 this->drawRect(*p, r); 671} 672 673//////////////////////////////////////////////////////////////////////////////// 674 675namespace { 676inline bool disable_coverage_aa_for_blend(GrDrawTarget* target) { 677 return DISABLE_COVERAGE_AA_FOR_BLEND && !target->canApplyCoverage(); 678} 679} 680 681//////////////////////////////////////////////////////////////////////////////// 682 683/* create a triangle strip that strokes the specified triangle. There are 8 684 unique vertices, but we repreat the last 2 to close up. Alternatively we 685 could use an indices array, and then only send 8 verts, but not sure that 686 would be faster. 
/* create a triangle strip that strokes the specified rect. There are 8
 unique vertices, but we repeat the last 2 to close up. Alternatively we
 could use an indices array, and then only send 8 verts, but not sure that
 would be faster.
 */
static void setStrokeRectStrip(GrPoint verts[10], GrRect rect,
                               GrScalar width) {
    const GrScalar rad = GrScalarHalf(width);
    rect.sort();

    // alternate outer (+rad) and inner (-rad) corners, clockwise
    verts[0].set(rect.fLeft + rad, rect.fTop + rad);
    verts[1].set(rect.fLeft - rad, rect.fTop - rad);
    verts[2].set(rect.fRight - rad, rect.fTop + rad);
    verts[3].set(rect.fRight + rad, rect.fTop - rad);
    verts[4].set(rect.fRight - rad, rect.fBottom - rad);
    verts[5].set(rect.fRight + rad, rect.fBottom + rad);
    verts[6].set(rect.fLeft + rad, rect.fBottom - rad);
    verts[7].set(rect.fLeft - rad, rect.fBottom + rad);
    verts[8] = verts[0];
    verts[9] = verts[1];
}

/**
 * Returns true if the rects edges are integer-aligned.
 */
static bool isIRect(const GrRect& r) {
    return GrScalarIsInt(r.fLeft) && GrScalarIsInt(r.fTop) &&
           GrScalarIsInt(r.fRight) && GrScalarIsInt(r.fBottom);
}

/**
 * Decides whether a rect (fill when width < 0, stroke otherwise) can be
 * drawn with the coverage-ramp AA path. On success, writes the view
 * matrix combined with 'matrix' to *combinedMatrix, the device-space rect
 * to *devRect, and whether coverage must go through a vertex attribute
 * (rather than tweaked alpha) to *useVertexCoverage.
 */
static bool apply_aa_to_rect(GrDrawTarget* target,
                             const GrRect& rect,
                             GrScalar width,
                             const GrMatrix* matrix,
                             GrMatrix* combinedMatrix,
                             GrRect* devRect,
                             bool* useVertexCoverage) {
    // we use a simple coverage ramp to do aa on axis-aligned rects
    // we check if the rect will be axis-aligned, and the rect won't land on
    // integer coords.

    // we are keeping around the "tweak the alpha" trick because
    // it is our only hope for the fixed-pipe implementation.
    // In a shader implementation we can give a separate coverage input
    // TODO: remove this ugliness when we drop the fixed-pipe impl
    *useVertexCoverage = false;
    if (!target->canTweakAlphaForCoverage()) {
        if (disable_coverage_aa_for_blend(target)) {
#if GR_DEBUG
            //GrPrintf("Turning off AA to correctly apply blend.\n");
#endif
            return false;
        } else {
            *useVertexCoverage = true;
        }
    }
    const GrDrawState& drawState = target->getDrawState();
    if (drawState.getRenderTarget()->isMultisampled()) {
        // MSAA already provides AA
        return false;
    }

    if (0 == width && target->willUseHWAALines()) {
        // hardware-AA'd hairlines handle this case
        return false;
    }

    if (!drawState.getViewMatrix().preservesAxisAlignment()) {
        return false;
    }

    if (NULL != matrix &&
        !matrix->preservesAxisAlignment()) {
        return false;
    }

    *combinedMatrix = drawState.getViewMatrix();
    if (NULL != matrix) {
        combinedMatrix->preConcat(*matrix);
        GrAssert(combinedMatrix->preservesAxisAlignment());
    }

    combinedMatrix->mapRect(devRect, rect);
    devRect->sort();

    if (width < 0) {
        // a fill landing exactly on integer coords needs no AA
        return !isIRect(*devRect);
    } else {
        return true;
    }
}
strokeSize.set(width, width); 798 combinedMatrix.mapVectors(&strokeSize, 1); 799 strokeSize.setAbs(strokeSize); 800 } else { 801 strokeSize.set(GR_Scalar1, GR_Scalar1); 802 } 803 fAARectRenderer->strokeAARect(this->getGpu(), target, devRect, 804 strokeSize, useVertexCoverage); 805 } else { 806 fAARectRenderer->fillAARect(this->getGpu(), target, 807 devRect, useVertexCoverage); 808 } 809 return; 810 } 811 812 if (width >= 0) { 813 // TODO: consider making static vertex buffers for these cases. 814 // Hairline could be done by just adding closing vertex to 815 // unitSquareVertexBuffer() 816 GrVertexLayout layout = PaintStageVertexLayoutBits(paint, NULL); 817 818 static const int worstCaseVertCount = 10; 819 GrDrawTarget::AutoReleaseGeometry geo(target, layout, worstCaseVertCount, 0); 820 821 if (!geo.succeeded()) { 822 GrPrintf("Failed to get space for vertices!\n"); 823 return; 824 } 825 826 GrPrimitiveType primType; 827 int vertCount; 828 GrPoint* vertex = geo.positions(); 829 830 if (width > 0) { 831 vertCount = 10; 832 primType = kTriangleStrip_GrPrimitiveType; 833 setStrokeRectStrip(vertex, rect, width); 834 } else { 835 // hairline 836 vertCount = 5; 837 primType = kLineStrip_GrPrimitiveType; 838 vertex[0].set(rect.fLeft, rect.fTop); 839 vertex[1].set(rect.fRight, rect.fTop); 840 vertex[2].set(rect.fRight, rect.fBottom); 841 vertex[3].set(rect.fLeft, rect.fBottom); 842 vertex[4].set(rect.fLeft, rect.fTop); 843 } 844 845 GrDrawState::AutoViewMatrixRestore avmr; 846 if (NULL != matrix) { 847 GrDrawState* drawState = target->drawState(); 848 avmr.set(drawState); 849 drawState->preConcatViewMatrix(*matrix); 850 drawState->preConcatSamplerMatrices(stageMask, *matrix); 851 } 852 853 target->drawNonIndexed(primType, 0, vertCount); 854 } else { 855#if GR_STATIC_RECT_VB 856 GrVertexLayout layout = PaintStageVertexLayoutBits(paint, NULL); 857 const GrVertexBuffer* sqVB = fGpu->getUnitSquareVertexBuffer(); 858 if (NULL == sqVB) { 859 GrPrintf("Failed to create static 
rect vb.\n"); 860 return; 861 } 862 target->setVertexSourceToBuffer(layout, sqVB); 863 GrDrawState* drawState = target->drawState(); 864 GrDrawState::AutoViewMatrixRestore avmr(drawState); 865 GrMatrix m; 866 m.setAll(rect.width(), 0, rect.fLeft, 867 0, rect.height(), rect.fTop, 868 0, 0, GrMatrix::I()[8]); 869 870 if (NULL != matrix) { 871 m.postConcat(*matrix); 872 } 873 drawState->preConcatViewMatrix(m); 874 drawState->preConcatSamplerMatrices(stageMask, m); 875 876 target->drawNonIndexed(kTriangleFan_GrPrimitiveType, 0, 4); 877#else 878 target->drawSimpleRect(rect, matrix, stageMask); 879#endif 880 } 881} 882 883void GrContext::drawRectToRect(const GrPaint& paint, 884 const GrRect& dstRect, 885 const GrRect& srcRect, 886 const GrMatrix* dstMatrix, 887 const GrMatrix* srcMatrix) { 888 SK_TRACE_EVENT0("GrContext::drawRectToRect"); 889 890 // srcRect refers to paint's first texture 891 if (!paint.isTextureStageEnabled(0)) { 892 drawRect(paint, dstRect, -1, dstMatrix); 893 return; 894 } 895 896 GR_STATIC_ASSERT(!BATCH_RECT_TO_RECT || !GR_STATIC_RECT_VB); 897 898#if GR_STATIC_RECT_VB 899 GrDrawTarget* target = this->prepareToDraw(paint, kUnbuffered_DrawCategory); 900 GrDrawState::AutoStageDisable atr(fDrawState); 901 GrDrawState* drawState = target->drawState(); 902 GrVertexLayout layout = PaintStageVertexLayoutBits(paint, NULL); 903 GrDrawState::AutoViewMatrixRestore avmr(drawState); 904 905 GrMatrix m; 906 907 m.setAll(dstRect.width(), 0, dstRect.fLeft, 908 0, dstRect.height(), dstRect.fTop, 909 0, 0, GrMatrix::I()[8]); 910 if (NULL != dstMatrix) { 911 m.postConcat(*dstMatrix); 912 } 913 drawState->preConcatViewMatrix(m); 914 915 // srcRect refers to first stage 916 int otherStageMask = paint.getActiveStageMask() & 917 (~(1 << GrPaint::kFirstTextureStage)); 918 if (otherStageMask) { 919 drawState->preConcatSamplerMatrices(otherStageMask, m); 920 } 921 922 m.setAll(srcRect.width(), 0, srcRect.fLeft, 923 0, srcRect.height(), srcRect.fTop, 924 0, 0, 
GrMatrix::I()[8]); 925 if (NULL != srcMatrix) { 926 m.postConcat(*srcMatrix); 927 } 928 drawState->sampler(GrPaint::kFirstTextureStage)->preConcatMatrix(m); 929 930 const GrVertexBuffer* sqVB = fGpu->getUnitSquareVertexBuffer(); 931 if (NULL == sqVB) { 932 GrPrintf("Failed to create static rect vb.\n"); 933 return; 934 } 935 target->setVertexSourceToBuffer(layout, sqVB); 936 target->drawNonIndexed(kTriangleFan_GrPrimitiveType, 0, 4); 937#else 938 939 GrDrawTarget* target; 940#if BATCH_RECT_TO_RECT 941 target = this->prepareToDraw(paint, kBuffered_DrawCategory); 942#else 943 target = this->prepareToDraw(paint, kUnbuffered_DrawCategory); 944#endif 945 GrDrawState::AutoStageDisable atr(fDrawState); 946 947 const GrRect* srcRects[GrDrawState::kNumStages] = {NULL}; 948 const GrMatrix* srcMatrices[GrDrawState::kNumStages] = {NULL}; 949 srcRects[0] = &srcRect; 950 srcMatrices[0] = srcMatrix; 951 952 target->drawRect(dstRect, dstMatrix, 1, srcRects, srcMatrices); 953#endif 954} 955 956void GrContext::drawVertices(const GrPaint& paint, 957 GrPrimitiveType primitiveType, 958 int vertexCount, 959 const GrPoint positions[], 960 const GrPoint texCoords[], 961 const GrColor colors[], 962 const uint16_t indices[], 963 int indexCount) { 964 SK_TRACE_EVENT0("GrContext::drawVertices"); 965 966 GrDrawTarget::AutoReleaseGeometry geo; 967 968 GrDrawTarget* target = this->prepareToDraw(paint, kUnbuffered_DrawCategory); 969 GrDrawState::AutoStageDisable atr(fDrawState); 970 971 bool hasTexCoords[GrPaint::kTotalStages] = { 972 NULL != texCoords, // texCoordSrc provides explicit stage 0 coords 973 0 // remaining stages use positions 974 }; 975 976 GrVertexLayout layout = PaintStageVertexLayoutBits(paint, hasTexCoords); 977 978 if (NULL != colors) { 979 layout |= GrDrawTarget::kColor_VertexLayoutBit; 980 } 981 int vertexSize = GrDrawTarget::VertexSize(layout); 982 983 if (sizeof(GrPoint) != vertexSize) { 984 if (!geo.set(target, layout, vertexCount, 0)) { 985 GrPrintf("Failed to get space 
for vertices!\n"); 986 return; 987 } 988 int texOffsets[GrDrawState::kMaxTexCoords]; 989 int colorOffset; 990 GrDrawTarget::VertexSizeAndOffsetsByIdx(layout, 991 texOffsets, 992 &colorOffset, 993 NULL, 994 NULL); 995 void* curVertex = geo.vertices(); 996 997 for (int i = 0; i < vertexCount; ++i) { 998 *((GrPoint*)curVertex) = positions[i]; 999 1000 if (texOffsets[0] > 0) { 1001 *(GrPoint*)((intptr_t)curVertex + texOffsets[0]) = texCoords[i]; 1002 } 1003 if (colorOffset > 0) { 1004 *(GrColor*)((intptr_t)curVertex + colorOffset) = colors[i]; 1005 } 1006 curVertex = (void*)((intptr_t)curVertex + vertexSize); 1007 } 1008 } else { 1009 target->setVertexSourceToArray(layout, positions, vertexCount); 1010 } 1011 1012 // we don't currently apply offscreen AA to this path. Need improved 1013 // management of GrDrawTarget's geometry to avoid copying points per-tile. 1014 1015 if (NULL != indices) { 1016 target->setIndexSourceToArray(indices, indexCount); 1017 target->drawIndexed(primitiveType, 0, 0, vertexCount, indexCount); 1018 } else { 1019 target->drawNonIndexed(primitiveType, 0, vertexCount); 1020 } 1021} 1022 1023/////////////////////////////////////////////////////////////////////////////// 1024namespace { 1025 1026struct CircleVertex { 1027 GrPoint fPos; 1028 GrPoint fCenter; 1029 GrScalar fOuterRadius; 1030 GrScalar fInnerRadius; 1031}; 1032 1033/* Returns true if will map a circle to another circle. This can be true 1034 * if the matrix only includes square-scale, rotation, translation. 
 */
// Identity/translate pass trivially; perspective fails; otherwise the two
// row vectors must be perpendicular and of equal length (uniform scale plus
// rotation), within 'tol'.
inline bool isSimilarityTransformation(const SkMatrix& matrix,
                                       SkScalar tol = SK_ScalarNearlyZero) {
    if (matrix.isIdentity() || matrix.getType() == SkMatrix::kTranslate_Mask) {
        return true;
    }
    if (matrix.hasPerspective()) {
        return false;
    }

    SkScalar mx = matrix.get(SkMatrix::kMScaleX);
    SkScalar sx = matrix.get(SkMatrix::kMSkewX);
    SkScalar my = matrix.get(SkMatrix::kMScaleY);
    SkScalar sy = matrix.get(SkMatrix::kMSkewY);

    // A degenerate (all-zero) upper 2x2 maps everything to a point.
    if (mx == 0 && sx == 0 && my == 0 && sy == 0) {
        return false;
    }

    // it has scales or skews, but it could also be rotation, check it out.
    SkVector vec[2];
    vec[0].set(mx, sx);
    vec[1].set(sy, my);

    return SkScalarNearlyZero(vec[0].dot(vec[1]), SkScalarSquare(tol)) &&
           SkScalarNearlyEqual(vec[0].lengthSqd(), vec[1].lengthSqd(),
                               SkScalarSquare(tol));
}

}

// TODO: strokeWidth can't be larger than zero right now.
// It will be fixed when drawPath() can handle strokes.
//
// Draws rect's inscribed oval. strokeWidth == 0 means hairline; a negative
// width means fill (drawPath passes -SK_Scalar1 for fills). The analytic
// circle-edge fast path is only usable when the paint is AA, the view
// matrix is a similarity transform, and the oval is actually a circle;
// anything else falls back to the general path renderer.
void GrContext::drawOval(const GrPaint& paint,
                         const GrRect& rect,
                         SkScalar strokeWidth) {
    DrawCategory category = (DEFER_PATHS) ? kBuffered_DrawCategory :
                                            kUnbuffered_DrawCategory;
    GrDrawTarget* target = this->prepareToDraw(paint, category);
    GrDrawState::AutoStageDisable atr(fDrawState);
    GrDrawState* drawState = target->drawState();
    GrMatrix vm = drawState->getViewMatrix();

    // Fallback: convert to a path and let the path-renderer chain handle it.
    if (!isSimilarityTransformation(vm) ||
        !paint.fAntiAlias ||
        rect.height() != rect.width()) {
        SkPath path;
        path.addOval(rect);
        GrPathFill fill = (strokeWidth == 0) ?
                           kHairLine_GrPathFill : kWinding_GrPathFill;
        this->internalDrawPath(paint, path, fill, NULL);
        return;
    }

    const GrRenderTarget* rt = drawState->getRenderTarget();
    if (NULL == rt) {
        return;
    }

    // Vertices below are pre-transformed into device space, so draw with an
    // identity view matrix for the duration of this draw.
    GrDrawTarget::AutoDeviceCoordDraw adcd(target, paint.getActiveStageMask());

    GrVertexLayout layout = PaintStageVertexLayoutBits(paint, NULL);
    layout |= GrDrawTarget::kEdge_VertexLayoutBit;
    GrAssert(sizeof(CircleVertex) == GrDrawTarget::VertexSize(layout));

    // Map the circle's center and radius into device space by hand.
    GrPoint center = GrPoint::Make(rect.centerX(), rect.centerY());
    GrScalar radius = SkScalarHalf(rect.width());

    vm.mapPoints(&center, 1);
    radius = vm.mapRadius(radius);

    GrScalar outerRadius = radius;
    GrScalar innerRadius = 0;
    SkScalar halfWidth = 0;
    if (strokeWidth == 0) {
        // Hairline: a one-pixel-wide ring centered on the radius.
        halfWidth = SkScalarHalf(SK_Scalar1);

        outerRadius += halfWidth;
        innerRadius = SkMaxScalar(0, radius - halfWidth);
    }

    GrDrawTarget::AutoReleaseGeometry geo(target, layout, 4, 0);
    if (!geo.succeeded()) {
        GrPrintf("Failed to get space for vertices!\n");
        return;
    }

    CircleVertex* verts = reinterpret_cast<CircleVertex*>(geo.vertices());

    // The fragment shader will extend the radius out half a pixel
    // to antialias. Expand the drawn rect here so all the pixels
    // will be captured.
    SkScalar L = center.fX - outerRadius - SkFloatToScalar(0.5f);
    SkScalar R = center.fX + outerRadius + SkFloatToScalar(0.5f);
    SkScalar T = center.fY - outerRadius - SkFloatToScalar(0.5f);
    SkScalar B = center.fY + outerRadius + SkFloatToScalar(0.5f);

    verts[0].fPos = SkPoint::Make(L, T);
    verts[1].fPos = SkPoint::Make(R, T);
    verts[2].fPos = SkPoint::Make(L, B);
    verts[3].fPos = SkPoint::Make(R, B);

    for (int i = 0; i < 4; ++i) {
        // this goes to fragment shader, it should be in y-points-up space.
1139 verts[i].fCenter = SkPoint::Make(center.fX, rt->height() - center.fY); 1140 1141 verts[i].fOuterRadius = outerRadius; 1142 verts[i].fInnerRadius = innerRadius; 1143 } 1144 1145 drawState->setVertexEdgeType(GrDrawState::kCircle_EdgeType); 1146 target->drawNonIndexed(kTriangleStrip_GrPrimitiveType, 0, 4); 1147} 1148 1149void GrContext::drawPath(const GrPaint& paint, const SkPath& path, 1150 GrPathFill fill, const GrPoint* translate) { 1151 1152 if (path.isEmpty()) { 1153 if (GrIsFillInverted(fill)) { 1154 this->drawPaint(paint); 1155 } 1156 return; 1157 } 1158 1159 SkRect ovalRect; 1160 if (!GrIsFillInverted(fill) && path.isOval(&ovalRect)) { 1161 if (translate) { 1162 ovalRect.offset(*translate); 1163 } 1164 SkScalar width = (fill == kHairLine_GrPathFill) ? 0 : -SK_Scalar1; 1165 this->drawOval(paint, ovalRect, width); 1166 return; 1167 } 1168 1169 internalDrawPath(paint, path, fill, translate); 1170} 1171 1172void GrContext::internalDrawPath(const GrPaint& paint, const SkPath& path, 1173 GrPathFill fill, const GrPoint* translate) { 1174 1175 // Note that below we may sw-rasterize the path into a scratch texture. 1176 // Scratch textures can be recycled after they are returned to the texture 1177 // cache. This presents a potential hazard for buffered drawing. However, 1178 // the writePixels that uploads to the scratch will perform a flush so we're 1179 // OK. 1180 DrawCategory category = (DEFER_PATHS) ? kBuffered_DrawCategory : 1181 kUnbuffered_DrawCategory; 1182 GrDrawTarget* target = this->prepareToDraw(paint, category); 1183 GrDrawState::AutoStageDisable atr(fDrawState); 1184 GrDrawState::StageMask stageMask = paint.getActiveStageMask(); 1185 1186 bool prAA = paint.fAntiAlias && !this->getRenderTarget()->isMultisampled(); 1187 1188 // An Assumption here is that path renderer would use some form of tweaking 1189 // the src color (either the input alpha or in the frag shader) to implement 1190 // aa. 
    // If we have some future driver-mojo path AA that can do the right
    // thing WRT to the blend then we'll need some query on the PR.
    if (disable_coverage_aa_for_blend(target)) {
#if GR_DEBUG
        //GrPrintf("Turning off AA to correctly apply blend.\n");
#endif
        prAA = false;
    }

    GrPathRenderer* pr = this->getPathRenderer(path, fill, target, prAA, true);
    if (NULL == pr) {
#if GR_DEBUG
        GrPrintf("Unable to find path renderer compatible with path.\n");
#endif
        return;
    }

    pr->drawPath(path, fill, translate, target, stageMask, prAA);
}

////////////////////////////////////////////////////////////////////////////////

// Plays back (or, with kDiscard_FlushBit, throws away) any deferred draws,
// then optionally forces the GPU to flush the current render target.
// NOTE(review): the discard branch calls fDrawBuffer->reset() without the
// NULL check that flushDrawBuffer() performs — confirm fDrawBuffer can never
// be NULL here when all of the DEFER_*/BATCH_* macros are disabled.
void GrContext::flush(int flagsBitfield) {
    if (kDiscard_FlushBit & flagsBitfield) {
        fDrawBuffer->reset();
    } else {
        this->flushDrawBuffer();
    }
    if (kForceCurrentRenderTarget_FlushBit & flagsBitfield) {
        fGpu->forceRenderTargetFlush();
    }
}

void GrContext::flushDrawBuffer() {
    if (fDrawBuffer) {
        // With addition of the AA clip path, flushing the draw buffer can
        // result in the generation of an AA clip mask. During this
        // process the SW path renderer may be invoked which recursively
        // calls this method (via internalWriteTexturePixels) creating
        // infinite recursion. Guard against that by NULLing out the member
        // for the duration of the playback.
        GrInOrderDrawBuffer* temp = fDrawBuffer;
        fDrawBuffer = NULL;

        temp->flushTo(fGpu);

        fDrawBuffer = temp;
    }
}

// Uploads a rectangle of pixels into 'texture'. Flushes first (unless
// kDontFlush_PixelOpsFlag) so pending draws that sample this texture see its
// pre-upload contents. Premul<->unpremul conversion is unsupported, so a
// config mismatch in that respect is a silent no-op (see TODO).
void GrContext::internalWriteTexturePixels(GrTexture* texture,
                                           int left, int top,
                                           int width, int height,
                                           GrPixelConfig config,
                                           const void* buffer,
                                           size_t rowBytes,
                                           uint32_t flags) {
    SK_TRACE_EVENT0("GrContext::writeTexturePixels");
    ASSERT_OWNED_RESOURCE(texture);

    if (!(kDontFlush_PixelOpsFlag & flags)) {
        this->flush();
    }
    // TODO: use scratch texture to perform conversion
    if (GrPixelConfigIsUnpremultiplied(texture->config()) !=
        GrPixelConfigIsUnpremultiplied(config)) {
        return;
    }

    fGpu->writeTexturePixels(texture, left, top, width, height,
                             config, buffer, rowBytes);
}

// Reads back pixels from 'texture'. Only implemented for textures that are
// also render targets (delegates to the render-target read path); returns
// false otherwise.
bool GrContext::internalReadTexturePixels(GrTexture* texture,
                                          int left, int top,
                                          int width, int height,
                                          GrPixelConfig config,
                                          void* buffer,
                                          size_t rowBytes,
                                          uint32_t flags) {
    SK_TRACE_EVENT0("GrContext::readTexturePixels");
    ASSERT_OWNED_RESOURCE(texture);

    // TODO: code read pixels for textures that aren't also rendertargets
    GrRenderTarget* target = texture->asRenderTarget();
    if (NULL != target) {
        return this->internalReadRenderTargetPixels(target,
                                                    left, top, width, height,
                                                    config, buffer, rowBytes,
                                                    flags);
    } else {
        return false;
    }
}

#include "SkConfig8888.h"

namespace {
/**
 * Converts a GrPixelConfig to a SkCanvas::Config8888. Only byte-per-channel
 * formats are representable as Config8888 and so the function returns false
 * if the GrPixelConfig has no equivalent Config8888.
 */
bool grconfig_to_config8888(GrPixelConfig config,
                            SkCanvas::Config8888* config8888) {
    switch (config) {
        case kRGBA_8888_PM_GrPixelConfig:
            *config8888 = SkCanvas::kRGBA_Premul_Config8888;
            return true;
        case kRGBA_8888_UPM_GrPixelConfig:
            *config8888 = SkCanvas::kRGBA_Unpremul_Config8888;
            return true;
        case kBGRA_8888_PM_GrPixelConfig:
            *config8888 = SkCanvas::kBGRA_Premul_Config8888;
            return true;
        case kBGRA_8888_UPM_GrPixelConfig:
            *config8888 = SkCanvas::kBGRA_Unpremul_Config8888;
            return true;
        default:
            return false;
    }
}
}

// Reads a rectangle of pixels from 'target' (or the current render target if
// NULL). Conversions that the GPU can do cheaply (y-flip, R/B swap, GPU
// unpremul) are performed by drawing through an intermediate scratch render
// target; premul->unpremul without GPU support is done by reading back in the
// target's own config and converting in software.
bool GrContext::internalReadRenderTargetPixels(GrRenderTarget* target,
                                               int left, int top,
                                               int width, int height,
                                               GrPixelConfig config,
                                               void* buffer,
                                               size_t rowBytes,
                                               uint32_t flags) {
    SK_TRACE_EVENT0("GrContext::readRenderTargetPixels");
    ASSERT_OWNED_RESOURCE(target);

    if (NULL == target) {
        target = fDrawState->getRenderTarget();
        if (NULL == target) {
            return false;
        }
    }

    if (!(kDontFlush_PixelOpsFlag & flags)) {
        this->flush();
    }

    // Software fallback for premul -> unpremul when the GPU can't round-trip
    // unpremul values losslessly.
    if (!GrPixelConfigIsUnpremultiplied(target->config()) &&
        GrPixelConfigIsUnpremultiplied(config) &&
        !fGpu->canPreserveReadWriteUnpremulPixels()) {
        SkCanvas::Config8888 srcConfig8888, dstConfig8888;
        if (!grconfig_to_config8888(target->config(), &srcConfig8888) ||
            !grconfig_to_config8888(config, &dstConfig8888)) {
            return false;
        }
        // do read back using target's own config
        this->internalReadRenderTargetPixels(target,
                                             left, top,
                                             width, height,
                                             target->config(),
                                             buffer, rowBytes,
                                             kDontFlush_PixelOpsFlag);
        // sw convert the pixels to unpremul config
        uint32_t* pixels = reinterpret_cast<uint32_t*>(buffer);
        SkConvertConfig8888Pixels(pixels, rowBytes, dstConfig8888,
                                  pixels, rowBytes, srcConfig8888,
                                  width, height);
        return true;
    }

    GrTexture* src = target->asTexture();
    bool swapRAndB = NULL != src &&
                     fGpu->preferredReadPixelsConfig(config) ==
                     GrPixelConfigSwapRAndB(config);

    bool flipY = NULL != src &&
                 fGpu->readPixelsWillPayForYFlip(target, left, top,
                                                 width, height, config,
                                                 rowBytes);
    bool alphaConversion = (!GrPixelConfigIsUnpremultiplied(target->config()) &&
                             GrPixelConfigIsUnpremultiplied(config));

    if (NULL == src && alphaConversion) {
        // we should fallback to cpu conversion here. This could happen when
        // we were given an external render target by the client that is not
        // also a texture (e.g. FBO 0 in GL)
        return false;
    }
    // we draw to a scratch texture if any of these conversion are applied
    GrAutoScratchTexture ast;
    if (flipY || swapRAndB || alphaConversion) {
        GrAssert(NULL != src);
        if (swapRAndB) {
            config = GrPixelConfigSwapRAndB(config);
            GrAssert(kUnknown_GrPixelConfig != config);
        }
        // Make the scratch a render target because we don't have a robust
        // readTexturePixels as of yet (it calls this function).
        GrTextureDesc desc;
        desc.fFlags = kRenderTarget_GrTextureFlagBit;
        desc.fWidth = width;
        desc.fHeight = height;
        desc.fConfig = config;

        // When a full readback is faster than a partial we could always make
        // the scratch exactly match the passed rect. However, if we see many
        // different size rectangles we will trash our texture cache and pay the
        // cost of creating and destroying many textures. So, we only request
        // an exact match when the caller is reading an entire RT.
        ScratchTexMatch match = kApprox_ScratchTexMatch;
        if (0 == left &&
            0 == top &&
            target->width() == width &&
            target->height() == height &&
            fGpu->fullReadPixelsIsFasterThanPartial()) {
            match = kExact_ScratchTexMatch;
        }
        ast.set(this, desc, match);
        GrTexture* texture = ast.texture();
        if (!texture) {
            return false;
        }
        target = texture->asRenderTarget();
        GrAssert(NULL != target);

        // Draw the source into the scratch with a clean, temporary draw state.
        GrDrawTarget::AutoStateRestore asr(fGpu,
                                           GrDrawTarget::kReset_ASRInit);
        GrDrawState* drawState = fGpu->drawState();
        drawState->setRenderTarget(target);

        GrMatrix matrix;
        if (flipY) {
            matrix.setTranslate(SK_Scalar1 * left,
                                SK_Scalar1 * (top + height));
            matrix.set(GrMatrix::kMScaleY, -GR_Scalar1);
        } else {
            matrix.setTranslate(SK_Scalar1 *left, SK_Scalar1 *top);
        }
        matrix.postIDiv(src->width(), src->height());
        drawState->sampler(0)->reset(matrix);
        drawState->sampler(0)->setRAndBSwap(swapRAndB);
        drawState->setTexture(0, src);
        GrRect rect;
        rect.setXYWH(0, 0, SK_Scalar1 * width, SK_Scalar1 * height);
        fGpu->drawSimpleRect(rect, NULL, 0x1);
        // The requested rect now lives at the scratch target's origin.
        left = 0;
        top = 0;
    }
    return fGpu->readPixels(target,
                            left, top, width, height,
                            config, buffer, rowBytes, flipY);
}

void GrContext::resolveRenderTarget(GrRenderTarget* target) {
    GrAssert(target);
    ASSERT_OWNED_RESOURCE(target);
    // In the future we may track whether there are any pending draws to this
    // target. We don't today so we always perform a flush. We don't promise
    // this to our clients, though.
    this->flush();
    fGpu->resolveRenderTarget(target);
}

// Stretch-free copy of 'src' onto 'dst' by drawing a textured rect that
// covers src's full bounds.
void GrContext::copyTexture(GrTexture* src, GrRenderTarget* dst) {
    if (NULL == src || NULL == dst) {
        return;
    }
    ASSERT_OWNED_RESOURCE(src);

    // Writes pending to the source texture are not tracked, so a flush
    // is required to ensure that the copy captures the most recent contents
    // of the source texture. See similar behaviour in
    // GrContext::resolveRenderTarget.
    this->flush();

    GrDrawTarget::AutoStateRestore asr(fGpu, GrDrawTarget::kReset_ASRInit);
    GrDrawState* drawState = fGpu->drawState();
    drawState->setRenderTarget(dst);
    GrMatrix sampleM;
    sampleM.setIDiv(src->width(), src->height());
    drawState->setTexture(0, src);
    drawState->sampler(0)->reset(sampleM);
    SkRect rect = SkRect::MakeXYWH(0, 0,
                                   SK_Scalar1 * src->width(),
                                   SK_Scalar1 * src->height());
    fGpu->drawSimpleRect(rect, NULL, 1 << 0);
}

// Writes a rectangle of pixels into 'target' (or the current render target
// if NULL). Prefers the direct texture-upload path when possible; otherwise
// does a software premul conversion if needed, uploads into a scratch
// texture, and draws that texture into the target.
void GrContext::internalWriteRenderTargetPixels(GrRenderTarget* target,
                                                int left, int top,
                                                int width, int height,
                                                GrPixelConfig config,
                                                const void* buffer,
                                                size_t rowBytes,
                                                uint32_t flags) {
    SK_TRACE_EVENT0("GrContext::writeRenderTargetPixels");
    ASSERT_OWNED_RESOURCE(target);

    if (NULL == target) {
        target = fDrawState->getRenderTarget();
        if (NULL == target) {
            return;
        }
    }

    // TODO: when underlying api has a direct way to do this we should use it
    // (e.g. glDrawPixels on desktop GL).

    // If the RT is also a texture and we don't have to do PM/UPM conversion
    // then take the texture path, which we expect to be at least as fast or
    // faster since it doesn't use an intermediate texture as we do below.

#if !GR_MAC_BUILD
    // At least some drivers on the Mac get confused when glTexImage2D is called
    // on a texture attached to an FBO. The FBO still sees the old image. TODO:
    // determine what OS versions and/or HW is affected.
    if (NULL != target->asTexture() &&
        GrPixelConfigIsUnpremultiplied(target->config()) ==
        GrPixelConfigIsUnpremultiplied(config)) {

        this->internalWriteTexturePixels(target->asTexture(),
                                         left, top, width, height,
                                         config, buffer, rowBytes, flags);
        return;
    }
#endif
    // Software unpremul -> premul conversion when the GPU can't round-trip
    // unpremul values losslessly.
    if (!GrPixelConfigIsUnpremultiplied(target->config()) &&
        GrPixelConfigIsUnpremultiplied(config) &&
        !fGpu->canPreserveReadWriteUnpremulPixels()) {
        SkCanvas::Config8888 srcConfig8888, dstConfig8888;
        if (!grconfig_to_config8888(config, &srcConfig8888) ||
            !grconfig_to_config8888(target->config(), &dstConfig8888)) {
            return;
        }
        // allocate a tmp buffer and sw convert the pixels to premul
        SkAutoSTMalloc<128 * 128, uint32_t> tmpPixels(width * height);
        const uint32_t* src = reinterpret_cast<const uint32_t*>(buffer);
        SkConvertConfig8888Pixels(tmpPixels.get(), 4 * width, dstConfig8888,
                                  src, rowBytes, srcConfig8888,
                                  width, height);
        // upload the already premul pixels
        this->internalWriteRenderTargetPixels(target,
                                              left, top,
                                              width, height,
                                              target->config(),
                                              tmpPixels, 4 * width, flags);
        return;
    }

    bool swapRAndB = fGpu->preferredReadPixelsConfig(config) ==
                     GrPixelConfigSwapRAndB(config);
    if (swapRAndB) {
        config = GrPixelConfigSwapRAndB(config);
    }

    GrTextureDesc desc;
    desc.fWidth = width;
    desc.fHeight = height;
    desc.fConfig = config;

    GrAutoScratchTexture ast(this, desc);
    GrTexture* texture = ast.texture();
    if (NULL == texture) {
        return;
    }
    // Stage 1: upload into the scratch texture.
    this->internalWriteTexturePixels(texture, 0, 0, width, height,
                                     config, buffer, rowBytes, flags);

    // Stage 2: draw the scratch texture into the destination target.
    GrDrawTarget::AutoStateRestore asr(fGpu, GrDrawTarget::kReset_ASRInit);
    GrDrawState* drawState = fGpu->drawState();

    GrMatrix matrix;
    matrix.setTranslate(GrIntToScalar(left), GrIntToScalar(top));
    drawState->setViewMatrix(matrix);
    drawState->setRenderTarget(target);
    drawState->setTexture(0, texture);

    matrix.setIDiv(texture->width(), texture->height());
    drawState->sampler(0)->reset(GrSamplerState::kClamp_WrapMode,
                                 GrSamplerState::kNearest_Filter,
                                 matrix);
    drawState->sampler(0)->setRAndBSwap(swapRAndB);

    static const GrVertexLayout layout = 0;
    static const int VCOUNT = 4;
    // TODO: Use GrGpu::drawRect here
    GrDrawTarget::AutoReleaseGeometry geo(fGpu, layout, VCOUNT, 0);
    if (!geo.succeeded()) {
        GrPrintf("Failed to get space for vertices!\n");
        return;
    }
    ((GrPoint*)geo.vertices())->setIRectFan(0, 0, width, height);
    fGpu->drawNonIndexed(kTriangleFan_GrPrimitiveType, 0, VCOUNT);
}
////////////////////////////////////////////////////////////////////////////////

// Copies the paint's texture/mask stages, color, blend, and state flags into
// the context's GrDrawState prior to drawing.
void GrContext::setPaint(const GrPaint& paint) {
    GrAssert(fDrawState->stagesDisabled());

    for (int i = 0; i < GrPaint::kMaxTextures; ++i) {
        int s = i + GrPaint::kFirstTextureStage;
        ASSERT_OWNED_RESOURCE(paint.getTexture(i));
        if (paint.isTextureStageEnabled(i)) {
            fDrawState->setTexture(s, paint.getTexture(i));
            *fDrawState->sampler(s) = paint.getTextureSampler(i);
        }
    }

    // Mask stages contribute coverage rather than color.
    fDrawState->setFirstCoverageStage(GrPaint::kFirstMaskStage);

    for (int i = 0; i < GrPaint::kMaxMasks; ++i) {
        int s = i + GrPaint::kFirstMaskStage;
        ASSERT_OWNED_RESOURCE(paint.getMask(i));
        if (paint.isMaskStageEnabled(i)) {
            fDrawState->setTexture(s, paint.getMask(i));
            *fDrawState->sampler(s) = paint.getMaskSampler(i);
        }
    }

    // disable all stages not accessible via the paint
    for (int s = GrPaint::kTotalStages; s < GrDrawState::kNumStages; ++s) {
        fDrawState->disableStage(s);
    }

    fDrawState->setColor(paint.fColor);

    if (paint.fDither) {
1614 fDrawState->enableState(GrDrawState::kDither_StateBit); 1615 } else { 1616 fDrawState->disableState(GrDrawState::kDither_StateBit); 1617 } 1618 if (paint.fAntiAlias) { 1619 fDrawState->enableState(GrDrawState::kHWAntialias_StateBit); 1620 } else { 1621 fDrawState->disableState(GrDrawState::kHWAntialias_StateBit); 1622 } 1623 if (paint.fColorMatrixEnabled) { 1624 fDrawState->enableState(GrDrawState::kColorMatrix_StateBit); 1625 fDrawState->setColorMatrix(paint.fColorMatrix); 1626 } else { 1627 fDrawState->disableState(GrDrawState::kColorMatrix_StateBit); 1628 } 1629 fDrawState->setBlendFunc(paint.fSrcBlendCoeff, paint.fDstBlendCoeff); 1630 fDrawState->setColorFilter(paint.fColorFilterColor, paint.fColorFilterXfermode); 1631 fDrawState->setCoverage(paint.fCoverage); 1632#if GR_DEBUG_PARTIAL_COVERAGE_CHECK 1633 if ((paint.getActiveMaskStageMask() || 0xff != paint.fCoverage) && 1634 !fGpu->canApplyCoverage()) { 1635 GrPrintf("Partial pixel coverage will be incorrectly blended.\n"); 1636 } 1637#endif 1638} 1639 1640GrDrawTarget* GrContext::prepareToDraw(const GrPaint& paint, 1641 DrawCategory category) { 1642 if (category != fLastDrawCategory) { 1643 this->flushDrawBuffer(); 1644 fLastDrawCategory = category; 1645 } 1646 this->setPaint(paint); 1647 GrDrawTarget* target = fGpu; 1648 switch (category) { 1649 case kUnbuffered_DrawCategory: 1650 target = fGpu; 1651 break; 1652 case kBuffered_DrawCategory: 1653 target = fDrawBuffer; 1654 fDrawBuffer->setClip(fGpu->getClip()); 1655 break; 1656 default: 1657 GrCrash("Unexpected DrawCategory."); 1658 break; 1659 } 1660 return target; 1661} 1662 1663/* 1664 * This method finds a path renderer that can draw the specified path on 1665 * the provided target. 1666 * Due to its expense, the software path renderer has split out so it can 1667 * can be individually allowed/disallowed via the "allowSW" boolean. 
 */
GrPathRenderer* GrContext::getPathRenderer(const SkPath& path,
                                           GrPathFill fill,
                                           const GrDrawTarget* target,
                                           bool antiAlias,
                                           bool allowSW) {
    // The renderer chain is built lazily on first use.
    if (NULL == fPathRendererChain) {
        fPathRendererChain =
            SkNEW_ARGS(GrPathRendererChain,
                       (this, GrPathRendererChain::kNone_UsageFlag));
    }

    GrPathRenderer* pr = fPathRendererChain->getPathRenderer(path, fill,
                                                             target,
                                                             antiAlias);

    // No GPU renderer can handle this path; fall back to the (lazily built)
    // software rasterizer if the caller allows it.
    if (NULL == pr && allowSW) {
        if (NULL == fSoftwarePathRenderer) {
            fSoftwarePathRenderer = SkNEW_ARGS(GrSoftwarePathRenderer, (this));
        }

        pr = fSoftwarePathRenderer;
    }

    return pr;
}

////////////////////////////////////////////////////////////////////////////////

// Changing the render target flushes pending draws aimed at the old target.
void GrContext::setRenderTarget(GrRenderTarget* target) {
    ASSERT_OWNED_RESOURCE(target);
    if (fDrawState->getRenderTarget() != target) {
        this->flush(false);
        fDrawState->setRenderTarget(target);
    }
}

GrRenderTarget* GrContext::getRenderTarget() {
    return fDrawState->getRenderTarget();
}

const GrRenderTarget* GrContext::getRenderTarget() const {
    return fDrawState->getRenderTarget();
}

bool GrContext::isConfigRenderable(GrPixelConfig config) const {
    return fGpu->isConfigRenderable(config);
}

const GrMatrix& GrContext::getMatrix() const {
    return fDrawState->getViewMatrix();
}

void GrContext::setMatrix(const GrMatrix& m) {
    fDrawState->setViewMatrix(m);
}

// NOTE(review): declared const yet mutates the draw state through the
// fDrawState pointer member (legal C++, but confirm the constness is
// intentional and consistent with setMatrix above).
void GrContext::concatMatrix(const GrMatrix& m) const {
    fDrawState->preConcatViewMatrix(m);
}

// Sets or clears bit 'shift' of 'bits' according to 'pred'.
// NOTE(review): no caller visible in this part of the file.
static inline intptr_t setOrClear(intptr_t bits, int shift, intptr_t pred) {
    intptr_t mask = 1 << shift;
    if (pred) {
        bits |= mask;
    } else {
        bits &= ~mask;
    }
    return bits;
}

// Constructor: takes a ref on 'gpu', wires up the draw state, caches, and
// (via setupDrawBuffer) the deferred drawing machinery.
GrContext::GrContext(GrGpu* gpu) {
    ++THREAD_INSTANCE_COUNT;

    fGpu = gpu;
    fGpu->ref();
    fGpu->setContext(this);

    fDrawState = SkNEW(GrDrawState);
    fGpu->setDrawState(fDrawState);

    fPathRendererChain = NULL;
    fSoftwarePathRenderer = NULL;

    fTextureCache = SkNEW_ARGS(GrResourceCache,
                               (MAX_TEXTURE_CACHE_COUNT,
                                MAX_TEXTURE_CACHE_BYTES));
    fFontCache = SkNEW_ARGS(GrFontCache, (fGpu));

    fLastDrawCategory = kUnbuffered_DrawCategory;

    fDrawBuffer = NULL;
    fDrawBufferVBAllocPool = NULL;
    fDrawBufferIBAllocPool = NULL;

    fAARectRenderer = SkNEW(GrAARectRenderer);

    this->setupDrawBuffer();
}

// Allocates the vertex/index pools and the in-order draw buffer used for
// deferred drawing. Only built when one of the DEFER_*/BATCH_* features is
// enabled; otherwise fDrawBuffer stays NULL.
void GrContext::setupDrawBuffer() {

    GrAssert(NULL == fDrawBuffer);
    GrAssert(NULL == fDrawBufferVBAllocPool);
    GrAssert(NULL == fDrawBufferIBAllocPool);

#if DEFER_TEXT_RENDERING || BATCH_RECT_TO_RECT || DEFER_PATHS
    fDrawBufferVBAllocPool =
        SkNEW_ARGS(GrVertexBufferAllocPool, (fGpu, false,
                                    DRAW_BUFFER_VBPOOL_BUFFER_SIZE,
                                    DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS));
    fDrawBufferIBAllocPool =
        SkNEW_ARGS(GrIndexBufferAllocPool, (fGpu, false,
                                   DRAW_BUFFER_IBPOOL_BUFFER_SIZE,
                                   DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS));

    fDrawBuffer = SkNEW_ARGS(GrInOrderDrawBuffer, (fGpu,
                                          fDrawBufferVBAllocPool,
                                          fDrawBufferIBAllocPool));
#endif

#if BATCH_RECT_TO_RECT
    // BATCH_RECT_TO_RECT implies the #if above created fDrawBuffer.
    fDrawBuffer->setQuadIndexBuffer(this->getQuadIndexBuffer());
#endif
    if (fDrawBuffer) {
        fDrawBuffer->setAutoFlushTarget(fGpu);
        fDrawBuffer->setDrawState(fDrawState);
    }
}

GrDrawTarget* GrContext::getTextTarget(const GrPaint& paint) {
#if DEFER_TEXT_RENDERING
    return prepareToDraw(paint, kBuffered_DrawCategory);
#else
    return prepareToDraw(paint, kUnbuffered_DrawCategory);
#endif
}

const GrIndexBuffer* GrContext::getQuadIndexBuffer() const {
    return fGpu->getQuadIndexBuffer();
}

// Two-pass separable Gaussian blur of srcTexture over 'rect'. Large sigmas
// are handled by downscaling first (see adjust_sigma), convolving, then
// upscaling. 'canClobberSrc' lets srcTexture double as one of the two
// ping-pong temporaries. Returns a ref'ed result texture.
GrTexture* GrContext::gaussianBlur(GrTexture* srcTexture,
                                   bool canClobberSrc,
                                   const SkRect& rect,
                                   float sigmaX, float sigmaY) {
    ASSERT_OWNED_RESOURCE(srcTexture);
    GrRenderTarget* oldRenderTarget = this->getRenderTarget();
    // Draw with identity view matrix; restore the old one on scope exit.
    AutoMatrix avm(this, GrMatrix::I());
    SkIRect clearRect;
    int scaleFactorX, radiusX;
    int scaleFactorY, radiusY;
    sigmaX = adjust_sigma(sigmaX, &scaleFactorX, &radiusX);
    sigmaY = adjust_sigma(sigmaY, &scaleFactorY, &radiusY);

    // Snap the working rect to integer bounds at the downscaled resolution.
    SkRect srcRect(rect);
    scale_rect(&srcRect, 1.0f / scaleFactorX, 1.0f / scaleFactorY);
    srcRect.roundOut();
    scale_rect(&srcRect, static_cast<float>(scaleFactorX),
                         static_cast<float>(scaleFactorY));

    AutoClip acs(this, srcRect);

    GrAssert(kBGRA_8888_PM_GrPixelConfig == srcTexture->config() ||
             kRGBA_8888_PM_GrPixelConfig == srcTexture->config() ||
             kAlpha_8_GrPixelConfig == srcTexture->config());

    GrTextureDesc desc;
    desc.fFlags = kRenderTarget_GrTextureFlagBit | kNoStencil_GrTextureFlagBit;
    desc.fWidth = SkScalarFloorToInt(srcRect.width());
    desc.fHeight = SkScalarFloorToInt(srcRect.height());
    desc.fConfig = srcTexture->config();

    // Two scratch textures are ping-ponged below; when the caller permits,
    // srcTexture itself serves as the second one.
    GrAutoScratchTexture temp1, temp2;
    GrTexture* dstTexture = temp1.set(this, desc);
    GrTexture* tempTexture = canClobberSrc ? srcTexture : temp2.set(this, desc);

    GrPaint paint;
    paint.reset();
    paint.textureSampler(0)->setFilter(GrSamplerState::kBilinear_Filter);

    // Repeated bilinear half-scales approximate the downscale for big sigmas.
    for (int i = 1; i < scaleFactorX || i < scaleFactorY; i *= 2) {
        paint.textureSampler(0)->matrix()->setIDiv(srcTexture->width(),
                                                   srcTexture->height());
        this->setRenderTarget(dstTexture->asRenderTarget());
        SkRect dstRect(srcRect);
        scale_rect(&dstRect, i < scaleFactorX ? 0.5f : 1.0f,
                             i < scaleFactorY ? 0.5f : 1.0f);
        paint.setTexture(0, srcTexture);
        this->drawRectToRect(paint, dstRect, srcRect);
        srcRect = dstRect;
        srcTexture = dstTexture;
        SkTSwap(dstTexture, tempTexture);
    }

    SkIRect srcIRect;
    srcRect.roundOut(&srcIRect);

    if (sigmaX > 0.0f) {
        if (scaleFactorX > 1) {
            // Clear out a radius to the right of the srcRect to prevent the
            // X convolution from reading garbage.
            clearRect = SkIRect::MakeXYWH(srcIRect.fRight, srcIRect.fTop,
                                          radiusX, srcIRect.height());
            this->clear(&clearRect, 0x0);
        }

        this->setRenderTarget(dstTexture->asRenderTarget());
        convolve_gaussian(fGpu, srcTexture, srcRect, sigmaX, radiusX,
                          Gr1DKernelEffect::kX_Direction);
        srcTexture = dstTexture;
        SkTSwap(dstTexture, tempTexture);
    }

    if (sigmaY > 0.0f) {
        if (scaleFactorY > 1 || sigmaX > 0.0f) {
            // Clear out a radius below the srcRect to prevent the Y
            // convolution from reading garbage.
            clearRect = SkIRect::MakeXYWH(srcIRect.fLeft, srcIRect.fBottom,
                                          srcIRect.width(), radiusY);
            this->clear(&clearRect, 0x0);
        }

        this->setRenderTarget(dstTexture->asRenderTarget());
        convolve_gaussian(fGpu, srcTexture, srcRect, sigmaY, radiusY,
                          Gr1DKernelEffect::kY_Direction);
        srcTexture = dstTexture;
        SkTSwap(dstTexture, tempTexture);
    }

    if (scaleFactorX > 1 || scaleFactorY > 1) {
        // Clear one pixel to the right and below, to accommodate bilinear
        // upsampling.
        clearRect = SkIRect::MakeXYWH(srcIRect.fLeft, srcIRect.fBottom,
                                      srcIRect.width() + 1, 1);
        this->clear(&clearRect, 0x0);
        clearRect = SkIRect::MakeXYWH(srcIRect.fRight, srcIRect.fTop,
                                      1, srcIRect.height());
        this->clear(&clearRect, 0x0);
        // FIXME: This should be mitchell, not bilinear.
        paint.textureSampler(0)->setFilter(GrSamplerState::kBilinear_Filter);
        paint.textureSampler(0)->matrix()->setIDiv(srcTexture->width(),
                                                   srcTexture->height());
        this->setRenderTarget(dstTexture->asRenderTarget());
        paint.setTexture(0, srcTexture);
        SkRect dstRect(srcRect);
        scale_rect(&dstRect, (float) scaleFactorX, (float) scaleFactorY);
        this->drawRectToRect(paint, dstRect, srcRect);
        srcRect = dstRect;
        srcTexture = dstTexture;
        SkTSwap(dstTexture, tempTexture);
    }
    this->setRenderTarget(oldRenderTarget);
    // Hand ownership of whichever texture holds the result back to the
    // caller: detach from the scratch wrapper, or ref the clobbered source.
    if (srcTexture == temp1.texture()) {
        return temp1.detach();
    } else if (srcTexture == temp2.texture()) {
        return temp2.detach();
    } else {
        srcTexture->ref();
        return srcTexture;
    }
}

// Applies a separable morphology filter (dilate/erode per 'morphType') with
// the given x/y radii, in up to two passes. Returns a ref'ed result texture
// (possibly srcTexture itself when both radii are zero).
GrTexture* GrContext::applyMorphology(GrTexture* srcTexture,
                                      const GrRect& rect,
                                      MorphologyType morphType,
                                      SkISize radius) {
    ASSERT_OWNED_RESOURCE(srcTexture);
    srcTexture->ref();
    GrRenderTarget* oldRenderTarget = this->getRenderTarget();

    AutoMatrix avm(this, GrMatrix::I());

    AutoClip acs(this, GrRect::MakeWH(SkIntToScalar(srcTexture->width()),
                                      SkIntToScalar(srcTexture->height())));
    GrTextureDesc desc;
    desc.fFlags = kRenderTarget_GrTextureFlagBit | kNoStencil_GrTextureFlagBit;
    desc.fWidth = SkScalarCeilToInt(rect.width());
    desc.fHeight = SkScalarCeilToInt(rect.height());
    desc.fConfig = kRGBA_8888_PM_GrPixelConfig;
    if (radius.fWidth > 0) {
        GrAutoScratchTexture ast(this, desc);
        this->setRenderTarget(ast.texture()->asRenderTarget());
        apply_morphology(fGpu, srcTexture, rect, radius.fWidth, morphType,
                         Gr1DKernelEffect::kX_Direction);
        // Clear the strip below the filtered rect so the Y pass (if any)
        // doesn't read garbage.
        SkIRect clearRect = SkIRect::MakeXYWH(
                    SkScalarFloorToInt(rect.fLeft),
                    SkScalarFloorToInt(rect.fBottom),
                    SkScalarFloorToInt(rect.width()),
                    radius.fHeight);
        this->clear(&clearRect, 0x0);
        srcTexture->unref();
        srcTexture = ast.detach();
    }
    if (radius.fHeight > 0) {
        GrAutoScratchTexture ast(this, desc);
        this->setRenderTarget(ast.texture()->asRenderTarget());
        apply_morphology(fGpu, srcTexture, rect, radius.fHeight, morphType,
                         Gr1DKernelEffect::kY_Direction);
        srcTexture->unref();
        srcTexture = ast.detach();
    }
    this->setRenderTarget(oldRenderTarget);
    return srcTexture;
}

///////////////////////////////////////////////////////////////////////////////