// GrContext.cpp revision f13f58804659175925042a291304d483a4fd9278
1 2/* 3 * Copyright 2011 Google Inc. 4 * 5 * Use of this source code is governed by a BSD-style license that can be 6 * found in the LICENSE file. 7 */ 8 9 10#include "GrContext.h" 11 12#include "effects/GrMorphologyEffect.h" 13#include "effects/GrConvolutionEffect.h" 14 15#include "GrBufferAllocPool.h" 16#include "GrClipIterator.h" 17#include "GrGpu.h" 18#include "GrIndexBuffer.h" 19#include "GrInOrderDrawBuffer.h" 20#include "GrPathRenderer.h" 21#include "GrPathUtils.h" 22#include "GrResourceCache.h" 23#include "GrSoftwarePathRenderer.h" 24#include "GrStencilBuffer.h" 25#include "GrTextStrike.h" 26#include "SkTLazy.h" 27#include "SkTLS.h" 28#include "SkTrace.h" 29 30#define DEFER_TEXT_RENDERING 1 31 32#define DEFER_PATHS 1 33 34#define BATCH_RECT_TO_RECT (1 && !GR_STATIC_RECT_VB) 35 36#define MAX_BLUR_SIGMA 4.0f 37 38// When we're using coverage AA but the blend is incompatible (given gpu 39// limitations) should we disable AA or draw wrong? 40#define DISABLE_COVERAGE_AA_FOR_BLEND 1 41 42#if GR_DEBUG 43 // change this to a 1 to see notifications when partial coverage fails 44 #define GR_DEBUG_PARTIAL_COVERAGE_CHECK 0 45#else 46 #define GR_DEBUG_PARTIAL_COVERAGE_CHECK 0 47#endif 48 49static const size_t MAX_TEXTURE_CACHE_COUNT = 256; 50static const size_t MAX_TEXTURE_CACHE_BYTES = 16 * 1024 * 1024; 51 52static const size_t DRAW_BUFFER_VBPOOL_BUFFER_SIZE = 1 << 15; 53static const int DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS = 4; 54 55// path rendering is the only thing we defer today that uses non-static indices 56static const size_t DRAW_BUFFER_IBPOOL_BUFFER_SIZE = DEFER_PATHS ? 1 << 11 : 0; 57static const int DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS = DEFER_PATHS ? 
4 : 0; 58 59#define ASSERT_OWNED_RESOURCE(R) GrAssert(!(R) || (R)->getContext() == this) 60 61GrContext* GrContext::Create(GrEngine engine, 62 GrPlatform3DContext context3D) { 63 GrContext* ctx = NULL; 64 GrGpu* fGpu = GrGpu::Create(engine, context3D); 65 if (NULL != fGpu) { 66 ctx = new GrContext(fGpu); 67 fGpu->unref(); 68 } 69 return ctx; 70} 71 72namespace { 73void* CreateThreadInstanceCount() { 74 return new int(0); 75} 76void DeleteThreadInstanceCount(void* v) { 77 delete reinterpret_cast<int*>(v); 78} 79#define THREAD_INSTANCE_COUNT \ 80 (*reinterpret_cast<int*>(SkTLS::Get(CreateThreadInstanceCount, \ 81 DeleteThreadInstanceCount))) 82 83} 84 85int GrContext::GetThreadInstanceCount() { 86 return THREAD_INSTANCE_COUNT; 87} 88 89GrContext::~GrContext() { 90 this->flush(); 91 92 // Since the gpu can hold scratch textures, give it a chance to let go 93 // of them before freeing the texture cache 94 fGpu->purgeResources(); 95 96 delete fTextureCache; 97 delete fFontCache; 98 delete fDrawBuffer; 99 delete fDrawBufferVBAllocPool; 100 delete fDrawBufferIBAllocPool; 101 102 fAARectRenderer->unref(); 103 104 fGpu->unref(); 105 GrSafeUnref(fPathRendererChain); 106 GrSafeUnref(fSoftwarePathRenderer); 107 fDrawState->unref(); 108 109 --THREAD_INSTANCE_COUNT; 110} 111 112void GrContext::contextLost() { 113 contextDestroyed(); 114 this->setupDrawBuffer(); 115} 116 117void GrContext::contextDestroyed() { 118 // abandon first to so destructors 119 // don't try to free the resources in the API. 
120 fGpu->abandonResources(); 121 122 // a path renderer may be holding onto resources that 123 // are now unusable 124 GrSafeSetNull(fPathRendererChain); 125 GrSafeSetNull(fSoftwarePathRenderer); 126 127 delete fDrawBuffer; 128 fDrawBuffer = NULL; 129 130 delete fDrawBufferVBAllocPool; 131 fDrawBufferVBAllocPool = NULL; 132 133 delete fDrawBufferIBAllocPool; 134 fDrawBufferIBAllocPool = NULL; 135 136 fAARectRenderer->reset(); 137 138 fTextureCache->removeAll(); 139 fFontCache->freeAll(); 140 fGpu->markContextDirty(); 141} 142 143void GrContext::resetContext() { 144 fGpu->markContextDirty(); 145} 146 147void GrContext::freeGpuResources() { 148 this->flush(); 149 150 fGpu->purgeResources(); 151 152 fAARectRenderer->reset(); 153 154 fTextureCache->removeAll(); 155 fFontCache->freeAll(); 156 // a path renderer may be holding onto resources 157 GrSafeSetNull(fPathRendererChain); 158 GrSafeSetNull(fSoftwarePathRenderer); 159} 160 161size_t GrContext::getGpuTextureCacheBytes() const { 162 return fTextureCache->getCachedResourceBytes(); 163} 164 165//////////////////////////////////////////////////////////////////////////////// 166 167int GrContext::PaintStageVertexLayoutBits( 168 const GrPaint& paint, 169 const bool hasTexCoords[GrPaint::kTotalStages]) { 170 int stageMask = paint.getActiveStageMask(); 171 int layout = 0; 172 for (int i = 0; i < GrPaint::kTotalStages; ++i) { 173 if ((1 << i) & stageMask) { 174 if (NULL != hasTexCoords && hasTexCoords[i]) { 175 layout |= GrDrawTarget::StageTexCoordVertexLayoutBit(i, i); 176 } 177 } 178 } 179 return layout; 180} 181 182 183//////////////////////////////////////////////////////////////////////////////// 184 185GrTexture* GrContext::TextureCacheEntry::texture() const { 186 if (NULL == fEntry) { 187 return NULL; 188 } else { 189 return (GrTexture*) fEntry->resource(); 190 } 191} 192 193namespace { 194 195// we should never have more than one stencil buffer with same combo of 196// (width,height,samplecount) 197void 
gen_stencil_key_values(int width, int height, 198 int sampleCnt, uint32_t v[4]) { 199 v[0] = width; 200 v[1] = height; 201 v[2] = sampleCnt; 202 v[3] = GrResourceKey::kStencilBuffer_TypeBit; 203} 204 205void gen_stencil_key_values(const GrStencilBuffer* sb, 206 uint32_t v[4]) { 207 gen_stencil_key_values(sb->width(), sb->height(), 208 sb->numSamples(), v); 209} 210 211void scale_rect(SkRect* rect, float xScale, float yScale) { 212 rect->fLeft = SkScalarMul(rect->fLeft, SkFloatToScalar(xScale)); 213 rect->fTop = SkScalarMul(rect->fTop, SkFloatToScalar(yScale)); 214 rect->fRight = SkScalarMul(rect->fRight, SkFloatToScalar(xScale)); 215 rect->fBottom = SkScalarMul(rect->fBottom, SkFloatToScalar(yScale)); 216} 217 218float adjust_sigma(float sigma, int *scaleFactor, int *radius) { 219 *scaleFactor = 1; 220 while (sigma > MAX_BLUR_SIGMA) { 221 *scaleFactor *= 2; 222 sigma *= 0.5f; 223 } 224 *radius = static_cast<int>(ceilf(sigma * 3.0f)); 225 GrAssert(*radius <= GrConvolutionEffect::kMaxKernelRadius); 226 return sigma; 227} 228 229void apply_morphology(GrGpu* gpu, 230 GrTexture* texture, 231 const SkRect& rect, 232 int radius, 233 GrContext::MorphologyType morphType, 234 Gr1DKernelEffect::Direction direction) { 235 236 GrRenderTarget* target = gpu->drawState()->getRenderTarget(); 237 GrDrawTarget::AutoStateRestore asr(gpu, GrDrawTarget::kReset_ASRInit); 238 GrDrawState* drawState = gpu->drawState(); 239 drawState->setRenderTarget(target); 240 GrMatrix sampleM; 241 sampleM.setIDiv(texture->width(), texture->height()); 242 drawState->sampler(0)->reset(sampleM); 243 SkAutoTUnref<GrCustomStage> morph( 244 new GrMorphologyEffect(direction, radius, morphType)); 245 drawState->sampler(0)->setCustomStage(morph); 246 drawState->setTexture(0, texture); 247 gpu->drawSimpleRect(rect, NULL, 1 << 0); 248} 249 250void convolve_gaussian(GrGpu* gpu, 251 GrTexture* texture, 252 const SkRect& rect, 253 float sigma, 254 int radius, 255 Gr1DKernelEffect::Direction direction) { 256 
GrRenderTarget* target = gpu->drawState()->getRenderTarget(); 257 GrDrawTarget::AutoStateRestore asr(gpu, GrDrawTarget::kReset_ASRInit); 258 GrDrawState* drawState = gpu->drawState(); 259 drawState->setRenderTarget(target); 260 GrMatrix sampleM; 261 sampleM.setIDiv(texture->width(), texture->height()); 262 drawState->sampler(0)->reset(sampleM); 263 SkAutoTUnref<GrConvolutionEffect> conv(new 264 GrConvolutionEffect(direction, radius)); 265 conv->setGaussianKernel(sigma); 266 drawState->sampler(0)->setCustomStage(conv); 267 drawState->setTexture(0, texture); 268 gpu->drawSimpleRect(rect, NULL, 1 << 0); 269} 270 271} 272 273GrContext::TextureCacheEntry GrContext::findAndLockTexture( 274 const GrTextureDesc& desc, 275 const GrSamplerState* sampler) { 276 GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, sampler, desc, false); 277 return TextureCacheEntry(fTextureCache->findAndLock(resourceKey, 278 GrResourceCache::kNested_LockType)); 279} 280 281bool GrContext::isTextureInCache(const GrTextureDesc& desc, 282 const GrSamplerState* sampler) const { 283 GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, sampler, desc, false); 284 return fTextureCache->hasKey(resourceKey); 285} 286 287GrResourceEntry* GrContext::addAndLockStencilBuffer(GrStencilBuffer* sb) { 288 ASSERT_OWNED_RESOURCE(sb); 289 uint32_t v[4]; 290 gen_stencil_key_values(sb, v); 291 GrResourceKey resourceKey(v); 292 return fTextureCache->createAndLock(resourceKey, sb); 293} 294 295GrStencilBuffer* GrContext::findStencilBuffer(int width, int height, 296 int sampleCnt) { 297 uint32_t v[4]; 298 gen_stencil_key_values(width, height, sampleCnt, v); 299 GrResourceKey resourceKey(v); 300 GrResourceEntry* entry = fTextureCache->findAndLock(resourceKey, 301 GrResourceCache::kSingle_LockType); 302 if (NULL != entry) { 303 GrStencilBuffer* sb = (GrStencilBuffer*) entry->resource(); 304 return sb; 305 } else { 306 return NULL; 307 } 308} 309 310void GrContext::unlockStencilBuffer(GrResourceEntry* sbEntry) { 311 
ASSERT_OWNED_RESOURCE(sbEntry->resource()); 312 fTextureCache->unlock(sbEntry); 313} 314 315static void stretchImage(void* dst, 316 int dstW, 317 int dstH, 318 void* src, 319 int srcW, 320 int srcH, 321 int bpp) { 322 GrFixed dx = (srcW << 16) / dstW; 323 GrFixed dy = (srcH << 16) / dstH; 324 325 GrFixed y = dy >> 1; 326 327 int dstXLimit = dstW*bpp; 328 for (int j = 0; j < dstH; ++j) { 329 GrFixed x = dx >> 1; 330 void* srcRow = (uint8_t*)src + (y>>16)*srcW*bpp; 331 void* dstRow = (uint8_t*)dst + j*dstW*bpp; 332 for (int i = 0; i < dstXLimit; i += bpp) { 333 memcpy((uint8_t*) dstRow + i, 334 (uint8_t*) srcRow + (x>>16)*bpp, 335 bpp); 336 x += dx; 337 } 338 y += dy; 339 } 340} 341 342GrContext::TextureCacheEntry GrContext::createAndLockTexture( 343 const GrSamplerState* sampler, 344 const GrTextureDesc& desc, 345 void* srcData, 346 size_t rowBytes) { 347 SK_TRACE_EVENT0("GrContext::createAndLockTexture"); 348 349#if GR_DUMP_TEXTURE_UPLOAD 350 GrPrintf("GrContext::createAndLockTexture [%d %d]\n", desc.fWidth, desc.fHeight); 351#endif 352 353 TextureCacheEntry entry; 354 355 GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, sampler, 356 desc, false); 357 358 if (GrTexture::NeedsResizing(resourceKey)) { 359 // The desired texture is NPOT and tiled but that isn't supported by 360 // the current hardware. 
Resize the texture to be a POT 361 GrAssert(NULL != sampler); 362 TextureCacheEntry clampEntry = this->findAndLockTexture(desc, 363 NULL); 364 365 if (NULL == clampEntry.texture()) { 366 clampEntry = this->createAndLockTexture(NULL, desc, 367 srcData, rowBytes); 368 GrAssert(NULL != clampEntry.texture()); 369 if (NULL == clampEntry.texture()) { 370 return entry; 371 } 372 } 373 GrTextureDesc rtDesc = desc; 374 rtDesc.fFlags = rtDesc.fFlags | 375 kRenderTarget_GrTextureFlagBit | 376 kNoStencil_GrTextureFlagBit; 377 rtDesc.fWidth = GrNextPow2(GrMax(desc.fWidth, 64)); 378 rtDesc.fHeight = GrNextPow2(GrMax(desc.fHeight, 64)); 379 380 GrTexture* texture = fGpu->createTexture(rtDesc, NULL, 0); 381 382 if (NULL != texture) { 383 GrDrawTarget::AutoStateRestore asr(fGpu, 384 GrDrawTarget::kReset_ASRInit); 385 GrDrawState* drawState = fGpu->drawState(); 386 drawState->setRenderTarget(texture->asRenderTarget()); 387 drawState->setTexture(0, clampEntry.texture()); 388 389 GrSamplerState::Filter filter; 390 // if filtering is not desired then we want to ensure all 391 // texels in the resampled image are copies of texels from 392 // the original. 
393 if (GrTexture::NeedsFiltering(resourceKey)) { 394 filter = GrSamplerState::kBilinear_Filter; 395 } else { 396 filter = GrSamplerState::kNearest_Filter; 397 } 398 drawState->sampler(0)->reset(GrSamplerState::kClamp_WrapMode, 399 filter); 400 401 static const GrVertexLayout layout = 402 GrDrawTarget::StageTexCoordVertexLayoutBit(0,0); 403 GrDrawTarget::AutoReleaseGeometry arg(fGpu, layout, 4, 0); 404 405 if (arg.succeeded()) { 406 GrPoint* verts = (GrPoint*) arg.vertices(); 407 verts[0].setIRectFan(0, 0, 408 texture->width(), 409 texture->height(), 410 2*sizeof(GrPoint)); 411 verts[1].setIRectFan(0, 0, 1, 1, 2*sizeof(GrPoint)); 412 fGpu->drawNonIndexed(kTriangleFan_GrPrimitiveType, 413 0, 4); 414 entry.set(fTextureCache->createAndLock(resourceKey, texture)); 415 } 416 texture->releaseRenderTarget(); 417 } else { 418 // TODO: Our CPU stretch doesn't filter. But we create separate 419 // stretched textures when the sampler state is either filtered or 420 // not. Either implement filtered stretch blit on CPU or just create 421 // one when FBO case fails. 422 423 rtDesc.fFlags = kNone_GrTextureFlags; 424 // no longer need to clamp at min RT size. 
425 rtDesc.fWidth = GrNextPow2(desc.fWidth); 426 rtDesc.fHeight = GrNextPow2(desc.fHeight); 427 int bpp = GrBytesPerPixel(desc.fConfig); 428 SkAutoSMalloc<128*128*4> stretchedPixels(bpp * 429 rtDesc.fWidth * 430 rtDesc.fHeight); 431 stretchImage(stretchedPixels.get(), rtDesc.fWidth, rtDesc.fHeight, 432 srcData, desc.fWidth, desc.fHeight, bpp); 433 434 size_t stretchedRowBytes = rtDesc.fWidth * bpp; 435 436 GrTexture* texture = fGpu->createTexture(rtDesc, 437 stretchedPixels.get(), 438 stretchedRowBytes); 439 GrAssert(NULL != texture); 440 entry.set(fTextureCache->createAndLock(resourceKey, texture)); 441 } 442 fTextureCache->unlock(clampEntry.cacheEntry()); 443 444 } else { 445 GrTexture* texture = fGpu->createTexture(desc, srcData, rowBytes); 446 if (NULL != texture) { 447 entry.set(fTextureCache->createAndLock(resourceKey, texture)); 448 } 449 } 450 return entry; 451} 452 453GrContext::TextureCacheEntry GrContext::lockScratchTexture( 454 const GrTextureDesc& inDesc, 455 ScratchTexMatch match) { 456 GrTextureDesc desc = inDesc; 457 desc.fClientCacheID = kScratch_CacheID; 458 459 if (kExact_ScratchTexMatch != match) { 460 // bin by pow2 with a reasonable min 461 static const int MIN_SIZE = 256; 462 desc.fWidth = GrMax(MIN_SIZE, GrNextPow2(desc.fWidth)); 463 desc.fHeight = GrMax(MIN_SIZE, GrNextPow2(desc.fHeight)); 464 } 465 466 GrResourceEntry* entry; 467 int origWidth = desc.fWidth; 468 int origHeight = desc.fHeight; 469 bool doubledW = false; 470 bool doubledH = false; 471 472 do { 473 GrResourceKey key = GrTexture::ComputeKey(fGpu, NULL, desc, true); 474 entry = fTextureCache->findAndLock(key, 475 GrResourceCache::kNested_LockType); 476 // if we miss, relax the fit of the flags... 477 // then try doubling width... then height. 
478 if (NULL != entry || kExact_ScratchTexMatch == match) { 479 break; 480 } 481 if (!(desc.fFlags & kRenderTarget_GrTextureFlagBit)) { 482 desc.fFlags = desc.fFlags | kRenderTarget_GrTextureFlagBit; 483 } else if (desc.fFlags & kNoStencil_GrTextureFlagBit) { 484 desc.fFlags = desc.fFlags & ~kNoStencil_GrTextureFlagBit; 485 } else if (!doubledW) { 486 desc.fFlags = inDesc.fFlags; 487 desc.fWidth *= 2; 488 doubledW = true; 489 } else if (!doubledH) { 490 desc.fFlags = inDesc.fFlags; 491 desc.fWidth = origWidth; 492 desc.fHeight *= 2; 493 doubledH = true; 494 } else { 495 break; 496 } 497 498 } while (true); 499 500 if (NULL == entry) { 501 desc.fFlags = inDesc.fFlags; 502 desc.fWidth = origWidth; 503 desc.fHeight = origHeight; 504 GrTexture* texture = fGpu->createTexture(desc, NULL, 0); 505 if (NULL != texture) { 506 GrResourceKey key = GrTexture::ComputeKey(fGpu, NULL, 507 texture->desc(), 508 true); 509 entry = fTextureCache->createAndLock(key, texture); 510 } 511 } 512 513 // If the caller gives us the same desc/sampler twice we don't want 514 // to return the same texture the second time (unless it was previously 515 // released). So we detach the entry from the cache and reattach at release. 516 if (NULL != entry) { 517 fTextureCache->detach(entry); 518 } 519 return TextureCacheEntry(entry); 520} 521 522void GrContext::addExistingTextureToCache(GrTexture* texture) { 523 524 if (NULL == texture) { 525 return; 526 } 527 528 GrResourceKey key = GrTexture::ComputeKey(fGpu, NULL, 529 texture->desc(), 530 true); 531 fTextureCache->attach(key, texture); 532} 533 534void GrContext::unlockTexture(TextureCacheEntry entry) { 535 ASSERT_OWNED_RESOURCE(entry.texture()); 536 // If this is a scratch texture we detached it from the cache 537 // while it was locked (to avoid two callers simultaneously getting 538 // the same texture). 
539 if (GrTexture::IsScratchTexture(entry.cacheEntry()->key())) { 540 fTextureCache->reattachAndUnlock(entry.cacheEntry()); 541 } else { 542 fTextureCache->unlock(entry.cacheEntry()); 543 } 544} 545 546void GrContext::freeEntry(TextureCacheEntry entry) { 547 ASSERT_OWNED_RESOURCE(entry.texture()); 548 549 fTextureCache->freeEntry(entry.cacheEntry()); 550} 551 552GrTexture* GrContext::createUncachedTexture(const GrTextureDesc& descIn, 553 void* srcData, 554 size_t rowBytes) { 555 GrTextureDesc descCopy = descIn; 556 descCopy.fClientCacheID = kUncached_CacheID; 557 return fGpu->createTexture(descCopy, srcData, rowBytes); 558} 559 560void GrContext::getTextureCacheLimits(int* maxTextures, 561 size_t* maxTextureBytes) const { 562 fTextureCache->getLimits(maxTextures, maxTextureBytes); 563} 564 565void GrContext::setTextureCacheLimits(int maxTextures, size_t maxTextureBytes) { 566 fTextureCache->setLimits(maxTextures, maxTextureBytes); 567} 568 569int GrContext::getMaxTextureSize() const { 570 return fGpu->getCaps().fMaxTextureSize; 571} 572 573int GrContext::getMaxRenderTargetSize() const { 574 return fGpu->getCaps().fMaxRenderTargetSize; 575} 576 577/////////////////////////////////////////////////////////////////////////////// 578 579GrTexture* GrContext::createPlatformTexture(const GrPlatformTextureDesc& desc) { 580 return fGpu->createPlatformTexture(desc); 581} 582 583GrRenderTarget* GrContext::createPlatformRenderTarget(const GrPlatformRenderTargetDesc& desc) { 584 return fGpu->createPlatformRenderTarget(desc); 585} 586 587/////////////////////////////////////////////////////////////////////////////// 588 589bool GrContext::supportsIndex8PixelConfig(const GrSamplerState* sampler, 590 int width, int height) const { 591 const GrDrawTarget::Caps& caps = fGpu->getCaps(); 592 if (!caps.f8BitPaletteSupport) { 593 return false; 594 } 595 596 bool isPow2 = GrIsPow2(width) && GrIsPow2(height); 597 598 if (!isPow2) { 599 bool tiled = NULL != sampler && 600 
(sampler->getWrapX() != GrSamplerState::kClamp_WrapMode || 601 sampler->getWrapY() != GrSamplerState::kClamp_WrapMode); 602 if (tiled && !caps.fNPOTTextureTileSupport) { 603 return false; 604 } 605 } 606 return true; 607} 608 609//////////////////////////////////////////////////////////////////////////////// 610 611const GrClip& GrContext::getClip() const { return fGpu->getClip(); } 612 613void GrContext::setClip(const GrClip& clip) { 614 fGpu->setClip(clip); 615 fDrawState->enableState(GrDrawState::kClip_StateBit); 616} 617 618void GrContext::setClip(const GrIRect& rect) { 619 GrClip clip; 620 clip.setFromIRect(rect); 621 fGpu->setClip(clip); 622} 623 624//////////////////////////////////////////////////////////////////////////////// 625 626void GrContext::clear(const GrIRect* rect, 627 const GrColor color, 628 GrRenderTarget* target) { 629 this->flush(); 630 fGpu->clear(rect, color, target); 631} 632 633void GrContext::drawPaint(const GrPaint& paint) { 634 // set rect to be big enough to fill the space, but not super-huge, so we 635 // don't overflow fixed-point implementations 636 GrRect r; 637 r.setLTRB(0, 0, 638 GrIntToScalar(getRenderTarget()->width()), 639 GrIntToScalar(getRenderTarget()->height())); 640 GrMatrix inverse; 641 SkTLazy<GrPaint> tmpPaint; 642 const GrPaint* p = &paint; 643 GrAutoMatrix am; 644 645 // We attempt to map r by the inverse matrix and draw that. mapRect will 646 // map the four corners and bound them with a new rect. This will not 647 // produce a correct result for some perspective matrices. 
648 if (!this->getMatrix().hasPerspective()) { 649 if (!fDrawState->getViewInverse(&inverse)) { 650 GrPrintf("Could not invert matrix"); 651 return; 652 } 653 inverse.mapRect(&r); 654 } else { 655 if (paint.getActiveMaskStageMask() || paint.getActiveStageMask()) { 656 if (!fDrawState->getViewInverse(&inverse)) { 657 GrPrintf("Could not invert matrix"); 658 return; 659 } 660 tmpPaint.set(paint); 661 tmpPaint.get()->preConcatActiveSamplerMatrices(inverse); 662 p = tmpPaint.get(); 663 } 664 am.set(this, GrMatrix::I()); 665 } 666 // by definition this fills the entire clip, no need for AA 667 if (paint.fAntiAlias) { 668 if (!tmpPaint.isValid()) { 669 tmpPaint.set(paint); 670 p = tmpPaint.get(); 671 } 672 GrAssert(p == tmpPaint.get()); 673 tmpPaint.get()->fAntiAlias = false; 674 } 675 this->drawRect(*p, r); 676} 677 678//////////////////////////////////////////////////////////////////////////////// 679 680namespace { 681inline bool disable_coverage_aa_for_blend(GrDrawTarget* target) { 682 return DISABLE_COVERAGE_AA_FOR_BLEND && !target->canApplyCoverage(); 683} 684} 685 686//////////////////////////////////////////////////////////////////////////////// 687 688/* create a triangle strip that strokes the specified triangle. There are 8 689 unique vertices, but we repreat the last 2 to close up. Alternatively we 690 could use an indices array, and then only send 8 verts, but not sure that 691 would be faster. 
692 */ 693static void setStrokeRectStrip(GrPoint verts[10], GrRect rect, 694 GrScalar width) { 695 const GrScalar rad = GrScalarHalf(width); 696 rect.sort(); 697 698 verts[0].set(rect.fLeft + rad, rect.fTop + rad); 699 verts[1].set(rect.fLeft - rad, rect.fTop - rad); 700 verts[2].set(rect.fRight - rad, rect.fTop + rad); 701 verts[3].set(rect.fRight + rad, rect.fTop - rad); 702 verts[4].set(rect.fRight - rad, rect.fBottom - rad); 703 verts[5].set(rect.fRight + rad, rect.fBottom + rad); 704 verts[6].set(rect.fLeft + rad, rect.fBottom - rad); 705 verts[7].set(rect.fLeft - rad, rect.fBottom + rad); 706 verts[8] = verts[0]; 707 verts[9] = verts[1]; 708} 709 710/** 711 * Returns true if the rects edges are integer-aligned. 712 */ 713static bool isIRect(const GrRect& r) { 714 return GrScalarIsInt(r.fLeft) && GrScalarIsInt(r.fTop) && 715 GrScalarIsInt(r.fRight) && GrScalarIsInt(r.fBottom); 716} 717 718static bool apply_aa_to_rect(GrDrawTarget* target, 719 const GrRect& rect, 720 GrScalar width, 721 const GrMatrix* matrix, 722 GrMatrix* combinedMatrix, 723 GrRect* devRect, 724 bool* useVertexCoverage) { 725 // we use a simple coverage ramp to do aa on axis-aligned rects 726 // we check if the rect will be axis-aligned, and the rect won't land on 727 // integer coords. 728 729 // we are keeping around the "tweak the alpha" trick because 730 // it is our only hope for the fixed-pipe implementation. 
731 // In a shader implementation we can give a separate coverage input 732 // TODO: remove this ugliness when we drop the fixed-pipe impl 733 *useVertexCoverage = false; 734 if (!target->canTweakAlphaForCoverage()) { 735 if (disable_coverage_aa_for_blend(target)) { 736#if GR_DEBUG 737 //GrPrintf("Turning off AA to correctly apply blend.\n"); 738#endif 739 return false; 740 } else { 741 *useVertexCoverage = true; 742 } 743 } 744 const GrDrawState& drawState = target->getDrawState(); 745 if (drawState.getRenderTarget()->isMultisampled()) { 746 return false; 747 } 748 749 if (0 == width && target->willUseHWAALines()) { 750 return false; 751 } 752 753 if (!drawState.getViewMatrix().preservesAxisAlignment()) { 754 return false; 755 } 756 757 if (NULL != matrix && 758 !matrix->preservesAxisAlignment()) { 759 return false; 760 } 761 762 *combinedMatrix = drawState.getViewMatrix(); 763 if (NULL != matrix) { 764 combinedMatrix->preConcat(*matrix); 765 GrAssert(combinedMatrix->preservesAxisAlignment()); 766 } 767 768 combinedMatrix->mapRect(devRect, rect); 769 devRect->sort(); 770 771 if (width < 0) { 772 return !isIRect(*devRect); 773 } else { 774 return true; 775 } 776} 777 778void GrContext::drawRect(const GrPaint& paint, 779 const GrRect& rect, 780 GrScalar width, 781 const GrMatrix* matrix) { 782 SK_TRACE_EVENT0("GrContext::drawRect"); 783 784 GrDrawTarget* target = this->prepareToDraw(paint, kUnbuffered_DrawCategory); 785 GrDrawState::AutoStageDisable atr(fDrawState); 786 int stageMask = paint.getActiveStageMask(); 787 788 GrRect devRect = rect; 789 GrMatrix combinedMatrix; 790 bool useVertexCoverage; 791 bool needAA = paint.fAntiAlias && 792 !this->getRenderTarget()->isMultisampled(); 793 bool doAA = needAA && apply_aa_to_rect(target, rect, width, matrix, 794 &combinedMatrix, &devRect, 795 &useVertexCoverage); 796 797 if (doAA) { 798 GrDrawTarget::AutoDeviceCoordDraw adcd(target, stageMask); 799 if (width >= 0) { 800 GrVec strokeSize;; 801 if (width > 0) { 802 
strokeSize.set(width, width); 803 combinedMatrix.mapVectors(&strokeSize, 1); 804 strokeSize.setAbs(strokeSize); 805 } else { 806 strokeSize.set(GR_Scalar1, GR_Scalar1); 807 } 808 fAARectRenderer->strokeAARect(this->getGpu(), target, devRect, 809 strokeSize, useVertexCoverage); 810 } else { 811 fAARectRenderer->fillAARect(this->getGpu(), target, 812 devRect, useVertexCoverage); 813 } 814 return; 815 } 816 817 if (width >= 0) { 818 // TODO: consider making static vertex buffers for these cases. 819 // Hairline could be done by just adding closing vertex to 820 // unitSquareVertexBuffer() 821 GrVertexLayout layout = PaintStageVertexLayoutBits(paint, NULL); 822 823 static const int worstCaseVertCount = 10; 824 GrDrawTarget::AutoReleaseGeometry geo(target, layout, worstCaseVertCount, 0); 825 826 if (!geo.succeeded()) { 827 GrPrintf("Failed to get space for vertices!\n"); 828 return; 829 } 830 831 GrPrimitiveType primType; 832 int vertCount; 833 GrPoint* vertex = geo.positions(); 834 835 if (width > 0) { 836 vertCount = 10; 837 primType = kTriangleStrip_GrPrimitiveType; 838 setStrokeRectStrip(vertex, rect, width); 839 } else { 840 // hairline 841 vertCount = 5; 842 primType = kLineStrip_GrPrimitiveType; 843 vertex[0].set(rect.fLeft, rect.fTop); 844 vertex[1].set(rect.fRight, rect.fTop); 845 vertex[2].set(rect.fRight, rect.fBottom); 846 vertex[3].set(rect.fLeft, rect.fBottom); 847 vertex[4].set(rect.fLeft, rect.fTop); 848 } 849 850 GrDrawState::AutoViewMatrixRestore avmr; 851 if (NULL != matrix) { 852 GrDrawState* drawState = target->drawState(); 853 avmr.set(drawState); 854 drawState->preConcatViewMatrix(*matrix); 855 drawState->preConcatSamplerMatrices(stageMask, *matrix); 856 } 857 858 target->drawNonIndexed(primType, 0, vertCount); 859 } else { 860#if GR_STATIC_RECT_VB 861 GrVertexLayout layout = PaintStageVertexLayoutBits(paint, NULL); 862 const GrVertexBuffer* sqVB = fGpu->getUnitSquareVertexBuffer(); 863 if (NULL == sqVB) { 864 GrPrintf("Failed to create static 
rect vb.\n"); 865 return; 866 } 867 target->setVertexSourceToBuffer(layout, sqVB); 868 GrDrawState* drawState = target->drawState(); 869 GrDrawState::AutoViewMatrixRestore avmr(drawState); 870 GrMatrix m; 871 m.setAll(rect.width(), 0, rect.fLeft, 872 0, rect.height(), rect.fTop, 873 0, 0, GrMatrix::I()[8]); 874 875 if (NULL != matrix) { 876 m.postConcat(*matrix); 877 } 878 drawState->preConcatViewMatrix(m); 879 drawState->preConcatSamplerMatrices(stageMask, m); 880 881 target->drawNonIndexed(kTriangleFan_GrPrimitiveType, 0, 4); 882#else 883 target->drawSimpleRect(rect, matrix, stageMask); 884#endif 885 } 886} 887 888void GrContext::drawRectToRect(const GrPaint& paint, 889 const GrRect& dstRect, 890 const GrRect& srcRect, 891 const GrMatrix* dstMatrix, 892 const GrMatrix* srcMatrix) { 893 SK_TRACE_EVENT0("GrContext::drawRectToRect"); 894 895 // srcRect refers to paint's first texture 896 if (!paint.isTextureStageEnabled(0)) { 897 drawRect(paint, dstRect, -1, dstMatrix); 898 return; 899 } 900 901 GR_STATIC_ASSERT(!BATCH_RECT_TO_RECT || !GR_STATIC_RECT_VB); 902 903#if GR_STATIC_RECT_VB 904 GrDrawTarget* target = this->prepareToDraw(paint, kUnbuffered_DrawCategory); 905 GrDrawState::AutoStageDisable atr(fDrawState); 906 GrDrawState* drawState = target->drawState(); 907 GrVertexLayout layout = PaintStageVertexLayoutBits(paint, NULL); 908 GrDrawState::AutoViewMatrixRestore avmr(drawState); 909 910 GrMatrix m; 911 912 m.setAll(dstRect.width(), 0, dstRect.fLeft, 913 0, dstRect.height(), dstRect.fTop, 914 0, 0, GrMatrix::I()[8]); 915 if (NULL != dstMatrix) { 916 m.postConcat(*dstMatrix); 917 } 918 drawState->preConcatViewMatrix(m); 919 920 // srcRect refers to first stage 921 int otherStageMask = paint.getActiveStageMask() & 922 (~(1 << GrPaint::kFirstTextureStage)); 923 if (otherStageMask) { 924 drawState->preConcatSamplerMatrices(otherStageMask, m); 925 } 926 927 m.setAll(srcRect.width(), 0, srcRect.fLeft, 928 0, srcRect.height(), srcRect.fTop, 929 0, 0, 
GrMatrix::I()[8]); 930 if (NULL != srcMatrix) { 931 m.postConcat(*srcMatrix); 932 } 933 drawState->sampler(GrPaint::kFirstTextureStage)->preConcatMatrix(m); 934 935 const GrVertexBuffer* sqVB = fGpu->getUnitSquareVertexBuffer(); 936 if (NULL == sqVB) { 937 GrPrintf("Failed to create static rect vb.\n"); 938 return; 939 } 940 target->setVertexSourceToBuffer(layout, sqVB); 941 target->drawNonIndexed(kTriangleFan_GrPrimitiveType, 0, 4); 942#else 943 944 GrDrawTarget* target; 945#if BATCH_RECT_TO_RECT 946 target = this->prepareToDraw(paint, kBuffered_DrawCategory); 947#else 948 target = this->prepareToDraw(paint, kUnbuffered_DrawCategory); 949#endif 950 GrDrawState::AutoStageDisable atr(fDrawState); 951 952 const GrRect* srcRects[GrDrawState::kNumStages] = {NULL}; 953 const GrMatrix* srcMatrices[GrDrawState::kNumStages] = {NULL}; 954 srcRects[0] = &srcRect; 955 srcMatrices[0] = srcMatrix; 956 957 target->drawRect(dstRect, dstMatrix, 1, srcRects, srcMatrices); 958#endif 959} 960 961void GrContext::drawVertices(const GrPaint& paint, 962 GrPrimitiveType primitiveType, 963 int vertexCount, 964 const GrPoint positions[], 965 const GrPoint texCoords[], 966 const GrColor colors[], 967 const uint16_t indices[], 968 int indexCount) { 969 SK_TRACE_EVENT0("GrContext::drawVertices"); 970 971 GrDrawTarget::AutoReleaseGeometry geo; 972 973 GrDrawTarget* target = this->prepareToDraw(paint, kUnbuffered_DrawCategory); 974 GrDrawState::AutoStageDisable atr(fDrawState); 975 976 bool hasTexCoords[GrPaint::kTotalStages] = { 977 NULL != texCoords, // texCoordSrc provides explicit stage 0 coords 978 0 // remaining stages use positions 979 }; 980 981 GrVertexLayout layout = PaintStageVertexLayoutBits(paint, hasTexCoords); 982 983 if (NULL != colors) { 984 layout |= GrDrawTarget::kColor_VertexLayoutBit; 985 } 986 int vertexSize = GrDrawTarget::VertexSize(layout); 987 988 if (sizeof(GrPoint) != vertexSize) { 989 if (!geo.set(target, layout, vertexCount, 0)) { 990 GrPrintf("Failed to get space 
for vertices!\n"); 991 return; 992 } 993 int texOffsets[GrDrawState::kMaxTexCoords]; 994 int colorOffset; 995 GrDrawTarget::VertexSizeAndOffsetsByIdx(layout, 996 texOffsets, 997 &colorOffset, 998 NULL, 999 NULL); 1000 void* curVertex = geo.vertices(); 1001 1002 for (int i = 0; i < vertexCount; ++i) { 1003 *((GrPoint*)curVertex) = positions[i]; 1004 1005 if (texOffsets[0] > 0) { 1006 *(GrPoint*)((intptr_t)curVertex + texOffsets[0]) = texCoords[i]; 1007 } 1008 if (colorOffset > 0) { 1009 *(GrColor*)((intptr_t)curVertex + colorOffset) = colors[i]; 1010 } 1011 curVertex = (void*)((intptr_t)curVertex + vertexSize); 1012 } 1013 } else { 1014 target->setVertexSourceToArray(layout, positions, vertexCount); 1015 } 1016 1017 // we don't currently apply offscreen AA to this path. Need improved 1018 // management of GrDrawTarget's geometry to avoid copying points per-tile. 1019 1020 if (NULL != indices) { 1021 target->setIndexSourceToArray(indices, indexCount); 1022 target->drawIndexed(primitiveType, 0, 0, vertexCount, indexCount); 1023 } else { 1024 target->drawNonIndexed(primitiveType, 0, vertexCount); 1025 } 1026} 1027 1028/////////////////////////////////////////////////////////////////////////////// 1029namespace { 1030 1031struct CircleVertex { 1032 GrPoint fPos; 1033 GrPoint fCenter; 1034 GrScalar fOuterRadius; 1035 GrScalar fInnerRadius; 1036}; 1037 1038/* Returns true if will map a circle to another circle. This can be true 1039 * if the matrix only includes square-scale, rotation, translation. 
 */
inline bool isSimilarityTransformation(const SkMatrix& matrix,
                                       SkScalar tol = SK_ScalarNearlyZero) {
    if (matrix.isIdentity() || matrix.getType() == SkMatrix::kTranslate_Mask) {
        return true;
    }
    if (matrix.hasPerspective()) {
        return false;
    }

    SkScalar mx = matrix.get(SkMatrix::kMScaleX);
    SkScalar sx = matrix.get(SkMatrix::kMSkewX);
    SkScalar my = matrix.get(SkMatrix::kMScaleY);
    SkScalar sy = matrix.get(SkMatrix::kMSkewY);

    // degenerate (all linear terms zero) collapses the circle: not a similarity
    if (mx == 0 && sx == 0 && my == 0 && sy == 0) {
        return false;
    }

    // it has scales or skews, but it could also be rotation, check it out.
    // A similarity requires the two basis vectors to be orthogonal and of
    // equal length (within tol).
    SkVector vec[2];
    vec[0].set(mx, sx);
    vec[1].set(sy, my);

    return SkScalarNearlyZero(vec[0].dot(vec[1]), SkScalarSquare(tol)) &&
           SkScalarNearlyEqual(vec[0].lengthSqd(), vec[1].lengthSqd(),
                               SkScalarSquare(tol));
}

}

// TODO: strokeWidth can't be larger than zero right now.
// It will be fixed when drawPath() can handle strokes.
//
// Draws rect's inscribed oval. Uses the analytic AA circle shader when the
// oval is a circle under a similarity transform with AA requested; otherwise
// falls back to the path renderer via internalDrawPath. strokeWidth == 0
// means hairline, negative means fill (see drawPath's call site).
void GrContext::drawOval(const GrPaint& paint,
                         const GrRect& rect,
                         SkScalar strokeWidth) {
    DrawCategory category = (DEFER_PATHS) ? kBuffered_DrawCategory :
                                            kUnbuffered_DrawCategory;
    GrDrawTarget* target = this->prepareToDraw(paint, category);
    GrDrawState::AutoStageDisable atr(fDrawState);
    GrDrawState* drawState = target->drawState();
    GrMatrix vm = drawState->getViewMatrix();

    // Fall back to path rendering unless this is an AA circle whose view
    // matrix keeps circles circular.
    if (!isSimilarityTransformation(vm) ||
        !paint.fAntiAlias ||
        rect.height() != rect.width()) {
        SkPath path;
        path.addOval(rect);
        GrPathFill fill = (strokeWidth == 0) ?
                           kHairLine_GrPathFill : kWinding_GrPathFill;
        this->internalDrawPath(paint, path, fill, NULL);
        return;
    }

    const GrRenderTarget* rt = drawState->getRenderTarget();
    if (NULL == rt) {
        return;
    }

    // Vertices below are pre-transformed to device space.
    GrDrawTarget::AutoDeviceCoordDraw adcd(target, paint.getActiveStageMask());

    GrVertexLayout layout = PaintStageVertexLayoutBits(paint, NULL);
    layout |= GrDrawTarget::kEdge_VertexLayoutBit;
    GrAssert(sizeof(CircleVertex) == GrDrawTarget::VertexSize(layout));

    GrPoint center = GrPoint::Make(rect.centerX(), rect.centerY());
    GrScalar radius = SkScalarHalf(rect.width());

    // map center and radius into device space
    vm.mapPoints(&center, 1);
    radius = vm.mapRadius(radius);

    GrScalar outerRadius = radius;
    GrScalar innerRadius = 0;
    SkScalar halfWidth = 0;
    if (strokeWidth == 0) {
        // hairline: a one-pixel-wide ring centered on the radius
        halfWidth = SkScalarHalf(SK_Scalar1);

        outerRadius += halfWidth;
        innerRadius = SkMaxScalar(0, radius - halfWidth);
    }

    GrDrawTarget::AutoReleaseGeometry geo(target, layout, 4, 0);
    if (!geo.succeeded()) {
        GrPrintf("Failed to get space for vertices!\n");
        return;
    }

    CircleVertex* verts = reinterpret_cast<CircleVertex*>(geo.vertices());

    // The fragment shader will extend the radius out half a pixel
    // to antialias. Expand the drawn rect here so all the pixels
    // will be captured.
    SkScalar L = center.fX - outerRadius - SkFloatToScalar(0.5f);
    SkScalar R = center.fX + outerRadius + SkFloatToScalar(0.5f);
    SkScalar T = center.fY - outerRadius - SkFloatToScalar(0.5f);
    SkScalar B = center.fY + outerRadius + SkFloatToScalar(0.5f);

    verts[0].fPos = SkPoint::Make(L, T);
    verts[1].fPos = SkPoint::Make(R, T);
    verts[2].fPos = SkPoint::Make(L, B);
    verts[3].fPos = SkPoint::Make(R, B);

    for (int i = 0; i < 4; ++i) {
        // this goes to fragment shader, it should be in y-points-up space.
        verts[i].fCenter = SkPoint::Make(center.fX, rt->height() - center.fY);

        verts[i].fOuterRadius = outerRadius;
        verts[i].fInnerRadius = innerRadius;
    }

    drawState->setVertexEdgeType(GrDrawState::kCircle_EdgeType);
    target->drawNonIndexed(kTriangleStrip_GrPrimitiveType, 0, 4);
}

// Draws a path. Empty inverse-filled paths paint everything; non-inverted
// oval paths are routed to the specialized drawOval; everything else goes
// through internalDrawPath.
void GrContext::drawPath(const GrPaint& paint, const SkPath& path,
                         GrPathFill fill, const GrPoint* translate) {

    if (path.isEmpty()) {
       if (GrIsFillInverted(fill)) {
           // inverse fill of an empty path covers the whole clip
           this->drawPaint(paint);
       }
       return;
    }

    SkRect ovalRect;
    if (!GrIsFillInverted(fill) && path.isOval(&ovalRect)) {
        if (translate) {
            ovalRect.offset(*translate);
        }
        // hairline -> strokeWidth 0; any fill -> negative sentinel width
        SkScalar width = (fill == kHairLine_GrPathFill) ? 0 : -SK_Scalar1;
        this->drawOval(paint, ovalRect, width);
        return;
    }

    internalDrawPath(paint, path, fill, translate);
}

// Renders a path through the path-renderer chain (or the SW fallback).
// Shared by drawPath and drawOval's non-circle fallback.
void GrContext::internalDrawPath(const GrPaint& paint, const SkPath& path,
                                 GrPathFill fill, const GrPoint* translate) {

    // Note that below we may sw-rasterize the path into a scratch texture.
    // Scratch textures can be recycled after they are returned to the texture
    // cache. This presents a potential hazard for buffered drawing. However,
    // the writePixels that uploads to the scratch will perform a flush so we're
    // OK.
    DrawCategory category = (DEFER_PATHS) ? kBuffered_DrawCategory :
                                            kUnbuffered_DrawCategory;
    GrDrawTarget* target = this->prepareToDraw(paint, category);
    GrDrawState::AutoStageDisable atr(fDrawState);
    GrDrawState::StageMask stageMask = paint.getActiveStageMask();

    // MSAA targets antialias in hardware; coverage-based path AA only
    // applies to non-multisampled targets.
    bool prAA = paint.fAntiAlias && !this->getRenderTarget()->isMultisampled();

    // An Assumption here is that path renderer would use some form of tweaking
    // the src color (either the input alpha or in the frag shader) to implement
    // aa.
If we have some future driver-mojo path AA that can do the right 1196 // thing WRT to the blend then we'll need some query on the PR. 1197 if (disable_coverage_aa_for_blend(target)) { 1198#if GR_DEBUG 1199 //GrPrintf("Turning off AA to correctly apply blend.\n"); 1200#endif 1201 prAA = false; 1202 } 1203 1204 GrPathRenderer* pr = this->getPathRenderer(path, fill, target, prAA, true); 1205 if (NULL == pr) { 1206#if GR_DEBUG 1207 GrPrintf("Unable to find path renderer compatible with path.\n"); 1208#endif 1209 return; 1210 } 1211 1212 pr->drawPath(path, fill, translate, target, stageMask, prAA); 1213} 1214 1215//////////////////////////////////////////////////////////////////////////////// 1216 1217void GrContext::flush(int flagsBitfield) { 1218 if (kDiscard_FlushBit & flagsBitfield) { 1219 fDrawBuffer->reset(); 1220 } else { 1221 this->flushDrawBuffer(); 1222 } 1223 if (kForceCurrentRenderTarget_FlushBit & flagsBitfield) { 1224 fGpu->forceRenderTargetFlush(); 1225 } 1226} 1227 1228void GrContext::flushDrawBuffer() { 1229 if (fDrawBuffer) { 1230 // With addition of the AA clip path, flushing the draw buffer can 1231 // result in the generation of an AA clip mask. 
// During this
        // process the SW path renderer may be invoked which recursively
        // calls this method (via internalWriteTexturePixels) creating
        // infinite recursion. NULLing fDrawBuffer for the duration makes the
        // nested call a no-op.
        GrInOrderDrawBuffer* temp = fDrawBuffer;
        fDrawBuffer = NULL;

        temp->flushTo(fGpu);

        fDrawBuffer = temp;
    }
}

// Uploads a rect of pixels into a texture. Flushes pending draws first
// (unless kDontFlush_PixelOpsFlag) so the upload lands after them.
// Silently does nothing if a premul<->unpremul conversion would be required;
// see the TODO below.
void GrContext::internalWriteTexturePixels(GrTexture* texture,
                                           int left, int top,
                                           int width, int height,
                                           GrPixelConfig config,
                                           const void* buffer,
                                           size_t rowBytes,
                                           uint32_t flags) {
    SK_TRACE_EVENT0("GrContext::writeTexturePixels");
    ASSERT_OWNED_RESOURCE(texture);

    if (!(kDontFlush_PixelOpsFlag & flags)) {
        this->flush();
    }
    // TODO: use scratch texture to perform conversion
    if (GrPixelConfigIsUnpremultiplied(texture->config()) !=
        GrPixelConfigIsUnpremultiplied(config)) {
        return;
    }

    fGpu->writeTexturePixels(texture, left, top, width, height,
                             config, buffer, rowBytes);
}

// Reads back a rect of texture pixels. Only implemented for textures that
// are also render targets (delegates to the render-target read path);
// returns false otherwise.
bool GrContext::internalReadTexturePixels(GrTexture* texture,
                                          int left, int top,
                                          int width, int height,
                                          GrPixelConfig config,
                                          void* buffer,
                                          size_t rowBytes,
                                          uint32_t flags) {
    SK_TRACE_EVENT0("GrContext::readTexturePixels");
    ASSERT_OWNED_RESOURCE(texture);

    // TODO: code read pixels for textures that aren't also rendertargets
    GrRenderTarget* target = texture->asRenderTarget();
    if (NULL != target) {
        return this->internalReadRenderTargetPixels(target,
                                                    left, top, width, height,
                                                    config, buffer, rowBytes,
                                                    flags);
    } else {
        return false;
    }
}

#include "SkConfig8888.h"

namespace {
/**
 * Converts a GrPixelConfig to a SkCanvas::Config8888. Only byte-per-channel
 * formats are representable as Config8888 and so the function returns false
 * if the GrPixelConfig has no equivalent Config8888.
1296 */ 1297bool grconfig_to_config8888(GrPixelConfig config, 1298 SkCanvas::Config8888* config8888) { 1299 switch (config) { 1300 case kRGBA_8888_PM_GrPixelConfig: 1301 *config8888 = SkCanvas::kRGBA_Premul_Config8888; 1302 return true; 1303 case kRGBA_8888_UPM_GrPixelConfig: 1304 *config8888 = SkCanvas::kRGBA_Unpremul_Config8888; 1305 return true; 1306 case kBGRA_8888_PM_GrPixelConfig: 1307 *config8888 = SkCanvas::kBGRA_Premul_Config8888; 1308 return true; 1309 case kBGRA_8888_UPM_GrPixelConfig: 1310 *config8888 = SkCanvas::kBGRA_Unpremul_Config8888; 1311 return true; 1312 default: 1313 return false; 1314 } 1315} 1316} 1317 1318bool GrContext::internalReadRenderTargetPixels(GrRenderTarget* target, 1319 int left, int top, 1320 int width, int height, 1321 GrPixelConfig config, 1322 void* buffer, 1323 size_t rowBytes, 1324 uint32_t flags) { 1325 SK_TRACE_EVENT0("GrContext::readRenderTargetPixels"); 1326 ASSERT_OWNED_RESOURCE(target); 1327 1328 if (NULL == target) { 1329 target = fDrawState->getRenderTarget(); 1330 if (NULL == target) { 1331 return false; 1332 } 1333 } 1334 1335 if (!(kDontFlush_PixelOpsFlag & flags)) { 1336 this->flush(); 1337 } 1338 1339 if (!GrPixelConfigIsUnpremultiplied(target->config()) && 1340 GrPixelConfigIsUnpremultiplied(config) && 1341 !fGpu->canPreserveReadWriteUnpremulPixels()) { 1342 SkCanvas::Config8888 srcConfig8888, dstConfig8888; 1343 if (!grconfig_to_config8888(target->config(), &srcConfig8888) || 1344 !grconfig_to_config8888(config, &dstConfig8888)) { 1345 return false; 1346 } 1347 // do read back using target's own config 1348 this->internalReadRenderTargetPixels(target, 1349 left, top, 1350 width, height, 1351 target->config(), 1352 buffer, rowBytes, 1353 kDontFlush_PixelOpsFlag); 1354 // sw convert the pixels to unpremul config 1355 uint32_t* pixels = reinterpret_cast<uint32_t*>(buffer); 1356 SkConvertConfig8888Pixels(pixels, rowBytes, dstConfig8888, 1357 pixels, rowBytes, srcConfig8888, 1358 width, height); 1359 return true; 
1360 } 1361 1362 GrTexture* src = target->asTexture(); 1363 bool swapRAndB = NULL != src && 1364 fGpu->preferredReadPixelsConfig(config) == 1365 GrPixelConfigSwapRAndB(config); 1366 1367 bool flipY = NULL != src && 1368 fGpu->readPixelsWillPayForYFlip(target, left, top, 1369 width, height, config, 1370 rowBytes); 1371 bool alphaConversion = (!GrPixelConfigIsUnpremultiplied(target->config()) && 1372 GrPixelConfigIsUnpremultiplied(config)); 1373 1374 if (NULL == src && alphaConversion) { 1375 // we should fallback to cpu conversion here. This could happen when 1376 // we were given an external render target by the client that is not 1377 // also a texture (e.g. FBO 0 in GL) 1378 return false; 1379 } 1380 // we draw to a scratch texture if any of these conversion are applied 1381 GrAutoScratchTexture ast; 1382 if (flipY || swapRAndB || alphaConversion) { 1383 GrAssert(NULL != src); 1384 if (swapRAndB) { 1385 config = GrPixelConfigSwapRAndB(config); 1386 GrAssert(kUnknown_GrPixelConfig != config); 1387 } 1388 // Make the scratch a render target because we don't have a robust 1389 // readTexturePixels as of yet (it calls this function). 1390 GrTextureDesc desc; 1391 desc.fFlags = kRenderTarget_GrTextureFlagBit; 1392 desc.fWidth = width; 1393 desc.fHeight = height; 1394 desc.fConfig = config; 1395 1396 // When a full readback is faster than a partial we could always make 1397 // the scratch exactly match the passed rect. However, if we see many 1398 // different size rectangles we will trash our texture cache and pay the 1399 // cost of creating and destroying many textures. So, we only request 1400 // an exact match when the caller is reading an entire RT. 
1401 ScratchTexMatch match = kApprox_ScratchTexMatch; 1402 if (0 == left && 1403 0 == top && 1404 target->width() == width && 1405 target->height() == height && 1406 fGpu->fullReadPixelsIsFasterThanPartial()) { 1407 match = kExact_ScratchTexMatch; 1408 } 1409 ast.set(this, desc, match); 1410 GrTexture* texture = ast.texture(); 1411 if (!texture) { 1412 return false; 1413 } 1414 target = texture->asRenderTarget(); 1415 GrAssert(NULL != target); 1416 1417 GrDrawTarget::AutoStateRestore asr(fGpu, 1418 GrDrawTarget::kReset_ASRInit); 1419 GrDrawState* drawState = fGpu->drawState(); 1420 drawState->setRenderTarget(target); 1421 1422 GrMatrix matrix; 1423 if (flipY) { 1424 matrix.setTranslate(SK_Scalar1 * left, 1425 SK_Scalar1 * (top + height)); 1426 matrix.set(GrMatrix::kMScaleY, -GR_Scalar1); 1427 } else { 1428 matrix.setTranslate(SK_Scalar1 *left, SK_Scalar1 *top); 1429 } 1430 matrix.postIDiv(src->width(), src->height()); 1431 drawState->sampler(0)->reset(matrix); 1432 drawState->sampler(0)->setRAndBSwap(swapRAndB); 1433 drawState->setTexture(0, src); 1434 GrRect rect; 1435 rect.setXYWH(0, 0, SK_Scalar1 * width, SK_Scalar1 * height); 1436 fGpu->drawSimpleRect(rect, NULL, 0x1); 1437 left = 0; 1438 top = 0; 1439 } 1440 return fGpu->readPixels(target, 1441 left, top, width, height, 1442 config, buffer, rowBytes, flipY); 1443} 1444 1445void GrContext::resolveRenderTarget(GrRenderTarget* target) { 1446 GrAssert(target); 1447 ASSERT_OWNED_RESOURCE(target); 1448 // In the future we may track whether there are any pending draws to this 1449 // target. We don't today so we always perform a flush. We don't promise 1450 // this to our clients, though. 
    this->flush();
    fGpu->resolveRenderTarget(target);
}

// Draws the full contents of src into dst (1:1 texel mapping). Both may be
// NULL, in which case this is a no-op.
void GrContext::copyTexture(GrTexture* src, GrRenderTarget* dst) {
    if (NULL == src || NULL == dst) {
        return;
    }
    ASSERT_OWNED_RESOURCE(src);

    // Writes pending to the source texture are not tracked, so a flush
    // is required to ensure that the copy captures the most recent contents
    // of the source texture. See similar behaviour in
    // GrContext::resolveRenderTarget.
    this->flush();

    GrDrawTarget::AutoStateRestore asr(fGpu, GrDrawTarget::kReset_ASRInit);
    GrDrawState* drawState = fGpu->drawState();
    drawState->setRenderTarget(dst);
    GrMatrix sampleM;
    // normalize texel coords to [0,1]
    sampleM.setIDiv(src->width(), src->height());
    drawState->setTexture(0, src);
    drawState->sampler(0)->reset(sampleM);
    SkRect rect = SkRect::MakeXYWH(0, 0,
                                   SK_Scalar1 * src->width(),
                                   SK_Scalar1 * src->height());
    fGpu->drawSimpleRect(rect, NULL, 1 << 0);
}

// Uploads a rect of pixels into a render target. Fast path: if the RT is
// also a texture and no premul/unpremul conversion is needed, write through
// the texture upload path. Otherwise the pixels are staged in a scratch
// texture (converting in software when the GPU can't do it losslessly) and
// drawn into the target. NULL target means "use the current render target".
void GrContext::internalWriteRenderTargetPixels(GrRenderTarget* target,
                                                int left, int top,
                                                int width, int height,
                                                GrPixelConfig config,
                                                const void* buffer,
                                                size_t rowBytes,
                                                uint32_t flags) {
    SK_TRACE_EVENT0("GrContext::writeRenderTargetPixels");
    ASSERT_OWNED_RESOURCE(target);

    if (NULL == target) {
        target = fDrawState->getRenderTarget();
        if (NULL == target) {
            return;
        }
    }

    // TODO: when underlying api has a direct way to do this we should use it
    // (e.g. glDrawPixels on desktop GL).

    // If the RT is also a texture and we don't have to do PM/UPM conversion
    // then take the texture path, which we expect to be at least as fast or
    // faster since it doesn't use an intermediate texture as we do below.

#if !GR_MAC_BUILD
    // At least some drivers on the Mac get confused when glTexImage2D is called
    // on a texture attached to an FBO. The FBO still sees the old image. TODO:
    // determine what OS versions and/or HW is affected.
    if (NULL != target->asTexture() &&
        GrPixelConfigIsUnpremultiplied(target->config()) ==
        GrPixelConfigIsUnpremultiplied(config)) {

        this->internalWriteTexturePixels(target->asTexture(),
                                         left, top, width, height,
                                         config, buffer, rowBytes, flags);
        return;
    }
#endif
    if (!GrPixelConfigIsUnpremultiplied(target->config()) &&
        GrPixelConfigIsUnpremultiplied(config) &&
        !fGpu->canPreserveReadWriteUnpremulPixels()) {
        // GPU can't do the unpremul->premul conversion losslessly; convert
        // in software first, then recurse with matching configs.
        SkCanvas::Config8888 srcConfig8888, dstConfig8888;
        if (!grconfig_to_config8888(config, &srcConfig8888) ||
            !grconfig_to_config8888(target->config(), &dstConfig8888)) {
            return;
        }
        // allocate a tmp buffer and sw convert the pixels to premul
        SkAutoSTMalloc<128 * 128, uint32_t> tmpPixels(width * height);
        const uint32_t* src = reinterpret_cast<const uint32_t*>(buffer);
        SkConvertConfig8888Pixels(tmpPixels.get(), 4 * width, dstConfig8888,
                                  src, rowBytes, srcConfig8888,
                                  width, height);
        // upload the already premul pixels
        this->internalWriteRenderTargetPixels(target,
                                              left, top,
                                              width, height,
                                              target->config(),
                                              tmpPixels, 4 * width, flags);
        return;
    }

    // If the GPU prefers reading the swapped config, upload swapped and undo
    // the swap in the sampler when drawing to the target.
    bool swapRAndB = fGpu->preferredReadPixelsConfig(config) ==
                     GrPixelConfigSwapRAndB(config);
    if (swapRAndB) {
        config = GrPixelConfigSwapRAndB(config);
    }

    GrTextureDesc desc;
    desc.fWidth = width;
    desc.fHeight = height;
    desc.fConfig = config;

    GrAutoScratchTexture ast(this, desc);
    GrTexture* texture = ast.texture();
    if (NULL == texture) {
        return;
    }
    // stage the pixels in the scratch texture...
    this->internalWriteTexturePixels(texture, 0, 0, width, height,
                                     config, buffer, rowBytes, flags);

    // ...then draw the scratch into the destination rect of the target.
    GrDrawTarget::AutoStateRestore asr(fGpu, GrDrawTarget::kReset_ASRInit);
    GrDrawState* drawState = fGpu->drawState();

    GrMatrix matrix;
    matrix.setTranslate(GrIntToScalar(left), GrIntToScalar(top));
    drawState->setViewMatrix(matrix);
    drawState->setRenderTarget(target);
    drawState->setTexture(0, texture);

    matrix.setIDiv(texture->width(), texture->height());
    drawState->sampler(0)->reset(GrSamplerState::kClamp_WrapMode,
                                 GrSamplerState::kNearest_Filter,
                                 matrix);
    drawState->sampler(0)->setRAndBSwap(swapRAndB);

    static const GrVertexLayout layout = 0;
    static const int VCOUNT = 4;
    // TODO: Use GrGpu::drawRect here
    GrDrawTarget::AutoReleaseGeometry geo(fGpu, layout, VCOUNT, 0);
    if (!geo.succeeded()) {
        GrPrintf("Failed to get space for vertices!\n");
        return;
    }
    ((GrPoint*)geo.vertices())->setIRectFan(0, 0, width, height);
    fGpu->drawNonIndexed(kTriangleFan_GrPrimitiveType, 0, VCOUNT);
}
////////////////////////////////////////////////////////////////////////////////

// Transfers the paint's textures, masks, color, blend, and state bits into
// fDrawState. Expects all stages to have been disabled beforehand.
void GrContext::setPaint(const GrPaint& paint) {
    GrAssert(fDrawState->stagesDisabled());

    for (int i = 0; i < GrPaint::kMaxTextures; ++i) {
        int s = i + GrPaint::kFirstTextureStage;
        ASSERT_OWNED_RESOURCE(paint.getTexture(i));
        if (paint.isTextureStageEnabled(i)) {
            fDrawState->setTexture(s, paint.getTexture(i));
            *fDrawState->sampler(s) = paint.getTextureSampler(i);
        }
    }

    // mask stages contribute coverage rather than color
    fDrawState->setFirstCoverageStage(GrPaint::kFirstMaskStage);

    for (int i = 0; i < GrPaint::kMaxMasks; ++i) {
        int s = i + GrPaint::kFirstMaskStage;
        ASSERT_OWNED_RESOURCE(paint.getMask(i));
        if (paint.isMaskStageEnabled(i)) {
            fDrawState->setTexture(s, paint.getMask(i));
            *fDrawState->sampler(s) = paint.getMaskSampler(i);
        }
    }

    // disable all stages not accessible via the paint
    for (int s = GrPaint::kTotalStages; s < GrDrawState::kNumStages; ++s) {
        fDrawState->setTexture(s, NULL);
    }

    fDrawState->setColor(paint.fColor);

    if (paint.fDither)
    {
        fDrawState->enableState(GrDrawState::kDither_StateBit);
    } else {
        fDrawState->disableState(GrDrawState::kDither_StateBit);
    }
    if (paint.fAntiAlias) {
        fDrawState->enableState(GrDrawState::kHWAntialias_StateBit);
    } else {
        fDrawState->disableState(GrDrawState::kHWAntialias_StateBit);
    }
    if (paint.fColorMatrixEnabled) {
        fDrawState->enableState(GrDrawState::kColorMatrix_StateBit);
        fDrawState->setColorMatrix(paint.fColorMatrix);
    } else {
        fDrawState->disableState(GrDrawState::kColorMatrix_StateBit);
    }
    fDrawState->setBlendFunc(paint.fSrcBlendCoeff, paint.fDstBlendCoeff);
    fDrawState->setColorFilter(paint.fColorFilterColor, paint.fColorFilterXfermode);
    fDrawState->setCoverage(paint.fCoverage);
#if GR_DEBUG_PARTIAL_COVERAGE_CHECK
    // warn when partial coverage will blend incorrectly on this GPU
    if ((paint.getActiveMaskStageMask() || 0xff != paint.fCoverage) &&
        !fGpu->canApplyCoverage()) {
        GrPrintf("Partial pixel coverage will be incorrectly blended.\n");
    }
#endif
}

// Applies the paint and returns the draw target for the requested category:
// fGpu for unbuffered draws, the in-order draw buffer for buffered draws.
// Switching categories flushes the draw buffer so ordering is preserved.
GrDrawTarget* GrContext::prepareToDraw(const GrPaint& paint,
                                       DrawCategory category) {
    if (category != fLastDrawCategory) {
        this->flushDrawBuffer();
        fLastDrawCategory = category;
    }
    this->setPaint(paint);
    GrDrawTarget* target = fGpu;
    switch (category) {
        case kUnbuffered_DrawCategory:
            target = fGpu;
            break;
        case kBuffered_DrawCategory:
            target = fDrawBuffer;
            // buffered draws must snapshot the current clip
            fDrawBuffer->setClip(fGpu->getClip());
            break;
        default:
            GrCrash("Unexpected DrawCategory.");
            break;
    }
    return target;
}

/*
 * This method finds a path renderer that can draw the specified path on
 * the provided target.
 * Due to its expense, the software path renderer has been split out so it
 * can be individually allowed/disallowed via the "allowSW" boolean.
 */
GrPathRenderer* GrContext::getPathRenderer(const SkPath& path,
                                           GrPathFill fill,
                                           const GrDrawTarget* target,
                                           bool antiAlias,
                                           bool allowSW) {
    // chain and SW renderer are created lazily and cached on the context
    if (NULL == fPathRendererChain) {
        fPathRendererChain =
            new GrPathRendererChain(this, GrPathRendererChain::kNone_UsageFlag);
    }

    GrPathRenderer* pr = fPathRendererChain->getPathRenderer(path, fill,
                                                             target,
                                                             antiAlias);

    if (NULL == pr && allowSW) {
        if (NULL == fSoftwarePathRenderer) {
            fSoftwarePathRenderer = new GrSoftwarePathRenderer(this);
        }

        pr = fSoftwarePathRenderer;
    }

    return pr;
}

////////////////////////////////////////////////////////////////////////////////

// Sets the render target, flushing pending draws aimed at the old target.
void GrContext::setRenderTarget(GrRenderTarget* target) {
    ASSERT_OWNED_RESOURCE(target);
    if (fDrawState->getRenderTarget() != target) {
        this->flush(false);
        fDrawState->setRenderTarget(target);
    }
}

GrRenderTarget* GrContext::getRenderTarget() {
    return fDrawState->getRenderTarget();
}

const GrRenderTarget* GrContext::getRenderTarget() const {
    return fDrawState->getRenderTarget();
}

bool GrContext::isConfigRenderable(GrPixelConfig config) const {
    return fGpu->isConfigRenderable(config);
}

const GrMatrix& GrContext::getMatrix() const {
    return fDrawState->getViewMatrix();
}

void GrContext::setMatrix(const GrMatrix& m) {
    fDrawState->setViewMatrix(m);
}

// NOTE(review): declared const but mutates the shared draw state through the
// fDrawState pointer — logically non-const; confirm whether the constness is
// intentional API compatibility.
void GrContext::concatMatrix(const GrMatrix& m) const {
    fDrawState->preConcatViewMatrix(m);
}

// Sets or clears bit `shift` in `bits` depending on `pred`.
static inline intptr_t setOrClear(intptr_t bits, int shift, intptr_t pred) {
    intptr_t mask = 1 << shift;
    if (pred) {
        bits |= mask;
    } else {
        bits &= ~mask;
    }
    return bits;
}

// Constructor: takes a ref on the gpu and wires it to this context.
GrContext::GrContext(GrGpu* gpu) {
    ++THREAD_INSTANCE_COUNT;

    fGpu = gpu;
    fGpu->ref();
    fGpu->setContext(this);
1749 1750 fDrawState = new GrDrawState(); 1751 fGpu->setDrawState(fDrawState); 1752 1753 fPathRendererChain = NULL; 1754 fSoftwarePathRenderer = NULL; 1755 1756 fTextureCache = new GrResourceCache(MAX_TEXTURE_CACHE_COUNT, 1757 MAX_TEXTURE_CACHE_BYTES); 1758 fFontCache = new GrFontCache(fGpu); 1759 1760 fLastDrawCategory = kUnbuffered_DrawCategory; 1761 1762 fDrawBuffer = NULL; 1763 fDrawBufferVBAllocPool = NULL; 1764 fDrawBufferIBAllocPool = NULL; 1765 1766 fAARectRenderer = new GrAARectRenderer; 1767 1768 this->setupDrawBuffer(); 1769} 1770 1771void GrContext::setupDrawBuffer() { 1772 1773 GrAssert(NULL == fDrawBuffer); 1774 GrAssert(NULL == fDrawBufferVBAllocPool); 1775 GrAssert(NULL == fDrawBufferIBAllocPool); 1776 1777#if DEFER_TEXT_RENDERING || BATCH_RECT_TO_RECT || DEFER_PATHS 1778 fDrawBufferVBAllocPool = 1779 new GrVertexBufferAllocPool(fGpu, false, 1780 DRAW_BUFFER_VBPOOL_BUFFER_SIZE, 1781 DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS); 1782 fDrawBufferIBAllocPool = 1783 new GrIndexBufferAllocPool(fGpu, false, 1784 DRAW_BUFFER_IBPOOL_BUFFER_SIZE, 1785 DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS); 1786 1787 fDrawBuffer = new GrInOrderDrawBuffer(fGpu, 1788 fDrawBufferVBAllocPool, 1789 fDrawBufferIBAllocPool); 1790#endif 1791 1792#if BATCH_RECT_TO_RECT 1793 fDrawBuffer->setQuadIndexBuffer(this->getQuadIndexBuffer()); 1794#endif 1795 fDrawBuffer->setAutoFlushTarget(fGpu); 1796 fDrawBuffer->setDrawState(fDrawState); 1797} 1798 1799GrDrawTarget* GrContext::getTextTarget(const GrPaint& paint) { 1800#if DEFER_TEXT_RENDERING 1801 return prepareToDraw(paint, kBuffered_DrawCategory); 1802#else 1803 return prepareToDraw(paint, kUnbuffered_DrawCategory); 1804#endif 1805} 1806 1807const GrIndexBuffer* GrContext::getQuadIndexBuffer() const { 1808 return fGpu->getQuadIndexBuffer(); 1809} 1810 1811GrTexture* GrContext::gaussianBlur(GrTexture* srcTexture, 1812 GrAutoScratchTexture* temp1, 1813 GrAutoScratchTexture* temp2, 1814 const SkRect& rect, 1815 float sigmaX, float sigmaY) { 1816 
    ASSERT_OWNED_RESOURCE(srcTexture);
    // Save RT/clip; restored before returning.
    GrRenderTarget* oldRenderTarget = this->getRenderTarget();
    GrClip oldClip = this->getClip();
    GrTexture* origTexture = srcTexture;
    GrAutoMatrix avm(this, GrMatrix::I());
    SkIRect clearRect;
    int scaleFactorX, radiusX;
    int scaleFactorY, radiusY;
    // Large sigmas are handled by downscaling first (MAX_BLUR_SIGMA cap);
    // adjust_sigma returns the effective sigma plus scale/kernel radius.
    sigmaX = adjust_sigma(sigmaX, &scaleFactorX, &radiusX);
    sigmaY = adjust_sigma(sigmaY, &scaleFactorY, &radiusY);

    SkRect srcRect(rect);
    // round the rect out on the downscaled grid so scaling is exact
    scale_rect(&srcRect, 1.0f / scaleFactorX, 1.0f / scaleFactorY);
    srcRect.roundOut();
    scale_rect(&srcRect, static_cast<float>(scaleFactorX),
                         static_cast<float>(scaleFactorY));
    this->setClip(srcRect);

    GrAssert(kBGRA_8888_PM_GrPixelConfig == srcTexture->config() ||
             kRGBA_8888_PM_GrPixelConfig == srcTexture->config() ||
             kAlpha_8_GrPixelConfig == srcTexture->config());

    GrTextureDesc desc;
    desc.fFlags = kRenderTarget_GrTextureFlagBit | kNoStencil_GrTextureFlagBit;
    desc.fWidth = SkScalarFloorToInt(srcRect.width());
    desc.fHeight = SkScalarFloorToInt(srcRect.height());
    desc.fConfig = srcTexture->config();

    temp1->set(this, desc);
    if (temp2) {
        temp2->set(this, desc);
    }

    GrTexture* dstTexture = temp1->texture();
    GrPaint paint;
    paint.reset();
    paint.textureSampler(0)->setFilter(GrSamplerState::kBilinear_Filter);

    // Repeatedly halve in whichever axes still need downscaling.
    for (int i = 1; i < scaleFactorX || i < scaleFactorY; i *= 2) {
        paint.textureSampler(0)->matrix()->setIDiv(srcTexture->width(),
                                                   srcTexture->height());
        this->setRenderTarget(dstTexture->asRenderTarget());
        SkRect dstRect(srcRect);
        scale_rect(&dstRect, i < scaleFactorX ? 0.5f : 1.0f,
                             i < scaleFactorY ? 0.5f : 1.0f);
        paint.setTexture(0, srcTexture);
        this->drawRectToRect(paint, dstRect, srcRect);
        srcRect = dstRect;
        SkTSwap(srcTexture, dstTexture);
        // If temp2 is non-NULL, don't render back to origTexture
        if (temp2 && dstTexture == origTexture) {
            dstTexture = temp2->texture();
        }
    }

    SkIRect srcIRect;
    srcRect.roundOut(&srcIRect);

    if (sigmaX > 0.0f) {
        if (scaleFactorX > 1) {
            // Clear out a radius to the right of the srcRect to prevent the
            // X convolution from reading garbage.
            clearRect = SkIRect::MakeXYWH(srcIRect.fRight, srcIRect.fTop,
                                          radiusX, srcIRect.height());
            this->clear(&clearRect, 0x0);
        }

        this->setRenderTarget(dstTexture->asRenderTarget());
        convolve_gaussian(fGpu, srcTexture, srcRect, sigmaX, radiusX,
                          Gr1DKernelEffect::kX_Direction);
        SkTSwap(srcTexture, dstTexture);
        if (temp2 && dstTexture == origTexture) {
            dstTexture = temp2->texture();
        }
    }

    if (sigmaY > 0.0f) {
        if (scaleFactorY > 1 || sigmaX > 0.0f) {
            // Clear out a radius below the srcRect to prevent the Y
            // convolution from reading garbage.
            clearRect = SkIRect::MakeXYWH(srcIRect.fLeft, srcIRect.fBottom,
                                          srcIRect.width(), radiusY);
            this->clear(&clearRect, 0x0);
        }

        this->setRenderTarget(dstTexture->asRenderTarget());
        convolve_gaussian(fGpu, srcTexture, srcRect, sigmaY, radiusY,
                          Gr1DKernelEffect::kY_Direction);
        SkTSwap(srcTexture, dstTexture);
        if (temp2 && dstTexture == origTexture) {
            dstTexture = temp2->texture();
        }
    }

    if (scaleFactorX > 1 || scaleFactorY > 1) {
        // Clear one pixel to the right and below, to accommodate bilinear
        // upsampling.
        clearRect = SkIRect::MakeXYWH(srcIRect.fLeft, srcIRect.fBottom,
                                      srcIRect.width() + 1, 1);
        this->clear(&clearRect, 0x0);
        clearRect = SkIRect::MakeXYWH(srcIRect.fRight, srcIRect.fTop,
                                      1, srcIRect.height());
        this->clear(&clearRect, 0x0);
        // FIXME: This should be mitchell, not bilinear.
        paint.textureSampler(0)->setFilter(GrSamplerState::kBilinear_Filter);
        paint.textureSampler(0)->matrix()->setIDiv(srcTexture->width(),
                                                   srcTexture->height());
        this->setRenderTarget(dstTexture->asRenderTarget());
        paint.setTexture(0, srcTexture);
        SkRect dstRect(srcRect);
        scale_rect(&dstRect, (float) scaleFactorX, (float) scaleFactorY);
        this->drawRectToRect(paint, dstRect, srcRect);
        srcRect = dstRect;
        SkTSwap(srcTexture, dstTexture);
    }
    this->setRenderTarget(oldRenderTarget);
    this->setClip(oldClip);
    // After the final swap srcTexture holds the blurred result.
    return srcTexture;
}

// Applies a separable dilate/erode of the given radii to rect of srcTexture,
// staging through temp1 (X pass) and temp2 (Y pass). Returns the texture
// holding the result (srcTexture, temp1, or temp2).
GrTexture* GrContext::applyMorphology(GrTexture* srcTexture,
                                      const GrRect& rect,
                                      GrTexture* temp1, GrTexture* temp2,
                                      MorphologyType morphType,
                                      SkISize radius) {
    ASSERT_OWNED_RESOURCE(srcTexture);
    GrRenderTarget* oldRenderTarget = this->getRenderTarget();
    GrAutoMatrix avm(this, GrMatrix::I());
    GrClip oldClip = this->getClip();
    this->setClip(GrRect::MakeWH(SkIntToScalar(srcTexture->width()),
                                 SkIntToScalar(srcTexture->height())));
    if (radius.fWidth > 0) {
        this->setRenderTarget(temp1->asRenderTarget());
        apply_morphology(fGpu, srcTexture, rect, radius.fWidth, morphType,
                         Gr1DKernelEffect::kX_Direction);
        // Clear the band below the rect so the Y pass doesn't read garbage.
        SkIRect clearRect = SkIRect::MakeXYWH(
                    SkScalarFloorToInt(rect.fLeft),
                    SkScalarFloorToInt(rect.fBottom),
                    SkScalarFloorToInt(rect.width()),
                    radius.fHeight);
        this->clear(&clearRect, 0x0);
        srcTexture = temp1;
    }
    if (radius.fHeight > 0) {
        this->setRenderTarget(temp2->asRenderTarget());
        apply_morphology(fGpu, srcTexture, rect, radius.fHeight, morphType,
                         Gr1DKernelEffect::kY_Direction);
        srcTexture = temp2;
    }
    this->setRenderTarget(oldRenderTarget);
    this->setClip(oldClip);
    return srcTexture;
}

void GrContext::postClipPush() {
    fGpu->postClipPush();
}

void GrContext::preClipPop() {
    fGpu->preClipPop();
};

///////////////////////////////////////////////////////////////////////////////