GrContext.cpp revision 8f7e1dac5c92bf1f53feb603a9bd249d53afa81a
1 2/* 3 * Copyright 2011 Google Inc. 4 * 5 * Use of this source code is governed by a BSD-style license that can be 6 * found in the LICENSE file. 7 */ 8 9 10#include "GrContext.h" 11 12#include "effects/GrMorphologyEffect.h" 13#include "effects/GrConvolutionEffect.h" 14 15#include "GrBufferAllocPool.h" 16#include "GrClipIterator.h" 17#include "GrGpu.h" 18#include "GrIndexBuffer.h" 19#include "GrInOrderDrawBuffer.h" 20#include "GrPathRenderer.h" 21#include "GrPathUtils.h" 22#include "GrResourceCache.h" 23#include "GrSoftwarePathRenderer.h" 24#include "GrStencilBuffer.h" 25#include "GrTextStrike.h" 26#include "SkTLazy.h" 27#include "SkTLS.h" 28#include "SkTrace.h" 29 30#define DEFER_TEXT_RENDERING 1 31 32#define DEFER_PATHS 1 33 34#define BATCH_RECT_TO_RECT (1 && !GR_STATIC_RECT_VB) 35 36#define MAX_BLUR_SIGMA 4.0f 37 38// When we're using coverage AA but the blend is incompatible (given gpu 39// limitations) should we disable AA or draw wrong? 40#define DISABLE_COVERAGE_AA_FOR_BLEND 1 41 42#if GR_DEBUG 43 // change this to a 1 to see notifications when partial coverage fails 44 #define GR_DEBUG_PARTIAL_COVERAGE_CHECK 0 45#else 46 #define GR_DEBUG_PARTIAL_COVERAGE_CHECK 0 47#endif 48 49static const size_t kDefaultTextureCacheBudget = 16 * 1024 * 1024; 50 51static const size_t DRAW_BUFFER_VBPOOL_BUFFER_SIZE = 1 << 15; 52static const int DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS = 4; 53 54// path rendering is the only thing we defer today that uses non-static indices 55static const size_t DRAW_BUFFER_IBPOOL_BUFFER_SIZE = DEFER_PATHS ? 1 << 11 : 0; 56static const int DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS = DEFER_PATHS ? 
4 : 0; 57 58#define ASSERT_OWNED_RESOURCE(R) GrAssert(!(R) || (R)->getContext() == this) 59 60GrContext* GrContext::Create(GrEngine engine, 61 GrPlatform3DContext context3D) { 62 GrContext* ctx = NULL; 63 GrGpu* fGpu = GrGpu::Create(engine, context3D); 64 if (NULL != fGpu) { 65 ctx = new GrContext(fGpu); 66 fGpu->unref(); 67 } 68 return ctx; 69} 70 71namespace { 72void* CreateThreadInstanceCount() { 73 return new int(0); 74} 75void DeleteThreadInstanceCount(void* v) { 76 delete reinterpret_cast<int*>(v); 77} 78#define THREAD_INSTANCE_COUNT \ 79 (*reinterpret_cast<int*>(SkTLS::Get(CreateThreadInstanceCount, \ 80 DeleteThreadInstanceCount))) 81 82} 83 84int GrContext::GetThreadInstanceCount() { 85 return THREAD_INSTANCE_COUNT; 86} 87 88GrContext::~GrContext() { 89 this->flush(); 90 91 // Since the gpu can hold scratch textures, give it a chance to let go 92 // of them before freeing the texture cache 93 fGpu->purgeResources(); 94 95 delete fTextureCache; 96 delete fFontCache; 97 delete fDrawBuffer; 98 delete fDrawBufferVBAllocPool; 99 delete fDrawBufferIBAllocPool; 100 101 fAARectRenderer->unref(); 102 103 fGpu->unref(); 104 GrSafeUnref(fPathRendererChain); 105 GrSafeUnref(fSoftwarePathRenderer); 106 fDrawState->unref(); 107 108 --THREAD_INSTANCE_COUNT; 109} 110 111void GrContext::contextLost() { 112 contextDestroyed(); 113 this->setupDrawBuffer(); 114} 115 116void GrContext::contextDestroyed() { 117 // abandon first to so destructors 118 // don't try to free the resources in the API. 
119 fGpu->abandonResources(); 120 121 // a path renderer may be holding onto resources that 122 // are now unusable 123 GrSafeSetNull(fPathRendererChain); 124 GrSafeSetNull(fSoftwarePathRenderer); 125 126 delete fDrawBuffer; 127 fDrawBuffer = NULL; 128 129 delete fDrawBufferVBAllocPool; 130 fDrawBufferVBAllocPool = NULL; 131 132 delete fDrawBufferIBAllocPool; 133 fDrawBufferIBAllocPool = NULL; 134 135 fAARectRenderer->reset(); 136 137 fTextureCache->removeAll(); 138 fFontCache->freeAll(); 139 fGpu->markContextDirty(); 140} 141 142void GrContext::resetContext() { 143 fGpu->markContextDirty(); 144} 145 146void GrContext::freeGpuResources() { 147 this->flush(); 148 149 fGpu->purgeResources(); 150 151 fAARectRenderer->reset(); 152 153 fTextureCache->removeAll(); 154 fFontCache->freeAll(); 155 // a path renderer may be holding onto resources 156 GrSafeSetNull(fPathRendererChain); 157 GrSafeSetNull(fSoftwarePathRenderer); 158} 159 160size_t GrContext::getGpuTextureCacheBytes() const { 161 return fTextureCache->getCachedResourceBytes(); 162} 163 164//////////////////////////////////////////////////////////////////////////////// 165 166int GrContext::PaintStageVertexLayoutBits( 167 const GrPaint& paint, 168 const bool hasTexCoords[GrPaint::kTotalStages]) { 169 int stageMask = paint.getActiveStageMask(); 170 int layout = 0; 171 for (int i = 0; i < GrPaint::kTotalStages; ++i) { 172 if ((1 << i) & stageMask) { 173 if (NULL != hasTexCoords && hasTexCoords[i]) { 174 layout |= GrDrawTarget::StageTexCoordVertexLayoutBit(i, i); 175 } else { 176 layout |= GrDrawTarget::StagePosAsTexCoordVertexLayoutBit(i); 177 } 178 } 179 } 180 return layout; 181} 182 183 184//////////////////////////////////////////////////////////////////////////////// 185 186GrTexture* GrContext::TextureCacheEntry::texture() const { 187 if (NULL == fEntry) { 188 return NULL; 189 } else { 190 return (GrTexture*) fEntry->resource(); 191 } 192} 193 194namespace { 195 196// we should never have more than one stencil 
buffer with same combo of 197// (width,height,samplecount) 198void gen_stencil_key_values(int width, int height, 199 int sampleCnt, uint32_t v[4]) { 200 v[0] = width; 201 v[1] = height; 202 v[2] = sampleCnt; 203 v[3] = GrResourceKey::kStencilBuffer_TypeBit; 204} 205 206void gen_stencil_key_values(const GrStencilBuffer* sb, 207 uint32_t v[4]) { 208 gen_stencil_key_values(sb->width(), sb->height(), 209 sb->numSamples(), v); 210} 211 212void scale_rect(SkRect* rect, float xScale, float yScale) { 213 rect->fLeft = SkScalarMul(rect->fLeft, SkFloatToScalar(xScale)); 214 rect->fTop = SkScalarMul(rect->fTop, SkFloatToScalar(yScale)); 215 rect->fRight = SkScalarMul(rect->fRight, SkFloatToScalar(xScale)); 216 rect->fBottom = SkScalarMul(rect->fBottom, SkFloatToScalar(yScale)); 217} 218 219float adjust_sigma(float sigma, int *scaleFactor, int *radius) { 220 *scaleFactor = 1; 221 while (sigma > MAX_BLUR_SIGMA) { 222 *scaleFactor *= 2; 223 sigma *= 0.5f; 224 } 225 *radius = static_cast<int>(ceilf(sigma * 3.0f)); 226 GrAssert(*radius <= GrConvolutionEffect::kMaxKernelRadius); 227 return sigma; 228} 229 230void apply_morphology(GrGpu* gpu, 231 GrTexture* texture, 232 const SkRect& rect, 233 int radius, 234 GrContext::MorphologyType morphType, 235 Gr1DKernelEffect::Direction direction) { 236 237 GrRenderTarget* target = gpu->drawState()->getRenderTarget(); 238 GrDrawTarget::AutoStateRestore asr(gpu, GrDrawTarget::kReset_ASRInit); 239 GrDrawState* drawState = gpu->drawState(); 240 drawState->setRenderTarget(target); 241 GrMatrix sampleM; 242 sampleM.setIDiv(texture->width(), texture->height()); 243 drawState->sampler(0)->reset(sampleM); 244 SkAutoTUnref<GrCustomStage> morph( 245 new GrMorphologyEffect(direction, radius, morphType)); 246 drawState->sampler(0)->setCustomStage(morph); 247 drawState->setTexture(0, texture); 248 gpu->drawSimpleRect(rect, NULL, 1 << 0); 249} 250 251void convolve_gaussian(GrGpu* gpu, 252 GrTexture* texture, 253 const SkRect& rect, 254 float sigma, 255 int 
radius, 256 Gr1DKernelEffect::Direction direction) { 257 GrRenderTarget* target = gpu->drawState()->getRenderTarget(); 258 GrDrawTarget::AutoStateRestore asr(gpu, GrDrawTarget::kReset_ASRInit); 259 GrDrawState* drawState = gpu->drawState(); 260 drawState->setRenderTarget(target); 261 GrMatrix sampleM; 262 sampleM.setIDiv(texture->width(), texture->height()); 263 drawState->sampler(0)->reset(sampleM); 264 SkAutoTUnref<GrConvolutionEffect> conv(new 265 GrConvolutionEffect(direction, radius)); 266 conv->setGaussianKernel(sigma); 267 drawState->sampler(0)->setCustomStage(conv); 268 drawState->setTexture(0, texture); 269 gpu->drawSimpleRect(rect, NULL, 1 << 0); 270} 271 272} 273 274GrContext::TextureCacheEntry GrContext::findAndLockTexture( 275 const GrTextureDesc& desc, 276 const GrSamplerState* sampler) { 277 GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, sampler, desc, false); 278 return TextureCacheEntry(fTextureCache->findAndLock(resourceKey, 279 GrResourceCache::kNested_LockType)); 280} 281 282bool GrContext::isTextureInCache(const GrTextureDesc& desc, 283 const GrSamplerState* sampler) const { 284 GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, sampler, desc, false); 285 return fTextureCache->hasKey(resourceKey); 286} 287 288GrResourceEntry* GrContext::addAndLockStencilBuffer(GrStencilBuffer* sb) { 289 ASSERT_OWNED_RESOURCE(sb); 290 uint32_t v[4]; 291 gen_stencil_key_values(sb, v); 292 GrResourceKey resourceKey(v); 293 return fTextureCache->createAndLock(resourceKey, sb); 294} 295 296GrStencilBuffer* GrContext::findStencilBuffer(int width, int height, 297 int sampleCnt) { 298 uint32_t v[4]; 299 gen_stencil_key_values(width, height, sampleCnt, v); 300 GrResourceKey resourceKey(v); 301 GrResourceEntry* entry = fTextureCache->findAndLock(resourceKey, 302 GrResourceCache::kSingle_LockType); 303 if (NULL != entry) { 304 GrStencilBuffer* sb = (GrStencilBuffer*) entry->resource(); 305 return sb; 306 } else { 307 return NULL; 308 } 309} 310 311void 
GrContext::unlockStencilBuffer(GrResourceEntry* sbEntry) { 312 ASSERT_OWNED_RESOURCE(sbEntry->resource()); 313 fTextureCache->unlock(sbEntry); 314} 315 316static void stretchImage(void* dst, 317 int dstW, 318 int dstH, 319 void* src, 320 int srcW, 321 int srcH, 322 int bpp) { 323 GrFixed dx = (srcW << 16) / dstW; 324 GrFixed dy = (srcH << 16) / dstH; 325 326 GrFixed y = dy >> 1; 327 328 int dstXLimit = dstW*bpp; 329 for (int j = 0; j < dstH; ++j) { 330 GrFixed x = dx >> 1; 331 void* srcRow = (uint8_t*)src + (y>>16)*srcW*bpp; 332 void* dstRow = (uint8_t*)dst + j*dstW*bpp; 333 for (int i = 0; i < dstXLimit; i += bpp) { 334 memcpy((uint8_t*) dstRow + i, 335 (uint8_t*) srcRow + (x>>16)*bpp, 336 bpp); 337 x += dx; 338 } 339 y += dy; 340 } 341} 342 343GrContext::TextureCacheEntry GrContext::createAndLockTexture( 344 const GrSamplerState* sampler, 345 const GrTextureDesc& desc, 346 void* srcData, 347 size_t rowBytes) { 348 SK_TRACE_EVENT0("GrContext::createAndLockTexture"); 349 350#if GR_DUMP_TEXTURE_UPLOAD 351 GrPrintf("GrContext::createAndLockTexture [%d %d]\n", desc.fWidth, desc.fHeight); 352#endif 353 354 TextureCacheEntry entry; 355 356 GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, sampler, 357 desc, false); 358 359 if (GrTexture::NeedsResizing(resourceKey)) { 360 // The desired texture is NPOT and tiled but that isn't supported by 361 // the current hardware. 
Resize the texture to be a POT 362 GrAssert(NULL != sampler); 363 TextureCacheEntry clampEntry = this->findAndLockTexture(desc, 364 NULL); 365 366 if (NULL == clampEntry.texture()) { 367 clampEntry = this->createAndLockTexture(NULL, desc, 368 srcData, rowBytes); 369 GrAssert(NULL != clampEntry.texture()); 370 if (NULL == clampEntry.texture()) { 371 return entry; 372 } 373 } 374 GrTextureDesc rtDesc = desc; 375 rtDesc.fFlags = rtDesc.fFlags | 376 kRenderTarget_GrTextureFlagBit | 377 kNoStencil_GrTextureFlagBit; 378 rtDesc.fWidth = GrNextPow2(GrMax(desc.fWidth, 64)); 379 rtDesc.fHeight = GrNextPow2(GrMax(desc.fHeight, 64)); 380 381 GrTexture* texture = fGpu->createTexture(rtDesc, NULL, 0); 382 383 if (NULL != texture) { 384 GrDrawTarget::AutoStateRestore asr(fGpu, 385 GrDrawTarget::kReset_ASRInit); 386 GrDrawState* drawState = fGpu->drawState(); 387 drawState->setRenderTarget(texture->asRenderTarget()); 388 drawState->setTexture(0, clampEntry.texture()); 389 390 GrSamplerState::Filter filter; 391 // if filtering is not desired then we want to ensure all 392 // texels in the resampled image are copies of texels from 393 // the original. 
394 if (GrTexture::NeedsFiltering(resourceKey)) { 395 filter = GrSamplerState::kBilinear_Filter; 396 } else { 397 filter = GrSamplerState::kNearest_Filter; 398 } 399 drawState->sampler(0)->reset(GrSamplerState::kClamp_WrapMode, 400 filter); 401 402 static const GrVertexLayout layout = 403 GrDrawTarget::StageTexCoordVertexLayoutBit(0,0); 404 GrDrawTarget::AutoReleaseGeometry arg(fGpu, layout, 4, 0); 405 406 if (arg.succeeded()) { 407 GrPoint* verts = (GrPoint*) arg.vertices(); 408 verts[0].setIRectFan(0, 0, 409 texture->width(), 410 texture->height(), 411 2*sizeof(GrPoint)); 412 verts[1].setIRectFan(0, 0, 1, 1, 2*sizeof(GrPoint)); 413 fGpu->drawNonIndexed(kTriangleFan_GrPrimitiveType, 414 0, 4); 415 entry.set(fTextureCache->createAndLock(resourceKey, texture)); 416 } 417 texture->releaseRenderTarget(); 418 } else { 419 // TODO: Our CPU stretch doesn't filter. But we create separate 420 // stretched textures when the sampler state is either filtered or 421 // not. Either implement filtered stretch blit on CPU or just create 422 // one when FBO case fails. 423 424 rtDesc.fFlags = kNone_GrTextureFlags; 425 // no longer need to clamp at min RT size. 
426 rtDesc.fWidth = GrNextPow2(desc.fWidth); 427 rtDesc.fHeight = GrNextPow2(desc.fHeight); 428 int bpp = GrBytesPerPixel(desc.fConfig); 429 SkAutoSMalloc<128*128*4> stretchedPixels(bpp * 430 rtDesc.fWidth * 431 rtDesc.fHeight); 432 stretchImage(stretchedPixels.get(), rtDesc.fWidth, rtDesc.fHeight, 433 srcData, desc.fWidth, desc.fHeight, bpp); 434 435 size_t stretchedRowBytes = rtDesc.fWidth * bpp; 436 437 GrTexture* texture = fGpu->createTexture(rtDesc, 438 stretchedPixels.get(), 439 stretchedRowBytes); 440 GrAssert(NULL != texture); 441 entry.set(fTextureCache->createAndLock(resourceKey, texture)); 442 } 443 fTextureCache->unlock(clampEntry.cacheEntry()); 444 445 } else { 446 GrTexture* texture = fGpu->createTexture(desc, srcData, rowBytes); 447 if (NULL != texture) { 448 entry.set(fTextureCache->createAndLock(resourceKey, texture)); 449 } 450 } 451 return entry; 452} 453 454GrContext::TextureCacheEntry GrContext::lockScratchTexture( 455 const GrTextureDesc& inDesc, 456 ScratchTexMatch match) { 457 GrTextureDesc desc = inDesc; 458 desc.fClientCacheID = kScratch_CacheID; 459 460 if (kExact_ScratchTexMatch != match) { 461 // bin by pow2 with a reasonable min 462 static const int MIN_SIZE = 256; 463 desc.fWidth = GrMax(MIN_SIZE, GrNextPow2(desc.fWidth)); 464 desc.fHeight = GrMax(MIN_SIZE, GrNextPow2(desc.fHeight)); 465 } 466 467 GrResourceEntry* entry; 468 int origWidth = desc.fWidth; 469 int origHeight = desc.fHeight; 470 bool doubledW = false; 471 bool doubledH = false; 472 473 do { 474 GrResourceKey key = GrTexture::ComputeKey(fGpu, NULL, desc, true); 475 entry = fTextureCache->findAndLock(key, 476 GrResourceCache::kNested_LockType); 477 // if we miss, relax the fit of the flags... 478 // then try doubling width... then height. 
479 if (NULL != entry || kExact_ScratchTexMatch == match) { 480 break; 481 } 482 if (!(desc.fFlags & kRenderTarget_GrTextureFlagBit)) { 483 desc.fFlags = desc.fFlags | kRenderTarget_GrTextureFlagBit; 484 } else if (desc.fFlags & kNoStencil_GrTextureFlagBit) { 485 desc.fFlags = desc.fFlags & ~kNoStencil_GrTextureFlagBit; 486 } else if (!doubledW) { 487 desc.fFlags = inDesc.fFlags; 488 desc.fWidth *= 2; 489 doubledW = true; 490 } else if (!doubledH) { 491 desc.fFlags = inDesc.fFlags; 492 desc.fWidth = origWidth; 493 desc.fHeight *= 2; 494 doubledH = true; 495 } else { 496 break; 497 } 498 499 } while (true); 500 501 if (NULL == entry) { 502 desc.fFlags = inDesc.fFlags; 503 desc.fWidth = origWidth; 504 desc.fHeight = origHeight; 505 GrTexture* texture = fGpu->createTexture(desc, NULL, 0); 506 if (NULL != texture) { 507 GrResourceKey key = GrTexture::ComputeKey(fGpu, NULL, 508 texture->desc(), 509 true); 510 entry = fTextureCache->createAndLock(key, texture); 511 } 512 } 513 514 // If the caller gives us the same desc/sampler twice we don't want 515 // to return the same texture the second time (unless it was previously 516 // released). So we detach the entry from the cache and reattach at release. 517 if (NULL != entry) { 518 fTextureCache->detach(entry); 519 } 520 return TextureCacheEntry(entry); 521} 522 523void GrContext::unlockTexture(TextureCacheEntry entry) { 524 ASSERT_OWNED_RESOURCE(entry.texture()); 525 // If this is a scratch texture we detached it from the cache 526 // while it was locked (to avoid two callers simultaneously getting 527 // the same texture). 
528 if (GrTexture::IsScratchTexture(entry.cacheEntry()->key())) { 529 fTextureCache->reattachAndUnlock(entry.cacheEntry()); 530 } else { 531 fTextureCache->unlock(entry.cacheEntry()); 532 } 533} 534 535GrTexture* GrContext::createUncachedTexture(const GrTextureDesc& descIn, 536 void* srcData, 537 size_t rowBytes) { 538 GrTextureDesc descCopy = descIn; 539 descCopy.fClientCacheID = kUncached_CacheID; 540 return fGpu->createTexture(descCopy, srcData, rowBytes); 541} 542 543size_t GrContext::getTextureCacheBudget() const { 544 return fTextureCache->getBudget(); 545} 546 547void GrContext::setTextureCacheBudget(size_t maxTextureBytes) { 548 fTextureCache->setBudget(maxTextureBytes); 549} 550 551int GrContext::getMaxTextureSize() const { 552 return fGpu->getCaps().fMaxTextureSize; 553} 554 555int GrContext::getMaxRenderTargetSize() const { 556 return fGpu->getCaps().fMaxRenderTargetSize; 557} 558 559/////////////////////////////////////////////////////////////////////////////// 560 561GrTexture* GrContext::createPlatformTexture(const GrPlatformTextureDesc& desc) { 562 return fGpu->createPlatformTexture(desc); 563} 564 565GrRenderTarget* GrContext::createPlatformRenderTarget(const GrPlatformRenderTargetDesc& desc) { 566 return fGpu->createPlatformRenderTarget(desc); 567} 568 569/////////////////////////////////////////////////////////////////////////////// 570 571bool GrContext::supportsIndex8PixelConfig(const GrSamplerState* sampler, 572 int width, int height) const { 573 const GrDrawTarget::Caps& caps = fGpu->getCaps(); 574 if (!caps.f8BitPaletteSupport) { 575 return false; 576 } 577 578 bool isPow2 = GrIsPow2(width) && GrIsPow2(height); 579 580 if (!isPow2) { 581 bool tiled = NULL != sampler && 582 (sampler->getWrapX() != GrSamplerState::kClamp_WrapMode || 583 sampler->getWrapY() != GrSamplerState::kClamp_WrapMode); 584 if (tiled && !caps.fNPOTTextureTileSupport) { 585 return false; 586 } 587 } 588 return true; 589} 590 
591//////////////////////////////////////////////////////////////////////////////// 592 593const GrClip& GrContext::getClip() const { return fGpu->getClip(); } 594 595void GrContext::setClip(const GrClip& clip) { 596 fGpu->setClip(clip); 597 fDrawState->enableState(GrDrawState::kClip_StateBit); 598} 599 600void GrContext::setClip(const GrIRect& rect) { 601 GrClip clip; 602 clip.setFromIRect(rect); 603 fGpu->setClip(clip); 604} 605 606//////////////////////////////////////////////////////////////////////////////// 607 608void GrContext::clear(const GrIRect* rect, const GrColor color) { 609 this->flush(); 610 fGpu->clear(rect, color); 611} 612 613void GrContext::drawPaint(const GrPaint& paint) { 614 // set rect to be big enough to fill the space, but not super-huge, so we 615 // don't overflow fixed-point implementations 616 GrRect r; 617 r.setLTRB(0, 0, 618 GrIntToScalar(getRenderTarget()->width()), 619 GrIntToScalar(getRenderTarget()->height())); 620 GrMatrix inverse; 621 SkTLazy<GrPaint> tmpPaint; 622 const GrPaint* p = &paint; 623 GrAutoMatrix am; 624 625 // We attempt to map r by the inverse matrix and draw that. mapRect will 626 // map the four corners and bound them with a new rect. This will not 627 // produce a correct result for some perspective matrices. 
628 if (!this->getMatrix().hasPerspective()) { 629 if (!fDrawState->getViewInverse(&inverse)) { 630 GrPrintf("Could not invert matrix"); 631 return; 632 } 633 inverse.mapRect(&r); 634 } else { 635 if (paint.getActiveMaskStageMask() || paint.getActiveStageMask()) { 636 if (!fDrawState->getViewInverse(&inverse)) { 637 GrPrintf("Could not invert matrix"); 638 return; 639 } 640 tmpPaint.set(paint); 641 tmpPaint.get()->preConcatActiveSamplerMatrices(inverse); 642 p = tmpPaint.get(); 643 } 644 am.set(this, GrMatrix::I()); 645 } 646 // by definition this fills the entire clip, no need for AA 647 if (paint.fAntiAlias) { 648 if (!tmpPaint.isValid()) { 649 tmpPaint.set(paint); 650 p = tmpPaint.get(); 651 } 652 GrAssert(p == tmpPaint.get()); 653 tmpPaint.get()->fAntiAlias = false; 654 } 655 this->drawRect(*p, r); 656} 657 658//////////////////////////////////////////////////////////////////////////////// 659 660namespace { 661inline bool disable_coverage_aa_for_blend(GrDrawTarget* target) { 662 return DISABLE_COVERAGE_AA_FOR_BLEND && !target->canApplyCoverage(); 663} 664} 665 666//////////////////////////////////////////////////////////////////////////////// 667 668/* create a triangle strip that strokes the specified triangle. There are 8 669 unique vertices, but we repreat the last 2 to close up. Alternatively we 670 could use an indices array, and then only send 8 verts, but not sure that 671 would be faster. 
672 */ 673static void setStrokeRectStrip(GrPoint verts[10], GrRect rect, 674 GrScalar width) { 675 const GrScalar rad = GrScalarHalf(width); 676 rect.sort(); 677 678 verts[0].set(rect.fLeft + rad, rect.fTop + rad); 679 verts[1].set(rect.fLeft - rad, rect.fTop - rad); 680 verts[2].set(rect.fRight - rad, rect.fTop + rad); 681 verts[3].set(rect.fRight + rad, rect.fTop - rad); 682 verts[4].set(rect.fRight - rad, rect.fBottom - rad); 683 verts[5].set(rect.fRight + rad, rect.fBottom + rad); 684 verts[6].set(rect.fLeft + rad, rect.fBottom - rad); 685 verts[7].set(rect.fLeft - rad, rect.fBottom + rad); 686 verts[8] = verts[0]; 687 verts[9] = verts[1]; 688} 689 690/** 691 * Returns true if the rects edges are integer-aligned. 692 */ 693static bool isIRect(const GrRect& r) { 694 return GrScalarIsInt(r.fLeft) && GrScalarIsInt(r.fTop) && 695 GrScalarIsInt(r.fRight) && GrScalarIsInt(r.fBottom); 696} 697 698static bool apply_aa_to_rect(GrDrawTarget* target, 699 const GrRect& rect, 700 GrScalar width, 701 const GrMatrix* matrix, 702 GrMatrix* combinedMatrix, 703 GrRect* devRect, 704 bool* useVertexCoverage) { 705 // we use a simple coverage ramp to do aa on axis-aligned rects 706 // we check if the rect will be axis-aligned, and the rect won't land on 707 // integer coords. 708 709 // we are keeping around the "tweak the alpha" trick because 710 // it is our only hope for the fixed-pipe implementation. 
711 // In a shader implementation we can give a separate coverage input 712 // TODO: remove this ugliness when we drop the fixed-pipe impl 713 *useVertexCoverage = false; 714 if (!target->canTweakAlphaForCoverage()) { 715 if (disable_coverage_aa_for_blend(target)) { 716#if GR_DEBUG 717 //GrPrintf("Turning off AA to correctly apply blend.\n"); 718#endif 719 return false; 720 } else { 721 *useVertexCoverage = true; 722 } 723 } 724 const GrDrawState& drawState = target->getDrawState(); 725 if (drawState.getRenderTarget()->isMultisampled()) { 726 return false; 727 } 728 729 if (0 == width && target->willUseHWAALines()) { 730 return false; 731 } 732 733 if (!drawState.getViewMatrix().preservesAxisAlignment()) { 734 return false; 735 } 736 737 if (NULL != matrix && 738 !matrix->preservesAxisAlignment()) { 739 return false; 740 } 741 742 *combinedMatrix = drawState.getViewMatrix(); 743 if (NULL != matrix) { 744 combinedMatrix->preConcat(*matrix); 745 GrAssert(combinedMatrix->preservesAxisAlignment()); 746 } 747 748 combinedMatrix->mapRect(devRect, rect); 749 devRect->sort(); 750 751 if (width < 0) { 752 return !isIRect(*devRect); 753 } else { 754 return true; 755 } 756} 757 758void GrContext::drawRect(const GrPaint& paint, 759 const GrRect& rect, 760 GrScalar width, 761 const GrMatrix* matrix) { 762 SK_TRACE_EVENT0("GrContext::drawRect"); 763 764 GrDrawTarget* target = this->prepareToDraw(paint, kUnbuffered_DrawCategory); 765 GrDrawState::AutoTextureRelease atr(fDrawState); 766 int stageMask = paint.getActiveStageMask(); 767 768 GrRect devRect = rect; 769 GrMatrix combinedMatrix; 770 bool useVertexCoverage; 771 bool needAA = paint.fAntiAlias && 772 !this->getRenderTarget()->isMultisampled(); 773 bool doAA = needAA && apply_aa_to_rect(target, rect, width, matrix, 774 &combinedMatrix, &devRect, 775 &useVertexCoverage); 776 777 if (doAA) { 778 GrDrawTarget::AutoDeviceCoordDraw adcd(target, stageMask); 779 if (width >= 0) { 780 GrVec strokeSize;; 781 if (width > 0) { 782 
strokeSize.set(width, width); 783 combinedMatrix.mapVectors(&strokeSize, 1); 784 strokeSize.setAbs(strokeSize); 785 } else { 786 strokeSize.set(GR_Scalar1, GR_Scalar1); 787 } 788 fAARectRenderer->strokeAARect(this->getGpu(), target, devRect, 789 strokeSize, useVertexCoverage); 790 } else { 791 fAARectRenderer->fillAARect(this->getGpu(), target, 792 devRect, useVertexCoverage); 793 } 794 return; 795 } 796 797 if (width >= 0) { 798 // TODO: consider making static vertex buffers for these cases. 799 // Hairline could be done by just adding closing vertex to 800 // unitSquareVertexBuffer() 801 GrVertexLayout layout = PaintStageVertexLayoutBits(paint, NULL); 802 803 static const int worstCaseVertCount = 10; 804 GrDrawTarget::AutoReleaseGeometry geo(target, layout, worstCaseVertCount, 0); 805 806 if (!geo.succeeded()) { 807 GrPrintf("Failed to get space for vertices!\n"); 808 return; 809 } 810 811 GrPrimitiveType primType; 812 int vertCount; 813 GrPoint* vertex = geo.positions(); 814 815 if (width > 0) { 816 vertCount = 10; 817 primType = kTriangleStrip_GrPrimitiveType; 818 setStrokeRectStrip(vertex, rect, width); 819 } else { 820 // hairline 821 vertCount = 5; 822 primType = kLineStrip_GrPrimitiveType; 823 vertex[0].set(rect.fLeft, rect.fTop); 824 vertex[1].set(rect.fRight, rect.fTop); 825 vertex[2].set(rect.fRight, rect.fBottom); 826 vertex[3].set(rect.fLeft, rect.fBottom); 827 vertex[4].set(rect.fLeft, rect.fTop); 828 } 829 830 GrDrawState::AutoViewMatrixRestore avmr; 831 if (NULL != matrix) { 832 GrDrawState* drawState = target->drawState(); 833 avmr.set(drawState); 834 drawState->preConcatViewMatrix(*matrix); 835 drawState->preConcatSamplerMatrices(stageMask, *matrix); 836 } 837 838 target->drawNonIndexed(primType, 0, vertCount); 839 } else { 840#if GR_STATIC_RECT_VB 841 GrVertexLayout layout = PaintStageVertexLayoutBits(paint, NULL); 842 const GrVertexBuffer* sqVB = fGpu->getUnitSquareVertexBuffer(); 843 if (NULL == sqVB) { 844 GrPrintf("Failed to create static 
rect vb.\n"); 845 return; 846 } 847 target->setVertexSourceToBuffer(layout, sqVB); 848 GrDrawState* drawState = target->drawState(); 849 GrDrawState::AutoViewMatrixRestore avmr(drawState); 850 GrMatrix m; 851 m.setAll(rect.width(), 0, rect.fLeft, 852 0, rect.height(), rect.fTop, 853 0, 0, GrMatrix::I()[8]); 854 855 if (NULL != matrix) { 856 m.postConcat(*matrix); 857 } 858 drawState->preConcatViewMatrix(m); 859 drawState->preConcatSamplerMatrices(stageMask, m); 860 861 target->drawNonIndexed(kTriangleFan_GrPrimitiveType, 0, 4); 862#else 863 target->drawSimpleRect(rect, matrix, stageMask); 864#endif 865 } 866} 867 868void GrContext::drawRectToRect(const GrPaint& paint, 869 const GrRect& dstRect, 870 const GrRect& srcRect, 871 const GrMatrix* dstMatrix, 872 const GrMatrix* srcMatrix) { 873 SK_TRACE_EVENT0("GrContext::drawRectToRect"); 874 875 // srcRect refers to paint's first texture 876 if (NULL == paint.getTexture(0)) { 877 drawRect(paint, dstRect, -1, dstMatrix); 878 return; 879 } 880 881 GR_STATIC_ASSERT(!BATCH_RECT_TO_RECT || !GR_STATIC_RECT_VB); 882 883#if GR_STATIC_RECT_VB 884 GrDrawTarget* target = this->prepareToDraw(paint, kUnbuffered_DrawCategory); 885 GrDrawState::AutoTextureRelease atr(fDrawState); 886 GrDrawState* drawState = target->drawState(); 887 GrVertexLayout layout = PaintStageVertexLayoutBits(paint, NULL); 888 GrDrawState::AutoViewMatrixRestore avmr(drawState); 889 890 GrMatrix m; 891 892 m.setAll(dstRect.width(), 0, dstRect.fLeft, 893 0, dstRect.height(), dstRect.fTop, 894 0, 0, GrMatrix::I()[8]); 895 if (NULL != dstMatrix) { 896 m.postConcat(*dstMatrix); 897 } 898 drawState->preConcatViewMatrix(m); 899 900 // srcRect refers to first stage 901 int otherStageMask = paint.getActiveStageMask() & 902 (~(1 << GrPaint::kFirstTextureStage)); 903 if (otherStageMask) { 904 drawState->preConcatSamplerMatrices(otherStageMask, m); 905 } 906 907 m.setAll(srcRect.width(), 0, srcRect.fLeft, 908 0, srcRect.height(), srcRect.fTop, 909 0, 0, GrMatrix::I()[8]); 
910 if (NULL != srcMatrix) { 911 m.postConcat(*srcMatrix); 912 } 913 drawState->sampler(GrPaint::kFirstTextureStage)->preConcatMatrix(m); 914 915 const GrVertexBuffer* sqVB = fGpu->getUnitSquareVertexBuffer(); 916 if (NULL == sqVB) { 917 GrPrintf("Failed to create static rect vb.\n"); 918 return; 919 } 920 target->setVertexSourceToBuffer(layout, sqVB); 921 target->drawNonIndexed(kTriangleFan_GrPrimitiveType, 0, 4); 922#else 923 924 GrDrawTarget* target; 925#if BATCH_RECT_TO_RECT 926 target = this->prepareToDraw(paint, kBuffered_DrawCategory); 927#else 928 target = this->prepareToDraw(paint, kUnbuffered_DrawCategory); 929#endif 930 GrDrawState::AutoTextureRelease atr(fDrawState); 931 932 const GrRect* srcRects[GrDrawState::kNumStages] = {NULL}; 933 const GrMatrix* srcMatrices[GrDrawState::kNumStages] = {NULL}; 934 srcRects[0] = &srcRect; 935 srcMatrices[0] = srcMatrix; 936 937 target->drawRect(dstRect, dstMatrix, 1, srcRects, srcMatrices); 938#endif 939} 940 941void GrContext::drawVertices(const GrPaint& paint, 942 GrPrimitiveType primitiveType, 943 int vertexCount, 944 const GrPoint positions[], 945 const GrPoint texCoords[], 946 const GrColor colors[], 947 const uint16_t indices[], 948 int indexCount) { 949 SK_TRACE_EVENT0("GrContext::drawVertices"); 950 951 GrDrawTarget::AutoReleaseGeometry geo; 952 953 GrDrawTarget* target = this->prepareToDraw(paint, kUnbuffered_DrawCategory); 954 GrDrawState::AutoTextureRelease atr(fDrawState); 955 956 bool hasTexCoords[GrPaint::kTotalStages] = { 957 NULL != texCoords, // texCoordSrc provides explicit stage 0 coords 958 0 // remaining stages use positions 959 }; 960 961 GrVertexLayout layout = PaintStageVertexLayoutBits(paint, hasTexCoords); 962 963 if (NULL != colors) { 964 layout |= GrDrawTarget::kColor_VertexLayoutBit; 965 } 966 int vertexSize = GrDrawTarget::VertexSize(layout); 967 968 if (sizeof(GrPoint) != vertexSize) { 969 if (!geo.set(target, layout, vertexCount, 0)) { 970 GrPrintf("Failed to get space for 
vertices!\n"); 971 return; 972 } 973 int texOffsets[GrDrawState::kMaxTexCoords]; 974 int colorOffset; 975 GrDrawTarget::VertexSizeAndOffsetsByIdx(layout, 976 texOffsets, 977 &colorOffset, 978 NULL, 979 NULL); 980 void* curVertex = geo.vertices(); 981 982 for (int i = 0; i < vertexCount; ++i) { 983 *((GrPoint*)curVertex) = positions[i]; 984 985 if (texOffsets[0] > 0) { 986 *(GrPoint*)((intptr_t)curVertex + texOffsets[0]) = texCoords[i]; 987 } 988 if (colorOffset > 0) { 989 *(GrColor*)((intptr_t)curVertex + colorOffset) = colors[i]; 990 } 991 curVertex = (void*)((intptr_t)curVertex + vertexSize); 992 } 993 } else { 994 target->setVertexSourceToArray(layout, positions, vertexCount); 995 } 996 997 // we don't currently apply offscreen AA to this path. Need improved 998 // management of GrDrawTarget's geometry to avoid copying points per-tile. 999 1000 if (NULL != indices) { 1001 target->setIndexSourceToArray(indices, indexCount); 1002 target->drawIndexed(primitiveType, 0, 0, vertexCount, indexCount); 1003 } else { 1004 target->drawNonIndexed(primitiveType, 0, vertexCount); 1005 } 1006} 1007 1008/////////////////////////////////////////////////////////////////////////////// 1009namespace { 1010 1011struct CircleVertex { 1012 GrPoint fPos; 1013 GrPoint fCenter; 1014 GrScalar fOuterRadius; 1015 GrScalar fInnerRadius; 1016}; 1017 1018/* Returns true if will map a circle to another circle. This can be true 1019 * if the matrix only includes square-scale, rotation, translation. 
 */
inline bool isSimilarityTransformation(const SkMatrix& matrix,
                                       SkScalar tol = SK_ScalarNearlyZero) {
    if (matrix.isIdentity() || matrix.getType() == SkMatrix::kTranslate_Mask) {
        return true;
    }
    if (matrix.hasPerspective()) {
        return false;
    }

    SkScalar mx = matrix.get(SkMatrix::kMScaleX);
    SkScalar sx = matrix.get(SkMatrix::kMSkewX);
    SkScalar my = matrix.get(SkMatrix::kMScaleY);
    SkScalar sy = matrix.get(SkMatrix::kMSkewY);

    // a matrix with no scale/skew terms at all is degenerate, not a similarity
    if (mx == 0 && sx == 0 && my == 0 && sy == 0) {
        return false;
    }

    // it has scales or skews, but it could also be rotation, check it out.
    // The two basis vectors must be perpendicular (dot product ~ 0) and of
    // equal length for circles to map to circles.
    SkVector vec[2];
    vec[0].set(mx, sx);
    vec[1].set(sy, my);

    return SkScalarNearlyZero(vec[0].dot(vec[1]), SkScalarSquare(tol)) &&
           SkScalarNearlyEqual(vec[0].lengthSqd(), vec[1].lengthSqd(),
                SkScalarSquare(tol));
}

}  // namespace

// TODO: strokeWidth can't be larger than zero right now.
// It will be fixed when drawPath() can handle strokes.
//
// Draws the oval inscribed in 'rect'. The fast path below renders an
// analytically-AA'd circle via the kCircle_EdgeType fragment edge function;
// anything that isn't an AA circle under a similarity transform falls back to
// internalDrawPath().
void GrContext::drawOval(const GrPaint& paint,
                         const GrRect& rect,
                         SkScalar strokeWidth) {
    DrawCategory category = (DEFER_PATHS) ? kBuffered_DrawCategory :
                                            kUnbuffered_DrawCategory;
    GrDrawTarget* target = this->prepareToDraw(paint, category);
    GrDrawState::AutoTextureRelease atr(fDrawState);
    GrDrawState* drawState = target->drawState();
    GrMatrix vm = drawState->getViewMatrix();

    // Fast path requires: view matrix maps circles to circles, AA requested,
    // and the oval actually is a circle (square bounds).
    if (!isSimilarityTransformation(vm) ||
        !paint.fAntiAlias ||
        rect.height() != rect.width()) {
        SkPath path;
        path.addOval(rect);
        // strokeWidth == 0 means hairline (see TODO above); otherwise fill.
        GrPathFill fill = (strokeWidth == 0) ?
                           kHairLine_GrPathFill : kWinding_GrPathFill;
        this->internalDrawPath(paint, path, fill, NULL);
        return;
    }

    const GrRenderTarget* rt = drawState->getRenderTarget();
    if (NULL == rt) {
        return;
    }

    // Vertices below are produced in device space, so draw with an identity
    // view matrix for the duration of this call.
    GrDrawTarget::AutoDeviceCoordDraw adcd(target, paint.getActiveStageMask());

    GrVertexLayout layout = PaintStageVertexLayoutBits(paint, NULL);
    layout |= GrDrawTarget::kEdge_VertexLayoutBit;
    GrAssert(sizeof(CircleVertex) == GrDrawTarget::VertexSize(layout));

    GrPoint center = GrPoint::Make(rect.centerX(), rect.centerY());
    GrScalar radius = SkScalarHalf(rect.width());

    // map center/radius to device space (valid because vm is a similarity)
    vm.mapPoints(&center, 1);
    radius = vm.mapRadius(radius);

    GrScalar outerRadius = radius;
    GrScalar innerRadius = 0;
    SkScalar halfWidth = 0;
    if (strokeWidth == 0) {
        // hairline: expand a half pixel out and in around the radius
        halfWidth = SkScalarHalf(SK_Scalar1);

        outerRadius += halfWidth;
        innerRadius = SkMaxScalar(0, radius - halfWidth);
    }

    GrDrawTarget::AutoReleaseGeometry geo(target, layout, 4, 0);
    if (!geo.succeeded()) {
        GrPrintf("Failed to get space for vertices!\n");
        return;
    }

    CircleVertex* verts = reinterpret_cast<CircleVertex*>(geo.vertices());

    // Device-space bounding quad of the (expanded) circle.
    SkScalar L = center.fX - outerRadius;
    SkScalar R = center.fX + outerRadius;
    SkScalar T = center.fY - outerRadius;
    SkScalar B = center.fY + outerRadius;

    verts[0].fPos = SkPoint::Make(L, T);
    verts[1].fPos = SkPoint::Make(R, T);
    verts[2].fPos = SkPoint::Make(L, B);
    verts[3].fPos = SkPoint::Make(R, B);

    for (int i = 0; i < 4; ++i) {
        // this goes to fragment shader, it should be in y-points-up space.
        verts[i].fCenter = SkPoint::Make(center.fX, rt->height() - center.fY);

        verts[i].fOuterRadius = outerRadius;
        verts[i].fInnerRadius = innerRadius;
    }

    drawState->setVertexEdgeType(GrDrawState::kCircle_EdgeType);
    target->drawNonIndexed(kTriangleStrip_GrPrimitiveType, 0, 4);
}

// Draws 'path' with the given fill rule, optionally offset by 'translate'.
// Empty paths draw nothing (or the whole clip for inverse fills); non-inverse
// ovals are routed to the specialized drawOval() path.
void GrContext::drawPath(const GrPaint& paint, const SkPath& path,
                         GrPathFill fill, const GrPoint* translate) {

    if (path.isEmpty()) {
       // An empty path with an inverted fill covers everything inside the clip.
       if (GrIsFillInverted(fill)) {
           this->drawPaint(paint);
       }
       return;
    }

    SkRect ovalRect;
    if (!GrIsFillInverted(fill) && path.isOval(&ovalRect)) {
        if (translate) {
            ovalRect.offset(*translate);
        }
        // width 0 selects the hairline branch in drawOval; see its TODO about
        // strokes, -SK_Scalar1 means "fill".
        SkScalar width = (fill == kHairLine_GrPathFill) ? 0 : -SK_Scalar1;
        this->drawOval(paint, ovalRect, width);
        return;
    }

    internalDrawPath(paint, path, fill, translate);
}

// Renders a path through the path-renderer chain (possibly the software
// fallback). Shared implementation behind drawPath()/drawOval().
void GrContext::internalDrawPath(const GrPaint& paint, const SkPath& path,
                                 GrPathFill fill, const GrPoint* translate) {

    // Note that below we may sw-rasterize the path into a scratch texture.
    // Scratch textures can be recycled after they are returned to the texture
    // cache. This presents a potential hazard for buffered drawing. However,
    // the writePixels that uploads to the scratch will perform a flush so we're
    // OK.
    DrawCategory category = (DEFER_PATHS) ? kBuffered_DrawCategory :
                                            kUnbuffered_DrawCategory;
    GrDrawTarget* target = this->prepareToDraw(paint, category);
    GrDrawState::AutoTextureRelease atr(fDrawState);
    GrDrawState::StageMask stageMask = paint.getActiveStageMask();

    // Only use path-renderer (coverage) AA when MSAA isn't already doing it.
    bool prAA = paint.fAntiAlias && !this->getRenderTarget()->isMultisampled();

    // An Assumption here is that path renderer would use some form of tweaking
    // the src color (either the input alpha or in the frag shader) to implement
    // aa. If we have some future driver-mojo path AA that can do the right
    // thing WRT to the blend then we'll need some query on the PR.
    if (disable_coverage_aa_for_blend(target)) {
#if GR_DEBUG
        //GrPrintf("Turning off AA to correctly apply blend.\n");
#endif
        prAA = false;
    }

    GrPathRenderer* pr = this->getPathRenderer(path, fill, target, prAA, true);
    if (NULL == pr) {
#if GR_DEBUG
        GrPrintf("Unable to find path renderer compatible with path.\n");
#endif
        return;
    }

    pr->drawPath(path, fill, translate, target, stageMask, prAA);
}

////////////////////////////////////////////////////////////////////////////////

// Flushes deferred drawing to the GPU. kDiscard_FlushBit drops buffered draws
// instead of playing them back; kForceCurrentRenderTarget_FlushBit additionally
// forces a backend RT flush.
// NOTE(review): the discard branch calls fDrawBuffer->reset() without the NULL
// check that flushDrawBuffer() performs — presumably safe because the defer
// macros are enabled in this build; confirm if those macros can be 0.
void GrContext::flush(int flagsBitfield) {
    if (kDiscard_FlushBit & flagsBitfield) {
        fDrawBuffer->reset();
    } else {
        this->flushDrawBuffer();
    }
    if (kForceCurrentRenderTarget_FlushBit & flagsBitfield) {
        fGpu->forceRenderTargetFlush();
    }
}

// Plays the in-order draw buffer back into the GPU.
void GrContext::flushDrawBuffer() {
    if (fDrawBuffer) {
        // With addition of the AA clip path, flushing the draw buffer can
        // result in the generation of an AA clip mask. During this
        // process the SW path renderer may be invoked which recusively
        // calls this method (via internalWriteTexturePixels) creating
        // infinite recursion
        // Guard against that by NULL-ing fDrawBuffer around the flush.
        GrInOrderDrawBuffer* temp = fDrawBuffer;
        fDrawBuffer = NULL;

        temp->flushTo(fGpu);

        fDrawBuffer = temp;
    }
}

// Uploads 'buffer' into a sub-rect of 'texture'. Flushes pending draws first
// (unless kDontFlush_PixelOpsFlag) so the upload lands after them. Premul vs.
// unpremul config mismatches are currently silently dropped (see TODO).
void GrContext::internalWriteTexturePixels(GrTexture* texture,
                                           int left, int top,
                                           int width, int height,
                                           GrPixelConfig config,
                                           const void* buffer,
                                           size_t rowBytes,
                                           uint32_t flags) {
    SK_TRACE_EVENT0("GrContext::writeTexturePixels");
    ASSERT_OWNED_RESOURCE(texture);

    if (!(kDontFlush_PixelOpsFlag & flags)) {
        this->flush();
    }
    // TODO: use scratch texture to perform conversion
    if (GrPixelConfigIsUnpremultiplied(texture->config()) !=
        GrPixelConfigIsUnpremultiplied(config)) {
        return;
    }

    fGpu->writeTexturePixels(texture, left, top, width, height,
                             config, buffer, rowBytes);
}

// Reads a sub-rect of 'texture' into 'buffer'. Only implemented for textures
// that are also render targets; returns false otherwise.
bool GrContext::internalReadTexturePixels(GrTexture* texture,
                                          int left, int top,
                                          int width, int height,
                                          GrPixelConfig config,
                                          void* buffer,
                                          size_t rowBytes,
                                          uint32_t flags) {
    SK_TRACE_EVENT0("GrContext::readTexturePixels");
    ASSERT_OWNED_RESOURCE(texture);

    // TODO: code read pixels for textures that aren't also rendertargets
    GrRenderTarget* target = texture->asRenderTarget();
    if (NULL != target) {
        return this->internalReadRenderTargetPixels(target,
                                                    left, top, width, height,
                                                    config, buffer, rowBytes,
                                                    flags);
    } else {
        return false;
    }
}

#include "SkConfig8888.h"

namespace {
/**
 * Converts a GrPixelConfig to a SkCanvas::Config8888. Only byte-per-channel
 * formats are representable as Config8888 and so the function returns false
 * if the GrPixelConfig has no equivalent Config8888.
 */
bool grconfig_to_config8888(GrPixelConfig config,
                            SkCanvas::Config8888* config8888) {
    switch (config) {
        case kRGBA_8888_PM_GrPixelConfig:
            *config8888 = SkCanvas::kRGBA_Premul_Config8888;
            return true;
        case kRGBA_8888_UPM_GrPixelConfig:
            *config8888 = SkCanvas::kRGBA_Unpremul_Config8888;
            return true;
        case kBGRA_8888_PM_GrPixelConfig:
            *config8888 = SkCanvas::kBGRA_Premul_Config8888;
            return true;
        case kBGRA_8888_UPM_GrPixelConfig:
            *config8888 = SkCanvas::kBGRA_Unpremul_Config8888;
            return true;
        default:
            return false;
    }
}
}  // namespace

// Reads a sub-rect of 'target' (or the current RT when NULL) into 'buffer'.
// May bounce through a scratch render target when the GPU prefers a swapped
// R/B config, a y-flip, or a premul->unpremul conversion; may recurse once
// (with kDontFlush) to read premul and convert to unpremul in software.
// Returns false when the read cannot be serviced.
bool GrContext::internalReadRenderTargetPixels(GrRenderTarget* target,
                                               int left, int top,
                                               int width, int height,
                                               GrPixelConfig config,
                                               void* buffer,
                                               size_t rowBytes,
                                               uint32_t flags) {
    SK_TRACE_EVENT0("GrContext::readRenderTargetPixels");
    ASSERT_OWNED_RESOURCE(target);

    if (NULL == target) {
        target = fDrawState->getRenderTarget();
        if (NULL == target) {
            return false;
        }
    }

    if (!(kDontFlush_PixelOpsFlag & flags)) {
        this->flush();
    }

    // PM target read as UPM on a GPU that can't round-trip unpremul pixels:
    // read back in the target's own (premul) config, then convert on the CPU.
    if (!GrPixelConfigIsUnpremultiplied(target->config()) &&
        GrPixelConfigIsUnpremultiplied(config) &&
        !fGpu->canPreserveReadWriteUnpremulPixels()) {
        SkCanvas::Config8888 srcConfig8888, dstConfig8888;
        if (!grconfig_to_config8888(target->config(), &srcConfig8888) ||
            !grconfig_to_config8888(config, &dstConfig8888)) {
            return false;
        }
        // do read back using target's own config
        this->internalReadRenderTargetPixels(target,
                                             left, top,
                                             width, height,
                                             target->config(),
                                             buffer, rowBytes,
                                             kDontFlush_PixelOpsFlag);
        // sw convert the pixels to unpremul config
        uint32_t* pixels = reinterpret_cast<uint32_t*>(buffer);
        SkConvertConfig8888Pixels(pixels, rowBytes, dstConfig8888,
                                  pixels, rowBytes, srcConfig8888,
                                  width, height);
        return true;
    }

    // The draw-based conversions below require the RT to also be a texture.
    GrTexture* src = target->asTexture();
    bool swapRAndB = NULL != src &&
                     fGpu->preferredReadPixelsConfig(config) ==
                     GrPixelConfigSwapRAndB(config);

    bool flipY = NULL != src &&
                 fGpu->readPixelsWillPayForYFlip(target, left, top,
                                                 width, height, config,
                                                 rowBytes);
    bool alphaConversion = (!GrPixelConfigIsUnpremultiplied(target->config()) &&
                             GrPixelConfigIsUnpremultiplied(config));

    if (NULL == src && alphaConversion) {
        // we should fallback to cpu conversion here. This could happen when
        // we were given an external render target by the client that is not
        // also a texture (e.g. FBO 0 in GL)
        return false;
    }
    // we draw to a scratch texture if any of these conversion are applied
    GrAutoScratchTexture ast;
    if (flipY || swapRAndB || alphaConversion) {
        GrAssert(NULL != src);
        if (swapRAndB) {
            config = GrPixelConfigSwapRAndB(config);
            GrAssert(kUnknown_GrPixelConfig != config);
        }
        // Make the scratch a render target because we don't have a robust
        // readTexturePixels as of yet (it calls this function).
        GrTextureDesc desc;
        desc.fFlags = kRenderTarget_GrTextureFlagBit;
        desc.fWidth = width;
        desc.fHeight = height;
        desc.fConfig = config;

        // When a full readback is faster than a partial we could always make
        // the scratch exactly match the passed rect. However, if we see many
        // different size rectangles we will trash our texture cache and pay the
        // cost of creating and destroying many textures. So, we only request
        // an exact match when the caller is reading an entire RT.
        ScratchTexMatch match = kApprox_ScratchTexMatch;
        if (0 == left &&
            0 == top &&
            target->width() == width &&
            target->height() == height &&
            fGpu->fullReadPixelsIsFasterThanPartial()) {
            match = kExact_ScratchTexMatch;
        }
        ast.set(this, desc, match);
        GrTexture* texture = ast.texture();
        if (!texture) {
            return false;
        }
        // Redirect the final readPixels at the scratch RT.
        target = texture->asRenderTarget();
        GrAssert(NULL != target);

        GrDrawTarget::AutoStateRestore asr(fGpu,
                                           GrDrawTarget::kReset_ASRInit);
        GrDrawState* drawState = fGpu->drawState();
        drawState->setRenderTarget(target);

        // Texture matrix selects the requested sub-rect of src, optionally
        // flipped in y.
        GrMatrix matrix;
        if (flipY) {
            matrix.setTranslate(SK_Scalar1 * left,
                                SK_Scalar1 * (top + height));
            matrix.set(GrMatrix::kMScaleY, -GR_Scalar1);
        } else {
            matrix.setTranslate(SK_Scalar1 *left, SK_Scalar1 *top);
        }
        matrix.postIDiv(src->width(), src->height());
        drawState->sampler(0)->reset(matrix);
        drawState->sampler(0)->setRAndBSwap(swapRAndB);
        drawState->setTexture(0, src);
        GrRect rect;
        rect.setXYWH(0, 0, SK_Scalar1 * width, SK_Scalar1 * height);
        fGpu->drawSimpleRect(rect, NULL, 0x1);
        // The scratch now holds the requested rect at its origin.
        left = 0;
        top = 0;
    }
    return fGpu->readPixels(target,
                            left, top, width, height,
                            config, buffer, rowBytes, flipY);
}

// Resolves a multisampled render target so its contents can be sampled/read.
void GrContext::resolveRenderTarget(GrRenderTarget* target) {
    GrAssert(target);
    ASSERT_OWNED_RESOURCE(target);
    // In the future we may track whether there are any pending draws to this
    // target. We don't today so we always perform a flush. We don't promise
    // this to our clients, though.
    this->flush();
    fGpu->resolveRenderTarget(target);
}

// Copies the full contents of 'src' into 'dst' via a textured draw.
void GrContext::copyTexture(GrTexture* src, GrRenderTarget* dst) {
    if (NULL == src || NULL == dst) {
        return;
    }
    ASSERT_OWNED_RESOURCE(src);

    // Writes pending to the source texture are not tracked, so a flush
    // is required to ensure that the copy captures the most recent contents
    // of the source texture. See similar behaviour in
    // GrContext::resolveRenderTarget.
    this->flush();

    GrDrawTarget::AutoStateRestore asr(fGpu, GrDrawTarget::kReset_ASRInit);
    GrDrawState* drawState = fGpu->drawState();
    drawState->setRenderTarget(dst);
    GrMatrix sampleM;
    sampleM.setIDiv(src->width(), src->height());
    drawState->setTexture(0, src);
    drawState->sampler(0)->reset(sampleM);
    SkRect rect = SkRect::MakeXYWH(0, 0,
                                   SK_Scalar1 * src->width(),
                                   SK_Scalar1 * src->height());
    fGpu->drawSimpleRect(rect, NULL, 1 << 0);
}

// Writes 'buffer' into a sub-rect of 'target' (or the current RT when NULL).
// Prefers the direct texture-upload path; otherwise uploads into a scratch
// texture and draws it into the target, handling R/B swap and (via one level
// of recursion) unpremul->premul conversion in software when needed.
void GrContext::internalWriteRenderTargetPixels(GrRenderTarget* target,
                                                int left, int top,
                                                int width, int height,
                                                GrPixelConfig config,
                                                const void* buffer,
                                                size_t rowBytes,
                                                uint32_t flags) {
    SK_TRACE_EVENT0("GrContext::writeRenderTargetPixels");
    ASSERT_OWNED_RESOURCE(target);

    if (NULL == target) {
        target = fDrawState->getRenderTarget();
        if (NULL == target) {
            return;
        }
    }

    // TODO: when underlying api has a direct way to do this we should use it
    // (e.g. glDrawPixels on desktop GL).

    // If the RT is also a texture and we don't have to do PM/UPM conversion
    // then take the texture path, which we expect to be at least as fast or
    // faster since it doesn't use an intermediate texture as we do below.

#if !GR_MAC_BUILD
    // At least some drivers on the Mac get confused when glTexImage2D is called
    // on a texture attached to an FBO. The FBO still sees the old image. TODO:
    // determine what OS versions and/or HW is affected.
    if (NULL != target->asTexture() &&
        GrPixelConfigIsUnpremultiplied(target->config()) ==
        GrPixelConfigIsUnpremultiplied(config)) {

        this->internalWriteTexturePixels(target->asTexture(),
                                         left, top, width, height,
                                         config, buffer, rowBytes, flags);
        return;
    }
#endif
    // UPM source into a PM target on a GPU that can't round-trip unpremul
    // pixels: premultiply on the CPU, then recurse with the target's config.
    if (!GrPixelConfigIsUnpremultiplied(target->config()) &&
        GrPixelConfigIsUnpremultiplied(config) &&
        !fGpu->canPreserveReadWriteUnpremulPixels()) {
        SkCanvas::Config8888 srcConfig8888, dstConfig8888;
        if (!grconfig_to_config8888(config, &srcConfig8888) ||
            !grconfig_to_config8888(target->config(), &dstConfig8888)) {
            return;
        }
        // allocate a tmp buffer and sw convert the pixels to premul
        SkAutoSTMalloc<128 * 128, uint32_t> tmpPixels(width * height);
        const uint32_t* src = reinterpret_cast<const uint32_t*>(buffer);
        SkConvertConfig8888Pixels(tmpPixels.get(), 4 * width, dstConfig8888,
                                  src, rowBytes, srcConfig8888,
                                  width, height);
        // upload the already premul pixels
        this->internalWriteRenderTargetPixels(target,
                                              left, top,
                                              width, height,
                                              target->config(),
                                              tmpPixels, 4 * width, flags);
        return;
    }

    bool swapRAndB = fGpu->preferredReadPixelsConfig(config) ==
                     GrPixelConfigSwapRAndB(config);
    if (swapRAndB) {
        config = GrPixelConfigSwapRAndB(config);
    }

    GrTextureDesc desc;
    desc.fWidth = width;
    desc.fHeight = height;
    desc.fConfig = config;

    GrAutoScratchTexture ast(this, desc);
    GrTexture* texture = ast.texture();
    if (NULL == texture) {
        return;
    }
    // Upload into the scratch, then draw the scratch into the target at
    // (left, top) with nearest filtering (1:1 texel mapping).
    this->internalWriteTexturePixels(texture, 0, 0, width, height,
                                     config, buffer, rowBytes, flags);

    GrDrawTarget::AutoStateRestore asr(fGpu, GrDrawTarget::kReset_ASRInit);
    GrDrawState* drawState = fGpu->drawState();

    GrMatrix matrix;
    matrix.setTranslate(GrIntToScalar(left), GrIntToScalar(top));
    drawState->setViewMatrix(matrix);
    drawState->setRenderTarget(target);
    drawState->setTexture(0, texture);

    matrix.setIDiv(texture->width(), texture->height());
    drawState->sampler(0)->reset(GrSamplerState::kClamp_WrapMode,
                                 GrSamplerState::kNearest_Filter,
                                 matrix);
    drawState->sampler(0)->setRAndBSwap(swapRAndB);

    GrVertexLayout layout = GrDrawTarget::StagePosAsTexCoordVertexLayoutBit(0);
    static const int VCOUNT = 4;
    // TODO: Use GrGpu::drawRect here
    GrDrawTarget::AutoReleaseGeometry geo(fGpu, layout, VCOUNT, 0);
    if (!geo.succeeded()) {
        GrPrintf("Failed to get space for vertices!\n");
        return;
    }
    ((GrPoint*)geo.vertices())->setIRectFan(0, 0, width, height);
    fGpu->drawNonIndexed(kTriangleFan_GrPrimitiveType, 0, VCOUNT);
}
////////////////////////////////////////////////////////////////////////////////

// Copies the paint's textures, samplers, masks, color, blend, and state bits
// into fDrawState, disabling any stages the paint does not use.
void GrContext::setPaint(const GrPaint& paint) {

    for (int i = 0; i < GrPaint::kMaxTextures; ++i) {
        int s = i + GrPaint::kFirstTextureStage;
        fDrawState->setTexture(s, paint.getTexture(i));
        ASSERT_OWNED_RESOURCE(paint.getTexture(i));
        if (paint.getTexture(i)) {
            *fDrawState->sampler(s) = paint.getTextureSampler(i);
        }
    }

    fDrawState->setFirstCoverageStage(GrPaint::kFirstMaskStage);

    for (int i = 0; i < GrPaint::kMaxMasks; ++i) {
        int s = i + GrPaint::kFirstMaskStage;
        fDrawState->setTexture(s, paint.getMask(i));
        ASSERT_OWNED_RESOURCE(paint.getMask(i));
        if (paint.getMask(i)) {
            *fDrawState->sampler(s) = paint.getMaskSampler(i);
        }
    }

    // disable all stages not accessible via the paint
    for (int s = GrPaint::kTotalStages; s < GrDrawState::kNumStages; ++s) {
        fDrawState->setTexture(s, NULL);
    }

    fDrawState->setColor(paint.fColor);

    if (paint.fDither) {
        fDrawState->enableState(GrDrawState::kDither_StateBit);
    } else {
        fDrawState->disableState(GrDrawState::kDither_StateBit);
    }
    if (paint.fAntiAlias) {
        fDrawState->enableState(GrDrawState::kHWAntialias_StateBit);
    } else {
        fDrawState->disableState(GrDrawState::kHWAntialias_StateBit);
    }
    if (paint.fColorMatrixEnabled) {
        fDrawState->enableState(GrDrawState::kColorMatrix_StateBit);
        fDrawState->setColorMatrix(paint.fColorMatrix);
    } else {
        fDrawState->disableState(GrDrawState::kColorMatrix_StateBit);
    }
    fDrawState->setBlendFunc(paint.fSrcBlendCoeff, paint.fDstBlendCoeff);
    fDrawState->setColorFilter(paint.fColorFilterColor, paint.fColorFilterXfermode);
    fDrawState->setCoverage(paint.fCoverage);
#if GR_DEBUG_PARTIAL_COVERAGE_CHECK
    if ((paint.getActiveMaskStageMask() || 0xff != paint.fCoverage) &&
        !fGpu->canApplyCoverage()) {
        GrPrintf("Partial pixel coverage will be incorrectly blended.\n");
    }
#endif
}

// Applies 'paint' and returns the draw target to use: the deferred in-order
// buffer for kBuffered, the GPU directly for kUnbuffered. Switching category
// flushes the buffer so draws stay correctly ordered.
GrDrawTarget* GrContext::prepareToDraw(const GrPaint& paint,
                                       DrawCategory category) {
    if (category != fLastDrawCategory) {
        this->flushDrawBuffer();
        fLastDrawCategory = category;
    }
    this->setPaint(paint);
    GrDrawTarget* target = fGpu;
    switch (category) {
        case kUnbuffered_DrawCategory:
            target = fGpu;
            break;
        case kBuffered_DrawCategory:
            target = fDrawBuffer;
            fDrawBuffer->setClip(fGpu->getClip());
            break;
        default:
            GrCrash("Unexpected DrawCategory.");
            break;
    }
    return target;
}

/*
 * This method finds a path renderer that can draw the specified path on
 * the provided target.
 * Due to its expense, the software path renderer has been split out so it
 * can be individually allowed/disallowed via the "allowSW" boolean.
 */
GrPathRenderer* GrContext::getPathRenderer(const SkPath& path,
                                           GrPathFill fill,
                                           const GrDrawTarget* target,
                                           bool antiAlias,
                                           bool allowSW) {
    // Both renderer chains are created lazily on first use.
    if (NULL == fPathRendererChain) {
        fPathRendererChain =
            new GrPathRendererChain(this, GrPathRendererChain::kNone_UsageFlag);
    }

    GrPathRenderer* pr = fPathRendererChain->getPathRenderer(path, fill,
                                                             target,
                                                             antiAlias);

    if (NULL == pr && allowSW) {
        if (NULL == fSoftwarePathRenderer) {
            fSoftwarePathRenderer = new GrSoftwarePathRenderer(this);
        }

        pr = fSoftwarePathRenderer;
    }

    return pr;
}

////////////////////////////////////////////////////////////////////////////////

// Sets the current render target, flushing pending draws aimed at the old one.
void GrContext::setRenderTarget(GrRenderTarget* target) {
    ASSERT_OWNED_RESOURCE(target);
    if (fDrawState->getRenderTarget() != target) {
        this->flush(false);
        fDrawState->setRenderTarget(target);
    }
}

GrRenderTarget* GrContext::getRenderTarget() {
    return fDrawState->getRenderTarget();
}

const GrRenderTarget* GrContext::getRenderTarget() const {
    return fDrawState->getRenderTarget();
}

bool GrContext::isConfigRenderable(GrPixelConfig config) const {
    return fGpu->isConfigRenderable(config);
}

const GrMatrix& GrContext::getMatrix() const {
    return fDrawState->getViewMatrix();
}

void GrContext::setMatrix(const GrMatrix& m) {
    fDrawState->setViewMatrix(m);
}

// NOTE(review): declared const yet mutates the shared draw state through the
// fDrawState pointer — legal C++, but the constness is misleading.
void GrContext::concatMatrix(const GrMatrix& m) const {
    fDrawState->preConcatViewMatrix(m);
}

// Sets or clears bit 'shift' in 'bits' depending on whether 'pred' is truthy.
static inline intptr_t setOrClear(intptr_t bits, int shift, intptr_t pred) {
    intptr_t mask = 1 << shift;
    if (pred) {
        bits |= mask;
    } else {
        bits &= ~mask;
    }
    return bits;
}

// Takes a ref on 'gpu' and builds the context's caches, draw state, and
// (via setupDrawBuffer) the deferred drawing machinery.
GrContext::GrContext(GrGpu* gpu) {
    ++THREAD_INSTANCE_COUNT;

    fGpu = gpu;
    fGpu->ref();
    fGpu->setContext(this);

    fDrawState = new GrDrawState();
    fGpu->setDrawState(fDrawState);

    fPathRendererChain = NULL;
    fSoftwarePathRenderer = NULL;

    fTextureCache = new GrResourceCache(kDefaultTextureCacheBudget);
    fFontCache = new GrFontCache(fGpu);

    fLastDrawCategory = kUnbuffered_DrawCategory;

    fDrawBuffer = NULL;
    fDrawBufferVBAllocPool = NULL;
    fDrawBufferIBAllocPool = NULL;

    fAARectRenderer = new GrAARectRenderer;

    this->setupDrawBuffer();
}

// Allocates the vertex/index pools and the in-order draw buffer used for
// deferred rendering. Called from the ctor and again after contextLost().
void GrContext::setupDrawBuffer() {

    GrAssert(NULL == fDrawBuffer);
    GrAssert(NULL == fDrawBufferVBAllocPool);
    GrAssert(NULL == fDrawBufferIBAllocPool);

#if DEFER_TEXT_RENDERING || BATCH_RECT_TO_RECT || DEFER_PATHS
    fDrawBufferVBAllocPool =
        new GrVertexBufferAllocPool(fGpu, false,
                                    DRAW_BUFFER_VBPOOL_BUFFER_SIZE,
                                    DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS);
    fDrawBufferIBAllocPool =
        new GrIndexBufferAllocPool(fGpu, false,
                                   DRAW_BUFFER_IBPOOL_BUFFER_SIZE,
                                   DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS);

    fDrawBuffer = new GrInOrderDrawBuffer(fGpu,
                                          fDrawBufferVBAllocPool,
                                          fDrawBufferIBAllocPool);
#endif

#if BATCH_RECT_TO_RECT
    fDrawBuffer->setQuadIndexBuffer(this->getQuadIndexBuffer());
#endif
    // NOTE(review): these two calls sit outside the #if guard above — if all
    // three defer macros were 0, fDrawBuffer would still be NULL here.
    // Confirm the macros can never all be disabled.
    fDrawBuffer->setAutoFlushTarget(fGpu);
    fDrawBuffer->setDrawState(fDrawState);
}

// Returns the draw target text rendering should use, buffered or not
// depending on DEFER_TEXT_RENDERING.
GrDrawTarget* GrContext::getTextTarget(const GrPaint& paint) {
#if DEFER_TEXT_RENDERING
    return prepareToDraw(paint, kBuffered_DrawCategory);
#else
    return prepareToDraw(paint, kUnbuffered_DrawCategory);
#endif
}

const GrIndexBuffer* GrContext::getQuadIndexBuffer() const {
    return fGpu->getQuadIndexBuffer();
}

// Applies a two-pass (X then Y) Gaussian blur to 'rect' of 'srcTexture',
// ping-ponging between srcTexture and the temp1/temp2 scratch textures.
// Large sigmas are first reduced by power-of-two downsampling (and upsampled
// again at the end). Returns whichever texture holds the final result; saves
// and restores the context's render target and clip around the work.
GrTexture* GrContext::gaussianBlur(GrTexture* srcTexture,
                                   GrAutoScratchTexture* temp1,
                                   GrAutoScratchTexture* temp2,
                                   const SkRect& rect,
                                   float sigmaX, float sigmaY) {
    ASSERT_OWNED_RESOURCE(srcTexture);
    GrRenderTarget* oldRenderTarget = this->getRenderTarget();
    GrClip oldClip = this->getClip();
    GrTexture* origTexture = srcTexture;
    GrAutoMatrix avm(this, GrMatrix::I());
    SkIRect clearRect;
    int scaleFactorX, radiusX;
    int scaleFactorY, radiusY;
    // adjust_sigma caps each sigma and reports the downsample factor and
    // kernel radius to use instead.
    sigmaX = adjust_sigma(sigmaX, &scaleFactorX, &radiusX);
    sigmaY = adjust_sigma(sigmaY, &scaleFactorY, &radiusY);

    // Snap the working rect to integer bounds at the downsampled resolution.
    SkRect srcRect(rect);
    scale_rect(&srcRect, 1.0f / scaleFactorX, 1.0f / scaleFactorY);
    srcRect.roundOut();
    scale_rect(&srcRect, static_cast<float>(scaleFactorX),
                         static_cast<float>(scaleFactorY));
    this->setClip(srcRect);

    GrAssert(kBGRA_8888_PM_GrPixelConfig == srcTexture->config() ||
             kRGBA_8888_PM_GrPixelConfig == srcTexture->config() ||
             kAlpha_8_GrPixelConfig == srcTexture->config());

    GrTextureDesc desc;
    desc.fFlags = kRenderTarget_GrTextureFlagBit | kNoStencil_GrTextureFlagBit;
    desc.fWidth = SkScalarFloorToInt(srcRect.width());
    desc.fHeight = SkScalarFloorToInt(srcRect.height());
    desc.fConfig = srcTexture->config();

    temp1->set(this, desc);
    if (temp2) {
        temp2->set(this, desc);
    }

    GrTexture* dstTexture = temp1->texture();
    GrPaint paint;
    paint.reset();
    paint.textureSampler(0)->setFilter(GrSamplerState::kBilinear_Filter);

    // Downsample by halves until both axes reach their scale factor.
    for (int i = 1; i < scaleFactorX || i < scaleFactorY; i *= 2) {
        paint.textureSampler(0)->matrix()->setIDiv(srcTexture->width(),
                                                   srcTexture->height());
        this->setRenderTarget(dstTexture->asRenderTarget());
        SkRect dstRect(srcRect);
        scale_rect(&dstRect, i < scaleFactorX ? 0.5f : 1.0f,
                             i < scaleFactorY ? 0.5f : 1.0f);
        paint.setTexture(0, srcTexture);
        this->drawRectToRect(paint, dstRect, srcRect);
        srcRect = dstRect;
        SkTSwap(srcTexture, dstTexture);
        // If temp2 is non-NULL, don't render back to origTexture
        if (temp2 && dstTexture == origTexture) {
            dstTexture = temp2->texture();
        }
    }

    SkIRect srcIRect;
    srcRect.roundOut(&srcIRect);

    if (sigmaX > 0.0f) {
        if (scaleFactorX > 1) {
            // Clear out a radius to the right of the srcRect to prevent the
            // X convolution from reading garbage.
            clearRect = SkIRect::MakeXYWH(srcIRect.fRight, srcIRect.fTop,
                                          radiusX, srcIRect.height());
            this->clear(&clearRect, 0x0);
        }

        this->setRenderTarget(dstTexture->asRenderTarget());
        convolve_gaussian(fGpu, srcTexture, srcRect, sigmaX, radiusX,
                          Gr1DKernelEffect::kX_Direction);
        SkTSwap(srcTexture, dstTexture);
        if (temp2 && dstTexture == origTexture) {
            dstTexture = temp2->texture();
        }
    }

    if (sigmaY > 0.0f) {
        if (scaleFactorY > 1 || sigmaX > 0.0f) {
            // Clear out a radius below the srcRect to prevent the Y
            // convolution from reading garbage.
            clearRect = SkIRect::MakeXYWH(srcIRect.fLeft, srcIRect.fBottom,
                                          srcIRect.width(), radiusY);
            this->clear(&clearRect, 0x0);
        }

        this->setRenderTarget(dstTexture->asRenderTarget());
        convolve_gaussian(fGpu, srcTexture, srcRect, sigmaY, radiusY,
                          Gr1DKernelEffect::kY_Direction);
        SkTSwap(srcTexture, dstTexture);
        if (temp2 && dstTexture == origTexture) {
            dstTexture = temp2->texture();
        }
    }

    if (scaleFactorX > 1 || scaleFactorY > 1) {
        // Clear one pixel to the right and below, to accommodate bilinear
        // upsampling.
        clearRect = SkIRect::MakeXYWH(srcIRect.fLeft, srcIRect.fBottom,
                                      srcIRect.width() + 1, 1);
        this->clear(&clearRect, 0x0);
        clearRect = SkIRect::MakeXYWH(srcIRect.fRight, srcIRect.fTop,
                                      1, srcIRect.height());
        this->clear(&clearRect, 0x0);
        // FIXME: This should be mitchell, not bilinear.
        paint.textureSampler(0)->setFilter(GrSamplerState::kBilinear_Filter);
        paint.textureSampler(0)->matrix()->setIDiv(srcTexture->width(),
                                                   srcTexture->height());
        this->setRenderTarget(dstTexture->asRenderTarget());
        paint.setTexture(0, srcTexture);
        SkRect dstRect(srcRect);
        scale_rect(&dstRect, (float) scaleFactorX, (float) scaleFactorY);
        this->drawRectToRect(paint, dstRect, srcRect);
        srcRect = dstRect;
        SkTSwap(srcTexture, dstTexture);
    }
    this->setRenderTarget(oldRenderTarget);
    this->setClip(oldClip);
    return srcTexture;
}

// Applies a separable morphology filter (dilate/erode per 'morphType') to
// 'rect' of 'srcTexture': X pass into temp1, then Y pass into temp2. Returns
// the texture holding the result; restores the saved render target and clip.
GrTexture* GrContext::applyMorphology(GrTexture* srcTexture,
                                      const GrRect& rect,
                                      GrTexture* temp1, GrTexture* temp2,
                                      MorphologyType morphType,
                                      SkISize radius) {
    ASSERT_OWNED_RESOURCE(srcTexture);
    GrRenderTarget* oldRenderTarget = this->getRenderTarget();
    GrAutoMatrix avm(this, GrMatrix::I());
    GrClip oldClip = this->getClip();
    this->setClip(GrRect::MakeWH(SkIntToScalar(srcTexture->width()),
                                 SkIntToScalar(srcTexture->height())));
    if (radius.fWidth > 0) {
        this->setRenderTarget(temp1->asRenderTarget());
        apply_morphology(fGpu, srcTexture, rect, radius.fWidth, morphType,
                         Gr1DKernelEffect::kX_Direction);
        // Clear the band below the rect so the Y pass doesn't read garbage.
        SkIRect clearRect = SkIRect::MakeXYWH(
                    SkScalarFloorToInt(rect.fLeft),
                    SkScalarFloorToInt(rect.fBottom),
                    SkScalarFloorToInt(rect.width()),
                    radius.fHeight);
        this->clear(&clearRect, 0x0);
        srcTexture = temp1;
    }
    if (radius.fHeight > 0) {
        this->setRenderTarget(temp2->asRenderTarget());
        apply_morphology(fGpu, srcTexture, rect, radius.fHeight, morphType,
                         Gr1DKernelEffect::kY_Direction);
        srcTexture = temp2;
    }
    this->setRenderTarget(oldRenderTarget);
    this->setClip(oldClip);
    return srcTexture;
}

// Forwards clip-stack push notification to the GPU.
void GrContext::postClipPush() {
    fGpu->postClipPush();
}

// Forwards clip-stack pop notification to the GPU.
void GrContext::preClipPop() {
    fGpu->preClipPop();
};  // NOTE(review): stray ';' after the function body — harmless empty
    // declaration, but worth removing in a code change.

///////////////////////////////////////////////////////////////////////////////