GrContext.cpp revision a4de8c257ea0be8ff7081f645249b6afe5c48e7e
1 2/* 3 * Copyright 2011 Google Inc. 4 * 5 * Use of this source code is governed by a BSD-style license that can be 6 * found in the LICENSE file. 7 */ 8 9 10#include "GrContext.h" 11 12#include "effects/GrSingleTextureEffect.h" 13#include "effects/GrConfigConversionEffect.h" 14 15#include "GrAARectRenderer.h" 16#include "GrBufferAllocPool.h" 17#include "GrGpu.h" 18#include "GrDrawTargetCaps.h" 19#include "GrIndexBuffer.h" 20#include "GrInOrderDrawBuffer.h" 21#include "GrOvalRenderer.h" 22#include "GrPathRenderer.h" 23#include "GrPathUtils.h" 24#include "GrResourceCache.h" 25#include "GrSoftwarePathRenderer.h" 26#include "GrStencilBuffer.h" 27#include "GrTextStrike.h" 28#include "SkRTConf.h" 29#include "SkRRect.h" 30#include "SkStrokeRec.h" 31#include "SkTLazy.h" 32#include "SkTLS.h" 33#include "SkTrace.h" 34 35SK_DEFINE_INST_COUNT(GrContext) 36SK_DEFINE_INST_COUNT(GrDrawState) 37 38// It can be useful to set this to false to test whether a bug is caused by using the 39// InOrderDrawBuffer, to compare performance of using/not using InOrderDrawBuffer, or to make 40// debugging simpler. 41SK_CONF_DECLARE(bool, c_Defer, "gpu.deferContext", true, 42 "Defers rendering in GrContext via GrInOrderDrawBuffer."); 43 44#define BUFFERED_DRAW (c_Defer ? kYes_BufferedDraw : kNo_BufferedDraw) 45 46// When we're using coverage AA but the blend is incompatible (given gpu 47// limitations) should we disable AA or draw wrong? 
// When coverage AA can't be combined with the current blend (see comment
// above), 1 means we silently disable AA rather than draw wrongly blended
// pixels.
#define DISABLE_COVERAGE_AA_FOR_BLEND 1

#ifdef SK_DEBUG
    // change this to a 1 to see notifications when partial coverage fails
    #define GR_DEBUG_PARTIAL_COVERAGE_CHECK 0
#else
    #define GR_DEBUG_PARTIAL_COVERAGE_CHECK 0
#endif

// Texture cache budget: max entry count and max total bytes.
static const size_t MAX_TEXTURE_CACHE_COUNT = 2048;
static const size_t MAX_TEXTURE_CACHE_BYTES = GR_DEFAULT_TEXTURE_CACHE_MB_LIMIT * 1024 * 1024;

// Sizing of the vertex-buffer pool backing the deferred draw buffer.
static const size_t DRAW_BUFFER_VBPOOL_BUFFER_SIZE = 1 << 15;
static const int DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS = 4;

// Sizing of the index-buffer pool backing the deferred draw buffer.
static const size_t DRAW_BUFFER_IBPOOL_BUFFER_SIZE = 1 << 11;
static const int DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS = 4;

// Asserts that a resource (if non-NULL) was created by this context.
#define ASSERT_OWNED_RESOURCE(R) SkASSERT(!(R) || (R)->getContext() == this)

// Glorified typedef to avoid including GrDrawState.h in GrContext.h
class GrContext::AutoRestoreEffects : public GrDrawState::AutoRestoreEffects {};

// Factory. Returns a fully initialized context for the given backend, or
// NULL (after releasing the partially-built object) if init() fails.
GrContext* GrContext::Create(GrBackend backend, GrBackendContext backendContext) {
    GrContext* context = SkNEW(GrContext);
    if (context->init(backend, backendContext)) {
        return context;
    } else {
        context->unref();
        return NULL;
    }
}

namespace {
// Lazily allocates the per-thread live-context counter for SkTLS.
void* CreateThreadInstanceCount() {
    return SkNEW_ARGS(int, (0));
}
void DeleteThreadInstanceCount(void* v) {
    delete reinterpret_cast<int*>(v);
}
// Expands to an lvalue for the calling thread's live-GrContext count.
#define THREAD_INSTANCE_COUNT \
    (*reinterpret_cast<int*>(SkTLS::Get(CreateThreadInstanceCount, DeleteThreadInstanceCount)))
}

// The constructor only zeroes members so that a context whose init() failed
// can still be destroyed safely; all real setup happens in init().
GrContext::GrContext() {
    ++THREAD_INSTANCE_COUNT;
    fDrawState = NULL;
    fGpu = NULL;
    fClip = NULL;
    fPathRendererChain = NULL;
    fSoftwarePathRenderer = NULL;
    fTextureCache = NULL;
    fFontCache = NULL;
    fDrawBuffer = NULL;
    fDrawBufferVBAllocPool = NULL;
    fDrawBufferIBAllocPool = NULL;
    fAARectRenderer = NULL;
    fOvalRenderer = NULL;
    fViewMatrix.reset();
    fMaxTextureSizeOverride = 1 << 20;
}

// Creates the backend GrGpu plus the caches and specialized renderers that
// hang off the context. Returns false (context still safely destructible)
// if the backend GPU object cannot be created.
bool GrContext::init(GrBackend backend, GrBackendContext backendContext) {
    SkASSERT(NULL == fGpu);

    fGpu = GrGpu::Create(backend, backendContext, this);
    if (NULL == fGpu) {
        return false;
    }

    fDrawState = SkNEW(GrDrawState);
    fGpu->setDrawState(fDrawState);

    fTextureCache = SkNEW_ARGS(GrResourceCache,
                               (MAX_TEXTURE_CACHE_COUNT,
                                MAX_TEXTURE_CACHE_BYTES));
    fTextureCache->setOverbudgetCallback(OverbudgetCB, this);

    fFontCache = SkNEW_ARGS(GrFontCache, (fGpu));

    fLastDrawWasBuffered = kNo_BufferedDraw;

    fAARectRenderer = SkNEW(GrAARectRenderer);
    fOvalRenderer = SkNEW(GrOvalRenderer);

    fDidTestPMConversions = false;

    this->setupDrawBuffer();

    return true;
}

// Number of live GrContexts on the calling thread.
int GrContext::GetThreadInstanceCount() {
    return THREAD_INSTANCE_COUNT;
}

GrContext::~GrContext() {
    // Run client-registered cleanup callbacks before tearing anything down.
    for (int i = 0; i < fCleanUpData.count(); ++i) {
        (*fCleanUpData[i].fFunc)(this, fCleanUpData[i].fInfo);
    }

    // init() failed; none of the members below were ever created.
    if (NULL == fGpu) {
        return;
    }

    this->flush();

    // Since the gpu can hold scratch textures, give it a chance to let go
    // of them before freeing the texture cache
    fGpu->purgeResources();

    delete fTextureCache;
    fTextureCache = NULL;
    delete fFontCache;
    delete fDrawBuffer;
    delete fDrawBufferVBAllocPool;
    delete fDrawBufferIBAllocPool;

    fAARectRenderer->unref();
    fOvalRenderer->unref();

    fGpu->unref();
    SkSafeUnref(fPathRendererChain);
    SkSafeUnref(fSoftwarePathRenderer);
    fDrawState->unref();

    --THREAD_INSTANCE_COUNT;
}

// Called when the underlying 3D API context has been lost: abandon all
// GPU-backed state, then rebuild the deferred draw buffer so the context
// remains usable.
void GrContext::contextLost() {
    this->contextDestroyed();
    this->setupDrawBuffer();
}

// Drops every GPU-backed object without trying to free it through the
// (presumed dead) 3D API.
void GrContext::contextDestroyed() {
    // abandon first to so destructors
    // don't try to free the resources in the API.
    fGpu->abandonResources();

    // a path renderer may be holding onto resources that
    // are now unusable
    SkSafeSetNull(fPathRendererChain);
    SkSafeSetNull(fSoftwarePathRenderer);

    delete fDrawBuffer;
    fDrawBuffer = NULL;

    delete fDrawBufferVBAllocPool;
    fDrawBufferVBAllocPool = NULL;

    delete fDrawBufferIBAllocPool;
    fDrawBufferIBAllocPool = NULL;

    fAARectRenderer->reset();
    fOvalRenderer->reset();

    fTextureCache->purgeAllUnlocked();
    fFontCache->freeAll();
    fGpu->markContextDirty();
}

// Marks cached 3D API state dirty so it is re-sent before the next draw.
void GrContext::resetContext(uint32_t state) {
    fGpu->markContextDirty(state);
}

// Frees as many GPU resources as possible while keeping the context valid
// (unlike contextDestroyed, resources ARE released through the API).
void GrContext::freeGpuResources() {
    this->flush();

    fGpu->purgeResources();

    fAARectRenderer->reset();
    fOvalRenderer->reset();

    fTextureCache->purgeAllUnlocked();
    fFontCache->freeAll();
    // a path renderer may be holding onto resources
    SkSafeSetNull(fPathRendererChain);
    SkSafeSetNull(fSoftwarePathRenderer);
}

// Total bytes currently held by the texture cache.
size_t GrContext::getGpuTextureCacheBytes() const {
    return fTextureCache->getCachedResourceBytes();
}

////////////////////////////////////////////////////////////////////////////////

// Looks up a content-keyed texture in the cache. Returns a new ref that the
// caller must balance, or NULL on a miss.
GrTexture* GrContext::findAndRefTexture(const GrTextureDesc& desc,
                                        const GrCacheID& cacheID,
                                        const GrTextureParams* params) {
    GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheID);
    GrResource* resource = fTextureCache->find(resourceKey);
    SkSafeRef(resource);
    return static_cast<GrTexture*>(resource);
}

// Key-presence check only; does not ref or return the texture.
bool GrContext::isTextureInCache(const GrTextureDesc& desc,
                                 const GrCacheID& cacheID,
                                 const GrTextureParams* params) const {
    GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheID);
    return fTextureCache->hasKey(resourceKey);
}

// Stencil buffers share the texture cache, keyed by size and sample count.
void GrContext::addStencilBuffer(GrStencilBuffer* sb) {
    ASSERT_OWNED_RESOURCE(sb);

    GrResourceKey resourceKey = GrStencilBuffer::ComputeKey(sb->width(),
                                                            sb->height(),
                                                            sb->numSamples());
    fTextureCache->addResource(resourceKey, sb);
}

// Returns a cached stencil buffer matching the dimensions/sample count, or
// NULL. Note: unlike findAndRefTexture, no ref is taken here.
GrStencilBuffer* GrContext::findStencilBuffer(int width, int height,
                                              int sampleCnt) {
    GrResourceKey resourceKey = GrStencilBuffer::ComputeKey(width,
                                                            height,
                                                            sampleCnt);
    GrResource* resource = fTextureCache->find(resourceKey);
    return static_cast<GrStencilBuffer*>(resource);
}

// Nearest-neighbor (unfiltered) stretch of src (srcW x srcH) into
// dst (dstW x dstH), bpp bytes per pixel, using 16.16 fixed-point stepping.
// Rows are assumed tightly packed in both buffers.
static void stretchImage(void* dst,
                         int dstW,
                         int dstH,
                         void* src,
                         int srcW,
                         int srcH,
                         int bpp) {
    GrFixed dx = (srcW << 16) / dstW;
    GrFixed dy = (srcH << 16) / dstH;

    // Start half a step in so samples land at source pixel centers.
    GrFixed y = dy >> 1;

    int dstXLimit = dstW*bpp;
    for (int j = 0; j < dstH; ++j) {
        GrFixed x = dx >> 1;
        void* srcRow = (uint8_t*)src + (y>>16)*srcW*bpp;
        void* dstRow = (uint8_t*)dst + j*dstW*bpp;
        for (int i = 0; i < dstXLimit; i += bpp) {
            memcpy((uint8_t*) dstRow + i,
                   (uint8_t*) srcRow + (x>>16)*bpp,
                   bpp);
            x += dx;
        }
        y += dy;
    }
}

namespace {

// position + local coordinate
extern const GrVertexAttrib gVertexAttribs[] = {
    {kVec2f_GrVertexAttribType, 0,               kPosition_GrVertexAttribBinding},
    {kVec2f_GrVertexAttribType, sizeof(GrPoint), kLocalCoord_GrVertexAttribBinding}
};

};

// The desired texture is NPOT and tiled but that isn't supported by
// the current hardware.
Resize the texture to be a POT 307GrTexture* GrContext::createResizedTexture(const GrTextureDesc& desc, 308 const GrCacheID& cacheID, 309 void* srcData, 310 size_t rowBytes, 311 bool filter) { 312 SkAutoTUnref<GrTexture> clampedTexture(this->findAndRefTexture(desc, cacheID, NULL)); 313 if (NULL == clampedTexture) { 314 clampedTexture.reset(this->createTexture(NULL, desc, cacheID, srcData, rowBytes)); 315 316 if (NULL == clampedTexture) { 317 return NULL; 318 } 319 } 320 321 GrTextureDesc rtDesc = desc; 322 rtDesc.fFlags = rtDesc.fFlags | 323 kRenderTarget_GrTextureFlagBit | 324 kNoStencil_GrTextureFlagBit; 325 rtDesc.fWidth = GrNextPow2(GrMax(desc.fWidth, 64)); 326 rtDesc.fHeight = GrNextPow2(GrMax(desc.fHeight, 64)); 327 328 GrTexture* texture = fGpu->createTexture(rtDesc, NULL, 0); 329 330 if (NULL != texture) { 331 GrDrawTarget::AutoStateRestore asr(fGpu, GrDrawTarget::kReset_ASRInit); 332 GrDrawState* drawState = fGpu->drawState(); 333 drawState->setRenderTarget(texture->asRenderTarget()); 334 335 // if filtering is not desired then we want to ensure all 336 // texels in the resampled image are copies of texels from 337 // the original. 338 GrTextureParams params(SkShader::kClamp_TileMode, filter ? GrTextureParams::kBilerp_FilterMode : 339 GrTextureParams::kNone_FilterMode); 340 drawState->addColorTextureEffect(clampedTexture, SkMatrix::I(), params); 341 342 drawState->setVertexAttribs<gVertexAttribs>(SK_ARRAY_COUNT(gVertexAttribs)); 343 344 GrDrawTarget::AutoReleaseGeometry arg(fGpu, 4, 0); 345 346 if (arg.succeeded()) { 347 GrPoint* verts = (GrPoint*) arg.vertices(); 348 verts[0].setIRectFan(0, 0, texture->width(), texture->height(), 2 * sizeof(GrPoint)); 349 verts[1].setIRectFan(0, 0, 1, 1, 2 * sizeof(GrPoint)); 350 fGpu->drawNonIndexed(kTriangleFan_GrPrimitiveType, 0, 4); 351 } 352 } else { 353 // TODO: Our CPU stretch doesn't filter. But we create separate 354 // stretched textures when the texture params is either filtered or 355 // not. 
Either implement filtered stretch blit on CPU or just create 356 // one when FBO case fails. 357 358 rtDesc.fFlags = kNone_GrTextureFlags; 359 // no longer need to clamp at min RT size. 360 rtDesc.fWidth = GrNextPow2(desc.fWidth); 361 rtDesc.fHeight = GrNextPow2(desc.fHeight); 362 int bpp = GrBytesPerPixel(desc.fConfig); 363 SkAutoSMalloc<128*128*4> stretchedPixels(bpp * rtDesc.fWidth * rtDesc.fHeight); 364 stretchImage(stretchedPixels.get(), rtDesc.fWidth, rtDesc.fHeight, 365 srcData, desc.fWidth, desc.fHeight, bpp); 366 367 size_t stretchedRowBytes = rtDesc.fWidth * bpp; 368 369 SkDEBUGCODE(GrTexture* texture = )fGpu->createTexture(rtDesc, stretchedPixels.get(), 370 stretchedRowBytes); 371 SkASSERT(NULL != texture); 372 } 373 374 return texture; 375} 376 377GrTexture* GrContext::createTexture(const GrTextureParams* params, 378 const GrTextureDesc& desc, 379 const GrCacheID& cacheID, 380 void* srcData, 381 size_t rowBytes) { 382 SK_TRACE_EVENT0("GrContext::createTexture"); 383 384 GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheID); 385 386 GrTexture* texture; 387 if (GrTexture::NeedsResizing(resourceKey)) { 388 texture = this->createResizedTexture(desc, cacheID, 389 srcData, rowBytes, 390 GrTexture::NeedsBilerp(resourceKey)); 391 } else { 392 texture= fGpu->createTexture(desc, srcData, rowBytes); 393 } 394 395 if (NULL != texture) { 396 // Adding a resource could put us overbudget. Try to free up the 397 // necessary space before adding it. 398 fTextureCache->purgeAsNeeded(1, texture->sizeInBytes()); 399 fTextureCache->addResource(resourceKey, texture); 400 } 401 402 return texture; 403} 404 405static GrTexture* create_scratch_texture(GrGpu* gpu, 406 GrResourceCache* textureCache, 407 const GrTextureDesc& desc) { 408 GrTexture* texture = gpu->createTexture(desc, NULL, 0); 409 if (NULL != texture) { 410 GrResourceKey key = GrTexture::ComputeScratchKey(texture->desc()); 411 // Adding a resource could put us overbudget. 
Try to free up the 412 // necessary space before adding it. 413 textureCache->purgeAsNeeded(1, texture->sizeInBytes()); 414 // Make the resource exclusive so future 'find' calls don't return it 415 textureCache->addResource(key, texture, GrResourceCache::kHide_OwnershipFlag); 416 } 417 return texture; 418} 419 420GrTexture* GrContext::lockAndRefScratchTexture(const GrTextureDesc& inDesc, ScratchTexMatch match) { 421 422 SkASSERT((inDesc.fFlags & kRenderTarget_GrTextureFlagBit) || 423 !(inDesc.fFlags & kNoStencil_GrTextureFlagBit)); 424 425 // Renderable A8 targets are not universally supported (e.g., not on ANGLE) 426 SkASSERT(this->isConfigRenderable(kAlpha_8_GrPixelConfig) || 427 !(inDesc.fFlags & kRenderTarget_GrTextureFlagBit) || 428 (inDesc.fConfig != kAlpha_8_GrPixelConfig)); 429 430 if (!fGpu->caps()->reuseScratchTextures()) { 431 // If we're never recycling scratch textures we can 432 // always make them the right size 433 return create_scratch_texture(fGpu, fTextureCache, inDesc); 434 } 435 436 GrTextureDesc desc = inDesc; 437 438 if (kApprox_ScratchTexMatch == match) { 439 // bin by pow2 with a reasonable min 440 static const int MIN_SIZE = 16; 441 desc.fWidth = GrMax(MIN_SIZE, GrNextPow2(desc.fWidth)); 442 desc.fHeight = GrMax(MIN_SIZE, GrNextPow2(desc.fHeight)); 443 } 444 445 GrResource* resource = NULL; 446 int origWidth = desc.fWidth; 447 int origHeight = desc.fHeight; 448 449 do { 450 GrResourceKey key = GrTexture::ComputeScratchKey(desc); 451 // Ensure we have exclusive access to the texture so future 'find' calls don't return it 452 resource = fTextureCache->find(key, GrResourceCache::kHide_OwnershipFlag); 453 if (NULL != resource) { 454 resource->ref(); 455 break; 456 } 457 if (kExact_ScratchTexMatch == match) { 458 break; 459 } 460 // We had a cache miss and we are in approx mode, relax the fit of the flags. 
461 462 // We no longer try to reuse textures that were previously used as render targets in 463 // situations where no RT is needed; doing otherwise can confuse the video driver and 464 // cause significant performance problems in some cases. 465 if (desc.fFlags & kNoStencil_GrTextureFlagBit) { 466 desc.fFlags = desc.fFlags & ~kNoStencil_GrTextureFlagBit; 467 } else { 468 break; 469 } 470 471 } while (true); 472 473 if (NULL == resource) { 474 desc.fFlags = inDesc.fFlags; 475 desc.fWidth = origWidth; 476 desc.fHeight = origHeight; 477 resource = create_scratch_texture(fGpu, fTextureCache, desc); 478 } 479 480 return static_cast<GrTexture*>(resource); 481} 482 483void GrContext::addExistingTextureToCache(GrTexture* texture) { 484 485 if (NULL == texture) { 486 return; 487 } 488 489 // This texture should already have a cache entry since it was once 490 // attached 491 SkASSERT(NULL != texture->getCacheEntry()); 492 493 // Conceptually, the cache entry is going to assume responsibility 494 // for the creation ref. 495 SkASSERT(texture->unique()); 496 497 // Since this texture came from an AutoScratchTexture it should 498 // still be in the exclusive pile 499 fTextureCache->makeNonExclusive(texture->getCacheEntry()); 500 501 if (fGpu->caps()->reuseScratchTextures()) { 502 this->purgeCache(); 503 } else { 504 // When we aren't reusing textures we know this scratch texture 505 // will never be reused and would be just wasting time in the cache 506 fTextureCache->deleteResource(texture->getCacheEntry()); 507 } 508} 509 510 511void GrContext::unlockScratchTexture(GrTexture* texture) { 512 ASSERT_OWNED_RESOURCE(texture); 513 SkASSERT(NULL != texture->getCacheEntry()); 514 515 // If this is a scratch texture we detached it from the cache 516 // while it was locked (to avoid two callers simultaneously getting 517 // the same texture). 
518 if (texture->getCacheEntry()->key().isScratch()) { 519 fTextureCache->makeNonExclusive(texture->getCacheEntry()); 520 this->purgeCache(); 521 } 522} 523 524void GrContext::purgeCache() { 525 if (NULL != fTextureCache) { 526 fTextureCache->purgeAsNeeded(); 527 } 528} 529 530bool GrContext::OverbudgetCB(void* data) { 531 SkASSERT(NULL != data); 532 533 GrContext* context = reinterpret_cast<GrContext*>(data); 534 535 // Flush the InOrderDrawBuffer to possibly free up some textures 536 context->flush(); 537 538 // TODO: actually track flush's behavior rather than always just 539 // returning true. 540 return true; 541} 542 543 544GrTexture* GrContext::createUncachedTexture(const GrTextureDesc& descIn, 545 void* srcData, 546 size_t rowBytes) { 547 GrTextureDesc descCopy = descIn; 548 return fGpu->createTexture(descCopy, srcData, rowBytes); 549} 550 551void GrContext::getTextureCacheLimits(int* maxTextures, 552 size_t* maxTextureBytes) const { 553 fTextureCache->getLimits(maxTextures, maxTextureBytes); 554} 555 556void GrContext::setTextureCacheLimits(int maxTextures, size_t maxTextureBytes) { 557 fTextureCache->setLimits(maxTextures, maxTextureBytes); 558} 559 560int GrContext::getMaxTextureSize() const { 561 return GrMin(fGpu->caps()->maxTextureSize(), fMaxTextureSizeOverride); 562} 563 564int GrContext::getMaxRenderTargetSize() const { 565 return fGpu->caps()->maxRenderTargetSize(); 566} 567 568int GrContext::getMaxSampleCount() const { 569 return fGpu->caps()->maxSampleCount(); 570} 571 572/////////////////////////////////////////////////////////////////////////////// 573 574GrTexture* GrContext::wrapBackendTexture(const GrBackendTextureDesc& desc) { 575 return fGpu->wrapBackendTexture(desc); 576} 577 578GrRenderTarget* GrContext::wrapBackendRenderTarget(const GrBackendRenderTargetDesc& desc) { 579 return fGpu->wrapBackendRenderTarget(desc); 580} 581 582/////////////////////////////////////////////////////////////////////////////// 583 584bool 
GrContext::supportsIndex8PixelConfig(const GrTextureParams* params, 585 int width, int height) const { 586 const GrDrawTargetCaps* caps = fGpu->caps(); 587 if (!caps->eightBitPaletteSupport()) { 588 return false; 589 } 590 591 bool isPow2 = GrIsPow2(width) && GrIsPow2(height); 592 593 if (!isPow2) { 594 bool tiled = NULL != params && params->isTiled(); 595 if (tiled && !caps->npotTextureTileSupport()) { 596 return false; 597 } 598 } 599 return true; 600} 601 602 603//////////////////////////////////////////////////////////////////////////////// 604 605void GrContext::clear(const SkIRect* rect, 606 const GrColor color, 607 GrRenderTarget* target) { 608 AutoRestoreEffects are; 609 this->prepareToDraw(NULL, BUFFERED_DRAW, &are)->clear(rect, color, target); 610} 611 612void GrContext::drawPaint(const GrPaint& origPaint) { 613 // set rect to be big enough to fill the space, but not super-huge, so we 614 // don't overflow fixed-point implementations 615 SkRect r; 616 r.setLTRB(0, 0, 617 SkIntToScalar(getRenderTarget()->width()), 618 SkIntToScalar(getRenderTarget()->height())); 619 SkMatrix inverse; 620 SkTCopyOnFirstWrite<GrPaint> paint(origPaint); 621 AutoMatrix am; 622 623 // We attempt to map r by the inverse matrix and draw that. mapRect will 624 // map the four corners and bound them with a new rect. This will not 625 // produce a correct result for some perspective matrices. 
626 if (!this->getMatrix().hasPerspective()) { 627 if (!fViewMatrix.invert(&inverse)) { 628 GrPrintf("Could not invert matrix\n"); 629 return; 630 } 631 inverse.mapRect(&r); 632 } else { 633 if (!am.setIdentity(this, paint.writable())) { 634 GrPrintf("Could not invert matrix\n"); 635 return; 636 } 637 } 638 // by definition this fills the entire clip, no need for AA 639 if (paint->isAntiAlias()) { 640 paint.writable()->setAntiAlias(false); 641 } 642 this->drawRect(*paint, r); 643} 644 645//////////////////////////////////////////////////////////////////////////////// 646 647namespace { 648inline bool disable_coverage_aa_for_blend(GrDrawTarget* target) { 649 return DISABLE_COVERAGE_AA_FOR_BLEND && !target->canApplyCoverage(); 650} 651} 652 653//////////////////////////////////////////////////////////////////////////////// 654 655/* create a triangle strip that strokes the specified triangle. There are 8 656 unique vertices, but we repreat the last 2 to close up. Alternatively we 657 could use an indices array, and then only send 8 verts, but not sure that 658 would be faster. 
/* create a triangle strip that strokes the specified rect. There are 8
   unique vertices, but we repeat the last 2 to close up. Alternatively we
   could use an indices array, and then only send 8 verts, but not sure that
   would be faster.
 */
static void setStrokeRectStrip(GrPoint verts[10], SkRect rect,
                               SkScalar width) {
    const SkScalar rad = SkScalarHalf(width);
    rect.sort();

    // Alternate outer/inner corners around the rect; repeat the first pair
    // to close the strip.
    verts[0].set(rect.fLeft + rad, rect.fTop + rad);
    verts[1].set(rect.fLeft - rad, rect.fTop - rad);
    verts[2].set(rect.fRight - rad, rect.fTop + rad);
    verts[3].set(rect.fRight + rad, rect.fTop - rad);
    verts[4].set(rect.fRight - rad, rect.fBottom - rad);
    verts[5].set(rect.fRight + rad, rect.fBottom + rad);
    verts[6].set(rect.fLeft + rad, rect.fBottom - rad);
    verts[7].set(rect.fLeft - rad, rect.fBottom + rad);
    verts[8] = verts[0];
    verts[9] = verts[1];
}

// True if all four edges lie exactly on integer coordinates (AA would be a
// no-op for such a rect).
static bool isIRect(const SkRect& r) {
    return SkScalarIsInt(r.fLeft) && SkScalarIsInt(r.fTop) &&
           SkScalarIsInt(r.fRight) && SkScalarIsInt(r.fBottom);
}

// Decides whether the rect draw should take the coverage-AA path. On success
// returns true, writes the device-space bounds to devBoundRect, and sets
// *useVertexCoverage when coverage must ride in per-vertex colors rather than
// tweaked alpha.
static bool apply_aa_to_rect(GrDrawTarget* target,
                             const SkRect& rect,
                             SkScalar strokeWidth,
                             const SkMatrix& combinedMatrix,
                             SkRect* devBoundRect,
                             bool* useVertexCoverage) {
    // we use a simple coverage ramp to do aa on axis-aligned rects
    // we check if the rect will be axis-aligned, and the rect won't land on
    // integer coords.

    // we are keeping around the "tweak the alpha" trick because
    // it is our only hope for the fixed-pipe implementation.
    // In a shader implementation we can give a separate coverage input
    // TODO: remove this ugliness when we drop the fixed-pipe impl
    *useVertexCoverage = false;
    if (!target->getDrawState().canTweakAlphaForCoverage()) {
        if (disable_coverage_aa_for_blend(target)) {
#ifdef SK_DEBUG
            //GrPrintf("Turning off AA to correctly apply blend.\n");
#endif
            return false;
        } else {
            *useVertexCoverage = true;
        }
    }
    const GrDrawState& drawState = target->getDrawState();
    if (drawState.getRenderTarget()->isMultisampled()) {
        // MSAA already provides AA.
        return false;
    }

    if (0 == strokeWidth && target->willUseHWAALines()) {
        return false;
    }

// Fills (strokeWidth < 0) may additionally accept rotated-but-right-angled
// matrices when the rotated-AA-rect optimization is enabled.
#if defined(SHADER_AA_FILL_RECT) || !defined(IGNORE_ROT_AA_RECT_OPT)
    if (strokeWidth >= 0) {
#endif
        if (!combinedMatrix.preservesAxisAlignment()) {
            return false;
        }

#if defined(SHADER_AA_FILL_RECT) || !defined(IGNORE_ROT_AA_RECT_OPT)
    } else {
        if (!combinedMatrix.preservesRightAngles()) {
            return false;
        }
    }
#endif

    combinedMatrix.mapRect(devBoundRect, rect);

    if (strokeWidth < 0) {
        // Integer-aligned fills don't need AA.
        return !isIRect(*devBoundRect);
    } else {
        return true;
    }
}

// Closed-interval point-in-rect test (edges count as inside).
static inline bool rect_contains_inclusive(const SkRect& rect, const SkPoint& point) {
    return point.fX >= rect.fLeft && point.fX <= rect.fRight &&
           point.fY >= rect.fTop && point.fY <= rect.fBottom;
}

// Draws 'rect': filled when width < 0, hairline when width == 0, stroked
// otherwise. 'matrix' (optional) is pre-concatenated with the view matrix.
void GrContext::drawRect(const GrPaint& paint,
                         const SkRect& rect,
                         SkScalar width,
                         const SkMatrix* matrix) {
    SK_TRACE_EVENT0("GrContext::drawRect");

    AutoRestoreEffects are;
    GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are);

    SkMatrix combinedMatrix = target->drawState()->getViewMatrix();
    if (NULL != matrix) {
        combinedMatrix.preConcat(*matrix);
    }

    // Check if this is a full RT draw and can be replaced with a clear. We don't bother checking
    // cases where the RT is fully inside a stroke.
    if (width < 0) {
        SkRect rtRect;
        target->getDrawState().getRenderTarget()->getBoundsRect(&rtRect);
        SkRect clipSpaceRTRect = rtRect;
        bool checkClip = false;
        if (NULL != this->getClip()) {
            checkClip = true;
            clipSpaceRTRect.offset(SkIntToScalar(this->getClip()->fOrigin.fX),
                                   SkIntToScalar(this->getClip()->fOrigin.fY));
        }
        // Does the clip contain the entire RT?
        if (!checkClip || target->getClip()->fClipStack->quickContains(clipSpaceRTRect)) {
            SkMatrix invM;
            if (!combinedMatrix.invert(&invM)) {
                return;
            }
            // Does the rect bound the RT?
            SkPoint srcSpaceRTQuad[4];
            invM.mapRectToQuad(srcSpaceRTQuad, rtRect);
            if (rect_contains_inclusive(rect, srcSpaceRTQuad[0]) &&
                rect_contains_inclusive(rect, srcSpaceRTQuad[1]) &&
                rect_contains_inclusive(rect, srcSpaceRTQuad[2]) &&
                rect_contains_inclusive(rect, srcSpaceRTQuad[3])) {
                // Will it blend?
                GrColor clearColor;
                if (paint.isOpaqueAndConstantColor(&clearColor)) {
                    target->clear(NULL, clearColor);
                    return;
                }
            }
        }
    }

    SkRect devBoundRect;
    bool useVertexCoverage;
    bool needAA = paint.isAntiAlias() &&
                  !target->getDrawState().getRenderTarget()->isMultisampled();
    bool doAA = needAA && apply_aa_to_rect(target, rect, width, combinedMatrix, &devBoundRect,
                                           &useVertexCoverage);
    if (doAA) {
        // AA rects are generated in device space with an identity view matrix.
        GrDrawState::AutoViewMatrixRestore avmr;
        if (!avmr.setIdentity(target->drawState())) {
            return;
        }
        if (width >= 0) {
            fAARectRenderer->strokeAARect(this->getGpu(), target,
                                          rect, combinedMatrix, devBoundRect,
                                          width, useVertexCoverage);
        } else {
            // filled AA rect
            fAARectRenderer->fillAARect(this->getGpu(), target,
                                        rect, combinedMatrix, devBoundRect,
                                        useVertexCoverage);
        }
        return;
    }

    if (width >= 0) {
        // TODO: consider making static vertex buffers for these cases.
        // Hairline could be done by just adding closing vertex to
        // unitSquareVertexBuffer()

        static const int worstCaseVertCount = 10;
        target->drawState()->setDefaultVertexAttribs();
        GrDrawTarget::AutoReleaseGeometry geo(target, worstCaseVertCount, 0);

        if (!geo.succeeded()) {
            GrPrintf("Failed to get space for vertices!\n");
            return;
        }

        GrPrimitiveType primType;
        int vertCount;
        GrPoint* vertex = geo.positions();

        if (width > 0) {
            vertCount = 10;
            primType = kTriangleStrip_GrPrimitiveType;
            setStrokeRectStrip(vertex, rect, width);
        } else {
            // hairline
            vertCount = 5;
            primType = kLineStrip_GrPrimitiveType;
            vertex[0].set(rect.fLeft, rect.fTop);
            vertex[1].set(rect.fRight, rect.fTop);
            vertex[2].set(rect.fRight, rect.fBottom);
            vertex[3].set(rect.fLeft, rect.fBottom);
            vertex[4].set(rect.fLeft, rect.fTop);
        }

        GrDrawState::AutoViewMatrixRestore avmr;
        if (NULL != matrix) {
            GrDrawState* drawState = target->drawState();
            avmr.set(drawState, *matrix);
        }

        target->drawNonIndexed(primType, 0, vertCount);
    } else {
        // filled BW rect
        target->drawSimpleRect(rect, matrix);
    }
}

// Draws dstRect textured by localRect; the optional matrices apply to the
// destination and local coordinates respectively.
void GrContext::drawRectToRect(const GrPaint& paint,
                               const SkRect& dstRect,
                               const SkRect& localRect,
                               const SkMatrix* dstMatrix,
                               const SkMatrix* localMatrix) {
    SK_TRACE_EVENT0("GrContext::drawRectToRect");
    AutoRestoreEffects are;
    GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are);

    target->drawRect(dstRect, dstMatrix, &localRect, localMatrix);
}

namespace {

// position + local coordinate + color
extern const GrVertexAttrib gPosUVColorAttribs[] = {
    {kVec2f_GrVertexAttribType,  0,                 kPosition_GrVertexAttribBinding },
    {kVec2f_GrVertexAttribType,  sizeof(GrPoint),   kLocalCoord_GrVertexAttribBinding },
    {kVec4ub_GrVertexAttribType, 2*sizeof(GrPoint), kColor_GrVertexAttribBinding}
};

// position + color
extern const GrVertexAttrib gPosColorAttribs[] = {
    {kVec2f_GrVertexAttribType,  0,               kPosition_GrVertexAttribBinding},
    {kVec4ub_GrVertexAttribType, sizeof(GrPoint), kColor_GrVertexAttribBinding},
};

// Picks the vertex layout matching which optional arrays were supplied and
// reports the byte offsets of the color/tex-coord components (-1 if absent).
static void set_vertex_attributes(GrDrawState* drawState,
                                  const GrPoint* texCoords,
                                  const GrColor* colors,
                                  int* colorOffset,
                                  int* texOffset) {
    *texOffset = -1;
    *colorOffset = -1;

    if (NULL != texCoords && NULL != colors) {
        *texOffset = sizeof(GrPoint);
        *colorOffset = 2*sizeof(GrPoint);
        drawState->setVertexAttribs<gPosUVColorAttribs>(3);
    } else if (NULL != texCoords) {
        *texOffset = sizeof(GrPoint);
        drawState->setVertexAttribs<gPosUVColorAttribs>(2);
    } else if (NULL != colors) {
        *colorOffset = sizeof(GrPoint);
        drawState->setVertexAttribs<gPosColorAttribs>(2);
    } else {
        drawState->setVertexAttribs<gPosColorAttribs>(1);
    }
}

};

// Draws client-supplied geometry, interleaving the optional per-vertex
// tex coords/colors into a scratch vertex buffer when present.
void GrContext::drawVertices(const GrPaint& paint,
                             GrPrimitiveType primitiveType,
                             int vertexCount,
                             const GrPoint positions[],
                             const GrPoint texCoords[],
                             const GrColor colors[],
                             const uint16_t indices[],
                             int indexCount) {
    SK_TRACE_EVENT0("GrContext::drawVertices");

    GrDrawTarget::AutoReleaseGeometry geo;

    AutoRestoreEffects are;
    GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are);

    GrDrawState* drawState = target->drawState();

    int colorOffset = -1, texOffset = -1;
    set_vertex_attributes(drawState, texCoords, colors, &colorOffset, &texOffset);

    size_t vertexSize = drawState->getVertexSize();
    if (sizeof(GrPoint) != vertexSize) {
        // Interleave position + optional tex coord + optional color.
        if (!geo.set(target, vertexCount, 0)) {
            GrPrintf("Failed to get space for vertices!\n");
            return;
        }
        void* curVertex = geo.vertices();

        for (int i = 0; i < vertexCount; ++i) {
            *((GrPoint*)curVertex) = positions[i];

            if (texOffset >= 0) {
                *(GrPoint*)((intptr_t)curVertex + texOffset) = texCoords[i];
            }
            if (colorOffset >= 0) {
                *(GrColor*)((intptr_t)curVertex + colorOffset) = colors[i];
            }
            curVertex = (void*)((intptr_t)curVertex + vertexSize);
        }
    } else {
        // Positions only: draw straight from the caller's array.
        target->setVertexSourceToArray(positions, vertexCount);
    }

    // we don't currently apply offscreen AA to this path. Need improved
    // management of GrDrawTarget's geometry to avoid copying points per-tile.

    if (NULL != indices) {
        target->setIndexSourceToArray(indices, indexCount);
        target->drawIndexed(primitiveType, 0, 0, vertexCount, indexCount);
        target->resetIndexSource();
    } else {
        target->drawNonIndexed(primitiveType, 0, vertexCount);
    }
}

///////////////////////////////////////////////////////////////////////////////

// Draws a rounded rect via the oval renderer's fast path, falling back to the
// general path pipeline.
void GrContext::drawRRect(const GrPaint& paint,
                          const SkRRect& rect,
                          const SkStrokeRec& stroke) {
    if (rect.isEmpty()) {
       return;
    }

    AutoRestoreEffects are;
    GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are);

    bool useAA = paint.isAntiAlias() &&
                 !target->getDrawState().getRenderTarget()->isMultisampled() &&
                 !disable_coverage_aa_for_blend(target);

    if (!fOvalRenderer->drawSimpleRRect(target, this, useAA, rect, stroke)) {
        SkPath path;
        path.addRRect(rect);
        this->internalDrawPath(target, useAA, path, stroke);
    }
}

///////////////////////////////////////////////////////////////////////////////

// Draws an oval via the oval renderer's fast path, falling back to the
// general path pipeline.
void GrContext::drawOval(const GrPaint& paint,
                         const SkRect& oval,
                         const SkStrokeRec& stroke) {
    if (oval.isEmpty()) {
       return;
    }

    AutoRestoreEffects are;
    GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are);

    bool useAA = paint.isAntiAlias() &&
                 !target->getDrawState().getRenderTarget()->isMultisampled() &&
                 !disable_coverage_aa_for_blend(target);

    if (!fOvalRenderer->drawOval(target, this, useAA, oval, stroke)) {
        SkPath path;
        path.addOval(oval);
        this->internalDrawPath(target, useAA, path, stroke);
    }
}

// Can 'path' be drawn as a pair of filled nested rectangles?
static bool is_nested_rects(GrDrawTarget* target,
                            const SkPath& path,
                            const SkStrokeRec& stroke,
                            SkRect rects[2],
                            bool* useVertexCoverage) {
    SkASSERT(stroke.isFillStyle());

    if (path.isInverseFillType()) {
        return false;
    }

    const GrDrawState& drawState = target->getDrawState();

    // TODO: this restriction could be lifted if we were willing to apply
    // the matrix to all the points individually rather than just to the rect
    if (!drawState.getViewMatrix().preservesAxisAlignment()) {
        return false;
    }

    // Same alpha-tweak-vs-vertex-coverage decision as apply_aa_to_rect.
    *useVertexCoverage = false;
    if (!target->getDrawState().canTweakAlphaForCoverage()) {
        if (disable_coverage_aa_for_blend(target)) {
            return false;
        } else {
            *useVertexCoverage = true;
        }
    }

    SkPath::Direction dirs[2];
    if (!path.isNestedRects(rects, dirs)) {
        return false;
    }

    if (SkPath::kWinding_FillType == path.getFillType() && dirs[0] == dirs[1]) {
        // The two rects need to be wound opposite to each other
        return false;
    }

    // Right now, nested rects where the margin is not the same width
    // all around do not render correctly
    const SkScalar* outer = rects[0].asScalars();
    const SkScalar* inner = rects[1].asScalars();

    SkScalar margin = SkScalarAbs(outer[0] - inner[0]);
    for (int i = 1; i < 4; ++i) {
        SkScalar temp = SkScalarAbs(outer[i] - inner[i]);
        if (!SkScalarNearlyEqual(margin, temp)) {
            return false;
        }
    }

    return true;
}

// Top-level path draw. Tries cheap special cases (inverse-fill empty path ->
// drawPaint, nested rects, ovals) before the general path pipeline.
void GrContext::drawPath(const GrPaint& paint, const SkPath& path, const SkStrokeRec& stroke) {

    if (path.isEmpty()) {
       if (path.isInverseFillType()) {
           this->drawPaint(paint);
       }
       return;
    }

    // Note that internalDrawPath may sw-rasterize the path into a scratch texture.
    // Scratch textures can be recycled after they are returned to the texture
    // cache. This presents a potential hazard for buffered drawing. However,
    // the writePixels that uploads to the scratch will perform a flush so we're
    // OK.
    AutoRestoreEffects are;
    GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are);

    bool useAA = paint.isAntiAlias() && !target->getDrawState().getRenderTarget()->isMultisampled();
    if (useAA && stroke.getWidth() < 0 && !path.isConvex()) {
        // Concave AA paths are expensive - try to avoid them for special cases
        bool useVertexCoverage;
        SkRect rects[2];

        if (is_nested_rects(target, path, stroke, rects, &useVertexCoverage)) {
            SkMatrix origViewMatrix = target->getDrawState().getViewMatrix();
            GrDrawState::AutoViewMatrixRestore avmr;
            if (!avmr.setIdentity(target->drawState())) {
                return;
            }

            fAARectRenderer->fillAANestedRects(this->getGpu(), target,
                                               rects,
                                               origViewMatrix,
                                               useVertexCoverage);
            return;
        }
    }

    SkRect ovalRect;
    bool isOval = path.isOval(&ovalRect);

    if (!isOval || path.isInverseFillType()
        || !fOvalRenderer->drawOval(target, this, useAA, ovalRect, stroke)) {
        this->internalDrawPath(target, useAA, path, stroke);
    }
}

// NOTE(review): definition continues beyond the visible portion of the file.
void GrContext::internalDrawPath(GrDrawTarget* target, bool useAA, const SkPath& path,
                                 const SkStrokeRec& stroke) {
    SkASSERT(!path.isEmpty());

    // An Assumption here is that path renderer would use some form of tweaking
    // the src color (either the input alpha or in the frag shader) to implement
    // aa. If we have some future driver-mojo path AA that can do the right
    // thing WRT to the blend then we'll need some query on the PR.
1125 if (disable_coverage_aa_for_blend(target)) { 1126#ifdef SK_DEBUG 1127 //GrPrintf("Turning off AA to correctly apply blend.\n"); 1128#endif 1129 useAA = false; 1130 } 1131 1132 GrPathRendererChain::DrawType type = useAA ? GrPathRendererChain::kColorAntiAlias_DrawType : 1133 GrPathRendererChain::kColor_DrawType; 1134 1135 const SkPath* pathPtr = &path; 1136 SkPath tmpPath; 1137 SkStrokeRec strokeRec(stroke); 1138 1139 // Try a 1st time without stroking the path and without allowing the SW renderer 1140 GrPathRenderer* pr = this->getPathRenderer(*pathPtr, strokeRec, target, false, type); 1141 1142 if (NULL == pr) { 1143 if (!strokeRec.isHairlineStyle()) { 1144 // It didn't work the 1st time, so try again with the stroked path 1145 if (strokeRec.applyToPath(&tmpPath, *pathPtr)) { 1146 pathPtr = &tmpPath; 1147 strokeRec.setFillStyle(); 1148 } 1149 } 1150 if (pathPtr->isEmpty()) { 1151 return; 1152 } 1153 1154 // This time, allow SW renderer 1155 pr = this->getPathRenderer(*pathPtr, strokeRec, target, true, type); 1156 } 1157 1158 if (NULL == pr) { 1159#ifdef SK_DEBUG 1160 GrPrintf("Unable to find path renderer compatible with path.\n"); 1161#endif 1162 return; 1163 } 1164 1165 pr->drawPath(*pathPtr, strokeRec, target, useAA); 1166} 1167 1168//////////////////////////////////////////////////////////////////////////////// 1169 1170void GrContext::flush(int flagsBitfield) { 1171 if (NULL == fDrawBuffer) { 1172 return; 1173 } 1174 1175 if (kDiscard_FlushBit & flagsBitfield) { 1176 fDrawBuffer->reset(); 1177 } else { 1178 fDrawBuffer->flush(); 1179 } 1180} 1181 1182bool GrContext::writeTexturePixels(GrTexture* texture, 1183 int left, int top, int width, int height, 1184 GrPixelConfig config, const void* buffer, size_t rowBytes, 1185 uint32_t flags) { 1186 SK_TRACE_EVENT0("GrContext::writeTexturePixels"); 1187 ASSERT_OWNED_RESOURCE(texture); 1188 1189 if ((kUnpremul_PixelOpsFlag & flags) || !fGpu->canWriteTexturePixels(texture, config)) { 1190 if (NULL != 
texture->asRenderTarget()) { 1191 return this->writeRenderTargetPixels(texture->asRenderTarget(), 1192 left, top, width, height, 1193 config, buffer, rowBytes, flags); 1194 } else { 1195 return false; 1196 } 1197 } 1198 1199 if (!(kDontFlush_PixelOpsFlag & flags)) { 1200 this->flush(); 1201 } 1202 1203 return fGpu->writeTexturePixels(texture, left, top, width, height, 1204 config, buffer, rowBytes); 1205} 1206 1207bool GrContext::readTexturePixels(GrTexture* texture, 1208 int left, int top, int width, int height, 1209 GrPixelConfig config, void* buffer, size_t rowBytes, 1210 uint32_t flags) { 1211 SK_TRACE_EVENT0("GrContext::readTexturePixels"); 1212 ASSERT_OWNED_RESOURCE(texture); 1213 1214 // TODO: code read pixels for textures that aren't also rendertargets 1215 GrRenderTarget* target = texture->asRenderTarget(); 1216 if (NULL != target) { 1217 return this->readRenderTargetPixels(target, 1218 left, top, width, height, 1219 config, buffer, rowBytes, 1220 flags); 1221 } else { 1222 return false; 1223 } 1224} 1225 1226#include "SkConfig8888.h" 1227 1228namespace { 1229/** 1230 * Converts a GrPixelConfig to a SkCanvas::Config8888. Only byte-per-channel 1231 * formats are representable as Config8888 and so the function returns false 1232 * if the GrPixelConfig has no equivalent Config8888. 
1233 */ 1234bool grconfig_to_config8888(GrPixelConfig config, 1235 bool unpremul, 1236 SkCanvas::Config8888* config8888) { 1237 switch (config) { 1238 case kRGBA_8888_GrPixelConfig: 1239 if (unpremul) { 1240 *config8888 = SkCanvas::kRGBA_Unpremul_Config8888; 1241 } else { 1242 *config8888 = SkCanvas::kRGBA_Premul_Config8888; 1243 } 1244 return true; 1245 case kBGRA_8888_GrPixelConfig: 1246 if (unpremul) { 1247 *config8888 = SkCanvas::kBGRA_Unpremul_Config8888; 1248 } else { 1249 *config8888 = SkCanvas::kBGRA_Premul_Config8888; 1250 } 1251 return true; 1252 default: 1253 return false; 1254 } 1255} 1256 1257// It returns a configuration with where the byte position of the R & B components are swapped in 1258// relation to the input config. This should only be called with the result of 1259// grconfig_to_config8888 as it will fail for other configs. 1260SkCanvas::Config8888 swap_config8888_red_and_blue(SkCanvas::Config8888 config8888) { 1261 switch (config8888) { 1262 case SkCanvas::kBGRA_Premul_Config8888: 1263 return SkCanvas::kRGBA_Premul_Config8888; 1264 case SkCanvas::kBGRA_Unpremul_Config8888: 1265 return SkCanvas::kRGBA_Unpremul_Config8888; 1266 case SkCanvas::kRGBA_Premul_Config8888: 1267 return SkCanvas::kBGRA_Premul_Config8888; 1268 case SkCanvas::kRGBA_Unpremul_Config8888: 1269 return SkCanvas::kBGRA_Unpremul_Config8888; 1270 default: 1271 GrCrash("Unexpected input"); 1272 return SkCanvas::kBGRA_Unpremul_Config8888;; 1273 } 1274} 1275} 1276 1277bool GrContext::readRenderTargetPixels(GrRenderTarget* target, 1278 int left, int top, int width, int height, 1279 GrPixelConfig dstConfig, void* buffer, size_t rowBytes, 1280 uint32_t flags) { 1281 SK_TRACE_EVENT0("GrContext::readRenderTargetPixels"); 1282 ASSERT_OWNED_RESOURCE(target); 1283 1284 if (NULL == target) { 1285 target = fRenderTarget.get(); 1286 if (NULL == target) { 1287 return false; 1288 } 1289 } 1290 1291 if (!(kDontFlush_PixelOpsFlag & flags)) { 1292 this->flush(); 1293 } 1294 1295 // Determine 
which conversions have to be applied: flipY, swapRAnd, and/or unpremul. 1296 1297 // If fGpu->readPixels would incur a y-flip cost then we will read the pixels upside down. We'll 1298 // either do the flipY by drawing into a scratch with a matrix or on the cpu after the read. 1299 bool flipY = fGpu->readPixelsWillPayForYFlip(target, left, top, 1300 width, height, dstConfig, 1301 rowBytes); 1302 // We ignore the preferred config if it is different than our config unless it is an R/B swap. 1303 // In that case we'll perform an R and B swap while drawing to a scratch texture of the swapped 1304 // config. Then we will call readPixels on the scratch with the swapped config. The swaps during 1305 // the draw cancels out the fact that we call readPixels with a config that is R/B swapped from 1306 // dstConfig. 1307 GrPixelConfig readConfig = dstConfig; 1308 bool swapRAndB = false; 1309 if (GrPixelConfigSwapRAndB(dstConfig) == 1310 fGpu->preferredReadPixelsConfig(dstConfig, target->config())) { 1311 readConfig = GrPixelConfigSwapRAndB(readConfig); 1312 swapRAndB = true; 1313 } 1314 1315 bool unpremul = SkToBool(kUnpremul_PixelOpsFlag & flags); 1316 1317 if (unpremul && !GrPixelConfigIs8888(dstConfig)) { 1318 // The unpremul flag is only allowed for these two configs. 1319 return false; 1320 } 1321 1322 // If the src is a texture and we would have to do conversions after read pixels, we instead 1323 // do the conversions by drawing the src to a scratch texture. If we handle any of the 1324 // conversions in the draw we set the corresponding bool to false so that we don't reapply it 1325 // on the read back pixels. 1326 GrTexture* src = target->asTexture(); 1327 GrAutoScratchTexture ast; 1328 if (NULL != src && (swapRAndB || unpremul || flipY)) { 1329 // Make the scratch a render target because we don't have a robust readTexturePixels as of 1330 // yet. It calls this function. 
1331 GrTextureDesc desc; 1332 desc.fFlags = kRenderTarget_GrTextureFlagBit; 1333 desc.fWidth = width; 1334 desc.fHeight = height; 1335 desc.fConfig = readConfig; 1336 desc.fOrigin = kTopLeft_GrSurfaceOrigin; 1337 1338 // When a full read back is faster than a partial we could always make the scratch exactly 1339 // match the passed rect. However, if we see many different size rectangles we will trash 1340 // our texture cache and pay the cost of creating and destroying many textures. So, we only 1341 // request an exact match when the caller is reading an entire RT. 1342 ScratchTexMatch match = kApprox_ScratchTexMatch; 1343 if (0 == left && 1344 0 == top && 1345 target->width() == width && 1346 target->height() == height && 1347 fGpu->fullReadPixelsIsFasterThanPartial()) { 1348 match = kExact_ScratchTexMatch; 1349 } 1350 ast.set(this, desc, match); 1351 GrTexture* texture = ast.texture(); 1352 if (texture) { 1353 // compute a matrix to perform the draw 1354 SkMatrix textureMatrix; 1355 textureMatrix.setTranslate(SK_Scalar1 *left, SK_Scalar1 *top); 1356 textureMatrix.postIDiv(src->width(), src->height()); 1357 1358 SkAutoTUnref<const GrEffectRef> effect; 1359 if (unpremul) { 1360 effect.reset(this->createPMToUPMEffect(src, swapRAndB, textureMatrix)); 1361 if (NULL != effect) { 1362 unpremul = false; // we no longer need to do this on CPU after the read back. 1363 } 1364 } 1365 // If we failed to create a PM->UPM effect and have no other conversions to perform then 1366 // there is no longer any point to using the scratch. 1367 if (NULL != effect || flipY || swapRAndB) { 1368 if (!effect) { 1369 effect.reset(GrConfigConversionEffect::Create( 1370 src, 1371 swapRAndB, 1372 GrConfigConversionEffect::kNone_PMConversion, 1373 textureMatrix)); 1374 } 1375 swapRAndB = false; // we will handle the swap in the draw. 
1376 1377 // We protect the existing geometry here since it may not be 1378 // clear to the caller that a draw operation (i.e., drawSimpleRect) 1379 // can be invoked in this method 1380 GrDrawTarget::AutoGeometryAndStatePush agasp(fGpu, GrDrawTarget::kReset_ASRInit); 1381 GrDrawState* drawState = fGpu->drawState(); 1382 SkASSERT(effect); 1383 drawState->addColorEffect(effect); 1384 1385 drawState->setRenderTarget(texture->asRenderTarget()); 1386 SkRect rect = SkRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height)); 1387 fGpu->drawSimpleRect(rect, NULL); 1388 // we want to read back from the scratch's origin 1389 left = 0; 1390 top = 0; 1391 target = texture->asRenderTarget(); 1392 } 1393 } 1394 } 1395 if (!fGpu->readPixels(target, 1396 left, top, width, height, 1397 readConfig, buffer, rowBytes)) { 1398 return false; 1399 } 1400 // Perform any conversions we weren't able to perform using a scratch texture. 1401 if (unpremul || swapRAndB) { 1402 // These are initialized to suppress a warning 1403 SkCanvas::Config8888 srcC8888 = SkCanvas::kNative_Premul_Config8888; 1404 SkCanvas::Config8888 dstC8888 = SkCanvas::kNative_Premul_Config8888; 1405 1406 SkDEBUGCODE(bool c8888IsValid =) grconfig_to_config8888(dstConfig, false, &srcC8888); 1407 grconfig_to_config8888(dstConfig, unpremul, &dstC8888); 1408 1409 if (swapRAndB) { 1410 SkASSERT(c8888IsValid); // we should only do r/b swap on 8888 configs 1411 srcC8888 = swap_config8888_red_and_blue(srcC8888); 1412 } 1413 SkASSERT(c8888IsValid); 1414 uint32_t* b32 = reinterpret_cast<uint32_t*>(buffer); 1415 SkConvertConfig8888Pixels(b32, rowBytes, dstC8888, 1416 b32, rowBytes, srcC8888, 1417 width, height); 1418 } 1419 return true; 1420} 1421 1422void GrContext::resolveRenderTarget(GrRenderTarget* target) { 1423 SkASSERT(target); 1424 ASSERT_OWNED_RESOURCE(target); 1425 // In the future we may track whether there are any pending draws to this 1426 // target. We don't today so we always perform a flush. 
We don't promise 1427 // this to our clients, though. 1428 this->flush(); 1429 fGpu->resolveRenderTarget(target); 1430} 1431 1432void GrContext::copyTexture(GrTexture* src, GrRenderTarget* dst, const SkIPoint* topLeft) { 1433 if (NULL == src || NULL == dst) { 1434 return; 1435 } 1436 ASSERT_OWNED_RESOURCE(src); 1437 1438 // Writes pending to the source texture are not tracked, so a flush 1439 // is required to ensure that the copy captures the most recent contents 1440 // of the source texture. See similar behavior in 1441 // GrContext::resolveRenderTarget. 1442 this->flush(); 1443 1444 GrDrawTarget::AutoStateRestore asr(fGpu, GrDrawTarget::kReset_ASRInit); 1445 GrDrawState* drawState = fGpu->drawState(); 1446 drawState->setRenderTarget(dst); 1447 SkMatrix sampleM; 1448 sampleM.setIDiv(src->width(), src->height()); 1449 SkIRect srcRect = SkIRect::MakeWH(dst->width(), dst->height()); 1450 if (NULL != topLeft) { 1451 srcRect.offset(*topLeft); 1452 } 1453 SkIRect srcBounds = SkIRect::MakeWH(src->width(), src->height()); 1454 if (!srcRect.intersect(srcBounds)) { 1455 return; 1456 } 1457 sampleM.preTranslate(SkIntToScalar(srcRect.fLeft), SkIntToScalar(srcRect.fTop)); 1458 drawState->addColorTextureEffect(src, sampleM); 1459 SkRect dstR = SkRect::MakeWH(SkIntToScalar(srcRect.width()), SkIntToScalar(srcRect.height())); 1460 fGpu->drawSimpleRect(dstR, NULL); 1461} 1462 1463bool GrContext::writeRenderTargetPixels(GrRenderTarget* target, 1464 int left, int top, int width, int height, 1465 GrPixelConfig srcConfig, 1466 const void* buffer, 1467 size_t rowBytes, 1468 uint32_t flags) { 1469 SK_TRACE_EVENT0("GrContext::writeRenderTargetPixels"); 1470 ASSERT_OWNED_RESOURCE(target); 1471 1472 if (NULL == target) { 1473 target = fRenderTarget.get(); 1474 if (NULL == target) { 1475 return false; 1476 } 1477 } 1478 1479 // TODO: when underlying api has a direct way to do this we should use it (e.g. glDrawPixels on 1480 // desktop GL). 
1481 1482 // We will always call some form of writeTexturePixels and we will pass our flags on to it. 1483 // Thus, we don't perform a flush here since that call will do it (if the kNoFlush flag isn't 1484 // set.) 1485 1486 // If the RT is also a texture and we don't have to premultiply then take the texture path. 1487 // We expect to be at least as fast or faster since it doesn't use an intermediate texture as 1488 // we do below. 1489 1490#if !GR_MAC_BUILD 1491 // At least some drivers on the Mac get confused when glTexImage2D is called on a texture 1492 // attached to an FBO. The FBO still sees the old image. TODO: determine what OS versions and/or 1493 // HW is affected. 1494 if (NULL != target->asTexture() && !(kUnpremul_PixelOpsFlag & flags) && 1495 fGpu->canWriteTexturePixels(target->asTexture(), srcConfig)) { 1496 return this->writeTexturePixels(target->asTexture(), 1497 left, top, width, height, 1498 srcConfig, buffer, rowBytes, flags); 1499 } 1500#endif 1501 1502 // We ignore the preferred config unless it is a R/B swap of the src config. In that case 1503 // we will upload the original src data to a scratch texture but we will spoof it as the swapped 1504 // config. This scratch will then have R and B swapped. We correct for this by swapping again 1505 // when drawing the scratch to the dst using a conversion effect. 
1506 bool swapRAndB = false; 1507 GrPixelConfig writeConfig = srcConfig; 1508 if (GrPixelConfigSwapRAndB(srcConfig) == 1509 fGpu->preferredWritePixelsConfig(srcConfig, target->config())) { 1510 writeConfig = GrPixelConfigSwapRAndB(srcConfig); 1511 swapRAndB = true; 1512 } 1513 1514 GrTextureDesc desc; 1515 desc.fWidth = width; 1516 desc.fHeight = height; 1517 desc.fConfig = writeConfig; 1518 GrAutoScratchTexture ast(this, desc); 1519 GrTexture* texture = ast.texture(); 1520 if (NULL == texture) { 1521 return false; 1522 } 1523 1524 SkAutoTUnref<const GrEffectRef> effect; 1525 SkMatrix textureMatrix; 1526 textureMatrix.setIDiv(texture->width(), texture->height()); 1527 1528 // allocate a tmp buffer and sw convert the pixels to premul 1529 SkAutoSTMalloc<128 * 128, uint32_t> tmpPixels(0); 1530 1531 if (kUnpremul_PixelOpsFlag & flags) { 1532 if (!GrPixelConfigIs8888(srcConfig)) { 1533 return false; 1534 } 1535 effect.reset(this->createUPMToPMEffect(texture, swapRAndB, textureMatrix)); 1536 // handle the unpremul step on the CPU if we couldn't create an effect to do it. 
1537 if (NULL == effect) { 1538 SkCanvas::Config8888 srcConfig8888, dstConfig8888; 1539 SkDEBUGCODE(bool success = ) 1540 grconfig_to_config8888(srcConfig, true, &srcConfig8888); 1541 SkASSERT(success); 1542 SkDEBUGCODE(success = ) 1543 grconfig_to_config8888(srcConfig, false, &dstConfig8888); 1544 SkASSERT(success); 1545 const uint32_t* src = reinterpret_cast<const uint32_t*>(buffer); 1546 tmpPixels.reset(width * height); 1547 SkConvertConfig8888Pixels(tmpPixels.get(), 4 * width, dstConfig8888, 1548 src, rowBytes, srcConfig8888, 1549 width, height); 1550 buffer = tmpPixels.get(); 1551 rowBytes = 4 * width; 1552 } 1553 } 1554 if (NULL == effect) { 1555 effect.reset(GrConfigConversionEffect::Create(texture, 1556 swapRAndB, 1557 GrConfigConversionEffect::kNone_PMConversion, 1558 textureMatrix)); 1559 } 1560 1561 if (!this->writeTexturePixels(texture, 1562 0, 0, width, height, 1563 writeConfig, buffer, rowBytes, 1564 flags & ~kUnpremul_PixelOpsFlag)) { 1565 return false; 1566 } 1567 1568 // writeRenderTargetPixels can be called in the midst of drawing another 1569 // object (e.g., when uploading a SW path rendering to the gpu while 1570 // drawing a rect) so preserve the current geometry. 1571 SkMatrix matrix; 1572 matrix.setTranslate(SkIntToScalar(left), SkIntToScalar(top)); 1573 GrDrawTarget::AutoGeometryAndStatePush agasp(fGpu, GrDrawTarget::kReset_ASRInit, &matrix); 1574 GrDrawState* drawState = fGpu->drawState(); 1575 SkASSERT(effect); 1576 drawState->addColorEffect(effect); 1577 1578 drawState->setRenderTarget(target); 1579 1580 fGpu->drawSimpleRect(SkRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height)), NULL); 1581 return true; 1582} 1583//////////////////////////////////////////////////////////////////////////////// 1584 1585GrDrawTarget* GrContext::prepareToDraw(const GrPaint* paint, 1586 BufferedDraw buffered, 1587 AutoRestoreEffects* are) { 1588 // All users of this draw state should be freeing up all effects when they're done. 
1589 // Otherwise effects that own resources may keep those resources alive indefinitely. 1590 SkASSERT(0 == fDrawState->numColorStages() && 0 == fDrawState->numCoverageStages()); 1591 1592 if (kNo_BufferedDraw == buffered && kYes_BufferedDraw == fLastDrawWasBuffered) { 1593 fDrawBuffer->flush(); 1594 fLastDrawWasBuffered = kNo_BufferedDraw; 1595 } 1596 ASSERT_OWNED_RESOURCE(fRenderTarget.get()); 1597 if (NULL != paint) { 1598 SkASSERT(NULL != are); 1599 are->set(fDrawState); 1600 fDrawState->setFromPaint(*paint, fViewMatrix, fRenderTarget.get()); 1601#if GR_DEBUG_PARTIAL_COVERAGE_CHECK 1602 if ((paint->hasMask() || 0xff != paint->fCoverage) && 1603 !fGpu->canApplyCoverage()) { 1604 GrPrintf("Partial pixel coverage will be incorrectly blended.\n"); 1605 } 1606#endif 1607 } else { 1608 fDrawState->reset(fViewMatrix); 1609 fDrawState->setRenderTarget(fRenderTarget.get()); 1610 } 1611 GrDrawTarget* target; 1612 if (kYes_BufferedDraw == buffered) { 1613 fLastDrawWasBuffered = kYes_BufferedDraw; 1614 target = fDrawBuffer; 1615 } else { 1616 SkASSERT(kNo_BufferedDraw == buffered); 1617 fLastDrawWasBuffered = kNo_BufferedDraw; 1618 target = fGpu; 1619 } 1620 fDrawState->setState(GrDrawState::kClip_StateBit, NULL != fClip && 1621 !fClip->fClipStack->isWideOpen()); 1622 target->setClip(fClip); 1623 SkASSERT(fDrawState == target->drawState()); 1624 return target; 1625} 1626 1627/* 1628 * This method finds a path renderer that can draw the specified path on 1629 * the provided target. 1630 * Due to its expense, the software path renderer has split out so it can 1631 * can be individually allowed/disallowed via the "allowSW" boolean. 
1632 */ 1633GrPathRenderer* GrContext::getPathRenderer(const SkPath& path, 1634 const SkStrokeRec& stroke, 1635 const GrDrawTarget* target, 1636 bool allowSW, 1637 GrPathRendererChain::DrawType drawType, 1638 GrPathRendererChain::StencilSupport* stencilSupport) { 1639 1640 if (NULL == fPathRendererChain) { 1641 fPathRendererChain = SkNEW_ARGS(GrPathRendererChain, (this)); 1642 } 1643 1644 GrPathRenderer* pr = fPathRendererChain->getPathRenderer(path, 1645 stroke, 1646 target, 1647 drawType, 1648 stencilSupport); 1649 1650 if (NULL == pr && allowSW) { 1651 if (NULL == fSoftwarePathRenderer) { 1652 fSoftwarePathRenderer = SkNEW_ARGS(GrSoftwarePathRenderer, (this)); 1653 } 1654 pr = fSoftwarePathRenderer; 1655 } 1656 1657 return pr; 1658} 1659 1660//////////////////////////////////////////////////////////////////////////////// 1661 1662bool GrContext::isConfigRenderable(GrPixelConfig config) const { 1663 return fGpu->isConfigRenderable(config); 1664} 1665 1666static inline intptr_t setOrClear(intptr_t bits, int shift, intptr_t pred) { 1667 intptr_t mask = 1 << shift; 1668 if (pred) { 1669 bits |= mask; 1670 } else { 1671 bits &= ~mask; 1672 } 1673 return bits; 1674} 1675 1676void GrContext::setupDrawBuffer() { 1677 SkASSERT(NULL == fDrawBuffer); 1678 SkASSERT(NULL == fDrawBufferVBAllocPool); 1679 SkASSERT(NULL == fDrawBufferIBAllocPool); 1680 1681 fDrawBufferVBAllocPool = 1682 SkNEW_ARGS(GrVertexBufferAllocPool, (fGpu, false, 1683 DRAW_BUFFER_VBPOOL_BUFFER_SIZE, 1684 DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS)); 1685 fDrawBufferIBAllocPool = 1686 SkNEW_ARGS(GrIndexBufferAllocPool, (fGpu, false, 1687 DRAW_BUFFER_IBPOOL_BUFFER_SIZE, 1688 DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS)); 1689 1690 fDrawBuffer = SkNEW_ARGS(GrInOrderDrawBuffer, (fGpu, 1691 fDrawBufferVBAllocPool, 1692 fDrawBufferIBAllocPool)); 1693 1694 fDrawBuffer->setDrawState(fDrawState); 1695} 1696 1697GrDrawTarget* GrContext::getTextTarget() { 1698 return this->prepareToDraw(NULL, BUFFERED_DRAW, NULL); 1699} 1700 
1701const GrIndexBuffer* GrContext::getQuadIndexBuffer() const { 1702 return fGpu->getQuadIndexBuffer(); 1703} 1704 1705namespace { 1706void test_pm_conversions(GrContext* ctx, int* pmToUPMValue, int* upmToPMValue) { 1707 GrConfigConversionEffect::PMConversion pmToUPM; 1708 GrConfigConversionEffect::PMConversion upmToPM; 1709 GrConfigConversionEffect::TestForPreservingPMConversions(ctx, &pmToUPM, &upmToPM); 1710 *pmToUPMValue = pmToUPM; 1711 *upmToPMValue = upmToPM; 1712} 1713} 1714 1715const GrEffectRef* GrContext::createPMToUPMEffect(GrTexture* texture, 1716 bool swapRAndB, 1717 const SkMatrix& matrix) { 1718 if (!fDidTestPMConversions) { 1719 test_pm_conversions(this, &fPMToUPMConversion, &fUPMToPMConversion); 1720 fDidTestPMConversions = true; 1721 } 1722 GrConfigConversionEffect::PMConversion pmToUPM = 1723 static_cast<GrConfigConversionEffect::PMConversion>(fPMToUPMConversion); 1724 if (GrConfigConversionEffect::kNone_PMConversion != pmToUPM) { 1725 return GrConfigConversionEffect::Create(texture, swapRAndB, pmToUPM, matrix); 1726 } else { 1727 return NULL; 1728 } 1729} 1730 1731const GrEffectRef* GrContext::createUPMToPMEffect(GrTexture* texture, 1732 bool swapRAndB, 1733 const SkMatrix& matrix) { 1734 if (!fDidTestPMConversions) { 1735 test_pm_conversions(this, &fPMToUPMConversion, &fUPMToPMConversion); 1736 fDidTestPMConversions = true; 1737 } 1738 GrConfigConversionEffect::PMConversion upmToPM = 1739 static_cast<GrConfigConversionEffect::PMConversion>(fUPMToPMConversion); 1740 if (GrConfigConversionEffect::kNone_PMConversion != upmToPM) { 1741 return GrConfigConversionEffect::Create(texture, swapRAndB, upmToPM, matrix); 1742 } else { 1743 return NULL; 1744 } 1745} 1746 1747/////////////////////////////////////////////////////////////////////////////// 1748#if GR_CACHE_STATS 1749void GrContext::printCacheStats() const { 1750 fTextureCache->printStats(); 1751} 1752#endif 1753