// GrContext.cpp revision 395ef0568dac793020f66cbec4b6ef86736d4db0
/*
 * Copyright 2011 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrContext.h"

#include "effects/GrConfigConversionEffect.h"
#include "effects/GrDashingEffect.h"
#include "effects/GrSingleTextureEffect.h"

#include "GrAARectRenderer.h"
#include "GrBufferAllocPool.h"
#include "GrGpu.h"
#include "GrGpuResource.h"
#include "GrGpuResourceCacheAccess.h"
#include "GrDistanceFieldTextContext.h"
#include "GrDrawTargetCaps.h"
#include "GrIndexBuffer.h"
#include "GrInOrderDrawBuffer.h"
#include "GrLayerCache.h"
#include "GrOvalRenderer.h"
#include "GrPathRenderer.h"
#include "GrPathUtils.h"
#include "GrResourceCache.h"
#include "GrResourceCache2.h"
#include "GrSoftwarePathRenderer.h"
#include "GrStencilBuffer.h"
#include "GrStencilAndCoverTextContext.h"
#include "GrStrokeInfo.h"
#include "GrSurfacePriv.h"
#include "GrTextStrike.h"
#include "GrTexturePriv.h"
#include "GrTraceMarker.h"
#include "GrTracing.h"
#include "SkDashPathPriv.h"
#include "SkConfig8888.h"
#include "SkGr.h"
#include "SkRRect.h"
#include "SkStrokeRec.h"
#include "SkTLazy.h"
#include "SkTLS.h"
#include "SkTraceEvent.h"

#ifdef SK_DEBUG
    // change this to a 1 to see notifications when partial coverage fails
    #define GR_DEBUG_PARTIAL_COVERAGE_CHECK 0
#else
    #define GR_DEBUG_PARTIAL_COVERAGE_CHECK 0
#endif

// Default budget for the GPU resource cache: a count limit and a byte limit
// (the MB limit constant is scaled to bytes here).
static const size_t MAX_RESOURCE_CACHE_COUNT = GR_DEFAULT_RESOURCE_CACHE_COUNT_LIMIT;
static const size_t MAX_RESOURCE_CACHE_BYTES = GR_DEFAULT_RESOURCE_CACHE_MB_LIMIT * 1024 * 1024;

// Sizing for the vertex-buffer pool that backs the in-order draw buffer:
// 32KB buffers, 4 preallocated.
static const size_t DRAW_BUFFER_VBPOOL_BUFFER_SIZE = 1 << 15;
static const int DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS = 4;

// Sizing for the corresponding index-buffer pool: 2KB buffers, 4 preallocated.
static const size_t DRAW_BUFFER_IBPOOL_BUFFER_SIZE = 1 << 11;
static const int DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS = 4;

// Asserts that a resource, if non-NULL, was created by this context.
#define ASSERT_OWNED_RESOURCE(R) SkASSERT(!(R) || (R)->getContext() == this)
// Glorified typedef to avoid including GrDrawState.h in GrContext.h
class GrContext::AutoRestoreEffects : public GrDrawState::AutoRestoreEffects {};

// RAII helper: on destruction, flushes the context if an over-budget cache
// requested a flush (see OverbudgetCB) while this object was alive.
class GrContext::AutoCheckFlush {
public:
    AutoCheckFlush(GrContext* context) : fContext(context) { SkASSERT(context); }

    ~AutoCheckFlush() {
        if (fContext->fFlushToReduceCacheSize) {
            fContext->flush();
        }
    }

private:
    GrContext* fContext;
};

// Factory: allocates a context (default Options if opts is NULL) and binds it
// to the 3D API. Returns NULL (after unref'ing the partial context) on failure.
GrContext* GrContext::Create(GrBackend backend, GrBackendContext backendContext,
                             const Options* opts) {
    GrContext* context;
    if (NULL == opts) {
        context = SkNEW_ARGS(GrContext, (Options()));
    } else {
        context = SkNEW_ARGS(GrContext, (*opts));
    }

    if (context->init(backend, backendContext)) {
        return context;
    } else {
        context->unref();
        return NULL;
    }
}

// All subsystem pointers start NULL; real setup happens in init()/initCommon().
GrContext::GrContext(const Options& opts) : fOptions(opts) {
    fDrawState = NULL;
    fGpu = NULL;
    fClip = NULL;
    fPathRendererChain = NULL;
    fSoftwarePathRenderer = NULL;
    fResourceCache = NULL;
    fResourceCache2 = NULL;
    fFontCache = NULL;
    fDrawBuffer = NULL;
    fDrawBufferVBAllocPool = NULL;
    fDrawBufferIBAllocPool = NULL;
    fFlushToReduceCacheSize = false;
    fAARectRenderer = NULL;
    fOvalRenderer = NULL;
    fViewMatrix.reset();
    fMaxTextureSizeOverride = 1 << 20;
}

// Creates the backend-specific GrGpu; returns false (leaving fGpu NULL) if the
// backend could not be created.
bool GrContext::init(GrBackend backend, GrBackendContext backendContext) {
    SkASSERT(NULL == fGpu);

    fGpu = GrGpu::Create(backend, backendContext, this);
    if (NULL == fGpu) {
        return false;
    }
    this->initCommon();
    return true;
}

// Backend-independent setup: caches, renderers, and the buffered draw target.
// Requires fGpu to already be created.
void GrContext::initCommon() {
    fDrawState = SkNEW(GrDrawState);

    fResourceCache = SkNEW_ARGS(GrResourceCache, (fGpu->caps(),
                                                  MAX_RESOURCE_CACHE_COUNT,
                                                  MAX_RESOURCE_CACHE_BYTES));
    fResourceCache->setOverbudgetCallback(OverbudgetCB, this);
    fResourceCache2 = SkNEW(GrResourceCache2);

    fFontCache = SkNEW_ARGS(GrFontCache, (fGpu));

    fLayerCache.reset(SkNEW_ARGS(GrLayerCache, (this)));

    fAARectRenderer = SkNEW_ARGS(GrAARectRenderer, (fGpu));
    fOvalRenderer = SkNEW(GrOvalRenderer);

    fDidTestPMConversions = false;

    this->setupDrawBuffer();
}

GrContext::~GrContext() {
    // init() failed; nothing below was ever created.
    if (NULL == fGpu) {
        return;
    }

    this->flush();

    // Run client-registered cleanup callbacks before tearing anything down.
    for (int i = 0; i < fCleanUpData.count(); ++i) {
        (*fCleanUpData[i].fFunc)(this, fCleanUpData[i].fInfo);
    }

    SkDELETE(fResourceCache2);
    fResourceCache2 = NULL;
    SkDELETE(fResourceCache);
    fResourceCache = NULL;
    SkDELETE(fFontCache);
    SkDELETE(fDrawBuffer);
    SkDELETE(fDrawBufferVBAllocPool);
    SkDELETE(fDrawBufferIBAllocPool);

    fAARectRenderer->unref();
    fOvalRenderer->unref();

    fGpu->unref();
    SkSafeUnref(fPathRendererChain);
    SkSafeUnref(fSoftwarePathRenderer);
    fDrawState->unref();
}

// Called when the underlying 3D context has been lost: releases CPU-side
// bookkeeping without touching the (now invalid) GPU objects.
void GrContext::abandonContext() {
    // abandon first so destructors don't try to free the resources in the API.
    fResourceCache2->abandonAll();

    fGpu->contextAbandoned();

    // a path renderer may be holding onto resources that
    // are now unusable
    SkSafeSetNull(fPathRendererChain);
    SkSafeSetNull(fSoftwarePathRenderer);

    delete fDrawBuffer;
    fDrawBuffer = NULL;

    delete fDrawBufferVBAllocPool;
    fDrawBufferVBAllocPool = NULL;

    delete fDrawBufferIBAllocPool;
    fDrawBufferIBAllocPool = NULL;

    fAARectRenderer->reset();
    fOvalRenderer->reset();

    fResourceCache->purgeAllUnlocked();

    fFontCache->freeAll();
    fLayerCache->freeAll();
}

void GrContext::resetContext(uint32_t state) {
    fGpu->markContextDirty(state);
}

// Flushes pending work, then releases every GPU resource that is not locked;
// unlike abandonContext() the context remains usable afterwards.
void GrContext::freeGpuResources() {
    this->flush();

    if (fDrawBuffer) {
        fDrawBuffer->purgeResources();
    }

    fAARectRenderer->reset();
    fOvalRenderer->reset();

    fResourceCache->purgeAllUnlocked();
    fFontCache->freeAll();
    fLayerCache->freeAll();
    // a path renderer may be holding onto resources
    SkSafeSetNull(fPathRendererChain);
    SkSafeSetNull(fSoftwarePathRenderer);
}

// Reports current resource-cache usage; either out-param may be NULL.
void GrContext::getResourceCacheUsage(int* resourceCount, size_t* resourceBytes) const {
    if (resourceCount) {
        *resourceCount = fResourceCache->getCachedResourceCount();
    }
    if (resourceBytes) {
        *resourceBytes = fResourceCache->getCachedResourceBytes();
    }
}

// Picks the text rendering backend: NVPR (stencil-and-cover) when the GPU
// supports path rendering and the target is MSAA with a stencil buffer,
// otherwise the distance-field text context.
GrTextContext* GrContext::createTextContext(GrRenderTarget* renderTarget,
                                            const SkDeviceProperties& leakyProperties,
                                            bool enableDistanceFieldFonts) {
    if (fGpu->caps()->pathRenderingSupport() && renderTarget->getStencilBuffer() &&
        renderTarget->isMultisampled()) {
        return GrStencilAndCoverTextContext::Create(this, leakyProperties);
    }

    return GrDistanceFieldTextContext::Create(this, leakyProperties, enableDistanceFieldFonts);
}

////////////////////////////////////////////////////////////////////////////////

// Looks up a texture by its content key; returns a ref'ed texture or NULL on
// a cache miss.
GrTexture* GrContext::findAndRefTexture(const GrSurfaceDesc& desc,
                                        const GrCacheID& cacheID,
                                        const GrTextureParams* params) {
    GrResourceKey resourceKey = GrTexturePriv::ComputeKey(fGpu, params, desc, cacheID);

    GrGpuResource* resource = this->findAndRefCachedResource(resourceKey);
    if (resource) {
        SkASSERT(static_cast<GrSurface*>(resource)->asTexture());
        return static_cast<GrSurface*>(resource)->asTexture();
    }
    return NULL;
}

// Existence check only — does not ref or adopt the texture.
bool GrContext::isTextureInCache(const GrSurfaceDesc& desc,
                                 const GrCacheID& cacheID,
                                 const GrTextureParams* params) const {
    GrResourceKey resourceKey = GrTexturePriv::ComputeKey(fGpu, params, desc, cacheID);
    return fResourceCache2->hasContentKey(resourceKey);
}

// Registers a stencil buffer in the cache, keyed by its dimensions and sample
// count so it can be shared by same-sized render targets.
void GrContext::addStencilBuffer(GrStencilBuffer* sb) {
    ASSERT_OWNED_RESOURCE(sb);

    GrResourceKey resourceKey = GrStencilBuffer::ComputeKey(sb->width(),
                                                            sb->height(),
                                                            sb->numSamples());
    fResourceCache->addResource(resourceKey, sb);
}

// Returns a ref'ed cached stencil buffer matching (width, height, sampleCnt),
// or NULL if none is cached.
GrStencilBuffer* GrContext::findAndRefStencilBuffer(int width, int height, int sampleCnt) {
    GrResourceKey resourceKey = GrStencilBuffer::ComputeKey(width, height, sampleCnt);
    GrGpuResource* resource = this->findAndRefCachedResource(resourceKey);
    return static_cast<GrStencilBuffer*>(resource);
}

// CPU nearest-neighbor scale of a tightly-packed pixel buffer (bpp bytes per
// pixel) from srcW x srcH to dstW x dstH using 16.16 fixed-point stepping.
// Note: no filtering is performed (see TODO in createResizedTexture).
static void stretch_image(void* dst,
                          int dstW,
                          int dstH,
                          const void* src,
                          int srcW,
                          int srcH,
                          size_t bpp) {
    SkFixed dx = (srcW << 16) / dstW;
    SkFixed dy = (srcH << 16) / dstH;

    // start sampling at the center of the first destination pixel
    SkFixed y = dy >> 1;

    size_t dstXLimit = dstW*bpp;
    for (int j = 0; j < dstH; ++j) {
        SkFixed x = dx >> 1;
        const uint8_t* srcRow = reinterpret_cast<const uint8_t *>(src) + (y>>16)*srcW*bpp;
        uint8_t* dstRow = reinterpret_cast<uint8_t *>(dst) + j*dstW*bpp;
        for (size_t i = 0; i < dstXLimit; i += bpp) {
            memcpy(dstRow + i, srcRow + (x>>16)*bpp, bpp);
            x += dx;
        }
        y += dy;
    }
}

namespace {

// position + local coordinate
extern const GrVertexAttrib gVertexAttribs[] = {
    {kVec2f_GrVertexAttribType, 0,               kPosition_GrVertexAttribBinding},
    {kVec2f_GrVertexAttribType, sizeof(SkPoint), kLocalCoord_GrVertexAttribBinding}
};

};

// The desired texture is NPOT and tiled but that isn't supported by
// the current hardware. Resize the texture to be a POT.
// Preferred path: draw the clamped texture into a POT render target
// (optionally filtered); fallback: unfiltered CPU stretch via stretch_image.
GrTexture* GrContext::createResizedTexture(const GrSurfaceDesc& desc,
                                           const GrCacheID& cacheID,
                                           const void* srcData,
                                           size_t rowBytes,
                                           bool filter) {
    SkAutoTUnref<GrTexture> clampedTexture(this->findAndRefTexture(desc, cacheID, NULL));
    if (NULL == clampedTexture) {
        clampedTexture.reset(this->createTexture(NULL, desc, cacheID, srcData, rowBytes));

        if (NULL == clampedTexture) {
            return NULL;
        }
    }

    GrSurfaceDesc rtDesc = desc;
    rtDesc.fFlags = rtDesc.fFlags |
                    kRenderTarget_GrSurfaceFlag |
                    kNoStencil_GrSurfaceFlag;
    rtDesc.fWidth  = GrNextPow2(desc.fWidth);
    rtDesc.fHeight = GrNextPow2(desc.fHeight);

    GrTexture* texture = fGpu->createTexture(rtDesc, NULL, 0);

    if (texture) {
        GrDrawTarget::AutoStateRestore asr(fDrawBuffer, GrDrawTarget::kReset_ASRInit);
        GrDrawState* drawState = fDrawBuffer->drawState();
        drawState->setRenderTarget(texture->asRenderTarget());

        // if filtering is not desired then we want to ensure all
        // texels in the resampled image are copies of texels from
        // the original.
        GrTextureParams params(SkShader::kClamp_TileMode,
                               filter ? GrTextureParams::kBilerp_FilterMode :
                                        GrTextureParams::kNone_FilterMode);
        drawState->addColorTextureProcessor(clampedTexture, SkMatrix::I(), params);

        drawState->setVertexAttribs<gVertexAttribs>(SK_ARRAY_COUNT(gVertexAttribs),
                                                    2 * sizeof(SkPoint));

        GrDrawTarget::AutoReleaseGeometry arg(fDrawBuffer, 4, 0);

        if (arg.succeeded()) {
            SkPoint* verts = (SkPoint*) arg.vertices();
            // fan of positions followed by a fan of unit-square local coords
            verts[0].setIRectFan(0, 0, texture->width(), texture->height(), 2 * sizeof(SkPoint));
            verts[1].setIRectFan(0, 0, 1, 1, 2 * sizeof(SkPoint));
            fDrawBuffer->drawNonIndexed(kTriangleFan_GrPrimitiveType, 0, 4);
        }
    } else {
        // TODO: Our CPU stretch doesn't filter. But we create separate
        // stretched textures when the texture params is either filtered or
        // not. Either implement filtered stretch blit on CPU or just create
        // one when FBO case fails.

        rtDesc.fFlags = kNone_GrSurfaceFlags;
        // no longer need to clamp at min RT size.
        rtDesc.fWidth  = GrNextPow2(desc.fWidth);
        rtDesc.fHeight = GrNextPow2(desc.fHeight);

        // We shouldn't be resizing a compressed texture.
        SkASSERT(!GrPixelConfigIsCompressed(desc.fConfig));

        size_t bpp = GrBytesPerPixel(desc.fConfig);
        GrAutoMalloc<128*128*4> stretchedPixels(bpp * rtDesc.fWidth * rtDesc.fHeight);
        stretch_image(stretchedPixels.get(), rtDesc.fWidth, rtDesc.fHeight,
                      srcData, desc.fWidth, desc.fHeight, bpp);

        size_t stretchedRowBytes = rtDesc.fWidth * bpp;

        texture = fGpu->createTexture(rtDesc, stretchedPixels.get(), stretchedRowBytes);
        SkASSERT(texture);
    }

    return texture;
}

// Creates a content-keyed texture, resizing to POT first when the key demands
// it; on success the texture is added to the cache and *cacheKey (if given)
// receives the key.
GrTexture* GrContext::createTexture(const GrTextureParams* params,
                                    const GrSurfaceDesc& desc,
                                    const GrCacheID& cacheID,
                                    const void* srcData,
                                    size_t rowBytes,
                                    GrResourceKey* cacheKey) {
    GrResourceKey resourceKey = GrTexturePriv::ComputeKey(fGpu, params, desc, cacheID);

    GrTexture* texture;
    if (GrTexturePriv::NeedsResizing(resourceKey)) {
        // We do not know how to resize compressed textures.
        SkASSERT(!GrPixelConfigIsCompressed(desc.fConfig));

        texture = this->createResizedTexture(desc, cacheID,
                                             srcData, rowBytes,
                                             GrTexturePriv::NeedsBilerp(resourceKey));
    } else {
        texture = fGpu->createTexture(desc, srcData, rowBytes);
    }

    if (texture) {
        fResourceCache->addResource(resourceKey, texture);

        if (cacheKey) {
            *cacheKey = resourceKey;
        }
    }

    return texture;
}

// Allocates an uninitialized texture and registers it under its scratch key.
GrTexture* GrContext::createNewScratchTexture(const GrSurfaceDesc& desc) {
    GrTexture* texture = fGpu->createTexture(desc, NULL, 0);
    if (!texture) {
        return NULL;
    }
    fResourceCache->addResource(texture->cacheAccess().getScratchKey(), texture);
    return texture;
}

// Returns a ref'ed scratch texture matching inDesc. kApprox_ScratchTexMatch
// rounds dimensions up to pow2 (min 16) to improve reuse; cache misses in
// approx mode retry with relaxed flags before falling back to a new texture.
GrTexture* GrContext::refScratchTexture(const GrSurfaceDesc& inDesc, ScratchTexMatch match,
                                        bool calledDuringFlush) {
    // kNoStencil has no meaning if kRT isn't set.
    SkASSERT((inDesc.fFlags & kRenderTarget_GrSurfaceFlag) ||
             !(inDesc.fFlags & kNoStencil_GrSurfaceFlag));

    // Make sure caller has checked for renderability if kRT is set.
    SkASSERT(!(inDesc.fFlags & kRenderTarget_GrSurfaceFlag) ||
             this->isConfigRenderable(inDesc.fConfig, inDesc.fSampleCnt > 0));

    SkTCopyOnFirstWrite<GrSurfaceDesc> desc(inDesc);

    if (fGpu->caps()->reuseScratchTextures() || (desc->fFlags & kRenderTarget_GrSurfaceFlag)) {
        GrSurfaceFlags origFlags = desc->fFlags;
        if (kApprox_ScratchTexMatch == match) {
            // bin by pow2 with a reasonable min
            static const int MIN_SIZE = 16;
            GrSurfaceDesc* wdesc = desc.writable();
            wdesc->fWidth  = SkTMax(MIN_SIZE, GrNextPow2(desc->fWidth));
            wdesc->fHeight = SkTMax(MIN_SIZE, GrNextPow2(desc->fHeight));
        }

        do {
            GrResourceKey key = GrTexturePriv::ComputeScratchKey(*desc);
            uint32_t scratchFlags = 0;
            if (calledDuringFlush) {
                scratchFlags = GrResourceCache2::kRequireNoPendingIO_ScratchFlag;
            } else if (!(desc->fFlags & kRenderTarget_GrSurfaceFlag)) {
                // If it is not a render target then it will most likely be populated by
                // writePixels() which will trigger a flush if the texture has pending IO.
                scratchFlags = GrResourceCache2::kPreferNoPendingIO_ScratchFlag;
            }
            GrGpuResource* resource = fResourceCache2->findAndRefScratchResource(key, scratchFlags);
            if (resource) {
                fResourceCache->makeResourceMRU(resource);
                return static_cast<GrSurface*>(resource)->asTexture();
            }

            if (kExact_ScratchTexMatch == match) {
                break;
            }
            // We had a cache miss and we are in approx mode, relax the fit of the flags.

            // We no longer try to reuse textures that were previously used as render targets in
            // situations where no RT is needed; doing otherwise can confuse the video driver and
            // cause significant performance problems in some cases.
            if (desc->fFlags & kNoStencil_GrSurfaceFlag) {
                desc.writable()->fFlags = desc->fFlags & ~kNoStencil_GrSurfaceFlag;
            } else {
                break;
            }

        } while (true);

        // restore the original flags before allocating a brand-new texture
        desc.writable()->fFlags = origFlags;
    }

    GrTexture* texture = this->createNewScratchTexture(*desc);
    SkASSERT(NULL == texture ||
             texture->cacheAccess().getScratchKey() == GrTexturePriv::ComputeScratchKey(*desc));
    return texture;
}

// Resource-cache over-budget callback: defers the purge by requesting a flush
// (performed by the next AutoCheckFlush destructor) rather than flushing here.
bool GrContext::OverbudgetCB(void* data) {
    SkASSERT(data);

    GrContext* context = reinterpret_cast<GrContext*>(data);

    // Flush the InOrderDrawBuffer to possibly free up some textures
    context->fFlushToReduceCacheSize = true;

    return true;
}


// Creates a texture that bypasses the resource cache entirely.
GrTexture* GrContext::createUncachedTexture(const GrSurfaceDesc& descIn,
                                            void* srcData,
                                            size_t rowBytes) {
    GrSurfaceDesc descCopy = descIn;
    return fGpu->createTexture(descCopy, srcData, rowBytes);
}

void GrContext::getResourceCacheLimits(int* maxTextures, size_t* maxTextureBytes) const {
    fResourceCache->getLimits(maxTextures, maxTextureBytes);
}

void GrContext::setResourceCacheLimits(int maxTextures, size_t maxTextureBytes) {
    fResourceCache->setLimits(maxTextures, maxTextureBytes);
}

// Caps-reported max texture size, clamped by the test-only override.
int GrContext::getMaxTextureSize() const {
    return SkTMin(fGpu->caps()->maxTextureSize(), fMaxTextureSizeOverride);
}

int GrContext::getMaxRenderTargetSize() const {
    return fGpu->caps()->maxRenderTargetSize();
}

int GrContext::getMaxSampleCount() const {
    return fGpu->caps()->maxSampleCount();
}

///////////////////////////////////////////////////////////////////////////////

// Wraps a client-created backend texture object; ownership semantics are
// determined by the backend descriptor.
GrTexture* GrContext::wrapBackendTexture(const GrBackendTextureDesc& desc) {
    return fGpu->wrapBackendTexture(desc);
}

GrRenderTarget* GrContext::wrapBackendRenderTarget(const GrBackendRenderTargetDesc& desc) {
    return fGpu->wrapBackendRenderTarget(desc);
}

554/////////////////////////////////////////////////////////////////////////////// 555 556bool GrContext::supportsIndex8PixelConfig(const GrTextureParams* params, 557 int width, int height) const { 558 const GrDrawTargetCaps* caps = fGpu->caps(); 559 if (!caps->isConfigTexturable(kIndex_8_GrPixelConfig)) { 560 return false; 561 } 562 563 bool isPow2 = SkIsPow2(width) && SkIsPow2(height); 564 565 if (!isPow2) { 566 bool tiled = params && params->isTiled(); 567 if (tiled && !caps->npotTextureTileSupport()) { 568 return false; 569 } 570 } 571 return true; 572} 573 574 575//////////////////////////////////////////////////////////////////////////////// 576 577void GrContext::clear(const SkIRect* rect, 578 const GrColor color, 579 bool canIgnoreRect, 580 GrRenderTarget* renderTarget) { 581 ASSERT_OWNED_RESOURCE(renderTarget); 582 SkASSERT(renderTarget); 583 584 AutoRestoreEffects are; 585 AutoCheckFlush acf(this); 586 GR_CREATE_TRACE_MARKER_CONTEXT("GrContext::clear", this); 587 GrDrawTarget* target = this->prepareToDraw(NULL, &are, &acf); 588 if (NULL == target) { 589 return; 590 } 591 target->clear(rect, color, canIgnoreRect, renderTarget); 592} 593 594void GrContext::drawPaint(const GrPaint& origPaint) { 595 // set rect to be big enough to fill the space, but not super-huge, so we 596 // don't overflow fixed-point implementations 597 SkRect r; 598 r.setLTRB(0, 0, 599 SkIntToScalar(getRenderTarget()->width()), 600 SkIntToScalar(getRenderTarget()->height())); 601 SkMatrix inverse; 602 SkTCopyOnFirstWrite<GrPaint> paint(origPaint); 603 AutoMatrix am; 604 GR_CREATE_TRACE_MARKER_CONTEXT("GrContext::drawPaint", this); 605 606 // We attempt to map r by the inverse matrix and draw that. mapRect will 607 // map the four corners and bound them with a new rect. This will not 608 // produce a correct result for some perspective matrices. 
609 if (!this->getMatrix().hasPerspective()) { 610 if (!fViewMatrix.invert(&inverse)) { 611 SkDebugf("Could not invert matrix\n"); 612 return; 613 } 614 inverse.mapRect(&r); 615 } else { 616 if (!am.setIdentity(this, paint.writable())) { 617 SkDebugf("Could not invert matrix\n"); 618 return; 619 } 620 } 621 // by definition this fills the entire clip, no need for AA 622 if (paint->isAntiAlias()) { 623 paint.writable()->setAntiAlias(false); 624 } 625 this->drawRect(*paint, r); 626} 627 628#ifdef SK_DEVELOPER 629void GrContext::dumpFontCache() const { 630 fFontCache->dump(); 631} 632#endif 633 634//////////////////////////////////////////////////////////////////////////////// 635 636/* create a triangle strip that strokes the specified triangle. There are 8 637 unique vertices, but we repreat the last 2 to close up. Alternatively we 638 could use an indices array, and then only send 8 verts, but not sure that 639 would be faster. 640 */ 641static void setStrokeRectStrip(SkPoint verts[10], SkRect rect, 642 SkScalar width) { 643 const SkScalar rad = SkScalarHalf(width); 644 rect.sort(); 645 646 verts[0].set(rect.fLeft + rad, rect.fTop + rad); 647 verts[1].set(rect.fLeft - rad, rect.fTop - rad); 648 verts[2].set(rect.fRight - rad, rect.fTop + rad); 649 verts[3].set(rect.fRight + rad, rect.fTop - rad); 650 verts[4].set(rect.fRight - rad, rect.fBottom - rad); 651 verts[5].set(rect.fRight + rad, rect.fBottom + rad); 652 verts[6].set(rect.fLeft + rad, rect.fBottom - rad); 653 verts[7].set(rect.fLeft - rad, rect.fBottom + rad); 654 verts[8] = verts[0]; 655 verts[9] = verts[1]; 656} 657 658static inline bool is_irect(const SkRect& r) { 659 return SkScalarIsInt(r.fLeft) && SkScalarIsInt(r.fTop) && 660 SkScalarIsInt(r.fRight) && SkScalarIsInt(r.fBottom); 661} 662 663static bool apply_aa_to_rect(GrDrawTarget* target, 664 const SkRect& rect, 665 SkScalar strokeWidth, 666 const SkMatrix& combinedMatrix, 667 SkRect* devBoundRect) { 668 if 
(!target->getDrawState().canTweakAlphaForCoverage() && 669 target->shouldDisableCoverageAAForBlend()) { 670#ifdef SK_DEBUG 671 //SkDebugf("Turning off AA to correctly apply blend.\n"); 672#endif 673 return false; 674 } 675 const GrDrawState& drawState = target->getDrawState(); 676 if (drawState.getRenderTarget()->isMultisampled()) { 677 return false; 678 } 679 680#if defined(SHADER_AA_FILL_RECT) || !defined(IGNORE_ROT_AA_RECT_OPT) 681 if (strokeWidth >= 0) { 682#endif 683 if (!combinedMatrix.preservesAxisAlignment()) { 684 return false; 685 } 686 687#if defined(SHADER_AA_FILL_RECT) || !defined(IGNORE_ROT_AA_RECT_OPT) 688 } else { 689 if (!combinedMatrix.preservesRightAngles()) { 690 return false; 691 } 692 } 693#endif 694 695 combinedMatrix.mapRect(devBoundRect, rect); 696 if (strokeWidth < 0) { 697 return !is_irect(*devBoundRect); 698 } 699 700 return true; 701} 702 703static inline bool rect_contains_inclusive(const SkRect& rect, const SkPoint& point) { 704 return point.fX >= rect.fLeft && point.fX <= rect.fRight && 705 point.fY >= rect.fTop && point.fY <= rect.fBottom; 706} 707 708void GrContext::drawRect(const GrPaint& paint, 709 const SkRect& rect, 710 const GrStrokeInfo* strokeInfo) { 711 if (strokeInfo && strokeInfo->isDashed()) { 712 SkPath path; 713 path.addRect(rect); 714 this->drawPath(paint, path, *strokeInfo); 715 return; 716 } 717 718 AutoRestoreEffects are; 719 AutoCheckFlush acf(this); 720 GrDrawTarget* target = this->prepareToDraw(&paint, &are, &acf); 721 if (NULL == target) { 722 return; 723 } 724 725 GR_CREATE_TRACE_MARKER("GrContext::drawRect", target); 726 SkScalar width = NULL == strokeInfo ? -1 : strokeInfo->getStrokeRec().getWidth(); 727 SkMatrix matrix = target->drawState()->getViewMatrix(); 728 729 // Check if this is a full RT draw and can be replaced with a clear. We don't bother checking 730 // cases where the RT is fully inside a stroke. 
731 if (width < 0) { 732 SkRect rtRect; 733 target->getDrawState().getRenderTarget()->getBoundsRect(&rtRect); 734 SkRect clipSpaceRTRect = rtRect; 735 bool checkClip = false; 736 if (this->getClip()) { 737 checkClip = true; 738 clipSpaceRTRect.offset(SkIntToScalar(this->getClip()->fOrigin.fX), 739 SkIntToScalar(this->getClip()->fOrigin.fY)); 740 } 741 // Does the clip contain the entire RT? 742 if (!checkClip || target->getClip()->fClipStack->quickContains(clipSpaceRTRect)) { 743 SkMatrix invM; 744 if (!matrix.invert(&invM)) { 745 return; 746 } 747 // Does the rect bound the RT? 748 SkPoint srcSpaceRTQuad[4]; 749 invM.mapRectToQuad(srcSpaceRTQuad, rtRect); 750 if (rect_contains_inclusive(rect, srcSpaceRTQuad[0]) && 751 rect_contains_inclusive(rect, srcSpaceRTQuad[1]) && 752 rect_contains_inclusive(rect, srcSpaceRTQuad[2]) && 753 rect_contains_inclusive(rect, srcSpaceRTQuad[3])) { 754 // Will it blend? 755 GrColor clearColor; 756 if (paint.isOpaqueAndConstantColor(&clearColor)) { 757 target->clear(NULL, clearColor, true, fRenderTarget); 758 return; 759 } 760 } 761 } 762 } 763 764 SkRect devBoundRect; 765 bool needAA = paint.isAntiAlias() && 766 !target->getDrawState().getRenderTarget()->isMultisampled(); 767 bool doAA = needAA && apply_aa_to_rect(target, rect, width, matrix, &devBoundRect); 768 769 if (doAA) { 770 GrDrawState::AutoViewMatrixRestore avmr; 771 if (!avmr.setIdentity(target->drawState())) { 772 return; 773 } 774 if (width >= 0) { 775 const SkStrokeRec& strokeRec = strokeInfo->getStrokeRec(); 776 fAARectRenderer->strokeAARect(target, rect, matrix, devBoundRect, strokeRec); 777 } else { 778 // filled AA rect 779 fAARectRenderer->fillAARect(target, 780 rect, matrix, devBoundRect); 781 } 782 return; 783 } 784 785 if (width >= 0) { 786 // TODO: consider making static vertex buffers for these cases. 
787 // Hairline could be done by just adding closing vertex to 788 // unitSquareVertexBuffer() 789 790 static const int worstCaseVertCount = 10; 791 target->drawState()->setDefaultVertexAttribs(); 792 GrDrawTarget::AutoReleaseGeometry geo(target, worstCaseVertCount, 0); 793 794 if (!geo.succeeded()) { 795 SkDebugf("Failed to get space for vertices!\n"); 796 return; 797 } 798 799 GrPrimitiveType primType; 800 int vertCount; 801 SkPoint* vertex = geo.positions(); 802 803 if (width > 0) { 804 vertCount = 10; 805 primType = kTriangleStrip_GrPrimitiveType; 806 setStrokeRectStrip(vertex, rect, width); 807 } else { 808 // hairline 809 vertCount = 5; 810 primType = kLineStrip_GrPrimitiveType; 811 vertex[0].set(rect.fLeft, rect.fTop); 812 vertex[1].set(rect.fRight, rect.fTop); 813 vertex[2].set(rect.fRight, rect.fBottom); 814 vertex[3].set(rect.fLeft, rect.fBottom); 815 vertex[4].set(rect.fLeft, rect.fTop); 816 } 817 818 target->drawNonIndexed(primType, 0, vertCount); 819 } else { 820 // filled BW rect 821 target->drawSimpleRect(rect); 822 } 823} 824 825void GrContext::drawRectToRect(const GrPaint& paint, 826 const SkRect& dstRect, 827 const SkRect& localRect, 828 const SkMatrix* localMatrix) { 829 AutoRestoreEffects are; 830 AutoCheckFlush acf(this); 831 GrDrawTarget* target = this->prepareToDraw(&paint, &are, &acf); 832 if (NULL == target) { 833 return; 834 } 835 836 GR_CREATE_TRACE_MARKER("GrContext::drawRectToRect", target); 837 838 target->drawRect(dstRect, &localRect, localMatrix); 839} 840 841namespace { 842 843extern const GrVertexAttrib gPosUVColorAttribs[] = { 844 {kVec2f_GrVertexAttribType, 0, kPosition_GrVertexAttribBinding }, 845 {kVec2f_GrVertexAttribType, sizeof(SkPoint), kLocalCoord_GrVertexAttribBinding }, 846 {kVec4ub_GrVertexAttribType, 2*sizeof(SkPoint), kColor_GrVertexAttribBinding} 847}; 848 849static const size_t kPosUVAttribsSize = 2 * sizeof(SkPoint); 850static const size_t kPosUVColorAttribsSize = 2 * sizeof(SkPoint) + sizeof(GrColor); 851 
852extern const GrVertexAttrib gPosColorAttribs[] = { 853 {kVec2f_GrVertexAttribType, 0, kPosition_GrVertexAttribBinding}, 854 {kVec4ub_GrVertexAttribType, sizeof(SkPoint), kColor_GrVertexAttribBinding}, 855}; 856 857static const size_t kPosAttribsSize = sizeof(SkPoint); 858static const size_t kPosColorAttribsSize = sizeof(SkPoint) + sizeof(GrColor); 859 860static void set_vertex_attributes(GrDrawState* drawState, 861 const SkPoint* texCoords, 862 const GrColor* colors, 863 int* colorOffset, 864 int* texOffset) { 865 *texOffset = -1; 866 *colorOffset = -1; 867 868 if (texCoords && colors) { 869 *texOffset = sizeof(SkPoint); 870 *colorOffset = 2*sizeof(SkPoint); 871 drawState->setVertexAttribs<gPosUVColorAttribs>(3, kPosUVColorAttribsSize); 872 } else if (texCoords) { 873 *texOffset = sizeof(SkPoint); 874 drawState->setVertexAttribs<gPosUVColorAttribs>(2, kPosUVAttribsSize); 875 } else if (colors) { 876 *colorOffset = sizeof(SkPoint); 877 drawState->setVertexAttribs<gPosColorAttribs>(2, kPosColorAttribsSize); 878 } else { 879 drawState->setVertexAttribs<gPosColorAttribs>(1, kPosAttribsSize); 880 } 881} 882 883}; 884 885void GrContext::drawVertices(const GrPaint& paint, 886 GrPrimitiveType primitiveType, 887 int vertexCount, 888 const SkPoint positions[], 889 const SkPoint texCoords[], 890 const GrColor colors[], 891 const uint16_t indices[], 892 int indexCount) { 893 AutoRestoreEffects are; 894 AutoCheckFlush acf(this); 895 GrDrawTarget::AutoReleaseGeometry geo; // must be inside AutoCheckFlush scope 896 897 GrDrawTarget* target = this->prepareToDraw(&paint, &are, &acf); 898 if (NULL == target) { 899 return; 900 } 901 GrDrawState* drawState = target->drawState(); 902 903 GR_CREATE_TRACE_MARKER("GrContext::drawVertices", target); 904 905 int colorOffset = -1, texOffset = -1; 906 set_vertex_attributes(drawState, texCoords, colors, &colorOffset, &texOffset); 907 908 size_t VertexStride = drawState->getVertexStride(); 909 if (!geo.set(target, vertexCount, indexCount)) { 
910 SkDebugf("Failed to get space for vertices!\n"); 911 return; 912 } 913 void* curVertex = geo.vertices(); 914 915 for (int i = 0; i < vertexCount; ++i) { 916 *((SkPoint*)curVertex) = positions[i]; 917 918 if (texOffset >= 0) { 919 *(SkPoint*)((intptr_t)curVertex + texOffset) = texCoords[i]; 920 } 921 if (colorOffset >= 0) { 922 *(GrColor*)((intptr_t)curVertex + colorOffset) = colors[i]; 923 } 924 curVertex = (void*)((intptr_t)curVertex + VertexStride); 925 } 926 927 // we don't currently apply offscreen AA to this path. Need improved 928 // management of GrDrawTarget's geometry to avoid copying points per-tile. 929 if (indices) { 930 uint16_t* curIndex = (uint16_t*)geo.indices(); 931 for (int i = 0; i < indexCount; ++i) { 932 curIndex[i] = indices[i]; 933 } 934 target->drawIndexed(primitiveType, 0, 0, vertexCount, indexCount); 935 } else { 936 target->drawNonIndexed(primitiveType, 0, vertexCount); 937 } 938} 939 940/////////////////////////////////////////////////////////////////////////////// 941 942void GrContext::drawRRect(const GrPaint& paint, 943 const SkRRect& rrect, 944 const GrStrokeInfo& strokeInfo) { 945 if (rrect.isEmpty()) { 946 return; 947 } 948 949 if (strokeInfo.isDashed()) { 950 SkPath path; 951 path.addRRect(rrect); 952 this->drawPath(paint, path, strokeInfo); 953 return; 954 } 955 956 AutoRestoreEffects are; 957 AutoCheckFlush acf(this); 958 GrDrawTarget* target = this->prepareToDraw(&paint, &are, &acf); 959 if (NULL == target) { 960 return; 961 } 962 963 GR_CREATE_TRACE_MARKER("GrContext::drawRRect", target); 964 965 const SkStrokeRec& strokeRec = strokeInfo.getStrokeRec(); 966 967 if (!fOvalRenderer->drawRRect(target, this, paint.isAntiAlias(), rrect, strokeRec)) { 968 SkPath path; 969 path.addRRect(rrect); 970 this->internalDrawPath(target, paint.isAntiAlias(), path, strokeInfo); 971 } 972} 973 974/////////////////////////////////////////////////////////////////////////////// 975 976void GrContext::drawDRRect(const GrPaint& paint, 977 const 
SkRRect& outer, 978 const SkRRect& inner) { 979 if (outer.isEmpty()) { 980 return; 981 } 982 983 AutoRestoreEffects are; 984 AutoCheckFlush acf(this); 985 GrDrawTarget* target = this->prepareToDraw(&paint, &are, &acf); 986 987 GR_CREATE_TRACE_MARKER("GrContext::drawDRRect", target); 988 989 if (!fOvalRenderer->drawDRRect(target, this, paint.isAntiAlias(), outer, inner)) { 990 SkPath path; 991 path.addRRect(inner); 992 path.addRRect(outer); 993 path.setFillType(SkPath::kEvenOdd_FillType); 994 995 GrStrokeInfo fillRec(SkStrokeRec::kFill_InitStyle); 996 this->internalDrawPath(target, paint.isAntiAlias(), path, fillRec); 997 } 998} 999 1000/////////////////////////////////////////////////////////////////////////////// 1001 1002void GrContext::drawOval(const GrPaint& paint, 1003 const SkRect& oval, 1004 const GrStrokeInfo& strokeInfo) { 1005 if (oval.isEmpty()) { 1006 return; 1007 } 1008 1009 if (strokeInfo.isDashed()) { 1010 SkPath path; 1011 path.addOval(oval); 1012 this->drawPath(paint, path, strokeInfo); 1013 return; 1014 } 1015 1016 AutoRestoreEffects are; 1017 AutoCheckFlush acf(this); 1018 GrDrawTarget* target = this->prepareToDraw(&paint, &are, &acf); 1019 if (NULL == target) { 1020 return; 1021 } 1022 1023 GR_CREATE_TRACE_MARKER("GrContext::drawOval", target); 1024 1025 const SkStrokeRec& strokeRec = strokeInfo.getStrokeRec(); 1026 1027 1028 if (!fOvalRenderer->drawOval(target, this, paint.isAntiAlias(), oval, strokeRec)) { 1029 SkPath path; 1030 path.addOval(oval); 1031 this->internalDrawPath(target, paint.isAntiAlias(), path, strokeInfo); 1032 } 1033} 1034 1035// Can 'path' be drawn as a pair of filled nested rectangles? 
1036static bool is_nested_rects(GrDrawTarget* target, 1037 const SkPath& path, 1038 const SkStrokeRec& stroke, 1039 SkRect rects[2]) { 1040 SkASSERT(stroke.isFillStyle()); 1041 1042 if (path.isInverseFillType()) { 1043 return false; 1044 } 1045 1046 const GrDrawState& drawState = target->getDrawState(); 1047 1048 // TODO: this restriction could be lifted if we were willing to apply 1049 // the matrix to all the points individually rather than just to the rect 1050 if (!drawState.getViewMatrix().preservesAxisAlignment()) { 1051 return false; 1052 } 1053 1054 if (!target->getDrawState().canTweakAlphaForCoverage() && 1055 target->shouldDisableCoverageAAForBlend()) { 1056 return false; 1057 } 1058 1059 SkPath::Direction dirs[2]; 1060 if (!path.isNestedRects(rects, dirs)) { 1061 return false; 1062 } 1063 1064 if (SkPath::kWinding_FillType == path.getFillType() && dirs[0] == dirs[1]) { 1065 // The two rects need to be wound opposite to each other 1066 return false; 1067 } 1068 1069 // Right now, nested rects where the margin is not the same width 1070 // all around do not render correctly 1071 const SkScalar* outer = rects[0].asScalars(); 1072 const SkScalar* inner = rects[1].asScalars(); 1073 1074 bool allEq = true; 1075 1076 SkScalar margin = SkScalarAbs(outer[0] - inner[0]); 1077 bool allGoE1 = margin >= SK_Scalar1; 1078 1079 for (int i = 1; i < 4; ++i) { 1080 SkScalar temp = SkScalarAbs(outer[i] - inner[i]); 1081 if (temp < SK_Scalar1) { 1082 allGoE1 = false; 1083 } 1084 if (!SkScalarNearlyEqual(margin, temp)) { 1085 allEq = false; 1086 } 1087 } 1088 1089 return allEq || allGoE1; 1090} 1091 1092void GrContext::drawPath(const GrPaint& paint, const SkPath& path, const GrStrokeInfo& strokeInfo) { 1093 1094 if (path.isEmpty()) { 1095 if (path.isInverseFillType()) { 1096 this->drawPaint(paint); 1097 } 1098 return; 1099 } 1100 1101 if (strokeInfo.isDashed()) { 1102 SkPoint pts[2]; 1103 if (path.isLine(pts)) { 1104 AutoRestoreEffects are; 1105 AutoCheckFlush acf(this); 1106 
GrDrawTarget* target = this->prepareToDraw(&paint, &are, &acf); 1107 if (NULL == target) { 1108 return; 1109 } 1110 GrDrawState* drawState = target->drawState(); 1111 1112 SkMatrix origViewMatrix = drawState->getViewMatrix(); 1113 GrDrawState::AutoViewMatrixRestore avmr; 1114 if (avmr.setIdentity(target->drawState())) { 1115 if (GrDashingEffect::DrawDashLine(pts, paint, strokeInfo, fGpu, target, 1116 origViewMatrix)) { 1117 return; 1118 } 1119 } 1120 } 1121 1122 // Filter dashed path into new path with the dashing applied 1123 const SkPathEffect::DashInfo& info = strokeInfo.getDashInfo(); 1124 SkTLazy<SkPath> effectPath; 1125 GrStrokeInfo newStrokeInfo(strokeInfo, false); 1126 SkStrokeRec* stroke = newStrokeInfo.getStrokeRecPtr(); 1127 if (SkDashPath::FilterDashPath(effectPath.init(), path, stroke, NULL, info)) { 1128 this->drawPath(paint, *effectPath.get(), newStrokeInfo); 1129 return; 1130 } 1131 1132 this->drawPath(paint, path, newStrokeInfo); 1133 return; 1134 } 1135 1136 // Note that internalDrawPath may sw-rasterize the path into a scratch texture. 1137 // Scratch textures can be recycled after they are returned to the texture 1138 // cache. This presents a potential hazard for buffered drawing. However, 1139 // the writePixels that uploads to the scratch will perform a flush so we're 1140 // OK. 
1141 AutoRestoreEffects are; 1142 AutoCheckFlush acf(this); 1143 GrDrawTarget* target = this->prepareToDraw(&paint, &are, &acf); 1144 if (NULL == target) { 1145 return; 1146 } 1147 GrDrawState* drawState = target->drawState(); 1148 1149 GR_CREATE_TRACE_MARKER1("GrContext::drawPath", target, "Is Convex", path.isConvex()); 1150 1151 const SkStrokeRec& strokeRec = strokeInfo.getStrokeRec(); 1152 1153 bool useCoverageAA = paint.isAntiAlias() && !drawState->getRenderTarget()->isMultisampled(); 1154 1155 if (useCoverageAA && strokeRec.getWidth() < 0 && !path.isConvex()) { 1156 // Concave AA paths are expensive - try to avoid them for special cases 1157 SkRect rects[2]; 1158 1159 if (is_nested_rects(target, path, strokeRec, rects)) { 1160 SkMatrix origViewMatrix = drawState->getViewMatrix(); 1161 GrDrawState::AutoViewMatrixRestore avmr; 1162 if (!avmr.setIdentity(target->drawState())) { 1163 return; 1164 } 1165 1166 fAARectRenderer->fillAANestedRects(target, rects, origViewMatrix); 1167 return; 1168 } 1169 } 1170 1171 SkRect ovalRect; 1172 bool isOval = path.isOval(&ovalRect); 1173 1174 if (!isOval || path.isInverseFillType() 1175 || !fOvalRenderer->drawOval(target, this, paint.isAntiAlias(), ovalRect, strokeRec)) { 1176 this->internalDrawPath(target, paint.isAntiAlias(), path, strokeInfo); 1177 } 1178} 1179 1180void GrContext::internalDrawPath(GrDrawTarget* target, bool useAA, const SkPath& path, 1181 const GrStrokeInfo& strokeInfo) { 1182 SkASSERT(!path.isEmpty()); 1183 1184 GR_CREATE_TRACE_MARKER("GrContext::internalDrawPath", target); 1185 1186 1187 // An Assumption here is that path renderer would use some form of tweaking 1188 // the src color (either the input alpha or in the frag shader) to implement 1189 // aa. If we have some future driver-mojo path AA that can do the right 1190 // thing WRT to the blend then we'll need some query on the PR. 
1191 bool useCoverageAA = useAA && 1192 !target->getDrawState().getRenderTarget()->isMultisampled() && 1193 !target->shouldDisableCoverageAAForBlend(); 1194 1195 1196 GrPathRendererChain::DrawType type = 1197 useCoverageAA ? GrPathRendererChain::kColorAntiAlias_DrawType : 1198 GrPathRendererChain::kColor_DrawType; 1199 1200 const SkPath* pathPtr = &path; 1201 SkTLazy<SkPath> tmpPath; 1202 SkTCopyOnFirstWrite<SkStrokeRec> stroke(strokeInfo.getStrokeRec()); 1203 1204 // Try a 1st time without stroking the path and without allowing the SW renderer 1205 GrPathRenderer* pr = this->getPathRenderer(*pathPtr, *stroke, target, false, type); 1206 1207 if (NULL == pr) { 1208 if (!GrPathRenderer::IsStrokeHairlineOrEquivalent(*stroke, this->getMatrix(), NULL)) { 1209 // It didn't work the 1st time, so try again with the stroked path 1210 if (stroke->applyToPath(tmpPath.init(), *pathPtr)) { 1211 pathPtr = tmpPath.get(); 1212 stroke.writable()->setFillStyle(); 1213 if (pathPtr->isEmpty()) { 1214 return; 1215 } 1216 } 1217 } 1218 1219 // This time, allow SW renderer 1220 pr = this->getPathRenderer(*pathPtr, *stroke, target, true, type); 1221 } 1222 1223 if (NULL == pr) { 1224#ifdef SK_DEBUG 1225 SkDebugf("Unable to find path renderer compatible with path.\n"); 1226#endif 1227 return; 1228 } 1229 1230 pr->drawPath(*pathPtr, *stroke, target, useCoverageAA); 1231} 1232 1233//////////////////////////////////////////////////////////////////////////////// 1234 1235void GrContext::flush(int flagsBitfield) { 1236 if (NULL == fDrawBuffer) { 1237 return; 1238 } 1239 1240 if (kDiscard_FlushBit & flagsBitfield) { 1241 fDrawBuffer->reset(); 1242 } else { 1243 fDrawBuffer->flush(); 1244 } 1245 fResourceCache->purgeAsNeeded(); 1246 fFlushToReduceCacheSize = false; 1247} 1248 1249bool sw_convert_to_premul(GrPixelConfig srcConfig, int width, int height, size_t inRowBytes, 1250 const void* inPixels, size_t outRowBytes, void* outPixels) { 1251 SkSrcPixelInfo srcPI; 1252 if 
(!GrPixelConfig2ColorType(srcConfig, &srcPI.fColorType)) { 1253 return false; 1254 } 1255 srcPI.fAlphaType = kUnpremul_SkAlphaType; 1256 srcPI.fPixels = inPixels; 1257 srcPI.fRowBytes = inRowBytes; 1258 1259 SkDstPixelInfo dstPI; 1260 dstPI.fColorType = srcPI.fColorType; 1261 dstPI.fAlphaType = kPremul_SkAlphaType; 1262 dstPI.fPixels = outPixels; 1263 dstPI.fRowBytes = outRowBytes; 1264 1265 return srcPI.convertPixelsTo(&dstPI, width, height); 1266} 1267 1268bool GrContext::writeSurfacePixels(GrSurface* surface, 1269 int left, int top, int width, int height, 1270 GrPixelConfig srcConfig, const void* buffer, size_t rowBytes, 1271 uint32_t pixelOpsFlags) { 1272 1273 { 1274 GrTexture* texture = NULL; 1275 if (!(kUnpremul_PixelOpsFlag & pixelOpsFlags) && (texture = surface->asTexture()) && 1276 fGpu->canWriteTexturePixels(texture, srcConfig)) { 1277 1278 if (!(kDontFlush_PixelOpsFlag & pixelOpsFlags) && 1279 surface->surfacePriv().hasPendingIO()) { 1280 this->flush(); 1281 } 1282 return fGpu->writeTexturePixels(texture, left, top, width, height, 1283 srcConfig, buffer, rowBytes); 1284 // Don't need to check kFlushWrites_PixelOp here, we just did a direct write so the 1285 // upload is already flushed. 1286 } 1287 } 1288 1289 // If we didn't do a direct texture write then we upload the pixels to a texture and draw. 1290 GrRenderTarget* renderTarget = surface->asRenderTarget(); 1291 if (NULL == renderTarget) { 1292 return false; 1293 } 1294 1295 // We ignore the preferred config unless it is a R/B swap of the src config. In that case 1296 // we will upload the original src data to a scratch texture but we will spoof it as the swapped 1297 // config. This scratch will then have R and B swapped. We correct for this by swapping again 1298 // when drawing the scratch to the dst using a conversion effect. 
1299 bool swapRAndB = false; 1300 GrPixelConfig writeConfig = srcConfig; 1301 if (GrPixelConfigSwapRAndB(srcConfig) == 1302 fGpu->preferredWritePixelsConfig(srcConfig, renderTarget->config())) { 1303 writeConfig = GrPixelConfigSwapRAndB(srcConfig); 1304 swapRAndB = true; 1305 } 1306 1307 GrSurfaceDesc desc; 1308 desc.fWidth = width; 1309 desc.fHeight = height; 1310 desc.fConfig = writeConfig; 1311 SkAutoTUnref<GrTexture> texture(this->refScratchTexture(desc, kApprox_ScratchTexMatch)); 1312 if (!texture) { 1313 return false; 1314 } 1315 1316 SkAutoTUnref<const GrFragmentProcessor> fp; 1317 SkMatrix textureMatrix; 1318 textureMatrix.setIDiv(texture->width(), texture->height()); 1319 1320 // allocate a tmp buffer and sw convert the pixels to premul 1321 SkAutoSTMalloc<128 * 128, uint32_t> tmpPixels(0); 1322 1323 if (kUnpremul_PixelOpsFlag & pixelOpsFlags) { 1324 if (!GrPixelConfigIs8888(srcConfig)) { 1325 return false; 1326 } 1327 fp.reset(this->createUPMToPMEffect(texture, swapRAndB, textureMatrix)); 1328 // handle the unpremul step on the CPU if we couldn't create an effect to do it. 1329 if (NULL == fp) { 1330 size_t tmpRowBytes = 4 * width; 1331 tmpPixels.reset(width * height); 1332 if (!sw_convert_to_premul(srcConfig, width, height, rowBytes, buffer, tmpRowBytes, 1333 tmpPixels.get())) { 1334 return false; 1335 } 1336 rowBytes = tmpRowBytes; 1337 buffer = tmpPixels.get(); 1338 } 1339 } 1340 if (NULL == fp) { 1341 fp.reset(GrConfigConversionEffect::Create(texture, 1342 swapRAndB, 1343 GrConfigConversionEffect::kNone_PMConversion, 1344 textureMatrix)); 1345 } 1346 1347 // Even if the client told us not to flush, we still flush here. The client may have known that 1348 // writes to the original surface caused no data hazards, but they can't know that the scratch 1349 // we just got is safe. 
1350 if (texture->surfacePriv().hasPendingIO()) { 1351 this->flush(); 1352 } 1353 if (!fGpu->writeTexturePixels(texture, 0, 0, width, height, 1354 writeConfig, buffer, rowBytes)) { 1355 return false; 1356 } 1357 1358 SkMatrix matrix; 1359 matrix.setTranslate(SkIntToScalar(left), SkIntToScalar(top)); 1360 1361 // This function can be called in the midst of drawing another object (e.g., when uploading a 1362 // SW-rasterized clip while issuing a draw). So we push the current geometry state before 1363 // drawing a rect to the render target. 1364 // The bracket ensures we pop the stack if we wind up flushing below. 1365 { 1366 GrDrawTarget* drawTarget = this->prepareToDraw(NULL, NULL, NULL); 1367 GrDrawTarget::AutoGeometryAndStatePush agasp(drawTarget, GrDrawTarget::kReset_ASRInit, 1368 &matrix); 1369 GrDrawState* drawState = drawTarget->drawState(); 1370 drawState->addColorProcessor(fp); 1371 drawState->setRenderTarget(renderTarget); 1372 drawState->disableState(GrDrawState::kClip_StateBit); 1373 drawTarget->drawSimpleRect(SkRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height))); 1374 } 1375 1376 if (kFlushWrites_PixelOp & pixelOpsFlags) { 1377 this->flushSurfaceWrites(surface); 1378 } 1379 1380 return true; 1381} 1382 1383// toggles between RGBA and BGRA 1384static SkColorType toggle_colortype32(SkColorType ct) { 1385 if (kRGBA_8888_SkColorType == ct) { 1386 return kBGRA_8888_SkColorType; 1387 } else { 1388 SkASSERT(kBGRA_8888_SkColorType == ct); 1389 return kRGBA_8888_SkColorType; 1390 } 1391} 1392 1393bool GrContext::readRenderTargetPixels(GrRenderTarget* target, 1394 int left, int top, int width, int height, 1395 GrPixelConfig dstConfig, void* buffer, size_t rowBytes, 1396 uint32_t flags) { 1397 ASSERT_OWNED_RESOURCE(target); 1398 SkASSERT(target); 1399 1400 if (!(kDontFlush_PixelOpsFlag & flags) && target->surfacePriv().hasPendingWrite()) { 1401 this->flush(); 1402 } 1403 1404 // Determine which conversions have to be applied: flipY, swapRAnd, and/or 
unpremul. 1405 1406 // If fGpu->readPixels would incur a y-flip cost then we will read the pixels upside down. We'll 1407 // either do the flipY by drawing into a scratch with a matrix or on the cpu after the read. 1408 bool flipY = fGpu->readPixelsWillPayForYFlip(target, left, top, 1409 width, height, dstConfig, 1410 rowBytes); 1411 // We ignore the preferred config if it is different than our config unless it is an R/B swap. 1412 // In that case we'll perform an R and B swap while drawing to a scratch texture of the swapped 1413 // config. Then we will call readPixels on the scratch with the swapped config. The swaps during 1414 // the draw cancels out the fact that we call readPixels with a config that is R/B swapped from 1415 // dstConfig. 1416 GrPixelConfig readConfig = dstConfig; 1417 bool swapRAndB = false; 1418 if (GrPixelConfigSwapRAndB(dstConfig) == 1419 fGpu->preferredReadPixelsConfig(dstConfig, target->config())) { 1420 readConfig = GrPixelConfigSwapRAndB(readConfig); 1421 swapRAndB = true; 1422 } 1423 1424 bool unpremul = SkToBool(kUnpremul_PixelOpsFlag & flags); 1425 1426 if (unpremul && !GrPixelConfigIs8888(dstConfig)) { 1427 // The unpremul flag is only allowed for these two configs. 1428 return false; 1429 } 1430 1431 // If the src is a texture and we would have to do conversions after read pixels, we instead 1432 // do the conversions by drawing the src to a scratch texture. If we handle any of the 1433 // conversions in the draw we set the corresponding bool to false so that we don't reapply it 1434 // on the read back pixels. 1435 GrTexture* src = target->asTexture(); 1436 if (src && (swapRAndB || unpremul || flipY)) { 1437 // Make the scratch a render so we can read its pixels. 
1438 GrSurfaceDesc desc; 1439 desc.fFlags = kRenderTarget_GrSurfaceFlag; 1440 desc.fWidth = width; 1441 desc.fHeight = height; 1442 desc.fConfig = readConfig; 1443 desc.fOrigin = kTopLeft_GrSurfaceOrigin; 1444 1445 // When a full read back is faster than a partial we could always make the scratch exactly 1446 // match the passed rect. However, if we see many different size rectangles we will trash 1447 // our texture cache and pay the cost of creating and destroying many textures. So, we only 1448 // request an exact match when the caller is reading an entire RT. 1449 ScratchTexMatch match = kApprox_ScratchTexMatch; 1450 if (0 == left && 1451 0 == top && 1452 target->width() == width && 1453 target->height() == height && 1454 fGpu->fullReadPixelsIsFasterThanPartial()) { 1455 match = kExact_ScratchTexMatch; 1456 } 1457 SkAutoTUnref<GrTexture> texture(this->refScratchTexture(desc, match)); 1458 if (texture) { 1459 // compute a matrix to perform the draw 1460 SkMatrix textureMatrix; 1461 textureMatrix.setTranslate(SK_Scalar1 *left, SK_Scalar1 *top); 1462 textureMatrix.postIDiv(src->width(), src->height()); 1463 1464 SkAutoTUnref<const GrFragmentProcessor> fp; 1465 if (unpremul) { 1466 fp.reset(this->createPMToUPMEffect(src, swapRAndB, textureMatrix)); 1467 if (fp) { 1468 unpremul = false; // we no longer need to do this on CPU after the read back. 1469 } 1470 } 1471 // If we failed to create a PM->UPM effect and have no other conversions to perform then 1472 // there is no longer any point to using the scratch. 1473 if (fp || flipY || swapRAndB) { 1474 if (!fp) { 1475 fp.reset(GrConfigConversionEffect::Create( 1476 src, swapRAndB, GrConfigConversionEffect::kNone_PMConversion, 1477 textureMatrix)); 1478 } 1479 swapRAndB = false; // we will handle the swap in the draw. 
1480 1481 // We protect the existing geometry here since it may not be 1482 // clear to the caller that a draw operation (i.e., drawSimpleRect) 1483 // can be invoked in this method 1484 { 1485 GrDrawTarget::AutoGeometryAndStatePush agasp(fDrawBuffer, 1486 GrDrawTarget::kReset_ASRInit); 1487 GrDrawState* drawState = fDrawBuffer->drawState(); 1488 SkASSERT(fp); 1489 drawState->addColorProcessor(fp); 1490 1491 drawState->setRenderTarget(texture->asRenderTarget()); 1492 SkRect rect = SkRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height)); 1493 fDrawBuffer->drawSimpleRect(rect); 1494 // we want to read back from the scratch's origin 1495 left = 0; 1496 top = 0; 1497 target = texture->asRenderTarget(); 1498 } 1499 this->flushSurfaceWrites(target); 1500 } 1501 } 1502 } 1503 1504 if (!fGpu->readPixels(target, 1505 left, top, width, height, 1506 readConfig, buffer, rowBytes)) { 1507 return false; 1508 } 1509 // Perform any conversions we weren't able to perform using a scratch texture. 1510 if (unpremul || swapRAndB) { 1511 SkDstPixelInfo dstPI; 1512 if (!GrPixelConfig2ColorType(dstConfig, &dstPI.fColorType)) { 1513 return false; 1514 } 1515 dstPI.fAlphaType = kUnpremul_SkAlphaType; 1516 dstPI.fPixels = buffer; 1517 dstPI.fRowBytes = rowBytes; 1518 1519 SkSrcPixelInfo srcPI; 1520 srcPI.fColorType = swapRAndB ? 
toggle_colortype32(dstPI.fColorType) : dstPI.fColorType; 1521 srcPI.fAlphaType = kPremul_SkAlphaType; 1522 srcPI.fPixels = buffer; 1523 srcPI.fRowBytes = rowBytes; 1524 1525 return srcPI.convertPixelsTo(&dstPI, width, height); 1526 } 1527 return true; 1528} 1529 1530void GrContext::prepareSurfaceForExternalRead(GrSurface* surface) { 1531 SkASSERT(surface); 1532 ASSERT_OWNED_RESOURCE(surface); 1533 if (surface->surfacePriv().hasPendingIO()) { 1534 this->flush(); 1535 } 1536 GrRenderTarget* rt = surface->asRenderTarget(); 1537 if (fGpu && rt) { 1538 fGpu->resolveRenderTarget(rt); 1539 } 1540} 1541 1542void GrContext::discardRenderTarget(GrRenderTarget* renderTarget) { 1543 SkASSERT(renderTarget); 1544 ASSERT_OWNED_RESOURCE(renderTarget); 1545 AutoRestoreEffects are; 1546 AutoCheckFlush acf(this); 1547 GrDrawTarget* target = this->prepareToDraw(NULL, &are, &acf); 1548 if (NULL == target) { 1549 return; 1550 } 1551 target->discard(renderTarget); 1552} 1553 1554void GrContext::copySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect, 1555 const SkIPoint& dstPoint, uint32_t pixelOpsFlags) { 1556 if (NULL == src || NULL == dst) { 1557 return; 1558 } 1559 ASSERT_OWNED_RESOURCE(src); 1560 ASSERT_OWNED_RESOURCE(dst); 1561 1562 // Since we're going to the draw target and not GPU, no need to check kNoFlush 1563 // here. 
1564 1565 GrDrawTarget* target = this->prepareToDraw(NULL, NULL, NULL); 1566 if (NULL == target) { 1567 return; 1568 } 1569 target->copySurface(dst, src, srcRect, dstPoint); 1570 1571 if (kFlushWrites_PixelOp & pixelOpsFlags) { 1572 this->flush(); 1573 } 1574} 1575 1576void GrContext::flushSurfaceWrites(GrSurface* surface) { 1577 if (surface->surfacePriv().hasPendingWrite()) { 1578 this->flush(); 1579 } 1580} 1581 1582//////////////////////////////////////////////////////////////////////////////// 1583 1584GrDrawTarget* GrContext::prepareToDraw(const GrPaint* paint, 1585 AutoRestoreEffects* are, 1586 AutoCheckFlush* acf) { 1587 // All users of this draw state should be freeing up all effects when they're done. 1588 // Otherwise effects that own resources may keep those resources alive indefinitely. 1589 SkASSERT(0 == fDrawState->numColorStages() && 0 == fDrawState->numCoverageStages() && 1590 !fDrawState->hasGeometryProcessor()); 1591 1592 if (NULL == fGpu) { 1593 return NULL; 1594 } 1595 1596 ASSERT_OWNED_RESOURCE(fRenderTarget.get()); 1597 if (paint) { 1598 SkASSERT(are); 1599 SkASSERT(acf); 1600 are->set(fDrawState); 1601 fDrawState->setFromPaint(*paint, fViewMatrix, fRenderTarget.get()); 1602#if GR_DEBUG_PARTIAL_COVERAGE_CHECK 1603 if ((paint->hasMask() || 0xff != paint->fCoverage) && 1604 !fDrawState->couldApplyCoverage(fGpu->caps())) { 1605 SkDebugf("Partial pixel coverage will be incorrectly blended.\n"); 1606 } 1607#endif 1608 // Clear any vertex attributes configured for the previous use of the 1609 // GrDrawState which can effect which blend optimizations are in effect. 
1610 fDrawState->setDefaultVertexAttribs(); 1611 } else { 1612 fDrawState->reset(fViewMatrix); 1613 fDrawState->setRenderTarget(fRenderTarget.get()); 1614 } 1615 fDrawState->setState(GrDrawState::kClip_StateBit, fClip && 1616 !fClip->fClipStack->isWideOpen()); 1617 fDrawBuffer->setClip(fClip); 1618 SkASSERT(fDrawState == fDrawBuffer->drawState()); 1619 return fDrawBuffer; 1620} 1621 1622/* 1623 * This method finds a path renderer that can draw the specified path on 1624 * the provided target. 1625 * Due to its expense, the software path renderer has split out so it can 1626 * can be individually allowed/disallowed via the "allowSW" boolean. 1627 */ 1628GrPathRenderer* GrContext::getPathRenderer(const SkPath& path, 1629 const SkStrokeRec& stroke, 1630 const GrDrawTarget* target, 1631 bool allowSW, 1632 GrPathRendererChain::DrawType drawType, 1633 GrPathRendererChain::StencilSupport* stencilSupport) { 1634 1635 if (NULL == fPathRendererChain) { 1636 fPathRendererChain = SkNEW_ARGS(GrPathRendererChain, (this)); 1637 } 1638 1639 GrPathRenderer* pr = fPathRendererChain->getPathRenderer(path, 1640 stroke, 1641 target, 1642 drawType, 1643 stencilSupport); 1644 1645 if (NULL == pr && allowSW) { 1646 if (NULL == fSoftwarePathRenderer) { 1647 fSoftwarePathRenderer = SkNEW_ARGS(GrSoftwarePathRenderer, (this)); 1648 } 1649 pr = fSoftwarePathRenderer; 1650 } 1651 1652 return pr; 1653} 1654 1655//////////////////////////////////////////////////////////////////////////////// 1656bool GrContext::isConfigRenderable(GrPixelConfig config, bool withMSAA) const { 1657 return fGpu->caps()->isConfigRenderable(config, withMSAA); 1658} 1659 1660int GrContext::getRecommendedSampleCount(GrPixelConfig config, 1661 SkScalar dpi) const { 1662 if (!this->isConfigRenderable(config, true)) { 1663 return 0; 1664 } 1665 int chosenSampleCount = 0; 1666 if (fGpu->caps()->pathRenderingSupport()) { 1667 if (dpi >= 250.0f) { 1668 chosenSampleCount = 4; 1669 } else { 1670 chosenSampleCount = 16; 1671 } 
1672 } 1673 return chosenSampleCount <= fGpu->caps()->maxSampleCount() ? 1674 chosenSampleCount : 0; 1675} 1676 1677void GrContext::setupDrawBuffer() { 1678 SkASSERT(NULL == fDrawBuffer); 1679 SkASSERT(NULL == fDrawBufferVBAllocPool); 1680 SkASSERT(NULL == fDrawBufferIBAllocPool); 1681 1682 fDrawBufferVBAllocPool = 1683 SkNEW_ARGS(GrVertexBufferAllocPool, (fGpu, false, 1684 DRAW_BUFFER_VBPOOL_BUFFER_SIZE, 1685 DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS)); 1686 fDrawBufferIBAllocPool = 1687 SkNEW_ARGS(GrIndexBufferAllocPool, (fGpu, false, 1688 DRAW_BUFFER_IBPOOL_BUFFER_SIZE, 1689 DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS)); 1690 1691 fDrawBuffer = SkNEW_ARGS(GrInOrderDrawBuffer, (fGpu, 1692 fDrawBufferVBAllocPool, 1693 fDrawBufferIBAllocPool)); 1694 1695 fDrawBuffer->setDrawState(fDrawState); 1696} 1697 1698GrDrawTarget* GrContext::getTextTarget() { 1699 return this->prepareToDraw(NULL, NULL, NULL); 1700} 1701 1702const GrIndexBuffer* GrContext::getQuadIndexBuffer() const { 1703 return fGpu->getQuadIndexBuffer(); 1704} 1705 1706namespace { 1707void test_pm_conversions(GrContext* ctx, int* pmToUPMValue, int* upmToPMValue) { 1708 GrConfigConversionEffect::PMConversion pmToUPM; 1709 GrConfigConversionEffect::PMConversion upmToPM; 1710 GrConfigConversionEffect::TestForPreservingPMConversions(ctx, &pmToUPM, &upmToPM); 1711 *pmToUPMValue = pmToUPM; 1712 *upmToPMValue = upmToPM; 1713} 1714} 1715 1716const GrFragmentProcessor* GrContext::createPMToUPMEffect(GrTexture* texture, 1717 bool swapRAndB, 1718 const SkMatrix& matrix) { 1719 if (!fDidTestPMConversions) { 1720 test_pm_conversions(this, &fPMToUPMConversion, &fUPMToPMConversion); 1721 fDidTestPMConversions = true; 1722 } 1723 GrConfigConversionEffect::PMConversion pmToUPM = 1724 static_cast<GrConfigConversionEffect::PMConversion>(fPMToUPMConversion); 1725 if (GrConfigConversionEffect::kNone_PMConversion != pmToUPM) { 1726 return GrConfigConversionEffect::Create(texture, swapRAndB, pmToUPM, matrix); 1727 } else { 1728 return NULL; 
1729 } 1730} 1731 1732const GrFragmentProcessor* GrContext::createUPMToPMEffect(GrTexture* texture, 1733 bool swapRAndB, 1734 const SkMatrix& matrix) { 1735 if (!fDidTestPMConversions) { 1736 test_pm_conversions(this, &fPMToUPMConversion, &fUPMToPMConversion); 1737 fDidTestPMConversions = true; 1738 } 1739 GrConfigConversionEffect::PMConversion upmToPM = 1740 static_cast<GrConfigConversionEffect::PMConversion>(fUPMToPMConversion); 1741 if (GrConfigConversionEffect::kNone_PMConversion != upmToPM) { 1742 return GrConfigConversionEffect::Create(texture, swapRAndB, upmToPM, matrix); 1743 } else { 1744 return NULL; 1745 } 1746} 1747 1748void GrContext::addResourceToCache(const GrResourceKey& resourceKey, GrGpuResource* resource) { 1749 fResourceCache->addResource(resourceKey, resource); 1750} 1751 1752GrGpuResource* GrContext::findAndRefCachedResource(const GrResourceKey& resourceKey) { 1753 GrGpuResource* resource = fResourceCache2->findAndRefContentResource(resourceKey); 1754 if (resource) { 1755 fResourceCache->makeResourceMRU(resource); 1756 } 1757 return resource; 1758} 1759 1760void GrContext::addGpuTraceMarker(const GrGpuTraceMarker* marker) { 1761 fGpu->addGpuTraceMarker(marker); 1762 if (fDrawBuffer) { 1763 fDrawBuffer->addGpuTraceMarker(marker); 1764 } 1765} 1766 1767void GrContext::removeGpuTraceMarker(const GrGpuTraceMarker* marker) { 1768 fGpu->removeGpuTraceMarker(marker); 1769 if (fDrawBuffer) { 1770 fDrawBuffer->removeGpuTraceMarker(marker); 1771 } 1772} 1773 1774/////////////////////////////////////////////////////////////////////////////// 1775#if GR_CACHE_STATS 1776void GrContext::printCacheStats() const { 1777 fResourceCache->printStats(); 1778} 1779#endif 1780 1781#if GR_GPU_STATS 1782const GrContext::GPUStats* GrContext::gpuStats() const { 1783 return fGpu->gpuStats(); 1784} 1785#endif 1786 1787