GrContext.cpp revision 46f7afb9867200b568c21736da8a8bbb56b20e30

/*
 * Copyright 2011 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */


#include "GrContext.h"

#include "GrBufferAllocPool.h"
#include "GrClipIterator.h"
#include "GrGpu.h"
#include "GrIndexBuffer.h"
#include "GrInOrderDrawBuffer.h"
#include "GrPathRenderer.h"
#include "GrPathUtils.h"
#include "GrResourceCache.h"
#include "GrStencilBuffer.h"
#include "GrTextStrike.h"
#include "SkTLazy.h"
#include "SkTrace.h"

// Using MSAA seems to be slower for some yet unknown reason.
#define PREFER_MSAA_OFFSCREEN_AA 0
#define OFFSCREEN_SSAA_SCALE 4 // super sample at 4x4

// When enabled, text draws are accumulated in the in-order draw buffer
// rather than issued immediately.
#define DEFER_TEXT_RENDERING 1

// drawRectToRect batching is incompatible with the static rect VB path.
#define BATCH_RECT_TO_RECT (1 && !GR_STATIC_RECT_VB)

// When we're using coverage AA but the blend is incompatible (given gpu
// limitations) should we disable AA or draw wrong?
#define DISABLE_COVERAGE_AA_FOR_BLEND 1

// Default texture-cache budget (entry count / total bytes).
static const size_t MAX_TEXTURE_CACHE_COUNT = 256;
static const size_t MAX_TEXTURE_CACHE_BYTES = 16 * 1024 * 1024;

// Vertex pool sizing for the deferred draw buffer.
static const size_t DRAW_BUFFER_VBPOOL_BUFFER_SIZE = 1 << 18;
static const int DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS = 4;

// We are currently only batching Text and drawRectToRect, both
// of which use the quad index buffer.
// Index pool is unused by the batched paths (they share the quad index
// buffer), so it gets no preallocation.
static const size_t DRAW_BUFFER_IBPOOL_BUFFER_SIZE = 0;
static const int DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS = 0;

// Sanity check: a resource handed to this context must have been created by it.
#define ASSERT_OWNED_RESOURCE(R) GrAssert(!(R) || (R)->getContext() == this)

// Factory: creates a GrGpu for the requested engine/3D context and wraps it
// in a new GrContext. Returns NULL if the GrGpu could not be created.
GrContext* GrContext::Create(GrEngine engine,
                             GrPlatform3DContext context3D) {
    GrContext* ctx = NULL;
    // NOTE(review): local deliberately shadows the member naming style
    // (fGpu); consider renaming to plain 'gpu' for clarity.
    GrGpu* fGpu = GrGpu::Create(engine, context3D);
    if (NULL != fGpu) {
        ctx = new GrContext(fGpu);
        // the GrContext ctor ref'ed the gpu; drop the creation ref.
        fGpu->unref();
    }
    return ctx;
}

GrContext::~GrContext() {
    // flush any pending deferred work before tearing down the caches.
    this->flush();
    delete fTextureCache;
    delete fFontCache;
    delete fDrawBuffer;
    delete fDrawBufferVBAllocPool;
    delete fDrawBufferIBAllocPool;

    GrSafeUnref(fAAFillRectIndexBuffer);
    GrSafeUnref(fAAStrokeRectIndexBuffer);
    fGpu->unref();
    GrSafeUnref(fPathRendererChain);
}

// The underlying 3D API context was lost: drop everything and rebuild the
// deferred draw buffer so the context can be used again.
void GrContext::contextLost() {
    contextDestroyed();
    this->setupDrawBuffer();
}

// Tear down all GPU-backed state without freeing it through the (now
// invalid) 3D API.
void GrContext::contextDestroyed() {
    // abandon first so destructors
    // don't try to free the resources in the API.
    fGpu->abandonResources();

    // a path renderer may be holding onto resources that
    // are now unusable
    GrSafeSetNull(fPathRendererChain);

    delete fDrawBuffer;
    fDrawBuffer = NULL;

    delete fDrawBufferVBAllocPool;
    fDrawBufferVBAllocPool = NULL;

    delete fDrawBufferIBAllocPool;
    fDrawBufferIBAllocPool = NULL;

    GrSafeSetNull(fAAFillRectIndexBuffer);
    GrSafeSetNull(fAAStrokeRectIndexBuffer);

    fTextureCache->removeAll();
    fFontCache->freeAll();
    fGpu->markContextDirty();
}

// Invalidate cached 3D API state (e.g. after a third party touched the API).
void GrContext::resetContext() {
    fGpu->markContextDirty();
}

// Release all GPU resources that can be recreated on demand.
void GrContext::freeGpuResources() {
    this->flush();
    fTextureCache->removeAll();
    fFontCache->freeAll();
    // a path renderer may be holding onto resources
    GrSafeSetNull(fPathRendererChain);
}

////////////////////////////////////////////////////////////////////////////////

// Computes the vertex-layout bits for the paint's active stages. Stages with
// an entry in hasTexCoords get explicit per-stage tex coords; all others use
// position-as-texcoord.
int GrContext::PaintStageVertexLayoutBits(
                            const GrPaint& paint,
                            const bool hasTexCoords[GrPaint::kTotalStages]) {
    int stageMask = paint.getActiveStageMask();
    int layout = 0;
    for (int i = 0; i < GrPaint::kTotalStages; ++i) {
        if ((1 << i) & stageMask) {
            if (NULL != hasTexCoords && hasTexCoords[i]) {
                layout |= GrDrawTarget::StageTexCoordVertexLayoutBit(i, i);
            } else {
                layout |= GrDrawTarget::StagePosAsTexCoordVertexLayoutBit(i);
            }
        }
    }
    return layout;
}


////////////////////////////////////////////////////////////////////////////////

// Bits packed into the last word (v[3]) of a resource-cache key.
enum {
    // flags for textures
    kNPOTBit          = 0x1,
    kFilterBit        = 0x2,
    kScratchBit       = 0x4,

    // resource type
    kTextureBit       = 0x8,
    kStencilBufferBit = 0x10
};

// Returns the cached texture, or NULL for an empty cache entry.
GrTexture* GrContext::TextureCacheEntry::texture() const {
    if (NULL == fEntry) {
        return NULL;
    } else {
        return (GrTexture*) fEntry->resource();
    }
}

namespace {
// returns true if this is a "special" texture because of gpu NPOT limitations
bool
gen_texture_key_values(const GrGpu* gpu,
                       const GrSamplerState* sampler,
                       GrContext::TextureKey clientKey,
                       int width,
                       int height,
                       bool scratch,
                       uint32_t v[4]) {
    GR_STATIC_ASSERT(sizeof(GrContext::TextureKey) == sizeof(uint64_t));
    // we assume we only need 16 bits of width and height
    // assert that texture creation will fail anyway if this assumption
    // would cause key collisions.
    GrAssert(gpu->getCaps().fMaxTextureSize <= SK_MaxU16);
    // split the 64-bit client key across the first two key words.
    v[0] = clientKey & 0xffffffffUL;
    v[1] = (clientKey >> 32) & 0xffffffffUL;
    // pack width/height into one word (16 bits each, per assert above).
    v[2] = width | (height << 16);

    v[3] = 0;
    if (!gpu->getCaps().fNPOTTextureTileSupport) {
        bool isPow2 = GrIsPow2(width) && GrIsPow2(height);

        bool tiled = NULL != sampler &&
                     ((sampler->getWrapX() != GrSamplerState::kClamp_WrapMode) ||
                      (sampler->getWrapY() != GrSamplerState::kClamp_WrapMode));

        // only a tiled NPOT texture needs the "special" resized copy.
        if (tiled && !isPow2) {
            v[3] |= kNPOTBit;
            if (GrSamplerState::kNearest_Filter != sampler->getFilter()) {
                v[3] |= kFilterBit;
            }
        }
    }

    if (scratch) {
        v[3] |= kScratchBit;
    }

    v[3] |= kTextureBit;

    return v[3] & kNPOTBit;
}

// we should never have more than one stencil buffer with same combo of
// (width,height,samplecount)
void gen_stencil_key_values(int width, int height,
                            int sampleCnt, uint32_t v[4]) {
    v[0] = width;
    v[1] = height;
    v[2] = sampleCnt;
    v[3] = kStencilBufferBit;
}

// Convenience overload: keys an existing stencil buffer by its dimensions.
void gen_stencil_key_values(const GrStencilBuffer* sb,
                            uint32_t v[4]) {
    gen_stencil_key_values(sb->width(), sb->height(),
                           sb->numSamples(), v);
}

}

// Looks up a content-keyed texture in the cache; returns an entry that may
// hold NULL on a miss. On a hit the entry is returned nested-locked.
GrContext::TextureCacheEntry GrContext::findAndLockTexture(
        TextureKey key,
        int width,
        int height,
        const GrSamplerState* sampler) {
    uint32_t v[4];
    gen_texture_key_values(fGpu, sampler, key, width, height, false, v);
    GrResourceKey resourceKey(v);
    return TextureCacheEntry(fTextureCache->findAndLock(resourceKey,
                                            GrResourceCache::kNested_LockType));
}

// Key-presence test only; does not lock the entry.
bool GrContext::isTextureInCache(TextureKey key,
                                 int width,
                                 int height,
                                 const GrSamplerState* sampler) const {
    uint32_t v[4];
    gen_texture_key_values(fGpu, sampler, key, width, height, false, v);
    GrResourceKey resourceKey(v);
    return fTextureCache->hasKey(resourceKey);
}

// Inserts a stencil buffer into the (shared texture/stencil) cache, locked.
GrResourceEntry* GrContext::addAndLockStencilBuffer(GrStencilBuffer* sb) {
    ASSERT_OWNED_RESOURCE(sb);
    uint32_t v[4];
    gen_stencil_key_values(sb, v);
    GrResourceKey resourceKey(v);
    return fTextureCache->createAndLock(resourceKey, sb);
}

// Finds and locks a cached stencil buffer matching (width, height, samples),
// or returns NULL.
GrStencilBuffer* GrContext::findStencilBuffer(int width, int height,
                                              int sampleCnt) {
    uint32_t v[4];
    gen_stencil_key_values(width, height, sampleCnt, v);
    GrResourceKey resourceKey(v);
    GrResourceEntry* entry = fTextureCache->findAndLock(resourceKey,
                                            GrResourceCache::kSingle_LockType);
    if (NULL != entry) {
        GrStencilBuffer* sb = (GrStencilBuffer*) entry->resource();
        return sb;
    } else {
        return NULL;
    }
}

void GrContext::unlockStencilBuffer(GrResourceEntry* sbEntry) {
    ASSERT_OWNED_RESOURCE(sbEntry->resource());
    fTextureCache->unlock(sbEntry);
}

// Point-sampled (nearest-neighbor) CPU stretch of src (srcW x srcH) into
// dst (dstW x dstH), bpp bytes per pixel, using 16.16 fixed-point stepping.
// Rows are assumed tightly packed (rowBytes == width * bpp).
static void stretchImage(void* dst,
                         int dstW,
                         int dstH,
                         void* src,
                         int srcW,
                         int srcH,
                         int bpp) {
    GrFixed dx = (srcW << 16) / dstW;
    GrFixed dy = (srcH << 16) / dstH;

    // start sampling at the center of the first dst pixel.
    GrFixed y = dy >> 1;

    int dstXLimit = dstW*bpp;
    for (int j = 0; j < dstH; ++j) {
        GrFixed x = dx >> 1;
        void* srcRow = (uint8_t*)src + (y>>16)*srcW*bpp;
        void* dstRow = (uint8_t*)dst + j*dstW*bpp;
        for (int i = 0; i < dstXLimit; i += bpp) {
            memcpy((uint8_t*) dstRow + i,
                   (uint8_t*) srcRow + (x>>16)*bpp,
                   bpp);
            x += dx;
        }
        y += dy;
    }
}

// Creates a texture for key/desc, uploads srcData, and returns it locked in
// the cache. When the gpu can't tile this size (NPOT), a resized "special"
// texture is created (GPU resample if possible, CPU stretch otherwise).
GrContext::TextureCacheEntry GrContext::createAndLockTexture(
        TextureKey key,
        const GrSamplerState* sampler,
        const GrTextureDesc& desc,
void* srcData, 302 size_t rowBytes) { 303 SK_TRACE_EVENT0("GrContext::createAndLockTexture"); 304 305#if GR_DUMP_TEXTURE_UPLOAD 306 GrPrintf("GrContext::createAndLockTexture [%d %d]\n", desc.fWidth, desc.fHeight); 307#endif 308 309 TextureCacheEntry entry; 310 uint32_t v[4]; 311 bool special = gen_texture_key_values(fGpu, sampler, key, 312 desc.fWidth, desc.fHeight, false, v); 313 GrResourceKey resourceKey(v); 314 315 if (special) { 316 GrAssert(NULL != sampler); 317 TextureCacheEntry clampEntry = this->findAndLockTexture(key, 318 desc.fWidth, 319 desc.fHeight, 320 NULL); 321 322 if (NULL == clampEntry.texture()) { 323 clampEntry = this->createAndLockTexture(key, NULL, desc, 324 srcData, rowBytes); 325 GrAssert(NULL != clampEntry.texture()); 326 if (NULL == clampEntry.texture()) { 327 return entry; 328 } 329 } 330 GrTextureDesc rtDesc = desc; 331 rtDesc.fFlags = rtDesc.fFlags | 332 kRenderTarget_GrTextureFlagBit | 333 kNoStencil_GrTextureFlagBit; 334 rtDesc.fWidth = GrNextPow2(GrMax(desc.fWidth, 64)); 335 rtDesc.fHeight = GrNextPow2(GrMax(desc.fHeight, 64)); 336 337 GrTexture* texture = fGpu->createTexture(rtDesc, NULL, 0); 338 339 if (NULL != texture) { 340 GrDrawTarget::AutoStateRestore asr(fGpu); 341 GrDrawState* drawState = fGpu->drawState(); 342 drawState->reset(); 343 drawState->setRenderTarget(texture->asRenderTarget()); 344 drawState->setTexture(0, clampEntry.texture()); 345 346 GrSamplerState::Filter filter; 347 // if filtering is not desired then we want to ensure all 348 // texels in the resampled image are copies of texels from 349 // the original. 
350 if (GrSamplerState::kNearest_Filter == sampler->getFilter()) { 351 filter = GrSamplerState::kNearest_Filter; 352 } else { 353 filter = GrSamplerState::kBilinear_Filter; 354 } 355 drawState->sampler(0)->reset(GrSamplerState::kClamp_WrapMode, 356 filter); 357 358 static const GrVertexLayout layout = 359 GrDrawTarget::StageTexCoordVertexLayoutBit(0,0); 360 GrDrawTarget::AutoReleaseGeometry arg(fGpu, layout, 4, 0); 361 362 if (arg.succeeded()) { 363 GrPoint* verts = (GrPoint*) arg.vertices(); 364 verts[0].setIRectFan(0, 0, 365 texture->width(), 366 texture->height(), 367 2*sizeof(GrPoint)); 368 verts[1].setIRectFan(0, 0, 1, 1, 2*sizeof(GrPoint)); 369 fGpu->drawNonIndexed(kTriangleFan_PrimitiveType, 370 0, 4); 371 entry.set(fTextureCache->createAndLock(resourceKey, texture)); 372 } 373 texture->releaseRenderTarget(); 374 } else { 375 // TODO: Our CPU stretch doesn't filter. But we create separate 376 // stretched textures when the sampler state is either filtered or 377 // not. Either implement filtered stretch blit on CPU or just create 378 // one when FBO case fails. 379 380 rtDesc.fFlags = kNone_GrTextureFlags; 381 // no longer need to clamp at min RT size. 
382 rtDesc.fWidth = GrNextPow2(desc.fWidth); 383 rtDesc.fHeight = GrNextPow2(desc.fHeight); 384 int bpp = GrBytesPerPixel(desc.fConfig); 385 SkAutoSMalloc<128*128*4> stretchedPixels(bpp * 386 rtDesc.fWidth * 387 rtDesc.fHeight); 388 stretchImage(stretchedPixels.get(), rtDesc.fWidth, rtDesc.fHeight, 389 srcData, desc.fWidth, desc.fHeight, bpp); 390 391 size_t stretchedRowBytes = rtDesc.fWidth * bpp; 392 393 GrTexture* texture = fGpu->createTexture(rtDesc, 394 stretchedPixels.get(), 395 stretchedRowBytes); 396 GrAssert(NULL != texture); 397 entry.set(fTextureCache->createAndLock(resourceKey, texture)); 398 } 399 fTextureCache->unlock(clampEntry.cacheEntry()); 400 401 } else { 402 GrTexture* texture = fGpu->createTexture(desc, srcData, rowBytes); 403 if (NULL != texture) { 404 entry.set(fTextureCache->createAndLock(resourceKey, texture)); 405 } 406 } 407 return entry; 408} 409 410namespace { 411inline void gen_scratch_tex_key_values(const GrGpu* gpu, 412 const GrTextureDesc& desc, 413 uint32_t v[4]) { 414 // Instead of a client-provided key of the texture contents 415 // we create a key of from the descriptor. 
416 GrContext::TextureKey descKey = desc.fAALevel | 417 (desc.fFlags << 8) | 418 ((uint64_t) desc.fConfig << 32); 419 // this code path isn't friendly to tiling with NPOT restricitons 420 // We just pass ClampNoFilter() 421 gen_texture_key_values(gpu, NULL, descKey, desc.fWidth, 422 desc.fHeight, true, v); 423} 424} 425 426GrContext::TextureCacheEntry GrContext::lockScratchTexture( 427 const GrTextureDesc& inDesc, 428 ScratchTexMatch match) { 429 430 GrTextureDesc desc = inDesc; 431 if (kExact_ScratchTexMatch != match) { 432 // bin by pow2 with a reasonable min 433 static const int MIN_SIZE = 256; 434 desc.fWidth = GrMax(MIN_SIZE, GrNextPow2(desc.fWidth)); 435 desc.fHeight = GrMax(MIN_SIZE, GrNextPow2(desc.fHeight)); 436 } 437 438 uint32_t p0 = desc.fConfig; 439 uint32_t p1 = (desc.fAALevel << 16) | desc.fFlags; 440 441 GrResourceEntry* entry; 442 int origWidth = desc.fWidth; 443 int origHeight = desc.fHeight; 444 bool doubledW = false; 445 bool doubledH = false; 446 447 do { 448 uint32_t v[4]; 449 gen_scratch_tex_key_values(fGpu, desc, v); 450 GrResourceKey key(v); 451 entry = fTextureCache->findAndLock(key, 452 GrResourceCache::kNested_LockType); 453 // if we miss, relax the fit of the flags... 454 // then try doubling width... then height. 
455 if (NULL != entry || kExact_ScratchTexMatch == match) { 456 break; 457 } 458 if (!(desc.fFlags & kRenderTarget_GrTextureFlagBit)) { 459 desc.fFlags = desc.fFlags | kRenderTarget_GrTextureFlagBit; 460 } else if (desc.fFlags & kNoStencil_GrTextureFlagBit) { 461 desc.fFlags = desc.fFlags & ~kNoStencil_GrTextureFlagBit; 462 } else if (!doubledW) { 463 desc.fFlags = inDesc.fFlags; 464 desc.fWidth *= 2; 465 doubledW = true; 466 } else if (!doubledH) { 467 desc.fFlags = inDesc.fFlags; 468 desc.fWidth = origWidth; 469 desc.fHeight *= 2; 470 doubledH = true; 471 } else { 472 break; 473 } 474 475 } while (true); 476 477 if (NULL == entry) { 478 desc.fFlags = inDesc.fFlags; 479 desc.fWidth = origWidth; 480 desc.fHeight = origHeight; 481 GrTexture* texture = fGpu->createTexture(desc, NULL, 0); 482 if (NULL != texture) { 483 uint32_t v[4]; 484 gen_scratch_tex_key_values(fGpu, desc, v); 485 GrResourceKey key(v); 486 entry = fTextureCache->createAndLock(key, texture); 487 } 488 } 489 490 // If the caller gives us the same desc/sampler twice we don't want 491 // to return the same texture the second time (unless it was previously 492 // released). So we detach the entry from the cache and reattach at release. 493 if (NULL != entry) { 494 fTextureCache->detach(entry); 495 } 496 return TextureCacheEntry(entry); 497} 498 499void GrContext::unlockTexture(TextureCacheEntry entry) { 500 ASSERT_OWNED_RESOURCE(entry.texture()); 501 // If this is a scratch texture we detached it from the cache 502 // while it was locked (to avoid two callers simultaneously getting 503 // the same texture). 
504 if (kScratchBit & entry.cacheEntry()->key().getValue32(3)) { 505 fTextureCache->reattachAndUnlock(entry.cacheEntry()); 506 } else { 507 fTextureCache->unlock(entry.cacheEntry()); 508 } 509} 510 511GrTexture* GrContext::createUncachedTexture(const GrTextureDesc& desc, 512 void* srcData, 513 size_t rowBytes) { 514 return fGpu->createTexture(desc, srcData, rowBytes); 515} 516 517void GrContext::getTextureCacheLimits(int* maxTextures, 518 size_t* maxTextureBytes) const { 519 fTextureCache->getLimits(maxTextures, maxTextureBytes); 520} 521 522void GrContext::setTextureCacheLimits(int maxTextures, size_t maxTextureBytes) { 523 fTextureCache->setLimits(maxTextures, maxTextureBytes); 524} 525 526int GrContext::getMaxTextureSize() const { 527 return fGpu->getCaps().fMaxTextureSize; 528} 529 530int GrContext::getMaxRenderTargetSize() const { 531 return fGpu->getCaps().fMaxRenderTargetSize; 532} 533 534/////////////////////////////////////////////////////////////////////////////// 535 536GrTexture* GrContext::createPlatformTexture(const GrPlatformTextureDesc& desc) { 537 return fGpu->createPlatformTexture(desc); 538} 539 540GrRenderTarget* GrContext::createPlatformRenderTarget(const GrPlatformRenderTargetDesc& desc) { 541 return fGpu->createPlatformRenderTarget(desc); 542} 543 544GrResource* GrContext::createPlatformSurface(const GrPlatformSurfaceDesc& desc) { 545 // validate flags here so that GrGpu subclasses don't have to check 546 if (kTexture_GrPlatformSurfaceType == desc.fSurfaceType && 547 0 != desc.fRenderTargetFlags) { 548 return NULL; 549 } 550 if (desc.fSampleCnt && 551 (kGrCanResolve_GrPlatformRenderTargetFlagBit & desc.fRenderTargetFlags)) { 552 return NULL; 553 } 554 if (kTextureRenderTarget_GrPlatformSurfaceType == desc.fSurfaceType && 555 desc.fSampleCnt && 556 !(kGrCanResolve_GrPlatformRenderTargetFlagBit & desc.fRenderTargetFlags)) { 557 return NULL; 558 } 559 return fGpu->createPlatformSurface(desc); 560} 561 
///////////////////////////////////////////////////////////////////////////////

// Reports whether an index8 (paletted) texture of the given size can be used
// with the given sampler on this gpu.
bool GrContext::supportsIndex8PixelConfig(const GrSamplerState* sampler,
                                          int width, int height) const {
    const GrDrawTarget::Caps& caps = fGpu->getCaps();
    if (!caps.f8BitPaletteSupport) {
        return false;
    }

    bool isPow2 = GrIsPow2(width) && GrIsPow2(height);

    if (!isPow2) {
        bool tiled = NULL != sampler &&
                     (sampler->getWrapX() != GrSamplerState::kClamp_WrapMode ||
                      sampler->getWrapY() != GrSamplerState::kClamp_WrapMode);
        // tiled NPOT paletted textures need NPOT tile support.
        if (tiled && !caps.fNPOTTextureTileSupport) {
            return false;
        }
    }
    return true;
}

////////////////////////////////////////////////////////////////////////////////

const GrClip& GrContext::getClip() const { return fGpu->getClip(); }

void GrContext::setClip(const GrClip& clip) {
    fGpu->setClip(clip);
    fGpu->drawState()->enableState(GrDrawState::kClip_StateBit);
}

// Convenience overload: sets the clip from a device-space rect.
void GrContext::setClip(const GrIRect& rect) {
    GrClip clip;
    clip.setFromIRect(rect);
    fGpu->setClip(clip);
}

////////////////////////////////////////////////////////////////////////////////

// Clears rect (or the whole target when rect is NULL) to color; flushes any
// deferred draws first so ordering is preserved.
void GrContext::clear(const GrIRect* rect, const GrColor color) {
    this->flush();
    fGpu->clear(rect, color);
}

// Fills the entire clip with the paint by drawing a rect that covers the
// render target, mapped back through the inverse view matrix.
void GrContext::drawPaint(const GrPaint& paint) {
    // set rect to be big enough to fill the space, but not super-huge, so we
    // don't overflow fixed-point implementations
    GrRect r;
    r.setLTRB(0, 0,
              GrIntToScalar(getRenderTarget()->width()),
              GrIntToScalar(getRenderTarget()->height()));
    GrMatrix inverse;
    SkTLazy<GrPaint> tmpPaint;     // only copied if we must mutate the paint
    const GrPaint* p = &paint;
    GrDrawState* drawState = fGpu->drawState();
    GrAutoMatrix am;

    // We attempt to map r by the inverse matrix and draw that. mapRect will
    // map the four corners and bound them with a new rect. This will not
    // produce a correct result for some perspective matrices.
    if (!this->getMatrix().hasPerspective()) {
        if (!drawState->getViewInverse(&inverse)) {
            GrPrintf("Could not invert matrix");
            return;
        }
        inverse.mapRect(&r);
    } else {
        // perspective: draw in device space instead, fixing up the sampler
        // matrices of any active stages with the view inverse.
        if (paint.getActiveMaskStageMask() || paint.getActiveStageMask()) {
            if (!drawState->getViewInverse(&inverse)) {
                GrPrintf("Could not invert matrix");
                return;
            }
            tmpPaint.set(paint);
            tmpPaint.get()->preConcatActiveSamplerMatrices(inverse);
            p = tmpPaint.get();
        }
        am.set(this, GrMatrix::I());
    }
    // by definition this fills the entire clip, no need for AA
    if (paint.fAntiAlias) {
        if (!tmpPaint.isValid()) {
            tmpPaint.set(paint);
            p = tmpPaint.get();
        }
        GrAssert(p == tmpPaint.get());
        tmpPaint.get()->fAntiAlias = false;
    }
    this->drawRect(*p, r);
}

////////////////////////////////////////////////////////////////////////////////

namespace {
// True when coverage-based AA must be skipped because the current blend
// can't incorporate a coverage value.
inline bool disable_coverage_aa_for_blend(GrDrawTarget* target) {
    return DISABLE_COVERAGE_AA_FOR_BLEND && !target->canApplyCoverage();
}
}

// Bookkeeping for one offscreen-AA render: the scratch supersampled target,
// tiling of the bound rect, and the caller's saved draw state/clip.
struct GrContext::OffscreenRecord {
    enum Downsample {
        k4x4SinglePass_Downsample,
        kFSAA_Downsample
    } fDownsample;
    int fTileSizeX;
    int fTileSizeY;
    int fTileCountX;
    int fTileCountY;
    int fScale;
    GrAutoScratchTexture fOffscreen;
    GrDrawTarget::SavedDrawState fSavedState;
    GrClip fClip;
};

// Decides whether this draw should be antialiased via the offscreen
// supersample/MSAA path.
bool GrContext::doOffscreenAA(GrDrawTarget* target,
                              bool isHairLines) const {
#if !GR_USE_OFFSCREEN_AA
    return false;
#else
    // Line primitves are always rasterized as 1 pixel wide.
    // Super-sampling would make them too thin but MSAA would be OK.
    if (isHairLines &&
        (!PREFER_MSAA_OFFSCREEN_AA || !fGpu->getCaps().fFSAASupport)) {
        return false;
    }
    // already multisampled: hardware AA covers it.
    if (target->getDrawState().getRenderTarget()->isMultisampled()) {
        return false;
    }
    if (disable_coverage_aa_for_blend(target)) {
#if GR_DEBUG
        //GrPrintf("Turning off AA to correctly apply blend.\n");
#endif
        return false;
    }
    return true;
#endif
}

// Allocates the offscreen scratch target, chooses the downsample method
// (MSAA vs 4x4 supersample), computes tiling over boundRect, and saves the
// caller's draw state/clip into record. Returns false on allocation failure.
bool GrContext::prepareForOffscreenAA(GrDrawTarget* target,
                                      bool requireStencil,
                                      const GrIRect& boundRect,
                                      GrPathRenderer* pr,
                                      OffscreenRecord* record) {

    GrAssert(GR_USE_OFFSCREEN_AA);

    GrAssert(NULL == record->fOffscreen.texture());
    GrAssert(!boundRect.isEmpty());

    int boundW = boundRect.width();
    int boundH = boundRect.height();

    GrTextureDesc desc;

    // cap the offscreen at fMaxOffscreenAASize; larger bounds are tiled.
    desc.fWidth  = GrMin(fMaxOffscreenAASize, boundW);
    desc.fHeight = GrMin(fMaxOffscreenAASize, boundH);

    if (requireStencil) {
        desc.fFlags = kRenderTarget_GrTextureFlagBit;
    } else {
        desc.fFlags = kRenderTarget_GrTextureFlagBit |
                      kNoStencil_GrTextureFlagBit;
    }

    desc.fConfig = kRGBA_8888_PM_GrPixelConfig;

    if (PREFER_MSAA_OFFSCREEN_AA && fGpu->getCaps().fFSAASupport) {
        record->fDownsample = OffscreenRecord::kFSAA_Downsample;
        record->fScale = 1;
        desc.fAALevel = kMed_GrAALevel;
    } else {
        record->fDownsample = OffscreenRecord::k4x4SinglePass_Downsample;
        record->fScale = OFFSCREEN_SSAA_SCALE;
        // both downsample paths assume this
        GR_STATIC_ASSERT(4 == OFFSCREEN_SSAA_SCALE);
        desc.fAALevel = kNone_GrAALevel;
    }

    desc.fWidth *= record->fScale;
    desc.fHeight *= record->fScale;
    record->fOffscreen.set(this, desc);
    if (NULL == record->fOffscreen.texture()) {
        return false;
    }
    // the approximate lookup might have given us some slop space, might as well
    // use it when computing the tiles size.
    // these are scale values, will adjust after considering
    // the possible second offscreen.
    record->fTileSizeX = record->fOffscreen.texture()->width();
    record->fTileSizeY = record->fOffscreen.texture()->height();

    record->fTileSizeX /= record->fScale;
    record->fTileSizeY /= record->fScale;

    record->fTileCountX = GrIDivRoundUp(boundW, record->fTileSizeX);
    record->fTileCountY = GrIDivRoundUp(boundH, record->fTileSizeY);

    record->fClip = target->getClip();

    target->saveCurrentDrawState(&record->fSavedState);
    return true;
}

// Pass 1 setup for tile (tileX, tileY): retargets drawing into the offscreen
// render target with a translated+scaled view matrix and clears the tile.
void GrContext::setupOffscreenAAPass1(GrDrawTarget* target,
                                      const GrIRect& boundRect,
                                      int tileX, int tileY,
                                      OffscreenRecord* record) {

    GrRenderTarget* offRT = record->fOffscreen.texture()->asRenderTarget();
    GrAssert(NULL != offRT);

    GrDrawState* drawState = target->drawState();
    GrMatrix vm = drawState->getViewMatrix();
    drawState->reset();
    *drawState->viewMatrix() = vm;
    drawState->setRenderTarget(offRT);

#if PREFER_MSAA_OFFSCREEN_AA
    drawState->enableState(GrDrawState::kHWAntialias_StateBit);
#endif

    // translate this tile's corner to the origin, then scale up by the
    // supersample factor.
    GrMatrix transM;
    int left = boundRect.fLeft + tileX * record->fTileSizeX;
    int top = boundRect.fTop + tileY * record->fTileSizeY;
    transM.setTranslate(-left * GR_Scalar1, -top * GR_Scalar1);
    drawState->viewMatrix()->postConcat(transM);
    GrMatrix scaleM;
    scaleM.setScale(record->fScale * GR_Scalar1, record->fScale * GR_Scalar1);
    drawState->viewMatrix()->postConcat(scaleM);

    // last tile in each direction may be partial.
    int w = (tileX == record->fTileCountX-1) ? boundRect.fRight - left :
                                               record->fTileSizeX;
    int h = (tileY == record->fTileCountY-1) ?
boundRect.fBottom - top : 794 record->fTileSizeY; 795 GrIRect clear = SkIRect::MakeWH(record->fScale * w, 796 record->fScale * h); 797 target->setClip(GrClip(clear)); 798 drawState->enableState(GrDrawState::kClip_StateBit); 799 800#if 0 801 // visualize tile boundaries by setting edges of offscreen to white 802 // and interior to tranparent. black. 803 target->clear(&clear, 0xffffffff); 804 805 static const int gOffset = 2; 806 GrIRect clear2 = SkIRect::MakeLTRB(gOffset, gOffset, 807 record->fScale * w - gOffset, 808 record->fScale * h - gOffset); 809 target->clear(&clear2, 0x0); 810#else 811 target->clear(&clear, 0x0); 812#endif 813} 814 815void GrContext::doOffscreenAAPass2(GrDrawTarget* target, 816 const GrPaint& paint, 817 const GrIRect& boundRect, 818 int tileX, int tileY, 819 OffscreenRecord* record) { 820 SK_TRACE_EVENT0("GrContext::doOffscreenAAPass2"); 821 GrAssert(NULL != record->fOffscreen.texture()); 822 GrDrawTarget::AutoGeometryPush agp(target); 823 GrIRect tileRect; 824 tileRect.fLeft = boundRect.fLeft + tileX * record->fTileSizeX; 825 tileRect.fTop = boundRect.fTop + tileY * record->fTileSizeY, 826 tileRect.fRight = (tileX == record->fTileCountX-1) ? 827 boundRect.fRight : 828 tileRect.fLeft + record->fTileSizeX; 829 tileRect.fBottom = (tileY == record->fTileCountY-1) ? 
830 boundRect.fBottom : 831 tileRect.fTop + record->fTileSizeY; 832 833 GrSamplerState::Filter filter; 834 if (OffscreenRecord::k4x4SinglePass_Downsample == record->fDownsample) { 835 filter = GrSamplerState::k4x4Downsample_Filter; 836 } else { 837 filter = GrSamplerState::kBilinear_Filter; 838 } 839 840 GrTexture* src = record->fOffscreen.texture(); 841 int scale; 842 843 enum { 844 kOffscreenStage = GrPaint::kTotalStages, 845 }; 846 847 GrDrawState* drawState = target->drawState(); 848 849 if (OffscreenRecord::kFSAA_Downsample == record->fDownsample) { 850 scale = 1; 851 GrIRect rect = SkIRect::MakeWH(tileRect.width(), tileRect.height()); 852 src->asRenderTarget()->overrideResolveRect(rect); 853 } else { 854 GrAssert(OffscreenRecord::k4x4SinglePass_Downsample == 855 record->fDownsample); 856 scale = 4; 857 } 858 859 // setup for draw back to main RT, we use the original 860 // draw state setup by the caller plus an additional coverage 861 // stage to handle the AA resolve. Also, we use an identity 862 // view matrix and so pre-concat sampler matrices with view inv. 863 int stageMask = paint.getActiveStageMask(); 864 865 target->restoreDrawState(record->fSavedState); 866 target->setClip(record->fClip); 867 868 if (stageMask) { 869 GrMatrix invVM; 870 if (drawState->getViewInverse(&invVM)) { 871 drawState->preConcatSamplerMatrices(stageMask, invVM); 872 } 873 } 874 // This is important when tiling, otherwise second tile's 875 // pass 1 view matrix will be incorrect. 
876 GrDrawState::AutoViewMatrixRestore avmr(drawState, GrMatrix::I()); 877 878 drawState->setTexture(kOffscreenStage, src); 879 GrSamplerState* sampler = drawState->sampler(kOffscreenStage); 880 sampler->reset(GrSamplerState::kClamp_WrapMode, filter); 881 sampler->matrix()->setScale(scale * GR_Scalar1 / src->width(), 882 scale * GR_Scalar1 / src->height()); 883 sampler->matrix()->preTranslate(SkIntToScalar(-tileRect.fLeft), 884 SkIntToScalar(-tileRect.fTop)); 885 886 GrRect dstRect; 887 int stages = (1 << kOffscreenStage) | stageMask; 888 dstRect.set(tileRect); 889 target->drawSimpleRect(dstRect, NULL, stages); 890} 891 892void GrContext::cleanupOffscreenAA(GrDrawTarget* target, 893 GrPathRenderer* pr, 894 OffscreenRecord* record) { 895 target->restoreDrawState(record->fSavedState); 896} 897 898//////////////////////////////////////////////////////////////////////////////// 899 900/* create a triangle strip that strokes the specified triangle. There are 8 901 unique vertices, but we repreat the last 2 to close up. Alternatively we 902 could use an indices array, and then only send 8 verts, but not sure that 903 would be faster. 
 */
static void setStrokeRectStrip(GrPoint verts[10], GrRect rect,
                               GrScalar width) {
    const GrScalar rad = GrScalarHalf(width);
    rect.sort();

    // even verts are the inner ring, odd verts the outer ring.
    verts[0].set(rect.fLeft + rad, rect.fTop + rad);
    verts[1].set(rect.fLeft - rad, rect.fTop - rad);
    verts[2].set(rect.fRight - rad, rect.fTop + rad);
    verts[3].set(rect.fRight + rad, rect.fTop - rad);
    verts[4].set(rect.fRight - rad, rect.fBottom - rad);
    verts[5].set(rect.fRight + rad, rect.fBottom + rad);
    verts[6].set(rect.fLeft + rad, rect.fBottom - rad);
    verts[7].set(rect.fLeft - rad, rect.fBottom + rad);
    // repeat the first pair to close the strip.
    verts[8] = verts[0];
    verts[9] = verts[1];
}

// Writes a 4-vertex rect fan inset by (dx, dy) into pts with the given
// byte stride.
static void setInsetFan(GrPoint* pts, size_t stride,
                        const GrRect& r, GrScalar dx, GrScalar dy) {
    pts->setRectFan(r.fLeft + dx, r.fTop + dy, r.fRight - dx, r.fBottom - dy, stride);
}

// Triangles joining an outer fan (verts 0-3) to an inner fan (verts 4-7),
// plus the interior quad.
static const uint16_t gFillAARectIdx[] = {
    0, 1, 5, 5, 4, 0,
    1, 2, 6, 6, 5, 1,
    2, 3, 7, 7, 6, 2,
    3, 0, 4, 4, 7, 3,
    4, 5, 6, 6, 7, 4,
};

int GrContext::aaFillRectIndexCount() const {
    return GR_ARRAY_COUNT(gFillAARectIdx);
}

// Lazily creates (and caches on the context) the AA fill-rect index buffer.
GrIndexBuffer* GrContext::aaFillRectIndexBuffer() {
    if (NULL == fAAFillRectIndexBuffer) {
        fAAFillRectIndexBuffer = fGpu->createIndexBuffer(sizeof(gFillAARectIdx),
                                                         false);
        if (NULL != fAAFillRectIndexBuffer) {
    #if GR_DEBUG
            bool updated =
    #endif
            fAAFillRectIndexBuffer->updateData(gFillAARectIdx,
                                               sizeof(gFillAARectIdx));
            GR_DEBUGASSERT(updated);
        }
    }
    return fAAFillRectIndexBuffer;
}

// Same ring pattern repeated for the three nested fans (outer, mid, inner)
// of a stroked AA rect; +0/+4/+8 select the ring's base vertex.
static const uint16_t gStrokeAARectIdx[] = {
    0 + 0, 1 + 0, 5 + 0, 5 + 0, 4 + 0, 0 + 0,
    1 + 0, 2 + 0, 6 + 0, 6 + 0, 5 + 0, 1 + 0,
    2 + 0, 3 + 0, 7 + 0, 7 + 0, 6 + 0, 2 + 0,
    3 + 0, 0 + 0, 4 + 0, 4 + 0, 7 + 0, 3 + 0,

    0 + 4, 1 + 4, 5 + 4, 5 + 4, 4 + 4, 0 + 4,
    1 + 4, 2 + 4, 6 + 4, 6 + 4, 5 + 4, 1 + 4,
    2 + 4, 3 + 4, 7 + 4, 7 + 4, 6 + 4, 2 + 4,
    3 + 4, 0 + 4, 4 + 4, 4 + 4, 7 + 4, 3 + 4,

    0 + 8, 1 + 8, 5 + 8, 5 + 8, 4 + 8, 0 + 8,
    1 + 8, 2 + 8, 6 + 8, 6 + 8, 5 + 8, 1 + 8,
    2 + 8, 3 + 8, 7 + 8, 7 + 8, 6 + 8, 2 + 8,
    3 + 8, 0 + 8, 4 + 8, 4 + 8, 7 + 8, 3 + 8,
};

int GrContext::aaStrokeRectIndexCount() const {
    return GR_ARRAY_COUNT(gStrokeAARectIdx);
}

// Lazily creates (and caches on the context) the AA stroke-rect index buffer.
GrIndexBuffer* GrContext::aaStrokeRectIndexBuffer() {
    if (NULL == fAAStrokeRectIndexBuffer) {
        fAAStrokeRectIndexBuffer = fGpu->createIndexBuffer(sizeof(gStrokeAARectIdx),
                                                           false);
        if (NULL != fAAStrokeRectIndexBuffer) {
    #if GR_DEBUG
            bool updated =
    #endif
            fAAStrokeRectIndexBuffer->updateData(gStrokeAARectIdx,
                                                 sizeof(gStrokeAARectIdx));
            GR_DEBUGASSERT(updated);
        }
    }
    return fAAStrokeRectIndexBuffer;
}

// Vertex layout for AA rects: pos-as-texcoord for each bound stage, plus a
// per-vertex coverage or color channel for the AA ramp.
static GrVertexLayout aa_rect_layout(const GrDrawTarget* target,
                                     bool useCoverage) {
    GrVertexLayout layout = 0;
    for (int s = 0; s < GrDrawState::kNumStages; ++s) {
        if (NULL != target->getDrawState().getTexture(s)) {
            layout |= GrDrawTarget::StagePosAsTexCoordVertexLayoutBit(s);
        }
    }
    if (useCoverage) {
        layout |= GrDrawTarget::kCoverage_VertexLayoutBit;
    } else {
        layout |= GrDrawTarget::kColor_VertexLayoutBit;
    }
    return layout;
}

// Draws devRect with a 1-pixel coverage ramp: an outer fan at zero
// coverage/color and an inner fan at full value.
void GrContext::fillAARect(GrDrawTarget* target,
                           const GrRect& devRect,
                           bool useVertexCoverage) {
    GrVertexLayout layout = aa_rect_layout(target, useVertexCoverage);

    size_t vsize = GrDrawTarget::VertexSize(layout);

    GrDrawTarget::AutoReleaseGeometry geo(target, layout, 8, 0);
    if (!geo.succeeded()) {
        GrPrintf("Failed to get space for vertices!\n");
        return;
    }
    GrIndexBuffer* indexBuffer = this->aaFillRectIndexBuffer();
    if (NULL == indexBuffer) {
        GrPrintf("Failed to create index buffer!\n");
        return;
    }

    intptr_t verts = reinterpret_cast<intptr_t>(geo.vertices());

    GrPoint* fan0Pos = reinterpret_cast<GrPoint*>(verts);
    GrPoint*
             fan1Pos = reinterpret_cast<GrPoint*>(verts + 4 * vsize);

    // outer fan outset by half a pixel, inner fan inset by half a pixel
    setInsetFan(fan0Pos, vsize, devRect, -GR_ScalarHalf, -GR_ScalarHalf);
    setInsetFan(fan1Pos, vsize, devRect, GR_ScalarHalf, GR_ScalarHalf);

    verts += sizeof(GrPoint);
    // outer ring: zero color/coverage so the edge ramps to transparent
    for (int i = 0; i < 4; ++i) {
        *reinterpret_cast<GrColor*>(verts + i * vsize) = 0;
    }

    GrColor innerColor;
    if (useVertexCoverage) {
        innerColor = 0xffffffff;
    } else {
        innerColor = target->getDrawState().getColor();
    }

    verts += 4 * vsize;
    // inner ring: full color/coverage
    for (int i = 0; i < 4; ++i) {
        *reinterpret_cast<GrColor*>(verts + i * vsize) = innerColor;
    }

    target->setIndexSourceToBuffer(indexBuffer);

    target->drawIndexed(kTriangles_PrimitiveType, 0,
                        0, 8, this->aaFillRectIndexCount());
}

// Draws an antialiased, axis-aligned stroked rect (already in device coords)
// using four nested fans; falls back to fillAARect when the stroke halves
// would overlap in the middle.
void GrContext::strokeAARect(GrDrawTarget* target,
                             const GrRect& devRect,
                             const GrVec& devStrokeSize,
                             bool useVertexCoverage) {
    const GrScalar& dx = devStrokeSize.fX;
    const GrScalar& dy = devStrokeSize.fY;
    const GrScalar rx = GrMul(dx, GR_ScalarHalf);
    const GrScalar ry = GrMul(dy, GR_ScalarHalf);

    GrScalar spare;
    {
        GrScalar w = devRect.width() - dx;
        GrScalar h = devRect.height() - dy;
        spare = GrMin(w, h);
    }

    // stroke is at least as thick as the rect: draw it as an outset fill
    if (spare <= 0) {
        GrRect r(devRect);
        r.inset(-rx, -ry);
        fillAARect(target, r, useVertexCoverage);
        return;
    }
    GrVertexLayout layout = aa_rect_layout(target, useVertexCoverage);
    size_t vsize = GrDrawTarget::VertexSize(layout);

    GrDrawTarget::AutoReleaseGeometry geo(target, layout, 16, 0);
    if (!geo.succeeded()) {
        GrPrintf("Failed to get space for vertices!\n");
        return;
    }
    GrIndexBuffer* indexBuffer = this->aaStrokeRectIndexBuffer();
    if (NULL == indexBuffer) {
        GrPrintf("Failed to create index buffer!\n");
        return;
    }

    intptr_t verts = reinterpret_cast<intptr_t>(geo.vertices());

    GrPoint* fan0Pos = reinterpret_cast<GrPoint*>(verts);
    GrPoint* fan1Pos = reinterpret_cast<GrPoint*>(verts + 4 * vsize);
    GrPoint* fan2Pos = reinterpret_cast<GrPoint*>(verts + 8 * vsize);
    GrPoint* fan3Pos = reinterpret_cast<GrPoint*>(verts + 12 * vsize);

    // rings from outermost to innermost, each half a pixel off the stroke's
    // geometric edges
    setInsetFan(fan0Pos, vsize, devRect, -rx - GR_ScalarHalf, -ry - GR_ScalarHalf);
    setInsetFan(fan1Pos, vsize, devRect, -rx + GR_ScalarHalf, -ry + GR_ScalarHalf);
    setInsetFan(fan2Pos, vsize, devRect, rx - GR_ScalarHalf, ry - GR_ScalarHalf);
    setInsetFan(fan3Pos, vsize, devRect, rx + GR_ScalarHalf, ry + GR_ScalarHalf);

    verts += sizeof(GrPoint);
    // outermost ring (vertices 0..3): transparent
    for (int i = 0; i < 4; ++i) {
        *reinterpret_cast<GrColor*>(verts + i * vsize) = 0;
    }

    GrColor innerColor;
    if (useVertexCoverage) {
        innerColor = 0xffffffff;
    } else {
        innerColor = target->getDrawState().getColor();
    }
    verts += 4 * vsize;
    // the two middle rings (vertices 4..11): full color/coverage
    for (int i = 0; i < 8; ++i) {
        *reinterpret_cast<GrColor*>(verts + i * vsize) = innerColor;
    }

    verts += 8 * vsize;
    // NOTE(review): geo was allocated with 16 vertices and this loop starts at
    // vertex 12, yet writes 8 entries — looks like it runs 4 vertices past the
    // allocation (the innermost ring is only vertices 12..15). Confirm whether
    // the bound should be 4.
    for (int i = 0; i < 8; ++i) {
        *reinterpret_cast<GrColor*>(verts + i * vsize) = 0;
    }

    target->setIndexSourceToBuffer(indexBuffer);
    target->drawIndexed(kTriangles_PrimitiveType,
                        0, 0, 16, aaStrokeRectIndexCount());
}

/**
 * Returns true if the rects edges are integer-aligned.
 */
static bool isIRect(const GrRect& r) {
    return GrScalarIsInt(r.fLeft) && GrScalarIsInt(r.fTop) &&
           GrScalarIsInt(r.fRight) && GrScalarIsInt(r.fBottom);
}

// Decides whether a rect draw should take the coverage-AA path. On success,
// writes the view matrix (pre-concatenated with the optional extra 'matrix')
// to 'combinedMatrix', the mapped device-space rect to 'devRect', and whether
// AA must be expressed as per-vertex coverage rather than a tweaked alpha.
static bool apply_aa_to_rect(GrDrawTarget* target,
                             const GrRect& rect,
                             GrScalar width,
                             const GrMatrix* matrix,
                             GrMatrix* combinedMatrix,
                             GrRect* devRect,
                             bool* useVertexCoverage) {
    // we use a simple coverage ramp to do aa on axis-aligned rects
    // we check if the rect will be axis-aligned, and the rect won't land on
    // integer coords.

    // we are keeping around the "tweak the alpha" trick because
    // it is our only hope for the fixed-pipe implementation.
    // In a shader implementation we can give a separate coverage input
    // TODO: remove this ugliness when we drop the fixed-pipe impl
    *useVertexCoverage = false;
    if (!target->canTweakAlphaForCoverage()) {
        if (disable_coverage_aa_for_blend(target)) {
#if GR_DEBUG
            //GrPrintf("Turning off AA to correctly apply blend.\n");
#endif
            return false;
        } else {
            *useVertexCoverage = true;
        }
    }
    const GrDrawState& drawState = target->getDrawState();
    // multisampled targets already get hardware AA
    if (drawState.getRenderTarget()->isMultisampled()) {
        return false;
    }

    // hairlines are left to hardware line AA when it will be used
    if (0 == width && target->willUseHWAALines()) {
        return false;
    }

    // the coverage ramp only works when the rect stays axis-aligned
    if (!drawState.getViewMatrix().preservesAxisAlignment()) {
        return false;
    }

    if (NULL != matrix &&
        !matrix->preservesAxisAlignment()) {
        return false;
    }

    *combinedMatrix = drawState.getViewMatrix();
    if (NULL != matrix) {
        combinedMatrix->preConcat(*matrix);
        GrAssert(combinedMatrix->preservesAxisAlignment());
    }

    combinedMatrix->mapRect(devRect, rect);
    devRect->sort();

    if (width < 0) {
        // a fill landing exactly on integer edges needs no AA
        return !isIRect(*devRect);
    } else {
        return true;
    }
}

// Draws a rect: filled when width < 0, hairline when width == 0, otherwise
// stroked with the given width. 'matrix' (optional) applies to the rect's
// geometry but not the paint.
void GrContext::drawRect(const GrPaint& paint,
                         const GrRect& rect,
                         GrScalar width,
                         const GrMatrix* matrix) {
    SK_TRACE_EVENT0("GrContext::drawRect");

    GrDrawTarget* target = this->prepareToDraw(paint, kUnbuffered_DrawCategory);
    int stageMask = paint.getActiveStageMask();

    GrRect devRect = rect;
    GrMatrix combinedMatrix;
    bool useVertexCoverage;
    bool needAA = paint.fAntiAlias &&
                  !this->getRenderTarget()->isMultisampled();
    bool doAA = needAA && apply_aa_to_rect(target, rect, width, matrix,
                                           &combinedMatrix, &devRect,
                                           &useVertexCoverage);

    if (doAA) {
        // coverage-AA rects are drawn directly in device coordinates
        GrDrawTarget::AutoDeviceCoordDraw adcd(target, stageMask);
        if (width >= 0) {
            GrVec strokeSize;;  // (note: the second ';' is a stray empty statement)
            if (width > 0) {
                strokeSize.set(width, width);
                combinedMatrix.mapVectors(&strokeSize, 1);
                strokeSize.setAbs(strokeSize);
            } else {
                // hairline: one-pixel stroke
                strokeSize.set(GR_Scalar1, GR_Scalar1);
            }
            strokeAARect(target, devRect, strokeSize, useVertexCoverage);
        } else {
            fillAARect(target, devRect, useVertexCoverage);
        }
        return;
    }

    if (width >= 0) {
        // TODO: consider making static vertex buffers for these cases.
        // Hairline could be done by just adding closing vertex to
        // unitSquareVertexBuffer()
        GrVertexLayout layout = PaintStageVertexLayoutBits(paint, NULL);

        static const int worstCaseVertCount = 10;
        GrDrawTarget::AutoReleaseGeometry geo(target, layout, worstCaseVertCount, 0);

        if (!geo.succeeded()) {
            GrPrintf("Failed to get space for vertices!\n");
            return;
        }

        GrPrimitiveType primType;
        int vertCount;
        GrPoint* vertex = geo.positions();

        if (width > 0) {
            vertCount = 10;
            primType = kTriangleStrip_PrimitiveType;
            setStrokeRectStrip(vertex, rect, width);
        } else {
            // hairline
            vertCount = 5;
            primType = kLineStrip_PrimitiveType;
            vertex[0].set(rect.fLeft, rect.fTop);
            vertex[1].set(rect.fRight, rect.fTop);
            vertex[2].set(rect.fRight, rect.fBottom);
            vertex[3].set(rect.fLeft, rect.fBottom);
            vertex[4].set(rect.fLeft, rect.fTop);
        }

        GrDrawState::AutoViewMatrixRestore avmr;
        if (NULL != matrix) {
            GrDrawState* drawState = target->drawState();
            avmr.set(drawState);
            drawState->preConcatViewMatrix(*matrix);
            drawState->preConcatSamplerMatrices(stageMask, *matrix);
        }

        target->drawNonIndexed(primType, 0, vertCount);
    } else {
#if GR_STATIC_RECT_VB
        GrVertexLayout layout = PaintStageVertexLayoutBits(paint, NULL);
        const GrVertexBuffer* sqVB = fGpu->getUnitSquareVertexBuffer();
        if (NULL == sqVB) {
            GrPrintf("Failed to create static rect vb.\n");
            return;
        }
        target->setVertexSourceToBuffer(layout, sqVB);
        GrDrawState* drawState = target->drawState();
        GrDrawState::AutoViewMatrixRestore avmr(drawState);
        // map the static unit-square vertex buffer onto 'rect'
        GrMatrix m;
        m.setAll(rect.width(), 0, rect.fLeft,
                 0, rect.height(), rect.fTop,
                 0, 0, GrMatrix::I()[8]);

        if (NULL != matrix) {
            m.postConcat(*matrix);
        }
        drawState->preConcatViewMatrix(m);
        drawState->preConcatSamplerMatrices(stageMask, m);

        target->drawNonIndexed(kTriangleFan_PrimitiveType, 0, 4);
#else
        target->drawSimpleRect(rect, matrix, stageMask);
#endif
    }
}

// Draws 'srcRect' of the paint's first texture into 'dstRect', with optional
// extra matrices applied to each rect. Falls back to drawRect when the paint
// has no first texture.
void GrContext::drawRectToRect(const GrPaint& paint,
                               const GrRect& dstRect,
                               const GrRect& srcRect,
                               const GrMatrix* dstMatrix,
                               const GrMatrix* srcMatrix) {
    SK_TRACE_EVENT0("GrContext::drawRectToRect");

    // srcRect refers to paint's first texture
    if (NULL == paint.getTexture(0)) {
        drawRect(paint, dstRect, -1, dstMatrix);
        return;
    }

    GR_STATIC_ASSERT(!BATCH_RECT_TO_RECT || !GR_STATIC_RECT_VB);

#if GR_STATIC_RECT_VB
    GrDrawTarget* target = this->prepareToDraw(paint, kUnbuffered_DrawCategory);
    GrDrawState* drawState = target->drawState();
    GrVertexLayout layout = PaintStageVertexLayoutBits(paint, NULL);
    GrDrawState::AutoViewMatrixRestore avmr(drawState);

    GrMatrix m;

    // map the unit square onto dstRect
    m.setAll(dstRect.width(), 0, dstRect.fLeft,
             0, dstRect.height(), dstRect.fTop,
             0, 0, GrMatrix::I()[8]);
    if (NULL != dstMatrix) {
        m.postConcat(*dstMatrix);
    }
    drawState->preConcatViewMatrix(m);

    // srcRect refers to first stage
    int otherStageMask = paint.getActiveStageMask() &
                         (~(1 << GrPaint::kFirstTextureStage));
    if (otherStageMask) {
        drawState->preConcatSamplerMatrices(otherStageMask, m);
    }

    // map the unit square onto srcRect for the first texture stage
    m.setAll(srcRect.width(), 0, srcRect.fLeft,
             0, srcRect.height(), srcRect.fTop,
             0, 0, GrMatrix::I()[8]);
    if (NULL != srcMatrix) {
        m.postConcat(*srcMatrix);
    }
    drawState->sampler(GrPaint::kFirstTextureStage)->preConcatMatrix(m);

    const GrVertexBuffer* sqVB = fGpu->getUnitSquareVertexBuffer();
    if (NULL == sqVB) {
        GrPrintf("Failed to create static rect vb.\n");
        return;
    }
    target->setVertexSourceToBuffer(layout, sqVB);
    target->drawNonIndexed(kTriangleFan_PrimitiveType, 0, 4);
#else

    GrDrawTarget* target;
#if BATCH_RECT_TO_RECT
    target = this->prepareToDraw(paint, kBuffered_DrawCategory);
#else
    target = this->prepareToDraw(paint, kUnbuffered_DrawCategory);
#endif

    const GrRect* srcRects[GrDrawState::kNumStages] = {NULL};
    const GrMatrix* srcMatrices[GrDrawState::kNumStages] = {NULL};
    srcRects[0] = &srcRect;
    srcMatrices[0] = srcMatrix;

    target->drawRect(dstRect, dstMatrix, 1, srcRects, srcMatrices);
#endif
}

// Draws caller-supplied geometry. texCoords, colors, and indices may each be
// NULL; texCoords (when present) feed stage 0 only.
void GrContext::drawVertices(const GrPaint& paint,
                             GrPrimitiveType primitiveType,
                             int vertexCount,
                             const GrPoint positions[],
                             const GrPoint texCoords[],
                             const GrColor colors[],
                             const uint16_t indices[],
                             int indexCount) {
    SK_TRACE_EVENT0("GrContext::drawVertices");

    GrDrawTarget::AutoReleaseGeometry geo;

    GrDrawTarget* target = this->prepareToDraw(paint, kUnbuffered_DrawCategory);

    bool hasTexCoords[GrPaint::kTotalStages] = {
        NULL != texCoords,   // texCoordSrc provides explicit stage 0 coords
        0                    // remaining stages use positions
    };

    GrVertexLayout layout = PaintStageVertexLayoutBits(paint, hasTexCoords);

    if (NULL != colors) {
        layout |= GrDrawTarget::kColor_VertexLayoutBit;
    }
    int vertexSize = GrDrawTarget::VertexSize(layout);

    if
       (sizeof(GrPoint) != vertexSize) {
        // layout carries more than position: interleave the caller's arrays
        // into scratch geometry
        if (!geo.set(target, layout, vertexCount, 0)) {
            GrPrintf("Failed to get space for vertices!\n");
            return;
        }
        int texOffsets[GrDrawState::kMaxTexCoords];
        int colorOffset;
        GrDrawTarget::VertexSizeAndOffsetsByIdx(layout,
                                                texOffsets,
                                                &colorOffset,
                                                NULL,
                                                NULL);
        void* curVertex = geo.vertices();

        for (int i = 0; i < vertexCount; ++i) {
            *((GrPoint*)curVertex) = positions[i];

            if (texOffsets[0] > 0) {
                *(GrPoint*)((intptr_t)curVertex + texOffsets[0]) = texCoords[i];
            }
            if (colorOffset > 0) {
                *(GrColor*)((intptr_t)curVertex + colorOffset) = colors[i];
            }
            curVertex = (void*)((intptr_t)curVertex + vertexSize);
        }
    } else {
        // position-only layout: draw straight from the caller's array
        target->setVertexSourceToArray(layout, positions, vertexCount);
    }

    // we don't currently apply offscreen AA to this path. Need improved
    // management of GrDrawTarget's geometry to avoid copying points per-tile.

    if (NULL != indices) {
        target->setIndexSourceToArray(indices, indexCount);
        target->drawIndexed(primitiveType, 0, 0, vertexCount, indexCount);
    } else {
        target->drawNonIndexed(primitiveType, 0, vertexCount);
    }
}

///////////////////////////////////////////////////////////////////////////////
#include "SkDraw.h"
#include "SkRasterClip.h"

namespace {

// Maps a GrPathFill to the equivalent SkPath fill type; crashes on fills with
// no Skia equivalent (e.g. hairline).
SkPath::FillType gr_fill_to_sk_fill(GrPathFill fill) {
    switch (fill) {
        case kWinding_PathFill:
            return SkPath::kWinding_FillType;
        case kEvenOdd_PathFill:
            return SkPath::kEvenOdd_FillType;
        case kInverseWinding_PathFill:
            return SkPath::kInverseWinding_FillType;
        case kInverseEvenOdd_PathFill:
            return SkPath::kInverseEvenOdd_FillType;
        default:
            GrCrash("Unexpected fill.");
            return SkPath::kWinding_FillType;
    }
}

// gets device coord bounds of path (not considering the fill) and clip. The
// path bounds will be a subset of the clip bounds. returns false if path bounds
// would be empty.
bool get_path_and_clip_bounds(const GrDrawTarget* target,
                              const GrPath& path,
                              const GrVec* translate,
                              GrIRect* pathBounds,
                              GrIRect* clipBounds) {
    // compute bounds as intersection of rt size, clip, and path
    const GrRenderTarget* rt = target->getDrawState().getRenderTarget();
    if (NULL == rt) {
        return false;
    }
    *pathBounds = GrIRect::MakeWH(rt->width(), rt->height());
    const GrClip& clip = target->getClip();
    if (clip.hasConservativeBounds()) {
        clip.getConservativeBounds().roundOut(clipBounds);
        if (!pathBounds->intersect(*clipBounds)) {
            return false;
        }
    } else {
        // pathBounds is currently the rt extent, set clip bounds to that rect.
        *clipBounds = *pathBounds;
    }
    GrRect pathSBounds = path.getBounds();
    if (!pathSBounds.isEmpty()) {
        if (NULL != translate) {
            pathSBounds.offset(*translate);
        }
        target->getDrawState().getViewMatrix().mapRect(&pathSBounds,
                                                       pathSBounds);
        GrIRect pathIBounds;
        pathSBounds.roundOut(&pathIBounds);
        if (!pathBounds->intersect(pathIBounds)) {
            return false;
        }
    } else {
        // an empty path has nothing to draw
        return false;
    }
    return true;
}

/**
 * sw rasterizes path to A8 mask using the context's matrix and uploads to a
 * scratch texture.
 */

bool sw_draw_path_to_mask_texture(const GrPath& clientPath,
                                  const GrIRect& pathDevBounds,
                                  GrPathFill fill,
                                  GrContext* context,
                                  const GrPoint* translate,
                                  GrAutoScratchTexture* tex) {
    SkPaint paint;
    SkPath tmpPath;
    const SkPath* pathToDraw = &clientPath;
    if (kHairLine_PathFill == fill) {
        // approximate hairline with a one-pixel stroke
        paint.setStyle(SkPaint::kStroke_Style);
        paint.setStrokeWidth(SK_Scalar1);
    } else {
        paint.setStyle(SkPaint::kFill_Style);
        SkPath::FillType skfill = gr_fill_to_sk_fill(fill);
        if (skfill != pathToDraw->getFillType()) {
            // only copy the path when the fill type has to change
            tmpPath = *pathToDraw;
            tmpPath.setFillType(skfill);
            pathToDraw = &tmpPath;
        }
    }
    paint.setAntiAlias(true);
    paint.setColor(SK_ColorWHITE);

    GrMatrix matrix = context->getMatrix();
    if (NULL != translate) {
        matrix.postTranslate(translate->fX, translate->fY);
    }

    // shift so the mask's origin coincides with pathDevBounds' top-left
    matrix.postTranslate(-pathDevBounds.fLeft * SK_Scalar1,
                         -pathDevBounds.fTop * SK_Scalar1);
    GrIRect bounds = GrIRect::MakeWH(pathDevBounds.width(),
                                     pathDevBounds.height());

    SkBitmap bm;
    bm.setConfig(SkBitmap::kA8_Config, bounds.fRight, bounds.fBottom);
    if (!bm.allocPixels()) {
        return false;
    }
    sk_bzero(bm.getPixels(), bm.getSafeSize());

    // rasterize in software with Skia's CPU path renderer
    SkDraw draw;
    sk_bzero(&draw, sizeof(draw));
    SkRasterClip rc(bounds);
    draw.fRC = &rc;
    draw.fClip = &rc.bwRgn();
    draw.fMatrix = &matrix;
    draw.fBitmap = &bm;
    draw.drawPath(*pathToDraw, paint);

    const GrTextureDesc desc = {
        kNone_GrTextureFlags,
        kNone_GrAALevel,
        bounds.fRight,
        bounds.fBottom,
        kAlpha_8_GrPixelConfig
    };

    tex->set(context, desc);
    GrTexture* texture = tex->texture();

    if (NULL == texture) {
        return false;
    }
    // upload the rasterized A8 mask into the scratch texture
    SkAutoLockPixels alp(bm);
    texture->writePixels(0, 0, desc.fWidth, desc.fHeight, desc.fConfig,
                         bm.getPixels(), bm.rowBytes());
    return true;
}

// For inverse fills: paints the portion of the clip bounds not covered by the
// path's device bounds (up to four edge rects).
void draw_around_inv_path(GrDrawTarget* target,
                          GrDrawState::StageMask stageMask,
                          const GrIRect& clipBounds,
                          const GrIRect& pathBounds) {
    GrDrawTarget::AutoDeviceCoordDraw adcd(target, stageMask);
    GrRect rect;
    if (clipBounds.fTop < pathBounds.fTop) {
        rect.iset(clipBounds.fLeft, clipBounds.fTop,
                  clipBounds.fRight, pathBounds.fTop);
        target->drawSimpleRect(rect, NULL, stageMask);
    }
    if (clipBounds.fLeft < pathBounds.fLeft) {
        rect.iset(clipBounds.fLeft, pathBounds.fTop,
                  pathBounds.fLeft, pathBounds.fBottom);
        target->drawSimpleRect(rect, NULL, stageMask);
    }
    if (clipBounds.fRight > pathBounds.fRight) {
        rect.iset(pathBounds.fRight, pathBounds.fTop,
                  clipBounds.fRight, pathBounds.fBottom);
        target->drawSimpleRect(rect, NULL, stageMask);
    }
    if (clipBounds.fBottom > pathBounds.fBottom) {
        rect.iset(clipBounds.fLeft, pathBounds.fBottom,
                  clipBounds.fRight, clipBounds.fBottom);
        target->drawSimpleRect(rect, NULL, stageMask);
    }
}

}

void GrContext::drawPath(const GrPaint& paint, const GrPath& path,
                         GrPathFill fill, const GrPoint* translate) {

    if (path.isEmpty()) {
        // an empty inverse-filled path covers everything the paint touches
        if (GrIsFillInverted(fill)) {
            this->drawPaint(paint);
        }
        return;
    }

    GrDrawTarget* target = this->prepareToDraw(paint, kUnbuffered_DrawCategory);
    GrDrawState::StageMask stageMask = paint.getActiveStageMask();

    bool prAA = paint.fAntiAlias && !this->getRenderTarget()->isMultisampled();

    // An Assumption here is that path renderer would use some form of tweaking
    // the src color (either the input alpha or in the frag shader) to implement
    // aa. If we have some future driver-mojo path AA that can do the right
    // thing WRT to the blend then we'll need some query on the PR.
    if (disable_coverage_aa_for_blend(target)) {
#if GR_DEBUG
        //GrPrintf("Turning off AA to correctly apply blend.\n");
#endif
        prAA = false;
    }

    bool doOSAA = false;
    GrPathRenderer* pr = NULL;
    if (prAA) {
        pr = this->getPathRenderer(path, fill, true);
        if (NULL == pr) {
            // no AA-capable renderer: try offscreen supersampling, then a
            // software-rasterized mask, before dropping AA entirely
            GrAutoScratchTexture ast;
            GrIRect pathBounds, clipBounds;
            if (!get_path_and_clip_bounds(target, path, translate,
                                          &pathBounds, &clipBounds)) {
                return;
            }
            prAA = false;
            if (this->doOffscreenAA(target, kHairLine_PathFill == fill)) {
                pr = this->getPathRenderer(path, fill, false);
                doOSAA = true;
            }
            if (NULL == pr && sw_draw_path_to_mask_texture(path, pathBounds,
                                                           fill, this,
                                                           translate, &ast)) {
                // draw the sw-rasterized mask via an extra coverage stage
                GrTexture* texture = ast.texture();
                GrAssert(NULL != texture);
                GrDrawTarget::AutoDeviceCoordDraw adcd(target, stageMask);
                enum {
                    kPathMaskStage = GrPaint::kTotalStages,
                };
                target->drawState()->setTexture(kPathMaskStage, texture);
                target->drawState()->sampler(kPathMaskStage)->reset();
                GrScalar w = GrIntToScalar(pathBounds.width());
                GrScalar h = GrIntToScalar(pathBounds.height());
                // normalized src rect: the mask may occupy only part of the
                // (approximately sized) scratch texture
                GrRect maskRect = GrRect::MakeWH(w / texture->width(),
                                                 h / texture->height());
                const GrRect* srcRects[GrDrawState::kNumStages] = {NULL};
                srcRects[kPathMaskStage] = &maskRect;
                stageMask |= 1 << kPathMaskStage;
                GrRect dstRect = GrRect::MakeLTRB(
                    SK_Scalar1* pathBounds.fLeft,
                    SK_Scalar1* pathBounds.fTop,
                    SK_Scalar1* pathBounds.fRight,
                    SK_Scalar1* pathBounds.fBottom);
                target->drawRect(dstRect, NULL, stageMask, srcRects, NULL);
                target->drawState()->setTexture(kPathMaskStage, NULL);
                if (GrIsFillInverted(fill)) {
                    draw_around_inv_path(target, stageMask,
                                         clipBounds, pathBounds);
                }
                return;
            }
        }
    } else {
        pr = this->getPathRenderer(path, fill, false);
    }

    if (NULL == pr) {
#if GR_DEBUG
        GrPrintf("Unable to find path renderer compatible with path.\n");
#endif
        return;
    }

    GrPathRenderer::AutoClearPath arp(pr, target, &path, fill, prAA, translate);

    if (doOSAA) {
        // offscreen AA: render the path tile-by-tile into the offscreen
        // surface and resolve each tile back in pass 2
        bool needsStencil = pr->requiresStencilPass(target, path, fill);
        GrIRect pathBounds;
        GrIRect clipBounds;
        if (!get_path_and_clip_bounds(target, path, translate,
                                      &pathBounds, &clipBounds)) {
            return;
        }
        OffscreenRecord record;
        if (this->prepareForOffscreenAA(target, needsStencil, pathBounds,
                                        pr, &record)) {
            for (int tx = 0; tx < record.fTileCountX; ++tx) {
                for (int ty = 0; ty < record.fTileCountY; ++ty) {
                    this->setupOffscreenAAPass1(target, pathBounds,
                                                tx, ty, &record);
                    pr->drawPath(0);
                    this->doOffscreenAAPass2(target, paint, pathBounds,
                                             tx, ty, &record);
                }
            }
            this->cleanupOffscreenAA(target, pr, &record);
            if (GrIsFillInverted(fill)) {
                draw_around_inv_path(target, stageMask, clipBounds, pathBounds);
            }
            return;
        }
    }
    // fallthrough: ordinary (possibly non-AA) path rendering
    pr->drawPath(stageMask);
}

////////////////////////////////////////////////////////////////////////////////

// Flushes deferred draws; kDiscard drops them instead of playing them back.
void GrContext::flush(int flagsBitfield) {
    if (kDiscard_FlushBit & flagsBitfield) {
        fDrawBuffer->reset();
    } else {
        this->flushDrawBuffer();
    }
    if (kForceCurrentRenderTarget_FlushBit & flagsBitfield) {
        fGpu->forceRenderTargetFlush();
    }
}

// Flushes the draw buffer only when the most recent draws were text draws.
void GrContext::flushText() {
    if (kText_DrawCategory == fLastDrawCategory) {
        flushDrawBuffer();
    }
}

// Plays any buffered draws back to the gpu and resets the buffer.
void GrContext::flushDrawBuffer() {
#if BATCH_RECT_TO_RECT || DEFER_TEXT_RENDERING
    if (fDrawBuffer) {
        fDrawBuffer->playback(fGpu);
        fDrawBuffer->reset();
    }
#endif
}

// Uploads pixels into a sub-rect of 'texture'. Currently returns silently if
// a premul <-> unpremul conversion would be required.
void GrContext::internalWriteTexturePixels(GrTexture* texture,
                                           int left, int top,
                                           int width, int height,
                                           GrPixelConfig config,
                                           const void*
                                                     buffer,
                                           size_t rowBytes,
                                           uint32_t flags) {
    SK_TRACE_EVENT0("GrContext::writeTexturePixels");
    ASSERT_OWNED_RESOURCE(texture);

    if (!(kDontFlush_PixelOpsFlag & flags)) {
        this->flush();
    }
    // TODO: use scratch texture to perform conversion
    if (GrPixelConfigIsUnpremultiplied(texture->config()) !=
        GrPixelConfigIsUnpremultiplied(config)) {
        return;
    }

    fGpu->writeTexturePixels(texture, left, top, width, height,
                             config, buffer, rowBytes);
}

// Reads pixels from a texture. Currently only works when the texture is also
// a render target (delegates to internalReadRenderTargetPixels); otherwise
// returns false.
bool GrContext::internalReadTexturePixels(GrTexture* texture,
                                          int left, int top,
                                          int width, int height,
                                          GrPixelConfig config,
                                          void* buffer,
                                          size_t rowBytes,
                                          uint32_t flags) {
    SK_TRACE_EVENT0("GrContext::readTexturePixels");
    ASSERT_OWNED_RESOURCE(texture);

    // TODO: code read pixels for textures that aren't also rendertargets
    GrRenderTarget* target = texture->asRenderTarget();
    if (NULL != target) {
        return this->internalReadRenderTargetPixels(target,
                                                    left, top, width, height,
                                                    config, buffer, rowBytes,
                                                    flags);
    } else {
        return false;
    }
}

// Reads back a rect of pixels from 'target' (NULL means the current render
// target). May bounce through a scratch render target when a y-flip, R/B
// swap, or premul conversion is needed. Returns false on failure.
bool GrContext::internalReadRenderTargetPixels(GrRenderTarget* target,
                                               int left, int top,
                                               int width, int height,
                                               GrPixelConfig config,
                                               void* buffer,
                                               size_t rowBytes,
                                               uint32_t flags) {
    SK_TRACE_EVENT0("GrContext::readRenderTargetPixels");
    ASSERT_OWNED_RESOURCE(target);

    if (NULL == target) {
        target = fGpu->drawState()->getRenderTarget();
        if (NULL == target) {
            return false;
        }
    }

    // PM <-> UPM conversion requires a draw. Currently we only support drawing
    // into a UPM target, not reading from a UPM texture. Thus, UPM->PM is not
    // not supported at this time.
    if (GrPixelConfigIsUnpremultiplied(target->config()) &&
        !GrPixelConfigIsUnpremultiplied(config)) {
        return false;
    }

    if (!(kDontFlush_PixelOpsFlag & flags)) {
        this->flush();
    }

    GrTexture* src = target->asTexture();
    bool swapRAndB = NULL != src &&
                     fGpu->preferredReadPixelsConfig(config) ==
                     GrPixelConfigSwapRAndB(config);

    bool flipY = NULL != src &&
                 fGpu->readPixelsWillPayForYFlip(target, left, top,
                                                 width, height, config,
                                                 rowBytes);
    bool alphaConversion = (!GrPixelConfigIsUnpremultiplied(target->config()) &&
                            GrPixelConfigIsUnpremultiplied(config));

    if (NULL == src && alphaConversion) {
        // we should fallback to cpu conversion here. This could happen when
        // we were given an external render target by the client that is not
        // also a texture (e.g. FBO 0 in GL)
        return false;
    }
    // we draw to a scratch texture if any of these conversion are applied
    GrAutoScratchTexture ast;
    if (flipY || swapRAndB || alphaConversion) {
        GrAssert(NULL != src);
        if (swapRAndB) {
            config = GrPixelConfigSwapRAndB(config);
            GrAssert(kUnknown_GrPixelConfig != config);
        }
        // Make the scratch a render target because we don't have a robust
        // readTexturePixels as of yet (it calls this function).
        const GrTextureDesc desc = {
            kRenderTarget_GrTextureFlagBit,
            kNone_GrAALevel,
            width, height,
            config
        };

        // When a full readback is faster than a partial we could always make
        // the scratch exactly match the passed rect. However, if we see many
        // different size rectangles we will trash our texture cache and pay the
        // cost of creating and destroying many textures. So, we only request
        // an exact match when the caller is reading an entire RT.
        ScratchTexMatch match = kApprox_ScratchTexMatch;
        if (0 == left &&
            0 == top &&
            target->width() == width &&
            target->height() == height &&
            fGpu->fullReadPixelsIsFasterThanPartial()) {
            match = kExact_ScratchTexMatch;
        }
        ast.set(this, desc, match);
        GrTexture* texture = ast.texture();
        if (!texture) {
            return false;
        }
        target = texture->asRenderTarget();
        GrAssert(NULL != target);

        // draw the src into the scratch, applying flip/swap/conversion
        GrDrawTarget::AutoStateRestore asr(fGpu);
        GrDrawState* drawState = fGpu->drawState();
        drawState->reset();
        drawState->setRenderTarget(target);

        GrMatrix matrix;
        if (flipY) {
            matrix.setTranslate(SK_Scalar1 * left,
                                SK_Scalar1 * (top + height));
            matrix.set(GrMatrix::kMScaleY, -GR_Scalar1);
        } else {
            matrix.setTranslate(SK_Scalar1 *left, SK_Scalar1 *top);
        }
        matrix.postIDiv(src->width(), src->height());
        drawState->sampler(0)->reset(matrix);
        drawState->sampler(0)->setRAndBSwap(swapRAndB);
        drawState->setTexture(0, src);
        GrRect rect;
        rect.setXYWH(0, 0, SK_Scalar1 * width, SK_Scalar1 * height);
        fGpu->drawSimpleRect(rect, NULL, 0x1);
        // the readback below now starts at the scratch's origin
        left = 0;
        top = 0;
    }
    return fGpu->readPixels(target,
                            left, top, width, height,
                            config, buffer, rowBytes, flipY);
}

// Draws 'src' 1:1 into 'dst', copying the texture's full contents.
void GrContext::copyTexture(GrTexture* src, GrRenderTarget* dst) {
    if (NULL == src || NULL == dst) {
        return;
    }
    ASSERT_OWNED_RESOURCE(src);

    GrDrawTarget::AutoStateRestore asr(fGpu);
    GrDrawState* drawState = fGpu->drawState();
    drawState->reset();
    drawState->setRenderTarget(dst);
    GrMatrix sampleM;
    sampleM.setIDiv(src->width(), src->height());
    drawState->setTexture(0, src);
    drawState->sampler(0)->reset(sampleM);
    SkRect rect = SkRect::MakeXYWH(0, 0,
                                   SK_Scalar1 * src->width(),
                                   SK_Scalar1 * src->height());
    fGpu->drawSimpleRect(rect, NULL, 1 << 0);
}

// Writes pixels into a rect of 'target' (NULL means the current render
// target), either directly when the target is also a texture or by drawing
// through an intermediate scratch texture.
void GrContext::internalWriteRenderTargetPixels(GrRenderTarget* target,
                                                int left, int top,
                                                int width, int height,
                                                GrPixelConfig config,
                                                const void* buffer,
                                                size_t rowBytes,
                                                uint32_t flags) {
    SK_TRACE_EVENT0("GrContext::writeRenderTargetPixels");
    ASSERT_OWNED_RESOURCE(target);

    if (NULL == target) {
        target = fGpu->drawState()->getRenderTarget();
        if (NULL == target) {
            return;
        }
    }

    // TODO: when underlying api has a direct way to do this we should use it
    // (e.g. glDrawPixels on desktop GL).

    // If the RT is also a texture and we don't have to do PM/UPM conversion
    // then take the texture path, which we expect to be at least as fast or
    // faster since it doesn't use an intermediate texture as we do below.

#if !GR_MAC_BUILD
    // At least some drivers on the Mac get confused when glTexImage2D is called
    // on a texture attached to an FBO. The FBO still sees the old image. TODO:
    // determine what OS versions and/or HW is affected.
    if (NULL != target->asTexture() &&
        GrPixelConfigIsUnpremultiplied(target->config()) ==
        GrPixelConfigIsUnpremultiplied(config)) {

        this->internalWriteTexturePixels(target->asTexture(),
                                         left, top, width, height,
                                         config, buffer, rowBytes, flags);
        return;
    }
#endif

    bool swapRAndB = fGpu->preferredReadPixelsConfig(config) ==
                     GrPixelConfigSwapRAndB(config);
    if (swapRAndB) {
        config = GrPixelConfigSwapRAndB(config);
    }

    const GrTextureDesc desc = {
        kNone_GrTextureFlags, kNone_GrAALevel, width, height, config
    };
    GrAutoScratchTexture ast(this, desc);
    GrTexture* texture = ast.texture();
    if (NULL == texture) {
        return;
    }
    // upload into the scratch texture, then draw the scratch into the target
    this->internalWriteTexturePixels(texture, 0, 0, width, height,
                                     config, buffer, rowBytes, flags);

    GrDrawTarget::AutoStateRestore asr(fGpu);
    GrDrawState* drawState = fGpu->drawState();
    drawState->reset();

    GrMatrix matrix;
    matrix.setTranslate(GrIntToScalar(left), GrIntToScalar(top));
    drawState->setViewMatrix(matrix);
    drawState->setRenderTarget(target);
    drawState->setTexture(0, texture);

    matrix.setIDiv(texture->width(), texture->height());
    drawState->sampler(0)->reset(GrSamplerState::kClamp_WrapMode,
                                 GrSamplerState::kNearest_Filter,
                                 matrix);
    drawState->sampler(0)->setRAndBSwap(swapRAndB);

    GrVertexLayout layout = GrDrawTarget::StagePosAsTexCoordVertexLayoutBit(0);
    static const int VCOUNT = 4;
    // TODO: Use GrGpu::drawRect here
    GrDrawTarget::AutoReleaseGeometry geo(fGpu, layout, VCOUNT, 0);
    if (!geo.succeeded()) {
        GrPrintf("Failed to get space for vertices!\n");
        return;
    }
    ((GrPoint*)geo.vertices())->setIRectFan(0, 0, width, height);
    fGpu->drawNonIndexed(kTriangleFan_PrimitiveType, 0, VCOUNT);
}
////////////////////////////////////////////////////////////////////////////////

// Copies the paint's textures, masks, color, blend settings, and state bits
// into the target's draw state.
void GrContext::setPaint(const GrPaint& paint, GrDrawTarget* target) {
    GrDrawState* drawState = target->drawState();

    for (int i = 0; i < GrPaint::kMaxTextures; ++i) {
        int s = i + GrPaint::kFirstTextureStage;
        drawState->setTexture(s, paint.getTexture(i));
        ASSERT_OWNED_RESOURCE(paint.getTexture(i));
        if (paint.getTexture(i)) {
            *drawState->sampler(s) = paint.getTextureSampler(i);
        }
    }

    drawState->setFirstCoverageStage(GrPaint::kFirstMaskStage);

    for (int i = 0; i < GrPaint::kMaxMasks; ++i) {
        int s = i + GrPaint::kFirstMaskStage;
        drawState->setTexture(s, paint.getMask(i));
        ASSERT_OWNED_RESOURCE(paint.getMask(i));
        if (paint.getMask(i)) {
            *drawState->sampler(s) = paint.getMaskSampler(i);
        }
    }

    drawState->setColor(paint.fColor);

    if (paint.fDither) {
        drawState->enableState(GrDrawState::kDither_StateBit);
    } else {
        drawState->disableState(GrDrawState::kDither_StateBit);
    }
    if (paint.fAntiAlias) {
        drawState->enableState(GrDrawState::kHWAntialias_StateBit);
    } else {
        drawState->disableState(GrDrawState::kHWAntialias_StateBit);
    }
    if (paint.fColorMatrixEnabled) {
        drawState->enableState(GrDrawState::kColorMatrix_StateBit);
    } else {
        drawState->disableState(GrDrawState::kColorMatrix_StateBit);
    }
    drawState->setBlendFunc(paint.fSrcBlendCoeff, paint.fDstBlendCoeff);
    drawState->setColorFilter(paint.fColorFilterColor, paint.fColorFilterXfermode);
    drawState->setColorMatrix(paint.fColorMatrix);

    if (paint.getActiveMaskStageMask() && !target->canApplyCoverage()) {
        GrPrintf("Partial pixel coverage will be incorrectly blended.\n");
    }
}

// Flushes the draw buffer when the draw category changes, applies the paint
// to the gpu's draw state, and returns the target (gpu or buffered) to use.
GrDrawTarget* GrContext::prepareToDraw(const GrPaint& paint,
                                       DrawCategory category) {
    if (category != fLastDrawCategory) {
        flushDrawBuffer();
        fLastDrawCategory = category;
    }
    this->setPaint(paint, fGpu);
2076 GrDrawTarget* target = fGpu; 2077 switch (category) { 2078 case kText_DrawCategory: 2079#if DEFER_TEXT_RENDERING 2080 target = fDrawBuffer; 2081 fDrawBuffer->initializeDrawStateAndClip(*fGpu); 2082#else 2083 target = fGpu; 2084#endif 2085 break; 2086 case kUnbuffered_DrawCategory: 2087 target = fGpu; 2088 break; 2089 case kBuffered_DrawCategory: 2090 target = fDrawBuffer; 2091 fDrawBuffer->initializeDrawStateAndClip(*fGpu); 2092 break; 2093 } 2094 return target; 2095} 2096 2097GrPathRenderer* GrContext::getPathRenderer(const GrPath& path, 2098 GrPathFill fill, 2099 bool antiAlias) { 2100 if (NULL == fPathRendererChain) { 2101 fPathRendererChain = 2102 new GrPathRendererChain(this, GrPathRendererChain::kNone_UsageFlag); 2103 } 2104 return fPathRendererChain->getPathRenderer(fGpu->getCaps(), path, 2105 fill, antiAlias); 2106} 2107 2108//////////////////////////////////////////////////////////////////////////////// 2109 2110void GrContext::setRenderTarget(GrRenderTarget* target) { 2111 ASSERT_OWNED_RESOURCE(target); 2112 this->flush(false); 2113 fGpu->drawState()->setRenderTarget(target); 2114} 2115 2116GrRenderTarget* GrContext::getRenderTarget() { 2117 return fGpu->drawState()->getRenderTarget(); 2118} 2119 2120const GrRenderTarget* GrContext::getRenderTarget() const { 2121 return fGpu->getDrawState().getRenderTarget(); 2122} 2123 2124const GrMatrix& GrContext::getMatrix() const { 2125 return fGpu->getDrawState().getViewMatrix(); 2126} 2127 2128void GrContext::setMatrix(const GrMatrix& m) { 2129 fGpu->drawState()->setViewMatrix(m); 2130} 2131 2132void GrContext::concatMatrix(const GrMatrix& m) const { 2133 fGpu->drawState()->preConcatViewMatrix(m); 2134} 2135 2136static inline intptr_t setOrClear(intptr_t bits, int shift, intptr_t pred) { 2137 intptr_t mask = 1 << shift; 2138 if (pred) { 2139 bits |= mask; 2140 } else { 2141 bits &= ~mask; 2142 } 2143 return bits; 2144} 2145 2146void GrContext::resetStats() { 2147 fGpu->resetStats(); 2148} 2149 2150const 
GrGpuStats& GrContext::getStats() const {
    return fGpu->getStats();
}

// Forwards to the gpu's stat printer.
void GrContext::printStats() const {
    fGpu->printStats();
}

GrContext::GrContext(GrGpu* gpu) {
    // The context refs the gpu and registers itself; the ref is dropped in
    // ~GrContext.
    fGpu = gpu;
    fGpu->ref();
    fGpu->setContext(this);

    // Created lazily in getPathRenderer().
    fPathRendererChain = NULL;

    fTextureCache = new GrResourceCache(MAX_TEXTURE_CACHE_COUNT,
                                        MAX_TEXTURE_CACHE_BYTES);
    fFontCache = new GrFontCache(fGpu);

    fLastDrawCategory = kUnbuffered_DrawCategory;

    // Deferred-draw machinery is built in setupDrawBuffer() below.
    fDrawBuffer = NULL;
    fDrawBufferVBAllocPool = NULL;
    fDrawBufferIBAllocPool = NULL;

    fAAFillRectIndexBuffer = NULL;
    fAAStrokeRectIndexBuffer = NULL;

    // When falling back to supersampled offscreen AA (MSAA unpreferred or
    // unsupported) the offscreen target is OFFSCREEN_SSAA_SCALE x larger in
    // each dimension, so shrink the allowed size budget accordingly.
    int gpuMaxOffscreen = gpu->getCaps().fMaxRenderTargetSize;
    if (!PREFER_MSAA_OFFSCREEN_AA || !gpu->getCaps().fFSAASupport) {
        gpuMaxOffscreen /= OFFSCREEN_SSAA_SCALE;
    }
    fMaxOffscreenAASize = GrMin(GR_MAX_OFFSCREEN_AA_SIZE, gpuMaxOffscreen);

    this->setupDrawBuffer();
}

// Builds the deferred in-order draw buffer plus its vertex/index alloc
// pools. Called from the constructor and again after contextLost().
void GrContext::setupDrawBuffer() {

    GrAssert(NULL == fDrawBuffer);
    GrAssert(NULL == fDrawBufferVBAllocPool);
    GrAssert(NULL == fDrawBufferIBAllocPool);

#if DEFER_TEXT_RENDERING || BATCH_RECT_TO_RECT
    fDrawBufferVBAllocPool =
        new GrVertexBufferAllocPool(fGpu, false,
                                    DRAW_BUFFER_VBPOOL_BUFFER_SIZE,
                                    DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS);
    fDrawBufferIBAllocPool =
        new GrIndexBufferAllocPool(fGpu, false,
                                   DRAW_BUFFER_IBPOOL_BUFFER_SIZE,
                                   DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS);

    fDrawBuffer = new GrInOrderDrawBuffer(fGpu,
                                          fDrawBufferVBAllocPool,
                                          fDrawBufferIBAllocPool);
#endif

#if BATCH_RECT_TO_RECT
    // fDrawBuffer is non-NULL here: BATCH_RECT_TO_RECT implies the block
    // above ran.
    fDrawBuffer->setQuadIndexBuffer(this->getQuadIndexBuffer());
#endif
}

// Returns where text draws should go: the deferred buffer when
// DEFER_TEXT_RENDERING is on, the gpu otherwise.
GrDrawTarget* GrContext::getTextTarget(const GrPaint& paint) {
    GrDrawTarget* target;
#if DEFER_TEXT_RENDERING
    target = prepareToDraw(paint, kText_DrawCategory);
#else
    target =
prepareToDraw(paint, kUnbuffered_DrawCategory); 2219#endif 2220 this->setPaint(paint, target); 2221 return target; 2222} 2223 2224const GrIndexBuffer* GrContext::getQuadIndexBuffer() const { 2225 return fGpu->getQuadIndexBuffer(); 2226} 2227 2228void GrContext::convolveInX(GrTexture* texture, 2229 const SkRect& rect, 2230 const float* kernel, 2231 int kernelWidth) { 2232 ASSERT_OWNED_RESOURCE(texture); 2233 2234 float imageIncrement[2] = {1.0f / texture->width(), 0.0f}; 2235 convolve(texture, rect, imageIncrement, kernel, kernelWidth); 2236} 2237 2238void GrContext::convolveInY(GrTexture* texture, 2239 const SkRect& rect, 2240 const float* kernel, 2241 int kernelWidth) { 2242 ASSERT_OWNED_RESOURCE(texture); 2243 2244 float imageIncrement[2] = {0.0f, 1.0f / texture->height()}; 2245 convolve(texture, rect, imageIncrement, kernel, kernelWidth); 2246} 2247 2248void GrContext::convolve(GrTexture* texture, 2249 const SkRect& rect, 2250 float imageIncrement[2], 2251 const float* kernel, 2252 int kernelWidth) { 2253 ASSERT_OWNED_RESOURCE(texture); 2254 2255 GrDrawTarget::AutoStateRestore asr(fGpu); 2256 GrDrawState* drawState = fGpu->drawState(); 2257 GrMatrix sampleM; 2258 sampleM.setIDiv(texture->width(), texture->height()); 2259 drawState->sampler(0)->reset(GrSamplerState::kClamp_WrapMode, 2260 GrSamplerState::kConvolution_Filter, 2261 sampleM); 2262 drawState->sampler(0)->setConvolutionParams(kernelWidth, 2263 kernel, 2264 imageIncrement); 2265 2266 drawState->setViewMatrix(GrMatrix::I()); 2267 drawState->setTexture(0, texture); 2268 drawState->setAlpha(0xFF); 2269 drawState->setBlendFunc(kOne_BlendCoeff, kZero_BlendCoeff); 2270 fGpu->drawSimpleRect(rect, NULL, 1 << 0); 2271} 2272 2273/////////////////////////////////////////////////////////////////////////////// 2274