GrContext.cpp revision 1fadb20c50c2302565f73ae12057a6f5d22192c7
1 2/* 3 * Copyright 2011 Google Inc. 4 * 5 * Use of this source code is governed by a BSD-style license that can be 6 * found in the LICENSE file. 7 */ 8 9 10#include "GrContext.h" 11 12#include "GrBufferAllocPool.h" 13#include "GrClipIterator.h" 14#include "GrGpu.h" 15#include "GrIndexBuffer.h" 16#include "GrInOrderDrawBuffer.h" 17#include "GrPathRenderer.h" 18#include "GrPathUtils.h" 19#include "GrResourceCache.h" 20#include "GrStencilBuffer.h" 21#include "GrTextStrike.h" 22#include "SkTLazy.h" 23#include "SkTrace.h" 24 25// Using MSAA seems to be slower for some yet unknown reason. 26#define PREFER_MSAA_OFFSCREEN_AA 0 27#define OFFSCREEN_SSAA_SCALE 4 // super sample at 4x4 28 29#define DEFER_TEXT_RENDERING 1 30 31#define BATCH_RECT_TO_RECT (1 && !GR_STATIC_RECT_VB) 32 33// When we're using coverage AA but the blend is incompatible (given gpu 34// limitations) should we disable AA or draw wrong? 35#define DISABLE_COVERAGE_AA_FOR_BLEND 1 36 37static const size_t MAX_TEXTURE_CACHE_COUNT = 256; 38static const size_t MAX_TEXTURE_CACHE_BYTES = 16 * 1024 * 1024; 39 40static const size_t DRAW_BUFFER_VBPOOL_BUFFER_SIZE = 1 << 18; 41static const int DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS = 4; 42 43// We are currently only batching Text and drawRectToRect, both 44// of which use the quad index buffer. 
45static const size_t DRAW_BUFFER_IBPOOL_BUFFER_SIZE = 0; 46static const int DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS = 0; 47 48#define ASSERT_OWNED_RESOURCE(R) GrAssert(!(R) || (R)->getContext() == this) 49 50GrContext* GrContext::Create(GrEngine engine, 51 GrPlatform3DContext context3D) { 52 GrContext* ctx = NULL; 53 GrGpu* fGpu = GrGpu::Create(engine, context3D); 54 if (NULL != fGpu) { 55 ctx = new GrContext(fGpu); 56 fGpu->unref(); 57 } 58 return ctx; 59} 60 61GrContext::~GrContext() { 62 this->flush(); 63 delete fTextureCache; 64 delete fFontCache; 65 delete fDrawBuffer; 66 delete fDrawBufferVBAllocPool; 67 delete fDrawBufferIBAllocPool; 68 69 GrSafeUnref(fAAFillRectIndexBuffer); 70 GrSafeUnref(fAAStrokeRectIndexBuffer); 71 fGpu->unref(); 72 GrSafeUnref(fPathRendererChain); 73} 74 75void GrContext::contextLost() { 76 contextDestroyed(); 77 this->setupDrawBuffer(); 78} 79 80void GrContext::contextDestroyed() { 81 // abandon first to so destructors 82 // don't try to free the resources in the API. 
83 fGpu->abandonResources(); 84 85 // a path renderer may be holding onto resources that 86 // are now unusable 87 GrSafeSetNull(fPathRendererChain); 88 89 delete fDrawBuffer; 90 fDrawBuffer = NULL; 91 92 delete fDrawBufferVBAllocPool; 93 fDrawBufferVBAllocPool = NULL; 94 95 delete fDrawBufferIBAllocPool; 96 fDrawBufferIBAllocPool = NULL; 97 98 GrSafeSetNull(fAAFillRectIndexBuffer); 99 GrSafeSetNull(fAAStrokeRectIndexBuffer); 100 101 fTextureCache->removeAll(); 102 fFontCache->freeAll(); 103 fGpu->markContextDirty(); 104} 105 106void GrContext::resetContext() { 107 fGpu->markContextDirty(); 108} 109 110void GrContext::freeGpuResources() { 111 this->flush(); 112 fTextureCache->removeAll(); 113 fFontCache->freeAll(); 114 // a path renderer may be holding onto resources 115 GrSafeSetNull(fPathRendererChain); 116} 117 118//////////////////////////////////////////////////////////////////////////////// 119 120int GrContext::PaintStageVertexLayoutBits( 121 const GrPaint& paint, 122 const bool hasTexCoords[GrPaint::kTotalStages]) { 123 int stageMask = paint.getActiveStageMask(); 124 int layout = 0; 125 for (int i = 0; i < GrPaint::kTotalStages; ++i) { 126 if ((1 << i) & stageMask) { 127 if (NULL != hasTexCoords && hasTexCoords[i]) { 128 layout |= GrDrawTarget::StageTexCoordVertexLayoutBit(i, i); 129 } else { 130 layout |= GrDrawTarget::StagePosAsTexCoordVertexLayoutBit(i); 131 } 132 } 133 } 134 return layout; 135} 136 137 138//////////////////////////////////////////////////////////////////////////////// 139 140enum { 141 // flags for textures 142 kNPOTBit = 0x1, 143 kFilterBit = 0x2, 144 kScratchBit = 0x4, 145 146 // resource type 147 kTextureBit = 0x8, 148 kStencilBufferBit = 0x10 149}; 150 151GrTexture* GrContext::TextureCacheEntry::texture() const { 152 if (NULL == fEntry) { 153 return NULL; 154 } else { 155 return (GrTexture*) fEntry->resource(); 156 } 157} 158 159namespace { 160// returns true if this is a "special" texture because of gpu NPOT limitations 161bool 
gen_texture_key_values(const GrGpu* gpu, 162 const GrSamplerState* sampler, 163 GrContext::TextureKey clientKey, 164 int width, 165 int height, 166 bool scratch, 167 uint32_t v[4]) { 168 GR_STATIC_ASSERT(sizeof(GrContext::TextureKey) == sizeof(uint64_t)); 169 // we assume we only need 16 bits of width and height 170 // assert that texture creation will fail anyway if this assumption 171 // would cause key collisions. 172 GrAssert(gpu->getCaps().fMaxTextureSize <= SK_MaxU16); 173 v[0] = clientKey & 0xffffffffUL; 174 v[1] = (clientKey >> 32) & 0xffffffffUL; 175 v[2] = width | (height << 16); 176 177 v[3] = 0; 178 if (!gpu->getCaps().fNPOTTextureTileSupport) { 179 bool isPow2 = GrIsPow2(width) && GrIsPow2(height); 180 181 bool tiled = NULL != sampler && 182 ((sampler->getWrapX() != GrSamplerState::kClamp_WrapMode) || 183 (sampler->getWrapY() != GrSamplerState::kClamp_WrapMode)); 184 185 if (tiled && !isPow2) { 186 v[3] |= kNPOTBit; 187 if (GrSamplerState::kNearest_Filter != sampler->getFilter()) { 188 v[3] |= kFilterBit; 189 } 190 } 191 } 192 193 if (scratch) { 194 v[3] |= kScratchBit; 195 } 196 197 v[3] |= kTextureBit; 198 199 return v[3] & kNPOTBit; 200} 201 202// we should never have more than one stencil buffer with same combo of 203// (width,height,samplecount) 204void gen_stencil_key_values(int width, int height, 205 int sampleCnt, uint32_t v[4]) { 206 v[0] = width; 207 v[1] = height; 208 v[2] = sampleCnt; 209 v[3] = kStencilBufferBit; 210} 211 212void gen_stencil_key_values(const GrStencilBuffer* sb, 213 uint32_t v[4]) { 214 gen_stencil_key_values(sb->width(), sb->height(), 215 sb->numSamples(), v); 216} 217 218// This should be subsumed by a future version of GrDrawState 219// It does not reset stage textures/samplers or per-vertex-edge-aa state since 220// they aren't used unless the vertex layout references them. 221// It also doesn't set the render target. 
// Restores a GrDrawState's members to default values.
void reset_draw_state(GrDrawState* drawState){

    drawState->setViewMatrix(GrMatrix::I());
    drawState->setColorFilter(0, SkXfermode::kDst_Mode);
    drawState->resetStateFlags();
    drawState->setEdgeAAData(NULL, 0);
    drawState->disableStencil();
    drawState->setAlpha(0xFF);
    // (One, Zero) blend: destination is replaced by source.
    drawState->setBlendFunc(kOne_BlendCoeff,
                            kZero_BlendCoeff);
    drawState->setFirstCoverageStage(GrDrawState::kNumStages);
    drawState->setDrawFace(GrDrawState::kBoth_DrawFace);
}
}  // namespace

// Looks up a client texture in the cache and locks it (nested lock).
// Returns an entry whose texture() is NULL on a cache miss.
GrContext::TextureCacheEntry GrContext::findAndLockTexture(
        TextureKey key,
        int width,
        int height,
        const GrSamplerState* sampler) {
    uint32_t v[4];
    gen_texture_key_values(fGpu, sampler, key, width, height, false, v);
    GrResourceKey resourceKey(v);
    return TextureCacheEntry(fTextureCache->findAndLock(resourceKey,
                                            GrResourceCache::kNested_LockType));
}

// Non-locking existence check using the same key derivation as
// findAndLockTexture.
bool GrContext::isTextureInCache(TextureKey key,
                                 int width,
                                 int height,
                                 const GrSamplerState* sampler) const {
    uint32_t v[4];
    gen_texture_key_values(fGpu, sampler, key, width, height, false, v);
    GrResourceKey resourceKey(v);
    return fTextureCache->hasKey(resourceKey);
}

// Adds a stencil buffer to the (shared) resource cache and locks it.
GrResourceEntry* GrContext::addAndLockStencilBuffer(GrStencilBuffer* sb) {
    ASSERT_OWNED_RESOURCE(sb);
    uint32_t v[4];
    gen_stencil_key_values(sb, v);
    GrResourceKey resourceKey(v);
    return fTextureCache->createAndLock(resourceKey, sb);
}

// Finds and locks a cached stencil buffer matching (width, height, sampleCnt),
// or returns NULL.
GrStencilBuffer* GrContext::findStencilBuffer(int width, int height,
                                              int sampleCnt) {
    uint32_t v[4];
    gen_stencil_key_values(width, height, sampleCnt, v);
    GrResourceKey resourceKey(v);
    GrResourceEntry* entry = fTextureCache->findAndLock(resourceKey,
                                            GrResourceCache::kSingle_LockType);
    if (NULL != entry) {
        GrStencilBuffer* sb = (GrStencilBuffer*) entry->resource();
        return sb;
    } else {
        return NULL;
    }
}

void GrContext::unlockStencilBuffer(GrResourceEntry* sbEntry) {
    ASSERT_OWNED_RESOURCE(sbEntry->resource());
    fTextureCache->unlock(sbEntry);
}

// CPU point-sampled (nearest-neighbor) stretch of src (srcW x srcH) into dst
// (dstW x dstH), bpp bytes per pixel, using 16.16 fixed-point stepping.
// Rows are assumed tightly packed (no row padding).
static void stretchImage(void* dst,
                         int dstW,
                         int dstH,
                         void* src,
                         int srcW,
                         int srcH,
                         int bpp) {
    GrFixed dx = (srcW << 16) / dstW;
    GrFixed dy = (srcH << 16) / dstH;

    // Start half a step in so samples land at texel centers.
    GrFixed y = dy >> 1;

    int dstXLimit = dstW*bpp;
    for (int j = 0; j < dstH; ++j) {
        GrFixed x = dx >> 1;
        void* srcRow = (uint8_t*)src + (y>>16)*srcW*bpp;
        void* dstRow = (uint8_t*)dst + j*dstW*bpp;
        for (int i = 0; i < dstXLimit; i += bpp) {
            memcpy((uint8_t*) dstRow + i,
                   (uint8_t*) srcRow + (x>>16)*bpp,
                   bpp);
            x += dx;
        }
        y += dy;
    }
}

// Creates a client texture and inserts it (locked) into the cache. For
// "special" keys (NPOT texture that must tile on hw without NPOT tiling
// support) this builds a pow2-sized stretched copy: via a GPU draw into a
// render-target texture when possible, else via the CPU stretchImage above.
// Recurses once (with sampler == NULL) to create/lookup the clamped original.
GrContext::TextureCacheEntry GrContext::createAndLockTexture(
        TextureKey key,
        const GrSamplerState* sampler,
        const GrTextureDesc& desc,
        void* srcData,
        size_t rowBytes) {
    SK_TRACE_EVENT0("GrContext::createAndLockTexture");

#if GR_DUMP_TEXTURE_UPLOAD
    GrPrintf("GrContext::createAndLockTexture [%d %d]\n", desc.fWidth, desc.fHeight);
#endif

    TextureCacheEntry entry;
    uint32_t v[4];
    bool special = gen_texture_key_values(fGpu, sampler, key,
                                          desc.fWidth, desc.fHeight, false, v);
    GrResourceKey resourceKey(v);

    if (special) {
        GrAssert(NULL != sampler);
        // Find or create the clamped (unstretched) original first.
        TextureCacheEntry clampEntry = this->findAndLockTexture(key,
                                                                desc.fWidth,
                                                                desc.fHeight,
                                                                NULL);

        if (NULL == clampEntry.texture()) {
            clampEntry = this->createAndLockTexture(key, NULL, desc,
                                                    srcData, rowBytes);
            GrAssert(NULL != clampEntry.texture());
            if (NULL == clampEntry.texture()) {
                return entry;
            }
        }
        GrTextureDesc rtDesc = desc;
        rtDesc.fFlags = rtDesc.fFlags |
                        kRenderTarget_GrTextureFlagBit |
                        kNoStencil_GrTextureFlagBit;
        // Round up to pow2; clamp to a 64-pixel minimum render target.
        rtDesc.fWidth = GrNextPow2(GrMax(desc.fWidth, 64));
        rtDesc.fHeight = GrNextPow2(GrMax(desc.fHeight, 64));

        GrTexture* texture = fGpu->createTexture(rtDesc, NULL, 0);

        if (NULL != texture) {
            // GPU path: draw the clamped texture into the pow2 render target.
            GrDrawTarget::AutoStateRestore asr(fGpu);
            GrDrawState* drawState = fGpu->drawState();
            reset_draw_state(drawState);
            drawState->setRenderTarget(texture->asRenderTarget());
            drawState->setTexture(0, clampEntry.texture());

            GrSamplerState::Filter filter;
            // if filtering is not desired then we want to ensure all
            // texels in the resampled image are copies of texels from
            // the original.
            if (GrSamplerState::kNearest_Filter == sampler->getFilter()) {
                filter = GrSamplerState::kNearest_Filter;
            } else {
                filter = GrSamplerState::kBilinear_Filter;
            }
            GrSamplerState stretchSampler(GrSamplerState::kClamp_WrapMode,
                                          filter);
            drawState->setSampler(0, stretchSampler);

            static const GrVertexLayout layout =
                                GrDrawTarget::StageTexCoordVertexLayoutBit(0,0);
            GrDrawTarget::AutoReleaseGeometry arg(fGpu, layout, 4, 0);

            if (arg.succeeded()) {
                GrPoint* verts = (GrPoint*) arg.vertices();
                // Interleaved position/texcoord fans (stride 2 * GrPoint).
                verts[0].setIRectFan(0, 0,
                                     texture->width(),
                                     texture->height(),
                                     2*sizeof(GrPoint));
                verts[1].setIRectFan(0, 0, 1, 1, 2*sizeof(GrPoint));
                fGpu->drawNonIndexed(kTriangleFan_PrimitiveType,
                                     0, 4);
                entry.set(fTextureCache->createAndLock(resourceKey, texture));
            }
            texture->releaseRenderTarget();
        } else {
            // TODO: Our CPU stretch doesn't filter. But we create separate
            // stretched textures when the sampler state is either filtered or
            // not. Either implement filtered stretch blit on CPU or just create
            // one when FBO case fails.

            rtDesc.fFlags = kNone_GrTextureFlags;
            // no longer need to clamp at min RT size.
            rtDesc.fWidth = GrNextPow2(desc.fWidth);
            rtDesc.fHeight = GrNextPow2(desc.fHeight);
            int bpp = GrBytesPerPixel(desc.fConfig);
            SkAutoSMalloc<128*128*4> stretchedPixels(bpp *
                                                     rtDesc.fWidth *
                                                     rtDesc.fHeight);
            stretchImage(stretchedPixels.get(), rtDesc.fWidth, rtDesc.fHeight,
                         srcData, desc.fWidth, desc.fHeight, bpp);

            size_t stretchedRowBytes = rtDesc.fWidth * bpp;

            GrTexture* texture = fGpu->createTexture(rtDesc,
                                                     stretchedPixels.get(),
                                                     stretchedRowBytes);
            // NOTE(review): only a debug assert guards 'texture' here; in a
            // release build a NULL result would be passed to createAndLock.
            // Consider an explicit NULL check like the non-special path below.
            GrAssert(NULL != texture);
            entry.set(fTextureCache->createAndLock(resourceKey, texture));
        }
        fTextureCache->unlock(clampEntry.cacheEntry());

    } else {
        GrTexture* texture = fGpu->createTexture(desc, srcData, rowBytes);
        if (NULL != texture) {
            entry.set(fTextureCache->createAndLock(resourceKey, texture));
        }
    }
    return entry;
}

namespace {
// Builds a scratch-texture cache key purely from the descriptor (no client
// content key), marking it with the scratch bit.
inline void gen_scratch_tex_key_values(const GrGpu* gpu,
                                       const GrTextureDesc& desc,
                                       uint32_t v[4]) {
    // Instead of a client-provided key of the texture contents
    // we create a key from the descriptor.
    GrContext::TextureKey descKey = desc.fAALevel |
                                    (desc.fFlags << 8) |
                                    ((uint64_t) desc.fConfig << 32);
    // this code path isn't friendly to tiling with NPOT restrictions
    // We just pass ClampNoFilter()
    gen_texture_key_values(gpu, NULL, descKey, desc.fWidth,
                           desc.fHeight, true, v);
}
}  // namespace

// Finds or creates a scratch texture at least as capable as inDesc. With an
// approximate match the search progressively relaxes flags and doubles
// width/height before giving up and creating a new texture. The returned
// entry is detached from the cache until unlockTexture reattaches it.
GrContext::TextureCacheEntry GrContext::lockScratchTexture(
                                                const GrTextureDesc& inDesc,
                                                ScratchTexMatch match) {

    GrTextureDesc desc = inDesc;
    if (kExact_ScratchTexMatch != match) {
        // bin by pow2 with a reasonable min
        static const int MIN_SIZE = 256;
        desc.fWidth = GrMax(MIN_SIZE, GrNextPow2(desc.fWidth));
        desc.fHeight = GrMax(MIN_SIZE, GrNextPow2(desc.fHeight));
    }

    // NOTE(review): p0/p1 appear unused in this function — candidates for
    // removal.
    uint32_t p0 = desc.fConfig;
    uint32_t p1 = (desc.fAALevel << 16) | desc.fFlags;

    GrResourceEntry* entry;
    int origWidth = desc.fWidth;
    int origHeight = desc.fHeight;
    bool doubledW = false;
    bool doubledH = false;

    do {
        uint32_t v[4];
        gen_scratch_tex_key_values(fGpu, desc, v);
        GrResourceKey key(v);
        entry = fTextureCache->findAndLock(key,
                                           GrResourceCache::kNested_LockType);
        // if we miss, relax the fit of the flags...
        // then try doubling width... then height.
        if (NULL != entry || kExact_ScratchTexMatch == match) {
            break;
        }
        if (!(desc.fFlags & kRenderTarget_GrTextureFlagBit)) {
            desc.fFlags = desc.fFlags | kRenderTarget_GrTextureFlagBit;
        } else if (desc.fFlags & kNoStencil_GrTextureFlagBit) {
            desc.fFlags = desc.fFlags & ~kNoStencil_GrTextureFlagBit;
        } else if (!doubledW) {
            desc.fFlags = inDesc.fFlags;
            desc.fWidth *= 2;
            doubledW = true;
        } else if (!doubledH) {
            desc.fFlags = inDesc.fFlags;
            desc.fWidth = origWidth;
            desc.fHeight *= 2;
            doubledH = true;
        } else {
            break;
        }

    } while (true);

    if (NULL == entry) {
        // Cache miss: create a texture with the originally requested
        // (possibly pow2-binned) dimensions and flags.
        desc.fFlags = inDesc.fFlags;
        desc.fWidth = origWidth;
        desc.fHeight = origHeight;
        GrTexture* texture = fGpu->createTexture(desc, NULL, 0);
        if (NULL != texture) {
            uint32_t v[4];
            gen_scratch_tex_key_values(fGpu, desc, v);
            GrResourceKey key(v);
            entry = fTextureCache->createAndLock(key, texture);
        }
    }

    // If the caller gives us the same desc/sampler twice we don't want
    // to return the same texture the second time (unless it was previously
    // released). So we detach the entry from the cache and reattach at release.
    if (NULL != entry) {
        fTextureCache->detach(entry);
    }
    return TextureCacheEntry(entry);
}

void GrContext::unlockTexture(TextureCacheEntry entry) {
    ASSERT_OWNED_RESOURCE(entry.texture());
    // If this is a scratch texture we detached it from the cache
    // while it was locked (to avoid two callers simultaneously getting
    // the same texture).
    if (kScratchBit & entry.cacheEntry()->key().getValue32(3)) {
        fTextureCache->reattachAndUnlock(entry.cacheEntry());
    } else {
        fTextureCache->unlock(entry.cacheEntry());
    }
}

// Creates a texture that is never inserted into the cache; the caller owns it.
GrTexture* GrContext::createUncachedTexture(const GrTextureDesc& desc,
                                            void* srcData,
                                            size_t rowBytes) {
    return fGpu->createTexture(desc, srcData, rowBytes);
}

void GrContext::getTextureCacheLimits(int* maxTextures,
                                      size_t* maxTextureBytes) const {
    fTextureCache->getLimits(maxTextures, maxTextureBytes);
}

void GrContext::setTextureCacheLimits(int maxTextures, size_t maxTextureBytes) {
    fTextureCache->setLimits(maxTextures, maxTextureBytes);
}

int GrContext::getMaxTextureSize() const {
    return fGpu->getCaps().fMaxTextureSize;
}

int GrContext::getMaxRenderTargetSize() const {
    return fGpu->getCaps().fMaxRenderTargetSize;
}

///////////////////////////////////////////////////////////////////////////////

// Wraps a platform (3D API) texture object; creation is delegated to the gpu.
GrTexture* GrContext::createPlatformTexture(const GrPlatformTextureDesc& desc) {
    return fGpu->createPlatformTexture(desc);
}

GrRenderTarget* GrContext::createPlatformRenderTarget(const GrPlatformRenderTargetDesc& desc) {
    return fGpu->createPlatformRenderTarget(desc);
}

// Wraps a platform surface, rejecting inconsistent flag/sample combinations
// up front so GrGpu subclasses don't have to re-validate.
GrResource* GrContext::createPlatformSurface(const GrPlatformSurfaceDesc& desc) {
    // validate flags here so that GrGpu subclasses don't have to check
    if (kTexture_GrPlatformSurfaceType == desc.fSurfaceType &&
        0 != desc.fRenderTargetFlags) {
        return NULL;
    }
    if (desc.fSampleCnt &&
        (kGrCanResolve_GrPlatformRenderTargetFlagBit & desc.fRenderTargetFlags)) {
        return NULL;
    }
    if (kTextureRenderTarget_GrPlatformSurfaceType == desc.fSurfaceType &&
        desc.fSampleCnt &&
        !(kGrCanResolve_GrPlatformRenderTargetFlagBit & desc.fRenderTargetFlags)) {
        return NULL;
    }
    return fGpu->createPlatformSurface(desc);
}

///////////////////////////////////////////////////////////////////////////////

// Reports whether an Index8 (paletted) texture of the given size can be used
// with the given sampler: requires palette support, and NPOT sizes may not
// tile on hardware lacking NPOT tiling.
bool GrContext::supportsIndex8PixelConfig(const GrSamplerState* sampler,
                                          int width, int height) const {
    const GrDrawTarget::Caps& caps = fGpu->getCaps();
    if (!caps.f8BitPaletteSupport) {
        return false;
    }

    bool isPow2 = GrIsPow2(width) && GrIsPow2(height);

    if (!isPow2) {
        bool tiled = NULL != sampler &&
                     (sampler->getWrapX() != GrSamplerState::kClamp_WrapMode ||
                      sampler->getWrapY() != GrSamplerState::kClamp_WrapMode);
        if (tiled && !caps.fNPOTTextureTileSupport) {
            return false;
        }
    }
    return true;
}

////////////////////////////////////////////////////////////////////////////////

const GrClip& GrContext::getClip() const { return fGpu->getClip(); }

void GrContext::setClip(const GrClip& clip) {
    fGpu->setClip(clip);
    fGpu->drawState()->enableState(GrDrawState::kClip_StateBit);
}

// Convenience overload: wraps the rect in a GrClip. Note it does not enable
// the clip state bit like the GrClip overload above.
void GrContext::setClip(const GrIRect& rect) {
    GrClip clip;
    clip.setFromIRect(rect);
    fGpu->setClip(clip);
}

////////////////////////////////////////////////////////////////////////////////

// Clears rect (or the whole target if NULL) to color, flushing pending
// deferred draws first so ordering is preserved.
void GrContext::clear(const GrIRect* rect, const GrColor color) {
    this->flush();
    fGpu->clear(rect, color);
}

// Fills the current clip with the paint by drawing a rect that covers the
// render target, mapped through the inverse view matrix.
void GrContext::drawPaint(const GrPaint& paint) {
    // set rect to be big enough to fill the space, but not super-huge, so we
    // don't overflow fixed-point implementations
    GrRect r;
    r.setLTRB(0, 0,
              GrIntToScalar(getRenderTarget()->width()),
              GrIntToScalar(getRenderTarget()->height()));
    GrMatrix inverse;
    SkTLazy<GrPaint> tmpPaint;
    const GrPaint* p = &paint;
    GrDrawState* drawState = fGpu->drawState();
    GrAutoMatrix am;

    // We attempt to map r by the inverse matrix and draw that. mapRect will
    // map the four corners and bound them with a new rect. This will not
    // produce a correct result for some perspective matrices.
    if (!this->getMatrix().hasPerspective()) {
        if (!drawState->getViewInverse(&inverse)) {
            GrPrintf("Could not invert matrix");
            return;
        }
        inverse.mapRect(&r);
    } else {
        // Perspective: instead of mapping the rect, draw in device space with
        // an identity view and fix up the sampler matrices.
        if (paint.getActiveMaskStageMask() || paint.getActiveStageMask()) {
            if (!drawState->getViewInverse(&inverse)) {
                GrPrintf("Could not invert matrix");
                return;
            }
            tmpPaint.set(paint);
            tmpPaint.get()->preConcatActiveSamplerMatrices(inverse);
            p = tmpPaint.get();
        }
        am.set(this, GrMatrix::I());
    }
    // by definition this fills the entire clip, no need for AA
    if (paint.fAntiAlias) {
        if (!tmpPaint.isValid()) {
            tmpPaint.set(paint);
            p = tmpPaint.get();
        }
        GrAssert(p == tmpPaint.get());
        tmpPaint.get()->fAntiAlias = false;
    }
    this->drawRect(*p, r);
}

////////////////////////////////////////////////////////////////////////////////

namespace {
// True when coverage-based AA must be disabled because the target's blend
// setup cannot incorporate a coverage value.
inline bool disable_coverage_aa_for_blend(GrDrawTarget* target) {
    return DISABLE_COVERAGE_AA_FOR_BLEND && !target->canApplyCoverage();
}
}

// Bookkeeping for one offscreen-AA render: the chosen downsample technique,
// tile geometry, the offscreen scratch texture(s), and the draw state/clip to
// restore afterwards.
struct GrContext::OffscreenRecord {
    enum Downsample {
        k4x4TwoPass_Downsample,     // two 2x2 passes (no shader support)
        k4x4SinglePass_Downsample,  // one 4x4 filtered pass
        kFSAA_Downsample            // hardware MSAA resolve
    } fDownsample;
    int fTileSizeX;
    int fTileSizeY;
    int fTileCountX;
    int fTileCountY;
    int fScale;                     // supersample factor (1 for FSAA)
    GrAutoScratchTexture fOffscreen0;
    GrAutoScratchTexture fOffscreen1;
    GrDrawTarget::SavedDrawState fSavedState;
    GrClip fClip;
};

// Decides whether offscreen AA should be used for this draw.
bool GrContext::doOffscreenAA(GrDrawTarget* target,
                              bool isHairLines) const {
#if !GR_USE_OFFSCREEN_AA
    return false;
#else
    // Line primitives are always rasterized as 1 pixel wide.
    // Super-sampling would make them too thin but MSAA would be OK.
    if (isHairLines &&
        (!PREFER_MSAA_OFFSCREEN_AA || !fGpu->getCaps().fFSAASupport)) {
        return false;
    }
    if (target->getDrawState().getRenderTarget()->isMultisampled()) {
        return false;
    }
    if (disable_coverage_aa_for_blend(target)) {
#if GR_DEBUG
        //GrPrintf("Turning off AA to correctly apply blend.\n");
#endif
        return false;
    }
    return true;
#endif
}

// Allocates the offscreen scratch texture(s), picks the downsample technique,
// and computes the tiling of boundRect. Saves the target's draw state and
// clip into record for later restoration. Returns false if a needed scratch
// texture could not be obtained.
bool GrContext::prepareForOffscreenAA(GrDrawTarget* target,
                                      bool requireStencil,
                                      const GrIRect& boundRect,
                                      GrPathRenderer* pr,
                                      OffscreenRecord* record) {

    GrAssert(GR_USE_OFFSCREEN_AA);

    GrAssert(NULL == record->fOffscreen0.texture());
    GrAssert(NULL == record->fOffscreen1.texture());
    GrAssert(!boundRect.isEmpty());

    int boundW = boundRect.width();
    int boundH = boundRect.height();

    GrTextureDesc desc;

    desc.fWidth = GrMin(fMaxOffscreenAASize, boundW);
    desc.fHeight = GrMin(fMaxOffscreenAASize, boundH);

    if (requireStencil) {
        desc.fFlags = kRenderTarget_GrTextureFlagBit;
    } else {
        desc.fFlags = kRenderTarget_GrTextureFlagBit |
                      kNoStencil_GrTextureFlagBit;
    }

    desc.fConfig = kRGBA_8888_PM_GrPixelConfig;

    if (PREFER_MSAA_OFFSCREEN_AA && fGpu->getCaps().fFSAASupport) {
        record->fDownsample = OffscreenRecord::kFSAA_Downsample;
        record->fScale = 1;
        desc.fAALevel = kMed_GrAALevel;
    } else {
        record->fDownsample = fGpu->getCaps().fShaderSupport ?
                                OffscreenRecord::k4x4SinglePass_Downsample :
                                OffscreenRecord::k4x4TwoPass_Downsample;
        record->fScale = OFFSCREEN_SSAA_SCALE;
        // both downsample paths assume this
        GR_STATIC_ASSERT(4 == OFFSCREEN_SSAA_SCALE);
        desc.fAALevel = kNone_GrAALevel;
    }

    desc.fWidth *= record->fScale;
    desc.fHeight *= record->fScale;
    record->fOffscreen0.set(this, desc);
    if (NULL == record->fOffscreen0.texture()) {
        return false;
    }
    // the approximate lookup might have given us some slop space, might as well
    // use it when computing the tiles size.
    // these are scale values, will adjust after considering
    // the possible second offscreen.
    record->fTileSizeX = record->fOffscreen0.texture()->width();
    record->fTileSizeY = record->fOffscreen0.texture()->height();

    if (OffscreenRecord::k4x4TwoPass_Downsample == record->fDownsample) {
        desc.fWidth /= 2;
        desc.fHeight /= 2;
        record->fOffscreen1.set(this, desc);
        if (NULL == record->fOffscreen1.texture()) {
            return false;
        }
        // NOTE(review): this clamps against 2x offscreen0's dimensions, which
        // can never be smaller than offscreen0 itself; possibly offscreen1
        // was intended here — verify.
        record->fTileSizeX = GrMin(record->fTileSizeX,
                                   2 * record->fOffscreen0.texture()->width());
        record->fTileSizeY = GrMin(record->fTileSizeY,
                                   2 * record->fOffscreen0.texture()->height());
    }
    record->fTileSizeX /= record->fScale;
    record->fTileSizeY /= record->fScale;

    record->fTileCountX = GrIDivRoundUp(boundW, record->fTileSizeX);
    record->fTileCountY = GrIDivRoundUp(boundH, record->fTileSizeY);

    record->fClip = target->getClip();

    target->saveCurrentDrawState(&record->fSavedState);
    return true;
}

// Pass 1 setup for one tile: redirect rendering into the (supersampled)
// offscreen target, translate/scale the view matrix so the tile fills it,
// and clear the area that will be consumed.
void GrContext::setupOffscreenAAPass1(GrDrawTarget* target,
                                      const GrIRect& boundRect,
                                      int tileX, int tileY,
                                      OffscreenRecord* record) {

    GrRenderTarget* offRT0 = record->fOffscreen0.texture()->asRenderTarget();
    GrAssert(NULL != offRT0);

    GrPaint tempPaint;
    tempPaint.reset();
    this->setPaint(tempPaint, target);
    GrDrawState* drawState = target->drawState();
    drawState->setRenderTarget(offRT0);
#if PREFER_MSAA_OFFSCREEN_AA
    target->enableState(GrDrawState::kHWAntialias_StateBit);
#endif

    GrMatrix transM;
    int left = boundRect.fLeft + tileX * record->fTileSizeX;
    int top = boundRect.fTop + tileY * record->fTileSizeY;
    transM.setTranslate(-left * GR_Scalar1, -top * GR_Scalar1);
    drawState->viewMatrix()->postConcat(transM);
    GrMatrix scaleM;
    scaleM.setScale(record->fScale * GR_Scalar1, record->fScale * GR_Scalar1);
    drawState->viewMatrix()->postConcat(scaleM);

    // Edge tiles may be partial.
    int w = (tileX == record->fTileCountX-1) ? boundRect.fRight - left :
                                               record->fTileSizeX;
    int h = (tileY == record->fTileCountY-1) ? boundRect.fBottom - top :
                                               record->fTileSizeY;
    GrIRect clear = SkIRect::MakeWH(record->fScale * w,
                                    record->fScale * h);
    target->setClip(GrClip(clear));
#if 0
    // visualize tile boundaries by setting edges of offscreen to white
    // and interior to transparent black.
    target->clear(&clear, 0xffffffff);

    static const int gOffset = 2;
    GrIRect clear2 = SkIRect::MakeLTRB(gOffset, gOffset,
                                       record->fScale * w - gOffset,
                                       record->fScale * h - gOffset);
    target->clear(&clear2, 0x0);
#else
    target->clear(&clear, 0x0);
#endif
}

// Pass 2 for one tile: downsample the offscreen rendering (per the technique
// chosen in prepare) and composite it back into the main render target as an
// extra coverage stage on top of the caller's original draw state.
void GrContext::doOffscreenAAPass2(GrDrawTarget* target,
                                   const GrPaint& paint,
                                   const GrIRect& boundRect,
                                   int tileX, int tileY,
                                   OffscreenRecord* record) {
    SK_TRACE_EVENT0("GrContext::doOffscreenAAPass2");
    GrAssert(NULL != record->fOffscreen0.texture());
    GrDrawTarget::AutoGeometryPush agp(target);
    GrIRect tileRect;
    tileRect.fLeft = boundRect.fLeft + tileX * record->fTileSizeX;
    // NOTE(review): trailing comma below is the comma operator, not a
    // semicolon; behavior is unchanged but it is presumably a typo.
    tileRect.fTop = boundRect.fTop + tileY * record->fTileSizeY,
    tileRect.fRight = (tileX == record->fTileCountX-1) ?
                      boundRect.fRight :
                      tileRect.fLeft + record->fTileSizeX;
    tileRect.fBottom = (tileY == record->fTileCountY-1) ?
                       boundRect.fBottom :
                       tileRect.fTop + record->fTileSizeY;

    GrSamplerState::Filter filter;
    if (OffscreenRecord::k4x4SinglePass_Downsample == record->fDownsample) {
        filter = GrSamplerState::k4x4Downsample_Filter;
    } else {
        filter = GrSamplerState::kBilinear_Filter;
    }

    GrMatrix sampleM;
    GrSamplerState sampler(GrSamplerState::kClamp_WrapMode, filter);

    GrTexture* src = record->fOffscreen0.texture();
    int scale;

    enum {
        // Stage index reserved for the AA-resolve texture (above paint stages).
        kOffscreenStage = GrPaint::kTotalStages,
    };

    GrDrawState* drawState = target->drawState();

    if (OffscreenRecord::k4x4TwoPass_Downsample == record->fDownsample) {
        GrAssert(NULL != record->fOffscreen1.texture());
        scale = 2;
        GrRenderTarget* dst = record->fOffscreen1.texture()->asRenderTarget();

        // Do 2x2 downsample from first to second
        drawState->setTexture(kOffscreenStage, src);
        drawState->setRenderTarget(dst);
        drawState->setViewMatrix(GrMatrix::I());
        sampleM.setScale(scale * GR_Scalar1 / src->width(),
                         scale * GR_Scalar1 / src->height());
        sampler.setMatrix(sampleM);
        drawState->setSampler(kOffscreenStage, sampler);
        GrRect rect = SkRect::MakeWH(SkIntToScalar(scale * tileRect.width()),
                                     SkIntToScalar(scale * tileRect.height()));
        target->drawSimpleRect(rect, NULL, 1 << kOffscreenStage);

        src = record->fOffscreen1.texture();
    } else if (OffscreenRecord::kFSAA_Downsample == record->fDownsample) {
        scale = 1;
        GrIRect rect = SkIRect::MakeWH(tileRect.width(), tileRect.height());
        src->asRenderTarget()->overrideResolveRect(rect);
    } else {
        GrAssert(OffscreenRecord::k4x4SinglePass_Downsample ==
                 record->fDownsample);
        scale = 4;
    }

    // setup for draw back to main RT, we use the original
    // draw state setup by the caller plus an additional coverage
    // stage to handle the AA resolve. Also, we use an identity
    // view matrix and so pre-concat sampler matrices with view inv.
    int stageMask = paint.getActiveStageMask();

    target->restoreDrawState(record->fSavedState);
    target->setClip(record->fClip);

    if (stageMask) {
        GrMatrix invVM;
        if (drawState->getViewInverse(&invVM)) {
            drawState->preConcatSamplerMatrices(stageMask, invVM);
        }
    }
    // This is important when tiling, otherwise second tile's
    // pass 1 view matrix will be incorrect.
    GrDrawState::AutoViewMatrixRestore avmr(drawState, GrMatrix::I());

    drawState->setTexture(kOffscreenStage, src);
    sampleM.setScale(scale * GR_Scalar1 / src->width(),
                     scale * GR_Scalar1 / src->height());
    sampler.setMatrix(sampleM);
    sampleM.setTranslate(SkIntToScalar(-tileRect.fLeft),
                         SkIntToScalar(-tileRect.fTop));
    sampler.preConcatMatrix(sampleM);
    drawState->setSampler(kOffscreenStage, sampler);

    GrRect dstRect;
    int stages = (1 << kOffscreenStage) | stageMask;
    dstRect.set(tileRect);
    target->drawSimpleRect(dstRect, NULL, stages);
}

// Restores the draw state saved in prepareForOffscreenAA. (pr is unused here;
// the scratch textures release via the record's GrAutoScratchTexture members.)
void GrContext::cleanupOffscreenAA(GrDrawTarget* target,
                                   GrPathRenderer* pr,
                                   OffscreenRecord* record) {
    target->restoreDrawState(record->fSavedState);
}

////////////////////////////////////////////////////////////////////////////////

/* create a triangle strip that strokes the specified triangle. There are 8
 unique vertices, but we repeat the last 2 to close up. Alternatively we
 could use an indices array, and then only send 8 verts, but not sure that
 would be faster.
 */
static void setStrokeRectStrip(GrPoint verts[10], GrRect rect,
                               GrScalar width) {
    // Outset/inset each corner by half the stroke width; even-indexed verts
    // are the inner ring, odd-indexed the outer ring.
    const GrScalar rad = GrScalarHalf(width);
    rect.sort();

    verts[0].set(rect.fLeft + rad, rect.fTop + rad);
    verts[1].set(rect.fLeft - rad, rect.fTop - rad);
    verts[2].set(rect.fRight - rad, rect.fTop + rad);
    verts[3].set(rect.fRight + rad, rect.fTop - rad);
    verts[4].set(rect.fRight - rad, rect.fBottom - rad);
    verts[5].set(rect.fRight + rad, rect.fBottom + rad);
    verts[6].set(rect.fLeft + rad, rect.fBottom - rad);
    verts[7].set(rect.fLeft - rad, rect.fBottom + rad);
    // repeat the first two verts to close the strip
    verts[8] = verts[0];
    verts[9] = verts[1];
}

// Writes a 4-point rect fan for r inset by (dx, dy); stride is the byte
// distance between consecutive points in the vertex array.
static void setInsetFan(GrPoint* pts, size_t stride,
                        const GrRect& r, GrScalar dx, GrScalar dy) {
    pts->setRectFan(r.fLeft + dx, r.fTop + dy, r.fRight - dx, r.fBottom - dy, stride);
}

// Indices for a filled AA rect: verts 0-3 are the outer (transparent) ring,
// 4-7 the inner ring; four edge quads plus the interior quad.
static const uint16_t gFillAARectIdx[] = {
    0, 1, 5, 5, 4, 0,
    1, 2, 6, 6, 5, 1,
    2, 3, 7, 7, 6, 2,
    3, 0, 4, 4, 7, 3,
    4, 5, 6, 6, 7, 4,
};

int GrContext::aaFillRectIndexCount() const {
    return GR_ARRAY_COUNT(gFillAARectIdx);
}

// Lazily creates (and caches in fAAFillRectIndexBuffer) the fill index buffer.
// May return NULL if buffer creation fails.
GrIndexBuffer* GrContext::aaFillRectIndexBuffer() {
    if (NULL == fAAFillRectIndexBuffer) {
        fAAFillRectIndexBuffer = fGpu->createIndexBuffer(sizeof(gFillAARectIdx),
                                                         false);
        if (NULL != fAAFillRectIndexBuffer) {
    #if GR_DEBUG
            bool updated =
    #endif
            fAAFillRectIndexBuffer->updateData(gFillAARectIdx,
                                               sizeof(gFillAARectIdx));
            GR_DEBUGASSERT(updated);
        }
    }
    return fAAFillRectIndexBuffer;
}

// Indices for a stroked AA rect: three concentric rings of 4 verts each
// (outer edge, stroke body, inner edge), stitched as in gFillAARectIdx.
static const uint16_t gStrokeAARectIdx[] = {
    0 + 0, 1 + 0, 5 + 0, 5 + 0, 4 + 0, 0 + 0,
    1 + 0, 2 + 0, 6 + 0, 6 + 0, 5 + 0, 1 + 0,
    2 + 0, 3 + 0, 7 + 0, 7 + 0, 6 + 0, 2 + 0,
    3 + 0, 0 + 0, 4 + 0, 4 + 0, 7 + 0, 3 + 0,

    0 + 4, 1 + 4, 5 + 4, 5 + 4, 4 + 4, 0 + 4,
    1 + 4, 2 + 4, 6 + 4, 6 + 4, 5 + 4, 1 + 4,
    2 + 4, 3 + 4, 7 + 4, 7 + 4, 6 + 4, 2 + 4,
    3 + 4, 0 + 4, 4 + 4, 4 + 4, 7 + 4, 3 + 4,

    0 + 8, 1 + 8, 5 + 8, 5 + 8, 4 + 8, 0 + 8,
    1 + 8, 2 + 8, 6 + 8, 6 + 8, 5 + 8, 1 + 8,
    2 + 8, 3 + 8, 7 + 8, 7 + 8, 6 + 8, 2 + 8,
    3 + 8, 0 + 8, 4 + 8, 4 + 8, 7 + 8, 3 + 8,
};

int GrContext::aaStrokeRectIndexCount() const {
    return GR_ARRAY_COUNT(gStrokeAARectIdx);
}

// Lazily creates (and caches) the stroke index buffer; mirrors
// aaFillRectIndexBuffer. May return NULL on failure.
GrIndexBuffer* GrContext::aaStrokeRectIndexBuffer() {
    if (NULL == fAAStrokeRectIndexBuffer) {
        fAAStrokeRectIndexBuffer = fGpu->createIndexBuffer(sizeof(gStrokeAARectIdx),
                                                           false);
        if (NULL != fAAStrokeRectIndexBuffer) {
    #if GR_DEBUG
            bool updated =
    #endif
            fAAStrokeRectIndexBuffer->updateData(gStrokeAARectIdx,
                                                 sizeof(gStrokeAARectIdx));
            GR_DEBUGASSERT(updated);
        }
    }
    return fAAStrokeRectIndexBuffer;
}

// Vertex layout for AA rects: position-as-texcoord for every stage with a
// texture, plus either a per-vertex coverage or per-vertex color channel.
static GrVertexLayout aa_rect_layout(const GrDrawTarget* target,
                                     bool useCoverage) {
    GrVertexLayout layout = 0;
    for (int s = 0; s < GrDrawState::kNumStages; ++s) {
        if (NULL != target->getDrawState().getTexture(s)) {
            layout |= GrDrawTarget::StagePosAsTexCoordVertexLayoutBit(s);
        }
    }
    if (useCoverage) {
        layout |= GrDrawTarget::kCoverage_VertexLayoutBit;
    } else {
        layout |= GrDrawTarget::kColor_VertexLayoutBit;
    }
    return layout;
}

// Draws an antialiased filled rect (device-space) as two nested 4-vertex
// fans: an outer fan at zero coverage/color and an inner fan at full value,
// stitched with gFillAARectIdx.
void GrContext::fillAARect(GrDrawTarget* target,
                           const GrRect& devRect,
                           bool useVertexCoverage) {
    GrVertexLayout layout = aa_rect_layout(target, useVertexCoverage);

    size_t vsize = GrDrawTarget::VertexSize(layout);

    GrDrawTarget::AutoReleaseGeometry geo(target, layout, 8, 0);
    if (!geo.succeeded()) {
        GrPrintf("Failed to get space for vertices!\n");
        return;
    }
    GrIndexBuffer* indexBuffer = this->aaFillRectIndexBuffer();
    if (NULL == indexBuffer) {
        GrPrintf("Failed to create index buffer!\n");
        return;
    }

    intptr_t verts = reinterpret_cast<intptr_t>(geo.vertices());

    GrPoint* fan0Pos =
reinterpret_cast<GrPoint*>(verts); 1083 GrPoint* fan1Pos = reinterpret_cast<GrPoint*>(verts + 4 * vsize); 1084 1085 setInsetFan(fan0Pos, vsize, devRect, -GR_ScalarHalf, -GR_ScalarHalf); 1086 setInsetFan(fan1Pos, vsize, devRect, GR_ScalarHalf, GR_ScalarHalf); 1087 1088 verts += sizeof(GrPoint); 1089 for (int i = 0; i < 4; ++i) { 1090 *reinterpret_cast<GrColor*>(verts + i * vsize) = 0; 1091 } 1092 1093 GrColor innerColor; 1094 if (useVertexCoverage) { 1095 innerColor = 0xffffffff; 1096 } else { 1097 innerColor = target->getDrawState().getColor(); 1098 } 1099 1100 verts += 4 * vsize; 1101 for (int i = 0; i < 4; ++i) { 1102 *reinterpret_cast<GrColor*>(verts + i * vsize) = innerColor; 1103 } 1104 1105 target->setIndexSourceToBuffer(indexBuffer); 1106 1107 target->drawIndexed(kTriangles_PrimitiveType, 0, 1108 0, 8, this->aaFillRectIndexCount()); 1109} 1110 1111void GrContext::strokeAARect(GrDrawTarget* target, 1112 const GrRect& devRect, 1113 const GrVec& devStrokeSize, 1114 bool useVertexCoverage) { 1115 const GrScalar& dx = devStrokeSize.fX; 1116 const GrScalar& dy = devStrokeSize.fY; 1117 const GrScalar rx = GrMul(dx, GR_ScalarHalf); 1118 const GrScalar ry = GrMul(dy, GR_ScalarHalf); 1119 1120 GrScalar spare; 1121 { 1122 GrScalar w = devRect.width() - dx; 1123 GrScalar h = devRect.height() - dy; 1124 spare = GrMin(w, h); 1125 } 1126 1127 if (spare <= 0) { 1128 GrRect r(devRect); 1129 r.inset(-rx, -ry); 1130 fillAARect(target, r, useVertexCoverage); 1131 return; 1132 } 1133 GrVertexLayout layout = aa_rect_layout(target, useVertexCoverage); 1134 size_t vsize = GrDrawTarget::VertexSize(layout); 1135 1136 GrDrawTarget::AutoReleaseGeometry geo(target, layout, 16, 0); 1137 if (!geo.succeeded()) { 1138 GrPrintf("Failed to get space for vertices!\n"); 1139 return; 1140 } 1141 GrIndexBuffer* indexBuffer = this->aaStrokeRectIndexBuffer(); 1142 if (NULL == indexBuffer) { 1143 GrPrintf("Failed to create index buffer!\n"); 1144 return; 1145 } 1146 1147 intptr_t verts = 
reinterpret_cast<intptr_t>(geo.vertices()); 1148 1149 GrPoint* fan0Pos = reinterpret_cast<GrPoint*>(verts); 1150 GrPoint* fan1Pos = reinterpret_cast<GrPoint*>(verts + 4 * vsize); 1151 GrPoint* fan2Pos = reinterpret_cast<GrPoint*>(verts + 8 * vsize); 1152 GrPoint* fan3Pos = reinterpret_cast<GrPoint*>(verts + 12 * vsize); 1153 1154 setInsetFan(fan0Pos, vsize, devRect, -rx - GR_ScalarHalf, -ry - GR_ScalarHalf); 1155 setInsetFan(fan1Pos, vsize, devRect, -rx + GR_ScalarHalf, -ry + GR_ScalarHalf); 1156 setInsetFan(fan2Pos, vsize, devRect, rx - GR_ScalarHalf, ry - GR_ScalarHalf); 1157 setInsetFan(fan3Pos, vsize, devRect, rx + GR_ScalarHalf, ry + GR_ScalarHalf); 1158 1159 verts += sizeof(GrPoint); 1160 for (int i = 0; i < 4; ++i) { 1161 *reinterpret_cast<GrColor*>(verts + i * vsize) = 0; 1162 } 1163 1164 GrColor innerColor; 1165 if (useVertexCoverage) { 1166 innerColor = 0xffffffff; 1167 } else { 1168 innerColor = target->getDrawState().getColor(); 1169 } 1170 verts += 4 * vsize; 1171 for (int i = 0; i < 8; ++i) { 1172 *reinterpret_cast<GrColor*>(verts + i * vsize) = innerColor; 1173 } 1174 1175 verts += 8 * vsize; 1176 for (int i = 0; i < 8; ++i) { 1177 *reinterpret_cast<GrColor*>(verts + i * vsize) = 0; 1178 } 1179 1180 target->setIndexSourceToBuffer(indexBuffer); 1181 target->drawIndexed(kTriangles_PrimitiveType, 1182 0, 0, 16, aaStrokeRectIndexCount()); 1183} 1184 1185/** 1186 * Returns true if the rects edges are integer-aligned. 
 */
static bool isIRect(const GrRect& r) {
    return GrScalarIsInt(r.fLeft) && GrScalarIsInt(r.fTop) &&
           GrScalarIsInt(r.fRight) && GrScalarIsInt(r.fBottom);
}

// Decides whether drawRect should take the alpha-ramp AA path. On success
// returns true and fills in:
//   *combinedMatrix    - view matrix pre-concatenated with |matrix|
//   *devRect           - rect mapped into (sorted) device space
//   *useVertexCoverage - true if AA must be expressed via the coverage
//                        channel instead of tweaking the color's alpha
// Returns false when AA should be skipped (MSAA target, HW AA lines,
// non-axis-aligned matrices, blend incompatibility, or a hairline/fill that
// already lands on integer device coords).
static bool apply_aa_to_rect(GrDrawTarget* target,
                             const GrRect& rect,
                             GrScalar width,
                             const GrMatrix* matrix,
                             GrMatrix* combinedMatrix,
                             GrRect* devRect,
                             bool* useVertexCoverage) {
    // we use a simple alpha ramp to do aa on axis-aligned rects
    // do AA with alpha ramp if the caller requested AA, the rect
    // will be axis-aligned, and the rect won't land on integer coords.

    // we are keeping around the "tweak the alpha" trick because
    // it is our only hope for the fixed-pipe implementation.
    // In a shader implementation we can give a separate coverage input
    // TODO: remove this ugliness when we drop the fixed-pipe impl
    *useVertexCoverage = false;
    if (!target->canTweakAlphaForCoverage()) {
        if (target->getCaps().fSupportPerVertexCoverage) {
            if (disable_coverage_aa_for_blend(target)) {
#if GR_DEBUG
                //GrPrintf("Turning off AA to correctly apply blend.\n");
#endif
                return false;
            } else {
                *useVertexCoverage = true;
            }
        } else {
            GrPrintf("Rect AA dropped because no support for coverage.\n");
            return false;
        }
    }
    const GrDrawState& drawState = target->getDrawState();
    if (drawState.getRenderTarget()->isMultisampled()) {
        return false;
    }

    if (0 == width && target->willUseHWAALines()) {
        return false;
    }

    if (!drawState.getViewMatrix().preservesAxisAlignment()) {
        return false;
    }

    if (NULL != matrix &&
        !matrix->preservesAxisAlignment()) {
        return false;
    }

    *combinedMatrix = drawState.getViewMatrix();
    if (NULL != matrix) {
        combinedMatrix->preConcat(*matrix);
        GrAssert(combinedMatrix->preservesAxisAlignment());
    }

    combinedMatrix->mapRect(devRect, rect);
    devRect->sort();

    // Hairlines (width < 0 means fill) on integer coords don't need AA.
    if (width < 0) {
        return !isIRect(*devRect);
    } else {
        return true;
    }
}

// Draws |rect| with the given paint. width < 0 => fill, width == 0 =>
// hairline, width > 0 => stroke. |matrix| (optional) is applied to the rect's
// geometry on top of the current view matrix. Chooses between the AA
// vertex-ramp path, a 10-vertex stroke strip, a 5-vertex hairline line strip,
// and (fill) either a static unit-square VB or drawSimpleRect.
void GrContext::drawRect(const GrPaint& paint,
                         const GrRect& rect,
                         GrScalar width,
                         const GrMatrix* matrix) {
    SK_TRACE_EVENT0("GrContext::drawRect");

    GrDrawTarget* target = this->prepareToDraw(paint, kUnbuffered_DrawCategory);
    int stageMask = paint.getActiveStageMask();

    GrRect devRect = rect;
    GrMatrix combinedMatrix;
    bool useVertexCoverage;
    bool needAA = paint.fAntiAlias &&
                  !this->getRenderTarget()->isMultisampled();
    bool doAA = needAA && apply_aa_to_rect(target, rect, width, matrix,
                                           &combinedMatrix, &devRect,
                                           &useVertexCoverage);

    if (doAA) {
        // AA geometry is built in device coords; suspend the view matrix.
        GrDrawTarget::AutoDeviceCoordDraw adcd(target, stageMask);
        if (width >= 0) {
            GrVec strokeSize;;  // NOTE(review): stray second ';' — harmless
            if (width > 0) {
                strokeSize.set(width, width);
                // Map the stroke width into device space along with the rect.
                combinedMatrix.mapVectors(&strokeSize, 1);
                strokeSize.setAbs(strokeSize);
            } else {
                // Hairline stroke: one device pixel wide.
                strokeSize.set(GR_Scalar1, GR_Scalar1);
            }
            strokeAARect(target, devRect, strokeSize, useVertexCoverage);
        } else {
            fillAARect(target, devRect, useVertexCoverage);
        }
        return;
    }

    if (width >= 0) {
        // TODO: consider making static vertex buffers for these cases.
        // Hairline could be done by just adding closing vertex to
        // unitSquareVertexBuffer()
        GrVertexLayout layout = PaintStageVertexLayoutBits(paint, NULL);

        static const int worstCaseVertCount = 10;
        GrDrawTarget::AutoReleaseGeometry geo(target, layout, worstCaseVertCount, 0);

        if (!geo.succeeded()) {
            GrPrintf("Failed to get space for vertices!\n");
            return;
        }

        GrPrimitiveType primType;
        int vertCount;
        GrPoint* vertex = geo.positions();

        if (width > 0) {
            vertCount = 10;
            primType = kTriangleStrip_PrimitiveType;
            setStrokeRectStrip(vertex, rect, width);
        } else {
            // hairline
            vertCount = 5;
            primType = kLineStrip_PrimitiveType;
            vertex[0].set(rect.fLeft, rect.fTop);
            vertex[1].set(rect.fRight, rect.fTop);
            vertex[2].set(rect.fRight, rect.fBottom);
            vertex[3].set(rect.fLeft, rect.fBottom);
            vertex[4].set(rect.fLeft, rect.fTop);
        }

        GrDrawState::AutoViewMatrixRestore avmr;
        if (NULL != matrix) {
            GrDrawState* drawState = target->drawState();
            avmr.set(drawState);
            drawState->preConcatViewMatrix(*matrix);
            drawState->preConcatSamplerMatrices(stageMask, *matrix);
        }

        target->drawNonIndexed(primType, 0, vertCount);
    } else {
#if GR_STATIC_RECT_VB
        // Fill via the shared unit-square VB, mapping it onto |rect| by
        // folding a scale+translate into the view matrix.
        GrVertexLayout layout = PaintStageVertexLayoutBits(paint, NULL);
        const GrVertexBuffer* sqVB = fGpu->getUnitSquareVertexBuffer();
        if (NULL == sqVB) {
            GrPrintf("Failed to create static rect vb.\n");
            return;
        }
        target->setVertexSourceToBuffer(layout, sqVB);
        GrDrawState* drawState = target->drawState();
        GrDrawState::AutoViewMatrixRestore avmr(drawState);
        GrMatrix m;
        m.setAll(rect.width(), 0,             rect.fLeft,
                 0,            rect.height(), rect.fTop,
                 0,            0,             GrMatrix::I()[8]);

        if (NULL != matrix) {
            m.postConcat(*matrix);
        }
        drawState->preConcatViewMatrix(m);
        drawState->preConcatSamplerMatrices(stageMask, m);

        target->drawNonIndexed(kTriangleFan_PrimitiveType, 0, 4);
#else
        target->drawSimpleRect(rect, matrix, stageMask);
#endif
    }
}

// Draws |srcRect| of the paint's first texture into |dstRect|, with optional
// extra matrices applied to each. Falls back to plain drawRect when there is
// no texture on stage 0 (srcRect would be meaningless).
void GrContext::drawRectToRect(const GrPaint& paint,
                               const GrRect& dstRect,
                               const GrRect& srcRect,
                               const GrMatrix* dstMatrix,
                               const GrMatrix* srcMatrix) {
    SK_TRACE_EVENT0("GrContext::drawRectToRect");

    // srcRect refers to paint's first texture
    if (NULL == paint.getTexture(0)) {
        drawRect(paint, dstRect, -1, dstMatrix);
        return;
    }

    // The two fast paths are mutually exclusive build configurations.
    GR_STATIC_ASSERT(!BATCH_RECT_TO_RECT || !GR_STATIC_RECT_VB);

#if GR_STATIC_RECT_VB
    // Static unit-square VB: fold dst mapping into the view matrix and src
    // mapping into the stage-0 sampler matrix.
    GrDrawTarget* target = this->prepareToDraw(paint, kUnbuffered_DrawCategory);
    GrDrawState* drawState = target->drawState();
    GrVertexLayout layout = PaintStageVertexLayoutBits(paint, NULL);
    GrDrawState::AutoViewMatrixRestore avmr(drawState);

    GrMatrix m;

    m.setAll(dstRect.width(), 0,                dstRect.fLeft,
             0,               dstRect.height(), dstRect.fTop,
             0,               0,                GrMatrix::I()[8]);
    if (NULL != dstMatrix) {
        m.postConcat(*dstMatrix);
    }
    drawState->preConcatViewMatrix(m);

    // srcRect refers to first stage
    int otherStageMask = paint.getActiveStageMask() &
                         (~(1 << GrPaint::kFirstTextureStage));
    if (otherStageMask) {
        drawState->preConcatSamplerMatrices(otherStageMask, m);
    }

    m.setAll(srcRect.width(), 0,                srcRect.fLeft,
             0,               srcRect.height(), srcRect.fTop,
             0,               0,                GrMatrix::I()[8]);
    if (NULL != srcMatrix) {
        m.postConcat(*srcMatrix);
    }
    drawState->sampler(GrPaint::kFirstTextureStage)->preConcatMatrix(m);

    const GrVertexBuffer* sqVB = fGpu->getUnitSquareVertexBuffer();
    if (NULL == sqVB) {
        GrPrintf("Failed to create static rect vb.\n");
        return;
    }
    target->setVertexSourceToBuffer(layout, sqVB);
    target->drawNonIndexed(kTriangleFan_PrimitiveType, 0, 4);
#else

    GrDrawTarget* target;
#if BATCH_RECT_TO_RECT
    // Buffered so consecutive rect-to-rect draws can batch (quad indices).
    target = this->prepareToDraw(paint, kBuffered_DrawCategory);
#else
    target = this->prepareToDraw(paint, kUnbuffered_DrawCategory);
#endif

    const GrRect* srcRects[GrDrawState::kNumStages] = {NULL};
    const GrMatrix* srcMatrices[GrDrawState::kNumStages] = {NULL};
    srcRects[0] = &srcRect;
    srcMatrices[0] = srcMatrix;

    target->drawRect(dstRect, dstMatrix, 1, srcRects, srcMatrices);
#endif
}

// Draws user-supplied geometry. If optional per-vertex tex coords or colors
// are present the data is interleaved into a scratch buffer; otherwise the
// positions array is used directly as the vertex source.
void GrContext::drawVertices(const GrPaint& paint,
                             GrPrimitiveType primitiveType,
                             int vertexCount,
                             const GrPoint positions[],
                             const GrPoint texCoords[],
                             const GrColor colors[],
                             const uint16_t indices[],
                             int indexCount) {
    SK_TRACE_EVENT0("GrContext::drawVertices");

    GrDrawTarget::AutoReleaseGeometry geo;

    GrDrawTarget* target = this->prepareToDraw(paint, kUnbuffered_DrawCategory);

    bool hasTexCoords[GrPaint::kTotalStages] = {
        NULL != texCoords,   // texCoordSrc provides explicit stage 0 coords
        0                    // remaining stages use positions
    };

    GrVertexLayout layout = PaintStageVertexLayoutBits(paint, hasTexCoords);

    if (NULL != colors) {
        layout |= GrDrawTarget::kColor_VertexLayoutBit;
    }
    int vertexSize = GrDrawTarget::VertexSize(layout);

    if (sizeof(GrPoint) != vertexSize) {
        // Layout carries more than positions: interleave into scratch space.
        if (!geo.set(target, layout, vertexCount, 0)) {
            GrPrintf("Failed to get space for vertices!\n");
            return;
        }
        int texOffsets[GrDrawState::kMaxTexCoords];
        int colorOffset;
        GrDrawTarget::VertexSizeAndOffsetsByIdx(layout,
                                                texOffsets,
                                                &colorOffset,
                                                NULL,
                                                NULL);
        void* curVertex = geo.vertices();

        for (int i = 0; i < vertexCount; ++i) {
            *((GrPoint*)curVertex) = positions[i];

            // Offsets <= 0 mean the channel is absent from the layout.
            if (texOffsets[0] > 0) {
                *(GrPoint*)((intptr_t)curVertex + texOffsets[0]) = texCoords[i];
            }
            if (colorOffset > 0) {
                *(GrColor*)((intptr_t)curVertex + colorOffset) = colors[i];
            }
            curVertex = (void*)((intptr_t)curVertex + vertexSize);
        }
    } else {
        target->setVertexSourceToArray(layout, positions, vertexCount);
    }

    // we don't currently apply offscreen AA to this path. Need improved
    // management of GrDrawTarget's geometry to avoid copying points per-tile.

    if (NULL != indices) {
        target->setIndexSourceToArray(indices, indexCount);
        target->drawIndexed(primitiveType, 0, 0, vertexCount, indexCount);
    } else {
        target->drawNonIndexed(primitiveType, 0, vertexCount);
    }
}

///////////////////////////////////////////////////////////////////////////////

// Draws |path| using the paint. Picks a path renderer (AA-capable if
// requested and available), otherwise may fall back to tiled offscreen
// supersampled AA. |translate| (optional) offsets the path before the view
// matrix is applied.
void GrContext::drawPath(const GrPaint& paint, const GrPath& path,
                         GrPathFill fill, const GrPoint* translate) {

    if (path.isEmpty()) {
#if GR_DEBUG
        GrPrintf("Empty path should have been caught by canvas.\n");
#endif
        // An empty path with an inverse fill still covers everything.
        if (GrIsFillInverted(fill)) {
            this->drawPaint(paint);
        }
        return;
    }

    GrDrawTarget* target = this->prepareToDraw(paint, kUnbuffered_DrawCategory);

    bool prAA = paint.fAntiAlias && !this->getRenderTarget()->isMultisampled();

    // An Assumption here is that path renderer would use some form of tweaking
    // the src color (either the input alpha or in the frag shader) to implement
    // aa. If we have some future driver-mojo path AA that can do the right
    // thing WRT to the blend then we'll need some query on the PR.
    if (disable_coverage_aa_for_blend(target)) {
#if GR_DEBUG
        //GrPrintf("Turning off AA to correctly apply blend.\n");
#endif
        prAA = false;
    }

    bool doOSAA = false;
    GrPathRenderer* pr = NULL;
    if (prAA) {
        pr = this->getPathRenderer(path, fill, true);
        if (NULL == pr) {
            // No AA-capable renderer: try offscreen supersampling instead.
            prAA = false;
            doOSAA = this->doOffscreenAA(target, kHairLine_PathFill == fill);
            pr = this->getPathRenderer(path, fill, false);
        }
    } else {
        pr = this->getPathRenderer(path, fill, false);
    }

    if (NULL == pr) {
#if GR_DEBUG
        GrPrintf("Unable to find path renderer compatible with path.\n");
#endif
        return;
    }

    GrPathRenderer::AutoClearPath arp(pr, target, &path, fill, prAA, translate);
    GrDrawState::StageMask stageMask = paint.getActiveStageMask();

    if (doOSAA) {
        bool needsStencil = pr->requiresStencilPass(target, path, fill);
        const GrRenderTarget* rt = target->getDrawState().getRenderTarget();
        // compute bounds as intersection of rt size, clip, and path
        GrIRect bound = SkIRect::MakeWH(rt->width(), rt->height());
        // NOTE(review): clipIBounds is only written when the clip has
        // conservative bounds, yet it is compared/read below in the
        // inverse-fill border pass — verify it cannot be read uninitialized.
        GrIRect clipIBounds;
        if (target->getClip().hasConservativeBounds()) {
            target->getClip().getConservativeBounds().roundOut(&clipIBounds);
            if (!bound.intersect(clipIBounds)) {
                return;
            }
        }
        GrRect pathBounds = path.getBounds();
        if (!pathBounds.isEmpty()) {
            if (NULL != translate) {
                pathBounds.offset(*translate);
            }
            target->getDrawState().getViewMatrix().mapRect(&pathBounds,
                                                           pathBounds);
            GrIRect pathIBounds;
            pathBounds.roundOut(&pathIBounds);
            if (!bound.intersect(pathIBounds)) {
                return;
            }
        }
        OffscreenRecord record;
        if (this->prepareForOffscreenAA(target, needsStencil, bound,
                                        pr, &record)) {
            // Render the path tile by tile, resolving each tile back.
            for (int tx = 0; tx < record.fTileCountX; ++tx) {
                for (int ty = 0; ty < record.fTileCountY; ++ty) {
                    this->setupOffscreenAAPass1(target, bound, tx, ty, &record);
                    pr->drawPath(0);
                    this->doOffscreenAAPass2(target, paint, bound, tx, ty, &record);
                }
            }
            this->cleanupOffscreenAA(target, pr, &record);
            if (GrIsFillInverted(fill) && bound != clipIBounds) {
                // Inverse fills cover everything outside the path too: paint
                // the four clip-rect strips the offscreen pass didn't touch.
                GrDrawTarget::AutoDeviceCoordDraw adcd(target, stageMask);
                GrRect rect;
                if (clipIBounds.fTop < bound.fTop) {
                    rect.iset(clipIBounds.fLeft, clipIBounds.fTop,
                              clipIBounds.fRight, bound.fTop);
                    target->drawSimpleRect(rect, NULL, stageMask);
                }
                if (clipIBounds.fLeft < bound.fLeft) {
                    rect.iset(clipIBounds.fLeft, bound.fTop,
                              bound.fLeft, bound.fBottom);
                    target->drawSimpleRect(rect, NULL, stageMask);
                }
                if (clipIBounds.fRight > bound.fRight) {
                    rect.iset(bound.fRight, bound.fTop,
                              clipIBounds.fRight, bound.fBottom);
                    target->drawSimpleRect(rect, NULL, stageMask);
                }
                if (clipIBounds.fBottom > bound.fBottom) {
                    rect.iset(clipIBounds.fLeft, bound.fBottom,
                              clipIBounds.fRight, clipIBounds.fBottom);
                    target->drawSimpleRect(rect, NULL, stageMask);
                }
            }
            return;
        }
    }
    pr->drawPath(stageMask);
}

////////////////////////////////////////////////////////////////////////////////

bool GrContext::supportsShaders() const {
    return fGpu->getCaps().fShaderSupport;
}

// Flushes pending draws. kDiscard drops buffered draws instead of playing
// them back; kForceCurrentRenderTarget additionally flushes the GPU's RT.
void GrContext::flush(int flagsBitfield) {
    if (kDiscard_FlushBit & flagsBitfield) {
        // NOTE(review): unlike flushDrawBuffer(), this does not null-check
        // fDrawBuffer (it is NULL when both deferral macros are 0) — confirm.
        fDrawBuffer->reset();
    } else {
        this->flushDrawBuffer();
    }
    if (kForceCurrentRenderTarget_FlushBit & flagsBitfield) {
        fGpu->forceRenderTargetFlush();
    }
}

// Flushes only if the last draw was deferred text.
void GrContext::flushText() {
    if (kText_DrawCategory == fLastDrawCategory) {
        flushDrawBuffer();
    }
}

// Plays any buffered draws through the GPU and resets the buffer.
void GrContext::flushDrawBuffer() {
#if BATCH_RECT_TO_RECT || DEFER_TEXT_RENDERING
    if (fDrawBuffer) {
        fDrawBuffer->playback(fGpu);
        fDrawBuffer->reset();
    }
#endif
}

// Uploads |buffer| into a subrect of |texture|. Silently does nothing when a
// premul <-> unpremul conversion would be required (see TODO below).
void GrContext::internalWriteTexturePixels(GrTexture* texture,
                                           int left, int top,
                                           int width, int height,
                                           GrPixelConfig config,
                                           const void* buffer,
                                           size_t rowBytes,
                                           uint32_t flags) {
    SK_TRACE_EVENT0("GrContext::writeTexturePixels");
    ASSERT_OWNED_RESOURCE(texture);

    if (!(kDontFlush_PixelOpsFlag & flags)) {
        this->flush();
    }
    // TODO: use scratch texture to perform conversion
    if (GrPixelConfigIsUnpremultiplied(texture->config()) !=
        GrPixelConfigIsUnpremultiplied(config)) {
        return;
    }

    fGpu->writeTexturePixels(texture, left, top, width, height,
                             config, buffer, rowBytes);
}

// Reads back texture pixels; currently only works when the texture is also a
// render target (delegates to the RT read path).
bool GrContext::internalReadTexturePixels(GrTexture* texture,
                                          int left, int top,
                                          int width, int height,
                                          GrPixelConfig config,
                                          void* buffer,
                                          size_t rowBytes,
                                          uint32_t flags) {
    SK_TRACE_EVENT0("GrContext::readTexturePixels");
    ASSERT_OWNED_RESOURCE(texture);

    // TODO: code read pixels for textures that aren't also rendertargets
    GrRenderTarget* target = texture->asRenderTarget();
    if (NULL != target) {
        return this->internalReadRenderTargetPixels(target,
                                                    left, top, width, height,
                                                    config, buffer, rowBytes,
                                                    flags);
    } else {
        return false;
    }
}

// Reads back pixels from |target| (or the current RT if NULL). When the read
// requires a Y-flip, R/B swap, or premul conversion, the RT is first drawn
// into a scratch render target with those transforms applied, then read.
bool GrContext::internalReadRenderTargetPixels(GrRenderTarget* target,
                                               int left, int top,
                                               int width, int height,
                                               GrPixelConfig config,
                                               void* buffer,
                                               size_t rowBytes,
                                               uint32_t flags) {
    SK_TRACE_EVENT0("GrContext::readRenderTargetPixels");
    ASSERT_OWNED_RESOURCE(target);

    if (NULL == target) {
        target = fGpu->drawState()->getRenderTarget();
        if (NULL == target) {
            return false;
        }
    }

    // PM <-> UPM conversion requires a draw. Currently we only support drawing
    // into a UPM target, not reading from a UPM texture. Thus, UPM->PM is not
    // not supported at this time.
    if (GrPixelConfigIsUnpremultiplied(target->config()) &&
        !GrPixelConfigIsUnpremultiplied(config)) {
        return false;
    }

    if (!(kDontFlush_PixelOpsFlag & flags)) {
        this->flush();
    }

    GrTexture* src = target->asTexture();
    bool swapRAndB = NULL != src &&
                     fGpu->preferredReadPixelsConfig(config) ==
                     GrPixelConfigSwapRAndB(config);

    bool flipY = NULL != src &&
                 fGpu->readPixelsWillPayForYFlip(target, left, top,
                                                 width, height, config,
                                                 rowBytes);
    bool alphaConversion = (!GrPixelConfigIsUnpremultiplied(target->config()) &&
                            GrPixelConfigIsUnpremultiplied(config));

    if (NULL == src && alphaConversion) {
        // we should fallback to cpu conversion here. This could happen when
        // we were given an external render target by the client that is not
        // also a texture (e.g. FBO 0 in GL)
        return false;
    }
    // we draw to a scratch texture if any of these conversion are applied
    GrAutoScratchTexture ast;
    if (flipY || swapRAndB || alphaConversion) {
        GrAssert(NULL != src);
        if (swapRAndB) {
            config = GrPixelConfigSwapRAndB(config);
            GrAssert(kUnknown_GrPixelConfig != config);
        }
        // Make the scratch a render target because we don't have a robust
        // readTexturePixels as of yet (it calls this function).
        const GrTextureDesc desc = {
            kRenderTarget_GrTextureFlagBit,
            kNone_GrAALevel,
            width, height,
            config
        };

        // When a full readback is faster than a partial we could always make
        // the scratch exactly match the passed rect. However, if we see many
        // different size rectangles we will trash our texture cache and pay the
        // cost of creating and destroying many textures. So, we only request
        // an exact match when the caller is reading an entire RT.
        ScratchTexMatch match = kApprox_ScratchTexMatch;
        if (0 == left &&
            0 == top &&
            target->width() == width &&
            target->height() == height &&
            fGpu->fullReadPixelsIsFasterThanPartial()) {
            match = kExact_ScratchTexMatch;
        }
        ast.set(this, desc, match);
        GrTexture* texture = ast.texture();
        if (!texture) {
            return false;
        }
        target = texture->asRenderTarget();
        GrAssert(NULL != target);

        GrDrawTarget::AutoStateRestore asr(fGpu);
        GrDrawState* drawState = fGpu->drawState();
        reset_draw_state(drawState);
        drawState->setRenderTarget(target);

        // Sampler matrix maps the scratch quad back onto the requested
        // subrect of |src|, flipping Y if that makes the readback cheaper.
        GrMatrix matrix;
        if (flipY) {
            matrix.setTranslate(SK_Scalar1 * left,
                                SK_Scalar1 * (top + height));
            matrix.set(GrMatrix::kMScaleY, -GR_Scalar1);
        } else {
            matrix.setTranslate(SK_Scalar1 * left, SK_Scalar1 * top);
        }
        matrix.postIDiv(src->width(), src->height());
        GrSamplerState sampler;
        sampler.reset(GrSamplerState::kClamp_WrapMode,
                      GrSamplerState::kNearest_Filter,
                      matrix);
        sampler.setRAndBSwap(swapRAndB);
        drawState->setSampler(0, sampler);
        drawState->setTexture(0, src);
        GrRect rect;
        rect.setXYWH(0, 0, SK_Scalar1 * width, SK_Scalar1 * height);
        fGpu->drawSimpleRect(rect, NULL, 0x1);
        left = 0;
        top = 0;
    }
    return fGpu->readPixels(target,
                            left, top, width, height,
                            config, buffer, rowBytes, flipY);
}

// Draws the whole of |src| into |dst| at 1:1 scale with nearest filtering.
void GrContext::copyTexture(GrTexture* src, GrRenderTarget* dst) {
    if (NULL == src || NULL == dst) {
        return;
    }
    ASSERT_OWNED_RESOURCE(src);

    GrDrawTarget::AutoStateRestore asr(fGpu);
    GrDrawState* drawState = fGpu->drawState();
    reset_draw_state(drawState);
    drawState->setRenderTarget(dst);
    GrSamplerState sampler(GrSamplerState::kClamp_WrapMode,
                           GrSamplerState::kNearest_Filter);
    GrMatrix sampleM;
    sampleM.setIDiv(src->width(), src->height());
    sampler.setMatrix(sampleM);
    drawState->setTexture(0, src);
    drawState->setSampler(0, sampler);
    SkRect rect = SkRect::MakeXYWH(0, 0, src->width(), src->height());
    fGpu->drawSimpleRect(rect, NULL, 1 << 0);
}

// Writes |buffer| into a subrect of |target| (or the current RT if NULL).
// Prefers the direct texture-upload path when possible; otherwise uploads to
// a scratch texture and draws that onto the target (handling R/B swap).
void GrContext::internalWriteRenderTargetPixels(GrRenderTarget* target,
                                                int left, int top,
                                                int width, int height,
                                                GrPixelConfig config,
                                                const void* buffer,
                                                size_t rowBytes,
                                                uint32_t flags) {
    SK_TRACE_EVENT0("GrContext::writeRenderTargetPixels");
    ASSERT_OWNED_RESOURCE(target);

    if (NULL == target) {
        target = fGpu->drawState()->getRenderTarget();
        if (NULL == target) {
            return;
        }
    }

    // TODO: when underlying api has a direct way to do this we should use it
    // (e.g. glDrawPixels on desktop GL).

    // If the RT is also a texture and we don't have to do PM/UPM conversion
    // then take the texture path, which we expect to be at least as fast or
    // faster since it doesn't use an intermediate texture as we do below.

#if !GR_MAC_BUILD
    // At least some drivers on the Mac get confused when glTexImage2D is called
    // on a texture attached to an FBO. The FBO still sees the old image. TODO:
    // determine what OS versions and/or HW is affected.
    if (NULL != target->asTexture() &&
        GrPixelConfigIsUnpremultiplied(target->config()) ==
        GrPixelConfigIsUnpremultiplied(config)) {

        this->internalWriteTexturePixels(target->asTexture(),
                                         left, top, width, height,
                                         config, buffer, rowBytes, flags);
        return;
    }
#endif

    bool swapRAndB = fGpu->preferredReadPixelsConfig(config) ==
                     GrPixelConfigSwapRAndB(config);
    if (swapRAndB) {
        config = GrPixelConfigSwapRAndB(config);
    }

    const GrTextureDesc desc = {
        kNone_GrTextureFlags, kNone_GrAALevel, width, height, config
    };
    GrAutoScratchTexture ast(this, desc);
    GrTexture* texture = ast.texture();
    if (NULL == texture) {
        return;
    }
    this->internalWriteTexturePixels(texture, 0, 0, width, height,
                                     config, buffer, rowBytes, flags);

    GrDrawTarget::AutoStateRestore asr(fGpu);
    GrDrawState* drawState = fGpu->drawState();
    reset_draw_state(drawState);

    GrMatrix matrix;
    matrix.setTranslate(GrIntToScalar(left), GrIntToScalar(top));
    drawState->setViewMatrix(matrix);
    drawState->setRenderTarget(target);
    drawState->setTexture(0, texture);

    matrix.setIDiv(texture->width(), texture->height());
    GrSamplerState sampler;
    sampler.reset(GrSamplerState::kClamp_WrapMode,
                  GrSamplerState::kNearest_Filter,
                  matrix);
    sampler.setRAndBSwap(swapRAndB);
    drawState->setSampler(0, sampler);

    GrVertexLayout layout = GrDrawTarget::StagePosAsTexCoordVertexLayoutBit(0);
    static const int VCOUNT = 4;
    // TODO: Use GrGpu::drawRect here
    GrDrawTarget::AutoReleaseGeometry geo(fGpu, layout, VCOUNT, 0);
    if (!geo.succeeded()) {
        GrPrintf("Failed to get space for vertices!\n");
        return;
    }
    ((GrPoint*)geo.vertices())->setIRectFan(0, 0, width, height);
    fGpu->drawNonIndexed(kTriangleFan_PrimitiveType, 0, VCOUNT);
}
1921//////////////////////////////////////////////////////////////////////////////// 1922 1923void GrContext::setPaint(const GrPaint& paint, GrDrawTarget* target) { 1924 GrDrawState* drawState = target->drawState(); 1925 1926 for (int i = 0; i < GrPaint::kMaxTextures; ++i) { 1927 int s = i + GrPaint::kFirstTextureStage; 1928 drawState->setTexture(s, paint.getTexture(i)); 1929 ASSERT_OWNED_RESOURCE(paint.getTexture(i)); 1930 drawState->setSampler(s, paint.getTextureSampler(i)); 1931 } 1932 1933 drawState->setFirstCoverageStage(GrPaint::kFirstMaskStage); 1934 1935 for (int i = 0; i < GrPaint::kMaxMasks; ++i) { 1936 int s = i + GrPaint::kFirstMaskStage; 1937 drawState->setTexture(s, paint.getMask(i)); 1938 ASSERT_OWNED_RESOURCE(paint.getMask(i)); 1939 drawState->setSampler(s, paint.getMaskSampler(i)); 1940 } 1941 1942 drawState->setColor(paint.fColor); 1943 1944 if (paint.fDither) { 1945 drawState->enableState(GrDrawState::kDither_StateBit); 1946 } else { 1947 drawState->disableState(GrDrawState::kDither_StateBit); 1948 } 1949 if (paint.fAntiAlias) { 1950 drawState->enableState(GrDrawState::kHWAntialias_StateBit); 1951 } else { 1952 drawState->disableState(GrDrawState::kHWAntialias_StateBit); 1953 } 1954 drawState->setBlendFunc(paint.fSrcBlendCoeff, paint.fDstBlendCoeff); 1955 drawState->setColorFilter(paint.fColorFilterColor, paint.fColorFilterXfermode); 1956 1957 if (paint.getActiveMaskStageMask() && !target->canApplyCoverage()) { 1958 GrPrintf("Partial pixel coverage will be incorrectly blended.\n"); 1959 } 1960} 1961 1962GrDrawTarget* GrContext::prepareToDraw(const GrPaint& paint, 1963 DrawCategory category) { 1964 if (category != fLastDrawCategory) { 1965 flushDrawBuffer(); 1966 fLastDrawCategory = category; 1967 } 1968 this->setPaint(paint, fGpu); 1969 GrDrawTarget* target = fGpu; 1970 switch (category) { 1971 case kText_DrawCategory: 1972#if DEFER_TEXT_RENDERING 1973 target = fDrawBuffer; 1974 fDrawBuffer->initializeDrawStateAndClip(*fGpu); 1975#else 1976 
target = fGpu; 1977#endif 1978 break; 1979 case kUnbuffered_DrawCategory: 1980 target = fGpu; 1981 break; 1982 case kBuffered_DrawCategory: 1983 target = fDrawBuffer; 1984 fDrawBuffer->initializeDrawStateAndClip(*fGpu); 1985 break; 1986 } 1987 return target; 1988} 1989 1990GrPathRenderer* GrContext::getPathRenderer(const GrPath& path, 1991 GrPathFill fill, 1992 bool antiAlias) { 1993 if (NULL == fPathRendererChain) { 1994 fPathRendererChain = 1995 new GrPathRendererChain(this, GrPathRendererChain::kNone_UsageFlag); 1996 } 1997 return fPathRendererChain->getPathRenderer(fGpu->getCaps(), path, 1998 fill, antiAlias); 1999} 2000 2001//////////////////////////////////////////////////////////////////////////////// 2002 2003void GrContext::setRenderTarget(GrRenderTarget* target) { 2004 ASSERT_OWNED_RESOURCE(target); 2005 this->flush(false); 2006 fGpu->drawState()->setRenderTarget(target); 2007} 2008 2009GrRenderTarget* GrContext::getRenderTarget() { 2010 return fGpu->drawState()->getRenderTarget(); 2011} 2012 2013const GrRenderTarget* GrContext::getRenderTarget() const { 2014 return fGpu->getDrawState().getRenderTarget(); 2015} 2016 2017const GrMatrix& GrContext::getMatrix() const { 2018 return fGpu->getDrawState().getViewMatrix(); 2019} 2020 2021void GrContext::setMatrix(const GrMatrix& m) { 2022 fGpu->drawState()->setViewMatrix(m); 2023} 2024 2025void GrContext::concatMatrix(const GrMatrix& m) const { 2026 fGpu->drawState()->preConcatViewMatrix(m); 2027} 2028 2029static inline intptr_t setOrClear(intptr_t bits, int shift, intptr_t pred) { 2030 intptr_t mask = 1 << shift; 2031 if (pred) { 2032 bits |= mask; 2033 } else { 2034 bits &= ~mask; 2035 } 2036 return bits; 2037} 2038 2039void GrContext::resetStats() { 2040 fGpu->resetStats(); 2041} 2042 2043const GrGpuStats& GrContext::getStats() const { 2044 return fGpu->getStats(); 2045} 2046 2047void GrContext::printStats() const { 2048 fGpu->printStats(); 2049} 2050 2051GrContext::GrContext(GrGpu* gpu) { 2052 fGpu = gpu; 
2053 fGpu->ref(); 2054 fGpu->setContext(this); 2055 2056 fPathRendererChain = NULL; 2057 2058 fTextureCache = new GrResourceCache(MAX_TEXTURE_CACHE_COUNT, 2059 MAX_TEXTURE_CACHE_BYTES); 2060 fFontCache = new GrFontCache(fGpu); 2061 2062 fLastDrawCategory = kUnbuffered_DrawCategory; 2063 2064 fDrawBuffer = NULL; 2065 fDrawBufferVBAllocPool = NULL; 2066 fDrawBufferIBAllocPool = NULL; 2067 2068 fAAFillRectIndexBuffer = NULL; 2069 fAAStrokeRectIndexBuffer = NULL; 2070 2071 int gpuMaxOffscreen = gpu->getCaps().fMaxRenderTargetSize; 2072 if (!PREFER_MSAA_OFFSCREEN_AA || !gpu->getCaps().fFSAASupport) { 2073 gpuMaxOffscreen /= OFFSCREEN_SSAA_SCALE; 2074 } 2075 fMaxOffscreenAASize = GrMin(GR_MAX_OFFSCREEN_AA_SIZE, gpuMaxOffscreen); 2076 2077 this->setupDrawBuffer(); 2078} 2079 2080void GrContext::setupDrawBuffer() { 2081 2082 GrAssert(NULL == fDrawBuffer); 2083 GrAssert(NULL == fDrawBufferVBAllocPool); 2084 GrAssert(NULL == fDrawBufferIBAllocPool); 2085 2086#if DEFER_TEXT_RENDERING || BATCH_RECT_TO_RECT 2087 fDrawBufferVBAllocPool = 2088 new GrVertexBufferAllocPool(fGpu, false, 2089 DRAW_BUFFER_VBPOOL_BUFFER_SIZE, 2090 DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS); 2091 fDrawBufferIBAllocPool = 2092 new GrIndexBufferAllocPool(fGpu, false, 2093 DRAW_BUFFER_IBPOOL_BUFFER_SIZE, 2094 DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS); 2095 2096 fDrawBuffer = new GrInOrderDrawBuffer(fGpu, 2097 fDrawBufferVBAllocPool, 2098 fDrawBufferIBAllocPool); 2099#endif 2100 2101#if BATCH_RECT_TO_RECT 2102 fDrawBuffer->setQuadIndexBuffer(this->getQuadIndexBuffer()); 2103#endif 2104} 2105 2106GrDrawTarget* GrContext::getTextTarget(const GrPaint& paint) { 2107 GrDrawTarget* target; 2108#if DEFER_TEXT_RENDERING 2109 target = prepareToDraw(paint, kText_DrawCategory); 2110#else 2111 target = prepareToDraw(paint, kUnbuffered_DrawCategory); 2112#endif 2113 this->setPaint(paint, target); 2114 return target; 2115} 2116 2117const GrIndexBuffer* GrContext::getQuadIndexBuffer() const { 2118 return fGpu->getQuadIndexBuffer(); 
2119} 2120 2121void GrContext::convolveInX(GrTexture* texture, 2122 const SkRect& rect, 2123 const float* kernel, 2124 int kernelWidth) { 2125 ASSERT_OWNED_RESOURCE(texture); 2126 2127 float imageIncrement[2] = {1.0f / texture->width(), 0.0f}; 2128 convolve(texture, rect, imageIncrement, kernel, kernelWidth); 2129} 2130 2131void GrContext::convolveInY(GrTexture* texture, 2132 const SkRect& rect, 2133 const float* kernel, 2134 int kernelWidth) { 2135 ASSERT_OWNED_RESOURCE(texture); 2136 2137 float imageIncrement[2] = {0.0f, 1.0f / texture->height()}; 2138 convolve(texture, rect, imageIncrement, kernel, kernelWidth); 2139} 2140 2141void GrContext::convolve(GrTexture* texture, 2142 const SkRect& rect, 2143 float imageIncrement[2], 2144 const float* kernel, 2145 int kernelWidth) { 2146 ASSERT_OWNED_RESOURCE(texture); 2147 2148 GrDrawTarget::AutoStateRestore asr(fGpu); 2149 GrMatrix sampleM; 2150 GrSamplerState sampler(GrSamplerState::kClamp_WrapMode, 2151 GrSamplerState::kConvolution_Filter); 2152 sampler.setConvolutionParams(kernelWidth, kernel, imageIncrement); 2153 sampleM.setIDiv(texture->width(), texture->height()); 2154 sampler.setMatrix(sampleM); 2155 GrDrawState* drawState = fGpu->drawState(); 2156 drawState->setSampler(0, sampler); 2157 drawState->setViewMatrix(GrMatrix::I()); 2158 drawState->setTexture(0, texture); 2159 drawState->setAlpha(0xFF); 2160 drawState->setBlendFunc(kOne_BlendCoeff, kZero_BlendCoeff); 2161 fGpu->drawSimpleRect(rect, NULL, 1 << 0); 2162} 2163 2164/////////////////////////////////////////////////////////////////////////////// 2165