// GrContext.cpp revision 4fd99ea4494dee2cff9e6d3de1e5a8750daaed42
1 2/* 3 * Copyright 2011 Google Inc. 4 * 5 * Use of this source code is governed by a BSD-style license that can be 6 * found in the LICENSE file. 7 */ 8 9 10#include "GrContext.h" 11 12#include "GrBufferAllocPool.h" 13#include "GrClipIterator.h" 14#include "GrGpu.h" 15#include "GrIndexBuffer.h" 16#include "GrInOrderDrawBuffer.h" 17#include "GrPathRenderer.h" 18#include "GrPathUtils.h" 19#include "GrResourceCache.h" 20#include "GrStencilBuffer.h" 21#include "GrTextStrike.h" 22#include "SkTLazy.h" 23#include "SkTrace.h" 24 25#define DEFER_TEXT_RENDERING 1 26 27#define DEFER_PATHS 1 28 29#define BATCH_RECT_TO_RECT (1 && !GR_STATIC_RECT_VB) 30 31#define MAX_BLUR_SIGMA 4.0f 32 33 34// When we're using coverage AA but the blend is incompatible (given gpu 35// limitations) should we disable AA or draw wrong? 36#define DISABLE_COVERAGE_AA_FOR_BLEND 1 37 38static const size_t MAX_TEXTURE_CACHE_COUNT = 256; 39static const size_t MAX_TEXTURE_CACHE_BYTES = 16 * 1024 * 1024; 40 41static const size_t DRAW_BUFFER_VBPOOL_BUFFER_SIZE = 1 << 15; 42static const int DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS = 4; 43 44// path rendering is the only thing we defer today that uses non-static indices 45static const size_t DRAW_BUFFER_IBPOOL_BUFFER_SIZE = DEFER_PATHS ? 1 << 11 : 0; 46static const int DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS = DEFER_PATHS ? 
4 : 0; 47 48#define ASSERT_OWNED_RESOURCE(R) GrAssert(!(R) || (R)->getContext() == this) 49 50GrContext* GrContext::Create(GrEngine engine, 51 GrPlatform3DContext context3D) { 52 GrContext* ctx = NULL; 53 GrGpu* fGpu = GrGpu::Create(engine, context3D); 54 if (NULL != fGpu) { 55 ctx = new GrContext(fGpu); 56 fGpu->unref(); 57 } 58 return ctx; 59} 60 61GrContext::~GrContext() { 62 this->flush(); 63 delete fTextureCache; 64 delete fFontCache; 65 delete fDrawBuffer; 66 delete fDrawBufferVBAllocPool; 67 delete fDrawBufferIBAllocPool; 68 69 GrSafeUnref(fAAFillRectIndexBuffer); 70 GrSafeUnref(fAAStrokeRectIndexBuffer); 71 fGpu->unref(); 72 GrSafeUnref(fPathRendererChain); 73 fDrawState->unref(); 74} 75 76void GrContext::contextLost() { 77 contextDestroyed(); 78 this->setupDrawBuffer(); 79} 80 81void GrContext::contextDestroyed() { 82 // abandon first to so destructors 83 // don't try to free the resources in the API. 84 fGpu->abandonResources(); 85 86 // a path renderer may be holding onto resources that 87 // are now unusable 88 GrSafeSetNull(fPathRendererChain); 89 90 delete fDrawBuffer; 91 fDrawBuffer = NULL; 92 93 delete fDrawBufferVBAllocPool; 94 fDrawBufferVBAllocPool = NULL; 95 96 delete fDrawBufferIBAllocPool; 97 fDrawBufferIBAllocPool = NULL; 98 99 GrSafeSetNull(fAAFillRectIndexBuffer); 100 GrSafeSetNull(fAAStrokeRectIndexBuffer); 101 102 fTextureCache->removeAll(); 103 fFontCache->freeAll(); 104 fGpu->markContextDirty(); 105} 106 107void GrContext::resetContext() { 108 fGpu->markContextDirty(); 109} 110 111void GrContext::freeGpuResources() { 112 this->flush(); 113 fTextureCache->removeAll(); 114 fFontCache->freeAll(); 115 // a path renderer may be holding onto resources 116 GrSafeSetNull(fPathRendererChain); 117} 118 119size_t GrContext::getGpuTextureCacheBytes() const { 120 return fTextureCache->getCachedResourceBytes(); 121} 122 123//////////////////////////////////////////////////////////////////////////////// 124 125int 
GrContext::PaintStageVertexLayoutBits( 126 const GrPaint& paint, 127 const bool hasTexCoords[GrPaint::kTotalStages]) { 128 int stageMask = paint.getActiveStageMask(); 129 int layout = 0; 130 for (int i = 0; i < GrPaint::kTotalStages; ++i) { 131 if ((1 << i) & stageMask) { 132 if (NULL != hasTexCoords && hasTexCoords[i]) { 133 layout |= GrDrawTarget::StageTexCoordVertexLayoutBit(i, i); 134 } else { 135 layout |= GrDrawTarget::StagePosAsTexCoordVertexLayoutBit(i); 136 } 137 } 138 } 139 return layout; 140} 141 142 143//////////////////////////////////////////////////////////////////////////////// 144 145enum { 146 // flags for textures 147 kNPOTBit = 0x1, 148 kFilterBit = 0x2, 149 kScratchBit = 0x4, 150 151 // resource type 152 kTextureBit = 0x8, 153 kStencilBufferBit = 0x10 154}; 155 156GrTexture* GrContext::TextureCacheEntry::texture() const { 157 if (NULL == fEntry) { 158 return NULL; 159 } else { 160 return (GrTexture*) fEntry->resource(); 161 } 162} 163 164namespace { 165// returns true if this is a "special" texture because of gpu NPOT limitations 166bool gen_texture_key_values(const GrGpu* gpu, 167 const GrSamplerState* sampler, 168 GrContext::TextureKey clientKey, 169 int width, 170 int height, 171 int sampleCnt, 172 bool scratch, 173 uint32_t v[4]) { 174 GR_STATIC_ASSERT(sizeof(GrContext::TextureKey) == sizeof(uint64_t)); 175 // we assume we only need 16 bits of width and height 176 // assert that texture creation will fail anyway if this assumption 177 // would cause key collisions. 
178 GrAssert(gpu->getCaps().fMaxTextureSize <= SK_MaxU16); 179 v[0] = clientKey & 0xffffffffUL; 180 v[1] = (clientKey >> 32) & 0xffffffffUL; 181 v[2] = width | (height << 16); 182 183 v[3] = (sampleCnt << 24); 184 GrAssert(sampleCnt >= 0 && sampleCnt < 256); 185 186 if (!gpu->getCaps().fNPOTTextureTileSupport) { 187 bool isPow2 = GrIsPow2(width) && GrIsPow2(height); 188 189 bool tiled = NULL != sampler && 190 ((sampler->getWrapX() != GrSamplerState::kClamp_WrapMode) || 191 (sampler->getWrapY() != GrSamplerState::kClamp_WrapMode)); 192 193 if (tiled && !isPow2) { 194 v[3] |= kNPOTBit; 195 if (GrSamplerState::kNearest_Filter != sampler->getFilter()) { 196 v[3] |= kFilterBit; 197 } 198 } 199 } 200 201 if (scratch) { 202 v[3] |= kScratchBit; 203 } 204 205 v[3] |= kTextureBit; 206 207 return v[3] & kNPOTBit; 208} 209 210// we should never have more than one stencil buffer with same combo of 211// (width,height,samplecount) 212void gen_stencil_key_values(int width, int height, 213 int sampleCnt, uint32_t v[4]) { 214 v[0] = width; 215 v[1] = height; 216 v[2] = sampleCnt; 217 v[3] = kStencilBufferBit; 218} 219 220void gen_stencil_key_values(const GrStencilBuffer* sb, 221 uint32_t v[4]) { 222 gen_stencil_key_values(sb->width(), sb->height(), 223 sb->numSamples(), v); 224} 225 226void build_kernel(float sigma, float* kernel, int kernelWidth) { 227 int halfWidth = (kernelWidth - 1) / 2; 228 float sum = 0.0f; 229 float denom = 1.0f / (2.0f * sigma * sigma); 230 for (int i = 0; i < kernelWidth; ++i) { 231 float x = static_cast<float>(i - halfWidth); 232 // Note that the constant term (1/(sqrt(2*pi*sigma^2)) of the Gaussian 233 // is dropped here, since we renormalize the kernel below. 
234 kernel[i] = sk_float_exp(- x * x * denom); 235 sum += kernel[i]; 236 } 237 // Normalize the kernel 238 float scale = 1.0f / sum; 239 for (int i = 0; i < kernelWidth; ++i) 240 kernel[i] *= scale; 241} 242 243void scale_rect(SkRect* rect, float xScale, float yScale) { 244 rect->fLeft *= xScale; 245 rect->fTop *= yScale; 246 rect->fRight *= xScale; 247 rect->fBottom *= yScale; 248} 249 250float adjust_sigma(float sigma, int *scaleFactor, int *halfWidth, 251 int *kernelWidth) { 252 *scaleFactor = 1; 253 while (sigma > MAX_BLUR_SIGMA) { 254 *scaleFactor *= 2; 255 sigma *= 0.5f; 256 } 257 *halfWidth = static_cast<int>(ceilf(sigma * 3.0f)); 258 *kernelWidth = *halfWidth * 2 + 1; 259 return sigma; 260} 261 262void apply_morphology(GrGpu* gpu, 263 GrTexture* texture, 264 const SkRect& rect, 265 int radius, 266 GrSamplerState::Filter filter, 267 GrSamplerState::FilterDirection direction) { 268 GrAssert(filter == GrSamplerState::kErode_Filter || 269 filter == GrSamplerState::kDilate_Filter); 270 271 GrDrawTarget::AutoStateRestore asr(gpu); 272 GrDrawState* drawState = gpu->drawState(); 273 GrRenderTarget* target = drawState->getRenderTarget(); 274 drawState->reset(); 275 drawState->setRenderTarget(target); 276 GrMatrix sampleM; 277 sampleM.setIDiv(texture->width(), texture->height()); 278 drawState->sampler(0)->reset(GrSamplerState::kClamp_WrapMode, filter, 279 sampleM); 280 drawState->sampler(0)->setMorphologyRadius(radius); 281 drawState->sampler(0)->setFilterDirection(direction); 282 drawState->setTexture(0, texture); 283 gpu->drawSimpleRect(rect, NULL, 1 << 0); 284} 285 286void convolve(GrGpu* gpu, 287 GrTexture* texture, 288 const SkRect& rect, 289 const float* kernel, 290 int kernelWidth, 291 GrSamplerState::FilterDirection direction) { 292 GrDrawTarget::AutoStateRestore asr(gpu); 293 GrDrawState* drawState = gpu->drawState(); 294 GrRenderTarget* target = drawState->getRenderTarget(); 295 drawState->reset(); 296 drawState->setRenderTarget(target); 297 GrMatrix 
sampleM; 298 sampleM.setIDiv(texture->width(), texture->height()); 299 drawState->sampler(0)->reset(GrSamplerState::kClamp_WrapMode, 300 GrSamplerState::kConvolution_Filter, 301 sampleM); 302 drawState->sampler(0)->setConvolutionParams(kernelWidth, kernel); 303 drawState->sampler(0)->setFilterDirection(direction); 304 drawState->setTexture(0, texture); 305 gpu->drawSimpleRect(rect, NULL, 1 << 0); 306} 307 308} 309 310GrContext::TextureCacheEntry GrContext::findAndLockTexture( 311 TextureKey key, 312 int width, 313 int height, 314 const GrSamplerState* sampler) { 315 uint32_t v[4]; 316 gen_texture_key_values(fGpu, sampler, key, width, height, 0, false, v); 317 GrResourceKey resourceKey(v); 318 return TextureCacheEntry(fTextureCache->findAndLock(resourceKey, 319 GrResourceCache::kNested_LockType)); 320} 321 322bool GrContext::isTextureInCache(TextureKey key, 323 int width, 324 int height, 325 const GrSamplerState* sampler) const { 326 uint32_t v[4]; 327 gen_texture_key_values(fGpu, sampler, key, width, height, 0, false, v); 328 GrResourceKey resourceKey(v); 329 return fTextureCache->hasKey(resourceKey); 330} 331 332GrResourceEntry* GrContext::addAndLockStencilBuffer(GrStencilBuffer* sb) { 333 ASSERT_OWNED_RESOURCE(sb); 334 uint32_t v[4]; 335 gen_stencil_key_values(sb, v); 336 GrResourceKey resourceKey(v); 337 return fTextureCache->createAndLock(resourceKey, sb); 338} 339 340GrStencilBuffer* GrContext::findStencilBuffer(int width, int height, 341 int sampleCnt) { 342 uint32_t v[4]; 343 gen_stencil_key_values(width, height, sampleCnt, v); 344 GrResourceKey resourceKey(v); 345 GrResourceEntry* entry = fTextureCache->findAndLock(resourceKey, 346 GrResourceCache::kSingle_LockType); 347 if (NULL != entry) { 348 GrStencilBuffer* sb = (GrStencilBuffer*) entry->resource(); 349 return sb; 350 } else { 351 return NULL; 352 } 353} 354 355void GrContext::unlockStencilBuffer(GrResourceEntry* sbEntry) { 356 ASSERT_OWNED_RESOURCE(sbEntry->resource()); 357 
fTextureCache->unlock(sbEntry); 358} 359 360static void stretchImage(void* dst, 361 int dstW, 362 int dstH, 363 void* src, 364 int srcW, 365 int srcH, 366 int bpp) { 367 GrFixed dx = (srcW << 16) / dstW; 368 GrFixed dy = (srcH << 16) / dstH; 369 370 GrFixed y = dy >> 1; 371 372 int dstXLimit = dstW*bpp; 373 for (int j = 0; j < dstH; ++j) { 374 GrFixed x = dx >> 1; 375 void* srcRow = (uint8_t*)src + (y>>16)*srcW*bpp; 376 void* dstRow = (uint8_t*)dst + j*dstW*bpp; 377 for (int i = 0; i < dstXLimit; i += bpp) { 378 memcpy((uint8_t*) dstRow + i, 379 (uint8_t*) srcRow + (x>>16)*bpp, 380 bpp); 381 x += dx; 382 } 383 y += dy; 384 } 385} 386 387GrContext::TextureCacheEntry GrContext::createAndLockTexture( 388 TextureKey key, 389 const GrSamplerState* sampler, 390 const GrTextureDesc& desc, 391 void* srcData, 392 size_t rowBytes) { 393 SK_TRACE_EVENT0("GrContext::createAndLockTexture"); 394 395#if GR_DUMP_TEXTURE_UPLOAD 396 GrPrintf("GrContext::createAndLockTexture [%d %d]\n", desc.fWidth, desc.fHeight); 397#endif 398 399 TextureCacheEntry entry; 400 uint32_t v[4]; 401 bool special = gen_texture_key_values(fGpu, sampler, key, 402 desc.fWidth, desc.fHeight, 403 desc.fSampleCnt, false, v); 404 GrResourceKey resourceKey(v); 405 406 if (special) { 407 GrAssert(NULL != sampler); 408 TextureCacheEntry clampEntry = this->findAndLockTexture(key, 409 desc.fWidth, 410 desc.fHeight, 411 NULL); 412 413 if (NULL == clampEntry.texture()) { 414 clampEntry = this->createAndLockTexture(key, NULL, desc, 415 srcData, rowBytes); 416 GrAssert(NULL != clampEntry.texture()); 417 if (NULL == clampEntry.texture()) { 418 return entry; 419 } 420 } 421 GrTextureDesc rtDesc = desc; 422 rtDesc.fFlags = rtDesc.fFlags | 423 kRenderTarget_GrTextureFlagBit | 424 kNoStencil_GrTextureFlagBit; 425 rtDesc.fWidth = GrNextPow2(GrMax(desc.fWidth, 64)); 426 rtDesc.fHeight = GrNextPow2(GrMax(desc.fHeight, 64)); 427 428 GrTexture* texture = fGpu->createTexture(rtDesc, NULL, 0); 429 430 if (NULL != texture) { 431 
GrDrawTarget::AutoStateRestore asr(fGpu); 432 GrDrawState* drawState = fGpu->drawState(); 433 drawState->reset(); 434 drawState->setRenderTarget(texture->asRenderTarget()); 435 drawState->setTexture(0, clampEntry.texture()); 436 437 GrSamplerState::Filter filter; 438 // if filtering is not desired then we want to ensure all 439 // texels in the resampled image are copies of texels from 440 // the original. 441 if (GrSamplerState::kNearest_Filter == sampler->getFilter()) { 442 filter = GrSamplerState::kNearest_Filter; 443 } else { 444 filter = GrSamplerState::kBilinear_Filter; 445 } 446 drawState->sampler(0)->reset(GrSamplerState::kClamp_WrapMode, 447 filter); 448 449 static const GrVertexLayout layout = 450 GrDrawTarget::StageTexCoordVertexLayoutBit(0,0); 451 GrDrawTarget::AutoReleaseGeometry arg(fGpu, layout, 4, 0); 452 453 if (arg.succeeded()) { 454 GrPoint* verts = (GrPoint*) arg.vertices(); 455 verts[0].setIRectFan(0, 0, 456 texture->width(), 457 texture->height(), 458 2*sizeof(GrPoint)); 459 verts[1].setIRectFan(0, 0, 1, 1, 2*sizeof(GrPoint)); 460 fGpu->drawNonIndexed(kTriangleFan_PrimitiveType, 461 0, 4); 462 entry.set(fTextureCache->createAndLock(resourceKey, texture)); 463 } 464 texture->releaseRenderTarget(); 465 } else { 466 // TODO: Our CPU stretch doesn't filter. But we create separate 467 // stretched textures when the sampler state is either filtered or 468 // not. Either implement filtered stretch blit on CPU or just create 469 // one when FBO case fails. 470 471 rtDesc.fFlags = kNone_GrTextureFlags; 472 // no longer need to clamp at min RT size. 
473 rtDesc.fWidth = GrNextPow2(desc.fWidth); 474 rtDesc.fHeight = GrNextPow2(desc.fHeight); 475 int bpp = GrBytesPerPixel(desc.fConfig); 476 SkAutoSMalloc<128*128*4> stretchedPixels(bpp * 477 rtDesc.fWidth * 478 rtDesc.fHeight); 479 stretchImage(stretchedPixels.get(), rtDesc.fWidth, rtDesc.fHeight, 480 srcData, desc.fWidth, desc.fHeight, bpp); 481 482 size_t stretchedRowBytes = rtDesc.fWidth * bpp; 483 484 GrTexture* texture = fGpu->createTexture(rtDesc, 485 stretchedPixels.get(), 486 stretchedRowBytes); 487 GrAssert(NULL != texture); 488 entry.set(fTextureCache->createAndLock(resourceKey, texture)); 489 } 490 fTextureCache->unlock(clampEntry.cacheEntry()); 491 492 } else { 493 GrTexture* texture = fGpu->createTexture(desc, srcData, rowBytes); 494 if (NULL != texture) { 495 entry.set(fTextureCache->createAndLock(resourceKey, texture)); 496 } 497 } 498 return entry; 499} 500 501namespace { 502inline void gen_scratch_tex_key_values(const GrGpu* gpu, 503 const GrTextureDesc& desc, 504 uint32_t v[4]) { 505 // Instead of a client-provided key of the texture contents 506 // we create a key of from the descriptor. 
507 GrContext::TextureKey descKey = (desc.fFlags << 8) | 508 ((uint64_t) desc.fConfig << 32); 509 // this code path isn't friendly to tiling with NPOT restricitons 510 // We just pass ClampNoFilter() 511 gen_texture_key_values(gpu, NULL, descKey, desc.fWidth, 512 desc.fHeight, desc.fSampleCnt, true, v); 513} 514} 515 516GrContext::TextureCacheEntry GrContext::lockScratchTexture( 517 const GrTextureDesc& inDesc, 518 ScratchTexMatch match) { 519 520 GrTextureDesc desc = inDesc; 521 if (kExact_ScratchTexMatch != match) { 522 // bin by pow2 with a reasonable min 523 static const int MIN_SIZE = 256; 524 desc.fWidth = GrMax(MIN_SIZE, GrNextPow2(desc.fWidth)); 525 desc.fHeight = GrMax(MIN_SIZE, GrNextPow2(desc.fHeight)); 526 } 527 528 GrResourceEntry* entry; 529 int origWidth = desc.fWidth; 530 int origHeight = desc.fHeight; 531 bool doubledW = false; 532 bool doubledH = false; 533 534 do { 535 uint32_t v[4]; 536 gen_scratch_tex_key_values(fGpu, desc, v); 537 GrResourceKey key(v); 538 entry = fTextureCache->findAndLock(key, 539 GrResourceCache::kNested_LockType); 540 // if we miss, relax the fit of the flags... 541 // then try doubling width... then height. 
542 if (NULL != entry || kExact_ScratchTexMatch == match) { 543 break; 544 } 545 if (!(desc.fFlags & kRenderTarget_GrTextureFlagBit)) { 546 desc.fFlags = desc.fFlags | kRenderTarget_GrTextureFlagBit; 547 } else if (desc.fFlags & kNoStencil_GrTextureFlagBit) { 548 desc.fFlags = desc.fFlags & ~kNoStencil_GrTextureFlagBit; 549 } else if (!doubledW) { 550 desc.fFlags = inDesc.fFlags; 551 desc.fWidth *= 2; 552 doubledW = true; 553 } else if (!doubledH) { 554 desc.fFlags = inDesc.fFlags; 555 desc.fWidth = origWidth; 556 desc.fHeight *= 2; 557 doubledH = true; 558 } else { 559 break; 560 } 561 562 } while (true); 563 564 if (NULL == entry) { 565 desc.fFlags = inDesc.fFlags; 566 desc.fWidth = origWidth; 567 desc.fHeight = origHeight; 568 GrTexture* texture = fGpu->createTexture(desc, NULL, 0); 569 if (NULL != texture) { 570 uint32_t v[4]; 571 gen_scratch_tex_key_values(fGpu, desc, v); 572 GrResourceKey key(v); 573 entry = fTextureCache->createAndLock(key, texture); 574 } 575 } 576 577 // If the caller gives us the same desc/sampler twice we don't want 578 // to return the same texture the second time (unless it was previously 579 // released). So we detach the entry from the cache and reattach at release. 580 if (NULL != entry) { 581 fTextureCache->detach(entry); 582 } 583 return TextureCacheEntry(entry); 584} 585 586void GrContext::unlockTexture(TextureCacheEntry entry) { 587 ASSERT_OWNED_RESOURCE(entry.texture()); 588 // If this is a scratch texture we detached it from the cache 589 // while it was locked (to avoid two callers simultaneously getting 590 // the same texture). 
591 if (kScratchBit & entry.cacheEntry()->key().getValue32(3)) { 592 fTextureCache->reattachAndUnlock(entry.cacheEntry()); 593 } else { 594 fTextureCache->unlock(entry.cacheEntry()); 595 } 596} 597 598GrTexture* GrContext::createUncachedTexture(const GrTextureDesc& desc, 599 void* srcData, 600 size_t rowBytes) { 601 return fGpu->createTexture(desc, srcData, rowBytes); 602} 603 604void GrContext::getTextureCacheLimits(int* maxTextures, 605 size_t* maxTextureBytes) const { 606 fTextureCache->getLimits(maxTextures, maxTextureBytes); 607} 608 609void GrContext::setTextureCacheLimits(int maxTextures, size_t maxTextureBytes) { 610 fTextureCache->setLimits(maxTextures, maxTextureBytes); 611} 612 613int GrContext::getMaxTextureSize() const { 614 return fGpu->getCaps().fMaxTextureSize; 615} 616 617int GrContext::getMaxRenderTargetSize() const { 618 return fGpu->getCaps().fMaxRenderTargetSize; 619} 620 621/////////////////////////////////////////////////////////////////////////////// 622 623GrTexture* GrContext::createPlatformTexture(const GrPlatformTextureDesc& desc) { 624 return fGpu->createPlatformTexture(desc); 625} 626 627GrRenderTarget* GrContext::createPlatformRenderTarget(const GrPlatformRenderTargetDesc& desc) { 628 return fGpu->createPlatformRenderTarget(desc); 629} 630 631/////////////////////////////////////////////////////////////////////////////// 632 633bool GrContext::supportsIndex8PixelConfig(const GrSamplerState* sampler, 634 int width, int height) const { 635 const GrDrawTarget::Caps& caps = fGpu->getCaps(); 636 if (!caps.f8BitPaletteSupport) { 637 return false; 638 } 639 640 bool isPow2 = GrIsPow2(width) && GrIsPow2(height); 641 642 if (!isPow2) { 643 bool tiled = NULL != sampler && 644 (sampler->getWrapX() != GrSamplerState::kClamp_WrapMode || 645 sampler->getWrapY() != GrSamplerState::kClamp_WrapMode); 646 if (tiled && !caps.fNPOTTextureTileSupport) { 647 return false; 648 } 649 } 650 return true; 651} 652 
653//////////////////////////////////////////////////////////////////////////////// 654 655const GrClip& GrContext::getClip() const { return fGpu->getClip(); } 656 657void GrContext::setClip(const GrClip& clip) { 658 fGpu->setClip(clip); 659 fDrawState->enableState(GrDrawState::kClip_StateBit); 660} 661 662void GrContext::setClip(const GrIRect& rect) { 663 GrClip clip; 664 clip.setFromIRect(rect); 665 fGpu->setClip(clip); 666} 667 668//////////////////////////////////////////////////////////////////////////////// 669 670void GrContext::clear(const GrIRect* rect, const GrColor color) { 671 this->flush(); 672 fGpu->clear(rect, color); 673} 674 675void GrContext::drawPaint(const GrPaint& paint) { 676 // set rect to be big enough to fill the space, but not super-huge, so we 677 // don't overflow fixed-point implementations 678 GrRect r; 679 r.setLTRB(0, 0, 680 GrIntToScalar(getRenderTarget()->width()), 681 GrIntToScalar(getRenderTarget()->height())); 682 GrMatrix inverse; 683 SkTLazy<GrPaint> tmpPaint; 684 const GrPaint* p = &paint; 685 GrAutoMatrix am; 686 687 // We attempt to map r by the inverse matrix and draw that. mapRect will 688 // map the four corners and bound them with a new rect. This will not 689 // produce a correct result for some perspective matrices. 
690 if (!this->getMatrix().hasPerspective()) { 691 if (!fDrawState->getViewInverse(&inverse)) { 692 GrPrintf("Could not invert matrix"); 693 return; 694 } 695 inverse.mapRect(&r); 696 } else { 697 if (paint.getActiveMaskStageMask() || paint.getActiveStageMask()) { 698 if (!fDrawState->getViewInverse(&inverse)) { 699 GrPrintf("Could not invert matrix"); 700 return; 701 } 702 tmpPaint.set(paint); 703 tmpPaint.get()->preConcatActiveSamplerMatrices(inverse); 704 p = tmpPaint.get(); 705 } 706 am.set(this, GrMatrix::I()); 707 } 708 // by definition this fills the entire clip, no need for AA 709 if (paint.fAntiAlias) { 710 if (!tmpPaint.isValid()) { 711 tmpPaint.set(paint); 712 p = tmpPaint.get(); 713 } 714 GrAssert(p == tmpPaint.get()); 715 tmpPaint.get()->fAntiAlias = false; 716 } 717 this->drawRect(*p, r); 718} 719 720//////////////////////////////////////////////////////////////////////////////// 721 722namespace { 723inline bool disable_coverage_aa_for_blend(GrDrawTarget* target) { 724 return DISABLE_COVERAGE_AA_FOR_BLEND && !target->canApplyCoverage(); 725} 726} 727 728//////////////////////////////////////////////////////////////////////////////// 729 730/* create a triangle strip that strokes the specified triangle. There are 8 731 unique vertices, but we repreat the last 2 to close up. Alternatively we 732 could use an indices array, and then only send 8 verts, but not sure that 733 would be faster. 
734 */ 735static void setStrokeRectStrip(GrPoint verts[10], GrRect rect, 736 GrScalar width) { 737 const GrScalar rad = GrScalarHalf(width); 738 rect.sort(); 739 740 verts[0].set(rect.fLeft + rad, rect.fTop + rad); 741 verts[1].set(rect.fLeft - rad, rect.fTop - rad); 742 verts[2].set(rect.fRight - rad, rect.fTop + rad); 743 verts[3].set(rect.fRight + rad, rect.fTop - rad); 744 verts[4].set(rect.fRight - rad, rect.fBottom - rad); 745 verts[5].set(rect.fRight + rad, rect.fBottom + rad); 746 verts[6].set(rect.fLeft + rad, rect.fBottom - rad); 747 verts[7].set(rect.fLeft - rad, rect.fBottom + rad); 748 verts[8] = verts[0]; 749 verts[9] = verts[1]; 750} 751 752static void setInsetFan(GrPoint* pts, size_t stride, 753 const GrRect& r, GrScalar dx, GrScalar dy) { 754 pts->setRectFan(r.fLeft + dx, r.fTop + dy, r.fRight - dx, r.fBottom - dy, stride); 755} 756 757static const uint16_t gFillAARectIdx[] = { 758 0, 1, 5, 5, 4, 0, 759 1, 2, 6, 6, 5, 1, 760 2, 3, 7, 7, 6, 2, 761 3, 0, 4, 4, 7, 3, 762 4, 5, 6, 6, 7, 4, 763}; 764 765int GrContext::aaFillRectIndexCount() const { 766 return GR_ARRAY_COUNT(gFillAARectIdx); 767} 768 769GrIndexBuffer* GrContext::aaFillRectIndexBuffer() { 770 if (NULL == fAAFillRectIndexBuffer) { 771 fAAFillRectIndexBuffer = fGpu->createIndexBuffer(sizeof(gFillAARectIdx), 772 false); 773 if (NULL != fAAFillRectIndexBuffer) { 774 #if GR_DEBUG 775 bool updated = 776 #endif 777 fAAFillRectIndexBuffer->updateData(gFillAARectIdx, 778 sizeof(gFillAARectIdx)); 779 GR_DEBUGASSERT(updated); 780 } 781 } 782 return fAAFillRectIndexBuffer; 783} 784 785static const uint16_t gStrokeAARectIdx[] = { 786 0 + 0, 1 + 0, 5 + 0, 5 + 0, 4 + 0, 0 + 0, 787 1 + 0, 2 + 0, 6 + 0, 6 + 0, 5 + 0, 1 + 0, 788 2 + 0, 3 + 0, 7 + 0, 7 + 0, 6 + 0, 2 + 0, 789 3 + 0, 0 + 0, 4 + 0, 4 + 0, 7 + 0, 3 + 0, 790 791 0 + 4, 1 + 4, 5 + 4, 5 + 4, 4 + 4, 0 + 4, 792 1 + 4, 2 + 4, 6 + 4, 6 + 4, 5 + 4, 1 + 4, 793 2 + 4, 3 + 4, 7 + 4, 7 + 4, 6 + 4, 2 + 4, 794 3 + 4, 0 + 4, 4 + 4, 4 + 4, 7 + 4, 3 + 4, 795 
796 0 + 8, 1 + 8, 5 + 8, 5 + 8, 4 + 8, 0 + 8, 797 1 + 8, 2 + 8, 6 + 8, 6 + 8, 5 + 8, 1 + 8, 798 2 + 8, 3 + 8, 7 + 8, 7 + 8, 6 + 8, 2 + 8, 799 3 + 8, 0 + 8, 4 + 8, 4 + 8, 7 + 8, 3 + 8, 800}; 801 802int GrContext::aaStrokeRectIndexCount() const { 803 return GR_ARRAY_COUNT(gStrokeAARectIdx); 804} 805 806GrIndexBuffer* GrContext::aaStrokeRectIndexBuffer() { 807 if (NULL == fAAStrokeRectIndexBuffer) { 808 fAAStrokeRectIndexBuffer = fGpu->createIndexBuffer(sizeof(gStrokeAARectIdx), 809 false); 810 if (NULL != fAAStrokeRectIndexBuffer) { 811 #if GR_DEBUG 812 bool updated = 813 #endif 814 fAAStrokeRectIndexBuffer->updateData(gStrokeAARectIdx, 815 sizeof(gStrokeAARectIdx)); 816 GR_DEBUGASSERT(updated); 817 } 818 } 819 return fAAStrokeRectIndexBuffer; 820} 821 822static GrVertexLayout aa_rect_layout(const GrDrawTarget* target, 823 bool useCoverage) { 824 GrVertexLayout layout = 0; 825 for (int s = 0; s < GrDrawState::kNumStages; ++s) { 826 if (NULL != target->getDrawState().getTexture(s)) { 827 layout |= GrDrawTarget::StagePosAsTexCoordVertexLayoutBit(s); 828 } 829 } 830 if (useCoverage) { 831 layout |= GrDrawTarget::kCoverage_VertexLayoutBit; 832 } else { 833 layout |= GrDrawTarget::kColor_VertexLayoutBit; 834 } 835 return layout; 836} 837 838void GrContext::fillAARect(GrDrawTarget* target, 839 const GrRect& devRect, 840 bool useVertexCoverage) { 841 GrVertexLayout layout = aa_rect_layout(target, useVertexCoverage); 842 843 size_t vsize = GrDrawTarget::VertexSize(layout); 844 845 GrDrawTarget::AutoReleaseGeometry geo(target, layout, 8, 0); 846 if (!geo.succeeded()) { 847 GrPrintf("Failed to get space for vertices!\n"); 848 return; 849 } 850 GrIndexBuffer* indexBuffer = this->aaFillRectIndexBuffer(); 851 if (NULL == indexBuffer) { 852 GrPrintf("Failed to create index buffer!\n"); 853 return; 854 } 855 856 intptr_t verts = reinterpret_cast<intptr_t>(geo.vertices()); 857 858 GrPoint* fan0Pos = reinterpret_cast<GrPoint*>(verts); 859 GrPoint* fan1Pos = 
reinterpret_cast<GrPoint*>(verts + 4 * vsize); 860 861 setInsetFan(fan0Pos, vsize, devRect, -GR_ScalarHalf, -GR_ScalarHalf); 862 setInsetFan(fan1Pos, vsize, devRect, GR_ScalarHalf, GR_ScalarHalf); 863 864 verts += sizeof(GrPoint); 865 for (int i = 0; i < 4; ++i) { 866 *reinterpret_cast<GrColor*>(verts + i * vsize) = 0; 867 } 868 869 GrColor innerColor; 870 if (useVertexCoverage) { 871 innerColor = 0xffffffff; 872 } else { 873 innerColor = target->getDrawState().getColor(); 874 } 875 876 verts += 4 * vsize; 877 for (int i = 0; i < 4; ++i) { 878 *reinterpret_cast<GrColor*>(verts + i * vsize) = innerColor; 879 } 880 881 target->setIndexSourceToBuffer(indexBuffer); 882 883 target->drawIndexed(kTriangles_PrimitiveType, 0, 884 0, 8, this->aaFillRectIndexCount()); 885} 886 887void GrContext::strokeAARect(GrDrawTarget* target, 888 const GrRect& devRect, 889 const GrVec& devStrokeSize, 890 bool useVertexCoverage) { 891 const GrScalar& dx = devStrokeSize.fX; 892 const GrScalar& dy = devStrokeSize.fY; 893 const GrScalar rx = GrMul(dx, GR_ScalarHalf); 894 const GrScalar ry = GrMul(dy, GR_ScalarHalf); 895 896 GrScalar spare; 897 { 898 GrScalar w = devRect.width() - dx; 899 GrScalar h = devRect.height() - dy; 900 spare = GrMin(w, h); 901 } 902 903 if (spare <= 0) { 904 GrRect r(devRect); 905 r.inset(-rx, -ry); 906 fillAARect(target, r, useVertexCoverage); 907 return; 908 } 909 GrVertexLayout layout = aa_rect_layout(target, useVertexCoverage); 910 size_t vsize = GrDrawTarget::VertexSize(layout); 911 912 GrDrawTarget::AutoReleaseGeometry geo(target, layout, 16, 0); 913 if (!geo.succeeded()) { 914 GrPrintf("Failed to get space for vertices!\n"); 915 return; 916 } 917 GrIndexBuffer* indexBuffer = this->aaStrokeRectIndexBuffer(); 918 if (NULL == indexBuffer) { 919 GrPrintf("Failed to create index buffer!\n"); 920 return; 921 } 922 923 intptr_t verts = reinterpret_cast<intptr_t>(geo.vertices()); 924 925 GrPoint* fan0Pos = reinterpret_cast<GrPoint*>(verts); 926 GrPoint* fan1Pos = 
reinterpret_cast<GrPoint*>(verts + 4 * vsize); 927 GrPoint* fan2Pos = reinterpret_cast<GrPoint*>(verts + 8 * vsize); 928 GrPoint* fan3Pos = reinterpret_cast<GrPoint*>(verts + 12 * vsize); 929 930 setInsetFan(fan0Pos, vsize, devRect, -rx - GR_ScalarHalf, -ry - GR_ScalarHalf); 931 setInsetFan(fan1Pos, vsize, devRect, -rx + GR_ScalarHalf, -ry + GR_ScalarHalf); 932 setInsetFan(fan2Pos, vsize, devRect, rx - GR_ScalarHalf, ry - GR_ScalarHalf); 933 setInsetFan(fan3Pos, vsize, devRect, rx + GR_ScalarHalf, ry + GR_ScalarHalf); 934 935 verts += sizeof(GrPoint); 936 for (int i = 0; i < 4; ++i) { 937 *reinterpret_cast<GrColor*>(verts + i * vsize) = 0; 938 } 939 940 GrColor innerColor; 941 if (useVertexCoverage) { 942 innerColor = 0xffffffff; 943 } else { 944 innerColor = target->getDrawState().getColor(); 945 } 946 verts += 4 * vsize; 947 for (int i = 0; i < 8; ++i) { 948 *reinterpret_cast<GrColor*>(verts + i * vsize) = innerColor; 949 } 950 951 verts += 8 * vsize; 952 for (int i = 0; i < 8; ++i) { 953 *reinterpret_cast<GrColor*>(verts + i * vsize) = 0; 954 } 955 956 target->setIndexSourceToBuffer(indexBuffer); 957 target->drawIndexed(kTriangles_PrimitiveType, 958 0, 0, 16, aaStrokeRectIndexCount()); 959} 960 961/** 962 * Returns true if the rects edges are integer-aligned. 963 */ 964static bool isIRect(const GrRect& r) { 965 return GrScalarIsInt(r.fLeft) && GrScalarIsInt(r.fTop) && 966 GrScalarIsInt(r.fRight) && GrScalarIsInt(r.fBottom); 967} 968 969static bool apply_aa_to_rect(GrDrawTarget* target, 970 const GrRect& rect, 971 GrScalar width, 972 const GrMatrix* matrix, 973 GrMatrix* combinedMatrix, 974 GrRect* devRect, 975 bool* useVertexCoverage) { 976 // we use a simple coverage ramp to do aa on axis-aligned rects 977 // we check if the rect will be axis-aligned, and the rect won't land on 978 // integer coords. 979 980 // we are keeping around the "tweak the alpha" trick because 981 // it is our only hope for the fixed-pipe implementation. 
982 // In a shader implementation we can give a separate coverage input 983 // TODO: remove this ugliness when we drop the fixed-pipe impl 984 *useVertexCoverage = false; 985 if (!target->canTweakAlphaForCoverage()) { 986 if (disable_coverage_aa_for_blend(target)) { 987#if GR_DEBUG 988 //GrPrintf("Turning off AA to correctly apply blend.\n"); 989#endif 990 return false; 991 } else { 992 *useVertexCoverage = true; 993 } 994 } 995 const GrDrawState& drawState = target->getDrawState(); 996 if (drawState.getRenderTarget()->isMultisampled()) { 997 return false; 998 } 999 1000 if (0 == width && target->willUseHWAALines()) { 1001 return false; 1002 } 1003 1004 if (!drawState.getViewMatrix().preservesAxisAlignment()) { 1005 return false; 1006 } 1007 1008 if (NULL != matrix && 1009 !matrix->preservesAxisAlignment()) { 1010 return false; 1011 } 1012 1013 *combinedMatrix = drawState.getViewMatrix(); 1014 if (NULL != matrix) { 1015 combinedMatrix->preConcat(*matrix); 1016 GrAssert(combinedMatrix->preservesAxisAlignment()); 1017 } 1018 1019 combinedMatrix->mapRect(devRect, rect); 1020 devRect->sort(); 1021 1022 if (width < 0) { 1023 return !isIRect(*devRect); 1024 } else { 1025 return true; 1026 } 1027} 1028 1029void GrContext::drawRect(const GrPaint& paint, 1030 const GrRect& rect, 1031 GrScalar width, 1032 const GrMatrix* matrix) { 1033 SK_TRACE_EVENT0("GrContext::drawRect"); 1034 1035 GrDrawTarget* target = this->prepareToDraw(paint, kUnbuffered_DrawCategory); 1036 int stageMask = paint.getActiveStageMask(); 1037 1038 GrRect devRect = rect; 1039 GrMatrix combinedMatrix; 1040 bool useVertexCoverage; 1041 bool needAA = paint.fAntiAlias && 1042 !this->getRenderTarget()->isMultisampled(); 1043 bool doAA = needAA && apply_aa_to_rect(target, rect, width, matrix, 1044 &combinedMatrix, &devRect, 1045 &useVertexCoverage); 1046 1047 if (doAA) { 1048 GrDrawTarget::AutoDeviceCoordDraw adcd(target, stageMask); 1049 if (width >= 0) { 1050 GrVec strokeSize;; 1051 if (width > 0) { 1052 
                strokeSize.set(width, width);
                // map the stroke width into device space
                combinedMatrix.mapVectors(&strokeSize, 1);
                strokeSize.setAbs(strokeSize);
            } else {
                // hairline: one device pixel wide
                strokeSize.set(GR_Scalar1, GR_Scalar1);
            }
            strokeAARect(target, devRect, strokeSize, useVertexCoverage);
        } else {
            fillAARect(target, devRect, useVertexCoverage);
        }
        return;
    }

    if (width >= 0) {
        // TODO: consider making static vertex buffers for these cases.
        // Hairline could be done by just adding closing vertex to
        // unitSquareVertexBuffer()
        GrVertexLayout layout = PaintStageVertexLayoutBits(paint, NULL);

        // 10 verts covers the stroked-rect triangle strip; hairline uses 5
        static const int worstCaseVertCount = 10;
        GrDrawTarget::AutoReleaseGeometry geo(target, layout, worstCaseVertCount, 0);

        if (!geo.succeeded()) {
            GrPrintf("Failed to get space for vertices!\n");
            return;
        }

        GrPrimitiveType primType;
        int vertCount;
        GrPoint* vertex = geo.positions();

        if (width > 0) {
            vertCount = 10;
            primType = kTriangleStrip_PrimitiveType;
            setStrokeRectStrip(vertex, rect, width);
        } else {
            // hairline: closed line strip around the rect
            vertCount = 5;
            primType = kLineStrip_PrimitiveType;
            vertex[0].set(rect.fLeft, rect.fTop);
            vertex[1].set(rect.fRight, rect.fTop);
            vertex[2].set(rect.fRight, rect.fBottom);
            vertex[3].set(rect.fLeft, rect.fBottom);
            vertex[4].set(rect.fLeft, rect.fTop);
        }

        // apply the optional extra matrix for the duration of the draw
        GrDrawState::AutoViewMatrixRestore avmr;
        if (NULL != matrix) {
            GrDrawState* drawState = target->drawState();
            avmr.set(drawState);
            drawState->preConcatViewMatrix(*matrix);
            drawState->preConcatSamplerMatrices(stageMask, *matrix);
        }

        target->drawNonIndexed(primType, 0, vertCount);
    } else {
#if GR_STATIC_RECT_VB
        // fill via the shared unit-square VB, positioned by a matrix
        GrVertexLayout layout = PaintStageVertexLayoutBits(paint, NULL);
        const GrVertexBuffer* sqVB = fGpu->getUnitSquareVertexBuffer();
        if (NULL == sqVB) {
            GrPrintf("Failed to create static rect vb.\n");
            return;
        }
        target->setVertexSourceToBuffer(layout, sqVB);
        GrDrawState* drawState = target->drawState();
        GrDrawState::AutoViewMatrixRestore avmr(drawState);
        // maps the unit square onto `rect`
        GrMatrix m;
        m.setAll(rect.width(), 0, rect.fLeft,
                 0, rect.height(), rect.fTop,
                 0, 0, GrMatrix::I()[8]);

        if (NULL != matrix) {
            m.postConcat(*matrix);
        }
        drawState->preConcatViewMatrix(m);
        drawState->preConcatSamplerMatrices(stageMask, m);

        target->drawNonIndexed(kTriangleFan_PrimitiveType, 0, 4);
#else
        target->drawSimpleRect(rect, matrix, stageMask);
#endif
    }
}

// Draws dstRect textured by srcRect of the paint's first texture.
// dstMatrix/srcMatrix optionally transform the respective rects.
void GrContext::drawRectToRect(const GrPaint& paint,
                               const GrRect& dstRect,
                               const GrRect& srcRect,
                               const GrMatrix* dstMatrix,
                               const GrMatrix* srcMatrix) {
    SK_TRACE_EVENT0("GrContext::drawRectToRect");

    // srcRect refers to paint's first texture
    if (NULL == paint.getTexture(0)) {
        drawRect(paint, dstRect, -1, dstMatrix);
        return;
    }

    GR_STATIC_ASSERT(!BATCH_RECT_TO_RECT || !GR_STATIC_RECT_VB);

#if GR_STATIC_RECT_VB
    GrDrawTarget* target = this->prepareToDraw(paint, kUnbuffered_DrawCategory);
    GrDrawState* drawState = target->drawState();
    GrVertexLayout layout = PaintStageVertexLayoutBits(paint, NULL);
    GrDrawState::AutoViewMatrixRestore avmr(drawState);

    GrMatrix m;

    // unit square -> dstRect
    m.setAll(dstRect.width(), 0, dstRect.fLeft,
             0, dstRect.height(), dstRect.fTop,
             0, 0, GrMatrix::I()[8]);
    if (NULL != dstMatrix) {
        m.postConcat(*dstMatrix);
    }
    drawState->preConcatViewMatrix(m);

    // srcRect refers to first stage
    int otherStageMask = paint.getActiveStageMask() &
                         (~(1 << GrPaint::kFirstTextureStage));
    if (otherStageMask) {
        drawState->preConcatSamplerMatrices(otherStageMask, m);
    }

    // unit square -> srcRect for the first texture stage
    m.setAll(srcRect.width(), 0, srcRect.fLeft,
             0, srcRect.height(), srcRect.fTop,
             0, 0, GrMatrix::I()[8]);
    if (NULL != srcMatrix) {
        m.postConcat(*srcMatrix);
    }
    drawState->sampler(GrPaint::kFirstTextureStage)->preConcatMatrix(m);

    const GrVertexBuffer* sqVB = fGpu->getUnitSquareVertexBuffer();
    if (NULL == sqVB) {
        GrPrintf("Failed to create static rect vb.\n");
        return;
    }
    target->setVertexSourceToBuffer(layout, sqVB);
    target->drawNonIndexed(kTriangleFan_PrimitiveType, 0, 4);
#else

    GrDrawTarget* target;
#if BATCH_RECT_TO_RECT
    target = this->prepareToDraw(paint, kBuffered_DrawCategory);
#else
    target = this->prepareToDraw(paint, kUnbuffered_DrawCategory);
#endif

    const GrRect* srcRects[GrDrawState::kNumStages] = {NULL};
    const GrMatrix* srcMatrices[GrDrawState::kNumStages] = {NULL};
    srcRects[0] = &srcRect;
    srcMatrices[0] = srcMatrix;

    target->drawRect(dstRect, dstMatrix, 1, srcRects, srcMatrices);
#endif
}

// Draws client-supplied vertices (positions plus optional stage-0 texture
// coords, per-vertex colors, and indices).
void GrContext::drawVertices(const GrPaint& paint,
                             GrPrimitiveType primitiveType,
                             int vertexCount,
                             const GrPoint positions[],
                             const GrPoint texCoords[],
                             const GrColor colors[],
                             const uint16_t indices[],
                             int indexCount) {
    SK_TRACE_EVENT0("GrContext::drawVertices");

    GrDrawTarget::AutoReleaseGeometry geo;

    GrDrawTarget* target = this->prepareToDraw(paint, kUnbuffered_DrawCategory);

    bool hasTexCoords[GrPaint::kTotalStages] = {
        NULL != texCoords,   // texCoordSrc provides explicit stage 0 coords
        0                    // remaining stages use positions
    };

    GrVertexLayout layout = PaintStageVertexLayoutBits(paint, hasTexCoords);

    if (NULL != colors) {
        layout |= GrDrawTarget::kColor_VertexLayoutBit;
    }
    int vertexSize = GrDrawTarget::VertexSize(layout);

    if (sizeof(GrPoint) != vertexSize) {
        // interleave positions/texcoords/colors into a reserved buffer
        if (!geo.set(target, layout, vertexCount, 0)) {
            GrPrintf("Failed to get space for vertices!\n");
            return;
        }
        int texOffsets[GrDrawState::kMaxTexCoords];
        int colorOffset;
        GrDrawTarget::VertexSizeAndOffsetsByIdx(layout,
                                                texOffsets,
                                                &colorOffset,
                                                NULL,
                                                NULL);
        void* curVertex = geo.vertices();

        for (int i = 0; i < vertexCount; ++i) {
            *((GrPoint*)curVertex) = positions[i];

            if (texOffsets[0] > 0) {
                *(GrPoint*)((intptr_t)curVertex + texOffsets[0]) = texCoords[i];
            }
            if (colorOffset > 0) {
                *(GrColor*)((intptr_t)curVertex + colorOffset) = colors[i];
            }
            curVertex = (void*)((intptr_t)curVertex + vertexSize);
        }
    } else {
        // position-only layout: draw straight from the caller's array
        target->setVertexSourceToArray(layout, positions, vertexCount);
    }

    // we don't currently apply offscreen AA to this path. Need improved
    // management of GrDrawTarget's geometry to avoid copying points per-tile.

    if (NULL != indices) {
        target->setIndexSourceToArray(indices, indexCount);
        target->drawIndexed(primitiveType, 0, 0, vertexCount, indexCount);
    } else {
        target->drawNonIndexed(primitiveType, 0, vertexCount);
    }
}

///////////////////////////////////////////////////////////////////////////////
#include "SkDraw.h"
#include "SkRasterClip.h"

namespace {

// Maps a GrPathFill to the equivalent SkPath fill type; crashes on
// kHairLine (callers handle hairline separately).
SkPath::FillType gr_fill_to_sk_fill(GrPathFill fill) {
    switch (fill) {
        case kWinding_PathFill:
            return SkPath::kWinding_FillType;
        case kEvenOdd_PathFill:
            return SkPath::kEvenOdd_FillType;
        case kInverseWinding_PathFill:
            return SkPath::kInverseWinding_FillType;
        case kInverseEvenOdd_PathFill:
            return SkPath::kInverseEvenOdd_FillType;
        default:
            GrCrash("Unexpected fill.");
            return SkPath::kWinding_FillType;
    }
}

// gets device coord bounds of path (not considering the fill) and clip. The
// path bounds will be a subset of the clip bounds. returns false if path bounds
// would be empty.
bool get_path_and_clip_bounds(const GrDrawTarget* target,
                              const GrPath& path,
                              const GrVec* translate,
                              GrIRect* pathBounds,
                              GrIRect* clipBounds) {
    // compute bounds as intersection of rt size, clip, and path
    const GrRenderTarget* rt = target->getDrawState().getRenderTarget();
    if (NULL == rt) {
        return false;
    }
    *pathBounds = GrIRect::MakeWH(rt->width(), rt->height());
    const GrClip& clip = target->getClip();
    if (clip.hasConservativeBounds()) {
        clip.getConservativeBounds().roundOut(clipBounds);
        if (!pathBounds->intersect(*clipBounds)) {
            return false;
        }
    } else {
        // pathBounds is currently the rt extent, set clip bounds to that rect.
        *clipBounds = *pathBounds;
    }
    GrRect pathSBounds = path.getBounds();
    if (!pathSBounds.isEmpty()) {
        if (NULL != translate) {
            pathSBounds.offset(*translate);
        }
        // map the path's bounds into device space before intersecting
        target->getDrawState().getViewMatrix().mapRect(&pathSBounds,
                                                       pathSBounds);
        GrIRect pathIBounds;
        pathSBounds.roundOut(&pathIBounds);
        if (!pathBounds->intersect(pathIBounds)) {
            return false;
        }
    } else {
        // empty path -> nothing to draw
        return false;
    }
    return true;
}

/**
 * sw rasterizes path to A8 mask using the context's matrix and uploads to a
 * scratch texture.
 */

bool sw_draw_path_to_mask_texture(const GrPath& clientPath,
                                  const GrIRect& pathDevBounds,
                                  GrPathFill fill,
                                  GrContext* context,
                                  const GrPoint* translate,
                                  GrAutoScratchTexture* tex) {
    SkPaint paint;
    SkPath tmpPath;
    const SkPath* pathToDraw = &clientPath;
    if (kHairLine_PathFill == fill) {
        // hairline: stroke at one pixel wide
        paint.setStyle(SkPaint::kStroke_Style);
        paint.setStrokeWidth(SK_Scalar1);
    } else {
        paint.setStyle(SkPaint::kFill_Style);
        SkPath::FillType skfill = gr_fill_to_sk_fill(fill);
        if (skfill != pathToDraw->getFillType()) {
            // copy the path only when the fill type must change
            tmpPath = *pathToDraw;
            tmpPath.setFillType(skfill);
            pathToDraw = &tmpPath;
        }
    }
    paint.setAntiAlias(true);
    paint.setColor(SK_ColorWHITE);

    GrMatrix matrix = context->getMatrix();
    if (NULL != translate) {
        matrix.postTranslate(translate->fX, translate->fY);
    }

    // shift so the mask's origin coincides with pathDevBounds' top-left
    matrix.postTranslate(-pathDevBounds.fLeft * SK_Scalar1,
                         -pathDevBounds.fTop * SK_Scalar1);
    GrIRect bounds = GrIRect::MakeWH(pathDevBounds.width(),
                                     pathDevBounds.height());

    SkBitmap bm;
    bm.setConfig(SkBitmap::kA8_Config, bounds.fRight, bounds.fBottom);
    if (!bm.allocPixels()) {
        return false;
    }
    sk_bzero(bm.getPixels(), bm.getSafeSize());

    // rasterize with the CPU backend into the A8 bitmap
    SkDraw draw;
    sk_bzero(&draw, sizeof(draw));
    SkRasterClip rc(bounds);
    draw.fRC = &rc;
    draw.fClip = &rc.bwRgn();
    draw.fMatrix = &matrix;
    draw.fBitmap = &bm;
    draw.drawPath(*pathToDraw, paint);

    const GrTextureDesc desc = {
        kNone_GrTextureFlags,
        bounds.fRight,
        bounds.fBottom,
        kAlpha_8_GrPixelConfig,
        {0} // samples
    };

    tex->set(context, desc);
    GrTexture* texture = tex->texture();

    if (NULL == texture) {
        return false;
    }
    // upload the rasterized mask (writePixels flushes, see drawPath note)
    SkAutoLockPixels alp(bm);
    texture->writePixels(0, 0, desc.fWidth, desc.fHeight, desc.fConfig,
                         bm.getPixels(), bm.rowBytes());
    return true;
}

// For inverse fills: draws the (up to four) rects of the clip region that
// lie outside the path's device bounds.
void draw_around_inv_path(GrDrawTarget* target,
                          GrDrawState::StageMask stageMask,
                          const GrIRect& clipBounds,
                          const GrIRect& pathBounds) {
    GrDrawTarget::AutoDeviceCoordDraw adcd(target, stageMask);
    GrRect rect;
    if (clipBounds.fTop < pathBounds.fTop) {
        rect.iset(clipBounds.fLeft, clipBounds.fTop,
                  clipBounds.fRight, pathBounds.fTop);
        target->drawSimpleRect(rect, NULL, stageMask);
    }
    if (clipBounds.fLeft < pathBounds.fLeft) {
        rect.iset(clipBounds.fLeft, pathBounds.fTop,
                  pathBounds.fLeft, pathBounds.fBottom);
        target->drawSimpleRect(rect, NULL, stageMask);
    }
    if (clipBounds.fRight > pathBounds.fRight) {
        rect.iset(pathBounds.fRight, pathBounds.fTop,
                  clipBounds.fRight, pathBounds.fBottom);
        target->drawSimpleRect(rect, NULL, stageMask);
    }
    if (clipBounds.fBottom > pathBounds.fBottom) {
        rect.iset(clipBounds.fLeft, pathBounds.fBottom,
                  clipBounds.fRight, clipBounds.fBottom);
        target->drawSimpleRect(rect, NULL, stageMask);
    }
}

}

// Draws a path with the given fill, picking a GPU path renderer when one is
// compatible and falling back to a sw-rasterized mask otherwise.
void GrContext::drawPath(const GrPaint& paint, const GrPath& path,
                         GrPathFill fill, const GrPoint* translate) {

    if (path.isEmpty()) {
        // an empty inverse-filled path covers everything the paint covers
        if (GrIsFillInverted(fill)) {
            this->drawPaint(paint);
        }
        return;
    }

    // Note that below we may sw-rasterize the path into a scratch texture.
    // Scratch textures can be recycled after they are returned to the texture
    // cache. This presents a potential hazard for buffered drawing. However,
    // the writePixels that uploads to the scratch will perform a flush so we're
    // OK.
    DrawCategory category = (DEFER_PATHS) ?
kBuffered_DrawCategory : 1458 kUnbuffered_DrawCategory; 1459 GrDrawTarget* target = this->prepareToDraw(paint, category); 1460 GrDrawState::StageMask stageMask = paint.getActiveStageMask(); 1461 1462 bool prAA = paint.fAntiAlias && !this->getRenderTarget()->isMultisampled(); 1463 1464 // An Assumption here is that path renderer would use some form of tweaking 1465 // the src color (either the input alpha or in the frag shader) to implement 1466 // aa. If we have some future driver-mojo path AA that can do the right 1467 // thing WRT to the blend then we'll need some query on the PR. 1468 if (disable_coverage_aa_for_blend(target)) { 1469#if GR_DEBUG 1470 //GrPrintf("Turning off AA to correctly apply blend.\n"); 1471#endif 1472 prAA = false; 1473 } 1474 1475 GrPathRenderer* pr = NULL; 1476 if (prAA) { 1477 pr = this->getPathRenderer(path, fill, target, true); 1478 if (NULL == pr) { 1479 GrAutoScratchTexture ast; 1480 GrIRect pathBounds, clipBounds; 1481 if (!get_path_and_clip_bounds(target, path, translate, 1482 &pathBounds, &clipBounds)) { 1483 return; 1484 } 1485 if (NULL == pr && sw_draw_path_to_mask_texture(path, pathBounds, 1486 fill, this, 1487 translate, &ast)) { 1488 GrTexture* texture = ast.texture(); 1489 GrAssert(NULL != texture); 1490 GrDrawTarget::AutoDeviceCoordDraw adcd(target, stageMask); 1491 enum { 1492 kPathMaskStage = GrPaint::kTotalStages, 1493 }; 1494 target->drawState()->setTexture(kPathMaskStage, texture); 1495 target->drawState()->sampler(kPathMaskStage)->reset(); 1496 GrScalar w = GrIntToScalar(pathBounds.width()); 1497 GrScalar h = GrIntToScalar(pathBounds.height()); 1498 GrRect maskRect = GrRect::MakeWH(w / texture->width(), 1499 h / texture->height()); 1500 const GrRect* srcRects[GrDrawState::kNumStages] = {NULL}; 1501 srcRects[kPathMaskStage] = &maskRect; 1502 stageMask |= 1 << kPathMaskStage; 1503 GrRect dstRect = GrRect::MakeLTRB( 1504 SK_Scalar1* pathBounds.fLeft, 1505 SK_Scalar1* pathBounds.fTop, 1506 SK_Scalar1* pathBounds.fRight, 
1507 SK_Scalar1* pathBounds.fBottom); 1508 target->drawRect(dstRect, NULL, stageMask, srcRects, NULL); 1509 target->drawState()->setTexture(kPathMaskStage, NULL); 1510 if (GrIsFillInverted(fill)) { 1511 draw_around_inv_path(target, stageMask, 1512 clipBounds, pathBounds); 1513 } 1514 return; 1515 } 1516 } 1517 } else { 1518 pr = this->getPathRenderer(path, fill, target, false); 1519 } 1520 1521 if (NULL == pr) { 1522#if GR_DEBUG 1523 GrPrintf("Unable to find path renderer compatible with path.\n"); 1524#endif 1525 return; 1526 } 1527 1528 pr->drawPath(path, fill, translate, target, stageMask, prAA); 1529} 1530 1531//////////////////////////////////////////////////////////////////////////////// 1532 1533void GrContext::flush(int flagsBitfield) { 1534 if (kDiscard_FlushBit & flagsBitfield) { 1535 fDrawBuffer->reset(); 1536 } else { 1537 this->flushDrawBuffer(); 1538 } 1539 if (kForceCurrentRenderTarget_FlushBit & flagsBitfield) { 1540 fGpu->forceRenderTargetFlush(); 1541 } 1542} 1543 1544void GrContext::flushText() { 1545 if (kText_DrawCategory == fLastDrawCategory) { 1546 flushDrawBuffer(); 1547 } 1548} 1549 1550void GrContext::flushDrawBuffer() { 1551#if BATCH_RECT_TO_RECT || DEFER_TEXT_RENDERING 1552 if (fDrawBuffer) { 1553 fDrawBuffer->flushTo(fGpu); 1554 } 1555#endif 1556} 1557 1558void GrContext::internalWriteTexturePixels(GrTexture* texture, 1559 int left, int top, 1560 int width, int height, 1561 GrPixelConfig config, 1562 const void* buffer, 1563 size_t rowBytes, 1564 uint32_t flags) { 1565 SK_TRACE_EVENT0("GrContext::writeTexturePixels"); 1566 ASSERT_OWNED_RESOURCE(texture); 1567 1568 if (!(kDontFlush_PixelOpsFlag & flags)) { 1569 this->flush(); 1570 } 1571 // TODO: use scratch texture to perform conversion 1572 if (GrPixelConfigIsUnpremultiplied(texture->config()) != 1573 GrPixelConfigIsUnpremultiplied(config)) { 1574 return; 1575 } 1576 1577 fGpu->writeTexturePixels(texture, left, top, width, height, 1578 config, buffer, rowBytes); 1579} 1580 1581bool 
GrContext::internalReadTexturePixels(GrTexture* texture,
                                     int left, int top,
                                     int width, int height,
                                     GrPixelConfig config,
                                     void* buffer,
                                     size_t rowBytes,
                                     uint32_t flags) {
    SK_TRACE_EVENT0("GrContext::readTexturePixels");
    ASSERT_OWNED_RESOURCE(texture);

    // TODO: code read pixels for textures that aren't also rendertargets
    GrRenderTarget* target = texture->asRenderTarget();
    if (NULL != target) {
        // delegate to the render-target readback path
        return this->internalReadRenderTargetPixels(target,
                                                    left, top, width, height,
                                                    config, buffer, rowBytes,
                                                    flags);
    } else {
        return false;
    }
}

#include "SkConfig8888.h"

namespace {
/**
 * Converts a GrPixelConfig to a SkCanvas::Config8888. Only byte-per-channel
 * formats are representable as Config8888 and so the function returns false
 * if the GrPixelConfig has no equivalent Config8888.
 */
bool grconfig_to_config8888(GrPixelConfig config,
                            SkCanvas::Config8888* config8888) {
    switch (config) {
        case kRGBA_8888_PM_GrPixelConfig:
            *config8888 = SkCanvas::kRGBA_Premul_Config8888;
            return true;
        case kRGBA_8888_UPM_GrPixelConfig:
            *config8888 = SkCanvas::kRGBA_Unpremul_Config8888;
            return true;
        case kBGRA_8888_PM_GrPixelConfig:
            *config8888 = SkCanvas::kBGRA_Premul_Config8888;
            return true;
        case kBGRA_8888_UPM_GrPixelConfig:
            *config8888 = SkCanvas::kBGRA_Unpremul_Config8888;
            return true;
        default:
            return false;
    }
}
}

// Reads back pixels from a render target, handling premul->unpremul
// conversion in sw when the gpu can't, and R/B swap or Y-flip via an
// intermediate scratch render target when the gpu prefers it.
bool GrContext::internalReadRenderTargetPixels(GrRenderTarget* target,
                                               int left, int top,
                                               int width, int height,
                                               GrPixelConfig config,
                                               void* buffer,
                                               size_t rowBytes,
                                               uint32_t flags) {
    SK_TRACE_EVENT0("GrContext::readRenderTargetPixels");
    ASSERT_OWNED_RESOURCE(target);

    // NULL target means "the current render target"
    if (NULL == target) {
        target = fDrawState->getRenderTarget();
        if (NULL == target) {
            return false;
        }
    }

    if (!(kDontFlush_PixelOpsFlag & flags)) {
        this->flush();
    }

    if (!GrPixelConfigIsUnpremultiplied(target->config()) &&
        GrPixelConfigIsUnpremultiplied(config) &&
        !fGpu->canPreserveReadWriteUnpremulPixels()) {
        SkCanvas::Config8888 srcConfig8888, dstConfig8888;
        if (!grconfig_to_config8888(target->config(), &srcConfig8888) ||
            !grconfig_to_config8888(config, &dstConfig8888)) {
            return false;
        }
        // do read back using target's own config
        this->internalReadRenderTargetPixels(target,
                                             left, top,
                                             width, height,
                                             target->config(),
                                             buffer, rowBytes,
                                             kDontFlush_PixelOpsFlag);
        // sw convert the pixels to unpremul config
        uint32_t* pixels = reinterpret_cast<uint32_t*>(buffer);
        SkConvertConfig8888Pixels(pixels, rowBytes, dstConfig8888,
                                  pixels, rowBytes, srcConfig8888,
                                  width, height);
        return true;
    }

    GrTexture* src = target->asTexture();
    bool swapRAndB = NULL != src &&
                     fGpu->preferredReadPixelsConfig(config) ==
                     GrPixelConfigSwapRAndB(config);

    bool flipY = NULL != src &&
                 fGpu->readPixelsWillPayForYFlip(target, left, top,
                                                 width, height, config,
                                                 rowBytes);
    bool alphaConversion = (!GrPixelConfigIsUnpremultiplied(target->config()) &&
                            GrPixelConfigIsUnpremultiplied(config));

    if (NULL == src && alphaConversion) {
        // we should fallback to cpu conversion here. This could happen when
        // we were given an external render target by the client that is not
        // also a texture (e.g. FBO 0 in GL)
        return false;
    }
    // we draw to a scratch texture if any of these conversion are applied
    GrAutoScratchTexture ast;
    if (flipY || swapRAndB || alphaConversion) {
        GrAssert(NULL != src);
        if (swapRAndB) {
            config = GrPixelConfigSwapRAndB(config);
            GrAssert(kUnknown_GrPixelConfig != config);
        }
        // Make the scratch a render target because we don't have a robust
        // readTexturePixels as of yet (it calls this function).
        const GrTextureDesc desc = {
            kRenderTarget_GrTextureFlagBit,
            width, height,
            config,
            {0}, // samples
        };

        // When a full readback is faster than a partial we could always make
        // the scratch exactly match the passed rect. However, if we see many
        // different size rectangles we will trash our texture cache and pay the
        // cost of creating and destroying many textures. So, we only request
        // an exact match when the caller is reading an entire RT.
        ScratchTexMatch match = kApprox_ScratchTexMatch;
        if (0 == left &&
            0 == top &&
            target->width() == width &&
            target->height() == height &&
            fGpu->fullReadPixelsIsFasterThanPartial()) {
            match = kExact_ScratchTexMatch;
        }
        ast.set(this, desc, match);
        GrTexture* texture = ast.texture();
        if (!texture) {
            return false;
        }
        // redirect the readback to the scratch's render target
        target = texture->asRenderTarget();
        GrAssert(NULL != target);

        GrDrawTarget::AutoStateRestore asr(fGpu);
        GrDrawState* drawState = fGpu->drawState();
        drawState->reset();
        drawState->setRenderTarget(target);

        // sample matrix maps the scratch draw back onto the read rect,
        // optionally flipping in Y
        GrMatrix matrix;
        if (flipY) {
            matrix.setTranslate(SK_Scalar1 * left,
                                SK_Scalar1 * (top + height));
            matrix.set(GrMatrix::kMScaleY, -GR_Scalar1);
        } else {
            matrix.setTranslate(SK_Scalar1 *left, SK_Scalar1 *top);
        }
        matrix.postIDiv(src->width(), src->height());
        drawState->sampler(0)->reset(matrix);
        drawState->sampler(0)->setRAndBSwap(swapRAndB);
        drawState->setTexture(0, src);
        GrRect rect;
        rect.setXYWH(0, 0, SK_Scalar1 * width, SK_Scalar1 * height);
        fGpu->drawSimpleRect(rect, NULL, 0x1);
        // the converted pixels now live at the scratch's origin
        left = 0;
        top = 0;
    }
    return fGpu->readPixels(target,
                            left, top, width, height,
                            config, buffer, rowBytes, flipY);
}

// Resolves a multisampled render target's pending MSAA content.
void GrContext::resolveRenderTarget(GrRenderTarget* target) {
    GrAssert(target);
    ASSERT_OWNED_RESOURCE(target);
    // In the future we may track whether there are any pending draws to this
    // target. We don't today so we always perform a flush. We don't promise
    // this to our clients, though.
    this->flush();
    fGpu->resolveRenderTarget(target);
}

// Copies a texture's full contents into dst by drawing a textured rect.
void GrContext::copyTexture(GrTexture* src, GrRenderTarget* dst) {
    if (NULL == src || NULL == dst) {
        return;
    }
    ASSERT_OWNED_RESOURCE(src);

    GrDrawTarget::AutoStateRestore asr(fGpu);
    GrDrawState* drawState = fGpu->drawState();
    drawState->reset();
    drawState->setRenderTarget(dst);
    GrMatrix sampleM;
    sampleM.setIDiv(src->width(), src->height());
    drawState->setTexture(0, src);
    drawState->sampler(0)->reset(sampleM);
    SkRect rect = SkRect::MakeXYWH(0, 0,
                                   SK_Scalar1 * src->width(),
                                   SK_Scalar1 * src->height());
    fGpu->drawSimpleRect(rect, NULL, 1 << 0);
}

// Writes pixels into a render target, preferring the texture upload path
// and falling back to sw premul conversion + draw-from-scratch-texture.
void GrContext::internalWriteRenderTargetPixels(GrRenderTarget* target,
                                                int left, int top,
                                                int width, int height,
                                                GrPixelConfig config,
                                                const void* buffer,
                                                size_t rowBytes,
                                                uint32_t flags) {
    SK_TRACE_EVENT0("GrContext::writeRenderTargetPixels");
    ASSERT_OWNED_RESOURCE(target);

    // NULL target means "the current render target"
    if (NULL == target) {
        target = fGpu->drawState()->getRenderTarget();
        if (NULL == target) {
            return;
        }
    }

    // TODO: when underlying api has a direct way to do this we should use it
    // (e.g. glDrawPixels on desktop GL).

    // If the RT is also a texture and we don't have to do PM/UPM conversion
    // then take the texture path, which we expect to be at least as fast or
    // faster since it doesn't use an intermediate texture as we do below.

#if !GR_MAC_BUILD
    // At least some drivers on the Mac get confused when glTexImage2D is called
    // on a texture attached to an FBO. The FBO still sees the old image. TODO:
    // determine what OS versions and/or HW is affected.
    if (NULL != target->asTexture() &&
        GrPixelConfigIsUnpremultiplied(target->config()) ==
        GrPixelConfigIsUnpremultiplied(config)) {

        this->internalWriteTexturePixels(target->asTexture(),
                                         left, top, width, height,
                                         config, buffer, rowBytes, flags);
        return;
    }
#endif
    if (!GrPixelConfigIsUnpremultiplied(target->config()) &&
        GrPixelConfigIsUnpremultiplied(config) &&
        !fGpu->canPreserveReadWriteUnpremulPixels()) {
        SkCanvas::Config8888 srcConfig8888, dstConfig8888;
        if (!grconfig_to_config8888(config, &srcConfig8888) ||
            !grconfig_to_config8888(target->config(), &dstConfig8888)) {
            return;
        }
        // allocate a tmp buffer and sw convert the pixels to premul
        SkAutoSTMalloc<128 * 128, uint32_t> tmpPixels(width * height);
        const uint32_t* src = reinterpret_cast<const uint32_t*>(buffer);
        SkConvertConfig8888Pixels(tmpPixels.get(), 4 * width, dstConfig8888,
                                  src, rowBytes, srcConfig8888,
                                  width, height);
        // upload the already premul pixels
        this->internalWriteRenderTargetPixels(target,
                                              left, top,
                                              width, height,
                                              target->config(),
                                              tmpPixels, 4 * width, flags);
        return;
    }

    // honor the gpu's preferred upload channel order
    bool swapRAndB = fGpu->preferredReadPixelsConfig(config) ==
                     GrPixelConfigSwapRAndB(config);
    if (swapRAndB) {
        config = GrPixelConfigSwapRAndB(config);
    }

    const GrTextureDesc desc = {
        kNone_GrTextureFlags, width, height, config, {0}
    };
    // stage the pixels through a scratch texture, then draw it into the RT
    GrAutoScratchTexture ast(this, desc);
    GrTexture* texture = ast.texture();
    if (NULL == texture) {
        return;
    }
    this->internalWriteTexturePixels(texture, 0, 0, width, height,
                                     config, buffer, rowBytes, flags);

    GrDrawTarget::AutoStateRestore asr(fGpu);
    GrDrawState* drawState = fGpu->drawState();
    drawState->reset();

    // position the staged rect at (left, top) in the target
    GrMatrix matrix;
    matrix.setTranslate(GrIntToScalar(left), GrIntToScalar(top));
    drawState->setViewMatrix(matrix);
    drawState->setRenderTarget(target);
    drawState->setTexture(0, texture);

    matrix.setIDiv(texture->width(), texture->height());
    drawState->sampler(0)->reset(GrSamplerState::kClamp_WrapMode,
                                 GrSamplerState::kNearest_Filter,
                                 matrix);
    drawState->sampler(0)->setRAndBSwap(swapRAndB);

    GrVertexLayout layout = GrDrawTarget::StagePosAsTexCoordVertexLayoutBit(0);
    static const int VCOUNT = 4;
    // TODO: Use GrGpu::drawRect here
    GrDrawTarget::AutoReleaseGeometry geo(fGpu, layout, VCOUNT, 0);
    if (!geo.succeeded()) {
        GrPrintf("Failed to get space for vertices!\n");
        return;
    }
    ((GrPoint*)geo.vertices())->setIRectFan(0, 0, width, height);
    fGpu->drawNonIndexed(kTriangleFan_PrimitiveType, 0, VCOUNT);
}
////////////////////////////////////////////////////////////////////////////////

// Mirrors a GrPaint into the context's GrDrawState: textures, samplers,
// masks, color, blend, dither/AA/color-matrix state bits, and coverage.
void GrContext::setPaint(const GrPaint& paint) {

    for (int i = 0; i < GrPaint::kMaxTextures; ++i) {
        int s = i + GrPaint::kFirstTextureStage;
        fDrawState->setTexture(s, paint.getTexture(i));
        ASSERT_OWNED_RESOURCE(paint.getTexture(i));
        if (paint.getTexture(i)) {
            *fDrawState->sampler(s) = paint.getTextureSampler(i);
        }
    }

    // mask stages follow the texture stages
    fDrawState->setFirstCoverageStage(GrPaint::kFirstMaskStage);

    for (int i = 0; i < GrPaint::kMaxMasks; ++i) {
        int s = i + GrPaint::kFirstMaskStage;
        fDrawState->setTexture(s, paint.getMask(i));
        ASSERT_OWNED_RESOURCE(paint.getMask(i));
        if (paint.getMask(i)) {
            *fDrawState->sampler(s) = paint.getMaskSampler(i);
        }
    }

    // disable all stages not accessible via the paint
    for (int s = GrPaint::kTotalStages; s < GrDrawState::kNumStages; ++s) {
        fDrawState->setTexture(s, NULL);
    }

    fDrawState->setColor(paint.fColor);

    if (paint.fDither) {
        fDrawState->enableState(GrDrawState::kDither_StateBit);
    } else {
        fDrawState->disableState(GrDrawState::kDither_StateBit);
    }
    if (paint.fAntiAlias) {
        fDrawState->enableState(GrDrawState::kHWAntialias_StateBit);
    } else {
        fDrawState->disableState(GrDrawState::kHWAntialias_StateBit);
    }
    if (paint.fColorMatrixEnabled) {
        fDrawState->enableState(GrDrawState::kColorMatrix_StateBit);
        fDrawState->setColorMatrix(paint.fColorMatrix);
    } else {
        fDrawState->disableState(GrDrawState::kColorMatrix_StateBit);
    }
    fDrawState->setBlendFunc(paint.fSrcBlendCoeff, paint.fDstBlendCoeff);
    fDrawState->setColorFilter(paint.fColorFilterColor, paint.fColorFilterXfermode);
    fDrawState->setCoverage(paint.fCoverage);

#if GR_DEBUG
    if ((paint.getActiveMaskStageMask() || 0xff != paint.fCoverage) &&
        !fGpu->canApplyCoverage()) {
        GrPrintf("Partial pixel coverage will be incorrectly blended.\n");
    }
#endif
}

// Flushes when the draw category changes, applies the paint, and returns
// the draw target to use: the deferred buffer or the gpu directly.
GrDrawTarget* GrContext::prepareToDraw(const GrPaint& paint,
                                       DrawCategory category) {
    if (category != fLastDrawCategory) {
        // switching categories: play back anything buffered first
        this->flushDrawBuffer();
        fLastDrawCategory = category;
    }
    this->setPaint(paint);
    GrDrawTarget* target = fGpu;
    switch (category) {
        case kText_DrawCategory:
#if DEFER_TEXT_RENDERING
            target = fDrawBuffer;
            fDrawBuffer->setClip(fGpu->getClip());
#else
            target = fGpu;
#endif
            break;
        case kUnbuffered_DrawCategory:
            target = fGpu;
            break;
        case kBuffered_DrawCategory:
            target = fDrawBuffer;
            fDrawBuffer->setClip(fGpu->getClip());
            break;
    }
    return target;
}

// Lazily builds the path renderer chain, then asks it for a renderer that
// can handle this path/fill/AA combination (may return NULL).
GrPathRenderer* GrContext::getPathRenderer(const GrPath& path,
                                           GrPathFill fill,
                                           const GrDrawTarget* target,
                                           bool antiAlias) {
    if (NULL == fPathRendererChain) {
        fPathRendererChain =
            new GrPathRendererChain(this, GrPathRendererChain::kNone_UsageFlag);
    }
    return fPathRendererChain->getPathRenderer(path, fill, target, antiAlias);
}

////////////////////////////////////////////////////////////////////////////////

// Sets the current render target, flushing pending draws aimed at the old one.
void GrContext::setRenderTarget(GrRenderTarget* target) {
    ASSERT_OWNED_RESOURCE(target);
    if (fDrawState->getRenderTarget() != target) {
        this->flush(false);
        fDrawState->setRenderTarget(target);
    }
}

GrRenderTarget* GrContext::getRenderTarget() {
    return fDrawState->getRenderTarget();
}

const GrRenderTarget* GrContext::getRenderTarget() const {
    return fDrawState->getRenderTarget();
}

const GrMatrix& GrContext::getMatrix() const {
    return fDrawState->getViewMatrix();
}

void GrContext::setMatrix(const GrMatrix& m) {
    fDrawState->setViewMatrix(m);
}

// NOTE(review): declared const but mutates the draw state through the
// fDrawState pointer — presumably intentional; confirm against the header.
void GrContext::concatMatrix(const GrMatrix& m) const {
    fDrawState->preConcatViewMatrix(m);
}

// Sets or clears the bit at `shift` in `bits` according to `pred`.
static inline intptr_t setOrClear(intptr_t bits, int shift, intptr_t pred) {
    intptr_t mask = 1 << shift;
    if (pred) {
        bits |= mask;
    } else {
        bits &= ~mask;
    }
    return bits;
}

void GrContext::resetStats() {
    fGpu->resetStats();
}

const GrGpuStats& GrContext::getStats() const {
    return fGpu->getStats();
}

void GrContext::printStats() const {
    fGpu->printStats();
}

// Takes a ref on gpu and wires it up to a fresh draw state, caches, and
// (via setupDrawBuffer) the deferred draw buffer.
GrContext::GrContext(GrGpu* gpu) {
    fGpu = gpu;
    fGpu->ref();
    fGpu->setContext(this);

    fDrawState = new GrDrawState();
    fGpu->setDrawState(fDrawState);
2052 2053 fPathRendererChain = NULL; 2054 2055 fTextureCache = new GrResourceCache(MAX_TEXTURE_CACHE_COUNT, 2056 MAX_TEXTURE_CACHE_BYTES); 2057 fFontCache = new GrFontCache(fGpu); 2058 2059 fLastDrawCategory = kUnbuffered_DrawCategory; 2060 2061 fDrawBuffer = NULL; 2062 fDrawBufferVBAllocPool = NULL; 2063 fDrawBufferIBAllocPool = NULL; 2064 2065 fAAFillRectIndexBuffer = NULL; 2066 fAAStrokeRectIndexBuffer = NULL; 2067 2068 this->setupDrawBuffer(); 2069} 2070 2071void GrContext::setupDrawBuffer() { 2072 2073 GrAssert(NULL == fDrawBuffer); 2074 GrAssert(NULL == fDrawBufferVBAllocPool); 2075 GrAssert(NULL == fDrawBufferIBAllocPool); 2076 2077#if DEFER_TEXT_RENDERING || BATCH_RECT_TO_RECT 2078 fDrawBufferVBAllocPool = 2079 new GrVertexBufferAllocPool(fGpu, false, 2080 DRAW_BUFFER_VBPOOL_BUFFER_SIZE, 2081 DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS); 2082 fDrawBufferIBAllocPool = 2083 new GrIndexBufferAllocPool(fGpu, false, 2084 DRAW_BUFFER_IBPOOL_BUFFER_SIZE, 2085 DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS); 2086 2087 fDrawBuffer = new GrInOrderDrawBuffer(fGpu, 2088 fDrawBufferVBAllocPool, 2089 fDrawBufferIBAllocPool); 2090#endif 2091 2092#if BATCH_RECT_TO_RECT 2093 fDrawBuffer->setQuadIndexBuffer(this->getQuadIndexBuffer()); 2094#endif 2095 fDrawBuffer->setAutoFlushTarget(fGpu); 2096 fDrawBuffer->setDrawState(fDrawState); 2097} 2098 2099GrDrawTarget* GrContext::getTextTarget(const GrPaint& paint) { 2100#if DEFER_TEXT_RENDERING 2101 return prepareToDraw(paint, kText_DrawCategory); 2102#else 2103 return prepareToDraw(paint, kUnbuffered_DrawCategory); 2104#endif 2105} 2106 2107const GrIndexBuffer* GrContext::getQuadIndexBuffer() const { 2108 return fGpu->getQuadIndexBuffer(); 2109} 2110 2111GrTexture* GrContext::gaussianBlur(GrTexture* srcTexture, 2112 GrAutoScratchTexture* temp1, 2113 GrAutoScratchTexture* temp2, 2114 const SkRect& rect, 2115 float sigmaX, float sigmaY) { 2116 ASSERT_OWNED_RESOURCE(srcTexture); 2117 GrRenderTarget* oldRenderTarget = this->getRenderTarget(); 2118 
GrClip oldClip = this->getClip(); 2119 GrTexture* origTexture = srcTexture; 2120 GrAutoMatrix avm(this, GrMatrix::I()); 2121 SkIRect clearRect; 2122 int scaleFactorX, halfWidthX, kernelWidthX; 2123 int scaleFactorY, halfWidthY, kernelWidthY; 2124 sigmaX = adjust_sigma(sigmaX, &scaleFactorX, &halfWidthX, &kernelWidthX); 2125 sigmaY = adjust_sigma(sigmaY, &scaleFactorY, &halfWidthY, &kernelWidthY); 2126 2127 SkRect srcRect(rect); 2128 scale_rect(&srcRect, 1.0f / scaleFactorX, 1.0f / scaleFactorY); 2129 srcRect.roundOut(); 2130 scale_rect(&srcRect, scaleFactorX, scaleFactorY); 2131 this->setClip(srcRect); 2132 2133 const GrTextureDesc desc = { 2134 kRenderTarget_GrTextureFlagBit | kNoStencil_GrTextureFlagBit, 2135 SkScalarFloorToInt(srcRect.width()), 2136 SkScalarFloorToInt(srcRect.height()), 2137 kRGBA_8888_GrPixelConfig, 2138 {0} // samples 2139 }; 2140 2141 temp1->set(this, desc); 2142 if (temp2) temp2->set(this, desc); 2143 2144 GrTexture* dstTexture = temp1->texture(); 2145 GrPaint paint; 2146 paint.reset(); 2147 paint.textureSampler(0)->setFilter(GrSamplerState::kBilinear_Filter); 2148 2149 for (int i = 1; i < scaleFactorX || i < scaleFactorY; i *= 2) { 2150 paint.textureSampler(0)->matrix()->setIDiv(srcTexture->width(), 2151 srcTexture->height()); 2152 this->setRenderTarget(dstTexture->asRenderTarget()); 2153 SkRect dstRect(srcRect); 2154 scale_rect(&dstRect, i < scaleFactorX ? 0.5f : 1.0f, 2155 i < scaleFactorY ? 
0.5f : 1.0f); 2156 paint.setTexture(0, srcTexture); 2157 this->drawRectToRect(paint, dstRect, srcRect); 2158 srcRect = dstRect; 2159 SkTSwap(srcTexture, dstTexture); 2160 // If temp2 is non-NULL, don't render back to origTexture 2161 if (temp2 && dstTexture == origTexture) dstTexture = temp2->texture(); 2162 } 2163 2164 if (sigmaX > 0.0f) { 2165 SkAutoTMalloc<float> kernelStorageX(kernelWidthX); 2166 float* kernelX = kernelStorageX.get(); 2167 build_kernel(sigmaX, kernelX, kernelWidthX); 2168 2169 if (scaleFactorX > 1) { 2170 // Clear out a halfWidth to the right of the srcRect to prevent the 2171 // X convolution from reading garbage. 2172 clearRect = SkIRect::MakeXYWH( 2173 srcRect.fRight, srcRect.fTop, halfWidthX, srcRect.height()); 2174 this->clear(&clearRect, 0x0); 2175 } 2176 2177 this->setRenderTarget(dstTexture->asRenderTarget()); 2178 convolve(fGpu, srcTexture, srcRect, kernelX, kernelWidthX, 2179 GrSamplerState::kX_FilterDirection); 2180 SkTSwap(srcTexture, dstTexture); 2181 if (temp2 && dstTexture == origTexture) dstTexture = temp2->texture(); 2182 } 2183 2184 if (sigmaY > 0.0f) { 2185 SkAutoTMalloc<float> kernelStorageY(kernelWidthY); 2186 float* kernelY = kernelStorageY.get(); 2187 build_kernel(sigmaY, kernelY, kernelWidthY); 2188 2189 if (scaleFactorY > 1 || sigmaX > 0.0f) { 2190 // Clear out a halfWidth below the srcRect to prevent the Y 2191 // convolution from reading garbage. 2192 clearRect = SkIRect::MakeXYWH( 2193 srcRect.fLeft, srcRect.fBottom, srcRect.width(), halfWidthY); 2194 this->clear(&clearRect, 0x0); 2195 } 2196 2197 this->setRenderTarget(dstTexture->asRenderTarget()); 2198 convolve(fGpu, srcTexture, srcRect, kernelY, kernelWidthY, 2199 GrSamplerState::kY_FilterDirection); 2200 SkTSwap(srcTexture, dstTexture); 2201 if (temp2 && dstTexture == origTexture) dstTexture = temp2->texture(); 2202 } 2203 2204 if (scaleFactorX > 1 || scaleFactorY > 1) { 2205 // Clear one pixel to the right and below, to accommodate bilinear 2206 // upsampling. 
2207 clearRect = SkIRect::MakeXYWH( 2208 srcRect.fLeft, srcRect.fBottom, srcRect.width() + 1, 1); 2209 this->clear(&clearRect, 0x0); 2210 clearRect = SkIRect::MakeXYWH( 2211 srcRect.fRight, srcRect.fTop, 1, srcRect.height()); 2212 this->clear(&clearRect, 0x0); 2213 // FIXME: This should be mitchell, not bilinear. 2214 paint.textureSampler(0)->setFilter(GrSamplerState::kBilinear_Filter); 2215 paint.textureSampler(0)->matrix()->setIDiv(srcTexture->width(), 2216 srcTexture->height()); 2217 this->setRenderTarget(dstTexture->asRenderTarget()); 2218 paint.setTexture(0, srcTexture); 2219 SkRect dstRect(srcRect); 2220 scale_rect(&dstRect, scaleFactorX, scaleFactorY); 2221 this->drawRectToRect(paint, dstRect, srcRect); 2222 srcRect = dstRect; 2223 SkTSwap(srcTexture, dstTexture); 2224 } 2225 this->setRenderTarget(oldRenderTarget); 2226 this->setClip(oldClip); 2227 return srcTexture; 2228} 2229 2230GrTexture* GrContext::applyMorphology(GrTexture* srcTexture, 2231 const GrRect& rect, 2232 GrTexture* temp1, GrTexture* temp2, 2233 GrSamplerState::Filter filter, 2234 SkISize radius) { 2235 ASSERT_OWNED_RESOURCE(srcTexture); 2236 GrRenderTarget* oldRenderTarget = this->getRenderTarget(); 2237 GrAutoMatrix avm(this, GrMatrix::I()); 2238 GrClip oldClip = this->getClip(); 2239 this->setClip(GrRect::MakeWH(srcTexture->width(), srcTexture->height())); 2240 if (radius.fWidth > 0) { 2241 this->setRenderTarget(temp1->asRenderTarget()); 2242 apply_morphology(fGpu, srcTexture, rect, radius.fWidth, filter, 2243 GrSamplerState::kX_FilterDirection); 2244 SkIRect clearRect = SkIRect::MakeXYWH(rect.fLeft, rect.fBottom, 2245 rect.width(), radius.fHeight); 2246 this->clear(&clearRect, 0x0); 2247 srcTexture = temp1; 2248 } 2249 if (radius.fHeight > 0) { 2250 this->setRenderTarget(temp2->asRenderTarget()); 2251 apply_morphology(fGpu, srcTexture, rect, radius.fHeight, filter, 2252 GrSamplerState::kY_FilterDirection); 2253 srcTexture = temp2; 2254 } 2255 this->setRenderTarget(oldRenderTarget); 2256 
this->setClip(oldClip); 2257 return srcTexture; 2258} 2259 2260/////////////////////////////////////////////////////////////////////////////// 2261