// GrContext.cpp — revision 5f017a4ab001baf1b9f433a9b02c6e01f93a97a1
1 2/* 3 * Copyright 2011 Google Inc. 4 * 5 * Use of this source code is governed by a BSD-style license that can be 6 * found in the LICENSE file. 7 */ 8 9 10#include "GrContext.h" 11 12#include "GrBufferAllocPool.h" 13#include "GrClipIterator.h" 14#include "GrGpu.h" 15#include "GrIndexBuffer.h" 16#include "GrInOrderDrawBuffer.h" 17#include "GrPathRenderer.h" 18#include "GrPathUtils.h" 19#include "GrResourceCache.h" 20#include "GrStencilBuffer.h" 21#include "GrTextStrike.h" 22#include "SkTLazy.h" 23#include "SkTrace.h" 24 25#define DEFER_TEXT_RENDERING 0 26 27#define DEFER_PATHS 0 28 29#define BATCH_RECT_TO_RECT (0 && !GR_STATIC_RECT_VB) 30 31#define MAX_BLUR_SIGMA 4.0f 32 33 34// When we're using coverage AA but the blend is incompatible (given gpu 35// limitations) should we disable AA or draw wrong? 36#define DISABLE_COVERAGE_AA_FOR_BLEND 1 37 38static const size_t MAX_TEXTURE_CACHE_COUNT = 256; 39static const size_t MAX_TEXTURE_CACHE_BYTES = 16 * 1024 * 1024; 40 41static const size_t DRAW_BUFFER_VBPOOL_BUFFER_SIZE = 1 << 15; 42static const int DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS = 4; 43 44// path rendering is the only thing we defer today that uses non-static indices 45static const size_t DRAW_BUFFER_IBPOOL_BUFFER_SIZE = DEFER_PATHS ? 1 << 11 : 0; 46static const int DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS = DEFER_PATHS ? 
4 : 0; 47 48#define ASSERT_OWNED_RESOURCE(R) GrAssert(!(R) || (R)->getContext() == this) 49 50GrContext* GrContext::Create(GrEngine engine, 51 GrPlatform3DContext context3D) { 52 GrContext* ctx = NULL; 53 GrGpu* fGpu = GrGpu::Create(engine, context3D); 54 if (NULL != fGpu) { 55 ctx = new GrContext(fGpu); 56 fGpu->unref(); 57 } 58 return ctx; 59} 60 61GrContext::~GrContext() { 62 this->flush(); 63 delete fTextureCache; 64 delete fFontCache; 65 delete fDrawBuffer; 66 delete fDrawBufferVBAllocPool; 67 delete fDrawBufferIBAllocPool; 68 69 GrSafeUnref(fAAFillRectIndexBuffer); 70 GrSafeUnref(fAAStrokeRectIndexBuffer); 71 fGpu->unref(); 72 GrSafeUnref(fPathRendererChain); 73 fDrawState->unref(); 74} 75 76void GrContext::contextLost() { 77 contextDestroyed(); 78 this->setupDrawBuffer(); 79} 80 81void GrContext::contextDestroyed() { 82 // abandon first to so destructors 83 // don't try to free the resources in the API. 84 fGpu->abandonResources(); 85 86 // a path renderer may be holding onto resources that 87 // are now unusable 88 GrSafeSetNull(fPathRendererChain); 89 90 delete fDrawBuffer; 91 fDrawBuffer = NULL; 92 93 delete fDrawBufferVBAllocPool; 94 fDrawBufferVBAllocPool = NULL; 95 96 delete fDrawBufferIBAllocPool; 97 fDrawBufferIBAllocPool = NULL; 98 99 GrSafeSetNull(fAAFillRectIndexBuffer); 100 GrSafeSetNull(fAAStrokeRectIndexBuffer); 101 102 fTextureCache->removeAll(); 103 fFontCache->freeAll(); 104 fGpu->markContextDirty(); 105} 106 107void GrContext::resetContext() { 108 fGpu->markContextDirty(); 109} 110 111void GrContext::freeGpuResources() { 112 this->flush(); 113 fTextureCache->removeAll(); 114 fFontCache->freeAll(); 115 // a path renderer may be holding onto resources 116 GrSafeSetNull(fPathRendererChain); 117} 118 119size_t GrContext::getGpuTextureCacheBytes() const { 120 return fTextureCache->getCachedResourceBytes(); 121} 122 123//////////////////////////////////////////////////////////////////////////////// 124 125int 
GrContext::PaintStageVertexLayoutBits( 126 const GrPaint& paint, 127 const bool hasTexCoords[GrPaint::kTotalStages]) { 128 int stageMask = paint.getActiveStageMask(); 129 int layout = 0; 130 for (int i = 0; i < GrPaint::kTotalStages; ++i) { 131 if ((1 << i) & stageMask) { 132 if (NULL != hasTexCoords && hasTexCoords[i]) { 133 layout |= GrDrawTarget::StageTexCoordVertexLayoutBit(i, i); 134 } else { 135 layout |= GrDrawTarget::StagePosAsTexCoordVertexLayoutBit(i); 136 } 137 } 138 } 139 return layout; 140} 141 142 143//////////////////////////////////////////////////////////////////////////////// 144 145enum { 146 // flags for textures 147 kNPOTBit = 0x1, 148 kFilterBit = 0x2, 149 kScratchBit = 0x4, 150 151 // resource type 152 kTextureBit = 0x8, 153 kStencilBufferBit = 0x10 154}; 155 156GrTexture* GrContext::TextureCacheEntry::texture() const { 157 if (NULL == fEntry) { 158 return NULL; 159 } else { 160 return (GrTexture*) fEntry->resource(); 161 } 162} 163 164namespace { 165// returns true if this is a "special" texture because of gpu NPOT limitations 166bool gen_texture_key_values(const GrGpu* gpu, 167 const GrSamplerState* sampler, 168 GrContext::TextureKey clientKey, 169 int width, 170 int height, 171 int sampleCnt, 172 bool scratch, 173 uint32_t v[4]) { 174 GR_STATIC_ASSERT(sizeof(GrContext::TextureKey) == sizeof(uint64_t)); 175 // we assume we only need 16 bits of width and height 176 // assert that texture creation will fail anyway if this assumption 177 // would cause key collisions. 
178 GrAssert(gpu->getCaps().fMaxTextureSize <= SK_MaxU16); 179 v[0] = clientKey & 0xffffffffUL; 180 v[1] = (clientKey >> 32) & 0xffffffffUL; 181 v[2] = width | (height << 16); 182 183 v[3] = (sampleCnt << 24); 184 GrAssert(sampleCnt >= 0 && sampleCnt < 256); 185 186 if (!gpu->getCaps().fNPOTTextureTileSupport) { 187 bool isPow2 = GrIsPow2(width) && GrIsPow2(height); 188 189 bool tiled = NULL != sampler && 190 ((sampler->getWrapX() != GrSamplerState::kClamp_WrapMode) || 191 (sampler->getWrapY() != GrSamplerState::kClamp_WrapMode)); 192 193 if (tiled && !isPow2) { 194 v[3] |= kNPOTBit; 195 if (GrSamplerState::kNearest_Filter != sampler->getFilter()) { 196 v[3] |= kFilterBit; 197 } 198 } 199 } 200 201 if (scratch) { 202 v[3] |= kScratchBit; 203 } 204 205 v[3] |= kTextureBit; 206 207 return v[3] & kNPOTBit; 208} 209 210// we should never have more than one stencil buffer with same combo of 211// (width,height,samplecount) 212void gen_stencil_key_values(int width, int height, 213 int sampleCnt, uint32_t v[4]) { 214 v[0] = width; 215 v[1] = height; 216 v[2] = sampleCnt; 217 v[3] = kStencilBufferBit; 218} 219 220void gen_stencil_key_values(const GrStencilBuffer* sb, 221 uint32_t v[4]) { 222 gen_stencil_key_values(sb->width(), sb->height(), 223 sb->numSamples(), v); 224} 225 226void build_kernel(float sigma, float* kernel, int kernelWidth) { 227 int halfWidth = (kernelWidth - 1) / 2; 228 float sum = 0.0f; 229 float denom = 1.0f / (2.0f * sigma * sigma); 230 for (int i = 0; i < kernelWidth; ++i) { 231 float x = static_cast<float>(i - halfWidth); 232 // Note that the constant term (1/(sqrt(2*pi*sigma^2)) of the Gaussian 233 // is dropped here, since we renormalize the kernel below. 
234 kernel[i] = sk_float_exp(- x * x * denom); 235 sum += kernel[i]; 236 } 237 // Normalize the kernel 238 float scale = 1.0f / sum; 239 for (int i = 0; i < kernelWidth; ++i) 240 kernel[i] *= scale; 241} 242 243void scale_rect(SkRect* rect, float xScale, float yScale) { 244 rect->fLeft *= xScale; 245 rect->fTop *= yScale; 246 rect->fRight *= xScale; 247 rect->fBottom *= yScale; 248} 249 250float adjust_sigma(float sigma, int *scaleFactor, int *halfWidth, 251 int *kernelWidth) { 252 *scaleFactor = 1; 253 while (sigma > MAX_BLUR_SIGMA) { 254 *scaleFactor *= 2; 255 sigma *= 0.5f; 256 } 257 *halfWidth = static_cast<int>(ceilf(sigma * 3.0f)); 258 *kernelWidth = *halfWidth * 2 + 1; 259 return sigma; 260} 261 262void apply_morphology(GrGpu* gpu, 263 GrTexture* texture, 264 const SkRect& rect, 265 int radius, 266 GrSamplerState::Filter filter, 267 GrSamplerState::FilterDirection direction) { 268 GrAssert(filter == GrSamplerState::kErode_Filter || 269 filter == GrSamplerState::kDilate_Filter); 270 271 GrRenderTarget* target = gpu->drawState()->getRenderTarget(); 272 GrDrawTarget::AutoStateRestore asr(gpu, GrDrawTarget::kReset_ASRInit); 273 GrDrawState* drawState = gpu->drawState(); 274 drawState->setRenderTarget(target); 275 GrMatrix sampleM; 276 sampleM.setIDiv(texture->width(), texture->height()); 277 drawState->sampler(0)->reset(GrSamplerState::kClamp_WrapMode, filter, 278 sampleM); 279 drawState->sampler(0)->setMorphologyRadius(radius); 280 drawState->sampler(0)->setFilterDirection(direction); 281 drawState->setTexture(0, texture); 282 gpu->drawSimpleRect(rect, NULL, 1 << 0); 283} 284 285void convolve(GrGpu* gpu, 286 GrTexture* texture, 287 const SkRect& rect, 288 const float* kernel, 289 int kernelWidth, 290 GrSamplerState::FilterDirection direction) { 291 GrRenderTarget* target = gpu->drawState()->getRenderTarget(); 292 GrDrawTarget::AutoStateRestore asr(gpu, GrDrawTarget::kReset_ASRInit); 293 GrDrawState* drawState = gpu->drawState(); 294 
drawState->setRenderTarget(target); 295 GrMatrix sampleM; 296 sampleM.setIDiv(texture->width(), texture->height()); 297 drawState->sampler(0)->reset(GrSamplerState::kClamp_WrapMode, 298 GrSamplerState::kConvolution_Filter, 299 sampleM); 300 drawState->sampler(0)->setConvolutionParams(kernelWidth, kernel); 301 drawState->sampler(0)->setFilterDirection(direction); 302 drawState->setTexture(0, texture); 303 gpu->drawSimpleRect(rect, NULL, 1 << 0); 304} 305 306} 307 308GrContext::TextureCacheEntry GrContext::findAndLockTexture( 309 TextureKey key, 310 int width, 311 int height, 312 const GrSamplerState* sampler) { 313 uint32_t v[4]; 314 gen_texture_key_values(fGpu, sampler, key, width, height, 0, false, v); 315 GrResourceKey resourceKey(v); 316 return TextureCacheEntry(fTextureCache->findAndLock(resourceKey, 317 GrResourceCache::kNested_LockType)); 318} 319 320bool GrContext::isTextureInCache(TextureKey key, 321 int width, 322 int height, 323 const GrSamplerState* sampler) const { 324 uint32_t v[4]; 325 gen_texture_key_values(fGpu, sampler, key, width, height, 0, false, v); 326 GrResourceKey resourceKey(v); 327 return fTextureCache->hasKey(resourceKey); 328} 329 330GrResourceEntry* GrContext::addAndLockStencilBuffer(GrStencilBuffer* sb) { 331 ASSERT_OWNED_RESOURCE(sb); 332 uint32_t v[4]; 333 gen_stencil_key_values(sb, v); 334 GrResourceKey resourceKey(v); 335 return fTextureCache->createAndLock(resourceKey, sb); 336} 337 338GrStencilBuffer* GrContext::findStencilBuffer(int width, int height, 339 int sampleCnt) { 340 uint32_t v[4]; 341 gen_stencil_key_values(width, height, sampleCnt, v); 342 GrResourceKey resourceKey(v); 343 GrResourceEntry* entry = fTextureCache->findAndLock(resourceKey, 344 GrResourceCache::kSingle_LockType); 345 if (NULL != entry) { 346 GrStencilBuffer* sb = (GrStencilBuffer*) entry->resource(); 347 return sb; 348 } else { 349 return NULL; 350 } 351} 352 353void GrContext::unlockStencilBuffer(GrResourceEntry* sbEntry) { 354 
ASSERT_OWNED_RESOURCE(sbEntry->resource()); 355 fTextureCache->unlock(sbEntry); 356} 357 358static void stretchImage(void* dst, 359 int dstW, 360 int dstH, 361 void* src, 362 int srcW, 363 int srcH, 364 int bpp) { 365 GrFixed dx = (srcW << 16) / dstW; 366 GrFixed dy = (srcH << 16) / dstH; 367 368 GrFixed y = dy >> 1; 369 370 int dstXLimit = dstW*bpp; 371 for (int j = 0; j < dstH; ++j) { 372 GrFixed x = dx >> 1; 373 void* srcRow = (uint8_t*)src + (y>>16)*srcW*bpp; 374 void* dstRow = (uint8_t*)dst + j*dstW*bpp; 375 for (int i = 0; i < dstXLimit; i += bpp) { 376 memcpy((uint8_t*) dstRow + i, 377 (uint8_t*) srcRow + (x>>16)*bpp, 378 bpp); 379 x += dx; 380 } 381 y += dy; 382 } 383} 384 385GrContext::TextureCacheEntry GrContext::createAndLockTexture( 386 TextureKey key, 387 const GrSamplerState* sampler, 388 const GrTextureDesc& desc, 389 void* srcData, 390 size_t rowBytes) { 391 SK_TRACE_EVENT0("GrContext::createAndLockTexture"); 392 393#if GR_DUMP_TEXTURE_UPLOAD 394 GrPrintf("GrContext::createAndLockTexture [%d %d]\n", desc.fWidth, desc.fHeight); 395#endif 396 397 TextureCacheEntry entry; 398 uint32_t v[4]; 399 bool special = gen_texture_key_values(fGpu, sampler, key, 400 desc.fWidth, desc.fHeight, 401 desc.fSampleCnt, false, v); 402 GrResourceKey resourceKey(v); 403 404 if (special) { 405 GrAssert(NULL != sampler); 406 TextureCacheEntry clampEntry = this->findAndLockTexture(key, 407 desc.fWidth, 408 desc.fHeight, 409 NULL); 410 411 if (NULL == clampEntry.texture()) { 412 clampEntry = this->createAndLockTexture(key, NULL, desc, 413 srcData, rowBytes); 414 GrAssert(NULL != clampEntry.texture()); 415 if (NULL == clampEntry.texture()) { 416 return entry; 417 } 418 } 419 GrTextureDesc rtDesc = desc; 420 rtDesc.fFlags = rtDesc.fFlags | 421 kRenderTarget_GrTextureFlagBit | 422 kNoStencil_GrTextureFlagBit; 423 rtDesc.fWidth = GrNextPow2(GrMax(desc.fWidth, 64)); 424 rtDesc.fHeight = GrNextPow2(GrMax(desc.fHeight, 64)); 425 426 GrTexture* texture = fGpu->createTexture(rtDesc, 
NULL, 0); 427 428 if (NULL != texture) { 429 GrDrawTarget::AutoStateRestore asr(fGpu, 430 GrDrawTarget::kReset_ASRInit); 431 GrDrawState* drawState = fGpu->drawState(); 432 drawState->setRenderTarget(texture->asRenderTarget()); 433 drawState->setTexture(0, clampEntry.texture()); 434 435 GrSamplerState::Filter filter; 436 // if filtering is not desired then we want to ensure all 437 // texels in the resampled image are copies of texels from 438 // the original. 439 if (GrSamplerState::kNearest_Filter == sampler->getFilter()) { 440 filter = GrSamplerState::kNearest_Filter; 441 } else { 442 filter = GrSamplerState::kBilinear_Filter; 443 } 444 drawState->sampler(0)->reset(GrSamplerState::kClamp_WrapMode, 445 filter); 446 447 static const GrVertexLayout layout = 448 GrDrawTarget::StageTexCoordVertexLayoutBit(0,0); 449 GrDrawTarget::AutoReleaseGeometry arg(fGpu, layout, 4, 0); 450 451 if (arg.succeeded()) { 452 GrPoint* verts = (GrPoint*) arg.vertices(); 453 verts[0].setIRectFan(0, 0, 454 texture->width(), 455 texture->height(), 456 2*sizeof(GrPoint)); 457 verts[1].setIRectFan(0, 0, 1, 1, 2*sizeof(GrPoint)); 458 fGpu->drawNonIndexed(kTriangleFan_PrimitiveType, 459 0, 4); 460 entry.set(fTextureCache->createAndLock(resourceKey, texture)); 461 } 462 texture->releaseRenderTarget(); 463 } else { 464 // TODO: Our CPU stretch doesn't filter. But we create separate 465 // stretched textures when the sampler state is either filtered or 466 // not. Either implement filtered stretch blit on CPU or just create 467 // one when FBO case fails. 468 469 rtDesc.fFlags = kNone_GrTextureFlags; 470 // no longer need to clamp at min RT size. 
471 rtDesc.fWidth = GrNextPow2(desc.fWidth); 472 rtDesc.fHeight = GrNextPow2(desc.fHeight); 473 int bpp = GrBytesPerPixel(desc.fConfig); 474 SkAutoSMalloc<128*128*4> stretchedPixels(bpp * 475 rtDesc.fWidth * 476 rtDesc.fHeight); 477 stretchImage(stretchedPixels.get(), rtDesc.fWidth, rtDesc.fHeight, 478 srcData, desc.fWidth, desc.fHeight, bpp); 479 480 size_t stretchedRowBytes = rtDesc.fWidth * bpp; 481 482 GrTexture* texture = fGpu->createTexture(rtDesc, 483 stretchedPixels.get(), 484 stretchedRowBytes); 485 GrAssert(NULL != texture); 486 entry.set(fTextureCache->createAndLock(resourceKey, texture)); 487 } 488 fTextureCache->unlock(clampEntry.cacheEntry()); 489 490 } else { 491 GrTexture* texture = fGpu->createTexture(desc, srcData, rowBytes); 492 if (NULL != texture) { 493 entry.set(fTextureCache->createAndLock(resourceKey, texture)); 494 } 495 } 496 return entry; 497} 498 499namespace { 500inline void gen_scratch_tex_key_values(const GrGpu* gpu, 501 const GrTextureDesc& desc, 502 uint32_t v[4]) { 503 // Instead of a client-provided key of the texture contents 504 // we create a key of from the descriptor. 
505 GrContext::TextureKey descKey = (desc.fFlags << 8) | 506 ((uint64_t) desc.fConfig << 32); 507 // this code path isn't friendly to tiling with NPOT restricitons 508 // We just pass ClampNoFilter() 509 gen_texture_key_values(gpu, NULL, descKey, desc.fWidth, 510 desc.fHeight, desc.fSampleCnt, true, v); 511} 512} 513 514GrContext::TextureCacheEntry GrContext::lockScratchTexture( 515 const GrTextureDesc& inDesc, 516 ScratchTexMatch match) { 517 518 GrTextureDesc desc = inDesc; 519 if (kExact_ScratchTexMatch != match) { 520 // bin by pow2 with a reasonable min 521 static const int MIN_SIZE = 256; 522 desc.fWidth = GrMax(MIN_SIZE, GrNextPow2(desc.fWidth)); 523 desc.fHeight = GrMax(MIN_SIZE, GrNextPow2(desc.fHeight)); 524 } 525 526 GrResourceEntry* entry; 527 int origWidth = desc.fWidth; 528 int origHeight = desc.fHeight; 529 bool doubledW = false; 530 bool doubledH = false; 531 532 do { 533 uint32_t v[4]; 534 gen_scratch_tex_key_values(fGpu, desc, v); 535 GrResourceKey key(v); 536 entry = fTextureCache->findAndLock(key, 537 GrResourceCache::kNested_LockType); 538 // if we miss, relax the fit of the flags... 539 // then try doubling width... then height. 
540 if (NULL != entry || kExact_ScratchTexMatch == match) { 541 break; 542 } 543 if (!(desc.fFlags & kRenderTarget_GrTextureFlagBit)) { 544 desc.fFlags = desc.fFlags | kRenderTarget_GrTextureFlagBit; 545 } else if (desc.fFlags & kNoStencil_GrTextureFlagBit) { 546 desc.fFlags = desc.fFlags & ~kNoStencil_GrTextureFlagBit; 547 } else if (!doubledW) { 548 desc.fFlags = inDesc.fFlags; 549 desc.fWidth *= 2; 550 doubledW = true; 551 } else if (!doubledH) { 552 desc.fFlags = inDesc.fFlags; 553 desc.fWidth = origWidth; 554 desc.fHeight *= 2; 555 doubledH = true; 556 } else { 557 break; 558 } 559 560 } while (true); 561 562 if (NULL == entry) { 563 desc.fFlags = inDesc.fFlags; 564 desc.fWidth = origWidth; 565 desc.fHeight = origHeight; 566 GrTexture* texture = fGpu->createTexture(desc, NULL, 0); 567 if (NULL != texture) { 568 uint32_t v[4]; 569 gen_scratch_tex_key_values(fGpu, desc, v); 570 GrResourceKey key(v); 571 entry = fTextureCache->createAndLock(key, texture); 572 } 573 } 574 575 // If the caller gives us the same desc/sampler twice we don't want 576 // to return the same texture the second time (unless it was previously 577 // released). So we detach the entry from the cache and reattach at release. 578 if (NULL != entry) { 579 fTextureCache->detach(entry); 580 } 581 return TextureCacheEntry(entry); 582} 583 584void GrContext::unlockTexture(TextureCacheEntry entry) { 585 ASSERT_OWNED_RESOURCE(entry.texture()); 586 // If this is a scratch texture we detached it from the cache 587 // while it was locked (to avoid two callers simultaneously getting 588 // the same texture). 
589 if (kScratchBit & entry.cacheEntry()->key().getValue32(3)) { 590 fTextureCache->reattachAndUnlock(entry.cacheEntry()); 591 } else { 592 fTextureCache->unlock(entry.cacheEntry()); 593 } 594} 595 596GrTexture* GrContext::createUncachedTexture(const GrTextureDesc& desc, 597 void* srcData, 598 size_t rowBytes) { 599 return fGpu->createTexture(desc, srcData, rowBytes); 600} 601 602void GrContext::getTextureCacheLimits(int* maxTextures, 603 size_t* maxTextureBytes) const { 604 fTextureCache->getLimits(maxTextures, maxTextureBytes); 605} 606 607void GrContext::setTextureCacheLimits(int maxTextures, size_t maxTextureBytes) { 608 fTextureCache->setLimits(maxTextures, maxTextureBytes); 609} 610 611int GrContext::getMaxTextureSize() const { 612 return fGpu->getCaps().fMaxTextureSize; 613} 614 615int GrContext::getMaxRenderTargetSize() const { 616 return fGpu->getCaps().fMaxRenderTargetSize; 617} 618 619/////////////////////////////////////////////////////////////////////////////// 620 621GrTexture* GrContext::createPlatformTexture(const GrPlatformTextureDesc& desc) { 622 return fGpu->createPlatformTexture(desc); 623} 624 625GrRenderTarget* GrContext::createPlatformRenderTarget(const GrPlatformRenderTargetDesc& desc) { 626 return fGpu->createPlatformRenderTarget(desc); 627} 628 629/////////////////////////////////////////////////////////////////////////////// 630 631bool GrContext::supportsIndex8PixelConfig(const GrSamplerState* sampler, 632 int width, int height) const { 633 const GrDrawTarget::Caps& caps = fGpu->getCaps(); 634 if (!caps.f8BitPaletteSupport) { 635 return false; 636 } 637 638 bool isPow2 = GrIsPow2(width) && GrIsPow2(height); 639 640 if (!isPow2) { 641 bool tiled = NULL != sampler && 642 (sampler->getWrapX() != GrSamplerState::kClamp_WrapMode || 643 sampler->getWrapY() != GrSamplerState::kClamp_WrapMode); 644 if (tiled && !caps.fNPOTTextureTileSupport) { 645 return false; 646 } 647 } 648 return true; 649} 650 
651//////////////////////////////////////////////////////////////////////////////// 652 653const GrClip& GrContext::getClip() const { return fGpu->getClip(); } 654 655void GrContext::setClip(const GrClip& clip) { 656 fGpu->setClip(clip); 657 fDrawState->enableState(GrDrawState::kClip_StateBit); 658} 659 660void GrContext::setClip(const GrIRect& rect) { 661 GrClip clip; 662 clip.setFromIRect(rect); 663 fGpu->setClip(clip); 664} 665 666//////////////////////////////////////////////////////////////////////////////// 667 668void GrContext::clear(const GrIRect* rect, const GrColor color) { 669 this->flush(); 670 fGpu->clear(rect, color); 671} 672 673void GrContext::drawPaint(const GrPaint& paint) { 674 // set rect to be big enough to fill the space, but not super-huge, so we 675 // don't overflow fixed-point implementations 676 GrRect r; 677 r.setLTRB(0, 0, 678 GrIntToScalar(getRenderTarget()->width()), 679 GrIntToScalar(getRenderTarget()->height())); 680 GrMatrix inverse; 681 SkTLazy<GrPaint> tmpPaint; 682 const GrPaint* p = &paint; 683 GrAutoMatrix am; 684 685 // We attempt to map r by the inverse matrix and draw that. mapRect will 686 // map the four corners and bound them with a new rect. This will not 687 // produce a correct result for some perspective matrices. 
688 if (!this->getMatrix().hasPerspective()) { 689 if (!fDrawState->getViewInverse(&inverse)) { 690 GrPrintf("Could not invert matrix"); 691 return; 692 } 693 inverse.mapRect(&r); 694 } else { 695 if (paint.getActiveMaskStageMask() || paint.getActiveStageMask()) { 696 if (!fDrawState->getViewInverse(&inverse)) { 697 GrPrintf("Could not invert matrix"); 698 return; 699 } 700 tmpPaint.set(paint); 701 tmpPaint.get()->preConcatActiveSamplerMatrices(inverse); 702 p = tmpPaint.get(); 703 } 704 am.set(this, GrMatrix::I()); 705 } 706 // by definition this fills the entire clip, no need for AA 707 if (paint.fAntiAlias) { 708 if (!tmpPaint.isValid()) { 709 tmpPaint.set(paint); 710 p = tmpPaint.get(); 711 } 712 GrAssert(p == tmpPaint.get()); 713 tmpPaint.get()->fAntiAlias = false; 714 } 715 this->drawRect(*p, r); 716} 717 718//////////////////////////////////////////////////////////////////////////////// 719 720namespace { 721inline bool disable_coverage_aa_for_blend(GrDrawTarget* target) { 722 return DISABLE_COVERAGE_AA_FOR_BLEND && !target->canApplyCoverage(); 723} 724} 725 726//////////////////////////////////////////////////////////////////////////////// 727 728/* create a triangle strip that strokes the specified triangle. There are 8 729 unique vertices, but we repreat the last 2 to close up. Alternatively we 730 could use an indices array, and then only send 8 verts, but not sure that 731 would be faster. 
732 */ 733static void setStrokeRectStrip(GrPoint verts[10], GrRect rect, 734 GrScalar width) { 735 const GrScalar rad = GrScalarHalf(width); 736 rect.sort(); 737 738 verts[0].set(rect.fLeft + rad, rect.fTop + rad); 739 verts[1].set(rect.fLeft - rad, rect.fTop - rad); 740 verts[2].set(rect.fRight - rad, rect.fTop + rad); 741 verts[3].set(rect.fRight + rad, rect.fTop - rad); 742 verts[4].set(rect.fRight - rad, rect.fBottom - rad); 743 verts[5].set(rect.fRight + rad, rect.fBottom + rad); 744 verts[6].set(rect.fLeft + rad, rect.fBottom - rad); 745 verts[7].set(rect.fLeft - rad, rect.fBottom + rad); 746 verts[8] = verts[0]; 747 verts[9] = verts[1]; 748} 749 750static void setInsetFan(GrPoint* pts, size_t stride, 751 const GrRect& r, GrScalar dx, GrScalar dy) { 752 pts->setRectFan(r.fLeft + dx, r.fTop + dy, r.fRight - dx, r.fBottom - dy, stride); 753} 754 755static const uint16_t gFillAARectIdx[] = { 756 0, 1, 5, 5, 4, 0, 757 1, 2, 6, 6, 5, 1, 758 2, 3, 7, 7, 6, 2, 759 3, 0, 4, 4, 7, 3, 760 4, 5, 6, 6, 7, 4, 761}; 762 763int GrContext::aaFillRectIndexCount() const { 764 return GR_ARRAY_COUNT(gFillAARectIdx); 765} 766 767GrIndexBuffer* GrContext::aaFillRectIndexBuffer() { 768 if (NULL == fAAFillRectIndexBuffer) { 769 fAAFillRectIndexBuffer = fGpu->createIndexBuffer(sizeof(gFillAARectIdx), 770 false); 771 if (NULL != fAAFillRectIndexBuffer) { 772 #if GR_DEBUG 773 bool updated = 774 #endif 775 fAAFillRectIndexBuffer->updateData(gFillAARectIdx, 776 sizeof(gFillAARectIdx)); 777 GR_DEBUGASSERT(updated); 778 } 779 } 780 return fAAFillRectIndexBuffer; 781} 782 783static const uint16_t gStrokeAARectIdx[] = { 784 0 + 0, 1 + 0, 5 + 0, 5 + 0, 4 + 0, 0 + 0, 785 1 + 0, 2 + 0, 6 + 0, 6 + 0, 5 + 0, 1 + 0, 786 2 + 0, 3 + 0, 7 + 0, 7 + 0, 6 + 0, 2 + 0, 787 3 + 0, 0 + 0, 4 + 0, 4 + 0, 7 + 0, 3 + 0, 788 789 0 + 4, 1 + 4, 5 + 4, 5 + 4, 4 + 4, 0 + 4, 790 1 + 4, 2 + 4, 6 + 4, 6 + 4, 5 + 4, 1 + 4, 791 2 + 4, 3 + 4, 7 + 4, 7 + 4, 6 + 4, 2 + 4, 792 3 + 4, 0 + 4, 4 + 4, 4 + 4, 7 + 4, 3 + 4, 793 
794 0 + 8, 1 + 8, 5 + 8, 5 + 8, 4 + 8, 0 + 8, 795 1 + 8, 2 + 8, 6 + 8, 6 + 8, 5 + 8, 1 + 8, 796 2 + 8, 3 + 8, 7 + 8, 7 + 8, 6 + 8, 2 + 8, 797 3 + 8, 0 + 8, 4 + 8, 4 + 8, 7 + 8, 3 + 8, 798}; 799 800int GrContext::aaStrokeRectIndexCount() const { 801 return GR_ARRAY_COUNT(gStrokeAARectIdx); 802} 803 804GrIndexBuffer* GrContext::aaStrokeRectIndexBuffer() { 805 if (NULL == fAAStrokeRectIndexBuffer) { 806 fAAStrokeRectIndexBuffer = fGpu->createIndexBuffer(sizeof(gStrokeAARectIdx), 807 false); 808 if (NULL != fAAStrokeRectIndexBuffer) { 809 #if GR_DEBUG 810 bool updated = 811 #endif 812 fAAStrokeRectIndexBuffer->updateData(gStrokeAARectIdx, 813 sizeof(gStrokeAARectIdx)); 814 GR_DEBUGASSERT(updated); 815 } 816 } 817 return fAAStrokeRectIndexBuffer; 818} 819 820static GrVertexLayout aa_rect_layout(const GrDrawTarget* target, 821 bool useCoverage) { 822 GrVertexLayout layout = 0; 823 for (int s = 0; s < GrDrawState::kNumStages; ++s) { 824 if (NULL != target->getDrawState().getTexture(s)) { 825 layout |= GrDrawTarget::StagePosAsTexCoordVertexLayoutBit(s); 826 } 827 } 828 if (useCoverage) { 829 layout |= GrDrawTarget::kCoverage_VertexLayoutBit; 830 } else { 831 layout |= GrDrawTarget::kColor_VertexLayoutBit; 832 } 833 return layout; 834} 835 836void GrContext::fillAARect(GrDrawTarget* target, 837 const GrRect& devRect, 838 bool useVertexCoverage) { 839 GrVertexLayout layout = aa_rect_layout(target, useVertexCoverage); 840 841 size_t vsize = GrDrawTarget::VertexSize(layout); 842 843 GrDrawTarget::AutoReleaseGeometry geo(target, layout, 8, 0); 844 if (!geo.succeeded()) { 845 GrPrintf("Failed to get space for vertices!\n"); 846 return; 847 } 848 GrIndexBuffer* indexBuffer = this->aaFillRectIndexBuffer(); 849 if (NULL == indexBuffer) { 850 GrPrintf("Failed to create index buffer!\n"); 851 return; 852 } 853 854 intptr_t verts = reinterpret_cast<intptr_t>(geo.vertices()); 855 856 GrPoint* fan0Pos = reinterpret_cast<GrPoint*>(verts); 857 GrPoint* fan1Pos = 
reinterpret_cast<GrPoint*>(verts + 4 * vsize); 858 859 setInsetFan(fan0Pos, vsize, devRect, -GR_ScalarHalf, -GR_ScalarHalf); 860 setInsetFan(fan1Pos, vsize, devRect, GR_ScalarHalf, GR_ScalarHalf); 861 862 verts += sizeof(GrPoint); 863 for (int i = 0; i < 4; ++i) { 864 *reinterpret_cast<GrColor*>(verts + i * vsize) = 0; 865 } 866 867 GrColor innerColor; 868 if (useVertexCoverage) { 869 innerColor = 0xffffffff; 870 } else { 871 innerColor = target->getDrawState().getColor(); 872 } 873 874 verts += 4 * vsize; 875 for (int i = 0; i < 4; ++i) { 876 *reinterpret_cast<GrColor*>(verts + i * vsize) = innerColor; 877 } 878 879 target->setIndexSourceToBuffer(indexBuffer); 880 881 target->drawIndexed(kTriangles_PrimitiveType, 0, 882 0, 8, this->aaFillRectIndexCount()); 883} 884 885void GrContext::strokeAARect(GrDrawTarget* target, 886 const GrRect& devRect, 887 const GrVec& devStrokeSize, 888 bool useVertexCoverage) { 889 const GrScalar& dx = devStrokeSize.fX; 890 const GrScalar& dy = devStrokeSize.fY; 891 const GrScalar rx = GrMul(dx, GR_ScalarHalf); 892 const GrScalar ry = GrMul(dy, GR_ScalarHalf); 893 894 GrScalar spare; 895 { 896 GrScalar w = devRect.width() - dx; 897 GrScalar h = devRect.height() - dy; 898 spare = GrMin(w, h); 899 } 900 901 if (spare <= 0) { 902 GrRect r(devRect); 903 r.inset(-rx, -ry); 904 fillAARect(target, r, useVertexCoverage); 905 return; 906 } 907 GrVertexLayout layout = aa_rect_layout(target, useVertexCoverage); 908 size_t vsize = GrDrawTarget::VertexSize(layout); 909 910 GrDrawTarget::AutoReleaseGeometry geo(target, layout, 16, 0); 911 if (!geo.succeeded()) { 912 GrPrintf("Failed to get space for vertices!\n"); 913 return; 914 } 915 GrIndexBuffer* indexBuffer = this->aaStrokeRectIndexBuffer(); 916 if (NULL == indexBuffer) { 917 GrPrintf("Failed to create index buffer!\n"); 918 return; 919 } 920 921 intptr_t verts = reinterpret_cast<intptr_t>(geo.vertices()); 922 923 GrPoint* fan0Pos = reinterpret_cast<GrPoint*>(verts); 924 GrPoint* fan1Pos = 
reinterpret_cast<GrPoint*>(verts + 4 * vsize); 925 GrPoint* fan2Pos = reinterpret_cast<GrPoint*>(verts + 8 * vsize); 926 GrPoint* fan3Pos = reinterpret_cast<GrPoint*>(verts + 12 * vsize); 927 928 setInsetFan(fan0Pos, vsize, devRect, -rx - GR_ScalarHalf, -ry - GR_ScalarHalf); 929 setInsetFan(fan1Pos, vsize, devRect, -rx + GR_ScalarHalf, -ry + GR_ScalarHalf); 930 setInsetFan(fan2Pos, vsize, devRect, rx - GR_ScalarHalf, ry - GR_ScalarHalf); 931 setInsetFan(fan3Pos, vsize, devRect, rx + GR_ScalarHalf, ry + GR_ScalarHalf); 932 933 verts += sizeof(GrPoint); 934 for (int i = 0; i < 4; ++i) { 935 *reinterpret_cast<GrColor*>(verts + i * vsize) = 0; 936 } 937 938 GrColor innerColor; 939 if (useVertexCoverage) { 940 innerColor = 0xffffffff; 941 } else { 942 innerColor = target->getDrawState().getColor(); 943 } 944 verts += 4 * vsize; 945 for (int i = 0; i < 8; ++i) { 946 *reinterpret_cast<GrColor*>(verts + i * vsize) = innerColor; 947 } 948 949 verts += 8 * vsize; 950 for (int i = 0; i < 8; ++i) { 951 *reinterpret_cast<GrColor*>(verts + i * vsize) = 0; 952 } 953 954 target->setIndexSourceToBuffer(indexBuffer); 955 target->drawIndexed(kTriangles_PrimitiveType, 956 0, 0, 16, aaStrokeRectIndexCount()); 957} 958 959/** 960 * Returns true if the rects edges are integer-aligned. 961 */ 962static bool isIRect(const GrRect& r) { 963 return GrScalarIsInt(r.fLeft) && GrScalarIsInt(r.fTop) && 964 GrScalarIsInt(r.fRight) && GrScalarIsInt(r.fBottom); 965} 966 967static bool apply_aa_to_rect(GrDrawTarget* target, 968 const GrRect& rect, 969 GrScalar width, 970 const GrMatrix* matrix, 971 GrMatrix* combinedMatrix, 972 GrRect* devRect, 973 bool* useVertexCoverage) { 974 // we use a simple coverage ramp to do aa on axis-aligned rects 975 // we check if the rect will be axis-aligned, and the rect won't land on 976 // integer coords. 977 978 // we are keeping around the "tweak the alpha" trick because 979 // it is our only hope for the fixed-pipe implementation. 
980 // In a shader implementation we can give a separate coverage input 981 // TODO: remove this ugliness when we drop the fixed-pipe impl 982 *useVertexCoverage = false; 983 if (!target->canTweakAlphaForCoverage()) { 984 if (disable_coverage_aa_for_blend(target)) { 985#if GR_DEBUG 986 //GrPrintf("Turning off AA to correctly apply blend.\n"); 987#endif 988 return false; 989 } else { 990 *useVertexCoverage = true; 991 } 992 } 993 const GrDrawState& drawState = target->getDrawState(); 994 if (drawState.getRenderTarget()->isMultisampled()) { 995 return false; 996 } 997 998 if (0 == width && target->willUseHWAALines()) { 999 return false; 1000 } 1001 1002 if (!drawState.getViewMatrix().preservesAxisAlignment()) { 1003 return false; 1004 } 1005 1006 if (NULL != matrix && 1007 !matrix->preservesAxisAlignment()) { 1008 return false; 1009 } 1010 1011 *combinedMatrix = drawState.getViewMatrix(); 1012 if (NULL != matrix) { 1013 combinedMatrix->preConcat(*matrix); 1014 GrAssert(combinedMatrix->preservesAxisAlignment()); 1015 } 1016 1017 combinedMatrix->mapRect(devRect, rect); 1018 devRect->sort(); 1019 1020 if (width < 0) { 1021 return !isIRect(*devRect); 1022 } else { 1023 return true; 1024 } 1025} 1026 1027void GrContext::drawRect(const GrPaint& paint, 1028 const GrRect& rect, 1029 GrScalar width, 1030 const GrMatrix* matrix) { 1031 SK_TRACE_EVENT0("GrContext::drawRect"); 1032 1033 GrDrawTarget* target = this->prepareToDraw(paint, kUnbuffered_DrawCategory); 1034 int stageMask = paint.getActiveStageMask(); 1035 1036 GrRect devRect = rect; 1037 GrMatrix combinedMatrix; 1038 bool useVertexCoverage; 1039 bool needAA = paint.fAntiAlias && 1040 !this->getRenderTarget()->isMultisampled(); 1041 bool doAA = needAA && apply_aa_to_rect(target, rect, width, matrix, 1042 &combinedMatrix, &devRect, 1043 &useVertexCoverage); 1044 1045 if (doAA) { 1046 GrDrawTarget::AutoDeviceCoordDraw adcd(target, stageMask); 1047 if (width >= 0) { 1048 GrVec strokeSize;; 1049 if (width > 0) { 1050 
strokeSize.set(width, width);
                // map the stroke width into device space
                combinedMatrix.mapVectors(&strokeSize, 1);
                strokeSize.setAbs(strokeSize);
            } else {
                // hairline: one-pixel device-space stroke
                strokeSize.set(GR_Scalar1, GR_Scalar1);
            }
            strokeAARect(target, devRect, strokeSize, useVertexCoverage);
        } else {
            fillAARect(target, devRect, useVertexCoverage);
        }
        return;
    }

    // non-AA stroke / hairline path
    if (width >= 0) {
        // TODO: consider making static vertex buffers for these cases.
        // Hairline could be done by just adding closing vertex to
        // unitSquareVertexBuffer()
        GrVertexLayout layout = PaintStageVertexLayoutBits(paint, NULL);

        static const int worstCaseVertCount = 10;
        GrDrawTarget::AutoReleaseGeometry geo(target, layout, worstCaseVertCount, 0);

        if (!geo.succeeded()) {
            GrPrintf("Failed to get space for vertices!\n");
            return;
        }

        GrPrimitiveType primType;
        int vertCount;
        GrPoint* vertex = geo.positions();

        if (width > 0) {
            // stroke as a 10-vertex triangle strip ring
            vertCount = 10;
            primType = kTriangleStrip_PrimitiveType;
            setStrokeRectStrip(vertex, rect, width);
        } else {
            // hairline: closed 5-point line strip around the rect
            vertCount = 5;
            primType = kLineStrip_PrimitiveType;
            vertex[0].set(rect.fLeft, rect.fTop);
            vertex[1].set(rect.fRight, rect.fTop);
            vertex[2].set(rect.fRight, rect.fBottom);
            vertex[3].set(rect.fLeft, rect.fBottom);
            vertex[4].set(rect.fLeft, rect.fTop);
        }

        // temporarily fold the extra matrix into view/sampler matrices
        GrDrawState::AutoViewMatrixRestore avmr;
        if (NULL != matrix) {
            GrDrawState* drawState = target->drawState();
            avmr.set(drawState);
            drawState->preConcatViewMatrix(*matrix);
            drawState->preConcatSamplerMatrices(stageMask, *matrix);
        }

        target->drawNonIndexed(primType, 0, vertCount);
    } else {
#if GR_STATIC_RECT_VB
        // fill via the shared unit-square VB, transformed to the rect
        GrVertexLayout layout = PaintStageVertexLayoutBits(paint, NULL);
        const GrVertexBuffer* sqVB = fGpu->getUnitSquareVertexBuffer();
        if (NULL == sqVB) {
            GrPrintf("Failed to create static rect vb.\n");
            return;
        }
        target->setVertexSourceToBuffer(layout, sqVB);
        GrDrawState* drawState = target->drawState();
        GrDrawState::AutoViewMatrixRestore avmr(drawState);
        GrMatrix m;
        // maps the unit square onto rect
        m.setAll(rect.width(), 0,             rect.fLeft,
                 0,            rect.height(), rect.fTop,
                 0,            0,             GrMatrix::I()[8]);

        if (NULL != matrix) {
            m.postConcat(*matrix);
        }
        drawState->preConcatViewMatrix(m);
        drawState->preConcatSamplerMatrices(stageMask, m);

        target->drawNonIndexed(kTriangleFan_PrimitiveType, 0, 4);
#else
        target->drawSimpleRect(rect, matrix, stageMask);
#endif
    }
}

// Draws dstRect with the paint's first texture mapped from srcRect.
// dstMatrix/srcMatrix optionally transform the destination geometry and the
// source texture coordinates respectively.
void GrContext::drawRectToRect(const GrPaint& paint,
                               const GrRect& dstRect,
                               const GrRect& srcRect,
                               const GrMatrix* dstMatrix,
                               const GrMatrix* srcMatrix) {
    SK_TRACE_EVENT0("GrContext::drawRectToRect");

    // srcRect refers to paint's first texture
    if (NULL == paint.getTexture(0)) {
        // no texture: srcRect is meaningless, fall back to a plain fill
        drawRect(paint, dstRect, -1, dstMatrix);
        return;
    }

    GR_STATIC_ASSERT(!BATCH_RECT_TO_RECT || !GR_STATIC_RECT_VB);

#if GR_STATIC_RECT_VB
    GrDrawTarget* target = this->prepareToDraw(paint, kUnbuffered_DrawCategory);
    GrDrawState* drawState = target->drawState();
    GrVertexLayout layout = PaintStageVertexLayoutBits(paint, NULL);
    GrDrawState::AutoViewMatrixRestore avmr(drawState);

    GrMatrix m;

    // maps the unit square onto dstRect
    m.setAll(dstRect.width(), 0,                dstRect.fLeft,
             0,               dstRect.height(), dstRect.fTop,
             0,               0,                GrMatrix::I()[8]);
    if (NULL != dstMatrix) {
        m.postConcat(*dstMatrix);
    }
    drawState->preConcatViewMatrix(m);

    // srcRect refers to first stage
    int otherStageMask = paint.getActiveStageMask() &
                         (~(1 << GrPaint::kFirstTextureStage));
    if (otherStageMask) {
        drawState->preConcatSamplerMatrices(otherStageMask, m);
    }

    // maps the unit square onto srcRect for the first texture stage
    m.setAll(srcRect.width(), 0,                srcRect.fLeft,
             0,               srcRect.height(), srcRect.fTop,
             0,               0,                GrMatrix::I()[8]);
    if
(NULL != srcMatrix) {
        m.postConcat(*srcMatrix);
    }
    drawState->sampler(GrPaint::kFirstTextureStage)->preConcatMatrix(m);

    const GrVertexBuffer* sqVB = fGpu->getUnitSquareVertexBuffer();
    if (NULL == sqVB) {
        GrPrintf("Failed to create static rect vb.\n");
        return;
    }
    target->setVertexSourceToBuffer(layout, sqVB);
    target->drawNonIndexed(kTriangleFan_PrimitiveType, 0, 4);
#else

    GrDrawTarget* target;
#if BATCH_RECT_TO_RECT
    target = this->prepareToDraw(paint, kBuffered_DrawCategory);
#else
    target = this->prepareToDraw(paint, kUnbuffered_DrawCategory);
#endif

    // only stage 0 gets an explicit src rect/matrix
    const GrRect* srcRects[GrDrawState::kNumStages] = {NULL};
    const GrMatrix* srcMatrices[GrDrawState::kNumStages] = {NULL};
    srcRects[0] = &srcRect;
    srcMatrices[0] = srcMatrix;

    target->drawRect(dstRect, dstMatrix, 1, srcRects, srcMatrices);
#endif
}

// Draws client-supplied geometry. positions is required; texCoords, colors
// and indices are each optional (NULL to omit). When any optional per-vertex
// attribute is present the vertices are interleaved into scratch geometry,
// otherwise the position array is used directly.
void GrContext::drawVertices(const GrPaint& paint,
                             GrPrimitiveType primitiveType,
                             int vertexCount,
                             const GrPoint positions[],
                             const GrPoint texCoords[],
                             const GrColor colors[],
                             const uint16_t indices[],
                             int indexCount) {
    SK_TRACE_EVENT0("GrContext::drawVertices");

    GrDrawTarget::AutoReleaseGeometry geo;

    GrDrawTarget* target = this->prepareToDraw(paint, kUnbuffered_DrawCategory);

    bool hasTexCoords[GrPaint::kTotalStages] = {
        NULL != texCoords,   // texCoordSrc provides explicit stage 0 coords
        0                    // remaining stages use positions
    };

    GrVertexLayout layout = PaintStageVertexLayoutBits(paint, hasTexCoords);

    if (NULL != colors) {
        layout |= GrDrawTarget::kColor_VertexLayoutBit;
    }
    int vertexSize = GrDrawTarget::VertexSize(layout);

    if (sizeof(GrPoint) != vertexSize) {
        // interleave position + optional texcoord/color per vertex
        if (!geo.set(target, layout, vertexCount, 0)) {
            GrPrintf("Failed to get space for vertices!\n");
            return;
        }
        int texOffsets[GrDrawState::kMaxTexCoords];
        int colorOffset;
        GrDrawTarget::VertexSizeAndOffsetsByIdx(layout,
                                                texOffsets,
                                                &colorOffset,
                                                NULL,
                                                NULL);
        void* curVertex = geo.vertices();

        for (int i = 0; i < vertexCount; ++i) {
            *((GrPoint*)curVertex) = positions[i];

            if (texOffsets[0] > 0) {
                *(GrPoint*)((intptr_t)curVertex + texOffsets[0]) = texCoords[i];
            }
            if (colorOffset > 0) {
                *(GrColor*)((intptr_t)curVertex + colorOffset) = colors[i];
            }
            curVertex = (void*)((intptr_t)curVertex + vertexSize);
        }
    } else {
        // positions-only layout: draw straight from the caller's array
        target->setVertexSourceToArray(layout, positions, vertexCount);
    }

    // we don't currently apply offscreen AA to this path. Need improved
    // management of GrDrawTarget's geometry to avoid copying points per-tile.

    if (NULL != indices) {
        target->setIndexSourceToArray(indices, indexCount);
        target->drawIndexed(primitiveType, 0, 0, vertexCount, indexCount);
    } else {
        target->drawNonIndexed(primitiveType, 0, vertexCount);
    }
}

///////////////////////////////////////////////////////////////////////////////
#include "SkDraw.h"
#include "SkRasterClip.h"

namespace {

// Maps a GrPathFill enum onto the equivalent SkPath fill type.
// Crashes (debug) on kHairLine, which has no SkPath equivalent.
SkPath::FillType gr_fill_to_sk_fill(GrPathFill fill) {
    switch (fill) {
        case kWinding_PathFill:
            return SkPath::kWinding_FillType;
        case kEvenOdd_PathFill:
            return SkPath::kEvenOdd_FillType;
        case kInverseWinding_PathFill:
            return SkPath::kInverseWinding_FillType;
        case kInverseEvenOdd_PathFill:
            return SkPath::kInverseEvenOdd_FillType;
        default:
            GrCrash("Unexpected fill.");
            return SkPath::kWinding_FillType;
    }
}

// gets device coord bounds of path (not considering the fill) and clip. The
// path bounds will be a subset of the clip bounds. returns false if path bounds
// would be empty.
bool get_path_and_clip_bounds(const GrDrawTarget* target,
                              const GrPath& path,
                              const GrVec* translate,
                              GrIRect* pathBounds,
                              GrIRect* clipBounds) {
    // compute bounds as intersection of rt size, clip, and path
    const GrRenderTarget* rt = target->getDrawState().getRenderTarget();
    if (NULL == rt) {
        return false;
    }
    *pathBounds = GrIRect::MakeWH(rt->width(), rt->height());
    const GrClip& clip = target->getClip();
    if (clip.hasConservativeBounds()) {
        clip.getConservativeBounds().roundOut(clipBounds);
        if (!pathBounds->intersect(*clipBounds)) {
            // clip and render target don't overlap: nothing to draw
            return false;
        }
    } else {
        // pathBounds is currently the rt extent, set clip bounds to that rect.
        *clipBounds = *pathBounds;
    }
    GrRect pathSBounds = path.getBounds();
    if (!pathSBounds.isEmpty()) {
        if (NULL != translate) {
            pathSBounds.offset(*translate);
        }
        // map the path's bounds through the view matrix into device space
        target->getDrawState().getViewMatrix().mapRect(&pathSBounds,
                                                       pathSBounds);
        GrIRect pathIBounds;
        pathSBounds.roundOut(&pathIBounds);
        if (!pathBounds->intersect(pathIBounds)) {
            return false;
        }
    } else {
        // empty path: no device bounds
        return false;
    }
    return true;
}

/**
 * sw rasterizes path to A8 mask using the context's matrix and uploads to a
 * scratch texture.
 */

bool sw_draw_path_to_mask_texture(const GrPath& clientPath,
                                  const GrIRect& pathDevBounds,
                                  GrPathFill fill,
                                  GrContext* context,
                                  const GrPoint* translate,
                                  GrAutoScratchTexture* tex) {
    SkPaint paint;
    SkPath tmpPath;
    const SkPath* pathToDraw = &clientPath;
    if (kHairLine_PathFill == fill) {
        // hairline: rasterize as a 1-unit-wide stroke
        paint.setStyle(SkPaint::kStroke_Style);
        paint.setStrokeWidth(SK_Scalar1);
    } else {
        paint.setStyle(SkPaint::kFill_Style);
        SkPath::FillType skfill = gr_fill_to_sk_fill(fill);
        if (skfill != pathToDraw->getFillType()) {
            // copy the path only when the fill type must be overridden
            tmpPath = *pathToDraw;
            tmpPath.setFillType(skfill);
            pathToDraw = &tmpPath;
        }
    }
    // white-on-black A8 coverage mask
    paint.setAntiAlias(true);
    paint.setColor(SK_ColorWHITE);

    GrMatrix matrix = context->getMatrix();
    if (NULL != translate) {
        matrix.postTranslate(translate->fX, translate->fY);
    }

    // shift so the mask's top-left corresponds to pathDevBounds' origin
    matrix.postTranslate(-pathDevBounds.fLeft * SK_Scalar1,
                         -pathDevBounds.fTop * SK_Scalar1);
    GrIRect bounds = GrIRect::MakeWH(pathDevBounds.width(),
                                     pathDevBounds.height());

    SkBitmap bm;
    bm.setConfig(SkBitmap::kA8_Config, bounds.fRight, bounds.fBottom);
    if (!bm.allocPixels()) {
        return false;
    }
    sk_bzero(bm.getPixels(), bm.getSafeSize());

    // software-rasterize the path into the A8 bitmap
    SkDraw draw;
    sk_bzero(&draw, sizeof(draw));
    SkRasterClip rc(bounds);
    draw.fRC = &rc;
    draw.fClip = &rc.bwRgn();
    draw.fMatrix = &matrix;
    draw.fBitmap = &bm;
    draw.drawPath(*pathToDraw, paint);

    const GrTextureDesc desc = {
        kNone_GrTextureFlags,
        bounds.fRight,
        bounds.fBottom,
        kAlpha_8_GrPixelConfig,
        0 // samples
    };

    tex->set(context, desc);
    GrTexture* texture = tex->texture();

    if (NULL == texture) {
        return false;
    }
    // upload the rasterized mask; writePixels flushes, so buffered draws
    // that recycle this scratch texture remain safe (see drawPath note)
    SkAutoLockPixels alp(bm);
    texture->writePixels(0, 0, desc.fWidth, desc.fHeight, desc.fConfig,
                         bm.getPixels(), bm.rowBytes());
    return true;
}

// Fills the region of clipBounds that lies outside pathBounds with up to
// four rects; used to complete inverse-filled paths whose interior mask only
// covers pathBounds.
void draw_around_inv_path(GrDrawTarget* target,
                          GrDrawState::StageMask stageMask,
                          const GrIRect& clipBounds,
                          const GrIRect& pathBounds) {
    GrDrawTarget::AutoDeviceCoordDraw adcd(target, stageMask);
    GrRect rect;
    if (clipBounds.fTop < pathBounds.fTop) {
        rect.iset(clipBounds.fLeft, clipBounds.fTop,
                  clipBounds.fRight, pathBounds.fTop);
        target->drawSimpleRect(rect, NULL, stageMask);
    }
    if (clipBounds.fLeft < pathBounds.fLeft) {
        rect.iset(clipBounds.fLeft, pathBounds.fTop,
                  pathBounds.fLeft, pathBounds.fBottom);
        target->drawSimpleRect(rect, NULL, stageMask);
    }
    if (clipBounds.fRight > pathBounds.fRight) {
        rect.iset(pathBounds.fRight, pathBounds.fTop,
                  clipBounds.fRight, pathBounds.fBottom);
        target->drawSimpleRect(rect, NULL, stageMask);
    }
    if (clipBounds.fBottom > pathBounds.fBottom) {
        rect.iset(clipBounds.fLeft, pathBounds.fBottom,
                  clipBounds.fRight, clipBounds.fBottom);
        target->drawSimpleRect(rect, NULL, stageMask);
    }
}

}

// Draws a path with the given fill rule. Tries, in order: a path renderer
// that supports AA, a software-rasterized mask fallback (AA only), then a
// non-AA path renderer. translate optionally offsets the path pre-transform.
void GrContext::drawPath(const GrPaint& paint, const GrPath& path,
                         GrPathFill fill, const GrPoint* translate) {

    if (path.isEmpty()) {
       // an empty inverse-filled path covers everything
       if (GrIsFillInverted(fill)) {
           this->drawPaint(paint);
       }
       return;
    }

    // Note that below we may sw-rasterize the path into a scratch texture.
    // Scratch textures can be recycled after they are returned to the texture
    // cache. This presents a potential hazard for buffered drawing. However,
    // the writePixels that uploads to the scratch will perform a flush so we're
    // OK.
    DrawCategory category = (DEFER_PATHS) ? kBuffered_DrawCategory :
                                            kUnbuffered_DrawCategory;
    GrDrawTarget* target = this->prepareToDraw(paint, category);
    GrDrawState::StageMask stageMask = paint.getActiveStageMask();

    bool prAA = paint.fAntiAlias && !this->getRenderTarget()->isMultisampled();

    // An Assumption here is that path renderer would use some form of tweaking
    // the src color (either the input alpha or in the frag shader) to implement
    // aa. If we have some future driver-mojo path AA that can do the right
    // thing WRT to the blend then we'll need some query on the PR.
    if (disable_coverage_aa_for_blend(target)) {
#if GR_DEBUG
        //GrPrintf("Turning off AA to correctly apply blend.\n");
#endif
        prAA = false;
    }

    GrPathRenderer* pr = NULL;
    if (prAA) {
        pr = this->getPathRenderer(path, fill, target, true);
        if (NULL == pr) {
            // no AA-capable renderer: fall back to a software A8 mask
            GrAutoScratchTexture ast;
            GrIRect pathBounds, clipBounds;
            if (!get_path_and_clip_bounds(target, path, translate,
                                          &pathBounds, &clipBounds)) {
                return;
            }
            // NOTE(review): the NULL == pr re-check is always true here (pr
            // was tested just above) -- it appears redundant.
            if (NULL == pr && sw_draw_path_to_mask_texture(path, pathBounds,
                                                           fill, this,
                                                           translate, &ast)) {
                GrTexture* texture = ast.texture();
                GrAssert(NULL != texture);
                GrDrawTarget::AutoDeviceCoordDraw adcd(target, stageMask);
                // bind the mask to the first stage past the paint's stages
                enum {
                    kPathMaskStage = GrPaint::kTotalStages,
                };
                target->drawState()->setTexture(kPathMaskStage, texture);
                target->drawState()->sampler(kPathMaskStage)->reset();
                GrScalar w = GrIntToScalar(pathBounds.width());
                GrScalar h = GrIntToScalar(pathBounds.height());
                // normalized texture coords for the used portion of the mask
                GrRect maskRect = GrRect::MakeWH(w / texture->width(),
                                                 h / texture->height());
                const GrRect* srcRects[GrDrawState::kNumStages] = {NULL};
                srcRects[kPathMaskStage] = &maskRect;
                stageMask |= 1 << kPathMaskStage;
                GrRect dstRect = GrRect::MakeLTRB(
                    SK_Scalar1* pathBounds.fLeft,
                    SK_Scalar1* pathBounds.fTop,
                    SK_Scalar1* pathBounds.fRight,
SK_Scalar1* pathBounds.fBottom);
                target->drawRect(dstRect, NULL, stageMask, srcRects, NULL);
                target->drawState()->setTexture(kPathMaskStage, NULL);
                if (GrIsFillInverted(fill)) {
                    // mask only covers pathBounds; fill the rest of the clip
                    draw_around_inv_path(target, stageMask,
                                         clipBounds, pathBounds);
                }
                return;
            }
        }
    } else {
        pr = this->getPathRenderer(path, fill, target, false);
    }

    if (NULL == pr) {
#if GR_DEBUG
        GrPrintf("Unable to find path renderer compatible with path.\n");
#endif
        return;
    }

    pr->drawPath(path, fill, translate, target, stageMask, prAA);
}

////////////////////////////////////////////////////////////////////////////////

// Flushes buffered draws to the GPU. kDiscard_FlushBit drops buffered draws
// instead of playing them back; kForceCurrentRenderTarget_FlushBit also
// forces a render-target flush on the GPU.
void GrContext::flush(int flagsBitfield) {
    if (kDiscard_FlushBit & flagsBitfield) {
        fDrawBuffer->reset();
    } else {
        this->flushDrawBuffer();
    }
    if (kForceCurrentRenderTarget_FlushBit & flagsBitfield) {
        fGpu->forceRenderTargetFlush();
    }
}

// Plays back any buffered draws into the GPU; no-op if there is no buffer.
void GrContext::flushDrawBuffer() {
    if (fDrawBuffer) {
        fDrawBuffer->flushTo(fGpu);
    }
}

// Uploads pixels into a sub-rect of texture. Flushes pending draws first
// unless kDontFlush_PixelOpsFlag is set. Silently drops writes that would
// require premul<->unpremul conversion (not yet implemented).
void GrContext::internalWriteTexturePixels(GrTexture* texture,
                                           int left, int top,
                                           int width, int height,
                                           GrPixelConfig config,
                                           const void* buffer,
                                           size_t rowBytes,
                                           uint32_t flags) {
    SK_TRACE_EVENT0("GrContext::writeTexturePixels");
    ASSERT_OWNED_RESOURCE(texture);

    if (!(kDontFlush_PixelOpsFlag & flags)) {
        this->flush();
    }
    // TODO: use scratch texture to perform conversion
    if (GrPixelConfigIsUnpremultiplied(texture->config()) !=
        GrPixelConfigIsUnpremultiplied(config)) {
        return;
    }

    fGpu->writeTexturePixels(texture, left, top, width, height,
                             config, buffer, rowBytes);
}

// Reads pixels back from a texture. Currently only works when the texture
// is also a render target (delegates to internalReadRenderTargetPixels);
// returns false otherwise.
bool GrContext::internalReadTexturePixels(GrTexture* texture,
                                          int left, int top,
                                          int width, int height,
                                          GrPixelConfig config,
                                          void* buffer,
                                          size_t rowBytes,
                                          uint32_t flags) {
    SK_TRACE_EVENT0("GrContext::readTexturePixels");
    ASSERT_OWNED_RESOURCE(texture);

    // TODO: code read pixels for textures that aren't also rendertargets
    GrRenderTarget* target = texture->asRenderTarget();
    if (NULL != target) {
        return this->internalReadRenderTargetPixels(target,
                                                    left, top, width, height,
                                                    config, buffer, rowBytes,
                                                    flags);
    } else {
        return false;
    }
}

#include "SkConfig8888.h"

namespace {
/**
 * Converts a GrPixelConfig to a SkCanvas::Config8888. Only byte-per-channel
 * formats are representable as Config8888 and so the function returns false
 * if the GrPixelConfig has no equivalent Config8888.
 */
bool grconfig_to_config8888(GrPixelConfig config,
                            SkCanvas::Config8888* config8888) {
    switch (config) {
        case kRGBA_8888_PM_GrPixelConfig:
            *config8888 = SkCanvas::kRGBA_Premul_Config8888;
            return true;
        case kRGBA_8888_UPM_GrPixelConfig:
            *config8888 = SkCanvas::kRGBA_Unpremul_Config8888;
            return true;
        case kBGRA_8888_PM_GrPixelConfig:
            *config8888 = SkCanvas::kBGRA_Premul_Config8888;
            return true;
        case kBGRA_8888_UPM_GrPixelConfig:
            *config8888 = SkCanvas::kBGRA_Unpremul_Config8888;
            return true;
        default:
            return false;
    }
}
}

// Reads back a rect of pixels from a render target (NULL target means the
// context's current RT). Handles premul->unpremul conversion in software
// when the GPU can't, and R/B swap or Y-flip via a draw into a scratch
// render target. Returns false if the read cannot be performed.
bool GrContext::internalReadRenderTargetPixels(GrRenderTarget* target,
                                               int left, int top,
                                               int width, int height,
                                               GrPixelConfig config,
                                               void* buffer,
                                               size_t rowBytes,
                                               uint32_t flags) {
    SK_TRACE_EVENT0("GrContext::readRenderTargetPixels");
    ASSERT_OWNED_RESOURCE(target);

    if (NULL == target) {
        target = fDrawState->getRenderTarget();
        if (NULL == target) {
            return false;
        }
    }

    if (!(kDontFlush_PixelOpsFlag & flags)) {
        this->flush();
    }

    if (!GrPixelConfigIsUnpremultiplied(target->config()) &&
        GrPixelConfigIsUnpremultiplied(config)
&&
        !fGpu->canPreserveReadWriteUnpremulPixels()) {
        // GPU can't produce unpremul output losslessly: read back in the
        // target's own (premul) config and convert on the CPU.
        SkCanvas::Config8888 srcConfig8888, dstConfig8888;
        if (!grconfig_to_config8888(target->config(), &srcConfig8888) ||
            !grconfig_to_config8888(config, &dstConfig8888)) {
            return false;
        }
        // do read back using target's own config
        this->internalReadRenderTargetPixels(target,
                                             left, top,
                                             width, height,
                                             target->config(),
                                             buffer, rowBytes,
                                             kDontFlush_PixelOpsFlag);
        // sw convert the pixels to unpremul config
        uint32_t* pixels = reinterpret_cast<uint32_t*>(buffer);
        SkConvertConfig8888Pixels(pixels, rowBytes, dstConfig8888,
                                  pixels, rowBytes, srcConfig8888,
                                  width, height);
        return true;
    }

    GrTexture* src = target->asTexture();
    // does the GPU prefer reading with R and B swapped?
    bool swapRAndB = NULL != src &&
                     fGpu->preferredReadPixelsConfig(config) ==
                     GrPixelConfigSwapRAndB(config);

    bool flipY = NULL != src &&
                 fGpu->readPixelsWillPayForYFlip(target, left, top,
                                                 width, height, config,
                                                 rowBytes);
    bool alphaConversion = (!GrPixelConfigIsUnpremultiplied(target->config()) &&
                            GrPixelConfigIsUnpremultiplied(config));

    if (NULL == src && alphaConversion) {
        // we should fallback to cpu conversion here. This could happen when
        // we were given an external render target by the client that is not
        // also a texture (e.g. FBO 0 in GL)
        return false;
    }
    // we draw to a scratch texture if any of these conversion are applied
    GrAutoScratchTexture ast;
    if (flipY || swapRAndB || alphaConversion) {
        GrAssert(NULL != src);
        if (swapRAndB) {
            config = GrPixelConfigSwapRAndB(config);
            GrAssert(kUnknown_GrPixelConfig != config);
        }
        // Make the scratch a render target because we don't have a robust
        // readTexturePixels as of yet (it calls this function).
        const GrTextureDesc desc = {
            kRenderTarget_GrTextureFlagBit,
            width, height,
            config,
            0 // samples
        };

        // When a full readback is faster than a partial we could always make
        // the scratch exactly match the passed rect. However, if we see many
        // different size rectangles we will trash our texture cache and pay the
        // cost of creating and destroying many textures. So, we only request
        // an exact match when the caller is reading an entire RT.
        ScratchTexMatch match = kApprox_ScratchTexMatch;
        if (0 == left &&
            0 == top &&
            target->width() == width &&
            target->height() == height &&
            fGpu->fullReadPixelsIsFasterThanPartial()) {
            match = kExact_ScratchTexMatch;
        }
        ast.set(this, desc, match);
        GrTexture* texture = ast.texture();
        if (!texture) {
            return false;
        }
        target = texture->asRenderTarget();
        GrAssert(NULL != target);

        // draw src into the scratch RT, applying flip/swap/premul as we go
        GrDrawTarget::AutoStateRestore asr(fGpu,
                                           GrDrawTarget::kReset_ASRInit);
        GrDrawState* drawState = fGpu->drawState();
        drawState->setRenderTarget(target);

        GrMatrix matrix;
        if (flipY) {
            matrix.setTranslate(SK_Scalar1 * left,
                                SK_Scalar1 * (top + height));
            matrix.set(GrMatrix::kMScaleY, -GR_Scalar1);
        } else {
            matrix.setTranslate(SK_Scalar1 *left, SK_Scalar1 *top);
        }
        // normalize to src texture coordinates
        matrix.postIDiv(src->width(), src->height());
        drawState->sampler(0)->reset(matrix);
        drawState->sampler(0)->setRAndBSwap(swapRAndB);
        drawState->setTexture(0, src);
        GrRect rect;
        rect.setXYWH(0, 0, SK_Scalar1 * width, SK_Scalar1 * height);
        fGpu->drawSimpleRect(rect, NULL, 0x1);
        // the region of interest now starts at the scratch RT's origin
        left = 0;
        top = 0;
    }
    return fGpu->readPixels(target,
                            left, top, width, height,
                            config, buffer, rowBytes, flipY);
}

// Resolves an MSAA render target so its contents can be read/sampled.
void GrContext::resolveRenderTarget(GrRenderTarget* target) {
    GrAssert(target);
    ASSERT_OWNED_RESOURCE(target);
    // In the future we may track whether there are any pending draws to this
// target. We don't today so we always perform a flush. We don't promise
    // this to our clients, though.
    this->flush();
    fGpu->resolveRenderTarget(target);
}

// Copies src into dst by drawing a full-texture quad into dst.
void GrContext::copyTexture(GrTexture* src, GrRenderTarget* dst) {
    if (NULL == src || NULL == dst) {
        return;
    }
    ASSERT_OWNED_RESOURCE(src);

    GrDrawTarget::AutoStateRestore asr(fGpu, GrDrawTarget::kReset_ASRInit);
    GrDrawState* drawState = fGpu->drawState();
    drawState->setRenderTarget(dst);
    GrMatrix sampleM;
    // normalize sample coords to the source texture
    sampleM.setIDiv(src->width(), src->height());
    drawState->setTexture(0, src);
    drawState->sampler(0)->reset(sampleM);
    SkRect rect = SkRect::MakeXYWH(0, 0,
                                   SK_Scalar1 * src->width(),
                                   SK_Scalar1 * src->height());
    fGpu->drawSimpleRect(rect, NULL, 1 << 0);
}

// Writes a rect of pixels into a render target (NULL target means the
// context's current RT). Takes the direct texture-upload path when possible;
// otherwise converts premul on the CPU if the GPU can't, then uploads to a
// scratch texture and draws it into the target.
void GrContext::internalWriteRenderTargetPixels(GrRenderTarget* target,
                                                int left, int top,
                                                int width, int height,
                                                GrPixelConfig config,
                                                const void* buffer,
                                                size_t rowBytes,
                                                uint32_t flags) {
    SK_TRACE_EVENT0("GrContext::writeRenderTargetPixels");
    ASSERT_OWNED_RESOURCE(target);

    if (NULL == target) {
        target = fDrawState->getRenderTarget();
        if (NULL == target) {
            return;
        }
    }

    // TODO: when underlying api has a direct way to do this we should use it
    // (e.g. glDrawPixels on desktop GL).

    // If the RT is also a texture and we don't have to do PM/UPM conversion
    // then take the texture path, which we expect to be at least as fast or
    // faster since it doesn't use an intermediate texture as we do below.

#if !GR_MAC_BUILD
    // At least some drivers on the Mac get confused when glTexImage2D is called
    // on a texture attached to an FBO. The FBO still sees the old image. TODO:
    // determine what OS versions and/or HW is affected.
    if (NULL != target->asTexture() &&
        GrPixelConfigIsUnpremultiplied(target->config()) ==
        GrPixelConfigIsUnpremultiplied(config)) {

        this->internalWriteTexturePixels(target->asTexture(),
                                         left, top, width, height,
                                         config, buffer, rowBytes, flags);
        return;
    }
#endif
    if (!GrPixelConfigIsUnpremultiplied(target->config()) &&
        GrPixelConfigIsUnpremultiplied(config) &&
        !fGpu->canPreserveReadWriteUnpremulPixels()) {
        SkCanvas::Config8888 srcConfig8888, dstConfig8888;
        if (!grconfig_to_config8888(config, &srcConfig8888) ||
            !grconfig_to_config8888(target->config(), &dstConfig8888)) {
            return;
        }
        // allocate a tmp buffer and sw convert the pixels to premul
        SkAutoSTMalloc<128 * 128, uint32_t> tmpPixels(width * height);
        const uint32_t* src = reinterpret_cast<const uint32_t*>(buffer);
        SkConvertConfig8888Pixels(tmpPixels.get(), 4 * width, dstConfig8888,
                                  src, rowBytes, srcConfig8888,
                                  width, height);
        // upload the already premul pixels
        this->internalWriteRenderTargetPixels(target,
                                              left, top,
                                              width, height,
                                              target->config(),
                                              tmpPixels, 4 * width, flags);
        return;
    }

    // upload with R/B swapped if the GPU prefers that layout
    bool swapRAndB = fGpu->preferredReadPixelsConfig(config) ==
                     GrPixelConfigSwapRAndB(config);
    if (swapRAndB) {
        config = GrPixelConfigSwapRAndB(config);
    }

    const GrTextureDesc desc = {
        kNone_GrTextureFlags, width, height, config, 0
    };
    GrAutoScratchTexture ast(this, desc);
    GrTexture* texture = ast.texture();
    if (NULL == texture) {
        return;
    }
    // stage the pixels in the scratch texture, then draw it into the target
    this->internalWriteTexturePixels(texture, 0, 0, width, height,
                                     config, buffer, rowBytes, flags);

    GrDrawTarget::AutoStateRestore asr(fGpu, GrDrawTarget::kReset_ASRInit);
    GrDrawState* drawState = fGpu->drawState();

    GrMatrix matrix;
    matrix.setTranslate(GrIntToScalar(left), GrIntToScalar(top));
    drawState->setViewMatrix(matrix);
drawState->setRenderTarget(target);
    drawState->setTexture(0, texture);

    // normalize sampler coords to the scratch texture
    matrix.setIDiv(texture->width(), texture->height());
    drawState->sampler(0)->reset(GrSamplerState::kClamp_WrapMode,
                                 GrSamplerState::kNearest_Filter,
                                 matrix);
    drawState->sampler(0)->setRAndBSwap(swapRAndB);

    GrVertexLayout layout = GrDrawTarget::StagePosAsTexCoordVertexLayoutBit(0);
    static const int VCOUNT = 4;
    // TODO: Use GrGpu::drawRect here
    GrDrawTarget::AutoReleaseGeometry geo(fGpu, layout, VCOUNT, 0);
    if (!geo.succeeded()) {
        GrPrintf("Failed to get space for vertices!\n");
        return;
    }
    ((GrPoint*)geo.vertices())->setIRectFan(0, 0, width, height);
    fGpu->drawNonIndexed(kTriangleFan_PrimitiveType, 0, VCOUNT);
}
////////////////////////////////////////////////////////////////////////////////

// Copies the paint's textures, samplers, masks, color, blend, and state bits
// into the context's shared draw state, disabling stages the paint doesn't use.
void GrContext::setPaint(const GrPaint& paint) {

    for (int i = 0; i < GrPaint::kMaxTextures; ++i) {
        int s = i + GrPaint::kFirstTextureStage;
        fDrawState->setTexture(s, paint.getTexture(i));
        ASSERT_OWNED_RESOURCE(paint.getTexture(i));
        if (paint.getTexture(i)) {
            *fDrawState->sampler(s) = paint.getTextureSampler(i);
        }
    }

    fDrawState->setFirstCoverageStage(GrPaint::kFirstMaskStage);

    for (int i = 0; i < GrPaint::kMaxMasks; ++i) {
        int s = i + GrPaint::kFirstMaskStage;
        fDrawState->setTexture(s, paint.getMask(i));
        ASSERT_OWNED_RESOURCE(paint.getMask(i));
        if (paint.getMask(i)) {
            *fDrawState->sampler(s) = paint.getMaskSampler(i);
        }
    }

    // disable all stages not accessible via the paint
    for (int s = GrPaint::kTotalStages; s < GrDrawState::kNumStages; ++s) {
        fDrawState->setTexture(s, NULL);
    }

    fDrawState->setColor(paint.fColor);

    if (paint.fDither) {
        fDrawState->enableState(GrDrawState::kDither_StateBit);
    } else {
        fDrawState->disableState(GrDrawState::kDither_StateBit);
    }
    if (paint.fAntiAlias) {
        fDrawState->enableState(GrDrawState::kHWAntialias_StateBit);
    } else {
        fDrawState->disableState(GrDrawState::kHWAntialias_StateBit);
    }
    if (paint.fColorMatrixEnabled) {
        fDrawState->enableState(GrDrawState::kColorMatrix_StateBit);
        fDrawState->setColorMatrix(paint.fColorMatrix);
    } else {
        fDrawState->disableState(GrDrawState::kColorMatrix_StateBit);
    }
    fDrawState->setBlendFunc(paint.fSrcBlendCoeff, paint.fDstBlendCoeff);
    fDrawState->setColorFilter(paint.fColorFilterColor, paint.fColorFilterXfermode);
    fDrawState->setCoverage(paint.fCoverage);
#if GR_DEBUG
    if ((paint.getActiveMaskStageMask() || 0xff != paint.fCoverage) &&
        !fGpu->canApplyCoverage()) {
        GrPrintf("Partial pixel coverage will be incorrectly blended.\n");
    }
#endif
}

// Applies the paint to the draw state and returns the draw target to use:
// the GPU directly for unbuffered draws, or the in-order draw buffer for
// buffered draws. Switching category flushes the buffer first.
GrDrawTarget* GrContext::prepareToDraw(const GrPaint& paint,
                                       DrawCategory category) {
    if (category != fLastDrawCategory) {
        this->flushDrawBuffer();
        fLastDrawCategory = category;
    }
    this->setPaint(paint);
    GrDrawTarget* target = fGpu;
    switch (category) {
        case kUnbuffered_DrawCategory:
            target = fGpu;
            break;
        case kBuffered_DrawCategory:
            target = fDrawBuffer;
            fDrawBuffer->setClip(fGpu->getClip());
            break;
        default:
            GrCrash("Unexpected DrawCategory.");
            break;
    }
    return target;
}

// Returns a path renderer able to draw the path, or NULL if none can.
// Lazily creates the path renderer chain on first use.
GrPathRenderer* GrContext::getPathRenderer(const GrPath& path,
                                           GrPathFill fill,
                                           const GrDrawTarget* target,
                                           bool antiAlias) {
    if (NULL == fPathRendererChain) {
        fPathRendererChain =
            new GrPathRendererChain(this, GrPathRendererChain::kNone_UsageFlag);
    }
    return fPathRendererChain->getPathRenderer(path, fill, target, antiAlias);
}

////////////////////////////////////////////////////////////////////////////////

void
GrContext::setRenderTarget(GrRenderTarget* target) { 1978 ASSERT_OWNED_RESOURCE(target); 1979 if (fDrawState->getRenderTarget() != target) { 1980 this->flush(false); 1981 fDrawState->setRenderTarget(target); 1982 } 1983} 1984 1985GrRenderTarget* GrContext::getRenderTarget() { 1986 return fDrawState->getRenderTarget(); 1987} 1988 1989const GrRenderTarget* GrContext::getRenderTarget() const { 1990 return fDrawState->getRenderTarget(); 1991} 1992 1993const GrMatrix& GrContext::getMatrix() const { 1994 return fDrawState->getViewMatrix(); 1995} 1996 1997void GrContext::setMatrix(const GrMatrix& m) { 1998 fDrawState->setViewMatrix(m); 1999} 2000 2001void GrContext::concatMatrix(const GrMatrix& m) const { 2002 fDrawState->preConcatViewMatrix(m); 2003} 2004 2005static inline intptr_t setOrClear(intptr_t bits, int shift, intptr_t pred) { 2006 intptr_t mask = 1 << shift; 2007 if (pred) { 2008 bits |= mask; 2009 } else { 2010 bits &= ~mask; 2011 } 2012 return bits; 2013} 2014 2015void GrContext::resetStats() { 2016 fGpu->resetStats(); 2017} 2018 2019const GrGpuStats& GrContext::getStats() const { 2020 return fGpu->getStats(); 2021} 2022 2023void GrContext::printStats() const { 2024 fGpu->printStats(); 2025} 2026 2027GrContext::GrContext(GrGpu* gpu) { 2028 fGpu = gpu; 2029 fGpu->ref(); 2030 fGpu->setContext(this); 2031 2032 fDrawState = new GrDrawState(); 2033 fGpu->setDrawState(fDrawState); 2034 2035 fPathRendererChain = NULL; 2036 2037 fTextureCache = new GrResourceCache(MAX_TEXTURE_CACHE_COUNT, 2038 MAX_TEXTURE_CACHE_BYTES); 2039 fFontCache = new GrFontCache(fGpu); 2040 2041 fLastDrawCategory = kUnbuffered_DrawCategory; 2042 2043 fDrawBuffer = NULL; 2044 fDrawBufferVBAllocPool = NULL; 2045 fDrawBufferIBAllocPool = NULL; 2046 2047 fAAFillRectIndexBuffer = NULL; 2048 fAAStrokeRectIndexBuffer = NULL; 2049 2050 this->setupDrawBuffer(); 2051} 2052 2053void GrContext::setupDrawBuffer() { 2054 2055 GrAssert(NULL == fDrawBuffer); 2056 GrAssert(NULL == fDrawBufferVBAllocPool); 2057 
    GrAssert(NULL == fDrawBufferIBAllocPool);

    // Pool sizes come from the DRAW_BUFFER_*_POOL_* constants at the top of
    // the file; the IB pool is sized to zero unless DEFER_PATHS is on.
    fDrawBufferVBAllocPool =
        new GrVertexBufferAllocPool(fGpu, false,
                                    DRAW_BUFFER_VBPOOL_BUFFER_SIZE,
                                    DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS);
    fDrawBufferIBAllocPool =
        new GrIndexBufferAllocPool(fGpu, false,
                                   DRAW_BUFFER_IBPOOL_BUFFER_SIZE,
                                   DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS);

    fDrawBuffer = new GrInOrderDrawBuffer(fGpu,
                                          fDrawBufferVBAllocPool,
                                          fDrawBufferIBAllocPool);
    fDrawBuffer->setQuadIndexBuffer(this->getQuadIndexBuffer());
    // When the buffer fills it flushes itself into the real GPU target.
    fDrawBuffer->setAutoFlushTarget(fGpu);
    fDrawBuffer->setDrawState(fDrawState);
}

// Returns the target text draws should go to. DEFER_TEXT_RENDERING is a
// compile-time switch (0 in this revision, so text is unbuffered).
GrDrawTarget* GrContext::getTextTarget(const GrPaint& paint) {
#if DEFER_TEXT_RENDERING
    return prepareToDraw(paint, kBuffered_DrawCategory);
#else
    return prepareToDraw(paint, kUnbuffered_DrawCategory);
#endif
}

const GrIndexBuffer* GrContext::getQuadIndexBuffer() const {
    return fGpu->getQuadIndexBuffer();
}

// Two-pass separable Gaussian blur of srcTexture over 'rect'.
// temp1/temp2 are scratch textures this function allocates via set();
// temp2 may be NULL when ping-ponging back into srcTexture is acceptable.
// Returns the texture holding the result (one of src/temp1/temp2).
// Render target and clip are saved here and restored before returning.
GrTexture* GrContext::gaussianBlur(GrTexture* srcTexture,
                                   GrAutoScratchTexture* temp1,
                                   GrAutoScratchTexture* temp2,
                                   const SkRect& rect,
                                   float sigmaX, float sigmaY) {
    ASSERT_OWNED_RESOURCE(srcTexture);
    GrRenderTarget* oldRenderTarget = this->getRenderTarget();
    GrClip oldClip = this->getClip();
    GrTexture* origTexture = srcTexture;
    // Identity view matrix for the internal draw passes.
    GrAutoMatrix avm(this, GrMatrix::I());
    SkIRect clearRect;
    // adjust_sigma (defined earlier in this file, outside this chunk)
    // presumably clamps sigma to MAX_BLUR_SIGMA by folding excess into a
    // power-of-two downscale factor -- TODO confirm against its definition.
    int scaleFactorX, halfWidthX, kernelWidthX;
    int scaleFactorY, halfWidthY, kernelWidthY;
    sigmaX = adjust_sigma(sigmaX, &scaleFactorX, &halfWidthX, &kernelWidthX);
    sigmaY = adjust_sigma(sigmaY, &scaleFactorY, &halfWidthY, &kernelWidthY);

    // Snap the working rect outward to integer bounds in the downscaled
    // space, then map back so all passes use aligned rectangles.
    SkRect srcRect(rect);
    scale_rect(&srcRect, 1.0f / scaleFactorX, 1.0f / scaleFactorY);
    srcRect.roundOut();
    scale_rect(&srcRect, scaleFactorX, scaleFactorY);
    this->setClip(srcRect);

    const GrTextureDesc desc = {
        kRenderTarget_GrTextureFlagBit |
        kNoStencil_GrTextureFlagBit,
        SkScalarFloorToInt(srcRect.width()),
        SkScalarFloorToInt(srcRect.height()),
        kRGBA_8888_PM_GrPixelConfig,
        0 // samples
    };

    // Allocate the scratch render targets; temp2 is optional.
    temp1->set(this, desc);
    if (temp2) temp2->set(this, desc);

    GrTexture* dstTexture = temp1->texture();
    GrPaint paint;
    paint.reset();
    paint.textureSampler(0)->setFilter(GrSamplerState::kBilinear_Filter);

    // Downsample by halving per axis until each axis reaches its scale
    // factor (i doubles each pass, so this runs log2(max scale) times).
    for (int i = 1; i < scaleFactorX || i < scaleFactorY; i *= 2) {
        paint.textureSampler(0)->matrix()->setIDiv(srcTexture->width(),
                                                   srcTexture->height());
        this->setRenderTarget(dstTexture->asRenderTarget());
        SkRect dstRect(srcRect);
        // Halve only the axes that still need shrinking this pass.
        scale_rect(&dstRect, i < scaleFactorX ? 0.5f : 1.0f,
                             i < scaleFactorY ? 0.5f : 1.0f);
        paint.setTexture(0, srcTexture);
        this->drawRectToRect(paint, dstRect, srcRect);
        srcRect = dstRect;
        // Ping-pong src and dst between passes.
        SkTSwap(srcTexture, dstTexture);
        // If temp2 is non-NULL, don't render back to origTexture
        if (temp2 && dstTexture == origTexture) dstTexture = temp2->texture();
    }

    // Horizontal convolution pass (skipped entirely when sigmaX == 0).
    if (sigmaX > 0.0f) {
        SkAutoTMalloc<float> kernelStorageX(kernelWidthX);
        float* kernelX = kernelStorageX.get();
        build_kernel(sigmaX, kernelX, kernelWidthX);

        if (scaleFactorX > 1) {
            // Clear out a halfWidth to the right of the srcRect to prevent the
            // X convolution from reading garbage.
            clearRect = SkIRect::MakeXYWH(
                srcRect.fRight, srcRect.fTop, halfWidthX, srcRect.height());
            this->clear(&clearRect, 0x0);
        }

        this->setRenderTarget(dstTexture->asRenderTarget());
        convolve(fGpu, srcTexture, srcRect, kernelX, kernelWidthX,
                 GrSamplerState::kX_FilterDirection);
        // Result of this pass becomes the source of the next; keep the
        // ping-pong away from origTexture when a second scratch exists.
        SkTSwap(srcTexture, dstTexture);
        if (temp2 && dstTexture == origTexture) dstTexture = temp2->texture();
    }

    // Vertical convolution pass (skipped entirely when sigmaY == 0).
    if (sigmaY > 0.0f) {
        SkAutoTMalloc<float> kernelStorageY(kernelWidthY);
        float* kernelY = kernelStorageY.get();
        build_kernel(sigmaY, kernelY, kernelWidthY);

        // Also needed after an X pass, since the convolve draw may have
        // written outside srcRect's bottom edge.
        if (scaleFactorY > 1 || sigmaX > 0.0f) {
            // Clear out a halfWidth below the srcRect to prevent the Y
            // convolution from reading garbage.
            clearRect = SkIRect::MakeXYWH(
                srcRect.fLeft, srcRect.fBottom, srcRect.width(), halfWidthY);
            this->clear(&clearRect, 0x0);
        }

        this->setRenderTarget(dstTexture->asRenderTarget());
        convolve(fGpu, srcTexture, srcRect, kernelY, kernelWidthY,
                 GrSamplerState::kY_FilterDirection);
        SkTSwap(srcTexture, dstTexture);
        if (temp2 && dstTexture == origTexture) dstTexture = temp2->texture();
    }

    // Upsample back to the original resolution if we downscaled above.
    if (scaleFactorX > 1 || scaleFactorY > 1) {
        // Clear one pixel to the right and below, to accommodate bilinear
        // upsampling.
        clearRect = SkIRect::MakeXYWH(
            srcRect.fLeft, srcRect.fBottom, srcRect.width() + 1, 1);
        this->clear(&clearRect, 0x0);
        clearRect = SkIRect::MakeXYWH(
            srcRect.fRight, srcRect.fTop, 1, srcRect.height());
        this->clear(&clearRect, 0x0);
        // FIXME: This should be mitchell, not bilinear.
        paint.textureSampler(0)->setFilter(GrSamplerState::kBilinear_Filter);
        paint.textureSampler(0)->matrix()->setIDiv(srcTexture->width(),
                                                   srcTexture->height());
        this->setRenderTarget(dstTexture->asRenderTarget());
        paint.setTexture(0, srcTexture);
        SkRect dstRect(srcRect);
        // Single bilinear draw scales back up by the full factors.
        scale_rect(&dstRect, scaleFactorX, scaleFactorY);
        this->drawRectToRect(paint, dstRect, srcRect);
        srcRect = dstRect;
        SkTSwap(srcTexture, dstTexture);
    }
    // Restore caller state and hand back whichever texture holds the
    // final result of the ping-pong.
    this->setRenderTarget(oldRenderTarget);
    this->setClip(oldClip);
    return srcTexture;
}

// Separable morphology (dilate/erode per 'filter') over 'rect'.
// Preconditions visible here: temp1 must be non-NULL when radius.fWidth > 0
// and temp2 non-NULL when radius.fHeight > 0 -- both are dereferenced
// unconditionally in their branches. Returns the texture with the result.
// NOTE(review): only the X pass clears the strip below 'rect'; the Y pass
// output is not cleared afterward -- presumably callers read only 'rect',
// but confirm against apply_morphology's write extent.
GrTexture* GrContext::applyMorphology(GrTexture* srcTexture,
                                      const GrRect& rect,
                                      GrTexture* temp1, GrTexture* temp2,
                                      GrSamplerState::Filter filter,
                                      SkISize radius) {
    ASSERT_OWNED_RESOURCE(srcTexture);
    // Save render target and clip; both restored before returning.
    GrRenderTarget* oldRenderTarget = this->getRenderTarget();
    GrAutoMatrix avm(this, GrMatrix::I());
    GrClip oldClip = this->getClip();
    this->setClip(GrRect::MakeWH(srcTexture->width(), srcTexture->height()));
    if (radius.fWidth > 0) {
        this->setRenderTarget(temp1->asRenderTarget());
        apply_morphology(fGpu, srcTexture, rect, radius.fWidth, filter,
                         GrSamplerState::kX_FilterDirection);
        // Clear below the rect so the Y pass doesn't read garbage there.
        SkIRect clearRect = SkIRect::MakeXYWH(rect.fLeft, rect.fBottom,
                                              rect.width(), radius.fHeight);
        this->clear(&clearRect, 0x0);
        srcTexture = temp1;
    }
    if (radius.fHeight > 0) {
        this->setRenderTarget(temp2->asRenderTarget());
        apply_morphology(fGpu, srcTexture, rect, radius.fHeight, filter,
                         GrSamplerState::kY_FilterDirection);
        srcTexture = temp2;
    }
    this->setRenderTarget(oldRenderTarget);
    this->setClip(oldClip);
    return srcTexture;
}

///////////////////////////////////////////////////////////////////////////////