GrContext.cpp revision fb4ce6fe9f3f09bf9828aa50b36287f7d22bb78b
/*
 * Copyright 2011 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrContext.h"

#include "GrBufferAllocPool.h"
#include "GrClipIterator.h"
#include "GrGpu.h"
#include "GrIndexBuffer.h"
#include "GrInOrderDrawBuffer.h"
#include "GrPathRenderer.h"
#include "GrPathUtils.h"
#include "GrResourceCache.h"
#include "GrStencilBuffer.h"
#include "GrTextStrike.h"
#include "SkTLazy.h"
#include "SkTrace.h"

// Compile-time switches: route text / path drawing through the deferred
// (in-order) draw buffer instead of issuing to the GPU immediately.
#define DEFER_TEXT_RENDERING 1

#define DEFER_PATHS 1

#define BATCH_RECT_TO_RECT (1 && !GR_STATIC_RECT_VB)

// Gaussian sigmas larger than this are handled by repeatedly halving the
// image before blurring (see adjust_sigma below).
#define MAX_BLUR_SIGMA 4.0f


// When we're using coverage AA but the blend is incompatible (given gpu
// limitations) should we disable AA or draw wrong?
#define DISABLE_COVERAGE_AA_FOR_BLEND 1

// Default texture-cache budget: max entry count and max total bytes.
static const size_t MAX_TEXTURE_CACHE_COUNT = 256;
static const size_t MAX_TEXTURE_CACHE_BYTES = 16 * 1024 * 1024;

// Sizing for the vertex/index pools backing the deferred draw buffer.
static const size_t DRAW_BUFFER_VBPOOL_BUFFER_SIZE = 1 << 13;
static const int DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS = 4;

// path rendering is the only thing we defer today that uses non-static indices
static const size_t DRAW_BUFFER_IBPOOL_BUFFER_SIZE = DEFER_PATHS ? 1 << 11 : 0;
static const int DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS = DEFER_PATHS ?
                                                       4 : 0;

// Sanity check that a resource handed to this context was created by it
// (NULL is allowed so callers don't have to pre-check).
#define ASSERT_OWNED_RESOURCE(R) GrAssert(!(R) || (R)->getContext() == this)

// Factory: create the GPU backend for the requested engine and wrap it in a
// new context. Returns NULL if the backend could not be created.
GrContext* GrContext::Create(GrEngine engine,
                             GrPlatform3DContext context3D) {
    GrContext* ctx = NULL;
    GrGpu* fGpu = GrGpu::Create(engine, context3D);
    if (NULL != fGpu) {
        ctx = new GrContext(fGpu);
        // the context ctor took its own ref; drop the creation ref
        fGpu->unref();
    }
    return ctx;
}

GrContext::~GrContext() {
    // flush any pending deferred work before tearing down the buffers
    this->flush();
    delete fTextureCache;
    delete fFontCache;
    delete fDrawBuffer;
    delete fDrawBufferVBAllocPool;
    delete fDrawBufferIBAllocPool;

    GrSafeUnref(fAAFillRectIndexBuffer);
    GrSafeUnref(fAAStrokeRectIndexBuffer);
    fGpu->unref();
    GrSafeUnref(fPathRendererChain);
}

// The underlying 3D API context was lost: drop everything that referenced it,
// then rebuild the deferred draw buffer so this context stays usable.
void GrContext::contextLost() {
    contextDestroyed();
    this->setupDrawBuffer();
}

void GrContext::contextDestroyed() {
    // abandon first so destructors
    // don't try to free the resources in the API.
    fGpu->abandonResources();

    // a path renderer may be holding onto resources that
    // are now unusable
    GrSafeSetNull(fPathRendererChain);

    delete fDrawBuffer;
    fDrawBuffer = NULL;

    delete fDrawBufferVBAllocPool;
    fDrawBufferVBAllocPool = NULL;

    delete fDrawBufferIBAllocPool;
    fDrawBufferIBAllocPool = NULL;

    GrSafeSetNull(fAAFillRectIndexBuffer);
    GrSafeSetNull(fAAStrokeRectIndexBuffer);

    fTextureCache->removeAll();
    fFontCache->freeAll();
    fGpu->markContextDirty();
}

// External code may have touched the 3D API state; invalidate cached state.
void GrContext::resetContext() {
    fGpu->markContextDirty();
}

// Free all GPU resources that can be recreated on demand (unlike
// contextDestroyed(), the resources are properly released, not abandoned).
void GrContext::freeGpuResources() {
    this->flush();
    fTextureCache->removeAll();
    fFontCache->freeAll();
    // a path renderer may be holding onto resources
    GrSafeSetNull(fPathRendererChain);
}

size_t GrContext::getGpuTextureCacheBytes() const {
    return fTextureCache->getCachedResourceBytes();
}

////////////////////////////////////////////////////////////////////////////////

// Build the vertex-layout bits for the paint's active stages. Stage i uses
// explicit per-vertex texture coordinates when hasTexCoords[i] is set,
// otherwise the vertex position doubles as the texture coordinate.
int GrContext::PaintStageVertexLayoutBits(
                            const GrPaint&
                            paint,
                            const bool hasTexCoords[GrPaint::kTotalStages]) {
    int stageMask = paint.getActiveStageMask();
    int layout = 0;
    for (int i = 0; i < GrPaint::kTotalStages; ++i) {
        if ((1 << i) & stageMask) {
            if (NULL != hasTexCoords && hasTexCoords[i]) {
                layout |= GrDrawTarget::StageTexCoordVertexLayoutBit(i, i);
            } else {
                layout |= GrDrawTarget::StagePosAsTexCoordVertexLayoutBit(i);
            }
        }
    }
    return layout;
}


////////////////////////////////////////////////////////////////////////////////

// Bits packed into v[3] of a resource-cache key (see gen_*_key_values below).
enum {
    // flags for textures
    kNPOTBit            = 0x1,
    kFilterBit          = 0x2,
    kScratchBit         = 0x4,

    // resource type
    kTextureBit         = 0x8,
    kStencilBufferBit   = 0x10
};

GrTexture* GrContext::TextureCacheEntry::texture() const {
    if (NULL == fEntry) {
        return NULL;
    } else {
        return (GrTexture*) fEntry->resource();
    }
}

namespace {
// returns true if this is a "special" texture because of gpu NPOT limitations
// Key layout: v[0..1] = 64-bit client key, v[2] = width|height (16 bits each),
// v[3] = sample count (high byte) plus the flag bits from the enum above.
bool gen_texture_key_values(const GrGpu* gpu,
                            const GrSamplerState* sampler,
                            GrContext::TextureKey clientKey,
                            int width,
                            int height,
                            int sampleCnt,
                            bool scratch,
                            uint32_t v[4]) {
    GR_STATIC_ASSERT(sizeof(GrContext::TextureKey) == sizeof(uint64_t));
    // we assume we only need 16 bits of width and height
    // assert that texture creation will fail anyway if this assumption
    // would cause key collisions.
    GrAssert(gpu->getCaps().fMaxTextureSize <= SK_MaxU16);
    v[0] = clientKey & 0xffffffffUL;
    v[1] = (clientKey >> 32) & 0xffffffffUL;
    v[2] = width | (height << 16);

    v[3] = (sampleCnt << 24);
    GrAssert(sampleCnt >= 0 && sampleCnt < 256);

    if (!gpu->getCaps().fNPOTTextureTileSupport) {
        bool isPow2 = GrIsPow2(width) && GrIsPow2(height);

        // tiling (any non-clamp wrap mode) of an NPOT texture needs the
        // special resampled-to-POT path on hardware without NPOT-tile support
        bool tiled = NULL != sampler &&
                     ((sampler->getWrapX() != GrSamplerState::kClamp_WrapMode) ||
                      (sampler->getWrapY() != GrSamplerState::kClamp_WrapMode));

        if (tiled && !isPow2) {
            v[3] |= kNPOTBit;
            if (GrSamplerState::kNearest_Filter != sampler->getFilter()) {
                v[3] |= kFilterBit;
            }
        }
    }

    if (scratch) {
        v[3] |= kScratchBit;
    }

    v[3] |= kTextureBit;

    return v[3] & kNPOTBit;
}

// we should never have more than one stencil buffer with same combo of
// (width,height,samplecount)
void gen_stencil_key_values(int width, int height,
                            int sampleCnt, uint32_t v[4]) {
    v[0] = width;
    v[1] = height;
    v[2] = sampleCnt;
    v[3] = kStencilBufferBit;
}

void gen_stencil_key_values(const GrStencilBuffer* sb,
                            uint32_t v[4]) {
    gen_stencil_key_values(sb->width(), sb->height(),
                           sb->numSamples(), v);
}

// Fill 'kernel' (kernelWidth taps) with a normalized 1D Gaussian of the
// given sigma, centered on the middle tap.
void build_kernel(float sigma, float* kernel, int kernelWidth) {
    int halfWidth = (kernelWidth - 1) / 2;
    float sum = 0.0f;
    float denom = 1.0f / (2.0f * sigma * sigma);
    for (int i = 0; i < kernelWidth; ++i) {
        float x = static_cast<float>(i - halfWidth);
        // Note that the constant term (1/(sqrt(2*pi*sigma^2)) of the Gaussian
        // is dropped here, since we renormalize the kernel below.
        kernel[i] = sk_float_exp(- x * x * denom);
        sum += kernel[i];
    }
    // Normalize the kernel
    float scale = 1.0f / sum;
    for (int i = 0; i < kernelWidth; ++i)
        kernel[i] *= scale;
}

// Scale all four edges of 'rect' in place (x edges by xScale, y by yScale).
void scale_rect(SkRect* rect, float xScale, float yScale) {
    rect->fLeft *= xScale;
    rect->fTop *= yScale;
    rect->fRight *= xScale;
    rect->fBottom *= yScale;
}

// Halve sigma (doubling *scaleFactor) until it is <= MAX_BLUR_SIGMA, then
// derive the kernel half-width (3 sigma) and full width. Returns the
// adjusted sigma to use after downscaling by *scaleFactor.
float adjust_sigma(float sigma, int *scaleFactor, int *halfWidth,
                   int *kernelWidth) {
    *scaleFactor = 1;
    while (sigma > MAX_BLUR_SIGMA) {
        *scaleFactor *= 2;
        sigma *= 0.5f;
    }
    *halfWidth = static_cast<int>(ceilf(sigma * 3.0f));
    *kernelWidth = *halfWidth * 2 + 1;
    return sigma;
}

// Run one erode/dilate pass over 'texture' into the current render target,
// restricted to 'rect', along the given direction. Resets the draw state
// except for the render target (restored on return by the ASR).
void apply_morphology(GrGpu* gpu,
                      GrTexture* texture,
                      const SkRect& rect,
                      int radius,
                      GrSamplerState::Filter filter,
                      GrSamplerState::FilterDirection direction) {
    GrAssert(filter == GrSamplerState::kErode_Filter ||
             filter == GrSamplerState::kDilate_Filter);

    GrDrawTarget::AutoStateRestore asr(gpu);
    GrDrawState* drawState = gpu->drawState();
    GrRenderTarget* target = drawState->getRenderTarget();
    drawState->reset();
    drawState->setRenderTarget(target);
    GrMatrix sampleM;
    // map device coords to [0,1] texture coords
    sampleM.setIDiv(texture->width(), texture->height());
    drawState->sampler(0)->reset(GrSamplerState::kClamp_WrapMode, filter,
                                 sampleM);
    drawState->sampler(0)->setMorphologyRadius(radius);
    drawState->sampler(0)->setFilterDirection(direction);
    drawState->setTexture(0, texture);
    gpu->drawSimpleRect(rect, NULL, 1 << 0);
}

// Run one 1D convolution pass (kernelWidth taps) over 'texture' into the
// current render target, along the given direction.
void convolve(GrGpu* gpu,
              GrTexture* texture,
              const SkRect& rect,
              const float* kernel,
              int kernelWidth,
              GrSamplerState::FilterDirection direction) {
    GrDrawTarget::AutoStateRestore asr(gpu);
    GrDrawState* drawState = gpu->drawState();
    GrRenderTarget* target = drawState->getRenderTarget();
    drawState->reset();
    drawState->setRenderTarget(target);
    GrMatrix sampleM;
    sampleM.setIDiv(texture->width(), texture->height());
    drawState->sampler(0)->reset(GrSamplerState::kClamp_WrapMode,
                                 GrSamplerState::kConvolution_Filter,
                                 sampleM);
    drawState->sampler(0)->setConvolutionParams(kernelWidth, kernel);
    drawState->sampler(0)->setFilterDirection(direction);
    drawState->setTexture(0, texture);
    gpu->drawSimpleRect(rect, NULL, 1 << 0);
}

}

// Look up a client-keyed (non-scratch) texture; returns a locked entry or an
// entry whose texture() is NULL on miss.
GrContext::TextureCacheEntry GrContext::findAndLockTexture(
        TextureKey key,
        int width,
        int height,
        const GrSamplerState* sampler) {
    uint32_t v[4];
    gen_texture_key_values(fGpu, sampler, key, width, height, 0, false, v);
    GrResourceKey resourceKey(v);
    return TextureCacheEntry(fTextureCache->findAndLock(resourceKey,
                                            GrResourceCache::kNested_LockType));
}

bool GrContext::isTextureInCache(TextureKey key,
                                 int width,
                                 int height,
                                 const GrSamplerState* sampler) const {
    uint32_t v[4];
    gen_texture_key_values(fGpu, sampler, key, width, height, 0, false, v);
    GrResourceKey resourceKey(v);
    return fTextureCache->hasKey(resourceKey);
}

// Stencil buffers share the texture cache; they are keyed purely by
// (width, height, sample count).
GrResourceEntry* GrContext::addAndLockStencilBuffer(GrStencilBuffer* sb) {
    ASSERT_OWNED_RESOURCE(sb);
    uint32_t v[4];
    gen_stencil_key_values(sb, v);
    GrResourceKey resourceKey(v);
    return fTextureCache->createAndLock(resourceKey, sb);
}

GrStencilBuffer* GrContext::findStencilBuffer(int width, int height,
                                              int sampleCnt) {
    uint32_t v[4];
    gen_stencil_key_values(width, height, sampleCnt, v);
    GrResourceKey resourceKey(v);
    GrResourceEntry* entry = fTextureCache->findAndLock(resourceKey,
                                            GrResourceCache::kSingle_LockType);
    if (NULL != entry) {
        GrStencilBuffer* sb = (GrStencilBuffer*) entry->resource();
        return sb;
    } else {
        return NULL;
    }
}

void GrContext::unlockStencilBuffer(GrResourceEntry* sbEntry) {
    ASSERT_OWNED_RESOURCE(sbEntry->resource());
    fTextureCache->unlock(sbEntry);
}

// Nearest-neighbor CPU stretch of 'src' (srcW x srcH) into 'dst'
// (dstW x dstH), 'bpp' bytes per pixel, using 16.16 fixed-point stepping.
// Note: no filtering is performed (see the TODO in createAndLockTexture).
static void stretchImage(void* dst,
                         int dstW,
                         int dstH,
                         void* src,
                         int srcW,
                         int srcH,
                         int bpp) {
    GrFixed dx = (srcW << 16) / dstW;
    GrFixed dy = (srcH << 16) / dstH;

    // start half a step in so samples land at pixel centers
    GrFixed y = dy >> 1;

    int dstXLimit = dstW*bpp;
    for (int j = 0; j < dstH; ++j) {
        GrFixed x = dx >> 1;
        void* srcRow = (uint8_t*)src + (y>>16)*srcW*bpp;
        void* dstRow = (uint8_t*)dst + j*dstW*bpp;
        for (int i = 0; i < dstXLimit; i += bpp) {
            memcpy((uint8_t*) dstRow + i,
                   (uint8_t*) srcRow + (x>>16)*bpp,
                   bpp);
            x += dx;
        }
        y += dy;
    }
}

// Create (and lock) a client-keyed texture. If the key is "special" (NPOT
// texture that needs tiling on hardware without NPOT-tile support) the
// original data is first uploaded as a clamp-only texture and then resampled
// to a POT texture, via the GPU when a render target can be made, otherwise
// via the CPU stretchImage() fallback above.
GrContext::TextureCacheEntry GrContext::createAndLockTexture(
        TextureKey key,
        const GrSamplerState* sampler,
        const GrTextureDesc& desc,
        void* srcData,
        size_t rowBytes) {
    SK_TRACE_EVENT0("GrContext::createAndLockTexture");

#if GR_DUMP_TEXTURE_UPLOAD
    GrPrintf("GrContext::createAndLockTexture [%d %d]\n", desc.fWidth, desc.fHeight);
#endif

    TextureCacheEntry entry;
    uint32_t v[4];
    bool special = gen_texture_key_values(fGpu, sampler, key,
                                          desc.fWidth, desc.fHeight,
                                          desc.fSampleCnt, false, v);
    GrResourceKey resourceKey(v);

    if (special) {
        GrAssert(NULL != sampler);
        // find or create the clamp-mode version holding the original pixels
        TextureCacheEntry clampEntry = this->findAndLockTexture(key,
                                                                desc.fWidth,
                                                                desc.fHeight,
                                                                NULL);

        if (NULL == clampEntry.texture()) {
            clampEntry = this->createAndLockTexture(key, NULL, desc,
                                                    srcData, rowBytes);
            GrAssert(NULL != clampEntry.texture());
            if (NULL == clampEntry.texture()) {
                return entry;
            }
        }
        GrTextureDesc rtDesc = desc;
        rtDesc.fFlags =  rtDesc.fFlags |
                         kRenderTarget_GrTextureFlagBit |
                         kNoStencil_GrTextureFlagBit;
        // round up to POT, clamped to a minimum render-target size
        rtDesc.fWidth  = GrNextPow2(GrMax(desc.fWidth, 64));
        rtDesc.fHeight = GrNextPow2(GrMax(desc.fHeight, 64));

        GrTexture* texture = fGpu->createTexture(rtDesc, NULL, 0);

        if (NULL != texture) {
            // GPU path: draw the clamp texture into the POT render target
            GrDrawTarget::AutoStateRestore asr(fGpu);
            GrDrawState* drawState = fGpu->drawState();
            drawState->reset();
            drawState->setRenderTarget(texture->asRenderTarget());
            drawState->setTexture(0, clampEntry.texture());

            GrSamplerState::Filter filter;
            // if filtering is not desired then we want to ensure all
            // texels in the resampled image are copies of texels from
            // the original.
            if (GrSamplerState::kNearest_Filter == sampler->getFilter()) {
                filter = GrSamplerState::kNearest_Filter;
            } else {
                filter = GrSamplerState::kBilinear_Filter;
            }
            drawState->sampler(0)->reset(GrSamplerState::kClamp_WrapMode,
                                         filter);

            static const GrVertexLayout layout =
                                GrDrawTarget::StageTexCoordVertexLayoutBit(0,0);
            GrDrawTarget::AutoReleaseGeometry arg(fGpu, layout, 4, 0);

            if (arg.succeeded()) {
                GrPoint* verts = (GrPoint*) arg.vertices();
                // positions fan over the full target, tex coords over [0,1]
                verts[0].setIRectFan(0, 0,
                                     texture->width(),
                                     texture->height(),
                                     2*sizeof(GrPoint));
                verts[1].setIRectFan(0, 0, 1, 1, 2*sizeof(GrPoint));
                fGpu->drawNonIndexed(kTriangleFan_PrimitiveType,
                                     0, 4);
                entry.set(fTextureCache->createAndLock(resourceKey, texture));
            }
            texture->releaseRenderTarget();
        } else {
            // TODO: Our CPU stretch doesn't filter. But we create separate
            // stretched textures when the sampler state is either filtered or
            // not. Either implement filtered stretch blit on CPU or just create
            // one when FBO case fails.

            rtDesc.fFlags = kNone_GrTextureFlags;
            // no longer need to clamp at min RT size.
            rtDesc.fWidth  = GrNextPow2(desc.fWidth);
            rtDesc.fHeight = GrNextPow2(desc.fHeight);
            int bpp = GrBytesPerPixel(desc.fConfig);
            SkAutoSMalloc<128*128*4> stretchedPixels(bpp *
                                                     rtDesc.fWidth *
                                                     rtDesc.fHeight);
            stretchImage(stretchedPixels.get(), rtDesc.fWidth, rtDesc.fHeight,
                         srcData, desc.fWidth, desc.fHeight, bpp);

            size_t stretchedRowBytes = rtDesc.fWidth * bpp;

            GrTexture* texture = fGpu->createTexture(rtDesc,
                                                     stretchedPixels.get(),
                                                     stretchedRowBytes);
            GrAssert(NULL != texture);
            entry.set(fTextureCache->createAndLock(resourceKey, texture));
        }
        fTextureCache->unlock(clampEntry.cacheEntry());

    } else {
        // ordinary texture: upload directly
        GrTexture* texture = fGpu->createTexture(desc, srcData, rowBytes);
        if (NULL != texture) {
            entry.set(fTextureCache->createAndLock(resourceKey, texture));
        }
    }
    return entry;
}

namespace {
inline void gen_scratch_tex_key_values(const GrGpu* gpu,
                                       const GrTextureDesc& desc,
                                       uint32_t v[4]) {
    // Instead of a client-provided key of the texture contents
    // we create a key from the descriptor.
    // Pack the descriptor's flags and pixel config into the 64-bit key slot.
    GrContext::TextureKey descKey = (desc.fFlags << 8) |
                                    ((uint64_t) desc.fConfig << 32);
    // this code path isn't friendly to tiling with NPOT restrictions
    // We just pass ClampNoFilter()
    gen_texture_key_values(gpu, NULL, descKey, desc.fWidth,
                           desc.fHeight, desc.fSampleCnt, true, v);
}
}

// Find or create a scratch texture compatible with 'inDesc'. With a
// non-exact match the search progressively relaxes the fit: first allow the
// render-target / stencil flags to differ, then try doubling width, then
// height, before finally creating a new texture of the original size.
GrContext::TextureCacheEntry GrContext::lockScratchTexture(
                                                const GrTextureDesc& inDesc,
                                                ScratchTexMatch match) {

    GrTextureDesc desc = inDesc;
    if (kExact_ScratchTexMatch != match) {
        // bin by pow2 with a reasonable min
        static const int MIN_SIZE = 256;
        desc.fWidth  = GrMax(MIN_SIZE, GrNextPow2(desc.fWidth));
        desc.fHeight = GrMax(MIN_SIZE, GrNextPow2(desc.fHeight));
    }

    GrResourceEntry* entry;
    int origWidth = desc.fWidth;
    int origHeight = desc.fHeight;
    bool doubledW = false;
    bool doubledH = false;

    do {
        uint32_t v[4];
        gen_scratch_tex_key_values(fGpu, desc, v);
        GrResourceKey key(v);
        entry = fTextureCache->findAndLock(key,
                                           GrResourceCache::kNested_LockType);
        // if we miss, relax the fit of the flags...
        // then try doubling width... then height.
        if (NULL != entry || kExact_ScratchTexMatch == match) {
            break;
        }
        if (!(desc.fFlags & kRenderTarget_GrTextureFlagBit)) {
            desc.fFlags = desc.fFlags | kRenderTarget_GrTextureFlagBit;
        } else if (desc.fFlags & kNoStencil_GrTextureFlagBit) {
            desc.fFlags = desc.fFlags & ~kNoStencil_GrTextureFlagBit;
        } else if (!doubledW) {
            desc.fFlags = inDesc.fFlags;
            desc.fWidth *= 2;
            doubledW = true;
        } else if (!doubledH) {
            desc.fFlags = inDesc.fFlags;
            desc.fWidth = origWidth;
            desc.fHeight *= 2;
            doubledH = true;
        } else {
            break;
        }

    } while (true);

    if (NULL == entry) {
        // nothing usable in the cache: create one at the requested size
        desc.fFlags = inDesc.fFlags;
        desc.fWidth = origWidth;
        desc.fHeight = origHeight;
        GrTexture* texture = fGpu->createTexture(desc, NULL, 0);
        if (NULL != texture) {
            uint32_t v[4];
            gen_scratch_tex_key_values(fGpu, desc, v);
            GrResourceKey key(v);
            entry = fTextureCache->createAndLock(key, texture);
        }
    }

    // If the caller gives us the same desc/sampler twice we don't want
    // to return the same texture the second time (unless it was previously
    // released). So we detach the entry from the cache and reattach at release.
    if (NULL != entry) {
        fTextureCache->detach(entry);
    }
    return TextureCacheEntry(entry);
}

void GrContext::unlockTexture(TextureCacheEntry entry) {
    ASSERT_OWNED_RESOURCE(entry.texture());
    // If this is a scratch texture we detached it from the cache
    // while it was locked (to avoid two callers simultaneously getting
    // the same texture).
    if (kScratchBit & entry.cacheEntry()->key().getValue32(3)) {
        fTextureCache->reattachAndUnlock(entry.cacheEntry());
    } else {
        fTextureCache->unlock(entry.cacheEntry());
    }
}

// Create a texture the cache never sees; caller owns the returned ref.
GrTexture* GrContext::createUncachedTexture(const GrTextureDesc& desc,
                                            void* srcData,
                                            size_t rowBytes) {
    return fGpu->createTexture(desc, srcData, rowBytes);
}

void GrContext::getTextureCacheLimits(int* maxTextures,
                                      size_t* maxTextureBytes) const {
    fTextureCache->getLimits(maxTextures, maxTextureBytes);
}

void GrContext::setTextureCacheLimits(int maxTextures, size_t maxTextureBytes) {
    fTextureCache->setLimits(maxTextures, maxTextureBytes);
}

int GrContext::getMaxTextureSize() const {
    return fGpu->getCaps().fMaxTextureSize;
}

int GrContext::getMaxRenderTargetSize() const {
    return fGpu->getCaps().fMaxRenderTargetSize;
}

///////////////////////////////////////////////////////////////////////////////

// Wrap platform (client-created) 3D API objects in Gr resources.
GrTexture* GrContext::createPlatformTexture(const GrPlatformTextureDesc& desc) {
    return fGpu->createPlatformTexture(desc);
}

GrRenderTarget* GrContext::createPlatformRenderTarget(const GrPlatformRenderTargetDesc& desc) {
    return fGpu->createPlatformRenderTarget(desc);
}

///////////////////////////////////////////////////////////////////////////////

// Index-8 (paletted) textures are usable only if the hardware supports the
// format, and — when NPOT tiling is unsupported — only if the texture is POT
// or will not be tiled.
bool GrContext::supportsIndex8PixelConfig(const GrSamplerState* sampler,
                                          int width, int height) const {
    const GrDrawTarget::Caps& caps = fGpu->getCaps();
    if (!caps.f8BitPaletteSupport) {
        return false;
    }

    bool isPow2 = GrIsPow2(width) && GrIsPow2(height);

    if (!isPow2) {
        bool tiled = NULL != sampler &&
                     (sampler->getWrapX() != GrSamplerState::kClamp_WrapMode ||
                      sampler->getWrapY() != GrSamplerState::kClamp_WrapMode);
        if (tiled && !caps.fNPOTTextureTileSupport) {
            return false;
        }
    }
    return true;
}
////////////////////////////////////////////////////////////////////////////////

const GrClip& GrContext::getClip() const { return fGpu->getClip(); }

void GrContext::setClip(const GrClip& clip) {
    fGpu->setClip(clip);
    fGpu->drawState()->enableState(GrDrawState::kClip_StateBit);
}

void GrContext::setClip(const GrIRect& rect) {
    GrClip clip;
    clip.setFromIRect(rect);
    fGpu->setClip(clip);
}

////////////////////////////////////////////////////////////////////////////////

// Clear 'rect' (or the whole target when NULL) to 'color'. Flushes first so
// deferred draws land before the clear.
void GrContext::clear(const GrIRect* rect, const GrColor color) {
    this->flush();
    fGpu->clear(rect, color);
}

// Fill the entire clip with 'paint' by drawing a screen-filling rect mapped
// through the inverse view matrix (or with an identity view for perspective).
void GrContext::drawPaint(const GrPaint& paint) {
    // set rect to be big enough to fill the space, but not super-huge, so we
    // don't overflow fixed-point implementations
    GrRect r;
    r.setLTRB(0, 0,
              GrIntToScalar(getRenderTarget()->width()),
              GrIntToScalar(getRenderTarget()->height()));
    GrMatrix inverse;
    SkTLazy<GrPaint> tmpPaint;
    const GrPaint* p = &paint;
    GrDrawState* drawState = fGpu->drawState();
    GrAutoMatrix am;

    // We attempt to map r by the inverse matrix and draw that. mapRect will
    // map the four corners and bound them with a new rect. This will not
    // produce a correct result for some perspective matrices.
    if (!this->getMatrix().hasPerspective()) {
        if (!drawState->getViewInverse(&inverse)) {
            GrPrintf("Could not invert matrix");
            return;
        }
        inverse.mapRect(&r);
    } else {
        // perspective: leave r in device space and draw with identity view,
        // compensating the sampler matrices instead
        if (paint.getActiveMaskStageMask() || paint.getActiveStageMask()) {
            if (!drawState->getViewInverse(&inverse)) {
                GrPrintf("Could not invert matrix");
                return;
            }
            tmpPaint.set(paint);
            tmpPaint.get()->preConcatActiveSamplerMatrices(inverse);
            p = tmpPaint.get();
        }
        am.set(this, GrMatrix::I());
    }
    // by definition this fills the entire clip, no need for AA
    if (paint.fAntiAlias) {
        if (!tmpPaint.isValid()) {
            tmpPaint.set(paint);
            p = tmpPaint.get();
        }
        GrAssert(p == tmpPaint.get());
        tmpPaint.get()->fAntiAlias = false;
    }
    this->drawRect(*p, r);
}

////////////////////////////////////////////////////////////////////////////////

namespace {
// true when coverage AA must be skipped because the current blend cannot
// incorporate a coverage value (see DISABLE_COVERAGE_AA_FOR_BLEND)
inline bool disable_coverage_aa_for_blend(GrDrawTarget* target) {
    return DISABLE_COVERAGE_AA_FOR_BLEND && !target->canApplyCoverage();
}
}

////////////////////////////////////////////////////////////////////////////////

/* create a triangle strip that strokes the specified triangle. There are 8
 unique vertices, but we repeat the last 2 to close up. Alternatively we
 could use an indices array, and then only send 8 verts, but not sure that
 would be faster.
 */
static void setStrokeRectStrip(GrPoint verts[10], GrRect rect,
                               GrScalar width) {
    const GrScalar rad = GrScalarHalf(width);
    rect.sort();

    // even verts trace the inner rect, odd verts the outer rect
    verts[0].set(rect.fLeft + rad, rect.fTop + rad);
    verts[1].set(rect.fLeft - rad, rect.fTop - rad);
    verts[2].set(rect.fRight - rad, rect.fTop + rad);
    verts[3].set(rect.fRight + rad, rect.fTop - rad);
    verts[4].set(rect.fRight - rad, rect.fBottom - rad);
    verts[5].set(rect.fRight + rad, rect.fBottom + rad);
    verts[6].set(rect.fLeft + rad, rect.fBottom - rad);
    verts[7].set(rect.fLeft - rad, rect.fBottom + rad);
    verts[8] = verts[0];
    verts[9] = verts[1];
}

// Write a 4-vertex rect fan inset by (dx, dy); 'stride' is in bytes.
static void setInsetFan(GrPoint* pts, size_t stride,
                        const GrRect& r, GrScalar dx, GrScalar dy) {
    pts->setRectFan(r.fLeft + dx, r.fTop + dy, r.fRight - dx, r.fBottom - dy, stride);
}

// Indices for the AA fill rect: verts 0-3 are the outer (zero-coverage) fan,
// 4-7 the inner (full-coverage) fan; four edge quads plus the interior.
static const uint16_t gFillAARectIdx[] = {
    0, 1, 5, 5, 4, 0,
    1, 2, 6, 6, 5, 1,
    2, 3, 7, 7, 6, 2,
    3, 0, 4, 4, 7, 3,
    4, 5, 6, 6, 7, 4,
};

int GrContext::aaFillRectIndexCount() const {
    return GR_ARRAY_COUNT(gFillAARectIdx);
}

// Lazily create (and cache on the context) the AA fill-rect index buffer.
GrIndexBuffer* GrContext::aaFillRectIndexBuffer() {
    if (NULL == fAAFillRectIndexBuffer) {
        fAAFillRectIndexBuffer = fGpu->createIndexBuffer(sizeof(gFillAARectIdx),
                                                         false);
        if (NULL != fAAFillRectIndexBuffer) {
    #if GR_DEBUG
            bool updated =
    #endif
            fAAFillRectIndexBuffer->updateData(gFillAARectIdx,
                                               sizeof(gFillAARectIdx));
            GR_DEBUGASSERT(updated);
        }
    }
    return fAAFillRectIndexBuffer;
}

// Indices for the AA stroke rect: three nested 4-vertex rings (outer ramp,
// full-coverage stroke body, inner ramp), each ring joined by edge quads.
static const uint16_t gStrokeAARectIdx[] = {
    0 + 0, 1 + 0, 5 + 0, 5 + 0, 4 + 0, 0 + 0,
    1 + 0, 2 + 0, 6 + 0, 6 + 0, 5 + 0, 1 + 0,
    2 + 0, 3 + 0, 7 + 0, 7 + 0, 6 + 0, 2 + 0,
    3 + 0, 0 + 0, 4 + 0, 4 + 0, 7 + 0, 3 + 0,

    0 + 4, 1 + 4, 5 + 4, 5 + 4, 4 + 4, 0 + 4,
    1 + 4, 2 + 4, 6 + 4, 6 + 4, 5 + 4, 1 + 4,
    2 + 4, 3 + 4, 7 + 4, 7 + 4, 6 + 4, 2 + 4,
    3 + 4, 0 + 4, 4 + 4, 4 + 4, 7 + 4, 3 + 4,

    0 + 8, 1 + 8, 5 + 8, 5 + 8, 4 + 8, 0 + 8,
    1 + 8, 2 + 8, 6 + 8, 6 + 8, 5 + 8, 1 + 8,
    2 + 8, 3 + 8, 7 + 8, 7 + 8, 6 + 8, 2 + 8,
    3 + 8, 0 + 8, 4 + 8, 4 + 8, 7 + 8, 3 + 8,
};

int GrContext::aaStrokeRectIndexCount() const {
    return GR_ARRAY_COUNT(gStrokeAARectIdx);
}

GrIndexBuffer* GrContext::aaStrokeRectIndexBuffer() {
    if (NULL == fAAStrokeRectIndexBuffer) {
        fAAStrokeRectIndexBuffer = fGpu->createIndexBuffer(sizeof(gStrokeAARectIdx),
                                                           false);
        if (NULL != fAAStrokeRectIndexBuffer) {
    #if GR_DEBUG
            bool updated =
    #endif
            fAAStrokeRectIndexBuffer->updateData(gStrokeAARectIdx,
                                                 sizeof(gStrokeAARectIdx));
            GR_DEBUGASSERT(updated);
        }
    }
    return fAAStrokeRectIndexBuffer;
}

// Vertex layout for AA rects: position, pos-as-texcoord for each stage with a
// texture, plus either a per-vertex coverage or color channel for the ramp.
static GrVertexLayout aa_rect_layout(const GrDrawTarget* target,
                                     bool useCoverage) {
    GrVertexLayout layout = 0;
    for (int s = 0; s < GrDrawState::kNumStages; ++s) {
        if (NULL != target->getDrawState().getTexture(s)) {
            layout |= GrDrawTarget::StagePosAsTexCoordVertexLayoutBit(s);
        }
    }
    if (useCoverage) {
        layout |= GrDrawTarget::kCoverage_VertexLayoutBit;
    } else {
        layout |= GrDrawTarget::kColor_VertexLayoutBit;
    }
    return layout;
}

// Fill 'devRect' (device space) with a half-pixel AA coverage ramp: an outer
// fan at zero coverage and an inner fan at full coverage/color.
void GrContext::fillAARect(GrDrawTarget* target,
                           const GrRect& devRect,
                           bool useVertexCoverage) {
    GrVertexLayout layout = aa_rect_layout(target, useVertexCoverage);

    size_t vsize = GrDrawTarget::VertexSize(layout);

    GrDrawTarget::AutoReleaseGeometry geo(target, layout, 8, 0);
    if (!geo.succeeded()) {
        GrPrintf("Failed to get space for vertices!\n");
        return;
    }
    GrIndexBuffer* indexBuffer = this->aaFillRectIndexBuffer();
    if (NULL == indexBuffer) {
        GrPrintf("Failed to create index buffer!\n");
        return;
    }

    intptr_t verts = reinterpret_cast<intptr_t>(geo.vertices());

    GrPoint* fan0Pos = reinterpret_cast<GrPoint*>(verts);
    GrPoint* fan1Pos =
                      reinterpret_cast<GrPoint*>(verts + 4 * vsize);

    // outer fan half a pixel out, inner fan half a pixel in
    setInsetFan(fan0Pos, vsize, devRect, -GR_ScalarHalf, -GR_ScalarHalf);
    setInsetFan(fan1Pos, vsize, devRect,  GR_ScalarHalf,  GR_ScalarHalf);

    // the color/coverage channel follows the position in each vertex
    verts += sizeof(GrPoint);
    for (int i = 0; i < 4; ++i) {
        *reinterpret_cast<GrColor*>(verts + i * vsize) = 0;
    }

    GrColor innerColor;
    if (useVertexCoverage) {
        innerColor = 0xffffffff;
    } else {
        innerColor = target->getDrawState().getColor();
    }

    verts += 4 * vsize;
    for (int i = 0; i < 4; ++i) {
        *reinterpret_cast<GrColor*>(verts + i * vsize) = innerColor;
    }

    target->setIndexSourceToBuffer(indexBuffer);

    target->drawIndexed(kTriangles_PrimitiveType, 0,
                        0, 8, this->aaFillRectIndexCount());
}

// Stroke 'devRect' with an AA coverage ramp: 16 verts forming four nested
// fans (zero coverage outside and inside, full coverage across the stroke).
// Degenerates to a filled AA rect when the stroke overlaps itself.
void GrContext::strokeAARect(GrDrawTarget* target,
                             const GrRect& devRect,
                             const GrVec& devStrokeSize,
                             bool useVertexCoverage) {
    const GrScalar& dx = devStrokeSize.fX;
    const GrScalar& dy = devStrokeSize.fY;
    const GrScalar rx = GrMul(dx, GR_ScalarHalf);
    const GrScalar ry = GrMul(dy, GR_ScalarHalf);

    GrScalar spare;
    {
        GrScalar w = devRect.width() - dx;
        GrScalar h = devRect.height() - dy;
        spare = GrMin(w, h);
    }

    if (spare <= 0) {
        // stroke is wider than the rect interior: fill the outset rect
        GrRect r(devRect);
        r.inset(-rx, -ry);
        fillAARect(target, r, useVertexCoverage);
        return;
    }
    GrVertexLayout layout = aa_rect_layout(target, useVertexCoverage);
    size_t vsize = GrDrawTarget::VertexSize(layout);

    GrDrawTarget::AutoReleaseGeometry geo(target, layout, 16, 0);
    if (!geo.succeeded()) {
        GrPrintf("Failed to get space for vertices!\n");
        return;
    }
    GrIndexBuffer* indexBuffer = this->aaStrokeRectIndexBuffer();
    if (NULL == indexBuffer) {
        GrPrintf("Failed to create index buffer!\n");
        return;
    }

    intptr_t verts = reinterpret_cast<intptr_t>(geo.vertices());

    GrPoint* fan0Pos = reinterpret_cast<GrPoint*>(verts);
    GrPoint* fan1Pos = reinterpret_cast<GrPoint*>(verts + 4 * vsize);
    GrPoint* fan2Pos = reinterpret_cast<GrPoint*>(verts + 8 * vsize);
    GrPoint* fan3Pos = reinterpret_cast<GrPoint*>(verts + 12 * vsize);

    setInsetFan(fan0Pos, vsize, devRect, -rx - GR_ScalarHalf, -ry - GR_ScalarHalf);
    setInsetFan(fan1Pos, vsize, devRect, -rx + GR_ScalarHalf, -ry + GR_ScalarHalf);
    setInsetFan(fan2Pos, vsize, devRect,  rx - GR_ScalarHalf,  ry - GR_ScalarHalf);
    setInsetFan(fan3Pos, vsize, devRect,  rx + GR_ScalarHalf,  ry + GR_ScalarHalf);

    // outermost ring: zero coverage
    verts += sizeof(GrPoint);
    for (int i = 0; i < 4; ++i) {
        *reinterpret_cast<GrColor*>(verts + i * vsize) = 0;
    }

    GrColor innerColor;
    if (useVertexCoverage) {
        innerColor = 0xffffffff;
    } else {
        innerColor = target->getDrawState().getColor();
    }
    // middle two rings: full coverage across the stroke body
    verts += 4 * vsize;
    for (int i = 0; i < 8; ++i) {
        *reinterpret_cast<GrColor*>(verts + i * vsize) = innerColor;
    }

    // innermost ring: zero coverage again
    verts += 8 * vsize;
    for (int i = 0; i < 8; ++i) {
        *reinterpret_cast<GrColor*>(verts + i * vsize) = 0;
    }

    target->setIndexSourceToBuffer(indexBuffer);
    target->drawIndexed(kTriangles_PrimitiveType,
                        0, 0, 16, aaStrokeRectIndexCount());
}

/**
 * Returns true if the rects edges are integer-aligned.
 */
static bool isIRect(const GrRect& r) {
    return GrScalarIsInt(r.fLeft) && GrScalarIsInt(r.fTop) &&
           GrScalarIsInt(r.fRight) && GrScalarIsInt(r.fBottom);
}

// Decide whether the coverage-ramp AA path should be used for this rect, and
// if so compute the combined (view * matrix) transform and the device-space
// rect. *useVertexCoverage is set when the target cannot tweak alpha and
// per-vertex coverage must carry the ramp instead.
static bool apply_aa_to_rect(GrDrawTarget* target,
                             const GrRect& rect,
                             GrScalar width,
                             const GrMatrix* matrix,
                             GrMatrix* combinedMatrix,
                             GrRect* devRect,
                             bool* useVertexCoverage) {
    // we use a simple coverage ramp to do aa on axis-aligned rects
    // we check if the rect will be axis-aligned, and the rect won't land on
    // integer coords.

    // we are keeping around the "tweak the alpha" trick because
    // it is our only hope for the fixed-pipe implementation.
982 // In a shader implementation we can give a separate coverage input 983 // TODO: remove this ugliness when we drop the fixed-pipe impl 984 *useVertexCoverage = false; 985 if (!target->canTweakAlphaForCoverage()) { 986 if (disable_coverage_aa_for_blend(target)) { 987#if GR_DEBUG 988 //GrPrintf("Turning off AA to correctly apply blend.\n"); 989#endif 990 return false; 991 } else { 992 *useVertexCoverage = true; 993 } 994 } 995 const GrDrawState& drawState = target->getDrawState(); 996 if (drawState.getRenderTarget()->isMultisampled()) { 997 return false; 998 } 999 1000 if (0 == width && target->willUseHWAALines()) { 1001 return false; 1002 } 1003 1004 if (!drawState.getViewMatrix().preservesAxisAlignment()) { 1005 return false; 1006 } 1007 1008 if (NULL != matrix && 1009 !matrix->preservesAxisAlignment()) { 1010 return false; 1011 } 1012 1013 *combinedMatrix = drawState.getViewMatrix(); 1014 if (NULL != matrix) { 1015 combinedMatrix->preConcat(*matrix); 1016 GrAssert(combinedMatrix->preservesAxisAlignment()); 1017 } 1018 1019 combinedMatrix->mapRect(devRect, rect); 1020 devRect->sort(); 1021 1022 if (width < 0) { 1023 return !isIRect(*devRect); 1024 } else { 1025 return true; 1026 } 1027} 1028 1029void GrContext::drawRect(const GrPaint& paint, 1030 const GrRect& rect, 1031 GrScalar width, 1032 const GrMatrix* matrix) { 1033 SK_TRACE_EVENT0("GrContext::drawRect"); 1034 1035 GrDrawTarget* target = this->prepareToDraw(paint, kUnbuffered_DrawCategory); 1036 int stageMask = paint.getActiveStageMask(); 1037 1038 GrRect devRect = rect; 1039 GrMatrix combinedMatrix; 1040 bool useVertexCoverage; 1041 bool needAA = paint.fAntiAlias && 1042 !this->getRenderTarget()->isMultisampled(); 1043 bool doAA = needAA && apply_aa_to_rect(target, rect, width, matrix, 1044 &combinedMatrix, &devRect, 1045 &useVertexCoverage); 1046 1047 if (doAA) { 1048 GrDrawTarget::AutoDeviceCoordDraw adcd(target, stageMask); 1049 if (width >= 0) { 1050 GrVec strokeSize;; 1051 if (width > 0) { 1052 
strokeSize.set(width, width); 1053 combinedMatrix.mapVectors(&strokeSize, 1); 1054 strokeSize.setAbs(strokeSize); 1055 } else { 1056 strokeSize.set(GR_Scalar1, GR_Scalar1); 1057 } 1058 strokeAARect(target, devRect, strokeSize, useVertexCoverage); 1059 } else { 1060 fillAARect(target, devRect, useVertexCoverage); 1061 } 1062 return; 1063 } 1064 1065 if (width >= 0) { 1066 // TODO: consider making static vertex buffers for these cases. 1067 // Hairline could be done by just adding closing vertex to 1068 // unitSquareVertexBuffer() 1069 GrVertexLayout layout = PaintStageVertexLayoutBits(paint, NULL); 1070 1071 static const int worstCaseVertCount = 10; 1072 GrDrawTarget::AutoReleaseGeometry geo(target, layout, worstCaseVertCount, 0); 1073 1074 if (!geo.succeeded()) { 1075 GrPrintf("Failed to get space for vertices!\n"); 1076 return; 1077 } 1078 1079 GrPrimitiveType primType; 1080 int vertCount; 1081 GrPoint* vertex = geo.positions(); 1082 1083 if (width > 0) { 1084 vertCount = 10; 1085 primType = kTriangleStrip_PrimitiveType; 1086 setStrokeRectStrip(vertex, rect, width); 1087 } else { 1088 // hairline 1089 vertCount = 5; 1090 primType = kLineStrip_PrimitiveType; 1091 vertex[0].set(rect.fLeft, rect.fTop); 1092 vertex[1].set(rect.fRight, rect.fTop); 1093 vertex[2].set(rect.fRight, rect.fBottom); 1094 vertex[3].set(rect.fLeft, rect.fBottom); 1095 vertex[4].set(rect.fLeft, rect.fTop); 1096 } 1097 1098 GrDrawState::AutoViewMatrixRestore avmr; 1099 if (NULL != matrix) { 1100 GrDrawState* drawState = target->drawState(); 1101 avmr.set(drawState); 1102 drawState->preConcatViewMatrix(*matrix); 1103 drawState->preConcatSamplerMatrices(stageMask, *matrix); 1104 } 1105 1106 target->drawNonIndexed(primType, 0, vertCount); 1107 } else { 1108#if GR_STATIC_RECT_VB 1109 GrVertexLayout layout = PaintStageVertexLayoutBits(paint, NULL); 1110 const GrVertexBuffer* sqVB = fGpu->getUnitSquareVertexBuffer(); 1111 if (NULL == sqVB) { 1112 GrPrintf("Failed to create static rect vb.\n"); 1113 
return; 1114 } 1115 target->setVertexSourceToBuffer(layout, sqVB); 1116 GrDrawState* drawState = target->drawState(); 1117 GrDrawState::AutoViewMatrixRestore avmr(drawState); 1118 GrMatrix m; 1119 m.setAll(rect.width(), 0, rect.fLeft, 1120 0, rect.height(), rect.fTop, 1121 0, 0, GrMatrix::I()[8]); 1122 1123 if (NULL != matrix) { 1124 m.postConcat(*matrix); 1125 } 1126 drawState->preConcatViewMatrix(m); 1127 drawState->preConcatSamplerMatrices(stageMask, m); 1128 1129 target->drawNonIndexed(kTriangleFan_PrimitiveType, 0, 4); 1130#else 1131 target->drawSimpleRect(rect, matrix, stageMask); 1132#endif 1133 } 1134} 1135 1136void GrContext::drawRectToRect(const GrPaint& paint, 1137 const GrRect& dstRect, 1138 const GrRect& srcRect, 1139 const GrMatrix* dstMatrix, 1140 const GrMatrix* srcMatrix) { 1141 SK_TRACE_EVENT0("GrContext::drawRectToRect"); 1142 1143 // srcRect refers to paint's first texture 1144 if (NULL == paint.getTexture(0)) { 1145 drawRect(paint, dstRect, -1, dstMatrix); 1146 return; 1147 } 1148 1149 GR_STATIC_ASSERT(!BATCH_RECT_TO_RECT || !GR_STATIC_RECT_VB); 1150 1151#if GR_STATIC_RECT_VB 1152 GrDrawTarget* target = this->prepareToDraw(paint, kUnbuffered_DrawCategory); 1153 GrDrawState* drawState = target->drawState(); 1154 GrVertexLayout layout = PaintStageVertexLayoutBits(paint, NULL); 1155 GrDrawState::AutoViewMatrixRestore avmr(drawState); 1156 1157 GrMatrix m; 1158 1159 m.setAll(dstRect.width(), 0, dstRect.fLeft, 1160 0, dstRect.height(), dstRect.fTop, 1161 0, 0, GrMatrix::I()[8]); 1162 if (NULL != dstMatrix) { 1163 m.postConcat(*dstMatrix); 1164 } 1165 drawState->preConcatViewMatrix(m); 1166 1167 // srcRect refers to first stage 1168 int otherStageMask = paint.getActiveStageMask() & 1169 (~(1 << GrPaint::kFirstTextureStage)); 1170 if (otherStageMask) { 1171 drawState->preConcatSamplerMatrices(otherStageMask, m); 1172 } 1173 1174 m.setAll(srcRect.width(), 0, srcRect.fLeft, 1175 0, srcRect.height(), srcRect.fTop, 1176 0, 0, GrMatrix::I()[8]); 1177 if 
(NULL != srcMatrix) { 1178 m.postConcat(*srcMatrix); 1179 } 1180 drawState->sampler(GrPaint::kFirstTextureStage)->preConcatMatrix(m); 1181 1182 const GrVertexBuffer* sqVB = fGpu->getUnitSquareVertexBuffer(); 1183 if (NULL == sqVB) { 1184 GrPrintf("Failed to create static rect vb.\n"); 1185 return; 1186 } 1187 target->setVertexSourceToBuffer(layout, sqVB); 1188 target->drawNonIndexed(kTriangleFan_PrimitiveType, 0, 4); 1189#else 1190 1191 GrDrawTarget* target; 1192#if BATCH_RECT_TO_RECT 1193 target = this->prepareToDraw(paint, kBuffered_DrawCategory); 1194#else 1195 target = this->prepareToDraw(paint, kUnbuffered_DrawCategory); 1196#endif 1197 1198 const GrRect* srcRects[GrDrawState::kNumStages] = {NULL}; 1199 const GrMatrix* srcMatrices[GrDrawState::kNumStages] = {NULL}; 1200 srcRects[0] = &srcRect; 1201 srcMatrices[0] = srcMatrix; 1202 1203 target->drawRect(dstRect, dstMatrix, 1, srcRects, srcMatrices); 1204#endif 1205} 1206 1207void GrContext::drawVertices(const GrPaint& paint, 1208 GrPrimitiveType primitiveType, 1209 int vertexCount, 1210 const GrPoint positions[], 1211 const GrPoint texCoords[], 1212 const GrColor colors[], 1213 const uint16_t indices[], 1214 int indexCount) { 1215 SK_TRACE_EVENT0("GrContext::drawVertices"); 1216 1217 GrDrawTarget::AutoReleaseGeometry geo; 1218 1219 GrDrawTarget* target = this->prepareToDraw(paint, kUnbuffered_DrawCategory); 1220 1221 bool hasTexCoords[GrPaint::kTotalStages] = { 1222 NULL != texCoords, // texCoordSrc provides explicit stage 0 coords 1223 0 // remaining stages use positions 1224 }; 1225 1226 GrVertexLayout layout = PaintStageVertexLayoutBits(paint, hasTexCoords); 1227 1228 if (NULL != colors) { 1229 layout |= GrDrawTarget::kColor_VertexLayoutBit; 1230 } 1231 int vertexSize = GrDrawTarget::VertexSize(layout); 1232 1233 if (sizeof(GrPoint) != vertexSize) { 1234 if (!geo.set(target, layout, vertexCount, 0)) { 1235 GrPrintf("Failed to get space for vertices!\n"); 1236 return; 1237 } 1238 int 
texOffsets[GrDrawState::kMaxTexCoords]; 1239 int colorOffset; 1240 GrDrawTarget::VertexSizeAndOffsetsByIdx(layout, 1241 texOffsets, 1242 &colorOffset, 1243 NULL, 1244 NULL); 1245 void* curVertex = geo.vertices(); 1246 1247 for (int i = 0; i < vertexCount; ++i) { 1248 *((GrPoint*)curVertex) = positions[i]; 1249 1250 if (texOffsets[0] > 0) { 1251 *(GrPoint*)((intptr_t)curVertex + texOffsets[0]) = texCoords[i]; 1252 } 1253 if (colorOffset > 0) { 1254 *(GrColor*)((intptr_t)curVertex + colorOffset) = colors[i]; 1255 } 1256 curVertex = (void*)((intptr_t)curVertex + vertexSize); 1257 } 1258 } else { 1259 target->setVertexSourceToArray(layout, positions, vertexCount); 1260 } 1261 1262 // we don't currently apply offscreen AA to this path. Need improved 1263 // management of GrDrawTarget's geometry to avoid copying points per-tile. 1264 1265 if (NULL != indices) { 1266 target->setIndexSourceToArray(indices, indexCount); 1267 target->drawIndexed(primitiveType, 0, 0, vertexCount, indexCount); 1268 } else { 1269 target->drawNonIndexed(primitiveType, 0, vertexCount); 1270 } 1271} 1272 1273/////////////////////////////////////////////////////////////////////////////// 1274#include "SkDraw.h" 1275#include "SkRasterClip.h" 1276 1277namespace { 1278 1279SkPath::FillType gr_fill_to_sk_fill(GrPathFill fill) { 1280 switch (fill) { 1281 case kWinding_PathFill: 1282 return SkPath::kWinding_FillType; 1283 case kEvenOdd_PathFill: 1284 return SkPath::kEvenOdd_FillType; 1285 case kInverseWinding_PathFill: 1286 return SkPath::kInverseWinding_FillType; 1287 case kInverseEvenOdd_PathFill: 1288 return SkPath::kInverseEvenOdd_FillType; 1289 default: 1290 GrCrash("Unexpected fill."); 1291 return SkPath::kWinding_FillType; 1292 } 1293} 1294 1295// gets device coord bounds of path (not considering the fill) and clip. The 1296// path bounds will be a subset of the clip bounds. returns false if path bounds 1297// would be empty. 
1298bool get_path_and_clip_bounds(const GrDrawTarget* target, 1299 const GrPath& path, 1300 const GrVec* translate, 1301 GrIRect* pathBounds, 1302 GrIRect* clipBounds) { 1303 // compute bounds as intersection of rt size, clip, and path 1304 const GrRenderTarget* rt = target->getDrawState().getRenderTarget(); 1305 if (NULL == rt) { 1306 return false; 1307 } 1308 *pathBounds = GrIRect::MakeWH(rt->width(), rt->height()); 1309 const GrClip& clip = target->getClip(); 1310 if (clip.hasConservativeBounds()) { 1311 clip.getConservativeBounds().roundOut(clipBounds); 1312 if (!pathBounds->intersect(*clipBounds)) { 1313 return false; 1314 } 1315 } else { 1316 // pathBounds is currently the rt extent, set clip bounds to that rect. 1317 *clipBounds = *pathBounds; 1318 } 1319 GrRect pathSBounds = path.getBounds(); 1320 if (!pathSBounds.isEmpty()) { 1321 if (NULL != translate) { 1322 pathSBounds.offset(*translate); 1323 } 1324 target->getDrawState().getViewMatrix().mapRect(&pathSBounds, 1325 pathSBounds); 1326 GrIRect pathIBounds; 1327 pathSBounds.roundOut(&pathIBounds); 1328 if (!pathBounds->intersect(pathIBounds)) { 1329 return false; 1330 } 1331 } else { 1332 return false; 1333 } 1334 return true; 1335} 1336 1337/** 1338 * sw rasterizes path to A8 mask using the context's matrix and uploads to a 1339 * scratch texture. 
1340 */ 1341 1342bool sw_draw_path_to_mask_texture(const GrPath& clientPath, 1343 const GrIRect& pathDevBounds, 1344 GrPathFill fill, 1345 GrContext* context, 1346 const GrPoint* translate, 1347 GrAutoScratchTexture* tex) { 1348 SkPaint paint; 1349 SkPath tmpPath; 1350 const SkPath* pathToDraw = &clientPath; 1351 if (kHairLine_PathFill == fill) { 1352 paint.setStyle(SkPaint::kStroke_Style); 1353 paint.setStrokeWidth(SK_Scalar1); 1354 } else { 1355 paint.setStyle(SkPaint::kFill_Style); 1356 SkPath::FillType skfill = gr_fill_to_sk_fill(fill); 1357 if (skfill != pathToDraw->getFillType()) { 1358 tmpPath = *pathToDraw; 1359 tmpPath.setFillType(skfill); 1360 pathToDraw = &tmpPath; 1361 } 1362 } 1363 paint.setAntiAlias(true); 1364 paint.setColor(SK_ColorWHITE); 1365 1366 GrMatrix matrix = context->getMatrix(); 1367 if (NULL != translate) { 1368 matrix.postTranslate(translate->fX, translate->fY); 1369 } 1370 1371 matrix.postTranslate(-pathDevBounds.fLeft * SK_Scalar1, 1372 -pathDevBounds.fTop * SK_Scalar1); 1373 GrIRect bounds = GrIRect::MakeWH(pathDevBounds.width(), 1374 pathDevBounds.height()); 1375 1376 SkBitmap bm; 1377 bm.setConfig(SkBitmap::kA8_Config, bounds.fRight, bounds.fBottom); 1378 if (!bm.allocPixels()) { 1379 return false; 1380 } 1381 sk_bzero(bm.getPixels(), bm.getSafeSize()); 1382 1383 SkDraw draw; 1384 sk_bzero(&draw, sizeof(draw)); 1385 SkRasterClip rc(bounds); 1386 draw.fRC = &rc; 1387 draw.fClip = &rc.bwRgn(); 1388 draw.fMatrix = &matrix; 1389 draw.fBitmap = &bm; 1390 draw.drawPath(*pathToDraw, paint); 1391 1392 const GrTextureDesc desc = { 1393 kNone_GrTextureFlags, 1394 bounds.fRight, 1395 bounds.fBottom, 1396 kAlpha_8_GrPixelConfig, 1397 {0} // samples 1398 }; 1399 1400 tex->set(context, desc); 1401 GrTexture* texture = tex->texture(); 1402 1403 if (NULL == texture) { 1404 return false; 1405 } 1406 SkAutoLockPixels alp(bm); 1407 texture->writePixels(0, 0, desc.fWidth, desc.fHeight, desc.fConfig, 1408 bm.getPixels(), bm.rowBytes()); 1409 return 
true; 1410} 1411 1412void draw_around_inv_path(GrDrawTarget* target, 1413 GrDrawState::StageMask stageMask, 1414 const GrIRect& clipBounds, 1415 const GrIRect& pathBounds) { 1416 GrDrawTarget::AutoDeviceCoordDraw adcd(target, stageMask); 1417 GrRect rect; 1418 if (clipBounds.fTop < pathBounds.fTop) { 1419 rect.iset(clipBounds.fLeft, clipBounds.fTop, 1420 clipBounds.fRight, pathBounds.fTop); 1421 target->drawSimpleRect(rect, NULL, stageMask); 1422 } 1423 if (clipBounds.fLeft < pathBounds.fLeft) { 1424 rect.iset(clipBounds.fLeft, pathBounds.fTop, 1425 pathBounds.fLeft, pathBounds.fBottom); 1426 target->drawSimpleRect(rect, NULL, stageMask); 1427 } 1428 if (clipBounds.fRight > pathBounds.fRight) { 1429 rect.iset(pathBounds.fRight, pathBounds.fTop, 1430 clipBounds.fRight, pathBounds.fBottom); 1431 target->drawSimpleRect(rect, NULL, stageMask); 1432 } 1433 if (clipBounds.fBottom > pathBounds.fBottom) { 1434 rect.iset(clipBounds.fLeft, pathBounds.fBottom, 1435 clipBounds.fRight, clipBounds.fBottom); 1436 target->drawSimpleRect(rect, NULL, stageMask); 1437 } 1438} 1439 1440} 1441 1442void GrContext::drawPath(const GrPaint& paint, const GrPath& path, 1443 GrPathFill fill, const GrPoint* translate) { 1444 1445 if (path.isEmpty()) { 1446 if (GrIsFillInverted(fill)) { 1447 this->drawPaint(paint); 1448 } 1449 return; 1450 } 1451 1452 // Note that below we may sw-rasterize the path into a scratch texture. 1453 // Scratch textures can be recycled after they are returned to the texture 1454 // cache. This presents a potential hazard for buffered drawing. However, 1455 // the writePixels that uploads to the scratch will perform a flush so we're 1456 // OK. 1457 DrawCategory category = (DEFER_PATHS) ? 
kBuffered_DrawCategory : 1458 kUnbuffered_DrawCategory; 1459 GrDrawTarget* target = this->prepareToDraw(paint, category); 1460 GrDrawState::StageMask stageMask = paint.getActiveStageMask(); 1461 1462 bool prAA = paint.fAntiAlias && !this->getRenderTarget()->isMultisampled(); 1463 1464 // An Assumption here is that path renderer would use some form of tweaking 1465 // the src color (either the input alpha or in the frag shader) to implement 1466 // aa. If we have some future driver-mojo path AA that can do the right 1467 // thing WRT to the blend then we'll need some query on the PR. 1468 if (disable_coverage_aa_for_blend(target)) { 1469#if GR_DEBUG 1470 //GrPrintf("Turning off AA to correctly apply blend.\n"); 1471#endif 1472 prAA = false; 1473 } 1474 1475 GrPathRenderer* pr = NULL; 1476 if (prAA) { 1477 pr = this->getPathRenderer(path, fill, target, true); 1478 if (NULL == pr) { 1479 GrAutoScratchTexture ast; 1480 GrIRect pathBounds, clipBounds; 1481 if (!get_path_and_clip_bounds(target, path, translate, 1482 &pathBounds, &clipBounds)) { 1483 return; 1484 } 1485 if (NULL == pr && sw_draw_path_to_mask_texture(path, pathBounds, 1486 fill, this, 1487 translate, &ast)) { 1488 GrTexture* texture = ast.texture(); 1489 GrAssert(NULL != texture); 1490 GrDrawTarget::AutoDeviceCoordDraw adcd(target, stageMask); 1491 enum { 1492 kPathMaskStage = GrPaint::kTotalStages, 1493 }; 1494 target->drawState()->setTexture(kPathMaskStage, texture); 1495 target->drawState()->sampler(kPathMaskStage)->reset(); 1496 GrScalar w = GrIntToScalar(pathBounds.width()); 1497 GrScalar h = GrIntToScalar(pathBounds.height()); 1498 GrRect maskRect = GrRect::MakeWH(w / texture->width(), 1499 h / texture->height()); 1500 const GrRect* srcRects[GrDrawState::kNumStages] = {NULL}; 1501 srcRects[kPathMaskStage] = &maskRect; 1502 stageMask |= 1 << kPathMaskStage; 1503 GrRect dstRect = GrRect::MakeLTRB( 1504 SK_Scalar1* pathBounds.fLeft, 1505 SK_Scalar1* pathBounds.fTop, 1506 SK_Scalar1* pathBounds.fRight, 
1507 SK_Scalar1* pathBounds.fBottom); 1508 target->drawRect(dstRect, NULL, stageMask, srcRects, NULL); 1509 target->drawState()->setTexture(kPathMaskStage, NULL); 1510 if (GrIsFillInverted(fill)) { 1511 draw_around_inv_path(target, stageMask, 1512 clipBounds, pathBounds); 1513 } 1514 return; 1515 } 1516 } 1517 } else { 1518 pr = this->getPathRenderer(path, fill, target, false); 1519 } 1520 1521 if (NULL == pr) { 1522#if GR_DEBUG 1523 GrPrintf("Unable to find path renderer compatible with path.\n"); 1524#endif 1525 return; 1526 } 1527 1528 pr->drawPath(path, fill, translate, target, stageMask, prAA); 1529} 1530 1531//////////////////////////////////////////////////////////////////////////////// 1532 1533void GrContext::flush(int flagsBitfield) { 1534 if (kDiscard_FlushBit & flagsBitfield) { 1535 fDrawBuffer->reset(); 1536 } else { 1537 this->flushDrawBuffer(); 1538 } 1539 if (kForceCurrentRenderTarget_FlushBit & flagsBitfield) { 1540 fGpu->forceRenderTargetFlush(); 1541 } 1542} 1543 1544void GrContext::flushText() { 1545 if (kText_DrawCategory == fLastDrawCategory) { 1546 flushDrawBuffer(); 1547 } 1548} 1549 1550void GrContext::flushDrawBuffer() { 1551#if BATCH_RECT_TO_RECT || DEFER_TEXT_RENDERING 1552 if (fDrawBuffer) { 1553 fDrawBuffer->flushTo(fGpu); 1554 } 1555#endif 1556} 1557 1558void GrContext::internalWriteTexturePixels(GrTexture* texture, 1559 int left, int top, 1560 int width, int height, 1561 GrPixelConfig config, 1562 const void* buffer, 1563 size_t rowBytes, 1564 uint32_t flags) { 1565 SK_TRACE_EVENT0("GrContext::writeTexturePixels"); 1566 ASSERT_OWNED_RESOURCE(texture); 1567 1568 if (!(kDontFlush_PixelOpsFlag & flags)) { 1569 this->flush(); 1570 } 1571 // TODO: use scratch texture to perform conversion 1572 if (GrPixelConfigIsUnpremultiplied(texture->config()) != 1573 GrPixelConfigIsUnpremultiplied(config)) { 1574 return; 1575 } 1576 1577 fGpu->writeTexturePixels(texture, left, top, width, height, 1578 config, buffer, rowBytes); 1579} 1580 1581bool 
GrContext::internalReadTexturePixels(GrTexture* texture, 1582 int left, int top, 1583 int width, int height, 1584 GrPixelConfig config, 1585 void* buffer, 1586 size_t rowBytes, 1587 uint32_t flags) { 1588 SK_TRACE_EVENT0("GrContext::readTexturePixels"); 1589 ASSERT_OWNED_RESOURCE(texture); 1590 1591 // TODO: code read pixels for textures that aren't also rendertargets 1592 GrRenderTarget* target = texture->asRenderTarget(); 1593 if (NULL != target) { 1594 return this->internalReadRenderTargetPixels(target, 1595 left, top, width, height, 1596 config, buffer, rowBytes, 1597 flags); 1598 } else { 1599 return false; 1600 } 1601} 1602 1603#include "SkConfig8888.h" 1604 1605namespace { 1606/** 1607 * Converts a GrPixelConfig to a SkCanvas::Config8888. Only byte-per-channel 1608 * formats are representable as Config8888 and so the function returns false 1609 * if the GrPixelConfig has no equivalent Config8888. 1610 */ 1611bool grconfig_to_config8888(GrPixelConfig config, 1612 SkCanvas::Config8888* config8888) { 1613 switch (config) { 1614 case kRGBA_8888_PM_GrPixelConfig: 1615 *config8888 = SkCanvas::kRGBA_Premul_Config8888; 1616 return true; 1617 case kRGBA_8888_UPM_GrPixelConfig: 1618 *config8888 = SkCanvas::kRGBA_Unpremul_Config8888; 1619 return true; 1620 case kBGRA_8888_PM_GrPixelConfig: 1621 *config8888 = SkCanvas::kBGRA_Premul_Config8888; 1622 return true; 1623 case kBGRA_8888_UPM_GrPixelConfig: 1624 *config8888 = SkCanvas::kBGRA_Unpremul_Config8888; 1625 return true; 1626 default: 1627 return false; 1628 } 1629} 1630} 1631 1632bool GrContext::internalReadRenderTargetPixels(GrRenderTarget* target, 1633 int left, int top, 1634 int width, int height, 1635 GrPixelConfig config, 1636 void* buffer, 1637 size_t rowBytes, 1638 uint32_t flags) { 1639 SK_TRACE_EVENT0("GrContext::readRenderTargetPixels"); 1640 ASSERT_OWNED_RESOURCE(target); 1641 1642 if (NULL == target) { 1643 target = fGpu->drawState()->getRenderTarget(); 1644 if (NULL == target) { 1645 return false; 1646 } 
1647 } 1648 1649 if (!(kDontFlush_PixelOpsFlag & flags)) { 1650 this->flush(); 1651 } 1652 1653 if (!GrPixelConfigIsUnpremultiplied(target->config()) && 1654 GrPixelConfigIsUnpremultiplied(config) && 1655 !fGpu->canPreserveReadWriteUnpremulPixels()) { 1656 SkCanvas::Config8888 srcConfig8888, dstConfig8888; 1657 if (!grconfig_to_config8888(target->config(), &srcConfig8888) || 1658 !grconfig_to_config8888(config, &dstConfig8888)) { 1659 return false; 1660 } 1661 // do read back using target's own config 1662 this->internalReadRenderTargetPixels(target, 1663 left, top, 1664 width, height, 1665 target->config(), 1666 buffer, rowBytes, 1667 kDontFlush_PixelOpsFlag); 1668 // sw convert the pixels to unpremul config 1669 uint32_t* pixels = reinterpret_cast<uint32_t*>(buffer); 1670 SkConvertConfig8888Pixels(pixels, rowBytes, dstConfig8888, 1671 pixels, rowBytes, srcConfig8888, 1672 width, height); 1673 return true; 1674 } 1675 1676 GrTexture* src = target->asTexture(); 1677 bool swapRAndB = NULL != src && 1678 fGpu->preferredReadPixelsConfig(config) == 1679 GrPixelConfigSwapRAndB(config); 1680 1681 bool flipY = NULL != src && 1682 fGpu->readPixelsWillPayForYFlip(target, left, top, 1683 width, height, config, 1684 rowBytes); 1685 bool alphaConversion = (!GrPixelConfigIsUnpremultiplied(target->config()) && 1686 GrPixelConfigIsUnpremultiplied(config)); 1687 1688 if (NULL == src && alphaConversion) { 1689 // we should fallback to cpu conversion here. This could happen when 1690 // we were given an external render target by the client that is not 1691 // also a texture (e.g. 
FBO 0 in GL) 1692 return false; 1693 } 1694 // we draw to a scratch texture if any of these conversion are applied 1695 GrAutoScratchTexture ast; 1696 if (flipY || swapRAndB || alphaConversion) { 1697 GrAssert(NULL != src); 1698 if (swapRAndB) { 1699 config = GrPixelConfigSwapRAndB(config); 1700 GrAssert(kUnknown_GrPixelConfig != config); 1701 } 1702 // Make the scratch a render target because we don't have a robust 1703 // readTexturePixels as of yet (it calls this function). 1704 const GrTextureDesc desc = { 1705 kRenderTarget_GrTextureFlagBit, 1706 width, height, 1707 config, 1708 {0}, // samples 1709 }; 1710 1711 // When a full readback is faster than a partial we could always make 1712 // the scratch exactly match the passed rect. However, if we see many 1713 // different size rectangles we will trash our texture cache and pay the 1714 // cost of creating and destroying many textures. So, we only request 1715 // an exact match when the caller is reading an entire RT. 1716 ScratchTexMatch match = kApprox_ScratchTexMatch; 1717 if (0 == left && 1718 0 == top && 1719 target->width() == width && 1720 target->height() == height && 1721 fGpu->fullReadPixelsIsFasterThanPartial()) { 1722 match = kExact_ScratchTexMatch; 1723 } 1724 ast.set(this, desc, match); 1725 GrTexture* texture = ast.texture(); 1726 if (!texture) { 1727 return false; 1728 } 1729 target = texture->asRenderTarget(); 1730 GrAssert(NULL != target); 1731 1732 GrDrawTarget::AutoStateRestore asr(fGpu); 1733 GrDrawState* drawState = fGpu->drawState(); 1734 drawState->reset(); 1735 drawState->setRenderTarget(target); 1736 1737 GrMatrix matrix; 1738 if (flipY) { 1739 matrix.setTranslate(SK_Scalar1 * left, 1740 SK_Scalar1 * (top + height)); 1741 matrix.set(GrMatrix::kMScaleY, -GR_Scalar1); 1742 } else { 1743 matrix.setTranslate(SK_Scalar1 *left, SK_Scalar1 *top); 1744 } 1745 matrix.postIDiv(src->width(), src->height()); 1746 drawState->sampler(0)->reset(matrix); 1747 
drawState->sampler(0)->setRAndBSwap(swapRAndB); 1748 drawState->setTexture(0, src); 1749 GrRect rect; 1750 rect.setXYWH(0, 0, SK_Scalar1 * width, SK_Scalar1 * height); 1751 fGpu->drawSimpleRect(rect, NULL, 0x1); 1752 left = 0; 1753 top = 0; 1754 } 1755 return fGpu->readPixels(target, 1756 left, top, width, height, 1757 config, buffer, rowBytes, flipY); 1758} 1759 1760void GrContext::resolveRenderTarget(GrRenderTarget* target) { 1761 GrAssert(target); 1762 ASSERT_OWNED_RESOURCE(target); 1763 // In the future we may track whether there are any pending draws to this 1764 // target. We don't today so we always perform a flush. We don't promise 1765 // this to our clients, though. 1766 this->flush(); 1767 fGpu->resolveRenderTarget(target); 1768} 1769 1770void GrContext::copyTexture(GrTexture* src, GrRenderTarget* dst) { 1771 if (NULL == src || NULL == dst) { 1772 return; 1773 } 1774 ASSERT_OWNED_RESOURCE(src); 1775 1776 GrDrawTarget::AutoStateRestore asr(fGpu); 1777 GrDrawState* drawState = fGpu->drawState(); 1778 drawState->reset(); 1779 drawState->setRenderTarget(dst); 1780 GrMatrix sampleM; 1781 sampleM.setIDiv(src->width(), src->height()); 1782 drawState->setTexture(0, src); 1783 drawState->sampler(0)->reset(sampleM); 1784 SkRect rect = SkRect::MakeXYWH(0, 0, 1785 SK_Scalar1 * src->width(), 1786 SK_Scalar1 * src->height()); 1787 fGpu->drawSimpleRect(rect, NULL, 1 << 0); 1788} 1789 1790void GrContext::internalWriteRenderTargetPixels(GrRenderTarget* target, 1791 int left, int top, 1792 int width, int height, 1793 GrPixelConfig config, 1794 const void* buffer, 1795 size_t rowBytes, 1796 uint32_t flags) { 1797 SK_TRACE_EVENT0("GrContext::writeRenderTargetPixels"); 1798 ASSERT_OWNED_RESOURCE(target); 1799 1800 if (NULL == target) { 1801 target = fGpu->drawState()->getRenderTarget(); 1802 if (NULL == target) { 1803 return; 1804 } 1805 } 1806 1807 // TODO: when underlying api has a direct way to do this we should use it 1808 // (e.g. glDrawPixels on desktop GL). 
1809 1810 // If the RT is also a texture and we don't have to do PM/UPM conversion 1811 // then take the texture path, which we expect to be at least as fast or 1812 // faster since it doesn't use an intermediate texture as we do below. 1813 1814#if !GR_MAC_BUILD 1815 // At least some drivers on the Mac get confused when glTexImage2D is called 1816 // on a texture attached to an FBO. The FBO still sees the old image. TODO: 1817 // determine what OS versions and/or HW is affected. 1818 if (NULL != target->asTexture() && 1819 GrPixelConfigIsUnpremultiplied(target->config()) == 1820 GrPixelConfigIsUnpremultiplied(config)) { 1821 1822 this->internalWriteTexturePixels(target->asTexture(), 1823 left, top, width, height, 1824 config, buffer, rowBytes, flags); 1825 return; 1826 } 1827#endif 1828 if (!GrPixelConfigIsUnpremultiplied(target->config()) && 1829 GrPixelConfigIsUnpremultiplied(config) && 1830 !fGpu->canPreserveReadWriteUnpremulPixels()) { 1831 SkCanvas::Config8888 srcConfig8888, dstConfig8888; 1832 if (!grconfig_to_config8888(config, &srcConfig8888) || 1833 !grconfig_to_config8888(target->config(), &dstConfig8888)) { 1834 return; 1835 } 1836 // allocate a tmp buffer and sw convert the pixels to premul 1837 SkAutoSTMalloc<128 * 128, uint32_t> tmpPixels(width * height); 1838 const uint32_t* src = reinterpret_cast<const uint32_t*>(buffer); 1839 SkConvertConfig8888Pixels(tmpPixels.get(), 4 * width, dstConfig8888, 1840 src, rowBytes, srcConfig8888, 1841 width, height); 1842 // upload the already premul pixels 1843 this->internalWriteRenderTargetPixels(target, 1844 left, top, 1845 width, height, 1846 target->config(), 1847 tmpPixels, 4 * width, flags); 1848 return; 1849 } 1850 1851 bool swapRAndB = fGpu->preferredReadPixelsConfig(config) == 1852 GrPixelConfigSwapRAndB(config); 1853 if (swapRAndB) { 1854 config = GrPixelConfigSwapRAndB(config); 1855 } 1856 1857 const GrTextureDesc desc = { 1858 kNone_GrTextureFlags, width, height, config, {0} 1859 }; 1860 
GrAutoScratchTexture ast(this, desc); 1861 GrTexture* texture = ast.texture(); 1862 if (NULL == texture) { 1863 return; 1864 } 1865 this->internalWriteTexturePixels(texture, 0, 0, width, height, 1866 config, buffer, rowBytes, flags); 1867 1868 GrDrawTarget::AutoStateRestore asr(fGpu); 1869 GrDrawState* drawState = fGpu->drawState(); 1870 drawState->reset(); 1871 1872 GrMatrix matrix; 1873 matrix.setTranslate(GrIntToScalar(left), GrIntToScalar(top)); 1874 drawState->setViewMatrix(matrix); 1875 drawState->setRenderTarget(target); 1876 drawState->setTexture(0, texture); 1877 1878 matrix.setIDiv(texture->width(), texture->height()); 1879 drawState->sampler(0)->reset(GrSamplerState::kClamp_WrapMode, 1880 GrSamplerState::kNearest_Filter, 1881 matrix); 1882 drawState->sampler(0)->setRAndBSwap(swapRAndB); 1883 1884 GrVertexLayout layout = GrDrawTarget::StagePosAsTexCoordVertexLayoutBit(0); 1885 static const int VCOUNT = 4; 1886 // TODO: Use GrGpu::drawRect here 1887 GrDrawTarget::AutoReleaseGeometry geo(fGpu, layout, VCOUNT, 0); 1888 if (!geo.succeeded()) { 1889 GrPrintf("Failed to get space for vertices!\n"); 1890 return; 1891 } 1892 ((GrPoint*)geo.vertices())->setIRectFan(0, 0, width, height); 1893 fGpu->drawNonIndexed(kTriangleFan_PrimitiveType, 0, VCOUNT); 1894} 1895//////////////////////////////////////////////////////////////////////////////// 1896 1897void GrContext::setPaint(const GrPaint& paint, GrDrawTarget* target) { 1898 GrDrawState* drawState = target->drawState(); 1899 1900 for (int i = 0; i < GrPaint::kMaxTextures; ++i) { 1901 int s = i + GrPaint::kFirstTextureStage; 1902 drawState->setTexture(s, paint.getTexture(i)); 1903 ASSERT_OWNED_RESOURCE(paint.getTexture(i)); 1904 if (paint.getTexture(i)) { 1905 *drawState->sampler(s) = paint.getTextureSampler(i); 1906 } 1907 } 1908 1909 drawState->setFirstCoverageStage(GrPaint::kFirstMaskStage); 1910 1911 for (int i = 0; i < GrPaint::kMaxMasks; ++i) { 1912 int s = i + GrPaint::kFirstMaskStage; 1913 
drawState->setTexture(s, paint.getMask(i)); 1914 ASSERT_OWNED_RESOURCE(paint.getMask(i)); 1915 if (paint.getMask(i)) { 1916 *drawState->sampler(s) = paint.getMaskSampler(i); 1917 } 1918 } 1919 1920 drawState->setColor(paint.fColor); 1921 1922 if (paint.fDither) { 1923 drawState->enableState(GrDrawState::kDither_StateBit); 1924 } else { 1925 drawState->disableState(GrDrawState::kDither_StateBit); 1926 } 1927 if (paint.fAntiAlias) { 1928 drawState->enableState(GrDrawState::kHWAntialias_StateBit); 1929 } else { 1930 drawState->disableState(GrDrawState::kHWAntialias_StateBit); 1931 } 1932 if (paint.fColorMatrixEnabled) { 1933 drawState->enableState(GrDrawState::kColorMatrix_StateBit); 1934 drawState->setColorMatrix(paint.fColorMatrix); 1935 } else { 1936 drawState->disableState(GrDrawState::kColorMatrix_StateBit); 1937 } 1938 drawState->setBlendFunc(paint.fSrcBlendCoeff, paint.fDstBlendCoeff); 1939 drawState->setColorFilter(paint.fColorFilterColor, paint.fColorFilterXfermode); 1940 drawState->setCoverage(paint.fCoverage); 1941 1942 if (paint.getActiveMaskStageMask() && !target->canApplyCoverage()) { 1943 GrPrintf("Partial pixel coverage will be incorrectly blended.\n"); 1944 } 1945} 1946 1947GrDrawTarget* GrContext::prepareToDraw(const GrPaint& paint, 1948 DrawCategory category) { 1949 if (category != fLastDrawCategory) { 1950 this->flushDrawBuffer(); 1951 fLastDrawCategory = category; 1952 } 1953 this->setPaint(paint, fGpu); 1954 GrDrawTarget* target = fGpu; 1955 switch (category) { 1956 case kText_DrawCategory: 1957#if DEFER_TEXT_RENDERING 1958 target = fDrawBuffer; 1959 fDrawBuffer->initializeDrawStateAndClip(*fGpu); 1960#else 1961 target = fGpu; 1962#endif 1963 break; 1964 case kUnbuffered_DrawCategory: 1965 target = fGpu; 1966 break; 1967 case kBuffered_DrawCategory: 1968 target = fDrawBuffer; 1969 fDrawBuffer->initializeDrawStateAndClip(*fGpu); 1970 break; 1971 } 1972 return target; 1973} 1974 1975GrPathRenderer* GrContext::getPathRenderer(const GrPath& path, 
1976 GrPathFill fill, 1977 const GrDrawTarget* target, 1978 bool antiAlias) { 1979 if (NULL == fPathRendererChain) { 1980 fPathRendererChain = 1981 new GrPathRendererChain(this, GrPathRendererChain::kNone_UsageFlag); 1982 } 1983 return fPathRendererChain->getPathRenderer(path, fill, target, antiAlias); 1984} 1985 1986//////////////////////////////////////////////////////////////////////////////// 1987 1988void GrContext::setRenderTarget(GrRenderTarget* target) { 1989 ASSERT_OWNED_RESOURCE(target); 1990 if (fGpu->drawState()->getRenderTarget() != target) { 1991 this->flush(false); 1992 fGpu->drawState()->setRenderTarget(target); 1993 } 1994} 1995 1996GrRenderTarget* GrContext::getRenderTarget() { 1997 return fGpu->drawState()->getRenderTarget(); 1998} 1999 2000const GrRenderTarget* GrContext::getRenderTarget() const { 2001 return fGpu->getDrawState().getRenderTarget(); 2002} 2003 2004const GrMatrix& GrContext::getMatrix() const { 2005 return fGpu->getDrawState().getViewMatrix(); 2006} 2007 2008void GrContext::setMatrix(const GrMatrix& m) { 2009 fGpu->drawState()->setViewMatrix(m); 2010} 2011 2012void GrContext::concatMatrix(const GrMatrix& m) const { 2013 fGpu->drawState()->preConcatViewMatrix(m); 2014} 2015 2016static inline intptr_t setOrClear(intptr_t bits, int shift, intptr_t pred) { 2017 intptr_t mask = 1 << shift; 2018 if (pred) { 2019 bits |= mask; 2020 } else { 2021 bits &= ~mask; 2022 } 2023 return bits; 2024} 2025 2026void GrContext::resetStats() { 2027 fGpu->resetStats(); 2028} 2029 2030const GrGpuStats& GrContext::getStats() const { 2031 return fGpu->getStats(); 2032} 2033 2034void GrContext::printStats() const { 2035 fGpu->printStats(); 2036} 2037 2038GrContext::GrContext(GrGpu* gpu) { 2039 fGpu = gpu; 2040 fGpu->ref(); 2041 fGpu->setContext(this); 2042 2043 fPathRendererChain = NULL; 2044 2045 fTextureCache = new GrResourceCache(MAX_TEXTURE_CACHE_COUNT, 2046 MAX_TEXTURE_CACHE_BYTES); 2047 fFontCache = new GrFontCache(fGpu); 2048 2049 fLastDrawCategory 
= kUnbuffered_DrawCategory; 2050 2051 fDrawBuffer = NULL; 2052 fDrawBufferVBAllocPool = NULL; 2053 fDrawBufferIBAllocPool = NULL; 2054 2055 fAAFillRectIndexBuffer = NULL; 2056 fAAStrokeRectIndexBuffer = NULL; 2057 2058 this->setupDrawBuffer(); 2059} 2060 2061void GrContext::setupDrawBuffer() { 2062 2063 GrAssert(NULL == fDrawBuffer); 2064 GrAssert(NULL == fDrawBufferVBAllocPool); 2065 GrAssert(NULL == fDrawBufferIBAllocPool); 2066 2067#if DEFER_TEXT_RENDERING || BATCH_RECT_TO_RECT 2068 fDrawBufferVBAllocPool = 2069 new GrVertexBufferAllocPool(fGpu, false, 2070 DRAW_BUFFER_VBPOOL_BUFFER_SIZE, 2071 DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS); 2072 fDrawBufferIBAllocPool = 2073 new GrIndexBufferAllocPool(fGpu, false, 2074 DRAW_BUFFER_IBPOOL_BUFFER_SIZE, 2075 DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS); 2076 2077 fDrawBuffer = new GrInOrderDrawBuffer(fGpu, 2078 fDrawBufferVBAllocPool, 2079 fDrawBufferIBAllocPool); 2080#endif 2081 2082#if BATCH_RECT_TO_RECT 2083 fDrawBuffer->setQuadIndexBuffer(this->getQuadIndexBuffer()); 2084#endif 2085 fDrawBuffer->setAutoFlushTarget(fGpu); 2086} 2087 2088GrDrawTarget* GrContext::getTextTarget(const GrPaint& paint) { 2089 GrDrawTarget* target; 2090#if DEFER_TEXT_RENDERING 2091 target = prepareToDraw(paint, kText_DrawCategory); 2092#else 2093 target = prepareToDraw(paint, kUnbuffered_DrawCategory); 2094#endif 2095 this->setPaint(paint, target); 2096 return target; 2097} 2098 2099const GrIndexBuffer* GrContext::getQuadIndexBuffer() const { 2100 return fGpu->getQuadIndexBuffer(); 2101} 2102 2103GrTexture* GrContext::gaussianBlur(GrTexture* srcTexture, 2104 GrAutoScratchTexture* temp1, 2105 GrAutoScratchTexture* temp2, 2106 const SkRect& rect, 2107 float sigmaX, float sigmaY) { 2108 ASSERT_OWNED_RESOURCE(srcTexture); 2109 GrRenderTarget* oldRenderTarget = this->getRenderTarget(); 2110 GrClip oldClip = this->getClip(); 2111 GrTexture* origTexture = srcTexture; 2112 GrAutoMatrix avm(this, GrMatrix::I()); 2113 SkIRect clearRect; 2114 int scaleFactorX, 
halfWidthX, kernelWidthX; 2115 int scaleFactorY, halfWidthY, kernelWidthY; 2116 sigmaX = adjust_sigma(sigmaX, &scaleFactorX, &halfWidthX, &kernelWidthX); 2117 sigmaY = adjust_sigma(sigmaY, &scaleFactorY, &halfWidthY, &kernelWidthY); 2118 2119 SkRect srcRect(rect); 2120 scale_rect(&srcRect, 1.0f / scaleFactorX, 1.0f / scaleFactorY); 2121 srcRect.roundOut(); 2122 scale_rect(&srcRect, scaleFactorX, scaleFactorY); 2123 this->setClip(srcRect); 2124 2125 const GrTextureDesc desc = { 2126 kRenderTarget_GrTextureFlagBit | kNoStencil_GrTextureFlagBit, 2127 srcRect.width(), 2128 srcRect.height(), 2129 kRGBA_8888_GrPixelConfig, 2130 {0} // samples 2131 }; 2132 2133 temp1->set(this, desc); 2134 if (temp2) temp2->set(this, desc); 2135 2136 GrTexture* dstTexture = temp1->texture(); 2137 GrPaint paint; 2138 paint.reset(); 2139 paint.textureSampler(0)->setFilter(GrSamplerState::kBilinear_Filter); 2140 2141 for (int i = 1; i < scaleFactorX || i < scaleFactorY; i *= 2) { 2142 paint.textureSampler(0)->matrix()->setIDiv(srcTexture->width(), 2143 srcTexture->height()); 2144 this->setRenderTarget(dstTexture->asRenderTarget()); 2145 SkRect dstRect(srcRect); 2146 scale_rect(&dstRect, i < scaleFactorX ? 0.5f : 1.0f, 2147 i < scaleFactorY ? 0.5f : 1.0f); 2148 paint.setTexture(0, srcTexture); 2149 this->drawRectToRect(paint, dstRect, srcRect); 2150 srcRect = dstRect; 2151 SkTSwap(srcTexture, dstTexture); 2152 // If temp2 is non-NULL, don't render back to origTexture 2153 if (temp2 && dstTexture == origTexture) dstTexture = temp2->texture(); 2154 } 2155 2156 if (sigmaX > 0.0f) { 2157 SkAutoTMalloc<float> kernelStorageX(kernelWidthX); 2158 float* kernelX = kernelStorageX.get(); 2159 build_kernel(sigmaX, kernelX, kernelWidthX); 2160 2161 if (scaleFactorX > 1) { 2162 // Clear out a halfWidth to the right of the srcRect to prevent the 2163 // X convolution from reading garbage. 
2164 clearRect = SkIRect::MakeXYWH( 2165 srcRect.fRight, srcRect.fTop, halfWidthX, srcRect.height()); 2166 this->clear(&clearRect, 0x0); 2167 } 2168 2169 this->setRenderTarget(dstTexture->asRenderTarget()); 2170 convolve(fGpu, srcTexture, srcRect, kernelX, kernelWidthX, 2171 GrSamplerState::kX_FilterDirection); 2172 SkTSwap(srcTexture, dstTexture); 2173 if (temp2 && dstTexture == origTexture) dstTexture = temp2->texture(); 2174 } 2175 2176 if (sigmaY > 0.0f) { 2177 SkAutoTMalloc<float> kernelStorageY(kernelWidthY); 2178 float* kernelY = kernelStorageY.get(); 2179 build_kernel(sigmaY, kernelY, kernelWidthY); 2180 2181 if (scaleFactorY > 1 || sigmaX > 0.0f) { 2182 // Clear out a halfWidth below the srcRect to prevent the Y 2183 // convolution from reading garbage. 2184 clearRect = SkIRect::MakeXYWH( 2185 srcRect.fLeft, srcRect.fBottom, srcRect.width(), halfWidthY); 2186 this->clear(&clearRect, 0x0); 2187 } 2188 2189 this->setRenderTarget(dstTexture->asRenderTarget()); 2190 convolve(fGpu, srcTexture, srcRect, kernelY, kernelWidthY, 2191 GrSamplerState::kY_FilterDirection); 2192 SkTSwap(srcTexture, dstTexture); 2193 if (temp2 && dstTexture == origTexture) dstTexture = temp2->texture(); 2194 } 2195 2196 if (scaleFactorX > 1 || scaleFactorY > 1) { 2197 // Clear one pixel to the right and below, to accommodate bilinear 2198 // upsampling. 2199 clearRect = SkIRect::MakeXYWH( 2200 srcRect.fLeft, srcRect.fBottom, srcRect.width() + 1, 1); 2201 this->clear(&clearRect, 0x0); 2202 clearRect = SkIRect::MakeXYWH( 2203 srcRect.fRight, srcRect.fTop, 1, srcRect.height()); 2204 this->clear(&clearRect, 0x0); 2205 // FIXME: This should be mitchell, not bilinear. 
2206 paint.textureSampler(0)->setFilter(GrSamplerState::kBilinear_Filter); 2207 paint.textureSampler(0)->matrix()->setIDiv(srcTexture->width(), 2208 srcTexture->height()); 2209 this->setRenderTarget(dstTexture->asRenderTarget()); 2210 paint.setTexture(0, srcTexture); 2211 SkRect dstRect(srcRect); 2212 scale_rect(&dstRect, scaleFactorX, scaleFactorY); 2213 this->drawRectToRect(paint, dstRect, srcRect); 2214 srcRect = dstRect; 2215 SkTSwap(srcTexture, dstTexture); 2216 } 2217 this->setRenderTarget(oldRenderTarget); 2218 this->setClip(oldClip); 2219 return srcTexture; 2220} 2221 2222GrTexture* GrContext::applyMorphology(GrTexture* srcTexture, 2223 const GrRect& rect, 2224 GrTexture* temp1, GrTexture* temp2, 2225 GrSamplerState::Filter filter, 2226 SkISize radius) { 2227 ASSERT_OWNED_RESOURCE(srcTexture); 2228 GrRenderTarget* oldRenderTarget = this->getRenderTarget(); 2229 GrAutoMatrix avm(this, GrMatrix::I()); 2230 GrClip oldClip = this->getClip(); 2231 this->setClip(GrRect::MakeWH(srcTexture->width(), srcTexture->height())); 2232 if (radius.fWidth > 0) { 2233 this->setRenderTarget(temp1->asRenderTarget()); 2234 apply_morphology(fGpu, srcTexture, rect, radius.fWidth, filter, 2235 GrSamplerState::kX_FilterDirection); 2236 SkIRect clearRect = SkIRect::MakeXYWH(rect.fLeft, rect.fBottom, 2237 rect.width(), radius.fHeight); 2238 this->clear(&clearRect, 0x0); 2239 srcTexture = temp1; 2240 } 2241 if (radius.fHeight > 0) { 2242 this->setRenderTarget(temp2->asRenderTarget()); 2243 apply_morphology(fGpu, srcTexture, rect, radius.fHeight, filter, 2244 GrSamplerState::kY_FilterDirection); 2245 srcTexture = temp2; 2246 } 2247 this->setRenderTarget(oldRenderTarget); 2248 this->setClip(oldClip); 2249 return srcTexture; 2250} 2251 2252/////////////////////////////////////////////////////////////////////////////// 2253