GrContext.cpp revision f103cd85392e30d2a534a3a62a034e79abff2d4f
/*
 * Copyright 2011 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrContext.h"

#include "GrAARectRenderer.h"
#include "GrAtlasTextContext.h"
#include "GrBatch.h"
#include "GrBatchFontCache.h"
#include "GrBatchTarget.h"
#include "GrBufferAllocPool.h"
#include "GrDefaultGeoProcFactory.h"
#include "GrFontCache.h"
#include "GrGpuResource.h"
#include "GrGpuResourcePriv.h"
#include "GrDistanceFieldTextContext.h"
#include "GrDrawTargetCaps.h"
#include "GrGpu.h"
#include "GrIndexBuffer.h"
#include "GrInOrderDrawBuffer.h"
#include "GrLayerCache.h"
#include "GrOvalRenderer.h"
#include "GrPathRenderer.h"
#include "GrPathUtils.h"
#include "GrRenderTargetPriv.h"
#include "GrResourceCache.h"
#include "GrSoftwarePathRenderer.h"
#include "GrStencilAndCoverTextContext.h"
#include "GrStrokeInfo.h"
#include "GrSurfacePriv.h"
#include "GrTextBlobCache.h"
#include "GrTexturePriv.h"
#include "GrTraceMarker.h"
#include "GrTracing.h"
#include "SkDashPathPriv.h"
#include "SkConfig8888.h"
#include "SkGr.h"
#include "SkRRect.h"
#include "SkStrokeRec.h"
#include "SkTLazy.h"
#include "SkTLS.h"
#include "SkTraceEvent.h"

#include "effects/GrConfigConversionEffect.h"
#include "effects/GrDashingEffect.h"
#include "effects/GrSingleTextureEffect.h"

// Sizing for the pools backing the in-order draw buffer's vertex/index data.
static const size_t DRAW_BUFFER_VBPOOL_BUFFER_SIZE = 1 << 15;
static const int DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS = 4;

static const size_t DRAW_BUFFER_IBPOOL_BUFFER_SIZE = 1 << 11;
static const int DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS = 4;

#define ASSERT_OWNED_RESOURCE(R) SkASSERT(!(R) || (R)->getContext() == this)
// fDrawBuffer is deleted and nulled when the context is abandoned; these
// guards turn the public entry points into no-ops in that state.
#define RETURN_IF_ABANDONED if (!fDrawBuffer) { return; }
#define RETURN_FALSE_IF_ABANDONED if (!fDrawBuffer) { return false; }
#define RETURN_NULL_IF_ABANDONED if (!fDrawBuffer) { return NULL; }

// RAII helper: when destroyed, flushes the context if the resource cache has
// requested a flush to get back under budget (see OverBudgetCB below).
class GrContext::AutoCheckFlush {
public:
    AutoCheckFlush(GrContext* context) : fContext(context) { SkASSERT(context); }

    ~AutoCheckFlush() {
        if (fContext->fFlushToReduceCacheSize) {
            fContext->flush();
        }
    }

private:
    GrContext* fContext;
};

// Factory for a context wrapping the given backend 3D API; returns NULL on
// failure. A NULL opts selects default Options.
GrContext* GrContext::Create(GrBackend backend, GrBackendContext backendContext,
                             const Options* opts) {
    GrContext* context;
    if (NULL == opts) {
        context = SkNEW_ARGS(GrContext, (Options()));
    } else {
        context = SkNEW_ARGS(GrContext, (*opts));
    }

    if (context->init(backend, backendContext)) {
        return context;
    } else {
        context->unref();
        return NULL;
    }
}

// All heavy initialization happens in init()/initCommon(); the constructor
// only zeroes members so a failed init leaves a safely destructible object.
GrContext::GrContext(const Options& opts) : fOptions(opts) {
    fGpu = NULL;
    fPathRendererChain = NULL;
    fSoftwarePathRenderer = NULL;
    fResourceCache = NULL;
    fBatchFontCache = NULL;
    fFontCache = NULL;
    fDrawBuffer = NULL;
    fDrawBufferVBAllocPool = NULL;
    fDrawBufferIBAllocPool = NULL;
    fFlushToReduceCacheSize = false;
    fAARectRenderer = NULL;
    fOvalRenderer = NULL;
    fMaxTextureSizeOverride = 1 << 20;
}

// Creates the backend GrGpu; returns false (leaving fGpu NULL) on failure.
bool GrContext::init(GrBackend backend, GrBackendContext backendContext) {
    SkASSERT(NULL == fGpu);

    fGpu = GrGpu::Create(backend, backendContext, this);
    if (NULL == fGpu) {
        return false;
    }
    this->initCommon();
    return true;
}

// Backend-independent setup: caches, renderers, and the draw buffer.
void GrContext::initCommon() {
    fResourceCache = SkNEW(GrResourceCache);
    fResourceCache->setOverBudgetCallback(OverBudgetCB, this);

    fFontCache = SkNEW_ARGS(GrFontCache, (fGpu));

    fLayerCache.reset(SkNEW_ARGS(GrLayerCache, (this)));

    fAARectRenderer = SkNEW_ARGS(GrAARectRenderer, (fGpu));
    fOvalRenderer = SkNEW_ARGS(GrOvalRenderer, (fGpu));

    fDidTestPMConversions = false;

    this->setupDrawBuffer();

    // GrBatchFontCache will eventually replace GrFontCache
    fBatchFontCache = SkNEW_ARGS(GrBatchFontCache, (this));

    fTextBlobCache.reset(SkNEW_ARGS(GrTextBlobCache, (TextBlobCacheOverBudgetCB, this)));
}

GrContext::~GrContext() {
    // A NULL fGpu means init() failed; nothing below was ever created.
    if (NULL == fGpu) {
        return;
    }

    this->flush();

    // Run client-registered cleanup callbacks before tearing anything down.
    for (int i = 0; i < fCleanUpData.count(); ++i) {
        (*fCleanUpData[i].fFunc)(this, fCleanUpData[i].fInfo);
    }

    SkDELETE(fResourceCache);
    SkDELETE(fBatchFontCache);
    SkDELETE(fFontCache);
    SkDELETE(fDrawBuffer);
    SkDELETE(fDrawBufferVBAllocPool);
    SkDELETE(fDrawBufferIBAllocPool);

    fAARectRenderer->unref();
    fOvalRenderer->unref();

    fGpu->unref();
    SkSafeUnref(fPathRendererChain);
    SkSafeUnref(fSoftwarePathRenderer);
}

void GrContext::abandonContext() {
    // Abandon the cache first so destructors don't try to free the
    // resources in the (now unusable) backend API.
    fResourceCache->abandonAll();

    fGpu->contextAbandoned();

    // a path renderer may be holding onto resources that
    // are now unusable
    SkSafeSetNull(fPathRendererChain);
    SkSafeSetNull(fSoftwarePathRenderer);

    // Deleting fDrawBuffer also makes the RETURN_*_IF_ABANDONED guards fire.
    delete fDrawBuffer;
    fDrawBuffer = NULL;

    delete fDrawBufferVBAllocPool;
    fDrawBufferVBAllocPool = NULL;

    delete fDrawBufferIBAllocPool;
    fDrawBufferIBAllocPool = NULL;

    fAARectRenderer->reset();
    fOvalRenderer->reset();

    fBatchFontCache->freeAll();
    fFontCache->freeAll();
    fLayerCache->freeAll();
}

void GrContext::resetContext(uint32_t state) {
    fGpu->markContextDirty(state);
}

// Releases GPU resources that can be recreated on demand; the context
// remains fully usable (unlike abandonContext()).
void GrContext::freeGpuResources() {
    this->flush();

    if (fDrawBuffer) {
        fDrawBuffer->purgeResources();
    }

    fAARectRenderer->reset();
    fOvalRenderer->reset();

    fBatchFontCache->freeAll();
    fFontCache->freeAll();
    fLayerCache->freeAll();
    // a path renderer may be holding onto resources
    SkSafeSetNull(fPathRendererChain);
    SkSafeSetNull(fSoftwarePathRenderer);

    fResourceCache->purgeAllUnlocked();
}

// Reports budgeted resource count/bytes; either out-param may be NULL.
void GrContext::getResourceCacheUsage(int* resourceCount, size_t* resourceBytes)
const {
    if (resourceCount) {
        *resourceCount = fResourceCache->getBudgetedResourceCount();
    }
    if (resourceBytes) {
        *resourceBytes = fResourceCache->getBudgetedResourceBytes();
    }
}

// Picks a text rendering context: NV path rendering (stencil-and-cover) when
// supported and the target is multisampled, otherwise an atlas- or
// distance-field-based context depending on the build flag.
GrTextContext* GrContext::createTextContext(GrRenderTarget* renderTarget,
                                            SkGpuDevice* gpuDevice,
                                            const SkDeviceProperties&
                                            leakyProperties,
                                            bool enableDistanceFieldFonts) {
    if (fGpu->caps()->pathRenderingSupport() && renderTarget->isMultisampled()) {
        GrStencilBuffer* sb = renderTarget->renderTargetPriv().attachStencilBuffer();
        if (sb) {
            return GrStencilAndCoverTextContext::Create(this, gpuDevice, leakyProperties);
        }
    }

#ifdef USE_BITMAP_TEXTBLOBS
    return GrAtlasTextContext::Create(this, gpuDevice, leakyProperties);
#else
    return GrDistanceFieldTextContext::Create(this, gpuDevice, leakyProperties,
                                              enableDistanceFieldFonts);
#endif
}

////////////////////////////////////////////////////////////////////////////////
// Internal flags controlling scratch texture lookup in
// internalRefScratchTexture().
enum ScratchTextureFlags {
    kExact_ScratchTextureFlag       = 0x1,
    kNoPendingIO_ScratchTextureFlag = 0x2,
    kNoCreate_ScratchTextureFlag    = 0x4,
};

bool GrContext::isConfigTexturable(GrPixelConfig config) const {
    return fGpu->caps()->isConfigTexturable(config);
}

bool GrContext::npotTextureTileSupport() const {
    return fGpu->caps()->npotTextureTileSupport();
}

// Creates a texture, preferring to recycle an exact-match scratch texture
// (uncompressed configs only). Falls back to a fresh backend allocation.
GrTexture* GrContext::createTexture(const GrSurfaceDesc& desc, bool budgeted, const void* srcData,
                                    size_t rowBytes) {
    RETURN_NULL_IF_ABANDONED
    if ((desc.fFlags & kRenderTarget_GrSurfaceFlag) &&
        !this->isConfigRenderable(desc.fConfig, desc.fSampleCnt > 0)) {
        return NULL;
    }
    if (!GrPixelConfigIsCompressed(desc.fConfig)) {
        // kNoCreate: only reuse an existing scratch texture here; creation is
        // handled by the fallback below.
        static const uint32_t kFlags = kExact_ScratchTextureFlag |
                                       kNoCreate_ScratchTextureFlag;
        if (GrTexture* texture = this->internalRefScratchTexture(desc, kFlags)) {
            if (!srcData || texture->writePixels(0, 0, desc.fWidth, desc.fHeight, desc.fConfig,
                                                 srcData, rowBytes)) {
                if (!budgeted) {
                    texture->resourcePriv().makeUnbudgeted();
                }
                return texture;
            }
            // writePixels failed; drop the recycled texture and allocate fresh.
            texture->unref();
        }
    }
    return fGpu->createTexture(desc, budgeted, srcData, rowBytes);
}

GrTexture* GrContext::refScratchTexture(const GrSurfaceDesc& desc, ScratchTexMatch match,
                                        bool calledDuringFlush) {
    RETURN_NULL_IF_ABANDONED
    // Currently we don't recycle compressed textures as scratch.
    if (GrPixelConfigIsCompressed(desc.fConfig)) {
        return NULL;
    } else {
        uint32_t flags = 0;
        if (kExact_ScratchTexMatch == match) {
            flags |= kExact_ScratchTextureFlag;
        }
        if (calledDuringFlush) {
            flags |= kNoPendingIO_ScratchTextureFlag;
        }
        return this->internalRefScratchTexture(desc, flags);
    }
}

// Looks up (and optionally creates) a scratch texture matching inDesc,
// honoring the ScratchTextureFlags above.
GrTexture* GrContext::internalRefScratchTexture(const GrSurfaceDesc& inDesc, uint32_t flags) {
    SkASSERT(!GrPixelConfigIsCompressed(inDesc.fConfig));

    SkTCopyOnFirstWrite<GrSurfaceDesc> desc(inDesc);

    if (fGpu->caps()->reuseScratchTextures() || (desc->fFlags & kRenderTarget_GrSurfaceFlag)) {
        if (!(kExact_ScratchTextureFlag & flags)) {
            // bin by pow2 with a reasonable min
            static const int MIN_SIZE = 16;
            GrSurfaceDesc* wdesc = desc.writable();
            wdesc->fWidth  = SkTMax(MIN_SIZE, GrNextPow2(desc->fWidth));
            wdesc->fHeight = SkTMax(MIN_SIZE, GrNextPow2(desc->fHeight));
        }

        GrScratchKey key;
        GrTexturePriv::ComputeScratchKey(*desc, &key);
        uint32_t scratchFlags = 0;
        if (kNoPendingIO_ScratchTextureFlag & flags) {
            scratchFlags = GrResourceCache::kRequireNoPendingIO_ScratchFlag;
        } else if (!(desc->fFlags & kRenderTarget_GrSurfaceFlag)) {
            // If it is not a render target then it will most likely be populated by
            // writePixels() which will trigger a flush if the texture has pending IO.
            scratchFlags = GrResourceCache::kPreferNoPendingIO_ScratchFlag;
        }
        GrGpuResource* resource = fResourceCache->findAndRefScratchResource(key, scratchFlags);
        if (resource) {
            GrSurface* surface = static_cast<GrSurface*>(resource);
            GrRenderTarget* rt = surface->asRenderTarget();
            if (rt && fGpu->caps()->discardRenderTargetSupport()) {
                rt->discard();
            }
            return surface->asTexture();
        }
    }

    if (!(kNoCreate_ScratchTextureFlag & flags)) {
        return fGpu->createTexture(*desc, true, NULL, 0);
    }

    return NULL;
}

// GrResourceCache over-budget callback: defers the flush to the next
// AutoCheckFlush destruction rather than flushing mid-draw.
void GrContext::OverBudgetCB(void* data) {
    SkASSERT(data);

    GrContext* context = reinterpret_cast<GrContext*>(data);

    // Flush the InOrderDrawBuffer to possibly free up some textures
    context->fFlushToReduceCacheSize = true;
}

void GrContext::TextBlobCacheOverBudgetCB(void* data) {
    SkASSERT(data);

    // Unlike the GrResourceCache, TextBlobs are drawn at the SkGpuDevice level, therefore they
    // cannot use fFlushToReduceCacheSize because it uses AutoCheckFlush.  The solution is to move
    // drawText calls to below the GrContext level, but this is not trivial because they call
    // drawPath on SkGpuDevice
    GrContext* context = reinterpret_cast<GrContext*>(data);
    context->flush();
}

int GrContext::getMaxTextureSize() const {
    // fMaxTextureSizeOverride lets tests/clients artificially cap the size.
    return SkTMin(fGpu->caps()->maxTextureSize(), fMaxTextureSizeOverride);
}

int GrContext::getMaxRenderTargetSize() const {
    return fGpu->caps()->maxRenderTargetSize();
}

int GrContext::getMaxSampleCount() const {
    return fGpu->caps()->maxSampleCount();
}

///////////////////////////////////////////////////////////////////////////////

GrTexture* GrContext::wrapBackendTexture(const GrBackendTextureDesc& desc) {
    RETURN_NULL_IF_ABANDONED
    return fGpu->wrapBackendTexture(desc);
}

GrRenderTarget* GrContext::wrapBackendRenderTarget(const GrBackendRenderTargetDesc& desc) {
    RETURN_NULL_IF_ABANDONED
    return fGpu->wrapBackendRenderTarget(desc);
}

////////////////////////////////////////////////////////////////////////////////

// Clears rect (or the whole target when rect is NULL) to color.
void GrContext::clear(const SkIRect* rect,
                      const GrColor color,
                      bool canIgnoreRect,
                      GrRenderTarget* renderTarget) {
    RETURN_IF_ABANDONED
    ASSERT_OWNED_RESOURCE(renderTarget);
    SkASSERT(renderTarget);

    AutoCheckFlush acf(this);
    GR_CREATE_TRACE_MARKER_CONTEXT("GrContext::clear", this);
    GrDrawTarget* target = this->prepareToDraw();
    if (NULL == target) {
        return;
    }
    target->clear(rect, color, canIgnoreRect, renderTarget);
}

// Fills the entire clip with the paint, mapping the render-target bounds back
// through the inverse view matrix.
void GrContext::drawPaint(GrRenderTarget* rt,
                          const GrClip& clip,
                          const GrPaint& origPaint,
                          const SkMatrix& viewMatrix) {
    RETURN_IF_ABANDONED
    // set rect to be big enough to fill the space, but not super-huge, so we
    // don't overflow fixed-point implementations
    SkRect r;
    r.setLTRB(0, 0,
              SkIntToScalar(rt->width()),
              SkIntToScalar(rt->height()));

SkTCopyOnFirstWrite<GrPaint> paint(origPaint); 425 426 // by definition this fills the entire clip, no need for AA 427 if (paint->isAntiAlias()) { 428 paint.writable()->setAntiAlias(false); 429 } 430 431 bool isPerspective = viewMatrix.hasPerspective(); 432 433 // We attempt to map r by the inverse matrix and draw that. mapRect will 434 // map the four corners and bound them with a new rect. This will not 435 // produce a correct result for some perspective matrices. 436 if (!isPerspective) { 437 SkMatrix inverse; 438 if (!viewMatrix.invert(&inverse)) { 439 SkDebugf("Could not invert matrix\n"); 440 return; 441 } 442 inverse.mapRect(&r); 443 this->drawRect(rt, clip, *paint, viewMatrix, r); 444 } else { 445 SkMatrix localMatrix; 446 if (!viewMatrix.invert(&localMatrix)) { 447 SkDebugf("Could not invert matrix\n"); 448 return; 449 } 450 451 AutoCheckFlush acf(this); 452 GrPipelineBuilder pipelineBuilder; 453 GrDrawTarget* target = this->prepareToDraw(&pipelineBuilder, rt, clip, paint, &acf); 454 if (NULL == target) { 455 return; 456 } 457 458 GR_CREATE_TRACE_MARKER("GrContext::drawPaintWithPerspective", target); 459 target->drawRect(&pipelineBuilder, 460 paint->getColor(), 461 SkMatrix::I(), 462 r, 463 NULL, 464 &localMatrix); 465 } 466} 467 468#ifdef SK_DEVELOPER 469void GrContext::dumpFontCache() const { 470 fFontCache->dump(); 471} 472#endif 473 474//////////////////////////////////////////////////////////////////////////////// 475 476static inline bool is_irect(const SkRect& r) { 477 return SkScalarIsInt(r.fLeft) && SkScalarIsInt(r.fTop) && 478 SkScalarIsInt(r.fRight) && SkScalarIsInt(r.fBottom); 479} 480 481static bool apply_aa_to_rect(GrDrawTarget* target, 482 GrPipelineBuilder* pipelineBuilder, 483 SkRect* devBoundRect, 484 const SkRect& rect, 485 SkScalar strokeWidth, 486 const SkMatrix& combinedMatrix, 487 GrColor color) { 488 if (pipelineBuilder->getRenderTarget()->isMultisampled()) { 489 return false; 490 } 491 492#if defined(SHADER_AA_FILL_RECT) || 
!defined(IGNORE_ROT_AA_RECT_OPT) 493 if (strokeWidth >= 0) { 494#endif 495 if (!combinedMatrix.preservesAxisAlignment()) { 496 return false; 497 } 498 499#if defined(SHADER_AA_FILL_RECT) || !defined(IGNORE_ROT_AA_RECT_OPT) 500 } else { 501 if (!combinedMatrix.preservesRightAngles()) { 502 return false; 503 } 504 } 505#endif 506 507 combinedMatrix.mapRect(devBoundRect, rect); 508 if (!combinedMatrix.rectStaysRect()) { 509 return true; 510 } 511 512 if (strokeWidth < 0) { 513 return !is_irect(*devBoundRect); 514 } 515 516 return true; 517} 518 519static inline bool rect_contains_inclusive(const SkRect& rect, const SkPoint& point) { 520 return point.fX >= rect.fLeft && point.fX <= rect.fRight && 521 point.fY >= rect.fTop && point.fY <= rect.fBottom; 522} 523 524class StrokeRectBatch : public GrBatch { 525public: 526 struct Geometry { 527 GrColor fColor; 528 SkMatrix fViewMatrix; 529 SkRect fRect; 530 SkScalar fStrokeWidth; 531 }; 532 533 static GrBatch* Create(const Geometry& geometry) { 534 return SkNEW_ARGS(StrokeRectBatch, (geometry)); 535 } 536 537 const char* name() const override { return "StrokeRectBatch"; } 538 539 void getInvariantOutputColor(GrInitInvariantOutput* out) const override { 540 // When this is called on a batch, there is only one geometry bundle 541 out->setKnownFourComponents(fGeoData[0].fColor); 542 } 543 544 void getInvariantOutputCoverage(GrInitInvariantOutput* out) const override { 545 out->setKnownSingleComponent(0xff); 546 } 547 548 void initBatchTracker(const GrPipelineInfo& init) override { 549 // Handle any color overrides 550 if (init.fColorIgnored) { 551 fGeoData[0].fColor = GrColor_ILLEGAL; 552 } else if (GrColor_ILLEGAL != init.fOverrideColor) { 553 fGeoData[0].fColor = init.fOverrideColor; 554 } 555 556 // setup batch properties 557 fBatch.fColorIgnored = init.fColorIgnored; 558 fBatch.fColor = fGeoData[0].fColor; 559 fBatch.fUsesLocalCoords = init.fUsesLocalCoords; 560 fBatch.fCoverageIgnored = init.fCoverageIgnored; 561 } 562 563 
void generateGeometry(GrBatchTarget* batchTarget, const GrPipeline* pipeline) override { 564 SkAutoTUnref<const GrGeometryProcessor> gp( 565 GrDefaultGeoProcFactory::Create(GrDefaultGeoProcFactory::kPosition_GPType, 566 this->color(), 567 this->viewMatrix(), 568 SkMatrix::I())); 569 570 batchTarget->initDraw(gp, pipeline); 571 572 // TODO this is hacky, but the only way we have to initialize the GP is to use the 573 // GrPipelineInfo struct so we can generate the correct shader. Once we have GrBatch 574 // everywhere we can remove this nastiness 575 GrPipelineInfo init; 576 init.fColorIgnored = fBatch.fColorIgnored; 577 init.fOverrideColor = GrColor_ILLEGAL; 578 init.fCoverageIgnored = fBatch.fCoverageIgnored; 579 init.fUsesLocalCoords = this->usesLocalCoords(); 580 gp->initBatchTracker(batchTarget->currentBatchTracker(), init); 581 582 size_t vertexStride = gp->getVertexStride(); 583 584 SkASSERT(vertexStride == sizeof(GrDefaultGeoProcFactory::PositionAttr)); 585 586 Geometry& args = fGeoData[0]; 587 588 int vertexCount = kVertsPerHairlineRect; 589 if (args.fStrokeWidth > 0) { 590 vertexCount = kVertsPerStrokeRect; 591 } 592 593 const GrVertexBuffer* vertexBuffer; 594 int firstVertex; 595 596 void* vertices = batchTarget->vertexPool()->makeSpace(vertexStride, 597 vertexCount, 598 &vertexBuffer, 599 &firstVertex); 600 601 if (!vertices) { 602 SkDebugf("Could not allocate vertices\n"); 603 return; 604 } 605 606 SkPoint* vertex = reinterpret_cast<SkPoint*>(vertices); 607 608 GrPrimitiveType primType; 609 610 if (args.fStrokeWidth > 0) {; 611 primType = kTriangleStrip_GrPrimitiveType; 612 args.fRect.sort(); 613 this->setStrokeRectStrip(vertex, args.fRect, args.fStrokeWidth); 614 } else { 615 // hairline 616 primType = kLineStrip_GrPrimitiveType; 617 vertex[0].set(args.fRect.fLeft, args.fRect.fTop); 618 vertex[1].set(args.fRect.fRight, args.fRect.fTop); 619 vertex[2].set(args.fRect.fRight, args.fRect.fBottom); 620 vertex[3].set(args.fRect.fLeft, args.fRect.fBottom); 
621 vertex[4].set(args.fRect.fLeft, args.fRect.fTop); 622 } 623 624 GrDrawTarget::DrawInfo drawInfo; 625 drawInfo.setPrimitiveType(primType); 626 drawInfo.setVertexBuffer(vertexBuffer); 627 drawInfo.setStartVertex(firstVertex); 628 drawInfo.setVertexCount(vertexCount); 629 drawInfo.setStartIndex(0); 630 drawInfo.setIndexCount(0); 631 drawInfo.setInstanceCount(0); 632 drawInfo.setVerticesPerInstance(0); 633 drawInfo.setIndicesPerInstance(0); 634 batchTarget->draw(drawInfo); 635 } 636 637 SkSTArray<1, Geometry, true>* geoData() { return &fGeoData; } 638 639private: 640 StrokeRectBatch(const Geometry& geometry) { 641 this->initClassID<StrokeRectBatch>(); 642 643 fBatch.fHairline = geometry.fStrokeWidth == 0; 644 645 fGeoData.push_back(geometry); 646 } 647 648 /* create a triangle strip that strokes the specified rect. There are 8 649 unique vertices, but we repeat the last 2 to close up. Alternatively we 650 could use an indices array, and then only send 8 verts, but not sure that 651 would be faster. 
652 */ 653 void setStrokeRectStrip(SkPoint verts[10], const SkRect& rect, SkScalar width) { 654 const SkScalar rad = SkScalarHalf(width); 655 // TODO we should be able to enable this assert, but we'd have to filter these draws 656 // this is a bug 657 //SkASSERT(rad < rect.width() / 2 && rad < rect.height() / 2); 658 659 verts[0].set(rect.fLeft + rad, rect.fTop + rad); 660 verts[1].set(rect.fLeft - rad, rect.fTop - rad); 661 verts[2].set(rect.fRight - rad, rect.fTop + rad); 662 verts[3].set(rect.fRight + rad, rect.fTop - rad); 663 verts[4].set(rect.fRight - rad, rect.fBottom - rad); 664 verts[5].set(rect.fRight + rad, rect.fBottom + rad); 665 verts[6].set(rect.fLeft + rad, rect.fBottom - rad); 666 verts[7].set(rect.fLeft - rad, rect.fBottom + rad); 667 verts[8] = verts[0]; 668 verts[9] = verts[1]; 669 } 670 671 672 GrColor color() const { return fBatch.fColor; } 673 bool usesLocalCoords() const { return fBatch.fUsesLocalCoords; } 674 bool colorIgnored() const { return fBatch.fColorIgnored; } 675 const SkMatrix& viewMatrix() const { return fGeoData[0].fViewMatrix; } 676 bool hairline() const { return fBatch.fHairline; } 677 678 bool onCombineIfPossible(GrBatch* t) override { 679 // StrokeRectBatch* that = t->cast<StrokeRectBatch>(); 680 681 // NonAA stroke rects cannot batch right now 682 // TODO make these batchable 683 return false; 684 } 685 686 struct BatchTracker { 687 GrColor fColor; 688 bool fUsesLocalCoords; 689 bool fColorIgnored; 690 bool fCoverageIgnored; 691 bool fHairline; 692 }; 693 694 const static int kVertsPerHairlineRect = 5; 695 const static int kVertsPerStrokeRect = 10; 696 697 BatchTracker fBatch; 698 SkSTArray<1, Geometry, true> fGeoData; 699}; 700 701void GrContext::drawRect(GrRenderTarget* rt, 702 const GrClip& clip, 703 const GrPaint& paint, 704 const SkMatrix& viewMatrix, 705 const SkRect& rect, 706 const GrStrokeInfo* strokeInfo) { 707 RETURN_IF_ABANDONED 708 if (strokeInfo && strokeInfo->isDashed()) { 709 SkPath path; 710 
        path.addRect(rect);
        this->drawPath(rt, clip, paint, viewMatrix, path, *strokeInfo);
        return;
    }

    AutoCheckFlush acf(this);
    GrPipelineBuilder pipelineBuilder;
    GrDrawTarget* target = this->prepareToDraw(&pipelineBuilder, rt, clip, &paint, &acf);
    if (NULL == target) {
        return;
    }

    GR_CREATE_TRACE_MARKER("GrContext::drawRect", target);
    // Negative width signals a fill rather than a stroke.
    SkScalar width = NULL == strokeInfo ? -1 : strokeInfo->getStrokeRec().getWidth();

    // Check if this is a full RT draw and can be replaced with a clear. We don't bother checking
    // cases where the RT is fully inside a stroke.
    if (width < 0) {
        SkRect rtRect;
        pipelineBuilder.getRenderTarget()->getBoundsRect(&rtRect);
        SkRect clipSpaceRTRect = rtRect;
        bool checkClip = GrClip::kWideOpen_ClipType != clip.clipType();
        if (checkClip) {
            clipSpaceRTRect.offset(SkIntToScalar(clip.origin().fX),
                                   SkIntToScalar(clip.origin().fY));
        }
        // Does the clip contain the entire RT?
        if (!checkClip || clip.quickContains(clipSpaceRTRect)) {
            SkMatrix invM;
            if (!viewMatrix.invert(&invM)) {
                return;
            }
            // Does the rect bound the RT?
            SkPoint srcSpaceRTQuad[4];
            invM.mapRectToQuad(srcSpaceRTQuad, rtRect);
            if (rect_contains_inclusive(rect, srcSpaceRTQuad[0]) &&
                rect_contains_inclusive(rect, srcSpaceRTQuad[1]) &&
                rect_contains_inclusive(rect, srcSpaceRTQuad[2]) &&
                rect_contains_inclusive(rect, srcSpaceRTQuad[3])) {
                // Will it blend?
                GrColor clearColor;
                if (paint.isOpaqueAndConstantColor(&clearColor)) {
                    target->clear(NULL, clearColor, true, rt);
                    return;
                }
            }
        }
    }

    GrColor color = paint.getColor();
    SkRect devBoundRect;
    bool needAA = paint.isAntiAlias() && !pipelineBuilder.getRenderTarget()->isMultisampled();
    bool doAA = needAA && apply_aa_to_rect(target, &pipelineBuilder, &devBoundRect, rect, width,
                                           viewMatrix, color);

    if (doAA) {
        // Analytic AA path via GrAARectRenderer.
        if (width >= 0) {
            const SkStrokeRec& strokeRec = strokeInfo->getStrokeRec();
            fAARectRenderer->strokeAARect(target,
                                          &pipelineBuilder,
                                          color,
                                          viewMatrix,
                                          rect,
                                          devBoundRect,
                                          strokeRec);
        } else {
            // filled AA rect
            fAARectRenderer->fillAARect(target,
                                        &pipelineBuilder,
                                        color,
                                        viewMatrix,
                                        rect,
                                        devBoundRect);
        }
        return;
    }

    if (width >= 0) {
        // Non-AA stroke (or hairline when width == 0) via StrokeRectBatch.
        StrokeRectBatch::Geometry geometry;
        geometry.fViewMatrix = viewMatrix;
        geometry.fColor = color;
        geometry.fRect = rect;
        geometry.fStrokeWidth = width;

        SkAutoTUnref<GrBatch> batch(StrokeRectBatch::Create(geometry));

        // Outset the bounds by half the stroke so the full stroke is covered.
        SkRect bounds = rect;
        SkScalar rad = SkScalarHalf(width);
        bounds.outset(rad, rad);
        viewMatrix.mapRect(&bounds);
        target->drawBatch(&pipelineBuilder, batch, &bounds);
    } else {
        // filled BW rect
        target->drawSimpleRect(&pipelineBuilder, color, viewMatrix, rect);
    }
}

// Draws rectToDraw with localRect (optionally transformed by localMatrix)
// as the local/texture coordinates. Never antialiased.
void GrContext::drawNonAARectToRect(GrRenderTarget* rt,
                                    const GrClip& clip,
                                    const GrPaint& paint,
                                    const SkMatrix& viewMatrix,
                                    const SkRect& rectToDraw,
                                    const SkRect& localRect,
                                    const SkMatrix* localMatrix) {
    RETURN_IF_ABANDONED
    AutoCheckFlush acf(this);
    GrPipelineBuilder pipelineBuilder;
    GrDrawTarget* target = this->prepareToDraw(&pipelineBuilder, rt, clip, &paint, &acf);
    if (NULL == target) {
        return;
    }

    GR_CREATE_TRACE_MARKER("GrContext::drawRectToRect", target);

target->drawRect(&pipelineBuilder, 825 paint.getColor(), 826 viewMatrix, 827 rectToDraw, 828 &localRect, 829 localMatrix); 830} 831 832static const GrGeometryProcessor* set_vertex_attributes(bool hasLocalCoords, 833 bool hasColors, 834 int* colorOffset, 835 int* texOffset, 836 GrColor color, 837 const SkMatrix& viewMatrix) { 838 *texOffset = -1; 839 *colorOffset = -1; 840 uint32_t flags = GrDefaultGeoProcFactory::kPosition_GPType; 841 if (hasLocalCoords && hasColors) { 842 *colorOffset = sizeof(SkPoint); 843 *texOffset = sizeof(SkPoint) + sizeof(GrColor); 844 flags |= GrDefaultGeoProcFactory::kColor_GPType | 845 GrDefaultGeoProcFactory::kLocalCoord_GPType; 846 } else if (hasLocalCoords) { 847 *texOffset = sizeof(SkPoint); 848 flags |= GrDefaultGeoProcFactory::kLocalCoord_GPType; 849 } else if (hasColors) { 850 *colorOffset = sizeof(SkPoint); 851 flags |= GrDefaultGeoProcFactory::kColor_GPType; 852 } 853 return GrDefaultGeoProcFactory::Create(flags, color, viewMatrix, SkMatrix::I()); 854} 855 856class DrawVerticesBatch : public GrBatch { 857public: 858 struct Geometry { 859 GrColor fColor; 860 SkTDArray<SkPoint> fPositions; 861 SkTDArray<uint16_t> fIndices; 862 SkTDArray<GrColor> fColors; 863 SkTDArray<SkPoint> fLocalCoords; 864 }; 865 866 static GrBatch* Create(const Geometry& geometry, GrPrimitiveType primitiveType, 867 const SkMatrix& viewMatrix, 868 const SkPoint* positions, int vertexCount, 869 const uint16_t* indices, int indexCount, 870 const GrColor* colors, const SkPoint* localCoords) { 871 return SkNEW_ARGS(DrawVerticesBatch, (geometry, primitiveType, viewMatrix, positions, 872 vertexCount, indices, indexCount, colors, 873 localCoords)); 874 } 875 876 const char* name() const override { return "DrawVerticesBatch"; } 877 878 void getInvariantOutputColor(GrInitInvariantOutput* out) const override { 879 // When this is called on a batch, there is only one geometry bundle 880 if (this->hasColors()) { 881 out->setUnknownFourComponents(); 882 } else { 883 
out->setKnownFourComponents(fGeoData[0].fColor); 884 } 885 } 886 887 void getInvariantOutputCoverage(GrInitInvariantOutput* out) const override { 888 out->setKnownSingleComponent(0xff); 889 } 890 891 void initBatchTracker(const GrPipelineInfo& init) override { 892 // Handle any color overrides 893 if (init.fColorIgnored) { 894 fGeoData[0].fColor = GrColor_ILLEGAL; 895 } else if (GrColor_ILLEGAL != init.fOverrideColor) { 896 fGeoData[0].fColor = init.fOverrideColor; 897 } 898 899 // setup batch properties 900 fBatch.fColorIgnored = init.fColorIgnored; 901 fBatch.fColor = fGeoData[0].fColor; 902 fBatch.fUsesLocalCoords = init.fUsesLocalCoords; 903 fBatch.fCoverageIgnored = init.fCoverageIgnored; 904 } 905 906 void generateGeometry(GrBatchTarget* batchTarget, const GrPipeline* pipeline) override { 907 int colorOffset = -1, texOffset = -1; 908 SkAutoTUnref<const GrGeometryProcessor> gp( 909 set_vertex_attributes(this->hasLocalCoords(), this->hasColors(), &colorOffset, 910 &texOffset, this->color(), this->viewMatrix())); 911 912 batchTarget->initDraw(gp, pipeline); 913 914 // TODO this is hacky, but the only way we have to initialize the GP is to use the 915 // GrPipelineInfo struct so we can generate the correct shader. Once we have GrBatch 916 // everywhere we can remove this nastiness 917 GrPipelineInfo init; 918 init.fColorIgnored = fBatch.fColorIgnored; 919 init.fOverrideColor = GrColor_ILLEGAL; 920 init.fCoverageIgnored = fBatch.fCoverageIgnored; 921 init.fUsesLocalCoords = this->usesLocalCoords(); 922 gp->initBatchTracker(batchTarget->currentBatchTracker(), init); 923 924 size_t vertexStride = gp->getVertexStride(); 925 926 SkASSERT(vertexStride == sizeof(SkPoint) + (this->hasLocalCoords() ? sizeof(SkPoint) : 0) 927 + (this->hasColors() ? 
sizeof(GrColor) : 0)); 928 929 int instanceCount = fGeoData.count(); 930 931 const GrVertexBuffer* vertexBuffer; 932 int firstVertex; 933 934 void* vertices = batchTarget->vertexPool()->makeSpace(vertexStride, 935 this->vertexCount(), 936 &vertexBuffer, 937 &firstVertex); 938 939 if (!vertices) { 940 SkDebugf("Could not allocate vertices\n"); 941 return; 942 } 943 944 const GrIndexBuffer* indexBuffer; 945 int firstIndex; 946 947 void* indices = NULL; 948 if (this->hasIndices()) { 949 indices = batchTarget->indexPool()->makeSpace(this->indexCount(), 950 &indexBuffer, 951 &firstIndex); 952 953 if (!indices) { 954 SkDebugf("Could not allocate indices\n"); 955 return; 956 } 957 } 958 959 int indexOffset = 0; 960 int vertexOffset = 0; 961 for (int i = 0; i < instanceCount; i++) { 962 const Geometry& args = fGeoData[i]; 963 964 // TODO we can actually cache this interleaved and then just memcopy 965 if (this->hasIndices()) { 966 for (int j = 0; j < args.fIndices.count(); ++j, ++indexOffset) { 967 *((uint16_t*)indices + indexOffset) = args.fIndices[j] + vertexOffset; 968 } 969 } 970 971 for (int j = 0; j < args.fPositions.count(); ++j) { 972 *((SkPoint*)vertices) = args.fPositions[j]; 973 if (this->hasColors()) { 974 *(GrColor*)((intptr_t)vertices + colorOffset) = args.fColors[j]; 975 } 976 if (this->hasLocalCoords()) { 977 *(SkPoint*)((intptr_t)vertices + texOffset) = args.fLocalCoords[j]; 978 } 979 vertices = (void*)((intptr_t)vertices + vertexStride); 980 vertexOffset++; 981 } 982 } 983 984 GrDrawTarget::DrawInfo drawInfo; 985 drawInfo.setPrimitiveType(this->primitiveType()); 986 drawInfo.setVertexBuffer(vertexBuffer); 987 drawInfo.setStartVertex(firstVertex); 988 drawInfo.setVertexCount(this->vertexCount()); 989 if (this->hasIndices()) { 990 drawInfo.setIndexBuffer(indexBuffer); 991 drawInfo.setStartIndex(firstIndex); 992 drawInfo.setIndexCount(this->indexCount()); 993 } else { 994 drawInfo.setStartIndex(0); 995 drawInfo.setIndexCount(0); 996 } 997 
batchTarget->draw(drawInfo); 998 } 999 1000 SkSTArray<1, Geometry, true>* geoData() { return &fGeoData; } 1001 1002private: 1003 DrawVerticesBatch(const Geometry& geometry, GrPrimitiveType primitiveType, 1004 const SkMatrix& viewMatrix, 1005 const SkPoint* positions, int vertexCount, 1006 const uint16_t* indices, int indexCount, 1007 const GrColor* colors, const SkPoint* localCoords) { 1008 this->initClassID<DrawVerticesBatch>(); 1009 SkASSERT(positions); 1010 1011 fBatch.fViewMatrix = viewMatrix; 1012 Geometry& installedGeo = fGeoData.push_back(geometry); 1013 1014 installedGeo.fPositions.append(vertexCount, positions); 1015 if (indices) { 1016 installedGeo.fIndices.append(indexCount, indices); 1017 fBatch.fHasIndices = true; 1018 } else { 1019 fBatch.fHasIndices = false; 1020 } 1021 1022 if (colors) { 1023 installedGeo.fColors.append(vertexCount, colors); 1024 fBatch.fHasColors = true; 1025 } else { 1026 fBatch.fHasColors = false; 1027 } 1028 1029 if (localCoords) { 1030 installedGeo.fLocalCoords.append(vertexCount, localCoords); 1031 fBatch.fHasLocalCoords = true; 1032 } else { 1033 fBatch.fHasLocalCoords = false; 1034 } 1035 fBatch.fVertexCount = vertexCount; 1036 fBatch.fIndexCount = indexCount; 1037 fBatch.fPrimitiveType = primitiveType; 1038 } 1039 1040 GrPrimitiveType primitiveType() const { return fBatch.fPrimitiveType; } 1041 bool batchablePrimitiveType() const { 1042 return kTriangles_GrPrimitiveType == fBatch.fPrimitiveType || 1043 kLines_GrPrimitiveType == fBatch.fPrimitiveType || 1044 kPoints_GrPrimitiveType == fBatch.fPrimitiveType; 1045 } 1046 GrColor color() const { return fBatch.fColor; } 1047 bool usesLocalCoords() const { return fBatch.fUsesLocalCoords; } 1048 bool colorIgnored() const { return fBatch.fColorIgnored; } 1049 const SkMatrix& viewMatrix() const { return fBatch.fViewMatrix; } 1050 bool hasColors() const { return fBatch.fHasColors; } 1051 bool hasIndices() const { return fBatch.fHasIndices; } 1052 bool hasLocalCoords() const { return 
fBatch.fHasLocalCoords; }
    int vertexCount() const { return fBatch.fVertexCount; }
    int indexCount() const { return fBatch.fIndexCount; }

    // Attempts to fold 't' into this batch. Succeeds only when every
    // pipeline-relevant property matches; on success the other batch's
    // geometry is appended and the running vertex/index totals are updated.
    bool onCombineIfPossible(GrBatch* t) override {
        DrawVerticesBatch* that = t->cast<DrawVerticesBatch>();

        // Only triangles/lines/points are batchable, and only with the same
        // primitive type on both sides (see batchablePrimitiveType()).
        if (!this->batchablePrimitiveType() || this->primitiveType() != that->primitiveType()) {
            return false;
        }

        SkASSERT(this->usesLocalCoords() == that->usesLocalCoords());

        // We currently use a uniform viewmatrix for this batch
        if (!this->viewMatrix().cheapEqualTo(that->viewMatrix())) {
            return false;
        }

        if (this->hasColors() != that->hasColors()) {
            return false;
        }

        if (this->hasIndices() != that->hasIndices()) {
            return false;
        }

        if (this->hasLocalCoords() != that->hasLocalCoords()) {
            return false;
        }

        // Without per-vertex colors both batches must share one constant color.
        if (!this->hasColors() && this->color() != that->color()) {
            return false;
        }

        // With per-vertex colors a constant-color mismatch is tolerated;
        // poison fBatch.fColor so nobody relies on it after the merge.
        if (this->color() != that->color()) {
            fBatch.fColor = GrColor_ILLEGAL;
        }
        fGeoData.push_back_n(that->geoData()->count(), that->geoData()->begin());
        fBatch.fVertexCount += that->vertexCount();
        fBatch.fIndexCount += that->indexCount();
        return true;
    }

    // Aggregate state for the whole batch, as opposed to the per-draw
    // Geometry entries accumulated in fGeoData.
    struct BatchTracker {
        GrPrimitiveType fPrimitiveType;
        SkMatrix fViewMatrix;
        GrColor fColor;
        bool fUsesLocalCoords;
        bool fColorIgnored;
        bool fCoverageIgnored;
        bool fHasColors;
        bool fHasIndices;
        bool fHasLocalCoords;
        int fVertexCount;
        int fIndexCount;
    };

    BatchTracker fBatch;
    SkSTArray<1, Geometry, true> fGeoData;
};

// Draws a client-supplied vertex mesh (with optional per-vertex colors,
// texture coords and indices) by routing it through DrawVerticesBatch.
// 'texCoords' become the batch's local coords; 'indices'/'colors' may be NULL.
void GrContext::drawVertices(GrRenderTarget* rt,
                             const GrClip& clip,
                             const GrPaint& paint,
                             const SkMatrix& viewMatrix,
                             GrPrimitiveType primitiveType,
                             int vertexCount,
                             const SkPoint positions[],
                             const SkPoint texCoords[],
                             const GrColor colors[],
                             const uint16_t indices[],
                             int indexCount) {
    RETURN_IF_ABANDONED
    AutoCheckFlush acf(this);
    GrPipelineBuilder pipelineBuilder;
    GrDrawTarget::AutoReleaseGeometry geo; // must be inside AutoCheckFlush scope

    GrDrawTarget* target = this->prepareToDraw(&pipelineBuilder, rt, clip, &paint, &acf);
    if (NULL == target) {
        return;
    }

    GR_CREATE_TRACE_MARKER("GrContext::drawVertices", target);

    DrawVerticesBatch::Geometry geometry;
    geometry.fColor = paint.getColor();

    SkAutoTUnref<GrBatch> batch(DrawVerticesBatch::Create(geometry, primitiveType, viewMatrix,
                                                          positions, vertexCount, indices,
                                                          indexCount,colors, texCoords));

    // TODO figure out bounds
    target->drawBatch(&pipelineBuilder, batch, NULL);
}

///////////////////////////////////////////////////////////////////////////////

// Draws a round rect. Dashed strokes are converted to a path; otherwise the
// specialized oval renderer is tried first with a path fallback.
void GrContext::drawRRect(GrRenderTarget*rt,
                          const GrClip& clip,
                          const GrPaint& paint,
                          const SkMatrix& viewMatrix,
                          const SkRRect& rrect,
                          const GrStrokeInfo& strokeInfo) {
    RETURN_IF_ABANDONED
    if (rrect.isEmpty()) {
        return;
    }

    // Dashing can't go through the rrect fast path; fall back to drawPath,
    // which owns the dash-filtering logic.
    if (strokeInfo.isDashed()) {
        SkPath path;
        path.addRRect(rrect);
        this->drawPath(rt, clip, paint, viewMatrix, path, strokeInfo);
        return;
    }

    AutoCheckFlush acf(this);
    GrPipelineBuilder pipelineBuilder;
    GrDrawTarget* target = this->prepareToDraw(&pipelineBuilder, rt, clip, &paint, &acf);
    if (NULL == target) {
        return;
    }

    GR_CREATE_TRACE_MARKER("GrContext::drawRRect", target);

    const SkStrokeRec& strokeRec = strokeInfo.getStrokeRec();

    GrColor color = paint.getColor();
    // The oval renderer handles common rrect/stroke combinations on the GPU;
    // anything it rejects goes through the general path pipeline below.
    if (!fOvalRenderer->drawRRect(target,
                                  &pipelineBuilder,
                                  color,
                                  viewMatrix,
                                  paint.isAntiAlias(),
                                  rrect,
                                  strokeRec)) {
        SkPath path;
        path.addRRect(rrect);
        this->internalDrawPath(target, &pipelineBuilder, viewMatrix, color, paint.isAntiAlias(),
1189 path, strokeInfo); 1190 } 1191} 1192 1193/////////////////////////////////////////////////////////////////////////////// 1194 1195void GrContext::drawDRRect(GrRenderTarget* rt, 1196 const GrClip& clip, 1197 const GrPaint& paint, 1198 const SkMatrix& viewMatrix, 1199 const SkRRect& outer, 1200 const SkRRect& inner) { 1201 RETURN_IF_ABANDONED 1202 if (outer.isEmpty()) { 1203 return; 1204 } 1205 1206 AutoCheckFlush acf(this); 1207 GrPipelineBuilder pipelineBuilder; 1208 GrDrawTarget* target = this->prepareToDraw(&pipelineBuilder, rt, clip, &paint, &acf); 1209 1210 GR_CREATE_TRACE_MARKER("GrContext::drawDRRect", target); 1211 1212 GrColor color = paint.getColor(); 1213 if (!fOvalRenderer->drawDRRect(target, 1214 &pipelineBuilder, 1215 color, 1216 viewMatrix, 1217 paint.isAntiAlias(), 1218 outer, 1219 inner)) { 1220 SkPath path; 1221 path.addRRect(inner); 1222 path.addRRect(outer); 1223 path.setFillType(SkPath::kEvenOdd_FillType); 1224 1225 GrStrokeInfo fillRec(SkStrokeRec::kFill_InitStyle); 1226 this->internalDrawPath(target, &pipelineBuilder, viewMatrix, color, paint.isAntiAlias(), 1227 path, fillRec); 1228 } 1229} 1230 1231/////////////////////////////////////////////////////////////////////////////// 1232 1233void GrContext::drawOval(GrRenderTarget* rt, 1234 const GrClip& clip, 1235 const GrPaint& paint, 1236 const SkMatrix& viewMatrix, 1237 const SkRect& oval, 1238 const GrStrokeInfo& strokeInfo) { 1239 RETURN_IF_ABANDONED 1240 if (oval.isEmpty()) { 1241 return; 1242 } 1243 1244 if (strokeInfo.isDashed()) { 1245 SkPath path; 1246 path.addOval(oval); 1247 this->drawPath(rt, clip, paint, viewMatrix, path, strokeInfo); 1248 return; 1249 } 1250 1251 AutoCheckFlush acf(this); 1252 GrPipelineBuilder pipelineBuilder; 1253 GrDrawTarget* target = this->prepareToDraw(&pipelineBuilder, rt, clip, &paint, &acf); 1254 if (NULL == target) { 1255 return; 1256 } 1257 1258 GR_CREATE_TRACE_MARKER("GrContext::drawOval", target); 1259 1260 const SkStrokeRec& strokeRec = 
strokeInfo.getStrokeRec();

    GrColor color = paint.getColor();
    // Try the specialized oval renderer; fall back to the general path
    // pipeline when it can't handle this oval/stroke combination.
    if (!fOvalRenderer->drawOval(target,
                                 &pipelineBuilder,
                                 color,
                                 viewMatrix,
                                 paint.isAntiAlias(),
                                 oval,
                                 strokeRec)) {
        SkPath path;
        path.addOval(oval);
        this->internalDrawPath(target, &pipelineBuilder, viewMatrix, color, paint.isAntiAlias(),
                               path, strokeInfo);
    }
}

// Can 'path' be drawn as a pair of filled nested rectangles?
// On success fills rects[0] (outer) and rects[1] (inner).
// NOTE(review): 'target', 'pipelineBuilder' and 'color' are accepted but never
// read in this implementation — presumably kept for signature stability;
// confirm before removing.
static bool is_nested_rects(GrDrawTarget* target,
                            GrPipelineBuilder* pipelineBuilder,
                            GrColor color,
                            const SkMatrix& viewMatrix,
                            const SkPath& path,
                            const SkStrokeRec& stroke,
                            SkRect rects[2]) {
    SkASSERT(stroke.isFillStyle());

    if (path.isInverseFillType()) {
        return false;
    }

    // TODO: this restriction could be lifted if we were willing to apply
    // the matrix to all the points individually rather than just to the rect
    if (!viewMatrix.preservesAxisAlignment()) {
        return false;
    }

    SkPath::Direction dirs[2];
    if (!path.isNestedFillRects(rects, dirs)) {
        return false;
    }

    if (SkPath::kWinding_FillType == path.getFillType() && dirs[0] == dirs[1]) {
        // The two rects need to be wound opposite to each other
        return false;
    }

    // Right now, nested rects where the margin is not the same width
    // all around do not render correctly
    const SkScalar* outer = rects[0].asScalars();
    const SkScalar* inner = rects[1].asScalars();

    bool allEq = true;

    // Margin on the first edge; the loop checks the remaining three edges
    // against it. allGoE1 tracks "every margin is at least one unit wide".
    SkScalar margin = SkScalarAbs(outer[0] - inner[0]);
    bool allGoE1 = margin >= SK_Scalar1;

    for (int i = 1; i < 4; ++i) {
        SkScalar temp = SkScalarAbs(outer[i] - inner[i]);
        if (temp < SK_Scalar1) {
            allGoE1 = false;
        }
        if (!SkScalarNearlyEqual(margin, temp)) {
            allEq = false;
        }
    }

    // Accept if the margins are uniform, or all wide enough to render cleanly.
    return allEq || allGoE1;
}

void
GrContext::drawPath(GrRenderTarget* rt,
                    const GrClip& clip,
                    const GrPaint& paint,
                    const SkMatrix& viewMatrix,
                    const SkPath& path,
                    const GrStrokeInfo& strokeInfo) {
    // General path entry point. Handles dashing (by recursing with the dash
    // applied), special-cases nested AA rects and ovals, then falls through
    // to internalDrawPath().
    RETURN_IF_ABANDONED
    if (path.isEmpty()) {
        // An empty inverse-filled path covers everything: draw as paint.
        if (path.isInverseFillType()) {
            this->drawPaint(rt, clip, paint, viewMatrix);
        }
        return;
    }

    GrColor color = paint.getColor();
    if (strokeInfo.isDashed()) {
        // Fast path: a dashed single line segment may be drawable directly.
        SkPoint pts[2];
        if (path.isLine(pts)) {
            AutoCheckFlush acf(this);
            GrPipelineBuilder pipelineBuilder;
            GrDrawTarget* target = this->prepareToDraw(&pipelineBuilder, rt, clip, &paint, &acf);
            if (NULL == target) {
                return;
            }

            if (GrDashingEffect::DrawDashLine(fGpu, target, &pipelineBuilder, color, viewMatrix,
                                              pts, paint, strokeInfo)) {
                return;
            }
        }

        // Filter dashed path into new path with the dashing applied
        const SkPathEffect::DashInfo& info = strokeInfo.getDashInfo();
        SkTLazy<SkPath> effectPath;
        // newStrokeInfo copies strokeInfo without the dash, so the recursive
        // drawPath calls below cannot re-enter this branch.
        GrStrokeInfo newStrokeInfo(strokeInfo, false);
        SkStrokeRec* stroke = newStrokeInfo.getStrokeRecPtr();
        if (SkDashPath::FilterDashPath(effectPath.init(), path, stroke, NULL, info)) {
            this->drawPath(rt, clip, paint, viewMatrix, *effectPath.get(), newStrokeInfo);
            return;
        }

        // Dash filtering failed: draw the original geometry un-dashed.
        this->drawPath(rt, clip, paint, viewMatrix, path, newStrokeInfo);
        return;
    }

    // Note that internalDrawPath may sw-rasterize the path into a scratch texture.
    // Scratch textures can be recycled after they are returned to the texture
    // cache. This presents a potential hazard for buffered drawing. However,
    // the writePixels that uploads to the scratch will perform a flush so we're
    // OK.
    AutoCheckFlush acf(this);
    GrPipelineBuilder pipelineBuilder;
    GrDrawTarget* target = this->prepareToDraw(&pipelineBuilder, rt, clip, &paint, &acf);
    if (NULL == target) {
        return;
    }

    GR_CREATE_TRACE_MARKER1("GrContext::drawPath", target, "Is Convex", path.isConvex());

    const SkStrokeRec& strokeRec = strokeInfo.getStrokeRec();

    // Coverage AA is only needed when the target is not multisampled.
    bool useCoverageAA = paint.isAntiAlias() &&
        !pipelineBuilder.getRenderTarget()->isMultisampled();

    // getWidth() < 0 indicates a fill (no stroke) here.
    if (useCoverageAA && strokeRec.getWidth() < 0 && !path.isConvex()) {
        // Concave AA paths are expensive - try to avoid them for special cases
        SkRect rects[2];

        if (is_nested_rects(target, &pipelineBuilder, color, viewMatrix, path, strokeRec, rects)) {
            fAARectRenderer->fillAANestedRects(target, &pipelineBuilder, color, viewMatrix, rects);
            return;
        }
    }

    SkRect ovalRect;
    bool isOval = path.isOval(&ovalRect);

    // Prefer the oval renderer when the path is a non-inverse oval it can
    // handle; otherwise use the general path pipeline.
    if (!isOval || path.isInverseFillType() ||
        !fOvalRenderer->drawOval(target,
                                 &pipelineBuilder,
                                 color,
                                 viewMatrix,
                                 paint.isAntiAlias(),
                                 ovalRect,
                                 strokeRec)) {
        this->internalDrawPath(target, &pipelineBuilder, viewMatrix, color, paint.isAntiAlias(),
                               path, strokeInfo);
    }
}

// Core path-rendering dispatch: selects a GrPathRenderer (possibly after
// converting the stroke to a fill, possibly the software renderer) and draws.
// Expects a non-empty, non-dashed path; 'useAA' requests anti-aliasing.
void GrContext::internalDrawPath(GrDrawTarget* target,
                                 GrPipelineBuilder* pipelineBuilder,
                                 const SkMatrix& viewMatrix,
                                 GrColor color,
                                 bool useAA,
                                 const SkPath& path,
                                 const GrStrokeInfo& strokeInfo) {
    RETURN_IF_ABANDONED
    SkASSERT(!path.isEmpty());

    GR_CREATE_TRACE_MARKER("GrContext::internalDrawPath", target);


    // An Assumption here is that path renderer would use some form of tweaking
    // the src color (either the input alpha or in the frag shader) to implement
    // aa. If we have some future driver-mojo path AA that can do the right
    // thing WRT to the blend then we'll need some query on the PR.
    bool useCoverageAA = useAA &&
        !pipelineBuilder->getRenderTarget()->isMultisampled();


    GrPathRendererChain::DrawType type =
        useCoverageAA ? GrPathRendererChain::kColorAntiAlias_DrawType :
                        GrPathRendererChain::kColor_DrawType;

    const SkPath* pathPtr = &path;
    SkTLazy<SkPath> tmpPath;
    // Copy-on-write: the stroke rec is only copied if we need to modify it.
    SkTCopyOnFirstWrite<SkStrokeRec> stroke(strokeInfo.getStrokeRec());

    // Try a 1st time without stroking the path and without allowing the SW renderer
    GrPathRenderer* pr = this->getPathRenderer(target, pipelineBuilder, viewMatrix, *pathPtr,
                                               *stroke, false, type);

    if (NULL == pr) {
        if (!GrPathRenderer::IsStrokeHairlineOrEquivalent(*stroke, viewMatrix, NULL)) {
            // It didn't work the 1st time, so try again with the stroked path
            stroke.writable()->setResScale(SkScalarAbs(viewMatrix.getMaxScale()));
            if (stroke->applyToPath(tmpPath.init(), *pathPtr)) {
                // The stroke has been baked into the geometry; draw it as a fill.
                pathPtr = tmpPath.get();
                stroke.writable()->setFillStyle();
                if (pathPtr->isEmpty()) {
                    return;
                }
            }
        }

        // This time, allow SW renderer
        pr = this->getPathRenderer(target, pipelineBuilder, viewMatrix, *pathPtr, *stroke, true,
                                   type);
    }

    if (NULL == pr) {
#ifdef SK_DEBUG
        SkDebugf("Unable to find path renderer compatible with path.\n");
#endif
        return;
    }

    pr->drawPath(target, pipelineBuilder, color, viewMatrix, *pathPtr, *stroke, useCoverageAA);
}

////////////////////////////////////////////////////////////////////////////////

// Flushes (or, with kDiscard_FlushBit, drops) all buffered drawing and
// notifies the resource cache. No-op when the draw buffer is gone.
void GrContext::flush(int flagsBitfield) {
    if (NULL == fDrawBuffer) {
        return;
    }

    if (kDiscard_FlushBit & flagsBitfield) {
        fDrawBuffer->reset();
    } else {
        fDrawBuffer->flush();
    }
    fResourceCache->notifyFlushOccurred();
    fFlushToReduceCacheSize = false;
}

// CPU fallback: converts unpremultiplied pixels to premultiplied in software.
// Returns false if 'srcConfig' has no corresponding SkColorType.
bool sw_convert_to_premul(GrPixelConfig srcConfig, int width, int height, size_t inRowBytes,
                          const void* inPixels, size_t outRowBytes, void* outPixels) {
    SkSrcPixelInfo srcPI;
    if (!GrPixelConfig2ColorAndProfileType(srcConfig, &srcPI.fColorType, NULL)) {
        return false;
    }
    srcPI.fAlphaType = kUnpremul_SkAlphaType;
    srcPI.fPixels = inPixels;
    srcPI.fRowBytes = inRowBytes;

    SkDstPixelInfo dstPI;
    dstPI.fColorType = srcPI.fColorType;
    dstPI.fAlphaType = kPremul_SkAlphaType;
    dstPI.fPixels = outPixels;
    dstPI.fRowBytes = outRowBytes;

    return srcPI.convertPixelsTo(&dstPI, width, height);
}

// Uploads 'buffer' into a sub-rect of 'surface'. Writes directly into the
// texture when possible; otherwise stages through a scratch texture and a
// draw (handling premul and R/B-swap conversions along the way).
bool GrContext::writeSurfacePixels(GrSurface* surface,
                                   int left, int top, int width, int height,
                                   GrPixelConfig srcConfig, const void* buffer, size_t rowBytes,
                                   uint32_t pixelOpsFlags) {
    RETURN_FALSE_IF_ABANDONED
    {
        // Fast path: direct texture write (no unpremul requested and the GPU
        // supports writing this config into the texture).
        GrTexture* texture = NULL;
        if (!(kUnpremul_PixelOpsFlag & pixelOpsFlags) && (texture = surface->asTexture()) &&
            fGpu->canWriteTexturePixels(texture, srcConfig)) {

            if (!(kDontFlush_PixelOpsFlag & pixelOpsFlags) &&
                surface->surfacePriv().hasPendingIO()) {
                this->flush();
            }
            return fGpu->writeTexturePixels(texture, left, top, width, height,
                                            srcConfig, buffer, rowBytes);
            // Don't need to check kFlushWrites_PixelOp here, we just did a direct write so the
            // upload is already flushed.
        }
    }

    // If we didn't do a direct texture write then we upload the pixels to a texture and draw.
    GrRenderTarget* renderTarget = surface->asRenderTarget();
    if (NULL == renderTarget) {
        return false;
    }

    // We ignore the preferred config unless it is a R/B swap of the src config. In that case
    // we will upload the original src data to a scratch texture but we will spoof it as the swapped
    // config. This scratch will then have R and B swapped. We correct for this by swapping again
    // when drawing the scratch to the dst using a conversion effect.
    bool swapRAndB = false;
    GrPixelConfig writeConfig = srcConfig;
    if (GrPixelConfigSwapRAndB(srcConfig) ==
        fGpu->preferredWritePixelsConfig(srcConfig, renderTarget->config())) {
        writeConfig = GrPixelConfigSwapRAndB(srcConfig);
        swapRAndB = true;
    }

    GrSurfaceDesc desc;
    desc.fWidth = width;
    desc.fHeight = height;
    desc.fConfig = writeConfig;
    SkAutoTUnref<GrTexture> texture(this->refScratchTexture(desc, kApprox_ScratchTexMatch));
    if (!texture) {
        return false;
    }

    SkAutoTUnref<const GrFragmentProcessor> fp;
    SkMatrix textureMatrix;
    textureMatrix.setIDiv(texture->width(), texture->height());

    // allocate a tmp buffer and sw convert the pixels to premul
    SkAutoSTMalloc<128 * 128, uint32_t> tmpPixels(0);

    if (kUnpremul_PixelOpsFlag & pixelOpsFlags) {
        // Unpremul input is only supported for 8888 configs.
        if (!GrPixelConfigIs8888(srcConfig)) {
            return false;
        }
        fp.reset(this->createUPMToPMEffect(texture, swapRAndB, textureMatrix));
        // handle the unpremul step on the CPU if we couldn't create an effect to do it.
        if (NULL == fp) {
            size_t tmpRowBytes = 4 * width;
            tmpPixels.reset(width * height);
            if (!sw_convert_to_premul(srcConfig, width, height, rowBytes, buffer, tmpRowBytes,
                                      tmpPixels.get())) {
                return false;
            }
            // Point the upload at the CPU-converted copy.
            rowBytes = tmpRowBytes;
            buffer = tmpPixels.get();
        }
    }
    if (NULL == fp) {
        // No premul conversion needed (or it was done on the CPU): the draw
        // still needs a conversion effect to undo any R/B spoofing.
        fp.reset(GrConfigConversionEffect::Create(texture,
                                                  swapRAndB,
                                                  GrConfigConversionEffect::kNone_PMConversion,
                                                  textureMatrix));
    }

    // Even if the client told us not to flush, we still flush here. The client may have known that
    // writes to the original surface caused no data hazards, but they can't know that the scratch
    // we just got is safe.
    if (texture->surfacePriv().hasPendingIO()) {
        this->flush();
    }
    if (!fGpu->writeTexturePixels(texture, 0, 0, width, height,
                                  writeConfig, buffer, rowBytes)) {
        return false;
    }

    SkMatrix matrix;
    matrix.setTranslate(SkIntToScalar(left), SkIntToScalar(top));

    // This function can be called in the midst of drawing another object (e.g., when uploading a
    // SW-rasterized clip while issuing a draw). So we push the current geometry state before
    // drawing a rect to the render target.
    // The bracket ensures we pop the stack if we wind up flushing below.
    {
        GrDrawTarget* drawTarget = this->prepareToDraw();
        if (!drawTarget) {
            return false;
        }
        GrDrawTarget::AutoGeometryPush agp(drawTarget);

        GrPipelineBuilder pipelineBuilder;
        pipelineBuilder.addColorProcessor(fp);
        pipelineBuilder.setRenderTarget(renderTarget);
        drawTarget->drawSimpleRect(&pipelineBuilder,
                                   GrColor_WHITE,
                                   matrix,
                                   SkRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height)));
    }

    if (kFlushWrites_PixelOp & pixelOpsFlags) {
        this->flushSurfaceWrites(surface);
    }

    return true;
}

// toggles between RGBA and BGRA
static SkColorType toggle_colortype32(SkColorType ct) {
    if (kRGBA_8888_SkColorType == ct) {
        return kBGRA_8888_SkColorType;
    } else {
        SkASSERT(kBGRA_8888_SkColorType == ct);
        return kRGBA_8888_SkColorType;
    }
}

// Reads back a sub-rect of 'target' into 'buffer', using a scratch texture
// draw when the GPU would otherwise require y-flip, R/B-swap or unpremul work
// that is cheaper to do during a draw than on the CPU.
bool GrContext::readRenderTargetPixels(GrRenderTarget* target,
                                       int left, int top, int width, int height,
                                       GrPixelConfig dstConfig, void* buffer, size_t rowBytes,
                                       uint32_t flags) {
    RETURN_FALSE_IF_ABANDONED
    ASSERT_OWNED_RESOURCE(target);
    SkASSERT(target);

    if (!(kDontFlush_PixelOpsFlag & flags) && target->surfacePriv().hasPendingWrite()) {
        this->flush();
    }

    // Determine which conversions have to be applied: flipY, swapRAnd, and/or
    // unpremul.

    // If fGpu->readPixels would incur a y-flip cost then we will read the pixels upside down. We'll
    // either do the flipY by drawing into a scratch with a matrix or on the cpu after the read.
    bool flipY = fGpu->readPixelsWillPayForYFlip(target, left, top,
                                                 width, height, dstConfig,
                                                 rowBytes);
    // We ignore the preferred config if it is different than our config unless it is an R/B swap.
    // In that case we'll perform an R and B swap while drawing to a scratch texture of the swapped
    // config. Then we will call readPixels on the scratch with the swapped config. The swaps during
    // the draw cancels out the fact that we call readPixels with a config that is R/B swapped from
    // dstConfig.
    GrPixelConfig readConfig = dstConfig;
    bool swapRAndB = false;
    if (GrPixelConfigSwapRAndB(dstConfig) ==
        fGpu->preferredReadPixelsConfig(dstConfig, target->config())) {
        readConfig = GrPixelConfigSwapRAndB(readConfig);
        swapRAndB = true;
    }

    bool unpremul = SkToBool(kUnpremul_PixelOpsFlag & flags);

    if (unpremul && !GrPixelConfigIs8888(dstConfig)) {
        // The unpremul flag is only allowed for these two configs.
        return false;
    }

    SkAutoTUnref<GrTexture> tempTexture;

    // If the src is a texture and we would have to do conversions after read pixels, we instead
    // do the conversions by drawing the src to a scratch texture. If we handle any of the
    // conversions in the draw we set the corresponding bool to false so that we don't reapply it
    // on the read back pixels.
    GrTexture* src = target->asTexture();
    if (src && (swapRAndB || unpremul || flipY)) {
        // Make the scratch a render so we can read its pixels.
        GrSurfaceDesc desc;
        desc.fFlags = kRenderTarget_GrSurfaceFlag;
        desc.fWidth = width;
        desc.fHeight = height;
        desc.fConfig = readConfig;
        desc.fOrigin = kTopLeft_GrSurfaceOrigin;

        // When a full read back is faster than a partial we could always make the scratch exactly
        // match the passed rect. However, if we see many different size rectangles we will trash
        // our texture cache and pay the cost of creating and destroying many textures. So, we only
        // request an exact match when the caller is reading an entire RT.
        ScratchTexMatch match = kApprox_ScratchTexMatch;
        if (0 == left &&
            0 == top &&
            target->width() == width &&
            target->height() == height &&
            fGpu->fullReadPixelsIsFasterThanPartial()) {
            match = kExact_ScratchTexMatch;
        }
        tempTexture.reset(this->refScratchTexture(desc, match));
        if (tempTexture) {
            // compute a matrix to perform the draw
            SkMatrix textureMatrix;
            textureMatrix.setTranslate(SK_Scalar1 *left, SK_Scalar1 *top);
            textureMatrix.postIDiv(src->width(), src->height());

            SkAutoTUnref<const GrFragmentProcessor> fp;
            if (unpremul) {
                fp.reset(this->createPMToUPMEffect(src, swapRAndB, textureMatrix));
                if (fp) {
                    unpremul = false; // we no longer need to do this on CPU after the read back.
                }
            }
            // If we failed to create a PM->UPM effect and have no other conversions to perform then
            // there is no longer any point to using the scratch.
            if (fp || flipY || swapRAndB) {
                if (!fp) {
                    fp.reset(GrConfigConversionEffect::Create(
                            src, swapRAndB, GrConfigConversionEffect::kNone_PMConversion,
                            textureMatrix));
                }
                swapRAndB = false; // we will handle the swap in the draw.

                // We protect the existing geometry here since it may not be
                // clear to the caller that a draw operation (i.e., drawSimpleRect)
                // can be invoked in this method
                {
                    GrDrawTarget::AutoGeometryPush agp(fDrawBuffer);
                    GrPipelineBuilder pipelineBuilder;
                    SkASSERT(fp);
                    pipelineBuilder.addColorProcessor(fp);

                    pipelineBuilder.setRenderTarget(tempTexture->asRenderTarget());
                    SkRect rect = SkRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height));
                    fDrawBuffer->drawSimpleRect(&pipelineBuilder,
                                                GrColor_WHITE,
                                                SkMatrix::I(),
                                                rect);
                    // we want to read back from the scratch's origin
                    left = 0;
                    top = 0;
                    target = tempTexture->asRenderTarget();
                }
                this->flushSurfaceWrites(target);
            }
        }
    }

    if (!fGpu->readPixels(target,
                          left, top, width, height,
                          readConfig, buffer, rowBytes)) {
        return false;
    }
    // Perform any conversions we weren't able to perform using a scratch texture.
    if (unpremul || swapRAndB) {
        // In-place conversion: src and dst share 'buffer'.
        SkDstPixelInfo dstPI;
        if (!GrPixelConfig2ColorAndProfileType(dstConfig, &dstPI.fColorType, NULL)) {
            return false;
        }
        dstPI.fAlphaType = kUnpremul_SkAlphaType;
        dstPI.fPixels = buffer;
        dstPI.fRowBytes = rowBytes;

        SkSrcPixelInfo srcPI;
        srcPI.fColorType = swapRAndB ? toggle_colortype32(dstPI.fColorType) : dstPI.fColorType;
        srcPI.fAlphaType = kPremul_SkAlphaType;
        srcPI.fPixels = buffer;
        srcPI.fRowBytes = rowBytes;

        return srcPI.convertPixelsTo(&dstPI, width, height);
    }
    return true;
}

// Flushes pending work on 'surface' and resolves it if it is a multisampled
// render target, so an external client can read its contents.
void GrContext::prepareSurfaceForExternalRead(GrSurface* surface) {
    RETURN_IF_ABANDONED
    SkASSERT(surface);
    ASSERT_OWNED_RESOURCE(surface);
    if (surface->surfacePriv().hasPendingIO()) {
        this->flush();
    }
    GrRenderTarget* rt = surface->asRenderTarget();
    if (fGpu && rt) {
        fGpu->resolveRenderTarget(rt);
    }
}

// Records a discard of 'renderTarget' contents into the draw buffer.
void GrContext::discardRenderTarget(GrRenderTarget* renderTarget) {
    RETURN_IF_ABANDONED
    SkASSERT(renderTarget);
    ASSERT_OWNED_RESOURCE(renderTarget);
    AutoCheckFlush acf(this);
    GrDrawTarget* target = this->prepareToDraw();
    if (NULL == target) {
        return;
    }
    target->discard(renderTarget);
}

// Copies 'srcRect' of 'src' to 'dst' at 'dstPoint' via the draw target.
void GrContext::copySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
                            const SkIPoint& dstPoint, uint32_t pixelOpsFlags) {
    RETURN_IF_ABANDONED
    if (NULL == src || NULL == dst) {
        return;
    }
    ASSERT_OWNED_RESOURCE(src);
    ASSERT_OWNED_RESOURCE(dst);

    // Since we're going to the draw target and not GPU, no need to check kNoFlush
    // here.

    GrDrawTarget* target = this->prepareToDraw();
    if (NULL == target) {
        return;
    }
    target->copySurface(dst, src, srcRect, dstPoint);

    if (kFlushWrites_PixelOp & pixelOpsFlags) {
        this->flush();
    }
}

// Flushes only when 'surface' has an unflushed pending write.
void GrContext::flushSurfaceWrites(GrSurface* surface) {
    RETURN_IF_ABANDONED
    if (surface->surfacePriv().hasPendingWrite()) {
        this->flush();
    }
}

// Configures 'pipelineBuilder' from the paint/clip/render target and returns
// the buffered draw target, or NULL when the GPU or draw buffer is gone.
// Callers must check for NULL before using the result.
GrDrawTarget* GrContext::prepareToDraw(GrPipelineBuilder* pipelineBuilder,
                                       GrRenderTarget* rt,
                                       const GrClip& clip,
                                       const GrPaint* paint,
                                       const AutoCheckFlush* acf) {
    if (NULL == fGpu || NULL == fDrawBuffer) {
        return NULL;
    }

    ASSERT_OWNED_RESOURCE(rt);
    SkASSERT(rt && paint && acf);
    pipelineBuilder->setFromPaint(*paint, rt, clip);
    return fDrawBuffer;
}

// Paint-less variant: just hands back the draw buffer (NULL without a GPU).
GrDrawTarget* GrContext::prepareToDraw() {
    if (NULL == fGpu) {
        return NULL;
    }
    return fDrawBuffer;
}

/*
 * This method finds a path renderer that can draw the specified path on
 * the provided target.
 * Due to its expense, the software path renderer has split out so it can
 * can be individually allowed/disallowed via the "allowSW" boolean.
 */
GrPathRenderer* GrContext::getPathRenderer(const GrDrawTarget* target,
                                           const GrPipelineBuilder* pipelineBuilder,
                                           const SkMatrix& viewMatrix,
                                           const SkPath& path,
                                           const SkStrokeRec& stroke,
                                           bool allowSW,
                                           GrPathRendererChain::DrawType drawType,
                                           GrPathRendererChain::StencilSupport* stencilSupport) {

    // The renderer chain is built lazily on first use.
    if (NULL == fPathRendererChain) {
        fPathRendererChain = SkNEW_ARGS(GrPathRendererChain, (this));
    }

    GrPathRenderer* pr = fPathRendererChain->getPathRenderer(target,
                                                             pipelineBuilder,
                                                             viewMatrix,
                                                             path,
                                                             stroke,
                                                             drawType,
                                                             stencilSupport);

    // Last resort: the (lazily created) software rasterizer, if permitted.
    if (NULL == pr && allowSW) {
        if (NULL == fSoftwarePathRenderer) {
            fSoftwarePathRenderer = SkNEW_ARGS(GrSoftwarePathRenderer, (this));
        }
        pr = fSoftwarePathRenderer;
    }

    return pr;
}

////////////////////////////////////////////////////////////////////////////////
bool GrContext::isConfigRenderable(GrPixelConfig config, bool withMSAA) const {
    return fGpu->caps()->isConfigRenderable(config, withMSAA);
}

// Suggests an MSAA sample count for 'config' at the given screen density.
// Returns 0 when MSAA isn't renderable, path rendering is unsupported, or the
// chosen count exceeds the device maximum. Higher-dpi screens get fewer
// samples (4 at >= 250 dpi, else 16) since smaller pixels need less AA.
int GrContext::getRecommendedSampleCount(GrPixelConfig config,
                                         SkScalar dpi) const {
    if (!this->isConfigRenderable(config, true)) {
        return 0;
    }
    int chosenSampleCount = 0;
    if (fGpu->caps()->pathRenderingSupport()) {
        if (dpi >= 250.0f) {
            chosenSampleCount = 4;
        } else {
            chosenSampleCount = 16;
        }
    }
    return chosenSampleCount <= fGpu->caps()->maxSampleCount() ?
           chosenSampleCount : 0;
}

// One-time construction of the buffered draw target and its vertex/index
// allocation pools. Must only run once (asserted below).
void GrContext::setupDrawBuffer() {
    SkASSERT(NULL == fDrawBuffer);
    SkASSERT(NULL == fDrawBufferVBAllocPool);
    SkASSERT(NULL == fDrawBufferIBAllocPool);

    fDrawBufferVBAllocPool =
        SkNEW_ARGS(GrVertexBufferAllocPool, (fGpu, false,
                                             DRAW_BUFFER_VBPOOL_BUFFER_SIZE,
                                             DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS));
    fDrawBufferIBAllocPool =
        SkNEW_ARGS(GrIndexBufferAllocPool, (fGpu, false,
                                            DRAW_BUFFER_IBPOOL_BUFFER_SIZE,
                                            DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS));

    fDrawBuffer = SkNEW_ARGS(GrInOrderDrawBuffer, (fGpu,
                                                   fDrawBufferVBAllocPool,
                                                   fDrawBufferIBAllocPool));
}

// Text rendering draws into the same buffered draw target.
GrDrawTarget* GrContext::getTextTarget() {
    return this->prepareToDraw();
}

const GrIndexBuffer* GrContext::getQuadIndexBuffer() const {
    return fGpu->getQuadIndexBuffer();
}

namespace {
// Probes, via round-trip draws, which premul<->unpremul conversions the GPU
// can perform losslessly; results are stashed as ints on the context.
void test_pm_conversions(GrContext* ctx, int* pmToUPMValue, int* upmToPMValue) {
    GrConfigConversionEffect::PMConversion pmToUPM;
    GrConfigConversionEffect::PMConversion upmToPM;
    GrConfigConversionEffect::TestForPreservingPMConversions(ctx, &pmToUPM, &upmToPM);
    *pmToUPMValue = pmToUPM;
    *upmToPMValue = upmToPM;
}
}

// Returns a premul->unpremul conversion effect, or NULL when the GPU has no
// lossless conversion available. The probe runs once per context.
const GrFragmentProcessor* GrContext::createPMToUPMEffect(GrTexture* texture,
                                                          bool swapRAndB,
                                                          const SkMatrix& matrix) {
    if (!fDidTestPMConversions) {
        test_pm_conversions(this, &fPMToUPMConversion, &fUPMToPMConversion);
        fDidTestPMConversions = true;
    }
    GrConfigConversionEffect::PMConversion pmToUPM =
        static_cast<GrConfigConversionEffect::PMConversion>(fPMToUPMConversion);
    if (GrConfigConversionEffect::kNone_PMConversion != pmToUPM) {
        return GrConfigConversionEffect::Create(texture, swapRAndB, pmToUPM, matrix);
    } else {
        return NULL;
    }
}

// Mirror of createPMToUPMEffect for the unpremul->premul direction.
const GrFragmentProcessor* GrContext::createUPMToPMEffect(GrTexture* texture,
                                                          bool swapRAndB,
                                                          const
                                                          SkMatrix& matrix) {
    if (!fDidTestPMConversions) {
        test_pm_conversions(this, &fPMToUPMConversion, &fUPMToPMConversion);
        fDidTestPMConversions = true;
    }
    GrConfigConversionEffect::PMConversion upmToPM =
        static_cast<GrConfigConversionEffect::PMConversion>(fUPMToPMConversion);
    // NULL means no lossless unpremul->premul conversion is available on this GPU.
    if (GrConfigConversionEffect::kNone_PMConversion != upmToPM) {
        return GrConfigConversionEffect::Create(texture, swapRAndB, upmToPM, matrix);
    } else {
        return NULL;
    }
}

//////////////////////////////////////////////////////////////////////////////

// Reports the resource cache budget; either out-param may be NULL.
void GrContext::getResourceCacheLimits(int* maxTextures, size_t* maxTextureBytes) const {
    if (maxTextures) {
        *maxTextures = fResourceCache->getMaxResourceCount();
    }
    if (maxTextureBytes) {
        *maxTextureBytes = fResourceCache->getMaxResourceBytes();
    }
}

void GrContext::setResourceCacheLimits(int maxTextures, size_t maxTextureBytes) {
    fResourceCache->setLimits(maxTextures, maxTextureBytes);
}

// Tags 'resource' with 'key' so later lookups can find it; NULL is a no-op.
void GrContext::addResourceToCache(const GrUniqueKey& key, GrGpuResource* resource) {
    ASSERT_OWNED_RESOURCE(resource);
    if (!resource) {
        return;
    }
    resource->resourcePriv().setUniqueKey(key);
}

bool GrContext::isResourceInCache(const GrUniqueKey& key) const {
    return fResourceCache->hasUniqueKey(key);
}

GrGpuResource* GrContext::findAndRefCachedResource(const GrUniqueKey& key) {
    return fResourceCache->findAndRefUniqueResource(key);
}

//////////////////////////////////////////////////////////////////////////////

// Forwards the trace marker to the GPU and, when present, the draw buffer.
// NOTE(review): unlike most entry points this does not use RETURN_IF_ABANDONED
// and dereferences fGpu unconditionally — verify fGpu outlives abandonment
// before calling on an abandoned context.
void GrContext::addGpuTraceMarker(const GrGpuTraceMarker* marker) {
    fGpu->addGpuTraceMarker(marker);
    if (fDrawBuffer) {
        fDrawBuffer->addGpuTraceMarker(marker);
    }
}

// Counterpart of addGpuTraceMarker; same abandonment caveat applies.
void GrContext::removeGpuTraceMarker(const GrGpuTraceMarker* marker) {
    fGpu->removeGpuTraceMarker(marker);
    if (fDrawBuffer) {
        fDrawBuffer->removeGpuTraceMarker(marker);
    }
}