GrContext.cpp revision 0acd0d33fd69b6603fa22f88fa45c96aa0907d4d
/*
 * Copyright 2011 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrContext.h"

#include "GrAARectRenderer.h"
#include "GrAtlasTextContext.h"
#include "GrBatch.h"
#include "GrBatchFontCache.h"
#include "GrBatchTarget.h"
#include "GrBufferAllocPool.h"
#include "GrDefaultGeoProcFactory.h"
#include "GrGpuResource.h"
#include "GrGpuResourcePriv.h"
#include "GrDrawTargetCaps.h"
#include "GrGpu.h"
#include "GrIndexBuffer.h"
#include "GrInOrderDrawBuffer.h"
#include "GrLayerCache.h"
#include "GrOvalRenderer.h"
#include "GrPathRenderer.h"
#include "GrPathUtils.h"
#include "GrRenderTargetPriv.h"
#include "GrResourceCache.h"
#include "GrResourceProvider.h"
#include "GrSoftwarePathRenderer.h"
#include "GrStencilAndCoverTextContext.h"
#include "GrStrokeInfo.h"
#include "GrSurfacePriv.h"
#include "GrTextBlobCache.h"
#include "GrTexturePriv.h"
#include "GrTraceMarker.h"
#include "GrTracing.h"
#include "GrVertices.h"
#include "SkDashPathPriv.h"
#include "SkConfig8888.h"
#include "SkGr.h"
#include "SkRRect.h"
#include "SkStrokeRec.h"
#include "SkTLazy.h"
#include "SkTLS.h"
#include "SkTraceEvent.h"

#include "effects/GrConfigConversionEffect.h"
#include "effects/GrDashingEffect.h"
#include "effects/GrSingleTextureEffect.h"

// Sizing of the pools backing the in-order draw buffer's vertex/index data.
static const size_t DRAW_BUFFER_VBPOOL_BUFFER_SIZE = 1 << 15;
static const int DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS = 4;

static const size_t DRAW_BUFFER_IBPOOL_BUFFER_SIZE = 1 << 11;
static const int DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS = 4;

#define ASSERT_OWNED_RESOURCE(R) SkASSERT(!(R) || (R)->getContext() == this)
// fDrawBuffer is NULL once the context is abandoned (or was never initialized),
// so it doubles as the "is this context still usable" sentinel.
#define RETURN_IF_ABANDONED if (!fDrawBuffer) { return; }
#define RETURN_FALSE_IF_ABANDONED if (!fDrawBuffer) { return false; }
#define RETURN_NULL_IF_ABANDONED if (!fDrawBuffer) { return NULL; }

// RAII helper: on scope exit, flushes the context if the resource cache has
// signaled (via OverBudgetCB) that it went over budget during this scope.
class GrContext::AutoCheckFlush {
public:
    AutoCheckFlush(GrContext* context) : fContext(context) { SkASSERT(context); }

    ~AutoCheckFlush() {
        if (fContext->fFlushToReduceCacheSize) {
            fContext->flush();
        }
    }

private:
    GrContext* fContext;
};

// Factory entry point: builds a context for the given backend, applying default
// Options when none are supplied. Returns NULL (after unref'ing the partially
// constructed context) if backend initialization fails.
GrContext* GrContext::Create(GrBackend backend, GrBackendContext backendContext,
                             const Options* opts) {
    GrContext* context;
    if (NULL == opts) {
        context = SkNEW_ARGS(GrContext, (Options()));
    } else {
        context = SkNEW_ARGS(GrContext, (*opts));
    }

    if (context->init(backend, backendContext)) {
        return context;
    } else {
        context->unref();
        return NULL;
    }
}

// Process-wide monotonically increasing context ID; skips SK_InvalidGenID on wrap.
static int32_t gNextID = 1;
static int32_t next_id() {
    int32_t id;
    do {
        id = sk_atomic_inc(&gNextID);
    } while (id == SK_InvalidGenID);
    return id;
}

GrContext::GrContext(const Options& opts) : fOptions(opts), fUniqueID(next_id()) {
    // All subsystems are created lazily in init()/initCommon(); start everything NULL.
    fGpu = NULL;
    fResourceCache = NULL;
    fResourceProvider = NULL;
    fPathRendererChain = NULL;
    fSoftwarePathRenderer = NULL;
    fBatchFontCache = NULL;
    fDrawBuffer = NULL;
    fDrawBufferVBAllocPool = NULL;
    fDrawBufferIBAllocPool = NULL;
    fFlushToReduceCacheSize = false;
    fAARectRenderer = NULL;
    fOvalRenderer = NULL;
    fMaxTextureSizeOverride = 1 << 20;
}

bool GrContext::init(GrBackend backend, GrBackendContext backendContext) {
    SkASSERT(NULL == fGpu);

    fGpu = GrGpu::Create(backend, backendContext, this);
    if (NULL == fGpu) {
        return false;
    }
    this->initCommon();
    return true;
}

// Backend-independent initialization; requires fGpu to already be created.
void GrContext::initCommon() {
    fResourceCache = SkNEW(GrResourceCache);
    fResourceCache->setOverBudgetCallback(OverBudgetCB, this);
    fResourceProvider = SkNEW_ARGS(GrResourceProvider, (fGpu, fResourceCache));

    fLayerCache.reset(SkNEW_ARGS(GrLayerCache, (this)));

    fAARectRenderer = SkNEW(GrAARectRenderer);
    fOvalRenderer = SkNEW(GrOvalRenderer);

    fDidTestPMConversions = false;

    this->setupDrawBuffer();

    // GrBatchFontCache will eventually replace GrFontCache
    fBatchFontCache = SkNEW_ARGS(GrBatchFontCache, (this));

    fTextBlobCache.reset(SkNEW_ARGS(GrTextBlobCache, (TextBlobCacheOverBudgetCB, this)));
}

GrContext::~GrContext() {
    // If init() never succeeded there is nothing to tear down.
    if (NULL == fGpu) {
        return;
    }

    this->flush();

    // Run client-registered cleanup callbacks before destroying subsystems.
    for (int i = 0; i < fCleanUpData.count(); ++i) {
        (*fCleanUpData[i].fFunc)(this, fCleanUpData[i].fInfo);
    }

    SkDELETE(fResourceProvider);
    SkDELETE(fResourceCache);
    SkDELETE(fBatchFontCache);
    SkDELETE(fDrawBuffer);
    SkDELETE(fDrawBufferVBAllocPool);
    SkDELETE(fDrawBufferIBAllocPool);

    fAARectRenderer->unref();
    fOvalRenderer->unref();

    fGpu->unref();
    SkSafeUnref(fPathRendererChain);
    SkSafeUnref(fSoftwarePathRenderer);
}

void GrContext::abandonContext() {
    fResourceProvider->abandon();
    // abandon first so destructors
    // don't try to free the resources in the API.
    fResourceCache->abandonAll();

    fGpu->contextAbandoned();

    // a path renderer may be holding onto resources that
    // are now unusable
    SkSafeSetNull(fPathRendererChain);
    SkSafeSetNull(fSoftwarePathRenderer);

    // Deleting fDrawBuffer (leaving it NULL) is what makes RETURN_IF_ABANDONED fire.
    delete fDrawBuffer;
    fDrawBuffer = NULL;

    delete fDrawBufferVBAllocPool;
    fDrawBufferVBAllocPool = NULL;

    delete fDrawBufferIBAllocPool;
    fDrawBufferIBAllocPool = NULL;

    fBatchFontCache->freeAll();
    fLayerCache->freeAll();
    fTextBlobCache->freeAll();
}

void GrContext::resetContext(uint32_t state) {
    fGpu->markContextDirty(state);
}

// Releases GPU resources that can be recreated on demand; the context stays usable.
void GrContext::freeGpuResources() {
    this->flush();

    if (fDrawBuffer) {
        fDrawBuffer->purgeResources();
    }

    fBatchFontCache->freeAll();
    fLayerCache->freeAll();
    // a path renderer may be holding onto resources
    SkSafeSetNull(fPathRendererChain);
    SkSafeSetNull(fSoftwarePathRenderer);

    fResourceCache->purgeAllUnlocked();
}

// Reports budgeted resource cache usage; either out-param may be NULL.
void GrContext::getResourceCacheUsage(int* resourceCount, size_t* resourceBytes) const {
    if (resourceCount) {
        *resourceCount = fResourceCache->getBudgetedResourceCount();
    }
    if (resourceBytes) {
        *resourceBytes = fResourceCache->getBudgetedResourceBytes();
    }
}

// Picks the text rendering implementation: NVPR (stencil-and-cover) when the GPU
// supports path rendering, the target is multisampled, and a stencil attachment
// can be attached; otherwise the atlas-based text context.
GrTextContext* GrContext::createTextContext(GrRenderTarget* renderTarget,
                                            SkGpuDevice* gpuDevice,
                                            const SkDeviceProperties&
                                            leakyProperties,
                                            bool enableDistanceFieldFonts) {
    if (fGpu->caps()->shaderCaps()->pathRenderingSupport() && renderTarget->isMultisampled()) {
        GrStencilAttachment* sb = renderTarget->renderTargetPriv().attachStencilAttachment();
        if (sb) {
            return GrStencilAndCoverTextContext::Create(this, gpuDevice, leakyProperties);
        }
    }

    return GrAtlasTextContext::Create(this, gpuDevice, leakyProperties, enableDistanceFieldFonts);
}

////////////////////////////////////////////////////////////////////////////////

bool GrContext::isConfigTexturable(GrPixelConfig config) const {
    return fGpu->caps()->isConfigTexturable(config);
}

bool GrContext::npotTextureTileSupport() const {
    return fGpu->caps()->npotTextureTileSupport();
}

// Resource-cache over-budget callback: only sets a flag here; the actual flush
// happens later from ~AutoCheckFlush.
void GrContext::OverBudgetCB(void* data) {
    SkASSERT(data);

    GrContext* context = reinterpret_cast<GrContext*>(data);

    // Flush the InOrderDrawBuffer to possibly free up some textures
    context->fFlushToReduceCacheSize = true;
}

void GrContext::TextBlobCacheOverBudgetCB(void* data) {
    SkASSERT(data);

    // Unlike the GrResourceCache, TextBlobs are drawn at the SkGpuDevice level, therefore they
    // cannot use fFlushToReduceCacheSize because it uses AutoCheckFlush.
    // The solution is to move
    // drawText calls to below the GrContext level, but this is not trivial because they call
    // drawPath on SkGpuDevice
    GrContext* context = reinterpret_cast<GrContext*>(data);
    context->flush();
}

int GrContext::getMaxTextureSize() const {
    // fMaxTextureSizeOverride lets clients artificially cap the reported size.
    return SkTMin(fGpu->caps()->maxTextureSize(), fMaxTextureSizeOverride);
}

int GrContext::getMaxRenderTargetSize() const {
    return fGpu->caps()->maxRenderTargetSize();
}

int GrContext::getMaxSampleCount() const {
    return fGpu->caps()->maxSampleCount();
}

///////////////////////////////////////////////////////////////////////////////

// Clears 'rect' (or the whole target when rect is NULL) to 'color'.
void GrContext::clear(const SkIRect* rect,
                      const GrColor color,
                      bool canIgnoreRect,
                      GrRenderTarget* renderTarget) {
    RETURN_IF_ABANDONED
    ASSERT_OWNED_RESOURCE(renderTarget);
    SkASSERT(renderTarget);

    AutoCheckFlush acf(this);
    GR_CREATE_TRACE_MARKER_CONTEXT("GrContext::clear", this);
    GrDrawTarget* target = this->prepareToDraw();
    if (NULL == target) {
        return;
    }
    target->clear(rect, color, canIgnoreRect, renderTarget);
}

// Fills the entire render target (clip permitting) with 'origPaint'.
void GrContext::drawPaint(GrRenderTarget* rt,
                          const GrClip& clip,
                          const GrPaint& origPaint,
                          const SkMatrix& viewMatrix) {
    RETURN_IF_ABANDONED
    // set rect to be big enough to fill the space, but not super-huge, so we
    // don't overflow fixed-point implementations
    SkRect r;
    r.setLTRB(0, 0,
              SkIntToScalar(rt->width()),
              SkIntToScalar(rt->height()));
    SkTCopyOnFirstWrite<GrPaint> paint(origPaint);

    // by definition this fills the entire clip, no need for AA
    if (paint->isAntiAlias()) {
        paint.writable()->setAntiAlias(false);
    }

    bool isPerspective = viewMatrix.hasPerspective();

    // We attempt to map r by the inverse matrix and draw that. mapRect will
    // map the four corners and bound them with a new rect. This will not
    // produce a correct result for some perspective matrices.
    if (!isPerspective) {
        SkMatrix inverse;
        if (!viewMatrix.invert(&inverse)) {
            SkDebugf("Could not invert matrix\n");
            return;
        }
        inverse.mapRect(&r);
        this->drawRect(rt, clip, *paint, viewMatrix, r);
    } else {
        // Perspective: draw an identity-space rect and push the inverse into the
        // local-coord matrix instead.
        SkMatrix localMatrix;
        if (!viewMatrix.invert(&localMatrix)) {
            SkDebugf("Could not invert matrix\n");
            return;
        }

        AutoCheckFlush acf(this);
        GrPipelineBuilder pipelineBuilder;
        GrDrawTarget* target = this->prepareToDraw(&pipelineBuilder, rt, clip, paint, &acf);
        if (NULL == target) {
            return;
        }

        GR_CREATE_TRACE_MARKER("GrContext::drawPaintWithPerspective", target);
        target->drawRect(&pipelineBuilder,
                         paint->getColor(),
                         SkMatrix::I(),
                         r,
                         NULL,
                         &localMatrix);
    }
}

////////////////////////////////////////////////////////////////////////////////

// True when all four edges lie exactly on integer (pixel) coordinates.
static inline bool is_irect(const SkRect& r) {
    return SkScalarIsInt(r.fLeft) && SkScalarIsInt(r.fTop) &&
           SkScalarIsInt(r.fRight) && SkScalarIsInt(r.fBottom);
}

// Decides whether a rect draw should take the analytic-AA path; also computes
// the device-space bounds in *devBoundRect. strokeWidth < 0 means "fill".
static bool apply_aa_to_rect(GrDrawTarget* target,
                             GrPipelineBuilder* pipelineBuilder,
                             SkRect* devBoundRect,
                             const SkRect& rect,
                             SkScalar strokeWidth,
                             const SkMatrix& combinedMatrix,
                             GrColor color) {
    // MSAA targets already get AA from the hardware.
    if (pipelineBuilder->getRenderTarget()->isMultisampled()) {
        return false;
    }

#if defined(SHADER_AA_FILL_RECT) || !defined(IGNORE_ROT_AA_RECT_OPT)
    if (strokeWidth >= 0) {
#endif
        if (!combinedMatrix.preservesAxisAlignment()) {
            return false;
        }

#if defined(SHADER_AA_FILL_RECT) || !defined(IGNORE_ROT_AA_RECT_OPT)
    } else {
        // Fills tolerate rotation as long as right angles are preserved.
        if (!combinedMatrix.preservesRightAngles()) {
            return false;
        }
    }
#endif

    combinedMatrix.mapRect(devBoundRect, rect);
    if (!combinedMatrix.rectStaysRect()) {
        return true;
    }

    if (strokeWidth < 0) {
        // A fill whose device bounds land exactly on pixel boundaries needs no AA.
        return !is_irect(*devBoundRect);
    }

    return true;
}

static inline bool rect_contains_inclusive(const SkRect& rect, const SkPoint& point) {
    return point.fX >= rect.fLeft && point.fX <= rect.fRight &&
           point.fY >= rect.fTop && point.fY <= rect.fBottom;
}

// Batch for non-AA stroked (or hairline) rects. Hairline when fStrokeWidth == 0.
class StrokeRectBatch : public GrBatch {
public:
    struct Geometry {
        GrColor fColor;
        SkMatrix fViewMatrix;
        SkRect fRect;
        SkScalar fStrokeWidth;
    };

    static GrBatch* Create(const Geometry& geometry) {
        return SkNEW_ARGS(StrokeRectBatch, (geometry));
    }

    const char* name() const override { return "StrokeRectBatch"; }

    void getInvariantOutputColor(GrInitInvariantOutput* out) const override {
        // When this is called on a batch, there is only one geometry bundle
        out->setKnownFourComponents(fGeoData[0].fColor);
    }

    void getInvariantOutputCoverage(GrInitInvariantOutput* out) const override {
        out->setKnownSingleComponent(0xff);
    }

    void initBatchTracker(const GrPipelineInfo& init) override {
        // Handle any color overrides
        if (init.fColorIgnored) {
            fGeoData[0].fColor = GrColor_ILLEGAL;
        } else if (GrColor_ILLEGAL != init.fOverrideColor) {
            fGeoData[0].fColor = init.fOverrideColor;
        }

        // setup batch properties
        fBatch.fColorIgnored = init.fColorIgnored;
        fBatch.fColor = fGeoData[0].fColor;
        fBatch.fUsesLocalCoords = init.fUsesLocalCoords;
        fBatch.fCoverageIgnored = init.fCoverageIgnored;
    }

    void generateGeometry(GrBatchTarget* batchTarget, const GrPipeline* pipeline) override {
        SkAutoTUnref<const GrGeometryProcessor> gp(
                GrDefaultGeoProcFactory::Create(GrDefaultGeoProcFactory::kPosition_GPType,
                                                this->color(),
                                                this->viewMatrix(),
                                                SkMatrix::I()));

        batchTarget->initDraw(gp, pipeline);

        // TODO this is hacky, but the only way we have to initialize the GP is to use the
Once we have GrBatch 464 // everywhere we can remove this nastiness 465 GrPipelineInfo init; 466 init.fColorIgnored = fBatch.fColorIgnored; 467 init.fOverrideColor = GrColor_ILLEGAL; 468 init.fCoverageIgnored = fBatch.fCoverageIgnored; 469 init.fUsesLocalCoords = this->usesLocalCoords(); 470 gp->initBatchTracker(batchTarget->currentBatchTracker(), init); 471 472 size_t vertexStride = gp->getVertexStride(); 473 474 SkASSERT(vertexStride == sizeof(GrDefaultGeoProcFactory::PositionAttr)); 475 476 Geometry& args = fGeoData[0]; 477 478 int vertexCount = kVertsPerHairlineRect; 479 if (args.fStrokeWidth > 0) { 480 vertexCount = kVertsPerStrokeRect; 481 } 482 483 const GrVertexBuffer* vertexBuffer; 484 int firstVertex; 485 486 void* verts = batchTarget->vertexPool()->makeSpace(vertexStride, 487 vertexCount, 488 &vertexBuffer, 489 &firstVertex); 490 491 if (!verts) { 492 SkDebugf("Could not allocate vertices\n"); 493 return; 494 } 495 496 SkPoint* vertex = reinterpret_cast<SkPoint*>(verts); 497 498 GrPrimitiveType primType; 499 500 if (args.fStrokeWidth > 0) {; 501 primType = kTriangleStrip_GrPrimitiveType; 502 args.fRect.sort(); 503 this->setStrokeRectStrip(vertex, args.fRect, args.fStrokeWidth); 504 } else { 505 // hairline 506 primType = kLineStrip_GrPrimitiveType; 507 vertex[0].set(args.fRect.fLeft, args.fRect.fTop); 508 vertex[1].set(args.fRect.fRight, args.fRect.fTop); 509 vertex[2].set(args.fRect.fRight, args.fRect.fBottom); 510 vertex[3].set(args.fRect.fLeft, args.fRect.fBottom); 511 vertex[4].set(args.fRect.fLeft, args.fRect.fTop); 512 } 513 514 GrVertices vertices; 515 vertices.init(primType, vertexBuffer, firstVertex, vertexCount); 516 batchTarget->draw(vertices); 517 } 518 519 SkSTArray<1, Geometry, true>* geoData() { return &fGeoData; } 520 521private: 522 StrokeRectBatch(const Geometry& geometry) { 523 this->initClassID<StrokeRectBatch>(); 524 525 fBatch.fHairline = geometry.fStrokeWidth == 0; 526 527 fGeoData.push_back(geometry); 528 529 // setup bounds 530 
fBounds = geometry.fRect; 531 SkScalar rad = SkScalarHalf(geometry.fStrokeWidth); 532 fBounds.outset(rad, rad); 533 geometry.fViewMatrix.mapRect(&fBounds); 534 } 535 536 /* create a triangle strip that strokes the specified rect. There are 8 537 unique vertices, but we repeat the last 2 to close up. Alternatively we 538 could use an indices array, and then only send 8 verts, but not sure that 539 would be faster. 540 */ 541 void setStrokeRectStrip(SkPoint verts[10], const SkRect& rect, SkScalar width) { 542 const SkScalar rad = SkScalarHalf(width); 543 // TODO we should be able to enable this assert, but we'd have to filter these draws 544 // this is a bug 545 //SkASSERT(rad < rect.width() / 2 && rad < rect.height() / 2); 546 547 verts[0].set(rect.fLeft + rad, rect.fTop + rad); 548 verts[1].set(rect.fLeft - rad, rect.fTop - rad); 549 verts[2].set(rect.fRight - rad, rect.fTop + rad); 550 verts[3].set(rect.fRight + rad, rect.fTop - rad); 551 verts[4].set(rect.fRight - rad, rect.fBottom - rad); 552 verts[5].set(rect.fRight + rad, rect.fBottom + rad); 553 verts[6].set(rect.fLeft + rad, rect.fBottom - rad); 554 verts[7].set(rect.fLeft - rad, rect.fBottom + rad); 555 verts[8] = verts[0]; 556 verts[9] = verts[1]; 557 } 558 559 560 GrColor color() const { return fBatch.fColor; } 561 bool usesLocalCoords() const { return fBatch.fUsesLocalCoords; } 562 bool colorIgnored() const { return fBatch.fColorIgnored; } 563 const SkMatrix& viewMatrix() const { return fGeoData[0].fViewMatrix; } 564 bool hairline() const { return fBatch.fHairline; } 565 566 bool onCombineIfPossible(GrBatch* t) override { 567 // StrokeRectBatch* that = t->cast<StrokeRectBatch>(); 568 569 // NonAA stroke rects cannot batch right now 570 // TODO make these batchable 571 return false; 572 } 573 574 struct BatchTracker { 575 GrColor fColor; 576 bool fUsesLocalCoords; 577 bool fColorIgnored; 578 bool fCoverageIgnored; 579 bool fHairline; 580 }; 581 582 const static int kVertsPerHairlineRect = 5; 583 const 
static int kVertsPerStrokeRect = 10; 584 585 BatchTracker fBatch; 586 SkSTArray<1, Geometry, true> fGeoData; 587}; 588 589void GrContext::drawRect(GrRenderTarget* rt, 590 const GrClip& clip, 591 const GrPaint& paint, 592 const SkMatrix& viewMatrix, 593 const SkRect& rect, 594 const GrStrokeInfo* strokeInfo) { 595 RETURN_IF_ABANDONED 596 if (strokeInfo && strokeInfo->isDashed()) { 597 SkPath path; 598 path.addRect(rect); 599 this->drawPath(rt, clip, paint, viewMatrix, path, *strokeInfo); 600 return; 601 } 602 603 AutoCheckFlush acf(this); 604 GrPipelineBuilder pipelineBuilder; 605 GrDrawTarget* target = this->prepareToDraw(&pipelineBuilder, rt, clip, &paint, &acf); 606 if (NULL == target) { 607 return; 608 } 609 610 GR_CREATE_TRACE_MARKER("GrContext::drawRect", target); 611 SkScalar width = NULL == strokeInfo ? -1 : strokeInfo->getStrokeRec().getWidth(); 612 613 // Check if this is a full RT draw and can be replaced with a clear. We don't bother checking 614 // cases where the RT is fully inside a stroke. 615 if (width < 0) { 616 SkRect rtRect; 617 pipelineBuilder.getRenderTarget()->getBoundsRect(&rtRect); 618 SkRect clipSpaceRTRect = rtRect; 619 bool checkClip = GrClip::kWideOpen_ClipType != clip.clipType(); 620 if (checkClip) { 621 clipSpaceRTRect.offset(SkIntToScalar(clip.origin().fX), 622 SkIntToScalar(clip.origin().fY)); 623 } 624 // Does the clip contain the entire RT? 625 if (!checkClip || clip.quickContains(clipSpaceRTRect)) { 626 SkMatrix invM; 627 if (!viewMatrix.invert(&invM)) { 628 return; 629 } 630 // Does the rect bound the RT? 631 SkPoint srcSpaceRTQuad[4]; 632 invM.mapRectToQuad(srcSpaceRTQuad, rtRect); 633 if (rect_contains_inclusive(rect, srcSpaceRTQuad[0]) && 634 rect_contains_inclusive(rect, srcSpaceRTQuad[1]) && 635 rect_contains_inclusive(rect, srcSpaceRTQuad[2]) && 636 rect_contains_inclusive(rect, srcSpaceRTQuad[3])) { 637 // Will it blend? 
                GrColor clearColor;
                if (paint.isOpaqueAndConstantColor(&clearColor)) {
                    // Opaque constant-color full-target fill: a clear is cheaper.
                    target->clear(NULL, clearColor, true, rt);
                    return;
                }
            }
        }
    }

    GrColor color = paint.getColor();
    SkRect devBoundRect;
    bool needAA = paint.isAntiAlias() && !pipelineBuilder.getRenderTarget()->isMultisampled();
    bool doAA = needAA && apply_aa_to_rect(target, &pipelineBuilder, &devBoundRect, rect, width,
                                           viewMatrix, color);

    if (doAA) {
        if (width >= 0) {
            const SkStrokeRec& strokeRec = strokeInfo->getStrokeRec();
            fAARectRenderer->strokeAARect(target,
                                          &pipelineBuilder,
                                          color,
                                          viewMatrix,
                                          rect,
                                          devBoundRect,
                                          strokeRec);
        } else {
            // filled AA rect
            fAARectRenderer->fillAARect(target,
                                        &pipelineBuilder,
                                        color,
                                        viewMatrix,
                                        rect,
                                        devBoundRect);
        }
        return;
    }

    if (width >= 0) {
        StrokeRectBatch::Geometry geometry;
        geometry.fViewMatrix = viewMatrix;
        geometry.fColor = color;
        geometry.fRect = rect;
        geometry.fStrokeWidth = width;

        SkAutoTUnref<GrBatch> batch(StrokeRectBatch::Create(geometry));

        // Depending on sub-pixel coordinates and the particular GPU, we may lose a corner of
        // hairline rects. We jam all the vertices to pixel centers to avoid this, but not when MSAA
        // is enabled because it can cause ugly artifacts.
        pipelineBuilder.setState(GrPipelineBuilder::kSnapVerticesToPixelCenters_Flag,
                                 0 == width && !rt->isMultisampled());
        target->drawBatch(&pipelineBuilder, batch);
    } else {
        // filled BW rect
        target->drawSimpleRect(&pipelineBuilder, color, viewMatrix, rect);
    }
}

// Draws 'rectToDraw' textured with 'localRect' (optionally transformed by
// 'localMatrix') without antialiasing.
void GrContext::drawNonAARectToRect(GrRenderTarget* rt,
                                    const GrClip& clip,
                                    const GrPaint& paint,
                                    const SkMatrix& viewMatrix,
                                    const SkRect& rectToDraw,
                                    const SkRect& localRect,
                                    const SkMatrix* localMatrix) {
    RETURN_IF_ABANDONED
    AutoCheckFlush acf(this);
    GrPipelineBuilder pipelineBuilder;
    GrDrawTarget* target = this->prepareToDraw(&pipelineBuilder, rt, clip, &paint, &acf);
    if (NULL == target) {
        return;
    }

    GR_CREATE_TRACE_MARKER("GrContext::drawRectToRect", target);

    target->drawRect(&pipelineBuilder,
                     paint.getColor(),
                     viewMatrix,
                     rectToDraw,
                     &localRect,
                     localMatrix);
}

// Builds the default geometry processor for drawVertices and reports the byte
// offsets of the (optional) interleaved color and local-coord attributes.
// Layout is always position first, then color, then local coords.
static const GrGeometryProcessor* set_vertex_attributes(bool hasLocalCoords,
                                                        bool hasColors,
                                                        int* colorOffset,
                                                        int* texOffset,
                                                        GrColor color,
                                                        const SkMatrix& viewMatrix) {
    *texOffset = -1;
    *colorOffset = -1;
    uint32_t flags = GrDefaultGeoProcFactory::kPosition_GPType;
    if (hasLocalCoords && hasColors) {
        *colorOffset = sizeof(SkPoint);
        *texOffset = sizeof(SkPoint) + sizeof(GrColor);
        flags |= GrDefaultGeoProcFactory::kColor_GPType |
                 GrDefaultGeoProcFactory::kLocalCoord_GPType;
    } else if (hasLocalCoords) {
        *texOffset = sizeof(SkPoint);
        flags |= GrDefaultGeoProcFactory::kLocalCoord_GPType;
    } else if (hasColors) {
        *colorOffset = sizeof(SkPoint);
        flags |= GrDefaultGeoProcFactory::kColor_GPType;
    }
    return GrDefaultGeoProcFactory::Create(flags, color, viewMatrix, SkMatrix::I());
}

// Batch for GrContext::drawVertices: user-supplied positions with optional
// per-vertex colors, local coords, and indices.
class DrawVerticesBatch : public GrBatch {
public:
    struct Geometry {
        GrColor fColor;
        SkTDArray<SkPoint> fPositions;
        SkTDArray<uint16_t> fIndices;
        SkTDArray<GrColor> fColors;
        SkTDArray<SkPoint> fLocalCoords;
    };

    static GrBatch* Create(const Geometry& geometry, GrPrimitiveType primitiveType,
                           const SkMatrix& viewMatrix,
                           const SkPoint* positions, int vertexCount,
                           const uint16_t* indices, int indexCount,
                           const GrColor* colors, const SkPoint* localCoords,
                           const SkRect& bounds) {
        return SkNEW_ARGS(DrawVerticesBatch, (geometry, primitiveType, viewMatrix, positions,
                                              vertexCount, indices, indexCount, colors,
                                              localCoords, bounds));
    }

    const char* name() const override { return "DrawVerticesBatch"; }

    void getInvariantOutputColor(GrInitInvariantOutput* out) const override {
        // When this is called on a batch, there is only one geometry bundle
        if (this->hasColors()) {
            out->setUnknownFourComponents();
        } else {
            out->setKnownFourComponents(fGeoData[0].fColor);
        }
    }

    void getInvariantOutputCoverage(GrInitInvariantOutput* out) const override {
        out->setKnownSingleComponent(0xff);
    }

    void initBatchTracker(const GrPipelineInfo& init) override {
        // Handle any color overrides
        if (init.fColorIgnored) {
            fGeoData[0].fColor = GrColor_ILLEGAL;
        } else if (GrColor_ILLEGAL != init.fOverrideColor) {
            fGeoData[0].fColor = init.fOverrideColor;
        }

        // setup batch properties
        fBatch.fColorIgnored = init.fColorIgnored;
        fBatch.fColor = fGeoData[0].fColor;
        fBatch.fUsesLocalCoords = init.fUsesLocalCoords;
        fBatch.fCoverageIgnored = init.fCoverageIgnored;
    }

    void generateGeometry(GrBatchTarget* batchTarget, const GrPipeline* pipeline) override {
        int colorOffset = -1, texOffset = -1;
        SkAutoTUnref<const GrGeometryProcessor> gp(
                set_vertex_attributes(this->hasLocalCoords(), this->hasColors(), &colorOffset,
                                      &texOffset, this->color(), this->viewMatrix()));

        batchTarget->initDraw(gp, pipeline);

        // TODO this is hacky, but the only way we have to initialize the GP is to use the
        // GrPipelineInfo struct so we can generate the correct shader. Once we have GrBatch
        // everywhere we can remove this nastiness
        GrPipelineInfo init;
        init.fColorIgnored = fBatch.fColorIgnored;
        init.fOverrideColor = GrColor_ILLEGAL;
        init.fCoverageIgnored = fBatch.fCoverageIgnored;
        init.fUsesLocalCoords = this->usesLocalCoords();
        gp->initBatchTracker(batchTarget->currentBatchTracker(), init);

        size_t vertexStride = gp->getVertexStride();

        // Stride must match the interleaved layout set_vertex_attributes declared.
        SkASSERT(vertexStride == sizeof(SkPoint) + (this->hasLocalCoords() ? sizeof(SkPoint) : 0)
                                                 + (this->hasColors() ? sizeof(GrColor) : 0));

        int instanceCount = fGeoData.count();

        const GrVertexBuffer* vertexBuffer;
        int firstVertex;

        void* verts = batchTarget->vertexPool()->makeSpace(vertexStride,
                                                           this->vertexCount(),
                                                           &vertexBuffer,
                                                           &firstVertex);

        if (!verts) {
            SkDebugf("Could not allocate vertices\n");
            return;
        }

        const GrIndexBuffer* indexBuffer = NULL;
        int firstIndex = 0;

        void* indices = NULL;
        if (this->hasIndices()) {
            indices = batchTarget->indexPool()->makeSpace(this->indexCount(),
                                                          &indexBuffer,
                                                          &firstIndex);

            if (!indices) {
                SkDebugf("Could not allocate indices\n");
                return;
            }
        }

        // Interleave every combined instance's data into the pooled buffers,
        // rebasing each instance's indices by the running vertex offset.
        int indexOffset = 0;
        int vertexOffset = 0;
        for (int i = 0; i < instanceCount; i++) {
            const Geometry& args = fGeoData[i];

            // TODO we can actually cache this interleaved and then just memcopy
            if (this->hasIndices()) {
                for (int j = 0; j < args.fIndices.count(); ++j, ++indexOffset) {
                    *((uint16_t*)indices + indexOffset) = args.fIndices[j] + vertexOffset;
                }
            }

            for (int j = 0; j < args.fPositions.count(); ++j) {
                *((SkPoint*)verts) = args.fPositions[j];
                if (this->hasColors()) {
                    *(GrColor*)((intptr_t)verts + colorOffset) = args.fColors[j];
                }
                if (this->hasLocalCoords()) {
                    *(SkPoint*)((intptr_t)verts + texOffset) = args.fLocalCoords[j];
                }
                verts = (void*)((intptr_t)verts + vertexStride);
                vertexOffset++;
            }
        }

        GrVertices vertices;
        if (this->hasIndices()) {
            vertices.initIndexed(this->primitiveType(), vertexBuffer, indexBuffer, firstVertex,
                                 firstIndex, this->vertexCount(), this->indexCount());

        } else {
            vertices.init(this->primitiveType(), vertexBuffer, firstVertex, this->vertexCount());
        }
        batchTarget->draw(vertices);
    }

    SkSTArray<1, Geometry, true>* geoData() { return &fGeoData; }

private:
    DrawVerticesBatch(const Geometry& geometry, GrPrimitiveType primitiveType,
                      const SkMatrix& viewMatrix,
                      const SkPoint* positions, int vertexCount,
                      const uint16_t* indices, int indexCount,
                      const GrColor* colors, const SkPoint* localCoords, const SkRect& bounds) {
        this->initClassID<DrawVerticesBatch>();
        SkASSERT(positions);

        fBatch.fViewMatrix = viewMatrix;
        Geometry& installedGeo = fGeoData.push_back(geometry);

        installedGeo.fPositions.append(vertexCount, positions);
        if (indices) {
            installedGeo.fIndices.append(indexCount, indices);
            fBatch.fHasIndices = true;
        } else {
            fBatch.fHasIndices = false;
        }

        if (colors) {
            installedGeo.fColors.append(vertexCount, colors);
            fBatch.fHasColors = true;
        } else {
            fBatch.fHasColors = false;
        }

        if (localCoords) {
            installedGeo.fLocalCoords.append(vertexCount, localCoords);
            fBatch.fHasLocalCoords = true;
        } else {
            fBatch.fHasLocalCoords = false;
        }
        fBatch.fVertexCount = vertexCount;
        fBatch.fIndexCount = indexCount;
        fBatch.fPrimitiveType = primitiveType;

        this->setBounds(bounds);
    }

    GrPrimitiveType primitiveType() const { return fBatch.fPrimitiveType; }
    // Only list-style primitives can be concatenated; strips/fans cannot.
    bool batchablePrimitiveType() const {
        return kTriangles_GrPrimitiveType == fBatch.fPrimitiveType ||
               kLines_GrPrimitiveType == fBatch.fPrimitiveType ||
               kPoints_GrPrimitiveType == fBatch.fPrimitiveType;
    }
    GrColor color() const { return fBatch.fColor; }
    bool usesLocalCoords() const { return fBatch.fUsesLocalCoords; }
    bool colorIgnored() const { return fBatch.fColorIgnored; }
    const SkMatrix& viewMatrix() const { return fBatch.fViewMatrix; }
    bool hasColors() const { return fBatch.fHasColors; }
    bool hasIndices() const { return fBatch.fHasIndices; }
    bool hasLocalCoords() const { return fBatch.fHasLocalCoords; }
    int vertexCount() const { return fBatch.fVertexCount; }
    int indexCount() const { return fBatch.fIndexCount; }

    // Merges 't' into this batch when primitive type, view matrix, and attribute
    // layouts all match; on success appends its geometry and grows the bounds.
    bool onCombineIfPossible(GrBatch* t) override {
        DrawVerticesBatch* that = t->cast<DrawVerticesBatch>();

        if (!this->batchablePrimitiveType() || this->primitiveType() != that->primitiveType()) {
            return false;
        }

        SkASSERT(this->usesLocalCoords() == that->usesLocalCoords());

        // We currently use a uniform viewmatrix for this batch
        if (!this->viewMatrix().cheapEqualTo(that->viewMatrix())) {
            return false;
        }

        if (this->hasColors() != that->hasColors()) {
            return false;
        }

        if (this->hasIndices() != that->hasIndices()) {
            return false;
        }

        if (this->hasLocalCoords() != that->hasLocalCoords()) {
            return false;
        }

        // Without per-vertex colors the batch color is uniform, so it must match.
        if (!this->hasColors() && this->color() != that->color()) {
            return false;
        }

        if (this->color() != that->color()) {
            fBatch.fColor = GrColor_ILLEGAL;
        }
        fGeoData.push_back_n(that->geoData()->count(), that->geoData()->begin());
        fBatch.fVertexCount += that->vertexCount();
        fBatch.fIndexCount += that->indexCount();

        this->joinBounds(that->bounds());
        return true;
    }

    struct BatchTracker {
        GrPrimitiveType fPrimitiveType;
        SkMatrix fViewMatrix;
        GrColor fColor;
        bool fUsesLocalCoords;
        bool fColorIgnored;
        bool fCoverageIgnored;
        bool fHasColors;
        bool fHasIndices;
        bool fHasLocalCoords;
        int fVertexCount;
        int fIndexCount;
    };

    BatchTracker fBatch;
    SkSTArray<1, Geometry, true> fGeoData;
};

// Draws user-supplied vertex data. texCoords, colors, and indices are each
// optional (may be NULL); bounds are derived from the positions.
void GrContext::drawVertices(GrRenderTarget* rt,
                             const GrClip& clip,
                             const GrPaint& paint,
                             const SkMatrix& viewMatrix,
                             GrPrimitiveType primitiveType,
                             int vertexCount,
                             const SkPoint positions[],
                             const SkPoint texCoords[],
                             const GrColor colors[],
                             const uint16_t indices[],
                             int indexCount) {
    RETURN_IF_ABANDONED
    AutoCheckFlush acf(this);
    GrPipelineBuilder pipelineBuilder;

    GrDrawTarget* target = this->prepareToDraw(&pipelineBuilder, rt, clip, &paint, &acf);
    if (NULL == target) {
        return;
    }

    GR_CREATE_TRACE_MARKER("GrContext::drawVertices", target);

    // TODO clients should give us bounds
    SkRect bounds;
    if (!bounds.setBoundsCheck(positions, vertexCount)) {
        SkDebugf("drawVertices call empty bounds\n");
        return;
    }

    viewMatrix.mapRect(&bounds);

    DrawVerticesBatch::Geometry geometry;
    geometry.fColor = paint.getColor();
    SkAutoTUnref<GrBatch> batch(DrawVerticesBatch::Create(geometry, primitiveType, viewMatrix,
                                                          positions, vertexCount, indices,
                                                          indexCount, colors, texCoords,
                                                          bounds));

    target->drawBatch(&pipelineBuilder, batch);
}

///////////////////////////////////////////////////////////////////////////////

// Draws a rounded rect; dashed strokes and shapes the oval renderer rejects
// fall back to the path renderer.
void GrContext::drawRRect(GrRenderTarget*rt,
                          const GrClip& clip,
                          const GrPaint& paint,
                          const SkMatrix& viewMatrix,
                          const SkRRect& rrect,
                          const GrStrokeInfo& strokeInfo) {
    RETURN_IF_ABANDONED
    if (rrect.isEmpty()) {
        return;
    }

    if (strokeInfo.isDashed()) {
        SkPath path;
        path.addRRect(rrect);
        this->drawPath(rt, clip, paint, viewMatrix, path, strokeInfo);
        return;
    }

    AutoCheckFlush acf(this);
    GrPipelineBuilder pipelineBuilder;
GrDrawTarget* target = this->prepareToDraw(&pipelineBuilder, rt, clip, &paint, &acf); 1066 if (NULL == target) { 1067 return; 1068 } 1069 1070 GR_CREATE_TRACE_MARKER("GrContext::drawRRect", target); 1071 1072 const SkStrokeRec& strokeRec = strokeInfo.getStrokeRec(); 1073 1074 GrColor color = paint.getColor(); 1075 if (!fOvalRenderer->drawRRect(target, 1076 &pipelineBuilder, 1077 color, 1078 viewMatrix, 1079 paint.isAntiAlias(), 1080 rrect, 1081 strokeRec)) { 1082 SkPath path; 1083 path.addRRect(rrect); 1084 this->internalDrawPath(target, &pipelineBuilder, viewMatrix, color, paint.isAntiAlias(), 1085 path, strokeInfo); 1086 } 1087} 1088 1089/////////////////////////////////////////////////////////////////////////////// 1090 1091void GrContext::drawDRRect(GrRenderTarget* rt, 1092 const GrClip& clip, 1093 const GrPaint& paint, 1094 const SkMatrix& viewMatrix, 1095 const SkRRect& outer, 1096 const SkRRect& inner) { 1097 RETURN_IF_ABANDONED 1098 if (outer.isEmpty()) { 1099 return; 1100 } 1101 1102 AutoCheckFlush acf(this); 1103 GrPipelineBuilder pipelineBuilder; 1104 GrDrawTarget* target = this->prepareToDraw(&pipelineBuilder, rt, clip, &paint, &acf); 1105 1106 GR_CREATE_TRACE_MARKER("GrContext::drawDRRect", target); 1107 1108 GrColor color = paint.getColor(); 1109 if (!fOvalRenderer->drawDRRect(target, 1110 &pipelineBuilder, 1111 color, 1112 viewMatrix, 1113 paint.isAntiAlias(), 1114 outer, 1115 inner)) { 1116 SkPath path; 1117 path.addRRect(inner); 1118 path.addRRect(outer); 1119 path.setFillType(SkPath::kEvenOdd_FillType); 1120 1121 GrStrokeInfo fillRec(SkStrokeRec::kFill_InitStyle); 1122 this->internalDrawPath(target, &pipelineBuilder, viewMatrix, color, paint.isAntiAlias(), 1123 path, fillRec); 1124 } 1125} 1126 1127/////////////////////////////////////////////////////////////////////////////// 1128 1129void GrContext::drawOval(GrRenderTarget* rt, 1130 const GrClip& clip, 1131 const GrPaint& paint, 1132 const SkMatrix& viewMatrix, 1133 const SkRect& oval, 1134 
                         const GrStrokeInfo& strokeInfo) {
    RETURN_IF_ABANDONED
    if (oval.isEmpty()) {
        return;
    }

    // Dashed strokes can't use the oval fast path; convert to a generic path draw.
    if (strokeInfo.isDashed()) {
        SkPath path;
        path.addOval(oval);
        this->drawPath(rt, clip, paint, viewMatrix, path, strokeInfo);
        return;
    }

    AutoCheckFlush acf(this);
    GrPipelineBuilder pipelineBuilder;
    GrDrawTarget* target = this->prepareToDraw(&pipelineBuilder, rt, clip, &paint, &acf);
    if (NULL == target) {
        return;
    }

    GR_CREATE_TRACE_MARKER("GrContext::drawOval", target);

    const SkStrokeRec& strokeRec = strokeInfo.getStrokeRec();

    GrColor color = paint.getColor();
    // Try the specialized oval renderer first; fall back to the generic path pipeline.
    if (!fOvalRenderer->drawOval(target,
                                 &pipelineBuilder,
                                 color,
                                 viewMatrix,
                                 paint.isAntiAlias(),
                                 oval,
                                 strokeRec)) {
        SkPath path;
        path.addOval(oval);
        this->internalDrawPath(target, &pipelineBuilder, viewMatrix, color, paint.isAntiAlias(),
                               path, strokeInfo);
    }
}

// Can 'path' be drawn as a pair of filled nested rectangles?
static bool is_nested_rects(GrDrawTarget* target,
                            GrPipelineBuilder* pipelineBuilder,
                            GrColor color,
                            const SkMatrix& viewMatrix,
                            const SkPath& path,
                            const SkStrokeRec& stroke,
                            SkRect rects[2]) {
    SkASSERT(stroke.isFillStyle());

    if (path.isInverseFillType()) {
        return false;
    }

    // TODO: this restriction could be lifted if we were willing to apply
    // the matrix to all the points individually rather than just to the rect
    if (!viewMatrix.preservesAxisAlignment()) {
        return false;
    }

    SkPath::Direction dirs[2];
    if (!path.isNestedFillRects(rects, dirs)) {
        return false;
    }

    if (SkPath::kWinding_FillType == path.getFillType() && dirs[0] == dirs[1]) {
        // The two rects need to be wound opposite to each other
        return false;
    }

    // Right now, nested rects where the margin is not the same width
    // all around do not render correctly
    const SkScalar* outer = rects[0].asScalars();
    const SkScalar* inner = rects[1].asScalars();

    bool allEq = true;

    // margin of the first edge pair; used as the reference for "all equal".
    SkScalar margin = SkScalarAbs(outer[0] - inner[0]);
    bool allGoE1 = margin >= SK_Scalar1;

    // Check the remaining three edges: accept if every margin is >= 1 pixel
    // (allGoE1) or all four margins are (nearly) equal (allEq).
    for (int i = 1; i < 4; ++i) {
        SkScalar temp = SkScalarAbs(outer[i] - inner[i]);
        if (temp < SK_Scalar1) {
            allGoE1 = false;
        }
        if (!SkScalarNearlyEqual(margin, temp)) {
            allEq = false;
        }
    }

    return allEq || allGoE1;
}

void GrContext::drawPath(GrRenderTarget* rt,
                         const GrClip& clip,
                         const GrPaint& paint,
                         const SkMatrix& viewMatrix,
                         const SkPath& path,
                         const GrStrokeInfo& strokeInfo) {
    RETURN_IF_ABANDONED
    if (path.isEmpty()) {
        if (path.isInverseFillType()) {
            // An empty inverse-filled path covers everything: draw it as a paint fill.
            this->drawPaint(rt, clip, paint, viewMatrix);
        }
        return;
    }

    GrColor color = paint.getColor();

    // Note that internalDrawPath may sw-rasterize the path into a scratch texture.
    // Scratch textures can be recycled after they are returned to the texture
    // cache. This presents a potential hazard for buffered drawing. However,
    // the writePixels that uploads to the scratch will perform a flush so we're
    // OK.
    AutoCheckFlush acf(this);
    GrPipelineBuilder pipelineBuilder;
    GrDrawTarget* target = this->prepareToDraw(&pipelineBuilder, rt, clip, &paint, &acf);
    if (NULL == target) {
        return;
    }

    GR_CREATE_TRACE_MARKER1("GrContext::drawPath", target, "Is Convex", path.isConvex());

    if (!strokeInfo.isDashed()) {
        const SkStrokeRec& strokeRec = strokeInfo.getStrokeRec();
        // Coverage-based AA is only used when the target isn't multisampled.
        bool useCoverageAA = paint.isAntiAlias() &&
                !pipelineBuilder.getRenderTarget()->isMultisampled();

        if (useCoverageAA && strokeRec.getWidth() < 0 && !path.isConvex()) {
            // Concave AA paths are expensive - try to avoid them for special cases
            SkRect rects[2];

            if (is_nested_rects(target, &pipelineBuilder, color, viewMatrix, path, strokeRec,
                                rects)) {
                fAARectRenderer->fillAANestedRects(target, &pipelineBuilder, color, viewMatrix,
                                                   rects);
                return;
            }
        }
        SkRect ovalRect;
        bool isOval = path.isOval(&ovalRect);

        // Paths that are really ovals get the specialized oval renderer when possible.
        if (isOval && !path.isInverseFillType()) {
            if (fOvalRenderer->drawOval(target,
                                        &pipelineBuilder,
                                        color,
                                        viewMatrix,
                                        paint.isAntiAlias(),
                                        ovalRect,
                                        strokeRec)) {
                return;
            }
        }
    }
    // General case: hand off to the path-renderer chain.
    this->internalDrawPath(target, &pipelineBuilder, viewMatrix, color, paint.isAntiAlias(),
                           path, strokeInfo);
}

// Core path draw: selects a path renderer, converting dashed and stroked paths to
// simpler forms as needed until a capable renderer is found.
void GrContext::internalDrawPath(GrDrawTarget* target,
                                 GrPipelineBuilder* pipelineBuilder,
                                 const SkMatrix& viewMatrix,
                                 GrColor color,
                                 bool useAA,
                                 const SkPath& path,
                                 const GrStrokeInfo& strokeInfo) {
    RETURN_IF_ABANDONED
    SkASSERT(!path.isEmpty());

    GR_CREATE_TRACE_MARKER("GrContext::internalDrawPath", target);


    // An
Assumption here is that path renderer would use some form of tweaking
    // the src color (either the input alpha or in the frag shader) to implement
    // aa. If we have some future driver-mojo path AA that can do the right
    // thing WRT to the blend then we'll need some query on the PR.
    bool useCoverageAA = useAA &&
        !pipelineBuilder->getRenderTarget()->isMultisampled();


    GrPathRendererChain::DrawType type =
        useCoverageAA ? GrPathRendererChain::kColorAntiAlias_DrawType :
                        GrPathRendererChain::kColor_DrawType;

    const SkPath* pathPtr = &path;
    SkTLazy<SkPath> tmpPath;
    const GrStrokeInfo* strokeInfoPtr = &strokeInfo;

    // Try a 1st time without stroking the path and without allowing the SW renderer
    GrPathRenderer* pr = this->getPathRenderer(target, pipelineBuilder, viewMatrix, *pathPtr,
                                               *strokeInfoPtr, false, type);

    GrStrokeInfo dashlessStrokeInfo(strokeInfo, false);
    if (NULL == pr && strokeInfo.isDashed()) {
        // It didn't work above, so try again with dashed stroke converted to a dashless stroke.
        if (!strokeInfo.applyDash(tmpPath.init(), &dashlessStrokeInfo, *pathPtr)) {
            return;
        }
        pathPtr = tmpPath.get();
        if (pathPtr->isEmpty()) {
            return;
        }
        strokeInfoPtr = &dashlessStrokeInfo;
        pr = this->getPathRenderer(target, pipelineBuilder, viewMatrix, *pathPtr, *strokeInfoPtr,
                                   false, type);
    }

    if (NULL == pr) {
        if (!GrPathRenderer::IsStrokeHairlineOrEquivalent(*strokeInfoPtr, viewMatrix, NULL) &&
            !strokeInfoPtr->isFillStyle()) {
            // It didn't work above, so try again with stroke converted to a fill.
            if (!tmpPath.isValid()) {
                tmpPath.init();
            }
            SkStrokeRec* strokeRec = dashlessStrokeInfo.getStrokeRecPtr();
            strokeRec->setResScale(SkScalarAbs(viewMatrix.getMaxScale()));
            if (!strokeRec->applyToPath(tmpPath.get(), *pathPtr)) {
                return;
            }
            pathPtr = tmpPath.get();
            if (pathPtr->isEmpty()) {
                return;
            }
            strokeRec->setFillStyle();
            strokeInfoPtr = &dashlessStrokeInfo;
        }

        // This time, allow SW renderer
        pr = this->getPathRenderer(target, pipelineBuilder, viewMatrix, *pathPtr, *strokeInfoPtr,
                                   true, type);
    }

    if (NULL == pr) {
#ifdef SK_DEBUG
        SkDebugf("Unable to find path renderer compatible with path.\n");
#endif
        return;
    }

    pr->drawPath(target, pipelineBuilder, color, viewMatrix, *pathPtr, *strokeInfoPtr, useCoverageAA);
}

////////////////////////////////////////////////////////////////////////////////

// Flushes all buffered drawing to the GPU, or discards it when kDiscard_FlushBit
// is set, then notifies the resource cache that a flush occurred.
void GrContext::flush(int flagsBitfield) {
    if (NULL == fDrawBuffer) {
        return;
    }

    if (kDiscard_FlushBit & flagsBitfield) {
        fDrawBuffer->reset();
    } else {
        fDrawBuffer->flush();
    }
    fResourceCache->notifyFlushOccurred();
    fFlushToReduceCacheSize = false;
}

// CPU fallback: converts unpremultiplied pixels in 'inPixels' to premultiplied
// pixels in 'outPixels'. Returns false when srcConfig has no SkColorType mapping.
bool sw_convert_to_premul(GrPixelConfig srcConfig, int width, int height, size_t inRowBytes,
                          const void* inPixels, size_t outRowBytes, void* outPixels) {
    SkSrcPixelInfo srcPI;
    if (!GrPixelConfig2ColorAndProfileType(srcConfig, &srcPI.fColorType, NULL)) {
        return false;
    }
    srcPI.fAlphaType = kUnpremul_SkAlphaType;
    srcPI.fPixels = inPixels;
    srcPI.fRowBytes = inRowBytes;

    SkDstPixelInfo dstPI;
    dstPI.fColorType = srcPI.fColorType;
    dstPI.fAlphaType = kPremul_SkAlphaType;
    dstPI.fPixels = outPixels;
    dstPI.fRowBytes = outRowBytes;

    return srcPI.convertPixelsTo(&dstPI, width, height);
}

bool
GrContext::writeSurfacePixels(GrSurface* surface,
                              int left, int top, int width, int height,
                              GrPixelConfig srcConfig, const void* buffer, size_t rowBytes,
                              uint32_t pixelOpsFlags) {
    RETURN_FALSE_IF_ABANDONED
    {
        // Fast path: write directly into the texture when the GPU supports it and
        // no unpremul conversion is requested.
        GrTexture* texture = NULL;
        if (!(kUnpremul_PixelOpsFlag & pixelOpsFlags) && (texture = surface->asTexture()) &&
            fGpu->canWriteTexturePixels(texture, srcConfig)) {

            if (!(kDontFlush_PixelOpsFlag & pixelOpsFlags) &&
                surface->surfacePriv().hasPendingIO()) {
                this->flush();
            }
            return fGpu->writeTexturePixels(texture, left, top, width, height,
                                            srcConfig, buffer, rowBytes);
            // Don't need to check kFlushWrites_PixelOp here, we just did a direct write so the
            // upload is already flushed.
        }
    }

    // If we didn't do a direct texture write then we upload the pixels to a texture and draw.
    GrRenderTarget* renderTarget = surface->asRenderTarget();
    if (NULL == renderTarget) {
        return false;
    }

    // We ignore the preferred config unless it is a R/B swap of the src config. In that case
    // we will upload the original src data to a scratch texture but we will spoof it as the swapped
    // config. This scratch will then have R and B swapped. We correct for this by swapping again
    // when drawing the scratch to the dst using a conversion effect.
    bool swapRAndB = false;
    GrPixelConfig writeConfig = srcConfig;
    if (GrPixelConfigSwapRAndB(srcConfig) ==
        fGpu->preferredWritePixelsConfig(srcConfig, renderTarget->config())) {
        writeConfig = GrPixelConfigSwapRAndB(srcConfig);
        swapRAndB = true;
    }

    GrSurfaceDesc desc;
    desc.fWidth = width;
    desc.fHeight = height;
    desc.fConfig = writeConfig;
    SkAutoTUnref<GrTexture> texture(this->textureProvider()->refScratchTexture(desc,
        GrTextureProvider::kApprox_ScratchTexMatch));
    if (!texture) {
        return false;
    }

    SkAutoTUnref<const GrFragmentProcessor> fp;
    SkMatrix textureMatrix;
    textureMatrix.setIDiv(texture->width(), texture->height());

    // allocate a tmp buffer and sw convert the pixels to premul
    SkAutoSTMalloc<128 * 128, uint32_t> tmpPixels(0);

    if (kUnpremul_PixelOpsFlag & pixelOpsFlags) {
        // Unpremul writes are only supported for 8888 configs.
        if (!GrPixelConfigIs8888(srcConfig)) {
            return false;
        }
        fp.reset(this->createUPMToPMEffect(texture, swapRAndB, textureMatrix));
        // handle the unpremul step on the CPU if we couldn't create an effect to do it.
        if (NULL == fp) {
            size_t tmpRowBytes = 4 * width;
            tmpPixels.reset(width * height);
            if (!sw_convert_to_premul(srcConfig, width, height, rowBytes, buffer, tmpRowBytes,
                                      tmpPixels.get())) {
                return false;
            }
            // From here on, upload the converted copy instead of the caller's buffer.
            rowBytes = tmpRowBytes;
            buffer = tmpPixels.get();
        }
    }
    if (NULL == fp) {
        fp.reset(GrConfigConversionEffect::Create(texture,
                                                  swapRAndB,
                                                  GrConfigConversionEffect::kNone_PMConversion,
                                                  textureMatrix));
    }

    // Even if the client told us not to flush, we still flush here. The client may have known that
    // writes to the original surface caused no data hazards, but they can't know that the scratch
    // we just got is safe.
    if (texture->surfacePriv().hasPendingIO()) {
        this->flush();
    }
    if (!fGpu->writeTexturePixels(texture, 0, 0, width, height,
                                  writeConfig, buffer, rowBytes)) {
        return false;
    }

    SkMatrix matrix;
    matrix.setTranslate(SkIntToScalar(left), SkIntToScalar(top));

    // This function can be called in the midst of drawing another object (e.g., when uploading a
    // SW-rasterized clip while issuing a draw). So we push the current geometry state before
    // drawing a rect to the render target.
    // The bracket ensures we pop the stack if we wind up flushing below.
    {
        GrDrawTarget* drawTarget = this->prepareToDraw();
        if (!drawTarget) {
            return false;
        }

        GrPipelineBuilder pipelineBuilder;
        pipelineBuilder.addColorProcessor(fp);
        pipelineBuilder.setRenderTarget(renderTarget);
        drawTarget->drawSimpleRect(&pipelineBuilder,
                                   GrColor_WHITE,
                                   matrix,
                                   SkRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height)));
    }

    if (kFlushWrites_PixelOp & pixelOpsFlags) {
        this->flushSurfaceWrites(surface);
    }

    return true;
}

// toggles between RGBA and BGRA
static SkColorType toggle_colortype32(SkColorType ct) {
    if (kRGBA_8888_SkColorType == ct) {
        return kBGRA_8888_SkColorType;
    } else {
        SkASSERT(kBGRA_8888_SkColorType == ct);
        return kRGBA_8888_SkColorType;
    }
}

// Reads back a rectangle of pixels from 'target' into 'buffer', performing any
// needed y-flip, R/B swap, or premul->unpremul conversion on the GPU when possible
// and on the CPU otherwise.
bool GrContext::readRenderTargetPixels(GrRenderTarget* target,
                                       int left, int top, int width, int height,
                                       GrPixelConfig dstConfig, void* buffer, size_t rowBytes,
                                       uint32_t flags) {
    RETURN_FALSE_IF_ABANDONED
    ASSERT_OWNED_RESOURCE(target);
    SkASSERT(target);

    if (!(kDontFlush_PixelOpsFlag & flags) && target->surfacePriv().hasPendingWrite()) {
        this->flush();
    }

    // Determine which conversions have to be applied: flipY, swapRAnd, and/or unpremul.

    // If fGpu->readPixels would incur a y-flip cost then we will read the pixels upside down. We'll
    // either do the flipY by drawing into a scratch with a matrix or on the cpu after the read.
    bool flipY = fGpu->readPixelsWillPayForYFlip(target, left, top,
                                                 width, height, dstConfig,
                                                 rowBytes);
    // We ignore the preferred config if it is different than our config unless it is an R/B swap.
    // In that case we'll perform an R and B swap while drawing to a scratch texture of the swapped
    // config. Then we will call readPixels on the scratch with the swapped config. The swaps during
    // the draw cancels out the fact that we call readPixels with a config that is R/B swapped from
    // dstConfig.
    GrPixelConfig readConfig = dstConfig;
    bool swapRAndB = false;
    if (GrPixelConfigSwapRAndB(dstConfig) ==
        fGpu->preferredReadPixelsConfig(dstConfig, target->config())) {
        readConfig = GrPixelConfigSwapRAndB(readConfig);
        swapRAndB = true;
    }

    bool unpremul = SkToBool(kUnpremul_PixelOpsFlag & flags);

    if (unpremul && !GrPixelConfigIs8888(dstConfig)) {
        // The unpremul flag is only allowed for these two configs.
        return false;
    }

    SkAutoTUnref<GrTexture> tempTexture;

    // If the src is a texture and we would have to do conversions after read pixels, we instead
    // do the conversions by drawing the src to a scratch texture. If we handle any of the
    // conversions in the draw we set the corresponding bool to false so that we don't reapply it
    // on the read back pixels.
    GrTexture* src = target->asTexture();
    if (src && (swapRAndB || unpremul || flipY)) {
        // Make the scratch a render so we can read its pixels.
        GrSurfaceDesc desc;
        desc.fFlags = kRenderTarget_GrSurfaceFlag;
        desc.fWidth = width;
        desc.fHeight = height;
        desc.fConfig = readConfig;
        desc.fOrigin = kTopLeft_GrSurfaceOrigin;

        // When a full read back is faster than a partial we could always make the scratch exactly
        // match the passed rect. However, if we see many different size rectangles we will trash
        // our texture cache and pay the cost of creating and destroying many textures. So, we only
        // request an exact match when the caller is reading an entire RT.
        GrTextureProvider::ScratchTexMatch match = GrTextureProvider::kApprox_ScratchTexMatch;
        if (0 == left &&
            0 == top &&
            target->width() == width &&
            target->height() == height &&
            fGpu->fullReadPixelsIsFasterThanPartial()) {
            match = GrTextureProvider::kExact_ScratchTexMatch;
        }
        tempTexture.reset(this->textureProvider()->refScratchTexture(desc, match));
        if (tempTexture) {
            // compute a matrix to perform the draw
            SkMatrix textureMatrix;
            textureMatrix.setTranslate(SK_Scalar1 *left, SK_Scalar1 *top);
            textureMatrix.postIDiv(src->width(), src->height());

            SkAutoTUnref<const GrFragmentProcessor> fp;
            if (unpremul) {
                fp.reset(this->createPMToUPMEffect(src, swapRAndB, textureMatrix));
                if (fp) {
                    unpremul = false; // we no longer need to do this on CPU after the read back.
                }
            }
            // If we failed to create a PM->UPM effect and have no other conversions to perform then
            // there is no longer any point to using the scratch.
            if (fp || flipY || swapRAndB) {
                if (!fp) {
                    fp.reset(GrConfigConversionEffect::Create(
                            src, swapRAndB, GrConfigConversionEffect::kNone_PMConversion,
                            textureMatrix));
                }
                swapRAndB = false; // we will handle the swap in the draw.

                // We protect the existing geometry here since it may not be
                // clear to the caller that a draw operation (i.e., drawSimpleRect)
                // can be invoked in this method
                {
                    GrPipelineBuilder pipelineBuilder;
                    SkASSERT(fp);
                    pipelineBuilder.addColorProcessor(fp);

                    pipelineBuilder.setRenderTarget(tempTexture->asRenderTarget());
                    SkRect rect = SkRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height));
                    fDrawBuffer->drawSimpleRect(&pipelineBuilder,
                                                GrColor_WHITE,
                                                SkMatrix::I(),
                                                rect);
                    // we want to read back from the scratch's origin
                    left = 0;
                    top = 0;
                    target = tempTexture->asRenderTarget();
                }
                this->flushSurfaceWrites(target);
            }
        }
    }

    if (!fGpu->readPixels(target,
                          left, top, width, height,
                          readConfig, buffer, rowBytes)) {
        return false;
    }
    // Perform any conversions we weren't able to perform using a scratch texture.
    if (unpremul || swapRAndB) {
        SkDstPixelInfo dstPI;
        if (!GrPixelConfig2ColorAndProfileType(dstConfig, &dstPI.fColorType, NULL)) {
            return false;
        }
        dstPI.fAlphaType = kUnpremul_SkAlphaType;
        dstPI.fPixels = buffer;
        dstPI.fRowBytes = rowBytes;

        // In-place conversion: source and destination share 'buffer'.
        SkSrcPixelInfo srcPI;
        srcPI.fColorType = swapRAndB ? toggle_colortype32(dstPI.fColorType) : dstPI.fColorType;
        srcPI.fAlphaType = kPremul_SkAlphaType;
        srcPI.fPixels = buffer;
        srcPI.fRowBytes = rowBytes;

        return srcPI.convertPixelsTo(&dstPI, width, height);
    }
    return true;
}

// Flushes pending work and resolves the surface's render target (if any) so an
// external client can safely read its contents.
void GrContext::prepareSurfaceForExternalRead(GrSurface* surface) {
    RETURN_IF_ABANDONED
    SkASSERT(surface);
    ASSERT_OWNED_RESOURCE(surface);
    if (surface->surfacePriv().hasPendingIO()) {
        this->flush();
    }
    GrRenderTarget* rt = surface->asRenderTarget();
    if (fGpu && rt) {
        fGpu->resolveRenderTarget(rt);
    }
}

// Marks the render target's contents as undefined so the GPU may skip
// loading/storing them.
void GrContext::discardRenderTarget(GrRenderTarget* renderTarget) {
    RETURN_IF_ABANDONED
    SkASSERT(renderTarget);
    ASSERT_OWNED_RESOURCE(renderTarget);
    AutoCheckFlush acf(this);
    GrDrawTarget* target = this->prepareToDraw();
    if (NULL == target) {
        return;
    }
    target->discard(renderTarget);
}

// Copies 'srcRect' from 'src' to 'dst' at 'dstPoint' through the buffered draw target.
void GrContext::copySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
                            const SkIPoint& dstPoint, uint32_t pixelOpsFlags) {
    RETURN_IF_ABANDONED
    if (NULL == src || NULL == dst) {
        return;
    }
    ASSERT_OWNED_RESOURCE(src);
    ASSERT_OWNED_RESOURCE(dst);

    // Since we're going to the draw target and not GPU, no need to check kNoFlush
    // here.

    GrDrawTarget* target = this->prepareToDraw();
    if (NULL == target) {
        return;
    }
    target->copySurface(dst, src, srcRect, dstPoint);

    if (kFlushWrites_PixelOp & pixelOpsFlags) {
        this->flush();
    }
}

// Flushes buffered drawing only when 'surface' has unflushed writes pending.
void GrContext::flushSurfaceWrites(GrSurface* surface) {
    RETURN_IF_ABANDONED
    if (surface->surfacePriv().hasPendingWrite()) {
        this->flush();
    }
}

// Configures 'pipelineBuilder' from the paint, render target, and clip, and returns
// the buffered draw target. Returns NULL when the context has no GPU or draw buffer.
GrDrawTarget* GrContext::prepareToDraw(GrPipelineBuilder* pipelineBuilder,
                                       GrRenderTarget* rt,
                                       const GrClip& clip,
                                       const GrPaint* paint,
                                       const AutoCheckFlush* acf) {
    if (NULL == fGpu || NULL == fDrawBuffer) {
        return NULL;
    }

    ASSERT_OWNED_RESOURCE(rt);
    SkASSERT(rt && paint && acf);
    pipelineBuilder->setFromPaint(*paint, rt, clip);
    return fDrawBuffer;
}

// Paint-less overload used by copies, discards, and text; callers must NULL-check
// the result (fDrawBuffer may itself be NULL on an abandoned context).
GrDrawTarget* GrContext::prepareToDraw() {
    if (NULL == fGpu) {
        return NULL;
    }
    return fDrawBuffer;
}

/*
 * This method finds a path renderer that can draw the specified path on
 * the provided target.
 * Due to its expense, the software path renderer has split out so it can
 * can be individually allowed/disallowed via the "allowSW" boolean.
 */
GrPathRenderer* GrContext::getPathRenderer(const GrDrawTarget* target,
                                           const GrPipelineBuilder* pipelineBuilder,
                                           const SkMatrix& viewMatrix,
                                           const SkPath& path,
                                           const GrStrokeInfo& stroke,
                                           bool allowSW,
                                           GrPathRendererChain::DrawType drawType,
                                           GrPathRendererChain::StencilSupport* stencilSupport) {

    // Lazily create the renderer chain on first use.
    if (NULL == fPathRendererChain) {
        fPathRendererChain = SkNEW_ARGS(GrPathRendererChain, (this));
    }

    GrPathRenderer* pr = fPathRendererChain->getPathRenderer(target,
                                                             pipelineBuilder,
                                                             viewMatrix,
                                                             path,
                                                             stroke,
                                                             drawType,
                                                             stencilSupport);

    // Fall back to the (lazily created) software rasterizer when permitted.
    if (NULL == pr && allowSW) {
        if (NULL == fSoftwarePathRenderer) {
            fSoftwarePathRenderer = SkNEW_ARGS(GrSoftwarePathRenderer, (this));
        }
        pr = fSoftwarePathRenderer;
    }

    return pr;
}

////////////////////////////////////////////////////////////////////////////////
bool GrContext::isConfigRenderable(GrPixelConfig config, bool withMSAA) const {
    return fGpu->caps()->isConfigRenderable(config, withMSAA);
}

// Returns a sample count for NVPR-style path rendering at the given dpi, or 0 when
// the config isn't MSAA-renderable, path rendering is unsupported, or the choice
// exceeds the GPU's max sample count.
int GrContext::getRecommendedSampleCount(GrPixelConfig config,
                                         SkScalar dpi) const {
    if (!this->isConfigRenderable(config, true)) {
        return 0;
    }
    int chosenSampleCount = 0;
    if (fGpu->caps()->shaderCaps()->pathRenderingSupport()) {
        // High-dpi output needs fewer samples; low-dpi output gets more AA.
        if (dpi >= 250.0f) {
            chosenSampleCount = 4;
        } else {
            chosenSampleCount = 16;
        }
    }
    return chosenSampleCount <= fGpu->caps()->maxSampleCount() ?
           chosenSampleCount : 0;
}

// Creates the in-order draw buffer and its vertex/index buffer allocation pools.
// Must only run once (all three members start NULL).
void GrContext::setupDrawBuffer() {
    SkASSERT(NULL == fDrawBuffer);
    SkASSERT(NULL == fDrawBufferVBAllocPool);
    SkASSERT(NULL == fDrawBufferIBAllocPool);

    fDrawBufferVBAllocPool =
        SkNEW_ARGS(GrVertexBufferAllocPool, (fGpu,
                                             DRAW_BUFFER_VBPOOL_BUFFER_SIZE,
                                             DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS));
    fDrawBufferIBAllocPool =
        SkNEW_ARGS(GrIndexBufferAllocPool, (fGpu,
                                            DRAW_BUFFER_IBPOOL_BUFFER_SIZE,
                                            DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS));

    fDrawBuffer = SkNEW_ARGS(GrInOrderDrawBuffer, (this,
                                                   fDrawBufferVBAllocPool,
                                                   fDrawBufferIBAllocPool));
}

GrDrawTarget* GrContext::getTextTarget() {
    return this->prepareToDraw();
}

namespace {
// Runs the PM<->UPM round-trip test once and reports both conversion results as ints
// (suitable for caching in GrContext's int members).
void test_pm_conversions(GrContext* ctx, int* pmToUPMValue, int* upmToPMValue) {
    GrConfigConversionEffect::PMConversion pmToUPM;
    GrConfigConversionEffect::PMConversion upmToPM;
    GrConfigConversionEffect::TestForPreservingPMConversions(ctx, &pmToUPM, &upmToPM);
    *pmToUPMValue = pmToUPM;
    *upmToPMValue = upmToPM;
}
}

// Returns an effect converting premultiplied to unpremultiplied color, or NULL when
// no lossless conversion is available. Caches the conversion test on first call.
const GrFragmentProcessor* GrContext::createPMToUPMEffect(GrTexture* texture,
                                                          bool swapRAndB,
                                                          const SkMatrix& matrix) {
    if (!fDidTestPMConversions) {
        test_pm_conversions(this, &fPMToUPMConversion, &fUPMToPMConversion);
        fDidTestPMConversions = true;
    }
    GrConfigConversionEffect::PMConversion pmToUPM =
        static_cast<GrConfigConversionEffect::PMConversion>(fPMToUPMConversion);
    if (GrConfigConversionEffect::kNone_PMConversion != pmToUPM) {
        return GrConfigConversionEffect::Create(texture, swapRAndB, pmToUPM, matrix);
    } else {
        return NULL;
    }
}

// Returns an effect converting unpremultiplied to premultiplied color, or NULL when
// no lossless conversion is available. Caches the conversion test on first call.
const GrFragmentProcessor* GrContext::createUPMToPMEffect(GrTexture* texture,
                                                          bool swapRAndB,
                                                          const SkMatrix& matrix) {
    if (!fDidTestPMConversions) {
        test_pm_conversions(this, &fPMToUPMConversion, &fUPMToPMConversion);
        fDidTestPMConversions = true;
    }
    GrConfigConversionEffect::PMConversion upmToPM =
        static_cast<GrConfigConversionEffect::PMConversion>(fUPMToPMConversion);
    if (GrConfigConversionEffect::kNone_PMConversion != upmToPM) {
        return GrConfigConversionEffect::Create(texture, swapRAndB, upmToPM, matrix);
    } else {
        return NULL;
    }
}

//////////////////////////////////////////////////////////////////////////////

// Reports the resource cache limits; either out-param may be NULL to skip it.
void GrContext::getResourceCacheLimits(int* maxTextures, size_t* maxTextureBytes) const {
    if (maxTextures) {
        *maxTextures = fResourceCache->getMaxResourceCount();
    }
    if (maxTextureBytes) {
        *maxTextureBytes = fResourceCache->getMaxResourceBytes();
    }
}

void GrContext::setResourceCacheLimits(int maxTextures, size_t maxTextureBytes) {
    fResourceCache->setLimits(maxTextures, maxTextureBytes);
}

//////////////////////////////////////////////////////////////////////////////

// Forwards a trace marker to the GPU and, when present, the buffered draw target.
void GrContext::addGpuTraceMarker(const GrGpuTraceMarker* marker) {
    fGpu->addGpuTraceMarker(marker);
    if (fDrawBuffer) {
        fDrawBuffer->addGpuTraceMarker(marker);
    }
}

void GrContext::removeGpuTraceMarker(const GrGpuTraceMarker* marker) {
    fGpu->removeGpuTraceMarker(marker);
    if (fDrawBuffer) {
        fDrawBuffer->removeGpuTraceMarker(marker);
    }
}