GrContext.cpp revision fcf7829b67b798aff4c41c4688daa9c7381991e6
/*
 * Copyright 2011 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrContext.h"

#include "GrAARectRenderer.h"
#include "GrBatch.h"
#include "GrBatchFontCache.h"
#include "GrBatchTarget.h"
#include "GrBatchTest.h"
#include "GrCaps.h"
#include "GrContextOptions.h"
#include "GrDefaultGeoProcFactory.h"
#include "GrDrawContext.h"
#include "GrGpuResource.h"
#include "GrGpuResourcePriv.h"
#include "GrGpu.h"
#include "GrImmediateDrawTarget.h"
#include "GrIndexBuffer.h"
#include "GrInOrderDrawBuffer.h"
#include "GrLayerCache.h"
#include "GrOvalRenderer.h"
#include "GrPathRenderer.h"
#include "GrPathUtils.h"
#include "GrRenderTargetPriv.h"
#include "GrResourceCache.h"
#include "GrResourceProvider.h"
#include "GrSoftwarePathRenderer.h"
#include "GrStrokeInfo.h"
#include "GrSurfacePriv.h"
#include "GrTextBlobCache.h"
#include "GrTexturePriv.h"
#include "GrTraceMarker.h"
#include "GrTracing.h"
#include "GrVertices.h"
#include "SkDashPathPriv.h"
#include "SkConfig8888.h"
#include "SkGr.h"
#include "SkRRect.h"
#include "SkStrokeRec.h"
#include "SkSurfacePriv.h"
#include "SkTLazy.h"
#include "SkTLS.h"
#include "SkTraceEvent.h"

#include "effects/GrConfigConversionEffect.h"
#include "effects/GrDashingEffect.h"
#include "effects/GrSingleTextureEffect.h"

// Guards against a resource being used with a context other than the one that created it.
#define ASSERT_OWNED_RESOURCE(R) SkASSERT(!(R) || (R)->getContext() == this)
// Early-out macros for entry points on an abandoned context (see abandonContext()).
#define RETURN_IF_ABANDONED if (fDrawingMgr.abandoned()) { return; }
#define RETURN_FALSE_IF_ABANDONED if (fDrawingMgr.abandoned()) { return false; }
#define RETURN_NULL_IF_ABANDONED if (fDrawingMgr.abandoned()) { return NULL; }


////////////////////////////////////////////////////////////////////////////////

// Creates the single draw target shared by all of this context's GrDrawContexts.
// IMMEDIATE_MODE selects a target that executes eagerly instead of buffering.
void GrContext::DrawingMgr::init(GrContext* context) {
    fContext = context;

#ifdef IMMEDIATE_MODE
    fDrawTarget = SkNEW_ARGS(GrImmediateDrawTarget, (context));
#else
    fDrawTarget = SkNEW_ARGS(GrInOrderDrawBuffer, (context));
#endif
}

// Drops the draw target and every cached GrDrawContext.
// NOTE(review): only indices [i][0] and [i][1] are cleared here, which assumes
// kNumDFTOptions == 2 (abandon() below iterates with kNumDFTOptions explicitly).
void GrContext::DrawingMgr::cleanup() {
    SkSafeSetNull(fDrawTarget);
    for (int i = 0; i < kNumPixelGeometries; ++i) {
        SkSafeSetNull(fDrawContext[i][0]);
        SkSafeSetNull(fDrawContext[i][1]);
    }
}

GrContext::DrawingMgr::~DrawingMgr() {
    this->cleanup();
}

// Like cleanup(), but first severs each cached GrDrawContext's back-pointer to the
// draw target so no stale reference survives context abandonment. Afterwards
// fDrawTarget is NULL, which makes abandoned() true and short-circuits the
// RETURN_*_IF_ABANDONED macros.
void GrContext::DrawingMgr::abandon() {
    SkSafeSetNull(fDrawTarget);
    for (int i = 0; i < kNumPixelGeometries; ++i) {
        for (int j = 0; j < kNumDFTOptions; ++j) {
            if (fDrawContext[i][j]) {
                SkSafeSetNull(fDrawContext[i][j]->fDrawTarget);
                SkSafeSetNull(fDrawContext[i][j]);
            }
        }
    }
}

// Forwards a purge request to the draw target, if one still exists.
void GrContext::DrawingMgr::purgeResources() {
    if (fDrawTarget) {
        fDrawTarget->purgeResources();
    }
}

// Discards any buffered drawing without executing it.
void GrContext::DrawingMgr::reset() {
    if (fDrawTarget) {
        fDrawTarget->reset();
    }
}

// Executes any buffered drawing.
void GrContext::DrawingMgr::flush() {
    if (fDrawTarget) {
        fDrawTarget->flush();
    }
}

// Returns (lazily creating) the GrDrawContext for the given surface properties.
// Contexts are cached per (pixel geometry, distance-field-fonts flag) pair; a NULL
// surfaceProps argument falls back to the process-wide defaults. Returns NULL if
// the context has been abandoned.
GrDrawContext* GrContext::DrawingMgr::drawContext(const SkSurfaceProps* surfaceProps) {
    if (this->abandoned()) {
        return NULL;
    }

    const SkSurfaceProps props(SkSurfacePropsCopyOrDefault(surfaceProps));

    SkASSERT(props.pixelGeometry() < kNumPixelGeometries);
    if (!fDrawContext[props.pixelGeometry()][props.isUseDistanceFieldFonts()]) {
        fDrawContext[props.pixelGeometry()][props.isUseDistanceFieldFonts()] =
            SkNEW_ARGS(GrDrawContext, (fContext, fDrawTarget, props));
    }

    return fDrawContext[props.pixelGeometry()][props.isUseDistanceFieldFonts()];
}

////////////////////////////////////////////////////////////////////////////////


// Convenience overload: create a context with default options.
GrContext* GrContext::Create(GrBackend backend, GrBackendContext backendContext) {
    GrContextOptions defaultOptions;
    return Create(backend, backendContext, defaultOptions);
}

// Creates and initializes a GrContext for the given backend. Returns NULL (after
// releasing the partially-constructed object) if backend initialization fails.
GrContext* GrContext::Create(GrBackend backend, GrBackendContext backendContext,
                             const GrContextOptions& options) {
    GrContext* context = SkNEW(GrContext);

    if (context->init(backend, backendContext, options)) {
        return context;
    } else {
        context->unref();
        return NULL;
    }
}

// Process-wide monotonically increasing ID source for GrContext::fUniqueID.
// Skips SK_InvalidGenID so that value can safely mean "no context".
static int32_t gNextID = 1;
static int32_t next_id() {
    int32_t id;
    do {
        id = sk_atomic_inc(&gNextID);
    } while (id == SK_InvalidGenID);
    return id;
}

GrContext::GrContext() : fUniqueID(next_id()) {
    fGpu = NULL;
    fCaps = NULL;
    fResourceCache = NULL;
    fResourceProvider = NULL;
    fPathRendererChain = NULL;
    fSoftwarePathRenderer = NULL;
    fBatchFontCache = NULL;
    fFlushToReduceCacheSize = false;
}

// Creates the backend GrGpu; on success finishes construction via initCommon().
bool GrContext::init(GrBackend backend, GrBackendContext backendContext,
                     const GrContextOptions& options) {
    SkASSERT(!fGpu);

    fGpu = GrGpu::Create(backend, backendContext, options, this);
    if (!fGpu) {
        return false;
    }
    this->initCommon();
    return true;
}

// Backend-independent half of initialization: caches, resource provider, draw
// targets, and font/blob caches. Requires fGpu to be set.
void GrContext::initCommon() {
    fCaps = SkRef(fGpu->caps());
    fResourceCache = SkNEW(GrResourceCache);
    fResourceCache->setOverBudgetCallback(OverBudgetCB, this);
    fResourceProvider = SkNEW_ARGS(GrResourceProvider, (fGpu, fResourceCache));

    fLayerCache.reset(SkNEW_ARGS(GrLayerCache, (this)));

    fDidTestPMConversions = false;

    fDrawingMgr.init(this);

    // GrBatchFontCache will eventually replace GrFontCache
    fBatchFontCache = SkNEW_ARGS(GrBatchFontCache, (this));

    fTextBlobCache.reset(SkNEW_ARGS(GrTextBlobCache, (TextBlobCacheOverBudgetCB, this)));
}

GrContext::~GrContext() {
    // init() never ran (or failed before creating the GPU) -- nothing to tear down.
    if (!fGpu) {
        SkASSERT(!fCaps);
        return;
    }

    // Execute any pending work before destroying the objects it targets.
    this->flush();

    fDrawingMgr.cleanup();

    // Run client-registered cleanup callbacks.
    for (int i = 0; i < fCleanUpData.count(); ++i) {
        (*fCleanUpData[i].fFunc)(this, fCleanUpData[i].fInfo);
    }

    SkDELETE(fResourceProvider);
    SkDELETE(fResourceCache);
    SkDELETE(fBatchFontCache);

    fGpu->unref();
    fCaps->unref();
    SkSafeUnref(fPathRendererChain);
    SkSafeUnref(fSoftwarePathRenderer);
}

// Called when the backend 3D API context has been lost/destroyed. Marks all
// GPU-backed objects unusable without attempting to free them through the API.
void GrContext::abandonContext() {
    fResourceProvider->abandon();
    // abandon first so destructors
    // don't try to free the resources in the API.
    fResourceCache->abandonAll();

    fGpu->contextAbandoned();

    // a path renderer may be holding onto resources that
    // are now unusable
    SkSafeSetNull(fPathRendererChain);
    SkSafeSetNull(fSoftwarePathRenderer);

    fDrawingMgr.abandon();

    fBatchFontCache->freeAll();
    fLayerCache->freeAll();
    fTextBlobCache->freeAll();
}

// Notifies the GPU that external code has dirtied some of the 3D API state.
void GrContext::resetContext(uint32_t state) {
    fGpu->markContextDirty(state);
}

// Frees all GPU resources that are not currently required, flushing pending work
// first so nothing in flight is referenced.
void GrContext::freeGpuResources() {
    this->flush();

    fDrawingMgr.purgeResources();

    fBatchFontCache->freeAll();
    fLayerCache->freeAll();
    // a path renderer may be holding onto resources
    SkSafeSetNull(fPathRendererChain);
    SkSafeSetNull(fSoftwarePathRenderer);

    fResourceCache->purgeAllUnlocked();
}

// Reports budgeted resource count/bytes; either out-param may be NULL.
void GrContext::getResourceCacheUsage(int* resourceCount, size_t* resourceBytes) const {
    if (resourceCount) {
        *resourceCount = fResourceCache->getBudgetedResourceCount();
    }
    if (resourceBytes) {
        *resourceBytes = fResourceCache->getBudgetedResourceBytes();
    }
}

////////////////////////////////////////////////////////////////////////////////

// GrResourceCache over-budget callback: does not flush immediately (we may be in
// the middle of recording a draw); instead requests a flush at the next
// opportunity so buffered draws release their texture refs.
void GrContext::OverBudgetCB(void* data) {
    SkASSERT(data);

    GrContext* context = reinterpret_cast<GrContext*>(data);

    // Flush the InOrderDrawBuffer to possibly free up some textures
    context->fFlushToReduceCacheSize = true;
}

// GrTextBlobCache over-budget callback: flushes synchronously.
void GrContext::TextBlobCacheOverBudgetCB(void* data) {
    SkASSERT(data);

    // Unlike the GrResourceCache, TextBlobs are drawn at the SkGpuDevice level, therefore they
    // cannot use fFlushToReduceCacheSize because it uses AutoCheckFlush. The solution is to move
    // drawText calls to below the GrContext level, but this is not trivial because they call
    // drawPath on SkGpuDevice
    GrContext* context = reinterpret_cast<GrContext*>(data);
    context->flush();
}

////////////////////////////////////////////////////////////////////////////////

// Executes (or, with kDiscard_FlushBit, throws away) all buffered drawing and
// lets the resource cache react to the flush.
void GrContext::flush(int flagsBitfield) {
    RETURN_IF_ABANDONED

    if (kDiscard_FlushBit & flagsBitfield) {
        fDrawingMgr.reset();
    } else {
        fDrawingMgr.flush();
    }
    fResourceCache->notifyFlushOccurred();
    fFlushToReduceCacheSize = false;
}

// CPU fallback: converts unpremultiplied 8888 pixels to premultiplied, writing
// into outPixels. Returns false if srcConfig has no SkColorType equivalent or the
// conversion fails. NOTE(review): not declared static, so this has external
// linkage -- presumably intentional (used by tests?); confirm before changing.
bool sw_convert_to_premul(GrPixelConfig srcConfig, int width, int height, size_t inRowBytes,
                          const void* inPixels, size_t outRowBytes, void* outPixels) {
    SkSrcPixelInfo srcPI;
    if (!GrPixelConfig2ColorAndProfileType(srcConfig, &srcPI.fColorType, NULL)) {
        return false;
    }
    srcPI.fAlphaType = kUnpremul_SkAlphaType;
    srcPI.fPixels = inPixels;
    srcPI.fRowBytes = inRowBytes;

    SkDstPixelInfo dstPI;
    dstPI.fColorType = srcPI.fColorType;
    dstPI.fAlphaType = kPremul_SkAlphaType;
    dstPI.fPixels = outPixels;
    dstPI.fRowBytes = outRowBytes;

    return srcPI.convertPixelsTo(&dstPI, width, height);
}

// Writes client pixels into a surface. Fast path: direct texture upload when no
// unpremul step is needed and the GPU supports it. Otherwise uploads to a scratch
// texture (handling premultiplication and R/B swap via a conversion effect or on
// the CPU) and draws the scratch into the surface's render target.
bool GrContext::writeSurfacePixels(GrSurface* surface,
                                   int left, int top, int width, int height,
                                   GrPixelConfig srcConfig, const void* buffer, size_t rowBytes,
                                   uint32_t pixelOpsFlags) {
    RETURN_FALSE_IF_ABANDONED
    {
        GrTexture* texture = NULL;
        if (!(kUnpremul_PixelOpsFlag & pixelOpsFlags) && (texture = surface->asTexture()) &&
            fGpu->canWriteTexturePixels(texture, srcConfig) &&
            (!fCaps->useDrawInsteadOfPartialRenderTargetWrite() || !surface->asRenderTarget() ||
             (width == texture->width() && height == texture->height()))) {

            if (!(kDontFlush_PixelOpsFlag & pixelOpsFlags) &&
                surface->surfacePriv().hasPendingIO()) {
                this->flush();
            }
            return fGpu->writeTexturePixels(texture, left, top, width, height,
                                            srcConfig, buffer, rowBytes);
            // Don't need to check kFlushWrites_PixelOp here, we just did a direct write so the
            // upload is already flushed.
        }
    }

    // If we didn't do a direct texture write then we upload the pixels to a texture and draw.
    GrRenderTarget* renderTarget = surface->asRenderTarget();
    if (!renderTarget) {
        return false;
    }

    // We ignore the preferred config unless it is a R/B swap of the src config. In that case
    // we will upload the original src data to a scratch texture but we will spoof it as the swapped
    // config. This scratch will then have R and B swapped. We correct for this by swapping again
    // when drawing the scratch to the dst using a conversion effect.
    bool swapRAndB = false;
    GrPixelConfig writeConfig = srcConfig;
    if (GrPixelConfigSwapRAndB(srcConfig) ==
        fGpu->preferredWritePixelsConfig(srcConfig, renderTarget->config())) {
        writeConfig = GrPixelConfigSwapRAndB(srcConfig);
        swapRAndB = true;
    }

    GrSurfaceDesc desc;
    desc.fWidth = width;
    desc.fHeight = height;
    desc.fConfig = writeConfig;
    SkAutoTUnref<GrTexture> texture(this->textureProvider()->refScratchTexture(desc,
        GrTextureProvider::kApprox_ScratchTexMatch));
    if (!texture) {
        return false;
    }

    SkAutoTUnref<const GrFragmentProcessor> fp;
    SkMatrix textureMatrix;
    textureMatrix.setIDiv(texture->width(), texture->height());

    // allocate a tmp buffer and sw convert the pixels to premul
    SkAutoSTMalloc<128 * 128, uint32_t> tmpPixels(0);

    if (kUnpremul_PixelOpsFlag & pixelOpsFlags) {
        if (!GrPixelConfigIs8888(srcConfig)) {
            // The unpremul flag is only supported for 8888 configs.
            return false;
        }
        fp.reset(this->createUPMToPMEffect(texture, swapRAndB, textureMatrix));
        // handle the unpremul step on the CPU if we couldn't create an effect to do it.
        if (!fp) {
            size_t tmpRowBytes = 4 * width;
            tmpPixels.reset(width * height);
            if (!sw_convert_to_premul(srcConfig, width, height, rowBytes, buffer, tmpRowBytes,
                                      tmpPixels.get())) {
                return false;
            }
            rowBytes = tmpRowBytes;
            buffer = tmpPixels.get();
        }
    }
    if (!fp) {
        // No premul conversion needed (or it was done on the CPU); the effect only
        // has to sample the scratch, undoing the R/B spoof if we applied one.
        fp.reset(GrConfigConversionEffect::Create(texture,
                                                  swapRAndB,
                                                  GrConfigConversionEffect::kNone_PMConversion,
                                                  textureMatrix));
    }

    // Even if the client told us not to flush, we still flush here. The client may have known that
    // writes to the original surface caused no data hazards, but they can't know that the scratch
    // we just got is safe.
    if (texture->surfacePriv().hasPendingIO()) {
        this->flush();
    }
    if (!fGpu->writeTexturePixels(texture, 0, 0, width, height,
                                  writeConfig, buffer, rowBytes)) {
        return false;
    }

    // Draw the scratch texture into the destination at (left, top).
    SkMatrix matrix;
    matrix.setTranslate(SkIntToScalar(left), SkIntToScalar(top));

    GrDrawContext* drawContext = this->drawContext();
    if (!drawContext) {
        return false;
    }

    GrPaint paint;
    paint.addColorProcessor(fp);

    SkRect rect = SkRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height));

    drawContext->drawRect(renderTarget, GrClip::WideOpen(), paint, matrix, rect, NULL);

    if (kFlushWrites_PixelOp & pixelOpsFlags) {
        this->flushSurfaceWrites(surface);
    }

    return true;
}

// toggles between RGBA and BGRA
static SkColorType toggle_colortype32(SkColorType ct) {
    if (kRGBA_8888_SkColorType == ct) {
        return kBGRA_8888_SkColorType;
    } else {
        SkASSERT(kBGRA_8888_SkColorType == ct);
        return kRGBA_8888_SkColorType;
    }
}

// Reads pixels from a render target into a client buffer. Where possible, y-flip,
// R/B swap, and premul->unpremul conversions are folded into a draw to a scratch
// texture; whatever can't be handled on the GPU is done on the CPU after the read.
bool GrContext::readRenderTargetPixels(GrRenderTarget* target,
                                       int left, int top, int width, int height,
                                       GrPixelConfig dstConfig, void* buffer, size_t rowBytes,
                                       uint32_t flags) {
    RETURN_FALSE_IF_ABANDONED
    ASSERT_OWNED_RESOURCE(target);
    SkASSERT(target);

    if (!(kDontFlush_PixelOpsFlag & flags) && target->surfacePriv().hasPendingWrite()) {
        this->flush();
    }

    // Determine which conversions have to be applied: flipY, swapRAndB, and/or unpremul.

    // If fGpu->readPixels would incur a y-flip cost then we will read the pixels upside down. We'll
    // either do the flipY by drawing into a scratch with a matrix or on the cpu after the read.
    bool flipY = fGpu->readPixelsWillPayForYFlip(target, left, top,
                                                 width, height, dstConfig,
                                                 rowBytes);
    // We ignore the preferred config if it is different than our config unless it is an R/B swap.
    // In that case we'll perform an R and B swap while drawing to a scratch texture of the swapped
    // config. Then we will call readPixels on the scratch with the swapped config. The swaps during
    // the draw cancels out the fact that we call readPixels with a config that is R/B swapped from
    // dstConfig.
    GrPixelConfig readConfig = dstConfig;
    bool swapRAndB = false;
    if (GrPixelConfigSwapRAndB(dstConfig) ==
        fGpu->preferredReadPixelsConfig(dstConfig, target->config())) {
        readConfig = GrPixelConfigSwapRAndB(readConfig);
        swapRAndB = true;
    }

    bool unpremul = SkToBool(kUnpremul_PixelOpsFlag & flags);

    if (unpremul && !GrPixelConfigIs8888(dstConfig)) {
        // The unpremul flag is only allowed for 8888 configs.
        return false;
    }

    SkAutoTUnref<GrTexture> tempTexture;

    // If the src is a texture and we would have to do conversions after read pixels, we instead
    // do the conversions by drawing the src to a scratch texture. If we handle any of the
    // conversions in the draw we set the corresponding bool to false so that we don't reapply it
    // on the read back pixels.
    GrTexture* src = target->asTexture();
    if (src && (swapRAndB || unpremul || flipY)) {
        // Make the scratch a render target so we can read its pixels.
        GrSurfaceDesc desc;
        desc.fFlags = kRenderTarget_GrSurfaceFlag;
        desc.fWidth = width;
        desc.fHeight = height;
        desc.fConfig = readConfig;
        desc.fOrigin = kTopLeft_GrSurfaceOrigin;

        // When a full read back is faster than a partial we could always make the scratch exactly
        // match the passed rect. However, if we see many different size rectangles we will trash
        // our texture cache and pay the cost of creating and destroying many textures. So, we only
        // request an exact match when the caller is reading an entire RT.
        GrTextureProvider::ScratchTexMatch match = GrTextureProvider::kApprox_ScratchTexMatch;
        if (0 == left &&
            0 == top &&
            target->width() == width &&
            target->height() == height &&
            fGpu->fullReadPixelsIsFasterThanPartial()) {
            match = GrTextureProvider::kExact_ScratchTexMatch;
        }
        tempTexture.reset(this->textureProvider()->refScratchTexture(desc, match));
        if (tempTexture) {
            // compute a matrix to perform the draw
            SkMatrix textureMatrix;
            textureMatrix.setTranslate(SK_Scalar1 *left, SK_Scalar1 *top);
            textureMatrix.postIDiv(src->width(), src->height());

            SkAutoTUnref<const GrFragmentProcessor> fp;
            if (unpremul) {
                fp.reset(this->createPMToUPMEffect(src, swapRAndB, textureMatrix));
                if (fp) {
                    unpremul = false; // we no longer need to do this on CPU after the read back.
                }
            }
            // If we failed to create a PM->UPM effect and have no other conversions to perform then
            // there is no longer any point to using the scratch.
            if (fp || flipY || swapRAndB) {
                if (!fp) {
                    fp.reset(GrConfigConversionEffect::Create(
                        src, swapRAndB, GrConfigConversionEffect::kNone_PMConversion,
                        textureMatrix));
                }
                swapRAndB = false; // we will handle the swap in the draw.

                // We protect the existing geometry here since it may not be
                // clear to the caller that a draw operation (i.e., drawSimpleRect)
                // can be invoked in this method
                {
                    GrDrawContext* drawContext = this->drawContext();
                    if (!drawContext) {
                        return false;
                    }

                    GrPaint paint;
                    paint.addColorProcessor(fp);

                    SkRect rect = SkRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height));

                    drawContext->drawRect(tempTexture->asRenderTarget(), GrClip::WideOpen(), paint,
                                          SkMatrix::I(), rect, NULL);

                    // we want to read back from the scratch's origin
                    left = 0;
                    top = 0;
                    target = tempTexture->asRenderTarget();
                }
                this->flushSurfaceWrites(target);
            }
        }
    }

    if (!fGpu->readPixels(target,
                          left, top, width, height,
                          readConfig, buffer, rowBytes)) {
        return false;
    }
    // Perform any conversions we weren't able to perform using a scratch texture.
    if (unpremul || swapRAndB) {
        SkDstPixelInfo dstPI;
        if (!GrPixelConfig2ColorAndProfileType(dstConfig, &dstPI.fColorType, NULL)) {
            return false;
        }
        dstPI.fAlphaType = kUnpremul_SkAlphaType;
        dstPI.fPixels = buffer;
        dstPI.fRowBytes = rowBytes;

        // In-place conversion: src and dst share the buffer; a remaining swapRAndB
        // is expressed by giving the src the toggled 32-bit color type.
        SkSrcPixelInfo srcPI;
        srcPI.fColorType = swapRAndB ? toggle_colortype32(dstPI.fColorType) : dstPI.fColorType;
        srcPI.fAlphaType = kPremul_SkAlphaType;
        srcPI.fPixels = buffer;
        srcPI.fRowBytes = rowBytes;

        return srcPI.convertPixelsTo(&dstPI, width, height);
    }
    return true;
}

// Flushes pending work targeting the surface and resolves its MSAA render target,
// so an external (non-Skia) client can safely read it.
void GrContext::prepareSurfaceForExternalRead(GrSurface* surface) {
    RETURN_IF_ABANDONED
    SkASSERT(surface);
    ASSERT_OWNED_RESOURCE(surface);
    if (surface->surfacePriv().hasPendingIO()) {
        this->flush();
    }
    GrRenderTarget* rt = surface->asRenderTarget();
    if (fGpu && rt) {
        fGpu->resolveRenderTarget(rt);
    }
}

// Copies srcRect from src into dst at dstPoint via the draw context. A no-op when
// either surface is NULL or dst is not a render target.
void GrContext::copySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
                            const SkIPoint& dstPoint, uint32_t pixelOpsFlags) {
    RETURN_IF_ABANDONED
    if (!src || !dst) {
        return;
    }
    ASSERT_OWNED_RESOURCE(src);
    ASSERT_OWNED_RESOURCE(dst);

    // Since we're going to the draw target and not GPU, no need to check kNoFlush
    // here.
    if (!dst->asRenderTarget()) {
        return;
    }

    GrDrawContext* drawContext = this->drawContext();
    if (!drawContext) {
        return;
    }

    drawContext->copySurface(dst->asRenderTarget(), src, srcRect, dstPoint);

    if (kFlushWrites_PixelOp & pixelOpsFlags) {
        this->flush();
    }
}

// Flushes only if the surface has a pending write; cheap otherwise.
void GrContext::flushSurfaceWrites(GrSurface* surface) {
    RETURN_IF_ABANDONED
    if (surface->surfacePriv().hasPendingWrite()) {
        this->flush();
    }
}

/*
 * This method finds a path renderer that can draw the specified path on
 * the provided target.
 * Due to its expense, the software path renderer has been split out so it
 * can be individually allowed/disallowed via the "allowSW" boolean.
 */
GrPathRenderer* GrContext::getPathRenderer(const GrDrawTarget* target,
                                           const GrPipelineBuilder* pipelineBuilder,
                                           const SkMatrix& viewMatrix,
                                           const SkPath& path,
                                           const GrStrokeInfo& stroke,
                                           bool allowSW,
                                           GrPathRendererChain::DrawType drawType,
                                           GrPathRendererChain::StencilSupport* stencilSupport) {

    // The chain is built lazily and reset whenever resources are purged/abandoned.
    if (!fPathRendererChain) {
        fPathRendererChain = SkNEW_ARGS(GrPathRendererChain, (this));
    }

    GrPathRenderer* pr = fPathRendererChain->getPathRenderer(target,
                                                             pipelineBuilder,
                                                             viewMatrix,
                                                             path,
                                                             stroke,
                                                             drawType,
                                                             stencilSupport);

    if (!pr && allowSW) {
        if (!fSoftwarePathRenderer) {
            fSoftwarePathRenderer = SkNEW_ARGS(GrSoftwarePathRenderer, (this));
        }
        pr = fSoftwarePathRenderer;
    }

    return pr;
}

////////////////////////////////////////////////////////////////////////////////
// Returns a sample count suitable for the config at the given dpi, or 0 for
// non-renderable configs, GPUs without path rendering, or counts above the
// hardware maximum. NOTE(review): high dpi gets the LOWER count (4 vs 16) --
// at dense pixel grids less multisampling is needed; confirm this is intended.
int GrContext::getRecommendedSampleCount(GrPixelConfig config,
                                         SkScalar dpi) const {
    if (!this->caps()->isConfigRenderable(config, true)) {
        return 0;
    }
    int chosenSampleCount = 0;
    if (fGpu->caps()->shaderCaps()->pathRenderingSupport()) {
        if (dpi >= 250.0f) {
            chosenSampleCount = 4;
        } else {
            chosenSampleCount = 16;
        }
    }
    return chosenSampleCount <= fGpu->caps()->maxSampleCount() ?
        chosenSampleCount : 0;
}

namespace {
// Probes whether round-tripping through the GPU preserves pixel values for the
// PM<->UPM conversions; results are cached on the context by the callers below.
void test_pm_conversions(GrContext* ctx, int* pmToUPMValue, int* upmToPMValue) {
    GrConfigConversionEffect::PMConversion pmToUPM;
    GrConfigConversionEffect::PMConversion upmToPM;
    GrConfigConversionEffect::TestForPreservingPMConversions(ctx, &pmToUPM, &upmToPM);
    *pmToUPMValue = pmToUPM;
    *upmToPMValue = upmToPM;
}
}

// Returns an effect converting premul -> unpremul while sampling the texture, or
// NULL if no value-preserving GPU conversion exists (caller falls back to CPU).
const GrFragmentProcessor* GrContext::createPMToUPMEffect(GrTexture* texture,
                                                          bool swapRAndB,
                                                          const SkMatrix& matrix) {
    if (!fDidTestPMConversions) {
        test_pm_conversions(this, &fPMToUPMConversion, &fUPMToPMConversion);
        fDidTestPMConversions = true;
    }
    GrConfigConversionEffect::PMConversion pmToUPM =
        static_cast<GrConfigConversionEffect::PMConversion>(fPMToUPMConversion);
    if (GrConfigConversionEffect::kNone_PMConversion != pmToUPM) {
        return GrConfigConversionEffect::Create(texture, swapRAndB, pmToUPM, matrix);
    } else {
        return NULL;
    }
}

// Returns an effect converting unpremul -> premul while sampling the texture, or
// NULL if no value-preserving GPU conversion exists (caller falls back to CPU).
const GrFragmentProcessor* GrContext::createUPMToPMEffect(GrTexture* texture,
                                                          bool swapRAndB,
                                                          const SkMatrix& matrix) {
    if (!fDidTestPMConversions) {
        test_pm_conversions(this, &fPMToUPMConversion, &fUPMToPMConversion);
        fDidTestPMConversions = true;
    }
    GrConfigConversionEffect::PMConversion upmToPM =
        static_cast<GrConfigConversionEffect::PMConversion>(fUPMToPMConversion);
    if (GrConfigConversionEffect::kNone_PMConversion != upmToPM) {
        return GrConfigConversionEffect::Create(texture, swapRAndB, upmToPM, matrix);
    } else {
        return NULL;
    }
}

//////////////////////////////////////////////////////////////////////////////

// Reports the resource cache budget; either out-param may be NULL.
void GrContext::getResourceCacheLimits(int* maxTextures, size_t* maxTextureBytes) const {
    if (maxTextures) {
        *maxTextures = fResourceCache->getMaxResourceCount();
    }
    if (maxTextureBytes) {
        *maxTextureBytes = fResourceCache->getMaxResourceBytes();
    }
}

void GrContext::setResourceCacheLimits(int maxTextures, size_t maxTextureBytes) {
    fResourceCache->setLimits(maxTextures, maxTextureBytes);
}

//////////////////////////////////////////////////////////////////////////////

void GrContext::addGpuTraceMarker(const GrGpuTraceMarker* marker) {
    fGpu->addGpuTraceMarker(marker);
}

void GrContext::removeGpuTraceMarker(const GrGpuTraceMarker* marker) {
    fGpu->removeGpuTraceMarker(marker);
}