// GrContext.cpp revision d4c741e3d0e0fa633399691c47f76b6c7841ee83
1/* 2 * Copyright 2011 Google Inc. 3 * 4 * Use of this source code is governed by a BSD-style license that can be 5 * found in the LICENSE file. 6 */ 7 8#include "GrContext.h" 9#include "GrContextOptions.h" 10#include "GrDrawingManager.h" 11#include "GrDrawContext.h" 12#include "GrLayerCache.h" 13#include "GrResourceCache.h" 14#include "GrResourceProvider.h" 15#include "GrSoftwarePathRenderer.h" 16#include "GrSurfacePriv.h" 17 18#include "SkConfig8888.h" 19#include "SkGrPriv.h" 20 21#include "batches/GrCopySurfaceBatch.h" 22#include "effects/GrConfigConversionEffect.h" 23#include "text/GrTextBlobCache.h" 24 25#define ASSERT_OWNED_RESOURCE(R) SkASSERT(!(R) || (R)->getContext() == this) 26#define ASSERT_SINGLE_OWNER \ 27 SkDEBUGCODE(GrSingleOwner::AutoEnforce debug_SingleOwner(&fSingleOwner);) 28#define RETURN_IF_ABANDONED if (fDrawingManager->abandoned()) { return; } 29#define RETURN_FALSE_IF_ABANDONED if (fDrawingManager->abandoned()) { return false; } 30#define RETURN_NULL_IF_ABANDONED if (fDrawingManager->abandoned()) { return nullptr; } 31 32//////////////////////////////////////////////////////////////////////////////// 33 34GrContext* GrContext::Create(GrBackend backend, GrBackendContext backendContext) { 35 GrContextOptions defaultOptions; 36 return Create(backend, backendContext, defaultOptions); 37} 38 39GrContext* GrContext::Create(GrBackend backend, GrBackendContext backendContext, 40 const GrContextOptions& options) { 41 GrContext* context = new GrContext; 42 43 if (context->init(backend, backendContext, options)) { 44 return context; 45 } else { 46 context->unref(); 47 return nullptr; 48 } 49} 50 51static int32_t gNextID = 1; 52static int32_t next_id() { 53 int32_t id; 54 do { 55 id = sk_atomic_inc(&gNextID); 56 } while (id == SK_InvalidGenID); 57 return id; 58} 59 60GrContext::GrContext() : fUniqueID(next_id()) { 61 fGpu = nullptr; 62 fCaps = nullptr; 63 fResourceCache = nullptr; 64 fResourceProvider = nullptr; 65 fBatchFontCache = nullptr; 66 
fFlushToReduceCacheSize = false; 67} 68 69bool GrContext::init(GrBackend backend, GrBackendContext backendContext, 70 const GrContextOptions& options) { 71 ASSERT_SINGLE_OWNER 72 SkASSERT(!fGpu); 73 74 fGpu = GrGpu::Create(backend, backendContext, options, this); 75 if (!fGpu) { 76 return false; 77 } 78 this->initCommon(options); 79 return true; 80} 81 82void GrContext::initCommon(const GrContextOptions& options) { 83 ASSERT_SINGLE_OWNER 84 85 fCaps = SkRef(fGpu->caps()); 86 fResourceCache = new GrResourceCache(fCaps); 87 fResourceCache->setOverBudgetCallback(OverBudgetCB, this); 88 fResourceProvider = new GrResourceProvider(fGpu, fResourceCache, &fSingleOwner); 89 90 fLayerCache.reset(new GrLayerCache(this)); 91 92 fDidTestPMConversions = false; 93 94 GrDrawTarget::Options dtOptions; 95 dtOptions.fClipBatchToBounds = options.fClipBatchToBounds; 96 dtOptions.fDrawBatchBounds = options.fDrawBatchBounds; 97 dtOptions.fMaxBatchLookback = options.fMaxBatchLookback; 98 dtOptions.fMaxBatchLookahead = options.fMaxBatchLookahead; 99 fDrawingManager.reset(new GrDrawingManager(this, dtOptions, &fSingleOwner)); 100 101 // GrBatchFontCache will eventually replace GrFontCache 102 fBatchFontCache = new GrBatchFontCache(this); 103 104 fTextBlobCache.reset(new GrTextBlobCache(TextBlobCacheOverBudgetCB, this)); 105} 106 107GrContext::~GrContext() { 108 ASSERT_SINGLE_OWNER 109 110 if (!fGpu) { 111 SkASSERT(!fCaps); 112 return; 113 } 114 115 this->flush(); 116 117 fDrawingManager->cleanup(); 118 119 for (int i = 0; i < fCleanUpData.count(); ++i) { 120 (*fCleanUpData[i].fFunc)(this, fCleanUpData[i].fInfo); 121 } 122 123 delete fResourceProvider; 124 delete fResourceCache; 125 delete fBatchFontCache; 126 127 fGpu->unref(); 128 fCaps->unref(); 129} 130 131GrContextThreadSafeProxy* GrContext::threadSafeProxy() { 132 if (!fThreadSafeProxy) { 133 fThreadSafeProxy.reset(new GrContextThreadSafeProxy(fCaps, this->uniqueID())); 134 } 135 return SkRef(fThreadSafeProxy.get()); 136} 137 138void 
GrContext::abandonContext() { 139 ASSERT_SINGLE_OWNER 140 141 fResourceProvider->abandon(); 142 143 // Need to abandon the drawing manager first so all the render targets 144 // will be released/forgotten before they too are abandoned. 145 fDrawingManager->abandon(); 146 147 // abandon first to so destructors 148 // don't try to free the resources in the API. 149 fResourceCache->abandonAll(); 150 151 fGpu->disconnect(GrGpu::DisconnectType::kAbandon); 152 153 fBatchFontCache->freeAll(); 154 fLayerCache->freeAll(); 155 fTextBlobCache->freeAll(); 156} 157 158void GrContext::releaseResourcesAndAbandonContext() { 159 ASSERT_SINGLE_OWNER 160 161 fResourceProvider->abandon(); 162 163 // Need to abandon the drawing manager first so all the render targets 164 // will be released/forgotten before they too are abandoned. 165 fDrawingManager->abandon(); 166 167 // Release all resources in the backend 3D API. 168 fResourceCache->releaseAll(); 169 170 fGpu->disconnect(GrGpu::DisconnectType::kCleanup); 171 172 fBatchFontCache->freeAll(); 173 fLayerCache->freeAll(); 174 fTextBlobCache->freeAll(); 175} 176 177void GrContext::resetContext(uint32_t state) { 178 ASSERT_SINGLE_OWNER 179 fGpu->markContextDirty(state); 180} 181 182void GrContext::freeGpuResources() { 183 ASSERT_SINGLE_OWNER 184 185 this->flush(); 186 187 fBatchFontCache->freeAll(); 188 fLayerCache->freeAll(); 189 190 fDrawingManager->freeGpuResources(); 191 192 fResourceCache->purgeAllUnlocked(); 193} 194 195void GrContext::getResourceCacheUsage(int* resourceCount, size_t* resourceBytes) const { 196 ASSERT_SINGLE_OWNER 197 198 if (resourceCount) { 199 *resourceCount = fResourceCache->getBudgetedResourceCount(); 200 } 201 if (resourceBytes) { 202 *resourceBytes = fResourceCache->getBudgetedResourceBytes(); 203 } 204} 205 206//////////////////////////////////////////////////////////////////////////////// 207 208void GrContext::OverBudgetCB(void* data) { 209 SkASSERT(data); 210 211 GrContext* context = 
reinterpret_cast<GrContext*>(data); 212 213 // Flush the GrBufferedDrawTarget to possibly free up some textures 214 context->fFlushToReduceCacheSize = true; 215} 216 217void GrContext::TextBlobCacheOverBudgetCB(void* data) { 218 SkASSERT(data); 219 220 // Unlike the GrResourceCache, TextBlobs are drawn at the SkGpuDevice level, therefore they 221 // cannot use fFlushTorReduceCacheSize because it uses AutoCheckFlush. The solution is to move 222 // drawText calls to below the GrContext level, but this is not trivial because they call 223 // drawPath on SkGpuDevice 224 GrContext* context = reinterpret_cast<GrContext*>(data); 225 context->flush(); 226} 227 228//////////////////////////////////////////////////////////////////////////////// 229 230void GrContext::flush(int flagsBitfield) { 231 ASSERT_SINGLE_OWNER 232 RETURN_IF_ABANDONED 233 234 if (kDiscard_FlushBit & flagsBitfield) { 235 fDrawingManager->reset(); 236 } else { 237 fDrawingManager->flush(); 238 } 239 fResourceCache->notifyFlushOccurred(); 240 fFlushToReduceCacheSize = false; 241} 242 243bool sw_convert_to_premul(GrPixelConfig srcConfig, int width, int height, size_t inRowBytes, 244 const void* inPixels, size_t outRowBytes, void* outPixels) { 245 SkSrcPixelInfo srcPI; 246 if (!GrPixelConfig2ColorAndProfileType(srcConfig, &srcPI.fColorType, nullptr)) { 247 return false; 248 } 249 srcPI.fAlphaType = kUnpremul_SkAlphaType; 250 srcPI.fPixels = inPixels; 251 srcPI.fRowBytes = inRowBytes; 252 253 SkDstPixelInfo dstPI; 254 dstPI.fColorType = srcPI.fColorType; 255 dstPI.fAlphaType = kPremul_SkAlphaType; 256 dstPI.fPixels = outPixels; 257 dstPI.fRowBytes = outRowBytes; 258 259 return srcPI.convertPixelsTo(&dstPI, width, height); 260} 261 262bool GrContext::writeSurfacePixels(GrSurface* surface, 263 int left, int top, int width, int height, 264 GrPixelConfig srcConfig, const void* buffer, size_t rowBytes, 265 uint32_t pixelOpsFlags) { 266 ASSERT_SINGLE_OWNER 267 RETURN_FALSE_IF_ABANDONED 268 
ASSERT_OWNED_RESOURCE(surface); 269 SkASSERT(surface); 270 GR_AUDIT_TRAIL_AUTO_FRAME(&fAuditTrail, "GrContext::writeSurfacePixels"); 271 272 this->testPMConversionsIfNecessary(pixelOpsFlags); 273 274 // Trim the params here so that if we wind up making a temporary surface it can be as small as 275 // necessary and because GrGpu::getWritePixelsInfo requires it. 276 if (!GrSurfacePriv::AdjustWritePixelParams(surface->width(), surface->height(), 277 GrBytesPerPixel(srcConfig), &left, &top, &width, 278 &height, &buffer, &rowBytes)) { 279 return false; 280 } 281 282 bool applyPremulToSrc = false; 283 if (kUnpremul_PixelOpsFlag & pixelOpsFlags) { 284 if (!GrPixelConfigIs8888(srcConfig)) { 285 return false; 286 } 287 applyPremulToSrc = true; 288 } 289 290 GrGpu::DrawPreference drawPreference = GrGpu::kNoDraw_DrawPreference; 291 // Don't prefer to draw for the conversion (and thereby access a texture from the cache) when 292 // we've already determined that there isn't a roundtrip preserving conversion processor pair. 293 if (applyPremulToSrc && !this->didFailPMUPMConversionTest()) { 294 drawPreference = GrGpu::kCallerPrefersDraw_DrawPreference; 295 } 296 297 GrGpu::WritePixelTempDrawInfo tempDrawInfo; 298 if (!fGpu->getWritePixelsInfo(surface, width, height, srcConfig, &drawPreference, 299 &tempDrawInfo)) { 300 return false; 301 } 302 303 if (!(kDontFlush_PixelOpsFlag & pixelOpsFlags) && surface->surfacePriv().hasPendingIO()) { 304 this->flush(); 305 } 306 307 SkAutoTUnref<GrTexture> tempTexture; 308 if (GrGpu::kNoDraw_DrawPreference != drawPreference) { 309 tempTexture.reset( 310 this->textureProvider()->createApproxTexture(tempDrawInfo.fTempSurfaceDesc)); 311 if (!tempTexture && GrGpu::kRequireDraw_DrawPreference == drawPreference) { 312 return false; 313 } 314 } 315 316 // temp buffer for doing sw premul conversion, if needed. 
317 SkAutoSTMalloc<128 * 128, uint32_t> tmpPixels(0); 318 if (tempTexture) { 319 SkAutoTUnref<const GrFragmentProcessor> fp; 320 SkMatrix textureMatrix; 321 textureMatrix.setIDiv(tempTexture->width(), tempTexture->height()); 322 if (applyPremulToSrc) { 323 fp.reset(this->createUPMToPMEffect(tempTexture, tempDrawInfo.fSwizzle, 324 textureMatrix)); 325 // If premultiplying was the only reason for the draw, fall back to a straight write. 326 if (!fp) { 327 if (GrGpu::kCallerPrefersDraw_DrawPreference == drawPreference) { 328 tempTexture.reset(nullptr); 329 } 330 } else { 331 applyPremulToSrc = false; 332 } 333 } 334 if (tempTexture) { 335 if (!fp) { 336 fp.reset(GrConfigConversionEffect::Create(tempTexture, tempDrawInfo.fSwizzle, 337 GrConfigConversionEffect::kNone_PMConversion, textureMatrix)); 338 if (!fp) { 339 return false; 340 } 341 } 342 GrRenderTarget* renderTarget = surface->asRenderTarget(); 343 SkASSERT(renderTarget); 344 if (tempTexture->surfacePriv().hasPendingIO()) { 345 this->flush(); 346 } 347 if (applyPremulToSrc) { 348 size_t tmpRowBytes = 4 * width; 349 tmpPixels.reset(width * height); 350 if (!sw_convert_to_premul(srcConfig, width, height, rowBytes, buffer, tmpRowBytes, 351 tmpPixels.get())) { 352 return false; 353 } 354 rowBytes = tmpRowBytes; 355 buffer = tmpPixels.get(); 356 applyPremulToSrc = false; 357 } 358 if (!fGpu->writePixels(tempTexture, 0, 0, width, height, 359 tempDrawInfo.fWriteConfig, buffer, 360 rowBytes)) { 361 return false; 362 } 363 SkMatrix matrix; 364 matrix.setTranslate(SkIntToScalar(left), SkIntToScalar(top)); 365 sk_sp<GrDrawContext> drawContext(this->drawContext(sk_ref_sp(renderTarget))); 366 if (!drawContext) { 367 return false; 368 } 369 // SRGBTODO: AllowSRGBInputs? (We could force it on here, so we don't need the 370 // per-texture override in config conversion effect?) 
371 GrPaint paint; 372 paint.addColorFragmentProcessor(fp); 373 paint.setPorterDuffXPFactory(SkXfermode::kSrc_Mode); 374 SkRect rect = SkRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height)); 375 drawContext->drawRect(GrClip::WideOpen(), paint, matrix, rect, nullptr); 376 377 if (kFlushWrites_PixelOp & pixelOpsFlags) { 378 this->flushSurfaceWrites(surface); 379 } 380 } 381 } 382 if (!tempTexture) { 383 if (applyPremulToSrc) { 384 size_t tmpRowBytes = 4 * width; 385 tmpPixels.reset(width * height); 386 if (!sw_convert_to_premul(srcConfig, width, height, rowBytes, buffer, tmpRowBytes, 387 tmpPixels.get())) { 388 return false; 389 } 390 rowBytes = tmpRowBytes; 391 buffer = tmpPixels.get(); 392 applyPremulToSrc = false; 393 } 394 return fGpu->writePixels(surface, left, top, width, height, srcConfig, buffer, rowBytes); 395 } 396 return true; 397} 398 399bool GrContext::readSurfacePixels(GrSurface* src, 400 int left, int top, int width, int height, 401 GrPixelConfig dstConfig, void* buffer, size_t rowBytes, 402 uint32_t flags) { 403 ASSERT_SINGLE_OWNER 404 RETURN_FALSE_IF_ABANDONED 405 ASSERT_OWNED_RESOURCE(src); 406 SkASSERT(src); 407 GR_AUDIT_TRAIL_AUTO_FRAME(&fAuditTrail, "GrContext::readSurfacePixels"); 408 409 this->testPMConversionsIfNecessary(flags); 410 SkAutoMutexAcquire ama(fReadPixelsMutex); 411 412 // Adjust the params so that if we wind up using an intermediate surface we've already done 413 // all the trimming and the temporary can be the min size required. 414 if (!GrSurfacePriv::AdjustReadPixelParams(src->width(), src->height(), 415 GrBytesPerPixel(dstConfig), &left, 416 &top, &width, &height, &buffer, &rowBytes)) { 417 return false; 418 } 419 420 if (!(kDontFlush_PixelOpsFlag & flags) && src->surfacePriv().hasPendingWrite()) { 421 this->flush(); 422 } 423 424 bool unpremul = SkToBool(kUnpremul_PixelOpsFlag & flags); 425 if (unpremul && !GrPixelConfigIs8888(dstConfig)) { 426 // The unpremul flag is only allowed for 8888 configs. 
427 return false; 428 } 429 430 GrGpu::DrawPreference drawPreference = GrGpu::kNoDraw_DrawPreference; 431 // Don't prefer to draw for the conversion (and thereby access a texture from the cache) when 432 // we've already determined that there isn't a roundtrip preserving conversion processor pair. 433 if (unpremul && !this->didFailPMUPMConversionTest()) { 434 drawPreference = GrGpu::kCallerPrefersDraw_DrawPreference; 435 } 436 437 GrGpu::ReadPixelTempDrawInfo tempDrawInfo; 438 if (!fGpu->getReadPixelsInfo(src, width, height, rowBytes, dstConfig, &drawPreference, 439 &tempDrawInfo)) { 440 return false; 441 } 442 443 SkAutoTUnref<GrSurface> surfaceToRead(SkRef(src)); 444 bool didTempDraw = false; 445 if (GrGpu::kNoDraw_DrawPreference != drawPreference) { 446 if (tempDrawInfo.fUseExactScratch) { 447 // We only respect this when the entire src is being read. Otherwise we can trigger too 448 // many odd ball texture sizes and trash the cache. 449 if (width != src->width() || height != src->height()) { 450 tempDrawInfo.fUseExactScratch = false; 451 } 452 } 453 SkAutoTUnref<GrTexture> temp; 454 if (tempDrawInfo.fUseExactScratch) { 455 temp.reset(this->textureProvider()->createTexture(tempDrawInfo.fTempSurfaceDesc, 456 SkBudgeted::kYes)); 457 } else { 458 temp.reset(this->textureProvider()->createApproxTexture(tempDrawInfo.fTempSurfaceDesc)); 459 } 460 if (temp) { 461 SkMatrix textureMatrix; 462 textureMatrix.setTranslate(SkIntToScalar(left), SkIntToScalar(top)); 463 textureMatrix.postIDiv(src->width(), src->height()); 464 SkAutoTUnref<const GrFragmentProcessor> fp; 465 if (unpremul) { 466 fp.reset(this->createPMToUPMEffect(src->asTexture(), tempDrawInfo.fSwizzle, 467 textureMatrix)); 468 if (fp) { 469 unpremul = false; // we no longer need to do this on CPU after the read back. 470 } else if (GrGpu::kCallerPrefersDraw_DrawPreference == drawPreference) { 471 // We only wanted to do the draw in order to perform the unpremul so don't 472 // bother. 
473 temp.reset(nullptr); 474 } 475 } 476 if (!fp && temp) { 477 fp.reset(GrConfigConversionEffect::Create(src->asTexture(), tempDrawInfo.fSwizzle, 478 GrConfigConversionEffect::kNone_PMConversion, textureMatrix)); 479 } 480 if (fp) { 481 // SRGBTODO: AllowSRGBInputs? (We could force it on here, so we don't need the 482 // per-texture override in config conversion effect?) 483 GrPaint paint; 484 paint.addColorFragmentProcessor(fp); 485 paint.setPorterDuffXPFactory(SkXfermode::kSrc_Mode); 486 SkRect rect = SkRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height)); 487 sk_sp<GrDrawContext> drawContext( 488 this->drawContext(sk_ref_sp(temp->asRenderTarget()))); 489 drawContext->drawRect(GrClip::WideOpen(), paint, SkMatrix::I(), rect, nullptr); 490 surfaceToRead.reset(SkRef(temp.get())); 491 left = 0; 492 top = 0; 493 didTempDraw = true; 494 } 495 } 496 } 497 498 if (GrGpu::kRequireDraw_DrawPreference == drawPreference && !didTempDraw) { 499 return false; 500 } 501 GrPixelConfig configToRead = dstConfig; 502 if (didTempDraw) { 503 this->flushSurfaceWrites(surfaceToRead); 504 configToRead = tempDrawInfo.fReadConfig; 505 } 506 if (!fGpu->readPixels(surfaceToRead, left, top, width, height, configToRead, buffer, 507 rowBytes)) { 508 return false; 509 } 510 511 // Perform umpremul conversion if we weren't able to perform it as a draw. 
512 if (unpremul) { 513 SkDstPixelInfo dstPI; 514 if (!GrPixelConfig2ColorAndProfileType(dstConfig, &dstPI.fColorType, nullptr)) { 515 return false; 516 } 517 dstPI.fAlphaType = kUnpremul_SkAlphaType; 518 dstPI.fPixels = buffer; 519 dstPI.fRowBytes = rowBytes; 520 521 SkSrcPixelInfo srcPI; 522 srcPI.fColorType = dstPI.fColorType; 523 srcPI.fAlphaType = kPremul_SkAlphaType; 524 srcPI.fPixels = buffer; 525 srcPI.fRowBytes = rowBytes; 526 527 return srcPI.convertPixelsTo(&dstPI, width, height); 528 } 529 return true; 530} 531 532void GrContext::prepareSurfaceForExternalIO(GrSurface* surface) { 533 ASSERT_SINGLE_OWNER 534 RETURN_IF_ABANDONED 535 SkASSERT(surface); 536 ASSERT_OWNED_RESOURCE(surface); 537 if (surface->surfacePriv().hasPendingIO()) { 538 this->flush(); 539 } 540 GrRenderTarget* rt = surface->asRenderTarget(); 541 if (fGpu && rt) { 542 fGpu->resolveRenderTarget(rt); 543 } 544} 545 546bool GrContext::copySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect, 547 const SkIPoint& dstPoint) { 548 ASSERT_SINGLE_OWNER 549 RETURN_FALSE_IF_ABANDONED 550 GR_AUDIT_TRAIL_AUTO_FRAME(&fAuditTrail, "GrContext::copySurface"); 551 552 if (!src || !dst) { 553 return false; 554 } 555 ASSERT_OWNED_RESOURCE(src); 556 ASSERT_OWNED_RESOURCE(dst); 557 558 if (!dst->asRenderTarget()) { 559 SkIRect clippedSrcRect; 560 SkIPoint clippedDstPoint; 561 if (!GrCopySurfaceBatch::ClipSrcRectAndDstPoint(dst, src, srcRect, dstPoint, 562 &clippedSrcRect, &clippedDstPoint)) { 563 return false; 564 } 565 // If we don't have an RT for the dst then we won't have a GrDrawContext to insert the 566 // the copy surface into. In the future we plan to have a more limited Context type 567 // (GrCopyContext?) that has the subset of GrDrawContext operations that should be 568 // allowed on textures that aren't render targets. 569 // For now we just flush any writes to the src and issue an immediate copy to the dst. 
570 src->flushWrites(); 571 return fGpu->copySurface(dst, src, clippedSrcRect, clippedDstPoint); 572 } 573 sk_sp<GrDrawContext> drawContext(this->drawContext(sk_ref_sp(dst->asRenderTarget()))); 574 if (!drawContext) { 575 return false; 576 } 577 578 if (!drawContext->copySurface(src, srcRect, dstPoint)) { 579 return false; 580 } 581 return true; 582} 583 584void GrContext::flushSurfaceWrites(GrSurface* surface) { 585 ASSERT_SINGLE_OWNER 586 RETURN_IF_ABANDONED 587 if (surface->surfacePriv().hasPendingWrite()) { 588 this->flush(); 589 } 590} 591 592//////////////////////////////////////////////////////////////////////////////// 593int GrContext::getRecommendedSampleCount(GrPixelConfig config, 594 SkScalar dpi) const { 595 ASSERT_SINGLE_OWNER 596 597 if (!this->caps()->isConfigRenderable(config, true)) { 598 return 0; 599 } 600 int chosenSampleCount = 0; 601 if (fGpu->caps()->shaderCaps()->pathRenderingSupport()) { 602 if (dpi >= 250.0f) { 603 chosenSampleCount = 4; 604 } else { 605 chosenSampleCount = 16; 606 } 607 } 608 return chosenSampleCount <= fGpu->caps()->maxSampleCount() ? 
chosenSampleCount : 0; 609} 610 611 612sk_sp<GrDrawContext> GrContext::drawContext(sk_sp<GrRenderTarget> rt, 613 const SkSurfaceProps* surfaceProps) { 614 ASSERT_SINGLE_OWNER 615 return fDrawingManager->drawContext(std::move(rt), surfaceProps); 616} 617 618sk_sp<GrDrawContext> GrContext::newDrawContext(BackingFit fit, 619 int width, int height, 620 GrPixelConfig config, 621 int sampleCnt, 622 GrSurfaceOrigin origin) { 623 GrSurfaceDesc desc; 624 desc.fFlags = kRenderTarget_GrSurfaceFlag; 625 desc.fOrigin = origin; 626 desc.fWidth = width; 627 desc.fHeight = height; 628 desc.fConfig = config; 629 desc.fSampleCnt = sampleCnt; 630 631 sk_sp<GrTexture> tex; 632 if (kTight_BackingFit == fit) { 633 tex.reset(this->textureProvider()->createTexture(desc, SkBudgeted::kYes)); 634 } else { 635 tex.reset(this->textureProvider()->createApproxTexture(desc)); 636 } 637 if (!tex) { 638 return nullptr; 639 } 640 641 sk_sp<GrDrawContext> drawContext(this->drawContext(sk_ref_sp(tex->asRenderTarget()))); 642 if (!drawContext) { 643 return nullptr; 644 } 645 646 return drawContext; 647} 648 649bool GrContext::abandoned() const { 650 ASSERT_SINGLE_OWNER 651 return fDrawingManager->abandoned(); 652} 653 654namespace { 655void test_pm_conversions(GrContext* ctx, int* pmToUPMValue, int* upmToPMValue) { 656 GrConfigConversionEffect::PMConversion pmToUPM; 657 GrConfigConversionEffect::PMConversion upmToPM; 658 GrConfigConversionEffect::TestForPreservingPMConversions(ctx, &pmToUPM, &upmToPM); 659 *pmToUPMValue = pmToUPM; 660 *upmToPMValue = upmToPM; 661} 662} 663 664void GrContext::testPMConversionsIfNecessary(uint32_t flags) { 665 ASSERT_SINGLE_OWNER 666 if (SkToBool(kUnpremul_PixelOpsFlag & flags)) { 667 SkAutoMutexAcquire ama(fTestPMConversionsMutex); 668 if (!fDidTestPMConversions) { 669 test_pm_conversions(this, &fPMToUPMConversion, &fUPMToPMConversion); 670 fDidTestPMConversions = true; 671 } 672 } 673} 674 675const GrFragmentProcessor* GrContext::createPMToUPMEffect(GrTexture* texture, 
676 const GrSwizzle& swizzle, 677 const SkMatrix& matrix) const { 678 ASSERT_SINGLE_OWNER 679 // We should have already called this->testPMConversionsIfNecessary(). 680 SkASSERT(fDidTestPMConversions); 681 GrConfigConversionEffect::PMConversion pmToUPM = 682 static_cast<GrConfigConversionEffect::PMConversion>(fPMToUPMConversion); 683 if (GrConfigConversionEffect::kNone_PMConversion != pmToUPM) { 684 return GrConfigConversionEffect::Create(texture, swizzle, pmToUPM, matrix); 685 } else { 686 return nullptr; 687 } 688} 689 690const GrFragmentProcessor* GrContext::createUPMToPMEffect(GrTexture* texture, 691 const GrSwizzle& swizzle, 692 const SkMatrix& matrix) const { 693 ASSERT_SINGLE_OWNER 694 // We should have already called this->testPMConversionsIfNecessary(). 695 SkASSERT(fDidTestPMConversions); 696 GrConfigConversionEffect::PMConversion upmToPM = 697 static_cast<GrConfigConversionEffect::PMConversion>(fUPMToPMConversion); 698 if (GrConfigConversionEffect::kNone_PMConversion != upmToPM) { 699 return GrConfigConversionEffect::Create(texture, swizzle, upmToPM, matrix); 700 } else { 701 return nullptr; 702 } 703} 704 705bool GrContext::didFailPMUPMConversionTest() const { 706 ASSERT_SINGLE_OWNER 707 // We should have already called this->testPMConversionsIfNecessary(). 708 SkASSERT(fDidTestPMConversions); 709 // The PM<->UPM tests fail or succeed together so we only need to check one. 
710 return GrConfigConversionEffect::kNone_PMConversion == fPMToUPMConversion; 711} 712 713////////////////////////////////////////////////////////////////////////////// 714 715void GrContext::getResourceCacheLimits(int* maxTextures, size_t* maxTextureBytes) const { 716 ASSERT_SINGLE_OWNER 717 if (maxTextures) { 718 *maxTextures = fResourceCache->getMaxResourceCount(); 719 } 720 if (maxTextureBytes) { 721 *maxTextureBytes = fResourceCache->getMaxResourceBytes(); 722 } 723} 724 725void GrContext::setResourceCacheLimits(int maxTextures, size_t maxTextureBytes) { 726 ASSERT_SINGLE_OWNER 727 fResourceCache->setLimits(maxTextures, maxTextureBytes); 728} 729 730////////////////////////////////////////////////////////////////////////////// 731 732void GrContext::dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const { 733 ASSERT_SINGLE_OWNER 734 fResourceCache->dumpMemoryStatistics(traceMemoryDump); 735} 736