GrContext.cpp revision 1f9ed8501b0007846b3032f4bfc38aee98c175a1
1/* 2 * Copyright 2011 Google Inc. 3 * 4 * Use of this source code is governed by a BSD-style license that can be 5 * found in the LICENSE file. 6 */ 7 8#include "GrBackendSemaphore.h" 9#include "GrContext.h" 10#include "GrClip.h" 11#include "GrContextOptions.h" 12#include "GrContextPriv.h" 13#include "GrDrawingManager.h" 14#include "GrGpu.h" 15#include "GrProxyProvider.h" 16#include "GrRenderTargetContext.h" 17#include "GrRenderTargetProxy.h" 18#include "GrResourceCache.h" 19#include "GrResourceProvider.h" 20#include "GrSemaphore.h" 21#include "GrSoftwarePathRenderer.h" 22#include "GrSurfaceContext.h" 23#include "GrSurfacePriv.h" 24#include "GrSurfaceProxyPriv.h" 25#include "GrTexture.h" 26#include "GrTextureContext.h" 27#include "GrTracing.h" 28#include "SkConvertPixels.h" 29#include "SkGr.h" 30#include "SkJSONWriter.h" 31#include "SkMakeUnique.h" 32#include "SkTaskGroup.h" 33#include "SkUnPreMultiplyPriv.h" 34#include "effects/GrConfigConversionEffect.h" 35#include "text/GrTextBlobCache.h" 36 37#include "gl/GrGLGpu.h" 38#include "mock/GrMockGpu.h" 39#ifdef SK_METAL 40#include "mtl/GrMtlTrampoline.h" 41#endif 42#include "ddl/GrDDLGpu.h" 43#ifdef SK_VULKAN 44#include "vk/GrVkGpu.h" 45#endif 46 47#define ASSERT_OWNED_PROXY(P) \ 48SkASSERT(!(P) || !((P)->priv().peekTexture()) || (P)->priv().peekTexture()->getContext() == this) 49#define ASSERT_OWNED_PROXY_PRIV(P) \ 50SkASSERT(!(P) || !((P)->priv().peekTexture()) || (P)->priv().peekTexture()->getContext() == fContext) 51 52#define ASSERT_OWNED_RESOURCE(R) SkASSERT(!(R) || (R)->getContext() == this) 53#define ASSERT_SINGLE_OWNER \ 54 SkDEBUGCODE(GrSingleOwner::AutoEnforce debug_SingleOwner(&fSingleOwner);) 55#define ASSERT_SINGLE_OWNER_PRIV \ 56 SkDEBUGCODE(GrSingleOwner::AutoEnforce debug_SingleOwner(&fContext->fSingleOwner);) 57#define RETURN_IF_ABANDONED if (fDrawingManager->wasAbandoned()) { return; } 58#define RETURN_IF_ABANDONED_PRIV if (fContext->fDrawingManager->wasAbandoned()) { return; } 59#define 
RETURN_FALSE_IF_ABANDONED if (fDrawingManager->wasAbandoned()) { return false; }
#define RETURN_FALSE_IF_ABANDONED_PRIV if (fContext->fDrawingManager->wasAbandoned()) { return false; }
#define RETURN_NULL_IF_ABANDONED if (fDrawingManager->wasAbandoned()) { return nullptr; }

////////////////////////////////////////////////////////////////////////////////

// Legacy two-argument factory: forwards to the three-argument overload with
// default context options.
GrContext* GrContext::Create(GrBackend backend, GrBackendContext backendContext) {
    GrContextOptions defaultOptions;
    return Create(backend, backendContext, defaultOptions);
}

// Legacy factory returning a bare pointer; ownership of one ref is transferred
// to the caller via release(). Returns nullptr if GPU creation or init fails.
GrContext* GrContext::Create(GrBackend backend, GrBackendContext backendContext,
                             const GrContextOptions& options) {

    sk_sp<GrContext> context(new GrContext(backend));

    // The GrGpu must exist before init(): init() dereferences fGpu.
    context->fGpu = GrGpu::Make(backend, backendContext, options, context.get());
    if (!context->fGpu) {
        return nullptr;
    }

    if (!context->init(options)) {
        return nullptr;
    }

    return context.release();
}

// GL-backend factory with default options.
sk_sp<GrContext> GrContext::MakeGL(sk_sp<const GrGLInterface> interface) {
    GrContextOptions defaultOptions;
    return MakeGL(std::move(interface), defaultOptions);
}

// GL-backend factory. Returns nullptr on failure to create the GrGLGpu or to
// initialize the context.
sk_sp<GrContext> GrContext::MakeGL(sk_sp<const GrGLInterface> interface,
                                   const GrContextOptions& options) {
    sk_sp<GrContext> context(new GrContext(kOpenGL_GrBackend));

    context->fGpu = GrGLGpu::Make(std::move(interface), options, context.get());
    if (!context->fGpu) {
        return nullptr;
    }
    if (!context->init(options)) {
        return nullptr;
    }
    return context;
}

// Convenience overloads taking a raw interface pointer; they add a ref and
// delegate to the sk_sp overloads above.
sk_sp<GrContext> GrContext::MakeGL(const GrGLInterface* interface) {
    return MakeGL(sk_ref_sp(interface));
}

sk_sp<GrContext> GrContext::MakeGL(const GrGLInterface* interface,
                                   const GrContextOptions& options) {
    return MakeGL(sk_ref_sp(interface), options);
}

// Mock-backend factory (no real 3D API) with default options.
sk_sp<GrContext> GrContext::MakeMock(const GrMockOptions* mockOptions) {
    GrContextOptions defaultOptions;
    return MakeMock(mockOptions, defaultOptions);
}
// Mock-backend factory. Returns nullptr on failure, mirroring the other Make* factories.
sk_sp<GrContext> GrContext::MakeMock(const GrMockOptions* mockOptions,
                                     const GrContextOptions& options) {
    sk_sp<GrContext> context(new GrContext(kMock_GrBackend));

    context->fGpu = GrMockGpu::Make(mockOptions, options, context.get());
    if (!context->fGpu) {
        return nullptr;
    }
    if (!context->init(options)) {
        return nullptr;
    }
    return context;
}

#ifdef SK_VULKAN
// Vulkan-backend factory with default options.
sk_sp<GrContext> GrContext::MakeVulkan(sk_sp<const GrVkBackendContext> backendContext) {
    GrContextOptions defaultOptions;
    return MakeVulkan(std::move(backendContext), defaultOptions);
}

sk_sp<GrContext> GrContext::MakeVulkan(sk_sp<const GrVkBackendContext> backendContext,
                                       const GrContextOptions& options) {
    sk_sp<GrContext> context(new GrContext(kVulkan_GrBackend));

    context->fGpu = GrVkGpu::Make(std::move(backendContext), options, context.get());
    if (!context->fGpu) {
        return nullptr;
    }
    if (!context->init(options)) {
        return nullptr;
    }
    return context;
}
#endif

#ifdef SK_METAL
// Metal-backend factory with default options. device/queue are opaque
// MTLDevice/MTLCommandQueue pointers handed through to the trampoline.
sk_sp<GrContext> GrContext::MakeMetal(void* device, void* queue) {
    GrContextOptions defaultOptions;
    return MakeMetal(device, queue, defaultOptions);
}

sk_sp<GrContext> GrContext::MakeMetal(void* device, void* queue, const GrContextOptions& options) {
    sk_sp<GrContext> context(new GrContext(kMetal_GrBackend));

    context->fGpu = GrMtlTrampoline::MakeGpu(context.get(), options, device, queue);
    if (!context->fGpu) {
        return nullptr;
    }
    if (!context->init(options)) {
        return nullptr;
    }
    return context;
}
#endif

// Process-wide source of unique context IDs. Loops so that SK_InvalidGenID is
// never handed out, even after the counter wraps.
static int32_t gNextID = 1;
static int32_t next_id() {
    int32_t id;
    do {
        id = sk_atomic_inc(&gNextID);
    } while (id == SK_InvalidGenID);
    return id;
}

// Creates the context used while recording a deferred display list. It reuses
// the caps and options captured in the originating context's thread-safe proxy.
sk_sp<GrContext> GrContextPriv::MakeDDL(GrContextThreadSafeProxy* proxy) {
    sk_sp<GrContext> context(new GrContext(proxy));

    context->fGpu = GrDDLGpu::Make(context.get(), proxy->fCaps);
    if (!context->fGpu) {
        return nullptr;
    }
    if (!context->init(proxy->fOptions)) {
        return nullptr;
    }
    return context;
}

GrContext::GrContext(GrBackend backend)
        : fUniqueID(next_id())
        , fBackend(backend) {
    fResourceCache = nullptr;
    fResourceProvider = nullptr;
    fProxyProvider = nullptr;
    fAtlasGlyphCache = nullptr;
}

// DDL-recording constructor: shares the unique ID and backend of the context
// that created the thread-safe proxy rather than minting a new ID.
GrContext::GrContext(GrContextThreadSafeProxy* proxy)
        : fUniqueID(proxy->fContextUniqueID)
        , fBackend(proxy->fBackend) {
    fResourceCache = nullptr;
    fResourceProvider = nullptr;
    fProxyProvider = nullptr;
    fAtlasGlyphCache = nullptr;
}

// Second-phase initialization; fGpu must already be set by the factory.
// Returns false on failure (callers then discard the context).
bool GrContext::init(const GrContextOptions& options) {
    ASSERT_SINGLE_OWNER
    fCaps = fGpu->refCaps();
    fResourceCache = new GrResourceCache(fCaps.get(), fUniqueID);
    fResourceProvider = new GrResourceProvider(fGpu.get(), fResourceCache, &fSingleOwner);
    fProxyProvider = new GrProxyProvider(fResourceProvider, fResourceCache, fCaps, &fSingleOwner);
    fResourceCache->setProxyProvider(fProxyProvider);

    // DDL TODO: we need to think through how the task group & persistent cache
    // get passed on to/shared between all the DDLRecorders created with this context.
    fThreadSafeProxy.reset(new GrContextThreadSafeProxy(fCaps, this->uniqueID(), fBackend,
                                                        options));

    fDisableGpuYUVConversion = options.fDisableGpuYUVConversion;
    fDidTestPMConversions = false;

    GrPathRendererChain::Options prcOptions;
    prcOptions.fAllowPathMaskCaching = options.fAllowPathMaskCaching;
#if GR_TEST_UTILS
    prcOptions.fGpuPathRenderers = options.fGpuPathRenderers;
#endif
    if (options.fDisableDistanceFieldPaths) {
        prcOptions.fGpuPathRenderers &= ~GpuPathRenderers::kSmall;
    }

    GrAtlasTextContext::Options atlasTextContextOptions;
    atlasTextContextOptions.fMaxDistanceFieldFontSize = options.fGlyphsAsPathsFontSize;
    atlasTextContextOptions.fMinDistanceFieldFontSize = options.fMinDistanceFieldFontSize;
    atlasTextContextOptions.fDistanceFieldVerticesAlwaysHaveW = false;
#if SK_SUPPORT_ATLAS_TEXT
    if (GrContextOptions::Enable::kYes == options.fDistanceFieldGlyphVerticesAlwaysHaveW) {
        atlasTextContextOptions.fDistanceFieldVerticesAlwaysHaveW = true;
    }
#endif

    fDrawingManager.reset(
            new GrDrawingManager(this, prcOptions, atlasTextContextOptions, &fSingleOwner));

    // Glyph-atlas multitexturing defaults to off on iOS and on everywhere else;
    // an explicit option value overrides the platform default.
    GrDrawOpAtlas::AllowMultitexturing allowMultitexturing;
    switch (options.fAllowMultipleGlyphCacheTextures) {
        case GrContextOptions::Enable::kDefault:
#ifdef SK_BUILD_FOR_IOS
            allowMultitexturing = GrDrawOpAtlas::AllowMultitexturing::kNo;
#else
            allowMultitexturing = GrDrawOpAtlas::AllowMultitexturing::kYes;
#endif
            break;
        case GrContextOptions::Enable::kNo:
            allowMultitexturing = GrDrawOpAtlas::AllowMultitexturing::kNo;
            break;
        case GrContextOptions::Enable::kYes:
            allowMultitexturing = GrDrawOpAtlas::AllowMultitexturing::kYes;
            break;
    }
    fAtlasGlyphCache = new GrAtlasGlyphCache(this, options.fGlyphCacheTextureMaximumBytes,
                                             allowMultitexturing);
    // The glyph cache participates in the flush cycle so it can upload atlas pages.
    this->contextPriv().addOnFlushCallbackObject(fAtlasGlyphCache);

    fTextBlobCache.reset(new GrTextBlobCache(TextBlobCacheOverBudgetCB, this, this->uniqueID()));

    if (options.fExecutor) {
        fTaskGroup = skstd::make_unique<SkTaskGroup>(*options.fExecutor);
    }

    fPersistentCache = options.fPersistentCache;

    return true;
}

GrContext::~GrContext() {
    ASSERT_SINGLE_OWNER

    // A context whose factory failed before GrGpu creation owns nothing to tear down.
    if (!fGpu) {
        SkASSERT(!fCaps);
        return;
    }

    this->flush();

    fDrawingManager->cleanup();

    // Run client-registered cleanup callbacks before deleting our own state.
    for (int i = 0; i < fCleanUpData.count(); ++i) {
        (*fCleanUpData[i].fFunc)(this, fCleanUpData[i].fInfo);
    }

    delete fResourceProvider;
    delete fResourceCache;
    delete fProxyProvider;
    delete fAtlasGlyphCache;
}

sk_sp<GrContextThreadSafeProxy> GrContext::threadSafeProxy() {
    return fThreadSafeProxy;
}

// Severs the connection to the backend 3D API without freeing its objects;
// outstanding GPU resources are simply forgotten (DisconnectType::kAbandon).
void GrContext::abandonContext() {
    ASSERT_SINGLE_OWNER

    fProxyProvider->abandon();
    fResourceProvider->abandon();

    // Need to abandon the drawing manager first so all the render targets
    // will be released/forgotten before they too are abandoned.
    fDrawingManager->abandon();

    // abandon first to so destructors
    // don't try to free the resources in the API.
    fResourceCache->abandonAll();

    fGpu->disconnect(GrGpu::DisconnectType::kAbandon);

    fAtlasGlyphCache->freeAll();
    fTextBlobCache->freeAll();
}

// Like abandonContext(), but releases backend resources properly before
// disconnecting (DisconnectType::kCleanup).
void GrContext::releaseResourcesAndAbandonContext() {
    ASSERT_SINGLE_OWNER

    fProxyProvider->abandon();
    fResourceProvider->abandon();

    // Need to abandon the drawing manager first so all the render targets
    // will be released/forgotten before they too are abandoned.
    fDrawingManager->abandon();

    // Release all resources in the backend 3D API.
    fResourceCache->releaseAll();

    fGpu->disconnect(GrGpu::DisconnectType::kCleanup);

    fAtlasGlyphCache->freeAll();
    fTextBlobCache->freeAll();
}

// Notifies the GPU that external (non-Skia) code may have dirtied the given
// backend state, so cached state tracking must be invalidated.
void GrContext::resetContext(uint32_t state) {
    ASSERT_SINGLE_OWNER
    fGpu->markContextDirty(state);
}

// Flushes pending work and then frees all GPU resources that are not locked.
void GrContext::freeGpuResources() {
    ASSERT_SINGLE_OWNER

    this->flush();

    fAtlasGlyphCache->freeAll();

    fDrawingManager->freeGpuResources();

    fResourceCache->purgeAllUnlocked();
}

// Frees cache entries unused for at least msNotUsed, plus stale text blobs.
void GrContext::performDeferredCleanup(std::chrono::milliseconds msNotUsed) {
    ASSERT_SINGLE_OWNER
    fResourceCache->purgeAsNeeded();
    fResourceCache->purgeResourcesNotUsedSince(GrStdSteadyClock::now() - msNotUsed);

    fTextBlobCache->purgeStaleBlobs();
}

void GrContext::purgeUnlockedResources(size_t bytesToPurge, bool preferScratchResources) {
    ASSERT_SINGLE_OWNER
    fResourceCache->purgeUnlockedResources(bytesToPurge, preferScratchResources);
}

// Reports budgeted resource cache usage. Either out-param may be null if the
// caller only wants the other value.
void GrContext::getResourceCacheUsage(int* resourceCount, size_t* resourceBytes) const {
    ASSERT_SINGLE_OWNER

    if (resourceCount) {
        *resourceCount = fResourceCache->getBudgetedResourceCount();
    }
    if (resourceBytes) {
        *resourceBytes = fResourceCache->getBudgetedResourceBytes();
    }
}

size_t GrContext::getResourceCachePurgeableBytes() const {
    ASSERT_SINGLE_OWNER
    return fResourceCache->getPurgeableBytes();
}

////////////////////////////////////////////////////////////////////////////////

// Callback installed on the text blob cache (see init()); flushes the whole
// context when the blob cache exceeds its budget.
void GrContext::TextBlobCacheOverBudgetCB(void* data) {
    SkASSERT(data);
    // TextBlobs are drawn at the SkGpuDevice level, therefore they cannot rely on
    // GrRenderTargetContext to perform a necessary flush. The solution is to move drawText calls
    // to below the GrContext level, but this is not trivial because they call drawPath on
    // SkGpuDevice.
    GrContext* context = reinterpret_cast<GrContext*>(data);
    context->flush();
}

////////////////////////////////////////////////////////////////////////////////

void GrContext::flush() {
    ASSERT_SINGLE_OWNER
    RETURN_IF_ABANDONED

    fDrawingManager->flush(nullptr);
}

// Flushes and asks the backend to signal the given semaphores. Returns kNo
// when the context was abandoned (no semaphores are signaled in that case).
GrSemaphoresSubmitted GrContext::flushAndSignalSemaphores(int numSemaphores,
                                                          GrBackendSemaphore signalSemaphores[]) {
    ASSERT_SINGLE_OWNER
    if (fDrawingManager->wasAbandoned()) { return GrSemaphoresSubmitted::kNo; }

    return fDrawingManager->flush(nullptr, numSemaphores, signalSemaphores);
}

// Flushes only the work needed for the given proxy (nullptr flushes everything).
void GrContextPriv::flush(GrSurfaceProxy* proxy) {
    ASSERT_SINGLE_OWNER_PRIV
    RETURN_IF_ABANDONED_PRIV
    ASSERT_OWNED_PROXY_PRIV(proxy);

    fContext->fDrawingManager->flush(proxy);
}

// CPU fallback for unpremul -> premul conversion. Only 4-byte-per-pixel configs
// that map to an SkColorType are supported; returns false otherwise.
// in/out buffers may use different row byte strides.
bool sw_convert_to_premul(GrPixelConfig srcConfig, int width, int height, size_t inRowBytes,
                          const void* inPixels, size_t outRowBytes, void* outPixels) {
    SkColorType colorType;
    if (!GrPixelConfigToColorType(srcConfig, &colorType) ||
        4 != SkColorTypeBytesPerPixel(colorType))
    {
        return false;
    }

    for (int y = 0; y < height; y++) {
        SkOpts::RGBA_to_rgbA((uint32_t*) outPixels, inPixels, width);
        outPixels = SkTAddOffset<void>(outPixels, outRowBytes);
        inPixels = SkTAddOffset<const void>(inPixels, inRowBytes);
    }

    return true;
}

// Premul <-> unpremul conversion is only supported for 8888-unorm and F16 configs.
static bool valid_premul_config(GrPixelConfig config) {
    return GrPixelConfigIs8888Unorm(config) || kRGBA_half_GrPixelConfig == config;
}

// Validates a src->dst config pair for the read/write pixel paths; the premul
// flag indicates whether a premul/unpremul conversion was requested.
static bool valid_pixel_conversion(GrPixelConfig srcConfig, GrPixelConfig dstConfig,
                                   bool premulConversion) {
    // We don't allow conversion between integer configs and float/fixed configs.
    if (GrPixelConfigIsSint(srcConfig) != GrPixelConfigIsSint(dstConfig)) {
        return false;
    }

    // We only allow premul <-> unpremul conversions for some formats
    if (premulConversion && (!valid_premul_config(srcConfig) || !valid_premul_config(dstConfig))) {
        return false;
    }

    return true;
}

// True for 8888 configs with no attached color space: the legacy path where a
// premul->unpremul->premul round trip must reproduce the original bytes exactly.
static bool pm_upm_must_round_trip(GrPixelConfig config, SkColorSpace* colorSpace) {
    return !colorSpace &&
           (kRGBA_8888_GrPixelConfig == config || kBGRA_8888_GrPixelConfig == config);
}

// Writes CPU pixels into dst, optionally premultiplying (kUnpremul_PixelOpsFlag)
// either on the GPU via a temp draw or in software. Returns false on any failure.
bool GrContextPriv::writeSurfacePixels(GrSurfaceContext* dst,
                                       int left, int top, int width, int height,
                                       GrPixelConfig srcConfig, SkColorSpace* srcColorSpace,
                                       const void* buffer, size_t rowBytes,
                                       uint32_t pixelOpsFlags) {
    // TODO: Color space conversion

    ASSERT_SINGLE_OWNER_PRIV
    RETURN_FALSE_IF_ABANDONED_PRIV
    SkASSERT(dst);
    ASSERT_OWNED_PROXY_PRIV(dst->asSurfaceProxy());
    GR_CREATE_TRACE_MARKER_CONTEXT("GrContextPriv", "writeSurfacePixels", fContext);

    if (!dst->asSurfaceProxy()->instantiate(this->resourceProvider())) {
        return false;
    }

    GrSurfaceProxy* dstProxy = dst->asSurfaceProxy();
    GrSurface* dstSurface = dstProxy->priv().peekSurface();

    // The src is unpremul but the dst is premul -> premul the src before or as part of the write
    const bool premul = SkToBool(kUnpremul_PixelOpsFlag & pixelOpsFlags);
    if (!valid_pixel_conversion(srcConfig, dstProxy->config(), premul)) {
        return false;
    }

    // We need to guarantee round-trip conversion if we are reading and writing 8888 non-sRGB data,
    // without any color spaces attached, and the caller wants us to premul.
    bool useConfigConversionEffect =
            premul && pm_upm_must_round_trip(srcConfig, srcColorSpace) &&
            pm_upm_must_round_trip(dstProxy->config(), dst->colorSpaceInfo().colorSpace());

    // Are we going to try to premul as part of a draw? For the non-legacy case, we always allow
    // this. GrConfigConversionEffect fails on some GPUs, so only allow this if it works perfectly.
    bool premulOnGpu = premul &&
                       (!useConfigConversionEffect || fContext->validPMUPMConversionExists());

    // Trim the params here so that if we wind up making a temporary surface it can be as small as
    // necessary and because GrGpu::getWritePixelsInfo requires it.
    if (!GrSurfacePriv::AdjustWritePixelParams(dstSurface->width(), dstSurface->height(),
                                               GrBytesPerPixel(srcConfig), &left, &top, &width,
                                               &height, &buffer, &rowBytes)) {
        return false;
    }

    GrGpu::DrawPreference drawPreference = premulOnGpu ? GrGpu::kCallerPrefersDraw_DrawPreference
                                                       : GrGpu::kNoDraw_DrawPreference;
    GrGpu::WritePixelTempDrawInfo tempDrawInfo;
    if (!fContext->fGpu->getWritePixelsInfo(dstSurface, dstProxy->origin(), width, height,
                                            srcConfig, &drawPreference, &tempDrawInfo)) {
        return false;
    }

    if (!(kDontFlush_PixelOpsFlag & pixelOpsFlags) && dstSurface->surfacePriv().hasPendingIO()) {
        this->flush(nullptr); // MDB TODO: tighten this
    }

    // When the GPU prefers (or requires) a draw, stage the pixels in a temp texture.
    sk_sp<GrTextureProxy> tempProxy;
    if (GrGpu::kNoDraw_DrawPreference != drawPreference) {
        tempProxy = this->proxyProvider()->createProxy(tempDrawInfo.fTempSurfaceDesc,
                                                       SkBackingFit::kApprox,
                                                       SkBudgeted::kYes);
        if (!tempProxy && GrGpu::kRequireDraw_DrawPreference == drawPreference) {
            return false;
        }
    }

    // temp buffer for doing sw premul conversion, if needed.
    SkAutoSTMalloc<128 * 128, uint32_t> tmpPixels(0);
    // We need to do sw premul if we were unable to create a RT for drawing, or if we can't do the
    // premul on the GPU
    if (premul && (!tempProxy || !premulOnGpu)) {
        size_t tmpRowBytes = 4 * width;
        tmpPixels.reset(width * height);
        if (!sw_convert_to_premul(srcConfig, width, height, rowBytes, buffer, tmpRowBytes,
                                  tmpPixels.get())) {
            return false;
        }
        rowBytes = tmpRowBytes;
        buffer = tmpPixels.get();
    }

    if (tempProxy) {
        // Upload into the temp texture, then draw it into dst (applying premul
        // and/or swizzle in the shader).
        auto fp = GrSimpleTextureEffect::Make(tempProxy, SkMatrix::I());
        if (premulOnGpu) {
            fp = fContext->createUPMToPMEffect(std::move(fp), useConfigConversionEffect);
        }
        fp = GrFragmentProcessor::SwizzleOutput(std::move(fp), tempDrawInfo.fSwizzle);
        if (!fp) {
            return false;
        }

        if (!tempProxy->instantiate(this->resourceProvider())) {
            return false;
        }
        GrTexture* texture = tempProxy->priv().peekTexture();

        if (tempProxy->priv().hasPendingIO()) {
            this->flush(tempProxy.get());
        }

        if (!fContext->fGpu->writePixels(texture, tempProxy->origin(), 0, 0, width, height,
                                         tempDrawInfo.fWriteConfig, buffer, rowBytes)) {
            return false;
        }
        tempProxy = nullptr;

        SkMatrix matrix;
        matrix.setTranslate(SkIntToScalar(left), SkIntToScalar(top));
        GrRenderTargetContext* renderTargetContext = dst->asRenderTargetContext();
        if (!renderTargetContext) {
            return false;
        }
        GrPaint paint;
        paint.addColorFragmentProcessor(std::move(fp));
        paint.setPorterDuffXPFactory(SkBlendMode::kSrc);
        paint.setAllowSRGBInputs(dst->colorSpaceInfo().isGammaCorrect() ||
                                 GrPixelConfigIsSRGB(dst->colorSpaceInfo().config()));
        SkRect rect = SkRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height));
        renderTargetContext->drawRect(GrNoClip(), std::move(paint), GrAA::kNo, matrix, rect,
                                      nullptr);

        if (kFlushWrites_PixelOp & pixelOpsFlags) {
            this->flushSurfaceWrites(renderTargetContext->asRenderTargetProxy());
        }
    } else {
        // Direct path: no temp draw needed; write straight into the destination.
        return fContext->fGpu->writePixels(dstSurface, dstProxy->origin(), left, top, width,
                                           height, srcConfig, buffer, rowBytes);
    }
    return true;
}

// Reads pixels from src into a CPU buffer, optionally unpremultiplying
// (kUnpremul_PixelOpsFlag) on the GPU via a temp draw or in software afterward.
bool GrContextPriv::readSurfacePixels(GrSurfaceContext* src,
                                      int left, int top, int width, int height,
                                      GrPixelConfig dstConfig, SkColorSpace* dstColorSpace,
                                      void* buffer, size_t rowBytes, uint32_t flags) {
    // TODO: Color space conversion

    ASSERT_SINGLE_OWNER_PRIV
    RETURN_FALSE_IF_ABANDONED_PRIV
    SkASSERT(src);
    ASSERT_OWNED_PROXY_PRIV(src->asSurfaceProxy());
    GR_CREATE_TRACE_MARKER_CONTEXT("GrContextPriv", "readSurfacePixels", fContext);

    // MDB TODO: delay this instantiation until later in the method
    if (!src->asSurfaceProxy()->instantiate(this->resourceProvider())) {
        return false;
    }

    GrSurfaceProxy* srcProxy = src->asSurfaceProxy();
    GrSurface* srcSurface = srcProxy->priv().peekSurface();

    // The src is premul but the dst is unpremul -> unpremul the src after or as part of the read
    bool unpremul = SkToBool(kUnpremul_PixelOpsFlag & flags);
    if (!valid_pixel_conversion(srcProxy->config(), dstConfig, unpremul)) {
        return false;
    }

    // We need to guarantee round-trip conversion if we are reading and writing 8888 non-sRGB data,
    // without any color spaces attached, and the caller wants us to unpremul.
    bool useConfigConversionEffect =
            unpremul &&
            pm_upm_must_round_trip(srcProxy->config(), src->colorSpaceInfo().colorSpace()) &&
            pm_upm_must_round_trip(dstConfig, dstColorSpace);

    // Are we going to try to unpremul as part of a draw? For the non-legacy case, we always allow
    // this. GrConfigConversionEffect fails on some GPUs, so only allow this if it works perfectly.
    bool unpremulOnGpu = unpremul &&
                         (!useConfigConversionEffect || fContext->validPMUPMConversionExists());

    // Adjust the params so that if we wind up using an intermediate surface we've already done
    // all the trimming and the temporary can be the min size required.
    if (!GrSurfacePriv::AdjustReadPixelParams(srcSurface->width(), srcSurface->height(),
                                              GrBytesPerPixel(dstConfig), &left,
                                              &top, &width, &height, &buffer, &rowBytes)) {
        return false;
    }

    GrGpu::DrawPreference drawPreference = unpremulOnGpu ? GrGpu::kCallerPrefersDraw_DrawPreference
                                                         : GrGpu::kNoDraw_DrawPreference;
    GrGpu::ReadPixelTempDrawInfo tempDrawInfo;
    if (!fContext->fGpu->getReadPixelsInfo(srcSurface, srcProxy->origin(), width, height, rowBytes,
                                           dstConfig, &drawPreference, &tempDrawInfo)) {
        return false;
    }

    if (!(kDontFlush_PixelOpsFlag & flags) && srcSurface->surfacePriv().hasPendingWrite()) {
        this->flush(nullptr); // MDB TODO: tighten this
    }

    // By default read straight from src; a successful temp draw redirects the
    // read to the temp render target below.
    sk_sp<GrSurfaceProxy> proxyToRead = src->asSurfaceProxyRef();
    bool didTempDraw = false;
    if (GrGpu::kNoDraw_DrawPreference != drawPreference) {
        if (SkBackingFit::kExact == tempDrawInfo.fTempSurfaceFit) {
            // We only respect this when the entire src is being read. Otherwise we can trigger too
            // many odd ball texture sizes and trash the cache.
            if (width != srcSurface->width() || height != srcSurface->height()) {
                tempDrawInfo.fTempSurfaceFit = SkBackingFit::kApprox;
            }
        }
        // TODO: Need to decide the semantics of this function for color spaces. Do we support
        // conversion to a passed-in color space? For now, specifying nullptr means that this
        // path will do no conversion, so it will match the behavior of the non-draw path.
        sk_sp<GrRenderTargetContext> tempRTC = fContext->makeDeferredRenderTargetContext(
                                                       tempDrawInfo.fTempSurfaceFit,
                                                       tempDrawInfo.fTempSurfaceDesc.fWidth,
                                                       tempDrawInfo.fTempSurfaceDesc.fHeight,
                                                       tempDrawInfo.fTempSurfaceDesc.fConfig,
                                                       nullptr,
                                                       tempDrawInfo.fTempSurfaceDesc.fSampleCnt,
                                                       GrMipMapped::kNo,
                                                       tempDrawInfo.fTempSurfaceDesc.fOrigin);
        if (tempRTC) {
            // Draw src into the temp RT, unpremultiplying and/or swizzling in the shader.
            SkMatrix textureMatrix = SkMatrix::MakeTrans(SkIntToScalar(left), SkIntToScalar(top));
            sk_sp<GrTextureProxy> proxy = src->asTextureProxyRef();
            auto fp = GrSimpleTextureEffect::Make(std::move(proxy), textureMatrix);
            if (unpremulOnGpu) {
                fp = fContext->createPMToUPMEffect(std::move(fp), useConfigConversionEffect);
                // We no longer need to do this on CPU after the read back.
                unpremul = false;
            }
            fp = GrFragmentProcessor::SwizzleOutput(std::move(fp), tempDrawInfo.fSwizzle);
            if (!fp) {
                return false;
            }

            GrPaint paint;
            paint.addColorFragmentProcessor(std::move(fp));
            paint.setPorterDuffXPFactory(SkBlendMode::kSrc);
            paint.setAllowSRGBInputs(true);
            SkRect rect = SkRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height));
            tempRTC->drawRect(GrNoClip(), std::move(paint), GrAA::kNo, SkMatrix::I(), rect,
                              nullptr);
            proxyToRead = tempRTC->asTextureProxyRef();
            // The temp draw already applied the left/top offset.
            left = 0;
            top = 0;
            didTempDraw = true;
        }
    }

    if (!proxyToRead) {
        return false;
    }

    if (GrGpu::kRequireDraw_DrawPreference == drawPreference && !didTempDraw) {
        return false;
    }
    GrPixelConfig configToRead = dstConfig;
    if (didTempDraw) {
        this->flushSurfaceWrites(proxyToRead.get());
        configToRead = tempDrawInfo.fReadConfig;
    }

    if (!proxyToRead->instantiate(this->resourceProvider())) {
        return false;
    }

    GrSurface* surfaceToRead = proxyToRead->priv().peekSurface();

    if (!fContext->fGpu->readPixels(surfaceToRead, proxyToRead->origin(),
                                    left, top, width, height,
                                    configToRead, buffer, rowBytes)) {
        return false;
    }

    // Perform umpremul conversion if we weren't able to perform it as a draw.
    if (unpremul) {
        SkColorType colorType;
        if (!GrPixelConfigToColorType(dstConfig, &colorType) ||
            4 != SkColorTypeBytesPerPixel(colorType))
        {
            return false;
        }

        // In-place row-by-row unpremultiply of the read-back buffer.
        for (int y = 0; y < height; y++) {
            SkUnpremultiplyRow<false>((uint32_t*) buffer, (const uint32_t*) buffer, width);
            buffer = SkTAddOffset<void>(buffer, rowBytes);
        }
    }
    return true;
}

// Flushes all work targeting the proxy so external (non-Skia) code can use it.
void GrContextPriv::prepareSurfaceForExternalIO(GrSurfaceProxy* proxy) {
    ASSERT_SINGLE_OWNER_PRIV
    RETURN_IF_ABANDONED_PRIV
    SkASSERT(proxy);
    ASSERT_OWNED_PROXY_PRIV(proxy);
    fContext->fDrawingManager->prepareSurfaceForExternalIO(proxy, 0, nullptr);
}

// Flushes only if the proxy has unflushed writes pending.
void GrContextPriv::flushSurfaceWrites(GrSurfaceProxy* proxy) {
    ASSERT_SINGLE_OWNER_PRIV
    RETURN_IF_ABANDONED_PRIV
    SkASSERT(proxy);
    ASSERT_OWNED_PROXY_PRIV(proxy);
    if (proxy->priv().hasPendingWrite()) {
        this->flush(proxy);
    }
}

// Flushes only if the proxy has any pending IO (reads or writes).
void GrContextPriv::flushSurfaceIO(GrSurfaceProxy* proxy) {
    ASSERT_SINGLE_OWNER_PRIV
    RETURN_IF_ABANDONED_PRIV
    SkASSERT(proxy);
    ASSERT_OWNED_PROXY_PRIV(proxy);
    if (proxy->priv().hasPendingIO()) {
        this->flush(proxy);
    }
}

////////////////////////////////////////////////////////////////////////////////
// Suggests an MSAA sample count for the config/dpi pair; 0 means the config is
// not renderable (or the chosen count exceeds what the GPU supports).
int GrContext::getRecommendedSampleCount(GrPixelConfig config,
                                         SkScalar dpi) const {
    ASSERT_SINGLE_OWNER

    if (!this->caps()->isConfigRenderable(config, true)) {
        return 0;
    }
    int chosenSampleCount = 0;
    if (fGpu->caps()->shaderCaps()->pathRenderingSupport()) {
        if (dpi >= 250.0f) {
            chosenSampleCount = 4;
        } else {
            chosenSampleCount = 16;
        }
    }
    int supportedSampleCount = fGpu->caps()->getSampleCount(chosenSampleCount, config);
    return chosenSampleCount <= supportedSampleCount ? supportedSampleCount : 0;
}
// Wraps an existing proxy in the appropriate context type: render-target
// contexts for renderable proxies, texture contexts otherwise.
sk_sp<GrSurfaceContext> GrContextPriv::makeWrappedSurfaceContext(sk_sp<GrSurfaceProxy> proxy,
                                                                 sk_sp<SkColorSpace> colorSpace) {
    ASSERT_SINGLE_OWNER_PRIV

    if (proxy->asRenderTargetProxy()) {
        return this->drawingManager()->makeRenderTargetContext(std::move(proxy),
                                                               std::move(colorSpace), nullptr);
    } else {
        SkASSERT(proxy->asTextureProxy());
        return this->drawingManager()->makeTextureContext(std::move(proxy), std::move(colorSpace));
    }
}

// Creates a deferred (lazily-instantiated) surface context. Mip-mapped proxies
// must use exact backing fit.
sk_sp<GrSurfaceContext> GrContextPriv::makeDeferredSurfaceContext(const GrSurfaceDesc& dstDesc,
                                                                  GrMipMapped mipMapped,
                                                                  SkBackingFit fit,
                                                                  SkBudgeted isDstBudgeted) {

    sk_sp<GrTextureProxy> proxy;
    if (GrMipMapped::kNo == mipMapped) {
        proxy = this->proxyProvider()->createProxy(dstDesc, fit, isDstBudgeted);
    } else {
        SkASSERT(SkBackingFit::kExact == fit);
        proxy = this->proxyProvider()->createMipMapProxy(dstDesc, isDstBudgeted);
    }
    if (!proxy) {
        return nullptr;
    }

    return this->makeWrappedSurfaceContext(std::move(proxy), nullptr);
}

// Wraps a client-supplied backend texture in a texture context.
sk_sp<GrTextureContext> GrContextPriv::makeBackendTextureContext(const GrBackendTexture& tex,
                                                                 GrSurfaceOrigin origin,
                                                                 sk_sp<SkColorSpace> colorSpace) {
    ASSERT_SINGLE_OWNER_PRIV

    sk_sp<GrSurfaceProxy> proxy = this->proxyProvider()->createWrappedTextureProxy(tex, origin);
    if (!proxy) {
        return nullptr;
    }

    return this->drawingManager()->makeTextureContext(std::move(proxy), std::move(colorSpace));
}

// Wraps a client-supplied backend texture in a render-target context.
sk_sp<GrRenderTargetContext> GrContextPriv::makeBackendTextureRenderTargetContext(
                                                                   const GrBackendTexture& tex,
                                                                   GrSurfaceOrigin origin,
                                                                   int sampleCnt,
                                                                   sk_sp<SkColorSpace> colorSpace,
                                                                   const SkSurfaceProps* props) {
    ASSERT_SINGLE_OWNER_PRIV

    sk_sp<GrTextureProxy> proxy(this->proxyProvider()->createWrappedTextureProxy(tex, origin,
                                                                                 sampleCnt));
    if (!proxy) {
        return nullptr;
    }

    return this->drawingManager()->makeRenderTargetContext(std::move(proxy),
                                                           std::move(colorSpace), props);
}

// Wraps a client-supplied backend render target in a render-target context.
sk_sp<GrRenderTargetContext> GrContextPriv::makeBackendRenderTargetRenderTargetContext(
                                                const GrBackendRenderTarget& backendRT,
                                                GrSurfaceOrigin origin,
                                                sk_sp<SkColorSpace> colorSpace,
                                                const SkSurfaceProps* surfaceProps) {
    ASSERT_SINGLE_OWNER_PRIV

    sk_sp<GrSurfaceProxy> proxy = this->proxyProvider()->createWrappedRenderTargetProxy(backendRT,
                                                                                        origin);
    if (!proxy) {
        return nullptr;
    }

    return this->drawingManager()->makeRenderTargetContext(std::move(proxy),
                                                           std::move(colorSpace),
                                                           surfaceProps);
}

// Treats a client-supplied backend texture as a render target (without the
// texture being sampleable by Skia).
sk_sp<GrRenderTargetContext> GrContextPriv::makeBackendTextureAsRenderTargetRenderTargetContext(
                                                     const GrBackendTexture& tex,
                                                     GrSurfaceOrigin origin,
                                                     int sampleCnt,
                                                     sk_sp<SkColorSpace> colorSpace,
                                                     const SkSurfaceProps* props) {
    ASSERT_SINGLE_OWNER_PRIV

    sk_sp<GrSurfaceProxy> proxy(this->proxyProvider()->createWrappedRenderTargetProxy(tex, origin,
                                                                                      sampleCnt));
    if (!proxy) {
        return nullptr;
    }

    return this->drawingManager()->makeRenderTargetContext(std::move(proxy),
                                                           std::move(colorSpace),
                                                           props);
}

void GrContextPriv::addOnFlushCallbackObject(GrOnFlushCallbackObject* onFlushCBObject) {
    fContext->fDrawingManager->addOnFlushCallbackObject(onFlushCBObject);
}

// Maps a non-renderable config to a renderable substitute; kUnknown if none exists.
static inline GrPixelConfig GrPixelConfigFallback(GrPixelConfig config) {
    switch (config) {
        case kAlpha_8_GrPixelConfig:
        case kRGB_565_GrPixelConfig:
        case kRGBA_4444_GrPixelConfig:
        case kBGRA_8888_GrPixelConfig:
            return kRGBA_8888_GrPixelConfig;
        case kSBGRA_8888_GrPixelConfig:
            return kSRGBA_8888_GrPixelConfig;
        case kAlpha_half_GrPixelConfig:
            return kRGBA_half_GrPixelConfig;
        default:
            return kUnknown_GrPixelConfig;
    }
}

sk_sp<GrRenderTargetContext>
// Like makeDeferredRenderTargetContext(), but substitutes a renderable config
// (via GrPixelConfigFallback) when the requested one is not renderable.
GrContext::makeDeferredRenderTargetContextWithFallback(
                                                 SkBackingFit fit,
                                                 int width, int height,
                                                 GrPixelConfig config,
                                                 sk_sp<SkColorSpace> colorSpace,
                                                 int sampleCnt,
                                                 GrMipMapped mipMapped,
                                                 GrSurfaceOrigin origin,
                                                 const SkSurfaceProps* surfaceProps,
                                                 SkBudgeted budgeted) {
    if (!this->caps()->isConfigRenderable(config, sampleCnt > 0)) {
        config = GrPixelConfigFallback(config);
    }

    return this->makeDeferredRenderTargetContext(fit, width, height, config, std::move(colorSpace),
                                                 sampleCnt, mipMapped, origin, surfaceProps,
                                                 budgeted);
}

// Creates a render-target context backed by a deferred (lazily-instantiated)
// proxy. Returns nullptr if the context is abandoned or proxy creation fails.
sk_sp<GrRenderTargetContext> GrContext::makeDeferredRenderTargetContext(
                                                        SkBackingFit fit,
                                                        int width, int height,
                                                        GrPixelConfig config,
                                                        sk_sp<SkColorSpace> colorSpace,
                                                        int sampleCnt,
                                                        GrMipMapped mipMapped,
                                                        GrSurfaceOrigin origin,
                                                        const SkSurfaceProps* surfaceProps,
                                                        SkBudgeted budgeted) {
    if (this->abandoned()) {
        return nullptr;
    }

    GrSurfaceDesc desc;
    desc.fFlags = kRenderTarget_GrSurfaceFlag;
    desc.fOrigin = origin;
    desc.fWidth = width;
    desc.fHeight = height;
    desc.fConfig = config;
    desc.fSampleCnt = sampleCnt;

    sk_sp<GrTextureProxy> rtp;
    if (GrMipMapped::kNo == mipMapped) {
        rtp = fProxyProvider->createProxy(desc, fit, budgeted);
    } else {
        rtp = fProxyProvider->createMipMapProxy(desc, budgeted);
    }
    if (!rtp) {
        return nullptr;
    }

    sk_sp<GrRenderTargetContext> renderTargetContext(
        fDrawingManager->makeRenderTargetContext(std::move(rtp),
                                                 std::move(colorSpace),
                                                 surfaceProps));
    if (!renderTargetContext) {
        return nullptr;
    }

    // New targets start with undefined contents; discard avoids a needless load.
    renderTargetContext->discard();

    return renderTargetContext;
}

bool GrContext::abandoned() const {
    ASSERT_SINGLE_OWNER
    return fDrawingManager->wasAbandoned();
}

// Appends a premul -> unpremul conversion stage to fp. When
// useConfigConversionEffect is true the caller requires an exact round trip and
// must have verified validPMUPMConversionExists() beforehand.
std::unique_ptr<GrFragmentProcessor> GrContext::createPMToUPMEffect(
        std::unique_ptr<GrFragmentProcessor> fp, bool useConfigConversionEffect) {
    ASSERT_SINGLE_OWNER
    // We have specialized effects that guarantee round-trip conversion for some formats
    if (useConfigConversionEffect) {
        // We should have already called this->validPMUPMConversionExists() in this case
        SkASSERT(fDidTestPMConversions);
        // ...and it should have succeeded
        SkASSERT(this->validPMUPMConversionExists());

        return GrConfigConversionEffect::Make(std::move(fp), PMConversion::kToUnpremul);
    } else {
        // For everything else (sRGB, half-float, etc...), it doesn't make sense to try and
        // explicitly round the results. Just do the obvious, naive thing in the shader.
        return GrFragmentProcessor::UnpremulOutput(std::move(fp));
    }
}

// Appends an unpremul -> premul conversion stage to fp; mirror image of
// createPMToUPMEffect() above.
std::unique_ptr<GrFragmentProcessor> GrContext::createUPMToPMEffect(
        std::unique_ptr<GrFragmentProcessor> fp, bool useConfigConversionEffect) {
    ASSERT_SINGLE_OWNER
    // We have specialized effects that guarantee round-trip conversion for these formats
    if (useConfigConversionEffect) {
        // We should have already called this->validPMUPMConversionExists() in this case
        SkASSERT(fDidTestPMConversions);
        // ...and it should have succeeded
        SkASSERT(this->validPMUPMConversionExists());

        return GrConfigConversionEffect::Make(std::move(fp), PMConversion::kToPremul);
    } else {
        // For everything else (sRGB, half-float, etc...), it doesn't make sense to try and
        // explicitly round the results. Just do the obvious, naive thing in the shader.
        return GrFragmentProcessor::PremulOutput(std::move(fp));
    }
}

// Lazily runs (once per context) the GPU round-trip test for the PM<->UPM
// conversion effects and caches the result.
bool GrContext::validPMUPMConversionExists() {
    ASSERT_SINGLE_OWNER
    if (!fDidTestPMConversions) {
        fPMUPMConversionsRoundTrip = GrConfigConversionEffect::TestForPreservingPMConversions(this);
        fDidTestPMConversions = true;
    }

    // The PM<->UPM tests fail or succeed together so we only need to check one.
    return fPMUPMConversionsRoundTrip;
}

//////////////////////////////////////////////////////////////////////////////

// Either out-param may be null if the caller only wants the other limit.
void GrContext::getResourceCacheLimits(int* maxResources, size_t* maxResourceBytes) const {
    ASSERT_SINGLE_OWNER
    if (maxResources) {
        *maxResources = fResourceCache->getMaxResourceCount();
    }
    if (maxResourceBytes) {
        *maxResourceBytes = fResourceCache->getMaxResourceBytes();
    }
}

void GrContext::setResourceCacheLimits(int maxResources, size_t maxResourceBytes) {
    ASSERT_SINGLE_OWNER
    fResourceCache->setLimits(maxResources, maxResourceBytes);
}

//////////////////////////////////////////////////////////////////////////////

void GrContext::dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const {
    ASSERT_SINGLE_OWNER
    fResourceCache->dumpMemoryStatistics(traceMemoryDump);
}

//////////////////////////////////////////////////////////////////////////////

// Serializes backend, caps and GPU state as pretty-printed JSON.
// NOTE: definition continues past the end of this view (truncated here).
SkString GrContext::dump() const {
    SkDynamicMemoryWStream stream;
    SkJSONWriter writer(&stream, SkJSONWriter::Mode::kPretty);
    writer.beginObject();

    // Indexed by GrBackend; the static asserts pin the enum ordering.
    static const char* kBackendStr[] = {
        "Metal",
        "OpenGL",
        "Vulkan",
        "Mock",
    };
    GR_STATIC_ASSERT(0 == kMetal_GrBackend);
    GR_STATIC_ASSERT(1 == kOpenGL_GrBackend);
    GR_STATIC_ASSERT(2 == kVulkan_GrBackend);
    GR_STATIC_ASSERT(3 == kMock_GrBackend);
    writer.appendString("backend", kBackendStr[fBackend]);

    writer.appendName("caps");
    fCaps->dumpJSON(&writer);

    writer.appendName("gpu");
    fGpu->dumpJSON(&writer);

    // Flush JSON to the memory stream
    writer.endObject();
    writer.flush();

    // Null terminate the JSON data in the memory stream
    stream.write8(0);

    // Allocate a string big enough to hold all the data, then copy out of the stream
    SkString result(stream.bytesWritten());
stream.copyToAndReset(result.writable_str()); 1099 return result; 1100} 1101