/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrVkGpu.h"

#include "GrBackendSemaphore.h"
#include "GrBackendSurface.h"
#include "GrContextOptions.h"
#include "GrGeometryProcessor.h"
#include "GrGpuResourceCacheAccess.h"
#include "GrMesh.h"
#include "GrPipeline.h"
#include "GrRenderTargetPriv.h"
#include "GrSurfacePriv.h"
#include "GrTexturePriv.h"

#include "GrVkCommandBuffer.h"
#include "GrVkGpuCommandBuffer.h"
#include "GrVkImage.h"
#include "GrVkIndexBuffer.h"
#include "GrVkMemory.h"
#include "GrVkPipeline.h"
#include "GrVkPipelineState.h"
#include "GrVkRenderPass.h"
#include "GrVkResourceProvider.h"
#include "GrVkSemaphore.h"
#include "GrVkTexelBuffer.h"
#include "GrVkTexture.h"
#include "GrVkTextureRenderTarget.h"
#include "GrVkTransferBuffer.h"
#include "GrVkVertexBuffer.h"

#include "SkConvertPixels.h"
#include "SkMipMap.h"

#include "vk/GrVkInterface.h"
#include "vk/GrVkTypes.h"

#include "SkSLCompiler.h"

#if !defined(SK_BUILD_FOR_WIN)
#include <unistd.h>
#endif // !defined(SK_BUILD_FOR_WIN)

#define VK_CALL(X) GR_VK_CALL(this->vkInterface(), X)
#define VK_CALL_RET(RET, X) GR_VK_CALL_RET(this->vkInterface(), RET, X)
#define VK_CALL_ERRCHECK(X) GR_VK_CALL_ERRCHECK(this->vkInterface(), X)

#ifdef SK_ENABLE_VK_LAYERS
VKAPI_ATTR VkBool32 VKAPI_CALL DebugReportCallback(
        VkDebugReportFlagsEXT      flags,
        VkDebugReportObjectTypeEXT objectType,
        uint64_t                   object,
        size_t                     location,
        int32_t                    messageCode,
        const char*                pLayerPrefix,
        const char*                pMessage,
        void*                      pUserData) {
    if (flags & VK_DEBUG_REPORT_ERROR_BIT_EXT) {
        SkDebugf("Vulkan error [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
        return VK_TRUE; // skip further layers
    } else if (flags & VK_DEBUG_REPORT_WARNING_BIT_EXT) {
        SkDebugf("Vulkan warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
    } else if (flags & VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT) {
        SkDebugf("Vulkan perf warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
    } else {
        SkDebugf("Vulkan info/debug [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
    }
    return VK_FALSE;
}
#endif

GrGpu* GrVkGpu::Create(GrBackendContext backendContext, const GrContextOptions& options,
                       GrContext* context) {
    const GrVkBackendContext* vkBackendContext =
            reinterpret_cast<const GrVkBackendContext*>(backendContext);
    if (!vkBackendContext) {
        return nullptr;
    } else {
        vkBackendContext->ref();
    }

    if (!vkBackendContext->fInterface->validate(vkBackendContext->fExtensions)) {
        vkBackendContext->unref();  // balance the ref taken above so the context doesn't leak
        return nullptr;
    }

    return new GrVkGpu(context, options, vkBackendContext);
}

////////////////////////////////////////////////////////////////////////////////

GrVkGpu::GrVkGpu(GrContext* context, const GrContextOptions& options,
                 const GrVkBackendContext* backendCtx)
        : INHERITED(context)
        , fDevice(backendCtx->fDevice)
        , fQueue(backendCtx->fQueue)
        , fResourceProvider(this)
        , fDisconnected(false) {
    fBackendContext.reset(backendCtx);

#ifdef SK_ENABLE_VK_LAYERS
    fCallback = VK_NULL_HANDLE;
    if (backendCtx->fExtensions & kEXT_debug_report_GrVkExtensionFlag) {
        // Setup callback creation information
        VkDebugReportCallbackCreateInfoEXT callbackCreateInfo;
        callbackCreateInfo.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT;
        callbackCreateInfo.pNext = nullptr;
        callbackCreateInfo.flags = VK_DEBUG_REPORT_ERROR_BIT_EXT |
                                   VK_DEBUG_REPORT_WARNING_BIT_EXT |
                                   //VK_DEBUG_REPORT_INFORMATION_BIT_EXT |
                                   //VK_DEBUG_REPORT_DEBUG_BIT_EXT |
                                   VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT;
        callbackCreateInfo.pfnCallback = &DebugReportCallback;
        callbackCreateInfo.pUserData = nullptr;

        // Register the callback
        GR_VK_CALL_ERRCHECK(this->vkInterface(), CreateDebugReportCallbackEXT(
                backendCtx->fInstance, &callbackCreateInfo, nullptr, &fCallback));
    }
#endif

    fCompiler = new SkSL::Compiler();

    fVkCaps.reset(new GrVkCaps(options, this->vkInterface(), backendCtx->fPhysicalDevice,
                               backendCtx->fFeatures, backendCtx->fExtensions));
    fCaps.reset(SkRef(fVkCaps.get()));

    VK_CALL(GetPhysicalDeviceMemoryProperties(backendCtx->fPhysicalDevice, &fPhysDevMemProps));

    const VkCommandPoolCreateInfo cmdPoolInfo = {
        VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,       // sType
        nullptr,                                          // pNext
        VK_COMMAND_POOL_CREATE_TRANSIENT_BIT |
        VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT,  // CmdPoolCreateFlags
        backendCtx->fGraphicsQueueIndex,                  // queueFamilyIndex
    };
    GR_VK_CALL_ERRCHECK(this->vkInterface(), CreateCommandPool(fDevice, &cmdPoolInfo, nullptr,
                                                               &fCmdPool));

    // must call this after creating the CommandPool
    fResourceProvider.init();
    fCurrentCmdBuffer = fResourceProvider.findOrCreatePrimaryCommandBuffer();
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->begin(this);

    // set up our heaps
    fHeaps[kLinearImage_Heap].reset(new GrVkHeap(this, GrVkHeap::kSubAlloc_Strategy, 16*1024*1024));
    fHeaps[kOptimalImage_Heap].reset(new GrVkHeap(this, GrVkHeap::kSubAlloc_Strategy, 64*1024*1024));
    fHeaps[kSmallOptimalImage_Heap].reset(new GrVkHeap(this, GrVkHeap::kSubAlloc_Strategy, 2*1024*1024));
    fHeaps[kVertexBuffer_Heap].reset(new GrVkHeap(this, GrVkHeap::kSingleAlloc_Strategy, 0));
    fHeaps[kIndexBuffer_Heap].reset(new GrVkHeap(this, GrVkHeap::kSingleAlloc_Strategy, 0));
    fHeaps[kUniformBuffer_Heap].reset(new GrVkHeap(this, GrVkHeap::kSubAlloc_Strategy, 256*1024));
    fHeaps[kTexelBuffer_Heap].reset(new GrVkHeap(this, GrVkHeap::kSingleAlloc_Strategy, 0));
    fHeaps[kCopyReadBuffer_Heap].reset(new GrVkHeap(this, GrVkHeap::kSingleAlloc_Strategy, 0));
    fHeaps[kCopyWriteBuffer_Heap].reset(new GrVkHeap(this, GrVkHeap::kSubAlloc_Strategy, 16*1024*1024));
}

void GrVkGpu::destroyResources() {
    if (fCurrentCmdBuffer) {
        fCurrentCmdBuffer->end(this);
        fCurrentCmdBuffer->unref(this);
    }

    // wait for all commands to finish
    fResourceProvider.checkCommandBuffers();
    VkResult res = VK_CALL(QueueWaitIdle(fQueue));

    // On Windows, calls to QueueWaitIdle sometimes return before the fences on the command
    // buffers are actually signaled, even though the work has completed. This fires an assert
    // when the command buffers are destroyed. Since this only seems to happen on Windows, we
    // add a sleep to make sure the fences signal.
#ifdef SK_DEBUG
    if (this->vkCaps().mustSleepOnTearDown()) {
#if defined(SK_BUILD_FOR_WIN)
        Sleep(10); // In milliseconds
#else
        sleep(1);  // In seconds
#endif
    }
#endif

#ifdef SK_DEBUG
    SkASSERT(VK_SUCCESS == res || VK_ERROR_DEVICE_LOST == res);
#endif

    for (int i = 0; i < fSemaphoresToWaitOn.count(); ++i) {
        fSemaphoresToWaitOn[i]->unref(this);
    }
    fSemaphoresToWaitOn.reset();

    for (int i = 0; i < fSemaphoresToSignal.count(); ++i) {
        fSemaphoresToSignal[i]->unref(this);
    }
    fSemaphoresToSignal.reset();

    fCopyManager.destroyResources(this);

    // must call this just before we destroy the command pool and VkDevice
    fResourceProvider.destroyResources(VK_ERROR_DEVICE_LOST == res);

    if (fCmdPool != VK_NULL_HANDLE) {
        VK_CALL(DestroyCommandPool(fDevice, fCmdPool, nullptr));
    }

#ifdef SK_ENABLE_VK_LAYERS
    if (fCallback) {
        VK_CALL(DestroyDebugReportCallbackEXT(fBackendContext->fInstance, fCallback, nullptr));
    }
#endif
}

GrVkGpu::~GrVkGpu() {
    if (!fDisconnected) {
        this->destroyResources();
    }
    delete fCompiler;
}

void GrVkGpu::disconnect(DisconnectType type) {
    INHERITED::disconnect(type);
    if (!fDisconnected) {
        if (DisconnectType::kCleanup == type) {
            this->destroyResources();
        } else {
            fCurrentCmdBuffer->unrefAndAbandon();
            for (int i = 0; i < fSemaphoresToWaitOn.count(); ++i) {
                fSemaphoresToWaitOn[i]->unrefAndAbandon();
            }
            for (int i = 0; i < fSemaphoresToSignal.count(); ++i) {
                fSemaphoresToSignal[i]->unrefAndAbandon();
            }
            fCopyManager.abandonResources();

            // must call this just before we destroy the command pool and VkDevice
            fResourceProvider.abandonResources();
        }
        fSemaphoresToWaitOn.reset();
        fSemaphoresToSignal.reset();
#ifdef SK_ENABLE_VK_LAYERS
        fCallback = VK_NULL_HANDLE;
#endif
        fCurrentCmdBuffer = nullptr;
        fCmdPool = VK_NULL_HANDLE;
        fDisconnected = true;
    }
}

///////////////////////////////////////////////////////////////////////////////

GrGpuCommandBuffer* GrVkGpu::createCommandBuffer(
        const GrGpuCommandBuffer::LoadAndStoreInfo& colorInfo,
        const GrGpuCommandBuffer::LoadAndStoreInfo& stencilInfo) {
    return new GrVkGpuCommandBuffer(this, colorInfo, stencilInfo);
}

void GrVkGpu::submitCommandBuffer(SyncQueue sync) {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->end(this);

    fCurrentCmdBuffer->submitToQueue(this, fQueue, sync, fSemaphoresToSignal, fSemaphoresToWaitOn);

    for (int i = 0; i < fSemaphoresToWaitOn.count(); ++i) {
        fSemaphoresToWaitOn[i]->unref(this);
    }
    fSemaphoresToWaitOn.reset();
    for (int i = 0; i < fSemaphoresToSignal.count(); ++i) {
        fSemaphoresToSignal[i]->unref(this);
    }
    fSemaphoresToSignal.reset();

    fResourceProvider.checkCommandBuffers();

    // Release old command buffer and create a new one
    fCurrentCmdBuffer->unref(this);
    fCurrentCmdBuffer = fResourceProvider.findOrCreatePrimaryCommandBuffer();
    SkASSERT(fCurrentCmdBuffer);

    fCurrentCmdBuffer->begin(this);
}

///////////////////////////////////////////////////////////////////////////////
GrBuffer* GrVkGpu::onCreateBuffer(size_t size, GrBufferType type, GrAccessPattern accessPattern,
                                  const void* data) {
    GrBuffer* buff;
    switch (type) {
        case kVertex_GrBufferType:
            SkASSERT(kDynamic_GrAccessPattern == accessPattern ||
                     kStatic_GrAccessPattern == accessPattern);
            buff = GrVkVertexBuffer::Create(this, size, kDynamic_GrAccessPattern == accessPattern);
            break;
        case kIndex_GrBufferType:
            SkASSERT(kDynamic_GrAccessPattern == accessPattern ||
                     kStatic_GrAccessPattern == accessPattern);
            buff = GrVkIndexBuffer::Create(this, size, kDynamic_GrAccessPattern == accessPattern);
            break;
        case kXferCpuToGpu_GrBufferType:
            SkASSERT(kDynamic_GrAccessPattern == accessPattern ||
                     kStream_GrAccessPattern == accessPattern);
            buff = GrVkTransferBuffer::Create(this, size, GrVkBuffer::kCopyRead_Type);
            break;
        case kXferGpuToCpu_GrBufferType:
            SkASSERT(kDynamic_GrAccessPattern == accessPattern ||
                     kStream_GrAccessPattern == accessPattern);
            buff = GrVkTransferBuffer::Create(this, size, GrVkBuffer::kCopyWrite_Type);
            break;
        case kTexel_GrBufferType:
            SkASSERT(kDynamic_GrAccessPattern == accessPattern ||
                     kStatic_GrAccessPattern == accessPattern);
            buff = GrVkTexelBuffer::Create(this, size, kDynamic_GrAccessPattern == accessPattern);
            break;
        case kDrawIndirect_GrBufferType:
            SkFAIL("DrawIndirect Buffers not supported in vulkan backend.");
            return nullptr;
        default:
            SkFAIL("Unknown buffer type.");
            return nullptr;
    }
    if (data && buff) {
        buff->updateData(data, size);
    }
    return buff;
}

////////////////////////////////////////////////////////////////////////////////
bool GrVkGpu::onGetWritePixelsInfo(GrSurface* dstSurface, int width, int height,
                                   GrPixelConfig srcConfig, DrawPreference* drawPreference,
                                   WritePixelTempDrawInfo* tempDrawInfo) {
    GrRenderTarget* renderTarget = dstSurface->asRenderTarget();

    // Start off assuming no swizzling
    tempDrawInfo->fSwizzle = GrSwizzle::RGBA();
    tempDrawInfo->fWriteConfig = srcConfig;

    // These settings we will always want if a temp draw is performed. Initially set the config
    // to srcConfig, though that may be modified if we decide to do a R/B swap
    tempDrawInfo->fTempSurfaceDesc.fFlags = kNone_GrSurfaceFlags;
    tempDrawInfo->fTempSurfaceDesc.fConfig = srcConfig;
    tempDrawInfo->fTempSurfaceDesc.fWidth = width;
    tempDrawInfo->fTempSurfaceDesc.fHeight = height;
    tempDrawInfo->fTempSurfaceDesc.fSampleCnt = 0;
    tempDrawInfo->fTempSurfaceDesc.fOrigin = kTopLeft_GrSurfaceOrigin;

    if (dstSurface->config() == srcConfig) {
        // We only support writing pixels to textures. Forcing a draw lets us write to pure RTs.
        if (!dstSurface->asTexture()) {
            ElevateDrawPreference(drawPreference, kRequireDraw_DrawPreference);
        }
        // If the dst is MSAA, we have to draw, or we'll just be writing to the resolve target.
        if (renderTarget && renderTarget->numColorSamples() > 1) {
            ElevateDrawPreference(drawPreference, kRequireDraw_DrawPreference);
        }
        return true;
    }

    // Any config change requires a draw
    ElevateDrawPreference(drawPreference, kRequireDraw_DrawPreference);

    bool configsAreRBSwaps = GrPixelConfigSwapRAndB(srcConfig) == dstSurface->config();

    if (!this->vkCaps().isConfigTexturable(srcConfig) && configsAreRBSwaps) {
        tempDrawInfo->fTempSurfaceDesc.fConfig = dstSurface->config();
        tempDrawInfo->fSwizzle = GrSwizzle::BGRA();
        tempDrawInfo->fWriteConfig = dstSurface->config();
    }
    return true;
}

bool GrVkGpu::onWritePixels(GrSurface* surface,
                            int left, int top, int width, int height,
                            GrPixelConfig config,
                            const GrMipLevel texels[], int mipLevelCount) {
    GrVkTexture* vkTex = static_cast<GrVkTexture*>(surface->asTexture());
    if (!vkTex) {
        return false;
    }

    // Make sure we have at least the base level
    if (!mipLevelCount || !texels[0].fPixels) {
        return false;
    }

    // We assume Vulkan doesn't do sRGB <-> linear conversions when reading and writing pixels.
    if (GrPixelConfigIsSRGB(surface->config()) != GrPixelConfigIsSRGB(config)) {
        return false;
    }

    bool success = false;
    bool linearTiling = vkTex->isLinearTiled();
    if (linearTiling) {
        if (mipLevelCount > 1) {
            SkDebugf("Can't upload mipmap data to linear tiled texture");
            return false;
        }
        if (VK_IMAGE_LAYOUT_PREINITIALIZED != vkTex->currentLayout()) {
            // Need to change the layout to general in order to perform a host write
            vkTex->setImageLayout(this,
                                  VK_IMAGE_LAYOUT_GENERAL,
                                  VK_ACCESS_HOST_WRITE_BIT,
                                  VK_PIPELINE_STAGE_HOST_BIT,
                                  false);
            this->submitCommandBuffer(kForce_SyncQueue);
        }
        success = this->uploadTexDataLinear(vkTex, left, top, width, height, config,
                                            texels[0].fPixels, texels[0].fRowBytes);
    } else {
        int currentMipLevels = vkTex->texturePriv().maxMipMapLevel() + 1;
        if (mipLevelCount > currentMipLevels) {
            if (!vkTex->reallocForMipmap(this, mipLevelCount)) {
                return false;
            }
        }
        success = this->uploadTexDataOptimal(vkTex, left, top, width, height, config,
                                             texels, mipLevelCount);
    }

    return success;
}

bool GrVkGpu::onTransferPixels(GrTexture* texture,
                               int left, int top, int width, int height,
                               GrPixelConfig config, GrBuffer* transferBuffer,
                               size_t bufferOffset, size_t rowBytes) {
    // Vulkan only supports 4-byte aligned offsets (mask 0x3 rejects any offset
    // that isn't a multiple of 4)
    if (SkToBool(bufferOffset & 0x3)) {
        return false;
    }
    GrVkTexture* vkTex = static_cast<GrVkTexture*>(texture);
    if (!vkTex) {
        return false;
    }
    GrVkTransferBuffer* vkBuffer = static_cast<GrVkTransferBuffer*>(transferBuffer);
    if (!vkBuffer) {
        return false;
    }

    // We assume Vulkan doesn't do sRGB <-> linear conversions when reading and writing pixels.
    if (GrPixelConfigIsSRGB(texture->config()) != GrPixelConfigIsSRGB(config)) {
        return false;
    }

    SkDEBUGCODE(
        SkIRect subRect = SkIRect::MakeXYWH(left, top, width, height);
        SkIRect bounds = SkIRect::MakeWH(texture->width(), texture->height());
        SkASSERT(bounds.contains(subRect));
    )
    size_t bpp = GrBytesPerPixel(config);
    if (rowBytes == 0) {
        rowBytes = bpp * width;
    }

    // Set up copy region
    VkBufferImageCopy region;
    memset(&region, 0, sizeof(VkBufferImageCopy));
    region.bufferOffset = bufferOffset;
    region.bufferRowLength = (uint32_t)(rowBytes/bpp);
    region.bufferImageHeight = 0;
    region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    region.imageOffset = { left, top, 0 };
    region.imageExtent = { (uint32_t)width, (uint32_t)height, 1 };

    // Change layout of our target so it can be copied to
    vkTex->setImageLayout(this,
                          VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                          VK_ACCESS_TRANSFER_WRITE_BIT,
                          VK_PIPELINE_STAGE_TRANSFER_BIT,
                          false);

    // Copy the buffer to the image
    fCurrentCmdBuffer->copyBufferToImage(this,
                                         vkBuffer,
                                         vkTex,
                                         VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                         1,
                                         &region);

    vkTex->texturePriv().dirtyMipMaps(true);
    return true;
}

void GrVkGpu::resolveImage(GrSurface* dst, GrVkRenderTarget* src, const SkIRect& srcRect,
                           const SkIPoint& dstPoint) {
    SkASSERT(dst);
    SkASSERT(src && src->numColorSamples() > 1 && src->msaaImage());

    if (this->vkCaps().mustSubmitCommandsBeforeCopyOp()) {
        this->submitCommandBuffer(GrVkGpu::kSkip_SyncQueue);
    }

    // Flip rect if necessary
    SkIRect srcVkRect = srcRect;
    int32_t dstY = dstPoint.fY;

    if (kBottomLeft_GrSurfaceOrigin == src->origin()) {
        SkASSERT(kBottomLeft_GrSurfaceOrigin == dst->origin());
        srcVkRect.fTop = src->height() - srcRect.fBottom;
        srcVkRect.fBottom = src->height() - srcRect.fTop;
        dstY = dst->height() - dstPoint.fY - srcVkRect.height();
    }

    VkImageResolve resolveInfo;
    resolveInfo.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    resolveInfo.srcOffset = { srcVkRect.fLeft, srcVkRect.fTop, 0 };
    resolveInfo.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    resolveInfo.dstOffset = { dstPoint.fX, dstY, 0 };
    resolveInfo.extent = { (uint32_t)srcVkRect.width(), (uint32_t)srcVkRect.height(), 1 };

    GrVkImage* dstImage;
    GrRenderTarget* dstRT = dst->asRenderTarget();
    if (dstRT) {
        GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(dstRT);
        dstImage = vkRT;
    } else {
        SkASSERT(dst->asTexture());
        dstImage = static_cast<GrVkTexture*>(dst->asTexture());
    }
    dstImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                             VK_ACCESS_TRANSFER_WRITE_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    src->msaaImage()->setImageLayout(this,
                                     VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                     VK_ACCESS_TRANSFER_READ_BIT,
                                     VK_PIPELINE_STAGE_TRANSFER_BIT,
                                     false);

    fCurrentCmdBuffer->resolveImage(this, *src->msaaImage(), *dstImage, 1, &resolveInfo);
}

void GrVkGpu::internalResolveRenderTarget(GrRenderTarget* target, bool requiresSubmit) {
    if (target->needsResolve()) {
        SkASSERT(target->numColorSamples() > 1);
        GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(target);
        SkASSERT(rt->msaaImage());

        const SkIRect& srcRect = rt->getResolveRect();

        this->resolveImage(target, rt, srcRect, SkIPoint::Make(srcRect.fLeft, srcRect.fTop));

        rt->flagAsResolved();

        if (requiresSubmit) {
            this->submitCommandBuffer(kSkip_SyncQueue);
        }
    }
}

bool GrVkGpu::uploadTexDataLinear(GrVkTexture* tex,
                                  int left, int top, int width, int height,
                                  GrPixelConfig dataConfig,
                                  const void* data,
                                  size_t rowBytes) {
    SkASSERT(data);
    SkASSERT(tex->isLinearTiled());

    SkDEBUGCODE(
        SkIRect subRect = SkIRect::MakeXYWH(left, top, width, height);
        SkIRect bounds = SkIRect::MakeWH(tex->width(), tex->height());
        SkASSERT(bounds.contains(subRect));
    )
    size_t bpp = GrBytesPerPixel(dataConfig);
    size_t trimRowBytes = width * bpp;
    if (!rowBytes) {
        rowBytes = trimRowBytes;
    }

    SkASSERT(VK_IMAGE_LAYOUT_PREINITIALIZED == tex->currentLayout() ||
             VK_IMAGE_LAYOUT_GENERAL == tex->currentLayout());
    const VkImageSubresource subres = {
        VK_IMAGE_ASPECT_COLOR_BIT,
        0,  // mipLevel
        0,  // arraySlice
    };
    VkSubresourceLayout layout;
    VkResult err;

    const GrVkInterface* interface = this->vkInterface();

    GR_VK_CALL(interface, GetImageSubresourceLayout(fDevice,
                                                    tex->image(),
                                                    &subres,
                                                    &layout));

    int texTop = kBottomLeft_GrSurfaceOrigin == tex->origin() ? tex->height() - top - height : top;
    const GrVkAlloc& alloc = tex->alloc();
    VkDeviceSize offset = alloc.fOffset + texTop*layout.rowPitch + left*bpp;
    VkDeviceSize size = height*layout.rowPitch;
    void* mapPtr;
    err = GR_VK_CALL(interface, MapMemory(fDevice, alloc.fMemory, offset, size, 0, &mapPtr));
    if (err) {
        return false;
    }

    if (kBottomLeft_GrSurfaceOrigin == tex->origin()) {
        // copy into buffer by rows
        const char* srcRow = reinterpret_cast<const char*>(data);
        char* dstRow = reinterpret_cast<char*>(mapPtr) + (height - 1)*layout.rowPitch;
        for (int y = 0; y < height; y++) {
            memcpy(dstRow, srcRow, trimRowBytes);
            srcRow += rowBytes;
            dstRow -= layout.rowPitch;
        }
    } else {
        SkRectMemcpy(mapPtr, static_cast<size_t>(layout.rowPitch), data, rowBytes, trimRowBytes,
                     height);
    }

    GrVkMemory::FlushMappedAlloc(this, alloc);
    GR_VK_CALL(interface, UnmapMemory(fDevice, alloc.fMemory));

    return true;
}

bool GrVkGpu::uploadTexDataOptimal(GrVkTexture* tex,
                                   int left, int top, int width, int height,
                                   GrPixelConfig dataConfig,
                                   const GrMipLevel texels[], int mipLevelCount) {
    SkASSERT(!tex->isLinearTiled());
    // The assumption is either that we have no mipmaps, or that our rect is the entire texture
    SkASSERT(1 == mipLevelCount ||
             (0 == left && 0 == top && width == tex->width() && height == tex->height()));

    // We assume that if the texture has mip levels, we either upload to all the levels or just the
    // first.
    SkASSERT(1 == mipLevelCount || mipLevelCount == (tex->texturePriv().maxMipMapLevel() + 1));

    if (width == 0 || height == 0) {
        return false;
    }

    SkASSERT(this->caps()->isConfigTexturable(tex->config()));
    size_t bpp = GrBytesPerPixel(dataConfig);

    // texels is const.
    // But we may need to adjust the fPixels ptr based on the copyRect, or fRowBytes.
    // Because of this we need to make a non-const shallow copy of texels.
    SkAutoTMalloc<GrMipLevel> texelsShallowCopy;

    if (mipLevelCount) {
        texelsShallowCopy.reset(mipLevelCount);
        memcpy(texelsShallowCopy.get(), texels, mipLevelCount*sizeof(GrMipLevel));
    }

    for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; ++currentMipLevel) {
        SkASSERT(texelsShallowCopy[currentMipLevel].fPixels);
    }

    // Determine whether we need to flip when we copy into the buffer
    bool flipY = (kBottomLeft_GrSurfaceOrigin == tex->origin() && mipLevelCount);

    SkTArray<size_t> individualMipOffsets(mipLevelCount);
    individualMipOffsets.push_back(0);
    size_t combinedBufferSize = width * bpp * height;
    int currentWidth = width;
    int currentHeight = height;
    // The alignment must be at least 4 bytes and a multiple of the bytes per pixel of the image
    // config. This works with the assumption that the bytes per pixel is always a power of 2.
    SkASSERT((bpp & (bpp - 1)) == 0);
    const size_t alignmentMask = 0x3 | (bpp - 1);
    for (int currentMipLevel = 1; currentMipLevel < mipLevelCount; currentMipLevel++) {
        currentWidth = SkTMax(1, currentWidth/2);
        currentHeight = SkTMax(1, currentHeight/2);

        const size_t trimmedSize = currentWidth * bpp * currentHeight;
        const size_t alignmentDiff = combinedBufferSize & alignmentMask;
        if (alignmentDiff != 0) {
            combinedBufferSize += alignmentMask - alignmentDiff + 1;
        }
        individualMipOffsets.push_back(combinedBufferSize);
        combinedBufferSize += trimmedSize;
    }

    // allocate buffer to hold our mip data
    GrVkTransferBuffer* transferBuffer =
            GrVkTransferBuffer::Create(this, combinedBufferSize, GrVkBuffer::kCopyRead_Type);
    if (!transferBuffer) {
        return false;
    }

    char* buffer = (char*) transferBuffer->map();
    SkTArray<VkBufferImageCopy> regions(mipLevelCount);

    currentWidth = width;
    currentHeight = height;
    int layerHeight = tex->height();
    for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
        SkASSERT(1 == mipLevelCount || currentHeight == layerHeight);
        const size_t trimRowBytes = currentWidth * bpp;
        const size_t rowBytes = texelsShallowCopy[currentMipLevel].fRowBytes
                                        ? texelsShallowCopy[currentMipLevel].fRowBytes
                                        : trimRowBytes;

        // copy data into the buffer, skipping the trailing bytes
        char* dst = buffer + individualMipOffsets[currentMipLevel];
        const char* src = (const char*)texelsShallowCopy[currentMipLevel].fPixels;
        if (flipY) {
            src += (currentHeight - 1) * rowBytes;
            for (int y = 0; y < currentHeight; y++) {
                memcpy(dst, src, trimRowBytes);
                src -= rowBytes;
                dst += trimRowBytes;
            }
        } else {
            SkRectMemcpy(dst, trimRowBytes, src, rowBytes, trimRowBytes, currentHeight);
        }

        VkBufferImageCopy& region = regions.push_back();
        memset(&region, 0, sizeof(VkBufferImageCopy));
        region.bufferOffset = transferBuffer->offset() + individualMipOffsets[currentMipLevel];
        region.bufferRowLength = currentWidth;
        region.bufferImageHeight = currentHeight;
        region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, SkToU32(currentMipLevel), 0, 1 };
        region.imageOffset = { left, flipY ? layerHeight - top - currentHeight : top, 0 };
        region.imageExtent = { (uint32_t)currentWidth, (uint32_t)currentHeight, 1 };

        currentWidth = SkTMax(1, currentWidth/2);
        currentHeight = SkTMax(1, currentHeight/2);
        layerHeight = currentHeight;
    }

    // no need to flush non-coherent memory, unmap will do that for us
    transferBuffer->unmap();

    // Change layout of our target so it can be copied to
    tex->setImageLayout(this,
                        VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                        VK_ACCESS_TRANSFER_WRITE_BIT,
                        VK_PIPELINE_STAGE_TRANSFER_BIT,
                        false);

    // Copy the buffer to the image
    fCurrentCmdBuffer->copyBufferToImage(this,
                                         transferBuffer,
                                         tex,
                                         VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                         regions.count(),
                                         regions.begin());
    transferBuffer->unref();
    if (1 == mipLevelCount) {
        tex->texturePriv().dirtyMipMaps(true);
    }

    return true;
}

////////////////////////////////////////////////////////////////////////////////
sk_sp<GrTexture> GrVkGpu::onCreateTexture(const GrSurfaceDesc& desc, SkBudgeted budgeted,
                                          const GrMipLevel texels[], int mipLevelCount) {
    bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrSurfaceFlag);

    VkFormat pixelFormat;
    if (!GrPixelConfigToVkFormat(desc.fConfig, &pixelFormat)) {
        return nullptr;
    }

    if (!fVkCaps->isConfigTexturable(desc.fConfig)) {
        return nullptr;
    }

    if (renderTarget && !fVkCaps->isConfigRenderable(desc.fConfig, false)) {
        return nullptr;
    }

    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT;
    if (renderTarget) {
        usageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
    }

    // For now we will set the VK_IMAGE_USAGE_TRANSFER_DESTINATION_BIT and
    // VK_IMAGE_USAGE_TRANSFER_SOURCE_BIT on every texture since we do not know whether or not we
    // will be using this texture in some copy or not. Also this assumes, as is the current case,
    // that all render targets in vulkan are also textures. If we change this practice of setting
    // both bits, we must make sure to set the destination bit if we are uploading srcData to the
    // texture.
    usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;

    // This ImageDesc refers to the texture that will be read by the client. Thus even if msaa is
    // requested, this ImageDesc describes the resolved texture. Therefore we always have samples
    // set to 1.
    int mipLevels = !mipLevelCount ? 1 : mipLevelCount;
    GrVkImage::ImageDesc imageDesc;
    imageDesc.fImageType = VK_IMAGE_TYPE_2D;
    imageDesc.fFormat = pixelFormat;
    imageDesc.fWidth = desc.fWidth;
    imageDesc.fHeight = desc.fHeight;
    imageDesc.fLevels = mipLevels;
    imageDesc.fSamples = 1;
    imageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
    imageDesc.fUsageFlags = usageFlags;
    imageDesc.fMemProps = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;

    sk_sp<GrVkTexture> tex;
    if (renderTarget) {
        tex = GrVkTextureRenderTarget::CreateNewTextureRenderTarget(this, budgeted, desc,
                                                                    imageDesc);
    } else {
        tex = GrVkTexture::CreateNewTexture(this, budgeted, desc, imageDesc);
    }

    if (!tex) {
        return nullptr;
    }

    if (mipLevelCount) {
        SkASSERT(texels[0].fPixels);
        if (!this->uploadTexDataOptimal(tex.get(), 0, 0, desc.fWidth, desc.fHeight, desc.fConfig,
                                        texels, mipLevelCount)) {
            tex->unref();
            return nullptr;
        }
    }

    if (desc.fFlags & kPerformInitialClear_GrSurfaceFlag) {
        VkClearColorValue zeroClearColor;
        memset(&zeroClearColor, 0, sizeof(zeroClearColor));
        VkImageSubresourceRange range;
        range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
        range.baseArrayLayer = 0;
        range.baseMipLevel = 0;
        range.layerCount = 1;
        range.levelCount = 1;
        tex->setImageLayout(this, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                            VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, false);
        this->currentCommandBuffer()->clearColorImage(this, tex.get(), &zeroClearColor, 1, &range);
    }
    return tex;
}

////////////////////////////////////////////////////////////////////////////////

bool GrVkGpu::updateBuffer(GrVkBuffer* buffer, const void* src,
                           VkDeviceSize offset, VkDeviceSize size) {
    // Update the buffer
    fCurrentCmdBuffer->updateBuffer(this, buffer, offset, size, src);

    return true;
}

////////////////////////////////////////////////////////////////////////////////

static GrSurfaceOrigin resolve_origin(GrSurfaceOrigin origin) {
    // By default, all textures in Vk use TopLeft
    if (kDefault_GrSurfaceOrigin == origin) {
        return kTopLeft_GrSurfaceOrigin;
    } else {
        return origin;
    }
}

sk_sp<GrTexture> GrVkGpu::onWrapBackendTexture(const GrBackendTexture& backendTex,
                                               GrSurfaceOrigin origin,
                                               GrBackendTextureFlags flags,
                                               int sampleCnt,
                                               GrWrapOwnership ownership) {
    const GrVkImageInfo* info = backendTex.getVkImageInfo();
    if (!info) {
        return nullptr;
    }

    int maxSize = this->caps()->maxTextureSize();
    if (backendTex.width() > maxSize || backendTex.height() > maxSize) {
        return nullptr;
    }

    if (VK_NULL_HANDLE == info->fImage || VK_NULL_HANDLE == info->fAlloc.fMemory) {
        return nullptr;
    }

    SkASSERT(backendTex.config() == GrVkFormatToPixelConfig(info->fFormat));

    GrSurfaceDesc surfDesc;
    // next line relies on GrBackendTextureFlags matching GrTexture's
    surfDesc.fFlags = (GrSurfaceFlags)flags;
    surfDesc.fWidth = backendTex.width();
    surfDesc.fHeight = backendTex.height();
    surfDesc.fConfig = backendTex.config();
    surfDesc.fSampleCnt = this->caps()->getSampleCount(sampleCnt, backendTex.config());
    bool renderTarget = SkToBool(flags & kRenderTarget_GrBackendTextureFlag);
    // In GL, Chrome assumes all textures are BottomLeft
    // In VK, we don't have this restriction
    surfDesc.fOrigin = resolve_origin(origin);

    if (!renderTarget) {
        return GrVkTexture::MakeWrappedTexture(this, surfDesc, ownership, info);
    }
    return GrVkTextureRenderTarget::MakeWrappedTextureRenderTarget(this, surfDesc, ownership,
                                                                   info);
}

sk_sp<GrRenderTarget> GrVkGpu::onWrapBackendRenderTarget(const GrBackendRenderTarget& backendRT,
                                                         GrSurfaceOrigin origin) {
    // Currently the Vulkan backend does not support wrapping of msaa render targets directly. In
    // general this is not an issue since swapchain images in vulkan are never multisampled. Thus if
    // you want a multisampled RT it is best to wrap the swapchain images and then let Skia handle
    // creating and owning the MSAA images.
    if (backendRT.sampleCnt()) {
        return nullptr;
    }

    const GrVkImageInfo* info = backendRT.getVkImageInfo();
    if (!info) {
        return nullptr;
    }
    if (VK_NULL_HANDLE == info->fImage) {
        return nullptr;
    }

    GrSurfaceDesc desc;
    desc.fConfig = backendRT.config();
    desc.fFlags = kRenderTarget_GrSurfaceFlag;
    desc.fWidth = backendRT.width();
    desc.fHeight = backendRT.height();
    desc.fSampleCnt = 0;

    SkASSERT(kDefault_GrSurfaceOrigin != origin);
    desc.fOrigin = origin;

    sk_sp<GrVkRenderTarget> tgt = GrVkRenderTarget::MakeWrappedRenderTarget(this, desc, info);
    if (tgt && backendRT.stencilBits()) {
        if (!createStencilAttachmentForRenderTarget(tgt.get(), desc.fWidth, desc.fHeight)) {
            return nullptr;
        }
    }
    return tgt;
}

sk_sp<GrRenderTarget> GrVkGpu::onWrapBackendTextureAsRenderTarget(const GrBackendTexture& tex,
                                                                  GrSurfaceOrigin origin,
                                                                  int sampleCnt) {
    const GrVkImageInfo* info = tex.getVkImageInfo();
    if (!info) {
        return nullptr;
    }
    if (VK_NULL_HANDLE == info->fImage) {
        return nullptr;
    }

    GrSurfaceDesc desc;
    desc.fFlags = kRenderTarget_GrSurfaceFlag;
    desc.fConfig = tex.config();
    desc.fWidth = tex.width();
    desc.fHeight = tex.height();
    desc.fSampleCnt = this->caps()->getSampleCount(sampleCnt, tex.config());

    desc.fOrigin = resolve_origin(origin);

    sk_sp<GrVkRenderTarget> tgt = GrVkRenderTarget::MakeWrappedRenderTarget(this, desc, info);
    return tgt;
}

void GrVkGpu::generateMipmap(GrVkTexture* tex) {
    // don't do anything for linearly tiled textures (can't have mipmaps)
    if (tex->isLinearTiled()) {
        SkDebugf("Trying to create mipmap for linear tiled texture");
        return;
    }

    // determine if we can blit to and from this format
    const GrVkCaps& caps = this->vkCaps();
    if (!caps.configCanBeDstofBlit(tex->config(), false) ||
        !caps.configCanBeSrcofBlit(tex->config(), false) ||
        !caps.mipMapSupport()) {
        return;
    }

    if (this->vkCaps().mustSubmitCommandsBeforeCopyOp()) {
        this->submitCommandBuffer(kSkip_SyncQueue);
    }

    // We may need to resolve the texture first if it is also a render target
    GrVkRenderTarget* texRT = static_cast<GrVkRenderTarget*>(tex->asRenderTarget());
    if (texRT) {
        this->internalResolveRenderTarget(texRT, false);
    }

    int width = tex->width();
    int height = tex->height();
    VkImageBlit blitRegion;
    memset(&blitRegion, 0, sizeof(VkImageBlit));

    // SkMipMap doesn't include the base level in the level count so we have to add 1
    uint32_t levelCount = SkMipMap::ComputeLevelCount(tex->width(), tex->height()) + 1;
    if (levelCount != tex->mipLevels()) {
        const GrVkResource* oldResource = tex->resource();
        oldResource->ref();
        // grab handle to the original image resource
        VkImage oldImage = tex->image();

        // change the original image's layout so we can copy from it
        tex->setImageLayout(this, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                            VK_ACCESS_TRANSFER_READ_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, false);

        if (!tex->reallocForMipmap(this, levelCount)) {
            oldResource->unref(this);
            return;
        }
        // change the new image's layout so we can blit to it
        tex->setImageLayout(this, VK_IMAGE_LAYOUT_GENERAL,
                            VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, false);

        // Blit original image to top level of new image
        blitRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
        blitRegion.srcOffsets[0] = { 0, 0, 0 };
        blitRegion.srcOffsets[1] = { width, height, 1 };
        blitRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
        blitRegion.dstOffsets[0] = { 0, 0, 0 };
        blitRegion.dstOffsets[1] = { width, height, 1 };

        fCurrentCmdBuffer->blitImage(this,
                                     oldResource,
                                     oldImage,
                                     VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                     tex->resource(),
                                     tex->image(),
                                     VK_IMAGE_LAYOUT_GENERAL,
                                     1,
                                     &blitRegion,
                                     VK_FILTER_LINEAR);

        oldResource->unref(this);
    } else {
        // change layout of the layers so we can write to them.
        tex->setImageLayout(this, VK_IMAGE_LAYOUT_GENERAL,
                            VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, false);
    }

    // setup memory barrier
    SkASSERT(kUnknown_GrPixelConfig != GrVkFormatToPixelConfig(tex->imageFormat()));
    VkImageAspectFlags aspectFlags = VK_IMAGE_ASPECT_COLOR_BIT;
    VkImageMemoryBarrier imageMemoryBarrier = {
        VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,  // sType
        NULL,                                    // pNext
        VK_ACCESS_TRANSFER_WRITE_BIT,            // srcAccessMask
        VK_ACCESS_TRANSFER_READ_BIT,             // dstAccessMask
        VK_IMAGE_LAYOUT_GENERAL,                 // oldLayout
        VK_IMAGE_LAYOUT_GENERAL,                 // newLayout
        VK_QUEUE_FAMILY_IGNORED,                 // srcQueueFamilyIndex
        VK_QUEUE_FAMILY_IGNORED,                 // dstQueueFamilyIndex
        tex->image(),                            // image
        { aspectFlags, 0, 1, 0, 1 }              // subresourceRange
    };

    // Blit the miplevels
    uint32_t mipLevel = 1;
    while (mipLevel < levelCount) {
        int prevWidth = width;
        int prevHeight = height;
        width = SkTMax(1, width / 2);
        height = SkTMax(1, height / 2);

        // Make the previous level's transfer write visible to this level's transfer read
        // before blitting down from it.
        imageMemoryBarrier.subresourceRange.baseMipLevel = mipLevel - 1;
        this->addImageMemoryBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
                                    false, &imageMemoryBarrier);

        blitRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, mipLevel - 1, 0, 1 };
        blitRegion.srcOffsets[0] = { 0, 0, 0 };
        blitRegion.srcOffsets[1] = { prevWidth, prevHeight, 1 };
        blitRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, mipLevel, 0, 1 };
        blitRegion.dstOffsets[0] = { 0, 0, 0 };
        blitRegion.dstOffsets[1] = { width, height, 1 };
        fCurrentCmdBuffer->blitImage(this,
                                     *tex,
                                     *tex,
                                     1,
                                     &blitRegion,
                                     VK_FILTER_LINEAR);
        ++mipLevel;
    }
}

////////////////////////////////////////////////////////////////////////////////

GrStencilAttachment* GrVkGpu::createStencilAttachmentForRenderTarget(const GrRenderTarget* rt,
                                                                     int width,
                                                                     int height) {
    SkASSERT(width >= rt->width());
    SkASSERT(height >= rt->height());

    int samples = rt->numStencilSamples();

    const GrVkCaps::StencilFormat& sFmt = this->vkCaps().preferedStencilFormat();

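    // Allocate a new stencil attachment at the requested (possibly larger) dimensions, using the
    // render target's stencil sample count and the device's preferred stencil format.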
    GrVkStencilAttachment* stencil(GrVkStencilAttachment::Create(this,
                                                                 width,
                                                                 height,
                                                                 samples,
                                                                 sFmt));
    fStats.incStencilAttachmentCreates();
    return stencil;
}

////////////////////////////////////////////////////////////////////////////////

bool copy_testing_data(GrVkGpu* gpu, void* srcData, const GrVkAlloc& alloc,
                       size_t srcRowBytes, size_t dstRowBytes, int h) {
    void* mapPtr;
    VkResult err = GR_VK_CALL(gpu->vkInterface(), MapMemory(gpu->device(),
                                                            alloc.fMemory,
                                                            alloc.fOffset,
                                                            dstRowBytes * h,
                                                            0,
                                                            &mapPtr));
    if (err) {
        return false;
    }

    if (srcData) {
        // If there is no padding on dst we can do a single memcpy.
        // This assumes the srcData comes in with no padding.
        SkRectMemcpy(mapPtr, static_cast<size_t>(dstRowBytes),
                     srcData, srcRowBytes, srcRowBytes, h);
    } else {
        // If there is no srcData we always copy 0's into the textures so that they are
        // initialized with some data.
        if (srcRowBytes == static_cast<size_t>(dstRowBytes)) {
            memset(mapPtr, 0, srcRowBytes * h);
        } else {
            for (int i = 0; i < h; ++i) {
                memset(mapPtr, 0, srcRowBytes);
                mapPtr = SkTAddOffset<void>(mapPtr, static_cast<size_t>(dstRowBytes));
            }
        }
    }
    GrVkMemory::FlushMappedAlloc(gpu, alloc);
    GR_VK_CALL(gpu->vkInterface(), UnmapMemory(gpu->device(), alloc.fMemory));
    return true;
}

GrBackendObject GrVkGpu::createTestingOnlyBackendTexture(void* srcData, int w, int h,
                                                         GrPixelConfig config,
                                                         bool isRenderTarget) {
    VkFormat pixelFormat;
    if (!GrPixelConfigToVkFormat(config, &pixelFormat)) {
        return 0;
    }

    bool linearTiling = false;
    if (!fVkCaps->isConfigTexturable(config)) {
        return 0;
    }

    if (isRenderTarget && !fVkCaps->isConfigRenderable(config, false)) {
        return 0;
    }

    if (fVkCaps->isConfigTexturableLinearly(config) &&
        (!isRenderTarget || fVkCaps->isConfigRenderableLinearly(config, false))) {
        linearTiling = true;
    }

    VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT;
    usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
    usageFlags |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    if (isRenderTarget) {
        usageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
    }

    VkImage image = VK_NULL_HANDLE;
    GrVkAlloc alloc = { VK_NULL_HANDLE, 0, 0, 0 };

    VkImageTiling imageTiling = linearTiling ? VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL;
    VkImageLayout initialLayout = (VK_IMAGE_TILING_LINEAR == imageTiling)
                                          ? VK_IMAGE_LAYOUT_PREINITIALIZED
                                          : VK_IMAGE_LAYOUT_UNDEFINED;

    // Create Image
    VkSampleCountFlagBits vkSamples;
    if (!GrSampleCountToVkSampleCount(1, &vkSamples)) {
        return 0;
    }

    const VkImageCreateInfo imageCreateInfo = {
        VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,  // sType
        NULL,                                 // pNext
        0,                                    // VkImageCreateFlags
        VK_IMAGE_TYPE_2D,                     // VkImageType
        pixelFormat,                          // VkFormat
        { (uint32_t)w, (uint32_t)h, 1 },      // VkExtent3D
        1,                                    // mipLevels
        1,                                    // arrayLayers
        vkSamples,                            // samples
        imageTiling,                          // VkImageTiling
        usageFlags,                           // VkImageUsageFlags
        VK_SHARING_MODE_EXCLUSIVE,            // VkSharingMode
        0,                                    // queueFamilyCount
        0,                                    // pQueueFamilyIndices
        initialLayout                         // initialLayout
    };

    GR_VK_CALL_ERRCHECK(this->vkInterface(),
                        CreateImage(this->device(), &imageCreateInfo, nullptr, &image));

    if (!GrVkMemory::AllocAndBindImageMemory(this, image, linearTiling, &alloc)) {
        VK_CALL(DestroyImage(this->device(), image, nullptr));
        return 0;
    }

    size_t bpp = GrBytesPerPixel(config);
    size_t rowCopyBytes = bpp * w;
    if (linearTiling) {
        const VkImageSubresource subres = {
            VK_IMAGE_ASPECT_COLOR_BIT,
            0,  // mipLevel
            0,  // arraySlice
        };
        VkSubresourceLayout layout;

        VK_CALL(GetImageSubresourceLayout(fDevice, image, &subres, &layout));

        if (!copy_testing_data(this, srcData, alloc, rowCopyBytes,
                               static_cast<size_t>(layout.rowPitch), h)) {
            GrVkMemory::FreeImageMemory(this, linearTiling, alloc);
            VK_CALL(DestroyImage(fDevice, image, nullptr));
            return 0;
        }
    } else {
        SkASSERT(w && h);

        VkBuffer buffer;
        VkBufferCreateInfo bufInfo;
        memset(&bufInfo, 0, sizeof(VkBufferCreateInfo));
        bufInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
        bufInfo.flags = 0;
        bufInfo.size = rowCopyBytes * h;
        bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
        bufInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
        bufInfo.queueFamilyIndexCount = 0;
        bufInfo.pQueueFamilyIndices = nullptr;
        VkResult err;
        err = VK_CALL(CreateBuffer(fDevice, &bufInfo, nullptr, &buffer));

        if (err) {
            GrVkMemory::FreeImageMemory(this, linearTiling, alloc);
            VK_CALL(DestroyImage(fDevice, image, nullptr));
            return 0;
        }

        GrVkAlloc bufferAlloc = { VK_NULL_HANDLE, 0, 0, 0 };
        if (!GrVkMemory::AllocAndBindBufferMemory(this, buffer, GrVkBuffer::kCopyRead_Type,
                                                  true, &bufferAlloc)) {
            GrVkMemory::FreeImageMemory(this, linearTiling, alloc);
            VK_CALL(DestroyImage(fDevice, image, nullptr));
            VK_CALL(DestroyBuffer(fDevice, buffer, nullptr));
            return 0;
        }

        if (!copy_testing_data(this, srcData, bufferAlloc, rowCopyBytes, rowCopyBytes, h)) {
            GrVkMemory::FreeImageMemory(this, linearTiling, alloc);
            VK_CALL(DestroyImage(fDevice, image, nullptr));
            GrVkMemory::FreeBufferMemory(this, GrVkBuffer::kCopyRead_Type, bufferAlloc);
            VK_CALL(DestroyBuffer(fDevice, buffer, nullptr));
            return 0;
        }

        const VkCommandBufferAllocateInfo cmdInfo = {
            VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,  // sType
            NULL,                                            // pNext
            fCmdPool,                                        // commandPool
            VK_COMMAND_BUFFER_LEVEL_PRIMARY,                 // level
            1                                                // bufferCount
        };

        VkCommandBuffer cmdBuffer;
        err = VK_CALL(AllocateCommandBuffers(fDevice, &cmdInfo, &cmdBuffer));
        if (err) {
            GrVkMemory::FreeImageMemory(this, linearTiling, alloc);
            VK_CALL(DestroyImage(fDevice, image, nullptr));
            GrVkMemory::FreeBufferMemory(this, GrVkBuffer::kCopyRead_Type, bufferAlloc);
            VK_CALL(DestroyBuffer(fDevice, buffer, nullptr));
            return 0;
        }

        VkCommandBufferBeginInfo cmdBufferBeginInfo;
        memset(&cmdBufferBeginInfo, 0, sizeof(VkCommandBufferBeginInfo));
        cmdBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
        cmdBufferBeginInfo.pNext = nullptr;
        cmdBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
        cmdBufferBeginInfo.pInheritanceInfo = nullptr;

        err = VK_CALL(BeginCommandBuffer(cmdBuffer, &cmdBufferBeginInfo));
        SkASSERT(!err);

        // Set image layout and add barrier
        VkImageMemoryBarrier barrier;
        memset(&barrier, 0, sizeof(VkImageMemoryBarrier));
        barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
        barrier.pNext = nullptr;
        barrier.srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(initialLayout);
        barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
        barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
        barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
        barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
        barrier.image = image;
        barrier.subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 };

        VK_CALL(CmdPipelineBarrier(cmdBuffer,
                                   GrVkMemory::LayoutToPipelineStageFlags(initialLayout),
                                   VK_PIPELINE_STAGE_TRANSFER_BIT,
                                   0,
                                   0, nullptr,
                                   0, nullptr,
                                   1, &barrier));
        initialLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;

        // Submit copy command
        VkBufferImageCopy region;
        memset(&region, 0, sizeof(VkBufferImageCopy));
        region.bufferOffset = 0;
        region.bufferRowLength = w;
        region.bufferImageHeight = h;
        region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
        region.imageOffset = { 0, 0, 0 };
        region.imageExtent = { (uint32_t)w, (uint32_t)h, 1 };

        VK_CALL(CmdCopyBufferToImage(cmdBuffer, buffer, image, initialLayout, 1, &region));

        // End CommandBuffer
        err = VK_CALL(EndCommandBuffer(cmdBuffer));
        SkASSERT(!err);

        // Create Fence for queue
        VkFence fence;
        VkFenceCreateInfo fenceInfo;
        memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
        fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;

        err = VK_CALL(CreateFence(fDevice, &fenceInfo, nullptr, &fence));
        SkASSERT(!err);

        VkSubmitInfo submitInfo;
        memset(&submitInfo, 0, sizeof(VkSubmitInfo));
        submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
        submitInfo.pNext = nullptr;
        submitInfo.waitSemaphoreCount = 0;
        submitInfo.pWaitSemaphores = nullptr;
        submitInfo.pWaitDstStageMask = 0;
        submitInfo.commandBufferCount = 1;
        submitInfo.pCommandBuffers = &cmdBuffer;
        submitInfo.signalSemaphoreCount = 0;
        submitInfo.pSignalSemaphores = nullptr;
        err = VK_CALL(QueueSubmit(this->queue(), 1, &submitInfo, fence));
        SkASSERT(!err);

        err = VK_CALL(WaitForFences(fDevice, 1, &fence, true, UINT64_MAX));
        if (VK_TIMEOUT == err) {
            GrVkMemory::FreeImageMemory(this, linearTiling, alloc);
            VK_CALL(DestroyImage(fDevice, image, nullptr));
            GrVkMemory::FreeBufferMemory(this, GrVkBuffer::kCopyRead_Type, bufferAlloc);
            VK_CALL(DestroyBuffer(fDevice, buffer, nullptr));
            VK_CALL(FreeCommandBuffers(fDevice, fCmdPool, 1, &cmdBuffer));
            VK_CALL(DestroyFence(fDevice, fence, nullptr));
            SkDebugf("Fence failed to signal: %d\n", err);
            SkFAIL("failing");
        }
        SkASSERT(!err);

        // Clean up transfer resources
        GrVkMemory::FreeBufferMemory(this, GrVkBuffer::kCopyRead_Type, bufferAlloc);
        VK_CALL(DestroyBuffer(fDevice, buffer, nullptr));
        VK_CALL(FreeCommandBuffers(fDevice, fCmdPool, 1, &cmdBuffer));
        VK_CALL(DestroyFence(fDevice, fence, nullptr));
    }

    GrVkImageInfo* info = new GrVkImageInfo;
    info->fImage = image;
    info->fAlloc = alloc;
    info->fImageTiling = imageTiling;
    info->fImageLayout = initialLayout;
    info->fFormat = pixelFormat;
    info->fLevelCount = 1;

    return (GrBackendObject)info;
}

bool GrVkGpu::isTestingOnlyBackendTexture(GrBackendObject id) const {
    const GrVkImageInfo* backend = reinterpret_cast<const GrVkImageInfo*>(id);

    if (backend && backend->fImage && backend->fAlloc.fMemory) {
        VkMemoryRequirements req;
        memset(&req, 0, sizeof(req));
        GR_VK_CALL(this->vkInterface(), GetImageMemoryRequirements(fDevice,
                                                                   backend->fImage,
                                                                   &req));
        // TODO: find a better check
        // This will probably fail with a different driver
        return (req.size > 0) && (req.size <= 8192 * 8192);
    }

    return false;
}

void GrVkGpu::deleteTestingOnlyBackendTexture(GrBackendObject id, bool abandon) {
    GrVkImageInfo* backend = reinterpret_cast<GrVkImageInfo*>(id);
    if (backend) {
        if (!abandon) {
            // something in the command buffer may still be using this, so force submit
            this->submitCommandBuffer(kForce_SyncQueue);
            GrVkImage::DestroyImageInfo(this, backend);
        }
        delete backend;
    }
}

////////////////////////////////////////////////////////////////////////////////

void GrVkGpu::addMemoryBarrier(VkPipelineStageFlags srcStageMask,
                               VkPipelineStageFlags dstStageMask,
                               bool byRegion,
                               VkMemoryBarrier* barrier) const {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->pipelineBarrier(this,
                                       srcStageMask,
                                       dstStageMask,
                                       byRegion,
                                       GrVkCommandBuffer::kMemory_BarrierType,
                                       barrier);
}

void GrVkGpu::addBufferMemoryBarrier(VkPipelineStageFlags srcStageMask,
                                     VkPipelineStageFlags dstStageMask,
                                     bool byRegion,
                                     VkBufferMemoryBarrier* barrier) const {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->pipelineBarrier(this,
                                       srcStageMask,
                                       dstStageMask,
                                       byRegion,
                                       GrVkCommandBuffer::kBufferMemory_BarrierType,
                                       barrier);
}

void GrVkGpu::addImageMemoryBarrier(VkPipelineStageFlags srcStageMask,
                                    VkPipelineStageFlags dstStageMask,
                                    bool byRegion,
                                    VkImageMemoryBarrier* barrier) const {
    SkASSERT(fCurrentCmdBuffer);
    fCurrentCmdBuffer->pipelineBarrier(this,
                                       srcStageMask,
                                       dstStageMask,
                                       byRegion,
                                       GrVkCommandBuffer::kImageMemory_BarrierType,
                                       barrier);
}

void GrVkGpu::finishFlush() {
    // Submit the current command buffer to the Queue
    this->submitCommandBuffer(kSkip_SyncQueue);
}

void GrVkGpu::clearStencil(GrRenderTarget* target) {
    if (nullptr == target) {
        return;
    }
    GrStencilAttachment* stencil = target->renderTargetPriv().getStencilAttachment();
    GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)stencil;

    VkClearDepthStencilValue vkStencilColor;
    memset(&vkStencilColor, 0, sizeof(VkClearDepthStencilValue));

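    // Transition the stencil image to TRANSFER_DST so it can be cleared with the
    // clearDepthStencilImage call below.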
    vkStencil->setImageLayout(this,
                              VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                              VK_ACCESS_TRANSFER_WRITE_BIT,
                              VK_PIPELINE_STAGE_TRANSFER_BIT,
                              false);

    VkImageSubresourceRange subRange;
    memset(&subRange, 0, sizeof(VkImageSubresourceRange));
    subRange.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
    subRange.baseMipLevel = 0;
    subRange.levelCount = 1;
    subRange.baseArrayLayer = 0;
    subRange.layerCount = 1;

    // TODO: I imagine that most times we want to clear a stencil it will be at the beginning of a
    // draw. Thus we should look into using the load op functions on the render pass to clear out
    // the stencil there.
    fCurrentCmdBuffer->clearDepthStencilImage(this, vkStencil, &vkStencilColor, 1, &subRange);
}

inline bool can_copy_image(const GrSurface* dst,
                           const GrSurface* src,
                           const GrVkGpu* gpu) {
    const GrRenderTarget* dstRT = dst->asRenderTarget();
    const GrRenderTarget* srcRT = src->asRenderTarget();
    if (dstRT && srcRT) {
        if (srcRT->numColorSamples() != dstRT->numColorSamples()) {
            return false;
        }
    } else if (dstRT) {
        if (dstRT->numColorSamples() > 1) {
            return false;
        }
    } else if (srcRT) {
        if (srcRT->numColorSamples() > 1) {
            return false;
        }
    }

    // We require that all vulkan GrSurfaces have been created with transfer_dst and transfer_src
    // as image usage flags.
    if (src->origin() == dst->origin() &&
        GrBytesPerPixel(src->config()) == GrBytesPerPixel(dst->config())) {
        return true;
    }

    return false;
}

void GrVkGpu::copySurfaceAsCopyImage(GrSurface* dst,
                                     GrSurface* src,
                                     GrVkImage* dstImage,
                                     GrVkImage* srcImage,
                                     const SkIRect& srcRect,
                                     const SkIPoint& dstPoint) {
    SkASSERT(can_copy_image(dst, src, this));

    // These flags are for flushing/invalidating caches and for the dst image it doesn't matter if
    // the cache is flushed since it is only being written to.
    dstImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                             VK_ACCESS_TRANSFER_WRITE_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    srcImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                             VK_ACCESS_TRANSFER_READ_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    // Flip rect if necessary
    SkIRect srcVkRect = srcRect;
    int32_t dstY = dstPoint.fY;

    if (kBottomLeft_GrSurfaceOrigin == src->origin()) {
        SkASSERT(kBottomLeft_GrSurfaceOrigin == dst->origin());
        srcVkRect.fTop = src->height() - srcRect.fBottom;
        srcVkRect.fBottom = src->height() - srcRect.fTop;
        dstY = dst->height() - dstPoint.fY - srcVkRect.height();
    }

    VkImageCopy copyRegion;
    memset(&copyRegion, 0, sizeof(VkImageCopy));
    copyRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    copyRegion.srcOffset = { srcVkRect.fLeft, srcVkRect.fTop, 0 };
    copyRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    copyRegion.dstOffset = { dstPoint.fX, dstY, 0 };
    copyRegion.extent = { (uint32_t)srcVkRect.width(), (uint32_t)srcVkRect.height(), 1 };

    fCurrentCmdBuffer->copyImage(this,
                                 srcImage,
                                 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                 dstImage,
                                 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                 1,
                                 &copyRegion);

    SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
                                        srcRect.width(), srcRect.height());
    this->didWriteToSurface(dst, &dstRect);
}

inline bool can_copy_as_blit(const GrSurface* dst,
                             const GrSurface* src,
                             const GrVkImage* dstImage,
                             const GrVkImage* srcImage,
                             const GrVkGpu* gpu) {
    // We require that all vulkan GrSurfaces have been created with transfer_dst and transfer_src
    // as image usage flags.
    const GrVkCaps& caps = gpu->vkCaps();
    if (!caps.configCanBeDstofBlit(dst->config(), dstImage->isLinearTiled()) ||
        !caps.configCanBeSrcofBlit(src->config(), srcImage->isLinearTiled())) {
        return false;
    }

    // We cannot blit images that are multisampled. Will need to figure out if we can blit the
    // resolved msaa though.
    if ((dst->asRenderTarget() && dst->asRenderTarget()->numColorSamples() > 1) ||
        (src->asRenderTarget() && src->asRenderTarget()->numColorSamples() > 1)) {
        return false;
    }

    return true;
}

void GrVkGpu::copySurfaceAsBlit(GrSurface* dst,
                                GrSurface* src,
                                GrVkImage* dstImage,
                                GrVkImage* srcImage,
                                const SkIRect& srcRect,
                                const SkIPoint& dstPoint) {
    SkASSERT(can_copy_as_blit(dst, src, dstImage, srcImage, this));

    dstImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                             VK_ACCESS_TRANSFER_WRITE_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    srcImage->setImageLayout(this,
                             VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                             VK_ACCESS_TRANSFER_READ_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT,
                             false);

    // Flip rect if necessary
    SkIRect srcVkRect;
    srcVkRect.fLeft = srcRect.fLeft;
    srcVkRect.fRight = srcRect.fRight;
    SkIRect dstRect;
    dstRect.fLeft = dstPoint.fX;
    dstRect.fRight = dstPoint.fX + srcRect.width();

    if (kBottomLeft_GrSurfaceOrigin == src->origin()) {
        srcVkRect.fTop = src->height() - srcRect.fBottom;
        srcVkRect.fBottom = src->height() - srcRect.fTop;
    } else {
        srcVkRect.fTop = srcRect.fTop;
        srcVkRect.fBottom = srcRect.fBottom;
    }

    if (kBottomLeft_GrSurfaceOrigin == dst->origin()) {
        dstRect.fTop = dst->height() - dstPoint.fY - srcVkRect.height();
    } else {
        dstRect.fTop = dstPoint.fY;
    }
    dstRect.fBottom = dstRect.fTop + srcVkRect.height();

    // If we have different origins, we need to flip the top and bottom of the dst rect so that we
    // get the correct orientation of the copied data.
    if (src->origin() != dst->origin()) {
        SkTSwap(dstRect.fTop, dstRect.fBottom);
    }

    VkImageBlit blitRegion;
    memset(&blitRegion, 0, sizeof(VkImageBlit));
    blitRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    blitRegion.srcOffsets[0] = { srcVkRect.fLeft, srcVkRect.fTop, 0 };
    blitRegion.srcOffsets[1] = { srcVkRect.fRight, srcVkRect.fBottom, 1 };
    blitRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    blitRegion.dstOffsets[0] = { dstRect.fLeft, dstRect.fTop, 0 };
    blitRegion.dstOffsets[1] = { dstRect.fRight, dstRect.fBottom, 1 };

    fCurrentCmdBuffer->blitImage(this,
                                 *srcImage,
                                 *dstImage,
                                 1,
                                 &blitRegion,
                                 VK_FILTER_NEAREST);  // We never scale so any filter works here

    this->didWriteToSurface(dst, &dstRect);
}

inline bool can_copy_as_resolve(const GrSurface* dst,
                                const GrSurface* src,
                                const GrVkGpu* gpu) {
    // Our src must be a multisampled render target
    if (!src->asRenderTarget() || src->asRenderTarget()->numColorSamples() <= 1) {
        return false;
    }

    // The dst must not be a multisampled render target, except in the case where the dst is the
    // resolve texture connected to the msaa src. We check for this in case we are copying a part
    // of a surface to a different region in the same surface.
    if (dst->asRenderTarget() && dst->asRenderTarget()->numColorSamples() > 1 && dst != src) {
        return false;
    }

    // Surfaces must have the same origin.
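    // (A resolve copies regions as-is and cannot flip, so mismatched origins would produce
    // vertically flipped content.)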
    if (src->origin() != dst->origin()) {
        return false;
    }

    return true;
}

void GrVkGpu::copySurfaceAsResolve(GrSurface* dst,
                                   GrSurface* src,
                                   const SkIRect& srcRect,
                                   const SkIPoint& dstPoint) {
    GrVkRenderTarget* srcRT = static_cast<GrVkRenderTarget*>(src->asRenderTarget());
    this->resolveImage(dst, srcRT, srcRect, dstPoint);
}

bool GrVkGpu::onCopySurface(GrSurface* dst,
                            GrSurface* src,
                            const SkIRect& srcRect,
                            const SkIPoint& dstPoint) {
    if (can_copy_as_resolve(dst, src, this)) {
        this->copySurfaceAsResolve(dst, src, srcRect, dstPoint);
        return true;
    }

    if (this->vkCaps().mustSubmitCommandsBeforeCopyOp()) {
        this->submitCommandBuffer(GrVkGpu::kSkip_SyncQueue);
    }

    if (fCopyManager.copySurfaceAsDraw(this, dst, src, srcRect, dstPoint)) {
        return true;
    }

    GrVkImage* dstImage;
    GrVkImage* srcImage;
    GrRenderTarget* dstRT = dst->asRenderTarget();
    if (dstRT) {
        GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(dstRT);
        dstImage = vkRT->numColorSamples() > 1 ? vkRT->msaaImage() : vkRT;
    } else {
        SkASSERT(dst->asTexture());
        dstImage = static_cast<GrVkTexture*>(dst->asTexture());
    }
    GrRenderTarget* srcRT = src->asRenderTarget();
    if (srcRT) {
        GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(srcRT);
        srcImage = vkRT->numColorSamples() > 1 ? vkRT->msaaImage() : vkRT;
    } else {
        SkASSERT(src->asTexture());
        srcImage = static_cast<GrVkTexture*>(src->asTexture());
    }

    // For borrowed textures, we *only* want to copy using draws (to avoid layout changes)
    if (srcImage->isBorrowed()) {
        return false;
    }

    if (can_copy_image(dst, src, this)) {
        this->copySurfaceAsCopyImage(dst, src, dstImage, srcImage, srcRect, dstPoint);
        return true;
    }

    if (can_copy_as_blit(dst, src, dstImage, srcImage, this)) {
        this->copySurfaceAsBlit(dst, src, dstImage, srcImage, srcRect, dstPoint);
        return true;
    }

    return false;
}

void GrVkGpu::onQueryMultisampleSpecs(GrRenderTarget* rt, const GrStencilSettings&,
                                      int* effectiveSampleCnt, SamplePattern*) {
    // TODO: stub.
    SkASSERT(!this->caps()->sampleLocationsSupport());
    *effectiveSampleCnt = rt->numStencilSamples();
}

bool GrVkGpu::onGetReadPixelsInfo(GrSurface* srcSurface, int width, int height, size_t rowBytes,
                                  GrPixelConfig readConfig, DrawPreference* drawPreference,
                                  ReadPixelTempDrawInfo* tempDrawInfo) {
    // These settings we will always want if a temp draw is performed.
    tempDrawInfo->fTempSurfaceDesc.fFlags = kRenderTarget_GrSurfaceFlag;
    tempDrawInfo->fTempSurfaceDesc.fWidth = width;
    tempDrawInfo->fTempSurfaceDesc.fHeight = height;
    tempDrawInfo->fTempSurfaceDesc.fSampleCnt = 0;
    tempDrawInfo->fTempSurfaceDesc.fOrigin = kTopLeft_GrSurfaceOrigin; // no CPU y-flip for TL.
    tempDrawInfo->fTempSurfaceFit = SkBackingFit::kApprox;

    // For now assume no swizzling, we may change that below.
    tempDrawInfo->fSwizzle = GrSwizzle::RGBA();

    // Depends on why we need/want a temp draw. Start off assuming no change; the surface we read
    // from will be srcConfig and we will read readConfig pixels from it.
    // Note that if we require a draw and return a non-renderable format for the temp surface the
    // base class will fail for us.
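    // (Hypothetical example: a read of kBGRA_8888_GrPixelConfig pixels from a
    // kRGBA_8888_GrPixelConfig surface takes the config-change path below, which elevates the
    // draw preference and retargets the temp surface to the read config.)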
    tempDrawInfo->fTempSurfaceDesc.fConfig = srcSurface->config();
    tempDrawInfo->fReadConfig = readConfig;

    if (srcSurface->config() == readConfig) {
        return true;
    }

    // Any config change requires a draw
    ElevateDrawPreference(drawPreference, kRequireDraw_DrawPreference);
    tempDrawInfo->fTempSurfaceDesc.fConfig = readConfig;
    tempDrawInfo->fReadConfig = readConfig;

    return true;
}

bool GrVkGpu::onReadPixels(GrSurface* surface,
                           int left, int top, int width, int height,
                           GrPixelConfig config,
                           void* buffer,
                           size_t rowBytes) {
    VkFormat pixelFormat;
    if (!GrPixelConfigToVkFormat(config, &pixelFormat)) {
        return false;
    }

    GrVkImage* image = nullptr;
    GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(surface->asRenderTarget());
    if (rt) {
        // resolve the render target if necessary
        switch (rt->getResolveType()) {
            case GrVkRenderTarget::kCantResolve_ResolveType:
                return false;
            case GrVkRenderTarget::kAutoResolves_ResolveType:
                break;
            case GrVkRenderTarget::kCanResolve_ResolveType:
                this->internalResolveRenderTarget(rt, false);
                break;
            default:
                SkFAIL("Unknown resolve type");
        }
        image = rt;
    } else {
        image = static_cast<GrVkTexture*>(surface->asTexture());
    }

    if (!image) {
        return false;
    }

    // Change layout of our target so it can be used as a copy source
    image->setImageLayout(this,
                          VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                          VK_ACCESS_TRANSFER_READ_BIT,
                          VK_PIPELINE_STAGE_TRANSFER_BIT,
                          false);

    size_t bpp = GrBytesPerPixel(config);
    size_t tightRowBytes = bpp * width;
    bool flipY = kBottomLeft_GrSurfaceOrigin == surface->origin();

    VkBufferImageCopy region;
    memset(&region, 0, sizeof(VkBufferImageCopy));

    bool copyFromOrigin = this->vkCaps().mustDoCopiesFromOrigin();
    if (copyFromOrigin) {
        region.imageOffset = { 0, 0, 0 };
        region.imageExtent = { (uint32_t)(left + width),
                               (uint32_t)(flipY ? surface->height() - top : top + height),
                               1
                             };
    } else {
        VkOffset3D offset = {
            left,
            flipY ? surface->height() - top - height : top,
            0
        };
        region.imageOffset = offset;
        region.imageExtent = { (uint32_t)width, (uint32_t)height, 1 };
    }

    size_t transBufferRowBytes = bpp * region.imageExtent.width;
    GrVkTransferBuffer* transferBuffer =
            static_cast<GrVkTransferBuffer*>(this->createBuffer(transBufferRowBytes * height,
                                                                kXferGpuToCpu_GrBufferType,
                                                                kStream_GrAccessPattern));

    // Copy the image to a buffer so we can map it to cpu memory
    region.bufferOffset = transferBuffer->offset();
    region.bufferRowLength = 0; // Forces RowLength to be width. We handle the rowBytes below.
    region.bufferImageHeight = 0; // Forces height to be tightly packed. Only useful for 3d images.
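    // (Per the Vulkan spec, a bufferRowLength/bufferImageHeight of 0 means "tightly packed to
    // imageExtent"; the caller's rowBytes padding is reapplied on the CPU after the readback.)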
    region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };

    fCurrentCmdBuffer->copyImageToBuffer(this,
                                         image,
                                         VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                         transferBuffer,
                                         1,
                                         &region);

    // make sure the copy to buffer has finished
    transferBuffer->addMemoryBarrier(this,
                                     VK_ACCESS_TRANSFER_WRITE_BIT,
                                     VK_ACCESS_HOST_READ_BIT,
                                     VK_PIPELINE_STAGE_TRANSFER_BIT,
                                     VK_PIPELINE_STAGE_HOST_BIT,
                                     false);

    // We need to submit the current command buffer to the Queue and make sure it finishes before
    // we can copy the data out of the buffer.
    this->submitCommandBuffer(kForce_SyncQueue);
    GrVkMemory::InvalidateMappedAlloc(this, transferBuffer->alloc());
    void* mappedMemory = transferBuffer->map();

    if (copyFromOrigin) {
        uint32_t skipRows = region.imageExtent.height - height;
        mappedMemory = (char*)mappedMemory + transBufferRowBytes * skipRows + bpp * left;
    }

    if (flipY) {
        const char* srcRow = reinterpret_cast<const char*>(mappedMemory);
        char* dstRow = reinterpret_cast<char*>(buffer) + (height - 1) * rowBytes;
        for (int y = 0; y < height; y++) {
            memcpy(dstRow, srcRow, tightRowBytes);
            srcRow += transBufferRowBytes;
            dstRow -= rowBytes;
        }
    } else {
        SkRectMemcpy(buffer, rowBytes, mappedMemory, transBufferRowBytes, tightRowBytes, height);
    }

    transferBuffer->unmap();
    transferBuffer->unref();
    return true;
}

// The RenderArea bounds we pass into BeginRenderPass must have a start x value that is a multiple
// of the granularity. The width must also be a multiple of the granularity or equal to the width
// of the entire attachment. Similar requirements apply to the y and height components.
void adjust_bounds_to_granularity(SkIRect* dstBounds, const SkIRect& srcBounds,
                                  const VkExtent2D& granularity, int maxWidth, int maxHeight) {
    // Adjust width
    if (0 != granularity.width && 1 != granularity.width) {
        // Start with the right side of rect so we know if we end up going past maxWidth.
        int rightAdj = srcBounds.fRight % granularity.width;
        if (rightAdj != 0) {
            rightAdj = granularity.width - rightAdj;
        }
        dstBounds->fRight = srcBounds.fRight + rightAdj;
        if (dstBounds->fRight > maxWidth) {
            dstBounds->fRight = maxWidth;
            dstBounds->fLeft = 0;
        } else {
            dstBounds->fLeft = srcBounds.fLeft - srcBounds.fLeft % granularity.width;
        }
    } else {
        dstBounds->fLeft = srcBounds.fLeft;
        dstBounds->fRight = srcBounds.fRight;
    }

    // Adjust height
    if (0 != granularity.height && 1 != granularity.height) {
        // Start with the bottom side of rect so we know if we end up going past maxHeight.
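        // (Worked example with assumed numbers: granularity.height == 64 and
        // srcBounds.fBottom == 100 give bottomAdj == 28, so fBottom rounds up to 128, while a
        // srcBounds.fTop of 70 rounds down to 64.)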
        int bottomAdj = srcBounds.fBottom % granularity.height;
        if (bottomAdj != 0) {
            bottomAdj = granularity.height - bottomAdj;
        }
        dstBounds->fBottom = srcBounds.fBottom + bottomAdj;
        if (dstBounds->fBottom > maxHeight) {
            dstBounds->fBottom = maxHeight;
            dstBounds->fTop = 0;
        } else {
            dstBounds->fTop = srcBounds.fTop - srcBounds.fTop % granularity.height;
        }
    } else {
        dstBounds->fTop = srcBounds.fTop;
        dstBounds->fBottom = srcBounds.fBottom;
    }
}

void GrVkGpu::submitSecondaryCommandBuffer(const SkTArray<GrVkSecondaryCommandBuffer*>& buffers,
                                           const GrVkRenderPass* renderPass,
                                           const VkClearValue* colorClear,
                                           GrVkRenderTarget* target,
                                           const SkIRect& bounds) {
    const SkIRect* pBounds = &bounds;
    SkIRect flippedBounds;
    if (kBottomLeft_GrSurfaceOrigin == target->origin()) {
        flippedBounds = bounds;
        flippedBounds.fTop = target->height() - bounds.fBottom;
        flippedBounds.fBottom = target->height() - bounds.fTop;
        pBounds = &flippedBounds;
    }

    // The bounds we use for the render pass must honor the render-area granularity supported
    // by the device.
    const VkExtent2D& granularity = renderPass->granularity();
    SkIRect adjustedBounds;
    if ((0 != granularity.width && 1 != granularity.width) ||
        (0 != granularity.height && 1 != granularity.height)) {
        adjust_bounds_to_granularity(&adjustedBounds, *pBounds, granularity,
                                     target->width(), target->height());
        pBounds = &adjustedBounds;
    }

    fCurrentCmdBuffer->beginRenderPass(this, renderPass, colorClear, *target, *pBounds, true);
    for (int i = 0; i < buffers.count(); ++i) {
        fCurrentCmdBuffer->executeCommands(this, buffers[i]);
    }
    fCurrentCmdBuffer->endRenderPass(this);

    this->didWriteToSurface(target, &bounds);
}

GrFence SK_WARN_UNUSED_RESULT GrVkGpu::insertFence() {
    VkFenceCreateInfo createInfo;
    memset(&createInfo, 0, sizeof(VkFenceCreateInfo));
    createInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
    createInfo.pNext = nullptr;
    createInfo.flags = 0;
    VkFence fence = VK_NULL_HANDLE;

    VK_CALL_ERRCHECK(CreateFence(this->device(), &createInfo, nullptr, &fence));
    // An empty submission with a fence: the fence signals once all work previously submitted to
    // the queue has completed.
    VK_CALL(QueueSubmit(this->queue(), 0, nullptr, fence));

    GR_STATIC_ASSERT(sizeof(GrFence) >= sizeof(VkFence));
    return (GrFence)fence;
}

bool GrVkGpu::waitFence(GrFence fence, uint64_t timeout) {
    SkASSERT(VK_NULL_HANDLE != (VkFence)fence);

    VkResult result = VK_CALL(WaitForFences(this->device(), 1, (VkFence*)&fence, VK_TRUE,
                                            timeout));
    return (VK_SUCCESS == result);
}

void GrVkGpu::deleteFence(GrFence fence) const {
    VK_CALL(DestroyFence(this->device(), (VkFence)fence, nullptr));
}

sk_sp<GrSemaphore> SK_WARN_UNUSED_RESULT GrVkGpu::makeSemaphore(bool isOwned) {
    return GrVkSemaphore::Make(this, isOwned);
}

sk_sp<GrSemaphore> GrVkGpu::wrapBackendSemaphore(const GrBackendSemaphore& semaphore,
                                                 GrWrapOwnership ownership) {
    return GrVkSemaphore::MakeWrapped(this, semaphore.vkSemaphore(), ownership);
}

void GrVkGpu::insertSemaphore(sk_sp<GrSemaphore> semaphore, bool flush) {
    GrVkSemaphore* vkSem = static_cast<GrVkSemaphore*>(semaphore.get());

    const GrVkSemaphore::Resource* resource = vkSem->getResource();
    resource->ref();
    fSemaphoresToSignal.push_back(resource);

    if (flush) {
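        // kSkip_SyncQueue submits the pending command buffer (carrying the signal operation)
        // without blocking on queue completion.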
        this->submitCommandBuffer(kSkip_SyncQueue);
    }
}

void GrVkGpu::waitSemaphore(sk_sp<GrSemaphore> semaphore) {
    GrVkSemaphore* vkSem = static_cast<GrVkSemaphore*>(semaphore.get());

    const GrVkSemaphore::Resource* resource = vkSem->getResource();
    resource->ref();
    fSemaphoresToWaitOn.push_back(resource);
}

sk_sp<GrSemaphore> GrVkGpu::prepareTextureForCrossContextUsage(GrTexture* texture) {
    SkASSERT(texture);
    GrVkTexture* vkTexture = static_cast<GrVkTexture*>(texture);
    vkTexture->setImageLayout(this,
                              VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                              VK_ACCESS_SHADER_READ_BIT,
                              VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
                              false);
    this->submitCommandBuffer(kSkip_SyncQueue);

    // The image layout change serves as a barrier, so no semaphore is needed
    return nullptr;
}