/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrVkCommandBuffer.h"

#include "GrVkFramebuffer.h"
#include "GrVkImageView.h"
#include "GrVkPipeline.h"
#include "GrVkRenderPass.h"
#include "GrVkRenderTarget.h"
#include "GrVkPipelineState.h"
#include "GrVkTransferBuffer.h"
#include "GrVkUtil.h"
#include "SkRect.h"

void GrVkCommandBuffer::invalidateState() {
    fBoundVertexBuffer = VK_NULL_HANDLE;
    fBoundVertexBufferIsValid = false;
    fBoundIndexBuffer = VK_NULL_HANDLE;
    fBoundIndexBufferIsValid = false;

    memset(&fCachedViewport, 0, sizeof(VkViewport));
    fCachedViewport.width = -1.0f; // Viewport must have a width greater than 0

    memset(&fCachedScissor, 0, sizeof(VkRect2D));
    fCachedScissor.offset.x = -1; // Scissor offset must be non-negative to be valid

    for (int i = 0; i < 4; ++i) {
        fCachedBlendConstant[i] = -1.0f;
    }
}

void GrVkCommandBuffer::freeGPUData(const GrVkGpu* gpu) const {
    SkASSERT(!fIsActive);
    for (int i = 0; i < fTrackedResources.count(); ++i) {
        fTrackedResources[i]->unref(gpu);
    }

    for (int i = 0; i < fTrackedRecycledResources.count(); ++i) {
        fTrackedRecycledResources[i]->recycle(const_cast<GrVkGpu*>(gpu));
    }

    GR_VK_CALL(gpu->vkInterface(), FreeCommandBuffers(gpu->device(), gpu->cmdPool(),
                                                      1, &fCmdBuffer));

    this->onFreeGPUData(gpu);
}

void GrVkCommandBuffer::abandonSubResources() const {
    for (int i = 0; i < fTrackedResources.count(); ++i) {
        fTrackedResources[i]->unrefAndAbandon();
    }

    for (int i = 0; i < fTrackedRecycledResources.count(); ++i) {
        // We don't recycle resources when abandoning them.
        fTrackedRecycledResources[i]->unrefAndAbandon();
    }
}

void GrVkCommandBuffer::reset(GrVkGpu* gpu) {
    SkASSERT(!fIsActive);
    for (int i = 0; i < fTrackedResources.count(); ++i) {
        fTrackedResources[i]->unref(gpu);
    }
    for (int i = 0; i < fTrackedRecycledResources.count(); ++i) {
        fTrackedRecycledResources[i]->recycle(gpu);
    }

    if (++fNumResets > kNumRewindResetsBeforeFullReset) {
        fTrackedResources.reset();
        fTrackedRecycledResources.reset();
        fTrackedResources.setReserve(kInitialTrackedResourcesCount);
        fTrackedRecycledResources.setReserve(kInitialTrackedResourcesCount);
        fNumResets = 0;
    } else {
        fTrackedResources.rewind();
        fTrackedRecycledResources.rewind();
    }

    this->invalidateState();

    // we will retain resources for later use
    VkCommandBufferResetFlags flags = 0;
    GR_VK_CALL(gpu->vkInterface(), ResetCommandBuffer(fCmdBuffer, flags));

    this->onReset(gpu);
}

////////////////////////////////////////////////////////////////////////////////
// CommandBuffer commands
////////////////////////////////////////////////////////////////////////////////

void GrVkCommandBuffer::pipelineBarrier(const GrVkGpu* gpu,
                                        VkPipelineStageFlags srcStageMask,
                                        VkPipelineStageFlags dstStageMask,
                                        bool byRegion,
                                        BarrierType barrierType,
                                        void* barrier) const {
    SkASSERT(fIsActive);
    // For images we can have barriers inside of render passes, but they require us to add more
    // support in subpasses, which need self-dependencies to have barriers inside them. Also, we
    // can never have buffer barriers inside of a render pass. For now we just assert that we
    // are not in a render pass.
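    //
    // A sketch of a typical call (illustrative only; `image` and `cmdBuffer` are placeholders,
    // not locals in this file): transitioning a color attachment so it can be read as a
    // transfer source would look like
    //     VkImageMemoryBarrier imageBarrier;
    //     memset(&imageBarrier, 0, sizeof(VkImageMemoryBarrier));
    //     imageBarrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    //     imageBarrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    //     imageBarrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
    //     imageBarrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
    //     imageBarrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
    //     imageBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    //     imageBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    //     imageBarrier.image = image;
    //     imageBarrier.subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 };
    //     cmdBuffer->pipelineBarrier(gpu, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
    //                                VK_PIPELINE_STAGE_TRANSFER_BIT, false,
    //                                kImageMemory_BarrierType, &imageBarrier);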
    SkASSERT(!fActiveRenderPass);
    VkDependencyFlags dependencyFlags = byRegion ? VK_DEPENDENCY_BY_REGION_BIT : 0;

    switch (barrierType) {
        case kMemory_BarrierType: {
            const VkMemoryBarrier* barrierPtr = reinterpret_cast<VkMemoryBarrier*>(barrier);
            GR_VK_CALL(gpu->vkInterface(), CmdPipelineBarrier(fCmdBuffer, srcStageMask,
                                                              dstStageMask, dependencyFlags,
                                                              1, barrierPtr,
                                                              0, nullptr,
                                                              0, nullptr));
            break;
        }

        case kBufferMemory_BarrierType: {
            const VkBufferMemoryBarrier* barrierPtr =
                    reinterpret_cast<VkBufferMemoryBarrier*>(barrier);
            GR_VK_CALL(gpu->vkInterface(), CmdPipelineBarrier(fCmdBuffer, srcStageMask,
                                                              dstStageMask, dependencyFlags,
                                                              0, nullptr,
                                                              1, barrierPtr,
                                                              0, nullptr));
            break;
        }

        case kImageMemory_BarrierType: {
            const VkImageMemoryBarrier* barrierPtr =
                    reinterpret_cast<VkImageMemoryBarrier*>(barrier);
            GR_VK_CALL(gpu->vkInterface(), CmdPipelineBarrier(fCmdBuffer, srcStageMask,
                                                              dstStageMask, dependencyFlags,
                                                              0, nullptr,
                                                              0, nullptr,
                                                              1, barrierPtr));
            break;
        }
    }
}

void GrVkCommandBuffer::clearAttachments(const GrVkGpu* gpu,
                                         int numAttachments,
                                         const VkClearAttachment* attachments,
                                         int numRects,
                                         const VkClearRect* clearRects) const {
    SkASSERT(fIsActive);
    SkASSERT(fActiveRenderPass);
    SkASSERT(numAttachments > 0);
    SkASSERT(numRects > 0);
#ifdef SK_DEBUG
    for (int i = 0; i < numAttachments; ++i) {
        if (attachments[i].aspectMask == VK_IMAGE_ASPECT_COLOR_BIT) {
            uint32_t testIndex;
            SkAssertResult(fActiveRenderPass->colorAttachmentIndex(&testIndex));
            SkASSERT(testIndex == attachments[i].colorAttachment);
        }
    }
#endif
    GR_VK_CALL(gpu->vkInterface(), CmdClearAttachments(fCmdBuffer,
                                                       numAttachments,
                                                       attachments,
                                                       numRects,
                                                       clearRects));
}

void GrVkCommandBuffer::bindDescriptorSets(const GrVkGpu* gpu,
                                           GrVkPipelineState* pipelineState,
                                           VkPipelineLayout layout,
                                           uint32_t firstSet,
                                           uint32_t setCount,
                                           const VkDescriptorSet* descriptorSets,
                                           uint32_t dynamicOffsetCount,
                                           const uint32_t* dynamicOffsets) {
    SkASSERT(fIsActive);
    GR_VK_CALL(gpu->vkInterface(), CmdBindDescriptorSets(fCmdBuffer,
                                                         VK_PIPELINE_BIND_POINT_GRAPHICS,
                                                         layout,
                                                         firstSet,
                                                         setCount,
                                                         descriptorSets,
                                                         dynamicOffsetCount,
                                                         dynamicOffsets));
    pipelineState->addUniformResources(*this);
}

void GrVkCommandBuffer::bindDescriptorSets(const GrVkGpu* gpu,
                                           const SkTArray<const GrVkRecycledResource*>& recycled,
                                           const SkTArray<const GrVkResource*>& resources,
                                           VkPipelineLayout layout,
                                           uint32_t firstSet,
                                           uint32_t setCount,
                                           const VkDescriptorSet* descriptorSets,
                                           uint32_t dynamicOffsetCount,
                                           const uint32_t* dynamicOffsets) {
    SkASSERT(fIsActive);
    GR_VK_CALL(gpu->vkInterface(), CmdBindDescriptorSets(fCmdBuffer,
                                                         VK_PIPELINE_BIND_POINT_GRAPHICS,
                                                         layout,
                                                         firstSet,
                                                         setCount,
                                                         descriptorSets,
                                                         dynamicOffsetCount,
                                                         dynamicOffsets));
    for (int i = 0; i < recycled.count(); ++i) {
        this->addRecycledResource(recycled[i]);
    }
    for (int i = 0; i < resources.count(); ++i) {
        this->addResource(resources[i]);
    }
}

void GrVkCommandBuffer::bindPipeline(const GrVkGpu* gpu, const GrVkPipeline* pipeline) {
    SkASSERT(fIsActive);
    GR_VK_CALL(gpu->vkInterface(), CmdBindPipeline(fCmdBuffer,
                                                   VK_PIPELINE_BIND_POINT_GRAPHICS,
                                                   pipeline->pipeline()));
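    // Track the pipeline so it stays alive until this command buffer has finished executing
    // on the GPU; tracked resources are unreffed in reset() / freeGPUData().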
    this->addResource(pipeline);
}

void GrVkCommandBuffer::drawIndexed(const GrVkGpu* gpu,
                                    uint32_t indexCount,
                                    uint32_t instanceCount,
                                    uint32_t firstIndex,
                                    int32_t vertexOffset,
                                    uint32_t firstInstance) const {
    SkASSERT(fIsActive);
    SkASSERT(fActiveRenderPass);
    GR_VK_CALL(gpu->vkInterface(), CmdDrawIndexed(fCmdBuffer,
                                                  indexCount,
                                                  instanceCount,
                                                  firstIndex,
                                                  vertexOffset,
                                                  firstInstance));
}

void GrVkCommandBuffer::draw(const GrVkGpu* gpu,
                             uint32_t vertexCount,
                             uint32_t instanceCount,
                             uint32_t firstVertex,
                             uint32_t firstInstance) const {
    SkASSERT(fIsActive);
    SkASSERT(fActiveRenderPass);
    GR_VK_CALL(gpu->vkInterface(), CmdDraw(fCmdBuffer,
                                           vertexCount,
                                           instanceCount,
                                           firstVertex,
                                           firstInstance));
}

void GrVkCommandBuffer::setViewport(const GrVkGpu* gpu,
                                    uint32_t firstViewport,
                                    uint32_t viewportCount,
                                    const VkViewport* viewports) {
    SkASSERT(fIsActive);
    SkASSERT(1 == viewportCount);
    if (memcmp(viewports, &fCachedViewport, sizeof(VkViewport))) {
        GR_VK_CALL(gpu->vkInterface(), CmdSetViewport(fCmdBuffer,
                                                      firstViewport,
                                                      viewportCount,
                                                      viewports));
        fCachedViewport = viewports[0];
    }
}

void GrVkCommandBuffer::setScissor(const GrVkGpu* gpu,
                                   uint32_t firstScissor,
                                   uint32_t scissorCount,
                                   const VkRect2D* scissors) {
    SkASSERT(fIsActive);
    SkASSERT(1 == scissorCount);
    if (memcmp(scissors, &fCachedScissor, sizeof(VkRect2D))) {
        GR_VK_CALL(gpu->vkInterface(), CmdSetScissor(fCmdBuffer,
                                                     firstScissor,
                                                     scissorCount,
                                                     scissors));
        fCachedScissor = scissors[0];
    }
}

void GrVkCommandBuffer::setBlendConstants(const GrVkGpu* gpu,
                                          const float blendConstants[4]) {
    SkASSERT(fIsActive);
    if (memcmp(blendConstants, fCachedBlendConstant, 4 * sizeof(float))) {
        GR_VK_CALL(gpu->vkInterface(), CmdSetBlendConstants(fCmdBuffer, blendConstants));
        memcpy(fCachedBlendConstant, blendConstants, 4 * sizeof(float));
    }
}

////////////////////////////////////////////////////////////////////////////////
// PrimaryCommandBuffer
////////////////////////////////////////////////////////////////////////////////

GrVkPrimaryCommandBuffer::~GrVkPrimaryCommandBuffer() {
    // Should have ended any render pass we're in the middle of
    SkASSERT(!fActiveRenderPass);
}

GrVkPrimaryCommandBuffer* GrVkPrimaryCommandBuffer::Create(const GrVkGpu* gpu,
                                                           VkCommandPool cmdPool) {
    const VkCommandBufferAllocateInfo cmdInfo = {
        VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,   // sType
        NULL,                                             // pNext
        cmdPool,                                          // commandPool
        VK_COMMAND_BUFFER_LEVEL_PRIMARY,                  // level
        1                                                 // bufferCount
    };

    VkCommandBuffer cmdBuffer;
    VkResult err = GR_VK_CALL(gpu->vkInterface(), AllocateCommandBuffers(gpu->device(),
                                                                         &cmdInfo,
                                                                         &cmdBuffer));
    if (err) {
        return nullptr;
    }
    return new GrVkPrimaryCommandBuffer(cmdBuffer);
}
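
// Typical lifecycle (illustrative, using only functions defined in this file):
// Create() -> begin() -> record commands -> end() -> submitToQueue(). Once
// finished() reports the submit fence has signaled, reset() returns the buffer
// to a recordable state.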

void GrVkPrimaryCommandBuffer::begin(const GrVkGpu* gpu) {
    SkASSERT(!fIsActive);
    VkCommandBufferBeginInfo cmdBufferBeginInfo;
    memset(&cmdBufferBeginInfo, 0, sizeof(VkCommandBufferBeginInfo));
    cmdBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    cmdBufferBeginInfo.pNext = nullptr;
    cmdBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
    cmdBufferBeginInfo.pInheritanceInfo = nullptr;

    GR_VK_CALL_ERRCHECK(gpu->vkInterface(), BeginCommandBuffer(fCmdBuffer,
                                                               &cmdBufferBeginInfo));
    fIsActive = true;
}

void GrVkPrimaryCommandBuffer::end(const GrVkGpu* gpu) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    GR_VK_CALL_ERRCHECK(gpu->vkInterface(), EndCommandBuffer(fCmdBuffer));
    this->invalidateState();
    fIsActive = false;
}

void GrVkPrimaryCommandBuffer::beginRenderPass(const GrVkGpu* gpu,
                                               const GrVkRenderPass* renderPass,
                                               const VkClearValue* clearValues,
                                               const GrVkRenderTarget& target,
                                               const SkIRect& bounds,
                                               bool forSecondaryCB) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    SkASSERT(renderPass->isCompatible(target));

    VkRenderPassBeginInfo beginInfo;
    VkRect2D renderArea;
    renderArea.offset = { bounds.fLeft, bounds.fTop };
    renderArea.extent = { (uint32_t)bounds.width(), (uint32_t)bounds.height() };

    memset(&beginInfo, 0, sizeof(VkRenderPassBeginInfo));
    beginInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
    beginInfo.pNext = nullptr;
    beginInfo.renderPass = renderPass->vkRenderPass();
    beginInfo.framebuffer = target.framebuffer()->framebuffer();
    beginInfo.renderArea = renderArea;
    beginInfo.clearValueCount = renderPass->clearValueCount();
    beginInfo.pClearValues = clearValues;

    VkSubpassContents contents = forSecondaryCB ? VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS
                                                : VK_SUBPASS_CONTENTS_INLINE;

    GR_VK_CALL(gpu->vkInterface(), CmdBeginRenderPass(fCmdBuffer, &beginInfo, contents));
    fActiveRenderPass = renderPass;
    this->addResource(renderPass);
    target.addResources(*this);
}

void GrVkPrimaryCommandBuffer::endRenderPass(const GrVkGpu* gpu) {
    SkASSERT(fIsActive);
    SkASSERT(fActiveRenderPass);
    GR_VK_CALL(gpu->vkInterface(), CmdEndRenderPass(fCmdBuffer));
    fActiveRenderPass = nullptr;
}
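
// Illustrative ordering when draws are recorded into a secondary command buffer
// (not a call sequence made in this file): the render pass must be begun with
// forSecondaryCB == true so VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS is used.
//     primary->beginRenderPass(gpu, rp, clears, target, bounds, /*forSecondaryCB=*/true);
//     primary->executeCommands(gpu, secondary);
//     primary->endRenderPass(gpu);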

void GrVkPrimaryCommandBuffer::executeCommands(const GrVkGpu* gpu,
                                               GrVkSecondaryCommandBuffer* buffer) {
    SkASSERT(fIsActive);
    SkASSERT(!buffer->fIsActive);
    SkASSERT(fActiveRenderPass);
    SkASSERT(fActiveRenderPass->isCompatible(*buffer->fActiveRenderPass));

    GR_VK_CALL(gpu->vkInterface(), CmdExecuteCommands(fCmdBuffer, 1, &buffer->fCmdBuffer));
    buffer->ref();
    fSecondaryCommandBuffers.push_back(buffer);
    // When executing a secondary command buffer all state (besides render pass state) becomes
    // invalidated and must be reset. This includes bound buffers, pipelines, dynamic state, etc.
    this->invalidateState();
}

void GrVkPrimaryCommandBuffer::submitToQueue(
        const GrVkGpu* gpu,
        VkQueue queue,
        GrVkGpu::SyncQueue sync,
        const GrVkSemaphore::Resource* signalSemaphore,
        SkTArray<const GrVkSemaphore::Resource*>& waitSemaphores) {
    SkASSERT(!fIsActive);

    VkResult err;
    if (VK_NULL_HANDLE == fSubmitFence) {
        VkFenceCreateInfo fenceInfo;
        memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
        fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
        err = GR_VK_CALL(gpu->vkInterface(), CreateFence(gpu->device(), &fenceInfo, nullptr,
                                                         &fSubmitFence));
        SkASSERT(!err);
    } else {
        GR_VK_CALL(gpu->vkInterface(), ResetFences(gpu->device(), 1, &fSubmitFence));
    }

    if (signalSemaphore) {
        this->addResource(signalSemaphore);
    }

    int waitCount = waitSemaphores.count();
    SkTArray<VkSemaphore> vkWaitSems(waitCount);
    SkTArray<VkPipelineStageFlags> vkWaitStages(waitCount);
    if (waitCount) {
        for (int i = 0; i < waitCount; ++i) {
            this->addResource(waitSemaphores[i]);
            vkWaitSems.push_back(waitSemaphores[i]->semaphore());
            vkWaitStages.push_back(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
        }
    }
    SkTArray<VkSemaphore> vkSignalSem;
    if (signalSemaphore) {
        vkSignalSem.push_back(signalSemaphore->semaphore());
    }

    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.pNext = nullptr;
    submitInfo.waitSemaphoreCount = waitCount;
    submitInfo.pWaitSemaphores = vkWaitSems.begin();
    submitInfo.pWaitDstStageMask = vkWaitStages.begin();
    submitInfo.commandBufferCount = 1;
    submitInfo.pCommandBuffers = &fCmdBuffer;
    submitInfo.signalSemaphoreCount = vkSignalSem.count();
    submitInfo.pSignalSemaphores = vkSignalSem.begin();
    GR_VK_CALL_ERRCHECK(gpu->vkInterface(), QueueSubmit(queue, 1, &submitInfo, fSubmitFence));

    if (GrVkGpu::kForce_SyncQueue == sync) {
        err = GR_VK_CALL(gpu->vkInterface(),
                         WaitForFences(gpu->device(), 1, &fSubmitFence, true, UINT64_MAX));
        if (VK_TIMEOUT == err) {
            SkDebugf("Fence failed to signal: %d\n", err);
            SkFAIL("failing");
        }
        SkASSERT(!err);

        // Destroy the fence
        GR_VK_CALL(gpu->vkInterface(), DestroyFence(gpu->device(), fSubmitFence, nullptr));
        fSubmitFence = VK_NULL_HANDLE;
    }
}

bool GrVkPrimaryCommandBuffer::finished(const GrVkGpu* gpu) const {
    if (VK_NULL_HANDLE == fSubmitFence) {
        return true;
    }

    VkResult err = GR_VK_CALL(gpu->vkInterface(), GetFenceStatus(gpu->device(), fSubmitFence));
    switch (err) {
        case VK_SUCCESS:
            return true;

        case VK_NOT_READY:
            return false;

        default:
            SkDebugf("Error getting fence status: %d\n", err);
            SkFAIL("failing");
            break;
    }

    return false;
}

void GrVkPrimaryCommandBuffer::onReset(GrVkGpu* gpu) {
    for (int i = 0; i < fSecondaryCommandBuffers.count(); ++i) {
        gpu->resourceProvider().recycleSecondaryCommandBuffer(fSecondaryCommandBuffers[i]);
    }
    fSecondaryCommandBuffers.reset();
}
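
// The copy/blit/clear commands below must be recorded outside of a render pass.
// Each one tracks the GrVkResources it touches so the underlying Vulkan objects
// stay alive until the command buffer has finished executing.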

void GrVkPrimaryCommandBuffer::copyImage(const GrVkGpu* gpu,
                                         GrVkImage* srcImage,
                                         VkImageLayout srcLayout,
                                         GrVkImage* dstImage,
                                         VkImageLayout dstLayout,
                                         uint32_t copyRegionCount,
                                         const VkImageCopy* copyRegions) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addResource(srcImage->resource());
    this->addResource(dstImage->resource());
    GR_VK_CALL(gpu->vkInterface(), CmdCopyImage(fCmdBuffer,
                                                srcImage->image(),
                                                srcLayout,
                                                dstImage->image(),
                                                dstLayout,
                                                copyRegionCount,
                                                copyRegions));
}

void GrVkPrimaryCommandBuffer::blitImage(const GrVkGpu* gpu,
                                         const GrVkResource* srcResource,
                                         VkImage srcImage,
                                         VkImageLayout srcLayout,
                                         const GrVkResource* dstResource,
                                         VkImage dstImage,
                                         VkImageLayout dstLayout,
                                         uint32_t blitRegionCount,
                                         const VkImageBlit* blitRegions,
                                         VkFilter filter) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addResource(srcResource);
    this->addResource(dstResource);
    GR_VK_CALL(gpu->vkInterface(), CmdBlitImage(fCmdBuffer,
                                                srcImage,
                                                srcLayout,
                                                dstImage,
                                                dstLayout,
                                                blitRegionCount,
                                                blitRegions,
                                                filter));
}

void GrVkPrimaryCommandBuffer::copyImageToBuffer(const GrVkGpu* gpu,
                                                 GrVkImage* srcImage,
                                                 VkImageLayout srcLayout,
                                                 GrVkTransferBuffer* dstBuffer,
                                                 uint32_t copyRegionCount,
                                                 const VkBufferImageCopy* copyRegions) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addResource(srcImage->resource());
    this->addResource(dstBuffer->resource());
    GR_VK_CALL(gpu->vkInterface(), CmdCopyImageToBuffer(fCmdBuffer,
                                                        srcImage->image(),
                                                        srcLayout,
                                                        dstBuffer->buffer(),
                                                        copyRegionCount,
                                                        copyRegions));
}

void GrVkPrimaryCommandBuffer::copyBufferToImage(const GrVkGpu* gpu,
                                                 GrVkTransferBuffer* srcBuffer,
                                                 GrVkImage* dstImage,
                                                 VkImageLayout dstLayout,
                                                 uint32_t copyRegionCount,
                                                 const VkBufferImageCopy* copyRegions) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addResource(srcBuffer->resource());
    this->addResource(dstImage->resource());
    GR_VK_CALL(gpu->vkInterface(), CmdCopyBufferToImage(fCmdBuffer,
                                                        srcBuffer->buffer(),
                                                        dstImage->image(),
                                                        dstLayout,
                                                        copyRegionCount,
                                                        copyRegions));
}

void GrVkPrimaryCommandBuffer::updateBuffer(GrVkGpu* gpu,
                                            GrVkBuffer* dstBuffer,
                                            VkDeviceSize dstOffset,
                                            VkDeviceSize dataSize,
                                            const void* data) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    SkASSERT(0 == (dstOffset & 0x03));  // four byte aligned
    // TODO: handle larger transfer sizes
    SkASSERT(dataSize <= 65536);
    SkASSERT(0 == (dataSize & 0x03));   // four byte aligned
    this->addResource(dstBuffer->resource());
    GR_VK_CALL(gpu->vkInterface(), CmdUpdateBuffer(fCmdBuffer,
                                                   dstBuffer->buffer(),
                                                   dstOffset,
                                                   dataSize,
                                                   (const uint32_t*) data));
}

void GrVkPrimaryCommandBuffer::clearColorImage(const GrVkGpu* gpu,
                                               GrVkImage* image,
                                               const VkClearColorValue* color,
                                               uint32_t subRangeCount,
                                               const VkImageSubresourceRange* subRanges) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addResource(image->resource());
    GR_VK_CALL(gpu->vkInterface(), CmdClearColorImage(fCmdBuffer,
                                                      image->image(),
                                                      image->currentLayout(),
                                                      color,
                                                      subRangeCount,
                                                      subRanges));
}
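
// As with clearColorImage, the image's current layout is passed straight through to
// Vulkan, so callers must already have transitioned the image to a layout that is
// valid for clears (VK_IMAGE_LAYOUT_GENERAL or VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL).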

void GrVkPrimaryCommandBuffer::clearDepthStencilImage(const GrVkGpu* gpu,
                                                      GrVkImage* image,
                                                      const VkClearDepthStencilValue* color,
                                                      uint32_t subRangeCount,
                                                      const VkImageSubresourceRange* subRanges) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);
    this->addResource(image->resource());
    GR_VK_CALL(gpu->vkInterface(), CmdClearDepthStencilImage(fCmdBuffer,
                                                             image->image(),
                                                             image->currentLayout(),
                                                             color,
                                                             subRangeCount,
                                                             subRanges));
}

void GrVkPrimaryCommandBuffer::resolveImage(GrVkGpu* gpu,
                                            const GrVkImage& srcImage,
                                            const GrVkImage& dstImage,
                                            uint32_t regionCount,
                                            const VkImageResolve* regions) {
    SkASSERT(fIsActive);
    SkASSERT(!fActiveRenderPass);

    this->addResource(srcImage.resource());
    this->addResource(dstImage.resource());

    GR_VK_CALL(gpu->vkInterface(), CmdResolveImage(fCmdBuffer,
                                                   srcImage.image(),
                                                   srcImage.currentLayout(),
                                                   dstImage.image(),
                                                   dstImage.currentLayout(),
                                                   regionCount,
                                                   regions));
}

void GrVkPrimaryCommandBuffer::onFreeGPUData(const GrVkGpu* gpu) const {
    SkASSERT(!fActiveRenderPass);
    // Destroy the fence, if any
    if (VK_NULL_HANDLE != fSubmitFence) {
        GR_VK_CALL(gpu->vkInterface(), DestroyFence(gpu->device(), fSubmitFence, nullptr));
    }
}

////////////////////////////////////////////////////////////////////////////////
// SecondaryCommandBuffer
////////////////////////////////////////////////////////////////////////////////

GrVkSecondaryCommandBuffer* GrVkSecondaryCommandBuffer::Create(const GrVkGpu* gpu,
                                                               VkCommandPool cmdPool) {
    const VkCommandBufferAllocateInfo cmdInfo = {
        VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,   // sType
        NULL,                                             // pNext
        cmdPool,                                          // commandPool
        VK_COMMAND_BUFFER_LEVEL_SECONDARY,                // level
        1                                                 // bufferCount
    };

    VkCommandBuffer cmdBuffer;
    VkResult err = GR_VK_CALL(gpu->vkInterface(), AllocateCommandBuffers(gpu->device(),
                                                                         &cmdInfo,
                                                                         &cmdBuffer));
    if (err) {
        return nullptr;
    }
    return new GrVkSecondaryCommandBuffer(cmdBuffer);
}

void GrVkSecondaryCommandBuffer::begin(const GrVkGpu* gpu, const GrVkFramebuffer* framebuffer,
                                       const GrVkRenderPass* compatibleRenderPass) {
    SkASSERT(!fIsActive);
    SkASSERT(compatibleRenderPass);
    fActiveRenderPass = compatibleRenderPass;

    VkCommandBufferInheritanceInfo inheritanceInfo;
    memset(&inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
    inheritanceInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
    inheritanceInfo.pNext = nullptr;
    inheritanceInfo.renderPass = fActiveRenderPass->vkRenderPass();
    inheritanceInfo.subpass = 0; // Currently only using 1 subpass for each render pass
    inheritanceInfo.framebuffer = framebuffer ? framebuffer->framebuffer() : VK_NULL_HANDLE;
    inheritanceInfo.occlusionQueryEnable = false;
    inheritanceInfo.queryFlags = 0;
    inheritanceInfo.pipelineStatistics = 0;

    VkCommandBufferBeginInfo cmdBufferBeginInfo;
    memset(&cmdBufferBeginInfo, 0, sizeof(VkCommandBufferBeginInfo));
    cmdBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    cmdBufferBeginInfo.pNext = nullptr;
    cmdBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT |
                               VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
    cmdBufferBeginInfo.pInheritanceInfo = &inheritanceInfo;

    GR_VK_CALL_ERRCHECK(gpu->vkInterface(), BeginCommandBuffer(fCmdBuffer,
                                                               &cmdBufferBeginInfo));
    fIsActive = true;
}

void GrVkSecondaryCommandBuffer::end(const GrVkGpu* gpu) {
    SkASSERT(fIsActive);
    GR_VK_CALL_ERRCHECK(gpu->vkInterface(), EndCommandBuffer(fCmdBuffer));
    this->invalidateState();
    fIsActive = false;
}